From 817423c70651f10efc4ef3301850b8af0d6b9137 Mon Sep 17 00:00:00 2001
From: Raphael Sourty
Date: Tue, 25 Jan 2022 12:26:42 +0100
Subject: [PATCH] Update semanlink dataset and provide sentence similarity models wrapper.

---
 .gitignore | 3 +-
 ckb/__version__.py | 4 +-
 ckb/datasets/semanlink.py | 53 +-
 ckb/datasets/semanlink/labels.json | 11905 ++++
 ckb/datasets/semanlink/questions.csv | 3158 -
 ckb/datasets/semanlink/test.csv | 9434 ++-
 ckb/datasets/semanlink/train.csv | 85169 ++++++++++++++++++++++--
 ckb/datasets/semanlink/valid.csv | 7915 ++-
 ckb/models/__init__.py | 2 +
 ckb/models/similarity.py | 162 +
 ckb/models/transformer.py | 2 +-
 docs/api/datasets/Semanlink.md | 2 +
 docs/api/evaluation/Evaluation.md | 14 +-
 docs/api/models/BaseModel.md | 34 +-
 docs/api/models/DistillBert.md | 34 +-
 docs/api/models/FlauBERT.md | 34 +-
 docs/api/models/Transformer.md | 36 +-
 docs/api/sampling/NegativeSampling.md | 32 +-
 setup.py | 1 +
 19 files changed, 106662 insertions(+), 11332 deletions(-)
 create mode 100644 ckb/datasets/semanlink/labels.json
 delete mode 100644 ckb/datasets/semanlink/questions.csv
 create mode 100644 ckb/models/similarity.py

diff --git a/.gitignore b/.gitignore
index f4eab11..846ca96 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,4 +10,5 @@ __pycache__/
 *.pickle
 *.icloud
 models/
-api/
\ No newline at end of file
+api/
+*__pycache__
\ No newline at end of file
diff --git a/ckb/__version__.py b/ckb/__version__.py
index b0cbbbc..a6b2ab8 100644
--- a/ckb/__version__.py
+++ b/ckb/__version__.py
@@ -1,3 +1,3 @@
-VERSION = (0, 0, 2)
+VERSION = (0, 0, 3)
 
-__version__ = '.'.join(map(str, VERSION))
+__version__ = ".".join(map(str, VERSION))
diff --git a/ckb/datasets/semanlink.py b/ckb/datasets/semanlink.py
index 325cf6b..912aac5 100644
--- a/ckb/datasets/semanlink.py
+++ b/ckb/datasets/semanlink.py
@@ -1,23 +1,25 @@
-import os
+import json
 import pathlib
 
-import pandas as pd
-
 from mkb import datasets as mkb_datasets
 
 from ..utils import read_csv
 
-
 __all__ = ["Semanlink"]
 
 
 class Semanlink(mkb_datasets.Dataset):
     """Semanlink dataset.
 
+    Train triplets gather entities created before 2019-06-01.
+    Valid triplets gather entities created between 2019-06-01 and 2020-06-01.
+    Test triplets gather entities created between 2020-06-01 and 2021-10-27.
+
     Parameters
     ----------
         batch_size (int): Size of the batch.
-        shuffle (bool): Whether to shuffle the dataset or not.
+        use_labels (bool): Replaces the identifiers of the entities with their textual labels.
+        shuffle (bool): Whether to shuffle the dataset or not.
         pre_compute (bool): Pre-compute parameters such as weights when using translationnal model (TransE, DistMult, RotatE, pRotatE, ComplEx).
         num_workers (int): Number of workers dedicated to iterate on the dataset.
@@ -38,32 +40,51 @@ class Semanlink(mkb_datasets.Dataset):
 
     >>> from ckb import datasets
 
-    >>> dataset = datasets.Semanlink(batch_size=1, pre_compute=True, shuffle=True, seed=42)
+    >>> dataset = datasets.Semanlink(batch_size=1, pre_compute=False, shuffle=True, seed=42)
 
     >>> dataset
     Semanlink dataset
         Batch size  1
-        Entities  5454
-        Relations  4
-        Shuffle  True
-        Train triples  6422
-        Validation triples  803
-        Test triples  803
+        Entities  32502
+        Relations  40
+        Shuffle  True
+        Train triples  73828
+        Validation triples  5035
+        Test triples  6094
 
     """
 
     def __init__(
-        self, batch_size, shuffle=True, pre_compute=True, num_workers=1, seed=None
+        self,
+        batch_size,
+        use_labels=True,
+        shuffle=True,
+        pre_compute=True,
+        num_workers=1,
+        seed=None,
     ):
 
         self.filename = "semanlink"
 
         path = pathlib.Path(__file__).parent.joinpath(self.filename)
 
+        if use_labels:
+            with open(f"{path}/labels.json", "r") as entities_labels:
+                labels = json.load(entities_labels)
+
+        train = read_csv(path=f"{path}/train.csv", sep="|")
+        valid = read_csv(path=f"{path}/valid.csv", sep="|")
+        test = read_csv(path=f"{path}/test.csv", sep="|")
+
+        if use_labels:
+            train = [(labels.get(h, h), r, labels.get(t, t)) for h, r, t in train]
+            valid = [(labels.get(h, h), r, labels.get(t, t)) for h, r, t in valid]
+            test = [(labels.get(h, h), r, labels.get(t, t)) for h, r, t in test]
+
         super().__init__(
-            train=read_csv(path=f"{path}/train.csv", sep="|"),
-            valid=read_csv(path=f"{path}/valid.csv", sep="|"),
-            test=read_csv(path=f"{path}/test.csv", sep="|"),
+            train=train,
+            valid=valid,
+            test=test,
             classification=False,
             pre_compute=pre_compute,
             batch_size=batch_size,
diff --git a/ckb/datasets/semanlink/labels.json b/ckb/datasets/semanlink/labels.json
new file mode 100644
index 0000000..7a7b15c
--- /dev/null
+++ b/ckb/datasets/semanlink/labels.json
@@ -0,0 +1,11905 @@
+{
+    "http://www.semanlink.net/tag/tomcat_tips": "Tomcat tips",
+    "http://www.semanlink.net/tag/memoire_informatique": "M\u00e9moire (informatique)",
+    "http://www.semanlink.net/tag/css": "css",
+    "http://www.semanlink.net/tag/medicaments": "M\u00e9dicaments",
+    "http://www.semanlink.net/tag/daimler": "Daimler",
+    "http://www.semanlink.net/tag/nouvelle_route_de_la_soie": "Nouvelle Route de la Soie",
+    "http://www.semanlink.net/tag/mechant": "M\u00e9chant",
+    "http://www.semanlink.net/tag/peter_chilson": "Peter Chilson",
+    "http://www.semanlink.net/tag/scala": "Scala",
+    "http://www.semanlink.net/tag/jsonld_java": "Jsonld-java",
+    "http://www.semanlink.net/tag/biterm_topic_model": "Biterm Topic Model",
+    "http://www.semanlink.net/tag/rupert_westenthaler": "Rupert Westenthaler",
+    "http://www.semanlink.net/tag/gouvernement_sarkozy": "Gouvernement Sarkozy",
+    "http://www.semanlink.net/tag/belleme": "Bell\u00eame",
+    "http://www.semanlink.net/tag/json_2_json_ld": "JSON 2 JSON-LD",
+    "http://www.semanlink.net/tag/grande_bretagne": "Grande-Bretagne",
+    "http://www.semanlink.net/tag/birmanie": "Birmanie",
+    "http://www.semanlink.net/tag/google": "Google",
+    "http://www.semanlink.net/tag/securite": "S\u00e9curit\u00e9",
+    "http://www.semanlink.net/tag/category_embedding": "Category Embedding",
+    "http://www.semanlink.net/tag/entity_recommendation": "Entity recommendation",
+    "http://www.semanlink.net/tag/iswc": "ISWC",
+    "http://www.semanlink.net/tag/ml_domaines_d_application": "IA/ML: domaines d'application",
+    "http://www.semanlink.net/tag/phishing": "Phishing",
+    "http://www.semanlink.net/tag/salaire": "Salaire",
+    "http://www.semanlink.net/tag/hydra_templated_links": "Hydra/Templated Links",
+ "http://www.semanlink.net/tag/overfitting": "Overfitting/Generalization", + "http://www.semanlink.net/tag/rdf_tools": "RDF Tools", + "http://www.semanlink.net/tag/diacritics": "Diacritics", + "http://www.semanlink.net/tag/gilles_taddei": "Gilles Taddei", + "http://www.semanlink.net/tag/lava_jato": "Lava-jato", + "http://www.semanlink.net/tag/c2gweb_js": "C2GWeb-JS", + "http://www.semanlink.net/tag/latex": "LaTeX", + "http://www.semanlink.net/tag/ruby_on_rails": "Ruby on Rails", + "http://www.semanlink.net/tag/film_turc": "Film turc", + "http://www.semanlink.net/tag/ant": "Ant", + "http://www.semanlink.net/tag/toutankhamon": "Toutankhamon", + "http://www.semanlink.net/tag/liberte_d_expression": "Libert\u00e9 d'expression", + "http://www.semanlink.net/tag/uriqr": "Uriqr", + "http://www.semanlink.net/tag/googleplus": "GooglePlus", + "http://www.semanlink.net/tag/ai_cloud_service": "AI cloud service", + "http://www.semanlink.net/tag/javascript_promises": "JavaScript Promises", + "http://www.semanlink.net/tag/mbilia_bel": "Mbilia Bel", + "http://www.semanlink.net/tag/youtube_tutorial": "Youtube tutorial", + "http://www.semanlink.net/tag/david_ricardo": "David Ricardo", + "http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s": "ElasticSearch: nearest neighbor(s)", + "http://www.semanlink.net/tag/random_forest": "Random forest", + "http://www.semanlink.net/tag/guerres_puniques": "Guerres puniques", + "http://www.semanlink.net/tag/offshore_leaks": "Offshore leaks", + "http://www.semanlink.net/tag/c2gweb": "C2GWeb", + "http://www.semanlink.net/tag/how_much_information_in_a_language": "How much information in a language?", + "http://www.semanlink.net/tag/anne_hidalgo": "Anne Hidalgo", + "http://www.semanlink.net/tag/apache_org": "apache.org", + "http://www.semanlink.net/tag/fichage_genetique": "Fichage g\u00e9n\u00e9tique", + "http://www.semanlink.net/tag/shanghai_expo_2010": "Shanghai Expo 2010", + "http://www.semanlink.net/tag/cryptography": "Cryptography", + "http://www.semanlink.net/tag/mobile_device": "Mobile device", + "http://www.semanlink.net/tag/dystopia": "Dystopia", + "http://www.semanlink.net/tag/coling2020": "COLING2020", + "http://www.semanlink.net/tag/bijan_parsia": "Bijan Parsia", + "http://www.semanlink.net/tag/attention_in_graphs": "Attention in Graphs", + "http://www.semanlink.net/tag/luis_bunuel": "Luis Bu\u00f1uel", + "http://www.semanlink.net/tag/gorille": "Gorille", + "http://www.semanlink.net/tag/k_nearest_neighbors_algorithm": "k-nearest neighbors algorithm", + "http://www.semanlink.net/tag/machine_learning": "Machine learning", + "http://www.semanlink.net/tag/neoliberalism": "Neoliberalism", + "http://www.semanlink.net/tag/rideshare": "Rideshare", + "http://www.semanlink.net/tag/youtube": "YouTube", + "http://www.semanlink.net/tag/turtle": "Turtle", + "http://www.semanlink.net/tag/rdf_fails": "RDF Fails", + "http://www.semanlink.net/tag/semantic_web_project": "Semantic Web project", + "http://www.semanlink.net/tag/gnu_octave": "GNU Octave", + "http://www.semanlink.net/tag/lod_limitations_on_browseable_data": "LOD: Limitations on browseable data", + "http://www.semanlink.net/tag/summly": "summly", + "http://www.semanlink.net/tag/dropbox": "Dropbox", + "http://www.semanlink.net/tag/elasticsearch_annotated_text_field": "ElasticSearch: annotated text field", + "http://www.semanlink.net/tag/film_italien": "Film italien", + "http://www.semanlink.net/tag/croisades": "Croisades", + "http://www.semanlink.net/tag/apprendre": "Apprendre", + 
"http://www.semanlink.net/tag/mandelbrot": "Mandelbrot", + "http://www.semanlink.net/tag/intelligence_collective": "Intelligence collective", + "http://www.semanlink.net/tag/knowledge_graph_search_engine": "Knowledge graph search engine", + "http://www.semanlink.net/tag/lenka_zdeborova": "Lenka Zdeborov\u00e1", + "http://www.semanlink.net/tag/histoire_de_l_astronomie": "Histoire de l'astronomie", + "http://www.semanlink.net/tag/drupal_modules": "Drupal modules", + "http://www.semanlink.net/tag/talis_platform": "Talis platform", + "http://www.semanlink.net/tag/angela_merkel": "Angela Merkel", + "http://www.semanlink.net/tag/porto_rico": "Porto Rico", + "http://www.semanlink.net/tag/web_application_threats": "Web Application Threats", + "http://www.semanlink.net/tag/knowledge_base": "Knowledge bases", + "http://www.semanlink.net/tag/sinai": "Sina\u00ef", + "http://www.semanlink.net/tag/ian_horrocks": "Ian Horrocks", + "http://www.semanlink.net/tag/architecture_of_the_world_wide_web": "Web architecture", + "http://www.semanlink.net/tag/semantic_web_platform": "Semantic Web Platform", + "http://www.semanlink.net/tag/d2rq": "D2RQ", + "http://www.semanlink.net/tag/musee": "Mus\u00e9e", + "http://www.semanlink.net/tag/espace": "Espace", + "http://www.semanlink.net/tag/ane": "\u00c2ne", + "http://www.semanlink.net/tag/data_interchange_format": "Data Interchange Format", + "http://www.semanlink.net/tag/ml_sequential_data": "ML: Sequential data", + "http://www.semanlink.net/tag/word_embedding": "Word embeddings", + "http://www.semanlink.net/tag/semantic_web_sites": "semantic web sites", + "http://www.semanlink.net/tag/coursera_machine_learning": "Coursera: Machine Learning", + "http://www.semanlink.net/tag/pfizer": "Pfizer", + "http://www.semanlink.net/tag/uncertainty_in_deep_learning": "Uncertainty in Deep Learning", + "http://www.semanlink.net/tag/cruaute": "Cruaut\u00e9", + "http://www.semanlink.net/tag/leopard": "Leopard", + "http://www.semanlink.net/tag/okapi_bm25": "BM25", + "http://www.semanlink.net/tag/aichi": "A\u00efchi", + "http://www.semanlink.net/tag/ambre": "Ambre", + "http://www.semanlink.net/tag/transductive_learning": "Transductive Learning", + "http://www.semanlink.net/tag/embeddings": "Embeddings", + "http://www.semanlink.net/tag/arxiv": "Arxiv", + "http://www.semanlink.net/tag/www2007": "WWW 2007", + "http://www.semanlink.net/tag/le_pen": "Le Pen", + "http://www.semanlink.net/tag/semantic_web_crm": "Semantic Web: CRM", + "http://www.semanlink.net/tag/reinforcement_learning": "Reinforcement learning", + "http://www.semanlink.net/tag/rdf_net_api": "RDF Net API", + "http://www.semanlink.net/tag/periodes_glacieres": "P\u00e9riodes glaci\u00e8res", + "http://www.semanlink.net/tag/taliban": "Taliban", + "http://www.semanlink.net/tag/multidevice": "Multidevice", + "http://www.semanlink.net/tag/knowledge_distillation": "Knowledge distillation", + "http://www.semanlink.net/tag/recommender_systems": "Recommender Systems", + "http://www.semanlink.net/tag/del_icio_us": "del.icio.us", + "http://www.semanlink.net/tag/clustering_small_sets_of_short_texts": "Clustering small sets of short texts", + "http://www.semanlink.net/tag/encoding": "Encoding", + "http://www.semanlink.net/tag/adn_mitochondrial": "ADN mitochondrial", + "http://www.semanlink.net/tag/google_web_toolkit": "Google Web Toolkit", + "http://www.semanlink.net/tag/aidan_hogan": "Aidan Hogan", + "http://www.semanlink.net/tag/caetano_veloso": "Caetano Veloso", + "http://www.semanlink.net/tag/ouganda": "Ouganda", + 
"http://www.semanlink.net/tag/public_key_cryptography_in_browsers": "public key cryptography in browsers", + "http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data": "Coursera: Web Intelligence and Big Data", + "http://www.semanlink.net/tag/reporters_sans_frontieres": "Reporters sans fronti\u00e8res", + "http://www.semanlink.net/tag/communisme": "Communisme", + "http://www.semanlink.net/tag/www_2012": "WWW 2012", + "http://www.semanlink.net/tag/shenzhen": "Shenzhen", + "http://www.semanlink.net/tag/meryl_streep": "Meryl Streep", + "http://www.semanlink.net/tag/cassandra": "Cassandra", + "http://www.semanlink.net/tag/semantic_web_client_library": "Semantic Web Client Library", + "http://www.semanlink.net/tag/configuration_and_sw": "Configuration and SW", + "http://www.semanlink.net/tag/bon_prof": "Bon prof", + "http://www.semanlink.net/tag/documentation_tool": "Documentation tool", + "http://www.semanlink.net/tag/plastic_print": "Plastic print", + "http://www.semanlink.net/tag/robert_mcliam_wilson": "Robert McLiam Wilson", + "http://www.semanlink.net/tag/open_data": "Open Data", + "http://www.semanlink.net/tag/histoire_anglaise": "Histoire anglaise", + "http://www.semanlink.net/tag/pablo_neruda": "Pablo Neruda", + "http://www.semanlink.net/tag/search": "Search", + "http://www.semanlink.net/tag/latent_semantic_analysis": "Latent Semantic Analysis", + "http://www.semanlink.net/tag/industrie_pharmaceutique": "Industrie pharmaceutique", + "http://www.semanlink.net/tag/tf_idf": "TF-IDF", + "http://www.semanlink.net/tag/cross_validation": "Cross-validation", + "http://www.semanlink.net/tag/asie_mineure": "Asie mineure", + "http://www.semanlink.net/tag/juridique": "Juridique", + "http://www.semanlink.net/tag/convolutional_neural_network": "Convolutional neural network", + "http://www.semanlink.net/tag/torture": "Torture", + "http://www.semanlink.net/tag/mussolini": "Mussolini", + "http://www.semanlink.net/tag/functional_programming": "Functional programming", + "http://www.semanlink.net/tag/sdmx": "SDMX", + "http://www.semanlink.net/tag/stem_cell": "Cellule souche", + "http://www.semanlink.net/tag/baidu": "Baidu", + "http://www.semanlink.net/tag/aspect_detection": "Aspect Detection", + "http://www.semanlink.net/tag/politique": "Politique", + "http://www.semanlink.net/tag/transition_energetique": "Transition \u00e9nerg\u00e9tique", + "http://www.semanlink.net/tag/ml_nlp_blog": "ML/NLP blog", + "http://www.semanlink.net/tag/talis": "Talis", + "http://www.semanlink.net/tag/a_voir": "A voir", + "http://www.semanlink.net/tag/semantic_pingback": "Semantic pingback", + "http://www.semanlink.net/tag/ranked_entities_in_search_results": "Ranked Entities in Search Results", + "http://www.semanlink.net/tag/bombe_atomique": "Bombe atomique", + "http://www.semanlink.net/tag/support_vector_machine": "Support vector machine", + "http://www.semanlink.net/tag/forms": "Forms", + "http://www.semanlink.net/tag/web_tv": "Web TV", + "http://www.semanlink.net/tag/dense_passage_retrieval": "Dense Passage Retrieval", + "http://www.semanlink.net/tag/henri_iv": "Henri IV", + "http://www.semanlink.net/tag/negative_sampling": "Negative Sampling", + "http://www.semanlink.net/tag/fps_ec_web_14": "fps@EC-Web'14", + "http://www.semanlink.net/tag/entity_type_prediction": "Entity type prediction", + "http://www.semanlink.net/tag/virtual_knowledge_graph": "Virtual knowledge graph", + "http://www.semanlink.net/tag/freie_universitat_berlin": "Freie Universit\u00e4t Berlin", + "http://www.semanlink.net/tag/pantheon_paris": 
"Panth\u00e9on (Paris)", + "http://www.semanlink.net/tag/good_practice_when_generating_uris": "Good Practice When Generating URIs", + "http://www.semanlink.net/tag/domain_knowledge_deep_learning": "Domain Knowledge + Deep Learning", + "http://www.semanlink.net/tag/notre_dame_de_paris": "Notre-Dame de Paris", + "http://www.semanlink.net/tag/sonia_braga": "S\u00f4nia Braga", + "http://www.semanlink.net/tag/musubi": "Musubi", + "http://www.semanlink.net/tag/biohackers": "Biohackers", + "http://www.semanlink.net/tag/word_embeddings_with_lexical_resources": "Word embeddings with lexical resources", + "http://www.semanlink.net/tag/publication_scientifique": "Publication scientifique", + "http://www.semanlink.net/tag/googling": "Googling", + "http://www.semanlink.net/tag/cms": "CMS", + "http://www.semanlink.net/tag/linked_data": "Linked Data", + "http://www.semanlink.net/tag/nlp_short_texts": "NLP: short texts", + "http://www.semanlink.net/tag/chretiente": "Chr\u00e9tient\u00e9", + "http://www.semanlink.net/tag/ldow2012": "LDOW2012", + "http://www.semanlink.net/tag/kg_embeddings_library": "KG Embeddings Library", + "http://www.semanlink.net/tag/heinrich_barth": "Heinrich Barth", + "http://www.semanlink.net/tag/denisovan": "Denisovan", + "http://www.semanlink.net/tag/spring_boot": "Spring-Boot", + "http://www.semanlink.net/tag/web_search": "Web search", + "http://www.semanlink.net/tag/rest": "REST", + "http://www.semanlink.net/tag/apres_guerre": "Apr\u00e8s guerre", + "http://www.semanlink.net/tag/cancer": "Cancer", + "http://www.semanlink.net/tag/jena": "Jena", + "http://www.semanlink.net/tag/bernard_vatant": "Bernard Vatant", + "http://www.semanlink.net/tag/ernie": "ERNIE", + "http://www.semanlink.net/tag/sorting": "Sorting", + "http://www.semanlink.net/tag/535": "535", + "http://www.semanlink.net/tag/genetique_histoire": "G\u00e9n\u00e9tique + Histoire", + "http://www.semanlink.net/tag/roger_penrose": "Roger Penrose", + "http://www.semanlink.net/tag/erta_ale": "Erta Ale", + "http://www.semanlink.net/tag/linked_data_platform": "Linked Data Platform", + "http://www.semanlink.net/tag/artificial_neural_network": "Neural networks", + "http://www.semanlink.net/tag/referentiel_des_operations": "R\u00e9f\u00e9rentiel des op\u00e9rations", + "http://www.semanlink.net/tag/euro_2016": "Euro 2016", + "http://www.semanlink.net/tag/chalutage_en_eaux_profondes": "Chalutage en eaux profondes", + "http://www.semanlink.net/tag/www08": "WWW 2008", + "http://www.semanlink.net/tag/http_get_vs_post": "HTTP GET vs POST", + "http://www.semanlink.net/tag/web_dev": "Web dev", + "http://www.semanlink.net/tag/graph_convolutional_networks": "Graph Convolutional Networks", + "http://www.semanlink.net/tag/ulrike_sattler": "Ulrike Sattler", + "http://www.semanlink.net/tag/fast_ai_course": "Fast.ai course", + "http://www.semanlink.net/tag/bosch": "Bosch", + "http://www.semanlink.net/tag/intent_detection": "Intent detection", + "http://www.semanlink.net/tag/television": "T\u00e9l\u00e9vision", + "http://www.semanlink.net/tag/rdf_next_steps": "RDF Next Steps", + "http://www.semanlink.net/tag/machine_learning_course": "Machine Learning Course", + "http://www.semanlink.net/tag/semantic_fingerprints": "Semantic fingerprints", + "http://www.semanlink.net/tag/logistic_regression": "Logistic regression", + "http://www.semanlink.net/tag/fado_tropical": "Fado tropical", + "http://www.semanlink.net/tag/kapuscinski": "Kapuscinski", + "http://www.semanlink.net/tag/ciao_vito": "Ciao Vito", + "http://www.semanlink.net/tag/genetic_algorithm": 
"Genetic algorithm", + "http://www.semanlink.net/tag/acl_2019": "ACL 2019", + "http://www.semanlink.net/tag/provocation_policiere": "Provocation polici\u00e8re", + "http://www.semanlink.net/tag/coupe_du_monde_1998": "Coupe du monde 1998", + "http://www.semanlink.net/tag/first_americans": "First Americans", + "http://www.semanlink.net/tag/rdf_and_database": "RDF and database", + "http://www.semanlink.net/tag/semtechbiz_berlin_2012": "SemTechBiz Berlin 2012", + "http://www.semanlink.net/tag/knowledge_compilation": "Knowledge Compilation", + "http://www.semanlink.net/tag/fps_pres": "fps pres", + "http://www.semanlink.net/tag/linked_data_api": "Linked Data API", + "http://www.semanlink.net/tag/amazon_alexa": "Amazon Alexa", + "http://www.semanlink.net/tag/deep_learning": "Deep Learning", + "http://www.semanlink.net/tag/securite_informatique": "Cybersecurity", + "http://www.semanlink.net/tag/etat_de_la_france": "Etat de la France", + "http://www.semanlink.net/tag/turquie": "Turquie", + "http://www.semanlink.net/tag/programming": "Programming", + "http://www.semanlink.net/tag/nso_pegasus": "NSO/Pegasus", + "http://www.semanlink.net/tag/guerre": "War", + "http://www.semanlink.net/tag/artificial_intelligence": "Artificial Intelligence", + "http://www.semanlink.net/tag/snorql": "snorql", + "http://www.semanlink.net/tag/semantic_web_databases": "Semantic Web: databases", + "http://www.semanlink.net/tag/tim_bray": "Tim Bray", + "http://www.semanlink.net/tag/fermi_paradox": "Fermi's paradox", + "http://www.semanlink.net/tag/esa": "esa", + "http://www.semanlink.net/tag/darpa": "DARPA", + "http://www.semanlink.net/tag/apache_hive": "Apache Hive", + "http://www.semanlink.net/tag/http": "HTTP", + "http://www.semanlink.net/tag/rdf_template": "RDF Template", + "http://www.semanlink.net/tag/microsoft_research": "Microsoft Research", + "http://www.semanlink.net/tag/concept_learning": "Concept learning", + "http://www.semanlink.net/tag/immune_system": "Syst\u00e8me immunitaire", + "http://www.semanlink.net/tag/w3c_data_activity": "W3C Data Activity", + "http://www.semanlink.net/tag/stardog": "Stardog", + "http://www.semanlink.net/tag/tanis_kt": "Tanis-KT", + "http://www.semanlink.net/tag/tom_heath": "Tom Heath", + "http://www.semanlink.net/tag/driverless_car": "Driverless car", + "http://www.semanlink.net/tag/knowledge_graph_deep_learning": "Knowledge Graph + Deep Learning", + "http://www.semanlink.net/tag/annees_50": "Ann\u00e9es 50", + "http://www.semanlink.net/tag/retard_technologique_francais": "Retard technologique fran\u00e7ais", + "http://www.semanlink.net/tag/general_nlp_tasks": "General NLP tasks", + "http://www.semanlink.net/tag/big_brother": "Big Brother", + "http://www.semanlink.net/tag/openstructs": "OpenStructs", + "http://www.semanlink.net/tag/apache_spark": "Apache Spark", + "http://www.semanlink.net/tag/wiki_service": "Wiki service", + "http://www.semanlink.net/tag/seq2seq_encoder_decoder": "Sequence-To-Sequence Encoder-Decoder Architecture", + "http://www.semanlink.net/tag/christine_golbreich": "Christine Golbreich", + "http://www.semanlink.net/tag/ami": "Ami", + "http://www.semanlink.net/tag/tensorflow": "TensorFlow", + "http://www.semanlink.net/tag/hdmi": "HDMI", + "http://www.semanlink.net/tag/ouigour": "Ou\u00efgour", + "http://www.semanlink.net/tag/francois_scharffe": "Fran\u00e7ois Scharffe", + "http://www.semanlink.net/tag/url": "URL", + "http://www.semanlink.net/tag/osema_deri_renault_paper": "OSEMA/DERI-Renault paper", + "http://www.semanlink.net/tag/internet_regulation": "Internet 
regulation", + "http://www.semanlink.net/tag/fabien_gandon": "Fabien Gandon", + "http://www.semanlink.net/tag/petrole": "P\u00e9trole", + "http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn": "Sequence Modeling: CNN vs RNN", + "http://www.semanlink.net/tag/tech_company": "Tech company", + "http://www.semanlink.net/tag/europe_aberrations": "Europe : aberrations", + "http://www.semanlink.net/tag/antiquite_de_l_inde": "Antiquit\u00e9 de l'Inde", + "http://www.semanlink.net/tag/antonin_artaud": "Antonin Artaud", + "http://www.semanlink.net/tag/web_dev_framework": "Web dev framework", + "http://www.semanlink.net/tag/modernisation_de_l_etat": "Modernisation de l'\u00e9tat", + "http://www.semanlink.net/tag/lycee": "Lyc\u00e9e", + "http://www.semanlink.net/tag/semanlink_tag_finder": "Semanlink Tag Finder", + "http://www.semanlink.net/tag/carte_d_identite": "Carte d'identit\u00e9", + "http://www.semanlink.net/tag/roam": "Roam", + "http://www.semanlink.net/tag/lumieres": "Lumi\u00e8res", + "http://www.semanlink.net/tag/perche": "Perche", + "http://www.semanlink.net/tag/ornithorynque": "Ornithorynque", + "http://www.semanlink.net/tag/berkeley": "Berkeley", + "http://www.semanlink.net/tag/k_means_clustering": "k-means clustering", + "http://www.semanlink.net/tag/politique_francaise": "Politique fran\u00e7aise", + "http://www.semanlink.net/tag/nlp_pretraining": "NLP: pretraining", + "http://www.semanlink.net/tag/bill_de_hora": "Bill de h\u00d3ra", + "http://www.semanlink.net/tag/phrases_nlp": "Phrases (NLP)", + "http://www.semanlink.net/tag/molecular_biology": "Molecular Biology", + "http://www.semanlink.net/tag/tattoo": "Tattoo", + "http://www.semanlink.net/tag/rdf_and_soa": "RDF and SOA", + "http://www.semanlink.net/tag/semantic_hashing": "Semantic Hashing", + "http://www.semanlink.net/tag/hierarchical_classification_evaluation": "Hierarchical classification: evaluation", + "http://www.semanlink.net/tag/nosql_pour_les_nuls": "NoSQL pour les nuls", + "http://www.semanlink.net/tag/jussieu": "Jussieu", + "http://www.semanlink.net/tag/nips_2017": "NIPS 2017", + "http://www.semanlink.net/tag/rdf_browser": "RDF browser", + "http://www.semanlink.net/tag/la_france_est_un_pays_regicide": "La France est un pays r\u00e9gicide", + "http://www.semanlink.net/tag/incipit": "Incipit", + "http://www.semanlink.net/tag/pseudo_relevance_feedback": "Pseudo relevance feedback", + "http://www.semanlink.net/tag/desktop_search": "Desktop search", + "http://www.semanlink.net/tag/syntax_trees": "Syntax trees", + "http://www.semanlink.net/tag/andrew_mccallum": "Andrew McCallum", + "http://www.semanlink.net/tag/distributional_semantics": "Distributional semantics", + "http://www.semanlink.net/tag/browserify": "browserify", + "http://www.semanlink.net/tag/exploit": "Exploit", + "http://www.semanlink.net/tag/disco_hyperdata_browser": "Disco Hyperdata Browser", + "http://www.semanlink.net/tag/paul_miller": "Paul Miller", + "http://www.semanlink.net/tag/taxe_carbone": "Taxe carbone", + "http://www.semanlink.net/tag/vso": "VSO", + "http://www.semanlink.net/tag/semantic_web": "Semantic Web", + "http://www.semanlink.net/tag/ebooks": "ebooks", + "http://www.semanlink.net/tag/para": "Par\u00e1", + "http://www.semanlink.net/tag/configurator": "Configurator", + "http://www.semanlink.net/tag/atom_github": "ATOM (Text editor)", + "http://www.semanlink.net/tag/bibliotheconomie": "Biblioth\u00e9conomie", + "http://www.semanlink.net/tag/filets_a_nuages": "Filets \u00e0 nuages", + "http://www.semanlink.net/tag/tenere": 
"T\u00e9n\u00e9r\u00e9", + "http://www.semanlink.net/tag/nosql": "NOSQL", + "http://www.semanlink.net/tag/debug": "Debug", + "http://www.semanlink.net/tag/cia": "CIA", + "http://www.semanlink.net/tag/rdf_bus": "RDF bus", + "http://www.semanlink.net/tag/obamacare": "Obamacare", + "http://www.semanlink.net/tag/rdf_in_json": "RDF-in-JSON", + "http://www.semanlink.net/tag/osema_2011": "OSEMA 2011", + "http://www.semanlink.net/tag/snorkel": "Snorkel", + "http://www.semanlink.net/tag/knowledge_graph_conference_2019": "Knowledge Graph Conference 2019", + "http://www.semanlink.net/tag/alexandria": "Alexandria", + "http://www.semanlink.net/tag/paradise_papers": "Paradise Papers", + "http://www.semanlink.net/tag/open_university": "Open University", + "http://www.semanlink.net/tag/michael_rakowitz": "Michael Rakowitz", + "http://www.semanlink.net/tag/conjecture_de_goldbach": "Conjecture de Goldbach", + "http://www.semanlink.net/tag/computational_neuroscience": "Computational Neuroscience", + "http://www.semanlink.net/tag/cazuza": "Cazuza", + "http://www.semanlink.net/tag/multi_task_learning": "Multi-task learning", + "http://www.semanlink.net/tag/jeff_hawkins": "Jeff Hawkins", + "http://www.semanlink.net/tag/ldow2013": "LDOW2013", + "http://www.semanlink.net/tag/lod_museum": "LOD & museum", + "http://www.semanlink.net/tag/firefighter": "Firefighter", + "http://www.semanlink.net/tag/semanlink_feature_request": "Semanlink Feature Request", + "http://www.semanlink.net/tag/chine_usa": "Chine-USA", + "http://www.semanlink.net/tag/swrl": "SWRL", + "http://www.semanlink.net/tag/pythagore": "Pythagore", + "http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes": "Placentaires, marsupiaux et monotr\u00e8mes", + "http://www.semanlink.net/tag/acoustique_musicale": "Acoustique musicale", + "http://www.semanlink.net/tag/semantic_search": "Semantic Search", + "http://www.semanlink.net/tag/hepp_s_propertyvalue": "Hepp's PropertyValue", + "http://www.semanlink.net/tag/jdbc": "JDBC", + "http://www.semanlink.net/tag/resources_oriented_web_services": "Resources-Oriented Web Services", + "http://www.semanlink.net/tag/backpropagation_vs_biology": "Backpropagation vs Biology", + "http://www.semanlink.net/tag/swoogle": "Swoogle", + "http://www.semanlink.net/tag/textblob": "TextBlob", + "http://www.semanlink.net/tag/big_bang": "Big bang", + "http://www.semanlink.net/tag/www_2009": "WWW 2009", + "http://www.semanlink.net/tag/jsonld_mongodb": "JsonLD + MongoDB", + "http://www.semanlink.net/tag/choose_science": "Choose science", + "http://www.semanlink.net/tag/sparql_sample_code": "SPARQL: sample code", + "http://www.semanlink.net/tag/droit_et_internet": "Droit et internet", + "http://www.semanlink.net/tag/information_extraction": "Information extraction", + "http://www.semanlink.net/tag/ogm": "GMO", + "http://www.semanlink.net/tag/solr_documentation": "Solr documentation", + "http://www.semanlink.net/tag/linked_data_collaborative_editing": "Linked Data / collaborative editing", + "http://www.semanlink.net/tag/portland_or": "Portland (OR)", + "http://www.semanlink.net/tag/culture_et_sem_web": "Culture et sem web", + "http://www.semanlink.net/tag/sweo_renault_use_case": "SWEO: Renault use case", + "http://www.semanlink.net/tag/mais_ogm": "Ma\u00efs OGM", + "http://www.semanlink.net/tag/trusted_computing": "Trusted Computing", + "http://www.semanlink.net/tag/pericles": "P\u00e9ricl\u00e8s", + "http://www.semanlink.net/tag/java_dev": "Java dev", + "http://www.semanlink.net/tag/rdf_forms": "RDF forms", + 
"http://www.semanlink.net/tag/distilbert": "DistilBERT", + "http://www.semanlink.net/tag/curl": "cURL", + "http://www.semanlink.net/tag/inductive_bias": "Inductive bias", + "http://www.semanlink.net/tag/microblogs": "Microblogs", + "http://www.semanlink.net/tag/developpement": "D\u00e9veloppement", + "http://www.semanlink.net/tag/java_5": "Java 5", + "http://www.semanlink.net/tag/normale_sup": "Normale Sup", + "http://www.semanlink.net/tag/end_to_end_learning": "End-to-End Learning", + "http://www.semanlink.net/tag/musee_de_niamey": "Mus\u00e9e de Niamey", + "http://www.semanlink.net/tag/hierarchical_multi_label_classification": "Hierarchical multi-label classification ", + "http://www.semanlink.net/tag/scikit_learn": "scikit-learn", + "http://www.semanlink.net/tag/tchernobyl": "Tchernobyl", + "http://www.semanlink.net/tag/attentats_13_11_2015": "Attentats 13-11-2015", + "http://www.semanlink.net/tag/chevenement": "Chev\u00e8nement", + "http://www.semanlink.net/tag/nlp_using_knowledge_graphs": "Knowledge Graphs in NLP", + "http://www.semanlink.net/tag/iphone": "iphone", + "http://www.semanlink.net/tag/annotations": "Annotations", + "http://www.semanlink.net/tag/html_parsing": "HTML parsing", + "http://www.semanlink.net/tag/tensor": "Tensor", + "http://www.semanlink.net/tag/beethoven": "Beethoven", + "http://www.semanlink.net/tag/sem_web_future": "Sem web: future", + "http://www.semanlink.net/tag/abstractions_in_ai": "Abstractions in AI", + "http://www.semanlink.net/tag/optimisation_fiscale": "Optimisation fiscale", + "http://www.semanlink.net/tag/linked_data_publishing": "Linked Data publishing", + "http://www.semanlink.net/tag/sparql_demo": "SPARQL Demo", + "http://www.semanlink.net/tag/smalltalk": "Smalltalk", + "http://www.semanlink.net/tag/arameen": "Aram\u00e9en", + "http://www.semanlink.net/tag/memory_requirements_in_nn": "Memory requirements in NN", + "http://www.semanlink.net/tag/myfaces": "MyFaces", + "http://www.semanlink.net/tag/resteasy": "RESTEasy", + "http://www.semanlink.net/tag/real_time_communications": "Real-Time Communications", + "http://www.semanlink.net/tag/wtp": "WTP", + "http://www.semanlink.net/tag/text_feature_extraction": "Text feature extraction", + "http://www.semanlink.net/tag/jure_leskovec": "Jure Leskovec", + "http://www.semanlink.net/tag/yves_peirsman": "Yves Peirsman", + "http://www.semanlink.net/tag/beatles": "Beatles", + "http://www.semanlink.net/tag/self_organizing_maps": "Self-Organizing Maps", + "http://www.semanlink.net/tag/docker_python": "Docker-Python", + "http://www.semanlink.net/tag/open_endedness": "Open-endedness", + "http://www.semanlink.net/tag/justice_internationale": "Justice internationale", + "http://www.semanlink.net/tag/javascript_rdf_parser_in_ie": "Javascript RDF Parser in IE", + "http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer": "Bacterial to Animal Gene Transfer", + "http://www.semanlink.net/tag/tag_ontology": "Tag ontology", + "http://www.semanlink.net/tag/government_data_as_linked_data": "Government data as Linked Data", + "http://www.semanlink.net/tag/markus_lanthaler": "Markus Lanthaler", + "http://www.semanlink.net/tag/john_sofakolle": "John Sofakolle", + "http://www.semanlink.net/tag/ben_adida": "Ben Adida", + "http://www.semanlink.net/tag/anaconda": "Anaconda", + "http://www.semanlink.net/tag/chameau": "Chameau", + "http://www.semanlink.net/tag/global_brain": "Global brain", + "http://www.semanlink.net/tag/ntic_et_developpement": "NTIC et d\u00e9veloppement", + "http://www.semanlink.net/tag/ckb": "CKB", + 
"http://www.semanlink.net/tag/attali": "Attali", + "http://www.semanlink.net/tag/kde": "KDE", + "http://www.semanlink.net/tag/olaf_hartig": "Olaf Hartig", + "http://www.semanlink.net/tag/stack_overflow": "Stack Overflow", + "http://www.semanlink.net/tag/texas": "Texas", + "http://www.semanlink.net/tag/jena_fuseki": "Jena Fuseki", + "http://www.semanlink.net/tag/les_100_pieges_de_l_anglais": "Les 100 pi\u00e8ges de l'Anglais", + "http://www.semanlink.net/tag/minimum_wage": "Minimum wage", + "http://www.semanlink.net/tag/www_conference": "TheWebConf", + "http://www.semanlink.net/tag/regression_analysis": "Regression analysis", + "http://www.semanlink.net/tag/humour_noir": "Humour noir", + "http://www.semanlink.net/tag/peche": "P\u00eache", + "http://www.semanlink.net/tag/poutine": "Poutine", + "http://www.semanlink.net/tag/blackboxnlp_workshop_2018": "BlackboxNLP (2018 workshop)", + "http://www.semanlink.net/tag/sylvain": "Sylvain", + "http://www.semanlink.net/tag/semantic_feature_extraction": "Semantic feature extraction", + "http://www.semanlink.net/tag/thriller": "Thriller", + "http://www.semanlink.net/tag/barnaby_jack": "Barnaby Jack", + "http://www.semanlink.net/tag/cosmic_inflation": "Cosmic inflation", + "http://www.semanlink.net/tag/document_embeddings": "Document embeddings", + "http://www.semanlink.net/tag/emnlp_2021": "EMNLP 2021", + "http://www.semanlink.net/tag/rio_tinto": "Rio Tinto", + "http://www.semanlink.net/tag/syrie": "Syrie", + "http://www.semanlink.net/tag/applet": "Applet", + "http://www.semanlink.net/tag/liberalisme": "Lib\u00e9ralisme", + "http://www.semanlink.net/tag/paleolithique": "Pal\u00e9olithique", + "http://www.semanlink.net/tag/children_s_language_acquisition": "Children\u2019s language acquisition", + "http://www.semanlink.net/tag/cheat_sheet": "Cheat sheet", + "http://www.semanlink.net/tag/tandja": "Tandja", + "http://www.semanlink.net/tag/bioinformatics": "Bioinformatics", + "http://www.semanlink.net/tag/facebook_open_graph": "Facebook Open Graph", + "http://www.semanlink.net/tag/cloud_and_linked_data": "Cloud and Linked Data", + "http://www.semanlink.net/tag/ivan_herman": "Ivan Herman", + "http://www.semanlink.net/tag/multi_label_classification": "Multi-label classification", + "http://www.semanlink.net/tag/keras": "Keras", + "http://www.semanlink.net/tag/brain_machine_interface": "Brain-Machine Interface", + "http://www.semanlink.net/tag/knowledge_graph_augmented_language_models": "KG-augmented Language Models", + "http://www.semanlink.net/tag/australia_s_evolutionary_history": "Australia's evolutionary history", + "http://www.semanlink.net/tag/vaccin": "Vaccin", + "http://www.semanlink.net/tag/vint_cerf": "Vint Cerf", + "http://www.semanlink.net/tag/service_description": "Service description", + "http://www.semanlink.net/tag/readwriteweb_com": "ReadWriteWeb.com", + "http://www.semanlink.net/tag/yago": "Yago", + "http://www.semanlink.net/tag/bruxelles": "Bruxelles", + "http://www.semanlink.net/tag/litterature": "Litt\u00e9rature", + "http://www.semanlink.net/tag/doc_by_google": "Doc by Google", + "http://www.semanlink.net/tag/bertology": "Bertology", + "http://www.semanlink.net/tag/word_sense_disambiguation": "Word-sense disambiguation", + "http://www.semanlink.net/tag/client_side_xslt": "Client side XSLT", + "http://www.semanlink.net/tag/digital_economy": "Digital economy", + "http://www.semanlink.net/tag/embeddings_in_nlp": "Embeddings in NLP", + "http://www.semanlink.net/tag/chinua_achebe": "Chinua Achebe", + "http://www.semanlink.net/tag/tests": 
"Tests", + "http://www.semanlink.net/tag/semi_supervised_learning": "Semi-supervised learning", + "http://www.semanlink.net/tag/jeux_en_ligne": "Jeux en ligne", + "http://www.semanlink.net/tag/camembert_nlp": "CamemBERT", + "http://www.semanlink.net/tag/glove": "GloVe", + "http://www.semanlink.net/tag/tribunal_penal_international": "Tribunal P\u00e9nal International", + "http://www.semanlink.net/tag/knowledge_augmented_language_models": "Knowledge-augmented Language Models", + "http://www.semanlink.net/tag/eurogroupe": "Eurogroupe", + "http://www.semanlink.net/tag/plastic_waste_trade": "Plastic waste trade", + "http://www.semanlink.net/tag/cvs": "CVS", + "http://www.semanlink.net/tag/litterature_russe": "Litt\u00e9rature russe", + "http://www.semanlink.net/tag/tables": "Tables", + "http://www.semanlink.net/tag/inverse_functional_properties": "Inverse-functional properties", + "http://www.semanlink.net/tag/gilets_jaunes": "Gilets jaunes", + "http://www.semanlink.net/tag/hypiosvocampparismay2010": "HypiosVoCampParisMay2010", + "http://www.semanlink.net/tag/siren": "SIREn", + "http://www.semanlink.net/tag/yves_raymond": "Yves Raymond", + "http://www.semanlink.net/tag/linkedin": "LinkedIn", + "http://www.semanlink.net/tag/allen_institute_for_ai_a2i": "Allen Institute for AI (A2I)", + "http://www.semanlink.net/tag/okkam": "OKKAM", + "http://www.semanlink.net/tag/aho_corasick_algorithm": "Aho\u2013Corasick algorithm", + "http://www.semanlink.net/tag/quentin_tarantino": "Quentin Tarantino", + "http://www.semanlink.net/tag/relation_learning": "Relation Learning", + "http://www.semanlink.net/tag/reshaping": "Reshaping", + "http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys": "IBM SPSS Text Analytics for Surveys", + "http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles": "Loi sur les oeuvres indisponibles", + "http://www.semanlink.net/tag/cons_de_francais": "Cons de Fran\u00e7ais", + "http://www.semanlink.net/tag/stackoverflow_q": "StackOverFlow Q", + "http://www.semanlink.net/tag/tyrannical_exploitation_of_nature_by_mankind": "Tyrannical exploitation of nature by mankind", + "http://www.semanlink.net/tag/nltk": "NLTK", + "http://www.semanlink.net/tag/benjamin_nowack": "Benjamin Nowack", + "http://www.semanlink.net/tag/schema_org_roles": "Schema.org roles", + "http://www.semanlink.net/tag/sagesse_du_langage": "Sagesse du langage", + "http://www.semanlink.net/tag/aster_aweke": "Aster Aweke", + "http://www.semanlink.net/tag/tomcat_7": "Tomcat 7", + "http://www.semanlink.net/tag/principle_of_least_power": "Principle of least power", + "http://www.semanlink.net/tag/ml_conditioning": "ML: conditioning", + "http://www.semanlink.net/tag/9_3": "93", + "http://www.semanlink.net/tag/perceptron": "Perceptron", + "http://www.semanlink.net/tag/trump": "Trump", + "http://www.semanlink.net/tag/uri_dereferencing": "URI dereferencing", + "http://www.semanlink.net/tag/velo": "V\u00e9lo", + "http://www.semanlink.net/tag/neocortex": "Neocortex", + "http://www.semanlink.net/tag/class_based_language_models": "Class based language models", + "http://www.semanlink.net/tag/byzantine_fault_tolerance": "Byzantine fault tolerance", + "http://www.semanlink.net/tag/limule": "limule", + "http://www.semanlink.net/tag/points_of_interest": "Points of Interest", + "http://www.semanlink.net/tag/eric_schmidt": "Eric Schmidt", + "http://www.semanlink.net/tag/thomas_wolf": "Thomas Wolf", + "http://www.semanlink.net/tag/evangelistes": "Evang\u00e9listes", + "http://www.semanlink.net/tag/exxonmobil": "ExxonMobil", + 
"http://www.semanlink.net/tag/science_fiction": "Science fiction", + "http://www.semanlink.net/tag/graphs_machine_learning": "Graphs+Machine Learning", + "http://www.semanlink.net/tag/java_tool": "Java tool", + "http://www.semanlink.net/tag/cassini_huygens": "Cassini-Huygens", + "http://www.semanlink.net/tag/owled_2007": "OWLED 2007", + "http://www.semanlink.net/tag/parthenogenese": "Parth\u00e9nogen\u00e8se", + "http://www.semanlink.net/tag/exponential_organizations": "Exponential Organizations", + "http://www.semanlink.net/tag/sparqlpress": "SparqlPress", + "http://www.semanlink.net/tag/bulgarie": "Bulgarie", + "http://www.semanlink.net/tag/hippies": "Hippies", + "http://www.semanlink.net/tag/instant_messaging": "Instant Messaging", + "http://www.semanlink.net/tag/belzoni": "Belzoni", + "http://www.semanlink.net/tag/garce": "Garce", + "http://www.semanlink.net/tag/subspace_clustering": "Subspace clustering", + "http://www.semanlink.net/tag/text_kg_and_embeddings": "Text, KG and embeddings", + "http://www.semanlink.net/tag/dynamic_object_model_pattern": "Dynamic Object Model Pattern", + "http://www.semanlink.net/tag/machine_learning_business": "Machine Learning: business", + "http://www.semanlink.net/tag/apple_photos": "Apple Photos", + "http://www.semanlink.net/tag/mughal_empire": "Empire moghol", + "http://www.semanlink.net/tag/training_data_nlp": "Training Data (NLP)", + "http://www.semanlink.net/tag/sw_coreferences": "SW: coreferences", + "http://www.semanlink.net/tag/expression": "Expression", + "http://www.semanlink.net/tag/rdf_embeddings": "RDF embeddings", + "http://www.semanlink.net/tag/semantic_web_and_ai": "Semantic web and AI", + "http://www.semanlink.net/tag/nn_symbolic_ai_hybridation": "NN / Symbolic AI hybridation", + "http://www.semanlink.net/tag/w3c_incubator_group_report": "W3C Incubator Group Report", + "http://www.semanlink.net/tag/few_shot_learning": "Few-shot learning", + "http://www.semanlink.net/tag/encoder_decoder_architecture": "Encoder-Decoder architecture", + "http://www.semanlink.net/tag/jpl": "JPL", + "http://www.semanlink.net/tag/site_web_gouvernemental": "Site web gouvernemental", + "http://www.semanlink.net/tag/ai_knowledge_bases": "AI + Knowledge Bases", + "http://www.semanlink.net/tag/sarkozy_immigration": "Sarkozy : immigration", + "http://www.semanlink.net/tag/grand_challenge": "DARPA Grand Challenge", + "http://www.semanlink.net/tag/emnlp": "EMNLP", + "http://www.semanlink.net/tag/akkadian_language": "Akkadian language", + "http://www.semanlink.net/tag/detroit": "Detroit", + "http://www.semanlink.net/tag/france_telecom": "France T\u00e9l\u00e9com", + "http://www.semanlink.net/tag/retrieval_augmented_lm": "Retrieval augmented LM", + "http://www.semanlink.net/tag/comete": "Com\u00e8te", + "http://www.semanlink.net/tag/miriam_makeba": "Miriam Makeba", + "http://www.semanlink.net/tag/enfer_administratif": "Enfer administratif", + "http://www.semanlink.net/tag/encryption": "Encryption", + "http://www.semanlink.net/tag/insecte": "Insecte", + "http://www.semanlink.net/tag/grillon": "Grillon", + "http://www.semanlink.net/tag/jena_grddl_reader": "Jena GRDDL Reader", + "http://www.semanlink.net/tag/christian_faure": "Christian Faur\u00e9", + "http://www.semanlink.net/tag/davos": "Davos", + "http://www.semanlink.net/tag/news_website": "News website", + "http://www.semanlink.net/tag/de_extinction": "De-extinction", + "http://www.semanlink.net/tag/linking_open_data": "Linking Open Data", + "http://www.semanlink.net/tag/cosmologie": "Cosmologie", + 
"http://www.semanlink.net/tag/ldp_updates": "LDP: updates", + "http://www.semanlink.net/tag/statistics": "Statistics", + "http://www.semanlink.net/tag/bat": "Bat", + "http://www.semanlink.net/tag/virtuoso": "Virtuoso", + "http://www.semanlink.net/tag/cancers_pediatriques": "Cancers p\u00e9diatriques", + "http://www.semanlink.net/tag/graph_attention_networks": "Graph Attention Networks ", + "http://www.semanlink.net/tag/film_noir": "Film noir", + "http://www.semanlink.net/tag/lobby": "Lobby", + "http://www.semanlink.net/tag/the_knowledge_graph_conference": "The Knowledge Graph Conference", + "http://www.semanlink.net/tag/japon": "Japon", + "http://www.semanlink.net/tag/sense2vec": "Sense2vec", + "http://www.semanlink.net/tag/aubrac": "Aubrac", + "http://www.semanlink.net/tag/configuration_ontology": "Configuration ontology", + "http://www.semanlink.net/tag/microcredit": "Microcr\u00e9dit", + "http://www.semanlink.net/tag/bulle_speculative": "Bulle sp\u00e9culative", + "http://www.semanlink.net/tag/nlp_4_africa": "NLP 4 Africa", + "http://www.semanlink.net/tag/neuroscience_of_consciousness": "Neuroscience of Consciousness", + "http://www.semanlink.net/tag/messenger": "Messenger", + "http://www.semanlink.net/tag/mutual_learning": "Mutual Learning", + "http://www.semanlink.net/tag/nosql_vs_sql": "NOSQL vs SQL", + "http://www.semanlink.net/tag/fact_verification": "Fact verification", + "http://www.semanlink.net/tag/database_to_rdf_mapping": "Database to RDF mapping", + "http://www.semanlink.net/tag/cybersurveillance": "Cybersurveillance", + "http://www.semanlink.net/tag/uruguay": "Uruguay", + "http://www.semanlink.net/tag/thesaurus_taxonomies": "Thesaurus & Taxonomies ", + "http://www.semanlink.net/tag/wordnet": "Wordnet", + "http://www.semanlink.net/tag/mali": "Mali", + "http://www.semanlink.net/tag/parente_a_plaisanterie": "Parent\u00e9 \u00e0 plaisanterie", + "http://www.semanlink.net/tag/co_": "Gaz carbonique", + "http://www.semanlink.net/tag/merisier": "Merisier", + "http://www.semanlink.net/tag/java_server_faces": "Java Server Faces", + "http://www.semanlink.net/tag/sea_peoples": "Sea Peoples", + "http://www.semanlink.net/tag/cognitive_computing": "Cognitive computing", + "http://www.semanlink.net/tag/mainstream_media": "Mainstream media", + "http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model": "DSSM (Deep Semantic Similarity Model)", + "http://www.semanlink.net/tag/paradis_fiscaux": "Paradis fiscaux", + "http://www.semanlink.net/tag/hip_hop": "Hip Hop", + "http://www.semanlink.net/tag/enseignement_en_afrique": "Enseignement en Afrique", + "http://www.semanlink.net/tag/del_icio": "del icio", + "http://www.semanlink.net/tag/ontologie_visualization": "Ontologie visualization", + "http://www.semanlink.net/tag/contrastive_self_supervised_learning": "Contrastive Self-Supervised Learning", + "http://www.semanlink.net/tag/solr": "Solr", + "http://www.semanlink.net/tag/belgique": "Belgique", + "http://www.semanlink.net/tag/encyclopedia_of_life": "Encyclopedia of Life", + "http://www.semanlink.net/tag/rdf_in_files": "RDF in files", + "http://www.semanlink.net/tag/educational_resources": "Educational resources", + "http://www.semanlink.net/tag/ml_and_physics": "ML and physics", + "http://www.semanlink.net/tag/dynamic_semantic_publishing": "Dynamic Semantic Publishing", + "http://www.semanlink.net/tag/phoenix_mars_lander": "Phoenix Mars Lander", + "http://www.semanlink.net/tag/sud_des_etats_unis": "Sud des Etats-Unis", + "http://www.semanlink.net/tag/hierarchical_classification": 
"Hierarchical Classification", + "http://www.semanlink.net/tag/diamant": "Diamant", + "http://www.semanlink.net/tag/git": "Git", + "http://www.semanlink.net/tag/http_redirect": "HTTP Redirect", + "http://www.semanlink.net/tag/florence_aubenas": "Florence Aubenas", + "http://www.semanlink.net/tag/consciousness_prior": "Consciousness Prior", + "http://www.semanlink.net/tag/generative_adversarial_network": "GAN", + "http://www.semanlink.net/tag/memory_prediction_framework": "Memory-prediction framework", + "http://www.semanlink.net/tag/nlp_facebook": "NLP@Facebook", + "http://www.semanlink.net/tag/soap_vs_rest": "SOAP vs REST", + "http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox": "Coursera: The Data Scientist\u2019s Toolbox", + "http://www.semanlink.net/tag/gael_de_chalendar": "Ga\u00ebl de Chalendar", + "http://www.semanlink.net/tag/table_based_fact_verification": "Table-based Fact Verification", + "http://www.semanlink.net/tag/hierarchical_temporal_memory": "Hierarchical temporal memory", + "http://www.semanlink.net/tag/christiane_taubira": "Christiane Taubira", + "http://www.semanlink.net/tag/semantic_tagging": "Semantic tagging", + "http://www.semanlink.net/tag/ludovic_denoyer": "Ludovic Denoyer", + "http://www.semanlink.net/tag/lod2": "LOD2", + "http://www.semanlink.net/tag/hydrogen_economy": "Hydrogen economy", + "http://www.semanlink.net/tag/knowledge_vault": "Knowledge Vault", + "http://www.semanlink.net/tag/parthe": "Parthe", + "http://www.semanlink.net/tag/google_cloud_platform": "Google Cloud Platform", + "http://www.semanlink.net/tag/salsa": "Salsa", + "http://www.semanlink.net/tag/using_literal_descriptions_of_entities": "Using literal descriptions of entities", + "http://www.semanlink.net/tag/plantu": "Plantu", + "http://www.semanlink.net/tag/grand_homme": "Grand Homme", + "http://www.semanlink.net/tag/navier_stokes": "Navier\u2013Stokes", + "http://www.semanlink.net/tag/marseillaise": "Marseillaise", + "http://www.semanlink.net/tag/amadou_hampate_ba": "Amadou Hamp\u00e2t\u00e9 B\u00e2", + "http://www.semanlink.net/tag/darfour": "Darfour", + "http://www.semanlink.net/tag/eclipse_project": "Eclipse project", + "http://www.semanlink.net/tag/rdf_schema_querying": "RDF Schema querying", + "http://www.semanlink.net/tag/pape": "Pape", + "http://www.semanlink.net/tag/axel_ngonga": "Axel Ngonga", + "http://www.semanlink.net/tag/propriete_privee": "Propri\u00e9t\u00e9 priv\u00e9e", + "http://www.semanlink.net/tag/user_driven_modelling": "User Driven Modelling", + "http://www.semanlink.net/tag/antiracisme": "Antiracisme", + "http://www.semanlink.net/tag/lexical_ambiguity": "Word sense / Lexical ambiguity", + "http://www.semanlink.net/tag/mladic": "Mladic", + "http://www.semanlink.net/tag/princeton": "Princeton", + "http://www.semanlink.net/tag/don_t_waste_my_time": "Don't waste my time", + "http://www.semanlink.net/tag/dean_allemang": "Dean Allemang", + "http://www.semanlink.net/tag/technology_enhanced_learning": "Technology Enhanced Learning", + "http://www.semanlink.net/tag/hash_uris": "Hash URIs", + "http://www.semanlink.net/tag/cloud": "Cloud", + "http://www.semanlink.net/tag/dan_brickley": "Dan Brickley", + "http://www.semanlink.net/tag/semantics_of_skos_concept": "Semantics of skos:Concept", + "http://www.semanlink.net/tag/vue_js": "Vue.js", + "http://www.semanlink.net/tag/machine_learned_ranking": "Learning to rank", + "http://www.semanlink.net/tag/bert_kb": "BERT + KB", + "http://www.semanlink.net/tag/livre_a_lire": "Livre \u00e0 lire", + 
"http://www.semanlink.net/tag/relational_inductive_biases": "Relational inductive biases", + "http://www.semanlink.net/tag/information_retrieval": "Information retrieval", + "http://www.semanlink.net/tag/outliner": "Outliner", + "http://www.semanlink.net/tag/prohibition_des_narcotiques": "Prohibition des narcotiques", + "http://www.semanlink.net/tag/manu_dibango": "Manu Dibango", + "http://www.semanlink.net/tag/dao_attack": "DAO attack", + "http://www.semanlink.net/tag/linked_data_dev": "Linked Data Dev", + "http://www.semanlink.net/tag/metropolitan_museum_of_art": "Metropolitan Museum of Art", + "http://www.semanlink.net/tag/google_structured_data_testing_tool": "Google Structured Data Testing Tool", + "http://www.semanlink.net/tag/russie": "Russie", + "http://www.semanlink.net/tag/age_du_bronze": "\u00c2ge du bronze", + "http://www.semanlink.net/tag/peace_corps": "Peace Corps", + "http://www.semanlink.net/tag/open_knowledge_network": "Open Knowledge Network", + "http://www.semanlink.net/tag/chef_d_etat": "Chef d'\u00e9tat", + "http://www.semanlink.net/tag/owl_editor": "OWL editor", + "http://www.semanlink.net/tag/library_of_congress": "Library of Congress", + "http://www.semanlink.net/tag/word2vec_howto": "Word2vec: howto", + "http://www.semanlink.net/tag/language_models_size": "Language Models: size", + "http://www.semanlink.net/tag/galileo": "Galileo", + "http://www.semanlink.net/tag/rdf_driven_web_sites": "RDF-driven web sites", + "http://www.semanlink.net/tag/film_danois": "Film danois", + "http://www.semanlink.net/tag/14_juillet": "14 juillet", + "http://www.semanlink.net/tag/genetic_data": "Genetic data", + "http://www.semanlink.net/tag/identity_crisis_in_linked_data": "Identity Crisis in Linked Data", + "http://www.semanlink.net/tag/online_course_materials": "Online Course Materials", + "http://www.semanlink.net/tag/horizontal_gene_transfer": "Horizontal gene transfer", + "http://www.semanlink.net/tag/querying_remote_sparql_services": "Querying Remote SPARQL Services", + "http://www.semanlink.net/tag/twitterature": "Twitt\u00e9rature", + "http://www.semanlink.net/tag/rev": "Rev", + "http://www.semanlink.net/tag/judaisme": "Juda\u00efsme", + "http://www.semanlink.net/tag/azawad": "Azawad", + "http://www.semanlink.net/tag/finder": "Finder", + "http://www.semanlink.net/tag/java_1_5_mac_os_x": "Java 1.5 Mac OS X", + "http://www.semanlink.net/tag/elmo": "ELMo", + "http://www.semanlink.net/tag/zaire": "Za\u00efre", + "http://www.semanlink.net/tag/eigenvectors": "EigenVectors", + "http://www.semanlink.net/tag/nlp_data_anonymization": "NLP: Data Anonymization", + "http://www.semanlink.net/tag/ui": "UI", + "http://www.semanlink.net/tag/dialogs_in_javascript": "Dialogs in javascript", + "http://www.semanlink.net/tag/crise_financiere": "Crise financi\u00e8re", + "http://www.semanlink.net/tag/venus_de_brassempouy": "Venus de Brassempouy", + "http://www.semanlink.net/tag/data_scientists": "Data Scientists", + "http://www.semanlink.net/tag/folie": "Folie", + "http://www.semanlink.net/tag/sebastian_ruder": "Sebastian Ruder", + "http://www.semanlink.net/tag/brouteur": "Brouteur", + "http://www.semanlink.net/tag/stromatolithes": "Stromatolithes", + "http://www.semanlink.net/tag/apache_mahout": "Apache Mahout", + "http://www.semanlink.net/tag/dave_winer": "Dave Winer", + "http://www.semanlink.net/tag/fleuve": "Fleuve", + "http://www.semanlink.net/tag/extreme_multi_label_classification": "Extreme Multi-label Classification", + "http://www.semanlink.net/tag/relations_franco_americaines": "Relations 
franco-am\u00e9ricaines", + "http://www.semanlink.net/tag/krakatoa": "Krakatoa", + "http://www.semanlink.net/tag/mike_bergman": "Mike Bergman", + "http://www.semanlink.net/tag/fps_ldow_2013": "fps @ LDOW 2013", + "http://www.semanlink.net/tag/freebase": "Freebase", + "http://www.semanlink.net/tag/productivite": "Productivit\u00e9", + "http://www.semanlink.net/tag/berlin": "Berlin", + "http://www.semanlink.net/tag/rap": "Rap", + "http://www.semanlink.net/tag/verger_de_gado_a_niamey": "Verger de Gado \u00e0 Niamey", + "http://www.semanlink.net/tag/sparql_extensions": "SPARQL extensions", + "http://www.semanlink.net/tag/huggingface_transformers": "huggingface/transformers", + "http://www.semanlink.net/tag/ng4j": "NG4J", + "http://www.semanlink.net/tag/backbone_js": "Backbone.js", + "http://www.semanlink.net/tag/neuroevolution": "Neuroevolution", + "http://www.semanlink.net/tag/neural_bag_of_words": "Neural Bag of Words", + "http://www.semanlink.net/tag/pekin_2008": "P\u00e9kin 2008", + "http://www.semanlink.net/tag/cross_lingual_nlp": "Cross-lingual NLP", + "http://www.semanlink.net/tag/dbpedia_francophone": "dbpedia francophone", + "http://www.semanlink.net/tag/java_profiling": "Java profiling", + "http://www.semanlink.net/tag/jsfiddle": "jsFiddle", + "http://www.semanlink.net/tag/al_gore": "Al Gore", + "http://www.semanlink.net/tag/jail": "Jail", + "http://www.semanlink.net/tag/big_data_tools": "Big Data Tools", + "http://www.semanlink.net/tag/graph_embeddings": "Graph Embeddings", + "http://www.semanlink.net/tag/wikileaks": "Wikileaks", + "http://www.semanlink.net/tag/munich": "Munich", + "http://www.semanlink.net/tag/europe_ecologie": "Europe \u00e9cologie", + "http://www.semanlink.net/tag/cea_list": "CEA, LIST", + "http://www.semanlink.net/tag/spreadsheets": "Spreadsheets", + "http://www.semanlink.net/tag/similarity_learning": "Similarity learning", + "http://www.semanlink.net/tag/feedly": "Feedly", + "http://www.semanlink.net/tag/sl_gui": "SL GUI", + "http://www.semanlink.net/tag/peche_industrielle": "P\u00eache industrielle", + "http://www.semanlink.net/tag/slides_fps": "slides fps", + "http://www.semanlink.net/tag/monopolies": "Monopolies", + "http://www.semanlink.net/tag/latent_variable_model": "Latent variable model", + "http://www.semanlink.net/tag/nlp_hierarchical_text_classification": "Hierarchical text classification", + "http://www.semanlink.net/tag/string_searching_algorithm": "String-searching algorithm", + "http://www.semanlink.net/tag/inversion_of_control": "Inversion of Control", + "http://www.semanlink.net/tag/acteur": "Acteur", + "http://www.semanlink.net/tag/workshop": "Workshop", + "http://www.semanlink.net/tag/cour_europeenne_de_justice": "Cour europ\u00e9enne de justice", + "http://www.semanlink.net/tag/coal_seam_fire": "Coal seam fire", + "http://www.semanlink.net/tag/biomedical_data": "Biomedical data", + "http://www.semanlink.net/tag/ronan_collobert": "Ronan Collobert", + "http://www.semanlink.net/tag/differentiable_programming": "Differentiable Programming", + "http://www.semanlink.net/tag/declaration_des_droits_de_l_homme": "D\u00e9claration des droits de l'homme", + "http://www.semanlink.net/tag/wiki": "Wiki", + "http://www.semanlink.net/tag/xtech_2006": "XTech 2006", + "http://www.semanlink.net/tag/jeffrey_t_pollock": "Jeffrey T. 
Pollock", + "http://www.semanlink.net/tag/invasion_d_especes_etrangeres": "Esp\u00e8ces invasives", + "http://www.semanlink.net/tag/ipod": "iPod", + "http://www.semanlink.net/tag/design_pattern": "Design pattern", + "http://www.semanlink.net/tag/retrovirus": "R\u00e9trovirus", + "http://www.semanlink.net/tag/cassini": "Cassini", + "http://www.semanlink.net/tag/hadoop": "Hadoop", + "http://www.semanlink.net/tag/sanjeev_arora": "Sanjeev Arora", + "http://www.semanlink.net/tag/lambda_calculus": "Lambda calculus", + "http://www.semanlink.net/tag/alex_allauzen": "Alex Allauzen", + "http://www.semanlink.net/tag/hypothes_is": "Hypothes.is", + "http://www.semanlink.net/tag/claude_hartmann": "Claude Hartmann", + "http://www.semanlink.net/tag/wikification": "Wikification", + "http://www.semanlink.net/tag/services_publics": "Services publics", + "http://www.semanlink.net/tag/african_origin_of_modern_humans": "African origin of modern humans", + "http://www.semanlink.net/tag/mondeca": "Mondeca", + "http://www.semanlink.net/tag/wikipedia_page_to_concept": "Wikipedia page to concept", + "http://www.semanlink.net/tag/tag_clusters": "Tag Clusters", + "http://www.semanlink.net/tag/rdfa_tool": "RDFa tool", + "http://www.semanlink.net/tag/doc2vec": "Doc2Vec", + "http://www.semanlink.net/tag/m3_multi_media_museum": "M3 Multi Media Museum", + "http://www.semanlink.net/tag/semantic_annotation": "Semantic annotation", + "http://www.semanlink.net/tag/sarraounia_mangou": "Sarraounia Mangou", + "http://www.semanlink.net/tag/bootstrap_aggregating_bagging": "Bootstrap aggregating (Bagging)", + "http://www.semanlink.net/tag/thomas_piketty": "Thomas Piketty", + "http://www.semanlink.net/tag/rdf_schema": "RDF Schema", + "http://www.semanlink.net/tag/salesforce": "salesforce", + "http://www.semanlink.net/tag/hippopotame": "Hippopotame", + "http://www.semanlink.net/tag/contestation": "Contestation", + "http://www.semanlink.net/tag/communaute_internationale": "Communaut\u00e9 internationale", + "http://www.semanlink.net/tag/samare_de_l_erable": "Samare de l'\u00e9rable", + "http://www.semanlink.net/tag/champignon": "Champignons", + "http://www.semanlink.net/tag/lod_mailing_list": "LOD mailing list", + "http://www.semanlink.net/tag/ei": "Etat islamique", + "http://www.semanlink.net/tag/francois_chollet": "Fran\u00e7ois Chollet", + "http://www.semanlink.net/tag/manu_sporny": "Manu Sporny", + "http://www.semanlink.net/tag/j_hallucine": "J'hallucine", + "http://www.semanlink.net/tag/these_renault_embeddings": "Th\u00e8se IRIT-Renault NLP-KB", + "http://www.semanlink.net/tag/drug_resistant_germs": "Drug-resistant germs", + "http://www.semanlink.net/tag/semanlink2": "Semanlink2", + "http://www.semanlink.net/tag/ibm_watson_and_speech_to_text": "Watson Speech-to-Text", + "http://www.semanlink.net/tag/japonais": "Japonais", + "http://www.semanlink.net/tag/apache_shiro": "Apache Shiro", + "http://www.semanlink.net/tag/good_question": "Good question", + "http://www.semanlink.net/tag/mercure": "Mercure (Plan\u00e8te)", + "http://www.semanlink.net/tag/chine_technologie": "Chine : technologie", + "http://www.semanlink.net/tag/vary_header": "Vary Header", + "http://www.semanlink.net/tag/1ere_guerre_mondiale": "1ere guerre mondiale", + "http://www.semanlink.net/tag/restaurant": "Restaurant", + "http://www.semanlink.net/tag/multi_class_classification": "Multi-class classification", + "http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf": "Mapping data from spreadsheets to RDF", + "http://www.semanlink.net/tag/mpaa": "MPAA", 
+ "http://www.semanlink.net/tag/peter_bloem": "Peter Bloem", + "http://www.semanlink.net/tag/manik_varma": "Manik Varma", + "http://www.semanlink.net/tag/phil_archer": "Phil Archer", + "http://www.semanlink.net/tag/json_ld": "JSON-LD", + "http://www.semanlink.net/tag/volvic": "Volvic", + "http://www.semanlink.net/tag/moussa_kaka": "Moussa Kaka", + "http://www.semanlink.net/tag/webmasters_google": "Webmasters @ Google", + "http://www.semanlink.net/tag/computational_universe": "Computational Universe", + "http://www.semanlink.net/tag/hydra": "Hydra", + "http://www.semanlink.net/tag/best_practices": "Best Practices", + "http://www.semanlink.net/tag/gpt_2": "GPT-2", + "http://www.semanlink.net/tag/lhc": "LHC", + "http://www.semanlink.net/tag/bert": "BERT", + "http://www.semanlink.net/tag/social_semantic_web": "Social Semantic Web", + "http://www.semanlink.net/tag/information_bottleneck_method": "Information bottleneck method", + "http://www.semanlink.net/tag/wolfram_language": "Wolfram Language", + "http://www.semanlink.net/tag/cryptographie_quantique": "Cryptographie quantique", + "http://www.semanlink.net/tag/deri": "DERI", + "http://www.semanlink.net/tag/pollueurs_payeurs": "Pollueurs payeurs", + "http://www.semanlink.net/tag/pays_d_europe": "Pays d'Europe", + "http://www.semanlink.net/tag/graph_database": "Graph database", + "http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena": "SDB: A SPARQL Database for Jena", + "http://www.semanlink.net/tag/zotero": "Zotero", + "http://www.semanlink.net/tag/malware": "Malware", + "http://www.semanlink.net/tag/human_level_ai": "Human Level AI", + "http://www.semanlink.net/tag/swift": "Swift", + "http://www.semanlink.net/tag/feature_learning": "Feature learning", + "http://www.semanlink.net/tag/sang": "Sang", + "http://www.semanlink.net/tag/iks_workshop_salzburg_2012": "IKS Workshop Salzburg 2012", + "http://www.semanlink.net/tag/swing": "Swing", + "http://www.semanlink.net/tag/deep_latent_variable_models": "Deep latent variable models", + "http://www.semanlink.net/tag/coupe_du_monde_2018": "Coupe du monde 2018", + "http://www.semanlink.net/tag/google_brain": "Google Brain", + "http://www.semanlink.net/tag/microdata": "Microdata", + "http://www.semanlink.net/tag/first_order_logic": "First-order logic", + "http://www.semanlink.net/tag/relativite": "Relativit\u00e9", + "http://www.semanlink.net/tag/acl_2018": "ACL 2018", + "http://www.semanlink.net/tag/cultural_heritage": "Cultural heritage", + "http://www.semanlink.net/tag/stefan_zweig": "Stefan Zweig", + "http://www.semanlink.net/tag/langues_anciennes": "Langues anciennes", + "http://www.semanlink.net/tag/feynman": "Feynman", + "http://www.semanlink.net/tag/semanlink2_related": "Semanlink2 related", + "http://www.semanlink.net/tag/mark_birbeck": "Mark Birbeck", + "http://www.semanlink.net/tag/r_d": "R&D", + "http://www.semanlink.net/tag/jeanne_d_arc": "Jeanne d'Arc", + "http://www.semanlink.net/tag/language_models_knowledge": "Language Models + Knowledge", + "http://www.semanlink.net/tag/web_of_needs": "Web of Needs", + "http://www.semanlink.net/tag/bayesian_reasoning": "Bayesian Reasoning", + "http://www.semanlink.net/tag/rss_dev": "RSS Dev", + "http://www.semanlink.net/tag/evolutionary_computation": "Evolutionary computation", + "http://www.semanlink.net/tag/eswc_2011": "ESWC 2011", + "http://www.semanlink.net/tag/insect_collapse": "Insect collapse", + "http://www.semanlink.net/tag/okfn_datahub": "OKFN Datahub", + "http://www.semanlink.net/tag/jena_introduction": "Jena : Introduction", + 
"http://www.semanlink.net/tag/verts": "Verts", + "http://www.semanlink.net/tag/elasticsearch": "ElasticSearch", + "http://www.semanlink.net/tag/semantic_overflow": "Semantic Overflow", + "http://www.semanlink.net/tag/protege": "Prot\u00e9g\u00e9", + "http://www.semanlink.net/tag/semantic_hierarchies": "Semantic hierarchies", + "http://www.semanlink.net/tag/diy": "DIY", + "http://www.semanlink.net/tag/test_adn_de_filiation": "Test ADN de filiation", + "http://www.semanlink.net/tag/ville": "Ville", + "http://www.semanlink.net/tag/11_septembre_2001": "11 septembre 2001", + "http://www.semanlink.net/tag/thinking_tools": "Thinking tools", + "http://www.semanlink.net/tag/intervention_francaise_au_mali": "Intervention fran\u00e7aise au Mali", + "http://www.semanlink.net/tag/famine": "Famine", + "http://www.semanlink.net/tag/intel": "Intel", + "http://www.semanlink.net/tag/h5n1": "H5N1", + "http://www.semanlink.net/tag/xsparql": "XSPARQL", + "http://www.semanlink.net/tag/starspace": "StarSpace", + "http://www.semanlink.net/tag/diacritics_in_uri": "Diacritics in URI", + "http://www.semanlink.net/tag/feature_extraction": "Feature extraction", + "http://www.semanlink.net/tag/unobtrusive_javascript": "Unobtrusive JavaScript", + "http://www.semanlink.net/tag/rdf2vec": "RDF2VEC", + "http://www.semanlink.net/tag/semanlink": "Semanlink", + "http://www.semanlink.net/tag/graphql": "GraphQL", + "http://www.semanlink.net/tag/cimba": "CIMBA", + "http://www.semanlink.net/tag/requirements_engineering": "Requirements Engineering", + "http://www.semanlink.net/tag/falashas": "Falashas", + "http://www.semanlink.net/tag/cocteau": "Cocteau", + "http://www.semanlink.net/tag/automobile": "Automobile", + "http://www.semanlink.net/tag/public_lod_w3_org": "public-lod@w3.org", + "http://www.semanlink.net/tag/combining_statistics_and_semantics": "Combining Statistics and Semantics", + "http://www.semanlink.net/tag/music_source_separation": "Music source separation", + "http://www.semanlink.net/tag/j_ai_un_petit_probleme": "J'ai un petit probl\u00e8me", + "http://www.semanlink.net/tag/troll": "Troll", + "http://www.semanlink.net/tag/africa_s_last_wild_places": "Africa's Last Wild Places", + "http://www.semanlink.net/tag/mutual_information": "Mutual information", + "http://www.semanlink.net/tag/parti_socialiste": "Parti socialiste", + "http://www.semanlink.net/tag/human_like_ai": "Human-like AI", + "http://www.semanlink.net/tag/zero_shot_learning": "Zero-Shot Learning", + "http://www.semanlink.net/tag/fourier": "Fourier", + "http://www.semanlink.net/tag/grece": "Gr\u00e8ce", + "http://www.semanlink.net/tag/hyperlinks": "Hyperlinks", + "http://www.semanlink.net/tag/taiwan": "Taiwan", + "http://www.semanlink.net/tag/scientologie": "Scientologie", + "http://www.semanlink.net/tag/v_s_naipaul": "V.S. 
Naipaul", + "http://www.semanlink.net/tag/kg_and_nlp": "Knowledge Graphs and NLP", + "http://www.semanlink.net/tag/erreur_judiciaire": "Erreur judiciaire", + "http://www.semanlink.net/tag/hindu_muslim_riots": "Hindu/Muslim riots", + "http://www.semanlink.net/tag/email": "email", + "http://www.semanlink.net/tag/developing_countries": "Developing countries", + "http://www.semanlink.net/tag/symbiose": "Symbiose", + "http://www.semanlink.net/tag/ihm_web": "IHM web", + "http://www.semanlink.net/tag/semanticpedia": "S\u00e9manticP\u00e9dia", + "http://www.semanlink.net/tag/shallow_parsing_chunking": "Shallow parsing (Chunking)", + "http://www.semanlink.net/tag/immigration": "Immigration", + "http://www.semanlink.net/tag/os": "OS", + "http://www.semanlink.net/tag/yandex": "Yandex", + "http://www.semanlink.net/tag/eclassowl": "eClassOWL", + "http://www.semanlink.net/tag/exalead": "Exalead", + "http://www.semanlink.net/tag/sigma_js": "Sigma.js", + "http://www.semanlink.net/tag/demo": "Demo", + "http://www.semanlink.net/tag/banlieue": "Banlieue", + "http://www.semanlink.net/tag/artificial_neurons": "Artificial neurons", + "http://www.semanlink.net/tag/linked_media_framework": "Linked Media Framework", + "http://www.semanlink.net/tag/job_title_normalization": "Job title normalization", + "http://www.semanlink.net/tag/mer": "Mer", + "http://www.semanlink.net/tag/video": "Video", + "http://www.semanlink.net/tag/nn_dev": "NN dev", + "http://www.semanlink.net/tag/three_way_decisions": "Three-way decisions", + "http://www.semanlink.net/tag/benchmark": "Benchmark", + "http://www.semanlink.net/tag/mobile_apps": "Mobile apps", + "http://www.semanlink.net/tag/management": "Management", + "http://www.semanlink.net/tag/violence": "Violence", + "http://www.semanlink.net/tag/freedom_box": "Freedom Box", + "http://www.semanlink.net/tag/rapidminer": "RapidMiner", + "http://www.semanlink.net/tag/therapie_genique": "Th\u00e9rapie g\u00e9nique", + "http://www.semanlink.net/tag/natural_selection": "Natural selection", + "http://www.semanlink.net/tag/link_prediction": "Link Prediction", + "http://www.semanlink.net/tag/samba": "Samba", + "http://www.semanlink.net/tag/cross_domain_data_fetching": "cross-domain data fetching", + "http://www.semanlink.net/tag/saudade": "Saudade", + "http://www.semanlink.net/tag/hack": "Hack", + "http://www.semanlink.net/tag/rdfa_1_1": "RDFa 1.1", + "http://www.semanlink.net/tag/ajar": "AJAR", + "http://www.semanlink.net/tag/salzburg": "Salzburg", + "http://www.semanlink.net/tag/yagan": "Yag\u00e1n", + "http://www.semanlink.net/tag/artificial_human_intelligence": "Artificial Human Intelligence", + "http://www.semanlink.net/tag/uri_synonymity": "URI Synonymity", + "http://www.semanlink.net/tag/allennlp": "AllenNLP", + "http://www.semanlink.net/tag/facebook_graph_search": "Facebook Graph Search", + "http://www.semanlink.net/tag/multimedia": "Multimedia", + "http://www.semanlink.net/tag/egit": "Egit", + "http://www.semanlink.net/tag/word2vec": "Word2vec", + "http://www.semanlink.net/tag/backdoor": "Backdoor", + "http://www.semanlink.net/tag/roc_curve": "ROC Curve", + "http://www.semanlink.net/tag/labeling_data": "Labeling data", + "http://www.semanlink.net/tag/coreference_resolution": "Coreference resolution", + "http://www.semanlink.net/tag/hbase": "HBase\u0099", + "http://www.semanlink.net/tag/ccfd": "CCFD", + "http://www.semanlink.net/tag/acl": "ACL", + "http://www.semanlink.net/tag/delocalisations": "D\u00e9localisations", + "http://www.semanlink.net/tag/spikes": "Spikes", + 
"http://www.semanlink.net/tag/richard_cyganiak": "Richard Cyganiak", + "http://www.semanlink.net/tag/affaires_de_gado_a_niamey": "Affaires de Gado \u00e0 Niamey", + "http://www.semanlink.net/tag/apigee": "Apigee", + "http://www.semanlink.net/tag/michael_moore": "Michael Moore", + "http://www.semanlink.net/tag/file_convert": "File convert", + "http://www.semanlink.net/tag/ast_workshop": "AST workshop", + "http://www.semanlink.net/tag/odf": "ODF", + "http://www.semanlink.net/tag/innoraise": "Innoraise", + "http://www.semanlink.net/tag/artificial_general_intelligence": "Artificial general intelligence", + "http://www.semanlink.net/tag/hadopi_riposte_graduee": "HADOPI", + "http://www.semanlink.net/tag/logic": "Logic", + "http://www.semanlink.net/tag/innovation": "Innovation", + "http://www.semanlink.net/tag/championnat_du_monde_d_athletisme": "Championnat du monde d'athl\u00e9tisme", + "http://www.semanlink.net/tag/read_write_linked_data": "Read-Write Linked Data", + "http://www.semanlink.net/tag/jpa": "JPA", + "http://www.semanlink.net/tag/car_diversity": "Car diversity", + "http://www.semanlink.net/tag/google_hummingbird": "Google Hummingbird", + "http://www.semanlink.net/tag/slow_food": "Slow food", + "http://www.semanlink.net/tag/kd_mkb_related": "KD-MKB related", + "http://www.semanlink.net/tag/ai_facebook": "AI@Facebook", + "http://www.semanlink.net/tag/bookmarklet": "Bookmarklet", + "http://www.semanlink.net/tag/text_tools": "Text tools", + "http://www.semanlink.net/tag/archeologie": "Arch\u00e9ologie", + "http://www.semanlink.net/tag/java_in_python": "Java in python", + "http://www.semanlink.net/tag/handwriting_recognition": "Handwriting recognition", + "http://www.semanlink.net/tag/uriburner_com": "URIBurner", + "http://www.semanlink.net/tag/giovanni_tummarello": "Giovanni Tummarello", + "http://www.semanlink.net/tag/duckduckgo": "DuckDuckGo", + "http://www.semanlink.net/tag/sharepoint": "Sharepoint", + "http://www.semanlink.net/tag/grounded_language_learning": "Grounded Language Learning", + "http://www.semanlink.net/tag/david_cameron": "David Cameron", + "http://www.semanlink.net/tag/new_horizons": "New Horizons", + "http://www.semanlink.net/tag/jena_and_database": "Jena and database", + "http://www.semanlink.net/tag/entity_type": "Entity type", + "http://www.semanlink.net/tag/david_beckett": "David Beckett", + "http://www.semanlink.net/tag/bittorrent": "BitTorrent", + "http://www.semanlink.net/tag/multilevel_model": "Hierarchical linear model", + "http://www.semanlink.net/tag/maxent_classifier": "MaxEnt classifier (Multinomial logistic regression)", + "http://www.semanlink.net/tag/apache": "Apache web server", + "http://www.semanlink.net/tag/loosely_formatted_text": "Loosely formatted text", + "http://www.semanlink.net/tag/identification_of_similar_documents": "Identification of similar documents", + "http://www.semanlink.net/tag/neural_memory": "Neural Memory", + "http://www.semanlink.net/tag/external_memory_algorithm": "External memory algorithm", + "http://www.semanlink.net/tag/wikidata": "Wikidata", + "http://www.semanlink.net/tag/violence_policiere": "Violence polici\u00e8re", + "http://www.semanlink.net/tag/autoencoder": "Autoencoder", + "http://www.semanlink.net/tag/knowledge_extraction": "Knowledge Extraction", + "http://www.semanlink.net/tag/leonardo_da_vinci": "Leonardo da Vinci", + "http://www.semanlink.net/tag/belem": "Bel\u00e9m", + "http://www.semanlink.net/tag/de_broglie": "de Broglie", + "http://www.semanlink.net/tag/france_afrique": "France / Afrique", + 
"http://www.semanlink.net/tag/automobile_and_w3c": "Automotive AND W3C", + "http://www.semanlink.net/tag/national_taiwan_university": "National Taiwan University", + "http://www.semanlink.net/tag/neurones": "Neurones", + "http://www.semanlink.net/tag/entity_alignment": "Entity alignment", + "http://www.semanlink.net/tag/dan_connolly": "Dan Connolly", + "http://www.semanlink.net/tag/postman": "Postman", + "http://www.semanlink.net/tag/guerres_coloniales": "Guerres coloniales", + "http://www.semanlink.net/tag/non_negative_matrix_factorization": "Non-negative matrix factorization", + "http://www.semanlink.net/tag/amour": "Amour", + "http://www.semanlink.net/tag/scaling": "Scaling", + "http://www.semanlink.net/tag/attention_knowledge_graphs": "Attention + Knowledge Graphs", + "http://www.semanlink.net/tag/latent_dirichlet_allocation": "Latent Dirichlet allocation", + "http://www.semanlink.net/tag/programming_language": "Programming language", + "http://www.semanlink.net/tag/crise_ecologique": "Crise \u00e9cologique", + "http://www.semanlink.net/tag/loudness_war": "Loudness war", + "http://www.semanlink.net/tag/target_entity_disambiguation": "Target Entity Disambiguation", + "http://www.semanlink.net/tag/marcel_frohlich": "Marcel Fr\u00f6hlich", + "http://www.semanlink.net/tag/dev_tools": "Dev tools", + "http://www.semanlink.net/tag/zune": "Zune", + "http://www.semanlink.net/tag/jacqueline_de_romilly": "Jacqueline de Romilly", + "http://www.semanlink.net/tag/nlp_event": "NLP event", + "http://www.semanlink.net/tag/text_search": "Text Search", + "http://www.semanlink.net/tag/openai_gpt": "OpenAI GPT", + "http://www.semanlink.net/tag/tpu": "TPU", + "http://www.semanlink.net/tag/fix_it": "Fix it", + "http://www.semanlink.net/tag/transfer_learning": "Transfer learning", + "http://www.semanlink.net/tag/boura": "Boura", + "http://www.semanlink.net/tag/evolutionary_algorithm": "Evolutionary algorithm", + "http://www.semanlink.net/tag/darwin": "Darwin", + "http://www.semanlink.net/tag/physicien": "Physicien", + "http://www.semanlink.net/tag/histoire_de_la_chine": "Histoire de la Chine", + "http://www.semanlink.net/tag/duplicate_detection": "Duplicate Detection", + "http://www.semanlink.net/tag/ldp_implementations": "LDP: implementations", + "http://www.semanlink.net/tag/gaussian_embedding": "Gaussian embedding", + "http://www.semanlink.net/tag/wii": "Wii", + "http://www.semanlink.net/tag/dbpedia": "dbpedia", + "http://www.semanlink.net/tag/clark_and_parsia": "Clark and Parsia", + "http://www.semanlink.net/tag/active_learning": "Active learning", + "http://www.semanlink.net/tag/sable": "Sable", + "http://www.semanlink.net/tag/galileo_spacecraft": "Galileo (spacecraft)", + "http://www.semanlink.net/tag/representation_learning_for_nlp": "Representation Learning for NLP", + "http://www.semanlink.net/tag/similarity_queries": "Similarity queries", + "http://www.semanlink.net/tag/frequently_cited_paper": "Frequently cited paper", + "http://www.semanlink.net/tag/zero_shot_entity_linking": "Zero-shot Entity Linking", + "http://www.semanlink.net/tag/chine_afrique": "Chine / Afrique", + "http://www.semanlink.net/tag/topic_modeling_over_short_texts": "Topic Modeling over Short Texts", + "http://www.semanlink.net/tag/note_taking_app": "Note taking app", + "http://www.semanlink.net/tag/hateoas": "HATEOAS", + "http://www.semanlink.net/tag/collaborative_editing": "Collaborative editing", + "http://www.semanlink.net/tag/sequence_labeling": "Sequence labeling", + "http://www.semanlink.net/tag/irrigation": "Irrigation", 
+ "http://www.semanlink.net/tag/linked_data_cache": "Linked Data Cache", + "http://www.semanlink.net/tag/lalibela": "Lalibela", + "http://www.semanlink.net/tag/question_answering": "Question Answering", + "http://www.semanlink.net/tag/windows": "Windows", + "http://www.semanlink.net/tag/ruslan_salakhutdinov": "Ruslan Salakhutdinov", + "http://www.semanlink.net/tag/declin_de_la_france": "D\u00e9clin de la France", + "http://www.semanlink.net/tag/taxonomies": "Taxonomies", + "http://www.semanlink.net/tag/poisson": "Poisson", + "http://www.semanlink.net/tag/thomson_reuters": "Thomson Reuters", + "http://www.semanlink.net/tag/axoum": "Axoum", + "http://www.semanlink.net/tag/numerisation_des_oeuvres_indisponibles": "Num\u00e9risation des \u0153uvres indisponibles", + "http://www.semanlink.net/tag/safari": "Safari", + "http://www.semanlink.net/tag/4store": "4store", + "http://www.semanlink.net/tag/banque": "Banque", + "http://www.semanlink.net/tag/datalift": "Datalift", + "http://www.semanlink.net/tag/theorie_des_cordes": "Th\u00e9orie des cordes", + "http://www.semanlink.net/tag/170_rue_de_lourmel": "170, rue de Lourmel", + "http://www.semanlink.net/tag/dremel": "Dremel", + "http://www.semanlink.net/tag/persistent_data_structure": "Persistent data structure", + "http://www.semanlink.net/tag/indo_europeen": "Indo-europ\u00e9en", + "http://www.semanlink.net/tag/evolvable_systems": "Evolvable systems", + "http://www.semanlink.net/tag/publicite": "Publicit\u00e9", + "http://www.semanlink.net/tag/music_of_africa": "Music of Africa", + "http://www.semanlink.net/tag/turing_test": "Turing test", + "http://www.semanlink.net/tag/siri": "Siri", + "http://www.semanlink.net/tag/money": "Money", + "http://www.semanlink.net/tag/gaulois": "Gaulois", + "http://www.semanlink.net/tag/reasoning": "Reasoning", + "http://www.semanlink.net/tag/matlab": "Matlab", + "http://www.semanlink.net/tag/global_workspace_theory": "Global workspace theory", + "http://www.semanlink.net/tag/arthropodes": "Arthropodes", + "http://www.semanlink.net/tag/samy_bengio": "Samy Bengio", + "http://www.semanlink.net/tag/xavier_bertrand": "Xavier Bertrand", + "http://www.semanlink.net/tag/passage_ai": "Passage AI", + "http://www.semanlink.net/tag/list_only_entity_linking": "List-only Entity Linking", + "http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen": "Paradoxe Einstein-Podolsky-Rosen", + "http://www.semanlink.net/tag/simile_timeline": "SIMILE Timeline", + "http://www.semanlink.net/tag/sparse_distributed_memory": "Sparse distributed memory", + "http://www.semanlink.net/tag/cryptocurrency": "Cryptocurrency", + "http://www.semanlink.net/tag/cybersex": "Cybersex", + "http://www.semanlink.net/tag/coupling": "Coupling", + "http://www.semanlink.net/tag/cognition_as_a_service": "Cognition-as-a-Service", + "http://www.semanlink.net/tag/semantic_folding": "Semantic folding", + "http://www.semanlink.net/tag/combining_word_and_entity_embeddings": "Word + Entity embeddings", + "http://www.semanlink.net/tag/one_shot_generalization": "One-Shot Learning", + "http://www.semanlink.net/tag/texaco": "Texaco", + "http://www.semanlink.net/tag/selective_classification": "Selective Classification", + "http://www.semanlink.net/tag/patent_landscaping": "Patent Landscaping", + "http://www.semanlink.net/tag/supraconductivite": "Supraconductivit\u00e9", + "http://www.semanlink.net/tag/asimov": "Asimov", + "http://www.semanlink.net/tag/gastronomie": "Gastronomie", + "http://www.semanlink.net/tag/supervised_machine_learning": "Supervised machine learning", 
+ "http://www.semanlink.net/tag/semantic_web_portal": "Semantic Web : Portal", + "http://www.semanlink.net/tag/rnn_based_language_model": "RNN based Language Model", + "http://www.semanlink.net/tag/neural_coding": "Neural coding", + "http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web": "Uncertainty Reasoning AND Semantic Web", + "http://www.semanlink.net/tag/datao": "Datao", + "http://www.semanlink.net/tag/terre_de_feu": "Terre de Feu", + "http://www.semanlink.net/tag/volkswagen": "Volkswagen", + "http://www.semanlink.net/tag/dev_tips": "Dev tips", + "http://www.semanlink.net/tag/alexandre_monnin": "Alexandre Monnin", + "http://www.semanlink.net/tag/tiger": "Tiger", + "http://www.semanlink.net/tag/ird": "IRD", + "http://www.semanlink.net/tag/m2eclipse": "m2eclipse", + "http://www.semanlink.net/tag/mac_os_x_web_serving": "Mac OS X Web serving", + "http://www.semanlink.net/tag/rdflib": "RDFLib", + "http://www.semanlink.net/tag/bhaskar_mitra": "Bhaskar Mitra", + "http://www.semanlink.net/tag/film_a_voir": "Film \u00e0 voir", + "http://www.semanlink.net/tag/film_bresilien": "Filme brasileiro", + "http://www.semanlink.net/tag/film_argentin": "Film argentin", + "http://www.semanlink.net/tag/google_sucks": "Google sucks", + "http://www.semanlink.net/tag/thewebconf_2021": "TheWebConf 2021", + "http://www.semanlink.net/tag/google_advertising": "Google + Advertising", + "http://www.semanlink.net/tag/semantic_framework": "Semantic framework", + "http://www.semanlink.net/tag/herschel_telescope": "Herschel telescope", + "http://www.semanlink.net/tag/homere": "Hom\u00e8re", + "http://www.semanlink.net/tag/nsa_spying_scandal": "NSA spying scandal", + "http://www.semanlink.net/tag/nlp_reading_comprehension": "NLP: Reading Comprehension", + "http://www.semanlink.net/tag/antimilitarisme": "Antimilitarisme", + "http://www.semanlink.net/tag/stefano_mazzocchi": "Stefano Mazzocchi", + "http://www.semanlink.net/tag/afrique_subsaharienne": "Afrique subsaharienne", + "http://www.semanlink.net/tag/ntic": "NTIC", + "http://www.semanlink.net/tag/artificial_life": "Artificial life", + "http://www.semanlink.net/tag/googletechtalks": "GoogleTechTalks", + "http://www.semanlink.net/tag/hayabusa": "Hayabusa", + "http://www.semanlink.net/tag/solr_rdf": "Solr + RDF", + "http://www.semanlink.net/tag/skos": "SKOS", + "http://www.semanlink.net/tag/statistical_machine_translation": "Statistical machine translation", + "http://www.semanlink.net/tag/pagerank": "PageRank", + "http://www.semanlink.net/tag/sw_wiki": "SW Wiki", + "http://www.semanlink.net/tag/sony": "Sony", + "http://www.semanlink.net/tag/age_du_fer": "\u00c2ge du fer", + "http://www.semanlink.net/tag/css_example": "css example", + "http://www.semanlink.net/tag/corent": "Corent", + "http://www.semanlink.net/tag/archeologue": "Arch\u00e9ologue", + "http://www.semanlink.net/tag/scientifique": "Scientifique", + "http://www.semanlink.net/tag/ouzbekistan": "Ouzb\u00e9kistan", + "http://www.semanlink.net/tag/rome": "Rome", + "http://www.semanlink.net/tag/vieux": "Vieux", + "http://www.semanlink.net/tag/venus_express": "Venus Express", + "http://www.semanlink.net/tag/ulmfit": "ULMFiT", + "http://www.semanlink.net/tag/lstm_networks": "LSTM", + "http://www.semanlink.net/tag/mathieu_d_aquin": "Mathieu d'Aquin", + "http://www.semanlink.net/tag/impot": "Imp\u00f4t", + "http://www.semanlink.net/tag/constraints_in_the_sw": "Constraints in the SW", + "http://www.semanlink.net/tag/three_mile_island": "Three Mile Island", + "http://www.semanlink.net/tag/nn_4_nlp": "NN 
4 NLP", + "http://www.semanlink.net/tag/xviie_siecle": "XVIIe\u00a0si\u00e8cle", + "http://www.semanlink.net/tag/shingles": "Shingles", + "http://www.semanlink.net/tag/amnesty_international": "Amnesty International", + "http://www.semanlink.net/tag/quantum_neuromorphic_computing": "Quantum neuromorphic computing", + "http://www.semanlink.net/tag/web_2_0": "Web 2.0", + "http://www.semanlink.net/tag/sursauts_gamma": "Sursauts gamma", + "http://www.semanlink.net/tag/ajax_applications": "Ajax applications", + "http://www.semanlink.net/tag/erythree": "\u00c9rythr\u00e9e", + "http://www.semanlink.net/tag/chris_manning": "Chris Manning", + "http://www.semanlink.net/tag/litterature_africaine": "Litt\u00e9rature africaine", + "http://www.semanlink.net/tag/bertrand_sajus": "Bertrand Sajus", + "http://www.semanlink.net/tag/reine": "Reine", + "http://www.semanlink.net/tag/linear_classifier": "Linear classifier", + "http://www.semanlink.net/tag/histoire_de_la_vie": "Histoire de la vie", + "http://www.semanlink.net/tag/as400": "AS400", + "http://www.semanlink.net/tag/semantic_browsing": "Semantic browsing", + "http://www.semanlink.net/tag/openoffice": "OpenOffice", + "http://www.semanlink.net/tag/chirac": "Chirac", + "http://www.semanlink.net/tag/rsa": "RSA", + "http://www.semanlink.net/tag/medicaments_generiques": "M\u00e9dicaments g\u00e9n\u00e9riques", + "http://www.semanlink.net/tag/rdfa": "RDFa", + "http://www.semanlink.net/tag/p_np": "P=NP", + "http://www.semanlink.net/tag/dkrl": "DKRL", + "http://www.semanlink.net/tag/lod_use_case": "LOD use case", + "http://www.semanlink.net/tag/hosting": "Hosting", + "http://www.semanlink.net/tag/topic_embeddings": "Topic embeddings", + "http://www.semanlink.net/tag/short_text_clustering": "Short Text Clustering", + "http://www.semanlink.net/tag/linked_data_fragments": "Linked Data Fragments", + "http://www.semanlink.net/tag/reader_mode_browsers": "Reader mode (browsers)", + "http://www.semanlink.net/tag/systeme_solaire": "Syst\u00e8me solaire", + "http://www.semanlink.net/tag/ile_maurice": "\u00cele Maurice", + "http://www.semanlink.net/tag/public_vocabs_w3_org": "public-vocabs@w3.org", + "http://www.semanlink.net/tag/web_schemas_task_force": "Web Schemas Task Force", + "http://www.semanlink.net/tag/low_code": "Low-code", + "http://www.semanlink.net/tag/data_augmentation": "Data Augmentation", + "http://www.semanlink.net/tag/end_to_end_entity_linking": "End-To-End Entity Linking", + "http://www.semanlink.net/tag/bioterrorisme": "Bioterrorisme", + "http://www.semanlink.net/tag/asie": "Asie", + "http://www.semanlink.net/tag/language_models_as_knowledge_bases": "Language Models as Knowledge Bases", + "http://www.semanlink.net/tag/aaron_swartz": "Aaron Swartz", + "http://www.semanlink.net/tag/makolab_semantic_day": "Makolab Semantic Day", + "http://www.semanlink.net/tag/technical_girls_and_guys": "Technical girls and guys", + "http://www.semanlink.net/tag/engelbart": "Engelbart", + "http://www.semanlink.net/tag/mitochondries": "Mitochondries", + "http://www.semanlink.net/tag/nazisme": "Nazisme", + "http://www.semanlink.net/tag/distant_supervision": "Distant Supervision", + "http://www.semanlink.net/tag/marches_financiers": "March\u00e9s financiers", + "http://www.semanlink.net/tag/france_police": "France : police", + "http://www.semanlink.net/tag/bookmarks": "Bookmarks", + "http://www.semanlink.net/tag/hongrie": "Hongrie", + "http://www.semanlink.net/tag/javaone": "JavaOne", + "http://www.semanlink.net/tag/continual_learning": "Continual Learning", + 
"http://www.semanlink.net/tag/radix_trees": "Radix trees", + "http://www.semanlink.net/tag/mots_expressions_remarquables": "Mots/expressions remarquables", + "http://www.semanlink.net/tag/videosurveillance": "Vid\u00e9osurveillance", + "http://www.semanlink.net/tag/ai_conference": "AI Conference", + "http://www.semanlink.net/tag/sport": "Sport", + "http://www.semanlink.net/tag/libshorttext": "LibShortText", + "http://www.semanlink.net/tag/inserm": "INSERM", + "http://www.semanlink.net/tag/langage": "Language", + "http://www.semanlink.net/tag/powder": "POWDER", + "http://www.semanlink.net/tag/xss": "XSS", + "http://www.semanlink.net/tag/semantics": "Semantics", + "http://www.semanlink.net/tag/foxconn": "Foxconn", + "http://www.semanlink.net/tag/nerds": "nerds", + "http://www.semanlink.net/tag/covid19_conneries_gouvernementales": "Covid19 : incurie gouvernementale", + "http://www.semanlink.net/tag/semantic_blog": "Semantic Blog", + "http://www.semanlink.net/tag/node_embeddings": "Node Embeddings", + "http://www.semanlink.net/tag/intelligence": "Intelligence", + "http://www.semanlink.net/tag/distant_reading": "Distant reading", + "http://www.semanlink.net/tag/aol": "AOL", + "http://www.semanlink.net/tag/roy_t_fielding": "Roy T. Fielding", + "http://www.semanlink.net/tag/kubernetes": "Kubernetes", + "http://www.semanlink.net/tag/fps_tweet": "fps' tweet", + "http://www.semanlink.net/tag/knowledge_engineering": "Knowledge Engineering", + "http://www.semanlink.net/tag/procrastination": "Procrastination", + "http://www.semanlink.net/tag/multiword_expressions": "Multiword Expressions", + "http://www.semanlink.net/tag/nlp_sample_code": "NLP sample code", + "http://www.semanlink.net/tag/desobeissance_civile": "D\u00e9sob\u00e9issance civile", + "http://www.semanlink.net/tag/rosetta": "Rosetta", + "http://www.semanlink.net/tag/pocketsphinx": "PocketSphinx", + "http://www.semanlink.net/tag/rdf": "RDF", + "http://www.semanlink.net/tag/metagenomics": "Metagenomics", + "http://www.semanlink.net/tag/linked_data_exploration": "Linked Data Exploration", + "http://www.semanlink.net/tag/afghanistan": "Afghanistan", + "http://www.semanlink.net/tag/voice_ai": "Voice AI", + "http://www.semanlink.net/tag/knowledge_discovery": "Knowledge Discovery", + "http://www.semanlink.net/tag/mouchard": "Mouchard", + "http://www.semanlink.net/tag/nlp_4_semanlink": "NLP 4 Semanlink", + "http://www.semanlink.net/tag/jose_moreno": "Jos\u00e9 Moreno", + "http://www.semanlink.net/tag/destination_prediction": "Destination prediction", + "http://www.semanlink.net/tag/quantum_biology": "Quantum biology", + "http://www.semanlink.net/tag/astrophysique": "Astrophysique", + "http://www.semanlink.net/tag/drogues": "Drogues", + "http://www.semanlink.net/tag/francophonie": "Francophonie", + "http://www.semanlink.net/tag/equitation": "Equitation", + "http://www.semanlink.net/tag/ontologies_use_cases": "Ontologies: use cases", + "http://www.semanlink.net/tag/rdf_repository": "RDF repository", + "http://www.semanlink.net/tag/word_embedding_evaluation": "Word embedding: evaluation", + "http://www.semanlink.net/tag/basic": "Basic", + "http://www.semanlink.net/tag/reproducible_research": "Reproducible Research", + "http://www.semanlink.net/tag/ramanujan": "Ramanujan", + "http://www.semanlink.net/tag/paolo_castagna": "Paolo Castagna", + "http://www.semanlink.net/tag/w3c_submission": "W3C Submission", + "http://www.semanlink.net/tag/commonsense_question_answering": "Commonsense Question Answering", + "http://www.semanlink.net/tag/commerce_mondial": 
"Commerce mondial", + "http://www.semanlink.net/tag/embedding_evaluation": "Embedding evaluation", + "http://www.semanlink.net/tag/linguistique": "Linguistique", + "http://www.semanlink.net/tag/struts": "Struts", + "http://www.semanlink.net/tag/junit": "JUnit", + "http://www.semanlink.net/tag/magnetisme_terrestre": "Magn\u00e9tisme terrestre", + "http://www.semanlink.net/tag/tesla_inc": "Tesla, Inc", + "http://www.semanlink.net/tag/film_japonais": "Film japonais", + "http://www.semanlink.net/tag/rumba": "Rumba", + "http://www.semanlink.net/tag/code": "Code", + "http://www.semanlink.net/tag/histropedia": "Histropedia", + "http://www.semanlink.net/tag/semantic_web_services": "Semantic Web Services", + "http://www.semanlink.net/tag/private_equity": "Private equity", + "http://www.semanlink.net/tag/fascisme": "Fascisme", + "http://www.semanlink.net/tag/java_web_dev": "Java web dev", + "http://www.semanlink.net/tag/photo_aerienne": "Photo a\u00e9rienne", + "http://www.semanlink.net/tag/prix_nobel": "Prix Nobel", + "http://www.semanlink.net/tag/patrick_gallinari": "Patrick Gallinari", + "http://www.semanlink.net/tag/infini": "Infini", + "http://www.semanlink.net/tag/topic_models_word_embedding": "Topic Models + Word embedding", + "http://www.semanlink.net/tag/ecrevisse": "\u00c9crevisse", + "http://www.semanlink.net/tag/joseki": "Joseki", + "http://www.semanlink.net/tag/feature_selection": "Feature selection", + "http://www.semanlink.net/tag/virtuoso_open_source_edition": "Virtuoso Open-Source Edition", + "http://www.semanlink.net/tag/robobees": "Robobees", + "http://www.semanlink.net/tag/rdfa_1_1_lite": "RDFa 1.1 Lite", + "http://www.semanlink.net/tag/gina_lollobrigida": "Gina Lollobrigida", + "http://www.semanlink.net/tag/word_mover_s_distance": "Word Mover\u2019s Distance", + "http://www.semanlink.net/tag/lee_feigenbaum": "Lee Feigenbaum", + "http://www.semanlink.net/tag/content_sharing": "Content Sharing", + "http://www.semanlink.net/tag/big_data_semantic_web": "Big data & semantic web", + "http://www.semanlink.net/tag/nsa": "NSA", + "http://www.semanlink.net/tag/flaubert": "Flaubert", + "http://www.semanlink.net/tag/oleoduc": "Ol\u00e9oduc", + "http://www.semanlink.net/tag/yoshua_bengio": "Yoshua Bengio", + "http://www.semanlink.net/tag/chute_de_l_empire_romain": "Chute de l'empire romain", + "http://www.semanlink.net/tag/nlp_text_classification": "Text Classification", + "http://www.semanlink.net/tag/elevage": "Elevage", + "http://www.semanlink.net/tag/subtitles": "Subtitles", + "http://www.semanlink.net/tag/google_spreadsheets": "Google Spreadsheets", + "http://www.semanlink.net/tag/langues_vivantes": "Langues vivantes", + "http://www.semanlink.net/tag/bigtable": "Bigtable", + "http://www.semanlink.net/tag/adolescents": "Adolescents", + "http://www.semanlink.net/tag/liban": "Liban", + "http://www.semanlink.net/tag/java_8_lambdas": "Java 8 lambdas", + "http://www.semanlink.net/tag/especes_menacees": "Esp\u00e8ces menac\u00e9es", + "http://www.semanlink.net/tag/thewebconf_2018": "TheWebConf 2018", + "http://www.semanlink.net/tag/tchad": "Tchad", + "http://www.semanlink.net/tag/markov_model": "Markov model", + "http://www.semanlink.net/tag/architecture": "Architecture", + "http://www.semanlink.net/tag/sem_web_context": "Sem web: context", + "http://www.semanlink.net/tag/philippe_cudre_mauroux": "Philippe Cudr\u00e9-Mauroux", + "http://www.semanlink.net/tag/bible": "Bible", + "http://www.semanlink.net/tag/carrot2": "Carrot2", + "http://www.semanlink.net/tag/models_of_consciousness": "Models 
of consciousness", + "http://www.semanlink.net/tag/consensus": "Consensus", + "http://www.semanlink.net/tag/reptile": "Reptile", + "http://www.semanlink.net/tag/sida": "Sida", + "http://www.semanlink.net/tag/w3c_working_group": "W3C Working group", + "http://www.semanlink.net/tag/gpt_3": "GPT-3", + "http://www.semanlink.net/tag/realite_virtuelle": "R\u00e9alit\u00e9 virtuelle", + "http://www.semanlink.net/tag/sparse_dictionary_learning": "Sparse coding", + "http://www.semanlink.net/tag/sorcellerie": "Sorcellerie", + "http://www.semanlink.net/tag/roman": "Roman", + "http://www.semanlink.net/tag/earth_map": "Earth map", + "http://www.semanlink.net/tag/variational_autoencoder_vae": "Variational autoencoder (VAE)", + "http://www.semanlink.net/tag/actualite": "Actualit\u00e9", + "http://www.semanlink.net/tag/syrian_civil_war": "Syrian Civil War", + "http://www.semanlink.net/tag/genocide": "G\u00e9nocide", + "http://www.semanlink.net/tag/jazz": "Jazz", + "http://www.semanlink.net/tag/finance": "Finance", + "http://www.semanlink.net/tag/nlp_harvard": "NLP@Harvard", + "http://www.semanlink.net/tag/zemanta": "Zemanta", + "http://www.semanlink.net/tag/java_8": "Java 8", + "http://www.semanlink.net/tag/pandas": "pandas", + "http://www.semanlink.net/tag/physique": "Physique", + "http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse": "Un ivrogne dans la brousse", + "http://www.semanlink.net/tag/image_recognition": "Image recognition", + "http://www.semanlink.net/tag/delicious_java": "delicious java", + "http://www.semanlink.net/tag/semweb_pro": "SemWeb Pro", + "http://www.semanlink.net/tag/yann_lecun": "Yann LeCun", + "http://www.semanlink.net/tag/conway_s_game_of_life": "Conway's Game of Life", + "http://www.semanlink.net/tag/explosions_cosmiques": "Explosions cosmiques", + "http://www.semanlink.net/tag/discounted_cumulative_gain": "Discounted cumulative gain", + "http://www.semanlink.net/tag/nlp_conference": "NLP conference", + "http://www.semanlink.net/tag/wiki_software": "Wiki Software", + "http://www.semanlink.net/tag/mobile_apps_dev": "Mobile apps dev", + "http://www.semanlink.net/tag/unsupervised_deep_pre_training": "Unsupervised deep pre-training", + "http://www.semanlink.net/tag/irit": "IRIT", + "http://www.semanlink.net/tag/owl": "OWL", + "http://www.semanlink.net/tag/nlp_problem": "NLP tasks / problems", + "http://www.semanlink.net/tag/antibiotic_resistance": "Antibiotic resistance", + "http://www.semanlink.net/tag/content_industries": "Content industries", + "http://www.semanlink.net/tag/free_will": "Free will", + "http://www.semanlink.net/tag/petrobras": "Petrobras", + "http://www.semanlink.net/tag/institutions_europeennes": "Institutions europ\u00e9ennes", + "http://www.semanlink.net/tag/rdf_framework": "RDF Framework", + "http://www.semanlink.net/tag/netflix": "Netflix", + "http://www.semanlink.net/tag/publicite_politique": "Publicit\u00e9 politique", + "http://www.semanlink.net/tag/pac": "PAC", + "http://www.semanlink.net/tag/r2rml": "R2RML", + "http://www.semanlink.net/tag/uranium": "Uranium", + "http://www.semanlink.net/tag/contrastive_learning": "Contrastive Learning", + "http://www.semanlink.net/tag/patti_smith": "Patti Smith", + "http://www.semanlink.net/tag/pollution_des_oceans": "Pollution des oc\u00e9ans", + "http://www.semanlink.net/tag/post_verite": "\"Post-V\u00e9rit\u00e9\"", + "http://www.semanlink.net/tag/gafa": "GAFA", + "http://www.semanlink.net/tag/niger_festival_de_la_jeunesse": "Niger : festival de la jeunesse", + "http://www.semanlink.net/tag/haoussa": "Hausa", + 
"http://www.semanlink.net/tag/championnat_du_monde": "Championnat du monde", + "http://www.semanlink.net/tag/deep_learning_frameworks": "Deep Learning frameworks", + "http://www.semanlink.net/tag/guerre_chimique": "Guerre chimique", + "http://www.semanlink.net/tag/hayabusa2": "Hayabusa-2", + "http://www.semanlink.net/tag/npm": "npm", + "http://www.semanlink.net/tag/xcode": "XCode", + "http://www.semanlink.net/tag/sentiment_analysis": "Sentiment analysis", + "http://www.semanlink.net/tag/journaliste": "Journaliste", + "http://www.semanlink.net/tag/bitcoin": "Bitcoin", + "http://www.semanlink.net/tag/tim_berners_lee": "Tim Berners-Lee", + "http://www.semanlink.net/tag/orri_erling": "Orri Erling", + "http://www.semanlink.net/tag/slot_tagging": "Slot tagging", + "http://www.semanlink.net/tag/film_fantastique": "Film fantastique", + "http://www.semanlink.net/tag/occam_s_razor": "Occam's razor", + "http://www.semanlink.net/tag/quantum_computing": "Quantum computing", + "http://www.semanlink.net/tag/shoira_otabekova": "Shoira Otabekova", + "http://www.semanlink.net/tag/microscope": "Microscope", + "http://www.semanlink.net/tag/cinema_africain": "Cin\u00e9ma africain", + "http://www.semanlink.net/tag/ai_event": "AI Event", + "http://www.semanlink.net/tag/automatic_summarization": "Text Summarization", + "http://www.semanlink.net/tag/greffe_arbre": "Greffe (arbre)", + "http://www.semanlink.net/tag/cell": "Cell", + "http://www.semanlink.net/tag/fpservant_slideshare": "fpservant@slideshare", + "http://www.semanlink.net/tag/ecrivain": "Ecrivain", + "http://www.semanlink.net/tag/triple_store_powered_site": "Triple-store powered site", + "http://www.semanlink.net/tag/virtual_personal_assistant": "Virtual Personal Assistant", + "http://www.semanlink.net/tag/radio": "Radio", + "http://www.semanlink.net/tag/nlp_ibm": "NLP@IBM", + "http://www.semanlink.net/tag/lilian_weng": "Lilian Weng", + "http://www.semanlink.net/tag/disparition_d_especes": "Disparition d'esp\u00e8ces", + "http://www.semanlink.net/tag/foaf_ssl": "foaf+ssl", + "http://www.semanlink.net/tag/android": "Android", + "http://www.semanlink.net/tag/diffbot": "Diffbot", + "http://www.semanlink.net/tag/rdf_star": "RDF*", + "http://www.semanlink.net/tag/semantic_startup": "Semantic startup", + "http://www.semanlink.net/tag/concept_bottleneck_models": "Concept Bottleneck Models", + "http://www.semanlink.net/tag/niger": "Niger", + "http://www.semanlink.net/tag/fps_and_ldow2008": "fps AND LDOW2008", + "http://www.semanlink.net/tag/civilisations_precolombiennes": "Civilisations pr\u00e9colombiennes", + "http://www.semanlink.net/tag/new_yorker": "New Yorker", + "http://www.semanlink.net/tag/peinture_rupestre": "Peinture rupestre", + "http://www.semanlink.net/tag/economies_d_energie": "Economies d'\u00e9nergie", + "http://www.semanlink.net/tag/text_multi_label_classification": "Multi-label Text classification", + "http://www.semanlink.net/tag/paris": "Paris", + "http://www.semanlink.net/tag/machine_learning_techniques": "Machine learning: techniques", + "http://www.semanlink.net/tag/converting_data_into_rdf": "Converting data into RDF", + "http://www.semanlink.net/tag/astronomie_multi_signaux": "Astronomie multi-signaux", + "http://www.semanlink.net/tag/graphs_nlp": "Graphs + NLP", + "http://www.semanlink.net/tag/boolean": "Boolean", + "http://www.semanlink.net/tag/jupiter_europe": "Jupiter/Europe", + "http://www.semanlink.net/tag/zitgist": "Zitgist", + "http://www.semanlink.net/tag/biopiles": "Biopiles", + "http://www.semanlink.net/tag/nlp_french": 
"NLP: French", + "http://www.semanlink.net/tag/rip": "RIP", + "http://www.semanlink.net/tag/film_francais": "Film fran\u00e7ais", + "http://www.semanlink.net/tag/web_intelligence": "Web Intelligence", + "http://www.semanlink.net/tag/uri_reference": "URI Reference", + "http://www.semanlink.net/tag/kullback_leibler_divergence": "Kullback\u2013Leibler divergence", + "http://www.semanlink.net/tag/optimization": "Optimization", + "http://www.semanlink.net/tag/film_policier": "Film policier", + "http://www.semanlink.net/tag/microsoft": "Microsoft", + "http://www.semanlink.net/tag/adaboost": "AdaBoost", + "http://www.semanlink.net/tag/antoine_bordes": "Antoine Bordes", + "http://www.semanlink.net/tag/localization": "localization", + "http://www.semanlink.net/tag/blues": "Blues", + "http://www.semanlink.net/tag/google_colab": "Google Colab", + "http://www.semanlink.net/tag/hidden_markov_model": "Hidden Markov model", + "http://www.semanlink.net/tag/swad_e": "SWAD-E", + "http://www.semanlink.net/tag/african_languages": "African languages", + "http://www.semanlink.net/tag/google_car": "Google car", + "http://www.semanlink.net/tag/servlet": "Servlet", + "http://www.semanlink.net/tag/pytorch": "PyTorch", + "http://www.semanlink.net/tag/volkswagate": "Volkswagate", + "http://www.semanlink.net/tag/nepomuk": "NEPOMUK", + "http://www.semanlink.net/tag/vehicular_communication_systems": "Vehicular communication systems", + "http://www.semanlink.net/tag/fasttext": "FastText", + "http://www.semanlink.net/tag/pierre_de_volvic": "Pierre de Volvic", + "http://www.semanlink.net/tag/it_failures": "IT failures", + "http://www.semanlink.net/tag/identifying_triples": "Identifying triples", + "http://www.semanlink.net/tag/milliardaire": "Milliardaire", + "http://www.semanlink.net/tag/ai_dangers": "AI: dangers", + "http://www.semanlink.net/tag/recette_de_cuisine": "Recette de cuisine", + "http://www.semanlink.net/tag/zapata": "Zapata", + "http://www.semanlink.net/tag/delon": "Delon", + "http://www.semanlink.net/tag/portugal": "Portugal", + "http://www.semanlink.net/tag/nokia": "Nokia", + "http://www.semanlink.net/tag/ai_knowledge": "AI + Knowledge", + "http://www.semanlink.net/tag/nlp_human_resources": "NLP + Human Resources", + "http://www.semanlink.net/tag/goldman_sachs": "Goldman Sachs", + "http://www.semanlink.net/tag/chirac_ami_des_africains": "Chirac ami des Africains", + "http://www.semanlink.net/tag/dbpedia_mobile": "DBpedia Mobile", + "http://www.semanlink.net/tag/nlp_google": "NLP@Google", + "http://www.semanlink.net/tag/the_dao": "The DAO", + "http://www.semanlink.net/tag/regions_polaires": "R\u00e9gions polaires", + "http://www.semanlink.net/tag/guerre_de_yougoslavie": "Guerre de Yougoslavie", + "http://www.semanlink.net/tag/patent": "Patent", + "http://www.semanlink.net/tag/inference": "Inference", + "http://www.semanlink.net/tag/antiquite": "Antiquit\u00e9", + "http://www.semanlink.net/tag/exomars": "ExoMars", + "http://www.semanlink.net/tag/ghana_empire": "Ghana Empire", + "http://www.semanlink.net/tag/mac_os_x_10_8": "Mac OS X 10.8", + "http://www.semanlink.net/tag/real_time": "Real-Time", + "http://www.semanlink.net/tag/nlu_is_hard": "NLU is hard", + "http://www.semanlink.net/tag/fps_notes": "fps notes", + "http://www.semanlink.net/tag/neolithique": "N\u00e9olithique", + "http://www.semanlink.net/tag/data_interoperability": "Data Interoperability", + "http://www.semanlink.net/tag/allemagne": "Allemagne", + "http://www.semanlink.net/tag/c2gweb_product_description_and_makolab": "C2GWeb, Product 
description and Makolab", + "http://www.semanlink.net/tag/good_idea": "Good idea", + "http://www.semanlink.net/tag/nlp": "NLP", + "http://www.semanlink.net/tag/survey": "Survey / Review", + "http://www.semanlink.net/tag/cite_des_sciences_et_de_l_industrie": "Cit\u00e9 des sciences et de l'industrie", + "http://www.semanlink.net/tag/constraint_programming": "Constraint Programming", + "http://www.semanlink.net/tag/nlu": "NLU", + "http://www.semanlink.net/tag/decroissance": "D\u00e9croissance", + "http://www.semanlink.net/tag/browser_back_button": "Browser : back button", + "http://www.semanlink.net/tag/multimedia_ld": "Multimedia + LD", + "http://www.semanlink.net/tag/proletaires2_0": "Prol\u00e9taires2.0", + "http://www.semanlink.net/tag/contextualised_word_representations": "Contextualized word representations", + "http://www.semanlink.net/tag/michel_servet": "Michel Servet", + "http://www.semanlink.net/tag/explainable_nlp": "Explainable NLP", + "http://www.semanlink.net/tag/video_ina_fr": "Vid\u00e9o Ina.fr", + "http://www.semanlink.net/tag/cosine_similarity": "Cosine similarity", + "http://www.semanlink.net/tag/text_to_semantic_data": "Text to semantic data", + "http://www.semanlink.net/tag/europeana": "Europeana", + "http://www.semanlink.net/tag/os_x_unix": "OS X Unix", + "http://www.semanlink.net/tag/alphago": "Alphago", + "http://www.semanlink.net/tag/ibm_developerworks": "IBM developerWorks", + "http://www.semanlink.net/tag/crowd_sourcing": "Crowd sourcing", + "http://www.semanlink.net/tag/ford": "Ford", + "http://www.semanlink.net/tag/natural_language_semantic_search": "Natural Language Semantic Search", + "http://www.semanlink.net/tag/alexandre_passant": "Alexandre Passant", + "http://www.semanlink.net/tag/common_web_language": "Common Web Language", + "http://www.semanlink.net/tag/organizer": "Organizer", + "http://www.semanlink.net/tag/phrase_mining": "Phrase mining", + "http://www.semanlink.net/tag/seyni_kountche": "Seyni Kountch\u00e9", + "http://www.semanlink.net/tag/prohibition": "Prohibition", + "http://www.semanlink.net/tag/label_embedding": "Label Embedding", + "http://www.semanlink.net/tag/hashtag": "Hashtag", + "http://www.semanlink.net/tag/molecular_clock": "Molecular clock", + "http://www.semanlink.net/tag/labeled_data": "Labeled Data", + "http://www.semanlink.net/tag/social_democracy": "Social democracy", + "http://www.semanlink.net/tag/mycarevent": "MyCarEvent", + "http://www.semanlink.net/tag/roosevelt": "Roosevelt", + "http://www.semanlink.net/tag/infringing_material": "Infringing material", + "http://www.semanlink.net/tag/neurala_lifelong_dnn": "Neurala: Lifelong-DNN", + "http://www.semanlink.net/tag/gouvernement_chirac": "Gouvernement Chirac", + "http://www.semanlink.net/tag/greenpeace": "Greenpeace", + "http://www.semanlink.net/tag/webcomponents": "Web Components", + "http://www.semanlink.net/tag/lip6": "LIP6", + "http://www.semanlink.net/tag/biomedical_nlp": "Biomedical NLP", + "http://www.semanlink.net/tag/captcha": "Captcha", + "http://www.semanlink.net/tag/gueant": "Gu\u00e9ant", + "http://www.semanlink.net/tag/world_bank": "World Bank", + "http://www.semanlink.net/tag/ner_unseen_mentions": "NER: unseen mentions", + "http://www.semanlink.net/tag/obama": "Obama", + "http://www.semanlink.net/tag/anzo": "Anzo", + "http://www.semanlink.net/tag/paul_krugman": "Paul Krugman", + "http://www.semanlink.net/tag/validation_xml_vs_rdf": "Validation: XML vs RDF", + "http://www.semanlink.net/tag/ubuntu": "Ubuntu", + "http://www.semanlink.net/tag/economie_de_la_gratuite": 
"Economie de la gratuit\u00e9", + "http://www.semanlink.net/tag/ukraine": "Ukraine", + "http://www.semanlink.net/tag/sindice": "sindice", + "http://www.semanlink.net/tag/kigali": "Kigali", + "http://www.semanlink.net/tag/openstreetmap": "OpenStreetMap", + "http://www.semanlink.net/tag/vocamp": "VoCamp", + "http://www.semanlink.net/tag/bob_ducharme": "Bob DuCharme", + "http://www.semanlink.net/tag/electricite": "Electricit\u00e9", + "http://www.semanlink.net/tag/neo_fascites": "Neo-fascites", + "http://www.semanlink.net/tag/automotive_ontology_working_group": "Automotive Ontology Working Group", + "http://www.semanlink.net/tag/cocoon": "Cocoon", + "http://www.semanlink.net/tag/tombe_d_amphipolis": "Tombe d'amphipolis", + "http://www.semanlink.net/tag/elliotte_rusty_harold": "Elliotte Rusty Harold", + "http://www.semanlink.net/tag/porc": "Porc", + "http://www.semanlink.net/tag/nlp_using_knowledge": "NLP: using Knowledge", + "http://www.semanlink.net/tag/agriculture_industrielle": "Agriculture industrielle", + "http://www.semanlink.net/tag/opml": "OPML", + "http://www.semanlink.net/tag/douglas_rushkoff": "Douglas Rushkoff", + "http://www.semanlink.net/tag/hash_bang_uris": "Hash-bang URIs", + "http://www.semanlink.net/tag/romancier": "Romancier", + "http://www.semanlink.net/tag/niger_petrole": "Niger : p\u00e9trole", + "http://www.semanlink.net/tag/fungal_infections": "Fungal infections", + "http://www.semanlink.net/tag/rdf_service": "RDF Service", + "http://www.semanlink.net/tag/sentiment": "Sentiment", + "http://www.semanlink.net/tag/cognitive_search": "Cognitive Search", + "http://www.semanlink.net/tag/agriculture_biologique": "Agriculture biologique", + "http://www.semanlink.net/tag/panama_papers": "Panama papers", + "http://www.semanlink.net/tag/banksy": "Banksy", + "http://www.semanlink.net/tag/sphere_packing": "Sphere packing", + "http://www.semanlink.net/tag/african_land_grab": "African land grab", + "http://www.semanlink.net/tag/virus": "Virus", + "http://www.semanlink.net/tag/cern": "CERN", + "http://www.semanlink.net/tag/developpement_humain": "\"D\u00e9veloppement humain\"", + "http://www.semanlink.net/tag/semantic_web_life_sciences": "Semantic Web: Life Sciences", + "http://www.semanlink.net/tag/google_patents": "Google Patents", + "http://www.semanlink.net/tag/ontowiki": "OntoWiki", + "http://www.semanlink.net/tag/proletaire": "Prol\u00e9taire", + "http://www.semanlink.net/tag/courtadon": "Courtadon", + "http://www.semanlink.net/tag/to_do": "To do", + "http://www.semanlink.net/tag/synonymy": "Synonymy", + "http://www.semanlink.net/tag/python_tips": "Python tips", + "http://www.semanlink.net/tag/pesticide": "Pesticides", + "http://www.semanlink.net/tag/martynas_jusevicius": "Martynas Jusevicius", + "http://www.semanlink.net/tag/satori": "Satori", + "http://www.semanlink.net/tag/minting_uris": "Minting URIs", + "http://www.semanlink.net/tag/gene_editing": "Gene editing", + "http://www.semanlink.net/tag/stanford_pos_tagger": "Stanford POS Tagger", + "http://www.semanlink.net/tag/schema_org": "schema.org", + "http://www.semanlink.net/tag/elites": "Elites", + "http://www.semanlink.net/tag/tibet": "Tibet", + "http://www.semanlink.net/tag/university_of_maryland": "University of Maryland", + "http://www.semanlink.net/tag/election": "Election", + "http://www.semanlink.net/tag/create_js": "create.js", + "http://www.semanlink.net/tag/geste_ecologique": "Geste \u00e9cologique", + "http://www.semanlink.net/tag/refugies": "R\u00e9fugi\u00e9s", + 
"http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy": "Massacre de la Saint-Barth\u00e9lemy", + "http://www.semanlink.net/tag/football": "Football", + "http://www.semanlink.net/tag/nginx": "Nginx", + "http://www.semanlink.net/tag/cnil": "cnil", + "http://www.semanlink.net/tag/entailment": "Entailment", + "http://www.semanlink.net/tag/taxonomy_expansion_task": "Taxonomy expansion task", + "http://www.semanlink.net/tag/eclipse_tip": "Eclipse tip", + "http://www.semanlink.net/tag/internet_en_afrique": "Internet en Afrique", + "http://www.semanlink.net/tag/gouvernement": "Gouvernement", + "http://www.semanlink.net/tag/humanitaire": "Humanitaire", + "http://www.semanlink.net/tag/poker": "Poker", + "http://www.semanlink.net/tag/statistical_relational_learning": "Statistical relational learning", + "http://www.semanlink.net/tag/alibaba": "Alibaba", + "http://www.semanlink.net/tag/umbel": "Umbel", + "http://www.semanlink.net/tag/macronie": "Macronie", + "http://www.semanlink.net/tag/etat_policier": "Etat policier", + "http://www.semanlink.net/tag/lobby_nucleaire": "Lobby nucl\u00e9aire", + "http://www.semanlink.net/tag/product_knowledge_graph": "Product Knowledge Graph", + "http://www.semanlink.net/tag/google_visualization_api": "Google Visualization API", + "http://www.semanlink.net/tag/gene_therapy": "Gene therapy", + "http://www.semanlink.net/tag/semantic_web_blog": "Semantic Web blog", + "http://www.semanlink.net/tag/terres_agricoles": "Terres agricoles", + "http://www.semanlink.net/tag/audio_classification": "Audio classification", + "http://www.semanlink.net/tag/open_knowledge_foundation": "Open Knowledge Foundation", + "http://www.semanlink.net/tag/amazon_mechanical_turk": "Amazon Mechanical Turk", + "http://www.semanlink.net/tag/fujitsu": "Fujitsu", + "http://www.semanlink.net/tag/crise_des_migrants": "Crise des migrants", + "http://www.semanlink.net/tag/sw_in_technical_automotive_documentation": "SW in Technical Automotive Documentation", + "http://www.semanlink.net/tag/travail": "Travail", + "http://www.semanlink.net/tag/evgeny_morozov": "Evgeny Morozov", + "http://www.semanlink.net/tag/memory_leak": "Memory leak", + "http://www.semanlink.net/tag/ray_kurzweil": "Ray Kurzweil", + "http://www.semanlink.net/tag/bernard_maris": "Bernard Maris", + "http://www.semanlink.net/tag/language_model": "Language model", + "http://www.semanlink.net/tag/tcp": "TCP", + "http://www.semanlink.net/tag/ann_introduction": "ANN: introduction", + "http://www.semanlink.net/tag/the_web_sucks": "The web sucks", + "http://www.semanlink.net/tag/rdfa_parser": "RDFa parser", + "http://www.semanlink.net/tag/times": "Times", + "http://www.semanlink.net/tag/rdf_templating": "RDF Templating", + "http://www.semanlink.net/tag/knowledge": "Knowledge", + "http://www.semanlink.net/tag/rosetta_project": "Rosetta Project", + "http://www.semanlink.net/tag/musicbrainz": "MusicBrainz", + "http://www.semanlink.net/tag/fossile_vivant": "Fossile vivant", + "http://www.semanlink.net/tag/arduino": "Arduino", + "http://www.semanlink.net/tag/histoire_du_niger": "Histoire du Niger", + "http://www.semanlink.net/tag/censure_et_maltraitance_animale": "Censure et maltraitance animale", + "http://www.semanlink.net/tag/vie_privee": "Privacy", + "http://www.semanlink.net/tag/sigmund_freud": "Sigmund Freud", + "http://www.semanlink.net/tag/discute_avec_raphael": "Discut\u00e9 avec Rapha\u00ebl", + "http://www.semanlink.net/tag/brain_vs_deep_learning": "Brain vs Deep Learning", + "http://www.semanlink.net/tag/ecologie": "\u00c9cologie", + 
"http://www.semanlink.net/tag/javascript_patterns": "Javascript patterns", + "http://www.semanlink.net/tag/elections_americaines_2020": "Elections am\u00e9ricaines 2020", + "http://www.semanlink.net/tag/pregnancy": "Pregnancy", + "http://www.semanlink.net/tag/part_of_speech_tagging": "Part Of Speech Tagging", + "http://www.semanlink.net/tag/magnetisme": "Magn\u00e9tisme", + "http://www.semanlink.net/tag/metadata_indexing": "Metadata indexing", + "http://www.semanlink.net/tag/sense_embeddings": "Sense embeddings", + "http://www.semanlink.net/tag/uberisation": "Uberisation", + "http://www.semanlink.net/tag/nlp_topic_extraction": "Keyword/keyphrase extraction", + "http://www.semanlink.net/tag/silicon_valley": "Silicon Valley", + "http://www.semanlink.net/tag/ontoprise": "Ontoprise", + "http://www.semanlink.net/tag/html_data": "HTML Data", + "http://www.semanlink.net/tag/jvisualvm": "JVisualVM", + "http://www.semanlink.net/tag/minimum_description_length_principle": "Minimum Description Length Principle", + "http://www.semanlink.net/tag/ontology_based_data_access": "Ontology-based Data Access", + "http://www.semanlink.net/tag/compagnies_petrolieres": "Compagnies p\u00e9troli\u00e8res", + "http://www.semanlink.net/tag/security": "Security", + "http://www.semanlink.net/tag/madonna": "Madonna", + "http://www.semanlink.net/tag/lynda_tamine": "Lynda Tamine", + "http://www.semanlink.net/tag/kristallnacht": "Kristallnacht", + "http://www.semanlink.net/tag/conquistadores": "Conquistadores", + "http://www.semanlink.net/tag/github_project": "GitHub project", + "http://www.semanlink.net/tag/partial_differential_equations": "Partial differential equations", + "http://www.semanlink.net/tag/singular_value_decomposition": "Singular Value Decomposition", + "http://www.semanlink.net/tag/classification_relations_between_classes": "Classification: dependencies between labels", + "http://www.semanlink.net/tag/georges_brassens": "Georges Brassens", + "http://www.semanlink.net/tag/video_games": "Video games", + "http://www.semanlink.net/tag/hymne_national": "Hymne national", + "http://www.semanlink.net/tag/armee_americaine": "Arm\u00e9e am\u00e9ricaine", + "http://www.semanlink.net/tag/self_supervised_learning": "Self-Supervised Learning", + "http://www.semanlink.net/tag/google_rich_cards": "Google Rich Cards", + "http://www.semanlink.net/tag/pekin": "P\u00e9kin", + "http://www.semanlink.net/tag/deepwalk": "DeepWalk", + "http://www.semanlink.net/tag/smartphone": "Smartphone", + "http://www.semanlink.net/tag/about_rdf": "About RDF", + "http://www.semanlink.net/tag/jena_rules": "Jena rules", + "http://www.semanlink.net/tag/google_ranking": "Google ranking", + "http://www.semanlink.net/tag/test_of_independent_invention": "Test of independent invention", + "http://www.semanlink.net/tag/stemming": "Stemming", + "http://www.semanlink.net/tag/nlp_low_resource_scenarios": "Low-Resource NLP", + "http://www.semanlink.net/tag/judea_pearl": "Judea Pearl", + "http://www.semanlink.net/tag/disparition_de_langues_vivantes": "Disparition de langues vivantes", + "http://www.semanlink.net/tag/ckan": "CKAN", + "http://www.semanlink.net/tag/colbert": "ColBERT", + "http://www.semanlink.net/tag/emnlp_2018": "EMNLP 2018", + "http://www.semanlink.net/tag/owllink_protocol": "OWLlink Protocol", + "http://www.semanlink.net/tag/visual_search": "Visual search", + "http://www.semanlink.net/tag/sony_hack": "Sony Hack", + "http://www.semanlink.net/tag/restricted_boltzmann_machine": "Restricted Boltzmann machine", + 
"http://www.semanlink.net/tag/hector_lavoe": "H\u00e9ctor Lavoe", + "http://www.semanlink.net/tag/naftali_tishby": "Naftali Tishby", + "http://www.semanlink.net/tag/java": "Java", + "http://www.semanlink.net/tag/kaguya": "Kaguya", + "http://www.semanlink.net/tag/anne_haour": " Anne Haour", + "http://www.semanlink.net/tag/sparql_construct": "SPARQL Construct", + "http://www.semanlink.net/tag/semantic_web_outliner": "Semantic Web Outliner", + "http://www.semanlink.net/tag/text_processing": "Text processing", + "http://www.semanlink.net/tag/pre_trained_models": "Pre-trained Models", + "http://www.semanlink.net/tag/synaptic_web": "Synaptic Web", + "http://www.semanlink.net/tag/secheresse": "S\u00e9cheresse", + "http://www.semanlink.net/tag/zero_shot_text_classifier": "Zero-shot Text Classifier", + "http://www.semanlink.net/tag/r": "R", + "http://www.semanlink.net/tag/multilinguisme": "Multilinguisme", + "http://www.semanlink.net/tag/dalai_lama": "Dalai Lama", + "http://www.semanlink.net/tag/sonarqube": "SonarQube", + "http://www.semanlink.net/tag/civilisation_elamite": "Civilisation \u00e9lamite", + "http://www.semanlink.net/tag/exploration_marsienne": "Exploration marsienne", + "http://www.semanlink.net/tag/openlink": "OpenLink Software", + "http://www.semanlink.net/tag/alphafold": "AlphaFold", + "http://www.semanlink.net/tag/medecins_sans_frontieres": "M\u00e9decins sans fronti\u00e8res", + "http://www.semanlink.net/tag/deep_learning_attention": "Attention mechanism", + "http://www.semanlink.net/tag/meetup": "Meetup", + "http://www.semanlink.net/tag/disque_a_retrouver": "Disque \u00e0 retrouver", + "http://www.semanlink.net/tag/monsanto": "Monsanto", + "http://www.semanlink.net/tag/western": "Western", + "http://www.semanlink.net/tag/txtai": "txtai", + "http://www.semanlink.net/tag/credit_card": "Credit card", + "http://www.semanlink.net/tag/sarkozy_et_la_recherche": "Sarkozy et la recherche", + "http://www.semanlink.net/tag/coursera_r_programming": "Coursera: R Programming", + "http://www.semanlink.net/tag/linked_data_application": "Linked Data: application", + "http://www.semanlink.net/tag/ios": "iOS", + "http://www.semanlink.net/tag/semantic_web_p2p": "Semantic Web P2P", + "http://www.semanlink.net/tag/huggingface_bigscience": "HuggingFace BigScience", + "http://www.semanlink.net/tag/fps_ontologies": "fps ontologies", + "http://www.semanlink.net/tag/italie": "Italie", + "http://www.semanlink.net/tag/edd_dumbill": "Edd Dumbill", + "http://www.semanlink.net/tag/federated_sparql_queries": "Federated SPARQL queries", + "http://www.semanlink.net/tag/to_see": "To see", + "http://www.semanlink.net/tag/technological_singularity": "Technological singularity", + "http://www.semanlink.net/tag/multimodal_classification": "Multimodal classification", + "http://www.semanlink.net/tag/microsoft_azure": "Microsoft Azure", + "http://www.semanlink.net/tag/entities_to_topics": "Entities to topics", + "http://www.semanlink.net/tag/spin_functions": "SPIN functions", + "http://www.semanlink.net/tag/chene": "Ch\u00eane", + "http://www.semanlink.net/tag/guantanamo": "Guantanamo", + "http://www.semanlink.net/tag/john_steinbeck": "John Steinbeck", + "http://www.semanlink.net/tag/json": "JSON", + "http://www.semanlink.net/tag/keras_embedding_layer": "Keras embedding layer", + "http://www.semanlink.net/tag/rayons_cosmiques": "Rayons cosmiques", + "http://www.semanlink.net/tag/a_la_carte_embedding": "A La Carte Embedding", + "http://www.semanlink.net/tag/bertrand_russell": "Bertrand Russell", + 
"http://www.semanlink.net/tag/finite_state_transducer": "Finite-state transducer", + "http://www.semanlink.net/tag/pre_trained_language_models": "Pre-Trained Language Models", + "http://www.semanlink.net/tag/steve_cayzer": "Steve Cayzer", + "http://www.semanlink.net/tag/nlp_rare_words": "Rare words (NLP)", + "http://www.semanlink.net/tag/hierarchical_memory_networks": "Hierarchical Memory Networks", + "http://www.semanlink.net/tag/paggr": "paggr", + "http://www.semanlink.net/tag/recyclage": "Recyclage", + "http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest": "Boucle ferroviaire d\u2019Afrique de l\u2019Ouest", + "http://www.semanlink.net/tag/cinema_americain": "Cin\u00e9ma am\u00e9ricain", + "http://www.semanlink.net/tag/malicious_code": "Malicious code", + "http://www.semanlink.net/tag/programmers": "Programmers", + "http://www.semanlink.net/tag/film_cubain": "Film cubain", + "http://www.semanlink.net/tag/kids": "Kids", + "http://www.semanlink.net/tag/kirghizistan": "Kirghizistan", + "http://www.semanlink.net/tag/text_dim_reduction": "Text: dimension reduction", + "http://www.semanlink.net/tag/algebre": "Alg\u00e8bre", + "http://www.semanlink.net/tag/enigmes_de_la_physique": "Enigmes de la physique", + "http://www.semanlink.net/tag/danemark": "Danemark", + "http://www.semanlink.net/tag/pandora_papers": "Pandora Papers", + "http://www.semanlink.net/tag/lord_s_resistance_army": "Lord's Resistance Army", + "http://www.semanlink.net/tag/facebook_fair": "Facebook FAIR", + "http://www.semanlink.net/tag/elephant": "El\u00e9phant", + "http://www.semanlink.net/tag/kd_mkb_biblio": "KD-MKB biblio", + "http://www.semanlink.net/tag/masse_manquante": "Masse manquante", + "http://www.semanlink.net/tag/jersey_cache_control": "Jersey Cache-Control", + "http://www.semanlink.net/tag/perelman": "Perelman", + "http://www.semanlink.net/tag/microsoft_concept_graph": "Microsoft Concept Graph", + "http://www.semanlink.net/tag/pubby": "Pubby", + "http://www.semanlink.net/tag/civilisation_de_l_indus": "Civilisation de l'Indus", + "http://www.semanlink.net/tag/personal_archives": "Personal archives", + "http://www.semanlink.net/tag/soap": "SOAP", + "http://www.semanlink.net/tag/benin": "B\u00e9nin", + "http://www.semanlink.net/tag/capsule_networks": "Capsule networks", + "http://www.semanlink.net/tag/emotions": "Emotions", + "http://www.semanlink.net/tag/loi_sur_le_telechargement": "Loi sur le t\u00e9l\u00e9chargement", + "http://www.semanlink.net/tag/javascript_tips": "Javascript tips", + "http://www.semanlink.net/tag/cinema_francais": "Cin\u00e9ma fran\u00e7ais", + "http://www.semanlink.net/tag/sauver_la_planete": "Sauver la plan\u00e8te", + "http://www.semanlink.net/tag/ofir": "Ofir", + "http://www.semanlink.net/tag/intellectuel": "Intellectuel", + "http://www.semanlink.net/tag/deep_learning_book": "Deep Learning Book", + "http://www.semanlink.net/tag/telelamarna": "Telelamarna", + "http://www.semanlink.net/tag/semantic_web_propaganda": "Semantic Web propaganda", + "http://www.semanlink.net/tag/matrix_calculus": "Matrix calculus", + "http://www.semanlink.net/tag/stanford_ner": "Stanford NER", + "http://www.semanlink.net/tag/pauli": "Pauli", + "http://www.semanlink.net/tag/semantic_web_training": "Semantic web: training", + "http://www.semanlink.net/tag/hittite": "Hittite", + "http://www.semanlink.net/tag/i_b_m_s_watson": "IBM Watson", + "http://www.semanlink.net/tag/conjecture_de_poincare": "Conjecture de Poincar\u00e9", + "http://www.semanlink.net/tag/caterpillar": "Caterpillar", + 
"http://www.semanlink.net/tag/link_to_me": "Link to me", + "http://www.semanlink.net/tag/simple_idea": "Simple idea", + "http://www.semanlink.net/tag/venus": "V\u00e9nus", + "http://www.semanlink.net/tag/cortical_io": "Cortical.io", + "http://www.semanlink.net/tag/short_sales": "Short selling", + "http://www.semanlink.net/tag/what_s_encoded_by_a_nn": "What's encoded by a NN", + "http://www.semanlink.net/tag/skos_owl": "SKOS/OWL", + "http://www.semanlink.net/tag/mathematica": "Mathematica", + "http://www.semanlink.net/tag/autriche": "Autriche", + "http://www.semanlink.net/tag/marsupiaux": "Marsupiaux", + "http://www.semanlink.net/tag/chiffres": "Chiffres", + "http://www.semanlink.net/tag/empire_romain": "Empire romain", + "http://www.semanlink.net/tag/guha": "Guha", + "http://www.semanlink.net/tag/primate": "Primate", + "http://www.semanlink.net/tag/spec": "Spec", + "http://www.semanlink.net/tag/rdf_binary": "RDF/binary", + "http://www.semanlink.net/tag/statistical_classification": "Classification", + "http://www.semanlink.net/tag/judo": "Judo", + "http://www.semanlink.net/tag/explorateur": "Explorateur", + "http://www.semanlink.net/tag/macron": "Macron", + "http://www.semanlink.net/tag/construction_europeenne": "Construction europ\u00e9enne", + "http://www.semanlink.net/tag/chris_bizer": "Chris Bizer", + "http://www.semanlink.net/tag/aspect_based_sentiment_analysis": "Aspect-Based Sentiment Analysis", + "http://www.semanlink.net/tag/nuxeo": "Nuxeo", + "http://www.semanlink.net/tag/marisa_monte": "Marisa Monte", + "http://www.semanlink.net/tag/projet_pharaonique": "Projet pharaonique", + "http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque": "Crise de la dette publique grecque", + "http://www.semanlink.net/tag/sarkozyland": "Sarkozyland", + "http://www.semanlink.net/tag/economie_allemande": "Economie allemande", + "http://www.semanlink.net/tag/richard_socher": "Richard Socher", + "http://www.semanlink.net/tag/brains_in_silicon": "Bio inspired computing devices", + "http://www.semanlink.net/tag/google_fusion_tables": "Google Fusion Tables", + "http://www.semanlink.net/tag/jena_tdb": "Jena TDB", + "http://www.semanlink.net/tag/ambiguity_nlp": "Ambiguity (NLP)", + "http://www.semanlink.net/tag/peter_mika": "Peter Mika", + "http://www.semanlink.net/tag/atoll": "Atoll", + "http://www.semanlink.net/tag/plante": "Plante", + "http://www.semanlink.net/tag/chomage": "Ch\u00f4mage", + "http://www.semanlink.net/tag/bayer": "Bayer", + "http://www.semanlink.net/tag/second_life": "Second Life", + "http://www.semanlink.net/tag/sentence_similarity": "Sentence Similarity", + "http://www.semanlink.net/tag/phase_transition": "Phase transition", + "http://www.semanlink.net/tag/chatbot": "Chatbots", + "http://www.semanlink.net/tag/deep_nlp": "Deep NLP", + "http://www.semanlink.net/tag/re_decentralize_the_web": "(Re-)decentralize the Web", + "http://www.semanlink.net/tag/hydrogen_cars": "Hydrogen Cars", + "http://www.semanlink.net/tag/gaussian_process": "Gaussian process", + "http://www.semanlink.net/tag/semantic_enterprise": "Semantic Enterprise", + "http://www.semanlink.net/tag/homme_politique": "Homme politique", + "http://www.semanlink.net/tag/neandertal": "N\u00e9andertal", + "http://www.semanlink.net/tag/car_options_ontology": "Car Options Ontology", + "http://www.semanlink.net/tag/droit_d_auteur": "Droit d'auteur", + "http://www.semanlink.net/tag/palestine": "Palestine", + "http://www.semanlink.net/tag/criquet": "Criquet", + "http://www.semanlink.net/tag/denny_britz": "Denny Britz", + 
"http://www.semanlink.net/tag/alistair_miles": "Alistair Miles", + "http://www.semanlink.net/tag/what_could_go_wrong": "What could go wrong?", + "http://www.semanlink.net/tag/reinhard_mey": "Reinhard Mey", + "http://www.semanlink.net/tag/continent_de_plastique": "Continent de plastique", + "http://www.semanlink.net/tag/antilles": "Antilles", + "http://www.semanlink.net/tag/ijcai": "IJCAI", + "http://www.semanlink.net/tag/supernova": "Supernova", + "http://www.semanlink.net/tag/gouvernement_francais": "Gouvernement fran\u00e7ais", + "http://www.semanlink.net/tag/stacking_ensemble_learning": "Stacking (ensemble learning)", + "http://www.semanlink.net/tag/meta_content_framework": "Meta Content Framework", + "http://www.semanlink.net/tag/w3c": "W3C", + "http://www.semanlink.net/tag/henri_bergius": "Henri Bergius", + "http://www.semanlink.net/tag/ml_engineering": "ML Engineering", + "http://www.semanlink.net/tag/redis": "Redis", + "http://www.semanlink.net/tag/baikal": "Ba\u00efkal", + "http://www.semanlink.net/tag/knowledge_graph_completion": "Knowledge Graph Completion", + "http://www.semanlink.net/tag/pain": "Pain", + "http://www.semanlink.net/tag/todo_read": "TODO-READ!", + "http://www.semanlink.net/tag/conflits": "Conflits", + "http://www.semanlink.net/tag/satellite_images": "Satellite images", + "http://www.semanlink.net/tag/lord_of_the_flies": "Lord of the Flies", + "http://www.semanlink.net/tag/ldap": "LDAP", + "http://www.semanlink.net/tag/abuse_of_power": "Abuse of power", + "http://www.semanlink.net/tag/rdf_performance_issues": "RDF performance issues", + "http://www.semanlink.net/tag/friedrich_nietzsche": "Nietzsche", + "http://www.semanlink.net/tag/entity_linking": "Entity linking", + "http://www.semanlink.net/tag/void_vocabulary_of_interlinked_datasets": "VoID (Vocabulary of Interlinked Datasets)", + "http://www.semanlink.net/tag/ora_lassila": "Ora Lassila", + "http://www.semanlink.net/tag/rplug": "rplug", + "http://www.semanlink.net/tag/topbraid_spin": "TopBraid/SPIN", + "http://www.semanlink.net/tag/evilstreak_markdown_js": "evilstreak/markdown-js", + "http://www.semanlink.net/tag/neuroscience": "Neuroscience", + "http://www.semanlink.net/tag/explainable_ai": "Explainable AI", + "http://www.semanlink.net/tag/ballmer": "Ballmer", + "http://www.semanlink.net/tag/uri_encoding": "URI encoding", + "http://www.semanlink.net/tag/politique_de_l_innovation": "Politique de l'innovation", + "http://www.semanlink.net/tag/david_peterson": "David Peterson", + "http://www.semanlink.net/tag/ontologies": "Ontologies", + "http://www.semanlink.net/tag/film_indien": "Film indien", + "http://www.semanlink.net/tag/arte": "Arte", + "http://www.semanlink.net/tag/n3": "N3", + "http://www.semanlink.net/tag/critique_du_liberalisme": "Critique du lib\u00e9ralisme", + "http://www.semanlink.net/tag/sebastian_schaffert": "Sebastian Schaffert", + "http://www.semanlink.net/tag/aterm": "ATerm", + "http://www.semanlink.net/tag/sparte": "Sparte", + "http://www.semanlink.net/tag/drupal": "Drupal", + "http://www.semanlink.net/tag/origines_de_l_homme": "Origines de l'homme", + "http://www.semanlink.net/tag/mars_curiosity": "Mars/Curiosity", + "http://www.semanlink.net/tag/french_semantic_web_company": "French Semantic web company", + "http://www.semanlink.net/tag/flickr": "Flickr", + "http://www.semanlink.net/tag/right_to_explanation": "Right to explanation", + "http://www.semanlink.net/tag/creve_coeur": "Cr\u00e8ve c\u0153ur", + "http://www.semanlink.net/tag/rant": "Rant", + 
"http://www.semanlink.net/tag/conceptual_clustering": "Conceptual clustering", + "http://www.semanlink.net/tag/grands_singes": "Grands Singes", + "http://www.semanlink.net/tag/arq": "ARQ", + "http://www.semanlink.net/tag/digital_media": "Digital Media", + "http://www.semanlink.net/tag/enterprise_content_management": "Enterprise Content Management", + "http://www.semanlink.net/tag/owlsight": "OwlSight", + "http://www.semanlink.net/tag/hello_world": "Hello World", + "http://www.semanlink.net/tag/backpropagation": "Backpropagation", + "http://www.semanlink.net/tag/rapidminer_java": "RapidMiner/Java", + "http://www.semanlink.net/tag/naive_bayes_classifier": "Naive Bayes classifier", + "http://www.semanlink.net/tag/jena_dev": "Jena dev", + "http://www.semanlink.net/tag/mesh_network": "Mesh network", + "http://www.semanlink.net/tag/xenophon": "Xenophon", + "http://www.semanlink.net/tag/neural_machine_translation": "Neural machine translation", + "http://www.semanlink.net/tag/ai_application": "AI Application", + "http://www.semanlink.net/tag/la_main_a_la_pate": "La main \u00e0 la p\u00e2te", + "http://www.semanlink.net/tag/markets": "Markets", + "http://www.semanlink.net/tag/html_editor": "HTML Editor", + "http://www.semanlink.net/tag/matthew_honnibal": "Matthew Honnibal", + "http://www.semanlink.net/tag/python_tools": "Python tools", + "http://www.semanlink.net/tag/owl2vec": "OWL2Vec", + "http://www.semanlink.net/tag/xsl": "XSL", + "http://www.semanlink.net/tag/kingsley_idehen": "Kingsley Idehen", + "http://www.semanlink.net/tag/personal_cloud": "Personal cloud", + "http://www.semanlink.net/tag/dsi": "DSI", + "http://www.semanlink.net/tag/mac_os_x": "Mac OS X", + "http://www.semanlink.net/tag/automobile_manuals": "Automobile manuals", + "http://www.semanlink.net/tag/bbc_semantic_publishing": "BBC semantic publishing", + "http://www.semanlink.net/tag/dette": "Debt", + "http://www.semanlink.net/tag/common_sense": "Common Sense", + "http://www.semanlink.net/tag/good_related_work_section": "Good related work section", + "http://www.semanlink.net/tag/france_politique_etrangere": "France : politique \u00e9trang\u00e8re", + "http://www.semanlink.net/tag/crise_des_banlieues": "Crise des banlieues", + "http://www.semanlink.net/tag/speech_recognition": "Speech-to-Text", + "http://www.semanlink.net/tag/cea": "CEA", + "http://www.semanlink.net/tag/land_degradation": "Land Degradation", + "http://www.semanlink.net/tag/botanique": "Botanique", + "http://www.semanlink.net/tag/semantic_camp_paris": "Semantic Camp Paris", + "http://www.semanlink.net/tag/markdown_ittt": "markdown-it", + "http://www.semanlink.net/tag/olivier_rossel": "Olivier Rossel", + "http://www.semanlink.net/tag/language_identification": "Language Identification", + "http://www.semanlink.net/tag/new_york": "New York", + "http://www.semanlink.net/tag/realite_augmentee": "R\u00e9alit\u00e9 augment\u00e9e", + "http://www.semanlink.net/tag/croisade_des_enfants": "Croisade des enfants", + "http://www.semanlink.net/tag/nlp_girls_and_guys": "NLP girls and guys", + "http://www.semanlink.net/tag/livres_audio": "Livres audio", + "http://www.semanlink.net/tag/entity_embeddings": "Entity embeddings", + "http://www.semanlink.net/tag/loropeni": "Lorop\u00e9ni", + "http://www.semanlink.net/tag/type_system": "Type system", + "http://www.semanlink.net/tag/web_services": "Web Services", + "http://www.semanlink.net/tag/prediction": "Prediction", + "http://www.semanlink.net/tag/niamey": "Niamey", + "http://www.semanlink.net/tag/semantic_web_evangelization": 
"Semantic web: evangelization", + "http://www.semanlink.net/tag/stanislas_dehaene": "Stanislas Dehaene", + "http://www.semanlink.net/tag/causal_inference": "Causal inference", + "http://www.semanlink.net/tag/data_gouv_fr": "data.gouv.fr", + "http://www.semanlink.net/tag/sioc": "SIOC", + "http://www.semanlink.net/tag/drupal_rdf": "Drupal/RDF", + "http://www.semanlink.net/tag/unsupervised_keyphrase_extraction": "Unsupervised keyphrase extraction", + "http://www.semanlink.net/tag/matiere_noire": "Dark matter", + "http://www.semanlink.net/tag/fanatisme": "Fanatisme", + "http://www.semanlink.net/tag/gensim": "gensim", + "http://www.semanlink.net/tag/owl_dl": "OWL DL", + "http://www.semanlink.net/tag/foaf": "foaf", + "http://www.semanlink.net/tag/web_apis": "Web APIs", + "http://www.semanlink.net/tag/top_k": "Top-k", + "http://www.semanlink.net/tag/bielorussie": "Bi\u00e9lorussie", + "http://www.semanlink.net/tag/crise_des_subprimes": "Crise des subprimes", + "http://www.semanlink.net/tag/java_library": "Java library", + "http://www.semanlink.net/tag/guerre_civile": "Guerre civile", + "http://www.semanlink.net/tag/administration": "Administration", + "http://www.semanlink.net/tag/tips": "Tips", + "http://www.semanlink.net/tag/nlp_and_humanities": "NLP and humanities", + "http://www.semanlink.net/tag/script_tag_hack": "Script tag hack", + "http://www.semanlink.net/tag/afrique": "Afrique", + "http://www.semanlink.net/tag/frameworks": "Frameworks", + "http://www.semanlink.net/tag/metaweb": "Metaweb", + "http://www.semanlink.net/tag/multimodal_models": "Multimodal Models", + "http://www.semanlink.net/tag/lov_linked_open_vocabularies": "(LOV) Linked Open Vocabularies", + "http://www.semanlink.net/tag/eclipse": "Eclipse", + "http://www.semanlink.net/tag/bart_van_leeuwen": "Bart van Leeuwen", + "http://www.semanlink.net/tag/servlet_3_0": "Servlet 3.0", + "http://www.semanlink.net/tag/hypothese_de_riemann": "Riemann Hypothesis", + "http://www.semanlink.net/tag/chine": "Chine", + "http://www.semanlink.net/tag/sweo_interest_group": "SWEO Interest Group", + "http://www.semanlink.net/tag/royaume_uni": "Royaume Uni", + "http://www.semanlink.net/tag/traders": "Traders", + "http://www.semanlink.net/tag/concept_search": "Concept Search", + "http://www.semanlink.net/tag/laure_soulier": "Laure Soulier", + "http://www.semanlink.net/tag/cost_of_linked_data": "Cost of Linked Data", + "http://www.semanlink.net/tag/chimie": "Chimie", + "http://www.semanlink.net/tag/kernel_method": "Kernel methods", + "http://www.semanlink.net/tag/garamantes": "Garamantes", + "http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation": "Regroupement familial et test ADN de filiation", + "http://www.semanlink.net/tag/nlp_princeton": "NLP@Princeton", + "http://www.semanlink.net/tag/knowbert": "KnowBert", + "http://www.semanlink.net/tag/pedro_almodovar": "Pedro Almod\u00f3var", + "http://www.semanlink.net/tag/credit_default_swap": "Credit default swap", + "http://www.semanlink.net/tag/biotech_industry": "Biotech industry", + "http://www.semanlink.net/tag/masakhane": "Masakhane", + "http://www.semanlink.net/tag/owl_ontology": "OWL ontology", + "http://www.semanlink.net/tag/clerezza": "Clerezza", + "http://www.semanlink.net/tag/coronavirus": "Covid19", + "http://www.semanlink.net/tag/suede": "Su\u00e8de", + "http://www.semanlink.net/tag/surprises_me": "Surprising", + "http://www.semanlink.net/tag/mac_dev": "Mac dev", + "http://www.semanlink.net/tag/low_tech": "Low-Tech", + 
"http://www.semanlink.net/tag/transfer_learning_in_nlp": "Transfer learning in NLP", + "http://www.semanlink.net/tag/hebbian_theory": "Hebb's rule", + "http://www.semanlink.net/tag/treeview": "Treeview", + "http://www.semanlink.net/tag/analyse_semantique": "Analyse s\u00e9mantique", + "http://www.semanlink.net/tag/wikipedia": "Wikipedia", + "http://www.semanlink.net/tag/souverainete_numerique": "Souverainet\u00e9 num\u00e9rique", + "http://www.semanlink.net/tag/chrome": "Chrome", + "http://www.semanlink.net/tag/rene_vautier": "Ren\u00e9 Vautier", + "http://www.semanlink.net/tag/industrie_nucleaire": "Industrie nucl\u00e9aire", + "http://www.semanlink.net/tag/skos_editor": "SKOS editor", + "http://www.semanlink.net/tag/moat": "MOAT", + "http://www.semanlink.net/tag/generative_model": "Generative model", + "http://www.semanlink.net/tag/multilingual_nlp": "Multilingual NLP", + "http://www.semanlink.net/tag/chimere": "Chim\u00e8re", + "http://www.semanlink.net/tag/fbi": "FBI", + "http://www.semanlink.net/tag/ip_ir_ml_ia": "AI 4 IP", + "http://www.semanlink.net/tag/risk_analysis": "Risk analysis", + "http://www.semanlink.net/tag/pensee": "Pens\u00e9e", + "http://www.semanlink.net/tag/blackbox_nlp": "Blackbox NLP", + "http://www.semanlink.net/tag/learning": "Learning", + "http://www.semanlink.net/tag/semantic_web_critique": "Semantic Web : critique", + "http://www.semanlink.net/tag/feature_hashing": "Feature hashing (\"Hashing trick\")", + "http://www.semanlink.net/tag/w3c_working_draft": "W3C Working Draft", + "http://www.semanlink.net/tag/mathematiques": "Math\u00e9matiques", + "http://www.semanlink.net/tag/pillage_du_palais_d_ete": "Pillage du palais d'\u00e9t\u00e9", + "http://www.semanlink.net/tag/sw_at_renault": "SW at Renault", + "http://www.semanlink.net/tag/xlnet": "XLNet", + "http://www.semanlink.net/tag/grands_problemes": "Grands probl\u00e8mes", + "http://www.semanlink.net/tag/commercialising_the_semantic_web": "Commercialising the Semantic Web", + "http://www.semanlink.net/tag/topic_modeling": "Topic Modeling", + "http://www.semanlink.net/tag/knowledge_graph_construction": "Knowledge Graph Construction", + "http://www.semanlink.net/tag/hoax": "Hoax", + "http://www.semanlink.net/tag/tahar_ben_jelloun": "Tahar Ben Jelloun", + "http://www.semanlink.net/tag/devops": "DevOps", + "http://www.semanlink.net/tag/calais": "Calais", + "http://www.semanlink.net/tag/xvie_siecle": "XVIe si\u00e8cle", + "http://www.semanlink.net/tag/krill": "Krill", + "http://www.semanlink.net/tag/voyage_en_chine": "Voyage en Chine", + "http://www.semanlink.net/tag/greasemonkey": "Greasemonkey", + "http://www.semanlink.net/tag/encelade": "Encelade", + "http://www.semanlink.net/tag/virtual_currency": "Digital currency", + "http://www.semanlink.net/tag/automotive_ontology_community_group": "Automotive Ontology Community Group", + "http://www.semanlink.net/tag/ftp": "FTP", + "http://www.semanlink.net/tag/edf": "EDF", + "http://www.semanlink.net/tag/semantic_media_wiki": "Semantic Media Wiki", + "http://www.semanlink.net/tag/publicite_internet": "Publicit\u00e9 Internet", + "http://www.semanlink.net/tag/howto": "Howto", + "http://www.semanlink.net/tag/text_preprocessing": "Text preprocessing", + "http://www.semanlink.net/tag/espionnage": "Espionnage", + "http://www.semanlink.net/tag/crete_antique": "Cr\u00e8te antique", + "http://www.semanlink.net/tag/revolution_francaise": "R\u00e9volution fran\u00e7aise", + "http://www.semanlink.net/tag/semblog": "semblog", + "http://www.semanlink.net/tag/tasmanie": "Tasmanie", + 
"http://www.semanlink.net/tag/itunes": "iTunes", + "http://www.semanlink.net/tag/france_culture": "France Culture", + "http://www.semanlink.net/tag/xhtml": "XHTML", + "http://www.semanlink.net/tag/faq": "FAQ", + "http://www.semanlink.net/tag/slime_mold": "Slime mold", + "http://www.semanlink.net/tag/zika": "Zika", + "http://www.semanlink.net/tag/spatial_search": "Spatial search", + "http://www.semanlink.net/tag/master_data_management": "Master Data Management", + "http://www.semanlink.net/tag/reading": "Reading", + "http://www.semanlink.net/tag/semantic_web_ui": "Semantic Web : UI", + "http://www.semanlink.net/tag/apache_opennlp": "Apache OpenNLP", + "http://www.semanlink.net/tag/tombouctou": "Tombouctou", + "http://www.semanlink.net/tag/prix_nobel_de_physique": "Prix Nobel de physique", + "http://www.semanlink.net/tag/siamese_network": "Siamese networks", + "http://www.semanlink.net/tag/principal_component_analysis": "Principal component analysis", + "http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce": "La communaut\u00e9 internationale est une garce", + "http://www.semanlink.net/tag/revolution": "R\u00e9volution", + "http://www.semanlink.net/tag/everipedia": "Everipedia", + "http://www.semanlink.net/tag/the_guardian": "The Guardian", + "http://www.semanlink.net/tag/evaluation_measures": "Evaluation measures", + "http://www.semanlink.net/tag/penseur": "Penseur", + "http://www.semanlink.net/tag/adn": "ADN", + "http://www.semanlink.net/tag/concept_extraction": "Concept Extraction / Linking", + "http://www.semanlink.net/tag/serverless": "Serverless", + "http://www.semanlink.net/tag/geolocalisation": "Geolocalisation", + "http://www.semanlink.net/tag/os_x_app": "OS X app", + "http://www.semanlink.net/tag/linked_data_browser": "Linked Data Browser", + "http://www.semanlink.net/tag/color_naming": "Color naming", + "http://www.semanlink.net/tag/genetic_programming": "Genetic Programming", + "http://www.semanlink.net/tag/pierre_rebour": "Pierre Rebour", + "http://www.semanlink.net/tag/giec": "GIEC", + "http://www.semanlink.net/tag/jean_paul": "Jean-Paul Cardinal", + "http://www.semanlink.net/tag/paleontologie": "Pal\u00e9ontologie", + "http://www.semanlink.net/tag/edith_piaf": "Edith Piaf", + "http://www.semanlink.net/tag/prejuges": "Pr\u00e9jug\u00e9s", + "http://www.semanlink.net/tag/islande": "Islande", + "http://www.semanlink.net/tag/phpmyadmin": "phpMyAdmin", + "http://www.semanlink.net/tag/yrjana_rankka": "Yrj\u00e4n\u00e4 Rankka", + "http://www.semanlink.net/tag/semantic_web_crawler": "Semantic Web Crawler", + "http://www.semanlink.net/tag/bullshit_web": "Bullshit Web", + "http://www.semanlink.net/tag/arts_premiers": "Arts premiers", + "http://www.semanlink.net/tag/carl_lewis": "Carl Lewis", + "http://www.semanlink.net/tag/spectral_clustering": "Spectral clustering", + "http://www.semanlink.net/tag/tap": "TAP", + "http://www.semanlink.net/tag/jeremy_howard": "Jeremy Howard", + "http://www.semanlink.net/tag/poolparty": "PoolParty", + "http://www.semanlink.net/tag/nli": "Natural Language Inference", + "http://www.semanlink.net/tag/maali_mnasri": "Ma\u00e2li Mnasri", + "http://www.semanlink.net/tag/insectes_fossiles": "Insectes fossiles", + "http://www.semanlink.net/tag/tensorflow_2_0": "TensorFlow 2.0", + "http://www.semanlink.net/tag/arbres": "Arbres", + "http://www.semanlink.net/tag/anonymous": "Anonymous", + "http://www.semanlink.net/tag/virtuoso_universal_server": "Virtuoso Universal Server", + "http://www.semanlink.net/tag/naomi_klein": "Naomi Klein", + 
"http://www.semanlink.net/tag/graph_based_semi_supervised_learning": "Graph-based Semi-Supervised Learning", + "http://www.semanlink.net/tag/hong_kong": "Hong Kong", + "http://www.semanlink.net/tag/woody_allen": "Woody Allen", + "http://www.semanlink.net/tag/reagan": "Reagan", + "http://www.semanlink.net/tag/tabulator": "Tabulator", + "http://www.semanlink.net/tag/nova_spivak": "Nova Spivak", + "http://www.semanlink.net/tag/tagging": "Tagging", + "http://www.semanlink.net/tag/palmyra": "Palmyra", + "http://www.semanlink.net/tag/cyborg": "Cyborg", + "http://www.semanlink.net/tag/hixie": "Hixie", + "http://www.semanlink.net/tag/context_free_grammar": "Context-free grammar", + "http://www.semanlink.net/tag/fruit": "Fruit", + "http://www.semanlink.net/tag/attention_is_all_you_need": "Transformers", + "http://www.semanlink.net/tag/souvenirs": "Souvenirs", + "http://www.semanlink.net/tag/industrie_textile": "Industrie textile", + "http://www.semanlink.net/tag/graph_neural_networks": "Graph neural networks", + "http://www.semanlink.net/tag/fps_paper": "fps: paper", + "http://www.semanlink.net/tag/cathares": "Cathares", + "http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche": "Minist\u00e8re de l'enseignement sup\u00e9rieur et de la recherche", + "http://www.semanlink.net/tag/millennium_goal": "Millennium Goal", + "http://www.semanlink.net/tag/ldow2011": "LDOW2011", + "http://www.semanlink.net/tag/multnomah_falls": "Multnomah Falls", + "http://www.semanlink.net/tag/openlink_ajax_toolkit_oat": "OpenLink Ajax Toolkit (OAT)", + "http://www.semanlink.net/tag/andy_seaborne": "Andy Seaborne", + "http://www.semanlink.net/tag/auvergne": "Auvergne", + "http://www.semanlink.net/tag/web_2_0_businesses": "Web 2.0 businesses", + "http://www.semanlink.net/tag/delip_rao": "Delip Rao", + "http://www.semanlink.net/tag/paris_2024": "Paris 2024", + "http://www.semanlink.net/tag/minhash": "MinHash", + "http://www.semanlink.net/tag/units_of_measure": "Units of measure", + "http://www.semanlink.net/tag/semantic_text_matching": "Semantic Text Matching", + "http://www.semanlink.net/tag/imperialisme_americain": "Imp\u00e9rialisme am\u00e9ricain", + "http://www.semanlink.net/tag/jeux_olympiques": "Jeux Olympiques", + "http://www.semanlink.net/tag/object_oriented_programming": "Object Oriented Programming", + "http://www.semanlink.net/tag/cohn_bendit": "Cohn-Bendit", + "http://www.semanlink.net/tag/novartis": "Novartis", + "http://www.semanlink.net/tag/congo_kinshasa": "RDC", + "http://www.semanlink.net/tag/julien_cardinal": "Julien Cardinal", + "http://www.semanlink.net/tag/nlp_task_as_qa_problem": "NLP task as a QA problem", + "http://www.semanlink.net/tag/euro": "Euro", + "http://www.semanlink.net/tag/arbres_remarquables": "Arbres remarquables", + "http://www.semanlink.net/tag/akhenaton": "Akh\u00eanaton", + "http://www.semanlink.net/tag/predicting_numeric_values_from_text": "Predicting numeric values from text", + "http://www.semanlink.net/tag/politique_economique_francaise": "Politique \u00e9conomique fran\u00e7aise", + "http://www.semanlink.net/tag/rdfj": "RDFj", + "http://www.semanlink.net/tag/mars_2004": "Mars 2004", + "http://www.semanlink.net/tag/my_old_things": "My old things", + "http://www.semanlink.net/tag/mark_zuckerberg": "Zuckerberg", + "http://www.semanlink.net/tag/docker_tomcat": "Docker-Tomcat", + "http://www.semanlink.net/tag/meaning_in_nlp": "Meaning in NLP", + "http://www.semanlink.net/tag/esprit_de_resistance": "Esprit de r\u00e9sistance", + 
"http://www.semanlink.net/tag/oceanie": "Oc\u00e9anie", + "http://www.semanlink.net/tag/cedric_villani": "C\u00e9dric Villani", + "http://www.semanlink.net/tag/madame_bovary": "Madame Bovary", + "http://www.semanlink.net/tag/peter_patel_schneider": "Peter Patel-Schneider", + "http://www.semanlink.net/tag/variabilite_du_genome_humain": "Variabilit\u00e9 du g\u00e9nome humain", + "http://www.semanlink.net/tag/fps_post": "fps' post", + "http://www.semanlink.net/tag/finding_rdf_documents": "Finding RDF documents", + "http://www.semanlink.net/tag/emnlp_2019": "EMNLP 2019", + "http://www.semanlink.net/tag/knowledge_graph_embeddings": "Knowledge Graph Embeddings", + "http://www.semanlink.net/tag/natural_language_generation": "Natural language generation", + "http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings": "Hierarchy-Aware KG Embeddings", + "http://www.semanlink.net/tag/isp": "ISP", + "http://www.semanlink.net/tag/yougoslavie": "Yougoslavie", + "http://www.semanlink.net/tag/cool": "Cool", + "http://www.semanlink.net/tag/equateur": "Equateur", + "http://www.semanlink.net/tag/c2gweb_and_product_description": "C2GWeb and Product description", + "http://www.semanlink.net/tag/prelevements_obligatoires": "Pr\u00e9l\u00e8vements obligatoires", + "http://www.semanlink.net/tag/chlorinated_chicken": "Chlorinated chicken", + "http://www.semanlink.net/tag/owled": "OWLED", + "http://www.semanlink.net/tag/loi_renseignement": "Loi Renseignement", + "http://www.semanlink.net/tag/fn": "FN", + "http://www.semanlink.net/tag/installing_apps": "Installing apps", + "http://www.semanlink.net/tag/kaggle": "Kaggle", + "http://www.semanlink.net/tag/philosophe": "Philosophe", + "http://www.semanlink.net/tag/w3c_note": "W3C Note", + "http://www.semanlink.net/tag/keras_functional_api": "Keras Functional API", + "http://www.semanlink.net/tag/theatre": "Th\u00e9atre", + "http://www.semanlink.net/tag/film_americain": "Film am\u00e9ricain", + "http://www.semanlink.net/tag/ernest": "Ernest Ilisca", + "http://www.semanlink.net/tag/science": "Science", + "http://www.semanlink.net/tag/haskell": "Haskell", + "http://www.semanlink.net/tag/lobbies_economiques": "Lobbies \u00e9conomiques", + "http://www.semanlink.net/tag/walmart": "Walmart", + "http://www.semanlink.net/tag/dereferencing_http_uris": "Dereferencing HTTP URIs", + "http://www.semanlink.net/tag/httprange_14": "httpRange-14", + "http://www.semanlink.net/tag/hierarchical_tags": "Hierarchical tags", + "http://www.semanlink.net/tag/liberte_de_pensee": "Libert\u00e9 de pens\u00e9e", + "http://www.semanlink.net/tag/exploration_spatiale": "Exploration spatiale", + "http://www.semanlink.net/tag/faiss": "faiss", + "http://www.semanlink.net/tag/approximate_nearest_neighbor": "Approximate nearest-neighbor", + "http://www.semanlink.net/tag/nlp_class": "Coursera: NLP class", + "http://www.semanlink.net/tag/james_stewart": "James Stewart", + "http://www.semanlink.net/tag/orne": "Orne", + "http://www.semanlink.net/tag/iran": "Iran", + "http://www.semanlink.net/tag/faceted_search": "Faceted Search", + "http://www.semanlink.net/tag/nlp_use_cases": "NLP: use cases", + "http://www.semanlink.net/tag/fastai_nbdev": "nbdev.fast.ai", + "http://www.semanlink.net/tag/automobile_2_0": "Automobile 2.0", + "http://www.semanlink.net/tag/mvc": "MVC", + "http://www.semanlink.net/tag/accountable_ai": "Accountable AI", + "http://www.semanlink.net/tag/xtech_2007": "XTech 2007", + "http://www.semanlink.net/tag/machines_teaching_machines": "Machines teaching machines", + 
"http://www.semanlink.net/tag/markup": "Markup", + "http://www.semanlink.net/tag/user_manuals": "User manuals", + "http://www.semanlink.net/tag/swse": "SWSE", + "http://www.semanlink.net/tag/jsonp": "JSONP", + "http://www.semanlink.net/tag/semantic_web_services_vs_soap": "Semantic Web Services vs SOAP", + "http://www.semanlink.net/tag/tolerance": "Tol\u00e9rance", + "http://www.semanlink.net/tag/gui": "GUI", + "http://www.semanlink.net/tag/mort": "Mort", + "http://www.semanlink.net/tag/presentation_tool": "Presentation tool", + "http://www.semanlink.net/tag/arundhati_roy": "Arundhati Roy", + "http://www.semanlink.net/tag/validation": "Validator", + "http://www.semanlink.net/tag/sicile": "Sicile", + "http://www.semanlink.net/tag/weak_supervision": "Weak supervision", + "http://www.semanlink.net/tag/fourmi": "Fourmi", + "http://www.semanlink.net/tag/rif": "RIF", + "http://www.semanlink.net/tag/notes_d_install": "Notes d'install", + "http://www.semanlink.net/tag/access_token": "Access Token", + "http://www.semanlink.net/tag/sarkozy_et_extreme_droite": "Sarkozy et extr\u00e8me droite", + "http://www.semanlink.net/tag/hackers": "Hackers", + "http://www.semanlink.net/tag/boko_haram": "Boko Haram", + "http://www.semanlink.net/tag/w3c_community_group": "W3C Community Group", + "http://www.semanlink.net/tag/semantic_seo": "Semantic SEO", + "http://www.semanlink.net/tag/data_visualization_tools": "Visualization Tools", + "http://www.semanlink.net/tag/charlton_heston": "Charlton Heston", + "http://www.semanlink.net/tag/obsidian": "Obsidian", + "http://www.semanlink.net/tag/syngenta": "Syngenta", + "http://www.semanlink.net/tag/pape_francois": "Pape Fran\u00e7ois", + "http://www.semanlink.net/tag/responsive_design": "Responsive Design", + "http://www.semanlink.net/tag/gradient_boosting": "Gradient boosting", + "http://www.semanlink.net/tag/jax_rs": "JAX-RS", + "http://www.semanlink.net/tag/celte": "Celte", + "http://www.semanlink.net/tag/nikolai_vavilov": "Nikolai Vavilov", + "http://www.semanlink.net/tag/lauryn_hill": "Lauryn Hill", + "http://www.semanlink.net/tag/presidentielles_2007": "Pr\u00e9sidentielles 2007", + "http://www.semanlink.net/tag/chip": "Chip", + "http://www.semanlink.net/tag/geologie": "G\u00e9ologie", + "http://www.semanlink.net/tag/debarquement": "D\u00e9barquement", + "http://www.semanlink.net/tag/nebra_sky_disc": "Nebra Sky Disc", + "http://www.semanlink.net/tag/anticolonialisme": "Anticolonialisme", + "http://www.semanlink.net/tag/implementing_a_jena_graph": "Implementing a Jena Graph", + "http://www.semanlink.net/tag/researchgate": "ResearchGate", + "http://www.semanlink.net/tag/pydev": "PyDev", + "http://www.semanlink.net/tag/ikuya_yamada": "Ikuya Yamada", + "http://www.semanlink.net/tag/n_gram": "N-grams", + "http://www.semanlink.net/tag/javascript_tool": "Javascript tool", + "http://www.semanlink.net/tag/cory_doctorow": "Cory Doctorow", + "http://www.semanlink.net/tag/triplestore": "TripleStore", + "http://www.semanlink.net/tag/apple": "Apple", + "http://www.semanlink.net/tag/ec_web": "EC-Web", + "http://www.semanlink.net/tag/medical_ir_ml_ia": "Medical IR, ML, IA", + "http://www.semanlink.net/tag/tulipe": "Tulipe", + "http://www.semanlink.net/tag/triplet_loss": "Triplet Loss", + "http://www.semanlink.net/tag/nils_reimers": "Nils Reimers", + "http://www.semanlink.net/tag/these_irit_renault_biblio": "Th\u00e8se IRIT-Renault: biblio", + "http://www.semanlink.net/tag/archeologie_percheronne": "Archeologie percheronne", + "http://www.semanlink.net/tag/aventure": "Aventure", + 
"http://www.semanlink.net/tag/image_classification": "Image classification", + "http://www.semanlink.net/tag/rotate": "RotatE", + "http://www.semanlink.net/tag/spam": "Spam", + "http://www.semanlink.net/tag/moyen_age": "Moyen-\u00e2ge", + "http://www.semanlink.net/tag/cringely": "Cringely", + "http://www.semanlink.net/tag/economic_sanctions": "Economic sanctions", + "http://www.semanlink.net/tag/time_series": "Time Series", + "http://www.semanlink.net/tag/femme": "Femme", + "http://www.semanlink.net/tag/sql_to_rdf_mapping": "SQL to RDF mapping", + "http://www.semanlink.net/tag/enswers": "Enswers", + "http://www.semanlink.net/tag/information_retrieval_techniques": "Information retrieval: techniques", + "http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne": "Nous vivons une \u00e9poque moderne", + "http://www.semanlink.net/tag/linked_learning": "Linked Learning", + "http://www.semanlink.net/tag/multilingual_embeddings": "Multilingual embeddings", + "http://www.semanlink.net/tag/liberte_de_la_presse": "Libert\u00e9 de la presse", + "http://www.semanlink.net/tag/telephone": "T\u00e9l\u00e9phone", + "http://www.semanlink.net/tag/vector_space_model": "Vector space model", + "http://www.semanlink.net/tag/qotd": "QOTD", + "http://www.semanlink.net/tag/firefox": "Firefox", + "http://www.semanlink.net/tag/rapport_villani_sur_l_ia": "Mission Villani sur l'IA", + "http://www.semanlink.net/tag/deutsch": "Deutsch", + "http://www.semanlink.net/tag/episodic_memory": "Episodic Memory", + "http://www.semanlink.net/tag/hierarchical_clustering": "Hierarchical Clustering", + "http://www.semanlink.net/tag/social_content_services": "Social Content Services", + "http://www.semanlink.net/tag/conversational_ai": "Conversational AI", + "http://www.semanlink.net/tag/oauth2": "OAuth2", + "http://www.semanlink.net/tag/peintre": "Peintre", + "http://www.semanlink.net/tag/puceron": "Puceron", + "http://www.semanlink.net/tag/jean_rouch": "Jean Rouch", + "http://www.semanlink.net/tag/map_territory_relation": "Map\u2013territory relation", + "http://www.semanlink.net/tag/property_graphs": "Property Graphs", + "http://www.semanlink.net/tag/predictions": "Predictions", + "http://www.semanlink.net/tag/gradient_descent": "Gradient descent", + "http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions": "France : dysfonctionnement des institutions", + "http://www.semanlink.net/tag/hypermedia": "Hypermedia", + "http://www.semanlink.net/tag/semantic_web_conferences": "Semantic Web conferences", + "http://www.semanlink.net/tag/monde_moderne": "Monde moderne", + "http://www.semanlink.net/tag/social_web": "Social Web", + "http://www.semanlink.net/tag/information_visualization": "Information visualization", + "http://www.semanlink.net/tag/semanlink_todo": "Semanlink todo", + "http://www.semanlink.net/tag/luis_von_ahn": "Luis von Ahn", + "http://www.semanlink.net/tag/sculpture": "Sculpture", + "http://www.semanlink.net/tag/web_pollution": "Web Pollution", + "http://www.semanlink.net/tag/lodr": "LODr", + "http://www.semanlink.net/tag/knowledge_based_ai": "Knowledge-based AI", + "http://www.semanlink.net/tag/these_irit_renault_biblio_initiale": "Th\u00e8se IRIT-Renault: biblio initiale", + "http://www.semanlink.net/tag/virtualbox": "VirtualBox", + "http://www.semanlink.net/tag/entreprise": "Entreprise", + "http://www.semanlink.net/tag/ecole_montessori": "Ecole Montessori", + "http://www.semanlink.net/tag/python_nlp": "Python-NLP", + "http://www.semanlink.net/tag/concise_bounded_description": "Concise Bounded 
Description", + "http://www.semanlink.net/tag/mixture_distribution": "Mixture distribution", + "http://www.semanlink.net/tag/candidate_sampling": "Candidate Sampling", + "http://www.semanlink.net/tag/trou_noir": "Trou noir", + "http://www.semanlink.net/tag/google_deepmind": "DeepMind", + "http://www.semanlink.net/tag/grece_mycenienne": "Gr\u00e8ce myc\u00e9nienne", + "http://www.semanlink.net/tag/nosql_and_eventual_consistency": "NoSQL and eventual consistency", + "http://www.semanlink.net/tag/musee_archeologique_de_bagdad": "Mus\u00e9e arch\u00e9ologique de Bagdad", + "http://www.semanlink.net/tag/chine_ecologie": "Chine : \u00e9cologie", + "http://www.semanlink.net/tag/cornell": "Cornell", + "http://www.semanlink.net/tag/manuscrits_de_tombouctou": "Manuscrits de Tombouctou", + "http://www.semanlink.net/tag/twitter": "Twitter", + "http://www.semanlink.net/tag/pedra_furada": "Pedra Furada", + "http://www.semanlink.net/tag/nearest_neighbor_search": "Nearest neighbor search", + "http://www.semanlink.net/tag/annotation_tools": "Annotation tools", + "http://www.semanlink.net/tag/turtle_in_html": "Turtle in HTML", + "http://www.semanlink.net/tag/marklogic": "Marklogic", + "http://www.semanlink.net/tag/learned_index_structures": "Learned Index Structures", + "http://www.semanlink.net/tag/chrome_extension": "Chrome extension", + "http://www.semanlink.net/tag/reformer": "Reformer", + "http://www.semanlink.net/tag/norilsk": "Norilsk", + "http://www.semanlink.net/tag/insecticide": "Insecticide", + "http://www.semanlink.net/tag/ministere_de_la_culture": "Minist\u00e8re de la culture", + "http://www.semanlink.net/tag/new_york_times": "New York Times", + "http://www.semanlink.net/tag/sota": "SOTA", + "http://www.semanlink.net/tag/fps": "fps", + "http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings": "Convolutional Knowledge Graph Embeddings", + "http://www.semanlink.net/tag/spark_java_web_framework": "Spark (Java web framework)", + "http://www.semanlink.net/tag/ai_black_box": "AI black box", + "http://www.semanlink.net/tag/owl_full": "OWL-Full", + "http://www.semanlink.net/tag/neelie_kroes": "Neelie Kroes", + "http://www.semanlink.net/tag/pollution_de_l_eau": "Pollution de l'eau", + "http://www.semanlink.net/tag/meta_reinforcement_learning": "Meta Reinforcement Learning", + "http://www.semanlink.net/tag/graph_based_text_representations": "Graph-based Text Representations", + "http://www.semanlink.net/tag/colombie": "Colombie", + "http://www.semanlink.net/tag/negociations_climat": "N\u00e9gociations climat ", + "http://www.semanlink.net/tag/extinction_de_masse": "Extinction de masse", + "http://www.semanlink.net/tag/rest_security": "REST Security", + "http://www.semanlink.net/tag/nlp_long_documents": "Long documents", + "http://www.semanlink.net/tag/macron_et_l_ecologie": "Macron et l'\u00e9cologie", + "http://www.semanlink.net/tag/ihm": "IHM", + "http://www.semanlink.net/tag/eau_extraterrestre": "Eau extraterrestre", + "http://www.semanlink.net/tag/lego": "Lego", + "http://www.semanlink.net/tag/semantic_gap": "Semantic gap", + "http://www.semanlink.net/tag/api_design": "API design", + "http://www.semanlink.net/tag/riches": "Riches", + "http://www.semanlink.net/tag/javascript_closures": "Javascript closures", + "http://www.semanlink.net/tag/dimensionality_reduction": "Dimensionality reduction", + "http://www.semanlink.net/tag/liberation": "Lib\u00e9ration", + "http://www.semanlink.net/tag/social_networkd_are_bad": "Social Networkd are bad", + 
"http://www.semanlink.net/tag/api_management": "API management", + "http://www.semanlink.net/tag/poincare": "Poincar\u00e9", + "http://www.semanlink.net/tag/noos": "Noos", + "http://www.semanlink.net/tag/islam": "Islam", + "http://www.semanlink.net/tag/arn": "RNA", + "http://www.semanlink.net/tag/sparql_tips": "SPARQL Tips", + "http://www.semanlink.net/tag/cuba": "Cuba", + "http://www.semanlink.net/tag/common_tag": "Common Tag", + "http://www.semanlink.net/tag/administration_francaise": "Administration fran\u00e7aise", + "http://www.semanlink.net/tag/automatic_tagging": "Automatic tagging", + "http://www.semanlink.net/tag/wiktionnaire": "Wiktionnaire", + "http://www.semanlink.net/tag/pretrained_models": "Pretrained models", + "http://www.semanlink.net/tag/scientific_information_extraction": "Scientific information extraction", + "http://www.semanlink.net/tag/statistical_data": "Statistical data", + "http://www.semanlink.net/tag/maladie": "Maladie", + "http://www.semanlink.net/tag/flash": "Flash", + "http://www.semanlink.net/tag/national_geographic": "National Geographic", + "http://www.semanlink.net/tag/python_install": "Python install", + "http://www.semanlink.net/tag/richesses_sous_marines": "Richesses sous-marines", + "http://www.semanlink.net/tag/quora_question_pairs": "Quora Question Pairs", + "http://www.semanlink.net/tag/photo": "Photo", + "http://www.semanlink.net/tag/obelisque": "Ob\u00e9lisque", + "http://www.semanlink.net/tag/citation": "Quote", + "http://www.semanlink.net/tag/emmanuel_ledinot": "Emmanuel Ledinot", + "http://www.semanlink.net/tag/spotlight_osx": "Spotlight (OSX)", + "http://www.semanlink.net/tag/rockart": "Rockart", + "http://www.semanlink.net/tag/mythologie": "Mythologie", + "http://www.semanlink.net/tag/arxiv_doc": "Arxiv Doc", + "http://www.semanlink.net/tag/photons_correles": "Photons corr\u00e9l\u00e9s", + "http://www.semanlink.net/tag/locality_sensitive_hashing": "Locality Sensitive Hashing", + "http://www.semanlink.net/tag/diffa": "Diffa", + "http://www.semanlink.net/tag/rdf123": "RDF123", + "http://www.semanlink.net/tag/genocide_rwandais": "G\u00e9nocide rwandais", + "http://www.semanlink.net/tag/menace": "Menace", + "http://www.semanlink.net/tag/or": "Or", + "http://www.semanlink.net/tag/lucene": "Lucene", + "http://www.semanlink.net/tag/3d": "3D", + "http://www.semanlink.net/tag/ghana": "Ghana", + "http://www.semanlink.net/tag/maroc": "Maroc", + "http://www.semanlink.net/tag/richard_stallman": "Richard Stallman", + "http://www.semanlink.net/tag/solar_storm": "Solar storm", + "http://www.semanlink.net/tag/ai_girls_and_guys": "AI girls and guys", + "http://www.semanlink.net/tag/craig_venter_institute": "Craig Venter Institute", + "http://www.semanlink.net/tag/google_uber_alles": "Google \u00fcber alles", + "http://www.semanlink.net/tag/social_software": "Social software", + "http://www.semanlink.net/tag/email_classification": "email classification", + "http://www.semanlink.net/tag/politique_et_environnement": "Politique et environnement", + "http://www.semanlink.net/tag/absurde": "Absurde", + "http://www.semanlink.net/tag/dietrich_schulten": "Dietrich Schulten", + "http://www.semanlink.net/tag/nlp_automotive": "NLP+Automotive", + "http://www.semanlink.net/tag/nicolas_hulot": "Nicolas Hulot", + "http://www.semanlink.net/tag/patrimoine": "Patrimoine", + "http://www.semanlink.net/tag/probabilistic_graphical_models": "Probabilistic Graphical Models", + "http://www.semanlink.net/tag/www_2013": "WWW 2013", + "http://www.semanlink.net/tag/biologie": "Biology", 
+ "http://www.semanlink.net/tag/mit": "MIT", + "http://www.semanlink.net/tag/knowledge_graph_ml": "Knowledge Graph + ML", + "http://www.semanlink.net/tag/makolab": "Makolab", + "http://www.semanlink.net/tag/cross_origin_resource_sharing": "Cross-Origin Resource Sharing", + "http://www.semanlink.net/tag/dimitris": "Dimitris", + "http://www.semanlink.net/tag/public_hydra_w3_org": "public-hydra@w3.org", + "http://www.semanlink.net/tag/osiris_rex": "OSIRIS-REx", + "http://www.semanlink.net/tag/bureaucratie": "Bureaucratie", + "http://www.semanlink.net/tag/iphone_app": "iphone app", + "http://www.semanlink.net/tag/spritz": "Spritz", + "http://www.semanlink.net/tag/burkina_faso": "Burkina Faso", + "http://www.semanlink.net/tag/eccenca": "eccenca", + "http://www.semanlink.net/tag/les_petites_cases": "Les petites cases", + "http://www.semanlink.net/tag/digital_audio": "Digital Audio", + "http://www.semanlink.net/tag/moussa_poussi": "Moussa Poussi", + "http://www.semanlink.net/tag/imbalanced_data": "Imbalanced Data", + "http://www.semanlink.net/tag/londres": "Londres", + "http://www.semanlink.net/tag/disparition_des_abeilles": "Disparition des abeilles", + "http://www.semanlink.net/tag/drm": "DRM", + "http://www.semanlink.net/tag/carte": "Carte", + "http://www.semanlink.net/tag/lod_cloud": "LOD cloud", + "http://www.semanlink.net/tag/grant_ingersoll": "Grant Ingersoll", + "http://www.semanlink.net/tag/nlp_amazon": "NLP@Amazon", + "http://www.semanlink.net/tag/xbrl": "XBRL", + "http://www.semanlink.net/tag/octo": "OCTO", + "http://www.semanlink.net/tag/about_semanlink": "About Semanlink", + "http://www.semanlink.net/tag/vie_extraterrestre": "Vie extraterrestre", + "http://www.semanlink.net/tag/brad_pitt": "Brad Pitt", + "http://www.semanlink.net/tag/nick_clegg": "Nick Clegg", + "http://www.semanlink.net/tag/voile_mer": "Voile (bateau)", + "http://www.semanlink.net/tag/death_of_hyperlink": "Death of Hyperlink", + "http://www.semanlink.net/tag/court_metrage": "Court m\u00e9trage", + "http://www.semanlink.net/tag/sourceforge": "SourceForge", + "http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp": "Combining text and structured data (ML-NLP)", + "http://www.semanlink.net/tag/blogmarks": "blogmarks", + "http://www.semanlink.net/tag/indifference": "Indiff\u00e9rence", + "http://www.semanlink.net/tag/kg_tasks": "KG: tasks", + "http://www.semanlink.net/tag/rembrandt": "Rembrandt", + "http://www.semanlink.net/tag/facebook_cambridge_analytica": "Cambridge Analytica", + "http://www.semanlink.net/tag/coding": "Coding", + "http://www.semanlink.net/tag/francis_pisani": "Francis Pisani", + "http://www.semanlink.net/tag/visually_rich_documents": "Visually rich documents", + "http://www.semanlink.net/tag/concept_hierarchies": "Concept hierarchies", + "http://www.semanlink.net/tag/semantic_web_company": "Semantic web company", + "http://www.semanlink.net/tag/drones": "Drones", + "http://www.semanlink.net/tag/pierres_precieuses": "Pierres pr\u00e9cieuses", + "http://www.semanlink.net/tag/excel_and_sw": "Excel and SW", + "http://www.semanlink.net/tag/semantic_wiki": "Semantic Wiki", + "http://www.semanlink.net/tag/net": ".NET", + "http://www.semanlink.net/tag/dita": "DITA", + "http://www.semanlink.net/tag/semweb_pro_2012": "SemWeb Pro 2012", + "http://www.semanlink.net/tag/urbanisation": "Urbanisation", + "http://www.semanlink.net/tag/megalith": "Megalith", + "http://www.semanlink.net/tag/language_model_fine_tuning": "Language Model Fine-tuning", + 
"http://www.semanlink.net/tag/erich_maria_remarque": "Erich Maria Remarque", + "http://www.semanlink.net/tag/whisky": "Whisky", + "http://www.semanlink.net/tag/product_description": "Product description", + "http://www.semanlink.net/tag/training_data": "Training data", + "http://www.semanlink.net/tag/histoire_de_l_inde": "Histoire de l'Inde", + "http://www.semanlink.net/tag/canada": "Canada", + "http://www.semanlink.net/tag/honda": "Honda", + "http://www.semanlink.net/tag/backplanejs": "backplanejs", + "http://www.semanlink.net/tag/tagged": "Tagged", + "http://www.semanlink.net/tag/formal_knowledge_representation_language": "Formal knowledge representation language", + "http://www.semanlink.net/tag/semantic_data_wiki": "Semantic data wiki", + "http://www.semanlink.net/tag/web_serving": "Web Serving", + "http://www.semanlink.net/tag/machine_learning_semantic_web": "Machine Learning + Semantic Web", + "http://www.semanlink.net/tag/sequence_to_sequence_learning": "Sequence-to-sequence learning", + "http://www.semanlink.net/tag/linux_hosting": "Linux hosting", + "http://www.semanlink.net/tag/number_of_neurons": "Number of neurons", + "http://www.semanlink.net/tag/strange": "Strange", + "http://www.semanlink.net/tag/liberte": "Libert\u00e9", + "http://www.semanlink.net/tag/universites_americaines": "Universit\u00e9s am\u00e9ricaines", + "http://www.semanlink.net/tag/business_case_semantic_web": "Business case: semantic web", + "http://www.semanlink.net/tag/ranking": "Ranking", + "http://www.semanlink.net/tag/bob_dylan": "Bob Dylan", + "http://www.semanlink.net/tag/talis_rdf_json": "Talis RDF/JSON", + "http://www.semanlink.net/tag/lynn_margulis": "Lynn Margulis", + "http://www.semanlink.net/tag/co_training": "Co-training", + "http://www.semanlink.net/tag/leo_sauermann": "Leo Sauermann", + "http://www.semanlink.net/tag/learning_by_imitation": "Learning by imitation", + "http://www.semanlink.net/tag/ranking_svm": "Ranking SVM", + "http://www.semanlink.net/tag/open_source": "Open Source", + "http://www.semanlink.net/tag/javascript": "JavaScript", + "http://www.semanlink.net/tag/text_editor": "Text Editor", + "http://www.semanlink.net/tag/aspect_target_sentiment_classification": "Aspect-Target Sentiment Classification", + "http://www.semanlink.net/tag/job_openings": "Job openings", + "http://www.semanlink.net/tag/ws_vs_pox_http": "WS-* vs. 
POX/HTTP", + "http://www.semanlink.net/tag/femme_celebre": "Femme c\u00e9l\u00e8bre (o\u00f9 qui m\u00e9rite de l'\u00eatre)", + "http://www.semanlink.net/tag/tony_blair": "Tony Blair", + "http://www.semanlink.net/tag/coursera_deep_learning": "Coursera: Deep Learning", + "http://www.semanlink.net/tag/memory_networks": "Memory networks", + "http://www.semanlink.net/tag/cnes": "CNES", + "http://www.semanlink.net/tag/restful_semantic_web_services": "Restful semantic web services", + "http://www.semanlink.net/tag/javascript_frameork": "Javascript framework", + "http://www.semanlink.net/tag/altavista": "AltaVista", + "http://www.semanlink.net/tag/fps_dev": "fps dev", + "http://www.semanlink.net/tag/dictionnaire": "Dictionnaire", + "http://www.semanlink.net/tag/ml_google": "AI@Google", + "http://www.semanlink.net/tag/deforestation": "D\u00e9forestation", + "http://www.semanlink.net/tag/reparation_automobile": "R\u00e9paration automobile", + "http://www.semanlink.net/tag/cookie": "Cookies", + "http://www.semanlink.net/tag/martin_hepp": "Martin Hepp", + "http://www.semanlink.net/tag/the_limits_to_growth": "The Limits to Growth", + "http://www.semanlink.net/tag/topbraid": "TopBraid", + "http://www.semanlink.net/tag/one_laptop_per_child": "One Laptop Per Child", + "http://www.semanlink.net/tag/javascript_rdf": "Javascript RDF", + "http://www.semanlink.net/tag/erudition": "Erudition", + "http://www.semanlink.net/tag/guillaume_lample": "Guillaume Lample", + "http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model": "Not Encoding Factual Knowledge in Language Model", + "http://www.semanlink.net/tag/norman_walsh": "Norman Walsh", + "http://www.semanlink.net/tag/dataportability": "DataPortability", + "http://www.semanlink.net/tag/graphdb": "GraphDB", + "http://www.semanlink.net/tag/thought_alone_controlled_device": "Thought alone controlled device", + "http://www.semanlink.net/tag/bactria_margiana_archaeological_complex": "Bactria\u2013Margiana Archaeological Complex", + "http://www.semanlink.net/tag/james_hendler": "James Hendler", + "http://www.semanlink.net/tag/robert": "Robert", + "http://www.semanlink.net/tag/stochastic_parrots": "Stochastic Parrots", + "http://www.semanlink.net/tag/film_britannique": "Film britannique", + "http://www.semanlink.net/tag/sw_is_flawed": "SW is flawed", + "http://www.semanlink.net/tag/google_knowledge_graph": "Google Knowledge Graph", + "http://www.semanlink.net/tag/library_code": "Library (code)", + "http://www.semanlink.net/tag/liberte_egalite_fraternite": "Libert\u00e9, \u00e9galit\u00e9, fraternit\u00e9", + "http://www.semanlink.net/tag/mur_de_berlin": "Mur de Berlin", + "http://www.semanlink.net/tag/christopher_olah": "Chris Olah", + "http://www.semanlink.net/tag/robotisation": "Robotisation", + "http://www.semanlink.net/tag/lutte_anti_terroriste": "Lutte anti-terroriste", + "http://www.semanlink.net/tag/regex": "Regex", + "http://www.semanlink.net/tag/jean_claude_juncker": "Jean-Claude Juncker", + "http://www.semanlink.net/tag/java_microframeworks": "Java microframeworks", + "http://www.semanlink.net/tag/musees_africains": "Mus\u00e9es africains", + "http://www.semanlink.net/tag/chanson": "Chanson", + "http://www.semanlink.net/tag/tasmanian_devil": "Tasmanian devil", + "http://www.semanlink.net/tag/brexit": "Brexit", + "http://www.semanlink.net/tag/dark_side_of_tech": "Dark side of Tech", + "http://www.semanlink.net/tag/voaf": "VOAF", + "http://www.semanlink.net/tag/content_negotiation": "Content negotiation", + "http://www.semanlink.net/tag/java_7": 
"Java 7", + "http://www.semanlink.net/tag/neural_models_for_information_retrieval": "Neural Search", + "http://www.semanlink.net/tag/conceptual_modeling": "Conceptual modeling", + "http://www.semanlink.net/tag/mutualisme": "Mutualisme", + "http://www.semanlink.net/tag/denny_vrandecic": "Denny Vrande\u010di\u0107", + "http://www.semanlink.net/tag/apprendre_a_apprendre": "Apprendre \u00e0 apprendre", + "http://www.semanlink.net/tag/rechauffement_climatique": "Climate crisis", + "http://www.semanlink.net/tag/katie_portwin": "Katie Portwin", + "http://www.semanlink.net/tag/sent2vec": "Sent2Vec", + "http://www.semanlink.net/tag/unit_test": "Unit test", + "http://www.semanlink.net/tag/thewebconf_2020": "TheWebConf 2020", + "http://www.semanlink.net/tag/tomcat_in_eclipse": "Tomcat in Eclipse", + "http://www.semanlink.net/tag/astronomie": "Astronomie", + "http://www.semanlink.net/tag/hal": "HAL", + "http://www.semanlink.net/tag/accelerated_mobile_pages": "Accelerated Mobile Pages (AMP)", + "http://www.semanlink.net/tag/espagne": "Espagne", + "http://www.semanlink.net/tag/reboisement": "Reboisement", + "http://www.semanlink.net/tag/gravitation": "Gravitation", + "http://www.semanlink.net/tag/ours": "Ours", + "http://www.semanlink.net/tag/dosso": "Dosso", + "http://www.semanlink.net/tag/parlement_europeen": "Parlement europ\u00e9en", + "http://www.semanlink.net/tag/product_types_ontology": "Product Types Ontology", + "http://www.semanlink.net/tag/charbon": "Charbon", + "http://www.semanlink.net/tag/etat_du_monde": "Etat du monde", + "http://www.semanlink.net/tag/liberte_liberte_cherie": "Libert\u00e9, libert\u00e9 ch\u00e9rie", + "http://www.semanlink.net/tag/reto_bachmann_gmur": "Reto Bachmann-Gm\u00fcr", + "http://www.semanlink.net/tag/squeak": "Squeak", + "http://www.semanlink.net/tag/favoris": "Favoris", + "http://www.semanlink.net/tag/separation_of_man_and_ape": "Separation of man and ape", + "http://www.semanlink.net/tag/entity_discovery_and_linking": "Entity discovery and linking", + "http://www.semanlink.net/tag/a_history_of_the_world_since_1300": "Coursera: A History of the World since 1300", + "http://www.semanlink.net/tag/nelson_mandela": "Nelson Mandela", + "http://www.semanlink.net/tag/robotic_imitation": "Robotic imitation", + "http://www.semanlink.net/tag/nlp_stanford": "NLP@Stanford", + "http://www.semanlink.net/tag/jeopardy": "Jeopardy", + "http://www.semanlink.net/tag/missing_labels_ml": "Missing Labels (ML)", + "http://www.semanlink.net/tag/goodrelations_renault": "GoodRelations/Renault", + "http://www.semanlink.net/tag/clustering_of_text_documents": "Clustering of text documents", + "http://www.semanlink.net/tag/aldous_huxley": "Aldous Huxley", + "http://www.semanlink.net/tag/uncontacted_peoples": "Uncontacted peoples", + "http://www.semanlink.net/tag/go_game": "Go (Game)", + "http://www.semanlink.net/tag/explosion_cambrienne": "Explosion cambrienne", + "http://www.semanlink.net/tag/personal_assistant": "Personal assistant", + "http://www.semanlink.net/tag/economie_ecologique": "\u00c9conomie \u00e9cologique", + "http://www.semanlink.net/tag/ldow2008": "LDOW2008", + "http://www.semanlink.net/tag/nlp_and_search": "NLP and Search", + "http://www.semanlink.net/tag/automotive_and_web_technologies": "Automotive and web technologies", + "http://www.semanlink.net/tag/afrique_du_nord": "Afrique du Nord", + "http://www.semanlink.net/tag/uml": "UML", + "http://www.semanlink.net/tag/wordpress": "WordPress", + "http://www.semanlink.net/tag/greve_du_sexe": "Gr\u00e8ve du sexe", + 
"http://www.semanlink.net/tag/semantic_desktop": "Semantic Desktop", + "http://www.semanlink.net/tag/google_play": "Google Play", + "http://www.semanlink.net/tag/graph_parsing": "Graph Parsing", + "http://www.semanlink.net/tag/niger_agriculture": "Niger : agriculture", + "http://www.semanlink.net/tag/dbtune": "DBTune", + "http://www.semanlink.net/tag/sustainable_materials_lifecycle": "Sustainable materials lifecycle", + "http://www.semanlink.net/tag/keep_new": "Keep new", + "http://www.semanlink.net/tag/oiseau": "Oiseau", + "http://www.semanlink.net/tag/telescope": "T\u00e9lescope", + "http://www.semanlink.net/tag/human_ai_collaboration": "Human-AI collaboration", + "http://www.semanlink.net/tag/grippe_aviaire": "Grippe aviaire", + "http://www.semanlink.net/tag/java_concurrency": "Java concurrency", + "http://www.semanlink.net/tag/max_halford": "Max Halford", + "http://www.semanlink.net/tag/nissan": "Nissan", + "http://www.semanlink.net/tag/wikidata_rdf": "Wikidata/RDF", + "http://www.semanlink.net/tag/docker": "Docker", + "http://www.semanlink.net/tag/decouverte_d_especes_inconnues": "D\u00e9couverte d'esp\u00e8ces inconnues", + "http://www.semanlink.net/tag/jupyter": "Jupyter", + "http://www.semanlink.net/tag/junk_dna": "Junk DNA", + "http://www.semanlink.net/tag/pfia_2018": "PFIA 2018", + "http://www.semanlink.net/tag/renato_matos": "Renato Matos", + "http://www.semanlink.net/tag/histoire_des_jermas": "Histoire des Jermas", + "http://www.semanlink.net/tag/franceconnect": "FranceConnect", + "http://www.semanlink.net/tag/e_commerce": "e-commerce", + "http://www.semanlink.net/tag/tours": "Tours", + "http://www.semanlink.net/tag/lingo": "Lingo", + "http://www.semanlink.net/tag/sparql_and_jena": "SPARQL AND Jena", + "http://www.semanlink.net/tag/big_data": "Big Data", + "http://www.semanlink.net/tag/julian_assange": "Julian Assange", + "http://www.semanlink.net/tag/julie_grollier": "Julie Grollier", + "http://www.semanlink.net/tag/probing_ml": "Probing (ML)", + "http://www.semanlink.net/tag/mlm": "MLM", + "http://www.semanlink.net/tag/yuval_noah_harari": "Yuval Noah Harari", + "http://www.semanlink.net/tag/amerique_latine": "Am\u00e9rique latine", + "http://www.semanlink.net/tag/film_allemand": "Film allemand", + "http://www.semanlink.net/tag/glaciologie": "Glaciologie", + "http://www.semanlink.net/tag/risks": "Risks", + "http://www.semanlink.net/tag/mami_wata": "Mami Wata", + "http://www.semanlink.net/tag/fournisseurs_d_acces_a_internet": "Fournisseurs d'acc\u00e8s \u00e0 internet", + "http://www.semanlink.net/tag/samuel_goto": "Samuel Goto", + "http://www.semanlink.net/tag/winch5": "Winch 5", + "http://www.semanlink.net/tag/deprecated": "Deprecated", + "http://www.semanlink.net/tag/conscience_artificielle": "Conscience artificielle", + "http://www.semanlink.net/tag/lune": "Lune", + "http://www.semanlink.net/tag/memory_embeddings": "Memory Embeddings", + "http://www.semanlink.net/tag/boulgakov": "Boulgakov", + "http://www.semanlink.net/tag/agriculture_africaine": "Agriculture africaine", + "http://www.semanlink.net/tag/france_inter": "France Inter", + "http://www.semanlink.net/tag/mobile_search": "Mobile search", + "http://www.semanlink.net/tag/encyclopedie_collaborative": "Encyclop\u00e9die collaborative", + "http://www.semanlink.net/tag/owl_rl": "OWL RL", + "http://www.semanlink.net/tag/quicktime": "QuickTime", + "http://www.semanlink.net/tag/corruption": "Corruption", + "http://www.semanlink.net/tag/bayesian_deep_learning": "Bayesian Deep Learning", + 
"http://www.semanlink.net/tag/nlp_text_representation": "NLP: Text Representation", + "http://www.semanlink.net/tag/eglise_catholique": "Eglise catholique", + "http://www.semanlink.net/tag/jardinage": "Jardinage", + "http://www.semanlink.net/tag/travailler_moins": "Travailler moins", + "http://www.semanlink.net/tag/fbi_v_apple": "FBI v. Apple", + "http://www.semanlink.net/tag/chimpanze": "Chimpanz\u00e9", + "http://www.semanlink.net/tag/artiste": "Artiste", + "http://www.semanlink.net/tag/text_corpora_and_lexical_resources": "Text Corpora and Lexical Resources", + "http://www.semanlink.net/tag/pollution": "Pollution", + "http://www.semanlink.net/tag/brain_computer_interface": "Brain-computer interface", + "http://www.semanlink.net/tag/converter": "Converter", + "http://www.semanlink.net/tag/comet_wild_2": "Comet Wild 2", + "http://www.semanlink.net/tag/model_driven_development": "Model Driven Development", + "http://www.semanlink.net/tag/nodalities": "Nodalities", + "http://www.semanlink.net/tag/knn_in_mlc": "KNN in MLC", + "http://www.semanlink.net/tag/question_raciale": "Question raciale", + "http://www.semanlink.net/tag/temis": "TEMIS", + "http://www.semanlink.net/tag/punk": "Punk", + "http://www.semanlink.net/tag/actrice": "Actrice", + "http://www.semanlink.net/tag/rdf_context": "RDF: context", + "http://www.semanlink.net/tag/nlp_microsoft": "NLP@Microsoft", + "http://www.semanlink.net/tag/gilberto_gil": "Gilberto Gil", + "http://www.semanlink.net/tag/bibliotheque": "Biblioth\u00e8que", + "http://www.semanlink.net/tag/soa": "SOA", + "http://www.semanlink.net/tag/pointwise_mutual_information": "Pointwise mutual information", + "http://www.semanlink.net/tag/bug": "bug", + "http://www.semanlink.net/tag/missions_spatiales": "Missions spatiales", + "http://www.semanlink.net/tag/nlp_based_ir": "NLP based IR", + "http://www.semanlink.net/tag/glyphosate": "Glyphosate", + "http://www.semanlink.net/tag/danse": "Danse", + "http://www.semanlink.net/tag/alan_kay": "Alan Kay", + "http://www.semanlink.net/tag/t_sne": "t-SNE", + "http://www.semanlink.net/tag/sesame": "Sesame", + "http://www.semanlink.net/tag/afrique_francophone": "Afrique francophone", + "http://www.semanlink.net/tag/amerindien": "Am\u00e9rindien", + "http://www.semanlink.net/tag/quora": "Quora", + "http://www.semanlink.net/tag/poete": "Po\u00e8te", + "http://www.semanlink.net/tag/sparse_matrix": "Sparse matrix", + "http://www.semanlink.net/tag/zinder": "Zinder", + "http://www.semanlink.net/tag/xml": "XML", + "http://www.semanlink.net/tag/privacy_and_internet": "Privacy and internet", + "http://www.semanlink.net/tag/personnage_historique": "Personnage historique", + "http://www.semanlink.net/tag/late_bronze_age_collapse": "Late Bronze Age collapse", + "http://www.semanlink.net/tag/bag_of_words": "Bag-of-words", + "http://www.semanlink.net/tag/sparql_endpoint": "SPARQL endpoint", + "http://www.semanlink.net/tag/fulani": "Fulani", + "http://www.semanlink.net/tag/eve_africaine": "Eve africaine", + "http://www.semanlink.net/tag/aws": "AWS", + "http://www.semanlink.net/tag/jarriel_perlman": "Jarriel Perlman", + "http://www.semanlink.net/tag/museum_d_histoire_naturelle": "Museum d'Histoire Naturelle", + "http://www.semanlink.net/tag/maltraitance_animale": "Maltraitance animale", + "http://www.semanlink.net/tag/jquery": "jQuery", + "http://www.semanlink.net/tag/pdf_extract": "pdf extract", + "http://www.semanlink.net/tag/antiquite_africaine": "Arch\u00e9ologie africaine", + "http://www.semanlink.net/tag/publishing_rdf_vocabularies": 
"Publishing RDF Vocabularies", + "http://www.semanlink.net/tag/fact_checking": "Fact-checking", + "http://www.semanlink.net/tag/responsabilite": "Responsabilit\u00e9", + "http://www.semanlink.net/tag/antiwork": "Antiwork", + "http://www.semanlink.net/tag/us_vs_europe": "US vs Europe", + "http://www.semanlink.net/tag/juif": "Juifs", + "http://www.semanlink.net/tag/clandestins": "Clandestins", + "http://www.semanlink.net/tag/quizz": "Quizz", + "http://www.semanlink.net/tag/areva": "Areva", + "http://www.semanlink.net/tag/histoire": "Histoire", + "http://www.semanlink.net/tag/linked_data_gui": "Linked Data GUI", + "http://www.semanlink.net/tag/commission_europeenne": "Commission europ\u00e9enne", + "http://www.semanlink.net/tag/jobbotization": "AI, robots and jobs", + "http://www.semanlink.net/tag/unsupervised_machine_learning": "Unsupervised machine learning", + "http://www.semanlink.net/tag/recurrent_neural_network": "Recurrent neural network", + "http://www.semanlink.net/tag/sproutcore": "SproutCore", + "http://www.semanlink.net/tag/internet_of_things": "Internet of Things", + "http://www.semanlink.net/tag/excel": "Excel", + "http://www.semanlink.net/tag/microservices": "Microservices", + "http://www.semanlink.net/tag/boston_dynamics": "Boston Dynamics", + "http://www.semanlink.net/tag/log4j": "log4j", + "http://www.semanlink.net/tag/closure": "Closure", + "http://www.semanlink.net/tag/tweet": "Tweet", + "http://www.semanlink.net/tag/dette_covid": "Dette Covid", + "http://www.semanlink.net/tag/lagos": "Lagos", + "http://www.semanlink.net/tag/sacha_guitry": "Sacha Guitry", + "http://www.semanlink.net/tag/medecine": "M\u00e9decine", + "http://www.semanlink.net/tag/rebellion_touaregue": "R\u00e9bellion touar\u00e8gue", + "http://www.semanlink.net/tag/voip": "VoIP", + "http://www.semanlink.net/tag/neural_network_interpretability": "Neural network interpretability", + "http://www.semanlink.net/tag/soja": "Soja", + "http://www.semanlink.net/tag/thomas_more": "Thomas More", + "http://www.semanlink.net/tag/geometrie": "G\u00e9om\u00e9trie", + "http://www.semanlink.net/tag/flask": "Flask", + "http://www.semanlink.net/tag/responsabilite_de_la_france": "Responsabilit\u00e9 de la France", + "http://www.semanlink.net/tag/apprendre_une_langue": "Apprendre une langue", + "http://www.semanlink.net/tag/information_sur_internet": "Information sur internet", + "http://www.semanlink.net/tag/chine_vs_occident": "Chine vs Occident", + "http://www.semanlink.net/tag/daphne_koller": "Daphne Koller", + "http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con": "Le gouvernement Chirac est trop con", + "http://www.semanlink.net/tag/heredite": "H\u00e9r\u00e9dit\u00e9", + "http://www.semanlink.net/tag/catastrophic_forgetting": "Catastrophic forgetting", + "http://www.semanlink.net/tag/ruben_verborgh": "Ruben Verborgh", + "http://www.semanlink.net/tag/c2gweb_on_the_web": "C2GWeb on the web", + "http://www.semanlink.net/tag/guerres_de_religion": "Guerres de religion", + "http://www.semanlink.net/tag/wikidata_browser": "Wikidata browser", + "http://www.semanlink.net/tag/neonicotinoides": "N\u00e9onicotino\u00efdes", + "http://www.semanlink.net/tag/epimorphics_json_rdf": "Epimorphics json-rdf", + "http://www.semanlink.net/tag/apache_stanbol": "Stanbol", + "http://www.semanlink.net/tag/langue_electronique": "Langue \u00e9lectronique", + "http://www.semanlink.net/tag/owl_introduction": "OWL: Introduction", + "http://www.semanlink.net/tag/medaille_fields": "M\u00e9daille Fields", + 
"http://www.semanlink.net/tag/trust": "Trust", + "http://www.semanlink.net/tag/philae": "Philae", + "http://www.semanlink.net/tag/textrank": "TextRank", + "http://www.semanlink.net/tag/koskas": "Koskas", + "http://www.semanlink.net/tag/bibliotheque_numerique": "Biblioth\u00e8que num\u00e9rique", + "http://www.semanlink.net/tag/romeo_dallaire": "Rom\u00e9o Dallaire", + "http://www.semanlink.net/tag/gmail": "Gmail", + "http://www.semanlink.net/tag/aws_machine_learning": "AWS Machine Learning", + "http://www.semanlink.net/tag/ai_chip": "AI Chip", + "http://www.semanlink.net/tag/genetique_humaine": "G\u00e9n\u00e9tique humaine", + "http://www.semanlink.net/tag/sentence_embeddings": "Sentence Embeddings", + "http://www.semanlink.net/tag/digital_collections": "Digital Collections", + "http://www.semanlink.net/tag/images_stereoscopiques": "Images st\u00e9r\u00e9oscopiques", + "http://www.semanlink.net/tag/imovie": "iMovie", + "http://www.semanlink.net/tag/nkos": "NKOS", + "http://www.semanlink.net/tag/fake_news": "Fake news", + "http://www.semanlink.net/tag/agro_industrie": "Agro-industrie", + "http://www.semanlink.net/tag/parrot": "Parrot", + "http://www.semanlink.net/tag/using_word_embedding": "Using word embeddings", + "http://www.semanlink.net/tag/twine": "Twine", + "http://www.semanlink.net/tag/sommet_de_copenhague": "Sommet de Copenhague", + "http://www.semanlink.net/tag/banco": "Banco", + "http://www.semanlink.net/tag/booking_com": "booking.com", + "http://www.semanlink.net/tag/rwanda": "Rwanda", + "http://www.semanlink.net/tag/fun": "Fun", + "http://www.semanlink.net/tag/bayesian_classification": "Bayesian classification", + "http://www.semanlink.net/tag/himalaya": "Himalaya", + "http://www.semanlink.net/tag/uri_identity": "URI Identity", + "http://www.semanlink.net/tag/rdf_data_visualization": "RDF data visualization", + "http://www.semanlink.net/tag/president_des_usa": "Pr\u00e9sident des USA", + "http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest": "Afrique de l'ouest : histoire", + "http://www.semanlink.net/tag/insolite": "Insolite", + "http://www.semanlink.net/tag/personal_ontology": "Personal ontology", + "http://www.semanlink.net/tag/rdf_and_social_networks": "RDF and social networks", + "http://www.semanlink.net/tag/bing": "Bing", + "http://www.semanlink.net/tag/bert_and_sentence_embeddings": "BERT + Sentence Embeddings", + "http://www.semanlink.net/tag/prism_surveillance_program": "PRISM", + "http://www.semanlink.net/tag/ai_business_perspectives": "AI: business perspectives", + "http://www.semanlink.net/tag/rdf_working_group": "RDF Working Group", + "http://www.semanlink.net/tag/john_calvin": "Calvin", + "http://www.semanlink.net/tag/patent_finding": "Patent finding", + "http://www.semanlink.net/tag/ld_patch": "LD-PATCH", + "http://www.semanlink.net/tag/cluster_analysis": "Clustering", + "http://www.semanlink.net/tag/embeddings_in_ir": "Embeddings in Information Retrieval", + "http://www.semanlink.net/tag/nasca": "Nasca", + "http://www.semanlink.net/tag/combining_knowledge_graphs": "Combining knowledge graphs", + "http://www.semanlink.net/tag/restful_web_services": "RESTful Web Services", + "http://www.semanlink.net/tag/yves_roth": "Yves Roth", + "http://www.semanlink.net/tag/kd_mkb": "KD-MKB", + "http://www.semanlink.net/tag/rdf_data_source": "RDF Data source", + "http://www.semanlink.net/tag/apple_developer_connection": "Apple Developer Connection", + "http://www.semanlink.net/tag/seo": "SEO", + "http://www.semanlink.net/tag/lime": "LIME", + 
"http://www.semanlink.net/tag/mongodb": "MongoDB", + "http://www.semanlink.net/tag/internet": "Internet", + "http://www.semanlink.net/tag/amy_winehouse": "Amy Winehouse", + "http://www.semanlink.net/tag/manuscrits": "Manuscrits", + "http://www.semanlink.net/tag/ai_stanford": "AI@Stanford", + "http://www.semanlink.net/tag/subventions_agricoles": "Subventions agricoles", + "http://www.semanlink.net/tag/accident_climatique": "Accident climatique", + "http://www.semanlink.net/tag/garbage_collector": "Garbage Collector", + "http://www.semanlink.net/tag/synonym_uris": "Synonym URIs", + "http://www.semanlink.net/tag/general_motors": "General Motors", + "http://www.semanlink.net/tag/modeling_car_diversity": "Modeling car diversity", + "http://www.semanlink.net/tag/college": "Coll\u00e8ge", + "http://www.semanlink.net/tag/constraint_satisfaction_problem": "Constraint Satisfaction Problem", + "http://www.semanlink.net/tag/technologie": "Technologie", + "http://www.semanlink.net/tag/bonne_nouvelle": "Bonne nouvelle", + "http://www.semanlink.net/tag/critique_du_capitalisme": "Critique du capitalisme", + "http://www.semanlink.net/tag/tv_advertising": "TV advertising", + "http://www.semanlink.net/tag/low_resource_languages": "Low-Resource Languages", + "http://www.semanlink.net/tag/nlp_current_state": "NLP: current state", + "http://www.semanlink.net/tag/mission_voulet_chanoine": "Mission \"Voulet-Chanoine\"", + "http://www.semanlink.net/tag/hugging_face": "Hugging Face", + "http://www.semanlink.net/tag/e_learning": "Online Learning", + "http://www.semanlink.net/tag/corse": "Corse", + "http://www.semanlink.net/tag/blog_software": "Blog software", + "http://www.semanlink.net/tag/juliana_rotich": "Juliana Rotich", + "http://www.semanlink.net/tag/introduction": "Introduction", + "http://www.semanlink.net/tag/match_de_legende": "Match de l\u00e9gende", + "http://www.semanlink.net/tag/street_art": "Street art", + "http://www.semanlink.net/tag/pierre_fresnay": "Pierre Fresnay", + "http://www.semanlink.net/tag/linked_learning_2012": "Linked Learning 2012", + "http://www.semanlink.net/tag/eswc_2007": "ESWC 2007", + "http://www.semanlink.net/tag/rss": "RSS", + "http://www.semanlink.net/tag/fao": "FAO", + "http://www.semanlink.net/tag/enfants_soldats": "Enfants soldats", + "http://www.semanlink.net/tag/technique_de_l_insecte_sterile": "Technique de l'insecte st\u00e9rile", + "http://www.semanlink.net/tag/fibo": "FIBO", + "http://www.semanlink.net/tag/plantation_d_arbres": "Plantation d'arbres", + "http://www.semanlink.net/tag/chris_welty": "Chris Welty", + "http://www.semanlink.net/tag/charlottesville": "Charlottesville", + "http://www.semanlink.net/tag/firefox_extension": "Firefox extension", + "http://www.semanlink.net/tag/semantic_web_introduction": "Semantic Web : introduction", + "http://www.semanlink.net/tag/con_de_chirac": "Con de Chirac", + "http://www.semanlink.net/tag/redland": "Redland", + "http://www.semanlink.net/tag/fire": "Fire", + "http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp": "CNN 4 NLP", + "http://www.semanlink.net/tag/text_classification_using_label_names_only": "Text Classification Using Label Names Only", + "http://www.semanlink.net/tag/amazon": "Amazon", + "http://www.semanlink.net/tag/punition_des_mechants": "Punition des m\u00e9chants", + "http://www.semanlink.net/tag/what_is_life": "What is life ?", + "http://www.semanlink.net/tag/multiple_knowledge_bases": "Multiple Knowledge Bases", + "http://www.semanlink.net/tag/sap": "SAP", + 
"http://www.semanlink.net/tag/paleoclimatologie": "Paleoclimatologie", + "http://www.semanlink.net/tag/live_clipboard": "Live Clipboard", + "http://www.semanlink.net/tag/michel_serres": "Michel Serres", + "http://www.semanlink.net/tag/demographie": "D\u00e9mographie", + "http://www.semanlink.net/tag/chinois": "Chinois", + "http://www.semanlink.net/tag/prison": "Prison", + "http://www.semanlink.net/tag/solr_not_english_only": "Solr (not english only)", + "http://www.semanlink.net/tag/jeni_tennison": "Jeni Tennison", + "http://www.semanlink.net/tag/animal_rights": "Animal rights", + "http://www.semanlink.net/tag/glue": "Glue", + "http://www.semanlink.net/tag/genetique": "Genetics", + "http://www.semanlink.net/tag/energies_fossiles_non_conventionnelles": "Energies fossiles \"non conventionnelles\"", + "http://www.semanlink.net/tag/semantic_web_assisted_learning": "Semantic Web Assisted Learning", + "http://www.semanlink.net/tag/online_tool": "Online tool", + "http://www.semanlink.net/tag/gbif": "GBIF", + "http://www.semanlink.net/tag/eric_baetens": "Eric Baetens", + "http://www.semanlink.net/tag/socrate": "Socrate", + "http://www.semanlink.net/tag/fake_blogs": "Fake Blogs", + "http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien": "Extinction de masse de la fin du permien", + "http://www.semanlink.net/tag/sdmx_rdf": "SDMX-RDF", + "http://www.semanlink.net/tag/owl_1_1": "OWL 1.1", + "http://www.semanlink.net/tag/google_refine": "Google Refine", + "http://www.semanlink.net/tag/urbanisme": "Urbanisme", + "http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning": "Unsupervised Sentence Embedding Learning", + "http://www.semanlink.net/tag/boris_johnson": "Boris Johnson", + "http://www.semanlink.net/tag/axel_polleres": "Axel Polleres", + "http://www.semanlink.net/tag/rats": "Rats", + "http://www.semanlink.net/tag/ec_web_14": "EC-Web'14", + "http://www.semanlink.net/tag/exil": "Exil", + "http://www.semanlink.net/tag/rare_events": "Rare events", + "http://www.semanlink.net/tag/wikipedia2vec": "Wikipedia2Vec", + "http://www.semanlink.net/tag/job_matching": "Job matching", + "http://www.semanlink.net/tag/universal_decimal_classification": "Universal Decimal Classification", + "http://www.semanlink.net/tag/self_attention": "Self-Attention", + "http://www.semanlink.net/tag/eswc_2019": "ESWC 2019", + "http://www.semanlink.net/tag/transposon": "Transposon", + "http://www.semanlink.net/tag/jcs_java_caching_system": "JCS - Java Caching System", + "http://www.semanlink.net/tag/epigenetics": "Epigenetics", + "http://www.semanlink.net/tag/semantic_mashups": "Semantic mashups", + "http://www.semanlink.net/tag/sahara": "Sahara", + "http://www.semanlink.net/tag/computer_game": "Computer game", + "http://www.semanlink.net/tag/neuroscience_and_ai": "Neuroscience AND AI", + "http://www.semanlink.net/tag/pont_couvert": "Pont couvert", + "http://www.semanlink.net/tag/sparql_clipboard": "SPARQL Clipboard", + "http://www.semanlink.net/tag/triple_pattern_fragment": "Triple Pattern Fragment", + "http://www.semanlink.net/tag/france_delabrement": "France : d\u00e9labrement", + "http://www.semanlink.net/tag/livesearch": "Livesearch", + "http://www.semanlink.net/tag/psychanalyse": "Psychanalyse", + "http://www.semanlink.net/tag/relations_europe_usa": "Relations Europe-USA", + "http://www.semanlink.net/tag/right_to_be_forgotten": "Right to Be Forgotten", + "http://www.semanlink.net/tag/sylvain_gugger": "Sylvain Gugger", + "http://www.semanlink.net/tag/jean_jacques_annaud": "Jean-Jacques Annaud", + 
"http://www.semanlink.net/tag/semences_paysanes": "Semences paysanes", + "http://www.semanlink.net/tag/ammonite": "Ammonite", + "http://www.semanlink.net/tag/hollywood": "Hollywood", + "http://www.semanlink.net/tag/semantic_web_presentation": "Semantic web : pr\u00e9sentation", + "http://www.semanlink.net/tag/vito": "Vito", + "http://www.semanlink.net/tag/selfie": "Selfie", + "http://www.semanlink.net/tag/nlp_ens": "NLP@ENS", + "http://www.semanlink.net/tag/open_world_assumption": "Open World Assumption", + "http://www.semanlink.net/tag/fleuve_niger": "Fleuve Niger", + "http://www.semanlink.net/tag/unix_howto": "UNIX Tips", + "http://www.semanlink.net/tag/mallet": "Mallet", + "http://www.semanlink.net/tag/apache_marmotta": "Apache Marmotta", + "http://www.semanlink.net/tag/girafe": "Girafe", + "http://www.semanlink.net/tag/relational_databases_and_the_semantic_web": "Relational Databases and the Semantic Web", + "http://www.semanlink.net/tag/noise_contrastive_estimation": "Noise contrastive estimation", + "http://www.semanlink.net/tag/open_education": "Open Education", + "http://www.semanlink.net/tag/rdf_graph_versioning": "RDF graph versioning", + "http://www.semanlink.net/tag/domain_specific_bert": "Domain-Specific BERT", + "http://www.semanlink.net/tag/catastrophe_humanitaire": "Catastrophe humanitaire", + "http://www.semanlink.net/tag/amerique_profonde": "Am\u00e9rique profonde", + "http://www.semanlink.net/tag/catastrophe": "Catastrophe", + "http://www.semanlink.net/tag/machine_learning_basics": "Machine Learning Basics", + "http://www.semanlink.net/tag/france_bureaucratie": "France : bureaucratie", + "http://www.semanlink.net/tag/www_2015": "WWW 2015", + "http://www.semanlink.net/tag/dengue": "Dengue", + "http://www.semanlink.net/tag/ethereum": "Ethereum", + "http://www.semanlink.net/tag/entity_attribute_value_model": "Entity-attribute-value model", + "http://www.semanlink.net/tag/flair": "Flair", + "http://www.semanlink.net/tag/mai_68": "Mai 68", + "http://www.semanlink.net/tag/louis_jouvet": "Louis Jouvet", + "http://www.semanlink.net/tag/security_and_rest": "Security and REST", + "http://www.semanlink.net/tag/songhai": "Songha\u00ef", + "http://www.semanlink.net/tag/no_more_drm": "No more DRM", + "http://www.semanlink.net/tag/codec": "Codec", + "http://www.semanlink.net/tag/justice": "Justice", + "http://www.semanlink.net/tag/memoire_humaine": "M\u00e9moire humaine", + "http://www.semanlink.net/tag/knowledge_graph": "Knowledge Graphs", + "http://www.semanlink.net/tag/terrorisme": "Terrorisme", + "http://www.semanlink.net/tag/lituanie": "Lituanie", + "http://www.semanlink.net/tag/kode": "KODE", + "http://www.semanlink.net/tag/herve_kempf": "Herv\u00e9 Kempf", + "http://www.semanlink.net/tag/mondialisation": "Mondialisation", + "http://www.semanlink.net/tag/rda": "RDA", + "http://www.semanlink.net/tag/facebook": "Facebook", + "http://www.semanlink.net/tag/cifre": "CIFRE", + "http://www.semanlink.net/tag/manaal_faruqui": "Manaal Faruqui", + "http://www.semanlink.net/tag/httpunit": "HttpUnit", + "http://www.semanlink.net/tag/presidentielles_2012": "Pr\u00e9sidentielles 2012", + "http://www.semanlink.net/tag/extreme_classification": "Extreme classification", + "http://www.semanlink.net/tag/kd_mkb_paper": "KD-MKB paper", + "http://www.semanlink.net/tag/cancel_culture": "Cancel culture", + "http://www.semanlink.net/tag/histoire_naturelle": "Histoire naturelle", + "http://www.semanlink.net/tag/folksonomies_ontologies": "folksonomies ontologies", + 
"http://www.semanlink.net/tag/google_app_engine": "Google App Engine", + "http://www.semanlink.net/tag/knowledge_driven_embeddings": "Knowledge-driven embeddings", + "http://www.semanlink.net/tag/hp": "HP", + "http://www.semanlink.net/tag/on_device_nlp": "On device NLP", + "http://www.semanlink.net/tag/fractales": "Fractales", + "http://www.semanlink.net/tag/robot_humanoide": "Robot humano\u00efde", + "http://www.semanlink.net/tag/planeteafrique": "PlaneteAfrique", + "http://www.semanlink.net/tag/jerma": "Jerma", + "http://www.semanlink.net/tag/pour_les_nuls": "Pour les nuls", + "http://www.semanlink.net/tag/web_site_design": "Web site design", + "http://www.semanlink.net/tag/sorbonne": "Sorbonne", + "http://www.semanlink.net/tag/sport_de_combat": "Sport de combat", + "http://www.semanlink.net/tag/anisotropy_in_lm_space": "Anisotropy in LM space", + "http://www.semanlink.net/tag/combinatorial_generalization": "Combinatorial generalization", + "http://www.semanlink.net/tag/library_of_alexandria": "Library of Alexandria", + "http://www.semanlink.net/tag/html_dev": "HTML Dev", + "http://www.semanlink.net/tag/mirek_sopek": "Mirek Sopek", + "http://www.semanlink.net/tag/democratie": "D\u00e9mocratie", + "http://www.semanlink.net/tag/aratta": "Aratta", + "http://www.semanlink.net/tag/semantic_web_and_oop": "Semantic Web and OOP", + "http://www.semanlink.net/tag/sportif": "Sportif", + "http://www.semanlink.net/tag/exploration_test": "Exploration test", + "http://www.semanlink.net/tag/neo_nazis": "Neo-Nazis", + "http://www.semanlink.net/tag/apple_sucks": "Apple sucks", + "http://www.semanlink.net/tag/pierre_yves_vandenbussche": " Pierre-Yves Vandenbussche", + "http://www.semanlink.net/tag/chico_buarque": "Chico Buarque", + "http://www.semanlink.net/tag/serbie": "Serbie", + "http://www.semanlink.net/tag/bolsonaro": "Bolsonaro", + "http://www.semanlink.net/tag/dispersion_des_graines": "Dispersion des graines", + "http://www.semanlink.net/tag/rachel_thomas": "Rachel Thomas", + "http://www.semanlink.net/tag/inria": "INRIA", + "http://www.semanlink.net/tag/archeologie_chinoise": "Arch\u00e9ologie chinoise", + "http://www.semanlink.net/tag/amsterdam": "Amsterdam", + "http://www.semanlink.net/tag/son_3d": "Son 3D", + "http://www.semanlink.net/tag/mooc": "MOOC", + "http://www.semanlink.net/tag/blink": "BLINK", + "http://www.semanlink.net/tag/aqmi": "AQMI", + "http://www.semanlink.net/tag/zouk": "Zouk", + "http://www.semanlink.net/tag/police": "Police", + "http://www.semanlink.net/tag/keyword_spotting": "Keyword Spotting", + "http://www.semanlink.net/tag/in_memory_computing": "In-memory computing", + "http://www.semanlink.net/tag/l_humanite_merite_de_disparaitre": "L'humanit\u00e9 m\u00e9rite de dispara\u00eetre", + "http://www.semanlink.net/tag/eau": "Eau", + "http://www.semanlink.net/tag/decisions_en_entreprise": "D\u00e9cisions en entreprise", + "http://www.semanlink.net/tag/decentralized_social_network": "Decentralized social network", + "http://www.semanlink.net/tag/ai_ibm": "AI@IBM", + "http://www.semanlink.net/tag/brain_to_brain_interface": "Brain-to-Brain Interface", + "http://www.semanlink.net/tag/problemes_sanitaires": "Probl\u00e8mes sanitaires", + "http://www.semanlink.net/tag/epr": "EPR", + "http://www.semanlink.net/tag/sitemaps": "Sitemaps", + "http://www.semanlink.net/tag/south_by_southwest": "South by Southwest", + "http://www.semanlink.net/tag/semantic_cms": "Semantic CMS", + "http://www.semanlink.net/tag/norvege": "Norv\u00e8ge", + "http://www.semanlink.net/tag/photo_numerique": "Photo 
num\u00e9rique", + "http://www.semanlink.net/tag/olivier_grisel": "Olivier Grisel", + "http://www.semanlink.net/tag/maxent_for_nlp": "MaxEnt for NLP", + "http://www.semanlink.net/tag/singe": "Singe", + "http://www.semanlink.net/tag/factory_farming": "Factory farming", + "http://www.semanlink.net/tag/lombok": "Lombok", + "http://www.semanlink.net/tag/ville_sans_voiture": "Ville sans voiture", + "http://www.semanlink.net/tag/c2gweb_seo": "C2GWeb: SEO", + "http://www.semanlink.net/tag/good": "Good", + "http://www.semanlink.net/tag/acoustique": "Acoustique", + "http://www.semanlink.net/tag/income_inequality": "Income inequality", + "http://www.semanlink.net/tag/prix_nobel_d_economie": "Prix Nobel d'\u00e9conomie", + "http://www.semanlink.net/tag/pentagon": "Pentagon", + "http://www.semanlink.net/tag/internet_libre": "Internet libre", + "http://www.semanlink.net/tag/fonds_speculatifs": "Fonds sp\u00e9culatifs", + "http://www.semanlink.net/tag/ebola": "Ebola", + "http://www.semanlink.net/tag/sig_ma": "sig.ma", + "http://www.semanlink.net/tag/emnlp_2020": "EMNLP 2020", + "http://www.semanlink.net/tag/enseignement_superieur": "Enseignement sup\u00e9rieur", + "http://www.semanlink.net/tag/nn_tips": "NN tips", + "http://www.semanlink.net/tag/read_write_secure_data_web": "Read-Write Secure Data Web", + "http://www.semanlink.net/tag/evernote": "Evernote", + "http://www.semanlink.net/tag/graph_database_and_nlp": "Graph database and NLP", + "http://www.semanlink.net/tag/war_on_drugs": "war on drugs", + "http://www.semanlink.net/tag/flippant": "Flippant", + "http://www.semanlink.net/tag/https": "HTTPS", + "http://www.semanlink.net/tag/al_qaida": "Al-Qaida", + "http://www.semanlink.net/tag/ecosse": "Ecosse", + "http://www.semanlink.net/tag/erdf": "eRDF", + "http://www.semanlink.net/tag/ridley_scott": "Ridley Scott", + "http://www.semanlink.net/tag/google_seo": "Google: SEO", + "http://www.semanlink.net/tag/afrique_australe": "Afrique australe", + "http://www.semanlink.net/tag/semantic_statistics": "Semantic Statistics", + "http://www.semanlink.net/tag/grands_problemes_mathematiques": "Grands probl\u00e8mes math\u00e9matiques", + "http://www.semanlink.net/tag/australie": "Australie", + "http://www.semanlink.net/tag/bias": "Bias", + "http://www.semanlink.net/tag/text_similarity": "Text Similarity", + "http://www.semanlink.net/tag/hypios": "Hypios", + "http://www.semanlink.net/tag/lucilie_bouchere": "Lucilie bouch\u00e8re", + "http://www.semanlink.net/tag/epidemie": "\u00c9pid\u00e9mie", + "http://www.semanlink.net/tag/blog": "Blog", + "http://www.semanlink.net/tag/equivalence_mining": "Equivalence mining", + "http://www.semanlink.net/tag/cinema": "Cin\u00e9ma", + "http://www.semanlink.net/tag/kz": "KZ", + "http://www.semanlink.net/tag/toumai": "Touma\u00ef", + "http://www.semanlink.net/tag/todo_list": "Todo list", + "http://www.semanlink.net/tag/origine_de_la_vie": "Origine de la vie", + "http://www.semanlink.net/tag/shacl": "SHACL", + "http://www.semanlink.net/tag/one_learning_algorithm_hypothesis": "\u201cone learning algorithm\u201d hypothesis", + "http://www.semanlink.net/tag/hubble": "Hubble", + "http://www.semanlink.net/tag/feed_aggregator": "Feed aggregator", + "http://www.semanlink.net/tag/activity_streams": "Activity Streams", + "http://www.semanlink.net/tag/la_france_vue_de_l_etranger": "La France vue de l'\u00e9tranger", + "http://www.semanlink.net/tag/multilingual_language_models": "Multilingual Language Models", + "http://www.semanlink.net/tag/archeologie_du_niger": "Arch\u00e9ologie du Niger", 
+ "http://www.semanlink.net/tag/service_descriptors": "Service Descriptors", + "http://www.semanlink.net/tag/sharing_economy": "Sharing economy", + "http://www.semanlink.net/tag/probabilistic_relevance_model": "Probabilistic relevance model", + "http://www.semanlink.net/tag/metric_learning": "Metric Learning", + "http://www.semanlink.net/tag/conferences": "Conf\u00e9rences", + "http://www.semanlink.net/tag/nothing_to_hide_argument": "Nothing to hide argument", + "http://www.semanlink.net/tag/windows_media_player": "Windows Media Player", + "http://www.semanlink.net/tag/abstract_meaning_representation": "Abstract Meaning Representation", + "http://www.semanlink.net/tag/j_y_etais": "J'y \u00e9tais", + "http://www.semanlink.net/tag/hannibal": "Hannibal", + "http://www.semanlink.net/tag/vie_vienna_iks_editables": "VIE Vienna IKS Editables", + "http://www.semanlink.net/tag/paris_nlp_meetup": "Paris NLP meetup", + "http://www.semanlink.net/tag/computer_vision": "Computer vision", + "http://www.semanlink.net/tag/kindle": "Kindle", + "http://www.semanlink.net/tag/rfid": "RFID", + "http://www.semanlink.net/tag/ipv6": "IPv6", + "http://www.semanlink.net/tag/acronyms_nlp": "Acronyms (NLP)", + "http://www.semanlink.net/tag/thermodynamique": "Thermodynamique", + "http://www.semanlink.net/tag/bernard_stiegler": "Bernard Stiegler", + "http://www.semanlink.net/tag/accueil_etranger": "Accueil \u00e9tranger", + "http://www.semanlink.net/tag/industrie_de_l_armement": "Industrie de l'armement", + "http://www.semanlink.net/tag/reputation_system": "Reputation system", + "http://www.semanlink.net/tag/lula": "Lula", + "http://www.semanlink.net/tag/synthetic_life": "Synthetic life", + "http://www.semanlink.net/tag/antiscience": "Antiscience", + "http://www.semanlink.net/tag/genomique": "G\u00e9nomique", + "http://www.semanlink.net/tag/d3js": "D3js", + "http://www.semanlink.net/tag/scraping": "Scraping", + "http://www.semanlink.net/tag/minoen": "Minoen", + "http://www.semanlink.net/tag/named_entity_recognition": "Named Entity Recognition", + "http://www.semanlink.net/tag/modeles_economiques": "Mod\u00e8les \u00e9conomiques", + "http://www.semanlink.net/tag/enigme": "Enigme", + "http://www.semanlink.net/tag/dynamic_topic_model": "Dynamic topic model", + "http://www.semanlink.net/tag/onu": "ONU", + "http://www.semanlink.net/tag/2020": "2020", + "http://www.semanlink.net/tag/startups": "Startups", + "http://www.semanlink.net/tag/ondes_gravitationnelles": "Ondes gravitationnelles", + "http://www.semanlink.net/tag/loi_sur_le_voile": "Loi sur le voile", + "http://www.semanlink.net/tag/benjamin_franklin": "Benjamin Franklin", + "http://www.semanlink.net/tag/foreign_policy_of_the_united_states": "USA: Foreign policy", + "http://www.semanlink.net/tag/text": "Text", + "http://www.semanlink.net/tag/perse": "Perse", + "http://www.semanlink.net/tag/maxwell_s_demon": "Maxwell's demon", + "http://www.semanlink.net/tag/ils_commencent_a_me_gonfler": "Ils commencent \u00e0 me gonfler", + "http://www.semanlink.net/tag/orwell": "Orwell", + "http://www.semanlink.net/tag/henri_verdier": "Henri Verdier", + "http://www.semanlink.net/tag/microformats": "Microformats", + "http://www.semanlink.net/tag/differentiable_reasoning_over_text": "Differentiable Reasoning over Text", + "http://www.semanlink.net/tag/handwriting": "Handwriting", + "http://www.semanlink.net/tag/uri_opacity": "URI opacity", + "http://www.semanlink.net/tag/raphaelsty": "Rapha\u00ebl Sourty", + "http://www.semanlink.net/tag/nanotechnologies": "Nanotechnologies", + 
"http://www.semanlink.net/tag/keywords": "Keywords", + "http://www.semanlink.net/tag/perrier": "Perrier", + "http://www.semanlink.net/tag/universal_income": "Universal income", + "http://www.semanlink.net/tag/ajax": "Ajax", + "http://www.semanlink.net/tag/clonage": "Clonage", + "http://www.semanlink.net/tag/petrole_et_corruption": "P\u00e9trole et corruption", + "http://www.semanlink.net/tag/euro_crisis": "Euro Crisis", + "http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents": "Hierarchical clustering of text documents", + "http://www.semanlink.net/tag/alzheimer": "Alzheimer", + "http://www.semanlink.net/tag/verite": "V\u00e9rit\u00e9", + "http://www.semanlink.net/tag/youtube_video": "YouTube video", + "http://www.semanlink.net/tag/faille_de_securite": "Faille de s\u00e9curit\u00e9", + "http://www.semanlink.net/tag/covid19_vaccin": "Covid19 : vaccin", + "http://www.semanlink.net/tag/stardust": "Stardust", + "http://www.semanlink.net/tag/sgnn": "SGNN", + "http://www.semanlink.net/tag/photon": "Photon", + "http://www.semanlink.net/tag/text_mining": "Text mining", + "http://www.semanlink.net/tag/ieml": "IEML", + "http://www.semanlink.net/tag/propriete_intellectuelle": "Propri\u00e9t\u00e9 intellectuelle", + "http://www.semanlink.net/tag/ginco_culture": "GINCO (Culture)", + "http://www.semanlink.net/tag/epimorphics": "Epimorphics", + "http://www.semanlink.net/tag/altruisme": "Altruisme", + "http://www.semanlink.net/tag/react_js": "React.js", + "http://www.semanlink.net/tag/python": "Python", + "http://www.semanlink.net/tag/musique": "Musique", + "http://www.semanlink.net/tag/bernhard_haslhofer": "Bernhard Haslhofer", + "http://www.semanlink.net/tag/authentication": "Authentication", + "http://www.semanlink.net/tag/payment": "Payment", + "http://www.semanlink.net/tag/knowledge_management": "Knowledge management", + "http://www.semanlink.net/tag/semantic_web_application": "Semantic Web : Application", + "http://www.semanlink.net/tag/crise_de_la_dette": "Crise de la dette", + "http://www.semanlink.net/tag/docker_volumes": "Docker-Volumes", + "http://www.semanlink.net/tag/franco_allemand": "Franco-Allemand", + "http://www.semanlink.net/tag/shanghai": "Shangha\u00ef", + "http://www.semanlink.net/tag/education": "Education", + "http://www.semanlink.net/tag/self_training": "Self-training", + "http://www.semanlink.net/tag/optique": "Optique", + "http://www.semanlink.net/tag/obscurantisme": "Obscurantisme", + "http://www.semanlink.net/tag/barcamp": "Barcamp", + "http://www.semanlink.net/tag/nlp_teams": "NLP Teams", + "http://www.semanlink.net/tag/sparql_tutorial": "SPARQL Tutorial", + "http://www.semanlink.net/tag/musicien": "Musicien", + "http://www.semanlink.net/tag/simile_exhibit": "SIMILE Exhibit", + "http://www.semanlink.net/tag/antitrust": "Antitrust", + "http://www.semanlink.net/tag/retrieval_based_nlp": "Retrieval-based NLP", + "http://www.semanlink.net/tag/internet_explorer": "Internet Explorer", + "http://www.semanlink.net/tag/topquadrant": "TopQuadrant", + "http://www.semanlink.net/tag/extinction_des_dinosaures": "Extinction des dinosaures", + "http://www.semanlink.net/tag/venus_divinite": "V\u00e9nus (divinit\u00e9)", + "http://www.semanlink.net/tag/webid": "WebID", + "http://www.semanlink.net/tag/future_of_the_web": "Future of the web", + "http://www.semanlink.net/tag/kurdes": "Kurdes", + "http://www.semanlink.net/tag/memory_in_deep_learning": "Memory in deep learning", + "http://www.semanlink.net/tag/agriculture": "Agriculture", + 
"http://www.semanlink.net/tag/reddit": "Reddit", + "http://www.semanlink.net/tag/bien_envoye": "Bien envoy\u00e9", + "http://www.semanlink.net/tag/luxembourg": "Luxembourg", + "http://www.semanlink.net/tag/baobab": "Baobab", + "http://www.semanlink.net/tag/soren_auer": "S\u00f6ren Auer", + "http://www.semanlink.net/tag/ai_book": "AI: books & journals", + "http://www.semanlink.net/tag/data_warehouse": "Data Warehouse", + "http://www.semanlink.net/tag/cosmic_microwave_background": "Cosmic microwave background", + "http://www.semanlink.net/tag/nlp_introduction": "NLP: introduction", + "http://www.semanlink.net/tag/xgboost": "xgboost", + "http://www.semanlink.net/tag/lac_de_lave": "Lac de lave", + "http://www.semanlink.net/tag/missoula_floods": "Missoula Floods", + "http://www.semanlink.net/tag/python_sample_code": "Python sample code", + "http://www.semanlink.net/tag/catalhoyuk": "\u00c7atalh\u00f6y\u00fck", + "http://www.semanlink.net/tag/description_logic": "Description Logic", + "http://www.semanlink.net/tag/goodrelations": "GoodRelations", + "http://www.semanlink.net/tag/i_like": "I like", + "http://www.semanlink.net/tag/ressources_halieutiques": "Ressources halieutiques", + "http://www.semanlink.net/tag/future_combat_systems": "Future Combat Systems", + "http://www.semanlink.net/tag/finlande": "Finlande", + "http://www.semanlink.net/tag/jermakoye": "Djermakoye", + "http://www.semanlink.net/tag/jupiter": "Jupiter", + "http://www.semanlink.net/tag/bas_salaires": "Bas salaires", + "http://www.semanlink.net/tag/gpl": "GPL", + "http://www.semanlink.net/tag/asteroide": "Ast\u00e9ro\u00efde", + "http://www.semanlink.net/tag/machine_translation": "Machine translation", + "http://www.semanlink.net/tag/semencier": "Semencier", + "http://www.semanlink.net/tag/republique_tcheque": "R\u00e9publique Tch\u00e8que", + "http://www.semanlink.net/tag/fukushima": "Fukushima", + "http://www.semanlink.net/tag/billionaires": "Billionaires", + "http://www.semanlink.net/tag/apple_carplay": "Apple CarPlay", + "http://www.semanlink.net/tag/andrej_karpathy": "Andrej Karpathy", + "http://www.semanlink.net/tag/compatibilite_javascript": "Compatibilit\u00e9 Javascript", + "http://www.semanlink.net/tag/triple_classification": "Triple Classification", + "http://www.semanlink.net/tag/film": "Film", + "http://www.semanlink.net/tag/enseignement_scientifique": "Enseignement scientifique", + "http://www.semanlink.net/tag/tomcat": "Tomcat", + "http://www.semanlink.net/tag/nombres_premiers": "Nombres premiers", + "http://www.semanlink.net/tag/eswc_2012": "ESWC 2012", + "http://www.semanlink.net/tag/enterprise_knowledge_graph": "Enterprise Knowledge Graph", + "http://www.semanlink.net/tag/internet_related_technologies": "Internet Related Technologies", + "http://www.semanlink.net/tag/openrefine": "OpenRefine", + "http://www.semanlink.net/tag/montagne": "Montagne", + "http://www.semanlink.net/tag/marlon_brando": "Marlon Brando", + "http://www.semanlink.net/tag/public_linked_json_w3_org": "public-linked-json@w3.org", + "http://www.semanlink.net/tag/quoc_le": "Quoc V. 
Le", + "http://www.semanlink.net/tag/iapetus": "Iapetus", + "http://www.semanlink.net/tag/pont": "Pont", + "http://www.semanlink.net/tag/searchmonkey": "SearchMonkey", + "http://www.semanlink.net/tag/webs_alternatifs": "webs alternatifs", + "http://www.semanlink.net/tag/probability_distribution": "Probability distribution", + "http://www.semanlink.net/tag/archive": "Archive", + "http://www.semanlink.net/tag/fadi_badra": "Fadi Badra", + "http://www.semanlink.net/tag/subword_embeddings": "Subword embeddings", + "http://www.semanlink.net/tag/pyramide": "Pyramide", + "http://www.semanlink.net/tag/function_closures": "Function closures", + "http://www.semanlink.net/tag/france": "France", + "http://www.semanlink.net/tag/photo_journalisme": "Photo journalisme", + "http://www.semanlink.net/tag/bill_joy": "Bill Joy", + "http://www.semanlink.net/tag/web_tools": "Web tools", + "http://www.semanlink.net/tag/taxi": "Taxi", + "http://www.semanlink.net/tag/critical_evaluation": "Critical evaluation", + "http://www.semanlink.net/tag/ontology_mapping": "Ontology Mapping", + "http://www.semanlink.net/tag/ng": "Andrew Ng", + "http://www.semanlink.net/tag/svg": "SVG", + "http://www.semanlink.net/tag/new_africa": "New Africa", + "http://www.semanlink.net/tag/cache_buster": "Cache buster", + "http://www.semanlink.net/tag/entity_type_representation": "Entity type representation", + "http://www.semanlink.net/tag/google_rich_snippets": "Google Rich Snippets", + "http://www.semanlink.net/tag/etl": "ETL", + "http://www.semanlink.net/tag/owl_tool": "OWL tool", + "http://www.semanlink.net/tag/carnet_de_voyage": "Carnet de voyage", + "http://www.semanlink.net/tag/representation_learning": "Representation learning", + "http://www.semanlink.net/tag/orange_data_mining": "Orange (data mining)", + "http://www.semanlink.net/tag/unsupervised_text_classification": "Unsupervised Text Classification", + "http://www.semanlink.net/tag/allegrograph": "AllegroGraph", + "http://www.semanlink.net/tag/xslt": "XSLT", + "http://www.semanlink.net/tag/janis_joplin": "Janis Joplin", + "http://www.semanlink.net/tag/music_store": "Music store", + "http://www.semanlink.net/tag/chine_leadership": "Chine: leadership", + "http://www.semanlink.net/tag/nlp_techniques": "NLP techniques", + "http://www.semanlink.net/tag/bosnie": "Bosnie", + "http://www.semanlink.net/tag/vie_sur_mars": "Vie sur Mars", + "http://www.semanlink.net/tag/simile": "SIMILE", + "http://www.semanlink.net/tag/beijing_genomics_institute": "Beijing Genomics Institute", + "http://www.semanlink.net/tag/ca_craint": "Ca craint", + "http://www.semanlink.net/tag/semantic_web_search_engine": "Semantic Web search engine", + "http://www.semanlink.net/tag/documentaire_tv": "Documentaire TV", + "http://www.semanlink.net/tag/laurent_lafforgue": "Laurent Lafforgue", + "http://www.semanlink.net/tag/ia_limites": "AI: limits", + "http://www.semanlink.net/tag/dev": "Dev", + "http://www.semanlink.net/tag/rake": "RAKE", + "http://www.semanlink.net/tag/maven": "Maven", + "http://www.semanlink.net/tag/arima": "ARIMA", + "http://www.semanlink.net/tag/securite_sociale": "S\u00e9curit\u00e9 sociale", + "http://www.semanlink.net/tag/extreme_droite": "Extr\u00e8me droite", + "http://www.semanlink.net/tag/chelsea_manning": "Chelsea Manning", + "http://www.semanlink.net/tag/tf1": "TF1", + "http://www.semanlink.net/tag/yahoo": "Yahoo!", + "http://www.semanlink.net/tag/ina": "INA", + "http://www.semanlink.net/tag/sif_embeddings": "SIF embeddings", + "http://www.semanlink.net/tag/graphviz": "Graphviz", + 
"http://www.semanlink.net/tag/developer_documentation": "Developer documentation", + "http://www.semanlink.net/tag/elevage_porcin": "Elevage porcin", + "http://www.semanlink.net/tag/irlande": "Irlande", + "http://www.semanlink.net/tag/lumiere": "Lumi\u00e8re", + "http://www.semanlink.net/tag/jamendo": "Jamendo", + "http://www.semanlink.net/tag/paleoanthropology_genetics": "Paleoanthropology + Genetics", + "http://www.semanlink.net/tag/francois_yvon": "Fran\u00e7ois Yvon", + "http://www.semanlink.net/tag/gaz_de_schiste": "Gaz de schiste", + "http://www.semanlink.net/tag/diplomatie_americaine": "Diplomatie am\u00e9ricaine", + "http://www.semanlink.net/tag/blogger": "Blogger", + "http://www.semanlink.net/tag/oliviers": "Oliviers", + "http://www.semanlink.net/tag/syndicalisme": "Syndicalisme", + "http://www.semanlink.net/tag/decision_tree_learning": "Decision tree learning", + "http://www.semanlink.net/tag/ian_goodfellow": "Ian Goodfellow", + "http://www.semanlink.net/tag/data_model": "Data model", + "http://www.semanlink.net/tag/firewall": "Firewall", + "http://www.semanlink.net/tag/eswc": "ESWC", + "http://www.semanlink.net/tag/maidsafe": "Maidsafe", + "http://www.semanlink.net/tag/documentation": "Documentation", + "http://www.semanlink.net/tag/extinction_d_especes": "Extinction d'esp\u00e8ces", + "http://www.semanlink.net/tag/college_de_france": "Coll\u00e8ge de France", + "http://www.semanlink.net/tag/archeologie_amazonienne": "Arch\u00e9ologie amazonienne", + "http://www.semanlink.net/tag/plutonium": "Plutonium", + "http://www.semanlink.net/tag/text_aware_kg_embedding": "Text-Aware KG embedding", + "http://www.semanlink.net/tag/configuration": "Configuration", + "http://www.semanlink.net/tag/nubie": "Nubie", + "http://www.semanlink.net/tag/templatic_documents": "Templatic documents", + "http://www.semanlink.net/tag/fibonacci": "Fibonacci", + "http://www.semanlink.net/tag/cross_modal_retrieval": "Cross-Modal Retrieval", + "http://www.semanlink.net/tag/semantically_searchable_distributed_repository": "Semantically searchable distributed repository", + "http://www.semanlink.net/tag/high_frequency_trading": "High-frequency trading", + "http://www.semanlink.net/tag/hierarchical_multi_label_text_classification": "Hierarchical multi-label text classification ", + "http://www.semanlink.net/tag/journal": "Presse", + "http://www.semanlink.net/tag/sw_guys": "SW guys (and girls)", + "http://www.semanlink.net/tag/bitmap_index": "Bitmap index", + "http://www.semanlink.net/tag/oauth": "OAuth", + "http://www.semanlink.net/tag/arctique": "Arctique", + "http://www.semanlink.net/tag/backtranslation": "Backtranslation", + "http://www.semanlink.net/tag/apple_java": "Apple Java", + "http://www.semanlink.net/tag/neural_symbolic_computing": "Neural-Symbolic Computing", + "http://www.semanlink.net/tag/cliqz": "Cliqz", + "http://www.semanlink.net/tag/bi_lstm": "bi-LSTM", + "http://www.semanlink.net/tag/web_server": "Web server", + "http://www.semanlink.net/tag/rural_india": "Rural India", + "http://www.semanlink.net/tag/universite": "Universit\u00e9", + "http://www.semanlink.net/tag/protection_de_la_nature": "Protection de la nature", + "http://www.semanlink.net/tag/angularjs": "AngularJS", + "http://www.semanlink.net/tag/saas": "SaaS", + "http://www.semanlink.net/tag/oasis_specs": "OASIS: specs", + "http://www.semanlink.net/tag/face_recognition": "Face recognition", + "http://www.semanlink.net/tag/societe_de_consommation": "Soci\u00e9t\u00e9 de consommation", + "http://www.semanlink.net/tag/chili": "Chili", + 
"http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur": "J'ai un petit probl\u00e8me avec mon ordinateur", + "http://www.semanlink.net/tag/industrie_du_disque": "Industrie du disque", + "http://www.semanlink.net/tag/sebastian_germesin": "Sebastian Germesin", + "http://www.semanlink.net/tag/information": "Information", + "http://www.semanlink.net/tag/pellet": "Pellet", + "http://www.semanlink.net/tag/edward_snowden": "Edward Snowden", + "http://www.semanlink.net/tag/entity_mining": "Entity mining", + "http://www.semanlink.net/tag/grand_voyageur": "Grand voyageur", + "http://www.semanlink.net/tag/enterprise_system": "Enterprise System", + "http://www.semanlink.net/tag/histoire_de_l_afrique": "Histoire de l'Afrique", + "http://www.semanlink.net/tag/dallol": "Dallol", + "http://www.semanlink.net/tag/weapon_of_mass_distraction": "Weapon of mass distraction", + "http://www.semanlink.net/tag/synthetic_genome": "Synthetic Genome", + "http://www.semanlink.net/tag/website_creation": "Website: creation", + "http://www.semanlink.net/tag/hypersolutions": "hyperSOLutions", + "http://www.semanlink.net/tag/technical_documentation": "Technical documentation", + "http://www.semanlink.net/tag/prehistoire": "Pr\u00e9histoire", + "http://www.semanlink.net/tag/religion": "Religion", + "http://www.semanlink.net/tag/mailing_list": "Mailing list", + "http://www.semanlink.net/tag/path": "$PATH", + "http://www.semanlink.net/tag/linking_enterprise_data": "Linking Enterprise Data", + "http://www.semanlink.net/tag/bbc_programmes": "BBC - Programmes", + "http://www.semanlink.net/tag/canal": "Canal+", + "http://www.semanlink.net/tag/gartner": "Gartner", + "http://www.semanlink.net/tag/abrutis": "Abrutis", + "http://www.semanlink.net/tag/manipulations_politiques": "Manipulations politiques", + "http://www.semanlink.net/tag/solid": "Solid", + "http://www.semanlink.net/tag/patent_infringement": "Patent Infringement", + "http://www.semanlink.net/tag/marlene_dietrich": "Marlene Dietrich", + "http://www.semanlink.net/tag/institutions_internationales": "Institutions internationales", + "http://www.semanlink.net/tag/boosting": "Boosting", + "http://www.semanlink.net/tag/fiction": "Fiction", + "http://www.semanlink.net/tag/bp": "BP", + "http://www.semanlink.net/tag/alexandria_ocasio_cortez": "Alexandria Ocasio-Cortez", + "http://www.semanlink.net/tag/machine_learning_tool": "Machine Learning tool", + "http://www.semanlink.net/tag/origine_de_l_agriculture": "Origine de l'agriculture", + "http://www.semanlink.net/tag/general_semantics": "General semantics", + "http://www.semanlink.net/tag/tabous": "Tabous", + "http://www.semanlink.net/tag/bash": "bash", + "http://www.semanlink.net/tag/vanishing_gradient": "Vanishing gradient", + "http://www.semanlink.net/tag/hotel": "H\u00f4tel", + "http://www.semanlink.net/tag/targeted_ads": "Targeted ads", + "http://www.semanlink.net/tag/train": "Train", + "http://www.semanlink.net/tag/gnowsis": "gnowsis", + "http://www.semanlink.net/tag/moral_machines": "Moral machines", + "http://www.semanlink.net/tag/2eme_guerre_mondiale": "2eme guerre mondiale", + "http://www.semanlink.net/tag/la_ronde_de_nuit": "La Ronde de Nuit", + "http://www.semanlink.net/tag/neuromorphic_system": "Neuromorphic system", + "http://www.semanlink.net/tag/sw_has_failed": "SW has failed", + "http://www.semanlink.net/tag/algorithmes": "Algorithmes", + "http://www.semanlink.net/tag/private_wiki": "Private wiki", + "http://www.semanlink.net/tag/certificat_de_nationalite": "Certificat de nationalit\u00e9", + 
"http://www.semanlink.net/tag/web_3_0": "Web 3.0", + "http://www.semanlink.net/tag/target_sense_verification": "Target Sense Verification", + "http://www.semanlink.net/tag/solrcloud": "SolrCloud", + "http://www.semanlink.net/tag/einstein": "Einstein", + "http://www.semanlink.net/tag/federated_database_system": "Federated database system", + "http://www.semanlink.net/tag/c2gweb_rdf": "C2GWeb RDF", + "http://www.semanlink.net/tag/rijksmuseum": "Rijksmuseum", + "http://www.semanlink.net/tag/bug_brother": "Bug Brother", + "http://www.semanlink.net/tag/2d_nlp": "2D-NLP", + "http://www.semanlink.net/tag/bookmark_managers": "Bookmark Managers", + "http://www.semanlink.net/tag/qwant": "Qwant", + "http://www.semanlink.net/tag/droits_de_l_homme": "Droits de l'Homme", + "http://www.semanlink.net/tag/charlie_hebdo": "Charlie Hebdo", + "http://www.semanlink.net/tag/oum_kalsoum": "Oum Kalsoum", + "http://www.semanlink.net/tag/alexandre_le_grand": "Alexandre le Grand", + "http://www.semanlink.net/tag/copyright": "Copyright", + "http://www.semanlink.net/tag/deep_generative_modeling": "Deep Generative Modeling", + "http://www.semanlink.net/tag/macintosh": "Macintosh", + "http://www.semanlink.net/tag/intent_classification_and_slot_filling": "Intent classification and slot filling", + "http://www.semanlink.net/tag/sparql_perfs": "SPARQL perfs", + "http://www.semanlink.net/tag/newton": "Newton", + "http://www.semanlink.net/tag/python_4_data_science": "Python 4 Data science", + "http://www.semanlink.net/tag/mona_lisa": "Mona Lisa", + "http://www.semanlink.net/tag/arnaque": "Arnaque", + "http://www.semanlink.net/tag/recit_de_voyage": "R\u00e9cit de voyage", + "http://www.semanlink.net/tag/changement_climatique": "Changement climatique", + "http://www.semanlink.net/tag/phrase_embeddings": "Phrase embeddings", + "http://www.semanlink.net/tag/owl_ontology_browser": "OWL ontology browser", + "http://www.semanlink.net/tag/online_security": "Online Security", + "http://www.semanlink.net/tag/poincare_embeddings": "Poincar\u00e9 Embeddings", + "http://www.semanlink.net/tag/brain_initiative": "BRAIN Initiative", + "http://www.semanlink.net/tag/france_fiasco_administratif": "France : dysfonctionnement administratif", + "http://www.semanlink.net/tag/nigeria": "Nigeria", + "http://www.semanlink.net/tag/information_system": "Information System", + "http://www.semanlink.net/tag/web": "Web", + "http://www.semanlink.net/tag/mp3": "MP3", + "http://www.semanlink.net/tag/unknown_tag": "Unknown Tag", + "http://www.semanlink.net/tag/nuclear_war": "Nuclear war", + "http://www.semanlink.net/tag/tree_embeddings": "Tree embeddings", + "http://www.semanlink.net/tag/out_of_distribution_detection": "Outlier Detection", + "http://www.semanlink.net/tag/displaying_xml_with_css": "Displaying XML with css", + "http://www.semanlink.net/tag/sand": "Sand", + "http://www.semanlink.net/tag/polynomial": "Polynomial", + "http://www.semanlink.net/tag/banque_mondiale": "Banque mondiale", + "http://www.semanlink.net/tag/personal_data": "Personal data", + "http://www.semanlink.net/tag/adsense": "AdSense", + "http://www.semanlink.net/tag/aborigenes": "Aborig\u00e8nes", + "http://www.semanlink.net/tag/nature_journal": "Nature (journal)", + "http://www.semanlink.net/tag/meetup_web_semantique": "Meetup Web S\u00e9mantique", + "http://www.semanlink.net/tag/semantic_markup_in_html": "Semantic markup in HTML", + "http://www.semanlink.net/tag/dropout": "Dropout", + "http://www.semanlink.net/tag/manager_snippet": "Snippet Manager", + 
"http://www.semanlink.net/tag/apis_and_linked_data": "APIs and Linked Data", + "http://www.semanlink.net/tag/coursera_computational_neuroscience": "Coursera: Computational Neuroscience", + "http://www.semanlink.net/tag/ian_davis": "Ian Davis", + "http://www.semanlink.net/tag/genetique_et_evolution": "G\u00e9n\u00e9tique et \u00c9volution", + "http://www.semanlink.net/tag/europe_and_uk": "Europe and UK", + "http://www.semanlink.net/tag/business_case": "Business case", + "http://www.semanlink.net/tag/data_integration": "Data integration", + "http://www.semanlink.net/tag/path_queries": "Path queries", + "http://www.semanlink.net/tag/rigolo": "Rigolo", + "http://www.semanlink.net/tag/techlash": "Techlash", + "http://www.semanlink.net/tag/java_jni": "Java: JNI", + "http://www.semanlink.net/tag/antidot": "Antidot", + "http://www.semanlink.net/tag/windows_vista": "Windows Vista", + "http://www.semanlink.net/tag/wikilinks_corpus": "Wikilinks Corpus", + "http://www.semanlink.net/tag/schema_org_actions": "Schema.org Actions", + "http://www.semanlink.net/tag/proletarisation": "Prol\u00e9tarisation", + "http://www.semanlink.net/tag/turkmenistan": "Turkmenistan", + "http://www.semanlink.net/tag/zoroastre": "Zoroastre", + "http://www.semanlink.net/tag/kadhafi": "Kadhafi", + "http://www.semanlink.net/tag/integrating_tomcat_with_apache": "Integrating Tomcat with Apache", + "http://www.semanlink.net/tag/zinder_alimentation_en_eau": "Zinder : alimentation en eau", + "http://www.semanlink.net/tag/placements_ethiques": "Placements \u00e9thiques", + "http://www.semanlink.net/tag/herodote": "H\u00e9rodote", + "http://www.semanlink.net/tag/croisade_des_albigeois": "Croisade des Albigeois", + "http://www.semanlink.net/tag/offres_d_emploi": "Offres d'emploi", + "http://www.semanlink.net/tag/infotechnocratie": "Infotechnocratie", + "http://www.semanlink.net/tag/guide_d_achat": "Guide d'achat", + "http://www.semanlink.net/tag/jeu_d_echecs": "Jeu d'\u00e9checs", + "http://www.semanlink.net/tag/natural_language_supervision": "Natural Language Supervision", + "http://www.semanlink.net/tag/extractive_summarization": "Extractive Text Summarization", + "http://www.semanlink.net/tag/open_standards": "Open standards", + "http://www.semanlink.net/tag/dave_reynolds": "Dave Reynolds", + "http://www.semanlink.net/tag/deep_learning_optimization_methods": "Deep Learning: Optimization methods", + "http://www.semanlink.net/tag/teaching_kids_to_code": "Teaching Kids to Code", + "http://www.semanlink.net/tag/fps_and_www_2008": "fps and WWW 2008", + "http://www.semanlink.net/tag/classical_mechanics": "Classical mechanics", + "http://www.semanlink.net/tag/vive_le_capitalisme": "Vive le capitalisme !", + "http://www.semanlink.net/tag/cinema_bresilien": "Cin\u00e9ma br\u00e9silien", + "http://www.semanlink.net/tag/fait_divers": "Fait divers", + "http://www.semanlink.net/tag/skos_w3c_document": "SKOS W3C document", + "http://www.semanlink.net/tag/manipulations_genetiques": "Manipulations g\u00e9n\u00e9tiques", + "http://www.semanlink.net/tag/polynesians": "Polynesians", + "http://www.semanlink.net/tag/semtechbiz": "SemTechBiz", + "http://www.semanlink.net/tag/guillaume_genthial": "Guillaume Genthial", + "http://www.semanlink.net/tag/apprentissage": "Apprentissage", + "http://www.semanlink.net/tag/google_cloud": "Google Cloud", + "http://www.semanlink.net/tag/protection_de_l_environnement": "Protection de l'environnement", + "http://www.semanlink.net/tag/sarkozy": "Sarkozy", + "http://www.semanlink.net/tag/rdf_and_property_graphs": "RDF 
and Property Graphs", + "http://www.semanlink.net/tag/blojsom": "blojsom", + "http://www.semanlink.net/tag/covid19_impreparation": "Covid19 : impr\u00e9paration", + "http://www.semanlink.net/tag/entity_salience": "Entity salience", + "http://www.semanlink.net/tag/multi_hop_reasonning": "Multi-hop reasonning", + "http://www.semanlink.net/tag/audi": "Audi", + "http://www.semanlink.net/tag/ong": "ONG", + "http://www.semanlink.net/tag/la_terre_vue_du_ciel": "La Terre vue du ciel", + "http://www.semanlink.net/tag/web_services_document_vs_rpc_style": "Web services: document vs RPC Style", + "http://www.semanlink.net/tag/seevl": "Seevl", + "http://www.semanlink.net/tag/drm_in_html_5": "DRM in HTML 5", + "http://www.semanlink.net/tag/carbon_sequestration": "Carbon sequestration", + "http://www.semanlink.net/tag/cloud_based_lod_platform": "Cloud based LOD platform", + "http://www.semanlink.net/tag/yahoo_my_web_2_0": "Yahoo - My Web 2.0", + "http://www.semanlink.net/tag/thought_vector": "Thought Vector", + "http://www.semanlink.net/tag/provocative_idea": "Provocative idea", + "http://www.semanlink.net/tag/javadoc": "Javadoc", + "http://www.semanlink.net/tag/bush": "Bush", + "http://www.semanlink.net/tag/wifi": "WIFI", + "http://www.semanlink.net/tag/damian_steer": "Damian Steer", + "http://www.semanlink.net/tag/sante": "Sant\u00e9", + "http://www.semanlink.net/tag/mafia": "Mafia", + "http://www.semanlink.net/tag/linux": "Linux", + "http://www.semanlink.net/tag/domain_specific_nlp": "Domain-Specific NLP", + "http://www.semanlink.net/tag/heroisme": "H\u00e9ro\u00efsme", + "http://www.semanlink.net/tag/regroupement_familial": "Regroupement familial", + "http://www.semanlink.net/tag/peuples": "Peuples", + "http://www.semanlink.net/tag/truffe": "Truffe", + "http://www.semanlink.net/tag/gilles_lepin": "Gilles Lepin", + "http://www.semanlink.net/tag/silk_road": "Silk Road", + "http://www.semanlink.net/tag/semantic_web_w3_org": "semantic-web@w3.org", + "http://www.semanlink.net/tag/morale": "Morale", + "http://www.semanlink.net/tag/nlp_juridique": "NLP + juridique", + "http://www.semanlink.net/tag/blosxom": "Blosxom", + "http://www.semanlink.net/tag/government_data": "Government data", + "http://www.semanlink.net/tag/lesk_algorithm": "Lesk algorithm", + "http://www.semanlink.net/tag/afrique_du_sud": "Afrique du Sud", + "http://www.semanlink.net/tag/pbs": "PBS", + "http://www.semanlink.net/tag/emmanuelle_bernes": "Emmanuelle Bernes", + "http://www.semanlink.net/tag/genome": "G\u00e9nome", + "http://www.semanlink.net/tag/shelley_powers": "Shelley Powers", + "http://www.semanlink.net/tag/documentaire": "Documentaire", + "http://www.semanlink.net/tag/electric_car": "Electric car", + "http://www.semanlink.net/tag/lobby_agroalimentaire": "Lobby agroalimentaire", + "http://www.semanlink.net/tag/transport": "Transport", + "http://www.semanlink.net/tag/piratage_des_oeuvres": "Piratage des \u0153uvres", + "http://www.semanlink.net/tag/empire_colonial_francais": "Empire colonial fran\u00e7ais", + "http://www.semanlink.net/tag/virtuoso_doc": "Virtuoso:doc", + "http://www.semanlink.net/tag/mobile_computing": "Mobile computing", + "http://www.semanlink.net/tag/zero_shot": "Zero shot", + "http://www.semanlink.net/tag/jean_rohmer": "Jean Rohmer", + "http://www.semanlink.net/tag/uris_within_uris": "URIs within URIs", + "http://www.semanlink.net/tag/foundation_models": "Foundation Models", + "http://www.semanlink.net/tag/nlp_baidu": "NLP@Baidu", + "http://www.semanlink.net/tag/rdf4j": "RDF4J", + 
"http://www.semanlink.net/tag/data_publica": "Data Publica", + "http://www.semanlink.net/tag/google_ai_blog": "Google AI Blog", + "http://www.semanlink.net/tag/chant": "Chant", + "http://www.semanlink.net/tag/ecriture": "Ecriture", + "http://www.semanlink.net/tag/argentine": "Argentine", + "http://www.semanlink.net/tag/ocr": "OCR", + "http://www.semanlink.net/tag/venus_prehistoriques": "V\u00e9nus pr\u00e9historiques", + "http://www.semanlink.net/tag/2018": "2018", + "http://www.semanlink.net/tag/ldp_w3c": "LDP @ W3C", + "http://www.semanlink.net/tag/histoire_des_sciences": "Histoire des sciences", + "http://www.semanlink.net/tag/armement": "Armement", + "http://www.semanlink.net/tag/aggregators": "Aggregators", + "http://www.semanlink.net/tag/lehman_brothers": "Lehman Brothers", + "http://www.semanlink.net/tag/q_a": "Q&A", + "http://www.semanlink.net/tag/hierarchies_in_ml": "Hierarchies in ML", + "http://www.semanlink.net/tag/crm": "CRM", + "http://www.semanlink.net/tag/whistleblower": "Whistleblower", + "http://www.semanlink.net/tag/ruby": "Ruby", + "http://www.semanlink.net/tag/cafard": "Cafard", + "http://www.semanlink.net/tag/leaks": "Leaks", + "http://www.semanlink.net/tag/city_states": "City-States", + "http://www.semanlink.net/tag/monotremes": "Monotr\u00e8mes", + "http://www.semanlink.net/tag/e_commerce_data": "e-commerce data", + "http://www.semanlink.net/tag/opera_do_malandro": "\u00d3pera do Malandro", + "http://www.semanlink.net/tag/caroline_fourest": "Caroline Fourest", + "http://www.semanlink.net/tag/thucydide": "Thucydide", + "http://www.semanlink.net/tag/web_marchand": "Web marchand", + "http://www.semanlink.net/tag/mongol": "Mongol", + "http://www.semanlink.net/tag/entities": "Entities", + "http://www.semanlink.net/tag/crustace": "Crustac\u00e9", + "http://www.semanlink.net/tag/illusion_d_optique": "Illusion d'optique", + "http://www.semanlink.net/tag/anticipation": "Anticipation", + "http://www.semanlink.net/tag/danny_ayers": "Danny Ayers", + "http://www.semanlink.net/tag/eclipse_juno": "Eclipse Juno", + "http://www.semanlink.net/tag/hierarchical_categories": "Hierarchical Categories", + "http://www.semanlink.net/tag/linear_algebra": "Linear algebra", + "http://www.semanlink.net/tag/numpy": "NumPy", + "http://www.semanlink.net/tag/urss": "Ex URSS", + "http://www.semanlink.net/tag/w3c_tag": "W3C TAG", + "http://www.semanlink.net/tag/dan_jurafsky": "Dan Jurafsky", + "http://www.semanlink.net/tag/rdfquery": "rdfQuery", + "http://www.semanlink.net/tag/jeu": "Jeu", + "http://www.semanlink.net/tag/agriculture_francaise": "Agriculture fran\u00e7aise", + "http://www.semanlink.net/tag/amazonie": "Amazonie", + "http://www.semanlink.net/tag/github": "GitHub", + "http://www.semanlink.net/tag/semantic_interoperability": "Semantic Interoperability", + "http://www.semanlink.net/tag/asynchronous": "Asynchronous", + "http://www.semanlink.net/tag/personal_knowledge_graph": "Personal Knowledge Graph", + "http://www.semanlink.net/tag/google_research": "Google Research", + "http://www.semanlink.net/tag/scotland": "Scotland", + "http://www.semanlink.net/tag/conditional_random_field": "Conditional random fields", + "http://www.semanlink.net/tag/experience_scientifique": "Exp\u00e9rience scientifique", + "http://www.semanlink.net/tag/justice_americaine": "Justice am\u00e9ricaine", + "http://www.semanlink.net/tag/fete_nationale": "F\u00eate nationale", + "http://www.semanlink.net/tag/collaborative_ontologie_creation": "Collaborative ontologie creation", + 
"http://www.semanlink.net/tag/personal_information_management": "Personal-information management", + "http://www.semanlink.net/tag/afripedia": "Afripedia", + "http://www.semanlink.net/tag/google_maps": "Google Maps", + "http://www.semanlink.net/tag/nlp_datasets": "NLP datasets", + "http://www.semanlink.net/tag/transductive_svm": "Transductive SVM", + "http://www.semanlink.net/tag/social_networks": "Social Networks", + "http://www.semanlink.net/tag/mobilite": "Mobilit\u00e9", + "http://www.semanlink.net/tag/rdf_application": "RDF Application", + "http://www.semanlink.net/tag/elevage_industriel": "Elevage industriel", + "http://www.semanlink.net/tag/business_intelligence": "Business intelligence", + "http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007": "L'Afrique \u00e0 la Bastille - 13 juillet 2007", + "http://www.semanlink.net/tag/retard_technologique_europeen": "Retard technologique europ\u00e9en", + "http://www.semanlink.net/tag/markown_javascript": "Markown / Javascript", + "http://www.semanlink.net/tag/nasa": "NASA", + "http://www.semanlink.net/tag/harry_halpin": "Harry Halpin", + "http://www.semanlink.net/tag/map_reduce": "Map-reduce", + "http://www.semanlink.net/tag/the_web_is_dying": "The Web is dying", + "http://www.semanlink.net/tag/database": "Database", + "http://www.semanlink.net/tag/personal_knowledge_management": "Personal Knowledge Management", + "http://www.semanlink.net/tag/capitalisme_financier": "Capitalisme financier", + "http://www.semanlink.net/tag/semanlink_archives": "Semanlink: archives", + "http://www.semanlink.net/tag/over_engineering": "Over-Engineering", + "http://www.semanlink.net/tag/csv": "CSV", + "http://www.semanlink.net/tag/peste": "Peste", + "http://www.semanlink.net/tag/duck_typing": "Duck Typing", + "http://www.semanlink.net/tag/devices": "Devices", + "http://www.semanlink.net/tag/progres_technique": "Progr\u00e8s technique", + "http://www.semanlink.net/tag/apple_software": "Apple Software", + "http://www.semanlink.net/tag/jena_user_conference": "Jena User Conference", + "http://www.semanlink.net/tag/combining_numerical_and_text_features": "Combining numerical and text features", + "http://www.semanlink.net/tag/business_intelligence_and_semantic_web": "Business Intelligence and Semantic Web", + "http://www.semanlink.net/tag/information_resources": "Information resources", + "http://www.semanlink.net/tag/piraterie": "Piraterie", + "http://www.semanlink.net/tag/carnaval": "Carnaval", + "http://www.semanlink.net/tag/installing_wordpress": "Installing WordPress", + "http://www.semanlink.net/tag/nlp_as_a_service": "NLP as a service", + "http://www.semanlink.net/tag/logiciel_libre": "Logiciel libre", + "http://www.semanlink.net/tag/mineralogie": "Min\u00e9ralogie", + "http://www.semanlink.net/tag/peter_breunig": "Peter Breunig", + "http://www.semanlink.net/tag/html": "HTML", + "http://www.semanlink.net/tag/electronic_frontier_foundation": "Electronic Frontier Foundation", + "http://www.semanlink.net/tag/semweb_china": "SemWeb China", + "http://www.semanlink.net/tag/apache_on_my_mac": "Apache on my mac", + "http://www.semanlink.net/tag/politique_monetaire": "Politique mon\u00e9taire", + "http://www.semanlink.net/tag/iphoto": "iphoto", + "http://www.semanlink.net/tag/green_new_deal": "Green New Deal", + "http://www.semanlink.net/tag/abeille": "Abeille", + "http://www.semanlink.net/tag/incremental_clustering": "Incremental Clustering", + "http://www.semanlink.net/tag/applescript": "AppleScript", + 
"http://www.semanlink.net/tag/semantic_negotiation": "Semantic Negotiation", + "http://www.semanlink.net/tag/svn": "svn", + "http://www.semanlink.net/tag/histoire_de_l_art": "Histoire de l'art", + "http://www.semanlink.net/tag/thesaurus": "Thesaurus", + "http://www.semanlink.net/tag/rocard": "Rocard", + "http://www.semanlink.net/tag/signal": "Signal", + "http://www.semanlink.net/tag/web_app": "Web app", + "http://www.semanlink.net/tag/information_theory": "Information theory", + "http://www.semanlink.net/tag/dictateur": "Dictateur", + "http://www.semanlink.net/tag/leningrad": "Leningrad", + "http://www.semanlink.net/tag/fast_ai": "fast.ai", + "http://www.semanlink.net/tag/bourbaki": "Bourbaki", + "http://www.semanlink.net/tag/physique_des_particules": "Physique des particules", + "http://www.semanlink.net/tag/greffe_de_tete": "Greffe de t\u00eate", + "http://www.semanlink.net/tag/colza_transgenique": "Colza transg\u00e9nique", + "http://www.semanlink.net/tag/airport": "Airport", + "http://www.semanlink.net/tag/destruction_de_vestiges_antiques": "Destruction de vestiges antiques", + "http://www.semanlink.net/tag/gig_economy": "Gig economy", + "http://www.semanlink.net/tag/online_dictionary": "Online dictionary", + "http://www.semanlink.net/tag/os_x_10_6_snow_leopard": "OS X 10.6 - Snow leopard", + "http://www.semanlink.net/tag/sem_web_demo": "Sem web demo", + "http://www.semanlink.net/tag/platonov": "Platonov", + "http://www.semanlink.net/tag/data_sniffer": "Data Sniffer", + "http://www.semanlink.net/tag/quasicrystals": "Quasicrystals", + "http://www.semanlink.net/tag/tiers_monde": "Tiers-Monde", + "http://www.semanlink.net/tag/climat": "Climat", + "http://www.semanlink.net/tag/gnizr": "gnizr", + "http://www.semanlink.net/tag/nlp_sem_web": "NLP + Sem web", + "http://www.semanlink.net/tag/links": "Links", + "http://www.semanlink.net/tag/text_to_sql": "Text to SQL", + "http://www.semanlink.net/tag/seq2seq_with_attention": "Seq2Seq with Attention", + "http://www.semanlink.net/tag/baselines": "Baselines", + "http://www.semanlink.net/tag/html5": "HTML5", + "http://www.semanlink.net/tag/patrick_boucheron": "Patrick Boucheron", + "http://www.semanlink.net/tag/knowledge_resources": "Knowledge resources", + "http://www.semanlink.net/tag/python_library": "Python library", + "http://www.semanlink.net/tag/logic_and_semantic_web": "Logic and semantic web", + "http://www.semanlink.net/tag/antiquite_iranienne": "Antiquit\u00e9 iranienne", + "http://www.semanlink.net/tag/http_cache": "HTTP Cache", + "http://www.semanlink.net/tag/pandemie": "Pand\u00e9mie", + "http://www.semanlink.net/tag/edvige": "Edvige", + "http://www.semanlink.net/tag/grandes_invasions": "Grandes invasions", + "http://www.semanlink.net/tag/extremisme_islamique": "Extr\u00e9misme islamique", + "http://www.semanlink.net/tag/greve": "Gr\u00e8ve", + "http://www.semanlink.net/tag/centrales_nucleaires": "Centrales nucl\u00e9aires", + "http://www.semanlink.net/tag/temps": "Temps", + "http://www.semanlink.net/tag/markdown": "Markdown", + "http://www.semanlink.net/tag/maven_tips": "Maven tips", + "http://www.semanlink.net/tag/niklas_lindstrom": "Niklas Lindstr\u00f6m", + "http://www.semanlink.net/tag/union_europeenne": "Union europ\u00e9enne", + "http://www.semanlink.net/tag/data_ownership": "Data ownership", + "http://www.semanlink.net/tag/peinture": "Painting", + "http://www.semanlink.net/tag/nlp_in_enterprise": "NLP in enterprise", + "http://www.semanlink.net/tag/conscience": "Consciousness", + "http://www.semanlink.net/tag/financial_data": 
"Financial Data", + "http://www.semanlink.net/tag/neo4j": "Neo4j", + "http://www.semanlink.net/tag/genome_editing": "Genome editing", + "http://www.semanlink.net/tag/orson_welles": "Orson Welles", + "http://www.semanlink.net/tag/recommended_reading": "Recommended reading ", + "http://www.semanlink.net/tag/cerveau": "Brain", + "http://www.semanlink.net/tag/rdf_vs_xml": "RDF vs XML", + "http://www.semanlink.net/tag/momie": "Momie", + "http://www.semanlink.net/tag/uri": "URI", + "http://www.semanlink.net/tag/trust_in_the_web_of_data": "Trust in the Web of Data", + "http://www.semanlink.net/tag/rdf2h_browser": "RDF2h", + "http://www.semanlink.net/tag/comedie_policiere": "Com\u00e9die polici\u00e8re", + "http://www.semanlink.net/tag/n_importe_quoi": "N'importe quoi", + "http://www.semanlink.net/tag/internet_tool": "Internet tool", + "http://www.semanlink.net/tag/huile_de_palme": "Huile de palme", + "http://www.semanlink.net/tag/zombie": "Zombie", + "http://www.semanlink.net/tag/leon_levy_bencheton": "L\u00e9on L\u00e9vy-Bencheton", + "http://www.semanlink.net/tag/computational_complexity": "Computational complexity", + "http://www.semanlink.net/tag/dans_deep_averaging_neural_networks": "DANs (Deep Averaging Neural Networks)", + "http://www.semanlink.net/tag/api": "API", + "http://www.semanlink.net/tag/interactive_knowledge_stack": "Interactive Knowledge Stack", + "http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003": "Championnats du monde \u00e0 Paris-Saint Denis, 2003", + "http://www.semanlink.net/tag/micropayments_on_the_web": "Micropayments on the web", + "http://www.semanlink.net/tag/musique_bresilienne": "Musique br\u00e9silienne", + "http://www.semanlink.net/tag/imprimantes": "Imprimantes", + "http://www.semanlink.net/tag/amerique": "Am\u00e9rique", + "http://www.semanlink.net/tag/semanlink_related": "Semanlink related", + "http://www.semanlink.net/tag/mars": "Mars", + "http://www.semanlink.net/tag/hydrogen": "Hydrogen", + "http://www.semanlink.net/tag/langue_francaise": "Langue fran\u00e7aise", + "http://www.semanlink.net/tag/toyota": "Toyota", + "http://www.semanlink.net/tag/semantic_web_business": "Semantic Web : Business", + "http://www.semanlink.net/tag/raphael_troncy": "Rapha\u00ebl Troncy", + "http://www.semanlink.net/tag/jean_dujardin": "Jean Dujardin", + "http://www.semanlink.net/tag/semantic_integration_hub": "Semantic Integration Hub", + "http://www.semanlink.net/tag/oov": "OOV", + "http://www.semanlink.net/tag/owl_2": "OWL 2", + "http://www.semanlink.net/tag/crispr_cas9": "CRISPR", + "http://www.semanlink.net/tag/ethnologie": "Ethnologie", + "http://www.semanlink.net/tag/event_camera": "Event camera", + "http://www.semanlink.net/tag/pangolin": "Pangolin", + "http://www.semanlink.net/tag/search_engines": "Search Engines", + "http://www.semanlink.net/tag/data_mining": "Data mining", + "http://www.semanlink.net/tag/mesure_du_temps": "Mesure du temps", + "http://www.semanlink.net/tag/electronics": "Electronics", + "http://www.semanlink.net/tag/elda": "Elda", + "http://www.semanlink.net/tag/three_parent_embryo": "Three-parent embryo", + "http://www.semanlink.net/tag/data_mining_tools": "Data mining tools", + "http://www.semanlink.net/tag/ranking_information_retrieval": "Ranking (information retrieval)", + "http://www.semanlink.net/tag/mime_type": "MIME type", + "http://www.semanlink.net/tag/memristor": "Memristor", + "http://www.semanlink.net/tag/immigration_familiale": "Immigration familiale", + "http://www.semanlink.net/tag/histoire_coloniale": "Histoire 
coloniale", + "http://www.semanlink.net/tag/bbc": "BBC", + "http://www.semanlink.net/tag/structured_data_embedding": "Structured Data Embedding", + "http://www.semanlink.net/tag/folksonomies_vs_ontologies": "Folksonomies vs ontologies", + "http://www.semanlink.net/tag/lab_grown_organs": "Lab-grown organs", + "http://www.semanlink.net/tag/antifascisme": "Antifascisme", + "http://www.semanlink.net/tag/ble": "Bl\u00e9", + "http://www.semanlink.net/tag/pays_bas": "Pays-Bas", + "http://www.semanlink.net/tag/features_machine_learning": "Features (Machine Learning)", + "http://www.semanlink.net/tag/los_angeles": "Los Angeles", + "http://www.semanlink.net/tag/pdf_format": "pdf format", + "http://www.semanlink.net/tag/automl": "AutoML", + "http://www.semanlink.net/tag/jersey_rdf": "jersey/RDF", + "http://www.semanlink.net/tag/numericable": "Num\u00e9ricable", + "http://www.semanlink.net/tag/control_theory": "Control theory", + "http://www.semanlink.net/tag/edward_curry": "Edward Curry", + "http://www.semanlink.net/tag/timeline": "Timeline", + "http://www.semanlink.net/tag/huge_rdf_data_source": "Huge RDF data source", + "http://www.semanlink.net/tag/obelisque_d_axoum": "Ob\u00e9lisque d'Axoum", + "http://www.semanlink.net/tag/rules": "Rules", + "http://www.semanlink.net/tag/terre_cuite": "Terre cuite", + "http://www.semanlink.net/tag/sql": "SQL", + "http://www.semanlink.net/tag/data_visualisation": "Data visualisation", + "http://www.semanlink.net/tag/thrace": "Thrace", + "http://www.semanlink.net/tag/henry_story": "Henry Story", + "http://www.semanlink.net/tag/folksonomy": "Folksonomy", + "http://www.semanlink.net/tag/piggy_bank": "Piggy Bank", + "http://www.semanlink.net/tag/tsunami": "Tsunami", + "http://www.semanlink.net/tag/xquery": "XQuery", + "http://www.semanlink.net/tag/deletefb": "DeleteFB", + "http://www.semanlink.net/tag/universites_francaises": "Universit\u00e9s fran\u00e7aises", + "http://www.semanlink.net/tag/rdf_blank_nodes": "RDF blank nodes", + "http://www.semanlink.net/tag/artificial_artificial_intelligence": "Artificial, Artificial Intelligence", + "http://www.semanlink.net/tag/optical_computing": "Optical computing", + "http://www.semanlink.net/tag/cross_lingual_word_embeddings": "Cross-lingual Word Embeddings", + "http://www.semanlink.net/tag/realisateur": "R\u00e9alisateur", + "http://www.semanlink.net/tag/data_management_platform": "Data management platform", + "http://www.semanlink.net/tag/bricolage_mac": "Bricolage Mac", + "http://www.semanlink.net/tag/curiosite_naturelle": "Curiosit\u00e9s naturelles", + "http://www.semanlink.net/tag/digital_entertainment": "Digital entertainment", + "http://www.semanlink.net/tag/node2vec": "Node2Vec", + "http://www.semanlink.net/tag/bricolage": "Bricolage", + "http://www.semanlink.net/tag/antimatiere": "Antimati\u00e8re", + "http://www.semanlink.net/tag/java_tip": "Java tip", + "http://www.semanlink.net/tag/censorship": "Censorship", + "http://www.semanlink.net/tag/deep_links": "Deep Links", + "http://www.semanlink.net/tag/renaissance": "Renaissance", + "http://www.semanlink.net/tag/gamestop": "Gamestop", + "http://www.semanlink.net/tag/rock": "Rock", + "http://www.semanlink.net/tag/islamisme": "Islamisme", + "http://www.semanlink.net/tag/neutrino": "Neutrino", + "http://www.semanlink.net/tag/enfance": "Enfance", + "http://www.semanlink.net/tag/commandant_cousteau": "Commandant Cousteau", + "http://www.semanlink.net/tag/eswc_2021": "ESWC 2021", + "http://www.semanlink.net/tag/fleur": "Fleur", + 
"http://www.semanlink.net/tag/politique_de_l_enfant_unique": "Politique de l'enfant unique", + "http://www.semanlink.net/tag/colonisation": "Colonisation", + "http://www.semanlink.net/tag/wiki_markup": "Wiki markup", + "http://www.semanlink.net/tag/delocalisation_des_services": "D\u00e9localisation des services", + "http://www.semanlink.net/tag/web_2_0_application": "Web 2.0 application", + "http://www.semanlink.net/tag/nullite_francaise": "Nullit\u00e9 fran\u00e7aise", + "http://www.semanlink.net/tag/data_portal": "Data portal", + "http://www.semanlink.net/tag/animal": "Animal", + "http://www.semanlink.net/tag/xri": "XRI", + "http://www.semanlink.net/tag/reseaux_bayesiens": "Bayesian analysis", + "http://www.semanlink.net/tag/sexe": "Sexe", + "http://www.semanlink.net/tag/biodiversite_declin": "Biodiversit\u00e9 : effondrement", + "http://www.semanlink.net/tag/journal_le_monde": "Journal Le Monde", + "http://www.semanlink.net/tag/jersey": "jersey", + "http://www.semanlink.net/tag/rdf_schema_inferencing": "RDF Schema inferencing", + "http://www.semanlink.net/tag/rdf_thrift": "RDF Thrift", + "http://www.semanlink.net/tag/rdf_access_to_relational_databases": "RDF Access to Relational Databases", + "http://www.semanlink.net/tag/xml_schema": "XML Schema", + "http://www.semanlink.net/tag/rio_de_janeiro": "Rio de Janeiro", + "http://www.semanlink.net/tag/dna_nanotechnology": "DNA nanotechnology", + "http://www.semanlink.net/tag/tika": "Tika", + "http://www.semanlink.net/tag/jdd_apple": "JDD Apple", + "http://www.semanlink.net/tag/ecocide": "Ecocide", + "http://www.semanlink.net/tag/rdf_graphs": "RDF graphs", + "http://www.semanlink.net/tag/harvard": "Harvard", + "http://www.semanlink.net/tag/fondamentalisme_islamique": "Fondamentalisme islamique", + "http://www.semanlink.net/tag/spiking_neural_network": "Spiking Neural Network", + "http://www.semanlink.net/tag/asie_centrale": "Asie centrale", + "http://www.semanlink.net/tag/china_s_social_credit_system": "China's Social Credit System", + "http://www.semanlink.net/tag/ims_vdex": "IMS VDEX", + "http://www.semanlink.net/tag/knowledge_representation": "Knowledge Representation", + "http://www.semanlink.net/tag/enseignement_francais": "Enseignement fran\u00e7ais", + "http://www.semanlink.net/tag/ocean": "Oc\u00e9an", + "http://www.semanlink.net/tag/jiroft": "Jiroft", + "http://www.semanlink.net/tag/enceintes_connectees": "Enceintes connect\u00e9es", + "http://www.semanlink.net/tag/vietnam": "Vietnam", + "http://www.semanlink.net/tag/usa_histoire": "USA : histoire", + "http://www.semanlink.net/tag/spellchecker": "Spellchecker", + "http://www.semanlink.net/tag/medical_data": "Medical Data", + "http://www.semanlink.net/tag/californie": "Californie", + "http://www.semanlink.net/tag/enterprise_data": "Enterprise Data", + "http://www.semanlink.net/tag/jsp": "JSP", + "http://www.semanlink.net/tag/venezuela": "Venezuela", + "http://www.semanlink.net/tag/lycee_alain": "Lyc\u00e9e Alain", + "http://www.semanlink.net/tag/david_blei": "David Blei", + "http://www.semanlink.net/tag/michael_hausenblas": "Michael Hausenblas", + "http://www.semanlink.net/tag/atos_origin": "Atos Origin", + "http://www.semanlink.net/tag/ensemble_learning": "Ensemble learning", + "http://www.semanlink.net/tag/bollore": "Bollor\u00e9", + "http://www.semanlink.net/tag/afrique_medievale": "Afrique m\u00e9di\u00e9vale", + "http://www.semanlink.net/tag/goths": "Goths", + "http://www.semanlink.net/tag/jouet": "Jouet", + "http://www.semanlink.net/tag/wolfram": "Stephen Wolfram", + 
"http://www.semanlink.net/tag/kassav": "Kassav'", + "http://www.semanlink.net/tag/moustique": "Moustique", + "http://www.semanlink.net/tag/kbpedia": "KBPedia", + "http://www.semanlink.net/tag/flashtext_algorithm": "FlashText algorithm", + "http://www.semanlink.net/tag/extremophiles": "Extr\u00e9mophiles", + "http://www.semanlink.net/tag/research_papers": "Research papers", + "http://www.semanlink.net/tag/emmanuelle_charpentier": "Emmanuelle Charpentier", + "http://www.semanlink.net/tag/tree_of_life": "Tree of life", + "http://www.semanlink.net/tag/recherche": "Recherche", + "http://www.semanlink.net/tag/machine_learning_problems": "Machine learning: problems", + "http://www.semanlink.net/tag/lybie": "Lybie", + "http://www.semanlink.net/tag/clint_eastwood": "Clint Eastwood", + "http://www.semanlink.net/tag/rdf_validator": "RDF Validator", + "http://www.semanlink.net/tag/plastic": "Plastic", + "http://www.semanlink.net/tag/cross_entropy": "Cross-Entropy", + "http://www.semanlink.net/tag/gore_vidal": "Gore Vidal", + "http://www.semanlink.net/tag/accaparement_des_terres_agricoles": "Accaparement des terres agricoles", + "http://www.semanlink.net/tag/semweb_pro_2011": "SemWeb Pro 2011", + "http://www.semanlink.net/tag/node_js": "node.js", + "http://www.semanlink.net/tag/art_d_afrique": "Art d'Afrique", + "http://www.semanlink.net/tag/crime_contre_l_humanite": "Crime contre l'Humanit\u00e9", + "http://www.semanlink.net/tag/euphrasie": "Euphrasie", + "http://www.semanlink.net/tag/nlp_papers": "NLP papers", + "http://www.semanlink.net/tag/rdf2rdfa": "RDF2RDFa", + "http://www.semanlink.net/tag/mensonge_d_etat": "Mensonge d'\u00e9tat", + "http://www.semanlink.net/tag/rdfa_lite": "RDFa Lite", + "http://www.semanlink.net/tag/geoffrey_hinton": "Geoffrey Hinton", + "http://www.semanlink.net/tag/c2g": "C2G", + "http://www.semanlink.net/tag/software": "Software", + "http://www.semanlink.net/tag/zhang_qian": "Zhang Qian", + "http://www.semanlink.net/tag/uri_template": "URI Template", + "http://www.semanlink.net/tag/alpinisme": "Alpinisme", + "http://www.semanlink.net/tag/encyclopedie": "Encyclop\u00e9die", + "http://www.semanlink.net/tag/1789": "1789", + "http://www.semanlink.net/tag/systemes_distribues": "Syst\u00e8mes distribu\u00e9s", + "http://www.semanlink.net/tag/laser": "Laser", + "http://www.semanlink.net/tag/amphibiens": "Amphibiens", + "http://www.semanlink.net/tag/anglais": "Anglais", + "http://www.semanlink.net/tag/multitask_learning_in_nlp": "Multitask Learning in NLP", + "http://www.semanlink.net/tag/classe_moyenne": "Classe moyenne", + "http://www.semanlink.net/tag/semantic_indexing": "Semantic indexing", + "http://www.semanlink.net/tag/bigquery": "BigQuery", + "http://www.semanlink.net/tag/datalakes": "Datalakes", + "http://www.semanlink.net/tag/monuments_historiques": "Monuments historiques", + "http://www.semanlink.net/tag/nemrud": "Nemrud", + "http://www.semanlink.net/tag/desktop_applications": "Desktop applications", + "http://www.semanlink.net/tag/wsdl": "WSDL", + "http://www.semanlink.net/tag/coursera": "Coursera", + "http://www.semanlink.net/tag/mozilla": "Mozilla", + "http://www.semanlink.net/tag/bert_fine_tuning": "BERT fine-tuning", + "http://www.semanlink.net/tag/riz": "Riz", + "http://www.semanlink.net/tag/kevin_kostner": "Kevin Kostner", + "http://www.semanlink.net/tag/hymne_a_la_joie": "Hymne \u00e0 la joie", + "http://www.semanlink.net/tag/pluton": "Pluton", + "http://www.semanlink.net/tag/seattle": "Seattle", + "http://www.semanlink.net/tag/koure": "Kour\u00e9", + 
"http://www.semanlink.net/tag/karen_blixen": "Karen Blixen", + "http://www.semanlink.net/tag/mobile_phone": "Mobile phone", + "http://www.semanlink.net/tag/couple_mixte": "Couple mixte", + "http://www.semanlink.net/tag/tim_cook": "Tim Cook", + "http://www.semanlink.net/tag/medical_information_search": "Medical Information Search", + "http://www.semanlink.net/tag/cjnn": "CJNN", + "http://www.semanlink.net/tag/gautier_poupeau": "Gautier Poupeau", + "http://www.semanlink.net/tag/lutte_traditionnelle": "Lutte traditionnelle", + "http://www.semanlink.net/tag/rosee": "Ros\u00e9e", + "http://www.semanlink.net/tag/json_ld_frame": "JSON-LD frame", + "http://www.semanlink.net/tag/esclavage": "Esclavage", + "http://www.semanlink.net/tag/overview": "Overview", + "http://www.semanlink.net/tag/sequencage_du_genome": "S\u00e9quen\u00e7age du g\u00e9nome", + "http://www.semanlink.net/tag/global_semantic_context": "Global Semantic Context", + "http://www.semanlink.net/tag/journalisme": "Journalisme", + "http://www.semanlink.net/tag/europe": "Europe", + "http://www.semanlink.net/tag/evolution": "Evolution", + "http://www.semanlink.net/tag/delicious_api": "delicious api", + "http://www.semanlink.net/tag/domain_adaptation": "Domain adaptation", + "http://www.semanlink.net/tag/google_groups": "Google Groups", + "http://www.semanlink.net/tag/modification_du_genome_humain": "Modification du g\u00e9nome humain", + "http://www.semanlink.net/tag/vision": "Vision", + "http://www.semanlink.net/tag/relation_extraction": "Relation Extraction", + "http://www.semanlink.net/tag/bio_engineering": "Bio-Engineering", + "http://www.semanlink.net/tag/nlp_4_requirements_engineering": "NLP 4 Requirements Engineering", + "http://www.semanlink.net/tag/software_design": "Software design", + "http://www.semanlink.net/tag/physique_des_particules_modele_standard": "Physique des particules : mod\u00e8le standard", + "http://www.semanlink.net/tag/random_walk": "Random walk", + "http://www.semanlink.net/tag/synchrotron": "Synchrotron", + "http://www.semanlink.net/tag/configuration_as_linked_data": "Configuration as Linked Data", + "http://www.semanlink.net/tag/night": "Night", + "http://www.semanlink.net/tag/everest": "Everest", + "http://www.semanlink.net/tag/semantic_enterprise_architecture": "Semantic Enterprise Architecture", + "http://www.semanlink.net/tag/photos_online": "photos online", + "http://www.semanlink.net/tag/jeunesse": "Jeunesse", + "http://www.semanlink.net/tag/revolte": "R\u00e9volte", + "http://www.semanlink.net/tag/crime": "Crime", + "http://www.semanlink.net/tag/word_embedding_compositionality": "Word Embedding Compositionality", + "http://www.semanlink.net/tag/sparql_1_1": "SPARQL 1.1", + "http://www.semanlink.net/tag/requin": "Requin", + "http://www.semanlink.net/tag/histoire_de_l_europe": "Histoire de l'Europe", + "http://www.semanlink.net/tag/meteorite": "M\u00e9t\u00e9orite", + "http://www.semanlink.net/tag/environnement": "Environnement", + "http://www.semanlink.net/tag/desertification": "D\u00e9sertification", + "http://www.semanlink.net/tag/ayrault": "Ayrault", + "http://www.semanlink.net/tag/livre": "Livre", + "http://www.semanlink.net/tag/brain_implants": "Brain implants", + "http://www.semanlink.net/tag/undecidability": "Undecidability", + "http://www.semanlink.net/tag/afrique_equatoriale": "Afrique \u00e9quatoriale", + "http://www.semanlink.net/tag/wuhan": "Wuhan", + "http://www.semanlink.net/tag/shoah": "Shoah", + "http://www.semanlink.net/tag/jsonld_jena": "Jsonld/Jena", + 
"http://www.semanlink.net/tag/vie_artificielle": "Vie artificielle", + "http://www.semanlink.net/tag/nuclear_power_no_thanks": "Nuclear Power? No thanks", + "http://www.semanlink.net/tag/forward_chaining": "Forward chaining", + "http://www.semanlink.net/tag/solr_and_nlp": "Solr and NLP", + "http://www.semanlink.net/tag/web_services_for_javascript": "Web Services for JavaScript", + "http://www.semanlink.net/tag/alimentation": "Alimentation", + "http://www.semanlink.net/tag/magie": "Magie", + "http://www.semanlink.net/tag/sparql_update": "SPARQL Update", + "http://www.semanlink.net/tag/imac": "iMac", + "http://www.semanlink.net/tag/arq_property_functions": "ARQ property functions", + "http://www.semanlink.net/tag/services_secrets": "Services secrets", + "http://www.semanlink.net/tag/ibm": "IBM", + "http://www.semanlink.net/tag/javascript_rdf_parser": "Javascript RDF Parser", + "http://www.semanlink.net/tag/troubleshooting": "Troubleshooting", + "http://www.semanlink.net/tag/rfi": "RFI", + "http://www.semanlink.net/tag/afia": "AFIA", + "http://www.semanlink.net/tag/pimo": "PIMO", + "http://www.semanlink.net/tag/jena_assembler": "Jena: assembler", + "http://www.semanlink.net/tag/economiste": "Economiste", + "http://www.semanlink.net/tag/smart_energy_grids": "Smart energy grids", + "http://www.semanlink.net/tag/http_patch": "HTTP PATCH", + "http://www.semanlink.net/tag/mines_d_or": "Mines d'or", + "http://www.semanlink.net/tag/thewebconf_2019": "TheWebConf 2019", + "http://www.semanlink.net/tag/calais_jungle": "Calais (jungle)", + "http://www.semanlink.net/tag/coursera_introduction_to_data_science": "Coursera: Introduction to Data Science", + "http://www.semanlink.net/tag/exposition_universelle": "Exposition universelle", + "http://www.semanlink.net/tag/linked_data_service": "Linked Data Service", + "http://www.semanlink.net/tag/heredia": "Heredia", + "http://www.semanlink.net/tag/automotive_ontologies": "Automotive ontologies", + "http://www.semanlink.net/tag/grddl": "GRDDL", + "http://www.semanlink.net/tag/fichage": "Fichage", + "http://www.semanlink.net/tag/congo_belge": "Congo belge", + "http://www.semanlink.net/tag/openai": "OpenAI", + "http://www.semanlink.net/tag/decouverte_archeologique": "D\u00e9couverte arch\u00e9ologique", + "http://www.semanlink.net/tag/industrie_miniere": "Industrie mini\u00e8re", + "http://www.semanlink.net/tag/touareg": "Touareg", + "http://www.semanlink.net/tag/genetically_engineered_micro_and_nanodevices": "Genetically Engineered Micro and Nanodevices", + "http://www.semanlink.net/tag/semanlink_dev": "Semanlink dev", + "http://www.semanlink.net/tag/docker_mac": "Docker-Mac", + "http://www.semanlink.net/tag/brinxmat": "Brinxmat", + "http://www.semanlink.net/tag/mathematicien": "Math\u00e9maticien", + "http://www.semanlink.net/tag/wikimedia": "Wikimedia", + "http://www.semanlink.net/tag/stanford": "Stanford", + "http://www.semanlink.net/tag/cache": "Cache", + "http://www.semanlink.net/tag/n_ary_relations_on_the_semantic_web": "N-ary Relations on the Semantic Web", + "http://www.semanlink.net/tag/rdfizers": "RDFizers", + "http://www.semanlink.net/tag/coupe_du_monde_de_football": "Coupe du monde de football", + "http://www.semanlink.net/tag/cold_start_problem": "Cold start", + "http://www.semanlink.net/tag/faim": "Faim", + "http://www.semanlink.net/tag/owled_2007_and_fps": "OWLED 2007 AND fps", + "http://www.semanlink.net/tag/athenes": "Ath\u00e8nes", + "http://www.semanlink.net/tag/droit": "Droit", + "http://www.semanlink.net/tag/travailler_le_dimanche": 
"Travailler le dimanche", + "http://www.semanlink.net/tag/phd": "PhD", + "http://www.semanlink.net/tag/cognition": "Cognition", + "http://www.semanlink.net/tag/angularjs_module": "AngularJS module", + "http://www.semanlink.net/tag/emergence": "Emergence", + "http://www.semanlink.net/tag/kiwi_project": "KIWI project", + "http://www.semanlink.net/tag/rdf_editor": "RDF editor", + "http://www.semanlink.net/tag/matieres_premieres": "Mati\u00e8res premi\u00e8res", + "http://www.semanlink.net/tag/honteux": "Honteux", + "http://www.semanlink.net/tag/spacy": "spaCy", + "http://www.semanlink.net/tag/mental_typewriter": "Mental typewriter", + "http://www.semanlink.net/tag/distributed_computing": "Distributed computing", + "http://www.semanlink.net/tag/capitalisme": "Capitalisme", + "http://www.semanlink.net/tag/polluted_places": "Polluted places", + "http://www.semanlink.net/tag/rdf_dev": "RDF dev", + "http://www.semanlink.net/tag/thumbnails": "Thumbnails", + "http://www.semanlink.net/tag/divers": "Divers", + "http://www.semanlink.net/tag/cannibalisme": "Cannibalisme", + "http://www.semanlink.net/tag/computers": "Computers", + "http://www.semanlink.net/tag/knowledge_maps": "Knowledge Maps", + "http://www.semanlink.net/tag/deep_unsupervised_learning": "Deep Unsupervised Learning", + "http://www.semanlink.net/tag/sbert": "Sentence-BERT", + "http://www.semanlink.net/tag/mars_express": "Mars Express", + "http://www.semanlink.net/tag/ocean_indien": "Oc\u00e9an indien", + "http://www.semanlink.net/tag/nlp_tools": "NLP tools", + "http://www.semanlink.net/tag/juan_sequeda": "Juan Sequeda", + "http://www.semanlink.net/tag/php": "PHP", + "http://www.semanlink.net/tag/irlande_du_nord": "Irlande du Nord", + "http://www.semanlink.net/tag/comedie": "Com\u00e9die", + "http://www.semanlink.net/tag/uncertainty_reasoning": "Uncertainty Reasoning", + "http://www.semanlink.net/tag/musique_en_ligne": "Musique en ligne", + "http://www.semanlink.net/tag/variational_bayesian_methods": "Variational Bayesian methods", + "http://www.semanlink.net/tag/privatisation_du_vivant": "Privatisation du vivant", + "http://www.semanlink.net/tag/acl_2021": "ACL 2021", + "http://www.semanlink.net/tag/bill_gates": "Bill Gates", + "http://www.semanlink.net/tag/guaranteed_basic_income": "Universal basic income", + "http://www.semanlink.net/tag/stanford_classifier": "Stanford classifier", + "http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne": "Scandale des \u00e9coutes en Allemagne", + "http://www.semanlink.net/tag/elias_torres": "Elias Torres", + "http://www.semanlink.net/tag/disruptive_change": "Disruptive change", + "http://www.semanlink.net/tag/energie": "Energie", + "http://www.semanlink.net/tag/horreur_economique": "Horreur \u00e9conomique", + "http://www.semanlink.net/tag/historic_images": "Historic images", + "http://www.semanlink.net/tag/collective_punishment": "Collective punishment", + "http://www.semanlink.net/tag/maximum_entropy": "Maximum Entropy", + "http://www.semanlink.net/tag/irak": "Irak", + "http://www.semanlink.net/tag/film_espagnol": "Film espagnol", + "http://www.semanlink.net/tag/nlp_negation": "NLP: negation", + "http://www.semanlink.net/tag/exploitation_petroliere": "Exploitation p\u00e9troli\u00e8re", + "http://www.semanlink.net/tag/lda2vec": "LDA2vec", + "http://www.semanlink.net/tag/egypte": "Egypte", + "http://www.semanlink.net/tag/sun_microsystems": "Sun Microsystems", + "http://www.semanlink.net/tag/metaverse": "Metaverse", + "http://www.semanlink.net/tag/histoire_du_monde": "Histoire du monde", + 
"http://www.semanlink.net/tag/ted": "TED", + "http://www.semanlink.net/tag/phd_thesis": "PhD Thesis", + "http://www.semanlink.net/tag/european_project": "European project", + "http://www.semanlink.net/tag/event": "Event", + "http://www.semanlink.net/tag/markov": "Markov", + "http://www.semanlink.net/tag/athletisme": "Athl\u00e9tisme", + "http://www.semanlink.net/tag/statistical_physics": "Statistical physics", + "http://www.semanlink.net/tag/information_theory_and_deep_learning": "Information theory AND Deep Learning", + "http://www.semanlink.net/tag/virtuoso_review": "Virtuoso: review", + "http://www.semanlink.net/tag/lost_city": "Lost City", + "http://www.semanlink.net/tag/analytics": "Analytics", + "http://www.semanlink.net/tag/ipython": "IPython", + "http://www.semanlink.net/tag/pauvrete": "Pauvret\u00e9", + "http://www.semanlink.net/tag/address_book": "Address Book", + "http://www.semanlink.net/tag/john_pereira": "John Pereira", + "http://www.semanlink.net/tag/pakistan": "Pakistan", + "http://www.semanlink.net/tag/haystack": "Haystack", + "http://www.semanlink.net/tag/httprange_14_solution": "httpRange-14 (solution)", + "http://www.semanlink.net/tag/sites_du_patrimoine_mondial_de_l_unesco": "Sites du Patrimoine mondial de l'Unesco", + "http://www.semanlink.net/tag/eme": "EME", + "http://www.semanlink.net/tag/facial_recognition": "Facial Recognition", + "http://www.semanlink.net/tag/goldfire": "Goldfire", + "http://www.semanlink.net/tag/humour": "Humour", + "http://www.semanlink.net/tag/categorical_variables": "Categorical Variables", + "http://www.semanlink.net/tag/mesopotamie": "M\u00e9sopotamie", + "http://www.semanlink.net/tag/societe": "Soci\u00e9t\u00e9", + "http://www.semanlink.net/tag/meroe": "M\u00e9ro\u00e9", + "http://www.semanlink.net/tag/semantic_web_use_cases": "Semantic web : Use cases", + "http://www.semanlink.net/tag/disruption": "Disruption", + "http://www.semanlink.net/tag/guepe": "Gu\u00eape", + "http://www.semanlink.net/tag/machine_learning_library": "Machine Learning library", + "http://www.semanlink.net/tag/enterprise_knowledge_graph_platform": "Enterprise Knowledge Graph Platform", + "http://www.semanlink.net/tag/voltaire": "Voltaire", + "http://www.semanlink.net/tag/biological_data": "Biological data", + "http://www.semanlink.net/tag/aspect_nlp": "Aspect (NLP)", + "http://www.semanlink.net/tag/sparql_extension_functions": "SPARQL Extension Functions", + "http://www.semanlink.net/tag/quads": "Quads", + "http://www.semanlink.net/tag/dieu": "dieu", + "http://www.semanlink.net/tag/energies_renouvelables": "Energies renouvelables", + "http://www.semanlink.net/tag/leigh_dodds": "Leigh Dodds", + "http://www.semanlink.net/tag/malbouffe": "Malbouffe", + "http://www.semanlink.net/tag/belo_horizonte": "Belo Horizonte", + "http://www.semanlink.net/tag/multiagent_ai": "Multiagent AI", + "http://www.semanlink.net/tag/beer": "Beer", + "http://www.semanlink.net/tag/multi_document_summarization": "Multi-Document Summarization", + "http://www.semanlink.net/tag/peer_to_peer": "Peer to peer", + "http://www.semanlink.net/tag/rdf_parser": "RDF Parser", + "http://www.semanlink.net/tag/nok": "Nok", + "http://www.semanlink.net/tag/social_bookmarking": "Social bookmarking", + "http://www.semanlink.net/tag/saturne": "Saturne", + "http://www.semanlink.net/tag/cuivre": "Cuivre", + "http://www.semanlink.net/tag/sani_aboussa": "Sani Aboussa", + "http://www.semanlink.net/tag/terrorisme_islamiste": "Terrorisme islamiste", + "http://www.semanlink.net/tag/economie": "Economie", + 
"http://www.semanlink.net/tag/isp_servlet_hosting": "ISP / Servlet Hosting", + "http://www.semanlink.net/tag/origines_du_sida": "Origines du sida", + "http://www.semanlink.net/tag/musique_du_niger": "Musique du Niger", + "http://www.semanlink.net/tag/apple_intel": "Apple-Intel", + "http://www.semanlink.net/tag/homme_de_flores": "Homme de Flor\u00e8s", + "http://www.semanlink.net/tag/cooperation": "Coop\u00e9ration", + "http://www.semanlink.net/tag/sample_code": "Sample code", + "http://www.semanlink.net/tag/louvre": "Louvre", + "http://www.semanlink.net/tag/multi_language_support": "Multi-language support", + "http://www.semanlink.net/tag/jedi_blue": "Jedi Blue", + "http://www.semanlink.net/tag/alexandre_bertails": "Alexandre Bertails", + "http://www.semanlink.net/tag/geometry_of_language_embeddings": "Geometry of language embeddings", + "http://www.semanlink.net/tag/militaire": "Militaire", + "http://www.semanlink.net/tag/perou": "P\u00e9rou", + "http://www.semanlink.net/tag/airbus": "Airbus", + "http://www.semanlink.net/tag/titan": "Titan", + "http://www.semanlink.net/tag/threat_models": "Threat models", + "http://www.semanlink.net/tag/voute_nubienne": "Vo\u00fbte nubienne", + "http://www.semanlink.net/tag/rdf_owl_documentation_tool": "RDF-OWL documentation tool", + "http://www.semanlink.net/tag/named_graphs": "Named Graphs", + "http://www.semanlink.net/tag/siberie": "Sib\u00e9rie", + "http://www.semanlink.net/tag/the_lancet": "The Lancet", + "http://www.semanlink.net/tag/rules_language": "Rules language", + "http://www.semanlink.net/tag/poesie": "Po\u00e9sie", + "http://www.semanlink.net/tag/semantic_web_products": "Semantic Web Products", + "http://www.semanlink.net/tag/brown_corpus": "Brown Corpus", + "http://www.semanlink.net/tag/json_visualization": "JSON Visualization", + "http://www.semanlink.net/tag/patricia_highsmith": "Patricia Highsmith", + "http://www.semanlink.net/tag/code_on_demand": "Code on demand", + "http://www.semanlink.net/tag/afrique_de_l_est": "Afrique de l'Est", + "http://www.semanlink.net/tag/time_in_rdf": "Time in RDF", + "http://www.semanlink.net/tag/culture": "Culture", + "http://www.semanlink.net/tag/competitivite": "Comp\u00e9titivit\u00e9", + "http://www.semanlink.net/tag/abel_prize": "Abel Prize", + "http://www.semanlink.net/tag/blockchain": "Blockchain", + "http://www.semanlink.net/tag/softmax": "Softmax", + "http://www.semanlink.net/tag/semantic_web_tools": "Semantic Web : Tools", + "http://www.semanlink.net/tag/frederick_giasson": "Frederick Giasson", + "http://www.semanlink.net/tag/fps_blog": "fps blog", + "http://www.semanlink.net/tag/sahel": "Sahel", + "http://www.semanlink.net/tag/mac_software": "Mac software", + "http://www.semanlink.net/tag/semantic_web_dev": "Semantic Web Dev", + "http://www.semanlink.net/tag/transe": "TransE", + "http://www.semanlink.net/tag/cfpm": "CFPM", + "http://www.semanlink.net/tag/tortures_americaines": "Tortures am\u00e9ricaines", + "http://www.semanlink.net/tag/paludisme": "Paludisme", + "http://www.semanlink.net/tag/survey_analysis": "Survey analysis", + "http://www.semanlink.net/tag/cameroun": "Cameroun", + "http://www.semanlink.net/tag/uber": "Uber", + "http://www.semanlink.net/tag/architecture_en_terre": "Architecture en terre", + "http://www.semanlink.net/tag/inde_moderne": "Inde moderne", + "http://www.semanlink.net/tag/gao": "GAO", + "http://www.semanlink.net/tag/mspace": "mSpace", + "http://www.semanlink.net/tag/tor_anonymity_network": "TOR", + "http://www.semanlink.net/tag/memoire": "M\u00e9moire", + 
"http://www.semanlink.net/tag/javascript_dom": "JavaScript DOM", + "http://www.semanlink.net/tag/voyager": "Voyager", + "http://www.semanlink.net/tag/afrique_de_l_ouest": "Afrique de l'Ouest", + "http://www.semanlink.net/tag/paleoanthropology": "Paleoanthropology", + "http://www.semanlink.net/tag/event_extraction": "Event extraction", + "http://www.semanlink.net/tag/data_science": "Data science", + "http://www.semanlink.net/tag/w3c_recommendation": "W3C Recommendation", + "http://www.semanlink.net/tag/informatique": "Informatique", + "http://www.semanlink.net/tag/catholicisme": "Catholicisme", + "http://www.semanlink.net/tag/croissance": "Croissance", + "http://www.semanlink.net/tag/hugo": "Victor Hugo", + "http://www.semanlink.net/tag/biodiversite": "Biodiversit\u00e9", + "http://www.semanlink.net/tag/pic_de_hubbert": "Pic de Hubbert", + "http://www.semanlink.net/tag/dinosaures": "Dinosaures", + "http://www.semanlink.net/tag/ipython_notebook": "IPython notebook", + "http://www.semanlink.net/tag/country_ontologies": "Country ontologies", + "http://www.semanlink.net/tag/rdf_vocabularies": "RDF Vocabularies", + "http://www.semanlink.net/tag/mauritanie": "Mauritanie", + "http://www.semanlink.net/tag/coupe_du_monde_2010": "Coupe du monde 2010", + "http://www.semanlink.net/tag/catastrophe_industrielle": "Catastrophe industrielle", + "http://www.semanlink.net/tag/matiere_premiere": "Mati\u00e8re premi\u00e8re", + "http://www.semanlink.net/tag/nationalisme": "Nationalisme", + "http://www.semanlink.net/tag/blaspheme": "Blasph\u00e8me", + "http://www.semanlink.net/tag/centrafrique": "Centrafrique", + "http://www.semanlink.net/tag/jackson": "Jackson", + "http://www.semanlink.net/tag/antiquite_romaine": "Antiquit\u00e9 romaine", + "http://www.semanlink.net/tag/drag_and_drop": "Drag-and-Drop", + "http://www.semanlink.net/tag/lee_sedol": "Lee Sedol", + "http://www.semanlink.net/tag/slideshare": "SlideShare", + "http://www.semanlink.net/tag/mecanique_quantique": "M\u00e9canique quantique", + "http://www.semanlink.net/tag/suisse": "Suisse", + "http://www.semanlink.net/tag/antisemitisme": "Antis\u00e9mitisme", + "http://www.semanlink.net/tag/gabon": "Gabon", + "http://www.semanlink.net/tag/ip_address": "IP address", + "http://www.semanlink.net/tag/unsupervised_machine_translation": "Unsupervised machine translation", + "http://www.semanlink.net/tag/pharaon": "Pharaon", + "http://www.semanlink.net/tag/google_guy": "Google guy", + "http://www.semanlink.net/tag/maintenance": "Maintenance", + "http://www.semanlink.net/tag/ldow": "LDOW", + "http://www.semanlink.net/tag/grece_antique": "Gr\u00e8ce antique", + "http://www.semanlink.net/tag/dependency_injection": "Dependency Injection", + "http://www.semanlink.net/tag/ben_laden": "Ben Laden", + "http://www.semanlink.net/tag/homme_celebre": "Homme c\u00e9l\u00e8bre", + "http://www.semanlink.net/tag/bon_chef_d_etat": "Bon chef d'\u00e9tat", + "http://www.semanlink.net/tag/antiquite_du_pakistan": "Antiquit\u00e9 du Pakistan", + "http://www.semanlink.net/tag/surveillance_capitalism": "Surveillance Capitalism", + "http://www.semanlink.net/tag/pixelwise_dense_prediction": "Pixelwise dense prediction", + "http://www.semanlink.net/tag/exode_rural": "Exode rural", + "http://www.semanlink.net/tag/voyage": "Voyage", + "http://www.semanlink.net/tag/hugh_glaser": "Hugh Glaser", + "http://www.semanlink.net/tag/zombie_pcs": "Zombie PCs", + "http://www.semanlink.net/tag/enterprise_search": "Enterprise Search", + "http://www.semanlink.net/tag/fossile": "Fossile", + 
"http://www.semanlink.net/tag/maxent_models": "Maxent models", + "http://www.semanlink.net/tag/massively_multiplayer_online_games": "Massively multiplayer online games", + "http://www.semanlink.net/tag/developpement_durable": "D\u00e9veloppement durable", + "http://www.semanlink.net/tag/linkto_semanlink": "LinkTo Semanlink", + "http://www.semanlink.net/tag/racisme": "Racisme", + "http://www.semanlink.net/tag/banque_centrale_europeenne": "Banque Centrale Europ\u00e9enne", + "http://www.semanlink.net/tag/jeremy_carroll": "Jeremy Carroll", + "http://www.semanlink.net/tag/mindmap": "MindMap", + "http://www.semanlink.net/tag/media_fragments": "Media fragments", + "http://www.semanlink.net/tag/geographie": "G\u00e9ographie", + "http://www.semanlink.net/tag/education_and_linked_data": "Education and Linked Data", + "http://www.semanlink.net/tag/valls": "Valls", + "http://www.semanlink.net/tag/biodiversity_data": "Biodiversity data", + "http://www.semanlink.net/tag/nathan_rixham": "Nathan Rixham", + "http://www.semanlink.net/tag/mac_os_x_tip": "Mac OS X Tip", + "http://www.semanlink.net/tag/ml_evaluation": "ML: evaluation", + "http://www.semanlink.net/tag/ecole_des_mines": "Ecole des Mines", + "http://www.semanlink.net/tag/fair_use": "Fair use", + "http://www.semanlink.net/tag/db2connect": "DB2Connect", + "http://www.semanlink.net/tag/tomas_mikolov": "Tomas Mikolov", + "http://www.semanlink.net/tag/jean_claude_ameisen": "Jean-Claude Ameisen", + "http://www.semanlink.net/tag/rada_mihalcea": "Rada Mihalcea", + "http://www.semanlink.net/tag/termite": "Termite", + "http://www.semanlink.net/tag/edgar_morin": "Edgar Morin", + "http://www.semanlink.net/tag/pierre_larrouturou": "Pierre Larrouturou", + "http://www.semanlink.net/tag/ultralingua": "Ultralingua", + "http://www.semanlink.net/tag/ml_as_a_service": "ML as a service", + "http://www.semanlink.net/tag/pompe_a_eau": "Pompe \u00e0 eau", + "http://www.semanlink.net/tag/probabilites": "Probabilit\u00e9s", + "http://www.semanlink.net/tag/cnrs": "CNRS", + "http://www.semanlink.net/tag/supply_chain": "Supply chain", + "http://www.semanlink.net/tag/droit_a_l_information": "Droit \u00e0 l'information", + "http://www.semanlink.net/tag/oracle": "Oracle", + "http://www.semanlink.net/tag/classification_systems": "Classification systems", + "http://www.semanlink.net/tag/cheval": "Cheval", + "http://www.semanlink.net/tag/yoav_goldberg": "Yoav Goldberg", + "http://www.semanlink.net/tag/xtech": "XTech", + "http://www.semanlink.net/tag/atom": "ATOM (format)", + "http://www.semanlink.net/tag/catastrophe_ecologique": "Catastrophe \u00e9cologique", + "http://www.semanlink.net/tag/wikidata_query_service": "Wikidata query service", + "http://www.semanlink.net/tag/avatar": "Avatar", + "http://www.semanlink.net/tag/riemann": "Riemann", + "http://www.semanlink.net/tag/solr_autocomplete": "Solr - autocomplete", + "http://www.semanlink.net/tag/parasitisme": "Parasitisme", + "http://www.semanlink.net/tag/ged": "GED", + "http://www.semanlink.net/tag/minos_neutrino_experiment": "MINOS Neutrino Experiment", + "http://www.semanlink.net/tag/antibiotiques": "Antibiotiques", + "http://www.semanlink.net/tag/sparql_en_javascript": "SPARQL en javascript", + "http://www.semanlink.net/tag/github_pages": "GitHub Pages", + "http://www.semanlink.net/tag/dur_a_trouver": "Dur \u00e0 trouver", + "http://www.semanlink.net/tag/volcan": "Volcan", + "http://www.semanlink.net/tag/synthetic_biology": "Synthetic biology", + "http://www.semanlink.net/tag/dark_web": "Dark Web", + 
"http://www.semanlink.net/tag/eswc_2008": "ESWC 2008", + "http://www.semanlink.net/tag/social_graph": "Social Graph", + "http://www.semanlink.net/tag/porsche": "Porsche", + "http://www.semanlink.net/tag/semantic_technology": "Semantic technology", + "http://www.semanlink.net/tag/learning_to_hash": "Learning to hash", + "http://www.semanlink.net/tag/foret": "For\u00eat", + "http://www.semanlink.net/tag/dl_why_does_it_work": "DL: why does it work?", + "http://www.semanlink.net/tag/gregg_kellogg": "Gregg Kellogg", + "http://www.semanlink.net/tag/inde": "Inde", + "http://www.semanlink.net/tag/assemblee_nationale": "Assembl\u00e9e nationale", + "http://www.semanlink.net/tag/energie_du_vide": "Energie du vide", + "http://www.semanlink.net/tag/scandinavie": "Scandinavie", + "http://www.semanlink.net/tag/usa": "USA", + "http://www.semanlink.net/tag/mind_mapping": "Mind mapping", + "http://www.semanlink.net/tag/media_player": "Media Player", + "http://www.semanlink.net/tag/seamless_journey": "Seamless journey", + "http://www.semanlink.net/tag/ai_teams": "AI teams", + "http://www.semanlink.net/tag/sparql_shortcomings": "SPARQL: shortcomings", + "http://www.semanlink.net/tag/knowledge_mining": "Knowledge mining", + "http://www.semanlink.net/tag/gnu": "GNU", + "http://www.semanlink.net/tag/eau_de_mars": "Eau de Mars", + "http://www.semanlink.net/tag/open_domain_question_answering": "Open Domain Question Answering", + "http://www.semanlink.net/tag/javascript_librairies": "JavaScript librairies", + "http://www.semanlink.net/tag/touchgraph": "TouchGraph", + "http://www.semanlink.net/tag/rfid_passports": "RFID passports", + "http://www.semanlink.net/tag/historien": "Historien", + "http://www.semanlink.net/tag/celera_ou_craig_venter": "Celera ou Craig Venter", + "http://www.semanlink.net/tag/social_manipulation": "Social manipulation", + "http://www.semanlink.net/tag/lyrics": "Lyrics", + "http://www.semanlink.net/tag/energie_solaire": "Energie solaire", + "http://www.semanlink.net/tag/smushing": "Smushing", + "http://www.semanlink.net/tag/connectionist_vs_symbolic_debate": "Connectionist vs symbolic debate", + "http://www.semanlink.net/tag/dictature": "Dictature", + "http://www.semanlink.net/tag/afrique_centrale": "Afrique Centrale", + "http://www.semanlink.net/tag/anthrax": "Anthrax", + "http://www.semanlink.net/tag/diplomatie": "Diplomatie", + "http://www.semanlink.net/tag/sw_demo": "SW demo", + "http://www.semanlink.net/tag/rdfjs": "RDFJS", + "http://www.semanlink.net/tag/slides": "Slides", + "http://www.semanlink.net/tag/css_html_templates": "css/html templates", + "http://www.semanlink.net/tag/jerusalem": "J\u00e9rusalem", + "http://www.semanlink.net/tag/chine_europe": "Chine-Europe", + "http://www.semanlink.net/tag/imperialisme": "Imp\u00e9rialisme", + "http://www.semanlink.net/tag/colza": "Colza", + "http://www.semanlink.net/tag/egypte_antique": "Egypte antique", + "http://www.semanlink.net/tag/apv_evolution": "APV evolution", + "http://www.semanlink.net/tag/france_is_ai_2018": "France is AI 2018", + "http://www.semanlink.net/tag/product_modelling": "Product Modelling", + "http://www.semanlink.net/tag/bombay": "Bombay", + "http://www.semanlink.net/tag/graph_editor": "Graph Editor", + "http://www.semanlink.net/tag/cascade": "Cascade", + "http://www.semanlink.net/tag/fecondation": "F\u00e9condation", + "http://www.semanlink.net/tag/peak_everything": "Peak Everything", + "http://www.semanlink.net/tag/film_de_guerre": "Film de guerre", + "http://www.semanlink.net/tag/tensor2tensor": "Tensor2Tensor", + 
"http://www.semanlink.net/tag/hypercard": "Hypercard", + "http://www.semanlink.net/tag/global_human_sensor_net": "Global human sensor net", + "http://www.semanlink.net/tag/news": "News", + "http://www.semanlink.net/tag/djibouti": "Djibouti", + "http://www.semanlink.net/tag/bacteries": "Bacteria", + "http://www.semanlink.net/tag/philosophie": "Philosophie", + "http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest": "Empires d'Afrique de l'Ouest", + "http://www.semanlink.net/tag/kleenex": "Kleenex", + "http://www.semanlink.net/tag/graph_visualization": "Graph visualization", + "http://www.semanlink.net/tag/archeologie_europeenne": "Arch\u00e9ologie europ\u00e9enne", + "http://www.semanlink.net/tag/maladie_contagieuse": "Maladie contagieuse", + "http://www.semanlink.net/tag/francafrique": "Fran\u00e7afrique", + "http://www.semanlink.net/tag/bonus": "Bonus", + "http://www.semanlink.net/tag/transnets": "Transnets", + "http://www.semanlink.net/tag/matplotlib": "matplotlib", + "http://www.semanlink.net/tag/horreur": "Horreur", + "http://www.semanlink.net/tag/media_conversationnel": "M\u00e9dia conversationnel", + "http://www.semanlink.net/tag/howto_tutorial_faq": "Howto, tutorial, FAQ", + "http://www.semanlink.net/tag/steve_jobs": "Steve Jobs", + "http://www.semanlink.net/tag/bnf": "BNF", + "http://www.semanlink.net/tag/depense_publique": "D\u00e9pense publique", + "http://www.semanlink.net/tag/pillage_de_vestiges_antiques": "Pillage de vestiges antiques", + "http://www.semanlink.net/tag/semantic_data": "Semantic data", + "http://www.semanlink.net/tag/ign": "IGN", + "http://www.semanlink.net/tag/spurl": "spurl", + "http://www.semanlink.net/tag/ai_amazon": "AI@Amazon", + "http://www.semanlink.net/tag/magie_noire": "Magie noire", + "http://www.semanlink.net/tag/enfants": "Enfants", + "http://www.semanlink.net/tag/mutualart_com": "MutualArt.com", + "http://www.semanlink.net/tag/recherche_francaise": "France : recherche", + "http://www.semanlink.net/tag/web_notebook": "Web notebook", + "http://www.semanlink.net/tag/debug_deep_learning": "Debug Deep Learning", + "http://www.semanlink.net/tag/heliosphere": "H\u00e9liosph\u00e8re", + "http://www.semanlink.net/tag/1984": "1984", + "http://www.semanlink.net/tag/eswc_2014": "ESWC 2014", + "http://www.semanlink.net/tag/words": "Words", + "http://www.semanlink.net/tag/energie_sombre": "Energie sombre", + "http://www.semanlink.net/tag/histoire_de_france": "Histoire de France", + "http://www.semanlink.net/tag/trafic_de_drogue": "Trafic de drogue", + "http://www.semanlink.net/tag/self_organizing_systems": "Self-organizing systems", + "http://www.semanlink.net/tag/tepuys": "Tepuys", + "http://www.semanlink.net/tag/hortefeux": "Hortefeux", + "http://www.semanlink.net/tag/graph": "Graph", + "http://www.semanlink.net/tag/pologne": "Pologne", + "http://www.semanlink.net/tag/migrations_humaines": "Migrations humaines", + "http://www.semanlink.net/tag/robotique": "Robotique", + "http://www.semanlink.net/tag/crete": "Cr\u00e8te", + "http://www.semanlink.net/tag/smart_contracts": "Smart contracts", + "http://www.semanlink.net/tag/english_grammar": "English-grammar", + "http://www.semanlink.net/tag/paul_graham": "Paul Graham", + "http://www.semanlink.net/tag/unesco": "UNESCO", + "http://www.semanlink.net/tag/access_control": "Access Control", + "http://www.semanlink.net/tag/glacier": "Glacier", + "http://www.semanlink.net/tag/coree_du_sud": "Cor\u00e9e du Sud", + "http://www.semanlink.net/tag/digital_humanities": "Digital Humanities", + 
"http://www.semanlink.net/tag/medias": "M\u00e9dias", + "http://www.semanlink.net/tag/bresil": "Br\u00e9sil", + "http://www.semanlink.net/tag/pnud": "PNUD", + "http://www.semanlink.net/tag/catastrophe_naturelle": "Catastrophe naturelle", + "http://www.semanlink.net/tag/crochemelier": "Crochemelier", + "http://www.semanlink.net/tag/desert": "D\u00e9sert", + "http://www.semanlink.net/tag/biotechnologies": "Biotechnologies", + "http://www.semanlink.net/tag/marketing": "Marketing", + "http://www.semanlink.net/tag/text_embeddings": "Text Embeddings", + "http://www.semanlink.net/tag/indiens_du_bresil": "Indiens du Br\u00e9sil", + "http://www.semanlink.net/tag/ai_startups": "AI: startups", + "http://www.semanlink.net/tag/parfum": "Parfum", + "http://www.semanlink.net/tag/tag_cloud": "Tag cloud", + "http://www.semanlink.net/tag/groenland": "Groenland", + "http://www.semanlink.net/tag/israel": "Isra\u00ebl", + "http://www.semanlink.net/tag/speculation": "Sp\u00e9culation", + "http://www.semanlink.net/tag/mais": "Ma\u00efs", + "http://www.semanlink.net/tag/capitalistes": "Capitalistes", + "http://www.semanlink.net/tag/ecole": "Ecole", + "http://www.semanlink.net/tag/histoire_de_l_asie": "Histoire de l'Asie", + "http://www.semanlink.net/tag/tutorial": "Tutorial", + "http://www.semanlink.net/tag/public_data": "Public data", + "http://www.semanlink.net/tag/sparql": "SPARQL", + "http://www.semanlink.net/tag/deep_learning_implementing": "Deep learning: implementing", + "http://www.semanlink.net/tag/encrypted_media_extensions": "Encrypted Media Extensions", + "http://www.semanlink.net/tag/craig_venter": "Craig Venter", + "http://www.semanlink.net/tag/data_web": "Web of data", + "http://www.semanlink.net/tag/cyc": "Cyc", + "http://www.semanlink.net/tag/turing": "Turing", + "http://www.semanlink.net/tag/megalithes": "M\u00e9galithes", + "http://www.semanlink.net/tag/petition": "P\u00e9tition", + "http://www.semanlink.net/tag/oregon": "Oregon", + "http://www.semanlink.net/tag/frog": "Frog", + "http://www.semanlink.net/tag/tools": "Tools", + "http://www.semanlink.net/tag/digital_video": "Digital Video", + "http://www.semanlink.net/tag/javascript_and_tutorial": "JavaScript Tutorial", + "http://www.semanlink.net/tag/likelihood": "Likelihood", + "http://www.semanlink.net/tag/rss_extensions": "RSS extensions", + "http://www.semanlink.net/tag/conquete_spatiale": "Conqu\u00eate spatiale", + "http://www.semanlink.net/tag/learning_english": "Learning english", + "http://www.semanlink.net/tag/constitution_europeenne": "Constitution europ\u00e9enne", + "http://www.semanlink.net/tag/outsourcing": "Outsourcing", + "http://www.semanlink.net/tag/language_learning": "Language learning", + "http://www.semanlink.net/tag/center_media_microsoft": "Microsoft Media Center", + "http://www.semanlink.net/tag/acl_2020": "ACL 2020", + "http://www.semanlink.net/tag/linked_data_demo": "Linked Data demo", + "http://www.semanlink.net/tag/mac_mini": "Mac Mini", + "http://www.semanlink.net/tag/sumer": "Sumer", + "http://www.semanlink.net/tag/sw_online_tools": "SW online tools", + "http://www.semanlink.net/tag/relativite_generale": "Relativit\u00e9 g\u00e9n\u00e9rale", + "http://www.semanlink.net/tag/societe_francaise": "Soci\u00e9t\u00e9 fran\u00e7aise", + "http://www.semanlink.net/tag/nike": "Nike", + "http://www.semanlink.net/tag/blogs_le_monde": "Blogs Le Monde", + "http://www.semanlink.net/tag/ssl": "SSL", + "http://www.semanlink.net/tag/jardin": "Jardin", + "http://www.semanlink.net/tag/population_mondiale": "Population mondiale", + 
"http://www.semanlink.net/tag/eminem": "Eminem", + "http://www.semanlink.net/tag/literary_criticism": "Literary criticism", + "http://www.semanlink.net/tag/paradoxe": "Paradoxe", + "http://www.semanlink.net/tag/delinquance": "D\u00e9linquance", + "http://www.semanlink.net/tag/zones_intertropicales": "Zones intertropicales", + "http://www.semanlink.net/tag/json_ld_apis": "JSON-LD APIs", + "http://www.semanlink.net/tag/benjamin_heinzerling": "Benjamin Heinzerling", + "http://www.semanlink.net/tag/mexique": "Mexique", + "http://www.semanlink.net/tag/kenya": "Kenya", + "http://www.semanlink.net/tag/art": "Art", + "http://www.semanlink.net/tag/manipulation": "Manipulation", + "http://www.semanlink.net/tag/coupe_du_monde_2006": "Coupe du monde 2006", + "http://www.semanlink.net/tag/parc_du_w": "Parc du W", + "http://www.semanlink.net/tag/complexite": "Complexit\u00e9", + "http://www.semanlink.net/tag/histoire_du_xxe_siecle": "Histoire du XXe si\u00e8cle", + "http://www.semanlink.net/tag/economie_francaise": "Economie fran\u00e7aise", + "http://www.semanlink.net/tag/caf": "CAF", + "http://www.semanlink.net/tag/nature": "Nature", + "http://www.semanlink.net/tag/red_hat": "Red Hat", + "http://www.semanlink.net/tag/semantic_web_web_2_0": "Semantic Web / Web 2.0", + "http://www.semanlink.net/tag/technorati": "Technorati", + "http://www.semanlink.net/tag/ours_polaire": "Ours polaire", + "http://www.semanlink.net/tag/boudhisme": "Boudhisme", + "http://www.semanlink.net/tag/semantic_components": "Semantic Components", + "http://www.semanlink.net/tag/killer_app": "Killer App", + "http://www.semanlink.net/tag/serpent": "Serpent", + "http://www.semanlink.net/tag/human_in_the_loop": "Human in the loop", + "http://www.semanlink.net/tag/dvd": "DVD", + "http://www.semanlink.net/tag/planet_under_pressure": "Planet under pressure", + "http://www.semanlink.net/tag/mind_control": "Mind control", + "http://www.semanlink.net/tag/methodes_agiles": "M\u00e9thodes agiles", + "http://www.semanlink.net/tag/ted_nelson": "Ted Nelson", + "http://www.semanlink.net/tag/couleur": "Couleur", + "http://www.semanlink.net/tag/maria": "Maria", + "http://www.semanlink.net/tag/jeux": "Jeux", + "http://www.semanlink.net/tag/orange": "Orange (telecom)", + "http://www.semanlink.net/tag/nlp_book": "NLP: book", + "http://www.semanlink.net/tag/ethiopie": "Ethiopie", + "http://www.semanlink.net/tag/web_services_critique": "Web services : critique", + "http://www.semanlink.net/tag/iguane": "Iguane", + "http://www.semanlink.net/tag/eruption_volcanique": "Eruption volcanique", + "http://www.semanlink.net/tag/industrie": "industrie", + "http://www.semanlink.net/tag/semantic_networks": "Semantic Networks", + "http://www.semanlink.net/tag/batteries": "Batteries", + "http://www.semanlink.net/tag/net_neutrality": "Net Neutrality", + "http://www.semanlink.net/tag/soudan": "Soudan", + "http://www.semanlink.net/tag/pulsar": "Pulsar", + "http://www.semanlink.net/tag/download_execute_javascript": "Download & Execute Javascript", + "http://www.semanlink.net/tag/langues": "Langues", + "http://www.semanlink.net/tag/a_suivre": "A suivre", + "http://www.semanlink.net/tag/anthropocene": "Anthropoc\u00e8ne", + "http://www.semanlink.net/tag/marie_jo_perec": "Marie-Jo P\u00e9rec", + "http://www.semanlink.net/tag/lisp": "Lisp", + "http://www.semanlink.net/tag/inegalites": "In\u00e9galit\u00e9s", + "http://www.semanlink.net/tag/exoplanetes": "Exoplan\u00e8tes", + "http://www.semanlink.net/tag/mashups": "Mashups", + "http://www.semanlink.net/tag/pbs_program": "PBS 
program", + "http://www.semanlink.net/tag/critique_de_la_societe_occidentale": "Critique de la soci\u00e9t\u00e9 occidentale", + "http://www.semanlink.net/tag/declin_de_l_europe": "D\u00e9clin de l'Europe", + "http://www.semanlink.net/tag/passwords": "Passwords", + "http://www.semanlink.net/tag/deloitte": "Deloitte", + "http://www.semanlink.net/tag/unix": "Unix", + "http://www.semanlink.net/tag/marchands_d_arme": "Marchands d'arme", + "http://www.semanlink.net/tag/soleil": "Soleil", + "http://www.semanlink.net/tag/photos_du_niger": "Photos du Niger", + "http://www.semanlink.net/tag/pib": "PIB", + "http://www.semanlink.net/tag/bourse": "Bourse", + "http://www.semanlink.net/tag/g8": "G8", + "http://www.semanlink.net/tag/indonesie": "Indon\u00e9sie", + "http://www.semanlink.net/tag/coree": "Cor\u00e9e", + "http://www.semanlink.net/tag/boube_gado": "Boube Gado", + "http://www.semanlink.net/tag/amerique_du_sud": "Am\u00e9rique du sud", + "http://www.semanlink.net/tag/antarctique": "Antarctique", + "http://www.semanlink.net/tag/mysql": "MySQL", + "http://www.semanlink.net/tag/orstom": "ORSTOM", + "http://www.semanlink.net/tag/gado": "Gado", + "http://www.semanlink.net/tag/senegal": "S\u00e9n\u00e9gal", + "http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-401/iswc2008pd_submission_14.pdf": "Semantic Web technologies for digital preservation : the SPAR project", + "https://fasttext.cc/docs/en/cheatsheet.html": "Cheatsheet \u00b7 fastText", + "https://arxiv.org/abs/1404.5367": "[1404.5367] Lexicon Infused Phrase Embeddings for Named Entity Resolution", + "http://clarkparsia.com/weblog/2008/04/10/grddl-in-owlsight/": "GRDDL in OwlSight Thinking Clearly", + "http://www.mkbergman.com/?p=355.": "OpenLink Plugs the Gaps in the Structured Web \u00bb AI3:::Adaptive Information", + "http://mostafadehghani.com/2017/04/23/beating-the-teacher-neural-ranking-models-with-weak-supervision/": "Beating the Teacher: Neural Ranking Models with Weak Supervision \u2013 Mostafa Dehghani", + "http://www.irt.org/": "irt.org Home Page (\"Internet Related Technologies\")", + "https://arxiv.org/abs/1808.07699": "[1808.07699] End-to-End Neural Entity Linking", + "http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre": "[1909.04120] Span Selection Pre-training for Question Answering", + "http://www.semanlink.net/doc/2021/10/your_money_and_your_life_by_e": "Your Money and Your Life - by Edward Snowden - Continuing Ed \u2014 with Edward Snowden", + "http://lists.w3.org/Archives/Public/public-vocabs/2011Nov/0022.html": "@itemid and URL properties in schema.org from Jeni Tennison on 2011-11-04 (public-vocabs@w3.org from November 2011)", + "http://i.stanford.edu/pub/cstr/reports/csl/tr/89/400/CSL-TR-89-400.pdf": "Sparse Distributed Memory: Principles and Operation", + "https://arxiv.org/abs/1512.00765": "[1512.00765] Learning Semantic Similarity for Very Short Texts", + "http://www.laquadrature.net/en/dominant-telcos-try-to-end-net-neutrality-through-itu": "Dominant Telcos Try to End Net Neutrality Through ITU La Quadrature du Net", + "http://www.fortune.com/fortune/print/0,15935,1101810,00.html": "Fortune.com: The Law of Unintended Consequences", + "https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/fashion_mnist.ipynb": "Training on TPU", + "http://www.semanlink.net/doc/2020/07/dicksontsai_stanford_nlp_local_": "dicksontsai/stanford-nlp-local-extension: Chrome extension for sending content to localhost server running Stanford NLP tools.", + 
"http://www.semanlink.net/doc/2019/05/snorkel_by_hazyresearch": "Snorkel by HazyResearch", + "http://www.lemonde.fr/planete/article/2009/07/27/ecologistes-et-agriculteurs-s-affrontent-durement-sur-la-deforestation-de-l-amazonie_1223095_3244.html#ens_id=1223174": "Ecologistes et agriculteurs s'affrontent durement sur la d\u00e9forestation de l'Amazonie", + "https://dl.acm.org/citation.cfm?doid=3184558.3186906": "That Makes Sense: Joint Sense Retrofitting from Contextual and Ontological Information", + "http://www.semanlink.net/doc/2021/03/hugging_face_sur_twitter_fin": "Hugging Face sur Twitter : \"Fine-Tuning @facebookai's Wav2Vec2 for Speech Recognition is now possible in Transformers", + "http://www.pbs.org/wgbh/nova/elegant/": "String theory: The elegant universe", + "https://www.eff.org/deeplinks/2017/09/open-letter-w3c-director-ceo-team-and-membership": "An open letter to the W3C Director, CEO, team and membership Electronic Frontier Foundation", + "http://www.semanlink.net/doc/2021/02/pre_trained_models_%7C_graphvite": "Pre-trained Models GraphVite", + "http://robohub.org/machine-consciousness-fact-or-fiction/": "Machine consciousness: Fact or fiction? Robohub", + "http://www.lespetitescases.net/open-graph-protocol-facebook-se-met-au-web-semantique": "Open Graph Protocol : Facebook se met au Web s\u00e9mantique ? Les petites cases", + "http://www.semanlink.net/doc/2019/11/homemade_manuscript_ocr_1_oc": "Homemade manuscript OCR (1): OCRopy", + "http://topquadrant.com/docs/marcom/TopQuadrant_Whitepaper_online.pdf": "Semantic Web Solutions at Work in the enterprise - TopQuadrant white paper", + "https://lejournal.cnrs.fr/articles/la-croissance-une-affaire-denergie": "La croissance, une affaire d'\u00e9nergie CNRS Le journal", + "http://www.semanlink.net/doc/2020/04/lainey_doyle_sur_twitter_basi": "Lainey Doyle sur Twitter : Basic things: Ireland and the UK started this pandemic with roughly the same...\"", + "http://www.laquadrature.net/": "La Quadrature du Net", + "http://www.msf.fr/site/actu.nsf/actus/petitionnovartisfaq201206?OpenDocument&loc=au": "www.msf.fr : P\u00c9TITION NOVARTIS - Un mauvais proc\u00e8s, une menace pour les malades des pays pauvres", + "http://www.semanlink.net/doc/2019/08/blackstone_concept_extractor_": "Blackstone Concept Extractor \u2014 ICLR&D", + "http://www.milk.com/wall-o-shame/heavy_boots.html": "Heavy Boots", + "http://www.semanlink.net/doc/2021/08/jeffrey_p_clemens_sur_twitter_": "Jeffrey P. Clemens sur Twitter : \"An underrated mistake in US policy in Afghanistan was its long-running effort to suppress the cultivation of opium poppy and, in turn, the production of heroin and other opiates. A thread. 
1/19\" / Twitter", + "http://www.msnbc.msn.com/id/47225834#.T7vDLI7UN9p": "Strange organism has unique roots in tree of life - Technology & science - Science - LiveScience - msnbc.com", + "http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity": "[2001.01447] Improving Entity Linking by Modeling Latent Entity Type Information", + "https://fr.m.wikipedia.org/wiki/Ampoules_de_Lorenzini": "Ampoules de Lorenzini \u2014 Wikip\u00e9dia", + "http://cs231n.github.io/": "CS231n Convolutional Neural Networks for Visual Recognition", + "http://www.semanlink.net/doc/2020/08/designing_and_interpreting_prob": "Designing and Interpreting Probes \u00b7 John Hewitt", + "http://jsfiddle.net/germesin/7EYxP/": "VIE.js - Skeleton - jsFiddle", + "http://www.semanlink.net/doc/2020/12/github_explosion_sense2vec_c": "GitHub - explosion/sense2vec: Contextually-keyed word vectors", + "http://passeurdesciences.blog.lemonde.fr/2015/06/02/quel-gout-a-la-chair-dhomme/": "Quel go\u00fbt a la chair d\u2019homme\u00a0? Passeur de sciences", + "http://www.snee.com/bobdc.blog/2009/09/appreciating-sparql-construct.html": "Appreciating SPARQL CONSTRUCT more - bobdc.blog", + "http://refine.deri.ie/": "GRefine RDF Extension", + "http://www.programcreek.com/java-api-examples/index.php?source_dir=jena-master/jena-arq/src/main/java/org/apache/jena/riot/out/JsonLDWriter.java": "JsonLdOptions", + "https://www.theguardian.com/politics/2017/feb/26/us-billionaire-mercer-helped-back-brexit": "Revealed: how US billionaire helped to back Brexit Politics The Guardian", + "http://www.lemonde.fr/planete/article/2008/12/27/la-technique-de-l-insecte-sterile-va-etre-testee-pour-lutter-contre-le-paludisme-et-le-chikungunya_1135695_3244.html#ens_id=1135776": "La technique de l'insecte st\u00e9rile va \u00eatre test\u00e9e pour lutter contre le paludisme et le chikungunya", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-784810,0.html": "Une colonie de cafards dompt\u00e9e par un mini-robot", + "http://www.semanlink.net/doc/2019/05/a_hybrid_graph_model_for_distan": "A Hybrid Graph Model for Distant Supervision Relation Extraction Springer for Research & Development (ESWC 2019)", + "http://www.the153club.org/citroen7.jpg": "Cavaliers Jermas", + "http://www.technorati.com": "", + "http://www.forbes.com/sites/jamesmarshallcrotty/2011/12/21/m-i-t-game-changer-free-online-education-for-all/": "M.I.T. Game-Changer: Free Online Education For All - Forbes", + "http://www.bytemark.co.uk/index.html": "Bytemark: Welcome", + "http://www.semanlink.net/doc/2019/05/hazyresearch_metal_snorkel_met": "HazyResearch/metal: Snorkel MeTaL: A framework for training models with multi-task weak supervision", + "http://www.ysearchblog.com/archives/000654.html": "Yahoo! Search Blog: Yahoo! Search BOSS Releases Key Terms", + "http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast": "Text similarity search in Elasticsearch using vector fields Elastic Blog", + "http://www.w3.org/wiki/Html-data-tf": "Html-data-tf - W3C Wiki", + "http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_": "Artificial Human Intelligence: The Programmer\u2019s Apprentice - Tom Dean and Rishabh Singh - Google Research", + "http://semanticweb.com/down-with-the-data-warehouse-long-live-the-semantic-data-warehouse_b23245": "Down with the Data Warehouse! Long Live the Semantic Data Warehouse! 
- semanticweb.com", + "http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr": "[1807.08447] LinkNBed: Multi-Graph Representation Learning with Entity Linkage", + "http://www.semanlink.net/doc/2021/05/the_city_state_in_five_cultures": "The City-State in Five Cultures Department of History University of Washington", + "http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4478": "KEYNOTE Panel: Semantics in the Automotive Industry", + "https://arxiv.org/abs/1709.02840": "[1709.02840] A Brief Introduction to Machine Learning for Engineers", + "http://groups.drupal.org/semantic-web": "Semantic Web groups.drupal.org", + "http://www.semanlink.net/doc/2019/08/exploring_dna_with_deep_learning": "Exploring DNA with Deep Learning", + "http://www.lesahel.org/index.php/culture/item/2999-r%C3%A9tro-festival-de-la-jeunesse-au-ccog--les-succ%C3%A8s-dhier-expos%C3%A9s-%C3%A0-la-jeune-g%C3%A9n%C3%A9ration": "R\u00e9tro-festival de la jeunesse au CCOG : Les succ\u00e8s d'hier expos\u00e9s \u00e0 la jeune g\u00e9n\u00e9ration", + "http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l": "[2109.08133] Phrase Retrieval Learns Passage Retrieval, Too", + "http://www.histropedia.com/": "Histropedia - The Timeline of Everything", + "http://www.snee.com/bobdc.blog/2007/08/some_great_w3c_explanations_of.html": "bobdc.blog: Some great W3C explanations of basic ontology concepts", + "http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_new": "Nils Reimers sur Twitter : \"New models for Neural Information Retrieval...\"", + "http://lists.w3.org/Archives/Public/public-vocabs/2012Oct/0033.html": "Re: Meaning of property \"url\" from Ed Summers on 2012-10-23 (public-vocabs@w3.org from October 2012)", + "https://arxiv.org/abs/1803.05651": "[1803.05651] Word2Bits - Quantized Word Vectors", + "http://edition.cnn.com/2012/01/31/tech/gaming-gadgets/apple-boycott-commentary/": "Should you feel guilty for buying your iPhone?", + "http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain": "Meetup Paris #40 : Beyond plain text: elasticsearch\u2019s annotated text field type (en anglais) - YouTube", + "https://towardsdatascience.com/what-a-disentangled-net-we-weave-representation-learning-in-vaes-pt-1-9e5dbc205bd1": "What a Disentangled Net We Weave: Representation Learning in VAEs (Pt. 1)", + "http://www.semanlink.net/doc/2020/11/evolutionary_physiological_ec": "Evolutionary & Physiological Ecology Ruuskanen Group", + "https://fr.wikipedia.org/wiki/La_Fable_des_abeilles": "Mandeville : la Fable des Abeilles", + "http://fr.wikipedia.org/wiki/Giovanni_Battista_Belzoni": "Giovanni Battista Belzoni - Wikip\u00e9dia", + "https://theclevermachine.wordpress.com/2014/09/06/derivation-error-backpropagation-gradient-descent-for-neural-networks/": "Derivation: Error Backpropagation & Gradient Descent for Neural Networks The Clever Machine", + "http://code.google.com/intl/fr-FR/web/ajaxcrawling/docs/getting-started.html": "Making AJAX Applications Crawlable - Google Code", + "https://www.technologyreview.com/the-download/609857/while-us-workers-fear-automation-swedish-employees-welcome-it/": "While U.S. 
Workers Fear Automation, Swedish Employees Welcome It - MIT Technology Review", + "http://flask.pocoo.org/": "Flask (A Python Microframework)", + "http://www.institut-de-france.fr/education/serres.html": "Les nouveaux d\u00e9fis de l'\u00e9ducation - Petite Poucette par M.\u00a0Michel Serres", + "http://www.semanlink.net/doc/2020/05/karl_sharro_sur_twitter_the_": "Karl Sharro sur Twitter : \"The British are finally experiencing what's it like to have the British rule your country\" / Twitter", + "http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_": "Rodrigo Nogueira sur Twitter : \"Slides of our WSDM 2021 tutorial \"Pretrained Transformers for Text Ranking: BERT and Beyond\"", + "https://arxiv.org/abs/1709.03856": "[1709.03856] StarSpace: Embed All The Things!", + "https://youtu.be/KAzhAXjUG28": "ilha das flores (filme curta metragem)", + "http://designshack.co.uk/tutorials/10-css-form-examples": "10 CSS Form Examples Design Shack", + "http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html?showComment=1242257640000#c1729077015498462034": "Official Google Webmaster Central Blog: Introducing Rich Snippets - Welcome Google, really.", + "http://www.semanlink.net/doc/2020/11/kingsley_uyi_idehen_sur_twitter": "Kingsley Uyi Idehen sur Twitter : \"When I read this & other articles, I leverage our @datasniff browser ext. for highlighting key terms;", + "http://www.offconvex.org/2016/02/14/word-embeddings-2/": "Word Embeddings: Explaining their properties \u2013 Off the convex path (2016)", + "http://mobisocial.stanford.edu/musubi/public/": "Musubi - A Mobile Social Network and App Platform", + "http://www.eclipsezone.com/eclipse/forums/t88527.html": "EclipseZone - Problems publishing webapp to Tomcat ...", + "http://blog.restcase.com/restful-api-versioning-insights/": "RESTFul API Versioning Insights", + "http://www.networkworld.com/community/node/33361": "DARPA Mathematical Challenges", + "http://lists.w3.org/Archives/Public/public-vocabs/2013Jan/0082.html": "Re: Should we adopt SKOS? from Mike Bergman on 2013-01-11 (public-vocabs@w3.org from January 2013)", + "http://colah.github.io/posts/2015-08-Backprop/": "Calculus on Computational Graphs: Backpropagation -- colah's blog", + "http://arxiv.org/pdf/1608.04062v1.pdf": "[1608.04062] Stacked Approximated Regression Machine: A Simple Deep Learning Approach", + "http://www.semweb.pro/view?rql=Any+T+GROUPBY+T+ORDERBY+ST+WHERE+T+location+L%2C+T+start_time+ST%2C+U+leads+T%2C+T+in_conf+C%2C+T+in_track+TR%2C+T+in_state+S%2C+T+title+TT%2C+S+name+%22accepted%22%2C+C+url_id+%22semwebpro2012%22&vid=primary": "SemWeb.Pro 2012 : pr\u00e9sentations", + "https://gist.github.com/jonschlinkert/5854601": "A better markdown cheatsheet", + "http://www.guardian.co.uk/law/2010/sep/30/slavery-trawlers-europe": "'Slavery' uncovered on trawlers fishing for Europe The Guardian", + "http://www.semanlink.net/doc/2020/06/le_troupeau_film_": "Le Troupeau (film)", + "http://www.pbs.org/kcet/shapeoflife/index.html": "The shape of life", + "http://www.im.ethz.ch/education/HS08/davenport_hbr_98.pdf": "Putting the Enterprise into the Enterprise System by Thomas H. 
Davenport", + "http://events.linkeddata.org/ldow2011/papers/ldow2011-paper12-ashraf.pdf": "Open eBusiness Ontology Usage: Investigating Community Implementation of GoodRelations", + "http://www.semanlink.net/doc/2020/01/your_own_hosted_blog_the_easy_": "Your own hosted blog, the easy, free, open way\u00b7 fast.ai", + "http://arxiv.org/abs/1601.01272": "[1601.01272] Recurrent Memory Networks for Language Modeling", + "http://alias-i.com/lingpipe/": "LingPipe", + "http://www.futurecrimesbook.com/": "Future Crimes", + "http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi": "[2107.12708] QA Dataset Explosion: A Taxonomy of NLP Resources for Question Answering and Reading Comprehension", + "http://www.apple.com/html5/": "Apple - HTML5", + "http://www.semanlink.net/doc/2020/04/projota_a_rezadeira_video_of": "Projota - A Rezadeira (Video Oficial) - YouTube", + "http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/b-cottereau.html": "Des mod\u00e8les informatiques pour expliquer le d\u00e9veloppement de la vision (Biologie au CNRS)", + "http://www.semanlink.net/doc/2020/10/which_flavor_of_bert_should_you": "Which flavor of BERT should you use for your QA task? by Olesya Bondarenko Towards Data Science", + "http://www.semanlink.net/doc/2021/06/improving_quality_of_search_res": "Improving Quality of Search Results Clustering with Approximate Matrix Factorisations (2006)", + "http://www.theguardian.com/commentisfree/2015/jan/04/internet-freedom-china-russia-us-google-microsoft-digital-sovereignty": "Who\u2019s the true enemy of internet freedom - China, Russia, or the US? Evgeny Morozov", + "http://swse.deri.org/faq.html": "SWSE FAQ", + "http://www.pbs.org/wnet/secrets/case_plague/clues.html": "Secrets of the Dead . Mystery of the Black Death-2 PBS", + "http://www.semanlink.net/doc/2020/09/latent_graph_neural_networks_m": "Latent graph neural networks: Manifold learning 2.0? 
by Michael Bronstein Sep, 2020", + "http://www.dbooth.org/2007/rdf-and-soa/rdf-and-soa-paper.htm": "RDF and SOA", + "http://www.aclweb.org/anthology/Q16-1028": "A Latent Variable Model Approach to PMI-based Word Embeddings (2016)", + "https://arxiv.org/abs/1807.06036": "[1807.06036] Pangloss: Fast Entity Linking in Noisy Text Environments", + "http://www.semanlink.net/doc/2021/06/l_ame_damnee_du_president_kount": "L'\u00e2me damn\u00e9e du pr\u00e9sident Kountch\u00e9 (1983)", + "http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html": "Official Google Webmaster Central Blog: Introducing Rich Snippets", + "http://www.w3.org/2007/08/pyRdfa/": "RDFa Distiller and Parser", + "http://developer.apple.com/library/mac/#releasenotes/Java/JavaSnowLeopardUpdate3LeopardUpdate8RN/NewandNoteworthy/NewandNoteworthy.html": "Java for Mac OS X 10.6 Update 3 and 10.5 Update 8 Release Notes: New and Noteworthy", + "http://www.nytimes.com/2012/06/01/opinion/krugman-the-austerity-agenda.html": "The Austerity Agenda - NYTimes.com - Paul Krugman", + "https://towardsdatascience.com/learning-note-starspace-for-multi-label-text-classification-81de0e8fca53": "[Learning Note] StarSpace For Multi-label Text Classification", + "http://manning.com/ingersoll/": "Manning: Taming Text", + "https://bpcatalog.dev.java.net/nonav/ajax/progress-bar/design.html": "Progress Bar Using AJAX", + "http://pfia2018.loria.fr/journee-tal/": "Journee:TAL PFIA 2018", + "http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno": "[2104.08663] BEIR: A Heterogenous Benchmark for Zero-shot Evaluation of Information Retrieval Models", + "https://medium.com/@mcriddy/semantic-web-design-92ef35f66c9f": "Semantic Web design The limits of responsive design and the API-driven web", + "http://www.semanlink.net/doc/2020/04/copier_le_fonctionnement_du_cer": "Copier le fonctionnement du cerveau pour \u00e9conomiser de l'\u00e9nergie Techniques de l'Ing\u00e9nieur", + "https://www.dataquest.io/blog/data-science-quickstart-with-docker/": "How to setup a data science environment in minutes using Docker and Jupyter", + "http://www.pbs.org/wgbh/nova/nature/slime-mold-smarts.html": "NOVA Slime Mold Smarts", + "https://www.topbots.com/most-important-ai-research-papers-2018/": "Easy-To-Read Summary of Important AI Research Papers of 2018", + "https://research.googleblog.com/2018/04/introducing-semantic-experiences-with.html": "Research Blog: Introducing Semantic Experiences with Talk to Books and Semantris", + "https://stats385.github.io/": "Theories of Deep Learning (STATS 385)", + "https://jalammar.github.io/illustrated-bert/": "The Illustrated BERT, ELMo, and co. (How NLP Cracked Transfer Learning) \u2013 Jay Alammar", + "https://sutheeblog.wordpress.com/2017/03/20/a-biterm-topic-model-for-short-texts/": "A Biterm Topic Model for Short Texts", + "http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf": "Provable Algorithms for Machine Learning Problems by Rong Ge.", + "http://www2009.eprints.org/92/1/p911.pdf": "Why is the Web Loosely Coupled? 
A Multi-Faceted Metric for Service Design", + "http://www.semanlink.net/doc/2019/08/smaller_faster_cheaper_light": "Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT", + "http://rdfa.info/wiki/Tools": "Tools - RDFaWiki", + "http://www.newscientist.com/article.ns?id=dn7470&print=true": "Mission to build a simulated brain begins", + "http://www.snee.com/bobdc.blog/2009/02/getting-started-with-sesame.html": "Getting started with Sesame - bobdc.blog", + "http://solr-vs-elasticsearch.com/": "Apache Solr vs ElasticSearch - the Feature Smackdown!", + "http://www.elearnmag.org/subpage.cfm?section=articles&article=77-1": "eLearn Magazine: The Semantic Web and E-learning", + "http://www.semanlink.net/doc/2020/04/dans_les_quartiers_populaires_": "Dans les quartiers populaires, \u00ab\u00a0si on remplit le frigo, on chope le corona\u00a0\u00bb", + "http://www.semergence.com/2008/03/23/why-flickr-doesnt-do-foaf/": "Semergence \u00bb Blog Archive \u00bb Why Flickr Doesn\u2019t Do FOAF", + "http://www.google.com/support/webmasters/bin/answer.py?answer=40349&ctx=related": "How can I create a Google-friendly site?", + "https://arxiv.org/abs/1806.04411": "[1806.04411] Named Entity Recognition with Extremely Limited Data", + "http://searchengineland.com/bing-britannica-partnership-123930": "Bing Gets Its Own Knowledge Graph Via Britannica Partnership", + "http://www.semanlink.net/doc/2019/08/could_an_ai_duet_be_the_next_ch": "Could an AI duet be the next chart-topper? Financial Times", + "http://www.semanlink.net/doc/2020/02/canwen_xu_sur_twitter_wtf_w": "Canwen Xu sur Twitter : \"WTF? We brutally dismember BERT and replace all his organs?\"", + "http://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript": "How can I pretty-print JSON using JavaScript? - Stack Overflow", + "http://www.technorati.com/": "http://www.technorati.com", + "http://www.wired.com/wired/archive/14.04/collide.html": "Wired 14.04: When Virtual Worlds Collide", + "http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17937": "[Linking-open-data] synonym URIs", + "http://www.conceptnet.io/": "ConceptNet", + "http://www.lemonde.fr/technologies/article/2012/01/18/de-la-libellule-au-microdrone-comment-les-insectes-nous-apprennent-a-voler_1625200_651865.html": "De la libellule au microdrone : comment les insectes nous apprennent \u00e0 voler", + "https://www.nytimes.com/2018/05/14/science/biohackers-gene-editing-virus.html": "As D.I.Y. 
Gene Editing Gains Popularity, \u2018Someone Is Going to Get Hurt\u2019 - The New York Times", + "http://www.lemonde.fr/pixels/article/2015/11/27/le-terrorisme-ne-se-nourrit-pas-de-la-technologie-mais-de-la-colere-et-de-l-ignorance_4818981_4408996.html": "\u00ab\u00a0Le terrorisme ne se nourrit pas de la technologie, mais de la col\u00e8re et de l\u2019ignorance\u00a0\u00bb", + "http://flowplayer.org/tools/index.html": "jQuery TOOLS - The missing UI library for the Web", + "http://fgiasson.com/blog/index.php/2016/09/28/using-cognonto-to-generate-domain-specific-word2vec-models/": "Using Cognonto to Generate Domain Specific word2vec Models Frederick Giasson", + "http://www.w3.org/TR/#tr_Linked_Data": "W3C - Linked Data: Standards and Drafts", + "http://www.semanlink.net/doc/2020/12/serrania_de_la_lindosa": "Serran\u00eda de La Lindosa", + "http://www.cliffsnotes.com/": "Get Homework Help with CliffsNotes Study Guides", + "http://www.google.com/support/webmasters/bin/answer.py?answer=146750": "Marking up products for rich snippets", + "http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb": "[2001.08053] Contextualized Embeddings in Named-Entity Recognition: An Empirical Study on Generalization", + "http://www.proxml.be/users/paul/": "\"Living in the XML and RDF world\"", + "https://arxiv.org/abs/1803.01271": "[1803.01271] An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling", + "http://snap.stanford.edu/proj/embeddings-www/index.html#materials": "TUTORIAL: Representation Learning on Networks - TheWebConf 2018", + "http://www.semanlink.net/doc/2020/09/whistleblower_says_facebook_ign": "Whistleblower Says Facebook Ignored Global Political Manipulation", + "http://www.newscientist.com/article/dn25734-why-electric-car-maker-tesla-has-torn-up-its-patents.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.U59UbMZGuww": "Why electric car maker Tesla has torn up its patents - opinion - 16 June 2014 - New Scientist", + "http://stackoverflow.com/questions/14013644/hosting-a-maven-repository-on-github": "Hosting a Maven repository on github - Stack Overflow", + "http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=3101": "Indice du d\u00e9veloppement humain 2006. Esp\u00e9rance de vie, niveau d\u2019\u00e9ducation et le revenu par habitant: le Niger bon dernier", + "http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_": "Paris NLP Season 4 Meetup #3 \u2013 Paris NLP", + "http://www.theguardian.com/commentisfree/2016/jan/31/cheap-cab-ride-uber-true-cost-google-wealth-taxation?CMP=twt_gu": "Cheap cab ride? You must have missed Uber\u2019s true cost Evgeny Morozov Opinion The Guardian", + "http://www.ldodds.com/blog/2003/09/rdf-forms/": "http://code.google.com/p/rforms/", + "http://www.programmableweb.com/": "ProgrammableWeb: Mashups and the Web as Platform", + "http://www.w3.org/community/rdfjs/wiki/Comparison_of_RDFJS_libraries": "Comparison of RDFJS libraries - RDF JavaScript Libraries Community Group", + "http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c": "[2002.02925] BERT-of-Theseus: Compressing BERT by Progressive Module Replacing", + "http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno": "[2008.08995] Constructing a Knowledge Graph from Unstructured Documents without External Alignment", + "http://www.semanlink.net/doc/2021/01/javier_abellan_sur_twitter_t": "Javier Abell\u00e1n sur Twitter : \"Tired of not fully understanding the Attention of the Transformer? 
I've made this illustration for you.\"", + "https://arxiv.org/abs/1902.05309v1": "[1902.05309] Transfer Learning for Sequence Labeling Using Source Model and Target Data", + "http://www.semanlink.net/doc/2020/06/representation_learning_for_inf": "Representation Learning for Information Extraction from Form-like Documents \u2013 Google Research", + "http://blogs.zdnet.com/semantic-web/?p=131": "Sir Tim Berners-Lee addresses WWW2008 in Beijing", + "http://www.semanlink.net/doc/2020/10/tagme_on_the_fly_annotation_of": "TAGME: on-the-fly annotation of short text fragments!", + "https://arxiv.org/abs/1903.05823": "[1903.05823] Deep Patent Landscaping Model Using Transformer and Graph Embedding", + "http://stackoverflow.com/questions/20242780/which-cdn-solutions-support-caching-with-content-negotiation": "http - Which CDN solutions support caching with content negotiation? - Stack Overflow", + "https://dandelion.eu/": "Dandelion API - Semantic Text Analytics as a service", + "https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf": "Improving the Compositionality of Word Embeddings (2017)", + "http://stackoverflow.com/questions/1043339/javascript-for-detecting-browser-language-preference": "localization - JavaScript for detecting browser language preference - Stack Overflow", + "http://infomesh.net/2002/rdfinhtml/": "RDF in HTML: Approaches", + "http://www.semanlink.net/doc/2021/03/christopher_dengs%C3%B8_sur_twitter_": "Christopher Dengs\u00f8 sur Twitter : \"The moderation API now detects addresses in addition to other personal details.\"", + "http://lists.w3.org/Archives/Public/public-esw-thes/2012Nov/0003.html": "Re: Using DBpedia resources as skos:Concepts? from Fran\u00e7ois-Paul Servant on 2012-11-12 (public-esw-thes@w3.org from November 2012)", + "http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese": "[2010.02353] Participatory Research for Low-resourced Machine Translation: A Case Study in African Languages", + "https://github.com/cmader/qSKOS/wiki/Quality-Criteria-for-SKOS-Vocabularies": "Quality Criteria for SKOS Vocabularies - GitHub", + "http://news.bbc.co.uk/1/hi/health/4222460.stm": "BBC NEWS - Health - 'Proof' our brains are evolving", + "https://forums.fast.ai/t/fast-ai-with-google-colab/18882": "Fast.ai with Google Colab", + "https://aclweb.org/anthology/papers/D/D15/D15-1077/": "Name List Only? 
Target Entity Disambiguation in Short Texts - ACL Anthology (2015)", + "http://www.semanlink.net/doc/2020/12/how_to_improve_elasticsearch_se": "How to improve Elasticsearch search relevance with boolean queries Elastic Blog", + "http://passeurdesciences.blog.lemonde.fr/2013/03/10/homme-qui-ne-descendait-pas-d-adam/": "L\u2019homme qui ne descendait pas d\u2019Adam Passeur de sciences", + "http://trueg.wordpress.com/2011/06/08/nepomuk-2-0-and-the-data-management-service/": "Nepomuk 2.0 and the Data Management Service \u00ab Trueg's Blog", + "http://fr.slideshare.net/tonyh/semweb-meetupmarch2013": "Techniques used in RDF Data Publishing at Nature Publishing Group", + "http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1": "Les \u00c9tats-Unis et la drogue ARTE", + "http://fr.wikipedia.org/wiki/Le_Monde_selon_Monsanto": "Le Monde selon Monsanto", + "http://bl.ocks.org/danja/ef309b9d3f392145c9c3": "Life on the Ocean Wave", + "http://lists.w3.org/Archives/Public/public-lod/2012Sep/0102.html": "Expensive links in Linked Data from SERVANT Francois-Paul on 2012-09-28 (public-lod@w3.org from September 2012)", + "http://java-source.net/open-source/html-parsers": "", + "http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate": "[2009.12030] AutoETER: Automated Entity Type Representation for Knowledge Graph Embedding", + "http://alex.nederlof.com/blog/2013/07/28/caching-using-annotations-with-jersey/": "Cache-Control using annotations with Jersey - LexTech", + "http://www.jeuneafrique.com/Article/ARTJAWEB20150109093737/mali-terrorisme-aqmi-jihad-terrorisme-mali-deux-ans-apr-s-serval-aqmi-reprend-ses-quartiers-au-nord-de-tombouctou.html": "Terrorisme Mali : deux ans apr\u00e8s Serval, Aqmi reprend ses quartiers au Nord de Tombouctou Jeuneafrique.com - le premier site d'information et d'actualit\u00e9 sur l'Afrique", + "http://www.jspwiki.org": "", + "https://plus.google.com/photos/109693896432057207496/albums/5887912756987863953/5887912753135956050?pid=5887912753135956050&oid=109693896432057207496": "Yes we scan", + "https://blog.risingstack.com/the-react-way-getting-started-tutorial/": "The React.js Way: Getting Started Tutorial RisingStack", + "http://www.arte.tv/de/film/kino-auf-ARTE/die-filmtipps-der-woche----/Kino-auf-Arte/3839610,CmC=3839626.html": "Evet - Ich will!", + "http://www.cominvent.com/2012/01/25/super-flexible-autocomplete-with-solr/": "Super flexible AutoComplete with Solr Cominvent AS - Enterprise search consultants", + "http://www.semanlink.net/doc/2020/07/sandstorm": "Sandstorm", + "http://hyperscope.org/": "HyperScope", + "http://www.w3.org/TR/2005/WD-swbp-skos-core-guide-20050510/": "SKOS Core Guide", + "http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-07.pdf": "SPARQL Query Mediation over RDF Data Sources with Disparate Contexts", + "http://www.semanticweb.com/rdf/semantic_web_meets_bi_in_new_project_whose_partners_include_sap_sheffield_hallam_university_ontotext_171855.asp": "Semantic Web Meets BI In New Project Whose Partners Include SAP, Sheffield Hallam University, Ontotext", + "http://dannyayers.com/2006/07/11/hey-lazyweb--live": "Hey LazyWeb! 
: Live Clipboard and Timeline", + "http://blogs.zdnet.com/semantic-web/?p=128": "Linked Data on the Web, WWW2008 The Semantic Web ZDNet.com", + "http://www.semanlink.net/doc/2021/06/contrastive_representation_lear": "Contrastive Representation Learning", + "http://shawfactor.com/2012/03/29/lh-tools-a-sparql-endpoint-for-wordpress/": "LH tools; a sparql endpoint for WordPress Shawfactor", + "http://snurf.bdash.net.nz/": "Snurf: a Python-based Blogging System", + "http://danbri.org/words/2007/11/04/223": "danbri\u2019s foaf stories \u00bb FOAF diagram", + "http://en.gravatar.com/hyperfp": "fps - Gravatar Profile", + "http://www.semanlink.net/doc/2020/12/retour_sur_terre_de_chang%E2%80%99e_5_": "Retour sur Terre de Chang\u2019e-5, une sonde spatiale chinoise transportant des \u00e9chantillons lunaires", + "https://github.com/nltk/nltk/wiki/Installing-Third-Party-Software": "NLTK: Installing Third Party Software \u00b7 nltk Wiki", + "http://www.lemonde.fr/emploi/article/2015/11/19/bernard-stiegler-ce-n-est-qu-en-projetant-un-veritable-avenir-qu-on-pourra-combattre-daech_4813660_1698637.html": "Bernard Stiegler\u00a0: \u00ab\u00a0Ce n\u2019est qu\u2019en projetant un v\u00e9ritable avenir qu\u2019on pourra combattre Daech\u00a0\u00bb", + "http://www.scopus.com/record/display.url?eid=2-s2.0-57349152312&origin=inward&txGid=7A2D7638D1A90FC842E0E0E1C688AFC1.kqQeWtawXauCyC8ghhRGJg": "Topic modeling with network regularization", + "http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build": "Elasticsearch meets BERT: Building Search Engine with Elasticsearch and BERT", + "http://turtlescript.sourceforge.net/": "TurtleScript", + "http://www.semanlink.net/doc/2021/02/retrieval_augmented_generation_": "Retrieval Augmented Generation with Huggingface Transformers and Ray Distributed Computing with Ray", + "http://www.semanlink.net/doc/2020/10/how_to_extract_text_from_pdf_fi": "How to extract text from PDF files - dida Machine Learning", + "http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf": "A Semantic Web Representation of a Product Range Specification based on Constraint Satisfaction Problem in the Automotive Industry", + "http://blogs.warwick.ac.uk/kieranshaw/entry/utf-8_internationalisation_with": "Java UTF\u20138 international character support with Tomcat and Oracle, 26/03/07, Kieran's blog", + "http://www.semanlink.net/doc/2020/11/the_unreasonable_syntactic_expr": "The Unreasonable Syntactic Expressivity of RNNs \u00b7 John Hewitt", + "http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho": "[2001.11631] Enhancement of Short Text Clustering by Iterative Classification", + "https://twitter.com/akulith/status/1080460889839595522": "clarification about how the process under Article 50 TEU works", + "http://www.semanlink.net/doc/2020/10/philippe_aghion_wikipedia": "Philippe Aghion \u2014 Wikip\u00e9dia", + "http://lists.w3.org/Archives/Public/uri/2003Jan/0005": "Rationalizing the term URI", + "http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf": "Finding Similar Items", + "http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_": "[1902.10197] RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space", + "http://www.lemonde.fr/afrique/article/2012/06/30/ancar-eddine-annonce-la-destruction-des-tous-les-mausolees-de-tombouctou_1727451_3212.html": "An\u00e7ar Eddine va d\u00e9truire tous les mausol\u00e9es de Tombouctou", + "http://www.semanlink.net/doc/2021/08/painttransformer_a_hugging_fa": "PaintTransformer - a Hugging 
Face Space by akhaliq", + "http://www.semanlink.net/doc/2021/03/teddy_koker_sur_twitter_torc": "Teddy Koker sur Twitter : \"Torchsort, an implementation of \"Fast Differentiable Sorting and Ranking\" in PyTorch\"", + "http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_": "[1405.5893] Computerization of African languages-French dictionaries", + "https://arxiv.org/abs/1810.04805": "[1810.04805] BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", + "https://arxiv.org/abs/1604.06737": "[1604.06737] Entity Embeddings of Categorical Variables", + "http://www.sparqlets.org/clipboard/home": "SPARQL Clipboard Demo", + "http://www.volkswagen.co.uk/vocabularies/vvo/ns": "The Volkswagen Vehicles Ontology (VVO)", + "https://arxiv.org/abs/1603.08861": "[1603.08861] Revisiting Semi-Supervised Learning with Graph Embeddings", + "http://googleresearch.blogspot.fr/2014/04/a-billion-words-because-todays-language.html": "A Billion Words: Because today's language modeling standard should be higher", + "http://www.bbc.com/culture/story/20171122-twenty-six-words-we-dont-want-to-lose": "BBC - Culture - Twenty-six words we don\u2019t want to lose", + "http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno": "raphaelsty/textokb: Extract knowledge from raw text", + "https://en.wikipedia.org/wiki/Seven_Years_in_Tibet_(1997_film)": "Seven Years in Tibet (Jean-Jacques Annaud)", + "https://franceisai.com/conferences/conference-2018": "Conference - France is AI", + "http://www.suryasuravarapu.com/2009/02/rest-jersey-configuration-on-tomcat.html": "REST: Jersey configuration on Tomcat - Surya Suravarapu's Blog", + "http://greententacle.techfak.uni-bielefeld.de/~cunger/qald/index.php?x=home&q=1": "Question answering over Linked Data - Interacting with Linked Data", + "https://github.com/robert-bor/aho-corasick": "Aho-Corasick (java implementation)", + "http://www.ird.fr/bani/": "BANI (Base d'Anthropologie physique du Niger) Plan de BANI", + "http://blog.keras.io/how-convolutional-neural-networks-see-the-world.html": "How convolutional neural networks see the world", + "http://www.w3.org/Submission/SWRL/": "SWRL: A Semantic Web Rule Language Combining OWL and RuleML", + "http://www.semanlink.net/doc/2019/06/la_surveillance_stade_supreme_": "La surveillance, stade supr\u00eame du capitalisme\u00a0?", + "http://www.semanlink.net/doc/2020/11/as_europe_s_governments_lose_co": "As Europe's governments lose control of Covid, revolt is in the air The Guardian", + "https://www.theguardian.com/environment/2019/mar/05/the-last-great-tree-a-majestic-relic-of-canadas-vanishing-boreal-forest": "The last great tree: a majestic relic of Canada's vanishing rainforest Environment The Guardian", + "http://www-128.ibm.com/developerworks/library/x-mashups.html?ca=dgr-lnxw16MashupChallenges": "Mashups: The new breed of Web app", + "http://i.stanford.edu/~ullman/mmds.html": "Mining of Massive Datasets", + "https://hal.archives-ouvertes.fr/hal-01626196/document": "Combining word and entity embeddings for entity linking (ESWC 2017)", + "https://fr.wikipedia.org/wiki/Seigneurie_de_Bell%C3%AAme": "Seigneurie de Bell\u00eame", + "http://www.semanlink.net/doc/2020/10/ai_has_cracked_a_key_mathematic": "AI has cracked a key mathematical puzzle for understanding our world MIT Technology Review", + "http://www.semanticpedia.org": "S\u00e9manticp\u00e9dia", + "https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html": "Using pre-trained word embeddings in a Keras model", + 
"http://www.creativesynthesis.net/blog/projects/graph-gear/": "Graph Gear :: Creative Synthesis", + "http://www.detnews.com/apps/pbcs.dll/article?AID=/20081119/AUTO01/811190449/1148/AUTO01": "Nissan announces electric car deal with Oregon The Detroit News", + "http://www.mkbergman.com/859/seven-pillars-of-the-open-semantic-enterprise/": "Seven Pillars of the Open Semantic Enterprise \u00bb AI3:::Adaptive Information", + "http://www.semanlink.net/doc/2021/10/selective_classification_can_ma": "Selective Classification Can Magnify Disparities Across Groups SAIL Blog", + "http://hallojs.org/markdown.html": "Hallo.js - Editing Markdown in WYSIWYG", + "http://howtodoinjava.com/2013/06/05/jax-rs-resteasy-cache-control-with-etag-example/": "JAX-RS RESTEasy Cache control with ETag example - How To Do In Java", + "http://rogeralsing.com/2008/12/07/genetic-programming-evolution-of-mona-lisa/": "Genetic Programming: Evolution of Mona Lisa \u00ab Roger Alsing Weblog", + "http://www.lab41.org/anything2vec/": "2Vec or Not 2Vec?", + "http://www.semanlink.net/doc/2021/01/goldfire_cognitive_search_%7C_ihs": "Goldfire Cognitive Search IHS Markit", + "http://lists.w3.org/Archives/Public/public-esw-thes/2012Oct/0003.html": "Tools for exporting SKOS to human readable form (HTML) from Peter Krantz on 2012-10-24 (public-esw-thes@w3.org from October 2012)", + "http://www.assemblee-nationale.fr/12/rap-info/i3061.asp": "Rapport d'information de M. Jean-Marie Rolland sur l'enseignement des disciplines scientifiques dans le primaire et le secondaire", + "https://karpathy.github.io/2019/04/25/recipe/": "A Recipe for Training Neural Networks (Andrej Karpathy blog)", + "http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin": "Sofie Van Landeghem: Entity linking functionality in spaCy (spaCy IRL 2019) - YouTube", + "https://aclweb.org/anthology/papers/C/C18/C18-1139/": "Contextual String Embeddings for Sequence Labeling - ACL Anthology (2018)", + "http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/a-goulet.html": "La revanche des bact\u00e9riophages sur CRISPR-Cas9 - CNRS", + "http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/": "Recurrent Neural Networks Tutorial, Part 1 \u2013 Introduction to RNNs WildML", + "https://www.youtube.com/watch?v=029RscqsNLk": "Reinhard Mey - Annabelle 1972 - YouTube", + "http://blogs.sun.com/bblfish/entry/kiwi_knowledge_in_a_wiki": "KiWi: Knowledge in a Wiki", + "http://www.irinnews.org/fr/ReportFrench.aspx?ReportId=75840": "IRIN - NIGER: Des rebelles accus\u00e9s de mener une campagne de terreur urbaine inspir\u00e9e de l\u2019Irak", + "https://arxiv.org/abs/1503.00759": "[1503.00759] A Review of Relational Machine Learning for Knowledge Graphs", + "https://www.youtube.com/watch?v=gUilOCTqPC4": "Jean Rouch \u2013 Les Ma\u00eetres Fous [1955] [1/2] - YouTube", + "http://www.semanlink.net/doc/2019/05/virtual_knowledge_graphs_an_ov": "Virtual Knowledge Graphs: An Overview of Systems and Use Cases MIT Press Journals (2019)", + "http://stackoverflow.com/questions/2954814/solr-and-natural-language-parsing-can-i-use-it": "lucene - SOLR and Natural Language Parsing - Can I use it? 
- Stack Overflow", + "http://www.lemonde.fr/planete/chat/2011/06/02/posez-vos-questions-a-pierre-rabhi_1531227_3244.html": "Pierre Rabhi : \"Vivre sobrement, c'est une forme de lib\u00e9ration\" - LeMonde.fr", + "https://en.wikipedia.org/wiki/Chiribiquete_National_Park#Gallery": "Chiribiquete National Park - Wikipedia", + "https://github.com/src-d/wmd-relax": "Calculates Word Mover's Distance Insanely Fast", + "http://www.semanlink.net/doc/2019/10/le_viager": "Le Viager", + "http://www.mkbergman.com/?p=447": "What is Linked Data? \u00bb AI3:::Adaptive Information", + "http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo": "[2004.05150] Longformer: The Long-Document Transformer", + "https://www.microsoft.com/en-us/research/publication/automatic-tag-recommendation-algorithms-for-social-recommender-systems/": "Automatic Tag Recommendation Algorithms for Social Recommender Systems - Microsoft Research (2009)", + "http://www.semanlink.net/doc/2021/04/frontiers_%7C_enabling_spike_base": "Enabling Spike-Based Backpropagation for Training Deep Neural Network Architectures", + "http://www.csee.umbc.edu/~hillol/NGDM07/abstracts/talks/MKirschenbaum.pdf": "The Remaking of Reading: Data Mining and the Digital Humanities", + "http://www.semanlink.net/doc/2021/04/eloge_des_eliminatoires_une_b": "\u00c9loge des \u00e9liminatoires \u2013 Une balle dans le pied", + "https://github.com/wikimedia/wikidata-query-gui/blob/master/examples/app/nearby.html#L79": "wikidata-query-gui/nearby.html at master \u00b7 wikimedia/wikidata-query-gui", + "http://www.semanlink.net/doc/2021/04/covid_19_%C2%AB_l%E2%80%99inculture_scient": "Covid-19 : \u00ab\u00a0L\u2019inculture scientifique des \u00e9lites fran\u00e7aises a des effets profonds sur la conduite des affaires de l\u2019Etat \u00bb", + "http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf": "TextRank: Bringing Order into Texts (2004)", + "http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag": "Efficient multi-lingual language model fine-tuning \u00b7 fast.ai NLP", + "http://www.ibm.com/developerworks/rational/library/basic-profile-linked-data/index.html": "Toward a Basic Profile for Linked Data", + "http://en.wikipedia.org/wiki/Zhang_Qian": "Zhang Qian - Wikipedia, the free encyclopedia", + "http://blog.erratasec.com/2016/06/etheriumdao-hack-similfied.html#.V3UTeo56_-k": "Errata Security: Ethereum/TheDAO hack simplified", + "https://www.tensorflow.org/extras/candidate_sampling.pdf": "What is Candidate Sampling", + "http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced": "[1909.04164] Knowledge Enhanced Contextual Word Representations", + "http://esw.w3.org/Foaf+ssl": "Foaf+ssl - ESW Wiki", + "http://paulgraham.com/95.html": "Let the Other 95% of Great Programmers In", + "https://www.nytimes.com/2019/01/25/technology/automation-davos-world-economic-forum.html": "The Hidden Automation Agenda of the Davos Elite - The New York Times", + "https://arxiv.org/abs/1806.05662": "[1806.05662] GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations", + "https://github.com/d3/d3": "D3: Data-Driven Documents", + "http://www.semanlink.net/doc/2021/01/weaviate": "Weaviate", + "http://www.lemonde.fr/m/article/2012/02/03/la-hongrie-rongee-par-la-haine_1637775_1575563.html#ens_id=1276800": "La Hongrie rong\u00e9e par la haine", + "https://arxiv.org/abs/1601.03764": "[1601.03764] Linear Algebraic Structure of Word Senses, with Applications to Polysemy", + 
"http://tempsreel.nouvelobs.com/rue89/rue89-chez-les-economistes-atterres/20140507.RUE0109/calculee-comme-la-depense-publique-la-depense-privee-depasserait-200-du-pib.html": "Calcul\u00e9e comme la d\u00e9pense publique, la d\u00e9pense priv\u00e9e d\u00e9passerait 200% du PIB - 8 mai 2014 - L'Obs", + "http://claudehartmann1.wixsite.com/clhartmann": "Claude Hartmann", + "http://www.apollon.uio.no/vis/art/2007_4/Artikler/The_Tree_of_Life": "Apollon: The Tree of Life Has Lost a Branch", + "https://webmasters.googleblog.com/2016/05/introducing-rich-cards.html": "Official Google Webmaster Central Blog: Introducing rich cards", + "http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_": "[2010.05234] A Practical Guide to Graph Neural Networks", + "http://eprints.ecs.soton.ac.uk/15614/1/camera-ready.pdf": "Managing URI Synonymity to Enable Consistent Reference on the Semantic Web", + "http://www.nytimes.com/2015/12/11/science/an-advance-in-artificial-intelligence-rivals-human-vision-abilities.html": "A Learning Advance in Artificial Intelligence Rivals Human Abilities - The New York Times", + "https://mailman.stanford.edu/pipermail/java-nlp-user/2009-November/000300.html": "[java-nlp-user] Stanford NER: confidence scores", + "http://grc.com/dos/grcdos.htm": "The Attacks on GRC.COM", + "http://www.lemonde.fr/planete/article/2014/04/23/dans-les-vergers-du-sichuan-les-hommes-font-le-travail-des-abeilles_4405686_3244.html": "Dans les vergers du Sichuan, les hommes font le travail des abeilles", + "http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_": "[1011.4088] An Introduction to Conditional Random Fields", + "http://wiki.apache.org/incubator/LindaProposal#Abstract": "LindaProposal - Incubator Wiki", + "http://www.scottaaronson.com/blog/?p=2756": "\u201cCan computers become conscious?\u201d: My reply to Roger Penrose", + "http://www.xs4all.nl/~kspaink/fishman/home.html": "Karin Spaink - The Fishman Affidavit: contents", + "https://www.wintellect.com/creating-machine-learning-web-api-flask/": "Creating a Machine Learning Web API with Flask - Wintellect", + "http://blog.wired.com/wiredscience/2008/09/biologists-on-t.html": "Biologists on the Verge of Creating New Form of Life Wired Science from Wired.com", + "https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials": "Gensim tutorials", + "http://media.tumblr.com/tumblr_l66nt4w0xq1qbh7fc.jpg": "Dosso", + "http://www.semanlink.net/doc/2020/08/le_centenaire_de_la_reconnaissa": "Le centenaire de la reconnaissance internationale du Kurdistan \u2013 Un si Proche Orient", + "http://www.semanlink.net/doc/2019/12/extraction_de_relation_via_la_v": "Extraction de relation via la validation de relation", + "http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate": "Knowledge-Based Short Text Categorization Using Entity and Category Embedding Springer for Research & Development (2019)", + "http://css.maxdesign.com.au/selectutorial/index.htm": "Selectutorial: CSS selectors", + "http://www.semweb.pro/talk/2474": "Linked Data et description du produit automobile (SemWeb.Pro)", + "http://www.nytimes.com/2013/11/18/science/developing-a-fax-machine-to-copy-life-on-mars.html": "Developing a Fax Machine to Copy Life on Mars - NYTimes.com", + "http://u.cs.biu.ac.il/~yogo/blackbox2018.pdf": "Trying to Understand Recurrent Neural Networks for Language Processing (slides)", + "http://km.aifb.uni-karlsruhe.de/ws/esoe2007": "International Workshop on Emergent Semantics and Ontology Evolution", + 
"http://www.semanlink.net/doc/2020/04/iterative_entity_alignment_with": "Iterative Entity Alignment with Improved Neural Attribute Embedding", + "http://www.semanlink.net/doc/2019/05/combining_knowledge_with_deep_c": "Combining Knowledge with Deep Convolutional Neural Networks for Short Text Classification (2017)", + "https://arxiv.org/abs/1602.04938": "[1602.04938] \"Why Should I Trust You?\": Explaining the Predictions of Any Classifier", + "http://www.semanlink.net/doc/2021/10/enterprise_knowledge_graph_foun": "Enterprise Knowledge Graph Foundation", + "http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p": "Named Entity Recognition with Pytorch Transformers \u2013 Pierre-Yves Vandenbussche", + "http://www.semanlink.net/doc/2020/02/hugo_la_legende_des_siecles": "\u00ab Le Mariage de Roland \u00bb, Victor Hugo, La L\u00e9gende des Si\u00e8cles, 1859.", + "http://events.linkeddata.org/ldow2008/": "Linked Data on the Web (LDOW2008) - Workshop at WWW2008, Beijing, China", + "https://blogs.oracle.com/bblfish/entry/temporal_relations": "Temporal Relations (The Sun BabelFish Blog)", + "http://en.wikipedia.org/wiki/General_Architecture_for_Text_Engineering": "General Architecture for Text Engineering (GATE) - Wikipedia, the free encyclopedia", + "http://pchere.blogspot.com/2005/02/absolutely-delicious-complete-tool.html": "Quick Online Tips: Absolutely Del.icio.us - Complete Tool Collection", + "https://medium.com/s/powertrip/universal-basic-income-is-silicon-valleys-latest-scam-fd3e130b69a0": "Universal Basic Income Is Silicon Valley\u2019s Latest Scam", + "http://www.semanlink.net/doc/2020/09/guide_sur_watchservice_dans_jav": "Guide sur WatchService dans Java NIO2", + "https://medium.com/the-daily-show/an-open-letter-to-my-parents-1174b1c14dcc#.k4yad3k7u": "AN OPEN LETTER TO MY PARENTS \u2014 The Daily Show \u2014 Medium", + "https://www.elastic.co/fr/blog/elastic-enterprise-search-beta1-released": "Introducing the Elastic Enterprise Search Beta: Search Everything, Anywhere Elastic Blog", + "http://www.wired.com/2016/02/ibm-and-microsoft-will-let-you-roll-your-own-blockchain/": "IBM and Microsoft Will Let You Roll Your Own Blockchain WIRED", + "http://www.lemonde.fr/idees/article/2013/03/25/les-moocs-a-l-assaut-du-mammouth_1853831_3232.html": "Les MOOCs \u00e0 l'assaut du mammouth", + "http://www.semanlink.net/doc/2021/08/china_is_forcing_tourists_to_in": "China Is Forcing Tourists to Install Text-Stealing Malware at its Border", + "http://www.w3.org/TR/skos-reference/": "SKOS Reference", + "http://webr3.org/blog/semantic-web/conflicting-requirements-when-creating-linked-data-applications/": "on the issue of conflicting requirements when creating linked data applications. 
\u2013 webr3.org", + "http://www.semanlink.net/doc/2021/08/quand_rome_a_ete_vaincue_par_de": "Quand Rome a \u00e9t\u00e9 vaincue par des r\u00e9fugi\u00e9s", + "http://www.semanlink.net/doc/2019/11/walker_bragman_sur_twitter_t": "Walker Bragman sur Twitter : \"This is how you respond when someone asks \u201chow will you pay for it?\u201d", + "https://www.npmjs.com/package/markdown-it-relativelink": "markdown-it-relativelink", + "http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien": "[1902.00751] Parameter-Efficient Transfer Learning for NLP", + "http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra": "Hugging Face sur Twitter : \"Transformers release of the Retrieval-Augmented Generation model in collaboration with @facebookai!\"", + "http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i": "Language, trees, and geometry in neural networks", + "http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fen.wikipedia.org%2Fwiki%2FWell-Tempered_Clavier%3E+%7D%0D%0A%7D": "wiki to dbpedia with sparql", + "http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa": "An Embarrassingly Simple Approach for Transfer Learning from Pretrained Language Models (NAACL 2019)", + "http://www.sciencedirect.com/science/article/pii/S0925231215014502": "Semantic expansion using word embedding clustering and convolutional neural network for improving short text classification - ScienceDirect", + "http://www.johnbreslin.com/blog/2008/03/25/the-semantic-web-enables-us-to-use-portals-in-a-more-intelligent-fashion-so-we-can-do-business-more-efficiently/": "\u201cThe semantic web enables us to use portals in a more intelligent fashion, so we can do business more efficiently\u201d at Cloudlands", + "http://www.rioscenarium.com.br/": "Rio Scenarium", + "http://validator.w3.org/": "The W3C Markup Validation Service", + "http://stackoverflow.com/questions/978061/http-get-with-request-body": "rest - HTTP GET with request body - Stack Overflow", + "http://www.paulgraham.com/gh.html": "Great Hackers", + "http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo": "[1906.04341] What Does BERT Look At? 
An Analysis of BERT's Attention", + "http://blog.ted.com/2014/03/15/sugata-mitra-opens-first-independent-school-in-the-cloud-in-india/": "Sugata Mitra opens his first independent learning lab in India TED Blog", + "https://tomaugspurger.github.io/sklearn-dask-tabular.html": "datas-frame \u2013 Tabular Data in Scikit-Learn and Dask-ML", + "http://jimbojw.com/wiki/index.php?title=Understanding_Hbase_and_BigTable": "Understanding HBase and BigTable - Jimbojw.com", + "http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l": "[1911.03814] Zero-shot Entity Linking with Dense Entity Retrieval", + "http://www.infoworld.com/d/big-data/cognitive-computing-can-take-the-semantic-web-the-next-level-235308": "Cognitive computing can take the semantic Web to the next level Big Data - InfoWorld", + "http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la": "[1805.04174] Joint Embedding of Words and Labels for Text Classification (ACL Anthology 2018)", + "https://static1.squarespace.com/static/54bf3241e4b0f0d81bf7ff36/t/55e9494fe4b011aed10e48e5/1441352015658/probability_cheatsheet.pdf": "Probability Cheatsheet", + "http://en.wikipedia.org/wiki/American_Gangster_(film)": "American Gangster", + "https://medium.com/@acrosson/summarize-documents-using-tf-idf-bdee8f60b71": "Summarize Documents using Tf-Idf \u2013 Alexander Crosson \u2013 Medium", + "http://semanticweb.com/sindicetech-helps-enterprises-build-private-linked-data-clouds_b30454": "SindiceTech Helps Enterprises Build Private Linked Data Clouds - Semanticweb.com", + "http://fr.wikipedia.org/wiki/Le_Festin_de_Babette": "Le Festin de Babette", + "http://ablvienna.wordpress.com/2011/12/01/using-dbpedia-to-generate-skos-thesauri/": "Using DBpedia to generate SKOS thesauri \u00ab About the social semantic web", + "http://www.descarga.com/cgi-bin/db/archives/Profile88?BEsrYwYo;;181": "Laba Sosseh", + "http://wit.tuwien.ac.at/people/michlmayr/addatag/": "WIT - The Add-A-Tag algorithm", + "http://www.semanlink.net/doc/2021/04/patrick_boucheron_histoire_de": "Patrick Boucheron - Histoire des pouvoirs en Europe occidentale, XIII\u1d49-XVI\u1d49 si\u00e8cle - Coll\u00e8ge de France", + "http://python-guide-pt-br.readthedocs.io/en/latest/writing/style/": "Code Style \u2014 The Hitchhiker's Guide to Python", + "http://bugbrother.blog.lemonde.fr/2011/02/04/les-services-de-renseignement-ne-disent-pas-merci-a-lhadopi/": "La NSA, la DGSE et la DCRI ne disent pas merci \u00e0 l\u2019Hadopi - BUG BROTHER - Blog LeMonde.fr", + "http://download.tensorflow.org/paper/whitepaper2015.pdf": "TensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems", + "http://www.semanlink.net/doc/2021/05/%C2%AB_le_%E2%80%9Cbiodiversite_scepticisme%E2%80%9D": "\u00ab\u00a0Le \u201cbiodiversit\u00e9-scepticisme\u201d, plus discret que celui contre le d\u00e9r\u00e8glement climatique, est en un sens bien plus inqui\u00e9tant\u00a0\u00bb", + "http://autourduciel.blog.lemonde.fr/2014/11/11/suivez-en-direct-latterrissage-de-philae-sur-la-comete-67p-le-12-novembre/": "Suivez en direct l\u2019atterrissage de Philae sur la com\u00e8te 67P le 12 novembre Autour du Ciel", + "https://www6.software.ibm.com/developerworks/education/x-xmlcss/": "Display XML with Cascading Stylesheets, Part 1: Use Cascading Stylesheets to display XML", + "http://aeon.co/magazine/science/pregnancy-is-a-battleground-between-mother-father-and-baby/": "Pregnancy is a war between mother and child \u2013 Suzanne Sadedin \u2013 Aeon", + 
"http://www.lemonde.fr/afrique/article/2017/08/01/le-jour-de-l-umuganda-tout-le-monde-travaille-au-rwanda_5167306_3212.html": "Le jour de l\u2019umuganda, tout le monde travaille au Rwanda", + "http://www.semanlink.net/doc/2020/08/sparse_local_embeddings_for_ext": "SLEEC: Sparse Local Embeddings for Extreme Multi-label Classification (2015)", + "http://bergie.iki.fi/blog/using_rdfa_to_make_a_web_page_editable/": "Using RDFa to make a web page editable - Henri Bergius", + "http://www.paulgraham.com/marginal.html": "The Power of the Marginal", + "http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo": "[1709.03933] Hash Embeddings for Efficient Word Representations", + "http://www.jenitennison.com/blog/node/94": "rdfQuery: Progressive Enhancement with RDFa Jeni's Musings", + "https://github.com/lanthaler/JsonLD": "lanthaler/JsonLD processor written in PHP, Github", + "http://www.semanlink.net/doc/2021/01/emily_m_bender_sur_twitter_": "Emily M. Bender sur Twitter : \"On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?\"", + "http://newsletter.ruder.io/": "NLP News Revue", + "https://www.cnrs.fr/occitanie-ouest/actualites/article/alerte-presse-les-plantes-graminees-peuvent-acquerir-les-genes-de-leurs": "CNRS - Les plantes gramin\u00e9es peuvent acqu\u00e9rir les g\u00e8nes de leurs voisines", + "http://comments.gmane.org/gmane.org.w3c.semantic-web/18585": "Knowledge Graph links to Freebase - W3C Semantic Web discussion list", + "http://www.lemonde.fr/societe/article/2008/11/26/les-mauvais-comptes-des-mineurs-delinquants_1123316_3224.html": "Les mauvais comptes des mineurs d\u00e9linquants", + "http://www.lemonde.fr/societe/article/2010/06/05/washington-a-la-conquete-du-9-3_1368266_3224.html": "Washington \u00e0 la conqu\u00eate du \"9-3\"", + "https://nordicapis.com/5-best-speech-to-text-apis/": "5 Best Speech-to-Text APIs Nordic APIs ", + "http://www.semanlink.net/doc/2020/12/julie_grollier_a_bio_inspired": "Julie Grollier, a (bio)inspired researcher CNRS News", + "http://www.semanlink.net/doc/2020/02/jeremy_howard_sur_twitter_th": "Jeremy Howard sur Twitter : \"The fastai paper (with @GuggerSylvain) covers v2...\"", + "https://www.youtube.com/watch?v=KR46z_V0BVw": "Sanjeev Arora on \"A theoretical approach to semantic representations\" - YouTube (2016)", + "http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate": "[2004.09095] The State and Fate of Linguistic Diversity and Inclusion in the NLP World", + "http://www.semanlink.net/doc/2021/04/henry_every_le_pirate_le_plus_": "Henry Every, le pirate le plus recherch\u00e9 du XVIIe\u00a0si\u00e8cle, refait surface en Nouvelle-Angleterre", + "http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai": "[2004.10964] Don't Stop Pretraining: Adapt Language Models to Domains and Tasks", + "http://news.bbc.co.uk/2/hi/americas/7460364.stm": "BBC NEWS Peru's 'copper mountain' in Chinese hands", + "http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb": "[1911.03876] Dynamic Neuro-Symbolic Knowledge Graph Construction for Zero-shot Commonsense Question Answering", + "http://blog.iks-project.eu/apache-stanbol-now-with-multi-language-support/": "Apache Stanbol now with multi-language support IKS Blog \u2013 The Semantic CMS Community", + "http://www.kyb.mpg.de/fileadmin/user_upload/files/publications/attachments/Luxburg07_tutorial_4488%5b0%5d.pdf": "A Tutorial on Spectral Clustering", + "http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr": "[1905.07854] KGAT: Knowledge Graph 
Attention Network for Recommendation", + "https://arxiv.org/abs/1511.07972": "[1511.07972] Learning with Memory Embeddings", + "http://www.semanlink.net/doc/2020/09/petar_velickovic_sur_twitter_": "Petar Veli\u010dkovi\u0107 sur Twitter : \"resources I'd recommend for getting started with Graph Neural Nets (GNNs),", + "http://www.blosxom.com/": "blosxom :: the zen of blogging :: documentation/users/configure/static.txt blosxom", + "http://lists.w3.org/Archives/Public/public-lod/2014Aug/0086.html": "URIs within URIs from Luca Matteis on 2014-08-22 (public-lod@w3.org from August 2014)", + "http://www.lespetitescases.net/dbpedia-en-action-la-suite": "Dbpedia en action la suite Les petites cases", + "https://hal.archives-ouvertes.fr/hal-01841594": " A Tri-Partite Neural Document Language Model for Semantic Information Retrieval (2018 - ESWC conference)", + "http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf": "[1911.05507] Compressive Transformers for Long-Range Sequence Modelling", + "https://www.genuitec.com/jersey-resteasy-comparison/": "Jersey vs. RESTEasy: A JAX-RS Implementation Comparison - Genuitec", + "https://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html": "Les sites arch\u00e9ologiques de Bura Asinda-Sikka au Niger", + "http://www.newrepublic.com/article/117844/googles-right-know-vs-europes-right-be-forgotten": "On Google's Right to Know vs. Europe's Right to Be Forgotten New Republic", + "http://googleresearch.blogspot.fr/2015/11/tensorflow-googles-latest-machine_9.html?m=1": "Research Blog: TensorFlow - Google\u2019s latest machine learning system, open sourced for everyone", + "http://gabrielecirulli.github.io/2048/": "2048", + "http://www.semanlink.net/doc/2019/07/%C2%AB_mauritius_leaks_%C2%BB_l%E2%80%99ile_qui": "\u00ab\u00a0Mauritius Leaks\u00a0\u00bb\u00a0: l\u2019\u00eele qui siphonne les rentr\u00e9es fiscales de l\u2019Afrique", + "http://leobard.twoday.net/stories/2817481/": "semantic weltbild 2.0: PhD step1: integrating data into the semantic desktop", + "http://developers.any23.org/": "Anything to Triples", + "http://www.semanlink.net/doc/2020/08/a_survey_of_hierarchical_classi": "A survey of hierarchical classification across different application domains (2011)", + "http://www.uea.ac.uk/~mga07vju/JWP.pdf": "One Hundred Years of Archaeology in Niger", + "http://www.w3.org/DesignIssues/ReadWriteLinkedData.html": "Read-Write linked data - Design Issues", + "http://wiki.ontoworld.org/": "Wiki@OntoWorld", + "http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo": "[2011.02260] Graph Neural Networks in Recommender Systems: A Survey", + "http://www.semanlink.net/doc/2020/05/aakash_kumar_nain_sur_twitter_": "Aakash Kumar Nain sur Twitter : \"I want to start reading about Graph NNs but I have two questions in my mind: 1. Applications of GNNs 2. Which paper should I start with?\" / Twitter", + "https://arxiv.org/abs/1503.08895": "[1503.08895] End-To-End Memory Networks", + "http://stackoverflow.com/questions/18038153/methods-to-output-confidence-score-from-stanford-classifier": "java - Method(s) to output confidence score from Stanford Classifier? 
- Stack Overflow", + "http://www.semanlink.net/doc/2019/12/devant_l%E2%80%99imminence_de_l%E2%80%99interdi": "Devant l\u2019imminence de l\u2019interdiction du chlorpyrifos\u00a0en Europe, les fabricants contre-attaquent", + "http://rogerdudler.github.io/git-guide/index.fr.html": "git - petit guide - no deep shit!", + "https://arxiv.org/abs/1904.08398": "[1904.08398] DocBERT: BERT for Document Classification", + "http://www.bbc.com/news/science-environment-30055383": "BBC News - Comet lander: Future of Philae probe 'uncertain'", + "http://landonf.bikemonkey.org/static/soylatte/": "SoyLatte: Java 6 Port for Mac OS X 10.4 and 10.5 (Intel)", + "http://semanticweb.com/the-semantic-web-for-the-sustainable-materials-lifecycle_b23335": "The Semantic Web For the Sustainable Materials Lifecycle - semanticweb.com", + "http://www.sitepoint.com/blogs/2005/12/22/mvc-and-web-apps-oil-and-water/": "SitePoint Blogs \u00bb MVC and web apps: oil and water", + "https://ai.googleblog.com/2018/07/machine-learning-in-google-bigquery.html": "Google AI Blog: Machine Learning in Google BigQuery (2018)", + "https://www.kaggle.com/zackakil/nlp-using-word-vectors-with-spacy-cldspn/code": "NLP using Word Vectors with Spacy - CLDSPN Kaggle", + "http://www.visteon.com/media/newsroom/2013/130517_story1.html": "Visteon Cockpit Concept Learns the Driver's HABIT", + "https://groups.google.com/forum/?fromgroups#!forum/lmf-users": "LMF Users - Google Groups", + "http://www.la-grange.net/2011/07/14/api-version": "API et versions", + "http://discoveryhub.co/": "Discovery Hub", + "http://jt400.sourceforge.net/": "JTOpen Overview", + "http://bugbrother.blog.lemonde.fr/2011/07/07/un-fichier-de-45m-de-gens-honnetes/": "Un fichier de 45M de \u00ab\u00a0gens honn\u00eates\u00a0\u00bb BUG BROTHER", + "https://simons.berkeley.edu/talks/sanjeev-arora-2016-11-15": "A Theoretical Approach to Semantic Coding and Hashing Simons Institute for the Theory of Computing (2016)", + "http://www.semanlink.net/doc/2021/10/detecting_duplicate_questions_": "Detecting Duplicate Questions (2019)", + "http://www.cinemovies.fr/fiche_cast.php?IDfilm=4020": "Les Aventures de Pinocchio", + "http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b": "[1909.01259] Neural Attentive Bag-of-Entities Model for Text Classification", + "http://www.semanlink.net/doc/2019/06/the_terrible_truth_about_amazon": "The Terrible Truth About Amazon Alexa and Privacy", + "http://www.marieclaire.com/politics/a21997/donald-trump-katy-tur/": "Katy Tur Talks Covering Donald Trump's Candidacy for NBC", + "http://www.cringely.com/2013/09/30/doubts-bitcoin/?utm_source=rss&utm_medium=rss&utm_campaign=doubts-bitcoin": "I, Cringely I have my doubts about Bitcoin", + "http://www.semanlink.net/doc/2020/01/the_berkeley_nlp_group": "The Berkeley NLP Group", + "http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra": "[1907.03950] Learning by Abstraction: The Neural State Machine", + "http://www.semanlink.net/doc/2021/02/olaf_hartig_sur_twitter_ques": "Olaf Hartig sur Twitter : \"Question: are there any production-ready tools for providing a SPARQL endpoint over a REST-based Web service?\"", + "http://www.polymer-project.org/": "Polymer", + "http://n2.talis.com/wiki/Platform_API": "Platform API - n\u00b2 wiki", + "http://www.semanlink.net/doc/2021/10/au_royaume_des_champignons": "Au royaume des champignons", + "http://morph.talis.com/": "Talis Semantic Web Formats Converter", + "http://demo.openlinksw.com/about/html/http://demo.openlinksw.com/Northwind/Customer/ALFKI": 
"Description of: http://demo.openlinksw.com/Northwind/Customer/ALFKI", + "http://dbpedia.org/search/": "Search DBpedia.org", + "http://www.lemonde.fr/opinions/article/2010/01/13/crise-financiere-comment-eviter-l-explosion-par-pierre-larrouturou_1291024_3232.html": "Crise financi\u00e8re : comment \u00e9viter l'explosion ?, par Pierre Larrouturou - LeMonde.fr", + "http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/": "Combining numerical and text features in (deep) neural networks - Digital Thinking", + "http://www.semanlink.net/doc/2019/05/state_of_the_art_neural_corefer": "State-of-the-art neural coreference resolution for chatbots", + "https://www.youtube.com/watch?v=DQKQKCe1xl0": "Douglas Rushkoff Distributed: A New OS for the Digital Economy SXSW Interactive 2016 - YouTube", + "http://www.semanlink.net/doc/2021/06/sante_environnement_%C2%AB_ou_s%E2%80%99a": "Sant\u00e9, environnement\u00a0: \u00ab\u00a0O\u00f9 s\u2019arr\u00eate le droit de dire la v\u00e9rit\u00e9, et o\u00f9 commence le d\u00e9nigrement\u00a0?\u00a0\u00bb", + "http://maple.cs.umbc.edu/": "MAPLE Lab @ UMBC Multi-Agent Planning and Learning", + "http://www.english-for-techies.net/": "ENGLISH\u00a0 FOR TECHIES (AND NON-TECHIES, TOO)", + "http://www.youtube.com/watch?v=BGS7SpI7obY": "Asimbonanga", + "https://openreview.net/forum?id=rJedbn0ctQ": "Zero-training Sentence Embedding via Orthogonal Basis OpenReview", + "http://kill3.wordpress.com/": "Managing Javascript Libraries as Maven WAR Overlays - A Needle in a Stack Trace", + "http://lists.w3.org/Archives/Public/public-hydra/2014May/0003.html": "RE: Newbie questions about Issue tracker demo API", + "http://en.wikipedia.org/wiki/HTTP_ETag": "HTTP ETag - Wikipedia, the free encyclopedia", + "http://lists.w3.org/Archives/Public/public-hydra/2013Jul/0010.html": "TemplatedLink", + "http://www.semanlink.net/doc/2021/10/how_to_extract_highlighted_part": "How to extract Highlighted Parts from PDF files - Stack Overflow", + "http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le": "[2011.06993] FLERT: Document-Level Features for Named Entity Recognition", + "http://www.nature.com/news/game-playing-software-holds-lessons-for-neuroscience-1.16979": "Game-playing software holds lessons for neuroscience : Nature News & Comment", + "http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster": "[2103.12953] Supporting Clustering with Contrastive Learning", + "http://www.w3.org/TR/skos-primer/": "SKOS Simple Knowledge Organization System Primer", + "https://www.theguardian.com/technology/blog/2013/mar/12/tim-berners-lee-drm-cory-doctorow": "What I wish Tim Berners-Lee understood about DRM Technology The Guardian", + "http://www.census.gov/ipc/www/worldhis.html": "Historical Estimates of World Population", + "http://www.thefigtrees.net/lee/blog/2011/09/saving_months_not_milliseconds.html": "Saving Months, Not Milliseconds: Do More Faster with the Semantic Web - TechnicaLee Speaking", + "http://www.rfi.fr/francais/actu/articles/091/article_53988.asp": "RFI - Les promesses de l\u2019uranium du Niger \u00e0 l\u2019\u00e9preuve", + "http://delicious-java.sourceforge.net/": "delicious java API", + "http://www.newscientist.com/channel/life/dn14094-bacteria-make-major-evolutionary-shift-in-the-lab.html": "Bacteria make major evolutionary shift in the lab -New Scientist", + "http://blog.semantic-web.at/2014/07/15/from-taxonomies-over-ontologies-to-knowledge-graphs/": "From Taxonomies over Ontologies to Knowledge Graphs The 
Semantic Puzzle", + "http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_": "[1909.03186] On Extractive and Abstractive Neural Document Summarization with Transformer Language Models", + "http://www.semanlink.net/doc/2019/08/sebastien_castellion": "S\u00e9bastien Castellion", + "http://www.semanlink.net/doc/2020/02/elastik_nearest_neighbors_ins": "ElastiK Nearest Neighbors", + "https://brinxmat.wordpress.com/2016/02/28/so-i-created-workable-json-ld/": "So I created workable JSON-LD Brinxmat's blog", + "http://www.cinema-francais.fr/les_films/films_d/films_daniel_norman_jacques/le_briseur_de_chaines.htm": "LE BRISEUR DE CHAINES", + "https://en.wikipedia.org/wiki/Match_Point": "Match Point", + "https://www.researchgate.net/publication/325251122_Patent_Document_Clustering_with_Deep_Embeddings": "[Jaeyoung2018] Patent Document Clustering with Deep Embeddings", + "https://link.springer.com/chapter/10.1007%2F978-3-319-60438-1_43": "Semantic Enriched Short Text Clustering SpringerLink", + "http://www.lemonde.fr/planete/article/2014/02/11/les-ogm-peu-cultives-en-europe-mais-massivement-importes_4363693_3244.html": "OGM : qui en produit, qui en importe en Europe ?", + "http://www.semanlink.net/doc/2020/12/michel_zecler_au_%C2%AB_monde_%C2%BB_%C2%AB_": "Michel Zecler au \u00ab\u00a0Monde\u00a0\u00bb\u00a0: \u00ab\u00a0Il fallait que ces trois policiers se sentent en confiance pour aller aussi loin dans leurs actes\u00a0\u00bb", + "http://research.microsoft.com/apps/pubs/?id=175447": "Destination Prediction by Sub-Trajectory Synthesis and Privacy Protection Against Such Prediction - Microsoft Research", + "https://rare-technologies.com/implementing-poincare-embeddings/": "Implementing Poincar\u00e9 Embeddings RARE Technologies", + "http://www.prestashop.com/": "PrestaShop Free Open-Source e-Commerce Software for Web 2.0", + "https://arxiv.org/abs/1710.04087": "[1710.04087] Word Translation Without Parallel Data", + "http://en.wikipedia.org/wiki/List_of_animals_by_number_of_neurons": "List of animals by number of neurons - Wikipedia, the free encyclopedia", + "http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_": "Chaitanya Joshi sur Twitter : \"Excited to share a blog post on the connection between #Transformers for NLP and #GraphNeuralNetworks\"", + "http://lists.w3.org/Archives/Public/public-rdfa-wg/2012May/0122.html": "Highlight RDFa with CSS from Niklas Lindstr\u00f6m on 2012-05-20 (public-rdfa-wg@w3.org from May 2012)", + "http://deeplearning4j.org/": "Deeplearning4j - Open-source, distributed deep learning for the JVM", + "http://www.semanlink.net/doc/2020/01/joint_intent_classification_and": "Joint Intent Classification and Slot Filling with Transformers (Jupyter Notebook Viewer)", + "https://www.microsoft.com/en-us/research/publication/a-ranking-approach-to-keyphrase-extraction/": "A Ranking Approach to Keyphrase Extraction - Microsoft Research (2009)", + "http://www.gigamonkeys.com/book/": "Practical Common Lisp", + "https://arxiv.org/abs/1506.08422": "[1506.08422] Topic2Vec: Learning Distributed Representations of Topics", + "http://nlp.stanford.edu/software/index.shtml": "The Stanford NLP (Natural Language Processing) Group / software", + "https://blog.slock.it/a-dao-counter-attack-613548408dd7#.ovxo55nv1": "A DAO Counter-Attack \u2014 Slock.it Blog", + "http://fr.wikipedia.org/wiki/Zarmas": "Zarmas - Wikip\u00e9dia", + "http://www.meta-environment.org/doc/api/aterm-java/index.html?overview-summary.html": "aterm javadoc", + 
"http://www.bbc.co.uk/blogs/thereporters/maggieshiels/2010/05/the_antifacebook.html": "dot.Maggie: The anti-Facebook", + "http://www.openlinksw.com/blog/~kidehen/?id=1384": "Linked Data in Action: Library of Congress", + "http://ipython.org/": "IPython", + "https://arxiv.org/abs/1812.04616": "[1812.04616] Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs", + "http://jotterbach.github.io/2016/03/24/Principal_Component_Analysis/": "Principal Component Analysis (PCA) for Feature Selection and some of its Pitfalls \u00b7 Johannes Otterbach", + "http://www.qwant.com/": "", + "http://rapid-i.com/wiki/index.php?title=Integrating_RapidMiner_into_your_application": "Integrating RapidMiner into your application - Rapid-I-Wiki", + "https://www.depends-on-the-definition.com/": "Depends on the definition - it's about machine learning, data science and more", + "http://www.semanlink.net/doc/2021/10/zexuan_zhong_sur_twitter_d": "Zexuan Zhong sur Twitter : ...Does this really mean dense models are better? No Our #EMNLP2021 paper shows dense retrievers even fail to answer simple entity-centric questions", + "http://ceur-ws.org/Vol-840/03-paper-26.pdf": "Semantic CMS and Wikis as Platforms for Linked Learning", + "http://www.awesometapes.com/": "Awesome Tapes From Africa", + "http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_easy": "Nils Reimers sur Twitter : EasyNMT Easy-to-use (3 lines of code), state-of-the-art neural machine translations", + "http://www.w3.org/blog/news/archives/3943": "Draft Model for Tabular Data and Metadata on the Web, and a Draft Metadata Vocabulary for Tabular Data Published W3C News", + "http://www.semanlink.net/doc/2020/06/softmax_classifier_cs231n_conv": "Softmax classifier (CS231n Convolutional Neural Networks for Visual Recognition)", + "http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera": "[1803.07828] Expeditious Generation of Knowledge Graph Embeddings", + "http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri": "[1804.03235] Large scale distributed neural network training through online distillation", + "http://ruiyan.me/pubs/tutorial-emnlp18.pdf": "Deep Chit-Chat: deep learning for chatbots (EMNLP 2018 Tutorial)", + "http://www.infoworld.com/d/adventures-in-it/13-best-practices-it-outsourcing-034": "13 best practices for IT outsourcing Adventures in IT - InfoWorld", + "http://news.bbc.co.uk/1/hi/sci/tech/4167040.stm": "BBC NEWS Science/Nature Scientists hope to ease GM fears", + "http://www.sauverledarfour.org/appel.php": "Sauver Le Darfour - Appel", + "http://www.lemonde.fr/festival/article/2015/08/05/demande-a-la-poussiere_4712519_4415198.html": "La conqu\u00eate de l\u2019ouest (de l\u2019Afrique) : Demande \u00e0 la poussi\u00e8re", + "https://nlp.stanford.edu/pubs/wang-manning-ijcnlp13-nonlinear.pdf": "Effect of Non-linear Deep Architecture in Sequence Labeling", + "http://opensourceconnections.com/blog/2013/06/07/search-as-you-type-with-solr/": "Search-As-You-Type with Solr", + "https://metadatacenter.org/": "Home CEDAR - Center for Expanded Data Annotation and Retrieval", + "http://www.semanlink.net/doc/2020/02/distilling_bert_models_with_spa": "Distilling BERT models with spaCy - Towards Data Science (2019)", + "http://news.bbc.co.uk/2/hi/science/nature/4467676.stm": "BBC NEWS - Probe 'gathers asteroid material'", + "https://livebook.manning.com/#!/book/deep-learning-with-python": "liveBook - Deep Learning with Python", + "http://www.pandia.com/sew/481-gartner.html": "\u00bb Google: one 
million servers and counting", + "http://www.semanlink.net/doc/2021/06/yake_keyword_extraction_from_s": "YAKE! Keyword extraction from single documents using multiple local features (2019)", + "https://aclanthology.coli.uni-saarland.de/papers/D18-1482/d18-1482": "Word Mover's Embedding: From Word2Vec to Document Embedding (2018)", + "https://jakevdp.github.io/PythonDataScienceHandbook/02.02-the-basics-of-numpy-arrays.html": "The Basics of NumPy Arrays Python Data Science Handbook", + "https://stackoverflow.com/questions/17071871/select-rows-from-a-dataframe-based-on-values-in-a-column-in-pandas": "python - Select rows from a DataFrame based on values in a column in pandas - Stack Overflow", + "http://mobile.reuters.com/article/idUSBRE9BJ1C220131220?irpc=932": "Reuters - Secret contract tied NSA and security industry pioneer", + "http://developer.yahoo.net/yui/": "Yahoo! UI Library", + "http://www.veotag.com/": "veotag", + "http://lists.wikimedia.org/pipermail/wikidata-l/2012-August/000908.html": "Export of Wikidata in RDF", + "http://wordle.net/": "Wordle - Beautiful Word Clouds", + "http://books.google.com/books?id=Ru5LT3RIeoUC&pg=PA165&dq=%22jean-paul+cardinal%22&lr=&ei=DC_pSM2aIYmUzAS1gvTlDg&hl=fr&sig=ACfU3U3Y3-UViYESab_eG0X6o4h704SGGg#PPA165,M1": "On two iterative methods for approximating the roots of a polynomial", + "http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat": "[cmp-lg/9511007] Using Information Content to Evaluate Semantic Similarity in a Taxonomy (1995)", + "http://www.semanlink.net/doc/2019/11/neutrinos_lead_to_unexpected_di": "Neutrinos Lead to Unexpected Discovery in Basic Math Quanta Magazine", + "http://bibliontology.com/": "Bibliographic Ontology Specification", + "https://www.w3.org/blog/2017/02/on-eme-in-html5/": "On EME in HTML5 W3C Blog", + "http://www.amundsen.com/blog/archives/1072": "mca blog [Hypermedia: data as a first-class element]", + "http://passeurdesciences.blog.lemonde.fr/2014/05/11/qui-mangeait-qui-il-y-a-500-millions-dannees/": "Qui mangeait qui il y a 500 millions d\u2019ann\u00e9es Passeur de sciences", + "http://www.semanlink.net/doc/2019/11/camembert": "CamemBERT", + "https://arxiv.org/abs/1811.06031": "[1811.06031] A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks", + "http://www.studytrails.com/java/java8/Java8_Lambdas_FunctionalProgramming/": "Java 8 - tutorial", + "http://code.google.com/p/jsdoc-toolkit/": "jsdoc-toolkit", + "http://eductice.inrp.fr/EducTice/ressources/journees-scientifiques/EWFE2012/": "Emerging Web Technologies, Facing the Future of Education \u2014 EducTice", + "http://leobard.twoday.net/stories/2879108/": "semantic weltbild 2.0: PhD step2: the research question and how can I answer it (is it possible to write a PhD on gnowsis?)", + "http://idealliance.org/proceedings/xtech05/papers/02-07-04/": "Connecting Social Content Services using FOAF, RDF and REST", + "http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models": "[2003.08271] Pre-trained Models for Natural Language Processing: A Survey", + "http://www.lemonde.fr/afrique/article/2017/03/28/pourquoi-les-chinois-veulent-la-peau-des-anes-africains_5101859_3212.html": "Pourquoi les Chinois veulent la peau des \u00e2nes africains", + "https://dl.acm.org/citation.cfm?doid=3178876.3186024": "Weakly-supervised Relation Extraction by Pattern-enhanced Embedding Learning", + "https://medium.com/@aneesha/visualising-top-features-in-linear-svm-with-scikit-learn-and-matplotlib-3454ab18a14d": "Visualising Top Features in 
Linear SVM with Scikit Learn and Matplotlib", + "http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr": "[1807.00082] Amanuensis: The Programmer's Apprentice", + "http://arxiv.org/abs/1312.6184v5": "[1312.6184] Do Deep Nets Really Need to be Deep?", + "https://www.w3.org/community/gao/2017/09/12/toyota-motor-europe-use-of-schema-org-and-auto-schema-org-vocabularies/": "Toyota Motor Europe use of schema.org and auto.schema.org vocabularies Automotive Ontology Community Group", + "http://www.hotel-la-desirade.com/": "HOTEL LA DESIRADE Belle Ile en mer : chambres et suites dans un h\u00f4tel-village", + "http://www.bbc.com/news/technology-30691393": "BBC News - CES 2015: Toyota opens up hydrogen patents", + "http://www.liberation.fr/culture/01012357658-petite-poucette-la-generation-mutante": "Petite Poucette, la g\u00e9n\u00e9ration mutante", + "https://www.researchgate.net/publication/221630712_Using_Machine_Learning_to_Support_Continuous_Ontology_Development": "Using Machine Learning to Support Continuous Ontology Development (2010)", + "http://blog.semantic-web.at/2011/05/19/seevl-explore-the-cultural-universe-based-on-semantic-web-technologies/": "The Semantic Puzzle Seevl: Explore the cultural universe based on semantic web technologies", + "http://www.speech.sri.com/projects/srilm/": "SRILM - The SRI Language Modeling Toolkit", + "http://www.lemonde.fr/pixels/article/2017/12/12/d-anciens-cadres-de-facebook-expriment-leur-culpabilite-d-avoir-contribue-a-son-succes_5228538_4408996.html": "D\u2019anciens cadres de Facebook expriment leur \u00ab\u00a0culpabilit\u00e9\u00a0\u00bb d\u2019avoir contribu\u00e9 \u00e0 son succ\u00e8s", + "http://bdewilde.github.io/blog/2014/09/23/intro-to-automatic-keyphrase-extraction/": "Intro to Automatic Keyphrase Extraction", + "http://maps.google.com/maps?f=q&hl=fr&q=niamey&ie=UTF8&z=14&ll=13.50582,2.107658&spn=0.062676,0.103168&t=h&om=1": "GoogleMap Niamey", + "http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l": "[1812.05944] A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments", + "https://www.wired.com/story/itunes-downloads-https-encryption/": "iTunes Doesn't Encrypt Downloads\u2014on Purpose WIRED", + "https://arxiv.org/abs/1809.00782": "[1809.00782] Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text", + "https://www.youtube.com/watch?time_continue=3456&v=5qf_MZX0YCw": "Christopher Manning - \"Building Neural Network Models That Can Reason\" (TCSDLS 2017-2018) - YouTube", + "http://semanticweb.com/rdfa-1-1-lite_b24088#more-24088": "Introduction to: RDFa 1.1 Lite - semanticweb.com", + "http://code.google.com/p/rdfquery/source/browse/trunk/jquery.rdf.turtle.js?r=182": "jquery.rdf.turtle.js - rdfquery - RDF processing in your browser - Google Project Hosting", + "https://arxiv.org/abs/1806.01261": "[1806.01261] Relational inductive biases, deep learning, and graph networks", + "http://www.semanlink.net/doc/2019/09/pourquoi_le_travail_est_il_deve": "Pourquoi le travail est-il devenu absurde ? 
\u2013 InternetActu", + "https://www.nitrd.gov/nitrdgroups/images/9/96/OKN_Moore_Guha.pdf": "Open Knowledge Network", + "https://www.talentbuddy.co/blog/building-with-node-js-at-netflix/": "Building With Node.js At Netflix", + "http://www.lemonde.fr/technologies/article/2014/04/10/big-brother-ce-vendeur_4399335_651865.html": "Comment les sites de commerce nous manipulent", + "http://www.semanlink.net/doc/2021/01/gaspard_koenig": "Gaspard Koenig", + "http://aclweb.org/anthology/C18-1139": "Contextual String Embeddings for Sequence Labeling (2018)", + "http://inverseprobability.com/talks/notes/gaussian-processes.html": "Gaussian Processes", + "https://opendata.stackexchange.com/questions/3378/french-equivalent-of-the-brown-corpus": "French equivalent of the brown corpus - Open Data Stack Exchange", + "http://www.semanlink.net/doc/2019/05/exposition_royaumes_oublies_": "Exposition - Royaumes oubli\u00e9s - De l'empire hittite aux Aram\u00e9ens Mus\u00e9e du Louvre Paris", + "http://www.semanlink.net/doc/2020/12/google_ai_blog_reformer_the_e": "Google AI Blog: Reformer: The Efficient Transformer", + "http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a": "[2002.08909] REALM: Retrieval-Augmented Language Model Pre-Training", + "http://www.dw-world.de/dw/0,,2617,00.html": "Apprendre l\u2019allemand Deutsche Welle", + "http://www.semanlink.net/doc/2019/07/pipelines_and_composite_estimat": "Pipelines and composite estimators / ColumnTransformer for heterogeneous data \u2014 scikit-learn documentation", + "http://www.opposingviews.com/i/technology/eu-vp-aaron-swartz-if-our-laws-hold-back-benefits-openness-we-should-change-those-laws": "European Commission VP Neelie Kroes Weighs in on Aaron Swartz", + "http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_": "[2106.04098] Ultra-Fine Entity Typing with Weak Supervision from a Masked Language Model", + "https://github.com/docker-solr/docker-solr/blob/master/Docker-FAQ.md": "docker-solr/Docker-FAQ.md at master \u00b7 docker-solr/docker-solr", + "https://arxiv.org/abs/1607.01759": "[1607.01759] Bag of Tricks for Efficient Text Classification", + "http://www.hydra-cg.com/": "Hydra W3C Community Group", + "https://github.com/thunlp/OpenKE": "thunlp/OpenKE: An Open-Source Package for Knowledge Embedding (KE)", + "https://aclanthology.coli.uni-saarland.de/papers/D18-1011/d18-1011": "Associative Multichannel Autoencoder for Multimodal Word Representation (2018)", + "http://online.wsj.com/article/SB10001424053111903480904576512250915629460.html": "Marc Andreessen on Why Software Is Eating the World - WSJ.com", + "http://www.bbc.co.uk/blogs/radiolabs/2009/04/brands_series_categories_and_t.shtml": "BBC - Radio Labs: Brands, series, categories and tracklists on the new BBC Programmes", + "http://esw.w3.org/topic/SweoIG/TaskForces/Use_Cases": "Practical Semantic Web Use Cases on ESW Wiki", + "http://deeplearning.net/software_links/": "Software links \u00ab Deep Learning", + "http://i.imgur.com/dAtcCfH.gif": "snake byte", + "http://lists.w3.org/Archives/Public/semantic-web/2008May/0078.html": "Managing Co-reference (Was: A Semantic Elephant?)", + "https://www.lemonde.fr/societe/article/2018/12/15/sur-les-ronds-points-les-gilets-jaunes-a-la-croisee-des-chemins_5397928_3224.html": "\u00ab\u00a0Gilets jaunes\u00a0\u00bb : \u00ab La r\u00e9volte des ronds-points \u00bb, par Florence Aubenas", + "http://techoop.insite.coop/content/configuration-avancee-du-moteur-solr-0": "Configuration avanc\u00e9e du moteur Solr techoop", + 
"http://www.semanlink.net/doc/2020/05/neruda_film_": "Neruda (film)", + "http://www.semanlink.net/doc/2021/07/google_ai_blog_from_vision_to_": "Google AI Blog: From Vision to Language: Semi-supervised Learning in Action\u2026at Scale", + "https://www.quantamagazine.org/quantas-year-in-biology-2018-20181221/": "Quanta\u2019s Year in Biology (2018) Quanta Magazine", + "http://www.pbs.org/cringely/pulpit/2006/pulpit_20061102_001174.html": "I, Cringely . The Pulpit . The $200 Billion Lunch PBS", + "http://passeurdesciences.blog.lemonde.fr/2012/05/28/les-humains-sont-apparentes-aux-virus/": "\u00ab\u00a0Les humains sont apparent\u00e9s aux virus\u00a0\u00bb Passeur de sciences", + "http://stackoverflow.com/questions/4869815/importing-a-maven-project-into-eclipse-from-git": "importing a maven project into eclipse from git - Stack Overflow", + "https://github.com/RaRe-Technologies/gensim/blob/cc74b668ccbbfd558d5a54050c4489e6e06fed3d/docs/notebooks/gensim_news_classification.ipynb": "News classification with topic models in gensim", + "http://www.semanlink.net/doc/2021/09/%C2%AB_l%E2%80%99enjeu_environnemental_est_d": "\u00ab\u00a0L\u2019enjeu environnemental est d\u00e9sormais au c\u0153ur d\u2019une rupture du pacte d\u00e9mocratique\u00a0\u00bb", + "http://www.astrosurf.com/luxorion/bio-role-virus-evolution2.htm": "Le r\u00f4le des virus dans l'\u00e9volution", + "http://www.semanlink.net/doc/2019/08/siamese_neural_networks_for_one": "Siamese Neural Networks for One-shot Image Recognition (2015)", + "http://www.afdb.org/en/news-and-events/article/africas-information-highway-the-afdb-launches-open-data-platforms-for-20-african-countries-11604/": "Africa\u2019s Information Highway - The AfDB Launches Open Data Platforms for 20 African Countries - African Development Bank", + "http://twill.apache.org/": "Apache Twill \u2013 Home", + "https://alexgkendall.com/computer_vision/bayesian_deep_learning_for_safe_ai/": "Deep Learning Is Not Good Enough, We Need Bayesian Deep Learning for Safe AI - Home", + "http://keshif.me/demo/VisTools": "Visualization Tools", + "http://www.bbc.co.uk/news/technology-16306742": "BBC News - British teenage designer of Summly app hits jackpot", + "http://dig.csail.mit.edu/breadcrumbs/node/149": "JavaScript RDF/XML Parser", + "http://n2.talis.com/wiki/RDF_JSON_Specification": "RDF JSON Specification - n\u00b2 wiki", + "http://www.youtube.com/watch?v=Hjc5H1Blw6g": "\"O carnaval \u00c9 a maior caricatura Na folia O povo esquece a amargura\" (Salgueiro - Samba-enredo 1983)", + "http://www.cs.umd.edu/hcil/piccolo/index.shtml": "Piccolo Home Page", + "http://lists.w3.org/Archives/Public/www-tag/2005Jun/0039.html": "[httpRange-14] Resolved from Roy T. 
Fielding on 2005-06-19 (www-tag@w3.org from June 2005)", + "http://www.semanlink.net/doc/2021/03/ringgaard_sling_sling_a_natu": "ringgaard/sling: SLING - A natural language frame semantics parser", + "http://www.w3.org/2004/02/skos/mapping.rdf": "SKOS Mapping [RDF/OWL Description]", + "http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int": "[2006.13365] Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework", + "http://www.kylescholz.com/blog/2006/06/force_directed_graphs_in_javas.html": "kylescholz.com :: blog: Force Directed Graphs in Javascript?", + "http://passeurdesciences.blog.lemonde.fr/2012/04/29/la-vengeance-de-poseidon-etait-bien-un-tsunami/": "La vengeance de Pos\u00e9idon \u00e9tait bien un tsunami Passeur de sciences", + "http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_": "[1812.06280] Wikipedia2Vec: An Efficient Toolkit for Learning and Visualizing the Embeddings of Words and Entities from Wikipedia", + "http://www.gillesbalmisse.com/annuaire/": "Gilles Balmisse - Annuaire des outils de knowledge management, de travail collaboratif et de veille", + "https://aclanthology.coli.uni-saarland.de/papers/D18-1360/d18-1360": "Multi-Task Identification of Entities, Relations, and Coreference for Scientific Knowledge Graph Construction", + "http://www2013.org/2013/04/03/we-need-people-to-translate-the-whole-web-luis-von-ahn-a-visionary-of-human-computation/": "\u201cWe need people to translate the whole Web\u201d: Luis Von Ahn WWW 2013 \u2013 Rio de Janeiro, Brazil", + "http://kkovacs.eu/cassandra-vs-mongodb-vs-couchdb-vs-redis": "Cassandra vs MongoDB vs CouchDB vs Redis vs Riak vs HBase vs Couchbase vs Hypertable vs ElasticSearch vs Accumulo vs VoltDB vs Scalaris comparison :: Software architect Kristof Kovacs", + "http://nlp.town/blog/ner-and-the-road-to-deep-learning/": "Named Entity Recognition and the Road to Deep Learning (2017)", + "http://dannyayers.com/2012/02/26/Everyone-has-a-Graph-Store": "Danny on : Everyone has a Graph Store", + "http://brandonrose.org/clustering": "Document Clustering with Python", + "http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int": "[1902.10909] BERT for Joint Intent Classification and Slot Filling", + "http://www.pbs.org/cringely/pulpit/2008/pulpit_20080321_004574.html": "I, Cringely . The Pulpit . 
War of the Worlds PBS", + "http://www.epimorphics.com/web/projects/linked-data-api": "Linked data API Epimorphics", + "http://www.thefigtrees.net/lee/blog/2006/06/exploring_the_sparql_clipboard.html": "TechnicaLee Speaking: Exploring the SPARQL Clipboard Demo", + "http://lists.w3.org/Archives/Public/semantic-web/2010Aug/0115.html": "VSO / Goodrelations", + "https://blog.jcoglan.com/2011/03/05/translation-from-haskell-to-javascript-of-selected-portions-of-the-best-introduction-to-monads-ive-ever-read/": "Translation from Haskell to JavaScript of selected portions of the best introduction to monads I've ever read \u2013 The If Works", + "http://www.europarl.europa.eu/RegData/etudes/STUD/2017/595374/IPOL_STU(2017)595374_EN.pdf": "Economic Impact of Brexit on the EU27", + "https://arxiv.org/abs/1709.08568": "[1709.08568] The Consciousness Prior", + "http://www.semanlink.net/doc/2019/06/kokopelli_association_": "Kokopelli (association)", + "http://www.semanlink.net/doc/2021/06/hausanlp_research_group": "HausaNLP Research Group", + "http://www.xml.com/pub/a/2007/02/14/introducing-rdfa.html": "XML.com: Introducing RDFa", + "http://www.besthistorysites.net/AncientBiblical_Africa.shtml": "www.besthistorysites.net: Ancient/Biblical - Africa", + "http://blogs.talis.com/nodalities/2007/07/grddl_specifications_and_quick.php": "Nodalities: GRDDL Specifications (and Quick Reference)", + "https://www.atlantis-press.com/journals/ijcis/25868611": "A Supervised Requirement-oriented Patent Classification Scheme Based on the Combination of Metadata and Citation Information (2015)", + "http://courses.blockgeeks.com/": "Blockgeeks", + "http://ecologie.blog.lemonde.fr/2012/01/18/a-t-on-atteint-un-pic-des-objets/": "A-t-on atteint un \u00ab\u00a0pic des objets\u00a0\u00bb ? Eco(lo)", + "http://www.semanlink.net/doc/2020/07/bert_word_embeddings_tutorial_%C2%B7": "BERT Word Embeddings Tutorial \u00b7 Chris McCormick", + "http://www.semanlink.net/doc/2019/09/how_i_track_and_actually_learn_": "How I Track and Actually Learn New Things - Stevie Chancellor - Medium", + "https://en.wikipedia.org/wiki/Django_Unchained": "Django Unchained", + "http://animaux.blog.lemonde.fr/2013/03/12/peut-on-guerir-le-diable-du-cancer/": "Peut-on gu\u00e9rir le diable du cancer? 
Un \u00e9l\u00e9phant dans mon salon",
+ "http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_": "[2007.00849] Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge",
+ "http://www.newscientist.com/article.ns?id=mg18625054.000": "Quantum computer springs a leak - Technology Print New Scientist",
+ "http://ruder.io/semi-supervised/": "An overview of proxy-label approaches for semi-supervised learning",
+ "http://www.semanlink.net/doc/2019/12/mozilla_deepspeech_a_tensorflo": "mozilla/DeepSpeech: A TensorFlow implementation of Baidu's DeepSpeech architecture",
+ "http://bnode.org/blog/2008/07/16/sparqlscript-semantic-mashups-made-easy": "SPARQLScript - Semantic Mashups made easy - benjamin nowack's blog",
+ "http://www.semanlink.net/doc/2019/08/rick_wierenga_about_fast_ai": "Rick Wierenga's blog posts about fast.ai",
+ "http://www.iesl.cs.umass.edu/data/wiki-links": "Wikilinks - Information Extraction and Synthesis Laboratory",
+ "http://www.mkbergman.com/?page_id=325": "AI3\u2019s Comprehensive Listing of Semantic Web and Related Tools",
+ "http://internetactu.blog.lemonde.fr/2013/10/18/la-nouvelle-ecologie-du-temps/": "La nouvelle \u00e9cologie du temps InternetActu",
+ "http://www.clickz.com/clickz/column/2303735/googles-hummingbird-update-friend-or-foe-of-the-marketing-executive": "Google's Hummingbird Update: Friend or Foe of the Marketing Executive? ClickZ",
+ "http://lists.w3.org/Archives/Public/public-lod/2012Mar/0339.html": "Re: See Other from Hugh Glaser",
+ "http://www.lespetitescases.net/de-quoi-le-Big-Data-est-il-le-nom": "De quoi le Big Data est-il le nom ? Les petites cases",
+ "http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep": "Language and Perception in Deep Learning - Florian Strub DeepMind, Univ. Lille, Inria",
+ "http://www.semanlink.net/doc/2019/10/document_embedding_techniques": "Document Embedding Techniques",
+ "http://www.semanlink.net/doc/2020/03/cs294_158_sp20_deep_unsupervise": "CS294-158-SP20 Deep Unsupervised Learning Spring 2020",
+ "http://www.bloombergview.com/articles/2014-12-29/welcome-to-the-era-of-chinese-outsourcing": "Say Goodbye to 'Made in China' - Bloomberg View",
+ "http://www.co-ode.org/": "CO-ODE",
+ "http://www.semanlink.net/doc/2020/11/20_french_ai_startups_to_watch_": "20 French AI startups to watch Sifted",
+ "http://www.semanlink.net/doc/2021/09/modeling_ai_on_the_language_of_": "Modeling AI on the Language of Brain Circuits and Architecture Wu Tsai Neurosciences Institute",
+ "http://fr.slideshare.net/fpservant/ldow2013": "Describing Customizable Products on the Web of Data (LDOW 2013)",
+ "http://torrez.us/archives/2007/05/17/531/": "Elias Torres \u00bb Blog Archive \u00bb Operator Overload",
+ "http://www.semanlink.net/doc/2020/03/linkeddatahub_atomgraph_s_ope": "LinkedDataHub - AtomGraph's open-source Knowledge Graph management system",
+ "http://www.securityweek.com/hackers-used-sophisticated-smb-worm-tool-attack-sony": "Hackers Used Sophisticated SMB Worm Tool to Attack Sony SecurityWeek.Com",
+ "http://www.semanlink.net/doc/2021/01/clip_connecting_text_and_images": "CLIP: Connecting Text and Images",
+ "http://binaire.blog.lemonde.fr/2015/03/27/gerard-huet-la-poesie-du-logiciel/": "Po\u00e9sie et esth\u00e9tisme du logiciel : interview de G\u00e9rard Huet binaire",
+ "http://www.semanlink.net/doc/2020/10/quand_l_histoire_fait_dates_a": "Quand l'histoire fait dates - arte.tv",
+ "http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex": "X-BERT: eXtreme Multi-label Text Classification using Bidirectional Encoder Representations from Transformers",
+ "http://www.zones-sensibles.org/index.php?mod=auteurs&a=06": "Zones sensibles",
+ "http://learningsys.org/nips17/assets/slides/dean-nips17.pdf": "Machine Learning for Systems and Systems for Machine Learning (NIPS 2017)",
+ "http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_": "\u00ab Nous avons assist\u00e9 \u00e0 l\u2019effondrement de l\u2019Etat \u00bb : des maires de grandes villes racontent les premiers mois de la pand\u00e9mie - par Vanessa Schneider (Le Monde)",
+ "http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac": "[1910.03524] Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs",
+ "http://www.wired.com/wiredscience/2013/07/mrsa-st398-2/": "More on MRSA on Farms and in Farm Workers, and the Arguments for and Against - Wired Science",
+ "http://searchcio.techtarget.com/news/950602/Top-10-risks-of-offshore-outsourcing": "Top 10 risks of offshore outsourcing",
+ "http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa": "The knowledge graph as the default data model for learning on heterogeneous knowledge (2017)",
+ "http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric": "[1904.13001] Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine",
+ "http://www.france-universite-numerique.fr/": "France Universit\u00e9 Num\u00e9rique - D\u00e9couvrir, apprendre et r\u00e9ussir",
+ "http://www.npr.org/2012/01/01/144550920/physicists-seek-to-lose-the-lecture-as-teaching-tool?sc=fb&cc=fp": "Physicists Seek To Lose The Lecture As Teaching Tool : NPR",
+ "http://ec.europa.eu/culture/library/publications/2014-heritage-communication_en.pdf": "Towards an integrated approach to cultural heritage for Europe",
+ "https://www.w3.org/Data/events/data-ws-2019/papers.html": "W3C Workshop on Web Standardization for Graph Data",
+ "http://www.semanlink.net/doc/2019/06/transports_la_chine_prepare_s": "Transports : la Chine pr\u00e9pare sa \u00ab soci\u00e9t\u00e9 de l\u2019hydrog\u00e8ne \u00bb - Le Parisien",
+ "http://confluence.atlassian.com/display/DOC/Configuring+Tomcat's+URI+encoding": "Configuring Tomcat's URI encoding",
+ "http://www.pnetliteratura.pt/noticia.asp?id=1555": "Chefes Rui Paula e Ofir Oliveira unem sabores selvagens da Amaz\u00f3nia aos tradicionais do Douro",
+ "http://www.semanlink.net/doc/2020/07/coronavirus_un_rapport_au_vit": "Coronavirus\u00a0: un rapport au vitriol des pompiers d\u00e9nonce la gestion de la crise",
+ "https://arxiv.org/abs/1703.03129": "[1703.03129] Learning to Remember Rare Events",
+ "http://www.semanlink.net/doc/2020/05/coronavirus_qui_ont_ete_les_c": "Coronavirus\u00a0: qui ont \u00e9t\u00e9 les contamin\u00e9s du confinement\u00a0?",
+ "http://www.liberation.fr/actualite/societe/271050.FR.php": "La \u00abconsonance isra\u00e9lite\u00bb r\u00e9veille le z\u00e8le administratif",
+ "http://docs.jquery.com/Main_Page": "jQuery JavaScript Library",
+ "http://www.eclipsezone.com/eclipse/forums/t76213.html": "EclipseZone - J2EE Module Dependencies not persisting ...",
+ "https://arxiv.org/abs/1103.0398": "[1103.0398] Natural Language Processing (almost) from Scratch",
+ "http://sites.wiwiss.fu-berlin.de/suhl/bizer/d2rq/": "D2RQ - Treating Non-RDF Databases as Virtual RDF Graphs - Chris Bizer",
+ "http://www.mkbergman.com/944/skos-now-interoperates-with-owl-2/": "SKOS Now Interoperates with OWL 2 AI3:::Adaptive Information",
+ "http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_": "online-ml/river (Online machine learning in Python)",
+ "https://www.theguardian.com/politics/2017/dec/18/uk-cannot-have-a-special-deal-for-the-city-says-eu-brexit-negotiator-barnier": "UK cannot have a special deal for the City, says EU's Brexit negotiator Politics The Guardian",
+ "http://www.cnrs.fr/insis/recherche/actualites/2018/04/puces-miniatures-intelligentes.htm": "CNRS INSIS - Un pas vers les puces miniatures intelligentes",
+ "http://fgiasson.com/blog/index.php/2011/09/21/volkswagens-use-of-structwsf-in-their-semantic-web-platform/": "Volkswagen\u2019s Use of structWSF in their Semantic Web Platform at Frederick Giasson\u2019s Weblog",
+ "http://ivan-herman.name/2010/11/02/my-first-mapping-from-rdb-to-rdf-using-r2rml/": "My first mapping from RDB to RDF using R2RML \u00ab Ivan\u2019s private site",
+ "http://blog.dbtune.org/post/2010/01/13/Live-SPARQL-end-point-for-BBC-Programmes": "Live SPARQL end-point for BBC Programmes - DBTune blog",
+ "http://thisisafrica.me/land-grabbing-africa-new-colonialism/": "Land Grabbing in Africa, the new colonialism This Is Africa",
+ "http://www.lemonde.fr/idees/article/2010/08/20/langues-anciennes-cibles-emouvantes_1400980_3232.html": "Langues anciennes, cibles \u00e9mouvantes",
+ "http://www.semanlink.net/doc/2021/05/heinrich_barth_and_the_western_": "Heinrich Barth and the Western Sudan",
+ "http://ged2018.sci-web.net/index.html": "Graph embedding Day - Lyon",
+ "http://www.w3.org/2013/04/odw/odw13_submission_53.pdf": "schema.org's position paper for the Open Data on the Web workshop",
+ "http://knowledgeweb.semanticweb.org/o2i/index99b9.html?page=auto.php": "KnowledgeWeb - DaimlerChrysler Semantic Web Portal business case",
+ "http://www.quickonlinetips.com/archives/2005/02/absolutely-delicious-complete-tools-collection/": "Quick Online Tips \u00bb Absolutely Del.icio.us - Complete Tools Collection",
+ "https://dl.acm.org/citation.cfm?doid=3184558.3186979": "Smart-MD: Neural Paragraph Retrieval of Medical Topics",
+ "http://www.guardian.co.uk/technology/2009/may/19/we-must-ensure-google-garage": "Cory Doctorow Draws the Line On Net Neutrality",
+ "http://www.semanlink.net/doc/2020/12/%C2%AB_le_devoir_de_toute_societe_hu": "\u00ab\u00a0Le devoir de toute soci\u00e9t\u00e9 humaine est de se prot\u00e9ger contre les d\u00e9viances de ceux qui d\u00e9truisent la plan\u00e8te\u00a0\u00bb",
+ "http://www.xml.com/pub/a/2002/05/08/deviant.html": "XML.com: REST Roundup",
+ "https://www.npmjs.com/package/jsonld": "jsonld.js: A JSON-LD Processor and API implementation in JavaScript.",
+ "https://arxiv.org/abs/1707.00418": "[1707.00418] Learning Deep Latent Spaces for Multi-Label Classification",
+ "https://lejournal.cnrs.fr/articles/six-scenarios-dun-monde-sans-travail": "Six sc\u00e9narios d'un monde sans travail CNRS Le journal",
+ "http://www.paristechreview.com/2014/06/30/souverainete-numerique/#.U7UZoKe8i2k.twitter": "Souverainet\u00e9 num\u00e9rique: la piste industrielle",
+ "http://blogs.lexpress.fr/attali/2009/03/une-loi-scandaleuse-et-ridicul.php": "Une loi scandaleuse et ridicule - Conversation avec Jacques Attali - Lexpress",
+ "http://www.diigo.com/": "Diigo - Web Highlighter and Sticky Notes, Online Bookmarking and Annotation, Personal Learning Network.",
+ "http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen": "[1908.10084] Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+ "http://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html": "Codes of Interest: Using Bottleneck Features for Multi-Class Classification in Keras and TensorFlow",
+ "http://bennolan.com/behaviour/": "Behaviour : Using CSS selectors to apply Javascript behaviours",
+ "http://martinfowler.com/nosql.html": "NoSQL Guide",
+ "http://videolectures.net/eswc2012_chevalier_servant_product_customization/": "Product customization as Linked Data - videolectures.net",
+ "http://www2012.org/proceedings/proceedings/p449.pdf": "LINDEN: Linking Named Entities with Knowledge Base via Semantic Knowledge",
+ "http://www.lemonde.fr/technologies/article/2013/10/16/derriere-le-mooc-a-la-francaise-google_3496887_651865.html": "Derri\u00e8re le MOOC \u00e0 la fran\u00e7aise : Google",
+ "http://passeurdesciences.blog.lemonde.fr/2012/04/18/sommes-nous-prets-a-affronter-un-tsunami-solaire/": "Sommes-nous pr\u00eats \u00e0 affronter un tsunami solaire ? Passeur de sciences",
+ "https://www.youtube.com/watch?v=-eyhCTvrEtE&list=PLfsVAYSMwsksjfpy8P2t_I52mugGeA5gR&index=1": "Heroes of Deep Learning: Andrew Ng interviews Geoffrey Hinton - YouTube",
+ "http://www.semanlink.net/doc/2019/11/finding_data_block_nirvana_a_j": "Finding Data Block Nirvana (a journey through the fastai data block API) \u2014 Part 2",
+ "https://medium.com/@francois.chollet/what-worries-me-about-ai-ed9df072b704": "What worries me about AI \u2013 Fran\u00e7ois Chollet \u2013 Medium",
+ "https://arxiv.org/abs/1805.03793": "[1805.03793] hyperdoc2vec: Distributed Representations of Hypertext Documents",
+ "http://scikit-learn.org": "scikit-learn: machine learning in Python",
+ "https://www.youtube.com/watch?v=YJnddoa8sHk": "Deep Learning: Practice and Trends (NIPS 2017 Tutorial, parts I & II) - YouTube",
+ "http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel": "[2009.07938] Type-augmented Relation Prediction in Knowledge Graphs",
+ "http://www.newyorker.com/online/blogs/newsdesk/2012/11/google-driverless-car-morality.html": "Google\u2019s Driver-less Car and Morality : The New Yorker",
+ "https://arxiv.org/abs/1412.6623": "[1412.6623] Word Representations via Gaussian Embedding",
+ "http://www.consortiuminfo.org/bulletins/pdf/jun05/feature.pdf": "Consortium Standards Bulletin - THE SEMANTIC WEB: AN INTERVIEW WITH TIM BERNERS-LEE",
+ "http://www.lemonde.fr/technologies/article/2011/08/30/commotion-le-projet-d-un-internet-hors-de-tout-controle_1565282_651865.html#ens_id=1280415": "Commotion, le projet d'un Internet hors de tout contr\u00f4le",
+ "https://blog.floydhub.com/language-translator/": "Found in translation: Building a language translator from scratch with deep learning - FloydHub",
+ "https://arxiv.org/abs/1812.09449": "[1812.09449] A Survey on Deep Learning for Named Entity Recognition",
+ "https://developers.google.com/machine-learning/rules-of-ml/": "R\u00e8gles du machine learning\u00a0: \u00a0\u00a0 Google Developers",
+ "http://www.seco.tkk.fi/publications/2006/holi-hyvonen-modeling-uncertainty-in-2006.pdf": "Modeling Uncertainty in Semantic Web Taxonomies",
+ "http://www.w3.org/2012/11/web-and-automotive/submissions/webautomotive1_submission_24.pdf": "The Architecture of Future Automotive Applications based on Web Technologies",
+ "http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_": "[1903.11279] Graph Convolution for Multimodal Information Extraction from Visually Rich Documents",
+ "http://community.ofset.org/wiki/Main_Page": "OFSET Wiki",
+ "http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-02.pdf": "NERD meets NIF: Lifting NLP Extraction Results to the Linked Data Cloud",
+ "https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601": "HMTL: Multi-task learning for state of the art NLP \u2013 dair.ai \u2013 Medium",
+ "https://www.quora.com/Usually-RNNs-are-used-for-NLP-when-do-CNNs-in-NLP-make-sense": "Usually RNNs are used for NLP, when do CNNs in NLP make sense? - Quora",
+ "http://en.wikipedia.org/wiki/Percy_Schmeiser": "Percy Schmeiser - Wikipedia, the free encyclopedia",
+ "http://lists.w3.org/Archives/Public/public-esw-thes/2009Nov/0000.html": "Using DBpedia resources as skos:Concepts?",
+ "http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b": "Building a Search Engine with BERT and TensorFlow - Towards Data Science",
+ "http://www.semanlink.net/doc/2020/10/construire_un_bon_analyzer_fran": "Construire un bon analyzer fran\u00e7ais pour Elasticsearch",
+ "http://www.semanlink.net/doc/2021/09/2109_04513_filling_the_gaps_i": "[2109.04513] Filling the Gaps in Ancient Akkadian Texts: A Masked Language Modelling Approach",
+ "http://www.ontotext.com/owlim": "OWLIM Ontotext",
+ "http://maisouvaleweb.fr/la-blockchain-signera-t-elle-la-fin-du-capitalisme/?utm_content=bufferbb3f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "La Blockchain signera-t-elle la fin du capitalisme ? -",
+ "https://www.coursera.org/course/compneuro": "Computational Neuroscience Coursera",
+ "http://www.semanlink.net/doc/2021/05/integrating_document_clustering": "Integrating Document Clustering and Multidocument Summarization",
+ "http://blogs.sun.com/bblfish/entry/how_applying_xml_to_data": "All XML roads lead to RDF",
+ "http://www.magyaradam.com/": "Adam Magyar",
+ "http://maven.apache.org/plugins/maven-war-plugin/": "Maven WAR Plugin",
+ "http://www.semanlink.net/doc/2021/10/fastapi": "FastAPI",
+ "http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104428": "France Inter > Sur les \u00e9paules de Darwin > A la recherche des myst\u00e8res de l'h\u00e9r\u00e9dit\u00e9 (3)",
+ "https://cwiki.apache.org/confluence/display/solr/Result+Clustering#ResultClustering-PerformanceConsiderations": "Result Clustering - Apache Solr Reference Guide - Apache Software Foundation",
+ "http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy": "[1911.09419] Learning Hierarchy-Aware Knowledge Graph Embeddings for Link Prediction",
+ "http://www.semanlink.net/doc/2019/06/dinosaur_asteroid_hit_worst_po": "Dinosaur asteroid hit 'worst possible place' - BBC News",
+ "http://www.nytimes.com/2008/08/12/business/worldbusiness/12indiawall.html": "Cost-Cutting in New York, but a Boom in India",
+ "https://www.analyticsvidhya.com/blog/2016/04/complete-tutorial-tree-based-modeling-scratch-in-python/": "A Complete Tutorial on Tree Based Modeling from Scratch (in R & Python)",
+ "http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo": "[2003.00330] Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective",
+ "https://sites.google.com/site/knowxtext/root/sigir-2018-tutorial": "SIGIR 2018 Tutorial - Knowledge Extraction and Inference from Text: Shallow, Deep, and Everything in Between",
+ "http://www.semanlink.net/doc/2019/12/how_to_turn_off_smart_tv_snoopi": "How to Turn Off Smart TV Snooping Features - Consumer Reports",
+ "http://www.jneurosci.org/content/38/44/9563": "Emergence of Binocular Disparity Selectivity through Hebbian Learning Journal of Neuroscience",
+ "https://www.greenpeace.fr/carte-fermes-usines/": "La carte des fermes-usines - Greenpeace France",
+ "http://cran.r-project.org/doc/manuals/R-intro.html": "An Introduction to R",
+ "http://www.semanlink.net/doc/2020/10/graph_visualization_library_in_": "Graph visualization library in JavaScript - Stack Overflow",
+ "https://www.opensemanticsearch.org/": "Open Semantic Search: Your own search engine for documents, images, tables, files, intranet & news",
+ "http://www.bbc.com/news/technology-30998361": "BBC News - Could driverless cars own themselves?",
+ "https://arxiv.org/pdf/1004.5370.pdf": "[1004.5370] Self-Taught Hashing for Fast Similarity Search",
+ "http://michaelmoore.com/trumpwillwin/": "5 Reasons Why Trump Will Win MICHAEL MOORE",
+ "https://arxiv.org/abs/1810.00438": "[1810.00438] Parameter-free Sentence Embedding via Orthogonal Basis",
+ "http://www.vogella.com/articles/Python/article.html": "Python Development with PyDev and Eclipse - Tutorial",
+ "http://www.semanlink.net/doc/2019/07/huggingface_pytorch_transformer": "huggingface/pytorch-transformers: A library of state-of-the-art pretrained models for NLP",
+ "http://techcrunch.com/2014/07/16/why-the-first-yc-backed-biotech-company-may-just-be-the-future-of-pharma/": "Why The First YC-Backed Biotech Company May Just Be The Future Of Pharma TechCrunch",
+ "http://virtuoso.openlinksw.com/Whitepapers/html/rdf_views/virtuoso_rdf_views_example.html": "Mapping Relation Data to RDF with Virtuoso's RDF Views",
+ "http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s": "Comment Bayer a fait pression sur le Mexique pour emp\u00eacher l\u2019interdiction du glyphosate",
+ "http://www.semanlink.net/doc/2020/11/only_angels_have_wings": "Only Angels Have Wings",
+ "http://proceedings.mlr.press/v37/kusnerb15.pdf": "From Word Embeddings To Document Distances (Kusner 2015)",
+ "http://www.w3.org/TR/2006/NOTE-sw-oosd-primer-20060309/": "A Semantic Web Primer for Object-Oriented Software Developers",
+ "http://datalift.org/fr/index.html": "DataLift - un ascenseur pour vos donn\u00e9es",
+ "http://moalquraishi.wordpress.com/2014/05/25/what-does-a-neural-network-actually-do/": "What Does a Neural Network Actually Do? \u00ab Some Thoughts on a Mysterious Universe",
+ "http://www2.cnrs.fr/en/2903.htm": "Electronic synapses that can learn\u00a0: towards an artificial brain\u00a0? - CNRS Web site",
+ "http://mathwithbaddrawings.com/2013/12/02/headlines-from-a-mathematically-literate-world/": "Headlines from a Mathematically Literate World Math with Bad Drawings",
+ "http://topquadrant.com/solutions/ent_vocab_net.html": "TopQuadrant Solutions Enterprise Vocabulary Net",
+ "http://www.semanlink.net/doc/2021/01/talk_to_strangers_on_signal_wit": "Talk To Strangers on Signal With a Public Phone Number",
+ "http://www.lemonde.fr/ameriques/article/2012/03/20/derriere-la-video-kony-2012-le-marketing-de-l-emotion_1672757_3222.html": "Derri\u00e8re la vid\u00e9o \"Kony 2012\", le marketing de l'\u00e9motion",
+ "http://www.wired.com/2016/03/mystery-minimal-cell-craig-venters-new-synthetic-life-form/": "The Mystery of the Minimal Cell, Craig Venter\u2019s New Synthetic Life Form WIRED",
+ "http://www.semanlink.net/doc/2019/06/relearn_css_layout_every_layout": "Relearn CSS layout: Every Layout",
+ "http://fgiasson.com/blog/index.php/2014/07/21/new-umbel-concept-noun-tagger-web-service-other-improvements/": "New UMBEL Concept Noun Tagger Web Service & Other Improvements at Frederick Giasson\u2019s Weblog",
+ "http://www.college-de-france.fr/site/stanislas-dehaene/course-2012-02-14-09h30.htm": "L'impl\u00e9mentation neuronale des m\u00e9canismes Bay\u00e9siens - Psychologie cognitive exp\u00e9rimentale - Stanislas Dehaene - Coll\u00e8ge de France",
+ "http://labs.systemone.at/wikipedia3": "System One - Wikipedia3",
+ "http://sourceforge.net/scm/?type=cvs&group_id=40417": "SourceForge.net: Jena: SCM",
+ "http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_": "[1904.01947] Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms",
+ "http://fr.wikipedia.org/wiki/Thomas_l%27imposteur_(film,_1965)": "Thomas l'imposteur (film, 1965)",
+ "http://2010.lucene-eurocon.org/slides/Integration-of-Natural-Language-Processing-tools-with-Solr_Joan-Codina-Filba.pdf": "Integration of Natural Language Processing tools with Solr",
+ "https://becominghuman.ai/cheat-sheets-for-ai-neural-networks-machine-learning-deep-learning-big-data-678c51b4b463": "Cheat Sheets for AI, Neural Networks, Machine Learning, Deep Learning & Big Data",
+ "http://www.textuality.com/tag/uri-comp-4": "How to Compare Uniform Resource Identifiers",
+ "https://blogs.oracle.com/bblfish/entry/web_finger_proposals_overview": "Web Finger proposals overview (The Sun BabelFish Blog)",
+ "http://www.w3.org/2006/07/SWD/wiki/SkosDesign/ConceptSemantics": "SkosDesign/ConceptSemantics - W3C Semantic Web Deployment Wiki",
+ "http://www.cse.ucsd.edu/users/dasgupta/mcgrawhill/": "\"Algorithms\" (Book)",
+ "http://blog.takipi.com/how-to-get-started-with-java-machine-learning/": "How to Get Started with Java Machine Learning Takipi Blog",
+ "http://www.coactus.com/blog/2006/12/validation-considered-harmful": "Validation considered harmful",
+ "http://www.rfi.fr/francais/actu/articles/087/article_50669.asp": "RFI - Niger : retour \u00e0 la terre - Reportage Afrique",
+ "https://blog.usievents.com/interview-keren-elazari-hackers-potentiel-disruptif-dont-nous-avons-besoin/": "Interview Keren Elazari : \" Les hackers ont le potentiel disruptif dont nous avons besoin\"",
+ "http://www.semanlink.net/doc/2020/02/fasthugs_%7C_ntentional": "FastHugs ntentional",
+ "http://events.linkeddata.org/ldow2008/papers/01-bojars-passant-weaving-sioc.pdf": "Weaving SIOC into the Web of Linked Data",
+ "http://news.bbc.co.uk/1/hi/sci/tech/4862112.stm": "BBC NEWS - Light shed on mysterious particle",
+ "http://esw.w3.org/topic/SemanticWebDOAPBulletinBoard": "SemanticWebDOAPBulletinBoard - ESW Wiki",
+ "http://structureddynamics.com/linked_data.html": "Linked Data FAQ",
+ "https://neo4j.com/developer/cypher-query-language/": "Neo4j's Graph Query Language: An Introduction to Cypher",
+ "http://www.w3.org/2004/02/skos/extensions.rdf": "SKOS Extensions [RDF/OWL Description]",
+ "http://www.semanlink.net/doc/2021/01/journee_tal_grand_public_gdr": "Journ\u00e9e\u00a0 TAL grand public \u2013 GDR TAL",
+ "http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor": "France is AI 2018: Lenka Zdeborova - Statistical physics modelling of machine learning - YouTube",
+ "http://redlink.co/redlinks-90seconds-of-fame-at-the-pioneers-festival/": "Redlink\u2019s 90seconds of fame at the Pioneers Festival redlink",
+ "http://www.reuters.com/article/mergersNews/idUSN1931556620070720?sp=true": "Facebook buys start-up Parakey for undisclosed sum Reuters",
+ "http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202": "Deep Learning for AI July 2021 Communications of the ACM",
+ "http://julie.grollier.free.fr/index.htm": "Julie Grollier - personal website: Nanodevices - Bio-inspired computing - Spin Transfer Torque - Memristors",
+ "https://github.com/nlptown/nlp-notebooks": "A collection of notebooks for Natural Language Processing from NLP Town",
+ "http://josh-tobin.com/assets/pdf/troubleshooting-deep-neural-networks-01-19.pdf": "Troubleshooting Deep Neural Networks",
+ "http://isegserv.itd.rl.ac.uk/blogs/alistair/archives/84": "Alistair Miles \u00bb SKOS and RDFa in e-Learning",
+ "http://www.semanlink.net/doc/2021/02/kamalkraj_bert_ner_pytorch_nam": "kamalkraj/BERT-NER: Pytorch-Named-Entity-Recognition-with-BERT",
+ "https://antrix.net/static/pages/python-for-java/online/#the_zen_of_python": "Python for the busy Java Developer",
+ "http://thinknook.com/10-ways-to-improve-your-classification-algorithm-performance-2013-01-21/": "10 Tips to Improve your Text Classification Algorithm Accuracy and Performance Thinknook",
+ "http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea": "[2012.04740] River: machine learning for streaming data in Python",
+ "http://www.w3.org/2011/11/WebAutomotive/": "W3C Web and Automotive Workshop",
+ "http://media.ford.com/article_display.cfm?article_id=34591": "Ford Developers Look to Use Google Prediction API to Optimize Energy Efficiency; Research Presented at Google I/O Ford Motor Company Newsroom",
+ "http://scikit-learn.org/stable/modules/scaling_strategies.html": "Strategies to scale computationally: bigger data \u2014 scikit-learn documentation",
+ "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3230,50-776470,0.html": "Le r\u00eave europ\u00e9en confisqu\u00e9",
+ "http://www.w3.org/2004/03/trix/": "Named Graphs / Semantic Web Interest Group",
+ "http://arxiv.org/abs/1602.05314": "[1602.05314] PlaNet - Photo Geolocation with Convolutional Neural Networks",
+ "https://en.wikipedia.org/wiki/Body_Heat": "Body Heat",
+ "http://www.semanlink.net/doc/2019/07/spacy_sur_twitter_the_videos": "spaCy sur Twitter : \"THE VIDEOS FROM #spaCyIRL ARE NOW LIVE!\"",
+ "http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/35179.pdf": "The Unreasonable Effectiveness of Data",
+ "http://www.springerlink.com/content/978-1-4419-7664-2/contents/": "Linking Enterprise Data : the book",
+ "http://chine.blog.lemonde.fr/2008/10/17/a-l%E2%80%99ouest-5-momies/": "Momies du Taklamakan",
+ "https://www.youtube.com/watch?v=sqDHBH9IjRU&t=8m25s": "SPACY'S ENTITY RECOGNITION MODEL: incremental parsing with Bloom embeddings & residual CNNs - YouTube",
+ "https://arxiv.org/abs/1608.05426": "[1608.05426] A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments",
+ "http://www.semanlink.net/doc/2020/12/elvis_sur_twitter_today_i_ke": "elvis sur Twitter : \"Today I kept thinking about the machine learning / NLP / deep learning related blog posts (not papers) that have been transformational for me...\"",
+ "https://www.theguardian.com/politics/2017/jul/25/brussels-attacks-liam-foxs-ignorant-remarks-chlorinated-chicken-eu-trade-deal-us": "Brussels attacks Liam Fox's 'ignorant' remarks on chlorinated chicken Politics The Guardian",
+ "http://jsonformatter.curiousconcept.com/": "JSON Formatter & Validator",
+ "https://arxiv.org/abs/1805.04032": "[1805.04032] From Word to Sense Embeddings: A Survey on Vector Representations of Meaning",
+ "http://www.mkbergman.com/?p=370": "The Encyclopedia of Life and Linking Open Data \u00bb AI3:::Adaptive Information",
+ "http://www.ldodds.com/blog/archives/000330.html": "Lost Boy: Google AppEngine for Personal Web Presence?",
+ "http://stackoverflow.com/questions/11138215/entitytag-value-caching-comparison-how-to-in-jersey": "http - EntityTag - Value, Caching, Comparison - how to in Jersey - Stack Overflow",
+ "https://pytorch.org/tutorials/beginner/nn_tutorial.html": "What is torch.nn really? \u2014 PyTorch Tutorials 1.0.0",
+ "http://www.snee.com/bobdc.blog/2009/10/a-rules-language-for-rdf.html": "A rules language for RDF - bobdc.blog",
+ "http://www.semanlink.net/doc/2020/06/akbc_2020_automated_knowledge_b": "AKBC 2020 Automated Knowledge Base Construction",
+ "http://www.semanlink.net/doc/2021/05/is_word_sense_disambiguation_ou": "Is Word Sense Disambiguation outdated? by Anna Breit May, 2021 Medium",
+ "http://www.semanlink.net/doc/2019/05/codes_of_interest_easy_speech_": "Codes of Interest: Easy Speech Recognition in Python with PyAudio and Pocketsphinx",
+ "http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.167.4245": "Active learning literature survey (2010)",
+ "http://www.lemonde.fr/ameriques/article/2011/11/05/au-mexique-anonymous-fait-flechir-un-cartel-de-la-drogue_1599427_3222.html": "Au Mexique, Anonymous fait fl\u00e9chir un cartel de la drogue",
+ "https://aclanthology.info/papers/D18-1360/d18-1360": "Multi-Task Identification of Entities, Relations, and Coreference for Scientific Knowledge Graph Construction - ACL Anthology",
+ "https://www.lemonde.fr/idees/article/2018/09/15/dix-ans-apres-lehman-brothers-en-attendant-la-prochaine-crise_5355537_3232.html": "Dix ans apr\u00e8s Lehman Brothers\u00a0: en attendant la prochaine crise",
+ "http://www.talis.com/platform/demos/": "Talis Platform - Demos",
+ "http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_": "Guillaume Lample sur Twitter : \"Last year, we showed that you can outperform a 24-layer transformer in language modeling with just...",
+ "http://www.semanlink.net/doc/2019/09/ctrl_a_conditional_transformer": "CTRL: A CONDITIONAL TRANSFORMER LANGUAGE MODEL FOR CONTROLLABLE GENERATION",
+ "http://www.economist.com/news/obituary/21569674-aaron-swartz-computer-programmer-and-activist-committed-suicide-january-11th-aged-26-aaron?frsc=dg%7Cb": "Aaron Swartz The Economist",
+ "http://protegewiki.stanford.edu/wiki/OWLViz": "Installing Graphviz for proteg\u00e9",
+ "http://pt.wikipedia.org/wiki/Parque_Nacional_Serra_da_Capivara": "Parque Nacional Serra da Capivara - Wikip\u00e9dia, a enciclop\u00e9dia livre",
+ "http://www.dajobe.org/2005/04-sparql/": "SPARQL Reference Card",
+ "http://www.semanlink.net/doc/2019/08/bloom_filter": "Bloom filter",
+ "http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_": "[1807.04905] Ultra-Fine Entity Typing",
+ "http://owlgred.lumii.lv/online_visualization/5dq2": "Online Ontology Visualization \u00b7 OWLGrEd: ConfigurationOntology",
+ "http://www.w3.org/2001/sw/SW-FAQ": "W3C Semantic Web FAQ",
+ "http://www.macports.org/": "macports",
+ "http://semanticweb.deit.univpm.it/tiki-index.php?page=ProjectProposalPage": "SEMEDIA Semantic Web and Multimedia Group",
+ "http://www.semanlink.net/doc/2019/07/sebastian_ruder_sur_twitter_": "Sebastian Ruder sur Twitter : \"@yoavgo on (some of the) missing elements in NLP. Future vision: humans writing rules aided by ML. #spaCyIRL\u2026 \"",
+ "http://www.marco.org/2013/07/03/lockdown": "Lockdown \u2013 Marco.org",
+ "https://arxiv.org/abs/1804.01486": "[1804.01486] Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data",
+ "https://github.com/openlink/rdf-editor": "openlink/rdf-editor: The OpenLink RDF Edito... - GitHub",
+ "http://www.cross-browser.com/x/lib/view.php": "Cross-Browser.com - XV: X Library Viewer",
+ "http://www.semanlink.net/doc/2021/05/a_review_of_conceptual_clusteri": "A review of conceptual clustering algorithms",
+ "http://www.jasons-toolbox.com/SlightlyThickerBox/": "Slightly ThickerBox",
+ "https://www.lemonde.fr/afrique/article/2018/06/01/au-sahara-voyager-devient-un-crime_5308325_3212.html": "Au Sahara, voyager devient\u00a0un\u00a0crime",
+ "http://www.persee.fr/web/revues/home/prescript/article/jafr_0399-0346_1980_num_50_1_2000_t1_0152_0000_2": "Boub\u00e9 Gado, Le Zarmatarey, Contribution \u00e0 l'histoire des populations d'entre Niger et Dallol Mawri",
+ "http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_": "[2002.04688] fastai: A Layered API for Deep Learning",
+ "http://search.datao.net/": "search.datao.net",
+ "http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_": "[2003.08505] A Metric Learning Reality Check",
+ "http://www.courrierinternational.com/article/2014/04/10/le-nouveau-rwanda": "\"Le nouveau Rwanda\" Courrier international",
+ "http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a": "[1812.00417] Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale",
+ "http://www.dglflf.culture.gouv.fr/Actualites/Programme_Semanticpedia.pdf": "Pr\u00e9sentation S\u00e9manticPedia",
+ "http://code.google.com/p/lmf/wiki/PrinciplesLinkedMedia": "PrinciplesLinkedMedia - lmf - The concepts behind Linked Media and how it extends Linked Data",
+ "https://arxiv.org/abs/1901.02860": "[1901.02860] Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context",
+ "http://www.tela-botanica.org/": "Tela Botanica - Le r\u00e9seau de la botanique francophone",
+ "https://sagascience.com/jeanrouch/": "Sagascience - Jean Rouch L\u2019ethnologue-cin\u00e9aste",
+ "https://medium.com/@acrosson/extract-subject-matter-of-documents-using-nlp-e284c1c61824": "Extract Subject Matter of Documents Using NLP \u2013 Alexander Crosson \u2013 Medium",
+ "http://nonchalantrepreneur.com/post/70130104170/bitcoin-and-the-byzantine-generals-problem": "cdixon tumblr, Bitcoin and the Byzantine Generals Problem",
+ "http://developer.apple.com/internet/webcontent/xmlhttpreq.html": "Dynamic HTML and XML: The XMLHttpRequest Object",
+ "http://www.semanlink.net/doc/2019/11/project_debater_ibm_research_": "Project Debater - IBM Research AI",
+ "http://tech.groups.yahoo.com/group/jena-dev/message/35751": "ARQ: a question about property functions",
+ "https://blog.semanticscholar.org/finding-experts-in-grapal-b97ef280e585": "Finding experts in GrapAL \u2013 Semantic Scholar",
+ "http://videolectures.net/": "VideoLectures - exchange ideas & share knowledge",
+ "http://www.ivan-herman.net/WebLog/WorkRelated/SemanticWeb/ursw06.html": "Ivan's Blog : Workshop on Uncertainty Reasoning on the SW",
+ "http://maragoyet.blog.lemonde.fr/2013/12/13/erudition-piege-a-cons/": "Erudition, pi\u00e8ge \u00e0 cons ? Alchimie du coll\u00e8ge",
+ "http://www.lemonde.fr/opinions/article/2010/03/06/mal-a-l-europe-par-herve-kempf_1315314_3232.html": "Mal \u00e0 l'Europe - Chronique Ecologie, Herv\u00e9 Kempf",
+ "http://www.africatime.com/niger/nouvelle.asp?no_nouvelle=200085&no_categorie=2": "Fin du probl\u00e8me d'eau de la ville de Zinder (juin 2005) Souvenirs d'un enfant du Damagaram",
+ "http://www.semanlink.net/doc/2020/01/neurala_how_lifelong_dnn_solve": "Neurala: How Lifelong-DNN Solves for Inherent Problems with Traditional DNNs",
+ "http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html": "Re: Squaring the HTTP-range-14 circle",
+ "http://lists.w3.org/Archives/Public/public-esw-thes/2012Jul/0007.html": "visualization of skos thesauri (public-esw-thes@w3.org from July 2012)",
+ "http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in": "[2110.06176] Mention Memory: incorporating textual knowledge into Transformers through entity mention attention",
+ "http://wiki.goodrelations-vocabulary.org/FFAQ#Why_is_Google_not_showing_rich_snippets_for_my_pages.3F": "Why is Google not showing rich snippets for my pages? - GoodRelations Wiki",
+ "http://blogs.sun.com/roller/page/searchguy/20050610": "Automatically building semantic taxonomies The Search Guy Weblog",
+ "https://arxiv.org/abs/1810.09164": "[1810.09164] Named Entity Disambiguation using Deep Learning on Graphs",
+ "https://www2018.thewebconf.org/program/web-content-analysis/": "RESEARCH TRACK: Web Content Analysis, Semantics and Knowledge",
+ "https://blog.twitter.com/2015/autograd-for-torch": "Autograd for Torch",
+ "https://calculatedcontent.com/2014/09/23/machine-learning-with-missing-labels-transductive-svms/": "Machine Learning with Missing Labels: Transductive SVMs",
+ "https://stackoverflow.com/questions/8778409/speech-recognition-reduce-possible-search-results/8779478#8779478": "android - speech recognition reduce possible search results - Stack Overflow",
+ "http://www.atelier-francais.org/event/open-data-lexception-culturelle": "Open data : l'exception culturelle? Atelier Fran\u00e7ais",
+ "http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized": "[1906.08237] XLNet: Generalized Autoregressive Pretraining for Language Understanding",
+ "http://lov.okfn.org/dataset/lov/details/vocabulary_cold.html": "COLD - Configuration ontology / LOV",
+ "https://medium.com/mit-technology-review/a-radical-new-neural-network-design-could-overcome-big-challenges-in-ai-56b6af3fe9a5": "A Radical New Neural Network Design Could Overcome Big Challenges in AI",
+ "http://www.semanlink.net/doc/2020/09/deathtrap_film_wikipedia": "Deathtrap (film)",
+ "https://twitter.com/TensorFlow/status/1055538593941409792": "TensorFlow: how to load and save models at every epoch so you never lose time or data.",
+ "http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la": "[1905.07129] ERNIE: Enhanced Language Representation with Informative Entities",
+ "http://www.ldp4j.org/#/": "LDP4j",
+ "http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con": "[2001.03765] Learning Cross-Context Entity Representations from Text",
+ "http://www.semanlink.net/doc/2021/08/from_text_to_knowledge_the_inf": "From Text to Knowledge: The Information Extraction Pipeline by Tomaz Bratanic Towards Data Science",
+ "http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_": "[1909.03193] KG-BERT: BERT for Knowledge Graph Completion",
+ "http://images.math.cnrs.fr/": "Images des math\u00e9matiques",
+ "http://reference.sitepoint.com/javascript": "JavaScript Reference",
+ "http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings": "Building a real-time embeddings similarity matching system \u00a0\u00a0 Solutions \u00a0\u00a0 Google Cloud",
+ "http://www.lemonde.fr/planete/article/2011/12/23/mecanique-du-vivant-les-physiciens-recrivent-la-biologie_1621701_3244.html#ens_id=1622560": "M\u00e9canique du vivant : les physiciens r\u00e9crivent la biologie",
+ "http://fr.wikipedia.org/wiki/Acoustique_musicale#Param.C3.A8tres_du_sonore_et_attributs_du_musical": "Acoustique musicale - Wikip\u00e9dia",
+ "http://www.semanlink.net/doc/2020/11/node_js_browserify_how_to_c": "node.js - Browserify - How to call function bundled in a file generated through browserify in browser - Stack Overflow",
+ "http://nadbordrozd.github.io/blog/2015/11/29/ds-toolbox-topic-models/": "DS Toolbox - Topic Models - DS lore",
+ "http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie": "[2106.04647] Compacter: Efficient Low-Rank Hypercomplex Adapter Layers",
+ "http://searchengineland.com/google-becomes-answer-engine-with-semantic-technology-great-news-for-retailers-116860": "Google Becomes Answer Engine With Semantic Technology \u2212 Great News For Retailers",
+ "http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev": "[1711.00046] Replace or Retrieve Keywords In Documents at Scale",
+ "http://www.semanlink.net/doc/2021/05/alex_russell_sur_twitter_if_": "Alex Russell sur Twitter : \"If you install Firefox on Windows, MacOS, Linux, ChromeOS, or Android you get *real* Firefox, complete with the Gecko engine. But not on iOS. Apple cripples engine competition in silent, deeply impactful ways.\" / Twitter",
+ "http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni": "[1706.00384] Deep Mutual Learning",
+ "http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/": "CEUR-WS.org/Vol-258 - OWL: Experiences and Directions 2007",
+ "https://bugs.eclipse.org/bugs/show_bug.cgi?id=139241": "",
+ "http://www.notube.tv/": "NoTube semantic television project - making television more personal",
+ "http://simia.net/wiki/How_much_information_is_in_a_language%3F": "How much information is in a language? - Simia",
+ "http://2007.xtech.org/public/schedule/paper/49": "XTech 2007: without the X - the return of {{Textual}} markup",
+ "http://www.semanlink.net/doc/2020/06/patrick_von_platen_sur_twitter_": "Patrick von Platen sur Twitter : \"Today, @huggingface is the start of our Reformer series...\"",
+ "http://www.w3.org/TR/powder-use-cases/#tagsrus": "POWDER: Use Cases and Requirements",
+ "http://whc.unesco.org/fr/list/1225/": "Ruines de Lorop\u00e9ni - UNESCO World Heritage Centre",
+ "https://explosion.ai/blog/deep-learning-formula-nlp": "Embed, encode, attend, predict: The new deep learning formula for state-of-the-art NLP models Blog Explosion AI",
+ "http://apassant.net/blog/2008/04/10/semwebcampparis-numero-2/": "SemwebCampParis num\u00e9ro 2 : Alexandre Passant",
+ "http://dl.acm.org/citation.cfm?id=359563": "Time, clocks, and the ordering of events in a distributed system",
+ "https://darrenjw.wordpress.com/2011/11/23/lexical-scope-and-function-closures-in-r/": "Lexical scope and function closures in R Darren Wilkinson's research blog",
+ "http://trialox.org/": "trialox ag",
+ "http://graphite.ecs.soton.ac.uk/browser/": "Q&D RDF Browser",
+ "https://groups.drupal.org/node/23216": "RDF for Solr: Possible implementation strategies",
+ "http://www.alta.asn.au/events/altss2004/course_notes/ALTSS-Curran-Maxent.pdf": "Maximum Entropy Models for NLP",
+ "http://scikit-learn.org/stable/_static/ml_map.png": "scikit learn: machine learning map",
+ "https://sites.google.com/view/federated-kbs-akbc19": "Federated KBs at AKBC2019",
+ "http://www.semanlink.net/doc/2021/04/100_000_morts_du_covid_19_en_fr": "100\u00a0000\u00a0morts du Covid-19 en France\u00a0: ferons-nous en sorte que le monde d\u2019apr\u00e8s\u00a0ne permette plus une telle trag\u00e9die\u00a0?",
+ "http://www.semanlink.net/doc/2021/01/precurseur_en_amerique_latine_": "Pr\u00e9curseur en\u00a0Am\u00e9rique latine, le\u00a0Mexique bannit le\u00a0ma\u00efs g\u00e9n\u00e9tiquement modifi\u00e9 et\u00a0le\u00a0glyphosate",
+ "http://www.lemonde.fr/economie/article/2013/04/06/offshore-leaks-silence-radio-a-londres_3155281_3234.html": "Offshore Leaks : silence radio \u00e0 Londres",
+ "https://dl.acm.org/citation.cfm?id=1321475": "Wikify!: linking documents to encyclopedic knowledge (2007)",
+ "https://stackoverflow.com/questions/15388831/what-are-all-possible-pos-tags-of-nltk": "What are all possible pos tags of NLTK? - Stack Overflow",
+ "http://www.semanlink.net/doc/2020/05/do_the_right_thing": "Do the Right Thing",
+ "http://www.liberation.fr/sports/2016/06/10/robert-mcliam-wilson-si-vous-n-aimez-pas-l-euro-supportez-l-irlande-du-nord_1458755": "Robert McLiam Wilson\u00a0: \u00abSi vous n\u2019aimez pas l\u2019Euro, supportez l\u2019Irlande du\u00a0Nord\u00bb - Lib\u00e9ration",
+ "http://resistanceinventerre.wordpress.com/2011/12/21/luniversite-est-universelle-notre-matiere-grise-est-de-toutes-les-couleurs-signez-lappel-contre-la-chasse-aux-etudiants-etrangers/": "L\u2019universit\u00e9 est universelle : notre mati\u00e8re grise est de toutes les couleurs ! Signez l\u2019appel contre la chasse aux \u00e9tudiants \u00e9trangers",
+ "https://code.fb.com/open-source/pytorch-biggraph/": "PyTorch-BigGraph: Faster embeddings of large graphs - Facebook Code",
+ "https://github.com/fozziethebeat/S-Space": "fozziethebeat/S-Space - Java - GitHub",
+ "https://arxiv.org/abs/1806.06259": "[1806.06259] Evaluation of sentence embeddings in downstream and linguistic probing tasks",
+ "http://www.semanlink.net/doc/2020/09/scikit_multilearn_multi_label_": "scikit-multilearn: Multi-Label Classification in Python",
+ "http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu": "Towards a Conscious AI: A Computer Architecture inspired by Neuroscience - Microsoft Research",
+ "http://www.semanlink.net/doc/2021/05/transformers_pipelines_ipynb_": "Transformers Pipelines.ipynb - Colaboratory",
+ "http://news.bbc.co.uk/2/hi/asia-pacific/7323794.stm": "BBC NEWS Hope over Tasmanian Devil cancer",
+ "http://semanticweb.com/its-time-to-take-on-temporal-data-management-for-semantic-data_b39347#more-39347": "It\u2019s Time To Take On Temporal Data Management For Semantic Data - semanticweb.com",
+ "http://web.mit.edu/press/2010/genomic-fossil.html": "Scientists decipher 3 billion-year-old genomic fossils",
+ "http://www.readwriteweb.com/archives/online_training_and_learning.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29": "Making You More Awesome: The Red-Hot World of Online Learning Services",
+ "http://see.stanford.edu/see/courses.aspx": "Stanford School of Engineering - Stanford Engineering Everywhere",
+ "https://arxiv.org/abs/1609.08496": "[1609.08496] Topic Modeling over Short Texts by Incorporating Word Embeddings",
+ "http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip": "Exploitation des donn\u00e9es, manipulation de l\u2019opinion, culte du secret\u2026 La trahison des GAFA",
+ "https://arxiv.org/abs/1809.01797": "[1809.01797] Describing a Knowledge Base",
+ "http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co": "[1905.06088] Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning",
+ "http://youtube.com/watch?v=mAuYfQCgSQU": "YouTube - Samba Diko - Moussa Poussi",
+ "http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base": "[2004.03705] Deep Learning Based Text Classification: A Comprehensive Review",
+ "http://news.bbc.co.uk/2/hi/science/nature/8022612.stm": "BBC NEWS Telescopes given 'go' for launch",
+ "https://twitter.com/RichardSocher/status/1021917140801052672": "Slides motivating true multitask learning in AI and NLP",
+ "http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans": "[2104.06979] TSDAE: Using Transformer-based Sequential Denoising Auto-Encoder for Unsupervised Sentence Embedding Learning",
+ "http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=21256": "[Linking-open-data] How to get the uri of a non-information resource from the corresponding HTML page?",
+ "http://crypto.stanford.edu/~blynn/c/object.html": "Object-oriented oblivion",
+ "http://www.semanlink.net/doc/2020/06/artificial_neural_networks_accu": "Artificial Neural Networks Accurately Predict Language Processing in the Brain bioRxiv",
+ "http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl": "A Technique for Building NLP Classifiers Efficiently with Transfer Learning and Weak Supervision",
+ "http://www.semanlink.net/doc/2020/07/federico_errica_%F0%9F%87%AE%F0%9F%87%B9%F0%9F%87%AA%F0%9F%87%BA_sur_tw": "Federico Errica \ud83c\uddee\ud83c\uddf9\ud83c\uddea\ud83c\uddfa sur Twitter : \"Our \u201c#Probabilistic #Learning on #Graphs via Contextual Architectures\u201d...\"",
+ "https://arxiv.org/abs/1801.04016": "[1801.04016] Theoretical Impediments to Machine Learning With Seven Sparks from the Causal Revolution",
+ "https://einstein.ai/research/learned-in-translation-contextualized-word-vectors": "Learned in translation: contextualized word vectors (Salesforce Research)",
+ "http://ceur-ws.org/Vol-748/paper4.pdf": "A Semantic Web Representation of a Product Range Specification based on Constraint Satisfaction Problem in the Automotive Industry",
+ "https://plus.google.com/106943062990152739506/posts/jLEUpys7fUW": "OpenLink Structured Data Sniffer now available in the Chrome Web Store As an...",
+ "http://www.readwriteweb.com/archives/semtech_making_the_web_searchable_searchmonkey.php": "Making the Web Searchable: The Story of SearchMonkey",
+ "http://www.w3.org/2014/Talks/1105_phila_semwebpro/#(1)": "The Next 10 Years of Success",
+ "http://afs.github.io/rdf-patch/": "RDF Patch \u2013 Describing Changes to an RDF Dataset",
+ "http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention": "[1906.01195] Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs",
+ "http://www.semanlink.net/doc/2020/10/knowledge_graphs_an_informatio": "Knowledge Graphs: An Information Retrieval Perspective",
+ "http://news.bbc.co.uk/2/hi/programmes/from_our_own_correspondent/6995061.stm": "BBC NEWS Big Brother is watching us all",
+ "https://github.com/agazzarini/SolRDF": "SolRDF - GitHub",
+ "http://www.inaglobal.fr/presse/article/quand-l-oligopole-de-l-internet-courtise-les-editeurs-de-presse-8641": "Quand l\u2019oligopole de l\u2019internet courtise les \u00e9diteurs de presse InaGlobal",
+ "http://web.stanford.edu/class/cs224n/reports/6838634.pdf": "Context is Everything: Finding Meaning Statistically in Semantic Spaces (CS224n 2018)",
+ "http://www.senat.fr/rap/r12-720/r12-7201.pdf": "Rapport d'information sur la situation au Sahel (Jean-Pierre Chev\u00e8nement et G\u00e9rard Larcher)",
+ "http://www.smbc-comics.com/comic/path-of-a-hero": "Path of a Hero",
+ "http://www.semanlink.net/doc/2019/11/building_a_full_text_search_app": "Building a Full-Text Search App Using Docker and Elasticsearch",
+ "http://nlp.stanford.edu/IR-book/": "Introduction to Information Retrieval, Cambridge University Press (2008) Manning, Raghavan, and Sch\u00fctze",
+ "https://en.wikipedia.org/wiki/Ishi": "Ishi - Wikipedia",
+ "http://sites.wiwiss.fu-berlin.de/suhl/forschung/websys/opensource/index.html": "Open Source Projects @ Freie Universit\u00e4t Berlin",
+ "https://williambert.online/2012/05/relatively-quick-and-easy-gensim-example-code/": "(Relatively) quick and easy Gensim example code William Bert",
+ "http://redfoot.net/": "Redfoot: Hypercoding System",
+ "https://www.bloomberg.com/news/features/2018-06-25/the-biggest-digital-heist-in-history-isn-t-over-yet": "The Biggest Digital Heist in History Isn\u2019t Over Yet - Bloomberg",
+ "https://jyx.jyu.fi/dspace/handle/123456789/56299": "Global RDF Vector Space Embeddings",
+ "http://wiki.apache.org/solr/Solrj": "Solrj - Solr Wiki",
+ "http://www.semanlink.net/doc/2020/03/coronavirus_why_you_must_act_n": "Coronavirus: Why You Must Act Now - Tomas Pueyo - Medium",
+ "http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i": "Nandan Thakur sur Twitter : \"@ikuyamada @Nils_Reimers Thanks @ikuyamad...\"",
+ "http://www.lesinrocks.com/2014/12/20/actualite/amazon-killer-lempire-des-libraires-contre-attaque-11542327/": "Les Inrocks - Amazon-Killer : l'empire des libraires contre-attaque",
+ "http://fr.slideshare.net/jasontucker/how-to-stream-a-meetup-or-live-event": "How to stream a meetup or live event",
+ "http://www.data-publica.com/content/lexique-de-lopen-data/": "Petit Lexique de l\u2019Open Data Data Publica",
+ "http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo": "The Extreme Classification Repository",
+ "http://www.betaversion.org/~stefano/linotype/news/85/": "Stefano's Linotype ~ Folksologies: de-idealizing ontologies",
+ "http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l": "Named Entity Recognition without Labelled Data: A Weak Supervision Approach (2020) (slides)",
+ "http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su": "[1911.02685] A Comprehensive Survey on Transfer Learning",
+ "http://www.snee.com/bobdc.blog/2008/08/skos_and_swoop_how.html": "SKOS and SWOOP: how - bobdc.blog",
+ "https://www.slideshare.net/sopekmir/graphchain": "GraphChain",
+ "http://en.wikipedia.org/wiki/Two_envelopes_problem": "Two envelopes problem",
+ "http://meslectures-karine.blogspot.com/2007/06/qui-la-faute-de-victor-hugo.html": "A qui la faute? de Victor Hugo",
+ "http://www.w3.org/2003/g/talk62/slides": "Practical Semantic Web Deployment with Microformats and GRDDL (1)",
+ "http://ebiquity.umbc.edu/paper/html/id/235/": "UMBC eBiquity Publication: A Bayesian Methodology towards Automatic Ontology Mapping",
+ "http://www.netcrucible.com/blog/PermaLink.aspx?guid=d9508178-c4e4-4175-bd7f-0e261e1a4739": "The War is Over (WS-* vs. POX/HTTP)",
+ "https://www.nature.com/articles/s41598-018-30619-y": "Hybrid optical-electronic convolutional neural networks with optimized diffractive optics for image classification Scientific Reports",
+ "http://deeplearning.net/tutorial/rnnslu.html": "Recurrent Neural Networks with Word Embeddings \u2014 DeepLearning 0.1 documentation",
+ "http://www.la-grange.net/2011/03/02/google-ads": "Comprendre comment Google vous traque",
+ "http://lespetitescases.net/amusons-nous-avec-rdfa": "Amusons-nous avec RDFa Les petites cases",
+ "http://www.bbc.co.uk/news/science-environment-21866464": "BBC News - Planck satellite: Maps detail Universe's ancient light",
+ "http://web.it.kth.se/~rassul/exjobb/rapporter/sima-emil.pdf": "Carbonara - a semantically searchable distributed repository",
+ "https://colab.research.google.com/drive/1jUpGwTaY9vJsUVw1tgwwXqKz6UOsvV1a": "Run NLP Experiments using the Feedly API.ipynb - Colaboratory",
+ "https://news.cnrs.fr/opinions/facial-recognition-cracking-the-brains-code?utm_content=buffer1e5d5&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Facial Recognition: Cracking the Brain\u2019s Code CNRS News",
+ "https://towardsdatascience.com/named-entity-recognition-and-classification-with-scikit-learn-f05372f07ba2": "Named Entity Recognition and Classification with Scikit-Learn",
+ "http://mccormickml.com/2017/02/22/concept-search-on-wikipedia/": "Concept Search on Wikipedia \u00b7 Chris McCormick",
+ "http://www.prescod.net/rest/mistakes/": "Common REST Mistakes",
+ "https://lejournal.cnrs.fr/articles/les-defis-de-la-voiture-a-hydrogene?utm_content=buffer8a0d7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Les d\u00e9fis de la voiture \u00e0 hydrog\u00e8ne CNRS Le journal",
+ "https://fr.wikipedia.org/wiki/Dans_ses_yeux": "El secreto de sus ojos",
+ "http://code.google.com/p/lucene-skos/": "lucene-skos - A SKOS analyzer module for Apache Lucene and Solr - Google Project Hosting",
+ "https://www.digitalocean.com/community/tutorials/apache-vs-nginx-practical-considerations": "Apache vs Nginx: Practical Considerations DigitalOcean",
+ "http://www.elasticsearch.org/": "elasticsearch",
+ "http://www.nytimes.com/2007/11/08/technology/personaltech/08basics.html": "Don\u2019t Throw Out Your Broken iPod; Fix It via the Web",
+ "http://chinafrica.info/": "Chinafrica Le magazine du nouveau monde",
+ "http://www.semanticuniverse.com/": "Semantic Universe",
+ "http://es.wikipedia.org/wiki/Todo_sobre_mi_madre": "Todo sobre mi madre",
+ "http://www.semanlink.net/doc/2020/04/knowledge_graphs_iclr_2020_": "Knowledge Graphs @ ICLR 2020 - Michael Galkin - Medium",
+ "http://isicil.inria.fr/docs/Livrables/ISICIL-ANR-EA01-FolksonomiesOntologies-0906.pdf": "Linking Folksonomies and Ontologies for Supporting Knowledge Sharing: a State of the Art",
+ "http://www.pbs.org/wnet/secrets/case_plague/index.html": "Secrets of the Dead . Mystery of the Black Death PBS",
+ "http://itunes.parisdescartes.fr/": "iTunes U. Paris Descartes",
+ "http://www.hymn-project.org/jhymndoc/": "JHymn Info and Help",
+ "http://www.semanlink.net/doc/2020/05/obsidian": "Obsidian",
+ "https://blog.cloudflare.com/why-we-terminated-daily-stormer/": "Why We Terminated Daily Stormer",
+ "http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de": "Matching Resumes to Jobs via Deep Siamese Network Companion Proceedings of the The Web Conference 2018",
+ "http://www.math.umaine.edu/~hiebeler/comp/matlabR.pdf": "MATLAB / R Reference",
+ "http://www.xbrl.org/": "XBRL: eXtensible Business Reporting Language",
+ "http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of": "[1911.03903] A Re-evaluation of Knowledge Graph Completion Methods",
+ "https://github.com/evolvingweb/ajax-solr/wiki": "Ajax solr",
+ "http://blogs.sun.com/bblfish/entry/aperture_to_the_semantic_web": "The Sun BabelFish Blog: Aperture to the semantic desktop",
+ "http://www.semanlink.net/doc/2020/08/hugging_face_sur_twitter_no_": "Hugging Face sur Twitter : \"No labeled data? No problem. The \ud83e\udd17 Transformers master branch now includes a built-in pipeline for zero-shot text classification...",
+ "http://www.semanlink.net/doc/2020/10/chris_mungall_sur_twitter_re": "Chris Mungall sur Twitter : \"Reading: OWL2Vec*: Embedding of OWL Ontologies\"",
+ "https://github.com/google/patents-public-data/blob/master/models/landscaping/AutomatedPatentLandscaping.pdf": "Automated patent landscaping (google/patents-public-data)",
+ "http://www.semantic-web-days.net/proceedings/ontoprise_SemanticWebDays2005.pdf": "Ontologies@ Work -Experience from Automotive and Engineering Industry",
+ "http://www.semanlink.net/doc/2020/04/blog_de_raphael_sourty": "Blog de Rapha\u00ebl Sourty",
+ "http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex": "Knowledge Graph, AI Web Data Extraction and Crawling Diffbot",
+ "http://www.wired.com/insights/2013/03/why-big-data-is-choking-business-managers/": "Why Big Data is Choking Business Managers Innovation Insights Wired.com",
+ "http://www.semanlink.net/doc/2020/01/transfer_learning_vs_neurala%E2%80%99s": "Transfer Learning vs. Neurala\u2019s L-DNN: clearing up minds LinkedIn",
+ "http://nlp.seas.harvard.edu/2018/04/03/attention.html": "The Annotated Transformer",
+ "http://sites.wiwiss.fu-berlin.de/suhl/bizer/bookmashup/": "RDF Book Mashup",
+ "http://www.mkbergman.com/1015/the-rationale-for-semantic-technologies/": "The Rationale for Semantic Technologies \u00bb AI3:::Adaptive Information",
+ "http://www.bbc.com/news/technology-35639549": "Is your smartphone listening to you? - BBC News",
+ "https://atom.io": "ATOM, a hackable text editor for the 21st Century",
+ "http://dannyayers.com/2007/03/28/using-those-profiles": "Using those profiles",
+ "https://www.lemonde.fr/paleontologie/article/2018/09/20/le-dickinsonia-le-plus-ancien-animal-sur-terre-etait-ovale-et-plat_5358003_1650762.html": "Le Dickinsonia, le plus ancien animal sur Terre, \u00e9tait ovale et plat",
+ "http://dannyayers.com/2007/07/17/can-opml-2": "Can OPML 2.0 be part of the Semantic Web?",
+ "http://blogs.talis.com/nodalities/2008/04/google_app_engine_and_the_joy.php": "Nodalities: Google App Engine and the Joy of WebArch",
+ "http://topics.cs.princeton.edu/Science/": "Modeling the Evolution of Science",
+ "http://www.semanlink.net/doc/2021/03/l%E2%80%99hydrogene_une_solution_incer": "L\u2019hydrog\u00e8ne, une solution incertaine pour la mobilit\u00e9",
+ "http://web.engr.oregonstate.edu/~tgd/publications/mlsd-ssspr.pdf": "Machine Learning for Sequential Data: A Review",
+ "http://www.semanlink.net/doc/2021/08/agostina_calabrese_sur_twitter_": "Agostina Calabrese sur Twitter : Structured Sentiment Analysis as Dependency Graph Parsing",
+ "http://youtube.com/watch?v=JqWoJkvryvM": "YouTube - Sibo - Moussa Poussi",
+ "http://tech.groups.yahoo.com/group/jena-dev/message/46281": "jena-dev : Message: Re: [jena-dev] Could TDB sort triples by objects?",
+ "https://github.com/cygri/tarql": "Tarql: SPARQL for Tables - cygri/tarql \u00b7 GitHub",
+ "http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2": "Journ\u00e9e commune AFIA - ARIA - 2 d\u00e9cembre 2019\u00a0",
+ "http://www.wildml.com/": "WildML \u2013 Artificial Intelligence, Deep Learning, and NLP",
+ "https://arxiv.org/abs/1803.02893": "[1803.02893] An efficient framework for learning sentence representations",
+ "http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/": "How Mimicking Brain Function is Revolutionising NLP - Dataconomy",
+ "http://www.bnode.org/archives2/59": "Web Clipboard: Adding liveliness to \"Live Clipboard\" with eRDF, JSON, and SPARQL.",
+ "http://www.wired.com/2016/03/doug-lenat-artificial-intelligence-common-sense-engine/": "One Genius\u2019 Lonely Crusade to Teach a Computer Common Sense WIRED",
+ "http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an": "NLP at Scale for Maintenance and Supply Chain Management",
+ "http://www.semanlink.net/doc/2020/02/self_supervised_representation_": "Self-Supervised Representation Learning",
+ "https://ai.stanford.edu/blog/weak-supervision/": "Weak Supervision: A New Programming Paradigm for Machine Learning SAIL Blog",
+ "http://dowhatimean.net/2007/03/less-code-erdf-templates-for-rdf-driven-web-sites": "dowhatimean.net \u00bb Less code: eRDF templates for RDF-driven web sites",
+ "http://www.overmundo.com.br/guia/sabor-selvagem": "Sabor Selvagem",
+ "https://www.youtube.com/watch?v=CMS6Ds3qryY": "Banksy documentary: Welcome to the Banksy art hotel in Bethlehem - YouTube",
+ "http://code.google.com/p/jquery-jsonp/": "jQuery-JSONP",
+ "http://www.iht.com/articles/2007/06/07/africa/letter.1-70823.php": "Tattered French African empire looks toward China - International Herald Tribune",
+ "https://www.theguardian.com/environment/2019/jan/15/insect-collapse-we-are-destroying-our-life-support-systems?CMP=share_btn_tw": "Insect collapse: \u2018We are destroying our life support systems\u2019 Environment The Guardian",
+ "http://dbpedia.neofonie.de/browse/": "Faceted Wikipedia Search",
+ "http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut": "[1609.02521] DiSMEC - Distributed Sparse Machines for Extreme Multi-label Classification",
+ "https://lists.w3.org/Archives/Public/public-schemaorg/2016Apr/0022.html": "Re: Google Structured Data Testing Tool fails on valid JSON-LD from Dan Brickley on 2016-04-07 (public-schemaorg@w3.org from April 2016)",
+ "http://forum.quechoisir.org/divulgation-donnees-personnelles-par-numericable-t11122-10.html": "Arnaque et Divulgation donn\u00e9es personnelles par Num\u00e9ricable : Internet - Page 2 - Forum Que Choisir",
+ "http://www.semanlink.net/doc/2020/03/diy_masks_for_all_could_help_st": "DIY masks for all could help stop coronavirus - The Washington Post",
+ "http://www.foretpriveefrancaise.com/comment-estimer-le-prix-du-bois-sur-pied-150189.html": "Comment estimer le prix du bois sur pied ?",
+ "https://www.offconvex.org/2016/07/10/embeddingspolysemy/": "Linear algebraic structure of word meanings \u2013 Off the convex path",
+ "http://www.xml.com/pub/au/225": "Joe Gregorio: Restful Web columns",
+ "http://voiretagir.org/spip.php?article50": "Un si\u00e8cle de progr\u00e8s sans merci",
+ "http://www.bbc.co.uk/dna/h2g2/A2207297": "BBC - The Nebra Sky Disc",
+ "http://www.lefigaro.fr/reportage/20070814.FIG000000020_pillages_chinois_en_foret_tropicale.html": "Pillages chinois en for\u00eat tropicale",
+ "http://www.semanlink.net/doc/2021/07/une_solution_inspiree_du_cervea": "Une solution inspir\u00e9e du cerveau pour \u00e9viter l\u2019oubli catastrophique des IA INSIS",
+ "https://www.theguardian.com/society/2016/aug/27/millions-at-risk-as-deadly-fungal-infections-acquire-drug-resistance": "Millions at risk as deadly fungal infections acquire drug resistance Society The Guardian",
+ "https://arxiv.org/pdf/1711.07128.pdf": "[1711.07128] Hello Edge: Keyword Spotting on Microcontrollers",
+ "http://www.silicon.com/silicon/management/itpro/0,39024675,39345605,00.htm": "Kurzweil: \"Technology is a double-edged sword\"",
+ "http://www.nybooks.com/articles/archives/2014/may/08/thomas-piketty-new-gilded-age/": "Why We\u2019re in a New Gilded Age by Paul Krugman The New York Review of Books",
+ "https://www.elastic.co/blog/uncoiling-data-in-dna-elasticsearch-as-a-bioinformatics-research-tool": "Uncoiling the Data in DNA: Elasticsearch as a BioInformatics Research Tool Elastic",
+ "http://gigaom.com/2013/12/07/why-cognition-as-a-service-is-the-next-operating-system-battlefield/": "Why Cognition-as-a-Service is the next operating system battlefield \u2014 Tech News and Analysis",
+ "https://www.jair.org/media/4992/live-4992-9623-jair.pdf": "Goldberg, Y. (2016). A Primer on Neural Network Models for Natural Language Processing. Journal of Artificial Intelligence Research",
+ "http://www.cs.cmu.edu/~brettb/papers/06itsc-driver-intent.pdf": "Learning to Predict Driver Route and Destination Intent",
+ "http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net": "[1907.07355] Probing Neural Network Comprehension of Natural Language Arguments",
+ "http://wiki.blojsom.com/wiki/display/blojsom/About+blojsom": "About blojsom - Confluence",
+ "http://www.latimes.com/opinion/op-ed/la-oe-rushkoff-universal-basic-income-silicon-valley-20170721-story.html": "Silicon Valley's push for universal basic income is \u2014 surprise! \u2014 totally self-serving - LA Times",
+ "http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n": "Transformers are Graph Neural Networks NTU Graph Deep Learning Lab",
+ "https://alarmingdevelopment.org/?p=1173": "The problem with programming and how to fix it \u2013 Alarming Development",
+ "http://www.bbc.co.uk/news/science-environment-23244768": "BBC News - Scientists building the world's first synthetic yeast",
+ "https://github.com/afs/rdf-thrift/blob/master/rdf-binary.md": "RDF Binary encoding using Thrift.",
+ "https://t.co/abYFX5zXXq": "Neural Transfer Learning for Natural Language Processing - Seb Ruder PhD Thesis",
+ "http://www.semanlink.net/doc/2020/10/le_prix_nobel_de_chimie_decerne": "Le prix Nobel de chimie d\u00e9cern\u00e9 \u00e0 la Fran\u00e7aise Emmanuelle Charpentier et l\u2019Am\u00e9ricaine Jennifer Doudna pour les \u00ab\u00a0ciseaux mol\u00e9culaires\u00a0\u00bb",
+ "https://commons.apache.org/proper/commons-csv/": "Commons CSV",
+ "http://www.semanlink.net/doc/2021/07/%C2%AB_projet_pegasus_%C2%BB_revelation": "\u00ab\u00a0Projet Pegasus\u00a0\u00bb\u00a0: r\u00e9v\u00e9lations sur un syst\u00e8me mondial d\u2019espionnage de t\u00e9l\u00e9phones",
+ "http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_": "[2104.14690] Entailment as Few-Shot Learner",
+ "http://blog.aksw.org/2010/navigational-knowledge-engineering-nke-and-hanne/": "Navigational Knowledge Engineering (NKE) and HANNE",
+ "http://hyperdata.org/taglia/": "tagliatelle delicious RDFizer",
+ "http://www.semanlink.net/doc/2021/06/le_surendettement_menace_des_mi": "Le surendettement menace des millions d\u2019emprunteurs de microcr\u00e9dits dans le monde",
+ "https://communication.revues.org/6650": "La construction d\u2019un espace patrimonial partag\u00e9 dans le Web de donn\u00e9es ouvert",
+ "http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r": "[2010.03496] Inductive Entity Representations from Text via Link Prediction",
+ "http://michelvolle.blogspot.fr/2014/05/la-france-cette-mal-aimee.html": "volle.com: La France, cette mal aim\u00e9e",
+ "http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering": "[1910.09760] Question Answering over Knowledge Graphs via Structural Query Patterns",
+ "http://swordfish.rdfweb.org/photos/2006/05/10/index.html": "JUC day 1: photos",
+ "http://www.naturenw.org/": "Nature of the Northwest",
+ "http://www.resgeol04.org/dalle.html": "La dalle \u00e0 ammonites, R\u00e9serve g\u00e9ologique de Haute Provence",
+ "http://julie.grollier.free.fr/STT.htm": "Julie Grollier - Spin Transfer Torque",
+ "http://semanticweb.com/goodrelations-fully-integrated-with-schema-org_b33306": "GoodRelations Fully Integrated with Schema.org - semanticweb.com",
+ "https://github.com/xiaohuiyan/BTM": "Biterm Topic Model (github)",
+ "http://www.semanlink.net/doc/2021/08/en_lituanie_dans_les_camps_des": "En Lituanie, dans les camps des migrants envoy\u00e9s par Loukachenko",
+ "http://www.semanlink.net/doc/2021/05/a_self_training_approach_for_sh": "A Self-Training Approach for Short Text Clustering - (Hadifar 2019)",
+ "https://medium.com/@yoav.goldberg/an-adversarial-review-of-adversarial-generation-of-natural-language-409ac3378bd7": "An Adversarial Review of \u201cAdversarial Generation of Natural Language\u201d",
+ "https://arxiv.org/abs/1901.11504": "[1901.11504] Multi-Task Deep Neural Networks for Natural Language Understanding",
+ "http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne": "Reasoning With Neural Tensor Networks for Knowledge Base Completion (2013)",
+ "http://pisani.blog.lemonde.fr/pisani/2005/06/cest_grce_aux_h_1.html": "Transnets, des gadgets aux r\u00e9seaux: C\u2019est gr\u00e2ce aux hippies (2)",
+ "http://thefigtrees.net/lee/sw/sciam/semantic-web-in-action": "The Semantic Web in Action - Scientific American - December 2007",
+ "http://en.wikipedia.org/wiki/Timeline_of_evolution": "Timeline of evolution - Wikipedia, the free encyclopedia",
+ "http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su": "[2001.09522] TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network",
+ "http://dallemang.typepad.com/my_weblog/2011/06/stop-press-microdata-in-topbraid.html": "Stop Press: Microdata in TopBraid",
+ "https://towardsdatascience.com/": "Towards Data Science",
+ "http://www.quakr.co.uk/": "quakr",
+ "http://www.semanlink.net/doc/2020/02/machine_learning_at_the_vu_univ": "Machine Learning at the VU University Amsterdam",
+ "http://ajaxmatters.com": "AJAX Matters - Asynchronous JavaScript and XML and XMLHTTP development information",
+ "http://rdfweb.org/topic/Smushing": "Smushing - FOAF Wiki",
+ "http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he": "Seth Stafford sur Twitter : \"Here\u2019s a nice paper (ICLR spotlight) on how to apply masking in LM training...\"",
+ "http://semantic-technology-companies.sti2.at/index.html": "Semantic Technology Companies",
+ "http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin": "From Random Grammars to Learning Language - D\u00e9partement de Physique de l'Ecole Normale sup\u00e9rieure",
+ "https://github.com/RubenVerborgh/Hydra-Architecture-Diagram/": "Hydra Architecture Diagram",
+ "http://www.ebusiness-unibw.org/ontologies/opdm/": "OPDM - Ontologies",
+ "http://www.w3.org/2010/Talks/0622-SemTech-IH/": "Introduction to Semantic Web Technologies, slides Ivan Herman at semtech 2010",
+ "http://www.semanticuniverse.com/articles-empire-where-rdf-sparql-meet-java-persistence-api.html": "Empire: RDF & SPARQL Meet JPA Semantic Universe",
+ "http://lefeuilleton.blog.lemonde.fr/2008/10/24/episode-7-la-pause-obama/": "Episode 7 : la pause Obama - Le Feuilleton - Blog LeMonde.fr",
+ "http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_": "[1905.11852] EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction",
+ "http://scikit-learn.org/stable/modules/cross_validation.html": "Cross-validation: evaluating estimator performance \u2014 scikit-learn documentation",
+ "https://www.quantamagazine.org/bacteria-use-brainlike-bursts-of-electricity-to-communicate-20170905/?utm_content=buffer5a5cf&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Bacteria Use Brainlike Bursts of Electricity to Communicate Quanta Magazine",
+ "http://www.newyorker.com/online/blogs/newsdesk/2013/01/the-libraries-of-timbuktu.html": "Has the Great Library of Timbuktu Been Lost?
: The New Yorker", + "http://www.eswc2007.org/preliminaryprogram.cfm": "4th European Semantic Web Conference 2007", + "http://www.cinemovies.fr/fiche_film.php?IDfilm=2854": "Le Voyage de James \u00e0 J\u00e9rusalem - Un film de Ra'anan Alexandrowicz avec Siyabonga Melongisi Shibe, Arieh Elias, Salim Dau, ...", + "http://www.bbc.co.uk/news/technology-26065991": "BBC News - IBM's Watson in Africa to help solve problems", + "http://www.freepatentsonline.com/EP1700233.html": "METHOD FOR ORGANIZING A DATABASE - Patent EP1700233", + "http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17924": "[Linking-open-data] Equivalence miner", + "http://www.javaworld.com/jw-02-2001/jw-0209-double.html": "Double-checked locking: Clever, but broken - JavaWorld", + "http://www.semanlink.net/doc/2021/02/garamantes_ancient_civilisation": "Garamantes ancient civilisation", + "http://www.eyaloren.org/pubs/sfsw2006.pdf": "ActiveRDF: object-oriented RDF in Ruby", + "http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures": "[1306.6802] Evaluation Measures for Hierarchical Classification: a unified view and novel approaches", + "https://stackoverflow.com/questions/4864883/safaris-reader-mode-open-source-solution": "web - Safari's \"Reader Mode\" - Open source solution? - Stack Overflow", + "https://motherboard.vice.com/en_us/article/xykkkd/why-american-farmers-are-hacking-their-tractors-with-ukrainian-firmware": "Why American Farmers Are Hacking Their Tractors With Ukrainian Firmware - Motherboard", + "https://arxiv.org/abs/1706.04902": "[1706.04902] A Survey Of Cross-lingual Word Embedding Models", + "http://apassant.net/2015/07/14/visually-index-instagram-pictures-find-real-time/": "Visually index Instagram pictures and find them in real-time", + "http://www.ldodds.com/projects/slug/": "Slug: A Semantic Web Crawler", + "http://ieet.org/index.php/IEET/more/rinesi20150925": "The price of the Internet of Things will be a vague dread of a malicious world", + "https://www.bbc.co.uk/news/science-environment-45172671": "Tax haven link to rainforest destruction and illegal fishing - BBC News", + "http://googleresearch.blogspot.fr/2013/07/11-billion-clues-in-800-million.html": "11 Billion Clues in 800 Million Documents: A Web Research Corpus Annotated with Freebase Concepts", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3214,50-692419,0.html": "Un Britannique condamn\u00e9 \u00e0 quinze ans de prison pour d\u00e9tention de documents prohib\u00e9s", + "http://ocw.mit.edu/OcwWeb/web/home/home/index.htm": "Free Online Course Materials MIT OpenCourseWare", + "http://archive.org/web/": "Internet Archive: Wayback Machine", + "http://mappings.dbpedia.org/index.php/Main_Page": "DBpedia Mappings Wiki", + "http://mednews.stanford.edu/stanmed/2005winter/rna.html": "Secret life of RNA- Stanford Medicine Magazine - Stanford University School of Medicine", + "http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep": "[1909.10506] Learning Dense Representations for Entity Retrieval", + "http://www.semanlink.net/doc/2021/09/creating_interactive_timelines_": "Creating Interactive Timelines with JavaScript by Shachee Swadia Nightingale Medium", + "http://clarkparsia.com/weblog/2007/08/12/understanding-swrl-part-1/": "Thinking Clearly\u00bb Understanding SWRL (Part 1)", + "http://www.semanlink.net/doc/2019/06/when_and_why_does_king_man_": "When and Why does King - Man + Woman = Queen?
(ACL 2019) Kawin Ethayarajh", + "http://www.w3.org/2001/tag/group/track/issues/57": "ISSUE-57: Mechanisms for obtaining information about the meaning of a given URI - Technical Architecture Group Tracker", + "http://latta.blog.lemonde.fr/2011/12/04/socrates-le-romantique-et-le-revolutionnaire/": "Socrates : le romantique et le r\u00e9volutionnaire", + "http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr": "[2004.04906] Dense Passage Retrieval for Open-Domain Question Answering", + "https://github.com/iliaschalkidis/ELMo-keras": "iliaschalkidis/ELMo-keras: Re-implementation of ELMo on Keras", + "https://fr.wikipedia.org/wiki/Timbuktu_(film)": "Timbuktu (film)", + "http://www.semanlink.net/doc/2020/01/killer_slime_dead_birds_an_ex": "Killer Slime, Dead Birds, an Expunged Map: The Dirty Secrets of European Farm Subsidies - The New York Times", + "http://www.phildawes.net/blog/2007/04/30/some-ideas-for-static-triple-indexing/": "Phil Dawes\u2019 Stuff \u00bb Some ideas for static triple indexing", + "https://arxiv.org/abs/1601.01343": "[1601.01343] Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation", + "https://www.researchgate.net/publication/321841361_Text_feature_extraction_based_on_deep_learning_a_review": "Text feature extraction based on deep learning: a review (2017)", + "http://iml.univ-mrs.fr/~arauzy/aralia/benchmark.html": "A Benchmark of Boolean Formulae", + "http://tw.rpi.edu/weblog/2009/06/19/i-will-pay-delicious-100-for-hierarchical-tagging/": "The Tetherless World Weblog \u00bb I will pay delicious $100 for hierarchical tagging", + "https://medium.com/jatana/unsupervised-text-summarization-using-sentence-embeddings-adb15ce83db1": "Unsupervised Text Summarization using Sentence Embeddings", + "http://www.zdnet.com/with-a-mix-of-arduino-raspberry-pi-and-fun-maker-box-hopes-to-bolster-africas-future-tech-skills-7000023770/": "With a mix of Arduino, Raspberry Pi and fun, Maker Box hopes to bolster Africa's future tech skills ZDNet", + "https://www.ipbes.net/": "IPBES Science and policy for people and nature", + "http://www.semanlink.net/doc/2020/11/ilha_das_flores_curta_metragem_": "Ilha das Flores (curta-metragem)", + "http://ruben.verborgh.org/blog/2013/04/30/lightning-fast-rdf-in-javascript/": "Lightning-fast RDF in JavaScript Ruben Verborgh", + "http://www.youtube.com/watch?v=bj7AbJ0ZYCk": "Virtuoso: generating RDF Views", + "http://web.stanford.edu/class/cs224n/": "CS224n: Natural Language Processing with Deep Learning", + "http://machinelearningmastery.com/java-machine-learning/": "Java Machine Learning", + "http://www.semanlink.net/doc/2019/09/brain_computer_interface_huge_": "Brain-computer interface: huge potential benefits and formidable challenges", + "http://www.semanlink.net/doc/2020/10/liebreich_separating_hype_from": "Liebreich: Separating Hype from Hydrogen \u2013 Part Two: The Demand Side BloombergNEF", + "https://arxiv.org/abs/1803.11175": "[1803.11175] Universal Sentence Encoder", + "http://wiki.fasterxml.com/JacksonInFiveMinutes": "JacksonInFiveMinutes - FasterXML Wiki", + "http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of": "\ud83c\udf3b The Best and Most Current of Modern Natural Language Processing", + "http://www.snee.com/bobdc.blog/2019/01/querying-machine-learning-dist.html": "Querying machine learning distributional semantics with SPARQL - bobdc.blog", + "https://gist.github.com/mommi84/07f7c044fa18aaaa7b5133230207d8d4": "Awesome Knowledge Graph Embedding Approaches", + 
"http://pezziardi.net/2014/05/02/5-minutes-de-franche-rigolade-open-data-devant-5-ministres-a-la-conference-de-paris/": "5 minutes de franche (rigolade) Open Data devant 5 ministres \u00e0 la Conf\u00e9rence de Paris L'informatique Conviviale", + "http://mediterraneanceramics.blogspot.com/2008/12/rdfa-at-ilion.html": "Mediterranean Ceramics: RDFa at Ilion", + "http://binalytics.wordpress.com/2013/04/24/r-python-rapidminer-and-weka-testing-my-faith/": "R, Python, RapidMiner and Weka. Testing my faith? BInalytics", + "https://arxiv.org/abs/1802.04865": "[1802.04865] Learning Confidence for Out-of-Distribution Detection in Neural Networks", + "http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s": "[1909.02164] TabFact: A Large-scale Dataset for Table-based Fact Verification", + "http://www.w3.org/TR/rdf-interfaces/": "RDF Interfaces 1.0", + "https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/": "An Intuitive Understanding of Word Embeddings: From Count Vectors to Word2Vec", + "http://www.infosdelaplanete.org/4199/tendance-ces-milliardaires-qui-croient-sauver-la-planete.html": "Infos de la Plan\u00e8te - Tendance : Ces milliardaires qui croient sauver la plan\u00e8te\u2026 - The Guardian (Royaume-Uni) - 2008-02-13", + "http://blog.semantic-web.at/2011/04/20/the-hype-the-hope-and-the-lod2-soren-auer-engaded-in-the-next-generation-lod/": "The Semantic Puzzle The hype, the hope and the LOD2: S\u00f6ren Auer engaged in the next generation LOD", + "http://www.lemonde.fr/pixels/article/2017/10/18/intelligence-artificielle-toujours-plus-puissant-alphago-apprend-desormais-sans-donnees-humaines_5202931_4408996.html": "Intelligence artificielle\u00a0: toujours plus puissant, AlphaGo apprend d\u00e9sormais sans donn\u00e9es humaines", + "http://arxiv.org/abs/1506.01094": "[1506.01094] Traversing Knowledge Graphs in Vector Space", + "http://www.snipsnap.org/": "SnipSnap :: start SnipSnap", + "http://www.semanlink.net/doc/2020/05/patrick_oshaughnessy_sur_twitte": "Patrick OShaughnessy sur Twitter : \"Other than @RoamResearch, who is doing interesting work in knowledge databases?...\"", + "http://simile.mit.edu/piggy-bank/": "SIMILE Piggy Bank", + "http://www.semanlink.net/doc/2019/10/one_shot_learning_siamese_netw": "One Shot learning, Siamese networks and Triplet Loss with Keras", + "http://www.w3.org/2001/sw/wiki/images/8/83/20130909_rdfvalidation.pdf": "Forms to direct interaction with Linked Data Platform APIs", + "https://www.theguardian.com/cities/2016/mar/18/story-of-cities-5-benin-city-edo-nigeria-mighty-medieval-capital-lost-without-trace?CMP=share_btn_tw": "Benin City, the mighty medieval capital now lost without trace Cities The Guardian", + "https://lilianweng.github.io/lil-log/2019/01/31/generalized-language-models.html": "Generalized Language Models", + "https://towardsdatascience.com/neural-network-embeddings-explained-4d028e6f0526": "Neural Network Embeddings Explained \u2013 Towards Data Science", + "http://www.france5.fr/programmes/articles/actu-societe/692-la-republique-des-clandestins.php": "France 5 - La R\u00e9publique des clandestins", + "http://www.semanlink.net/doc/2021/05/sinequa_enterprise_search_pla": "Sinequa : Enterprise Search Platform", + "http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle": "Amazonie \u00e9ternelle ARTE", + "http://en.wikipedia.org/wiki/Czech_Dream": "Czech Dream", + "http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em": "[2011.05864] On the Sentence Embeddings from Pre-trained Language Models", 
+ "http://www.lemonde.fr/opinions/article/2010/03/27/taxe-carbone-et-inegalites-par-herve-kempt_1325039_3232.html": "Taxe carbone et in\u00e9galit\u00e9s", + "http://autourduciel.blog.lemonde.fr/2014/09/21/maven-et-mom-2-nouveaux-satellites-pour-mars/": "MAVEN et MOM, 2 nouveaux satellites pour Mars Autour du Ciel", + "https://www.cbc.ca/news/technology/hemimastigotes-supra-kingdom-1.4715823": "Rare microbes lead scientists to discover new branch on the tree of life CBC News", + "http://www.semanlink.net/doc/2021/09/cory_doctorow_sur_twitter_f": "Cory Doctorow sur Twitter : \"#Facebook is a rotten company, rotten from the top down, its founder, board and top execs are sociopaths...\"", + "http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_": "Learning Structured Embeddings of Knowledge Bases (2011)", + "http://datahub.io/": "The Datahub", + "http://www.businessinsider.com/spritz-speed-reading-gifs-2014-2": "How To Read A 223-Page Novel In Just 77 Minutes", + "http://www.abigailsee.com/2017/04/16/taming-rnns-for-better-summarization.html?utm_content=buffer2fba4&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Taming Recurrent Neural Networks for Better Summarization Abigail See", + "https://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string": "What is the best way to remove accents in a Python unicode string? - Stack Overflow", + "http://textminingonline.com/dive-into-nltk-part-v-using-stanford-text-analysis-tools-in-python": "Dive Into NLTK, Part V: Using Stanford Text Analysis Tools in Python \u2013 Text Mining Online", + "http://www.siteduzero.com/tuto-3-10663-1-partage-internet.html": "Partage Internet sur mac osx", + "http://www.space.com/13883-nasa-jupiter-moon-europa-lander-mission.html": "Jupiter's Moon Europa is Target for Possible NASA Lander Jupiter & Moons Europa & Search for Alien Life Space.com", + "https://www.udacity.com/self-driving-car": "Open Source Self-Driving Car Udacity", + "https://medium.com/@chengweizhang2012/how-to-do-unsupervised-clustering-with-keras-9e1284448437": "How to do Unsupervised Clustering with Keras \u2013 Chengwei Zhang \u2013 Medium", + "http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir": "[2102.07043] Reasoning Over Virtual Knowledge Bases With Open Predicate Relations", + "http://www.semanlink.net/doc/2020/04/knowledge_distillation_neural": "Knowledge Distillation - Neural Network Distiller", + "http://www.semantic-web-days.net/": "Semantic Web Days", + "https://en.wikipedia.org/wiki/Monstrous_moonshine": "Monstrous moonshine - Wikipedia", + "http://www.lycee-chateaubriand.fr/cru-atala/publications/logeat_antiquite.htm": "Version grecque", + "http://www.semanlink.net/doc/2020/07/snorkel_is_a_fundamentally_new_": "Snorkel is a fundamentally new interface to ML without hand-labeled training data", + "http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont": "[2010.00402] From Trees to Continuous Embeddings and Back: Hyperbolic Hierarchical Clustering", + "http://www.semanlink.net/doc/2019/08/rakuten_nlp_category2vec": "rakuten-nlp/category2vec (2015)", + "http://www.lemonde.fr/opinions/article/2010/04/10/la-banlieue-s-ennuie-par-tahar-ben-jelloun_1331700_3232.html": "La banlieue s'ennuie, par Tahar Ben Jelloun", + "http://ebiquity.umbc.edu/blogger/2010/07/16/google-acquires-metaweb-and-freebase/": "Google acquires Metaweb and Freebase", + "http://techland.time.com/2013/04/02/an-interview-with-computing-pioneer-alan-kay/": "An Interview with 
Computing Pioneer Alan Kay TIME.com", + "http://pyvandenbussche.info/2017/translating-embeddings-transe/": "Translating Embeddings (TransE) \u2013 Pierre-Yves Vandenbussche", + "http://www.nzdl.org/Kea/index.html": "Kea (Keyphrase Extraction Algorithm)", + "http://www.semanlink.net/doc/2020/12/textgraphs_2020": "TextGraphs 2020", + "http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as": "[1909.01066] Language Models as Knowledge Bases?", + "http://www.semanlink.net/doc/2019/11/graph_neural_networks_for_natur": "Graph Neural Networks for Natural Language Processing tutorial at EMNLP 2019", + "http://hdl.handle.net/2142/97430": "Examination of machine learning methods for multi-label classification of intellectual property documents (2017)", + "http://data.semanticweb.org/pdfs/iswc/2009/in-use/paper143.pdf": "Extracting Enterprise Vocabularies Using Linked Open Data", + "http://infoheap.com/url-encode-decode-online-tool/": "Url encode decode online tool - InfoHeap", + "http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd": "[1806.06478] Co-training Embeddings of Knowledge Graphs and Entity Descriptions for Cross-lingual Entity Alignment", + "http://www.r-project.org/": "The R Project for Statistical Computing", + "https://www.debuggex.com/": "Debuggex: Online visual regex tester. JavaScript, Python, and PCRE.", + "http://coenraets.org/blog/2011/12/restful-services-with-jquery-and-java-using-jax-rs-and-jersey/": "RESTful services with jQuery and Java using JAX-RS and Jersey", + "https://twitter.com/RubenVerborgh/status/815212205935394817": "Ruben Verborgh sur Twitter : \"Just published 25,000+ #RDF triples of my own data as #OpenData: https://t.co/1jqy3ZgrjJ Query them live: https://t.co/B96KBPnG9C #dogfood\"", + "http://24ways.org/": "24 ways", + "http://www.xml.com/pub/a/2005/06/22/skos.html": "XML.com: Introducing SKOS", + "https://hal.archives-ouvertes.fr/hal-01517094": "Learning Concept-Driven Document Embeddings for Medical Information Search (2017)", + "https://www.microsoft.com/en-us/research/publication/enriching-word-embeddings-using-knowledge-graph-for-semantic-tagging-in-conversational-dialog-systems/": "Enriching Word Embeddings Using Knowledge Graph for Semantic Tagging in Conversational Dialog Systems - Microsoft Research (2015)", + "http://www.semanlink.net/doc/2019/10/sebastian_ruder_sur_twitter_n": "Sebastian Ruder sur Twitter : network embeddings in the biomedical domain. 
@eurnlp #EurNLP2019", + "http://www.semanlink.net/doc/2019/09/cs224n_natural_language_proces": "CS224n: Natural Language Processing with Deep Learning Stanford / Winter 2019", + "http://www.examiner.com/anonymous-in-national/anonymous-hacks-monsanto-operation-green-rights-begins": "Anonymous hacks Monsanto: Operation Green Rights begins - National Anonymous Examiner.com", + "http://laurentdupin.wordpress.com/2013/12/09/le-web-the-next-10-years-en-10-courts-metrages-sf/": "Le web : \"the next 10 years\"\u2026 en 10 courts m\u00e9trages SF Le Mixer", + "https://blog.algolia.com/how-to-build-a-helpful-search-for-technical-documentation-the-laravel-example/?utm_content=buffer45ce6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "How to Build a Helpful Search for Technical Documentation : The Laravel Example Milliseconds Matter", + "http://stitch.cs.vu.nl/demo.html": "DEMO of the STITCH project (\"Semantic Interoperability To access Cultural Heritage\")", + "https://stats.stackexchange.com/questions/352036/what-should-i-do-when-my-neural-network-doesnt-learn": "What should I do when my neural network doesn't learn?", + "http://www.semanlink.net/doc/2020/11/how_to_allow_deep_learning_on_y": "How to allow deep learning on your data without revealing the data \u2013 Off the convex path", + "http://www.spritzinc.com/": "Spritz", + "https://cmusphinx.github.io/wiki/tutorialconcepts/": "Basic concepts of speech recognition \u2013 CMUSphinx Open Source Speech Recognition", + "https://twitter.com/cecilejanssens/status/1104134423673479169": "Cecile Janssens sur Twitter : \"The area under the ROC curve (AUC) is so frequently criticized...\"", + "http://code.google.com/p/linked-data-api/wiki/Specification": "Specification - linked-data-api - Linked Data API Specification - Project Hosting on Google Code", + "https://arxiv.org/abs/1807.03748": "[1807.03748] Representation Learning with Contrastive Predictive Coding", + "http://www.snee.com/bobdc.blog/2014/12/hadoop.html": "Hadoop:What it is and how people use it: my own summary. bobdc.blog", + "http://wafflewithmeaning.com/2008/01/10/installing-php-mysql-and-phpmyadmin-on-os-x-105-leopard/": "Waffle With Meaning \u00bb Installing PHP, MySQL and phpMyAdmin on OS X 10.5 (Leopard)", + "http://tenbyten.org/10x10.html": "10x10 / 100 Words and Pictures that Define the Time / by Jonathan J. 
Harris", + "http://www.youtube.com/watch?v=ure2RdTZm8c": "Miriam Makeba (Mama Africa) - Khawuleza 1966", + "https://twitter.com/dpkingma/status/1070856305831624704": "Durk Kingma sur Twitter : about likelihood-based generative models", + "http://www.r-bloggers.com/sparql-with-r-in-less-than-5-minutes/": "SPARQL with R in less than 5 minutes (R news & tutorials)", + "http://filiu.blog.lemonde.fr/2016/07/09/comment-les-etats-unis-viennent-de-sauver-daech-dune-defaite-annoncee-en-syrie/": "Comment les Etats-Unis viennent de sauver Daech d\u2019une d\u00e9faite annonc\u00e9e en Syrie Un si Proche Orient", + "http://www.bbc.co.uk/news/technology-25667292": "BBC News - Cicada 3301: The dark net treasure trail reopens", + "http://queue.acm.org/detail.cfm?id=1961297": "A co-Relational Model of Data for Large Shared Data Banks - ACM Queue", + "http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra": "[2107.00676] A Primer on Pretrained Multilingual Language Models", + "http://www.comp.glam.ac.uk/pages/research/hypermedia/nkos/nkos2008/": "Networked Knowledge Organization Systems and Services The 7th European Networked Knowledge Organization Systems (NKOS) Workshop", + "http://www.nytimes.com/2012/07/27/science/cousins-of-neanderthals-left-dna-in-africa-scientists-report.html": "Cousins of Neanderthals Left DNA in Africa, Scientists Report - NYTimes.com", + "http://es.wikipedia.org/wiki/Yag%C3%A1n": "Yag\u00e1n - Wikipedia, la enciclopedia libre", + "http://answers.semanticweb.com/questions/3858/rdf-store-for-use-with-nodejs": "RDF store for use with node.js?", + "http://en.wikipedia.org/wiki/Touch_of_Evil": "Touch of Evil - Wikipedia, the free encyclopedia", + "http://blog.dbtune.org:80/post/2007/06/11/Linking-open-data:-interlinking-the-Jamendo-and-the-Musicbrainz-datasets": "Linking open data: interlinking the Jamendo and the Musicbrainz datasets - DBTune blog", + "http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep": "[2102.11107] Towards Causal Representation Learning", + "http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be": "UKPLab/beir: A Heterogeneous Benchmark for Information Retrieval.", + "http://www.semanlink.net/doc/2020/02/my_first_nn_part_3_multi_layer": "My First NN Part 3. Multi-Layer Networks and Backpropagation Scott H. Hawley (alt. 
blog via fastpages)", + "http://www.humanite.fr/bernard-stiegler-nous-devons-rendre-aux-gens-le-temps-gagne-par-lautomatisation-609824": "Bernard Stiegler \u00ab\u2009Nous devons rendre aux gens le temps gagn\u00e9 par l\u2019automatisation\u2009\u00bb L'Humanit\u00e9", + "http://jena.sourceforge.net/ARQ/Tutorial/data.html": "ARQ - SPARQL Tutorial", + "http://www.geospatialsemanticweb.com/wp-content/uploads/2006/07/01637364.pdf": "The Semantic Web Revisited", + "http://www.semanlink.net/doc/2019/11/fastai_with_transformers_bert_": "Fastai with Transformers (BERT, RoBERTa, XLNet, XLM, DistilBERT)", + "http://news.bbc.co.uk/1/hi/sci/tech/4199138.stm": "BBC NEWS - Environment key to helping poor", + "http://www.cell.com/cell/pdf/S0092-8674(17)30538-X.pdf": "The Code for Facial Identity in the Primate Brain", + "http://en.wikipedia.org/wiki/It's_a_Wonderful_Life": "It's a Wonderful Life", + "https://www.bbc.com/news/science-environment-47659640": "Tasmanian devils 'adapting to coexist with cancer' - BBC News", + "http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input": "Making sense of raw input", + "https://towardsdatascience.com/dealing-with-imbalanced-classes-in-machine-learning-d43d6fa19d2": "Dealing with Imbalanced Classes in Machine Learning", + "https://hal.archives-ouvertes.fr/hal-01910235": "Using Monolingual Data in Neural Machine Translation: a Systematic Study", + "http://colin-verdier.com/les-fossoyeurs-de-l-innovation/": "Les fossoyeurs de l\u2019innovation L'\u00c2ge de la multitude", + "http://blog.wolfram.com/2014/02/24/starting-to-demo-the-wolfram-language/": "Starting to Demo the Wolfram Language\u2014Wolfram Blog", + "http://www.semanlink.net/doc/2020/08/les_steles_perdues_d%E2%80%99ethiopie_%7C": "Les st\u00e8les perdues d\u2019\u00c9thiopie CNRS Le journal", + "http://www.w3.org/2001/sw/wiki/Main_Page": "Semantic Web Standards - wiki at w3c", + "http://roy.gbiv.com/untangled/2008/rest-apis-must-be-hypertext-driven": "REST APIs must be hypertext-driven \u00bb Untangled", + "http://www.spurl.net/discover/user/fps/": "", + "http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_": "[2007.00077] Similarity Search for Efficient Active Learning and Search of Rare Concepts", + "https://spinningup.openai.com/en/latest/spinningup/keypapers.html": "Key Papers in Deep RL \u2014 OpenAI - Spinning Up documentation", + "http://cifre.anrt.asso.fr/": "Conventions CIFRE", + "http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of": "[1910.01348] On the Efficacy of Knowledge Distillation", + "http://www.bbc.com/news/technology-28976849": "BBC News - Millions of historic images posted to Flickr", + "http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin": "[1911.01464] Emerging Cross-lingual Structure in Pretrained Language Models", + "http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol": "[2002.12327] A Primer in BERTology: What we know about how BERT works", + "http://www.universcience.tv/video-la-vitesse-de-la-lumiere-3139.html": "Mesurer le vitesse de la lumi\u00e8re dans sa cuisine", + "http://internetactu.blog.lemonde.fr/2018/03/04/aux-origines-de-crispr/": "Aux origines de CRISPR", + "http://www.lemonde.fr/imprimer/article/2012/02/20/1645991.html": "Des auteurs et des ayants droit contre la num\u00e9risation des \u0153uvres indisponibles", + "http://tos-dr.info/": "Terms of Service; Didn't Read", + "https://www.quora.com/Why-dont-pure-functional-programming-languages-provide-a-loop-construct": "Why don't pure functional 
programming languages provide a loop construct? - Quora", + "http://www.mspace.fm/projects/richtags/": "mSpace - Projects - Rich Tags", + "http://stackoverflow.com/questions/tagged/schema.org": "Newest 'schema.org' Questions - Stack Overflow", + "http://www.ebusiness-unibw.org/events/ecweb2014/": "EC-Web'14: The 15th International Conference on Electronic Commerce and Web Technologies", + "http://cidrdb.org/cidr2019/papers/p117-kraska-cidr19.pdf": "SageDB: A Learned Database System", + "http://www.semanlink.net/doc/2019/08/open_sourcing_hyperparameter_au": "Open-sourcing hyperparameter autotuning for fastText", + "http://rue89.nouvelobs.com/2016/06/20/crypto-monnaie-ether-connait-crise-a-rendre-jaloux-lehman-brothers-264401": "La crypto-monnaie Ether conna\u00eet une crise \u00e0 rendre jaloux Lehman Brothers - Rue89 - L'Obs", + "http://ode.openlinksw.com/": "OpenLink Data Explorer Extension", + "http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me": "[1906.02715] Visualizing and Measuring the Geometry of BERT", + "http://www.semanlink.net/doc/2020/06/lista_de_espera_waiting_list_": "Lista de espera (Waiting List) Trailer", + "http://www.newscientist.com/article/dn23020-did-learning-to-fly-give-bats-superimmunity.html": "Did learning to fly give bats super-immunity? - New Scientist", + "https://news.cnrs.fr/opinions/fermis-paradox-and-missing-aliens": "Fermi\u2019s Paradox and the Missing Aliens CNRS News", + "http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link": "Chengkai Li sur Twitter : \"Link prediction methods on knowledge graphs don't work...\"", + "http://web.mit.edu/remy/": "TCP ex Machina", + "http://www.mycarevent.com/Deliverables/DL3.4_Terminology_Method_GIIRM_FL_v01.00.pdf": "MyCarEvent - Terminology and Methodology for populating the generic information model", + "http://www.w3.org/2007/03/VLDB/": "RDF and SPARQL: Using Semantic Web Technology to Integrate the World's Data", + "https://www.researchgate.net/post/How_to_find_semantic_similarity_between_two_documents": "How to find semantic similarity between two documents? 
(researchgate)", + "http://blog.codeship.com/json-ld-building-meaningful-data-apis/": "JSON-LD: Building Meaningful Data APIs - via @codeship via @codeship", + "http://www.semanlink.net/doc/2020/01/davidsbatista_breds_bootstrap": "davidsbatista/BREDS: \"Bootstrapping Relationship Extractors with Distributional Semantics\" (Batista et al., 2015) - code for EMNLP'15 paper", + "http://www.physics.usyd.edu.au/~gekko/pinwheel.html": "WR 104: The prototype Pinwheel Nebula", + "http://www.semanlink.net/doc/2019/05/getting_my_personal_data_out_of": "Getting my personal data out of Facebook Ruben Verborgh", + "http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio": "[1908.08983] A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers", + "http://www.w3.org/TR/wordnet-rdf/": "RDF/OWL Representation of WordNet", + "http://stackoverflow.com/questions/11707971/cant-install-maven-scm-handler-for-egit-for-juno": "eclipse - Can't install Maven SCM Handler for EGit for Juno - Stack Overflow", + "http://www.paths-project.eu/": "PATHS EU project - PATHS", + "http://econoclaste.org.free.fr/dotclear/index.php/?2010/10/16/1748-benoit-mandelbrot-et-l-histoire-de-la-finance": "Beno\u00eet Mandelbrot et l'histoire de la finance", + "https://arxiv.org/abs/1903.05872v1": "[1903.05872] Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services", + "http://www.semanlink.net/doc/2019/06/_i_made_a_bet_that_a_naive_baye": "\"I made a bet that a Naive Bayes classifier would work as well on humor recognition as a neural net with fine-tuned Bert embeddings. I won\"", + "http://www.lemonde.fr/technologies/article/2010/07/16/pouvons-nous-devenir-plus-intelligents-individuellement-comme-collectivement_1389003_651865.html": "Pouvons-nous devenir plus intelligents, individuellement comme collectivement ?", + "https://newleftreview.org/II/91/evgeny-morozov-socialize-the-data-centres": "Socialize the data centers", + "https://jena.apache.org/documentation/query/text-query.html": "Jena Text", + "http://serverfault.com/questions/179372/mac-os-snow-leopard-why-does-my-mysql-default-socket-value-not-change-in-my-phpi": "php - Mac OS Snow Leopard: Why does my mysql.default_socket value not change in my phpinfo() page? - Server Fault", + "http://aclweb.org/anthology/P18-2020": "A Named Entity Recognition Shootout for German (2018)", + "http://www.semanlink.net/doc/2020/01/scraping_data_uhack_guide": "Scraping Data - UHack Guide", + "https://yashuseth.blog/2018/06/17/understanding-universal-language-model-fine-tuning-ulmfit/": "Understanding the Working of Universal Language Model Fine Tuning (ULMFiT) \u2013 Let the Machines Learn", + "http://support.google.com/webmasters/bin/answer.py?hl=en&answer=146750": "Rich snippets - Products", + "http://internetactu.blog.lemonde.fr/2012/11/30/linnovation-educative-une-question-economique/": "L\u2019innovation \u00e9ducative : une question \u00e9conomique ? InternetActu", + "http://passeurdesciences.blog.lemonde.fr/2014/10/05/les-poles-magnetiques-terrestres-peuvent-sinverser-brutalement/": "Les p\u00f4les magn\u00e9tiques terrestres peuvent s\u2019inverser brutalement Passeur de sciences", + "https://lejournal.cnrs.fr/articles/comment-nos-cellules-ont-elles-appris-a-respirer?utm_content=bufferda8e9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Comment nos cellules ont-elles appris \u00e0 respirer? 
CNRS Le journal", + "http://www.pbs.org/wgbh/nova/newton/einstein.html": "NOVA Einstein on Newton PBS", + "http://www.no-burn.org/wp-content/uploads/Report-April-22.pdf": "Greenpeace: Stories from the frontlines of the plastic waste trade", + "http://www.ldodds.com/blog/2009/10/surveying-and-classifying-sparql-extensions/": "Surveying and Classifying SPARQL Extensions \u00ab Lost Boy", + "https://conferences.oreilly.com/strata/strata-ny/public/schedule/detail/77597": "Supercharging Elasticsearch for extended Knowledge Graph use cases: Data science + business analytics training: Strata Data Conference", + "http://martinfowler.com/articles/nosql-intro.pdf": "Polyglot persistence", + "http://www.bbc.com/news/science-environment-27935479": "BBC News - Cosmic inflation: Confidence lowered for Big Bang signal", + "http://www.semanlink.net/doc/2020/08/triple_classification_using_reg": "Triple Classification Using Regions and Fine-Grained Entity Typing (AAAI 2019)", + "http://googlewebmastercentral.blogspot.fr/": "Official Google Webmaster Central Blog", + "http://www.topquadrant.com/w3c/RDFa/": "TopQuadrant's RDFa Implementation Report", + "http://java.sun.com/applets/jdk/1.1/demo/GraphLayout/": "Graph Layout Graph Layout : sun jdk1.0 sample code", + "http://www.semanticweb.com/insight/article.php/12163_3700611_1": "Oracle Sees Semantic Tech Solving Business Problems", + "http://www.w3.org/TR/grddl/": "Gleaning Resource Descriptions from Dialects of Languages (GRDDL)", + "http://www.youtube.com/watch?v=OMFBuHsKXb0&feature=player_embedded": "Google Home View", + "http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/disp-bizer.pdf": "Turning the Web into a Database", + "http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_": "[2002.05867] Transformers as Soft Reasoners over Language", + "http://openjena.org/wiki/TDB/QuadFilter": "TDB/QuadFilter - Jena Wiki", + "http://www.devx.com/semantic/Article/35480/1954": "Create Scalable Semantic Applications with Database-Backed RDF Stores", + "http://ccl.northwestern.edu/netlogo/": "NetLogo Home Page", + "http://www.htxt.co.za/2013/12/04/hacking-for-kids-project-afrimakers-reaches-crowdfunding-milestone/": "htxt.africa Hacking for kids project Afrimakers reaches crowdfunding milestone", + "http://blogs.talis.com/nodalities/files/2008/09/mic_2007_01.jpg": "mic_2007_01.jpg (Image JPEG, 200x218 pixels)", + "http://www.semanlink.net/doc/2021/03/asahi417_tner_language_model_f": "asahi417/tner: Language model finetuning on NER", + "http://web.archive.org/web/19981202230410/http://www.google.com/": "Google page in 1998 (\"Beta\")", + "http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20markov%20distribute.pdf": "A Markov Model for Driver Turn Prediction", + "http://www.w3.org/2001/tag/doc/httpRange-14/HttpRange-14.html": "Dereferencing HTTP URIs", + "https://en.wikipedia.org/wiki/Jharia_coalfield": "Jharia coalfield", + "http://arxiv.org/abs/1603.05106v1": "[1603.05106] One-Shot Generalization in Deep Generative Models", + "http://edition.cnn.com/2013/03/16/opinion/schneier-internet-surveillance/index.html?eref=edition": "Opinion: The Internet is a surveillance state - CNN.com", + "http://fishbowl.pastiche.org/2003/01/13/when_is_a_constant_not_a_constant/": "When is a constant not a constant? 
- The Fishbowl", + "http://fgiasson.com/blog/index.php/2007/05/22/browsing-musicbrainzs-dataset-via-uri-dereferencing/": "Browsing Musicbrainz\u2019s dataset via URI dereferencing at Frederick Giasson\u2019s Weblog", + "http://www.semanlink.net/doc/2021/08/la_crise_climatique_s%E2%80%99aggrave_p": "La crise climatique s\u2019aggrave partout, \u00e0 des niveaux sans pr\u00e9c\u00e9dent, alerte le GIEC", + "https://arxiv.org/abs/1706.03762": "[1706.03762] Attention Is All You Need", + "http://www.sitepoint.com/blogs/2009/03/19/obama-groundbreaking-use-semantic-web/": "Obama\u2019s Groundbreaking use of the Semantic Web", + "http://www.semanlink.net/doc/2020/07/the_top_of_my_todo_list": "The Top of My Todo List", + "http://de.wikipedia.org/wiki/Die_Welle_(2008)": "Die Welle (2008) \u2013 Wikipedia", + "https://guillaumegenthial.github.io/testing.html": "Testing Tensorflow code", + "http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is": "[1912.03263] Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One", + "http://www.youtube.com/watch?v=AgHHX9R4Qtk": "Sarah Silverman and The Great Schlep", + "http://www.semanlink.net/doc/2020/10/ssp_semantic_space_projection_": "SSP: Semantic Space Projection for Knowledge Graph Embedding with Text Descriptions (AAAI 2017)", + "https://en.wikipedia.org/wiki/Volver": "Volver", + "http://groups.google.com/group/rdf123/browse_thread/thread/ca8c79c26ec0dc40/8a8c7781dacfb0cc?hl=en#8a8c7781dacfb0cc": "Applying a character transformation to the content of a cell when creating an URI - RDF123 Google Groups", + "http://iws.seu.edu.cn/services/falcons/objectsearch/index.jsp": "Falcons", + "http://www.semanlink.net/doc/2020/08/scikit_multilearn_scikit_multil": "scikit-multilearn/scikit-multilearn: A scikit-learn based module for multi-label et. al. classification", + "http://www.crpf-poitou-charentes.fr/IMG/pdf/merisier.pdf": "Merisier", + "http://www.wired.com/gadgetlab/2010/07/hardware-hobbyists-arduino/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous": "Why Arduino Is a Hit With Hardware Hackers Gadget Lab Wired.com", + "https://pxlnv.com/blog/bullshit-web/": "The Bullshit Web \u2014 Pixel Envy", + "http://www.theguardian.com/politics/live/2016/jun/25/brexit-live-emergency-meetings-eu-uk-leave-vote#comment-77205935": "If Boris Johnson looked downbeat yesterday, that is because he realises that he has lost. 
(Guardian)", + "http://www.ina.fr/pub/alimentation-boisson/video/PUB777003071/perrier-ivresse.fr.html": "Perrier : Ivresse", + "http://www.datavirtuality.com": "Data Virtuality", + "http://www.lemonde.fr/idees/article/2011/01/08/edgar-morin-les-nuits-sont-enceintes_1462821_3232.html": "Edgar Morin : \"Les nuits sont enceintes et nul ne conna\u00eet le jour qui na\u00eetra.\"", + "http://ngmodules.org/modules/angular-marked": "angular-marked", + "http://www.semanlink.net/doc/2021/01/yuval_noah_harari_quotes": "Yuval Noah Harari Quotes", + "http://www.guardian.co.uk/world/2013/jun/10/nsa-spying-scandal-what-we-have-learned": "NSA spying scandal: what we have learned World news guardian.co.uk", + "http://lists.whatwg.org/pipermail/whatwg-whatwg.org/2009-January/018249.html": "connecting up to the MCF heritage", + "https://fr.slideshare.net/FrankKelly3/hierarchical-clustering-in-python-and-beyond": "Hierarchical clustering in Python and beyond", + "http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su": "[1901.00596] A Comprehensive Survey on Graph Neural Networks", + "http://www.europeana.eu/portal/": "Europeana - Homepage", + "http://www.semanlink.net/doc/2019/05/extreme_rare_event_classificati": "Extreme Rare Event Classification using Autoencoders in Keras (2019)", + "https://mc.ai/gcp-and-fast-ai-v1-a-full-setup-thatll-work/": "GCP and Fast Ai v1: A full setup that\u2019ll work \u2013 mc.ai", + "http://datao.net": "DataO - Browser of the Web of Data", + "http://www.newscientist.com/article/mg21028144.100-african-land-grab-could-lead-to-future-water-conflicts.html?DCMP=OTC-rss&nsref=online-news": "African land grab could lead to future water conflicts - environment - 26 May 2011 - New Scientist", + "http://www.semanlink.net/doc/2021/10/linguistic_diversity": "Linguistic Diversity", + "http://www.uni-koblenz.de/confsec/tutorials/2010/MDD_MDA_MDE_OWL.html": "Model Driven Development with Semantic Web Technologies", + "http://www.lemonde.fr/idees/chronique/2010/07/02/la-mort-et-le-pib_1381908_3232.html": "La mort et le PIB", + "http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai": "NER algo benchmark: spaCy, Flair, m-BERT and camemBERT on anonymizing French commercial legal cases", + "http://deeplearning4j.org/word2vec.html": "Word2vec: Neural Word Embeddings in Java - Deeplearning4j: Open-source, distributed deep learning for the JVM", + "http://ourworld.compuserve.com/homepages/rajm/openesef.htm": "Facts versus Factions: the use and abuse of subjectivity in scientific research", + "http://dannyayers.com/2006/09/27/javascript-sparql-editor": "Javascript SPARQL editor", + "http://groups.drupal.org/node/20589": "Solr RDF Support Drupal Groups", + "http://www.wildml.com/2016/01/attention-and-memory-in-deep-learning-and-nlp/": "Attention and Memory in Deep Learning and NLP \u2013 WildML", + "http://craphound.com/littlebrother/about/": "Little Brother", + "http://www.semanlink.net/doc/2019/08/4th_workshop_on_representation_": "4th Workshop on Representation Learning for NLP", + "http://www.rollingstone.com/politics/news/global-warmings-terrifying-new-math-20120719": "Global Warming's Terrifying New Math Rolling Stone", + "http://tomcat.apache.org/tomcat-5.0-doc/index.html": "Tomcat 5 - Documentation Index", + "http://www.computerworld.com/article/2989830/it-careers/machines-are-replacing-writers-gartner-says.html": "Robotics, automation play a big role in Gartner's top 10 predictions Computerworld", + "http://www.w3.org/TR/rdf-sparql-query/": "SPARQL Query Language 
for RDF", + "http://www.w3.org/TR/rif-rdf-owl/#Overview": "RIF RDF and OWL Compatibility", + "http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc": "[2010.11882] Learning Invariances in Neural Networks", + "http://www.aclweb.org/anthology/W18-3012/": "Unsupervised Random Walk Sentence Embeddings: A Strong but Simple Baseline (Ethayarajh 2018)", + "http://www.videolan.org/": "VideoLAN - Free and Open Source software and video streaming solutions for every OS!", + "https://www.mdpi.com/2073-8994/11/4/453": "Entity Linking via Symmetrical Attention-Based Neural Network and Entity Structural Features (2019)", + "http://etherrag.blogspot.jp/2013/07/duck-duck-go-illusion-of-privacy.html": "Ether Rag: Duck Duck Go: Illusion of Privacy", + "https://twitter.com/fchollet/status/1105139360226140160": "Fran\u00e7ois Chollet sur Twitter : a crash course on everything you need to know to use TensorFlow 2.0 + Keras", + "http://www.snee.com/bobdc.blog/2008/04/rdf_and_social_networks.html": "RDF and social networks - bobdc.blog", + "http://www.itl.nist.gov/iad/mig/tests/ace/": "Automatic Content Extraction (ACE) Evaluation", + "http://esw.w3.org/topic/SweoIG/TaskForces/CommunityProjects/LinkingOpenData": "Linking-open-data - Wiki", + "http://linkeddata.deri.ie/services/tutorials/rdfa": "RDFa cheat sheet", + "http://www.speedtest.net/my-result/5527234144": "Speedtest.net by Ookla - My Results", + "http://www.semanlink.net/doc/2020/09/searching_with_shingles_%7C_elast": "Searching with Shingles Elastic Blog", + "https://www.quantamagazine.org/the-octonion-math-that-could-underpin-physics-20180720": "The Octonion Math That Could Underpin Physics Quanta Magazine", + "https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings": "LSTM with word2vec embeddings Kaggle", + "http://lists.w3.org/Archives/Public/semantic-web/2006Oct/0133.html": "Testing RDF vs HTML Content Negotiation from T.Heath on 2006-10-26 (semantic-web@w3.org from October 2006)", + "http://stackoverflow.com/questions/15257674/scikit-learn-add-features-to-a-vectorized-set-of-documents": "python - scikit-learn, add features to a vectorized set of documents - Stack Overflow", + "https://www.npmjs.com/package/markdown-it-hashtag": "markdown-it-hashtag", + "http://internetactu.blog.lemonde.fr/2012/07/13/vers-un-nouveau-monde-de-donnees/": "Vers un Nouveau Monde de donn\u00e9es InternetActu", + "http://blogs.sun.com/bblfish/entry/restful_semantic_web_services": "Restful semantic web services", + "http://www.semanlink.net/doc/2020/10/classifying_documents_without_a": "Classifying documents without any training data - Max Halford", + "http://internetactu.blog.lemonde.fr/2015/10/17/faut-il-prendre-leffondrement-au-serieux/": "Faut-il prendre l\u2019effondrement au s\u00e9rieux ? 
InternetActu", + "http://www.w3.org/TR/rdfa-in-html/": "HTML+RDFa 1.1", + "http://www.gqmagazine.fr/pop-culture/interview/articles/robert-mcliam-wilson-de-la-guerer-civile-a-belfast-au-terrorisme-a-paris/30310": "Le regard de Robert McLiam Wilson sur les attentats du 13 novembre GQ", + "http://www.w3.org/People/Ivan/CorePresentations/DataIntegration/Slides.html": "Data Integration on Semantic Web", + "http://googlewebmastercentral.blogspot.fr/2009/02/specify-your-canonical.html": "Official Google Webmaster Central Blog: Specify your canonical", + "https://dl.acm.org/citation.cfm?id=3159660": "Extreme Multi-label Learning with Label Features for Warm-start Tagging, Ranking & Recommendation (2018)", + "http://www.cs.man.ac.uk/~bmotik/publications/papers/mhs06constraints-report.pdf": "Integrating Description Logics and Relational Databases", + "http://www.bbc.co.uk/blogs/olympics/2008/08/is_phelps_really_the_greatest.html": "Is Phelps really the greatest? BBC SPORT Olympics 2008 blog", + "http://finance.blog.lemonde.fr/2013/01/08/le-ceo-destructeur-daig-reclame-25-milliards-a-letat-americain-qui-a-sauve-lentreprise/": "Le CEO destructeur d\u2019AIG r\u00e9clame 25 milliards \u00e0 l\u2019Etat am\u00e9ricain qui a sauv\u00e9 l\u2019entreprise. D\u00e9mystifier la finance", + "http://www.semantic-web-journal.net/": "www.semantic-web-journal.net", + "http://www.antipope.org/charlie/blog-static/2012/04/understanding-amazons-strategy.html": "What Amazon's ebook strategy means - Charlie's Diary", + "http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text": "Thesaurus-augmented Search with Jena Text ZBW Labs", + "http://www.3desite.fr/3de_brico_demont_arm.php": "Comment d\u00e9monter, une armoire \"Normande\" ?", + "http://fr.wikipedia.org/wiki/La_Reine_Margot_(film,_1994)": "La Reine Margot (film, 1994) - Wikip\u00e9dia", + "http://www.regular-expressions.info/java.html": "Using Regular Expressions in Java", + "http://www.cpdomina.net/papers/mscthesis09_final.pdf": "Probabilistic Reasoning in the Semantic Web using Markov Logic", + "http://www.imdb.com/name/nm0559249/": "Renato Matos - Filmography", + "http://www.semanlink.net/doc/2019/10/apache_poi_the_java_api_for_m": "Apache POI - the Java API for Microsoft Documents", + "http://www.dataversity.net/knowledge-maps-structure-versus-meaning/": "Knowledge Maps: Structure Versus Meaning - DATAVERSITY", + "http://fr.wikipedia.org/wiki/1729_(nombre)#Nombre_de_Hardy-Ramanujan": "1729 = 12^3 + 1^3 = 10^3 + 9^3", + "http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify": "[2001.07685] FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence", + "http://www.france5.fr/histoire_decouverte/articles/W00371/300/": "France 5 : Blancs de m\u00e9moire", + "http://www.khanacademy.org/": "Khan Academy", + "http://www.mkbergman.com/1009/deconstructing-the-google-knowledge-graph/": "Deconstructing the Google Knowledge Graph \u00bb AI3:::Adaptive Information", + "https://www.bergnet.org/2017/08/rdf-ext-v1-release/": "RDF-Ext v1 Release bergis reptile zoo of software, hardware and ideas", + "http://www.steinkern.de/": "Steinkern.de - Die Fossilien-Community - Startseite", + "https://www.researchgate.net/project/Theories-of-Deep-Learning": "Theories of Deep Learning by Hatef Monajemi Research Project on ResearchGate", + "http://www.semanlink.net/doc/2019/07/average_joe_sur_twitter_ever": "average joe sur Twitter : \"everyone please share your favorite not-english word or phrase.\"", + 
"http://wiki.apache.org/solr/LanguageAnalysis": "LanguageAnalysis - Solr Wiki", + "https://arxiv.org/abs/1607.07956": "[1607.07956] Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification (COLING 2016)", + "http://lareviewofbooks.org/review/enlightenment-google-wikileaks-reorganization-world#": "What Is Enlightenment?: Google, Wikileaks, and the Reorganization of the World The Los Angeles Review of Books", + "http://rinuboney.github.io/2016/01/19/ladder-network.html": "Introduction to Semi-Supervised Learning with Ladder Networks Rinu Boney", + "http://www.w3.org/wiki/ConverterToRdf": "ConverterToRdf - W3C Wiki", + "http://www.christian-faure.net/2016/03/13/credo-sur-lombre-des-lumieres/": "Credo sur l\u2019ombre des Lumi\u00e8res", + "http://lists.w3.org/Archives/Public/public-ldp-wg/2012Aug/0034.html": "Re: LDP interfaces in Java (based on Jena and JAX-RS) from J\u00fcrgen Jakobitsch on 2012-08-06 (public-ldp-wg@w3.org from August 2012)", + "http://www.lespetitescases.net/bilan-reflexion-sur-la-gestion-des-donnees-numeriques": "Bilan de 15 ans de r\u00e9flexion sur la gestion des donn\u00e9es num\u00e9riques Les petites cases", + "http://www.youtube.com/watch?v=g0gaIcfxtpc": "400m : remont\u00e9e de Raquil aux championnats du monde St denis", + "http://bigbrowser.blog.lemonde.fr/2012/08/17/perles-ils-ont-redore-le-blouson-de-lequipe-de-france/": "PERLES \u2013 Ils ont \u00ab\u00a0redor\u00e9 le blouson de l\u2019Equipe de France\u00a0\u00bb Big Browser", + "https://arxiv.org/abs/1712.09405": "[1712.09405] Advances in Pre-Training Distributed Word Representations", + "http://openspring.net/sites/openspring.net/files/corl-etal-2009iswc.pdf": "Produce and Consume Linked Data with Drupal! (full paper)", + "http://jena.hpl.hp.com/wiki/SDB/Dataset_Description": "SDB/Dataset Description - Jena wiki", + "http://www.semanlink.net/doc/2019/11/alexandria_ocasio_cortez_sur_tw": "Alexandria Ocasio-Cortez sur Twitter : \"When it comes to climate change, we are going to pay no matter what\"", + "http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw": "[1808.02590] A Tutorial on Network Embeddings", + "http://aclweb.org/anthology/P17-1170": "Towards a Seamless Integration of Word Senses into Downstream NLP Applications (2017)", + "https://nlpparis.files.wordpress.com/2018/11/monolingual.pdf": "Monolingual data in NMT", + "http://www.semanlink.net/doc/2019/11/clustering_of_semantically_enri": "Clustering of semantically enriched short texts (2019)", + "http://www.shirky.com/writings/evolve.html": "Shirky: In Praise of Evolvable Systems (1996)", + "http://www.w3.org/2006/07/SWD/wiki/SKOS/Semantics": "SKOS/Semantics - W3C Semantic Web Deployment Wiki", + "http://blogs.sun.com/bblfish/entry/w3c_workshop_on_the_future": "The W3C Workshop on the Future of Social Networking Position Papers", + "http://www.snee.com/bobdc.blog/2009/02/getting-started-using-virtuoso.html": "Getting started using Virtuoso as a triplestore - bobdc.blog", + "http://www.semanlink.net/doc/2021/08/ocasio_cortez_s_response_to_jib": "Ocasio-Cortez's response to jibes about college dance video? 
A congressional dance video The Guardian (2019)", + "https://www.oezratty.net/wordpress/2018/comprendre-informatique-quantique-pourquoi/": "Comprendre l\u2019informatique quantique", + "http://mallet.cs.umass.edu/": "MALLET homepage", + "http://www.wildml.com/2017/12/ai-and-deep-learning-in-2017-a-year-in-review/": "AI and Deep Learning in 2017 \u2013 A Year in Review \u2013 WildML", + "http://biblio.telecom-paristech.fr/cgi-bin/download.cgi?id=6694": "On Bayesian inference, maximum entropy and Support Vector Machines methods", + "http://www.semantic-mediawiki.org/wiki/Semantic_MediaWiki": "Semantic MediaWiki - Semantic-mediawiki.org", + "https://frcchang.github.io/tutorial/EMNLP2018_joint_models.pdf": "Joint Models in NLP - Slides - Tutorial (EMNLP 2018) - Yue Zhang", + "http://www.semanlink.net/doc/2021/09/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit": "(((\u0644()(\u0644() 'yoav))))\ud83d\udc7e sur Twitter : \"Text-based NP Enrichment\"", + "http://docs.openlinksw.com:80/virtuoso/rdfnativestorageproviders.html": "Virtuoso Jena Provider", + "http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent": "[2010.12309] A Survey on Recent Approaches for Natural Language Processing in Low-Resource Scenarios", + "https://github.com/facebookresearch/fairseq-py": "facebookresearch/fairseq-py: Facebook AI Research Sequence-to-Sequence Toolkit written in Python.", + "https://sermanet.github.io/imitate/": "Time-Contrastive Networks: Self-Supervised Learning from Video (2017)", + "http://www.flickr.com/photos/danbri/3282565132/": "The topic topic", + "http://www.opur.u-bordeaux.fr/": "International Organization For Dew Utilization", + "http://slifty.com/2012/08/a-tor-of-the-dark-web/": "A Tor of the Dark Web Sorry for the Spam", + "http://www.cs.stanford.edu/people/ang//papers/nips06-mapreducemulticore.pdf": "Map-Reduce for Machine Learning on multicore", + "http://www.youtube.com/watch?v=38spQlWHhno": "Marie-Jos\u00e9 P\u00e9rec, Atlanta 1996", + "http://www.mnot.net/blog/2006/05/11/browser_caching": "mnot\u2019s Web log: The State of Browser Caching", + "http://www.lemonde.fr/afrique/article/2017/08/06/un-dimanche-a-kigali-du-memorial-du-genocide-a-l-hotel-des-mille-combines_5169224_3212.html": "Un dimanche \u00e0 Kigali, du m\u00e9morial du g\u00e9nocide \u00e0 \u00ab\u00a0l\u2019h\u00f4tel des mille combines\u00a0\u00bb", + "https://www.nationalgeographic.com/science/2019/03/fossils-found-from-day-dinosaurs-died-chicxulub-tanis-cretaceous-extinction/": "Fossils may capture the day the dinosaurs died. Here's what you should know. (National Geographic)", + "http://www.onlamp.com/pub/a/onlamp/2005/04/22/bosworth.html": "ONLamp.com: Bosworth's Web of Data", + "http://stackoverflow.com/questions/111102/how-do-javascript-closures-work?noredirect=1&lq=1": "scope - How do JavaScript closures work? - Stack Overflow", + "http://bugbrother.blog.lemonde.fr/2010/06/02/le-tresor-de-guerre-de-wikileaks-une-gorge-profonde-chinoise/#xtor=RSS-32280322": "Le tr\u00e9sor de guerre de Wikileaks ? 
Une gorge profonde chinoise - BUG BROTHER", + "http://semanticweb.com/defending_the_warehouse_b17223#more-17223": "I\u2019ve got a Federated Bridge to Sell You (A Defense of the Warehouse) - semanticweb.com", + "http://www.lemonde.fr/pixels/article/2014/10/02/shellshock-la-faille-de-securite-majeure-decouverte-presque-par-hasard-par-un-francais_4498904_4408996.html": "Shellshock, la faille de s\u00e9curit\u00e9 majeure d\u00e9couverte \u00ab presque par hasard \u00bb par un Fran\u00e7ais", + "http://www.bloomberg.com/graphics/2015-paul-ford-what-is-code/": "Paul Ford: What is Code? Bloomberg", + "http://www.openculture.com/2019/02/an-animated-reconstruction-of-ancient-rome.html": "An Animated Reconstruction of Ancient Rome: Take A 30-Minute Stroll Through the City's Virtually-Recreated Streets Open Culture", + "http://www.theguardian.com/world/2016/mar/13/go-humans-lee-sedol-scores-first-victory-against-supercomputer": "Go humans: Lee Sedol scores first victory against supercomputer World news The Guardian", + "http://www.w3.org/2011/09/LinkedData/Report": "Workshop Report: Linked Enterprise Data Patterns", + "http://pt.wikipedia.org/wiki/Pedra_do_Ing%C3%A1": "Pedra do Ing\u00e1", + "http://bigbrowser.blog.lemonde.fr/2013/08/06/motus-aux-etats-unis-deux-enfants-interdits-de-parler-du-gaz-de-schiste/#xtor=RSS-32280322": "MOTUS \u2013 Aux Etats-Unis, deux enfants interdits de parler du gaz de schiste Big Browser", + "http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled": "[2012.04584] Distilling Knowledge from Reader to Retriever for Question Answering", + "http://sti.innoraise.com/profile/show/5350": "Me on INNORAISE", + "https://www.slideshare.net/pmika/what-happened-to-the-semantic-web": "What happened to the Semantic Web?", + "http://www.semanlink.net/doc/2020/03/ambiversenlu_a_natural_languag": "AmbiverseNLU: A Natural Language Understanding suite by Max Planck Institute for Informatics", + "https://www.ida.liu.se/research/semanticweb/events/GraphQLTutorialAtISWC2019.shtml": "ISWC 2019 Tutorial \"An Introduction To GraphQL\" - IDA", + "http://danbri.org/words/2010/11/30/615": "How to tell you\u2019re living in the future: bacterial computers, HTML and RDF", + "http://ebiquity.umbc.edu/blogger/2011/06/02/microdata-rdfa-google-bing-yahoo-semantic-web/": "Microdata chosen over RDFa for semantics by Google, Bing and Yahoo!", + "http://www.w3.org/blog/SW/2009/06/23/xsparql_published_as_a_w3c_submission": "W3C Semantic Web Activity News - XSPARQL published as a W3C Submission", + "http://www.semanlink.net/doc/2020/10/en_californie_la_%C2%AB_gig_economy": "En Californie, la \u00ab\u00a0gig economy\u00a0\u00bb soumise \u00e0 r\u00e9f\u00e9rendum", + "http://semwebdev.keithalexander.co.uk/blog/posts/use_erdf_why": "Why use eRDF?", + "http://www.semanlink.net/doc/2020/02/hugging_face_sur_twitter_to_": "Hugging Face sur Twitter : DistilBERT-cased for Question Answering w/ just 3 lines of javascript", + "https://medium.com/@madrugado/interesting-stuff-in-emnlp-part-i-4a79b5007eb1": "Interesting Stuff in EMNLP (part I) \u2013 Valentin Malykh \u2013 Medium", + "http://www.jenitennison.com/blog/node/165": "Microdata and RDFa Living Together in Harmony Jeni's Musings", + "http://www.arxiv-sanity.com/": "Arxiv Sanity Preserver", + "http://www-nlp.stanford.edu/wiki/Software/Classifier": "The Stanford classifier", + "http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top": "[2002.06504] Differentiable Top-k Operator with Optimal Transport", + 
"http://www.ldodds.com/blog/archives/000237.html": "Lost Boy: Using Jena in an Application Server", + "http://blog.datumbox.com/machine-learning-tutorial-the-max-entropy-text-classifier/": "Machine Learning Tutorial: The Max Entropy Text Classifier DatumBox", + "http://www.wired.com/wiredscience/2014/03/strangest-magma-earth-carbonatites-oldoinyo-lengai/": "Strangest Magma on Earth: Carbonatites of Ol Doinyo Lengai - Wired Science", + "http://www.w3.org/International/O-URL": "i18n/l10n: URL", + "http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo": "[1503.08677] Label-Embedding for Image Classification", + "https://cloud.google.com/blog/big-data/2017/10/intro-to-text-classification-with-keras-automatically-tagging-stack-overflow-posts": "Intro to text classification with Keras: automatically tagging Stack Overflow posts Google Cloud Big Data and Machine Learning Blog", + "http://en.wikipedia.org/wiki/Il_Divo_(film)": "Il Divo (film) - Wikipedia, the free encyclopedia", + "http://www.wired.co.uk/article/chinese-government-social-credit-score-privacy-invasion": "Big data meets Big Brother as China moves to rate its citizens WIRED UK", + "http://www.semanlink.net/doc/2020/04/andrej_karpathy_%7C_multi_task_le": "Andrej Karpathy Multi-Task Learning in the Wilderness \u00b7 SlidesLive", + "http://trdf.sourceforge.net/": "tRDF - Tools for Trust in the Web of Data", + "http://emnlp2018.org/schedule": "Conference Schedule - EMNLP 2018", + "http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga": "Google AI Blog: Harnessing Organizational Knowledge for Machine Learning (2019)", + "http://www.semanlink.net/doc/2020/01/en_inde_la_resistance_aux_anti": "En Inde, la r\u00e9sistance aux antibiotiques devient un probl\u00e8me sanitaire tr\u00e8s s\u00e9rieux", + "http://www.afrixml.net/": "AfriXML - Semantic Africa African Vocabularies, Taxonomies and Ontologies: XML. Web Services. RDF", + "http://www.amazingadgets.com/Gadgets/google-adsense/social-book-marking-script-clones-of-diggcom.php": "Social Book marking Script - Clones of Digg.com", + "http://www.ibm.com/developerworks/library/x-disprdf/index.html": "Integrate disparate data sources with Semantic Web technology", + "http://media.daimler.com/dcmedia/0-921-657591-1-1588269-1-0-0-0-0-0-11701-614316-0-1-0-0-0-0-0.html": "Daimler IT honored with the first ever \u201eEuropean Data Innovator Award\u201c Daimler Global Media Site", + "http://www.ted.com/talks/chrystia_freeland_the_rise_of_the_new_global_super_rich.html": "The rise of the new global super-rich Video on TED.com", + "http://blog.seevl.fm/2013/11/03/knowledge-graphs-for-discovery/": "Knowledge graphs utilised for content personalisation and discovery on the Web It's all about music discovery!", + "http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu": "[1907.04829] BAM! 
Born-Again Multi-Task Networks for Natural Language Understanding", + "http://lov.okfn.org/endpoint/lov?query=PREFIX+rdf:%3Chttp://www.w3.org/1999/02/22-rdf-syntax-ns%23%3E%0D%0APREFIX+xsd:%3Chttp://www.w3.org/2001/XMLSchema%23%3E%0D%0APREFIX+dcterms:%3Chttp://purl.org/dc/terms/%3E%0D%0APREFIX+rdfs:%3Chttp://www.w3.org/2000/01/rdf-schema%23%3E%0D%0APREFIX+owl:%3Chttp://www.w3.org/2002/07/owl%23%3E%0D%0APREFIX+skos:%3Chttp://www.w3.org/2004/02/skos/core%23%3E%0D%0APREFIX+foaf:%3Chttp://xmlns.com/foaf/0.1/%3E%0D%0APREFIX+void:%3Chttp://rdfs.org/ns/void%23%3E%0D%0APREFIX+bibo:%3Chttp://purl.org/ontology/bibo/%3E%0D%0APREFIX+vann:%3Chttp://purl.org/vocab/vann/%3E%0D%0APREFIX+voaf:%3Chttp://purl.org/vocommons/voaf%23%3E%0D%0APREFIX+frbr:%3Chttp://purl.org/vocab/frbr/core%23%3E%0D%0APREFIX+lov:%3Chttp://lov.okfn.org/dataset/lov/lov%23%3E%0D%0APREFIX+rev:%3Chttp://purl.org/stuff/rev%23%3E%0D%0A%0D%0ASELECT+DISTINCT+%3Fperson+%3Fname+%3Fv%0D%0A%0D%0AWHERE%0D%0A%0D%0A%7B++++++%3Fv+a+voaf:Vocabulary.%0D%0A+++++++%7B%3Fv+dcterms:creator+%3Fperson%7D%0D%0A++++++++UNION+%0D%0A+++++++%7B%3Fv+dcterms:contributor+%3Fperson%7D.%0D%0A+++++++%3Fperson+a+foaf:Person.%0D%0A+++++++%3Fperson+foaf:name+%3Fname+%0D%0A+++++++FILTER(contains(str(%3Fperson),%22google.com%22))%0D%0A%7D%0D%0A+%0D%0AORDER+BY+%3Fname%0D%0A&format=HTML": "(LOV-E) Linked Open Vocabularies Endpoint", + "http://www.semanlink.net/doc/2019/06/transferable_neural_projection_": "Transferable Neural Projection Representations (2019)", + "http://www.semanlink.net/files/2007/12/js-rdf-parser-ie-modifs2tabulator.zip": "Javascript RDF Parser in ie", + "http://purl.org/makolab/caont/": "CAO: Car Audio Ontology", + "http://semarglproject.org/index.html": "Semargl: better linked data processing", + "http://fgiasson.com/blog/index.php/2007/09/28/turbocharge-your-links-with-zlinks/": "Frederick Giasson\u2019s Weblog \u00bb Turbocharge your Links with zLinks", + "http://www.semanlink.net/doc/2021/06/africanlp_workshop": "AfricaNLP Workshop", + "http://ruben.verborgh.org/publications/verborgh_tplp_2016/": "The Pragmatic Proof: Hypermedia API Composition and Execution Ruben Verborgh", + "http://bnode.org/blog/2010/07/30/dynamic-semantic-publishing-for-any-blog-part-1": "Dynamic Semantic Publishing for any Blog (Part 1) - benjamin nowack's blog", + "https://drive.google.com/file/d/15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP/view": "Frontiers of Natural Language Processing (Deep Learning Indaba 2018, Stellenbosch, South Africa)", + "https://www2018.thewebconf.org/program/tutorials-track/tutorial-225/": "TUTORIAL: Representation Learning on Networks - TheWebConf 2018", + "http://nlp2rdf.org/": "NLP2RDF Converting NLP tool output to RDF", + "http://www.vogella.com/articles/EGit/article.html": "Git version control with Eclipse (EGit) - Tutorial", + "https://support.google.com/webmasters/answer/96569?hl=en": "rel=\"nofollow\" - Webmaster Tools Help", + "https://nbviewer.jupyter.org/github/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Poincare%20Tutorial.ipynb": "Tutorial on Poincar\u00e9 Embeddings (Jupyter Notebook )", + "http://www.paulgraham.com/say.html": "What You Can't Say", + "http://www.semanlink.net/doc/2019/08/accelerating_towards_natural_la": "Accelerating Towards Natural Language Search with Graphs", + "http://data.bnf.fr/": "databnf", + "http://www.newswise.com/articles/view/521790": "Newswise Cracking the Secret Codes of Europe's Galileo Satellite", + "https://www.airpair.com/nlp/keyword-extraction-tutorial": "NLP keyword extraction tutorial with RAKE 
and Maui", + "http://www.bbc.co.uk/blogs/internet/entries/afdf2190-4e60-3dfc-b15f-fc17f88c85a1": "BBC Blogs - Internet Blog - Opening up the BBC's Linked Data with /things", + "http://tech.groups.yahoo.com/group/jena-dev/message/36263": "Message: RE: [jena-dev] ARQ property functions: is it possible to use a blank node as argObject?", + "http://www.semanlink.net/doc/2021/07/plotly_the_front_end_for_ml_an": "Plotly: The front end for ML and data science models", + "http://www.w3.org/2012/pyRdfa/": "RDFa 1.1 Distiller and Parser", + "http://www.maa.org/devlin/LockhartsLament.pdf": "A Mathematician\u2019s Lament", + "http://hieraki.goodlad.ca/read/chapter/6#page10": "Combining XMLHttpRequest and Rails to Produce More Efficient UIs Generalized JavaScript Functions", + "http://www.feynmanlectures.info/docroot/I_toc.html": "The Feynman Lectures on Physics", + "http://semanticweb.com/start-your-semantic-engines-truecar-looks-to-foster-transition-of-vehicle-data-from-flat-to-structured-and-enhanced_b37899?goback=%2Egde_1630687_member_249670861": "Start Your Semantic Engines: TrueCar Looks To Foster Transition Of Vehicle Data From Flat To Structured And Enhanced - semanticweb.com", + "http://blog.echen.me/2011/08/22/introduction-to-latent-dirichlet-allocation/": "Introduction to Latent Dirichlet Allocation", + "http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted": "[2110.08207] Multitask Prompted Training Enables Zero-Shot Task Generalization", + "http://www.mkyong.com/maven/how-to-include-library-manully-into-maven-local-repository/": "How to include library manually into maven local repository?", + "http://nymag.com/daily/intelligencer/2017/11/daniel-ellsberg-on-the-doomsday-machine.html": "Daniel Ellsberg on \u2018The Doomsday Machine\u2019", + "http://www.snee.com/bobdc.blog/2015/01/r-and-sparql-part-1.html": "R (and SPARQL), part 1 - bobdc.blog", + "http://www.semanticoverflow.com/questions/2986/goodrelations-based-rich-snippet-example-in-google-query": "goodrelations-based rich snippet example in google query - Semantic Overflow", + "http://www.kaidangaskia.com/": "Kaidan Gaskia", + "http://lists.w3.org/Archives/Public/public-lod/2010Apr/0278.html": "Comments on Data 3.0 manifesto", + "http://www.semanlink.net/doc/2019/05/deep_locality_sensitive_hashing": "Deep Locality Sensitive Hashing", + "http://www.w3.org/wiki/Activity_Streams": "Activity Streams - W3C Wiki", + "http://www.cio-online.com/contributions/lire-le-dsi-face-a-la-complexite-653.html": "Le DSI face \u00e0 la complexit\u00e9", + "https://dl.acm.org/citation.cfm?id=3210036": "Cross-Modal Retrieval in the Cooking Context", + "http://www.slowfood.com/slowfish/pagine/eng/pagina.lasso?-id_pg=202": "Brazil - Go Fish - Which Fish? 
Slow Fish - Local Sustainable Fish", + "http://www.mkbergman.com/825/fresh-perspectives-on-the-semantic-enterprise/": "Fresh Perspectives on the Semantic Enterprise \u00bb AI3:::Adaptive Information", + "http://www.youtube.com/watch?v=d5Pb9nykjQA": "Les statues meurent aussi", + "http://www.mkbergman.com/1037/enterprise-scale-semantic-systems/": "Enterprise-scale Semantic Systems AI3:::Adaptive Information", + "http://jqapi.ru/": "jQuery 1.5 Cheatsheet by Future Colors", + "http://www.boston.com/business/globe/articles/2005/10/10/you_need_not_be_paranoid_to_fear_rfid?mode=PF": "You need not be paranoid to fear RFID - The Boston Globe", + "http://www.snee.com/bobdc.blog/2010/08/trying-sparql-11-new-query-fea.html": "Trying SPARQL 1.1 new query features with ARQ - bobdc.blog", + "http://www.semanlink.net/doc/2019/12/introducing_the_annotated_text_": "Introducing the Annotated Text Plugin for Elasticsearch: Search for Things (not Strings) Elastic Blog", + "http://research.microsoft.com/en-us/um/people/joshuago/tr-10-98.pdf": "An empirical study of smoothing techniques for language modeling", + "http://dannyayers.com/2011/08/26/Data-Oriented-Web-Browser": "Data-Oriented Web Browser", + "http://colah.github.io/": "Colah's blog", + "http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo8.html": "National Geographic News Photo Gallery: Megaflyover: Documenting Africa's Last Wild Places", + "http://sofakolle.planeteafrique.com/index.asp?p=65&m=167": "John Sofakoll\u00e9", + "http://yami2.com/films/la_mise_a_mort_du_travail_fr.php": "La mise \u00e0 mort du travail", + "http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_survey.html": "TechnicaLee Speaking: Using RDF on the Web: A Survey", + "https://fr.wikipedia.org/wiki/Ingeburge_de_Danemark#Biographie": "Ingeburge de Danemark", + "http://www.wired.com/2015/01/architecture-and-vision-warkawater/": "A Bamboo Tower That Produces Water From Air WIRED", + "http://www.artfact-online.fr/blog/blog-post/6": "Quick review on Text Clustering and Text Similarity Approaches", + "http://www.semanlink.net/doc/2019/10/getting_robots_to_listen_using": "Getting robots to listen: Using Watson's Speech to Text service - Watson", + "https://perso.limsi.fr/Individu/allauzen/doc/aa_deep_nlp.pdf": "Deep learning : background and application to natural language processing", + "http://www.semanlink.net/doc/2020/04/ceur_ws_org_vol_2377_workshop": "CEUR-WS.org/Vol-2377 - Workshop on Deep Learning for Knowledge Graphs 2019", + "https://stackoverflow.com/questions/29786985/whats-the-disadvantage-of-lda-for-short-texts": "nlp - What's the disadvantage of LDA for short texts? 
- Stack Overflow", + "http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/": "The lie of the API Ruben Verborgh", + "https://deepmind.com/blog/learning-to-generate-images/": "Learning to write programs that generate images DeepMind", + "http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu": "[2006.07264] Low-resource Languages: A Review of Past Work and Future Challenges", + "http://nlp.stanford.edu/software/": "The Stanford NLP (Natural Language Processing) Group", + "http://eductice.ens-lyon.fr/EducTice/ressources/journees-scientifiques/EWFE2012/": "Emerging Web Technologies, Facing the Future of Education \u2014 EducTice", + "https://www.quora.com/Computational-Neuroscience-Why-does-the-brain-adopt-mechanisms-like-sparse-coding-to-represent-the-environment": "Computational Neuroscience: Why does the brain adopt mechanisms like sparse coding to represent the environment? - Quora", + "http://www.semanlink.net/doc/2019/07/blogging_with_jupyter_notebooks": "Blogging with jupyter notebooks and jekyll - Claire Duvallet", + "http://www.presentations2go.eu/": "Presentations 2Go\u2122 Lecture Capture & Webcasting", + "http://www.openculture.com/2012/08/the_character_of_physical_law_richard_feynmans_legendary_lecture_series_at_cornell_1964.html": "'The Character of Physical Law': Richard Feynman's Legendary Course Presented at Cornell, 1964 Open Culture", + "http://www.dehora.net/journal/2004/08/metagraph_domain_knowledge_v_rdf.html": "Bill de h\u00d3ra: MetaGraph: Domain knowledge v RDF", + "http://www.semanlink.net/doc/2020/06/2006_09462_selective_question": "[2006.09462] Selective Question Answering under Domain Shift", + "https://arxiv.org/abs/1902.11269": "[1902.11269] Efficient Contextual Representation Learning Without Softmax Layer", + "http://news.bbc.co.uk/2/hi/science/nature/8043397.stm": "BBC NEWS 'Distributed power' to save Earth", + "http://www.paulgraham.com/mac.html": "Return of the Mac", + "http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_": "Deepak Nathani Pay Attention, Relations are Important", + "http://www.semanlink.net/doc/2020/04/7_alternatives_to_the_div_htm": "7 Alternatives to the div HTML Tag - Zac Heisey - Medium", + "https://www.html5rocks.com/en/tutorials/file/dndfiles/": "Reading local files in JavaScript - HTML5 Rocks", + "http://events.linkeddata.org/ldow2008/slides/Servant-ldow2008-slides.pdf": "Linking Enterprise Data (slides)", + "http://hplusmagazine.com/articles/ai/%E2%80%9C-neurons-brain%E2%80%9D-molecular-computer-evolves": "\u201cLike Neurons in the Brain\u201d: A Molecular Computer That Evolves h+ Magazine", + "http://www.cs.cornell.edu/courses/cs5740/2016sp/resources/dans.pdf": "Deep Unordered Composition Rivals Syntactic Methods for Text Classification (2015)", + "http://www.harmattan.fr/index.asp?navig=catalogue&obj=video&no=1092": "Vid\u00e9os/DVD L'\u00c9POP\u00c9E DE L'OR NOIR - Auteur-r\u00e9alisateur Jean-Pierre Beaurenaut et Yves Billon", + "http://www.semanlink.net/doc/2021/05/hyenes_film_": "Hy\u00e8nes (film)", + "https://spacy.io/": "spaCy - Industrial-strength Natural Language Processing in Python", + "http://docs.scipy.org/doc/numpy/user/whatisnumpy.html": "What is NumPy?", + "https://hackernoon.com/how-it-feels-to-learn-javascript-in-2016-d3a717dd577f": "How it feels to learn JavaScript in 2016 \u2013 Hacker Noon", + "https://www.horsesforsources.com/gartner_fail_automation-AI_080418": "Gartner\u00a0fails spectacularly with its 180 degree flip on\u00a0the impact of AI Automation on\u00a0jobs - 
Horses for Sources", + "http://www.programmableweb.com/news/cognitive-computing-makes-it-possible-to-build-truly-amazing-apps/analysis/2014/09/05?utm_content=buffer3c400&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Cognitive Computing Makes It Possible to Build Truly Amazing Apps ProgrammableWeb", + "http://www.semanlink.net/doc/2021/05/large_scale_evaluation_of_keyph": "Large-Scale Evaluation of Keyphrase Extraction Models (2020)", + "http://www.xml.com/pub/a/2007/04/04/introducing-rdfa-part-two.html": "XML.com: Introducing RDFa, Part Two", + "http://autourdemesromans.com/lexecution-de-semblancay-une-erreur-judiciaire/": "La condamnation de Semblan\u00e7ay\u00a0: une erreur judiciaire\u00a0? autourdemesromans.com", + "http://blog.sgo.to/2013/10/minha-entrevista-no-google.html": "Hello World: Minha Entrevista no Google", + "http://www2.cnrs.fr/en/2447.htm?utm_source=twitterfeed&utm_medium=twitter": "\"Immortal\" flatworms\u00a0: a weapon against bacteria - CNRS Web site - CNRS", + "https://cloud.google.com/blog/big-data/2016/07/understanding-neural-networks-with-tensorflow-playground": "Understanding neural networks with TensorFlow Playground Google Cloud Big Data and Machine Learning Blog \u00a0\u00a0 Google Cloud Platform", + "https://aclanthology.coli.uni-saarland.de/papers/D18-1092/d18-1092": "Self-Governing Neural Networks for On-Device Short Text Classification - Sujith Ravi Zornitsa Kozareva (2018)", + "https://en.wikipedia.org/wiki/Headhunters_(film)": "Headhunters (film)", + "http://www.semanlink.net/doc/2020/02/nlp_newsletter_the_annotated_g": "NLP Newsletter: The Annotated GPT-2, Understanding self-distillation, Haiku, GANILLA, Sparkwiki, Ethics in NLP, Torchmeta,\u2026", + "http://www.semanlink.net/doc/2020/11/paris%E2%80%99s_15_minute_city_could_be": "Paris\u2019s 15-Minute City Could Be Coming to an Urban Area Near You - Bloomberg", + "http://www.researchgate.net/publication/226638768_Highly_discriminative_statistical_features_for_email_classification": "Highly discriminative statistical features for email classification", + "http://www.mkbergman.com/?p=354": "Did You Blink? The Structured Web Just Arrived", + "http://ccil.org/~cowan/XML/tagsoup/": "TagSoup home page", + "http://science.sciencemag.org/content/358/6364/793?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News": "Similarity Searches: Flies categorize odors using a variant of locality-sensitive hashing, inspire new algorithm", + "http://arxiv.org/abs/0811.3701": "[0811.3701] Symmetric matrices related to the Mertens function", + "http://www.globalsecuritymag.fr/Les-DSI-deplorent-un-manque-de,20121106,33532.html": "Les DSI d\u00e9plorent un manque de culture num\u00e9rique des Comit\u00e9s de direction - Global Security Mag Online", + "http://fr.wikipedia.org/wiki/Plein_Soleil": "Plein Soleil", + "http://hublog.hubmed.org/archives/001049.html": "HubLog: Graph del.icio.us related tags", + "http://wiki.eclipse.org/EGit/Git_For_Eclipse_Users": "EGit/Git For Eclipse Users - Eclipsepedia", + "https://gist.github.com/andrewtkemp1/fa8f28e867e17559b931c3f6de9a4b9e": "This is a very basic guide on how to start working on group projects at DevMountain with GitHub. 
\u00b7 GitHub", + "https://en.wikipedia.org/wiki/Paper_Moon_(film)": "Paper Moon", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4322542.stm": "BBC NEWS Technology Robotic racers achieve milestone", + "http://hyperfp.noosblog.fr/": "Mon weblog chez Noos", + "http://www.w3.org/DesignIssues/NoSnooping.html": "No Snooping", + "https://developers.google.com/gmail/schemas/": "Schemas in Gmail \u2014 Google Developers", + "http://www.semanlink.net/doc/2020/12/drew_tada_sur_twitter_offici": "Drew Tada sur Twitter : \"Officially launching giantgra.ph A search engine for knowledge graphs...\"", + "http://www.nytimes.com/2015/12/04/business/dealbook/how-mark-zuckerbergs-altruism-helps-himself.html": "How Mark Zuckerberg\u2019s Altruism Helps Himself - The New York Times", + "http://www.wired.co.uk/article/darpa-arati-prabhakar-humans-machines": "The merging of humans and machines is happening now WIRED UK", + "http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_": "[2103.11811] MasakhaNER: Named Entity Recognition for African Languages", + "http://www.semanlink.net/doc/2020/01/natural_language_understanding_": "Natural Language Understanding with Sequence to Sequence Models", + "http://www.semanlink.net/doc/2020/11/regardless_of_the_us_presidenti": "Regardless of the US presidential election outcome, Trumpism lives on The Guardian", + "https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html": "Attention? Attention!", + "http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/": "Deep Learning, NLP, and Representations - colah's blog", + "http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec": "[1908.01580] The HSIC Bottleneck: Deep Learning without Back-Propagation", + "http://www.sterpin.net/imacslotin.htm": "D\u00e9montage iMac Remplacer le disque dur d'un imac slot-in ou son lecteur optique (et \u00e9ventuellement la pile de sauvegarde et les barrettes m\u00e9moire)", + "https://www.tensorflow.org/tutorials/word2vec": "Vector Representations of Words \u00a0\u00a0 TensorFlow", + "http://www.nytimes.com/2015/01/02/business/energy-environment/a-gray-area-in-regulation-of-genetically-modified-crops.html?partner=rss&emc=rss&_r=0": "Scientists Alter Crops With Techniques Outside Regulators\u2019 Scope - NYTimes.com", + "http://blogs.sun.com/bblfish/entry/foaf_enabling_an_enterprise": "foaf enabling an enterprise", + "http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions": "Finding Similar Quora Questions with BOW, TFIDF and Xgboost", + "http://www.lemonde.fr/idees/article/2011/09/02/contre-l-ideologie-de-la-competence-l-education-doit-apprendre-a-penser_1566841_3232.html": "Contre l'id\u00e9ologie de la comp\u00e9tence, l'\u00e9ducation doit apprendre \u00e0 penser", + "http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da": "[1802.05930] Learning beyond datasets: Knowledge Graph Augmented Neural Networks for Natural language Processing", + "https://github.com/RubenVerborgh/N3.js": "N3.js", + "https://www.kaggle.com/ostegm/plotting-similar-patents": "Plotting Similar Patents Kaggle", + "http://www.semanlink.net/doc/2019/12/custom_named_entity_recognition": "Custom Named Entity Recognition Using spaCy - Towards Data Science", + "https://www.lechorepublicain.fr/nogent-le-rotrou-28400/faits-divers/le-gros-chene-pres-de-la-loupe-a-perdu-une-branche_12130843/": "Le Gros Ch\u00eane, pr\u00e8s de La Loupe, a perdu une branche (27/10/2016)", + "https://blog.openai.com/better-language-models/": 
"Better Language Models and Their Implications", + "https://ujjwalkarn.me/2016/08/11/intuitive-explanation-convnets/": "An Intuitive Explanation of Convolutional Neural Networks \u2013 the data science blog", + "http://www.aaronsw.com/weblog/nummi": "Fix the machine, not the person (Aaron Swartz's Raw Thought)", + "http://www.semanlink.net/doc/2019/12/introducing_the_new_snorkel": "Introducing the New Snorkel", + "http://www-128.ibm.com/developerworks/library/j-ajax1/?ca=dgr-lnxw01Ajax": "Ajax for Java developers: Build dynamic Java applications", + "https://arxiv.org/abs/1704.08803": "[1704.08803] Neural Ranking Models with Weak Supervision", + "https://github.com/kritarthanand/Disambiguation-Stanbol": "kritarthanand/Disambiguation-Stanbol \u00b7 GitHub", + "http://logicerror.com/semanticWeb-webdev": "The Semantic Web (for Web Developers)", + "https://databricks.com/blog/2015/02/17/introducing-dataframes-in-spark-for-large-scale-data-science.html": "Introducing DataFrames in Spark for Large Scale Data Science Databricks", + "http://www.cems.uwe.ac.uk/~phale/": "Peter Hale PhD Research - User Driven Programming", + "http://schema.rdfs.org/": "schema.rdfs.org", + "http://swui.semanticweb.org/swui06/papers/Berners-Lee/Berners-Lee.pdf": "Tabulator: Exploring and Analyzing linked data on the Semantic Web", + "https://towardsdatascience.com/making-your-neural-network-say-i-dont-know-bayesian-nns-using-pyro-and-pytorch-b1c24e6ab8cd": "Making Your Neural Network Say \u201cI Don\u2019t Know\u201d\u200a\u2014\u200aBayesian NNs using Pyro and PyTorch", + "http://www.semanlink.net/doc/2019/05/a_bird_eye_view_of_optimization": "A bird-eye view of optimization algorithms", + "https://lists.w3.org/Archives/Public/public-linked-json/2012Aug/0029.html": "RE: DuckDuckGo data to JSON-LD? from Markus Lanthaler on 2012-08-24 (public-linked-json@w3.org from August 2012)", + "http://en.wikipedia.org/wiki/Boyz_n_the_Hood": "Boyz n the Hood", + "http://news.bbc.co.uk/1/hi/sci/tech/4714135.stm": "BBC NEWS Japanese develop 'female' android", + "http://owled2007.iut-velizy.uvsq.fr/": "OWL: Experiences and Directions - OWLED 2007", + "https://www.quora.com/Will-capsule-networks-replace-neural-networks": "Will capsule networks replace neural networks? - Quora", + "https://www.topbots.com/4-different-approaches-natural-language-processing-understanding/": "4 Approaches To Natural Language Processing & Understanding", + "http://semanticweb.com/darpa-wants-to-develop-machines-that-mimic-the-cerebral-neocortex_b39290": "DARPA Wants To Develop Machines That Mimic The Cerebral Neocortex - semanticweb.com", + "http://www.autonews.com/apps/pbcs.dll/article?AID=/20110712/RETAIL01/110719963/1400": "Online marketers should shift techniques, AutoTrader exec says", + "http://www.semanlink.net/doc/2020/01/papers_with_code_the_latest_i": "Papers With Code : the latest in machine learning", + "http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr": "[1904.02342] Text Generation from Knowledge Graphs with Graph Transformers", + "http://hugues.blogs.com/": "www.com-vat.com : Commentaires & vaticinations www.com-vat.com
Commentaires & vaticinations", + "http://www.semanlink.net/doc/2019/08/knowledge_graphs_and_natural_la": "Knowledge Graphs and Natural Language Processing. The Year of the Graph Newsletter, July/August 2019 Linked Data Orchestration", + "http://www.semanlink.net/doc/2021/02/sabil_kogl_weogo": "Sabil Kogl-weogo", + "http://www.jair.org/media/989/live-989-2063-jair.pdf": "A Knowledge Compilation Map - Adnan Darwiche, Pierre Marquis", + "http://beta.kasabi.com/": "Kasabi", + "http://protege.cim3.net/cgi-bin/wiki.pl?ModelingTipsAndTricks": "ProtegeWiki: Modeling Tips And Tricks", + "http://www.faviki.com/": "Faviki", + "http://dig.csail.mit.edu/2009/Papers/ISWC/rdf-access-control/paper.pdf": "Using RDF Metadata To Enable Access Control on the Social Semantic Web", + "http://www.wired.com/autopia/2009/05/the-grid-our-cars-and-the-internet-one-idea-to-link-them-all/": "The Grid, Our Cars and the Net: One Idea to Link Them All Autopia", + "https://gingkoapp.com/": "Gingko App", + "http://www.semanlink.net/doc/2019/11/hugging_face_on_a_mission_to_": "Hugging Face \u2013 On a mission to solve NLP, one commit at a time.", + "https://openclassrooms.com/courses/des-applications-ultra-rapides-avec-node-js?status=published": "Tutorial : Des applications ultra-rapides avec Node.js", + "http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0044.html": "Actions in Schema.org / Hydra from Markus Lanthaler on 2013-06-08 (public-vocabs@w3.org from June 2013)", + "http://www.lemonde.fr/pixels/article/2018/04/27/l-inventeur-du-web-exhorte-a-reguler-l-intelligence-artificielle_5291555_4408996.html": "L\u2019inventeur du Web exhorte \u00e0 r\u00e9guler l\u2019intelligence artificielle", + "http://swui.semanticweb.org/swui06/papers/Karger/Pathetic_Fallacy.html": "The Pathetic Fallacy of RDF", + "https://github.com/ahmadassaf/KBE": "Knowledge-base Extractor (github)", + "http://www.ldodds.com/blog/archives/000252.html": "Lost Boy: Writing an ARQ Extension Function", + "http://vocab.org/changeset/schema.html": "Changeset", + "http://www.agu.org/news/press/pr_archives/2013/2013-11.shtml": "Voyager 1 has entered a new region of space, sudden changes in cosmic rays indicate", + "http://www.semanlink.net/doc/2021/02/cape_york_meteorite_wikipedia": "Cape York meteorite - Wikipedia", + "https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd": "What's happening to http://vocab.deri.ie/ ?\u00a0http://vocab.deri.ie/void for\u2026", + "http://pcworld.com/howto/article/0,aid,122094,pg,1,00.asp": "PCWorld.com - 20 Things They Don't Want You to Know", + "http://www.businessinsider.fr/us/paul-krugman-says-bitcoin-is-a-bubble-2017-12/": "PAUL KRUGMAN: Bitcoin is a more obvious bubble than housing was", + "http://neurosciencenews.com/memristor-computational-neuroscience-5232/": "First Demonstration of Brain Inspired Device to Power Artificial Systems \u2013 Neuroscience News", + "http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl": "Zero-Shot Learning in Modern NLP Joe Davison Blog (2020-05)", + "http://jena.hpl.hp.com/~afs/SPARQL-Update.html": "SPARQL Update", + "http://www.eea.europa.eu/": "European Environment Agency", + "http://supraconductivite.fr/fr/index.php": "La supraconductivit\u00e9 dans tous ses \u00e9tats", + "http://www.semanlink.net/note/2007/07/une_societe_qui_est_prete_a_sacrifier_un_peu_de_sa_liberte": "They who can give up essential liberty to obtain a little temporary safety, deserve neither liberty nor safety.", + 
"http://www.lemonde.fr/idees/article/2012/06/28/tombouctou-epicentre-du-nouvel-obscurantisme-islamiste-africain_1725995_3232.html?utm_source=dlvr.it&utm_medium=twitter#xtor=RSS-3208001": "Tombouctou, \u00e9picentre du nouvel obscurantisme islamiste africain", + "http://usefulinc.com/edd/blog": "Edd Dumbill's Weblog: Behind the Times", + "http://vivoweb.org/": "VIVO connect - share - discover", + "https://github.com/dbpedia/GSoC/issues/19": "A Neural QA Model for DBpedia (GSoC 2019)", + "http://www.bbc.com/news/uk-politics-eu-referendum-36641390": "Reality Check: Have Leave campaigners changed their minds? - BBC News", + "https://realpython.com/python-speech-recognition/": "The Ultimate Guide To Speech Recognition With Python \u2013 Real Python", + "http://psi.cecs.anu.edu.au/": "Machine Learning as a Service", + "https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/": "Learning Deep Structured Semantic Models for Web Search using Clickthrough Data - Microsoft Research (2013)", + "http://www.radioansiaes.pt/noticia.aspx?id=%7Be571728b-cf88-483d-b1a7-860512a19e88%7D": "Cozinheiro brasileiro Ofir Oliveira veio ao Douro para encantar", + "https://pt.wikipedia.org/wiki/Aquarius_(filme)": "Aquarius (filme)", + "http://unweekenddansleperche.fr/le-gros-chene-de-la-loupe/": "Le Gros Ch\u00eane de La Loupe", + "http://www.semanlink.net/doc/2021/03/roam_vs_obsidian_which_one_sh": "Roam vs Obsidian - Which one should you use? Medium", + "http://www.semanlink.net/doc/2019/08/what_is_xlnet_and_why_it_outper": "What is XLNet and why it outperforms BERT - Towards Data Science", + "http://www.semanlink.net/doc/2020/02/online_speech_recognition_with_": "Online speech recognition with wav2letter@anywhere", + "http://www.semanlink.net/doc/2020/10/anvaka_vivagraphjs_graph_drawi": "anvaka/VivaGraphJS: Graph drawing library for JavaScript", + "http://www.intelligence.senate.gov/study2014/sscistudy1.pdf": "Rapport sur les tortures men\u00e9es par la CIA", + "http://www.cnrs.fr/inc/communication/direct_labos/boudalis2.htm": "Un stockage d\u2019information quantique d\u2019origine mol\u00e9culaire ? CNRS", + "https://github.com/winterbe/java8-tutorial": "winterbe/java8-tutorial", + "http://code.google.com/p/skoseditor/": "skoseditor", + "https://www.lemonde.fr/afrique/article/2018/08/06/un-rapport-pointe-les-failles-des-etudes-internationales-et-liberales-sur-l-afrique_5339789_3212.html": "Un rapport pointe les failles des \u00e9tudes internationales (et lib\u00e9rales) sur l\u2019Afrique", + "http://www.numerama.com/magazine/20928-plus-fort-qu-hadopi-un-site-devoile-ce-que-vous-avez-pirate-sur-bittorrent.html": "Plus fort qu'Hadopi ? Un site d\u00e9voile ce que vous avez pirat\u00e9 sur BitTorrent !", + "http://searchhub.org/2013/02/11/a-simple-question-answering-system-using-solr-and-opennlp/": "A Simple Question Answering system using Solr and OpenNLP SearchHub Lucene/Solr Open Source Search", + "http://www.semanlink.net/doc/2020/05/william_gibson_wikipedia": "William Gibson \u2014 Wikip\u00e9dia", + "http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411/6": "NLP: Any libraries/dictionaries out there for fixing common spelling errors? 
- Part 2 & Alumni - Deep Learning Course Forums", + "http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume": "[1905.10070] Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification", + "https://www.quora.com/How-is-GloVe-different-from-word2vec": "How is GloVe different from word2vec? - Quora", + "https://books.google.fr/books?id=4NjZCgAAQBAJ": "Riding the Demon: On the Road in West Africa - Peter Chilson - Google Livres", + "http://www.lemonde.fr/sport/article/2018/03/26/le-scandale-qui-secoue-le-cricket-australien_5276730_3242.html": "Le scandale qui secoue le cricket australien", + "http://www.semanticdesktop.org/xwiki/bin/view/Wiki/IDELIANCE": "SemanticDesktop.org - Wiki.IDELIANCE", + "http://danieldkim.blogspot.com/2005/07/javascript-and-rdf-almost-perfect.html": "Dan's Blaggity Blog: JavaScript and RDF - (almost) perfect together", + "http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre": "Au c\u0153ur de l\u2019Afrique, la guerre au nom de la nature", + "http://www.semanlink.net/doc/2021/01/graphaware_neo4j_consultancy_": "GraphAware - Neo4j consultancy, training, development", + "https://news.cnrs.fr/videos/the-secret-sex-life-of-truffles?utm_content=buffere34f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "The Secret Sex Life of Truffles CNRS News", + "http://bits.blogs.nytimes.com/2014/03/04/apples-new-carplay-is-a-step-in-the-right-direction/?_php=true&_type=blogs&partner=rss&emc=rss&_r=0": "Apple's New CarPlay Is Almost a Step in the Right Direction - NYTimes.com", + "http://www.lemonde.fr/idees/article/2010/12/20/la-forfaiture-ou-le-mepris-repete-des-lois-les-plus-intimes-de-la-republique_1455130_3232.html": "La forfaiture ou le m\u00e9pris r\u00e9p\u00e9t\u00e9 des lois les plus intimes de la R\u00e9publique - LeMonde.fr", + "https://hazyresearch.github.io/hyperE/": "HyperE: Hyperbolic Embeddings for Entities", + "http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se": "[1910.02227] Making sense of sensory input", + "http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_": "[2106.00882] Efficient Passage Retrieval with Hashing for Open-domain Question Answering", + "http://www.semanlink.net/doc/2021/05/clustering_of_semantically_enri": "Clustering of semantically enriched short texts (2018)", + "http://www.w3.org/2001/sw/Europe/reports/demo-lessons-report/": "12.1.8 HP SWAD-E Demonstrators - lessons learnt", + "https://titanpad.com/vIQu6H2Xdl": "TitanPad: SPARQL queries against the Wikidata endpoint", + "https://solarsystem.nasa.gov/galileo/": "Solar System Exploration: : Galileo Legacy Site", + "https://arxiv.org/abs/1801.01586": "[1801.01586] A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines", + "https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf": "A Few Useful Things to Know about Machine Learning", + "http://www.w3.org/2006/07/SWD/RDFa/impl/js/": "RDFa Bookmarklets", + "https://nlp.stanford.edu/software/jenny-ner-2007.pdf": "Named Entity Recognition and the Stanford NER Software (slides)", + "http://piketty.blog.lemonde.fr/2017/01/25/pour-un-revenu-universel-credible-et-ambitieux/": "Pour un revenu universel cr\u00e9dible et audacieux Le blog de Thomas Piketty", + "http://news.yale.edu/2014/03/25/yale-researchers-reconstruct-facial-images-locked-viewer-s-mind#.UzXiQztMwoc.twitter": "YaleNews Yale researchers reconstruct facial images locked in a viewer\u2019s mind", + 
"http://www.lemonde.fr/web/article/0,1-0@2-3232,36-970085,0.html": "Croissance : Attali contre Attali, par Herv\u00e9 Kempf", + "http://www.dartmouth.edu/~news/releases/2005/09/14.html": "Dartmouth News - Dartmouth researchers build world's smallest mobile robot - 09/14/05 Dartmouth News - Dartmouth researchers build world's smallest mobile robot", + "http://nbviewer.ipython.org/": "nbViewer", + "http://searchwebservices.techtarget.com/ateQuestionNResponse/0,289625,sid26_cid494324_tax289201,00.html": "Document vs. RPC style--why is it a big deal?", + "http://blogs.lexpress.fr/attali/2008/11/plans-b.php": "PLANS B - Conversation avec Jacques Attali - Lexpress", + "http://www.lesechos.fr/entreprises-secteurs/tech-medias/actu/0202374255582-les-revendeurs-de-donnees-cherchent-encore-leur-modele-economique-508886.php": "Les revendeurs de donn\u00e9es cherchent encore leur mod\u00e8le \u00e9conomique, Actualit\u00e9s", + "http://imagining-other.net/pp5thomasmoreextracts.htm": "Thomas More's political thought: extracts", + "http://rapid-i.com/content/view/202/206/": "RapidMiner - Extensions", + "http://stackoverflow.com/questions/1308263/wiping-out-maven-local-repository-on-build-machine": "continuous integration - Wiping out Maven local repository on build machine - Stack Overflow", + "http://dvcs.w3.org/hg/rdf/raw-file/default/rdf-json/index.html#": "RDF 1.1 JSON Serialisation (RDF/JSON)", + "http://www.universcience.tv/": "universcience.tv la Web TV scientifique", + "http://www4.wiwiss.fu-berlin.de/pubby/": "Pubby \u2013 A Linked Data Frontend for SPARQL Endpoints", + "http://www.sciencemag.org/news/2017/11/artificial-intelligence-goes-bilingual-without-dictionary": "Artificial intelligence goes bilingual\u2014without a dictionary Science AAAS", + "http://dfdf.inesc-id.pt/tr/web-arch": "URI Identity and Web Architecture Revisited", + "https://towardsdatascience.com/advances-in-few-shot-learning-reproducing-results-in-pytorch-aba70dee541d": "Advances in few-shot learning: reproducing results in PyTorch", + "http://msdn.microsoft.com/en-us/magazine/ff714592.aspx": "Going NoSQL with MongoDB - MSDN Magazine: The Working Programmer", + "http://www.semanlink.net/doc/2021/08/how_i_almost_won_an_nlp_competi": "How I almost won an NLP competition without knowing any Machine Learning - DEV Community", + "http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new": "Adam Roberts sur Twitter : \"New preprint: How Much Knowledge Can You Pack into the Parameters of a Language Model?...\"", + "http://www-irem.univ-paris13.fr/spip/": "IREM de Paris Nord (Institut de Recherche en Enseignement des Math\u00e9matiques)", + "http://www.semanlink.net/doc/2020/09/awesome_knowledge_distillation_": "Awesome Knowledge Distillation papers \u00b7 Seongkyun Han's blog", + "http://robohub.org/the-agricultural-labor-conundrum/": "The agricultural labor conundrum Robohub", + "http://semanticweb.com/huoua-uses-semantic-search-to-%E2%80%98spark%E2%80%99-instant-circles_b24592": "Huoua Uses Semantic Search to \u2018Spark\u2019 Instant Circles - semanticweb.com", + "http://www.zdnet.com/article/graph-databases-and-rdf-its-a-family-affair/?utm_content=buffer95710&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer#this": "Graph databases and RDF: It's a family affair ZDNet", + "https://radekosmulski.github.io/answers/html/What%20are%20pretrained%20models%3F.html": "What are pretrained models and why are they useful?", + 
"http://blog.iks-project.eu/the-enterprise-search-market-what-should-be-on-your-radar-keynote-iks-2012-workshop/": "The Enterprise Search Market \u2013 What should be on your radar? Keynote IKS 2012 Workshop IKS Blog \u2013 The Semantic CMS Community", + "http://www.theguardian.com/world/2013/sep/05/nsa-gchq-encryption-codes-security": "Revealed: how US and UK spy agencies defeat internet privacy and security World news Guardian Weekly", + "http://glowingpython.blogspot.fr/2013/07/combining-scikit-learn-and-ntlk.html": "The Glowing Python: Combining Scikit-Learn and NTLK", + "https://www.kdnuggets.com/2018/03/text-data-preprocessing-walkthrough-python.html?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News": "Text Data Preprocessing: A Walkthrough in Python", + "http://clic.cimec.unitn.it/marco/publications/acl2014/baroni-etal-countpredict-acl2014.pdf": "Don\u2019t count, predict! A systematic comparison of context-counting vs. context-predicting semantic vectors (2014)", + "http://searchhub.org/2014/02/07/easy-hierarchical-faceting-and-display-with-solr-and-jquery-and-a-tiny-bit-of-python/": "Easy Hierarchical Faceting and display with Solr and jQuery (and a tiny bit of Python) SearchHub Lucene/Solr Open Source Search", + "http://www.cs.princeton.edu/~blei/papers/Blei2012.pdf": "Probabilistic Topic Models", + "http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo10.html": "Rwanda - National Geographic News Photo Gallery: Megaflyover: Documenting Africa's Last Wild Places", + "http://www.semanlink.net/doc/2020/03/la_plus_grosse_explosion_jamais": "La plus grosse explosion jamais observ\u00e9e depuis le Big Bang", + "http://www.semanlink.info/": "Semanlink : Find your Path in the Labyrinth of Information - Dimitris' site", + "https://git-scm.com/book/en/v2/GitHub-Contributing-to-a-Project": "Git - GitHub - Contributing to a Project", + "http://wiki.osafoundation.org/bin/view/Journal/HierarchyVersusFacetsVersusTags": "HierarchyVersusFacetsVersusTags", + "http://www.lemonde.fr/afrique/article/2015/08/12/vincent-bollore-notre-methode-c-est-plutot-du-commando-que-de-l-armee-reguliere_4721862_3212.html": "Vincent Bollor\u00e9\u00a0: \u00ab\u00a0Notre m\u00e9thode, c\u2019est plut\u00f4t du commando que de l\u2019arm\u00e9e r\u00e9guli\u00e8re\u00a0\u00bb", + "http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/05.pdf": "FaceTag: Integrating Bottom-up and Top-down Classification in a Social Tagging System", + "http://www.w3.org/wiki/LDP_Implementations": "LDP Implementations - W3C Wiki", + "https://fr.wikipedia.org/wiki/Miharu_Takizakura": "Miharu Takizakura", + "https://en.wikipedia.org/wiki/In_the_Heat_of_the_Night_(film)": "In the Heat of the Night (film)", + "http://donsmaps.com/brassempouyvenus.html": "Venus de Brassempouy", + "http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli": "Les enjeux de la publicit\u00e9 politique cibl\u00e9e CNRS Le journal", + "https://code.facebook.com/posts/550719898617409/under-the-hood-multilingual-embeddings/": "Under the hood: Multilingual embeddings Engineering Blog Facebook Code", + "http://www.w3.org/2001/sw/DataAccess/prot26": "The SPARQL Service Interface", + "http://www.w3.org/2005/Incubator/w3pm/XGR-w3pm-20091008/": "Product Modelling using Semantic Web Technologies", + "https://medium.com/@dpjanes/one-url-the-semantic-sensor-revolution-58b6d8c86c0f": "One URL: The Semantic Sensor Revolution", + "http://www.w3.org/2009/12/rdf-ws/papers/ws23": "Revisiting Blank Nodes in 
RDF to Avoid the Semantic Mismatch with SPARQL", + "http://www.semanlink.net/doc/2021/01/cory_doctorow_about_gamestop": "Cory Doctorow about Gamestop", + "http://benalman.com/projects/jquery-hashchange-plugin/": "Ben Alman \u00bb jQuery hashchange event", + "http://lists.w3.org/Archives/Public/public-hydra/2013Oct/0023.html": "Re: Schema.org Actions - an update and call for review from Sam Goto on 2013-10-17 (public-hydra@w3.org from October 2013)", + "http://code.google.com/p/gnizr/": "gnizr - Google Code", + "http://blog.proxteam.eu/2013/10/un-correcteur-orthographique-en-21.html": "Un correcteur orthographique en 21 lignes de Python", + "http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-1-la-recherche_1132439_3232.html": "Chercher et enseigner \u00e0 l'Universit\u00e9. 1- La recherche - Opinions - Le Monde.fr", + "http://www.semanlink.net/doc/2019/10/chronas_enter_history": "Chronas: Enter History", + "http://fr.wikipedia.org/wiki/Va,_vis_et_deviens": "Va, vis et deviens", + "http://www.huffingtonpost.fr/christiane-taubira/nelson-mandela--rolihlahla-monde_b_4394382.html?ncid=edlinkusaolp00000003": "Nelson Mandela : Rolihlahla, pour transfigurer le monde Christiane Taubira", + "http://lists.w3.org/Archives/Public/public-vocabs/2014Jan/0025.html": "schema.org as it could be from Peter F. Patel-Schneider on 2014-01-06 (public-vocabs@w3.org from January 2014)", + "http://www.lemonde.fr/planete/article/2013/02/13/la-guerilla-judiciaire-des-geants-des-semences-contre-les-fermiers-americains_1832036_3244.html": "La gu\u00e9rilla judiciaire des g\u00e9ants des semences contre les fermiers am\u00e9ricains", + "http://iandavis.com/blog/2007/09/mvc-obscures-the-mechanics-of-the-web": "Internet Alchemy \u00bb MVC Obscures the Mechanics of the Web", + "http://www.cio.co.uk/news/r-and-d/self-driving-cars-hit-swedish-public-roads/": "Self-driving cars to hit Swedish public roads", + "https://www.novetta.com/2018/08/evaluating-solutions-for-named-entity-recognition/": "Evaluating Solutions for Named Entity Recognition Novetta.com (2018)", + "http://lists.w3.org/Archives/Public/public-vocabs/2013Oct/0037.html": "Re: SKOS for schema.org proposal for discussion from Guha on 2013-10-03 (public-vocabs@w3.org from October 2013)", + "http://blog.heppresearch.com/2015/07/09/google-product-rich-snippets-for-multiple-products-on-a-page/": "Google Product Rich Snippets for Multiple Products on a Page The Hepp Research Blog on Marketing with Data", + "http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_kno": "Pablo Castro sur Twitter : \"Knowledge mining using the knowledge store feature of #AzureSearch\"", + "http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_": "[1912.01412] Deep Learning for Symbolic Mathematics", + "http://www.semanlink.net/doc/2021/03/weaviate_vector_search_engine_%7C": "Weaviate Vector Search Engine SeMI Technologies", + "https://medium.com/@karpathy/software-2-0-a64152b37c35": "Software 2.0 \u2013 Andrej Karpathy \u2013 Medium", + "http://www.arbres.org/arbres_remarquables.html": "Les arbres remarquables", + "http://www.feynmanlectures.caltech.edu/": "The Feynman Lectures on Physics", + "http://neurosciencenews.com/machine-learning-vision-3312/": "Machines That Learn Like Humans Neuroscience News", + "https://cs230-stanford.github.io/pytorch-getting-started.html": "Introduction to PyTorch Code Examples", + "http://lists.w3.org/Archives/Public/public-hydra/2014Nov/0088.html": "Re: TPF and DBMSes (was Re: Hydra and Shapes) from 
Ruben Verborgh on 2014-11-26 (public-hydra@w3.org from November 2014)", + "https://www.quora.com/How-does-word2vec-work-Can-someone-walk-through-a-specific-example": "How does word2vec work? Can someone walk through a specific example? - Quora", + "http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such": "Jeremy Howard on Twitter: \"Such a ridiculously simple idea couldn't possibly work, could it? Or... could it? \"", + "https://www.nytimes.com/2017/10/04/science/ancient-viruses-dna-genome.html": "Ancient Viruses Are Buried in Your DNA - The New York Times", + "http://heliosearch.org/advanced-filter-caching-in-solr/": "Solr Filter Caching - Solr Evolved", + "http://streamplayer.free.fr/": "Stream Player", + "http://www.comlab.ox.ac.uk/ian.horrocks/Publications/download/2007/MoHS07a.pdf": "Bridging the Gap Between OWL and Relational Databases", + "http://semanticweb.com/tag/francois-paul-servant": "Fran\u00e7ois-Paul Servant - semanticweb.com", + "http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu": "[2003.08001] Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study", + "http://philippe-boulet-gercourt.blogs.nouvelobs.com/archive/2008/03/20/l-integrale-du-discours-d-obama-en-francais.html": "L'int\u00e9grale du discours d'Obama en fran\u00e7ais : Made in USA", + "https://www.fastly.com/blog/best-practices-for-using-the-vary-header": "Best Practices for Using the Vary Header Fastly", + "http://muto.socialtagging.org/core/v1.html": "\"Modular Unified Tagging Ontology (MUTO)\"", + "http://www.esa.int/Our_Activities/Space_Science/Rosetta": "Rosetta / Space Science / Our Activities / ESA", + "http://piketty.blog.lemonde.fr/2017/06/13/reagan-puissance-dix/": "Reagan puissance dix Le blog de Thomas Piketty", + "http://www.ted.com/talks/luis_von_ahn_massive_scale_online_collaboration.html": "Luis von Ahn : Collaboration en ligne \u00e0 tr\u00e8s grande \u00e9chelle. 
Video on TED.com", + "http://www.semanlink.net/doc/2021/02/named_entity_recognition_withou": "Named Entity Recognition without Labelled Data: A Weak Supervision Approach (2020)", + "https://www.jair.org/index.php/jair/index": "Journal of Artificial Intelligence Research", + "https://blog.jooq.org/2015/12/08/3-reasons-why-you-shouldnt-replace-your-for-loops-by-stream-foreach/": "3 Reasons why You Shouldn\u2019t Replace Your for-loops by Stream.forEach() \u2013 Java, SQL and jOOQ.", + "http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle": "[1910.12507] A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly?", + "http://www.newyorker.com/reporting/2007/12/03/071203fa_fact_specter": "Annals of Science: Darwin\u2019s Surprise: Reporting & Essays: The New Yorker", + "http://swig.hpclab.ceid.upatras.gr/SKOS/Skos2Owl2": "SKOS in OWL 2 - SWIGroup Wiki", + "http://blog.octo.com/designer-une-api-rest/": "Designer une API REST OCTO talks !", + "https://arxiv.org/abs/1511.08855": "[1511.08855] Semantic Folding Theory And its Application in Semantic Fingerprinting", + "http://www.w3.org/TR/swbp-skos-core-spec/": "SKOS Core Vocabulary Specification", + "https://www.technologyreview.com/s/602344/the-extraordinary-link-between-deep-neural-networks-and-the-nature-of-the-universe/": "The Extraordinary Link Between Deep Neural Networks and the Nature of the Universe", + "http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_": "[2106.04612] Neural Extractive Search", + "http://www.ontoprise.de/de/en/home/news/news-en/strategic-partnership.html": "Ontoprise brings semantic technologies to Microsoft SharePoint", + "http://www.ultralingua.com/onlinedictionary/": "Online Dictionary for French English, Spanish English, Italian English, and more.", + "http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp": "Knowledge Graphs in NLP @ EMNLP 2020 by Michael Galkin Nov, 2020 Medium", + "http://www.newscientist.com/article/dn25044-a-history-of-the-first-americans-in-9-sites.html?full=true#.UvwMx_2Ciww": "A history of the first Americans in 9\u00bd sites - life - 13 February 2014 - New Scientist", + "http://neuralnetworksanddeeplearning.com/chap2.html": "How the backpropagation algorithm works", + "http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert": "[2004.07202] Entities as Experts: Sparse Memory Access with Entity Supervision", + "http://cloud.feedly.com/#latest": "Feedly", + "http://www.semanlink.net/doc/2020/03/bert_elmo_gpt_2_how_contex": "BERT, ELMo, & GPT-2: How Contextual are Contextualized Word Representations? SAIL Blog", + "http://www.personalinfocloud.com/2005/02/explaining_and_.html": "Explaining and Showing Broad and Narrow Folksonomies", + "http://ebusiness-unibw.org/pipermail/goodrelations/2010-May/000215.html": "GoodRelations vs. Google RDFa vs. Open Graph vs. hProduct/hListing: Using GoodRelations in 10 Triples", + "https://lejournal.cnrs.fr/articles/leconomie-malade-de-ses-modeles?utm_content=buffer8bbc6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "L\u2019\u00e9conomie malade de ses mod\u00e8les CNRS Le journal", + "https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html": "More Like This Query Elasticsearch Reference", + "http://junit.sourceforge.net/#Getting": "JUnit: Getting started (using JUnit 4)", + "http://www.avajava.com/tutorials/lessons/how-do-i-import-a-maven-project-into-eclipse.html": "How do I import a Maven project into Eclipse? 
- Web Tutorials - avajava.com", + "http://nerd.eurecom.fr/": "NERD: Named Entity Recognition and Disambiguation", + "http://www.tbray.org/ongoing/When/200x/2005/03/11/WSInTheSpring": "Web Services: Spring 2005 Roundup", + "http://www.semanlink.net/doc/2020/07/au_sahel_des_arbres_et_des_bec": "Au Sahel, des arbres et des b\u00eaches pour lutter contre l\u2019avanc\u00e9e du d\u00e9sert", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4402442.stm": "BBC NEWS Microsoft scans British Library", + "https://lists.w3.org/Archives/Public/semantic-web/2018Oct/0052.html": "RDF(-DEV), back to the future (was Re: Semantic Web Interest Group now closed) from Dan Brickley on 2018-10-16 (semantic-web@w3.org from October 2018)", + "http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr": "Training a Speaker Embedding from Scratch with Triplet Learning (2018)", + "http://www.semanlink.net/doc/2021/08/%E2%80%98the_us_should_be_held_accounta": "\u2018The US should be held accountable\u2019: Guant\u00e1namo survivor on the war on terror\u2019s failure Guant\u00e1namo Bay The Guardian", + "https://futurism.com/technological-unemployment-the-real-reason-this-elephant-chart-is-terrifying/": "Technological Unemployment: The Real Reason This Elephant Chart is Terrifying", + "http://www.semanlink.net/doc/2021/01/the_big_short_film_": "The Big Short (film)", + "http://www.semanlink.net/doc/2020/08/everything_you_always_wanted_to": "Everything you always wanted to know about extreme classification (but were afraid to ask) - Microsoft Research - 2019", + "http://ajaxpatterns.org/Browser-Side_XSLT": "Browser-Side XSLT - Ajax Patterns", + "https://groups.google.com/forum/#!topic/hypermedia-web/j0oGkhTetcY": "What does it mean to not use hypermedia? - Google Groups", + "http://en.wikipedia.org/wiki/Spike-triggered_average": "Spike-triggered average - Wikipedia, the free encyclopedia", + "http://www.youtube.com/user/mathematicalmonk/videos?tag_id=&sort=dd&view=1": "mathematicalmonk's channel - YouTube", + "https://www.reddit.com/r/Bitcoin/comments/1lfobc/i_am_a_timetraveler_from_the_future_here_to_beg/": "I am a time-traveler from the future, here to beg you to stop what you are doing. 
: Bitcoin", + "http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin": "[1909.04939] InceptionTime: Finding AlexNet for Time Series Classification", + "http://blog.iks-project.eu/semantic-ui-development-with-vie/": "Semantic UI Development with VIE IKS Blog \u2013 The Semantic CMS Community", + "http://www.snee.com/bobdc.blog/2009/05/semantic-web-for-the-working-o.html": "\"Semantic Web for the Working Ontologist\" - bobdc.blog", + "http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui": "Combining knowledge graphs, quickly and accurately", + "http://www.semanlink.net/doc/2021/04/comment_les_mains_coupees_du_co": "Comment les mains coup\u00e9es du Congo ont secou\u00e9 l\u2019Europe coloniale", + "http://www.semanlink.net/doc/2020/12/exploration_engines_the_koodo": "Exploration Engines - the koodos collective", + "http://www.semanlink.net/doc/2021/08/knowledge_graphs_in_natural_lan": "Knowledge Graphs in Natural Language Processing @ ACL 2021 by Michael Galkin Aug, 2021", + "https://nlp.stanford.edu/software/tagger.shtml": "Stanford Log-linear Part-Of-Speech Tagger", + "http://www.semanlink.net/doc/2019/10/what_every_nlp_engineer_needs_t": "What Every NLP Engineer Needs to Know About Pre-Trained Language Models TOPBOTS", + "http://www.betaversion.org/~stefano/linotype/news/94/": "Stefano's Linotype ~ Piggy Bank, Cocoon and the Future of the Web", + "http://eculture.cs.vu.nl/europeana/session/search": "Europeana's semantic search engine.", + "http://stackoverflow.com/questions/2803305/javascript-how-to-download-js-asynchronously": "web development - JavaScript: How to download JS asynchronously? - Stack Overflow", + "http://www.econsultant.com/delicious-by-function/index.html": "del.icio.us: 150+ hacks categorized", + "http://www.lassila.org/blog/archive/2006/03/oink.html": "Wilbur-and-O: OINK", + "http://swse.deri.org/": "SWSE - Semantic Search", + "http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=15904": "[Linking-open-data] Re: Forms in the web of data (Richard Cyganiak)", + "http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_": "Cory Doctorow #BLM sur Twitter : \"Late last June, Google bought out \"North,\"...", + "http://codinginparadise.org/weblog/2005/08/ajax-creating-huge-bookmarklets.html": "Creating Huge Bookmarklets", + "http://www.semanticoverflow.com/questions/1493/what-is-the-best-java-rdfa-parser": "What is the best Java RDFa Parser? 
- Semantic Overflow", + "http://fgiasson.com/blog/index.php/2007/06/20/zitgists-rdf-browser-browse-the-semantic-web/": "Zitgist\u2019s RDF Browser: Browse the Semantic Web at Frederick Giasson\u2019s Weblog", + "https://api.globalchallenges.org/static/wp-content/uploads/12-Risks-with-infinite-impact.pdf": "Risks that threaten human civilisation", + "http://www.cs.berkeley.edu/~klein/papers/maxent-tutorial-slides-6.pdf": "Maxent models, Conditional estimation and Optimization", + "http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3": "D \u2423 a \u2423 n \u2423 P \u2423 i \u2423 p \u2423 o \u2423 n \u2423 i on Twitter: As an undergraduate I took an advanced classical mechanics course and one important result was Liouville's theorem...", + "http://planb.nicecupoftea.org/archives/001302.html": "Plan B: Ajax and Sparql", + "http://searchhub.org/2010/04/29/for-the-guardian-solr-is-the-new-database/": "For The Guardian, Solr is the new database", + "http://www.semanlink.net/doc/2021/10/2109_04711_pre_train_or_annot": "[2109.04711] Pre-train or Annotate? Domain Adaptation with a Constrained Budget", + "http://www.semanlink.net/doc/2021/02/custom_nlp_approaches_to_data_a": "Custom NLP Approaches to Data Anonymization by Omri Mendels Towards Data Science", + "http://www.semanlink.net/doc/2021/05/un_partenariat_plantes_champi": "Un partenariat plantes - champignons \u00e0 l\u2019origine de la v\u00e9g\u00e9talisation terrestre CNRS", + "http://dowhatimean.net/2006/11/content-negotiation-with-hash-uris-long": "dowhatimean.net \u00bb Content negotiation with hash URIs (long)", + "https://towardsdatascience.com/finding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d": "Finding Similar Quora Questions with Word2Vec and Xgboost", + "https://www.lemonde.fr/big-browser/article/2018/11/03/la-limule-fossile-vivant-au-sang-bleu-menace-de-disparition_5378484_4832693.html": "La limule, \u00ab\u00a0fossile vivant\u00a0\u00bb au sang bleu menac\u00e9 de disparition", + "http://www.w3.org/2009/03/xbrl/program.html": "Program for the Workshop on Improving Access to Financial Data on the Web", + "https://arxiv.org/abs/1811.05370": "[1811.05370] Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents", + "http://www.boutell.com/newfaq/creating/scriptpass.html": "WWW FAQs: How do I pass data between JavaScript pages?", + "https://krapooarboricole.wordpress.com/2008/05/13/chene-de-la-lambonniere-600-ans-pervencheres-orne/": "Ch\u00eane de la Lambonni\u00e8re 550 ans, Pervench\u00e8res (Orne) Krapo arboricole", + "http://www.semanlink.net/doc/2021/03/pulling_turtle_rdf_triples_from": "Pulling Turtle RDF triples from the Google Knowledge Graph", + "http://arxiv.org/abs/1602.02410": "[1602.02410] Exploring the Limits of Language Modeling", + "http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n": "[1910.04126] Scalable Nearest Neighbor Search for Optimal Transport", + "https://www.courrierinternational.com/article/kenya-pour-se-rembourser-la-chine-pourrait-semparer-du-port-de-mombasa": "Kenya. 
Pour se rembourser, la Chine pourrait s\u2019emparer du port de Mombasa Courrier international", + "https://arxiv.org/abs/1607.00570": "[1607.00570] Representation learning for very short texts using weighted word embedding aggregation", + "https://www.semanticscholar.org/paper/RDF2Vec-RDF-Graph-Embeddings-for-Data-Mining-Ristoski-Paulheim/21bc51c43a3ed702ccb661d8137f9b5bbe0ed3c8": "RDF2Vec: RDF Graph Embeddings for Data Mining - (2016)", + "http://news.bbc.co.uk/2/hi/science/nature/7733372.stm": "BBC NEWS Science & Environment Oldest nuclear family 'murdered'", + "http://www.ldh-toulon.net/spip.php?article877": "[LDH-Toulon] lettre de Victor Hugo au capitaine Butler", + "https://lists.w3.org/Archives/Public/semantic-web/2017Feb/0045.html": "[ANN] Apache Jena 3.2.0 released from A. Soroka on 2017-02-13 (semantic-web@w3.org from February 2017)", + "http://simia.net/wiki/Knowledge_Graph_Conference_2019,_Day_1": "Knowledge Graph Conference 2019, Day 1 - Simia", + "http://apassant.net/blog/2009/05/27/soutenance-de-th%C3%A8se-technologies-du-web-s%C3%A9mantique-pour-lentreprise-20": "Soutenance de th\u00e8se \"Technologies du Web S\u00e9mantique pour l'Entreprise 2.0\" Alexandre Passant", + "http://stackoverflow.com/questions/8772692/semantic-search-with-nlp-and-elasticsearch": "Semantic search with NLP and elasticsearch - Stack Overflow", + "http://www.nasa.gov/vision/universe/solarsystem/voyager_agu.html": "NASA - Voyager Enters Solar System's Final Frontier", + "http://www.w3.org/TR/swbp-vocab-pub/": "Best Practice Recipes for Publishing RDF Vocabularies", + "https://arxiv.org/abs/1807.07984": "[1807.07984] Attention Models in Graphs: A Survey", + "http://www.macworld.com/2005/03/secrets/aprilmobilemac/index.php": "Macworld: Secrets: Stranger in a Strange LAN", + "http://lists.w3.org/Archives/Public/public-vocabs/2013Jul/0075.html": "Vehicles, and customizable products from Francois-Paul Servant on 2013-07-23 (public-vocabs@w3.org from July 2013)", + "http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.89.7148": "ML-knn: A lazy learning approach to multi-label learning (2007)", + "http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la": "[2007.15779] Domain-Specific Language Model Pretraining for Biomedical Natural Language Processing", + "http://internetactu.blog.lemonde.fr/2014/01/10/modifier-le-vivant-pour-sauver-la-planete-la-bioingenierie-peut-elle-proteger-la-nature/": "Modifier le vivant pour sauver la plan\u00e8te : la bioing\u00e9nierie peut-elle prot\u00e9ger la nature ? 
InternetActu", + "http://poolparty.punkt.at/demozone/": "Poolparty Demozone", + "https://medium.com/@rdsubhas/10-modern-software-engineering-mistakes-bc67fbef4fc8": "Modern Software Over-Engineering Mistakes \u2013 RDX \u2013 Medium", + "http://www.semanlink.net/doc/2019/11/barack%E2%80%99s_wife_hillary_using_kn": "Barack\u2019s Wife Hillary: Using Knowledge Graphs for Fact-Aware Language Modeling (ACL 2019)", + "http://ecologie.blog.lemonde.fr/2014/04/18/le-bresil-va-lacher-des-millions-de-moustiques-ogm-contre-la-dengue/": "Le Br\u00e9sil va l\u00e2cher des millions de moustiques OGM contre la dengue Eco(lo)", + "http://www.essepuntato.it/lode": "LODE - Live OWL Documentation Environment", + "http://panopto.com/": "Video Platform for Businesses and Universities Panopto Video Platform", + "https://iamtrask.github.io/2015/07/12/basic-python-network/": "A Neural Network in 11 lines of Python (Part 1) - i am trask", + "http://research.microsoft.com/en-us/projects/trinity/query.aspx": "Real-time query processing for billion node graphs - Microsoft Research", + "http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c": "Information theory and neural coding (1999) (Alexander Borst and Fr\u00e9d\u00e9ric E. Theunissen)", + "http://www.bbc.com/news/magazine-28986843": "BBC News - The girl with three biological parents", + "http://news.bbc.co.uk/1/hi/sci/tech/5245950.stm": "BBC NEWS Evolution reversed in mice", + "https://www.washingtonpost.com/news/innovations/wp/2016/05/11/this-professor-stunned-his-students-when-he-revealed-the-secret-identity-of-his-teaching-assistant/": "What happened when a professor built a chatbot to be his teaching assistant - The Washington Post", + "http://passeurdesciences.blog.lemonde.fr/2012/11/25/quand-un-virus-sauve-une-bacterie-du-suicide/": "Quand un virus sauve une bact\u00e9rie du suicide Passeur de sciences", + "http://www.education.rec.ri.cmu.edu/fire/naclo/pages/Ling/Fact/num-languages.html": "Number of languages", + "https://www.bbc.com/news/world-africa-46442570": "Saving the last West African giraffes in Niger - BBC News", + "http://www.w3.org/TR/2005/WD-wsdl20-rdf-20051104/": "Web Services Description Language (WSDL) Version 2.0: RDF Mapping", + "http://www.w3.org/2001/sw/sweo/public/UseCases/Renault/": "Semantic Web Technologies in Automotive Repair and Diagnostic Documentation", + "https://medium.com/@catherio/unsolved-research-problems-vs-real-world-threat-models-e270e256bc9e": "Unsolved research problems vs. 
real-world threat models", + "http://www.semanlink.net/doc/2021/09/song_of_lawino": "Song of Lawino", + "http://ruder.io/emnlp-2018-highlights/": "EMNLP 2018 Highlights: Inductive bias, cross-lingual learning, and more", + "https://www.youtube.com/watch?v=fTjNkbLBEqg": "BBC Horizon 2014-2015 Episode 4: Inside the Dark Web - YouTube", + "https://github.com/BAILOOL/DoYouEvenLearn": "Essential Guide to keep up with AI/ML/CV", + "http://multithreaded.stitchfix.com/blog/2016/05/27/lda2vec/#topic=38&lambda=1&term=": "Introducing our Hybrid lda2vec Algorithm (2017) Stitch Fix Technology \u2013 Multithreaded", + "https://realitydrop.org/": "Reality Drop: Spread Science about Climate Change, Global Warming", + "https://www.ijcai.org/proceedings/2018/0810.pdf": "Grounded Language Learning: Where Robotics and NLP Meet (IJCAI 2018)", + "http://linkedup-project.eu/2014/12/22/wikinizer-introducing-mindgraph/": "Wikinizer: Introducing MindGraph LinkedUp: Linking Web Data for Education - An EU project about the potential of open data in education", + "http://adventuresinmachinelearning.com/word2vec-keras-tutorial/": "A Word2Vec Keras tutorial", + "http://fr.slideshare.net/lemire/all-about-bitmap-indexes-and-sorting-them": "All About Bitmap Indexes... And Sorting Them", + "https://carrotsearch.com/lingo3g/": "Lingo3G: real-time text clustering engine Carrot Search", + "http://www.cortical.io/technology_semantic.html": "semantic fingerprinting - cortical.io", + "http://www.semanlink.net/doc/2021/03/university_industry_collaborati": "University-industry collaboration in R&D - World Economic Forum", + "https://www.quora.com/How-does-Keras-compare-to-other-Deep-Learning-frameworks-like-Tensor-Flow-Theano-or-Torch": "How does Keras compare to other Deep Learning frameworks like Tensor Flow, Theano, or Torch? - Quora", + "http://www.rfi.fr/culture/20151204-litterature-livres-philosophie-ecologie-cop21-environnement": "Dictionnaire de la pens\u00e9e \u00e9cologique", + "https://github.com/tensorflow/nmt": "TensorFlow Neural Machine Translation (seq2seq) Tutorial", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/7740484.stm?ad=1": "IBM plans 'brain-like' computers", + "https://www.quora.com/What-is-the-future-of-jobs-and-work-in-20-years?no_redirect=1": "Andrew Ng: What is the future of jobs and work in 20 years? 
- Quora", + "http://www.lemonde.fr/idees/article/2011/09/27/rendre-aux-francais-leur-paysage-architectural_1578046_3232.html": "Rendre aux Fran\u00e7ais leur paysage architectural", + "http://blogs.techrepublic.com.com/tech-manager/?p=564&tag=rbxccnbtr1": "6 essential elements for a winning business case IT Leadership TechRepublic.com", + "http://www.openlinksw.com/blog/~kidehen/index.vspx?page=&id=1148": "Personal URIs & Data Spaces", + "https://textblob.readthedocs.io/en/dev/": "TextBlob: Simplified Text Processing", + "http://opennlp.apache.org/documentation/1.5.3/manual/opennlp.html": "Apache OpenNLP Developer Documentation", + "http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_": "Event Extraction by Answering (Almost) Natural Questions", + "https://issues.apache.org/bugzilla/show_bug.cgi?id=23929": "Tomcat - Bug 23929 \u2013 request.setCharacterEncoding(String) doesn't work", + "http://books.google.com/ngrams/": "Google Ngram Viewer", + "http://vimeo.com/34870158": "Semantic SEO for the Automotive Industry on Vimeo", + "http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe": "[2004.06842] Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph", + "http://www.theatlantic.com/technology/archive/2012/10/bigger-better-google-ngrams-brace-yourself-for-the-power-of-grammar/263487/": "Bigger, Better Google Ngrams: Brace Yourself for the Power of Grammar - Ben Zimmer - The Atlantic", + "http://www.onlamp.com/pub/wlg/6563": "What if SOAP had never happened?", + "http://www.volkswagen.co.uk/vocabularies/coo/ns": "The Car Options Ontology (COO)", + "http://www.semanlink.net/doc/2020/08/why_you_should_do_nlp_beyond_en": "Why You Should Do NLP Beyond English", + "http://www.click2map.com/v2/sachone/Carte-association-ARBRES": "Carte de France des Arbres Remarquables labellis\u00e9s", + "http://www.westwind.com/reference/OS-X/invisibles.html": "Mac OS X Hidden Files & Directories", + "http://www.lemonde.fr/afrique/article/2013/12/05/nelson-mandela-est-mort_3427343_3212.html": "Mort de Nelson Mandela, l'Africain capital", + "https://www.lemonde.fr/les-recettes-du-monde/article/2018/08/06/saumon-grille-au-beurre-rouge-facon-joel-robuchon-la-recette-de-nicolas-chatenier_5339851_5324493.html": "Saumon grill\u00e9 au beurre rouge fa\u00e7on Jo\u00ebl Robuchon\u00a0: la recette de Nicolas Chatenier", + "http://www.semanticdesktop.org": "SemanticDesktop.org", + "https://github.com/RaRe-Technologies/gensim/blob/c971411c09773488dbdd899754537c0d1a9fce50/docs/notebooks/WMD_tutorial.ipynb": "gensim/WMD_tutorial.ipynb", + "https://github.com/fchollet/keras/tree/master/examples": "Keras examples directory", + "http://danbri.org/words/2008/01/03/243": "danbri\u2019s foaf stories \u00bb Commandline PHP for loading RDF URLs into ARC (and Twinkle for query UI)", + "https://stackoverflow.com/questions/30661650/how-does-firefox-reader-view-operate?rq=1": "javascript - How does Firefox reader view operate - Stack Overflow", + "http://www.scmp.com/news/china/society/article/2120477/chinas-plan-use-solar-power-melt-permafrost-turn-tibetan": "China\u2019s plan to use solar power to melt permafrost to turn a Tibetan grassland into an artificial forest on the roof of the world South China Morning Post", + "http://www.structureddynamics.com/linked_data.html": "Linked Data FAQ", + "http://www.paulgraham.com/web20.html": "Web 2.0", + "http://apassant.net/lodr/": "LODr (Alexandre Passant's instance)", + 
"http://www.ultralingua.net/ulnet-enable.cgi?service=english2french&location=http://www.paulgraham.com/essay.html": "The Age of the Essay", + "http://www.mkbergman.com/852/the-open-world-assumption-elephant-in-the-room/": "The Open World Assumption: Elephant in the Room \u00bb AI3:::Adaptive Information", + "http://thewhyaxis.info/hairball/": "The Why Axis - Sigma.js Cleans up Hairball Network Visualizations", + "https://www.quora.com/How-can-I-preprocess-labeled-data-for-use-with-SciKit-Learn": "How to preprocess labeled data for use with SciKit-Learn - Quora", + "http://vocab.deri.ie/rdforms": "RDForms - representing HTML form and field semantics DERI Vocabularies", + "http://www.semanlink.net/doc/2020/12/%C2%AB_la_gestion_du_covid_19_ressem": "> \u00ab\u00a0La gestion du Covid-19 ressemble \u00e0 celle du r\u00e9chauffement climatique\u00a0: m\u00eame procrastination du pouvoir devant la certitude du d\u00e9sastre\u00a0\u00bb", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5038116.stm": "BBC NEWS Science/Nature Ancient fig clue to first farming", + "http://www.semanlink.net/doc/2019/09/what_s_next_for_ai_yoshua_ben": "What's next for AI - Yoshua Bengio (Interview)", + "https://www.theatlantic.com/science/archive/2017/11/how-the-zombie-fungus-takes-over-ants-bodies-to-control-their-minds/545864/": "The Zombie Fungus Takes Over Ants\u2019 Bodies to Control Their Minds - The Atlantic", + "http://www.semanlink.net/doc/2019/10/feature_wise_transformations": "Feature-wise transformations. A simple and surprisingly effective family of conditioning mechanisms. (2018)", + "http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio": "[2104.10809] Provable Limitations of Acquiring Meaning from Ungrounded Form: What will Future Language Models Understand?", + "http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature": "Semantic Scholar TLDR Feature", + "http://www.blazegraph.com/": "www.blazegraph.com", + "http://www.semanlink.net/doc/2019/10/watson_developer_cloud_speech_j": "watson-developer-cloud/speech-javascript-sdk: IBM Watson Speech Services for Web Browsers", + "http://www.csie.ntu.edu.tw/~cjlin/libsvm/index.html": "LIBSVM -- A Library for Support Vector Machines", + "http://www.scholarpedia.org/article/Models_of_consciousness": "Models of consciousness - Scholarpedia", + "http://www.iks-project.eu/sites/default/files/Topic-Classification.pdf": "IKS-Stanbol - Topic Classification", + "http://www.lemonde.fr/carnet/article/2010/12/19/jacqueline-de-romilly-helleniste-et-academicienne-est-morte_1455554_3382.html": "Jacqueline de Romilly, hell\u00e9niste et acad\u00e9micienne, est morte", + "https://github.com/swagger-api/swagger-core/wiki/Swagger-Core-Jersey-2.X-Project-Setup-1.5": "Swagger Core Jersey 2.X Project Setup 1.5 \u00b7 swagger-api/swagger-core Wiki", + "http://files.peacecorps.gov/multimedia/audio/languagelessons/niger/NE_Zarma_Language_Lessons.pdf": "Peace Corps/Niger An Introduction to the Zarma Language", + "http://www.semanlink.net/doc/2019/06/voice_dictation_online_speech": "Voice Dictation - Online Speech Recognition using chrome", + "http://www.lemonde.fr/opinions/chronique/2010/04/27/voile-pudique_1343015_3232.html": "Voile pudique", + "http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o": "[2005.03675] Machine Learning on Graphs: A Model and Comprehensive Taxonomy", + "http://news.bbc.co.uk/1/hi/health/4225564.stm": "BBC NEWS - Embryo with two mothers approved", + "http://myfaces.apache.org": "Apache 
MyFaces", + "http://www.semanlink.net/doc/2021/06/la_fillette_de_denisova_mere_d": "La fillette de Denisova, m\u00e8re d'une autre humanit\u00e9 (2010)", + "http://www.semanlink.net/doc/2020/04/%C2%AB_la_gestion_de_la_pandemie_de_": "\u00ab\u00a0La gestion de la pand\u00e9mie de Covid-19 et les mesures n\u00e9cessaires \u00e0 la sortie de crise conspirent \u00e0 faire de l\u2019environnement une question subsidiaire\u00a0\u00bb", + "http://esw.w3.org/PushBackDataToLegacySourcesRDForms#RDForms_Vocabulary": "PushBackDataToLegacySourcesRDForms - ESW Wiki", + "http://titanpad.com/": "TitanPad", + "http://www.wired.com/gadgetlab/2011/12/summly-app-summarization/": "Teen\u2019s iOS App Uses Complex Algorithms to Summarize the Web Gadget Lab Wired.com", + "http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.822.3091&rep=rep1&type=pdf": "Recurrent Convolutional Neural Networks for Text Classification (S Lai - \u200e2015)", + "http://www.head-fi.org/t/415361/24bit-vs-16bit-the-myth-exploded": "24bit vs 16bit, the myth exploded!", + "http://www.snee.com/bobdc.blog/2014/10/dropping-optional-blocks-from.html": "Dropping OPTIONAL blocks from SPARQL CONSTRUCT queries - bobdc.blog", + "http://www.lemonde.fr/afrique/article/2015/05/28/rwanda-comment-le-genocide-est-enseigne-a-l-ecole_4642789_3212.html": "Rwanda\u00a0: comment le g\u00e9nocide est enseign\u00e9 \u00e0 l\u2019\u00e9cole", + "http://www.semanlink.net/doc/2020/08/niger_le_grand_fleuve_du_sa": "Niger : le \"Grand fleuve\" du Sahel - France Culture - \u00c9p. 3/5 - Chansons d'eau douce", + "http://www.semanlink.net/doc/2021/10/masakhaner_named_entity_recogn": "MasakhaNER: Named Entity Recognition for African Languages MIT Press", + "https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/": "How to Use Word Embedding Layers for Deep Learning with Keras - Machine Learning Mastery", + "http://www.semanlink.net/doc/2021/10/patrick_artus_%C2%AB_l%E2%80%99economie_de": "Patrick Artus\u00a0: \u00ab\u00a0L\u2019\u00e9conomie de sp\u00e9culation est inefficace\u00a0\u00bb", + "https://developers.google.com/machine-learning/glossary/": "Machine Learning Glossary Google Developers", + "http://www.semanlink.net/doc/2019/08/neural_models_for_information_r": "Neural Models for Information Retrieval (2017)", + "http://blog.stephenwolfram.com/2014/09/launching-today-mathematica-online/": "Launching Today: Mathematica Online!\u2014Stephen Wolfram Blog", + "http://www.lemonde.fr/europe/article/2015/10/12/apres-l-attentat-d-ankara-la-turquie-au-bord-du-gouffre_4787525_3214.html": "Apr\u00e8s l\u2019attentat d\u2019Ankara, la Turquie au bord du gouffre", + "http://lile2012.linkededucation.org/": "Linked Learning 2012", + "http://www.w3.org/2001/tag/2011/01/HashInURI-20110115": "Repurposing the Hash Sign for the New Web", + "https://nlp.stanford.edu/IR-book/html/htmledition/pseudo-relevance-feedback-1.html": "Pseudo relevance feedback", + "http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17890": "fps writes in response to [Linking-open-data] A Search Engine for URIs (T.Heath)", + "https://github.com/kawine/usif": "GitHub - kawine/usif: Implementation of unsupervised smoothed inverse frequency", + "http://www.lespetitescases.net/semweblabs/semwebpro/rdfa/": "Index of /semweblabs/semwebpro/rdfa", + "http://www.ebusiness-unibw.org/wiki/Own_GoodRelations_Vocabularies": "Creating Domain-specific Products and Services Ontologies for GoodRelations", + "http://n2.talis.com/wiki/SPARQL_Recipes": "SPARQL Recipes - N2 
wiki", + "https://www.ibm.com/developerworks/library/j-use-elasticsearch-java-apps/index.html": "Use Elasticsearch in your Java applications", + "http://gregarius.net": "Gregarius \u00bb A Free, Web-based Feed Aggregator", + "http://uk.businessinsider.com/handwriting-helps-you-learn-2014-12?r=US": "Handwriting Helps You Learn - Business Insider", + "http://www.w3.org/TR/2008/NOTE-hcls-kb-20080604/": "A Prototype Knowledge Base for the Life Sciences", + "http://www.semanlink.net/doc/2021/04/au_bresil_le_naufrage_de_l%E2%80%99ope": "Au Br\u00e9sil, le naufrage de l\u2019op\u00e9ration anticorruption \u00ab\u00a0Lava Jato\u00a0\u00bb", + "http://www.semanlink.net/doc/2020/05/huggingface_nlp_nlp_datasets_": "huggingface/nlp: nlp: datasets and evaluation metrics for NLP in NumPy, Pandas, PyTorch and TensorFlow", + "http://www.boingboing.net/2006/01/30/msft_our_drm_licensi.html": "Boing Boing: MSFT: Our DRM licensing is there to eliminate hobbyists and little guys", + "http://www.semanlink.net/doc/2019/12/correlation_clustering_wikipe": "Correlation clustering - Wikipedia", + "https://github.com/salesforce/decaNLP": "The Natural Language Decathlon: Multitask Learning as Question Answering (2018) Salesforce research", + "http://www.semanlink.net/doc/2021/06/africanlp_workshop_%7C_putting_af": "AfricaNLP Workshop Putting Africa on the NLP Map. ICLR 2020, Virtual Event", + "http://www.semanlink.net/doc/2021/01/a_criticism_of_stochastic_parro": "A criticism of Stochastic Parrots", + "http://worldmusic.nationalgeographic.com/worldmusic/view/page.basic/country/content.country/niger_204?fs=www3.nationalgeographic.com&fs=plasma.nationalgeographic.com": "Niger: National Geographic World Music", + "https://www.lemonde.fr/m-actu/article/2018/05/25/en-suede-un-livret-pour-se-preparer-a-la-guerre_5304591_4497186.html": "En Su\u00e8de, un livret pour se pr\u00e9parer \u00e0 la guerre", + "http://rdfa.digitalbazaar.com/live-loop/": "Live Loop", + "http://xtech06.usefulinc.com/schedule/paper/147": "XTech 2006: Semantic Web @ NASA", + "http://www.semanlink.net/doc/2020/08/%C2%AB_les_fous_d%E2%80%99allah_nous_les_ont": "\u00ab\u00a0Les fous d\u2019Allah nous les ont arrach\u00e9s\u00a0\u00bb : le Niger sous le choc apr\u00e8s la mort des humanitaires", + "http://hugues.blogs.com/commvat/2007/09/gntique-adminis.html": "www.com-vat.com: G\u00e9n\u00e9tique administrative : de Courteline \u00e0 Orwell", + "http://www.semanlink.net/doc/2021/03/renault_group_at_hugging_face": "Renault group at Hugging Face", + "http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_": "[1911.03681] E-BERT: Efficient-Yet-Effective Entity Embeddings for BERT", + "http://theconversation.com/cracking-big-data-with-statistical-physics-79864": "Cracking big data with statistical physics", + "https://github.com/anvaka/word2vec-graph": "GitHub - anvaka/word2vec-graph: Exploring word2vec embeddings as a graph of nearest neighbors", + "http://phonetics.ucla.edu/": "UCLA Phonetics Lab Data", + "http://en.wikipedia.org/wiki/El_Cantante": "El Cantante", + "http://iand.posterous.com/is-303-really-necessary": "Is 303 Really Necessary? 
- Internet Alchemy", + "https://www.ontotext.com/free-graphdb-download/?utm_source=twitter&utm_medium=card&utm_campaign=graphdb%20free": "GraphDB Free Download Ontotext", + "http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr": "[1911.00172] Generalization through Memorization: Nearest Neighbor Language Models", + "http://www.w3.org/TR/webarch/": "Architecture of the World Wide Web, Volume One", + "http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0192360": "Comparing deep learning and concept extraction based methods for patient phenotyping from clinical narratives (2018)", + "http://www.geocities.com/anpipniger/gajera.htm": "GAJERA ASPIRANTE Pompe Gajera aspirante", + "http://www.knowledgevision.com/": "KnowledgeVision Online Presentations Video Tools", + "https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-3-more-fun-with-word-vectors": "More Fun With Word Vectors - Bag of Words Meets Bags of Popcorn Kaggle", + "http://www.mpi-inf.mpg.de/~suchanek/publications/www2007.pdf": "YAGO: A Core of Semantic Knowledge Unifying WordNet and Wikipedia - Paper", + "http://www.w3.org/wiki/WebSchemas/SKOS": "WebSchemas/SKOS - W3C Wiki", + "http://www.semanlink.net/doc/2021/08/l%E2%80%99age_du_fer_en_basse_normandie": "L\u2019\u00e2ge du Fer en Basse-Normandie. Gestes fun\u00e9raires en Gaule au Second-\u00c2ge du Fer. Volumes\u00a0I et II - Les \u00e9perons barr\u00e9s et petites enceintes au Bronze final et au Premier \u00c2ge du fer en Basse-Normandie - Presses universitaires de Franche-Comt\u00e9", + "http://thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html": "TechnicaLee Speaking: SPARQL Calendar Demo: A SPARQL JavaScript Library", + "http://xtech06.usefulinc.com/schedule/detail/135": "XTech 2006: Semantics Through the Tag", + "https://plus.google.com/112399767740508618350/posts/VWxowvLti4X": "Kingsley Idehen - Google+ - DIY Linked Data Deployment via DropBox Unbeknownst to\u2026", + "https://github.com/io-informatics/angular-jsonld": "angular-jsonld", + "http://www.semanlink.net/doc/2020/12/xkcd_git": "xkcd: Git", + "http://lists.w3.org/Archives/Public/public-hydra/2015Feb/0026.html": "Understanding linked data fragments", + "http://www.devx.com/semantic/Article/38595": "Semantic Search Arrives at the Web", + "http://www.wired.com/2014/03/rootworm-resistance-bt-corn": "Voracious Worm Evolves to Eat Biotech Corn Engineered to Kill It WIRED", + "http://lists.w3.org/Archives/Public/semantic-web/2014May/0032.html": "RDF Template from St\u00e9phane Campinas on 2014-05-10 (semantic-web@w3.org from May 2014)", + "http://www.youtube.com/user/minutephysics": "MinutePhysics - YouTube", + "http://ebiquity.umbc.edu/blogger/2013/05/17/gmail-adds-support-for-embedding-semantic-data/": "Gmail adds support for embedding semantic data", + "http://www.semanlink.net/doc/2019/10/why_knowledge_bases_are_the_nex": "Why Knowledge Bases Are The Next Big Thing", + "http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long": "Semantic Text Matching for Long-Form Documents (2019)", + "http://www.lemonde.fr/idees/article/2012/01/13/etudiants-etrangers-claude-gueant-doit-s-excuser_1629378_3232.html": "Etudiants \u00e9trangers : Claude Gu\u00e9ant doit s'excuser", + "http://www.sublimetext.com/": "Sublime Text: The text editor you'll fall in love with", + "http://www.desirsdavenir.org/commun/pdf/RapportRocard.pdf": "R\u00e9publique 2.0 beta. Vers une soci\u00e9t\u00e9 de la connaissance ouverte. 
Michel Rocard", + "http://www.youtube.com/watch?v=txEekZcgl4s": "LHC (en fran\u00e7ais)", + "http://tech.groups.yahoo.com/group/jena-dev/message/28785": "jena-dev : Message: Re: [jena-dev] Checking URI validity before adding it to a model", + "http://lists.w3.org/Archives/Public/public-vocabs/2014May/0085.html": "Finalizing Schema.org \"Roles\" design from Dan Brickley on 2014-05-08 (public-vocabs@w3.org from May 2014)", + "http://lists.w3.org/Archives/Public/public-ldp-wg/2012Nov/0201.html": "forms, direction, query, etc \u2026 from Roger Menday on 2012-11-19 (public-ldp-wg@w3.org from November 2012)", + "https://arxiv.org/abs/1901.03136": "[1901.03136] Automating the search for a patent's prior art with a full text similarity search", + "http://www.wired.com/2015/08/ibms-rodent-brain-chip-make-phones-hyper-smart/": "IBM's 'Rodent Brain' Chip Could Make Our Phones Hyper-Smart WIRED", + "http://www.googleartproject.com/": "Google Art Project", + "http://www.semanlink.net/doc/2020/01/huggingface_tokenizers_fast_st": "huggingface/tokenizers: Fast State-of-the-Art Tokenizers optimized for Research and Production", + "https://docs.google.com/presentation/d/17NoJY2SnC2UMbVegaRCWA7Oca7UCZ3vHnMqBV4SUayc/edit#slide=id.p": "Writing Code for NLP Research, AllenNLP's tutorial at #emnlp2018", + "http://www.thefigtrees.net/lee/blog/2010/07/could_semtech_run_on_excel_sem.html": "Could SemTech Run On Excel? (SemTech Lightning Demo) - TechnicaLee Speaking", + "http://www.picment.com/articles/css/funwithforms/": "Picment.com \u00bb Articles \u00bb CSS \u00bb Fun with forms \u2013 customized input elements", + "http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_": "[2106.13474] Adapt-and-Distill: Developing Small, Fast and Effective Pretrained Language Models for Domains", + "https://carrotsearch.com/lingo3g/comparison/": "Lingo3G or Carrot2? Carrot Search", + "http://www.journaldunet.com/ebusiness/marques-sites/ergonomie-configurateurs-automobiles/": "Neuf configurateurs automobiles au banc d'essai - Journal du Net e-Business", + "http://www.nature.com/news/five-big-mysteries-about-crispr-s-origins-1.21294": "Five big mysteries about CRISPR\u2019s origins : Nature News & Comment", + "http://www.bbc.co.uk/news/special/2014/newsspec_6954/index.html": "A good man in Rwanda", + "http://dig.csail.mit.edu/breadcrumbs/node/253": "Map and Territory in RDF APIs", + "http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_": "Baselines and Bigrams: Simple, Good Sentiment and Topic Classification. Sida Wang and Christopher D. 
Manning", + "http://www.semanlink.net/doc/2020/08/a_novel_multi_label_classificat": "A novel multi-label classification algorithm based on K-nearest neighbor and random walk - Zhen-Wu Wang, Si-Kai Wang, Ben-Ting Wan, William Wei Song, 2020", + "http://www.devx.com/semantic/Article/39162": "\"Getting Real\" with RDF and SPARQL", + "https://markdown-it.github.io/": "markdown-it demo", + "http://www.semanlink.net/doc/2021/03/equilibrium_propagation_bridgi": "Equilibrium Propagation: Bridging the Gap between Energy-Based Models and Backpropagation Frontiers in Computational Neuroscience", + "http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling%27s%20Blog/1434": "Requirements for Relational-to-RDF Mapping", + "http://www.semanlink.net/doc/2021/03/le_dit_du_genji_wikipedia": "Le Dit du Genji \u2014 Wikip\u00e9dia", + "http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l": "\u00ab\u00a0L\u2019entretien avec l\u2019agent de la CAF a \u00e9t\u00e9 une humiliation\u00a0\u00bb : les b\u00e9n\u00e9ficiaires du RSA dans l\u2019enfer des contr\u00f4les", + "https://www.aminer.cn/dl4g-sde": "International Workshop on Deep Learning for Graphs and Structured Data Embedding", + "http://www.thumbshots.org/": "Open Thumbshots - Free Web thumbnail preview image", + "https://developers.google.com/machine-learning/guides/text-classification/step-2-5": "Practical guide to text classification \u00a0\u00a0 Google Developers", + "http://www.semanlink.net/doc/2019/12/meta_reinforcement_learning": "Meta Reinforcement Learning", + "http://www.wired.com/2015/12/the-cia-secret-to-cybersecurity-that-no-one-seems-to-get/": "The CIA Secret to Cybersecurity That No One Seems to Get WIRED", + "http://events.linkeddata.org/ldow2008/papers/22-passant-laublet-meaning-of-a-tag.pdf": "Meaning Of A Tag: A Collaborative Approach to Bridge the Gap Between Tagging and Linked Data", + "http://www.semanlink.net/doc/2020/01/thread_by_wzuidema_the_2010s_": "Thread by @wzuidema: The 2010s were an eventful decade for NLP! 
Here are ten shocking developments since 2010, and 13 papers* illustrating them, that have change\u2026", + "https://atom.io/docs/latest/": "ATOM documentation", + "http://www.semanlink.net/documents/2018/05/TheWebConf-CR.md": "The Web Conf 2018 : compte-rendu", + "http://en.wikipedia.org/wiki/Fitch%27s_paradox_of_knowability": "Fitch's paradox of knowability - Wikipedia, the free encyclopedia", + "http://www.semanlink.net/doc/2021/07/jacob_desvarieux_leader_du_gro": "Jacob Desvarieux, leader du groupe antillais Kassav\u2019, est mort", + "http://jena.apache.org/download/maven.html": "Apache Jena - Using Jena with Apache Maven", + "http://www.semanlink.net/doc/2020/01/self_supervised_learning_and_co": "Self-supervised learning and computer vision \u00b7 fast.ai", + "http://www.flickr.com/photos/iks_project/7394169866/in/set-72157630176990928/": "Build a CMS, no forms allowed", + "http://maps.google.com/maps?f=q&hl=fr&q=170+rue+de+Lourmel+Paris+France&ie=UTF8&om=1&z=17&ll=48.839816,2.278762&spn=0.005303,0.012896&t=h": "170, rue de Lourmel", + "http://www.newsforge.com/article.pl?sid=06/08/14/1438204": "Open source project adds \"no military use\" clause to the GPL", + "http://www.blogmarks.net/": "http://www.blogmarks.net", + "http://www.semanlink.net/doc/2021/01/the_complexity_to_construct_kno": "The complexity to construct knowledge graphs and how low code tools can help or hurt you Medium", + "https://github.com/oxford-cs-deepnlp-2017/lectures": "lectures: Oxford Deep NLP 2017 course", + "http://news.bbc.co.uk/2/hi/technology/8598871.stm": "The Day the Web Turned Day-Glo", + "http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_": "Premi\u00e8res soci\u00e9t\u00e9s structur\u00e9es au Nig\u00e9ria (Pour la Science 2012)", + "http://www.phontron.com/nlptools.php": "Natural Language Processing Tools", + "http://www.confoto.org/cart": "CONFOTO - RDF Data Cart", + "https://www.theguardian.com/environment/2017/apr/06/farms-could-slash-pesticide-use-without-losses-research-reveals": "Farms could slash pesticide use without losses, research reveals Environment The Guardian", + "http://www.independent.co.uk/life-style/gadgets-and-tech/news/prosthetic-hand-lets-man-actually-feel-what-he-touches-for-the-first-time-10499870.html": "Prosthetic hand lets man actually feel what he touches for the first time - News - Gadgets and Tech - The Independent", + "http://www.semanlink.net/doc/2019/08/peter_bloem": "Peter Bloem", + "http://www.rfi.fr/france/20190415-france-incendie-cours-cathedrale-notre-dame-paris?ref=tw": "[En direct] La structure de Notre-Dame de Paris \u00abest sauv\u00e9e\u00bb - France - RFI", + "https://www.deepl.com/translator": "DeepL Traducteur", + "https://wikipedia2vec.github.io/wikipedia2vec/": "Wikipedia2Vec", + "http://www.semanlink.net/doc/2020/04/brunorb_ahocorasick_aho_corasi": "BrunoRB/ahocorasick: Aho-corasick for javascript.", + "http://www.semanlink.net/doc/2021/09/a_gentle_introduction_to_graph_": "A Gentle Introduction to Graph Neural Networks", + "http://graves.cl/visualRDF/": "Visual RDF", + "http://searchengineland.com/make-yourself-findable-in-the-global-graph-of-commerce-135082": "An Illustrated Guide To E-Commerce Markup Using GoodRelations", + "http://wodka.over-blog.com/article-2280247.html": "Un ivrogne dans la brousse", + "http://www.semanlink.net/doc/2021/01/die_toten_hosen": "Die Toten Hosen", + "http://decentralyze.com/2010/03/09/rdf-meets-nosql/": "RDF meets NoSQL \u00ab Decentralyze \u2013 Programming the Data Cloud", + 
"http://www.semanlink.net/doc/2020/12/keyword_extraction_with_bert_%7C_": "Keyword Extraction with BERT Towards Data Science", + "http://xtech06.usefulinc.com/schedule/paper/61": "XTech 2006: SPARQLing Services", + "http://www.lemonde.fr/planete/article/2014/01/16/une-fourmi-ninja-pillarde-et-esclavagiste-decouverte_4349559_3244.html": "Une fourmi \u00ab ninja pillarde \u00bb et esclavagiste", + "http://www.itworld.com/Tech/3494/071026pivo2/index.html": "ITworld.com - Nissan adds a robot helper to its concept car", + "http://www.semanlink.net/doc/2019/06/visual_and_conceptual_grounding": "Visual and conceptual grounding for text representation learning", + "https://www.theguardian.com/technology/2018/may/24/mark-zuckerberg-set-up-fraudulent-scheme-weaponise-data-facebook-court-case-alleges": "Zuckerberg set up fraudulent scheme to 'weaponise' data, court case alleges Technology The Guardian", + "http://www.lemonde.fr/pixels/article/2014/10/03/firechat-l-application-popularisee-par-les-manifestants-hongkongais_4500216_4408996.html": "Firechat, l'application popularis\u00e9e par les manifestants hongkongais", + "http://bnode.org/blog/2008/07/29/semantic-web-by-example-semantic-crunchbase": "Semantic Web by Example: Semantic CrunchBase - benjamin nowack's blog", + "http://www.mail-archive.com/log4j-user@jakarta.apache.org/msg08853.html": "RE: log4j ObjectRenderer.", + "http://dowhatimean.net/2006/05/juc-francois-paul-servant-%E2%80%93-semanlink": "dowhatimean.net \u00bb [juc] Fran\u00e7ois-Paul Servant \u2013 Semanlink", + "http://fgiasson.com/blog/index.php/2008/07/16/starting-to-play-with-the-umbel-ontology/": "Starting to Play with the UMBEL Ontology at Frederick Giasson\u2019s Weblog", + "http://www.sciencedirect.com/science/article/pii/S0888613X08001813": "Semantic hashing (2008) - Ruslan Salakhutdinov, Geoffrey Hinton", + "http://www.bbc.co.uk/news/science-environment-17436400": "BBC News - Ancient sites spotted from space, say archaeologists", + "http://horicky.blogspot.fr/2008/11/hadoop-mapreduce-implementation.html": "Hadoop Map/Reduce Implementation (Pragmatic Programming Techniques)", + "https://www.nytimes.com/2017/11/21/magazine/can-ai-be-taught-to-explain-itself.html": "Can A.I. Be Taught to Explain Itself? - The New York Times", + "http://www.semanlink.net/doc/2021/07/a_moderate_proposal_for_radical": "A Moderate Proposal for Radically Better AI-powered Web Search", + "http://www.semanlink.net/doc/2020/10/will_a_half_step_by_macron_be_e": "Will a Half-Step by Macron Be Enough to Blunt France\u2019s Second Wave? - The New York Times", + "http://sites.wiwiss.fu-berlin.de/suhl/bizer/HowtoPublishLinkedData.htm": "How to publish Linked Data on the Web?", + "http://www.sorosoro.org/en/love": "Love \u00ab Sorosoro", + "http://purl.org/coo/ns": "The Car Options Ontology (COO)", + "http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t": "[1905.05950] BERT Rediscovers the Classical NLP Pipeline", + "http://www.lemonde.fr/festival/article/2015/07/22/rand-hindi-l-homme-qui-veut-faire-disparaitre-les-technologies_4693695_4415198.html": "Rand Hindi, l\u2019homme qui veut faire dispara\u00eetre les technologies", + "http://www.industrie.com/it/semweb-pro-2012-le-web-semantique-sort-des-laboratoires.13246": "SemWeb.Pro 2012 : le Web s\u00e9mantique sort des... 
- Industrie et Technologies", + "http://code.google.com/apis/visualization/documentation/gallery.html": "Google Visualization API Gallery - Google Chart Tools / Interactive Charts (aka Visualization API) - Google Code", + "http://www.semanlink.net/doc/2020/09/representing_text_for_joint_emb": "Representing Text for Joint Embedding of Text and Knowledge Bases (EMNLP 2015)", + "http://www.opencroquet.org/index.html": "Croquet Project", + "https://code.facebook.com/posts/181565595577955/introducing-deeptext-facebook-s-text-understanding-engine/": "Introducing DeepText: Facebook's text understanding engine Engineering Blog Facebook Code", + "https://blog.frankel.ch/from-vaadin-to-docker-a-novices-journey": "A Java geekA Java geek > From Vaadin to Docker, a novice's journey", + "http://blog.sgo.to/2014/02/rows.html": "Hello World: ROWS", + "http://bblfish.net/work/presentations/2007/BOF-6747.pdf": "Developing Web 3.0 - JavaOne", + "http://www.semanlink.net/doc/2019/12/defiant_mark_zuckerberg_defends": "Defiant Mark Zuckerberg defends Facebook policy to allow false ads Technology The Guardian", + "https://medium.freecodecamp.org/rest-is-the-new-soap-97ff6c09896d": "REST is the new SOAP \u2013 freeCodeCamp", + "https://www.npmjs.com/package/wikidata-taxonomy": "wikidata-taxonomy", + "https://scinfolex.com/2018/05/06/lanti-protection-sociale-de-facebook-et-lavenement-du-providentialisme-de-plateforme/": "L\u2019anti-protection sociale de Facebook et l\u2019av\u00e8nement du \u00ab\u00a0providentialisme de plateforme\u00a0\u00bb \u2013 \u2013 S.I.Lex \u2013", + "https://arxiv.org/pdf/1412.1897v4.pdf": "[1412.1897] Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images", + "https://en.wikipedia.org/wiki/War_Witch": "War Witch (French: Rebelle)", + "http://www.openlinksw.com/weblog/oerling/?id=1498": "\"E Pluribus Unum\", or \"Inversely Functional Identity\", or \"Smooshing Without the Stickiness\" (re-updated)", + "https://youtu.be/CtDWzb1qd-E": "Akoguin Theresa - Maestro Laba Sosseh Con L'Orquesta Aragon", + "http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand": "[1810.04882] Towards Understanding Linear Word Analogies", + "http://archive.eiffel.com/doc/manuals/technology/bmarticles/uml/page.html": "UML: The Positive Spin", + "http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m": "[1703.07464] No Fuss Distance Metric Learning using Proxies", + "https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/": "TUTORIAL: Graph-based Text Representations: Boosting Text Mining, NLP and Information Retrieval with Graphs", + "https://arxiv.org/pdf/1706.00957.pdf": "[1706.00957] Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines", + "https://stackoverflow.com/questions/1064089/inserting-a-text-where-cursor-is-using-javascript-jquery": "Inserting a text where cursor is using Javascript/jquery - Stack Overflow", + "http://rdfa.digitalbazaar.com/rdfa-test-harness/": "The RDFa Test Harness (Crazy Ivan)", + "http://static.googleusercontent.com/external_content/untrusted_dlcp/research.google.com/en//archive/bigtable-osdi06.pdf": "Bigtable: A Distributed Storage System for Structured Data", + "http://www.semanlink.net/doc/2021/03/wikidata_browser": "Wikidata browser", + "http://www.semanlink.net/doc/2020/01/10_ml_nlp_research_highlights": "10 ML & NLP Research Highlights of 2019", + "http://ebiquity.umbc.edu/blogger/?p=383": "EBB: ebiquity blog at UMBC \u00bb Alexa Web Information Services", + 
"http://www.osnews.com/story/23236/Why_Our_Civilization_s_Video_Art_and_Culture_is_Threatened_by_the_MPEG-LA": "Why Our Civilization's Video Art and Culture is Threatened by the MPEG-LA", + "http://www.semanlink.net/doc/2021/07/rio_tinto_blasting_of_46_000_ye": "Rio Tinto blasting of 46,000-year-old Aboriginal sites compared to Islamic State's destruction in Palmyra - ABC News (May 2020)", + "http://www.lucenetutorial.com/lucene-query-syntax.html": "Lucene Query Syntax - Lucene Tutorial.com", + "http://www.w3.org/Submission/2010/04/": "Submission Request to W3C: OWLlink Protocol", + "https://www.cs.hmc.edu/~jpadgett/nnfinal/NNPrsntnJP1.pdf": "Sparse Distributed Memory - A study of psychologically driven storage - Pentti Kanerva", + "http://www.semanlink.net/doc/2020/05/sebastian_riedel_sur_twitter_": "Sebastian Riedel sur Twitter : Happy to introduce BLINK, the @facebookai open-source entity linker!...", + "https://arxiv.org/abs/1902.09229": "[1902.09229] A Theoretical Analysis of Contrastive Unsupervised Representation Learning", + "http://www.semanlink.net/doc/2020/09/is_it_possible_to_force_excel_r": "Is it possible to force Excel recognize UTF-8 CSV files automatically? - Stack Overflow", + "http://www.semanlink.net/doc/2019/11/hello_world_for_javascript_with": "Hello World for JavaScript with npm modules in the browser", + "http://bugbrother.blog.lemonde.fr/2010/05/25/la-grande-bretagne-enterre-la-societe-de-surveillance/": "La Grande-Bretagne enterre la soci\u00e9t\u00e9 de surveillance - BUG BROTHER", + "http://code.google.com/p/owl1-1/": "owl1-1 - Google Code", + "http://www.cl.cam.ac.uk/~mgk25/iso-time.html": "International standard date and time notation", + "http://www.ibm.com/developerworks/java/library/j-solr1/": "Search smarter with Apache Solr, Part 1: Essential features and the Solr schema", + "http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the": "[1810.10531] A mathematical theory of semantic development in deep neural networks", + "https://code.fb.com/ai-research/pytext-open-source-nlp-framework/": "Open-sourcing PyText for faster NLP development", + "http://www.rashmisinha.com/archives/05_09/tagging-cognitive.html": "A cognitive analysis of tagging", + "http://americanradioworks.publicradio.org/features/tomorrows-college/lectures/": "Don't Lecture Me American RadioWorks", + "http://news.bbc.co.uk/sport2/hi/football/world_cup_2010/matches/match_58/default.stm": "Uruguay 1-1 Ghana", + "http://www.informationweek.com/news/global-cio/interviews/240002892": "General Motors Will Slash Outsourcing In IT Overhaul - Global-cio - Executive insights/interviews - Informationweek", + "http://lists.w3.org/Archives/Public/www-tag/2012Mar/0220.html": "Re: Classification of ISSUE-57 change proposals - R Fielding", + "http://www.w3.org/2001/sw/rdb2rdf/": "W3C RDB2RDF Working Group", + "https://support.google.com/webmasters/answer/35769?hl=en": "Webmaster Guidelines - Webmaster Tools Help", + "http://www.semanlink.net/doc/2021/01/les_champions_des_energies_reno": "Les champions des \u00e9nergies renouvelables rivalisent d\u00e9sormais avec les majors du p\u00e9trole et du gaz", + "http://www.thefigtrees.net/lee/blog/2006/11/semantic_web_technologies_in_t.html": "TechnicaLee Speaking: Semantic Web Technologies in the Enterprise", + "http://maven-java-formatter-plugin.googlecode.com/svn/site/0.3.1/examples.html#Multimodule_Configuration": "Multimodule Configuration / Maven2 Java Formatter Plugin - Examples", + 
"http://www.lemonde.fr/afrique/article/2016/02/27/le-benin-veut-distribuer-quatre-millions-de-kits-solaires-en-six-mois_4872961_3212.html": "Le B\u00e9nin veut distribuer quatre millions de kits solaires en six mois", + "http://www.semanlink.net/doc/2020/08/l%E2%80%99adn_d%E2%80%99un_peuple_inconnu_mille": "L\u2019ADN d\u2019un peuple inconnu mill\u00e9naire d\u00e9couvert au Cameroun - Geo.fr", + "http://developer.yahoo.com/common/json.html": "Using JSON with Yahoo! Web Services", + "http://www.semanlink.net/doc/2020/05/bootstrapping_entity_alignment_": "Bootstrapping Entity Alignment with Knowledge Graph Embedding IJCAI", + "http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu": "[1912.08422] Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation", + "http://www.ibm.com/developerworks/web/library/wa-aj-tomcat/index.html": "Build a RESTful Web service using Jersey and Apache Tomcat", + "http://www.elharo.com/blog/software-development/web-development/2005/12/08/post-vs-put/": "Mokka mit Schlag \u00bb POST vs. PUT", + "http://www.insu.cnrs.fr/node/9519": "La plus vieille biodiversit\u00e9 de communaut\u00e9 bact\u00e9rienne, dat\u00e9e de 2,1 milliards d\u2019ann\u00e9es et son implication dans la conservation du biota francevillien", + "http://jena.sourceforge.net/ARQ/sparql-remote.html": "ARQ - Querying Remote SPARQL Services", + "https://lejournal.cnrs.fr/videos/la-cite-oubliee-de-loropeni?utm_content=bufferd6b13&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "La cit\u00e9 oubli\u00e9e de Lorop\u00e9ni CNRS Le journal", + "http://www.ldodds.com/blog/archives/000291.html": "Lost Boy: Benefits of Refactoring to REST", + "http://www.patricksoftwareblog.com/tag/flask-uploads/": "flask-uploads \u2013 Patrick's Software Blog", + "https://www.wired.com/story/ai-pioneer-explains-evolution-neural-networks/": "Interview of Geoffrey Hinton WIRED", + "https://stackoverflow.com/questions/41535881/how-do-i-upgrade-to-python-3-6-with-conda": "osx - How do I upgrade to Python 3.6 with conda? - Stack Overflow", + "https://dzone.com/articles/swagger-make-developers-love": "Swagger: Make Developers Love Working With Your REST API - DZone Java", + "http://stackoverflow.com/questions/6439579/what-does-var-foo-foo-assign-a-variable-or-an-empty-object-to-that-va?noredirect=1&lq=1": "What does \"var FOO = FOO {}\" (assign a variable or an empty object to that variable) mean in Javascript? - Stack Overflow", + "http://ruder.io/10-exciting-ideas-of-2018-in-nlp/": "10 Exciting Ideas of 2018 in NLP", + "http://www.semanlink.net/doc/2019/05/i_oversaw_the_us_nuclear_power_": "I Oversaw the US Nuclear Power Industry. 
Now I Think It Should Be Banned.", + "http://tuttlesvc.teacherhosting.com/blog/blosxom.cgi/2005/08/22#413": "The Not-RDF Tax", + "https://www.wired.com/story/emmanuel-macron-talks-to-wired-about-frances-ai-strategy/": "Emmanuel Macron Q&A: France's President Discusses Artificial Intelligence Strategy WIRED", + "https://arxiv.org/abs/1603.01360": "[1603.01360] Neural Architectures for Named Entity Recognition", + "http://www.oregongeology.com/": "Oregon Department of Geology and Mineral Industries (DOGAMI) Homepage", + "https://twitter.com/yoavgo/status/1099273902415589376": "(((\u0644()(\u0644() 'yoav)))) sur Twitter : \"These explanation slides by Mike Collins on the transformer ...", + "http://sourceforge.net/projects/wikipedia/": "SourceForge.net: Project Info - MediaWiki", + "http://www.pbs.org/wgbh/nova/next/tech/farming-with-robotics-automation-and-sensors/": "Farms of the Future Will Run on Robots and Drones \u2014 NOVA Next PBS", + "http://www.semanlink.net/doc/2021/01/2012_15723": "[2012.15723] Making Pre-trained Language Models Better Few-shot Learners", + "http://www.andornot.com/blog/post/Advanced-autocomplete-with-Solr-Ngrams-and-Twitters-typeaheadjs.aspx": "Advanced autocomplete with Solr Ngrams", + "http://bigbrowser.blog.lemonde.fr/2014/10/01/faut-il-censurer-les-vieilles-oeuvres-considerees-comme-racistes-aujourdhui/": "Faut-il censurer les vieilles \u0153uvres consid\u00e9r\u00e9es comme racistes aujourd\u2019hui ? Big Browser", + "http://web.ing.puc.cl/~jperez/papers/tods-a16-perez.pdf": "Semantics and Complexity of SPARQL", + "http://eclipsesource.com/blogs/tutorials/egit-tutorial/": "EGit Tutorial \u00ab EclipseSource Blog", + "http://www.zitgist.com/labs/linked_data.html": "Linked Data by Zitgist", + "http://fgiasson.com/blog/index.php/2010/07/05/semantic-components/": "Semantic Components at Frederick Giasson\u2019s Weblog", + "http://www.paulgraham.com/nerds.html": "Why Nerds are Unpopular", + "http://usc-isi-i2.github.io/DL4KGS/": "Workshop on Deep Learning for Knowledge Graphs and Semantic Technologies", + "https://www.semanticscholar.org/paper/Effective-Word-Representation-for-Named-Entity-Rec-Hsieh-Li/839268385124f03bfd19c3ce44d7935d7c6f13a0": "Effective Word Representation for Named Entity Recognition (2017)", + "http://www.lemonde.fr/festival/article/2015/06/25/et-si-on-ouvrait-les-frontieres_4661969_4415198.html": "Migrants : et si ouvrir les fronti\u00e8res g\u00e9n\u00e9rait de la richesse ?", + "http://www.xom.nu/": "XOM open source (LGPL), tree-based API for processing XML with Java", + "http://www.programcreek.com/java-api-examples/index.php?api=com.github.jsonldjava.core.JsonLdOptions": "Java Code Example com.github.jsonldjava.core.JsonLdOptions", + "http://semanticweb.com/w3c-publishes-linked-data-platform-best-practices-guidelines_b44199": "W3C Publishes Linked Data Platform Best Practices and Guidelines - Semanticweb.com", + "http://arstechnica.com/science/news/2012/04/bugs-pick-up-pesticide-resistance-from-pesticide-eating-bacteria.ars": "Bugs pick up pesticide resistance from pesticide-eating bacteria", + "http://science.nasa.gov/headlines/y2008/10jun_solarprobe.htm?list1065474": "NASA - Solar Probe Plus - Nasa plans to visit the sun", + "http://nepomuk.semanticdesktop.org/": "NEPOMUK - The Social Semantic Desktop", + "http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a": "[2009.02835] E-BERT: A Phrase and Product Knowledge Enhanced Language Model for E-commerce", + "https://arxiv.org/pdf/1708.00214.pdf": "[1708.00214] Natural 
Language Processing with Small Feed-Forward Networks", + "http://news.bbc.co.uk/earth/hi/earth_news/newsid_9306000/9306399.stm": "BBC - Earth News - 'Ghostly' Saharan cheetah filmed in Niger, Africa", + "https://blog.acolyer.org/2017/01/31/european-union-regulations-on-algorithmic-decision-making-and-a-right-to-explanation/": "European Union regulations on algorithmic decision making and a \u201cright to explanation\u201d the morning paper", + "http://www.pcworld.com/article/2106065/sxsw-starts-out-with-spotlight-on-security-as-googles-schmidt-sounds-off-on-nsa.html": "SXSW starts out with spotlight on security, as Google's Schmidt sounds off on NSA PCWorld", + "http://code.google.com/p/backplanejs/wiki/Rdfj": "Rdfj", + "https://github.com/huggingface/pytorch-pretrained-BERT": "huggingface/pytorch-pretrained-BERT: The Big-&-Extending-Repository-of-Transformers: Pretrained PyTorch models for Google's BERT, OpenAI GPT & GPT-2, Google/CMU Transformer-XL.", + "http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r": "Bringing the power of machine reading comprehension to specialized documents - Microsoft Research", + "https://www.lemonde.fr/sciences/article/2018/11/23/decouverte-au-bresil-d-une-construction-de-termites-vaste-comme-la-grande-bretagne_5387634_1650684.html": "Des termites ont construit une \u00ab\u00a0structure\u00a0\u00bb aussi vaste que la Grande-Bretagne", + "http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross": "[1911.02116] Unsupervised Cross-lingual Representation Learning at Scale", + "https://twitter.com/python_tip/status/1029632055284363264": "Daily Python Tip sur Twitter : \"Wanna know which line of your function is eating all the time? Measure it with #lprun:\u2026 \"", + "http://www.semanlink.net/doc/2021/09/neuml_txtai_build_ai_powered_s": "neuml/txtai: Build AI-powered semantic search applications", + "http://www.semanlink.net/doc/2021/08/masakhane_using_ai_to_bring_af": "Masakhane: Using AI to Bring African Languages Into the Global Conversation", + "http://u2.gmu.edu:8080/dspace/bitstream/1920/454/1/URSW05_PR-OWL.pdf": "PR-OWL: A Bayesian Ontology Language for the Semantic Web", + "http://dev.eclipse.org/newslists/news.eclipse.webtools/": "Eclipse Archives - webtools", + "http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo": "[2010.06467] Pretrained Transformers for Text Ranking: BERT and Beyond", + "http://www.semanlink.net/doc/2021/05/an_introduction_to_knowledge_gr": "An Introduction to Knowledge Graphs SAIL Blog", + "https://www.youtube.com/watch?v=Yr1mOzC93xs": "From Deep Learning of Disentangled Representations to Higher-level Cognition - YouTube", + "http://rdf-translator.appspot.com/": "RDF Translator", + "http://www.semanlink.net/doc/2020/04/coronavirus_en_france_%C2%AB_en_ma": "Coronavirus en France : \u00ab\u00a0En mati\u00e8re de pr\u00e9vention, nous ne sommes pas \u00e0 la hauteur de l\u2019\u00e9pid\u00e9mie\u00a0\u00bb", + "http://static.flickr.com/22/35163080_a32ed821ae.jpg": "Maria sur le fleuve Niger", + "http://www.semanlink.net/doc/2019/09/machine_translation_for_african": "Machine Translation for African Languages", + "https://medium.com/welcome-to-thefamily/les-cinq-%C3%A9tapes-du-d%C3%A9ni-a7a06072c9fc": "Les cinq \u00e9tapes du d\u00e9ni", + "http://developer.mozilla.org/fr/docs/Migration_d'applications_d'Internet_Explorer_vers_Mozilla#Diff.C3.A9rences_dans_l.27ex.C3.A9cution_de_JavaScript": "Migration d'applications d'Internet Explorer vers Mozilla - MDC", + 
"http://fr.wikipedia.org/wiki/Jour_de_col%C3%A8re": "Dies ir\u00e6 (Jour de col\u00e8re)", + "https://groups.google.com/forum/#!topic/hypermedia-web/HHXFoBh95ZQ": "Follow your Nose in Hypermedia APIs?", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/africa/5341474.stm": "BBC NEWS - LRA victims seek peace with past", + "https://en.wikipedia.org/wiki/Psusennes_I": "Psusennes I", + "http://www.newscientist.com/article/mg20327151.600-memristor-minds-the-future-of-artificial-intelligence.html": "Memristor minds: The future of artificial intelligence - New Scientist", + "http://norvig.com/spell-correct.html": "How to Write a Spelling Corrector (Peter Norvig)", + "https://cloud.google.com/getting-started/": "How to get started with GCP \u00a0\u00a0 Google Cloud Platform", + "http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable": "[1607.00653] node2vec: Scalable Feature Learning for Networks", + "https://www.lemonde.fr/pixels/article/2019/02/06/intelligence-artificielle-deepmind-s-interesse-au-jeu-de-cartes-francais-hanabi_5420186_4408996.html": "Intelligence artificielle\u00a0: DeepMind s\u2019int\u00e9resse au jeu de cartes fran\u00e7ais Hanabi", + "http://www.semanlink.net/doc/2019/09/new_study_suggests_dark_matter_": "New Study Suggests Dark matter predates the \u2018Big Bang\u2019 \u2014 but what does that actually mean?", + "http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.3382": "CiteSeerX \u2014 Automotive Product Documentation", + "http://www.w3.org/DesignIssues/LinkedData.html": "Linked Data - Design Issues", + "http://jena.hpl.hp.com/juc2006/proceedings.html": "2006 Jena User Conference - proceedings", + "https://github.com/wabyking/TextClassificationBenchmark": "A Benchmark of Text Classification in PyTorch", + "https://www.quantamagazine.org/20141218-machine-intelligence-cracks-genetic-controls/": "Machine Learning Reveals Genetic Control System Quanta Magazine", + "http://www.lemonde.fr/planete/article/2010/05/26/un-champignon-menace-la-production-mondiale-de-ble_1363533_3244.html": "Un champignon menace la production mondiale de bl\u00e9", + "http://web.archive.org/web/19981202015816/www.hypersolutions.fr/news.ssi": "Nouveaut\u00e9s sur le site hyperSOLutions", + "http://www.sersc.org/journals/IJSEIA/vol10_no2_2016/8.pdf": "Named Entity Recognition using Word Embedding as a Feature (2016)", + "http://dannyayers.com/2006/07/13/the-semantic-web": "The Semantic Web Revisited Danny Ayers about \"The Semantic Web Revisited\"", + "http://wiki.goodrelations-vocabulary.org/Documentation/Product_features": "Documentation/Product features - GoodRelations Wiki", + "http://www.freesoft.org/CIE/RFC/2068/147.htm": "13.6 Caching Negotiated Responses", + "https://thecorrespondent.com/4664/why-do-the-poor-make-such-poor-decisions/179307480-39a74caf": "Why do the poor make such poor decisions?", + "http://www.semanlink.net/doc/2019/07/all_my_tweets_view_all_your_t": "All My Tweets - View all your tweets on one pagesss.", + "http://blog.pinboard.in/2011/11/the_social_graph_is_neither/": "The Social Graph is Neither (Pinboard Blog)", + "http://www.semanlink.net/doc/2020/11/match_markdown_links_with_advan": "Match Markdown links with advanced regex features by Micha\u00ebl Perrin", + "http://www.semanticweb.com/on/semantic_enterprise_what_are_the_gorillas_doing_oracle_ibm_hp_cisco_microsoft_and_sap_168973.asp": "Semantic Enterprise: What Are The Gorillas Doing? 
(Oracle, IBM, HP, Cisco, Microsoft and SAP) - Semantic Web", + "http://maven.apache.org/guides/introduction/introduction-to-the-standard-directory-layout.html": "Maven - Introduction to the Standard Directory Layout", + "https://www.quora.com/Data-Science-Can-machine-learning-be-used-for-time-series-analysis": "Data Science: Can machine learning be used for time-series analysis? - Quora", + "http://perian.org/": "Perian - A swiss-army knife for QuickTime", + "http://www.facebook.com/group.php?gid=19352893701": "Facebook Find your path in the Labyrinth of Information!", + "http://arstechnica.com/articles/culture/microsoft-learn-from-apple-II.ars": "From Win32 to Cocoa: a Windows user's conversion to Mac OS X", + "http://rdfa.info/rdfa-in-the-wild/": "RDFa in the wild", + "http://fr.wikipedia.org/wiki/Za%C3%AF_(agriculture)": "Za\u00ef (agriculture) - Wikip\u00e9dia", + "http://www.xml.com/pub/a/2003/02/05/tr.html": "XML.com: XSLT, Browsers, and JavaScript", + "http://www.forbes.com/sites/gregsatell/2013/10/27/how-ibms-watson-will-change-the-way-we-work/": "How IBM's Watson Will Change The Way We Work - Forbes", + "http://www.laconscience.com/article.php?id_article=2382": "La Conscience - Sam Mangwana", + "http://java.dzone.com/articles/clerezza-apache-project": "Clerezza: An Apache Project for the Semantic Web Javalobby", + "http://realitesbiomedicales.blog.lemonde.fr/2015/11/27/decapite-ce-ver-repousse-avec-la-tete-dune-autre-espece/": "D\u00e9capit\u00e9, ce ver repousse avec la t\u00eate\u2026 d\u2019une autre esp\u00e8ce R\u00e9alit\u00e9s Biom\u00e9dicales", + "http://opiateforthemass.es/articles/mini-ai-app-using-tensorflow-and-shiny/": "Mini AI app using TensorFlow and Shiny \u2013 Opiate for the masses", + "http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr": "Information Retrieval for HR", + "http://richard.cyganiak.de/blog/2007/02/debugging-semantic-web-sites-with-curl/": "Debugging Semantic Web sites with cURL cygri\u2019s notes on web data", + "http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan": "Turning Up the Heat: The Mechanics of Model Distillation", + "http://www.usine-digitale.fr/article/apple-et-google-ne-vont-pas-entrer-dans-l-industrie-automobile-selon-carlos-tavares.N317081": "Apple et Google ne vont pas \"entrer dans l'industrie automobile\", selon Carlos Tavares", + "http://www.semanlink.net/doc/2019/10/paris_nlp_season_4_meetup_1_at": "Paris NLP Season 4 Meetup #1 at Algolia", + "https://www.quora.com/When-is-using-word-embeddings-harmful": "When is using word embeddings harmful? - Quora", + "https://github.com/tristan/jsonld-java": "tristan/jsonld-java \u00b7 GitHub", + "http://www.rfi.fr/francais/actu/articles/090/article_52884.asp": "RFI - L'oignon : une fili\u00e8re prometteuse pour les paysans nig\u00e9riens", + "http://www.google.com/webmasters/": "Webmasters - Google", + "https://www.theguardian.com/technology/2017/feb/05/artificial-intelligence-ethics-poker-libratus-texas-holdem-ai-deepstack": "AI can win at poker: but as computers get smarter, who keeps tabs on their ethics? 
Technology The Guardian", + "http://www.w3.org/2001/sw/BestPractices/SE/ODSD/": "A Semantic Web Primer for Object-Oriented Software Developers", + "http://librdf.org/docs/ruby.html": "Redland RDF Application Framework - Ruby Interface", + "http://ouseful.wordpress.com/2008/10/14/data-scraping-wikipedia-with-google-spreadsheets/": "Data Scraping Wikipedia with Google Spreadsheets \u00ab OUseful.Info, the blog\u2026", + "http://semanticweb.com/payswarm-give-someone-0-02-for-their-two-cents-part-i_b23739": "PaySwarm \u2013 Give Someone $0.02 for Their Two Cents (Part I) - semanticweb.com", + "http://scopeblog.stanford.edu/2014/03/10/stanford-bioengineer-develops-a-50-cent-paper-microscope/": "Stanford bioengineer develops a 50-cent paper microscope Scope Blog", + "http://www.wdl.org/en/item/53/": "Map of Barbary, Nigritia and Guinea - World Digital Library", + "http://www.bioshare.net/": "Bioshare: Home", + "http://dexter.isti.cnr.it/": "Dexter, an Open Source Framework for Entity Linking", + "http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack": "How Much Knowledge Can You Pack Into the Parameters of a Language Model?", + "http://louvre-boite.viabloga.com/news/76.shtml": "L'ouvre-bo\u00eete - Next big thing : Tag Clusters", + "https://en.wikipedia.org/wiki/My_Life_in_the_Bush_of_Ghosts_%28novel%29": "\"My Life in the Bush of Ghosts\" Amos Tutuola", + "http://bugbrother.blog.lemonde.fr/2013/04/07/ne-dites-pas-a-ma-mere-que-je-suis-un-hacker-elle-me-croit-blogueur-au-monde-fr-reporter-au-vinvinteur/": "Ne dites pas \u00e0 ma m\u00e8re que je suis un hacker, elle me croit blogueur au Monde.fr, & reporter au Vinvinteur BUG BROTHER", + "http://people.cs.umass.edu/~mccallum/papers/crf-tutorial.pdf": "An Introduction to Conditional Random Fields for Relational Learning (Charles Sutton and Andrew McCallum, 2006)", + "http://news.bbc.co.uk/2/hi/technology/8544935.stm": "BBC News - Is it time to defend our rights?", + "https://www.youtube.com/watch?v=ge4kEwrperk": "France is AI 2018: Emmanuel Bacry - Detecting weak signals in pharmaco epidemiology - YouTube", + "http://www.michaelnielsen.org/ddi/how-the-bitcoin-protocol-actually-works/": "How the Bitcoin protocol actually works DDI", + "http://www.pr-owl.org/": "PR-OWL: A Bayesian Framework for the Semantic Web", + "http://www.theatredurondpoint.fr/saison/fiche_spectacle.cfm/59614-du-cristal-a-la-fumee.html": "Du cristal \u00e0 la fum\u00e9e", + "http://2008.xtech.org/public/schedule/detail/545": "XTech 2008: Why you should have a Website \u2014 IDEAlliance", + "http://aperture.sourceforge.net/": "Aperture Framework", + "https://github.com/Graphity": "Graphity on Github", + "http://www.snee.com/bobdc.blog/2017/07/the-w3c-standard-constraint-la.html": "The W3C standard constraint language for RDF: SHACL - bobdc.blog", + "http://www.devx.com/semantic/Article/38700": "Relational Database Integration with RDF/OWL", + "http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong": "[1802.07569] Continual Lifelong Learning with Neural Networks: A Review", + "http://esw.w3.org/topic/TaskForces/CommunityProjects/LinkingOpenData/EquivalenceMining": "TaskForces/CommunityProjects/LinkingOpenData/EquivalenceMining - ESW Wiki", + "http://web.mit.edu/cre/research/1k-house-project.html": "MIT's 1K House Project", + "http://www.spectrum.ieee.org/singularity": "IEEE Spectrum: Special Report: The Singularity", + "http://lists.w3.org/Archives/Public/public-lod/2011Oct/0034.html": "ANN: Renault adds GoodRelations to UK Shop", + 
"http://www.semanlink.net/doc/2021/10/omer_levy_sur_twitter_what_i": "Omer Levy sur Twitter : \"What if I told you that fine-tuning T5-Large (0.8B params) on a couple hundred examples could outperform GPT-3 (175B params) on a bunch of tasks?\"", + "http://bnode.org/blog/2007/12/22/driftr-linked-data-browser-and-editor-screencast": "DriftR Linked Data Browser and Editor (Screencast) - benjamin nowack's blog", + "http://ruder.io/requests-for-research/": "NLP: Requests for Research", + "http://www.lemonde.fr/depeches/2010/12/18/niger-greenpeace-denonce-le-deversement-de-dechets-radioactifs-d-areva_3244_108_44080202.html": "Niger: Greenpeace d\u00e9nonce le d\u00e9versement de d\u00e9chets radioactifs d'Areva - LeMonde.fr", + "http://jung.sourceforge.net/": "JUNG - Java Universal Network/Graph Framework", + "http://www.semanlink.net/2014/09/ec-web-paper.pdf": "Automotive range as e-commerce data (EC-WEB 2014)", + "http://stackoverflow.com/questions/36517620/jersey-uriconnegfilter-now-declared-final-breaks-old-code-how-to-fix-it": "Jersey: UriConnegFilter now declared final, breaks old code: how to fix it? - Stack Overflow", + "http://www.toprankblog.com/2009/11/seo-for-flash-tips/": "Flash SEO: 5 Tips and Best Practices for Optimizing Flash Websites", + "http://www.der-mo.net/relationBrowser/index.html": "der-mo.net - Moritz Stefaner - Relation browser", + "http://www.semanlink.net/doc/2019/08/starlette": "Starlette", + "https://twitter.com/pnderthevstnes/status/1110260437801562112": "Sam Shleifer sur Twitter : \"ULMFit from @fastai + Data Augmentation with backtranslation can get 80+% validation accuracy using only 50 training examples on #NLP IMDB sentiment classification!", + "http://virtuoso.openlinksw.com/dataspace/dav/wiki/Main/VOSSQL2RDF": "Virtuoso Open-Source Edition: Mapping SQL Data to RDF", + "http://objecthunter.congrace.de/tinybo/blog/articles/98": "Serializing Apache Jena's RDF resources via JAXB into JSON docments in a JAX-RS context.", + "http://fr.wikipedia.org/wiki/Beijing_Genomics_Institute": "Beijing Genomics Institute", + "http://news.mit.edu/2018/chip-neural-networks-battery-powered-devices-0214": "Neural networks everywhere MIT News", + "https://www.reviewnb.com/": "ReviewNB: Jupyter Notebook Diff for GitHub", + "http://www.semanlink.net/doc/2019/07/classifying_duplicate_questions": "Classifying duplicate questions from Quora with Keras R-bloggers", + "https://class.coursera.org/datascitoolbox-010": "The Data Scientist\u2019s Toolbox Coursera", + "https://brinxmat.wordpress.com/2014/01/28/an-unusually-sensible-post-about-rdf/": "An unusually sensible post about RDF Brinxmat's blog", + "http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_": "[1905.06316] What do you learn from context? 
Probing for sentence structure in contextualized word representations", + "https://www.researchgate.net/messages/671455533": "Re: Product Customization as Linked Data - ResearchGate", + "http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck": "[2007.04612] Concept Bottleneck Models", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm": "BBC NEWS - Putin orders oil pipeline shifted", + "http://moussapoussy.planeteafrique.com": "", + "http://fortune.com/2015/09/23/china-sharing-economy-mobile/": "China's sharing economy is about mobile apps and quick delivery - Fortune", + "http://codinginparadise.org/weblog/2005/08/ajax-tutorial-saving-session-across.html": "Coding In Paradise: AJAX Tutorial: Saving Session Across Page Loads Without Cookies, On The Client Side", + "https://www.analyticsvidhya.com/blog/2018/11/tutorial-text-classification-ulmfit-fastai-library/": "Tutorial on Text Classification (NLP) using ULMFiT and fastai Library in Python - Analytics Vidhya", + "http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1": "Siamese CNN for job\u2013candidate matching (slides)", + "https://www.washingtonpost.com/news/wonk/wp/2016/11/08/a-new-theory-for-why-trump-voters-are-so-angry-that-actually-makes-sense/": "A new theory for why Trump voters are so angry \u2014 that actually makes sense - The Washington Post", + "http://devblog.virtage.com/2013/07/how-to-get-file-resource-from-maven-srctestresources-folder-in-junit-test/": "How to get file resource from Maven src/test/resources/ folder in JUnit test?", + "http://psiexp.ss.uci.edu/research/papers/SteyversGriffithsLSABookFormatted.pdf": "Probabilistic Topic Models", + "http://projects.freelibrary.info/solr-jetty-maven/": "Solr-Jetty-Maven Project", + "http://stackoverflow.com/questions/7551/best-practices-for-securing-a-rest-api-web-service": "Best Practices for securing a REST API / web service - Stack Overflow", + "http://www.youtube.com/watch?v=o0NuuWJscqg": "YouTube - Sani Aboussa - Hadiza (soumata haour\u00e9)", + "http://www.w3.org/wiki/JSON+RDF": "JSON+RDF - W3C Wiki", + "http://web.archive.org/web/20000531042005/http://www.hypersolutions.fr/M3/AfricanMuseums/AfricanMuseums.ssi": "Multi Media Museum et les mus\u00e9es africains", + "http://pisani.blog.lemonde.fr/2008/02/01/comment-faire-voir/": "Transnets \u00bb Blog Archive \u00bb Comment faire voir?", + "http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz": "[1911.02168] CoKE: Contextualized Knowledge Graph Embedding", + "https://www.newscientist.com/article/2110522-googles-neural-networks-invent-their-own-encryption/": "Google's neural networks invent their own encryption New Scientist", + "https://drive.google.com/file/d/0Bzx8a8hdiMD2Z25LNUFKWndxazVMTkVfU3g1bDVaX3lJZ3E4/view": "open-knowledge-embeddings.pdf - (2019)", + "http://www.assemblee-nationale.fr/histoire/dudh/1789.asp": "La libert\u00e9 consiste \u00e0 pouvoir faire tout ce qui ne nuit pas \u00e0 autrui", + "http://gawker.com/terrorism-works-1678049997": "Terrorism Works", + "http://www.bbc.com/news/magazine-21702546": "Syria's priceless heritage under attack", + "http://www.guardian.co.uk/print/0,3858,5223112-106710,00.html": "Guardian Blair finds a little heaven in euro hell", + "http://www.semanlink.net/doc/2021/05/fastai_v2_cheat_sheets": "fastai v2 cheat sheets", + "http://www.semanlink.net/doc/2020/07/a_collection_of_300_survey_pap": "A collection of 300+ survey papers on NLP and ML", + "http://www.litteratureaudio.com": 
"Litterature audio.com Livres audio gratuits \u00e0 \u00e9couter et t\u00e9l\u00e9charger", + "http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin": "[2007.12603] IR-BERT: Leveraging BERT for Semantic Search in Background Linking for News Articles", + "http://www.semanlink.net/doc/2020/07/l%E2%80%99aventure_citoyenne_des_semenc": "L\u2019aventure citoyenne des semences paysannes, \u00ab\u00a0commun\u00a0\u00bb nourricier", + "http://www.slideshare.net/terraces/tmp-467722/": "FOAF & SIOC applications, slides (Alexandre Passant, ESWC 2008, \"Socail Networks\" tutorial)", + "https://distill.pub/2016/augmented-rnns/": "Attention and Augmented Recurrent Neural Networks (2016)", + "https://fr.slideshare.net/BhaskarMitra3/using-text-embeddings-for-information-retrieval": "Using Text Embeddings for Information Retrieval", + "http://europa.eu/rapid/press-release_SPEECH-13-261_en.htm": "EUROPA - COMMUNIQUES DE PRESSE - Communiqu\u00e9 de presse - Speech: The big data revolution", + "http://www.semanticweb.com/on/semantic_enterprise_the_startups_169811.asp": "Semantic Enterprise: The StartUps - Semantic Web", + "http://redlink.co/": "redlink", + "http://www.bbc.com/future/story/20120614-how-bacteria-talk": "How bacteria 'talk' to each other", + "http://www.gnu.org/software/octave/doc/interpreter/index.html": "GNU Octave's Documentation", + "http://news.bbc.co.uk/2/hi/programmes/world_news_america/8601207.stm": "BBC News - Why do Finland's schools get the best results?", + "http://www.readwriteweb.com/archives/top_10_semantic_web_products_of_2009.php": "Top 10 Semantic Web Products of 2009", + "http://www.bbc.co.uk/things/": "BBC - Things", + "http://www.semanlink.net/doc/2019/12/detection_d_intention_applicat": "D\u00e9tection d'intention: application industrielle d'un projet de recherche", + "http://www.semanlink.net/doc/2019/11/en_1989_100_deutschemarks_pour": "En 1989, 100 deutschemarks pour les citoyens de RDA : cadeau et ticket doux-amer vers l\u2019inconnu", + "https://plus.google.com/s/schema.org": "schema.org - Google+", + "http://umiacs.umd.edu/~jbg/teaching/INFM_718_2011/lecture_6.pdf": "Language models - Jordan Boyd-Graber - University of Maryland", + "https://www.wired.com/story/four-successful-bel-transplants/": "Bioengineers Are Closer Than Ever To Lab-Grown Lungs WIRED", + "http://deliprao.com/archives/262": "Everything is a Model Delip Rao", + "http://lists.w3.org/Archives/Public/public-vocabs/2013May/0106.html": "Re: Offer data in separate places in HTML from Martin Hepp on 2013-05-14 (public-vocabs@w3.org from May 2013)", + "http://allforces.com/2005/08/25/wordpress-on-mac-install/": "WordPress on Mac Part 3: Installing WordPress All Forces", + "http://www.semanlink.net/doc/2021/01/quantum_neuromorphic_computing_": "Quantum neuromorphic computing: Applied Physics Letters: Vol 117, No 15", + "https://arxiv.org/abs/1611.04228": "[1611.04228] Learning Sparse, Distributed Representations using the Hebbian Principle", + "http://axel.deri.ie/publications/ESWC2007_SPARQL_Tutorial.pdf": "ESWC2007_SPARQL_Tutorial.pdf", + "http://www.youtube.com/watch?v=w7BKNySQ97w": "H\u00e9catombe - Georges Brassens", + "http://crianca.free.fr/": "Crian\u00e7a Punk Rock - Paris", + "http://www.dailymotion.com/video/x6s3co_petrole-cruel-sera-le-reveil-1-5_news": "PETROLE Cruel sera le r\u00e9veil", + "http://winch5.blog.lemonde.fr/2013/08/22/courriels-de-trois-africains-commentant-limpact-du-mobile-sur-leur-continent/#xtor=RSS-32280322": "Courriels de trois Africains commentant l\u2019impact du 
mobile sur leur continent Winch 5", + "http://www.holygoat.co.uk/projects/tags/": "Tag ontology design", + "http://krook.org/jsdom/": "JavaScript DOM", + "https://medium.com/product-design/d8d4f2300cf3": "Tenth Grade Tech Trends", + "http://careers.ulitzer.us/node/888956": "The Renault-Nissan Alliance Forms Zero-Emission Vehicle Partnership in San Diego Careers and Employment Journal", + "http://bigbrowser.blog.lemonde.fr/2013/06/11/prism-comment-passer-entre-les-mailles-de-la-surveillance-dinternet/": "PRISM \u2013 Comment passer entre les mailles de la surveillance d\u2019Internet ? Big Browser", + "https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2977273": "How to Measure and Draw Causal Inferences with Patent Scope by Jeffrey M. Kuhn, Neil Thompson :: SSRN", + "http://paris.sae.edu/fr/home/": "Ecole audiovisuel Paris: formation audio - technicien son, formation mao, formation video, formation webdesign, formation 3D - SAE Paris", + "http://answers.semanticweb.com/questions/1087/generating-documentation-from-rdfs-andor-owl-vocabularies": "Generating documentation from RDFS and/or OWL vocabularies - ANSWERS", + "https://www.wired.com/story/european-ruling-could-slow-africas-push-for-crispr-crops/": "European Ruling Could Slow Africa\u2019s Push For Crispr Crops WIRED", + "http://www.vox.com/first-person/2016/11/14/13513066/universal-basic-income-crowdfund": "What if you got $1,000 a month, just for being alive? I decided to find out. - Vox", + "http://jprosen.blog.lemonde.fr/2008/01/13/vers-la-detention-a-vie-des-mineurs-bonjour-l%E2%80%99espoir-235/": "Droits des enfants \u00bb Vers la d\u00e9tention \u00e0 vie des mineurs : bonjour l\u2019espoir !", + "http://www.semanlink.net/doc/2021/08/ete_comme_hiver_le_regime_de_p": "Et\u00e9 comme hiver, le r\u00e9gime de pluie \u00e0 l'origine du Sahara vert il y a 9000 ans INEE", + "http://www4.wiwiss.fu-berlin.de/lodcloud/ckan/validator/index.php": "CKAN LOD Datasets", + "http://sommet-ia.evenement.bercy.gouv.fr": "Sommet intelligence artificielle \u00e0 Paris", + "https://github.com/editorsnotes/edit-with-lov": "editorsnotes/edit-with-lov: demo of editing JSON-LD using LOV vocabularies", + "http://messenger.jhuapl.edu/gallery/sciencePhotos/image.php?gallery_id=2&image_id=117": "Messenger's First Look at Mercury\u2019s Previously Unseen Side", + "https://www.quora.com/How-do-you-calculate-the-memory-footprint-of-a-particular-deep-learning-model": "How do you calculate the memory footprint of a particular deep learning model? 
- Quora", + "http://bnode.org/blog/2010/07/28/linked-data-entity-extraction-with-zemanta-and-opencalais": "Linked Data Entity Extraction with Zemanta and OpenCalais - benjamin nowack's blog", + "https://www.nytimes.com/2018/02/05/science/mutant-crayfish-clones-europe.html": "This Mutant Crayfish Clones Itself, and It\u2019s Taking Over Europe - The New York Times", + "http://www.semanlink.net/doc/2019/08/watch_your_step_learning_node_": "Watch Your Step: Learning Node Embeddings via Graph Attention", + "http://www.consortiuminfo.org/bulletins/semanticweb.php": "THE SEMANTIC WEB: AN INTERVIEW WITH TIM BERNERS-LEE - Consortiuminfo.org Consortium Standards Bulletin- June 2005", + "https://en.wikipedia.org/wiki/G%C3%B6bekli_Tepe": "G\u00f6bekli Tepe - Wikipedia, the free encyclopedia", + "http://winch5.blog.lemonde.fr/chapitre-1-linnovation-vient-aussi-dailleurs/": "Chapitre 1 \u2013 L\u2019innovation vient aussi d\u2019ailleurs Winch 5", + "http://www.cyberciti.biz/faq/appleosx-bash-unix-change-set-path-environment-variable/": "Mac OS X: Set / Change $PATH Variable", + "http://eventmedia.eurecom.fr/": "EventMedia", + "http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1479": "ISWC 2008: Some Questions - Inference: Is it always forward chaining?", + "http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-04.pdf": "Using read/write Linked Data for Application Integration \u2013 Towards a Linked Data Basic Profile", + "https://arxiv.org/abs/1602.06797": "[1602.06797] Semi-supervised Clustering for Short Text via Deep Representation Learning", + "http://www.semanlink.net/doc/2021/10/james_briggs_sur_twitter_fre": "James Briggs sur Twitter : *free* course on vector similarity search and Faiss...\"", + "http://www.scottbot.net/HIAL/?p=221": "Topic Modeling and Network Analysis the scottbot irregular", + "https://plus.google.com/communities/104510681993581444051": "The Automotive Ontology Working Group - Community - Google+", + "http://fgiasson.com/blog/index.php/2008/04/20/exploding-the-domain-umbel-web-services-by-zitgist/": "Exploding the Domain: UMBEL Web Services by Zitgist at Frederick Giasson\u2019s Weblog", + "http://www.semanlink.net/doc/2020/02/fastai_fastpages_an_easy_to_us": "fastai/fastpages: An easy to use blogging platform, with enhanced support for Jupyter Notebooks.", + "http://infolab.stanford.edu/~bawa/Pub/similarity.pdf": "LSH Forest: Self-Tuning Indexes for Similarity Search (2005)", + "http://ishtarnews.blogspot.com/2006/12/national-day-in-zinder-parade.html": "Ishtar News: National Day in Zinder - the parade", + "http://www.semanlink.net/doc/2020/09/elastic_transformers_making_be": "Elastic Transformers. 
Making BERT stretchy \u2014 Scalable\u2026 by Mihail Dungarov Sep, 2020 Medium", + "http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f": "[1912.03927] Large deviations for the perceptron model and consequences for active learning", + "http://www.technologyreview.com/featuredstory/520446/the-decline-of-wikipedia/": "The Decline of Wikipedia: Even As More People Than Ever Rely on It, Fewer People Create It MIT Technology Review", + "https://medium.freecodecamp.com/angular-2-versus-react-there-will-be-blood-66595faafd51#.9vkdng65m": "Angular 2 versus React: There Will Be Blood \u2014 Free Code Camp \u2014 Medium", + "http://www.semanlink.net/doc/2019/06/siatl_naacl_2019_pdf_google_d": "An Embarrassingly Simple Approach for Transfer Learning from Pretrained Language Models (NAACL 2019) (Slides)", + "http://www2013.org/proceedings/p749.pdf": "Rethinking the Web as a Personal Archive", + "https://en.wikipedia.org/wiki/The_Lunchbox": "The Lunchbox - Wikipedia, the free encyclopedia", + "http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno": "[1503.02531] Distilling the Knowledge in a Neural Network", + "https://github.com/facebookresearch/MUSE": "facebookresearch/MUSE: A library for Multilingual Unsupervised or Supervised word Embeddings", + "http://revsys.com/newscloud/": "NewsCloud (Revolution Systems)", + "https://www.wired.com/tag/autonomous-vehicles/": "Autonomous Vehicles WIRED", + "http://graus.nu/research/context-based-entity-linking/": "Context-based Entity Linking Blog graus.nu", + "http://www.semanlink.net/doc/2021/10/alphafold_2_is_here_what%E2%80%99s_beh": "AlphaFold 2 is here: what\u2019s behind the structure prediction miracle Oxford Protein Informatics Group", + "http://www.cnrs.fr/inc/communication/direct_labos/cario.htm?utm_content=buffer8109c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Premier neurone artificiel monocomposant - CNRS", + "http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl": "princeton-nlp/DensePhrases: ACL'2021: Learning Dense Representations of Phrases at Scale; EMNLP'2021: Phrase Retrieval Learns Passage Retrieval, Too", + "https://dl.acm.org/citation.cfm?id=1007733": "Editorial: special issue on learning from imbalanced data sets - Chawla (2004)", + "http://energy.gov/eere/articles/first-commercially-available-fuel-cell-electric-vehicles-hit-street": "First Commercially Available Fuel Cell Electric Vehicles Hit the Street Department of Energy", + "http://www.semanlink.net/doc/2021/09/structuring_your_project_the_": "Structuring Your Project \u2014 The Hitchhiker's Guide to Python", + "http://www.semanlink.net/doc/2019/06/a_lagos_le_roi_des_peuls_est_a": "A Lagos, le roi des Peuls est aussi le boss des dockers", + "http://en.wikipedia.org/wiki/The_Master_and_Margarita": "The Master and Margarita", + "http://www.youtube.com/watch?v=6gmP4nk0EOE": "Web 2.0 ... 
The Machine is Us/ing Us", + "http://apassant.net/2014/07/01/google-io-2014-recap-android-knowledge-graph-and-more/": "Google I/O 2014 Recap: Android, Knowledge Graph and more Alexandre Passant", + "http://mccormickml.com/2015/06/12/minhash-tutorial-with-python-code/": "MinHash Tutorial with Python Code \u00b7 Chris McCormick", + "http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_": "[1910.00163] Specializing Word Embeddings (for Parsing) by Information Bottleneck", + "http://purl.org/configurationontology/quickstart": "C2GWeb-js: Renault Configuration data as RDF, tutorial", + "https://arxiv.org/abs/1801.06146": "[1801.06146] Universal Language Model Fine-tuning for Text Classification", + "http://www.oracle.com/technology/tech/semantic_technologies/index.html": "Oracle - Semantic Technologies Center", + "http://www.bbc.com/news/technology-35502030": "iPhones 'disabled' if Apple detects third-party repairs - BBC News", + "https://en.wikipedia.org/wiki/Thought_vector": "Thought vector - Wikipedia", + "http://www.productontology.org/": "The Product Types Ontology: Use Wikipedia pages for describing products or services with GoodRelations", + "http://maczealots.com/tutorials/wordpress/": "Installing WordPress on Tiger", + "http://nlp.seas.harvard.edu/NamedTensor": "Tensor Considered Harmful", + "http://hal.upmc.fr/hal-01517032": "Mod\u00e8le Neuronal de Recherche d'Information Augment\u00e9 par une Ressource S\u00e9mantique (2017)", + "http://simile.mit.edu/mail/SummarizeList?listId=14": "Linking Open Data mailing list at simile", + "https://www.tensorflow.org/hub/modules/google/universal-sentence-encoder-large/1": "Module google/universal-sentence-encoder \u00a0\u00a0 TensorFlow", + "https://www.ethereum-france.com/deploiement-du-projet-the-dao-mere-de-toutes-les-dao/": "D\u00e9ploiement de The DAO, \u00ab\u00a0m\u00e8re de toute les DAO\u00a0\u00bb Ethereum France", + "http://www.readwriteweb.com/hack/2010/12/lisp-getting-started.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29": "How To Get Started With Lisp", + "http://www.macworld.com/article/61359/2007/12/tco_filesharing.html?t=107": "Macworld Avoid file-sharing risks", + "http://www.butleranalytics.com/10-free-deep-learning-tools/": "10 Free Deep Learning Tools - Butler Analytics", + "https://github.com/Babylonpartners/fastText_multilingual": "GitHub - Babylonpartners/fastText_multilingual: Multilingual word vectors", + "http://www.lemonde.fr/planete/article/2012/09/23/le-chercheur-l-agrochimiste-et-les-abeilles_1764022_3244.html": "Le chercheur, l'agrochimiste et les abeilles", + "http://www.slideshare.net/terraces/towards-an-interlinked-semantic-wiki-farm/": "Towards an Interlinked Semantic Wiki Farm", + "http://www.siliconvalley.com/mld/siliconvalley/11685903.htm?template=contentModules/printstory.jsp": "Behind the wheel: nobody", + "http://simile.mit.edu/timeline/": "SIMILE Timeline", + "http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/": "An overview of word embeddings and their connection to distributional semantic models - AYLIEN (2016)", + "http://ben.adida.net/presentations/www2008-rdfa/": "Ben Adida - RDFa, slides du workshop \u00e0 WWW2008", + "http://ricostacruz.com/backbone-patterns/#assumptions": "Backbone patterns", + "http://getglue.com/fps": "fps on Get Glue", + "http://gerbil.aksw.org/gerbil/overview": "GERBIL Experiment Overview", + "https://github.com/solid/solid": "GitHub - solid/solid: Solid - Re-decentralizing the web 
(project directory)", + "https://www.lemonde.fr/sciences/article/2019/04/10/quand-plusieurs-humanites-peuplaient-la-terre_5448527_1650684.html": "Quand plusieurs humanit\u00e9s peuplaient la Terre", + "https://medium.com/@ageitgey/natural-language-processing-is-fun-9a0bff37854e": "Natural Language Processing is Fun! \u2013 Adam Geitgey \u2013 Medium", + "http://www.miv.t.u-tokyo.ac.jp/ishizuka/pr-class/Dumais-CIKM98.pdf": "Inductive learning algorithms and representations for text categorization", + "http://www.snee.com/bobdc.blog/2013/04/appreciating-sparql-property-p.html": "Appreciating SPARQL property paths more - bobdc.blog", + "http://www.semanlink.net/doc/2021/02/fp_servant_sur_twitter_on_le": "FP Servant sur Twitter : \"On le savait pourtant qu'ils sont nuls en math, au gouvernement\"", + "http://www.semanlink.net/doc/2021/05/dirt_ddiscovery_of_inference_ru": "DIRT Discovery of inference rules from text (2001)", + "http://musicologie.org/actu/2015/11/l_opera_de_mayence_ecrase_une_manifestation_d_extreme_droite.html": "L'op\u00e9ra de Mayence \u00e9crase une manifestation de l'extr\u00eame droite avec l'hymne \u00e0 la joie !", + "http://www.datalyse.fr/": "Datalyse - projet d'innovation Big Data - Datalyse", + "https://en.wikipedia.org/wiki/The_Man_Without_a_Past": "The Man Without a Past", + "http://www.html5rocks.com/en/tutorials/cors/?redirect_from_locale=fr": "Using CORS - HTML5 Rocks", + "https://www.quora.com/Scikit-Learn-Can-you-create-your-own-dataset-data-files-to-use-with-the-code-they-use-in-the-tutorials": "Scikit Learn - Can you create your own dataset/data files to use with the code they use in the tutorials? - Quora", + "https://rajpurkar.github.io/SQuAD-explorer/": "The Stanford Question Answering Dataset", + "http://dev.w3.org/html5/rdfa/rdfa-module.html": "HTML5+RDFa A mechanism for embedding RDF in HTML", + "http://docs.codehaus.org/display/MAVENUSER/MavenPropertiesGuide": "MavenPropertiesGuide", + "http://reseaux.blog.lemonde.fr/2013/10/03/reinventer-rapport-temps-bernard-stiegler/": "R\u00e9inventer un rapport au temps, par Bernard Stiegler Lois des r\u00e9seaux", + "http://www.mkbergman.com/": "AI3:::Adaptive Information \u00bb Mike Bergman on the semantic Web and structured Web", + "http://www.scottbot.net/HIAL/?p=19113": "Topic Modeling for Humanists: A Guided Tour \u00bb the scottbot irregular", + "http://www.semanlink.net/doc/2019/11/huggingface_transformers_%F0%9F%A4%97_tr": "huggingface/transformers: \ud83e\udd17 Transformers: State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch.", + "http://data-gov.tw.rpi.edu/wiki/How_to_use_Google_Visualization_API#Hook_up_Google_Visualization_with_RDF_data": "Hook up Google Visualization with RDF data - Data-gov Wiki", + "https://www.theverge.com/2018/8/28/17787610/openai-dota-2-bots-ai-lost-international-reinforcement-learning": "OpenAI\u2019s Dota 2 defeat is still a win for artificial intelligence\u00a0 - The Verge", + "http://www.amazon.com/gp/aws/sdk/103-1858432-5126204?v=2005%2d10%2d01&s=AWSMechanicalTurkRequester": "Amazon Web Services: Amazon Mechanical Turk", + "http://www.gartner.com/technology/media-products/reprints/oracle/article121/article121.html#top": "Magic Quadrant for Business Intelligence Platforms - Gartner", + "https://aminer.org/bignet_www2018": "WORKSHOP: BigNet @\u00a0WWW 2018 Workshop on Learning Representations for Big Networks", + "http://www.flickr.com/photos/richardwallis/sets/72157629229803283/with/6841000233/": "Semtech Berlin 2012\u00a0: un album sur Flickr", + 
"https://groups.google.com/forum/#!forum/swagger-swaggersocket": "Swagger \u2013 Google Groups", + "https://arxiv.org/abs/1801.00631": "[1801.00631] Deep Learning: A Critical Appraisal", + "http://docs.rapid-i.com/files/rapidminer/rapidminer-5.0-manual-english_v1.0.pdf": "Rapidminer User manual", + "http://www.w3.org/TR/rdfa-api/#enhanced-browser-interfaces": "RDFa API", + "http://www.ibtimes.co.uk/articles/456073/20130411/what-bitcoin-mining-silk-road-work-feature.htm": "What is Bitcoin and How Does it Work? - IBTimes UK", + "http://www.semanlink.net/doc/2020/11/recherche_multilingue_grace_a_l": "Recherche multilingue gr\u00e2ce \u00e0 la d\u00e9tection de la langue dans Elasticsearch Elastic Blog", + "http://apassant.net/blog/2008/10/07/say-hello-to-lodrinfo/": "Say hello to lodr.info : Alexandre Passant", + "https://allennlp.org/elmo": "ELMo: Deep contextualized word representations (2018)", + "http://winch5.blog.lemonde.fr/introduction/": "Introduction Winch 5", + "https://www.theguardian.com/society/2017/feb/19/basic-income-finland-low-wages-fewer-jobs": "Is Finland\u2019s basic universal income a solution to automation, fewer jobs and lower wages? Society The Guardian", + "http://www.entreprises.ouest-france.fr/article/agriculture-agroalimentaire-france-decroche-17-12-2015-246432#.VnUOtBtPZPU.twitter": "Agriculture et agroalimentaire. La France d\u00e9croche Ouest France Entreprises", + "http://www.semanlink.net/doc/2020/04/climat_le_patronat_s%E2%80%99active_p": "Climat : le patronat s\u2019active pour infl\u00e9chir les normes", + "http://www.actioncontrelafaim.org/presse/communiques/communique/article/61/1-milliard-de-personnes-souffrent-de-la-faim-en-2009-combien-en-faudra-t-il-pour-que-la-faim-devie/": "1 milliard de personnes souffrent de la faim en 2009 : combien en faudra-t-il pour que la faim devienne enfin une priorit\u00e9 mondiale? 
- Action Contre La Faim", + "http://meta.wikimedia.org/wiki/Semantic_MediaWiki": "Semantic MediaWiki - Meta", + "https://developers.facebook.com/docs/graph-api/quickstart/v2.0": "Facebook Graph API Quickstart", + "https://towardsdatascience.com/semantic-code-search-3cd6d244a39c": "How To Create Natural Language Semantic Search For Arbitrary Objects With Deep Learning", + "http://dannyayers.com/archives/2005/10/28/rdfnet-challenged/": "Danny Ayers, Raw Blog : \u00bb RDF.net : Challenged!", + "https://arxiv.org/abs/1703.02507": "[1703.02507] Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features", + "http://courses.cs.washington.edu/courses/cse490u/16sp/slides/DistributedSemantics.pdf": "Distributed Semantics & Embeddings", + "https://blog.wikimedia.org/2017/10/30/wikidata-fifth-birthday/": "Wikidata, a rapidly growing global hub, turns five \u2013 Wikimedia Blog", + "https://docs.docker.com/engine/reference/glossary/": "Docker Glossary", + "https://jesusbarrasa.wordpress.com/2017/03/31/quickgraph5-learning-a-taxonomy-from-your-tagged-data/": "QuickGraph#5 Learning a taxonomy from your tagged data Jes\u00fas Barrasa", + "https://schema.org/docs/automotive.html": "Markup for Autos - schema.org", + "http://www.txtnet.com/mathlib/home.asp": "La Librairie des Maths", + "http://www.semanlink.net/doc/2021/08/vallee_de_l_azawagh": "Vall\u00e9e de l'Azawagh", + "https://techblog.cdiscount.com/part-speech-tagging-tutorial-keras-deep-learning-library/": "Part-of-Speech tagging tutorial with the Keras Deep Learning library - Cdiscount TechBlog", + "https://twitter.com/olafhartig/status/1096781008098205697": "Olaf Hartig sur Twitter : \"Here are typical examples of how people do data integration in the GraphQL context. Everything is explicitly implemented in the program code. No flexibility. Reminds me of the API mash-up apps that were popular 15 years ago. 
https://t.co/3qJMKXoWDt https://t.co/GWqPdmeFIP\"", + "http://news.bbc.co.uk/go/pr/fr/-/1/hi/technology/4088702.stm": "BBC NEWS Technology Microsoft censors Chinese blogs", + "http://lists.w3.org/Archives/Public/public-semweb-ui/2006May/0001.html": "Jena User Conference - some interesting UI related papers/presentations from Shabajee, Paul on 2006-05-17 (public-semweb-ui@w3.org from May 2006)", + "http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m": "Koren Lazar sur Twitter : \"...Modern pre-trained language models are applicable even in extreme low-resource settings as the case of the ancient Akkadian language.\"", + "http://www.wired.com/2016/01/the-silk-roads-dark-web-dream-is-dead/": "The Silk Road's Dark-Web Dream Is Dead WIRED", + "https://towardsdatascience.com/uncertainty-estimation-for-neural-network-dropout-as-bayesian-approximation-7d30fc7bc1f2": "Uncertainty estimation for Neural Network\u200a\u2014\u200aDropout as Bayesian Approximation", + "http://www.corante.com/copyfight/archives/039510print.html": "The Latest IP Crime: \"Box-Wrap\" Patent Infringement", + "http://patterns.dataincubator.org/book/": "Linked Data Patterns", + "http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua": "[2012.02558] Pre-trained language models as knowledge bases for Automotive Complaint Analysis", + "https://github.com/HydraCG": "Hydra Community Group - GitHub", + "https://vuejs.org/": "Vue.js", + "http://gettingreal.37signals.com/": "Getting Real: The Book by 37signals", + "https://www.w3.org/wiki/WebSchemas/PropertyValuePairs": "WebSchemas/PropertyValuePairs - W3C Wiki", + "http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_vision.html": "TechnicaLee Speaking: Using RDF on the Web: A Vision", + "http://www.semanlink.net/doc/2019/05/knowledge_extraction_from_unstr": "Knowledge extraction from unstructured texts Tech Blog (2016)", + "http://www.joelonsoftware.com/items/2005/10/24.html": "Something Rotten in AdSense", + "http://www.touchgraph.com": "TouchGraph", + "http://stackoverflow.com/questions/1982204/rdfa-for-videos-rendered-in-javascript": "RDFa for videos rendered in javascript - Stack Overflow", + "http://answers.semanticweb.com/questions/1493/what-is-the-best-java-rdfa-parser": "What is the best Java RDFa Parser? 
- semanticweb.com ANSWERS", + "https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb": "nlp-notebooks/Simple Sentence Similarity.ipynb at master \u00b7 nlptown/nlp-notebooks", + "http://leobard.twoday.net/stories/1900548/": "Semantic World and Cyberspace: gnowsis 0.9.0 release", + "http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_": "[2003.05473] Investigating Entity Knowledge in BERT with Simple Neural End-To-End Entity Linking (CoNNL 2019)", + "http://www.esa.int/Our_Activities/Space_Science/Rosetta/Highlights/Top_10_at_10_km": "Top 10 at 10 km / Highlights / Rosetta / Space Science / Our Activities / ESA", + "https://aclweb.org/anthology/papers/P/P17/P17-2085/": "List-only Entity Linking - ACL Anthology (2017)", + "http://www.w3.org/TR/2013/PR-vocab-data-cube-20131217/": "The RDF Data Cube Vocabulary", + "http://news.bbc.co.uk/2/hi/programmes/click_online/9568340.stm": "BBC News - Out-of-focus pictures eliminated by photography innovation", + "http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent": "[2010.00904] Autoregressive Entity Retrieval", + "http://vancouverdata.blogspot.fr/2012/08/googles-self-driving-cars-are-going-to.html": "Google\u2019s Self-Driving Cars Are Going to Change Everything (Vancouver Data Blog by Neil McGuigan)", + "http://www.tagcommons.org/": "TagCommons", + "https://www.facebook.com/nipsfoundation/videos/795861577420073/": "Neural Information Processing Systems - Tutorial Sessions: Unsupervised Deep Learning \"predict everything\"", + "http://www.semanticweb.com/main/oracle_sees_semantic_tech_solving_business_problems_138811.asp": "Oracle Sees Semantic Tech Solving Business Problems", + "http://www.semanlink.net/doc/2019/07/la_sonde_japonaise_hayabusa_2_r": "La sonde japonaise Hayabusa-2 r\u00e9ussit \u00e0 se poser une seconde fois sur l\u2019ast\u00e9ro\u00efde Ryugu", + "http://blog.semantic-web.at/2011/02/17/transforming-spreadsheets-into-skos-with-google-refine/": "The Semantic Puzzle Transforming spreadsheets into SKOS with Google Refine", + "https://fr.coursera.org/learn/neural-networks/lecture/s7bmT/semantic-hashing-9-mins": "Semantic Hashing [9 mins] - Universit\u00e9 de Toronto Coursera", + "http://www.semanlink.net/doc/2021/03/decouverte_d_un_artefact_datant": "D\u00e9couverte d'un artefact datant de 24 000 ans \u00e0 Vale da Pedra Furada, Piau\u00ed, Br\u00e9sil INSHS", + "https://www.cnbc.com/2018/04/11/goldman-asks-is-curing-patients-a-sustainable-business-model.html?__source=sharebar%7Ctwitter&par=sharebar": "Goldman Sachs about gene therapy: 'Is curing patients a sustainable business model?'", + "http://www.semanlink.net/doc/2019/12/les_chatbots_sont_morts_vive_l": "Les chatbots sont morts, vive les m\u00e9dias 100% messagerie\u00a0!", + "http://slayeroffice.com/articles/innerHTML_alternatives/#6a": "slayeroffice alternatives to innerHTML", + "http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio": "[2010.07245] Text Classification Using Label Names Only: A Language Model Self-Training Approach", + "http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_": "[1503.02406] Deep Learning and the Information Bottleneck Principle", + "http://www.ina.fr/video/I05266317/jessye-norman-video.html": "La Marseillaise du bicentenaire de la R\u00e9volution", + "https://www.fast.ai/2019/05/13/blogging-advice/": "Advice for Better Blog Posts \u00b7 fast.ai", + "http://wiki.iks-project.eu/index.php/Workshops/Salzburg2012": "SEMANTIC ENTERPRISE TECHNOLOGIES in ACTION - 
IKS Project", + "http://apassant.net/blog/2008/04/22/attending-www2008/": "Attending WWW2008 : Alexandre Passant", + "https://jira.mongodb.org/browse/SERVER-1723": "[SERVER-1723] Add Bitmap indexes - MongoDB", + "http://www.lemonde.fr/planete/article/2012/11/12/les-villages-de-siberie-cimetieres-des-dechets-toxiques-de-l-ex-urss_1789089_3244.html": "Les villages de Sib\u00e9rie, cimeti\u00e8res des d\u00e9chets toxiques de l'ex-URSS", + "http://docs.openlinksw.com/virtuoso/": "OpenLink Virtuoso Universal Server: Documentation", + "https://www.youtube.com/watch?v=jfwqRMdTmLo": "Successes and Challenges in Neural Models for Speech and Language - Michael Collins - YouTube", + "https://innovation.ie.fujitsu.com/kedi/": "Fujitsu Ireland Research and Innovation Knowledge Engineering and DIscovery (KEDI)", + "http://fr.wikipedia.org/wiki/Les_caprices_d'un_fleuve": "Les Caprices d'un fleuve", + "http://stackoverflow.com/questions/500431/what-is-the-scope-of-variables-in-javascript?rq=1": "What is the scope of variables in JavaScript? - Stack Overflow", + "http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer": "[1907.05242] Large Memory Layers with Product Keys", + "https://github.com/maxlath/wikidata-sdk#you-may-also-like": "maxlath/wikidata-sdk: A javascript tool-suite to query Wikidata and simplify its results", + "http://news.bbc.co.uk/2/hi/science/nature/7324564.stm": "BBC NEWS Science/Nature Secret 'dino bugs' revealed", + "http://www.snee.com/bobdc.blog/2011/02/what-skos-xl-adds-to-skos.html": "What SKOS-XL adds to SKOS - bobdc.blog", + "http://www.zerodeconduite.net/sistersinlaw/": "Sisters in law, un film de Kim Longinotto et Florence Ayisi", + "http://nlp.town/blog/sentence-similarity/": "Comparing Sentence Similarity Methods", + "http://www.cnes.fr/web/CNES-fr/11559-gp-recit-d-une-journee-historique-au-sonc.php": "GP - R\u00e9cit d\u2019une journ\u00e9e historique au SONC - CNES", + "http://news.bbc.co.uk/1/hi/sci/tech/6661987.stm": "BBC NEWS Antarctic 'treasure trove' found", + "http://www.theserverside.com/tt/articles/article.tss?l=RedirectAfterPost": "Redirect After Post", + "http://dig.csail.mit.edu/breadcrumbs/node/62": "Links on the Semantic Web", + "http://www.semanlink.net/doc/2019/05/sebastian_ruder_sur_twitter_": "Sebastian Ruder sur Twitter : \"Besides the obvious things (ELMo, BERT, etc.), is there anything that we should definitely discuss at the NAACL \"Transfer Learning in NLP\" tutorial?\"", + "http://www.semanlink.net/doc/2021/09/www_ingall_niger_org": "www.ingall-niger.org", + "http://ruder.io/deep-learning-nlp-best-practices/index.html": "Deep Learning for NLP Best Practices", + "https://github.com/marcotcr/lime": "GitHub - marcotcr/lime: Lime: Explaining the predictions of any machine learning classifier", + "https://arxiv.org/abs/1604.00289": "[1604.00289] Building Machines That Learn and Think Like People", + "http://projectmosul.itn-dch.net/": "Projectmosul", + "http://dig.csail.mit.edu/issues/tabulator/issue258": "Demande 258: Patch for improved IE support - Tabulator Issue Tracker", + "http://www.w3.org/2011/09/LinkedData/": "Linked Enterprise Data Patterns", + "http://ckan.org/case-studies/publicdata-eu/": "PublicData.eu ckan - The open source data portal software", + "http://www.shopafrica53.com/": "shopafrica53", + "http://www.miximum.fr/pour-enfin-comprendre-javascript.html": "Miximum \u2013 Pour enfin comprendre Javascript", + "http://www.linkedin.com/in/francoispaulservant": "Fran\u00e7ois-Paul Servant - LinkedIn", + 
"http://km.aifb.kit.edu/sites/spark/": "Spark", + "https://sites.google.com/site/restframework/service-descriptors": "Service Descriptors - REST Framework", + "https://www.youtube.com/watch?v=5NuZxUxHN0o": "Janis Joplin ~ Live in Frankfurt", + "https://openreview.net/forum?id=S1HlA-ZAZ": "The Kanerva Machine: A Generative Distributed Memory OpenReview (2018)", + "http://www.semanlink.net/doc/2020/12/entre_colere_et_culpabilite_ce": "Entre col\u00e8re et culpabilit\u00e9, ces Fran\u00e7ais qui renoncent \u00e0 manifester par peur des violences", + "http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c": "[2003.11644] MAGNET: Multi-Label Text Classification using Attention-based Graph Neural Network", + "http://www.howtocreate.co.uk/tutorials/javascript/objects": "JavaScript tutorial - Creating objects", + "https://www.nature.com/articles/s42256-018-0015-y.epdf?author_access_token=HIFIT_s3XXRdKKF3DTspd9RgN0jAjWel9jnR3ZoTv0P7sMl50Mvxe5hygHWfkIWjiyJe1kEkFLNBiorlpBWGyE5yRNu7SaSa6rWLAwmUPf1dL47QUigBag24erZ3G6Ue-9ZkZNtWzrZVVkxMrGE8eA%3D%3D": "Training deep neural networks for binary communication with the Whetstone method Nature Machine Intelligence", + "http://www.semanlink.net/doc/2019/12/can_you_hear_me_now_improved_v": "Can You Hear Me Now? Improved Voice Assistant Keyword Spotting with Alibaba", + "https://www.deepl.com/": "DeepL", + "http://weblog.burningbird.net/2005/01/27/cheap-eats-at-the-semantic-web-cafe/": "Burningbird \u00bb Cheap Eats at the Semantic Web Caf\u00e9", + "http://www.aseantravelandtours.com/china/sightseeings/visite_pekin.htm": "Guide Touristique Pekin", + "http://www.rfi.fr/radiofr/editions/072/edition_13_20060531.asp": "RFI - Couleurs tropicales - \u00e9mission enregistr\u00e9e \u00e0 Niamey", + "http://www.w3.org/TR/owl-guide/": "OWL Web Ontology Language Guide", + "http://www.semanlink.net/doc/2021/01/language_models_are_open_knowle": "Language Models are Open Knowledge Graphs... but are hard to mine - Towards Data Science", + "https://rubenverborgh.github.io/Solid-DeSemWeb-2018/": "Solid: Linked Data for personal data management", + "https://www.youtube.com/watch?v=kkTTrVotetI": "Comment Jacques Jaujard a sauv\u00e9 le Louvre", + "http://news.bbc.co.uk/1/hi/magazine/5048238.stm": "BBC NEWS - The science behind the swerve", + "http://www.thefigtrees.net/lee/blog/2008/10/videos_anzo_for_excel_in_actio.html": "TechnicaLee Speaking: Videos: Anzo for Excel in action", + "http://evgenymorozov.tumblr.com/post/107976715275/my-oped-in-ft": "My oped in FT - Notes EM", + "http://www.semanlink.net/doc/2020/12/salmon_run_word_sense_disambig": "Salmon Run: Word Sense Disambiguation using BERT as a Language Model", + "http://neuralnetworksanddeeplearning.com/chap1.html": "Using neural nets to recognize handwritten digits", + "https://arxiv.org/abs/1711.09677": "[1711.09677] Binary classification models with \"Uncertain\" predictions", + "http://www.terramadre.info/": "TerraMadre Rete delle comunit\u00e0 del cibo", + "http://www.osxfaq.com": "", + "http://stackoverflow.com/questions/757066/what-is-a-good-javascript-rdfa-parser-implementation": "What is a good Javascript RDFa parser implementation? 
- Stack Overflow", + "http://www.w3.org/TR/swbp-skos-core-guide/": "SKOS Core Guide", + "http://odaf.org/events/odaf_europe_2010.php": "ODaF Europe 2010: Semantic Statistics", + "http://www.semanlink.net/doc/2021/09/%C2%AB_la_decroissance_n%E2%80%99est_ni_un_p": "\u00ab\u00a0La d\u00e9croissance n\u2019est ni un programme ni m\u00eame une th\u00e9orie, mais une aspiration\u00a0\u00bb", + "http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=1095551": "Rich snippets: Shopping and products", + "http://thmanager.sourceforge.net/": "ThManager - metadata editor", + "http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled": "[2010.12321] BARThez: a Skilled Pretrained French Sequence-to-Sequence Model", + "http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu": "[2103.12876] Complex Factoid Question Answering with a Free-Text Knowledge Graph", + "http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje": "Outline of a New Research Project on the Nok Culture of Central Nigeria, West Africa", + "http://www.nytimes.com/2006/04/07/science/07evolve.html?_r=1&oref=slogin": "Study, in a First, Explains Evolution's Molecular Advance - New York Times", + "http://vimeo.com/46304267": "Sight", + "http://allthingsd.com/20130906/how-a-man-in-austria-used-legos-to-hack-amazons-kindle-e-book-security/": "How a Man in Austria Used Legos to Hack Amazon's Kindle E-Book Securit - Arik Hesseldahl - News - AllThingsD", + "http://automotive.dfki.de/index.php/en/home": "Automotive IUI - Car oriented multimodal interface architectures", + "http://www.semanlink.net/doc/2019/08/learning_text_similarity_with_s": "Learning Text Similarity with Siamese Recurrent Networks (2016)", + "http://www.canal-u.tv/canalu/producteurs/universite_de_tous_les_savoirs": "Canal-U - Universit\u00e9 de tous les savoirs", + "http://www.dailymotion.com/related/3766044/video/x1y9ko_amywinehouseconcertpart1_music": "Amy Winehouse in Concert - Dailymotion", + "http://www.lemonde.fr/campus/article/2017/11/15/paradise-papers-faire-la-morale-ne-suffit-pas_5215401_4401467.html": "\u00ab\u00a0Paradise Papers\u00a0\u00bb\u00a0: faire la morale ne suffit pas", + "https://arxiv.org/abs/1508.01991": "[1508.01991] Bidirectional LSTM-CRF Models for Sequence Tagging", + "http://www.semanlink.net/doc/2020/08/mulan_a_java_library_for_multi": "Mulan: A Java library for multi-label learning", + "http://cdixon.org/2013/12/31/why-im-interested-in-bitcoin/": "Why I\u2019m interested in Bitcoin - Chris Dixon", + "http://singularitysummit.com/": "The Singularity Summit", + "http://linkededucation.org/": "linkededucation.org", + "http://www.semanlink.net/doc/2021/06/transformer_models_hugging_fa": "Transformer models - Hugging Face Course", + "http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw": "[1812.02956] LNEMLC: Label Network Embeddings for Multi-Label Classification", + "http://www.livescience.com/scienceoffiction/060707_pacman_insects.html": "LiveScience.com - Live Insects Challenge Humans in Bizarre Computer Game", + "http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/package-summary.html": "EDU.oswego.cs.dl.util.concurrent", + "http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-11.pdf": "Automated interlinking of speech radio archives", + "https://www.nytimes.com/2019/03/29/science/dinosaurs-extinction-asteroid.html": "Fossil Site Reveals Day That Meteor Hit Earth and, Maybe, Wiped Out Dinosaurs - The New York Times", + "http://hci-matters.com/blog/?p=9": "The New 
Interface Advocate :: The misused mouse, part 2: A proposal for a nearly mouseless interface.", + "http://www.semanlink.net/doc/2021/10/l%E2%80%99origine_des_chevaux_domestiqu": "L\u2019origine des chevaux domestiques enfin \u00e9tablie CNRS", + "http://www.slideshare.net/sopekmir/how-can-blockchain-amplify-digital-identifiers-improving-data-persistence-openness-and-trust-in-the-modern-world-68539778": "How Can Blockchain amplify Digital Identifiers? Improving Data Persis\u2026", + "http://www.semanlink.net/doc/2021/10/open_range_2003_film_": "Open Range (2003 film)", + "http://www.economist.com/blogs/freeexchange/2013/08/labour-markets-0?fsrc=rss": "Labour markets: On \"bullshit jobs\" The Economist", + "http://www.semanlink.net/doc/2020/12/pemistahl_lingua_%F0%9F%91%84_the_most_a": "pemistahl/lingua: natural language detection library for Java suitable for long and short text alike", + "https://gallica.bnf.fr/ark:/12148/bpt6k33245388": "Le Zarmatarey : contribution \u00e0 l'histoire des populations d'entre Niger et Dallol Mawri / par Boub\u00e9 Gado Gallica", + "https://www.weforum.org/agenda/2017/01/why-we-should-all-have-a-basic-income?utm_content=buffer711e3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Why we should all have a basic income World Economic Forum", + "http://virtuoso.openlinksw.com/wiki/main/Main/VirtInstallationEC2": "VOS: Virtuoso Universal Server AMI for Amazon EC2 Instantiation Guide", + "https://www.washingtonpost.com/news/morning-mix/wp/2016/07/28/anthrax-sickens-13-in-western-siberia-and-a-thawed-out-reindeer-corpse-may-be-to-blame/?postshare=2951469801095885&tid=ss_tw": "Anthrax sickens 13 in western Siberia, and a thawed-out reindeer corpse may be to blame - The Washington Post", + "http://pandas.pydata.org/pandas-docs/stable/": "pandas documentation", + "http://www.heppresearch.com/gr4google": "Semantic SEO for Google with GoodRelations and RDFa Hepp Research GmbH", + "http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i": "[2104.12016] Learning Passage Impacts for Inverted Indexes", + "http://www.semanlink.net/doc/2020/10/wikifier": "Wikifier", + "http://php-java-bridge.sourceforge.net/doc/tomcat6.php": "PHP/Java Bridge", + "http://www.semanlink.net/doc/2020/02/a_new_model_and_dataset_for_lon": "A new model and dataset for long-range memory DeepMind", + "https://lejournal.cnrs.fr/diaporamas/une-plongee-dans-lafrique-antique": "Une plong\u00e9e dans l\u2019Afrique antique CNRS Le journal", + "http://images.apple.com/quicktime/pdf/QuickTime7_User_Guide.pdf": "QuickTime 7 User Guide", + "http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi": "Hierarchical Multi-label Classification of Text with Capsule Networks (2019)", + "http://www.w3.org/community/markdown/wiki/MarkdownImplementations": "MarkdownImplementations - Markdown Community Group", + "https://explosion.ai/blog/sense2vec-with-spacy": "Sense2vec with spaCy and Gensim \u00b7 Blog \u00b7 Explosion AI", + "http://winch5.blog.lemonde.fr/chapitre-2-innover-creer-des-opportunites/": "Chapitre 2 \u2014 Innover = cr\u00e9er des opportunit\u00e9s Winch 5", + "http://www.semanlink.net/doc/2021/10/valar_nmt_vastly_lacking_resou": "VaLaR NMT: Vastly Lacking Resources Neural Machine Translation (2019)", + "http://stackoverflow.com/questions/11758676/resolve-multiple-slf4j-bindings-in-maven-project": "Resolve multiple SLF4J bindings in maven project - Stack Overflow", + "http://www.zdnet.com/thanks-for-nothing-jerkface-7000030306/": "Thanks for nothing, jerkface ZDNet", 
+ "http://blog.sparna.fr/2018/01/23/vocabulaires-thesaurus-web-donnees-skos-open-source/": "Vocabulaires dans le web de donn\u00e9es : quels outils open-source ? - Sparna Blog", + "http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib": "[1912.08904] Macaw: An Extensible Conversational Information Seeking Platform", + "http://www.pbs.org/wgbh/nova/bonediggers/evolution.html": "NOVA Portrait of Australia's unique evolutionary history.", + "http://www.w3.org/blog/SW/2008/12/03/eleven_w3c_publications_related_to_owl_2": "W3C Semantic Web Activity News - Eleven W3C Publications Related to OWL 2", + "http://www.laquadrature.net/files/LaQuadratureduNet-Riposte-Graduee_reponse-inefficace-inapplicable-dangereuse-a-un-faux-probleme.pdf": "HADOPI, \u00ab Riposte gradu\u00e9e \u00bb : Une r\u00e9ponse inefficace, inapplicable et dangereuse \u00e0 un faux probl\u00e8me.", + "http://esw.w3.org/topic/SparqlImplementations": "SparqlImplementations - ESW Wiki", + "http://ecologie.blog.lemonde.fr/2013/09/03/aux-etats-unis-des-chevaux-de-course-clones-entrent-en-piste/": "Aux Etats-Unis, des chevaux de course clon\u00e9s entrent en piste Eco(lo)", + "https://www.analyticsvidhya.com/blog/2016/01/12-pandas-techniques-python-data-manipulation/": "12 Useful Pandas Techniques in Python for Data Manipulation", + "http://www.semanlink.net/doc/2021/10/sahajtomar_french_semantic_%C2%B7_hu": "Sahajtomar/french_semantic \u00b7 Hugging Face", + "http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati": "[physics/0004057] The information bottleneck method", + "http://webmaster.yandex.ru/microtest.xml": "Yandex checker", + "http://www.semanlink.net/doc/2019/11/releasing_spleeter_deezer_rese": "Releasing Spleeter: Deezer Research source separation engine", + "http://lists.w3.org/Archives/Public/semantic-web/2005Apr/0157.html": "When flickr meets del.icio.us meets SKOS.", + "http://www.cnrs.fr/inee/communication/breves/b098.html": "Un virus transforme les coccinelles en zombies au profit d\u2019une gu\u00eape parasito\u00efde", + "https://www.simonsfoundation.org/quanta/20140122-a-new-physics-theory-of-life/": "A New Thermodynamics Theory of the Origin of Life Simons Foundation", + "http://www.semanlink.net/doc/2021/09/dosso_toubal_n_06": "Dosso - TOUBAL N 06", + "http://www.semanlink.net/doc/2019/05/robust_language_representation_": "Robust Language Representation Learning via Multi-task Knowledge Distillation - Microsoft Research", + "http://video.google.com/videoplay?docid=-9050474362583451279": "Money As Debt", + "http://developer.apple.com/internet/opensource/osdb.html": "MySQL on Mac OS X", + "http://lin-clark.com/blog/2013/12/08/drop-rdfa-drupal-8/": "Why Drupal 8 should drop RDFa (and microdata) in favor of JSON Lin Clark", + "http://www.ldodds.com/blog/archives/000322.html": "Lost Boy: Bee Node Deconstructed", + "http://www.supprimerlechomage.org/": "", + "http://www.gymglish.com/workbook/showlesson?e=fps%40semanlink.net&s=O6qCR70UXs&t=L": "Gymglish Lesson - Concordance des temps: Style indirect", + "http://www.tamtaminfo.com/inquietudes-sur-le-projet-de-constrution-de-la-voie-ferree-par-le-groupe-bollore/": "Inqui\u00e9tudes sur le projet de constrution de la voie ferr\u00e9e par le groupe Bollor\u00e9 Tamtaminfo", + "http://www.semanlink.net/doc/2020/08/malbouffe_et_covid_19_le_cockt": "Malbouffe et Covid-19, le cocktail mortel mexicain", + "http://www.cnrs.fr/inee/communication/breves/b390.html": "Out of Africa : nos origines multiples", + "http://www.streamingwizard.com/": "Streaming 
services and solutions provider; specialists in live broadcasting and on demand video", + "http://www.semanlink.net/doc/2019/10/troubleshooting_tika_1": "Troubleshooting Tika", + "http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a": "[1712.05972] Train Once, Test Anywhere: Zero-Shot Learning for Text Classification", + "https://github.com/innoq/iqvoc/wiki/": "iQvoc", + "http://arc.semsol.org/": "Easy RDF and SPARQL for LAMP systems - ARC RDF Classes for PHP", + "http://www.semanlink.net/doc/2019/11/des_varietes_de_vegetaux_devenu": "Des vari\u00e9t\u00e9s de v\u00e9g\u00e9taux devenues tol\u00e9rantes \u00e0 un herbicide\u2026 \u00e0 cause de ce m\u00eame herbicide", + "http://www.lemonde.fr/technologies/article/2013/08/26/google-investit-dans-le-service-de-taxis-uber_3466504_651865.html#": "Google investit dans le service de taxis Uber", + "http://allforces.com/2005/08/22/wordpress-on-mac-phpandmysql/": "WordPress on Mac Part 1: PHP & MySQL All Forces", + "http://passeurdesciences.blog.lemonde.fr/2014/07/06/un-mystere-astronomique-de-470-millions-dannees/": "Un myst\u00e8re astronomique de 470 millions d\u2019ann\u00e9es Passeur de sciences", + "http://www.ebusiness-unibw.org/tools/goodrelations-annotator/": "GoodRelations Annotator", + "http://www.semanlink.net/doc/2020/03/taiwan_un_modele_dans_la_lutte": "Ta\u00efwan, un mod\u00e8le dans la lutte contre le coronavirus (RFI - 12/03/2020)", + "https://github.com/evilstreak/markdown-js": "evilstreak/markdown-js", + "http://mashable.com/2007/05/15/16-awesome-data-visualization-tools/": "16 Awesome Data Visualization Tools", + "http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu": "[1904.09078] EmbraceNet: A robust deep learning architecture for multimodal classification", + "http://chris.eldredge.io/blog/2015/04/02/solr-jetty-cors/": "Solr, Jetty and CORS - Chris Eldredge", + "http://blog.swayy.co/post/61672584784/an-algorithm-for-generating-automatic-hashtags": "Swayy Blog \u2014 An algorithm for generating automatic hashtags", + "http://www.planeteafrique.com/niger/ONG_Search.asp": "Annuaire des Associations et ONG oeuvrant au Niger", + "http://www.semanlink.net/doc/2020/11/tl_dr_this_ai_summarizes_resea": "TL;DR: This AI summarizes research papers so you don\u2019t have to", + "http://ivan-herman.name/2008/12/03/bridge-between-sw-communities-owl-rl/": "Bridge between SW communities: OWL RL \u00ab Ivan\u2019s private site", + "http://www.semanlink.net/doc/2019/07/course_nlp_2_svd_nmf_topic_mode": "course-nlp/2-svd-nmf-topic-modeling.ipynb at master \u00b7 fastai/course-nlp", + "https://www.typepad.com/t/app/weblog/manage?blog_id=284632": "Mon blog sur Noos - Edition", + "https://www.slideshare.net/moustaki/time-context-and-causality-in-recommender-systems": "Time, Context and Causality in Recommender Systems", + "http://dannyayers.com/code/sparql-editor": "Online SPARQL Editor", + "http://web.archive.org/web/19981206171549/www.hypersolutions.fr/": "hyperSOLutions - Home page", + "http://www.semanlink.net/doc/2019/11/knowledge_graphs_and_knowledge_": "Knowledge Graphs and Knowledge modelling took center stage at ISWC 2019 LinkedIn", + "http://petrole.blog.lemonde.fr/2011/11/06/trop-tard-pour-limiter-le-rechauffement-a-2%C2%B0c-selon-nature/": "Trop tard pour limiter le r\u00e9chauffement \u00e0 2\u00b0C, d\u2019apr\u00e8s \u2018Nature\u2019 Oil Man", + "http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m": "exBERT: Extending Pre-trained Models with Domain-specific Vocabulary Under Constrained 
Training Resources - ACL Anthology", + "http://people.ds.cam.ac.uk/iv250/tutorial/xlingrep-tutorial.pdf": "Cross-Lingual Word Representations: Induction and Evaluation (Tutorial EMNLP 2017)", + "http://www.knowledgesearch.org/": "The Semantic Indexing Project", + "https://tryolabs.com/blog/2018/12/19/major-advancements-deep-learning-2018/": "The major advancements in Deep Learning in 2018 Tryolabs Blog", + "http://www.semanticdesktop.org/xwiki/bin/download/Wiki/AMultiOntologyApproachForPersonalInformationManagement/32_xiaocruz_multiontology_final.pdf": "A Multi-Ontology Approach for Personal Information Management", + "http://www.talis.com/nodalities/pdf/nodalities_issue8.pdf": "The Data Web as an OS", + "http://www.readwriteweb.com/archives/extractiv_launches_semantics_as_a_service_platform.php": "Extractiv Launches \"Semantics as a Service\" Platform", + "https://news.cnrs.fr/articles/are-digital-devices-robbing-our-memories?utm_content=bufferb159d&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Are Digital Devices Robbing our Memories? CNRS News", + "http://nicola.io/future-rdf/2015/": "Towards the future RDF library", + "http://www.semanlink.net/doc/2020/12/supporting_content_decision_mak": "Supporting content decision makers with machine learning Dec, 2020 Netflix TechBlog", + "http://www.mkbergman.com/1771/a-decade-in-the-trenches-of-the-semantic-web/": "A Decade in the Trenches of the Semantic Web AI3:::Adaptive Information", + "http://www.ibm.com/developerworks/xml/library/x-dita10/": "Subject classification with DITA and SKOS", + "http://www.worldpressphoto.nl": "World Press Photo", + "http://arxiv.org/abs/1002.2284v2": "[1002.2284] Markets are efficient if and only if P = NP", + "http://www.talisaspire.com/": "Talis Aspire", + "http://news.bbc.co.uk/2/hi/science_and_environment/10132762.stm": "BBC News - 'Artificial life' breakthrough announced by scientists", + "http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_": "Peter Bloem sur Twitter : \"One of the messages from Ruffinelli et al 2020...\"", + "https://towardsdatascience.com/advances-in-few-shot-learning-a-guided-tour-36bc10a68b77": "Advances in few-shot learning: a guided tour \u2013 Towards Data Science", + "http://linuxgizmos.com/google-launches-android-automotive-consortium/": "Google launches Android automotive consortium\u00a0\u00b7\u00a0 LinuxGizmos.com", + "http://torrez.us/archives/2005/09/13/393": "Elias Torres \u00bb From XML to RDF: how semantic web technologies will change the design of \u2018omic\u2019 standards", + "http://www.newscientist.com/article/mg21128323.200-the-vast-asian-realm-of-the-lost-humans.html#.UpfAXaVkiww": "The vast Asian realm of the lost humans - life - 29 September 2011 - New Scientist", + "http://www.semanlink.net/doc/2020/04/nothing_to_hide_argument": "Nothing to hide argument", + "http://www.semanlink.net/doc/2021/01/%C2%AB_plus_vite_que_le_vent_%C2%BB_cet": "\u00ab\u00a0Plus vite que le vent\u00a0\u00bb\u00a0: cette r\u00e9volution technique qui fait d\u00e9coller les bateaux du Vend\u00e9e Globe", + "http://www.semanlink.net/doc/2020/03/mapper_annotated_text_plugin_%7C_": "Mapper Annotated Text Plugin Elastic", + "http://artic.ac-besancon.fr/ecoles_25/ia/maitrlng/victorhugo_06.htm": "Victor Hugo : A qui la faute ?", + "https://drive.google.com/file/d/0BxKBnD5y2M8NbWN6XzM5UXkwNDA/view?pli=1": "Obstacles on the Path to AI", + "http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill": "[1906.07241] Barack's Wife Hillary: Using Knowledge-Graphs 
for Fact-Aware Language Modeling", + "http://oldman.readthedocs.org/en/latest/": "OldMan: Python OLDM", + "http://www.prototypejs.org/": "Prototype JavaScript framework", + "http://www.businessinsider.com/facebook-is-quietly-ramping-up-a-product-that-kills-us-says-yahoo-source-2012-11": "Facebook Is Quietly Ramping Up A Product That 'Kills Us,' Says Yahoo Source - Business Insider", + "http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0102.html": "Re: AW: Content negotiation flamewar (was: Re: \"Hash URIs\" and content negotiation) from Richard Cyganiak on 2006-11-13 (semantic-web@w3.org from November 2006)", + "http://www.franz.com/": "Franz Inc. Web 3.0's Database", + "http://www.lemonde.fr/international/article/2010/11/26/le-nombre-de-pays-tres-pauvres-a-double-en-quarante-ans_1445160_3210.html": "Le nombre de pays tr\u00e8s pauvres a doubl\u00e9 en quarante ans", + "http://esw.w3.org/topic/RdfStoreBenchmarking": "RdfStoreBenchmarking - ESW Wiki", + "http://www.semanlink.net/doc/2021/10/modeling_ai_on_the_language_of_": "Modeling AI on the Language of Brain Circuits and Architecture Wu Tsai Neurosciences Institute", + "http://www.bbc.co.uk/blogs/bbcinternet/2010/07/bbc_world_cup_2010_dynamic_sem.html": "BBC - BBC Internet Blog: BBC World Cup 2010 dynamic semantic publishing", + "http://scienceblog.com/47894/gamers-succeed-where-scientists-fail/": "Gamers succeed where scientists fail ScienceBlog.com", + "http://www.enswers.net/": "Enswers", + "http://www.cazencott.info/dotclear/public/lectures/2017-06-26-intro-ml.pdf": "\"Around Machine Learning in 90 Minutes\" slides", + "https://github.com/D2KLab/entity2rec": "D2KLab/entity2rec: entity2rec generates item recommendation from knowledge graphs", + "http://news.bbc.co.uk/1/hi/sci/tech/4089538.stm": "BBC NEWS - Funds for greenhouse gas storage", + "http://www.mkbergman.com/1731/spring-dawns-on-artificial-intelligence/": "Spring Dawns on Artificial Intelligence AI3:::Adaptive Information", + "http://lib.store.yahoo.net/lib/paulgraham/bbnexcerpts.txt": "Lisp for Web-Based Applications", + "http://www.answers.com/topic/the-sea-bat-1": "The Sea Bat 1930: Movie and film review from Answers.com", + "http://en.wikipedia.org/wiki/Guess_Who's_Coming_to_Dinner": "Guess Who's Coming to Dinner", + "http://rdrpostagger.sourceforge.net/": "RDRPOSTagger: A Rule-based Part-of-Speech and Morphological Tagging Toolkit", + "http://www.machineversus.me/2012/08/if-youve-upgraded-to-eclipse-juno.html": "Eclipse Juno, Maven, M2E and EGit Compatibility Problem and Solution Machine vs. 
Me", + "http://www.semanlink.net/doc/2021/08/self_talk_obtain_knowledge_fro": "Self-Talk: Obtain Knowledge From Text Generation Transformer Models by Eric Fillion Aug, 2021 Towards Data Science", + "http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking": "[1901.04085] Passage Re-ranking with BERT", + "https://www.brainpickings.org/2016/10/06/bertrand-russell-oswald-mosley/": "When Debate Is Futile: Bertrand Russell\u2019s Remarkable Response to a Fascist\u2019s Provocation \u2013 Brain Pickings", + "http://www.semanlink.net/doc/2019/05/microsoft_makes_google_s_bert_n": "Microsoft makes Google's BERT NLP model better", + "http://www.semanlink.net/doc/2021/05/high_depth_african_genomes_info": "High-depth African genomes inform human migration and health Nature (2020)", + "https://datascience.stackexchange.com/questions/10299/what-is-a-good-explanation-of-non-negative-matrix-factorization/15438": "nlp - What is a good explanation of Non Negative Matrix Factorization? - Data Science Stack Exchange", + "http://timesofindia.indiatimes.com/tech/tech-news/Facebook-is-selling-old-wine-Internet-org-in-a-new-bottle-Free-Basicsusers-be-aware/articleshow/50219009.cms": "Facebook is selling old wine (Internet.org) in a new bottle (Free Basics), users be aware - Times of India", + "http://itmanagement.earthweb.com/features/article.php/12297_3867751_3/Business-Intelligence-Software-Ten-Leaders.htm": "Business Intelligence Software: Ten Leaders", + "http://iipdigital.usembassy.gov/st/english/publication/2012/11/20121114138624.html#axzz2TOAwLweQ": "Luis von Ahn: Gamer Defeats Spam IIP Digital", + "http://passeurdesciences.blog.lemonde.fr/2014/06/25/une-nouvelle-arme-contre-les-superbacteries/": "Une nouvelle arme contre les superbact\u00e9ries Passeur de sciences", + "http://www.snl-e.salk.edu/publications/Chichilnisky2001.pdf": "\"A simple white noise analysis of neuronal light responses\", E.J. 
Chichilnisky 2000", + "http://www.configworks.com/mz/AI_EDAM_2004.pdf": "Configuration knowledge representations for Semantic Web applications", + "https://developer.mozilla.org/en-US/docs/Web/JavaScript/A_re-introduction_to_JavaScript": "A re-introduction to JavaScript (JS Tutorial)", + "http://www.semanlink.net/doc/2020/09/large_scale_network_motif_analy": "Large-scale network motif analysis using compression peterbloem.nl", + "http://discussions.apple.com/thread.jspa?messageID=7143270": "Apple - Support - Discussions - The Backlit Keyboard on MacBook Air it's working upside down", + "http://www.offconvex.org/2018/06/17/textembeddings/": "Deep-learning-free Text and Sentence Embedding, Part 1 \u2013 Off the convex path", + "http://www.digitalspy.co.uk/tech/news/a596872/french-bank-becomes-first-to-allow-twitter-users-to-send-money.html#~oQ0CvHwvUdoCBY": "French bank becomes first to allow Twitter users to send money - Tech News - Digital Spy", + "http://www.regular-expressions.info/": "Regular-Expressions.info - Regex Tutorial, Examples and Reference - Regexp Patterns", + "http://esw.w3.org/topic/Job_Mart": "Job Mart - ESW Wiki", + "http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_mining": "Microsoft Concept Graph: Mining Semantic Concepts for Short Text Understanding MIT Press Journals (2019)", + "http://www.newscientist.com/article.ns?id=dn8383&print=true": "Air guitarists\u2019 rock dreams come true - New Scientist", + "http://www.christian-faure.net/2015/09/13/la-blockchain-et-lemergence-des-distributed-consensus-engines/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29": "La Blockchain et l\u2019\u00e9mergence des \u00ab\u00a0distributed consensus engines\u00a0\u00bb", + "http://sealedabstract.com/rants/why-mobile-web-apps-are-slow/": "Why mobile web apps are slow Sealed Abstract", + "http://www-128.ibm.com/developerworks/web/library/wa-semweb/": "The future of the Web is Semantic", + "http://mikehadlow.blogspot.fr/2014/06/heisenberg-developers.html": "Code rant: Heisenberg Developers", + "http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1343": "Linked Data Trip Report - Part 1 (WWW2008)", + "http://www.canal.ird.fr": "Canal IRD", + "http://tools.ietf.org/html/rfc6596": "RFC 6596 - The Canonical Link Relation", + "http://www.ibm.com/developerworks/views/web/libraryview.jsp?search_by=Mastering+Ajax": "\"Mastering Ajax\" - developerWorks : Web development : Technical library view", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906553,0.html": "A Aulnay, la victoire de M. 
Sarkozy v\u00e9cue comme une \"grosse claque\"", + "http://greggkellogg.net/2012/08/21/json-ld-and-mongodb": "JSON-LD and MongoDB Gregg Kellogg", + "https://www.virtualbox.org/": "Oracle VM VirtualBox", + "http://stackoverflow.com/questions/899102/how-do-i-store-javascript-functions-in-a-queue-for-them-to-be-executed-eventually": "How do I store javascript functions in a queue for them to be executed eventually - Stack Overflow", + "http://www.sciam.com/print_version.cfm?articleID=0005FB4D-6102-140F-A10283414B7F0000": "Scientific American: Separation of Man and Ape Down to Gene Expression", + "http://www.semanlink.net/doc/2021/10/neubig_lowresource_nlp_bootcamp": "neubig/lowresource-nlp-bootcamp-2020: The website for the CMU Language Technologies Institute low resource NLP bootcamp 2020", + "https://twitter.com/honnibal/status/1063108730219315201": "Matthew Honnibal sur Twitter : \"Have been experimenting with an unsupervised pre-training technique for @spacy_io, similar to ULMFit/Elmo/BERT etc.", + "http://my.opera.com/tomheath/blog/show.dml/306694": "Applications Built on Jena - Tom Heath's Displacement Activities - by tomheath", + "https://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript": "How can I pretty-print JSON using JavaScript? - Stack Overflow", + "http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/": "Case Study: Enhancement and Integration of Corporate Social Software Using the Semantic Web", + "https://blog.ouseful.info/2017/09/04/simple-text-analysis-using-python-identifying-named-entities-tagging-fuzzy-string-matching-and-topic-modelling/": "Simple Text Analysis Using Python \u2013 Identifying Named Entities, Tagging, Fuzzy String Matching and Topic Modelling \u2013 OUseful.Info, the blog\u2026", + "http://www.washingtonpost.com/blogs/wonkblog/wp/2013/07/13/when-space-weather-attacks/": "When space weather attacks!", + "https://bitcoinfoundation.org/blog/?p=418": "Contrary to Mt. 
Gox\u2019s Statement, Bitcoin is not at fault - Bitcoin Foundation: Blog", + "http://www.semanlink.net/doc/2021/03/israel_devoile_un_manuscrit_bib": "Isra\u00ebl d\u00e9voile un\u00a0manuscrit biblique vieux de\u00a0deux mille ans", + "http://2007.xtech.org/": "XTech 2007: The Ubiquitous Web: 15-18 May 2007, Paris, France", + "http://backfeed.cc/": "Backfeed Decentralizing the Present", + "http://www.scientificamerican.com/article/device-lets-blind-see-with-tongues/": "Tasting the Light: Device Lets the Blind \"See\" with Their Tongues - Scientific American", + "http://www.lespetitescases.net/rdfaiser-votre-blog-2-la-pratique": "RDFaiser votre blog, 2\u00e8me partie : la pratique Les petites cases", + "https://rare-technologies.com/sent2vec-an-unsupervised-approach-towards-learning-sentence-embeddings/": "Sent2Vec: An unsupervised approach towards learning sentence embeddings RARE Technologies", + "http://www.theverge.com/2017/4/12/15259400/burger-king-google-home-ad-wikipedia": "Burger King\u2019s new ad forces Google Home to advertise the Whopper - The Verge", + "http://www.wired.co.uk/article/china-artificial-intelligence-education-superpower": "China\u2019s children are its secret weapon in the global AI arms race WIRED UK", + "http://www.boabinteractive.com.au/": "BoaB interactive - Web design, graphic design, multimedia, Content Management System (CMS)", + "http://www.semanlink.net/doc/2021/06/l%E2%80%99%C2%AB_homme_dragon_%C2%BB_un_crane_ch": "L\u2019\u00ab\u00a0homme dragon\u00a0\u00bb, un cr\u00e2ne chinois miraculeusement pr\u00e9serv\u00e9", + "http://today.java.net/pub/a/today/2007/03/01/building-web-applications-with-maven-2.html": "Building Web Applications with Maven 2 Java.net", + "http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi": "[1911.11506] Word-Class Embeddings for Multiclass Text Classification", + "http://www.w3.org/2005/Talks/0517-boit-tbl/": "Berners-Lee - Sem Web Life Sciences - Bio-IT world", + "http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai": "[2101.00345] Modeling Fine-Grained Entity Types with Box Embeddings", + "http://www.semanlink.net/doc/2020/09/commission_d%E2%80%99enquete_sur_la_ges": "Commission d\u2019enqu\u00eate sur la gestion du Covid-19\u00a0: \u00ab\u00a0Les d\u00e9fauts observ\u00e9s lors de la premi\u00e8re vague perdurent\u00a0\u00bb", + "https://rdipietro.github.io/friendly-intro-to-cross-entropy-loss/": "A Friendly Introduction to Cross-Entropy Loss", + "http://www.recherches-sur-le-terrorisme.com/Documentsterrorisme/niger.html": "Le Niger face \u00e0 ses difficult\u00e9s, l\u2019islamisme rampant export\u00e9 de l\u2019\u00e9tranger, la menace d\u2019Al-Qa\u00efda, l\u2019AQMI, l\u2019utilisation du territoire comme voie de passage de la drogue et la question Touar\u00e8gue", + "http://www2.cnrs.fr/presse/communique/5253.htm": "Les ondes gravitationnelles font la premi\u00e8re lumi\u00e8re sur la fusion d'\u00e9toiles \u00e0 neutrons - Communiqu\u00e9s et dossiers de presse - CNRS", + "https://micvog.com/2013/09/08/storm-first-story-detection/": "How to spot first stories on Twitter using Storm Michael Vogiatzis", + "http://events.linkeddata.org/ldow2009/": "Linked Data on the Web (LDOW2009) - Workshop at WWW2009, Madrid, Spain", + "http://danakil.ethiopia.free.fr/index.htm": "Le Triangle Afar: entre Ethiopie et Djibouti", + "https://www.technologyreview.com/s/610576/how-to-manipulate-facebook-and-twitter-instead-of-letting-them-manipulate-you/": "How to manipulate Facebook and Twitter instead of 
letting them manipulate you - MIT Technology Review", + "http://dannyayers.com/2011/07/24/Sitemap-notes": "Sitemaps notes - Danny Ayers : Raw Blog", + "http://www.semanlink.net/doc/2020/01/lecture_14_contextual_vectors": "Lecture 14 \u2013 Contextual Vectors Stanford CS224U: Natural Language Understanding Spring 2019", + "http://www.iks-project.eu/sites/default/files/IKS-Introduction-and-Overview.pdf": "IKS: introduction and overview", + "http://blog.sgo.to/2014/04/what-your-api-would-look-like-as-webpage.html": "Hello World: What your API would look like as a WebPage", + "http://www.semanlink.net/doc/2020/08/marcel_frohlich_sur_twitter_": "Marcel Fr\u00f6hlich sur Twitter : \"Biology / information theory /physics question: How do encodings emerge?\"", + "http://www.nytimes.com/2006/05/19/science/19tiny.html?ei=5088&en=5e2b672c1e3fc1a9&ex=1305691200&partner=rssnyt&emc=rss&pagewanted=print": "Debate on Little Human Fossil Enters Major Scientific Forum - New York Times", + "https://web.stanford.edu/class/cs276/handouts/lecture20-distributed-representations.pdf": "Distributed Word Representations for Information Retrieval", + "http://jekyllrb.com/": "Jekyll \u2022 Simple, blog-aware, static sites", + "https://news.cnrs.fr/opinions/energy-hydrogens-great-promise": "Energy: Hydrogen's Great Promise CNRS News", + "http://www.semanlink.net/doc/2020/07/knowledge_graphs_in_natural_lan": "Knowledge Graphs in Natural Language Processing @ ACL 2020 by Michael Galkin", + "http://www.ajaxpatterns.org/": "Main Page - Ajax Patterns Ajax Patterns", + "http://www.semanlink.net/doc/2021/02/three_billboards_outside_ebbing": "Three Billboards Outside Ebbing, Missouri", + "http://www.dailymotion.com/video/xbbh6l_f-gall-v-sanson-la-groupie-du-piani_music": "F.Gall/V.Sanson: la groupie du pianiste - Vid\u00e9o Dailymotion", + "http://miniajax.com/": "MiniAjax.com / A showroom of nice looking simple downloadable DHTML and AJAX scripts", + "https://rajarshd.github.io/papers/acl2015.pdf": "Gaussian LDA for Topic Models with Word Embeddings (2015)", + "http://www.semanlink.net/doc/2019/08/a_la_recherche_de_la_lionne_de_": "A la recherche de la lionne de Nimroud", + "http://www.semanlink.net/doc/2019/08/transformers_from_scratch_%7C_pet": "Transformers from scratch Peter Bloem", + "https://www.aiforhumanity.fr/": "AI for humanity", + "http://www.offconvex.org/2018/06/25/textembeddings/": "Deep-learning-free Text and Sentence Embedding, Part 2 \u2013 Off the convex path", + "https://dessia.tech/": "DessIA Technologies", + "http://www.wired.com/2016/06/compression-keeps-24-tons-stone-collapsing/#slide-1": "24 Tons of Towering Stone, Held Together With Compression WIRED", + "http://www.flax.co.uk/blog/2013/12/11/principles-of-solr-application-design-part-1-of-2/": "Principles of Solr application design \u2013 part 1 of 2", + "http://sparcool.net/": "SPARCool", + "http://www.lejdd.fr/societe/sciences/a-la-rencontre-du-blob-cet-organisme-ni-animal-ni-vegetal-ni-champignon-3347009#xtor=CS1-4": "A la rencontre du blob, cet organisme ni animal, ni v\u00e9g\u00e9tal, ni champignon", + "http://www.ibm.com/developerworks/xml/library/x-semweb.html": "Expose LDAP directories to the Semantic Web with SquirrelRDF", + "http://www.semanlink.net/doc/2021/04/comment_nos_ancetres_poissons_s": "Comment nos anc\u00eatres poissons sont sortis des eaux pour atteindre le milieu terrestre", + "http://www.semanlink.net/doc/2021/07/a_la_decouverte_de_la_cryptogra": "A la d\u00e9couverte de la cryptographie quantique", + 
"http://www.openlinksw.com/weblog/oerling/?id=1504": "Virtuoso RDF: Getting Started for the Developer", + "http://muyueh.com/greenhoney/": "Green Honey: Different languages have different ways to describe color", + "http://onlinehub.stanford.edu/cs224": "CS224n: Natural Language Processing with Deep Learning", + "http://data.semanticweb.org/conference/eswc/2012/html": "9th Extended Semantic Web Conference Semantic Web Dog Food", + "https://inc.cnrs.fr/fr/cnrsinfo/un-reseau-precurseur-de-la-biochimie-du-vivant-identifie": "Un r\u00e9seau pr\u00e9curseur de la biochimie du vivant identifi\u00e9", + "http://www.dynamicorange.com/blog/archives/internet-technical/ldow2008.html": "I Really _Don't_ Know: LDOW2008", + "http://www.w3.org/Submission/2003/SUBM-rdf-netapi-20031002/": "RDF Net API", + "https://www.kaggle.com/cpmpml/spell-checker-using-word2vec?scriptVersionId=1152488": "Spell Checker using Word2vec Kaggle", + "http://www.semanlink.net/doc/2020/03/how_taiwan_fended_off_the_coron": "How Taiwan fended off the coronavirus WORLD News Group", + "https://arxiv.org/abs/1703.00993": "[1703.00993] A Comparative Study of Word Embeddings for Reading Comprehension", + "http://youtube.com/results?search_query=moussa+poussi&search=Search": "YouTube - Search results for \"Moussa Poussi\"", + "http://musicontology.com/": "Music Ontology Specification", + "https://arxiv.org/pdf/1701.00185.pdf": "[1701.00185] Self-Taught Convolutional Neural Networks for Short Text Clustering", + "http://www.youtube.com/watch?v=y7WrYSwMHKA": "Polar bear executed in Iceland", + "http://pisani.blog.lemonde.fr/2007/03/15/google-une-mine-de-mp3/": "Google: une mine de mp3", + "http://www.courrierinternational.com/article/2009/08/01/un-tour-du-monde-de-l-humour": "SUPPL\u00c9MENT RIRE \u2022 Un tour du monde de l\u2019humour Courrier international", + "http://www.newscientist.com/article/dn26272-cosmic-inflation-is-dead-long-live-cosmic-inflation.html?full=true#.VCR1jUtGuww": "Cosmic inflation is dead, long live cosmic inflation! - 25 September 2014 - New Scientist", + "http://www3.nationalgeographic.com/places/countries/country_niger.html": "Niger facts, Niger travel videos, flags, photos - National Geographic", + "http://www.mamiwata.com/mamiwata.html": "MAMI WATA IN THE AFRICAN-AMERICAN DIASPORA", + "http://www.semanlink.net/doc/2020/12/do_you_love_me_youtube": "Do You Love Me? 
- Boston Dynamics video", + "http://www.wired.com/2016/03/apple-ios-9s-universal-links-recall-the-bad-old-days-of-internet-explorer/": "Apple\u2019s iOS 9 Links Recall the Bad Old Days of Internet Explorer WIRED", + "http://whoo.ps/2015/02/23/futures-of-text": "Futures of text Whoops by Jonathan Libov", + "http://www.semanlink.net/doc/2020/11/peter_bloem_sur_twitter_large": "Peter Bloem sur Twitter : Large-scale network motif analysis using compression", + "http://www.mnot.net/cache_docs/": "Caching tutorial", + "https://www.elastic.co/guide/en/elasticsearch/guide/master/languages.html": "Dealing with Human Language Elasticsearch: The Definitive Guide [master]", + "http://www.latribune.fr/technos-medias/internet/20101122trib000574964/comment-dublin-a-permis-a-google-de-reduire-ses-impots-en-europe.html": "Comment Dublin a permis \u00e0 Google de r\u00e9duire ses imp\u00f4ts en Europe", + "http://www.graphdracula.net/": "Dracula Graph Library", + "http://www.semanlink.net/doc/2021/08/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit": "(((\u0644()(\u0644() 'yoav))))\ud83d\udc7e sur Twitter : \"my two cents on why NLP as a field is focusing on the ML-ish / algorithmic / leaderboard-ish aspects (incl., now, LLMs) and not on the underlying language phenomena: it is just so much easier, on so many levels.\"", + "https://www.kickstarter.com/projects/1755283828/open-source-edition-of-livecode": "Next Generation LiveCode (Open Source) by RunRev Ltd \u2014 Kickstarter", + "http://www.lemonde.fr/sciences/article/2012/09/20/qui-a-peur-des-verites-scientifiques_1763270_1650684.html": "Qui a peur des v\u00e9rit\u00e9s scientifiques ?", + "http://www.semanlink.net/doc/2021/07/l_insulte_film_": "L'Insulte (film)", + "http://www.neo4j.org/": "Neo4j: The World's Leading Graph Database", + "http://www.semanlink.net/doc/2019/07/chaos_a_hongkong_apres_la_mise_": "Chaos \u00e0 Hongkong apr\u00e8s la mise \u00e0 sac du Parlement", + "http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAM0wMfRs6fW9axUr_Jo++=afX9YRhkzLqTpt84eHFe8WMmnG-Q@mail.gmail.com%3e": "Re: HELP about jena fuseki and NodeJS", + "http://www.google.com/search?hl=en&q=site%3Aety.com&btnG=Google+Search": "site:ety.com - Google Search", + "http://answers.semanticweb.com/questions/26220/sparql-query-to-return-all-triples-recursively-that-make-up-an-rdfsclass-definition": "Concise Bounded Descriptions with Two CONSTRUCT Queries", + "http://aksw.org/Projects/OntoWiki.html": "OntoWiki \u2014 Agile Knowledge Management and Semantic Web (AKSW)", + "https://research.fb.com/facebook-research-at-emnlp/": "Facebook Research at EMNLP \u2013 Facebook Research", + "http://jquery.org/": "jQuery Project", + "http://eunis.eea.europa.eu/": "EUNIS biodiversity database", + "http://selberg.org/2008/04/23/themes-from-beijing/": "WWW 2008 keynotes - Erik Selberg \u00bb Blog Archive \u00bb Themes from Beijing", + "http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the": "[1410.5859] Towards a Model Theory for Distributed Representations", + "http://www.snee.com/bobdc.blog/2010/06/restful-sparql-queries-of-rdfa.html": "RESTful SPARQL queries of RDFa - bobdc.blog", + "https://en.wikipedia.org/wiki/Poor_Law_Amendment_Act_1834": "Poor Law Amendment Act 1834 - Wikipedia", + "http://webid.myxwiki.org/xwiki": "Web ID XWiki", + "http://internetactu.blog.lemonde.fr/2011/10/12/la-prochaine-revolution-faites-la-vous-meme/": "La prochaine r\u00e9volution ? Faites-la vous m\u00eame ! 
InternetActu", + "http://www.semanlink.net/doc/2021/07/lombok_into_eclipse": "Lombok into eclipse", + "http://www.lemonde.fr/emploi/article/2015/11/17/bernard-stiegler-je-propose-la-mise-en-place-d-un-revenu-contributif-qui-favorise-l-engagement-dans-des-projets_4812202_1698637.html": "Bernard Stiegler\u00a0: \u00ab\u00a0Je propose la mise en place d\u2019un revenu contributif, qui favorise l\u2019engagement dans des projets\u00a0\u00bb", + "http://www.datasciencecentral.com/profiles/blogs/document-similarity-analysis-using-elasticsearch-and-python": "Document Similarity Analysis Using ElasticSearch and Python - Data Science Central", + "http://www.jpl.nasa.gov/news/news.php?release=2013-120": "NASA Team Investigates Complex Chemistry at Titan - NASA Jet Propulsion Laboratory", + "http://prefuse.sourceforge.net/": "prefuse: an interactive visualization toolkit", + "https://fr.slideshare.net/BhaskarMitra3/a-simple-introduction-to-word-embeddings": "A Simple Introduction to Word Embeddings", + "http://bugbrother.blog.lemonde.fr/2013/10/17/un-quarteron-deurodeputes-va-brader-nos-vies-privees/": "Un quarteron d\u2019eurod\u00e9put\u00e9s va brader nos vies priv\u00e9es BUG BROTHER", + "https://medium.com/@tcherry/understanding-the-persian-empire-50b73bdcae58": "Understanding the Persian Empire \u2013 Cher Yi \u2013 Medium", + "http://danbri.org/words/2008/07/04/361": "danbri\u2019s foaf stories \u00bb Referata, a Semantic Media Wiki hosting site", + "http://www.omnytex.com/articles/xhrstruts/": "Ajax using XMLHttpRequest and Struts", + "http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea": "[2002.10640] Differentiable Reasoning over a Virtual Knowledge Base", + "http://www.semanlink.net/doc/2019/06/shaping_linked_data_apps_%7C_rube": "Shaping Linked Data apps Ruben Verborgh", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3224,50-1176699,0.html": "Apr\u00e8s 1789, 2009 ?", + "https://github.com/zalandoresearch/flair": "zalandoresearch/flair: A very simple framework for state-of-the-art NLP", + "http://jena.sourceforge.net/DB/index.html": "Jena Relational Database backend", + "http://emnlp2014.org/tutorials/8_notes.pdf": "Embeddings methods for NLP (2014) (tutorial - Jason Weston - Facebook Research)", + "http://www.coolestguyplanettech.com/downtown/install-and-configure-apache-mysql-php-and-phpmyadmin-osx-108-mountain-lion": "Install and configure Apache, MySQL, PHP on OSX 10.8 Mountain Lion", + "http://www.lemonde.fr/planete/article/2010/07/09/bp-pret-a-lancer-un-forage-tres-perilleux-en-arctique_1385731_3244.html#ens_id=1341498": "BP pr\u00eat \u00e0 lancer un forage tr\u00e8s p\u00e9rilleux en Arctique", + "http://www.scoop.it/": "Build engaged audiences through publishing by curation. 
Scoop.it", + "http://scikit-learn.org/stable/modules/pipeline.html": "Pipeline and FeatureUnion: combining estimators \u2014 scikit-learn documentation", + "http://www.lemonde.fr/afrique/article/2012/07/01/la-destruction-des-remparts-protecteurs-de-tombouctou_1727539_3212.html": "Les mausol\u00e9es, \"remparts protecteurs\" de Tombouctou, d\u00e9truits par Ansar Eddine", + "http://dig.csail.mit.edu/breadcrumbs/node/215": "Giant Global Graph Decentralized Information Group (DIG) Breadcrumbs", + "https://pipelines.puppet.com/docs/tutorials/build-and-deploy-python-with-docker/": "How to Build and Deploy a Python Application on Docker Distelli", + "https://theintercept.com/2019/04/17/green-new-deal-short-film-alexandria-ocasio-cortez/": "A Message From the Future With Alexandria Ocasio-Cortez", + "http://www.semanlink.net/doc/2020/01/nsmntx_neo4j_rdf_semantics_": "NSMNTX - Neo4j RDF & Semantics toolkit", + "http://www.mollio.org/": "Mollio CSS/HTML Templates", + "http://www.pbs.org/mediashift/2013/04/why-facebook-will-have-trouble-achieving-search-success104": "Why Facebook\u2019s Graph Search Could Be Doomed Mediashift PBS", + "http://ksl.stanford.edu/": "Stanford Knowledge Systems, AI Laboratory", + "http://www.semanlink.net/doc/2020/01/les_brexiters_ont_ils_eu_ce_qu_": "Les brexiters ont-ils eu ce qu'ils voulaient ?", + "http://www.npr.org/templates/story/story.php?storyId=96564952": "Mining For Diamonds In The Canadian Rough", + "https://www.nytimes.com/2018/07/03/technology/personaltech/safari-reader-mode.html": "Reader Mode in Safari - The New York Times", + "http://lists.w3.org/Archives/Public/public-lod/2013Aug/0021.html": "WebID Frustration from Hugh Glaser on 2013-08-06 (public-lod@w3.org from August 2013)", + "https://workflowy.com/": "WorkFlowy - Organize your brain.", + "https://jpetazzo.github.io/2015/01/19/dockerfile-and-data-in-volumes/": "Putting data in a volume in a Dockerfile", + "http://www.afrik.com/article4010.html": "Les couilles de l'\u00e9l\u00e9phant", + "http://www.talis.com/tdn/platform/user/bigfoot/tour": "Bigfoot - An initial tour Talis Developer Network", + "https://arxiv.org/abs/1605.07427": "[1605.07427] Hierarchical Memory Networks", + "https://stats.stackexchange.com/questions/244616/how-sampling-works-in-word2vec-can-someone-please-make-me-understand-nce-and-ne/245452#245452": "How sampling works in Word2vec? Can someone please make me understand NCE and negative sampling? - Cross Validated", + "http://www.liberation.fr/actualite/societe/272511.FR.php": "\u00abMa grand-m\u00e8re a sorti sa carte avec la mention \"juif\"\u00bb", + "http://steveharris.tumblr.com/post/4590579712/construct-json": "Misc Thoughts, CONSTRUCT JSON", + "http://lists.w3.org/Archives/Public/public-vocabs/2013May/0099.html": "schema:domain / Re: Official OWL version outdated from Bernard Vatant on 2013-05-13 (public-vocabs@w3.org from May 2013)", + "http://internetactu.blog.lemonde.fr/2014/08/27/modifier-le-vivant-pour-sauver-la-planete/": "Modifier le vivant pour sauver la plan\u00e8te ? InternetActu", + "https://dl.acm.org/citation.cfm?id=3186000": "HighLife: Higher-arity Fact Harvesting", + "http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2.html#(1)": "Creating, Deploying and Exploiting Linked Data", + "https://medium.com/swlh/chatbots-were-the-next-big-thing-what-happened-5fc49dd6fa61": "Chatbots were the next big thing: what happened? 
\u2013 The Startup \u2013 Medium", + "https://news.dartmouth.edu/events/event?event=53673#.W9RjaS_pPMU": "Self-Supervised Learning, Yann LeCun, Facebook AI Research Dartmouth News", + "http://datajournalism.stanford.edu/": "Journalism in the Age of Data: A Video Report on Data Visualization by Geoff McGhee", + "http://dev.data2000.no/sgvizler/": "Sgvizler", + "http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050721.html": "NOVA scienceNOW Dispatches: What We're Thinking About: What Is Life? PBS", + "https://www.researchgate.net/publication/291098860_Modifications_for_the_Cluster_Content_Discovery_and_the_Cluster_Label_Induction_Phases_of_the_Lingo_Algorithm": "Modifications for the Cluster Content Discovery and the Cluster Label Induction Phases of the Lingo Algorithm (2014)", + "http://redlink.co/adding-semantic-search-to-apache-solr/": "Adding Semantic Search to Apache Solr", + "http://www.semanlink.net/doc/2019/05/python_speech_to_text_with_pock": "Python speech to text with PocketSphinx \u2013 sophie's blog", + "https://ajax.dev.java.net/": "ajax: Project jMaki", + "http://news.stanford.edu/pr/2014/pr-neurogrid-boahen-engineering-042814.html": "Stanford bioengineers create circuit board modeled on the human brain Stanford News Release", + "http://vuejs.org/": "vue.js", + "http://www.w3.org/DesignIssues/Abstractions.html": "Abstractions in Web architecture - Design Issues", + "https://selfdrivingcars.mit.edu/": "MIT 6.S094: Deep Learning for Self-Driving Cars", + "http://obscuredclarity.blogspot.fr/2012/05/using-maven-offline.html": "Obscured Clarity: Using Maven Offline", + "http://www.joelonsoftware.com/articles/Wrong.html": "Making Wrong Code Look Wrong - Joel on Software", + "http://www.semanlink.net/doc/2020/01/a_joint_model_for_entity_analys": "A Joint Model for Entity Analysis: Coreference, Typing, and Linking (Greg Durrett, Dan Klein 2014)", + "http://www.bbc.com/news/health-34857015": "Antibiotic resistance: World on cusp of 'post-antibiotic era' - BBC News", + "http://www.geocities.com/anpipniger/index.html": "AGENCE NIGERIENNE POUR LA PROMOTION DE L'IRRIGATION PRIVEE Agence Nig\u00e9rienne pour la Promotion de l'Irrigation Priv\u00e9e", + "http://www.w3schools.com/cssref/css_selectors.asp": "CSS Selector Reference", + "http://wiki.apache.org/solr/SolrTomcat#URI_Charset_Config": "SolrTomcat - URI Charset Config", + "http://tomcat.apache.org/tomcat-5.0-doc/config/context.html": "Server Configuration Reference - The Context Container", + "http://searchengineland.com/5-ways-optimize-markup-knowledge-graph-semantic-search-186755": "How To Tell Search Engines What \"Entities\" Are On Your Web Pages", + "http://www.geoportail.fr/": "G\u00e9oPortail", + "http://www.lemonde.fr/idees/article/2017/10/05/monsanto-papers-des-derives-inadmissibles_5196563_3232.html": "\u00ab\u00a0Monsanto Papers\u00a0\u00bb\u00a0: des d\u00e9rives inadmissibles", + "http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia": "[2002.11402] Detecting Potential Topics In News Using BERT, CRF and Wikipedia", + "http://fr.wikipedia.org/wiki/Paradoxe_du_singe_savant": "Paradoxe du singe savant - Wikip\u00e9dia", + "http://adventuresinmachinelearning.com/recurrent-neural-networks-lstm-tutorial-tensorflow/": "Recurrent neural networks and LSTM tutorial in Python and TensorFlow - Adventures in Machine Learning", + "http://en.wikipedia.org/wiki/Pre-Siberian_American_Aborigines#Lagoa_Santa": "Pre-Siberian American Aborigines - Wikipedia, the free encyclopedia", + "http://www.redmelon.net/tstme/4corners/": 
"CSS Rounded Corners", + "http://www.semanlink.net/doc/2020/04/arundhati_roy_%C2%AB_en_inde_le_c": "Arundhati Roy\u00a0: \u00ab\u00a0En Inde, le confinement le plus gigantesque et le plus punitif de la plan\u00e8te\u00a0\u00bb", + "http://www.semanlink.net/doc/2020/01/interpretable_named_entity_reco": "Interpretable Named entity recognition with keras and LIME \u2013 Depends on the definition", + "https://en.wikipedia.org/wiki/Madonna:_Truth_or_Dare": "In Bed with Madonna", + "http://poetes.com/hugo/nuitdu4.htm": "Souvenir de la nuit du 4", + "https://nlp.stanford.edu/software/tmt/tmt-0.2/": "Stanford Topic Modeling Toolbox", + "http://karpathy.github.io/2016/09/07/phd/": "A Survival Guide to a PhD", + "http://www.semantic-web.at/1.36.resource.271.adrian-paschke-x22-corporate-semantic-web-also-addresses-the-pragmatic-aspects-of-using-se.htm": "Adrian Paschke: \"Corporate Semantic Web also addresses the pragmatic aspects of using Semantic Web technologies.\"", + "http://fr.wikipedia.org/wiki/Nouvelles_en_trois_lignes": "Nouvelles en trois lignes \u2014 Wikip\u00e9dia", + "http://www.mmds.org/": "Mining of Massive Datasets", + "https://medium.com/@TechMagic/reactjs-vs-angular5-vs-vue-js-what-to-choose-in-2018-b91e028fa91d": "ReactJS vs Angular5 vs Vue.js \u2014 What to choose in 2018?", + "http://docs.info.apple.com/article.html?artnum=106290": "Enabling and using the \"root\" user in Mac OS X", + "http://apassant.net/blog/2012/01/22/about-json-ld-and-content-negotiation/": "About JSON-LD and Content-Negotiation Alexandre Passant", + "http://readwrite.com/2008/09/02/twitter_novels_not_big_success_stories#awesm=~oFt2tEn7VYxTp4": "Twitter Novels: Not Big Success Stories Yet \u2013 ReadWrite", + "https://www.youtube.com/watch?v=qThJEKhNgvY": "Alceu Valen\u00e7a - Morena Tropicana \"Ao Vivo\" HD - YouTube", + "http://www.w3.org/DesignIssues/Diff": "Delta: an ontology for the distribution of differences between RDF graphs", + "http://www.semanlink.net/doc/2019/09/introducing_neural_structured_l": "Introducing Neural Structured Learning in TensorFlow", + "http://stackoverflow.com/questions/1389736/how-do-i-create-a-unique-id-in-java": "uniqueidentifier - How do I create a unique ID in Java? 
- Stack Overflow", + "http://www.toptal.com/machine-learning/an-introduction-to-deep-learning-from-perceptrons-to-deep-networks": "An Introduction to Deep Learning (in Java): From Perceptrons to Deep Networks Toptal", + "http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/paper04.pdf": "\"Semantic Web Technologies in Technical Automotive\" - CEUR-WS.org/Vol-258 - OWL: Experiences and Directions 2007", + "http://lists.w3.org/Archives/Public/public-awwsw/2011Jan/0021.html": "Re: [Fwd: Reversing HTTP Range 14 and SemWeb Cool URIs decision]", + "http://www.philippelamberts.eu/referendum-uk-les-eurodeputes-reagissent/": "Brexit: les Eurod\u00e9put\u00e9s r\u00e9agissent Philippe Lamberts", + "http://fr.wikipedia.org/wiki/Exp%C3%A9dition_Endurance": "Exp\u00e9dition Endurance - Wikip\u00e9dia", + "https://jakearchibald.com/2014/browser-cache-vary-broken/": "The browser cache is Vary broken - JakeArchibald.com", + "http://www.nytimes.com/external/readwriteweb/2010/07/01/01readwriteweb-how-best-buy-is-using-the-semantic-web-23031.html": "How Best Buy Is Using The Semantic Web - NYTimes.com", + "http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=22190": "[Linking-open-data] ann: Semantic Web Pipes", + "https://uwspace.uwaterloo.ca/bitstream/handle/10012/3000/thesis.pdf": "Cost-Sensitive Boosting for Classification of Imbalanced Data (2007)", + "http://www.semanlink.net/doc/2020/01/three_years_after_the_w3c_appro": "Three years after the W3C approved a DRM standard, it's no longer possible to make a functional indie browser / Boing Boing", + "http://www.semanlink.net/doc/2020/05/disambiguating_kbpedia_knowledg": "Disambiguating KBpedia Knowledge Graph Concepts", + "http://www.macosxhints.com/": "macosxhints.com - OS X tips and tricks!", + "http://www.lemonde.fr/international/article/2013/01/29/les-manuscrits-de-tombouctou-victimes-des-islamistes-et-de-la-corruption_1823836_3210.html": "Les manuscrits de Tombouctou, victimes des islamistes et de la corruption", + "http://programminghistorian.org/lessons/topic-modeling-and-mallet": "Getting Started with Topic Modeling and MALLET", + "https://www.tensorflow.org/install/install_mac": "Installing TensorFlow on Mac OS X \u00a0\u00a0 TensorFlow", + "http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context": "[2010.01057] LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention", + "http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri": "[2004.14958] A Call for More Rigor in Unsupervised Cross-lingual Learning", + "http://fr.wikipedia.org/wiki/Plasmide": "Plasmide - Wikip\u00e9dia", + "http://blog.iandavis.com/2011/08/21/google-author-rich-snippets/": "Google Author Rich Snippets Internet Alchemy", + "http://colah.github.io/posts/2015-09-Visual-Information/": "Visual Information Theory -- colah's blog", + "http://www.semanlink.net/doc/2021/05/la_cite_oubliee_d%E2%80%99ulug_depe_%7C_c": "La cit\u00e9 oubli\u00e9e d\u2019Ulug D\u00e9p\u00e9 CNRS Le journal", + "http://www.semanlink.net/doc/2020/03/gilda": "Gilda", + "http://blog.schema.org/2015/05/schema.html": "Schema.org 2.0", + "http://www.offconvex.org/2018/09/18/alacarte/": "Simple and efficient semantic embeddings for rare words, n-grams, and language features \u2013 Off the convex path", + "http://rdfa.info/": "RDFa", + "http://www.semanlink.net/doc/2020/05/differentiable_reasoning_over_t": "Differentiable Reasoning over Text \u2013 Machine Learning Blog ML@CMU Carnegie Mellon University", + 
"http://www.faz.net/aktuell/feuilleton/debatten/mathias-doepfner-s-open-letter-to-eric-schmidt-12900860.html?printPagedArticle=true#pageIndex_2": "\"Why we fear Google\", Mathias D\u00f6pfner\u2019s open letter to Eric Schmidt", + "http://www.semanlink.net/doc/2019/09/machine_learning_for_unbalanced": "Machine Learning for Unbalanced Datasets using Neural Networks", + "http://www.semanlink.net/doc/2019/08/knowledge_graphs_in_natural_lan": "Knowledge graphs in Natural Language Processing @ ACL 2019 - Michael Galkin", + "https://en.wikipedia.org/wiki/Sima_Humboldt": "Sima Humboldt - Wikipedia", + "http://www.semanlink.net/doc/2019/06/20_accuracy_bump_in_text_class": "20% Accuracy Bump in Text Classification with ME-ULMFiT", + "http://www.nltk.org/_modules/nltk/tag/stanford.html": "Source code for nltk.tag.stanford \u2014 NLTK documentation", + "http://sigmajs.org/": "Sigma js", + "http://internetactu.blog.lemonde.fr/2016/02/20/le-vertigineux-avenir-des-echanges-executables/": "Le vertigineux avenir des \u00e9changes ex\u00e9cutables InternetActu", + "http://www.snee.com/bobdc.blog/2010/09/fallback-with-sparql.html": "Fallback with SPARQL - bobdc.blog", + "http://stevelosh.com/blog/2018/08/a-road-to-common-lisp/": "A Road to Common Lisp / Steve Losh", + "http://www.semanticoverflow.com/": "Semantic Overflow", + "https://arxiv.org/abs/1806.04470": "[1806.04470] Design Challenges and Misconceptions in Neural Sequence Labeling", + "http://sujitpal.blogspot.fr/2013/03/implementing-rake-algorithm-with-nltk.html": "Salmon Run: Implementing the RAKE Algorithm with NLTK", + "http://news.bbc.co.uk/1/hi/sci/tech/4986668.stm": "BBC NEWS - Neanderthal yields nuclear DNA", + "https://www.theverge.com/2018/5/17/17344250/google-x-selfish-ledger-video-data-privacy": "Google\u2019s Selfish Ledger is an unsettling vision of Silicon Valley social engineering - The Verge", + "http://www.ccfd.asso.fr/2007/affiche-migrants-g.jpg": "Un migrant doit \u00eatre trait\u00e9 comme un d\u00e9linquant. NON !", + "https://medium.com/@kurtcagle/why-you-dont-need-data-scientists-a9654cc9f0e4": "Why You Don\u2019t Need Data Scientists \u2013 Kurt Cagle \u2013 Medium", + "http://www.nigerime.com/": "Nigerime: Le Portail du Rap et du Hip Hop Nigerien", + "http://www.newscientist.com/article/dn24598-neanderthal-virus-dna-spotted-hiding-in-modern-humans.html": "Neanderthal virus DNA spotted hiding in modern humans - life - 18 November 2013 - New Scientist", + "http://siren.solutions/siren/overview/": "SIREn Solutions Solr & Elasticsearch Consultancy \u2013 Overview", + "http://lists.w3.org/Archives/Public/www-tag/2007Dec/0024.html": "Alternative to 303 response: Description-ID: header From: Tim Berners-Lee", + "https://blog.novatec-gmbh.de/the-problems-with-swagger/": "The problems with Swagger - NovaTec Blog", + "http://blog.octo.com/introduction-a-la-technologie-blockchain/": "Introduction \u00e0 la technologie Blockchain OCTO talks !", + "http://stackoverflow.com/questions/18496940/how-to-deal-with-persistent-storage-e-g-databases-in-docker": "How to deal with persistent storage (e.g. databases) in docker - Stack Overflow", + "http://developer.yahoo.net/ypatterns/": "Yahoo! 
Design Pattern Library", + "https://bitbucket.org/visuaLOD/visualod.bitbucket.org/overview": "visuaLOD / visualod.bitbucket.org \u2014 Bitbucket", + "http://spendmatters.com/2015/11/09/why-bitcoins-blockchain-technology-could-revolutionize-supply-chain-transparency/": "Bitcoin\u2019s Blockchain Can Revolutionize Supply Chain Transparency Spend Matters", + "http://www.ined.fr/fichier/t_publication/1300/publi_pdf1_435.pdf": "La moiti\u00e9 de la population mondiale vit en ville", + "http://www.semanlink.net/doc/2020/10/representation_learning_of_know": "Representation learning of knowledge graphs with entity descriptions (AAAI 2016)", + "http://semanticweb.com/volkswagen-das-auto-company-is-das-semantic-web-company_b23233": "Volkswagen: Das Auto Company is Das Semantic Web Company! - semanticweb.com", + "http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_": "Elasticsearch RSS feed indexer with Spacy entity extraction", + "http://en.wikibooks.org/wiki/Octave_Programming_Tutorial/Getting_started": "Octave Programming Tutorial/Getting started - Wikibooks, open books for an open world", + "http://www.lemonde.fr/economie/article/2012/08/07/les-deboires-de-knight-capital-specialiste-du-trading-haute-frequence_1743230_3234.html": "Les d\u00e9boires de Knight Capital, sp\u00e9cialiste du trading haute fr\u00e9quence", + "https://discuss.elastic.co/t/loading-json-ld-into-es/19970": "Loading JSON-LD into ES - Elasticsearch - Discuss the Elastic Stack", + "http://www.nytimes.com/2007/06/17/magazine/17lootfarmers-t.html?ei=5088&en=a6282d1ddf608fc1&ex=1339732800&partner=rssnyt&emc=rss&pagewanted=all": "Video Games - The Life of the Chinese Gold Farmer - New York Times", + "http://dl.free.fr/": "Free FTP", + "http://www.semanlink.net/doc/2019/08/efficient_compression_in_color_": "Efficient compression in color naming and its evolution", + "https://en.wikipedia.org/wiki/Letters_from_Iwo_Jima": "Letters from Iwo Jima", + "http://semanticweb.com/report-from-day-5-at-iswc_b24326": "Report from Day 5 at ISWC - semanticweb.com", + "http://www.semanlink.net/doc/2021/05/apple_is_eating_our_lunch_go": "'Apple is eating our lunch': Google employees admit in lawsuit that the company made it nearly impossible for users to keep their location private", + "http://fr.scribd.com/doc/138527966/Facebook-Natural-Language-Engineering": "Facebook Natural Language Engineering", + "http://world.honda.com/ASIMO/history/": "Honda Worldwide ASIMO History", + "http://www.semanlink.net/doc/2021/07/davlan_david_adelani_hugging": "Davlan (David Adelani) @Huggingface", + "http://en.wikipedia.org/wiki/Tulip_mania": "Tulip mania - Wikipedia", + "https://www.thecut.com/2018/05/lil-miquela-digital-avatar-instagram-influencer.html": "Lil Miquela: The Digital Avatar Instagram Influencer", + "http://spraakdata.gu.se/svedd/papers/courses/masterThes.pdf": "Acronym Recognition - Recognizing acronyms in Swedish texts", + "http://ant4eclipse.sourceforge.net/ant-for-eclipse-faq.html": "Ant for eclipse - access eclipse configurations from within ant", + "http://www.realgoodfood.com/ciao_vito.html": "Real Good Food Ciao Vito", + "http://www.snee.com/bobdc.blog/2014/02/querying-my-own-mp3-image-and.html": "Querying my own MP3, image, and other file metadata with SPARQL - bobdc.blog", + "https://en.wikipedia.org/wiki/All_About_Eve": "All About Eve", + "http://www.seoskeptic.com/json-ld-google-knowledge-graph-schema-org-seo/": "JSON-LD, the Google Knowledge Graph and schema.org SEO", + 
"http://www.nature.com/srep/2013/130228/srep01319/full/srep01319.html": "A Brain-to-Brain Interface for Real-Time Sharing of Sensorimotor Information : Scientific Reports : Nature Publishing Group", + "http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo": "Le langage, une \u00e9mergence explosive", + "http://www.offconvex.org/": "Off the convex path", + "http://www.pbs.org/cringely/pulpit/2008/pulpit_20080328_004611.html": "I, Cringely . The Pulpit . Amish Paradise PBS", + "http://duckduckgo.com/": "DuckDuckGo", + "http://ngm.nationalgeographic.com/2013/04/species-revival/zimmer-text": "Bringing Extinct Species Back to Life - Pictures, More From National Geographic Magazine", + "http://data.semanticweb.org/conference/eswc/2012/paper/research/132/html": "Graph Kernels for RDF data Semantic Web Dog Food", + "https://dannyayers.wordpress.com/2014/12/30/browser-sparql-server-wiki/": "Browser + SPARQL Server = Wiki Danny Ayers' Other Alternate Weblog", + "http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol": "[1909.01380] The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives", + "http://www.semanlink.net/2013/07/coldrdfa/": "RDFa / schema.org test", + "http://uk.co.rplug.renault.com/product/gen?embed=true": "Renault range with RDFa markup", + "http://ole-martin.net/hbase-tutorial-for-beginners/": "HBase tutorial for beginners - a blog by Ole-Martin M\u00f8rk", + "http://www.w3.org/blog/SW/2012/03/08/rdfa-microdata-and-rdf-two-notes-published-by-the-w3c-html-data-task-force/": "RDFa, Microdata, and RDF (Two Notes Published by the W3C HTML Data Task Force) Semantic Web Activity News", + "http://www.pnas.org/content/early/2016/04/13/1520084113": "What insects can tell us about the origins of consciousness", + "https://arxiv.org/abs/1506.02142": "[1506.02142] Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning", + "https://sgugger.github.io/": "Another data science student's blog (Sylvain Gugger)", + "http://www.quora.com/What-is-best-for-graph-rendering-sigma-js-or-d3-js-Why": "What Is Best For Graph Rendering: Sigma.js Or D3.js? Why? - Quora", + "http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela": "Translating Embeddings for Modeling Multi-relational Data (2013)", + "http://www.semanlink.net/doc/2019/12/hardmaru_sur_twitter_legendr": "hardmaru sur Twitter : Legendre Memory Units", + "http://ohshitgit.com/": "Oh, shit, git!", + "http://wiki.apache.org/tomcat/FAQ/CharacterEncoding": "FAQ/CharacterEncoding - Tomcat Wiki", + "http://robohub.org/artificial-general-intelligence-that-plays-atari-video-games-how-did-deepmind-do-it/": "Artificial General Intelligence that plays Atari video games: How did DeepMind do it? 
Robohub", + "http://esw.w3.org/topic/SkosDev": "SkosDev - ESW Wiki", + "http://smethur.st/posts/176135843": "Yet another post about Google (not really) removing the URL bar from Chrome Smethurst", + "http://www.flickr.com/photos/hyperfp": "Flickr: Photos from hyperfp", + "http://chatlogs.planetrdf.com/swig/2007-07-04.html#T13-34-27": "Semantic Web Interest Group IRC Chat Logs for 2007-07-04", + "http://www.youtube.com/watch?v=blzl4JmrjuE": "Finale lutte traditionnelle Niger 2012 - YouTube", + "http://blog.bitflux.ch/wiki/LiveSearch": "LiveSearch - Bitflux Blog Wiki", + "http://www.slf4j.org/": "SLF4J: Simple Logging Facade for Java", + "http://www.lemonde.fr/afrique/article/2017/08/31/l-incendie-au-musee-d-abomey-relance-le-debat-sur-la-conservation-des-tresors-du-benin_5179074_3212.html": "L\u2019incendie au mus\u00e9e d\u2019Abomey\u00a0relance le d\u00e9bat sur la conservation des tr\u00e9sors du B\u00e9nin", + "https://medium.com/huggingface/learning-meaning-in-natural-language-processing-the-semantics-mega-thread-9c0332dfe28e": "Learning Meaning in Natural Language Processing - The Semantics Mega-Thread", + "http://en.wikipedia.org/wiki/Myxococcus_xanthus": "Myxococcus xanthus - Wikipedia, the free encyclopedia", + "http://tech.groups.yahoo.com/group/jena-dev/message/35867": "jena-dev : Message: Joseki requirements", + "http://cinema.arte.tv/fr/securite-nucleaire-le-grand-mensonge": "S\u00e9curit\u00e9 nucl\u00e9aire : le grand mensonge ARTE Cinema", + "http://blog.semantic-web.at/2008/11/10/umbel-dbpedia-futureweb-ecology-interview/": "The Semantic Puzzle DBpedia, UMBEL & the Future Web\u2019s Ecology - interview with Mike Bergman & S\u00f6ren Auer", + "https://www.pnas.org/content/early/2019/03/27/1817407116": "A seismically induced onshore surge deposit at the KPg boundary, North Dakota PNAS", + "https://medium.com/@francois.chollet/the-memories-around-us-a3ba7d3a66a2": "The memories around us \u2013 Fran\u00e7ois Chollet \u2013 Medium", + "http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref": "Definition of \"URI References\" in RDF (Concepts and Abstract Syntax)", + "http://www.ihes.fr/~lafforgue/dem/courriel.html": "Pourquoi j'ai d\u00e9missionn\u00e9 du Haut Conseil de l'Education - Laurent Lafforgue", + "http://www.futura-sciences.com/magazines/terre/infos/actu/d/paleontologie-dinosaures-ont-disparu-mammiferes-nen-menaient-pas-large-63250/": "Quand les dinosaures ont disparu, les mammif\u00e8res n'en menaient pas large", + "http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-13.pdf": "Building a Web of Needs, Florian Kleedorfer", + "http://dig.csail.mit.edu/breadcrumbs/node/194": "Linked Data at WWW2007: GRDDL, SPARQL, and Wikipedia, oh my!", + "http://xmlarmyknife.org/blog/archives/000285.html": "XMLArmyKnife - Experimenting with EmbeddedRDF and GRDDL Support", + "http://www.mkbergman.com/?p=417": "99 Wikipedia Sources Aiding the Semantic Web \u00bb AI3:::Adaptive Information", + "http://riese.joanneum.at/": "RDFizing and Interlinking the EuroStat Data Set Effort - riese", + "http://www.spiegel.de/international/germany/inside-the-nsa-s-war-on-internet-security-a-1010361.html": "Inside the NSA's War on Internet Security - SPIEGEL ONLINE", + "https://lists.w3.org/Archives/Public/public-linked-json/2011Aug/0078.html": "Framing explanation and framing return values from Dave Longley on 2011-08-24 (public-linked-json@w3.org from August 2011)", + "http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p": "[2006.15020] Pre-training via Paraphrasing", + 
"https://www.techdirt.com/blog/?tag=monkey+selfie": "Monkey Selfie stories at Techdirt.", + "http://www.theguardian.com/world/2013/sep/05/nsa-how-to-remain-secure-surveillance": "NSA surveillance: how to stay secure Bruce Schneier theguardian.com", + "http://www.insu.cnrs.fr/node/5745": "L'Europe est \u00e0 nouveau \u00e0 la conqu\u00eate de Mars", + "http://www.w3.org/2001/sw/sweo/public/UseCases/Faviki/": "Case Study: Semantic Tags", + "https://deepmind.com/blog/alphago-zero-learning-scratch/": "AlphaGo Zero: Learning from scratch DeepMind", + "https://medium.com/@hadyelsahar/writing-code-for-natural-language-processing-research-emnlp2018-nlproc-a87367cc5146": "Writing code for Natural language processing Research", + "http://www.w3.org/DesignIssues/Principles.html": "-- Axioms of Web architecture", + "http://clesnes.blog.lemonde.fr/2017/06/20/dans-la-silicon-valley-loracle-francais-de-la-complexite/": "Dans la Silicon Valley, \u00ab\u00a0l\u2019Oracle\u00a0\u00bb fran\u00e7ais de la complexit\u00e9 tech and berries", + "https://www.newscientist.com/article/2087924-world-war-zero-brought-down-mystery-civilisation-of-sea-people/#.V0HMbfG3hxY.twitter": "World War Zero brought down mystery civilisation of 'sea people' New Scientist", + "http://www-128.ibm.com/developerworks/web/library/wa-ajaxintro3/?ca=dgr-lnxw01MasterAJAX3": "Mastering Ajax, Part 3: Advanced requests and responses in Ajax", + "https://www.quantamagazine.org/20170404-quantum-physicists-attack-the-riemann-hypothesis/": "Quantum Physicists Attack the Riemann Hypothesis Quanta Magazine", + "http://www.semanticoverflow.com/questions/830/describe-a-topic-of-a-concept": "Describe a topic of a concept - Semantic Overflow", + "http://www.semanlink.net/doc/2020/07/aran_komatsuzaki_sur_twitter_": "Aran Komatsuzaki sur Twitter : \"Big Bird: Transformers for Longer Sequences...\"", + "http://www.semanlink.net/doc/2020/02/the_men_who_starved_to_death_to": "The men who starved to death to save the world's seeds - Russia Beyond", + "http://news.bbc.co.uk/1/hi/sci/tech/4166076.stm": "BBC NEWS Ocean bug has 'smallest genome'", + "https://fr.wikipedia.org/wiki/Good_Morning_England": "Good Morning England (The boat that Rocked)", + "http://beckr.org/marbles": "Marbles", + "http://doc.carrot2.org/": "Carrot2 manual", + "http://www.youtube.com/watch?v=bIeCF0LjVDw": "Elba Ramalho e Claudia Ohana em \"O Meu Amor\"", + "http://stackoverflow.com/questions/10582054/maven-project-version-inheritance-do-i-have-to-specify-the-parent-version": "maven project version inheritance - do I have to specify the parent version? 
- Stack Overflow", + "https://developers.google.com/gmail/schemas/testing-your-schema#schema_validator": "Testing Your Schemas - Schemas in Gmail \u2014 Google Developers", + "http://www.semanlink.net/doc/2021/06/masakhane": "Masakhane", + "https://www.loggly.com/blog/loggly-chose-elasticsearch-reliable-scalable-log-management/": "ElasticSearch vs Solr", + "http://fr.openoffice.org/docs/MacOSX_Install_fr_HowTo_OOo2_V1.4.pdf": "Installation d'OpenOffice.org 2 (version X11) Mac OS X PowerPC et Mac Intel", + "http://www.nytimes.com/2015/01/08/opinion/the-charlie-hebdo-massacre-in-paris.html?hp&action=click&pgtype=Homepage&module=c-column-top-span-region®ion=c-column-top-span-region&WT.nav=c-column-top-span-region&_r=1": "The Charlie Hebdo Massacre in Paris - NYTimes.com", + "http://blogs.oracle.com/bblfish/entry/serialising_java_objects_to_rdf": "Serialising Java Objects to RDF with Jersey (The Sun BabelFish Blog)", + "http://www.semanlink.net/doc/2020/12/vaccins_%C2%AB_la_france_doit_d%E2%80%99ur": "Vaccins\u00a0: \u00ab\u00a0La France doit d\u2019urgence donner \u00e0 sa recherche les moyens de ses ambitions\u00a0\u00bb", + "https://levyomer.wordpress.com/2014/04/25/dependency-based-word-embeddings/": "Dependency-Based Word Embeddings Omer Levy", + "http://www.semanlink.net/doc/2021/04/le_massif_de_lovo_un_tresor_d_": "Le massif de Lovo, un tr\u00e9sor d'art rupestre \u00e0 pr\u00e9server CNRS Le journal", + "http://www.semanlink.net/doc/2019/12/la_venus_de_tursac_%7C_musee_ar": "La \"V\u00e9nus\" de Tursac Mus\u00e9e arch\u00e9ologie nationale", + "http://www.semanlink.net/doc/2021/06/librairy": "librAIry", + "http://mactips.info/blog/?p=1867": "Prepare Mac OS X for WordPress", + "http://www.cs.vu.nl/~pmika/swc-2008/paggr-bnowack_paggr_2008_10_01.pdf": "paggr -Smart Data Portals", + "https://dzone.com/articles/functional-programming-java-8": "Functional Programming with Java 8 Functions - DZone Java", + "http://www.laserbox.fr/ere-du-laser": "L\u2019\u00e8re du laser Laserbox", + "http://linkeddatabook.com/editions/1.0/": "Linked Data: Evolving the Web into a Global Data Space", + "https://aws.amazon.com/fr/blogs/machine-learning/introduction-to-amazon-sagemaker-object2vec/": "Introduction to Amazon SageMaker Object2Vec\u00a0 AWS Machine Learning Blog", + "http://news.bbc.co.uk/2/hi/business/10124807.stm": "BBC News - Shares hit by German short-selling ban", + "http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr": "[2010.02194] Self-training Improves Pre-training for Natural Language Understanding", + "http://www.semanlink.net/doc/2019/08/make_delegation_work_in_python_": "Make Delegation Work in Python \u00b7 fast.ai", + "http://www.semanlink.net/doc/2020/08/le_philosophe_bernard_stiegler_": "Le philosophe Bernard Stiegler est mort \u00e0 l\u2019\u00e2ge de 68\u00a0ans", + "http://aarkangel.wordpress.com/2007/04/15/je-suis-un-chef-noir-%E2%80%93-heart-of-darkness/": "Je suis un chef noir \u2013 Heart of Darkness", + "http://confluence.atlassian.com/display/DOC/Installing+the+Confluence+EAR-WAR+edition": "Installing the Confluence EAR-WAR edition - Confluence", + "http://www.greenpeace.org/france/fr/campagnes/nucleaire/Nucleaire--au-coeur-du-reacteur--liveblog/": "Nucl\u00e9aire : au coeur du r\u00e9acteur \u2013 liveblog Greenpeace France", + "http://www.semanlink.net/doc/2021/04/event_camera": "Event camera", + "https://sgfin.github.io/files/cheatsheets/Python_cheatsheet_pandas.pdf": "pandas cheat sheet", + 
"http://www.semanlink.net/doc/2020/10/site_archeologique_de_bura_un": "Site arch\u00e9ologique de Bura - UNESCO World Heritage Centre", + "http://rdfa.info/2010/05/27/newsweek-using-rdfa/": "Newsweek using RDFa", + "http://esw.w3.org/topic/SparqlCalendarDemo": "SparqlCalendarDemo", + "https://tools.ietf.org/html/draft-kelly-json-hal-06": "JSON Hypertext Application Language", + "http://www.lemonde.fr/opinions/article/2010/05/25/guerre-aux-paysans-par-herve-kempf_1362750_3232.html": "Guerre aux paysans, par Herv\u00e9 Kempf", + "http://cacm.acm.org/magazines/2016/9/206254-a-new-look-at-the-semantic-web/fulltext": "A New Look at the Semantic Web September 2016 Communications of the ACM", + "http://www.republicain-niger.com/Index.asp?affiche=News_Display.asp&articleid=5043&rub=Arts+et+culture": "Anthologie de la litt\u00e9rature orale songhay-zarma par Mme Fatimata Mounka\u00efla (Ed. L\u2019Harmattan 2008) - Le Republicain-Niger", + "http://www.amiens.iufm.fr/amiens/cahier/biblio/Heredia/Rome.htm#IDH_LA_TREBBIA": "Heredia - Les Troph\u00e9es - La Trebbia : L'aube d'un jour sinistre a blanchi les hauteur Heredia - Les Troph\u00e9es - La Trebbia : L'aube d'un jour sinistre a blanchi les hauteurs", + "http://passeurdesciences.blog.lemonde.fr/2013/11/20/comment-un-arbre-mene-des-fourmis-a-lesclavage/": "Comment un arbre m\u00e8ne des fourmis \u00e0 l\u2019esclavage Passeur de sciences", + "http://bolossdesbelleslettres.tumblr.com/": "Les boloss des Belles Lettres", + "http://species.wikipedia.org/wiki/Main_Page": "Main Page - Wikispecies", + "http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra": "facebookresearch/faiss: A library for efficient similarity search and clustering of dense vectors.", + "https://papers.nips.cc/paper/3708-ranking-measures-and-loss-functions-in-learning-to-rank": "Ranking Measures and Loss Functions in Learning to Rank (2009)", + "https://www.coursera.org/course/usefulgenetics": "Useful Genetics Coursera", + "http://semanticweb.com/semtechbiz-berlin-day-2_b26545": "#SemTechBiz Berlin \u2013 Day 2 - semanticweb.com", + "http://lifehacker.com/5711409/how-to-search-for-hidden-packaged-and-system-files-in-os-x": "How to Search for Hidden, Packaged, and System Files in OS X", + "http://www-128.ibm.com/developerworks/java/library/j-threads3.html": "Threading lightly, Part 3: Sometimes it's best not to share", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/health/4579207.stm": "BBC NEWS - Deep sea weapon against superbug", + "http://m.informationweek.in/software/12-04-09/how_is_ford_using_google_s_prediction_engine_to_build_self_learning_cars.aspx": "InformationWeek \u2013 Software > How is Ford using Google\u2019s prediction engine to build self learning cars", + "http://www.openlinksw.com/weblog/oerling/?id=1515": "Faceted Search: Unlimited Data in Interactive Time", + "http://whc.unesco.org/en/tentativelists/5041/": "Palais du Zarmakoye de Dosso - UNESCO World Heritage Centre", + "http://db.uwaterloo.ca/LDQTut2013/": "Tutorial: Linked Data Query Processing", + "https://www.quora.com/What-is-the-difference-between-gradient-boosting-and-adaboost": "What is the difference between gradient boosting and adaboost? 
- Quora", + "http://swoogle.umbc.edu/about.php": "", + "http://www.semanlink.net/doc/2021/06/guerre_des_drones_la_menace_d": "Guerre des drones : la menace des essaims", + "https://medium.com/@octskyward/the-resolution-of-the-bitcoin-experiment-dabb30201f7#.fluiz2ocl": "The resolution of the Bitcoin experiment \u2014 Medium", + "http://blog.aylien.com/acl-2018-highlights-understanding-representations-and-evaluation-in-more-challenging-settings/": "ACL 2018 Highlights: Understanding Representations and Evaluation in More Challenging Settings - AYLIEN", + "http://www.w3.org/2007/02/turtle/primer/": "RDF Primer \u2014 Turtle version", + "http://www.semanlink.net/doc/2019/08/soja_bresilien_la_meprise_d%E2%80%99e": "Soja br\u00e9silien\u00a0: la m\u00e9prise d\u2019Elisabeth Borne sur les OGM qui \u00ab\u00a0ne sont pas autoris\u00e9s en Europe\u00a0\u00bb", + "http://www.cnes.fr/web/CNES-fr/11630-gp-de-nouvelles-informations-sur-philae-et-rosetta.php": "GP - De nouvelles informations sur Philae et Rosetta - CNES", + "http://www.semanlink.net/doc/2021/08/jeux_olympiques_les_defis_de_": "Jeux olympiques\u00a0: les d\u00e9fis de Paris 2024", + "http://histography.io/": "Histography - Timeline of History", + "http://blog.dbtune.org/post/2010/07/13/First-BBC-microsite-powered-by-a-triple-store": "First BBC microsite powered by a triple-store - DBTune blog", + "http://www.lemonde.fr/les-decodeurs/article/2015/11/15/attentats-le-beau-texte-du-new-york-times-etait-en-fait-un-commentaire_4810332_4355770.html": "Attentats\u00a0: le beau texte du \u00ab\u00a0New York Times\u00a0\u00bb \u00e9tait en fait un\u2026 commentaire", + "http://www.pbs.org/wgbh/nova/neutrino/missing.html": "NOVA The Ghost Particle Case of the Missing Particles PBS", + "http://www.semanlink.net/doc/2019/09/richard_stallman_l_utilisate": "Richard Stallman : \"L'utilisateur doit contr\u00f4ler le programme, pas l'inverse\"", + "http://www.bbc.co.uk/news/world-africa-18657463": "BBC News - Timbuktu shrines damaged by Mali Ansar Dine Islamists", + "http://www.nytimes.com/2015/10/01/opinion/nicholas-kristof-the-most-important-thing-and-its-almost-a-secret.html?_r=0": "The Most Important Thing, and It\u2019s Almost a Secret - The New York Times", + "http://www.semanlink.net/doc/2020/01/the_independent_sur_twitter_": "The Independent sur Twitter : \"Alexandria Ocasio-Cortez grills former Exxon scientists on oil giant's climate change denial\"", + "https://lejournal.cnrs.fr/articles/un-ticket-pour-le-soleil?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530126148": "Un ticket pour le Soleil CNRS Le journal", + "http://www.semanlink.net/doc/2020/12/la_sonde_japonaise_hayabusa_2_a": "La sonde japonaise Hayabusa-2 a rapport\u00e9 des \u00e9chantillons d\u2019ast\u00e9ro\u00efde sur Terre", + "http://drupal.org/node/560326": "RDF export/import should be W3C standard SKOS - here is the patch drupal.org", + "http://danakil.ethiopia.free.fr/dallol.htm": "Le Dallol (photos)", + "http://www.semanlink.net/doc/2021/09/la_decouverte_d%E2%80%99empreintes_huma": "La d\u00e9couverte d\u2019empreintes humaines vieilles de 23\u00a0000\u00a0ans\u00a0r\u00e9\u00e9crit\u00a0l\u2019histoire du peuplement de l\u2019Am\u00e9rique", + "http://www.bbc.co.uk/rd/blog/2014/06/comma-shining-a-light-into-the-archives": "COMMA - shining a light into the archives - Blog - BBC R&D", + "http://lavue.fr/ce-qui-est-arrive-dans-ce-cinema-a-laisse-tout-le-monde-sans-voix-la-campagne-choc/": "Ce qui est arriv\u00e9 dans ce cin\u00e9ma a 
laiss\u00e9 tout le monde sans voix, la campagne choc", + "http://vocab.deri.ie/csp": "CSP - A vocabulary to represent Constraint Satisfaction Problems. DERI Vocabularies", + "http://www.lespetitescases.net/patrimoine-web-de-donnees": "Patrimoine et Web de donn\u00e9es Les petites cases", + "http://schema.org/docs/datamodel.html": "schema.org - Data Model / Mapping to RDFa 1.1", + "http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv": "[2003.03384] AutoML-Zero: Evolving Machine Learning Algorithms From Scratch", + "http://dbpedia.org/docs/": "dbpedia.org - Using Wikipedia as a Web Database", + "http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled": "[1903.04197] Structured Knowledge Distillation for Dense Prediction", + "http://www.deseretnews.com/article/print/705298649/Universities-will-be-irrelevant-by-2020-Y-professor-says.html": "Deseret News Universities will be 'irrelevant' by 2020, Y. professor says", + "http://www.w3.org/DesignIssues/TagLabel.html": "Using labels to give semantics to tags - Design Issues", + "https://neurovenge.antonomase.fr/": "The Revenge of Neurons", + "https://web.stanford.edu/~jurafsky/slp3/16.pdf": "Semantics with Dense Vectors", + "http://www.mindcad.com/": "MindCad", + "http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text": "Unsupervised Learning with Text (AFIA 2019)", + "http://www.w3.org/Submission/CBD/": "CBD - Concise Bounded Description", + "https://twitter.com/yuvalpi/status/1057909000551964673": "Trying to Understand Recurrent Neural Networks for Language Processing (tweets)", + "http://www.lemonde.fr/europe/article/2014/06/26/le-combat-perdu-de-m-cameron-contre-m-juncker_4445641_3214.html": "Le combat perdu de David Cameron contre Jean-Claude Juncker", + "http://www.agora21.org/unesco/7savoirs/": "Edgar Morin : Les sept savoirs n\u00e9cessaires \u00e0 l\u2019\u00e9ducation du futur", + "http://www.greenpeace.fr/zero-deforestation/index.php": "Z\u00e9ro d\u00e9forestation Mobilisez-vous pour la sauvegarde des for\u00eats et du climat", + "http://googlewebmastercentral.blogspot.fr/2015/03/easier-website-development-with-web.html": "Official Google Webmaster Central Blog: Easier website development with Web Components and JSON-LD", + "http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l": "[2004.14545] Explainable Deep Learning: A Field Guide for the Uninitiated", + "http://www.youtube.com/watch?v=aA-gTNxy1rw": "Bayes networks: How to use D-separation - illustrative examples - YouTube", + "https://towardsdatascience.com/outlier-detection-with-isolation-forest-3d190448d45e": "Outlier Detection with Isolation Forest \u2013 Towards Data Science", + "https://newrepublic.com/article/117429/capital-twenty-first-century-thomas-piketty-reviewed": "'Capital in the Twenty-First Century' by Thomas Piketty, reviewed New Republic", + "http://tempsreel.nouvelobs.com/economie/20160212.OBS4574/blockchain-revolution-technologique-ou-mirage.html": "Blockchain\u00a0: r\u00e9volution technologique ou\u00a0mirage ? 
- L'Obs", + "http://www.semanlink.net/doc/2020/07/vice_how_police_secretly_took": "VICE - How Police Secretly Took Over a Global Phone Network for Organised Crime", + "https://www.nytimes.com/2017/10/19/world/asia/myanmar-democracy-rohingya.html?_r=0": "Myanmar, Once a Hope for Democracy, Is Now a Study in How It Fails - The New York Times", + "http://ebiquity.umbc.edu/blogger/index.php?p=261": "EBB: ebiquity blog at UMBC \u00bb SKOS: Simple Knowledge Organization System", + "http://www.heppnetz.de/projects/eclassowl/": "eClassOWL - The Web Ontology for Products and Services", + "http://www.der-mo.net/ASADO/": "der-mo.net - Moritz Stefaner - Asado", + "https://www.lemonde.fr/idees/article/2019/05/06/biodiversite-l-humanite-face-a-ses-responsabilites_5458837_3232.html": "Biodiversit\u00e9\u00a0: l\u2019humanit\u00e9 face \u00e0 ses responsabilit\u00e9s", + "https://www.lemonde.fr/afrique/article/2018/07/13/en-ethiopie-la-guerre-du-teff-aura-bien-lieu_5331059_3212.html": "En Ethiopie, la guerre du teff aura bien lieu", + "https://www.youtube.com/watch?v=sD0NjbwqlYw&feature=youtu.be": "Visualizing the Riemann zeta function and analytic continuation - YouTube", + "http://www.lemonde.fr/big-browser/article/2016/07/12/les-medias-dans-l-ere-de-la-politique-post-verite_4968559_4832693.html": "Les m\u00e9dias dans l\u2019\u00e8re \u00ab\u00a0de la politique post-v\u00e9rit\u00e9\u00a0\u00bb", + "http://publicdata.eu/": "PublicData.eu - Europe's Public Data", + "http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu": "NLP Solutions to Streamline Neural Search and Question Answering deepset", + "http://www.semanlink.net/doc/2021/08/q_a_02_bombs_vs_bugs_by_e": "[Q&A #02] Bombs vs. Bugs - by Edward Snowden - Continuing Ed \u2014 with Edward Snowden", + "http://emmeesse.wordpress.com/2006/09/29/folksonomies-e-tagging3/": "Folksonomies e tagging/3 \u00ab emmeesse", + "http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/": "Word2Vec Tutorial - The Skip-Gram Model \u00b7 Chris McCormick", + "http://www.wired.com/science/discoveries/news/2008/01/synthetic_genome": "Scientists Build First Man-Made Genome; Synthetic Life Comes Next", + "http://www.semanlink.net/doc/2021/08/chanson_marisa_monte_beaute_": "Chanson : Marisa Monte, beaut\u00e9 et volupt\u00e9", + "http://www.semanlink.net/doc/2019/08/benedekrozemberczki_awesome_gra": "benedekrozemberczki/awesome-graph-classification: A collection of important graph embedding, classification and representation learning papers with implementations.", + "http://morenews.blogspot.com/2008/04/update-from-www2008.html": "More News: Update from WWW2008", + "http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf": "TUTORIAL: Graph-based Text Representations (SLIDES)", + "http://www.semanlink.net/doc/2019/06/hamiltonian_neural_networks": "Hamiltonian Neural Networks", + "http://comm.semanticweb.org/": "COMM: Core Ontology on Multimedia", + "https://apps.facebook.com/friendsmusicquizz/": "Friends music quizz, social music game", + "http://blog.cryptographyengineering.com/2013/12/how-does-nsa-break-ssl.html": "A Few Thoughts on Cryptographic Engineering: How does the NSA break SSL?", + "https://github.com/tatsy/markdown-it-imsize/issues/5": "Using this plugin in browser? \u00b7 Issue #5 \u00b7 tatsy/markdown-it-imsize", + "http://www.w3.org/2007/OWL/wiki/Different_Kinds_Of_Semantics": "Different Kinds Of Semantics", + "http://www.nytimes.com/2010/11/29/world/29cables.html": "WikiLeaks Archive \u2014 Cables Uncloak U.S. 
Diplomacy - NYTimes.com", + "http://www.djerma.nl/": "The Zarma website, the site about the Zarma (Djerma) language and culture.", + "http://lists.w3.org/Archives/Public/public-esw-thes/2005Jun/0043": "HTTP behaviour for SKOS Concepts from Miles, AJ \\(Alistair\\) on 2005-06-21 (public-esw-thes@w3.org from June 2005)", + "http://stackoverflow.com/questions/32979254/using-word2vec-for-topic-modeling": "Using Word2Vec for topic modeling - Stack Overflow", + "http://developer.apple.com/internet/webcontent/": "Web Content Articles", + "https://medium.com/huggingface/universal-word-sentence-embeddings-ce48ddc8fc3a": "The Current Best of Universal Word Embeddings and Sentence Embeddings (2018)", + "http://beta.slashdot.org/story/200313": "P vs. NP Problem Linked To the Quantum Nature of the Universe - Slashdot", + "http://www.xml.com/pub/a/2005/11/16/introducing-sparql-querying-semantic-web-tutorial.html": "XML.com: Introducing SPARQL: Querying the Semantic Web", + "http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0008.html": "D2R Server publishing the DBLP Bibliography as Linked Data (Semantic Web grows 10%) from Chris Bizer", + "https://www.lemonde.fr/pixels/article/2019/03/12/tim-berners-lee-il-n-est-pas-trop-tard-pour-changer-le-web_5434682_4408996.html": "30 ans du Web\u00a0: \u00ab\u00a0Il n\u2019est pas trop tard pour changer le Web\u00a0\u00bb, affirme Tim Berners-Lee", + "http://ebiquity.umbc.edu/project/html/id/59/": "UMBC eBiquity Project: Bayes OWL", + "http://solutoire.com/2008/06/12/sending-javascript-functions-over-json/": "Sending Javascript Functions Over JSON Solutoire.com", + "http://idealliance.org/proceedings/xtech05/papers/02-07-02/": "How can ontologies help repair your car?", + "http://www.semanlink.net/doc/2020/02/%C2%AB_le_pangolin_tient_il_sa_revan": "\u00ab Le pangolin tient-il sa revanche avec le nouveau coronavirus ? \u00bb", + "http://flickrvision.com/": "flickrvision (beta)", + "http://www.semanlink.net/doc/2020/12/economic_sanctions_as_collectiv": "Economic Sanctions as Collective Punishment: The Case of Venezuela", + "https://pdfs.semanticscholar.org/1f65/6b9c686c1e5db2a4d41f1ce7e270965def3e.pdf": "Improving Topic Models with Latent Feature Word Representations (slides)", + "http://developers.slashdot.org/story/13/12/07/210237/excite-kids-to-code-by-focusing-less-on-coding?utm_source=rss1.0mainlinkanon&utm_medium=feed": "Excite Kids To Code By Focusing Less On Coding - Slashdot", + "http://www.youtube.com/watch?v=87HhuYZePZs": "Niger", + "http://dannyayers.com/2006/04/28/triplestores-virtuosity": "Triplestores! Virtuosity! Marsupials!", + "http://www.semanlink.net/doc/2019/08/web_applications_frameworks_": "Web Applications & Frameworks \u2014 The Hitchhiker's Guide to Python", + "http://www.w3.org/Submission/2006/SUBM-owl11-overview-20061219/": "OWL 1.1 Web Ontology Language Overview", + "http://ascensionsemantica.blogspot.com/2009/09/new-spin-cycle.html": "Ascension Semantica: A New SPIN Cycle", + "http://www.semanlink.net/doc/2020/04/damien_henry_sur_twitter_thi": "Damien Henry sur Twitter : \"This code is so beautifully written, it almost hurts.\"", + "http://bugbrother.blog.lemonde.fr/2009/04/22/comment-contourner-la-cybersurveillance/": "Comment contourner la cybersurveillance ? 
- BUG BROTHER - Blog LeMonde.fr", + "http://www.semanlink.net/doc/2020/07/des_militants_catalans_vises_pa": "Des militants catalans vis\u00e9s par un logiciel espion ultraperfectionn\u00e9", + "http://docs.info.apple.com/article.html?artnum=302983-fr": "About Java 2 Standard Edition (J2SE) 5.0 Release 4", + "http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_": "[1911.06136] KEPLER: A Unified Model for Knowledge Embedding and Pre-trained Language Representation", + "http://www.semanlink.net/doc/2020/07/dr_simon_osindero_sur_twitter_": "Dr Simon Osindero sur Twitter : \"Neat! Transformers as RNNs\"", + "http://smwforum.ontoprise.com/smwforum/index.php/Querying_Semantic_Data_from_ISWC_Wiki_(USB_Version)#Download": "Querying Semantic Data from ISWC Wiki (USB Version) - SMWForum", + "http://createjs.org/": "Create \u2014 A new kind of web editing interface", + "http://www.semanlink.net/doc/2019/10/improving_long_form_question_an": "Improving long-form question answering by compressing search results", + "https://looselyconnected.wordpress.com/2010/11/12/is-oauth-stateless-can-it-work-for-rest/": "Is OAuth Stateless? Can it work for REST? Loosely Connected", + "https://www.wikitribune.com/": "Wikitribune \u2013 Evidence-based journalism", + "http://instagram.com/mirrorsme": "mirrorsme on Instagram", + "http://www.lemonde.fr/biodiversite/article/2018/03/20/les-oiseaux-disparaissent-des-campagnes-francaises-a-une-vitesse-vertigineuse_5273420_1652692.html": "Les oiseaux disparaissent des campagnes fran\u00e7aises \u00e0 une vitesse \u00ab\u00a0vertigineuse\u00a0\u00bb", + "http://www.nuxeo.com/fr/content/download/4453/147878/file/Nuxeo%20Platform%20in%2015%20Minutes.pdf": "Nuxeo Platform in 15 Minutes", + "https://link.springer.com/article/10.1007/s10618-015-0430-1": "Knowledge base completion by learning pairwise-interaction differentiated embeddings SpringerLink (2015)", + "https://www.youtube.com/watch?v=5FFRoYhTJQQ": "Burnistoun S1E1 - Voice Recognition Elevator - ELEVEN! - YouTube", + "http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr": "You CAN Teach an Old Dog New Tricks! On Training Knowledge Graph Embeddings (ICLR 2020)", + "http://fr.groups.yahoo.com/group/SemanticCampParis/": "Yahoo! Groupes\u00a0: SemanticCampParis", + "http://www.semanlink.net/doc/2021/10/project_nera_state_attorneys_g": "Project NERA: State Attorneys General claim Google is planning to turn the internet into a \"walled garden\" - MSPoweruser", + "http://www.semanlink.net/doc/2021/05/how_can_synaptic_plasticity_lea": "How can synaptic plasticity lead to meaningful learning?", + "http://www.semanlink.net/doc/2020/08/graph_representation_learning_b": "Graph Representation Learning Book - Will Hamilton", + "http://www.wired.com/opinion/2013/10/google-in-jeopardy-what-if-watson-beat-the-search-giant/": "Google in Jeopardy: What If IBM's Watson Dethroned the King of Search? 
Wired Opinion Wired.com", + "http://blog.schema.org/2014/04/announcing-schemaorg-actions.html": "Schema.org Actions", + "http://porterhead.blogspot.fr/2013/01/writing-rest-services-in-java-part-6.html": "Code and tech stuff: Writing REST Services in Java: Part 6 Security & Authorization", + "http://virtuoso.openlinksw.com/wiki/main": "OpenLink Virtuoso: Open-Source Edition", + "http://www.wired.com/2015/03/johnny-haglund-the-earth-is-on-fire#slide-1": "This Hellish Underground Fire Has Burned for 100 Years WIRED", + "http://www.w3.org/DesignIssues/Evolution.html": "The Evolution of a specification -- Commentary on Web", + "http://news.bbc.co.uk/2/hi/americas/4808342.stm": "BBC NEWS-Pentagon plans cyber-insect army", + "http://tools.wmflabs.org/reasonator/": "Reasonator", + "http://lists.w3.org/Archives/Public/public-lod/2012Mar/0159.html": "Re: Change Proposal for HttpRange-14 from Jeni Tennison", + "https://twitter.com/radekosmulski/status/1124766298469277696": "Radek Osmulski sur Twitter : \"You would expect a difference in row access times depending on the type of a sparse matrix, but I didn't realize the difference would be so big!", + "http://paleodb.org/cgi-bin/bridge.pl": "The Paleobiology Database", + "http://www.semanlink.net/doc/2020/01/crossing_divides_how_a_social_": "Crossing Divides: How a social network could save democracy from deadlock - BBC News", + "http://www.flickr.com/photos/hyperfp/508241234/": "Gado sur Flickr", + "http://blog.samaltman.com/the-merge": "The Merge - Sam Altman", + "http://www.dfki.uni-kl.de/~horak/mypubs/ConTag.pdf": "ConTag A Tagging System: Linking the Semantic Desktop with Web 2.0", + "http://maximilian.developpez.com/mysql/queryCache/": "Etude pratique du cache de requ\u00eates MySQL", + "https://tryolabs.com/blog/2017/12/12/deep-learning-for-nlp-advancements-and-trends-in-2017/": "Deep Learning for NLP, advancements and trends in 2017 - Tryolabs Blog", + "https://aeon.co/essays/how-the-internet-flips-elections-and-alters-our-thoughts": "How the internet flips elections and alters our thoughts Aeon Essays", + "https://github.com/Linkurious/linkurious.js/wiki/How-to-integrate-with-Angular.js": "How to to integrate Linkurious.js into Angular.js", + "http://www.wired.com/rawfile/2013/09/michael-light-aerial-photos": "Economic Collapse Seen Through Aerial Photos of Abandoned Mansions Raw File Wired.com", + "https://www.youtube.com/watch?v=WKgyI3wK8Ws&list=PLFDDD39192668FEFF": "Marietou - YouTube", + "http://www.semanlink.net/doc/2019/07/l%E2%80%99ile_henderson_paradis_noye_s": "L\u2019\u00eele Henderson, paradis noy\u00e9 sous le plastique", + "https://sigmoidal.io/boosting-your-solutions-with-nlp/": "Natural Language Processing Algorithms (NLP AI) - Sigmoidal", + "http://developer.apple.com/internet/safari/faq.html#": "Safari Developer FAQ", + "http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc": "Advancing Natural Language Processing (NLP) for Enterprise Domains", + "http://www.lemonde.fr/economie/article/2013/03/18/la-competitivite-est-aussi-culturelle_1849760_3234.html": "La comp\u00e9titivit\u00e9 est aussi culturelle", + "http://www.semanlink.net/doc/2021/09/contextualized_topic_models": "Contextualized Topic Models", + "http://www.llf.cnrs.fr/Gens/Abeille/French-Treebank-fr.php": "Corpus arbor\u00e9 pour le fran\u00e7ais / French Treebank", + "http://www.openlinksw.com/blog/~kidehen/?id=1238": "OpenLink Ajax Toolkit (OAT) 2.6 Released!", + 
"http://bugbrother.blog.lemonde.fr/2013/04/24/la-guerre-aux-migrants-a-fait-18-000-morts-au-moins/": "La guerre aux migrants a fait 18 000 morts (au moins) BUG BROTHER", + "http://www.vanityfair.com/society/features/2011/05/top-one-percent-201105?currentPage=all": "Of the 1%, by the 1%, for the 1% Society Vanity Fair", + "http://seaborne.blogspot.com/2008/01/jena-mulgara-example-of-implementing.html": "ARQtick: Jena-Mulgara : example of implementing a Jena graph", + "http://herschel.cea.fr/": "HERSCHEL : lumi\u00e8re sur les mondes enfouis de l'Univers (herschel.cea.fr)", + "https://aip.scitation.org/doi/abs/10.1063/1.5042250": "Overcoming device unreliability with continuous learning in a population coding based computing system (2018 - Journal of Applied Physics)", + "https://stackoverflow.com/questions/12952729/how-to-understand-locality-sensitive-hashing/12967538#12967538": "How to understand Locality Sensitive Hashing? - Stack Overflow", + "http://www.semanticoverflow.com/questions/2518/sparql-more-than-a-query-language": "SPARQL more than a query language! - Semantic Overflow", + "https://news.cnrs.fr/articles/bourbaki-and-the-foundations-of-modern-mathematics": "Bourbaki and the Foundations of Modern Mathematics CNRS News", + "http://neuralnetworksanddeeplearning.com/": "Neural networks and deep learning", + "http://ourcodeworld.com/articles/read/359/top-7-best-markdown-editors-javascript-and-jquery-plugins": "Top 7: Best Markdown editors Javascript and jQuery plugins Our Code World", + "http://internetactu.blog.lemonde.fr/2018/02/10/demanteler-les-gafa/": "D\u00e9manteler les GAFA ? InternetActu", + "http://www.wired.com/wired/archive/8.04/joy.html": "Wired 8.04: Why the future doesn't need us.", + "http://nadbordrozd.github.io/blog/2017/12/05/what-they-dont-tell-you-about-data-science-1/": "What They Don't Tell You About Data Science. 1: You Are a Software Engineer First - DS lore", + "http://en.wikipedia.org/wiki/Bowling_for_Columbine": "Bowling for Columbine", + "http://wiredreach.org/": "WiredReach A Universal Content Sharing Platform", + "http://stackoverflow.com/questions/603765/how-do-i-redirect-from-apache-to-tomcat": "How do I redirect from Apache to Tomcat?", + "http://www.songhay.org/": "SONGHAY.ORG", + "http://scienceblogs.com/startswithabang/2013/01/30/the-solar-storm-of-a-lifetime/": "The Solar Storm of a Lifetime \u2013 Starts With A Bang", + "http://googleblog.blogspot.co.uk/2012/05/introducing-knowledge-graph-things-not.html": "Introducing the Knowledge Graph: things, not strings Official Google Blog", + "http://alignapi.gforge.inria.fr/edoal.html": "EDOAL: Expressive and Declarative Ontology Alignment Language", + "https://www.sciencesetavenir.fr/archeo-paleo/archeologie/decouverte-d-un-rare-cimetiere-d-urnes-funeraires-en-amazonie_127402": "D\u00e9couverte d\u2019un rare cimeti\u00e8re d\u2019urnes fun\u00e9raires en Amazonie", + "http://data.gov.uk/blog/publishing-local-open-data-important-lessons-open-election-data-project": "Publishing Local Open Data - Important Lessons from the Open Election Data project data.gov.uk", + "http://www.semanlink.net/doc/2020/11/this_know_it_all_ai_learns_by_r": "This know-it-all AI learns by reading the entire web nonstop MIT Technology Review", + "http://www.defectivebydesign.org/": "We oppose DRM. 
Defective by Design", + "https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf": "Representations for Language: From Word Embeddings to Sentence Meanings (2017) - Slides", + "http://www.semanlink.net/doc/2021/06/matching_and_mining_in_knowledg": "Matching and mining in knowledge graphs of the Web of data Applications in pharmacogenomics", + "http://dret.net/glossary/grddl": "Definition: GRDDL (Gleaning Resource Descriptions from Dialects of Languages) [Web and XML Glossary]", + "http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank": "[1906.03158] Matching the Blanks: Distributional Similarity for Relation Learning", + "http://www.lemonde.fr/planete/article/2015/04/01/a-la-recherche-des-c-urs-perdus-des-reacteurs-nucleaires-de-fukushima_4607166_3244.html": "A la recherche des c\u0153urs perdus des r\u00e9acteurs nucl\u00e9aires de Fukushima", + "https://amp.theguardian.com/commentisfree/2018/mar/12/climate-change-is-a-disaster-foretold-just-like-the-first-world-war?CMP=share_btn_tw&__twitter_impression=true": "Climate change is a disaster foretold, just like the first world war Jeff Sparrow Opinion The Guardian", + "http://www.revistanossopara.com.br/conteudo.php?edicao=7&indice=71": "Revista Nosso Par\u00e1 - On line", + "http://contrecourant.france2.fr/article.php3?id_article=169": "Les origines du Sida", + "http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi": "python - Scraping: SSL: CERTIFICATE_VERIFY_FAILED - Stack Overflow", + "https://www.theguardian.com/media/2016/jul/12/how-technology-disrupted-the-truth": "How technology disrupted the truth Katharine Viner Media The Guardian", + "http://requirejs.org/": "RequireJS", + "http://stackoverflow.com/questions/231767/the-python-yield-keyword-explained": "iterator - The Python yield keyword explained - Stack Overflow", + "http://www.semanlink.net/doc/2019/06/le_philosophe_et_academicien_mi": "Le philosophe et acad\u00e9micien Michel Serres est mort", + "http://www.semanlink.net/doc/2019/05/introducing_metadata_enhanced_u": "Introducing Metadata Enhanced ULMFiT Novetta Nexus", + "https://devcenter.heroku.com/articles/increasing-application-performance-with-http-cache-headers": "Increasing Application Performance with HTTP Cache Headers Heroku Dev Center", + "http://www.webmonkey.com/2010/09/microdata-html5s-best-kept-secret/": "Microdata: HTML5\u2019s Best-Kept Secret Webmonkey\u00a0 Wired.com", + "https://github.com/omarsar/nlp_overview": "omarsar/nlp_overview: Modern Deep Learning Techniques Applied to Natural Language Processing", + "http://www.lemonde.fr/planete/article/2013/06/30/les-varietes-paysannes-de-ble-en-voie-de-disparition_3436092_3244.html": "Les vari\u00e9t\u00e9s paysannes de bl\u00e9 en voie de disparition", + "https://solid.github.io/dweb-summit-2018/#decentralized-data": "Solid: Empowering people through choice", + "http://banlieue.blog.lemonde.fr/2014/05/25/ce-jeune-prof-de-drancy-qui-voulait-changer-le-monde-et-sa-classe/": "Ce jeune prof de Drancy qui voulait \u00ab\u00a0changer le monde et sa classe\u00a0\u00bb Au centre, la banlieue", + "http://www.lefigaro.fr/reportage/20070817.FIG000000021_bal_lugubre_dans_les_monarchies_africaines.html": "Bal lugubre dans les monarchies africaines", + "http://www.semanlink.net/doc/2020/01/cs224u_natural_language_unders": "CS224U: Natural Language Understanding", + "http://en.wikipedia.org/wiki/Caral": "Caral", + 
"https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4": "Semantic hashing using tags and topic modeling (2013)", + "http://www.lawfareblog.com/2014/10/how-a-blonde-tattooed-texas-girl-became-an-isis-twitter-star/": "Lawfare \u203a How a Blonde Tattooed Texas Girl Became an ISIS Twitter Star", + "http://ceur-ws.org/Vol-717/paper2.pdf": "Using Linked Data to Reduce Learning Latency for e-Book Readers", + "http://wiki.apache.org/incubator/StanbolProposal": "StanbolProposal - Incubator Wiki", + "http://www.semanlink.net/doc/2019/06/papers_acl_2019": "Papers - ACL 2019", + "http://www.semanlink.net/doc/2019/08/graph_transformer_%7C_openreview": "Graph Transformer OpenReview", + "http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs": "[2003.02320] Knowledge Graphs", + "http://dannyayers.com/2006/04/13/jena-user-conference-": "Jena User Conference - programme up", + "https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/FastText_Tutorial.ipynb": "gensim/FastText_Tutorial.ipynb", + "http://meta.wikimedia.org/wiki/Wikidata/Development/RDF": "Wikidata/Development/RDF", + "http://www.semanlink.net/doc/2021/05/simple_unsupervised_keyphrase_e": "Simple Unsupervised Keyphrase Extraction using Sentence Embeddings - ACL Anthology (2018)", + "https://github.com/ansell/aterms": "ATerm library on GitHub", + "http://stackoverflow.com/questions/3738137/javascript-variable-scope-question?rq=1": "jquery - Javascript variable scope question - Stack Overflow", + "http://webseitz.fluxent.com/wiki/HackYourLifeWithAPrivateWikiNotebookGettingThingsDoneAndOtherSystems": "Hack Your Life With A Private Wiki Notebook Getting Things Done And Other Systems - WebSeitz/wiki", + "http://www.semanlink.net/doc/2020/05/thomas_piketty_%C2%AB_apres_la_cri": "Thomas Piketty\u00a0: \u00ab\u00a0Apr\u00e8s la crise, le temps de la monnaie verte\u00a0\u00bb", + "http://www.w3.org/TR/sw-oosd-primer/": "A Semantic Web Primer for Object-Oriented Software Developers", + "http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000506.html": "[goodrelations] gr:ProductFeature", + "http://lists.w3.org/Archives/Public/public-lod/2009May/0252.html": "Guerilla science: what can we do in 10 days?", + "http://internetactu.blog.lemonde.fr/2014/01/31/nous-ne-prenons-pas-assez-au-serieux-les-implications-politiques-des-entreprises-du-numerique/": "Nous ne prenons pas assez au s\u00e9rieux les implications politiques du num\u00e9rique InternetActu", + "https://www2.deloitte.com/content/dam/Deloitte/us/Documents/deloitte-analytics/us-da-2017-deloitte-state-of-cognitive-survey.pdf": "2017 Deloitte State of Cognitive Survey", + "https://doi.org/10.1145/3178876.3186007": "Improving Word Embedding Compositionality using Lexicographic Definitions", + "http://www.paulgraham.com/progbot.html": "Programming Bottom-Up", + "http://www.semanlink.net/doc/2020/01/tech_cliqz": "Tech @ Cliqz", + "https://github.com/shellac/java-rdfa": "shellac/java-rdfa ; github (Damian Steer)", + "http://www.semanlink.net/doc/2020/01/three_myths_of_graph_databases": "Three Myths of Graph Databases", + "http://www.wysigot.com": "Wysigot - browse, capture and monitor the web", + "http://www.semanlink.net/doc/2021/07/les_groupes_sanguins_de_neander": "Les groupes sanguins de Neandertal et Denisova d\u00e9crypt\u00e9s CNRS", + "http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_": "End-to-End Learning with Text & Knowledge Bases (Bhuwan Dhingra PhD 
Thesis)", + "https://internetpolicy.mit.edu/blog-2018-fb-cambridgeanalytica/": "Facebook/Cambridge Analytica: Privacy lessons and a way forward Internet Policy Research Initiative @ MIT", + "http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/drafts/LinkedDataTutorial/": "How to publish Linked Data on the Web?", + "http://developers.facebook.com/docs/reference/plugins/like": "Facebook - Like button", + "http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-06.pdf": "Querying the Web of Interlinked Datasets using VOID Descriptions", + "http://papers.cnl.salk.edu/PDFs/23%20Problems%20in%20Systems%20Neuroscience%202005-2921.pdf": "23 problems in systems neuroscience", + "https://www.infoq.com/articles/apache-shiro": "Application Security With Apache Shiro", + "http://www.bobdylan.com/songs/rolling.html": "Bob Dylan: Like a Rolling Stone", + "http://www.wired.com/2014/12/if-you-think-deep-links-are-a-big-deal-now-just-wait/": "If You Think Deep Links Are a Big Deal Now, Just Wait WIRED", + "http://www.w3.org/2012/ldp/wiki/Main_Page": "Linked Data Platform - wiki", + "http://apassant.net/2014/05/09/sex-and-drugs-and-rocknroll-analysing-the-lyrics-of-the-rolling-stone-500-greatest-songs-of-all-time/": "Sex and drugs and Rock\u2019n\u2019roll: Analysing the lyrics of the Rolling Stone 500 greatest songs of all time Alexandre Passant", + "http://www.google.com/maps?f=d&hl=fr&saddr=170+Rue+de+Lourmel,+75015+Paris,+France+(170+Rue+de+Lourmel)+%4048.839050,2.282307&daddr=2203+NE+Alberta+Portland,+OR+97211&sll=45.401955,-60.178655&sspn=86.629429,150.820313&layer=&ie=UTF8&z=3&om=1": "from: 170 Rue de Lourmel to: Ciao Vito 2203 NE Alberta Portland, OR 97211 - Google Maps", + "http://karpathy.github.io/neuralnets/": "Hacker's guide to Neural Networks", + "http://patterns.dataincubator.org/book/index.html": "Linked Data Patterns", + "https://realpython.com/python-f-strings/#f-strings-a-new-and-improved-way-to-format-strings-in-python": "Python 3's f-Strings: An Improved String Formatting Syntax (Guide) \u2013 Real Python", + "http://ivan-herman.name/2013/03/01/rdfa-1-1-microdata-and-turtle-in-html-now-in-the-core-distribution-of-rdflib/": "RDFa 1.1, microdata, and turtle-in-HTML now in the core distribution of RDFLib Ivan\u2019s private site", + "http://www.w3.org/TR/ldpatch/": "Linked Data Patch Format", + "http://apple.stackexchange.com/questions/265428/how-to-remove-the-could-not-connect-to-the-itunes-store-error-message-when-you": "How to remove the \"Could not connect to the iTunes store\" error message when your network does not allow access to the iTunes Store - Ask Different", + "http://www.ft.com/cms/s/473893dc-ccde-11db-a938-000b5df10621.html": "Gates warns on US immigration curbs", + "https://www.microsoft.com/en-us/research/project/dssm/": "DSSM (\"Deep Semantic Similarity Model\") - Microsoft Research", + "http://www.openlinksw.com/blog/~kidehen/?id=1237": "Injecting Facebook Data into the Semantic Data Web", + "https://fr.slideshare.net/andrewkoo/textrank-algorithm": "How does Textrank work? (slides)", + "http://ivanherman.wordpress.com/2008/11/09/open-archive-initiative-aggregation-vocabulary/": "Open Archive Initiative\u2019s aggregation vocabulary \u00ab Ivan\u2019s private site", + "http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call": "Jade Abbott sur Twitter : \"Calling all African NLPers! 
Goal: Let's publish a paper with NMT baselines for all African languages Slides", + "http://www.lifehacker.com/software/feature/special-geek-to-live-129141.php": "Ten Must-Have Bookmarklets", + "http://www.histoiredelantiquite.net/archeologie-egyptienne/naucratis-ville-grecque-ou-egyptienne-une-question-de-methode/": "Naucratis ville grecque ou \u00e9gyptienne, une question de m\u00e9thode - Art, Arch\u00e9ologie et Antiquit\u00e9", + "http://www.semanlink.net/doc/2019/11/cookbook_vue_js": "Cookbook \u2014 Vue.js", + "http://www.semanlink.net/doc/2019/06/lessons_learned_from_applying_d": "Lessons Learned from Applying Deep Learning for NLP Without Big Data", + "http://www.google.com/fusiontables/Home/": "Google Fusion Tables - Gather, visualize, and share data tables online", + "http://www.bubblecode.net/fr/2013/03/10/comprendre-oauth2/": "Comprendre OAuth2 \u00ab BubbleCode by Johann Reinke", + "http://www.cs.vu.nl/~pmika/swc-2008/DBpedia%20Mobile-A%20Location-Aware%20Semantic%20Web%20Client-DBpediaMobile.pdf": "DBpedia Mobile (ISWC2008 presentation)", + "http://www.nytimes.com/2012/05/07/opinion/krugman-those-revolting-europeans.html": "Paul Krugman: Those Revolting Europeans - NYTimes.com", + "http://www.wildml.com/2018/02/introduction-to-learning-to-trade-with-reinforcement-learning/": "Introduction to Learning to Trade with Reinforcement Learning \u2013 WildML", + "http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1653": "SPARQL guide for the Javascript Developer", + "http://fgiasson.com/blog/index.php/2009/04/29/rdf-aggregates-and-full-text-search-on-steroids-with-solr/": "RDF Aggregates and Full Text Search on Steroids with Solr at Frederick Giasson\u2019s Weblog", + "http://www.nytimes.com/2010/07/27/business/global/27iht-renuke.html?_r=1&pagewanted=print": "Nuclear Energy Loses Cost Advantage - NYTimes.com", + "https://en.wikipedia.org/wiki/Serial_Mom": "Serial Mom", + "https://api-platform.com/docs/distribution/": "API Platform: Creating your First API with API Platform, in 5 Minutes", + "http://www.theguardian.com/world/2015/jun/13/pope-francis-intervention-transforms-climate-change-debate?CMP=share_btn_tw": "Explosive intervention by Pope Francis set to transform climate change debate The Guardian", + "http://planb.nicecupoftea.org/archives/001293.html": "Pigsty - a Firefox extension for RDF galleries", + "https://arxiv.org/abs/1710.06632": "[1710.06632] Towards a Seamless Integration of Word Senses into Downstream NLP Applications", + "http://passeurdesciences.blog.lemonde.fr/2015/08/06/comment-les-abeilles-vaccinent-leurs-petits/": "Comment les abeilles vaccinent leurs petits Passeur de sciences", + "http://www.lemonde.fr/opinions/article/2009/05/12/il-est-stupide-d-aller-contre-internet-avec-baton-casque-et-ciseaux-par-eric-rochant_1192030_3232.html": "Il est stupide d'aller contre Internet avec b\u00e2ton, casque et ciseaux, par Eric Rochant", + "http://www.lemonde.fr/international/article/2015/04/04/une-video-de-l-ei-montre-le-saccage-de-la-cite-antique-d-hatra-en-irak_4609703_3210.html": "Une vid\u00e9o de l'EI montre le saccage de la cit\u00e9 antique d'Hatra, en Irak", + "http://duolingo.com/": "Duolingo Apprends gratuitement l'anglais, l'espagnol, l'allemand, le portuguais et l'italien", + "http://apassant.net/blog/2009/05/18/sparcool-supports-jsonp-callbacks": "SPARCool supports JSONP callbacks Alexandre Passant", + "http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection": "[1707.00306] Variable Selection Methods for Model-based Clustering", 
+ "http://www.ldodds.com/blog/archives/000219.html": "Lost Boy: Fun with Jena Rules", + "http://money.cnn.com/2006/07/13/pf/rfid_passports/index.htm?cnn=yes": "Technologists object to U.S. RFID passports", + "http://ebiquity.umbc.edu/v2.1/blogger/index.php?p=222": "Finding RDF instance data with Swoogle", + "http://www.dlib.org/dlib/january06/guy/01guy.html": "Folksonomies: Tidying up Tags?", + "http://www.lemonde.fr/economie/article/2014/01/20/les-dangers-de-la-montee-des-inegalites-au-menu-du-forum-de-davos_4350849_3234.html": "Les dangers de la mont\u00e9e des in\u00e9galit\u00e9s au menu du Forum de Davos", + "http://www.sciencemag.org/news/2016/10/europe-attempts-mars-landing": "Europe attempts Mars landing Science AAAS", + "http://bibd.uni-giessen.de/gdoc/2002/uni/d020057.pdf": "Computer-based expert system to optimize the water supply for modern irrigation systems in selected regions in Egypt", + "http://www.newscientistspace.com/article.ns?id=dn9337": "New Scientist - Antimatter and dark matter are new probe's prey", + "http://www.lemonde.fr/opinions/article/2008/07/07/les-fichiers-policiers-et-la-grenouille-ebouillantee-par-luc-bronner_1067157_3232.html": "Les fichiers policiers et la grenouille \u00e9bouillant\u00e9e, par Luc Bronner", + "https://redis.io/": "Redis", + "http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/": "NG4J - Named Graphs API for Jena", + "https://arxiv.org/abs/1704.05358": "[1704.05358] Representing Sentences as Low-Rank Subspaces", + "https://arxiv.org/abs/1902.10618": "[1902.10618] Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition", + "http://www.semanlink.net/doc/2019/11/voiture_a_hydrogene_un_record": "Voiture \u00e0 hydrog\u00e8ne : un record de distance et un d\u00e9but de fili\u00e8re industrielle", + "http://www.slideshare.net/gkellogg1/jsonld-and-mongodb": "JSON-LD and MongoDB", + "http://developer.apple.com/tools/rubyonrails.html": "Using Ruby on Rails for Web Development on Mac OS X", + "http://www.bbc.co.uk/news/science-environment-12851772": "BBC News - Stone tools 'demand new American story'", + "http://www.thenation.com/article/164497/capitalism-vs-climate?page=full": "Capitalism vs. 
the Climate The Nation", + "https://www2018.thewebconf.org/proceedings/": "PROCEEDINGS \u2013 The Web Conference in Lyon", + "http://www.lemonde.fr/international/article/2015/02/26/une-video-montre-l-etat-islamique-ravageant-un-musee-en-irak_4584260_3210.html": "Les images du mus\u00e9e de Mossoul saccag\u00e9 par des djihadistes en Irak", + "http://www.newsweek.com/id/157516": "Large Hadron Collider May Explain Atom's Mysteries Newsweek.com", + "https://news.mit.edu/2018/machines-learn-language-human-interaction-1031": "Machines that learn language more like kids do MIT News", + "http://www.esa.int/SPECIALS/Herschel/index.html": "ESA - Herschel", + "https://dev.to/reactiveconf/why-i-believe-graphql-will-come-to-replace-rest": "Why I believe GraphQL will come to replace REST", + "http://www.lemonde.fr/sciences/article/2012/05/20/la-difficile-ascension-vers-la-resolution-d-un-probleme-mathematique_1704410_1650684.html": "La difficile ascension vers la r\u00e9solution d'un probl\u00e8me math\u00e9matique", + "http://semwebcentral.org": "SemWebCentral - SemWebCentral Home Page", + "http://pingthesemanticweb.com/": "Ping the Semantic Web.com - Share your RDF data with the World!", + "http://apilama.com/2016/01/05/apis-and-linked-data-a-match-made-in-heaven/": "APIs and Linked Data: A match made in Heaven APILama", + "http://jersey.java.net/nonav/apidocs/latest/jersey/com/sun/jersey/api/container/filter/UriConnegFilter.html": "UriConnegFilter (jersey-bundle 1.6 API)", + "http://www.w3.org/TR/2012/WD-json-ld-syntax-20120712/": "JSON-LD Syntax 1.0", + "http://www.lemonde.fr/mode/article/2013/08/27/sur-les-vetements-le-tattoo-est-tabou_3467040_1383317.html": "Sur les v\u00eatements, le \"tattoo\" est tabou", + "http://spasche.net/openinbrowser/": "Open in Browser Extension", + "http://barcamp.org/SemanticCampParis": "BarCamp wiki / SemanticCampParis", + "http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le": "[1802.07044] The Description Length of Deep Learning Models", + "http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr": "Weak Supervision: The New Programming Paradigm for Machine Learning", + "https://fr.wikipedia.org/wiki/Culture_Yamna": "Culture Yamna \u2014 Wikip\u00e9dia", + "http://www.wired.com/science/planetearth/news/2008/01/gm_insects": "Engineered Mosquitoes Could Wipe Out Dengue Fever", + "http://milicicvuk.com/blog/2014/08/26/can-json-and-rdf-be-friends/": "Can JSON and RDF be friends?", + "http://billchambers.me/tutorials/2015/01/14/python-nlp-cheatsheet-nltk-scikit-learn.html": "Python NLP - NLTK and scikit-learn", + "http://www.newscientisttech.com/article.ns?id=mg19025566.400": "New Scientist Tech - The irresistible rise of cybersex", + "http://www.semanlink.net/doc/2021/06/nandan_thakur_sur_twitter_ho": "Nandan Thakur sur Twitter : \"how to create sentence-embeddings when little or zero in-domain training data is available\"", + "http://lists.apple.com/archives/java-dev/2006/Aug/msg00325.html": "file to uri to file madness", + "http://lists.w3.org/Archives/Public/public-lod/2010May/0010.html": "replacing email with atom and foaf+ssl", + "http://techcrunch.com/2012/05/09/move-over-harvard-and-mit-stanford-has-the-real-revolution-in-education/": "Move Over Harvard And MIT, Stanford Has The Real \u201cRevolution In Education\u201d TechCrunch", + "http://manu.sporny.org/category/json-ld/": "JSON-LD The Beautiful, Tormented Machine", + "http://www.bbc.co.uk/news/science-environment-17436365": "BBC News - The strange new craft of making life from 
scratch", + "http://www.yworks.com/en/products_yfiles_about.htm": "yFiles - Java Graph Layout and Visualization Library", + "http://www.hyperorg.com/blogger/mtarchive/ibm_shows_delicious_for_the_en.html": "Joho the Blog: IBM shows del.icio.us for the enterprise, and more", + "http://www.youtube.com/watch?v=yp8AjMBG87g": "Google I/O 2013 - From Structured Data to the Knowledge Graph - YouTube", + "http://helper.ipam.ucla.edu/publications/dlt2018/dlt2018_14506.pdf": "Convolutional Neural Networks on Graphs", + "http://semanticweb.com/keynote-video-and-updates-from-the-amsterdam-fire-department_b32566#bart-keynote": "Keynote Video and Updates from the Amsterdam Fire Department - semanticweb.com", + "http://www.semanlink.net/doc/2021/08/a_saint_rogatien_les_cancers_p": "A Saint-Rogatien, les cancers p\u00e9diatriques alimentent la suspicion sur les pollutions de l\u2019environnement", + "http://www.nationalgeographic.com/magazine/2017/09/holland-agriculture-sustainable-farming/": "This Tiny Country Feeds the World", + "http://delivery.acm.org/10.1145/3200000/3191554/p1171-sopek.html?ip=37.71.228.186&id=3191554&acc=OPEN&key=4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E6D218144511F3437&__acm__=1524690407_a6f0908759ebcbdcb90ed0cfa942743c": "GraphChain \u2013 A Distributed Database with Explicit Semantics and Chained RDF Graphs", + "http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s": "UKPLab/sentence-transformers: Sentence Embeddings with BERT & XLNet", + "http://www.theguardian.com/politics/2014/jun/25/david-cameron-jean-claude-juncker-eu-summit": "David Cameron faces defeat in Juncker row as EU summit begins The Guardian", + "http://www.meetup.com/paris-web-of-data/events/19464221/?a=md1p_grp&rv=md1p": "Rencontre avec Aldo Gangemi et Valentina Presutti - Les rencontres du Web de donn\u00e9es (Paris) - Meetup", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4505516.stm": "BBC NEWS Ancient drought 'changed history' BBC NEWS Science/Nature Ancient drought 'changed history'", + "http://osxdaily.com/2012/05/22/install-wget-mac-os-x/": "Install wget in Mac OS X Without Homebrew or MacPorts", + "http://www.linux.com/feature/144853": "Linux.com :: Nepomuk and KDE to introduce the semantic desktop", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5196362.stm": "BBC NEWS Tut's gem hints at space impact", + "http://marc.info/?l=tomcat-user&m=121762788714431&w=2": "'Re: request parameters mishandle utf-8 encoding' - MARC", + "http://foreignpolicy.com/2015/09/14/this-is-your-brain-this-is-your-brain-as-a-weapon-darpa-dual-use-neuroscience/": "This Is Your Brain. This Is Your Brain as a Weapon. Foreign Policy", + "http://hardware.slashdot.org/story/11/12/31/2022225/best-software-for-putting-lectures-online?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29": "Best Software For Putting Lectures Online? 
- Slashdot", + "http://www.nytimes.com/2013/03/17/opinion/sunday/how-beer-gave-us-civilization.html?_r=0": "How Beer Gave Us Civilization - NYTimes.com", + "https://www.bbc.com/news/world-europe-47941794": "Notre-Dame cathedral: Firefighters tackle blaze in Paris - BBC News", + "http://www.slideshare.net/lanthaler/from-strings-to-things-to-a-web-of-services": "From Strings to Things to a Web of Services", + "http://www.lemonde.fr/idees/article/2010/08/17/l-amour-de-soi-et-la-haine-des-autres_1399704_3232.html": "L'amour de soi et la haine des autres", + "http://www.webdavsystem.com/ajax/programming/cross_origin_requests": "Cross-Origin Requests (CORS) in Internet Explorer, Firefox, Safari and Chrome", + "https://www.w3.org/community/hydra/wiki/Collection_Design": "Collection Design - Hydra Community Group", + "https://eng.uber.com/deep-neuroevolution/": "Welcoming the Era of Deep Neuroevolution - Uber Engineering Blog", + "http://news.bbc.co.uk/1/hi/sci/tech/4857868.stm": "BBC NEWS 'More genes' needed to make life", + "http://link.springer.com/chapter/10.1007%2F978-3-319-11964-9_17": "Analyzing Schema.org - Peter F. Patel-Schneider - ISWC 2014", + "https://github.com/linkeddata/rdflib.js/": "Linked Data API for JavaScript", + "http://www.mkbergman.com/1773/big-structure-at-the-nexus-of-knowledge-bases-the-semantic-web-and-artificial-intelligence/": "Big Structure: At The Nexus of Knowledge Bases, the Semantic Web and Artificial Intelligence AI3:::Adaptive Information", + "http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers": "The Fishbowl: HTTP Conditional Get for RSS Hackers", + "http://www.xml.com/lpt/a/2005/12/21/json-dynamic-script-tag.html": "XML.com: JSON and the Dynamic Script Tag: Easy, XML-less Web Services for JavaScript", + "https://fr.wikipedia.org/wiki/Guerre_du_P%C3%A9loponn%C3%A8se": "Guerre du P\u00e9loponn\u00e8se \u2014 Wikip\u00e9dia", + "https://www.tulevaisuustalo.fi/en/articles/basic-income-new-universalism/": "Basic income and the new universalism - Tulevaisuustalo", + "https://www.quora.com/How-does-one-apply-deep-learning-to-time-series-forecasting": "How does one apply deep learning to time series forecasting? 
- Quora", + "http://net.tutsplus.com/tutorials/ruby/how-to-install-ruby-on-a-mac/": "How to install Ruby on Mac Nettuts+", + "http://www-128.ibm.com/developerworks/xml/library/j-sparql/": "Search RDF data with SPARQL (and Jena)", + "http://www.semanlink.net/doc/2020/01/nlp_s_clever_hans_moment_has_ar": "NLP's Clever Hans Moment has Arrived", + "http://www.semanlink.net/doc/2021/09/has_ai_found_a_new_foundation_": "Has AI found a new Foundation?", + "http://www.wired.com/reviews/2012/10/infotainment-systems/?pid=2696&viewall=true": "Infotainment Systems Product Reviews Wired.com", + "https://docs.google.com/spreadsheet/ccc?key=0ArCcY-3xSj-vdGVnYVM5bWViVHhlLUlkUTNISUdmYmc#gid=0": "Programme S\u00e9minaire EDF Web S\u00e9mantique", + "https://towardsdatascience.com/a-short-introduction-to-nlp-in-python-with-spacy-d0aa819af3ad": "A short introduction to NLP in Python with spaCy \u2013 Towards Data Science", + "https://global.handelsblatt.com/politics/germany-mass-surveillance-social-credit-china-big-data-886786": "Warning: Germany edges toward Chinese-style rating of citizens", + "http://rubenverborgh.github.io/WebFundamentals/web-apis/#": "Web APIs", + "https://www.youtube.com/watch?v=GBPZAQfs6JY": "Acoustic Africa, \"Mayole\" (Africa Festival 2013)", + "http://news.bbc.co.uk/2/hi/science/nature/8027269.stm": "BBC NEWS Africa's genetic secrets unlocked", + "http://www.mkbergman.com/962/structured-web-gets-massive-boost/": "Structured Web Gets Massive Boost \u00bb AI3:::Adaptive Information", + "http://gigaom.com/2013/11/03/three-reasons-why-the-semantic-web-has-failed/": "Three reasons why the Semantic Web has failed \u2014 Tech News and Analysis", + "https://github.com/Graphity/graphity-client/blob/master/src/main/webapp/static/org/graphity/client/xsl/rdfxml2json-ld.xsl": "An XSLT stylesheet transforming Jena's RDF/XML format to JSON-LD.", + "http://www.liberation.fr/monde/2015/02/17/au-niger-les-predicateurs-remplissent-le-vide-laisse-par-l-etat_1204555": "\u00abAu Niger, les pr\u00e9dicateurs remplissent le vide laiss\u00e9 par l'Etat\u00bb - Lib\u00e9ration", + "http://rterp.wordpress.com/2012/03/16/stamping-version-number-and-build-time-in-properties-file-with-maven/": "Stamping Version Number and Build Time in a Properties File with Maven Rob's Blog", + "http://www.lemonde.fr/economie/article/2010/11/23/de-la-grece-a-l-irlande-des-strategies-economiques-illusoires_1443521_3234.html": "De la Gr\u00e8ce \u00e0 l'Irlande, des strat\u00e9gies \u00e9conomiques illusoires", + "https://stackoverflow.com/questions/9663918/how-can-i-tag-and-chunk-french-text-using-nltk-and-python?rq=1": "How can I tag and chunk French text using NLTK and Python? - Stack Overflow", + "http://blog-s.greenpeace.fr/uploads/2009/12/affiche.jpg": "Greenpeace: z\u00e9ro d\u00e9forestation d'ici 2020", + "http://www.decafbad.com/twiki/bin/view/Main/AgentFrank": "AgentFrank - Main - Wiki - 0xDECAFBAD", + "http://lists.w3.org/Archives/Public/public-lod/2013Jul/0096.html": "Re: Schema.org Autodiscovery? 
from Dan Brickley on 2013-07-12 (public-lod@w3.org from July 2013)", + "http://www.aclweb.org/anthology/N15-1099": "A Word Embedding Approach to Predicting the Compositionality of Multiword Expressions (2015)", + "http://www.semanlink.net/doc/2019/07/the_heroes_of_chernobyl": "The Heroes of Chernobyl", + "http://blog.heppresearch.com/2014/03/24/json-ld-finally-google-honors-invisible-data-for-seo/": "JSON-LD: Finally, Google Honors Invisible Data for SEO The Hepp Research Blog on Marketing with Data", + "http://www.hackdiary.com/archives/000070.html": "hackdiary: Using Wikipedia and the Yahoo API to give structure to flat lists", + "http://www.semanlink.net/doc/2020/07/l%E2%80%99etat_de_l_archeologie_au_nige": "L\u2019\u00e9tat de l'arch\u00e9ologie au Niger: Azania: Archaeological Research in Africa (2009)", + "http://bnode.org/blog/2009/06/12/commontag-too-complicated": "CommonTag too complicated? - benjamin nowack's blog", + "http://semwebdev.keithalexander.co.uk/blog/posts/abductive-template": "Abductive reasoning, template generation, and RDF-in-HTML", + "http://www.mimul.com:80/pebble/default/2007/11/24/1195909680000.html": "semanlink \uc18c\uac1c - Mimul's Developer World", + "http://www.boingboing.net/2005/11/01/hollywood_after_the_.html": "Boing Boing: Hollywood after the Anal. Hole again", + "http://www.meetup.com/paris-web-of-data/calendar/15099450/": "Comment r\u00e9concilier le SI legacy et le Web par le Web s\u00e9mantique ? - Les rencontres du Web de donn\u00e9es (Paris) - Meetup", + "http://www.semanlink.net/doc/2019/12/la_nouvelle_scene_de_la_cuisine": "La nouvelle sc\u00e8ne de la cuisine br\u00e9silienne", + "http://tomheath.com/papers/bizer-heath-berners-lee-ijswis-linked-data.pdf": "Linked Data - The Story So Far", + "http://jondavidjohn.com/javascript-closure-explained-using-events/": "Javascript closure explained using events", + "http://www.zotero.org/": "Zotero - The Next-Generation Research Tool", + "http://www.semanlink.net/doc/2019/08/sebastian_ruder_sur_twitter_": "Sebastian Ruder sur Twitter : \"In the second part of the NLP and speech processing session @DeepIndaba, @alienelf presents her journey and work on machine translation for African languages with @LauraMartinus #DLIndaba2019\"", + "http://www.aaai.org/AITopics/html/welcome.html": "Welcome to AI Topics (American Association for Artificial Intelligence)", + "https://neo4j.com/blog/navigate-knowledge-spring-neo4j/?utm_content=buffer0812e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Navigate All the Knowledge with Spring + Neo4j", + "http://aclweb.org/anthology/Q16-1002": "Learning to Understand Phrases by Embedding the Dictionary (2016)", + "http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i": "Building a sentence embedding index with fastText and BM25 by David Mezzetti Towards Data Science", + "http://news.yahoo.com/s/ap/20060525/ap_on_hi_te/honda_robot": "Honda says brain waves control robot", + "http://www.nytimes.com/2014/01/07/opinion/nocera-will-digital-networks-ruin-us.html": "Will Digital Networks Ruin Us? - NYTimes.com", + "http://news.bbc.co.uk/2/hi/science/nature/2975862.stm": "BBC NEWS Science/Nature When humans faced extinction", + "https://www.ina.fr/video/AFE85008572/a-t-on-oublie-ou-mene-la-croix-gammee-video.html": "A t'on oubli\u00e9 o\u00f9 m\u00e8ne la croix gamm\u00e9e ? 
- Vid\u00e9o Ina.fr", + "http://benlog.com/2008/06/19/dont-hash-secrets/": "Don\u2019t Hash Secrets Benlog", + "http://appuirwanda.free.fr/article.php3?id_article=20": "Rom\u00e9o Dallaire, le dernier des justes", + "https://www.theguardian.com/commentisfree/2018/apr/29/in-charge-our-own-data-personal-information-facebook-scandal?utm_source=esp&utm_medium=Email&utm_campaign=GU+Today+main+NEW+H+categories&utm_term=273132&subid=8643697&CMP=EMCNEWEML6619I2": "Who should hold the keys to our data? Nigel Shadbolt and Roger Hampson Opinion The Guardian", + "http://news.bbc.co.uk/1/hi/sci/tech/4102780.stm": "BBC NEWS Science/Nature World's dry regions set to expand", + "http://www.semanlink.net/doc/2021/06/nils_reimers_sur_twitter_how": "Nils Reimers sur Twitter : \"How to train state-of-the-art sentence embeddings?\"", + "http://www.readwriteweb.com/archives/the_best_tools_for_visualization.php": "The Best Tools for Visualization - ReadWriteWeb", + "http://www.cambridgesemantics.com/2008/09/sparql-by-example/": "SPARQL By Example (Lee Feigenbaum)", + "http://www.bbc.co.uk/science/horizon/1999/nasca.shtml": "BBC - Science & Nature - Cahuachi: The Lost City of Nasca", + "http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0168.html": "tagtriples from Phil Dawes", + "http://milan2.free.fr/leopardPhpSQL/index.html": "Initialiser PHP et MySQL avec L\u00e9opard 10.5", + "http://www.paulgraham.com/hiring.html": "Hiring is Obsolete", + "http://www.lemonde.fr/idees/article/2013/12/20/enseigner-est-une-science_4338294_3232.html": "Enseigner est une science", + "http://en.wikipedia.org/wiki/Anton_Wilhelm_Amo": "Anton Wilhelm Amo - Wikipedia, the free encyclopedia", + "https://stackoverflow.com/questions/15087322/how-to-predict-a-continuous-value-time-from-text-documents": "How to predict a continuous value (time) from text documents? - Stack Overflow", + "http://www.html5rocks.com/en/tutorials/es6/promises/": "JavaScript Promises: There and back again - HTML5 Rocks", + "http://triplify.org/": "triplify: expose semantics", + "http://www.semanlink.net/doc/2020/09/laura_deming_sur_twitter_is_": "Laura Deming sur Twitter : Is there a good reason why many basic laws of physics are linear or quadratic (for example, F=ma), not much more complex?", + "http://www.lemonde.fr/pixels/article/2018/04/25/le-web-a-developpe-des-resistances-antibiotiques-a-la-democratie_5290627_4408996.html": "\u00ab\u00a0Le Web a d\u00e9velopp\u00e9 des r\u00e9sistances antibiotiques \u00e0 la d\u00e9mocratie\u00a0\u00bb", + "http://www.lemonde.fr/afrique/article/2013/02/04/le-niger-aux-avant-postes-de-la-menace-islamiste-au-sahel_1826764_3212.html": "Le Niger, aux avant-postes de la menace islamiste au Sahel", + "http://internetactu.blog.lemonde.fr/2017/11/25/reinventer-la-programmation/": "R\u00e9inventer la programmation ? 
InternetActu", + "http://www.xml.com/pub/a/2005/11/09/fixing-ajax-xmlhttprequest-considered-harmful.html": "XML.com: Fixing AJAX: XMLHttpRequest Considered Harmful", + "https://de.wikipedia.org/wiki/Die_M%C3%B6rder_sind_unter_uns": "Die M\u00f6rder sind unter uns (les assassins sont parmi nous)", + "http://www.huma-num.fr/service/nakala": "NAKALA TGIR Huma-Num", + "http://www.dbis.informatik.hu-berlin.de/fileadmin/research/papers/posters/2012_wwwposter_hartig.pdf": "Populating Personal Linked Data Caches using Context Models [PDF]", + "http://socialmedia.net/2010/07/27/linked-data-an-introduction": "Linked Data: An Introduction Navigating New Horizons", + "http://www.semanlink.net/doc/2019/12/subspace_clustering_towards_d": "Subspace clustering - Towards Data Science", + "http://moodle.org/": "Moodle.org: open-source community-based tools for learning", + "http://data.worldbank.org/": "Data The World Bank", + "https://gephi.org/": "Gephi, an open source graph visualization and manipulation software", + "http://www.mail-archive.com/public-lod@w3.org/msg07612.html": "Google's structured seach talk / Google squared UI", + "http://forum.quechoisir.org/divulgation-donnees-personnelles-par-numericable-t11122.html": "Arnaque et Divulgation donn\u00e9es personnelles par Num\u00e9ricable : Internet - Forum Que Choisir", + "http://scikit-learn.org/stable/auto_examples/index.html#": "scikit-learn documentation: General examples", + "http://data.nytimes.com/": "New York Times - Linked Open Data", + "http://www.vivement-lundi.com/vivement_lundi/Le_petit_blanc.html": "Le petit blanc \u00e0 la cam\u00e9ra rouge", + "http://idle.slashdot.org/story/12/04/25/1519208/travelling-salesman-thriller-set-in-a-world-where-pnp?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29": "Travelling Salesman, Thriller Set In a World Where P=NP - Slashdot", + "https://rominirani.com/2015/07/31/docker-tutorial-series-part-7-data-volumes/": "Docker Tutorial Series : Part 7 : Data Volumes iRomin", + "https://www.newscientist.com/article/dn28687-new-species-of-human-may-have-shared-our-caves-and-beds/": "New species of human may have shared our caves \u2013 and beds New Scientist", + "http://inkdroid.org/journal/2013/01/05/fielding-notes/": "Usually when [...] 
building an application the only thing that lasts forever is the data, at least if you\u2019re lucky", + "http://labs.unwieldy.net/moowheel/": "MooWheel: a javascript connections visualization library", + "http://www.openlinksw.com/virtuoso/FAQ/index.htm": "OpenLink Universal Integration Middleware - Virtuoso Product Family - FAQ", + "http://www.semanlink.net/doc/2020/01/thomas_wolf_sur_twitter_i_li": "Thomas Wolf sur Twitter : \"I liked the LSH attention in the reformer...\"", + "http://en.wikipedia.org/wiki/Mulholland_Drive_(film)": "Mulholland Drive", + "http://www.mitpressjournals.org/doi/pdf/10.1162/COLI_a_00239": "Computational Linguistics and Deep Learning", + "http://news.bbc.co.uk/1/hi/technology/4685231.stm": "BBC NEWS A future full of hopes and fears", + "http://openjdk.java.net/projects/jdk7/features/": "JDK 7 Features", + "http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/": "France Inter > Les \u00e9missions > Sur les \u00e9paules de Darwin", + "http://www.nytimes.com/2010/10/07/science/07bees.html?_r=1&ref=global-home": "Honeybee Killer Found by Army and Entomologists - NYTimes.com", + "http://stackoverflow.com/questions/6352208/how-to-solve-plugin-execution-not-covered-by-lifecycle-configuration-for-sprin%20--%3E": "eclipse - How to solve \"Plugin execution not covered by lifecycle configuration\" for Spring Data Maven Builds - Stack Overflow", + "https://brinxmat.wordpress.com/2015/10/28/json-ld-currently/": "JSON-LD, currently\u2026 Brinxmat's blog", + "http://news.bbc.co.uk/2/hi/science/nature/8115148.stm": "BBC NEWS Science & Environment 'Misty caverns' on Enceladus moon", + "https://github.com/ijkilchenko/Fuzbal": "GitHub - ijkilchenko/Fuzbal: Chrome extension: Gives Ctrl+F like find results which include non-exact (fuzzy) matches using string edit-distance and GloVe/Word2Vec. Also searches by regular expressions.", + "http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html": "TechnicaLee Speaking: SPARQL Calendar Demo: A SPARQL JavaScript Library", + "https://stats.stackexchange.com/questions/164876/tradeoff-batch-size-vs-number-of-iterations-to-train-a-neural-network": "Tradeoff batch size vs. 
number of iterations to train a neural network - Cross Validated", + "http://www.opencms.org/en/": "OpenCms, the Open Source Java Web Content Management System / CMS", + "https://www.meetup.com/fr-FR/Paris-NLP/events/242014884/?comment_table_id=493219381&comment_table_name=event_comment": "Paris NLP Meetup #6", + "http://ebiquity.umbc.edu/papers/select/search/html/613a353a7b693a303b643a303b693a313b643a303b693a323b733a303a22223b693a333b733a31313a22756e6365727461696e7479223b693a343b643a303b7d/": "UMBC eBiquity - Publications - Probabilistic Framework for Semantic Web - OWL and Bayes Networks UMBC eBiquity - Publications - Probabilistic Framework for Semantic Web - OWL and Bayesian Networks", + "https://blog.codecentric.de/en/2012/05/writing-lightweight-rest-integration-tests-with-the-jersey-test-framework/": "Writing lightweight REST integration tests with the Jersey Test Framework", + "https://futurism.com/how-automation-is-going-to-redefine-what-it-means-to-work/": "How Automation is Going to Redefine What it Means to Work", + "http://www.nissaba.nl/nisphp/viewtopic.php?start=50&t=64": "nissaba - paleolitische en neolithische vrouwenbeeldjes", + "http://www.semanlink.net/doc/2021/02/au_kirghizistan_l%E2%80%99indesirable_": "Au Kirghizistan, l\u2019ind\u00e9sirable pr\u00e9sence de la Chine", + "http://www.mkbergman.com/?p=240": "Models of Semantic Interoperability \u00bb AI3:::Adaptive Information", + "https://groups.google.com/forum/#!topic/json-ld/55WBF7NWQSE": "Recommended mimetype for JSON-LD payload? - Google Groups", + "http://identity.mozilla.com/post/57712756801/persona-makes-signing-in-easy-for-gmail-users": "Identity at Mozilla", + "http://www4.wiwiss.fu-berlin.de/is-group/snorql/": "SPARQL Explorer for http://dbpedia.org/sparql", + "http://searchengineland.com/bing-says-goodbye-to-bing-shopping-hello-product-search-with-rich-captions-product-ads-170524": "Bing Says Goodbye To Bing Shopping, Hello Product Search With Rich Captions & Product Ads", + "http://danbri.org/words/2010/07/09/557": "Subject classification and Statistics", + "http://www.semanlink.net/doc/2019/11/les_mille_et_une_connexions_de_": "Les mille et une connexions de l'Afrique m\u00e9di\u00e9vale CNRS Le journal", + "https://quantdare.com/what-is-the-difference-between-bagging-and-boosting/": "What is the difference between Bagging and Boosting?", + "http://blog.schema.org/2012/11/good-relations-and-schemaorg.html": "schema blog: Good Relations and Schema.org", + "http://www.semanlink.net/doc/2020/11/jsomers_net_%7C_i_should_have_lov": "jsomers.net I should have loved biology", + "http://www.netvibes.com/": "www.netvibes.com Netvibes", + "http://www.mkbergman.com/986/umbel-services-part-2-full-text-faceted-search/": "UMBEL Services, Part 2: Full-text, Faceted Search \u00bb AI3:::Adaptive Information", + "http://web.archive.org/web/19990203112043/http://www.hypersolutions.fr/mmm/continent6/": "Le 6eme continent", + "http://www.businessinsider.com/why-programmers-work-at-night-2013-1": "Why Programmers Work At Night - Business Insider", + "http://www.semanlink.net/doc/2020/01/pandoc": "Pandoc", + "http://www.inf.unibz.it/~franconi/dl/course/": "DESCRIPTION LOGICS course - Enrico Franconi", + "http://www.biodiversitylibrary.org/About.aspx": "Biodiversity Heritage Library", + "http://lists.w3.org/Archives/Public/www-tag/2002Apr/0235.html": "Re: FW: draft findings on Unsafe Methods (whenToUseGet-7) from Roy T. 
Fielding on 2002-04-23", + "http://semanticweb.com/studio-ousia-envisions-world-semantic-augmented-reality_b42189": "Studio Ousia Envisions A World Of Semantic Augmented Reality - Semanticweb.com", + "http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/": "Why capitalism has nothing to do with supply and demand Making Sen$e PBS NewsHour", + "http://evernote.com": "Evernote Rappelez-vous tout avec Evernote", + "http://www.semanlink.net/doc/2020/09/nucleaire_les_nouvelles_deriv": "Nucl\u00e9aire\u00a0: les nouvelles d\u00e9rives de l\u2019EPR en Finlande pourraient co\u00fbter cher \u00e0 l\u2019Etat fran\u00e7ais", + "http://combat.blog.lemonde.fr/2014/04/25/comment-jai-humilie-loic-pietri-champion-du-monde-de-judo/": "Comment j\u2019ai humili\u00e9 Lo\u00efc Pietri, champion du monde de judo Au tapis !", + "http://arxiv.org/abs/0807.4145": "[0807.4145] Une suite de matrices sym\u00e9triques en rapport avec la fonction de Mertens", + "https://arxiv.org/abs/1405.4053": "[1405.4053] Distributed Representations of Sentences and Documents", + "https://arxiv.org/abs/1710.04099": "[1710.04099] Wembedder: Wikidata entity embedding web service", + "http://www.semanlink.net/doc/2019/07/l%E2%80%99arbre_zombie_qui_pourrait_cha": "L\u2019arbre zombie qui pourrait changer notre regard sur la for\u00eat", + "http://www.rfi.fr/actufr/articles/075/article_42263.asp": "RFI - Abdoulaye Maga - Invit\u00e9 Afrique RFI - 6 400 pi\u00e8ces arch\u00e9ologiques rapatri\u00e9es la semaine derni\u00e8re au Niger.", + "http://www.semanlink.net/doc/2021/10/sphinx": "Sphinx", + "https://www.ipbes.net/news/media-release-worsening-worldwide-land-degradation-now-%E2%80%98critical%E2%80%99-undermining-well-being-32": "Worsening Worldwide Land Degradation Now \u2018Critical\u2019, Undermining Well-Being of 3.2 Billion People IPBES", + "http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers#awesm=~oAB0wn16tHAyrB": "Why No One Trusts Facebook To Power The Future \u2013 ReadWrite", + "https://pdfs.semanticscholar.org/2a3f/862199883ceff5e3c74126f0c80770653e05.pdf": "Knowledge Graph Embedding by Translating on Hyperplanes (2014)", + "http://worrydream.com/Engelbart/": "A few words on Doug Engelbart", + "http://www.semanlink.net/doc/2020/01/pfliu_nlp_named_entity_recognit": "pfliu-nlp/Named-Entity-Recognition-NER-Papers: An elaborate and exhaustive paper list for Named Entity Recognition (NER)", + "http://greasemonkey.mozdev.org": "mozdev.org - greasemonkey", + "http://news.bbc.co.uk/1/hi/sci/tech/4697883.stm": "BBC NEWS Science/Nature Europe moving in 'R&D slow lane'", + "https://datascience.stackexchange.com/questions/12649/how-to-calculate-the-mini-batch-memory-impact-when-training-deep-learning-models": "tensorflow - How to calculate the mini-batch memory impact when training deep learning models? 
- Data Science Stack Exchange", + "http://check.rdfa.info/": "check.rdfa", + "https://github.com/tensorflow/models": "GitHub - tensorflow/models: Models and examples built with TensorFlow", + "http://sourceforge.net/projects/delicious-java": "SourceForge.net: del.icio.us Java API", + "http://lists.w3.org/Archives/Public/ietf-http-wg/2008AprJun/0017.html": "Sketch of a simple authentication protocol", + "http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_using_spa.html": "TechnicaLee Speaking: using OPTIONAL to select a single value based on an ordered list of predicates which might appear in the data", + "http://www.semanlink.net/doc/2021/08/ai_in_africa_teaching_a_bot_to": "AI in Africa: Teaching a bot to read my mum's texts - BBC News (2020)", + "http://en.wikipedia.org/wiki/Slime_mold": "Slime mold - Wikipedia, the free encyclopedia", + "https://discussions.apple.com/thread/7366807": "Can you correct skew or perspective with \u2026 - Apple Community", + "https://twitter.com/asutoshsahoo_97/status/1062407088436113409": "Asutosh Sahoo sur Twitter : \"Slides of my seminar on ULMFIT\"", + "http://digg.com/": "digg", + "https://www.w3.org/community/hydra/wiki/Main_Page": "Hydra Community Group - wiki", + "http://wiki.surf.nl/display/vp/4.3+'InContext'+Visualiser": "'InContext' Visualiser", + "http://hive.apache.org/": "Welcome to Hive!", + "http://ourworld.compuserve.com/homepages/rajm/twooesef.htm": "Facts versus Factions: the use and abuse of subjectivity in scientific research - PART 2", + "http://www-128.ibm.com/developerworks/xml/library/x-tiphttp.html": "Tip: Use XML directly over HTTP for Web services (where appropriate)", + "http://www.semanlink.net/doc/2020/04/coronavirus_38_days_when_brita": "Coronavirus: 38 days when Britain sleepwalked into disaster News The Sunday Times", + "http://www.hrw.org/french/reports/rw94/rwandamai94.htm#_1_7": "", + "http://local.google.com/maps?q=2203+NE+Alberta+St+Portland&spn=0.007102,0.006802&t=k&hl=fr": "Ciao Vito (google maps)", + "http://mobile.nytimes.com/2015/09/21/business/a-huge-overnight-increase-in-a-drugs-price-raises-protests.html?_r=0": "Drug Goes From $13.50 a Tablet to $750, Overnight - NYTimes.com", + "http://archeo.blog.lemonde.fr/2014/01/28/les-resurrections-de-la-peste-de-lantiquite-au-moyen-age/": "Les r\u00e9surrections de la peste, de l\u2019Antiquit\u00e9 au Moyen \u00c2ge Dans les pas des arch\u00e9ologues", + "https://www.oreilly.com/ideas/machine-learning-in-the-wild": "Machine learning in the wild: A bridge between robust control and reinforcement learning.", + "https://motherboard.vice.com/en_us/article/qv955p/3d-human-mini-brain-made-of-stem-cells-can-live-for-months": "This 3D Human 'Mini-Brain' Is Made of Stem Cells and Can Live For Months - Motherboard", + "http://bitworking.org/news/193/Do-we-need-WADL": "Do we need WADL? 
BitWorking", + "http://erik.eae.net/archives/2005/05/27/18.55.22/": "JS, Encoding and XMLHttpRequest", + "https://www.theatlantic.com/technology/archive/2016/04/cashless-society/477411/": "How a Cashless Society Could Embolden Big Brother - The Atlantic", + "https://ruben.verborgh.org/blog/2017/12/20/paradigm-shifts-for-the-decentralized-web/": "Paradigm shifts for the\u00a0decentralized\u00a0Web Ruben Verborgh", + "http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf": "A Spectrometry of Linked Data", + "https://guillaumegenthial.github.io/serving.html": "Serving a model with Flask", + "http://www.semanlink.net/doc/2019/08/knowledge_graph_reasoning_papers": "Knowledge Graph Reasoning Papers", + "http://fr.wikipedia.org/wiki/Roger_II_de_Sicile": "Roger II de Sicile", + "http://videolectures.net/andrew_ng/": "Andrew Ng - Computer Science Department, Stanford University - VideoLectures.NET", + "http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2_TimBL_v3.html#(1)": "Creating, Deploying and Exploiting Linked Data", + "http://www.lemonde.fr/sciences/article/2013/01/24/andre-choulika-sculpteur-de-genes_1822310_1650684.html": "Andr\u00e9 Choulika, sculpteur de g\u00e8nes", + "http://www.manageability.org/blog/stuff/java-open-source-social-network": "Manageability - Open Source Social Networking Applications Written in Java", + "http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_and_con": "Microsoft Concept Graph and Concept Tagging Release", + "http://whc.unesco.org/fr/list/1225": "Ruines de Lorop\u00e9ni - UNESCO World Heritage Centre", + "http://www.lemonde.fr/technologies/article/2013/06/12/pourquoi-stocker-toutes-nos-vies-sur-des-serveurs-aux-etats-unis_3428857_651865.html": "\"Pourquoi stocker toutes nos vies sur des serveurs aux Etats-Unis ?\"", + "http://www.openlinksw.com/blog/~kidehen/?id=1224": "Enterprise 0.0, Linked Data, and Semantic Data Web", + "http://dannyayers.com/2006/05/31/system-one-screencast": "System One screencast", + "https://www.quora.com/Who-is-doing-interesting-NLP-research-for-low-resource-languages": "Who is doing interesting NLP research for low resource languages? - Quora", + "https://github.com/antoniogarrote/rdfstore-js#readme": "rdfstore-js", + "https://www.w3.org/TR/2016/PR-dwbp-20161215/": "Data on the Web Best Practices", + "http://www.w3.org/TR/rdf-sparql-protocol/": "SPARQL Protocol for RDF", + "http://robohub.org/finding-perfection-in-the-imperfect-applying-darwinian-neuro-evolution-to-robotics/": "Finding perfection in the imperfect: Applying Darwinian neuro-evolution to robotics Robohub", + "http://www.activesplit.com/rdfa_project/": "RDFa demonstracija", + "https://lejournal.cnrs.fr/articles/un-univers-sans-matiere-noire-0?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1528437890": "Un Univers sans mati\u00e8re noire? CNRS Le journal", + "http://www.ericandthetrip.com/": "Eric & The Trip. 
Plus qu'un voyage, le votre.", + "http://motherboard.vice.com/blog/one-last-interview-with-barnaby-jack": "Hacker Barnaby Jack Dies Days Before Revealing His Pacemaker Exploit: One Last Interview Motherboard", + "http://mlexplained.com/2017/12/29/attention-is-all-you-need-explained/": "Paper Dissected: \"Attention is All You Need\" Explained Machine Learning Explained", + "http://www.ibm.com/developerworks/xml/library/x-skostaxonomy/index.html": "Improve your taxonomy management using the W3C SKOS standard", + "http://faviki.com/": "Faviki - Social bookmarking tool using smart semantic Wikipedia (DBpedia) tags", + "http://fusiongrokker.com/post/using-git-as-your-subversion-client": "Using Git as your Subversion Client \u2022 FusionGrokker", + "http://code.google.com/p/rdfquery/issues/detail?id=32": "Issue 32 - rdfquery - memory leaks/perfomance issues caused by $.rdf#databanks member (array) - RDF processing in your browser - Google Project Hosting", + "http://www.w3.org/2001/sw/sweo/public/BusinessCase/Slides.pdf": "Business Case for Semantic Web Technologies (slides)", + "http://en.wikipedia.org/wiki/Attack_the_Block": "Attack the Block", + "http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved": "[2010.12566] DICT-MLM: Improved Multilingual Pre-Training using Bilingual Dictionaries", + "http://dannyayers.com/2006/04/12/decent-outliner-still": "Decent outliner still wanted", + "http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur": "Link Prediction with Graph Neural Networks and Knowledge Extraction", + "http://www.offconvex.org/2019/03/19/CURL/": "Contrastive Unsupervised Learning of Semantic Representations: A Theoretical Framework \u2013 Off the convex path (2019-03)", + "https://aeon.co/essays/the-hard-problem-of-consciousness-is-a-distraction-from-the-real-one": "The hard problem of consciousness is a distraction from the real one Aeon Essays", + "http://www.nltk.org/": "NLTK (Natural Language Toolkit) - home", + "http://stko.geog.ucsb.edu/sw2022/sw2022_paper11.pdf": "The rise of the verb", + "http://simile.mit.edu/mail/BrowseList?listName=Linking%20Open%20Data&by=thread&from=11547": "[Linking-open-data] Forms in the web of data", + "http://www.digg.com/": "digg", + "http://nuevomedio.com/tepuy-vox/tepuys.html": "Tepuys y sus Saltos de Agua", + "http://winch5.blog.lemonde.fr/2013/08/23/commencez-a-lire-le-livre-de-winch5/": "Commencez \u00e0 lire le Livre de Winch5 Winch 5", + "http://journal.dajobe.org/journal/": "Dave Beckett - Journalblog", + "http://www.mnot.net/blog/2005/08/13/excel_microformats": "mnot\u2019s Web log: Adding Semantics to Excel with Microformats and GRDDL", + "http://bugbrother.blog.lemonde.fr/2012/12/17/calais-un-etat-policier-en-situation-de-guerre/": "Calais : un \u00ab\u00a0\u00c9tat policier en situation de guerre\u00a0\u00bb BUG BROTHER", + "http://semtechbiznyc2012.semanticweb.com/sessionPop.cfm?confid=68&proposalid=5022": "Watson Goes Back to School - And what it tells us about the evolving role of semantic technology", + "http://www.armadillo.fr": "Armadillo - Gestion documentaire multim\u00e9dia", + "http://blog.someben.com/2013/01/hashing-lang/#footnote3": "Hashing Language Some Ben?", + "http://schema.rdfs.org/faq.html": "schema.rdfs.org - FAQ", + "https://www.researchgate.net/post/Are_there_any_efficient_stemming_algorithms_in_addition_to_the_Porter_and_Carry_algorithms": "Are there any efficient stemming algorithms in addition to the Porter and Carry algorithms?", + 
"http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_": "The Natural Language Decathlon: Multitask Learning as Question Answering (slides)", + "https://www.quora.com/What-should-I-do-to-increase-my-skills-in-deep-learning": "What should I do to increase my skills in deep learning? - Quora", + "http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds": "[2004.10151] Experience Grounds Language", + "http://www.semanlink.net/doc/2019/06/kawin_ethayarajh_sur_twitter_": "Kawin Ethayarajh sur Twitter : \"When and why does king - man + woman = queen?\"", + "http://www.ibm.com/developerworks/web/library/wa-rdf/": "The Semantic Web, Linked Data and Drupal, Part 1: Expose your data using RDF", + "http://www.openlinksw.com:80/blog/~kidehen/index.vspx?page=&id=1144": "Hello Data Web (Take 3 - Feel The \"RDF\" Force)", + "http://scikit-learn.org/stable/modules/svm.html": "Support Vector Machines \u2014 scikit-learn documentation", + "https://www.technologyreview.com/s/604087/the-dark-secret-at-the-heart-of-ai/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review": "The Dark Secret at the Heart of AI - MIT Technology Review", + "http://www.cavdar.net/2008/07/21/junit-4-in-60-seconds/": "JUnit 4 in 60 Seconds at cavdar.net", + "https://arxiv.org/abs/1804.04526": "[1804.04526] EventKG: A Multilingual Event-Centric Temporal Knowledge Graph", + "http://simile.mit.edu/exhibit/": "SIMILE Exhibit", + "http://www.sitepoint.com/blogs/2006/03/15/do-you-know-your-character-encodings/": "SitePoint Blogs \u00bb Do you know your character encodings?", + "http://www.semanlink.net/doc/2020/10/evolution_of_a_crop%E2%80%99s_wild_rela": "Evolution of a crop\u2019s wild relative into a weed that includes an herbicide resistance gene", + "http://www.libdems.org.uk/latest_news_detail.aspx?title=New_Politics:_Nick_Clegg%27s_speech_on_constitutional_reform&pPK=2c639a58-0da9-40c2-9ea4-3ac96cc7daa3": "Nick Clegg's speech on constitutional reform The Liberal Democrats: Latest News Detail", + "https://stackoverflow.com/questions/24541786/how-to-debug-stream-map-with-lambda-expressions#": "java - How to debug stream().map(...) with lambda expressions? - Stack Overflow", + "http://lov.okfn.org/dataset/lov/index.html": "(LOV) Linked Open Vocabularies", + "http://cap2018.litislab.fr/slides_AB.pdf": "Teaching Machines to Understand Natural Language (2018)", + "http://science.monstersandcritics.com/news/printer_1168946.php": "Gigantic meteor crater found in Antarctica", + "http://www.slaney.org/malcolm/yahoo/Slaney2008-LSHTutorial.pdf": "Locality sensitive hashing for finding nearest neighbors", + "http://rdf2h.github.io/rdf2h/": "Try RDF2h in your web browser", + "http://www.amnh.org/nationalcenter/youngnaturalistawards/2011/aidan.html": "The Secret of the Fibonacci Sequence in Trees", + "http://commonground.ca/iss/0401150/percy_schmeiser.shtml": "Common Ground - January 2004 - Percy Schmeiser vs. Monsanto by Percy Schmeiser", + "http://blog.xebia.fr/2009/09/15/servlet-3-0-les-3-points-marquants/#Lexcutionasynchronepourlesarch": "Servlet 3.0, les 3 points marquants Blog Xebia France", + "http://maisouvaleweb.fr/dans-la-disruption-allons-nous-devenir-fous-bernard-stiegler-et-lurgence-dentrer-dans-le-neguanthropocene/": "Dans la disruption, allons-nous devenir fous ? 
Bernard Stiegler et l'urgence d'entrer dans le n\u00e9guanthropoc\u00e8ne -", + "http://www.snee.com/bobdc.blog/2007/08/automated-rdfa-output-from-dit.html": "Automated RDFa Output from DITA Open Toolkit - bobdc.blog", + "https://medium.com/octavian-ai/deep-learning-with-knowledge-graphs-3df0b469a61a": "Deep Learning with Knowledge Graphs \u2013 Octavian \u2013 Medium", + "http://capitalism3.com/files/Capitalism_3.0_Peter_Barnes.pdf": "Caitalism 3.0 - A guide to reclaiming the commons", + "http://www.businessweek.com/innovate/content/may2006/id20060504_282582.htm": "A Self-Service Rental Car", + "http://linkeddatafragments.org/in-depth/#tpf": "Triple Pattern Fragment", + "http://www.cnrs.fr/insis/recherche/actualites/2017/04/neurone-artificiel.htm?utm_content=buffer15c9e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "CNRS INSIS - Un neurone artificiel mille fois plus \u00e9conome en \u00e9nergie qu\u2019un neurone biologique", + "https://towardsdatascience.com/gradient-descent-vs-neuroevolution-f907dace010f": "Gradient descent vs. neuroevolution", + "http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know": "raphaelsty/ckb: Contextual knowledge bases", + "http://www.uow.edu.au/arts/sts/bmartin/dissent/documents/AIDS/": "Polio vaccines and the origin of AIDS", + "http://robohub.org/how-friendly-is-your-ai-it-depends-on-the-rewards/": "How friendly is your AI? It depends on the rewards Robohub", + "http://meta.wikimedia.org/wiki/Wikidata/Notes/Data_model_primer": "Wikidata/Data model primer", + "https://stackoverflow.com/questions/15206695/how-is-reader-mode-in-firefox-triggered": "android - How is 'reader mode' in Firefox triggered? - Stack Overflow", + "http://www.semanlink.net/doc/2021/04/cory_doctorow_the_zombie_econo": "Cory Doctorow: The zombie economy and digital arm-breakers", + "https://www.meetup.com/fr-FR/Paris-NLP/events/xzstdqyxmbjc/": "Paris NLP Season 3 Meetup #1 Meetup", + "http://www.arte.tv/fr/videos/046598-000-A/le-sable-enquete-sur-une-disparition": "Le sable - Enqu\u00eate sur une disparition ARTE+7", + "http://www.bbc.com/news/science-environment-43115485": "Ancient Britons 'replaced' by newcomers", + "http://www.manageability.org/blog/stuff/nail-in-soaps-coffin": "More Nails For SOAP's Coffin", + "http://www.semanlink.net/doc/2021/02/linking_entities_with_knowledge": "Linking Entities with Knowledge Graphs by Sigurd Berglann Strise Medium", + "http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d": "[1905.12149] SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver", + "http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/": "Word2Vec Tutorial Part 2 - Negative Sampling \u00b7 Chris McCormick", + "http://phonetics.ucla.edu/appendix/languages/orowin/orowin.html": "Oro-Win", + "http://wiki.blojsom.com/wiki/display/blojsom/blojsom+Quickstart": "blojsom Quickstart", + "http://www.bbc.co.uk/news/science-environment-23288620": "BBC News - Deadly oak disease 'spreading' in UK", + "http://passeurdesciences.blog.lemonde.fr/2013/02/03/physique-la-masse-se-mesure-aussi-en-secondes/": "La masse se mesure aussi en secondes Passeur de sciences", + "http://www.javascriptkit.com/javatutors/loadjavascriptcss.shtml": "Dynamically loading an external JavaScript or CSS file", + "http://bigbrowser.blog.lemonde.fr/2012/09/06/presse-cheetah-le-robot-qui-court-plus-vite-quusain-bolt/": "Cheetah, le robot qui court plus vite qu\u2019Usain Bolt Big Browser", + 
"http://www.lemonde.fr/sciences/article/2014/11/14/philae-debut-du-forage-incertitudes-sur-l-energie-du-robot_4523823_1650684.html": "Philae a pu transmettre les donn\u00e9es de son forage avant de couper le contact", + "http://bugbrother.blog.lemonde.fr/2013/06/05/du-droit-a-violer-la-vie-privee-des-internautes-au-foyer/": "Du droit \u00e0 violer la vie priv\u00e9e des internautes au foyer BUG BROTHER", + "http://demo.citizen-dan.org/conStruct/explorer": " A Community Instance of theOpen Semantic Framework", + "https://query.wikidata.org/": "Wikidata Query Service", + "http://ruder.io/text-classification-tensorflow-estimators/": "Text Classification with TensorFlow Estimators", + "http://www.semanlink.net/doc/2019/10/document_analysis_with_machine_": "Document analysis with machine learning", + "http://www.technorati.com/help/tags.html": "Technorati: Using Technorati Tags", + "http://www.semanlink.net/doc/2019/07/knowledge_graphs_technical_rev": "Knowledge Graphs: Technical Review", + "http://json-ld.org/playground/": "JSON-LD Playground", + "http://linkedopencommerce.com/": "LOC - The Linked Open Commerce Dataspace", + "http://www.openlinksw.com/weblog/oerling/?id=1471": "ISWC 2008: The Scalable Knowledge Systems Workshop", + "http://www.mkbergman.com/?p=457": "A New Constellation in the Linking Open Data (LOD) Sky \u00bb AI3:::Adaptive Information", + "http://bugbrother.blog.lemonde.fr/2012/06/10/je-nai-pas-le-droit-de-lire-le-livre-que-jai-achete/": "Je n\u2019ai pas le droit de lire le livre que j\u2019ai achet\u00e9 BUG BROTHER", + "https://papers.nips.cc/paper/4027-label-embedding-trees-for-large-multi-class-tasks": "Label Embedding Trees for Large Multi-Class Tasks (2010)", + "http://www.w3.org/blog/SW/2008/04/15/report_of_the_uncertainty_reasoning_for_": "W3C Semantic Web Activity News - Report of the \u201cUncertainty Reasoning for the World Wide Web\u201d Incubator Group at W3C", + "http://cs.stanford.edu/people/karpathy/convnetjs/index.html": "ConvNetJS: Deep Learning in your browser", + "http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-12.pdf": "Interacting with the Web of Data through a Web of Inter-connected Lenses", + "https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf": "Romain Vial (Hyperlex) at Paris NLP meetup, slides", + "http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/": "Case Study: KDE 4.0 Semantic Desktop Search and Tagging", + "https://www.wikitribune.com/story/2018/01/05/free_speech/qa-edward-snowden-on-rights-privacy-secrets-and-leaks-in-conversation-with-jimmy-wales/26810/": "Q&A: Edward Snowden on rights, privacy, secrets and leaks in conversation with Jimmy Wales \u2013 Wikitribune", + "http://www.youtube.com/watch?v=o3NJwyzFlTE&playnext=1": "YouTube - Nelson Mandela - Special AKA", + "http://www.semanlink.net/doc/2019/10/humans_store_about_1_5_megabyte_1": "Humans store about 1.5 megabytes of information during language acquisition Royal Society Open Science", + "http://linkedu.eu/devtalk/?p=29": "Showing distribution of Open University course topics with R and SPARQL \u00ab LinkedUp DevTalk", + "http://www.semanlink.net/doc/2021/03/barthez_transformers_4_5_0_de": "BARThez \u2014 transformers 4.5.0.dev0 documentation", + "https://colab.research.google.com": "Colaboratory", + "http://www.semanlink.net/doc/2020/02/enquete_sur_les_usines_d%E2%80%99antibi": "Enqu\u00eate sur les usines d\u2019antibiotiques indiennes, fabriques d\u2019antibior\u00e9sistance (2018)", + "http://poolparty.punkt.at/": "PoolParty \u00bb SKOS 
Thesaurus Management \u2013 Semantic Search \u2013 Linked Data", + "https://arxiv.org/abs/1411.4166": "[1411.4166] Retrofitting Word Vectors to Semantic Lexicons", + "http://pisani.blog.lemonde.fr/pisani/2006/03/aperus_sur_la_t.html": "Transnets, des gadgets aux r\u00e9seaux: Aper\u00e7us sur la toile de demain", + "http://www.guardian.co.uk/technology/2012/apr/18/tim-berners-lee-google-facebook": "Tim Berners-Lee: demand your data from Google and Facebook Technology guardian.co.uk", + "http://www.semanlink.net/doc/2020/06/nikola_ou_quand_le_camion_a_hyd": "Nikola ou quand le camion \u00e0 hydrog\u00e8ne enflamme la Bourse", + "http://www.lemonde.fr/idees/article/2014/09/16/surmonter-la-crise-politique-grace-a-une-economie-de-la-gratuite_4488343_3232.html": "Surmonter la crise politique gr\u00e2ce \u00e0 une \u00e9conomie de la gratuit\u00e9", + "http://www.fiercebiotechit.com/story/big-data-sheds-light-pharmas-small-data-problems/2013-03-27": "Big Data sheds light on pharma's 'Small Data' problems - FierceBiotechIT", + "http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0": "Scientists See Advances in Deep Learning, a Part of Artificial Intelligence - NYTimes.com", + "http://www.semanlink.net/doc/2019/11/new_york_city_election_results_": "New York City election results: Ranked-choice voting ballot initiative passes", + "https://docs.google.com/presentation/d/1AgpzDNp0z5GVrizQOc_CFZOj7UpttwJDd99fg-JliF8/edit#slide=id.p": "State of Wikidata (Wikimania 2013) - Google Drive", + "http://www.figoblog.org/node/2013": "Le droit de ReLIRE Figoblog", + "http://itu.dk/people/sathi/papers/kctob.pdf": "Knowledge Compilation Properties of Tree-of-BDDs", + "https://www.iflscience.com/editors-blog/scientists-have-reconstructed-ancient-greek-music-and-you-can-listen-to-it/all/": "Scientists Have Reconstructed Ancient Greek Music And You Can Listen To It IFLScience", + "http://europa.eu/rapid/pressReleasesAction.do?reference=SPEECH/08/317&format=HTML&aged=0&language=EN&guiLanguage=en": "Neelie Kroes European Commissioner for Competition Policy - Being open about standards", + "http://www.technologyreview.com/view/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/": "Why Neurons Have Thousands of Synapses, A Theory of Sequence Memory in Neocortex - Single Artificial Neuron Taught to Recognize Hundreds of Patterns MIT Technology Review", + "http://www.ina.fr/pub/divers/video/PUB901507050/france-telecom-coupe-du-monde-image-douaniers.fr.html": "France telecom coupe du monde", + "http://blog.thinkphp.de/archives/124-An-alternative-Approach-to-Tagging.html": "An alternative Approach to Tagging - ThinkPHP /dev/blog", + "http://web.archive.org/web/20001010205701/http://www.hypersolutions.fr/MuseeNiamey/MuseeNiamey.ssi": "Projet multimedia et internet au Mus\u00e9e de Niamey", + "http://blog.okfn.org/2012/07/10/announcing-linked-open-vocabularies-lov-enabling-the-vocabulary-commons/": "Announcing: Linked Open Vocabularies (LOV), enabling the vocabulary commons Open Knowledge Foundation Blog", + "http://populaction.com/guinee-mali-la-charte-du-manden-1222-et-si-les-droits-de-lhomme-avaient-ete-inventes-en-afrique/": "Mali-Guin\u00e9e : la Charte du Manden de 1222\u2026 Et si les droits de l\u2019homme avaient \u00e9t\u00e9 invent\u00e9s en Afrique ?", + "http://www.seoskeptic.com/what-schema-org-means-for-seo-and-beyond/": "What Schema.org Means for SEO and Beyond", + 
"http://passeurdesciences.blog.lemonde.fr/2012/09/09/comment-on-peut-mourir-de-trop-se-defendre-contre-une-bacterie/": "Comment on peut mourir de trop se d\u00e9fendre contre une bact\u00e9rie Passeur de sciences", + "http://html5doctor.com/html5-custom-data-attributes/": "HTML5 Custom Data Attributes (data-*) HTML5 Doctor", + "http://developers.google.com/speed/pagespeed/insights/": "PageSpeed Insights", + "http://benalman.com/news/2010/11/immediately-invoked-function-expression/": "Ben Alman \u00bb Immediately-Invoked Function Expression (IIFE)", + "https://www.reddit.com/r/IPython/comments/27zash/can_i_increase_notebook_cell_width_on_wide_screens/": "Can I increase notebook cell width on wide screens? : IPython", + "http://formcept.com/blog/stanbol/": "Apache Stanbol - How to to create an enhancement engine", + "http://www.macintouch.com/": "MacInTouch: timely news and tips about the Apple Macintosh", + "https://www.data.gouv.fr/dataset/emissions-de-co2-et-de-polluants-des-vehicules-commercialises-en-france": "Emissions de CO2 et de polluants des v\u00e9hicules commercialis\u00e9s en France - data.gouv.fr", + "http://dannyayers.com/archives/2005/06/28/im-saying-nothing/": "Danny Ayers, Raw Blog - RSS extensions", + "http://www.html5rocks.com/en/tutorials/developertools/async-call-stack/": "Debugging Asynchronous JavaScript with Chrome DevTools - HTML5 Rocks", + "https://www.nytimes.com/2018/10/24/opinion/caetano-veloso-brazil-bolsonaro.html": "Caetano Veloso: Dark Times Are Coming for My Country - The New York Times", + "http://www.ft.com/cms/s/0/fcfbfd0a-7f64-11dd-a3da-000077b07658.html": "FT.com / Lula\u2019s new lucre: Brazil may keep full control of offshore oil", + "http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/08.pdf": "Reconciling concepts and relations in heterogeneous ontologies", + "http://www.semweb.pro/": "SemWeb.Pro 2011 - January 17-18 Paris (SemWeb.Pro)", + "https://github.com/danja/seki": "danja/seki", + "http://www.fujitsu.com/uk/Images/Linked-data-connecting-and-exploiting-big-data-%28v1.0%29.pdf": "Linked-data-connecting-and-exploiting-big-data-(v1.0).pdf", + "https://github.com/3Top/word2vec-api": "word2vec-api", + "http://www.boxmp3.com.br/167439700/24-bai-o-blues-renato-matos.html": "Bai\u00e3o Blues (Renato Matos - Box MP3 - Baixar m\u00fasicas gr\u00e1tis", + "https://medium.com/analytics-vidhya/deploy-your-first-deep-learning-model-on-kubernetes-with-python-keras-flask-and-docker-575dc07d9e76": "Deploy Your First Deep Learning Model On Kubernetes With Python, Keras, Flask, and Docker", + "http://sparkjava.com/": "Spark Framework: A tiny Java web framework", + "http://en.wikipedia.org/wiki/Double_Indemnity_(film)": "Double Indemnity", + "http://archive.timesonline.co.uk/": "Times online", + "http://www.w3.org/TR/json-ld-api/": "JSON-LD 1.0 Processing Algorithms and API", + "http://static.flickr.com/23/35123655_661c699b9f.jpg": "Euphrasie et Maria sont sur un chameau", + "http://www.viadeo.com/hub/forums/detaildiscussion/?containerId=0022beslm82oak7l&action=messageDetail&messageId=0021ndnqzpanhurn&forumId=00210bj4riztfylb": "Le plus grand projet Open Data / Linked Data fran\u00e7ais bient\u00f4t ouvert", + "http://hal.ccsd.cnrs.fr/ccsd-00017763/en/": "A Hierarchical Database Manager", + "http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l": "[1909.07606] K-BERT: Enabling Language Representation with Knowledge Graph", + "http://www.atmosphere-production.com/ouzbekistan-la-vallee-du-ferghana-details-98.html": "Ouzbekistan la 
vall\u00e9e du Ferghana", + "http://www.semanlink.net/doc/2020/08/srdjan_ostojic_sur_twitter_d": "srdjan ostojic sur Twitter : \"During my physics undergrad, I have never heard of Singular Value Decomposition (SVD). Why?...\"", + "http://brondsema.net/blog/index.php/2006/11/25/javascript_rdfparser_from_tabulator": "Dave Brondsema's Blog - Javascript RDFParser from Tabulator", + "http://web.stanford.edu/class/cs224n/reports/6896582.pdf": "Exploring neural architectures for NER (CS224N 2018)", + "http://linksailor.com/nav": "LinkSailor", + "http://mith.umd.edu/topic-modeling-in-the-humanities-an-overview/": "Topic Modeling in the Humanities: An Overview - Maryland Institute for Technology in the Humanities", + "http://www.meta-environment.org/twiki/view/Meta-Environment/ATerms?skin=print": "The Meta-Environment - ATerms", + "http://www.semanlink.net/doc/2021/09/haystack": "Haystack (deepset)", + "http://www.lemonde.fr/vous/article/2013/01/24/demain-c-est-big-brother-qui-conduit_1822135_3238.html": "Demain, c'est Big Brother qui conduit", + "http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-2-au-quotidien_1132440_3232.html": "Chercher et enseigner \u00e0 l'Universit\u00e9. 2- Au quotidien - Opinions - Le Monde.fr", + "http://moat-project.org/": "MOAT: Meaning Of A Tag", + "http://dossierdoc.typepad.com/descripteurs/2012/05/de-retour-de-websem-pro-2012.html": "Descripteurs: De retour de SemWeb Pro 2012", + "http://www.mkbergman.com/?p=291": "AI3 - Comprehensive Listing of 250 Semantic Web Tools", + "http://www.semanlink.net/doc/2021/07/jeshraghian_snntorch_deep_lear": "jeshraghian/snntorch: Deep learning with spiking neural networks in Python", + "https://staltz.com/the-web-began-dying-in-2014-heres-how.html": "Andr\u00e9 Staltz - The Web began dying in 2014, here's how", + "http://jaranto.blogspot.fr/2012/08/os-x-unable-to-execute-clang-no-such.html": "OS X: unable to execute clang: No such file or directory . Perpetuum Mobile .", + "http://www.semanlink.net/doc/2019/10/who_says_using_rdf_is_hard_": "Who says using RDF is hard?", + "https://www.quora.com/What-are-the-semantic-models-except-word2vec-and-what-are-their-benefits": "what are the pros and cons of the various unsupervised word and sentence/ document embedding models? - Quora", + "http://www.semanlink.net/doc/2020/12/l%E2%80%99hydrogene_tiendra_t_il_ses_pr": "L\u2019hydrog\u00e8ne tiendra-t-il ses promesses ? 
CNRS Le journal", + "https://distill.pub/": "Distill \u2014 Latest articles about machine learning", + "http://data.blog.lemonde.fr/2014/05/10/null-ctrl-lenquete-inquietante-sur-la-securite-informatique-en-norvege/": "\u00ab Null CTRL\u00a0\u00bb, l\u2019enqu\u00eate inqui\u00e9tante sur la s\u00e9curit\u00e9 informatique en Norv\u00e8ge J'ai du bon data", + "http://www.semanlink.net/doc/2020/08/bringing_traditional_ml_to_your": "Bringing traditional ML to your Neo4j Graph with node2vec Dave Voutila", + "http://webusers.xula.edu/jrotondo/Kingdoms/welcome.html": "", + "http://www.semanlink.net/doc/2020/08/the_rna_world_and_the_origins_o": "The RNA World and the Origins of Life - Molecular Biology of the Cell", + "http://www.mkbergman.com/?p=414": "Large-scale RDF Graph Visualization Tools \u00bb AI3:::Adaptive Information", + "http://www.mkbergman.com/?page_id=346": "Sweet Tools (Sem Web) - Simple Version \u00bb AI3:::Adaptive Information", + "http://www2012.wwwconference.org/proceedings/proceedings/p271.pdf": "Factorizing Yago: scalable machine learning for the sw", + "http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s": "Distill our @huggingface zero-shot classifier with your specified class names", + "http://www.dustindiaz.com/css-shorthand/": "CSS Shorthand Guide", + "http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d": "[1912.12510] Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices", + "http://apassant.net/2012/02/20/easy-copy-and-paste-from-the-web-to-latex-with-sparql/": "apassant.net \u00bb Easy \u201ccopy and paste\u201d from the Web to LaTeX with SPARQL", + "http://www.blogmarks.net/tag/rdf": "", + "https://twitter.com/benadida/status/1116200296764436480": "Ben Adida sur Twitter : \"Alright Twitter friends, I want to host a wiki for an open-source project. MediaWiki? Something else? Any host recommendations? 
I need clear editorial control with ease of submitting contributions, even small one-offs.\"", + "http://linkeduniversities.org/lu/": "Linked Universities :: Home", + "http://www.uni-weimar.de/medien/webis/publications/papers/stein_2005a.pdf": "Fuzzy-Fingerprints for Text-Based Information Retrieval", + "http://fgiasson.com/blog/index.php/2007/05/17/free-text-search-on-musicbrainz-literals-using-virtuoso-rdf-views/": "Free text search on Musicbrainz literals using Virtuoso RDF Views at Frederick Giasson\u2019s Weblog", + "http://stefansavev.com/blog/custom-similarity-for-elasticsearch/": "Custom Similarity for ElasticSearch - Algorithms for Big Data", + "http://www.semanlink.net/doc/2021/05/carrot2_search_results_clusteri": "Carrot2 search results clustering engine (online)", + "http://www.wired.com/wiredscience/2012/03/dynamic-mercury-geology/?pid=3477": "Planet Mercury Even Weirder Than We Thought Wired Science Wired.com", + "http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=14215": "[Linking-open-data] Fw: linking geonames concepts to wikipedia and other concept", + "http://www.semanlink.net/doc/2020/04/finally_we_may_have_a_path_to_t": "Finally We May Have a Path to the Fundamental Theory of Physics\u2026 and It\u2019s Beautiful\u2014Stephen Wolfram Writings", + "https://www.wired.com/story/bitcoin-global-warming/": "The Hard Math Behind Bitcoin's Global Warming Problem WIRED", + "http://semanticweb.com/update-semantic-web-emergency-response-systems_b40188": "Semantic Web in Emergency Response Systems \u2013 UPDATE - semanticweb.com", + "https://towardsdatascience.com/a-gentle-introduction-to-graph-neural-network-basics-deepwalk-and-graphsage-db5d540d50b3": "A Gentle Introduction to Graph Neural Network (Basics, DeepWalk, and GraphSage)", + "http://ontologydesignpatterns.org/wiki/Ontology_Design_Patterns_._org_%28ODP%29": "Ontology Design Patterns . org (ODP)", + "http://web.mit.edu/newsoffice/2011/mitx-education-initiative-1219.html": "MIT launches online learning initiative - MIT News Office", + "http://www.nytimes.com/2010/01/28/technology/personaltech/28basics.html?ref=technology": "The Web Way to Learn a Language", +
"http://bugbrother.blog.lemonde.fr/2013/09/11/comment-securiser-son-telephone-mouchard-portable/": "Comment s\u00e9curiser son t\u00e9l\u00e9phone mouchard portable? BUG BROTHER", + "http://www.semanlink.net/doc/2019/10/answering_complex_open_domain_q": "Answering Complex Open-domain Questions at Scale SAIL Blog", + "http://nissannews.com/en-US/nissan/usa/releases/nissan-announces-unprecedented-autonomous-drive-benchmarks": "Nissan Announces Unprecedented Autonomous Drive Benchmarks - Nissan Online Newsroom", + "http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-1.html": "Semantic Web project ideas number 2 (CRM) - bobdc.blog", + "http://www.semanlink.net/doc/2019/07/chinese_vlogger_who_used_filter": "Chinese vlogger who used filter to look younger caught in live-stream glitch - BBC News", + "http://blog.monstuff.com/archives/000252.html": "AJAX Debugging with Greasemonkey", + "http://lists.w3.org/Archives/Public/semantic-web/2006Sep/0090.html": "A URI for your Favourite Pub: httpRange-14 Question from T.Heath on 2006-09-21 (semantic-web@w3.org from September 2006)", + "http://www.semanlink.net/doc/2019/11/jean_rouch_l%E2%80%99ethnologue_cineas": "Jean Rouch, l\u2019ethnologue-cin\u00e9aste CNRS Le journal", + "http://saturn.jpl.nasa.gov/news/features/feature20080326.cfm": "Cassini-Huygens: Enceladus: A Perspective on Life on Enceladus: A World of Possibilities", + "http://www.bbc.co.uk/news/science-environment-23274175": "BBC News - Will synthetic biology become a GM-style battleground?", + "http://www.openrdf.org/": "openRDF.org openRDF.org, home of Sesame", + "http://www.w3.org/blog/SW/2011/04/15/rdf_working_group_meets_face_to_face_in_": "W3C Semantic Web Activity News - RDF Working Group meets face-to-face in Amsterdam", + "http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i": "Learning to Tag OOV Tokens by Integrating Contextual Representation and Background Knowledge (ACL Anthology 2020)", + "https://www.microsoft.com/en-us/research/blog/bringing-low-resource-languages-spoken-dialects-play-semi-supervised-universal-neural-machine-translation/": "Bringing low-resource languages and spoken dialects into play with Semi-Supervised Universal Neural Machine Translation - Microsoft Research", + "http://ressources.ingall-niger.org/documents/livres/lectures/niger_litterature_1991.pdf": "Litt\u00e9rature nig\u00e9rienne", + "http://www.youtube.com/watch?v=5KnfJibBx7c": "4*400m, championnats du monde 1993, Paris", + "http://www.semanlink.net/doc/2021/10/les_nouvelles_frontieres_du_viv": "Les nouvelles fronti\u00e8res du vivant CNRS Le journal", + "http://en.wikipedia.org/wiki/Tampopo": "Tampopo", + "https://www.kaggle.com/mschumacher/using-fasttext-models-for-robust-embeddings": "Using FastText models (not vectors) for robust embeddings Kaggle", + "http://romiawasthy.blogspot.fi/2014/06/configure-solr-suggester.html": "Romi's blog: Configure Solr -Suggester", + "http://dataconomy.com/2016/01/understanding-dimensionality-reduction/": "Understanding Dimensionality Reduction and its Applications - Dataconomy", + "http://link.springer.com/chapter/10.1007/978-1-4419-7665-9_5": "Enhancing Enterprise 2.0 Ecosystems Using Semantic Web and Linked Data Technologies:The SemSLATES Approach - Springer", + "http://en.wikipedia.org/wiki/Hausdorff_distance": "Hausdorff distance - Wikipedia, the free encyclopedia", + "http://www.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf": "A tutorial on hidden markov models in speech recognition applications", + "http://blogs.sun.com/bblfish/entry/web_finger_proposals_overview": "Web Finger proposals overview", +
"http://wiki.eclipse.org/EGit/User_Guide#Getting_Started": "EGit/User Guide - Eclipsepedia", + "https://pdfs.semanticscholar.org/e398/d9d7e090a8d6f906b5da59925da212f6bc51.pdf": "Representation learning for very short texts using weighted word embedding aggregation", + "http://www.semanlink.net/doc/2020/03/google_and_http": "Google and HTTP", + "http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_ai_powered_r": "Semantic Scholar AI-Powered Research Tool", + "https://github.com/bergie/noflo": "bergie/noflo - GitHub", + "http://www.semanlink.net/doc/2021/03/new_pipeline_for_zero_shot_text": "New pipeline for zero-shot text classification - \ud83e\udd17Transformers - Hugging Face Forums", + "http://ruben.verborgh.org/blog/2013/12/31/promiscuous-promises/": "Promiscuous promises Ruben Verborgh", + "https://eng.uber.com/mysql-migration/": "Why Uber Engineering Switched from Postgres to MySQL - Uber Engineering Blog", + "http://www.w3.org/Submission/ldbp/": "Linked Data Basic Profile 1.0", + "http://www.semanlink.net/doc/2020/07/awslabs_dgl_ke_package_for_lea": "awslabs/dgl-ke: package for learning large-scale knowledge graph embeddings.", + "http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl": "Pesticides et sant\u00e9\u00a0: les conclusions inqui\u00e9tantes de l\u2019expertise collective de l\u2019Inserm", + "http://searchdatamanagement.techtarget.com/news/4500269406/IBM-Watson-APIs-hold-key-to-broader-cognitive-computing-use": "IBM Watson APIs hold key to broader cognitive computing use", + "https://www.theguardian.com/music/2010/jul/15/gilberto-gil-caetano-veloso-london": "Gilberto Gil and Caetano Veloso in London Music The Guardian", + "https://brinxmat.wordpress.com/2016/03/24/creating-functional-linked-data-solutions/": "Creating functional linked-data solutions Brinxmat's blog", + "http://dalelane.co.uk/blog/?p=3403": "Normalised Discounted Cumulative Gain", + "http://stackoverflow.com/questions/2902427/user-authentication-on-a-jersey-rest-service": "User authentication on a Jersey REST service - Stack Overflow", + "http://www.coconut-palm-software.com/the_visual_editor/?p=25": "The Visual Editor \u00bb Java does Duck Typing", + "https://wit.ai/": "Wit \u2014 Natural language for the Internet of Things", + "http://www.semanlink.net/doc/2019/12/large_memory_layers_with_produc": "Large Memory Layers with Product Keys (poster)", + "http://lists.w3.org/Archives/Public/semantic-web/2014Aug/0078.html": "The ability to automatically upgrade a reference to HTTPS from HTTP from Tim Berners-Lee on 2014-08-22 (semantic-web@w3.org from August 2014)", +
"http://www.semanlink.net/doc/2021/02/une_collaboration_cnrs_atteint_": "Une collaboration CNRS atteint \u00e0 son tour l\u2019avantage quantique ! CNRS Le journal", + "http://fr.academic.ru/pictures/frwiki/78/Nok_sculpture_Louvre_70-1998-11-1.jpg": "Nok sculpture, Louvre", + "http://www.semanlink.net/doc/2020/12/un_meme_gene_a_permis_%C2%AB_d%E2%80%99inven": "Un m\u00eame g\u00e8ne a permis \u00ab d\u2019inventer \u00bb l'h\u00e9moglobine plusieurs fois CNRS", + "http://www.edge.org/3rd_culture/krauss06/krauss06.2_index.html": "Edge: THE ENERGY OF EMPTY SPACE THAT ISN'T ZERO: A Talk with Lawrence Krauss", + "http://www.wired.com/2016/04/mathematician-solves-centuries-old-sphere-problem-higher-dimensions/": "Mathematician Solves the Centuries-Old Sphere Problem in Higher Dimensions WIRED", + "http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0041.html": "[SUMMARY] QuantitativeValue / Units of Measure - Proposal from Alex Milowski on 2013-06-06 (public-vocabs@w3.org from June 2013)", + "http://www.nytimes.com/2016/06/08/business/economy/threatened-by-machines-a-once-stupid-concern-gains-respect.html?_r=0": "Jobs Threatened by Machines: A Once \u2018Stupid\u2019 Concern Gains Respect - The New York Times", + "http://strapdownjs.com/": "Strapdown.js - Instant and elegant Markdown documents", + "http://www.sindicetech.com/": "Sindicetech: enterprise linked data clouds", + "http://jboye.com/blogpost/semantic-technologies-what-is-in-it-for-the-unhappy-cms-customer/": "Semantic Technologies: What is in it for the (unhappy) CMS customer? J. Boye", + "http://wiki.ontoworld.org/index.php/Semantic_Wiki_State_Of_The_Art": "Semantic Wiki State Of The Art - Wiki@OntoWorld", + "http://tartarus.org/martin/PorterStemmer/": "Porter Stemming Algorithm", + "http://www.semanlink.net/doc/2020/10/relation_societe_milieu_en_doma": "Relation soci\u00e9t\u00e9-milieu en domaine sah\u00e9lien au Sud-Ouest du Niger au cours des 4 derniers mill\u00e9naires (Th\u00e8se)", + "http://pouchdb.com/2015/05/18/we-have-a-problem-with-promises.html": "We have a problem with promises", + "http://data.parliament.uk/writtenevidence/committeeevidence.svc/evidencedocument/treasury-committee/the-uks-economic-relationship-with-the-european-union/oral/72053.html": "Oral evidence - The UK's economic relationship with the European Union - 25 Oct 2017", + "http://fauconnier.github.io/": "Some pre-trained word2vec models for French", + "http://www.semanlink.net/doc/2020/03/au_gabon_une_grotte_pourrait_r": "Au Gabon, une grotte pourrait r\u00e9v\u00e9ler des secrets vieux de 700\u00a0ans", + "http://kashori.com/2006/11/claims-about-power-of-uri.html": "Claims About the Power of URI References", + "http://www.fastly.com/blog/best-practices-for-using-the-vary-header/": "Best Practices for Using the Vary Header Fastly - The Next Gen CDN", +
"http://neurosciencenews.com/decision-making-eeg-free-will-3333/": "Do We Have Free Will? The Brain-Computer Duel \u2013 Neuroscience News", + "http://docs.info.apple.com/article.html?artnum=302412": "About Java 2 Platform Standard Edition (J2SE) 5.0 Release 3 for Mac OS X v 10.4.2 or later", + "http://www.eurotexte.fr/100pieges/index.htm": "Les 100 pi\u00e8ges de l'Anglais", + "http://www2013.org/companion/p1253.pdf": "Using SKOS vocabularies for improving Web Search", + "http://www.w3.org/TR/2008/NOTE-swbp-vocab-pub-20080828/": "Best Practice Recipes for Publishing RDF Vocabularies", + "http://weblog.clarkparsia.com/2010/05/26/another-reason-semantic-web-kicks-ass": "Another Reason Semantic Web Kicks Ass\u2014Clark & Parsia: Thinking Clearly", + "http://composing-the-semantic-web.blogspot.com/2010/04/where-owl-fails.html": "WHERE OWL fails", + "http://www.slideshare.net/cygri/investigating-community-implementation-of-the-goodrelations-ontology": "Investigating Community Implementation of the GoodRelations Ontology", + "http://www.panic.com/transmit/index.html": "Panic - Transmit 3 - The next-generation Mac OS X FTP client!", + "https://towardsdatascience.com/lda2vec-word-embeddings-in-topic-models-4ee3fc4b2843": "LDA2vec: Word Embeddings in Topic Models \u2013 Towards Data Science", + "https://threadreaderapp.com/thread/1065841141201989632.html": "Thread by @RespectableLaw: \"There's been a lot of talk about the missionary killed by the natives of North Sentinel Island. They're probably so aggressive because of th [\u2026]\"", + "http://www.norconex.com/serving-autocomplete-suggestions-fast/": "Serving autocomplete suggestions fast!", + "http://www.datasciencecentral.com/profiles/blogs/new-batch-of-machine-learning-resources-and-articles-from-niche?overrideMobileRedirect=1": "New batch of machine learning resources and articles from niche bloggers - Data Science Central", + "http://www.dustindiaz.com/top-ten-javascript/": "Top 10 custom JavaScript functions of all time", + "http://www.semanlink.net/doc/2020/03/one_track_minds_using_ai_for_m": "One-track minds: Using AI for music source separation", + "http://www.youtube.com/watch?v=drhgAfLFG7M": "YouTube - Apple's Knowledge Navigator Video", + "http://www.semanlink.net/doc/2019/12/machine_learning_on_graphs_ne": "Machine Learning on Graphs @ NeurIPS 2019 - ML Review - Medium", + "http://www.developer.com/java/web/article.php/3904871/Top-7-Features-in-Tomcat-7-The-New-and-the-Improved.htm": "Top 7 Features in Tomcat 7: The New and the Improved - Developer.com", + "http://bertails.org/2014/09/20/why-ldpatch": "Why LD-PATCH (Alexandre Bertails)", + "http://www.semanlink.net/doc/2021/03/huggingface_awesome_papers_pap": "huggingface/awesome-papers: Papers & presentation materials from Hugging Face's internal science day", + "https://github.com/zazukoians/rdf-ext": "RDF Interfaces Extension", + "http://blog.newrelic.com/2012/10/09/helpful-javascript-patterns/?utm_source=BLOG&utm_medium=content&utm_content=designpatterns&utm_campaign=RPM&utm_term=JavaScript&mpc=CN-BLOG-RPM-EN-100-Helpful-JavaScript": "Helpful JavaScript Patterns New Relic blog", + "http://www.crockford.com/JSON/": "JSON Home: introducing JSON Introducing JSON", + "http://blog.elliottkember.com/chromes-insane-password-security-strategy": "Chrome\u2019s insane password security strategy \u2022 Elliott Kember", + "http://software.newsforge.com/article.pl?sid=06/05/12/1539231": "Putting MediaWiki to use in an organization", + "https://nlp.h-its.org/bpemb/": "BPEmb: Subword Embeddings", + "https://en.wikipedia.org/wiki/Sentinelese": "Sentinelese - Wikipedia", +
"http://www.readwriteweb.com/archives/semantify_automate_your_semantic_web_seo_in_five_minutes.php": "Semantify - Automate Your Semantic Web SEO in Five Minutes", + "https://lejournal.cnrs.fr/articles/le-bel-avenir-des-biopiles": "Le bel avenir des biopiles CNRS le journal", + "http://dannyayers.com/archives/2005/07/04/skin-deep/": "Danny Ayers, Raw Blog - Taxonomies in OWL", + "http://www.content-space.de/dokuwiki/blog/2008/semanlink_-_semantische_bookmarks": "Semanlink - semantische Bookmarks", + "http://www.w3schools.com/js/default.asp": "JavaScript Tutorial (w3schools.com)", + "http://www.semanlink.net/doc/2020/02/nucleaire_pourquoi_la_central": "Nucl\u00e9aire\u00a0: pourquoi la centrale de Flamanville ne produit plus d\u2019\u00e9lectricit\u00e9 depuis six mois", + "https://www.youtube.com/watch?v=eHGt7z-br5g": "Manu Chao: Te lo digo, te lo canto: FUERA MONSANTO!!", + "http://www.semanlink.net/doc/2019/07/natural_language_processing_for": "Natural Language Processing for Requirements Engineering: The Best Is Yet to Come", + "http://www.mkbergman.com/1006/pragmatic-approaches-to-the-semantic-web/": "Pragmatic Approaches to the Semantic Web \u00bb AI3:::Adaptive Information", + "http://www.semanlink.net/doc/2020/04/rdflib_rdflib_a_python_library": "RDFLib/rdflib: a Python library for working with RDF", + "http://www.semanlink.net/doc/2021/04/a_survey_of_text_clustering_alg": "A Survey of Text Clustering Algorithms - C. C. Aggarwal (2012)", + "http://www.semanlink.net/doc/2020/08/a_study_of_multilabel_text_clas": "A Study of multilabel text classification and the effect of label hierarchy (2015)", + "https://code.fb.com/ai-research/laser-multilingual-sentence-embeddings/": "LASER natural language processing toolkit - Facebook Code", + "http://beckr.org/DBpediaMobile/": "DBpedia Mobile (site)", + "http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl": "A Comprehensive Survey of Knowledge Graph Embeddings with Literals: Techniques and Applications", + "http://www.gnu.org/software/octave/": "GNU Octave", + "http://css.maxdesign.com.au/floatutorial/": "Floatutorial: Step by step CSS float tutorial", +
"http://www.cringely.com/2016/02/19/the-fbi-v-apple-isnt-at-all-the-way-you-think-it-is/": "I, Cringely The FBI v. Apple isn\u2019t at all the way you think it is - I, Cringely", + "http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_": "[1908.11860] Adapt or Get Left Behind: Domain Adaptation through BERT Language Model Finetuning for Aspect-Target Sentiment Classification", + "https://nlpparis.wordpress.com/": "Paris NLP - blog", + "http://semanticweb.com/at-semtechbiz-knowledge-graphs-are-everywhere_b37724?goback=%2Egde_1630687_member_247880484": "At SemTechBiz, Knowledge Graphs Are Everywhere - semanticweb.com", + "http://www.lemonde.fr/web/article/0,1-0@2-3232,36-915879,0.html": "La BBC n'aime pas les patrons, par Marc Roche - Le Monde.fr", + "http://www.aaronsw.com/weblog/mylifewithtim": "My Life With Tim (Aaron Swartz's Raw Thought)", + "https://github.com/json-ld/json-ld.org/issues/343": "markdown to json-ld", + "http://druid.io/": "Druid Real-time Exploratory Analytics on Large Datasets", + "http://www.seedwiki.com/wiki/itag/power_i-tags.cfm?wpid=220488": "Power I-Tags", + "http://colah.github.io/posts/2015-08-Understanding-LSTMs/": "Understanding LSTM Networks -- colah's blog", + "http://www.seasr.org/wp-content/plugins/meandre/rdfapi-php/doc/": "RAP - Rdf API for PHP V0.9.6 - Home", + "http://www.w3.org/2007/powder/blog": "POWDER Working Group Blog", + "http://www.semanlink.net/doc/2021/03/julie_grollier_sur_twitter_e": "Julie Grollier sur Twitter : \"EqSpike: spike-driven equilibrium propagation for neuromorphic implementations\"", + "https://www.bbc.co.uk/news/world-africa-45262081": "Ubang: The Nigerian village where men and women speak different languages - BBC News", + "http://www.semanlink.net/doc/2019/10/a_postman_collection_for_traini": "A Postman Collection for Training IBM Watson Speech to Text", + "http://www.lemonde.fr/afrique/article/2018/01/19/la-classe-africaine-notre-serie-consacree-a-l-education_5243976_3212.html": "La classe africaine", + "http://passeurdesciences.blog.lemonde.fr/2017/05/04/un-pas-de-plus-vers-la-greffe-de-tete/": "Un pas de plus vers la greffe de t\u00eate Passeur de sciences", +
"http://www.cs.princeton.edu/~blei/topicmodeling.html": "David M. Blei: Topic modeling", + "http://news.bbc.co.uk/2/hi/science/nature/7329505.stm": "BBC NEWS Faeces hint at first Americans", + "http://www.youtube.com/watch?v=3-UGbrhWcak": "Champion Jack Dupree - Christina, Christina Blues - YouTube", + "https://www.bbc.com/news/entertainment-arts-44820536": "Obituary: VS Naipaul - BBC News", + "https://www.nytimes.com/2017/05/09/world/europe/hackers-came-but-the-french-were-prepared.html?smid=tw-nytimes&smtyp=cur": "Hackers Came, but the French Were Prepared - The New York Times", + "https://distill.pub/2017/ctc/": "Sequence Modeling with CTC", + "https://class.coursera.org/ml-005/forum/thread?thread_id=122": "How can Neural Networks be applied to Time Series Forecasting?", + "http://www.ijcis.info/Vol4N2/pp63-71.pdf": "E-Learning Model Based On Semantic Web Technology", + "https://fr.slideshare.net/fpservant/ec-webslides": "EC-WEB 2014 \"Automotive ranges as e-commerce data\"", + "http://www.semanlink.net/doc/2021/08/2007_g8_en_grande_bretagne_quan": "2007 G8 en Grande Bretagne Quand Vladimir Poutine humilia Nicolas Sarkozy - YouTube", + "http://fr.wikipedia.org/wiki/Bassas_da_India": "Bassas da India - Wikip\u00e9dia", + "http://www.snee.com/bobdc.blog/2013/11/using-sparql-queries-from-nati.html": "Using SPARQL queries from native Android apps - bobdc.blog", + "http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1347": "Linked Data and Information Architecture", + "https://tech.goibibo.com/key-topics-extraction-and-contextual-sentiment-of-users-reviews-20e63c0fd7ca": "Key topics extraction and contextual sentiment of users\u2019 reviews", + "http://glish.com/css/7.asp": "glish.com : CSS layout techniques : 3 columns, the holy grail", + "http://www.siteduzero.com/tutoriel-3-14663-memo-pour-les-regex.html#ss_part_1": "M\u00e9mo pour les Regex", + "http://www.www2015.it/documents/proceedings/proceedings/p864.pdf": "Deriving an Emergent Relational Schema from RDF Data", + "http://www.youtube.com/watch?v=c4d24NQua6U": "Seni sogindim - Shoira Otabekova", + "http://www.wired.com/2016/08/dear-college-students-take-geology/": "Dear College Students: You Should Take Geology WIRED", + "http://www.w3.org/TR/2007/WD-rif-bld-20071030/": "RIF Basic Logic Dialect", + "https://www.courrierinternational.com/article/france-uruguay-tuer-ou-se-faire-tuer-les-uruguayens-connaissent-la-regle-de-la-coupe-du?utm_campaign=partenariatmondial2018&utm_medium=article&utm_source=lemonde.fr": "France-Uruguay. \u201cTuer ou se faire tuer\u00a0: les Uruguayens connaissent la r\u00e8gle de la Coupe du monde\u201d Courrier international", + "http://reseaux.blog.lemonde.fr/2013/01/03/economie-numerique-robots-retour/": "\u00c9conomie num\u00e9rique : Robots, le retour Lois des r\u00e9seaux", + "http://www.telegraph.co.uk/culture/books/4248401/100-novels-everyone-should-read.html": "100 novels everyone should read - Telegraph", + "http://www.w3.org/2009/03/xbrl/linked-data.png": "linked-data.png", + "http://www.semanlink.net/doc/2020/01/github_opennmt_opennmt_py_op": "GitHub - OpenNMT/OpenNMT-py: Open Source Neural Machine Translation in PyTorch", +
"https://www.nytimes.com/2019/04/02/health/klotho-brain-enhancement-dementia-alzheimers.html": "One Day There May Be a Drug to Turbocharge the Brain. Who Should Get It? - The New York Times", + "http://nlpprogress.com/english/entity_linking.html": "Entity Linking NLP-progress", + "http://onjava.com/lpt/a/6293": "ONJava.com: AJAX: How to Handle Bookmarks and Back Buttons", + "http://harry.hchen1.com/2006/01/29/255": "Harry Chen Thinks Aloud \u00bb Can ThinkMap Make Better Flickr and Technorati?", + "http://jena.sourceforge.net/assembler/assembler-howto.html": "the Assembler howto", + "http://www.blogmarks.net": "", + "http://www.heppnetz.de/ontologies/vso/ns": "The Vehicle Sales Ontology (VSO)", + "https://arxiv.org/abs/1902.05196v1": "[1902.05196] Categorical Metadata Representation for Customized Text Classification", + "http://www.bbc.co.uk/news/entertainment-arts-11056840": "BBC News - How commonplace is autotune?", + "http://composing-the-semantic-web.blogspot.com/2007/11/creating-documents-with-sparql-and-jsp.html": "Composing the Semantic Web: Creating documents with SPARQL and JSP", + "http://www.rfi.fr/afrique/20130301-architecture-terre-une-solution-le-sahel": "L\u2019architecture en terre: une solution pour le Sahel - NIGER - RFI", + "https://medium.com/@mikeal/ive-seen-the-future-it-s-full-of-html-2577246f2210": "I\u2019ve seen the future, it\u2019s full of HTML. \u2013 Mikeal \u2013 Medium", + "http://www.semanlink.net/doc/2019/01/most-important-ai-nlp-research": "14 NLP Research Breakthroughs You Can Apply To Your Business - 2018", + "http://vimeo.com/45633052": "Decoupling Content Management with CreateJS and VIE on Vimeo", + "http://colah.github.io/posts/2015-09-NN-Types-FP/": "Neural Networks, Types, and Functional Programming -- colah's blog", + "http://danzinde.wordpress.com/2007/07/31/cognitive-aspects-of-semantic-desktop-to-support-pim/": "Cognitive Aspects of Semantic Desktop to Support PIM \u00ab Danzinde", + "http://www.planetastronomy.com/articles/mesure-distance.htm": "La mesure des distances en astronomie des origines \u00e0 nos jours", + "http://passeurdesciences.blog.lemonde.fr/2013/05/05/decouverte-de-deux-planetes-oceans/": "D\u00e9couverte de deux plan\u00e8tes-oc\u00e9ans Passeur de sciences", +
"http://www.semanlink.net/doc/2019/07/is_that_a_duplicate_quora_quest": "Is That a Duplicate Quora Question? LinkedIn", + "http://www.w3.org/RDF/Validator/": "W3C RDF Validation Service", + "http://stgil.e-monsite.com/pages/personnages-de-st-gilles-croix-de-vie/paul-imbert.html": "Paul IMBERT", + "https://towardsdatascience.com/deep-learning-meets-physics-restricted-boltzmann-machines-part-i-6df5c4918c15": "Deep Learning meets Physics: Restricted Boltzmann Machines Part I", + "http://www.abigailsee.com/2018/02/21/deep-learning-structure-and-innate-priors.html": "Deep Learning, Structure and Innate Priors - A Discussion between Yann LeCun and Christopher Manning Abigail See", + "http://www.paulgraham.com/avg.html": "Beating the Averages", + "http://www.objectlearn.com": "ObjectLearn - Home of Lomboz", + "http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_": "Thomas Kipf's PhD thesis: \"Deep Learning with Graph-Structured Representations\"", + "http://www.bbc.co.uk/music/beta": "BBC - Music - Beta", + "http://blog.wavii.com/2012/08/23/duped-by-dupes/": "Duped by Dupes Wavii Blog", + "http://www.sciencesetavenir.fr/decouvrir/tele-cinema/20150926.OBS6576/tele-terres-nucleaires-une-histoire-du-plutonium.html": "\"Terres nucl\u00e9aires\" : une histoire du plutonium", + "http://www.lemonde.fr/europe/article/2014/04/17/daniel-cohn-bendit-insiste-une-derniere-fois-sur-la-necessite-d-europe_4402807_3214.html": "Daniel Cohn-Bendit insiste, une derni\u00e8re fois, sur la \u00abn\u00e9cessit\u00e9 d\u2019Europe \u00bb", + "http://www.volcano.si.edu/world/volcano.cfm?vnum=0201-041&volpage=photos&photo=099008": "Global Volcanism Program Dallol.jpg", + "http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=EB97C3236E6A06E7A5592EC92A7D0F54?doi=10.1.1.261.2530&rep=rep1&type=pdf": "A Comparison of Vector-based Representations for Semantic Composition (Blacoe and Lapata - 2012)", + "http://wikisem.makolab.pl": "GAO wiki", + "http://pgm.stanford.edu/intro.pdf": "Probabilistic Graphical Models (book, Koller)", + "https://www.lemonde.fr/referendum-sur-le-brexit/article/2018/08/13/jim-ratcliffe-le-brexiteur-milliardaire-qui-part-se-refugier-a-monaco_5342077_4872498.html": "Jim Ratcliffe, le brexiteur milliardaire qui part se\u00a0r\u00e9fugier\u2026 \u00e0 Monaco", + "http://www.nemrud.nl/": "International Nemrud Foundation - Word Heritage Monument in Turkey", + "http://www.lemonde.fr/big-browser/article/2016/08/01/pooper-la-fausse-application-de-ramassage-de-crottes-qui-ridiculise-l-uberisation_4977196_4832693.html": "Pooper, la fausse application de ramassage de crottes qui ridiculise \u00ab\u00a0l\u2019ub\u00e9risation\u00a0\u00bb", + "http://fr.wikipedia.org/wiki/Le_Baron_de_l%27%C3%A9cluse_(film)": "Le Baron de l'\u00e9cluse (film)", + "http://www.w3.org/2009/03/xbrl/report.html": "Report for the Workshop on Improving Access to Financial Data on the Web", + "https://www.theguardian.com/technology/2017/nov/15/tim-berners-lee-world-wide-web-net-neutrality?CMP=share_btn_tw": "Tim Berners-Lee on the future of the web: 'The system is failing' Technology The Guardian", +
"https://tedunderwood.com/2012/04/07/topic-modeling-made-just-simple-enough/": "Topic modeling made just simple enough. The Stone and the Shell", + "http://www.semanlink.net/doc/2019/05/uber_lyft_drivers_manipulate_f": "Uber, Lyft drivers manipulate fares at Reagan National causing artificial price surges WJLA", + "https://github.com/NatLibFi/Skosmos": "Skosmos", + "http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_Cesa-BianchiGZ06.pdf": "Hierarchical classification: Combining Bayes with SVM", + "http://allforces.com/2005/08/23/wordpress-on-mac-subdomains/": "WordPress on Mac Part 2: Sub-Domains All Forces", + "http://archeo.blog.lemonde.fr/2017/05/13/lhomme-prehistorique-aimait-les-pains-moutardes/": "L\u2019homme pr\u00e9historique aimait les pains moutard\u00e9s Dans les pas des arch\u00e9ologues", + "http://dannyayers.com": "Danny Ayers, Raw Blog", + "http://mspace.fm/": "mSpace", + "http://data.semanticweb.org/person/francois-paul-servant/html": "Fran\u00e7ois-Paul Servant Semantic Web Dog Food", + "http://simile.mit.edu/RDFizers/": "SIMILE RDFizers", + "http://louvre-boite.viabloga.com/news/50.shtml": "", + "http://hathaway.cc/post/69201163472/how-to-edit-your-path-environment-variables-on-mac": "How To Edit Your PATH Environment Variables On Mac OS X", + "http://www.experimentgarden.com/2010/06/loading-javascript-functions-via-ajax.html": "Loading JavaScript Functions Via AJAX - Experiment Garden", + "http://www.myspace.com/bandalaya": "Renato Matos (myspace)", + "http://www.lemonde.fr/afrique/article/2016/03/28/la-chine-fait-main-basse-sur-les-forets-africaines_4891052_3212.html": "La Chine fait main basse sur les for\u00eats africaines", + "http://www1.planeteafrique.com/Liberation/Index.asp?affiche=News_Display.asp&articleid=1219": "Accord p\u00e9trolier entre le CNODC et le Niger", + "http://www.w3.org/2007/03/RdfRDB/report": "Report from the W3C Workshop on RDF Access to Relational Databases", + "http://sourceforge.net/projects/eulergui/": "EulerGUI", + "http://googleresearch.blogspot.fr/2013/03/learning-from-big-data-40-million.html": "Learning from Big Data: 40 Million Entities in Context", + "http://semanticweb.com/get-more-robust-access-control-courtesy-of-semantic-technology_b24530": "Get More Robust Access Control, Courtesy of Semantic Technology - semanticweb.com", + "http://www.bing.com/blogs/site_blogs/b/search/archive/2013/03/21/satorii.aspx": "Understand Your World with Bing - Search Blog", + "http://blog.schema.org/2013/06/schemaorg-and-json-ld.html": "schema blog: Schema.org and JSON-LD", + "http://www.semanlink.net/doc/2020/07/ruggles_of_red_gap": "Ruggles of Red Gap", + "http://www.semanlink.net/doc/2020/06/thatmuse": "THATMuse", +
"http://news.dice.com/2012/05/16/r-octave-python-suits-your-analysis-needs/": "R, Octave, and Python: Which Suits Your Analysis Needs? - Dice News", + "http://www.semanlink.net/doc/2021/07/practical_natural_language_proc": "Practical Natural Language Processing for Low-Resource Languages", + "http://www.semanlink.net/doc/2020/12/domain_specific_bert_models_%C2%B7_c": "Domain-Specific BERT Models \u00b7 Chris McCormick", + "http://cs.stanford.edu/people/karpathy/": "Andrej Karpathy Academic Website", + "https://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/": "An Efficient Way to Extract the Main Topics from a Sentence The Tokenizer", + "http://www.semanlink.net/doc/2020/01/nlp_year_in_review_2019_dai": "NLP Year in Review \u2014 2019 - dair.ai - Medium", + "http://www.rue89.com/2013/09/04/nouvelles-revelations-lunite-contre-espionnage-wikileaks-245374": "WikiLeaks a traqu\u00e9 les vendeurs d\u2019armes de surveillance Rue89", + "http://www.semanlink.net/doc/2021/04/the_nlp_index": "The NLP Index", + "http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/": "Text Classification With Word2Vec - DS lore (2016)", + "http://blogs.sun.com/bblfish/entry/search_tagging_and_wikis": "Search, Tagging and Wikis (2007)", + "https://www.buzzfeed.com/tedchiang/the-real-danger-to-civilization-isnt-ai-its-runaway": "Silicon Valley Is Turning Into Its Own Worst Fear", + "http://www.semanlink.net/doc/2020/07/la_%C2%AB_positive_attitude_%C2%BB_des_di": "La \u00ab\u00a0positive attitude\u00a0\u00bb des directeurs d\u2019h\u00f4pitaux face au coronavirus d\u00e9contenance la commission d\u2019enqu\u00eate", + "https://pouannes.github.io/blog/decorators/": "Finally understanding decorators in Python \u2022 Pierre Ouannes", + "http://peacecorpsonline.org/messages/messages/2629/2025470.html": "Peace Corps Online - RPCV Vito DiLullo is looking forward to welcoming friends to his table", + "http://www.bbc.com/future/story/20170126-the-untranslatable-emotions-you-never-knew-you-had": "BBC - Future - The \u2018untranslatable\u2019 emotions you never knew you had", + "http://www.w3.org/community/hydra/": "Hydra Community Group @ W3C", + "http://www.nytimes.com/2014/01/26/opinion/sunday/what-drives-success.html?_r=1": "What Drives Success? - NYTimes.com", + "http://www.washingtonpost.com/sf/opinions/wp/2016/07/08/one-year-out/?tid=sm_tw": "Obama sent these people home from prison early. Now what? The Washington Post", + "http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat": "Google AI Blog: REALM: Integrating Retrieval into Language Representation Models", + "http://paigrain.debatpublic.net/?p=6827": "Corruption des institutions \u2013 Communs / Commons", + "http://www.cortical.io/": "Cortical.io - Fast, precise, intuitive NLP", + "http://lists.w3.org/Archives/Public/semantic-web/2010Oct/0226.html": "Please allow JS access to Ontologies and LOD", + "http://www.theguardian.com/world/2013/sep/20/usaf-atomic-bomb-north-carolina-1961": "US nearly detonated atomic bomb over North Carolina \u2013 secret document World news The Guardian", + "http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/": "Understanding Convolutional Neural Networks for NLP WildML", + "http://www.mkbergman.com/?p=326": "AI3: Converting \u2018Sweet Tools\u2019 to an Exhibit", + "http://www.lrec-conf.org/proceedings/lrec2012/pdf/544_Paper.pdf": "Collaborative semantic editing of linked data lexica", +
"http://data.blog.lemonde.fr/2015/10/23/le-fact-checking-peut-il-sautomatiser/": "Le fact-checking peut-il s\u2019automatiser ? J'ai du bon data", + "http://www.nltk.org/api/nltk.tag.html#module-nltk.tag.stanford": "nltk.tag.stanford module \u2014 NLTK documentation", + "http://www.webdeveloperjuice.com/2011/09/28/7-beautiful-web-based-timeline-using-javascript-and-css/": "7 Beautiful Web Based Timeline Using Javascript and CSS", + "https://www.eff.org/deeplinks/2013/11/drm-cars-will-drive-consumers-crazy": "DRM in Cars Will Drive Consumers Crazy Electronic Frontier Foundation", + "http://blogs.sun.com/bblfish/entry/bof_5911_building_a_web": "BOF-5911: Building a Web 3.0 Address Book", + "https://github.s3.amazonaws.com/media/progit.en.pdf": "Pro Git", + "http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_": "[1906.05685] A Focus on Neural Machine Translation for African Languages", + "http://www.lespetitescases.net/semweblabs/drupalModules.php": "Modules pour Drupal", + "http://ivan-herman.name/2009/01/14/a-different-usage-of-rdfa/": "A different usage of RDFa\u2026 \u00ab Ivan\u2019s private site", + "http://www.christian-faure.net/2013/05/10/theorie-du-drone-de-gregoire-chamayou-la-guerre-est-finie": "\u00ab\u00a0Th\u00e9orie du drone\u00a0\u00bb, de Gr\u00e9goire Chamayou : la guerre est finie", + "https://github.com/dmlc/dgl": "dmlc/dgl: Python package built to ease deep learning on graph, on top of existing DL frameworks.", + "http://edition.cnn.com/2008/POLITICS/11/04/obama.transcript/index.html?iref=mpstoryview": "Transcript: 'This is your victory,' says Obama - CNN.com", + "http://maps.google.fr/maps?f=q&source=s_q&hl=fr&geocode=&q=niamey&sll=46.75984,1.738281&sspn=14.077708,19.599609&ie=UTF8&hq=&hnear=Niamey,+Niger&ll=13.590133,2.100331&spn=0.002438,0.002393&t=h&z=19": "Gado, parcelles Tchangarey 8714 pqrs", + "http://www.semanlink.net/doc/2021/04/thunlp_openmatch_an_open_sourc": "thunlp/OpenMatch: An Open-Source Package for Information Retrieval.", + "http://research.talis.com/2005/erdf/wiki/Main/RdfInHtml": "Embedded RDF Wiki :: Talis", + "http://www.businessweek.com/print/magazine/content/06_18/b3982001.htm?chan=gl": "My Virtual Life", + "http://www.w3.org/2005/Incubator/mmsem/XGR-vocabularies/": "Multimedia Vocabularies on the Semantic Web", + "http://www.semanlink.net/doc/2019/07/a_common_sense_view_of_knowledg": "A Common Sense View of Knowledge Graphs AI3:::Adaptive Information", + "https://www.newscientist.com/article/mg21128311.800-a-brief-history-of-the-brain": "A brief history of the brain New Scientist", + "https://watson-api-explorer.mybluemix.net/": "Watson API Explorer", + "http://www.fofomag.com/index.asp?affiche=news_Display.asp&ArticleID=920": "Moussa Poussi hospitalis\u00e9", + "https://github.com/keon/awesome-nlp": "keon/awesome-nlp: A curated list of resources dedicated to Natural Language Processing (NLP)", + "http://www.bbc.co.uk/science/horizon/1999/nasca_script.shtml": "BBC - Science & Nature - The Lost City of Nasca", + "http://www.commentcamarche.net/forum/affich-23600010-arnaque-a-blinkogold": "Arnaque \u00e0 Blinkogold / orange", + "http://www.semanlink.net/doc/2020/04/barbara_stiegler_%C2%AB_la_crise_d": "Barbara Stiegler\u00a0: \u00ab\u00a0La crise due au coronavirus refl\u00e8te la vision n\u00e9olib\u00e9rale de la sant\u00e9 publique\u00a0\u00bb", + "http://www.semanticscripting.org/SFSW2008/": "Scripting for the Semantic Web (SFSW2008)", + "https://www.continuum.io/anaconda-overview": "Anaconda Continuum", + "https://cims.nyu.edu/~brenden/LakeEtAl2015Science.pdf": "Human-level concept learning through probabilistic program induction (2015)", +
"http://scot-project.org/?page_id=2": "SCOT:Let\u2019s Share Tags! \u00bb About SCOT", + "https://en.wikipedia.org/wiki/Zanj_Rebellion": "Zanj Rebellion - Wikipedia", + "http://www.zdnet.com/blog/networking/freedom-box-freeing-the-internet-one-server-at-a-time/698": "Freedom Box: Freeing the Internet one Server at a time ZDNet", + "https://blog.usievents.com/interview-technique-a-poison-remede/": "Interview : \"La technique est \u00e0 la fois notre poison et notre rem\u00e8de\" - USI Events - Blog", + "http://www.tatuagemdaboa.com.br/": "Bar da Boa", + "https://github.com/FasterXML/jackson-databind/": "FasterXML/jackson-databind", + "http://www.semanlink.net/doc/2020/01/facebook_paid_teen_vogue_to_run": "Facebook paid Teen Vogue to run a fake article praising Facebook for \"helping ensure the integrity of the 2020 election\" / Boing Boing", + "https://wit.ai/blog/2014/12/19/dan-jurafsky-food": "The Language of Food (and Dating), by Dan Jurafsky", + "http://www.slideshare.net/bengee/bnowack-from-ideatoweb": "From Idea to Web - Creating Linked Data Apps", + "http://stackoverflow.com/questions/26232346/in-json-ld-is-it-possible-to-extend-a-context/26236423#26236423": "semantic web - In JSON-LD, is it possible to extend a context? - Stack Overflow", + "https://www.bbvaopenmind.com/en/brain-implants-and-the-brain-initiative-lights-and-shadows/?utm_source=twitter&utm_medium=techreview&utm_campaign=MITcompany&utm_content=ImplantesBRAIN": "Brain Implants and the BRAIN Initiative: lights and Shadows - OpenMind", + "http://www.wired.com/threatlevel/2013/05/nsa-manual-on-hacking-internet/": "Use These Secret NSA Google Search Tips to Become Your Own Spy Agency Threat Level Wired.com", + "https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html": "Latent semantic indexing (\"Introduction to Information Retrieval\" Manning 2008)", + "http://www.technologyreview.com/Infotech/19627/?a=f": "Technology Review: Twine", + "https://medium.com/swlh/a-machine-learning-model-to-understand-fancy-abbreviations-trained-on-tolkien-36601b73ecbb": "A machine learning model to understand fancy abbreviations, trained on Tolkien", + "http://www.pbs.org/wgbh/nova/megaflood/scab-nf.html": "NOVA Mystery of the Megaflood", + "http://www.semanlink.net/doc/2020/04/roam_research_a_note_taking_t": "Roam Research \u2013 A note taking tool for networked thought.", + "http://biopython.org": "Biopython", + "http://www.deeplearningbook.org/contents/representation.html": "Representation learning (in \"Deep Learning\", Ian Goodfellow and Yoshua Bengio and Aaron Courville)", + "http://www.youtube.com/watch?v=SA9_3cxfHyI": "Adams Junior - La m\u00e8re de l'Humanit\u00e9 - YouTube", + "https://github.com/taurenshaman/semantic-web/blob/master/data/semanlink-schema2001.rdf": "Old semanlink schema in a github project!", + "http://www.ldodds.com/blog/archives/000248.html": "Lost Boy: It's Like the Ultimate Lazy Web", + "http://science.sciencemag.org/content/324/5923/81": "Distilling Free-Form Natural Laws from Experimental Data Science", + "http://www.semanlink.net/doc/2021/01/nucleaire_%C2%AB_l%E2%80%99etat_du_parc_fr": "Nucl\u00e9aire\u00a0: \u00ab\u00a0L\u2019\u00e9tat du parc fran\u00e7ais est pr\u00e9occupant\u00a0\u00bb", + "http://www.thefigtrees.net/lee/blog/2008/01/why_sparql.html": "TechnicaLee Speaking: Why SPARQL?", + "http://junit.sourceforge.net/doc/faq/faq.htm": "JUnit FAQ", +
"http://www.calacademy.org/exhibits/xtremelife/life_on_earth.php": "Extreme Life on Earth (California Academy of Sciences - Natural History)", + "http://www.pbs.org/cringely/pulpit/pulpit20050609.html": "PBS I, Cringely . June 9, 2005 - Going for Broke", + "https://github.com/aneesha/RAKE": "RAKE: A python implementation of the Rapid Automatic Keyword Extraction", + "http://msc2010.org/mscwork/": "Mathematics Subject Classification MSC2010", + "http://www.wired.com/2015/07/pluto-new-horizons-2/": "New Horizons' Long, Dark, Amazing Journey to Pluto...And Beyond WIRED", + "http://www.semanlink.net/doc/2020/04/%C2%AB_les_cas_de_covid_se_multiplie": "\u00ab\u00a0Les cas de Covid se multiplient. \u00c7a tombe, \u00e7a tombe. Jusqu\u2019o\u00f9\u00a0?\u00a0\u00bb : la course \u00e0 la vie d\u2019une r\u00e9animatrice", + "http://www.lemonde.fr/idees/article/2014/04/03/comment-devient-on-un-bourreau_4395245_3232.html": "Comment devient-on un bourreau ?", + "http://www.lemonde.fr/culture/article/2013/02/07/les-manuscrits-sauves-de-tombouctou_1828672_3246.html": "Les manuscrits sauv\u00e9s de Tombouctou", + "http://www.w3.org/2001/tag/awwsw/issue57/20110625/#id35291": "Providing and discovering definitions of URIs", + "http://stackoverflow.com/questions/3595515/xmlhttprequest-error-origin-null-is-not-allowed-by-access-control-allow-origin": "javascript - XmlHttpRequest error: Origin null is not allowed by Access-Control-Allow-Origin - Stack Overflow", + "http://www.hyperdata.it/": "FooWiki - danja", + "http://www.kryogenix.org/code/browser/jses/": "JavaScript Event Sheets", + "http://www.google.fr/search?hl=fr&source=hp&q=Linking+enterprise+data&btnG=Recherche+Google&meta=&aq=f&oq=": "Linking enterprise data - Recherche Google", + "http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEM7ZTULWFE_0.html": "ESA - Results from Mars Express - Buried craters and underground ice - Mars Express uncovers depths of Mars", + "http://www.semanlink.net/doc/2019/09/evolution_of_representations_in": "Evolution of Representations in the Transformer (2019)", + "http://ecologie.blog.lemonde.fr/2011/01/14/bienvenue-dans-une-nouvelle-ere-geologique-lanthropocene/": "Bienvenue dans une nouvelle \u00e8re g\u00e9ologique, l\u2019anthropoc\u00e8ne - Eco(lo) - Blog LeMonde.fr", + "http://www.lemonde.fr/planete/article/2018/03/13/le-krill-un-petit-crustace-tres-convoite_5270007_3244.html": "Greenpeace alerte sur le boom de la p\u00eache au krill en Antarctique", + "http://code.google.com/p/backplanejs/wiki/UsingRdfa": "backplanejs: A basic introduction to using the RDFa Parser in your web pages", + "http://www.bnode.org/archives2/47": "Quad store performance issues", + "http://www.nytimes.com/2007/07/22/business/yourmoney/22rwanda.html?ei=5088&en=83f0d24da11aabd1&ex=1342756800&adxnnl=1&partner=rssnyt&emc=rss&adxnnlx=1185641013-NY6K9QLvEmOIlTll3V9Y1w": "Africa, Offline: Waiting for the Web - New York Times", + "http://www.semanlink.net/doc/2021/07/livre_%C2%AB_tout_s%E2%80%99effondre_%C2%BB_un": "Livre\u00a0: \u00ab\u00a0Tout s\u2019effondre\u00a0\u00bb, un hommage \u00e0 l\u2019Afrique ant\u00e9-coloniale \u00e0 l\u2019heure de sa d\u00e9sagr\u00e9gation", + "http://www.arte.tv/fr/semaine/244,broadcastingNum=1232779,day=6,week=2,year=2011.html": "La Ronde de Nuit, secrets d'un tableau - Peter Greenaway", + "http://www.semanlink.net/doc/2021/03/babelnet_%7C_le_plus_grand_dictio": "BabelNet Le plus grand dictionnaire encyclop\u00e9dique et r\u00e9seau s\u00e9mantique", +
"http://www.fubiz.net/2017/03/29/incredible-optical-illusion-body-painting-by-trina-merry/": "Incredible Optical Illusion Body Painting by Trina Merry \u2013 Fubiz Media", + "http://www.theguardian.com/commentisfree/2013/sep/05/government-betrayed-internet-nsa-spying": "The US government has betrayed the internet. We need to take it back Bruce Schneier The Guardian", + "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-similarity.html": "Similarity module Elasticsearch Reference", + "https://www.slideshare.net/andyseaborne/201602-graphs-pgrdf": "Two graph data models : RDF and Property Graphs", + "http://en.wikipedia.org/wiki/War/Dance": "War Dance", + "http://ontologies.makolab.com/uco/ns.html": "Used Cars Ontology Language Reference", + "http://passeurdesciences.blog.lemonde.fr/2013/03/06/un-virus-prend-la-bacterie-du-cholera-a-son-propre-piege/": "Un virus prend la bact\u00e9rie du chol\u00e9ra \u00e0 son propre pi\u00e8ge Passeur de sciences", + "https://class.coursera.org/datasci-001/wiki/view?page=syllabus": "Syllabus Introduction to Data Science", + "http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fde.wikipedia.org%2Fwiki%2FBerlin%3E+%7D%0D%0A%7D": "SPARQL query to find the dbPedia concept corresponding to a wikipedia page", + "http://neurosciencenews.com/light-crispr-cas9-4917/": "Using Light to Control Genome Editing \u2013 Neuroscience News", + "http://www.semanlink.net/doc/2021/08/the_4_biggest_open_problems_in_": "The 4 Biggest Open Problems in NLP (2019)", + "http://bblfish.net/coop/": "co-operating.systems", + "http://www.semanlink.net/doc/2019/10/restoring_ancient_text_using_de": "Restoring ancient text using deep learning: a case study on Greek epigraphy DeepMind", + "http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_": "Maria sur Twitter : \"O you who go to Gao, do so by way of Timbuktu and murmur my name to my friends. Give them ...\"", + "http://fr.slideshare.net/julienplu/extraction-de-lasemantique": "Extraction de la semantique", + "http://semanticweb.com/linked-data-on-the-web-workshop-at-www-2012_b28328": "Linked Data on the Web Workshop at WWW 2012 - semanticweb.com", +
"http://www.lemonde.fr/opinions/chronique/2009/07/20/et-si-on-rentrait-tous-chez-nous-pensees-d-une-immigree_1220598_3232.html": "Et si on rentrait tous chez nous ? Pens\u00e9es d\u2019une immigr\u00e9e - Opinions - Le Monde.fr", + "http://www.margueritte.fr/bloc/?p=496": "Pierre de Lave au Palais Royal, Paris BLOC", + "http://www.youtube.com/watch?v=mXlyDwywq3Q": "ZAZ On ira", + "http://lists.w3.org/Archives/Public/public-lod/2009Apr/0105.html": "SKOS implementation: Rameau subjects as linked data", + "http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class": "Towards Unsupervised Text Classification Leveraging Experts and Word Embeddings - (ACL 2019)", + "http://www.nigerportal.com/home1/modules.php?name=News&file=article&sid=132": "Nigerportal le portail du Niger - D\u00e9c\u00e8s \u00e0 Niamey du chanteur Moussa Poussy", + "http://www.wired.com/wiredscience/2013/07/solitary-confinement-2/": "The Horrible Psychology of Solitary Confinement Wired Science Wired.com", + "http://www.semanlink.net/doc/2020/07/javascript_partial_matching_a": "javascript - Partial matching a string against a regex - Stack Overflow", + "https://moz.com/blog/301-redirection-rules-for-seo": "301 Redirects Rules Change: What You Need to Know for SEO - Moz", + "http://www.w3.org/TR/xhtml-rdfa-primer/": "RDF/A Primer 1.0: Embedding RDF in XHTML", + "http://apassant.net/files/publications/these-apassant.pdf": "\"Technologies du Web S\u00e9mantique pour l'Entreprise 2.0\" - Th\u00e8se - Alexandre Passant", + "https://www.dropbox.com/s/y59petiffzq63gt/main.pdf?dl=0": "Mathematics of Machine Learning: An introduction", + "http://www.semanlink.net/doc/2019/05/introducing_fastbert_a_simple": "Introducing FastBert\u200a\u2014\u200aA simple Deep Learning library for BERT Models", + "http://www.semanlink.net/doc/2020/04/contrastive_predictive_coding": "Contrastive Predictive Coding", + "http://umbel.org/intro.xhtml": "UMBEL Intro", + "http://semtechbizberlin2012.semanticweb.com/": "SEMTECHBIZ Berlin 2012: The Semantic Tech & Business Conference", + "https://blogs.msdn.microsoft.com/ieinternals/2009/06/17/vary-with-care/": "Vary with Care IEInternals", + "http://www.slideshare.net/terraces/semantic-enterprise-20-enabling-semantic-web-technologies-in-enterprise-20-environment": "Semantic Enterprise 2.0 - Enabling Semantic Web technologies", + "http://www.wired.com/wiredenterprise/2013/06/observos-internet-of-places/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous": "Ex-Restaurant Man Erects the 'Internet of Places' Wired Enterprise Wired.com", + "https://www.youtube.com/watch?v=TnokFAwi1yc": "Eunice Barber, Paris 2003 Long Jump Women", + "http://news.com.com/2102-1041_3-6020603.html?tag=st.util.print": "Lego Mindstorms no kids' toy CNET News.com", + "http://semanticweb.com/discovering-french-monuments-with-the-help-of-the-semantic-web_b25564": "Discovering French Monuments, With the Help of the Semantic Web - semanticweb.com", + "http://www.corequant.com/?p=1": "Sentiment Analysis in RapidMiner / Technology Blog", + "https://www.franceculture.fr/architecture/untergunther-reparateurs-clandestins-du-patrimoine": "Untergunther, r\u00e9parateurs clandestins du patrimoine", + "http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf": "Knowledge Graph and Text Jointly Embedding (2014)", + "http://www.w3.org/2011/09/LinkedData/ledp2011_submission_13.pdf": "Identity Crisis in Linked Data", + "http://www.freakzion.com/index.php/The-Blog-November-2014/jsapps-101-angularjs-in-a-nutshell.html": "JSApps 101: AngularJS In A Nutshell", + "http://del.icio.us/": "http://del.icio.us", +
"https://stackoverflow.com/questions/6550700/inversion-of-control-vs-dependency-injection?rq=1": "Inversion of Control vs Dependency Injection - Stack Overflow", + "http://www.thefigtrees.net/lee/blog/2009/07/constructing_quads.html": "CONSTRUCTing Quads - TechnicaLee Speaking", + "http://www.semanlink.net/doc/2020/05/cognonto_data_structure_mea": "Cognonto - Data. Structure. Meaning.", + "http://www.semanlink.net/doc/2021/07/%C2%AB_au_vu_des_forces_economiques_": "\u00ab\u00a0Au vu des forces \u00e9conomiques en pr\u00e9sence, les abeilles et les pollinisateurs apparaissent ind\u00e9fendables\u00a0\u00bb", + "https://www.theguardian.com/politics/2017/feb/26/robert-mercer-breitbart-war-on-media-steve-bannon-donald-trump-nigel-farage": "Robert Mercer: the big data billionaire waging war on mainstream media Politics The Guardian", + "http://www.techempower.com/blog/2013/03/26/everything-about-java-8/": "Everything about Java 8 - TechEmpower Blog", + "http://www.semanlink.net/doc/2020/05/isabel_cachola_sur_twitter_t": "Isabel Cachola sur Twitter : \"TLDR: Extreme Summarization of Scientific Documents\"", + "https://junyanz.github.io/CycleGAN/": "Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks", + "http://www.calacademy.org/": "California Academy of Sciences", + "http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b": "Named Entity Recognition with Bert \u2013 Depends on the definition", + "http://www.hlt.utdallas.edu/~vince/papers/coling10-keyphrase.pdf": "Conundrums in Unsupervised Keyphrase Extraction: Making Sense of the State-of-the-Art (2010)", + "http://www.semanlink.net/doc/2019/06/one_shot_learning_with_siamese_": "One Shot Learning with Siamese Networks using Keras", + "http://nonaedvige.ras.eu.org/": "Pour obtenir l'abandon du fichier EDVIGE", + "http://www.semanlink.net/doc/2021/01/qanswer_%C2%B7_accessing_your_knowle": "QAnswer \u00b7 Accessing your Knowledge via Natural Language", + "http://www.aclweb.org/anthology/D14-1181": "Convolutional Neural Networks for Sentence Classification (2014)", + "http://www.csie.ntu.edu.tw/~cjlin/libshorttext/": "LibShortText: A Library for Short-text Classification and Analysis", + "http://www.semanlink.net/doc/2020/09/initializing_neural_networks_fo": "Initializing neural networks for hierarchical multi-label text classification (2017)", + "https://github.com/hubgit/md-ld": "MD-LD", + "http://stackoverflow.com/questions/16042885/swagger-hashmap-property-type": "api - Swagger HashMap property type - Stack Overflow", + "http://www.semanlink.net/doc/2021/08/joey_nmt%E2%80%99s_documentation_": "Joey NMT\u2019s documentation!", + "http://www.semanlink.net/doc/2020/12/katalin_kariko_wikipedia": "Katalin Kariko", + "http://www.lemonde.fr/sciences/article/2013/02/28/deux-rats-separes-par-un-continent-mais-relies-par-le-cerveau_1840845_1650684.html": "Deux rats s\u00e9par\u00e9s par un continent mais reli\u00e9s par le cerveau", + "http://www.semanlink.net/doc/2021/04/camille_lefebvre_%7C_langarchiv": "Zinder (Camille Lefebvre Langarchiv)", + "http://www.datasciencecentral.com/profiles/blogs/great-list-of-resources-nosql-big-data-ml-and-much-more-posted-on?overrideMobileRedirect=1": "Great list of resources - NoSQL, Big Data, Machine Learning and more GitHub - Data Science Central", + "http://worrydream.com/TheWebOfAlexandria/": "The Web of Alexandria", + "http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar": "[2010.11967] Language Models are Open Knowledge Graphs", +
"https://blog.sicara.com/train-ner-model-with-nltk-stanford-tagger-english-french-german-6d90573a9486": "How to Train your Own Model with NLTK and Stanford NER Tagger? (for English, French, German\u2026)", + "http://www.cigref.fr/wp/wp-content/uploads/2016/09/Gouvernance-IA-CIGREF-LEXING-2016.pdf": "Gouvernance de l'intelligence artificielle dans les grandes entreprises", + "http://www.irt.org/script/script.htm": "irt.org - JavaScript FAQ Knowledge Base", + "http://arxiv.org/abs/1601.07752": "[1601.07752] Enhancing the Power of Cardinal's Algorithm", + "http://www.semanlink.net/doc/2019/11/pour_conquerir_les_terres_les_": "Pour conqu\u00e9rir les terres, les plantes ont emprunt\u00e9 des g\u00e8nes aux bact\u00e9ries CNRS", + "https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Similarity_Queries.ipynb": "Gensim tutorial: Similarity Queries", + "http://www2.cnrs.fr/presse/communique/3633.htm": "La plus vieille biodiversit\u00e9 d\u00e9couverte dans un \u00e9cosyst\u00e8me marin au Gabon - CNRS", + "http://www.nytimes.com/2014/11/05/opinion/why-sand-is-disappearing.html?ref=international&_r=2": "Why Sand Is Disappearing - NYTimes.com", + "https://www.youtube.com/watch?v=nFCxTtBqF5U": "Representations for Language: From Word Embeddings to Sentence Meanings (2017) - YouTube", + "http://www.lemonde.fr/sciences/article/2012/06/28/des-marees-sous-titan_1726205_1650684.html": "Des mar\u00e9es sous Titan, une lune de Saturne", + "http://vancouverdata.blogspot.fr/2010/11/text-analytics-with-rapidminer-loading.html": "Vancouver Data Blog by Neil McGuigan: Text Analytics with RapidMiner Part 1 of 6 - Loading Text", + "https://github.com/facebookresearch/fastText": "facebookresearch/fastText: Library for fast text representation and classification.", + "http://www.scottsantens.com/should-the-amount-of-basic-income-vary-with-cost-of-living-differences": "Scott Santens - Should the Amount of Basic Income Vary With Cost of Living Differences?", + "http://arxiv.org/pdf/1301.3781.pdf": "[1301.3781] Efficient Estimation of Word Representations in Vector Space", + "https://arxiv.org/pdf/1705.08039.pdf": "[1705.08039] Poincar\u00e9 Embeddings for Learning Hierarchical Representations", + "http://www.snee.com/bobdc.blog/2014/01/storing-and-querying-rdf-in-ne.html": "Storing and querying RDF in Neo4j - bobdc.blog", + "http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/disco/": "Disco Hyperdata Browser", + "http://www.nature.com/news/2010/100630/full/news.2010.323.html": "Ancient macrofossils unearthed in West Africa : Nature News", + "http://yyue.blogspot.ca/2015/01/a-brief-overview-of-deep-learning.html": "Random Ponderings: A Brief Overview of Deep Learning", + "http://opendatapress.org/": "Open Data Press - Google Sheets to Open Data", + "http://www.mail-archive.com/public-lod@w3.org/msg05020.html": "CoIN: Composition of Identifier Names", + "http://www.semanlink.net/doc/2019/08/geologie_normandie_massif_d": "G\u00e9ologie - Normandie - Massif d'Ecouves (NS) - Pleurodictyum constantinopolitatum", + "http://deliprao.com/": "Delip Rao", + "https://medium.com/s/futurehuman/survival-of-the-richest-9ef6cddd0cc1": "Survival of the Richest", + "http://www.semanlink.net/doc/2020/01/richer_sentence_embeddings_usin": "Richer Sentence Embeddings using Sentence-BERT \u2014 Part I", + "http://www.semanlink.net/doc/2020/05/the_real_lord_of_the_flies_wha": "The real Lord of the Flies: what happened when six boys were shipwrecked for 15 months The Guardian", + "https://gist.github.com/andyferra/2554919": "Github Markdown CSS - for Markdown Editor Preview", +
"http://crise.blog.lemonde.fr/2012/12/01/jardinier-resistant-echange-graines-de-laitues-contre-plants-de-tomates/": "Jardinier r\u00e9sistant, \u00e9change graines de laitues contre plants de tomates Une Ann\u00e9e en France", + "https://github.com/josephmisiti/awesome-machine-learning": "A curated list of awesome Machine Learning frameworks, libraries and software.", + "http://www.semanticweb.com/features/index_to_the_creative_destruction_7_act_play_161403.asp": "Index To The Creative Destruction 7 Act Play - Semantic Web", + "http://vene.ro/blog/word-movers-distance-in-python.html": "Word Mover\u2019s Distance in\u00a0Python", + "http://jamendo.org/": "Jamendo.org", + "https://www.researchgate.net/publication/273397652_Entity_Linking_with_a_Knowledge_Base_Issues_Techniques_and_Solutions": "Entity Linking with a Knowledge Base: Issues, Techniques, and Solutions. W Shen (2015)", + "http://www.sindice.com/": "sindice", + "http://danbri.org/words/2008/01/06/246": "danbri\u2019s foaf stories \u00bb SPARQL results in spreadsheets", + "http://www.cnrs.fr/inc/communication/direct_labos/moran2.htm": "Le CO2 \u00e0 l\u2019origine de la vie ?", + "http://www.semanlink.net/doc/2020/10/sylvain_gugger_sur_twitter_t": "Sylvain Gugger sur Twitter : \"Training a transformer model for text classification...\"", + "https://www.quora.com/What-is-Noise-Contrastive-estimation-NCE": "What is Noise Contrastive estimation (NCE)? - Quora", + "http://scot.curriculum.edu.au/index.html": "Schools Online Thesaurus", + "http://iandavis.com/blog/2009/08/time-in-rdf-6": "Internet Alchemy \u00bb Representing Time in RDF Part 6", + "https://watson-api-explorer.mybluemix.net/apis/alchemy-language-v1": "Watson: Alchemy Language v1 API Explorer", + "http://judyshapiro.sharedby.co/share/FGN8rQ": "27 Science Fictions That Became Science Facts In 2012", + "https://www.technologyreview.com/lists/technologies/2017/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review": "10 Breakthrough Technologies 2017 - MIT Technology Review", +
"http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo": "Cory Doctorow: \"Google's moves... really ARE good for users. I'd love to find proposals to fix this stuff WITHOUT creating monopolies\" / Twitter", + "https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/": "Multivariate Time Series Forecasting with LSTMs in Keras - Machine Learning Mastery", + "http://www.wired.com/autopia/2014/02/feds-v2v/": "Feds Will Require All New Vehicles to Talk to Each Other Autopia Wired.com", + "http://www.semanlink.net/doc/2019/05/how_to_combine_categorical_and_": "How to combine categorical and continuous input features for neural network training - Data Science Stack Exchange", + "http://siliconvalley.blog.lemonde.fr/2015/09/03/comment-les-travailleurs-1099-menacent-luber-economie/": "Comment les \u00ab travailleurs 1099 \u00bb menacent l\u2019Uber-\u00e9conomie Silicon 2.0", + "https://js.tensorflow.org/": "TensorFlow.js", + "http://n2.talis.com/wiki/Bounded_Descriptions_in_RDF": "Bounded Descriptions in RDF - n\u00b2 wiki", + "http://wikileaks.org/Transcript-Meeting-Assange-Schmidt": "Transcript of secret meeting between Julian Assange and Google CEO Eric Schmidt", + "http://prefix.cc/": "namespace lookup for RDF developers prefix.cc", + "https://openreview.net/forum?id=rJXMpikCZ": "Graph Attention Networks (2018)", + "http://www.semanlink.net/doc/2019/12/highlights_from_conll_and_emnlp": "Highlights from CoNLL and EMNLP 2019", + "http://www.picklematrix.net/archives/000979.html": "SemErgence: Oracle 10g Support for RDF", + "http://thefigtrees.net/lee/sw/sparql-faq": "SPARQL Protocol and Query Language: Frequently AskedQuestions", + "https://thegradient.pub/nlp-imagenet/": "NLP's ImageNet moment has arrived", + "http://bob.pythonmac.org/archives/2005/12/05/remote-json-jsonp/": "from __future__ import * \u00bb Remote JSON - JSONP", + "http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/15.pdf": "A Framework for Ontological Description of Archaeological Scientific Publications", + "http://www.semanlink.net/doc/2020/07/amnesty_international_denonce_l": "Amnesty International d\u00e9nonce l\u2019espionnage d\u2019un journaliste marocain par une technologie quasi ind\u00e9tectable", + "http://www.w3.org/2006/07/SWD/wiki/RDFa": "RDFa - W3C Semantic Web Deployment Wiki", + "http://answers.semanticweb.com/search/?q=schema.org&Submit=search&t=question": "semanticweb.com: questions matching 'schema.org'", + "http://e-claire.org/index.php?2005/04/17/25-outils-onlinegestionnaires-de-favorisbookmarks-social-bookmarking-applications": "", + "http://www.semanlink.net/doc/2019/05/gm_fungus_rapidly_kills_99_of_": "GM fungus rapidly kills 99% of malaria mosquitoes, study suggests - BBC News", + "http://herschel.esac.esa.int/": "Herschel Science Centre", + "https://c2gweb.qa.heliosnissan.net/c2gweb/product/gen?embed=true": "C2GWeb Helios", + "http://www.theguardian.com/info/developer-blog/2014/mar/20/inside-the-guardians-cms-meet-scribe-an-extensible-rich-text-editor": "Inside the Guardian\u2019s CMS: meet Scribe, an extensible rich text editor Info theguardian.com", + "https://www.owasp.org/index.php/REST_Security_Cheat_Sheet": "REST Security Cheat Sheet - OWASP", + "http://www.psikopat.com/html/spirale.htm": "Le disk wahouuu", + "https://www.youtube.com/watch?v=jk2rZTwesp4&index=1&list=RDjk2rZTwesp4": "Kassav Zenith 89 - YouTube", + "http://bitcoin.org/bitcoin.pdf": "Bitcoin: A Peer-to-Peer Electronic Cash System ; Satoshi Nakamoto", + "http://container42.com/2014/11/18/data-only-container-madness/": "Data-only container madness \u00b7 Container42", +
"http://eclipse.developpez.com/faq/?page=gestionDeVersion#installerSubclipseClient": "FAQ Eclipse / subclipse install", + "http://dev.eclipse.org/newslists/news.eclipse.webtools/msg18066.html": "[news.eclipse.webtools] sharing resources between dynamic web projects", + "http://rollerweblogger.org/wiki/Wiki.jsp?page=InstallationGuide12": "RollerWiki: InstallationGuide12", + "http://www.lemonde.fr/sante/article/2012/10/25/la-pollution-fait-autant-de-ravages-sur-la-sante-que-le-paludisme_1780288_1651302.html": "La pollution industrielle fait autant de ravages sur la sant\u00e9 que le paludisme", + "http://www.semanlink.net/doc/2021/06/bura_funerary_urns_niger_terra": "Bura Funerary Urns: Niger Terracottas: An Interpretive Limbo? African Arts MIT Press", + "http://www.aims.ac.za/": "African Institute for Mathematical Sciences AIMS", + "https://www.theguardian.com/commentisfree/2017/jan/12/universal-basic-income-finland-uk": "Universal basic income is becoming an urgent necessity Guy Standing Opinion The Guardian", + "http://www.inhotim.org.br/": "Inhotim", + "http://forum.macbidouille.com/lofiversion/index.php/t137183.html": "Forums MacBidouille > Noos avec airport express", + "http://finance.blog.lemonde.fr/2012/02/12/la-banque-centrale-europeenne-risque-t-elle-dexploser/": "La Banque Centrale Europ\u00e9enne risque-t-elle d\u2019exploser en pr\u00eatant aux banques? D\u00e9mystifier la finance", + "http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified": "[1503.03832] FaceNet: A Unified Embedding for Face Recognition and Clustering", + "http://www.honeynet.org/papers/webapp/index.html": "Know your Enemy: Web Application Threats", + "http://www.twine.com/": "Twine", + "http://www.eetimes.com/news/semi/showArticle.jhtml?articleID=180201688": "EETimes.com - Cellphone could crack RFID tags, says cryptographer", + "http://fonnesbeck.github.io/ScipySuperpack/": "fonnesbeck/ScipySuperpack @ GitHub", + "https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html": "In Depth: Gaussian Mixture Models Python Data Science Handbook", + "http://www.semanlink.net/doc/2019/07/kalsarikannit_thisisfinland": "Kalsarik\u00e4nnit - thisisFINLAND", + "http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru": "Google AI Blog: Extracting Structured Data from Templatic Documents (2020)", + "http://spec.commonmark.org/": "CommonMark Spec", + "http://www.w3.org/2007/08/grddl/": "W3C GRDDL service", + "http://homepages.inf.ed.ac.uk/lzhang10/maxent.html": "Maximum Entropy Modeling", + "http://www.jpl.nasa.gov/news/news.php?release=2012-381": "NASA Voyager 1 Encounters New Region in Deep Space - NASA Jet Propulsion Laboratory", + "http://code.google.com/webtoolkit/": "Google Web Toolkit - Build AJAX apps in the Java language", + "http://scikit-learn.org/stable/modules/feature_extraction.html": "Feature extraction \u2014 scikit-learn documentation", + "http://liza.io/ijcai-session-notes-learning-common-sense/": "IJCAI Session Notes: Learning Common Sense \u00b7 Liza", + "http://norman.walsh.name/threads/webservices": "Norman Walsh - Thread: Web Services", + "http://www.semanlink.net/doc/2019/12/natural_language_processing_c": "Natural Language Processing \u2013 Current Applications and Future Possibilities", + "http://www.semanlink.net/doc/2021/10/l%E2%80%99intelligence_artificielle_ge": "L\u2019intelligence artificielle, g\u00e9nie de la biologie mol\u00e9culaire", + 
"http://passeurdesciences.blog.lemonde.fr/2013/09/04/un-nouveau-champignon-tueur-attaque-amphibiens-biodiversite/": "Un nouveau champignon-tueur s\u2019attaque aux amphibiens Passeur de sciences", + "http://www.semanlink.net/doc/2021/01/ml_and_nlp_research_highlights_": "ML and NLP Research Highlights of 2020", + "http://www.ldodds.com/blog/archives/000289.html": "Lost Boy: Feeding Google Co-Op with SPARQL", + "http://ecologie.blog.lemonde.fr/2011/11/08/reduire-nos-emissions-de-30-et-favoriser-l%E2%80%99emploi-le-rapport-qui-derange/": "R\u00e9duire nos \u00e9missions de 30 % pour favoriser l\u2019emploi, le rapport qui d\u00e9range Eco(lo)", + "http://www.semanlink.net/doc/2019/07/balade_dans_kigali_ville_ruche": "Balade dans Kigali, ville-ruche qui se r\u00eave en \u00ab\u00a0Singapour africain\u00a0\u00bb", + "http://machinelearningmastery.com/useful-things-to-know-about-machine-learning/": "Useful Things To Know About Machine Learning - Machine Learning Mastery", + "https://www.arte.tv/fr/videos/082806-000-A/comment-trump-a-manipule-l-amerique/": "Comment Trump a manipul\u00e9 l'Am\u00e9rique ARTE", + "http://www.semanlink.net/doc/2021/06/raphaelsty_entitype_predict_e": "raphaelsty/entitype : Predict entities type in context using transformers.", + "https://www.mnot.net/cache_docs/": "Un tutoriel de la mise en cache", + "http://www.w3.org/2001/sw/sweo/": "Semantic Web Education and Outreach (SWEO) Interest Group", + "https://staff.fnwi.uva.nl/m.derijke/wp-content/papercite-data/pdf/kenter-short-2015.pdf": "Short Text Similarity with Word Embeddings", + "http://www.liberation.fr/debats/2016/07/01/bernard-stiegler-l-acceleration-de-l-innovation-court-circuite-tout-ce-qui-contribue-a-l-elaboration_1463430": "Bernard Stiegler: \u00abL\u2019acc\u00e9l\u00e9ration de l\u2019innovation court-circuite tout ce qui contribue \u00e0 l\u2019\u00e9laboration de\u00a0la\u00a0civilisation\u00bb - Lib\u00e9ration", + "https://www.wired.com/story/ai-can-recognize-images-but-understand-headline/amp?__twitter_impression=true": "AI Can Recognize Images, But Text Has Been Tricky\u2014Until Now WIRED", + "http://nautil.us/issue/34/adaptation/sex-is-a-coping-mechanism": "Sex Seems Like a Waste\u2014So Why Do So Many Creatures Need It to Reproduce?", + "https://www.quora.com/What-product-breakthroughs-will-recent-advances-in-deep-learning-enable/answer/Eric-Jang": "What product breakthroughs will recent advances in deep learning enable? - Quora", + "http://bayosphere.com/why-drupal": "Why Drupal? 
Bayosphere", + "http://cs229.stanford.edu/proj2013/ChaseGenainKarniolTambour-LearningMulti-LabelTopicClassificationofNewsArticles.pdf": "Learning Multilabel classification of news articles (2013)", + "https://www.youtube.com/watch?v=L3TcSwwQL_g": "Djamila", + "https://medium.com/@chacon/how-to-really-learn-a-language-maybe-c1b07223278c#.fkbf2sjxc": "How to Really Learn a Language (Maybe) \u2014 Medium", + "http://www.semanlink.net/doc/2021/10/les_bouts_de_bois_de_dieu": "Les Bouts de bois de Dieu", + "http://trice.semsol.org/": "Trice", + "http://www.ted.com/talks/rajesh_rao_computing_a_rosetta_stone_for_the_indus_script.html": "Rajesh Rao: Une Pierre de Rosette pour l'\u00e9criture de l'Indus Video on TED.com", + "https://datascience.stackexchange.com/questions/987/text-categorization-combining-different-kind-of-features": "machine learning - Text categorization: combining different kind of features - Data Science Stack Exchange", + "http://www.semanlink.net/doc/2020/06/le_principe_de_la_greffe_asso": "Le principe de la greffe - Association Vergers Vivants", + "http://lists.w3.org/Archives/Public/semantic-web/2010Jun/0077.html": "Diff/versioning as metadata for dynamic graphs?", + "http://www.websemanticsjournal.org/ps/pub/2005-15": "Horst, Herman J. ter: Completeness, decidability and complexity of entailment for RDF Schema and a semantic extension involving the OWL vocabulary", + "http://applidium.com/en/news/cracking_siri/": "Applidium \u2014 Cracking Siri", + "http://bnode.org/blog/2011/06/06/schema-org-threat-or-opportunity": "Schema.org - Threat or Opportunity? - benjamin nowack's blog", + "https://www.iro.umontreal.ca/~bengioy/papers/ftml_book.pdf": "Learning Deep Architectures for AI By Yoshua Bengio (2009)", + "http://www.snee.com/bobdc.blog/2010/08/converting-csv-to-rdf.html": "Converting CSV to RDF - bobdc.blog", + "http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib": "A knowledge graph embedding library for reproducible research", + "http://www.semanlink.net/doc/2021/10/next_gen_sentence_embeddings_wi": "Next-Gen Sentence Embeddings with Multiple Negatives Ranking Loss Pinecone", + "http://www.abc.net.au/tv/fora/stories/2009/04/24/2552097-p.htm": "Daniel Everett: Endangered Languages and Lost Knowledge", + "http://www.zdnet.com/article/a-little-semantics-goes-a-long-way/": "What IBM, the Semantic Web Company, and Siemens are doing with semantic technologies ZDNet", + "https://cmusphinx.github.io/wiki/phonemerecognition/": "Phoneme Recognition (caveat emptor) \u2013 CMUSphinx Open Source Speech Recognition", + "https://drive.google.com/folderview?id=0BxKBnD5y2M8NclFWSXNxa0JlZTg&usp=drive_web": "Yann LeCun, Coll\u00e8ge de France", + "http://simile.mit.edu/mail/ReadMsg?listId=14&msgId=20926": "[Linking-open-data] Returning to backlinks", + "https://www.oreilly.com/ideas/open-endedness-the-last-grand-challenge-youve-never-heard-of": "Open-endedness: The last grand challenge you\u2019ve never heard of - O'Reilly Media", + "http://www.economist.com/news/briefing/21677228-technology-behind-bitcoin-lets-people-who-do-not-know-or-trust-each-other-build-dependable?fsrc=scn/tw/te/pe/ed/blockchains": "The great chain of being sure about things The Economist", + "http://www.semanlink.net/doc/2019/09/how_complex_systems_fail": "How Complex Systems Fail", + "http://ebusiness-unibw.org/pipermail/goodrelations/2012-November/thread.html": "The goodrelations November 2012 Archive by thread", + "http://neuro.imm.dtu.dk/wiki/ESWC_2012": "9th Extended Semantic Web Conference - 
Brede Wiki", + "http://michaeldoig.net/4/installing-mamp-and-wordpress.htm": "Installing Wordpress Locally Using MAMP \u2014 Michael Doig", + "https://bcomposes.wordpress.com/2015/11/26/simple-end-to-end-tensorflow:-examples/?utm_content=buffer46554&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Simple end-to-end TensorFlow examples Bcomposes", + "https://www.quora.com/When-should-we-not-use-Elasticsearch": "When should we not use Elasticsearch? - Quora", + "http://www.semanlink.net/doc/2021/10/incremental_clustering_an_ove": "Incremental Clustering - an overview ScienceDirect Topics", + "http://n2.talis.com/wiki/SPARQL_intro": "SPARQL intro - N2 wiki", + "http://www.xml.com/pub/a/2005/11/09/fixing-ajax-xmlhttprequest-considered-harmful.html?page=2": "XML.com: Fixing AJAX: XMLHttpRequest Considered Harmful", + "http://pisani.blog.lemonde.fr/pisani/2005/05/mind_manager_vi.html": "Transnets : Mind Manager : Visualiser la complexit\u00e9, penser autrement", + "https://www.monumentaltrees.com/fr/": "Arbres monumentaux \u00b7 un inventaire des arbres gros et anciens", + "http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su": "Knowledge Graph Embedding: A Survey of Approaches and Applications - IEEE (2017)", + "http://ikewiki.salzburgresearch.at/": "IkeWiki", + "http://www.wired.com/2014/06/the-future-of-biotech-crops/": "The Next Generation of GM Crops Has Arrived\u2014And So Has the Controversy Science WIRED", + "http://www.lemonde.fr/planete/article/2013/04/11/filmer-la-cruaute-envers-les-animaux-devient-un-crime-aux-etats-unis_3157361_3244.html": "Filmer la cruaut\u00e9 envers les animaux devient un crime aux Etats-Unis", + "http://www.sitemaps.org/protocol.html": "sitemaps.org - Protocol", + "https://openreview.net/forum?id=SyK00v5xx": "A Simple but Tough-to-Beat Baseline for Sentence Embeddings (2017)", + "http://www.semanlink.net/doc/2019/07/au_burkina_un_premier_lacher_d": "Au Burkina, un premier l\u00e2cher de moustiques g\u00e9n\u00e9tiquement modifi\u00e9s cr\u00e9e la pol\u00e9mique", + "http://news.bbc.co.uk/2/hi/science/nature/6518161.stm": "Giant crystals enjoyed perfection", + "http://tinyclouds.org/colorize/": "Colorizing Black and White Photos with deep learning", + "http://data.semanticweb.org/conference/www/2012/paper/809/html": "Counting beyond a Yottabyte, or how SPARQL 1.1 Property Paths will prevent adoption of the standard Semantic Web Dog Food", + "http://linter.structured-data.org/": "Structured Data Linter", + "https://backchannel.com/an-exclusive-look-at-how-ai-and-machine-learning-work-at-apple-8dbfb131932b#.3jskir300": "An Exclusive Look at How AI and Machine Learning Work at Apple \u2013 Backchannel", + "http://www.nytimes.com/2014/02/20/opinion/kristof-is-that-sausage-worth-this.html?_r=0": "Is That Sausage Worth This? 
- NYTimes.com", + "http://messenger.jhuapl.edu/": "MESSENGER Web Site", + "https://www.ghostery.com": "Ghostery, Inc.", + "http://www.alphaworks.ibm.com/tech/sher": "alphaWorks : Scalable Highly Expressive Reasoner : Overview", + "http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/": "Absurd Creature of the Week: The Wasp That Enslaves Cockroaches With a Sting to the Brain - Wired Science", + "http://www.nytimes.com/2013/09/17/science/dna-double-take.html": "DNA Double Take - NYTimes.com", + "http://www.lemonde.fr/festival/article/2014/07/03/21-juillet-2012-le-monde-change-la-vie-de-darina-al-joundi_4450404_4415198.html": "Quand un article du \u00ab\u00a0Monde\u00a0\u00bb, lu par Manuel Valls, a chang\u00e9 la vie de Darina Al-Joundi", + "http://crave.cnet.co.uk/desktops/0,39029426,49256662,00.htm": "Mac Mini vs. Microsoft Media Center: Round 1 - Crave at CNET.co.uk", + "http://www.asteraweke.com/11162.jpg": "Aster Aweke", + "http://www.aclweb.org/anthology/Q15-1016": "Improving Distributional Similarity with Lessons Learned from Word Embeddings (O Levy - 2015)", + "http://www.nature.com/news/2006/060605/full/060605-7.html": "Complex ecosystems arrived early", + "http://news.bbc.co.uk/2/hi/science/nature/8040073.stm": "BBC NEWS World's most daunting parking job", + "http://www.lemonde.fr/pixels/article/2014/06/04/l-etrange-disparition-du-logiciel-truecrypt_4431134_4408996.html": "L'\u00e9trange disparition du logiciel de chiffrement TrueCrypt", + "http://www.semanlink.net/doc/2021/01/nlp_based_information_retrieval": "NLP Based Information Retrieval System by Nikhil Sharma Towards Data Science", + "http://trimc-devops.blogspot.fr/2015/03/running-docker-applications-apache.html": "DevOps: Docker and Apache Tomcat", + "http://google.blognewschannel.com/index.php/archives/2005/10/10/google-adds-tagging/": "Google Adds Tagging", + "http://www.lyricsbay.com/o_tempo_nao_para_lyrics-cazuza.html": "O TEMPO NAO PARA lyrics CAZUZA", + "http://lists.w3.org/Archives/Public/public-swd-wg/2007Apr/0015.html": "Using tags for POWDER content labels", + "http://commontag.org/mappings": "Mappings between the CommonTag vocabulary and existing vocabularies", + "http://www.newyorker.com/news/daily-comment/isis-murdered-kenji-goto": "Why ISIS Murdered Kenji Goto - The New Yorker", + "http://rdf.greggkellogg.net/distiller": "RDF Distiller", + "http://zitgist.com/": "Zitgist: the Semantic Web Query Service", + "http://www.arte-tv.com/fr/search__results/1095558.html": "S\u00e9lection de liens sur Aratta - ARTE", + "http://www.simongbrown.com/blog/2005/07/06/delicious_jsp.html": "delicious-jsp - Simon Brown", + "http://internetalchemy.org/2005/11/naked-metadata-using-embedded-rdf": "Internet Alchemy Naked Metadata Using Embedded RDF", + "http://www.w3.org/2005/Incubator/urw3/": "W3C Uncertainty Reasoning for the World Wide Web Incubator Group", + "http://news.bbc.co.uk/2/hi/science_and_environment/10196807.stm": "BBC News - Asteroid probe 'on home straight'", + "https://www.theguardian.com/us-news/2017/may/13/chelsea-manning-freedom-us-military-wikileaks": "Chelsea Manning prepares for freedom: 'I want to breathe the warm spring air' US news The Guardian", + "https://www.lemonde.fr/sciences/article/2018/11/26/des-bebes-genetiquement-modifies-seraient-nes-en-chine_5388942_1650684.html": "Des b\u00e9b\u00e9s g\u00e9n\u00e9tiquement modifi\u00e9s seraient n\u00e9s en Chine", + "http://fr.wikipedia.org/wiki/Matthieu_Pigasse": "Matthieu Pigasse - Wikip\u00e9dia", + 
"http://www.semanlink.net/doc/2019/07/naacl_2019_highlights": "NAACL 2019 Highlights", + "http://www.w3.org/Submission/2008/SUBM-SPARQL-Update-20080715/": "SPARQL Update", + "https://lejournal.cnrs.fr/articles/france-terre-de-dinosaures?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530943412": "France, terre de dinosaures CNRS Le journal", + "http://www.csie.ntu.edu.tw/~cjlin/libshorttext/doc/": "Welcome to LibShortText documentation! \u2014 LibShortText 1.1 documentation", + "http://www.smartlogic.com/": "Content Intelligence Software for automatic content classification, text analysis and information visualization.", + "http://christop.club/2014/05/06/using-gensim-for-lda/": "Using Gensim for LDA (notebook)", + "http://blog.datagraph.org/2010/04/rdf-nosql-diff": "How RDF Databases Differ from Other NoSQL Solutions - The Datagraph Blog", + "http://www.comtech-serv.com/dita.shtml": "Hot Topics: DITA - Darwin Information Typing Architecture", + "http://docs.api.talis.com/platform-api/output-types/rdf-json": "RDF JSON - docs.api (Talis)", + "http://www.semanlink.net/doc/2021/10/kelechi_sur_twitter_excited_": "Kelechi sur Twitter : \"Excited to present AfriBERTa, a multilingual LM pretrained from scratch on 11 African languages with a joint corpus of less than 1GB.\"", + "https://console.ng.bluemix.net/data/exchange/public/entry/view/ac418581e657fc785fe9573c1013c3a6": "Survey results analysis - Analytics Exchange", + "http://web.comlab.ox.ac.uk/oucl/work/boris.motik/publications/mgs08-structured-objects.pdf": "Structured Objects in OWL: Representation and Reasoning. In Proc. of the 17th Int. World Wide Web Conference (WWW 2008), Beijing", + "https://spark.apache.org/": "Apache Spark", + "http://www.brockman.se/writing/method-references.html.utf8": "Object-Oriented Event Listening through Partial Application in JavaScript", + "https://www.theguardian.com/news/2017/nov/05/paradise-papers-leak-reveals-secrets-of-world-elites-hidden-wealth?CMP=twt_gu": "Paradise Papers leak reveals secrets of the world elite's hidden wealth News The Guardian", + "http://ejohn.org/blog/processingjs/": "John Resig - Processing.js", + "http://www.vocabs.org/": "RDF vocabularies", + "http://www.rdfabout.com/demo/validator/": "Online N3 Validator", + "http://passeurdesciences.blog.lemonde.fr/2013/08/18/la-strategie-diabolique-des-futures-plantes-ogm/": "La strat\u00e9gie diabolique des futures plantes OGM Passeur de sciences", + "http://owled2007.iut-velizy.uvsq.fr/PapersPDF/submission_18.pdf": "Structured Ontology Format", + "http://fr.wikipedia.org/wiki/Milou_en_mai": "Milou en mai", + "http://blocodosargentopimenta.com.br/": "Bloco do Sargento Pimenta", + "http://www.oracle.com/technetwork/articles/java/micro-1925135.html": "Real-Time Topic Modeling of Microblogs", + "http://maps.google.com/maps?f=q&hl=fr&q=170+rue+de+Lourmel+Paris+France&ie=UTF8&om=1&z=19&ll=48.839252,2.281881&spn=0.001326,0.003224&t=k": "170, rue de Lourmel", + "http://stackoverflow.com/questions/240546/removing-html-from-a-java-string": "Removing HTML from a Java String - Stack Overflow", + "https://www.monumentaltrees.com/fr/fra/orne/landepatry/2208_cimetieredelalandepatry/3306/": "Les ifs amoureux de la Lande-Patry", + "http://news.bbc.co.uk/1/hi/uk_politics/4102144.stm": "BBC NEWS Politics G8 climate plans 'watered down'", + "http://www.lemonde.fr/biodiversite/article/2017/07/10/la-sixieme-extinction-de-masse-des-animaux-s-accelere-de-maniere-dramatique_5158718_1652692.html": "La sixi\u00e8me extinction 
de masse des animaux s\u2019acc\u00e9l\u00e8re", + "http://www.semanlink.net/doc/2021/10/building_scalable_explainable_": "Building Scalable, Explainable, and Adaptive NLP Models with Retrieval SAIL Blog", + "http://tool-man.org/examples/": "Direct Manipulation Using JavaScript and CSS", + "http://googleresearch.blogspot.fr/2006/08/all-our-n-gram-are-belong-to-you.html": "All Our N-gram are Belong to You", + "http://dannyayers.com/archives/2005/10/24/yet-another-rss-history/": "Danny Ayers, Raw Blog : \u00bb Yet Another RSS History", + "https://lists.w3.org/Archives/Public/public-vocabs/2015May/0042.html": "Re: Sustainable Codes vs Volatile URIs Re: URIs / Ontology for Physical Units and Quantities from Peter Krauss on 2015-05-07 (public-vocabs@w3.org from May 2015)", + "http://blog.semantic-web.at/2010/08/31/why-skos-thesauri-matter-the-next-generation-of-semantic-technologies/": "The Semantic Puzzle Why SKOS thesauri matter \u2013 the next generation of semantic technologies", + "http://coding.smashingmagazine.com/2008/09/16/jquery-examples-and-best-practices/": "jQuery and JavaScript Coding: Examples and Best Practices Smashing Coding", + "http://www.semanlink.net/doc/2019/05/knowledge_graph_technology_and_": "Knowledge Graph Technology and Applications 2019 - Simia", + "http://www.semanlink.net/doc/2019/06/audio_classification_using_tran": "Audio classification using transfer learning approach \u2013 mc.ai", + "http://clair.si.umich.edu/~radev/papers/SIGKDD2010.pdf": "DivRank: the Interplay of Prestige and Diversity in Information Networks", + "http://ercim-news.ercim.eu/en96/special/browsing-and-traversing-linked-data-with-lodmilla": "Browsing and Traversing Linked Data with LODmilla", + "http://dev.uriqr.com/": "Uriqr - A URI Search Engine", + "http://rdfquery.googlecode.com/svn/trunk/demos/markup/markup.html": "Mark it up! (nice rdfQuery demo)", + "http://petrole.blog.lemonde.fr/2013/03/29/debat-sur-la-transition-energetique-vous-pouvez-repeter-la-question/": "D\u00e9bat sur la transition \u00e9nerg\u00e9tique : vous pouvez r\u00e9p\u00e9ter la question ? Oil Man", + "http://www.sarahmei.com/blog/2013/11/11/why-you-should-never-use-mongodb/": "Why You Should Never Use MongoDB \u00ab Sarah Mei", + "http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages": "Your own blog with GitHub Pages and fast_template (4 part tutorial) \u00b7 fast.ai", + "http://www.semanlink.net/doc/2021/08/cmd_how_do_i_kill_the_process": "cmd - How do I kill the process currently using a port on localhost in Windows? 
- Stack Overflow", + "https://nlp.stanford.edu/projects/glove/": "GloVe: Global Vectors for Word Representation", + "http://www.wired.com/2014/09/metanautix/": "Ex-Googler Shares His Big-Data Secrets With the Masses Enterprise WIRED", + "http://sourceforge.net/projects/delicious-java/": "SourceForge.net: del.icio.us Java API", + "http://ebiquity.umbc.edu/blogger/2008/05/26/faviki-uses-wikipedia-and-dbpedia-for-semantic-tagging/": "Faviki uses Wikipedia and DBpedia for semantic tagging", + "https://www.newscientist.com/article/2077533-test-driving-the-hydrogen-car-that-makes-a-little-go-a-long-way/": "Test-driving the hydrogen car that makes a little go a long way New Scientist", + "http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050707.html": "NOVA scienceNOW Dispatches: What We're Thinking About: Keep It Very Simple PBS", + "http://schema.org/": "schema.org", + "http://www.w3.org/2009/12/rdf-ws/Report.html": "W3C Workshop \u2014 RDF Next Steps: Workshop Report", + "http://fr.slideshare.net/OraLassila/size-does-not-matter-if-you-data-is-in-a-silo": "Size does not matter (if your data is in a silo)", + "http://www.lemonde.fr/planete/article/2013/12/10/le-parlement-europeen-rejette-l-interdiction-du-chalutage-en-eaux-profondes_3528525_3244.html": "Le Parlement europ\u00e9en rejette l'interdiction du chalutage en eaux profondes", + "http://www.lemonde.fr/technologies/visuel/2013/07/07/biohackers-les-bricoleurs-d-adn_3441946_651865.html": "Biohackers : les bricoleurs d'ADN", + "http://www.w3.org/wiki/WebSchemas": "WebSchemas - W3C Wiki", + "http://www.acmqueue.com/modules.php?name=Content&pa=showpage&pid=388": "Learning from the Amazon technology platform (a Conversation with Werner Vogel)", + "https://medium.com/@faizanmukardam/simple-guide-to-neural-arithmetic-logic-units-nalu-explanation-intuition-and-code-64bc22605712": "Simple guide to Neural Arithmetic Logic Units (NALU): Explanation, Intuition and Code", + "http://nneitz.wordpress.com/2010/09/18/now-we-must-redefine-tool-redefine-man-or-accept-chimpanzees-as-humans/": "\u201cNow we must redefine \u2018tool\u2019, redefine \u2018man\u2019, or accept chimpanzees as humans.\u201d Sufficiently Radical", + "https://fr.wikipedia.org/wiki/Pain,_Amour_et_Fantaisie": "Pain, Amour et Fantaisie \u2014 Wikip\u00e9dia", + "https://en.wikipedia.org/wiki/Capitalism:_A_Love_Story": "Capitalism: A Love Story - Wikipedia", + "http://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html": "D\u00e9tours des Mondes: Les sites arch\u00e9ologiques de Bura Asinda-Sikka au Niger", + "http://www.businessweek.com/technology/ibms-five-predictions-for-the-next-five-years-12192011.html": "IBM's Five Predictions for the Next Five Years - BusinessWeek", + "http://www.semanlink.net/doc/2020/03/adrian_gschwend_sur_twitter_": "Adrian Gschwend sur Twitter : \"getting started with RDF and JavaScript!...\"", + "https://distill.pub/2017/aia/": "Using Artificial Intelligence to Augment Human Intelligence", + "http://del.icio.us/help/api/": "del.icio.us/help/api", + "http://www.kk.org/2008/08/out-of-control-the-illustrated.php": "Out Of Control - The New Biology of Machines, Social Systems, & the Economic World - Kevin Kelly", + "http://www.semanlink.net/doc/2020/02/is_the_future_of_neural_network": "Is the future of Neural Networks Sparse? 
An Introduction", + "http://www.kurzweilai.net/": "Kurzweil Accelerating Intelligence", + "http://twitter.bug.quietbabylon.com/": "Tweets out of Context", + "http://news.bbc.co.uk/2/hi/science/nature/7864087.stm": "BBC NEWS 'Silver sensation' seeks cold cosmos", + "http://edition.cnn.com/2013/04/17/opinion/rinaudo-robots/index.html?iid=article_sidebar": "Opinion: Why robots are ready for takeoff - CNN.com", + "http://www.semanlink.net/doc/2021/09/nils_reimers_sur_twitter_int": "Nils Reimers sur Twitter : \"Introduction - Neural Search\"", + "http://www.roughlydrafted.com/2008/06/14/cocoa-for-windows-flash-killer-sproutcore/": "Cocoa for Windows + Flash Killer = SproutCore \u2014 RoughlyDrafted Magazine", + "https://hackernoon.com/life-after-1-year-of-using-neo4j-4eca5ce95bf5#.wa9wncbe4": "Life after 1 year of using Neo4J", + "http://blog.stephenwolfram.com/2014/06/wolfram-programming-cloud-is-live/": "Wolfram Programming Cloud Is Live!\u2014Stephen Wolfram Blog", + "http://www.semanlink.net/doc/2020/10/librairies_independantes": "librairies ind\u00e9pendantes", + "http://www4.wiwiss.fu-berlin.de/rdf_browser/": "Disco - Hyperdata Browser", + "https://www.datacamp.com/community/tutorials/pandas-multi-index": "pandas Multi-index and groupbys (article) - DataCamp", + "http://www.lemonde.fr/technologies/article/2011/03/11/pourquoi-la-reutilisation-des-donnees-publiques-a-des-fins-commerciales-doit-etre-gratuite_1491922_651865.html#ens_id=1502935": "Pourquoi la r\u00e9utilisation des donn\u00e9es publiques \u00e0 des fins commerciales doit \u00eatre gratuite", + "https://dl.acm.org/citation.cfm?doid=3209542.3209561": "Studying the Spatio-Temporal Dynamics of Small-Scale Events in Twitter", + "https://blog.kovalevskyi.com/deeplearning-images-revision-m13-14-pytorch-1-0-git-integration-smaller-boot-time-1cb5bda59968": "DeepLearning Images Revision M13/14. PyTorch 1.0. Git Integration. Smaller Boot Time.", + "http://khaidoan.wikidot.com/solr": "Apache Solr - Do only what matters", + "http://gingertech.net/2014/01/08/use-deck-js-as-a-remote-presentation-tool/": "Use deck.js as a remote presentation tool ginger's thoughts", + "http://www.youtube.com/watch?v=ow40LQs0ue4": "N'Kosi Sikeleli Africa- With Miriam Makeba", + "http://www.macworld.com/article/51830/2006/07/showallfinder.html": "Show all files in the Finder Software Mac OS X Hints Macworld", + "http://dallemang.typepad.com/my_weblog/2010/08/extending-owl-rl-.html": "S is for Semantics: Extending OWL RL", + "http://products.enterpriseitplanet.com/dms/rdbms/1228848416.html": "Virtuoso (OpenLink Software, Inc) - Data Management/Storage/Relational Databases - Enterprise IT Planet Product Guide", + "http://blogs.wsj.com/cio/2016/06/20/downfall-of-dao-digital-currency-fund-shows-blockchain-reputational-risk/": "Downfall of DAO Digital Currency Fund Shows Blockchain Reputational Risk - CIO Journal. 
- WSJ", + "http://ruben.verborgh.org/blog/2014/03/11/towards-web-scale-web-querying/": "Towards Web-scale Web querying Ruben Verborgh", + "http://www.semanlink.net/doc/2019/08/neural_knowledge_acquisition_vi": "Neural Knowledge Acquisition via Mutual Attention between Knowledge Graph and Text (2018)", + "http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&tp=&arnumber=6805187": "IEEE Xplore Abstract - Neurogrid: A Mixed-Analog-Digital Multichip System for Large-Scale Neural Simulations", + "http://www-connex.lip6.fr/~denoyer/wordpress/wp-content/uploads/2014/09/criteo_2017.pdf": "LEARNING GRAPH EMBEDDINGS FOR NODE LABELING AND INFORMATION DIFFUSION IN SOCIAL NETWORKS (2017)", + "https://pdfs.semanticscholar.org/f6d1/6eaf975af03a172c73843ff506592c952a04.pdf": "Technical usability of Wikidata\u2019s linked data", + "http://www.semanlink.net/doc/2020/10/la_sonde_americaine_osiris_rex_": "La sonde am\u00e9ricaine Osiris-Rex a r\u00e9ussi sa man\u0153uvre sur l\u2019ast\u00e9ro\u00efde B\u00e9nou", + "https://staltz.com/a-plan-to-rescue-the-web-from-the-internet.html": "Andr\u00e9 Staltz - A plan to rescue the Web from the Internet", + "http://fgiasson.com/blog/index.php/2007/06/04/my-personal-library-and-the-semantic-web/": "My Personal Library and the Semantic Web at Frederick Giasson\u2019s Weblog", + "http://news.bbc.co.uk/2/hi/africa/5129350.stm": "BBC NEWS - LRA victim: 'I cannot forget and forgive'", + "http://dcentproject.eu/": "D-CENT", + "https://github.com/carrot2/carrot2": "Carrot2: Text Clustering Algorithms and Applications", + "http://www.lemonde.fr/idees/article/2014/07/18/les-entrepreneurs-africains-un-atout-pour-la-france_4458954_3232.html": "Les entrepreneurs africains, un atout pour la France", + "http://data.gov.uk/blog/guest-post-developers-guide-linked-data-apis-jeni-tennison": "Guest Post: A Developers' Guide to the Linked Data APIs - Jeni Tennison data.gov.uk", + "http://www.lemonde.fr/europe/article/2017/11/22/le-verdict-du-proces-de-ratko-mladic-accuse-de-genocide-attendu-a-la-haye_5218588_3214.html": "Les crimes de Ratko Mladic, \u00ab\u00a0parmi les plus haineux de l\u2019humanit\u00e9\u00a0\u00bb", + "http://internetactu.blog.lemonde.fr/2012/05/16/big-data-grande-illusion/": "Big Data, grande illusion ? 
InternetActu", + "http://www.nytimes.com/2012/01/22/business/apple-america-and-a-squeezed-middle-class.html": "Apple, America and a Squeezed Middle Class - NYTimes.com", + "http://cassandra.apache.org/": "The Apache Cassandra Project", + "http://www.semanlink.net/doc/2021/01/derwenai_kglab_graph_based_dat": "DerwenAI/kglab: an abstraction layer in Python for building knowledge graphs", + "http://www.semanlink.net/doc/2020/02/hugging_face_how_to_train_a_ne": "Hugging Face: How to train a new language model from scratch using Transformers and Tokenizers", + "http://www.semanlink.net/doc/2019/07/nominations_for_acl_2019_best_p": "Nominations for ACL 2019 Best Paper Awards - ACL 2019", + "http://aws.amazon.com/fr/architecture/": "Centre d'architecture AWS", + "http://www.technologyreview.com/view/541276/deep-learning-machine-teaches-itself-chess-in-72-hours-plays-at-international-master/": "Deep Learning Machine Teaches Itself Chess in 72 Hours, Plays at International Master Level MIT Technology Review", + "https://www.pbs.org/wgbh/nova/article/200000-marine-viruses/": "Scientists Just Found 200,000 New Marine Viruses NOVA PBS", + "http://thoughton.co.uk/digitallife/weblog/2007/03/from_imovie_to.php": "From iMovie to YouTube, a.s.a.p.", + "https://arxiv.org/abs/1712.01208v1": "[1712.01208] The Case for Learned Index Structures", + "https://www.theguardian.com/lifeandstyle/2016/aug/28/inner-life-does-knowledge-matter-in-the-age-of-google": "Does knowledge matter in the age of Google? The Guardian", + "http://www.nasa.gov/mission_pages/voyager/termination_shock.html": "Voyager 2 Proves the Solar System is Squashed", + "http://www.semanlink.net/doc/2021/10/google_ai_blog_exploring_trans": "Google AI Blog: Exploring Transfer Learning with T5: the Text-To-Text Transfer Transformer (2020)", + "http://www.manudibango.net/": "Manu DIBANGO - Official Website - Site Web Officiel", + "http://www.semanlink.net/doc/2019/07/bert_s_success_in_some_benchmar": "BERT's success in some benchmarks tests may be simply due to the exploitation of spurious statistical cues in the dataset. Without them it is no better then random. : MachineLearning", + "http://www.asimovinstitute.org/neural-network-zoo/": "The Neural Network Zoo - The Asimov Institute", + "http://www.semanlink.net/doc/2019/10/ibm_cloud_speech_to_text_refe": "IBM Cloud Speech to Text : R\u00e9f\u00e9rences de recherche", + "http://www.flickr.com/photos/milstan/sets/72157623935704725/": "Hypios VoCamp Paris 2010 - a set on Flickr", + "https://github.com/jwise/28c3-doctorow/blob/master/transcript.md": "The Coming War on General Computation - Cory Doctorow", + "http://telecom.ebizproduction.com/sites/default/files/files/open%20data%20F%20bancilhon.pdf": "Data publica : open data (slides)", + "https://onionesquereality.wordpress.com/2016/05/18/where-does-the-sigmoid-in-logistic-regression-come-from/": "Where does the Sigmoid in Logistic Regression come from? 
Onionesque Reality", + "http://new.bizvortex.com/wp-content/uploads/2010/05/Business-Case-Tips.pdf": "Business Case Tips", + "http://stackoverflow.com/questions/10934316/jersey-default-cache-control-to-no-cache": "java - Jersey: Default Cache Control to no-cache - Stack Overflow", + "http://www.jenitennison.com/blog/node/145": "Using Freebase Gridworks to Create Linked Data Jeni's Musings", + "http://www.cornell.edu/video/jeff-hawkins-on-how-brain-science-will-change-computing": "Jeff Hawkins: How brain science will change computing - CornellCast", + "http://www.moula-moula.de": "", + "https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf": "BiLSTM-CNN-CRF Implementation for Sequence Tagging", + "http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em": "[2004.14843] Knowledge Graph Embeddings and Explainable AI", + "https://rare-technologies.com/text-summarization-with-gensim/": "Text Summarization with Gensim", + "https://arxiv.org/abs/1810.07150": "[1810.07150] Subword Semantic Hashing for Intent Classification on Small Datasets", + "http://www.lemonde.fr/pixels/article/2015/05/08/pour-snowden-le-scandale-des-ecoutes-en-allemagne-confirme-un-espionnage-de-masse_4630493_4408996.html": "Pour Snowden, le scandale des \u00e9coutes en Allemagne confirme un espionnage \u00ab de masse \u00bb", + "http://www.lafcpug.org/Tutorials/basic_you_tube.html": "uploading your movies for YOU Tube", + "https://www.theguardian.com/science/2017/dec/13/why-are-palaeontologists-suing-trump": "Why are palaeontologists suing Trump? Elsa Panciroli Science The Guardian", + "http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_": "fasterthanlime \ud83c\udf0c sur Twitter : google has a secret deal with facebook called \"Jedi Blue\"", + "http://googlewebmastercentral.blogspot.fr/2008/09/demystifying-duplicate-content-penalty.html": "Official Google Webmaster Central Blog: Demystifying the \"duplicate content penalty\"", + "http://bnode.org/blog/2010/08/13/dynamic-semantic-publishing-for-any-blog-part-2-linked-readwriteweb": "Dynamic Semantic Publishing for any Blog (Part 2: Linked ReadWriteWeb) - benjamin nowack's blog", + "http://i-semantics.tugraz.at/industry-track/richard-cyganiak-sigmaee": "Richard Cyganiak: Sigma EE \u2013 Reaping low-hanging fruits in RDF-based data integration \u2013 I-SEMANTICS", + "https://lejournal.cnrs.fr/articles/crispr-cas9-des-ciseaux-genetiques-pour-le-cerveau": "CRISPR-Cas9: des ciseaux g\u00e9n\u00e9tiques pour le cerveau CNRS Le journal", + "http://www.crystalfighters.com/": "Crystal Fighters", + "http://mpld3.github.io/index.html": "mpld3 \u2014 Bringing Matplotlib to the Browser", + "http://www.semanlink.net/doc/2021/06/streamlit": "Streamlit", + "http://docs.codehaus.org/display/SONAR/Installing+SonarQube+in+Eclipse": "Installing SonarQube in Eclipse - SonarQube - Codehaus", + "http://www.semanlink.net/doc/2020/07/how_to_use_bert_for_finding_sim": "How to use BERT for finding similar sentences or similar news? 
\u00b7 Issue #876 \u00b7 huggingface/transformers", + "http://www.semanlink.net/doc/2020/11/l_olivier_millenaire_de_kavousi": "L'olivier mill\u00e9naire de Kavousi en Cr\u00e8te", + "http://www.nytimes.com/2013/10/18/science/fossil-skull-may-rewrite-humans-evolutionary-story.html?adxnnl=1&partner=rss&emc=rss&adxnnlx=1382129702-eokKMk+XDdhKASJOK4RWCg": "Skull Fossil Suggests Simpler Human Lineage - NYTimes.com", + "http://www.semanlink.net/doc/2020/02/minhash_token_filter_%7C_elastics": "MinHash token filter Elasticsearch Reference", + "http://publib.boulder.ibm.com/infocenter/wsiihelp/v8r3/index.jsp?topic=/com.ibm.websphere.ii.foundation.appdev.content.doc/developing/iiyvwdg12.htm": "Internationalisation des composants Web", + "https://www.wired.com/story/facebook-alternatives/": "The Best Alternative For Every Facebook Feature WIRED", + "http://apassant.net/blog/2006/10/05/from-rss-to-sioc-using-sparql/": "From RSS to SIOC using SPARQL : Alexandre Passant", + "http://simile.mit.edu/wiki/Piggy_Bank": "Piggy Bank - Home page", + "http://bugbrother.blog.lemonde.fr/files/2009/04/hermessortezcouvert.1240345050.pdf": "Comment contourner les syst\u00e8mes de tra\u00e7abilit\u00e9 ?", + "http://www.w3.org/2001/tag/doc/whenToUseGet.html#checklist": "URIs, Addressability, and the use of HTTP GET and POST", + "http://aclweb.org/anthology/P18-1002": "A La Carte Embedding: Cheap but Effective Induction of Semantic Feature Vectors (2018)", + "http://lists.w3.org/Archives/Public/public-esw-thes/2011Feb/0021.html": "GraphViz and SKOS...", + "http://cs231n.github.io/convolutional-networks/": "CS231n Convolutional Neural Networks for Visual Recognition", + "http://www.brainjar.com/": "BrainJar.com: Experiments in Web Programming", + "http://blogs.zdnet.com/semantic-web/?p=132": "Commercialising the Semantic Web (panel at www 2008)", + "http://weblog.burningbird.net/2005/10/12/portable-data/": "Burningbird \u00bb Portable Data", + "http://events.linkeddata.org/ldow2008/papers/21-servant-linking-enterprise-data.pdf": "Linking Enterprise Data", + "http://www.cerimes.education.fr/index.php?page=fiches,view,1920,4,7,248,,,search_this_word,10": "Un si\u00e8cle de progr\u00e8s sans merci", + "http://nowiknow.com/the-lichen-loophole/": "The Lichen Loophole", + "http://wiki.iks-project.eu/index.php/VIE": "VIE - IKS Project", + "http://www.jgoodwin.net/?p=1043": "Experimenting with Dynamic Topic Models Jonathan Goodwin", + "http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.8399": "CiteSeerX \u2014 A Maximum Entropy Approach to Natural Language Processing", + "http://www.cea.fr/multimedia/Documents/publications/clefs-cea/CLEFS66-FR-FINAL.pdf": "R\u00e9volutions quantiques - CEA", + "http://www.semanlink.net/doc/2021/08/minimal_physicalism_as_a_scale_": "Minimal physicalism as a scale-free substrate for cognition and consciousness Neuroscience of Consciousness Oxford Academic", + "http://www.youtube.com/watch?v=tM6VOtsGVt0&NR=1": "Seyni Kountch\u00e9", + "http://www.semanlink.net/doc/2021/06/gobero": "Gobero", + "http://ebiquity.umbc.edu/blogger/2007/08/18/rdf123-maps-spreadsheet-data-to-rdf/": "RDF123 maps spreadsheet data to RDF", + "https://www.technologyreview.com/s/522066/solving-the-neural-code-conundrum-digital-or-analog/": "Solving the Neural Code Conundrum: Digital or Analog? 
- MIT Technology Review", + "http://start.aimpages.com/": "AIM Pages", + "http://stackoverflow.com/questions/5319754/cross-reference-named-anchor-in-markdown": "html - Cross-reference (named anchor) in markdown - Stack Overflow", + "https://diuf.unifr.ch/main/xi/diplodocus": "dipLODocus[RDF]", + "http://www.dajobe.org/2004/01/turtle/": "Turtle - Terse RDF Triple Language", + "http://www.permadi.com/tutorial/jsFunc/": "Introduction and Features of JavaScript \"Function\" Objects", + "http://www.semanlink.net/doc/2020/09/raphaelsty_abayes_autoregressi": "raphaelsty/abayes: Autoregressive Bayesian linear model", + "http://www.openrdf.org": "", + "https://www.ted.com/talks/douglas_rushkoff_how_to_be_team_human_in_the_digital_future#t-731427": "Douglas Rushkoff: How to be \"Team Human\" in the digital future TED Talk", + "http://ast2014.fzi.de/": "Applications of Semantic Technologies - AST 2014 8th International Workshop at INFORMATIK 2014, September 22-26, 2014, Stuttgart (Germany)", + "https://www.youtube.com/watch?v=j5iFupLkwXo": "Les fran\u00e7ais vice-champions du monde du 4x400m (WC Paris 2003) HQ - YouTube", + "http://www.markbaker.ca/2003/05/RDF-Forms/": "RDF Forms", + "https://lists.w3.org/Archives/Public/www-tag/2015Sep/0000.html": "keygen being destroyed when we need it from Tim Berners-Lee on 2015-09-01 (www-tag@w3.org from September 2015)", + "https://www.theguardian.com/books/2016/apr/15/neoliberalism-ideology-problem-george-monbiot": "Neoliberalism \u2013 the ideology at the root of all our problems Books The Guardian", + "http://amid.fish/reproducing-deep-rl": "Lessons Learned Reproducing a Deep Reinforcement Learning Paper", + "http://www.semanlink.net/doc/2019/09/rubensworks_rdf_dereference_js_": "rubensworks/rdf-dereference.js: Dereference any URL for its RDF contents", + "http://www.readwriteweb.com/archives/semantic_web_patterns.php": "Semantic Web Patterns: A Guide to Semantic Technologies", + "https://nlpparis.wordpress.com/2018/11/29/paris-nlp-season-3-meetup-2/": "Paris NLP Season 3 Meetup #2 \u2013 Paris NLP", + "https://news.ycombinator.com/item?id=18085765": "Ask HN: What are some of the best documentaries you've seen? Hacker News", + "http://teleobs.nouvelobs.com/tv_programs/2008/11/7/chaine/planete/2/30/darfour-le-diable-arrive-a-cheval": "Darfour, le diable arrive \u00e0 cheval", + "http://www.semanticoverflow.com/questions/1476/expressing-constraints-using-rdf-owl-or-something-else": "Expressing Constraints using RDF/OWL or something else? 
- Semantic Overflow", + "http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/": "Implementing a CNN for Text Classification in TensorFlow \u2013 WildML", + "http://lists.w3.org/Archives/Public/public-vocabs/": "public-vocabs@w3.org Mail Archives", + "http://rdf2h-browser.linked.solutions": "RDF2h Browser", + "https://jalammar.github.io/illustrated-transformer/": "The Illustrated Transformer \u2013 Jay Alammar", + "http://semanticweb.com/catching-up-with-the-w3c-and-its-focus-on-the-enterprise_b23669": "Catching Up With the W3C And Its Focus On the Enterprise - semanticweb.com", + "https://en.wikipedia.org/wiki/A_People%27s_History_of_the_United_States": "A People's History of the United States - Wikipedia", + "http://blog.dbtune.org/post/2009/10/27/Music-recommendation-and-Linked-Data": "Music recommendation and Linked Data - DBTune blog", + "http://www.spurl.net/": "http://www.spurl.net", + "http://www.javaworld.com/javaworld/jw-04-2004/jw-0419-httpunit.html": "Test Web applications with HttpUnit - JavaWorld", + "http://www.semanlink.net/doc/2019/10/meet_albert_a_new_%E2%80%98lite_bert%E2%80%99_": "Meet ALBERT: a new \u2018Lite BERT\u2019 from Google & Toyota with State of the Art NLP performance and 18x fewer parameters.", + "https://prodi.gy/": "Prodigy \u00b7 An annotation tool for AI, Machine Learning & NLP", + "https://arxiv.org/abs/1802.01021": "[1802.01021] DeepType: Multilingual Entity Linking by Neural Type System Evolution", + "http://www.lespetitescases.net/photos-livres-musiques-what-else": "Photos, livres, musiques, what else ? Les petites cases", + "http://link.springer.com/book/10.1007/978-3-319-10491-1": "EC-WEB 2014 proceedings - Springer", + "http://bugbrother.blog.lemonde.fr/2009/02/26/tout-ce-que-vous-avez-toujours-voulu-pirater-sans-jamais-savoir-comment-proceder/": "Tout ce que vous avez toujours voulu pirater sans jamais savoir comment proc\u00e9der - BUG BROTHER - Blog LeMonde.fr", + "https://developers.googleblog.com/2018/04/text-embedding-models-contain-bias.html": "Google Developers Blog: Text Embedding Models Contain Bias. Here's Why That Matters.", + "https://scholarlyrepository.miami.edu/oa_dissertations/2145/": "\"Deep Learning Based Imbalanced Data Classification and Information Retrieval for Multimedia Big Data\" by Yilin Yan", + "https://www.jstage.jst.go.jp/article/transinf/E94.D/10/E94.D_10_1854/_article": "A Short Introduction to Learning to Rank (2010)", + "http://lexpansion.lexpress.fr/economie/les-vraies-raisons-du-succes-des-verts-allemands_251414.html": "Les vraies raisons du succ\u00e8s des Verts allemands - LExpansion.com", + "http://ontologies.makolab.com/gc/gc.html": "Gainesville Core", + "http://www.ibm.com/developerworks/web/library/wa-memleak/": "Memory leak patterns in JavaScript", + "http://platon.escet.urjc.es/~axel/sparqltutorial/": "ESWC 2007 Tutorial: SPARQL - Where are we? Current state, theory and practice", + "https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html": "Sequence Tagging with Tensorflow", + "http://meta.wikimedia.org/wiki/Wikidata/Data_model": "Wikidata/Data model primer", + "http://dowhatimean.net/2006/08/exporting-the-os-x-address-book-to-foaf": "dowhatimean.net \u00bb Exporting the OS X Address Book to FOAF", + "https://www.quora.com/What-is-life-like-in-China/answer/Kaiser-Kuo?srid=h6K": "Kaiser Kuo's answer to What is life like in China? 
- Quora", + "https://www.youtube.com/watch?v=VIRCybGgHts": "Stanford Seminar - \"Can the brain do back-propagation?\" - Geoffrey Hinton", + "http://blogs.sun.com/mr/entry/closures": "Closures for java - Mark Reinhold\u2019s Blog", + "https://www.technologyreview.com/s/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/": "Single Artificial Neuron Taught to Recognize Hundreds of Patterns", + "http://www.npr.org/sections/goatsandsoda/2016/08/25/491261766/new-virus-breaks-the-rules-of-infection": "New Study Finds A Virus That Breaks The Rules Of Viral Infection : Goats and Soda : NPR", + "https://www.eventbrite.com/e/2019-knowledge-graph-conference-tickets-54867900367?aff=efbneb": "2019 Knowledge Graph Conference - Knowledge Graphs for AI in the Enterprise", + "http://www.semanlink.net/doc/2021/03/vaccins_contre_le_covid_19_po": "Vaccins contre le Covid-19\u00a0: pourquoi la France accuse-t-elle un tel retard\u00a0?", + "http://en.wikipedia.org/wiki/Ashoka": "Ashoka", + "https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/annoytutorial.ipynb": "gensim : Similarity Queries using Annoy (Tutorial)", + "https://www.quora.com/For-what-tasks-is-Pytorch-preferable-to-Tensorflow": "For what tasks is Pytorch preferable to Tensorflow? - Quora", + "https://www.quora.com/How-does-doc2vec-represent-feature-vector-of-a-document-Can-anyone-explain-mathematically-how-the-process-is-done/answer/Piyush-Bhardwaj-7": "Explanation for Doc2Vec - Quora", + "http://www.lepoint.fr/actualites-technologie-internet/l-avis-confidentiel-de-la-cnil-contre-la-loi-creation-et-internet/1387/0/288345": "L'avis confidentiel de la Cnil contre la loi Cr\u00e9ation et Internet, actualit\u00e9 Tech & Net : Le Point", + "http://www.w3.org/TR/2008/NOTE-hcls-senselab-20080604/": "Experiences with the conversion of SenseLab databases to RDF/OWL", + "http://www.mulgara.org/": "Mulgara: open source scalable RDF database written entirely in Java.", + "http://blog.intrapromote.com/google-for-auto-dealers-mechanics-and-auto-parts-stores/": "Google+ For Auto Dealers, Mechanics, and Auto Parts Stores Above the Fold & Socially Acceptable", + "http://www.semanlink.net/doc/2020/10/building_a_faster_and_accurate_": "Building a Faster and Accurate Search Engine on Custom Dataset with Transformers \ud83e\udd17 by Shivanand Roy Analytics Vidhya Sep, 2020 Medium", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/africa/5176110.stm": "BBC NEWS Five new heritage sites in Africa", + "http://nlp.seas.harvard.edu/latent-nlp-tutorial.html": "Deep Latent-Variable Models for Natural Language - Tutorial - harvardnlp", + "http://blog.antidot.net/2010/12/22/compte-rendu-du-premier-meetup-web-semantique/": "Compte-rendu du premier Meetup Web S\u00e9mantique \u00ab Blog Antidot", + "http://pt.wikipedia.org/wiki/Milton_Santos": "Milton Santos - Wikip\u00e9dia, a enciclop\u00e9dia livre", + "http://danbri.org/words/2005/08/06/121": "danbri\u2019s foaf stories \u00bb SPARQLing Prot\u00e9g\u00e9-OWL Jena integration", + "http://www.w3.org/2007/Talks/1119-Beijing-IH/HTML/img40.html": "Ivan Herman - Semantic Web Adoption", + "https://www.quora.com/How-do-I-find-out-if-an-idea-I-want-to-patent-is-already-patented-by-someone-else": "How to find out if an idea I want to patent is already patented by someone else - Quora", + "http://2015.eswc-conferences.org/lisegetoor": "Combining Statistics and Semantics to Turn Data into Knowledge - Lise Getoor, University of California, US 12th ESWC 2015", + 
"http://addyosmani.com/blog/essential-js-namespacing/#beginners": "Essential JavaScript Namespacing Patterns", + "http://www.semanlink.net/doc/2020/01/fastai_nbdev_create_delightful": "fastai/nbdev: Create delightful python projects using Jupyter Notebooks", + "http://www.macdevcenter.com/pub/a/mac/2003/02/25/apple_scripting.html": "MacDevCenter.com: Controlling Your Mac with AppleScript and Java", + "http://www.nytimes.com/2005/09/09/international/africa/09wheat.html?ei=5088&en=fa9323f74b1f82b0&ex=1283918400&partner=rssnyt&emc=rss&pagewanted=print": "New Strain of Wheat Rust Appears in Africa - New York Times", + "http://www.semanlink.net/doc/2019/06/the_last_picture_show_la_derni": "The Last Picture Show (La Derni\u00e8re S\u00e9ance)", + "http://www.semanlink.net/doc/2019/07/why_and_how_we_use_pangeo_at_cn": "Why and How we use Pangeo at CNES - pangeo - Medium", + "http://www.semanticwave.com/blog/archives/000225.jsp": "Semantic Wave: Breaking Tags Out of Their Existential Crisis", + "http://www.megapixel.net": "megapixel.net Webzine: Digital Camera Reviews and Information", + "http://www.wired.com/wiredenterprise/2013/05/neuro-artificial-intelligence/all/": "The Man Behind the Google Brain: Andrew Ng and the Quest for the New AI Wired Enterprise Wired.com", + "http://www.semanlink.net/doc/2020/01/comment_voir_et_supprimer_les": "Comment voir (et supprimer) les donn\u00e9es envoy\u00e9es \u00e0 Facebook par des sites tiers", + "http://robohub.org/why-robots-will-not-be-smarter-than-humans-by-2029/": "Why robots will not be smarter than humans by 2029 Robohub", + "http://microformats.org/wiki/faqs-for-rdf": "faqs-for-rdf - Microformats", + "https://www.npmjs.com/package/markdown-it-replace-link": "markdown-it-replace-link", + "http://salsa.blog.lemonde.fr/2013/01/10/lafrique-enchantee-a-la-decouverte-lafro-cubain/": "L\u2019Afrique enchant\u00e9e : \u00e0 la d\u00e9couverte l\u2019afro-cubain Mundo Latino", + "http://www.cnrs.fr/insb/recherche/parutions/articles2016/d-devienne.html": "Une plong\u00e9e vertigineuse dans la diversit\u00e9 du monde vivant - CNRS - Sciences biologiques - Parutions", + "http://server1.fandm.edu/departments/Anthropology/mami.html": "Mami Wata.", + "http://stackoverflow.com/questions/23183931/maven-java-ee-configuration": "eclipse - Maven Java EE Configuration - Stack Overflow", + "http://www.semanlink.net/doc/2020/02/calling_java_from_python_stac": "Calling Java from Python - Stack Overflow", + "http://openspring.net/sites/openspring.net/files/slides_iswc2009_final2.pdf": "Produce and Consume Linked Data with Drupal! (slides)", + "https://haveibeenpwned.com/": "Have I been pwned? 
Check if your email has been compromised in a data breach", + "http://www.cnrs.fr/inee/communication/breves/b198.html?utm_content=buffer004b7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "L\u2019histoire g\u00e9n\u00e9tique des Europ\u00e9ens en partie d\u00e9voil\u00e9e (CNRS)", + "http://www.coolestguyplanet.net/fastest-way-of-installing-drupal-7-4-on-mac-os-x-10-6/": "Fastest Way of Installing Drupal 7.8 on Mac OS X 10.7 Lion & 10.6 and fix Clean URLs using the Terminal coolestguyplanet.net Neil Gee", + "https://towardsdatascience.com/structured-deep-learning-b8ca4138b848": "Structured Deep Learning \u2013 Towards Data Science", + "http://www.wired.com/gadgetlab/2012/08/apple-amazon-mat-honan-hacking/3/": "How Apple and Amazon Security Flaws Led to My Epic Hacking Gadget Lab Wired.com", + "http://www.liberationdelacroissance.fr/files/home.php": "Lib\u00e9ration de la Croissance Fran\u00e7aise", + "https://www.wired.com/story/everipedia-blockchain/": "The Wikipedia Competitor That's Harnessing Blockchain For Epistemological Supremacy WIRED", + "http://www.semanlink.net/doc/2021/05/yann_lecun_sur_twitter_barlo": "Yann LeCun sur Twitter : \"Barlow Twins: a new super-simple self-supervised method to train joint-embedding architectures (aka Siamese nets) non contrastively. \"", + "http://space.newscientist.com/channel/astronomy/cosmology/dn9988": "Cosmology -04 September 2006 - New Scientist Space", + "https://colab.research.google.com/notebooks/mlcc/intro_to_pandas.ipynb?hl=fr#scrollTo=rHLcriKWLRe4": "Pr\u00e9sentation rapide de Pandas", + "https://semanti.ca/blog/?how-to-prepare-for-a-machine-learning-interview": "How to Prepare for a Machine Learning Interview - Semantic Bits", + "http://planete.websemantique.org/": "Plan\u00e8te Web S\u00e9mantique", + "http://lists.w3.org/Archives/Public/public-esw-thes/2005Oct/0006.html": "RE: [VM] content-dependent redirects in apache ... help! 
from Miles, AJ \\(Alistair\\) on 2005-10-03 (public-esw-thes@w3.org from October 2005)",
+    "http://news.bbc.co.uk/1/hi/sci/tech/4202734.stm": "BBC NEWS - Apes 'extinct in a generation'",
+    "http://www.bbc.com/news/science-environment-36286548": "Mastodon meal scraps revise US prehistory - BBC News",
+    "http://masinter.blogspot.fr/2010/03/resources-are-angels-urls-are-pins.html": "Larry Masinter Musings: Resources are Angels; URLs are Pins",
+    "http://vimeo.com/20781278": "Hypermedia APIs - Jon Moore on Vimeo",
+    "http://www.guha.com/sw002.html": "A System for integrating Web Services into a Global Knowledge Base (2002)",
+    "http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers/": "HTTP Conditional Get for RSS Hackers - The Fishbowl",
+    "https://www.smashingmagazine.com/2018/01/drag-drop-file-uploader-vanilla-js/": "How To Make A Drag-and-Drop File Uploader With Vanilla JavaScript \u2014 Smashing Magazine",
+    "http://fr.wikipedia.org/wiki/Marianne_de_ma_jeunesse": "Marianne de ma jeunesse",
+    "http://www.elwatan.com/spip.php?page=article&id_article=83720": "El Watan - R\u00e9bellion Touareg et enjeux de l\u2019uranium La poudri\u00e8re du Nord-Niger",
+    "https://issues.apache.org/jira/browse/STANBOL-594?subTaskView=unresolved": "[#STANBOL-594] Google Refine Reconciliation Service support - ASF JIRA",
+    "http://www.alphaworks.ibm.com/tech/wom?open&S_TACT=105AGX59&S_CMP=GR": "alphaWorks : IBM Web Ontology Manager",
+    "https://en.wikipedia.org/wiki/The_Son_(Nesb%C3%B8_novel)": "The Son (Nesb\u00f8 novel)",
+    "https://drive.google.com/file/d/0B8i61jl8OE3XdHRCSkV1VFNqTWc/edit": "Aetherial Symbols",
+    "https://www.quora.com/Can-I-use-word2vec-representation-to-train-a-weka-classifier": "Can I use word2vec representation to train a weka classifier? - Quora",
+    "http://www.hydra-cg.com/spec/latest/schema.org/": "Integration of Hydra into Schema.org",
+    "https://theclevermachine.wordpress.com/2014/09/11/a-gentle-introduction-to-artificial-neural-networks/": "A Gentle Introduction to Artificial Neural Networks The Clever Machine",
+    "http://www.semanlink.net/doc/2019/09/the_dangers_of_reshaping_and_ot": "The dangers of reshaping and other fun mistakes I\u2019ve learnt from PyTorch",
+    "http://emnlp2018.org/program/tutorials/": "Tutorials - EMNLP 2018",
+    "http://stackoverflow.com/questions/8130277/why-visualvm-sampler-does-not-provide-full-information-about-cpu-load-method-ti": "profiler - Why VisualVM Sampler does not provide full information about CPU load (method time execution)? - Stack Overflow",
+    "http://www.cs.cmu.edu/~nlao/publication/2014.kdd.pdf": "Knowledge vault: a web scale approach to probabilistic knowledge fusion",
+    "https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730": "Topic Modeling with Scikit Learn \u2013 Aneesha Bakharia \u2013 Medium",
+    "http://www.semanlink.net/doc/2021/06/calling_your_own_javascript_fun": "Calling your own JavaScript functions from SPARQL queries",
+    "http://code.google.com/p/topic-modeling-tool/": "topic-modeling-tool - A graphical user interface tool for topic modeling - Google Project Hosting",
+    "http://www.ted.com/talks/juliana_rotich_meet_brck_internet_access_built_for_africa.html": "Juliana Rotich: Meet BRCK, Internet access built for Africa Video on TED.com",
+    "http://www.foreignaffairs.com/articles/143002/edited-by-emmanuel-akyeampong-robert-h-bates-nathan-nunn-and-jam/africas-development-in-historical-perspective": "Africa\u2019s Development in Historical Perspective Foreign Affairs",
+    "http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf": "A practical guide to Support Vector classification",
+    "https://blog.codeship.com/json-ld-building-meaningful-data-apis/#disqus_thread": "JSON-LD: Building Meaningful Data APIs",
+    "http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-173/pos_paper5.pdf": "Representing Probabilistic Relations in RDF",
+    "http://news.bbc.co.uk/2/hi/africa/4459671.stm": "BBC NEWS - Ethiopians celebrate obelisk return",
+    "http://bwnt.businessweek.com/interactive_reports/innovative_companies/?chan=magazine+channel_special+report": "The World's 50 Most Innovative Companies Interactive Scoreboard",
+    "http://www.idealliance.org/papers/extreme/proceedings/html/2007/Bryan01/EML2007Bryan01.html": "MYCAREVENT: OWL and the automotive repair information supply chain - Proceedings of Extreme Markup Languages\u00ae",
+    "http://lists.w3.org/Archives/Public/semantic-web/2008Sep/0128.html": "Berlin SPARQL Benchmark V2 - Results for Sesame, Virtuoso, Jena TDB, D2R Server, and MySQL",
+    "http://www.semanlink.net/doc/2020/10/top_6_open_source_pretrained_mo": "Top 6 Open Source Pretrained Models for Text Classification you should use",
+    "http://topquadrantblog.blogspot.com/": "VOYAGES OF THE SEMANTIC ENTERPRISE",
+    "http://linkeddatafragments.org/": "Linked Data Fragments",
+    "https://towardsdatascience.com/spiking-neural-networks-the-next-generation-of-machine-learning-84e167f4eb2b": "Spiking Neural Networks, the Next Generation of Machine Learning (2018)",
+    "http://www.w3.org/TR/json-ld/": "JSON-LD 1.0",
+    "https://heartbeat.fritz.ai/capsule-networks-a-new-and-attractive-ai-architecture-bd1198cc8ad4": "Capsule Networks",
+    "http://hadoop.apache.org/": "Apache Hadoop - Home page",
+    "http://maps.google.com/maps?q=40.452107,93.742118&hl=de&ll=39.541977,83.94104&spn=0.031903,0.033002&num=1&t=h&vpsrc=6&z=15": "40.452107,93.742118 - Google Maps",
+    "http://www.semanlink.net/doc/2021/06/calenda_advances_and_challeng": "Calenda - Advances and challenges of NLP (Natural Language Processing) for african languages",
+    "http://www2003.org/cdrom/papers/refereed/p050/p50-horrocks.html": "Three Theses of Representation in the Semantic Web",
+    "http://fr.slideshare.net/rvguha/sem-tech2014c": "Semantic Web and Schema.org",
+    "http://www.lemonde.fr/idees/article/2012/04/30/nous-ne-voulons-pas-mourir-dans-les-decombres-du-neoliberalisme_1693201_3232.html": "\"Nous ne voulons pas mourir dans les d\u00e9combres du n\u00e9olib\u00e9ralisme !\"",
+    "https://cwiki.apache.org/confluence/display/MAHOUT/Algorithms": "Algorithms - Apache Mahout - Apache Software Foundation",
+    "http://webdatacommons.org/": "Web Data Commons",
+    "http://www.bbc.com/future/bespoke/story/20150430-rosetta-the-whole-story/index.html": "Rosetta: The whole story",
+    "https://www.reddit.com/r/MachineLearning/comments/30xo25/how_to_reduce_dimension_for_tfidf_bow_vector/": "How to reduce dimension for TfIdf / BOW vector? : MachineLearning",
+    "https://lejournal.cnrs.fr/articles/patrick-boucheron-bouscule-lhistoire?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1539108273": "Patrick Boucheron bouscule l\u2019histoire CNRS Le journal",
+    "http://www.joehewitt.com/software/firebug/": "FireBug - JoeHewitt.com",
+    "http://www.semanlink.net/doc/2021/02/openlux_luxembourg_radiograp": "OpenLux\u00a0: Luxembourg, radiographie d\u2019un paradis fiscal",
+    "http://jena.hpl.hp.com:3040/index.html": "BBC - Backstage",
+    "http://fgiasson.com/blog/index.php/2016/10/24/create-a-domain-text-classifier-using-cognonto/": "Create a Domain Text Classifier Using Cognonto Frederick Giasson",
+    "http://lists.w3.org/Archives/Public/public-lod/2013Aug/0033.html": "Simple WebID, WebID+TLS Protocol, and ACL Dogfood Demo from Kingsley Idehen on 2013-08-06 (public-lod@w3.org from August 2013)",
+    "http://archeo.blog.lemonde.fr/2015/07/18/il-y-a-3500-ans-en-crete-une-invention-provoque-lexode-dune-civilisation/": "Il y a 3500 ans en Cr\u00e8te, une invention provoque l\u2019exode d\u2019une civilisation Dans les pas des arch\u00e9ologues",
+    "https://www.pbs.org/wgbh/nova/article/new-fossils-mass-extinction-wiped-out-dinosaurs/": "A Fossil Snapshot of Mass Extinction NOVA PBS",
+    "http://www.icefox.net/articles/typemanager.php": "Type Manager",
+    "http://ruben.verborgh.org/blog/2015/10/06/turtles-all-the-way-down/": "Turtles all the way down Ruben Verborgh",
+    "https://medium.com/p/1321fa0298c3": "Data is the new NEW\u200a\u2014 WHAT?",
+    "http://www.peterchilson.com/wp-content/uploads/2011/05/Colonel-Tandjas-CountryFourth-Genre2.pdf": "Colonel Tandja's country",
+    "http://web.archive.org/web/19981202132847/http://www.hypersolutions.fr/m3/": "Multi Media Museum - M3",
+    "http://www.semanlink.net/doc/2019/07/semantic_textual_similarity_%7C_n": "Semantic textual similarity NLP-progress",
+    "http://www.w3.org/TR/ld-bp/": "Best Practices for Publishing Linked Data",
+    "http://www.universitygames.fr/": "",
+    "https://www.buzzfeednews.com/article/ryanhatesthis/france-paris-yellow-jackets-facebook": "The \"Yellow Jackets\" Riots In France Are What Happens When Facebook Gets Involved With Local News",
+    "https://web.hypothes.is/": "Hypothesis \u2013 The Internet, peer reviewed.",
+    "http://blog.cloudfoundry.org/2012/10/09/securing-restful-web-services-with-oauth2/": "Securing RESTful Web Services with OAuth2 Cloud Foundry Blog",
+    "https://fr.wikipedia.org/wiki/Matthieu_Ricard": "Matthieu Ricard",
+    "http://www.nytimes.com/2007/07/26/business/26poker.html?_r=2&adxnnl=1&oref=slogin&adxnnlx=1185467840-t4htAYaCiLZLHBVvKhSRyQ&oref=slogin": "In Poker Match Against a Machine, Humans Are Better Bluffers - New York Times",
+    "https://github.com/facebookresearch/fastText/issues/189": "(fastText) Euclidean distance instead of cosine-similarity?",
+    "http://www.w3.org/2005/04/fresnel-info/": "Fresnel - Display Vocabulary for RDF",
+    "http://kleenexsosforet.com": "Kleenex rase nos for\u00eats bor\u00e9ales !",
+    "http://aclweb.org/anthology/D17-1024": "Dict2vec : Learning Word Embeddings using Lexical Dictionaries",
+    "http://mark-shepherd.com/blog/springgraph-flex-component/": "mark-shepherd.com \u00bb SpringGraph Flex Component",
+    "http://www.coindesk.com/understanding-dao-hack-journalists/": "Understanding The DAO Attack - CoinDesk",
+    "http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-216/submission_12.pdf": "Putting OWL in Order: Patterns for Sequences in OWL",
+    "http://www.teebweb.org/": "The Economics of Ecosystems and Biodiversity (TEEB)",
+    "http://internetactu.blog.lemonde.fr/2013/03/01/cours-en-ligne-massifs-et-ouverts-la-standardisation-ou-linnovation/": "Cours en ligne massifs et ouverts : la standardisation ou l\u2019innovation ? InternetActu",
+    "http://firblitz.com/2006/12/9/itude-0-1-released": "Firblitz: iTude 0.1 released",
+    "http://www.javascriptkit.com/domref/": "JavaScript Kit- DOM (Document Object Model) Reference",
+    "http://www.newmediacampaigns.com/blog/browser-rest-http-accept-headers": "Understanding Browser HTTP Accept Headers: Firefox, Internet Explorer, Opera, and WebKit (Safari / Chrome)",
+    "http://www.bitcoin.org/": "Bitcoin P2P Virtual Currency",
+    "http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html": "Chronique d'une faillite programm\u00e9e au Mali",
+    "http://ruder.io/word-embeddings-2017/": "Word embeddings in 2017: Trends and future directions",
+    "https://lejournal.cnrs.fr/articles/zinder-renoue-avec-son-passe": "Zinder renoue avec son pass\u00e9 CNRS Le journal",
+    "http://viralpatel.net/blogs/generate-dynamic-web-project-maven-eclipse-wtp/": "Creating Dynamic Web Project using Maven in Eclipse. Eclipse Maven Web project",
+    "http://bugbrother.blog.lemonde.fr/2011/05/21/sarkozy-expulse-les-libertes-de-son-internet-civilise/": "Sarkozy expulse les libert\u00e9s de son \u201cinternet civilis\u00e9\u201d - BUG BROTHER - Blog LeMonde.fr",
+    "http://ebiquity.umbc.edu/blogger/2009/02/12/yahoo-adds-rdf-support-to-searchmonkey-and-boss/": "Yahoo! adds RDF support to SearchMonkey and BOSS",
+    "http://www.ted.com/index.php/talks/tim_berners_lee_on_the_next_web.html": "Tim Berners-Lee on the next Web Video on TED.com",
+    "http://www.semanlink.net/doc/2021/07/thomas_piketty_%C2%AB_face_au_regi": "Thomas Piketty\u00a0: \u00ab\u00a0Face au r\u00e9gime chinois, la bonne r\u00e9ponse passe par une nouvelle forme de socialisme d\u00e9mocratique et participatif\u00a0\u00bb",
+    "http://www.semanlink.net/doc/2021/05/the_history_of_west_africa_at_a": "The History of West Africa at a Glance",
+    "https://medium.com/@theonlytoby/history-tells-us-what-will-happen-next-with-brexit-trump-a3fefd154714#.p499yhrrb": "History tells us what may happen next with Brexit & Trump \u2013 Medium",
+    "http://jena.hpl.hp.com/wiki/SDB": "SDB - Jena wiki",
+    "http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_": "Douglas Kennedy\u00a0: \u00ab\u00a0A l\u2019\u00e8re de la \u201ccancel culture\u201d \u2013 o\u00f9 un simple bon mot peut chambouler votre carri\u00e8re \u2013, surveiller ce qu\u2019on dit en public est devenu crucial\u00a0\u00bb",
+    "http://danakil.ethiopia.free.fr/ertaale.htm": "L'Erta Al\u00e9, volcan actif dans le Danakil",
+    "http://recode.net/2015/02/15/white-house-red-chair-obama-meets-swisher/": "President Barack Obama Speaks with Kara Swisher (Full Transcript) Re/code",
+    "https://hackernoon.com/understanding-promises-in-javascript-13d99df067c1": "Understanding promises in Javascript \u2013 Hacker Noon",
+    "https://www.stardog.com/": "Stardog: The Enterprise Knowledge Graph Platform",
+    "http://dev.mysql.com/doc/refman/5.0/en/index.html": "MySQL 5.0 Reference Manual",
+    "http://www.coindesk.com/ethereum-response-dao-kill/": "Why the Wrong Response to The DAO Attack Could Kill Ethereum - CoinDesk",
+    "http://www.openlinksw.com/weblog/oerling/?id=1777": "LOD2 Plenary and Open Data Meet-up in Mannheim",
+    "http://www.semanlink.net/doc/2021/08/jean_latreille_%C2%AB_le_revenu_un": "Jean Latreille\u00a0: \u00ab\u00a0Le revenu universel ne nous fera pas moins travailler, au contraire\u00a0\u00bb",
+    "https://course-v3.fast.ai/start_gcp.html": "GCP fast.ai course v3",
+    "http://docs.jquery.com/Tutorials:Getting_Started_with_jQuery": "Tutorials:Getting Started with jQuery",
+    "http://www.semanlink.net/doc/2020/06/search_your_favorited_tweets_an": "Search your favorited tweets and articles with Twitter Discover \u2014 Daniel Nouri's Blog",
+    "http://www.lemonde.fr/big-browser/article/2017/06/30/pourquoi-les-canadiens-se-moquent-eperdument-des-150-ans-du-canada_5153587_4832693.html": "Pourquoi les Canadiens se moquent \u00e9perdument des 150 ans du Canada",
+    "http://www.newscientist.com/article/dn18834-zoologger-the-most-bizarre-life-story-on-earth.html": "Zoologger: The most bizarre life story on Earth? - life - 28 April 2010 - New Scientist",
+    "http://lafeuille.blog.lemonde.fr/2013/09/07/un-github-pour-les-ecrivains-%e2%80%a8/": "Un GitHub pour les \u00e9crivains ? La Feuille",
+    "http://www.semanlink.net/doc/2021/01/exploring_the_met_art_collectio": "Exploring The MET Art Collections with Hume #2 GraphAware",
+    "https://medium.com/starts-with-a-bang/the-disappearing-universe-d7447467c63a": "The Disappearing Universe \u2014 Starts With A Bang! \u2014 Medium",
+    "https://www.google.fr/?gfe_rd=ssl&ei=Qck3V_S2KISDaL74kuAG#q=Loriane+Zacharie&stick=H4sIAAAAAAAAAONgFuLVT9c3NEwyjy9PN7NIV0LlaklmJ1vp55YWZybrJxaVZBaXWBVnpqSWJ1YWAwAXCNBNOwAAAA": "Loriane Zacharie",
+    "http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi": "[2001.04451] Reformer: The Efficient Transformer",
+    "http://crunchgear.com/2007/07/16/help-key-how-to-optimize-your-video-for-youtube/": "Help-Key: How to Optimize Your Video for YouTube",
+    "http://www.slideshare.net/LeeFeigenbaum/evolution-towards-web-30-the-semantic-web": "Evolution Towards Web 3.0: The Semantic Web",
+    "http://www.sciencemag.org": "Science (site officiel de la revue)",
+    "http://linkeddataorchestration.com/2014/03/04/on-apis-json-linked-data-attitude-and-opportunities/": "On APIs, JSON, Linked Data, attitude and opportunities Linked Data Orchestration",
+    "http://web.stanford.edu/class/cs224n/reports.html": "CS224n: Natural Language Processing with Deep Learning",
+    "http://semanticweb.com/plumbing-depths-deep-learning_b41996": "Plumbing The Depths Of Deep Learning - Semanticweb.com",
+    "http://www.greenpeace.org/africa/fr/notre-action/Defendons-nos-oceans/Peche-frauduleuse-/": "P\u00eache frauduleuse Greenpeace Afrique",
+    "http://www.semanlink.net/doc/2021/09/paleo_data_search_%7C_search_%7C_na": "Paleo Data Search Search National Centers for Environmental Information (NCEI)",
+    "http://marmotta.incubator.apache.org/": "Apache Marmotta",
+    "http://www.jenitennison.com/blog/node/170": "Using \"Punning\" to Answer httpRange-14 Jeni's Musings",
+    "http://en.wikipedia.org/wiki/The_Chaser_(film)": "The Chaser (film)",
+    "https://www.bbc.com/news/science-environment-47873072": "New human species found in Philippines",
+    "http://www.snee.com/xml/xml2006/owlrdbms.html": "Relational database integration with RDF/OWL",
+    "http://www.semanlink.net/doc/2019/07/new_fast_ai_course_a_code_firs": "new fast.ai course: A Code-First Introduction to Natural Language Processing \u00b7 fast.ai",
+    "http://www.ldodds.com/blog/archives/000269.html": "Lost Boy: 2006 Jena User Conference Programme",
+    "https://en.wikipedia.org/wiki/Chimamanda_Ngozi_Adichie": "Chimamanda Ngozi Adichie",
+    "http://semanticweb.com/semantic-seo-comes-to-prestashop-e-commerce-sites_b25279": "Semantic SEO Comes to Prestashop e-commerce Sites - semanticweb.com",
+    "http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html": "Working With Text Data \u2014 scikit-learn documentation",
+    "http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/": "CEUR-WS.org/Vol-201 - SWAP, Semantic Web Applications and Perspectives, 2nd Italian Semantic Web Workshop",
+    "http://searchivarius.org/blog/brief-overview-querysentence-similarity-functions": "A brief overview of query/sentence similarity functions searchivarius.org",
+    "http://stackoverflow.com/questions/10213009/solr-vs-elasticsearch": "search - Solr vs. ElasticSearch - Stack Overflow",
+    "http://karpathy.github.io/2015/05/21/rnn-effectiveness/": "The Unreasonable Effectiveness of Recurrent Neural Networks",
+    "http://www.sitepoint.com/article/javascript-library": "The JavaScript Library World Cup [JavaScript & DHTML Tutorials]",
+    "http://www.newscientist.com/article/dn17895-freeflying-cyborg-insects-steered-from-a-distance.html": "Free-flying cyborg insects steered from a distance - tech - 01 October 2009 - New Scientist",
+    "http://www.kartoo.com/": "KartOO M\u00e9tamoteur de recherche",
+    "http://pcottle.github.io/learnGitBranching/": "Learn Git Branching",
+    "http://www.spurl.net": "",
+    "http://docs.info.apple.com/article.html?artnum=301415": "iMovie HD: No sound after applying a title, effect, or transition to an MPEG-4 video clip",
+    "http://www.w3.org/TR/json-ld-syntax/#embedding-json-ld-in-html-documents": "JSON-LD 1.0: Embedding JSON-LD in HTML Documents",
+    "https://github.com/google/WebFundamentals/blob/master/src/content/en/updates/posts/2015/03/creating-semantic-sites-with-web-components-and-jsonld.markdown": "creating-semantic-sites-with-web-components-and-jsonld.markdown",
+    "http://news.bbc.co.uk/1/hi/in_depth/sci_tech/2004/planet/default.stm": "BBC NEWS - Planet under pressure",
+    "http://www.oracle.com/technology/sample_code/tech/java/j2ee/jintdemo/tutorials/webservices.html": "Document Style Web Services And Dynamic Invocation Of Web Services",
+    "http://news.bbc.co.uk/onthisday/": "",
+    "http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html": "Nicolas Hulot et l'oligarchie, par Herv\u00e9 Kempf",
+    "http://www.cnrs.fr/ins2i/spip.php?article2581&utm_content=buffer07b2c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "D\u00e9veloppement web\u00a0: concilier s\u00fbret\u00e9 et flexibilit\u00e9 avec le typage graduel",
+    "http://www.semanlink.net/doc/2021/05/term_based_semantic_clusters_fo": "Term Based Semantic Clusters for Very Short Text Classification (2019)",
+    "http://www.archive.org/": "Internet Archive",
+    "http://news.bbc.co.uk/2/hi/health/7114587.stm": "BBC NEWS Health Amputees 'regain sense of touch'",
+    "http://bugbrother.blog.lemonde.fr/2009/09/26/calais-et-ses-refugies-aux-doigts-brules/": "Calais: des r\u00e9fugi\u00e9s aux doigts br\u00fbl\u00e9s BUG BROTHER",
+    "http://robohub.org/baidus-self-driving-tech-plans-revealed/": "Baidu\u2019s self-driving tech plans revealed Robohub",
+    "http://www.wired.com/opinion/2013/01/forget-the-internet-of-things-here-comes-the-internet-of-cars/": "Forget the Internet of Things: Here Comes the Internet of Cars Wired Opinion Wired.com",
+    "http://n2.talis.com/wiki/SPARQL_Demo": "SPARQL Demo - n\u00b2 wiki",
+    "http://www.javaworld.com/javaworld/jw-11-2004/jw-1101-spider.html": "Create intelligent Web spiders",
+    "http://www.lespetitescases.net/semweblabs/linkedmymusic/": "Linked My Music",
+    "http://www.semanlink.net/doc/2021/10/application_of_self_organizing_": "Application of Self-Organizing Maps in Text Clustering: A Review IntechOpen (2012)",
+    "http://sappingattention.blogspot.fr/2012/11/when-you-have-mallet-everything-looks.html": "Sapping Attention: When you have a MALLET, everything looks like a nail",
+    "https://semantic-ui.com/": "Semantic UI",
+    "http://fgiasson.com/blog/index.php/2011/10/11/volkswagens-rdf-data-management-workflow/": "Volkswagen\u2019s RDF Data Management Workflow at Frederick Giasson\u2019s Weblog",
+    "http://www.thebeijingguide.com/": "The China Guide: China Tours, Hotels, Plane and Train Tickets, Photographs and Pictures",
+    "http://www.semanlink.net/doc/2020/05/confinement_du_12_mars_au_pre": "Confinement\u00a0: du 12\u00a0mars au premier tour des municipales, une semaine de bascule au sommet de l\u2019Etat",
+    "http://hsivonen.iki.fi/schema-org-and-communities/": "Schema.org and Pre-Existing Communities",
+    "http://www.lemonde.fr/planete/article/2013/04/05/comment-monsanto-a-mis-les-ogm-au-dessus-de-la-loi-aux-etats-unis_3154615_3244.html": "Le \"Monsanto act\" met les OGM au-dessus de la loi aux Etats-Unis",
+    "http://www.wired.com/design/2013/07/a-market-that-lets-you-buy-from-your-neighbors/": "Student Invents Clever Online Market for Eating Locally Wired Design Wired.com",
+    "http://siren.sindice.com/index.html": "SIREn: Semantic Information Retrieval Engine",
+    "https://blog.openai.com/evolution-strategies/": "Evolution Strategies as a Scalable Alternative to Reinforcement Learning",
+    "http://fgiasson.com/blog/index.php/2007/05/24/the-music-data-space/": "The Music Data Space at Frederick Giasson\u2019s Weblog",
+    "http://www.wired.com/2016/02/the-hunt-for-the-algorithms-that-drive-life-on-earth/": "The Hunt for the Algorithms That Drive Life on Earth WIRED",
+    "http://www.w3.org/2001/sw/wiki/Linking_patterns": "Linking patterns - Semantic Web Standards",
+    "http://www.semanlink.net/doc/2019/06/transfer_learning_in_natural_la": "Transfer Learning in Natural Language Processing - Google Slides",
+    "http://fleursenpoche.antiopa.info/fleursenpoche.htm": "Fleurs en poche : identifiez les fleurs sauvages avec votre iPhone",
+    "http://www.semanlink.net/doc/2020/10/%C2%AB_la_dispersion_des_graines_a_p": "\u00ab\u00a0La dispersion des graines a permis \u00e0 Dame Nature de parfaire ses qualit\u00e9s d\u2019ing\u00e9nieur a\u00e9ronautique\u00a0\u00bb",
+    "http://www.semanlink.net/doc/2021/07/du_glyphosate_aux_sdhi_les_res": "Du glyphosate aux SDHI, les ressorts de la controverse",
+    "http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0167.html": "Tag ontology RFC from Richard Newman on 2005-03-23 (semantic-web@w3.org from March 2005)",
+    "http://www.gnowsis.org/": "http://www.gnowsis.org",
+    "http://www.cnes.fr/web/CNES-fr/11590-gp-des-resultats-a-venir-pour-ptolemy.php": "Des r\u00e9sultats \u00e0 venir pour PTOLEMY - CNES",
+    "http://semanticweb.com/semtechbiz-berlin-to-explore-semantics-in-the-auto-industry_b26151": "SemTechBiz Berlin to Explore Semantics in the Auto Industry - semanticweb.com",
+    "https://neo4j.com/blog/go-neo4j-financial-times/?utm_content=bufferc23f8&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "How Go + Neo4j Enabled the Financial Times to Deliver at Speed",
+    "http://news.bbc.co.uk/2/hi/programmes/click_online/4364168.stm": "BBC NEWS - Getting connected in rural India BBC NEWS Programmes Click Online Getting connected in rural India",
+    "https://medium.com/predict/physicists-discover-time-can-move-in-discrete-chunks-ec5e826a7395": "Physicists discover time may move in discrete \u2018chunks\u2019",
+    "http://code.google.com/p/owl1-1/wiki/S1NotesOwled2007": "S1NotesOwled2007 - owl1-1 - Google Code",
+    "http://help.eclipse.org/help30/index.jsp?topic=/org.eclipse.jdt.doc.user/gettingStarted/qs-junit.htm": "Help - Eclipse Platform - JUnit",
+    "https://www.theverge.com/2019/3/10/18258134/alexandria-ocasio-cortez-automation-sxsw-2019": "Alexandria Ocasio-Cortez says \u2018we should be excited about automation\u2019 - The Verge",
+    "http://www.johndcook.com/blog/r_language_for_programmers/": "The R language, for programmers John D. Cook",
+    "http://www.lemonde.fr/planete/article/2011/11/28/le-conseil-d-etat-annule-la-suspension-de-culture-de-l-ogm-mon-810_1610317_3244.html#ens_id=1610322": "Le Conseil d'Etat annule la suspension de culture de l'OGM MON 810",
+    "http://www.nytimes.com/video/world/africa/100000003107917/dying-of-ebola-at-the-hospital-door.html?partner=rss&emc=rss": "Dying of Ebola at the Hospital Door - Video - NYTimes.com",
+    "http://www.semanlink.net/doc/2019/06/word_embeddings_6_years_later": "Word Embeddings: 6 Years Later",
+    "http://www.lemonde.fr/pixels/article/2014/12/29/le-ss7-le-reseau-des-operateurs-qui-permet-de-surveiller-vos-telephones-portables_4547194_4408996.html": "Le SS7, le r\u00e9seau des op\u00e9rateurs qui permet de surveiller vos t\u00e9l\u00e9phones portables",
+    "http://fr.wikipedia.org/wiki/Cet_obscur_objet_du_d%C3%A9sir": "Cet obscur objet du d\u00e9sir",
+    "http://semanticweb.com/semtechbiz-puts-spotlight-on-financial-services-business-ontology_b36768": "SemTechBiz Puts Spotlight On Financial Industry Business Ontology - semanticweb.com",
+    "http://spinrdf.org/spin.html": "SPIN Modeling Vocabulary",
+    "http://fr.slideshare.net/fabien_gandon/discovery-hub-onthefly-linked-data-exploratory-search": "Discovery Hub: on-the-fly linked data exploratory search",
+    "http://www.semanlink.net/doc/2019/09/knowledge_graphs_and_machine_le": "Knowledge Graphs and Machine Learning - Towards Data Science",
+    "ftp://reports.stanford.edu/pub/cstr/reports/csl/tr/89/400/CSL-TR-89-400.pdf": "\"Sparse Distributed Memory: Principles and Operation\"",
+    "http://stackoverflow.com/questions/7651984/disable-the-cache-busting-in-dynamically-linked-external-javascript-files": "JQuery / Disable the Cache Busting in Dynamically Linked External Javascript Files?",
+    "http://www.w3.org/TR/powder-dr/": "Protocol for Web Description Resources (POWDER): Description Resources",
+    "http://www.semanlink.net/doc/2019/12/winograd_schema_challenge_wik": "Winograd Schema Challenge - Wikipedia",
+    "http://archeo.blog.lemonde.fr/2018/01/23/lhistoire-du-femur-de-toumai/": "L\u2019histoire du f\u00e9mur de Touma\u00ef Dans les pas des arch\u00e9ologues",
+    "http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1427": "Connecting Freebase, Wikipedia, DBpedia, and other Linked Data Spaces",
+    "http://www.lavoutenubienne.org/": "Vo\u00fbte nubienne",
+    "http://productdb.org/": "ProductDB",
+    "http://www.w3.org/2000/10/swap/doc/Rules": "Rules and Formulae",
+    "http://www.enterprisedatajournal.com/article/bringing-semantic-technologies-enterprise-data.html": "Bringing Semantic Technologies to Enterprise Data Enterprise Data Journal",
+    "http://hdalab.iri-research.org/hdalab/": "HdA Lab",
+    "http://europeecologie22mars.org/": "L'appel du 22 mars",
+    "http://www.projectliberty.org/resources/whitepapers/Web_Services_Nokia.pdf": "",
+    "http://www.cs.princeton.edu/~blei/blei-mlss-2012.pdf": "Probabilistic Topic Models - blei-mlss-2012.pdf (slides)",
+    "http://esw.w3.org/topic/SemanticWebTools": "SemanticWebTools - ESW Wiki",
+    "https://lejournal.cnrs.fr/articles/la-langue-electronique-plus-forte-que-nos-papilles": "La langue \u00e9lectronique plus forte que nos papilles ? CNRS le journal",
+    "http://www.w3.org/2001/sw/sweo/public/BusinessCase/": "Semantic Web Education and Outreach (SWEO) Interest Group / Business case",
+    "http://codahale.com/what-makes-jersey-interesting-injection-providers/": "What Makes Jersey Interesting: Injection Providers codahale.com",
+    "http://news.bbc.co.uk/onthisday/hi/themes/science_and_technology/space/": "BBC - On This Day - Space",
+    "http://www.rochester.edu/news/show.php?id=2963": "One Species' Genome Discovered Inside Another's - University of Rochester Press Releases",
+    "http://www.ldodds.com/blog/archives/000283.html": "Lost Boy: XTech 2006: SPARQLing Services",
+    "http://www.pbs.org/wgbh/nova/nature/tongue-parasite.html": "NOVA The Tongue-Eating Parasite",
+    "http://www.semanlink.net/doc/2020/01/siamese_network_keras_for_image": "Siamese Network for Image and Text similarity using Keras",
+    "http://www.newscientist.com/article.ns?id=dn8826&print=true": "'Mental typewriter' controlled by thought alone - Breaking News New Scientist",
+    "http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat": "[1511.03643] Unifying distillation and privileged information",
+    "http://lists.w3.org/Archives/Public/public-linked-json/2014Dec/0001.html": "Re: Limitations on JSON which conforms to JSON-LD spec? from Nicholas Bollweg on 2014-12-01 (public-linked-json@w3.org from December 2014)",
+    "http://ebiquity.umbc.edu/blogger/2006/05/01/2007-darpa-grand-challenge-urban-driving/": "2007 DARPA Grand Challenge: urban driving",
+    "http://blog.semantic-web.at/2010/02/16/linking-open-data-to-thesaurus-management/": "The Semantic Puzzle Linking Open Data to Thesaurus Management",
+    "http://www.semanlink.net/doc/2020/06/face_a_booking_des_hoteliers_f": "Face \u00e0 Booking, des h\u00f4teliers fran\u00e7ais tentent le boycott",
+    "http://blog.semantic-web.at/2008/11/05/short-semantic-mediawiki-tutorial/": "The Semantic Puzzle Short Semantic MediaWiki Tutorial (with link to sandbox)",
+    "http://blog.octo.com/strategie-d-architecture-api/": "Strat\u00e9gie d\u2019architecture API OCTO talks !",
+    "https://patentpdw.files.wordpress.com/2017/10/3-younge-and-kuhn.pdf": "Patent Similarity. A Big Data Method for Patent Analysis (2015)",
+    "http://www.netvouz.com/action/searchBookmarksI?query=semanlink": "Semanlink on Netvouz",
+    "http://labs.antidot.net/museesdefrance": "Les mus\u00e9es en France",
+    "http://rdf123.umbc.edu/": "RDF123 Homepage",
+    "https://fr.slideshare.net/apigee/hateoas-101-opinionated-introduction-to-a-rest-api-style": "HATEOAS 101 - Opinionated Introduction to a REST API Style",
+    "https://sgfin.github.io/learning-resources/": "ML Resources",
+    "http://www.bbc.co.uk/nature/13982886": "BBC Nature - Warring ants know their enemies",
+    "https://stanford.edu/~shervine/teaching/cs-229.html": "Machine Learning - Cheatsheet (Teaching - CS 229)",
+    "http://wiki.apache.org/solr/SchemaXml": "SchemaXml - Solr Wiki",
+    "http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire": "Le gouvernement va r\u00e9introduire les insecticides \u00ab\u00a0tueurs d\u2019abeilles\u00a0\u00bb",
+    "http://3spots.blogspot.com/2006/02/30-social-bookmarks-add-to-footer.html": "3spots: 30 Social Bookmarks 'Add to' footer links for blogs",
+    "http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r": "CONCEPTUAL GROUNDING FOR TEXT REPRESENTATION LEARNING",
+    "http://www.ikelin.com/implementing-etag-caching-jersey/": "Implementing ETag Caching with Jersey",
+    "https://jep-taln2016.limsi.fr/actes/Actes%20JTR-2016/Papers/T83.pdf": "Int\u00e9gration de la similarit\u00e9 entre phrases comme crit\u00e8re pour le r\u00e9sum\u00e9 multi-document (2016)",
+    "http://www.proxml.be/users/paul/weblog/40127/SPARQL_for_SKOS_integrity_constraints.html": "SPARQL for SKOS integrity constraints",
+    "http://lavryengineering.com/pdfs/lavry-sampling-theory.pdf": "Sampling theory for digital audio",
+    "https://arxiv.org/abs/1307.5101": "[1307.5101] Large-scale Multi-label Learning with Missing Labels",
+    "http://wiki.goodrelations-vocabulary.org/GoodRelations_for_manufacturers": "GoodRelations for manufacturers",
+    "http://www.holygoat.co.uk/applications/iphoto-rdf/iphoto-rdf": "RDF Exporter Plugin for iPhoto",
+    "http://owl-workshop.man.ac.uk/acceptedPosition/submission_31.pdf": "An OWL model for managing ontologies in the agricultural domain",
+    "http://www.esa.int/esaCP/SEMEV82DU8E_index_2.html": "ESA Portal - Mars Express radar ready to work",
+    "https://research.googleblog.com/2017/08/transformer-novel-neural-network.html": "Research Blog: Transformer: A Novel Neural Network Architecture for Language Understanding",
+    "https://biotext.berkeley.edu/papers/psb03.pdf": "A SIMPLE ALGORITHM FOR IDENTIFYING ABBREVIATION DEFINITIONS IN BIOMEDICAL TEXT",
+    "http://blog.ted.com/2014/01/31/the-attack-on-our-higher-education-system-and-why-we-should-welcome-it/": "The attack on higher ed \u2014 and why we should welcome it TED Blog",
+    "http://www.w3.org/2012/ldp/charter.html": "W3C Linked Data Platform Working Group Charter",
+    "http://internetactu.blog.lemonde.fr/2014/03/21/linternet-a-t-il-vraiment-fait-la-demonstration-de-notre-capacite-a-collaborer/": "Internet : outil de collaboration ou de domination ? InternetActu",
+    "http://www.lemonde.fr/afrique/article/2017/04/24/l-afrique-au-c-ur-de-la-nouvelle-route-de-la-soie_5116739_3212.html": "L\u2019Afrique au c\u0153ur de la nouvelle Route de la soie",
+    "https://medium.com/opla/small-data-is-big-in-ai-train-spotting-at-france-is-ai-4afb24168e4c": "Small Data is Big in AI\u00a0: Train-spotting at France is AI",
+    "http://www.slideshare.net/rumito/eswc2008-relational2rdf-mapping-relational-databases-to-rdf-with-openlink-virtuoso": "ESWC2008 Relational2RDF - Mapping Relational Databases to RDF with OpenLink Virtuoso",
+    "http://www.snee.com/bobdc.blog/2013/05/a-nineteenth-century-linking-a.html": "A nineteenth-century linking application - bobdc.blog",
+    "http://wiki.apache.org/solr/Suggester": "Suggester - Solr Wiki",
+    "http://www.ciaovito.net/": "www.ciaovito.net",
+    "http://www.lemonde.fr/idees/article/2013/01/07/comment-l-etat-peut-favoriser-l-essor-des-startups-de-technologie_1813509_3232.html": "Comment l'Etat peut favoriser l'essor des startups de technologie",
+    "http://www.semanlink.net/doc/2019/12/les_reseaux_semantiques_comme_o": "Les r\u00e9seaux s\u00e9mantiques comme outil de travail quotidien (Jean Rohmer)",
+    "http://web.archive.org/web/19981202055424/www.hypersolutions.fr/proverbes/default.ssi": "Proverbes (site hyperSOLutions)",
+    "https://usbeketrica.com/article/comment-les-webs-alternatifs-veulent-reinventer-internet": "Comment les webs alternatifs veulent r\u00e9inventer Internet Usbek & Rica",
+    "http://www.youtube.com/watch?v=Jd3-eiid-Uw": "Head Tracking for Desktop VR Displays using the WiiRemote",
+    "http://www.barefootpower.com/": "Barefoot Power",
+    "http://www.jeux-geographiques.com/": "Jeux G\u00e9ographiques : quizz sur la France, le monde, les villes et les d\u00e9partements.",
+    "http://www.graphviz.org/": "Graphviz - Graph Visualization Software",
+    "http://www.iro.umontreal.ca/~bengioy/talks/MIT-18oct2018.pdf": "Towards bridging the gap between deep learning and brains",
+    "http://stackoverflow.com/questions/1465974/content-negotiation-ignored-when-using-browser-back-button": "ajax - Content negotiation ignored when using browser Back button - Stack Overflow",
+    "http://en.wikipedia.org/wiki/The_Skeleton_Key": "The Skeleton Key",
+    "https://medium.com/@francois.chollet/the-bitcoin-network-at-risk-bd54a1473a1d": "The Bitcoin network at risk \u2013 Fran\u00e7ois Chollet",
+    "https://www.nytimes.com/2018/01/02/science/donkeys-africa-china-ejiao.html": "To Sate China\u2019s Demand, African Donkeys Are Stolen and Skinned - The New York Times",
+    "http://www.cs.vu.nl/~jbroeks/papers/ISWC02.pdf": "Sesame: A Generic Architecture for Storing and Querying RDF and RDF Schema",
+    "http://www.ldodds.com/blog/archives/000314.html": "Lost Boy: How Shall I Integrate Thee? Let Me Count the Ways...",
+    "http://www.semanlink.net/doc/2019/07/spacyirl_2019_conference_in_ove": "SpacyIRL 2019 Conference in Overview LinkedIn",
+    "http://maps.google.com/maps?ie=UTF8&z=13&ll=69.351578,88.197899&spn=0.044674,0.134754&t=k&om=1": "Norilsk - Google Maps",
+    "http://www.icij.org/": "International Consortium of Investigative Journalists The World\u2019s Best Cross-Border Investigative Team",
+    "https://github.com/sebastianruder/NLP-progress": "sebastianruder/NLP-progress: Repository to track the progress in Natural Language Processing (NLP), including the datasets and the current state-of-the-art for the most common NLP tasks.",
+    "http://wikiworkshop.org/2019/papers/Wiki_Workshop_2019_paper_1.pdf": "Learning to Map Wikidata Entities To Predefined Topics",
+    "http://www.bofaml.com/content/dam/boamlimages/documents/articles/D3_006/11511357.pdf": "Bank of America's report on creative disruption, AI and robotisation",
+    "https://cleantechnica.com/2019/02/09/tesla-model-3-1-best-selling-electric-car-in-world-7-of-global-ev-market/": "Tesla Model 3 = #1 Best Selling Electric Car in World",
+    "https://arxiv.org/abs/1511.06335": "[1511.06335] Unsupervised Deep Embedding for Clustering Analysis",
+    "http://hanishblogger.blogspot.fr/2013/07/data-categorization-using-opennlp.html": "Data Categorization using OpenNLP",
+    "http://www.nytimes.com/2006/04/02/world/asia/02robot.html?ex=1301634000&en=7d5fcaf014309078&ei=5088&partner=rssnyt&emc=rss": "In a Wired South Korea, Robots Will Feel Right at Home - New York Times",
+    "https://class.coursera.org/rprog-010": "R Programming Coursera",
+    "http://fr.wikipedia.org/wiki/Tchin-Tabaraden": "Tchin-Tabaraden - Wikip\u00e9dia",
+    "https://nlpparis.wordpress.com/2019/01/24/paris-nlp-season-3-meetup-3/": "Paris NLP Season 3 Meetup #3 at Doctrine \u2013 Paris NLP",
+    "http://www.theinquirer.net/default.aspx?article=34523": "Microsoft Media Player shreds your rights",
+    "http://www.usinenouvelle.com/article/la-pile-a-combustible-a-la-francaise-une-filiere-complete-sans-les-constructeurs-automobiles.N263587": "La pile \u00e0 combustible \u00e0 la fran\u00e7aise : une fili\u00e8re compl\u00e8te... sans les constructeurs automobiles - L'Usine Auto",
+    "http://www.jeuneafrique.com/Article/ARTJAJA2530p032-034.xml0/-arrestation-opposition-president-Mamadou-Tandja-Tandja-l-apprenti-sorcier.html": "Tandja, l'apprenti sorcier : Jeuneafrique.com",
+    "http://code.google.com/p/oort/wiki/SparqlTree": "SparqlTree - oort - Automatic treeification of SPARQL query results. - Project Hosting on Google Code",
+    "http://www.pbs.org/wgbh/nova/tech/mystery-masterpiece.html": "NOVA - Official Website Mystery of a Masterpiece",
+    "http://www.nytimes.com/2016/06/04/science/rna-c2c2-gene-editing-dna-crispr.html?_r=0": "Scientists Find Form of Crispr Gene Editing With New Capabilities - The New York Times",
+    "https://www.sciencenews.org/article/body%E2%80%99s-bacteria-don%E2%80%99t-outnumber-human-cells-so-much-after-all": "Body\u2019s bacteria don\u2019t outnumber human cells so much after all Science News",
+    "http://ebiquity.umbc.edu/blogger/2014/01/01/google-knowledge-maps-demonstration/": "Freebase knowledge maps",
+    "http://blog.aksw.org/2013/ontowiki-feature-of-the-week-document-management/": "blog.aksw.org \u00bb Blog Archive \u00bb OntoWiki Feature of the Week: Document Management",
+    "http://www.semanticuniverse.com/articles-pfizer-idea-project-interview-franz-jans-aasman-and-io-informatics-robert-stanley.html": "The Pfizer IDEA project: An Interview with Franz' Jans Aasman and IO Informatics' Robert Stanley Semantic Universe",
+    "https://www.lifewire.com/patent-search-1616728": "What Is Google Patents Search?",
+    "http://www.ssi.gouv.fr/": "ANSSI Agence nationale de la s\u00e9curit\u00e9 des syst\u00e8mes d'information",
+    "https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-7d2250723780": "How to do Deep Learning on Graphs with Graph Convolutional Networks - Part 1",
+    "http://jena.hpl.hp.com/~stecay/downloads/snippetmanager-iswc-draft008.pdf": "Howto build a Snippet Manager",
+    "https://www.newscientist.com/article/2127625-entire-nervous-system-of-an-animal-recorded-for-the-first-time/": "Entire nervous system of an animal recorded for the first time New Scientist",
+    "https://twitter.com/jeremyphoward/status/891421041410531329": "Jeremy Howard sur Twitter : \"Memory networks are the most overhyped and disappointing DL \"advance\" I've seen yet\u2026 \"",
+    "http://internetactu.blog.lemonde.fr/2014/02/07/comment-apprendre-a-apprendre/": "Comment apprendre \u00e0 apprendre ? InternetActu",
+    "http://homepages.inf.ed.ac.uk/lzhang10/maxent.html#intro": "Maximum Entropy Modeling",
+    "http://www.cs.put.poznan.pl/dweiss/site/publications/slides/iipwm2004-dweiss-lingo.pdf": "Lingo: Search Results Clustering Algorithm Based on Singular Value Decomposition (slides)",
+    "http://docs.python-guide.org/en/latest/writing/structure/": "Structuring Your Project \u2014 The Hitchhiker's Guide to Python",
+    "https://en.wikipedia.org/wiki/Nashorn_(JavaScript_engine)": "Nashorn (JavaScript engine)",
+    "http://www.besmart.company/wp-content/uploads/2014/11/briefoverview01.pdf": "Analyzing survey text: a brief overview",
+    "http://rdfweb.org/pipermail/rdfweb-dev/2003-July/011457.html": "Might FOAF be a plain XML language?",
+    "http://www.wired.com/wiredscience/2013/11/christof-koch-panpsychism-consciousness/": "A Neuroscientist's Radical Theory of How Networks Become Conscious - Wired Science",
+    "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3168314/": "Using machine learning for concept extraction on clinical documents from multiple data sources (2011)",
+    "http://gigaom.com/2013/07/10/google-adobe-and-best-buy-are-working-on-an-ecommerce-web-data-standard/": "Google, Adobe and Best Buy are working on an ecommerce web data standard \u2014 Tech News and Analysis",
+    "http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_": "Th\u00e8se : Mod\u00e8les neuronaux pour la recherche d\u2019information : approches dirig\u00e9es par les ressources s\u00e9mantiques",
+    "http://onlinetools.org/articles/unobtrusivejavascript/chapter4.html": "How to call scripts - Unobtrusive Javascript",
+    "https://www.edge.org/response-detail/26794": "Differentiable Programming",
+    "https://jersey.java.net/documentation/latest/security.html": "Jersey user guide. Chapter\u00a015.\u00a0Security",
+    "http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi_1": "Hierarchical Multi-Label Classification Networks (2018)",
+    "http://www.semanticuniverse.com/articles-bringing-semantic-technologies-enterprise-data.html": "Bringing Semantic Technologies to Enterprise Data Semantic Universe",
+    "https://medium.freecodecamp.com/the-business-implications-of-machine-learning-11480b99184d#.gj82fw9h1": "The Business Implications of Machine Learning \u2014 Free Code Camp",
+    "http://bigbrowser.blog.lemonde.fr/2014/07/11/je-dis-ca-je-dis-rien-lancienne-directrice-de-la-redaction-du-new-york-times-denonce-le-secret-de-ladministration-obama/": "JE DIS \u00c7A JE DIS RIEN \u2013 L\u2019ancienne directrice de la r\u00e9daction du \u00ab\u00a0New York Times\u00a0\u00bb d\u00e9nonce le secret de l\u2019administration Obama Big Browser",
+    "https://ai.googleblog.com/2017/08/transformer-novel-neural-network.html": "Google AI Blog: Transformer: A Novel Neural Network Architecture for Language Understanding",
+    "http://www.bbc.co.uk/blogs/researchanddevelopment/2012/04/notes-from-the-www12-conferenc.shtml": "Notes from the WWW 2012 conference",
+    "http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1497": "Cool URIs, Fish, and Wine",
+    "http://www.semanlink.net/doc/2021/10/isotropy_in_the_contextual_embe": "Isotropy in the Contextual Embedding Space: Clusters and Manifolds OpenReview",
+    "http://www.w3.org/TR/rdfa-syntax/": "RDFa in XHTML: Syntax and Processing",
+    "http://www.semanlink.net/doc/2020/11/hash_uris_%7C_w3c_blog": "Hash URIs W3C Blog",
+    "https://github.com/Accenture/AmpliGraph": "Accenture/AmpliGraph: Python library for Representation Learning on Knowledge Graphs",
+    "http://www.lemonde.fr/technologies/article/2012/01/07/surfer-sans-entraves_1627059_651865.html#ens_id=1280415": "Surfer sans entraves (pirate box, freedom box)",
+    "https://www.tbray.org/ongoing/When/201x/2014/11/14/Being-Secret": "ongoing by Tim Bray \u00b7 How To Be Secret",
+    "http://www.readwriteweb.com/hack/2010/12/how-to-semantically-analyze-we.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29": "How to Semantically Analyze Web Pages With Delicious",
+    "http://vapour.sourceforge.net/": "Vapour, a web-based validator tool to check best practices for publishing RDF vocabularies",
+    "https://www.wired.com/story/how-dirt-could-save-humanity-from-an-infectious-apocalypse/": "How Dirt Could Save Us From Antibiotic-Resistant Superbugs WIRED",
+    "http://technology.burningbird.net/": "Mad Techie Woman",
+    "http://news.bbc.co.uk/2/hi/science/nature/7753267.stm": "BBC NEWS Science & Environment Crop diversity: Eat it or lose it",
+    "http://stackoverflow.com/questions/15377290/unsupervised-automatic-tagging-algorithms": "machine learning - Unsupervised automatic tagging algorithms? - Stack Overflow",
+    "http://www.semanlink.net/doc/2020/04/en_pleine_crise_sanitaire_le_g": "En pleine crise sanitaire, le g\u00e9ant am\u00e9ricain Palantir lorgne les donn\u00e9es des h\u00f4pitaux fran\u00e7ais",
+    "http://www.epimorphics.com/web/wiki/rdf-json-converter-google-app-engine": "An RDF to JSON converter on Google App Engine Epimorphics",
+    "http://linkedup-challenge.org/veni.html": "Veni Competition Submissions",
+    "http://www.semanlink.net/doc/2021/04/projet_requeter_le_wiktionnaire": "Projet:Requ\u00eater le Wiktionnaire \u2014 Wiktionnaire",
+    "http://www.bloomberg.com/view/articles/2016-06-17/blockchain-company-s-smart-contracts-were-dumb": "Blockchain Company's Smart Contracts Were Dumb - Bloomberg View",
+    "http://www.ieml.org/spip.php?rubrique49&lang=fr": "ieml",
+    "http://www.snee.com/bobdc.blog/2009/06/searchmonkey-and-rdfa.html": "SearchMonkey and RDFa - bobdc.blog",
+    "http://www.dataversity.net/enterprise-nosql-drives-synthesized-meaningful-data-next-gen-apps-processes/": "MarkLogic's New Enterprise NoSQL Solution Drives Next-Gen Apps and Processes - DATAVERSITY",
+    "http://www.ac-nice.fr/clea/lunap/html/Interf/InterfActiv.html": "Diffraction et interf\u00e9rences",
+    "http://fr.slideshare.net/EUDataForum/edf2013-talk-of-european-data-innovator-award-winner-michael-gorriz-cio-of-daimler-from-old-to-new-economy": "EDF2013: Talk of European Data Innovator Award Winner: Michael Gorriz, CIO of Daimler: From Old to New Economy",
+    "http://www.semanlink.net/doc/2020/07/covid19_pourquoi_zero_mort_au": "Covid19 : pourquoi z\u00e9ro mort au Vietnam ?",
+    "https://neo4j.com/developer/graph-database/#property-graph": "Property Graph Model Neo4j",
+    "http://www.semanlink.net/doc/2021/01/industrial_farming_is_one_of_th": "Industrial farming is one of the worst crimes in history The Guardian (2015)",
+    "http://en.wikipedia.org/wiki/Spark_of_Life_(novel)": "Spark of Life (novel)",
+    "http://www.theguardian.com/commentisfree/2012/sep/10/france-tu-vous-distinction-twitter-generation": "Don't let French lose the tu/vous distinction Agn\u00e8s Poirier theguardian.com",
+    "https://cse.snu.ac.kr/en/node/30084": "[Seminar] Deep Latent Variable Models of Natural Language",
+    "https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html": "Google AI Blog: Open Sourcing BERT: State-of-the-Art Pre-training for Natural Language Processing",
+    "http://www.semanlink.net/doc/2019/08/i%E2%80%99m_a_journalist_monsanto_buil": "I\u2019m a journalist. Monsanto built a step-by-step strategy to destroy my reputation Carey Gillam Opinion The Guardian",
+    "http://www.slideshare.net/fpservant/product-customization-as-linked-data": "Product Customization as Linked Data @slideshare",
+    "http://www.newscientist.com/article.ns?id=dn8105&print=true": "Micro-organisms may be turned into nano-circuitry - New Scientist",
+    "https://gist.github.com/JeniT/2927644": "Possible way to provide POSTable URI in RDF",
+    "http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_ran": "Pablo Castro sur Twitter : \"Random finding of the day for word embeddings: vec(\"apple\")-vec(\"apples\") yields a vector close to ipad, ipod, etc. (apples removes the \"fruitness\" from apple)",
+    "http://www.semanlink.net/doc/2020/12/yrjana_rankka_ghard_mastodon_s": "Yrj\u00e4n\u00e4 Rankka @ghard@mastodon.social sur Twitter : \"Facebook must be razed to the ground...\"",
+    "http://en.wikipedia.org/wiki/Poor_Law": "Poor Law - Wikipedia",
+    "http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always": "[2009.00318] More is not Always Better: The Negative Impact of A-box Materialization on RDF2vec Knowledge Graph Embeddings",
+    "http://www.technologyreview.com/blog/arxiv/25331/": "Technology Review: Blogs: arXiv blog: New Quantum Theory Separates Gravitational and Inertial Mass",
+    "http://www.tagcloud.com": "TagCloud - Home",
+    "http://manu.sporny.org/2014/github-adds-json-ld-support/": "Github adds JSON-LD support in core product The Beautiful, Tormented Machine",
+    "http://www.indiana.edu/~arch/saa/matrix/saa/saa_mod02.html": "South American Archaeology: Paleo-Indian",
+    "http://meta.wikimedia.org/wiki/Running_MediaWiki_on_Mac_OS_X#Required_software": "Help:Running MediaWiki on Mac OS X - Meta",
+    "http://www.semanlink.net/doc/2020/04/lee_moses_bad_girl_full_song": "Lee Moses - Bad Girl (full song, no break) - YouTube",
+    "https://www.theatlantic.com/business/archive/2016/08/why-arent-any-bankers-in-prison-for-causing-the-financial-crisis/496232/?single_page=true": "Why Aren\u2019t Any Bankers in Prison for Causing the Financial Crisis? - The Atlantic",
+    "http://acl2014.org/acl2014/P14-1/pdf/P14-1119.pdf": "Automatic Keyphrase Extraction: A Survey of the State of the Art (2014)",
+    "http://www.memo.fr/dossier.asp?ID=79": "",
+    "http://www.lemonde.fr/sciences/article/2012/03/06/un-robot-a-quatre-pattes-bat-un-record-de-vitesse_1652844_1650684.html": "Un robot \u00e0 quatre pattes bat un record de vitesse",
+    "http://lists.w3.org/Archives/Public/public-vocabs/2013May/0105.html": "Re: Offer data in separate places in HTML from Robert Kost on 2013-05-14 (public-vocabs@w3.org from May 2013)",
+    "http://marketplace.eclipse.org/content/maven-integration-eclipse-wtp#.UDPK1ELUN9q": "Maven Integration for Eclipse WTP Eclipse Plugins, Bundles and Products - Eclipse Marketplace",
+    "http://www.semanlink.net/doc/2020/05/fado_tropical_de_chico_buarque_": "Fado tropical de Chico Buarque et Portugal de Georges Moustaki. De la dictature de Salazar \u00e0 la R\u00e9volution des \u0153illets au Portugal",
+    "http://www.semanlink.net/doc/2019/08/the_state_of_transfer_learning_": "The State of Transfer Learning in NLP (2019)",
+    "http://www4.wiwiss.fu-berlin.de/bizer/rdfapi/": "RAP - Rdf API for PHP",
+    "http://news.bbc.co.uk/2/hi/technology/4072704.stm": "BBC NEWS Technology Chinese gamer sentenced to life",
+    "http://maven.apache.org/guides/mini/guide-ide-eclipse.html": "Maven - Guide to using Eclipse with Maven 2.x",
+    "http://www.automotive-ontology.org/": "The Automotive Ontology Working Group",
+    "http://www.semanlink.net/doc/2020/05/at_columbia_university_virtual_": "At Columbia University virtual conference, masters ply the strange and beautiful art of knowledge graphs ZDNet",
+    "http://www.dlib.org/dlib/april05/hammond/04hammond.html": "Social Bookmarking Tools (I): A General Review",
+    "http://del.icio.us/doc/api": "del.icio.us/doc/api",
+    "http://www.christian-faure.net/2016/10/02/ainsi-parlait-vraiment-zarathoustra/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29": "Ainsi parlait vraiment Zarathoustra",
+    "http://news.bbc.co.uk/2/hi/africa/7246985.stm": "BBC NEWS Africa Genocide hatred lingers in Rwanda schools",
+    "http://leobard.twoday.net/stories/1302436/": "Semantic World and Cyberspace: the heart of gnowsis",
+    "http://fr.wikipedia.org/wiki/Cascade_du_D%C3%A9roc": "Cascade du D\u00e9roc - Wikip\u00e9dia",
+    "http://www.cnrs.fr/insb/recherche/parutions/articles2017/j-curot.html?utm_content=bufferd6cca&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "CNRS - Quand les souvenirs refont surface gr\u00e2ce \u00e0 la stimulation \u00e9lectrique c\u00e9r\u00e9brale\u2026",
+    "https://www.youtube.com/watch?v=vZY5ehnYfPo": "Comment Jacko, le petit singe savant, retrouva sa maman - John S. Goodall",
+    "http://code.google.com/p/ontology-browser/": "ontology-browser - An OWL Ontology and RDF (Linked Open Data) Browser - Google Project Hosting",
+    "http://johnvey.com/features/deliciousdirector/": "del.icio.us direc.tor: Delivering A High-Performance AJAX Web Service Broker :: Johnvey",
+    "https://towardsdatascience.com/beginners-guide-to-data-science-python-docker-3181fd321a5c": "Beginner\u2019s guide to Data Science \u2014 Python + Docker \u2013 Towards Data Science",
+    "http://www.francesoir.fr/uploads/articles/d4273f5740d926b588b3f2dbc04ea8c1.jpg": "Darfour, le diable arrive \u00e0 cheval.jpg",
+    "http://fr.wikipedia.org/wiki/Un_linceul_n'a_pas_de_poches": "Un linceul n'a pas de poches",
+    "http://www-sop.inria.fr/edelweiss/software/corese/": "Corese",
+    "http://www.wolframscience.com/nks/": "Stephen Wolfram: A New Kind of Science",
+    "http://www.semanlink.net/doc/2019/08/trends_in_natural_language_proc": "Trends in Natural Language Processing: ACL 2019 In Review - Mihail Eric",
+    "http://www.semanlink.net/doc/2020/04/the_latent_structure_of_diction": "The Latent Structure of Dictionaries - Vincent\u2010Lamarre - 2016",
+    "http://developer.apple.com/referencelibrary/Java/": "Apple: Java Reference Library",
+    "http://www.weebly.com/": "Weebly Website Builder: Create a Free Website, Store or Blog",
+    "https://ds9a.nl/amazing-dna/": "DNA seen through the eyes of a coder",
+    "http://www.perceptualedge.com/articles/visual_business_intelligence/big_data_big_ruse.pdf": "Big Data, Big Ruse",
+    "http://dannyayers.com/2011/10/15/A-Role-Model-of-Consciousness": "A Role Model of Consciousness",
+    "http://www.tamtaminfo.com/tamforum/viewtopic.php?f=4&t=1337": "TamTaminfo.com \u2022Zinder : d\u00e9couverte d'une pyramide et d'un sphinx",
+    "http://www.semanlink.net/doc/2020/03/neuromorphic_spintronics_%7C_natu": "Neuromorphic spintronics Nature Electronics",
+    "http://www.ipo.org/wp-content/uploads/2017/03/Free-Search-Tools-Bulletin_-20170301.pdf": "Patent finding using free search tools",
+    "http://www.slate.fr/story/4085/lettre-ouverte-%C3%A0-un-repr%C3%A9sentant-de-la-nation": "Hadopi: lettre ouverte \u00e0 un repr\u00e9sentant de la nation slate",
+    "http://wiki.apache.org/solr/SolrCloud": "SolrCloud",
+    "https://academic.oup.com/bioinformatics/article/33/14/i37/3953940": "Deep learning with word embeddings improves biomedical named entity recognition Bioinformatics Oxford Academic (2017)",
+    "http://www.snee.com/bobdc.blog/2009/03/querying-a-set-of-named-rdf-gr.html": "Querying a set of named RDF graphs without naming the graphs - bobdc.blog",
+    "http://rdf4food.org/moin.cgi/SparqlPress": "SparqlPress - SPARQL-ing days",
+    "https://web.stanford.edu/class/cs124/lec/sem": "Word Meaning and Similarity - Stanford University",
+    "http://www.semanticblogging.org": "Semantic Blogging Demonstrator",
+    "http://www.activewidgets.com/javascript.forum.6114.21/dynamic-load-javascript-from-javascript.html": "ActiveWidgets \u2022 dynamic load javascript from javascript \u2022 java javascript download",
+    "https://arxiv.org/abs/1601.00670": "[1601.00670] Variational Inference: A Review for Statisticians",
+    "http://www.modamag.com/8milereview.htm": "8 Mile",
+    "http://www.semanlink.net/doc/2019/07/quora_question_pairs_%7C_kaggle": "Quora Question Pairs Kaggle",
+    "https://s2-eu4.ixquick.com/": "Ixquick Web Recherche",
+    "http://dataliftcamp.eventbrite.fr/": "Datalift Camp - 9-10 octobre 2012",
+    "http://spinrdf.org/": "SPIN - SPARQL Inferencing Notation",
+    "https://www.letemps.ch/sciences/2017/07/07/bacterie-tueuse-doliviers-progresse-europe": "La bact\u00e9rie tueuse d\u2019oliviers progresse en Europe - Le Temps",
+    "http://planetrdf.com": "Planet RDF",
+    "http://www.google.com/support/youtube/bin/answer.py?hl=en-GB&answer=132460": "Optimising your video uploads - YouTube Help",
+    "http://aclweb.org/anthology/P14-3006": "An Exploration of Embeddings for Generalized Phrases (2014)",
+    "https://www.youtube.com/watch?v=x2AK5eIKL8c&index=1&list=LLFla3d0JK7zqq9JJkwgvqMQ": "Patti Smith - Because The Night (1979) Germany - YouTube",
+    "http://www.dfki.uni-kl.de/~sauermann/2006/11/cooluris/": "Cool URIs for the Semantic Web",
+    "http://www.semanlink.net/doc/2020/04/should_you_use_fastai_deeple": "Should you use FastAI? - deeplearningbrasilia - Medium",
+    "http://www.technologyreview.com/computing/22702/?a=f": "How IBM Plans to Win Jeopardy!",
+    "https://medium.com/prism-truth/82a1791c94d3": "The PRISM Details Matter \u2014 PRISM Truth \u2014 Medium",
+    "http://www.antipope.org/charlie/blog-static/2013/12/why-i-want-bitcoin-to-die-in-a.html": "Why I want Bitcoin to die in a fire - Charlie's Diary",
+    "http://spectrum.ieee.org/green-tech/solar/a-tower-of-molten-salt-will-deliver-solar-power-after-sunset": "A Tower of Molten Salt Will Deliver Solar Power After Sunset - IEEE Spectrum",
+    "http://solr.pl/en/2010/11/15/solr-and-autocomplete-part-2/": "Solr and autocomplete (part 2) Solr Enterprise Search",
+    "http://www.semanlink.net/doc/2020/12/the_platypuses_were_glowing_": "'The platypuses were glowing': the secret light of Australia's marsupials Science The Guardian",
+    "http://www.snee.com/bobdc.blog/2011/10/displaying-sparql-results-on-a.html": "Displaying SPARQL results on a mobile phone - bobdc.blog",
+    "http://www.semanticuniverse.com/topquadrant-monthly-column/group-blog-entry-building-competency-semantic-web-technology.html": "Building Competency in Semantic Web Technology Semantic Universe",
+    "http://www.digital-web.com/articles/seven_javascript_techniques/": "Digital Web Magazine - Seven JavaScript Techniques You Should Be Using Today",
+    "http://arstechnica.com/news.ars/post/20060521-6880.html": "Hollywood reportedly in agreement to delay forced quality downgrades for Blu-ray, HD DVD",
+    "http://www.lemonde.fr/planete/article/2013/05/16/le-lobbying-agressif-du-departement-d-etat-americain-pour-les-ogm_3239450_3244.html": "Le lobbying agressif de Washington pour les OGM",
+    "http://www.semanlink.net/doc/2020/03/martynas_jusevicius_sur_twitter": "Martynas Jusevicius sur Twitter : \"Is there a solution for entity recognition that would use a local #KnowledgeGraph to look for matches? Ideally any SPARQL datasource...\"",
+    "http://www.newyorker.com/fact/content/articles/060828fa_fact2": "MANIFOLD DESTINY - The New Yorker",
+    "http://www.semanlink.net/doc/2020/01/%C2%AB_toi_le_yadga_mangeur_de_riz_": "\u00ab\u00a0Toi le Yadga mangeur de riz, tu es mon esclave\u00a0\u00bb\u00a0: pour rire et faire la paix, les Burkinab\u00e9s s\u2019insultent",
+    "http://www.deri.ie/about/press/coverage/details/?uid=307&ref=213": "Fujitsu Labs And DERI To Offer Free, Cloud-Based Platform To Store And Query Linked Open Data",
+    "http://www.semanlink.net/doc/2019/06/the_quiet_semi_supervised_revol": "The Quiet Semi-Supervised Revolution \u2013 Towards Data Science",
+    "http://semanticweb.com/introducing-the-used-cars-ontology_b34029#more-34029": "Introducing the Used Cars Ontology - semanticweb.com",
+    "http://wolke23.at/2010/06/creating-applications-from-shell-scripts-on-os-x/": "Creating Executable Applications from Shell Scripts on OS X",
+    "http://www.omg.org/hot-topics/fibo.htm": "Financial Industry Business Ontology (FIBO)",
+    "http://www.ird.fr": "Institut de recherche pour le D\u00e9veloppement",
+    "http://www.figoblog.org": "Figoblog. Un blog sur Internet, la biblioth\u00e9conomie et la confiture de figues",
+    "http://www.rojo.com/": "Rojo",
+    "http://vowl.visualdataweb.org/webvowl.html": "WebVOWL - Web-based Visualization of Ontologies",
+    "http://bnode.org/blog/2009/08/19/skos-dc-linked-data-semantic-tagging": "SKOS + DC + Linked Data = Semantic Tagging? - benjamin nowack's blog",
+    "http://www.w3.org/2005/Incubator/rdb2rdf/RDB2RDF_SurveyReport.pdf": "A Survey of Current Approaches for Mapping of Relational Databases to RDF",
+    "http://siri.com/": "Siri - Your Virtual Personal Assistant",
+    "http://www.lemonde.fr/idees/article/2016/04/23/repenser-l-humanite-apres-tchernobyl_4907654_3232.html": "Repenser l\u2019humanit\u00e9 apr\u00e8s Tchernobyl",
+    "http://www.semanlink.net/doc/2021/07/raphaelsty_rebert_renault_bert": "raphaelsty/rebert: Renault Bert",
+    "http://emnlp2018.org/": "2018 Conference on Empirical Methods in Natural Language Processing - EMNLP 2018",
+    "http://michaelcotterell.com/blog/2015/2/calculus-via-lambdas-in-java-8": "Calculus via Lambdas in Java 8 - Michael E. Cotterell",
+    "http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f": "Why we switched from Spacy to Flair to anonymize French case law by Micha\u00ebl Benesty Towards Data Science",
+    "https://upload.wikimedia.org/wikipedia/commons/4/43/2013-09_Introduction_%C3%A0_Wikidata.pdf": "Introduction \u00e0 Wikidata",
+    "http://blog.bnf.fr/gallica/?p=7874": "Un nouveau moteur de recherche pour Gallica Gallica",
+    "http://www.restlet.org/": "Restlet - Lightweight REST framework for Java",
+    "http://www.librarything.com/catalog/hyperfp": "LibraryThing - my online catalog of books",
+    "http://googleresearch.blogspot.fr/2013/07/natural-language-understanding-focused.html": "Natural Language Understanding-focused awards announced",
+    "https://blackboxnlp.github.io/": "Analyzing and interpreting neural networks for NLP (Workshop's Home page)",
+    "http://www.semanlink.net/doc/2020/07/raphaelsty_kdmkb": "raphaelsty/mkb: Knowledge Base Embedding By Cooperative Knowledge Distillation",
+    "http://www.sheaflight.com/": "Sheaflight Home",
+    "http://select.nytimes.com/gst/abstract.html?res=F3081FFE3E5D0C728CDDA80894DD404482&fta=y&incamp=archive:article_related": "",
+    "http://www.semanlink.net/doc/2020/06/the_softmax_function_and_its_de": "The Softmax function and its derivative - Eli Bendersky's website",
+    "http://blogs.sun.com/bblfish/entry/building_secure_and_distributed_social": "Building Secure, Open and Distributed Social Network Applications",
+    "https://www.w3.org/community/rdfjs/": "RDF JavaScript Libraries Community Group",
+    "http://piketty.pse.ens.fr/fichiers/public/Piketty2003c.pdf": "Attitude vis \u00e0 vis des in\u00e9galit\u00e9s de revenu en France : existerait-il un consensus ?",
+    "https://web.cs.dal.ca/~eem/cvWeb/pubs/tdmm-2007-final.pdf": "Document Representation and Dimension Reduction for Text Clustering",
+    "http://www.semanlink.net/doc/2021/02/what_does_95_covid_19_vaccine_": "What does 95% COVID-19 vaccine efficacy really mean? - The Lancet Infectious Diseases",
+    "https://www.quora.com/Are-there-any-more-modern-alternatives-to-word2vec": "alternatives to word2vec? - Quora",
+    "http://www.cnrs.fr/inc/communication/direct_labos/rudiuk.htm?utm_content=buffere58b9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "La lumi\u00e8re, une alternative pour assembler l'ADN \u00e0 temp\u00e9rature constante",
+    "https://github.com/tkurz/skosjs": "tkurz/skosjs \u00b7 GitHub",
+    "http://java.sun.com/javase/6/docs/technotes/guides/jdbc/": "JDK 6 Java Database Connectivity (JDBC)-related APIs & Developer Guides -- from Sun Microsystems",
+    "http://www.nature.com/news/neuroscientists-rethink-how-the-brain-recognizes-faces-1.22091": "Neuroscientists rethink how the brain recognizes faces : Nature News & Comment",
+    "http://chris.photobooks.com/json/default.htm": "JSON Visualization",
+    "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/4679220.stm": "BBC NEWS - Dark matter comes out of the cold BBC NEWS Science/Nature Dark matter comes out of the cold",
+    "http://www.lemonde.fr/web/article/0,1-0@2-3224,36-933528@51-933119,0.html": "La CNIL s'inqui\u00e8te d'une \"soci\u00e9t\u00e9 de surveillance\" qui menace les libert\u00e9s",
+    "http://www.research.att.com/~john/Grappa/": "AT&T Labs Research - Grappa",
+    "http://matthewjamestaylor.com/blog/ultimate-multi-column-liquid-layouts-em-and-pixel-widths": "Ultimate multi-column liquid layouts (em and pixel widths)",
+    "http://semwebdev.keithalexander.co.uk/blog/posts/eRDFt-tonic.html": "eRDF-T with Tonic and Smarty",
+    "http://searchengineland.com/unzip-the-secrets-behind-leveraging-unique-identifiers-structured-data-in-e-commerce-178692": "How Online Retailers Can Leverage Unique Identifiers & Structured Data",
+    "http://www.emeraldinsight.com/doi/abs/10.1108/JD-07-2013-0098": "The fallacy of the multi-API culture: The fallacy of the multi-API culture: Journal of Documentation: Vol 71, No 2",
+    "http://www.wsj.com/articles/what-the-world-will-speak-in-2115-1420234648": "What the World Will Speak in 2115 - WSJ",
+    "http://www.semanlink.net/doc/2019/09/scientists_taught_these_adorabl": "Scientists taught these adorable rats to play hide and seek - Los Angeles Times",
+    "http://www.lemonde.fr/festival/article/2014/09/04/celine-alvarez-une-instit-revolutionnaire_4481540_4415198.html": "C\u00e9line Alvarez, une institutrice r\u00e9volutionnaire",
+    "http://www.semanlink.net/doc/2020/06/on_word_embeddings": "On word embeddings",
+    "http://internetactu.blog.lemonde.fr/2013/10/25/lecole-inversee-ou-comment-la-technologie-produit-sa-disparition/": "L\u2019\u00e9cole invers\u00e9e ou comment la technologie produit sa disparition InternetActu",
+    "http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b": "Facebook apologizes to users, businesses for Apple\u2019s monstrous efforts to protect its customers' privacy \u2022 The Register",
+    "http://textsummarization.net/text-summarizer": "Text Summarizer - Text Summarization Online",
+    "http://wodka.over-blog.com/article-5492410.html": "Ryszard KAPUSCINSKI (1932-2007) - WODKA",
+    "http://www.semanlink.net/doc/2020/12/fp_servant_sur_twitter_const": "FP Servant sur Twitter : \"constructing a personal knowledge graph as a support for learning (and a metaphor of the learning experience)...\"",
+    "http://dataliberate.com/2012/02/wikidata-announcing-wikipedias-next-big-thing/": "WikiData \u2013 Announcing Wikipedia\u2019s Next Big Thing Data Liberate",
+    "http://www.411song.com/": "411-SONG",
+    "http://www.semanlink.net/doc/2020/06/richard_horton_patron_du_%C2%AB_lan": "Richard Horton, patron du \u00ab\u00a0Lancet\u00a0\u00bb\u00a0:
\u00ab\u00a0Le Covid-19 montre une faillite catastrophique des gouvernements occidentaux\u00a0\u00bb", + "http://blog.valotas.com/2011/01/resteasy-form-annotation-for-jersey.html": "Things to remember: Resteasy Form annotation for Jersey", + "http://lists.w3.org/Archives/Public/public-lod/2010May/0099.html": "Java Framework for Content Negotiation", + "http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers": "Why No One Trusts Facebook To Power The Future - ReadWrite", + "http://milicicvuk.com/blog/2014/09/06/what-semantic-web-can-learn-from-javascript/#disqus_thread": "What the Semantic Web can learn from JavaScript", + "http://www.w3schools.com/xsl/xsl_client.asp": "XSLT on the Client", + "http://www-128.ibm.com/developerworks/xml/library/x-matters41.html": "XML Matters: Beyond the DOM", + "https://www.ibm.com/us-en/marketplace/spss-text-analytics-for-surveys": "IBM SPSS Text Analytics for Surveys", + "http://www.geocities.com/anpipniger/aspiro.htm": "Pompe aspirante refoulante ordinaire", + "http://tagsonomy.com/index.php/tagging-for-business-and-education/": "Tagging for business and education", + "http://www.pbs.org/newshour/bb/business/jan-june10/makingsense_04-15.html": "What Drives Motivation in the Modern Workplace? PBS NewsHour April 15, 2010 PBS", + "http://glinden.blogspot.com/2008/04/keynotes-at-www-2008.html": "Keynotes at WWW 2008 - Geeking with Greg", + "http://www.3rd1000.com/chronology/chrono.htm": "Chronology of Events in Science, Mathematics, and Technology", + "http://www-128.ibm.com/developerworks/webservices/library/ws-wsajax/?ca=dgr-lnxw03SOAP-AJAX": "Call SOAP Web services with Ajax", + "http://www.vogella.de/articles/REST/article.html": "REST with Java (JAX-RS) using Jersey - Tutorial", + "http://www.rfi.fr/ameriques/20180326-soja-deforestation-amerique-sud-alimentation-ogm-mighty-earth": "[Enqu\u00eate] Soja: quand la d\u00e9forestation s'invite dans nos assiettes - Am\u00e9riques - RFI", + "http://secondlife.com/": "Second Life: Your World. 
Your Imagination.", + "https://edgecoders.com/learning-react-js-is-easier-than-you-think-fbd6dc4d935a": "Learning React.js is easier than you think \u2013 EdgeCoders", + "http://www.gartner.com/newsroom/id/2359715": "Gartner Identifies Top Technology Trends Impacting Information Infrastructure in 2013", + "http://semanticweb.com/introduction-linked-data-platform_b43472": "Introduction to: Linked Data Platform - Semanticweb.com", + "http://www.solrtutorial.com/solr-query-syntax.html": "Query Syntax - SolrTutorial.com", + "http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l": "Approximating the Softmax for Learning Word Embeddings", + "http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1": "Sofie Van Landeghem: Entity linking functionality in spaCy (spaCy IRL 2019) - Slides", + "http://www.wired.com/wiredscience/2014/02/cryptography-breakthrough/": "Cryptography Breakthrough Could Make Software Unhackable - Wired Science", + "http://www.youtube.com/user/LACANNASWING": "La canne \u00e0 swing", + "http://blogs.sun.com/bblfish/entry/restful_web_services_the_book": "RESTful Web Services: the book", + "http://pisani.blog.lemonde.fr/2007/11/14/porno-20-a-la-traine/": "Transnets \u00bb Blog Archive \u00bb Porno 2.0: \u00e0 la tra\u00eene", + "http://www.dehora.net/journal/2005/08/automated_mapping_between_rdf_and_forms_part_i.html": "Bill de h\u00d3ra: Automated mapping between RDF and forms, part I", + "http://www.snee.com/bobdc.blog/2010/06/replace-facebook-with-foaf-twi.html": "Replace Facebook with FOAF + twitter + ? - bobdc.blog", + "https://githubengineering.com/towards-natural-language-semantic-code-search/": "Towards Natural Language Semantic Code Search GitHub Engineering", + "http://zepheira.com/community/LED/": "Zepheira :: Community :: Linking Enterprise Data", + "http://stackoverflow.com/questions/32777630/locality-sensitive-hashing-elasticsearch": "Locality-sensitive hashing - Elasticsearch - Stack Overflow", + "http://fr.slideshare.net/troncy/live-topic-generation-from-event-streams": "Live topic generation from event streams", + "https://pdfs.semanticscholar.org/873e/ea884de581f79b1e783052f8e9fa60726fc8.pdf": "Supplementary : Extreme Multi-label Learning with Label Features for Warm-start Tagging, Ranking & Recommendation", + "http://www.wired.com/2014/11/delphi-automated-driving-system/": "A System That Any Automaker Can Use to Build Self-Driving Cars WIRED", + "http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1613": "URIBurner: Painless Generation & Exploitation of Linked Data", + "https://bugs.eclipse.org/bugs/show_bug.cgi?id=389738": "Eclipse crashes on startup", + "https://github.com/kheyer/Genomic-ULMFiT": "kheyer/Genomic-ULMFiT: ULMFiT for Genomic Sequence Data", + "http://sweet.jpl.nasa.gov/": "SWEET Semantic Web for Earth and Environmental Terminology", + "https://www.telerama.fr/livre/lecrivain-v.-s.-naipaul-est-mort,-et-cetait-lun-des-prix-nobel-de-litterature-les-moins-consensuels,n5761201.php": "L'\u00e9crivain V. S. 
Naipaul est mort, et c'\u00e9tait un prix Nobel de litt\u00e9rature tout sauf consensuel - Livres - T\u00e9l\u00e9rama.fr", + "http://www.semanlink.net/doc/2019/08/active_learning_%7C_synthesis_lec": "Active Learning Synthesis Lectures on Artificial Intelligence and Machine Learning (2012)", + "http://stackoverflow.com/questions/3001713/where-in-maven-projects-path-should-i-put-configuration-files-that-are-not-cons": "Where in maven project's path should I put configuration files that are not considered resources - Stack Overflow", + "http://tech.groups.yahoo.com/group/jena-dev/message/27990": "jena-dev : Message: Creating custom model for legacy system (Newbie question)", + "http://www.macosxhints.com/article.php?story=20071127011627796": "Leopard: Use the built-in firewall to block web sharing", + "http://www.semanlink.net/doc/2020/07/ranked_entities_in_search_resul": "Ranked Entities in Search Results at Google", + "http://www.lemonde.fr/pixels/article/2016/01/27/premiere-defaite-d-un-professionnel-du-go-contre-une-intelligence-artificielle_4854886_4408996.html": "Premi\u00e8re d\u00e9faite d\u2019un professionnel du go contre une intelligence artificielle", + "http://www.semanlink.net/doc/2019/12/12_nlp_examples_how_natural_la": "12 NLP Examples: How Natural Language Processing is Used", + "http://www.wired.co.uk/news/archive/2011-12/07/semantic-seo-volkswagen": "Volkswagen adopts semantic search technology (Wired UK)", + "http://javaluator.sourceforge.net": "Javaluator", + "http://chronicle.com/article/Badges-Earned-Online-Pose/130241/": "'Badges' Earned Online Pose Challenge to Traditional College Diplomas - College 2.0 - The Chronicle of Higher Education", + "http://www.semanlink.net/doc/2019/02/keywords2vec": "Keywords2vec", + "http://www.haut-vernet.com/indexesclangon.html": "Le Haut Vernet Randonn\u00e9es r\u00e9serve g\u00e9ologique Haute Provence Serre d'Esclangon", + "https://blogs.oracle.com/nbprofiler/entry/profiling_with_visualvm_part_2": "Profiling With VisualVM, Part 2 (NetBeans Profiler)", + "http://news.sciencemag.org/sciencenow/2013/03/microbes-likely-abundant-hundred.html?ref=hp": "Microbes Likely Abundant Hundreds of Meters Below Sea Floor - ScienceNOW", + "http://www.redmonk.com/jgovernor/archives/000474.html": "SOAP is boring", + "http://www.newyorker.com/reporting/2007/08/27/070827fa_fact_gopnik": "Letter from France: The Human Bomb: The New Yorker", + "http://www.oregonfossilguy.com/": "Oregon Fossil Guy - Plant & Animal Fossils - Geologic History", + "http://www.computerweekly.com/news/4500252192/Amazon-Web-Services-opens-up-machine-learning-service-to-European-developers": "Amazon Web Services opens up machine learning service to European developers", + "http://www.semanlink.net/doc/2020/12/autophrase_automated_phrase_mi": "AutoPhrase: Automated Phrase Mining from Massive Text Corpora", + "http://lists.w3.org/Archives/Public/public-vocabs/2013Aug/0033.html": "Proposal: Looking inside tables from \u0639\u0645\u0631 \u0628\u0646\u062c\u0644\u0648\u0646 on 2013-08-13 (public-vocabs@w3.org from August 2013)", + "http://www.lemonde.fr/pixels/article/2018/03/28/intelligence-artificielle-ce-qu-il-faut-retenir-du-rapport-de-cedric-villani_5277697_4408996.html": "Intelligence artificielle\u00a0: ce qu\u2019il faut retenir du rapport de C\u00e9dric Villani", + "https://www.lemonde.fr/planete/article/2019/02/11/le-declin-des-insectes-une-menace-grandissante-pour-les-ecosystemes-naturels_5422018_3244.html": "Les insectes pourraient avoir compl\u00e8tement disparu dans cent 
ans", + "http://stackoverflow.com/questions/16673347/multi-label-document-classification": "java - Multi-Label Document Classification - Stack Overflow", + "http://www.semanlink.net/doc/2021/03/text_is_the_api_for_humans_": "\"Text is the API for humans\"", + "http://www.amazon.com/gp/product/0804793069?redirect=true&tag=centerforimmigra&utm%5Fsource=E-mail%20Updates&utm%5Fcampaign=d1e41890b0-Immigration%5FReading%5F5%5F14%5F155%5F14%5F2015&utm%5Fmedium=email&utm%5Fterm=0%5F7dc4c5d977-d1e41890b0-44163693&pldnSite=1": "Rights, Deportation, and Detention in the Age of Immigration Control: Tom Wong: 9780804793063: Amazon.com: Books", + "http://www.w3.org/DesignIssues/RDB-RDF.html": "Relational Databases and the Semantic Web (in Design Issues)", + "http://www.semanlink.net/doc/2020/06/detecter_le_texte_dans_les_fich": "D\u00e9tecter le texte dans les fichiers (PDF/TIFF) \u00a0\u00a0 API Cloud\u00a0Vision", + "http://www.semanlink.net/doc/2020/07/abstract_wikipedia_july_2020_an": "Abstract Wikipedia/July 2020 announcement", + "http://www.lemagit.fr/etude/Canal-troque-son-mainframe-contre-un-SI-Cloud": "Canal+ troque son mainframe contre un SI Cloud", + "http://news.efinancialcareers.com/uk-en/285249/machine-learning-and-big-data-j-p-morgan?utm_campaign=Data%2BElixir&utm_medium=email&utm_source=Data_Elixir_136": "JPMorgan's massive guide to machine learning jobs in finance", + "http://blogs.lessthandot.com/index.php/artificial-intelligence/automated-keyword-extraction-tf-idf-rake-and-textrank/": "Automated Keyword Extraction \u2013 TF-IDF, RAKE, and TextRank (Less Than Dot - Blog)", + "https://nlp.stanford.edu/seminar/details/jdevlin.pdf": "Jacob Devlin talks about BERT at the Stanford NLP seminar", + "http://www.w3.org/2001/sw/SW-FAQ#rulesandonts": "How do I know when to use OWL and when to Rules? How can I use them both together?", + "https://medium.com/@erushton214/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26": "A simple spell checker built from word vectors \u2013 Ed Rushton \u2013 Medium", + "http://www.semanlink.net/doc/2020/07/le_plan_de_la_filiere_hydrogene": "Le plan de la fili\u00e8re hydrog\u00e8ne pour acc\u00e9l\u00e9rer le d\u00e9veloppement de la technologie", + "http://www.w3.org/2007/Talks/0223-Bangalore-IH/Slides.html": "State of the Semantic Web - Bangalore, 23 February, 2007 - Ivan Herman, W3C", + "http://www.mkbergman.com/?p=437": "Semantic Web Semantics: Arcane, but Important \u00bb AI3:::Adaptive Information", + "http://afromusing.com/": "Afromusing Africa and Beyond! (the personal blog of Juliana Rotich)", + "http://tuukka.iki.fi/tmp/swig-2008-04-22.html": "swig-2008-04-22", + "http://doc.rplug.renault.com/car-configurator/overview.html": "C2GWEB Overview", + "http://semwebdev.keithalexander.co.uk/snap.html": "A use for embedded semantics? eRDF Link Preview Demo", + "https://www.quora.com/What-is-a-simple-but-detailed-explanation-of-Textrank": "What is a simple but detailed explanation of Textrank? 
- Quora", + "http://www.snee.com/bobdc.blog/2012/12/normalizing-company-names-with.html": "Normalizing company names with SPARQL and DBpedia - bobdc.blog", + "http://fr.wikipedia.org/wiki/La_Passion_de_Jeanne_d'Arc": "La Passion de Jeanne d'Arc, film de Dreyer", + "http://2007.xtech.org/public/schedule/paper/40": "XTech 2007: Open Data in HTML: GRDDL, eRDF and RDFa", + "http://www.dehora.net/journal/2008/08/15/rest-as-an-engineering-discipline/": "Bill de h\u00d3ra: REST as an engineering discipline", + "http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear": "SimCSE: Simple Contrastive Learning of Sentence Embeddings", + "http://www.lamap.fr/": "La main \u00e0 la p\u00e2te", + "http://lists.w3.org/Archives/Public/public-hydra/2014Dec/0002.html": "Local identifiers in IriTemplates from Dietrich Schulten on 2014-12-13 (public-hydra@w3.org from December 2014)", + "http://www.semanlink.net/doc/2020/01/hazukashi_%F0%9F%8C%A9_sur_twitter_th": "Hazukashi \ud83c\udf29 sur Twitter : \"[THREAD] Je vous ai d\u00e9j\u00e0 parl\u00e9 de la Cit\u00e9 Emmur\u00e9e de Kowloon, le ghetto dystopique cyberpunk Hong-Kongais imp\u00e9n\u00e9trable des ann\u00e9es 80 ?", + "http://www.christian-faure.net/2016/02/07/architecture-des-apis-dans-les-systemes-distribues/": "Architecture des APIs dans les syst\u00e8mes distribu\u00e9s", + "http://www.nytimes.com/2013/04/10/opinion/eating-with-our-eyes-closed.html?_r=1&": "With Ag-Gag Laws, We\u2019re Eating With Our Eyes Closed - NYTimes.com", + "http://news.bbc.co.uk/1/hi/world/europe/4714103.stm": "BBC NEWS Bulgaria unearths Thracian riches", + "http://www.masternewmedia.org/news/2005/04/01/where_to_find_great_free.htm": "Where To Find Great Free Photographs And Visuals For Your Own Online Articles", + "http://ivan-herman.name/2012/04/17/linked-data-on-the-web-workshop-lyon/": "Linked Data on the Web Workshop, Lyon \u00ab Ivan\u2019s private site", + "https://rare-technologies.com/word2vec-tutorial/": "Word2vec in gensim Tutorial RaRe Technologies", + "http://www.les-ernest.fr/orlean": "Andr\u00e9 Orl\u00e9an: L'instabilit\u00e9 des march\u00e9s financiers Les Ernest", + "http://www.w3.org/DesignIssues/HTTP-URI.html": "What do HTTP URIs Identify? 
- Design Issues", + "http://www.lemonde.fr/afrique/article/2015/02/09/la-chine-et-la-france-dans-un-menage-a-trois-avec-l-afrique_4572735_3212.html": "La Chine et la France dans un m\u00e9nage \u00e0 trois avec l\u2019Afrique", + "http://www.bbc.com/news/science-environment-30058176": "BBC News - Philae comet lander sends more data before losing power", + "http://www.google.com/search?sourceid=mozclient&ie=utf-8&oe=utf-8&q=-inurl%3A(htm%7Chtml%7Cphp)+intitle%3A%22index+of%22+%2B%22last+modified%22+%2B%22parent+directory%22+%2Bdescription+%2Bsize+%2B(jpg%7Cgif)+": "Google hack example", + "http://www.semanlink.net/doc/2019/11/bayesian_deep_learning_with_fas": "Bayesian deep learning with Fastai : how not to be uncertain about your uncertainty !", + "http://novaspivack.typepad.com/nova_spivacks_weblog/2006/11/minding_the_pla.html": "Minding the Planet: Minding The Planet -- The Meaning and Future of the Semantic Web", + "http://manu.sporny.org/2014/json-ld-origins-2/": "JSON-LD and Why I Hate the Semantic Web The Beautiful, Tormented Machine", + "https://news.cnrs.fr/articles/ramanujan-the-man-who-knew-infinity?utm_content=buffer4160a&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Ramanujan: The Man Who Knew Infinity CNRS News", + "http://www.semanlink.net/doc/2019/10/textual_representation_learning": "TEXTUAL REPRESENTATION LEARNING DRIVEN BY KNOWLEDGE RESOURCES: APPLICATION TO INFORMATION RETRIEVAL", + "http://ids.snu.ac.kr/w/images/f/f8/SC12.pdf": "Building an operational product ontology system", + "https://www.youtube.com/watch?v=GlUUmiIEinY&t=13m19s": "I pity the poor immigrant", + "http://www.futura-sciences.com/magazines/terre/infos/actu/d/terre-extinction-masse-permien-elle-due-micro-organismes-53129/": "L'extinction de masse du Permien est-elle due \u00e0 des micro-organismes ?", + "https://athalhammer.github.io/wikidata-autocomplete/wikidata.html": "#PageRank-based #Wikidata autocomplete powered by #Apache #Solr", + "http://www.newscientist.com/article.ns?id=mg18925423.600&print=true": "Three cosmic enigmas, one audacious answer - New Scientist Three cosmic enigmas, one audacious answer - News Print New Scientist", + "http://www.w3.org/TR/2014/REC-vocab-data-cube-20140116/#cubes-model": "The RDF Data Cube Vocabulary", + "https://cloud.google.com/tpu/": "Cloud\u00a0TPU\u00a0\u2013 Acc\u00e9l\u00e9rateurs de ML pour TensorFlow \u00a0\u00a0 Google Cloud", + "http://www.arcticphoto.co.uk/": "Arctic & Antarctic pictures", + "http://www.semanlink.net/doc/2021/01/la_vie_aquatique": "La Vie Aquatique", + "http://horicky.blogspot.co.at/2012/09/location-sensitive-hashing-in-map-reduce.html": "Pragmatic Programming Techniques: Location Sensitive Hashing in Map Reduce", + "https://en.wikipedia.org/wiki/Naked_Blood": "Naked Blood", + "http://arstechnica.com/information-technology/2015/12/facebooks-open-sourcing-of-ai-hardware-is-the-start-of-the-deep-learning-revolution/": "Facebook\u2019s open-sourcing of AI hardware is the start of the deep-learning revolution Ars Technica", + "http://people.csail.mit.edu/dfhuynh/projects/www-conferences/www-conferences.html": "WWW Conference Exhibits", + "http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165": "Le dimanche de M. 
Bertrand, par Herv\u00e9 Kempf", + "http://www.eswc2007.org/": "4th European Semantic Web Conference 2007", + "https://www.cs.cmu.edu/~rsalakhu/NY_2019_v3.pdf": "Integrating Domain-Knowledge into Deep Learning (2019)", + "http://n2.talis.com/wiki/Platform_FAQ": "Platform FAQ - n\u00b2 wiki", + "https://www.airpair.com/angularjs/posts/top-10-mistakes-angularjs-developers-make": "10 Top Mistakes AngularJS Developers Make", + "http://www.semanlink.net/doc/2019/12/yoshua_bengio_revered_architec": "Yoshua Bengio, Revered Architect of AI, Has Some Ideas About What to Build Next - IEEE Spectrum", + "http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-2.html": "Semantic Web project ideas number 3 - (ERP) bobdc.blog", + "https://github.com/linkeddata/cimba": "CIMBA Client-Integrated Micro-Blogging Architecture application", + "http://www.google.com/webmasters/tools/richsnippets": "Google Structured Data Testing Tool", + "http://www.newscientist.com/article/mg22229722.800-venus-death-dive-to-unlock-secrets-of-earths-evil-twin.html": "Venus death dive to unlock secrets of Earth's evil twin - space - 05 June 2014 - New Scientist", + "http://webbackplane.com/mark-birbeck/blog/2009/04/getting-started-with-rdfa": "Getting started with RDFa: Creating a basic FOAF profile webBackplane", + "http://stackoverflow.com/questions/13603882/feature-selection-and-reduction-for-text-classification": "nlp - Feature Selection and Reduction for Text Classification - Stack Overflow", + "https://groups.google.com/forum/?fromgroups=#!topic/publishing-statistical-data/gMojBEtvHqQ": "Code lists for units of measure? - Google Groups", + "http://gotze.eu/2003/01/coding-for-automation.html": "GotzeBlogged \u00bb Blog Archive \u00bb Coding for automation", + "http://www.lemonde.fr/pixels/article/2016/03/12/a-berlin-edward-snowden-repond-a-barack-obama-sur-le-chiffrement_4881829_4408996.html": "A Berlin, Edward Snowden r\u00e9pond \u00e0 Barack Obama sur le chiffrement", + "https://www.theguardian.com/news/2018/mar/17/cambridge-analytica-facebook-influence-us-election": "Revealed: 50 million Facebook profiles harvested for Cambridge Analytica in major data breach News The Guardian", + "http://benlog.com/2015/01/23/your-information-wants-to-be-free-obamacare-edition/": "(your) information wants to be free \u2013 obamacare edition Benlog", + "http://tomcat.apache.org/tomcat-5.0-doc/appdev/sample/build.xml": "General purpose build script for web applications and web services", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906026,0.html": "A 8 et 11 ans, ils sont menac\u00e9s de fichage g\u00e9n\u00e9tique pour vol de jouets", + "http://www.semanlink.net/doc/2020/02/yoshua_bengio": "Yoshua Bengio", + "http://www.flickr.com/photos/tags/ldow2008/": "Flickr: Items tagged with ldow2008", + "http://www.semanlink.net/doc/2021/03/les_secrets_reveles_de_la_bible": "Les secrets r\u00e9v\u00e9l\u00e9s de la Bible ARTE", + "http://www.semanlink.net/doc/2021/06/semantic_search_with_s_bert_is_": "Semantic Search with S-BERT is all you need", + "https://developers.google.com/knowledge-graph/": "Google Knowledge Graph Search API \u00a0\u00a0 Knowledge Graph Search API \u00a0\u00a0 Google Developers", + "http://www.lemonde.fr/education/article/2017/11/15/ashesi-laboratoire-du-ghana-de-demain_5214968_1473685.html": "Ashesi, laboratoire du Ghana de demain", + "http://www.numenta.com/htm-overview/education/HTM_CorticalLearningAlgorithms.pdf": "Numenta: Hierarchical Temporal Memory, including HTM cortical learning 
algorithms", + "http://www.apple.com/sitemap/": "Apple - Site Map (example of website with good hierarchy)", + "http://www.cnrs.fr/inee/communication/breves/b353.html": "CNRS - Des fossiles dans les g\u00e9nomes pour dater l\u2019arbre du vivant", + "http://archeo.blog.lemonde.fr/2014/10/16/une-magnifique-mosaique-decouverte-dans-la-tombe-damphipolis/": "Une magnifique mosa\u00efque d\u00e9couverte dans la tombe d\u2019Amphipolis Dans les pas des arch\u00e9ologues", + "http://en.wikipedia.org/wiki/The_Brother_from_Another_Planet": "The Brother from Another Planet", + "http://java.sun.com/j2se/1.5.0/docs/guide/jmx/tutorial/essential.html": "Essentials of the JMX API", + "http://news.bbc.co.uk/2/hi/science/nature/8040982.stm": "BBC NEWS Science & Environment Peering into Hubble's future", + "http://www.autorepair.eu.com/oasis.htm": "Auto Repair Information - OASIS Documentation", + "https://youtu.be/OV692rxN_LI": "Patti Smith Montreux 2005", + "http://www.lemonde.fr/sciences/article/2012/08/05/a-la-recherche-d-une-vie-passee-sur-mars_1742465_1650684.html": "A la recherche d'une vie pass\u00e9e sur Mars", + "http://theodi.org/": "Open Data Institute Knowledge for everyone", + "https://fr.wikipedia.org/wiki/Eug%C3%A8ne_de_Savoie-Carignan#Guerre_contre_l.27Empire_ottoman": "Eug\u00e8ne de Savoie", + "http://en.wikipedia.org/wiki/Bombay_(film)": "Bombay (film)", + "http://www.mathworks.fr/videos/solving-a-sudoku-puzzle-using-a-webcam-68773.html": "Digit recognition to solve Sudoku puzzles automatically with a webcam - MATLAB Vid\u00e9o", + "http://news.bbc.co.uk/1/hi/health/4228712.stm": "BBC NEWS - Concern over three-parent embryo", + "http://markmail.org/thread/kzsg3qntovmqzbje": "[foaf-protocols] semantic pingback improvement request for foaf - Story Henry ", + "http://www.lelombrik.net/videos/30536/coiffeur-virtuel.html": "Coiffeur virtuel - SON 3D", + "http://m240.net81-67-17.noos.fr/~fps/sicg/c2g/2004/08/eproc_sendingcats_notes.htm": "", + "http://www.lemonde.fr/education/article/2012/08/09/la-salle-de-classe-planetaire_1742909_1473685.html": "La salle de classe plan\u00e9taire", + "http://www.theguardian.com/commentisfree/2015/sep/20/silicon-valley-end-of-social-democracy": "Does Silicon Valley\u2019s reign herald the end of social democracy? 
The Guardian", + "https://plus.google.com/112399767740508618350/posts/6mwKR8pF37Z": "Kingsley Idehen - Google+ - Google Knowledge Graph to Linked Data Here are some simple\u2026", + "http://www.w3.org/wiki/WebSchemas/SchemaDotOrgProposals": "WebSchemas/SchemaDotOrgProposals - W3C Wiki", + "http://www.temis.com/press-releases/-/asset_publisher/PBR3sbzpVJ9d/content/press-release-oecd?p_r_p_22296479_owat=content&p_r_p_22296479_owagid=10488&p_r_p_22296479_owaut=press-release-oecd&redirect=http://www.temis.com/press-releases?p_p_id=101_INSTANCE_PBR3sbzpVJ9d&p_p_lifecycle=0&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&utm_content=buffer5d3ce&utm_source=buffer&utm_medium=twitter&utm_campaign=Buffer": "Press release - OECD - Press Releases - TEMIS", + "http://informationality.com/tagglywiki": "TagglyWiki - a taggable, reusable, non-linear personal web notebook", + "http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s": "[1602.01137] A Dual Embedding Space Model for Document Ranking", + "https://devcenter.heroku.com/articles/jax-rs-http-caching": "HTTP Caching in Java with JAX-RS Heroku Dev Center", + "http://www.lemonde.fr/planete/article/2016/09/21/l-onu-mobilise-sur-la-menace-fondamentale-que-represente-la-resistance-aux-antibiotiques_5001511_3244.html": "L\u2019ONU mobilise sur la \u00ab\u00a0menace fondamentale\u00a0\u00bb que repr\u00e9sente la r\u00e9sistance aux antibiotiques", + "http://jena.sourceforge.net/": "Jena on Sourceforge", + "http://my.opera.com/tomheath/blog/index.dml/tag/web": "web - Tom Heath's Displacement Activities", + "http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte_1": "(((\u0644()(\u0644() 'yoav)))) sur Twitter : \"is there a convincingly successful application of graph convolutions in NLP you can point me to?\"", + "https://en.wikipedia.org/wiki/Crash_(2004_film)": "Crash (2004 film)", + "http://www.bbc.com/news/health-36439260": "Gene editing technique could transform future - BBC News", + "https://github.com/aaronsw/html2text": "html2text", + "http://www.semanlink.net/doc/2021/10/annee_de_la_biologie_2021_2022": "Ann\u00e9e de la biologie 2021-2022", + "http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4431": "KEYNOTE: Real-time Emergency Response Using Semantic Web Technology", + "http://www.mkbergman.com/?cat=22": "UMBEL \u00bb AI3:::Adaptive Information", + "https://medium.com/@apiltamang/learning-entity-embeddings-in-one-breath-b35da807b596": "Learning Entity Embeddings in one breath \u2013 Apil Tamang \u2013 Medium", + "https://towardsdatascience.com/how-to-use-dataset-in-tensorflow-c758ef9e4428": "How to use Dataset in TensorFlow \u2013 Towards Data Science", + "https://medium.com/@bklyn_newton/is-growth-necessary-for-a-thriving-economy-59258df196b4#.hiq4du2py": "Is growth necessary for a thriving economy? 
\u2014 Medium", + "http://www.semanlink.net/doc/2020/01/hits_at_tac_kbp_2015_entity_dis": "HITS at TAC KBP 2015:Entity Discovery and Linking, and Event Nugget Detection", + "http://www.gizmag.com/extinct-gastric-brooding-frog-cloned/26687/": "Scientists clone extinct frog that gives birth from its mouth", + "http://ben.bolte.cc/blog/2016/gensim.html": "Using Gensim Word2Vec Embeddings in Keras Ben Bolte's Blog", + "http://www.nytimes.com/2010/12/25/science/earth/25fossil.html": "In Kenya, Huts Far Off the Grid Harness the Sun - NYTimes.com", + "http://jan2012.ml-class.org/": "Machine Learning", + "http://www.semanlink.net/doc/2020/05/handling_events_eloquent_jav": "Handling Events :: Eloquent JavaScript", + "https://realpython.com/python-keras-text-classification/": "Practical Text Classification With Python and Keras \u2013 Real Python", + "http://dannyayers.com/misc/grddl-reference": "GRDDL Quick reference card", + "http://lifehacker.com/5643460/how-to-track-and-potentially-recover-your-stolen-laptop-or-android-with-prey": "How to Track and (Potentially) Recover Your Stolen Laptop or Android with Prey", + "http://www.semanlink.net/doc/2021/02/arogozhnikov_einops_deep_learn": "arogozhnikov/einops: Deep learning operations reinvented (for pytorch, tensorflow, jax and others)", + "http://eprints.ecs.soton.ac.uk/17054/1/das05r-www2009-semanticweb-futurebrowsing.pdf": "Interactively using Semantic Web knowledge: Creating scalable abstractions with FacetOntology", + "http://www.semanlink.net/doc/2021/04/manipulations_politiques_en_lig": "Manipulations politiques en ligne\u00a0: la lanceuse d\u2019alerte Sophie Zhang d\u00e9nonce les lacunes de Facebook", + "https://blog.semantic-web.at/2016/08/15/introducing-a-graph-based-semantic-layer-in-enterprises/": "Introducing a Graph-based Semantic Layer in Enterprises The Semantic Puzzle", + "http://danbri.org/words/2008/02/09/273": "danbri\u2019s foaf stories \u00bb Graph URIs in SPARQL: Using UUIDs as named views", + "http://www.w3.org/2004/02/skos/core.rdf": "SKOS core (rdf file)", + "http://people.xiph.org/~xiphmont/demo/neil-young.html": "24/192 Music Downloads are Very Silly Indeed", + "http://googleresearch.blogspot.co.uk/2012/05/from-words-to-concepts-and-back.html": "From Words to Concepts and Back: Dictionaries for Linking Text, Entities and Ideas", + "http://www.heppnetz.de/projects/goodrelations/GoodRelations-TR-final.pdf": "GoodRelations: technical report", + "http://www.djlu.fr/": "DjLu - The simple and free tool to organize your research papers", + "http://www.semanlink.net/doc/2020/05/label_unstructured_data_using_e": "Label unstructured data using Enterprise Knowledge Graphs 2", + "http://publishing-statistical-data.googlecode.com/svn/trunk/specs/src/main/html/index.html": "Expressing Statistical Data in RDF with SDMX-RDF", + "http://www.newscientist.com/article/mg22630164.700-did-neurons-evolve-more-than-once-on-earth.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.VShXyVyn8bw": "Did neurons evolve more than once on Earth? 
- life - 10 April 2015 - New Scientist", + "https://lejournal.cnrs.fr/articles/enrichir-les-sols-en-carbone-pour-lutter-contre-le-changement-climatique": "Enrichir les sols en carbone pour lutter contre le changement climatique CNRS Le journal", + "http://www.semanlink.net/doc/2021/02/recent_advances_in_language_mod": "Recent Advances in Language Model Fine-tuning", + "http://www.openlinksw.com/weblog/oerling/index.vspx": "Orri Erling's Weblog", + "http://osxdaily.com/2006/11/30/how-to-manually-adjust-the-macbook-pro-keyboard-backlight/": "How-to: Manually adjust the MacBook Pro Keyboard Backlight - OS X Daily", + "http://regexpal.com/": "Regex Tester \u2013 RegexPal", + "http://www.threetags.com/": "threetags.com: secure your data online", + "http://www.semanlink.net/doc/2020/04/depistage_du_coronavirus_les_": "D\u00e9pistage du coronavirus : les raisons du fiasco fran\u00e7ais sur les tests", + "http://developer.apple.com/documentation/Java/Conceptual/Java14Development/05-CoreJavaAPIs/CoreJavaAPIs.html": "Java Development Guide for Mac OS X: Core Java APIs and the Java Runtime on Mac OS X", + "http://www.cs.cmu.edu/~rsalakhu/": "Ruslan Salakhutdinov - Carnegie Mellon School of Computer Science", + "http://www.bizcoder.com/the-insanity-of-the-vary-header": "Bizcoder - The Insanity of the Vary Header", + "http://www.bbc.com/news/science-environment-30097648": "BBC News - Comet landing: Organic molecules detected by Philae", + "https://static.googleusercontent.com/external_content/untrusted_dlcp/www.google.com/en//webmasters/docs/search-engine-optimization-starter-guide.pdf": "search-engine-optimization-starter-guide.pdf", + "http://www.armadillo.fr/": "Armadillo - Gestion documentaire multim\u00e9dia", + "http://www.panimages.org/index.jsp?displang=eng": "PanImages: cross-lingual image search", + "https://en.wikipedia.org/wiki/The_Grand_Budapest_Hotel": "The Grand Budapest Hotel", + "http://passeurdesciences.blog.lemonde.fr/2015/03/15/bienvenue-a-gattaca-sera-t-il-bientot-realite/": "\u00ab\u00a0Bienvenue \u00e0 Gattaca\u00a0\u00bb sera-t-il\u00a0bient\u00f4t r\u00e9alit\u00e9\u00a0? 
Passeur de sciences", + "http://ruder.io/multi-task/": "An Overview of Multi-Task Learning for Deep Learning", + "http://www.numerama.com/politique/149590-loi-penale-chiffrement-blocage-de-sites-les-amendements-a-suivre.html": "Chiffrement, blocage de sites, lectures interdites : les amendements de la Loi P\u00e9nale \u00e0 suivre - Politique - Numerama", + "http://www.semanlink.net/doc/2021/01/kgc_2021_call_for_presentations": "KGC 2021 Call for Presentations The Knowledge Graph Conference", + "http://techwiki.openstructs.org/index.php/Solr": "Solr - OpenStructs TechWiki", + "http://2006.xmlconference.org/proceedings/188/presentation.html": "Relational database integration with RDF/OWL", + "http://virtuoso.openlinksw.com/Whitepapers/html/VirtLinkedDataDeployment.html": "Deploying Linked Data (Virtuoso)", + "http://munich2012.drupal.org/program/sessions/decoupling-content-management": "Decoupling Content Management DrupalCon Munich 2012", + "https://www.media.mit.edu/projects/grounded-language-learning-and-understanding/overview/": "Grounded Language Learning and Understanding \u2014 MIT Media Lab (1999-2001)", + "http://www.xml.com/pub/a/2006/08/09/solr-indexing-xml-with-lucene-andrest.html": "XML.com: Solr: Indexing XML with Lucene and REST", + "http://www.semanlink.net/doc/2020/03/au_kenya_l%E2%80%99unique_girafe_blanc": "Au Kenya, l\u2019unique girafe blanche femelle et son petit tu\u00e9s par des braconniers", + "http://www.youtube-mp3.org/fr": "Convertisseur YouTube vers mp3", + "http://junit.sourceforge.net/doc/cookstour/cookstour.htm": "JUnit: A Cook\u2019s Tour", + "http://www.eol.org/": "Encyclopedia of Life", + "http://blogs.sun.com/bblfish/entry/it_s_all_about_context": "Keeping track of Context in Life and on the Web", + "http://vocamp.org/wiki/HypiosVoCampParisMay2010#Annotations_Ontology": "Annotations Ontology", + "http://huet.blog.lemonde.fr/2016/12/12/comment-produire-nobels-et-medailles-fields/": "Comment produire Nobels et m\u00e9dailles Fields ? 
{Sciences\u00b2}", + "http://www.semanlink.net/doc/2021/03/ernest_ilisca_0000_0002_3842_5": "Ernest ILISCA (0000-0002-3842-586X) - ORCID Connecting Research and Researchers", + "http://en.wikipedia.org/wiki/Great_Hall_of_the_People": "Great Hall of the People - Wikipedia, the free encyclopedia", + "https://www.groundai.com/project/convolutional-2d-knowledge-graph-embeddings/": "Convolutional 2D Knowledge Graph Embeddings (2017)", + "https://ieeexplore.ieee.org/abstract/document/8320777": "Collective List-Only Entity Linking: A Graph-Based Approach - IEEE Journals & Magazine (2018)", + "http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065": "L'Union europ\u00e9enne d\u00e9tient les clefs du succ\u00e8s, par Jean-Paul Besset, Daniel Cohn-Bendit, Yannick Jadot, Eva Joly", + "http://www.aclweb.org/anthology/P/P14/P14-1119.xhtml": "Automatic Keyphrase Extraction: A Survey of the State of the Art (2014)", + "http://projectwordsworth.com/the-paradox-of-the-proof/?_ga=1.18937784.1815790288.1389170208": "The Paradox of the Proof Project Wordsworth", + "https://www.groundai.com/project/evaluation-of-sentence-embeddings-in-downstream-and-linguistic-probing-tasks/": "Evaluation of sentence embeddings in downstream and linguistic probing tasks", + "http://ld2sd.deri.org/lod-ng-tutorial/": "Linked Data Tutorial - Publishing and consuming linked data with RDFa", + "http://www.bnode.org/archives2/58": "ARC Embedded RDF (eRDF) Parser for PHP", + "http://www.lexpress.fr/actualite/politique/le-front-national-sera-majoritaire-predit-le-philosophe-bernard-stiegler_1280994.html": "\"Le Front national sera majoritaire\", pr\u00e9dit le philosophe Bernard Stiegler - L'Express", + "http://hiltmon.com/blog/2015/06/27/a-simple-markdown-spotlight-importer/": "A Simple Markdown Spotlight Importer - Hiltmon", + "http://fr.slideshare.net/vrandezo/wikidata-presentation-at-semtechbiz-berlin-2012": "Wikidata presentation at SemTechBiz Berlin 2012", + "http://gigaom.com/2014/05/12/alchemyapi-rolls-out-deep-learning-based-computer-vision-as-a-service/": "AlchemyAPI rolls out deep-learning-based computer vision as a service \u2014 Tech News and Analysis", + "http://www.metmuseum.org/toah/splash.htm": "Timeline of Art History The Metropolitan Museum of Art", + "http://putaindecode.io/fr/articles/js/react/": "Introduction \u00e0 ReactJS", + "http://www.vogella.com/tutorials/JavaConcurrency/article.html": "Java concurrency (multi-threading) - Tutorial", + "http://www.seobythesea.com/2009/10/how-search-engines-might-expand-abbreviations-in-queries/": "How Search Engines Might Expand Abbreviations in Queries - SEO by the Sea \u2693", + "http://xmlarmyknife.org/docs/rdf/sparql/": "XMLArmyKnife -- SPARQL Query Service", + "https://theintercept.com/2017/12/22/snowdens-new-app-uses-your-smartphone-to-physically-guard-your-laptop/": "Edward Snowden\u2019s New App Uses Your Smartphone to Physically Guard Your Laptop", + "http://nbviewer.jupyter.org/github/bmabey/pyLDAvis/blob/master/notebooks/pyLDAvis_overview.ipynb#topic=0&lambda=0.6&term=": "pyLDAvis", + "http://db.tidbits.com/article/9544": "TidBITS Opinion: Instant Messaging for Introverts", + "http://www.w3.org/Submission/2006/10/": "Submission Request to W3C: OWL 1.1 Web Ontology Language", + "http://jastor.sourceforge.net": "Jastor", + "http://www.w3.org/TR/rdfa-api/": "RDFa API", + "http://drdrak.over-blog.com/article-288506.html": "Comment 
trouver des MP3 avec Google", + "http://www.wired.co.uk/news/archive/2014-01/27/maidsafe-bitcloud": "Scottish company Maidsafe claims to have built a Bitcloud-like system (Wired UK)", + "http://www.lemonde.fr/afrique/article/2013/05/23/au-cameroun-un-projet-geant-d-huile-de-palme-fait-scandale_3416319_3212.html": "Au Cameroun, un projet g\u00e9ant d'huile de palme fait scandale", + "http://www.technologyreview.com/news/506466/given-tablets-but-no-teachers-ethiopian-children-teach-themselves/": "Given Tablets but No Teachers, Ethiopian Children Teach Themselves MIT Technology Review", + "https://www.quora.com/What-is-the-best-tutorial-on-RNN-LSTM-BRNN-and-BLSTM-with-visualization": "What is the best tutorial on RNN, LSTM, BRNN, and BLSTM with visualization? - Quora", + "https://krapooarboricole.wordpress.com/2009/11/02/le-chene-de-la-loupe-eure-et-loire/": "Le v\u00e9n\u00e9rable ch\u00eane de La Loupe, Meauc\u00e9 (Eure-et-Loir) Krapo arboricole", + "https://arxiv.org/abs/1510.00726": "[1510.00726] A Primer on Neural Network Models for Natural Language Processing", + "https://www.bbc.co.uk/news/science-environment-45049024": "Small height evolved twice on 'Hobbit' island of Flores - BBC News", + "http://composing-the-semantic-web.blogspot.com/2009/01/object-oriented-semantic-web-with-spin.html": "Composing the Semantic Web: The Object-Oriented Semantic Web with SPIN", + "http://semanticweb.com/structured-data-gets-a-lift_b23820#more-23820": "Structured Data Gets a Lift - semanticweb.com", + "http://particletree.com/features/lightbox-gone-wild/": "Particletree \u00b7 Lightbox Gone Wild! (Dialogs in javascript)", + "http://blog.liu.se/olafhartig/2019/01/10/position-statement-rdf-star-and-sparql-star/": "Position Statement: The RDF* and SPARQL* Approach to Annotate Statements in RDF and to Reconcile RDF and Property Graphs Olaf Hartig", + "http://webr3.org/blog/": "webr3.org", + "http://www.semanlink.net/doc/2021/10/%C2%AB_le_massacre_de_la_saint_barth": "\u00ab\u00a0Le massacre de la Saint-Barth\u00e9lemy s\u2019est jou\u00e9 entre voisins\u00a0\u00bb", + "http://radimrehurek.com/gensim/models/phrases.html": "gensim: models.phrases \u2013 Phrase (collocation) detection", + "https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet": "Markdown Cheatsheet", + "http://philcalcado.com/2017/03/22/pattern_using_seudo-uris_with_microservices.html": "Pattern: Using Pseudo-URIs with Microservices", + "http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for": "A2N: Attending to Neighbors for Knowledge Graph Inference - ACL 2019", + "http://histropedia.com/": "Histropedia - The Timeline of Everything", + "http://stackoverflow.com/questions/11106911/jersey-how-to-get-the-uri-of-a-resource": "java - Jersey: How to get the URI of a resource? 
- Stack Overflow", + "http://www.semanlink.net/doc/2020/03/max_little_sur_twitter_causa": "Max Little sur Twitter : \"Causal bootstrapping - a simple way of doing causal inference using arbitrary machine learning algo...\"", + "https://github.com/zazi/rdf2rdfaizer": "RDF2RDFa-izer", + "http://www.cnrs.fr/inee/communication/breves/b175.html?utm_content=bufferef172&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Les origines de l'incroyable biodiversit\u00e9 des insectes remises en cause", + "http://bugbrother.blog.lemonde.fr/2012/07/12/plus-belle-la-videosurveillance/": "Plus belle la vid\u00e9osurveillance BUG BROTHER", + "http://www.cringely.com/2016/05/14/apple-and-didi-is-about-foreign-cash-and-the-future-of-motoring/": "Apple and Didi is about foreign cash and the future of motoring - I, Cringely", + "http://www.springerlink.com/content/u3588666k780ng58/": "Use of Semantic Web technologies on the BBC Web Sites", + "http://mindraider.sourceforge.net/": "MindRaider - Semantic Web Outliner", + "http://www.fastcompany.com/1708896/wiki-bee-leaks-epa-document-reveals-agency-knowingly-allowed-use-of-bee-toxic-pesticide": "Wik-Bee Leaks: EPA Document Shows It Knowingly Allowed Pesticide That Kills Honey Bees Fast Company", + "http://cordis.europa.eu/fp7/ict/content-knowledge/docs/report-ws-pan-eu-dat-porta_en.pdf": "Technical workshop on the goals and requirements for a pan-European data portal", + "http://continuations.com/post/91111911845/more-on-basic-income-and-robots": "Continuations : More On Basic Income (and Robots)", + "http://www.semanlink.net/doc/2020/02/ivan_maisky_wikipedia": "Ivan Maisky", + "http://parachutes.tv/pages/beauty.html": "BEAUTY OF MATHEMATICS - PARACHUTES", + "https://github.com/google-research/bert": "GitHub - google-research/bert: TensorFlow code and pre-trained models for BERT", + "https://github.com/markdown-it/markdown-it": "markdown-it", + "https://www.wired.com/story/how-to-protect-yourself-after-the-next-big-corporate-hack/": "How to Protect Yourself After the Next Big Corporate Hack WIRED", + "https://cloud.google.com/datalab/": "Cloud\u00a0Datalab\u00a0\u2013 Outil interactif d'analyse de donn\u00e9es \u00a0\u00a0 Google Cloud", + "https://stanfordnlp.github.io/stanfordnlp/": "StanfordNLP StanfordNLP", + "http://arstechnica.com/science/2016/07/algorithms-used-to-study-brain-activity-may-be-exaggerating-results/": "Software faults raise questions about the validity of brain studies Ars Technica", + "http://www.france5.fr/maternelles/scolarite/w00337/12/114620.cfm": "France 5 : Les maternelles - Ecologie (Les Petits D\u00e9brouillards)", + "http://www.wiwiss.fu-berlin.de/suhl/bizer/toolkits/": "Developers Guide to Semantic Web Toolkits for different Programming Languages", + "http://www.semanlink.net/doc/2020/10/thomas_piketty_%C2%AB_que_faire_de": "Thomas Piketty\u00a0: \u00ab\u00a0Que faire de\u00a0la dette Covid-19\u00a0?\u00a0\u00bb", + "https://www.theatlantic.com/technology/archive/2017/04/the-tragedy-of-google-books/523320/": "Torching the Modern-Day Library of Alexandria - The Atlantic", + "http://swik.net/": "The Open Software Wiki - SWiK", + "https://medium.com/basic-income/true-freedom-comes-with-basic-income-7ff1368e170#.bsbnlrs5h": "True Freedom Comes With Basic Income", + "http://code.tutsplus.com/tutorials/8-regular-expressions-you-should-know--net-6149": "8 Regular Expressions You Should Know - Tuts+ Code Tutorial", + "https://app.box.com/notes/238671166577": "Workshop 17Oct2017", + 
"http://www.niso.org/news/events/2013/dcmi/developing/": "September 25: Linked Data in Developing Countries - National Information Standards Organization", + "http://pisani.blog.lemonde.fr/pisani/2005/07/le_moteur_de_re.html": "Transnets, des gadgets aux r\u00e9seaux: Le moteur de recherche : interface et miroir (1)", + "http://jsoup.org/cookbook/cleaning-html/whitelist-sanitizer": "Prevent cross site scripting with jsoup", + "http://sleeplessinslc.blogspot.fr/2009/03/example-of-caching-with-rest-using.html": "Sleepless in Salt Lake City: An Example of Caching with REST using Jersey JAX-RS", + "http://www.semanlink.net/doc/2019/08/an_easy_introduction_to_pytorch": "An easy introduction to Pytorch for Neural Networks", + "http://www.jenitennison.com/blog/node/149": "Priorities for RDF Jeni's Musings", + "http://code.google.com/p/owl1-1/wiki/UserRequirements": "UserRequirements - owl1-1 - Google Code", + "http://www.semanlink.net/doc/2020/10/villagers_%7C_berlin_live_arte_": "A Trick Of The Light - Villagers Berlin Live \u2013 ARTE Concert (2019)", + "http://www.newscientist.com/article.ns?id=dn8251&print=true": "Hormone levels predict attractiveness of women - New Scientist", + "http://www.lightbluetouchpaper.org/2010/12/25/a-merry-christmas-to-all-bankers/": "Light Blue Touchpaper - A Merry Christmas to all Bankers", + "http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=100821": "France Inter > Sur les \u00e9paules de Darwin - A la recherche des myst\u00e8res de l\u2019h\u00e9r\u00e9dit\u00e9", + "http://osds.openlinksw.com/#DownloadChrome": "OpenLink Structured Data Sniffer", + "https://www.ibm.com/watson/developercloud/": "IBM Watson Developer Cloud", + "http://nytimes.blogrunner.com": "", + "http://www.infosdelaplanete.org/4447/mali-coupe-de-bois-contre-hopital-et-3e-pont.html": "Infos de la Plan\u00e8te - Mali: Coupe de bois contre h\u00f4pital et 3e pont ? - Les Echos (Mali) - 2008-09-16", + "https://fr.slideshare.net/BhaskarMitra3/vectorland-brief-notes-from-using-text-embeddings-for-search": "Vectorland: Brief Notes from Using Text Embeddings for Search", + "http://driving.stanford.edu/papers.html": "Stanford Autonomous Driving Team - Publications", + "http://linuxfr.org/news/api-platform-2-un-cadriciel-pour-creer-des-api-web-hypermedia-en-quelques-minutes": "API\u00a0Platform\u202f2 : un cadriciel pour cr\u00e9er des API Web hyperm\u00e9dia en quelques minutes - LinuxFr.org", + "https://voyageenbarbarie.wordpress.com/": "Voyage en Barbarie, de Delphine Deloget et C\u00e9cile Allegra", + "http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=20931": "[Linking-open-data] Returning to backlinks - Additional requirement: Paging", + "http://www2.cnrs.fr/presse/communique/4673.htm?utm_content=buffer4527f&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Placenta chez les femelles, masse musculaire chez les m\u00e2les\u00a0: le double h\u00e9ritage d'un virus - Communiqu\u00e9s et dossiers de presse - CNRS", + "http://apassant.net/blog/2008/01/12/one-foaf-fits-all/": "One FOAF fits all : Alexandre Passant", + "http://piketty.blog.lemonde.fr/2015/12/03/pourquoi-le-gouvernement-protege-t-il-les-multinationales/": "Pourquoi le gouvernement prot\u00e8ge-t-il les multinationales ? 
Le blog de Thomas Piketty", + "http://press.web.cern.ch/press/PressReleases/Releases2010/PR07.10E.html": "LHC research programme gets underway", + "http://www.lemonde.fr/afrique/article/2016/03/03/la-guerre-du-sexe-passe-de-l-afrique-aux-ghettos-urbains-de-chicago_4876235_3212.html": "La guerre du sexe passe de l\u2019Afrique aux ghettos urbains de Chicago", + "http://realitesbiomedicales.blog.lemonde.fr/2017/08/08/interview-confession-du-plus-grand-serial-killer-le-moustique/": "Interview-confession du plus grand serial-killer : le moustique R\u00e9alit\u00e9s Biom\u00e9dicales", + "http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.190.8467": "CiteSeerX \u2014 Collaborative Creation of Semantic Points of Interest as Linked Data on the Mobile Phone", + "https://semantic-web.com/": "Semantic Web Company", + "http://www.semanlink.net/doc/2021/03/sentencetransformers_documentat": "SentenceTransformers Documentation", + "https://docs.angularjs.org/api/ng/service/$q": "AngularJS: API: $q", + "http://www.newstatesman.com/martin-robbins/2012/09/trouble-ted-talks": "The trouble with TED talks", + "http://www.simonsfoundation.org/quanta/20140624-fluid-tests-hint-at-concrete-quantum-reality/": "Fluid Experiments Support Deterministic \u201cPilot-Wave\u201d Quantum Theory Simons Foundation", + "http://pipl.com/": "Pipl - People Search", + "http://www.semanticuniverse.com/articles-binding-java-objects-rdf.html": "Binding Java Objects to RDF", + "http://blogs.computerworld.com/android/22806/google-knows-nearly-every-wi-fi-password-world": "Google knows nearly every Wi-Fi password in the world Computerworld Blogs", + "http://www.bbc.com/news/science-environment-29054889": "BBC News - Deep sea 'mushroom' may be new branch of life", + "http://internetactu.blog.lemonde.fr/2012/12/21/methodes-agiles-gerer-les-entreprises-comme-des-logiciels/": "M\u00e9thodes agiles : g\u00e9rer les entreprises comme des logiciels InternetActu", + "https://www.newyorker.com/magazine/2018/09/17/can-mark-zuckerberg-fix-facebook-before-it-breaks-democracy?utm_campaign=NLP%20News&utm_medium=email&utm_source=Revue%20newsletter": "Can Mark Zuckerberg Fix Facebook Before It Breaks Democracy? 
The New Yorker", + "http://karpathy.github.io/2015/11/14/ai/": "Short Story on AI: A Cognitive Discontinuity.", + "http://data.semanticweb.org/conference/eswc/2008/html": "ESWC2008 Conference Data", + "http://www.christian-faure.net/2011/04/18/sens-et-enjeux-des-modeles-de-stockage-et-dacces-aux-donnees/": "Sens et enjeux des mod\u00e8les de stockage et d\u2019acc\u00e8s aux donn\u00e9es", + "https://support.google.com/webmasters/answer/146898": "schema.org: About RDFa - Webmaster Tools Help", + "http://www.businessweek.com/innovate/content/sep2006/id20060907_525435.htm": "Invasion of the DIY Robots", + "http://informationweek.com/news/showArticle.jhtml?articleID=201805939": "Fair Use Worth More to Economy Than Copyright, CCIA Says", + "https://www.scripted.com/scripted-updates/nlp-hacking-in-python": "Teaching a Computer to Read: - Scripted", + "http://www.jaxa.jp/press/2007/11/20071107_kaguya_movie_e.html": "JAXA Moving image of the Moon shot by the HDTV camera of the KAGUYA (SELENE)", + "http://java.sun.com/products/jfc/tsc/articles/bookmarks/": "The Swing HTML Parser - Parsing a Netscape Navigator Bookmarks File", + "https://radimrehurek.com/gensim/models/word2vec.html": "gensim: models.word2vec \u2013 Deep learning with word2vec", + "http://chillyinside.com/blog/?p=15": "chillyinside.com \u00bb Blog Archive \u00bb 2006 Jena User Conference", + "http://www.semanticuniverse.com/articles-putting-powder-work.html-0": "Putting POWDER to Work Semantic Universe", + "https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf": "Unsupervised Machine Translation. G. Lample (slides)", + "http://www.icmu.org/icmu2012/papers/FP-5.pdf": "A Destination Prediction Method Based on Behavioral Pattern Analysis of Nonperiodic Position Logs.", + "http://www.semanlink.net/doc/2020/06/la_crise_du_coronavirus_montre_": "La crise du coronavirus montre que \u00ab\u00a0nous ne jouons plus dans la cour des grands\u00a0\u00bb", + "http://www.ldh-toulon.net/": "Ligue des droits de l'Homme- [LDH-Toulon]", + "http://www.facebook.com/profile.php?id=705574873": "Facebook Fran\u00e7ois-Paul Servant", + "https://class.coursera.org/wh1300-002/class": "Announcements A History of the World since 1300", + "http://www.infosdelaplanete.org/5931/le-jatropha-curcas-comme-alternative-dans-le-sahel-burkinabe.html": "Infos de la Plan\u00e8te - Le jatropha curcas comme alternative dans le sahel burkinab\u00e9 - Sidwaya (Burkina Faso) - 2010-04-28", + "https://blog.insightdatascience.com/using-bert-for-state-of-the-art-pre-training-for-natural-language-processing-1d87142c29e7": "Using BERT for state-of-the-art pre-training for natural language processing", + "http://m3pe.org/semperwiki/": "SemperWiki", + "https://www.coursera.org/course/datasci": "Introduction to Data Science Coursera", + "https://www.youtube.com/watch?v=e-5obm1G_FY": "Anjana Vakil: Learning Functional Programming with JavaScript - JSUnconf 2016 - YouTube", + "http://www.sciencedaily.com/releases/2014/09/140917131812.htm": "New branch added to European family tree: Europeans descended from at least 3, not 2, groups of ancient humans -- ScienceDaily", + "http://www.w3.org/2012/08/web-and-automotive/Overview.html": "Shift into High Gear on the Web : W3C Web and Automotive Workshop - 14-15 November 2012", + "http://www.w3.org/blog/data/2014/01/06/vocabularies-at-w3c/": "Vocabularies at W3C W3C Data Activity", + "http://fr.slideshare.net/RobVesse/quadrupling-your-elephants-rdf-and-the-hadoop-ecosystem": "Quadrupling your elephants - RDF and the 
Hadoop ecosystem", + "http://piketty.pse.ens.fr/files/Piketty2013Capital21c.pdf": "Le capital au 21eme si\u00e8cle - Slides", + "http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0298.html": "Re: Generic Property-Value Proposal for Schema.org from Francois-Paul Servant on 2014-04-30 (public-vocabs@w3.org from April 2014)", + "https://theclevermachine.wordpress.com/": "The Clever Machine Topics in Computational Neuroscience & Machine Learning", + "http://payswarm.com": "Rewarding 'awesome' on the Web. The Universal Payment Standard.", + "http://markets.businessinsider.com/news/stocks/bill-gates-artificial-intellegence-doesnt-think-ai-taking-everyones-jobs-is-a-bad-thing-2018-1-1014021350": "Bill Gates thinks AI taking everyone's jobs could be a good thing (MSFT)", + "http://sentientdevelopments.blogspot.com/2007/08/fermi-paradox-back-with-vengeance.html": "Sentient Developments: The Fermi Paradox: Back with a vengeance", + "http://www.semanlink.net/doc/2021/08/apple_s_plan_to_think_differen": "Apple's Plan to \"Think Different\" About Encryption Opens a Backdoor to Your Private Life Electronic Frontier Foundation", + "http://www.pbs.org/wgbh/nova/physics/fabric-of-cosmos.html": "NOVA The Fabric of the Cosmos", + "http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-11.pdf": "Describing Customizable Products on the Web of Data, LDOW 2013", + "http://www.w3.org/DesignIssues/CloudStorage.html": "Socially aware cloud storage - Design Issues", + "http://del.icio.us/gambina": "My delicious : http://del.icio.us/gambina", + "http://prdchroniques.blog.lemonde.fr/2007/11/14/comment-prononcez-vous-colonna/": "Chroniques judiciaires \u00bb Blog Archive \u00bb Comment prononcez-vous Colonna?", + "http://blog.semantic-web.at/2011/10/17/geological-survey-austria-launches-thesaurus-project/": "Geological Survey Austria launches thesaurus project The Semantic Puzzle", + "http://www.w3.org/2001/sw/sweo/public/UseCases/": "Semantic Web Education and Outreach Interest Group Case Studies and Use Cases", + "http://www.information-age.com/channels/information-management/news/2111803/talis-shuts-down-semantic-web-operations.thtml": "Talis shuts down semantic web operations\u00a0", + "http://www.dailyrazor.com/java/overview.php": "DailyRazor Hosting - Advanced Java Hosting, JSP Hosting, Servlets, ASP.NET, ASP 3.0, PHP Hosting", + "http://aloha-editor.org/": "Aloha Editor - HTML5 WYSIWYG Editor", + "http://denisnddo.free.fr/html/zarma.htm": "Notions \u00e9l\u00e9mentaires de Zarma", + "http://environment.newscientist.com/article.ns?id=dn12433": "Eight-million-year-old bug is alive and growing - New Scientist Environment", + "http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-01.pdf": "Synote: Weaving Media Fragments and Linked Data", + "http://www.semanlink.net/doc/2021/03/the_angels_share": "The Angels' Share", + "http://dannyayers.com/archives/2005/10/03/semantic-web-starting-points/": "Danny Ayers, Raw Blog : \u00bb Semantic Web Starting Points", + "http://semanticweb.com/applying-semantic-technology-to-big-data_b28569": "Applying Semantic Technology to Big Data - semanticweb.com", + "http://www.semanlink.net/doc/2019/07/nandana_mihindukulasooriya_sur_": "Nandana Mihindukulasooriya sur Twitter : \"I wonder if there is a service for disambiguation and entity linking for @wikidata where I can pass a text and get Wikidata entities mentioned in it!", + "http://chemicalsemantics.com/": "Chemical Semantics", + 
"http://www.robopolis.com/produit/280/20/Robots-programmables/Lego-Mindstorms---Robotics-Invention-System-20.php": "Lego Mindstorms", + "http://www.semanlink.net/doc/2020/02/contrastive_self_supervised_lea": "Contrastive Self-Supervised Learning Ankesh Anand (2020)", + "http://www.aventureforth.com/2005/09/16/top-10-ajax-applications-part-2/": "A Venture Forth \u00bb Blog Archive \u00bb Top 10 Ajax Applications (Part 2)", + "http://www.lemonde.fr/idees/article/2012/07/23/tombouctou-ou-notre-pari-de-civilisation_1736511_3232.html": "Tombouctou ou notre \"pari de civilisation\"", + "http://www.lemonde.fr/societe/article/2014/06/06/un-veteran-s-echappe-de-sa-maison-de-retraite-pour-aller-aux-commemorations-du-debarquement_4433952_3224.html": "Un v\u00e9t\u00e9ran s'\u00e9chappe de sa maison de retraite pour aller aux comm\u00e9morations du D\u00e9barquement", + "http://www.semanlink.net/doc/2019/11/jacques_attali_%C2%AB_la_chute_du_": "Jacques Attali\u00a0: \u00ab\u00a0La chute du mur de Berlin est une anecdote sans importance\u00a0\u00bb - Le Point", + "http://petrole.blog.lemonde.fr/2011/10/31/lempire-romain-et-la-societe-dopulence-energetique-un-parallele/": "L\u2019empire romain et la soci\u00e9t\u00e9 d\u2019opulence \u00e9nerg\u00e9tique : un parall\u00e8le Oil Man", + "http://www.lisperati.com/casting.html": "Casting SPELs in Lisp", + "https://www.polymer-project.org/0.5/": "Polymer", + "http://www.semanticweb.com/features/introducing_the_creative_destruction_7_act_play_152454.asp": "Introducing The Creative Destruction 7 Act Play - Semantic Web", + "http://www.ldh-toulon.net/spip.php?article1628": "[LDH-Toulon] un tableau accablant de la vid\u00e9osurveillance en Grande-Bretagne", + "http://www.w3.org/2001/sw/sweo/public/UseCases/Volkswagen/": "Case Study: Contextual Search for Volkswagen and the Automotive Industry", + "http://www.nextinpact.com/news/95754-le-cnnum-flingue-loi-renseignement-devant-conseil-constitutionnel.htm": "Le CNNum flingue la loi Renseignement devant le Conseil constitutionnel - Next INpact", + "http://www.semanlink.net/doc/2019/09/machine_learning_for_humans_pa": "Machine Learning for Humans, Part 5: Reinforcement Learning", + "http://www2012.org/proceedings/companion/p115.pdf": "From Linked Data to Linked Entities: a Migration Path - Giovanni Bartolomeo, Stefano Salsano", + "http://www.webreference.com/programming/ajax_tech2/index.html": "How to use images and cookies to enable client-server communication", + "https://www.lemonde.fr/planete/article/2019/05/03/le-senat-rejette-l-inscription-du-crime-d-ecocide-dans-le-droit-penal_5458028_3244.html": "Le S\u00e9nat rejette l\u2019inscription du \u00ab\u00a0crime d\u2019\u00e9cocide\u00a0\u00bb dans le droit p\u00e9nal", + "https://github.com/spotify/annoy": "GitHub - spotify/annoy: Approximate Nearest Neighbors in C++/Python optimized for memory usage and loading/saving to disk", + "https://www.quantamagazine.org/new-theory-cracks-open-the-black-box-of-deep-learning-20170921/": "New Theory Cracks Open the Black Box of Deep Learning Quanta Magazine", + "https://bitcoinmagazine.com/21919/decentralist-perspective-bitcoin-might-need-small-blocks/": "The Decentralist Perspective, or Why Bitcoin Might Need Small Blocks \u2013 Bitcoin Magazine", + "http://blog.aylien.com/a-review-of-the-recent-history-of-natural-language-processing/": "A Review of the Recent History of Natural Language Processing - AYLIEN", + "http://decoupledcms.org/": "Decoupling Content Management", + 
"http://www.readwriteweb.com/archives/is_schemaorg_really_a_google_land_grab.php": "Is Schema.org Really a Google Land Grab?", + "http://www.simile-widgets.org/timeline/": "SIMILE Widgets Timeline", + "http://www.mnh.si.edu/africanvoices/": "", + "http://maps.google.com": "", + "http://www.semanlink.net/doc/2019/08/product_key_memory_pkm_minima": "Product-Key Memory (PKM) Minimalist implementation of a Product-Key Memory layer", + "https://lejournal.cnrs.fr/articles/crowdsourcing-tous-chercheurs": "Crowdsourcing : tous chercheurs ! CNRS le journal", + "https://lejournal.cnrs.fr/articles/quand-la-terre-etait-une-boule-de-neige": "Quand la Terre \u00e9tait une boule de neige CNRS Le journal", + "http://www.lemonde.fr/economie/article/2014/04/23/thomas-piketty-les-etats-unis-ont-une-relation-tres-compliquee-avec-les-inegalites_4405523_3234.html": "Thomas Piketty : \u00ab Le retour des in\u00e9galit\u00e9s inqui\u00e8te aux Etats-Unis\u00a0\u00bb", + "https://github.com/ldodds/dowl": "ldodds/dowl \u00b7 GitHub", + "http://berardjean.blog.lemonde.fr/2010/07/05/ernst-wiechert-fluchtversuch-aus-dem-kz-tentative-devasion-du-camp-de-concentration/": "Ernst Wiechert: Fluchtversuch aus dem KZ ( Tentative d\u2019\u00e9vasion du Camp de concentration) Raison garder!", + "http://pro.europeana.eu/web/network/europeana-tech/-/wiki/Main/Market+study+on+technical+options+for+semantic+feature+extraction": "Europeana Professional - Market study on technical options for semantic feature extraction - Europeana Tech", + "http://news.bbc.co.uk/1/shared/spl/hi/picture_gallery/05/africa_night_commuters/html/1.stm": "BBC NEWS Ouganda: Night commuters", + "http://blog.newsweek.com/blogs/techtonicshifts/archive/2010/04/22/facebook-f8-internet-open-social-graph-semantic-web-twitter.aspx": "Facebook's Play to Take Over the Entire Internet - Techtonic Shifts Blog - Newsweek.com", + "http://www.x-tags.org/": "X-Tag - Web Components Custom Element Polylib", + "http://www.semanlink.net/doc/2019/07/similarity_learning_with_or_wi": "Similarity Learning with (or without) Convolutional Neural Network (slides)", + "https://www.nytimes.com/2019/04/06/health/drug-resistant-candida-auris.html?action=click&module=RelatedCoverage&pgtype=Article®ion=Footer": "A Mysterious Infection, Spanning the Globe in a Climate of Secrecy - The New York Times", + "http://schema.org/docs/schema_org_rdfa.html": "Schema.org core schema as RDFa Lite", + "https://docs.google.com/presentation/d/1O6ozzZHHxGzU-McpvEG09hl7K6oQDd2Taw0FOlnxJc8/preview#slide=id.p": "Introduction to Machine learning - Google Slides", + "http://paolo.evectors.it/stories/entKcollectorWWWW.html": "Topics, aggregators, K-collector and other assorted stuff. 
Paolo Valdemarin Weblog", + "http://www.luern.fr/": "Fouilles de Corent", + "http://blogs.zdnet.com/Google/?p=18": "Google: Thinking about the future of TV ads - ZDNet.com", + "http://www.webrtc.org/home": "WebRTC", + "https://lejournal.cnrs.fr/dossiers/2018-une-annee-de-science?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1545903944": "2018, une ann\u00e9e de science CNRS Le journal", + "https://lejournal.cnrs.fr/articles/etoiles-a-neutrons-une-fusion-qui-vaut-de-lor": "\u00c9toiles \u00e0 neutrons : une fusion qui vaut de l\u2019or CNRS Le journal", + "https://ajolicoeur.wordpress.com/RelativisticGAN/": "The relativistic discriminator: a key element missing from standard GAN (2018) \u2013 Alexia Jolicoeur-Martineau", + "http://piketty.blog.lemonde.fr/2015/11/28/les-pollueurs-du-monde-doivent-payer/": "Les pollueurs du monde doivent payer Le blog de Thomas Piketty", + "http://openstructs.org": "OpenStructs Open source data structs and semantic frameworks", + "http://www.semanlink.net/doc/2020/02/machine_learning_crash_course_": "Machine Learning Crash Course \u00a0\u00a0 Google Developers", + "http://googleresearch.blogspot.fr/2014/12/automatically-making-sense-of-data.html": "Research Blog: Automatically making sense of data", + "http://blog.aylien.com/12-of-the-best-free-natural-language-processing-and-machine-learning-educational-resources/?utm_content=66559950&utm_medium=social&utm_source=twitter": "12 of the best free Natural Language Processing and Machine Learning educational resources - AYLIEN", + "http://www.bbc.co.uk/blogs/researchanddevelopment/2012/03/automatically-tagging-the-worl.shtml": "BBC - Research and Development: Automatically tagging the World Service archive", + "http://thenextweb.com/insider/2015/11/01/technological-innovation-for-the-third-world/": "9 ways tech is transforming the developing world", + "https://www.bbc.com/news/world-asia-china-47667880": "Huge fossil discovery made in China's Hubei province - BBC News", + "http://www.cs.man.ac.uk/~bmotik/publications/papers/motik05metamodeling.pdf": "On the properties of metamodeling in OWL", + "https://escholarship.org/uc/item/48z2p287": "Machine learning and natural language processing on the patent corpus: Data, tools, and new measures (2015)", + "https://medium.com/s/story/yes-you-should-delete-facebook-heres-why-bc623a3b4625": "Yes, You Should Delete Facebook", + "http://www.semanlink.net/doc/2019/07/handling_imbalanced_datasets_in": "Handling imbalanced datasets in machine learning - Towards Data Science", + "http://semanticweb.com/the-semantic-web-has-gone-mainstream-wanna-bet_b27329": "The Semantic Web Has Gone Mainstream! Wanna Bet? 
- semanticweb.com", + "http://blog.morrisjohns.com/javascript_closures_for_dummies": "JavaScript Closures for Dummies Developing\u00a0thoughts \u2014\u00a0Morris\u00a0Johns", + "http://json-ld.org/spec/latest/json-ld-framing/": "JSON-LD Framing 1.0", + "http://www.semanlink.net/doc/2020/01/ocasio_cortez_stumps_zuckerberg": "Ocasio-Cortez stumps Zuckerberg with questions on far right and Cambridge Analytica Technology The Guardian", + "https://productforums.google.com/forum/#!topicsearchin/webmasters/category$3Astructured-data$20jsonld": "category:structured-data jsonld (in Webmaster Central Help Forum) - Google Product Forums", + "http://ajaxian.com/": "Ajaxian", + "http://www.cringely.com/2012/10/28/steve-ballmers-dilemma/": "I, Cringely Steve Ballmer's Dilemma ~ I, Cringely", + "http://lists.w3.org/Archives/Public/public-vocabs/2012Jun/0031.html": "additionalType property, vs extending Microdata syntax for multiple types from Dan Brickley on 2012-06-15 (public-vocabs@w3.org from June 2012)", + "http://www.semanlink.net/doc/2020/09/le_discours_du_griot_genealogis": "Le discours du griot g\u00e9n\u00e9alogiste chez les Zarma du Niger - Sandra Bornand - Google Books", + "http://www.semanlink.net/doc/2019/10/bert_is_now_part_of_google_sear": "BERT is now part of Google Search, so let\u2019s understand how it reasons", + "http://www.marekrei.com/blog/paper-summaries/": "57 Summaries of Machine Learning and NLP Research - Marek Rei", + "https://duckduckgo.com/": "DuckDuckGo", + "http://www.w3.org/blog/SW/2010/07/17/powder_not_so_quiet": "W3C Semantic Web Activity News - POWDER: Not So Quiet", + "http://www.semanlink.net/doc/2021/05/inria_paris_nlp_almanach_team_": "Inria Paris NLP (ALMAnaCH team) sur Twitter : \"#PAGnol, a new, free, GPT-3-like generative LM for French", + "https://docs.docker.com/mac/": "Get Started with Docker", + "http://www.semanlink.net/doc/2020/06/information_bottleneck_for_nlp_": "Information Bottleneck for NLP (parsing & summarization)", + "http://createjs.org/guide/": "Create.js - Create.js Integration Guide", + "http://stardust.jpl.nasa.gov/": "Stardust JPL NASA", + "http://www.w3.org/TR/swbp-n-aryRelations/": "Defining N-ary Relations on the Semantic Web", + "http://news.stanford.edu/pr/2015/pr-memory-monitor-biox-061715.html": "Brain connections last as long as the memories they store, Stanford neuroscientist finds Stanford News Release", + "http://www.ifixit.com": "iFixit - iBook & PowerBook Parts", + "http://www.wired.com/2015/01/beth-moon-ancient-trees/#slide-1": "The Most Ancient and Magnificent Trees From Around the World WIRED", + "http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_sbe": "Nils Reimers sur Twitter : \"SBERT Release v1.1.0\"", + "http://www.newscientist.com/article/mg20627621.000-language-lessons-you-are-what-you-speak.html?full=true": "Language lessons: You are what you speak - life - 01 June 2010 - New Scientist", + "https://fr.wikipedia.org/wiki/A%C3%AFcha_(s%C3%A9rie_de_t%C3%A9l%C3%A9films)": "A\u00efcha (s\u00e9rie de t\u00e9l\u00e9films) \u2014 Wikip\u00e9dia", + "https://blog.makk.es/java-libs-for-processing-wiki-markup.html": "Java libs for processing wiki markup \u2013 Makk.es", + "https://medium.com/@madrugado/interesting-stuff-at-emnlp-part-ii-ce92ac928f16": "Interesting Stuff at EMNLP (part II) \u2013 Valentin Malykh \u2013 Medium", + "http://www.geocities.com/hollywood/set/8100/frroman.html": "Le roman d'un tricheur", + "http://www.infoworld.com/article/06/10/03/HNfrenchodf_1.html": "French gov't report recommends 
standardizing on ODF", + "http://www.telegraph.co.uk/finance/comment/ambroseevans_pritchard/11150306/German-model-is-ruinous-for-Germany-and-deadly-for-Europe.html": "German model is ruinous for Germany, and deadly for Europe - Telegraph", + "http://diveintogreasemonkey.org": "Dive Into Greasemonkey", + "http://dev2ops.org/2010/02/what-is-devops/": "What is DevOps? - dev2ops", + "http://www.newscientist.com/article/dn25538-how-your-ancestors-farms-shaped-your-thinking.html#.U2_ooMYoqwy": "How your ancestors' farms shaped your thinking - New Scientist", + "http://www.semanlink.net/doc/2020/11/archeologie_en_crete_sur_les": "Arch\u00e9ologie\u00a0: en Cr\u00e8te, sur les traces des \u00ab\u00a0\u00e2ges obscurs\u00a0\u00bb", + "https://www.crummy.com/software/BeautifulSoup/": "Beautiful Soup", + "http://www.semanlink.net/doc/2020/09/json_web_tokens_jwt_io": "JSON Web Tokens - jwt.io", + "http://blogs.zdnet.com/semantic-web/?p=243": "Zemanta talks Linked Data with SDK and commercial API The Semantic Web ZDNet.com", + "http://unsupervised.cs.princeton.edu/ICMtalk/aroraplenary.html": "Mathematics of Machine Learning and Deep Learning - Plenary talk at International Congress of Mathematicians 2018", + "http://sourceforge.net/projects/touchgraph/": "SourceForge.net: Project Info - TouchGraph", + "http://linkedup-challenge.org/": "LinkedUp Challenge", + "http://blogs.guardian.co.uk/news/archives/2005/07/07/bomb_blasts_plunge_london_into_chaos.html": "Bomb blasts plunge London into chaos from Guardian Unlimited: Newsblog", + "https://www.analyticsvidhya.com/blog/2016/02/time-series-forecasting-codes-python/": "Complete guide to create a Time Series Forecast (with Codes in Python)", + "http://www.semanlink.net/doc/2021/04/cnrs_zinder_1900_2019_paulin": "CNRS- Zinder 1900-2019 - Pauline Rousseau", + "http://www.diplomatie.gouv.fr/fr/conseils-aux-voyageurs_909/pays_12191/niger_12300/index.html": "Minist\u00e8re des Affaires Etrang\u00e8res. 
Conseils aux voyageurs au Niger", + "http://ceur-ws.org/Vol-2180/ISWC_2018_Outrageous_Ideas_paper_6.pdf": "Capturing meaning: Toward an abstract Wikipedia (ISWC 2018)", + "https://webflow.com/": "Web design tool, CMS, and hosting platform Webflow", + "http://www.semanlink.net/doc/2019/08/deep_infomax_learning_good_rep": "Deep InfoMax: Learning good representations through mutual information maximization - Microsoft Research", + "https://nadesnotes.wordpress.com/2016/04/10/natural-language-processing-nlp-fundamentals-finite-state-transducers-fsts/": "Natural Language Processing (NLP) Fundamentals: Finite State Transducers (FSTs) \u2013 Nade's Notes", + "http://blog.socialcast.com/javascript-memory-management/": "JavaScript Memory Management", + "http://jena.sourceforge.net/grddl/index.html": "Jena GRDDL Reader", + "http://www.ihes.fr/~lafforgue/textes/lettresaucollege.pdf": "", + "http://w3-org.9356.n7.nabble.com/RDF-Thrift-A-binary-format-for-RDF-data-td289661.html": "RDF Thrift : A binary format for RDF data", + "http://www.readwriteweb.com/archives/10_semantic_apps_to_watch_one_year_later.php": "Semantic apps to watch - ReadWriteWeb", + "http://googlewebmastercentral.blogspot.com/2008/04/crawling-through-html-forms.html": "Official Google Webmaster Central Blog: Crawling through HTML forms", + "http://java.net/projects/sommer": "Sommer \u2014 Java.net", + "http://www.w3.org/wiki/TagIssue57Responses": "TagIssue57Responses - W3C Wiki", + "http://www.kuro5hin.org/story/2005/6/12/143721/743": "Who Will Google Buy Next?", + "http://www.nytimes.com/2008/07/31/opinion/31kristof.html": "A Farm Boy Reflects", + "https://medium.com/@chriszhu12/highlights-of-emnlp-2018-55892fba4247": "Highlights of EMNLP 2018 \u2013 Chris Zhu \u2013 Medium", + "http://www.xml.com/pub/a/2004/02/11/googlexml.html": "XML.com: Googling for XML", + "https://www.nytimes.com/2018/11/18/technology/artificial-intelligence-language.html": "Finally, a Machine That Can Finish Your Sentence - The New York Times", + "http://www.ldodds.com/blog/archives/000272.html": "Lost Boy: SPARQLing the BBC Programme Catalogue", + "http://www.semanlink.net/doc/2021/04/amazon_reconnue_coupable_d%E2%80%99avoi": "Amazon reconnue coupable d\u2019avoir gard\u00e9 une partie des pourboires de ses livreurs aux Etats-Unis", + "http://perso.orange.fr/jacques.morel67/ccfo/crimcol.pdf": "Calendrier des crimes de la France outre-mer", + "http://drupal.org/project/usage": "Project usage overview drupal.org", + "http://blogs.sun.com/bblfish/entry/sparqling_altavista_the_meaning_of": "SPARQLing AltaVista: the meaning of forms", + "http://www.bbc.co.uk/news/science-environment-21150047": "BBC News - Quantum biology: Do weird physics effects abound in nature?", + "http://julialang.org/": "The Julia Language", + "http://www.ibm.com/developerworks/library/wa-datasets/": "The Semantic Web, Linked Data and Drupal, Part 2: Combine linked datasets with Drupal 7 and SPARQL Views", + "http://chrispederick.com/work/webdeveloper/": "Web Developer Extension", + "http://deeplearning4j.org/neuralnet-overview.html": "Introduction to Deep Neural Networks - Deeplearning4j: Open-source, distributed deep learning for the JVM", + "http://ask.slashdot.org/article.pl?sid=06/08/27/000248": "Slashdot Teaching Primary School Students Programming?", + "http://web.archive.org/web/19981201182307/http://www.hypersolutions.fr/M3/AfricanMuseums/default.ssi": "Multi Media Museum et les mus\u00e9es africains", + "http://clusty.com": "Clusty the Clustering Engine", + 
"http://blog.howarddierking.com/2016/10/07/swagger-ain-t-rest-is-that-ok/": "Swagger Ain't REST - is that OK?", + "http://bioinformaticsalgorithms.com/index.htm": "Bioinformatics Algorithms: an Active Learning Approach", + "http://bergie.iki.fi/blog/decoupling_content_management/": "Decoupling Content Management - Henri Bergius", + "http://nodejs.org/": "node.js", + "https://www.xlpat.com/": "Online Patent Search Tools Patent Analytics - XLPAT Labs", + "http://code.google.com/p/timemap/": "timemap - Javascript library to help use a SIMILE timeline with online maps including Google, OpenLayers, and Bing. - Google Project Hosting", + "http://www.onlinetools.org/articles/unobtrusivejavascript/chapter4.html": "How to call scripts - Unobtrusive Javascript", + "http://code.google.com/p/google-refine/": "google-refine - Project Hosting on Google Code", + "http://webseitz.fluxent.com/wiki/MindMapping": "Mind Mapping (WebSeitz/wikilog)", + "https://act.greenpeace.org/page/14369/petition/1": "EDF : c\u2019est pas bient\u00f4t fini le nucl\u00e9aire ?", + "http://www.pbs.org/wgbh/nova/einstein/": "NOVA Einstein's Big Idea PBS", + "https://www.letemps.ch/culture/2012/09/24/voila-lallemand-met-verbe-fin?utm_source=twitter&utm_medium=share&utm_campaign=article": "Et voil\u00e0 pourquoi l\u2019allemand met le verbe \u00e0 la fin - Le Temps", + "http://readwrite.com/2013/10/17/evernote-addons#awesm=~okELddEpQbFm2M": "10 Ways To Make Evernote Rock Harder \u2013 ReadWrite", + "http://alias-i.com/lingpipe/index.html": "LingPipe", + "http://www.nzlinux.org.nz/blogs/2006/03/03/scoping-a-semantic-wiki/": "Visions of Aestia \u00bb Scoping a Semantic Wiki", + "http://www.wikimedia.fr/afripedia": "Afripedia : un partenariat avec l'Agence universitaire de la francophonie et l'Institut fran\u00e7ais pour d\u00e9velopper Wikip\u00e9dia en Afrique Wikim\u00e9dia France", + "http://semweb.salzburgresearch.at/apps/rdf-gravity/index.html": "RDF-Gravity", + "http://maps.google.fr/maps/place?hl=fr&um=1&ie=UTF-8&q=paris+%22francois+bonvin%22+restaurant&fb=1&gl=fr&hq=%22francois+bonvin%22+restaurant&hnear=Paris&cid=13847008823922781253": "Le Troquet - restau rue Fran\u00e7ois Bonvin, 15e", + "http://www.w3.org/TR/grddl-primer/": "GRDDL Primer", + "http://pro.01net.com/editorial/565289/semweb-pro-le-web-semantique-demontre-son-utilite-en-entreprise/": "SemWeb.Pro : le web s\u00e9mantique d\u00e9montre son utilit\u00e9 en entreprise #semwebpro", + "https://thetokenizer.com/2013/04/28/build-your-own-summary-tool/": "Build your own summary tool! 
The Tokenizer", + "http://www.slideshare.net/lespetitescases/lapport-des-technologies-du-web-smantique-la-gestion-des-donnes-structures": "L\u2019apport des technologies du Web s\u00e9mantique \u00e0 la gestion des donn\u00e9es structur\u00e9es", + "http://www.salon.com/technology/feature/2006/09/14/basic": "Why Johnny can't code", + "http://www.mkbergman.com/466/thinking-inside-the-box-with-description-logics/": "Thinking \u2018Inside the Box\u2019 with Description Logics \u00bb AI3:::Adaptive Information", + "http://www.semanlink.net/doc/2019/06/experiments_in_graph_based_semi": "Experiments in Graph-based Semi-Supervised Learning Methods for Class-Instance Acquisition", + "http://en.wikipedia.org/wiki/Terra_preta": "Terra preta - Wikipedia, the free encyclopedia", + "http://www.ensor.cc/2011/06/mavens-war-overlay-what-are-war.html": "Mike's Site: Maven's WAR Overlay: What are WAR Overlays?", + "https://www.quora.com/How-do-RNNs-differ-from-Markov-Chains": "How do RNNs differ from Markov Chains? - Quora", + "http://www.thebrain.com": "TheBrain Technologies Corporation", + "http://www.opencontentalliance.org/": "Open Content Alliance (OCA) - Home", + "http://www.lemonde.fr/web/recherche_breve/1,13-0,37-955744,0.html": "El\u00e9mentaire, mon cher Poincar\u00e9", + "http://www.semanlink.net/doc/2021/07/cc_100_monolingual_datasets_fr": "CC-100: Monolingual Datasets from Web Crawl Data", + "http://www.betaversion.org/~stefano/linotype/news/93/": "Stefano's Linotype ~ Data First vs. Structure First", + "http://arstechnica.com/articles/culture/drmhacks.ars/1": "Hacking Digital Rights Management", + "https://twitter.com/seanjtaylor/status/1073632404286275584": "Sean J. Taylor sur Twitter : \"A couple days ago another team asked me to speak about Bayesian data analysis...", + "http://cain.ice.ucdavis.edu/semanticnaturalist/": "The Semantic Naturalist", + "http://www.lemagit.fr/actualites/2240205636/Open-Data-le-Premier-Ministre-met-la-pression-sur-les-ministeres?goback=%2Egde_4158686_member_275093262#%21": "Open Data : le Premier Ministre met la pression sur les minist\u00e8res", + "http://www.lemonde.fr/culture/article/2018/03/29/en-syrie-le-plus-ancien-palais-de-l-humanite-detruit-par-l-organisation-etat-islamique_5278308_3246.html": "En Syrie, le plus ancien palais de l\u2019humanit\u00e9 d\u00e9truit par l\u2019organisation Etat islamique", + "http://rww.io/help.php": "rww.io: data cloud", + "http://rapid-i.com/rapidforum/index.php": "Rapid-I Forum", + "http://www.semanlink.net/doc/2021/05/ctlr_wic_tsv_target_sense_veri": "CTLR@WiC-TSV: Target Sense Verification using Marked Inputs and Pre-trained Models (2021)", + "http://www.culturecommunication.gouv.fr/Ressources/HADOC/Referentiels2/Les-vocabulaires-scientifiques-et-techniques/L-application-GINCO": "L'application GINCO - Minist\u00e8re de la Culture et de la Communication", + "https://fr.slideshare.net/PetarRistoski/rdf2vec-rdf-graph-embeddings-for-data-mining": "RDF2Vec: RDF Graph Embeddings for Data Mining", + "http://bblfish.net/tmp/2014/11/05/SemWebPro2014.pdf": "The Read-Write Secure Data Web, Henry Story at semwebpro 2014", + "https://machinelearningmastery.com/keras-functional-api-deep-learning/": "How to Use the Keras Functional API for Deep Learning", + "http://today.java.net/lpt/a/225": "Handling Java Web Application Input", + "http://www.mkbergman.com/287/comprehensive-listing-of-175-semantic-web-tools/": "Comprehensive Listing of 175 Semantic Web Tools \u00bb AI3:::Adaptive Information", + 
"http://dannyayers.com/2011/06/15/httpRange-14-Reflux": "httpRange-14 Reflux - Danny Ayers : Raw Blog", + "http://www.w3.org/TR/urls-in-data/": "URLs in Data Primer", + "http://www.fiftyfoureleven.com/resources/programming/javascript": "Javascript - Links and Resources, Fiftyfoureleven.com", + "http://stefansavev.com/blog/beyond-cosine-similarity/": "Beyond Cosine Similarity - Algorithms for Big Data", + "http://answers.semanticweb.com/questions/12147/whats-the-best-way-to-parameterize-sparql-queries": "What's the best way to parameterize SPARQL queries? - ANSWERS", + "http://www.livescience.com/43270-new-burgess-shale-fossils-canada.html": "'Mother Lode' of Amazingly Preserved Fossils Discovered in Canada LiveScience", + "http://www.semanlink.net/doc/2020/06/lord_of_the_wiki_ring_importin": "Lord of the Wiki Ring: Importing Wikidata into Neo4j and analyzing family trees", + "https://github.com/ffftzh/BTM-Java": "ffftzh/BTM-Java: A java implement of Biterm Topic Model", + "https://www.w3.org/2016/04/blockchain-workshop/report": "W3C Blockchains and the Web Workshop Report", + "https://blog.wikimedia.org/2016/10/30/histropedia/": "Histropedia: \u201cThe power of data visualisation combined with free knowledge\u201d \u2013 Wikimedia Blog", + "http://orange.biolab.si/": "Orange \u2013 Data Mining Fruitful & Fun", + "https://docs.google.com/presentation/d/17CGWPwu59GB7miyY1ErTjr4Wb-kS-rM7dB3MAMVO9HU/pub?slide=id.p": "Pin the Web - Firefox OS Design Concept", + "http://www.semanlink.net/doc/2020/04/ai_je_le_droit_de_mettre_un_lie": "Les URLs du Monde ne sont pas cool", + "https://mike.place/talks/serverless/": "Serverless for data scientists", + "http://www.vub.ac.be/BIBLIO/nieuwenhuysen/african-art/african-art-links.html": "African art links", + "http://blogs.esa.int/rosetta/2014/11/19/did-philae-drill-the-comet/": "Did Philae drill the comet? Rosetta - ESA's comet chaser", + "http://www.semanlink.net/doc/2019/06/towards_reproducible_research_w": "Towards Reproducible Research with PyTorch Hub PyTorch", + "http://www.mpi-inf.mpg.de/~suchanek/downloads/yago/": "Yago - A Core of Semantic Knowledge", + "https://en.wikipedia.org/wiki/Viva_Zapata!#Awards": "Viva Zapata!", + "http://lucene.apache.org/solr/": "Apache Solr", + "http://www.mycarevent.com/default.aspx": "MYCAREVENT", + "http://www.devx.com/semantic/Article/42543": "Using RDFa with DITA and DocBook", + "http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/": "La propagande des algorithmes ? Vraiment ? InternetActu", + "https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html": "A ten-minute introduction to sequence-to-sequence learning in Keras", + "https://pdfs.semanticscholar.org/8b40/b159c2316dbea297a301a9c561b1d9873c4a.pdf": "Monolingual and Cross-Lingual Information Retrieval Models Based on (Bilingual) Word Embeddings (2015)", + "http://www.radioactivehq.org": "The RadioActive Foundation: The standards based open source RFID project.", + "http://www.thefigtrees.net/lee/blog/2009/03/named_graphs_in_open_anzo.html": "Named graphs in Open Anzo - TechnicaLee Speaking", + "http://www.ysearchblog.com/archives/000527.html": "Yahoo! Search Blog: The Yahoo! 
Search Open Ecosystem", + "http://www.lepoint.fr/culture/socrate-ennemi-de-la-democratie-22-08-2013-1812427_3.php": "Socrate, ennemi de la d\u00e9mocratie ?", + "http://www.newyorker.com/magazine/2011/05/23/the-secret-sharer": "The Secret Sharer - The New Yorker", + "http://www.inforbix.com/volkswagen-and-product-data-semantics/": "Volkswagen and Product Data Semantics", + "http://en.wikipedia.org/wiki/Business_case": "Business case", + "http://www.againsttcpa.com/tcpa-faq-en.html": "Against TCPA TCPA would TAKE your FREEDOM This is NO FAKE", + "http://mathenjeans.free.fr/amej/edition/actes/actespdf/94231238.pdf": "l\u2019alg\u00e8bre \u00e0 travers les \u00e9quations", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3216,50-692127,0.html": "En Chine, le planning familial du Shandong a impos\u00e9 une violente campagne de st\u00e9rilisation et d'avortement", + "http://www.lights.com/pickalink/bookmarks/": "Free Bookmark Managers", + "https://www.bbc.co.uk/news/science-environment-44904298": "Petrichor: why does rain smell so good? - BBC News", + "http://www.coral-lab.org/": "Cognition, Robotics, and Learning (CORAL) Lab Home", + "http://www.w3.org/2006/07/SWD/track/issues/54": "ISSUE-54 - SWD", + "http://www.semanlink.net/doc/2020/10/top_trends_of_graph_machine_lea": "Top Trends of Graph Machine Learning in 2020 by Sergei Ivanov Towards Data Science", + "http://commons.wikimedia.org/wiki/User_talk:Fps61": "User talk:Fps61 - Wikimedia Commons", + "http://adage.com/article/digital/facebook-partner-acxiom-epsilon-match-store-purchases-user-profiles/239967/": "Facebook to Partner With Acxiom, Epsilon to Match Store Purchases With User Profiles Digital - Advertising Age", + "http://www.semanlink.net/doc/2021/04/simultaneous_categorization_of_": "Simultaneous Categorization of Text Documents And Identification of Cluster-dependent Keywords (2003)", + "http://stackoverflow.com/questions/3459795/how-to-choose-between-jersey-apache-wink-and-jboss-resteasy/10922135#10922135": "How to choose between Jersey, Apache Wink and JBoss RESTEasy? - Stack Overflow", + "http://www.semanlink.net/doc/2019/08/les_megalithes_de_veyre_monton_": "Les m\u00e9galithes de Veyre-Monton (Puy-de-D\u00f4me) : ... Inrap", + "http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=4184": "Le Republicain-Niger: CAUSERIE RADIOT\u00c9L\u00c9VIS\u00c9E. 
Un appel au g\u00e9nocide !", + "http://www.christian-faure.net/2015/10/02/conference-la-blockchain-au-dela-du-bitcoin/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29": "Conf\u00e9rence : La blockchain au del\u00e0 du Bitcoin", + "http://www.lemonde.fr/afrique/article/2016/09/20/mckinsey-continue-de-croire-en-la-capacite-economique-des-lions-d-afrique_5000545_3212.html": "McKinsey continue de croire en la capacit\u00e9 \u00e9conomique des \u00ab\u00a0lions\u00a0\u00bb d\u2019Afrique", + "http://passeurdesciences.blog.lemonde.fr/2014/06/19/archeologie-les-traces-dune-epidemie-antique-decouvertes-en-egypte/": "Arch\u00e9ologie : les traces d\u2019une \u00e9pid\u00e9mie antique d\u00e9couvertes en Egypte Passeur de sciences", + "http://www.w3.org/TR/ldp/": "Linked Data Platform 1.0", + "http://www.squeak.org/": "Squeak", + "http://www.lemonde.fr/planete/article/2015/06/16/climat-le-blame-du-pape-aux-pays-riches_4655037_3244.html": "Climat : le pape s\u2019attaque \u00e0 la \u00ab culture du d\u00e9chet \u00bb des pays riches", + "http://www.hlt.utdallas.edu/~vince/papers/acl14-keyphrase-poster.jpg": "Automatic Keyphrase Extraction (Poster): A Survey of the State of the Art (2014)", + "http://www.semanlink.net/doc/2020/05/http_javascript_post_request_": "http - JavaScript post request like a form submit - Stack Overflow", + "http://jena.hpl.hp.com/juc2006/schedule.html": "2006 Jena User Conference - schedule", + "http://www.sciencemag.org/news/2016/05/gene-editor-crispr-won-t-fully-fix-sick-people-anytime-soon-here-s-why": "The gene editor CRISPR won\u2019t fully fix sick people anytime soon. Here\u2019s why Science AAAS", + "http://www.w3.org/TR/rdfa-primer/": "RDFa 1.1 Primer", + "http://www.slideshare.net/cygri/what-is-sdmxrdf-4696043": "What is SDMX-RDF?", + "http://stackoverflow.com/questions/3113428/classifying-documents-into-categories?rq=1": "python - Classifying Documents into Categories - Stack Overflow", + "http://jena.sourceforge.net/ARQ/property_paths.html": "ARQ - Property Paths", + "http://backbonetutorials.com/": "Backbone.js Tutorials", + "http://www.inter-locale.com/codeset1.jsp": "Unicode JSP Primer", + "http://blog.sindice.com/2014/03/07/the-siren-1-0-open-source-release-and-its-use-in-the-semantic-web-community/": "The SIREn 1.0 Open Source Release and its Use in the Semantic Web Community - Sindice Blog", + "http://www.youtube.com/watch?v=Itcir2iiH6E": "Mbilia Bel - Yamba Ngai", + "http://www.scholarpedia.org/article/Text_categorization": "Text categorization - Scholarpedia", + "http://arxiv.org/abs/1511.08154": "[1511.08154] Notes on Cardinal's Matrices", + "http://www.apesmanifesto.org": "Manifesto for Apes and nature [mAn]", + "http://semanticday.mondeca.makolab.fr/": "Semantic Web\u2019s significance confirmed II MakoLab Semantic Day", + "http://www.semanlink.net/doc/2019/08/lesson_3_self_driving_cars": "Lesson 3 - Self Driving Cars", + "https://aclanthology.coli.uni-saarland.de/events/emnlp-2018": "EMNLP (2018) - ACL Anthology - Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", + "http://www.joseki.org/": "Joseki - A SPARQL Server for Jena", + "http://tools.wmflabs.org/sqid/#/": "Wikidata Class Browser", + "https://www.theguardian.com/commentisfree/2017/jan/08/blaming-fake-news-not-the-answer-democracy-crisis": "Democracy is in crisis, but blaming fake news is not the answer Evgeny Morozov Opinion The Guardian", + 
"https://medium.com/@writingben/when-roman-barbarians-met-the-asian-enlightenment-2be064d7af9b#.b5gor5ymx": "When Roman \u201cBarbarians\u201d Met the Asian Enlightenment \u2013 Medium", + "http://www.sciencemag.org/news/2016/03/viral-fossils-our-dna-may-help-us-fight-infection": "Viral \u2018fossils\u2019 in our DNA may help us fight infection Science AAAS", + "http://research.microsoft.com:8082/pubs/73532/AF1-1.pdf": "Challenges of the email domain for text classification", + "https://www.sitepoint.com/getting-started-browserify/": "Getting Started with Browserify", + "https://machinelearning.apple.com/2018/09/27/can-global-semantic-context-improve-neural-language-models.html": "Can Global Semantic Context Improve Neural Language Models? - Apple (2018)", + "https://medium.com/@keremturgutlu/understanding-building-blocks-of-ulmfit-818d3775325b": "Understanding building blocks of ULMFIT \u2013 Kerem Turgutlu \u2013 Medium", + "https://www.youtube.com/watch?v=FNQSM4ipZog": "Exponential Organizations - Salim Ismail, at USI - YouTube", + "http://www.w3.org/MarkUp/2009/rdfa-for-html-authors": "RDFa for HTML Authors", + "http://stackoverflow.com/questions/19221289/how-to-do-text-classification-with-label-probabilities?rq=1": "How to do text classification with label probabilities? - Stack Overflow", + "http://www.theguardian.com/world/interactive/2013/nov/01/snowden-nsa-files-surveillance-revelations-decoded?CMP=twt_gu#section/1": "NSA files decoded: Edward Snowden's surveillance revelations explained World news theguardian.com", + "http://www.wired.com/2014/06/microbe-symbiosis-evolution/": "Microbes May Drive Evolution of New Animal Species Science WIRED", + "http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0295.html": "Generic Property-Value Proposal for Schema.org from martin.hepp@ebusiness-unibw.org on 2014-04-29 (public-vocabs@w3.org from April 2014)", + "http://www.semanlink.net/doc/2019/07/unsupervised_similarity_learnin": "Unsupervised Similarity Learning from Textual Data (2012)", + "http://www.practicalembeddedjava.com/tools/eclipse_tips.html": "Eclipse Tips", + "https://medium.com/@racheltho/why-you-yes-you-should-blog-7d2544ac1045": "Why you (yes, you) should blog \u2013 Rachel Thomas \u2013 Medium", + "https://trackography.org/": "Trackography - Who tracks you online?", + "http://www.herodote.net": "", + "http://stackoverflow.com/questions/3781343/jquerys-getjson-not-setting-accept-header-correctly": "JQuery's getJSON() not setting Accept header correctly?", + "http://www.semanlink.net/doc/2020/09/francois_jean_lefebvre_de_la_ba": "Fran\u00e7ois-Jean Lefebvre de La Barre \u2014 Wikip\u00e9dia", + "http://mike2.openmethodology.org/wiki/Guiding_Principles_for_the_Open_Semantic_Enterprise": "Guiding Principles for the Open Semantic Enterprise", + "http://pt.wikipedia.org/wiki/Cidade_de_Deus_(filme)": "Cidade de Deus (filme) \u2013 Wikip\u00e9dia, a enciclop\u00e9dia livre", + "http://fr.wikipedia.org/wiki/Sur_la_route_de_Madison": "The Bridges of Madison County", + "http://lists.w3.org/Archives/Public/public-lod/2013Mar/0115.html": "Important Change to HTTP semantics re. 
hashless URIs from Kingsley Idehen on 2013-03-24 (public-lod@w3.org from March 2013)",
+ "http://www.w3.org/History/1989/proposal.html": "The original proposal of the WWW, HTMLized",
+ "http://recode.net/2014/05/27/googles-new-self-driving-car-ditches-the-steering-wheel/": "Google Introduces New Self Driving Car at the Code Conference Re/code",
+ "http://openspring.net/blog/2009/10/22/produce-and-consume-linked-data-with-drupal": "Produce and Consume Linked Data with Drupal!",
+ "http://hinchcliffe.org/archive/2008/04/08/16627.aspx": "12 Things You Should Know About REST and WOA",
+ "http://bigbrowser.blog.lemonde.fr/2014/01/23/mouchard-le-mysterieux-texto-recu-par-les-manifestants-de-kiev/": "MOUCHARD \u2013 Le myst\u00e9rieux texto re\u00e7u par les manifestants de Kiev Big Browser",
+ "http://www.cs.washington.edu/homes/rao/indus.html": "Probabilistic Analysis of the 4000-year-old Indus Script",
+ "http://chronicle.com/blogs/wiredcampus/stanford-u-and-edx-will-jointly-build-open-source-software-to-deliver-moocs/43301": "Stanford U. and edX Will Jointly Build Open-Source Software to Deliver MOOCs - Wired Campus - The Chronicle of Higher Education",
+ "http://www.snee.com/bobdc.blog/2010/01/federated-sparql-queries.html": "Federated SPARQL queries - bobdc.blog",
+ "http://www.atala.org/taln_archives/RECITAL/RECITAL-2015/recital-2015-long-004.pdf?lipi=urn%3Ali%3Apage%3Ad_flagship3_profile_view_base%3BCaTtEmAlR7eq0au%2FgIHveg%3D%3D": "R\u00e9sum\u00e9 Automatique Multi-Document Dynamique : \u00c9tat de l\u2019Art (2015)",
+ "http://hbase.apache.org/": "HBase - Apache HBase\u2122 Home",
+ "http://hueniverse.com/oauth/guide/": "The OAuth 1.0 Guide",
+ "http://www.nytimes.com/2006/08/24/technology/24basics.html?ei=5088&en=58c571f0b4ae0ed8&ex=1314072000&partner=rssnyt&emc=rss&pagewanted=print": "About Batteries: Tips on Longevity and Reviving the Dead - New York Times",
+ "http://www.linuxjournal.com/article/9301?page=0,0": "Ajax Timelines and the Semantic Web Linux Journal",
+ "http://callimachusproject.org/": "Callimachus - Data-driven applications made easy",
+ "http://www.semanlink.net/doc/2021/09/stanfordnlp_stanza_official_st": "stanfordnlp/stanza: Official Stanford NLP Python Library for Many Human Languages",
+ "https://www.atlassian.com/git/tutorials/atlassian-git-cheatsheet": "Git cheat sheet",
+ "http://ivanherman.wordpress.com/2008/10/15/semantic-web-and-uncertainty/": "Semantic Web and uncertainty \u00ab Ivan\u2019s private site",
+ "https://www.youtube.com/watch?v=A6wq16Ow5Ec": "AngularJS - Custom Components - Part 1 - YouTube",
+ "http://mail-archives.apache.org/mod_mbox/jena-users/201211.mbox/%3c50B89950.6010705@it.ox.ac.uk%3e": "Re: Dealing with expensive queries (jena users mailing list)",
+ "http://dev2dev.bea.com/pub/a/2006/01/ajax-back-button.html?page=1": "Developing Ajax Applications That Preserve Standard Browser Functionality",
+ "http://tiffanybbrown.com/2011/03/23/html5-does-not-allow-self-closing-tags/": "HTML5 does NOT allow \u201cself-closing\u201d tags \u2022 Tiffany B. 
Brown", + "http://www.lexpress.fr/informations/le-livre-de-sang-d-andrei-platonov_618697.html": "Le livre de sang d'Andre\u00ef Platonov", + "https://twitter.com/enahpets/status/1052537794764128257": "#franceisai, petit r\u00e9capitulatif des choses vues et entendues ce matin", + "http://www.semanlink.net/doc/2021/02/le_calendrier_mythique_chez_les": "Le calendrier mythique chez les Songhay-Zarma (Niger)", + "http://www.theatlantic.com/features/archive/2014/09/why-i-hope-to-die-at-75/379329/": "Why I Hope to Die at 75 - The Atlantic", + "http://www.semanlink.net/doc/2021/01/%C2%AB_il_existe_deja_des_pratiques_": "\u00ab\u00a0Il existe d\u00e9j\u00e0 des pratiques agricoles alternatives \u00e0 l\u2019emploi du glyphosate et des n\u00e9onicotino\u00efdes\u00a0\u00bb", + "http://www.infomesh.net/2001/swintro": "The Semantic Web - An Introduction.", + "http://youtube.com/watch?v=Jk3o1hAgBlE": "YouTube - Koke\u00efna - Moussa Poussi", + "http://www.semanlink.net/doc/2019/08/chip_huyen_sur_twitter_this_": "Chip Huyen sur Twitter : \"This thread is a combination of 10 free online courses on machine learning that I find the most helpful\"", + "http://www.lemonde.fr/a-la-une/article/2010/09/04/lettre-au-president-de-la-republique-par-tahar-ben-jelloun_1406745_3208.html": "Lettre au pr\u00e9sident de la R\u00e9publique, par Tahar Ben Jelloun", + "http://shanghailectures.org/lectures": "About the ShanghAI Lectures ShanghAI Lectures", + "https://www.lesswrong.com/posts/xg3hXCYQPJkwHyik2/the-best-textbooks-on-every-subject": "The Best Textbooks on Every Subject", + "http://www.cis.pku.edu.cn/faculty/system/zhangyan/papers/DMMOOC2017-jiang.pdf": "A Framework for Semi supervised Concept Extraction from MOOC content (2017)", + "https://www.youtube.com/watch?v=nMK9-E-LUnc": "\"Na am Francophonie\" Sogha Niger - YouTube", + "https://theconversation.com/bogou-faire-voyager-lexpertise-au-coeur-des-deserts-medicaux-africains-106369?utm_medium=email&utm_campaign=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449&utm_content=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449+CID_6ffe4a3e2829d97988a5d922642c2038&utm_source=campaign_monitor_fr&utm_term=Bogou%20%20faire%20voyager%20lexpertise%20au%20cur%20des%20dserts%20mdicaux%20africains": "\u00ab\u00a0Bogou\u00a0\u00bb, faire voyager l\u2019expertise au\u00a0c\u0153ur des\u00a0d\u00e9serts m\u00e9dicaux africains", + "http://blogs.sun.com/bblfish/entry/serialising_java_objects_to_rdf": "Serialising Java Objects to RDF with Jersey", + "http://www.journaldunet.com/solutions/cloud-computing/1146290-cloud-pourquoi-docker-peut-tout-changer/": "Cloud : pourquoi Docker peut tout changer - JDN", + "http://passeurdesciences.blog.lemonde.fr/2017/03/26/des-archeologues-chinois-decouvrent-un-tresor-mythique/": "Des arch\u00e9ologues chinois d\u00e9couvrent un tr\u00e9sor mythique Passeur de sciences", + "http://www.ldodds.com/blog/2009/11/describing-sparql-extension-functions/": "Describing SPARQL Extension Functions \u00ab Lost Boy", + "http://digital.cityofchicago.org/index.php/how-a-table-becomes-a-dataset-openrefine/": "Digital Hub How a Table Becomes a Dataset, OpenRefine", + "http://freemind.sourceforge.net/wiki/index.php/Main_Page": "Main Page - FreeMind - free mind mapping software", + "http://www.semanlink.net/doc/2019/06/a_structural_probe_for_finding_": "A Structural Probe for Finding Syntax in Word Representations", + "http://rdf4food.org/moin.cgi/DanBriSlides": "DanBriSlides - 
SPARQL-ing days", + "http://www.semanlink.net/doc/2021/05/enterprise_knowledge_graph_solu": "Enterprise Knowledge Graph Solutions eccenca", + "http://www.semanlink.net/doc/2020/01/how_to_build_deep_neural_networ": "How to build deep neural network for custom NER with Keras", + "https://hai.stanford.edu/news/the_intertwined_quest_for_understanding_biological_intelligence_and_creating_artificial_intelligence/": "The intertwined quest for understanding biological intelligence and creating artificial intelligence", + "http://www.semanlink.net/doc/2020/04/curiosites_animales_une_vie_s": "Curiosit\u00e9s animales - Une vie sans sexe : le dragon de Komodo et le puceron ARTE", + "http://www.openlinksw.com/weblog/oerling/?id=1550": "Short Recap of Virtuoso Basics", + "http://www.w3.org/People/maxf/textorizer/": "SVG Textorizer Tool", + "http://ivanherman.wordpress.com/2007/07/29/from-wikipedia-uri-s-to-dbpedia-uri%E2%80%A6/": "From Wikipedia URI-s to DBpedia URI\u2026 \u00ab Ivan\u2019s blog", + "http://www.nytimes.com/2014/05/09/science/a-synthetic-biology-conference-lures-an-intriguing-audience.html?partner=rss&emc=rss": "A Synthetic Biology Conference Lures an Intriguing Audience - NYTimes.com", + "http://fgiasson.com/blog/index.php/2007/11/30/zitgist-dataviewer/": "Zitgist DataViewer at Frederick Giasson\u2019s Weblog", + "http://www.w3.org/2005/Incubator/urw3/XGR-urw3/": "Uncertainty Reasoning for the World Wide Web", + "http://www.w3.org/2008/09/msnws/papers/": "Papers submitted to W3C Workshop on the Future of Social Networking", + "http://www.prescod.net/rest/": "REST Resources", + "http://www.heppnetz.de/projects/skos2owl/": "SKOS2OWL: Online tool for deriving OWL ontologies from SKOS categorization schemas", + "https://github.com/keon/awesome-nlp/blob/master/README.md": "awesome NLP: A curated list of resources dedicated to Natural Language Processing", + "http://www.bakerbotts.com/ideas/publications/2019/february/what-is-unity-a-look-at-the-usptos-ai-development-efforts": "A Look at the USPTO\u2019s AI Development Efforts", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3232,50-647202,0.html": "", + "http://www.documentarymania.com/player.php?title=Steve%20Jobs%20the%20Lost%20Interview": "Steve Jobs the Lost Interview - Documentary Mania", + "https://nlp.stanford.edu/pubs/hancock2018babble.pdf": "Training Classifiers with Natural Language Explanations", + "http://www.detailedbalance.net/glove-compartment/2005/05/14/osx-tomcat55-mod_jk": "The Glove Compartment \u00bb Tomcat 5.5, mod_jk, and OS X\u2019s built-in Apache", + "http://www.semanlink.net/doc/2020/01/adapters_a_compact_and_extensi": "Adapters: A Compact and Extensible Transfer Learning Method for NLP", + "http://www.uncontactedtribes.org/": "Uncontacted Tribes", + "https://ontotext.com/": "Ontotext Semantic Technology Developer", + "https://thegradient.pub/structure-learning/": "How do we capture structure in relational data?", + "http://en.wikipedia.org/wiki/Big_Lebowski": "The Big Lebowski", + "http://blog.datagraph.org/2010/03/rdf-isomorphism": "The Curious Case of RDF Graph Isomorphism - The Datagraph Blog", + "https://www.theatlantic.com/technology/archive/2017/09/saving-the-world-from-code/540393/": "The Coming Software Apocalypse - The Atlantic", + "https://lejournal.cnrs.fr/articles/une-riche-cite-decouverte-aux-portes-de-la-mesopotamie": "Une riche cit\u00e9 d\u00e9couverte aux portes de la M\u00e9sopotamie CNRS Le journal", + 
"https://www.voyage-est.com/blogs/voyage-est/index.php?post/2014/03/20/Utiliser-un-dictionnaire-chinois": "Utiliser un dictionnaire chinois - \u4e1c\u6e38\u8bb0", + "http://www.roosevelt2012.fr/": "Roosevelt 2012 - un collectif et 15 r\u00e9formes pour changer la donne", + "http://semanticweb.com/gmail-meet-json-ld_b37211": "Gmail, Meet JSON-LD - semanticweb.com", + "https://blog.insightdatascience.com/reinforcement-learning-from-scratch-819b65f074d8": "Reinforcement Learning from scratch \u2013 Insight Data", + "http://www.lemonde.fr/web/article/0,1-0@2-3244,36-970565,0.html": "Le Prix Nobel Albert Fert plaide pour une recherche libre", + "https://github.com/jprante/elasticsearch-langdetect": "A plugin for language detection in Elasticsearch using Nakatani Shuyo's language detector", + "http://musique.rfi.fr/artiste/zouk/kassav": "Kassav ' - Biographie, discographie et fiche artiste \u2013 RFI Musique", + "http://data.semanticweb.org/person/michael-k-bergman/html": "Mike Bergman Semantic Web Dog Food", + "https://github.com/solid/react-components/tree/v1.4.0#-create-your-own-components": "GitHub - solid/react-components at v1.4.0", + "http://www.semanlink.net/doc/2020/07/journal_des_africanistes_%7C_soci": "Journal des africanistes Soci\u00e9t\u00e9 des africanistes", + "https://twitter.com/random_walker/status/1079759096272818178": "Arvind Narayanan sur Twitter : \"In 2018 the blockchain/decentralization story fell apart. For example, a study of 43 use cases found a 0% success rate\"", + "http://ercim-news.ercim.eu/en96/special/open-education-a-growing-high-impact-area-for-linked-open-data": "Open Education: A Growing, High Impact Area for Linked Open Data", + "http://www.lemonde.fr/planete/article/2017/06/24/deforestation-au-bresil-l-operation-greenwashing-de-michel-temer-fait-long-feu_5150613_3244.html": "D\u00e9forestation\u00a0au Br\u00e9sil\u00a0: l\u2019op\u00e9ration \u00ab\u00a0greenwashing\u00a0\u00bb de Michel Temer fait long feu", + "http://silentcircle.wordpress.com/2013/09/11/the-battle-for-your-digital-soul/?utm_content=buffer9771d&utm_source=buffer&utm_medium=twitter&utm_campaign=Buffer": "The Battle for Your Digital Soul Silent Circle Blog", + "http://www.semanlink.net/doc/2021/03/un_an_d%E2%80%99epidemie_de_covid_19_": "Un an d\u2019\u00e9pid\u00e9mie de Covid-19\u00a0: retrouver le sens du long terme", + "https://www.edge.org/response-detail/26693": "We May All Die Horribly", + "http://www.youtube.com/watch?v=6eGcsGPgUTw": "You Tube - DataPortability and Me (Get Your Data Out!)", + "http://skhole.fr/petite-poucette-la-douteuse-fable-de-michel-serres": "Petite Poucette : la douteuse fable de Michel Serres Revue Skhole.fr", + "http://www.w3.org/2007/powder/": "W3C POWDER Working Group", + "http://www.kanai.net/weblog/archive/2007/01/26/00h53m55s#003095": "The cost of monoculture", + "http://www.ecrans.fr/Denonce-par-Albanel-vire-par-TF1,7137.html": "D\u00e9nonc\u00e9 par Albanel, vir\u00e9 par TF1", + "http://www.semanlink.net/doc/2019/07/neural_transfer_learning_for_na": "Neural Transfer Learning for Natural Language Processing - Seb Ruder's PhD Thesis", + "http://tomheath.com/blog/2010/06/why-carry-the-cost-of-linked-data/": "Why Carry the Cost of Linked Data? 
\u2013 Tom Heath\u2019s Displacement Activities", + "http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html#example-model-selection-grid-search-text-feature-extraction-py": "Sample pipeline for text feature extraction and evaluation \u2014 scikit-learn documentation", + "http://opinionator.blogs.nytimes.com/2013/10/09/turning-education-upside-down/?_r=3": "Turning Education Upside Down - NYTimes.com", + "http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu": "[1802.01528] The Matrix Calculus You Need For Deep Learning", + "http://apassant.net/blog/2008/06/20/eswc2008-slides/": "ESWC2008 slides : Alexandre Passant", + "http://freedomboxfoundation.org/": "FreedomBox Foundation", + "http://www.lemonde.fr/proche-orient/article/2013/05/27/syrie-le-monde-temoin-d-attaques-toxiques_3417225_3218.html": "Syrie : \"Le Monde\" t\u00e9moin d'attaques toxiques", + "http://www.youtube.com/watch?v=G4UX6UNdLUM": "Patti Smith Indicts George W. Bush - Dream of life", + "http://opensourceconnections.com/blog/2015/10/16/bm25-the-next-generation-of-lucene-relevation/": "BM25 The Next Generation of Lucene Relevance", + "http://events.linkeddata.org/ldow2013/": "Linked Data on the Web (LDOW2013) - Workshop at WWW2013, Rio de Janeiro, Brazil", + "http://www.semanlink.net/doc/2021/08/hauptsacht_bis_42_1876_bullet": "Hauptsacht. bis 42.1876: Bulletin monumental ou Collection de m\u00e9moires et d... - Google Books", + "http://en.wikipedia.org/wiki/JSON": "JSON - Wikipedia, the free encyclopedia", + "http://www.wired.com/2014/05/the-world-of-computer-go": "The Mystery of Go, the Ancient Game That Computers Still Can't Win WIRED", + "http://www.desertec.org/": "DESERTEC Foundation", + "http://html5doctor.com/interview-with-ian-hickson-html-editor/": "Interview with Ian Hickson, HTML editor HTML5 Doctor", + "http://www.snee.com/bobdc.blog/2012/02/pull-rdf-metadata-out-of-jpegs.html": "Pull RDF metadata out of JPEGs, MP3s, and more - bobdc.blog", + "http://www.insu.cnrs.fr/node/6047": "Proxima b, une exoplan\u00e8te recouverte d\u2019un oc\u00e9an ?", + "http://dev.eclipse.org/blogs/wayne/2007/07/25/many-classpaths-to-enlightenment/#comments": "Eclipse hints, tips, and random musings \u00bb Blog Archive \u00bb Many classpaths to enlightenment", + "http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.7265": "CiteSeerX \u2014 Fast backtrack-free product configuration using a precompiled solution space representation", + "http://topquadrantblog.blogspot.com/2010/12/how-to-convert-spreadsheet-to-skos.html": "VOYAGES OF THE SEMANTIC ENTERPRISE: How to: convert a spreadsheet to SKOS", + "http://www.appleinsider.com/articles/08/06/16/apples_open_secret_sproutcore_is_cocoa_for_the_web.html": "AppleInsider Apple's open secret: SproutCore is Cocoa for the Web", + "http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter__1": "Sebastian Ruder sur Twitter : \"It's great to see the growing landscape of NLP transfer learning libraries\"", + "https://www.youtube.com/watch?v=2cEUKKJoAdU": "Andrew Moore on \"TOKeN: The Open Knowledge Network\" - YouTube", + "http://www.slideshare.net/candp/sem-tech-pelletdb": "PelletDb: Scalable Reasoning for Enterprise Semantics", + "http://www.semanlink.net/doc/2020/04/covid_19_lessons_from_three_mil": "COVID-19 Lessons from Three Mile Island #2 \u2014 the NRC I, Cringely", + "http://www-sul.stanford.edu/depts/ssrg/africa/guide.html": "Africa South of the Sahara: Selected Internet Resources - Stanford University", + 
"http://www.snee.com/bobdc.blog/2008/10/download_sparql_results_direct.html": "Download SPARQL results directly into a spreadsheet - bobdc.blog", + "https://blog.miguelgrinberg.com/post/using-celery-with-flask": "Using Celery With Flask - miguelgrinberg.com", + "https://tilloy.wordpress.com/teaching/physique-pour-tous/": "\u201cPhysique pour tous\u201d Antoine Tilloy's research log", + "http://insideanalysis.com/2015/01/the-graph-database-and-the-rdf-database/?utm_content=buffer88439&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "The Graph Database and the RDF Database Inside Analysis", + "https://www.theguardian.com/politics/2018/jan/21/capitalism-new-crisis-can-private-sector-be-trusted-carillion-privatisation?CMP=Share_iOSApp_Other": "Capitalism\u2019s new crisis: after Carillion, can the private sector ever be trusted? Politics The Guardian", + "http://www.dfki.uni-kl.de/~sauermann/2006/01-pimo-report/pimOntologyLanguageReport.html": "PIMO-a PIM Ontology for the Semantic Desktop", + "https://www.quora.com/How-do-RBMs-work-What-are-some-good-use-cases-and-some-good-recent-papers-on-the-topic": "How do RBMs work? - Quora", + "http://etienne.chouard.free.fr/Europe/": "", + "https://distill.pub/2018/building-blocks/": "The Building Blocks of Interpretability", + "https://fr.slideshare.net/lysander07/combining-semantics-an-deep-learning-for-intelligent-information-services": "Combining semantics and deep learning for intelligent information services", + "http://www.gazeoftime.com/": "Gaze of time Timelines and Chronologies curated by the community", + "https://aclanthology.coli.uni-saarland.de/volumes/proceedings-of-the-2018-emnlp-workshop-blackboxnlp-analyzing-and-interpreting-neural-networks-for-nlp": "PROCEEDINGS of the BlackboxNLP Workshop", + "https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory": "installation - How do I find the location of my Python site-packages directory? - Stack Overflow", + "https://www.quora.com/How-is-information-coded-in-neural-activity": "How is information coded in neural activity? (Quora)", + "http://rdfa.info/play/": "RDFa / Play: visualizer for RDFa by @manusporny", + "https://github.com/facebookresearch/fastText/issues/26": "How can we get the vector of a paragraph? \u00b7 Issue #26 \u00b7 facebookresearch/fastText", + "http://www.webreference.com/programming/javascript/index.html": "WebReference JavaScript Articles", + "http://timeline.verite.co/": "Timeline JS - Beautifully crafted timelines that are easy, and intuitive to use.", + "http://www2.cnrs.fr/presse/communique/4519.htm": "Un organisme unicellulaire capable d'apprendre - Communiqu\u00e9s et dossiers de presse - CNRS", + "http://blog.tcrouzet.com/la-quatrieme-theorie/la-quatrieme-theorie-liens/": "Twitt\u00e9rature", + "http://www.semanlink.net/doc/2021/04/how_many_data_points_is_a_promp": "How many data points is a prompt worth ?", + "http://news.bbc.co.uk/2/hi/technology/4766755.stm": "BBC NEWS Technology BBC starts to rock online world", + "https://www.reddit.com/r/MachineLearning/comments/4ssk6u/is_it_true_that_r_programming_is_dying/?st=iqmztsrx&sh=b08ddfe5": "Is it true that R programming is dying? 
: MachineLearning", + "http://restlet.tigris.org/issues/show_bug.cgi?id=463": "Support variant selection based on file extension", + "http://www.w3.org/TR/2015/CR-ldpatch-20150303/": "Linked Data Patch Format", + "http://cs231n.github.io/neural-networks-1/#feedforward": "CS231n Convolutional Neural Networks for Visual Recognition", + "http://www.hugin.com/": "Hugin", + "http://www.semanlink.net/doc/2019/09/blackrock": "BlackRock", + "http://www.iledefrance-est.cnrs.fr/com/documents/gene_syncitine.pdf": "Un g\u00e8ne d\u2019origine r\u00e9trovirale essentiel pour la formation du placenta", + "https://twitter.com/feiliu_nlp/status/1058985012945735680": "Adapting the Neural Encoder-Decoder Framework from Single to Multi-Document Summarization", + "http://www.subbu.org/blog/2012/07/mvc-may-be-slowing-down-your-site": "Your MVC may be Slowing Down Your Site", + "http://www.xfront.com/REST-Web-Services.html": "Building Web Services the REST Way", + "http://www.semanlink.net/doc/2019/11/dhq_digital_humanities_quarter": "DHQ: Digital Humanities Quarterly: Modelling Medieval Hands: Practical OCR for Caroline Minuscule", + "http://www.lemonde.fr/afrique/article/2017/09/07/alors-que-s-eteignent-les-elephants-d-afrique-les-chinois-ont-pris-le-controle-des-routes-de-l-ivoire_5182142_3212.html": "Alors que s\u2019\u00e9teignent les \u00e9l\u00e9phants d\u2019Afrique, les Chinois ont pris le contr\u00f4le des routes de l\u2019ivoire", + "http://isicil.inria.fr/v2/res/docs/articles/webscience10_flimpens_paper.pdf": "Helping online communities to semantically enrich folksonomies", + "https://theintercept.com/2018/07/08/ethiopia-garment-industry/": "Ethiopia Touts Good Conditions in Factories for Brands Like H&M and Calvin Klein, but Workers Scrape By On $1 a Day", + "http://thinkingmachines.mit.edu/blog/unreasonable-reputation-neural-networks": "The Unreasonable Reputation of Neural Networks [ thinking machines ]", + "https://drive.google.com/file/d/1kmNAwrSlFYo0cN_DcURMOArBwe9FxWxR/view": "Transfer learning with language models", + "http://fishbowl.pastiche.org/2005/04/27/the_string_memory_gotcha": "The Fishbowl: The String Memory Gotcha", + "http://www.rdfhdt.org/": "RDF HDT", + "http://sig.ma/search?q=francois-paul+servant": "fps on sig.ma - Semantic Information MAshup", + "http://www.semanlink.net/doc/2020/02/extractive_text_summarization_u": "Extractive Text Summarization Using spaCy in Python", + "http://www.semanlink.net/doc/2021/10/raphaelsty_retrieverreader_fas": "raphaelsty/RetrieverReader: Fast API QA", + "http://blog.outer-court.com/archive/2005-05-22-n83.html": "Google Translator: The Universal Language", + "http://www.eurecom.fr/~michiard/teaching/slides/clouds/tutorial-hbase.pdf": "HBase tutorial", + "http://blog.aylien.com/word-embeddings-and-their-challenges/": "Word Embeddings and Their Challenges - AYLIEN", + "http://www.readwriteweb.com/archives/4_tools_for_teaching_kids_to_code.php#more": "4 Tools for Teaching Kids to Code", + "http://www.tombouctoumanuscripts.org/": "Tombouctou Manuscripts Project", + "http://pisani.blog.lemonde.fr/pisani/": "Transnets, des gadgets aux r\u00e9seaux", + "http://semtech2010.semanticuniverse.com/": "SemTech 2010", + "http://www.honeynet.org/papers/phishing/": "Know your Enemy: Phishing", + "http://www.zdnet.com/article/back-to-the-future-does-graph-database-success-hang-on-query-language/": "Back to the future: Does graph database success hang on query language? 
ZDNet", + "http://www.ccfd.asso.fr/": "CCFD : Comit\u00e9 Catholique contre la Faim et pour le D\u00e9veloppement", + "http://lab.linkeddata.deri.ie/2010/grefine-rdf-extension/": "RDF Extension for Google Refine", + "http://www.lemonde.fr/planete/article/2014/11/10/les-partisans-des-ogm-marquent-des-points-aux-etats-unis_4521152_3244.html": "Les partisans des OGM marquent des points aux Etats-Unis", + "https://medium.com/scaleabout/a-gentle-introduction-to-doc2vec-db3e8c0cce5e": "A gentle introduction to Doc2Vec \u2013 ScaleAbout \u2013 Medium", + "http://mobile.nytimes.com/2014/01/19/opinion/sunday/for-the-love-of-money.html": "For the Love of Money - NYTimes.com", + "http://www.wiseclerk.com/group-news/countries/france-the-situation-of-social-lending-in-france/": "P2P-Banking.com \u00bb The situation of social lending in France", + "http://interface.fh-potsdam.de/incom/code/projekte/projekt_anzeigen.php?4,260,17,0,0,281": "incom projekt {Tag Clouds 5.0}", + "https://www.bbc.com/news/science-environment-47540792": "Ancient migration transformed Spain's DNA - BBC News", + "https://www.ntu.edu.sg/home/axsun/paper/sigir16text.pdf": "Topic Modeling for Short Texts with Auxiliary Word Embeddings", + "http://dl.acm.org/citation.cfm?id=2505526": "Effective measures for inter-document similarity", + "http://www.semanlink.net/doc/2020/07/how_we_created_neuromorphic_eng": "How we created neuromorphic engineering Nature Electronics", + "http://stackoverflow.com/questions/23544282/what-is-the-best-way-to-manage-permissions-for-docker-shared-volumes": "What is the (best) way to manage permissions for docker shared volumes - Stack Overflow", + "http://www.nytimes.com/2014/11/18/science/earth/hydrogen-cars-join-electric-models-in-showrooms.html?partner=rss&emc=rss": "Hydrogen Cars Join Electric Models in Showrooms - NYTimes.com", + "http://www.nytimes.com/2013/05/19/automobiles/a-benz-with-a-virtual-chauffeur.html?pagewanted=all": "A Benz With a Virtual Chauffeur - NYTimes.com", + "http://www.rfi.fr/radiofr/editions/072/edition_57_20070726.asp": "Dans les mines d\u2019or de Komabangou, au Niger", + "http://www.semanlink.net/doc/2021/06/scikit_learn_pipelines_meet_kno": "scikit-learn Pipelines meet Knowledge Graphs - The Python kgextension Package ESWC 2021", + "http://winch5.blog.lemonde.fr/2013/08/22/comment-le-mobile-change-lafrique/": "Comment le mobile change l\u2019Afrique Winch 5", + "http://www.lemonde.fr/sciences/video/2014/06/29/vers-des-panneaux-solaires-inspires-des-plantes_4447419_1650684.html": "Vers des panneaux solaires inspir\u00e9s des plantes", + "https://medium.freecodecamp.org/how-to-turn-your-website-into-a-mobile-app-with-7-lines-of-json-631c9c9895f5": "How to Turn Your Website into a Mobile App with 7 Lines of JSON", + "http://www.alvit.de/web-dev/index.html": "Essential bookmarks for web-designers and webdevelopers CSS, Color Tools, Royalty free photos, Usability etc.", + "http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0094.html": "[ANN] Public release of Glimmer RDF search engine and demo from Peter Mika on 2013-06-20 (public-vocabs@w3.org from June 2013)", + "http://www.thefigtrees.net/lee/blog/2007/02/updates_to_sparqljs.html": "TechnicaLee Speaking: Updates to sparql.js", + "http://news.bbc.co.uk/2/hi/africa/7981964.stm": "BBC NEWS Africa Rwanda's ghosts refuse to be buried", + "http://www.w3.org/2005/Incubator/rdb2rdf/XGR-rdb2rdf/": "W3C RDB2RDF Incubator Group Report", + "http://www.semanlink.net/doc/2021/06/efficient_open_domain_question_": "Efficient 
open-domain question-answering on Vespa.ai Vespa Blog", + "http://www.semanlink.net/doc/2021/03/nomade_des_mers_les_escales_de": "Nomade des mers, les escales de l'innovation", + "https://www.fastcompany.com/40435064/what-alan-kay-thinks-about-the-iphone-and-technology-now": "The Father Of Mobile Computing Is Not Impressed", + "http://openjena.org/": "Jena Semantic Web Framework", + "http://www.bbc.co.uk/news/science-environment-15181187": "2011: Nobel win for the discovery of the structure of quasicrystals", + "http://pingthesemanticweb.com/namespaces.php": "Ping the Semantic Web.com", + "http://www.math.union.edu/~dpvc/jsMath/welcome.html": "jsMath Home Page", + "http://net.tutsplus.com/tutorials/other/easy-version-control-with-git/": "Tutorial: Easy Version Control with Git Nettuts+", + "http://www.semanlink.net/doc/2020/07/finding_similar_documents_with_": "Finding similar documents with transformers \u00b7 Codegram", + "http://code.google.com/p/linked-data-api/wiki/AssumptionsAndGoals": "linked-data-api: assumptions and goals", + "http://www.markus-lanthaler.com/research/third-generation-web-apis-bridging-the-gap-between-rest-and-linked-data.pdf": "Markus Lanthaler - Doctoral Dissertation", + "http://edition.cnn.com/2012/01/13/tech/innovation/ces-future-driving/index.html": "'Augmented-reality' windshields and the future of driving - CNN.com", + "http://stackoverflow.com/questions/30424894/java-syntax-with-mongodb": "Java syntax with MongoDB - Stack Overflow", + "http://www.w3.org/2005/rules/wiki/RIF_FAQ": "RIF FAQ", + "http://alternatives.blog.lemonde.fr/2012/08/01/avec-giradora-pedaler-cest-laver-et-ca-change-la-vie/": "Giradora, la machine \u00e0 laver \u00e0 p\u00e9dale qui change la vie M\u00eame pas mal", + "http://www.itworld.com/software/382730/ibm-offer-watson-supercomputer-cloud-development-platform": "IBM to offer Watson supercomputer as cloud development platform ITworld", + "http://www.cigref.fr/etude-cigref-enjeux-de-mise-en-oeuvre-de-l-intelligence-artificielle-pour-l-entreprise": "Etude CIGREF : enjeux de mise en \u0153uvre de l\u2019Intelligence Artificielle pour l\u2019entreprise\u2026 \u2013 CIGREF", + "http://framablog.org/2016/01/30/la-blockchain-au-dela-du-bitcoin/": "La Blockchain, au-del\u00e0 du Bitcoin \u2013 Framablog", + "https://huyenchip.com/2018/10/04/sotawhat.html": "SOTAWHAT - A script to keep track of state-of-the-art AI research", + "http://arstechnica.com/reviews/os/macosx-10.4.ars/7": "Mac OS X 10.4 Tiger : Page 7 Mac OS X 10.4 Tiger", + "http://www.wollemipine.com": "WollemiPine.com - the official home of the Wollemi Pine.", + "http://www.linkedin.com/groupAnswers?viewQuestionAndAnswers=&discussionID=254113207&gid=49970&commentID=148780198&trk=view_disc&fromEmail=&ut=0BXwfSCrIXc5Q1": "Renault Exploitation of Linked Data LinkedIn, discussion started by Kingsley Idehen", + "http://www.semanlink.net/doc/2021/01/why_generative_modeling": "Why generative modeling", + "http://webofdata.wordpress.com/2010/09/27/linked-enterprise-data-in-a-nutshell/": "Linked Enterprise Data in a nutshell \u00ab Web of Data", + "http://www.ldh-toulon.net/spip.php?article399": "[LDH-Toulon] la colonne infernale de Voulet-Chanoine", + "http://www.freenet.org.nz/misc/google-privacy.html": "Protect Your Privacy from Google", + "http://spaces.live.com/editorial/rayozzie/demo/liveclip/liveclipsample/clipboardexample.html": "Live Clipboard Example", + "http://www.semanlink.net/doc/2021/01/plantu_2020": "Plantu 2020", + 
"http://www.semanlink.net/doc/2019/06/trip_report_akbc_2019_1st_con": "Trip Report: AKBC 2019 (1st Conference on Automated Knowledge Base Construction)", + "http://www.rentacoder.com": "Rent A Coder: How Software Gets Done", + "http://bighugelabs.com/flickr/": "fd's Flickr Toys: Fun toys for your digital photographs", + "http://www.lemonde.fr/afrique/article/2015/06/10/dans-le-parc-congolais-des-virunga-l-armee-a-la-solde-du-petrolier-soco_4650727_3212.html": "Dans le parc congolais des Virunga, l\u2019arm\u00e9e \u00e0 la solde du p\u00e9trolier SOCO", + "http://blog.ted.com/2014/09/26/a-ted-fellow-wields-genes-to-protect-the-amazon/": "Invasion of the golden mussel: wielding genes to protect the Amazon TED Blog", + "https://www.reador.net/": "Reador - aggregateur de news s\u00e9mantique", + "http://denisnddo.free.fr/html/zarma.htm#ancre16": "Zarma: notions de base Zarma : notions de base", + "http://clarkparsia.com/weblog/2007/08/27/understanding-swrl-part-2-dl-safety/": "Thinking Clearly\u00bb Understanding SWRL (Part 2): DL Safety", + "http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.43.7803": "Information Retrieval as Statistical Translation (Adam Berger , John Lafferty, 1999)", + "http://en.wikipedia.org/wiki/Perdita_Durango": "Perdita Durango - Wikipedia, the free encyclopedia", + "https://stackoverflow.com/questions/39063676/how-to-boost-a-keras-based-neural-network-using-adaboost": "How to boost a Keras based neural network using AdaBoost? - Stack Overflow", + "https://transacl.org/ojs/index.php/tacl/article/download/742/204": "A Latent Variable Model Approach to PMI-basedWord Embeddings", + "http://ebiquity.umbc.edu/v2.1/blogger/": "EBB: ebiquity blog at UMBC", + "http://blog.shakirm.com/2015/11/talk-memory-based-bayesian-reasoning-and-deep-learning/": "Talk: Memory-based Bayesian Reasoning and Deep Learning \u2190 The Spectator", + "https://dvcs.w3.org/hg/htmldata/raw-file/default/html-data-guide/index.html": "HTML Data Guide", + "http://community.kde.org/Baloo#Why_change_Nepomuk.3F": "Baloo - KDE Community Wiki", + "http://www.javaworld.com/article/2077602/learn-java/java-tip-98--reflect-on-the-visitor-design-pattern.html": "Java Tip 98: Reflect on the Visitor design pattern JavaWorld", + "http://home.ccil.org/~cowan/XML/tagsoup/": "TagSoup", + "https://support.apple.com/en-us/HT201300": "How to identify MacBook Pro models - Apple Support", + "http://infolab.stanford.edu/gps/": "GPS: A Graph Processing System", + "http://www.jenitennison.com/blog/node/154": "Hash URIs Jeni's Musings", + "http://www.geocities.com/anpipniger/CompC.htm": "CIMA.html Irrigation Manuelle Am\u00e9lior\u00e9e", + "http://internetactu.blog.lemonde.fr/2013/05/31/de-la-fabrique-des-automates-politiques/": "De la fabrique des automates politiques InternetActu", + "http://alistapart.com/column/windows-on-the-web": "Windows on the Web \u2219 An A List Apart Column", + "http://dig.csail.mit.edu/breadcrumbs/node/71": "Give yourself a URI", + "https://lists.w3.org/Archives/Public/public-linked-json/2015Jul/0007.html": "Re: Querying JSON-LD on client side from David I. 
Lehn on 2015-07-17 (public-linked-json@w3.org from July 2015)", + "http://www.newscientist.com/article/mg22329832.700-googles-factchecking-bots-build-vast-knowledge-bank.html?full=true#.U_3IxEtGuwx": "Google's fact-checking bots build vast knowledge bank - tech - 20 August 2014 - New Scientist", + "http://en.wikipedia.org/wiki/Koumbi_Saleh": "Koumbi Saleh - Wikipedia, the free encyclopedia", + "http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20route%20prediction%20-%20camera%20ready%20v3.pdf": "Route Prediction from Trip Observations", + "http://idlewords.com/talks/website_obesity.htm": "The Website Obesity Crisis", + "http://bigbrowser.blog.lemonde.fr/2016/02/26/un-poeme-anglais-imprononcable/": "Un po\u00e8me anglais impronon\u00e7able Big Browser", + "http://www.semanlink.net/doc/2020/04/julian_eisenschlos_sur_twitter_": "Julian Eisenschlos sur Twitter : \"Ever wondered how to pre-train models that understand tables and do QA?\"", + "http://time.com/4193747/el-salvador-zika-virus-pregnancy/": "El Salvador Asks People Not to Have Children for Two Years Due to Zika Virus TIME", + "http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su": "[1709.07604] A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications", + "http://jobs.semanticweb.com/jobboard.php": "Semantic Web Jobs Semantic Web Job Board", + "http://jex.im/regulex/#!embed=false&flags=&re=%5E(a%7Cb)*%3F%24": "Regulex\uff1aJavaScript Regular Expression Visualizer.", + "http://www-128.ibm.com/developerworks/opensource/library/os-php-read/": "Recommended PHP reading list", + "http://www.ilrt.bris.ac.uk/discovery/rdf/resources/": "", + "http://www.lemonde.fr/religions/article/2016/05/10/assimiler-la-radicalisation-islamiste-a-un-phenomene-sectaire-pose-probleme_4917030_1653130.html": "Ccomment penser le d\u00e9sir sacrificiel au nom de l\u2019islam", + "http://www.w3.org/2001/sw/sweo/public/UseCases/Audi/": "Use Case: How Ontologies and Rules Help to Advance Automobile Development", + "http://www.mkbergman.com/?p=415": "Cytoscape: Hands-down Winner for Large-scale Graph Visualization \u00bb AI3:::Adaptive Information", + "http://www.semanlink.net/doc/2021/08/ige_le_crochemelier": "Ig\u00e9 \u2013 Le Crochem\u00e9lier", + "http://www.nytimes.com/2012/06/26/technology/in-a-big-network-of-computers-evidence-of-machine-learning.html?pagewanted=1&_r=2": "In a Big Network of Computers, Evidence of Machine Learning - NYTimes.com", + "http://www.fastcoexist.com/1679201/fungi-discovered-in-the-amazon-will-eat-your-plastic": "Fungi Discovered In The Amazon Will Eat Your Plastic Co.Exist: World changing ideas and innovation", + "http://www.fiftyfoureleven.com/resources/programming/xmlhttprequest/examples": "XMLHttpRequest & Ajax Working Examples - Links and Resources, Fiftyfoureleven.com", + "https://twitter.com/trbouma/status/1050188438786502656": "Tim Bouma sur Twitter : \"I take the bus home. I stand behind two people. I notice the weird-looking shoes tied to their backpacks. 
After a few moments, I realize they are rock-climbers and those are grippy shoes...\"", + "http://www.lehall.com/galerie/africains/": "Les Africains de la Chanson Francophone", + "http://www.lefigaro.fr/sciences/2018/04/09/01008-20180409ARTFIG00009-des-civilisations-entieres-peuplaient-l-amazonie-au-moyen-ge.php": "Des civilisations oubli\u00e9es peuplaient l'Amazonie au Moyen-\u00c2ge", + "http://raimond.me.uk/slides/isemantics-2013/#/": "Current and future uses of Semantic Web technologies at the BBC", + "http://www.lemonde.fr/planete/article/2013/04/04/comment-la-peche-chinoise-pille-les-oceans-de-la-planete_3154101_3244.html": "Comment la p\u00eache chinoise pille les oc\u00e9ans de la plan\u00e8te", + "http://www.semanlink.net/doc/2020/01/i_oversaw_the_us_nuclear_power_": "I Oversaw the US Nuclear Power Industry. Now I Think It Should Be Banned. Common Dreams Views", + "http://www.semanlink.net/doc/2020/04/edgar_morin_%C2%AB_cette_crise_nou": "Edgar Morin\u00a0: \u00ab\u00a0Cette crise nous pousse \u00e0 nous interroger sur notre mode de vie, sur nos vrais besoins masqu\u00e9s dans les ali\u00e9nations du quotidien\u00a0\u00bb", + "http://smethur.st/posts/176135860": "Designing a URL structure for BBC programmes Smethurst", + "http://www.semanlink.net/doc/2020/04/coronavirus_la_seine_saint_de": "Coronavirus : la Seine-Saint-Denis confront\u00e9e \u00e0 une inqui\u00e9tante surmortalit\u00e9", + "http://www.jonathanboutelle.com/mt/archives/2006/01/howto_debug_jav.html": "HOW-TO: Debug JavaScript in Internet Explorer", + "http://www.lemonde.fr/planete/article/2010/05/27/au-danemark-la-mutation-d-aalborg-en-ville-ecologique_1363815_3244.html": "Au Danemark, la mutation d'Aalborg en ville \u00e9cologique", + "https://blog.manash.me/how-to-use-pre-trained-word-vectors-from-facebooks-fasttext-a71e6d55f27": "How to use pre-trained word vectors from Facebook\u2019s fastText", + "https://developers.facebook.com/docs/opengraph/action-guides": "Open Graph Actions", + "http://spectrum.ieee.org/energy/nuclear/24-hours-at-fukushima/0": "24 Hours at Fukushima - IEEE Spectrum", + "https://academic.microsoft.com/": "Microsoft Academic", + "http://www.cigref.fr/wp/wp-content/uploads/2017/10/CIGREF-Cercle-IA-2017-Mise-en-oeuvre-operationnelle-IA-en-Entreprises.pdf": "Les enjeux de mise en \u0153uvre op\u00e9rationnelle de l\u2019intelligence artificielle dans les grandes entreprises", + "http://www.ldodds.com/blog/archives/000255.html": "Lost Boy: SPARQLing", + "http://www.alwayssunny.com/blog/?cat=16": "Bits n Bobs \u00bb tomcat", + "http://mashable.com/2009/04/05/europe-social-media/": "European Social Media: 19 Web Startups to Watch", + "http://lists.w3.org/Archives/Public/www-tag/2005Jun/0092.html": "Re: More on distinguishing information resources from other resources Roy T. 
Fielding", + "http://www.nasa.gov/mission_pages/cassini/multimedia/N00092001-9-12-07.html": "NASA - Close-Up of Iapetus", + "http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte": "(((\u0644()(\u0644() 'yoav)))) sur Twitter : what do you think should be an interesting and important achievement of 2020 for NLP?", + "https://stackoverflow.com/questions/41752405/running-flask-app-in-a-docker-container": "python - Running Flask app in a Docker container - Stack Overflow", + "https://www.ibm.com/developerworks/community/blogs/c06ef551-0127-483d-a104-cdd02b1cee31/entry/february_3_2014_1_47_pm?lang=en": "Reflections on a Year spent developing with RDF and JSON (Software Development on the Cloud Exploration)", + "http://www.semanlink.net/doc/2021/06/how_dense_passage_retrievers_d": "How Dense Passage Retrievers (DPR) Work Towards Data Science", + "http://passeurdesciences.blog.lemonde.fr/2013/07/07/decouverte-nouveau-monstre-du-cosmos-astronomie/": "D\u00e9couverte d\u2019un nouveau monstre du cosmos Passeur de sciences", + "http://www.semanlink.net/doc/2020/09/%C2%AB_pourquoi_les_autorites_sont_e": "\u00ab Pourquoi les autorit\u00e9s sont-elles si prudentes pour le Covid-19 et pas pour le r\u00e9chauffement climatique\u00a0?\u00a0\u00bb", + "http://qudt.org/": "QUDT - Quantities, Units, Dimensions and Data Types in OWL and XML", + "http://www.nytimes.com/2006/05/28/world/asia/28everest.html": "'Dead' Climber's Survival Impugns Mount Everest Ethics - New York Times", + "http://www.w3.org/2000/10/swap/doc/": "Semantic Web Tutorial Using N3", + "http://www.semanlink.net/doc/2020/05/denny_britz_sur_twitter_i_bu": "Denny Britz sur Twitter : \"I built a little frontend for my AI/ML papergraph tool...\"", + "http://www-sop.inria.fr/members/Fabien.Gandon/docs/www2013/WWW2013_Tutorial_WebSem_FabienGandon.pdf": "An introduction to semantic web and linked data", + "http://www.dbgroup.unimo.it/swae08/index.html": "SWAE 08", + "http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104140": "France Inter > Sur les \u00e9paules de Darwin > A la recherche des myst\u00e8res de l'h\u00e9r\u00e9dit\u00e9 (2)", + "http://dannyayers.com/2006/05/01/personal-gnowledge": "Personal Gnowledge", + "http://ebiquity.umbc.edu/blogger/2006/09/15/nissan-to-test-intelligent-transportation-system/": "Nissan to test intelligent transportation system", + "http://www.gbif.org/": "GBIF portal: Home", + "https://twitter.com/karpathy/status/1013244313327681536": "Andrej Karpathy sur Twitter : most common neural net mistakes", + "http://demo.openlinksw.com/DAV/JS/rdfbrowser/index.html": "OpenLink RDF Browser", + "https://www.fastcompany.com/40432885/its-time-to-break-up-amazon": "It\u2019s Time To Break Up Amazon", + "http://www.google.com/googlenotebook/overview.html": "Google Notebook", + "http://www.eff.org/deeplinks/archives/004910.php": "EFF: Microsoft's Zune Won't Play Protected Windows Media", + "http://stackoverflow.com/questions/12313164/installing-subversion-javahl-library-in-mac-os-x": "eclipse - Installing Subversion JavaHL library in Mac OS X - Stack Overflow", + "http://www.w3.org/TR/skos-ucr/": "SKOS Use Cases and Requirements", + "http://asynchronous.org/blog/archives/2006/01/25/logging_in_json.html": "AsynchronousBlog: Logging in JSON", + "http://www.w3.org/wiki/N3inHTML": "N3inHTML - W3C Wiki", + "http://www.semanlink.net/doc/2019/06/nlp_contextualized_word_embedd": "NLP: Contextualized word embeddings from BERT \u2013 Towards Data Science", + 
"http://www.xfront.com/sld001.htm": "REST (slides)", + "https://lejournal.cnrs.fr/billets/energie-les-promesses-de-lhydrogene?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#link_time=1512760037": "\u00c9nergie : les promesses de l'hydrog\u00e8ne CNRS Le journal", + "http://www.semanlink.net/doc/2019/11/using_axios_to_consume_apis_v": "Using Axios to Consume APIs \u2014 Vue.js", + "http://arstechnica.com/science/news/2012/04/decision-to-entangle-effects-results-of-measurements-taken-beforehand.ars": "Quantum decision affects results of measurements taken earlier in time", + "http://esw.w3.org/WebID": "WebID - ESW Wiki", + "http://eprints.ecs.soton.ac.uk/13024/1/wi2006-TR.pdf": "Semantic Web Assisted Learning Experience Management \u2013 Architecture and Strategy for Collaborative Learning Experience Sharing", + "http://maestric.com/doc/mac/apache_php_mysql_snow_leopard": "Install Apache/PHP/MySQL on Snow Leopard", + "http://recollection.zepheira.com/": "Recollection", + "http://spectrum.ieee.org/computing/software/the-future-of-music/0": "The Future of Music - Tearing Down the Wall of Noise - IEEE Spectrum", + "http://blog.xebia.fr/2014/03/17/post-vs-put-la-confusion/": "POST vs. PUT : la confusion Blog Xebia - Cabinet de conseil IT", + "https://www.lemonde.fr/societe/article/2018/06/24/banksy-peint-les-murs-de-paris-pour-illustrer-la-crise-des-migrants_5320435_3224.html": "Banksy peint les murs de Paris pour illustrer la crise des migrants", + "http://www.bioshare.net/News/NEWS1178699092": "Bioshare: A home page for every species", + "http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.164.416&rep=rep1&type=pdf": "Predictive analytics and data mining", + "http://www.bbc.co.uk/news/science-environment-17769529": "Evolution seen in 'synthetic DNA'", + "http://www.offconvex.org/2015/12/12/word-embeddings-1/": "Semantic Word Embeddings \u2013 Off the convex path", + "http://www.jacuba.com": "Welcome to Jacuba - Free Online Spellchecker", + "http://blog.diniscruz.com/2014/02/using-angularjs-in-eclipse-part-1.html": "Dinis Cruz Blog: Using AngularJS in Eclipse, Part 1) The Basics", + "http://www.ldodds.com/blog/2009/12/approaches-to-publishing-linked-data-via-named-graphs/": "Approaches to Publishing Linked Data via Named Graphs \u00ab Lost Boy", + "http://dacamo76.wordpress.com/2011/07/22/embedding-rapidminer-as-a-library-in-an-application/": "Embedding RapidMiner as a library in an application Dacamo76's Blog", + "http://www.lemonde.fr/afrique/article/2016/07/26/panama-papers-le-roi-de-l-autobus-au-niger-pratique-la-finance-aux-seychelles_4974817_3212.html": "\u00ab\u00a0Panama Papers\u00a0\u00bb\u00a0: le roi de l\u2019autobus au Niger pratique la finance aux Seychelles", + "http://blog.octo.com/designer-une-api-rest/#case_body": "Designer une API REST OCTO talks !", + "https://arxiv.org/pdf/1507.07998.pdf": "[1507.07998] Document Embedding with Paragraph Vectors", + "http://news.stanford.edu/news/2010/february1/shelley-pikes-peak-020310.html": "Stanford's robotic Audi to brave Pikes Peak without a driver", + "http://owled2007.iut-velizy.uvsq.fr/FinalProgramme.html": "OWLED 2007 Program", + "http://semanticweb.com/schema-org-chat-googles-r-v-guha_b40607": "Where Schema.org Is At: A Chat With Google\u2019s R.V. 
Guha - Semanticweb.com", + "http://www.nature.com/news/2011/110228/full/471013a.html": "Beautiful theory collides with smashing particle data : Nature News", + "http://allemagne.blog.lemonde.fr/2016/07/21/un-bilan-tres-personnel-des-annees-merkel/": "Un bilan (tr\u00e8s personnel) des ann\u00e9es Merkel Merkel, acte III", + "https://maps.google.fr/maps?q=belleme+61&hl=en&ie=UTF8&ll=48.346139,0.54348&spn=0.012678,0.022595&sll=48.858859,2.34706&sspn=0.200805,0.361519&t=h&hnear=Bell%C3%AAme,+Orne,+Lower+Normandy&z=16": "Crochemelier", + "http://blog.datumbox.com/new-open-source-machine-learning-framework-written-in-java/": "New open-source Machine Learning Framework written in Java Datumbox", + "http://www.newsweek.com/id/124122": "Obama's audacious\u2014and risky\u2014address on race", + "http://semanticweb.com/build-knowledge-graph-video_b43358": "How to Build Your Own Knowledge Graph (Video \u2013 Part 1) - Semanticweb.com", + "http://blogs.zdnet.com/BTL/?p=3626": "\u00bb EFF on Zune: Risk of DRM/DMCA checkmate no longer a risk. It's reality ZDNet.com", + "http://www.ft.com/intl/cms/s/0/562692b0-898c-11e2-ad3f-00144feabdc0.html": "Africa must get real about Chinese ties - FT.com", + "https://shiro.apache.org/": "Apache Shiro Simple. Java. Security.", + "http://lemonde-educ.blog.lemonde.fr/2012/12/20/avec-gymglish-le-monde-sembarque-dans-une-aventure-pedago/": "Avec Gymglish, \u00ab\u00a0Le Monde\u00a0\u00bb s\u2019embarque dans une aventure \u00ab\u00a0p\u00e9dago\u00a0\u00bb Peut mieux faire", + "http://lists.w3.org/Archives/Public/public-vocabs/2012May/0096.html": "Schema for vehicles from Richard Le Poidevin on 2012-05-22 (public-vocabs@w3.org from May 2012)", + "http://medias.lemonde.fr/mmpub/edt/doc/20071004/963161_avis100.pdf": "Rapport du Comit\u00e9 consultatif national d'\u00e9thique sur l'amendement ADN", + "http://www.sitepoint.com/blogs/2008/04/22/www2008-beijing-day-1-linked-data-on-the-web-ldow-2008-workshop/": "SitePoint Blogs \u00bb WWW2008 Beijing: Day 1 - Linked Data on the Web (LDOW 2008) Workshop", + "http://www.bbc.co.uk/news/technology-21769537": "BBC News - Worms detected by converted iPhone microscope", + "https://www.quora.com/Can-I-use-word2vec-to-train-a-machine-learning-classifier": "Can I use word2vec to train a machine learning classifier? - Quora", + "http://mccormickml.com/2016/04/27/word2vec-resources/": "Word2Vec Resources \u00b7 Chris McCormick", + "http://www.semanlink.net/doc/2020/04/how_do_different_communities_cr": "How do different communities create unique identifiers? 
\u2013 Lost Boy", + "http://www.semanlink.net/doc/2021/10/bigscience_research_workshop_su": "BigScience Research Workshop sur Twitter : \"Come help us improve language resource visibility over the next week...\"", + "http://piketty.pse.ens.fr/fr/capital21c": "Thomas Piketty - Le capital au 21e si\u00e8cle - Web site", + "http://www.wired.com/2015/08/coding-physics-course/": "You Should Be Coding in Your Physics Course WIRED", + "http://www.youtube.com/watch?v=n2aaIbeKQvo&feature=related": "Moussa toujours en retard", + "http://www.infoworld.com/d/the-industry-standard/hackers-find-new-way-cheat-wall-street-everyones-peril-699": "Hackers find new way to cheat on Wall Street -- to everyone's peril", + "http://www.w3.org/2001/sw/sweo/public/UseCases/BBC/": "Case Study: Use of Semantic Web Technologies on the BBC Web Sites", + "https://plantnet.org/": "Pl@ntNet", + "http://documentcloud.github.com/backbone/docs/todos.html": "todos.js", + "http://www.semanlink.net/doc/2019/08/ai_system_should_be_recognised": "AI system 'should be recognised as inventor' - BBC News", + "http://www.cs.utexas.edu/~jsequeda/meetings.html": "Juan Sequeda - Meetings", + "http://www-lipn.univ-paris13.fr/~cerin/documents/mine05.pdf": "Efficient Data-Structures and Parallel Algorithms for Association Rules Discovery", + "http://www.salsasud.com/spip.php?article=734": "Orquesta Aragon - Salsasud", + "http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati": "Zero-shot classifier distillation at master \u00b7 huggingface/transformers", + "https://supernlp.github.io/2018/11/26/sentreps/": "On sentence representations, pt. 1: what can you fit into a single #$!%@*&% blog post? \u00b7 Supernatural Language Processing", + "https://www.newscientist.com/article/mg23531444-600-spontaneous-collapses-may-show-how-to-unite-quantum-theory-and-gravity/?utm_campaign=RSS%7CNSNS&utm_source=NSNS&utm_medium=RSS&utm_content=news&campaign_id=RSS%7CNSNS-news": "Gravity may be created by strange flashes in the quantum realm New Scientist", + "http://apex.sjtu.edu.cn/": "Apex Data & Knowledge Management Lab", + "http://www2008.org/program/program-overview.html": "WWW2008 Conference: program", + "https://lejournal.cnrs.fr/billets/energie-les-promesses-de-lhydrogene": "\u00c9nergie : les promesses de l'hydrog\u00e8ne CNRS Le journal", + "https://class.coursera.org/bigdata-002/class/index": "Web Intelligence and Big Data - Coursera", + "http://www.csie.ntu.edu.tw/~cjlin/papers/libshorttext.pdf": "H.-F. Yu, C.-H. Ho, Y.-C. Juan, and C.-J. Lin. LibShortText: A Library for Short-text Classification and Analysis", + "http://apassant.net/blog/2009/06/11/commontag-easy-use-vocabulary-semantic-tagging": "CommonTag - An easy-to-use vocabulary for Semantic Tagging Alexandre Passant", + "http://www.sphere-engineering.com/blog/quickanswers-io-a-new-algorithm.html": "Sphere Engineering - Machine Learning Solutions - QuickAnswers.io: a new algorithm", + "http://www.easychair.org/OWLED2007/": "EasyChair. 
Login Page for OWLED 2007", + "https://crossbario.com/blog/Going-Asynchronous-from-Flask-to-Twisted-Klein/": "Going asynchronous: from Flask to Twisted Klein", + "http://semtechbizuk2011.semanticweb.com/sessionPop.cfm?confid=63&proposalid=4385": "Semantic SEO for Automotive", + "http://webr3.org/apps/specs/jsn3/": "JSN3", + "http://thegeekyway.com/css-regex-selector-using-regular-expression-css/": "CSS Regex selector \u2013 Using Regular Expression in CSS \u2013 The Geeky Way", + "http://ruben.verborgh.org/blog/2015/01/30/600000-queryable-datasets-and-counting/": "600,000 queryable datasets\u2014and counting Ruben Verborgh", + "http://googleresearch.blogspot.ca/2013/05/launching-quantum-artificial.html": "Launching the Quantum Artificial Intelligence Lab", + "http://www.ldodds.com/blog/2009/11/managing-rdf-using-named-graphs/": "Managing RDF Using Named Graphs \u00ab Lost Boy", + "http://www.fr.atosorigin.com/fr-fr/nos_activites/solutions/business_intelligence/default.htm": "Business Intelligence selon Atos Origin", + "https://medium.com/@jimmoeller149/programmatic-patent-searches-using-googles-bigquery-public-patent-data-293adad3d30c": "Programmatic Patent Searches Using Google\u2019s BigQuery & Public Patent Data", + "http://weblog.burningbird.net/2006/04/13/allo-jena/": "Burningbird \u00bb \u2018allo Jena", + "http://ns.inria.fr/nicetag/2010/09/09/voc.html": "Nice Tag ontology", + "https://research.google.com/pubs/pub45482.html": "Contextual LSTM: A Step towards Hierarchical Language Modeling", + "http://blogs.sun.com/roller/page/bblfish?entry=rest_without_rdf_is_only": "REST without RDF is only half as bad as SOAP - The Sun BabelFish Blog", + "http://www.lemonde.fr/proche-orient/article/2015/05/22/il-faut-sauver-palmyre_4638788_3218.html": "\u00ab L\u2019Etat islamique \u00e0 Palmyre, c\u2019est l\u2019Etat islamique dans la cour du Louvre \u00bb", + "http://leobard.twoday.net/stories/982264/": "Semantic World and Cyberspace: SPARQL has some use - Kendall Clark", + "http://education.okfn.org/12-open-education-days-of-christmas/": "12 Open Education Days of Christmas Open Education Working Group", + "http://www.semanlink.net/doc/2020/12/initiation_aux_etudes_historiqu": "INITIATION AUX \u00c9TUDES HISTORIQUES", + "http://ecologie.blog.lemonde.fr/2013/04/03/peut-on-nettoyer-les-oceans-des-dechets-plastiques/": "Peut-on nettoyer les oc\u00e9ans des d\u00e9chets plastiques ? Eco(lo)", + "https://www.msn.com/en-xl/europe/top-stories/uk-could-cancel-brexit-and-stay-in-eu-on-same-terms-says-french-government/ar-BBL6cyE": "UK could cancel Brexit and stay in EU on same terms, says French government", + "https://www.newyorker.com/sporting-scene/replay/world-cup-2018-kylian-mbappe-and-france-troll-their-way-to-the-final": "World Cup 2018: Kylian Mbapp\u00e9 and France Troll Their Way to the Final The New Yorker", + "http://stackoverflow.com/questions/677480/which-browsers-have-problems-caching-xmlhttprequest-responses": "Which browsers have problems caching XMLHTTPRequest responses? - Stack Overflow", + "http://www.jair.org/media/2934/live-2934-4846-jair.pdf": "From Frequency to Meaning: Vector Space Models of Semantics (2010)", + "http://www.theguardian.com/technology/2014/feb/22/robots-google-ray-kurzweil-terminator-singularity-artificial-intelligence": "Are the robots about to rise? 
Google's new director of engineering thinks so\u2026 Technology The Observer", + "http://www.w3.org/DesignIssues/Fractal.html": "Fractal Web - Commentary on Web Architecture", + "http://del.icio.us": "", + "http://www.npr.org/blogs/krulwich/2013/01/16/169511949/a-mysterious-patch-of-light-shows-up-in-the-north-dakota-dark": "A Mysterious Patch Of Light Shows Up In The North Dakota Dark : Krulwich Wonders... : NPR", + "http://www.oezratty.net/wordpress/2016/avancees-intelligence-artificielle-2/": "Les avanc\u00e9es de l\u2019intelligence artificielle", + "http://ontorule-project.eu/parrot/parrot": "Parrot, a RIF and OWL documentation service", + "http://www.semanlink.net/doc/2021/09/nmt_training_through_the_lens_o": "NMT Training through the Lens of SMT", + "http://www.semanlink.net/doc/2020/12/understanding_graph_embeddings%7C": "Understanding Graph Embeddings by Dan McCreary Nov, 2020 Medium", + "http://www.telegraph.co.uk/science/9066393/Ancient-seagrass-Oldest-living-thing-on-earth-discovered-in-Mediterranean-Sea.html": "Ancient seagrass: 'Oldest living thing on earth' discovered in Mediterranean Sea - Telegraph", + "http://www-wds.worldbank.org/servlet/WDSContentServer/WDSP/IB/2002/03/15/000094946_02030604011943/Rendered/INDEX/multi0page.txt": "World Bank - PROJECT APPRAISAL DOCUMENT ON A PROPOSED CREDIT TO THE REP OF NIGER FOR THE PRIVATE IRRIGATION PROMOTION PROJECT (February 21, 2002) World Bank - Private irrigation promotion project in Niger", + "http://dspace.mit.edu/handle/1721.1/96942": "DSpace@MIT: Object detectors emerge in Deep Scene CNNs", + "https://www.quora.com/Is-Swagger-any-good": "Is Swagger any good? - Quora", + "http://www.fbo.gov/spg/ODA/DARPA/CMO/BAA07-21/Modification%2002.html": "Chemical robots - DEFENSE SCIENCES RESEARCH AND TECHNOLOGY", + "http://www.youtube.com/watch?v=E3tgI9AjrCg": "\u00c9teins la lumi\u00e8re", + "http://pathway.screenager.be/index.html": "Pathway", + "http://www.w3.org/2005/Talks/1214-Trento-IH/": "Tutorial on Semantic Web Technologies (1)", + "https://vimeo.com/141675862": "10x Not 10% : Product management by orders of magnitude by Ken Norton at Mind the Product 2015 sur Vimeo", + "http://machinelearningmastery.com/how-to-learn-any-machine-learning-tool/": "How To Learn Any Machine Learning Tool - Machine Learning Mastery", + "http://www.guardian.co.uk/world/2013/jun/17/edward-snowden-nsa-files-whistleblower": "Edward Snowden Q&A: Dick Cheney traitor charge is 'the highest honor' World news guardian.co.uk", + "http://arxiv.org/pdf/cs.DS/0310019": "A hierarchical Algorithm to Solve the Shortest Path Problem in Valued Graphs", + "http://www.ldodds.com/blog/archives/000251.html": "Lost Boy: Parameterised Queries with SPARQL and ARQ", + "http://energie-climat.greenpeace.fr/greenpeace-denonce-un-fiasco-climatique-les-politiciens-en-restent-aux-declarations-d%E2%80%99intentions": "Politicians talk, leaders act", + "http://wiki.leibnizcenter.org/index.php/OWLED_2007": "OWLED 2007 - LeibnizWiki", + "http://twitter.com/cern/": "CERN (CERN) on Twitter", + "https://sizovs.net/2018/12/17/stop-learning-frameworks/": "Stop Learning Frameworks \u2013 Lifehacks for Developers by Eduards Sizovs", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4394080.stm": "BBC NEWS Science/Nature Smart directions for green ideas", + "http://ebiquity.umbc.edu/blogger/2009/07/02/nosql-distributed-key-value-data-stores/": "NOSQL: distributed key-value data stores", + "http://www.semanlink.net/doc/2021/10/sentence_embeddings_and_transfo": 
"Sentence Embeddings and Transformers Pinecone", + "http://www.bbc.com/news/science-environment-43116836": "Origins of land plants pushed back in time", + "https://addons.mozilla.org/en-US/firefox/addon/8062": "OpenLink Data Explorer :: Firefox Add-ons", + "https://www.facebook.com/notes/blake-ross/aphantasia-how-it-feels-to-be-blind-in-your-mind/10156834777480504": "Aphantasia: How It Feels To Be Blind In Your Mind", + "http://www.bing.com/widget/knowledge": "Bing - Knowledge Widget (Beta)", + "https://www.youtube.com/watch?v=ZmNOAtZIgIk": "Bay Area Vision Meeting: Unsupervised Feature Learning and Deep Learning - YouTube", + "https://twitter.com/i/web/status/1010499867330871296": "\"Bansky Paris Invasion !\"", + "http://www.theguardian.com/artanddesign/2016/jan/24/banksy-uses-new-artwork-to-criticise-use-of-teargas-in-calais-refugee-camp": "Banksy's new artwork criticises use of teargas in Calais refugee camp Art and design The Guardian", + "http://dpreview.com/": "Digital Camera Reviews and News: Digital Photography Review: Forums, Glossary, FAQ", + "http://sebastianruder.com/optimizing-gradient-descent/": "An overview of gradient descent optimization algorithms", + "http://leobard.twoday.net/stories/191619/": "semantic weltbild 2.0: why I love Patrick Sticklers URIQA approach (2004)", + "https://blog.paralleldots.com/data-science/named-entity-recognition-milestone-models-papers-and-technologies/": "Named Entity Recognition: Milestone Papers, Models and Technologies (2017)", + "https://developers.google.com/machine-learning/crash-course/?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News": "Google's Machine Learning Crash Course \u00a0\u00a0 Google Developers", + "https://www.nytimes.com/2019/05/15/science/synthetic-genome-bacteria.html": "Scientists Created Bacteria With a Synthetic Genome. Is This Artificial Life? 
- The New York Times", + "http://robohub.org/engineers-design-artificial-synapse-for-brain-on-a-chip-hardware/": "Engineers design artificial synapse for \u201cbrain-on-a-chip\u201d hardware Robohub", + "http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter_": "Sebastian Ruder sur Twitter : \"1/ Our paper Episodic Memory in Lifelong Language Learning...\"", + "http://openstructs.org/": "OpenStructs Open source data structs and semantic frameworks", + "https://lejournal.cnrs.fr/articles/en-ethiopie-lhistoire-de-lalibela-se-revele-peu-a-peu": "En \u00c9thiopie, l\u2019histoire de Lalibela se r\u00e9v\u00e8le peu \u00e0 peu CNRS Le journal", + "http://backstage.bbc.co.uk": "BBC Backstage :: Front Page :: BBC Backstage", + "http://blog.netapsys.fr/index.php/post/2012/06/02/m2eclipse-%3A-Maven-dans-Eclipse": "m2eclipse : D\u00e9clarez vos projets Maven dans Eclipse - Netapsys Blog", + "http://via-sahel-toulouse.over-blog.com/article-967205-6.html": "Reforestation en Pays dogon", + "http://readwrite.com/2013/11/27/github-pages-explained#awesm=~oowPZQBq7kbIj5": "How To Use GitHub Pages To Make Web Sites While Learning Code \u2013 ReadWrite", + "https://www.stardog.com/blog/extending-nlp/": "Extending NLP - Stardog", + "http://www.semanlink.net/doc/2021/10/%C2%AB_pandora_papers_%C2%BB_plongee_mo": "\u00ab\u00a0Pandora Papers\u00a0\u00bb\u00a0: plong\u00e9e mondiale dans les secrets de la finance offshore", + "http://www.apple.com/customer-letter/": "A Message to Our Customers - Apple", + "https://news.cnrs.fr/opinions/imagine-living-in-a-parallel-world?utm_content=bufferbcc95&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Imagine Living in a Parallel World CNRS News", + "http://www.protopage.com": "Protopage - AJAX personal bookmarks springboard", + "https://github.com/epfml/sent2vec": "epfml/sent2vec: General purpose unsupervised sentence representations", + "http://www.itworld.com/security/177163/30-ridiculously-dumb-tech-warning-labels": "30 ridiculously dumb tech warning labels ITworld", + "http://itunes.unice.fr/": "UNS sur iTunes U", + "http://code.google.com/p/linked-data-api/wiki/JSONFormats": "Survey - Existing JSON RDF formats", + "http://json-ld.org/": "JSON-LD - Expressing Linked Data in JSON", + "http://www.wdl.org/en/": "World Digital Library Home", + "http://sig.ma/": "sig.ma - Semantic Information MAshup", + "https://blog.openai.com/language-unsupervised/": "Improving Language Understanding with Unsupervised Learning", + "http://www.bladi.net/forum//archive/index.php?t-19078.html": "chansons de oum khaltoum [Archives] - Bladi.net", + "http://www.lemonde.fr/sciences/article/2012/06/21/supervirus-h5n1-les-secrets-de-fabrication-du-mutant-devoiles_1722681_1650684.html": "Supervirus H5N1 : les secrets de fabrication du mutant d\u00e9voil\u00e9s", + "http://wikis.sun.com/display/Jersey/Overview+of+JAX-RS+1.0+Features": "Overview of JAX-RS 1.0 Features - Jersey: RESTful Web services made easy - wikis.sun.com", + "http://www.semanlink.net/doc/2021/10/peter_bloem_sur_twitter_clev": "Peter Bloem sur Twitter : \"Clever idea. 
When you use augmentation, why throw away the information of which instances are augmentations of each other?\" / Twitter", + "http://news.bbc.co.uk/1/hi/sci/tech/4940654.stm": "BBC NEWS - Salvage prospect for 'junk' DNA", + "http://bugbrother.blog.lemonde.fr/2013/06/30/pourquoi-la-nsa-espionne-aussi-votre-papa-oupas/": "Pourquoi la NSA espionne aussi votre papa (#oupas) BUG BROTHER", + "http://jpspan.sourceforge.net/wiki/doku.php?id=javascript:xmlhttprequest:snippets:request": "javascript:xmlhttprequest:snippets:request [JPSPAN]", + "http://answers.semanticweb.com/questions/10161/is-visually-hidden-rdfa-an-anti-pattern": "Is visually hidden RDFa an anti-pattern? - ANSWERS", + "http://www.semanlink.net/doc/2019/11/%C2%AB_l%E2%80%99effondrement_de_la_vie_sous": "\u00ab\u00a0L\u2019effondrement de la vie sous nos latitudes reste largement sous le radar m\u00e9diatique\u00a0\u00bb", + "https://www.monkeyoverflow.com/#/hologan-unsupervised-learning-of-3d-representations-from-natural-images/": "HOLOGAN: UNSUPERVISED LEARNING OF 3D REPRESENTATIONS FROM NATURAL IMAGES", + "https://insis.cnrs.fr/fr/personne/thierry-poinsot": "Utilisation de l\u2019hydrog\u00e8ne comme moyen de stockage des \u00e9nergies renouvelables - Thierry Poinsot INSIS", + "https://forums.docker.com/t/where-are-images-stored-on-mac-os-x/17165": "Where are images stored on Mac OS X? - Docker for Mac - Docker Forums", + "http://www.semanlink.net/doc/2021/07/%C2%AB_amkoullel_l%E2%80%99enfant_peul_%C2%BB_f": "\u00ab\u00a0Amkoullel, l\u2019enfant peul\u00a0\u00bb, fresque vivante d\u2019une jeunesse malienne au d\u00e9but du XXe\u00a0si\u00e8cle", + "http://www.bbc.com/news/science-environment-25576718": "BBC News - China cloning on an 'industrial scale'", + "http://bnode.org/blog/2008/01/22/grawiki-a-wiki-and-aggregator-for-graph-shaped-data": "Grawiki - A Wiki (and aggregator) for graph-shaped data - benjamin nowack's blog", + "http://www.blacksmithinstitute.org/ten.php": "The World\u2019s Worst Polluted Places - Blacksmith Institute", + "http://www.seoskeptic.com/bing-mounts-a-personal-offensive-against-googles-knowledge-grap/": "Bing Mounts a Personal Offensive Against Google's Knowledge Graph", + "http://google-opensource.blogspot.com/2008/07/protocol-buffers-googles-data.html": "Google Open Source Blog: Protocol Buffers: Google's Data Interchange Format", + "http://www2012.wwwconference.org/program/accepted-papers/": "Tracks & Accepted papers www2012", + "http://www.nytimes.com/2010/01/11/opinion/11krugman.html?th&emc=th": "Learning From Europe", + "https://github.com/apigee-labs/rapier": "Rapier: specification language created by Apigee", + "https://web.stanford.edu/group/brainsinsilicon/": "Brains in Silicon", + "http://www.henriverdier.com/": "Henri Verdier Blog", + "http://lsa.colorado.edu/papers/JASIS.lsi.90.pdf": "Indexing by Latent Semantic Analysis - Deerwester et al. 
(1990)", + "http://www.internetactu.net/2015/10/12/crispr-la-technologie-qui-bouleverse-la-biotech/": "CRISPR, la technologie qui bouleverse la biotech \u00ab InternetActu.net", + "http://pipes.deri.org/": "DERI Pipes", + "https://brinxmat.wordpress.com/2014/01/27/rdf-its-difficult-nasty-horrible-and-i-hate-it/": "RDF, it\u2019s difficult, nasty, horrible and I hate it Brinxmat's blog", + "http://www.semanlink.net/doc/2020/01/syncing_your_blog_with_your_pc_": "Syncing your blog with your PC, and using your word processor \u00b7 fast.ai", + "http://incubator.apache.org/clerezza/": "Welcome to Apache Clerezza", + "http://makolab.com/fr/software/semantic/": "R\u00e9seau des Significations", + "http://www.inf.unibz.it/~franconi/dl/course/dlhb/dlhb-01.pdf": "An introduction to Description Logics - Daniele Nardi, Ronald J. Brachman", + "http://www.nytimes.com/2010/06/20/magazine/20Computer-t.html": "Smarter Than You Think - I.B.M.'s Supercomputer to Challenge 'Jeopardy!' Champions - NYTimes.com", + "http://www.grokker.com": "Grokker - A New Way to Look at Search", + "http://scuttle.org/": "Scuttle: Store, share and tag your favourite links", + "http://arethuse1.free.fr/": "M\u00e9sopotamie : un portail de l'Orient ancien", + "http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000508.html": "[goodrelations] vso:feature, dbPedia and productontology", + "http://www.space.com/scienceastronomy/050509_blackhole_birth.html": "Creation of Black Hole Detected", + "http://www.jgraph.com/index.html": "Java Graph Visualization and Layout", + "https://benlog.com/2016/02/18/on-apple-and-the-fbi/": "On Apple and the FBI Benlog", + "https://jakevdp.github.io/PythonDataScienceHandbook/index.html": "Python Data Science Handbook (Jake VanderPlas)", + "https://www.researchgate.net/publication/221501367_Lingo_Search_Results_Clustering_Algorithm_Based_on_Singular_Value_Decomposition": "Lingo: Search Results Clustering Algorithm Based on Singular Value Decomposition (2004) (paper)", + "https://www.france-universite-numerique-mooc.fr/": "FUN - France Universit\u00e9 Num\u00e9rique", + "http://www.semanlink.net/doc/2020/12/knowledge_base_embedding_by_coo": "Knowledge Base Embedding By Cooperative Knowledge Distillation - ACL Anthology", + "http://www.infoworld.com/d/open-source-software/jeff-hawkins-where-open-source-and-machine-learning-meet-big-data-224069": "Jeff Hawkins: Where open source and machine learning meet big data Open Source Software - InfoWorld", + "http://www.mturk.com/mturk/welcome": "Amazon Mechanical Turk - Welcome", + "http://tagcentral.net/index.php?tag=rdf&submit=Get+Tag": "", + "http://architects.dzone.com/articles/solr-hadoop-big-data-love": "Solr + Hadoop = Big Data Love Architects Zone", + "https://www.youtube.com/watch?v=fJCtaNRxg9M&feature=youtu.be": "How to Build Linked Data APIs with JSON LD and Hydra - YouTube", + "http://planb.nicecupoftea.org/archives/001292.html": "sparqling days / iphoto rdf export", + "http://www.opendatasoft.com/fr/": "OpenDataSoft Plateforme clef-en-main pour l'open data", + "http://www.lespetitescases.net/quel-evenement-ou-comment-contextualiser-le-triplet": "Quel \u00e9v\u00e9nement !? 
Ou comment contextualiser le triplet Les petites cases", + "http://www.foreignpolicy.com/articles/2014/04/28/france_thomas_piketty_capital_in_the_twenty_first_century": "France Is Not Impressed with Thomas Piketty", + "http://music.blog.lemonde.fr/": "A la recherche des sons perdus - Blog LeMonde.fr", + "http://www.nuxeo.com/fr": "Gestion Documentaire (GED) Open Source par Nuxeo", + "http://dl.acm.org/citation.cfm?id=1498283": "Product configuration knowledge modeling using ontology web language", + "http://blogs.forbes.com/ciocentral/2011/01/20/danger-america-is-losing-its-edge-in-innovation/": "Danger: America Is Losing Its Edge In Innovation - Forbes", + "http://mondeca.wordpress.com/2008/01/19/un-moteur-de-transformation-rdf-base-sur-sparql-2/": "Un moteur de transformation RDF bas\u00e9 sur SPARQL (2) \u00ab Le\u00e7ons de Choses", + "http://dannyayers.com/2007/12/30/another-rdf-syntax": "Another RDF Syntax! (URI-embedded RDF)", + "http://www.altova.com/products_semanticworks.html": "Altova SemanticWorks", + "https://hub.docker.com/_/python/": "library/python - Docker Hub", + "http://singularityhub.com/2010/05/23/toyotas-robot-violinist-wows-crowd-at-shanghai-expo-2010-video/": "Toyota\u2019s Robot Violinist Wows Crowd At Shanghai Expo 2010 (Video) Singularity Hub", + "https://issues.apache.org/jira/browse/JENA-203": "[#JENA-203] support for Non Blocking Parsers - ASF JIRA", + "http://developer.apple.com/internet/": "Apple Developer Connection - Internet & Web", + "http://codahale.com/what-makes-jersey-interesting-parameter-classes/": "What Makes Jersey Interesting: Parameter Classes codahale.com", + "http://websearch.alexa.com/static.html?show=webtour/start": "Building your own search engine with Alexa", + "http://carlchenet.com/2016/01/22/le-danger-github/": "Le danger Github \u2013 Le blog de Carl Chenet", + "http://bratton.com/?p=5": "The Bratton Blog \u00bb Setting up a CVS server on Mac OSX in 4 steps and 4 minutes", + "http://www.semanlink.net/doc/2020/06/ibm_research_addressing_enterpr": "IBM Research addressing Enterprise NLP challenges in 2020", + "http://fr.wikipedia.org/wiki/L'Usage_du_monde": "L'Usage du monde - Nicolas Bouvier", + "http://lists.w3.org/Archives/Public/public-rdf-in-xhtml-tf/2007May/0050.html": "RDFa support in Jena GRDDL Reader (SVN)", + "https://www.forbes.com/sites/parmyolson/2018/10/30/on-mute-how-facebook-fell-behind-on-voice-technology/#22baabcf3b50": "How Facebook Failed To Build A Better Alexa (Or Siri)", + "http://users.jena.apache.narkive.com/NF0pn3kq/controlling-json-ld-output": "Jena: controlling JSON-LD output", + "http://clesnes.blog.lemonde.fr/2013/06/10/le-defi-dedward-snowden-a-barack-obama/": "Le d\u00e9fi d\u2019Edward Snowden \u00e0 Barack Obama Big Picture", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-776783,0.html": "Quand les ing\u00e9nieurs en herbe se projettent en 2020", + "http://www.w3.org/2005/ajar/tab": "Tabulator: Generic data browser", + "http://theswitchboard.ca": "", + "http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-369/": "Proceedings of the WWW2008 Workshop on Linked Data on the Web", + "http://www.forbes.com/sites/stevedenning/2016/01/23/why-the-system-is-rigged/#7b1345d8223d": "Why 'The System' Is Rigged And The U.S. 
Electorate Is Angry - Forbes", + "http://www.semanlink.net/doc/2019/08/how_mosquitoes_helped_shape_the": "How Mosquitoes Helped Shape the Course of Human History History Smithsonian", + "http://readwrite.com/2016/02/17/apple-wont-build-backdoor?utm_source=feedly&utm_medium=webfeeds": "Why Apple Is Fighting Back - ReadWrite", + "http://lists.w3.org/Archives/Public/public-lod/2013Jan/0013.html": "canonicURI property (public-lod@w3.org from January 2013)", + "https://lilianweng.github.io/lil-log/": "Lil'Log", + "http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1424": "Crunchbase & Semantic Web Interview (Remix - Update 1)", + "http://www.semanlink.net/doc/2020/01/a_call_to_minimize_distration": "A Call To Minimize Distration", + "http://www.ccs.neu.edu/home/kenb/csg112/synchronize.html": "Java Synchronization Tutorial", + "http://en.wikipedia.org/wiki/Carnival_in_Flanders_(film)": "La Kermesse h\u00e9ro\u00efque - Wikipedia, the free encyclopedia", + "http://www.nltk.org/book/ch02.html": "Accessing Text Corpora and Lexical Resources", + "http://www.liip.ch/fr": "Liip AG", + "https://www.slideshare.net/TessFerrandez/notes-from-coursera-deep-learning-courses-by-andrew-ng": "Notes from Coursera Deep Learning courses by Andrew Ng", + "http://www.semanlink.net/doc/2020/10/clarifying_exceptions_and_visua": "Clarifying exceptions and visualizing tensor operations in deep learning code", + "https://blog.twitter.com/engineering/en_us/topics/insights/2018/embeddingsattwitter.html": "Embeddings@Twitter", + "http://stackoverflow.com/questions/5692256/maven-best-way-of-linking-custom-external-jar-to-my-project": "java - Maven: best way of linking custom external JAR to my project? - Stack Overflow", + "http://africamix.blog.lemonde.fr/2009/03/28/les-statues-africaines-nen-finissent-pas-de-mourir/": "Les statues africaines n\u2019en finissent pas de mourir - africamix - Blog LeMonde.fr", + "http://africamix.blog.lemonde.fr/category/genocide/": "G\u00e9nocide - africamix - Blog LeMonde.fr", + "https://github.com/jsonld-java/jsonld-java/issues/101": "Serialization of JSON-LD using appropriate context \u00b7 Issue #101 \u00b7 jsonld-java/jsonld-java", + "http://arstechnica.com/tech-policy/news/2009/03/french-anti-p2p-law-toughest-in-the-world.ars": "French anti-P2P law toughest in the world", + "http://particletree.com/features/the-importance-of-rss": "particletree \u00b7 The Importance of RSS", + "http://jena.hpl.hp.com/wiki/SDB/Store_Description": "SDB/Store Description - Jena wiki", + "http://www.semweb.pro/file/2285?vid=download": "SemWeb.Pro 2011 : dossier de presse", + "http://linkeddata.jiscpress.org/": "Linked Data Horizon Scan", + "http://lab.arc90.com/2006/07/link_thumbnail.php#examples": "arc90 lab : tools : Link Thumbnail", + "http://robobees.info/": "Robobees", + "https://www.kdnuggets.com/2018/02/google-colab-free-gpu-tutorial-tensorflow-keras-pytorch.html": "Deep Learning Development with Google Colab, TensorFlow, Keras & PyTorch", + "http://decentralyze.com/2010/06/04/from-json-to-rdf-in-six-easy-steps-with-jron/": "From JSON to RDF in Six Easy Steps with JRON", + "http://purl.org/configurationontology": "Configuration ontology", + "http://news.bbc.co.uk/2/hi/technology/6291746.stm": "BBC NEWS Technology Robot unravels mystery of walking", + "http://www.semanlink.net/doc/2020/09/transfer_learning_machine_lea": "Transfer Learning - Machine Learning's Next Frontier (2017)", + "http://eprints.cs.univie.ac.at/4106/1/Mader2014_lod2.pdf": 
"Facilitating the Exploration and Visualization of Linked Data", + "http://freeculture.org/blog/2013/04/23/dont-let-the-myths-fool-you-the-w3cs-plan-for-drm-in-html5-is-a-betrayal-to-all-web-users/": "Free Culture Foundation \u00bb Blog Archive \u00bb Don\u2019t let the myths fool you: the W3C\u2019s plan for DRM in HTML5 is a betrayal to all Web users.", + "http://open.vocab.org/docs/AutomobileCataologType": "ov:AutomobileCataologType, a class in the OpenVocab RDF schema", + "http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAKdC0PsHEe4M=OZe=_vE3A+jH5k4HgdiyX=sJ4j7Uhf+m0=Wzw@mail.gmail.com%3e": "HELP about jena fuseki and NodeJS", + "http://www.hpl.hp.com/personal/bwm/rdf/jena/rssinjena.htm": "Processing A Site Summary Format with Jena", + "http://elda.googlecode.com/hg/deliver-elda/src/main/docs/index.html": "Elda -- an implementation of the Linked Data API", + "http://www.growing-gardens.org/": "Gardening in Portland Oregon Growing Gardens Digs at the Root of Hunger", + "http://fr.wikipedia.org/wiki/Liste_des_monuments_historiques_de_l'Orne": "Liste des monuments historiques de l'Orne - Wikip\u00e9dia", + "http://www.semanlink.net/doc/2020/04/retail_graph_walmart%E2%80%99s_produc": "Retail Graph \u2014 Walmart\u2019s Product Knowledge Graph", + "http://blog.intrapromote.com/category/seo-for-automotive/": "Seo For Automotive Above the Fold & Socially Acceptable", + "http://www.fast.ai/": "fast.ai \u00b7 Making neural nets uncool again", + "http://www.wired.com/autopia/2012/09/connected-car-innovation/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous": "Why Connected-Car Innovation Could Come From Outside the Auto Industry Autopia Wired.com", + "http://codebrief.com/2012/01/the-top-10-javascript-mvc-frameworks-reviewed/": "The Top 10 Javascript MVC Frameworks Reviewed", + "http://www.comtech-serv.com/webinar-rdf.shtml": "How Effective Use of Metadata and the Resource Description Framework (RDF) Can Be an Answer to Your DITA Nightmares", + "http://blog.iks-project.eu/iks-salzburg-workshop-june-2012/": "IKS Salzburg Workshop June 2012 IKS Blog \u2013 The Semantic CMS Community", + "https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0": "How to do Deep Learning on Graphs with Graph Convolutional Networks Part 2", + "http://lists.w3.org/Archives/Public/public-esw-thes/2008May/0010.html": "skos:Concept and \"real world things\"", + "https://www.youtube.com/watch?v=uBr_jOyrdbk": "Les fran\u00e7aises championnes du monde du 4x100m (WC Paris 2003) HQ - YouTube", + "http://news.zdnet.com/2102-3513_22-6078976.html?tag=printthis": "Red Hat project brings open-source to digital media Tech News on ZDNet", + "http://www.projectforum.com": "CourseForum Technologies Home", + "https://blog.algolia.com/inside-the-engine-part-6-handling-synonyms-the-right-way/?utm_source=twitter&utm_campaign=enginepart6&utm_medium=cpm": "Inside the Algolia Engine Part 6 \u2014 Handling Synonyms the Right Way Milliseconds Matter", + "http://www.jguru.com/faq/view.jsp?EID=1030399": "How to get international unicode characters from a a form input field/servlet parameter into a string?", + "https://www.lemonde.fr/afrique/article/2019/03/20/ghana-le-petit-pays-qui-voit-grand_5438952_3212.html": "Ghana, le petit pays qui voit grand", + "https://github.com/explosion/talks/blob/master/2018-04-12__Embed-Encode-Attend-Predict.pdf": "talks/2018-04-12__Embed-Encode-Attend-Predict.pdf at master \u00b7 explosion/talks \u00b7 GitHub", + 
"http://www.guardian.co.uk/world/2010/nov/30/nicolas-sarkozy-personality-embassy-cables": "WikiLeaks cables: 'Nicolas Sarkozy thin-skinned and authoritarian' World news guardian.co.uk", + "https://supernlp.github.io/2018/11/10/emnlp-2018/": "EMNLP 2018 Thoughts and Notes \u00b7 Supernatural Language Processing", + "https://medium.com/thoughts-on-media/death-of-hyperlink-the-aftermath-cb10ce79e014": "Death of Hyperlink: The Aftermath \u2014 Thoughts on Media \u2014 Medium", + "http://benlog.com/articles/2008/05/21/why-im-switching-to-yahoo-search/": "Benlog \u00bb Why I\u2019m switching to Yahoo Search", + "http://www.sysdeo.com/eclipse/tomcatpluginfr": "Sysdeo Eclipse Tomcat plugin", + "http://nkos.slis.kent.edu/": "NKOS Networked Knowledge Organization Systems and Services", + "http://gigaom.com/2014/05/02/darpa-is-working-on-its-own-deep-learning-project-for-natural-language-processing/?utm_content=buffer0a1bb&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "DARPA is working on its own deep-learning project for natural-language processing \u2014 Tech News and Analysis", + "http://googleresearch.blogspot.co.uk/2012/04/working-with-your-data-easier-and-more.html": "Working with your Data: Easier and More Fun", + "http://www.republicain-niger.com/index.asp?affiche=News_Display.asp&articleid=5614": "UN NIG\u00c9RIEN HONOR\u00c9 EN FRANCE Habibou Ma\u00eftournam, laur\u00e9at du prix PLUMEY 2009 de l\u2019Acad\u00e9mie des sciences - Le Republicain-Niger", + "https://reporterre.net/Exclusif-L-ex-Premier-ministre-du-Japon-Naoto-Kan-raconte-la-catastrophe-de": "L\u2019ex-Premier ministre du Japon, Naoto Kan, raconte la catastrophe de Fukushima", + "http://taskonomy.stanford.edu/": "Taskonomy Stanford", + "http://blogs.esa.int/rosetta/": "Rosetta blog", + "http://www.twine.com/user/fps": "Fran\u00e7ois-Paul Servant Twine", + "http://xtech06.usefulinc.com/schedule/detail/38": "XTech 2006: Making Connections: Exploring new forms of semantic browsing", + "http://passeurdesciences.blog.lemonde.fr/2013/10/01/le-mystere-de-la-plus-grande-eruption-volcanique-du-dernier-millenaire-est-resolu/": "Le myst\u00e8re de la plus grande \u00e9ruption volcanique du dernier mill\u00e9naire est r\u00e9solu Passeur de sciences", + "http://www.stanford.edu/class/cs124/kwc-unix-for-poets.pdf": "Unix for Poets", + "http://remacle.org/bloodwolf/textes/thucyeloge.htm": "Thucydide, la guerre du P\u00e9lopon\u00e8se : oraison fun\u00e8bre prononc\u00e9e par P\u00e9ricl\u00e8s", + "http://www.rfimusique.com/musiquefr/articles/091/article_16904.asp": "RFI Musique - L'Afrique prend la Bastille", + "http://taddei.zapto.org/TINS/": "TINS Is Not Sharepoint", + "http://readwrite.com/2013/07/12/how-an-engineering-toy-for-girls-went-from-kickstarter-to-bestseller?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed:+readwriteweb+(ReadWriteWeb)#awesm=~obnkAaJdbjjbl1": "How An Engineering Toy For Girls Went From Kickstarter To Bestseller \u2013 ReadWrite", + "https://lejournal.cnrs.fr/articles/le-vivant-a-sa-matiere-noire": "Le vivant a sa mati\u00e8re noire CNRS Le journal", + "http://riteshkrmodi.blogspot.fr/2014/06/swagger-integration-jersey.html": "Swagger integration with Jersey", + "http://blog.dbtune.org/post/2011/06/28/Using-RDFa-for-testing-templates": "Using RDFa for testing templates - DBTune blog", + "http://lifemap.univ-lyon1.fr/": "Lifemap", + "http://www.fastcoexist.com/3041719/change-generation/work-is-bullshit-the-argument-for-antiwork": "Work Is Bullshit: The Argument For \"Antiwork\" Co.Exist 
ideas + impact", + "http://www.kingtutone.com": "Ancient Egypt - Includes pyramids, pharaohs, queens, and more.", + "http://manu.sporny.org/2013/microdata-downward-spiral/": "The Downward Spiral of Microdata The Beautiful, Tormented Machine", + "https://docs.docker.com/engine/userguide/": "Docker User guide", + "http://jmlr.org/proceedings/papers/v28/bi13.pdf": "Efficient Multi-label Classification with Many Labels (2013)", + "http://www.semanlink.net/doc/2019/07/ensemble_learning_to_improve_ma": "Ensemble Learning to Improve Machine Learning Results", + "http://www.semanlink.net/doc/2020/08/amit_chaudhary_sur_twitter_h": "Amit Chaudhary sur Twitter : \"How to learn transformers:...\"", + "http://events.linkeddata.org/ldow2008/papers/23-bouquet-stoermer-entity-name-system.pdf": "An Entity Name System for Linking Semantic Web Data", + "https://www.qwant.com/": "Qwant", + "http://www.semanlink.net/doc/2020/10/html_javascript_network_visua": "html - JavaScript network visualization? - Stack Overflow", + "http://lib.tkk.fi/Diss/2007/isbn9789512289851/": "Ora Lassila - Programming Semantic Web Applications: A Synthesis of Knowledge Representation and Semi-Structured Data", + "http://en.lodlive.it/": "LodLive - browsing the Web of Data", + "http://blog.bitflux.ch/archive/2004/7/5/spotlight_like_livesearch_added.html": "Spotlight-like livesearch", + "http://torrez.us/code/www-rdfa/": "RDFa Tutorial WWW2008 (demo files) - Elias Torres", + "http://www.geekexchange.com/elementary-my-dear-watson-will-ibms-quiz-show-champion-outgrow-humankind-73517.html": "Elementary, My Dear Watson - Will IBM\u2019s quiz show champion outgrow humankind?", + "https://plus.google.com/112609322932428633493/posts/N9ZjjdfHr4v": "Danny Ayers - Google+ - I've knocked together a serializer for Jena to the minimal\u2026", + "https://stackoverflow.com/questions/13131139/lemmatize-french-text": "Lemmatize French text - Stack Overflow", + "http://www.semanlink.net/doc/2020/12/digital_billboards_are_tracking": "Digital Billboards Are Tracking You - Consumer Reports", + "https://www.quora.com/What-are-some-good-papers-about-topic-modeling-on-Tweets": "What are some good papers about topic modeling on Tweets? - Quora", + "https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html": "Penn Treebank P.O.S. 
Tags", + "http://www.tumblr.com/": "Tumblr", + "http://manu.sporny.org/2013/drm-in-html5/": "DRM in HTML5 The Beautiful, Tormented Machine", + "http://www.lemonde.fr/international/article_interactif/2010/12/04/huit-ecrivains-africains-racontent-l-afrique-qui-vient_1447623_3210.html": "Huit \u00e9crivains africains racontent l'Afrique qui vient", + "https://www.lemonde.fr/planete/article/2018/08/20/l-huile-de-palme-menace-aussi-les-primates-d-afrique_5344271_3244.html": "L\u2019huile de palme menace aussi les primates d\u2019Afrique", + "http://apassant.net/blog/2009/07/02/technologies-du-web-s%C3%A9mantique-pour-lentreprise-20-th%C3%A8se-et-slides-en-ligne": "\"Technologies du Web S\u00e9mantique pour l'Entreprise 2.0\": Th\u00e8se et slides en ligne Alexandre Passant", + "http://googlewebmastercentral.blogspot.fr/2008/06/improved-flash-indexing.html": "Official Google Webmaster Central Blog: Improved Flash indexing", + "http://www.semanlink.net/doc/2020/11/tree_of_life_web_project": "Tree of Life Web Project", + "http://blogstats.wordpress.com/2009/10/27/sdmx-and-rdf-getting-acquainted/": "SDMX and RDF: Getting Acquainted \u00ab Blog about Stats", + "https://docs.spring.io/spring/docs/current/spring-framework-reference/index.html": "Spring Framework Documentation", + "https://www.research.ibm.com/ibm-q/": "Quantum Computing - IBM Q", + "http://www.networkworld.com/article/3045549/internet/big-data-to-be-made-little-individuals-to-mine-data-too.html": "Big data to be made little: Individuals to mine data too Network World", + "http://fgiasson.com/blog/index.php/2007/04/24/converting-your-wordpress-and-mediawiki-data-into-rdf-on-the-fly/": "Converting your WordPress and Mediawiki data into RDF on-the-fly at Frederick Giasson\u2019s Weblog", + "http://www.jibbering.com/faq/notes/closures/": "Javascript Closures", + "http://java-source.net": "", + "http://forums.fast.ai/t/jupyter-notebook-enhancements-tips-and-tricks/17064": "Jupyter Notebook Enhancements, Tips And Tricks - Part 1", + "https://www.nytimes.com/2018/09/04/science/brazil-museum-fire.html": "The Brazil Museum Fire: What Was Lost - The New York Times", + "http://www.w3.org/wiki/Web_and_Automotive": "Web and Automotive - W3C Wiki", + "https://www.youtube.com/watch?v=v4mAuMp7dHs": "Soyeya Niger", + "http://www.semanlink.net/doc/2020/04/camel_express_news_april_2020": "Camel Express News April 2020", + "http://www.semanlink.net/doc/2019/08/danielle_akini_la_camerounaise": "Danielle Akini, la Camerounaise qui parle aux ordinateurs", + "https://twitter.com/aureliengeron/status/1005483669929299969": "Aur\u00e9lien Geron sur Twitter : \"In @TensorFlow 1.9, it is much easier to use Keras with the Data API...\"", + "http://itsnat.sourceforge.net/php/spim/spi_manifesto_en.php": "The Single Page Interface Manifesto", + "http://updates.html5rocks.com/2015/03/creating-semantic-sites-with-web-components-and-jsonld": "Creating semantic sites with Web Components and JSON-LD", + "https://code.google.com/p/topic-modeling-tool/": "topic-modeling-tool - A graphical user interface tool for topic modeling - Google Project Hosting", + "http://www.semanlink.net/doc/2020/05/iguana_vs_snakes_%7C_planet_earth": "Iguana vs Snakes Planet Earth II - YouTube", + "http://www.vidizmo.com/": "Enterprise Video Portal & Streaming Media Solutions", + "http://www.audiocite.net/livres-audio-gratuits-planete-actuelle/index.html": "Audiocit\u00e9 - Livres audio", + "http://www-128.ibm.com/developerworks/java/library/j-threads1.html": "Threading lightly, Part 1: 
Synchronization is not the enemy", + "https://theintercept.com/2015/04/27/encrypting-laptop-like-mean/": "", + "https://stackoverflow.com/questions/20727552/abbreviation-detection": "nlp - Abbreviation detection - Stack Overflow", + "https://medium.com/neo4j/article-recommendation-with-personalized-pagerank-and-full-text-search-c0203dd833e8": "Article recommendation with Personalized PageRank and Full Text Search", + "https://addons.mozilla.org/fr/firefox/addon/249": "Html Validator :: Modules pour Firefox", + "http://www4.wiwiss.fu-berlin.de/bizer/d2rq/spec/20061030/#jena-assembler": "Jena Assembler (D2RQ V0.5 - User Manual)", + "http://www.hippasus.com/resources/mapdelicious/": "", + "http://wordpress.org/extend/plugins/poolparty-thesaurus/": "WordPress \u203a PoolParty Thesaurus \u00ab WordPress Plugins", + "http://fr.wikipedia.org/wiki/99_francs_(film)": "99 francs (film)", + "http://www.w3.org/QA/2011/10/steve_jobs.html": "Steve Jobs and the actually usable computer - W3C Blog", + "http://www.nytimes.com/2016/03/28/business/dealbook/ethereum-a-virtual-currency-enables-transactions-that-rival-bitcoins.html?partner=rss&emc=rss": "Ethereum, a Virtual Currency, Enables Transactions That Rival Bitcoin\u2019s - The New York Times", + "http://www.flax.co.uk/blog/2013/12/17/principles-of-solr-application-design-%e2%80%93-part-2-of-2/": "Principles of Solr application design \u2013 part 2 of 2", + "https://github.com/rdfjs/rdfjs.org": "rdfjs/rdfjs.org", + "http://noone.org/blog/English/Computer/Web/Blosxom/Blosxom%20Tagging%20Plugin%20Version%200.02.futile": "Blosxom Plugin Tagging", + "http://semarglproject.org": "Semargl: better linked data processing", + "http://www.rsf.org/article.php3?id_article=24888": "100\u00e8 jour de d\u00e9tention pour Moussa Kaka\u00a0: Reporters sans fronti\u00e8res exprime sa solidarit\u00e9 avec son correspondant incarc\u00e9r\u00e9", + "http://iswc2008.semanticweb.org/workshops/ontology-supported-business-intelligence/": "Ontology-supported Business Intelligence (ISWC 2008)", + "http://www.europe-solidaire.org/spip.php?article7952": "[Europe Solidaire Sans Fronti\u00e8res] Equateur\u00a0: Les marais noirs de Texaco", + "http://www.nasdaq.com/press-release/atos-and-its-partners-launch-the-datalift-platform-20140324-00666": "Atos and its partners launch the DataLift platform - NASDAQ.com", + "http://www.semanticuniverse.com/articles-semantic-technology-and-master-data-management.html": "Semantic Technology and Master Data Management Semantic Universe", + "http://www.w3.org/TR/r2rml/": "R2RML: RDB to RDF Mapping Language", + "http://www.semanlink.net/doc/2021/08/cynthia_fleury": "Cynthia Fleury", + "http://www.semanlink.net/doc/2020/12/google_tapas_base_finetuned_wtq": "google/tapas-base-finetuned-wtq \u00b7 Hugging Face", + "http://www.lemonde.fr/livres/article/2012/08/01/le-romancier-americain-gore-vidal-meurt-a-86-ans_1740843_3260.html": "Le romancier am\u00e9ricain Gore Vidal meurt \u00e0 86 ans", + "http://www.seoskeptic.com/what-is-json-ld/": "What is JSON-LD? 
A Talk with Gregg Kellogg", + "https://stackoverflow.com/questions/22354094/pythonic-way-of-detecting-outliers-in-one-dimensional-observation-data?noredirect=1&lq=1": "python - Pythonic way of detecting outliers in one dimensional observation data - Stack Overflow", + "http://www.cert.org/advisories/CA-2000-02.html": "CERT Advisory CA-2000-02 Malicious HTML Tags Embedded in Client Web Requests", + "http://jena.sourceforge.net/ARQ/": "ARQ - A SPARQL Processor for Jena", + "http://www.semanlink.net/doc/2019/08/a_dive_into_spatial_search_algo": "A dive into spatial search algorithms", + "http://commons.apache.org/jcs/": "JCS - Java Caching System", + "http://bnode.org/blog/2010/01/26/microdata-semantic-markup-for-both-rdfers-and-non-rdfers": "Microdata, semantic markup for both RDFers and non-RDFers - benjamin nowack's blog", + "https://www.zbw.eu/fileadmin/pdf/forschung/2017-colloquium-galke-word-embeddings.pdf": "Evaluating the Impact of Word Embeddings on Similarity Scoring in Practical Information Retrieval (2017)", + "http://www.saudiaramcoworld.com/issue/200405/what.was.jiroft..htm": "Saudi Aramco World : What Was Jiroft?", + "http://www-128.ibm.com/developerworks/library/j-ruby/?ca=dgr-lnxw01RubyOffRails": "Ruby off the Rails", + "http://www.info.uni-karlsruhe.de/~frick/gd/index.html": "Graph Drawing", + "https://graphaware.com/neo4j/2017/10/03/efficient-unsupervised-topic-extraction-nlp-neo4j.html": "Efficient unsupervised keywords extraction using graphs", + "http://lucene.472066.n3.nabble.com/question-on-solr-ASCIIFoldingFilterFactory-td2780463.html": "Solr - User - question on solr.ASCIIFoldingFilterFactory", + "http://www.amberdown.net/2009/10/owl2-for-rdf-vocabs/": "Semai \u00bb Blog Archive \u00bb OWL 2 for RDF vocabularies", + "http://3roundstones.com/linking-enterprise-data/": "Linking Enterprise Data: the book", + "https://web.stanford.edu/~jurafsky/slp3/": "Speech and Language Processing", + "http://static.flickr.com/33/54411225_f3555d9409.jpg": "Euphrasie \u00e0 Grand-Popo", + "http://blogs.techworld.com/views-from-the-lab/2011/09/to-sql-or-nosql---thats-not-the-question/index.htm": "To SQL or NoSQL - that's not the question - Views from the Lab - Blogs - Technology Blog and Community from IT Experts - Techworld.com", + "http://yro.slashdot.org/story/13/11/22/1929234/tor-now-comes-in-a-box?utm_source=rss1.0mainlinkanon&utm_medium=feed": "Tor Now Comes In a Box - Slashdot", + "http://simon.incutio.com/notes/2006/summit/schachter.txt": "Joshua Schachter, del.icio.us: Things to look out for when building a large application.\n", + "http://linkeddata.org/": "linkeddata.org", + "https://www.zdnet.com/article/google-explores-ais-mysterious-polytope/": "Google explores AI's mysterious polytope ZDNet", + "http://www.lac.ox.ac.uk/sites/sias/files/documents/Schwarcz47.pdf": "Not black, not white: just the opposite. Culture, race and national identity in Brazil", + "http://news.bbc.co.uk/2/hi/science/nature/7399226.stm": "BBC NEWS Mars probe set for risky descent", + "http://web.archive.org/web/20001021022558/http://www.hypersolutions.fr/news.ssi": "Nouveaut\u00e9s sur le site hyperSOLutions (1998)", + "http://www.w3.org/QA/2011/11/schemaorg_and_rdfa_11_lite_how.html": "Schema.org and RDFa 1.1 Lite: how does it look now? 
- W3C Blog", + "http://lists.w3.org/Archives/Public/public-hydra/2014Mar/0145.html": "Re: How to avoid that collections \"break\" relationships from Ruben Verborgh on 2014-03-31 (public-hydra@w3.org from March 2014)", + "https://techcrunch.com/2016/07/06/france-wants-to-rethink-the-state-as-a-platform/": "France wants to rethink the state as a platform TechCrunch", + "http://www.readwriteweb.com/cloud/2010/12/host-your-own-delicious-altern.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29": "3 Ways to Host Your Own Delicious Alternative - ReadWriteCloud", + "https://github.com/andrewoma/dexx": "Dexx Collections: Persistent (immutable) collections for Java", + "http://www.colheradacultural.com.br/viagem/tag/chef-ofir-oliveira/": "chef ofir oliveira Colheres na Estrada", + "https://audisto.com/insights/guides/31/#303-see-other-93d33e2f8e9da7a6005e80632c05dcb8": "Redirects & SEO - The Complete Guide", + "http://distill.pub/": "Distill \u2014 Latest articles about machine learning", + "http://www.ldh-toulon.net/spip.php?article2018": "[LDH-Toulon] Plantu, Sarkozy et les mouches", + "http://htmlparser.sourceforge.net": "", + "http://www.runrev.com/": "Rev", + "http://blog.aksw.org/2010/aksw-coordinates-eu-funded-research-project-lod2-aiming-to-take-the-web-of-linked-data-to-the-next-level/": "blog.aksw.org \u00bb AKSW coordinates EU-funded research project LOD2 aiming to take the Web of Linked Data to the next level", + "http://events.linkeddata.org/ldow2012/": "Linked Data on the Web (LDOW2012) - Workshop at WWW2012, Lyon, France", + "https://github.com/castagna/jena-examples": "castagna/jena-examples \u00b7 GitHub", + "http://www.nytimes.com/2016/09/22/science/ancient-dna-human-history.html": "A Single Migration From Africa Populated the World, Studies Find - The New York Times", + "http://www.daml.ecs.soton.ac.uk/SSS-SWS04/02.pdf": "Semantic Negotiation: Co-identifying objects across data", + "http://www.w3.org/TR/grddl-scenarios/": "GRDDL Use Cases: Scenarios of extracting RDF data from XML documents", + "http://www.smashingmagazine.com/2014/03/04/introduction-to-custom-elements/": "A Detailed Introduction To Custom Elements \u2013 Smashing Magazine", + "http://semanticweb.com/session-for-the-rdfa-initiative-in-drupal-7-and-how-it-will-impact-the-semantic-web_b15681": "Q&A Session for \u201cThe RDFa initiative in Drupal 7, and how it will impact the Semantic Web\u201d - semanticweb.com", + "https://visualvm.dev.java.net/": "visualvm", + "http://www-128.ibm.com/developerworks/java/library/j-jtp09275.html?ca=dgr-lnxw01JavaUrbanLegends": "Java theory and practice: Urban performance legends, revisited", + "http://www.w3.org/2008/09/msnws/report": "Report from W3C Workshop on the Future of Social Networking", + "http://blogs.esa.int/rosetta/2015/06/14/rosettas-lander-philae-wakes-up-from-hibernation/": "Rosetta\u2019s lander Philae wakes up from hibernation Rosetta - ESA's comet chaser", + "http://www.w3.org/TR/rdfa-lite/": "RDFa Lite 1.1", + "https://en.wikipedia.org/wiki/Kyshtym_disaster": "Kyshtym disaster", + "http://www.lemonde.fr/economie/article/2012/07/02/peut-on-encore-produire-des-voitures-en-france_1727758_3234.html": "Peut-on encore produire des voitures en France ?", + "https://lejournal.cnrs.fr/articles/amarna-la-cite-disparue-dakhenaton?utm_content=bufferbae84&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer": "Amarna, la cit\u00e9 disparue d'Akhenaton CNRS Le journal", + 
"http://fr.slideshare.net/basistech/multilingual-search-and-text-analytics-with-solr-open-source-search-conference": "Multilingual Search and Text Analytics with Solr - Open Source Search\u2026", + "http://www.codecademy.com/": "Apprenez \u00e0 coder Codecademy", + "http://news.nationalgeographic.com/news/2008/09/080903-oldest-skeletons.html": "Oldest Skeleton in Americas Found in Underwater Cave?", + "http://stackoverflow.com/questions/10057095/installing-numpy-lion-with-python-2-7": "Installing numpy Lion with Python 2.7? - Stack Overflow", + "http://www.w3.org/TR/turtle/#in-html": "Embedding Turtle in HTML documents", + "http://www.semanlink.net/doc/2021/02/raphaelsty_kdmlm_combine_knowl": "raphaelsty/kdmlm: Combine knowledge bases with language models.", + "http://www.javaworld.com/article/2995526/development-tools/jump-into-java-micro-frameworks-part-1.html": "Jump into Java microframeworks, Part 1: Introduction JavaWorld", + "http://www.paleolands.org/": "Oregon Paleo Lands Institute", + "https://ruben.verborgh.org/blog/2018/12/28/designing-a-linked-data-developer-experience/#ldflex": "Designing a Linked Data developer experience Ruben Verborgh", + "http://www.ibiblio.org/hhalpin/homepage/publications/html/airedux/": "The Semantic Web: The Origins of Artificial Intelligence Redux", + "http://www.alvit.de/vf/en/essential-bookmarks-for-webdesigners-and-web-developers.html": "Bookmarks for web developers: a list of useful web-tools", + "http://www.illyriad.co.uk/blog/index.php/2011/08/fix-memory-leaks-ajax-page-replacement/": "(Fix) Memory Leaks: Ajax page replacement Illyriad - Beneath the Misted Land", + "http://www.xml.com/pub/a/2005/04/06/restful.html": "XML.com: Constructing or Traversing URIs?", + "http://www.h-net.org/~africa/threads/mamiwata.html": "Mami Wata", + "http://jubat.us/en/": "Jubatus : Distributed Online Machine Learning Framework", + "https://www.w3.org/blog/2017/02/on-eme-in-html5/?pk_campaign=feed&pk_kwd=on-eme-in-html5": "On EME in HTML5 W3C Blog", + "http://json-ld.org/spec/latest/json-ld-api-best-practices/": "Building JSON-LD APIs: Best Practices", + "http://www.howtocreate.co.uk/tutorials/javascript/domstructure": "JavaScript tutorial - DOM objects and methods", + "http://www.slideshare.net/LeeFeigenbaum/semantic-web-landscape-2009?type=powerpoint": "Semantic Web Landscape 2009", + "https://english.lasindias.com/blockchain-is-a-threat-to-the-distributed-future-of-the-internet": "The blockchain is a threat to the distributed future of the Internet", + "http://www.nlp-class.org/": "Natural Language Processing", + "https://github.com/castagna/SARQ": "castagna/SARQ \u00b7 GitHub", + "http://stackoverflow.com/questions/23996953/json-ldhydra-link-discovery/24066336#24066336": "angularjs - JSON-LD+Hydra link discovery - Stack Overflow", + "http://www.lespetitescases.net/RDFa-1-1-pour-corriger-les-erreurs-de-jeunesse": "RDFa 1.1 pour corriger les erreurs de jeunesse ? 
Les petites cases", + "http://www.lemonde.fr/planete/article/2012/08/06/suivez-l-atterrissage-de-curiosity-sur-mars-en-direct_1742760_3244.html": "Le robot Curiosity s'est pos\u00e9 avec succ\u00e8s sur la plan\u00e8te Mars", + "http://www.freshblurbs.com/blog/2015/02/27/code-on-demand-in-apis.html": "@inadarei: Code on Demand in APIs: Dumb Clients, Smart Endpoints", + "http://www.cs.man.ac.uk/~horrocks/Publications/download/2006/PaHo06a.pdf": "A comparison of two modelling paradigms in the Semantic Web", + "http://www.bloomberg.com/news/2012-03-05/ibm-s-watson-computer-gets-wall-street-job-one-year-after-jeopardy-win.html": "IBM\u2019s Watson Computer Gets a Wall Street Job - Bloomberg", + "http://www.lemonde.fr/idees/article/2014/05/16/poker-menteur-autour-de-l-interdiction-des-ogm_4419626_3232.html": "Poker menteur autour de l'interdiction des OGM", + "http://ultimategerardm.blogspot.fr/2013/11/wikidata-freebase-interview-with-denny.html": "Words and what not: #Wikidata & #Freebase - an #interview with Denny Vrande\u010di\u0107", + "https://medium.com/the-official-integrate-ai-blog/what-you-need-to-know-about-natural-language-processing-2c8240e6c38e": "What You Need to Know About Natural Language Processing", + "http://www.semanlink.net/doc/2019/08/en_inde_pres_de_deux_millions_": "En Inde, pr\u00e8s de deux millions de citoyens, la plupart musulmans, d\u00e9chus de leur nationalit\u00e9", + "https://www.theguardian.com/technology/2016/may/20/silicon-assassins-condemn-humans-life-useless-artificial-intelligence": "AI will create 'useless class' of human, predicts bestselling historian Technology The Guardian", + "http://www.osema.org.ve/": "OSEMA 2011. Ontology and Semantic Web for Manufacturing", + "http://blog.revolutionanalytics.com/2016/02/because-its-friday-the-mysterious-rotating-woman.html": "Because it's Friday: The mysterious rotating woman", + "http://www.fofomag.com/Index.asp?affiche=News_Display.asp&articleID=920&rub=Culture": "La mort d'une icone de la musique moderne nig\u00e9rienne: Moussa Poussy nous a quitt\u00e9.", + "http://www.servlets.com/isps/servlet/ISPViewAll": "ISPs Supporting Servlets", + "https://twitter.com/julie_grollier": "Julie Grollier (@julie_grollier) Twitter", + "http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEMA1UULWFE_0.html": "ESA - Results from Mars Express - Mars Express evidence for large aquifers on early Mars", + "http://www.semweb.pro/blogentry/3138": "Revue de presse SemWeb.Pro 2012 (SemWeb.Pro)", + "http://www.hpl.hp.com/techreports/2002/HPL-2002-216.pdf": "The case for generating URIs by hashing RDF content", + "http://www.lespetitescases.net/Petite-Poucette-au-secours-de-l-Open-Data": "Petite Poucette au secours de l'Open Data Les petites cases", + "http://www.javascriptkit.com/javatutors/closures.shtml": "JavaScript Closures 101- they're not magic", + "http://www.google.com/support/webmasters/bin/topic.py?topic=21997": "Rich snippets (microdata, microformats, RDFa)", + "http://stackoverflow.com/questions/5250923/http-content-negotiation-conflicts-in-jax-rs-jersey": "rest - HTTP content negotiation conflicts in JAX-RS/Jersey? 
- Stack Overflow", + "http://www.programmableweb.com/mashup/youtube-subtitle-inserter": "YouTube Subtitle Inserter - ProgrammableWeb Mashup Detail", + "http://stackoverflow.com/questions/5357442/how-to-inspect-javascript-objects": "JSON.stringify(object) - How to inspect Javascript Objects - Stack Overflow", + "http://www.plasticbag.org/archives/2005/10/on_the_bbc_annotatable_audio_project/": "On the BBC Annotatable Audio project... (plasticbag.org)", + "https://jbiomedsem.biomedcentral.com/articles/10.1186/s13326-017-0136-y": "RDFIO: extending Semantic MediaWiki for interoperable biomedical data management Journal of Biomedical Semantics Full Text", + "http://www.lemonde.fr/web/article/0,1-0@2-3232,36-894003,0.html": "Le \"meilleur des mondes\" de M. Sarkozy, par Herv\u00e9 Chneiweiss", + "http://terrier.org/docs/v3.5/dfr_description.html": "Divergence From Randomness (DFR) Framework", + "http://homepages.cwi.nl/~paulk/publications/SPE00.pdf": "Efficient Annotated Terms", + "http://fr.dbpedia.org/": "DBp\u00e9dia en fran\u00e7ais", + "http://communitygrids.blogspot.in/2007/11/maven-making-war-and-jar-at-same-time.html": "Marlon Pierce's Community Grids Lab Blog: Maven: Making a War and Jar at the Same Time", + "http://www.semanlink.net/doc/2020/02/l%E2%80%99evaluation_officielle_du_glyp": "L\u2019\u00e9valuation officielle du glyphosate de nouveau mise en cause", + "http://www.w3.org/2001/sw/interest/webschema.html": "Semantic Web Interest Group - Web Schemas Task Force", + "http://learn.arc.nasa.gov": "", + "https://github.com/wikimedia/wikidata-query-gui": "wikimedia/wikidata-query-gui", + "https://en.wikipedia.org/wiki/The_Electric_Horseman": "The Electric Horseman", + "https://anavarre.net/building-a-lightweight-and-portable-knowledge-base-for-fun-and-profit/": "Building a lightweight and portable knowledge base for fun and profit", + "http://www.semanlink.net/doc/2020/01/best_practices_for_ml_engineeri": "Best Practices for ML Engineering \u00a0\u00a0 Google Developers", + "http://www.ajaxpatterns.org/AJAXFrameworks": "Ajax Frameworks - Ajax Patterns", + "http://www.mail-archive.com/public-lod@w3.org/msg07196.html": "Re: Is 303 really necessary?", + "http://www.semanlink.net/doc/2019/08/absurd_creature_of_the_week_th": "Absurd Creature of the Week: The Parasitic Worm That Turns Snails Into Disco Zombies WIRED", + "http://internetactu.blog.lemonde.fr/2017/10/07/watson-lintelligence-artificielle-en-ses-limites/": "Watson : l\u2019Intelligence artificielle en ses limites InternetActu", + "http://lists.w3.org/Archives/Public/public-hydra/2014Apr/0054.html": "-- ISSUE-45: Introduce hydra:filter (subPropertyOf hydra:search) -- from Thomas Hoppe on 2014-04-20 (public-hydra@w3.org from April 2014)", + "http://www.semanlink.net/doc/2020/04/pdf_to_excel_pdftables": "pdftables.com: PDF to Excel converter - PDFTables", + "https://en.wikipedia.org/wiki/Lemmy_%28film%29": "Lemmy (film)", + "http://www.refer.sn/ethiopiques/article.php3?id_article=83&artsuite=0": "KASSEY\u00a0: FIGURE MYTHIQUE FEMININE DANS LES RECITS D\u2019ISSA KOROMBE", + "http://twiecki.github.io/blog/2017/02/08/bayesian-hierchical-non-centered/": "Why hierarchical models are awesome, tricky, and Bayesian", + "https://www.lemonde.fr/culture/article/2019/05/15/julieta-les-souvenirs-et-les-regrets-aussi_5462470_3246.html": "\u00ab\u00a0Julieta\u00a0\u00bb, de Pedro Almodovar, les souvenirs et les regrets aussi", + "http://www.semanlink.net/doc/2020/04/les_lecons_de_wuhan_pour_enraye": "Les le\u00e7ons de Wuhan pour enrayer 
l\u2019\u00e9pid\u00e9mie", + "http://jqueryui.com/": "jQuery UI", + "http://aperture.sourceforge.net/index.html": "Aperture Framework", + "http://lifehacker.com/five-best-text-editors-1564907215": "Five Best Text Editors", + "http://www.w3.org/2005/Incubator/cwl/": "W3C Common Web Language Incubator Group", + "http://booksearch.blogspot.com/2010/08/books-of-world-stand-up-and-be-counted.html": "Inside Google Books: Books of the world, stand up and be counted! All 129,864,880 of you.", + "https://www.lemonde.fr/afrique/article/2019/04/27/la-rdc-deuxieme-front-de-la-deforestation-mondiale_5455653_3212.html": "La RDC, deuxi\u00e8me front de la d\u00e9forestation mondiale", + "https://www.theguardian.com/technology/2018/mar/14/tech-big-data-capitalism-give-wealth-back-to-people": "Big data for the people: it's time to take it back from our tech overlords Technology The Guardian", + "http://mike2.openmethodology.org/w/images/8/89/Layered_Semantic_Enterprise_Architecture.png": "Layered_Semantic_Enterprise_Architecture.png", + "http://www.w3.org/2004/02/skos/": "Simple Knowledge Organisation Systems (SKOS) - home page", + "http://videolectures.net/eswc2014_tresp_machine_learning/": "Machine Learning with Knowledge Graphs - VideoLectures.NET", + "https://machinelearningmastery.com/encoder-decoder-recurrent-neural-network-models-neural-machine-translation/": "Encoder-Decoder Recurrent Neural Network Models for Neural Machine Translation", + "http://blog.sgo.to/2014/09/schemaorg-actions-implementations.html": "Hello World: schema.org actions implementations", + "https://fr.slideshare.net/fpservant/makolab-semanticday": "How to publish data about a range of cars - in 3 slides and 2 links (2013)", + "http://passeurdesciences.blog.lemonde.fr/2013/06/26/un-medecin-italien-veut-greffer-des-tetes-humaines/": "Un m\u00e9decin italien veut greffer des t\u00eates humaines Passeur de sciences", + "http://es.wikipedia.org/wiki/Mujeres_al_borde_de_un_ataque_de_nervios": "Mujeres al borde de un ataque de nervios", + "http://www.semanlink.net/doc/2020/09/%C2%AB_les_neonicotinoides_sont_des_": "\u00ab\u00a0Les n\u00e9onicotino\u00efdes sont des substances trop efficaces et trop persistantes pour que leur usage puisse \u00eatre contr\u00f4l\u00e9\u00a0\u00bb", + "http://www.petrikainulainen.net/programming/maven/running-solr-with-maven/": "Running Solr with Maven", + "http://en.wikipedia.org/wiki/Midnight_in_the_Garden_of_Good_and_Evil_(film)": "Midnight in the Garden of Good and Evil (film)", + "https://medium.com/@jakefuentes/the-immigration-ban-is-a-headfake-and-were-falling-for-it-b8910e78f0c5#.txou3qh52": "The Immigration Ban is a Headfake, and We\u2019re Falling For It \u2013 Medium", + "http://www.lemonde.fr/idees/article/2018/03/26/biodiversite-l-urgence-du-politique_5276421_3232.html": "Biodiversit\u00e9\u00a0: l\u2019urgence du politique", + "http://ask.metafilter.com/54805/Watch-Cable-TV-on-a-MacBook": "Watch Cable TV on a MacBook? Ask MetaFilter", + "http://www.deeplearningbook.org/": "Deep Learning (Ian Goodfellow and Yoshua Bengio and Aaron Courville)", + "https://devcenter.heroku.com/articles/local-maven-dependencies": "Adding Unmanaged Dependencies to a Maven Project", + "http://www.ldodds.com/blog/2010/12/rdf-and-json-a-clash-of-model-and-syntax/": "RDF and JSON: A Clash of Model and Syntax \u00ab Lost Boy", + "http://stackoverflow.com/questions/111102/how-do-javascript-closures-work": "How do JavaScript closures work? 
- Stack Overflow", + "https://hacked.com/ancient-viruses-hacked-human-brains/": "Ancient Viruses Hacked Human Brains", + "http://wiki.apache.org/solr/OpenNLP": "OpenNLP - Solr Wiki", + "http://snipsnap.org/space/start": "SnipSnap", + "http://www.bbc.com/news/health-32024158": "DNA of 'an entire nation' assessed - BBC News", + "https://plus.google.com/107429617152575897589/posts/iPmatxBYuj2": "Ian Hickson - Google+ - Discussions about DRM often land on the fundamental problem\u2026", + "http://jibbering.com/rdf-parser/": "Simple javascript RDF Parser and query thingy.", + "http://www.pbs.org/cringely/pulpit/pulpit20060112.html": "PBS I, Cringely . January 12, 2006 - Win Some, Lose Some", + "http://searchengineland.com/google-hummingbird-when-evolutionary-becomes-revolutionary-173740?goback=%2Egde_138726_member_5795206045756313600#%21": "Google Hummingbird: When Evolutionary Becomes Revolutionary", + "http://www.macdevcenter.com/pub/a/mac/collections/webserving.html": "MacDevCenter.com: Apache and Web Serving with Mac OS X", + "http://www.semanlink.net/doc/2020/11/visual_rdf": "Visual RDF", + "https://m.mediawiki.org/wiki/Wikidata_query_service/User_Manual#SPARQL_endpoint": "Wikidata query service/User Manual - MediaWiki", + "http://www.semanlink.net/doc/2020/11/chris_olah_sur_twitter_today": "Chris Olah sur Twitter : \"Today I realized I know almost nothing about the tree of life... What's the best resource for learning?\"", + "http://java.sun.com/products/servlet/Filters.html": "The Essentials of Filters", + "http://www.semanlink.net/doc/2019/06/speech_to_text_demo_watson": "Speech to Text Demo - Watson", + "http://www.w3.org/2003/12/semantic-extractor.html": "Semantic data extractor - QA @ W3C", + "https://transacl.org/ojs/index.php/tacl/article/view/582/158": "Improving Topic Models with Latent Feature Word Representations Nguyen Transactions of the Association for Computational Linguistics", + "https://page.mi.fu-berlin.de/rojas/neural/chapter/K7.pdf": "The backpropagation algorithm", + "https://www.theguardian.com/world/2018/jun/05/hostage-niger-islamic-state-group-they-want-to-kill-foreign-soldiers": "Niger Islamic State hostage: 'They want to kill foreign soldiers' The Guardian", + "http://www.procata.com/blog/archives/2007/10/28/working-with-php-5-in-mac-os-x-105/": "Working with PHP 5 in Mac OS X 10.5 (Leopard) - Professional PHP", + "http://rapid-i.com/content/view/181/190/": "Rapid - I, RapidMiner", + "https://www.sciencedirect.com/science/article/pii/S0172219018300103": "The state-of-the-art on Intellectual Property Analytics (IPA) - ScienceDirect (2018)", + "http://www.internetactu.net/2013/07/12/open-data-13-la-technique-a-t-elle-pris-le-pas/": "Open Data (1/3) : la technique a-t-elle pris le pas ? 
\u00ab InternetActu.net", + "http://www.laconferencedeparis.fr/": "La Conf\u00e9rence de Paris : Open Data et Gouvernement Ouvert", + "http://esw.w3.org/topic/SPARQL/Extensions/SPARQLScript": "SPARQL/Extensions/SPARQLScript - ESW Wiki", + "http://www.snee.com/bobdc.blog/2013/07/using-values-to-map-values-in.html": "Using VALUES to map values in a SPARQL query - bobdc.blog", + "http://diveintohtml5.info/storage.html": "Local Storage - Dive Into HTML5", + "http://www.typo3-media.com/blog/solr-search-request-handlers.html": "SOLR Search Request Handlers explained", + "https://www.bbc.com/news/science-environment-47755275#": "Chicxulub asteroid impact: Stunning fossils record dinosaurs' demise - BBC News", + "http://www.apanews.net/apa.php?article51458": "Un journaliste nig\u00e9rien tu\u00e9 dans l\u2019explosion d\u2019une mine \u00e0 Niamey", + "http://web2.wsj2.com/the_best_web_20_software_of_2005.htm": "The Best Web 2.0 Software of 2005", + "http://vcg.informatik.uni-rostock.de/~hs162/treeposter/poster.html": "A Visual Bibliography of Tree Visualization", + "https://nlp.stanford.edu/software/CRF-NER.shtml": "Stanford Named Entity Recognizer", + "https://scholar.google.com/citations?view_op=list_works&hl=fr&user=WNFqgy8AAAAJ": "Fran\u00e7ois-Paul Servant - Citations Google\u00a0Scholar", + "http://www.mondeca.com/foaf/voaf-doc.html": "Vocabularies of a Friend (VOAF)", + "http://sourceforge.net/mailarchive/forum.php?forum_name=dbpedia-discussion": "SourceForge.net: dbpedia-discussion", + "http://www.onjava.com/pub/a/onjava/excerpt/jebp_3/index2.html": "Caching with Servlets", + "https://github.com/NirantK/best-of-jupyter": "NirantK/best-of-jupyter: Jupyter Tips, Tricks, Best Practices with Sample Code for Productivity Boost", + "https://ieeexplore.ieee.org/document/7876817": "Enhancing Binary Classification by Modeling Uncertain Boundary in Three-Way Decisions - IEEE Journals & Magazine", + "https://www.lemonde.fr/idees/article/2018/08/07/peaux-d-ane-en-afrique-un-conte-moderne-a-la-chinoise_5340085_3232.html": "Commerce de peaux d\u2019\u00e2ne en Afrique, un conte moderne \u00e0 la chinoise", + "https://www.lemonde.fr/international/article/2018/11/30/north-sentinel-derriere-la-mort-d-un-missionnaire-une-longue-histoire-de-resistance_5391100_3210.html": "North Sentinel\u00a0: derri\u00e8re la mort d\u2019un missionnaire, une longue histoire de r\u00e9sistance", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5036618.stm": "BBC NEWS - Unknown creatures found in cave", + "http://www.semanlink.net/doc/2020/02/caraa_sur_twitter_probably_t": "CARAA sur Twitter : \"Probably the first photo of Notre Dame de Paris in 1838 !! 
(daguerreotype)\"", + "http://pisani.blog.lemonde.fr/pisani/2005/03/partager_nos_si.html": "Partager nos signets", + "https://www.nytimes.com/2018/03/17/us/politics/cambridge-analytica-trump-campaign.html": "How Trump Consultants Exploited the Facebook Data of Millions - The New York Times", + "http://www.semanlink.net/doc/2020/09/rdf2vec_org": "RDF2vec.org", + "http://www.w3.org/2007/03/RdfRDB/papers/d2rq-positionpaper/": "D2RQ \u2014 Lessons Learned", + "http://www.paulgraham.com/investors.html": "How to Present to Investors", + "http://www.bbc.com/news/technology-36376966": "Foxconn replaces '60,000 factory workers with robots' - BBC News", + "http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/": "TreeTagger - a part-of-speech tagger for many languages", + "http://www.theguardian.com/commentisfree/2013/dec/30/we-need-to-talk-about-ted": "We need to talk about TED Benjamin Bratton theguardian.com", + "http://datahub.io/group/country-sn": "Senegal - the Datahub", + "http://www.semanlink.net/doc/2020/10/nasa%E2%80%99s_osiris_rex_spacecraft_co": "NASA\u2019s OSIRIS-REx Spacecraft Collects Significant Amount of Asteroid NASA", + "http://www.semanlink.net/doc/2020/04/pdf2table_a_method_to_extract_": "pdf2table: A Method to Extract Table Information from PDF Files", + "https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/": "Visualizing A Neural Machine Translation Model (Mechanics of Seq2seq Models With Attention) \u2013 Jay Alammar", + "http://www.correiobragantino.com.br/arq_gastronomia/gastronomia.html": "Correio Bragantino News - CB NEWS :: Bragan\u00e7a - Par\u00e1 - Brasil", + "https://www.huffingtonpost.fr/2018/11/21/les-spectateurs-du-2e-concert-de-lauryn-hill-a-paris-ont-adore-et-ils-lont-fait-savoir_a_23596467/": "Les spectateurs du 2e concert de Lauryn Hill \u00e0 Paris ont ador\u00e9 (et ils l'ont fait savoir) Le Huffington Post", + "http://www.semanlink.net/doc/2020/01/marcel_frohlich_sur_twitter_te": "Marcel Fr\u00f6hlich sur Twitter: Tech products, culture are \u2018designed intentionally for mass deception\u2019", + "https://deeplearning4j.org/lstm.html": "A Beginner's Guide to Recurrent Networks and LSTMs - Deeplearning4j", + "http://www.diplomatie.gouv.fr/fr/actions-france_830/documentaire_1045/diffusion-non-commerciale_5378/offre-dvd_5373/grand-ecran_10336/hommage-jean-rouch_10341/index.html": "Hommage \u00e0 Jean Rouch -Minist\u00e8re des Affaires \u00e9trang\u00e8res-", + "http://www.semanlink.net/doc/2020/09/leveraging_just_a_few_keywords_": "Leveraging Just a Few Keywords for Fine-Grained Aspect Detection Through Weakly Supervised Co-Training - ACL Anthology (2019)", + "http://www.svenskaakademien.se/en/nobel-lecture": "Bob Dylan's Nobel Lecture", + "http://www.ibm.com/developerworks/library/x-ajaxxml7/?ca=dgr-lnxw01AjaxMedia": "Ajax and XML: Ajax for media", + "http://www.bradshawfoundation.com/giraffe/": "The giraffe carvings of the Tenere desert", + "http://www.semanlink.net/doc/2020/11/text_enhanced_representation_le": "Text-Enhanced Representation Learning for Knowledge Graph (IJCAI 2016)", + "http://www.semanlink.net/doc/2020/05/20_000_roam_tags_with_spacy": "20,000 Roam Tags with Spacy", + "http://www.pbs.org/wnet/secrets/flash/catastrophe1_script.html": "", + "http://meryl.net/2008/01/22/175-data-and-information-visualization-examples-and-resources/": "Meryl.net \u00bb 175+ Data and Information Visualization Examples and Resources", + "http://www.henriverdier.com/2014/06/letat-innovateur-radical.html": 
"Henri Verdier Blog: L'Etat peut-il \u00eatre un innovateur radical ?", + "http://stackoverflow.com/questions/27818856/docker-add-warfile-to-official-tomcat-image": "boot2docker - Docker add warfile to official Tomcat image - Stack Overflow", + "http://tw.rpi.edu/weblog/2010/04/28/putting-open-facebook-data-into-linked-data-cloud/": "The Tetherless World Weblog \u00bb Putting open Facebook data into Linked Data Cloud", + "https://ibm.box.com/s/g72skghoelpd7c9f1swen960mcb3ujpd": "\"IBM Box\" (zone de partage)", + "http://www.the-american-interest.com/article.cfm?piece=1352": "The End of the University as We Know It - Nathan Harden - The American Interest Magazine", + "http://ivan-herman.name/2008/11/14/calais-release-4-and-the-linking-data-cloud/": "Calais Release 4 and the Linking Data cloud\u2026 \u00ab Ivan\u2019s private site", + "http://stackoverflow.com/questions/30571/how-do-i-tell-maven-to-use-the-latest-version-of-a-dependency": "java - How do I tell Maven to use the latest version of a dependency? - Stack Overflow", + "http://www.arte.tv/guide/fr/051400-000-A/lanceurs-d-alerte-coupables-ou-heros": "Lanceurs d'alerte : coupables ou h\u00e9ros ? ARTE", + "http://www.le-tigre.net/Marc-L.html": "Marc L***", + "http://hsivonen.iki.fi/producing-xml/": "HOWTO Avoid Being Called a Bozo When Producing XML", + "http://dynamicorange.com/2012/04/24/rdf-big-data-and-the-semantic-web/": "RDF, Big Data and The Semantic Web I _Really_ Don't Know", + "http://www.lemondeinformatique.fr/dossiers/lire-business-intelligence-l-avenir-s-annonce-sans-nuages-91.html": "Business Intelligence : L'avenir s'annonce sans nuages - D\u00e9cisionnel - Le Monde Informatique", + "http://www-128.ibm.com/developerworks/edu/x-dw-x-ultimashup1.html": "The ultimate mashup -- Web services and the semantic Web", + "http://www.openstreetmap.org/": "OpenStreetMap", + "http://ivan-herman.name/2010/05/28/self-documenting-vocabularies-using-rdfa/": "Self-documenting vocabularies using RDFa", + "http://prism-break.org/": "Opt out of PRISM, the NSA\u2019s global data surveillance program - PRISM BREAK", + "http://doc.rplug.renault.com/car-configurator/cold/coldjs/tutorial.html": "C2GWeb-js", + "http://wiki.tela-botanica.org/outildeterminationplantes/wakka.php?wiki=SpecificationsProjets": "OutilDeterminationPlantes:SpecificationsProjets", + "http://www.kuro5hin.org/print/2005/8/22/182159/251": "AJAX - beyond the buzzwords", + "http://scikit-learn.org/stable/modules/multiclass.html": "Multiclass and multilabel algorithms \u2014 scikit-learn documentation", + "http://www.michaelrakowitz.com/the-invisible-enemy-should-not-exist": "The invisible enemy should not exist", + "https://medium.com/@wgilliam/finding-data-block-nirvana-a-journey-through-the-fastai-data-block-api-c38210537fe4": "Finding Data Block Nirvana (a journey through the fastai data block API)", + "http://www.semanlink.net/doc/2019/07/mining_quality_phrases_from_mas": "Mining Quality Phrases from Massive Text Corpora (2015)", + "http://co-operating.systems/": "Co-operating Systems", + "http://www.scottbot.net/HIAL/index.html@p=19113.html": "Topic Modeling for Humanists: A Guided Tour", + "http://softwareengineering.stackexchange.com/questions/272302/use-map-instead-of-class-to-represent-data-rich-hickey": "java - \"Use map instead of class to represent data\" -Rich Hickey", + "http://isegserv.itd.rl.ac.uk/public/skos/2007/10/f2f/skos-owl-patterns.html": "Topic: Concept Semantics -- Patterns for Working With SKOS and OWL", + 
"http://mail-archives.apache.org/mod_mbox/jena-users/201306.mbox/%3C51ACA6CF.9000405@apache.org%3E": "Re: sparql performance parameters and limitations", + "http://tslearn.readthedocs.io/en/latest/index.html": "tslearn", + "https://www.linkedin.com/groups/Can-we-include-algorithms-programs-86246.S.5975435892695801858": "Can we include algorithms/programs in RDF database like PL*SQL in Oracle database?", + "http://www.semanlink.net/doc/2021/05/adventures_in_zero_shot_text_cl": "Adventures in Zero-Shot Text Classification", + "https://www.newyorker.com/magazine/2019/04/08/the-day-the-dinosaurs-died": "The Day the Dinosaurs Died The New Yorker", + "http://foaf.me/": "FOAF Me", + "http://en.wikipedia.org/wiki/Woman_in_the_Dunes": "The Woman in the Dunes - Wikipedia, the free encyclopedia", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-739553,0.html": "", + "http://m.mgafrica.com/article/2015-01-15-what-they-said-then-and-where-we-are-now#.Vg-LULT5TnS": "What US intelligence predicted about Africa today 15 years ago, and how terribly wrong they were Mail & Guardian Africa (Mobile edition)", + "http://modulaweb.fr/blog/2012/08/we-make-data-porn-les-donnees-sont-toujours-fausses/comment-page-1/#comment-406": "We make data porn : Les donn\u00e9es sont toujours fausses - Modulaweb \u2013 Web Open-Source et modulable \u00e0 Montpellier", + "http://www.schtuff.com": "www.schtuff.com - What is Schtuff?", + "http://www.hippasus.com/resources/mapdelicious": "", + "http://del.icio.us/url/950955a2af0f59f3607cbf9de59edfe2": "Semanlink on del.icio.us", + "http://www.cosmovisions.com/ChronoCroisades02.htm": "Les derni\u00e8res croisades (1217 - 1270)", + "http://animaux.blog.lemonde.fr/2013/09/27/889/": "Poules en batterie: des images de maltraitance censur\u00e9es par la justice Un \u00e9l\u00e9phant dans mon salon", + "http://www.sciam.com/article.cfm?articleID=00048144-10D2-1C70-84A9809EC588EF21&pageNumber=1&catID=2": "Science & Technology at Scientific American.com: The Semantic Web -- A new form of Web content that is meaningful to computers will unleash a revolution of new possibilities", + "http://semanticweb.com/graphs-make-the-world-of-data-go-round_b36195#more-36195": "Graphs Make The World Of Data Go Round - semanticweb.com", + "http://www.semanlink.net/doc/2019/05/pocketsphinx_wiki_ubuntu_fr_": "pocketsphinx [Wiki ubuntu-fr]", + "http://www.snee.com/bobdc.blog/2017/08/validating-rdf-data-with-shacl.html": "Validating RDF data with SHACL - bobdc.blog", + "http://www.semanlink.net/doc/2020/03/unsupervised_ner_using_bert_h": "Unsupervised NER using BERT - Hands-on NLP model review - Quora", + "http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/semwebclient/": "Semantic Web Client Library", + "http://i-semantics.tugraz.at/industry-track/bernhard-schandl-cluug": "Bernhard Schandl: Semantically enabled Personal Knowledge Management with cluug.com \u2013 I-SEMANTICS", + "http://djerma.nl/": "Cawyan Zarma Sanni", + "http://charlie.cu.cc/2012/06/how-add-external-libraries-maven/": "How to add external libraries in Maven - Charlie Wu", + "http://worrydream.com/LearnableProgramming/": "Learnable Programming", + "http://www.flickr.com/photos/iks_project/sets/72157630176990928/show/": "IKS Salzburg Workshop June 2012 - pictures", + "http://www.the325project.org/": "The 325 Project", + "http://www.nytimes.com/2006/11/12/business/12web.html": "Entrepreneurs See a Web Guided by Common Sense - New York Times", + "http://www.pbs.org/wnet/secrets/flash/catastrophe2_script.html": "", + 
"https://dl.dropboxusercontent.com/u/172199972/map/index.html": "Wikidata Map Interface", + "http://www.bisharat.net/Zarma/": "Zarma Dictionnary (Peace Corps / Niger)", + "http://zone47.com/crotos/": "CROTOS", + "https://cxexchange.niceincontact.com/apps/182334/passage-ai-conversational-chatbot-with-ainlp#!overview": "Passage AI Conversational Chatbot with AI/NLP by Passage AI", + "http://nlp.cs.nyu.edu/sekine/papers/li07.pdf": "A survey of named entity recognition and classification (2006)", + "https://towardsdatascience.com/deep-learning-for-ner-1-public-datasets-and-annotation-methods-8b1ad5e98caf": "Deep Learning for Named Entity Recognition #1: Public Datasets and Annotation Methods", + "http://www.mentalfloss.com/article/54853/our-interview-jeopardy-champion-arthur-chu": "Our Interview With Jeopardy! Champion Arthur Chu Mental Floss", + "https://www.fast.ai/2019/03/06/fastai-swift/": "fast.ai Embracing Swift for Deep Learning \u00b7 fast.ai", + "https://cloud.google.com/blog/products/gcp/google-patents-public-datasets-connecting-public-paid-and-private-patent-data": "Google Patents Public Datasets: connecting public, paid, and private patent data Google Cloud Blog", + "http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEMTFSULWFE_0.html": "ESA - Results from Huygens - Highlights of ESA\u2019s Huygens mission", + "http://www.dbin.org/": "DBin Project", + "http://code.google.com/p/pallet/": "pallet - A professionalization of the UMass project \"Mallet\" - Google Project Hosting", + "http://www.bbc.co.uk/news/science-environment-21668712": "BBC News - Herschel space telescope to go blind", + "http://institutoamazoniabrasil.com/ofir.php": "Instituto Amaz\u00f4nia Brasil: Ofir", + "http://www.ics.mq.edu.au/~cassidy/talks/semweb/semweb.html": "", + "http://www.wired.com/2016/03/sadness-beauty-watching-googles-ai-play-go": "The Sadness and Beauty of Watching Google\u2019s AI Play Go WIRED", + "http://www.virtualchaos.co.uk/blog/2008/04/23/www2008-day-2-ldow2008-workshop/": "VirtualChaos - Nadeem\u2019s blog \u00bb WWW2008: Day 2 - LDOW2008 Workshop", + "http://www.mathkang.org/default.html": "le Kangourou des mathematiques", + "http://stackoverflow.com/questions/36136885/swagger-map-of-string-object": "Swagger: map of - Stack Overflow", + "http://www.fiftyfoureleven.com/resources/programming/xmlhttprequest": "XMLHttpRequest & Ajax Based Applications - Links and Resources, Fiftyfoureleven.com", + "https://cloud.google.com/blog/products/ai-machine-learning/measuring-patent-claim-breadth-using-google-patents-public-datasets": "Measuring patent claim breadth using Google Patents Public Datasets Google Cloud Blog", + "http://www.semanlink.net/doc/2019/10/a_brief_history_of_knowledge_gr": "A Brief History of Knowledge Graph's Main Ideas: A tutorial, Claudio Gutierrez\u00a0and Juan F. Sequeda", + "http://xmlns.com/foaf/spec/": "FOAF Vocabulary Specification", + "http://www.sheridanprinting.com/14-websci4chRV610jmp/docs/p161.pdf": "Latent Dirichlet Allocation: stability", + "http://www.airtightinteractive.com/projects/related_tag_browser/app/": "", + "http://www.visualdataweb.org/relfinder.php": "RelFinder - Visual Data Web", + "http://www.semanlink.net/doc/2020/01/trust_but_verify_better_entit": "Trust, but verify! Better entity linking through automatic verification (2017)", + "https://www.technologyreview.com/s/608911/is-ai-riding-a-one-trick-pony/": "Is AI Riding a One-Trick Pony? 
- MIT Technology Review", + "http://www.metagraph.org/index.html": "MetaGraph.org", + "http://www.cnrs.fr/inee/communication/breves/b230.html": "Du nouveau sur la f\u00e9condation de la truffe (CNRS)", + "http://json-ld.org/spec/latest/": "JSON-LD specification", + "http://www.foopad.com/account/about": "Foopad :: The Very Simple Wiki", + "https://www.quora.com/How-can-I-use-machine-learning-to-propose-tags-for-content": "How can I use machine learning to propose tags for content? - Quora", + "http://ruder.io/deep-learning-optimization-2017/index.html": "Optimization for Deep Learning Highlights in 2017", + "https://link.springer.com/chapter/10.1007/978-3-642-30284-8_47": "Product Customization as Linked Data SpringerLink (ESWC-2012)", + "http://www.autorepair.eu.com/": "Auto Repair Information in the EU", + "http://www.semanlink.net/doc/2020/04/chasse_a_la_mouche_pour_eradiqu": "Pour \u00e9radiquer la lucilie bouch\u00e8re en Libye, des mouches m\u00e2les st\u00e9riles sont l\u00e2ch\u00e9es vers les femelles dont les larves d\u00e9vorent le b\u00e9tail (1991)", + "http://bigbrowser.blog.lemonde.fr/2016/02/26/les-destructions-de-palmyre-documentees/": "Les destructions de Palmyre document\u00e9es Big Browser", + "http://internetactu.blog.lemonde.fr/2015/06/20/la-batterie-delon-musk-peut-elle-tout-changer/": "La batterie d\u2019Elon Musk peut-elle tout changer ? InternetActu", + "https://www.youtube.com/watch?v=eJYFubZfbfY&list=PL0FF1370EBACD1DAD&index=13": "Silent Night, Holy Cow (Part 1) - YouTube", + "http://mafihe.hu/~bnc/feynman/": "", + "http://www.lemonde.fr/planete/article/2012/05/25/la-croissance-mondiale-va-s-arreter_1707352_3244.html": "\"La croissance mondiale va s'arr\u00eater\"", + "http://www.kdnuggets.com/2015/12/tensor-flow-terrific-deep-learning-library.html": "TensorFlow is Terrific \u2013 A Sober Take on Deep Learning Acceleration", + "http://www.sowbug.com/post/54892479898/ten-things-i-believe-about-bitcoin": "Mike Tsao puts the fun back in defunct. \u2014 Ten Things I Believe About Bitcoin", + "http://java-monitor.com/forum/showthread.php?t=22": "java.net.BindException: Address already in use:8080 ... uhm, who's listening? 
- Java-Monitor Forum", + "https://www.nytimes.com/2018/12/05/technology/facebook-emails-privacy-data.html": "Facebook Emails Show Its Real Mission: Making Money and Crushing Competition - The New York Times", + "https://github.com/structureddynamics/structWSF-Open-Semantic-Framework/blob/a7d230edfe308fc82064bba452ca488c1dde1a8d/framework/solr_schema_v1_2.xml": "structWSF-Open-Semantic-Framework/framework/solr_schema_v1_2.xml", + "http://lucene.472066.n3.nabble.com/Internationalization-td475088.html": "Solr - User - Internationalization", + "http://joelleguillais.blogspot.com/": "Les ateliers et les publications de la romanci\u00e8re Jo\u00eblle Guillais", + "http://www.plasticbag.org/files/native/": "Native to a Web of Data (Tom Coates, plasticbag.org)", + "http://boingboing.net/2013/08/17/cross-a-border-lose-your-eboo.html": "Cross a border, lose your ebooks - Boing Boing", + "http://www.kanzaki.com/works/2016/pub/image-annotator?u=/works/2017/annot/youtube-norrington-eroica-mov2.json": "Demo of #mediafragments combined with web #annotations on a @YouTube video", + "http://www.la-grange.net/2004/12/08.html#data": "Un web s\u00e9mantique pour l'utilisateur - 2004-12-08 - Carnet Web Karl", + "https://www.coursera.org/course/wh1300?utm_campaign=2013-august-newsletter&utm_date=1377177003&utm_source=newsletter&utm_user=97328&utm_medium=email&utm_recommendation=1&utm_variant=401": "A History of the World since 1300 Coursera", + "http://perso.wanadoo.fr/philippe.boeuf/robert/physique/physiquequest.htm": "Questions de physique", + "http://blogs.esa.int/rosetta/2015/06/26/rosetta-and-philae-searching-for-a-good-signal/": "Rosetta and Philae: Searching for a good signal Rosetta - ESA's comet chaser", + "http://rdfa.info/2010/04/22/facebook-adopts-rdfa/": "RDFa \u00bb Blog Archive \u00bb Facebook adopts RDFa", + "http://news.softpedia.com/news/Nanogenerator-Could-Draw-Energy-from-Human-Blood-60580.shtml": "Nanogenerator Could Draw Energy from Human Blood - Unlike the mythical vampire, it will help people - Softpedia", + "http://www.chant-orthoptere.com/": "Chant Orthopt\u00e8res", + "http://www-128.ibm.com/developerworks/java/library/j-jena/?ca=dgr-jw766j-jena": "Introduction to Jena", + "http://nicodjimenez.github.io/2017/10/08/tensorflow.html": "Tensorflow sucks", + "http://www.hrw.org/reports/1999/rwanda/index.htm#TopOfPage": "Leave None to Tell the Story: Genocide in Rwanda (Human Rights Watch Report, March 1999)", + "http://www.politico.com/magazine/story/2017/04/23/what-a-1973-french-novel-tells-us-about-marine-le-pen-steve-bannon-and-the-rise-of-the-populist-right-215064": "What a 1973 French Novel Tells Us About Marine Le Pen, Steve Bannon and the Rise of the Populist Right - POLITICO Magazine", + "https://ai.googleblog.com/2018/10/google-at-emnlp-2018.html": "Google AI Blog: Google at EMNLP 2018", + "http://webmasters.stackexchange.com/questions/37721/does-a-303-status-code-pass-page-rank": "seo - Does a 303 status code pass page rank? 
- Webmasters Stack Exchange", + "http://drupalmotion.com/article/drupalcon-munich-highlights-and-backbonejs": "DrupalCon Munich highlights and Backbone.js Drupal motion", + "http://business-intelligence.developpez.com/cours/": "Tutoriels Business Intelligence", + "http://vocamp.org/mw/index.php?title=HypiosVoCampParisMay2010": "HypiosVoCampParisMay2010", + "http://bigbrowser.blog.lemonde.fr/2015/09/02/les-islandais-se-mobilisent-pour-accueillir-davantage-de-refugies/": "Les Islandais se mobilisent pour accueillir davantage de r\u00e9fugi\u00e9s Big Browser", + "http://hyperfp.blogspot.com/": "Blog d'hyperfp sur blogger", + "http://pinboard.in/": "Pinboard - antisocial bookmarking", + "http://news.independent.co.uk/world/asia/article1222214.ece": "A meeting of civilisations: The mystery of China's celtic mummies", + "http://www-128.ibm.com/developerworks/java/library/j-thread.html": "Writing multithreaded Java applications", + "https://www.datacamp.com/community/tutorials/machine-learning-python": "Python Machine Learning: Scikit-Learn Tutorial (Article)", + "https://en.wikipedia.org/wiki/Neural_backpropagation": "Neural backpropagation - Wikipedia, the free encyclopedia", + "https://jersey.java.net/documentation/latest/index.html": "Jersey User Guide", + "http://nickgrossman.is/post/76566568384/should-we-regulate-the-internet-the-real-world-way-or": "Nick Grossman's Slow Hunch \u2014 Should we regulate the Internet the real world way or the real world the Internet way?", + "http://www.blueclawsearch.co.uk/blog/rdfa-format-data-what-is-its-impact-and-when-should-you-implement/": "RDFa format data: what is its impact and when should you implement?", + "http://www.lemonde.fr/idees/article/2012/04/12/michel-serres-cette-campagne-presidentielle-est-une-campagne-de-vieux-pepes_1684573_3232.html": "Michel Serres : \"Cette campagne pr\u00e9sidentielle est une campagne de vieux p\u00e9p\u00e9s !\"", + "http://youtube.com/ucberkeley": "UC Berkeley to post full lectures to YouTube", + "http://www.semanlink.net/doc/2020/12/le_niger_appele_a_elire_le_succ": "Le Niger appel\u00e9 \u00e0 \u00e9lire le successeur de Mahamadou Issoufou", + "http://hypermedia.research.glam.ac.uk/kos/STAR/": "Semantic Technologies for Archaeological Resources", + "http://scratch.mit.edu/": "Scratch Home imagine, program, share", + "http://www.interleaves.org/~rteeter/rss.html": "RSS: What it is, Where to get it, How to make it, How to use it", + "http://www.les-ernest.fr/lionel_zinsou": "Une vision optimiste de l'Afrique Les Ernest", + "http://esw.w3.org/mt/esw/archives/000048.html": "SWAD-Europe Weblog: FAQ: Using RDFS or OWL as a schema language for validating RDF", + "http://gears.google.com/": "Gears", + "https://medium.com/data-from-the-trenches/training-cutting-edge-neural-networks-with-tensor2tensor-and-10-lines-of-code-10973c030b8": "Training Cutting-Edge Neural Networks with Tensor2Tensor and 10 lines of code", + "http://www.arduino.cc/": "Arduino - HomePage", + "http://www.wired.com/wiredscience/2014/01/bells-theorem/": "The Experiment That Forever Changed How We Think About Reality - Wired Science", + "http://www.nltk.org/book/": "NLTK Book", + "http://www.vivevenezuela.com/foto_gran_sabana.htm": "Venezuela foto Tepuy della Gran Sabana", + "http://www.cs.jhu.edu/%7Evandurme/papers/VanDurmeLallACL10-slides.pdf": "Online Generation of Locality Sensitive Hash Signatures", + "http://www.nytimes.com/2008/03/18/us/politics/18text-obama.html": "Barack Obama\u2019s Speech on Race", + 
"http://machinelearning.wustl.edu/mlpapers/paper_files/LodhiSSCW02.pdf": "Text classification using string kernels", + "http://seevl.net/": "reinventing music discovery - seevl.net", + "http://jersey.java.net/nonav/documentation/latest/user-guide.html": "Jersey User Guide", + "http://weblog.burningbird.net/archives/2005/08/27/photos-flickr-and-back-doors/": "Burningbird \u00bb Photos, Flickr, and Back Doors", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-709190,0.html": "", + "http://web.resource.org/rss/1.0/": "", + "http://inmaps.linkedinlabs.com/share/Fran%C3%A7ois-Paul_Servant/11288967125555947972534535528313432213": "LinkedIn Labs InMaps - Fran\u00e7ois-Paul Servant's professional network", + "http://stackoverflow.com/questions/19799560/hierarchical-classification-topic-model-training-data-for-internet-articles-an": "nltk - hierarchical classification + topic model training data for internet articles and social media - Stack Overflow", + "http://tools.ietf.org/html/rfc6570": "RFC 6570 - URI Template", + "http://www.myspace.com/crianca": "www.myspace.com/crianca", + "https://www.nytimes.com/2017/12/11/science/gemstones-diamonds-sapphires-rubies.html": "Precious Gems Bear Messages From Earth\u2019s Molten Heart - The New York Times", + "http://mspace.ecs.soton.ac.uk": "", + "http://www.sofakolle.com": "www.sofakolle.com", + "http://droppages.com/": "DropPages.com", + "http://sameas.org/": "sameas.org", + "http://www.nytimes.com/2005/11/26/international/asia/26china.html?ex=1290661200&en=8031ed40f8a9bad5&ei=5088&partner=rssnyt&emc=rss": "", + "http://www.semanlink.net/doc/2021/06/quand_l%E2%80%99intelligence_artificiel": "Quand l\u2019intelligence artificielle devient une alli\u00e9e des arch\u00e9ologues", + "http://www.palais-decouverte.fr/index.php?id=858": "Th\u00e9or\u00e8me de Pythagore, 6 d\u00e9monstrations", + "http://www.semanlink.net/doc/2019/08/acl_2019_highlights_and_trends": "ACL 2019: Highlights and Trends - Maria Khvalchik - Medium", + "http://www.gnowsis.org": "", + "http://www.lemonde.fr/afrique/article/2018/02/05/a-djibouti-la-chine-commence-a-dechanter_5252153_3212.html": "A Djibouti, \u00ab\u00a0la Chine commence \u00e0 d\u00e9chanter\u00a0\u00bb", + "http://code.google.com/p/owl1-1/wiki/P2NotesOwled2007": "P2NotesOwled2007 - owl1-1 - Google Code", + "http://benalman.com/code/projects/jquery-bbq/docs/files/jquery-ba-bbq-js.html": "jQuery BBQ: Back Button & Query Library", + "http://bricolage.io/hosting-static-sites-with-docker-and-nginx/": "Hosting static sites with Docker and Nginx Kyle Mathews", + "https://medium.com/an-american-developer-in-paris/a-year-in-paris-a235519a486b#.z4vgwhfkn": "A Year in Paris \u2014 An American Developer in Paris \u2014 Medium", + "http://edutechwiki.unige.ch/en/Latent_semantic_analysis_and_indexing": "Latent semantic analysis and indexing - EduTech Wiki", + "http://www.azurs.net/mercredi/": "", + "http://eprints.ecs.soton.ac.uk/10359/01/GDP04-mspaceReport.pdf": "", + "http://www.summly.com/en/introduction.html": "summly", + "http://schemapedia.com/schemas/coo": "Car Options Ontology at Schemapedia", + "http://www.w3.org/wiki/WebID": "WebID - W3C Wiki", + "http://www-128.ibm.com/developerworks/java/library/j-threads2.html": "Threading lightly, Part 2: Reducing contention", + "http://www.manageability.org/blog/stuff/soap-is-dead": "SOAP is Comatose But Not Officially Dead!", + "http://www.rap.prd.fr/pdf/technologie_streaming.pdf": "Quelques mots sur la technologie de streaming", + 
"http://www.semanlink.net/doc/2019/10/speech_to_text_ibm_cloud_api_": "Speech to Text - IBM Cloud API Docs", + "http://www.forbes.com/sites/danielnyegriffiths/2013/01/12/aaron-swartz-uncompromising-reckless-and-delightful/": "Open Data Activist Aaron Swartz Dies Aged 26: 'Uncompromising, Reckless and Delightful' - Forbes", + "http://www.hydra-cg.com/spec/latest/linked-data-fragments/": "Linked Data Fragments", + "http://uk.businessinsider.com/dao-hacked-ethereum-crashing-in-value-tens-of-millions-allegedly-stolen-2016-6": "DAO hacked, Ethereum crashing in value - Business Insider", + "http://www.slate.com/blogs/the_spot/2016/06/01/how_to_fix_the_penalty_shootout_play_it_before_extra_time.html": "How to fix the penalty shootout: play it before extra time.", + "https://fr.wikipedia.org/wiki/Breakthrough_Starshot": "Breakthrough Starshot", + "http://www.memo.fr": "", + "http://www.nextapp.com/products/echo2/": "NextApp . Echo2", + "http://jena.sourceforge.net/ARQ/documentation.html": "ARQ - Documentation and Resources", + "http://www.pbs.org/cringely/pulpit/pulpit20060105.html": "PBS I, Cringely . January 5, 2006 - A Commercial Runs Through It", + "http://mobile.nytimes.com/2013/07/09/science/what-is-nostalgia-good-for-quite-a-bit-research-shows.html?pagewanted=all&_r=0": "What Is Nostalgia Good For? Quite a Bit, Research Shows - NYTimes.com", + "http://news.bbc.co.uk/earth/hi/earth_news/newsid_8127000/8127519.stm": "BBC - Earth News - Ant mega-colony takes over world", + "http://developer.apple.com/internet/webcontent/objectdetection.html": "Object Detection", + "http://autourduciel.blog.lemonde.fr/2014/07/17/regardez-la-rotation-dun-noyau-cometaire-double/": "Regardez la rotation d\u2019un noyau com\u00e9taire double Autour du Ciel", + "https://www.greenpeace.fr/saboteurs-de-centrale-de-doel-toujours-a-linterieur%E2%80%89/": "Les saboteurs de la centrale de Doel toujours \u00e0 l\u2019int\u00e9rieur\u2009? 
- Greenpeace France", + "http://www.betaversion.org/~stefano/": "Stefano Mazzocchi's Home Page", + "http://www.ledevoir.com/2003/06/30/30862.html": "\u00abTuez-les tous, Dieu reconna\u00eetra les siens.\u00bb - Arnaud Amaury \u00abTuez-les tous, Dieu reconna\u00eetra les siens.\u00bb", + "http://www.sciencemag.org/site/extra/crispr/": "The CRISPR Revolution", + "http://www.ibm.com/developerworks/java/library/j-solr2/": "Search smarter with Apache Solr, Part 2: Solr for the enterprise", + "http://www-128.ibm.com/developerworks/library/j-sparql/": "SPARQL tips: using the GRAPH keyword", + "http://www.ibm.com/developerworks/xml/library/x-xmlajaxpt2/": "XML processing in Ajax, Part 2: Two Ajax and XSLT approaches", + "http://www.miss-safia.com/": "MISS SAFIA - LA VOIX EN OR DU NIGER", + "http://www.zyvex.com/nanotech/feynman.html": "Feynman: there's plenty of room at the bottom", + "https://www.cringely.com/2018/01/24/prediction-4-bitcoin-crashes-booms-crashes-booms-2018-traders-figure-not-currency/": "I, Cringely Prediction #4 \u2014 Bitcoin stays crazy until traders learn it is not a currency", + "http://comments.gmane.org/gmane.comp.apache.jena.user/5473": "controlling JSON-LD output, Apache Jena main user support list", + "http://www.googletutor.com/2005/04/15/voyeur-heaven/": "Voyeur Heaven: finding interesting video, sound and image files in unprotected directories", + "http://www.ivy.fr/revealicious/": "", + "http://webcomponents.org/": "WebComponents.org", + "http://www.xml.com/pub/a/ws/2004/05/26/binding.html": "webservices.xml.com: Schema Binding for Java Web Services", + "http://internetalchemy.org/2005/05/tinky-and-skos": "Internet Alchemy Tinky and SKOS", + "https://www.braincreators.com/2018/06/memory-networks/": "Memory networks and why they're a good Idea - BrainCreators", + "http://www.ibm.com/developerworks/java/library/j-jwebunit/": "Create test cases for Web applications", + "http://www.tecmint.com/open-source-artificial-intelligence-tools-softwares-linux/": "10 Top Open Source Artificial Intelligence Tools for Linux", + "http://erikbenson.typepad.com/mu/2005/02/using_bloglines.html": "Using Bloglines to manage my online presence", + "http://www.emusic.com": "", + "http://economist.com/science/PrinterFriendly.cfm?Story_ID=4316021": "The self-driving car comes closer\u2014but difficulties remain", + "http://www.theregister.co.uk/2005/05/11/open_access_research/print.html": "Dutch academics declare research free-for-all - The Register", + "http://www.ics.uci.edu/~fielding/pubs/dissertation/top.htm": "Architectural Styles and the Design of Network-based Software Architectures - Roy Thomas Fielding", + "http://www.gchagnon.fr/cours/dhtml/evenements.html": "Gestion des \u00e9v\u00e9nements et DOM", + "http://ecologie.blog.lemonde.fr/2010/10/13/la-finance-peut-elle-sauver-la-planete/": "La finance peut-elle sauver la plan\u00e8te ? - Eco(lo) - Blog LeMonde.fr", + "https://github.com/datquocnguyen/LFTM": "datquocnguyen/LFTM: Improving Topic Models with Latent Feature Word Representations (GitHub)", + "https://medium.com/@joeDiHare/deep-bayesian-neural-networks-952763a9537": "Deep Bayesian Neural Networks. 
\u2013 Stefano Cosentino \u2013 Medium", + "http://esw.w3.org/topic/LargeTripleStores": "LargeTripleStores - ESW Wiki", + "http://dannyayers.com/archives/2005/11/10/semantic-web-challenge-winners/": "Danny Ayers, Raw Blog : \u00bb Semantic Web Challenge Winners", + "http://www.sitepoint.com/blogs/2008/03/16/twines-dual-personality/": "SitePoint Blogs \u00bb Twine\u2019s dual personality", + "http://www.webforefront.com/archives/2005/05/rest_-_represen.html": "Web Forefront: The Web Services debate : SOAP vs. REST [ XML/HTTP ]", + "http://www.ensor.cc/2011/07/mavens-war-overlays-how-to-manage.html": "Mike's Site: Maven's WAR Overlays: How to manage dependencies", + "http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4836752.stm": "BBC NEWS - 2006 mathematics prize announced", + "http://semanticweb.com/semtechbiz-berlin-day-1_b26503": "#SemTechBiz Berlin \u2013 Day 1 - semanticweb.com", + "http://www.service-public.fr/accueil/attestation_accueil_etranger.html": "Modification des conditions de d\u00e9livrance de l'attestation d'accueil (26/11/04) - Service-public.fr -", + "http://nlp.town/blog/sentence-similarity/?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News": "Comparing Sentence Similarity Methods", + "http://blog.dejavu.sk/2013/11/19/registering-resources-and-providers-in-jersey-2/": "Registering Resources and Providers in Jersey 2 tl;dr", + "https://www.quora.com/What-are-the-best-open-source-tools-for-unsupervised-clustering-of-text-documents": "What are the best open source tools for unsupervised clustering of text documents? - Quora", + "http://semanticweb.com/web-components-even-better-semantic-markup_b45129": "Web Components: Even Better With Semantic Markup - Semanticweb.com", + "http://www.informit.com/articles/article.aspx?p=759232": "InformIT: Organizing Your Eclipse Web Tools Platform Development Project > Web Project Types and J2EE Applications", + "http://any23.apache.org/xref/org/apache/any23/extractor/html/TurtleHTMLExtractor.html": "TurtleHTMLExtractor xref", + "http://www.w3.org/2001/tag/issues.html#httpRange-14": "TAG Issues List - httpRange-14: What is the range of the HTTP dereference function?", + "http://internetactu.blog.lemonde.fr/2013/08/30/ou-en-est-lopen-data/": "O\u00f9 en est l\u2019Open Data ? InternetActu", + "http://swirlstats.com/": "swirl: Learn R, in R.", + "http://storynory.com/": "Storynory: Free Audio Stories for Kids", + "http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3230,50-702731,0.html": "", + "http://technorati.com/help/tags.html": "", + "http://ajaxpatterns.org/": "" +} \ No newline at end of file diff --git a/ckb/datasets/semanlink/questions.csv b/ckb/datasets/semanlink/questions.csv deleted file mode 100644 index 548e6a7..0000000 --- a/ckb/datasets/semanlink/questions.csv +++ /dev/null @@ -1,3158 +0,0 @@ -- Siamese network with two deep sub-models - Projects input and candidate texts into embedding space - Trained by maximizing cosine similarity between correct input-output pairs [source](/doc/2019/08/neural_models_for_information_r)|has_question|How many deep sub-models do Siamese ne have? -How many deep sub-models do Siamese ne have?|has_answer|two -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. 
We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|has_question|What raises the challenge of efficient searching in dense, high-dimensional vector spaces? -What raises the challenge of efficient searching in dense, high-dimensional vector spaces?|has_answer|semantic vector space modeling -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|has_question|What type of applications would we want to meet with the scalability and robustness of Elasticsearch? 
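
The deleted questions.csv opens, in the rows just above, with a note describing a Siamese similarity model: two weight-sharing deep sub-models project the input and candidate texts into one embedding space, trained by maximising cosine similarity on correct pairs. A minimal Python sketch of that training signal, with a toy encoder and random tensors standing in for real text features (an illustration only, not the wrapper added in ckb/models/similarity.py):

    import torch
    import torch.nn.functional as F

    # Stand-in for any deep text encoder; both inputs share these weights.
    encoder = torch.nn.Sequential(
        torch.nn.Linear(300, 128), torch.nn.ReLU(), torch.nn.Linear(128, 64)
    )

    def siamese_score(x, y):
        # Same sub-model on both sides; similarity in the shared space.
        return F.cosine_similarity(encoder(x), encoder(y), dim=-1)

    inputs = torch.randn(8, 300)      # hypothetical input-text features
    candidates = torch.randn(8, 300)  # hypothetical candidate-text features
    loss = (1 - siamese_score(inputs, candidates)).mean()
    loss.backward()  # pulls correct input-candidate pairs together
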
-Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|has_question|What is addressed in this paper? -What is addressed in this paper?|has_answer|research challenge -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|has_question|The paper describes novel ways of encoding dense vectors into text documents, allowing the use of what? 
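
The abstract repeated in the rows above turns on one trick: encoding a dense vector as plain text so that a traditional inverted-index engine such as Elasticsearch can index it. One simple encoding in that spirit, where the token format and rounding precision are illustrative assumptions rather than the paper's exact scheme:

    import numpy as np

    def vector_to_tokens(vec, precision=1):
        # One string token per rounded dimension; vectors whose rounded
        # coordinates agree share index terms, so an inverted index can
        # retrieve approximate neighbours like ordinary keyword matches.
        return [f"d{i}_{round(float(v), precision)}" for i, v in enumerate(vec)]

    print(" ".join(vector_to_tokens(np.array([0.71, -0.32, 0.12]))))
    # prints: d0_0.7 d1_-0.3 d2_0.1

The precision parameter gives the tunable trade-off the abstract mentions: coarser rounding means cheaper indexing and faster retrieval, at the cost of vector search quality.
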
-The paper describes novel ways of encoding dense vectors into text documents, allowing the use of what?|has_answer|traditional inverted index engines -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|has_question|What does VSM stand for? -What does VSM stand for?|has_answer|vector space modeling -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. 
We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|has_question|What is proposed to vector similarity searching' over dense semantic representations of words and documents? -What is proposed to vector similarity searching' over dense semantic representations of words and documents?|has_answer|a novel approach -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|has_question|What does the novel approach to vector similarity searching' do? -What does the novel approach to vector similarity searching' do?|has_answer|allows the indexing and querying of dense vectors in text domains -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. 
This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|has_question|What does the indexing and querying of dense vectors in text domains open up? -What does the indexing and querying of dense vectors in text domains open up?|has_answer|efficiency gains -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|has_question|What is the end result of the novel approach? -What is the end result of the novel approach?|has_answer|a fast and scalable vector database -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. 
We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|has_question|What task does the solution demonstrate its performance and quality? -What task does the solution demonstrate its performance and quality?|has_answer|semantic searching over a dense vector representation of the entire English Wikipedia -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed- forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|has_question|What type of feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing -What type of feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing|has_answer|shallow -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed- forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. 
Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|has_question|What are resource-constrained environments like? -What are resource-constrained environments like?|has_answer|mobile phones -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed- forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|has_question|What is an example of a resource-constrained environment? -What is an example of a resource-constrained environment?|has_answer|mobile phones -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|has_question|What is RotatE? -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. 
Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|has_question|Learning representations of entities and relations in knowledge graphs for predicting what? -Learning representations of entities and relations in knowledge graphs for predicting what?|has_answer|missing links -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|has_question|The success of learning representations of entities and relations in knowledge graphs heavily relies on what? -The success of learning representations of entities and relations in knowledge graphs heavily relies on what?|has_answer|ability of modeling and inferring the patterns of (or between) the relations -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|has_question|What is the new approach for knowledge graph embedding called? 
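
The RotatE rows above all restate the model's core definition: a relation is a rotation in complex vector space, and a triple (h, r, t) scores well when the rotated head lands near the tail. A short PyTorch sketch of that scoring function, with an arbitrary embedding size (not the ckb implementation):

    import math
    import torch

    def rotate_score(head, rel_phase, tail):
        # rel_phase holds rotation angles; e^{i*theta} has modulus 1, so
        # the relation acts as a pure rotation h -> h * r in complex space.
        r = torch.polar(torch.ones_like(rel_phase), rel_phase)
        return -torch.linalg.norm(head * r - tail)

    k = 4  # assumed embedding dimension
    h = torch.randn(k, dtype=torch.cfloat)
    t = torch.randn(k, dtype=torch.cfloat)
    theta = torch.rand(k) * 2 * math.pi
    print(rotate_score(h, theta, t))  # less negative = more plausible triple

Because rotations compose and invert by adding or negating phases, this single parameterisation can express the symmetry/antisymmetry, inversion and composition patterns the abstract lists.
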
-RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|has_question|What defines each relation as a rotation from the source entity to the target entity in the complex vector space? -What defines each relation as a rotation from the source entity to the target entity in the complex vector space?|has_answer|RotatE model -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|has_question|What is proposed for efficiently and effectively training the RotatE model? -What is proposed for efficiently and effectively training the RotatE model?|has_answer|self-adversarial negative sampling technique -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. 
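
The other RotatE ingredient these question-answer pairs keep returning to is self-adversarial negative sampling: negative triples are weighted by the model's own current scores, so harder negatives dominate the loss. A sketch of that weighting, where the temperature alpha is an assumed hyperparameter:

    import torch
    import torch.nn.functional as F

    def self_adversarial_weights(neg_scores, alpha=1.0):
        # Harder negatives (higher score under the current model) receive
        # more weight; detach() keeps gradients out of the weights
        # themselves, so they act as a sampling distribution.
        return torch.softmax(alpha * neg_scores, dim=-1).detach()

    neg_scores = torch.tensor([[-3.2, -0.5, -7.1]])  # toy scores, 3 negatives
    w = self_adversarial_weights(neg_scores)
    neg_loss = -(w * F.logsigmoid(-neg_scores)).sum()
    print(float(neg_loss))
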
Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|has_question|What model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the -What model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the|has_answer|RotatE model -BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|has_question|What are the regions responsible for each step in the model? -What are the regions responsible for each step in the model?|has_answer|POS tagging, parsing, NER, semantic roles, then coreference -BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|has_question|What type of analysis reveals that the model can and often does adjust this pipeline dynamically? 
-What type of analysis reveals that the model can and often does adjust this pipeline dynamically?|has_answer|Qualitative -BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|has_question|What has rapidly advanced the state of the art on many NLP tasks? -What has rapidly advanced the state of the art on many NLP tasks?|has_answer|Pre-trained text encoders -BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|has_question|What does BERT aim to do? -What does BERT aim to do?|has_answer|quantify where linguistic information is captured within the network -BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. 
We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|has_question|How does the BERT model represent the steps of the traditional NLP pipeline? -How does the BERT model represent the steps of the traditional NLP pipeline?|has_answer|interpretable and localizable -BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|has_question|Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising what on the basis of disambiguating information from -Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising what on the basis of disambiguating information from|has_answer|lower-level decisions -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|has_question|What is the name of the problem of multilingual masked language modeling? 
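
The cross-lingual rows above claim that representations from independently trained models can be aligned post-hoc. The standard closed-form tool for such an alignment is orthogonal Procrustes; a self-contained sketch with synthetic data standing in for two models' representations:

    import numpy as np

    def procrustes_align(X, Y):
        # Orthogonal W minimising ||XW - Y||_F, solved in closed form
        # from the SVD of X^T Y (the usual post-hoc alignment step).
        U, _, Vt = np.linalg.svd(X.T @ Y)
        return U @ Vt

    rng = np.random.default_rng(0)
    X = rng.normal(size=(1000, 64))                 # e.g. model-A vectors
    R = np.linalg.qr(rng.normal(size=(64, 64)))[0]  # hidden rotation
    Y = X @ R                                       # e.g. model-B vectors
    W = procrustes_align(X, Y)
    print(np.allclose(X @ W, Y))                    # True: spaces aligned

Here the second space is an exact rotation of the first, so recovery is perfect; with two real, independently trained models the map is fitted on paired examples and the alignment is only approximate, which is what the abstract's "universal latent symmetries" observation rests on.
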
-What is the name of the problem of multilingual masked language modeling?|has_answer|Emerging Cross-lingual Structure in Pretrained Language Models
-Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|has_question|What is the training of a single model on?
-What is the training of a single model on?|has_answer|concatenated text from multiple languages
-Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|has_question|Transfer is possible even when what is not shared across the monolingual corpora and also when the text comes from very different domains?
-Transfer is possible even when what is not shared across the monolingual corpora and also when the text comes from very different domains?|has_answer|there is no shared vocabulary
-Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|has_question|What is required in the top layers of the multi-lingual encoder?
-What is required in the top layers of the multi-lingual encoder?|has_answer|some shared parameters
-Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|has_question|What is found in the learned embedding spaces?
-What is found in the learned embedding spaces?|has_answer|universal latent symmetries
-Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|has_question|For what type of modeling are universal latent symmetries automatically discovered and aligned during the joint training process?
-For what type of modeling are universal latent symmetries automatically discovered and aligned during the joint training process?|has_answer|multilingual masked language modeling
-Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|has_question|What is a simple and efficient baseline for text classification?
-What is a simple and efficient baseline for text classification?|has_answer|Bag of Tricks for Efficient Text Classification
-Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|has_question|What can be averaged together to form good sentence representations?
-What can be averaged together to form good sentence representations?|has_answer|word features
-Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|has_question|FastText is often on par with deep learning classifiers in terms of what?
-FastText is often on par with deep learning classifiers in terms of what?|has_answer|accuracy
-Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|has_question|How many words can we train fastText on in less than ten minutes using a standard multicoreCPU?
-How many words can we train fastText on in less than ten minutes using a standard multicoreCPU?|has_answer|more than one billion
-Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|has_question|What does this paper explore for text classification?
-What does this paper explore for text classification?|has_answer|simple and efficient baseline
-Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|has_question|Our experiments show that fastText is often on par with deep learning classifiers in terms of what?
-Our experiments show that fastText is often on par with deep learning classifiers in terms of what?|has_answer|accuracy
-Using Information Content to Evaluate Semantic Similarity in a Taxonomy This paper presents a new measure of semantic similarity in an IS-A taxonomy, based on the notion of information content. Experimental evaluation suggests that the measure performs encouragingly well (a correlation of r = 0.79 with a benchmark set of human similarity judgments, with an upper bound of r = 0.90 for human subjects performing the same task), and significantly better than the traditional edge counting approach (r = 0.66).|has_question|What is used to Evaluate Semantic Similarity in a Taxonomy?
-What is used to Evaluate Semantic Similarity in a Taxonomy?|has_answer|Information Content
-Using Information Content to Evaluate Semantic Similarity in a Taxonomy This paper presents a new measure of semantic similarity in an IS-A taxonomy, based on the notion of information content. Experimental evaluation suggests that the measure performs encouragingly well (a correlation of r = 0.79 with a benchmark set of human similarity judgments, with an upper bound of r = 0.90 for human subjects performing the same task), and significantly better than the traditional edge counting approach (r = 0.66).|has_question|What is the traditional edge counting approach?
-What is the traditional edge counting approach?|has_answer|r = 0.66
-A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|has_question|What do many existing machine learning algorithms depend on to generate a good model?
-What do many existing machine learning algorithms depend on to generate a good model?|has_answer|the quality of the input characteristics
-A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|has_question|What is used to produce feature sets that are more compact and higher level?
-What is used to produce feature sets that are more compact and higher level?|has_answer|feature fusion techniques
-A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|has_question|What has been developed to fuse original variables for producing new ones?
-What has been developed to fuse original variables for producing new ones?|has_answer|plethora of procedures
-A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|has_question|What are the most basic procedures to fuse original variables for producing new ones?
-What are the most basic procedures to fuse original variables for producing new ones?|has_answer|linear combinations of the original variables
-A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|has_question|What is an alternative to manifold learning for conducting nonlinear feature fusion?
-What is an alternative to manifold learning for conducting nonlinear feature fusion?|has_answer|autoencoders
-A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|has_question|How many AE models have been proposed lately?
-How many AE models have been proposed lately?|has_answer|Dozens
-A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|has_question|What are autoencoders designed with other applications in mind?
-What are autoencoders designed with other applications in mind?|has_answer|AEs
-A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|has_question|What kind of view of what an AE is?
-What kind of view of what an AE is?|has_answer|broad
-A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|has_question|What is provided on how to choose the proper AE for a given task?
-What is provided on how to choose the proper AE for a given task?|has_answer|didactic guidelines
-A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|has_question|How many case studies illustrate the use of AEs with datasets of handwritten digits and breast cancer?
-How many case studies illustrate the use of AEs with datasets of handwritten digits and breast cancer?|has_answer|two
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|What is a large-scale life-long memory module for use in deep learning?
-What is a large-scale life-long memory module for use in deep learning?|has_answer|Learning to Remember Rare Events
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|Learning to Remember Rare Events exploits fast nearest-neighbor algorithms for what?
-Learning to Remember Rare Events exploits fast nearest-neighbor algorithms for what?|has_answer|efficiency
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|Learning to Remember Rare Events is fully differentiable and trained end-to-end with no extra supervision except for what?
-Learning to Remember Rare Events is fully differentiable and trained end-to-end with no extra supervision except for what?|has_answer|the nearest-neighbor query
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|Learning to Remember Rare Events operates in what manner?
-Learning to Remember Rare Events operates in what manner?|has_answer|life-long
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|What is limited when it comes to life-long and one-shot learning?
-What is limited when it comes to life-long and one-shot learning?|has_answer|memory-augmented deep neural networks
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|Learning to Remember Rare Events is a large-scale what?
-Learning to Remember Rare Events is a large-scale what?|has_answer|life-long memory module
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|Why does Learning to Remember Rare Events use fast nearest-neighbor algorithms?
-Why does Learning to Remember Rare Events use fast nearest-neighbor algorithms?|has_answer|efficiency
-Learning to Remember Rare Events is fully differentiable and trained end-to-end with no extra supervision except for what?|has_answer|nearest-neighbor query
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|Our memory module can be easily added to any part of what?
-Our memory module can be easily added to any part of what?|has_answer|supervised neural network
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|Learning to Remember Rare Events can be easily added to any part of a supervised neural network, from simple convolutional ones tested on what?
-Learning to Remember Rare Events can be easily added to any part of a supervised neural network, from simple convolutional ones tested on what?|has_answer|image classification
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|In all cases, the enhanced network gains the ability to remember and do what?
-In all cases, the enhanced network gains the ability to remember and do what?|has_answer|life-long one-shot learning
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|What does Learning to Remember Rare Events remember?
-What does Learning to Remember Rare Events remember?|has_answer|training examples
-Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|has_question|On what dataset did we set new state-of-the-art for one-shot learning?
-On what dataset did we set new state-of-the-art for one-shot learning?|has_answer|Omniglot
-Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|has_question|What study shows how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude
-What study shows how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude|has_answer|Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale
-Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|has_question|What is a new weak supervision management system for this setting?
-What is a new weak supervision management system for this setting?|has_answer|Snorkel DryBell
-Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|has_question|What is one of the most costly bottlenecks in developing machine learning-based applications?
-What is one of the most costly bottlenecks in developing machine learning-based applications?|has_answer|Labeling training data
-Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|has_question|What framework does Snorkel DryBell build on?
-What framework does Snorkel DryBell build on?|has_answer|Snorkel framework
-Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|has_question|How many hand-labeled examples does Snorkel DryBell create classifiers with?
-How many hand-labeled examples does Snorkel DryBell create classifiers with?|has_answer|tens of thousands
-Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model. Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|has_question|What is the use-case ex: NER when the target data contains new categories?
-What is the use-case ex: NER when the target data contains new categories?|has_answer|Transfer Learning for Sequence Labeling -Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model. Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|has_question|What does TL stand for? -What does TL stand for?|has_answer|transfer learning -Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model. Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|has_question|What is the solution to transfer learning for sequence labeling? -What is the solution to transfer learning for sequence labeling?|has_answer|adding new neurons in the output layer of the target model and transferring parameters from the source model -Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. 
Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model. Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|has_question|What do we propose a neural adapter to learn? -What do we propose a neural adapter to learn?|has_answer|the difference between the source and the target label distribution -Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model. Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|has_question|Our experiments on what show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories? -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|has_question|Neural Architectures for Named Entity Recognition Neural architectures that use no language-specific resources or features beyond a small amount of what? 
-Neural Architectures for Named Entity Recognition Neural architectures that use no language-specific resources or features beyond a small amount of what?|has_answer|supervised training data and unlabeled corpora -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|has_question|What are the two sources of information that our models rely on? -What are the two sources of information that our models rely on?|has_answer|character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|has_question|What is the transition-based approach of the new neural architectures? -What is the transition-based approach of the new neural architectures?|has_answer|shift-reduce parsers -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. 
Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|has_question|Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as what -Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as what|has_answer|gazetteers -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|What is imposed by the embedding space geometry? -What is imposed by the embedding space geometry?|has_answer|inductive bias -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. 
By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|What is a weighted graph with? -What is a weighted graph with?|has_answer|shortest path distance -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|What can a weighted graph model with a proper configuration of edges and weights? 
-What can a weighted graph model with a proper configuration of edges and weights?|has_answer|arbitrary geometry -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|What is the name of the method that learns a weighted graph representation of data end-to-end by gradient descent? -What is the name of the method that learns a weighted graph representation of data end-to-end by gradient descent?|has_answer|PRODIGE -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. 
Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|What is a key ingredient to the success of modern machine learning? -What is a key ingredient to the success of modern machine learning?|has_answer|Learning useful representations -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|Currently, representation learning mostly relies on what? -Currently, representation learning mostly relies on what?|has_answer|embedding data into Euclidean space -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. 
Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|What is better modeled by non-euclidean metric spaces? -What is better modeled by non-euclidean metric spaces?|has_answer|data in some domains -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|In this paper, we aim to eliminate what imposed by the embedding space geometry? -In this paper, we aim to eliminate what imposed by the embedding space geometry?|has_answer|inductive bias -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. 
However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|What do we propose to map data into more general non-vector metric spaces? -What do we propose to map data into more general non-vector metric spaces?|has_answer|a weighted graph with a shortest path distance -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|What makes PRODIGE more powerful than existing embedding-based approaches? -What makes PRODIGE more powerful than existing embedding-based approaches?|has_answer|Greater generality and fewer model assumptions -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. 
Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|has_question|How do we confirm the superiority of PRODIGE? -How do we confirm the superiority of PRODIGE?|has_answer|extensive experiments -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|has_question|Named Entity Resolution Employs lexicons as part of the word embedding training? -Named Entity Resolution Employs lexicons as part of the word embedding training?|has_answer|Lexicon Infused Phrase Embeddings -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. 
In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|has_question|How can the skip-gram model be trained to predict not only neighboring words but also lexicon membership? -How can the skip-gram model be trained to predict not only neighboring words but also lexicon membership?|has_answer|plug phrase embeddings into an existing log-linear CRF System -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|has_question|What do most state-of-the-art approaches for named-entity recognition use? -What do most state-of-the-art approaches for named-entity recognition use?|has_answer|semi supervised information -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|has_question|What are highly informative vector representations for words called? 
-What are highly informative vector representations for words called?|has_answer|word embeddings -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|has_question|How many contributions do we present in this paper? -How many contributions do we present in this paper?|has_answer|two -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|has_question|What is the F1 score of our system on the test set for CoNLL 2003? -What is the F1 score of our system on the test set for CoNLL 2003?|has_answer|90.90 -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. 
It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|What is the formation of a low-dimensional combination of a few concepts constituting a conscious thought? -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. 
This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|What is an additional mechanism describing what mind chooses to focus on? -What is an additional mechanism describing what mind chooses to focus on?|has_answer|Attention -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|What is proposed for learning representations of high-level concepts of the kind we manipulate with language? 
-What is proposed for learning representations of high-level concepts of the kind we manipulate with language?|has_answer|A new prior -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|What do priors help disentangle from each other? -What do priors help disentangle from each other?|has_answer|abstract factors -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. 
The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|The Consciousness Prior consciousness is inspired by what? -The Consciousness Prior consciousness is inspired by what?|has_answer|cognitive neuroscience theories of consciousness -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. 
The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|The set of recently selected elements one becomes aware of is seen as forming what? -The set of recently selected elements one becomes aware of is seen as forming what?|has_answer|low-dimensional conscious state -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|What is a conscious state combining the few concepts constituting a conscious thought? -What is a conscious state combining the few concepts constituting a conscious thought?|has_answer|what one is immediately conscious of at a particular moment -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. 
[YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|What constraint corresponds to assumptions about the joint distribution between high-level concepts? -What constraint corresponds to assumptions about the joint distribution between high-level concepts?|has_answer|architectural and information-processing -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. 
We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|The Consciousness Prior can form a useful prior for what? -The Consciousness Prior can form a useful prior for what?|has_answer|representation learning -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. 
The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|A low-dimensional thought or conscious state is analogous to what? -A low-dimensional thought or conscious state is analogous to what?|has_answer|a sentence -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|What is the joint distribution over high-level concepts that has the form of? -What is the joint distribution over high-level concepts that has the form of?|has_answer|sparse factor graph -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. 
This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|has_question|The consciousness prior makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to what? -The consciousness prior makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to what?|has_answer|facts and rules -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. 
The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|What is a structured memory that can be easily integrated into a neural network? -What is a structured memory that can be easily integrated into a neural network?|has_answer|Large Memory Layers with Product Keys -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|What is the size of Large Memory Layers with Product Keys? -What is the size of Large Memory Layers with Product Keys?|has_answer|very large -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. 
The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|Large Memory Layers with Product Keys' design and access pattern is based on what? -Large Memory Layers with Product Keys' design and access pattern is based on what?|has_answer|product keys -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. 
In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|What is the ability of Large Memory Layers with Product Keys? -What is the ability of Large Memory Layers with Product Keys?|has_answer|increase the number of parameters -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|What can increase model capacity for a negligible computational cost? -What can increase model capacity for a negligible computational cost?|has_answer|key-value memory layer -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. 
The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|How many layers outperforms a 24-layer transformer? -How many layers outperforms a 24-layer transformer?|has_answer|12 -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|What can be easily integrated into a neural network? -What can be easily integrated into a neural network?|has_answer|a structured memory -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. 
Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|How large is the memory by design? -How large is the memory by design?|has_answer|very large -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. 
In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|What is the design and access pattern of Large Memory Layers with Product Keys based on? -What is the design and access pattern of Large Memory Layers with Product Keys based on?|has_answer|product keys -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|What is the ability of Large Memory Layers with Product Keys to do while keeping the same computational budget? -What is the ability of Large Memory Layers with Product Keys to do while keeping the same computational budget?|has_answer|increase the number of parameters -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! 
[Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|This memory layer allows us to tackle what? -This memory layer allows us to tackle what?|has_answer|very large scale language modeling tasks -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|How many words can a dataset contain? -How many words can a dataset contain?|has_answer|30 billion -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. 
The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|How much faster does a memory augmented model outperform a baseline transformer model with 24 layers? -How much faster does a memory augmented model outperform a baseline transformer model with 24 layers?|has_answer|twice faster -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. 
This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|has_question|Why do we release our code? -Why do we release our code?|has_answer|reproducibility purposes -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|has_question|What remain vague between the different algorithms? -What remain vague between the different algorithms?|has_answer|qualitative differences -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|has_question|What account for a significant performance gap among algorithms? -What account for a significant performance gap among algorithms?|has_answer|a particular feature set -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. 
This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|has_question|What is an example of a traditional alignment algorithm? -What is an example of a traditional alignment algorithm?|has_answer|IBM Model-1 -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|has_question|What do different algorithmic approaches for utilizing the sentence ID feature space result in? -What do different algorithmic approaches for utilizing the sentence ID feature space result in?|has_answer|similar performance -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|has_question|What parallels does this paper draw between the embedding and alignment literature? 
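Stepping back to the "Large Memory Layers with Product Keys" rows above: they describe how scoring two small sets of half-dimension sub-keys gives an exact top-k over their full Cartesian product of keys. A minimal sketch of that lookup under simplifying assumptions (no batching, a single head; names and shapes are illustrative, not the authors' released code):

```python
import torch

def pkm_lookup(q, subkeys1, subkeys2, values, k=32):
    # Product-key lookup: score |K1| + |K2| half-dimension sub-keys,
    # yet obtain the exact top-k over the |K1| x |K2| implicit full keys,
    # because a full key's score is the sum of its two sub-key scores.
    d = q.shape[0]
    q1, q2 = q[: d // 2], q[d // 2 :]
    s1, s2 = subkeys1 @ q1, subkeys2 @ q2            # (n,) and (n,)
    v1, i1 = s1.topk(k)                              # candidates, first half
    v2, i2 = s2.topk(k)                              # candidates, second half
    pair = v1[:, None] + v2[None, :]                 # (k, k) candidate scores
    best = pair.flatten().topk(k)                    # exact global top-k pairs
    rows = torch.div(best.indices, k, rounding_mode="floor")
    cols = best.indices % k
    slots = i1[rows] * subkeys2.shape[0] + i2[cols]  # memory slot ids
    w = torch.softmax(best.values, dim=0)
    return w @ values[slots]                         # weighted sum of values

# 128 + 128 sub-keys address 128 * 128 = 16384 memory slots.
q = torch.randn(64)
out = pkm_lookup(q, torch.randn(128, 32), torch.randn(128, 32),
                 torch.randn(128 * 128, 16))
print(out.shape)   # torch.Size([16])
```

The exactness comes from the additive decomposition: the score of full key (i, j) is s1[i] + s2[j], so the global top-k pairs necessarily lie among the top-k candidates of each half.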
-What parallels does this paper draw between the embedding and alignment literature?|has_answer|empirical and theoretical -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|has_question|What does A Survey and Perspective review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing? -What does A Survey and Perspective review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing?|has_answer|Graph Neural Networks -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|has_question|Neural-symbolic computing has become the subject of interest of what? -Neural-symbolic computing has become the subject of interest of what?|has_answer|academic and industry research laboratories -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|has_question|Where have Graph Neural Networks been widely used?
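The "Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments" rows above argue that the decisive feature set is the sentence IDs each word occurs in, the same signal used by alignment models such as IBM Model-1. A toy sketch of a sentence-ID baseline, assuming a sentence-aligned bilingual corpus; the Dice scoring rule below is one simple member of that family, an illustrative assumption rather than the paper's exact method:

```python
from collections import defaultdict

def sentence_id_features(corpus):
    # Map each word to the set of sentence IDs it occurs in; sentence i
    # in the source corpus is aligned with sentence i in the target corpus.
    ids = defaultdict(set)
    for sid, sentence in enumerate(corpus):
        for word in sentence:
            ids[word].add(sid)
    return ids

def translate(word, src_ids, tgt_ids):
    # Rank target words by Dice overlap of their sentence-ID sets.
    s = src_ids[word]
    scores = {t: 2 * len(s & ids) / (len(s) + len(ids))
              for t, ids in tgt_ids.items()}
    return max(scores, key=scores.get)

# Toy aligned corpus: English sentence i <-> French sentence i.
en = [["the", "cat", "sleeps"], ["the", "dog", "runs"], ["a", "cat", "runs"]]
fr = [["le", "chat", "dort"], ["le", "chien", "court"], ["un", "chat", "court"]]
src, tgt = sentence_id_features(en), sentence_id_features(fr)
print(translate("cat", src, tgt))  # chat
```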
-Where have Graph Neural Networks been widely used?|has_answer|relational and symbolic domains
-Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|has_question|What does the need for improved explainability, interpretability and trust of AI systems in general require?
-What does the need for improved explainability, interpretability and trust of AI systems in general require?|has_answer|principled methodologies
-Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|has_question|What do we review on the use of GNNs as a model of neural-symbolic computing?
-What do we review on the use of GNNs as a model of neural-symbolic computing?|has_answer|state-of-the-art
-Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|has_question|In what domains does this paper review the state-of-the-art on the use of GNNs as a model of neural-s
-In what domains does this paper review the state-of-the-art on the use of GNNs as a model of neural-s|has_answer|several domains
-Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|has_question|State-of-the-art methods for learning cross-lingual word embeddings have relied on what?
-State-of-the-art methods for learning cross-lingual word embeddings have relied on what?|has_answer|bilingual dictionaries or parallel corpora
-Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|has_question|Recent studies showed that the need for parallel data supervision can be alleviated with what?
-Recent studies showed that the need for parallel data supervision can be alleviated with what?|has_answer|character-level information
-Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|has_question|How do these methods compare with their supervised counterparts?
-How do these methods compare with their supervised counterparts?|has_answer|not on par
-Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|has_question|How can we build a bilingual dictionary between two languages without parallel corpora?
-How can we build a bilingual dictionary between two languages without parallel corpora?|has_answer|by aligning monolingual word embedding spaces in an unsupervised way
-Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|has_question|Without what does our model outperform existing supervised methods on cross-lingual tasks for some language pairs?
-Without what does our model outperform existing supervised methods on cross-lingual tasks for some language pairs?|has_answer|character information
-Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|has_question|What languages do our experiments demonstrate that our method works very well for?
-What languages do our experiments demonstrate that our method works very well for?|has_answer|English-Russian or English-Chinese
-Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|has_question|What low-resource language pair does our model work on?
-What low-resource language pair does our model work on?|has_answer|English-Esperanto
-Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|has_question|Our code, embeddings and dictionaries are what?
-Our code, embeddings and dictionaries are what?|has_answer|publicly available
-Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|has_question|What has BERT learned about argument comprehension?
-What has BERT learned about argument comprehension?|has_answer|Probing Neural Network Comprehension of Natural Language Arguments
-Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|has_question|What is BERT's peak performance on the Argument Reasoning Comprehension Task?
-What is BERT's peak performance on the Argument Reasoning Comprehension Task?|has_answer|77%
-Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|has_question|What is BERT's peak performance entirely accounted for by?
-What is BERT's peak performance entirely accounted for by?|has_answer|exploitation of spurious statistical cues
-Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|has_question|What do we demonstrate about spurious statistical cues in the dataset?
-What do we demonstrate about spurious statistical cues in the dataset?|has_answer|a range of models all exploit them
-Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|has_question|What does this analysis inform the construction of?
-What does this analysis inform the construction of?|has_answer|an adversarial dataset
-Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|has_question|What provides a more robust assessment of argument comprehension?
-What provides a more robust assessment of argument comprehension?|has_answer|Our adversarial dataset
-A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|has_question|What has revolutionized many machine learning tasks in recent years?
-What has revolutionized many machine learning tasks in recent years?|has_answer|Deep learning
-A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|has_question|Deep learning tasks are typically represented in what space?
-Deep learning tasks are typically represented in what space?|has_answer|Euclidean
-A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|has_question|Data generated from non-Euclidean domains are represented as what?
-Data generated from non-Euclidean domains are represented as what?|has_answer|graphs
-A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|has_question|What has imposed significant challenges on existing machine learning algorithms?
-What has imposed significant challenges on existing machine learning algorithms?|has_answer|complexity of graph data
-A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|has_question|Recent studies have emerged on what?
-Recent studies have emerged on what?|has_answer|extending deep learning approaches for graph data
-A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|has_question|In what fields do we provide a comprehensive overview of graph neural networks?
-In what fields do we provide a comprehensive overview of graph neural networks?|has_answer|data mining and machine learning fields
-A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|has_question|How many categories do we propose a new taxonomy to divide the state-of-the-art graph neural networks into?
-How many categories do we propose a new taxonomy to divide the state-of-the-art graph neural networks into?|has_answer|four
-A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|has_question|What do we summarize in this survey?
-What do we summarize in this survey?|has_answer|open source codes, benchmark data sets, and model evaluation of graph neural networks
-A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|has_question|What do we propose in this rapidly growing field?
-What do we propose in this rapidly growing field?|has_answer|potential research directions
-Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|has_question|What is used for learning representations across multiple NLU tasks?
-What is used for learning representations across multiple NLU tasks?|has_answer|Multi-Task Deep Neural Network (MT-DNN)
-Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|has_question|What effect does MT-DNN benefit from?
-What effect does MT-DNN benefit from?|has_answer|regularization effect
-Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|has_question|Who proposed the model for MT-DNN?
-Who proposed the model for MT-DNN?|has_answer|Liu et al.
-Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|has_question|What is the pre-trained bidirectional transformer language model called?
-Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|has_question|What percentage improvement did MT-DNN push the GLUE benchmark to?
-What percentage improvement did MT-DNN push the GLUE benchmark to?|has_answer|82.7%
-Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|has_question|What does MT-DNN allow domain adaptation with compared to pre-trained BERT representations?
-What does MT-DNN allow domain adaptation with compared to pre-trained BERT representations?|has_answer|substantially fewer in-domain labels
-Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|has_question|Where can the code and pre-trained models be found?
-Where can the code and pre-trained models be found?|has_answer|https://github.com/namisan/mt-dnn
-K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|has_question|What are triples injected into sentences as?
-What are triples injected into sentences as?|has_answer|domain knowledge
-K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|has_question|When reading a domain text, experts make inferences with what?
-When reading a domain text, experts make inferences with what?|has_answer|relevant knowledge
-K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|has_question|What do KGs stand for?
-What do KGs stand for?|has_answer|knowledge graphs
-K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|has_question|Too much knowledge incorporation may divert the sentence from its correct meaning, what is KN?
-Too much knowledge incorporation may divert the sentence from its correct meaning, what is KN?|has_answer|knowledge noise
-K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|has_question|What introduces soft-position and visible matrix to limit the impact of knowledge?
-What introduces soft-position and visible matrix to limit the impact of knowledge?|has_answer|K-BERT
-K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|has_question|What can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self?
-What can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self?|has_answer|K-BERT
-K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|has_question|How many NLP tasks did our investigation reveal promising results in?
-How many NLP tasks did our investigation reveal promising results in?|has_answer|twelve
-K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|has_question|What are some domain-specific tasks that K-BERT significantly outperforms BERT?
-What are some domain-specific tasks that K-BERT significantly outperforms BERT?|has_answer|finance, law, and medicine
-PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|has_question|Photo Geolocation with Convolutional Neural Networks What is the name of the model that is able to determine the location where a photo was
-Photo Geolocation with Convolutional Neural Networks What is the name of the model that is able to determine the location where a photo was|has_answer|PlaNet
-PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|has_question|How difficult is it to construct situations where no location can be inferred?
-How difficult is it to construct situations where no location can be inferred?|has_answer|trivial
-PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|has_question|What are some of the cues that can be used to determine a location?
-What are some of the cues that can be used to determine a location?|has_answer|landmarks, weather patterns, vegetation, road markings, and architectural details
-PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location.
Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|has_question|What is an example of a cue that humans are good at integrating to geolocate images? -What is an example of a cue that humans are good at integrating to geolocate images?|has_answer|en-masse -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|has_question|What is the photo geolocation problem usually approached using in computer vision? -What is the photo geolocation problem usually approached using in computer vision?|has_answer|image retrieval methods -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. 
Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|has_question|What is the problem with photo geolocation? -What is the problem with photo geolocation?|has_answer|classification -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|has_question|What is PlaNet able to integrate? -What is PlaNet able to integrate?|has_answer|multiple visible cues -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. 
Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|has_question|What is the resulting model called? -What is the resulting model called?|has_answer|PlaNet -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|has_question|What does PlaNet extend its model to? -What does PlaNet extend its model to?|has_answer|photo albums -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. 
Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|has_question|How much performance improvement does PlaNet achieve over a single-image model? -How much performance improvement does PlaNet achieve over a single-image model?|has_answer|50% -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|What is the term for Extreme Multi-Label Text Classification? 
-What is the term for Extreme Multi-Label Text Classification?|has_answer|Label-aware Document Representation via Hybrid Attention -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|What does XMTC aim at? -What does XMTC aim at?|has_answer|tagging a document with most relevant labels from an extremely large-scale label set -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. 
Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|Why is it difficult to build a classifier for tail labels? -Why is it difficult to build a classifier for tail labels?|has_answer|there are only few training documents -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|What are two things that this paper is motivated to better explore the semantic relationship between each document and extreme labels? -What are two things that this paper is motivated to better explore the semantic relationship between each document and extreme labels?|has_answer|document content and label correlation -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. 
The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|What is LAHA? -What is LAHA?|has_answer|to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|How many parts does LAHA consist of? -How many parts does LAHA consist of?|has_answer|three -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. 
Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|Which part of LAHA adopts a multi-label self-attention mechanism to detect the contribution of each word to labels? -Which part of LAHA adopts a multi-label self-attention mechanism to detect the contribution of each word to labels?|has_answer|first -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|Which part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space? -Which part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space?|has_answer|The second part -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. 
Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|What is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated? -What is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated?|has_answer|adaptive fusion strategy -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. 
The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|How many benchmark datasets have been used? -How many benchmark datasets have been used?|has_answer|six -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|has_question|The results show the superiority of our proposed LAHA method, especially on what? -The results show the superiority of our proposed LAHA method, especially on what?|has_answer|tail labels -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. 
By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|has_question|What is the embedding method specifically designed for NED? -What is the embedding method specifically designed for NED?|has_answer|Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|has_question|How many models are used to extend the skip-gram model? -How many models are used to extend the skip-gram model?|has_answer|two -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|has_question|What graph model learns the relatedness of entities using the link structure of the KB? 
-What graph model learns the relatedness of entities using the link structure of the KB?|has_answer|KB -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|has_question|What is proposed for NED? -What is proposed for NED?|has_answer|novel embedding method -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|has_question|The proposed method jointly maps words and entities into what? -The proposed method jointly maps words and entities into what?|has_answer|the same continuous vector space -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. 
The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|has_question|What was the state-of-the-art accuracy on the TAC 2010 dataset? -What was the state-of-the-art accuracy on the TAC 2010 dataset?|has_answer|85.2% -Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|What is the name of the indispensable machine learning tool for achieving human-level performance on many learning tasks? 
-What is the name of the indispensable machine learning tool for achieving human-level performance on many learning tasks?|has_answer|Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|Why is it difficult to understand which aspects of the input data drive the decisions of the network?
-Why is it difficult to understand which aspects of the input data drive the decisions of the network?|has_answer|black-box
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|How many real-world scenarios exist in which humans need to make actionable decisions based on the output DNNs?
-How many real-world scenarios exist in which humans need to make actionable decisions based on the output DNNs?|has_answer|various
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|Where can decision support systems be found?
-Where can decision support systems be found?|has_answer|critical domains
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|Decisions made by DNNs must be what?
-Decisions made by DNNs must be what?|has_answer|legally or ethically defensible
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|What has the development of new methods and studies on explaining the decision-making process of DNNs blossomed into?
-What has the development of new methods and studies on explaining the decision-making process of DNNs blossomed into?|has_answer|an active research field
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|What field of study may be intimidated by the plethora of orthogonal directions the field is taking?
-What field of study may be intimidated by the plethora of orthogonal directions the field is taking?|has_answer|explainable deep learning
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|What is a problem with defining what it means to be able to explain the actions of a deep learning system?
-What is a problem with defining what it means to be able to explain the actions of a deep learning system?|has_answer|confusion
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|What does Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network offer?
-What does Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network offer?|has_answer|a field guide to deep learning explainability
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|How many simple dimensions define the space of foundational methods that contribute to explainable deep learning?
-How many simple dimensions define the space of foundational methods that contribute to explainable deep learning?|has_answer|three
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|has_question|Why is the field guide designed for those just embarking in the field?
-Why is the field guide designed for those just embarking in the field?|has_answer|easy-to-digest
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|has_question|Paragraph Vector is an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs
-Paragraph Vector is an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs|has_answer|Distributed Representations of Sentences and Documents
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|has_question|Many machine learning algorithms require the input to be represented as what?
-Many machine learning algorithms require the input to be represented as what?|has_answer|a fixed-length feature vector
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|has_question|What is one of the most common fixed-length features when it comes to texts?
-What is one of the most common fixed-length features when it comes to texts?|has_answer|bag-of-words
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|has_question|What are two major weaknesses of bag-of-words?
-What are two major weaknesses of bag-of-words?|has_answer|they lose the ordering of the words and they also ignore semantics of the words
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|has_question|What city is equally distant from bag-of-words?
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|has_question|What is an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of text?
-What is an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of text?|has_answer|Paragraph Vector
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|has_question|What does Paragraph Vector represent each document by?
-What does Paragraph Vector represent each document by?|has_answer|dense vector
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|has_question|Paragraph Vector's construction gives our algorithm the potential to overcome the weaknesses of what model?
-Paragraph Vector's construction gives our algorithm the potential to overcome the weaknesses of what model?|has_answer|bag-of-words models
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|has_question|What shows that Paragraph Vectors outperform bag-of-words models?
-What shows that Paragraph Vectors outperform bag-of-words models?|has_answer|Empirical results
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|has_question|Paragraph Vector achieves state-of-the-art results on what types of text classification and sentiment analysis tasks?
-Paragraph Vector achieves state-of-the-art results on what types of text classification and sentiment analysis tasks?|has_answer|several
-Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|has_question|What is the state-of-the-art in the field of knowledge graph embeddings?
-Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|has_question|What is now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces?
-What is now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces?|has_answer|Knowledge graph embeddings
-Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|has_question|How do we introduce the reader to the concept of knowledge graph embeddings?
-How do we introduce the reader to the concept of knowledge graph embeddings?|has_answer|by explaining what they are, how they can be generated and how they can be evaluated
-Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|has_question|How do we summarize the state-of-the-art in the field of knowledge graph embeddings?
-How do we summarize the state-of-the-art in the field of knowledge graph embeddings?|has_answer|by describing the approaches that have been introduced to represent knowledge in the vector space
-Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|has_question|In relation to knowledge representation, we consider the problem of what?
-In relation to knowledge representation, we consider the problem of what?|has_answer|explainability
-A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|has_question|What is a scenario without parallel data and abundant monolingual data unrealistic in practice?
-What is a scenario without parallel data and abundant monolingual data unrealistic in practice?|has_answer|A Call for More Rigor in Unsupervised Cross-lingual Learning
-A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|has_question|What is an existing rationale for unsupervised cross-lingual learning based on?
-What is an existing rationale for unsupervised cross-lingual learning based on?|has_answer|lack of parallel data
-A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|has_question|A scenario without parallel data and abundant monolingual data is what in practice?
-A scenario without parallel data and abundant monolingual data is what in practice?|has_answer|unrealistic -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|has_question|What have been used in previous work that depart from the pure unsupervised setting? -What have been used in previous work that depart from the pure unsupervised setting?|has_answer|different training signals -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|has_question|What do we describe in tuning and evaluation of unsupervised cross-lingual models? -What do we describe in tuning and evaluation of unsupervised cross-lingual models?|has_answer|common methodological issues -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. 
Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|has_question|What is one type of research that we provide a unified outlook for? -What is one type of research that we provide a unified outlook for?|has_answer|unsupervised machine translation -RDFj is a set of conventions forbr/- constructing JSON objects in such a way that they can easily be interpreted as RDF;br/ - taking RDF and arriving at canonical JSON objects.|has_question|What is a set of conventions for constructing JSON objects in such a way that they can easily be interpreted as RDF? -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|has_question|What is a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks? -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. 
Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|has_question|What is the versatility achieved by trying to avoid? -What is the versatility achieved by trying to avoid?|has_answer|task-specific engineering -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|has_question|Instead of exploiting man-made input features carefully optimized for each task, our system learns what? -Instead of exploiting man-made input features carefully optimized for each task, our system learns what?|has_answer|internal representations -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. 
This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|has_question|What can be applied to various natural language processing tasks? -What can be applied to various natural language processing tasks?|has_answer|unified neural network architecture and learning algorithm -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|has_question|What does the unified neural network architecture and learning algorithm try to avoid? -What does the unified neural network architecture and learning algorithm try to avoid?|has_answer|task-specific engineering -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. 
This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|has_question|What is this work used for building a freely available tagging system with good performance and minimal computational requirements? -What is this work used for building a freely available tagging system with good performance and minimal computational requirements?|has_answer|a basis -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|Deep Mutual Learning In this paper we explore a different but related idea to model distillation – what? -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – what?|has_answer|mutual learning -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. 
The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|What does distillation perform to a small untrained student? -What does distillation perform to a small untrained student?|has_answer|one-way knowledge transfer -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|What does mutual learning start with? -What does mutual learning start with?|has_answer|a pool of untrained students who learn simultaneously to solve the task together -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. 
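Before the mutual-learning variant, the one-way transfer these entries contrast it with is worth pinning down. A minimal sketch of the classic distillation objective follows; the temperature `T`, mixing weight `alpha`, and function name are assumptions for illustration, not taken from the patch:

```python
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels, T=2.0, alpha=0.5):
    """One-way knowledge transfer: the student matches the teacher's softened
    output distribution (KL term) while still fitting the hard labels."""
    soft = F.kl_div(
        F.log_softmax(student_logits / T, dim=-1),
        F.softmax(teacher_logits / T, dim=-1),
        reduction="batchmean",
    ) * (T * T)  # rescale so gradients keep a comparable magnitude
    hard = F.cross_entropy(student_logits, labels)
    return alpha * soft + (1 - alpha) * hard
```

The teacher is pre-trained and frozen; only the student's parameters receive gradients, which is exactly the asymmetry mutual learning removes.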
(2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|Who reported a benefit in quality over basic distillation? -Who reported a benefit in quality over basic distillation?|has_answer|Zhang et al. -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|What did Zhang et al. report a benefit in over basic distillation? -What did Zhang et al. 
report a benefit in over basic distillation?|has_answer|quality -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|What percentage of students report for online distillation using traditional offline distillation? -What percentage of students report for online distillation using traditional offline distillation?|has_answer|70.7% -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. 
Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|What is an effective and widely used technique to transfer knowledge from a teacher to a student network? -What is an effective and widely used technique to transfer knowledge from a teacher to a student network?|has_answer|Model distillation -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|What is the typical application of model distillation? -What is the typical application of model distillation?|has_answer|low-memory or fast execution requirements -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. 
Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|What is DML? -What is DML?|has_answer|deep mutual learning -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|What benchmarks do our experiments show that a variety of network architectures achieve compelling results on? -What benchmarks do our experiments show that a variety of network architectures achieve compelling results on?|has_answer|CIFAR-100 recognition and Market-1501 -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. 
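The deep mutual learning (DML) strategy summarized repeatedly above replaces the fixed teacher with a cohort of untrained students that imitate one another during training. A hedged sketch of that cohort objective, assuming a list of per-student logits; the paper updates peers alternately rather than via `detach`, so this is a simplification:

```python
import torch.nn.functional as F

def dml_losses(all_logits, labels, T=1.0):
    """Deep mutual learning sketch: each student gets its supervised loss plus
    a KL term pulling it towards every peer's current predictions, averaged
    over the K-1 peers. Peers are detached so each KL updates one student."""
    losses = []
    for i, logits_i in enumerate(all_logits):
        loss = F.cross_entropy(logits_i, labels)
        for j, logits_j in enumerate(all_logits):
            if i == j:
                continue
            loss = loss + F.kl_div(
                F.log_softmax(logits_i / T, dim=-1),
                F.softmax(logits_j.detach() / T, dim=-1),
                reduction="batchmean",
            ) / (len(all_logits) - 1)
        losses.append(loss)
    return losses  # optimize each student with its own loss
```

Nothing in the cohort needs to be pre-trained or more powerful than the rest, which is the "no prior powerful teacher network" finding the entries above record.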
[critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|has_question|What is required for deep mutual learning? -What is required for deep mutual learning?|has_answer|no prior powerful teacher network -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. 
Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|has_question|What is needed to pair short text fragments as a concatenation of separate words? -What is needed to pair short text fragments as a concatenation of separate words?|has_answer|Learning Semantic Similarity for Very Short Texts -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|has_question|What based method might lead to a better model for semantic content within very short text fragments? -What based method might lead to a better model for semantic content within very short text fragments?|has_answer|tf-idf -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. 
Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|has_question|The combination of word embeddings and what information might lead to a better model for semantic content within very short text fragments? -The combination of word embeddings and what information might lead to a better model for semantic content within very short text fragments?|has_answer|tf-idf -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. 
Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|has_question|Levering data on social media requires information retrieval algorithms to become able to relate very short text fragments to each other. -Levering data on social media requires information retrieval algorithms to become able to relate very short text fragments to each other.|has_answer|Twitter and Facebook -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|has_question|Traditional text similarity methods such as what are based on word overlap? -Traditional text similarity methods such as what are based on word overlap?|has_answer|tf-idf cosine-similarity -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. 
Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|has_question|What is another name for distributed word representations? -What is another name for distributed word representations?|has_answer|word embeddings -What is needed to pair short text fragments as a concatenation of separate words?|has_answer|adequate distributed sentence representation -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. 
Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|has_question|What was investigated as a combination of word embeddings in the context of semantic pair matching? -What was investigated as a combination of word embeddings in the context of semantic pair matching?|has_answer|several text representations -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|has_question|What does this paper investigate the effectiveness of for fragments of different lengths? -What does this paper investigate the effectiveness of for fragments of different lengths?|has_answer|naive techniques -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. 
Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|has_question|What based method is used to reduce the impact of less informative terms? -What based method is used to reduce the impact of less informative terms?|has_answer|tf-idf -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. 
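The hybrid these entries circle around, dense word embeddings weighted so that uninformative terms contribute less, can be sketched directly. This is an assumption-laden illustration rather than the paper's code: `word_vectors` stands for any token-to-vector mapping (e.g. pre-trained word2vec), and `dim` must match its dimensionality:

```python
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

def tfidf_weighted_embeddings(texts, word_vectors, dim=300):
    """Represent each short text as the tf-idf-weighted mean of its word
    embeddings, so frequent, less informative words are down-weighted."""
    tfidf = TfidfVectorizer()
    weights = tfidf.fit_transform(texts)          # sparse (n_texts, vocab)
    vocab = tfidf.get_feature_names_out()
    reps = np.zeros((len(texts), dim))
    for row in range(len(texts)):
        total = 0.0
        for col in weights[row].nonzero()[1]:
            token = vocab[col]
            if token in word_vectors:             # skip out-of-vocabulary words
                w = weights[row, col]
                reps[row] += w * word_vectors[token]
                total += w
        if total > 0:
            reps[row] /= total
    return reps
```

Rows of the returned matrix can then be compared with cosine similarity, which is where tf-idf down-weighting pays off for very short fragments with little word overlap.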
Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|has_question|Our new approach outperforms the existing techniques in what? -Our new approach outperforms the existing techniques in what?|has_answer|a toy experimental set-up -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|has_question|What is a method to build contextualized entity and relation embeddings? -What is a method to build contextualized entity and relation embeddings?|has_answer|Contextualized Knowledge Graph Embedding -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. 
CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|has_question|What may appear in different graph contexts? -What may appear in different graph contexts?|has_answer|Entities and relations -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|has_question|What project symbolic entities and relations into continuous vector spaces? -What project symbolic entities and relations into continuous vector spaces?|has_answer|Knowledge graph embedding -What may appear in different graph contexts?|has_answer|entities and relations -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. 
Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|has_question|What is CoKE? -What is CoKE?|has_answer|Contextualized Knowledge Graph Embedding -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|has_question|What are two types of graph contexts studied? -What are two types of graph contexts studied?|has_answer|edges and paths -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. 
Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|has_question|What does CoKE use to obtain contextualized representations? -What does CoKE use to obtain contextualized representations?|has_answer|a Transformer encoder -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. 
Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|has_question|What do CoKE's representations capture? -What do CoKE's representations capture?|has_answer|contextual meanings of entities and relations -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|has_question|Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in what two areas? -Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in what two areas?|has_answer|link prediction and path query answering -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. 
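The CoKE entries describe edges as short sequences fed to a Transformer encoder. A minimal sketch of that idea in PyTorch follows; the layer counts, dimensions, and masking scheme here are assumptions, and the authors' actual implementation is the linked PaddlePaddle repository:

```python
import torch
import torch.nn as nn

class TripleEncoder(nn.Module):
    """CoKE-style sketch: a (head, relation, tail) edge is treated as a
    length-3 sequence, a Transformer encoder contextualizes it, and the
    representation at a masked position is scored against all entities."""

    def __init__(self, n_entities, n_relations, dim=256, n_layers=2, n_heads=4):
        super().__init__()
        # one vocabulary for entities, relations (offset by n_entities), [MASK]
        self.embed = nn.Embedding(n_entities + n_relations + 1, dim)
        self.pos = nn.Parameter(torch.zeros(3, dim))  # head / relation / tail slots
        layer = nn.TransformerEncoderLayer(dim, n_heads, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, n_layers)
        self.out = nn.Linear(dim, n_entities)

    def forward(self, seq_ids, mask_position):
        # seq_ids: (batch, 3); mask_position: (batch,) index of the [MASK] slot
        x = self.embed(seq_ids) + self.pos
        h = self.encoder(x)  # contextualized entity/relation representations
        return self.out(h[torch.arange(len(seq_ids)), mask_position])
```

Training then amounts to masking the head or tail of each edge (or a step of a path) and minimizing cross-entropy against the true entity, so the same entity gets different representations in different graph contexts.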
-Deep Learning: A Critical Appraisal Although deep learning has historical roots going back decades, neither the term deep learning nor the approach was popular just over five years ago, when the field was reignited by papers such as Krizhevsky, Sutskever and Hinton's now classic (2012) deep network model of Imagenet. What has the field discovered in the five subsequent years? Against a background of considerable progress in areas such as speech recognition, image recognition, and game playing, and considerable enthusiasm in the popular press, I present ten concerns for deep learning, and suggest that deep learning must be supplemented by other techniques if we are to reach artificial general intelligence.|has_question|Who created the deep network model of Imagenet?
-Who created the deep network model of Imagenet?|has_answer|Krizhevsky, Sutskever and Hinton
-Deep Learning: A Critical Appraisal|has_question|How many years has the field of deep learning discovered?
-How many years has the field of deep learning discovered?|has_answer|five
-Deep Learning: A Critical Appraisal|has_question|What do I suggest that deep learning must be supplemented by other techniques if we are to reach?
-What do I suggest that deep learning must be supplemented by other techniques if we are to reach?|has_answer|artificial general intelligence
-Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|has_question|What draws together many topics & perspectives regarding Knowledge Graphs?
-Knowledge Graphs|has_question|Who is the co-author of Knowledge Graphs?
-Knowledge Graphs|has_question|Who wrote Knowledge Graph Embedding: A Survey of Approaches and Applications?
-Who wrote Knowledge Graph Embedding: A Survey of Approaches and Applications?|has_answer|Wang et al.
-Knowledge Graphs|has_question|What are the language models for embedding?
-What are the language models for embedding?|has_answer|Knowledge Graph Embedding: A Survey of Approaches and Applications
-Knowledge Graphs|has_question|What are used for knowledge graphs?
-What are used for knowledge graphs?|has_answer|graph-based data models and query languages
-Knowledge Graphs|has_question|What roles are discussed in knowledge graphs?
-What roles are discussed in knowledge graphs?|has_answer|schema, identity, and context
-Knowledge Graphs|has_question|What types of techniques are used to represent and extract knowledge?
-What types of techniques are used to represent and extract knowledge?|has_answer|deductive and inductive
-Knowledge Graphs|has_question|What methods do we summarize in this paper?
-What methods do we summarize in this paper?|has_answer|creation, enrichment, quality assessment, refinement, and publication of knowledge graphs
-Knowledge Graphs|has_question|What are two prominent types of knowledge graphs?
-What are two prominent types of knowledge graphs?|has_answer|open knowledge graphs and enterprise knowledge graphs
-Knowledge Graphs|has_question|What do we conclude with for knowledge graphs?
-What do we conclude with for knowledge graphs?|has_answer|high-level future research directions
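The survey's contrast between graph-based data models and deductive techniques can be made concrete in a few lines. Below is a minimal, illustrative sketch; the tag names and the `subTagOf` relation are invented for the example, not taken from the paper.

```python
# Triples as a graph-based data model, plus one deductive rule.
triples = {
    ("linked_data", "subTagOf", "semantic_web"),
    ("semantic_web", "subTagOf", "web"),
}

def deduce_transitive(kb, relation):
    """Deductive rule (a,r,b) & (b,r,c) => (a,r,c), applied to a fixpoint."""
    kb = set(kb)
    changed = True
    while changed:
        changed = False
        for a, r1, b in list(kb):
            for b2, r2, c in list(kb):
                if r1 == r2 == relation and b == b2 and (a, relation, c) not in kb:
                    kb.add((a, relation, c))
                    changed = True
    return kb

print(deduce_transitive(triples, "subTagOf"))
# adds ('linked_data', 'subTagOf', 'web') to the two asserted triples
```

An inductive technique would instead generalize from the asserted triples, e.g. by learning embeddings that score unseen edges, which is where the embedding surveys cited above come in.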
-Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category. The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. This information has been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|has_question|Current representation methods for what are not as effective as claimed in popular classification methods?
-Current representation methods for what are not as effective as claimed in popular classification methods?|has_answer|categorical metadata
-Categorical Metadata Representation for Customized Text Classification|has_question|Why are categorical features harder to represent for machine use?
-Why are categorical features harder to represent for machine use?|has_answer|available context only indirectly describes the category
-Categorical Metadata Representation for Customized Text Classification|has_question|What are examples of parts of a model that can be modified by using categorical metadata?
-What are examples of parts of a model that can be modified by using categorical metadata?|has_answer|word embeddings, attention mechanisms
-Categorical Metadata Representation for Customized Text Classification|has_question|What are current representation methods for categorical metadata devised for?
-What are current representation methods for categorical metadata devised for?|has_answer|human consumption
-Categorical Metadata Representation for Customized Text Classification|has_question|What do we propose to use to effectively incorporate categorical metadata on various parts of a neural-based model?
-What do we propose to use to effectively incorporate categorical metadata on various parts of a neural-based model?|has_answer|basis vectors
-Categorical Metadata Representation for Customized Text Classification|has_question|What is the benefit of using basis vectors to incorporate categorical metadata on various parts of a neural-based model?
-What is the benefit of using basis vectors to incorporate categorical metadata on various parts of a neural-based model?|has_answer|decreases the number of parameters dramatically
-Categorical Metadata Representation for Customized Text Classification|has_question|What is performed to show that we can represent categorical metadata more effectively to customize parts of the model?
-What is performed to show that we can represent categorical metadata more effectively to customize parts of the model?|has_answer|Extensive experiments
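The basis-vector idea can be sketched in a few lines of PyTorch: rather than a free embedding per category, each category mixes a small set of shared basis vectors, so the parameter count grows with the number of bases instead of the number of categories. The sizes and the softmax mixing below are illustrative assumptions, not the paper's exact formulation.

```python
# A hedged sketch of basis-vector category embeddings.
import torch
import torch.nn as nn

class BasisCategoryEmbedding(nn.Module):
    def __init__(self, num_categories, dim, num_bases=4):
        super().__init__()
        self.bases = nn.Parameter(torch.randn(num_bases, dim))    # shared basis vectors
        self.coeff = nn.Embedding(num_categories, num_bases)      # per-category mixing weights

    def forward(self, category_ids):
        weights = torch.softmax(self.coeff(category_ids), dim=-1)  # (batch, num_bases)
        return weights @ self.bases                                # (batch, dim)

emb = BasisCategoryEmbedding(num_categories=10_000, dim=256, num_bases=4)
vec = emb(torch.tensor([42, 7]))
print(vec.shape)  # torch.Size([2, 256]); ~10k*4 + 4*256 params instead of 10k*256
```

The same mixture weights can, in principle, customize other parts of the model (attention, encoder weights) rather than only an input embedding, which is the "various parts" point the abstract makes.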
-Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. These findings are significant as they suggest a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|has_question|What has long pursued the goal of having systems reason over explicitly provided knowledge?
-Transformers as Soft Reasoners over Language|has_question|How can transformers learn to reason?
-How can transformers learn to reason?|has_answer|using rules expressed in language
-Transformers as Soft Reasoners over Language|has_question|AI has long pursued the goal of having systems reason over explicitly provided knowledge, but what has proved challenging?
-AI has long pursued the goal of having systems reason over explicitly provided knowledge, but what has proved challenging?|has_answer|building suitable representations
-Transformers as Soft Reasoners over Language|has_question|What is the first demonstration that this is possible?
-What is the first demonstration that this is possible?|has_answer|We provide the first demonstration that this is possible
-Transformers as Soft Reasoners over Language|has_question|What do we test in a collection of synthetic datasets?
-What do we test in a collection of synthetic datasets?|has_answer|increasing levels of reasoning complexity
-Transformers as Soft Reasoners over Language|has_question|What is the accuracy of transformers learning rule-based reasoning?
-What is the accuracy of transformers learning rule-based reasoning?|has_answer|99%
-Transformers as Soft Reasoners over Language|has_question|We demonstrate that the models transfer well to what?
-We demonstrate that the models transfer well to what?|has_answer|two hand-authored rulebases
-Transformers as Soft Reasoners over Language|has_question|What is a new role for transformers?
-What is a new role for transformers?|has_answer|a limited soft theorem prover operating over explicit theories in language
-Transformers as Soft Reasoners over Language|has_question|What new possibilities do transformers have in question-answering?
-What new possibilities do transformers have in question-answering?|has_answer|explainability, correctability, and counterfactual reasoning
-Transformers as Soft Reasoners over Language|has_question|Where can you find a live demo?
-Where can you find a live demo?|has_answer|rule-reasoning.apps.allenai.org
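As a concrete illustration of the setup the abstract describes, here is a hedged sketch using the Hugging Face transformers library: a natural-language theory (facts plus a rule) is paired with a candidate conclusion and scored by a sequence classifier. The choice of roberta-base, the toy theory, and the true/false label mapping are assumptions for illustration; the classifier head is randomly initialized and only becomes a "soft reasoner" after fine-tuning on rule-reasoning data such as the paper's datasets.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
model = AutoModelForSequenceClassification.from_pretrained("roberta-base", num_labels=2)

theory = (
    "Erin is young. Erin is not kind. "                       # facts in language
    "If someone is young and not kind then they are rough."   # rule in language
)
question = "Erin is rough."                                   # candidate conclusion

inputs = tokenizer(theory, question, return_tensors="pt")     # theory/question pair
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(-1).item())                               # 0/1 ~ false/true after fine-tuning
```

No formal logic representation appears anywhere in the pipeline; the chaining happens, if at all, inside the transformer.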
-Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection (Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|has_question|What is the main idea of Neural Ranking Models with Weak Supervision?
-What is the main idea of Neural Ranking Models with Weak Supervision?|has_answer|leverage large amounts of unsupervised data to infer “weak” labels
-Neural Ranking Models with Weak Supervision|has_question|What is the only supervisor we have used to train a model which performs better than BM25 itself?
-What is the only supervisor we have used to train a model which performs better than BM25 itself?|has_answer|BM25
-Neural Ranking Models with Weak Supervision|has_question|What has achieved impressive improvements in computer vision and NLP tasks?
-What has achieved impressive improvements in computer vision and NLP tasks?|has_answer|unsupervised deep neural networks
-Neural Ranking Models with Weak Supervision|has_question|Why have improvements not been observed in ranking for information retrieval?
-Why have improvements not been observed in ranking for information retrieval?|has_answer|complexity of the ranking problem
-Neural Ranking Models with Weak Supervision|has_question|What do we propose to train a neural ranking model using?
-What do we propose to train a neural ranking model using?|has_answer|weak supervision
The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|has_question|What do we propose to train a neural ranking model using? -What do we propose to train a neural ranking model using?|has_answer|weak supervision -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). 
Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|has_question|What is an example of an unsupervised ranking model? -What is an example of an unsupervised ranking model?|has_answer|BM25 -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|has_question|What do we train a set of simple yet effective ranking models based on? -What do we train a set of simple yet effective ranking models based on?|has_answer|feed-forward neural networks -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! 
Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|has_question|What is an example of a different input representation? -What is an example of a different input representation?|has_answer|encoding query-document pairs into dense/sparse vectors -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). 
We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|has_question|How many training instances do we use to train our networks? -How many training instances do we use to train our networks?|has_answer|tens of millions -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|has_question|What are the MAP improvements over the BM25 model? -What are the MAP improvements over the BM25 model?|has_answer|over 13% and 35% -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. 
See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|has_question|What do supervised neural ranking models benefit from pre-training on? -What do supervised neural ranking models benefit from pre-training on?|has_answer|weakly labeled data -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. 
We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|has_question|What is MARGE a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document -What is MARGE a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document|has_answer|Pre-training via Paraphrasing -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|has_question|MARGE provides an alternative to what paradigm? -MARGE provides an alternative to what paradigm?|has_answer|masked language modeling paradigm -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|has_question|What is MARGE able to jointly learn to do? -What is MARGE able to jointly learn to do?|has_answer|retrieval and reconstruction -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. 
The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|has_question|The objective noisily captures aspects of what? -The objective noisily captures aspects of what?|has_answer|paraphrase, translation, multi-document summarization, and information retrieval -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|has_question|What is the BLEU score for document translation? -What is the BLEU score for document translation?|has_answer|35.8 -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|has_question|What gives strong performance on discriminative and generative tasks in many languages? -What gives strong performance on discriminative and generative tasks in many languages?|has_answer|fine-tuning -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. 
Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|has_question|What type of neural networks are the state of the art on problems such as speech recognition and computer vision? -What type of neural networks are the state of the art on problems such as speech recognition and computer vision?|has_answer|Deep Nets -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|has_question|What is the state of the art on problems such as speech recognition and computer vision? -What is the state of the art on problems such as speech recognition and computer vision?|has_answer|deep neural networks -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|has_question|Deep feed-forward networks can achieve accuracies previously only achievable with what? -Deep feed-forward networks can achieve accuracies previously only achievable with what?|has_answer|deep models -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. 
We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|has_question|How can shallow neural nets learn deep functions? -How can shallow neural nets learn deep functions?|has_answer|a total number of parameters similar to the original deep model -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|has_question|What is the name of the phoneme recognition task? -What is the name of the phoneme recognition task?|has_answer|TIMIT -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|has_question|What does our success in training shallow feed-forward nets suggest for training shallow feed-forward nets? -What does our success in training shallow feed-forward nets suggest for training shallow feed-forward nets?|has_answer|better algorithms -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. 
To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|has_question|What is the multi-label classification problem? -What is the multi-label classification problem?|has_answer|Large-scale Multi-label Learning with Missing Labels -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|has_question|What do existing approaches not adequately address? -What do existing approaches not adequately address?|has_answer|ability to handle data with missing labels -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. 
Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|has_question|How do we address the multi-label problem? -How do we address the multi-label problem?|has_answer|by studying the multi-label problem in a generic empirical risk minimization (ERM) framework -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|has_question|Our framework is surprisingly able to encompass several recent which can be derived as special cases of our method? -Our framework is surprisingly able to encompass several recent which can be derived as special cases of our method?|has_answer|label-compression based methods -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. 
Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|has_question|What is an example of a loss function that is exploited to offer efficient algorithms? -What is an example of a loss function that is exploited to offer efficient algorithms?|has_answer|the squared loss function -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|has_question|Our learning framework admits formal excess risk bounds even in the presence of what? -Our learning framework admits formal excess risk bounds even in the presence of what?|has_answer|missing labels -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. 
Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|has_question|Our risk bounds demonstrate better generalization performance for low-rank promoting what when compared to (rank insensitive) Frobenius norm regularization -Our risk bounds demonstrate better generalization performance for low-rank promoting what when compared to (rank insensitive) Frobenius norm regularization|has_answer|trace-norm regularization -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|has_question|What dataset can our methods scale up to? -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. 
In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|has_question|What can sentence embeddings be compared using? -What can sentence embeddings be compared using?|has_answer|cosine-similarity -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|has_question|What is BERT unsuitable for? -What is BERT unsuitable for?|has_answer|semantic similarity search -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). 
However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|has_question|What do simple methods such as using the CLS token give? -What do simple methods such as using the CLS token give?|has_answer|low quality sentence embeddings -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|has_question|What is STS? -What is STS?|has_answer|semantic textual similarity -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. 
- simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|has_question|How many inference computations are required to find the most similar pair in a collection of 10,000 sentences? -How many inference computations are required to find the most similar pair in a collection of 10,000 sentences?|has_answer|50 million -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. 
We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|has_question|The construction of BERT makes it unsuitable for what? -The construction of BERT makes it unsuitable for what?|has_answer|semantic similarity search -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT is unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings is not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|has_question|What is a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embed -What is a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embed|has_answer|Sentence-BERT -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT is unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings is not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS).
However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|has_question|How long does it take to find the most similar pair with SBERT? -How long does it take to find the most similar pair with SBERT?|has_answer|5 seconds -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT is unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings is not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|has_question|On what tasks does SBERT outperform other state-of-the-art sentence embeddings methods? -On what tasks does SBERT outperform other state-of-the-art sentence embeddings methods?|has_answer|common STS tasks and transfer learning tasks -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model.
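The Sentence-BERT entries above rest on one operational idea: encode each sentence once into a fixed-size vector, then compare vectors with cosine similarity, instead of running a cross-encoder over every sentence pair. Below is a minimal sketch using the sentence-transformers library linked in the entry; the checkpoint name and example sentences are illustrative assumptions, not part of the dataset.

```python
# Minimal SBERT-style similarity sketch, assuming a recent version of
# the sentence-transformers library referenced above. The checkpoint
# name is a hypothetical choice; any SBERT checkpoint would do.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")

sentences = [
    "A man is eating food.",
    "A man is eating a piece of bread.",
    "The girl is carrying a baby.",
]

# Each sentence is encoded once (linear cost in the collection size),
# instead of feeding every pair through the network (quadratic cost).
embeddings = model.encode(sentences, convert_to_tensor=True)

# Cosine similarity between all pairs of fixed-size embeddings.
scores = util.cos_sim(embeddings, embeddings)
print(scores)
```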
proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|has_question|What is structured knowledge derived from? -What is structured knowledge derived from?|has_answer|differentiable path-based recommendation model -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|has_question|What has been prevalent in both academia and industry due to their effectiveness and flexibility? -What has been prevalent in both academia and industry due to their effectiveness and flexibility?|has_answer|embedding-based recommendation models -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|has_question|What is a limitation of embedding-based recommendation models? 
-What is a limitation of embedding-based recommendation models?|has_answer|data sparsity -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|has_question|What is proposed to get around these limitations without introducing any extra overhead? -What is proposed to get around these limitations without introducing any extra overhead?|has_answer|end-to-end joint learning framework -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|has_question|Through what did we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons? -Through what did we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons?|has_answer|extensive experiments -Enhancing the Power of Cardinal's Algorithm Cardinal's factorization algorithm of 1996 splits a univariate polynomial into two factors with root sets separated by the imaginary axis, which is an important goal itself and a basic step toward root-finding. The novelty of the algorithm and its potential power have been well recognized by experts immediately, but by 2016, that is, two decades later, its practical value still remains nil, particularly because of the high computational cost of performing its final stage by means of computing approximate greatest common divisor of two polynomials. 
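The "Distilling Structured Knowledge into Embeddings" entries above describe jointly training an embedding-based recommender with a distillation signal from a differentiable path-based model. The sketch below shows the generic shape of such a distillation objective in PyTorch, assuming hypothetical teacher/student score tensors; it illustrates the pattern only, not the paper's actual framework.

```python
# Hedged sketch of a generic distillation objective: a supervised task
# loss plus a KL term pulling the student's item-score distribution
# toward the (path-based) teacher's. Names and shapes are assumptions.
import torch
import torch.nn.functional as F

def distillation_loss(student_scores, teacher_scores, labels,
                      temperature=2.0, alpha=0.5):
    """Joint loss = alpha * task loss + (1 - alpha) * distillation KL."""
    task = F.binary_cross_entropy_with_logits(student_scores, labels)
    kl = F.kl_div(
        F.log_softmax(student_scores / temperature, dim=-1),
        F.softmax(teacher_scores / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature ** 2)
    return alpha * task + (1 - alpha) * kl

# Toy usage: random scores for a batch of 4 users over 10 items.
student = torch.randn(4, 10)
teacher = torch.randn(4, 10)
labels = torch.randint(0, 2, (4, 10)).float()
print(distillation_loss(student, teacher, labels))
```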
We briefly recall Cardinal's algorithm and its difficulties, amend it based on some works performed since 1996, extend its power to splitting out factors of a more general class, and reduce the final stage of the algorithm to quite manageable computations with structured matrices. Some of our techniques can be of independent interest for matrix computations.|has_question|When was Cardinal's factorization algorithm created? -When was Cardinal's factorization algorithm created?|has_answer|1996 -Enhancing the Power of Cardinal's Algorithm Cardinal's factorization algorithm of 1996 splits a univariate polynomial into two factors with root sets separated by the imaginary axis, which is an important goal itself and a basic step toward root-finding. The novelty of the algorithm and its potential power have been well recognized by experts immediately, but by 2016, that is, two decades later, its practical value still remains nil, particularly because of the high computational cost of performing its final stage by means of computing approximate greatest common divisor of two polynomials. We briefly recall Cardinal's algorithm and its difficulties, amend it based on some works performed since 1996, extend its power to splitting out factors of a more general class, and reduce the final stage of the algorithm to quite manageable computations with structured matrices. Some of our techniques can be of independent interest for matrix computations.|has_question|In what year did Cardinal's factorization algorithm become nil? -In what year did Cardinal's factorization algorithm become nil?|has_answer|2016 -Enhancing the Power of Cardinal's Algorithm Cardinal's factorization algorithm of 1996 splits a univariate polynomial into two factors with root sets separated by the imaginary axis, which is an important goal itself and a basic step toward root-finding. The novelty of the algorithm and its potential power have been well recognized by experts immediately, but by 2016, that is, two decades later, its practical value still remains nil, particularly because of the high computational cost of performing its final stage by means of computing approximate greatest common divisor of two polynomials. We briefly recall Cardinal's algorithm and its difficulties, amend it based on some works performed since 1996, extend its power to splitting out factors of a more general class, and reduce the final stage of the algorithm to quite manageable computations with structured matrices. Some of our techniques can be of independent interest for matrix computations.|has_question|What did we reduce the final stage of Cardinal's algorithm to manageable computations with? -What did we reduce the final stage of Cardinal's algorithm to manageable computations with?|has_answer|structured matrices -Enhancing the Power of Cardinal's Algorithm Cardinal's factorization algorithm of 1996 splits a univariate polynomial into two factors with root sets separated by the imaginary axis, which is an important goal itself and a basic step toward root-finding. The novelty of the algorithm and its potential power have been well recognized by experts immediately, but by 2016, that is, two decades later, its practical value still remains nil, particularly because of the high computational cost of performing its final stage by means of computing approximate greatest common divisor of two polynomials. 
We briefly recall Cardinal's algorithm and its difficulties, amend it based on some works performed since 1996, extend its power to splitting out factors of a more general class, and reduce the final stage of the algorithm to quite manageable computations with structured matrices. Some of our techniques can be of independent interest for matrix computations.|has_question|Some of our techniques can be of independent interest for what? -Some of our techniques can be of independent interest for what?|has_answer|matrix computations -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|What helps a model focus on the most relevant parts of the input to make decisions? -What helps a model focus on the most relevant parts of the input to make decisions?|has_answer|An attention mechanism -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.).
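The "Enhancing the Power of Cardinal's Algorithm" entries above concern splitting a univariate polynomial into two factors whose root sets lie on opposite sides of the imaginary axis. The numpy sketch below shows only what that split produces, assuming an arbitrary example polynomial; Cardinal's algorithm reaches the same factorization without first computing the roots, so this is the target output, not the method.

```python
# Naive illustration of the splitting goal: partition the roots of a
# univariate polynomial by the sign of their real part and rebuild the
# two factors. Cardinal's algorithm achieves this split *without*
# root-finding; this sketch just shows what the split looks like.
import numpy as np

p = np.poly1d([1.0, 0.0, -1.0, 2.0])  # arbitrary example: x^3 - x + 2
roots = p.roots

left = roots[roots.real < 0]    # roots left of the imaginary axis
right = roots[roots.real >= 0]  # roots on or right of the axis

f_left = np.poly1d(np.poly(left))
f_right = np.poly1d(np.poly(right))

# The product of the two monic factors recovers p (up to rounding).
print(np.allclose((f_left * f_right).coeffs, p.coeffs))
```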
We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|By representing data as graphs, we can capture what? -By representing data as graphs, we can capture what?|has_answer|entities -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|What is an ever-growing body of work focused on? -What is an ever-growing body of work focused on?|has_answer|graph mining -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|What can pose a problem for effective graph mining? 
-What can pose a problem for effective graph mining?|has_answer|noisy -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|What is an effective way to deal with the problem of noisy graphs? -What is an effective way to deal with the problem of noisy graphs?|has_answer|incorporate attention into graph mining solutions -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|An attention mechanism allows a method to focus on what parts of the graph? 
-An attention mechanism allows a method to focus on what parts of the graph?|has_answer|task-relevant -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|What do we conduct in this work? -What do we conduct in this work?|has_answer|a comprehensive and focused survey of the literature on the emerging field of graph attention models -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|How many intuitive taxonomies are introduced to group existing work? 
-How many intuitive taxonomies are introduced to group existing work?|has_answer|three -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|What are problem setting? -What are problem setting?|has_answer|type of input and output -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|How do we motivate our taxonomies? -How do we motivate our taxonomies?|has_answer|detailed examples -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. 
By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|has_question|What do we discuss in our taxonomies? -What do we discuss in our taxonomies?|has_answer|promising directions for future work -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. 
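The "Attention Models in Graphs" entries above survey mechanisms that let a model weight task-relevant parts of a graph. Below is a minimal single-head, GAT-style attention layer in PyTorch, one concrete instance of the mechanisms the survey categorizes; the dense-adjacency formulation and the toy graph are simplifying assumptions for brevity, not code from the survey.

```python
# Minimal single-head GAT-style layer over a dense adjacency matrix:
# score every node pair e_ij = LeakyReLU(a^T [W h_i || W h_j]), mask
# non-edges, softmax-normalize, and aggregate neighbor features.
import torch
import torch.nn as nn
import torch.nn.functional as F

class GraphAttentionLayer(nn.Module):
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.W = nn.Linear(in_dim, out_dim, bias=False)
        self.a = nn.Linear(2 * out_dim, 1, bias=False)

    def forward(self, x, adj):
        h = self.W(x)  # (N, out_dim)
        n = h.size(0)
        # Concatenate every (h_i, h_j) pair and score it.
        pairs = torch.cat(
            [h.unsqueeze(1).expand(n, n, -1),
             h.unsqueeze(0).expand(n, n, -1)], dim=-1)
        e = F.leaky_relu(self.a(pairs).squeeze(-1), 0.2)
        # Attend only over existing edges, then normalize per node.
        e = e.masked_fill(adj == 0, float("-inf"))
        alpha = torch.softmax(e, dim=-1)  # (N, N) attention weights
        return alpha @ h                  # attended node features

# Toy graph: 4 nodes with self-loops plus a chain of edges.
adj = torch.tensor([[1, 1, 0, 0],
                    [1, 1, 1, 0],
                    [0, 1, 1, 1],
                    [0, 0, 1, 1]])
layer = GraphAttentionLayer(8, 16)
out = layer(torch.randn(4, 8), adj)
print(out.shape)  # torch.Size([4, 16])
```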
By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|has_question|The use of natural language to facilitate communication between the expert programmer and apprentice AI system? -The use of natural language to facilitate communication between the expert programmer and apprentice AI system?|has_answer|Amanuensis: The Programmer's Apprentice -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|has_question|When was Amanuensis: The Programmer's Apprentice taught at Stanford? -When was Amanuensis: The Programmer's Apprentice taught at Stanford?|has_answer|spring quarter of 2018 -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. 
The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|has_question|The course draws upon insight from what to implement hybrid connectionist and symbolic reasoning systems? -The course draws upon insight from what to implement hybrid connectionist and symbolic reasoning systems?|has_answer|cognitive and systems neuroscience -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. 
This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|has_question|What do digital assistants provide initial value as? -What do digital assistants provide initial value as?|has_answer|powerful analytical, computational and mathematical savants -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. 
By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|has_question|Whose work extends the [#Global Workspace Theory](/tag/global_workspace_theory)? -Whose work extends the [#Global Workspace Theory](/tag/global_workspace_theory)?|has_answer|Dehaene -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|has_question|Whose concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest -Whose concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest|has_answer|Yoshua Bengio -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence.
As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|has_question|When was the course taught at Stanford? -When was the course taught at Stanford?|has_answer|spring quarter of 2018 -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. 
As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|has_question|The course draws upon insight from what to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning? -The course draws upon insight from what to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning?|has_answer|cognitive and systems neuroscience -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. 
By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|has_question|What are cognitive strategies? -What are cognitive strategies?|has_answer|domain-relevant problem solving skills -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|has_question|What do these savants effectively serve as? -What do these savants effectively serve as?|has_answer|cognitive extensions and digital prostheses -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. 
This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|has_question|What did the emergence of pre-trained models bring natural language processing to? -What did the emergence of pre-trained models bring natural language processing to?|has_answer|a new era -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|has_question|What does this survey provide about pre-trained models for NLP? -What does this survey provide about pre-trained models for NLP?|has_answer|comprehensive review -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|has_question|What do we introduce in this survey? -What do we introduce in this survey?|has_answer|language representation learning -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|has_question|How many perspectives do pre-trained models have? -How many perspectives do pre-trained models have?|has_answer|four -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research.
This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|has_question|What do we describe how to adapt the knowledge of PTMs to? -What do we describe how to adapt the knowledge of PTMs to?|has_answer|downstream tasks -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|has_question|What do we outline some potential directions of PTMs for? -What do we outline some potential directions of PTMs for?|has_answer|future research -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|has_question|What is the purpose of this survey? -What is the purpose of this survey?|has_answer|hands-on guide -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. 
As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|What is the name of the theory that describes the process of encoding words? -What is the name of the theory that describes the process of encoding words?|has_answer|Semantic Folding Theory -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|What has been able to reach human levels of performance so far? -What has been able to reach human levels of performance so far?|has_answer|No computer system -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. 
Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|What is the only known computational system capable of proper language processing? -What is the only known computational system capable of proper language processing?|has_answer|the human brain -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|What is the nature of the brain's fundamental computational processes? -What is the nature of the brain's fundamental computational processes?|has_answer|obscure -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. 
No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|The lack of a sound computational brain theory prevents the fundamental understanding of what? -The lack of a sound computational brain theory prevents the fundamental understanding of what?|has_answer|Natural Language Processing -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks.
As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|When science lacks a theoretical foundation, what is applied to accommodate as many sampled real-world data as possible? -When science lacks a theoretical foundation, what is applied to accommodate as many sampled real-world data as possible?|has_answer|statistical modeling -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|What is the actual representation of language within the brain called? -What is the actual representation of language within the brain called?|has_answer|Representational Problem -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. 
An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|What is the name of Jeff Hawkins' theory of the human cortex? -What is the name of Jeff Hawkins' theory of the human cortex?|has_answer|Hierarchical Temporal Memory -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|What is the process of using a topographic semantic space as distributional reference frame into a sparse binary representational vector called? 
-What is the process of using a topographic semantic space as distributional reference frame into a sparse binary representational vector called?|has_answer|Semantic Folding -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|What does Hawkins' Hierarchical Temporal Memory theory use? -What does Hawkins' Hierarchical Temporal Memory theory use?|has_answer|HTM networks -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document.
Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|What is a generic similarity function that can solve many complex NLP problems? -What is a generic similarity function that can solve many complex NLP problems?|has_answer|Euclidian Distance -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|has_question|What are some practical problems of statistical NLP systems? -What are some practical problems of statistical NLP systems?|has_answer|high cost of computation -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. 
We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|has_question|What is the name of the first neural end-to-end entity linking model? -What is the name of the first neural end-to-end entity linking model?|has_answer|End-to-End Neural Entity Linking -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|has_question|What are the key components of the first neural end-to-end entity linking model? -What are the key components of the first neural end-to-end entity linking model?|has_answer|word, entity and mention embeddings -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. 
The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|has_question|What is an essential task for semantic text understanding and information extraction? -What is an essential task for semantic text understanding and information extraction?|has_answer|Entity Linking -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|has_question|What does MD stand for? -What does MD stand for?|has_answer|Mention Detection -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. 
Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|has_question|What is proposed that jointly discovers and links entities in a text document? -What is proposed that jointly discovers and links entities in a text document?|has_answer|first neural end-to-end EL system -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|has_question|What is the main idea of the first neural end-to-end EL system that jointly discovers and links entities in a text document? -What is the main idea of the first neural end-to-end EL system that jointly discovers and links entities in a text document?|has_answer|all possible spans -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. 
Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|has_question|What are the key components of the end-to-end EL system? -What are the key components of the end-to-end EL system?|has_answer|context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|has_question|When enough training data is available, our end-to-end method significantly outperforms popular systems on what platform? -When enough training data is available, our end-to-end method significantly outperforms popular systems on what platform?|has_answer|Gerbil platform -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. 
queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|has_question|What do testing datasets follow compared to the training set? -What do testing datasets follow compared to the training set?|has_answer|different annotation conventions -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|has_question|What offers the best or second best EL accuracy? -What offers the best or second best EL accuracy?|has_answer|ED model coupled with a traditional NER system -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|has_question|What is a novel neural architecture that enables learning dependency beyond a fixed length without disrupting temporal coherence? 
-What is a novel neural architecture that enables learning dependency beyond a fixed length without disrupting temporal coherence?|has_answer|Transformer-XL -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|has_question|What does Transformer-XL do? -What does Transformer-XL do?|has_answer|enables learning dependency beyond a fixed length without disrupting temporal coherence -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|has_question|What does Transformer-XL consist of? -What does Transformer-XL consist of?|has_answer|segment-level recurrence mechanism and a novel positional encoding scheme -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. 
Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|has_question|What problem does Transformer-XL solve? -What problem does Transformer-XL solve?|has_answer|context fragmentation -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|has_question|Transformer-XL learns dependency that is what longer than RNNs? -Transformer-XL learns dependency that is what longer than RNNs?|has_answer|80% longer -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. 
Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|has_question|What is the state-of-the-art result of bpc/perplexity on enwiki8? -What is the state-of-the-art result of bpc/perplexity on enwiki8?|has_answer|1 -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|has_question|How many tokens does Transformer-XL generate when trained only on WikiText-103? -How many tokens does Transformer-XL generate when trained only on WikiText-103?|has_answer|thousands of tokens -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|has_question|Transformer-XL's code, pretrained models, and hyperparameters are available in what two languages? -Transformer-XL's code, pretrained models, and hyperparameters are available in what two languages?|has_answer|Tensorflow and PyTorch -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. 
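The Transformer-XL rows above keep referring to its segment-level recurrence mechanism. As a rough sketch of that idea only (not code from this patch, and omitting the paper's relative positional encoding scheme), the following caches the previous segment's hidden states and lets the current segment attend over them without back-propagating into the cache; all dimensions and weights are toy values:

```python
import torch

def attend_with_memory(h, mem, w_q, w_k, w_v):
    """One attention step over the extended context [mem; h].
    h: current segment (seg_len, d); mem: cached previous segment (mem_len, d)."""
    context = torch.cat([mem.detach(), h], dim=0)  # stop-gradient through the cache
    q = h @ w_q                                    # queries come from the current segment only
    k, v = context @ w_k, context @ w_v            # keys/values span the extended context
    attn = torch.softmax(q @ k.t() / (k.size(-1) ** 0.5), dim=-1)
    return attn @ v

d, seg_len = 16, 4
w_q, w_k, w_v = (torch.randn(d, d) for _ in range(3))
mem = torch.zeros(seg_len, d)                      # empty memory before the first segment
for _ in range(3):                                 # consecutive segments of a long sequence
    h = torch.randn(seg_len, d)
    out = attend_with_memory(h, mem, w_q, w_k, w_v)
    mem = h                                        # current states become the next segment's memory
```

Because the cache is detached, gradients never cross segment boundaries, yet at evaluation time each position still sees the previous segment's context; this is what lets the model capture dependencies longer than the fixed segment length.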
We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|has_question|What act as intermediate representations that enable parameter sharing between classes? -What act as intermediate representations that enable parameter sharing between classes?|has_answer|Label-Embedding for Image Classification Attributes -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|has_question|Each class is embedded in the space of what? -Each class is embedded in the space of what?|has_answer|attribute vectors -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|has_question|What measures the compatibility between an image and a label embedding? 
-What measures the compatibility between an image and a label embedding?|has_answer|a function -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|has_question|How are the parameters of the function learned? -How are the parameters of the function learned?|has_answer|on a training set of labeled samples -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|has_question|Which datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario? -Which datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario?|has_answer|Animals With Attributes and Caltech-UCSD-Birds -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. 
Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|has_question|Label embedding has built-in ability to leverage what instead of or in addition to attributes? -Label embedding has built-in ability to leverage what instead of or in addition to attributes?|has_answer|alternative sources of information -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|has_question|Label embedding has a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g -Label embedding has a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g|has_answer|class hierarchies or textual descriptions -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|has_question|What encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples? 
-What encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples?|has_answer|label embedding -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|has_question|What does the Yahoo! Knowledge Graph use for Entity Recommendation? -What does the Yahoo! Knowledge Graph use for Entity Recommendation?|has_answer|Layered Graph Embedding -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|has_question|What is the embedding-based entity recommendation framework for Wikipedia? -What is the embedding-based entity recommendation framework for Wikipedia?|has_answer|Knowledge Graph -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! 
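The label-embedding rows above revolve around a compatibility function between an image and a class's attribute vector; in the underlying paper this takes a bilinear form F(x, y) = θ(x)ᵀ W φ(y), trained with a ranking loss so the correct class scores above incorrect ones. A minimal sketch with made-up dimensions (not the authors' code):

```python
import torch

n_feat, n_attr, n_classes = 64, 16, 5
W = torch.randn(n_feat, n_attr, requires_grad=True)   # learned compatibility matrix
phi = torch.randn(n_classes, n_attr)                  # fixed per-class attribute vectors

def scores(x):
    """Compatibility F(x, y) = x^T W phi(y) for every class y."""
    return (x @ W) @ phi.t()

x, y = torch.randn(n_feat), 2                         # one image feature vector, its true class
s = scores(x)
mask = torch.ones(n_classes)
mask[y] = 0.0                                         # no hinge term for the correct class
loss = (torch.clamp(1.0 + s - s[y], min=0.0) * mask).sum()
loss.backward()                                       # gradient flows only into W
```

Swapping φ from attribute vectors to class-hierarchy or textual-description embeddings is what gives label embedding its ability to leverage alternative side information.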
Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|has_question|What do the embeddings and recommendations perform well in? -What do the embeddings and recommendations perform well in?|has_answer|quality and user engagement -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|has_question|What languages does the embedding-based entity recommendation framework provide default entity recommendations for? -What languages does the embedding-based entity recommendation framework provide default entity recommendations for?|has_answer|English and other languages -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. 
Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|has_question|What is Wikipedia a core subset of? -What is Wikipedia a core subset of?|has_answer|Knowledge Graph -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|has_question|The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of what? -The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of what?|has_answer|novel deep learning architectures -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|has_question|What has the focus of past machine learning research for Reading Comprehension tasks been primarily on? -What has the focus of past machine learning research for Reading Comprehension tasks been primarily on?|has_answer|the design of novel deep learning architectures -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. 
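The layered-graph rows above describe learning complementary entity representations per graph layer and combining them with a lightweight learning-to-rank step. The sketch below is one plausible rendering of that pipeline, with random embeddings standing in for trained ones and fixed weights standing in for a fitted ranker; nothing here is the Yahoo! system:

```python
import numpy as np

rng = np.random.default_rng(0)
n_entities, dim = 100, 32
# one embedding table per graph layer (e.g. link graph, category graph, content)
layers = [rng.normal(size=(n_entities, dim)) for _ in range(3)]

def features(src, dst):
    """Per-layer cosine similarities between two entities."""
    return np.array([e[src] @ e[dst] / (np.linalg.norm(e[src]) * np.linalg.norm(e[dst]))
                     for e in layers])

w = np.array([0.5, 0.3, 0.2])   # weights a learning-to-rank model would fit; fixed here

def recommend(src, k=5):
    scored = [(dst, float(w @ features(src, dst)))
              for dst in range(n_entities) if dst != src]
    return sorted(scored, key=lambda t: -t[1])[:k]

print(recommend(0))
```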
the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|has_question|What is the impact of seemingly minor choices made on the use of pre-trained word embeddings and the representation of out-of-voc -What is the impact of seemingly minor choices made on the use of pre-trained word embeddings and the representation of out-of-voc|has_answer|larger -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|has_question|What do we do to researchers working in this area? -What do we do to researchers working in this area?|has_answer|provide recommendations -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|has_question|What is available in the fastai lib? 
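The reading-comprehension rows above single out two seemingly minor choices: whether to use pre-trained word embeddings, and how to represent out-of-vocabulary tokens at test time. A toy illustration of the second choice, with a hypothetical two-word vocabulary rather than a real embedding file:

```python
import numpy as np

rng = np.random.default_rng(0)
dim = 8
pretrained = {"the": rng.normal(size=dim), "cat": rng.normal(size=dim)}

def embed(token, oov="random"):
    """Look up a pre-trained vector; fall back to an OOV strategy at test time."""
    if token in pretrained:
        return pretrained[token]
    if oov == "zeros":                         # strategy A: a single all-zero vector
        return np.zeros(dim)
    pretrained[token] = rng.normal(size=dim)   # strategy B: random but stable per token
    return pretrained[token]

print(embed("cat")[:2], embed("dog")[:2])
```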
-What is available in the fastai lib?|has_answer|Universal Language Model Fine-tuning for Text Classification code -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|has_question|What is ULMFiT? -What is ULMFiT?|has_answer|Universal Language Model Fine-tuning -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|has_question|How much error does ULMFiT reduce on most datasets? -How much error does ULMFiT reduce on most datasets?|has_answer|18-24% -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|has_question|What does ULMFiT match the performance of training from scratch on? 
-What does ULMFiT match the performance of training from scratch on?|has_answer|100x more data -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|has_question|What do we do with our pretrained models and code? -What do we do with our pretrained models and code?|has_answer|open-source -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy tex, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|has_question|Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like what? -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy tex, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. 
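Among the ULMFiT fine-tuning techniques the rows above allude to, the slanted triangular learning-rate schedule is the easiest to show in isolation: a short linear warm-up followed by a long linear decay. The sketch below is ours, not the fastai implementation; the constants are the values commonly cited as the paper's defaults (cut_frac=0.1, ratio=32):

```python
def slanted_triangular_lr(t, total_steps, lr_max=0.01, cut_frac=0.1, ratio=32):
    """Linear warm-up for the first cut_frac of steps, then linear decay.
    ratio controls how far below lr_max the schedule starts and ends."""
    cut = int(total_steps * cut_frac)
    p = t / cut if t < cut else 1 - (t - cut) / (total_steps - cut)
    return lr_max * (1 + p * (ratio - 1)) / ratio

for t in (0, 5, 10, 50, 99):   # peaks at step 10 of 100, then decays
    print(t, round(slanted_triangular_lr(t, 100), 5))
```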
This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|has_question|What type of relevance application is entity linking useful for? -What type of relevance application is entity linking useful for?|has_answer|machine learning -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy tex, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|has_question|What is an example of a well-formed text? -What is an example of a well-formed text?|has_answer|news articles -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy tex, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. 
This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|has_question|What is a production system for entity disambiguation on noisy text? -What is a production system for entity disambiguation on noisy text?|has_answer|Pangloss -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy tex, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|has_question|What percentage of state-of-the-art results did Pangloss achieve in F1? -What percentage of state-of-the-art results did Pangloss achieve in F1?|has_answer|5% -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy tex, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. 
This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|has_question|What is an example of low-memory environments? -What is an example of low-memory environments?|has_answer|mobile phones -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|has_question|What is the name of the distributed representation of hypertext documents? -What is the name of the distributed representation of hypertext documents?|has_answer|hyperdoc2vec -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|has_question|What do conventional text embedding methods suffer from if directly adapted to hyper-documents? 
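The Pangloss rows above combine key-phrase identification with a semantic similarity engine over context-dependent embeddings. The following is an assumed, heavily simplified two-stage sketch of that pattern, with an invented phrase table and random vectors; it is not the Pangloss code:

```python
import numpy as np

rng = np.random.default_rng(0)
dim = 8
entity_vecs = {"Paris_(city)": rng.normal(size=dim),
               "Paris_(mythology)": rng.normal(size=dim)}
phrase_table = {"paris": ["Paris_(city)", "Paris_(mythology)"]}   # spotted key phrases
word_vecs = {w: rng.normal(size=dim)
             for w in ("paris", "capital", "france", "helen", "troy")}

def link(tokens):
    """Stage 1: spot phrases via the table; stage 2: pick the candidate
    entity whose vector best matches the context embedding."""
    ctx = np.mean([word_vecs[t] for t in tokens if t in word_vecs], axis=0)

    def cos(e):
        v = entity_vecs[e]
        return v @ ctx / (np.linalg.norm(v) * np.linalg.norm(ctx))

    return [(t, max(phrase_table[t], key=cos)) for t in tokens if t in phrase_table]

print(link(["paris", "capital", "france"]))
```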
-What do conventional text embedding methods suffer from if directly adapted to hyper-documents?|has_answer|information loss -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|has_question|How many criteria characterize necessary information that hyper-document embedding models should preserve? -How many criteria characterize necessary information that hyper-document embedding models should preserve?|has_answer|four -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|has_question|What are the two tasks that hyperdoc2vec is compared to other embedding models? -What are the two tasks that hyperdoc2vec is compared to other embedding models?|has_answer|paper classification and citation recommendation -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|has_question|What validates the superiority of hyperdoc2vec to other models? -What validates the superiority of hyperdoc2vec to other models?|has_answer|Analyses and experiments -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. 
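The hyperdoc2vec rows above concern embedding hyper-documents without losing link information. As we read the model, each document gets an IN vector (its role as a citation target) and an OUT vector (its role as citing content), and a citation is scored by matching the averaged source-plus-context representation against the target's IN vector. A toy rendering of that scoring idea, with random stand-in data:

```python
import numpy as np

rng = np.random.default_rng(0)
n_docs, n_words, dim = 4, 10, 8
doc_in = rng.normal(size=(n_docs, dim))    # a document's vector as a citation target
doc_out = rng.normal(size=(n_docs, dim))   # a document's vector as citing content
word_vecs = rng.normal(size=(n_words, dim))

def citation_score(src, context_word_ids, dst):
    """Average the source's OUT vector with the context-word vectors,
    then match the result against the target's IN vector."""
    h = np.mean(np.vstack([doc_out[src], word_vecs[context_word_ids]]), axis=0)
    return float(h @ doc_in[dst])          # higher = more plausible citation

print(citation_score(0, [1, 2, 3], 2))
```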
IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|has_question|On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on what? -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on what?|has_answer|student and teacher architectures -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|has_question|What is the ICCV? -What is the ICCV?|has_answer|IEEE International Conference on Computer Vision -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. 
We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|has_question|What is not a panacea? -What is not a panacea?|has_answer|knowledge distillation -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|has_question|What is an approach to mitigate the problem of student capacity being too low to successfully mimic a teacher? -What is an approach to mitigate the problem of student capacity being too low to successfully mimic a teacher?|has_answer|stopping teacher training early -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. 
We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|has_question|What is the main observation of knowledge distillation that we try to tease apart the factors that affect knowledge distillation performance? -What is the main observation of knowledge distillation that we try to tease apart the factors that affect knowledge distillation performance?|has_answer|more accurate teachers often don't make good teachers -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|has_question|What do not often make better teachers? -What do not often make better teachers?|has_answer|larger models -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. 
Our results generalize across datasets and models.|has_question|We show that larger models do not often make better teachers because of what? -We show that larger models do not often make better teachers because of what?|has_answer|mismatched capacity -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|has_question|We find typical ways of circumventing this to be what? -We find typical ways of circumventing this to be what?|has_answer|ineffective -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|has_question|How can this effect be mitigated? -How can this effect be mitigated?|has_answer|stopping the teacher's training early -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. 
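The rows above repeatedly quote the abstract of "On the Efficacy of Knowledge Distillation". For readers browsing this dump, a minimal sketch of the distillation objective being discussed may help; this is generic Hinton-style distillation in PyTorch, not the paper's code, and the temperature `T`, mixing weight `alpha`, and toy tensors are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels, T=2.0, alpha=0.5):
    """Hinton-style knowledge distillation: cross-entropy on hard labels plus
    a KL term pushing the student's softened distribution towards the teacher's.
    An early-stopped teacher (as the paper suggests) simply supplies
    teacher_logits saved from a checkpoint before full convergence."""
    soft_teacher = F.log_softmax(teacher_logits / T, dim=-1)
    soft_student = F.log_softmax(student_logits / T, dim=-1)
    # T**2 keeps the soft-term gradients on the same scale as the hard term.
    kd = F.kl_div(soft_student, soft_teacher, reduction="batchmean", log_target=True) * T**2
    ce = F.cross_entropy(student_logits, labels)
    return alpha * kd + (1.0 - alpha) * ce

# Toy usage: batch of 4 examples, 10 classes.
student_logits = torch.randn(4, 10, requires_grad=True)
teacher_logits = torch.randn(4, 10)
labels = torch.randint(0, 10, (4,))
distillation_loss(student_logits, teacher_logits, labels).backward()
```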
Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|has_question|Our results generalize across what? -Our results generalize across what?|has_answer|datasets and models -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|has_question|What is a hierarchical multi-task approach for learning Embeddings from Semantic Tasks? -What is a hierarchical multi-task approach for learning Embeddings from Semantic Tasks?|has_answer|A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. 
The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|has_question|What is the main issue with settings in which multi-task learning has a significant effect? -What is the main issue with settings in which multi-task learning has a significant effect?|has_answer|a lack of understanding -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|has_question|What is the hierarchical model trained on? -What is the hierarchical model trained on?|has_answer|carefully selected semantic tasks -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. 
This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|has_question|What is the model trained in a hierarchical fashion to do? -What is the model trained in a hierarchical fashion to do?|has_answer|introduce an inductive bias -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|has_question|What are some of the tasks the hierarchical model achieves state-of-the-art results on? -What are some of the tasks the hierarchical model achieves state-of-the-art results on?|has_answer|Named Entity Recognition, Entity Mention Detection and Relation Extraction -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. 
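The rows above describe HMTL's hierarchical supervision: simple tasks on the hidden states of lower layers, more complex tasks on higher layers. A minimal sketch of that layout (layer types, sizes, and task heads are illustrative, not the authors' implementation):

```python
import torch
import torch.nn as nn

class HierarchicalMTL(nn.Module):
    """Toy hierarchical multi-task model: a low-level head (e.g. NER) is
    supervised on a lower shared layer, a higher-level head (e.g. relation
    typing) on a layer further up, inducing the inductive bias described above."""

    def __init__(self, vocab_size=1000, dim=64, n_ner_tags=9, n_rel_types=5):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, dim)
        self.lower = nn.LSTM(dim, dim, batch_first=True)  # shared bottom layer
        self.upper = nn.LSTM(dim, dim, batch_first=True)  # shared top layer
        self.ner_head = nn.Linear(dim, n_ner_tags)        # reads lower states
        self.rel_head = nn.Linear(dim, n_rel_types)       # reads upper states

    def forward(self, tokens):
        h_low, _ = self.lower(self.embed(tokens))
        h_high, _ = self.upper(h_low)
        return self.ner_head(h_low), self.rel_head(h_high)

ner_logits, rel_logits = HierarchicalMTL()(torch.randint(0, 1000, (2, 12)))
```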
The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|has_question|What induces a set of shared semantic representations at lower layers of the model? -What induces a set of shared semantic representations at lower layers of the model?|has_answer|hierarchical training supervision -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|has_question|As we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent what kind of semantic information? -As we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent what kind of semantic information?|has_answer|more complex -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|has_question|What paper shows how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together? 
-What paper shows how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together?|has_answer|Advances in Pre-Training Distributed Word Representations -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|has_question|Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from what? -Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from what?|has_answer|large text corpora -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|has_question|What do we show how to train high-quality word vector representations by using? -What do we show how to train high-quality word vector representations by using?|has_answer|known tricks -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|has_question|What type of pre-trained models outperform the current state of the art? 
-What type of pre-trained models outperform the current state of the art?|has_answer|publicly available pre-trained models -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|has_question|What is the method for refining vector space representations using relational information from semantic lexicons? -What is the method for refining vector space representations using relational information from semantic lexicons?|has_answer|encouraging linked words to have similar vector representations -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models.
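The "Advances in Pre-Training Distributed Word Representations" rows do not list the tricks they combine; two tricks commonly used in that line of work are phrase merging and subword n-grams. A rough gensim sketch of those two (the corpus and hyperparameters are toys, and gensim's FastText does not expose everything the paper's fastText variant used, e.g. position-dependent weighting):

```python
from gensim.models import FastText
from gensim.models.phrases import Phrases

# Tiny illustrative corpus; a real run would use a large corpus
# (news collections, Wikipedia, Common Crawl).
corpus = [["new", "york", "is", "a", "city"],
          ["word", "vectors", "from", "new", "york", "times", "text"]]

# Trick 1: merge frequent collocations into single phrase tokens ("new_york").
phrases = Phrases(corpus, min_count=1, threshold=0.1)
phrased_corpus = [phrases[sentence] for sentence in corpus]

# Trick 2: subword information via character n-grams, with skip-gram training.
model = FastText(phrased_corpus, vector_size=32, window=5, min_count=1,
                 min_n=3, max_n=6, sg=1, epochs=10)
print(model.wv.most_similar("new_york", topn=2))
```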
Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|has_question|What is a technique for using lexical relational resources to obtain higher quality semantic vectors called? -What is a technique for using lexical relational resources to obtain higher quality semantic vectors called?|has_answer|retrofitting -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|has_question|Retrofitting can be used on pre-trained word vectors obtained using what? -Retrofitting can be used on pre-trained word vectors obtained using what?|has_answer|any vector training model -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed.
Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|has_question|What are vector space word representations learned from? -What are vector space word representations learned from?|has_answer|distributional information of words in large corpora -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|has_question|What do statistics disregard that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database? -What do statistics disregard that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database?|has_answer|valuable information -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. 
This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|has_question|What is a method for refining vector space representations using relational information from semantic lexicons? -What is a method for refining vector space representations using relational information from semantic lexicons?|has_answer|encouraging linked words to have similar vector representations -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|has_question|How many languages are used to evaluate a method for refining vector space representations using semantic information from semantic lexicons? -How many languages are used to evaluate a method for refining vector space representations using semantic information from semantic lexicons?|has_answer|several languages -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. 
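The retrofitting rows describe a post-processing step over a lexicon-derived graph. A minimal NumPy sketch of the coordinate update from the paper, with uniform weights (the `alpha`/`beta` values and the toy lexicon are illustrative):

```python
import numpy as np

def retrofit(word_vecs, lexicon, n_iters=10, alpha=1.0, beta=1.0):
    """Iteratively move each vector towards the average of its lexicon
    neighbours while staying close to its original (distributional) value:
        q_i <- (alpha * q_hat_i + beta * sum_j q_j) / (alpha + beta * deg(i))
    """
    new_vecs = {w: v.copy() for w, v in word_vecs.items()}
    for _ in range(n_iters):
        for word, neighbours in lexicon.items():
            neighbours = [n for n in neighbours if n in new_vecs]
            if word not in new_vecs or not neighbours:
                continue
            neighbour_sum = np.sum([new_vecs[n] for n in neighbours], axis=0)
            new_vecs[word] = (alpha * word_vecs[word] + beta * neighbour_sum) / (
                alpha + beta * len(neighbours))
    return new_vecs

# Toy usage: two synonyms are pulled towards each other.
vecs = {"happy": np.array([1.0, 0.0]), "glad": np.array([0.0, 1.0])}
print(retrofit(vecs, {"happy": ["glad"], "glad": ["happy"]})["happy"])
```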
[github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|has_question|What method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms? -What method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms?|has_answer|refinement -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|has_question|What is a deep relational learning framework that learns entity and relationship representations across multiple graphs? -What is a deep relational learning framework that learns entity and relationship representations across multiple graphs?|has_answer|LinkNBed -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal.
We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|has_question|What is a vital component to achieve our goal? -What is a vital component to achieve our goal?|has_answer|entity linkage across graphs -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|has_question|What is the goal of LinkNBed: Multi-Graph Representation Learning with Entity Linkage? -What is the goal of LinkNBed: Multi-Graph Representation Learning with Entity Linkage?|has_answer|learn entity and relationship representations across multiple graphs -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal.
We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|has_question|Knowledge graphs have given rise to the construction of numerous large scale but incomplete what encoding information extracted from various resources? -Knowledge graphs have given rise to the construction of numerous large scale but incomplete what encoding information extracted from various resources?|has_answer|knowledge graphs -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. 
Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|has_question|An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of what? -An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of what?|has_answer|knowledge-based inference -What is a deep relational learning framework that learns entity and relationship representations across multiple graphs?|has_answer|LinkNBed -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|has_question|What is a vital component of LinkNBed? -What is a vital component of LinkNBed?|has_answer|entity linkage -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal.
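The LinkNBed rows describe a joint objective: relational learning on each graph plus an entity-linkage term aligning entities matched across graphs. A very rough sketch of such an objective (DistMult-style scoring stands in for the paper's richer encoder, negative sampling is omitted, and all tensors are toys):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

emb_a = nn.Embedding(100, 32)  # entities of graph A
emb_b = nn.Embedding(80, 32)   # entities of graph B
rel = nn.Embedding(10, 32)     # relations, shared across graphs

def score(h, r, t):
    # DistMult-style triple score (a stand-in, not LinkNBed's encoder).
    return (h * r * t).sum(-1)

def multi_graph_loss(triples_a, triples_b, linkage_pairs):
    """Relational loss on each graph plus a linkage term pulling together
    the embeddings of entities asserted to be the same across graphs."""
    ha, ra, ta = triples_a.unbind(dim=1)
    hb, rb, tb = triples_b.unbind(dim=1)
    rel_loss = (F.softplus(-score(emb_a(ha), rel(ra), emb_a(ta))).mean()
                + F.softplus(-score(emb_b(hb), rel(rb), emb_b(tb))).mean())
    link_loss = F.mse_loss(emb_a(linkage_pairs[:, 0]), emb_b(linkage_pairs[:, 1]))
    return rel_loss + link_loss

loss = multi_graph_loss(torch.randint(0, 10, (16, 3)),
                        torch.randint(0, 10, (16, 3)),
                        torch.randint(0, 10, (8, 2)))
```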
We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|has_question|What is the goal of LinkNBed? -What is the goal of LinkNBed?|has_answer|build an efficient multi-task training procedure -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|has_question|Experiments on link prediction and entity linkage demonstrate what over the state-of-the-art relational learning approaches? -Experiments on link prediction and entity linkage demonstrate what over the state-of-the-art relational learning approaches?|has_answer|substantial improvements -Same architecture as autoencoder, but make strong assumptions concerning the distribution of latent variables. They use variational approach for latent representation learning ("Stochastic Gradient Variational Bayes" (SGVB) training algorithm)|has_question|What do they have the same architecture as? -What do they have the same architecture as?|has_answer|autoencoder -Same architecture as autoencoder, but make strong assumptions concerning the distribution of latent variables. They use variational approach for latent representation learning ("Stochastic Gradient Variational Bayes" (SGVB) training algorithm)|has_question|What approach do they use for latent representation learning? -What approach do they use for latent representation learning?|has_answer|variational approach -supervised learning models used for classification and regression analysis. An SVM model is a representation of the training examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. Non-probabilistic binary linear classifier (some methods exist to use SVM in a probabilistic classification setting). Can be made non-linear with the kernel trick (implicitly mapping the inputs into high-dimensional feature spaces.) |has_question|What is used for classification and regression analysis?
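The autoencoder rows just above describe a variational autoencoder: the architecture of a plain autoencoder, trained with the SGVB estimate of the evidence lower bound. A minimal sketch with the reparameterization trick (layer sizes are illustrative):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyVAE(nn.Module):
    """Minimal variational autoencoder: same encoder/decoder shape as a plain
    autoencoder, but the encoder outputs a Gaussian over the latent code and
    training minimises the negative ELBO (SGVB estimator)."""

    def __init__(self, x_dim=784, z_dim=16):
        super().__init__()
        self.enc = nn.Linear(x_dim, 2 * z_dim)  # -> (mu, log_var)
        self.dec = nn.Linear(z_dim, x_dim)

    def forward(self, x):
        mu, log_var = self.enc(x).chunk(2, dim=-1)
        # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I),
        # so gradients flow through mu and log_var.
        z = mu + torch.exp(0.5 * log_var) * torch.randn_like(mu)
        recon = torch.sigmoid(self.dec(z))
        # Negative ELBO: reconstruction term + KL(q(z|x) || N(0, I)).
        rec = F.binary_cross_entropy(recon, x, reduction="sum")
        kl = -0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp())
        return rec + kl

TinyVAE()(torch.rand(8, 784)).backward()
```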
-What is used for classification and regression analysis?|has_answer|supervised learning models -supervised learning models used for classification and regression analysis. An SVM model is a representation of the training examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. Non-probabilistic binary linear classifier (some methods exist to use SVM in a probabilistic classification setting). Can be made non-linear with the kernel trick (implicitly mapping the inputs into high-dimensional feature spaces.) |has_question|An SVM model is a representation of training examples as what? -An SVM model is a representation of training examples as what?|has_answer|points in space -supervised learning models used for classification and regression analysis. An SVM model is a representation of the training examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. Non-probabilistic binary linear classifier (some methods exist to use SVM in a probabilistic classification setting). Can be made non-linear with the kernel trick (implicitly mapping the inputs into high-dimensional feature spaces.) |has_question|What type of binary linear classifier can be made non-linear with the kernel trick? -What type of binary linear classifier can be made non-linear with the kernel trick?|has_answer|Non-probabilistic -supervised learning models used for classification and regression analysis. An SVM model is a representation of the training examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. Non-probabilistic binary linear classifier (some methods exist to use SVM in a probabilistic classification setting). Can be made non-linear with the kernel trick (implicitly mapping the inputs into high-dimensional feature spaces.) |has_question|What can an SVM be made non-linear with? -What can an SVM be made non-linear with?|has_answer|kernel trick -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|has_question|What is the name of the first application of BERT to document classification? -What is the name of the first application of BERT to document classification?|has_answer|DocBERT: BERT for Document Classification -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels.
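For the SVM rows above, a short scikit-learn illustration of the kernel trick: a linear SVM fails on data that is not linearly separable, while an RBF kernel implicitly maps the inputs into a high-dimensional feature space where a separating hyperplane exists (dataset and settings are toys):

```python
from sklearn.datasets import make_circles
from sklearn.svm import SVC

# Concentric circles: not linearly separable in the input space.
X, y = make_circles(n_samples=200, factor=0.3, noise=0.05, random_state=0)

linear = SVC(kernel="linear").fit(X, y)
rbf = SVC(kernel="rbf").fit(X, y)  # kernel trick: implicit high-dim mapping

print("linear accuracy:", linear.score(X, y))  # poor
print("rbf accuracy:   ", rbf.score(X, y))     # near perfect
# SVC(probability=True) adds Platt scaling, one of the methods the note
# alludes to for using SVMs in a probabilistic setting.
```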
Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|has_question|What is one thing that might lead one to think that BERT is not the best model for document classification? -What is one thing that might lead one to think that BERT is not the best model for document classification?|has_answer|documents often have multiple labels -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|has_question|How many popular datasets can a classification model using BERT achieve state of the art across? -How many popular datasets can a classification model using BERT achieve state of the art across?|has_answer|four -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|has_question|To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to what? -To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to what?|has_answer|small bidirectional LSTMs -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets.
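The DocBERT rows describe distilling BERT-large into small bidirectional LSTMs. A sketch of what such a student classifier might look like (all sizes are guesses, not the paper's configuration); it would be trained against the teacher's logits, e.g. with a loss like the distillation sketch earlier in this dump:

```python
import torch
import torch.nn as nn

class BiLSTMStudent(nn.Module):
    """Small bidirectional-LSTM document classifier of the kind DocBERT
    distills BERT-large into; dimensions here are illustrative guesses."""

    def __init__(self, vocab_size=30522, dim=300, hidden=256, n_labels=4):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, dim)
        self.lstm = nn.LSTM(dim, hidden, batch_first=True, bidirectional=True)
        self.out = nn.Linear(2 * hidden, n_labels)

    def forward(self, tokens):
        h, _ = self.lstm(self.embed(tokens))
        # Max-pool over time, then classify.
        return self.out(h.max(dim=1).values)

logits = BiLSTMStudent()(torch.randint(0, 30522, (2, 128)))
```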
To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|has_question|What is the primary contribution of our paper? -What is the primary contribution of our paper?|has_answer|improved baselines -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What is codistillation? -What is codistillation?|has_answer|using the same architecture for all the models -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. 
In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What gets better results than codistillation? -What gets better results than codistillation?|has_answer|codistillation using different data -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. 
However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What models are successfully transmitting useful information about different parts of the training data to each other? -What models are successfully transmitting useful information about different parts of the training data to each other?|has_answer|codistilling models -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. 
Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What techniques promise model quality improvements when paired with almost any base model? -What techniques promise model quality improvements when paired with almost any base model?|has_answer|ensembling and distillation -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|Why are ensembling and distillation challenging to use in industrial settings? 
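The codistillation loop described in the rows above is easy to state in code. The following is a minimal PyTorch-style sketch, not part of this patch: the two networks, the optimizer, and the weight alpha are hypothetical placeholders. Each model is trained on its usual task loss plus a penalty for disagreeing with its peer's (possibly stale) predictions, with the distillation term active from the start of training, before either model has converged.

import torch
import torch.nn.functional as F

def codistillation_step(model, peer, batch, targets, optimizer, alpha=0.5):
    """One codistillation update for `model`; `peer` acts as a (possibly stale) teacher."""
    logits = model(batch)
    task_loss = F.cross_entropy(logits, targets)
    # The peer is not updated here, so its predictions carry no gradients; in a
    # distributed run they could come from a rarely transmitted stale copy of its weights.
    with torch.no_grad():
        peer_probs = F.softmax(peer(batch), dim=-1)
    distill_loss = F.kl_div(F.log_softmax(logits, dim=-1), peer_probs, reduction="batchmean")
    loss = task_loss + alpha * distill_loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()

Running this step symmetrically for both networks, each on its own (possibly disjoint) shard of the data, gives the two-way knowledge sharing the abstract describes.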
-Why are ensembling and distillation challenging to use in industrial settings?|has_answer|increased test-time cost -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What is a variant of distillation that does not require a complicated multi-stage setup or many new hyperparameters? -What is a variant of distillation that does not require a complicated multi-stage setup or many new hyperparameters?|has_answer|online distillation -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged.
In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What does online distillation enable us to use to fit very large datasets about twice as fast? -What does online distillation enable us to use to fit very large datasets about twice as fast?|has_answer|parallelism -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. 
However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What does online distillation do for neural network training? -What does online distillation do for neural network training?|has_answer|speed up training -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. 
These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What subsets of data can two neural networks train on? -What subsets of data can two neural networks train on?|has_answer|disjoint -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What can be safely computed using predictions from a stale version of the other model? -What can be safely computed using predictions from a stale version of the other model?|has_answer|weights -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. 
using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What is a cost-effective way to make the exact predictions of a model dramatically more reproducible? -What is a cost-effective way to make the exact predictions of a model dramatically more reproducible?|has_answer|online distillation -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. 
Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|has_question|What is the largest dataset used for neural language modeling? -What is the largest dataset used for neural language modeling?|has_answer|the Common Crawl-based dataset containing $6\\times 10^{11}$ tokens -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks.
We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|has_question|The paper assumes no math knowledge beyond what you learned in which course? -The paper assumes no math knowledge beyond what you learned in which course?|has_answer|calculus 1 -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|has_question|Who is this material intended for? -Who is this material intended for?|has_answer|those who are already familiar with the basics of neural networks -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai.
Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|has_question|go back and do what if you get stuck at some point along the way? -go back and do what if you get stuck at some point along the way?|has_answer|reread -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|has_question|What category at forums.fast.ai? -What category at forums.fast.ai?|has_answer|Theory -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|has_question|What section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here? -What section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here?|has_answer|a reference section -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. 
Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|has_question|Where can related articles be found? -Where can related articles be found?|has_answer|http://explained.ai -Train a model in an unsupervised way on a large amount of data, and then fine-tune it to achieve good performance on many different tasks|has_question|How do you train a model on a large amount of data? -How do you train a model on a large amount of data?|has_answer|Train a model in an unsupervised way -How do you train a model on a large amount of data?|has_answer|fine-tune it -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|has_question|For how many years have deep metric learning papers consistently claimed great advances in accuracy? -For how many years have deep metric learning papers consistently claimed great advances in accuracy?|has_answer|four -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|has_question|What do we do in this paper to see if the claims are true? -What do we do in this paper to see if the claims are true?|has_answer|take a closer look -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|has_question|What do we find in the experimental setup of metric learning papers?
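The "train in an unsupervised way, then fine-tune" recipe in the rows above is the pattern the transformer-related rows keep coming back to. Below is a minimal, illustrative sketch using the Hugging Face transformers API; it is not part of this patch, and the checkpoint name, toy data, and label count are placeholders.

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Start from a checkpoint pre-trained in an unsupervised way on a large corpus...
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)

# ...then fine-tune it on a (here, toy) labelled dataset for the downstream task.
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
texts, labels = ["great movie", "terrible movie"], torch.tensor([1, 0])
inputs = tokenizer(texts, padding=True, return_tensors="pt")
model.train()
optimizer.zero_grad()
loss = model(**inputs, labels=labels).loss
loss.backward()
optimizer.step()

Only the small classification head is new; the pre-trained body is reused, which is what lets the fine-tuned model perform well with little labelled data.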
-What do we find in the experimental setup of metric learning papers?|has_answer|flaws -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|has_question|Experimental results show that the improvements over time have been what at best? -Experimental results show that the improvements over time have been what at best?|has_answer|marginal -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|has_question|What is the name of the method used in Bootstrapping Semantic Services? -What is the name of the method used in Bootstrapping Semantic Services?|has_answer|Interactive Concept Mining on Personal Data -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. 
A prototypical implementation demonstrates major steps of our approach.|has_question|What is the problem with Bootstrapping Semantic Desktops? -What is the problem with Bootstrapping Semantic Desktops?|has_answer|cold start -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|has_question|What is the user's personal information sphere in the beginning of a Semantic service? -What is the user's personal information sphere in the beginning of a Semantic service?|has_answer|not represented by the system -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|has_question|What creates 1:1 representations of the different information items? -What creates 1:1 representations of the different information items?|has_answer|Information extraction tools -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. 
Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|has_question|What, found for example in file names, mail subjects or in the content body of these items, is not extracted? -What, found for example in file names, mail subjects or in the content body of these items, is not extracted?|has_answer|Higher level concepts -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|has_question|What can leaving higher level concepts out of a system lead to? -What can leaving higher level concepts out of a system lead to?|has_answer|underperformance
In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|has_question|What is a consequence of making every found term a concept? -What is a consequence of making every found term a concept?|has_answer|clutter the arising knowledge graph with non-helpful relations -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|has_question|What approach proposes concept candidates gathered by exploiting schemata of usual personal information management applications and analysing the personal information sphere using various metrics -What approach proposes concept candidates gathered by exploiting schemata of usual personal information management applications and analysing the personal information sphere using various metrics|has_answer|interactive concept mining -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. 
To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|has_question|What allows to easily rank and give feedback on proposed concept candidates? -What allows to easily rank and give feedback on proposed concept candidates?|has_answer|graphical user interface -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|has_question|What demonstrates major steps of our approach? -What demonstrates major steps of our approach?|has_answer|prototypical implementation -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|has_question|What does the model have to do to make the final linking decisions? 
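As an illustration of the concept-mining entries above, here is a toy sketch of the candidate-proposal step: score terms from a small "personal information sphere" with one simple metric (TF-IDF) and keep only user-approved candidates. The library, documents, and rejection set are assumptions of this sketch; the paper's schemata-based gathering and its GUI are not modeled.

```python
# Toy concept-candidate mining: rank terms from a personal corpus by a
# simple metric (TF-IDF here) and keep only user-approved candidates.
# Assumptions: scikit-learn is available; documents and the "rejected"
# set stand in for the paper's schemata exploitation and feedback GUI.
from sklearn.feature_extraction.text import TfidfVectorizer

documents = [  # e.g. file names, mail subjects, content bodies
    "quarterly budget report for the sales team",
    "meeting notes: budget review with the sales team",
    "holiday photos from the sales offsite",
]

vectorizer = TfidfVectorizer(stop_words="english")
tfidf = vectorizer.fit_transform(documents)

# Score each term by its maximum TF-IDF over the corpus and rank.
scores = tfidf.max(axis=0).toarray().ravel()
terms = vectorizer.get_feature_names_out()
candidates = sorted(zip(terms, scores), key=lambda x: -x[1])[:5]

# Stand-in for the interactive feedback loop: the user drops a candidate,
# so only concepts actually considered relevant survive.
rejected = {"team"}
approved = [t for t, s in candidates if t not in rejected]
print(approved)
```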
-What does the model have to do to make the final linking decisions?|has_answer|the model must read these descriptions together with the mention context -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|has_question|What are two common linking cues not available in this setting? -What are two common linking cues not available in this setting?|has_answer|entity alias tables and link popularity -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|has_question|What is the two stage approach for zero-shot linking based on? -What is the two stage approach for zero-shot linking based on?|has_answer|fine-tuned BERT architectures -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. 
In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|has_question|What is used in the first stage of retrieval in a dense space? -What is used in the first stage of retrieval in a dense space?|has_answer|a bi-encoder -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|has_question|What concatenates the mention and entity text? -What concatenates the mention and entity text?|has_answer|cross-encoder -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|has_question|How much absolute gain does our approach achieve on a recently introduced zero-shot entity linking benchmark? 
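The two-stage pattern in the entity-linking entries above (bi-encoder retrieval in a dense space, then a cross-encoder over the concatenated mention and entity text) can be sketched with generic public checkpoints. This is not the paper's fine-tuned model, only the retrieve-then-rerank shape; the sentence-transformers models and the toy entity descriptions are assumptions.

```python
# Two-stage linking in the style described above: a bi-encoder retrieves
# candidates in a dense space, a cross-encoder reranks them.
# Assumptions: generic public checkpoints, not the paper's fine-tuned
# BERT models; the mention and entity descriptions are toy data.
from sentence_transformers import SentenceTransformer, CrossEncoder, util

mention = "Paris, the hero of the Trojan war"
entities = {
    "Paris (city)": "Paris is the capital and most populous city of France.",
    "Paris (mythology)": "Paris was a Trojan prince who eloped with Helen.",
    "Paris Hilton": "Paris Hilton is an American media personality.",
}

# Stage 1: embed mention and descriptions independently, retrieve top-k.
bi_encoder = SentenceTransformer("all-MiniLM-L6-v2")
mention_emb = bi_encoder.encode(mention, convert_to_tensor=True)
entity_embs = bi_encoder.encode(list(entities.values()), convert_to_tensor=True)
hits = util.semantic_search(mention_emb, entity_embs, top_k=2)[0]

# Stage 2: the cross-encoder reads mention and description jointly.
cross_encoder = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
names = list(entities)
pairs = [(mention, entities[names[h["corpus_id"]]]) for h in hits]
scores = cross_encoder.predict(pairs)
best = names[hits[int(scores.argmax())]["corpus_id"]]
print(best)  # expected: "Paris (mythology)"
```

The design point these entries emphasize: stage 1 embeds mention and entities independently, so entity vectors can be precomputed and indexed, while stage 2 pays the cost of joint encoding only for the few retrieved candidates.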
-How much absolute gain does our approach achieve on a recently introduced zero-shot entity linking benchmark?|has_answer|5 point -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|has_question|What is the result on TACKBP-2010? -What is the result on TACKBP-2010?|has_answer|state-of-the-art -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. 
We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|What generalizing beyond one's experiences remains a formidable challenge for modern AI? -What generalizing beyond one's experiences remains a formidable challenge for modern AI?|has_answer|graph networks -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|What is another name for artificial intelligence? -What is another name for artificial intelligence?|has_answer|AI -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. 
Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|Why has AI made major progress in key domains such as vision, language, control, and decision-making? -Why has AI made major progress in key domains such as vision, language, control, and decision-making?|has_answer|cheap data and cheap compute resources -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. 
Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|Many defining characteristics of human intelligence remain what for current approaches? -Many defining characteristics of human intelligence remain what for current approaches?|has_answer|out of reach -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. 
As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|What is a hallmark of human intelligence from infancy? -What is a hallmark of human intelligence from infancy?|has_answer|generalizing beyond one's experiences -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|What are the three parts of this paper? -What are the three parts of this paper?|has_answer|position paper, part review, and part unification -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. 
However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|What must be a top priority for AI to achieve human-like abilities? -What must be a top priority for AI to achieve human-like abilities?|has_answer|combinatorial generalization -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. 
We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|We reject the false choice between hand-engineering and what? -We reject the false choice between hand-engineering and what?|has_answer|end-to-end learning -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|What can relational inductive biases facilitate learning about entities, relations, and rules for composing them? 
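A minimal numpy sketch of the message-passing idea behind the graph-network block discussed above: per-edge messages computed from sender states, permutation-invariant aggregation at each receiver, then a node update. This is an illustration, not the authors' released library; all weights, shapes, and the toy graph are made up.

```python
# Minimal message-passing step in the spirit of the graph-network block:
# each node aggregates messages from its neighbours and updates its state.
import numpy as np

rng = np.random.default_rng(0)
num_nodes, dim = 4, 8
edges = [(0, 1), (1, 2), (2, 0), (2, 3)]   # directed sender -> receiver
h = rng.normal(size=(num_nodes, dim))      # node states
W_msg = rng.normal(size=(dim, dim)) / np.sqrt(dim)
W_upd = rng.normal(size=(2 * dim, dim)) / np.sqrt(2 * dim)

def gn_step(h):
    # 1) compute one message per edge from the sender's state
    messages = np.tanh(h[[s for s, _ in edges]] @ W_msg)
    # 2) aggregate messages at each receiver (sum is permutation-invariant)
    agg = np.zeros_like(h)
    for (_, r), m in zip(edges, messages):
        agg[r] += m
    # 3) update each node from its old state and the aggregate
    return np.tanh(np.concatenate([h, agg], axis=1) @ W_upd)

h = gn_step(h)
print(h.shape)  # (4, 8)
```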
-What can relational inductive biases facilitate learning about entities, relations, and rules for composing them?|has_answer|deep learning architectures -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|What is the new building block for the AI toolkit with a strong relational inductive bias? -What is the new building block for the AI toolkit with a strong relational inductive bias?|has_answer|graph network -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. 
In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|What do graph networks support? -What do graph networks support?|has_answer|relational reasoning and combinatorial generalization -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. 
We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|has_question|What did we release as a companion to this paper? -What did we release as a companion to this paper?|has_answer|open-source software library -ML technique for regression and classification problems, which produces a prediction model in the form of an ensemble of weak prediction models, typically decision trees. Allows the optimization of an arbitrary differentiable loss function. |has_question|What type of prediction models are used in the ML technique for regression and classification problems? -What type of prediction models are used in the ML technique for regression and classification problems?|has_answer|decision trees -ML technique for regression and classification problems, which produces a prediction model in the form of an ensemble of weak prediction models, typically decision trees. Allows the optimization of an arbitrary differentiable loss function. |has_question|What is the ML technique used for? -What is the ML technique used for?|has_answer|regression and classification problems -ML technique for regression and classification problems, which produces a prediction model in the form of an ensemble of weak prediction models, typically decision trees. Allows the optimization of an arbitrary differentiable loss function. |has_question|What does the ML technique allow the optimization of? -What does the ML technique allow the optimization of?|has_answer|differentiable loss function -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. 
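The three short entries a few rows above ("ML technique for regression and classification problems...") describe gradient boosting: weak learners, typically decision trees, fit stage-wise to the gradient of a chosen differentiable loss. A minimal scikit-learn sketch; the library choice, dataset, and hyperparameters are assumptions.

```python
# Gradient boosting as described in the entries above: an ensemble of
# shallow trees, each fit to the negative gradient of a differentiable
# loss (log-loss by default for classification).
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

# 100 weak learners (depth-3 trees), combined with a small learning rate.
clf = GradientBoostingClassifier(n_estimators=100, max_depth=3,
                                 learning_rate=0.1, random_state=0)
clf.fit(X_tr, y_tr)
print(round(clf.score(X_te, y_te), 3))
```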
Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|has_question|What algorithm is used when the top-k operation is implemented in an algorithmic way? -What algorithm is used when the top-k operation is implemented in an algorithmic way?|has_answer|bubble algorithm -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|has_question|What does prevalent gradient descent algorithms typically involve? -What does prevalent gradient descent algorithms typically involve?|has_answer|swapping indices -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. 
Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|has_question|What is the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set essentially? -What is the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set essentially?|has_answer|discontinuous -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. 
Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|has_question|What is the top-k operation? -What is the top-k operation?|has_answer|k largest or smallest -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|has_question|What algorithm is used if the top-k operation is implemented in an algorithmic way? -What algorithm is used if the top-k operation is implemented in an algorithmic way?|has_answer|bubble algorithm -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. 
Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|has_question|What is the proposed smoothed approximation? -What is the proposed smoothed approximation?|has_answer|the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. 
To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|has_question|What does EOT stand for? -What does EOT stand for?|has_answer|Entropic Optimal Transport -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|has_question|The gradient of the SOFT operator can be efficiently approximated based on what? -The gradient of the SOFT operator can be efficiently approximated based on what?|has_answer|optimality conditions -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. 
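The SOFT construction described above reduces to a short differentiable routine: treat the n scores as one distribution, two anchor points as the other (with masses (n-k)/n and k/n), and solve the entropic OT problem with Sinkhorn iterations. A minimal PyTorch sketch follows; the anchor choice (min/max of the scores) and ε are assumptions, and gradients here are obtained by simply backpropagating through the Sinkhorn loop rather than via the EOT optimality conditions the paper uses.

```python
import math
import torch

def soft_top_k(scores, k, eps=0.1, n_iter=200):
    """Smoothed top-k membership via entropic optimal transport (EOT).

    The n scores (mass 1/n each) are transported onto two anchors,
    min(scores) and max(scores), with target masses (n-k)/n and k/n.
    The plan column attached to the "max" anchor, rescaled by n, is a
    differentiable relaxation of the 0/1 top-k indicator vector.
    """
    n = scores.shape[0]
    anchors = torch.stack([scores.min(), scores.max()]).detach()
    cost = (scores[:, None] - anchors[None, :]) ** 2        # (n, 2) transport costs
    log_mu = torch.full((n,), math.log(1.0 / n))            # source log-masses
    log_nu = torch.log(torch.tensor([(n - k) / n, k / n]))  # target log-masses
    f, g = torch.zeros(n), torch.zeros(2)
    for _ in range(n_iter):                                 # log-domain Sinkhorn
        f = eps * (log_mu - torch.logsumexp((g[None, :] - cost) / eps, dim=1))
        g = eps * (log_nu - torch.logsumexp((f[:, None] - cost) / eps, dim=0))
    plan = torch.exp((f[:, None] + g[None, :] - cost) / eps)
    return n * plan[:, 1]                                   # entries in [0, 1], sum ~= k

scores = torch.tensor([0.3, 2.0, -1.0, 1.5, 0.1], requires_grad=True)
gamma = soft_top_k(scores, k=2)          # ~ [0, 1, 0, 1, 0], but smooth
(gamma * scores).sum().backward()        # gradients flow, unlike a hard sort-based top-k
```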
To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|has_question|What algorithms do we apply the proposed operator to? -What algorithms do we apply the proposed operator to?|has_answer|k-nearest neighbors and beam search algorithms -Unsupervised keyword/keyphrase extraction algorithm. Creates a graph of the words and relationships between them from a document (using a sliding window), then identifies the most important vertices of the graph (words) based on importance scores calculated recursively from the entire graph. |has_question|What type of algorithm creates a graph of the words and relationships between them? -What type of algorithm creates a graph of the words and relationships between them?|has_answer|Unsupervised -Unsupervised keyword/keyphrase extraction algorithm. Creates a graph of the words and relationships between them from a document (using a sliding window), then identifies the most important vertices of the graph (words) based on importance scores calculated recursively from the entire graph. |has_question|What does the unsupervised keyword/keyphrase extraction algorithm do? -What does the unsupervised keyword/keyphrase extraction algorithm do?|has_answer|identifies the most important vertices -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. 
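The unsupervised, graph-based keyword extraction just described (a TextRank-style method) can be sketched with networkx: build a co-occurrence graph over a sliding window and rank vertices with PageRank, whose scores are exactly the recursively computed importances mentioned above. The tokenization, window size, and filtering below are simplifying assumptions.

```python
import networkx as nx

def extract_keywords(text, window=3, top_n=5):
    """TextRank-style keywords: sliding-window co-occurrence graph + PageRank."""
    words = [w.strip(".,!?;:()").lower() for w in text.split()]
    words = [w for w in words if w.isalpha() and len(w) > 2]   # crude filtering
    graph = nx.Graph()
    for i in range(len(words)):
        for j in range(i + 1, min(i + window, len(words))):    # sliding window
            graph.add_edge(words[i], words[j])
    scores = nx.pagerank(graph)        # recursive importance over the whole graph
    return sorted(scores, key=scores.get, reverse=True)[:top_n]

print(extract_keywords(
    "The top-k operation is widely used in information retrieval, "
    "machine learning, and data mining."
))
```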
Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|has_question|What is GLoMo? -What is GLoMo?|has_answer|GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|has_question|What do deep transfer learning approaches usually transfer? -What do deep transfer learning approaches usually transfer?|has_answer|unary features -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. 
This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|has_question|From what type of data does this work explore the possibility of learning generic latent relational graphs? -From what type of data does this work explore the possibility of learning generic latent relational graphs?|has_answer|large-scale unlabeled data -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|has_question|Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as what? 
-Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as what?|has_answer|word embeddings in language and pretrained convolutional features in vision -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|has_question|What do modern deep transfer learning approaches usually transfer? -What do modern deep transfer learning approaches usually transfer?|has_answer|unary features -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. 
Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|has_question|What type of data does this work explore the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units? -What type of data does this work explore the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units?|has_answer|large-scale unlabeled data -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|has_question|What are some tasks that our proposed transfer learning framework improves performance on? -What are some tasks that our proposed transfer learning framework improves performance on?|has_answer|question answering, natural language inference, sentiment analysis, and image classification -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. 
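The transfer mechanism GLoMo describes can be pictured with a small sketch: one module predicts an affinity matrix (the latent relational graph) over the input units, and downstream features, possibly embeddings the graph was never trained on, are mixed through that graph. The modules and shapes below are illustrative assumptions, not the paper's architecture.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class LatentGraph(nn.Module):
    """Predicts an affinity matrix (a latent relational graph) over the
    input units, then propagates arbitrary downstream features through it."""

    def __init__(self, dim):
        super().__init__()
        self.query = nn.Linear(dim, dim)
        self.key = nn.Linear(dim, dim)

    def forward(self, units, features):
        # units:    (batch, n, dim) data the graph is computed from
        # features: (batch, n, d)   downstream embeddings to be mixed
        affinity = self.query(units) @ self.key(units).transpose(1, 2)
        graph = F.softmax(affinity / units.shape[-1] ** 0.5, dim=-1)  # rows sum to 1
        return graph @ features          # each unit aggregates its neighbours

tokens = torch.randn(2, 7, 32)           # e.g. vectors for 7 words
glove = torch.randn(2, 7, 50)            # embeddings the graph was NOT trained on
mixed = LatentGraph(32)(tokens, glove)   # the relational structure transfers
print(mixed.shape)                       # torch.Size([2, 7, 50])
```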
Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|has_question|What are some embeddings on which the learned graphs have not been trained? -What are some embeddings on which the learned graphs have not been trained?|has_answer|GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|has_question|What are the consequences for active learning? -What are the consequences for active learning?|has_answer|Large deviations -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. 
These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|has_question|The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for what? -The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for what?|has_answer|supervised learning -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|has_question|What is the task of querying a limited number of samples to obtain the corresponding labels? -What is the task of querying a limited number of samples to obtain the corresponding labels?|has_answer|choosing the subset of samples to be labeled from a fixed finite pool of samples -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|has_question|What do we assume the pool of samples to be? 
-What do we assume the pool of samples to be?|has_answer|a random matrix -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|has_question|What do we use to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool? -What do we use to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool?|has_answer|replica methods -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|has_question|What provide optimal achievable performance boundaries for any active learning algorithm? -What provide optimal achievable performance boundaries for any active learning algorithm?|has_answer|large deviations -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. 
In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|has_question|How can the optimal learning performance be efficiently approached? -How can the optimal learning performance be efficiently approached?|has_answer|message-passing active learning algorithms -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|has_question|What do we compare the optimal learning performance of simple message-passing active learning algorithms with? -What do we compare the optimal learning performance of simple message-passing active learning algorithms with?|has_answer|performance of some other popular active learning strategies -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. 
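For intuition about the pool-based setting this analysis targets, a baseline active-learning loop looks as follows: fit on the labeled subset, score the pool by uncertainty, and query the most uncertain sample. The large-deviation bounds above delimit what any such strategy can achieve; the uncertainty-sampling heuristic and toy teacher setup here are illustrative, not the paper's message-passing method.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
pool = rng.normal(size=(500, 20))              # fixed finite pool of samples
teacher = rng.normal(size=20)                  # single-layer "teacher" weights
labels = (pool @ teacher > 0).astype(int)      # ground-truth labels

# Seed with one example of each class so the classifier can be fitted.
queried = [int(np.flatnonzero(labels == 0)[0]),
           int(np.flatnonzero(labels == 1)[0])]
for _ in range(40):                            # label budget: 40 queries
    clf = LogisticRegression().fit(pool[queried], labels[queried])
    margin = np.abs(clf.decision_function(pool))   # distance to decision boundary
    margin[queried] = np.inf                   # never re-query a sample
    queried.append(int(margin.argmin()))       # query the most uncertain one
print("accuracy with", len(queried), "labels:", clf.score(pool, labels))
```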
Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|has_question|Deep learning tools have gained tremendous attention in what? -Deep learning tools have gained tremendous attention in what?|has_answer|applied machine learning -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|has_question|Deep learning tools do not capture model uncertainty. -Deep learning tools do not capture model uncertainty.|has_answer|regression and classification -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|has_question|What do Bayesian models usually come with? 
-What do Bayesian models usually come with?|has_answer|prohibitive computational cost -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|has_question|What does the new theoretical framework cast dropout training in deep neural networks as approximate? -What does the new theoretical framework cast dropout training in deep neural networks as approximate?|has_answer|Bayesian inference in deep Gaussian processes -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|has_question|What is a direct result of the theory casting dropout NNs as approximate Bayesian inference in deep Gaussian processes? -What is a direct result of the theory casting dropout NNs as approximate Bayesian inference in deep Gaussian processes?|has_answer|extracting information from existing models -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. 
In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|has_question|What does the new theoretical framework mitigate the problem of representing uncertainty in deep learning without sacrificing? -What does the new theoretical framework mitigate the problem of representing uncertainty in deep learning without sacrificing?|has_answer|computational complexity or test accuracy -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|has_question|What kind of study is performed on the properties of dropout's uncertainty? -What kind of study is performed on the properties of dropout's uncertainty?|has_answer|extensive -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. 
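In practice the dropout-as-Bayesian-approximation recipe is tiny: keep dropout active at prediction time and aggregate several stochastic forward passes; the mean is the prediction and the spread estimates model uncertainty. A minimal PyTorch sketch, where the architecture and dropout rate are arbitrary choices:

```python
import torch
import torch.nn as nn

model = nn.Sequential(
    nn.Linear(10, 64), nn.ReLU(), nn.Dropout(p=0.5),
    nn.Linear(64, 1),
)

def mc_dropout_predict(model, x, n_samples=100):
    """Monte Carlo dropout: stochastic forward passes approximate the
    posterior predictive; the mean is the prediction, the std the uncertainty."""
    model.train()                              # keep dropout active at test time
    with torch.no_grad():
        preds = torch.stack([model(x) for _ in range(n_samples)])
    return preds.mean(dim=0), preds.std(dim=0)

x = torch.randn(4, 10)
mean, std = mc_dropout_predict(model, x)
print(mean.squeeze(), std.squeeze())           # per-input prediction + uncertainty
```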
A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|has_question|Various network architectures and non-linearities are assessed on tasks of regression and classification using what as an example? -Various network architectures and non-linearities are assessed on tasks of regression and classification using what as an example?|has_answer|MNIST -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|has_question|We show a considerable improvement in predictive log-likelihood and what other method compared to existing state-of-the-art methods? -We show a considerable improvement in predictive log-likelihood and what other method compared to existing state-of-the-art methods?|has_answer|RMSE -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|has_question|What is the name of StarSpace? -What is the name of StarSpace?|has_answer|Embed All The Things -StarSpace: Embed All The Things! 
We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|has_question|What is StarSpace? -What is StarSpace?|has_answer|a general-purpose neural embedding model -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|has_question|What does StarSpace compare entities comprised of discrete features against each other? -What does StarSpace compare entities comprised of discrete features against each other?|has_answer|learning similarities dependent on the task -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|has_question|What type of results show that StarSpace is highly competitive with existing methods? -What type of results show that StarSpace is highly competitive with existing methods?|has_answer|Empirical -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. 
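StarSpace's core mechanism, representing every entity as the sum of its discrete-feature embeddings and training with a similarity ranking loss, fits in a few lines. The vocabulary size, cosine similarity, margin, and single negative below are illustrative assumptions, not the reference implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

n_features, dim = 1000, 50
emb = nn.Embedding(n_features, dim)        # one vector per discrete feature

def embed(feature_ids):
    """An entity (document, label, user, ...) = sum of its feature embeddings."""
    return emb(torch.tensor(feature_ids)).sum(dim=0)

def ranking_loss(entity, positive, negative, margin=0.2):
    """Rank the correct pair above a negative one by a margin (cosine similarity)."""
    return F.relu(margin
                  - F.cosine_similarity(embed(entity), embed(positive), dim=0)
                  + F.cosine_similarity(embed(entity), embed(negative), dim=0))

doc, good_label, bad_label = [1, 42, 7], [99], [500]   # bags of feature ids
loss = ranking_loss(doc, good_label, bad_label)
loss.backward()                            # all entity types share one embedding table
print(loss.item())
```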
In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|has_question|We are still far away from what that can perform consistently across several downstream tasks? -We are still far away from what that can perform consistently across several downstream tasks?|has_answer|universal encoder -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|has_question|What is it still to find comprehensive evaluations of new sentence embedding methods? -What is it still to find comprehensive evaluations of new sentence embedding methods?|has_answer|challenging -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. 
In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|has_question|What could provide inductive transfer to a wide variety of downstream tasks? -What could provide inductive transfer to a wide variety of downstream tasks?|has_answer|universal sentence encoders -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|has_question|In this work, we perform a comprehensive evaluation of recent methods using what kind of downstream and linguistic feature probing tasks? -In this work, we perform a comprehensive evaluation of recent methods using what kind of downstream and linguistic feature probing tasks?|has_answer|a wide variety -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. 
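The strong baseline this evaluation highlights, bag-of-words pooling over word embeddings, amounts to averaging token vectors. The sketch below substitutes random vectors for a real context-dependent model purely to show the pooling and the similarity comparison.

```python
import numpy as np

_vectors = {}                                   # stand-in for a real embedding model
_rng = np.random.default_rng(0)

def word_vector(token, dim=300):
    if token not in _vectors:                   # random vectors as placeholders
        _vectors[token] = _rng.normal(size=dim)
    return _vectors[token]

def sentence_embedding(sentence):
    """Bag-of-words sentence embedding: average the word vectors."""
    return np.mean([word_vector(t) for t in sentence.lower().split()], axis=0)

def cosine(a, b):
    return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

s1 = sentence_embedding("sentence embeddings for downstream tasks")
s2 = sentence_embedding("embeddings of sentences for downstream tasks")
print(cosine(s1, s2))                           # high word overlap -> high similarity
```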
We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|has_question|What are sentence encoders trained on? -What are sentence encoders trained on?|has_answer|entailment datasets -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|has_question|What can perform consistently across several downstream tasks? -What can perform consistently across several downstream tasks?|has_answer|universal encoder -Kind of supervised learning, where labels can be generated automatically. Uses signals or domain knowledge, intrinsically correlated to the data, as automatic sources of supervision, thus removing the need for humans to label data. Examples include [#autoencoders](/tag/autoencoder) and computation of [#word embeddings](/tag/word_embedding) In self-supervised learning, the system learns to predict part of its input from other parts of it input. ([Lecun](https://www.facebook.com/722677142/posts/10155934004262143/))|has_question|What type of learning uses signals or domain knowledge, intrinsically correlated to the data, as automatic sources of supervision? -What type of learning uses signals or domain knowledge, intrinsically correlated to the data, as automatic sources of supervision?|has_answer|supervised learning -Kind of supervised learning, where labels can be generated automatically. Uses signals or domain knowledge, intrinsically correlated to the data, as automatic sources of supervision, thus removing the need for humans to label data. Examples include [#autoencoders](/tag/autoencoder) and computation of [#word embeddings](/tag/word_embedding) In self-supervised learning, the system learns to predict part of its input from other parts of it input. ([Lecun](https://www.facebook.com/722677142/posts/10155934004262143/))|has_question|What are intrinsically correlated to the data? 
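(Editorial aside, not part of the data rows: the self-supervised learning description quoted above, in which labels are generated automatically from the data itself and autoencoders are one cited example, can be made concrete with a minimal sketch. Everything below, names, sizes and constants included, is invented for illustration.)

import torch
import torch.nn as nn

# Toy self-supervised setup: the model reconstructs its own (clean) input
# from a corrupted view, so no human-provided labels are needed; the
# supervision signal comes from the data itself.
torch.manual_seed(0)
x = torch.randn(256, 32)                  # "unlabeled" data
noisy = x + 0.1 * torch.randn_like(x)     # automatically generated input view

model = nn.Sequential(nn.Linear(32, 8), nn.ReLU(), nn.Linear(8, 32))
opt = torch.optim.Adam(model.parameters(), lr=1e-2)

for step in range(200):
    opt.zero_grad()
    loss = nn.functional.mse_loss(model(noisy), x)  # target = the input itself
    loss.backward()
    opt.step()

(The same pattern, predicting part of the input from other parts of the input, underlies the word-embedding training cited in the rows above.)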
-What are intrinsically correlated to the data?|has_answer|signals or domain knowledge -Kind of supervised learning, where labels can be generated automatically. Uses signals or domain knowledge, intrinsically correlated to the data, as automatic sources of supervision, thus removing the need for humans to label data. Examples include [#autoencoders](/tag/autoencoder) and computation of [#word embeddings](/tag/word_embedding) In self-supervised learning, the system learns to predict part of its input from other parts of it input. ([Lecun](https://www.facebook.com/722677142/posts/10155934004262143/))|has_question|In what type of learning does the system learn to predict part of its input from other parts of it input? -In what type of learning does the system learn to predict part of its input from other parts of it input?|has_answer|self-supervised learning -Kind of supervised learning, where labels can be generated automatically. Uses signals or domain knowledge, intrinsically correlated to the data, as automatic sources of supervision, thus removing the need for humans to label data. Examples include [#autoencoders](/tag/autoencoder) and computation of [#word embeddings](/tag/word_embedding) In self-supervised learning, the system learns to predict part of its input from other parts of it input. ([Lecun](https://www.facebook.com/722677142/posts/10155934004262143/))|has_question|Where can you find a post about self-supervised learning? -Where can you find a post about self-supervised learning?|has_answer|https://www.facebook.com/722677142 -EventKG: A Multilingual Event-Centric Temporal Knowledge Graph 690 thousand contemporary and historical events and over 2.3 million temporal relations One of the key requirements to facilitate semantic analytics of information regarding contemporary and historical events on the Web, in the news and in social media is the availability of reference knowledge repositories containing comprehensive representations of events and temporal relations. Existing knowledge graphs, with popular examples including DBpedia, YAGO and Wikidata, focus mostly on entity-centric information and are insufficient in terms of their coverage and completeness with respect to events and temporal relations. EventKG presented in this paper is a multilingual event-centric temporal knowledge graph that addresses this gap. EventKG incorporates over 690 thousand contemporary and historical events and over 2.3 million temporal relations extracted from several large-scale knowledge graphs and semi-structured sources and makes them available through a canonical representation.|has_question|How many temporal relations does EventKG contain? -How many temporal relations does EventKG contain?|has_answer|2.3 million -EventKG: A Multilingual Event-Centric Temporal Knowledge Graph 690 thousand contemporary and historical events and over 2.3 million temporal relations One of the key requirements to facilitate semantic analytics of information regarding contemporary and historical events on the Web, in the news and in social media is the availability of reference knowledge repositories containing comprehensive representations of events and temporal relations. Existing knowledge graphs, with popular examples including DBpedia, YAGO and Wikidata, focus mostly on entity-centric information and are insufficient in terms of their coverage and completeness with respect to events and temporal relations. 
EventKG presented in this paper is a multilingual event-centric temporal knowledge graph that addresses this gap. EventKG incorporates over 690 thousand contemporary and historical events and over 2.3 million temporal relations extracted from several large-scale knowledge graphs and semi-structured sources and makes them available through a canonical representation.|has_question|Existing knowledge graphs focus mostly on what? -Existing knowledge graphs focus mostly on what?|has_answer|entity-centric information -EventKG: A Multilingual Event-Centric Temporal Knowledge Graph 690 thousand contemporary and historical events and over 2.3 million temporal relations One of the key requirements to facilitate semantic analytics of information regarding contemporary and historical events on the Web, in the news and in social media is the availability of reference knowledge repositories containing comprehensive representations of events and temporal relations. Existing knowledge graphs, with popular examples including DBpedia, YAGO and Wikidata, focus mostly on entity-centric information and are insufficient in terms of their coverage and completeness with respect to events and temporal relations. EventKG presented in this paper is a multilingual event-centric temporal knowledge graph that addresses this gap. EventKG incorporates over 690 thousand contemporary and historical events and over 2.3 million temporal relations extracted from several large-scale knowledge graphs and semi-structured sources and makes them available through a canonical representation.|has_question|What type of event-centric temporal knowledge graph is presented in this paper? -What type of event-centric temporal knowledge graph is presented in this paper?|has_answer|multilingual -A Brief Introduction to Machine Learning for Engineers This monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning. The treatment concentrates on probabilistic models for supervised and unsupervised learning problems. It introduces fundamental concepts and algorithms by building on first principles, while also exposing the reader to more advanced topics with extensive pointers to the literature, within a unified notation and mathematical framework. The material is organized according to clearly defined categories, such as discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models. This monograph is meant as an entry point for researchers with a background in probability and linear algebra.|has_question|What monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning? -What monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning?|has_answer|A Brief Introduction to Machine Learning for Engineers -A Brief Introduction to Machine Learning for Engineers This monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning. The treatment concentrates on probabilistic models for supervised and unsupervised learning problems. It introduces fundamental concepts and algorithms by building on first principles, while also exposing the reader to more advanced topics with extensive pointers to the literature, within a unified notation and mathematical framework. 
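(Editorial aside: the EventKG rows above mention events and temporal relations made available through a canonical representation. The toy record below only sketches what such a canonical event entry could contain; every field name is invented here, and none of this is EventKG's actual schema.)

# Hypothetical canonical event record combining facts extracted from
# several sources, in the spirit of the description above.
event = {
    "id": "event:example-42",
    "label": "Example summit",
    "extracted_from": ["dbpedia", "yago", "wikidata"],
    "temporal_relations": [
        {"relation": "happenedOnDate", "value": "2018-03-01"},
        {"relation": "precedes", "value": "event:example-43"},
    ],
}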
The material is organized according to clearly defined categories, such as discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models. This monograph is meant as an entry point for researchers with a background in probability and linear algebra.|has_question|What is the focus of the monograph? -What is the focus of the monograph?|has_answer|probabilistic models -A Brief Introduction to Machine Learning for Engineers This monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning. The treatment concentrates on probabilistic models for supervised and unsupervised learning problems. It introduces fundamental concepts and algorithms by building on first principles, while also exposing the reader to more advanced topics with extensive pointers to the literature, within a unified notation and mathematical framework. The material is organized according to clearly defined categories, such as discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models. This monograph is meant as an entry point for researchers with a background in probability and linear algebra.|has_question|How does A Brief Introduction to Machine Learning for Engineers introduce fundamental concepts and algorithms? -How does A Brief Introduction to Machine Learning for Engineers introduce fundamental concepts and algorithms?|has_answer|building on first principles -A Brief Introduction to Machine Learning for Engineers This monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning. The treatment concentrates on probabilistic models for supervised and unsupervised learning problems. It introduces fundamental concepts and algorithms by building on first principles, while also exposing the reader to more advanced topics with extensive pointers to the literature, within a unified notation and mathematical framework. The material is organized according to clearly defined categories, such as discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models. This monograph is meant as an entry point for researchers with a background in probability and linear algebra.|has_question|What are some of the categories in the monograph? -What are some of the categories in the monograph?|has_answer|discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models -A Brief Introduction to Machine Learning for Engineers This monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning. The treatment concentrates on probabilistic models for supervised and unsupervised learning problems. It introduces fundamental concepts and algorithms by building on first principles, while also exposing the reader to more advanced topics with extensive pointers to the literature, within a unified notation and mathematical framework. The material is organized according to clearly defined categories, such as discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models. 
This monograph is meant as an entry point for researchers with a background in probability and linear algebra.|has_question|What is this monograph meant as an entry point for researchers with a background in? -What is this monograph meant as an entry point for researchers with a background in?|has_answer|probability and linear algebra -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|has_question|What is ERNIE? -What is ERNIE?|has_answer|Enhanced Language Representation with Informative Entities -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|has_question|What are used to train an enhanced language representation model? 
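(Editorial aside: the ERNIE rows beginning above describe a model trained on both text corpora and KGs so as to exploit lexical, syntactic, and knowledge information together. The hypothetical layer below only sketches the general idea of mixing a token representation with the embedding of a linked entity; it is not the paper's actual architecture, and all dimensions and names are made up.)

import torch
import torch.nn as nn

class TokenEntityFusion(nn.Module):
    """Mixes token vectors with the embeddings of the entities they link to."""

    def __init__(self, token_dim=768, entity_dim=100):
        super().__init__()
        self.proj = nn.Linear(token_dim + entity_dim, token_dim)

    def forward(self, token_vecs, entity_vecs):
        # token_vecs: (seq_len, token_dim); entity_vecs: (seq_len, entity_dim),
        # holding zeros wherever a token is not linked to any KG entity.
        return torch.tanh(self.proj(torch.cat([token_vecs, entity_vecs], dim=-1)))

fusion = TokenEntityFusion()
out = fusion(torch.randn(12, 768), torch.randn(12, 100))  # -> shape (12, 768)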
-What are used to train an enhanced language representation model?|has_answer|large-scale textual corpora and KGs -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|has_question|What do existing pre-trained language models rarely consider incorporating? -What do existing pre-trained language models rarely consider incorporating?|has_answer|knowledge graphs -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|has_question|What do informative entities in KGs enhance language representation with? -What do informative entities in KGs enhance language representation with?|has_answer|external knowledge -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. 
In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|has_question|What does ERNIE take full advantage of? -What does ERNIE take full advantage of?|has_answer|lexical, syntactic, and knowledge information -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|has_question|What is ERNIE comparable with on other common NLP tasks? -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. 
However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|has_question|Where can the source code of this paper be obtained? -Where can the source code of this paper be obtained?|has_answer|https://github.com/thunlp/ERNIE -Word2Bits - Quantized Word Vectors We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer Word vectors require significant amounts of memory and storage, posing issues to resource limited devices like mobile phones and GPUs. We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer. We train word vectors on English Wikipedia (2017) and evaluate them on standard word similarity and analogy tasks and on question answering (SQuAD). Our quantized word vectors not only take 8-16x less space than full precision (32 bit) word vectors but also outperform them on word similarity tasks and question answering.|has_question|What is the term for Quantized Word Vectors? -What is the term for Quantized Word Vectors?|has_answer|Word2Bits -Word2Bits - Quantized Word Vectors We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer Word vectors require significant amounts of memory and storage, posing issues to resource limited devices like mobile phones and GPUs. We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer. We train word vectors on English Wikipedia (2017) and evaluate them on standard word similarity and analogy tasks and on question answering (SQuAD). Our quantized word vectors not only take 8-16x less space than full precision (32 bit) word vectors but also outperform them on word similarity tasks and question answering.|has_question|What does training with the quantization function act as? -What does training with the quantization function act as?|has_answer|regularizer -Word2Bits - Quantized Word Vectors We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. 
We furthermore show that training with the quantization function acts as a regularizer Word vectors require significant amounts of memory and storage, posing issues to resource limited devices like mobile phones and GPUs. We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer. We train word vectors on English Wikipedia (2017) and evaluate them on standard word similarity and analogy tasks and on question answering (SQuAD). Our quantized word vectors not only take 8-16x less space than full precision (32 bit) word vectors but also outperform them on word similarity tasks and question answering.|has_question|How can high quality quantized word vectors be learned? -How can high quality quantized word vectors be learned?|has_answer|introducing a quantization function -Word2Bits - Quantized Word Vectors We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer Word vectors require significant amounts of memory and storage, posing issues to resource limited devices like mobile phones and GPUs. We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer. We train word vectors on English Wikipedia (2017) and evaluate them on standard word similarity and analogy tasks and on question answering (SQuAD). Our quantized word vectors not only take 8-16x less space than full precision (32 bit) word vectors but also outperform them on word similarity tasks and question answering.|has_question|Where do we train word vectors? -Where do we train word vectors?|has_answer|English Wikipedia -Word2Bits - Quantized Word Vectors We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer Word vectors require significant amounts of memory and storage, posing issues to resource limited devices like mobile phones and GPUs. We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer. We train word vectors on English Wikipedia (2017) and evaluate them on standard word similarity and analogy tasks and on question answering (SQuAD). Our quantized word vectors not only take 8-16x less space than full precision (32 bit) word vectors but also outperform them on word similarity tasks and question answering.|has_question|How much less space do our quantized word vectors take than full precision (32 bit) word vectors? -How much less space do our quantized word vectors take than full precision (32 bit) word vectors?|has_answer|8-16x less space -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. 
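(Editorial aside: the Word2Bits rows above describe learning word vectors that use only 1-2 bits per parameter through a quantization function. The post-hoc 1-bit quantizer below merely illustrates where the space saving comes from; the scale constant is arbitrary here, and the paper applies its quantization function during Word2Vec training itself rather than after the fact.)

import numpy as np

def quantize_1bit(vectors, scale=0.3):
    # Map every parameter to one of two values; each entry then carries
    # a single bit of information instead of 32 (float32).
    return np.where(vectors >= 0, scale, -scale).astype(np.float32)

vecs = np.random.randn(10000, 100).astype(np.float32)
quantized = quantize_1bit(vecs)
# Bit-packed (e.g. via np.packbits on the sign pattern plus the shared
# scale), this representation takes roughly 32x less space than float32.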
A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|What is appropriate when an incomplete KB is available with a large text corpus? -What is appropriate when an incomplete KB is available with a large text corpus?|has_answer|QA over the combination of a KB and entity-linked text -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|In practice, some questions are best answered using text, while others are best answered using what? -In practice, some questions are best answered using text, while others are best answered using what?|has_answer|KBs -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information.
Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|What is a natural question in Open Domain Question Answering? -What is a natural question in Open Domain Question Answering?|has_answer|how to effectively combine both types of information -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|How much prior work has looked at the problem of how to effectively combine both types of information? -How much prior work has looked at the problem of how to effectively combine both types of information?|has_answer|Surprisingly little prior work -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. 
Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|What is evolving from complex pipelined systems to end-to-end deep neural networks? -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|What has been developed for extracting answers from either text alone or Knowledge Bases alone? -What has been developed for extracting answers from either text alone or Knowledge Bases alone?|has_answer|Specialized neural models -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. 
In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|What is a more practical setting for Open Domain Question Answering? -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|What is a novel model for extracting answers from a question-specific subgraph containing text and KB entities and relations? -What is a novel model for extracting answers from a question-specific subgraph containing text and KB entities and relations?|has_answer|GRAFT-Net -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. 
Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|What are the benchmark tasks for GRAFT-Net? -What are the benchmark tasks for GRAFT-Net?|has_answer|varying the difficulty of questions, the amount of training data, and KB completeness -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|GRAFT-Net is competitive with the state-of-the-art when tested using what? -GRAFT-Net is competitive with the state-of-the-art when tested using what?|has_answer|KBs or text alone -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. 
Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|has_question|Where is the source code for GRAFT-Net available? -Where is the source code for GRAFT-Net available?|has_answer|https://github.com/OceanskySun/GraftNet -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. Specifically, we study two structured distillation schemes: i)pair-wise distillation that distills the pairwise similarities by building a static graph, and ii)holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|has_question|What does this paper consider transferring the structure information from large networks to small ones for dense prediction tasks? -What does this paper consider transferring the structure information from large networks to small ones for dense prediction tasks?|has_answer|Structured Knowledge Distillation for Dense Prediction -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. Specifically, we study two structured distillation schemes: i)pair-wise distillation that distills the pairwise similarities by building a static graph, and ii)holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|has_question|What is the distillation scheme used for dense prediction tasks? -What is the distillation scheme used for dense prediction tasks?|has_answer|image classification -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. 
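(Editorial aside, before the knowledge-distillation rows continue: the GRAFT-Net rows above concern QA over a KB fused with entity-linked text. The toy snippet below builds a question-specific subset mixing KB triples and sentences; the keep-anything-mentioning-a-question-entity heuristic is invented for the sketch and is far simpler than the paper's retrieval.)

# Toy early fusion: gather KB edges and entity-linked sentences relevant
# to the entities mentioned in a question (all data below is made up).
kb = [("Paris", "capital_of", "France"), ("Berlin", "capital_of", "Germany")]
sentences = ["Paris is home to the Louvre.", "Berlin hosts the Bundestag."]
question_entities = {"Paris"}

subgraph_edges = [t for t in kb if t[0] in question_entities or t[2] in question_entities]
linked_text = [s for s in sentences if any(e in s for e in question_entities)]
print(subgraph_edges)  # [('Paris', 'capital_of', 'France')]
print(linked_text)     # ['Paris is home to the Louvre.']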
Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. Specifically, we study two structured distillation schemes: i)pair-wise distillation that distills the pairwise similarities by building a static graph, and ii)holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|has_question|What do we propose to distill from large networks to small networks? -What do we propose to distill from large networks to small networks?|has_answer|structured knowledge -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. Specifically, we study two structured distillation schemes: i)pair-wise distillation that distills the pairwise similarities by building a static graph, and ii)holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|has_question|What type of similarities does pair-wise distillation distill? -What type of similarities does pair-wise distillation distill?|has_answer|pairwise -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. Specifically, we study two structured distillation schemes: i)pair-wise distillation that distills the pairwise similarities by building a static graph, and ii)holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|has_question|What are three dense prediction tasks? 
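(Editorial aside: the pair-wise scheme described above distills pairwise feature similarities rather than matching each pixel's output separately. The sketch below matches teacher and student similarity matrices; shapes and the cosine normalization are illustrative and may differ from the paper's exact formulation.)

import torch
import torch.nn.functional as F

def pairwise_similarity(feat):
    # feat: (num_nodes, channels) -> (num_nodes, num_nodes) cosine similarities
    feat = F.normalize(feat, dim=1)
    return feat @ feat.t()

teacher_feat = torch.randn(64, 256)  # e.g. pooled spatial locations
student_feat = torch.randn(64, 128)  # the student may have fewer channels

loss = F.mse_loss(pairwise_similarity(student_feat),
                  pairwise_similarity(teacher_feat))

(Both similarity matrices are num_nodes x num_nodes, so the loss stays well defined even though teacher and student channel widths differ.)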
-What are three dense prediction tasks?|has_answer|semantic segmentation, depth estimation, and object detection -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|has_question|What paper seems too good to be true? -What paper seems too good to be true?|has_answer|Stacked Approximated Regression Machine: A Simple Deep Learning Approach -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|has_question|What is the name of the type of net that can be trained quickly? -What is the name of the type of net that can be trained quickly?|has_answer|VGG -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|has_question|Who would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach? -Who would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach?|has_answer|I Zhangyang Wang -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|has_question|What was not included in the manuscript? 
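(Editorial aside: stepping back from individual records, every row in this section follows the same pipe-delimited subject|predicate|object layout, with has_question and has_answer as predicates. A minimal parser for sanity-checking such rows is sketched below; the sample row is a placeholder, and the subject is assumed to contain no pipe characters.)

def parse_triple(line):
    # maxsplit=2 keeps any stray "|" inside the object intact
    subject, predicate, obj = line.rstrip("\n").split("|", 2)
    return subject, predicate, obj

row = "Some abstract text|has_question|What does the abstract describe?"
print(parse_triple(row))  # ('Some abstract text', 'has_question', '...')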
-What was not included in the manuscript?|has_answer|Some experimental procedures -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|has_question|What was I solely responsible for in the relevant research? -What was I solely responsible for in the relevant research?|has_answer|carrying out the experiments -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|has_question|What is the latest version of the manuscript? -What is the latest version of the manuscript?|has_answer|updated text -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. 
On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|has_question|What does RNN stand for? -What does RNN stand for?|has_answer|Recurrent Neural Networks -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|has_question|What remains a challenge about the source of RNN's success? -What remains a challenge about the source of RNN's success?|has_answer|understanding and interpreting -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks.
However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|has_question|What is the name of the novel RNN architecture that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning? -What is the name of the novel RNN architecture that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning?|has_answer|Recurrent Memory Network -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge.
In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|has_question|On language modeling, RMN outperforms what network? -On language modeling, RMN outperforms what network?|has_answer|Long Short-Term Memory -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. 
Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|has_question|What does RMN capture? -What does RMN capture?|has_answer|various linguistic dimensions -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|has_question|On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains what accuracy? -On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains what accuracy?|has_answer|69.2% -What does RNN stand for?|has_answer|Recurrent Neural Networks -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. 
On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|has_question|What remains a challenge for Recurrent Neural Networks? -What remains a challenge for Recurrent Neural Networks?|has_answer|understanding and interpreting -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|has_question|What two tasks do we demonstrate the power of RMN? -What two tasks do we demonstrate the power of RMN?|has_answer|language modeling and sentence completion tasks -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. 
However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|has_question|On language modeling, on which datasets does RMN outperform the Long Short-Term Memory (LSTM) network? -On language modeling, on which datasets does RMN outperform the Long Short-Term Memory (LSTM) network?|has_answer|three large German, Italian, and English dataset -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction.
On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|has_question|What do we seek to learn models that we can interact with using high-level concepts? -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|has_question|How can we intervene on concept bottleneck models? -How can we intervene on concept bottleneck models?|has_answer|by editing their predicted concept values and propagating these changes to the final prediction -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. 
By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|has_question|When can we correct model mistakes on concepts? -When can we correct model mistakes on concepts?|has_answer|test time -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|has_question|What would a model predict if it did not think there was a bone spur in the x-ray? -What would a model predict if it did not think there was a bone spur in the x-ray?|has_answer|severe arthritis -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? 
State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|has_question|What models do not typically support the manipulation of concepts like the existence of bone spurs? -What models do not typically support the manipulation of concepts like the existence of bone spurs?|has_answer|State-of-the-art models -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|has_question|What is the classic idea of first predicting concepts that are provided at training time? -What is the classic idea of first predicting concepts that are provided at training time?|has_answer|first predicting concepts -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... 
These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|has_question|What models achieve competitive accuracy with standard end-to-end models? -What models achieve competitive accuracy with standard end-to-end models?|has_answer|concept bottleneck models -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|has_question|What extends a pre-trained neural language model by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model? -What extends a pre-trained neural language model by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model?|has_answer|$k$NN-LMs -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data.
Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|has_question|The nearest neighbors are computed according to what in the pre-trained LM embedding space? -The nearest neighbors are computed according to what in the pre-trained LM embedding space?|has_answer|distance -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|has_question|What is the new state-of-the-art perplexity of the $k$NN-LM? -What is the new state-of-the-art perplexity of the $k$NN-LM?|has_answer|15.79 -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. 
Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|has_question|How does the $k$NN-LM allow for domain adaptation? -How does the $k$NN-LM allow for domain adaptation?|has_answer|by simply varying the nearest neighbor datastore -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|has_question|What rare pattern is the nearest neighbor model particularly helpful in predicting? -What rare pattern is the nearest neighbor model particularly helpful in predicting?|has_answer|factual knowledge -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|has_question|Nearest neighbor search is an effective approach for what? 
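Stepping back to the concept bottleneck abstract quoted a few lines above: the recipe is to predict human-interpretable concepts first, predict the label from those concepts alone, and intervene at test time by overwriting predicted concept values. Below is a minimal sketch of that structure; the sigmoid concept head, the linear label head, and the caller-supplied backbone are assumptions (the paper studies several training variants, independent, sequential, and joint, that all share this shape). Training would add a concept-supervision loss, since the concepts are provided at training time, to the usual label loss.

import torch
import torch.nn as nn

class ConceptBottleneck(nn.Module):
    """x -> concepts -> label, with optional test-time intervention."""

    def __init__(self, backbone, feat_dim, n_concepts, n_classes):
        super().__init__()
        self.backbone = backbone  # any feature extractor: x -> (B, feat_dim)
        self.concept_head = nn.Linear(feat_dim, n_concepts)
        self.label_head = nn.Linear(n_concepts, n_classes)

    def forward(self, x, override=None):
        # The predicted concept probabilities are the bottleneck: the
        # label head sees nothing but them.
        c = torch.sigmoid(self.concept_head(self.backbone(x)))
        if override is not None:
            # Test-time intervention: replace selected concept values with
            # human-provided ones; the edit propagates to the prediction.
            mask, values = override  # bool and float tensors, (B, n_concepts)
            c = torch.where(mask, values, c)
        return self.label_head(c), c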
-Nearest neighbor search is an effective approach for what?|has_answer|language modeling in the long tail -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|has_question|Learning Deep Latent Spaces for Multi-Label Classification uses what? -Learning Deep Latent Spaces for Multi-Label Classification uses what?|has_answer|Deep Canonical Correlation Analysis -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|has_question|What does multi-label classification require? 
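The kNN-LM interpolation in the "Generalization through Memorization" abstract above is just p(w | c) = lambda * p_kNN(w | c) + (1 - lambda) * p_LM(w | c), where p_kNN is a softmax over negative distances between the current context embedding and stored context embeddings, each labelled with the token that followed it. A minimal sketch assuming a brute-force L2 search and a fixed lambda; the published system uses a large approximate-nearest-neighbor datastore, but the arithmetic is the same.

import torch
import torch.nn.functional as F

def knn_lm_probs(p_lm, context_vec, keys, values, vocab_size, k=8, lam=0.25):
    """Interpolate a base LM's next-token distribution with a kNN estimate.

    p_lm:        (V,) base LM probabilities for the current context.
    context_vec: (D,) embedding of the current context.
    keys:        (N, D) stored context embeddings (the datastore).
    values:      (N,) LongTensor of token ids that followed each stored context.
    """
    dists = torch.cdist(context_vec[None], keys)[0]  # (N,) L2 distances
    knn_d, idx = dists.topk(k, largest=False)        # k nearest contexts
    weights = F.softmax(-knn_d, dim=0)               # closer context, heavier weight
    p_knn = torch.zeros(vocab_size)
    p_knn.index_add_(0, values[idx], weights)        # aggregate mass per target token
    return lam * p_knn + (1 - lam) * p_lm            # linear interpolation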
-What does multi-label classification require?|has_answer|prediction of more than one label category for each input instance -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|has_question|What does C2AE stand for? -What does C2AE stand for?|has_answer|Canonical Correlated AutoEncoder -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|has_question|What is Canonical Correlated AutoEncoder aimed at? 
-What is Canonical Correlated AutoEncoder aimed at?|has_answer|better relating feature and label domain data for improved classification -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|has_question|What is the C2AE based model integrating? -What is the C2AE based model integrating?|has_answer|DNN architectures of canonical correlation analysis and autoencoder -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|has_question|Our C2AE can be easily extended to address the learning problem with what? 
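The C2AE abstract above combines two pieces: a CCA-style constraint that embeds features and labels into a shared latent space, and an autoencoder that must reconstruct the label vector from that space. The sketch below is schematic only: plain linear encoders and decoder, and a simple L2 alignment term standing in for both the full canonical-correlation objective and the paper's label-correlation-sensitive output loss.

import torch.nn as nn
import torch.nn.functional as F

class C2AESketch(nn.Module):
    """Joint feature/label embedding with label reconstruction (schematic)."""

    def __init__(self, x_dim, y_dim, latent_dim):
        super().__init__()
        self.fx = nn.Linear(x_dim, latent_dim)  # feature encoder
        self.fe = nn.Linear(y_dim, latent_dim)  # label encoder
        self.fd = nn.Linear(latent_dim, y_dim)  # label decoder

    def losses(self, x, y, alpha=1.0):
        zx, zy = self.fx(x), self.fe(y)
        align = F.mse_loss(zx, zy)  # stand-in for the CCA correlation term
        # The decoder must recover labels from both codes; at test time the
        # labels are unavailable, so prediction is fd(fx(x)) and the feature
        # path has to decode well too.
        recon = F.mse_loss(self.fd(zy), y) + F.mse_loss(self.fd(zx), y)
        return align + alpha * recon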
-Our C2AE can be easily extended to address the learning problem with what?|has_answer|missing labels -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|has_question|Our method is shown to perform favorably against state-of-the-art methods for what? -Our method is shown to perform favorably against state-of-the-art methods for what?|has_answer|multi-label classification -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|has_question|What are Knowledge Bases? -What are Knowledge Bases?|has_answer|Language Models -Language Models as Knowledge Bases? 
an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|has_question|Recent progress in pretraining language models on what led to a surge of improvements for downstream NLP tasks? -Recent progress in pretraining language models on what led to a surge of improvements for downstream NLP tasks?|has_answer|large textual corpora -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|has_question|Language models may be able to answer queries structured as what? 
-Language models may be able to answer queries structured as what?|has_answer|fill-in-the-blank cloze statements -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|has_question|What have many advantages over structured knowledge bases? -What have many advantages over structured knowledge bases?|has_answer|Language models -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. 
The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|has_question|What type of analysis of the relational knowledge already present in a wide range of state-of-the-art pretrained language models? -What type of analysis of the relational knowledge already present in a wide range of state-of-the-art pretrained language models?|has_answer|in-depth analysis -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|has_question|What does BERT do remarkably well on? -What does BERT do remarkably well on?|has_answer|open-domain question answering against a supervised baseline -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. 
The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|has_question|The ability of language models to recall factual knowledge without fine-tuning demonstrates their potential as what? -The ability of language models to recall factual knowledge without fine-tuning demonstrates their potential as what?|has_answer|unsupervised open-domain QA systems -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|has_question|Where is the code to reproduce our analysis available? -Where is the code to reproduce our analysis available?|has_answer|https://github.com/facebookresearch/LAMA -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|has_question|What are VRDs? 
-Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|has_question|What are examples of visual rich documents? -What are examples of visual rich documents?|has_answer|purchase receipts, insurance policy documents, custom declaration forms -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|has_question|What is critical for document understanding in VRDs? -What is critical for document understanding in VRDs?|has_answer|visual and layout information -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. 
Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|has_question|BiLSTM-CRF typically operate on what? -BiLSTM-CRF typically operate on what?|has_answer|text sequences -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|has_question|What is introduced to combine textual and visual information presented in VRDs? -What is introduced to combine textual and visual information presented in VRDs?|has_answer|graph convolution based model -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|has_question|What is trained to summarize the context of a text segment in a VRD? -What is trained to summarize the context of a text segment in a VRD?|has_answer|Graph embeddings -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. 
Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|has_question|What has been conducted to show that our method outperforms BiLSTM-CRF baselines? -What has been conducted to show that our method outperforms BiLSTM-CRF baselines?|has_answer|Extensive experiments -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|has_question|What studies are performed to evaluate the effectiveness of each component of our model? -What studies are performed to evaluate the effectiveness of each component of our model?|has_answer|ablation studies -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. 
We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|has_question|What is the area of machine learning interested in learning how to assign labels to time series? -What is the area of machine learning interested in learning how to assign labels to time series?|has_answer|InceptionTime: Finding AlexNet for Time Series Classification Time series classification -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|has_question|What is the state of the art of classifiers? -What is the state of the art of classifiers?|has_answer|HIVE-COTE algorithm -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. 
Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|has_question|What algorithm is infeasible to use in many applications? -What algorithm is infeasible to use in many applications?|has_answer|HIVE-COTE -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|has_question|Why has deep learning received enormous attention? -Why has deep learning received enormous attention?|has_answer|high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. 
The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|has_question|How long have the first architectures been developed for deep learning for TSC? -How long have the first architectures been developed for deep learning for TSC?|has_answer|3 years -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. 
Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|has_question|The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of what? -The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of what?|has_answer|HIVE-COTE -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|has_question|What does this paper outperform HIVE-COTE? -What does this paper outperform HIVE-COTE?|has_answer|accuracy together with scalability -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. 
The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|has_question|What is the name of the ensemble of deep Convolutional Neural Network models? -What is the name of the ensemble of deep Convolutional Neural Network models?|has_answer|InceptionTime -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|has_question|How does InceptionTime outperform HIVE-COTE? -How does InceptionTime outperform HIVE-COTE?|has_answer|win/draw/loss -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. 
For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|has_question|InceptionTime can learn from a dataset with how many time series in 13 hours? -InceptionTime can learn from a dataset with how many time series in 13 hours?|has_answer|8M -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|has_question|What is one of the core problems of modern statistics is to approximate difficult-to-compute probability densities? -What is one of the core problems of modern statistics is to approximate difficult-to-compute probability densities?|has_answer|Variational Inference: A Review for Statisticians -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. 
VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|has_question|What frame all inference about unknown quantities as a calculation involving the posterior density? -What frame all inference about unknown quantities as a calculation involving the posterior density?|has_answer|Bayesian statistics -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|has_question|What is a method from machine learning that approximates probability densities through optimization? -What is a method from machine learning that approximates probability densities through optimization?|has_answer|variational inference -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. 
We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|has_question|Variational inference tends to be faster than classical methods, such as what? -Variational inference tends to be faster than classical methods, such as what?|has_answer|Markov chain Monte Carlo sampling -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|has_question|What is the idea behind variational inference? -What is the idea behind variational inference?|has_answer|to first posit a family of densities and then to find the member of that family which is close to the target -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|has_question|What is closeness measured by? 
-What is closeness measured by?|has_answer|Kullback-Leibler divergence
-Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|has_question|What does VI use to scale up to massive data?
-What does VI use to scale up to massive data?|has_answer|stochastic optimization
-Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|has_question|What kind of problems do we highlight in VI?
-What kind of problems do we highlight in VI?|has_answer|open
-Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|has_question|What is the problem with VI?
-What is the problem with VI?|has_answer|not yet well understood
-Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|has_question|What is our hope in writing this paper?
-What is our hope in writing this paper?|has_answer|catalyze statistical research on this class of algorithms
-Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|has_question|What is a universal unsupervised learning approach to extract useful representations from high-dimensional data?
-What is a universal unsupervised learning approach to extract useful representations from high-dimensional data?|has_answer|Representation Learning with Contrastive Predictive Coding
-Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|has_question|How does our model learn representations?
-How does our model learn representations?|has_answer|by predicting the future in latent space
-Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|has_question|What induces the latent space to capture information that is maximally useful to predict future samples?
-What induces the latent space to capture information that is maximally useful to predict future samples?|has_answer|probabilistic contrastive loss
-Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|has_question|What makes the model tractable?
-What makes the model tractable?|has_answer|negative sampling
-Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|has_question|What can a contrastive method be applied to?
-What can a contrastive method be applied to?|has_answer|any form of data
-Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|has_question|What is an important and challenging endeavor for artificial intelligence?
-What is an important and challenging endeavor for artificial intelligence?|has_answer|unsupervised learning
-Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|has_question|What is the universal unsupervised learning approach to extract useful representations from high-dimensional data called?
-What is the universal unsupervised learning approach to extract useful representations from high-dimensional data called?|has_answer|Contrastive Predictive Coding
-Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|has_question|How is Contrastive Predictive Coding learned?
-How is Contrastive Predictive Coding learned?|has_answer|by predicting the future in latent space
-Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|has_question|What are the four domains that we demonstrate that our approach is able to learn useful representations achieving strong performance on?
-What are the four domains that we demonstrate that our approach is able to learn useful representations achieving strong performance on?|has_answer|speech, images, text and reinforcement learning in 3D environments
-Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|has_question|What is an effective methodology for Principled Integration of Machine Learning and Reasoning?
-Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|has_question|Who has raised concerns about interpretability and accountability of AI?
-Who has raised concerns about interpretability and accountability of AI?|has_answer|influential thinkers
-Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|has_question|Neural-symbolic computing integrates principled knowledge representation and reasoning mechanisms with what?
-Neural-symbolic computing integrates principled knowledge representation and reasoning mechanisms with what?|has_answer|deep learning-based systems
-Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|has_question|What aims at integrating the ability to learn from the environment and the ability to reason from what has been learned?
-What aims at integrating the ability to learn from the environment and the ability to reason from what has been learned?|has_answer|Neural-symbolic computing
-Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|has_question|What has been an active topic of research for many years?
-What has been an active topic of research for many years?|has_answer|Neural-symbolic computing
-Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|has_question|What is a principled methodology for integrated machine learning and reasoning?
-What is a principled methodology for integrated machine learning and reasoning?|has_answer|neural-symbolic computing
-Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|has_question|What is the main characteristic of neural-symbolic computing?
-What is the main characteristic of neural-symbolic computing?|has_answer|principled integration of neural learning with symbolic knowledge representation and reasoning
-Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|has_question|The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for what types of AI systems?
-The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for what types of AI systems?|has_answer|interpretable and accountable
-Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \compositional\ training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|has_question|What do knowledge graphs often have that disrupts path queries?
-What do knowledge graphs often have that disrupts path queries?|has_answer|missing facts
-Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \compositional\ training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|has_question|How do recent models for knowledge base completion impute missing facts?
-How do recent models for knowledge base completion impute missing facts?|has_answer|embedding knowledge graphs in vector spaces
-Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \compositional\ training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|has_question|What do knowledge graphs suffer from?
-What do knowledge graphs suffer from?|has_answer|cascading errors
-Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \compositional\ training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|has_question|What training objective dramatically improves all models' ability to answer path queries?
-What training objective dramatically improves all models' ability to answer path queries?|has_answer|compositional
-Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \compositional\ training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|has_question|What language is spoken by people living in?
-What language is spoken by people living in?|has_answer|Lisbon
-Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \compositional\ training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|has_question|Knowledge graphs often have missing facts (edges) which disrupt what?
-Knowledge graphs often have missing facts (edges) which disrupt what?|has_answer|path queries
-Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \compositional\ training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|has_question|What acts as a novel form of structural regularization?
-What acts as a novel form of structural regularization?|has_answer|compositional training
-Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|has_question|What makes utterances meaningful?
-What makes utterances meaningful?|has_answer|Experience Grounds Language
-Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|has_question|Why do today's best systems make mistakes?
-Why do today's best systems make mistakes?|has_answer|failure to relate language to the physical world it describes and to the social interactions it facilitates
-Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|has_question|What is a diverse field?
-Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|has_question|What do we believe can be deeply enriched from the parallel tradition of research on the contextual and social nature of language?
-What do we believe can be deeply enriched from the parallel tradition of research on the contextual and social nature of language?|has_answer|large text corpora
-Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|has_question|What are the contextual foundations of language?
-What are the contextual foundations of language?|has_answer|grounding, embodiment, and social interaction
-Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|has_question|What is the focus of this article?
-What is the focus of this article?|has_answer|how this integration can move the field forward
-Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|has_question|What do we believe this framing will serve as for truly contextual language understanding?
-What do we believe this framing will serve as for truly contextual language understanding?|has_answer|a roadmap
-A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|has_question|What is a Comprehensive Survey of Graph Embedding?
-What is a Comprehensive Survey of Graph Embedding?|has_answer|Problems, Techniques and Applications
-A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|has_question|What does effective graph analytics provide users?
-What does effective graph analytics provide users?|has_answer|deeper understanding
-A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|has_question|What do most graph analytics methods suffer from?
-What do most graph analytics methods suffer from?|has_answer|high computation and space cost
-A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|has_question|What is an effective yet efficient way to solve the graph analytics problem?
-What is an effective yet efficient way to solve the graph analytics problem?|has_answer|Graph embedding
-A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|has_question|What does Graph embedding do?
-What does Graph embedding do?|has_answer|It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved
-A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|has_question|What type of review of the literature in graph embedding is conducted in this survey?
-What type of review of the literature in graph embedding is conducted in this survey?|has_answer|comprehensive review
-A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|has_question|What do we introduce in this comprehensive review of the literature in graph embedding?
-What do we introduce in this comprehensive review of the literature in graph embedding?|has_answer|formal definition of graph embedding
-A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions.
Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|has_question|How many taxonomies of graph embedding are proposed? -How many taxonomies of graph embedding are proposed?|has_answer|two -A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|has_question|How many promising future research directions do we suggest in terms of computation efficiency, problem settings, techniques and application scenarios? -How many promising future research directions do we suggest in terms of computation efficiency, problem settings, techniques and application scenarios?|has_answer|four -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. 
Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|has_question|What does GEM stand for? -What does GEM stand for?|has_answer|Geometric Embedding -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|has_question|What is the basis of the subspace spanned by a word and its surrounding context in a sentence? -What is the basis of the subspace spanned by a word and its surrounding context in a sentence?|has_answer|orthogonal -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. 
The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|has_question|What do we model the semantic meaning of a word in a sentence based on? -What do we model the semantic meaning of a word in a sentence based on?|has_answer|two aspects -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. 
Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|has_question|What is the semantic meaning of a word in a sentence based on? -What is the semantic meaning of a word in a sentence based on?|has_answer|relatedness -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|has_question|What is the word's new basis vector perpendicular to this existing subspace? -What is the word's new basis vector perpendicular to this existing subspace?|has_answer|novel semantic meaning -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. 
Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|has_question|What is Geometric Embedding inspired by? -What is Geometric Embedding inspired by?|has_answer|the Gram-Schmidt Process -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|has_question|What is the goal of Geometric Embedding? -What is the goal of Geometric Embedding?|has_answer|combine pre-trained word embeddings into sentence representations -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. 
We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|has_question|What is required to combine pre-trained word embeddings into sentence representations? -What is required to combine pre-trained word embeddings into sentence representations?|has_answer|zero parameters -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. 
We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|has_question|How many downstream NLP tasks are evaluated? -How many downstream NLP tasks are evaluated?|has_answer|11 -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|has_question|What does our model show compared to non-parameterized alternatives? -What does our model show compared to non-parameterized alternatives?|has_answer|superior performance -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. 
Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|has_question|What is the term for a QA model to abstain from answering when it doesn't know the answer? -What is the term for a QA model to abstain from answering when it doesn't know the answer?|has_answer|Selective Question Answering -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|has_question|What policies based solely on the model's softmax probabilities fare poorly? -What policies based solely on the model's softmax probabilities fare poorly?|has_answer|Abstention policies -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. 
In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|has_question|What is trained to identify inputs on which the QA model errs and abstain when it predicts an error is likely? -What is trained to identify inputs on which the QA model errs and abstain when it predicts an error is likely?|has_answer|calibrator -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|has_question|Why do QA models need to know when to abstain from answering? -Why do QA models need to know when to abstain from answering?|has_answer|To avoid giving wrong answers -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. 
Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|has_question|What makes errors more likely and thus abstention more critical? -What makes errors more likely and thus abstention more critical?|has_answer|users often ask questions that diverge from the model's training data -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|has_question|How many questions must a QA model answer? -How many questions must a QA model answer?|has_answer|as many questions as possible -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. 
To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|has_question|Why do Abstention policies based solely on the model's softmax probabilities fare poorly? -Why do Abstention policies based solely on the model's softmax probabilities fare poorly?|has_answer|models are overconfident on out-of-domain inputs -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|has_question|What does the calibrator benefit from observing the model's behavior on? -What does the calibrator benefit from observing the model's behavior on?|has_answer|out-of-domain data -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. 
Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|has_question|What is the name of the QA model that we combine the selective question answering under domain shift with? -What is the name of the QA model that we combine the selective question answering under domain shift with?|has_answer|SQuAD -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|has_question|What percentage of questions does our method answer? 
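The calibrator idea described above is straightforward to sketch: fit a small classifier that predicts whether the QA model erred, then abstain whenever the predicted error probability crosses a threshold. The features, the synthetic data, and the answer_or_abstain helper below are illustrative placeholders, not the paper's actual setup, which trains the calibrator on held-out data including out-of-domain examples.

# A minimal sketch of selective answering with a learned calibrator.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)

# Placeholder per-question features, e.g. [max softmax prob, answer length].
X_train = rng.random((1000, 2))
# Placeholder labels: 1 if the QA model answered this question incorrectly.
# A toy rule stands in for real error annotations here.
y_err = (X_train[:, 0] < 0.5).astype(int)

calibrator = LogisticRegression().fit(X_train, y_err)

def answer_or_abstain(features, qa_answer, threshold=0.5):
    """Return the QA model's answer unless the calibrator predicts an error."""
    p_err = calibrator.predict_proba(features.reshape(1, -1))[0, 1]
    return None if p_err > threshold else qa_answer   # None means abstain

print(answer_or_abstain(np.array([0.9, 0.2]), "Paris"))

In the setting the abstract describes, the threshold would be swept to hit a target accuracy (such as the 80% operating point it reports), and the fraction of questions answered at that accuracy is the quantity being compared against the raw softmax baseline.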
-What percentage of questions does our method answer?|has_answer|56% -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|has_question|What does a lot of good: A Study in Bootstrapping Low-resource Named Entity Recognizers? -What does a lot of good: A Study in Bootstrapping Low-resource Named Entity Recognizers?|has_answer|A Little Annotation -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|has_question|What type of learning efficiently selects effective training data based on model predictions? -What type of learning efficiently selects effective training data based on model predictions?|has_answer|active learning -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. 
However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|has_question|What is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? -What is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages?|has_answer|human annotation -What is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages?|has_answer|dual-strategy approach -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|has_question|How much training data does an entity-targeted annotation strategy use? -How much training data does an entity-targeted annotation strategy use?|has_answer|one-tenth -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. 
We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|has_question|What do we propose to detect Out-of-Distribution Examples with? -What do we propose to detect Out-of-Distribution Examples with?|has_answer|In-distribution Examples and Gram Matrices -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|has_question|What type of networks yield confident, incorrect predictions when presented with Out-of-Distribution examples? -What type of networks yield confident, incorrect predictions when presented with Out-of-Distribution examples?|has_answer|deep neural networks -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. 
We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|has_question|What is challenging when presented with Out-of-Distribution examples? -What is challenging when presented with Out-of-Distribution examples?|has_answer|Detecting OOD examples -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|has_question|How do we detect OOD examples? -How do we detect OOD examples?|has_answer|by identifying inconsistencies between activity patterns and class predicted -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. 
Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|has_question|What can characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values yield? -What can characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values yield?|has_answer|high OOD detection rates -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|has_question|How do we identify anomalies in gram matrices? -How do we identify anomalies in gram matrices?|has_answer|by simply comparing each value with its respective range observed over the training data -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. 
Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|has_question|What does this method not require access to OOD data for? -What does this method not require access to OOD data for?|has_answer|fine-tuning hyperparameters -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|has_question|What is the task of detecting far-from-distribution out-of-distribution examples? -What is the task of detecting far-from-distribution out-of-distribution examples?|has_answer|surprisingly hard -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. 
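A minimal numpy sketch of the range test described above, simplified to a single layer and first-order Gram matrices (the paper aggregates deviations over layers and higher matrix powers and normalizes them, so this is an illustration of the idea rather than a faithful reimplementation):

import numpy as np

def gram_values(features):
    # features: (n_channels, n_locations) activations of one layer;
    # the Gram matrix is symmetric, so the upper triangle suffices
    g = features @ features.T
    return g[np.triu_indices_from(g)]

def fit_ranges(train_feature_maps):
    # record, per Gram entry, the min/max observed over training data
    vals = np.stack([gram_values(f) for f in train_feature_maps])
    return vals.min(axis=0), vals.max(axis=0)

def deviation(features, lo, hi, eps=1e-12):
    # how far each Gram value falls outside its training range
    v = gram_values(features)
    below = np.clip(lo - v, 0.0, None) / (np.abs(lo) + eps)
    above = np.clip(v - hi, 0.0, None) / (np.abs(hi) + eps)
    return float((below + above).sum())  # large total => likely OOD

# toy usage: a shifted input produces a much larger deviation
rng = np.random.default_rng(0)
train = [rng.normal(size=(8, 16)) for _ in range(200)]
lo, hi = fit_ranges(train)
print(deviation(rng.normal(size=(8, 16)), lo, hi))           # small
print(deviation(rng.normal(loc=3.0, size=(8, 16)), lo, hi))  # much larger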
Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|has_question|What is the focus of the From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey? -What is the focus of the From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey?|has_answer|semantic representation of meaning -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|has_question|What deficiency arises from representing a word with all its possible meanings as a single vector? -What deficiency arises from representing a word with all its possible meanings as a single vector?|has_answer|meaning conflation deficiency -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. 
We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|has_question|What can be addressed by a method for modelling? -What can be addressed by a method for modelling?|has_answer|unambiguous lexical meaning -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|has_question|What is the second branch of sense representation? -What is the second branch of sense representation?|has_answer|knowledge-based -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. 
We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|has_question|What does this survey focus on? -What does this survey focus on?|has_answer|representation of meaning -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|has_question|What is one of the major limitations of word vector space models? -What is one of the major limitations of word vector space models?|has_answer|meaning conflation deficiency -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. 
We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|has_question|The meaning conflation deficiency can be addressed through a transition from the word level to what level of word senses? -The meaning conflation deficiency can be addressed through a transition from the word level to what level of word senses?|has_answer|fine-grained -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|has_question|What are the four important aspects of sense representation? -What are the four important aspects of sense representation?|has_answer|interpretability, sense granularity, adaptability to different domains and compositionality -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. 
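The meaning conflation deficiency discussed above is easy to see with toy vectors: averaging the sense vectors of an ambiguous word leaves it equally close to both of its contexts, while keeping one vector per sense still disambiguates (all vectors below are made up for illustration):

import numpy as np

def cos(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

# two hypothetical sense vectors for "bank" in a toy 3-d space
bank_finance = np.array([1.0, 0.0, 0.0])
bank_river = np.array([0.0, 1.0, 0.0])
# a single word embedding conflates both senses into one point
bank_word = (bank_finance + bank_river) / 2

money = np.array([0.9, 0.1, 0.0])  # toy context vectors
water = np.array([0.1, 0.9, 0.0])

# the conflated vector cannot tell the two contexts apart...
print(round(cos(bank_word, money), 3), round(cos(bank_word, water), 3))

# ...while picking the nearest sense vector disambiguates
def disambiguate(senses, context):
    return max(senses, key=lambda name: cos(senses[name], context))

senses = {"bank_finance": bank_finance, "bank_river": bank_river}
print(disambiguate(senses, money), disambiguate(senses, water))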
We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|has_question|Who generated the abstract above? -Who generated the abstract above?|has_answer|one of the models presented in this paper -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|has_question|how many words does abstractive summarization of long documents exceed? -how many words does abstractive summarization of long documents exceed?|has_answer|several thousand -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|has_question|What is performed before generating a summary? -What is performed before generating a summary?|has_answer|a simple extractive step -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. 
We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|has_question|How does the extractive step improve summarization results? -How does the extractive step improve summarization results?|has_answer|significantly improves -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|has_question|What does this approach achieve while producing more abstractive summaries than prior work that employs a copy mechanism? -What does this approach achieve while producing more abstractive summaries than prior work that employs a copy mechanism?|has_answer|higher rouge scores -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration.
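A minimal sketch of the extract-then-abstract idea above, with a crude TF-IDF sentence ranker standing in for the extractive step (the ranking heuristic and the commented-out model call are assumptions for illustration, not the authors' setup):

from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np

def extractive_step(sentences, k=3):
    # score each sentence by the mean TF-IDF weight of its terms,
    # then keep the top-k sentences in their original document order
    tfidf = TfidfVectorizer().fit_transform(sentences)
    scores = np.asarray(tfidf.mean(axis=1)).ravel()
    keep = sorted(np.argsort(scores)[-k:])
    return " ".join(sentences[i] for i in keep)

document = [
    "Transformer language models can generate fluent summaries.",
    "The weather was pleasant on the day of the experiments.",
    "Long documents exceed the context window of most models.",
    "An extractive step first selects the most informative sentences.",
    "The selected sentences then condition the abstractive generator.",
]
conditioning_text = extractive_step(document, k=3)
print(conditioning_text)
# the extracted text would then condition a pretrained abstractive model,
# e.g. (hypothetical model choice, requires the transformers package):
# from transformers import pipeline
# print(pipeline("summarization")(conditioning_text)[0]["summary_text"])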
It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|has_question|What will CIS research require to allow the implementation and study of conversational systems? -What will CIS research require to allow the implementation and study of conversational systems?|has_answer|data and tools -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|has_question|What is Macaw? -What is Macaw?|has_answer|open-source framework with a modular architecture -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|has_question|What types of interactions does Macaw support? -What types of interactions does Macaw support?|has_answer|multi-turn, multi-modal, and mixed-initiative interactions -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. 
It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|has_question|What does Macaw's modular design encourage the study of? -What does Macaw's modular design encourage the study of?|has_answer|CIS algorithms -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|has_question|What can Macaw integrate with a user interface? -What can Macaw integrate with a user interface?|has_answer|user studies and data collection in an interactive mode -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|has_question|What is Macaw distributed under? -What is Macaw distributed under?|has_answer|MIT License -Learning Confidence for Out-of-Distribution Detection in Neural Networks Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. 
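To make the modular, mixed-initiative design concrete, here is a purely hypothetical sketch of a CIS loop with pluggable action modules; it does not reproduce Macaw's actual API (see the project repository for that), only the general shape of such a framework:

from typing import Callable, Dict, List

class CISPipeline:
    """Hypothetical dispatcher routing each user turn to action modules."""

    def __init__(self):
        # named actions, e.g. retrieval, QA, recommendation
        self.actions: Dict[str, Callable[[List[str], str], str]] = {}

    def register(self, name: str, action: Callable[[List[str], str], str]):
        self.actions[name] = action

    def turn(self, history: List[str], utterance: str) -> str:
        # trivial router: first action that produces a response wins;
        # a real system would score and rank candidate responses
        for action in self.actions.values():
            response = action(history, utterance)
            if response:
                return response
        return "Could you rephrase that?"  # mixed-initiative fallback

cis = CISPipeline()
cis.register("qa", lambda history, u: "42" if u.endswith("?") else "")
cis.register("retrieval", lambda history, u: f"Top document for: {u}")
print(cis.turn([], "What is the answer?"))    # answered by the QA module
print(cis.turn([], "neural ranking models"))  # falls through to retrieval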
Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.|has_question|What is a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs? -What is a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs?|has_answer|Learning Confidence for Out-of-Distribution Detection in Neural Networks -Learning Confidence for Out-of-Distribution Detection in Neural Networks Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.|has_question|Learning Confidence for Out-of-Distribution Detection in Neural Networks is closely related to what? -Learning Confidence for Out-of-Distribution Detection in Neural Networks is closely related to what?|has_answer|out-of-distribution detection -Learning Confidence for Out-of-Distribution Detection in Neural Networks Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.|has_question|What does learning confidence estimates for neural networks produce? -What does learning confidence estimates for neural networks produce?|has_answer|intuitively interpretable outputs -Learning Confidence for Out-of-Distribution Detection in Neural Networks Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. 
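A minimal PyTorch sketch of the confidence-estimation idea above, as I understand it: the network exposes an extra scalar confidence output, the class prediction is interpolated toward the label by that confidence, and a log penalty stops the network from always asking for hints (the weight lam and all names are illustrative assumptions, not the paper's exact formulation):

import torch
import torch.nn.functional as F

def confidence_loss(logits, confidence_logit, target, lam=0.1):
    # logits: (batch, n_classes); confidence_logit: (batch, 1)
    probs = F.softmax(logits, dim=1)
    c = torch.sigmoid(confidence_logit)  # confidence in (0, 1)
    onehot = F.one_hot(target, probs.size(1)).float()
    # the network may "buy hints" by pulling its prediction toward the
    # label, but pays for them through the low-confidence penalty below
    adjusted = c * probs + (1.0 - c) * onehot
    task_loss = F.nll_loss(torch.log(adjusted + 1e-12), target)
    penalty = -torch.log(c + 1e-12).mean()
    return task_loss + lam * penalty

# toy usage with random stand-ins for network outputs
torch.manual_seed(0)
logits = torch.randn(4, 10)
confidence_logit = torch.randn(4, 1)
target = torch.randint(0, 10, (4,))
print(confidence_loss(logits, confidence_logit, target))
# at test time, a low sigmoid(confidence_logit) flags a likely OOD input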
Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.|has_question|What task does our method surpass recently proposed techniques that construct confidence based on the network's output distribution? -What task does our method surpass recently proposed techniques that construct confidence based on the network's output distribution?|has_answer|out-of-distribution detection -Learning Confidence for Out-of-Distribution Detection in Neural Networks Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.|has_question|What is the problem of learning confidence estimates for out-of-distribution detection in neural networks? -What is the problem of learning confidence estimates for out-of-distribution detection in neural networks?|has_answer|calibrating out-of-distribution detectors -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients.
- It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|What shows that it is possible to learn classification tasks at near competitive accuracy without backpropagation? -What shows that it is possible to learn classification tasks at near competitive accuracy without backpropagation?|has_answer|HSIC Bottleneck: Deep Learning without Back-Propagation -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|How can fully competitive accuracy be obtained?
-How can fully competitive accuracy be obtained?|has_answer|by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|What does the HSIC Bottleneck: Deep Learning without Back-Propagation use? -What does the HSIC Bottleneck: Deep Learning without Back-Propagation use?|has_answer|an approximation of the [#information bottleneck](/tag/information_bottleneck_method) -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks.
The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|What are two advantages of the HSIC training method? -What are two advantages of the HSIC training method?|has_answer|facilitates parallel processing and requires significantly less operations -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|What does the HSIC Bottleneck: Deep Learning without Back-Propagation not suffer from? -What does the HSIC Bottleneck: Deep Learning without Back-Propagation not suffer from?|has_answer|exploding or vanishing gradients -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs...
the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|What is the name of the bottleneck for training deep neural networks? -What is the name of the bottleneck for training deep neural networks?|has_answer|HSIC -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels.
Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|What is an alternative to the conventional cross-entropy loss and backpropagation? -What is an alternative to the conventional cross-entropy loss and backpropagation?|has_answer|HSIC bottleneck -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|What does the HSIC bottleneck mitigate? -What does the HSIC bottleneck mitigate?|has_answer|exploding and vanishing gradients -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks.
The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|There is no requirement for what? -There is no requirement for what?|has_answer|symmetric feedback or update locking -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|On what classifications does the HSIC bottleneck provide performance comparable to backpropagation with a cross-entropy target? -On what classifications does the HSIC bottleneck provide performance comparable to backpropagation with a cross-entropy target?|has_answer|MNIST/FashionMNIST/CIFAR10 -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. 
Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|has_question|What further improves performance on MNIST/FashionMNIST/CIFAR10 classification? -What further improves performance on MNIST/FashionMNIST/CIFAR10 classification?|has_answer|Appending a single layer trained with SGD (without backpropagation) to reformat the information -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|has_question|What is an efficient framework for learning sentence representations? -What is an efficient framework for learning sentence representations?|has_answer|Quick Thoughts -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. 
Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|has_question|What is an efficient framework for learning sentence representations Quick Thoughts? -What is an efficient framework for learning sentence representations Quick Thoughts?|has_answer|unlabelled data -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|has_question|We reformulate the problem of predicting the context in which a sentence appears as what? -We reformulate the problem of predicting the context in which a sentence appears as what?|has_answer|classification problem -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. 
We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|has_question|What type of framework do we propose for learning sentence representations from unlabelled data? -What type of framework do we propose for learning sentence representations from unlabelled data?|has_answer|simple and efficient -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|has_question|What is the framework for learning sentence representations from unlabelled data based on? -What is the framework for learning sentence representations from unlabelled data based on?|has_answer|distributional hypothesis -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|has_question|What does a classifier distinguish context sentences from other contrastive sentences based on? -What does a classifier distinguish context sentences from other contrastive sentences based on?|has_answer|their vector representations -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. 
we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|has_question|What does the model learn? -What does the model learn?|has_answer|high-quality sentence representations -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|has_question|What is achieved in training time with sentence representations? -What is achieved in training time with sentence representations?|has_answer|speedup -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. 
Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|has_question|The recent proliferation of knowledge graphs coupled with incomplete or partial information has fueled a lot of research on what? -The recent proliferation of knowledge graphs coupled with incomplete or partial information has fueled a lot of research on what?|has_answer|knowledge base completion -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|has_question|What does CNN stand for? -What does CNN stand for?|has_answer|convolutional neural network -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|has_question|What do KG embeddings treat independently? 
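The abstract above only sketches the attention mechanism in prose. As a minimal illustration (not the authors' released code; the class name, dimensions, and single-entity batch below are hypothetical), the core idea is to project each neighbourhood triple (h, r, t) to one vector, score it, and form the entity representation as the attention-weighted sum over its neighbourhood:

```python
# Minimal sketch of attention over the triples in one entity's neighbourhood.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TripleAttention(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.proj = nn.Linear(3 * dim, dim, bias=False)  # c = W [h; r; t]
        self.score = nn.Linear(dim, 1, bias=False)       # attention logit per triple

    def forward(self, h, r, t):
        # h, r, t: (n_neighbours, dim) embeddings of the triples around one entity
        c = self.proj(torch.cat([h, r, t], dim=-1))            # triple representations
        alpha = F.softmax(F.leaky_relu(self.score(c)), dim=0)  # weights over neighbours
        return (alpha * c).sum(dim=0)                          # attended entity vector

layer = TripleAttention(dim=8)
h, r, t = (torch.randn(5, 8) for _ in range(3))
print(layer(h, r, t).shape)  # torch.Size([8])
```

The paper's model additionally stacks such layers and folds relation clusters and multi-hop neighbours into the aggregation; see the linked GitHub repository for the full implementation.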
-What do KG embeddings treat independently?|has_answer|triples -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|has_question|What does our paper propose a novel attention based feature embedding that captures in any given entity's neighborhood? -What does our paper propose a novel attention based feature embedding that captures in any given entity's neighborhood?|has_answer|entity and relation features -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|has_question|In addition to relation clusters, what type of relations are encapsulated in our model? 
-In addition to relation clusters, what type of relations are encapsulated in our model?|has_answer|multihop -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|has_question|We show marked performance gains in comparison to state of the art methods on what? -We show marked performance gains in comparison to state of the art methods on what?|has_answer|all datasets -Deep latent variable models assume a generative process whereby a simple random variable is transformed from the latent space to the observed, output space through a deep neural network. Generative Adversarial Networks (GAN) and Variational Autoencoders (VAE) are two of the most popular variants of this approach|has_question|What models assume a generative process whereby a simple random variable is transformed from the latent space to the observed, output space? -Deep latent variable models assume a generative process whereby a simple random variable is transformed from the latent space to the observed, output space through a deep neural network. Generative Adversarial Networks (GAN) and Variational Autoencoders (VAE) are two of the most popular variants of this approach|has_question|What is VAE? -What is VAE?|has_answer|Variational Autoencoders -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. 
Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|has_question|What is all you need? -What is all you need?|has_answer|Attention -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|has_question|How do the best performing models connect the encoder and decoder? -How do the best performing models connect the encoder and decoder?|has_answer|through an attention mechanism -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|has_question|What is the name of the new simple network architecture? -What is the name of the new simple network architecture?|has_answer|the Transformer -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|has_question|The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in what configuration? -The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in what configuration?|has_answer|encoder-decoder -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. 
Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|has_question|What do the best performing models connect through an attention mechanism? -What do the best performing models connect through an attention mechanism?|has_answer|the encoder and decoder -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|has_question|What shows the Transformer to be superior in quality while being more parallelizable and requiring significantly less time to train? -What shows the Transformer to be superior in quality while being more parallelizable and requiring significantly less time to train?|has_answer|Experiments on two machine translation tasks -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. 
Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|has_question|What did our model achieve on the WMT 2014 English-to-German translation task? -What did our model achieve on the WMT 2014 English-to-German translation task?|has_answer|28.4 BLEU -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|has_question|On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of what? -On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of what?|has_answer|41.8 -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely.
Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|has_question|How does the Transformer generalize to other tasks? -How does the Transformer generalize to other tasks?|has_answer|by applying it successfully to English constituency parsing both with large and limited training data -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|has_question|What are models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks? -What are models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks?|has_answer|Universal Sentence Encoder models -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks.
Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|has_question|With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of what training data? -With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of what training data?|has_answer|supervised -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|has_question|What are the models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks? -What are the models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks?|has_answer|efficient -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks.
With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|has_question|How many variants of the encoding models allow for trade-offs between accuracy and compute resources? -How many variants of the encoding models allow for trade-offs between accuracy and compute resources?|has_answer|Two variants -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|has_question|What are the two variants of the encoding models that allow for trade-offs between accuracy and compute resources?
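Since these abstracts note that the pre-trained encoders are published on TF Hub, a short usage sketch follows. It assumes tensorflow and tensorflow_hub are installed; the module handle is the publicly listed v4 encoder, and the example sentences are invented:

```python
# Load the pre-trained Universal Sentence Encoder and compare two sentences.
import numpy as np
import tensorflow_hub as hub

embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
sentences = ["The cat sat on the mat.", "A feline rested on the rug."]
vectors = embed(sentences).numpy()  # one 512-dimensional embedding per sentence

u, v = vectors  # cosine similarity between the two sentence embeddings
print(float(u @ v / (np.linalg.norm(u) * np.linalg.norm(v))))
```

Because every sentence maps to a fixed-size vector, transfer to a downstream task reduces to training a small classifier on top of these embeddings.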
-What are the two variants of the encoding models that allow for trade-offs between accuracy and compute resources?|has_answer|complexity, resource consumption, the availability of transfer task training data, and task performance -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|has_question|Comparisons are made with what? -Comparisons are made with what?|has_answer|baselines that use word level transfer learning via pretrained word embeddings -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. 
Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|has_question|Transfer learning using sentence embeddings tends to outperform what? -Transfer learning using sentence embeddings tends to outperform what?|has_answer|word level transfer -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|has_question|With transfer learning via sentence embeddings, we observe what performance with minimal amounts of supervised training data for a transfer task? -With transfer learning via sentence embeddings, we observe what performance with minimal amounts of supervised training data for a transfer task?|has_answer|surprisingly good -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. 
With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|has_question|What does WEAT stand for? -What does WEAT stand for?|has_answer|Word Embedding Association Tests -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|has_question|Where are our pre-trained sentence encoding models made freely available for download? -Where are our pre-trained sentence encoding models made freely available for download?|has_answer|TF Hub -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations. We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|has_question|How many techniques enable machines to learn from other machines? 
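The rows above all derive from the Universal Sentence Encoder abstract. As a concrete illustration of what they describe, here is a minimal sketch of loading a pre-trained encoder from TF Hub and comparing two sentences via their embeddings. The module URL, version number, and the 512-dimensional output are assumptions to verify on tfhub.dev, not facts from the rows above:

```python
import numpy as np
import tensorflow_hub as hub  # pip install tensorflow tensorflow-hub

# Load a pre-trained Universal Sentence Encoder from TF Hub.
# The URL/version is an assumption -- check tfhub.dev for current modules
# (v4 is the lighter DAN variant, v5 the heavier Transformer variant).
embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")

sentences = [
    "Transfer learning using sentence embeddings tends to outperform word level transfer.",
    "Sentence-level transfer usually beats word-level transfer.",
]
vectors = embed(sentences).numpy()  # one fixed-size vector per sentence (512-d for v4)

# Cosine similarity between the two sentence embeddings.
a, b = vectors
print(float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))))
```

This is exactly the "trade-off between accuracy and compute resources" the abstract mentions: the same two-line loading code works for either variant, only the module URL changes.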
-How many techniques enable machines to learn from other machines?|has_answer|two -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations. We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|has_question|What is a framework to learn from multiple machines and data representations? -What is a framework to learn from multiple machines and data representations?|has_answer|generalized distillation -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations. We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|has_question|What types of insights do we provide about the inner workings of generalized distillation? -What types of insights do we provide about the inner workings of generalized distillation?|has_answer|theoretical and causal -the problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations whose category membership is known.|has_question|What is the problem of identifying to which of a set of categories (sub-populations) a new observation belongs? -What is the problem of identifying to which of a set of categories (sub-populations) a new observation belongs?|has_answer|identifying to which of a set of categories (sub-populations) a new observation belongs -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. 
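The generalized-distillation rows above build on Hinton-style distillation, where a student network learns from a teacher's softened output distribution in addition to the hard labels. A minimal PyTorch sketch of that loss follows; the temperature `T` and mixing weight `alpha` are illustrative defaults, not values from the paper, and in full generalized distillation the teacher would additionally be trained on privileged information the student never sees:

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels, T=2.0, alpha=0.5):
    """Blend soft teacher targets with hard labels (Hinton et al., 2015).

    T (temperature) and alpha (mixing weight) are illustrative defaults,
    not values taken from the paper.
    """
    soft_loss = F.kl_div(
        F.log_softmax(student_logits / T, dim=-1),
        F.softmax(teacher_logits / T, dim=-1),
        reduction="batchmean",
    ) * (T * T)  # rescale so soft-target gradients keep a comparable magnitude
    hard_loss = F.cross_entropy(student_logits, labels)
    return alpha * soft_loss + (1.0 - alpha) * hard_loss

# Toy usage: a batch of 8 examples over 5 classes.
student_logits = torch.randn(8, 5, requires_grad=True)
teacher_logits = torch.randn(8, 5)  # in practice, the trained teacher's outputs
labels = torch.randint(0, 5, (8,))
distillation_loss(student_logits, teacher_logits, labels).backward()
```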
Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|has_question|Recent advances in artificial intelligence have renewed interest in what? -Recent advances in artificial intelligence have renewed interest in what?|has_answer|building systems that learn and think like people -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|has_question|What are some examples of deep neural networks trained end-to-end in? 
-What are some examples of deep neural networks trained end-to-end in?|has_answer|object recognition, video games, and board games -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|has_question|What do deep neural networks differ from human intelligence in crucial ways? -What do deep neural networks differ from human intelligence in crucial ways?|has_answer|biological inspiration and performance achievements -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. 
Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|has_question|What field of study suggests that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they -What field of study suggests that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they|has_answer|cognitive science -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|has_question|What do causal models of the world support? -What do causal models of the world support?|has_answer|explanation and understanding -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. 
Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|has_question|What do we suggest for building machines that learn and think like people? -What do we suggest for building machines that learn and think like people?|has_answer|concrete challenges and promising routes -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|has_question|How does the Explaining the Predictions of Any Classifier technique explain the predictions of any classifier? -How does the Explaining the Predictions of Any Classifier technique explain the predictions of any classifier?|has_answer|by learning an interpretable model locally around the prediction -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. 
Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|has_question|What is quite important in assessing trust? -What is quite important in assessing trust?|has_answer|Understanding the reasons behind predictions -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|has_question|What can be used to transform an untrustworthy model or prediction into a trustworthy one? -What can be used to transform an untrustworthy model or prediction into a trustworthy one?|has_answer|insights into the model -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. 
Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|has_question|What is a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner? -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|has_question|What does LIME frame the task as? -What does LIME frame the task as?|has_answer|submodular optimization problem -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. 
Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|has_question|What does LIME explain for text and image classification? -What does LIME explain for text and image classification?|has_answer|different models -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|has_question|What is an example of a model that can be explained in a non-redundant way? -What is an example of a model that can be explained in a non-redundant way?|has_answer|random forests -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. 
In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|has_question|What is an example of an image classification model? -What is an example of an image classification model?|has_answer|neural networks -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|has_question|How do we show the utility of explanations? -How do we show the utility of explanations?|has_answer|novel experiments -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. 
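The LIME rows above describe explaining a black-box classifier by fitting an interpretable model locally around one prediction. The `lime` package implements this; a minimal sketch using its text explainer on a toy scikit-learn pipeline might look as follows (the four training sentences are made up for illustration):

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from lime.lime_text import LimeTextExplainer  # pip install lime

# Toy two-class sentiment classifier; the training data is invented.
train_texts = ["great movie", "awful movie", "loved it", "hated it"]
train_labels = [1, 0, 1, 0]
pipe = make_pipeline(TfidfVectorizer(), LogisticRegression())
pipe.fit(train_texts, train_labels)

explainer = LimeTextExplainer(class_names=["negative", "positive"])
exp = explainer.explain_instance(
    "a great but slightly awful movie",
    pipe.predict_proba,  # the black-box probability function LIME perturbs
    num_features=4,      # how many words the local linear model may use
)
print(exp.as_list())     # [(word, weight), ...] from the local surrogate model
```

The classifier here is deliberately interchangeable: LIME only needs a function mapping raw texts to class probabilities, which is what "explains the predictions of any classifier" means in the rows above.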
Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|has_question|What can impede NLP systems from accurate understanding of semantics? -What can impede NLP systems from accurate understanding of semantics?|has_answer|Lexical ambiguity -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|has_question|What has remained understudied in NLP systems? -What has remained understudied in NLP systems?|has_answer|sense-level information -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|has_question|What is created by incorporating a novel disambiguation algorithm into a state-of-the-art classification model? -What is created by incorporating a novel disambiguation algorithm into a state-of-the-art classification model?|has_answer|a pipeline -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. 
Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|has_question|A simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and what? -A simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and what?|has_answer|polarity detection datasets -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|has_question|What do our results point to the need for sense representation research to focus more on? -What do our results point to the need for sense representation research to focus more on?|has_answer|in vivo evaluations -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. 
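The word-sense rows above describe a pipeline that disambiguates the input text before handing it to a downstream classifier. As a rough stand-in for that step (the paper uses its own disambiguation algorithm, not Lesk), a sketch with NLTK's Lesk implementation could look like this; it only illustrates the idea that downstream models then consume sense identifiers instead of ambiguous surface forms:

```python
# pip install nltk, then: nltk.download("punkt"); nltk.download("wordnet")
from nltk import word_tokenize
from nltk.wsd import lesk  # simple Lesk WSD, a stand-in for the paper's algorithm

def disambiguate(text):
    """Replace each token with its WordNet synset name when Lesk finds one."""
    tokens = word_tokenize(text)
    out = []
    for tok in tokens:
        synset = lesk(tokens, tok)  # returns None for words with no synsets
        out.append(synset.name() if synset is not None else tok)
    return " ".join(out)

print(disambiguate("the bank raised interest rates"))
# e.g. 'the depository_financial_institution.n.01 ...' (output varies by NLTK version)
```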
As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|has_question|What are the entity embeddings of categorical variables? -What are the entity embeddings of categorical variables?|has_answer|Euclidean spaces -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|has_question|Who learns the mapping of categorical variables during the standard supervised training process? -Who learns the mapping of categorical variables during the standard supervised training process?|has_answer|a neural network -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. 
We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|has_question|What does entity embedding reduce? -What does entity embedding reduce?|has_answer|memory usage -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|has_question|In a recent Kaggle competition, what position did we reach with relative simple features? -In a recent Kaggle competition, what position did we reach with relative simple features?|has_answer|third -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. 
Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|has_question|When does entity embedding help the neural network to generalize better?
-When does entity embedding help the neural network to generalize better?|has_answer|when the data is sparse and statistics is unknown
-Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|has_question|What is entity embedding particularly useful for?
-What is entity embedding particularly useful for?|has_answer|datasets with lots of high cardinality features
-Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|has_question|Entity embeddings boost the performance of all tested methods when used as input features instead of entity embedding?
-Entity embeddings boost the performance of all tested methods when used as input features instead of entity embedding?|has_answer|machine learning
-Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|has_question|What can entity embedding be used for?
-What can entity embedding be used for?|has_answer|visualizing categorical data and for data clustering
-Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|has_question|What does attention-based bag-of-words context model and pre-trained entity embeddings assess?
-What does attention-based bag-of-words context model and pre-trained entity embeddings assess?|has_answer|topic level context compatibility
-Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|has_question|What happens to the latent entity type information in the immediate context of the mention?
-What happens to the latent entity type information in the immediate context of the mention?|has_answer|neglected
-Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|has_question|What is used to inject latent entity type information into the entity embeddings?
-What is used to inject latent entity type information into the entity embeddings?|has_answer|pre-trained BERT
-Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|has_question|What is integrated into the local context model of a state-of-the-art model to better capture latent entity type information?
-What is integrated into the local context model of a state-of-the-art model to better capture latent entity type information?|has_answer|BERT-based entity similarity score
-Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|has_question|Our model significantly outperforms the state-of-the-art entity linking models on what standard benchmark?
-Our model significantly outperforms the state-of-the-art entity linking models on what standard benchmark?|has_answer|AIDA-CoNLL
-Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|has_question|What demonstrates that our model corrects most of the type errors produced by the direct baseline?
-What demonstrates that our model corrects most of the type errors produced by the direct baseline?|has_answer|Detailed experiment analysis
-Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|has_question|What is the name of the attentive sequence model that compresses past memories for long-range sequence learning?
-What is the name of the attentive sequence model that compresses past memories for long-range sequence learning?|has_answer|Compressive Transformers for Long-Range Sequence Modelling
-Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|has_question|What is an attentive sequence model that compresses past memories for long-range sequence learning?
-What is an attentive sequence model that compresses past memories for long-range sequence learning?|has_answer|Compressive Transformer
-Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|has_question|What are the state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks?
-What are the state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks?|has_answer|17.1 ppl and 0.97 bpc
-Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|has_question|The Compressive Transformer can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on what task?
-The Compressive Transformer can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on what task?|has_answer|object matching task
-Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|has_question|What is the name of the new open-vocabulary language modelling benchmark?
-What is the name of the new open-vocabulary language modelling benchmark?|has_answer|PG-19
-Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results. This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|has_question|What is a popular approach for clustering multivariate data?
-What is a popular approach for clustering multivariate data?|has_answer|Variable Selection Methods for Model-based Clustering
-Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results. This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|has_question|What type of data is becoming more and more common?
-What type of data is becoming more and more common?|has_answer|high-dimensional data
-Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results. This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|has_question|What has received a lot of attention and research effort in recent years?
-What has received a lot of attention and research effort in recent years?|has_answer|variable selection techniques
-Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results. This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|has_question|Why has variable selection been advocated for small size problems?
-Why has variable selection been advocated for small size problems?|has_answer|to facilitate the interpretation of the clustering results
-Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results. This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|has_question|What does this review provide of the methods developed for variable selection in model-based clustering?
-What does this review provide of the methods developed for variable selection in model-based clustering?|has_answer|a summary
-Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results. This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|has_question|What are indicated and illustrated in application to two data analysis examples?
-What are indicated and illustrated in application to two data analysis examples?|has_answer|Existing R packages
-One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|has_question|What is the ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept?
-One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|has_question|What is one-shot generalization?
-What is one-shot generalization?|has_answer|an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept
-One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|has_question|Deep generative models combine the representational power of deep learning with the inferential power of what?
-Deep generative models combine the representational power of deep learning with the inferential power of what?|has_answer|Bayesian reasoning
-One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|has_question|What two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation?
-What two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation?|has_answer|feedback and attention
-One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|has_question|What are generative models among the state-of-the art in?
-What are generative models among the state-of-the art in?|has_answer|density estimation and image generation
-One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|has_question|What are the three tasks that we demonstrate the one-shot generalization ability of our models?
-What are the three tasks that we demonstrate the one-shot generalization ability of our models?|has_answer|unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts
-One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|has_question|What are our models able to generate?
-What are our models able to generate?|has_answer|compelling and diverse samples
-BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|has_question|What type of BERT model outperforms BERT models modeling intent classification and slot filling separately?
-What type of BERT model outperforms BERT models modeling intent classification and slot filling separately?|has_answer|joint
-BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|has_question|What does not improve the results of BERT for Joint Intent Classification and Slot Filling?
-What does not improve the results of BERT for Joint Intent Classification and Slot Filling?|has_answer|Adding a CRF on top of the model
-BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|has_question|What are two essential tasks for natural language understanding?
-BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|has_question|Intent classification and slot filling often suffer from what?
-Intent classification and slot filling often suffer from what?|has_answer|small-scale human-labeled training data
-BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|has_question|What facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora?
-BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|has_question|What has not been much effort on for natural language understanding?
-What has not been much effort on for natural language understanding?|has_answer|exploring BERT
-BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|has_question|What model is proposed for BERT for natural language understanding?
-What model is proposed for BERT for natural language understanding?|has_answer|joint intent classification and slot filling model based on BERT
-BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|has_question|What models are better than BERT for intent classification and slot filling?
-What models are better than BERT for intent classification and slot filling?|has_answer|attention-based recurrent neural network models and slot-gated models
-fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|has_question|What describes the fastai v2 API?
-What describes the fastai v2 API?|has_answer|fastai: A Layered API for Deep Learning Paper
-fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|has_question|What does fastai v2 aim to achieve without substantial compromises?
-What does fastai v2 aim to achieve without substantial compromises?|has_answer|performance
-fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|has_question|What makes fastai possible without substantial compromises in ease of use, flexibility, or performance?
-What makes fastai possible without substantial compromises in ease of use, flexibility, or performance?|has_answer|carefully layered architecture -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|has_question|Fastai v2 leverages the dynamism of the underlying Python language and the flexibility of what library? -Fastai v2 leverages the dynamism of the underlying Python language and the flexibility of what library?|has_answer|PyTorch library -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. 
We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|has_question|How many lines of code can optimization algorithms be implemented in fastai? -How many lines of code can optimization algorithms be implemented in fastai?|has_answer|4-5 lines -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|has_question|What was the result of using fastai v2? -What was the result of using fastai v2?|has_answer|write more quickly -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. 
fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|has_question|Fastai is already in wide use in what industries? -Fastai is already in wide use in what industries?|has_answer|research, industry, and teaching -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|has_question|Where is fastai v2 currently in pre-release? -Where is fastai v2 currently in pre-release?|has_answer|http://dev.fast.ai/ -Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. 
-Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.|has_question|What is a structured representation of a text's content called?
-What is a structured representation of a text's content called?|has_answer|document plan
-Text Generation from Knowledge Graphs with Graph Transformers|has_question|What does the work address the problem of generating coherent multi-sentence texts from the output of an information extraction system?
-What does the work address the problem of generating coherent multi-sentence texts from the output of an information extraction system?|has_answer|knowledge graph
-Text Generation from Knowledge Graphs with Graph Transformers|has_question|What is ubiquitous in computing?
-What is ubiquitous in computing?|has_answer|Graphical knowledge representations
-Text Generation from Knowledge Graphs with Graph Transformers|has_question|What can leverage the relational structure of knowledge graphs without imposing linearization or hierarchical constraints?
-What can leverage the relational structure of knowledge graphs without imposing linearization or hierarchical constraints?|has_answer|graph transforming encoder
-Text Generation from Knowledge Graphs with Graph Transformers|has_question|What does the graph transforming encoder provide for graph-to-text generation?
-What does the graph transforming encoder provide for graph-to-text generation?|has_answer|end-to-end trainable system
-Text Generation from Knowledge Graphs with Graph Transformers|has_question|Automatic and human evaluations show that our technique produces what type of texts?
-Automatic and human evaluations show that our technique produces what type of texts?|has_answer|more informative
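The abstract's key mechanism is an encoder that uses the graph's relational structure directly, instead of linearizing the graph into a token sequence. A common way to realize that idea is adjacency-masked self-attention; the sketch below is our own toy illustration of that principle, not the paper's implementation. `adjacency` is assumed to include self-loops so every node has at least one neighbour to attend to.

```python
import torch
import torch.nn.functional as F

def graph_attention(node_states: torch.Tensor, adjacency: torch.Tensor) -> torch.Tensor:
    # node_states: (n_nodes, dim); adjacency: (n_nodes, n_nodes) with 0/1 entries
    dim = node_states.size(-1)
    scores = node_states @ node_states.T / dim ** 0.5
    # mask out non-edges so each node attends only to its graph neighbours
    scores = scores.masked_fill(adjacency == 0, float("-inf"))
    weights = F.softmax(scores, dim=-1)
    return weights @ node_states
```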
-EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction Presented in these [slides](/doc/2019/12/unsupervised_learning_with_text). Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. The presence of a concept is decided from an excerpt, i.e. a small sequence of consecutive words in the text. Relevant concepts for the prediction task at hand are automatically defined by our model, avoiding the need for concept-level annotations. To ease interpretability, we enforce that for each concept, the corresponding excerpts share similar semantics and are differentiable from each other. We experimentally demonstrate the relevance of our approach on text classification and multi-sentiment analysis tasks.|has_question|What is an example of a model that provides explanations along with predictions?
-What is an example of a model that provides explanations along with predictions?|has_answer|Explaining model Decisions through Unsupervised Concepts Extraction
-EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction|has_question|What model performs output prediction and provides an explanation in terms of the presence of particular concepts in the input?
-What model performs output prediction and provides an explanation in terms of the presence of particular concepts in the input?|has_answer|self-interpretable model
-EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction|has_question|Our model's prediction relies solely on what representation of the input?
-Our model's prediction relies solely on what representation of the input?|has_answer|low-dimensional binary representation
-EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction|has_question|What is crucial in some text processing tasks?
-What is crucial in some text processing tasks?|has_answer|Providing explanations along with predictions
-EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction|has_question|What model performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input?
-What model performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input?|has_answer|self-interpretable model
-EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction|has_question|The presence of a concept is decided from what?
-The presence of a concept is decided from what?|has_answer|an excerpt
-The presence of a concept is decided from what?|has_answer|a small sequence of consecutive words in the text
-EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction|has_question|Relevant concepts for the prediction task at hand are automatically defined by our model, avoiding the need for what?
-Relevant concepts for the prediction task at hand are automatically defined by our model, avoiding the need for what?|has_answer|concept-level annotations
-EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction|has_question|To ease interpretability, we enforce that for each concept, the corresponding excerpts share what?
-To ease interpretability, we enforce that for each concept, the corresponding excerpts share what?|has_answer|similar semantics
-EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction|has_question|We experimentally demonstrate the relevance of our approach on text classification and what other task?
-We experimentally demonstrate the relevance of our approach on text classification and what other task?|has_answer|multi-sentiment analysis tasks
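The EDUCE interface described above — a classifier that sees only a binary concept-presence vector, each bit decided from the best-matching excerpt — can be sketched as a small concept bottleneck. This is a hedged toy version under our own naming, not the authors' architecture; the straight-through trick for gradients is a standard substitute for their training scheme.

```python
import torch
import torch.nn as nn

class ConceptBottleneckClassifier(nn.Module):
    """Predicts a class from a binary vector of concept presences (toy sketch)."""

    def __init__(self, emb_dim: int, n_concepts: int, n_classes: int):
        super().__init__()
        self.concept_scorer = nn.Linear(emb_dim, n_concepts)
        self.classifier = nn.Linear(n_concepts, n_classes)

    def forward(self, excerpt_embeddings: torch.Tensor):
        # excerpt_embeddings: (n_excerpts, emb_dim) for one document
        scores = self.concept_scorer(excerpt_embeddings)   # (n_excerpts, n_concepts)
        best = scores.max(dim=0).values                    # strongest excerpt per concept
        soft = torch.sigmoid(best)
        presence = (soft > 0.5).float()                    # binary presence vector
        # straight-through estimator: hard values forward, soft gradients backward
        presence = presence + soft - soft.detach()
        return self.classifier(presence), presence
```

The prediction depends only on `presence`, so each output can be explained by pointing at the excerpts that switched concepts on.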
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge A neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears and, by inference, the entity’s semantic properties. We include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself). Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|has_question|What is a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge?
-What is a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge?|has_answer|Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|How can the model be updated without retraining?
-How can the model be updated without retraining?|has_answer|manipulating its symbolic representations
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|What does this model allow us to do?
-What does this model allow us to do?|has_answer|add new facts and overwrite existing ones
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|What does a neural language model learn to access information in?
-What does a neural language model learn to access information in?|has_answer|symbolic knowledge graph
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|What does _as_expert stand for?
-What does _as_expert stand for?|has_answer|entities
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|What is the additional memory that encodes triples from a symbolic KB?
-What is the additional memory that encodes triples from a symbolic KB?|has_answer|fact memory
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|What are the core of modern NLP modeling?
-What are the core of modern NLP modeling?|has_answer|Massive language models
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|What is likely to become stale as the world changes?
-What is likely to become stale as the world changes?|has_answer|factual information
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|Knowledge stored as parameters will inevitably exhibit all of what?
-Knowledge stored as parameters will inevitably exhibit all of what?|has_answer|biases inherent in the source materials
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|What does the neural language model include an explicit interface between?
-What does the neural language model include an explicit interface between?|has_answer|subsymbolic neural knowledge
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|What does the neural language model do?
-What does the neural language model do?|has_answer|dramatically improves performance on two knowledge-intensive question-answering tasks
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge|has_question|What is interesting about the model?
-What is interesting about the model?|has_answer|the model can be updated without re-training
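The "fact memory" idea above — triples from a symbolic KB stored as key/value pairs that a neural model attends over, and that can be edited without retraining — can be sketched in a few lines. This is our own rough toy version under assumed names, not the paper's architecture.

```python
import torch
import torch.nn.functional as F

class FactMemory:
    """Key/value store over (subject, relation, object) triples (toy sketch)."""

    def __init__(self, dim: int):
        self.keys, self.values, self.dim = [], [], dim

    def write(self, subj_emb: torch.Tensor, rel_emb: torch.Tensor, obj_emb: torch.Tensor):
        # key = subject + relation; value = object. Overwriting or adding a
        # fact is just editing this list -- no gradient step required.
        self.keys.append(subj_emb + rel_emb)
        self.values.append(obj_emb)

    def read(self, query: torch.Tensor) -> torch.Tensor:
        keys = torch.stack(self.keys)        # (n_facts, dim)
        values = torch.stack(self.values)    # (n_facts, dim)
        attn = F.softmax(keys @ query / self.dim ** 0.5, dim=0)
        return attn @ values                 # mixture of object embeddings
```

Because the memory is symbolic and external to the transformer's parameters, stale facts can be replaced in place, which is the updating-without-retraining property the abstract emphasizes.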
This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|has_question|What does the need for fast nearest neighbor search pose for various tasks on massive datasets? -What does the need for fast nearest neighbor search pose for various tasks on massive datasets?|has_answer|substantial computational bottleneck -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|has_question|What do we study for searching nearest neighbors w.r.t. the Wasserstein-1 distance? -What do we study for searching nearest neighbors w.r.t. the Wasserstein-1 distance?|has_answer|tree-based approximation algorithms -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|has_question|Fast tree-based approximation algorithms for searching nearest neighbors w.r.t. 
what distance? -Fast tree-based approximation algorithms for searching nearest neighbors w.r.t. what distance?|has_answer|Wasserstein-1 -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|has_question|What is a standard tree-based technique for searching nearest neighbors w.r.t. the Wasserstein-1 distance? -What is a standard tree-based technique for searching nearest neighbors w.r.t. the Wasserstein-1 distance?|has_answer|Quadtree -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|has_question|What is the name of the variant of the Quadtree algorithm? -What is the name of the variant of the Quadtree algorithm?|has_answer|Flowtree -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. 
Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|has_question|Flowtree improves over existing methods in what two areas? -Flowtree improves over existing methods in what two areas?|has_answer|running time or accuracy -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|has_question|What is Flowtree's running time? -What is Flowtree's running time?|has_answer|running time is much faster -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|has_question|What does BERT stand for? -What does BERT stand for?|has_answer|Bidirectional Encoder Representations from Transformers -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. 
As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|has_question|What is BERT designed to do? -What is BERT designed to do?|has_answer|pre-train deep bidirectional representations from unlabeled text -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|has_question|How many output layers can a pre-trained BERT model be fine-tuned with? -How many output layers can a pre-trained BERT model be fine-tuned with?|has_answer|one additional output layer -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|has_question|How does BERT compare to other language representation models? 
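The fine-tuning recipe these BERT rows keep restating, a pre-trained bidirectional encoder topped by a single additional output layer, is compact enough to sketch. A minimal sketch, assuming PyTorch and the Hugging Face transformers library (neither is a dependency declared in this patch) and a hypothetical two-label classification task:

import torch
from transformers import AutoModel, AutoTokenizer

# Pre-trained bidirectional encoder: the knowledge discussed above lives in these weights.
encoder = AutoModel.from_pretrained("bert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# The "one additional output layer" of the abstract: a linear classifier read off
# the final hidden state of the [CLS] token (two labels, hypothetical task).
classifier = torch.nn.Linear(encoder.config.hidden_size, 2)

batch = tokenizer(["a sentence to classify"], return_tensors="pt")
cls_state = encoder(**batch).last_hidden_state[:, 0]  # shape: (batch, hidden_size)
logits = classifier(cls_state)  # encoder and classifier are then trained jointly on task labels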
-How does BERT compare to other language representation models?|has_answer|conceptually simple and empirically powerful -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|has_question|What is BERT's MultiNLI accuracy? -What is BERT's MultiNLI accuracy?|has_answer|86.7% -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|has_question|What is one of the novel model architectures for computing continuous vector representations of words from very large data sets? -What is one of the novel model architectures for computing continuous vector representations of words from very large data sets?|has_answer|Efficient Estimation of Word Representations in Vector Space -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. 
it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|has_question|The quality of these representations is measured in a task called what? -The quality of these representations is measured in a task called what?|has_answer|similarity -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|has_question|We observe large improvements in what at much lower computational cost? -We observe large improvements in what at much lower computational cost?|has_answer|accuracy -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. 
The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|has_question|How long does it take to learn high quality word vectors from a 1.6 billion words data set? -How long does it take to learn high quality word vectors from a 1.6 billion words data set?|has_answer|less than a day -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|has_question|What do these vectors provide on our test set for measuring syntactic and semantic word similarities? -What do these vectors provide on our test set for measuring syntactic and semantic word similarities?|has_answer|state-of-the-art performance -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. 
it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|has_question|How many novel model architectures are proposed for computing continuous vector representations of words from very large data sets? -How many novel model architectures are proposed for computing continuous vector representations of words from very large data sets?|has_answer|two -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|has_question|The quality of these representations is measured in what task? -The quality of these representations is measured in what task?|has_answer|word similarity -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|has_question|We show that these vectors provide state-of-the-art performance on our test set for measuring what? 
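Because these rows repeat the word2vec abstract several times, a small sketch of the two proposed architectures may help; gensim is an assumption here rather than a dependency of this patch, and the corpus and hyperparameters are toy values:

from gensim.models import Word2Vec

# Toy corpus; the abstract's setting is a 1.6-billion-word dataset.
sentences = [["semantic", "word", "similarity"], ["syntactic", "word", "analogy"]]

# The two proposed architectures are selected via sg: 0 = CBOW, 1 = skip-gram.
model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, sg=1)

vector = model.wv["word"]  # learned continuous vector representation
neighbours = model.wv.most_similar("word")  # nearest words by cosine similarity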
-We show that these vectors provide state-of-the-art performance on our test set for measuring what?|has_answer|syntactic and semantic word similarities -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|has_question|Who have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan? -Who have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan?|has_answer|Humans and animals -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. 
We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|has_question|Lifelong learning is mediated by a rich set of what type of mechanisms? -Lifelong learning is mediated by a rich set of what type of mechanisms?|has_answer|neurocognitive -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|has_question|Lifelong learning capabilities are crucial for what? -Lifelong learning capabilities are crucial for what?|has_answer|autonomous agents -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. 
In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|has_question|The continual acquisition of incrementally available information from non-stationary data distributions generally leads to what? -The continual acquisition of incrementally available information from non-stationary data distributions generally leads to what?|has_answer|catastrophic forgetting or interference -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|has_question|What models typically learn representations from stationary batches of training data without accounting for situations in which information becomes incrementally available over time? -What models typically learn representations from stationary batches of training data without accounting for situations in which information becomes incrementally available over time?|has_answer|state-of-the-art deep neural network models -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. 
However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|has_question|What do existing neural network approaches alleviate? -What do existing neural network approaches alleviate?|has_answer|catastrophic forgetting -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|has_question|What is structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration? -What is structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration?|has_answer|biological systems -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form.
A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|has_question|What is the hashing trick? -What is the hashing trick?|has_answer|random hash function -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|has_question|What is an efficient method for representing words in a continuous vector form? -What is an efficient method for representing words in a continuous vector form?|has_answer|hash embeddings -What is the hashing trick?|has_answer|a random hash function -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick).
recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|has_question|How many $d$-dimensional embeddings vectors are used in hash embeddings? -How many $d$-dimensional embeddings vectors are used in hash embeddings?|has_answer|$k$ -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|has_question|What is the final $d$ dimensional representation of the token?
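The construction these rows describe is mechanical enough to sketch: $k$ hash functions select component vectors from a shared pool of $B$ vectors, and a $k$-dimensional importance-weight vector combines them into the final $d$-dimensional representation. A minimal PyTorch sketch; the sizes and the salted modular hashes below are illustrative stand-ins, not the paper's exact choices:

import torch

B, k, d, W = 10_000, 2, 64, 100_000    # pool size, hash count, dimension, weight-table size
pool = torch.nn.Embedding(B, d)        # shared pool of B component vectors
importance = torch.nn.Embedding(W, k)  # one k-dimensional weight vector per hashed token

def hash_embedding(token_id: int) -> torch.Tensor:
    # k "hash functions": salted modular hashes standing in for real ones.
    idx = torch.tensor([(token_id * (i + 1) + i) % B for i in range(k)])
    components = pool(idx)                            # (k, d) selected component vectors
    weights = importance(torch.tensor(token_id % W))  # (k,) importance weights
    return (weights.unsqueeze(1) * components).sum(dim=0)  # weighted sum, shape (d,)

Because both lookups go through hashes rather than a vocabulary index, no dictionary has to be built before training, which is the property the abstract emphasizes.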
-What is the final $d$ dimensional representation of the token?|has_answer|the product of the two -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|has_question|What are the hashing vectors selected from? -What are the hashing vectors selected from?|has_answer|$B$ embedding vectors -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding.
Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|has_question|How many tokens can a hash embedding deal with? -How many tokens can a hash embedding deal with?|has_answer|millions -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|has_question|When using a hash embedding, there is no need to do what? -When using a hash embedding, there is no need to do what?|has_answer|create a dictionary before training -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks.
Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|has_question|Models trained using hash embeddings exhibit at least the same level of performance as what? -Models trained using hash embeddings exhibit at least the same level of performance as what?|has_answer|models trained using regular embeddings -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|has_question|How much of a regular embedding is required by a hash embedding? -How much of a regular embedding is required by a hash embedding?|has_answer|fraction -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens.
When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|has_question|What can hash embeddings be considered? -What can hash embeddings be considered?|has_answer|an extension and improvement over the existing regular embedding types -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|has_question|What aim at learning low-dimensional latent representation of nodes in a network? -What aim at learning low-dimensional latent representation of nodes in a network?|has_answer|Network embedding methods -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|has_question|What are some examples of tasks that can be used with network embeddings? -What are some examples of tasks that can be used with network embeddings?|has_answer|classification, clustering, link prediction, and visualization -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. 
We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|has_question|How do we give an overview of network embeddings? -How do we give an overview of network embeddings?|has_answer|summarizing and categorizing recent advancements in this research field -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|has_question|What do we discuss in this survey? -What do we discuss in this survey?|has_answer|desirable properties -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|has_question|What are some of the different scenarios that network embedding methods are discussed under? -What are some of the different scenarios that network embedding methods are discussed under?|has_answer|supervised versus unsupervised learning -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. 
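As a concrete instance of the node-embedding methods the survey covers, here is a hedged DeepWalk-style sketch: truncated random walks over a graph fed to a skip-gram model. networkx, gensim, and the toy graph are assumptions chosen for the illustration; nothing here comes from the survey itself.

```python
# DeepWalk-style node embeddings: random walks treated as "sentences" for skip-gram.
import random
import networkx as nx
from gensim.models import Word2Vec

graph = nx.karate_club_graph()  # small built-in toy network

def random_walk(g, start, length=10):
    walk = [start]
    for _ in range(length - 1):
        walk.append(random.choice(list(g.neighbors(walk[-1]))))
    return [str(node) for node in walk]  # gensim expects token strings

walks = [random_walk(graph, n) for n in graph.nodes() for _ in range(10)]
model = Word2Vec(walks, vector_size=64, window=5, min_count=1, sg=1, epochs=5)

node_vec = model.wv["0"]  # low-dimensional latent representation of node 0,
                          # usable as features for classification, clustering, etc.
```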
We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|has_question|What do we conclude the survey with? -What do we conclude the survey with?|has_answer|future work -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hmm, I've already seen this somewhere) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|has_question|What does KG-BERT stand for? -What does KG-BERT stand for?|has_answer|Knowledge Graph Bidirectional Encoder Representations from Transformer -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hmm, I've already seen this somewhere) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|has_question|What are triples in knowledge graphs treated as? -What are triples in knowledge graphs treated as?|has_answer|textual sequences -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hmm, I've already seen this somewhere) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|has_question|Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from what?
-Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from what?|has_answer|incompleteness -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hmm, I've already seen this somewhere) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|has_question|What do we propose to use for knowledge graph completion? -What do we propose to use for knowledge graph completion?|has_answer|pre-trained language models -What does KG-BERT stand for?|has_answer|Knowledge Graph Bidirectional Encoder Representations from Transformer -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hmm, I've already seen this somewhere) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|has_question|What does KG-BERT take as input? -What does KG-BERT take as input?|has_answer|entity and relation descriptions -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hmm, I've already seen this somewhere) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|has_question|Experimental results on multiple benchmark knowledge graphs show that our method can achieve what?
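The scoring setup is easy to picture in code. Below is a hedged sketch of KG-BERT-style triple scoring with the Hugging Face transformers library (an assumed dependency; packing head, relation, and tail into one sequence this way approximates the paper's input format, and the untuned classification head would still need fine-tuning on positive and corrupted triples).

```python
# KG-BERT-style scoring: serialize the (head, relation, tail) descriptions into
# one text sequence and score it with a BERT sequence-classification head.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=2  # plausible vs. implausible triple
)

head, relation, tail = "Steve Jobs", "founded", "Apple Inc."  # illustrative triple
# Roughly [CLS] head [SEP] relation [SEP] tail [SEP]; "[SEP]" inside the string
# is recognized as a special token by the BERT tokenizer.
enc = tokenizer(head, f"{relation} [SEP] {tail}", return_tensors="pt")

with torch.no_grad():
    logits = model(**enc).logits
triple_score = torch.softmax(logits, dim=-1)[0, 1]  # plausibility after fine-tuning
```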
-Experimental results on multiple benchmark knowledge graphs show that our method can achieve what?|has_answer|state-of-the-art performance in triple classification, link prediction and relation prediction tasks -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|has_question|What is the name of the EMNLP best paper award? -What is the name of the EMNLP best paper award?|has_answer|Information Bottleneck -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|has_question|Pre-trained word embeddings like ELMo and BERT contain what? -Pre-trained word embeddings like ELMo and BERT contain what?|has_answer|rich syntactic and semantic information -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. 
In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|has_question|What information is kept in word embeddings? -What information is kept in word embeddings?|has_answer|information that helps a discriminative parser -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|has_question|What do we compress each word embedding to? -What do we compress each word embedding to?|has_answer|discrete tag or a continuous vector -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|has_question|What does the discrete version show? -What does the discrete version show?|has_answer|our tags capture most of the information in traditional POS tag annotations -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. 
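A minimal PyTorch sketch of the continuous variant just described, under invented dimensions and with a linear tagger standing in for the discriminative parser: the pre-trained embedding is compressed to a low-dimensional stochastic code, and the KL penalty discards whatever information the task head does not need.

```python
# Variational information bottleneck over a word embedding (continuous version).
import torch
import torch.nn as nn

emb_dim, code_dim, n_tags, beta = 1024, 32, 45, 1e-3  # illustrative sizes/weight

to_mu = nn.Linear(emb_dim, code_dim)      # encoder mean
to_logvar = nn.Linear(emb_dim, code_dim)  # encoder log-variance
decoder = nn.Linear(code_dim, n_tags)     # stand-in for the parser: predicts a tag

def vib_loss(x, tag):
    mu, logvar = to_mu(x), to_logvar(x)
    z = mu + torch.randn_like(mu) * (0.5 * logvar).exp()  # reparameterization trick
    task = nn.functional.cross_entropy(decoder(z), tag)   # keep task-relevant info
    # KL(q(z|x) || N(0, I)) pushes the code to forget everything else
    kl = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(-1).mean()
    return task + beta * kl

x = torch.randn(8, emb_dim)               # pretend ELMo/BERT word vectors
tag = torch.randint(0, n_tags, (8,))      # pretend POS tags
loss = vib_loss(x, tag)
loss.backward()
```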
In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|has_question|In the continuous version, we show that moderately compressing the word embeddings by our method yields what in 8 of 9 languages? -In the continuous version, we show that moderately compressing the word embeddings by our method yields what in 8 of 9 languages?|has_answer|more accurate parser -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|has_question|What is the semi-supervised method for short text clustering? -What is the semi-supervised method for short text clustering?|has_answer|Semi-supervised Clustering for Short Text via Deep Representation Learning -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. 
In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|has_question|What clustering process is combined with the representation learning process? -What clustering process is combined with the representation learning process?|has_answer|k-means -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|has_question|What type of method is proposed for short text clustering? -What type of method is proposed for short text clustering?|has_answer|semi-supervised -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. 
assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|has_question|The semi-supervised method for short text clustering is designed to combine the representation learning process and what other clustering process? -The semi-supervised method for short text clustering is designed to combine the representation learning process and what other clustering process?|has_answer|k-means -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|has_question|How many datasets show that our semi-supervised method works better than several other text clustering methods? 
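The three-step loop reads naturally as alternating optimization. Here is a hedged PyTorch sketch with a linear encoder standing in for the neural network; the supervised term computed from the small labeled set is omitted for brevity, and all sizes are invented.

```python
# Alternating loop from the abstract: (1) assign to nearest centroid,
# (2) re-estimate centroids, (3) update the encoder with assignments fixed.
import torch

torch.manual_seed(0)
texts = torch.randn(100, 50)              # pretend bag-of-words features
encoder = torch.nn.Linear(50, 16)         # stand-in for the neural text encoder
opt = torch.optim.SGD(encoder.parameters(), lr=0.1)
centroids = torch.randn(5, 16)

for _ in range(20):
    with torch.no_grad():
        z = encoder(texts)
        assign = torch.cdist(z, centroids).argmin(dim=1)   # step (1)
        for c in range(5):                                 # step (2)
            if (assign == c).any():
                centroids[c] = z[assign == c].mean(dim=0)
    opt.zero_grad()                                        # step (3): centroids and
    loss = ((encoder(texts) - centroids[assign]) ** 2).sum(1).mean()  # assignments fixed
    loss.backward()
    opt.step()
```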
-How many datasets show that our semi-supervised method works better than several other text clustering methods?|has_answer|four -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|What is the time complexity for a document of size N (characters) and a dictionary of M keywords? -What is the time complexity for a document of size N (characters) and a dictionary of M keywords?|has_answer|O(N) -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. 
For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|What is FlashText designed to only match? -What is FlashText designed to only match?|has_answer|complete words -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|What algorithm does FlashText differ from? -What algorithm does FlashText differ from?|has_answer|Aho Corasick Algorithm -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. 
FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|What is FlashText's algorithm designed to go for first? -What is FlashText's algorithm designed to go for first?|has_answer|longest match -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|For an input dictionary 'I like Machine learning', it will only consider the longest match, which is what? -For an input dictionary 'I like Machine learning', it will only consider the longest match, which is what?|has_answer|Machine Learning -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. 
The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|What can FlashText do in one pass over a document? -What can FlashText do in one pass over a document?|has_answer|search or replace keywords -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|What is the time complexity of the FlashText algorithm not dependent on? -What is the time complexity of the FlashText algorithm not dependent on?|has_answer|the number of terms being searched or replaced -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. 
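The flashtext package linked above is the paper's open-source implementation (installable with `pip install flashtext`), and a short usage sketch reproduces both examples from the abstract:

```python
# FlashText usage: one pass, complete-word matching, longest match first.
from flashtext import KeywordProcessor

kp = KeywordProcessor()  # case-insensitive by default
for kw in ["Machine", "Learning", "Machine Learning", "Apple"]:
    kp.add_keyword(kw)

print(kp.extract_keywords("I like Machine learning"))  # ['Machine Learning'] — longest match wins
print(kp.extract_keywords("I like Pineapple"))         # [] — 'Apple' is never matched as a substring
```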
For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|For a document of size N (characters) and a dictionary of M keywords, the time complexity will be what? -For a document of size N (characters) and a dictionary of M keywords, the time complexity will be what?|has_answer|O(N) -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|What algorithm is FlashText much faster than?
-What algorithm is FlashText much faster than?|has_answer|Regex -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|For an input dictionary of {Apple}, what won't this algorithm match it to? -For an input dictionary of {Apple}, what won't this algorithm match it to?|has_answer|'I like Pineapple' -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning.
We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. What is this algorithm also designed to go for first? -For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. What is this algorithm also designed to go for first?|has_answer|longest match -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN).
It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|has_question|Where is the python implementation of the FlashText algorithm available? -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objectiv. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|has_question|How to accelerate contextual representation learning? -How to accelerate contextual representation learning?|has_answer|Efficient Contextual Representation Learning Without Softmax Layer -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. 
Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words. Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|has_question|Contextual representation models are difficult to train due to what kind of parameter sizes? -Contextual representation models are difficult to train due to what kind of parameter sizes?|has_answer|large -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.
decouples learning contexts and words. Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|has_question|What did we redesign to reduce the inefficiency due to the large vocabulary size? -What did we redesign to reduce the inefficiency due to the large vocabulary size?|has_answer|learning objective -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words. Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings.
Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|has_question|How does the proposed approach bypass the softmax layer? -How does the proposed approach bypass the softmax layer?|has_answer|by performing language modeling with dimension reduction -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words. Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings.|has_question|Our framework reduces the time spent on the output layer to what level? -Our framework reduces the time spent on the output layer to what level?|has_answer|negligible -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective.
Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|has_question|When applied to ELMo, our method achieves a 4 times speedup and eliminates what trainable parameters? -When applied to ELMo, our method achieves a 4 times speedup and eliminates what trainable parameters?|has_answer|80% -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words. Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity.
By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|has_question|Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend what layer to predict the embedding of the -Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend what layer to predict the embedding of the|has_answer|SEMFIT layer -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words. Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|has_question|What has achieved great success in improving various downstream tasks?
-What has achieved great success in improving various downstream tasks?|has_answer|Contextual representation models -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words. Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|has_question|Why are language-model-based encoders difficult to train? -Why are language-model-based encoders difficult to train?|has_answer|large parameter sizes and high computational complexity -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary.
When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words. Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|has_question|Why does the softmax layer cause significant inefficiency? -Why does the softmax layer cause significant inefficiency?|has_answer|large vocabulary size -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words. Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings.
Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|has_question|What did we propose for training contextual representation models? -What did we propose for training contextual representation models?|has_answer|an efficient framework -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|What is an example of a fundamental ML algorithm discovered from scratch? -What is an example of a fundamental ML algorithm discovered from scratch?|has_answer|small neural nets with backprop -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. 
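The softmax-bypass idea repeated in the rows above can be pictured with a small PyTorch sketch. This is our illustration of the principle, not the paper's code: the encoder's context vector is regressed onto a frozen pre-trained embedding of the target word, so the per-position cost is O(d) instead of the O(V) softmax. Sizes and names are invented.

import torch
import torch.nn as nn
import torch.nn.functional as F

V, d, h = 50_000, 300, 512               # illustrative vocabulary/embedding/hidden sizes
pretrained = nn.Embedding(V, d)          # stands in for pre-trained word embeddings
pretrained.weight.requires_grad_(False)  # frozen: no trainable softmax-sized matrix

project = nn.Linear(h, d)  # maps the encoder's context vector into embedding space

def embedding_regression_loss(context_vecs, target_ids):
    """Cosine loss between the predicted vector and the pre-trained target
    embedding, replacing the full-vocabulary softmax at the output layer."""
    pred = project(context_vecs)   # (batch, d)
    gold = pretrained(target_ids)  # (batch, d)
    return (1 - F.cosine_similarity(pred, gold, dim=-1)).mean()

loss = embedding_regression_loss(torch.randn(8, h), torch.randint(0, V, (8,)))
loss.backward()  # only the small projection receives gradients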
These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|What can evolution be called? -What can evolution be called?|has_answer|Master Algorithm -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|Machine learning research has advanced in multiple aspects, including what? -Machine learning research has advanced in multiple aspects, including what?|has_answer|model structures and learning methods -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. 
These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|What is the effort to automate machine learning research known as? -What is the effort to automate machine learning research known as?|has_answer|AutoML -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|What has AutoML relied on as building blocks? -What has AutoML relied on as building blocks?|has_answer|sophisticated expert-designed layers -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. 
Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|What is the goal of AutoML-Zero? -What is the goal of AutoML-Zero?|has_answer|to show that AutoML can go further -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|How does AutoML-Zero demonstrate that it is possible to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks? -How does AutoML-Zero demonstrate that it is possible to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks?|has_answer|introducing a novel framework -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. 
Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|What can evolutionary search still discover? -What can evolutionary search still discover?|has_answer|two-layer neural networks -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|How can simple neural networks be surpassed? -How can simple neural networks be surpassed?|has_answer|by evolving directly on tasks of interest -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. 
Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|What variants of neural networks can be surpassed by evolving directly on tasks of interest? -What variants of neural networks can be surpassed by evolving directly on tasks of interest?|has_answer|CIFAR-10 -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|What type of algorithms appear when little data is available? -What type of algorithms appear when little data is available?|has_answer|dropout-like techniques -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. 
However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|has_question|What do preliminary successes in discovering machine learning algorithms from scratch indicate for the field? -What do preliminary successes in discovering machine learning algorithms from scratch indicate for the field?|has_answer|promising new direction -Hypermedia driven web APIs. The basic idea behind Hydra is to provide a vocabulary which enables a server to advertise valid state transitions to a client. A client can then use this information to construct HTTP requests which modify the server’s state so that a certain desired goal is achieved. Since all the information about the valid state transitions is exchanged in a machine-processable way at runtime instead of being hardcoded into the client at design time, clients can be decoupled from the server and adapt to changes more easily.|has_question|What type of web APIs is Hydra? -Hypermedia driven web APIs. The basic idea behind Hydra is to provide a vocabulary which enables a server to advertise valid state transitions to a client. A client can then use this information to construct HTTP requests which modify the server’s state so that a certain desired goal is achieved. Since all the information about the valid state transitions is exchanged in a machine-processable way at runtime instead of being hardcoded into the client at design time, clients can be decoupled from the server and adapt to changes more easily.|has_question|What can a client use the information from the vocabulary to construct? -What can a client use the information from the vocabulary to construct?|has_answer|HTTP requests -Hypermedia driven web APIs. The basic idea behind Hydra is to provide a vocabulary which enables a server to advertise valid state transitions to a client. A client can then use this information to construct HTTP requests which modify the server’s state so that a certain desired goal is achieved. Since all the information about the valid state transitions is exchanged in a machine-processable way at runtime instead of being hardcoded into the client at design time, clients can be decoupled from the server and adapt to changes more easily.|has_question|In what way is information about valid state transitions exchanged at runtime?
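The search idea in the AutoML-Zero rows above can be illustrated with a toy evolutionary loop (entirely our sketch, not the paper's framework): straight-line programs built from basic arithmetic ops are mutated and selected on a small regression task.

import random

OPS = [lambda a, b: a + b, lambda a, b: a - b, lambda a, b: a * b]

def run(program, x):
    """Execute a straight-line program over three registers; register 2 is the output."""
    regs = [x, 1.0, 0.0]
    for op, i, j in program:
        regs[2] = OPS[op](regs[i], regs[j])
    return regs[2]

def fitness(program, data):
    return -sum((run(program, x) - y) ** 2 for x, y in data)

def random_instr():
    return (random.randrange(len(OPS)), random.randrange(3), random.randrange(3))

def mutate(program):
    prog = list(program)
    prog[random.randrange(len(prog))] = random_instr()
    return prog

data = [(float(x), 2.0 * x + 1.0) for x in range(-5, 6)]  # toy target: y = 2x + 1
population = [[random_instr() for _ in range(4)] for _ in range(50)]
for _ in range(200):  # keep the 10 fittest programs, refill with their mutants
    population.sort(key=lambda p: fitness(p, data), reverse=True)
    population = population[:10] + [mutate(random.choice(population[:10])) for _ in range(40)]
print(fitness(population[0], data))  # approaches 0 once a correct program is found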
-In what way is information about valid state transitions exchanged at runtime?|has_answer|machine-processable -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that is used in Word2Vec. Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in natural language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|has_question|What aims at learning topic representations along with word representations? -What aims at learning topic representations along with word representations?|has_answer|Topic2Vec: Learning Distributed Representations of Topics -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that is used in Word2Vec. Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in natural language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|has_question|What does LDA stand for? -What does LDA stand for?|has_answer|Latent Dirichlet Allocation -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that is used in Word2Vec. Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in natural language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec.
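To make the Hydra rows above concrete, here is a hand-written Python sketch; the payload and the helper are invented for illustration and not taken from a real Hydra API. The server's response advertises its valid operations, and a generic client builds HTTP requests only from what is advertised, so nothing is hardcoded at design time.

api_response = {  # hypothetical JSON-LD-style resource, hand-written for illustration
    "@id": "/api/events/42",
    "title": "ISWC talk",
    "operation": [
        {"@type": "Operation", "method": "DELETE"},
        {"@type": "Operation", "method": "PUT", "expects": "schema:Event"},
    ],
}

def build_request(resource, desired_method):
    """Only construct an HTTP request for a state transition the server advertises."""
    for op in resource.get("operation", []):
        if op["method"] == desired_method:
            return {"method": desired_method, "url": resource["@id"]}
    raise ValueError(f"{desired_method} not advertised for {resource['@id']}")

print(build_request(api_response, "DELETE"))  # {'method': 'DELETE', 'url': '/api/events/42'}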
The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|has_question|What distribution from LDA only describes the statistical relationship of occurrences in the corpus? -What distribution from LDA only describes the statistical relationship of occurrences in the corpus?|has_answer|probability -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that is used in Word2Vec. Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in natural language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|has_question|What has been proposed to represent words and documents by learning essential concepts and representations? -What has been proposed to represent words and documents by learning essential concepts and representations?|has_answer|embedding methods -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that is used in Word2Vec. Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in natural language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|has_question|What has shown more effectiveness than LDA-style representations in many tasks? -What has shown more effectiveness than LDA-style representations in many tasks?|has_answer|embedded representations -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations.
Considering the simplicity and efficient solution, we just follow the optimization scheme that is used in Word2Vec. Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in natural language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|has_question|What approach can learn topic representations in the same semantic vector space with words? -What approach can learn topic representations in the same semantic vector space with words?|has_answer|Topic2Vec -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that is used in Word2Vec. Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in natural language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|has_question|What does Topic2Vec achieve? -What does Topic2Vec achieve?|has_answer|interesting and meaningful results -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory.
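A rough sketch of the Topic2Vec idea from the rows above (our reconstruction, not the authors' code): tag each document with its dominant LDA topic as a pseudo-token, then let an ordinary Word2Vec run embed topics and words in one vector space. The corpus, sizes, and the TOPIC_ naming are invented.

from gensim.corpora import Dictionary
from gensim.models import LdaModel, Word2Vec

docs = [["graph", "embedding", "entity"], ["topic", "model", "document"],
        ["entity", "graph", "link"], ["document", "topic", "word"]] * 25  # toy corpus
dictionary = Dictionary(docs)
bows = [dictionary.doc2bow(doc) for doc in docs]
lda = LdaModel(bows, num_topics=2, id2word=dictionary, passes=5)

augmented = []
for doc, bow in zip(docs, bows):
    top_topic = max(lda.get_document_topics(bow), key=lambda t: t[1])[0]
    augmented.append(doc + [f"TOPIC_{top_topic}"])  # topic injected as a pseudo-word

w2v = Word2Vec(augmented, vector_size=50, window=3, min_count=1, epochs=20)
print(w2v.wv.most_similar("TOPIC_0"))  # the topic vector lives among the word vectors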
We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|has_question|Learning with what has been shown to be able to model large-scale semantic knowledge graphs? -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|has_question|Learning with Memory Embeddings Embedding learning is also known as what? -Learning with Memory Embeddings Embedding learning is also known as what?|has_answer|representation learning -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|has_question|What is a key concept of embedding learning? -What is a key concept of embedding learning?|has_answer|a mapping of the knowledge graph to a tensor representation -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. 
In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|has_question|What are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs? -What are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs?|has_answer|Latent variable models -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|has_question|What models were extended to consider time evolutions, time patterns and subsymbolic representations? -What models were extended to consider time evolutions, time patterns and subsymbolic representations?|has_answer|embedding models -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|has_question|What kind of problems were embedding models developed for? 
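The "Learning with Memory Embeddings" rows above hinge on mapping a knowledge graph to a tensor whose entries are predicted from latent representations of entities. A minimal sketch of one such predictor, assuming a DistMult-style bilinear-diagonal score in PyTorch (the paper itself covers a broader family of models):

```python
# Minimal DistMult-style scorer: each (head, relation, tail) tensor entry is
# predicted from latent representations. A sketch, not the paper's exact model.
import torch

n_entities, n_relations, dim = 100, 4, 32
E = torch.nn.Embedding(n_entities, dim)
R = torch.nn.Embedding(n_relations, dim)

def score(h, r, t):
    # Bilinear-diagonal form: sum_k e_h[k] * w_r[k] * e_t[k]
    return (E(h) * R(r) * E(t)).sum(dim=-1)

h = torch.tensor([0, 1]); r = torch.tensor([2, 3]); t = torch.tensor([5, 6])
print(score(h, r, t))  # higher scores ~ more plausible triples after training
```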
-What kind of problems were embedding models developed for?|has_answer|technical -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|has_question|What do we discuss in embedding models? -What do we discuss in embedding models?|has_answer|the path from sensory input to semantic decoding -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|has_question|What do we introduce on human memory that can be derived from the developed mathematical models? -What do we introduce on human memory that can be derived from the developed mathematical models?|has_answer|hypotheses -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. 
Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|has_question|Recent advances in what are explored in Exploring the Limits of Language Modeling? -Recent advances in what are explored in Exploring the Limits of Language Modeling?|has_answer|Recurrent Neural Networks for large scale Language Modeling -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|has_question|What are two key challenges present in Recurrent Neural Networks for large scale Language Modeling? -What are two key challenges present in Recurrent Neural Networks for large scale Language Modeling?|has_answer|corpora and vocabulary sizes, and complex, long term structure of language -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|has_question|What is another name for character Convolutional Neural Networks? -What is another name for character Convolutional Neural Networks?|has_answer|Long-Short Term Memory -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. 
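The perplexity figures quoted in these rows (51.3 down to 30.0, 41.0 down to 23.7) are exponentiated average per-token negative log-likelihoods. A minimal sketch of that computation, with made-up token probabilities:

```python
# Perplexity = exp(mean negative log-likelihood per token).
# Toy numbers; the paper's 51.3 -> 30.0 figures are model outputs, not shown here.
import math

token_probs = [0.1, 0.02, 0.3, 0.05]  # hypothetical p(token | context) values
nll = -sum(math.log(p) for p in token_probs) / len(token_probs)
print(math.exp(nll))  # ~13.5 for these toy probabilities
```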
Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|has_question|What is the state-of-the-art perplexity?
-What is the state-of-the-art perplexity?|has_answer|51.3 down to 30.0
-Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|has_question|Who do we release our models for?
-Who do we release our models for?|has_answer|NLP and ML community
-Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|has_question|What is the main issue in Self-Taught Hashing for Fast Similarity Search?
-What is the main issue in Self-Taught Hashing for Fast Similarity Search?|has_answer|obtaining the codes for previously unseen documents
-Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|has_question|How do we find the optimal l-bit binary codes for all documents in the given corpus?
-How do we find the optimal l-bit binary codes for all documents in the given corpus?|has_answer|supervised learning
-Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|has_question|The ability of fast similarity search at large scale is of great importance to many what?
-The ability of fast similarity search at large scale is of great importance to many what?|has_answer|Information Retrieval
-Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|has_question|What is a promising way to accelerate similarity search?
-What is a promising way to accelerate similarity search?|has_answer|semantic hashing
-Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|has_question|What remains to be a very challenging problem?
-What remains to be a very challenging problem?|has_answer|obtaining the codes for previously unseen documents
-Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|has_question|What does STH stand for?
-What does STH stand for?|has_answer|Self-Taught Hashing
-Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|has_question|What is LapEig?
-What is LapEig?|has_answer|Laplacian Eigenmap
-the task of grouping a set of objects in such a way that objects in the same group (cluster) are more similar (in some sense or another) to each other than to those in other groups. |has_question|What is another name for grouping objects in such a way that objects in the same group are more similar to each other?
-What is another name for grouping objects in such a way that objects in the same group are more similar to each other?|has_answer|cluster
-Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|has_question|What does LSTM stand for?
-What does LSTM stand for?|has_answer|Long Short-Term Memory
-Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|has_question|What does CRF stand for?
-What does CRF stand for?|has_answer|Conditional Random Field
-Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging.
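The Self-Taught Hashing rows above describe a two-stage recipe: unsupervised l-bit codes (binarised Laplacian Eigenmap), then l supervised classifiers (linear SVMs) that predict each bit for unseen documents. A rough scikit-learn sketch under simplifying assumptions (TF-IDF features, median-threshold binarisation, toy corpus; not the authors' implementation):

```python
# Self-Taught-Hashing-style sketch: unsupervised l-bit codes, then l classifiers.
# Assumes scikit-learn; TF-IDF features and thresholds are simplifications.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import SpectralEmbedding
from sklearn.svm import LinearSVC

corpus = ["graph embedding", "semantic hashing", "fast similarity search",
          "topic models", "binary codes for documents", "nearest neighbour search"]
X = TfidfVectorizer().fit_transform(corpus).toarray()

l = 2  # code length in bits
Y = SpectralEmbedding(n_components=l, n_neighbors=3).fit_transform(X)
codes = (Y > np.median(Y, axis=0)).astype(int)  # median-threshold binarisation

# Stage 2: one classifier per bit predicts codes for unseen documents.
bit_classifiers = [LinearSVC().fit(X, codes[:, b]) for b in range(l)]
```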
These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|has_question|What is the name of the bidirectional LSTM CRF model? -What is the name of the bidirectional LSTM CRF model?|has_answer|BI-LSTM-CRF -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|has_question|What model can efficiently use both past and future input features thanks to a bidirectional LSTM component? -What model can efficiently use both past and future input features thanks to a bidirectional LSTM component?|has_answer|BI-LSTM-CRF model -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|has_question|What can the BI-LSTM-CRF model use thanks to a CRF layer? -What can the BI-LSTM-CRF model use thanks to a CRF layer?|has_answer|sentence level tag information -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). 
Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|has_question|What data sets can the BI-LSTM-CRF model produce state of the art accuracy on? -What data sets can the BI-LSTM-CRF model produce state of the art accuracy on?|has_answer|POS, chunking and NER data sets -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|has_question|The BI-LSTM-CRF model has less dependence on what than previous observations? -The BI-LSTM-CRF model has less dependence on what than previous observations?|has_answer|word embedding -Term Frequency-Inverse Document Frequency. major limitations: - It computes document similarity directly in the word-count space, which could be slow for large vocabularies. - It assumes that the counts of different words provide independent evidence of similarity. - It makes no use of semantic similarities between words. |has_question|What is the name of the term used to describe document similarity? -What is the name of the term used to describe document similarity?|has_answer|Term Frequency-Inverse Document Frequency -Term Frequency-Inverse Document Frequency. major limitations: - It computes document similarity directly in the word-count space, which could be slow for large vocabularies. - It assumes that the counts of different words provide independent evidence of similarity. - It makes no use of semantic similarities between words. |has_question|Where does Term Frequency-Inverse Document Frequency compute document similarity? -Where does Term Frequency-Inverse Document Frequency compute document similarity?|has_answer|word-count space -Term Frequency-Inverse Document Frequency. major limitations: - It computes document similarity directly in the word-count space, which could be slow for large vocabularies. - It assumes that the counts of different words provide independent evidence of similarity. - It makes no use of semantic similarities between words. |has_question|What does Term Frequency-Inverse Document Frequency assume that the counts of different words provide? -What does Term Frequency-Inverse Document Frequency assume that the counts of different words provide?|has_answer|independent evidence of similarity -Term Frequency-Inverse Document Frequency. 
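The BI-LSTM-CRF rows above combine a bidirectional LSTM (past and future input features) with a CRF layer (sentence-level tag information). The sketch below covers only the bidirectional-LSTM emission scores in PyTorch; a full BI-LSTM-CRF would add CRF transition scores and Viterbi decoding, which are omitted here:

```python
# Minimal bidirectional LSTM tagger (emissions only; the paper adds a CRF layer
# with transition scores and Viterbi decoding on top of these scores).
import torch
import torch.nn as nn

class BiLSTMTagger(nn.Module):
    def __init__(self, vocab_size, n_tags, emb_dim=64, hidden=64):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, emb_dim)
        self.lstm = nn.LSTM(emb_dim, hidden, bidirectional=True, batch_first=True)
        self.out = nn.Linear(2 * hidden, n_tags)  # 2x: forward + backward states

    def forward(self, token_ids):
        h, _ = self.lstm(self.emb(token_ids))
        return self.out(h)  # per-token tag scores (emissions)

tagger = BiLSTMTagger(vocab_size=1000, n_tags=9)   # e.g. 9 NER tags, hypothetical
scores = tagger(torch.randint(0, 1000, (2, 7)))    # batch of 2 sentences, length 7
print(scores.shape)  # torch.Size([2, 7, 9])
```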
major limitations: - It computes document similarity directly in the word-count space, which could be slow for large vocabularies. - It assumes that the counts of different words provide independent evidence of similarity. - It makes no use of semantic similarities between words. |has_question|What does Term Frequency-Inverse Document Frequency not use? -What does Term Frequency-Inverse Document Frequency not use?|has_answer|semantic similarities -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|has_question|At a high level, linguistic features seem to be represented in what? -At a high level, linguistic features seem to be represented in what?|has_answer|separate semantic and syntactic subspaces -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|has_question|What do we find evidence of? -What do we find evidence of?|has_answer|fine-grained geometric representation of word senses -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. 
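The TF-IDF rows above list its limitations: similarity is computed directly in word-count space, with no semantic sharing between distinct words. A standard illustration of that setup, assuming scikit-learn and toy documents:

```python
# TF-IDF document similarity in word-count space (the setup the rows above
# criticise: no use of semantic similarity between distinct words).
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

docs = ["the cat sat on the mat", "a cat lay on a rug", "stock markets fell"]
X = TfidfVectorizer().fit_transform(docs)
print(cosine_similarity(X))  # "mat" vs "rug" contribute nothing: overlap only via shared tokens
```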
We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|has_question|What are two examples of syntactic representations? -What are two examples of syntactic representations?|has_answer|attention matrices and individual word embeddings -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|has_question|What do these networks appear to extract? -What do these networks appear to extract?|has_answer|linguistic features -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. 
This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|has_question|What is a natural question for networks that extract generally useful linguistic features? -What is a natural question for networks that extract generally useful linguistic features?|has_answer|how such networks represent this information internally -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|has_question|What does this paper describe of one particularly effective model, BERT? -What does this paper describe of one particularly effective model, BERT?|has_answer|qualitative and quantitative investigations -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. 
We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|has_question|At a high level, linguistic features seem to be represented in separate what subspaces? -At a high level, linguistic features seem to be represented in separate what subspaces?|has_answer|semantic and syntactic -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|has_question|In addition to attention matrices, what is a mathematical argument to explain the geometry of these representations? -In addition to attention matrices, what is a mathematical argument to explain the geometry of these representations?|has_answer|individual word embeddings -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|has_question|What is a new pretraining method for NLP that significantly improves upon BERT on 20 tasks? 
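The BERT-geometry rows above study how linguistic structure is laid out across BERT's internal representations. A starting-point sketch, assuming the Hugging Face transformers package: extract per-layer hidden states and compare contextual vectors of the same surface word used in different senses.

```python
# Sketch: pull per-layer hidden states from BERT to inspect how context moves
# a word's vector (assumes the Hugging Face `transformers` package).
import torch
from transformers import AutoModel, AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased", output_hidden_states=True)

batch = tok(["he sat by the river bank", "she robbed the bank"],
            return_tensors="pt", padding=True)
with torch.no_grad():
    hidden = model(**batch).hidden_states  # tuple: embeddings + one per layer

layer = hidden[-1]                      # last layer, shape (2, seq_len, 768)
i = batch["input_ids"][0].tolist().index(tok.convert_tokens_to_ids("bank"))
j = batch["input_ids"][1].tolist().index(tok.convert_tokens_to_ids("bank"))
sim = torch.cosine_similarity(layer[0, i], layer[1, j], dim=0)
print(float(sim))  # different senses of "bank" -> noticeably lower similarity
```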
-What is a new pretraining method for NLP that significantly improves upon BERT on 20 tasks?|has_answer|XLNet: Generalized Autoregressive Pretraining for Language Understanding -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|has_question|What does BERT rely on to neglect dependency between the masked positions? -What does BERT rely on to neglect dependency between the masked positions?|has_answer|corrupting the input with masks -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|has_question|What is the name of the generalized autoregressive pretraining method? -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. 
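The XLNet rows describe the pretraining objective; reproducing permutation language modelling is out of scope here, but the released model can be loaded and fine-tuned much like BERT. A minimal loading sketch, assuming the Hugging Face transformers package and a hypothetical two-label task:

```python
# Fine-tuning-side sketch: load pretrained XLNet for classification (assumes the
# Hugging Face `transformers` package; the permutation-LM pretraining itself is
# far more involved and is not reproduced here).
import torch
from transformers import XLNetForSequenceClassification, XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
model = XLNetForSequenceClassification.from_pretrained("xlnet-base-cased",
                                                       num_labels=2)
batch = tok(["a readable, useful paper"], return_tensors="pt")
with torch.no_grad():
    logits = model(**batch).logits
print(logits.shape)  # torch.Size([1, 2])
```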
In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|has_question|What is the state-of-the-art autoregressive model? -What is the state-of-the-art autoregressive model?|has_answer|Transformer-XL -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|has_question|What tasks does XLNet outperform BERT on? -What tasks does XLNet outperform BERT on?|has_answer|question answering, natural language inference, sentiment analysis, and document ranking -Learning Sparse, Distributed Representations using the Hebbian Principle The \fire together, wire together\ Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The fire together, wire together Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|has_question|Learning Sparse, Distributed Representations using what principle? 
-Learning Sparse, Distributed Representations using the Hebbian Principle The \fire together, wire together\ Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The fire together, wire together Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|has_question|Learning Sparse, Distributed Representations using what principle?
-Learning Sparse, Distributed Representations using what principle?|has_answer|Hebbian
-Learning Sparse, Distributed Representations using the Hebbian Principle The \fire together, wire together\ Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The fire together, wire together Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|has_question|What is the central principle for learning in neuroscience?
-What is the central principle for learning in neuroscience?|has_answer|fire together, wire together
-Learning Sparse, Distributed Representations using the Hebbian Principle The \fire together, wire together\ Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The fire together, wire together Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|has_question|What do competitive Hebbian learning flavors produce?
-What do competitive Hebbian learning flavors produce?|has_answer|sparse, distributed neural codes
-Learning Sparse, Distributed Representations using the Hebbian Principle The \fire together, wire together\ Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The fire together, wire together Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|has_question|What does AHL stand for?
-What does AHL stand for?|has_answer|Adaptive Hebbian Learning
-Learning Sparse, Distributed Representations using the Hebbian Principle The \fire together, wire together\ Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The fire together, wire together Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|has_question|How do we illustrate the distributed nature of the learned representations?
-How do we illustrate the distributed nature of the learned representations?|has_answer|output entropy computations
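As a companion to the AHL rows above, a minimal competitive Hebbian update in Python: the most active unit moves toward the input and is renormalized, so units specialize and inputs map to sparse codes. This is a deliberate simplification of the paper's algorithm, and every name in it is illustrative:

```python
import numpy as np

# Winner-take-all Hebbian learning: competition picks one unit, the
# Hebbian rule moves its weights toward the input ("fire together,
# wire together"), and normalization keeps the weights bounded.
rng = np.random.default_rng(0)
n_inputs, n_units, lr = 8, 4, 0.1
W = rng.normal(size=(n_units, n_inputs))
W /= np.linalg.norm(W, axis=1, keepdims=True)

for _ in range(1000):
    x = rng.normal(size=n_inputs)
    x /= np.linalg.norm(x)
    winner = np.argmax(W @ x)                # competition: one unit fires
    W[winner] += lr * (x - W[winner])        # Hebbian move toward the input
    W[winner] /= np.linalg.norm(W[winner])   # keep weights normalized

print((W @ x > 0.5).astype(int))             # sparse code for the last input
```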
-Computes embeddings for the vertices of unlabeled graphs. DeepWalk bridges the gap between network embeddings and word embeddings by treating nodes as words and generating short random walks as sentences. Then, neural language models such as Skip-gram can be applied on these random walks to obtain network embedding.|has_question|What does DeepWalk compute for the vertices of unlabeled graphs?
-What does DeepWalk compute for the vertices of unlabeled graphs?|has_answer|embeddings
-Computes embeddings for the vertices of unlabeled graphs. DeepWalk bridges the gap between network embeddings and word embeddings by treating nodes as words and generating short random walks as sentences. Then, neural language models such as Skip-gram can be applied on these random walks to obtain network embedding.|has_question|How does DeepWalk bridge the gap between network embeddings and word embeddings?
-How does DeepWalk bridge the gap between network embeddings and word embeddings?|has_answer|by treating nodes as words and generating short random walks as sentences
-Computes embeddings for the vertices of unlabeled graphs. DeepWalk bridges the gap between network embeddings and word embeddings by treating nodes as words and generating short random walks as sentences. Then, neural language models such as Skip-gram can be applied on these random walks to obtain network embedding.|has_question|What neural language model can be applied on random walks to obtain network embedding?
-What neural language model can be applied on random walks to obtain network embedding?|has_answer|Skip-gram
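The DeepWalk rows above compress to a two-step recipe: sample short random walks, then run Skip-gram over them as if they were sentences. A minimal sketch assuming networkx and gensim are available; the graph and hyperparameters are illustrative:

```python
import random
import networkx as nx
from gensim.models import Word2Vec

def random_walks(graph, num_walks=10, walk_length=8, seed=0):
    """Treat nodes as words: emit short random walks as 'sentences'."""
    rng = random.Random(seed)
    walks = []
    for _ in range(num_walks):
        for node in graph.nodes():
            walk = [node]
            while len(walk) < walk_length:
                neighbors = list(graph.neighbors(walk[-1]))
                if not neighbors:
                    break
                walk.append(rng.choice(neighbors))
            walks.append([str(n) for n in walk])
    return walks

G = nx.karate_club_graph()
# sg=1 selects Skip-gram, as in the rows above.
model = Word2Vec(random_walks(G), vector_size=32, window=4, sg=1, min_count=1)
print(model.wv["0"][:5])  # embedding of node 0
```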
-Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|has_question|What is the name of the large scale evaluation of Knowledge Graph Embedding Models under a Unified Framework?
-What is the name of the large scale evaluation of Knowledge Graph Embedding Models under a Unified Framework?|has_answer|Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework
-Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|has_question|How many interaction models were re-implemented and evaluated in the PyKEEN software package?
-How many interaction models were re-implemented and evaluated in the PyKEEN software package?|has_answer|19
-Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|has_question|What did we outline which results could be reproduced with?
-What did we outline which results could be reproduced with?|has_answer|hyper-parameters
-Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|has_question|How many GPU hours of computation time were used in the large-scale benchmarking?
-How many GPU hours of computation time were used in the large-scale benchmarking?|has_answer|21,246
-Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|has_question|What do we present insights gained as to for each model?
-What do we present insights gained as to for each model?|has_answer|best configurations
-Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|has_question|What is crucial for a model's performance?
-What is crucial for a model's performance?|has_answer|model architecture, training approach, loss function, and the explicit modeling of inverse relations
-Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|has_question|What can obtain results competitive to the state-of-the-art when configured carefully?
-What can obtain results competitive to the state-of-the-art when configured carefully?|has_answer|several architectures
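Since the rows above describe the PyKEEN benchmarking study, a minimal usage sketch of the package itself may help. The dataset and model names are PyKEEN identifiers, but the configuration is illustrative, not one of the paper's tuned setups:

```python
from pykeen.pipeline import pipeline

# Train and evaluate one interaction model on a small built-in dataset.
result = pipeline(
    dataset="Nations",
    model="TransE",
    training_kwargs=dict(num_epochs=5),
    random_seed=42,
)
print(result.metric_results.get_metric("hits@10"))
```

The pipeline bundles exactly the ingredients the paper found decisive: model architecture, training approach, and loss function can each be swapped via keyword arguments while everything else stays fixed.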
-A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|has_question|What is a fundamental conceptual question in deep neural networks?
-What is a fundamental conceptual question in deep neural networks?|has_answer|mathematical theory of semantic development
-A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|has_question|An extensive body of empirical research has revealed what in the acquisition, organization, deployment, and neural representation of human semantic knowledge?
-An extensive body of empirical research has revealed what in the acquisition, organization, deployment, and neural representation of human semantic knowledge?|has_answer|remarkable regularities
-A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|has_question|How do we address this fundamental conceptual question?
-How do we address this fundamental conceptual question?|has_answer|by mathematically analyzing the nonlinear dynamics of learning in deep linear networks
-A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|has_question|What does the nonlinear dynamics of learning in deep linear networks yield a conceptual explanation for?
-What does the nonlinear dynamics of learning in deep linear networks yield a conceptual explanation for?|has_answer|the prevalence of many disparate phenomena in semantic cognition
-A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|has_question|What model recapitulates many diverse regularities underlying semantic development?
-What model recapitulates many diverse regularities underlying semantic development?|has_answer|our simple neural model
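The analysis the rows above summarize concerns gradient-descent learning in deep linear networks. A tiny simulation of that setting (toy item-property data, illustrative sizes) shows the plateau-then-drop error curves whose exact form the paper derives:

```python
import numpy as np

# Two-layer deep *linear* network trained by gradient descent on a
# small hierarchical item-property matrix, starting from a small
# random initialization.
rng = np.random.default_rng(0)
X = np.eye(4)                        # one-hot items
Y = np.array([[1., 1., 1., 1.],      # property shared by all items
              [1., 1., 0., 0.],      # property of one branch
              [0., 0., 1., 1.],      # property of the other branch
              [1., 0., 0., 0.]])     # item-specific property
W1 = rng.normal(scale=0.01, size=(3, 4))
W2 = rng.normal(scale=0.01, size=(4, 3))
lr = 0.05
for step in range(2001):
    err = W2 @ W1 @ X - Y            # squared-error gradient descent
    W2, W1 = W2 - lr * err @ (W1 @ X).T, W1 - lr * W2.T @ err @ X.T
    if step % 500 == 0:
        print(step, round(float(np.sum(err ** 2)), 4))
```

Broad distinctions (the branch-level properties) are learned before item-specific ones, which is the hierarchical differentiation of concepts the rows above mention.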
-Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised `skeleton' table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|has_question|What is a significant challenge in many industries and in academic research?
-What is a significant challenge in many industries and in academic research?|has_answer|Extracting Tables from Documents
-Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised `skeleton' table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|has_question|Existing methods that take a bottom-up approach neglect the available prior information relating to table structure.
-Existing methods that take a bottom-up approach neglect the available prior information relating to table structure.|has_answer|integrating lines into cells and rows or columns
-Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised `skeleton' table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|has_question|What approach does our proposed method take?
-What approach does our proposed method take?|has_answer|top-down
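The second stage of the table-extraction method described above fits candidate table structures to the GAN-produced skeleton with a genetic algorithm. A minimal sketch of that fitting step, where the skeleton is reduced to a list of detected column positions and the distance measure is an illustrative simplification:

```python
import random

skeleton = [12, 104, 230, 355]  # column x-positions read off the skeleton image

def distance(candidate):
    """Sum of distances from each candidate border to its nearest skeleton border."""
    return sum(min(abs(c - s) for s in skeleton) for c in candidate)

def evolve(pop_size=50, genes=4, span=400, generations=100, seed=0):
    rng = random.Random(seed)
    pop = [sorted(rng.sample(range(span), genes)) for _ in range(pop_size)]
    for _ in range(generations):
        pop.sort(key=distance)                # selection: keep the best half
        parents = pop[: pop_size // 2]
        children = [sorted(g + rng.randint(-5, 5) for g in p) for p in parents]
        pop = parents + children              # mutation produces the next generation
    return min(pop, key=distance)

print(evolve())  # candidate borders closest to the skeleton
```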
-Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|has_question|What is automated with a full text similarity search?
-What is automated with a full text similarity search?|has_answer|patent's prior art
-Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|has_question|What do patents guarantee their creators?
-What do patents guarantee their creators?|has_answer|protection against infringement
-Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|has_question|What must be assessed for an invention to be patentable?
-What must be assessed for an invention to be patentable?|has_answer|novelty and inventiveness
-Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|has_question|What describes similar inventions to a given patent application?
-What describes similar inventions to a given patent application?|has_answer|published work
-Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|has_question|How is the search for prior art currently executed?
-How is the search for prior art currently executed?|has_answer|semi-automatically composed keyword queries
-Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|has_question|What can cause errors in the search for prior art?
-What can cause errors in the search for prior art?|has_answer|different keywords for the same technical concepts may exist across disciplines
-Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|has_question|What is used to automatically detect inventions that are similar to the one described in the submitted document?
-What is used to automatically detect inventions that are similar to the one described in the submitted document?|has_answer|machine learning and natural language processing techniques
-Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|has_question|Various state-of-the-art approaches for feature extraction and what are evaluated?
-Various state-of-the-art approaches for feature extraction and what are evaluated?|has_answer|document comparison
-Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|has_question|The quality of the current search process is assessed based on ratings of what?
-The quality of the current search process is assessed based on ratings of what?|has_answer|domain expert
-Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|has_question|What does our automated approach do to the search results for prior art?
-What does our automated approach do to the search results for prior art?|has_answer|improves
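A baseline version of the full-text comparison the rows above describe, using TF-IDF features and cosine similarity from scikit-learn. The corpus is an illustrative stand-in; the paper evaluates several richer feature extractors on top of this kind of pipeline:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Rank existing patents by full-text similarity to an application,
# instead of composing keyword queries by hand.
corpus = [
    "A rotary engine with improved fuel injection.",
    "Method for wireless charging of electric vehicles.",
    "Fuel injection timing control for combustion engines.",
]
application = "Improved control of fuel injection in rotary engines."

vectorizer = TfidfVectorizer(stop_words="english")
patent_vecs = vectorizer.fit_transform(corpus)
query_vec = vectorizer.transform([application])

scores = cosine_similarity(query_vec, patent_vecs).ravel()
for score, text in sorted(zip(scores, corpus), reverse=True):
    print(f"{score:.3f}  {text}")
```

Because the whole text is compared rather than a handful of keywords, documents that use different terminology for the same concept can still rank highly when they share enough surrounding vocabulary.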
-Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|has_question|What limits the vocabulary size to a subset of most frequent types?
-What limits the vocabulary size to a subset of most frequent types?|has_answer|it is usually the slowest layer to compute
-Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|has_question|What is the general technique for replacing the softmax layer?
-What is the general technique for replacing the softmax layer?|has_answer|a continuous embedding layer
-Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|has_question|What is the primary innovation for replacing the softmax layer with a continuous embedding layer?
-What is the primary innovation for replacing the softmax layer with a continuous embedding layer?|has_answer|a novel probabilistic loss
-Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|has_question|On what task do we evaluate a new class of sequence-to-sequence models with continuous outputs?
-On what task do we evaluate a new class of sequence-to-sequence models with continuous outputs?|has_answer|neural machine translation
-Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|has_question|What is the speed-up in training time of Von Mises-Fisher Loss models?
-What is the speed-up in training time of Von Mises-Fisher Loss models?|has_answer|2.5x
-Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|has_question|What are our models capable of handling without compromising on translation quality?
-What are our models capable of handling without compromising on translation quality?|has_answer|very large vocabularies
-Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|has_question|What are the models capable of handling large vocabularies without compromising on translation quality?
-What are the models capable of handling large vocabularies without compromising on translation quality?|has_answer|They also produce more meaningful errors
-The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding.|has_question|What is Solomonoff's general theory of inference?
-What is Solomonoff's general theory of inference?|has_answer|The Description Length of Deep Learning Models
-The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding.
The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|has_question|What might seem to go against this principle given the large number of parameters to be encoded? -What might seem to go against this principle given the large number of parameters to be encoded?|has_answer|Deep neural networks -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|has_question|We demonstrate the ability of deep neural networks to compress the training data even when accounting for what? -We demonstrate the ability of deep neural networks to compress the training data even when accounting for what?|has_answer|parameter encoding -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. 
Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|has_question|What formalizes Occam's razor? -What formalizes Occam's razor?|has_answer|Solomonoff's general theory of inference and the Minimum Description Length principle -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|has_question|The compression viewpoint originally motivated the use of what in neural networks? -The compression viewpoint originally motivated the use of what in neural networks?|has_answer|variational methods -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself.
Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|has_question|Unexpectedly, we found that variational methods provide what? -Unexpectedly, we found that variational methods provide what?|has_answer|surprisingly poor compression bounds -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|has_question|What might explain the relatively poor practical performance of? 
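The "simple incremental encoding" result referenced in the description-length rows above is the prequential idea: train on a growing prefix of the data and pay -log2 p(next chunk) bits for each new chunk. A minimal sketch with a toy classifier and synthetic data; the chunk schedule, the logistic-regression model and the 1-bit uniform baseline are illustrative assumptions, not the paper's exact protocol.

    import numpy as np
    from sklearn.linear_model import LogisticRegression

    rng = np.random.default_rng(0)
    X = rng.normal(size=(2000, 20))
    y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)  # synthetic binary labels

    boundaries = [100, 200, 400, 800, 1600, 2000]  # growing prefixes
    codelength = boundaries[0] * 1.0  # first chunk sent with a uniform code (1 bit/label)
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        model = LogisticRegression().fit(X[:start], y[:start])  # train on data seen so far
        # probability assigned to the true label of each example in the next chunk
        proba = model.predict_proba(X[start:end])[np.arange(end - start), y[start:end]]
        codelength += -np.log2(proba).sum()  # bits to encode the next chunk

    print(f"prequential codelength: {codelength:.0f} bits "
          f"vs uniform baseline: {len(y):.0f} bits")

A model that compresses well drives the prequential codelength far below the uniform baseline, which is exactly the sense in which deep networks "compress the training data even when accounting for parameter encoding".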
-What might explain the relatively poor practical performance of?|has_answer|variational methods in deep learning -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|has_question|What yields excellent compression values on deep networks? -What yields excellent compression values on deep networks?|has_answer|simple incremental encoding methods -[Proceedings](https://aclanthology.coli.uni-saarland.de/events/ws-2018#W18-54) the introduction of neural networks has typically come at the cost of our understanding of the system: what are the representations and computations that the network learns? The goal of this workshop is to bring together people who are attempting to peek inside the neural network black box, taking inspiration from machine learning, psychology, linguistics and neuroscience.|has_question|What do neural networks learn? -What do neural networks learn?|has_answer|representations and computations -[Proceedings](https://aclanthology.coli.uni-saarland.de/events/ws-2018#W18-54) the introduction of neural networks has typically come at the cost of our understanding of the system: what are the representations and computations that the network learns? The goal of this workshop is to bring together people who are attempting to peek inside the neural network black box, taking inspiration from machine learning, psychology, linguistics and neuroscience.|has_question|What are some of the sources of inspiration for this workshop? -What are some of the sources of inspiration for this workshop?|has_answer|machine learning, psychology, linguistics and neuroscience -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. 
In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|has_question|Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data? -Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data?|has_answer|Deep Learning for Symbolic Mathematics -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|has_question|Neural networks can be surprisingly good at what? -Neural networks can be surprisingly good at what?|has_answer|solving differential equations -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|has_question|What can large datasets be used to train? -What can large datasets be used to train?|has_answer|sequence-to-sequence models -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|has_question|We achieve results that outperform commercial Computer Algebra Systems such as what? -We achieve results that outperform commercial Computer Algebra Systems such as what?|has_answer|Matlab or Mathematica -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). 
We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|has_question|What do we aim to automatically generate about an input structured knowledge base? -What do we aim to automatically generate about an input structured knowledge base?|has_answer|natural language descriptions -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|has_question|What is our generation framework based on? -What is our generation framework based on?|has_answer|a pointer network -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|has_question|What do we propose for evaluation? -What do we propose for evaluation?|has_answer|KB reconstruction based metric -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). 
We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|has_question|How many pairs of structured KBs are included in the new data set? -How many pairs of structured KBs are included in the new data set?|has_answer|106,216 -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|has_question|Experiments show that our approach significantly outperforms what? -Experiments show that our approach significantly outperforms what?|has_answer|significantly outperforms state-of-the-art methods -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|has_question|What is the F score of the reconstructed KB? 
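The reconstruction metric quoted in the rows above ultimately scores an extracted KB against the input KB as sets of (head, relation, tail) triples. A minimal sketch of that scoring step, assuming both KBs arrive as iterables of 3-tuples; the extraction of predicted triples from generated text is the hard part and is not shown here.

    def kb_f1(predicted, reference):
        """F-score between an extracted KB and the input KB, compared as triple sets."""
        pred, ref = set(predicted), set(reference)
        if not pred or not ref:
            return 0.0
        precision = len(pred & ref) / len(pred)
        recall = len(pred & ref) / len(ref)
        if precision + recall == 0:
            return 0.0
        return 2 * precision * recall / (precision + recall)

    ref = [("Paris", "capital_of", "France"), ("Paris", "population", "2.1M")]
    pred = [("Paris", "capital_of", "France"), ("Paris", "located_in", "Europe")]
    print(kb_f1(pred, ref))  # 1 shared triple out of 2 on each side -> 0.5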
-What is the F score of the reconstructed KB?|has_answer|68.8% - 72.6% -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.|has_question|What is a study with Machine Translation and Language Modeling Objectives called? -What is a study with Machine Translation and Language Modeling Objectives called?|has_answer|The Bottom-up Evolution of Representations in the Transformer -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.|has_question|What is MLM? 
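Since the surrounding rows turn on masked language modeling (MLM), here is a minimal sketch of how BERT-style MLM training inputs are typically built (the 80/10/10 corruption rule), with a whitespace tokenizer and a toy vocabulary standing in for real subword machinery.

    import random

    def mask_tokens(tokens, vocab, mask_prob=0.15, seed=0):
        """Return (corrupted tokens, labels); labels are None where no prediction is made."""
        rng = random.Random(seed)
        corrupted, labels = [], []
        for tok in tokens:
            if rng.random() < mask_prob:
                labels.append(tok)  # the model must recover the original token here
                r = rng.random()
                if r < 0.8:
                    corrupted.append("[MASK]")          # 80%: replace with [MASK]
                elif r < 0.9:
                    corrupted.append(rng.choice(vocab))  # 10%: random replacement
                else:
                    corrupted.append(tok)                # 10%: keep, but still predict
            else:
                corrupted.append(tok)
                labels.append(None)
        return corrupted, labels

    vocab = ["the", "cat", "sat", "on", "mat"]
    print(mask_tokens("the cat sat on the mat".split(), vocab))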
-What is MLM?|has_answer|masked language modeling -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.|has_question|What did previous work use to show that the representations learned by the Transformer differ significantly depending on the objective? -What did previous work use to show that the representations learned by the Transformer differ significantly depending on the objective?|has_answer|black-box probing tasks -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.|has_question|What do we use to study how information flows across Transformer layers? 
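These rows name canonical correlation analysis as one tool for tracking how representations change across Transformer layers. A minimal sketch with scikit-learn's CCA, where random matrices stand in for the (tokens x hidden) activation matrices of the same tokens at two layers; the mean canonical correlation used here is one rough layer-similarity score among several in this line of work.

    import numpy as np
    from sklearn.cross_decomposition import CCA

    rng = np.random.default_rng(0)
    layer_a = rng.normal(size=(500, 64))  # activations at layer i (tokens x hidden)
    # layer j: a linear transform of layer i plus noise, so some correlation survives
    layer_b = layer_a @ rng.normal(size=(64, 64)) + 0.1 * rng.normal(size=(500, 64))

    cca = CCA(n_components=10, max_iter=1000)
    a_c, b_c = cca.fit_transform(layer_a, layer_b)

    # mean correlation of the paired canonical components: a layer-to-layer similarity score
    corrs = [np.corrcoef(a_c[:, k], b_c[:, k])[0, 1] for k in range(10)]
    print(f"mean CCA similarity: {np.mean(corrs):.3f}")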
-What do we use to study how information flows across Transformer layers?|has_answer|canonical correlation analysis and mutual information estimators -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.|has_question|What gets vanished as you go from bottom to top layers? -What gets vanished as you go from bottom to top layers?|has_answer|information about the past -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.|has_question|In what type of model do representations acquire information about the context around a token? 
-In what type of model do representations acquire information about the context around a token?|has_answer|MLM -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.|has_question|Where does the token identity get recreated? -Where does the token identity get recreated?|has_answer|top -in cosine similarity, the number of common attributes is divided by the total number of possible attributes. Whereas in Jaccard Similarity, the number of common attributes is divided by the number of attributes that exist in at least one of the two objects.|has_question|In what similarity is the number of common attributes divided by the total number of possible attributes? -In what similarity is the number of common attributes divided by the total number of possible attributes?|has_answer|cosine -in cosine similarity, the number of common attributes is divided by the total number of possible attributes. Whereas in Jaccard Similarity, the number of common attributes is divided by the number of attributes that exist in at least one of the two objects.|has_question|In what type of similarity is the number of common attributes divided by the number of attributes that exist in at least one object? -In what type of similarity is the number of common attributes divided by the number of attributes that exist in at least one object?|has_answer|Jaccard Similarity -Wembedder: Wikidata entity embedding web service web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk I present a web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk. A REST API is implemented. Together with the Wikidata API the web service exposes a multilingual resource for over 600'000 Wikidata items and properties.|has_question|What is the web service for querying an embedding of entities in the Wikidata knowledge graph? 
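The cosine-versus-Jaccard contrast a few rows above is concrete for binary attribute vectors; a minimal sketch in NumPy, assuming 0/1 vectors where each position marks the presence of an attribute.

    import numpy as np

    def cosine_sim(a, b):
        # shared attributes normalized by the geometric mean of each object's attribute mass
        return (a @ b) / (np.linalg.norm(a) * np.linalg.norm(b))

    def jaccard_sim(a, b):
        # shared attributes normalized by attributes present in at least one of the two objects
        inter = np.logical_and(a, b).sum()
        union = np.logical_or(a, b).sum()
        return inter / union

    a = np.array([1, 1, 0, 1, 0])
    b = np.array([1, 0, 0, 1, 1])
    print(cosine_sim(a, b), jaccard_sim(a, b))  # ~0.667 vs 0.5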
-What is the web service for querying an embedding of entities in the Wikidata knowledge graph?|has_answer|Wikidata entity embedding web service -Wembedder: Wikidata entity embedding web service web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk I present a web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk. A REST API is implemented. Together with the Wikidata API the web service exposes a multilingual resource for over 600'000 Wikidata items and properties.|has_question|The embedding is trained on the Wikidata dump using what? -The embedding is trained on the Wikidata dump using what?|has_answer|Gensim's Word2Vec implementation -The embedding is trained on the Wikidata dump using what?|has_answer|Gensim's Word2Vec implementation and a simple graph walk -Wembedder: Wikidata entity embedding web service web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk I present a web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk. A REST API is implemented. Together with the Wikidata API the web service exposes a multilingual resource for over 600'000 Wikidata items and properties.|has_question|What is implemented for the embedding of entities in the Wikidata knowledge graph? -What is implemented for the embedding of entities in the Wikidata knowledge graph?|has_answer|A REST API -Wembedder: Wikidata entity embedding web service web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk I present a web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk. A REST API is implemented. Together with the Wikidata API the web service exposes a multilingual resource for over 600'000 Wikidata items and properties.|has_question|How many Wikidata items and properties does the web service expose? -How many Wikidata items and properties does the web service expose?|has_answer|600'000 -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. 
We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What is a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself? -What is a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself?|has_answer|Span Selection Pre-training for Question Answering -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. 
Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What type of network is better to offload the requirement of general knowledge to? -What type of network is better to offload the requirement of general knowledge to?|has_answer|sparsely activated network -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What is the Span Selection Pre-training for Question Answering? -What is the Span Selection Pre-training for Question Answering?|has_answer|a sentence drawn from a corpus with a term replaced with a special token -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. 
Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What is the answer term replaced by? -What is the answer term replaced by?|has_answer|the blank -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. 
Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What search determines the relevance of a sentence? -What search determines the relevance of a sentence?|has_answer|BM25 -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What is BERT's cloze task? -What is BERT's cloze task?|has_answer|the answer must be drawn from the model itself -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. 
The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What does the model of general purpose language modeling use to retrieve world knowledge instead of holding it in densely activated transformer encoder layers? -What does the model of general purpose language modeling use to retrieve world knowledge instead of holding it in densely activated transformer encoder layers?|has_answer|indexed long term memory -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. 
We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What is the acronym for Bidirectional Encoder Representations from Transformers? -What is the acronym for Bidirectional Encoder Representations from Transformers?|has_answer|BERT -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What are the two auxiliary tasks that BERT is pre-trained on? -What are the two auxiliary tasks that BERT is pre-trained on?|has_answer|Masked Language Model and Next Sentence Prediction -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network.
Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What is the new pre-training task inspired by? -What is the new pre-training task inspired by?|has_answer|reading comprehension -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. 
We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|What does MRC stand for? -What does MRC stand for?|has_answer|multiple reading comprehension -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|How many F1 points does BERT-LARGE outperform on short answer prediction? -How many F1 points does BERT-LARGE outperform on short answer prediction?|has_answer|3 F1 points -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. 
Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|In what dataset did we establish a new SOTA? -In what dataset did we establish a new SOTA?|has_answer|HotpotQA -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. 
We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|has_question|When is our pre-training approach particularly effective? -When is our pre-training approach particularly effective?|has_answer|when training data is limited -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|has_question|What type of model is your classifier? -What type of model is your classifier?|has_answer|Energy -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|has_question|What can be easily computed in an energy based model? -What can be easily computed in an energy based model?|has_answer|standard class probabilities -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y).
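The span selection rows above describe how a pre-training instance is built: blank one term out of a query sentence, then pair it with a BM25-retrieved passage that contains the blanked term. A minimal sketch of that construction, assuming the third-party rank_bm25 package; the corpus, tokenization, and term choice here are toy stand-ins, not the paper's pipeline:

```python
# Sketch of building one span-selection pre-training instance:
# query with [BLANK], plus a relevant, answer-bearing passage.
import random

from rank_bm25 import BM25Okapi  # assumed third-party dependency

corpus = [
    "BERT is pre-trained on masked language modeling and next sentence prediction.",
    "BM25 is a bag-of-words ranking function widely used by search engines.",
    "Convolutional networks can outperform recurrent networks on some sequence tasks.",
]
bm25 = BM25Okapi([passage.lower().split() for passage in corpus])

def make_instance(sentence, rng=random):
    """Blank out one term, then retrieve a relevant passage containing it."""
    tokens = sentence.split()
    answer = rng.choice(tokens)  # the answer term (the paper's selection is more careful)
    query = " ".join("[BLANK]" if t == answer else t for t in tokens)
    # Rank passages by BM25 relevance to the blanked query. In practice the
    # passage the query sentence came from would be excluded from the search.
    candidates = bm25.get_top_n(query.lower().split(), corpus, n=len(corpus))
    for passage in candidates:
        if answer.lower() in passage.lower():  # keep only answer-bearing passages
            return {"query": query, "passage": passage, "answer": answer}
    return None  # discard: no relevant passage contains the answer term

print(make_instance("BM25 is a ranking function used by search engines."))
```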
Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|has_question|What can be used to train the model on unlabeled data? -What can be used to train the model on unlabeled data?|has_answer|standard discriminative architectures -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|has_question|What improves calibration, robustness, and out-of-distribution detection? -What improves calibration, robustness, and out-of-distribution detection?|has_answer|energy based training of the joint distribution -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|has_question|What does the presented approach add compared to standard classification training?
-What does the presented approach add compared to standard classification training?|has_answer|little overhead -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|has_question|Our approach is the first to achieve performance rivaling the state-of-the-art in what two areas? -Our approach is the first to achieve performance rivaling the state-of-the-art in what two areas?|has_answer|generative and discriminative learning within one hybrid model -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as the tissue is 75% likely to be tumorous or the chemical is 25% likely to be toxic are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include don't know that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an Uncertain answer similar to a human reply of don't know or 50:50 in the examples we refer to earlier, this would translate to actions such as operate on tumour or remove compound from use where the models give a more true than not answer. Where the models judge the result Uncertain the practical decision might be carry out more detailed laboratory testing of compound or commission new tissue analyses. The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating Uncertain from binary predictions and finally, we provide arguments for its use in practice.|has_question|What percentage of the tissue is likely to be tumorous? -What percentage of the tissue is likely to be tumorous?|has_answer|75% -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as the tissue is 75% likely to be tumorous or the chemical is 25% likely to be toxic are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer.
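The energy-based-model rows above hinge on one identity: a classifier's logits define a joint energy, so the standard class probabilities come from a softmax while summing the logits out recovers an unnormalized log p(x). A minimal PyTorch sketch of that reinterpretation on a toy MLP (the paper additionally trains p(x) with SGLD sampling, which is omitted here):

```python
# Sketch of the JEM-style reinterpretation: logits f(x)[y] act as negative
# energies, so log p(y|x) = log_softmax(f(x)) and log p(x) = logsumexp_y f(x)[y] + C.
import torch
import torch.nn as nn

classifier = nn.Sequential(nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))

x = torch.randn(4, 784)      # a batch of inputs (toy dimensions)
logits = classifier(x)       # f(x), shape (batch, num_classes)

log_p_y_given_x = torch.log_softmax(logits, dim=-1)  # standard class probabilities
log_p_x_unnorm = torch.logsumexp(logits, dim=-1)     # unnormalized log p(x)
energy = -log_p_x_unnorm                             # E(x) in the energy-based view
```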
It is the last case, where the potential outcomes of the model explicitly include don't know that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an Uncertain answer similar to a human reply of don't know or 50:50 in the examples we refer to earlier, this would translate to actions such as operate on tumour or remove compound from use where the models give a more true than not answer. Where the models judge the result Uncertain the practical decision might be carry out more detailed laboratory testing of compound or commission new tissue analyses. The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating Uncertain from binary predictions and finally, we provide arguments for its use in practice.|has_question|What do users need to know about the most probable outcome? -What do users need to know about the most probable outcome?|has_answer|whether the model is capable enough to provide an answer -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as the tissue is 75% likely to be tumorous or the chemical is 25% likely to be toxic are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include don't know that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an Uncertain answer similar to a human reply of don't know or 50:50 in the examples we refer to earlier, this would translate to actions such as operate on tumour or remove compound from use where the models give a more true than not answer. Where the models judge the result Uncertain the practical decision might be carry out more detailed laboratory testing of compound or commission new tissue analyses. The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating Uncertain from binary predictions and finally, we provide arguments for its use in practice.|has_question|What is the last possible outcome of a binary classification model that is addressed in this paper? -What is the last possible outcome of a binary classification model that is addressed in this paper?|has_answer|don't know -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as the tissue is 75% likely to be tumorous or the chemical is 25% likely to be toxic are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include don't know that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed.
Where models produce an Uncertain answer similar to a human reply of don't know or 50:50 in the examples we refer to earlier, this would translate to actions such as operate on tumour or remove compound from use where the models give a more true than not answer. Where the models judge the result Uncertain the practical decision might be carry out more detailed laboratory testing of compound or commission new tissue analyses. The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating Uncertain from binary predictions and finally, we provide arguments for its use in practice.|has_question|What is needed for predictions that can lead directly to a decision? -What is needed for predictions that can lead directly to a decision?|has_answer|more data -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as the tissue is 75% likely to be tumorous or the chemical is 25% likely to be toxic are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include don't know that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an Uncertain answer similar to a human reply of don't know or 50:50 in the examples we refer to earlier, this would translate to actions such as operate on tumour or remove compound from use where the models give a more true than not answer. Where the models judge the result Uncertain the practical decision might be carry out more detailed laboratory testing of compound or commission new tissue analyses. The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating Uncertain from binary predictions and finally, we provide arguments for its use in practice.|has_question|What is the ratio of don't know to Uncertain? -What is the ratio of don't know to Uncertain?|has_answer|50:50 -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as the tissue is 75% likely to be tumorous or the chemical is 25% likely to be toxic are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include don't know that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an Uncertain answer similar to a human reply of don't know or 50:50 in the examples we refer to earlier, this would translate to actions such as operate on tumour or remove compound from use where the models give a more true than not answer.
Where the models judge the result Uncertain the practical decision might be carry out more detailed laboratory testing of compound or commission new tissue analyses. The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating Uncertain from binary predictions and finally, we provide arguments for its use in practice.|has_question|What is a practical decision where models judge the result Uncertain? -What is a practical decision where models judge the result Uncertain?|has_answer|carry out more detailed laboratory testing of compound or commission new tissue analyses -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as the tissue is 75% likely to be tumorous or the chemical is 25% likely to be toxic are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include don't know that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an Uncertain answer similar to a human reply of don't know or 50:50 in the examples we refer to earlier, this would translate to actions such as operate on tumour or remove compound from use where the models give a more true than not answer. Where the models judge the result Uncertain the practical decision might be carry out more detailed laboratory testing of compound or commission new tissue analyses. The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating Uncertain from binary predictions and finally, we provide arguments for its use in practice.|has_question|What is the term used to separate binary predictions from binary predictions? -What is the term used to separate binary predictions from binary predictions?|has_answer|Uncertain -ANN used for unsupervised learning of efficient codings: learning a representation (encoding) for a set of data, typically for the purpose of dimensionality reduction. an unsupervised neural network which is trained to reconstruct a given input from its latent representation (Bengio, 2009). Unlike principal components analysis, the encoding and decoding steps are not limited to linear transformations (PCA learns an encoding linear transform, while auto-encoders learn an encoding program). |has_question|What is another term for learning a representation for a set of data? -What is another term for learning a representation for a set of data?|has_answer|encoding -ANN used for unsupervised learning of efficient codings: learning a representation (encoding) for a set of data, typically for the purpose of dimensionality reduction. an unsupervised neural network which is trained to reconstruct a given input from its latent representation (Bengio, 2009). Unlike principal components analysis, the encoding and decoding steps are not limited to linear transformations (PCA learns an encoding linear transform, while auto-encoders learn an encoding program). |has_question|What is the purpose of learning a representation (encoding) for a set of data?
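The "Uncertain" rows above amount to a three-way decision rule on top of a probabilistic binary classifier: act on confident predictions, abstain otherwise. A minimal sketch, where the 0.25/0.75 confidence band and the action strings are illustrative choices, not values from the paper:

```python
# Map a predicted probability to positive / negative / Uncertain
# using a confidence band; the band edges are illustrative.
def decide(p_positive, low=0.25, high=0.75):
    if p_positive >= high:
        return "positive"   # e.g. operate on tumour
    if p_positive <= low:
        return "negative"   # e.g. keep compound in use
    return "Uncertain"      # e.g. commission more laboratory tests

for p in (0.9, 0.5, 0.1):
    print(p, decide(p))     # 0.9 positive, 0.5 Uncertain, 0.1 negative
```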
-What is the purpose of learning a representation (encoding) for a set of data?|has_answer|dimensionality reduction -ANN used for unsupervised learning of efficient codings: learning a representation (encoding) for a set of data, typically for the purpose of dimensionality reduction. an unsupervised neural network which is trained to reconstruct a given input from its latent representation (Bengio, 2009). Unlike principal components analysis, the encoding and decoding steps are not limited to linear transformations (PCA learns an encoding linear transform, while auto-encoders learn an encoding program). |has_question|What is trained to reconstruct a given input from its latent representation? -What is trained to reconstruct a given input from its latent representation?|has_answer|unsupervised neural network -ANN used for unsupervised learning of efficient codings: learning a representation (encoding) for a set of data, typically for the purpose of dimensionality reduction. an unsupervised neural network which is trained to reconstruct a given input from its latent representation (Bengio, 2009). Unlike principal components analysis, the encoding and decoding steps are not limited to linear transformations (PCA learns an encoding linear transform, while auto-encoders learn an encoding program). |has_question|What steps are not limited to linear transformations? -What steps are not limited to linear transformations?|has_answer|encoding and decoding -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|has_question|What is the common association between recurrent networks and? -What is the common association between recurrent networks and?|has_answer|sequence modeling -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. 
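The autoencoder rows above compress to this: a nonlinear encoder/decoder pair trained to reconstruct its own input from a low-dimensional code, unlike PCA's linear transform. A minimal PyTorch sketch with illustrative dimensions and a single training step:

```python
# Minimal autoencoder: nonlinear encoder/decoder trained to reconstruct
# the input; the ReLU layers are what make it more than PCA.
import torch
import torch.nn as nn

class AutoEncoder(nn.Module):
    def __init__(self, dim_in=784, dim_code=32):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(dim_in, 128), nn.ReLU(),
                                     nn.Linear(128, dim_code))
        self.decoder = nn.Sequential(nn.Linear(dim_code, 128), nn.ReLU(),
                                     nn.Linear(128, dim_in))

    def forward(self, x):
        return self.decoder(self.encoder(x))

model = AutoEncoder()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

x = torch.rand(64, 784)                     # unlabeled batch (toy data)
loss = nn.functional.mse_loss(model(x), x)  # reconstruction objective
loss.backward()
optimizer.step()
```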
Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|has_question|What can outperform recurrent networks on tasks such as audio synthesis and machine translation? -What can outperform recurrent networks on tasks such as audio synthesis and machine translation?|has_answer|convolutional architectures -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|has_question|What should one use given a new sequence modeling task or dataset? -What should one use given a new sequence modeling task or dataset?|has_answer|which architecture -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. 
The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|has_question|What do we conduct a systematic evaluation of for sequence modeling? -What do we conduct a systematic evaluation of for sequence modeling?|has_answer|generic convolutional and recurrent architectures -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|has_question|What are generic convolutional and recurrent architectures commonly used to? -What are generic convolutional and recurrent architectures commonly used to?|has_answer|benchmark recurrent networks -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. 
We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|has_question|A simple convolutional architecture outperforms what canonical recurrent networks? -A simple convolutional architecture outperforms what canonical recurrent networks?|has_answer|LSTMs -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|has_question|What should be regarded as a natural starting point for sequence modeling tasks? -What should be regarded as a natural starting point for sequence modeling tasks?|has_answer|convolutional networks -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|has_question|Where is code available to assist related work? 
-Where is code available to assist related work?|has_answer|http://github.com/locuslab/TCN -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|has_question|What is ULMFiT? -What is ULMFiT?|has_answer|unsupervised transfer -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|has_question|What does SLU stand for? -What does SLU stand for?|has_answer|improve model performance on Spoken Language Understanding -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. 
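The sequence-modeling rows above describe temporal convolutional networks (TCNs). A minimal sketch of the building block such architectures stack: causal, dilated 1-D convolutions with a residual connection (channel sizes and depth here are illustrative; the reference implementation lives at http://github.com/locuslab/TCN):

```python
# One TCN-style temporal block: left-padded (causal) dilated convolutions
# plus a residual connection, preserving the time dimension.
import torch
import torch.nn as nn

class CausalConv1d(nn.Module):
    def __init__(self, c_in, c_out, kernel_size=3, dilation=1):
        super().__init__()
        self.pad = (kernel_size - 1) * dilation  # left-pad only => no future leakage
        self.conv = nn.Conv1d(c_in, c_out, kernel_size, dilation=dilation)

    def forward(self, x):                        # x: (batch, channels, time)
        return self.conv(nn.functional.pad(x, (self.pad, 0)))

class TemporalBlock(nn.Module):
    def __init__(self, c_in, c_out, dilation):
        super().__init__()
        self.net = nn.Sequential(CausalConv1d(c_in, c_out, dilation=dilation), nn.ReLU(),
                                 CausalConv1d(c_out, c_out, dilation=dilation), nn.ReLU())
        self.downsample = nn.Conv1d(c_in, c_out, 1) if c_in != c_out else nn.Identity()

    def forward(self, x):
        return self.net(x) + self.downsample(x)  # residual connection

y = TemporalBlock(8, 16, dilation=2)(torch.randn(4, 8, 100))  # -> (4, 16, 100)
```

Stacking blocks with dilations 1, 2, 4, ... grows the receptive field exponentially, which is where the "longer effective memory" claim above comes from.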
We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|has_question|What is ELMo? -What is ELMo?|has_answer|Embeddings from Language Model -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|has_question|What is the faster and simpler unsupervised pre-training method for SLU? -What is the faster and simpler unsupervised pre-training method for SLU?|has_answer|ELMo-Light -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. 
The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|has_question|Unsupervised pre-training on a large corpora of unlabeled utterances leads to what? -Unsupervised pre-training on a large corpora of unlabeled utterances leads to what?|has_answer|significantly better SLU performance -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|has_question|What technique can be further improved by supervised transfer? -What technique can be further improved by supervised transfer?|has_answer|unsupervised transfer techniques -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|has_question|How many labeled in-domain samples are used?
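The ELMo/ELMoL rows above describe a transfer recipe: pre-train on unlabeled utterances, keep the encoder fixed, and train a small supervised head on the few labeled examples. A minimal sketch of that pattern, with DistilBERT via `transformers` standing in for ELMo; the two utterances and the three intent classes are invented:

```python
import torch.nn as nn
from transformers import AutoModel, AutoTokenizer

tok = AutoTokenizer.from_pretrained("distilbert-base-uncased")
encoder = AutoModel.from_pretrained("distilbert-base-uncased")
for p in encoder.parameters():
    p.requires_grad = False                       # keep the unsupervised pre-training fixed

head = nn.Linear(encoder.config.hidden_size, 3)   # the only part trained on labeled data
batch = tok(["play some jazz", "set an alarm"], return_tensors="pt", padding=True)
features = encoder(**batch).last_hidden_state[:, 0]   # first-token summary per utterance
print(head(features).shape)                       # torch.Size([2, 3])
```

Only `head` sees gradients, which is what makes the approach cheap in labeled data.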
-How many labeled in-domain samples are used?|has_answer|1000 -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|has_question|What can be seen as a model to map a key to the position of a record within a sorted array? -What can be seen as a model to map a key to the position of a record within a sorted array?|has_answer|a B-Tree-Index -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|has_question|What do we call learned indexes? 
-What do we call learned indexes?|has_answer|deep-learning models -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|has_question|What is the key idea of a learned indexes? -What is the key idea of a learned indexes?|has_answer|learn the sort order or structure of lookup keys -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|has_question|What are the main challenges in designing? 
-What are the main challenges in designing?|has_answer|learned index structures -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|has_question|By using neural nets, we are able to outperform cache-optimized B-Trees by what percentage in speed? -By using neural nets, we are able to outperform cache-optimized B-Trees by what percentage in speed?|has_answer|70% -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|has_question|What do we believe the idea of replacing core components of a data management system through learned models has? 
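The learned-index rows above describe the key mechanism: a model predicts a record's position from its key, and a search bounded by the worst training-time error corrects the guess. A toy sketch of that idea, with a linear fit standing in for the paper's neural nets (array size and key distribution are illustrative):

```python
import bisect
import numpy as np

rng = np.random.default_rng(0)
keys = np.sort(rng.uniform(0, 1e6, size=100_000))          # the sorted array to index
positions = np.arange(len(keys))
a, b = np.polyfit(keys, positions, deg=1)                  # position ~= a*key + b
errors = positions - (a * keys + b)
min_err, max_err = int(np.floor(errors.min())), int(np.ceil(errors.max()))

def lookup(key):
    guess = int(a * key + b)
    lo = max(0, guess + min_err)                           # error bounds give a small
    hi = min(len(keys), guess + max_err + 1)               # window to search exactly
    i = lo + bisect.bisect_left(keys[lo:hi].tolist(), key)
    return i if i < len(keys) and keys[i] == key else None

print(lookup(keys[1234]))                                  # -> 1234
```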
-What do we believe the idea of replacing core components of a data management system through learned models has?|has_answer|implications for future systems designs -Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|has_question|What do contextualized word embeddings address? -What do contextualized word embeddings address?|has_answer|lexical composition -Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|has_question|What do contextualized word embeddings recognize? -What do contextualized word embeddings recognize?|has_answer|meaning shift -Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans.
Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|has_question|Why is building meaningful phrase representations challenging? -Why is building meaningful phrase representations challenging?|has_answer|phrase meanings are not simply the sum of their constituent meanings -Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|has_question|What can shift the meanings of constituent words and introduce implicit information? -What can shift the meanings of constituent words and introduce implicit information?|has_answer|Lexical composition -Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|has_question|What range of textual representations were tested for their capacity to address lexical composition? -What range of textual representations were tested for their capacity to address lexical composition?|has_answer|broad range -Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information.
We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|has_question|What performs better than static word embeddings? -What performs better than static word embeddings?|has_answer|contextualized word representations -Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|has_question|How many tasks are included in our evaluation suite? -How many tasks are included in our evaluation suite?|has_answer|5 - general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multirelational graphs, and learning word, sentence or document level embeddings [Github](https://github.com/facebookresearch/starSpace) (seems to be the solution for [#Multi-Label classification](/tag/multi_label_classification) that [#FastText](/tag/fasttext) doesn't support very well) |has_question|What model can solve a wide variety of problems? -What model can solve a wide variety of problems?|has_answer|general-purpose neural embedding model -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries.
This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|has_question|How are Word Representations created? -How are Word Representations created?|has_answer|Gaussian Embedding -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|has_question|Where do Novel word embedding algorithms embed words directly as Gaussian distributional potential functions? -Where do Novel word embedding algorithms embed words directly as Gaussian distributional potential functions?|has_answer|infinite dimensional function space -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|has_question|In addition to vectors, what does this allow us to map word types to?
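The Gaussian-embedding rows above hinge on asymmetry: KL divergence between word distributions, unlike dot product or cosine, is directional, which is what lets the representation express entailment-like relations. A small numeric sketch with random placeholder vectors (the "broad"/"narrow" concept pair is invented for illustration):

```python
import numpy as np

def kl_diag(mu_p, var_p, mu_q, var_q):
    """KL(p || q) between Gaussians with diagonal covariance."""
    return 0.5 * np.sum(var_p / var_q + (mu_q - mu_p) ** 2 / var_q
                        - 1.0 + np.log(var_q / var_p))

rng = np.random.default_rng(0)
mu_broad = rng.normal(size=50)                     # a broad concept, e.g. "animal"
var_broad = np.full(50, 2.0)
mu_narrow = mu_broad + 0.1 * rng.normal(size=50)   # a narrower one, e.g. "dog"
var_narrow = np.full(50, 0.5)

print(kl_diag(mu_narrow, var_narrow, mu_broad, var_broad))  # small: narrow fits inside broad
print(kl_diag(mu_broad, var_broad, mu_narrow, var_narrow))  # larger: the reverse does not
```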
-In addition to vectors, what does this allow us to map word types to?|has_answer|soft regions -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|has_question|What does current work in lexical distributed representations map each word to in low-dimensional space? -What does current work in lexical distributed representations map each word to in low-dimensional space?|has_answer|point vector -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|has_question|What is a benefit of using a density instead of a point vector in low-dimensional space? -What is a benefit of using a density instead of a point vector in low-dimensional space?|has_answer|better capturing uncertainty about a representation and its relationships -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space.
Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|has_question|What does this paper advocate for? -What does this paper advocate for?|has_answer|density-based distributed embeddings -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|has_question|What are the embeddings able to model? -What are the embeddings able to model?|has_answer|entailment and other asymmetric relationships -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. 
To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|has_question|What is a popular approach to unsupervised learning of word relationships? -What is a popular approach to unsupervised learning of word relationships?|has_answer|Word embeddings -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|has_question|What do we present a new set of embeddings for? -What do we present a new set of embeddings for?|has_answer|medical concepts learned using an extremely large collection of multimodal medical data -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data.
Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|has_question|What is the new benchmark methodology based on? -What is the new benchmark methodology based on?|has_answer|statistical power -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|has_question|What is our approach that attains state-of-the-art performance called? -What is our approach that attains state-of-the-art performance called?|has_answer|cui2vec -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|has_question|What does cui2vec provide for other researchers to use?
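The cui2vec rows above give the idea (embed medical concepts from co-occurrence in claims, notes, and articles) but not the training recipe. As a hedged stand-in, one can treat each record as a "sentence" of concept identifiers and train word2vec-style, assuming the gensim package; the three records of UMLS-like CUIs below are made up for illustration:

```python
from gensim.models import Word2Vec

# Each inner list plays the role of one patient record / document.
corpus = [
    ["C0011849", "C0027051", "C0020538"],
    ["C0011849", "C0020538", "C0038454"],
    ["C0027051", "C0038454", "C0011849"],
]
model = Word2Vec(sentences=corpus, vector_size=32, window=5,
                 min_count=1, sg=1, epochs=50)       # skip-gram over concept codes
print(model.wv.most_similar("C0011849", topn=2))     # nearest concepts in the learned space
```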
-What does cui2vec provide for other researchers to use?|has_answer|downloadable set of pre-trained embeddings -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|has_question|How many neural sequence labeling models were reproduced? -How many neural sequence labeling models were reproduced?|has_answer|twelve -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|has_question|What benchmark is used for tagging? -What benchmark is used for tagging?|has_answer|POS -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|has_question|Misconceptions and inconsistent conclusions in existing literature are examined and clarified under what? -Misconceptions and inconsistent conclusions in existing literature are examined and clarified under what?|has_answer|statistical experiments -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. 
In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|has_question|We reach several practical conclusions which can be useful to whom? -We reach several practical conclusions which can be useful to whom?|has_answer|practitioners -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|has_question|What tutorial describes conditional random fields? -What tutorial describes conditional random fields?|has_answer|An Introduction to Conditional Random Fields -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|has_question|What is a combination of classification and graphical modeling? -What is a combination of classification and graphical modeling?|has_answer|Structured prediction methods -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|has_question|What is a popular probabilistic method for structured prediction? 
-What is a popular probabilistic method for structured prediction?|has_answer|conditional random fields -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|has_question|What fields have CRFs seen wide application in? -What fields have CRFs seen wide application in?|has_answer|natural language processing, computer vision, and bioinformatics -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|has_question|What methods do we describe for CRFs? -What methods do we describe for CRFs?|has_answer|inference and parameter estimation -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|has_question|Who is this tutorial intended to be useful to? 
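The CRF rows above describe structured prediction as "classification plus graphical modeling": per-token features feed a model that also scores tag transitions. A tiny linear-chain CRF in that spirit, assuming the sklearn-crfsuite package; the feature function and the two training sentences are toy examples:

```python
import sklearn_crfsuite

def feats(sent, i):
    # Classification side: arbitrary per-token features as a dict.
    return {"word.lower": sent[i].lower(),
            "is_title": sent[i].istitle(),
            "prev": sent[i - 1].lower() if i > 0 else "<s>"}

sents = [["Paris", "is", "nice"], ["Bob", "runs"]]
X = [[feats(s, i) for i in range(len(s))] for s in sents]
y = [["NOUN", "VERB", "ADJ"], ["NOUN", "VERB"]]

# Graphical-model side: the CRF learns tag-transition weights jointly.
crf = sklearn_crfsuite.CRF(algorithm="lbfgs", max_iterations=50)
crf.fit(X, y)
print(crf.predict([[feats(["Alice", "is", "kind"], i) for i in range(3)]]))
```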
-Who is this tutorial intended to be useful to?|has_answer|practitioners in a wide variety of fields -Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|has_question|What is a pivotal task for building better user recommendation and notification algorithms? -Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|has_question|For how many languages does Dailyhunt use Named Entity Recognition? -For how many languages does Dailyhunt use Named Entity Recognition?|has_answer|13+ Indian languages -Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|has_question|What are some examples of n-grams that do not fit in the definition of Named-Entity? 
-What are some examples of n-grams that do not fit in the definition of Named-Entity?|has_answer|me too movement, beef ban, alwar mob lynching
-Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|has_question|What type of n-grams can be used as topics and/or hashtags for a news?
-What type of n-grams can be used as topics and/or hashtags for a news?|has_answer|case-less n-grams
-Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|has_question|What is used to build a model for Named Entity Recognition?
-Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|has_question|Named Entity Recognition shows promising results when compared with what other models?
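The rows below repeatedly quote the Hierarchical Memory Networks abstract. To ground its phrase "memory is often addressed in a soft way using a softmax function", here is a small NumPy sketch contrasting a soft (differentiable) read with a hard (argmax) read over a flat memory; shapes and values are toy assumptions:

# Illustrative only: soft vs. hard addressing of a flat memory, as discussed
# in the Hierarchical Memory Networks rows below. Shapes and values are toy.
import numpy as np

rng = np.random.default_rng(0)
memory = rng.normal(size=(1000, 64))  # 1000 memory slots, 64-dim each
query = rng.normal(size=64)

scores = memory @ query               # inner-product relevance of each slot
# Soft read: softmax over *all* slots; differentiable but O(#slots) per read,
# which is what stops it from scaling to extremely large memories.
weights = np.exp(scores - scores.max())
weights /= weights.sum()
soft_read = weights @ memory

# Hard read: pick one slot; cheap, but the argmax is not differentiable,
# which is why hard attention is usually trained with reinforcement learning.
hard_read = memory[scores.argmax()]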
-Named Entity Recognition shows promising results when compared with what other models?|has_answer|Flair, Spacy and Stanford-caseless-NER -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|has_question|What is a hybrid between hard and soft attention memory networks? -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|has_question|What is organized in a hierarchical structure? -What is organized in a hierarchical structure?|has_answer|memory -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. 
The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|has_question|What is used to address memory in a soft way? -What is used to address memory in a soft way?|has_answer|a softmax function -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|has_question|What type of memory does a hierarchical memory network need to read from? -What type of memory does a hierarchical memory network need to read from?|has_answer|large memories -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. 
The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|has_question|What is difficult to train successfully? -What is difficult to train successfully?|has_answer|hard attention mechanisms based on reinforcement learning -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|has_question|What can a hierarchical memory network be considered as? -What can a hierarchical memory network be considered as?|has_answer|a hybrid between hard and soft attention memory networks -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. 
The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|has_question|How is the memory organized in a hierarchical structure compared to soft attention over a flat memory? -How is the memory organized in a hierarchical structure compared to soft attention over a flat memory?|has_answer|less computation -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|has_question|What does MIPS stand for? -What does MIPS stand for?|has_answer|Maximum Inner Product Search -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. 
The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|has_question|What is a challenging large scale factoid question answering task? -What is a challenging large scale factoid question answering task?|has_answer|SimpleQuestions -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|has_question|What model links better Literal-ly? -What model links better Literal-ly?|has_answer|Knowledge Graph Embeddings with Literals -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. 
KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|has_question|What are structured information about a particular domain in the form of entities and relations? -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|has_question|What do KGs help facilitate between different resources represented in the Linked Data Cloud? -What do KGs help facilitate between different resources represented in the Linked Data Cloud?|has_answer|interconnectivity and interoperability -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. 
This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|has_question|What are some examples of applications that KGs have been used in? -What are some examples of applications that KGs have been used in?|has_answer|entity linking, question answering, recommender systems -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|has_question|What do KG applications suffer from? -What do KG applications suffer from?|has_answer|high computational and storage costs -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. 
Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|has_question|What is a low dimensional space that can map high dimensional KGs into? -What is a low dimensional space that can map high dimensional KGs into?|has_answer|embedding space -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|has_question|What is the unstructured information represented as in KG embedding models? -What is the unstructured information represented as in KG embedding models?|has_answer|literals -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|has_question|What is the general task of Knowledge Graph Embeddings with Literals? 
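The surrounding rows quote a survey of knowledge graph embedding models evaluated on link prediction. As one concrete instance of the structure-only models such surveys build on, here is a toy TransE-style scorer; this is an editorial sketch, and the vectors below are random stand-ins for what a real model would learn:

# Illustrative only: TransE-style link-prediction scoring, one of the
# structure-only KG embedding models the surveyed literal-aware models extend.
import numpy as np

rng = np.random.default_rng(1)
entities = {e: rng.normal(size=32) for e in ["paris", "france", "rome", "italy"]}
relations = {"capital_of": rng.normal(size=32)}

def score(h, r, t):
    """TransE: a true triple (h, r, t) should satisfy h + r ≈ t."""
    return -np.linalg.norm(entities[h] + relations[r] - entities[t])

# Link prediction: rank every candidate tail for (paris, capital_of, ?).
ranked = sorted(entities, key=lambda t: -score("paris", "capital_of", t))
print(ranked)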
-What is the general task of Knowledge Graph Embeddings with Literals?|has_answer|link prediction
-Supervised learning techniques that also make use of unlabeled data for training – typically a small amount of labeled data with a large amount of unlabeled data.|has_question|What makes use of unlabeled data for training?
-What makes use of unlabeled data for training?|has_answer|Supervised learning techniques
-A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|has_question|What are now widely used in NLP, but we still don't understand a lot about their inner workings?
-What are now widely used in NLP, but we still don't understand a lot about their inner workings?|has_answer|Transformer-based models
-A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|has_question|Who created the BERT model?
-Who created the BERT model?|has_answer|Devlin et al.
-A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|has_question|How many analysis studies were synthesized in this paper?
-How many analysis studies were synthesized in this paper?|has_answer|over 40
-A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|has_question|What does this paper provide about the proposed modifications to the BERT model?
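The "unlabeled data for training" rows above describe what is usually called semi-supervised learning: a small amount of labeled data combined with a large amount of unlabeled data. A minimal scikit-learn self-training sketch, where the label -1 marks unlabeled samples and the data is synthetic:

# Illustrative only: semi-supervised learning as described in the rows above.
# In scikit-learn, unlabeled samples carry the label -1 and
# SelfTrainingClassifier pseudo-labels them with a base estimator's
# confident predictions.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.semi_supervised import SelfTrainingClassifier

rng = np.random.default_rng(2)
X = np.vstack([rng.normal(-2, 1, (50, 2)), rng.normal(2, 1, (50, 2))])
y = np.array([0] * 50 + [1] * 50)
y_partial = y.copy()
y_partial[rng.random(100) < 0.9] = -1  # hide ~90% of the labels

clf = SelfTrainingClassifier(LogisticRegression(), threshold=0.8)
clf.fit(X, y_partial)
print((clf.predict(X) == y).mean())  # accuracy on all points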
-What does this paper provide about the proposed modifications to the BERT model?|has_answer|an overview
-A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|has_question|What do we do with the BERT model?
-What do we do with the BERT model?|has_answer|outline the directions for further research
-Dictionary learning, or sparse coding, tries to learn a sparse linear code to represent the given data succinctly. Unsupervised learning algo. Images - edge detection (similar to primary visual cortex) |has_question|Dictionary learning is also known as what?
-Dictionary learning is also known as what?|has_answer|sparse
-Dictionary learning, or sparse coding, tries to learn a sparse linear code to represent the given data succinctly. Unsupervised learning algo. Images - edge detection (similar to primary visual cortex) |has_question|What is sparse coding?
-What is sparse coding?|has_answer|Dictionary learning
-Dictionary learning, or sparse coding, tries to learn a sparse linear code to represent the given data succinctly. Unsupervised learning algo. Images - edge detection (similar to primary visual cortex) |has_question|What is the term for supervised learning algo?
-What is the term for supervised learning algo?|has_answer|Unsupervised learning algo
-Dictionary learning, or sparse coding, tries to learn a sparse linear code to represent the given data succinctly. Unsupervised learning algo. Images - edge detection (similar to primary visual cortex) |has_question|Images - edge detection is similar to what?
-Images - edge detection is similar to what?|has_answer|primary visual cortex
-Notes on Cardinal's Matrices These notes are motivated by the work of Jean-Paul Cardinal on symmetric matrices related to the Mertens function. He showed that certain norm bounds on his matrices implied the Riemann hypothesis. Using a different matrix norm we show an equivalence of the Riemann hypothesis to suitable norm bounds on his matrices in the new norm. Then we specify a deformed version of his Mertens function matrices that unconditionally satisfies a norm bound that is of the same strength as his Riemann hypothesis bound.|has_question|Whose work motivated the notes on Cardinal's Matrices?
-Notes on Cardinal's Matrices These notes are motivated by the work of Jean-Paul Cardinal on symmetric matrices related to the Mertens function. He showed that certain norm bounds on his matrices implied the Riemann hypothesis. Using a different matrix norm we show an equivalence of the Riemann hypothesis to suitable norm bounds on his matrices in the new norm. Then we specify a deformed version of his Mertens function matrices that unconditionally satisfies a norm bound that is of the same strength as his Riemann hypothesis bound.|has_question|What did Cardinal show about certain norm bounds on his matrices?
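Returning to the dictionary learning / sparse coding rows above: here is a short scikit-learn sketch that learns a dictionary and sparse codes. The data is random noise here; on natural image patches the learned atoms famously resemble edge detectors, which is the primary-visual-cortex analogy those rows mention.

# Illustrative only: dictionary learning / sparse coding with scikit-learn,
# per the rows above. Learn a dictionary D and sparse codes A with X ≈ A @ D.
import numpy as np
from sklearn.decomposition import DictionaryLearning

X = np.random.default_rng(3).normal(size=(200, 20))  # 200 samples, 20 features
dico = DictionaryLearning(n_components=10, alpha=1.0,
                          transform_algorithm="lasso_lars", random_state=0)
codes = dico.fit_transform(X)            # sparse codes, shape (200, 10)
print(codes.shape, dico.components_.shape, (codes != 0).mean())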
-What did Cardinal show about certain norm bounds on his matrices?|has_answer|implied the Riemann hypothesis -Notes on Cardinal's Matrices These notes are motivated by the work of Jean-Paul Cardinal on symmetric matrices related to the Mertens function. He showed that certain norm bounds on his matrices implied the Riemann hypothesis. Using a different matrix norm we show an equivalence of the Riemann hypothesis to suitable norm bounds on his matrices in the new norm. Then we specify a deformed version of his Mertens function matrices that unconditionally satisfies a norm bound that is of the same strength as his Riemann hypothesis bound.|has_question|What does a different matrix norm show about the Riemann hypothesis? -What does a different matrix norm show about the Riemann hypothesis?|has_answer|equivalence -Notes on Cardinal's Matrices These notes are motivated by the work of Jean-Paul Cardinal on symmetric matrices related to the Mertens function. He showed that certain norm bounds on his matrices implied the Riemann hypothesis. Using a different matrix norm we show an equivalence of the Riemann hypothesis to suitable norm bounds on his matrices in the new norm. Then we specify a deformed version of his Mertens function matrices that unconditionally satisfies a norm bound that is of the same strength as his Riemann hypothesis bound.|has_question|What does a deformed version of Cardinal's Mertens function matrices do? -What does a deformed version of Cardinal's Mertens function matrices do?|has_answer|unconditionally satisfies a norm bound -Smoothed Inverse Frequency: a linear representation of a sentence which is better than the simple average of the embeddings of its words 2 ideas: - assign to each word a weighting that depends on the frequency of the word it the corpus (reminiscent of TF-IDF) - some denoising (removing the component from the top singular direction) Todo (?): check implementation as a [sklearn Vectorizer](https://github.com/ChristophAlt/embedding_vectorizer) |has_question|What is a linear representation of a sentence which is better than the simple average of the embeddings of its words? -What is a linear representation of a sentence which is better than the simple average of the embeddings of its words?|has_answer|Smoothed Inverse Frequency -Smoothed Inverse Frequency: a linear representation of a sentence which is better than the simple average of the embeddings of its words 2 ideas: - assign to each word a weighting that depends on the frequency of the word it the corpus (reminiscent of TF-IDF) - some denoising (removing the component from the top singular direction) Todo (?): check implementation as a [sklearn Vectorizer](https://github.com/ChristophAlt/embedding_vectorizer) |has_question|Where can you find a [sklearn Vectorizer]? -Where can you find a [sklearn Vectorizer]?|has_answer|github.com -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. 
In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|has_question|What is the name of the tutorial on distance metric learning? -What is the name of the tutorial on distance metric learning?|has_answer|Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|has_question|Distance metric learning can be useful to improve what? -Distance metric learning can be useful to improve what?|has_answer|similarity learning algorithms -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|has_question|What is the main problem of distance metric learning? -What is the main problem of distance metric learning?|has_answer|distance metric learning -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. 
Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|has_question|What is one of the most popular distance metric learning techniques used in? -What is one of the most popular distance metric learning techniques used in?|has_answer|classification -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|has_question|What is provided in the tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments? -What is provided in the tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments?|has_answer|some experiments to evaluate the performance of the different algorithms -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|has_question|What does this paper discuss in relation to distance metric learning? -What does this paper discuss in relation to distance metric learning?|has_answer|several possibilities of future work -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. 
We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|has_question|What is another name for a virtual knowledge base? -What is another name for a virtual knowledge base?|has_answer|KB -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. 
DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|has_question|What is the name of the neural module that traverses textual data like a KB? -What is the name of the neural module that traverses textual data like a KB?|has_answer|DrKIT -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|has_question|What is the maximum inner product search called? -What is the maximum inner product search called?|has_answer|MIPS -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). 
In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|has_question|What can the full system be trained end-to-end using? -What can the full system be trained end-to-end using?|has_answer|gradient based methods -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|has_question|How is the contextual representation encoder trained? 
-How is the contextual representation encoder trained?|has_answer|by generating hard negative examples using existing knowledge bases
-Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|has_question|What is a pretraining scheme for?
-What is a pretraining scheme for?|has_answer|contextual representation encoder
-Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|has_question|How much does DrKIT improve accuracy on 3-hop questions in the MetaQA dataset?
-How much does DrKIT improve accuracy on 3-hop questions in the MetaQA dataset?|has_answer|9 points
-Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|has_question|On HotpotQA, DrKIT leads to what improvement over a BERT-based re-ranking approach?
-On HotpotQA, DrKIT leads to what improvement over a BERT-based re-ranking approach?|has_answer|10%
-Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|has_question|How many more queries does DrKIT process than existing multi-hop systems?
-How many more queries does DrKIT process than existing multi-hop systems?|has_answer|10-100x more queries per second
-Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|has_question|What is the name of the unsupervised learning of sentence embeddings using Compositional n-Gram Features?
-What is the name of the unsupervised learning of sentence embeddings using Compositional n-Gram Features?|has_answer|Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features
-Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|has_question|What are embeddings of word sequences called?
-What are embeddings of word sequences called?|has_answer|semantic representations
-Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|has_question|What type of unsupervised objective is presented to train distributed representations of sentences?
-What type of unsupervised objective is presented to train distributed representations of sentences?|has_answer|simple but efficient
-Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|has_question|What does our method highlight about general-purpose sentence embeddings?
-What does our method highlight about general-purpose sentence embeddings?|has_answer|robustness
-Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincar\\'e ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincar\\'e embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|has_question|What is used for Learning Hierarchical Representations?
-Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincar\\'e ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincar\\'e embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|has_question|Where do we introduce a new approach for learning hierarchical representations of symbolic data?
-Where do we introduce a new approach for learning hierarchical representations of symbolic data?|has_answer|hyperbolic space
-Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincar\\'e ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincar\\'e embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|has_question|What do state-of-the-art methods typically learn?
-What do state-of-the-art methods typically learn?|has_answer|embeddings in Euclidean vector spaces
-Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincar\\'e ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincar\\'e embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|has_question|What allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity?
-What allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity?|has_answer|hyperbolic geometry
-Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincar\\'e ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincar\\'e embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|has_question|What is the efficient algorithm to learn the embeddings based on?
-What is the efficient algorithm to learn the embeddings based on?|has_answer|Riemannian optimization
-A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|has_question|What is the task to identify mentions of rigid designators from text belonging to predefined semantic types?
-What is the task to identify mentions of rigid designators from text belonging to predefined semantic types?|has_answer|Named entity recognition
-A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|has_question|What serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation?
-A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|has_question|What did early NER systems get a huge success with?
-What did early NER systems get a huge success with?|has_answer|cost of human engineering
-A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|has_question|What is deep learning empowered by?
-What is deep learning empowered by?|has_answer|real-valued vector representations and semantic composition
-A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|has_question|What does this paper provide on existing deep learning techniques for NER?
-What does this paper provide on existing deep learning techniques for NER?|has_answer|a comprehensive review
-A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|has_question|What are some NER resources?
-What are some NER resources?|has_answer|tagged NER corpora and off-the-shelf NER tools
-A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|has_question|What are the three axes of deep learning?
-What are the three axes of deep learning?|has_answer|distributed representations for input, context encoder, and tag decoder
-A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|has_question|What do we survey for recent applied techniques of deep learning in new NER problem settings and applications?
-What do we survey for recent applied techniques of deep learning in new NER problem settings and applications?|has_answer|most representative methods
-A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|has_question|What do we present readers with?
-What do we present readers with?|has_answer|challenges faced by NER systems
- Deep contextualized word representations each word is assigned a representation which is a function of the entire corpus sentences to which they belong. The embeddings are computed from the internal states of a two-layers bidirectional Language Model, hence the name “ELMo”: Embeddings from Language Models. [Github](https://github.com/allenai/bilm-tf) |has_question|What does each word belong to?
-What does each word belong to?|has_answer|the entire corpus sentences
- Deep contextualized word representations each word is assigned a representation which is a function of the entire corpus sentences to which they belong. The embeddings are computed from the internal states of a two-layers bidirectional Language Model, hence the name “ELMo”: Embeddings from Language Models. [Github](https://github.com/allenai/bilm-tf) |has_question|The embeddings are computed from the internal states of what bidirectional Language Model?
-The embeddings are computed from the internal states of what bidirectional Language Model?|has_answer|two-layers
- Deep contextualized word representations each word is assigned a representation which is a function of the entire corpus sentences to which they belong. The embeddings are computed from the internal states of a two-layers bidirectional Language Model, hence the name “ELMo”: Embeddings from Language Models. [Github](https://github.com/allenai/bilm-tf) |has_question|What is the name of the github group that is responsible for embeddings from Language Models?
-What is the name of the github group that is responsible for embeddings from Language Models?|has_answer|allenai
-Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content characterizing, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model compared with the state-of-the-art models.|has_question|What is the name of the new method to learn latent topics from short texts?
-What is the name of the new method to learn latent topics from short texts?|has_answer|Embedding-based Topic Model
-Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content characterizing, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model compared with the state-of-the-art models.|has_question|What model gives correlated words a better chance to be put into the same topic?
-What model gives correlated words a better chance to be put into the same topic?|has_answer|Markov Random Field regularized model
-Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content characterizing, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model compared with the state-of-the-art models.|has_question|How much word co-occurrence information is available in short texts?
-How much word co-occurrence information is available in short texts?|has_answer|very limited
-Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content characterizing, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model compared with the state-of-the-art models.|has_question|What is the purpose of ETM?
-What is the purpose of ETM?|has_answer|improve the coherence of topic modeling
-Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content characterizing, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model compared with the state-of-the-art models.|has_question|What is the name of the new method used to learn latent topics from short texts?
-What is the name of the new method used to learn latent topics from short texts?|has_answer|Embedding-based Topic Model
-Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content characterizing, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model compared with the state-of-the-art models.|has_question|What validates the effectiveness of the ETM model compared with the state-of-the-art models?
-What validates the effectiveness of the ETM model compared with the state-of-the-art models?|has_answer|real-world datasets
-Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|has_question|What is a critical component for enabling speech based user interactions on smart devices?
-What is a critical component for enabling speech based user interactions on smart devices?|has_answer|Keyword spotting
-Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|has_question|Keyword spotting on microcontrollers requires what for good user experience?
-Keyword spotting on microcontrollers requires what for good user experience?|has_answer|real-time response and high accuracy
-Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|has_question|Why have neural networks become an attractive choice for KWS architecture?
-Why have neural networks become an attractive choice for KWS architecture?|has_answer|superior accuracy
-Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|has_question|What is the nature of KWS?
-What is the nature of KWS?|has_answer|always-on
-Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|has_question|What must the design of neural network architecture for KWS consider?
-What must the design of neural network architecture for KWS consider?|has_answer|constraints
-Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|has_question|What do we perform neural network architecture evaluation and exploration for running KWS on?
-What do we perform neural network architecture evaluation and exploration for running KWS on?|has_answer|resource-constrained microcontrollers
-Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|has_question|Why do we train various neural network architectures for keyword spotting published in literature?
-Why do we train various neural network architectures for keyword spotting published in literature?|has_answer|to compare their accuracy and memory/compute requirements
-Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|has_question|How do we optimize neural network architectures for keyword spotting?
-How do we optimize neural network architectures for keyword spotting?|has_answer|fit within the memory and compute constraints of microcontrollers
-Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|has_question|What is DS-CNN?
-What is DS-CNN?|has_answer|depthwise separable convolutional neural network
-Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|has_question|What is the accuracy of DS-CNN?
-What is the accuracy of DS-CNN?|has_answer|95.4%
-Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|has_question|What framework is based on graph embeddings?
-What framework is based on graph embeddings?|has_answer|semi-supervised learning
-Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|has_question|What do we train for each instance to jointly predict the class label and the neighborhood context in the graph?
-Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|has_question|What are the two variants of our semi-supervised learning framework?
-What are the two variants of our semi-supervised learning framework?|has_answer|transductive and inductive
-Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings.
Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|has_question|What are determined by both the learned embeddings and input feature vectors? -What are determined by both the learned embeddings and input feature vectors?|has_answer|the class labels -Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|has_question|On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and what? -On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and what?|has_answer|entity classification -A Primer on Neural Network Models for Natural Language Processing Over the past few years, neural networks have re-emerged as powerful machine-learning models, yielding state-of-the-art results in fields such as image recognition and speech processing. More recently, neural network models started to be applied also to textual natural language signals, again with very promising results. This tutorial surveys neural network models from the perspective of natural language processing research, in an attempt to bring natural-language researchers up to speed with the neural techniques. The tutorial covers input encoding for natural language tasks, feed-forward networks, convolutional networks, recurrent networks and recursive networks, as well as the computation graph abstraction for automatic gradient computation.|has_question|What have neural networks re-emerged as? -What have neural networks re-emerged as?|has_answer|machine-learning models -A Primer on Neural Network Models for Natural Language Processing Over the past few years, neural networks have re-emerged as powerful machine-learning models, yielding state-of-the-art results in fields such as image recognition and speech processing. More recently, neural network models started to be applied also to textual natural language signals, again with very promising results. 
This tutorial surveys neural network models from the perspective of natural language processing research, in an attempt to bring natural-language researchers up to speed with the neural techniques. The tutorial covers input encoding for natural language tasks, feed-forward networks, convolutional networks, recurrent networks and recursive networks, as well as the computation graph abstraction for automatic gradient computation.|has_question|What did neural network models start to be applied to? -What did neural network models start to be applied to?|has_answer|textual natural language signals -A Primer on Neural Network Models for Natural Language Processing Over the past few years, neural networks have re-emerged as powerful machine-learning models, yielding state-of-the-art results in fields such as image recognition and speech processing. More recently, neural network models started to be applied also to textual natural language signals, again with very promising results. This tutorial surveys neural network models from the perspective of natural language processing research, in an attempt to bring natural-language researchers up to speed with the neural techniques. The tutorial covers input encoding for natural language tasks, feed-forward networks, convolutional networks, recurrent networks and recursive networks, as well as the computation graph abstraction for automatic gradient computation.|has_question|From what perspective does this tutorial examine neural network models? -From what perspective does this tutorial examine neural network models?|has_answer|natural language processing research -A Primer on Neural Network Models for Natural Language Processing Over the past few years, neural networks have re-emerged as powerful machine-learning models, yielding state-of-the-art results in fields such as image recognition and speech processing. More recently, neural network models started to be applied also to textual natural language signals, again with very promising results. This tutorial surveys neural network models from the perspective of natural language processing research, in an attempt to bring natural-language researchers up to speed with the neural techniques. The tutorial covers input encoding for natural language tasks, feed-forward networks, convolutional networks, recurrent networks and recursive networks, as well as the computation graph abstraction for automatic gradient computation.|has_question|What does the tutorial cover for natural language tasks? -What does the tutorial cover for natural language tasks?|has_answer|input encoding -Attention mechanism relating different positions of a sequence in order to compute a representation of the same sequence. Useful in machine reading, abstractive summarization, or image description generation |has_question|What mechanism relates different positions of a sequence in order to compute a representation of the same sequence? -What mechanism relates different positions of a sequence in order to compute a representation of the same sequence?|has_answer|Attention -Attention mechanism relating different positions of a sequence in order to compute a representation of the same sequence. Useful in machine reading, abstractive summarization, or image description generation |has_question|Useful in what? -Useful in what?|has_answer|machine reading, abstractive summarization, or image description generation -A statistical model for discovering the abstract topics that occur in a collection of documents. 
|has_question|What type of model is used to discover abstract topics in a collection of documents? -What type of model is used to discover abstract topics in a collection of documents?|has_answer|statistical -similar items are clustered into classes, an n-gram language model for the class tokens is generated, and then the probabilities for words in a class are distributed according to the smoothed relative unigram frequencies of the words.|has_question|What is generated for class tokens? -What is generated for class tokens?|has_answer|n-gram language model -finding clusters which are defined by only a subset of dimensions (it is not needed to have the agreement of all N features)|has_question|What is defined by only a subset of dimensions? -What is defined by only a subset of dimensions?|has_answer|clusters -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|has_question|What can often be solved with vector arithmetic? -What can often be solved with vector arithmetic?|has_answer|word analogies -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|has_question|What is SGNS? -What is SGNS?|has_answer|skip-gram with negative sampling -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. 
Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|has_question|How do we explain the phenomenon of non-linear embedding models? -How do we explain the phenomenon of non-linear embedding models?|has_answer|without making the strong assumptions -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|has_question|How many implications does our theory have? -How many implications does our theory have?|has_answer|several -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|has_question|Why have past work conjectured that linear substructures exist in vector spaces? -Why have past work conjectured that linear substructures exist in vector spaces?|has_answer|relations can be represented as ratios -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. 
Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|has_question|How do we justify the addition of SGNS word vectors? -How do we justify the addition of SGNS word vectors?|has_answer|by showing that it automatically down-weights the more frequent word -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|has_question|What is the information theoretic interpretation of in vector spaces? -What is the information theoretic interpretation of in vector spaces?|has_answer|Euclidean distance -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|has_question|What is a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context? 
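The "Towards Understanding Linear Word Analogies" rows above turn on one concrete operation: solving "a is to b as c is to ?" with vector arithmetic over SGNS embeddings. As a rough illustration of that operation only — the toy table, its values, and the function names below are made up for this sketch, not taken from the paper or from this dataset:

import numpy as np

# Toy embedding table standing in for SGNS vectors; the values are
# fabricated so that the classic analogy works out exactly.
embeddings = {
    "king":  np.array([0.9, 0.8, 0.1]),
    "queen": np.array([0.9, 0.1, 0.8]),
    "man":   np.array([0.5, 0.9, 0.0]),
    "woman": np.array([0.5, 0.2, 0.7]),
}

def cosine(u, v):
    return float(u @ v / (np.linalg.norm(u) * np.linalg.norm(v)))

def analogy(a, b, c, table):
    """Solve 'a is to b as c is to ?' via b - a + c, nearest by cosine."""
    target = table[b] - table[a] + table[c]
    candidates = {w: v for w, v in table.items() if w not in (a, b, c)}
    return max(candidates, key=lambda w: cosine(candidates[w], target))

print(analogy("man", "king", "woman", embeddings))  # -> queen

Excluding the three query words from the candidates, as above, is the standard convention when evaluating analogies; without it the arithmetic often just returns one of the inputs.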
-What is a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context?|has_answer|Using Knowledge-Graphs for Fact-Aware Language Modeling -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|has_question|What do the mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context generate? -What do the mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context generate?|has_answer|out-of-vocabulary tokens -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|has_question|Modeling human language requires the ability to not only generate fluent text but also what? 
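The KGLM rows above hinge on a select-and-copy step: when the context mentions an entity, the model can copy a fact's tail from the knowledge graph instead of sampling a vocabulary token, which is how it emits strings it has never seen. A deliberately tiny Python sketch of that idea only — the dictionary, names, and fallback below are hypothetical, and this is not the paper's architecture:

# Hypothetical toy knowledge graph: (head entity, relation) -> tail.
kg = {
    ("Barack Obama", "spouse"): "Michelle Obama",
    ("Barack Obama", "birthplace"): "Honolulu",
}

def generate_token(context_entity, relation, vocab_fallback="<unk>"):
    """One decoding step in the spirit of select-and-copy: if a relevant
    fact exists, copy its tail verbatim (even if that tail is
    out-of-vocabulary); otherwise fall back to ordinary generation."""
    fact = kg.get((context_entity, relation))
    return fact if fact is not None else vocab_fallback

print(generate_token("Barack Obama", "spouse"))  # -> Michelle Obama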
-Modeling human language requires the ability to not only generate fluent text but also what?|has_answer|encode factual knowledge -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|has_question|What models are only capable of remembering facts seen at training time? -What models are only capable of remembering facts seen at training time?|has_answer|traditional language models -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|has_question|What is KGLM? -What is KGLM?|has_answer|knowledge graph language model -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. 
These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|has_question|What does the knowledge graph language model generate? -What does the knowledge graph language model generate?|has_answer|out-of-vocabulary tokens -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|has_question|The Linked WikiText-2 dataset is aligned to what knowledge graph? -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. 
To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|has_question|How does the KGLM compare to a strong baseline language model? -How does the KGLM compare to a strong baseline language model?|has_answer|significantly better performance -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|has_question|What does the KGLM outperform even very large language models in generating facts? -What does the KGLM outperform even very large language models in generating facts?|has_answer|factual knowledge -Long short-term memory: recurrent neural network architecture well-suited for time series with long time lags between important events. (cf the problem of long time dependencies, such as when you want to predict the next word in I grew up in France… I speak fluent [?]). A solution to the vanishing gradient problem in RNNs |has_question|What is recurrent neural network architecture well-suited for time series with? -What is recurrent neural network architecture well-suited for time series with?|has_answer|long time lags -Long short-term memory: recurrent neural network architecture well-suited for time series with long time lags between important events. (cf the problem of long time dependencies, such as when you want to predict the next word in I grew up in France… I speak fluent [?]). A solution to the vanishing gradient problem in RNNs |has_question|Where did I grow up? 
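The LSTM rows above describe why gated recurrence helps with long lags, such as predicting the word after "I grew up in France… I speak fluent [?]". A minimal PyTorch sketch of such a next-word model, assuming arbitrary vocabulary and layer sizes — all names and numbers below are placeholders, not code from this repository:

import torch
import torch.nn as nn

class NextWordLSTM(nn.Module):
    """Minimal LSTM language-model sketch; sizes are arbitrary."""
    def __init__(self, vocab_size=1000, embed_dim=64, hidden_dim=128):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        # The gated cell state lets gradients flow across long lags,
        # mitigating the vanishing-gradient problem of plain RNNs.
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.out = nn.Linear(hidden_dim, vocab_size)

    def forward(self, token_ids):
        h, _ = self.lstm(self.embed(token_ids))
        return self.out(h)  # next-token logits at each position

model = NextWordLSTM()
dummy = torch.randint(0, 1000, (2, 12))  # batch of 2 sequences, length 12
print(model(dummy).shape)                # torch.Size([2, 12, 1000])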
-Long short-term memory: recurrent neural network architecture well-suited for time series with long time lags between important events. (cf the problem of long time dependencies, such as when you want to predict the next word in I grew up in France… I speak fluent [?]). A solution to the vanishing gradient problem in RNNs |has_question|What is a solution to the problem in RNNs? -What is a solution to the problem in RNNs?|has_answer|vanishing gradient -A machine learning model that models some of the structural and algorithmic properties of the neocortex. HTM is a biomimetic model based on the memory-prediction theory of brain function described by Jeff Hawkins. HTM is a method for discovering and inferring the high-level causes of observed input patterns and sequences, thus building an increasingly complex model of the world. |has_question|What model models some of the structural and algorithmic properties of the neocortex? -What model models some of the structural and algorithmic properties of the neocortex?|has_answer|machine learning model -A machine learning model that models some of the structural and algorithmic properties of the neocortex. HTM is a biomimetic model based on the memory-prediction theory of brain function described by Jeff Hawkins. HTM is a method for discovering and inferring the high-level causes of observed input patterns and sequences, thus building an increasingly complex model of the world. |has_question|Who described the memory-prediction theory of brain function? -A machine learning model that models some of the structural and algorithmic properties of the neocortex. HTM is a biomimetic model based on the memory-prediction theory of brain function described by Jeff Hawkins. HTM is a method for discovering and inferring the high-level causes of observed input patterns and sequences, thus building an increasingly complex model of the world. |has_question|HTM is a method for discovering and inferring what of observed input patterns and sequences? -HTM is a method for discovering and inferring what of observed input patterns and sequences?|has_answer|high-level causes -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. 
In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|has_question|What is the Neural State Machine? -What is the Neural State Machine?|has_answer|Learning by Abstraction -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|has_question|What does the Neural State Machine perform over the graph? -What does the Neural State Machine perform over the graph?|has_answer|sequential reasoning -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. 
In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|has_question|What space does the Neural State Machine operate in? -What space does the Neural State Machine operate in?|has_answer|abstract latent space -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. 
We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|has_question|What is the name of the machine that seeks to bridge the gap between the neural and symbolic views of AI? -What is the name of the machine that seeks to bridge the gap between the neural and symbolic views of AI?|has_answer|Neural State Machine -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|has_question|What does the Neural State Machine serve as? -What does the Neural State Machine serve as?|has_answer|structured world model -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. 
In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|has_question|What are two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills? -What are two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills?|has_answer|VQA-CP and GQA -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. 
We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|has_question|What are some examples of the strong generalization capacity of the Neural State Machine? -What are some examples of the strong generalization capacity of the Neural State Machine?|has_answer|novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures -Markets are efficient if and only if P = NP Hmm wow I prove that if markets are weak-form efficient, meaning current prices fully reflect all information available in past prices, then P = NP, meaning every computational problem whose solution can be verified in polynomial time can also be solved in polynomial time. I also prove the converse by showing how we can program the market to solve NP-complete problems. Since P probably does not equal NP, markets are probably not efficient. Specifically, markets become increasingly inefficient as the time series lengthens or becomes more frequent. An illustration by way of partitioning the excess returns to momentum strategies based on data availability confirms this prediction.|has_question|If current prices fully reflect all information available in past prices, then what does P = NP prove? -If current prices fully reflect all information available in past prices, then what does P = NP prove?|has_answer|if markets are weak-form efficient -Markets are efficient if and only if P = NP Hmm wow I prove that if markets are weak-form efficient, meaning current prices fully reflect all information available in past prices, then P = NP, meaning every computational problem whose solution can be verified in polynomial time can also be solved in polynomial time. I also prove the converse by showing how we can program the market to solve NP-complete problems. Since P probably does not equal NP, markets are probably not efficient. Specifically, markets become increasingly inefficient as the time series lengthens or becomes more frequent. An illustration by way of partitioning the excess returns to momentum strategies based on data availability confirms this prediction.|has_question|What type of problems can markets be programmed to solve? -What type of problems can markets be programmed to solve?|has_answer|NP-complete -Markets are efficient if and only if P = NP Hmm wow I prove that if markets are weak-form efficient, meaning current prices fully reflect all information available in past prices, then P = NP, meaning every computational problem whose solution can be verified in polynomial time can also be solved in polynomial time. I also prove the converse by showing how we can program the market to solve NP-complete problems. Since P probably does not equal NP, markets are probably not efficient. Specifically, markets become increasingly inefficient as the time series lengthens or becomes more frequent. An illustration by way of partitioning the excess returns to momentum strategies based on data availability confirms this prediction.|has_question|What does not equal NP? 
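The "Markets are efficient if and only if P = NP" rows above rest on the distinction between verifying a proposed solution in polynomial time and finding one. Subset-sum makes that distinction concrete; the sketch below (toy instance, hypothetical function names, multiset subtleties ignored) checks a certificate in polynomial time but searches for one by exponential brute force:

from itertools import chain, combinations

def verify(certificate, numbers, target):
    """Polynomial-time check of a proposed solution (the 'NP' side)."""
    return set(certificate) <= set(numbers) and sum(certificate) == target

def solve(numbers, target):
    """Brute-force search over all subsets: exponential in len(numbers)."""
    subsets = chain.from_iterable(
        combinations(numbers, k) for k in range(len(numbers) + 1))
    return next((s for s in subsets if sum(s) == target), None)

nums = [3, 34, 4, 12, 5, 2]
sol = solve(nums, 9)            # finds e.g. (4, 5)
print(sol, verify(sol, nums, 9))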
-What does not equal NP?|has_answer|P -Markets are efficient if and only if P = NP Hmm wow I prove that if markets are weak-form efficient, meaning current prices fully reflect all information available in past prices, then P = NP, meaning every computational problem whose solution can be verified in polynomial time can also be solved in polynomial time. I also prove the converse by showing how we can program the market to solve NP-complete problems. Since P probably does not equal NP, markets are probably not efficient. Specifically, markets become increasingly inefficient as the time series lengthens or becomes more frequent. An illustration by way of partitioning the excess returns to momentum strategies based on data availability confirms this prediction.|has_question|When do markets become increasingly inefficient? -When do markets become increasingly inefficient?|has_answer|as the time series lengthens or becomes more frequent -Markets are efficient if and only if P = NP Hmm wow I prove that if markets are weak-form efficient, meaning current prices fully reflect all information available in past prices, then P = NP, meaning every computational problem whose solution can be verified in polynomial time can also be solved in polynomial time. I also prove the converse by showing how we can program the market to solve NP-complete problems. Since P probably does not equal NP, markets are probably not efficient. Specifically, markets become increasingly inefficient as the time series lengthens or becomes more frequent. An illustration by way of partitioning the excess returns to momentum strategies based on data availability confirms this prediction.|has_question|What confirms the prediction that markets are not efficient? -What confirms the prediction that markets are not efficient?|has_answer|data availability -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|has_question|Representation learning for very short texts using weighted what?
-Representation learning for very short texts using weighted what?|has_answer|word embedding aggregation -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|has_question|Short text messages such as tweets are what in their use of vocabulary? -Short text messages such as tweets are what in their use of vocabulary?|has_answer|very noisy and sparse -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|has_question|Traditional textual representations such as what have difficulty grasping the semantic meaning of short texts?
-Traditional textual representations such as what have difficulty grasping the semantic meaning of short texts?|has_answer|tf-idf -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|has_question|We constructed a method based on what to arrive at low-dimensional representations for short texts designed to capture semantic similarity? -We constructed a method based on what to arrive at low-dimensional representations for short texts designed to capture semantic similarity?|has_answer|semantic word embeddings and frequency information -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|has_question|What is the learning procedure based on?
-What is the learning procedure based on?|has_answer|a novel median-based loss function -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|has_question|On what two sources of data does this paper discuss the results of our model? -On what two sources of data does this paper discuss the results of our model?|has_answer|Wikipedia and Twitter -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity.
[Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|has_question|How is our method applicable?
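A rough sketch of the aggregation idea from the abstract above: average word vectors weighted by frequency information. The paper learns its weights with a median-based loss; here plain idf stands in as the frequency signal, and the tiny corpus and random embeddings are placeholders:

```python
import math
import numpy as np

# Minimal sketch of frequency-weighted embedding aggregation for short texts.
corpus = [["cheap", "flights", "to", "rome"],
          ["cheap", "hotel", "in", "rome"],
          ["how", "to", "cook", "pasta"]]

# idf over the toy corpus: rare words get larger weights.
idf = {w: math.log(len(corpus) / sum(w in doc for doc in corpus))
       for doc in corpus for w in doc}

rng = np.random.default_rng(0)
embeddings = {w: rng.normal(size=50) for w in idf}  # stand-in word vectors

def embed_short_text(tokens):
    # Weighted average of word vectors; frequent words contribute less.
    vecs = [idf[w] * embeddings[w] for w in tokens if w in embeddings]
    return np.mean(vecs, axis=0)

v = embed_short_text(["cheap", "flights", "to", "rome"])
print(v.shape)  # (50,)
```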
-What is encoding high-cardinality categorical variables based on?|has_answer|traditional Bayesian statistics -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|has_question|Who is commonly faced with the challenge of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms? -Who is commonly faced with the challenge of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms?|has_answer|Applied Data Scientists -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. 
For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|has_question|What is described in this paper? -What is described in this paper?|has_answer|a Bayesian encoding technique -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|has_question|What is the paradigm for encoding categorical variables? -What is the paradigm for encoding categorical variables?|has_answer|ensemble modeling -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. 
This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|has_question|What are used as base learners for features in a stacked ensemble model? -What are used as base learners for features in a stacked ensemble model?|has_answer|domain-specific conjugate Bayesian models -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. 
This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|has_question|What is a problem-specific prior distribution for a binary classification problem? -What is a problem-specific prior distribution for a binary classification problem?|has_answer|Beta distribution -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|has_question|What do we update the prior with for each unique value of the given categorical feature? -What do we update the prior with for each unique value of the given categorical feature?|has_answer|conjugate likelihood of the corresponding target variable -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. 
This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|has_question|What does the function of column and value encode? -What does the function of column and value encode?|has_answer|the categorical feature matrix -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. 
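The conjugate update just described is easy to sketch for a binary target: a Beta prior plus a Binomial likelihood gives a Beta posterior whose mean encodes the category. A minimal pandas version with made-up column names and a uniform prior (an illustration of the general recipe, not WeWork's engine):

```python
import pandas as pd

# Conjugate Bayesian encoding of one high-cardinality column, binary target:
# Beta(a0, b0) prior + Binomial likelihood => Beta posterior per category.
df = pd.DataFrame({"city": ["nyc", "nyc", "sf", "sf", "sf", "la"],
                   "toured": [1, 0, 1, 1, 0, 0]})
a0, b0 = 1.0, 1.0                        # uniform Beta prior

stats = df.groupby("city")["toured"].agg(["sum", "count"])
a = a0 + stats["sum"]                    # prior + observed successes
b = b0 + stats["count"] - stats["sum"]   # prior + observed failures
posterior_mean = a / (a + b)             # analytic moment of Beta(a, b)

# The category is replaced by a low-dimensional numerical input.
df["city_encoded"] = df["city"].map(posterior_mean)
print(df)
```

Higher moments (e.g. the posterior variance) can be added as extra encoded columns in the same way, which is one reason the analytic conjugate form is convenient.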
This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|has_question|Experimental results on both curated and real world datasets demonstrate what? -Experimental results on both curated and real world datasets demonstrate what?|has_answer|impressive accuracy and computational efficiency -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|has_question|How many levels of categorical features are in the lead scoring engine at WeWork? -How many levels of categorical features are in the lead scoring engine at WeWork?|has_answer|300,000 -How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). 
The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |has_question|What is a symbolic and logical system? -What is a symbolic and logical system?|has_answer|knowledge graph -How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |has_question|What type of system is a knowledge graph? -What type of system is a knowledge graph?|has_answer|symbolic and logical -How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |has_question|What is neither tractable nor robust when dealing with knowledge graph? -What is neither tractable nor robust when dealing with knowledge graph?|has_answer|Formal logic -How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |has_question|What is the idea of knowledge graphs? -What is the idea of knowledge graphs?|has_answer|Knowledge graph embeddings -How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). 
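A tiny numpy sketch of the translation interpretation mentioned above (TransE-style): a plausible triple (h, r, t) should have h + r close to t, and training drives down a margin-based term of exactly this kind over all triples. The vectors here are random placeholders rather than learned embeddings:

```python
import numpy as np

rng = np.random.default_rng(0)
dim = 20
h, r, t = (rng.normal(size=dim) for _ in range(3))

def score(h, r, t):
    # Lower distance = more plausible triple under the translation view.
    return np.linalg.norm(h + r - t)

# One margin-based term against a corrupted tail t_neg, of the kind the
# global loss sums over all entities and relations during training.
t_neg = rng.normal(size=dim)
margin = 1.0
loss = max(0.0, margin + score(h, r, t) - score(h, r, t_neg))
print(score(h, r, t), loss)
```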
The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |has_question|Each relation is interpreted as an operation over what? -Each relation is interpreted as an operation over what?|has_answer|entity embeddings -How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |has_question|What is a translation of transE? -What is a translation of transE?|has_answer|Bordes et al. -How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |has_question|What is a translation of a knowledge graph? -What is a translation of a knowledge graph?|has_answer|transE -How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |has_question|What are the embedding representations usually learnt by minimizing? -What are the embedding representations usually learnt by minimizing?|has_answer|global loss function -Approach to machine translation in which a large neural network is trained to maximize translation performance. It is a radical departure from the phrase-based statistical translation approaches, in which a translation system consists of subcomponents that are separately optimized. A bidirectional recurrent neural network (RNN), known as an encoder, is used by the neural network to encode a source sentence for a second RNN, known as a decoder, that is used to predict words in the target language |has_question|What is a large neural network trained to maximize translation performance? 
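A schematic numpy sketch of the encoder-decoder setup described above, with toy dimensions and random, untrained weights; the encoder here is unidirectional for brevity, whereas the passage describes a bidirectional RNN:

```python
import numpy as np

rng = np.random.default_rng(0)
d, vocab = 16, 100
Wx = rng.normal(size=(d, d)) * 0.1
Wh = rng.normal(size=(d, d)) * 0.1
Wout = rng.normal(size=(vocab, d)) * 0.1
embed = rng.normal(size=(vocab, d)) * 0.1

def rnn_step(x, h):
    return np.tanh(Wx @ x + Wh @ h)

def encode(src_ids):
    # Encoder RNN: compress the source sentence into a context vector.
    h = np.zeros(d)
    for i in src_ids:
        h = rnn_step(embed[i], h)
    return h

def decode(context, max_len=5):
    # Decoder RNN: predict target-language words one step at a time.
    h, out, tok = context, [], 0       # token 0 plays the role of <bos>
    for _ in range(max_len):
        h = rnn_step(embed[tok], h)
        tok = int(np.argmax(Wout @ h)) # greedy choice of the next word id
        out.append(tok)
    return out

print(decode(encode([5, 42, 7])))      # word ids; untrained, so arbitrary
```

In a real system both networks are trained jointly to maximize the likelihood of reference translations, which is the "trained to maximize translation performance" part of the passage.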
-What is a large neural network trained to maximize translation performance?|has_answer|machine translation -Approach to machine translation in which a large neural network is trained to maximize translation performance. It is a radical departure from the phrase-based statistical translation approaches, in which a translation system consists of subcomponents that are separately optimized. A bidirectional recurrent neural network (RNN), known as an encoder, is used by the neural network to encode a source sentence for a second RNN, known as a decoder, that is used to predict words in the target language |has_question|A large neural network is trained to maximize translation performance, a radical departure from what? -A large neural network is trained to maximize translation performance, a radical departure from what?|has_answer|phrase-based statistical translation approaches -Approach to machine translation in which a large neural network is trained to maximize translation performance. It is a radical departure from the phrase-based statistical translation approaches, in which a translation system consists of subcomponents that are separately optimized. A bidirectional recurrent neural network (RNN), known as an encoder, is used by the neural network to encode a source sentence for a second RNN, known as a decoder, that is used to predict words in the target language |has_question|What is another name for a bidirectional recurrent neural network? -What is another name for a bidirectional recurrent neural network?|has_answer|encoder -[#Word sense disambiguation](/tag/word_sense_disambiguation) algorithm based on the assumption that words in a given neighborhood (section of text) tend to share a common topic |has_question|What do words in a given neighborhood tend to share? -What do words in a given neighborhood tend to share?|has_answer|a common topic -Question Answering for complex questions is often modeled as a graph construction or traversal task, where a solver must build or traverse a graph of facts that answer and explain a given question.|has_question|What is a question answer modeled as? -What is a question answer modeled as?|has_answer|graph construction or traversal task -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L\\log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|has_question|How can training the Efficient Transformer Large Transformer models be? -How can training the Efficient Transformer Large Transformer models be?|has_answer|prohibitively costly -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. 
For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L\\log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|has_question|How many techniques do we introduce to improve the efficiency of Transformers? -How many techniques do we introduce to improve the efficiency of Transformers?|has_answer|two -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L\\log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|has_question|What is used to replace dot-product attention? -What is used to replace dot-product attention?|has_answer|locality-sensitive hashing -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L\\log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|has_question|What do we use instead of the standard residuals? -What do we use instead of the standard residuals?|has_answer|reversible residual layers -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L\\log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. 
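A rough numpy sketch of the locality-sensitive-hashing idea behind that attention replacement: random-projection hashing groups similar query/key vectors into buckets, and attention is computed within each bucket instead of over all L x L pairs. Toy sizes, and not the actual Reformer implementation:

```python
import numpy as np

rng = np.random.default_rng(0)
L, d, n_planes = 8, 16, 4
qk = rng.normal(size=(L, d))             # shared query/key vectors
planes = rng.normal(size=(d, n_planes))  # random hyperplanes for angular LSH

# Bucket id = sign pattern of the projections, read as a binary number.
buckets = ((qk @ planes) > 0).astype(int) @ (2 ** np.arange(n_planes))

for b in np.unique(buckets):
    idx = np.where(buckets == b)[0]
    # Softmax attention restricted to positions that share a bucket.
    scores = qk[idx] @ qk[idx].T / np.sqrt(d)
    weights = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)
    print(f"bucket {b}: positions {idx.tolist()}, attn shape {weights.shape}")
```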
The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|has_question|What model performs on par with Transformer models while being much more memory-efficient and much faster on long sequences? -What model performs on par with Transformer models while being much more memory-efficient and much faster on long sequences?|has_answer|the Reformer -aims to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean (Most k-means-type algorithms require the number of clusters – k – to be specified in advance)|has_question|How many clusters are n observations partitioned into? -How many clusters are n observations partitioned into?|has_answer|k -(aka paragraph2vec, aka sentence embeddings) extends word2vec algorithm to larger blocks of text (sentences, paragraphs or entire documents). Represents each document by a dense vector which is trained to predict words in the document. Paragraph Vectors is the name of the model proposed by Le and Mikolov to generate unsupervised representations of sentences, paragraphs, or entire documents without losing local word order. Implemented in [gensim](/tag/gensim) |has_question|What is another name for paragraph2vec? -What is another name for paragraph2vec?|has_answer|sentence embeddings -(aka paragraph2vec, aka sentence embeddings) extends word2vec algorithm to larger blocks of text (sentences, paragraphs or entire documents). Represents each document by a dense vector which is trained to predict words in the document. Paragraph Vectors is the name of the model proposed by Le and Mikolov to generate unsupervised representations of sentences, paragraphs, or entire documents without losing local word order. Implemented in [gensim](/tag/gensim) |has_question|What extends word2vec algorithm to larger blocks of text? -What extends word2vec algorithm to larger blocks of text?|has_answer|paragraph2vec -(aka paragraph2vec, aka sentence embeddings) extends word2vec algorithm to larger blocks of text (sentences, paragraphs or entire documents). Represents each document by a dense vector which is trained to predict words in the document. Paragraph Vectors is the name of the model proposed by Le and Mikolov to generate unsupervised representations of sentences, paragraphs, or entire documents without losing local word order. Implemented in [gensim](/tag/gensim) |has_question|What is the dense vector trained to do? -What is the dense vector trained to do?|has_answer|predict words in the document -(aka paragraph2vec, aka sentence embeddings) extends word2vec algorithm to larger blocks of text (sentences, paragraphs or entire documents). Represents each document by a dense vector which is trained to predict words in the document. Paragraph Vectors is the name of the model proposed by Le and Mikolov to generate unsupervised representations of sentences, paragraphs, or entire documents without losing local word order. 
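Since the notes above point to the gensim implementation, a minimal usage sketch of its Doc2Vec API (toy two-document corpus; real training needs far more data and tuned epochs):

```python
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

corpus = [TaggedDocument(words=["cheap", "flights", "to", "rome"], tags=["d0"]),
          TaggedDocument(words=["how", "to", "cook", "pasta"], tags=["d1"])]

model = Doc2Vec(corpus, vector_size=50, min_count=1, epochs=40)

# Infer a dense vector for an unseen short document, then find the
# nearest training document by cosine similarity.
vec = model.infer_vector(["cheap", "hotel", "in", "rome"])
print(model.dv.most_similar([vec], topn=1))
```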
Implemented in [gensim](/tag/gensim) |has_question|What is the name of the model proposed by Le and Mikolov to generate unsupervised representations of sentences, paragraphs, or entire documents without losing -What is the name of the model proposed by Le and Mikolov to generate unsupervised representations of sentences, paragraphs, or entire documents without losing|has_answer|Paragraph Vectors -(aka paragraph2vec, aka sentence embeddings) extends word2vec algorithm to larger blocks of text (sentences, paragraphs or entire documents). Represents each document by a dense vector which is trained to predict words in the document. Paragraph Vectors is the name of the model proposed by Le and Mikolov to generate unsupervised representations of sentences, paragraphs, or entire documents without losing local word order. Implemented in [gensim](/tag/gensim) |has_question|Where is Paragraph Vectors implemented? -Where is Paragraph Vectors implemented?|has_answer|[gensim](/tag/gensim) -Good explanation is this [blog post by D. Britz](/doc/?uri=http%3A%2F%2Fwww.wildml.com%2F2016%2F01%2Fattention-and-memory-in-deep-learning-and-nlp%2F). (But the best explanation related to attention is to be found in this [post](/doc/2019/08/transformers_from_scratch_%7C_pet) about Self-Attention.) While simple Seq2Seq builds a single context vector out of the encoder’s last hidden state, attention creates shortcuts between the context vector and the entire source input: the context vector has access to the entire input sequence. The decoder can “attend” to different parts of the source sentence at each step of the output generation, and the model learns what to attend to based on the input sentence and what it has produced so far. Possible to interpret what the model is doing by looking at the Attention weight matrix Cost: We need to calculate an attention value for each combination of input and output word (D. Britz: - attention is a bit of a misnomer: we look at everything in details before deciding what to focus on) |has_question|Who wrote this blog post? -Who wrote this blog post?|has_answer|D. Britz -Good explanation is this [blog post by D. Britz](/doc/?uri=http%3A%2F%2Fwww.wildml.com%2F2016%2F01%2Fattention-and-memory-in-deep-learning-and-nlp%2F). (But the best explanation related to attention is to be found in this [post](/doc/2019/08/transformers_from_scratch_%7C_pet) about Self-Attention.) While simple Seq2Seq builds a single context vector out of the encoder’s last hidden state, attention creates shortcuts between the context vector and the entire source input: the context vector has access to the entire input sequence. The decoder can “attend” to different parts of the source sentence at each step of the output generation, and the model learns what to attend to based on the input sentence and what it has produced so far. Possible to interpret what the model is doing by looking at the Attention weight matrix Cost: We need to calculate an attention value for each combination of input and output word (D. Britz: - attention is a bit of a misnomer: we look at everything in details before deciding what to focus on) |has_question|What is the best explanation related to attention? -Good explanation is this [blog post by D. Britz](/doc/?uri=http%3A%2F%2Fwww.wildml.com%2F2016%2F01%2Fattention-and-memory-in-deep-learning-and-nlp%2F). (But the best explanation related to attention is to be found in this [post](/doc/2019/08/transformers_from_scratch_%7C_pet) about Self-Attention.) 
While simple Seq2Seq builds a single context vector out of the encoder’s last hidden state, attention creates shortcuts between the context vector and the entire source input: the context vector has access to the entire input sequence. The decoder can “attend” to different parts of the source sentence at each step of the output generation, and the model learns what to attend to based on the input sentence and what it has produced so far. Possible to interpret what the model is doing by looking at the Attention weight matrix. Cost: We need to calculate an attention value for each combination of input and output word (D. Britz: attention is a bit of a misnomer: we look at everything in detail before deciding what to focus on) |has_question|Attention creates shortcuts between what and the entire source input? -Attention creates shortcuts between what and the entire source input?|has_answer|the context vector -A good explanation is this [blog post by D. Britz](/doc/?uri=http%3A%2F%2Fwww.wildml.com%2F2016%2F01%2Fattention-and-memory-in-deep-learning-and-nlp%2F). (But the best explanation related to attention is to be found in this [post](/doc/2019/08/transformers_from_scratch_%7C_pet) about Self-Attention.) While simple Seq2Seq builds a single context vector out of the encoder’s last hidden state, attention creates shortcuts between the context vector and the entire source input: the context vector has access to the entire input sequence. The decoder can “attend” to different parts of the source sentence at each step of the output generation, and the model learns what to attend to based on the input sentence and what it has produced so far. Possible to interpret what the model is doing by looking at the Attention weight matrix. Cost: We need to calculate an attention value for each combination of input and output word (D. Britz: attention is a bit of a misnomer: we look at everything in detail before deciding what to focus on) |has_question|What does the model learn what to attend to based on? -What does the model learn what to attend to based on?|has_answer|input sentence -A good explanation is this [blog post by D. Britz](/doc/?uri=http%3A%2F%2Fwww.wildml.com%2F2016%2F01%2Fattention-and-memory-in-deep-learning-and-nlp%2F). (But the best explanation related to attention is to be found in this [post](/doc/2019/08/transformers_from_scratch_%7C_pet) about Self-Attention.) While simple Seq2Seq builds a single context vector out of the encoder’s last hidden state, attention creates shortcuts between the context vector and the entire source input: the context vector has access to the entire input sequence. The decoder can “attend” to different parts of the source sentence at each step of the output generation, and the model learns what to attend to based on the input sentence and what it has produced so far. Possible to interpret what the model is doing by looking at the Attention weight matrix. Cost: We need to calculate an attention value for each combination of input and output word (D. Britz: attention is a bit of a misnomer: we look at everything in detail before deciding what to focus on) |has_question|What weight matrix can be used to interpret what the model is doing?
-What weight matrix can be used to interpret what the model is doing?|has_answer|Attention -Classification under the restriction that we may only observe a single example of each possible class before making a prediction about a test instance.|has_question|What is the restriction that we may only observe a single example of each possible class before making a prediction about a test instance? -What is the restriction that we may only observe a single example of each possible class before making a prediction about a test instance?|has_answer|Classification -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|has_question|What has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts? -What has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts?|has_answer|Document Embedding with Paragraph Vectors -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|has_question|What can the embedding of movie review texts be leveraged for? -What can the embedding of movie review texts be leveraged for?|has_answer|sentiment analysis -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. 
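Backing up to the attention entries above: the core computation, one attention weight per (output step, input word) pair followed by a weighted sum of the inputs, can be sketched in a few lines of NumPy. This is the scaled dot-product form; the shapes and data are toy assumptions:

```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def attention(queries, keys, values):
    """Scaled dot-product attention: one weight per (output, input) pair."""
    d = queries.shape[-1]
    scores = queries @ keys.T / np.sqrt(d)   # (n_out, n_in)
    weights = softmax(scores, axis=-1)       # rows sum to 1: what to attend to
    context = weights @ values               # context sees the entire input
    return context, weights

n_in, n_out, d = 6, 3, 8                     # toy sequence lengths and width
encoder_states = np.random.randn(n_in, d)    # keys and values
decoder_states = np.random.randn(n_out, d)   # queries
context, weights = attention(decoder_states, encoder_states, encoder_states)
# `weights` is the attention matrix one can inspect to interpret the model
```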
Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|has_question|What was the proof of concept of Paragraph Vectors? -What was the proof of concept of Paragraph Vectors?|has_answer|narrow -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|has_question|Paragraph Vectors is compared to what other document modelling algorithm? -Paragraph Vectors is compared to what other document modelling algorithm?|has_answer|Latent Dirichlet Allocation -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|has_question|What did we benchmark the models on? -What did we benchmark the models on?|has_answer|two document similarity data sets -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. 
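Since the abstract above benchmarks on document similarity data sets, here is a minimal sketch of how such a comparison is typically scored: cosine similarity between inferred paragraph vectors (tiny inline corpus, illustrative hyperparameters):

```python
import numpy as np
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

texts = [
    "paragraph vectors embed whole documents",
    "doc2vec measures how alike two texts are",
    "k-means partitions observations into clusters",
]
docs = [TaggedDocument(t.split(), [i]) for i, t in enumerate(texts)]
model = Doc2Vec(docs, vector_size=32, window=2, min_count=1, epochs=40)

def cosine(u, v):
    # cosine of the angle between two document vectors
    return float(u @ v / (np.linalg.norm(u) * np.linalg.norm(v)))

a = model.infer_vector("embedding whole documents".split())
b = model.infer_vector("how similar are two texts".split())
print(cosine(a, b))
```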
In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|has_question|What method performs significantly better than other methods? -What method performs significantly better than other methods?|has_answer|Paragraph Vector method -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|has_question|What can vector operations on Paragraph Vectors perform? -What can vector operations on Paragraph Vectors perform?|has_answer|useful semantic results -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. 
In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|What does the cosine of the angle between the vectors capture? -What does the cosine of the angle between the vectors capture?|has_answer|semantic similarity -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|A set of language modeling and feature learning techniques where words are mapped to vectors of real numbers in a low dimensional space, relative to what -A set of language modeling and feature learning techniques where words are mapped to vectors of real numbers in a low dimensional space, relative to what|has_answer|vocabulary size -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. 
A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|What is the range of dimensions that words in some language can be mapped to? -What is the range of dimensions that words in some language can be mapped to?|has_answer|200 to 500 dimensions -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. 
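A compact gensim sketch of the two training objectives just described, CBOW (predict a word given its context) versus skip-gram (predict the context given a word), with `window` as the surrounding-word context; sentences and settings are toy assumptions:

```python
from gensim.models import Word2Vec

sentences = [
    "word embeddings map words to dense vectors".split(),
    "the context is a window of surrounding words".split(),
    "skip gram predicts the context given a word".split(),
]
# sg=0: CBOW, predict a word given its context;
# sg=1: skip-gram, predict the context given a word.
cbow = Word2Vec(sentences, vector_size=50, window=2, min_count=1, sg=0)
skipgram = Word2Vec(sentences, vector_size=50, window=2, min_count=1, sg=1)

vector = skipgram.wv["words"]               # a 50-dimensional dense vector
print(skipgram.wv.most_similar("words"))    # ranked by cosine similarity
```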
Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|How many dimensions does a word embedding have? -How many dimensions does a word embedding have?|has_answer|one dimension per word -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|What is a succinct representation of the distribution of other words around a word called? -What is a succinct representation of the distribution of other words around a word called?|has_answer|Plongement lexical -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. 
Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|What is a method to generate word embeddings? -What is a method to generate word embeddings?|has_answer|dimensionality reduction -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|In the new generation of models, the vector estimation problem is handled as a what? -In the new generation of models, the vector estimation problem is handled as a what?|has_answer|supervised -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. 
A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|What is a window of surrounding words? -What is a window of surrounding words?|has_answer|context -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. 
Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|What is the most known software to produce word embeddings? -What is the most known software to produce word embeddings?|has_answer|Tomas Mikolov's Word2vec -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|Where are pre-trained word embeddings available? -Where are pre-trained word embeddings available?|has_answer|word2vec code.google -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. 
In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus. The mapping may be generated by training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The best-known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available on the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |has_question|Syntactic parsing and sentiment analysis are examples of what? -Syntactic parsing and sentiment analysis are examples of what?|has_answer|boost the performance in NLP tasks -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense.
The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|has_question|The success of our approach is mathematically explained using a variant of what model? -The success of our approach is mathematically explained using a variant of what model?|has_answer|random walk on discourses model -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. 
Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|has_question|Under the assumptions of the random walk on discourses model, what relationship exists between the vector of a word w and the vectors of the words -Under the assumptions of the random walk on discourses model, what relationship exists between the vector of a word w and the vectors of the words|has_answer|linear -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|has_question|Where do multiple word senses reside within the word embedding? -Where do multiple word senses reside within the word embedding?|has_answer|linear superposition -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. 
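The sparse-coding recovery described in these entries can be sketched with scikit-learn's dictionary-learning tools. The embedding matrix below is random stand-in data and the atom count is scaled down from the paper's roughly 2000; this is an illustration of the technique, not the paper's exact procedure:

```python
import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

# stand-in embedding matrix: 1000 "words" in 50 dimensions (random here;
# the paper uses real word embeddings and about 2000 atoms)
E = np.random.randn(1000, 50)

# learn a dictionary of "discourse atoms"; each word vector is then
# approximated as a sparse linear superposition of a few atoms
dico = MiniBatchDictionaryLearning(n_components=200, alpha=1.0,
                                   transform_algorithm="lasso_lars")
codes = dico.fit_transform(E)       # sparse codes, shape (1000, 200)
atoms = dico.components_            # atom vectors, shape (200, 50)

w = 0                               # index of some word
active = np.nonzero(codes[w])[0]    # the few atoms this word loads on
```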
It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|has_question|What can be of independent interest and make the method potentially more useful? -What can be of independent interest and make the method potentially more useful?|has_answer|Discourse atoms -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|has_question|What is used to verify and support the theory? 
-What is used to verify and support the theory?|has_answer|Empirical tests -or grammatical tagging, or word-category disambiguation: the process of marking up a word in a text as corresponding to a particular part of speech|has_question|What is the process of marking up a word in a text as corresponding to a particular part of speech called? -What is the process of marking up a word in a text as corresponding to a particular part of speech called?|has_answer|grammatical tagging -or grammatical tagging, or word-category disambiguation: the process of marking up a word in a text as corresponding to a particular part of speech|has_question|What is the process of marking up a word in a text as corresponding to a particular part of speech? -What is the process of marking up a word in a text as corresponding to a particular part of speech?|has_answer|word-category disambiguation -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|has_question|Transformer-based models are unable to process what? -Transformer-based models are unable to process what?|has_answer|long sequences -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. 
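For the part-of-speech-tagging entries a few records up, NLTK gives a one-call illustration of marking each word in a text with its part of speech; the resource names are an assumption that can drift between NLTK releases:

```python
import nltk

# on recent NLTK releases the tagger resource may instead be called
# "averaged_perceptron_tagger_eng"
nltk.download("punkt", quiet=True)
nltk.download("averaged_perceptron_tagger", quiet=True)

tokens = nltk.word_tokenize("Attention creates shortcuts to the input sequence")
print(nltk.pos_tag(tokens))
# e.g. [('Attention', 'NN'), ('creates', 'VBZ'), ('shortcuts', 'NNS'), ...]
```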
Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|has_question|How does the Longformer's attention mechanism scale with sequence length? -How does the Longformer's attention mechanism scale with sequence length?|has_answer|linearly -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|has_question|What is Longformer's attention mechanism? -What is Longformer's attention mechanism?|has_answer|a drop-in replacement for the standard self-attention -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. 
Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|has_question|Transformer-based models are unable to process long sequences due to what? -Transformer-based models are unable to process long sequences due to what?|has_answer|self-attention operation -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|has_question|What does the Longformer's attention mechanism make it easy to process? -What does the Longformer's attention mechanism make it easy to process?|has_answer|documents of thousands of tokens or longer -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|has_question|Longformer's attention mechanism is a drop-in replacement for what standard? 
-Longformer's attention mechanism is a drop-in replacement for what standard?|has_answer|self-attention -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|has_question|What is Longformer evaluated on? -What is Longformer evaluated on?|has_answer|character-level language modeling -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|has_question|What do we do with Longformer? -What do we do with Longformer?|has_answer|finetune it -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. 
Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|has_question|Longformer consistently outperforms what on long document tasks? -Longformer consistently outperforms what on long document tasks?|has_answer|RoBERTa -[Vaswani, et al. 2017 paper](https://arxiv.org/abs/1706.03762): Attention is all you need. [#seq2seq](/tag/sequence_to_sequence_learning) using only improved self-attention units (multi-head self-attention mechanism), without any RNN. |has_question|Who published a 2017 paper? -Who published a 2017 paper?|has_answer|Vaswani, et al. -[Vaswani, et al. 2017 paper](https://arxiv.org/abs/1706.03762): Attention is all you need. [#seq2seq](/tag/sequence_to_sequence_learning) using only improved self-attention units (multi-head self-attention mechanism), without any RNN. |has_question|What is all you need? -[Vaswani, et al. 2017 paper](https://arxiv.org/abs/1706.03762): Attention is all you need. [#seq2seq](/tag/sequence_to_sequence_learning) using only improved self-attention units (multi-head self-attention mechanism), without any RNN. |has_question|What is the multi-head self-attention mechanism? -What is the multi-head self-attention mechanism?|has_answer|improved self-attention units -When a learning algorithm is able to interactively query the user to obtain the label of a data point (pb: estimate which points are more valuable to solicit labels for) Active learning deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning.
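The self-attention unit the Vaswani et al. passage refers to is scaled dot-product attention, Attention(Q, K, V) = softmax(QK^T / sqrt(d_k)) V. A minimal NumPy version follows; note that the full (n, n) score matrix it builds is exactly the quadratic cost the Longformer passages above set out to avoid:

```python
import numpy as np

def scaled_dot_product_attention(Q, K, V):
    """Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V (Vaswani et al., 2017)."""
    d_k = Q.shape[-1]
    scores = Q @ K.T / np.sqrt(d_k)                    # (n_q, n_k) similarity scores
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)     # row-wise softmax
    return weights @ V                                 # weighted sum of the values

# toy usage: 4 tokens, model dimension 8, self-attention (Q = K = V = X)
X = np.random.default_rng(0).normal(size=(4, 8))
print(scaled_dot_product_attention(X, X, X).shape)     # (4, 8)
```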
The goal of active learning: to reduce the cost of labeling. To this end, the learning algorithm is allowed to choose which data to label based on uncertainty (e.g., the entropy of predicted class probabilities) or other heuristics ([src](doc:2020/07/2007_00077_similarity_search_))|has_question|What is a learning algorithm used for when a limited number of samples are queried to obtain the corresponding labels? -What is a learning algorithm used for when a limited number of samples are queried to obtain the corresponding labels?|has_answer|supervised learning -When a learning algorithm is able to interactively query the user to obtain the label of a data point (pb: estimate which points are more valuable to solicit labels for) Active learning deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. The goal of active learning: to reduce the cost of labeling. To this end, the learning algorithm is allowed to choose which data to label based on uncertainty (e.g., the entropy of predicted class probabilities) or other heuristics ([src](doc:2020/07/2007_00077_similarity_search_))|has_question|What is the goal of active learning? -What is the goal of active learning?|has_answer|to reduce the cost of labeling -When a learning algorithm is able to interactively query the user to obtain the label of a data point (pb: estimate which points are more valuable to solicit labels for) Active learning deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. The goal of active learning: to reduce the cost of labeling. To this end, the learning algorithm is allowed to choose which data to label based on uncertainty (e.g., the entropy of predicted class probabilities) or other heuristics ([src](doc:2020/07/2007_00077_similarity_search_))|has_question|What is the entropy of predicted class probabilities? -What is the entropy of predicted class probabilities?|has_answer|uncertainty -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|has_question|What is the name of the research that has focused on learning representations for clustering?
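A minimal sketch of the uncertainty heuristic named in the active-learning passage above (entropy of predicted class probabilities), assuming any scikit-learn-style classifier with `predict_proba`; the data and the model here are placeholders:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

def most_uncertain(model, X_pool, n_queries=5):
    """Rank unlabeled points by the entropy of their predicted class
    probabilities and return the indices whose labels are most valuable
    to solicit from the user."""
    proba = model.predict_proba(X_pool)
    entropy = -np.sum(proba * np.log(proba + 1e-12), axis=1)
    return np.argsort(entropy)[-n_queries:]

rng = np.random.default_rng(0)
X_labeled, y_labeled = rng.normal(size=(20, 4)), rng.integers(0, 2, 20)
X_pool = rng.normal(size=(200, 4))                 # abundant unlabeled data

model = LogisticRegression().fit(X_labeled, y_labeled)
query_idx = most_uncertain(model, X_pool)          # ask the oracle for these labels
print(query_idx)
```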
-What is the name of the research that has focused on learning representations for clustering?|has_answer|Unsupervised Deep Embedding for Clustering Analysis -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|has_question|What is the name of the method that simultaneously learns feature representations and cluster assignments using deep neural networks? -What is the name of the method that simultaneously learns feature representations and cluster assignments using deep neural networks?|has_answer|Deep Embedded Clustering -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|has_question|What does DEC iteratively optimize? -What does DEC iteratively optimize?|has_answer|clustering objective -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. 
In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|has_question|What has relatively little work focused on? -What has relatively little work focused on?|has_answer|learning representations for clustering -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|has_question|What does DEC learn? -What does DEC learn?|has_answer|a mapping from the data space to a lower-dimensional feature space -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|has_question|What do our experimental evaluations on image and text corpora show over state-of-the-art methods? -What do our experimental evaluations on image and text corpora show over state-of-the-art methods?|has_answer|significant improvement -Bidirectional Encoder Representations from Transformers: pretraining technique for NLP. [Google AI blog post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) BERT is designed to pre-train deep bidirectional representations by jointly conditioning on both left and right context in all layers. 
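The clustering objective DEC iteratively optimizes can be sketched in NumPy, following the paper's formulation: soft assignments via a Student's t kernel between embedded points and centroids, a sharpened target distribution, and a KL divergence loss (array sizes and the centroid initialization below are illustrative, and the encoder producing the embeddings is omitted):

```python
import numpy as np

def soft_assignments(Z, centroids, alpha=1.0):
    """Student's t similarity q_ij between embedded points Z and centroids."""
    d2 = ((Z[:, None, :] - centroids[None, :, :]) ** 2).sum(-1)
    q = (1.0 + d2 / alpha) ** (-(alpha + 1.0) / 2.0)
    return q / q.sum(axis=1, keepdims=True)

def target_distribution(q):
    """Sharpened targets p_ij: square q and renormalize, which emphasizes
    high-confidence assignments."""
    w = q ** 2 / q.sum(axis=0)
    return w / w.sum(axis=1, keepdims=True)

def kl_clustering_loss(q, p):
    """KL(P || Q): the objective DEC iteratively optimizes in feature space."""
    return float((p * np.log(p / q)).sum())

Z = np.random.default_rng(0).normal(size=(100, 10))  # embeddings from an encoder
centroids = Z[:3].copy()                             # e.g. a k-means-style init
q = soft_assignments(Z, centroids)
print(kl_clustering_loss(q, target_distribution(q)))
```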
As a result, the pre-trained BERT representations can be fine-tuned with just one additional output layer BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction (but it has been shown in the RoBERTa paper that this training objective doesn’t help that much). The general BERT adaptation approach is to alter the model used for pre-training while retaining the transformer encoder layers. The model discards the layers used for the final prediction in the pre-training tasks and adds layers to predict the target task. All parameters are then fine tuned on the target task Builds on [#The Transformer](/tag/attention_is_all_you_need) Code and pre-trained models open-sourced on Nov 3rd, 2018.|has_question|What is a pretraining technique for NLP? -What is a pretraining technique for NLP?|has_answer|Bidirectional Encoder Representations -Bidirectional Encoder Representations from Transformers: pretraining technique for NLP. [Google AI blog post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) BERT is designed to pre-train deep bidirectional representations by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT representations can be fine-tuned with just one additional output layer BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction (but it has been shown in the RoBERTa paper that this training objective doesn’t help that much). The general BERT adaptation approach is to alter the model used for pre-training while retaining the transformer encoder layers. The model discards the layers used for the final prediction in the pre-training tasks and adds layers to predict the target task. All parameters are then fine tuned on the target task Builds on [#The Transformer](/tag/attention_is_all_you_need) Code and pre-trained models open-sourced on Nov 3rd, 2018.|has_question|What is BERT designed to do? -What is BERT designed to do?|has_answer|pre-train deep bidirectional representations -Bidirectional Encoder Representations from Transformers: pretraining technique for NLP. [Google AI blog post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) BERT is designed to pre-train deep bidirectional representations by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT representations can be fine-tuned with just one additional output layer BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction (but it has been shown in the RoBERTa paper that this training objective doesn’t help that much). The general BERT adaptation approach is to alter the model used for pre-training while retaining the transformer encoder layers. The model discards the layers used for the final prediction in the pre-training tasks and adds layers to predict the target task. All parameters are then fine tuned on the target task Builds on [#The Transformer](/tag/attention_is_all_you_need) Code and pre-trained models open-sourced on Nov 3rd, 2018.|has_question|What two auxiliary tasks is BERT pre-trained on? -What two auxiliary tasks is BERT pre-trained on?|has_answer|Masked Language Model and Next Sentence Prediction -Bidirectional Encoder Representations from Transformers: pretraining technique for NLP. 
[Google AI blog post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) BERT is designed to pre-train deep bidirectional representations by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT representations can be fine-tuned with just one additional output layer BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction (but it has been shown in the RoBERTa paper that this training objective doesn’t help that much). The general BERT adaptation approach is to alter the model used for pre-training while retaining the transformer encoder layers. The model discards the layers used for the final prediction in the pre-training tasks and adds layers to predict the target task. All parameters are then fine tuned on the target task Builds on [#The Transformer](/tag/attention_is_all_you_need) Code and pre-trained models open-sourced on Nov 3rd, 2018.|has_question|What is the general BERT adaptation approach? -What is the general BERT adaptation approach?|has_answer|alter the model used for pre-training while retaining the transformer encoder layers -Bidirectional Encoder Representations from Transformers: pretraining technique for NLP. [Google AI blog post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) BERT is designed to pre-train deep bidirectional representations by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT representations can be fine-tuned with just one additional output layer BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction (but it has been shown in the RoBERTa paper that this training objective doesn’t help that much). The general BERT adaptation approach is to alter the model used for pre-training while retaining the transformer encoder layers. The model discards the layers used for the final prediction in the pre-training tasks and adds layers to predict the target task. All parameters are then fine tuned on the target task Builds on [#The Transformer](/tag/attention_is_all_you_need) Code and pre-trained models open-sourced on Nov 3rd, 2018.|has_question|What does the BERT adaptation approach do? -What does the BERT adaptation approach do?|has_answer|The model discards the layers used for the final prediction in the pre-training tasks and adds layers to predict the target task -Bidirectional Encoder Representations from Transformers: pretraining technique for NLP. [Google AI blog post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) BERT is designed to pre-train deep bidirectional representations by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT representations can be fine-tuned with just one additional output layer BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction (but it has been shown in the RoBERTa paper that this training objective doesn’t help that much). The general BERT adaptation approach is to alter the model used for pre-training while retaining the transformer encoder layers. The model discards the layers used for the final prediction in the pre-training tasks and adds layers to predict the target task. 
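A hedged PyTorch sketch of the adaptation pattern the BERT passage describes: keep the transformer encoder layers, discard the pre-training heads, add one output layer, then fine-tune all parameters on the target task. It assumes the Hugging Face `transformers` package is installed; the task head and the `[CLS]`-token pooling are conventional choices, not prescribed by the text:

```python
import torch
from torch import nn
from transformers import AutoModel, AutoTokenizer

class BertForTargetTask(nn.Module):
    """Pretrained encoder kept, pre-training prediction layers discarded,
    one new output layer added for the target task."""

    def __init__(self, num_labels, model_name="bert-base-uncased"):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(model_name)   # transformer encoder layers
        self.classifier = nn.Linear(self.encoder.config.hidden_size, num_labels)

    def forward(self, **enc):
        hidden = self.encoder(**enc).last_hidden_state         # (batch, seq, hidden)
        return self.classifier(hidden[:, 0])                   # [CLS] token -> task logits

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = BertForTargetTask(num_labels=2)
logits = model(**tokenizer("a toy example", return_tensors="pt"))
# all parameters (encoder + new output layer) are then fine-tuned on the target task
```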
All parameters are then fine tuned on the target task Builds on [#The Transformer](/tag/attention_is_all_you_need) Code and pre-trained models open-sourced on Nov 3rd, 2018.|has_question|When were code and pre-trained models open-sourced? -When were code and pre-trained models open-sourced?|has_answer|Nov 3rd, 2018 -ranking function used by search engines to rank matching documents according to their relevance to a given search query. Bag-of-words based. Algorithm used by default in [Elasticsearch](elasticsearch) and [Lucene](lucene) |has_question|What is used by search engines to rank documents according to their relevance to a given search query? -What is used by search engines to rank documents according to their relevance to a given search query?|has_answer|ranking function -ranking function used by search engines to rank matching documents according to their relevance to a given search query. Bag-of-words based. Algorithm used by default in [Elasticsearch](elasticsearch) and [Lucene](lucene) |has_question|What is the ranking function used by search engines to rank documents according to their relevance to a given search query? -ranking function used by search engines to rank matching documents according to their relevance to a given search query. Bag-of-words based. Algorithm used by default in [Elasticsearch](elasticsearch) and [Lucene](lucene) |has_question|What is used by default in Elasticsearch and Lucene? -What is used by default in Elasticsearch and Lucene?|has_answer|Algorithm -SparqlPress is a project. Primary ingredients are WordPress and SPARQL The goal for SparqlPress is easy-to-use, low-barrier-of-entry, access to the linked data web. There are two, intimately-related sides to the idea: producing data, and consuming it. One goal is to make it easy for Wordpress to expose more data in SPARQL-friendly form. Another is to make it easier to use a Wordpress installation as a personal, perhaps even private, local aggregation of such data.|has_question|What is the name of the project that uses WordPress and SPARQL? -SparqlPress is a project. Primary ingredients are WordPress and SPARQL The goal for SparqlPress is easy-to-use, low-barrier-of-entry, access to the linked data web. There are two, intimately-related sides to the idea: producing data, and consuming it. One goal is to make it easy for Wordpress to expose more data in SPARQL-friendly form. Another is to make it easier to use a Wordpress installation as a personal, perhaps even private, local aggregation of such data.|has_question|What are SparqlPress's primary ingredients? -What are SparqlPress's primary ingredients?|has_answer|WordPress and SPARQL -SparqlPress is a project. Primary ingredients are WordPress and SPARQL The goal for SparqlPress is easy-to-use, low-barrier-of-entry, access to the linked data web. There are two, intimately-related sides to the idea: producing data, and consuming it. One goal is to make it easy for Wordpress to expose more data in SPARQL-friendly form. Another is to make it easier to use a Wordpress installation as a personal, perhaps even private, local aggregation of such data.|has_question|What are the two sides of SparqlPress? -What are the two sides of SparqlPress?|has_answer|producing data, and consuming it -SparqlPress is a project. Primary ingredients are WordPress and SPARQL The goal for SparqlPress is easy-to-use, low-barrier-of-entry, access to the linked data web. There are two, intimately-related sides to the idea: producing data, and consuming it. 
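The bag-of-words ranking function mentioned above is Okapi BM25; a minimal implementation follows (the k1 and b values are the commonly used defaults, an assumption not stated in the passage):

```python
import math
from collections import Counter

def bm25_score(query_terms, doc_terms, corpus, k1=1.2, b=0.75):
    """Okapi BM25: bag-of-words relevance of one document to a query,
    the default ranking function in Lucene and Elasticsearch."""
    N = len(corpus)
    avgdl = sum(len(d) for d in corpus) / N        # average document length
    tf = Counter(doc_terms)
    score = 0.0
    for term in query_terms:
        df = sum(term in d for d in corpus)        # document frequency
        idf = math.log(1 + (N - df + 0.5) / (df + 0.5))
        denom = tf[term] + k1 * (1 - b + b * len(doc_terms) / avgdl)
        score += idf * tf[term] * (k1 + 1) / denom
    return score

corpus = [["long", "document", "transformer"], ["bag", "of", "words", "ranking"]]
print(bm25_score(["ranking", "words"], corpus[1], corpus))
```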
One goal is to make it easy for Wordpress to expose more data in SPARQL-friendly form. Another is to make it easier to use a Wordpress installation as a personal, perhaps even private, local aggregation of such data.|has_question|What is one goal of SparqlPress? -What is one goal of SparqlPress?|has_answer|to expose more data in SPARQL-friendly form -SparqlPress is a project. Primary ingredients are WordPress and SPARQL The goal for SparqlPress is easy-to-use, low-barrier-of-entry, access to the linked data web. There are two, intimately-related sides to the idea: producing data, and consuming it. One goal is to make it easy for Wordpress to expose more data in SPARQL-friendly form. Another is to make it easier to use a Wordpress installation as a personal, perhaps even private, local aggregation of such data.|has_question|What is another goal of SparqlPress? -What is another goal of SparqlPress?|has_answer|make it easier to use a Wordpress installation as a personal, perhaps even private, local aggregation of such data -Classifier on top of a sentence2vec model. Main idea: the morphological structure of a word carries important information about the meaning of the word, which is not taken into account by traditional [word embeddings](/tag/word_embedding). This is especially significant for morphologically rich languages (German, Turkish) in which a single word can have a large number of morphological forms, each of which might occur rarely, thus making it hard to train good word embeddings. FastText attempts to solve this by treating each word as the aggregation of its subwords (uses character n-grams as features - avoids the OOV (out of vocabulary) problem) (FastText represents words as the sum of their n-gram representations trained with a skip-gram model) Embeddings learned using FastText (trained on wikipedia) are available in [many languages](https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md) |has_question|What is the classifier on top of? -What is the classifier on top of?|has_answer|sentence2vec -Classifier on top of a sentence2vec model. Main idea: the morphological structure of a word carries important information about the meaning of the word, which is not taken into account by traditional [word embeddings](/tag/word_embedding). This is especially significant for morphologically rich languages (German, Turkish) in which a single word can have a large number of morphological forms, each of which might occur rarely, thus making it hard to train good word embeddings. FastText attempts to solve this by treating each word as the aggregation of its subwords (uses character n-grams as features - avoids the OOV (out of vocabulary) problem) (FastText represents words as the sum of their n-gram representations trained with a skip-gram model) Embeddings learned using FastText (trained on wikipedia) are available in [many languages](https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md) |has_question|What carries important information about the meaning of a word? -What carries important information about the meaning of a word?|has_answer|morphological structure -Classifier on top of a sentence2vec model. Main idea: the morphological structure of a word carries important information about the meaning of the word, which is not taken into account by traditional [word embeddings](/tag/word_embedding). 
This is especially significant for morphologically rich languages (German, Turkish) in which a single word can have a large number of morphological forms, each of which might occur rarely, thus making it hard to train good word embeddings. FastText attempts to solve this by treating each word as the aggregation of its subwords (uses character n-grams as features - avoids the OOV (out of vocabulary) problem) (FastText represents words as the sum of their n-gram representations trained with a skip-gram model) Embeddings learned using FastText (trained on wikipedia) are available in [many languages](https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md) |has_question|What languages have a large number of morphological forms? -What languages have a large number of morphological forms?|has_answer|German, Turkish -Classifier on top of a sentence2vec model. Main idea: the morphological structure of a word carries important information about the meaning of the word, which is not taken into account by traditional [word embeddings](/tag/word_embedding). This is especially significant for morphologically rich languages (German, Turkish) in which a single word can have a large number of morphological forms, each of which might occur rarely, thus making it hard to train good word embeddings. FastText attempts to solve this by treating each word as the aggregation of its subwords (uses character n-grams as features - avoids the OOV (out of vocabulary) problem) (FastText represents words as the sum of their n-gram representations trained with a skip-gram model) Embeddings learned using FastText (trained on wikipedia) are available in [many languages](https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md) |has_question|What character does FastText use as features? -What character does FastText use as features?|has_answer|n-grams -answering arbitrary context-independent questions (e.g. well-known facts or historical details). Typically assumed that the model can access an external collection of knowledge (e.g. a structured knowledge base or unstructured text corpus) (~open-book exam)|has_question|What type of questions are well-known facts or historical details? -What type of questions are well-known facts or historical details?|has_answer|context-independent questions -answering arbitrary context-independent questions (e.g. well-known facts or historical details). Typically assumed that the model can access an external collection of knowledge (e.g. a structured knowledge base or unstructured text corpus) (~open-book exam)|has_question|What are examples of context-independent questions? -What are examples of context-independent questions?|has_answer|well-known facts or historical details -answering arbitrary context-independent questions (e.g. well-known facts or historical details). Typically assumed that the model can access an external collection of knowledge (e.g. a structured knowledge base or unstructured text corpus) (~open-book exam)|has_question|What is an example of a model that can access an external collection of knowledge? -What is an example of a model that can access an external collection of knowledge?|has_answer|open-book exam -nonlinear dimensionality reduction technique that is particularly well suited for embedding high-dimensional data into a space of two or three dimensions, which can then be visualized in a scatter plot.
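A toy sketch of the FastText idea from the passage above: represent a word by its character n-grams (with boundary symbols, as in the FastText paper) and sum their vectors, so even out-of-vocabulary words get a representation. The hash-bucket lookup mirrors fastText's hashing trick but is purely illustrative, not the library's code:

```python
import numpy as np

def char_ngrams(word, n_min=3, n_max=6):
    """Subword units: character n-grams of the word padded with boundary
    symbols '<' and '>', plus the full padded word itself."""
    padded = f"<{word}>"
    grams = {padded[i:i + n] for n in range(n_min, n_max + 1)
             for i in range(len(padded) - n + 1)}
    grams.add(padded)
    return grams

class NGramEmbedder:
    """Toy lookup: a word vector is the sum of its n-gram vectors.
    Note: Python's built-in hash() is salted per process, which is fine
    for a sketch but not reproducible across runs."""

    def __init__(self, dim=50, buckets=10_000, seed=0):
        self.table = np.random.default_rng(seed).normal(size=(buckets, dim))
        self.buckets = buckets

    def vector(self, word):
        idx = [hash(g) % self.buckets for g in char_ngrams(word)]
        return self.table[idx].sum(axis=0)

emb = NGramEmbedder()
print(emb.vector("Wortbildung").shape)   # an OOV word still gets a vector
```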
Specifically, it models each high-dimensional object by a two- or three-dimensional point in such a way that similar objects are modeled by nearby points and dissimilar objects are modeled by distant points.|has_question|What can high-dimensional data be visualized in? -What can high-dimensional data be visualized in?|has_answer|scatter plot -nonlinear dimensionality reduction technique that is particularly well suited for embedding high-dimensional data into a space of two or three dimensions, which can then be visualized in a scatter plot. Specifically, it models each high-dimensional object by a two- or three-dimensional point in such a way that similar objects are modeled by nearby points and dissimilar objects are modeled by distant points.|has_question|What are similar objects modeled by? -What are similar objects modeled by?|has_answer|nearby points -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudok problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|has_question|What has been a major goal of modern AI systems? -What has been a major goal of modern AI systems?|has_answer|Integrating logical reasoning within deep learning architectures -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. 
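A minimal usage sketch of the dimensionality-reduction technique just described (t-SNE), using scikit-learn to map high-dimensional points to 2-D for a scatter plot; the perplexity value and the random stand-in data are illustrative:

```python
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

X = np.random.default_rng(0).normal(size=(200, 50))   # stand-in high-dimensional data

# map to 2-D so that similar objects land on nearby points and dissimilar
# objects on distant points; perplexity trades off local vs. global structure
X_2d = TSNE(n_components=2, perplexity=30, random_state=0).fit_transform(X)

plt.scatter(X_2d[:, 0], X_2d[:, 1], s=5)
plt.title("t-SNE embedding")
plt.show()
```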
We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|has_question|What is the name of the solver that can be integrated into the loop of larger deep learning systems? -What is the name of the solver that can be integrated into the loop of larger deep learning systems?|has_answer|MAXSAT -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|has_question|What is the MAXSAT solver based on? -What is the MAXSAT solver based on?|has_answer|fast coordinate descent approach -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|has_question|How does MAXSAT solve the semidefinite program?
-How does MAXSAT solve the semidefinite program?|has_answer|analytically differentiate -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|has_question|What do we learn by integrating MAXSAT into end-to-end learning systems? -What do we learn by integrating MAXSAT into end-to-end learning systems?|has_answer|logical structure -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|has_question|What type of Sudoku does MAXSAT solver learn how to play? -What type of Sudoku does MAXSAT solver learn how to play?|has_answer|9x9 -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems.
Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|has_question|What kind of Sudoku problem does MAXSAT solve? -What kind of Sudoku problem does MAXSAT solve?|has_answer|visual -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|has_question|What does MAXSAT solver show promise in? -What does MAXSAT solver show promise in?|has_answer|integrating logical structures within deep learning -Entity Linking often relies on rich structures and properties in the target knowledge base (KB). However, in many applications, the KB may be as simple and sparse as lists of names of the same type (e.g., lists of products) - the List-only entity linking problem |has_question|Entity Linking often relies on what in the target knowledge base? -Entity Linking often relies on what in the target knowledge base?|has_answer|rich structures and properties -Entity Linking often relies on rich structures and properties in the target knowledge base (KB). However, in many applications, the KB may be as simple and sparse as lists of names of the same type (e.g., lists of products) - the List-only entity linking problem |has_question|What is the problem with Entity Linking?
-What is the problem with Entity Linking?|has_answer|List-only entity linking -- [Home Page](https://www.fast.ai/) - [MOOC](https://course.fast.ai/) - [Github](https://github.com/fastai/fastai) - [Forum](https://forums.fast.ai/) - [docs.fast.ai](https://docs.fast.ai/) |has_question|What is fastai/fastai? -What is fastai/fastai?|has_answer|[Github] -- [Home Page](https://www.fast.ai/) - [MOOC](https://course.fast.ai/) - [Github](https://github.com/fastai/fastai) - [Forum](https://forums.fast.ai/) - [docs.fast.ai](https://docs.fast.ai/) |has_question|What is the name of the forum? -What is the name of the forum?|has_answer|[docs.fast.ai] -sequence labelling tasks where the goal is to identify the names of entities in a sentence. Named entities can be proper nouns (locations, people, organizations...), or can be much more domain-specific, such as diseases or genes in biomedical NLP.|has_question|What is a task where the goal is to identify the names of entities in a sentence? -What is a task where the goal is to identify the names of entities in a sentence?|has_answer|sequence labelling -sequence labelling tasks where the goal is to identify the names of entities in a sentence. Named entities can be proper nouns (locations, people, organizations...), or can be much more domain-specific, such as diseases or genes in biomedical NLP.|has_question|Named entities can be domain specific, such as what in biomedical NLP? -Named entities can be domain specific, such as what in biomedical NLP?|has_answer|diseases or genes -A method to map documents to a code (e.g., 32-bit memory address) so documents with semantically close content are mapped to close addresses. Method introduced by Ruslan Salakhutdinov and Geoffrey Hinton in this [paper](/doc/?uri=http%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fpii%2FS0888613X08001813) |has_question|What is an example of a method to map documents to a code? -What is an example of a method to map documents to a code?|has_answer|32-bit memory address -A method to map documents to a code (e.g., 32-bit memory address) so documents with semantically close content are mapped to close addresses. Method introduced by Ruslan Salakhutdinov and Geoffrey Hinton in this [paper](/doc/?uri=http%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fpii%2FS0888613X08001813) |has_question|Who introduced the method to map documents to a code? -Who introduced the method to map documents to a code?|has_answer|Ruslan Salakhutdinov and Geoffrey Hinton -Graph Convolutional Networks (GCNs) strike a balance between modeling the full structure of the graph dynamically, as the tensor model does, and modeling the local neighbourhood structure through extracted features (as substructure counting methods and RDF2Vec do). ([source](/doc/2019/08/the_knowledge_graph_as_the_defa)) |has_question|What are GCNs? -Graph Convolutional Networks (GCNs) strike a balance between modeling the full structure of the graph dynamically, as the tensor model does, and modeling the local neighbourhood structure through extracted features (as substructure counting methods and RDF2Vec do). ([source](/doc/2019/08/the_knowledge_graph_as_the_defa)) |has_question|What does Graph Convolutional Networks stand for? -What does Graph Convolutional Networks stand for?|has_answer|the_knowledge_graph -Unsupervised method to learn sentence representations.
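The close-address idea from the semantic-hashing passage above, sketched with random hyperplanes: the sign pattern of 32 projections is the document's address, and semantically close documents differ in few bits. This is plain locality-sensitive hashing, used here only as an illustrative stand-in, not the autoencoder method of Salakhutdinov and Hinton:

```python
import numpy as np

class HyperplaneHasher:
    """Sign of random projections gives a 32-bit address; nearby vectors
    tend to share bits, so related documents map to close addresses."""

    def __init__(self, dim, n_bits=32, seed=0):
        self.planes = np.random.default_rng(seed).normal(size=(n_bits, dim))

    def address(self, vec):
        bits = (self.planes @ vec) > 0
        return int("".join("1" if b else "0" for b in bits), 2)

def hamming(a, b):
    """Number of differing bits between two addresses."""
    return bin(a ^ b).count("1")

h = HyperplaneHasher(dim=100)
rng = np.random.default_rng(1)
doc = rng.normal(size=100)
near = doc + 0.05 * rng.normal(size=100)           # semantically close document
print(hamming(h.address(doc), h.address(near)))    # few differing bits
```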
Conceptually, the model can be interpreted as a natural extension of the word-contexts from C-BOW to a larger sentence context, with the sentence words being specifically optimized towards additive combination over the sentence, by means of the unsupervised objective function |has_question|What method is used to learn sentence representations? -What method is used to learn sentence representations?|has_answer|Unsupervised -Unsupervised method to learn sentence representations. Conceptually, the model can be interpreted as a natural extension of the word-contexts from C-BOW to a larger sentence context, with the sentence words being specifically optimized towards additive combination over the sentence, by means of the unsupervised objective function |has_question|The sentence words are specifically optimized towards what over the sentence? -The sentence words are specifically optimized towards what over the sentence?|has_answer|additive combination -Finding items that are similar to a given query is the core aspect of search and retrieval systems, as well as of recommendation engines.|has_question|What is the core aspect of search and retrieval systems? -What is the core aspect of search and retrieval systems?|has_answer|recommendation engines -ML ensemble meta-algorithm designed to improve the stability and accuracy of ML algorithms used in classification and regression (by combining classifications of randomly generated training sets) Usually applied to decision tree methods, but can be used with any type of method. Special case of the model averaging approach. Reduces variance, and hence the risk of overfitting. |has_question|ML ensemble meta-algorithm usually applied to what? -ML ensemble meta-algorithm usually applied to what?|has_answer|decision tree methods -ML ensemble meta-algorithm designed to improve the stability and accuracy of ML algorithms used in classification and regression (by combining classifications of randomly generated training sets) Usually applied to decision tree methods, but can be used with any type of method. Special case of the model averaging approach. Reduces variance, and hence the risk of overfitting. |has_question|What is designed to improve the stability and accuracy of ML algorithms used in classification and regression? -What is designed to improve the stability and accuracy of ML algorithms used in classification and regression?|has_answer|ML ensemble meta-algorithm -ML ensemble meta-algorithm designed to improve the stability and accuracy of ML algorithms used in classification and regression (by combining classifications of randomly generated training sets) Usually applied to decision tree methods, but can be used with any type of method. Special case of the model averaging approach. Reduces variance, and hence the risk of overfitting. |has_question|What is a special case of ML ensemble meta-algorithm? -What is a special case of ML ensemble meta-algorithm?|has_answer|model averaging approach -ML ensemble meta-algorithm designed to improve the stability and accuracy of ML algorithms used in classification and regression (by combining classifications of randomly generated training sets) Usually applied to decision tree methods, but can be used with any type of method. Special case of the model averaging approach. Reduces variance, and hence the risk of overfitting. |has_question|Reduces variance and what?
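A minimal sketch of the bagging meta-algorithm described above: each tree is trained on a bootstrap resample of the training set, and the committee's classifications are averaged (majority vote), which reduces variance and hence the risk of overfitting. All sizes and the synthetic data are illustrative:

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier

def bagged_predict(X_train, y_train, X_test, n_estimators=25, seed=0):
    """Train each tree on a randomly generated (bootstrap) training set,
    then combine the classifications by majority vote."""
    rng = np.random.default_rng(seed)
    votes = np.zeros((len(X_test), n_estimators), dtype=int)
    for m in range(n_estimators):
        idx = rng.integers(0, len(X_train), size=len(X_train))  # bootstrap sample
        tree = DecisionTreeClassifier(random_state=m)
        tree.fit(X_train[idx], y_train[idx])
        votes[:, m] = tree.predict(X_test)
    return (votes.mean(axis=1) > 0.5).astype(int)               # committee average

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 5))
y = (X[:, 0] + X[:, 1] > 0).astype(int)
print(bagged_predict(X[:200], y[:200], X[200:])[:10])
```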
-Reduces variance and what?|has_answer|risk of overfitting -for instance in image recognition using siamese networks, triplet loss function tries to maximize the distance between anchor image and negative image while minimizing the distance between anchor image and positive image, thereby learning to differentiate similar images from non-similar ones|has_question|What tries to maximize the distance between anchor image and negative image while minimizing the distance between anchor image and positive image? -What tries to maximize the distance between anchor image and negative image while minimizing the distance between anchor image and positive image?|has_answer|triplet loss function -for instance in image recognition using siamese networks, triplet loss function tries to maximize the distance between anchor image and negative image while minimizing the distance between anchor image and positive image, thereby learning to differentiate similar images from non-similar ones|has_question|What is a triplet loss function used for? -What is a triplet loss function used for?|has_answer|image recognition using siamese networks -A single decision tree is a highly non-linear classifier with typically low bias but high variance. Random forests address the problem of high variance by establishing a committee (i.e. average) of identically distributed single decision trees.|has_question|What type of decision tree is a highly non-linear classifier with typically low bias but high variance? -What type of decision tree is a highly non-linear classifier with typically low bias but high variance?|has_answer|single -A single decision tree is a highly non-linear classifier with typically low bias but high variance. Random forests address the problem of high variance by establishing a committee (i.e. average) of identically distributed single decision trees.|has_question|What solves the problem of high variance by establishing a committee (i.e. average) of identically distributed single decision trees? -What solves the problem of high variance by establishing a committee (i.e. average) of identically distributed single decision trees?|has_answer|Random forests -A single decision tree is a highly non-linear classifier with typically low bias but high variance. Random forests address the problem of high variance by establishing a committee (i.e. average) of identically distributed single decision trees.|has_question|Random forests address the problem of high variance by establishing a committee of identically distributed single decision trees? -Random forests address the problem of high variance by establishing a committee of identically distributed single decision trees?|has_answer|average -- Intent classification: predicting the intent of a query - slot filling extracts semantic concepts in the query (a sequence labeling task that tags the input word sequence). For example the user query could be “Find me an action movie by Steven Spielberg”.
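The triplet loss just described, written out as max(0, d(anchor, positive) - d(anchor, negative) + margin): it is zero once the anchor is closer to the positive than to the negative by at least the margin (the margin value and embedding size below are illustrative assumptions):

```python
import numpy as np

def triplet_loss(anchor, positive, negative, margin=0.2):
    """Penalizes triplets where the anchor-positive distance is not smaller
    than the anchor-negative distance by at least `margin`."""
    d_pos = np.sum((anchor - positive) ** 2)   # distance to the similar image
    d_neg = np.sum((anchor - negative) ** 2)   # distance to the dissimilar image
    return max(0.0, d_pos - d_neg + margin)

rng = np.random.default_rng(0)
a = rng.normal(size=128)                 # embedding of the anchor image
p = a + 0.1 * rng.normal(size=128)       # similar image -> small d(a, p)
n = rng.normal(size=128)                 # dissimilar image -> large d(a, n)
print(triplet_loss(a, p, n))             # ~0: triplet already well separated
```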
The intent here is “find_movie” while the slots are “genre” with value “action” and “directed_by” with value “Steven Spielberg”.|has_question|Slot filling extracts what in a query? -Slot filling extracts what in a query?|has_answer|semantic concepts -- Intent classification: predicting the intent of a query - slot filling extracts semantic concepts in the query (a sequence labeling task that tags the input word sequence). For example the user query could be “Find me an action movie by Steven Spielberg”. The intent here is “find_movie” while the slots are “genre” with value “action” and “directed_by” with value “Steven Spielberg”.|has_question|"The user query could be ""Find me an action movie by whom?""" -"The user query could be ""Find me an action movie by whom?"""|has_answer|Steven Spielberg -- Intent classification: predicting the intent of a query - slot filling extracts semantic concepts in the query (a sequence labeling task that tags the input word sequence). For example the user query could be “Find me an action movie by Steven Spielberg”. The intent here is “find_movie” while the slots are “genre” with value “action” and “directed_by” with value “Steven Spielberg”.|has_question|What is the intent of the user query? -What is the intent of the user query?|has_answer|“find_movie” -PCA is a statistical procedure that converts a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components. - PCA is based on extracting the axes on which the data shows the highest variability. PCA can be done by eigenvalue decomposition of a data covariance matrix or singular value decomposition of a data matrix, usually after mean centering and normalizing the data matrix for each attribute|has_question|What is a set of values of linearly uncorrelated variables called? -What is a set of values of linearly uncorrelated variables called?|has_answer|principal components -PCA is a statistical procedure that converts a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components. - PCA is based on extracting the axes on which the data shows the highest variability. PCA can be done by eigenvalue decomposition of a data covariance matrix or singular value decomposition of a data matrix, usually after mean centering and normalizing the data matrix for each attribute|has_question|What is PCA based on? -What is PCA based on?|has_answer|extracting the axes -PCA is a statistical procedure that converts a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components. - PCA is based on extracting the axes on which the data shows the highest variability. PCA can be done by eigenvalue decomposition of a data covariance matrix or singular value decomposition of a data matrix, usually after mean centering and normalizing the data matrix for each attribute|has_question|How can PCA be done? -How can PCA be done?|has_answer|eigenvalue decomposition of a data covariance matrix or singular value decomposition of a data matrix -application of machine learning in the construction of ranking models. Training data consists of lists of items with some partial order specified between items in each list. |has_question|What is used in the construction of ranking models? 
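The PCA entry above is a recipe that fits in a few lines of NumPy. A minimal sketch of exactly the stated procedure; the random data and the choice of two components are illustrative:

```python
# PCA: mean-center the data, take the eigenvalue decomposition of the
# covariance matrix, and project onto the axes of highest variance.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))            # 200 observations, 5 variables

Xc = X - X.mean(axis=0)                  # mean centering
cov = np.cov(Xc, rowvar=False)           # data covariance matrix
eigvals, eigvecs = np.linalg.eigh(cov)   # eigenvalue decomposition

order = np.argsort(eigvals)[::-1]        # axes sorted by explained variance
components = eigvecs[:, order[:2]]       # keep the top-2 principal axes

Z = Xc @ components                      # the principal components
print(Z.shape)                           # (200, 2)
```

The SVD route mentioned in the entry (`np.linalg.svd(Xc)`) yields the same axes and is generally numerically preferable for large matrices.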
-What is used in the construction of ranking models?|has_answer|machine learning -application of machine learning in the construction of ranking models. Training data consists of lists of items with some partial order specified between items in each list. |has_question|Training data consists of lists of items with what sort of order specified between items in each list? -Training data consists of lists of items with what sort of order specified between items in each list?|has_answer|partial order -Adaptive boosting (Authors won Gödel Prize for their work) output of the 'weak learners' is combined into a weighted sum that represents the final output of the boosted classifier. Sensitive to noisy data and outliers (says wikipedia) AdaBoost (with decision trees as the weak learners) is often referred to as the best out-of-the-box classifier |has_question|What prize did authors win for their work in Adaptive boosting? -What prize did authors win for their work in Adaptive boosting?|has_answer|Gödel Prize -Adaptive boosting (Authors won Gödel Prize for their work) output of the 'weak learners' is combined into a weighted sum that represents the final output of the boosted classifier. Sensitive to noisy data and outliers (says wikipedia) AdaBoost (with decision trees as the weak learners) is often referred to as the best out-of-the-box classifier |has_question|What is often referred to as the best out-of-the-box classifier? -Adaptive boosting (Authors won Gödel Prize for their work) output of the 'weak learners' is combined into a weighted sum that represents the final output of the boosted classifier. Sensitive to noisy data and outliers (says wikipedia) AdaBoost (with decision trees as the weak learners) is often referred to as the best out-of-the-box classifier |has_question|What does AdaBoost use as the weak learners? -What does AdaBoost use as the weak learners?|has_answer|decision trees -construction of a decision tree from class-labeled training tuples frequent problem: overfitting (=high variance) |has_question|What is a common problem in the construction of a decision tree from class-labeled training tuples? -What is a common problem in the construction of a decision tree from class-labeled training tuples?|has_answer|overfitting -A generative model that allows sets of observations to be explained by unobserved groups that explain why some parts of the data are similar. Models the intuition that the topic of a document will probabilistically influence the author’s choice of words when writing the document. Documents are interpreted as a mixture of topics (a probability distribution over topics), and topics as a probability distribution over words. Encodes the intuition that documents cover a small number of topics and that topics often use a small number of words LDA is an extension of [LSI/pLSI](latent_semantic_analysis) |has_question|What allows sets of observations to be explained by unobserved groups that explain why some parts of the data are similar? -What allows sets of observations to be explained by unobserved groups that explain why some parts of the data are similar?|has_answer|generative model -A generative model that allows sets of observations to be explained by unobserved groups that explain why some parts of the data are similar. Models the intuition that the topic of a document will probabilistically influence the author’s choice of words when writing the document.
Documents are interpreted as a mixture of topics (a probability distribution over topics), and topics as a probability distribution over words. Encodes the intuition that documents cover a small number of topics and that topics often use a small number of words LDA is an extension of [LSI/pLSI](latent_semantic_analysis) |has_question|Models the intuition that what will probabilistically influence the author’s choice of words when writing a document? -Models the intuition that what will probabilistically influence the author’s choice of words when writing a document?|has_answer|the topic of a document -A generative model that allows sets of observations to be explained by unobserved groups that explain why some parts of the data are similar. Models the intuition that the topic of a document will probabilistically influence the author’s choice of words when writing the document. Documents are interpreted as a mixture of topics (a probability distribution over topics), and topics as a probability distribution over words. Encodes the intuition that documents cover a small number of topics and that topics often use a small number of words LDA is an extension of [LSI/pLSI](latent_semantic_analysis) |has_question|Documents are interpreted as a mixture of what? -Documents are interpreted as a mixture of what?|has_answer|topics -A generative model that allows sets of observations to be explained by unobserved groups that explain why some parts of the data are similar. Models the intuition that the topic of a document will probabilistically influence the author’s choice of words when writing the document. Documents are interpreted as a mixture of topics (a probability distribution over topics), and topics as a probability distribution over words. Encodes the intuition that documents cover a small number of topics and that topics often use a small number of words LDA is an extension of [LSI/pLSI](latent_semantic_analysis) |has_question|What is an extension of [LSI/pLSI](latent_semantic_analysis)? -The goal is to learn from examples a similarity function that measures how similar or related two objects are. Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity. Distance metric learning is a major tool for a variety of problems in computer vision. It has successfully been employed for image retrieval, near duplicate detection, clustering and zero-shot learning. ([src](doc/2020/02/_1703_07464_no_fuss_distance_m))|has_question|What is the goal of distance metric learning? -What is the goal of distance metric learning?|has_answer|similarity function -The goal is to learn from examples a similarity function that measures how similar or related two objects are. Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity. Distance metric learning is a major tool for a variety of problems in computer vision. It has successfully been employed for image retrieval, near duplicate detection, clustering and zero-shot learning. ([src](doc/2020/02/_1703_07464_no_fuss_distance_m))|has_question|What is the task of learning a distance function over objects consistent with a notion of similarity? -What is the task of learning a distance function over objects consistent with a notion of similarity?|has_answer|Distance metric learning -The goal is to learn from examples a similarity function that measures how similar or related two objects are. 
Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity. Distance metric learning is a major tool for a variety of problems in computer vision. It has successfully been employed for image retrieval, near duplicate detection, clustering and zero-shot learning. ([src](doc/2020/02/_1703_07464_no_fuss_distance_m))|has_question|Distance metric learning is a major tool for a variety of problems in what? -Distance metric learning is a major tool for a variety of problems in what?|has_answer|computer vision -The goal is to learn from examples a similarity function that measures how similar or related two objects are. Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity. Distance metric learning is a major tool for a variety of problems in computer vision. It has successfully been employed for image retrieval, near duplicate detection, clustering and zero-shot learning. ([src](doc/2020/02/_1703_07464_no_fuss_distance_m))|has_question|Distance metric learning has been successfully employed for what? -Distance metric learning has been successfully employed for what?|has_answer|image retrieval, near duplicate detection, clustering and zero-shot learning -The goal is to learn from examples a similarity function that measures how similar or related two objects are. Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity. Distance metric learning is a major tool for a variety of problems in computer vision. It has successfully been employed for image retrieval, near duplicate detection, clustering and zero-shot learning. ([src](doc/2020/02/_1703_07464_no_fuss_distance_m))|has_question|What is the term for distance metric learning? -What is the term for distance metric learning?|has_answer|no_fuss_distance_m -instead of just processing the words in a sentence from left to right, also go from right to left, allowing later words to help disambiguate the meaning of earlier words and phrases|has_question|What do later words help do? -What do later words help do?|has_answer|disambiguate -Good identifiers for product types based on Wikipedia. GoodRelations-compatible OWL DL class definitions for ca. 300,000 types of product or services that have an entry in the English Wikipedia |has_question|What are provided for product types based on Wikipedia? -What are provided for product types based on Wikipedia?|has_answer|Good identifiers -Good identifiers for product types based on Wikipedia. GoodRelations-compatible OWL DL class definitions for ca. 300,000 types of product or services that have an entry in the English Wikipedia |has_question|What is the name of the organization that provides good identifiers for product types? -Good identifiers for product types based on Wikipedia. GoodRelations-compatible OWL DL class definitions for ca. 300,000 types of product or services that have an entry in the English Wikipedia |has_question|How many types of products or services have an entry in the English Wikipedia? -How many types of products or services have an entry in the English Wikipedia?|has_answer|300,000 -Extend SVMs with the aim of max-margin classification while ensuring that there are as few unlabelled observations near the margin as possible |has_question|What is the goal of extending SVMs?
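The distance metric learning and triplet loss entries above share one core computation. A minimal PyTorch sketch, assuming torch is available; the margin, embedding size and toy indices are illustrative (PyTorch also ships an equivalent `nn.TripletMarginLoss`):

```python
# Triplet loss: pull the anchor towards the positive example and push it
# away from the negative one until the gap is at least a margin.
import torch
import torch.nn.functional as F

def triplet_loss(anchor, positive, negative, margin=1.0):
    d_pos = F.pairwise_distance(anchor, positive)   # distance to similar item
    d_neg = F.pairwise_distance(anchor, negative)   # distance to dissimilar item
    return torch.clamp(d_pos - d_neg + margin, min=0).mean()

emb = torch.nn.Embedding(100, 16)                   # toy embeddings to be learned
a, p, n = emb(torch.tensor([0])), emb(torch.tensor([1])), emb(torch.tensor([2]))
print(triplet_loss(a, p, n))
```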
-What is the goal of extending SVMs?|has_answer|max-margin -This specification defines a merge of SPARQL and XQuery, and has the potential to bring XML and RDF closer together. XSPARQL provides concise and intuitive solutions for mapping between XML and RDF in either direction, addressing both the use cases of GRDDL and SAWSDL. |has_question|XSPARQL has the potential to bring what two things closer together? -XSPARQL has the potential to bring what two things closer together?|has_answer|XML and RDF -This specification defines a merge of SPARQL and XQuery, and has the potential to bring XML and RDF closer together. XSPARQL provides concise and intuitive solutions for mapping between XML and RDF in either direction, addressing both the use cases of GRDDL and SAWSDL. |has_question|This specification defines a merge of what two languages? -This specification defines a merge of what two languages?|has_answer|SPARQL and XQuery -This specification defines a merge of SPARQL and XQuery, and has the potential to bring XML and RDF closer together. XSPARQL provides concise and intuitive solutions for mapping between XML and RDF in either direction, addressing both the use cases of GRDDL and SAWSDL. |has_question|What provides concise and intuitive solutions for mapping between XML and RDF in either direction? -techniques that make use of the spectrum (eigenvalues) of the similarity matrix of the data to perform dimensionality reduction before clustering in fewer dimensions.|has_question|What is a technique that makes use of the spectrum (eigenvalues) of the similarity matrix to perform before clustering in fewer dimensions -What is a technique that makes use of the spectrum (eigenvalues) of the similarity matrix to perform before clustering in fewer dimensions|has_answer|dimensionality reduction -techniques that make use of the spectrum (eigenvalues) of the similarity matrix of the data to perform dimensionality reduction before clustering in fewer dimensions.|has_question|What is another term for the spectrum of similarity matrix? -What is another term for the spectrum of similarity matrix?|has_answer|eigenvalues -Language modeling: task of predicting the next word in a text given the previous words. Example of concrete practical applications: intelligent keyboards Language model: probability distribution over sequences of words. Statistical language models try to learn the probability of the next word given its previous words. Models rely on an auto-regressive factorization of the joint probability of a corpus using different approaches, from n-gram models to RNNs (SOTA as of 2018-01) ([source](https://arxiv.org/abs/1801.06146))|has_question|What is the task of predicting the next word in a text given the previous words? -What is the task of predicting the next word in a text given the previous words?|has_answer|Language modeling -Language modeling: task of predicting the next word in a text given the previous words. Example of concrete practical applications: intelligent keyboards Language model: probability distribution over sequences of words. Statistical language models try to learn the probability of the next word given its previous words. Models rely on an auto-regressive factorization of the joint probability of a corpus using different approaches, from n-gram models to RNNs (SOTA as of 2018-01) ([source](https://arxiv.org/abs/1801.06146))|has_question|What is an example of a language model? 
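The spectral clustering entry above maps directly onto scikit-learn. A minimal sketch; the two-moons dataset and the nearest-neighbors affinity are illustrative choices:

```python
# Spectral clustering: eigenvectors of the similarity matrix give a
# low-dimensional embedding in which an ordinary clustering is run.
from sklearn.cluster import SpectralClustering
from sklearn.datasets import make_moons

X, _ = make_moons(n_samples=300, noise=0.05, random_state=0)

model = SpectralClustering(n_clusters=2, affinity="nearest_neighbors", random_state=0)
labels = model.fit_predict(X)
print(labels[:10])
```

Plain k-means typically fails on this interleaved shape; clustering in the spectral embedding is what recovers the two moons.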
-What is an example of a language model?|has_answer|intelligent keyboards -Language modeling: task of predicting the next word in a text given the previous words. Example of concrete practical applications: intelligent keyboards Language model: probability distribution over sequences of words. Statistical language models try to learn the probability of the next word given its previous words. Models rely on an auto-regressive factorization of the joint probability of a corpus using different approaches, from n-gram models to RNNs (SOTA as of 2018-01) ([source](https://arxiv.org/abs/1801.06146))|has_question|What models try to learn the probability of the next word given its previous words? -What models try to learn the probability of the next word given its previous words?|has_answer|Statistical language models -Language modeling: task of predicting the next word in a text given the previous words. Example of concrete practical applications: intelligent keyboards Language model: probability distribution over sequences of words. Statistical language models try to learn the probability of the next word given its previous words. Models rely on an auto-regressive factorization of the joint probability of a corpus using different approaches, from n-gram models to RNNs (SOTA as of 2018-01) ([source](https://arxiv.org/abs/1801.06146))|has_question|What is an example of an auto-regressive factorization of the joint probability of a corpus? -What is an example of an auto-regressive factorization of the joint probability of a corpus?|has_answer|n-gram models -several representations are proposed to extend word representation for phrases ([Yin and Schütze, 2014](/doc/?uri=http%3A%2F%2Faclweb.org%2Fanthology%2FP14-3006); Yu and Dredze, 2015; Passos et al., 2014). However, they don’t use structured knowledge to derive phrase representations (as said [here](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1607.07956)) [Sebastian Ruder](/tag/sebastian_ruder) in 2017 says the [following](http://ruder.io/word-embeddings-2017/index.html#phrasesandmultiwordexpressions): explicitly modelling phrases has so far not shown significant improvements on downstream tasks that would justify the additional complexity (but hum: what about NER - in particular if using external knowledge such as lexicons?) |has_question|In what year did Yin and Schütze propose to extend word representation for phrases? -In what year did Yin and Schütze propose to extend word representation for phrases?|has_answer|2014 -several representations are proposed to extend word representation for phrases ([Yin and Schütze, 2014](/doc/?uri=http%3A%2F%2Faclweb.org%2Fanthology%2FP14-3006); Yu and Dredze, 2015; Passos et al., 2014). However, they don’t use structured knowledge to derive phrase representations (as said [here](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1607.07956)) [Sebastian Ruder](/tag/sebastian_ruder) in 2017 says the [following](http://ruder.io/word-embeddings-2017/index.html#phrasesandmultiwordexpressions): explicitly modelling phrases has so far not shown significant improvements on downstream tasks that would justify the additional complexity (but hum: what about NER - in particular if using external knowledge such as lexicons?) |has_question|What do they not use to derive phrase representations? -What do they not use to derive phrase representations?|has_answer|structured knowledge
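The language modeling entries above reduce, in their simplest statistical form, to counting. A toy bigram sketch (the corpus is illustrative); real models add smoothing, longer contexts, or the RNNs the entry mentions:

```python
# Bigram language model: estimate P(next word | previous word) from counts.
from collections import Counter, defaultdict

corpus = "the cat sat on the mat the cat ate".split()

bigrams = defaultdict(Counter)
for prev, nxt in zip(corpus, corpus[1:]):
    bigrams[prev][nxt] += 1

def p_next(prev, word):
    counts = bigrams[prev]
    return counts[word] / sum(counts.values()) if counts else 0.0

print(p_next("the", "cat"))  # 2/3: "the" is followed by "cat" twice, "mat" once
```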
-A system for rapidly creating training sets with weak supervision The System for Programmatically Building and Managing Training Data|has_question|A system for rapidly creating training sets with what kind of supervision? -A system for rapidly creating training sets with what kind of supervision?|has_answer|weak supervision -Generative models that can be used to analyze the evolution of (unobserved) topics of a collection of documents over time. Extension to Latent Dirichlet Allocation (LDA) that can handle sequential documents|has_question|What does LDA stand for? -What does LDA stand for?|has_answer|Latent Dirichlet Allocation -Generative models that can be used to analyze the evolution of (unobserved) topics of a collection of documents over time. Extension to Latent Dirichlet Allocation (LDA) that can handle sequential documents|has_question|What can be used to analyze the evolution of (unobserved) topics of a collection of documents over time? -What can be used to analyze the evolution of (unobserved) topics of a collection of documents over time?|has_answer|Generative models -Word embedding technique (unsupervised learning algorithm for obtaining vector representations for words) based on factorizing a matrix of word co-occurrence statistics (Training is performed on aggregated global word-word co-occurrence statistics from a corpus). Resulting representations showcase interesting linear substructures of the word vector space. |has_question|What is the unsupervised learning algorithm for obtaining vector representations for words called? -What is the unsupervised learning algorithm for obtaining vector representations for words called?|has_answer|Word embedding technique -Word embedding technique (unsupervised learning algorithm for obtaining vector representations for words) based on factorizing a matrix of word co-occurrence statistics (Training is performed on aggregated global word-word co-occurrence statistics from a corpus). Resulting representations showcase interesting linear substructures of the word vector space. |has_question|What is the word embedding technique based on? -What is the word embedding technique based on?|has_answer|word-word co-occurrence statistics -Word embedding technique (unsupervised learning algorithm for obtaining vector representations for words) based on factorizing a matrix of word co-occurrence statistics (Training is performed on aggregated global word-word co-occurrence statistics from a corpus). Resulting representations showcase interesting linear substructures of the word vector space. |has_question|What do the results of the word embedding technique showcase? -What do the results of the word embedding technique showcase?|has_answer|interesting linear substructures -formalism of information retrieval useful to derive functions that rank matching documents according to their relevance to a given search query.|has_question|What type of information retrieval is useful to derive functions that rank matching documents according to their relevance to a given search query? -What type of information retrieval is useful to derive functions that rank matching documents according to their relevance to a given search query?|has_answer|formalism -Receiver operating characteristic. Plot used to diagnose the ability of a binary classifier as its discrimination threshold is varied.
Plotting the true positive rate (TPR: recall) against the false positive rate (FPR: fall-out or probability of false alarm) at various threshold settings. |has_question|What is a plot used to diagnose the ability of a binary classifier as its discrimination threshold is varied? -What is a plot used to diagnose the ability of a binary classifier as its discrimination threshold is varied?|has_answer|Receiver operating characteristic -Receiver operating characteristic. Plot used to diagnose the ability of a binary classifier as its discrimination threshold is varied. Plotting the true positive rate (TPR: recall) against the false positive rate (FPR: fall-out or probability of false alarm) at various threshold settings. |has_question|A plot used to diagnose the ability of a binary classifier as its what is varied? -A plot used to diagnose the ability of a binary classifier as its what is varied?|has_answer|discrimination threshold -Receiver operating characteristic. Plot used to diagnose the ability of a binary classifier as its discrimination threshold is varied. Plotting the true positive rate (TPR: recall) against the false positive rate (FPR: fall-out or probability of false alarm) at various threshold settings. |has_question|Plotting the true positive rate against what? -Plotting the true positive rate against what?|has_answer|false positive rate -Conventional topic models implicitly capture the document-level word co-occurrence patterns to reveal topics. This may not work well on short texts, because of data sparsity. Compared with long texts, topic discovery from short texts has the following three challenges: - only very limited word co-occurrence information is available, - the frequency of words plays a less discriminative role, - and the limited contexts make it more difficult to identify the senses of ambiguous words |has_question|Conventional topic models implicitly capture what level of word co-occurrence patterns? -Conventional topic models implicitly capture what level of word co-occurrence patterns?|has_answer|document-level -Conventional topic models implicitly capture the document-level word co-occurrence patterns to reveal topics. This may not work well on short texts, because of data sparsity. Compared with long texts, topic discovery from short texts has the following three challenges: - only very limited word co-occurrence information is available, - the frequency of words plays a less discriminative role, - and the limited contexts make it more difficult to identify the senses of ambiguous words |has_question|Why may topic models not work well on short texts? -Why may topic models not work well on short texts?|has_answer|data sparsity -Conventional topic models implicitly capture the document-level word co-occurrence patterns to reveal topics. This may not work well on short texts, because of data sparsity. Compared with long texts, topic discovery from short texts has the following three challenges: - only very limited word co-occurrence information is available, - the frequency of words plays a less discriminative role, - and the limited contexts make it more difficult to identify the senses of ambiguous words |has_question|What type of word co-occurrence information is available on short texts? -What type of word co-occurrence information is available on short texts?|has_answer|very limited -In multi-label classification, each sample can be associated with a set of class labels. It is distinct from multi-class classification which aims to predict a single mutually exclusive label.
Methods (non exhaustive list): - Dividing the original multi-label classification problem into multiple independent binary classification tasks - computationally expensive - cannot identify the correlation between label information - Label embedding based approaches (deriving a latent label space with reduced dimensionality) - correlation between the labels can be implicitly exploited eg. replace the final softmax layer with a Sigmoid layer and use Binary Cross Entropy loss function to optimize the model.|has_question|In multi-label classification, each sample can be associated with what? -In multi-label classification, each sample can be associated with what?|has_answer|a set of class labels -In multi-label classification, each sample can be associated with a set of class labels. It is distinct from multi-class classification which aims to predict a single mutually exclusive label. Methods (non exhaustive list): - Dividing the original multi-label classification problem into multiple independent binary classification tasks - computationally expensive - cannot identify the correlation between label information - Label embedding based approaches (deriving a latent label space with reduced dimensionality) - correlation between the labels can be implicitly exploited eg. replace the final softmax layer with a Sigmoid layer and use Binary Cross Entropy loss function to optimize the model.|has_question|What does multi-class classification aim to do? -What does multi-class classification aim to do?|has_answer|predict a single mutually exclusive label -In multi-label classification, each sample can be associated with a set of class labels. It is distinct from multi-class classification which aims to predict a single mutually exclusive label. Methods (non exhaustive list): - Dividing the original multi-label classification problem into multiple independent binary classification tasks - computationally expensive - cannot identify the correlation between label information - Label embedding based approaches (deriving a latent label space with reduced dimensionality) - correlation between the labels can be implicitly exploited eg. replace the final softmax layer with a Sigmoid layer and use Binary Cross Entropy loss function to optimize the model.|has_question|What is the term for creating a latent label space with reduced dimensionality? -What is the term for creating a latent label space with reduced dimensionality?|has_answer|Label embedding based approaches -In multi-label classification, each sample can be associated with a set of class labels. It is distinct from multi-class classification which aims to predict a single mutually exclusive label. Methods (non exhaustive list): - Dividing the original multi-label classification problem into multiple independent binary classification tasks - computationally expensive - cannot identify the correlation between label information - Label embedding based approaches (deriving a latent label space with reduced dimensionality) - correlation between the labels can be implicitly exploited eg. replace the final softmax layer with a Sigmoid layer and use Binary Cross Entropy loss function to optimize the model.|has_question|What is the final softmax layer replaced with? -What is the final softmax layer replaced with?|has_answer|Sigmoid layer -cluster analysis which seeks to build a hierarchy of clusters. 2 kinds: - Agglomerative - Divisive|has_question|What seeks to build a hierarchy of clusters? 
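The multi-label entry above ends with a concrete suggestion: replace the final softmax with a sigmoid layer and optimize with binary cross-entropy. A minimal PyTorch sketch of that head; the feature and label counts and the 0.5 threshold are illustrative:

```python
# Multi-label head: one independent sigmoid per label plus binary
# cross-entropy, instead of a softmax over mutually exclusive classes.
import torch
import torch.nn as nn

n_features, n_labels = 32, 5
model = nn.Linear(n_features, n_labels)         # one logit per label
loss_fn = nn.BCEWithLogitsLoss()                # sigmoid + binary cross-entropy

x = torch.randn(8, n_features)
y = torch.randint(0, 2, (8, n_labels)).float()  # each sample gets a *set* of labels

logits = model(x)
loss = loss_fn(logits, y)
loss.backward()

preds = (torch.sigmoid(logits) > 0.5).int()     # independent per-label decisions
print(loss.item(), preds[0].tolist())
```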
-What seeks to build a hierarchy of clusters?|has_answer|cluster analysis -cluster analysis which seeks to build a hierarchy of clusters. 2 kinds: - Agglomerative - Divisive|has_question|What type of cluster analysis seeks to build a hierarchy of clusters? -What type of cluster analysis seeks to build a hierarchy of clusters?|has_answer|Agglomerative -What type of cluster analysis seeks to build a hierarchy of clusters?|has_answer|Divisive -Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high-dimensionality. Network embeddings aim to represent network vertices into a low-dimensional vector space, by preserving both network topology structure and node content information. Algorithms are typically unsupervised and can be broadly classified into three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): - matrix factorization - random walks - deep learning approaches (graph neural networks - GNNs) - graph convolution networks (GraphSage) - graph attention networks, - graph auto-encoders (e.g., DNGR and SDNE) - graph generative networks, - graph spatial-temporal networks. Node embeddings (intuition: similar nodes should have similar vectors). - Laplacian EigenMap (an eigenvector based computation, OK when matrix is not too large) - LINE Large-scale Information Network Embedding, most cited paper at WWW2015; Breadth first search - DeepWalk (Perozzi et al. 2014) (the technique to learn word embeddings adapted to nodes: treating nodes as words and generating short random walks as sentences) - Node2Vec (2016) (mixed strategy) etc. |has_question|What are networks usually represented as? -What are networks usually represented as?|has_answer|adjacency matrices -Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high-dimensionality. Network embeddings aim to represent network vertices into a low-dimensional vector space, by preserving both network topology structure and node content information. Algorithms are typically unsupervised and can be broadly classified into three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): - matrix factorization - random walks - deep learning approaches (graph neural networks - GNNs) - graph convolution networks (GraphSage) - graph attention networks, - graph auto-encoders (e.g., DNGR and SDNE) - graph generative networks, - graph spatial-temporal networks. Node embeddings (intuition: similar nodes should have similar vectors). - Laplacian EigenMap (an eigenvector based computation, OK when matrix is not too large) - LINE Large-scale Information Network Embedding, most cited paper at WWW2015; Breadth first search - DeepWalk (Perozzi et al. 2014) (the technique to learn word embeddings adapted to nodes: treating nodes as words and generating short random walks as sentences) - Node2Vec (2016) (mixed strategy) etc. |has_question|What do adjacency matrices suffer from? -What do adjacency matrices suffer from?|has_answer|data sparsity and high-dimensionality -Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high-dimensionality. Network embeddings aim to represent network vertices into a low-dimensional vector space, by preserving both network topology structure and node content information. 
Algorithms are typically unsupervised and can be broadly classified into three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): - matrix factorization - random walks - deep learning approaches (graph neural networks - GNNs) - graph convolution networks (GraphSage) - graph attention networks, - graph auto-encoders (e.g., DNGR and SDNE) - graph generative networks, - graph spatial-temporal networks. Node embeddings (intuition: similar nodes should have similar vectors). - Laplacian EigenMap (an eigenvector based computation, OK when matrix is not too large) - LINE Large-scale Information Network Embedding, most cited paper at WWW2015; Breadth first search - DeepWalk (Perozzi et al. 2014) (the technique to learn word embeddings adapted to nodes: treating nodes as words and generating short random walks as sentences) - Node2Vec (2016) (mixed strategy) etc. |has_question|What aim to represent network vertices into a low-dimensional vector space? -What aim to represent network vertices into a low-dimensional vector space?|has_answer|Network embeddings -Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high-dimensionality. Network embeddings aim to represent network vertices into a low-dimensional vector space, by preserving both network topology structure and node content information. Algorithms are typically unsupervised and can be broadly classified into three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): - matrix factorization - random walks - deep learning approaches (graph neural networks - GNNs) - graph convolution networks (GraphSage) - graph attention networks, - graph auto-encoders (e.g., DNGR and SDNE) - graph generative networks, - graph spatial-temporal networks. Node embeddings (intuition: similar nodes should have similar vectors). - Laplacian EigenMap (an eigenvector based computation, OK when matrix is not too large) - LINE Large-scale Information Network Embedding, most cited paper at WWW2015; Breadth first search - DeepWalk (Perozzi et al. 2014) (the technique to learn word embeddings adapted to nodes: treating nodes as words and generating short random walks as sentences) - Node2Vec (2016) (mixed strategy) etc. |has_question|Algorithms are typically what? -Algorithms are typically what?|has_answer|unsupervised -Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high-dimensionality. Network embeddings aim to represent network vertices into a low-dimensional vector space, by preserving both network topology structure and node content information. Algorithms are typically unsupervised and can be broadly classified into three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): - matrix factorization - random walks - deep learning approaches (graph neural networks - GNNs) - graph convolution networks (GraphSage) - graph attention networks, - graph auto-encoders (e.g., DNGR and SDNE) - graph generative networks, - graph spatial-temporal networks. Node embeddings (intuition: similar nodes should have similar vectors). - Laplacian EigenMap (an eigenvector based computation, OK when matrix is not too large) - LINE Large-scale Information Network Embedding, most cited paper at WWW2015; Breadth first search - DeepWalk (Perozzi et al. 2014) (the technique to learn word embeddings adapted to nodes: treating nodes as words and generating short random walks as sentences) - Node2Vec (2016) (mixed strategy) etc. 
|has_question|What is the idea behind node embeddings? -What is the idea behind node embeddings?|has_answer|similar nodes should have similar vectors -Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high-dimensionality. Network embeddings aim to represent network vertices into a low-dimensional vector space, by preserving both network topology structure and node content information. Algorithms are typically unsupervised and can be broadly classified into three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): - matrix factorization - random walks - deep learning approaches (graph neural networks - GNNs) - graph convolution networks (GraphSage) - graph attention networks, - graph auto-encoders (e.g., DNGR and SDNE) - graph generative networks, - graph spatial-temporal networks. Node embeddings (intuition: similar nodes should have similar vectors). - Laplacian EigenMap (an eigenvector based computation, OK when matrix is not too large) - LINE Large-scale Information Network Embedding, most cited paper at WWW2015; Breadth first search - DeepWalk (Perozzi et al. 2014) (the technique to learn word embeddings adapted to nodes: treating nodes as words and generating short random walks as sentences) - Node2Vec (2016) (mixed strategy) etc. |has_question|What is the most cited paper at WWW2015? -What is the most cited paper at WWW2015?|has_answer|LINE Large-scale Information Network Embedding -Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high-dimensionality. Network embeddings aim to represent network vertices into a low-dimensional vector space, by preserving both network topology structure and node content information. Algorithms are typically unsupervised and can be broadly classified into three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): - matrix factorization - random walks - deep learning approaches (graph neural networks - GNNs) - graph convolution networks (GraphSage) - graph attention networks, - graph auto-encoders (e.g., DNGR and SDNE) - graph generative networks, - graph spatial-temporal networks. Node embeddings (intuition: similar nodes should have similar vectors). - Laplacian EigenMap (an eigenvector based computation, OK when matrix is not too large) - LINE Large-scale Information Network Embedding, most cited paper at WWW2015; Breadth first search - DeepWalk (Perozzi et al. 2014) (the technique to learn word embeddings adapted to nodes: treating nodes as words and generating short random walks as sentences) - Node2Vec (2016) (mixed strategy) etc. |has_question|What is a mixed strategy to learn word embeddings adapted to nodes? -What is a mixed strategy to learn word embeddings adapted to nodes?|has_answer|Node2Vec
diff --git a/ckb/datasets/semanlink/test.csv b/ckb/datasets/semanlink/test.csv
index 68a62bc..4a44fe6 100644
--- a/ckb/datasets/semanlink/test.csv
+++ b/ckb/datasets/semanlink/test.csv
@@ -1,803 +1,8631 @@
-Photons corrélés|skos:broader|Mécanique quantique
-Régions polaires|skos:broader|Géographie
-Ex URSS URSS|skos:broader|Ex URSS URSS
-blojsom|skos:broader|Java
-Mines d'or|skos:broader|Or
-Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism.
We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:arxiv_author|Jakob Uszkoreit -Chine : écologie|skos:broader|Crise écologique -Sony|skos:broader|Entreprise -Génocide rwandais|skos:broader|La communauté internationale est une garce -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. 
These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:arxiv_firstAuthor|Rohan Anil -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|sl:arxiv_author|Thomas Hofmann -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. 
This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:tag|tag:facebook_fair -Web Application Threats|skos:broader|Cybersecurity Sécurité informatique -MaxEnt classifier (Multinomial logistic regression)|skos:broader|Logistic regression -Trou noir|skos:broader|Astronomie -Named Entity Recognition with Extremely Limited Data Named Entity Search (NES) We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries. We do not propose this as a replacement for NER, but as something to be used for an ephemeral or contextual class of entity, when it does not make sense to label hundreds or thousands of instances to learn a classifier Traditional information retrieval treats named entity recognition as a pre-indexing corpus annotation task, allowing entity tags to be indexed and used during search. Named entity taggers themselves are typically trained on thousands or tens of thousands of examples labeled by humans. However, there is a long tail of named entities classes, and for these cases, labeled data may be impossible to find or justify financially. We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries.|sl:arxiv_author|James Allan -Lee Feigenbaum|skos:broader|SW guys (and girls) -Hippies|skos:broader|I like I like -Pentagon|skos:broader|USA -Sentiment analysis|skos:broader|Sentiment -Filme brasileiro@pt|skos:broader|Film -Java|skos:broader|Sun Microsystems -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. 
We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:arxiv_author|William W. Cohen -What is life ?|skos:broader|Biology -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:arxiv_author|Zhiyuan Liu -RDF|skos:broader|Data Interchange Format -SemTechBiz|skos:broader|Semantic Web conferences -Milliardaire|skos:broader|Money -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. 
The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|sl:tag|tag:amazon_alexa -Caetano Veloso|skos:broader|Brésil -Animal|skos:broader|Biology -Web Services|skos:broader|Web -NLP@Stanford|skos:broader|AI@Stanford -Etat policier|skos:broader|Police -Text Editor|skos:broader|Tools -Attentats 13-11-2015|skos:broader|Terrorisme islamiste -Deep Learning frameworks|skos:broader|Frameworks -Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (méthode résumée [ici](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|sl:tag|tag:semantic_hashing -Musique du Niger|skos:broader|Music of Africa -Named Entity Disambiguation using Deep Learning on Graphs Evaluation of different deep learning techniques to create a context vector from graphs, aimed at high-accuracy NED. (neural approach for entity disambiguation using graphs as background knowledge) We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task... [published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) In NED, the system must be able to generate a context for an entity in a text and an entity in a knowledge base, then correctly link the two. Explore whether representing graphs as triplets is more useful than using the full topological information of the graph We tackle \\ac{NED} by comparing entities in short sentences with \\wikidata{} graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to \\ac{NED}. 
Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task. In addition, a new dataset (\\wikidatadisamb{}) is created to allow a clean and scalable evaluation of \\ac{NED} with \\wikidata{} entries, and to be used as a reference in future research. In the end our results show that a \\ac{Bi-LSTM} encoding of the graph triplets performs best, improving upon the baseline models and scoring an \\m{F1} value of $91.6\\%$ on the \\wikidatadisamb{} test set|sl:arxiv_author|Marc Sloan -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:arxiv_author|Aurko Roy -Travailler moins|skos:broader|Travail -RNN-LM|skos:broader|RNN -Macron|skos:broader|Homme politique -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based methods are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors.
Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:arxiv_author|Gyorgy Kovacs -Missing Labels (ML)|skos:broader|Supervised machine learning -Spark (Java web framework)|skos:broader|Java 8 lambdas -Mutualisme|skos:broader|Biology -Littérature|skos:broader|Art -Explosions cosmiques|skos:broader|Astronomie -Adolescents|skos:broader|Jeunesse -Peintre|skos:broader|Peinture -ESWC|skos:broader|Semantic Web conferences -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:arxiv_author|Gorka Labaka -Synonym URIs|skos:broader|Linking Open Data -Bitcoin|skos:broader|Peer to peer -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. 
As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall, the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|sl:arxiv_author|Francisco De Sousa Webber -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:tag|tag:language_model -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense.
Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:arxiv_author|Yuanzhi Li -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:tag|tag:semantic_search -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|sl:arxiv_firstAuthor|Ledell Wu -Origine de l'agriculture|skos:broader|Agriculture -Wolfram Language|skos:broader|Stephen Wolfram -Transnets|skos:broader|Journal Le Monde -ELMo|skos:broader|Word sense / Lexical ambiguity -Semantic Web : Application|skos:broader|Semantic Web -Pollueurs payeurs|skos:broader|Pollution -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. 
We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.|sl:arxiv_author|Ivan Titov -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|sl:arxiv_author|Zaid Harchaoui -Domain Knowledge + Deep Learning|skos:broader|Deep Learning -SW in Technical Automotive Documentation|skos:broader|Technical documentation -OWL ontology|skos:broader|OWL -Richard Cyganiak|skos:broader|Technical girls and guys -Nazisme|skos:broader|Méchant -Continent de plastique|skos:broader|Plastic -Aidan Hogan|skos:broader|SW guys (and girls) -faiss|skos:broader|Nearest neighbor search -XTech|skos:broader|Conférences -Terres agricoles|skos:broader|Agriculture -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. 
Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:arxiv_author|Mikel Artetxe -Semantic CMS|skos:broader|CMS -SW Wiki|skos:broader|Semantic Web -Jeff Hawkins|skos:broader|AI girls and guys -Web server|skos:broader|Web Serving -Pierre Rebour|skos:broader|Ami -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:tag|tag:combinatorial_generalization -Encelade|skos:broader|Saturne -Uncertainty in Deep Learning|skos:broader|Deep Learning -Grève du sexe|skos:broader|Sexe -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. 
Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|sl:tag|tag:named_entity_recognition -Neural Bag of Words|skos:broader|NLP techniques -Neo4j|skos:broader|NOSQL -SemWeb Pro|skos:broader|Paris -Crète antique|skos:broader|Antiquité -Javascript tool|skos:broader|Dev tools -Economie allemande|skos:broader|Allemagne -Chirac|skos:broader|Politique française -Semantic web : Use cases|skos:broader|Semantic Web -ElasticSearch|skos:broader|Search Engines -AI teams|skos:broader|Artificial Intelligence -Enhancing the Power of Cardinal's Algorithm Cardinal's factorization algorithm of 1996 splits a univariate polynomial into two factors with root sets separated by the imaginary axis, which is an important goal itself and a basic step toward root-finding. The novelty of the algorithm and its potential power have been well recognized by experts immediately, but by 2016, that is, two decades later, its practical value still remains nil, particularly because of the high computational cost of performing its final stage by means of computing approximate greatest common divisor of two polynomials. We briefly recall Cardinal's algorithm and its difficulties, amend it based on some works performed since 1996, extend its power to splitting out factors of a more general class, and reduce the final stage of the algorithm to quite manageable computations with structured matrices. Some of our techniques can be of independent interest for matrix computations.|sl:tag|tag:polynomial -BBC semantic publishing|skos:broader|Good -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:arxiv_author|Jack W. Rae -LOD|skos:broader|LD -Film américain|skos:broader|Film -ULMFiT|skos:broader|Pre-Trained Language Models -Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift (\"give in\" is different from \"give\") but much worse with revealing implicit meaning (\"hot tea\" is about temperature, \"hot debate\" isn't).
Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|sl:arxiv_author|Vered Shwartz -Chef d'état|skos:broader|Homme célèbre -Mallet|skos:broader|Java tool -Venus de Brassempouy|skos:broader|Arts premiers -A Brief Introduction to Machine Learning for Engineers This monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning. The treatment concentrates on probabilistic models for supervised and unsupervised learning problems. It introduces fundamental concepts and algorithms by building on first principles, while also exposing the reader to more advanced topics with extensive pointers to the literature, within a unified notation and mathematical framework. The material is organized according to clearly defined categories, such as discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models. This monograph is meant as an entry point for researchers with a background in probability and linear algebra.|sl:tag|tag:introduction -Chant|skos:broader|Musique -FaceNet: A Unified Embedding for Face Recognition and Clustering Learns a Euclidean embedding per image Uses a deep CNN trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. state-of-the-art face recognition performance using only 128-bytes per face. Despite significant recent advances in the field of face recognition, implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors. Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. 
Our system cuts the error rate in comparison to the best published result by 30% on both datasets. We also introduce the concept of harmonic embeddings, and a harmonic triplet loss, which describe different versions of face embeddings (produced by different networks) that are compatible to each other and allow for direct comparison between each other.|sl:arxiv_author|James Philbin -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|sl:arxiv_author|Jang Hyun Cho -Kenya|skos:broader|Afrique de l'Est -JavaOne|skos:broader|Java -Combining text and structured data (ML-NLP)|skos:broader|NLP techniques -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. 
Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|sl:arxiv_author|Yu-Chiang Frank Wang -RDFa parser|skos:broader|RDFa -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|sl:tag|tag:tomas_mikolov -Archéologie amazonienne|skos:broader|Civilisations précolombiennes -Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. two methods: 1. Category Embedding model: it replaces the entities in the context with their directly labeled categories to build categories’ context; 2. Hierarchical Category Embedding: it further incorporates all ancestor categories of the context entities to utilize the hierarchical information. Due to the lack of structured knowledge applied in learning distributed representation of categories, existing work cannot incorporate category hierarchies into entity information. We propose a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. The framework allows to compute meaningful semantic relatedness between entities and categories. Our framework can handle both single-word concepts and multiple-word concepts with superior performance on concept categorization and yield state of the art results on dataless hierarchical classification.|sl:tag|tag:arxiv_doc -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve.
Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|sl:tag|tag:embeddings -Sequence labeling|skos:broader|Machine learning: problems -Fossile vivant|skos:broader|Biology -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:arxiv_author|Pengcheng He -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. 
NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|sl:tag|tag:arxiv_doc -Pubby|skos:broader|Linking Open Data -Crise de la dette publique grecque|skos:broader|Crise de la dette -Fulani|skos:broader|Peuples -RDF data visualization|skos:broader|Data visualisation -Rare words (NLP)|skos:broader|NLP tasks / problems -Machine learning|skos:broader|Data science -Mars Express|skos:broader|Mars 2004 -Mongol|skos:broader|Peuples -Jsonld/Jena|skos:broader|Jena -Jean Rouch|skos:broader|Réalisateur -Banco|skos:broader|Architecture en terre -XQuery|skos:broader|XML -A La Carte Embedding|skos:broader|Sanjeev Arora -KG Embeddings Library|skos:broader|Machine Learning library -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:arxiv_author|Gabriel Pereyra -Multilevel models (also known as hierarchical linear models, nested data models, mixed models, random coefficient, random-effects models, random parameter models, or split-plot designs) are statistical models of parameters that vary at more than one level. 
An example could be a model of student performance that contains measures for individual students as well as measures for classrooms within which the students are grouped. These models can be seen as generalizations of linear models (in particular, linear regression), although they can also extend to non-linear models. |skos:broader|a statistical process for estimating the relationships among variables. -Semantic web company|skos:broader|Tech company -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:arxiv_author|Andrej Risteski -Linking Enterprise Data|skos:broader|Semantic Enterprise -Armement|skos:broader|Militaire -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. 
Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|sl:tag|tag:arxiv_doc -Looks like everything (up to 2020-07-14) refers to this [github project](doc:2020/07/ukplab_sentence_transformers_s), [paper (Sentence-BERT)](doc:2019/08/_1908_10084_sentence_bert_sen)|skos:broader|In practice, many NLP applications rely on a simple sentence embedding: the average of the embeddings of the words in it. We can do better. Ex of use (besides trivial ones such as classification and similarity): use sentence embeddings to cluster sentences in documents, which aids in the automatic extraction of key information from large bodies of text. -SPARQL endpoint|skos:broader|SPARQL -Twine|skos:broader|Social software -Institutions européennes|skos:broader|Europe -Princeton|skos:broader|Universités américaines -Paris|skos:broader|Ville -Carl Lewis|skos:broader|Sportif -Data Scientists|skos:broader|Data science -Diacritics in URI|skos:broader|Diacritics -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Brian Strope -Japonais|skos:broader|Japon -Enterprise Knowledge Graph|skos:broader|Semantic Enterprise Architecture -Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. 
Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|sl:arxiv_author|Naveen Suda -Baïkal|skos:broader|Eau -Smoothed Inverse Frequency: a linear representation of a sentence which is better than the simple average of the embeddings of its words 2 ideas: - assign to each word a weighting that depends on the frequency of the word in the corpus (reminiscent of TF-IDF) - some denoising (removing the component from the top singular direction) Todo (?): check implementation as a [sklearn Vectorizer](https://github.com/ChristophAlt/embedding_vectorizer) |skos:broader|In practice, many NLP applications rely on a simple sentence embedding: the average of the embeddings of the words in it. We can do better. Ex of use (besides trivial ones such as classification and similarity): use sentence embeddings to cluster sentences in documents, which aids in the automatic extraction of key information from large bodies of text. -Digital economy|skos:broader|Internet -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. [conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods.
Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperform several popular clustering methods when tested on three public short text datasets.|sl:arxiv_firstAuthor|Jiaming Xu -Representing Sentences as Low-Rank Subspaces We observe a simple geometry of sentences -- the word representations of a given sentence roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. A sentence of N words is a matrix (300, N) (if 300 is the dim of the word embeddings space). We take the eg. 4 (hyperparam) heaviest singular values - a subspace with dim 4 Similarity between docs: principal angle between the subspaces (reminiscent of cosine similarity) Sentences are important semantic units of natural language. A generic, distributional representation of sentences that can capture the latent semantics is beneficial to multiple downstream applications. We observe a simple geometry of sentences -- the word representations of a given sentence (on average 10.23 words in all SemEval datasets with a standard deviation 4.84) roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. Such an unsupervised representation is empirically validated via semantic textual similarity tasks on 19 different datasets, where it outperforms the sophisticated neural network models, including skip-thought vectors, by 15% on average.|sl:arxiv_author|Pramod Viswanath -14 juillet|skos:broader|Révolution française -eClassOWL|skos:broader|Product description -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. 
We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|sl:arxiv_firstAuthor|German I. Parisi -In practice, many NLP applications rely on a simple sentence embedding: the average of the embeddings of the words in it. We can do better. Ex of use (besides trivial ones such as classification and similarity): use sentence embeddings to cluster sentences in documents, which aids in the automatic extraction of key information from large bodies of text. |skos:broader|The objective of embedding methods is to organize symbolic objects (e.g., words, entities, concepts) in a way such that their similarity in the embedding space reflects their semantic or functional similarity -Origines de l'homme|skos:broader|Paléontologie humaine -Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|sl:arxiv_author|Prakhar Gupta -DSSM (Deep Semantic Similarity Model)|skos:broader|Siamese networks -Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \"compositional\" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|sl:tag|tag:path_queries -Antibiotiques|skos:broader|Médecine -Louis Jouvet|skos:broader|Acteur -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling.
This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:tag|tag:nlp -Distilling the Knowledge in a Neural Network a different kind of training, which we call “distillation” to transfer the knowledge from the cumbersome model to a small model that is more suitable for deployment Caruana and his collaborators have shown that it is possible to compress the knowledge in an [#ensemble](/tag/ensemble_learning.html) into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST. A very simple way to improve the performance of almost any machine learning algorithm is to train many different models on the same data and then to average their predictions. Unfortunately, making predictions using a whole ensemble of models is cumbersome and may be too computationally expensive to allow deployment to a large number of users, especially if the individual models are large neural nets. Caruana and his collaborators have shown that it is possible to compress the knowledge in an ensemble into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST and we show that we can significantly improve the acoustic model of a heavily used commercial system by distilling the knowledge in an ensemble of models into a single model. We also introduce a new type of ensemble composed of one or more full models and many specialist models which learn to distinguish fine-grained classes that the full models confuse. Unlike a mixture of experts, these specialist models can be trained rapidly and in parallel.|sl:arxiv_author|Oriol Vinyals -Tomcat tips|skos:broader|Tomcat -Validation: XML vs RDF|skos:broader|RDF vs XML -Coursera: Computational Neuroscience|skos:broader|Coursera -Fractales|skos:broader|Mathématiques -Guerres coloniales|skos:broader|War -Championnats du monde à Paris-Saint Denis, 2003|skos:broader|Paris -FBI|skos:broader|USA -Graph neural networks|skos:broader|Neural networks -PBS|skos:broader|Télévision -Songhaï|skos:broader|Afrique de l'Ouest -RDF performance issues|skos:broader|RDF -EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction Providing explanations along with predictions is crucial in some text processing tasks. 
Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. Presented in these [slides](/doc/2019/12/unsupervised_learning_with_text) Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. The presence of a concept is decided from an excerpt i.e. a small sequence of consecutive words in the text. Relevant concepts for the prediction task at hand are automatically defined by our model, avoiding the need for concept-level annotations. To ease interpretability, we enforce that for each concept, the corresponding excerpts share similar semantics and are differentiable from each other. We experimentally demonstrate the relevance of our approach on text classification and multi-sentiment analysis tasks.|sl:arxiv_firstAuthor|Diane Bouchacourt -Stemming|skos:broader|NLP techniques -Satellite images|skos:broader|Photo -Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) meh More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|sl:arxiv_author|Lea Helmers -Javascript tips|skos:broader|Dev tips -Neuroevolution|skos:broader|Evolutionary computation -paggr|skos:broader|Benjamin Nowack -Cambridge Analytica|skos:broader|Dark side of Tech -StarSpace: Embed All The Things!
We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|sl:tag|tag:nlp_facebook -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|sl:tag|tag:transfer_learning -Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. 
It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|sl:arxiv_author|Oriol Vinyals -sig.ma|skos:broader|Giovanni Tummarello -URL|skos:broader|Dev -The information bottleneck method We define the relevant information in a signal x ∈ X as being the information that this signal provides about another signal y ∈ Y. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal x requires more than just predicting y, it also requires specifying which features of X play a role in the prediction. We formalize this problem as that of finding a short code for X that preserves the maximum information about Y. That is, we squeeze the information that X provides about Y through a ‘bottleneck’ formed by a limited set of codewords X̃... This approach yields an exact set of self consistent equations for the coding rules X → X̃ and X̃ → Y. (from the intro): how to define meaningful / relevant information? An issue left out of information theory by Shannon (whose focus was on the problem of transmitting information rather than judging its value to the recipient), which leads to considering statistical and information theoretic principles as almost irrelevant for the question of meaning. In contrast, we argue here that information theory, in particular lossy source compression, provides a natural quantitative approach to the question of “relevant information.” Specifically, we formulate a variational principle for the extraction or efficient representation of relevant information. We define the relevant information in a signal $x \in X$ as being the information that this signal provides about another signal $y \in Y$. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal $x$ requires more than just predicting $y$, it also requires specifying which features of $X$ play a role in the prediction. We formalize this problem as that of finding a short code for $X$ that preserves the maximum information about $Y$. That is, we squeeze the information that $X$ provides about $Y$ through a ‘bottleneck’ formed by a limited set of codewords $\tilde{X}$. This constrained optimization problem can be seen as a generalization of rate distortion theory in which the distortion measure $d(x, \tilde{x})$ emerges from the joint statistics of $X$ and $Y$. This approach yields an exact set of self consistent equations for the coding rules $X \to \tilde{X}$ and $\tilde{X} \to Y$. Solutions to these equations can be found by a convergent re-estimation method that generalizes the Blahut-Arimoto algorithm.
Our variational principle provides a surprisingly rich framework for discussing a variety of problems in signal processing and learning, as will be described in detail elsewhere.|sl:tag|tag:information_bottleneck_method -Anaconda|skos:broader|Python 4 Data science -SPARQL en javascript|skos:broader|JavaScript -Artificial Intelligence|skos:broader|Informatique -Uberisation|skos:broader|Uber -ADN mitochondrial|skos:broader|ADN -Zombie PCs|skos:broader|Cybersecurity Sécurité informatique -Alphago|skos:broader|Google -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_firstAuthor|Stephen H. Bach -Intent classification and slot filling|skos:broader|Intent detection -Serbie|skos:broader|Yougoslavie Ex Yougoslavie -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. 
The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Rohun Saxena -Amérique latine|skos:broader|Amérique -Graph Convolutional Networks (GCNs) strike a balance between modeling the full structure of the graph dynamically, as the tensor model does, and modeling the local neighbourhood structure through extracted features (as substructure counting methods and RDF2Vec do). ([source](/doc/2019/08/the_knowledge_graph_as_the_defa)) |skos:broader|Feed-forward artificial neural network where the individual neurons are tiled in such a way that they respond to overlapping regions in the visual field. CNNs use convolutions over the input layer to compute the output. Widely used models for image and video recognition. Main assumption: Data are compositional, they are formed of patterns that are: - Local - Stationary - Multi-scale (hierarchical) ConvNets leverage the compositionality structure: They extract compositional features and feed them to classifier, recommender, etc. (end-to-end). -Automotive and web technologies|skos:broader|Automobile -Equitation|skos:broader|Cheval -France / Afrique|skos:broader|Afrique francophone -SPARQL: sample code|skos:broader|Sample code -iphone app|skos:broader|iphone -Scandale des écoutes en Allemagne|skos:broader|NSA spying scandal -Developer documentation|skos:broader|Documentation -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations.
CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE.|sl:tag|tag:arxiv_doc -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:tag|tag:knowledge_graph_deep_learning -Wikidata/RDF|skos:broader|RDF -LDA2vec|skos:broader|Word embeddings -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models.
The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|sl:tag|tag:arxiv_doc -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations. We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|sl:tag|tag:machines_teaching_machines -Islamisme|skos:broader|Islam -rdfQuery|skos:broader|RDFa tool -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|sl:tag|tag:arxiv_doc -ULMFiT|skos:broader|Contextualized word representations -Massacre de la Saint-Barthélemy|skos:broader|Crimes de l'église catholique -Jure Leskovec|skos:broader|AI girls and guys -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. 
Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|sl:tag|tag:gaussian_embedding -Çatalhöyük|skos:broader|Archéologie -Document embeddings|skos:broader|Embeddings in NLP -Juifs|skos:broader|Peuples -INRIA|skos:broader|Informatique -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|sl:arxiv_author|Kai Chen -Frederick Giasson|skos:broader|Technical girls and guys -Productivité|skos:broader|Economie -JSON-LD|skos:broader|Linked Data -Word embeddings|skos:broader|NN 4 NLP -Semencier|skos:broader|Agriculture industrielle -Eau|skos:broader|Grands problèmes -Gates|skos:broader|Technical guys -Hongrie|skos:broader|Europe -Ruby on Rails|skos:broader|Ruby -Java in python|skos:broader|Python -What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. 
For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:arxiv_author|Urvashi Khandelwal -Sparse dictionary learning|skos:broader|Representation learning -GAN|skos:broader|Unsupervised machine learning -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Thomas Dean -Niger : pétrole|skos:broader|Niger -APIs and Linked Data|skos:broader|API -Vietnam|skos:broader|Asie -M3 Multi Media Museum|skos:broader|Musée -Libération|skos:broader|Liberté -Bosnie|skos:broader|Yougoslavie Ex Yougoslavie -Parenté à plaisanterie|skos:broader|Rigolo -Neo-fascites|skos:broader|Fascisme -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. 
Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:arxiv_author|Benjamin Kompa -SproutCore|skos:broader|JavaScript librairies -Toyota|skos:broader|Entreprise -Intellectuel|skos:broader|Penseur -Ruby|skos:broader|Programming language -Ténéré|skos:broader|Sahara -Embeddings in Information Retrieval|skos:broader|Information retrieval -Active learning|skos:broader|Machine learning: techniques -Massively multiplayer online games|skos:broader|Jeux en ligne -Jeremy Howard|skos:broader|AI girls and guys -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|sl:tag|tag:arxiv_doc -Artiste|skos:broader|Art -Explainable NLP|skos:broader|Deep NLP -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|sl:arxiv_author|Rich Caruana -foaf+ssl|skos:broader|foaf -End-To-End Entity Linking|skos:broader|Entity linking -Locality-sensitive hashing (LSH) reduces the dimensionality of high-dimensional data. 
LSH hashes input items so that similar items map to the same “buckets” with high probability (the number of buckets being much smaller than the number of possible input items). LSH has much in common with data clustering and [#nearest neighbor search](nearest_neighbor_search). LSH employs random linear projections (followed by random thresholding) to map data points close in a Euclidean space to similar codes. See Also [#sparse distributed memory](/tag/sparse_distributed_memory), associative memory |skos:broader|Finding items that are similar to a given query is the core aspect of search and retrieval systems, as well as of recommendation engines. -Alistair Miles|skos:broader|SW guys (and girls) -supervised learning models used for classification and regression analysis. An SVM model is a representation of the training examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. Non-probabilistic binary linear classifier (some methods exist to use SVM in a probabilistic classification setting). Can be made non-linear with the kernel trick (implicitly mapping the inputs into high-dimensional feature spaces.) |skos:broader|the machine learning task of inferring a function from labeled training data. -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hmm, I have seen this somewhere before) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|sl:tag|tag:bert -Antimilitarisme|skos:broader|Militaire -Beethoven|skos:broader|Musicien -Bayesian classification|skos:broader|Machine learning: techniques -Crète antique|skos:broader|Grèce -Hackers|skos:broader|Informatique -Knowledge Engineering|skos:broader|Knowledge -Antonin Artaud|skos:broader|Ecrivain -Héctor Lavoe|skos:broader|Salsa -Restaurant|skos:broader|Gastronomie -VirtualBox|skos:broader|Oracle -Pubby|skos:broader|Chris Bizer -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ...
We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:arxiv_author|Hongyuan Zha -Nick Clegg|skos:broader|Homme politique -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|sl:arxiv_firstAuthor|Deepak Nathani -A Dual Embedding Space Model for Document Ranking Investigate neural word embeddings as a source of evidence in document ranking. Presented in [this Stanford course on IR](/doc/?uri=https%3A%2F%2Fweb.stanford.edu%2Fclass%2Fcs276%2Fhandouts%2Flecture20-distributed-representations.pdf) by Chris Manning (starting slide 44) They train a word2vec model, but retain both the input and the output projections. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. 
However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives A fundamental goal of search engines is to identify, given a query, documents that have relevant text. This is intrinsically difficult because the query and the document may use different vocabulary, or the document may contain query words without being relevant. We investigate neural word embeddings as a source of evidence in document ranking. We train a word2vec embedding model on a large unlabelled query corpus, but in contrast to how the model is commonly used, we retain both the input and the output projections, allowing us to leverage both the embedding spaces to derive richer distributional relationships. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. We postulate that the proposed Dual Embedding Space Model (DESM) captures evidence on whether a document is about a query term in addition to what is modelled by traditional term-frequency based approaches. Our experiments show that the DESM can re-rank top documents returned by a commercial Web search engine, like Bing, better than a term-matching based signal like TF-IDF. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives, retrieving documents that are only loosely related to the query. We demonstrate that this problem can be solved effectively by ranking based on a linear mixture of the DESM and the word counting features.|sl:arxiv_author|Bhaskar Mitra -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|sl:arxiv_author|Christopher Kanan -Bruxelles|skos:broader|Belgique -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? 
Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|sl:arxiv_author|Mehwish Alam -Elevage porcin|skos:broader|Porc -Minting URIs|skos:broader|URI -OpenStructs|skos:broader|Frederick Giasson -W3C|skos:broader|Technologie -Mixture distribution|skos:broader|Statistics -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:tag|tag:arxiv_doc -Backpropagation vs Biology|skos:broader|Computational Neuroscience -Comète|skos:broader|Astéroïde -Mésopotamie|skos:broader|Irak -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. 
Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Vinicius Zambaldi -Machine Learning + Semantic Web|skos:broader|Knowledge Graphs -"Why Should I Trust You?": Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks).
We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|sl:arxiv_firstAuthor|Marco Tulio Ribeiro -L'Afrique à la Bastille - 13 juillet 2007|skos:broader|14 juillet -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|sl:tag|tag:bi_lstm -Yougoslavie Ex Yougoslavie|skos:broader|Europe -Internet regulation|skos:broader|Internet -La Ronde de Nuit|skos:broader|Rembrandt -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|sl:arxiv_author|Honglak Lee -Machine Learning: business|skos:broader|Machine learning -Elda|skos:broader|Jena -Croissance|skos:broader|Economie -Pillage du palais d'été|skos:broader|Histoire coloniale -data labeling is usually the bottleneck in developing NLP applications. Pbs of shifting contexts on social networks|skos:broader|the bottleneck of getting labeled training data -Financial Data|skos:broader|Finance -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. 
In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|sl:arxiv_author|Cristóbal Esteban -Weapon of mass distraction|skos:broader|The web sucks -Attention in Graphs|skos:broader|Attention mechanism -Dosso|skos:broader|Jerma -Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised `skeleton' table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|sl:tag|tag:pdf_extract -Vénus|skos:broader|Système solaire -Java 7|skos:broader|Java -Animal rights|skos:broader|Animal -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. 
On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:tag|tag:dl_why_does_it_work -Economie de la gratuité|skos:broader|Economie -Climate crisis|skos:broader|Grands problèmes -KBPedia|skos:broader|Frederick Giasson -Keep new|skos:broader|To do -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|sl:tag|tag:visually_rich_documents -Personal archives|skos:broader|Personal-information management -Djibouti|skos:broader|Afrique de l'Est -Coursera: Deep Learning|skos:broader|Machine Learning Course -Janis Joplin|skos:broader|Musicien -Cassandra|skos:broader|apache.org -Meta Reinforcement Learning|skos:broader|Reinforcement learning -OS X Unix|skos:broader|Unix -ELMo|skos:broader|Deep NLP -Cross-Modal Retrieval|skos:broader|Information retrieval -A Dual Embedding Space Model for Document Ranking Investigate neural word embeddings as a source of evidence in document ranking. Presented in [this Stanford course on IR](/doc/?uri=https%3A%2F%2Fweb.stanford.edu%2Fclass%2Fcs276%2Fhandouts%2Flecture20-distributed-representations.pdf) by Chris Manning (starting slide 44) They train a word2vec model, but retain both the input and the output projections. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives A fundamental goal of search engines is to identify, given a query, documents that have relevant text. This is intrinsically difficult because the query and the document may use different vocabulary, or the document may contain query words without being relevant. We investigate neural word embeddings as a source of evidence in document ranking. We train a word2vec embedding model on a large unlabelled query corpus, but in contrast to how the model is commonly used, we retain both the input and the output projections, allowing us to leverage both the embedding spaces to derive richer distributional relationships. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. 
We postulate that the proposed Dual Embedding Space Model (DESM) captures evidence on whether a document is about a query term in addition to what is modelled by traditional term-frequency based approaches. Our experiments show that the DESM can re-rank top documents returned by a commercial Web search engine, like Bing, better than a term-matching based signal like TF-IDF. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives, retrieving documents that are only loosely related to the query. We demonstrate that this problem can be solved effectively by ranking based on a linear mixture of the DESM and the word counting features.|sl:arxiv_author|Eric Nalisnick -Champignon|skos:broader|Biology -Pubby|skos:broader|Richard Cyganiak -Archéologie européenne|skos:broader|Archéologie -Mission "Voulet-Chanoine"|skos:broader|Horreur -VOAF|skos:broader|RDF Vocabularies -danbri|skos:broader|Web sémantique sw -Stanford POS Tagger|skos:broader|Part Of Speech Tagging -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|sl:arxiv_firstAuthor|Luke Vilnis -TextBlob|skos:broader|NLTK -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention.
Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|sl:tag|tag:allen_institute_for_ai_a2i -Talis platform|skos:broader|Talis -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|sl:arxiv_author|Haisong Zhang -Microsoft Concept Graph|skos:broader|Microsoft Research -M3 Multi Media Museum|skos:broader|hyperSOLutions -La main à la pâte|skos:broader|Education -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|sl:arxiv_author|Zico Kolter -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity.
Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|sl:tag|tag:consciousness_prior -Prohibition des narcotiques|skos:broader|Drogues -Sarkozy et la recherche|skos:broader|Recherche française -Origine de la vie|skos:broader|Evolution -Microsoft Concept Graph|skos:broader|NLU -Moussa Poussi|skos:broader|Musicien -Linked Data API|skos:broader|Linked Data -RDF Tools|skos:broader|Semantic Web : Tools -Training data|skos:broader|Machine learning -OwlSight|skos:broader|OWL ontology browser -GNU Octave|skos:broader|Programming language -Vaccin|skos:broader|Médecine -A Survey on Deep Learning for Named Entity Recognition mainly focus on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications.
Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|sl:tag|tag:named_entity_recognition -Brain|skos:broader|Biology -BigQuery|skos:broader|Google Cloud -A Review of Relational Machine Learning for Knowledge Graphs Relational machine learning studies methods for the statistical analysis of relational, or graph-structured, data. In this paper, we provide a review of how such statistical models can be trained on large knowledge graphs, and then used to predict new facts about the world (which is equivalent to predicting new edges in the graph). In particular, we discuss two fundamentally different kinds of statistical relational models, both of which can scale to massive datasets. The first is based on latent feature models such as tensor factorization and multiway neural networks. The second is based on mining observable patterns in the graph. We also show how to combine these latent and observable models to get improved modeling power at decreased computational cost. Finally, we discuss how such statistical models of graphs can be combined with text-based information extraction methods for automatically constructing knowledge graphs from the Web. To this end, we also discuss Google's Knowledge Vault project as an example of such combination.|sl:arxiv_firstAuthor|Maximilian Nickel -Tabulator|skos:broader|AJAR -Tagging|skos:broader|Semanlink related -A Primer on Neural Network Models for Natural Language Processing Over the past few years, neural networks have re-emerged as powerful machine-learning models, yielding state-of-the-art results in fields such as image recognition and speech processing. More recently, neural network models started to be applied also to textual natural language signals, again with very promising results. This tutorial surveys neural network models from the perspective of natural language processing research, in an attempt to bring natural-language researchers up to speed with the neural techniques. The tutorial covers input encoding for natural language tasks, feed-forward networks, convolutional networks, recurrent networks and recursive networks, as well as the computation graph abstraction for automatic gradient computation.|sl:tag|tag:arxiv_doc -Biodiversité : effondrement|skos:broader|Crise écologique -A system for rapidly creating training sets with weak supervision The System for Programmatically Building and Managing Training Data|skos:broader|Noisy, limited, or imprecise sources are used to provide supervision signal for labeling large amounts of training data in a supervised learning setting Programmatic or otherwise more efficient but noisier ways of generating training label -JSON Visualization|skos:broader|JSON -Ontologies|skos:broader|Knowledge Representation -Débarquement|skos:broader|2eme guerre mondiale -Verts|skos:broader|Politique -FIBO|skos:broader|Ontologies -Slot tagging|skos:broader|Chatbots -Chirac|skos:broader|Homme politique -La France est un pays régicide|skos:broader|France -RapidMiner/Java|skos:broader|RapidMiner -CNN 4 NLP|skos:broader|NN 4 NLP -Semantic Media Wiki|skos:broader|Semantic Wiki -Siri|skos:broader|Apple Software -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. 
Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:tag|tag:arxiv_doc -Explainable NLP|skos:broader|Explainable AI -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:arxiv_author|Adam Pearce -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. 
First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:arxiv_author|Ekin D. Cubuk -OntoWiki|skos:broader|Semantic Web : Tools -Unsupervised deep pre-training|skos:broader|Pretrained models -A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|sl:arxiv_author|Hongyun Cai -Mac software|skos:broader|Software -Topic Models + Word embedding|skos:broader|Word embeddings -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. 
However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|sl:tag|tag:arxiv_doc -Devices|skos:broader|Informatique -Darfour|skos:broader|Génocide -Angela Merkel|skos:broader|Chef d'état -Barcamp|skos:broader|Event -Job title normalization|skos:broader|NLP + Human Resources -Wikileaks|skos:broader|Leaks -Syngenta|skos:broader|Suisse -Weak supervision|skos:broader|Machine learning: techniques -ARQ property functions|skos:broader|ARQ -Future Combat Systems|skos:broader|Armement -Hadoop|skos:broader|apache.org -Boucle ferroviaire d’Afrique de l’Ouest|skos:broader|Bolloré -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. 
On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:tag|tag:multi_hop_reasonning -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|sl:tag|tag:survey -A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|sl:tag|tag:graph_neural_networks -Energie solaire|skos:broader|Energies renouvelables -Semanlink2|skos:broader|Semanlink -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. 
proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:tag|tag:recommender_systems -Masse manquante|skos:broader|Enigmes de la physique -End-To-End Memory Networks Neural network with a recurrent attention model over a possibly large external memory. cité par [#A. Bordes](/tag/antoine_bordes) à [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html) We introduce a neural network with a recurrent attention model over a possibly large external memory. The architecture is a form of Memory Network (Weston et al., 2015) but unlike the model in that work, it is trained end-to-end, and hence requires significantly less supervision during training, making it more generally applicable in realistic settings. It can also be seen as an extension of RNNsearch to the case where multiple computational steps (hops) are performed per output symbol. The flexibility of the model allows us to apply it to tasks as diverse as (synthetic) question answering and to language modeling. For the former our approach is competitive with Memory Networks, but with less supervision. For the latter, on the Penn TreeBank and Text8 datasets our approach demonstrates comparable performance to RNNs and LSTMs. In both cases we show that the key concept of multiple computational hops yields improved results.|sl:arxiv_author|Arthur Szlam -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. 
To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:tag|tag:arxiv_doc -Cocteau|skos:broader|Poète -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|sl:arxiv_firstAuthor|Zhiqing Sun -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction.
On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:arxiv_firstAuthor|Pang Wei Koh -Installing WordPress|skos:broader|WordPress -NLP@IBM|skos:broader|NLP Teams -Censure et maltraitance animale|skos:broader|Censorship -Nuclear Power? No thanks|skos:broader|Industrie nucléaire -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Oriol Vinyals -Uncertainty Reasoning AND Semantic Web|skos:broader|Uncertainty Reasoning -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. 
We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|sl:arxiv_firstAuthor|Lei Jimmy Ba -Machine translation|skos:broader|Cross-lingual NLP -Recurrent neural network|skos:broader|Neural networks -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as the tissue is 75% likely to be tumorous or the chemical is 25% likely to be toxic are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include don't know that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an Uncertain answer similar to a human reply of don't know or 50:50 in the examples we refer to earlier, this would translate to actions such as operate on tumour or remove compound from use where the models give a more true than not answer. Where the models judge the result Uncertain the practical decision might be carry out more detailed laboratory testing of compound or commission new tissue analyses. The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating Uncertain from binary predictions and finally, we provide arguments for its use in practice.|sl:tag|tag:arxiv_doc -gensim|skos:broader|Python-NLP -JsonLD + MongoDB|skos:broader|JSON-LD -NN / Symbolic AI hybridation|skos:broader|Artificial Intelligence -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation.
The token identity then gets recreated at the top MLM layers.|sl:arxiv_firstAuthor|Elena Voita -Denny Vrandečić|skos:broader|SW guys (and girls) -Livesearch|skos:broader|IHM web -Nasca|skos:broader|Civilisations précolombiennes -Apprentissage|skos:broader|Divers -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:arxiv_author|Guillaume Lample -Cybersecurity Sécurité informatique|skos:broader|Security -Linear classifier|skos:broader|Statistical classification -Agriculture africaine|skos:broader|Agriculture -Voyager|skos:broader|Missions spatiales -KR|skos:broader|IA AI -Hydra|skos:broader|REST -Todo list|skos:broader|To do -Catholicisme|skos:broader|Religion -Coupe du monde de football|skos:broader|Football -Grounded Language Learning|skos:broader|NLP -Voiture électrique|skos:broader|Automotive -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy text, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering.
Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|sl:arxiv_author|Matthew Hayes -Table-based Fact Verification|skos:broader|Fact verification -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:tag|tag:knowledge_distillation -Droit et internet|skos:broader|Internet -Hypercard|skos:broader|Apple Software -Spectral clustering|skos:broader|Dimensionality reduction -ElasticSearch: annotated text field|skos:broader|Entities -MPAA|skos:broader|Content industries -Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. 
Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|sl:arxiv_author|Tomas Mikolov -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|sl:tag|tag:arxiv_doc -Universités américaines|skos:broader|Enseignement supérieur -Text feature extraction|skos:broader|Text: dimension reduction -Protégé|skos:broader|Stanford -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|sl:arxiv_author|Jialong Han -HttpUnit|skos:broader|Web dev -SPARQL AND Jena|skos:broader|SPARQL -Multiagent AI|skos:broader|Artificial Intelligence -HDMI|skos:broader|DRM -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. 
It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:tag|tag:google_brain -Civilisation élamite|skos:broader|Antiquité iranienne -Search Engines|skos:broader|Internet -each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the linking decisions. |skos:broader|= named entity disambiguation: the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base. -apache.org|skos:broader|Software -Network embeddings Representation Learning on Networks Graph representation learning Network Representation Learning|skos:broader|embedding -Mac OS X Web serving|skos:broader|Web Serving -the machine learning task of inferring a function from labeled training data.|skos:broader|Machine learning focuses on prediction, based on known properties learned from the training data. Data mining (which is the analysis step of Knowledge Discovery in Databases) focuses on the discovery of (previously) unknown properties on the data. [Glossary (by google)](https://developers.google.com/machine-learning/glossary/) -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. 
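Before the experimental results quoted below, a hedged sketch of the Beta-Binomial case of the conjugate Bayesian encoding described in this entry: for a binary target, each level of a categorical column is encoded by the posterior mean of its success probability. The column names, data, and prior values here are illustrative assumptions, not taken from the paper.

```python
# Conjugate Bayesian (Beta-Binomial) encoding of a high-cardinality
# categorical column against a binary target -- illustrative sketch only.
import pandas as pd

df = pd.DataFrame({
    "city":   ["nyc", "nyc", "sf", "sf", "sf", "austin"],
    "toured": [1,     0,     1,    1,    0,    0],
})

alpha, beta = 1.0, 1.0  # uniform Beta prior; an arbitrary choice here
stats = df.groupby("city")["toured"].agg(["sum", "count"])

# Posterior is Beta(alpha + successes, beta + failures); encode each level
# by its posterior mean, so rare levels shrink toward the prior.
posterior_mean = (alpha + stats["sum"]) / (alpha + beta + stats["count"])
df["city_encoded"] = df["city"].map(posterior_mean)
print(df)
```

The same recipe extends to other targets by swapping the conjugate pair (e.g., Normal-Inverse-Gamma for a continuous target), which is the "problem-specific prior" idea in the abstract.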
Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|sl:arxiv_author|Austin Slakey -Servlet|skos:broader|Java -Journal Le Monde|skos:broader|Presse -Faille de sécurité|skos:broader|Cybersecurity Sécurité informatique -Drupal/RDF|skos:broader|Drupal -R2RML|skos:broader|Relational Databases and the Semantic Web -IBM Watson|skos:broader|Artificial Intelligence -Biotech industry|skos:broader|Biotechnologies Biotechnologies -Coal seam fire|skos:broader|Fire -gnizr|skos:broader|Tagging -Encyclopédie collaborative|skos:broader|Encyclopédie -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:tag|tag:acl_2019 -Extinction d'espèces|skos:broader|Biodiversité -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent results in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) networks on three large German, Italian, and English datasets. Additionally, we perform an in-depth analysis of various linguistic dimensions that RMN captures. On the Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent results in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge.
In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) networks on three large German, Italian, and English datasets. Additionally, we perform an in-depth analysis of various linguistic dimensions that RMN captures. On the Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|sl:arxiv_author|Arianna Bisazza -RDF-in-JSON|skos:broader|RDF -Erta Ale|skos:broader|Ethiopie -Ecrivain|skos:broader|Homme célèbre -A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|sl:arxiv_author|Zonghan Wu -Paludisme|skos:broader|Maladie -Fichage génétique|skos:broader|Etat policier -EigenVectors|skos:broader|Linear algebra -Rocard|skos:broader|Homme politique -WTP|skos:broader|Eclipse -Ubuntu|skos:broader|Linux -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a Wizard of Oz setup.
Macaw is distributed under the MIT License.|sl:arxiv_author|Nick Craswell -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Andrea Tacchetti -Medical IR, ML, IA|skos:broader|IA/ML: domaines d'application -Disparition de langues vivantes|skos:broader|Langues vivantes -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. 
This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|sl:arxiv_author|Wenbin Jiang -Synthetic Genome|skos:broader|Artificial life -Obélisque d'Axoum|skos:broader|Pillage de vestiges antiques -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|sl:tag|tag:arxiv_doc -Alan Kay|skos:broader|Technical girls and guys -Bush|skos:broader|Chef d'état -AI@Google|skos:broader|Artificial Intelligence -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. 
For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:arxiv_author|Haoran Li -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|sl:arxiv_firstAuthor|Chih-Kuan Yeh -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommandé par [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. 
Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|sl:arxiv_author|Ole Winther -Micropayments on the web|skos:broader|Finance -Talis platform|skos:broader|Semantic Web Platform -Browser : back button|skos:broader|Web dev -AI, robots and jobs|skos:broader|Artificial Intelligence -RDF and database|skos:broader|Semantic Web: databases -Puceron|skos:broader|Insecte -Semanlink|skos:broader|RDF Application -Banlieue|skos:broader|Ville -Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised 'skeleton' table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|sl:tag|tag:generative_adversarial_network -Film allemand|skos:broader|Allemagne -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Jesse Thomason -"Self-Governing Neural Networks"|skos:broader| -Histoire de l'art|skos:broader|Art -Conscience|skos:broader|Brain -Peinture|skos:broader|Art -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering.
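A minimal numpy sketch of the hash-embedding construction described above: each token is represented by k vectors selected by hashing from a shared pool of B vectors and combined with a k-dimensional weight vector. The salted crc32 hashes and the hash-derived (rather than learned) weights are simplifications assumed here for self-containment.

```python
# Hash embedding sketch: k pool vectors per token, selected by hashing,
# combined by a per-token weight vector. No dictionary is needed.
import numpy as np
from zlib import crc32

B, d, k = 1000, 16, 2                       # pool size, dim, hash functions
rng = np.random.default_rng(0)
pool = rng.normal(size=(B, d))              # shared pool of embedding vectors

def h(token: str, salt: int) -> int:
    return crc32(f"{salt}:{token}".encode())

def hash_embedding(token: str) -> np.ndarray:
    idx = [h(token, i) % B for i in range(k)]          # k component vectors
    # In the paper the k weights are trained; here they are hash-derived.
    w = np.array([1.0 + (h(token, 100 + i) % 100) / 100 for i in range(k)])
    return w @ pool[idx]                               # weighted sum, shape (d,)

print(hash_embedding("unbounded-vocabulary-token").shape)  # (16,)
```

The parameter budget is B*d plus k weights per token, which is how the method handles vocabularies of millions of tokens without pruning.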
Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|sl:arxiv_firstAuthor|Amita Kamath -WWW 2009|skos:broader|TheWebConf -RDFa Lite|skos:broader|RDFa -explains the predictions of a classifier by learning an interpretable model locally around the prediction |skos:broader|the problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations whose category membership is known. -Protégé|skos:broader|Ontologies -Machine Learning + Semantic Web|skos:broader|Machine learning -Servlet|skos:broader|Java dev -Hierarchical temporal memory|skos:broader|Memory-prediction framework -Meta Content Framework|skos:broader|Guha -France : dysfonctionnement administratif|skos:broader|Administration française -Coursera: Web Intelligence and Big Data|skos:broader|Coursera -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|sl:arxiv_author|Shaojie Bai -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. 
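To make the selective QA entry above concrete, here is a hedged sketch of the calibrator idea: fit a small model that predicts whether the QA system erred, and abstain when the predicted error risk is high. The features, the synthetic data, and the use of sklearn's LogisticRegression are all illustrative assumptions, not the paper's setup.

```python
# Calibrated abstention sketch: predict the QA model's error probability
# from confidence features, abstain above a risk threshold tau.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
max_prob = rng.uniform(0.2, 1.0, size=500)              # model confidence
entropy = -max_prob * np.log(max_prob)                   # toy second feature
X = np.column_stack([max_prob, entropy])
y_err = (rng.uniform(size=500) > max_prob).astype(int)   # 1 = model erred

calibrator = LogisticRegression().fit(X, y_err)

def answer_or_abstain(features, tau=0.3):
    risk = calibrator.predict_proba([features])[0, 1]
    return "abstain" if risk > tau else "answer"

print(answer_or_abstain([0.9, -0.9 * np.log(0.9)]))
```

Sweeping tau traces out the coverage/accuracy trade-off that the quoted 56%-at-80%-accuracy figure refers to.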
In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|sl:tag|tag:ml_evaluation -Semantic Enterprise|skos:broader|Semantic Web -Afrique Centrale|skos:broader|Afrique -Glyphosate|skos:broader|Monsanto -Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. These findings are significant as it suggests a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|sl:arxiv_author|Oyvind Tafjord -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. 
Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|sl:tag|tag:baidu -Keras Functional API|skos:broader|Keras -Mac OS X Web serving|skos:broader|Mac OS X -Deep latent variable models assume a generative process whereby a simple random variable is transformed from the latent space to the observed, output space through a deep neural network. Generative Adversarial Networks (GAN) and Variational Autoencoders (VAE) are two of the most popular variants of this approach|skos:broader|statistical model that relates a set of observable variables (manifest variables) to a set of latent variables A latent variable is one which is not directly observed but which is assumed to affect observed variables. Latent variable models therefore attempt to model the underlying structure of the observed variables, offering an explanation of the dependencies between observed variables which are then seen as conditionally independent, given the latent variable(s). ([source](https://supernlp.github.io/2018/11/10/emnlp-2018/)) -Map–territory relation|skos:broader|General semantics -www.topquadrant.com|skos:broader|Semantic Web : entreprise Semantic Web: enterprise -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|sl:arxiv_author|John Boaz Lee -Beijing Genomics Institute|skos:broader|Chine : technologie -DL: why does it work?|skos:broader|Deep Learning -Un ivrogne dans la brousse|skos:broader|Nigeria -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. 
In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|sl:arxiv_author|David M. Blei -Statistical relational learning|skos:broader|Machine learning: problems -Rome|skos:broader|Ville -Brésil|skos:broader|Amérique du sud -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:tag|tag:ray_kurzweil -David Beckett|skos:broader|SW guys (and girls) -Dictateur|skos:broader|Dictature -Dalai Lama|skos:broader|Boudhisme -NLU|skos:broader|NLP tasks / problems -VoIP|skos:broader|Téléphone -FaceNet: A Unified Embedding for Face Recognition and Clustering Learns a Euclidean embedding per image Uses a deep CNN trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. state-of-the-art face recognition performance using only 128-bytes per face. 
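In equations, the quantity the variational inference review above revolves around is the evidence lower bound; this is the standard identity behind the abstract's description, stated here as background rather than quoted from the paper:

```latex
\log p(x) = \mathrm{ELBO}(q) + \mathrm{KL}\!\left(q(z)\,\|\,p(z \mid x)\right),
\qquad
\mathrm{ELBO}(q) = \mathbb{E}_{q(z)}\!\left[\log p(x, z) - \log q(z)\right].
```

Since the KL term is non-negative, maximizing the ELBO over the posited family simultaneously lower-bounds the evidence log p(x) and minimizes the Kullback-Leibler divergence to the posterior, which is the closeness criterion the abstract names.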
Despite significant recent advances in the field of face recognition, implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors. Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. Our system cuts the error rate in comparison to the best published result by 30% on both datasets. We also introduce the concept of harmonic embeddings, and a harmonic triplet loss, which describe different versions of face embeddings (produced by different networks) that are compatible to each other and allow for direct comparison between each other.|sl:arxiv_firstAuthor|Florian Schroff -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|sl:tag|tag:arxiv_doc -Trust in the Web of Data|skos:broader|Web of data -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. 
Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:tag|tag:arxiv_doc -Capitalism Surveillance|skos:broader|Vive le capitalisme ! -Security and REST|skos:broader|Cybersecurity Sécurité informatique -gnowsis|skos:broader|Semantic Desktop -DRM|skos:broader|Propriété intellectuelle -Deep Learning|skos:broader|Neural networks -Niamey|skos:broader|Niger -Munich|skos:broader|Allemagne -SKOS|skos:broader|Semanlink related -Foxconn|skos:broader|Chine -Référentiel des opérations|skos:broader|APV evolution -Syrian Civil War|skos:broader|Syrie -Bertrand Russell|skos:broader|Prix Nobel -Symmetric matrices related to the Mertens function In this paper we explore a family of congruences over N from which a sequence of symmetric matrices related to the Mertens function is built. From the results of numerical experiments we formulate a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important role in this classical and difficult problem. In this paper we explore a family of congruences over $\\N^\\ast$ from which one builds a sequence of symmetric matrices related to the Mertens function. From the results of numerical experiments, we formulate a conjecture about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may come to play a more important role in this classical and difficult problem.|sl:arxiv_author|Jean-Paul Cardinal -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. 
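Looking back at the FaceNet entry above, the training signal it describes is the triplet loss; a toy numpy version follows. The margin value and the random "embeddings" are arbitrary stand-ins; embeddings are L2-normalized as the method assumes.

```python
# Triplet loss sketch: pull anchor toward positive (same identity), push it
# from negative (different identity), up to a margin.
import numpy as np

def triplet_loss(a, p, n, margin=0.2):
    return max(0.0, np.sum((a - p) ** 2) - np.sum((a - n) ** 2) + margin)

rng = np.random.default_rng(0)
a, p, n = (v / np.linalg.norm(v) for v in rng.normal(size=(3, 128)))
print(triplet_loss(a, p, n))
```

The "online triplet mining" mentioned in the abstract amounts to choosing, within a batch, the hard positives and negatives for which this loss is non-zero, so gradients are not wasted on already-satisfied triplets.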
To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:tag|tag:nlp_long_documents -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|sl:arxiv_author|Yige Xu -Apache Mahout|skos:broader|Machine Learning tool -Plastic print|skos:broader|3D -limule|skos:broader|Curiosité naturelle -Language model|skos:broader|Unsupervised machine learning -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Gerard de Melo -Banque mondiale|skos:broader|Economie -Neural networks|skos:broader|Machine learning: techniques -Roy T. Fielding|skos:broader|Technical girls and guys -Accueil étranger|skos:broader|Cons de Français -Coursera: Machine Learning|skos:broader|Machine Learning Course -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. 
Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|sl:arxiv_author|Chandramouli Shama Sastry -Londres|skos:broader|Ville -WWW 2007|skos:broader|TheWebConf -LDOW2008|skos:broader|LDOW -The Guardian|skos:broader|Presse -NLP@IBM|skos:broader|IBM -SPARQL 1.1|skos:broader|SPARQL -Perelman|skos:broader|Conjecture de Poincaré -Web tools|skos:broader|Dev -Named Entity Disambiguation using Deep Learning on Graphs Evaluation of different deep learning techniques to create a context vector from graphs, aimed at high-accuracy NED. (neural approach for entity disambiguation using graphs as background knowledge) We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task... [published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) In NED, the system must be able to generate a context for an entity in a text and an entity in a knowledge base, then correctly link the two. Explore whether representing graphs as triplets is more useful than using the full topological information of the graph We tackle NED by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task. In addition, a new dataset (WikidataDisamb) is created to allow a clean and scalable evaluation of NED with Wikidata entries, and to be used as a reference in future research. In the end our results show that a Bi-LSTM encoding of the graph triplets performs best, improving upon the baseline models and scoring an F1 value of 91.6% on the WikidataDisamb test set.|sl:tag|tag:arxiv_doc -Facial Recognition|skos:broader|Image recognition -Zemanta|skos:broader|Semantic Web : Application -Yves Roth|skos:broader|Lycée Alain -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term).
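A rough numpy sketch of the Gram-matrix OOD idea from the entry above: record per-entry min/max of a layer's Gram matrix over training data, then score a test example by how far its Gram entries fall outside those ranges. The real method uses higher-order Gram matrices over convolutional feature maps across many layers; flattening to one vector per example is a simplification assumed here.

```python
# Gram-matrix OOD scoring sketch: in-range activations score 0, anomalous
# activation patterns accumulate deviation.
import numpy as np

rng = np.random.default_rng(0)
train_feats = rng.normal(size=(1000, 32))      # stand-in layer activations

def gram(f):
    return np.outer(f, f)                       # feature co-activation matrix

grams = np.stack([gram(f) for f in train_feats])
g_min, g_max = grams.min(axis=0), grams.max(axis=0)  # per-entry ranges

def deviation(f):
    g = gram(f)
    low = np.clip(g_min - g, 0, None) / (np.abs(g_min) + 1e-8)
    high = np.clip(g - g_max, 0, None) / (np.abs(g_max) + 1e-8)
    return (low + high).sum()

print(deviation(train_feats[0]), deviation(rng.normal(5, 1, size=32)))
```

Because only training-set ranges are stored, the detector needs no OOD data and no retraining, which is the property the abstract emphasizes.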
Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:arxiv_author|Dinesh Garg -Statistical machine translation|skos:broader|Machine translation -Semantic Camp Paris|skos:broader|Semantic Web -Publication scientifique|skos:broader|Science -Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \compositional\ training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|sl:tag|tag:knowledge_graph_completion -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. 
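The span-selection pre-training instance described above is easy to illustrate; this sketch constructs one, with the sentences invented for illustration and the BM25 retrieval step omitted.

```python
# Build a span-selection training instance: blank a term in the query and
# pair it with an answer-bearing passage (retrieval via BM25 not shown).
def make_span_selection_example(sentence: str, answer: str, passage: str):
    assert answer in sentence and answer in passage  # passage must bear answer
    return {
        "query": sentence.replace(answer, "[BLANK]", 1),
        "passage": passage,
        "answer": answer,
    }

ex = make_span_selection_example(
    "Marie Curie discovered polonium in 1898.",
    "polonium",
    "In 1898 the Curies announced the discovery of polonium and radium.",
)
print(ex["query"])   # Marie Curie discovered [BLANK] in 1898.
```

Unlike the cloze task, the model cannot answer from its own parameters alone; it must locate the answer span in the passage, which is the "offloading general knowledge" point made above.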
See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:arxiv_author|Jaap Kamps -Ghana|skos:broader|Afrique de l'Ouest -Cazuza|skos:broader|Musique -Backpropagation|skos:broader|Algorithmes -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. 
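The weak-supervision recipe above starts from BM25 pseudo-labels; a small sketch follows. The rank_bm25 package is one available implementation, used here as an assumption rather than the paper's tooling, and the corpus is a toy.

```python
# Use BM25 scores as weak relevance labels: no human annotation, just an
# unsupervised ranker acting as the "teacher".
from rank_bm25 import BM25Okapi

corpus = [
    "neural ranking models for information retrieval",
    "the history of the roman empire",
    "weak supervision for learning to rank",
]
bm25 = BM25Okapi([doc.split() for doc in corpus])

query = "learning to rank with weak labels".split()
weak_labels = bm25.get_scores(query)    # pseudo-relevance scores per document
pairs = sorted(zip(weak_labels, corpus), reverse=True)
print(pairs[0])                          # top weakly-labeled document
```

At scale, millions of such (query, document, score) triples become training data for a neural ranker, which is how the student can end up outperforming its BM25 teacher.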
We additionally compare different language models' ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:arxiv_author|Sameer Singh
-Information sur internet|skos:broader|Journalisme
-Lord's Resistance Army|skos:broader|Horreur
-Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. We build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [OpenReview](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|sl:arxiv_firstAuthor|Ziyi Yang
-Périclès|skos:broader|Grèce antique
-Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining.
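The orthogonal-basis construction at the heart of GEM can be illustrated in a few lines of numpy. This is a sketch of the geometric idea only (projection onto the context subspace plus an orthogonal "novelty" residual), not the authors' full algorithm, which also weights and aggregates these components into a sentence vector.

```python
import numpy as np

def novelty_component(word_vec, context_vecs):
    """Part of `word_vec` orthogonal to the subspace spanned by its context.

    QR gives an orthonormal basis of the context subspace (Gram-Schmidt in
    matrix form); the residual after projection is the word's "novel" meaning,
    while the projection itself is its relatedness to the context.
    """
    Q, _ = np.linalg.qr(np.stack(context_vecs, axis=1))   # (d, k) basis
    projection = Q @ (Q.T @ word_vec)                     # relatedness part
    return word_vec - projection                          # novelty part
```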
However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:arxiv_author|Wei Wei -Sören Auer|skos:broader|SW guys (and girls) -Lingo|skos:broader|Carrot2 -ELMo|skos:broader|AllenNLP -Livesearch|skos:broader|Web dev -Madonna|skos:broader|Musicien -Windows Media Player|skos:broader|Windows -Scientifique|skos:broader|Homme célèbre -Wiki|skos:broader|Software -Linked Data Dev|skos:broader|Linked Data -Semantic Camp Paris|skos:broader|Barcamp -Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised `skeleton' table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|sl:tag|tag:genetic_algorithm -Mongol|skos:broader|Asie -Yoshua Bengio|skos:broader|NLP girls and guys -Agriculture industrielle|skos:broader|Agriculture -Forward chaining|skos:broader|Entailment -Combining knowledge graphs|skos:broader|Multiple Knowledge Bases -Semantic markup in HTML|skos:broader|RDF -Hierarchical clustering|skos:broader|Clustering -BP|skos:broader|Compagnies pétrolières -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. 
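A rough PyTorch sketch of the entropic-OT relaxation described above: the n scores are transported onto two anchor points ("not selected" / "selected") with marginals ((n-k)/n, k/n), and a few Sinkhorn iterations yield a smooth, differentiable top-k indicator. The anchor choice and iteration count here are illustrative assumptions, not the paper's exact parameterization (which also derives gradients from the optimality conditions rather than unrolling).

```python
import torch

def soft_top_k(scores, k, eps=0.1, n_iter=100):
    """Differentiable relaxation of the top-k indicator via entropic OT."""
    n = scores.shape[0]
    anchors = torch.stack([scores.min(), scores.max()])  # "out" / "in" targets
    C = (scores[:, None] - anchors[None, :]) ** 2        # (n, 2) transport cost
    mu = torch.full((n,), 1.0 / n)                       # source marginal
    nu = torch.tensor([(n - k) / n, k / n])              # target marginal
    K = torch.exp(-C / eps)
    u, v = torch.ones(n), torch.ones(2)
    for _ in range(n_iter):                              # Sinkhorn iterations
        u = mu / (K @ v)
        v = nu / (K.T @ u)
    P = u[:, None] * K * v[None, :]                      # transport plan
    return n * P[:, 1]                                   # soft top-k indicator
```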
The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|sl:arxiv_firstAuthor|Sarath Chandar -Ajax|skos:broader|Asynchronous -RDF in files|skos:broader|RDF -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:knowledge_driven_embeddings -Documentation tool|skos:broader|Dev tools -Synthetic life|skos:broader|Synthetic biology -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. 
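Since the hierarchical memory network above leans on Maximum Inner Product Search, it is worth recalling the standard asymmetric transform from the MIPS literature that lets any approximate nearest-neighbour index serve as the MIPS routine. The numpy sketch below shows that reduction in general, not the paper's specific retrieval structure.

```python
import numpy as np

def augment_memory(M):
    """Append one coordinate so all memory rows share the same norm.

    After this transform, maximizing the inner product with a query over the
    original rows is equivalent to cosine / nearest-neighbour search over the
    augmented rows, so an off-the-shelf ANN index can answer MIPS queries.
    """
    norms = np.linalg.norm(M, axis=1)
    radius = norms.max()
    pad = np.sqrt(radius ** 2 - norms ** 2)
    return np.hstack([M, pad[:, None]])

def augment_query(q):
    # Queries get a zero in the extra coordinate, so inner products are preserved.
    return np.append(q, 0.0)
```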
In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|sl:tag|tag:clustering_of_text_documents
-Google Web Toolkit|skos:broader|Ajax
-SKOS editor|skos:broader|Semanlink related
-Epimorphics json-rdf|skos:broader|Epimorphics
-Spark (Java web framework)|skos:broader|Microservices
-RDF2VEC|skos:broader|RDF embeddings
-Javascript RDF Parser|skos:broader|Javascript RDF
-Tabulator|skos:broader|Tim Berners-Lee
-Parrot|skos:broader|RDF-OWL documentation tool
-Rio de Janeiro|skos:broader|Brésil
-LDOW2012|skos:broader|WWW 2012
-JCS - Java Caching System|skos:broader|Cache
-public-vocabs@w3.org|skos:broader|schema.org
-StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|sl:arxiv_author|Antoine Bordes
-Named Entity Disambiguation using Deep Learning on Graphs Evaluation of different deep learning techniques to create a context vector from graphs, aimed at high-accuracy NED. (neural approach for entity disambiguation using graphs as background knowledge) We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task... [published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) In NED, the system must be able to generate a context for an entity in a text and an entity in a knowledge base, then correctly link the two. Explore whether representing graphs as triplets is more useful than using the full topological information of the graph We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED.
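The three-step loop from the clustering abstract above maps naturally to code. A minimal PyTorch sketch follows; the encoder architecture and the handling of labeled items are illustrative assumptions (the paper's objective is richer), and empty clusters are not handled, for brevity.

```python
import torch
import torch.nn as nn

encoder = nn.Sequential(nn.Linear(300, 128), nn.Tanh(), nn.Linear(128, 32))
optimizer = torch.optim.Adam(encoder.parameters(), lr=1e-3)

def train_epoch(X, labeled_idx, labeled_cluster, centroids):
    reps = encoder(X)
    # Step 1: assign each text to its nearest centroid, forcing labeled texts
    # into their annotated cluster (the "intention" supervision).
    assign = torch.cdist(reps, centroids).argmin(dim=1)
    assign[labeled_idx] = labeled_cluster
    # Step 2: re-estimate centroids from the current assignments.
    new_centroids = torch.stack(
        [reps[assign == c].mean(dim=0) for c in range(centroids.shape[0])])
    # Step 3: update the encoder, keeping centroids and assignments fixed.
    loss = ((reps - new_centroids.detach()[assign]) ** 2).sum(dim=1).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return new_centroids.detach()
```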
Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task. In addition, a new dataset (WikidataDisamb) is created to allow a clean and scalable evaluation of NED with Wikidata entries, and to be used as a reference in future research. In the end our results show that a Bi-LSTM encoding of the graph triplets performs best, improving upon the baseline models and scoring an F1 value of 91.6% on the WikidataDisamb test set|sl:tag|tag:wikidata
-NSA spying scandal|skos:broader|Edward Snowden
-Text Classification|skos:broader|NLP tasks / problems
-Jérusalem|skos:broader|Israël
-Wikidata query service|skos:broader|Wikidata
-K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called the knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by being equipped with a KG, without pre-training itself, because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:arxiv_author|Qi Ju
-Sebastian Schaffert|skos:broader|SW guys (and girls)
-RDF Schema|skos:broader|RDF
-Chef d'état|skos:broader|Homme politique
-BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark.
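Teacher annealing, as described for BAM above, is an interpolation between a distillation term and the supervised loss, with the mixing weight moving from teacher labels to gold labels over training. A minimal sketch follows; the linear schedule below is one plausible choice, not necessarily the paper's exact one.

```python
import torch.nn.functional as F

def bam_loss(student_logits, teacher_logits, gold, step, total_steps):
    """Anneal from pure distillation (early) to pure supervision (late)."""
    lam = step / total_steps
    distill = F.kl_div(F.log_softmax(student_logits, dim=-1),
                       F.softmax(teacher_logits, dim=-1),
                       reduction="batchmean")
    supervised = F.cross_entropy(student_logits, gold)
    return (1 - lam) * distill + lam * supervised
```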
Our method consistently improves over standard single-task and multi-task training.|sl:tag|tag:kd_mkb_biblio -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|sl:arxiv_author|Sylvain Gugger -iMovie|skos:broader|Mac software -Prolétaire|skos:broader|Prolétarisation -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. 
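The calibrator idea from the selective question answering abstract above can be sketched with scikit-learn: fit a small classifier on held-out questions to predict whether the QA model answered correctly, then abstain below a confidence threshold. The feature set here (softmax probability plus lengths) and the tiny synthetic data are simplified assumptions inspired by, but not identical to, the paper's setup.

```python
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier

# Hypothetical held-out examples: [softmax prob, question len, context len],
# with y = 1 when the QA model's answer was correct.
X_dev = np.array([[0.9, 8, 120], [0.3, 15, 300], [0.8, 7, 90], [0.2, 20, 400]])
y_dev = np.array([1, 0, 1, 0])

calibrator = GradientBoostingClassifier().fit(X_dev, y_dev)

def answer_or_abstain(answer, feats, threshold=0.5):
    """Return the QA model's answer only if the calibrator trusts it."""
    p_correct = calibrator.predict_proba(np.array([feats]))[0, 1]
    return answer if p_correct >= threshold else None   # None = abstain
```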
Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|sl:tag|tag:question_answering
-Truffe|skos:broader|Champignon
-Boko Haram|skos:broader|Nigeria
-Unit test|skos:broader|Dev
-Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|sl:arxiv_author|Luis C. Lamb
-Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as "the tissue is 75% likely to be tumorous" or "the chemical is 25% likely to be toxic" are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include "don't know", that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an "Uncertain" answer similar to a human reply of "don't know" or "50:50" in the examples we refer to earlier, this would translate to actions such as "operate on tumour" or "remove compound from use" where the models give a more true than not answer. Where the models judge the result "Uncertain" the practical decision might be "carry out more detailed laboratory testing of compound" or "commission new tissue analyses".
The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating Uncertain from binary predictions and finally, we provide arguments for its use in practice.|sl:arxiv_author|Simon Thomas -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|sl:arxiv_firstAuthor|Cheng Guo -Jazz|skos:broader|Musique -Minimum Description Length Principle|skos:broader|Information theory -RDF Parser|skos:broader|RDF Tools -Chirac|skos:broader|Cons de Français -Inde|skos:broader|Asie -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. 
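The entity-embedding scheme for categorical variables described above is essentially an `nn.Embedding` per categorical column feeding an MLP, trained end-to-end on the supervised task. A minimal sketch; the column cardinalities, embedding size, and layer widths below are arbitrary illustrative choices.

```python
import torch
import torch.nn as nn

class TabularNet(nn.Module):
    """One learned embedding table per categorical column, plus continuous inputs."""
    def __init__(self, cardinalities, emb_dim=8, n_cont=3):
        super().__init__()
        self.embs = nn.ModuleList(nn.Embedding(c, emb_dim) for c in cardinalities)
        self.mlp = nn.Sequential(
            nn.Linear(len(cardinalities) * emb_dim + n_cont, 64),
            nn.ReLU(),
            nn.Linear(64, 1))

    def forward(self, x_cat, x_cont):
        # Look up and concatenate the embedding of each categorical value.
        e = torch.cat([emb(x_cat[:, i]) for i, emb in enumerate(self.embs)], dim=1)
        return self.mlp(torch.cat([e, x_cont], dim=1))

# e.g. two categorical columns with 12 and 31 levels, two continuous features:
net = TabularNet([12, 31], emb_dim=4, n_cont=2)
```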
We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:arxiv_firstAuthor|Fabio Petroni -Darfour|skos:broader|Soudan -Marseillaise|skos:broader|Hymne national -Acoustique musicale|skos:broader|Musique -Asie centrale|skos:broader|Asie -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:tag|tag:artificial_general_intelligence -Poisson|skos:broader|Animal -- Intent classification: predicting the intent of a query - slot filling extracts semantic concepts in the query (a sequence labeling task that tags the input word sequence). For example the user query could be “Find me an action movie by Steven Spielberg”. 
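Probing a pretrained language model for relational knowledge in the LAMA style amounts to cloze queries; with the Hugging Face `transformers` library this is a few lines. The example sentence below is ours, not drawn from the paper's probe set.

```python
# Querying a masked LM as if it were a knowledge base, cloze-style.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")
for prediction in fill_mask("The capital of France is [MASK]."):
    print(prediction["token_str"], prediction["score"])
```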
The intent here is “find_movie” while the slots are “genre” with value “action” and “directed_by” with value “Steven Spielberg”.|skos:broader|Intent classification: predicting the intent of a query -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|sl:tag|tag:arxiv_doc -Françafrique|skos:broader|France / Afrique -Word2Bits - Quantized Word Vectors We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer Word vectors require significant amounts of memory and storage, posing issues to resource limited devices like mobile phones and GPUs. We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer. We train word vectors on English Wikipedia (2017) and evaluate them on standard word similarity and analogy tasks and on question answering (SQuAD). Our quantized word vectors not only take 8-16x less space than full precision (32 bit) word vectors but also outperform them on word similarity tasks and question answering.|sl:arxiv_author|Maximilian Lam -Explosions cosmiques|skos:broader|Rayons cosmiques -XTech 2006|skos:broader|XTech -Nebra Sky Disc|skos:broader|Âge du bronze -La communauté internationale est une garce|skos:broader|Communauté internationale -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. 
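Word2Bits's core trick, a quantization function inside the training loop with a straight-through gradient, can be sketched in PyTorch. The +/- 1/3 levels below are our reading of the paper's 1-bit setting and should be treated as an assumption; the straight-through estimator is the essential ingredient.

```python
import torch

class OneBitQuantize(torch.autograd.Function):
    """Quantize weights to two levels; pass gradients straight through."""
    @staticmethod
    def forward(ctx, w):
        # Assumed 1-bit levels (+/- 1/3); the paper also has multi-bit variants.
        return torch.where(w >= 0, torch.full_like(w, 1 / 3),
                           torch.full_like(w, -1 / 3))

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output  # gradient ignores the discrete step

# Usage inside a Word2Vec-style update: score with OneBitQuantize.apply(w).
```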
In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:tag|tag:ludovic_denoyer -Sport de combat|skos:broader|Sport -NLP@Stanford|skos:broader|Stanford -Danny Ayers|skos:broader|SW guys (and girls) -Equivalence mining|skos:broader|Synonym URIs -intent classification intent detection|skos:broader|Moteur de recherche -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|sl:arxiv_author|Ryan A. Rossi -Baidu|skos:broader|Search Engines -Chine|skos:broader|Asie -Munich|skos:broader|Ville -Giovanni Tummarello|skos:broader|SW guys (and girls) -Loi sur le téléchargement|skos:broader|Droit et internet -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. 
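The refinement step behind the unsupervised embedding alignment above is orthogonal Procrustes: given a seed dictionary pairing rows of the two embedding matrices (synthetic in the fully unsupervised setting), the best rotation has a closed form. A numpy sketch of that step only, not the adversarial initialisation:

```python
import numpy as np

def procrustes(X_src, Y_tgt):
    """Best orthogonal map W with X_src @ W ~ Y_tgt (rows are word vectors).

    Closed-form solution of min ||X W - Y||_F over orthogonal W:
    W = U @ Vt, where U, S, Vt = svd(X.T @ Y).
    """
    U, _, Vt = np.linalg.svd(X_src.T @ Y_tgt)
    return U @ Vt
```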
To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:tag|tag:sequence_to_sequence_learning
-NLP sample code|skos:broader|NLP
-Stardust|skos:broader|Missions spatiales
-public-vocabs@w3.org|skos:broader|Web Schemas Task Force
-fps blog|skos:broader|fps
-OntoWiki|skos:broader|Linked Data publishing
-Niger|skos:broader|Sahel
-Pays d'Europe|skos:broader|Europe
-Java Server Faces|skos:broader|Servlet
-A Survey on Deep Learning for Named Entity Recognition mainly focus on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems achieved great success, obtaining good performance at the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|sl:arxiv_author|Aixin Sun
-Attali|skos:broader|Intellectuel
-SKOS/OWL|skos:broader|SKOS
-machine translation paradigm where translations are generated on the basis of statistical models whose parameters are derived from the analysis of bilingual text corpora. The statistical approach contrasts with the rule-based approaches to machine translation as well as with example-based machine translation |skos:broader|sub-field of computational linguistics that investigates the use of software to translate text or speech from one language to another
-Lord of the Flies|skos:broader|Roman
-Diacritics in URI|skos:broader|URI encoding
-MOAT|skos:broader|Linked Data
-Ecole|skos:broader|Education
-Censure et maltraitance animale|skos:broader|Factory farming
-Brain vs Deep Learning|skos:broader|Neuroscience AND Machine learning
-BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network.
We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|sl:arxiv_author|Dipanjan Das -Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|sl:tag|tag:arxiv_doc -Protégé|skos:broader|OWL tool -Extinction de masse|skos:broader|Catastrophe -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|sl:tag|tag:matrix_calculus -France|skos:broader|Europe -The Limits to Growth|skos:broader|Economie -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. 
In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|sl:tag|tag:arxiv_doc -AI@Facebook|skos:broader|Facebook -Rapport Villani|skos:broader|IA AI -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:tag|tag:multitask_learning_in_nlp -Natural Language Semantic Search|skos:broader|Deep Learning -Class based language models|skos:broader|Language model -ML/NLP blog|skos:broader|Machine learning -Placentaires, marsupiaux et monotrèmes|skos:broader|Monotrèmes -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. 
Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Catherine Wong -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:tag|tag:survey -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. 
Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|sl:tag|tag:starspace -OpenAI GPT|skos:broader|Pre-Trained Language Models -Termite|skos:broader|Insecte -Asie mineure|skos:broader|Turquie -Antimatière|skos:broader|Physique des particules -Obélisque d'Axoum|skos:broader|Mussolini -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|sl:arxiv_firstAuthor|Xipeng Qiu -BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|sl:tag|tag:arxiv_doc -Chris Bizer|skos:broader|Technical girls and guys -M3 Multi Media Museum|skos:broader|My old things -Coursera: NLP class|skos:broader|NLP@Stanford -Exploration marsienne|skos:broader|Mars -Text Summarization|skos:broader|NLP tasks / problems -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... 
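StarSpace's "embed everything, compare with a similarity and a ranking loss" recipe can be sketched with an `EmbeddingBag` (an entity is the sum of its discrete feature embeddings) and a margin ranking loss over sampled negatives. Sizes and the margin below are illustrative assumptions, not StarSpace's defaults.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

n_features, dim = 10_000, 64
emb = nn.EmbeddingBag(n_features, dim, mode="sum")  # entity = sum of its features

def starspace_loss(lhs_feats, pos_feats, neg_feats, margin=0.2):
    """Rank the correct right-hand-side entity above a sampled negative one.

    Each *_feats argument is a LongTensor of shape (batch, n_feats) holding
    discrete feature ids for the entities being compared.
    """
    a, p, n = emb(lhs_feats), emb(pos_feats), emb(neg_feats)
    pos_sim = F.cosine_similarity(a, p)
    neg_sim = F.cosine_similarity(a, n)
    return torch.clamp(margin - pos_sim + neg_sim, min=0).mean()
```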
We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:arxiv_author|Tomas Pfister -Constraint Satisfaction Problem|skos:broader|Constraint Programming -Hadoop|skos:broader|Big Data -Feynman|skos:broader|Physicien -Education and Linked Data|skos:broader|Linked Data -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. 
Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Noah Constant -TextRank|skos:broader|Keyword/keyphrase extraction -Tag cloud|skos:broader|Tagging -Data Publica|skos:broader|Data portal -Jena GRDDL Reader|skos:broader|Jena -blogmarks|skos:broader|Social bookmarking -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed- forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:arxiv_author|David Weiss -Linked Data Browser|skos:broader|Linked Data -Galileo (spacecraft)|skos:broader|Jupiter -Symbiose|skos:broader|Biology -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. Specifically, we study two structured distillation schemes: i)pair-wise distillation that distills the pairwise similarities by building a static graph, and ii)holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|sl:arxiv_author|Jingdong Wang -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that used in Word2Vec Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in nature language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. 
In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|sl:tag|tag:topic_embeddings -BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|sl:arxiv_firstAuthor|Ian Tenney -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|sl:tag|tag:memory_embeddings -Néandertal|skos:broader|Origines de l'homme -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. 
In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:tag|tag:these_irit_renault_biblio -Consciousness Prior|skos:broader|Conscience artificielle -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|sl:tag|tag:multi_label_classification -Apache Spark|skos:broader|apache.org -Innoraise|skos:broader|Social Networks -KGAT: Knowledge Graph Attention Network for Recommendation To provide more accurate, diverse, and explainable recommendation, it is compulsory to go beyond modeling user-item interactions and take side information into account. Traditional methods like factorization machine (FM) cast it as a supervised learning problem, which assumes each interaction as an independent instance with side information encoded. Due to the overlook of the relations among instances or items (e.g., the director of a movie is also an actor of another movie), these methods are insufficient to distill the collaborative signal from the collective behaviors of users. In this work, we investigate the utility of knowledge graph (KG), which breaks down the independent interaction assumption by linking items with their attributes. We argue that in such a hybrid structure of KG and user-item graph, high-order relations --- which connect two items with one or multiple linked attributes --- are an essential factor for successful recommendation. 
We propose a new method named Knowledge Graph Attention Network (KGAT) which explicitly models the high-order connectivities in KG in an end-to-end fashion. It recursively propagates the embeddings from a node's neighbors (which can be users, items, or attributes) to refine the node's embedding, and employs an attention mechanism to discriminate the importance of the neighbors. Our KGAT is conceptually advantageous to existing KG-based recommendation methods, which either exploit high-order relations by extracting paths or implicitly modeling them with regularization. Empirical results on three public benchmarks show that KGAT significantly outperforms state-of-the-art methods like Neural FM and RippleNet. Further studies verify the efficacy of embedding propagation for high-order relation modeling and the interpretability benefits brought by the attention mechanism.|sl:arxiv_author|Xiangnan He -Nok|skos:broader|Archéologie africaine -CSV|skos:broader|Tables +http://www.semanlink.net/doc/2021/10/your_money_and_your_life_by_e|creationDate|2021-10-09 +http://www.semanlink.net/doc/2021/10/your_money_and_your_life_by_e|tag|http://www.semanlink.net/tag/virtual_currency +http://www.semanlink.net/doc/2021/10/your_money_and_your_life_by_e|tag|http://www.semanlink.net/tag/money +http://www.semanlink.net/doc/2021/10/your_money_and_your_life_by_e|tag|http://www.semanlink.net/tag/abuse_of_power +http://www.semanlink.net/doc/2021/10/your_money_and_your_life_by_e|tag|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/doc/2021/10/your_money_and_your_life_by_e|comment|> Central Banks Digital Currencies will ransom our future +http://www.semanlink.net/doc/2021/10/your_money_and_your_life_by_e|title|Your Money and Your Life - by Edward Snowden - Continuing Ed — with Edward Snowden +http://www.semanlink.net/doc/2021/10/your_money_and_your_life_by_e|bookmarkOf|https://edwardsnowden.substack.com/p/cbdcs +http://www.semanlink.net/doc/2021/10/your_money_and_your_life_by_e|creationTime|2021-10-09T15:03:07Z +http://www.semanlink.net/doc/2020/07/dicksontsai_stanford_nlp_local_|creationDate|2020-07-03 +http://www.semanlink.net/doc/2020/07/dicksontsai_stanford_nlp_local_|tag|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/doc/2020/07/dicksontsai_stanford_nlp_local_|tag|http://www.semanlink.net/tag/servlet +http://www.semanlink.net/doc/2020/07/dicksontsai_stanford_nlp_local_|tag|http://www.semanlink.net/tag/stanford_pos_tagger +http://www.semanlink.net/doc/2020/07/dicksontsai_stanford_nlp_local_|tag|http://www.semanlink.net/tag/chrome_extension +http://www.semanlink.net/doc/2020/07/dicksontsai_stanford_nlp_local_|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/07/dicksontsai_stanford_nlp_local_|title|dicksontsai/stanford-nlp-local-extension: Chrome extension for sending content to localhost server running Stanford NLP tools. 
+http://www.semanlink.net/doc/2020/07/dicksontsai_stanford_nlp_local_|bookmarkOf|https://github.com/dicksontsai/stanford-nlp-local-extension
+http://www.semanlink.net/doc/2020/07/dicksontsai_stanford_nlp_local_|creationTime|2020-07-03T17:44:02Z
+http://www.semanlink.net/doc/2021/03/hugging_face_sur_twitter_fin|creationDate|2021-03-12
+http://www.semanlink.net/doc/2021/03/hugging_face_sur_twitter_fin|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+http://www.semanlink.net/doc/2021/03/hugging_face_sur_twitter_fin|tag|http://www.semanlink.net/tag/hugging_face
+http://www.semanlink.net/doc/2021/03/hugging_face_sur_twitter_fin|tag|http://www.semanlink.net/tag/speech_recognition
+http://www.semanlink.net/doc/2021/03/hugging_face_sur_twitter_fin|comment|Not only for English but for 53 Languages
+http://www.semanlink.net/doc/2021/03/hugging_face_sur_twitter_fin|title|"Hugging Face sur Twitter : ""Fine-Tuning @facebookai's Wav2Vec2 for Speech Recognition is now possible in Transformers"
+http://www.semanlink.net/doc/2021/03/hugging_face_sur_twitter_fin|bookmarkOf|https://twitter.com/huggingface/status/1370401897702641664
+http://www.semanlink.net/doc/2021/03/hugging_face_sur_twitter_fin|creationTime|2021-03-12T18:44:46Z
+http://www.semanlink.net/doc/2021/02/pre_trained_models_%7C_graphvite|creationDate|2021-02-16
+http://www.semanlink.net/doc/2021/02/pre_trained_models_%7C_graphvite|tag|http://www.semanlink.net/tag/pre_trained_models
+http://www.semanlink.net/doc/2021/02/pre_trained_models_%7C_graphvite|tag|http://www.semanlink.net/tag/discute_avec_raphael
+http://www.semanlink.net/doc/2021/02/pre_trained_models_%7C_graphvite|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings
+http://www.semanlink.net/doc/2021/02/pre_trained_models_%7C_graphvite|comment|> To facilitate the usage of knowledge graph representations in semantic tasks, we provide a bunch of pre-trained embeddings for some common datasets.
+http://www.semanlink.net/doc/2021/02/pre_trained_models_%7C_graphvite|title|Pre-trained Models GraphVite
+http://www.semanlink.net/doc/2021/02/pre_trained_models_%7C_graphvite|bookmarkOf|https://graphvite.io/pretrained_models
+http://www.semanlink.net/doc/2021/02/pre_trained_models_%7C_graphvite|creationTime|2021-02-16T20:28:37Z
+http://www.semanlink.net/doc/2021/08/jeffrey_p_clemens_sur_twitter_|creationDate|2021-08-16
+http://www.semanlink.net/doc/2021/08/jeffrey_p_clemens_sur_twitter_|tag|http://www.semanlink.net/tag/war_on_drugs
+http://www.semanlink.net/doc/2021/08/jeffrey_p_clemens_sur_twitter_|tag|http://www.semanlink.net/tag/afghanistan
+http://www.semanlink.net/doc/2021/08/jeffrey_p_clemens_sur_twitter_|tag|http://www.semanlink.net/tag/taliban
+http://www.semanlink.net/doc/2021/08/jeffrey_p_clemens_sur_twitter_|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2021/08/jeffrey_p_clemens_sur_twitter_|title|"Jeffrey P. Clemens sur Twitter : ""An underrated mistake in US policy in Afghanistan was its long-running effort to suppress the cultivation of opium poppy and, in turn, the production of heroin and other opiates. A thread. 1/19"" / Twitter"
+http://www.semanlink.net/doc/2021/08/jeffrey_p_clemens_sur_twitter_|bookmarkOf|https://twitter.com/jeffreypclemens/status/1426930889271877633
+http://www.semanlink.net/doc/2021/08/jeffrey_p_clemens_sur_twitter_|creationTime|2021-08-16T09:49:23Z
+http://www.semanlink.net/doc/2020/08/designing_and_interpreting_prob|creationDate|2020-08-02
+http://www.semanlink.net/doc/2020/08/designing_and_interpreting_prob|tag|http://www.semanlink.net/tag/probing_ml
+http://www.semanlink.net/doc/2020/08/designing_and_interpreting_prob|tag|http://www.semanlink.net/tag/neural_network_interpretability
+http://www.semanlink.net/doc/2020/08/designing_and_interpreting_prob|tag|http://www.semanlink.net/tag/embeddings
+http://www.semanlink.net/doc/2020/08/designing_and_interpreting_prob|tag|http://www.semanlink.net/tag/representation_learning
+http://www.semanlink.net/doc/2020/08/designing_and_interpreting_prob|comment|> Probing turns supervised tasks into tools for interpreting representations. But the use of supervision leads to the question, did I interpret the representation? Or did my probe just learn the task itself?
+http://www.semanlink.net/doc/2020/08/designing_and_interpreting_prob|title|Designing and Interpreting Probes · John Hewitt
+http://www.semanlink.net/doc/2020/08/designing_and_interpreting_prob|bookmarkOf|https://nlp.stanford.edu/~johnhew/interpreting-probes.html
+http://www.semanlink.net/doc/2020/08/designing_and_interpreting_prob|creationTime|2020-08-02T11:19:01Z
+http://www.semanlink.net/doc/2020/12/github_explosion_sense2vec_c|creationDate|2020-12-31
+http://www.semanlink.net/doc/2020/12/github_explosion_sense2vec_c|tag|http://www.semanlink.net/tag/sense2vec
+http://www.semanlink.net/doc/2020/12/github_explosion_sense2vec_c|tag|http://www.semanlink.net/tag/github_project
+http://www.semanlink.net/doc/2020/12/github_explosion_sense2vec_c|title|GitHub - explosion/sense2vec: Contextually-keyed word vectors
+http://www.semanlink.net/doc/2020/12/github_explosion_sense2vec_c|bookmarkOf|https://github.com/explosion/sense2vec
+http://www.semanlink.net/doc/2020/12/github_explosion_sense2vec_c|creationTime|2020-12-31T10:14:41Z
+http://www.semanlink.net/doc/2021/05/the_city_state_in_five_cultures|creationDate|2021-05-29
+http://www.semanlink.net/doc/2021/05/the_city_state_in_five_cultures|tag|http://www.semanlink.net/tag/city_states
+http://www.semanlink.net/doc/2021/05/the_city_state_in_five_cultures|tag|http://www.semanlink.net/tag/histoire
+http://www.semanlink.net/doc/2021/05/the_city_state_in_five_cultures|title|The City-State in Five Cultures Department of History University of Washington
+http://www.semanlink.net/doc/2021/05/the_city_state_in_five_cultures|bookmarkOf|https://history.washington.edu/research/books/city-state-five-cultures
+http://www.semanlink.net/doc/2021/05/the_city_state_in_five_cultures|creationTime|2021-05-29T00:50:24Z
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|creationDate|2021-09-30
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|tag|http://www.semanlink.net/tag/discute_avec_raphael
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|tag|http://www.semanlink.net/tag/dense_passage_retrieval
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|tag|http://www.semanlink.net/tag/emnlp_2021
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|arxiv_author|Jinhyuk Lee
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|arxiv_author|Alexander Wettig
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|arxiv_author|Danqi Chen
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|comment|[Github](doc:2021/09/princeton_nlp_densephrases_acl)
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|relatedDoc|http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|title|[2109.08133] Phrase Retrieval Learns Passage Retrieval, Too
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|bookmarkOf|https://arxiv.org/abs/2109.08133
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|creationTime|2021-09-30T14:50:09Z
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|arxiv_summary|"Dense retrieval methods have shown great promise over sparse retrieval
+methods in a range of NLP problems. Among them, dense phrase retrieval-the most
+fine-grained retrieval unit-is appealing because phrases can be directly used
+as the output for question answering and slot filling tasks. In this work, we
+follow the intuition that retrieving phrases naturally entails retrieving
+larger text blocks and study whether phrase retrieval can serve as the basis
+for coarse-level retrieval including passages and documents. We first observe
+that a dense phrase-retrieval system, without any retraining, already achieves
+better passage retrieval accuracy (+3-5% in top-5 accuracy) compared to passage
+retrievers, which also helps achieve superior end-to-end QA performance with
+fewer passages. Then, we provide an interpretation for why phrase-level
+supervision helps learn better fine-grained entailment compared to
+passage-level supervision, and also show that phrase retrieval can be improved
+to achieve competitive performance in document-retrieval tasks such as entity
+linking and knowledge-grounded dialogue. Finally, we demonstrate how phrase
+filtering and vector quantization can reduce the size of our index by 4-10x,
+making dense phrase retrieval a practical and versatile solution in
+multi-granularity retrieval."
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|arxiv_firstAuthor|Jinhyuk Lee
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|arxiv_updated|2021-09-16T17:42:45Z
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|arxiv_title|Phrase Retrieval Learns Passage Retrieval, Too
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|arxiv_published|2021-09-16T17:42:45Z
+http://www.semanlink.net/doc/2021/09/2109_08133_phrase_retrieval_l|arxiv_num|2109.08133
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_new|creationDate|2021-04-17
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_new|tag|http://www.semanlink.net/tag/information_retrieval
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_new|tag|http://www.semanlink.net/tag/sbert
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_new|tag|http://www.semanlink.net/tag/nils_reimers
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_new|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_new|title|"Nils Reimers sur Twitter : ""New models for Neural Information Retrieval..."""
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_new|bookmarkOf|https://twitter.com/Nils_Reimers/status/1383032154276388864?s=20
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_new|creationTime|2021-04-17T10:07:14Z
+http://www.semanlink.net/doc/2020/11/evolutionary_physiological_ec|creationDate|2020-11-05
+http://www.semanlink.net/doc/2020/11/evolutionary_physiological_ec|tag|http://www.semanlink.net/tag/glyphosate
+http://www.semanlink.net/doc/2020/11/evolutionary_physiological_ec|tag|http://www.semanlink.net/tag/epigenetics
+http://www.semanlink.net/doc/2020/11/evolutionary_physiological_ec|tag|http://www.semanlink.net/tag/euphrasie
+http://www.semanlink.net/doc/2020/11/evolutionary_physiological_ec|tag|http://www.semanlink.net/tag/finlande
+http://www.semanlink.net/doc/2020/11/evolutionary_physiological_ec|title|Evolutionary & Physiological Ecology Ruuskanen Group
+http://www.semanlink.net/doc/2020/11/evolutionary_physiological_ec|bookmarkOf|https://sites.utu.fi/ruuskanengroup/
+http://www.semanlink.net/doc/2020/11/evolutionary_physiological_ec|creationTime|2020-11-05T00:25:22Z
+http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_|creationDate|2021-03-09
+http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_|tag|http://www.semanlink.net/tag/machine_learned_ranking
+http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_|tag|http://www.semanlink.net/tag/bert
+http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_|tag|http://www.semanlink.net/tag/tutorial
+http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_|tag|http://www.semanlink.net/tag/ranking_information_retrieval
+http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_|tag|http://www.semanlink.net/tag/slides
+http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_|title|"Rodrigo Nogueira sur Twitter : ""Slides of our WSDM 2021 tutorial ""Pretrained Transformers for Text Ranking: BERT and Beyond"""
+http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_|bookmarkOf|https://twitter.com/rodrigfnogueira/status/1368943696750796802?s=20
+http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_|creationTime|2021-03-09T08:09:28Z
+http://www.semanlink.net/doc/2020/11/kingsley_uyi_idehen_sur_twitter|creationDate|2020-11-14
+http://www.semanlink.net/doc/2020/11/kingsley_uyi_idehen_sur_twitter|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2020/11/kingsley_uyi_idehen_sur_twitter|tag|http://www.semanlink.net/tag/uriburner_com
+http://www.semanlink.net/doc/2020/11/kingsley_uyi_idehen_sur_twitter|tag|http://www.semanlink.net/tag/diffbot
+http://www.semanlink.net/doc/2020/11/kingsley_uyi_idehen_sur_twitter|tag|http://www.semanlink.net/tag/kingsley_idehen
+http://www.semanlink.net/doc/2020/11/kingsley_uyi_idehen_sur_twitter|comment|[UriBurner example](https://linkeddata.uriburner.com/about/html/https://linkeddata.uriburner.com/about/id/entity/https/www.technologyreview.com/2020/11/12/1011944/artificial-intelligence-replication-crisis-science-big-tech-google-deepmind-facebook-openai/)
+http://www.semanlink.net/doc/2020/11/kingsley_uyi_idehen_sur_twitter|title|"Kingsley Uyi Idehen sur Twitter : ""When I read this & other articles, I leverage our @datasniff browser ext. for highlighting key terms;"
+http://www.semanlink.net/doc/2020/11/kingsley_uyi_idehen_sur_twitter|bookmarkOf|https://twitter.com/kidehen/status/1327396397721772034
+http://www.semanlink.net/doc/2020/11/kingsley_uyi_idehen_sur_twitter|creationTime|2020-11-14T09:05:33Z
+http://www.semanlink.net/doc/2020/06/le_troupeau_film_|creationDate|2020-06-13
+http://www.semanlink.net/doc/2020/06/le_troupeau_film_|tag|http://www.semanlink.net/tag/kurdes
+http://www.semanlink.net/doc/2020/06/le_troupeau_film_|tag|http://www.semanlink.net/tag/film_turc
+http://www.semanlink.net/doc/2020/06/le_troupeau_film_|comment|"Film de Yılmaz Güney
+
+> l'extinction du monde nomade au contact de la civilisation urbaine"
+http://www.semanlink.net/doc/2020/06/le_troupeau_film_|title|Le Troupeau (film)
+http://www.semanlink.net/doc/2020/06/le_troupeau_film_|bookmarkOf|https://fr.wikipedia.org/wiki/Le_Troupeau
+http://www.semanlink.net/doc/2020/06/le_troupeau_film_|creationTime|2020-06-13T19:16:33Z
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|creationDate|2021-08-06
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|tag|http://www.semanlink.net/tag/question_answering
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|tag|http://www.semanlink.net/tag/nlp_reading_comprehension
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|tag|http://www.semanlink.net/tag/allen_institute_for_ai_a2i
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|tag|http://www.semanlink.net/tag/survey
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|tag|http://www.semanlink.net/tag/sebastian_ruder
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|arxiv_author|Matt Gardner
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|arxiv_author|Anna Rogers
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|arxiv_author|Isabelle Augenstein
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|comment|recommandé par [Sebastian Ruder](tag:sebastian_ruder)
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|title|[2107.12708] QA Dataset Explosion: A Taxonomy of NLP Resources for Question Answering and Reading Comprehension
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|bookmarkOf|https://arxiv.org/abs/2107.12708
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|creationTime|2021-08-06T22:01:16Z
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|arxiv_summary|"Alongside huge volumes of research on deep learning models in NLP in the
+recent years, there has been also much work on benchmark datasets needed to
+track modeling progress. Question answering and reading comprehension have been
+particularly prolific in this regard, with over 80 new datasets appearing in
+the past two years. This study is the largest survey of the field to date. We
+provide an overview of the various formats and domains of the current
+resources, highlighting the current lacunae for future work. We further discuss
+the current classifications of ``reasoning types"" in question answering and
+propose a new taxonomy. We also discuss the implications of over-focusing on
+English, and survey the current monolingual resources for other languages and
+multilingual resources. The study is aimed at both practitioners looking for
+pointers to the wealth of existing data, and at researchers working on new
+resources."
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|arxiv_firstAuthor|Anna Rogers
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|arxiv_updated|2021-07-27T10:09:13Z
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|arxiv_title|QA Dataset Explosion: A Taxonomy of NLP Resources for Question Answering and Reading Comprehension
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|arxiv_published|2021-07-27T10:09:13Z
+http://www.semanlink.net/doc/2021/08/2107_12708_qa_dataset_explosi|arxiv_num|2107.12708
+http://www.semanlink.net/doc/2020/10/which_flavor_of_bert_should_you|creationDate|2020-10-04
+http://www.semanlink.net/doc/2020/10/which_flavor_of_bert_should_you|tag|http://www.semanlink.net/tag/tutorial
+http://www.semanlink.net/doc/2020/10/which_flavor_of_bert_should_you|tag|http://www.semanlink.net/tag/question_answering
+http://www.semanlink.net/doc/2020/10/which_flavor_of_bert_should_you|tag|http://www.semanlink.net/tag/bert
+http://www.semanlink.net/doc/2020/10/which_flavor_of_bert_should_you|tag|http://www.semanlink.net/tag/huggingface_transformers
+http://www.semanlink.net/doc/2020/10/which_flavor_of_bert_should_you|comment|A guide to choosing and benchmarking BERT models for question answering
+http://www.semanlink.net/doc/2020/10/which_flavor_of_bert_should_you|title|Which flavor of BERT should you use for your QA task? by Olesya Bondarenko Towards Data Science
+http://www.semanlink.net/doc/2020/10/which_flavor_of_bert_should_you|bookmarkOf|https://towardsdatascience.com/which-flavor-of-bert-should-you-use-for-your-qa-task-6d6a0897fb24
+http://www.semanlink.net/doc/2020/10/which_flavor_of_bert_should_you|creationTime|2020-10-04T23:31:57Z
+http://www.semanlink.net/doc/2021/06/improving_quality_of_search_res|creationDate|2021-06-02
+http://www.semanlink.net/doc/2021/06/improving_quality_of_search_res|tag|http://www.semanlink.net/tag/lingo
+http://www.semanlink.net/doc/2021/06/improving_quality_of_search_res|title|Improving Quality of Search Results Clustering with Approximate Matrix Factorisations (2006)
+http://www.semanlink.net/doc/2021/06/improving_quality_of_search_res|bookmarkOf|https://www.researchgate.net/publication/221397444_Improving_Quality_of_Search_Results_Clustering_with_Approximate_Matrix_Factorisations
+http://www.semanlink.net/doc/2021/06/improving_quality_of_search_res|creationTime|2021-06-02T01:31:21Z
+http://www.semanlink.net/doc/2020/09/latent_graph_neural_networks_m|creationDate|2020-09-10
+http://www.semanlink.net/doc/2020/09/latent_graph_neural_networks_m|tag|http://www.semanlink.net/tag/graph_neural_networks
+http://www.semanlink.net/doc/2020/09/latent_graph_neural_networks_m|tag|http://www.semanlink.net/tag/graphs_machine_learning
+http://www.semanlink.net/doc/2020/09/latent_graph_neural_networks_m|comment|> TL;DR: [Graph neural networks](tag:graph_neural_networks) exploit [relational inductive biases](tag:relational_inductive_biases) for data that come in the form of a graph. However, **in many cases we do not have the graph readily available. Can graph deep learning still be applied in this case?** In this post, I draw parallels between recent works on latent graph learning and older techniques of manifold learning.
+http://www.semanlink.net/doc/2020/09/latent_graph_neural_networks_m|title|Latent graph neural networks: Manifold learning 2.0? by Michael Bronstein Sep, 2020
+http://www.semanlink.net/doc/2020/09/latent_graph_neural_networks_m|bookmarkOf|https://medium.com/@michael.bronstein/manifold-learning-2-99a25eeb677d
+http://www.semanlink.net/doc/2020/09/latent_graph_neural_networks_m|creationTime|2020-09-10T14:28:10Z
+http://www.semanlink.net/doc/2021/06/l_ame_damnee_du_president_kount|creationDate|2021-06-28
+http://www.semanlink.net/doc/2021/06/l_ame_damnee_du_president_kount|tag|http://www.semanlink.net/tag/seyni_kountche
+http://www.semanlink.net/doc/2021/06/l_ame_damnee_du_president_kount|comment|Bonkano
+http://www.semanlink.net/doc/2021/06/l_ame_damnee_du_president_kount|title|L'âme damnée du président Kountché (1983)
+http://www.semanlink.net/doc/2021/06/l_ame_damnee_du_president_kount|bookmarkOf|https://www.lemonde.fr/archives/article/1983/10/17/l-ame-damnee-du-president-kountche_2833883_1819218.html
+http://www.semanlink.net/doc/2021/06/l_ame_damnee_du_president_kount|creationTime|2021-06-28T19:38:47Z
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|creationDate|2021-07-09
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|tag|http://www.semanlink.net/tag/nils_reimers
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|tag|http://www.semanlink.net/tag/nlp_datasets
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|tag|http://www.semanlink.net/tag/benchmark
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|tag|http://www.semanlink.net/tag/information_retrieval
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|tag|http://www.semanlink.net/tag/zero_shot
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_author|Nandan Thakur
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_author|Nils Reimers
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_author|Andreas Rücklé
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_author|Abhishek Srivastava
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_author|Iryna Gurevych
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|comment|"[GitHub](doc:2021/07/ukplab_beir_a_heterogeneous_be)
+
+> Our results show **BM25 is a robust baseline**
+and Reranking-based models overall achieve
+the best zero-shot performances, however, at
+high computational costs. In contrast, **Denseretrieval
+models are computationally more efficient
+but often underperform other approaches**
+
+17 English evaluation datasets, 9 heterogeneous tasks (Non-English left for future work)"
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|relatedDoc|http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|title|[2104.08663] BEIR: A Heterogenous Benchmark for Zero-shot Evaluation of Information Retrieval Models
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|bookmarkOf|https://arxiv.org/abs/2104.08663
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|creationTime|2021-07-09T12:36:38Z
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_summary|"Neural IR models have often been studied in homogeneous and narrow settings,
+which has considerably limited insights into their generalization capabilities.
+To address this, and to allow researchers to more broadly establish the
+effectiveness of their models, we introduce BEIR (Benchmarking IR), a
+heterogeneous benchmark for information retrieval. We leverage a careful
+selection of 17 datasets for evaluation spanning diverse retrieval tasks
+including open-domain datasets as well as narrow expert domains. We study the
+effectiveness of nine state-of-the-art retrieval models in a zero-shot
+evaluation setup on BEIR, finding that performing well consistently across all
+datasets is challenging. Our results show BM25 is a robust baseline and
+Reranking-based models overall achieve the best zero-shot performances,
+however, at high computational costs. In contrast, Dense-retrieval models are
+computationally more efficient but often underperform other approaches,
+highlighting the considerable room for improvement in their generalization
+capabilities. In this work, we extensively analyze different retrieval models
+and provide several suggestions that we believe may be useful for future work.
+BEIR datasets and code are available at https://github.com/UKPLab/beir."
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_firstAuthor|Nandan Thakur
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_updated|2021-04-28T13:59:17Z
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_title|BEIR: A Heterogenous Benchmark for Zero-shot Evaluation of Information Retrieval Models
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_published|2021-04-17T23:29:55Z
+http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno|arxiv_num|2104.08663
+http://www.semanlink.net/doc/2020/12/serrania_de_la_lindosa|creationDate|2020-12-28
+http://www.semanlink.net/doc/2020/12/serrania_de_la_lindosa|tag|http://www.semanlink.net/tag/peinture_rupestre
+http://www.semanlink.net/doc/2020/12/serrania_de_la_lindosa|tag|http://www.semanlink.net/tag/colombie
+http://www.semanlink.net/doc/2020/12/serrania_de_la_lindosa|tag|http://www.semanlink.net/tag/archeologie_amazonienne
+http://www.semanlink.net/doc/2020/12/serrania_de_la_lindosa|comment|"""la chapelle sixtine amazonienne"""
+http://www.semanlink.net/doc/2020/12/serrania_de_la_lindosa|title|Serranía de La Lindosa
+http://www.semanlink.net/doc/2020/12/serrania_de_la_lindosa|bookmarkOf|https://fr.wikipedia.org/wiki/Serran%C3%ADa_de_La_Lindosa#Arch%C3%A9ologie
+http://www.semanlink.net/doc/2020/12/serrania_de_la_lindosa|creationTime|2020-12-28T19:15:56Z
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|creationDate|2020-10-01
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|tag|http://www.semanlink.net/tag/patrick_gallinari
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|tag|http://www.semanlink.net/tag/ner_unseen_mentions
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|tag|http://www.semanlink.net/tag/named_entity_recognition
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|arxiv_author|Bruno Taillé
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|arxiv_author|Vincent Guigue
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|arxiv_author|Patrick Gallinari
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|comment|> In this paper, we quantify the impact of ELMo, Flair and BERT representations on generalization to unseen mentions and new domains in NER.
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|title|[2001.08053] Contextualized Embeddings in Named-Entity Recognition: An Empirical Study on Generalization
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|bookmarkOf|https://arxiv.org/abs/2001.08053
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|creationTime|2020-10-01T11:43:28Z
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|arxiv_summary|"Contextualized embeddings use unsupervised language model pretraining to
+compute word representations depending on their context. This is intuitively
+useful for generalization, especially in Named-Entity Recognition where it is
+crucial to detect mentions never seen during training. However, standard
+English benchmarks overestimate the importance of lexical over contextual
+features because of an unrealistic lexical overlap between train and test
+mentions. In this paper, we perform an empirical analysis of the generalization
+capabilities of state-of-the-art contextualized embeddings by separating
+mentions by novelty and with out-of-domain evaluation. We show that they are
+particularly beneficial for unseen mentions detection, especially
+out-of-domain. For models trained on CoNLL03, language model contextualization
+leads to a +1.2% maximal relative micro-F1 score increase in-domain against
++13% out-of-domain on the WNUT dataset"
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|arxiv_firstAuthor|Bruno Taillé
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|arxiv_updated|2020-01-22T15:15:34Z
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|arxiv_title|Contextualized Embeddings in Named-Entity Recognition: An Empirical Study on Generalization
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|arxiv_published|2020-01-22T15:15:34Z
+http://www.semanlink.net/doc/2020/10/2001_08053_contextualized_emb|arxiv_num|2001.08053
+http://www.semanlink.net/doc/2020/09/whistleblower_says_facebook_ign|creationDate|2020-09-16
+http://www.semanlink.net/doc/2020/09/whistleblower_says_facebook_ign|tag|http://www.semanlink.net/tag/manipulation
+http://www.semanlink.net/doc/2020/09/whistleblower_says_facebook_ign|tag|http://www.semanlink.net/tag/facebook
+http://www.semanlink.net/doc/2020/09/whistleblower_says_facebook_ign|tag|http://www.semanlink.net/tag/deletefb
+http://www.semanlink.net/doc/2020/09/whistleblower_says_facebook_ign|tag|http://www.semanlink.net/tag/whistleblower
+http://www.semanlink.net/doc/2020/09/whistleblower_says_facebook_ign|tag|http://www.semanlink.net/tag/manipulations_politiques
+http://www.semanlink.net/doc/2020/09/whistleblower_says_facebook_ign|title|Whistleblower Says Facebook Ignored Global Political Manipulation
+http://www.semanlink.net/doc/2020/09/whistleblower_says_facebook_ign|bookmarkOf|https://www.buzzfeednews.com/article/craigsilverman/facebook-ignore-political-manipulation-whistleblower-memo
+http://www.semanlink.net/doc/2020/09/whistleblower_says_facebook_ign|creationTime|2020-09-16T13:36:54Z
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|creationDate|2020-08-21
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|tag|http://www.semanlink.net/tag/kg_and_nlp
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|tag|http://www.semanlink.net/tag/knowledge_graph_construction
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|tag|http://www.semanlink.net/tag/virtual_knowledge_graph
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|arxiv_author|Tianxing He
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|arxiv_author|James Glass
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|arxiv_author|Seunghak Yu
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|comment|"Building a virtual KG from unstructured documents
+
+> we first extract knowledge tuples in their surface form from unstructured documents, encode them using a pre-trained language model, and link the surface-entities via the encoding to form the graph structure."
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|title|[2008.08995] Constructing a Knowledge Graph from Unstructured Documents without External Alignment
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|bookmarkOf|https://arxiv.org/abs/2008.08995
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|creationTime|2020-08-21T18:38:32Z
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|arxiv_summary|"Knowledge graphs (KGs) are relevant to many NLP tasks, but building a
+reliable domain-specific KG is time-consuming and expensive. A number of
+methods for constructing KGs with minimized human intervention have been
+proposed, but still require a process to align into the human-annotated
+knowledge base. To overcome this issue, we propose a novel method to
+automatically construct a KG from unstructured documents that does not require
+external alignment and explore its use to extract desired information. To
+summarize our approach, we first extract knowledge tuples in their surface form
+from unstructured documents, encode them using a pre-trained language model,
+and link the surface-entities via the encoding to form the graph structure. We
+perform experiments with benchmark datasets such as WikiMovies and MetaQA. The
+experimental results show that our method can successfully create and search a
+KG with 18K documents and achieve 69.7% hits@10 (close to an oracle model) on a
+query retrieval task."
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|arxiv_firstAuthor|Seunghak Yu
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|arxiv_updated|2020-08-20T14:30:33Z
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|arxiv_title|Constructing a Knowledge Graph from Unstructured Documents without External Alignment
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|arxiv_published|2020-08-20T14:30:33Z
+http://www.semanlink.net/doc/2020/08/2008_08995_constructing_a_kno|arxiv_num|2008.08995
+http://www.semanlink.net/doc/2021/01/javier_abellan_sur_twitter_t|creationDate|2021-01-23
+http://www.semanlink.net/doc/2021/01/javier_abellan_sur_twitter_t|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+http://www.semanlink.net/doc/2021/01/javier_abellan_sur_twitter_t|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2021/01/javier_abellan_sur_twitter_t|comment|"> In the illustration you can see:
+> - The quadratic cost with respect to the sequence length.
+> - Size of queries have to be equal of size of keys (because this is dot product attention). Size of values can be different."
+http://www.semanlink.net/doc/2021/01/javier_abellan_sur_twitter_t|title|"Javier Abellán sur Twitter : ""Tired of not fully understanding the Attention of the Transformer? I've made this illustration for you."""
+http://www.semanlink.net/doc/2021/01/javier_abellan_sur_twitter_t|bookmarkOf|https://twitter.com/javier_a2/status/1352702338474110976
+http://www.semanlink.net/doc/2021/01/javier_abellan_sur_twitter_t|creationTime|2021-01-23T13:11:28Z
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|creationDate|2020-06-15
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|tag|http://www.semanlink.net/tag/2d_nlp
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|tag|http://www.semanlink.net/tag/representation_learning_for_nlp
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|tag|http://www.semanlink.net/tag/acl_2020
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|tag|http://www.semanlink.net/tag/nlp_google
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|tag|http://www.semanlink.net/tag/google_research
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|tag|http://www.semanlink.net/tag/templatic_documents
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|comment|"> a novel approach using representation learning for tackling the problem of **extracting structured information from form-like document images**. We propose an **extraction system that uses knowledge of the types of the target fields to generate extraction candidates**, and a neural network architecture that learns a dense representation of each candidate based on neighboring words in the document.
+
+[Blog post](doc:2020/06/google_ai_blog_extracting_stru)"
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|relatedDoc|http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|title|Representation Learning for Information Extraction from Form-like Documents – Google Research
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|bookmarkOf|https://research.google/pubs/pub49122/
+http://www.semanlink.net/doc/2020/06/representation_learning_for_inf|creationTime|2020-06-15T22:58:48Z
+http://www.semanlink.net/doc/2020/10/tagme_on_the_fly_annotation_of|creationDate|2020-10-11
+http://www.semanlink.net/doc/2020/10/tagme_on_the_fly_annotation_of|tag|http://www.semanlink.net/tag/wikipedia
+http://www.semanlink.net/doc/2020/10/tagme_on_the_fly_annotation_of|tag|http://www.semanlink.net/tag/nlp_tools
+http://www.semanlink.net/doc/2020/10/tagme_on_the_fly_annotation_of|tag|http://www.semanlink.net/tag/entity_discovery_and_linking
+http://www.semanlink.net/doc/2020/10/tagme_on_the_fly_annotation_of|comment|"> TAGME is a powerful tool that is able to identify on-the-fly meaningful short-phrases (called ""spots"") in an unstructured text and link them to a pertinent Wikipedia page in a fast and effective way."
+http://www.semanlink.net/doc/2020/10/tagme_on_the_fly_annotation_of|title|TAGME: on-the-fly annotation of short text fragments!
+http://www.semanlink.net/doc/2020/10/tagme_on_the_fly_annotation_of|bookmarkOf|https://tagme.d4science.org/tagme/
+http://www.semanlink.net/doc/2020/10/tagme_on_the_fly_annotation_of|creationTime|2020-10-11T02:11:40Z
+http://www.semanlink.net/doc/2021/03/christopher_dengs%C3%B8_sur_twitter_|creationDate|2021-03-09
+http://www.semanlink.net/doc/2021/03/christopher_dengs%C3%B8_sur_twitter_|tag|http://www.semanlink.net/tag/nlp_data_anonymization
+http://www.semanlink.net/doc/2021/03/christopher_dengs%C3%B8_sur_twitter_|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2021/03/christopher_dengs%C3%B8_sur_twitter_|title|"Christopher Dengsø sur Twitter : ""The moderation API now detects addresses in addition to other personal details."""
+http://www.semanlink.net/doc/2021/03/christopher_dengs%C3%B8_sur_twitter_|bookmarkOf|https://twitter.com/chrisdengso/status/1369171580106928130?s=20
+http://www.semanlink.net/doc/2021/03/christopher_dengs%C3%B8_sur_twitter_|creationTime|2021-03-09T08:08:07Z
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|creationDate|2021-08-25
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|tag|http://www.semanlink.net/tag/emnlp_2020
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|tag|http://www.semanlink.net/tag/machine_translation
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|tag|http://www.semanlink.net/tag/african_languages
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|tag|http://www.semanlink.net/tag/low_resource_languages
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|tag|http://www.semanlink.net/tag/nlp_4_africa
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|tag|http://www.semanlink.net/tag/masakhane
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Vukosi Marivate
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Orevaoghene Ahia
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Espoir Murhabazi
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Mofe Adeyemi
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Goodness Duru
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Masabata Mokgesi-Selinga
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Abdallah Bashir
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Tshinondiwa Matsila
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Shamsuddeen Hassan Muhammad
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Ricky Macharm
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Blessing Sibanda
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Kevin Degila
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Iroro Orife
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Julia Kreutzer
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Salomon Kabongo
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Solomon Oluwole Akinola
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Ignatius Ezeani
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Blessing Itoro Bassey
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Tajudeen Kolawole
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Lawrence Okegbemi
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Sackey Freshia
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Rubungo Andre Niyongabo
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Timi Fasubaa
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Wilhelmina Nekoto
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Ayodele Olabiyi
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Perez Ogayo
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Bonaventure Dossou
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Kolawole Tajudeen
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Alp Öktem
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Laura Jane Martinus
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Kathleen Siminyu
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Ghollah Kioko
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Musie Meressa
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Jade Abbott
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Taiwo Fagbohungbe
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Herman Kamper
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Christopher Onyefuluchi
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Daniel Whitenack
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Hady Elsahar
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Kelechi Ogueji
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Elan van Biljon
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Jason Webster
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Idris Abdulkabir Dangana
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Arshath Ramkilowan
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Salomey Osei
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Jamiil Toure Ali
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Adewale Akinfaderin
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_author|Chris Emezue
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|comment|about machine translation using parallel corpora only
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|title|[2010.02353] Participatory Research for Low-resourced Machine Translation: A Case Study in African Languages
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|bookmarkOf|https://arxiv.org/abs/2010.02353
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|creationTime|2021-08-25T17:01:12Z
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_summary|"Research in NLP lacks geographic diversity, and the question of how NLP can
+be scaled to low-resourced languages has not yet been adequately solved.
+""Low-resourced""-ness is a complex problem going beyond data availability and
+reflects systemic problems in society. In this paper, we focus on the task of
+Machine Translation (MT), that plays a crucial role for information
+accessibility and communication worldwide. Despite immense improvements in MT
+over the past decade, MT is centered around a few high-resourced languages. As
+MT researchers cannot solve the problem of low-resourcedness alone, we propose
+participatory research as a means to involve all necessary agents required in
+the MT development process. We demonstrate the feasibility and scalability of
+participatory research with a case study on MT for African languages. Its
+implementation leads to a collection of novel translation datasets, MT
+benchmarks for over 30 languages, with human evaluations for a third of them,
+and enables participants without formal training to make a unique scientific
+contribution. Benchmarks, models, data, code, and evaluation results are
+released under https://github.com/masakhane-io/masakhane-mt."
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_firstAuthor|Wilhelmina Nekoto
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_updated|2020-11-06T23:30:45Z
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_title|Participatory Research for Low-resourced Machine Translation: A Case Study in African Languages
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_published|2020-10-05T21:50:38Z
+http://www.semanlink.net/doc/2021/08/2010_02353_participatory_rese|arxiv_num|2010.02353
+http://www.semanlink.net/doc/2020/12/how_to_improve_elasticsearch_se|creationDate|2020-12-02
+http://www.semanlink.net/doc/2020/12/how_to_improve_elasticsearch_se|tag|http://www.semanlink.net/tag/elasticsearch
+http://www.semanlink.net/doc/2020/12/how_to_improve_elasticsearch_se|title|How to improve Elasticsearch search relevance with boolean queries Elastic Blog
+http://www.semanlink.net/doc/2020/12/how_to_improve_elasticsearch_se|bookmarkOf|https://www.elastic.co/blog/how-to-improve-elasticsearch-search-relevance-with-boolean-queries
+http://www.semanlink.net/doc/2020/12/how_to_improve_elasticsearch_se|creationTime|2020-12-02T14:05:46Z
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|creationDate|2021-05-17
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|tag|http://www.semanlink.net/tag/emnlp_2020
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|tag|http://www.semanlink.net/tag/discute_avec_raphael
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|tag|http://www.semanlink.net/tag/entity_type_representation
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_author|Bo Li
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_author|Jingyang Li
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_author|Guanglin Niu
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_author|Shiliang Pu
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_author|Yongfei Zhang
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|comment|head_type +relation = tail_type (Hum, mais pour une relation entre 2 entités de même type ?)
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|title|[2009.12030] AutoETER: Automated Entity Type Representation for Knowledge Graph Embedding
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|bookmarkOf|https://arxiv.org/abs/2009.12030
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|creationTime|2021-05-17T16:47:20Z
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_summary|"Recent advances in Knowledge Graph Embedding (KGE) allow for representing
+entities and relations in continuous vector spaces. Some traditional KGE models
+leveraging additional type information can improve the representation of
+entities which however totally rely on the explicit types or neglect the
+diverse type representations specific to various relations. Besides, none of
+the existing methods is capable of inferring all the relation patterns of
+symmetry, inversion and composition as well as the complex properties of 1-N,
+N-1 and N-N relations, simultaneously. To explore the type information for any
+KG, we develop a novel KGE framework with Automated Entity TypE Representation
+(AutoETER), which learns the latent type embedding of each entity by regarding
+each relation as a translation operation between the types of two entities with
+a relation-aware projection mechanism. Particularly, our designed automated
+type representation learning mechanism is a pluggable module which can be
+easily incorporated with any KGE model. Besides, our approach could model and
+infer all the relation patterns and complex relations. Experiments on four
+datasets demonstrate the superior performance of our model compared to
+state-of-the-art baselines on link prediction tasks, and the visualization of
+type clustering provides clearly the explanation of type embeddings and
+verifies the effectiveness of our model."
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_firstAuthor|Guanglin Niu
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_updated|2020-10-06T13:52:59Z
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_title|AutoETER: Automated Entity Type Representation for Knowledge Graph Embedding
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_published|2020-09-25T04:27:35Z
+http://www.semanlink.net/doc/2021/05/2009_12030_autoeter_automate|arxiv_num|2009.12030
+http://www.semanlink.net/doc/2020/07/sandstorm|creationDate|2020-07-18
+http://www.semanlink.net/doc/2020/07/sandstorm|tag|http://www.semanlink.net/tag/web
+http://www.semanlink.net/doc/2020/07/sandstorm|tag|http://www.semanlink.net/tag/open_source
+http://www.semanlink.net/doc/2020/07/sandstorm|tag|http://www.semanlink.net/tag/tagged
+http://www.semanlink.net/doc/2020/07/sandstorm|comment|Self-host web-based productivity apps easily and securely.
Sandstorm is an open source project built by a community of volunteers with the goal of making it really easy to run open source web applications +http://www.semanlink.net/doc/2020/07/sandstorm|title|Sandstorm +http://www.semanlink.net/doc/2020/07/sandstorm|bookmarkOf|https://sandstorm.io/ +http://www.semanlink.net/doc/2020/07/sandstorm|creationTime|2020-07-18T13:31:12Z +http://www.semanlink.net/doc/2021/06/contrastive_representation_lear|creationDate|2021-06-04 +http://www.semanlink.net/doc/2021/06/contrastive_representation_lear|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2021/06/contrastive_representation_lear|tag|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/doc/2021/06/contrastive_representation_lear|tag|http://www.semanlink.net/tag/lilian_weng +http://www.semanlink.net/doc/2021/06/contrastive_representation_lear|tag|http://www.semanlink.net/tag/contrastive_learning +http://www.semanlink.net/doc/2021/06/contrastive_representation_lear|title|Contrastive Representation Learning +http://www.semanlink.net/doc/2021/06/contrastive_representation_lear|bookmarkOf|https://lilianweng.github.io/lil-log/2021/05/31/contrastive-representation-learning.html#language-sentence-embedding +http://www.semanlink.net/doc/2021/06/contrastive_representation_lear|creationTime|2021-06-04T19:59:37Z +http://www.semanlink.net/doc/2020/12/retour_sur_terre_de_chang%E2%80%99e_5_|creationDate|2020-12-16 +http://www.semanlink.net/doc/2020/12/retour_sur_terre_de_chang%E2%80%99e_5_|tag|http://www.semanlink.net/tag/chine_leadership +http://www.semanlink.net/doc/2020/12/retour_sur_terre_de_chang%E2%80%99e_5_|tag|http://www.semanlink.net/tag/chine_technologie +http://www.semanlink.net/doc/2020/12/retour_sur_terre_de_chang%E2%80%99e_5_|tag|http://www.semanlink.net/tag/lune +http://www.semanlink.net/doc/2020/12/retour_sur_terre_de_chang%E2%80%99e_5_|tag|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/doc/2020/12/retour_sur_terre_de_chang%E2%80%99e_5_|title|Retour sur Terre de Chang’e-5, une sonde spatiale chinoise transportant des échantillons lunaires +http://www.semanlink.net/doc/2020/12/retour_sur_terre_de_chang%E2%80%99e_5_|bookmarkOf|https://www.lemonde.fr/sciences/article/2020/12/16/retour-sur-terre-de-chang-e-5-une-sonde-spatiale-chinoise-transportant-des-echantillons-lunaires_6063651_1650684.html +http://www.semanlink.net/doc/2020/12/retour_sur_terre_de_chang%E2%80%99e_5_|creationTime|2020-12-16T23:02:41Z +http://www.semanlink.net/doc/2021/02/retrieval_augmented_generation_|creationDate|2021-02-11 +http://www.semanlink.net/doc/2021/02/retrieval_augmented_generation_|tag|http://www.semanlink.net/tag/retrieval_augmented_lm +http://www.semanlink.net/doc/2021/02/retrieval_augmented_generation_|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2021/02/retrieval_augmented_generation_|comment|"> Huggingface Transformers recently added the Retrieval Augmented Generation (RAG) model, a new NLP architecture that leverages external documents (like Wikipedia) to augment its knowledge and achieve state of the art results on knowledge-intensive tasks + +[Hugging Face sur Twitter : ""Transformers release of the Retrieval-Augmented Generation model in collaboration with @facebookai!""](doc:2021/02/hugging_face_sur_twitter_tra)" +http://www.semanlink.net/doc/2021/02/retrieval_augmented_generation_|relatedDoc|http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra 
+http://www.semanlink.net/doc/2021/02/retrieval_augmented_generation_|title|Retrieval Augmented Generation with Huggingface Transformers and Ray Distributed Computing with Ray +http://www.semanlink.net/doc/2021/02/retrieval_augmented_generation_|bookmarkOf|https://medium.com/distributed-computing-with-ray/retrieval-augmented-generation-with-huggingface-transformers-and-ray-b09b56161b1e +http://www.semanlink.net/doc/2021/02/retrieval_augmented_generation_|bookmarkOf|https://huggingface.co/blog/ray-rag +http://www.semanlink.net/doc/2021/02/retrieval_augmented_generation_|creationTime|2021-02-11T08:49:32Z +http://www.semanlink.net/doc/2020/10/how_to_extract_text_from_pdf_fi|creationDate|2020-10-05 +http://www.semanlink.net/doc/2020/10/how_to_extract_text_from_pdf_fi|tag|http://www.semanlink.net/tag/pdf_extract +http://www.semanlink.net/doc/2020/10/how_to_extract_text_from_pdf_fi|title|How to extract text from PDF files - dida Machine Learning +http://www.semanlink.net/doc/2020/10/how_to_extract_text_from_pdf_fi|bookmarkOf|https://dida.do/blog/how-to-extract-text-from-pdf +http://www.semanlink.net/doc/2020/10/how_to_extract_text_from_pdf_fi|creationTime|2020-10-05T09:36:52Z +http://www.semanlink.net/doc/2020/11/the_unreasonable_syntactic_expr|creationDate|2020-11-07 +http://www.semanlink.net/doc/2020/11/the_unreasonable_syntactic_expr|tag|http://www.semanlink.net/tag/recurrent_neural_network +http://www.semanlink.net/doc/2020/11/the_unreasonable_syntactic_expr|comment|"[Comments](https://news.ycombinator.com/item?id=24995696) + +cf [Karpathy](tag:andrej_karpathy)'s [Unreasonable Effectiveness of Recurrent Neural Networks](doc:?uri=http%3A%2F%2Fkarpathy.github.io%2F2015%2F05%2F21%2Frnn-effectiveness%2F)" +http://www.semanlink.net/doc/2020/11/the_unreasonable_syntactic_expr|relatedDoc|http://karpathy.github.io/2015/05/21/rnn-effectiveness/ +http://www.semanlink.net/doc/2020/11/the_unreasonable_syntactic_expr|title|The Unreasonable Syntactic Expressivity of RNNs · John Hewitt +http://www.semanlink.net/doc/2020/11/the_unreasonable_syntactic_expr|bookmarkOf|https://nlp.stanford.edu/~johnhew/rnns-hierarchy.html +http://www.semanlink.net/doc/2020/11/the_unreasonable_syntactic_expr|creationTime|2020-11-07T09:44:56Z +http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|creationDate|2021-05-20 +http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|tag|http://www.semanlink.net/tag/short_text_clustering +http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|arxiv_author|Evangelos Milios +http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|arxiv_author|Norbert Zeh +http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|arxiv_author|Md Rashadul Hasan Rakib +http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|arxiv_author|Magdalena Jankowska +http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|comment|> Given a clustering of short texts obtained using an arbitrary clustering algorithm, iterative classification applies outlier removal to obtain outlier-free clusters. Then it trains a classification algorithm using the non-outliers based on their cluster distributions. Using the trained classification model, iterative classification reclassifies the outliers to obtain a new set of clusters. 
+http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|title|[2001.11631] Enhancement of Short Text Clustering by Iterative Classification
+http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|bookmarkOf|https://arxiv.org/abs/2001.11631
+http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|creationTime|2021-05-20T17:59:46Z
+http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|arxiv_summary|"Short text clustering is a challenging task due to the lack of signal
+contained in such short texts. In this work, we propose iterative
+classification as a method to boost the clustering quality (e.g., accuracy)
+of short texts. Given a clustering of short texts obtained using an arbitrary
+clustering algorithm, iterative classification applies outlier removal to
+obtain outlier-free clusters. Then it trains a classification algorithm using
+the non-outliers based on their cluster distributions. Using the trained
+classification model, iterative classification reclassifies the outliers to
+obtain a new set of clusters. By repeating this several times, we obtain a much
+improved clustering of texts. Our experimental results show that the proposed
+clustering enhancement method not only improves the clustering quality of
+different clustering methods (e.g., k-means, k-means--, and hierarchical
+clustering) but also outperforms the state-of-the-art short text clustering
+methods on several short text datasets by a statistically significant margin."
+http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|arxiv_firstAuthor|Md Rashadul Hasan Rakib
+http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|arxiv_updated|2020-01-31T02:12:05Z
+http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|arxiv_title|Enhancement of Short Text Clustering by Iterative Classification
+http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|arxiv_published|2020-01-31T02:12:05Z
+http://www.semanlink.net/doc/2021/05/2001_11631_enhancement_of_sho|arxiv_num|2001.11631
+http://www.semanlink.net/doc/2020/10/philippe_aghion_wikipedia|creationDate|2020-10-14
+http://www.semanlink.net/doc/2020/10/philippe_aghion_wikipedia|tag|http://www.semanlink.net/tag/economiste
+http://www.semanlink.net/doc/2020/10/philippe_aghion_wikipedia|title|Philippe Aghion — Wikipédia
+http://www.semanlink.net/doc/2020/10/philippe_aghion_wikipedia|bookmarkOf|https://fr.wikipedia.org/wiki/Philippe_Aghion
+http://www.semanlink.net/doc/2020/10/philippe_aghion_wikipedia|creationTime|2020-10-14T00:21:10Z
+http://www.semanlink.net/doc/2021/08/painttransformer_a_hugging_fa|creationDate|2021-08-11
+http://www.semanlink.net/doc/2021/08/painttransformer_a_hugging_fa|tag|http://www.semanlink.net/tag/cool
+http://www.semanlink.net/doc/2021/08/painttransformer_a_hugging_fa|tag|http://www.semanlink.net/tag/demo
+http://www.semanlink.net/doc/2021/08/painttransformer_a_hugging_fa|tag|http://www.semanlink.net/tag/huggingface_transformers
+http://www.semanlink.net/doc/2021/08/painttransformer_a_hugging_fa|tag|http://www.semanlink.net/tag/peinture
+http://www.semanlink.net/doc/2021/08/painttransformer_a_hugging_fa|comment|> Gradio demo for Paint Transformer: Feed Forward Neural Painting with Stroke Prediction.
+http://www.semanlink.net/doc/2021/08/painttransformer_a_hugging_fa|title|PaintTransformer - a Hugging Face Space by akhaliq +http://www.semanlink.net/doc/2021/08/painttransformer_a_hugging_fa|bookmarkOf|https://huggingface.co/spaces/akhaliq/PaintTransformer +http://www.semanlink.net/doc/2021/08/painttransformer_a_hugging_fa|creationTime|2021-08-11T12:39:46Z +http://www.semanlink.net/doc/2021/03/teddy_koker_sur_twitter_torc|creationDate|2021-03-25 +http://www.semanlink.net/doc/2021/03/teddy_koker_sur_twitter_torc|tag|http://www.semanlink.net/tag/ranking +http://www.semanlink.net/doc/2021/03/teddy_koker_sur_twitter_torc|tag|http://www.semanlink.net/tag/sorting +http://www.semanlink.net/doc/2021/03/teddy_koker_sur_twitter_torc|tag|http://www.semanlink.net/tag/pytorch +http://www.semanlink.net/doc/2021/03/teddy_koker_sur_twitter_torc|title|"Teddy Koker sur Twitter : ""Torchsort, an implementation of ""Fast Differentiable Sorting and Ranking"" in PyTorch""" +http://www.semanlink.net/doc/2021/03/teddy_koker_sur_twitter_torc|bookmarkOf|https://twitter.com/teddykoker/status/1374230481596743680 +http://www.semanlink.net/doc/2021/03/teddy_koker_sur_twitter_torc|creationTime|2021-03-25T17:24:25Z +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|creationDate|2021-06-30 +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|tag|http://www.semanlink.net/tag/haoussa +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|tag|http://www.semanlink.net/tag/african_languages +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|tag|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|tag|http://www.semanlink.net/tag/jerma +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|tag|http://www.semanlink.net/tag/songhai +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|arxiv_author|Chantal Enguehard LINA +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|arxiv_author|Mathieu Mangeot LIG +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|comment|This paper relates work done during the DiLAF project. It consists in converting 5 bilingual African language-French dictionaries originally in Word format into XML following the LMF model. The languages processed are Bambara, Hausa, Kanuri, Tamajaq and Songhai-zarma, still considered as under-resourced languages concerning Natural Language Processing tools. +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|title|[1405.5893] Computerization of African languages-French dictionaries +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|bookmarkOf|https://arxiv.org/abs/1405.5893 +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|creationTime|2021-06-30T00:33:09Z +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|arxiv_summary|"This paper relates work done during the DiLAF project. It consists in +converting 5 bilingual African language-French dictionaries originally in Word +format into XML following the LMF model. The languages processed are Bambara, +Hausa, Kanuri, Tamajaq and Songhai-zarma, still considered as under-resourced +languages concerning Natural Language Processing tools. Once converted, the +dictionaries are available online on the Jibiki platform for lookup and +modification. 
The DiLAF project is first presented. A description of each +dictionary follows. Then, the conversion methodology from .doc format to XML +files is presented. A specific point on the usage of Unicode follows. Then, +each step of the conversion into XML and LMF is detailed. The last part +presents the Jibiki lexical resources management platform used for the project." +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|arxiv_firstAuthor|Chantal Enguehard LINA +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|arxiv_updated|2014-05-22T20:15:57Z +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|arxiv_title|Computerization of African languages-French dictionaries +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|arxiv_published|2014-05-22T20:15:57Z +http://www.semanlink.net/doc/2021/06/1405_5893_computerization_of_|arxiv_num|1405.5893 +http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno|creationDate|2021-08-18 +http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno|tag|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno|tag|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno|comment|"Implementation of [From Text to Knowledge: The Information Extraction Pipeline by Tomaz Bratanic](doc:2021/08/from_text_to_knowledge_the_inf). +> I added the [LUKE](doc:2020/11/2010_01057_luke_deep_context) model to predict relations between entities." +http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno|relatedDoc|http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context +http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno|relatedDoc|http://www.semanlink.net/doc/2021/08/from_text_to_knowledge_the_inf +http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno|title|raphaelsty/textokb: Extract knowledge from raw text +http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno|bookmarkOf|https://github.com/raphaelsty/textokb +http://www.semanlink.net/doc/2021/08/raphaelsty_textokb_extract_kno|creationTime|2021-08-18T16:36:55Z +http://www.semanlink.net/doc/2020/11/as_europe_s_governments_lose_co|creationDate|2020-11-01 +http://www.semanlink.net/doc/2020/11/as_europe_s_governments_lose_co|tag|http://www.semanlink.net/tag/the_guardian +http://www.semanlink.net/doc/2020/11/as_europe_s_governments_lose_co|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/11/as_europe_s_governments_lose_co|title|As Europe's governments lose control of Covid, revolt is in the air The Guardian +http://www.semanlink.net/doc/2020/11/as_europe_s_governments_lose_co|bookmarkOf|https://www.theguardian.com/world/2020/nov/01/governments-lose-control-virus-revolt-civil-unrest-continent +http://www.semanlink.net/doc/2020/11/as_europe_s_governments_lose_co|creationTime|2020-11-01T12:01:28Z +http://www.semanlink.net/doc/2020/10/ai_has_cracked_a_key_mathematic|creationDate|2020-10-31 +http://www.semanlink.net/doc/2020/10/ai_has_cracked_a_key_mathematic|tag|http://www.semanlink.net/tag/ml_and_physics +http://www.semanlink.net/doc/2020/10/ai_has_cracked_a_key_mathematic|tag|http://www.semanlink.net/tag/navier_stokes 
+http://www.semanlink.net/doc/2020/10/ai_has_cracked_a_key_mathematic|tag|http://www.semanlink.net/tag/partial_differential_equations +http://www.semanlink.net/doc/2020/10/ai_has_cracked_a_key_mathematic|tag|http://www.semanlink.net/tag/fourier +http://www.semanlink.net/doc/2020/10/ai_has_cracked_a_key_mathematic|comment|[paper](https://arxiv.org/abs/2010.08895) +http://www.semanlink.net/doc/2020/10/ai_has_cracked_a_key_mathematic|title|AI has cracked a key mathematical puzzle for understanding our world MIT Technology Review +http://www.semanlink.net/doc/2020/10/ai_has_cracked_a_key_mathematic|bookmarkOf|https://www.technologyreview.com/2020/10/30/1011435/ai-fourier-neural-network-cracks-navier-stokes-and-partial-differential-equations/ +http://www.semanlink.net/doc/2020/10/ai_has_cracked_a_key_mathematic|creationTime|2020-10-31T12:32:37Z +http://www.semanlink.net/doc/2021/10/selective_classification_can_ma|creationDate|2021-10-16 +http://www.semanlink.net/doc/2021/10/selective_classification_can_ma|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2021/10/selective_classification_can_ma|tag|http://www.semanlink.net/tag/selective_classification +http://www.semanlink.net/doc/2021/10/selective_classification_can_ma|comment|> Selective classification, where models can abstain when they are unsure about a prediction, routinely improves average accuracy. Worryingly, we show that s.c. can also hurt accuracy on certain subgroups of the data. [twitter](https://twitter.com/ErikJones313/status/1448681482176790532) +http://www.semanlink.net/doc/2021/10/selective_classification_can_ma|title|Selective Classification Can Magnify Disparities Across Groups SAIL Blog +http://www.semanlink.net/doc/2021/10/selective_classification_can_ma|bookmarkOf|https://ai.stanford.edu/blog/sc-magnifies-disparities/ +http://www.semanlink.net/doc/2021/10/selective_classification_can_ma|creationTime|2021-10-16T09:13:10Z +http://www.semanlink.net/doc/2021/01/goldfire_cognitive_search_%7C_ihs|creationDate|2021-01-18 +http://www.semanlink.net/doc/2021/01/goldfire_cognitive_search_%7C_ihs|tag|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/doc/2021/01/goldfire_cognitive_search_%7C_ihs|tag|http://www.semanlink.net/tag/goldfire +http://www.semanlink.net/doc/2021/01/goldfire_cognitive_search_%7C_ihs|comment|> Unlock technical knowledge buried in enterprise systems +http://www.semanlink.net/doc/2021/01/goldfire_cognitive_search_%7C_ihs|title|Goldfire Cognitive Search IHS Markit +http://www.semanlink.net/doc/2021/01/goldfire_cognitive_search_%7C_ihs|bookmarkOf|https://ihsmarkit.com/products/enterprise-knowledge.html +http://www.semanlink.net/doc/2021/01/goldfire_cognitive_search_%7C_ihs|creationTime|2021-01-18T16:56:24Z +http://www.semanlink.net/doc/2021/04/frontiers_%7C_enabling_spike_base|creationDate|2021-04-19 +http://www.semanlink.net/doc/2021/04/frontiers_%7C_enabling_spike_base|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2021/04/frontiers_%7C_enabling_spike_base|tag|http://www.semanlink.net/tag/backpropagation +http://www.semanlink.net/doc/2021/04/frontiers_%7C_enabling_spike_base|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/doc/2021/04/frontiers_%7C_enabling_spike_base|tag|http://www.semanlink.net/tag/spiking_neural_network +http://www.semanlink.net/doc/2021/04/frontiers_%7C_enabling_spike_base|comment|> However, the ANN-SNN conversion scheme fails to capture the temporal dynamics of a spiking 
system +http://www.semanlink.net/doc/2021/04/frontiers_%7C_enabling_spike_base|title|Enabling Spike-Based Backpropagation for Training Deep Neural Network Architectures +http://www.semanlink.net/doc/2021/04/frontiers_%7C_enabling_spike_base|bookmarkOf|https://www.frontiersin.org/articles/10.3389/fnins.2020.00119/full +http://www.semanlink.net/doc/2021/04/frontiers_%7C_enabling_spike_base|creationTime|2021-04-19T18:21:19Z +http://www.semanlink.net/doc/2021/04/eloge_des_eliminatoires_une_b|creationDate|2021-04-01 +http://www.semanlink.net/doc/2021/04/eloge_des_eliminatoires_une_b|tag|http://www.semanlink.net/tag/football +http://www.semanlink.net/doc/2021/04/eloge_des_eliminatoires_une_b|title|Éloge des éliminatoires – Une balle dans le pied +http://www.semanlink.net/doc/2021/04/eloge_des_eliminatoires_une_b|bookmarkOf|https://www.lemonde.fr/blog/latta/2021/04/01/eloge-des-eliminatoires/ +http://www.semanlink.net/doc/2021/04/eloge_des_eliminatoires_une_b|creationTime|2021-04-01T15:54:10Z +http://www.semanlink.net/doc/2021/04/covid_19_%C2%AB_l%E2%80%99inculture_scient|creationDate|2021-04-28 +http://www.semanlink.net/doc/2021/04/covid_19_%C2%AB_l%E2%80%99inculture_scient|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales +http://www.semanlink.net/doc/2021/04/covid_19_%C2%AB_l%E2%80%99inculture_scient|tag|http://www.semanlink.net/tag/elites +http://www.semanlink.net/doc/2021/04/covid_19_%C2%AB_l%E2%80%99inculture_scient|tag|http://www.semanlink.net/tag/etat_de_la_france +http://www.semanlink.net/doc/2021/04/covid_19_%C2%AB_l%E2%80%99inculture_scient|tag|http://www.semanlink.net/tag/nullite_francaise +http://www.semanlink.net/doc/2021/04/covid_19_%C2%AB_l%E2%80%99inculture_scient|title|Covid-19 : « L’inculture scientifique des élites françaises a des effets profonds sur la conduite des affaires de l’Etat » +http://www.semanlink.net/doc/2021/04/covid_19_%C2%AB_l%E2%80%99inculture_scient|bookmarkOf|https://www.lemonde.fr/idees/article/2021/04/28/covid-19-l-inculture-scientifique-des-elites-francaises-a-des-effets-profonds-sur-la-conduite-des-affaires-de-l-etat_6078385_3232.html +http://www.semanlink.net/doc/2021/04/covid_19_%C2%AB_l%E2%80%99inculture_scient|creationTime|2021-04-28T22:36:52Z +http://www.semanlink.net/doc/2021/01/weaviate|creationDate|2021-01-18 +http://www.semanlink.net/doc/2021/01/weaviate|tag|http://www.semanlink.net/tag/ai_cloud_service +http://www.semanlink.net/doc/2021/01/weaviate|comment|> Weaviate is a cloud-native, modular, real-time vector search engine built to scale your machine learning models. 
+http://www.semanlink.net/doc/2021/01/weaviate|title|Weaviate +http://www.semanlink.net/doc/2021/01/weaviate|bookmarkOf|https://www.semi.technology/developers/weaviate/current/ +http://www.semanlink.net/doc/2021/01/weaviate|creationTime|2021-01-18T19:26:34Z +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|creationDate|2020-10-15 +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|tag|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|tag|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_author|Jack Joyner +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_author|Stash Rowe +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_author|Yulan Guo +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_author|Isaac Ronald Ward +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_author|Casey Lickfold +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_author|Mohammed Bennamoun +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|title|[2010.05234] A Practical Guide to Graph Neural Networks +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|bookmarkOf|https://arxiv.org/abs/2010.05234 +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|creationTime|2020-10-15T00:07:48Z +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_summary|"Graph neural networks (GNNs) have recently grown in popularity in the field +of artificial intelligence due to their unique ability to ingest relatively +unstructured data types as input data. Although some elements of the GNN +architecture are conceptually similar in operation to traditional neural +networks (and neural network variants), other elements represent a departure +from traditional deep learning techniques. This tutorial exposes the power and +novelty of GNNs to the average deep learning enthusiast by collating and +presenting details on the motivations, concepts, mathematics, and applications +of the most common types of GNNs. Importantly, we present this tutorial +concisely, alongside worked code examples, and at an introductory pace, thus +providing a practical and accessible guide to understanding and using GNNs." 
+http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_firstAuthor|Isaac Ronald Ward +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_updated|2020-10-11T12:36:17Z +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_title|A Practical Guide to Graph Neural Networks +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_published|2020-10-11T12:36:17Z +http://www.semanlink.net/doc/2020/10/2010_05234_a_practical_guide_|arxiv_num|2010.05234 +http://www.semanlink.net/doc/2020/08/le_centenaire_de_la_reconnaissa|creationDate|2020-08-09 +http://www.semanlink.net/doc/2020/08/le_centenaire_de_la_reconnaissa|tag|http://www.semanlink.net/tag/kurdes +http://www.semanlink.net/doc/2020/08/le_centenaire_de_la_reconnaissa|title|Le centenaire de la reconnaissance internationale du Kurdistan – Un si Proche Orient +http://www.semanlink.net/doc/2020/08/le_centenaire_de_la_reconnaissa|bookmarkOf|https://www.lemonde.fr/blog/filiu/2020/08/09/le-centenaire-de-la-reconnaissance-internationale-du-kurdistan/ +http://www.semanlink.net/doc/2020/08/le_centenaire_de_la_reconnaissa|creationTime|2020-08-09T12:09:13Z +http://www.semanlink.net/doc/2021/10/enterprise_knowledge_graph_foun|creationDate|2021-10-02 +http://www.semanlink.net/doc/2021/10/enterprise_knowledge_graph_foun|tag|http://www.semanlink.net/tag/enterprise_knowledge_graph +http://www.semanlink.net/doc/2021/10/enterprise_knowledge_graph_foun|tag|http://www.semanlink.net/tag/marcel_frohlich +http://www.semanlink.net/doc/2021/10/enterprise_knowledge_graph_foun|title|Enterprise Knowledge Graph Foundation +http://www.semanlink.net/doc/2021/10/enterprise_knowledge_graph_foun|bookmarkOf|https://www.ekgf.org/ +http://www.semanlink.net/doc/2021/10/enterprise_knowledge_graph_foun|creationTime|2021-10-02T09:56:03Z +http://www.semanlink.net/doc/2020/09/guide_sur_watchservice_dans_jav|creationDate|2020-09-02 +http://www.semanlink.net/doc/2020/09/guide_sur_watchservice_dans_jav|tag|http://www.semanlink.net/tag/java +http://www.semanlink.net/doc/2020/09/guide_sur_watchservice_dans_jav|comment|pour être averti quand un fichier est modifié +http://www.semanlink.net/doc/2020/09/guide_sur_watchservice_dans_jav|title|Guide sur WatchService dans Java NIO2 +http://www.semanlink.net/doc/2020/09/guide_sur_watchservice_dans_jav|bookmarkOf|https://www.codeflow.site/fr/article/java-nio2-watchservice +http://www.semanlink.net/doc/2020/09/guide_sur_watchservice_dans_jav|creationTime|2020-09-02T15:29:24Z +http://www.semanlink.net/doc/2021/08/china_is_forcing_tourists_to_in|creationDate|2021-08-08 +http://www.semanlink.net/doc/2021/08/china_is_forcing_tourists_to_in|tag|http://www.semanlink.net/tag/malware +http://www.semanlink.net/doc/2021/08/china_is_forcing_tourists_to_in|tag|http://www.semanlink.net/tag/ouigour +http://www.semanlink.net/doc/2021/08/china_is_forcing_tourists_to_in|tag|http://www.semanlink.net/tag/chine +http://www.semanlink.net/doc/2021/08/china_is_forcing_tourists_to_in|title|China Is Forcing Tourists to Install Text-Stealing Malware at its Border +http://www.semanlink.net/doc/2021/08/china_is_forcing_tourists_to_in|bookmarkOf|https://www.vice.com/en/article/7xgame/at-chinese-border-tourists-forced-to-install-a-text-stealing-piece-of-malware +http://www.semanlink.net/doc/2021/08/china_is_forcing_tourists_to_in|creationTime|2021-08-08T00:22:12Z +http://www.semanlink.net/doc/2021/08/quand_rome_a_ete_vaincue_par_de|creationDate|2021-08-18 
+http://www.semanlink.net/doc/2021/08/quand_rome_a_ete_vaincue_par_de|tag|http://www.semanlink.net/tag/refugies +http://www.semanlink.net/doc/2021/08/quand_rome_a_ete_vaincue_par_de|tag|http://www.semanlink.net/tag/goths +http://www.semanlink.net/doc/2021/08/quand_rome_a_ete_vaincue_par_de|tag|http://www.semanlink.net/tag/grandes_invasions +http://www.semanlink.net/doc/2021/08/quand_rome_a_ete_vaincue_par_de|tag|http://www.semanlink.net/tag/chute_de_l_empire_romain +http://www.semanlink.net/doc/2021/08/quand_rome_a_ete_vaincue_par_de|comment|"> En 376, pourchassés par les Huns, des milliers de Goths sont accueillis par l’Empire. Maltraités, les réfugiés se révoltent, et finissent par écraser l’armée romaine le 9 août 378. + +ce qui montre qu'il faut accueillir convenablement les réfugiés. +" +http://www.semanlink.net/doc/2021/08/quand_rome_a_ete_vaincue_par_de|title|Quand Rome a été vaincue par des réfugiés +http://www.semanlink.net/doc/2021/08/quand_rome_a_ete_vaincue_par_de|bookmarkOf|https://www.lemonde.fr/series-d-ete/article/2021/08/17/a-andrinople-rome-vaincue-par-des-refugies_6091674_3451060.html +http://www.semanlink.net/doc/2021/08/quand_rome_a_ete_vaincue_par_de|creationTime|2021-08-18T10:50:14Z +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|creationDate|2021-04-11 +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|tag|http://www.semanlink.net/tag/continual_learning +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_author|Andrea Gesmundo +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_author|Stanislaw Jastrzebski +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_author|Neil Houlsby +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_author|Quentin de Laroussilhe +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_author|Mona Attariyan +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_author|Andrei Giurgiu +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_author|Sylvain Gelly +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_author|Bruna Morrone +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|comment|"**Adapter tuning for NLP**. + + +A strategy for tuning a large text model on several +downstream tasks, that permits training on +tasks sequentially, and that adds only a small number +of additional parameters per task. + +New modules added between layers of a +pre-trained network. Parameters of the original network are frozen +and therefore may be shared by many tasks. 
+ + +[GitHub](https://github.com/google-research/adapter-bert)" +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|title|[1902.00751] Parameter-Efficient Transfer Learning for NLP +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|bookmarkOf|https://arxiv.org/abs/1902.00751 +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|creationTime|2021-04-11T13:13:13Z +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_summary|"Fine-tuning large pre-trained models is an effective transfer mechanism in +NLP. However, in the presence of many downstream tasks, fine-tuning is +parameter inefficient: an entire new model is required for every task. As an +alternative, we propose transfer with adapter modules. Adapter modules yield a +compact and extensible model; they add only a few trainable parameters per +task, and new tasks can be added without revisiting previous ones. The +parameters of the original network remain fixed, yielding a high degree of +parameter sharing. To demonstrate adapter's effectiveness, we transfer the +recently proposed BERT Transformer model to 26 diverse text classification +tasks, including the GLUE benchmark. Adapters attain near state-of-the-art +performance, whilst adding only a few parameters per task. On GLUE, we attain +within 0.4% of the performance of full fine-tuning, adding only 3.6% parameters +per task. By contrast, fine-tuning trains 100% of the parameters per task." +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_firstAuthor|Neil Houlsby +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_updated|2019-06-13T17:48:30Z +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_title|Parameter-Efficient Transfer Learning for NLP +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_published|2019-02-02T16:29:47Z +http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien|arxiv_num|1902.00751 +http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra|creationDate|2021-02-23 +http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra|tag|http://www.semanlink.net/tag/retrieval_augmented_lm +http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra|tag|http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model +http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra|comment|"> the **RAG model is trained end-to-end for retrieval-in-the-loop generation**, a new paradigm that allows a model to go find useful information in a text corpus when generating. 
+ +**No need to try to encode all of that knowledge in a trillion parameters any more ;)**" +http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra|title|"Hugging Face sur Twitter : ""Transformers release of the Retrieval-Augmented Generation model in collaboration with @facebookai!""" +http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra|bookmarkOf|https://twitter.com/huggingface/status/1310597554774716418?s=20 +http://www.semanlink.net/doc/2021/02/hugging_face_sur_twitter_tra|creationTime|2021-02-23T09:38:55Z +http://www.semanlink.net/doc/2021/04/patrick_boucheron_histoire_de|creationDate|2021-04-19 +http://www.semanlink.net/doc/2021/04/patrick_boucheron_histoire_de|tag|http://www.semanlink.net/tag/college_de_france +http://www.semanlink.net/doc/2021/04/patrick_boucheron_histoire_de|tag|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/doc/2021/04/patrick_boucheron_histoire_de|tag|http://www.semanlink.net/tag/patrick_boucheron +http://www.semanlink.net/doc/2021/04/patrick_boucheron_histoire_de|title|Patrick Boucheron - Histoire des pouvoirs en Europe occidentale, XIIIᵉ-XVIᵉ siècle - Collège de France +http://www.semanlink.net/doc/2021/04/patrick_boucheron_histoire_de|bookmarkOf|https://www.college-de-france.fr/site/patrick-boucheron/index.htm +http://www.semanlink.net/doc/2021/04/patrick_boucheron_histoire_de|creationTime|2021-04-19T15:50:00Z +http://www.semanlink.net/doc/2021/05/%C2%AB_le_%E2%80%9Cbiodiversite_scepticisme%E2%80%9D|creationDate|2021-05-24 +http://www.semanlink.net/doc/2021/05/%C2%AB_le_%E2%80%9Cbiodiversite_scepticisme%E2%80%9D|tag|http://www.semanlink.net/tag/lobby +http://www.semanlink.net/doc/2021/05/%C2%AB_le_%E2%80%9Cbiodiversite_scepticisme%E2%80%9D|tag|http://www.semanlink.net/tag/publication_scientifique +http://www.semanlink.net/doc/2021/05/%C2%AB_le_%E2%80%9Cbiodiversite_scepticisme%E2%80%9D|tag|http://www.semanlink.net/tag/antiscience +http://www.semanlink.net/doc/2021/05/%C2%AB_le_%E2%80%9Cbiodiversite_scepticisme%E2%80%9D|tag|http://www.semanlink.net/tag/biodiversite_declin +http://www.semanlink.net/doc/2021/05/%C2%AB_le_%E2%80%9Cbiodiversite_scepticisme%E2%80%9D|tag|http://www.semanlink.net/tag/insect_collapse +http://www.semanlink.net/doc/2021/05/%C2%AB_le_%E2%80%9Cbiodiversite_scepticisme%E2%80%9D|title|« Le “biodiversité-scepticisme”, plus discret que celui contre le dérèglement climatique, est en un sens bien plus inquiétant » +http://www.semanlink.net/doc/2021/05/%C2%AB_le_%E2%80%9Cbiodiversite_scepticisme%E2%80%9D|bookmarkOf|https://www.lemonde.fr/idees/article/2021/05/24/le-biodiversite-scepticisme-plus-discret-que-celui-contre-le-dereglement-climatique-est-en-un-sens-bien-plus-inquietant_6081238_3232.html +http://www.semanlink.net/doc/2021/05/%C2%AB_le_%E2%80%9Cbiodiversite_scepticisme%E2%80%9D|creationTime|2021-05-24T14:46:11Z +http://www.semanlink.net/doc/2020/08/sparse_local_embeddings_for_ext|creationDate|2020-08-11 +http://www.semanlink.net/doc/2020/08/sparse_local_embeddings_for_ext|tag|http://www.semanlink.net/tag/knn_in_mlc +http://www.semanlink.net/doc/2020/08/sparse_local_embeddings_for_ext|tag|http://www.semanlink.net/tag/extreme_multi_label_classification +http://www.semanlink.net/doc/2020/08/sparse_local_embeddings_for_ext|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2020/08/sparse_local_embeddings_for_ext|tag|http://www.semanlink.net/tag/manik_varma +http://www.semanlink.net/doc/2020/08/sparse_local_embeddings_for_ext|comment|"Embedding style **algorithm that 
performs a clustering of the training
+points and applies a learning procedure in each cluster separately**.
+
+> The main technical contribution in SLEEC is a formulation for learning a small ensemble
+of local distance preserving embeddings which can accurately predict infrequently
+occurring (tail) labels. This allows SLEEC to break free of the traditional
+low-rank assumption and boost classification accuracy by **learning embeddings
+which preserve pairwise distances between only the nearest label vectors**.
+>
+
+""Tail labels"" :
+> The critical assumption made by embedding methods, that the training label matrix
+is low-rank, is violated in almost all real world applications.
+
+
+[Python implementation](https://github.com/xiaohan2012/sleec_python)"
+http://www.semanlink.net/doc/2020/08/sparse_local_embeddings_for_ext|title|SLEEC: Sparse Local Embeddings for Extreme Multi-label Classification (2015)
+http://www.semanlink.net/doc/2020/08/sparse_local_embeddings_for_ext|bookmarkOf|https://www.microsoft.com/en-us/research/wp-content/uploads/2016/07/BJKVJ15_NIPS.pdf
+http://www.semanlink.net/doc/2020/08/sparse_local_embeddings_for_ext|creationTime|2020-08-11T22:13:55Z
+http://www.semanlink.net/doc/2021/01/emily_m_bender_sur_twitter_|creationDate|2021-01-23
+http://www.semanlink.net/doc/2021/01/emily_m_bender_sur_twitter_|tag|http://www.semanlink.net/tag/stochastic_parrots
+http://www.semanlink.net/doc/2021/01/emily_m_bender_sur_twitter_|tag|http://www.semanlink.net/tag/language_models_size
+http://www.semanlink.net/doc/2021/01/emily_m_bender_sur_twitter_|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2021/01/emily_m_bender_sur_twitter_|title|"Emily M. Bender sur Twitter : ""On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?"""
+http://www.semanlink.net/doc/2021/01/emily_m_bender_sur_twitter_|bookmarkOf|https://twitter.com/emilymbender/status/1352798178672742400
+http://www.semanlink.net/doc/2021/01/emily_m_bender_sur_twitter_|creationTime|2021-01-23T16:48:20Z
+http://www.semanlink.net/doc/2020/12/julie_grollier_a_bio_inspired|creationDate|2020-12-10
+http://www.semanlink.net/doc/2020/12/julie_grollier_a_bio_inspired|tag|http://www.semanlink.net/tag/julie_grollier
+http://www.semanlink.net/doc/2020/12/julie_grollier_a_bio_inspired|title|Julie Grollier, a (bio)inspired researcher CNRS News
+http://www.semanlink.net/doc/2020/12/julie_grollier_a_bio_inspired|bookmarkOf|https://news.cnrs.fr/articles/julie-grollier-a-bioinspired-researcher
+http://www.semanlink.net/doc/2020/12/julie_grollier_a_bio_inspired|creationTime|2020-12-10T22:42:01Z
+http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|creationDate|2021-10-03
+http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|tag|http://www.semanlink.net/tag/nlp_microsoft
+http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|tag|http://www.semanlink.net/tag/low_resource_languages
+http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_author|Sebastin Santy
+http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_author|Kalika Bali
+http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_author|Pratik Joshi
+http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_author|Monojit Choudhury
+http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_author|Amar Budhiraja
+http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|title|[2004.09095] The State and Fate of Linguistic Diversity and Inclusion in the NLP World +http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|bookmarkOf|https://arxiv.org/abs/2004.09095 +http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|creationTime|2021-10-03T11:50:06Z +http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_summary|"Language technologies contribute to promoting multilingualism and linguistic +diversity around the world. However, only a very small number of the over 7000 +languages of the world are represented in the rapidly evolving language +technologies and applications. In this paper we look at the relation between +the types of languages, resources, and their representation in NLP conferences +to understand the trajectory that different languages have followed over time. +Our quantitative investigation underlines the disparity between languages, +especially in terms of their resources, and calls into question the ""language +agnostic"" status of current models and systems. Through this paper, we attempt +to convince the ACL community to prioritise the resolution of the predicaments +highlighted here, so that no language is left behind." +http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_firstAuthor|Pratik Joshi +http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_updated|2021-01-27T03:39:20Z +http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_title|The State and Fate of Linguistic Diversity and Inclusion in the NLP World +http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_published|2020-04-20T07:19:22Z +http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate|arxiv_num|2004.09095 +http://www.semanlink.net/doc/2021/04/henry_every_le_pirate_le_plus_|creationDate|2021-04-03 +http://www.semanlink.net/doc/2021/04/henry_every_le_pirate_le_plus_|tag|http://www.semanlink.net/tag/commerce_mondial +http://www.semanlink.net/doc/2021/04/henry_every_le_pirate_le_plus_|tag|http://www.semanlink.net/tag/histoire_anglaise +http://www.semanlink.net/doc/2021/04/henry_every_le_pirate_le_plus_|tag|http://www.semanlink.net/tag/piraterie +http://www.semanlink.net/doc/2021/04/henry_every_le_pirate_le_plus_|tag|http://www.semanlink.net/tag/xviie_siecle +http://www.semanlink.net/doc/2021/04/henry_every_le_pirate_le_plus_|tag|http://www.semanlink.net/tag/mughal_empire +http://www.semanlink.net/doc/2021/04/henry_every_le_pirate_le_plus_|title|Henry Every, le pirate le plus recherché du XVIIe siècle, refait surface en Nouvelle-Angleterre +http://www.semanlink.net/doc/2021/04/henry_every_le_pirate_le_plus_|bookmarkOf|https://www.lemonde.fr/big-browser/article/2021/04/02/henry-every-le-pirate-le-plus-recherche-du-xviie-siecle-refait-surface-en-nouvelle-angleterre_6075415_4832693.html +http://www.semanlink.net/doc/2021/04/henry_every_le_pirate_le_plus_|creationTime|2021-04-03T11:59:47Z +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|creationDate|2020-12-01 +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|tag|http://www.semanlink.net/tag/nlp_pretraining +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|tag|http://www.semanlink.net/tag/language_model_fine_tuning 
+http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|tag|http://www.semanlink.net/tag/allennlp +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|tag|http://www.semanlink.net/tag/domain_adaptation +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_author|Swabha Swayamdipta +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_author|Iz Beltagy +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_author|Noah A. Smith +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_author|Ana Marasović +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_author|Doug Downey +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_author|Suchin Gururangan +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_author|Kyle Lo +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|comment|> a study across four domains (biomedical and computer science publications, news, and reviews) and eight classification tasks, showing that a second phase of pretraining in-domain (domain-adaptive pretraining) leads to performance gains, under both high- and low-resource settings. Moreover, adapting to the task's unlabeled data (task-adaptive pretraining) improves performance even after domain-adaptive pretraining. +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|title|[2004.10964] Don't Stop Pretraining: Adapt Language Models to Domains and Tasks +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|bookmarkOf|https://arxiv.org/abs/2004.10964 +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|creationTime|2020-12-01T15:43:33Z +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_summary|"Language models pretrained on text from a wide variety of sources form the +foundation of today's NLP. In light of the success of these broad-coverage +models, we investigate whether it is still helpful to tailor a pretrained model +to the domain of a target task. We present a study across four domains +(biomedical and computer science publications, news, and reviews) and eight +classification tasks, showing that a second phase of pretraining in-domain +(domain-adaptive pretraining) leads to performance gains, under both high- and +low-resource settings. Moreover, adapting to the task's unlabeled data +(task-adaptive pretraining) improves performance even after domain-adaptive +pretraining. Finally, we show that adapting to a task corpus augmented using +simple data selection strategies is an effective alternative, especially when +resources for domain-adaptive pretraining might be unavailable. Overall, we +consistently find that multi-phase adaptive pretraining offers large gains in +task performance." 
+http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_firstAuthor|Suchin Gururangan +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_updated|2020-05-05T22:00:44Z +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_title|Don't Stop Pretraining: Adapt Language Models to Domains and Tasks +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_published|2020-04-23T04:21:19Z +http://www.semanlink.net/doc/2020/12/2004_10964_don_t_stop_pretrai|arxiv_num|2004.10964 +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|creationDate|2021-02-08 +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|tag|http://www.semanlink.net/tag/allennlp +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|tag|http://www.semanlink.net/tag/commonsense_question_answering +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|tag|http://www.semanlink.net/tag/zero_shot +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|arxiv_author|Antoine Bosselut +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|arxiv_author|Ronan Le Bras +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|arxiv_author|Yejin Choi +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|title|[1911.03876] Dynamic Neuro-Symbolic Knowledge Graph Construction for Zero-shot Commonsense Question Answering +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|bookmarkOf|https://arxiv.org/abs/1911.03876 +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|creationTime|2021-02-08T13:48:51Z +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|arxiv_summary|"Understanding narratives requires reasoning about implicit world knowledge +related to the causes, effects, and states of situations described in text. At +the core of this challenge is how to access contextually relevant knowledge on +demand and reason over it. +In this paper, we present initial studies toward zero-shot commonsense +question answering by formulating the task as inference over dynamically +generated commonsense knowledge graphs. In contrast to previous studies for +knowledge integration that rely on retrieval of existing knowledge from static +knowledge graphs, our study requires commonsense knowledge integration where +contextually relevant knowledge is often not present in existing knowledge +bases. Therefore, we present a novel approach that generates +contextually-relevant symbolic knowledge structures on demand using generative +neural commonsense knowledge models. +Empirical results on two datasets demonstrate the efficacy of our +neuro-symbolic approach for dynamically constructing knowledge graphs for +reasoning. Our approach achieves significant performance boosts over pretrained +language models and vanilla knowledge models, all while providing interpretable +reasoning paths for its predictions." 
+http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|arxiv_firstAuthor|Antoine Bosselut +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|arxiv_updated|2020-10-30T07:30:59Z +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|arxiv_title|Dynamic Neuro-Symbolic Knowledge Graph Construction for Zero-shot Commonsense Question Answering +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|arxiv_published|2019-11-10T08:20:20Z +http://www.semanlink.net/doc/2021/02/1911_03876_dynamic_neuro_symb|arxiv_num|1911.03876 +http://www.semanlink.net/doc/2020/09/petar_velickovic_sur_twitter_|creationDate|2020-09-18 +http://www.semanlink.net/doc/2020/09/petar_velickovic_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/09/petar_velickovic_sur_twitter_|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/09/petar_velickovic_sur_twitter_|title|"Petar Veličković sur Twitter : ""resources I'd recommend for getting started with Graph Neural Nets (GNNs)," +http://www.semanlink.net/doc/2020/09/petar_velickovic_sur_twitter_|bookmarkOf|https://twitter.com/PetarV_93/status/1306689702020382720?s=20 +http://www.semanlink.net/doc/2020/09/petar_velickovic_sur_twitter_|creationTime|2020-09-18T08:45:10Z +http://www.semanlink.net/doc/2020/08/a_survey_of_hierarchical_classi|creationDate|2020-08-15 +http://www.semanlink.net/doc/2020/08/a_survey_of_hierarchical_classi|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/08/a_survey_of_hierarchical_classi|tag|http://www.semanlink.net/tag/hierarchical_classification +http://www.semanlink.net/doc/2020/08/a_survey_of_hierarchical_classi|title|A survey of hierarchical classification across different application domains (2011) +http://www.semanlink.net/doc/2020/08/a_survey_of_hierarchical_classi|bookmarkOf|https://www.researchgate.net/publication/225716424_A_survey_of_hierarchical_classification_across_different_application_domains +http://www.semanlink.net/doc/2020/08/a_survey_of_hierarchical_classi|creationTime|2020-08-15T12:10:51Z +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|creationDate|2020-11-11 +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|tag|http://www.semanlink.net/tag/recommender_systems +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|arxiv_author|Bin Cui +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|arxiv_author|Shiwen Wu +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|arxiv_author|Fei Sun +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|arxiv_author|Wentao Zhang +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|title|[2011.02260] Graph Neural Networks in Recommender Systems: A Survey +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|bookmarkOf|https://arxiv.org/abs/2011.02260 +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|creationTime|2020-11-11T11:04:40Z +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|arxiv_summary|"With the explosive growth of online information, recommender 
systems play a +key role to alleviate such information overload. Due to the important +application value of recommender system, there have always been emerging works +in this field. In recent years, graph neural network (GNN) techniques have +gained considerable interests which can naturally integrate node information +and topological structure. Owing to the outperformance of GNN in learning on +graph data, GNN methods have been widely applied in many fields. In recommender +systems, the main challenge is to learn the efficient user/item embeddings from +their interactions and side information if available. Since most of the +information essentially has graph structure and GNNs have superiority in +representation learning, the field of utilizing graph neural network in +recommender systems is flourishing. This article aims to provide a +comprehensive review of recent research efforts on graph neural network based +recommender systems. Specifically, we provide a taxonomy of graph neural +network based recommendation models and state new perspectives pertaining to +the development of this field." +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|arxiv_firstAuthor|Shiwen Wu +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|arxiv_updated|2020-11-04T12:57:47Z +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|arxiv_title|Graph Neural Networks in Recommender Systems: A Survey +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|arxiv_published|2020-11-04T12:57:47Z +http://www.semanlink.net/doc/2020/11/2011_02260_graph_neural_netwo|arxiv_num|2011.02260 +http://www.semanlink.net/doc/2021/10/detecting_duplicate_questions_|creationDate|2021-10-14 +http://www.semanlink.net/doc/2021/10/detecting_duplicate_questions_|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2021/10/detecting_duplicate_questions_|tag|http://www.semanlink.net/tag/quora_question_pairs +http://www.semanlink.net/doc/2021/10/detecting_duplicate_questions_|tag|http://www.semanlink.net/tag/duplicate_detection +http://www.semanlink.net/doc/2021/10/detecting_duplicate_questions_|title|Detecting Duplicate Questions (2019) +http://www.semanlink.net/doc/2021/10/detecting_duplicate_questions_|bookmarkOf|https://web.stanford.edu/class/archive/cs/cs224n/cs224n.1194/reports/custom/15842506.pdf +http://www.semanlink.net/doc/2021/10/detecting_duplicate_questions_|creationTime|2021-10-14T11:47:03Z +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|creationDate|2020-09-02 +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|tag|http://www.semanlink.net/tag/good +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|tag|http://www.semanlink.net/tag/wikipedia2vec +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|tag|http://www.semanlink.net/tag/entities +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|tag|http://www.semanlink.net/tag/entity_salience +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|tag|http://www.semanlink.net/tag/ikuya_yamada 
+http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|arxiv_author|Ikuya Yamada +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|arxiv_author|Hiroyuki Shindo +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|comment|"A model that performs **text classification using entities in a knowledge base**. + +> Entities provide unambiguous and relevant semantic signals that are beneficial for capturing semantics in texts. We combine **simple high-recall entity detection based on a dictionary** (word->list of entities), to detect entities in a document, with a novel neural **attention mechanism that enables the model to focus on a small number of unambiguous and relevant entities**. + +2 steps: + +1. Entity detection +2. Classification using the detected entities (+text) as inputs + +Regarding entity linking, a local model which uses cosine +similarity between the embedding of the target +entity and the word-based representation of +the document to capture the relevance of an entity +given a document. + +Embeddings from the KB: computed using [#Wikipedia2Vec](tag:wikipedia2vec) (similar words and entities +close to one another in a unified vector space) + +Model using attention, with 2 features : + +- cosine similarity between the +embedding of the entity and the word based +representation of the document +- the probability that the entity +name refers to the entity in KB. + +Somewhat [related](doc:2020/01/investigating_entity_knowledge_)" +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|relatedDoc|http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_ +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|title|[1909.01259] Neural Attentive Bag-of-Entities Model for Text Classification +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|bookmarkOf|https://arxiv.org/abs/1909.01259 +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|creationTime|2020-09-02T16:46:43Z +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|arxiv_summary|"This study proposes a Neural Attentive Bag-of-Entities model, which is a +neural network model that performs text classification using entities in a +knowledge base. Entities provide unambiguous and relevant semantic signals that +are beneficial for capturing semantics in texts. We combine simple high-recall +entity detection based on a dictionary, to detect entities in a document, with +a novel neural attention mechanism that enables the model to focus on a small +number of unambiguous and relevant entities. We tested the effectiveness of our +model using two standard text classification datasets (i.e., the 20 Newsgroups +and R8 datasets) and a popular factoid question answering dataset based on a +trivia quiz game. As a result, our model achieved state-of-the-art results on +all datasets. The source code of the proposed model is available online at +https://github.com/wikipedia2vec/wikipedia2vec." 
+http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|arxiv_firstAuthor|Ikuya Yamada +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|arxiv_updated|2019-09-10T10:23:49Z +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|arxiv_title|Neural Attentive Bag-of-Entities Model for Text Classification +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|arxiv_published|2019-09-03T15:50:34Z +http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b|arxiv_num|1909.01259 +http://www.semanlink.net/doc/2021/02/olaf_hartig_sur_twitter_ques|creationDate|2021-02-20 +http://www.semanlink.net/doc/2021/02/olaf_hartig_sur_twitter_ques|tag|http://www.semanlink.net/tag/sparql_endpoint +http://www.semanlink.net/doc/2021/02/olaf_hartig_sur_twitter_ques|tag|http://www.semanlink.net/tag/olaf_hartig +http://www.semanlink.net/doc/2021/02/olaf_hartig_sur_twitter_ques|tag|http://www.semanlink.net/tag/rest +http://www.semanlink.net/doc/2021/02/olaf_hartig_sur_twitter_ques|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/02/olaf_hartig_sur_twitter_ques|title|"Olaf Hartig sur Twitter : ""Question: are there any production-ready tools for providing a SPARQL endpoint over a REST-based Web service?""" +http://www.semanlink.net/doc/2021/02/olaf_hartig_sur_twitter_ques|bookmarkOf|https://twitter.com/olafhartig/status/1362767817305706497 +http://www.semanlink.net/doc/2021/02/olaf_hartig_sur_twitter_ques|creationTime|2021-02-20T09:33:43Z +http://www.semanlink.net/doc/2021/10/au_royaume_des_champignons|creationDate|2021-10-22 +http://www.semanlink.net/doc/2021/10/au_royaume_des_champignons|tag|http://www.semanlink.net/tag/symbiose +http://www.semanlink.net/doc/2021/10/au_royaume_des_champignons|tag|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/doc/2021/10/au_royaume_des_champignons|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.semanlink.net/doc/2021/10/au_royaume_des_champignons|tag|http://www.semanlink.net/tag/champignon +http://www.semanlink.net/doc/2021/10/au_royaume_des_champignons|title|Au royaume des champignons +http://www.semanlink.net/doc/2021/10/au_royaume_des_champignons|bookmarkOf|https://www.arte.tv/fr/videos/068404-000-A/au-royaume-des-champignons/ +http://www.semanlink.net/doc/2021/10/au_royaume_des_champignons|creationTime|2021-10-22T23:43:49Z +http://www.semanlink.net/doc/2021/06/sante_environnement_%C2%AB_ou_s%E2%80%99a|creationDate|2021-06-05 +http://www.semanlink.net/doc/2021/06/sante_environnement_%C2%AB_ou_s%E2%80%99a|tag|http://www.semanlink.net/tag/environnement +http://www.semanlink.net/doc/2021/06/sante_environnement_%C2%AB_ou_s%E2%80%99a|tag|http://www.semanlink.net/tag/dystopia +http://www.semanlink.net/doc/2021/06/sante_environnement_%C2%AB_ou_s%E2%80%99a|tag|http://www.semanlink.net/tag/orwell +http://www.semanlink.net/doc/2021/06/sante_environnement_%C2%AB_ou_s%E2%80%99a|tag|http://www.semanlink.net/tag/macronie +http://www.semanlink.net/doc/2021/06/sante_environnement_%C2%AB_ou_s%E2%80%99a|comment|> La liberté, disait Orwell, c’est d’abord celle de dire que deux et deux font quatre. En matière de santé ou d’environnement, ce combat reste à mener. +http://www.semanlink.net/doc/2021/06/sante_environnement_%C2%AB_ou_s%E2%80%99a|title|Santé, environnement : « Où s’arrête le droit de dire la vérité, et où commence le dénigrement ? 
» +http://www.semanlink.net/doc/2021/06/sante_environnement_%C2%AB_ou_s%E2%80%99a|bookmarkOf|https://www.lemonde.fr/idees/article/2021/06/05/sante-environnement-ou-s-arrete-le-droit-de-dire-la-verite-et-ou-commence-le-denigrement_6082939_3232.html +http://www.semanlink.net/doc/2021/06/sante_environnement_%C2%AB_ou_s%E2%80%99a|creationTime|2021-06-05T15:46:30Z +http://www.semanlink.net/doc/2021/10/how_to_extract_highlighted_part|creationDate|2021-10-21 +http://www.semanlink.net/doc/2021/10/how_to_extract_highlighted_part|tag|http://www.semanlink.net/tag/pdf_format +http://www.semanlink.net/doc/2021/10/how_to_extract_highlighted_part|tag|http://www.semanlink.net/tag/stackoverflow_q +http://www.semanlink.net/doc/2021/10/how_to_extract_highlighted_part|title|How to extract Highlighted Parts from PDF files - Stack Overflow +http://www.semanlink.net/doc/2021/10/how_to_extract_highlighted_part|bookmarkOf|https://stackoverflow.com/questions/9099497/how-to-extract-highlighted-parts-from-pdf-files +http://www.semanlink.net/doc/2021/10/how_to_extract_highlighted_part|creationTime|2021-10-21T14:23:17Z +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|creationDate|2020-12-01 +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|tag|http://www.semanlink.net/tag/flair +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|arxiv_author|Stefan Schweter +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|arxiv_author|Alan Akbik +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|comment|"> Current state-of-the-art approaches for named entity recognition (NER) using BERT-style transformers typically use one of two different approaches: +> +>1. The first fine-tunes the transformer itself on the NER task and adds only a simple linear layer for word-level predictions. +>2. The second uses the transformer only to provide features to a standard LSTM-CRF sequence labeling architecture and thus performs no fine-tuning. +> +> In this paper, we perform a comparative analysis of both approaches + +Conclusion: + +> We recommend the combination of +document-level features and fine-tuning for NER." +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|title|[2011.06993] FLERT: Document-Level Features for Named Entity Recognition +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|bookmarkOf|https://arxiv.org/abs/2011.06993 +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|creationTime|2020-12-01T09:25:14Z +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|arxiv_summary|"Current state-of-the-art approaches for named entity recognition (NER) using +BERT-style transformers typically use one of two different approaches: (1) The +first fine-tunes the transformer itself on the NER task and adds only a simple +linear layer for word-level predictions. (2) The second uses the transformer +only to provide features to a standard LSTM-CRF sequence labeling architecture +and thus performs no fine-tuning. In this paper, we perform a comparative +analysis of both approaches in a variety of settings currently considered in +the literature. 
In particular, we evaluate how well they work when +document-level features are leveraged. Our evaluation on the classic CoNLL +benchmark datasets for 4 languages shows that document-level features +significantly improve NER quality and that fine-tuning generally outperforms +the feature-based approaches. We present recommendations for parameters as well +as several new state-of-the-art numbers. Our approach is integrated into the +Flair framework to facilitate reproduction of our experiments." +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|arxiv_firstAuthor|Stefan Schweter +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|arxiv_updated|2020-11-13T16:13:59Z +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|arxiv_title|FLERT: Document-Level Features for Named Entity Recognition +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|arxiv_published|2020-11-13T16:13:59Z +http://www.semanlink.net/doc/2020/12/2011_06993_flert_document_le|arxiv_num|2011.06993 +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|creationDate|2021-05-20 +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|tag|http://www.semanlink.net/tag/short_text_clustering +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|tag|http://www.semanlink.net/tag/contrastive_learning +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_author|Feng Nan +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_author|Xiaokai Wei +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_author|Ramesh Nallapati +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_author|Kathleen McKeown +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_author|Shangwen Li +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_author|Bing Xiang +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_author|Dejiao Zhang +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_author|Henghui Zhu +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_author|Andrew Arnold +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|comment|"leverages contrastive learning to promote better separation between clusters + +(refers to [Hadifar 2019](doc:2021/05/a_self_training_approach_for_sh))" +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|relatedDoc|http://www.semanlink.net/doc/2021/05/a_self_training_approach_for_sh +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|title|[2103.12953] Supporting Clustering with Contrastive Learning +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|bookmarkOf|https://arxiv.org/abs/2103.12953 +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|creationTime|2021-05-20T16:55:29Z +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_summary|"Unsupervised clustering aims at discovering the semantic categories of data +according to some distance measured in the representation space. However, +different categories often overlap with each other in the representation space +at the beginning of the learning process, which poses a significant challenge +for distance-based clustering in achieving good separation between different +categories. 
To this end, we propose Supporting Clustering with Contrastive +Learning (SCCL) -- a novel framework to leverage contrastive learning to +promote better separation. We assess the performance of SCCL on short text +clustering and show that SCCL significantly advances the state-of-the-art +results on most benchmark datasets with 3%-11% improvement on Accuracy and +4%-15% improvement on Normalized Mutual Information. Furthermore, our +quantitative analysis demonstrates the effectiveness of SCCL in leveraging the +strengths of both bottom-up instance discrimination and top-down clustering to +achieve better intra-cluster and inter-cluster distances when evaluated with +the ground truth cluster labels" +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_firstAuthor|Dejiao Zhang +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_updated|2021-03-24T03:05:17Z +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_title|Supporting Clustering with Contrastive Learning +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_published|2021-03-24T03:05:17Z +http://www.semanlink.net/doc/2021/05/2103_12953_supporting_cluster|arxiv_num|2103.12953 +http://www.semanlink.net/doc/2020/12/michel_zecler_au_%C2%AB_monde_%C2%BB_%C2%AB_|creationDate|2020-12-12 +http://www.semanlink.net/doc/2020/12/michel_zecler_au_%C2%AB_monde_%C2%BB_%C2%AB_|tag|http://www.semanlink.net/tag/violence_policiere +http://www.semanlink.net/doc/2020/12/michel_zecler_au_%C2%AB_monde_%C2%BB_%C2%AB_|tag|http://www.semanlink.net/tag/france_police +http://www.semanlink.net/doc/2020/12/michel_zecler_au_%C2%AB_monde_%C2%BB_%C2%AB_|title|Michel Zecler au « Monde » : « Il fallait que ces trois policiers se sentent en confiance pour aller aussi loin dans leurs actes » +http://www.semanlink.net/doc/2020/12/michel_zecler_au_%C2%AB_monde_%C2%BB_%C2%AB_|bookmarkOf|https://www.lemonde.fr/societe/article/2020/12/12/je-me-pose-toujours-cette-question-pourquoi-trois-semaines-apres-michel-zecler-ressasse-les-images-de-son-agression_6063122_3224.html +http://www.semanlink.net/doc/2020/12/michel_zecler_au_%C2%AB_monde_%C2%BB_%C2%AB_|creationTime|2020-12-12T14:53:09Z +http://www.semanlink.net/doc/2021/10/zexuan_zhong_sur_twitter_d|creationDate|2021-10-07 +http://www.semanlink.net/doc/2021/10/zexuan_zhong_sur_twitter_d|tag|http://www.semanlink.net/tag/dense_passage_retrieval +http://www.semanlink.net/doc/2021/10/zexuan_zhong_sur_twitter_d|tag|http://www.semanlink.net/tag/okapi_bm25 +http://www.semanlink.net/doc/2021/10/zexuan_zhong_sur_twitter_d|tag|http://www.semanlink.net/tag/emnlp_2021 +http://www.semanlink.net/doc/2021/10/zexuan_zhong_sur_twitter_d|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2021/10/zexuan_zhong_sur_twitter_d|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/10/zexuan_zhong_sur_twitter_d|title|Zexuan Zhong sur Twitter : ...Does this really mean dense models are better? 
No. Our #EMNLP2021 paper shows dense retrievers even fail to answer simple entity-centric questions
+http://www.semanlink.net/doc/2021/10/zexuan_zhong_sur_twitter_d|bookmarkOf|https://twitter.com/ZexuanZhong/status/1445857517276438532
+http://www.semanlink.net/doc/2021/10/zexuan_zhong_sur_twitter_d|creationTime|2021-10-07T02:03:23Z
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_easy|creationDate|2021-04-27
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_easy|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_easy|tag|http://www.semanlink.net/tag/neural_machine_translation
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_easy|tag|http://www.semanlink.net/tag/huggingface_transformers
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_easy|tag|http://www.semanlink.net/tag/nils_reimers
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_easy|title|Nils Reimers sur Twitter : EasyNMT Easy-to-use (3 lines of code), state-of-the-art neural machine translations
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_easy|bookmarkOf|https://twitter.com/Nils_Reimers/status/1387008475243454464?s=20
+http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_easy|creationTime|2021-04-27T23:34:33Z
+http://www.semanlink.net/doc/2020/06/softmax_classifier_cs231n_conv|creationDate|2020-06-04
+http://www.semanlink.net/doc/2020/06/softmax_classifier_cs231n_conv|tag|http://www.semanlink.net/tag/nlp_stanford
+http://www.semanlink.net/doc/2020/06/softmax_classifier_cs231n_conv|tag|http://www.semanlink.net/tag/softmax
+http://www.semanlink.net/doc/2020/06/softmax_classifier_cs231n_conv|tag|http://www.semanlink.net/tag/andrej_karpathy
+http://www.semanlink.net/doc/2020/06/softmax_classifier_cs231n_conv|tag|http://www.semanlink.net/tag/cross_entropy
+http://www.semanlink.net/doc/2020/06/softmax_classifier_cs231n_conv|comment|> the cross-entropy objective wants the predicted distribution to have all of its mass on the correct answer.
+http://www.semanlink.net/doc/2020/06/softmax_classifier_cs231n_conv|title|Softmax classifier (CS231n Convolutional Neural Networks for Visual Recognition) +http://www.semanlink.net/doc/2020/06/softmax_classifier_cs231n_conv|bookmarkOf|https://cs231n.github.io/linear-classify/#softmax +http://www.semanlink.net/doc/2020/06/softmax_classifier_cs231n_conv|creationTime|2020-06-04T17:48:52Z +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|creationDate|2020-09-02 +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_author|Alexander Bigerl +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_author|Tommaso Soru +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_author|André Valdestilhas +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_author|Stefano Ruberto +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_author|Diego Moussallem +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_author|Edgard Marx +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_author|Diego Esteves +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|comment|> a simple and fast approach to Knowledge Graph Embedding based on the skip-gram model. Instead of using a predefined scoring function, we learn it relying on Long Short-Term Memories. We show that our embeddings achieve results comparable with the most scalable approaches on knowledge graph completion as well as on a new metric. Yet, KG2Vec can embed large graphs in lesser time by processing more than **250 million triples in less than 7 hours on common hardware**. +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|title|[1803.07828] Expeditious Generation of Knowledge Graph Embeddings +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|bookmarkOf|https://arxiv.org/abs/1803.07828 +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|creationTime|2020-09-02T16:57:44Z +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_summary|"Knowledge Graph Embedding methods aim at representing entities and relations +in a knowledge base as points or vectors in a continuous vector space. Several +approaches using embeddings have shown promising results on tasks such as link +prediction, entity recommendation, question answering, and triplet +classification. However, only a few methods can compute low-dimensional +embeddings of very large knowledge bases without needing state-of-the-art +computational resources. In this paper, we propose KG2Vec, a simple and fast +approach to Knowledge Graph Embedding based on the skip-gram model. Instead of +using a predefined scoring function, we learn it relying on Long Short-Term +Memories. We show that our embeddings achieve results comparable with the most +scalable approaches on knowledge graph completion as well as on a new metric. +Yet, KG2Vec can embed large graphs in lesser time by processing more than 250 +million triples in less than 7 hours on common hardware." 
+http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_firstAuthor|Tommaso Soru +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_updated|2018-11-09T14:26:16Z +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_title|Expeditious Generation of Knowledge Graph Embeddings +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_published|2018-03-21T10:06:28Z +http://www.semanlink.net/doc/2020/09/1803_07828_expeditious_genera|arxiv_num|1803.07828 +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|creationDate|2020-06-06 +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|tag|http://www.semanlink.net/tag/geoffrey_hinton +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|tag|http://www.semanlink.net/tag/critical_evaluation +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_author|George E. Dahl +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_author|Gabriel Pereyra +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_author|Geoffrey E. Hinton +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_author|Alexandre Passos +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_author|Rohan Anil +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_author|Robert Ormandi +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|comment|"> we use *codistillation* to refer to distillation performed: +> 1. using the same architecture for all the models; +> 2. using the same dataset to train all the models; and +> 3. using the distillation loss during training before any model has fully converged. + +> In general, we believe the quality gains of codistillation over well-tuned offline distillation will be +minor in practice and the more interesting research direction is exploring codistillation as a distributed +training algorithm + +> Codistillation with +the same data seems to be slightly better than the baseline, but codistillation using different data +gets much better results. These results show that the codistilling models are indeed successfully +transmitting useful information about different parts of the training data to each other. + +Related to [""Deep mutual learning""](doc:2020/05/1706_00384_deep_mutual_learni) paper" +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|relatedDoc|http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|title|[1804.03235] Large scale distributed neural network training through online distillation +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|bookmarkOf|https://arxiv.org/abs/1804.03235 +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|creationTime|2020-06-06T16:51:26Z +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_summary|"Techniques such as ensembling and distillation promise model quality +improvements when paired with almost any base model. 
However, due to increased +test-time cost (for ensembles) and increased complexity of the training +pipeline (for distillation), these techniques are challenging to use in +industrial settings. In this paper we explore a variant of distillation which +is relatively straightforward to use as it does not require a complicated +multi-stage setup or many new hyperparameters. Our first claim is that online +distillation enables us to use extra parallelism to fit very large datasets +about twice as fast. Crucially, we can still speed up training even after we +have already reached the point at which additional parallelism provides no +benefit for synchronous or asynchronous stochastic gradient descent. Two neural +networks trained on disjoint subsets of the data can share knowledge by +encouraging each model to agree with the predictions the other model would have +made. These predictions can come from a stale version of the other model so +they can be safely computed using weights that only rarely get transmitted. Our +second claim is that online distillation is a cost-effective way to make the +exact predictions of a model dramatically more reproducible. We support our +claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, +and the largest to-date dataset used for neural language modeling, containing +$6\times 10^{11}$ tokens and based on the Common Crawl repository of web data." +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_firstAuthor|Rohan Anil +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_updated|2018-04-09T20:56:03Z +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_title|Large scale distributed neural network training through online distillation +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_published|2018-04-09T20:56:03Z +http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri|arxiv_num|1804.03235 +http://www.semanlink.net/doc/2021/06/yake_keyword_extraction_from_s|creationDate|2021-06-10 +http://www.semanlink.net/doc/2021/06/yake_keyword_extraction_from_s|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/doc/2021/06/yake_keyword_extraction_from_s|title|YAKE! 
Keyword extraction from single documents using multiple local features (2019) +http://www.semanlink.net/doc/2021/06/yake_keyword_extraction_from_s|bookmarkOf|https://www.sciencedirect.com/science/article/abs/pii/S0020025519308588 +http://www.semanlink.net/doc/2021/06/yake_keyword_extraction_from_s|creationTime|2021-06-10T00:51:11Z +http://www.semanlink.net/doc/2021/01/gaspard_koenig|creationDate|2021-01-17 +http://www.semanlink.net/doc/2021/01/gaspard_koenig|tag|http://www.semanlink.net/tag/guaranteed_basic_income +http://www.semanlink.net/doc/2021/01/gaspard_koenig|tag|http://www.semanlink.net/tag/intellectuel +http://www.semanlink.net/doc/2021/01/gaspard_koenig|title|Gaspard Koenig +http://www.semanlink.net/doc/2021/01/gaspard_koenig|bookmarkOf|https://fr.wikipedia.org/wiki/Gaspard_Koenig +http://www.semanlink.net/doc/2021/01/gaspard_koenig|creationTime|2021-01-17T19:34:01Z +http://www.semanlink.net/doc/2020/12/google_ai_blog_reformer_the_e|creationDate|2020-12-09 +http://www.semanlink.net/doc/2020/12/google_ai_blog_reformer_the_e|tag|http://www.semanlink.net/tag/google_ai_blog +http://www.semanlink.net/doc/2020/12/google_ai_blog_reformer_the_e|tag|http://www.semanlink.net/tag/reformer +http://www.semanlink.net/doc/2020/12/google_ai_blog_reformer_the_e|title|Google AI Blog: Reformer: The Efficient Transformer +http://www.semanlink.net/doc/2020/12/google_ai_blog_reformer_the_e|bookmarkOf|https://ai.googleblog.com/2020/01/reformer-efficient-transformer.html +http://www.semanlink.net/doc/2020/12/google_ai_blog_reformer_the_e|creationTime|2020-12-09T12:07:13Z +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|creationDate|2020-12-12 +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|tag|http://www.semanlink.net/tag/retrieval_augmented_lm +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|tag|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|tag|http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_author|Ming-Wei Chang +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_author|Panupong Pasupat +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_author|Zora Tung +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_author|Kelvin Guu +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_author|Kenton Lee +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|comment|"**Augment language model pre-training with a retriever module**, which +is trained using the masked language modeling objective. + +> To capture knowledge in a more modular and interpretable way, we augment language model pre-training with a latent knowledge retriever, which allows the model to retrieve and attend over documents from a large corpus such as Wikipedia, used during pre-training, fine-tuning and inference. 
**For the first time, we show how to pre-train such a knowledge retriever in an unsupervised manner**, using masked language modeling as the learning signal and backpropagating through a retrieval step that considers millions of documents + +Hum, #TODO: parallel to be drawn with techniques in [KG-augmented Language Models](tag:knowledge_graph_augmented_language_models) which focus ""on the problem of capturing declarative knowledge in the learned parameters of a language model."" + +[Google AI Blog Post](doc:2020/08/google_ai_blog_realm_integrat) + +[Summary](https://joeddav.github.io/blog/2020/03/03/REALM.html) for the [Hugging Face awesome-papers reading group](doc:2021/03/huggingface_awesome_papers_pap)" +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|relatedDoc|http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|relatedDoc|http://www.semanlink.net/doc/2021/03/huggingface_awesome_papers_pap +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|title|[2002.08909] REALM: Retrieval-Augmented Language Model Pre-Training +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|bookmarkOf|https://arxiv.org/abs/2002.08909 +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|creationTime|2020-12-12T02:30:25Z +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_summary|"Language model pre-training has been shown to capture a surprising amount of +world knowledge, crucial for NLP tasks such as question answering. However, +this knowledge is stored implicitly in the parameters of a neural network, +requiring ever-larger networks to cover more facts. +To capture knowledge in a more modular and interpretable way, we augment +language model pre-training with a latent knowledge retriever, which allows the +model to retrieve and attend over documents from a large corpus such as +Wikipedia, used during pre-training, fine-tuning and inference. For the first +time, we show how to pre-train such a knowledge retriever in an unsupervised +manner, using masked language modeling as the learning signal and +backpropagating through a retrieval step that considers millions of documents. +We demonstrate the effectiveness of Retrieval-Augmented Language Model +pre-training (REALM) by fine-tuning on the challenging task of Open-domain +Question Answering (Open-QA). We compare against state-of-the-art models for +both explicit and implicit knowledge storage on three popular Open-QA +benchmarks, and find that we outperform all previous methods by a significant +margin (4-16% absolute accuracy), while also providing qualitative benefits +such as interpretability and modularity." 
+http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_firstAuthor|Kelvin Guu +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_updated|2020-02-10T18:40:59Z +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_title|REALM: Retrieval-Augmented Language Model Pre-Training +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_published|2020-02-10T18:40:59Z +http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a|arxiv_num|2002.08909 +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|creationDate|2021-06-16 +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|tag|http://www.semanlink.net/tag/acl_2021 +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|tag|http://www.semanlink.net/tag/entity_type_prediction +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|arxiv_author|Hongliang Dai +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|arxiv_author|Yangqiu Song +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|arxiv_author|Haixun Wang +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|comment|"> we propose to obtain +training data for ultra-fine entity typing by using +a BERT Masked Language Model. Given a mention in a sentence, our approach +constructs an input for the BERT MLM so that +it predicts context dependent hypernyms of the +mention, which can be used as type labels + +Refers to [[1807.04905] Ultra-Fine Entity Typing](doc:2021/06/1807_04905_ultra_fine_entity_)" +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|relatedDoc|http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_ +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|title|[2106.04098] Ultra-Fine Entity Typing with Weak Supervision from a Masked Language Model +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|bookmarkOf|https://arxiv.org/abs/2106.04098 +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|creationTime|2021-06-16T11:26:44Z +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|arxiv_summary|"Recently, there is an effort to extend fine-grained entity typing by using a +richer and ultra-fine set of types, and labeling noun phrases including +pronouns and nominal nouns instead of just named entity mentions. A key +challenge for this ultra-fine entity typing task is that human annotated data +are extremely scarce, and the annotation ability of existing distant or weak +supervision approaches is very limited. To remedy this problem, in this paper, +we propose to obtain training data for ultra-fine entity typing by using a BERT +Masked Language Model (MLM). Given a mention in a sentence, our approach +constructs an input for the BERT MLM so that it predicts context dependent +hypernyms of the mention, which can be used as type labels. Experimental +results demonstrate that, with the help of these automatically generated +labels, the performance of an ultra-fine entity typing model can be improved +substantially. We also show that our approach can be applied to improve +traditional fine-grained entity typing after performing simple type mapping." 
+http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|arxiv_firstAuthor|Hongliang Dai +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|arxiv_updated|2021-06-08T04:43:28Z +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|arxiv_title|Ultra-Fine Entity Typing with Weak Supervision from a Masked Language Model +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|arxiv_published|2021-06-08T04:43:28Z +http://www.semanlink.net/doc/2021/06/2106_04098_ultra_fine_entity_|arxiv_num|2106.04098 +http://www.semanlink.net/doc/2021/07/google_ai_blog_from_vision_to_|creationDate|2021-07-14 +http://www.semanlink.net/doc/2021/07/google_ai_blog_from_vision_to_|tag|http://www.semanlink.net/tag/semi_supervised_learning +http://www.semanlink.net/doc/2021/07/google_ai_blog_from_vision_to_|tag|http://www.semanlink.net/tag/google_ai_blog +http://www.semanlink.net/doc/2021/07/google_ai_blog_from_vision_to_|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2021/07/google_ai_blog_from_vision_to_|comment|Semi-Supervised Distillation (SSD). First, the teacher model infers pseudo-labels on the unlabeled dataset from which we then train a new teacher model (T’) that is of equal-or-larger size than the original teacher model. This step, which is essentially self-training, is then followed by knowledge distillation to produce a smaller student model for production. +http://www.semanlink.net/doc/2021/07/google_ai_blog_from_vision_to_|title|Google AI Blog: From Vision to Language: Semi-supervised Learning in Action…at Scale +http://www.semanlink.net/doc/2021/07/google_ai_blog_from_vision_to_|bookmarkOf|https://ai.googleblog.com/2021/07/from-vision-to-language-semi-supervised.html +http://www.semanlink.net/doc/2021/07/google_ai_blog_from_vision_to_|creationTime|2021-07-14T23:34:40Z +http://www.semanlink.net/doc/2021/09/%C2%AB_l%E2%80%99enjeu_environnemental_est_d|creationDate|2021-09-19 +http://www.semanlink.net/doc/2021/09/%C2%AB_l%E2%80%99enjeu_environnemental_est_d|tag|http://www.semanlink.net/tag/environnement +http://www.semanlink.net/doc/2021/09/%C2%AB_l%E2%80%99enjeu_environnemental_est_d|tag|http://www.semanlink.net/tag/politique_et_environnement +http://www.semanlink.net/doc/2021/09/%C2%AB_l%E2%80%99enjeu_environnemental_est_d|tag|http://www.semanlink.net/tag/macron_et_l_ecologie +http://www.semanlink.net/doc/2021/09/%C2%AB_l%E2%80%99enjeu_environnemental_est_d|comment|> Le temps est venu... d’un « changement fondamental ». Certes, répond en substance la France... mais nous souhaitons malgré tout pouvoir continuer à enfreindre la loi pour permettre à une fraction de pourcent de nos concitoyens de s’adonner au plaisir de tuer des dizaines de milliers d’oiseaux en déclin. 
+http://www.semanlink.net/doc/2021/09/%C2%AB_l%E2%80%99enjeu_environnemental_est_d|title|« L’enjeu environnemental est désormais au cœur d’une rupture du pacte démocratique » +http://www.semanlink.net/doc/2021/09/%C2%AB_l%E2%80%99enjeu_environnemental_est_d|bookmarkOf|https://www.lemonde.fr/idees/article/2021/09/19/l-enjeu-environnemental-est-desormais-au-c-ur-d-une-rupture-du-pacte-democratique_6095186_3232.html +http://www.semanlink.net/doc/2021/09/%C2%AB_l%E2%80%99enjeu_environnemental_est_d|creationTime|2021-09-19T10:20:13Z +http://www.semanlink.net/doc/2021/03/ringgaard_sling_sling_a_natu|creationDate|2021-03-08 +http://www.semanlink.net/doc/2021/03/ringgaard_sling_sling_a_natu|tag|http://www.semanlink.net/tag/wikidata_browser +http://www.semanlink.net/doc/2021/03/ringgaard_sling_sling_a_natu|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2021/03/ringgaard_sling_sling_a_natu|comment|[Try It](doc:2021/03/wikidata_browser) +http://www.semanlink.net/doc/2021/03/ringgaard_sling_sling_a_natu|relatedDoc|http://www.semanlink.net/doc/2021/03/wikidata_browser +http://www.semanlink.net/doc/2021/03/ringgaard_sling_sling_a_natu|title|ringgaard/sling: SLING - A natural language frame semantics parser +http://www.semanlink.net/doc/2021/03/ringgaard_sling_sling_a_natu|bookmarkOf|https://github.com/ringgaard/sling +http://www.semanlink.net/doc/2021/03/ringgaard_sling_sling_a_natu|creationTime|2021-03-08T08:20:52Z +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|creationDate|2020-06-26 +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_author|Sahand Sharifzadeh +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_author|Max Berrendorf +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_author|Laurent Vermue +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_author|Jens Lehmann +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_author|Mehdi Ali +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_author|Charles Tapley Hoyt +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_author|Volker Tresp +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_author|Asja Fischer +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_author|Mikhail Galkin +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|title|[2006.13365] Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|bookmarkOf|https://arxiv.org/abs/2006.13365 +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|creationTime|2020-06-26T16:33:57Z +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_summary|"The heterogeneity in recently published knowledge graph embedding models' +implementations, training, and evaluation has made fair and thorough +comparisons difficult. 
In order to assess the reproducibility of previously +published results, we re-implemented and evaluated 19 interaction models in the +PyKEEN software package. Here, we outline which results could be reproduced +with their reported hyper-parameters, which could only be reproduced with +alternate hyper-parameters, and which could not be reproduced at all as well as +provide insight as to why this might be the case. +We then performed a large-scale benchmarking on four datasets with several +thousands of experiments and 21,246 GPU hours of computation time. We present +insights gained as to best practices, best configurations for each model, and +where improvements could be made over previously published best configurations. +Our results highlight that the combination of model architecture, training +approach, loss function, and the explicit modeling of inverse relations is +crucial for a model's performances, and not only determined by the model +architecture. We provide evidence that several architectures can obtain results +competitive to the state-of-the-art when configured carefully. We have made all +code, experimental configurations, results, and analyses that lead to our +interpretations available at https://github.com/pykeen/pykeen and +https://github.com/pykeen/benchmarking" +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_firstAuthor|Mehdi Ali +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_updated|2020-06-23T22:30:52Z +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_title|Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_published|2020-06-23T22:30:52Z +http://www.semanlink.net/doc/2020/06/2006_13365_bringing_light_int|arxiv_num|2006.13365 +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|creationDate|2020-09-02 +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|tag|http://www.semanlink.net/tag/ikuya_yamada +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|tag|http://www.semanlink.net/tag/wikipedia2vec +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_author|Hideaki Takeda +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_author|Yuji Matsumoto +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_author|Jin Sakuma +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_author|Ikuya Yamada +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_author|Akari Asai +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_author|Yoshiyasu Takefuji +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_author|Hiroyuki Shindo +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|title|[1812.06280] Wikipedia2Vec: An Efficient Toolkit for Learning and Visualizing the Embeddings of Words and Entities from Wikipedia +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|bookmarkOf|https://arxiv.org/abs/1812.06280 +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|creationTime|2020-09-02T16:44:44Z +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_summary|"The embeddings of entities in a large knowledge base (e.g., Wikipedia) are 
+highly beneficial for solving various natural language tasks that involve real +world knowledge. In this paper, we present Wikipedia2Vec, a Python-based +open-source tool for learning the embeddings of words and entities from +Wikipedia. The proposed tool enables users to learn the embeddings efficiently +by issuing a single command with a Wikipedia dump file as an argument. We also +introduce a web-based demonstration of our tool that allows users to visualize +and explore the learned embeddings. In our experiments, our tool achieved a +state-of-the-art result on the KORE entity relatedness dataset, and competitive +results on various standard benchmark datasets. Furthermore, our tool has been +used as a key component in various recent studies. We publicize the source +code, demonstration, and the pretrained embeddings for 12 languages at +https://wikipedia2vec.github.io/." +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_firstAuthor|Ikuya Yamada +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_updated|2020-01-30T10:58:05Z +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_title|Wikipedia2Vec: An Efficient Toolkit for Learning and Visualizing the Embeddings of Words and Entities from Wikipedia +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_published|2018-12-15T12:51:39Z +http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_|arxiv_num|1812.06280 +http://www.semanlink.net/doc/2021/06/hausanlp_research_group|creationDate|2021-06-30 +http://www.semanlink.net/doc/2021/06/hausanlp_research_group|tag|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/doc/2021/06/hausanlp_research_group|tag|http://www.semanlink.net/tag/haoussa +http://www.semanlink.net/doc/2021/06/hausanlp_research_group|title|HausaNLP Research Group +http://www.semanlink.net/doc/2021/06/hausanlp_research_group|bookmarkOf|https://hausanlp.github.io/ +http://www.semanlink.net/doc/2021/06/hausanlp_research_group|creationTime|2021-06-30T00:24:43Z +http://www.semanlink.net/doc/2020/07/bert_word_embeddings_tutorial_%C2%B7|creationDate|2020-07-06 +http://www.semanlink.net/doc/2020/07/bert_word_embeddings_tutorial_%C2%B7|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/07/bert_word_embeddings_tutorial_%C2%B7|tag|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/doc/2020/07/bert_word_embeddings_tutorial_%C2%B7|tag|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/doc/2020/07/bert_word_embeddings_tutorial_%C2%B7|title|BERT Word Embeddings Tutorial · Chris McCormick +http://www.semanlink.net/doc/2020/07/bert_word_embeddings_tutorial_%C2%B7|bookmarkOf|https://mccormickml.com/2019/05/14/BERT-word-embeddings-tutorial/#why-bert-embeddings +http://www.semanlink.net/doc/2020/07/bert_word_embeddings_tutorial_%C2%B7|creationTime|2020-07-06T14:51:33Z +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|creationDate|2020-07-09 +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|tag|http://www.semanlink.net/tag/google_research +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|tag|http://www.semanlink.net/tag/arxiv_doc 
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|tag|http://www.semanlink.net/tag/ai_knowledge_bases
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|tag|http://www.semanlink.net/tag/knowledge_graph_deep_learning
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|tag|http://www.semanlink.net/tag/neural_memory
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|tag|http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|arxiv_author|Livio Baldini Soares
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|arxiv_author|Haitian Sun
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|arxiv_author|William W. Cohen
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|arxiv_author|Pat Verga
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|comment|"> a neural language model that includes **an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.**... **The model can be updated without re-training by manipulating its symbolic representations**. In particular this model allows us to add new facts and overwrite existing ones.
+
+> a **neural language model which learns to access information
+in a symbolic knowledge graph.**
+
+> This
+model builds on the recently-proposed [Entities as
+Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020),
+which extends the same transformer (Vaswani
+et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities.
+>
+> After training EaE, the embedding associated
+with an entity will (ideally) capture information
+about the textual context in which that
+entity appears, and by inference, the entity’s semantic
+properties
+>
+> we include an additional
+memory called a fact memory, which encodes
+triples from a symbolic KB.
+>
+> This combination results in a
+neural language model which learns to access information
+in the symbolic knowledge graph.
+
+
+
+TODO:
+
+- read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (""an effort to avoid encoding general knowledge in the transformer network itself"")
+- compare with [[1907.05242] Large Memory Layers with Product Keys](doc:2019/07/_1907_05242_large_memory_layer)
+- how does it relate with [[2002.08909] REALM: Retrieval-Augmented Language Model Pre-Training](doc:2020/12/2002_08909_realm_retrieval_a)?"
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|relatedDoc|http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|relatedDoc|http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|relatedDoc|http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|relatedDoc|http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|title|[2007.00849] Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|bookmarkOf|https://arxiv.org/abs/2007.00849 +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|creationTime|2020-07-09T23:54:59Z +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|arxiv_summary|"Massive language models are the core of modern NLP modeling and have been +shown to encode impressive amounts of commonsense and factual information. +However, that knowledge exists only within the latent parameters of the model, +inaccessible to inspection and interpretation, and even worse, factual +information memorized from the training corpora is likely to become stale as +the world changes. Knowledge stored as parameters will also inevitably exhibit +all of the biases inherent in the source materials. To address these problems, +we develop a neural language model that includes an explicit interface between +symbolically interpretable factual information and subsymbolic neural +knowledge. We show that this model dramatically improves performance on two +knowledge-intensive question-answering tasks. More interestingly, the model can +be updated without re-training by manipulating its symbolic representations. In +particular this model allows us to add new facts and overwrite existing ones in +ways that are not possible for earlier models." 
+http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|arxiv_firstAuthor|Pat Verga +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|arxiv_updated|2020-07-02T03:05:41Z +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|arxiv_title|Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|arxiv_published|2020-07-02T03:05:41Z +http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_|arxiv_num|2007.00849 +http://www.semanlink.net/doc/2020/11/20_french_ai_startups_to_watch_|creationDate|2020-11-20 +http://www.semanlink.net/doc/2020/11/20_french_ai_startups_to_watch_|tag|http://www.semanlink.net/tag/ai_startups +http://www.semanlink.net/doc/2020/11/20_french_ai_startups_to_watch_|tag|http://www.semanlink.net/tag/france +http://www.semanlink.net/doc/2020/11/20_french_ai_startups_to_watch_|title|20 French AI startups to watch Sifted +http://www.semanlink.net/doc/2020/11/20_french_ai_startups_to_watch_|bookmarkOf|https://sifted.eu/articles/ai-startups-france/ +http://www.semanlink.net/doc/2020/11/20_french_ai_startups_to_watch_|creationTime|2020-11-20T09:48:21Z +http://www.semanlink.net/doc/2021/09/modeling_ai_on_the_language_of_|creationDate|2021-09-12 +http://www.semanlink.net/doc/2021/09/modeling_ai_on_the_language_of_|tag|http://www.semanlink.net/tag/neuroscience_and_ai +http://www.semanlink.net/doc/2021/09/modeling_ai_on_the_language_of_|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/doc/2021/09/modeling_ai_on_the_language_of_|title|Modeling AI on the Language of Brain Circuits and Architecture Wu Tsai Neurosciences Institute +http://www.semanlink.net/doc/2021/09/modeling_ai_on_the_language_of_|bookmarkOf|https://neuroscience.stanford.edu/news/modeling-ai-language-brain-circuits-and-architecture?sf150768207=1 +http://www.semanlink.net/doc/2021/09/modeling_ai_on_the_language_of_|creationTime|2021-09-12T23:47:24Z +http://www.semanlink.net/doc/2021/01/clip_connecting_text_and_images|creationDate|2021-01-06 +http://www.semanlink.net/doc/2021/01/clip_connecting_text_and_images|tag|http://www.semanlink.net/tag/zero_shot_learning +http://www.semanlink.net/doc/2021/01/clip_connecting_text_and_images|tag|http://www.semanlink.net/tag/computer_vision +http://www.semanlink.net/doc/2021/01/clip_connecting_text_and_images|tag|http://www.semanlink.net/tag/openai +http://www.semanlink.net/doc/2021/01/clip_connecting_text_and_images|tag|http://www.semanlink.net/tag/natural_language_supervision +http://www.semanlink.net/doc/2021/01/clip_connecting_text_and_images|title|CLIP: Connecting Text and Images +http://www.semanlink.net/doc/2021/01/clip_connecting_text_and_images|bookmarkOf|https://openai.com/blog/clip/ +http://www.semanlink.net/doc/2021/01/clip_connecting_text_and_images|creationTime|2021-01-06T15:51:24Z +http://www.semanlink.net/doc/2020/10/quand_l_histoire_fait_dates_a|creationDate|2020-10-24 +http://www.semanlink.net/doc/2020/10/quand_l_histoire_fait_dates_a|tag|http://www.semanlink.net/tag/arte +http://www.semanlink.net/doc/2020/10/quand_l_histoire_fait_dates_a|tag|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/doc/2020/10/quand_l_histoire_fait_dates_a|tag|http://www.semanlink.net/tag/good +http://www.semanlink.net/doc/2020/10/quand_l_histoire_fait_dates_a|title|Quand l'histoire fait dates - arte.tv 
+http://www.semanlink.net/doc/2020/10/quand_l_histoire_fait_dates_a|bookmarkOf|https://www.arte.tv/fr/videos/RC-015950/quand-l-histoire-fait-dates/ +http://www.semanlink.net/doc/2020/10/quand_l_histoire_fait_dates_a|creationTime|2020-10-24T15:07:41Z +http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex|creationDate|2021-01-10 +http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex|tag|http://www.semanlink.net/tag/nlp_amazon +http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex|tag|http://www.semanlink.net/tag/classification_relations_between_classes +http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex|tag|http://www.semanlink.net/tag/extreme_multi_label_classification +http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex|tag|http://www.semanlink.net/tag/text_multi_label_classification +http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex|comment|"> Challenges in extending BERT to the XMC problem: +- difficulty of capturing dependencies or correlations among labels +- tractability to scale to the extreme label setting because of the Softmax bottleneck scaling linearly with the output space. + +> X-BERT leverages both the label and input text to build label representations, which induces semantic label clusters to better model label dependencies. At the heart of X-BERT is a procedure to finetune BERT models to capture the contextual relations between input text and the induced label clusters. Finally, an ensemble of the different BERT models trained on heterogeneous label clusters leads to our best final mode" +http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex|title|X-BERT: eXtreme Multi-label Text Classification using Bidirectional Encoder Representations from Transformers +http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex|bookmarkOf|https://www.amazon.science/publications/x-bert-extreme-multi-label-text-classification-using-bidirectional-encoder-representations-from-transformers +http://www.semanlink.net/doc/2021/01/x_bert_extreme_multi_label_tex|creationTime|2021-01-10T19:23:20Z +http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_|creationDate|2020-10-16 +http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_|tag|http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions +http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales +http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_|tag|http://www.semanlink.net/tag/france_bureaucratie +http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_|tag|http://www.semanlink.net/tag/france_fiasco_administratif +http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_|tag|http://www.semanlink.net/tag/declin_de_la_france +http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_|comment|"> « Le gouvernement était paumé, accablé d’incertitudes, rien ne marchait » + +> Plusieurs élus s’écharpent avec les autorités gouvernementales à +ce sujet : ils veulent tester les personnels asymptomatiques pour +protéger les résidents, mais l’Etat s’y oppose. « Le combat a duré quinze +jours, raconte Anne Hidalgo. C’est le pire souvenir de ma vie politique, un +bras de fer harassant. 
**Nous nous sommes heurtés à une bureaucratie +délirante qui, de façon fanatique, conçoit son rôle comme celui de +producteur de normes.** »" +http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_|title|« Nous avons assisté à l’effondrement de l’Etat » : des maires de grandes villes racontent les premiers mois de la pandémie - par Vanessa Schneider (Le Monde) +http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_|bookmarkOf|https://www.lemonde.fr/politique/article/2020/10/16/covid-19-chez-les-maires-le-virus-de-la-solidarite_6056220_823448.html +http://www.semanlink.net/doc/2020/10/%C2%AB_le_gouvernement_etait_paume_|creationTime|2020-10-16T19:44:05Z +http://www.semanlink.net/doc/2020/07/coronavirus_un_rapport_au_vit|creationDate|2020-07-05 +http://www.semanlink.net/doc/2020/07/coronavirus_un_rapport_au_vit|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales +http://www.semanlink.net/doc/2020/07/coronavirus_un_rapport_au_vit|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/07/coronavirus_un_rapport_au_vit|comment|> La gestion de crise, c’est un métier, on ne la laisse pas aux directeurs administratifs et financiers +http://www.semanlink.net/doc/2020/07/coronavirus_un_rapport_au_vit|title|Coronavirus : un rapport au vitriol des pompiers dénonce la gestion de la crise +http://www.semanlink.net/doc/2020/07/coronavirus_un_rapport_au_vit|bookmarkOf|https://www.lemonde.fr/planete/article/2020/07/05/coronavirus-un-rapport-au-vitriol-des-pompiers-denonce-la-gestion-de-la-crise_6045258_3244.html +http://www.semanlink.net/doc/2020/07/coronavirus_un_rapport_au_vit|creationTime|2020-07-05T18:58:56Z +http://www.semanlink.net/doc/2021/05/heinrich_barth_and_the_western_|creationDate|2021-05-05 +http://www.semanlink.net/doc/2021/05/heinrich_barth_and_the_western_|tag|http://www.semanlink.net/tag/heinrich_barth +http://www.semanlink.net/doc/2021/05/heinrich_barth_and_the_western_|title|Heinrich Barth and the Western Sudan +http://www.semanlink.net/doc/2021/05/heinrich_barth_and_the_western_|bookmarkOf|https://www.jstor.org/stable/1790783?seq=1 +http://www.semanlink.net/doc/2021/05/heinrich_barth_and_the_western_|creationTime|2021-05-05T10:30:25Z +http://www.semanlink.net/doc/2020/12/%C2%AB_le_devoir_de_toute_societe_hu|creationDate|2020-12-14 +http://www.semanlink.net/doc/2020/12/%C2%AB_le_devoir_de_toute_societe_hu|tag|http://www.semanlink.net/tag/responsabilite +http://www.semanlink.net/doc/2020/12/%C2%AB_le_devoir_de_toute_societe_hu|tag|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/doc/2020/12/%C2%AB_le_devoir_de_toute_societe_hu|tag|http://www.semanlink.net/tag/punition_des_mechants +http://www.semanlink.net/doc/2020/12/%C2%AB_le_devoir_de_toute_societe_hu|tag|http://www.semanlink.net/tag/ecocide +http://www.semanlink.net/doc/2020/12/%C2%AB_le_devoir_de_toute_societe_hu|comment|> **La responsabilité des chefs d’entreprise et des dirigeants politiques qui, face à la crise climatique, prennent des décisions à l’encontre de l’intérêt général, doit être engagée**, estiment l’investisseur Bertrand Badré, l’écrivain Erik Orsenna et le psychiatre et entrepreneur Bertrand Piccard. 
+http://www.semanlink.net/doc/2020/12/%C2%AB_le_devoir_de_toute_societe_hu|title|« Le devoir de toute société humaine est de se protéger contre les déviances de ceux qui détruisent la planète » +http://www.semanlink.net/doc/2020/12/%C2%AB_le_devoir_de_toute_societe_hu|bookmarkOf|https://www.lemonde.fr/idees/article/2020/10/04/le-devoir-de-toute-societe-humaine-est-de-se-proteger-contre-les-deviances-de-ceux-qui-detruisent-la-planete_6054696_3232.html +http://www.semanlink.net/doc/2020/12/%C2%AB_le_devoir_de_toute_societe_hu|creationTime|2020-12-14T16:37:01Z +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|creationDate|2020-09-19 +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|tag|http://www.semanlink.net/tag/ai_ibm +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|tag|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|tag|http://www.semanlink.net/tag/link_prediction +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_author|Tian Gao +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_author|Pavan Kapanipathi +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_author|Zijun Cui +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_author|Qiang Ji +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_author|Kartik Talamadupula +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|title|[2009.07938] Type-augmented Relation Prediction in Knowledge Graphs +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|bookmarkOf|https://arxiv.org/abs/2009.07938 +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|creationTime|2020-09-19T10:00:31Z +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_summary|"Knowledge graphs (KGs) are of great importance to many real world +applications, but they generally suffer from incomplete information in the form +of missing relations between entities. Knowledge graph completion (also known +as relation prediction) is the task of inferring missing facts given existing +ones. Most of the existing work is proposed by maximizing the likelihood of +observed instance-level triples. Not much attention, however, is paid to the +ontological information, such as type information of entities and relations. In +this work, we propose a type-augmented relation prediction (TaRP) method, where +we apply both the type information and instance-level information for relation +prediction. In particular, type information and instance-level information are +encoded as prior probabilities and likelihoods of relations respectively, and +are combined by following Bayes' rule. Our proposed TaRP method achieves +significantly better performance than state-of-the-art methods on three +benchmark datasets: FB15K, YAGO26K-906, and DB111K-174. In addition, we show +that TaRP achieves significantly improved data efficiency. More importantly, +the type information extracted from a specific dataset can generalize well to +other datasets through the proposed TaRP model." 
+http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_firstAuthor|Zijun Cui +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_updated|2020-09-16T21:14:18Z +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_title|Type-augmented Relation Prediction in Knowledge Graphs +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_published|2020-09-16T21:14:18Z +http://www.semanlink.net/doc/2020/09/2009_07938_type_augmented_rel|arxiv_num|2009.07938 +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|creationDate|2020-06-16 +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|tag|http://www.semanlink.net/tag/visually_rich_documents +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|tag|http://www.semanlink.net/tag/2d_nlp +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|tag|http://www.semanlink.net/tag/alibaba +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|tag|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|arxiv_author|Huasha Zhao +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|arxiv_author|Feiyu Gao +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|arxiv_author|Xiaojing Liu +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|arxiv_author|Qiong Zhang +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|comment|"Problem addressed in this paper: extracting +the values of pre-defined entities from Visually Rich Documents (VRDs). + +> Graph embeddings are trained to +summarize the context of a text segment in the +document, and further combined with text embeddings +for entity extraction + +> Node embedding encodes textual +features, while edge embedding primarily represents +visual features + +> Graph convolution is applied to compute visual +text embeddings of text segments in the graph, +as shown in Figure 3. Different from existing +works, we define convolution on the node-edge-node +triplets instead of on the node +alone + +> In our model, graph convolution is defined +based on the self-attention mechanism. The idea is to compute the output hidden representation of +each node by attending to its neighbors + +> We combine graph embeddings with token embeddings +and feed them into standard BiLSTM-CRF +for entity extraction + +> We build an annotation system to facilitate the labeling +of the ground truth data." +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|title|[1903.11279] Graph Convolution for Multimodal Information Extraction from Visually Rich Documents +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|bookmarkOf|https://arxiv.org/abs/1903.11279 +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|creationTime|2020-06-16T09:27:40Z +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|arxiv_summary|"Visually rich documents (VRDs) are ubiquitous in daily business and life. +Examples are purchase receipts, insurance policy documents, custom declaration +forms and so on. In VRDs, visual and layout information is critical for +document understanding, and texts in such documents cannot be serialized into +the one-dimensional sequence without losing information. 
Classic information +extraction models such as BiLSTM-CRF typically operate on text sequences and do +not incorporate visual features. In this paper, we introduce a graph +convolution based model to combine textual and visual information presented in +VRDs. Graph embeddings are trained to summarize the context of a text segment +in the document, and further combined with text embeddings for entity +extraction. Extensive experiments have been conducted to show that our method +outperforms BiLSTM-CRF baselines by significant margins, on two real-world +datasets. Additionally, ablation studies are also performed to evaluate the +effectiveness of each component of our model." +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|arxiv_firstAuthor|Xiaojing Liu +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|arxiv_updated|2019-03-27T07:47:12Z +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|arxiv_title|Graph Convolution for Multimodal Information Extraction from Visually Rich Documents +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|arxiv_published|2019-03-27T07:47:12Z +http://www.semanlink.net/doc/2020/06/1903_11279_graph_convolution_|arxiv_num|1903.11279 +http://www.semanlink.net/doc/2020/10/construire_un_bon_analyzer_fran|creationDate|2020-10-23 +http://www.semanlink.net/doc/2020/10/construire_un_bon_analyzer_fran|tag|http://www.semanlink.net/tag/elasticsearch +http://www.semanlink.net/doc/2020/10/construire_un_bon_analyzer_fran|title|Construire un bon analyzer français pour Elasticsearch +http://www.semanlink.net/doc/2020/10/construire_un_bon_analyzer_fran|bookmarkOf|https://jolicode.com/blog/construire-un-bon-analyzer-francais-pour-elasticsearch +http://www.semanlink.net/doc/2020/10/construire_un_bon_analyzer_fran|creationTime|2020-10-23T14:45:36Z +http://www.semanlink.net/doc/2021/09/2109_04513_filling_the_gaps_i|creationDate|2021-09-23 +http://www.semanlink.net/doc/2021/09/2109_04513_filling_the_gaps_i|tag|http://www.semanlink.net/tag/mlm +http://www.semanlink.net/doc/2021/09/2109_04513_filling_the_gaps_i|tag|http://www.semanlink.net/tag/akkadian_language +http://www.semanlink.net/doc/2021/09/2109_04513_filling_the_gaps_i|comment|[tweet](doc:2021/09/koren_lazar_sur_twitter_m) +http://www.semanlink.net/doc/2021/09/2109_04513_filling_the_gaps_i|relatedDoc|http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m +http://www.semanlink.net/doc/2021/09/2109_04513_filling_the_gaps_i|title|[2109.04513] Filling the Gaps in Ancient Akkadian Texts: A Masked Language Modelling Approach +http://www.semanlink.net/doc/2021/09/2109_04513_filling_the_gaps_i|bookmarkOf|https://arxiv.org/abs/2109.04513 +http://www.semanlink.net/doc/2021/09/2109_04513_filling_the_gaps_i|creationTime|2021-09-23T10:56:10Z +http://www.semanlink.net/doc/2021/05/integrating_document_clustering|creationDate|2021-05-25 +http://www.semanlink.net/doc/2021/05/integrating_document_clustering|tag|http://www.semanlink.net/tag/multi_document_summarization +http://www.semanlink.net/doc/2021/05/integrating_document_clustering|tag|http://www.semanlink.net/tag/automatic_summarization +http://www.semanlink.net/doc/2021/05/integrating_document_clustering|tag|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/doc/2021/05/integrating_document_clustering|comment|> simultaneously cluster and summarize documents by making use of both the document-term and sentence-term matrices 
+http://www.semanlink.net/doc/2021/05/integrating_document_clustering|title|Integrating Document Clustering and Multidocument Summarization +http://www.semanlink.net/doc/2021/05/integrating_document_clustering|bookmarkOf|https://www.researchgate.net/publication/220345110_Integrating_Document_Clustering_and_Multidocument_Summarization +http://www.semanlink.net/doc/2021/05/integrating_document_clustering|creationTime|2021-05-25T18:12:00Z +http://www.semanlink.net/doc/2021/10/fastapi|creationDate|2021-10-04 +http://www.semanlink.net/doc/2021/10/fastapi|tag|http://www.semanlink.net/tag/python +http://www.semanlink.net/doc/2021/10/fastapi|tag|http://www.semanlink.net/tag/restful_web_services +http://www.semanlink.net/doc/2021/10/fastapi|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2021/10/fastapi|title|FastAPI +http://www.semanlink.net/doc/2021/10/fastapi|bookmarkOf|https://fastapi.tiangolo.com/ +http://www.semanlink.net/doc/2021/10/fastapi|creationTime|2021-10-04T16:36:44Z +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|creationDate|2021-05-17 +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|tag|http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|arxiv_author|Jie Wang +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|arxiv_author|Jianyu Cai +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|arxiv_author|Yongdong Zhang +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|arxiv_author|Zhanqiu Zhang +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|comment|"Models semantic hierarchies by mapping entities into the polar coordinate system +> Specifically, +the radial coordinate aims to model entities at different levels +of the hierarchy... the angular coordinate aims to distinguish +entities at the same level of the hierarchy, and these entities +are expected to have roughly the same radii but different +angles." +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|title|[1911.09419] Learning Hierarchy-Aware Knowledge Graph Embeddings for Link Prediction +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|bookmarkOf|https://arxiv.org/abs/1911.09419 +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|creationTime|2021-05-17T15:11:47Z +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|arxiv_summary|"Knowledge graph embedding, which aims to represent entities and relations as +low dimensional vectors (or matrices, tensors, etc.), has been shown to be a +powerful technique for predicting missing links in knowledge graphs. Existing +knowledge graph embedding models mainly focus on modeling relation patterns +such as symmetry/antisymmetry, inversion, and composition. However, many +existing approaches fail to model semantic hierarchies, which are common in +real-world applications. To address this challenge, we propose a novel +knowledge graph embedding model---namely, Hierarchy-Aware Knowledge Graph +Embedding (HAKE)---which maps entities into the polar coordinate system. 
HAKE +is inspired by the fact that concentric circles in the polar coordinate system +can naturally reflect the hierarchy. Specifically, the radial coordinate aims +to model entities at different levels of the hierarchy, and entities with +smaller radii are expected to be at higher levels; the angular coordinate aims +to distinguish entities at the same level of the hierarchy, and these entities +are expected to have roughly the same radii but different angles. Experiments +demonstrate that HAKE can effectively model the semantic hierarchies in +knowledge graphs, and significantly outperforms existing state-of-the-art +methods on benchmark datasets for the link prediction task." +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|arxiv_firstAuthor|Zhanqiu Zhang +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|arxiv_updated|2019-12-25T12:31:40Z +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|arxiv_title|Learning Hierarchy-Aware Knowledge Graph Embeddings for Link Prediction +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|arxiv_published|2019-11-21T11:37:18Z +http://www.semanlink.net/doc/2021/05/1911_09419_learning_hierarchy|arxiv_num|1911.09419 +http://www.semanlink.net/doc/2020/10/graph_visualization_library_in_|creationDate|2020-10-17 +http://www.semanlink.net/doc/2020/10/graph_visualization_library_in_|tag|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/doc/2020/10/graph_visualization_library_in_|tag|http://www.semanlink.net/tag/graph_visualization +http://www.semanlink.net/doc/2020/10/graph_visualization_library_in_|title|Graph visualization library in JavaScript - Stack Overflow +http://www.semanlink.net/doc/2020/10/graph_visualization_library_in_|bookmarkOf|https://stackoverflow.com/questions/7034/graph-visualization-library-in-javascript +http://www.semanlink.net/doc/2020/10/graph_visualization_library_in_|creationTime|2020-10-17T16:00:56Z +http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s|creationDate|2021-03-27 +http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s|tag|http://www.semanlink.net/tag/monsanto +http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s|tag|http://www.semanlink.net/tag/lobbies_economiques +http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s|tag|http://www.semanlink.net/tag/bayer +http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s|tag|http://www.semanlink.net/tag/mexique +http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s|tag|http://www.semanlink.net/tag/glyphosate +http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s|tag|http://www.semanlink.net/tag/mais +http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s|title|Comment Bayer a fait pression sur le Mexique pour empêcher l’interdiction du glyphosate +http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s|bookmarkOf|https://www.lemonde.fr/planete/article/2021/03/27/comment-bayer-a-fait-pression-sur-le-mexique-pour-empecher-l-interdiction-du-glyphosate_6074679_3244.html +http://www.semanlink.net/doc/2021/03/comment_bayer_a_fait_pression_s|creationTime|2021-03-27T21:10:08Z +http://www.semanlink.net/doc/2020/11/only_angels_have_wings|creationDate|2020-11-08 +http://www.semanlink.net/doc/2020/11/only_angels_have_wings|tag|http://www.semanlink.net/tag/film_americain +http://www.semanlink.net/doc/2020/11/only_angels_have_wings|tag|http://www.semanlink.net/tag/film_a_voir 
+http://www.semanlink.net/doc/2020/11/only_angels_have_wings|comment|Howard Hawks, Cary Grant, Rita Hayworth
+http://www.semanlink.net/doc/2020/11/only_angels_have_wings|title|Only Angels Have Wings
+http://www.semanlink.net/doc/2020/11/only_angels_have_wings|bookmarkOf|https://en.wikipedia.org/wiki/Only_Angels_Have_Wings
+http://www.semanlink.net/doc/2020/11/only_angels_have_wings|creationTime|2020-11-08T19:58:06Z
+http://www.semanlink.net/doc/2021/01/talk_to_strangers_on_signal_wit|creationDate|2021-01-12
+http://www.semanlink.net/doc/2021/01/talk_to_strangers_on_signal_wit|tag|http://www.semanlink.net/tag/signal
+http://www.semanlink.net/doc/2021/01/talk_to_strangers_on_signal_wit|title|Talk To Strangers on Signal With a Public Phone Number
+http://www.semanlink.net/doc/2021/01/talk_to_strangers_on_signal_wit|bookmarkOf|https://theintercept.com/2017/09/28/signal-tutorial-second-phone-number/
+http://www.semanlink.net/doc/2021/01/talk_to_strangers_on_signal_wit|creationTime|2021-01-12T02:33:21Z
+http://www.semanlink.net/doc/2021/01/journee_tal_grand_public_gdr|creationDate|2021-01-08
+http://www.semanlink.net/doc/2021/01/journee_tal_grand_public_gdr|tag|http://www.semanlink.net/tag/cnrs
+http://www.semanlink.net/doc/2021/01/journee_tal_grand_public_gdr|tag|http://www.semanlink.net/tag/nlp_event
+http://www.semanlink.net/doc/2021/01/journee_tal_grand_public_gdr|title|Journée  TAL grand public – GDR TAL
+http://www.semanlink.net/doc/2021/01/journee_tal_grand_public_gdr|bookmarkOf|https://gdr-tal.ls2n.fr/event/700/
+http://www.semanlink.net/doc/2021/01/journee_tal_grand_public_gdr|creationTime|2021-01-08T19:04:27Z
+http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202|creationDate|2021-08-02
+http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation
+http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202|tag|http://www.semanlink.net/tag/geoffrey_hinton
+http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202|tag|http://www.semanlink.net/tag/deep_learning
+http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202|tag|http://www.semanlink.net/tag/yann_lecun
+http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202|tag|http://www.semanlink.net/tag/yoshua_bengio
+http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202|tag|http://www.semanlink.net/tag/artificial_intelligence
+http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202|title|Deep Learning for AI July 2021 Communications of the ACM
+http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202|bookmarkOf|https://cacm.acm.org/magazines/2021/7/253464-deep-learning-for-ai/fulltext
+http://www.semanlink.net/doc/2021/08/deep_learning_for_ai_%7C_july_202|creationTime|2021-08-02T15:48:37Z
+http://www.semanlink.net/doc/2021/02/kamalkraj_bert_ner_pytorch_nam|creationDate|2021-02-07
+http://www.semanlink.net/doc/2021/02/kamalkraj_bert_ner_pytorch_nam|tag|http://www.semanlink.net/tag/github_project
+http://www.semanlink.net/doc/2021/02/kamalkraj_bert_ner_pytorch_nam|tag|http://www.semanlink.net/tag/bert
+http://www.semanlink.net/doc/2021/02/kamalkraj_bert_ner_pytorch_nam|tag|http://www.semanlink.net/tag/named_entity_recognition
+http://www.semanlink.net/doc/2021/02/kamalkraj_bert_ner_pytorch_nam|comment|Use google BERT to do CoNLL-2003 NER !
+http://www.semanlink.net/doc/2021/02/kamalkraj_bert_ner_pytorch_nam|title|kamalkraj/BERT-NER: Pytorch-Named-Entity-Recognition-with-BERT +http://www.semanlink.net/doc/2021/02/kamalkraj_bert_ner_pytorch_nam|bookmarkOf|https://github.com/kamalkraj/BERT-NER +http://www.semanlink.net/doc/2021/02/kamalkraj_bert_ner_pytorch_nam|creationTime|2021-02-07T11:37:39Z +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|creationDate|2021-01-05 +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|tag|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|tag|http://www.semanlink.net/tag/max_halford +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Saulo Martiello Mastelini +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Jesse Read +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Robin Vaysse +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Geoffrey Bolmier +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Jacob Montiel +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Albert Bifet +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Max Halford +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Heitor Murilo Gomes +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Adil Zouitine +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Raphael Sourty +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_author|Talel Abdessalem +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|comment|[Github](doc:2020/01/creme_ml_creme_online_machine_) +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|relatedDoc|http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_ +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|title|[2012.04740] River: machine learning for streaming data in Python +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|bookmarkOf|https://arxiv.org/abs/2012.04740 +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|creationTime|2021-01-05T16:15:12Z +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_summary|"River is a machine learning library for dynamic data streams and continual +learning. It provides multiple state-of-the-art learning methods, data +generators/transformers, performance metrics and evaluators for different +stream learning problems. It is the result from the merger of the two most +popular packages for stream learning in Python: Creme and scikit-multiflow. +River introduces a revamped architecture based on the lessons learnt from the +seminal packages. River's ambition is to be the go-to library for doing machine +learning on streaming data. Additionally, this open source package brings under +the same umbrella a large community of practitioners and researchers. The +source code is available at https://github.com/online-ml/river." 
+http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_firstAuthor|Jacob Montiel +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_updated|2020-12-08T21:04:44Z +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_title|River: machine learning for streaming data in Python +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_published|2020-12-08T21:04:44Z +http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea|arxiv_num|2012.04740 +http://www.semanlink.net/doc/2020/12/elvis_sur_twitter_today_i_ke|creationDate|2020-12-22 +http://www.semanlink.net/doc/2020/12/elvis_sur_twitter_today_i_ke|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/12/elvis_sur_twitter_today_i_ke|tag|http://www.semanlink.net/tag/ml_nlp_blog +http://www.semanlink.net/doc/2020/12/elvis_sur_twitter_today_i_ke|title|"elvis sur Twitter : ""Today I kept thinking about the machine learning / NLP / deep learning related blog posts (not papers) that have been transformational for me...""" +http://www.semanlink.net/doc/2020/12/elvis_sur_twitter_today_i_ke|bookmarkOf|https://twitter.com/omarsar0/status/1341412257113268225?s=20 +http://www.semanlink.net/doc/2020/12/elvis_sur_twitter_today_i_ke|creationTime|2020-12-22T22:14:15Z +http://www.semanlink.net/doc/2020/06/akbc_2020_automated_knowledge_b|creationDate|2020-06-16 +http://www.semanlink.net/doc/2020/06/akbc_2020_automated_knowledge_b|tag|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/doc/2020/06/akbc_2020_automated_knowledge_b|title|AKBC 2020 Automated Knowledge Base Construction +http://www.semanlink.net/doc/2020/06/akbc_2020_automated_knowledge_b|bookmarkOf|https://www.akbc.ws/2020/ +http://www.semanlink.net/doc/2020/06/akbc_2020_automated_knowledge_b|creationTime|2020-06-16T13:50:56Z +http://www.semanlink.net/doc/2021/05/is_word_sense_disambiguation_ou|creationDate|2021-05-13 +http://www.semanlink.net/doc/2021/05/is_word_sense_disambiguation_ou|tag|http://www.semanlink.net/tag/target_sense_verification +http://www.semanlink.net/doc/2021/05/is_word_sense_disambiguation_ou|tag|http://www.semanlink.net/tag/word_sense_disambiguation +http://www.semanlink.net/doc/2021/05/is_word_sense_disambiguation_ou|comment|[Refers to](doc:2021/05/ctlr_wic_tsv_target_sense_veri) +http://www.semanlink.net/doc/2021/05/is_word_sense_disambiguation_ou|relatedDoc|http://www.semanlink.net/doc/2021/05/ctlr_wic_tsv_target_sense_veri +http://www.semanlink.net/doc/2021/05/is_word_sense_disambiguation_ou|title|Is Word Sense Disambiguation outdated? 
by Anna Breit May, 2021 Medium +http://www.semanlink.net/doc/2021/05/is_word_sense_disambiguation_ou|bookmarkOf|https://annabreit.medium.com/is-word-sense-disambiguation-outdated-ef05a139576 +http://www.semanlink.net/doc/2021/05/is_word_sense_disambiguation_ou|creationTime|2021-05-13T00:27:16Z +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|creationDate|2020-10-10 +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|tag|http://www.semanlink.net/tag/memory_in_deep_learning +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|tag|http://www.semanlink.net/tag/guillaume_lample +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|comment|[This](doc:2019/07/_1907_05242_large_memory_layer) was last year +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|relatedDoc|http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|title|"Guillaume Lample sur Twitter : ""Last year, we showed that you can outperform a 24-layer transformer in language modeling with just..." +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|bookmarkOf|https://twitter.com/GuillaumeLample/status/1314597157694042113 +http://www.semanlink.net/doc/2020/10/guillaume_lample_sur_twitter_|creationTime|2020-10-10T03:04:51Z +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|creationDate|2021-06-22 +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|tag|http://www.semanlink.net/tag/acl_2018 +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|tag|http://www.semanlink.net/tag/allen_institute_for_ai_a2i +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|tag|http://www.semanlink.net/tag/entity_type_prediction +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|arxiv_author|Luke Zettlemoyer +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|arxiv_author|Yejin Choi +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|arxiv_author|Omer Levy +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|arxiv_author|Eunsol Choi +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|comment|"> a new entity typing task: +given a sentence with an entity mention, +the goal is to predict a set of free-form +phrases (e.g. 
skyscraper, songwriter, or +criminal) that describe appropriate types +for the target entity" +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|title|[1807.04905] Ultra-Fine Entity Typing +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|bookmarkOf|https://arxiv.org/abs/1807.04905 +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|creationTime|2021-06-22T10:50:58Z +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|arxiv_summary|"We introduce a new entity typing task: given a sentence with an entity +mention, the goal is to predict a set of free-form phrases (e.g. skyscraper, +songwriter, or criminal) that describe appropriate types for the target entity. +This formulation allows us to use a new type of distant supervision at large +scale: head words, which indicate the type of the noun phrases they appear in. +We show that these ultra-fine types can be crowd-sourced, and introduce new +evaluation sets that are much more diverse and fine-grained than existing +benchmarks. We present a model that can predict open types, and is trained +using a multitask objective that pools our new head-word supervision with prior +supervision from entity linking. Experimental results demonstrate that our +model is effective in predicting entity types at varying granularity; it +achieves state of the art performance on an existing fine-grained entity typing +benchmark, and sets baselines for our newly-introduced datasets. Our data and +model can be downloaded from: http://nlp.cs.washington.edu/entity_type" +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|arxiv_firstAuthor|Eunsol Choi +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|arxiv_updated|2018-07-13T04:19:03Z +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|arxiv_title|Ultra-Fine Entity Typing +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|arxiv_published|2018-07-13T04:19:03Z +http://www.semanlink.net/doc/2021/06/1807_04905_ultra_fine_entity_|arxiv_num|1807.04905 +http://www.semanlink.net/doc/2021/05/a_review_of_conceptual_clusteri|creationDate|2021-05-26 +http://www.semanlink.net/doc/2021/05/a_review_of_conceptual_clusteri|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2021/05/a_review_of_conceptual_clusteri|tag|http://www.semanlink.net/tag/conceptual_clustering +http://www.semanlink.net/doc/2021/05/a_review_of_conceptual_clusteri|title|A review of conceptual clustering algorithms +http://www.semanlink.net/doc/2021/05/a_review_of_conceptual_clusteri|bookmarkOf|https://www.researchgate.net/publication/323968244_A_review_of_conceptual_clustering_algorithms +http://www.semanlink.net/doc/2021/05/a_review_of_conceptual_clusteri|creationTime|2021-05-26T00:55:55Z +http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in|creationDate|2021-10-13 +http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in|tag|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in|comment|"Refers to: + +- [[2002.10640] Differentiable Reasoning over a Virtual Knowledge Base](doc:2020/07/2002_10640_differentiable_rea) +- [[2004.07202] Entities as Experts: Sparse Memory Access with Entity Supervision](doc:2020/07/2004_07202_entities_as_expert) +- [[2002.08909] REALM: Retrieval-Augmented Language 
Model Pre-Training](doc:2020/12/2002_08909_realm_retrieval_a)
+"
+http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in|relatedDoc|http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea
+http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in|relatedDoc|http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a
+http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in|relatedDoc|http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert
+http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in|title|[2110.06176] Mention Memory: incorporating textual knowledge into Transformers through entity mention attention
+http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in|bookmarkOf|https://arxiv.org/abs/2110.06176
+http://www.semanlink.net/doc/2021/10/2110_06176_mention_memory_in|creationTime|2021-10-13T15:55:04Z
+http://www.semanlink.net/doc/2020/09/deathtrap_film_wikipedia|creationDate|2020-09-30
+http://www.semanlink.net/doc/2020/09/deathtrap_film_wikipedia|tag|http://www.semanlink.net/tag/comedie_policiere
+http://www.semanlink.net/doc/2020/09/deathtrap_film_wikipedia|tag|http://www.semanlink.net/tag/humour_noir
+http://www.semanlink.net/doc/2020/09/deathtrap_film_wikipedia|tag|http://www.semanlink.net/tag/ecrivain
+http://www.semanlink.net/doc/2020/09/deathtrap_film_wikipedia|tag|http://www.semanlink.net/tag/film_americain
+http://www.semanlink.net/doc/2020/09/deathtrap_film_wikipedia|comment|Film de Sidney Lumet, avec Michael Caine et Christopher Reeve
+http://www.semanlink.net/doc/2020/09/deathtrap_film_wikipedia|title|Deathtrap (film)
+http://www.semanlink.net/doc/2020/09/deathtrap_film_wikipedia|bookmarkOf|https://en.wikipedia.org/wiki/Deathtrap_(film)
+http://www.semanlink.net/doc/2020/09/deathtrap_film_wikipedia|creationTime|2020-09-30T21:23:04Z
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|creationDate|2021-06-22
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|tag|http://www.semanlink.net/tag/entity_embeddings
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|tag|http://www.semanlink.net/tag/entity_type_representation
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_author|Livio Baldini Soares
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_author|Tom Kwiatkowski
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_author|Jeffrey Ling
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_author|David Weiss
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_author|Thibault Févry
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_author|Nicholas FitzGerald
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_author|Zifei Shan
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|title|[2001.03765] Learning Cross-Context Entity Representations from Text
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|bookmarkOf|https://arxiv.org/abs/2001.03765
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|creationTime|2021-06-22T13:42:19Z
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_summary|"Language modeling tasks, in which words, or word-pieces, are predicted on the
+basis of a local context, have been very effective for learning word embeddings
+and context dependent representations of phrases. Motivated by the observation
+that efforts to code world knowledge into machine readable knowledge bases or
+human readable encyclopedias tend to be entity-centric, we investigate the use
+of a fill-in-the-blank task to learn context independent representations of
+entities from the text contexts in which those entities were mentioned. We show
+that large scale training of neural models allows us to learn high quality
+entity representations, and we demonstrate successful results on four domains:
+(1) existing entity-level typing benchmarks, including a 64% error reduction
+over previous work on TypeNet (Murty et al., 2018); (2) a novel few-shot
+category reconstruction task; (3) existing entity linking benchmarks, where we
+match the state-of-the-art on CoNLL-Aida without linking-specific features and
+obtain a score of 89.8% on TAC-KBP 2010 without using any alias table, external
+knowledge base or in domain training data and (4) answering trivia questions,
+which uniquely identify entities. Our global entity representations encode
+fine-grained type categories, such as Scottish footballers, and can answer
+trivia questions such as: Who was the last inmate of Spandau jail in Berlin?"
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_firstAuthor|Jeffrey Ling
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_updated|2020-01-11T15:30:56Z
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_title|Learning Cross-Context Entity Representations from Text
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_published|2020-01-11T15:30:56Z
+http://www.semanlink.net/doc/2021/06/2001_03765_learning_cross_con|arxiv_num|2001.03765
+http://www.semanlink.net/doc/2021/08/from_text_to_knowledge_the_inf|creationDate|2021-08-18
+http://www.semanlink.net/doc/2021/08/from_text_to_knowledge_the_inf|tag|http://www.semanlink.net/tag/information_extraction
+http://www.semanlink.net/doc/2021/08/from_text_to_knowledge_the_inf|comment|> Implementation of information extraction pipeline that includes coreference resolution, entity linking, and relationship extraction techniques.
+http://www.semanlink.net/doc/2021/08/from_text_to_knowledge_the_inf|title|From Text to Knowledge: The Information Extraction Pipeline by Tomaz Bratanic Towards Data Science +http://www.semanlink.net/doc/2021/08/from_text_to_knowledge_the_inf|bookmarkOf|https://towardsdatascience.com/from-text-to-knowledge-the-information-extraction-pipeline-b65e7e30273e +http://www.semanlink.net/doc/2021/08/from_text_to_knowledge_the_inf|creationTime|2021-08-18T16:38:54Z +http://www.semanlink.net/doc/2020/11/node_js_browserify_how_to_c|creationDate|2020-11-19 +http://www.semanlink.net/doc/2020/11/node_js_browserify_how_to_c|tag|http://www.semanlink.net/tag/browserify +http://www.semanlink.net/doc/2020/11/node_js_browserify_how_to_c|tag|http://www.semanlink.net/tag/node_js +http://www.semanlink.net/doc/2020/11/node_js_browserify_how_to_c|tag|http://www.semanlink.net/tag/brouteur +http://www.semanlink.net/doc/2020/11/node_js_browserify_how_to_c|tag|http://www.semanlink.net/tag/stackoverflow_q +http://www.semanlink.net/doc/2020/11/node_js_browserify_how_to_c|comment|see answer by Galen Long - it works +http://www.semanlink.net/doc/2020/11/node_js_browserify_how_to_c|title|node.js - Browserify - How to call function bundled in a file generated through browserify in browser - Stack Overflow +http://www.semanlink.net/doc/2020/11/node_js_browserify_how_to_c|bookmarkOf|https://stackoverflow.com/questions/23296094/browserify-how-to-call-function-bundled-in-a-file-generated-through-browserify +http://www.semanlink.net/doc/2020/11/node_js_browserify_how_to_c|creationTime|2020-11-19T16:23:32Z +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|creationDate|2021-09-29 +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|tag|http://www.semanlink.net/tag/language_model_fine_tuning +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|tag|http://www.semanlink.net/tag/language_models_size +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|arxiv_author|Rabeeh Karimi Mahabadi +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|arxiv_author|Sebastian Ruder +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|arxiv_author|James Henderson +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|comment|> Compacter (Compact Adapter) layers, a method to adapt large-scale language models, which only trains around 0.05% of a model's parameters and performs on par with fine-tuning. [twitter](https://twitter.com/KarimiRabeeh/status/1404774464441794560) +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|title|[2106.04647] Compacter: Efficient Low-Rank Hypercomplex Adapter Layers +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|bookmarkOf|https://arxiv.org/abs/2106.04647 +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|creationTime|2021-09-29T02:05:29Z +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|arxiv_summary|"Adapting large-scale pretrained language models to downstream tasks via +fine-tuning is the standard method for achieving state-of-the-art performance +on NLP benchmarks. 
However, fine-tuning all weights of models with millions or +billions of parameters is sample-inefficient, unstable in low-resource +settings, and wasteful as it requires storing a separate copy of the model for +each task. Recent work has developed parameter-efficient fine-tuning methods, +but these approaches either still require a relatively large number of +parameters or underperform standard fine-tuning. In this work, we propose +Compacter, a method for fine-tuning large-scale language models with a better +trade-off between task performance and the number of trainable parameters than +prior work. Compacter accomplishes this by building on top of ideas from +adapters, low-rank optimization, and parameterized hypercomplex multiplication +layers. +Specifically, Compacter inserts task-specific weight matrices into a +pretrained model's weights, which are computed efficiently as a sum of +Kronecker products between shared ``slow'' weights and ``fast'' rank-one +matrices defined per Compacter layer. By only training 0.047% of a pretrained +model's parameters, Compacter performs on par with standard fine-tuning on GLUE +and outperforms fine-tuning in low-resource settings. Our code is publicly +available in https://github.com/rabeehk/compacter/" +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|arxiv_firstAuthor|Rabeeh Karimi Mahabadi +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|arxiv_updated|2021-06-08T19:17:04Z +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|arxiv_title|Compacter: Efficient Low-Rank Hypercomplex Adapter Layers +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|arxiv_published|2021-06-08T19:17:04Z +http://www.semanlink.net/doc/2021/09/2106_04647_compacter_efficie|arxiv_num|2106.04647 +http://www.semanlink.net/doc/2021/05/alex_russell_sur_twitter_if_|creationDate|2021-05-10 +http://www.semanlink.net/doc/2021/05/alex_russell_sur_twitter_if_|tag|http://www.semanlink.net/tag/firefox +http://www.semanlink.net/doc/2021/05/alex_russell_sur_twitter_if_|tag|http://www.semanlink.net/tag/apple_sucks +http://www.semanlink.net/doc/2021/05/alex_russell_sur_twitter_if_|tag|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/doc/2021/05/alex_russell_sur_twitter_if_|tag|http://www.semanlink.net/tag/ios +http://www.semanlink.net/doc/2021/05/alex_russell_sur_twitter_if_|title|"Alex Russell sur Twitter : ""If you install Firefox on Windows, MacOS, Linux, ChromeOS, or Android you get *real* Firefox, complete with the Gecko engine. But not on iOS. 
Apple cripples engine competition in silent, deeply impactful ways."" / Twitter" +http://www.semanlink.net/doc/2021/05/alex_russell_sur_twitter_if_|bookmarkOf|https://twitter.com/slightlylate/status/1176864076716269568?s=20 +http://www.semanlink.net/doc/2021/05/alex_russell_sur_twitter_if_|creationTime|2021-05-10T23:30:43Z +http://www.semanlink.net/doc/2020/06/patrick_von_platen_sur_twitter_|creationDate|2020-06-29 +http://www.semanlink.net/doc/2020/06/patrick_von_platen_sur_twitter_|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2020/06/patrick_von_platen_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/06/patrick_von_platen_sur_twitter_|tag|http://www.semanlink.net/tag/reformer +http://www.semanlink.net/doc/2020/06/patrick_von_platen_sur_twitter_|title|"Patrick von Platen sur Twitter : ""Today, @huggingface is the start of our Reformer series...""" +http://www.semanlink.net/doc/2020/06/patrick_von_platen_sur_twitter_|bookmarkOf|https://twitter.com/PatrickPlaten/status/1277593786147897345?s=20 +http://www.semanlink.net/doc/2020/06/patrick_von_platen_sur_twitter_|creationTime|2020-06-29T19:07:30Z +http://www.semanlink.net/doc/2021/04/100_000_morts_du_covid_19_en_fr|creationDate|2021-04-15 +http://www.semanlink.net/doc/2021/04/100_000_morts_du_covid_19_en_fr|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2021/04/100_000_morts_du_covid_19_en_fr|tag|http://www.semanlink.net/tag/recherche_francaise +http://www.semanlink.net/doc/2021/04/100_000_morts_du_covid_19_en_fr|tag|http://www.semanlink.net/tag/etat_de_la_france +http://www.semanlink.net/doc/2021/04/100_000_morts_du_covid_19_en_fr|title|100 000 morts du Covid-19 en France : ferons-nous en sorte que le monde d’après ne permette plus une telle tragédie ? 
+http://www.semanlink.net/doc/2021/04/100_000_morts_du_covid_19_en_fr|bookmarkOf|https://www.lemonde.fr/planete/article/2021/04/15/100-000-morts-du-covid-19-en-france-aurait-il-pu-en-etre-autrement_6076818_3244.html +http://www.semanlink.net/doc/2021/04/100_000_morts_du_covid_19_en_fr|creationTime|2021-04-15T21:48:59Z +http://www.semanlink.net/doc/2021/01/precurseur_en_amerique_latine_|creationDate|2021-01-07 +http://www.semanlink.net/doc/2021/01/precurseur_en_amerique_latine_|tag|http://www.semanlink.net/tag/mexique +http://www.semanlink.net/doc/2021/01/precurseur_en_amerique_latine_|tag|http://www.semanlink.net/tag/glyphosate +http://www.semanlink.net/doc/2021/01/precurseur_en_amerique_latine_|tag|http://www.semanlink.net/tag/bonne_nouvelle +http://www.semanlink.net/doc/2021/01/precurseur_en_amerique_latine_|tag|http://www.semanlink.net/tag/mais_ogm +http://www.semanlink.net/doc/2021/01/precurseur_en_amerique_latine_|title|Précurseur en Amérique latine, le Mexique bannit le maïs génétiquement modifié et le glyphosate +http://www.semanlink.net/doc/2021/01/precurseur_en_amerique_latine_|bookmarkOf|https://www.lemonde.fr/planete/article/2021/01/07/precurseur-en-amerique-latine-le-mexique-bannit-le-mais-genetiquement-modifie-et-le-glyphosate_6065495_3244.html +http://www.semanlink.net/doc/2021/01/precurseur_en_amerique_latine_|creationTime|2021-01-07T17:53:18Z +http://www.semanlink.net/doc/2020/09/scikit_multilearn_multi_label_|creationDate|2020-09-16 +http://www.semanlink.net/doc/2020/09/scikit_multilearn_multi_label_|tag|http://www.semanlink.net/tag/scikit_learn +http://www.semanlink.net/doc/2020/09/scikit_multilearn_multi_label_|tag|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/doc/2020/09/scikit_multilearn_multi_label_|title|scikit-multilearn: Multi-Label Classification in Python +http://www.semanlink.net/doc/2020/09/scikit_multilearn_multi_label_|bookmarkOf|http://scikit.ml/index.html +http://www.semanlink.net/doc/2020/09/scikit_multilearn_multi_label_|creationTime|2020-09-16T18:21:54Z +http://www.semanlink.net/doc/2021/05/transformers_pipelines_ipynb_|creationDate|2021-05-26 +http://www.semanlink.net/doc/2021/05/transformers_pipelines_ipynb_|tag|http://www.semanlink.net/tag/google_colab +http://www.semanlink.net/doc/2021/05/transformers_pipelines_ipynb_|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2021/05/transformers_pipelines_ipynb_|tag|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/doc/2021/05/transformers_pipelines_ipynb_|comment|> One of the easiest ways to get started with neural networks is by loading pre-trained neural networks through the HuggingFace Transformers pipeline interface +http://www.semanlink.net/doc/2021/05/transformers_pipelines_ipynb_|title|Transformers Pipelines.ipynb - Colaboratory +http://www.semanlink.net/doc/2021/05/transformers_pipelines_ipynb_|bookmarkOf|https://colab.research.google.com/drive/1OADpkjDCJT4JSrC6ZI2zZ1j0PG2BWUmd?usp=sharing +http://www.semanlink.net/doc/2021/05/transformers_pipelines_ipynb_|creationTime|2021-05-26T12:13:33Z +http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip|creationDate|2020-12-19 +http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip|tag|http://www.semanlink.net/tag/dark_side_of_tech +http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip|tag|http://www.semanlink.net/tag/gafa 
+http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip|tag|http://www.semanlink.net/tag/surveillance_capitalism +http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip|tag|http://www.semanlink.net/tag/manipulation +http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip|tag|http://www.semanlink.net/tag/techlash +http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip|comment|> Après l’annonce du départ de Palantir, les étudiants de Stanford ont célébré leur victoire sur Instagram, Facebook ou TikTok. Puis, le soir même, certains ont probablement regardé une série Netflix, commandé un plat sur Uber Eats ou se sont offert un cadeau sur Amazon. Il fallait bien fêter ça. +http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip|title|Exploitation des données, manipulation de l’opinion, culte du secret… La trahison des GAFA +http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip|bookmarkOf|https://www.lemonde.fr/m-le-mag/article/2020/12/18/exploitation-des-donnees-manipulation-de-l-opinion-culte-du-secret-la-trahison-des-gafa_6063878_4500055.html +http://www.semanlink.net/doc/2020/12/exploitation_des_donnees_manip|creationTime|2020-12-19T13:46:16Z +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|creationDate|2020-10-11 +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_author|Meysam Chenaghlu +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_author|Erik Cambria +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_author|Narjes Nikzad +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_author|Nal Kalchbrenner +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_author|Jianfeng Gao +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_author|Shervin Minaee +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|title|[2004.03705] Deep Learning Based Text Classification: A Comprehensive Review +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|bookmarkOf|https://arxiv.org/abs/2004.03705 +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|creationTime|2020-10-11T01:16:13Z +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_summary|"Deep learning based models have surpassed classical machine learning based +approaches in various text classification tasks, including sentiment analysis, +news categorization, question answering, and natural language inference. In +this work, we provide a detailed review of more than 150 deep learning based +models for text classification developed in recent years, and discuss their +technical contributions, similarities, and strengths. We also provide a summary +of more than 40 popular datasets widely used for text classification. Finally, +we provide a quantitative analysis of the performance of different deep +learning models on popular benchmarks, and discuss future research directions." 
+http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_firstAuthor|Shervin Minaee +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_updated|2020-04-06T02:00:30Z +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_title|Deep Learning Based Text Classification: A Comprehensive Review +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_published|2020-04-06T02:00:30Z +http://www.semanlink.net/doc/2020/10/2004_03705_deep_learning_base|arxiv_num|2004.03705 +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|creationDate|2021-09-01 +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|tag|http://www.semanlink.net/tag/emnlp_2021 +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|tag|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|tag|http://www.semanlink.net/tag/nils_reimers +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|tag|http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|arxiv_author|Iryna Gurevych +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|arxiv_author|Kexin Wang +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|arxiv_author|Nils Reimers +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|comment|"> The most +successful previous approaches like InferSent (Conneau +et al., 2017), Universial Sentence Encoder +(USE) (Cer et al., 2018) and SBERT (Reimers and +Gurevych, 2019) heavily relied on labeled data to +train sentence embedding models. +> +> TSDAE can +achieve up to 93.1% of the performance of indomain +supervised approaches. Further, we +show that TSDAE is **a strong domain adaptation +and pre-training method for sentence +embeddings**, significantly outperforming other +approaches like Masked Language Model. + +> During training, TSDAE +encodes corrupted sentences into fixed-sized +vectors and requires the decoder to reconstruct the +original sentences from this sentence embedding. + +- +- [github](https://github.com/UKPLab/sentence-transformers/tree/master/examples/unsupervised_learning/TSDAE) +- [UKPLab/sentence-transformers: Sentence Embeddings with BERT & XLNet](doc:2020/07/ukplab_sentence_transformers_s) +- [twitter](https://twitter.com/KexinWang2049/status/1433361957579538432): + +> **TSDAE can learn domain-specific sentence embeddings with unlabeled sentences** +> +> Most importantly, instead of STS, **we suggest evaluating unsupervised sentence embeddings on the domain-specific tasks&datasets, which is the real use case for them**. Actually, STS scores do not correlate with performance on specific tasks. 
+ + + +" +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|relatedDoc|http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|title|[2104.06979] TSDAE: Using Transformer-based Sequential Denoising Auto-Encoder for Unsupervised Sentence Embedding Learning +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|bookmarkOf|https://arxiv.org/abs/2104.06979 +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|creationTime|2021-09-01T16:43:01Z +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|arxiv_summary|"Learning sentence embeddings often requires a large amount of labeled data. +However, for most tasks and domains, labeled data is seldom available and +creating it is expensive. In this work, we present a new state-of-the-art +unsupervised method based on pre-trained Transformers and Sequential Denoising +Auto-Encoder (TSDAE) which outperforms previous approaches by up to 6.4 points. +It can achieve up to 93.1% of the performance of in-domain supervised +approaches. Further, we show that TSDAE is a strong domain adaptation and +pre-training method for sentence embeddings, significantly outperforming other +approaches like Masked Language Model. +A crucial shortcoming of previous studies is the narrow evaluation: Most work +mainly evaluates on the single task of Semantic Textual Similarity (STS), which +does not require any domain knowledge. It is unclear if these proposed methods +generalize to other domains and tasks. We fill this gap and evaluate TSDAE and +other recent approaches on four different datasets from heterogeneous domains." +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|arxiv_firstAuthor|Kexin Wang +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|arxiv_updated|2021-08-30T18:23:40Z +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|arxiv_title|TSDAE: Using Transformer-based Sequential Denoising Auto-Encoder for Unsupervised Sentence Embedding Learning +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|arxiv_published|2021-04-14T17:02:18Z +http://www.semanlink.net/doc/2021/09/2104_06979_tsdae_using_trans|arxiv_num|2104.06979 +http://www.semanlink.net/doc/2020/06/artificial_neural_networks_accu|creationDate|2020-06-27 +http://www.semanlink.net/doc/2020/06/artificial_neural_networks_accu|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/doc/2020/06/artificial_neural_networks_accu|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2020/06/artificial_neural_networks_accu|comment|"> we here investigated whether state-of-the-art ANN language models (e.g. Devlin et al., 2018; Pennington et al., 2014; Radford et al., 2019) capture human brain activity elicited during language comprehension. +> ... +> **These results support the hypothesis that a drive to predict future inputs may shape human language processing, and perhaps the way knowledge of language is learned and organized in the brain**. 
In addition, the finding of strong correspondences between ANNs and human representations opens **the door to using the growing suite of tools for neural network interpretation to test hypotheses about the human mind.**" +http://www.semanlink.net/doc/2020/06/artificial_neural_networks_accu|title|Artificial Neural Networks Accurately Predict Language Processing in the Brain bioRxiv +http://www.semanlink.net/doc/2020/06/artificial_neural_networks_accu|bookmarkOf|https://www.biorxiv.org/content/10.1101/2020.06.26.174482v1#disqus_thread +http://www.semanlink.net/doc/2020/06/artificial_neural_networks_accu|creationTime|2020-06-27T20:16:57Z +http://www.semanlink.net/doc/2020/07/federico_errica_%F0%9F%87%AE%F0%9F%87%B9%F0%9F%87%AA%F0%9F%87%BA_sur_tw|creationDate|2020-07-27 +http://www.semanlink.net/doc/2020/07/federico_errica_%F0%9F%87%AE%F0%9F%87%B9%F0%9F%87%AA%F0%9F%87%BA_sur_tw|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/07/federico_errica_%F0%9F%87%AE%F0%9F%87%B9%F0%9F%87%AA%F0%9F%87%BA_sur_tw|tag|http://www.semanlink.net/tag/markov +http://www.semanlink.net/doc/2020/07/federico_errica_%F0%9F%87%AE%F0%9F%87%B9%F0%9F%87%AA%F0%9F%87%BA_sur_tw|tag|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/doc/2020/07/federico_errica_%F0%9F%87%AE%F0%9F%87%B9%F0%9F%87%AA%F0%9F%87%BA_sur_tw|title|"Federico Errica 🇮🇹🇪🇺 sur Twitter : ""Our “#Probabilistic #Learning on #Graphs via Contextual Architectures”...""" +http://www.semanlink.net/doc/2020/07/federico_errica_%F0%9F%87%AE%F0%9F%87%B9%F0%9F%87%AA%F0%9F%87%BA_sur_tw|bookmarkOf|https://twitter.com/federico_errica/status/1287689141350797312 +http://www.semanlink.net/doc/2020/07/federico_errica_%F0%9F%87%AE%F0%9F%87%B9%F0%9F%87%AA%F0%9F%87%BA_sur_tw|creationTime|2020-07-27T14:02:01Z +http://www.semanlink.net/doc/2020/10/knowledge_graphs_an_informatio|creationDate|2020-10-30 +http://www.semanlink.net/doc/2020/10/knowledge_graphs_an_informatio|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2020/10/knowledge_graphs_an_informatio|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2020/10/knowledge_graphs_an_informatio|tag|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/doc/2020/10/knowledge_graphs_an_informatio|title|Knowledge Graphs: An Information Retrieval Perspective +http://www.semanlink.net/doc/2020/10/knowledge_graphs_an_informatio|bookmarkOf|https://staff.fnwi.uva.nl/m.derijke/wp-content/papercite-data/pdf/reinanda-2020-knowledge.pdf +http://www.semanlink.net/doc/2020/10/knowledge_graphs_an_informatio|creationTime|2020-10-30T23:06:35Z +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|creationDate|2021-07-09 +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|tag|http://www.semanlink.net/tag/nils_reimers +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|tag|http://www.semanlink.net/tag/ikuya_yamada +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|comment|Related to [UKPLab/beir: A Heterogeneous Benchmark for Information Retrieval.](doc:2021/07/ukplab_beir_a_heterogeneous_be) and [[2106.00882] Efficient Passage Retrieval with Hashing for Open-domain Question 
Answering](doc:2021/06/2106_00882_efficient_passage_) +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|relatedDoc|http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_ +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|relatedDoc|http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|title|"Nandan Thakur sur Twitter : ""@ikuyamada @Nils_Reimers Thanks @ikuyamad...""" +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|bookmarkOf|https://twitter.com/Nthakur20/status/1413179841428566016 +http://www.semanlink.net/doc/2021/07/nandan_thakur_sur_twitter_i|creationTime|2021-07-09T12:32:10Z +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|creationDate|2020-08-12 +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|tag|http://www.semanlink.net/tag/code +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|tag|http://www.semanlink.net/tag/manik_varma +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|tag|http://www.semanlink.net/tag/benchmark +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|tag|http://www.semanlink.net/tag/extreme_multi_label_classification +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|comment|"benchmark datasets, metrics, results and code that can be used for evaluating the performance of extreme multi-label algorithms. + +[Related blog post](doc:2020/08/everything_you_always_wanted_to)" +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|relatedDoc|http://www.semanlink.net/doc/2020/08/everything_you_always_wanted_to +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|title|The Extreme Classification Repository +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|bookmarkOf|http://manikvarma.org/downloads/XC/XMLRepository.html +http://www.semanlink.net/doc/2020/08/the_extreme_classification_repo|creationTime|2020-08-12T01:10:51Z +http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l|creationDate|2021-02-24 +http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l|tag|http://www.semanlink.net/tag/weak_supervision +http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l|tag|http://www.semanlink.net/tag/snorkel +http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l|tag|http://www.semanlink.net/tag/slides +http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l|comment|"> Snorkel has some limitations: +> - Assumes all data points are i.i.d +> - Cannot take into account “probabilistic” labels +> +> Not well suited for sequence labelling tasks such as Named Entity Recognition (consecutive words in a sentence are not i.i.d.!) 
+> We have developed a novel weak supervision approach tailored to NER and other sequence labelling tasks + +[Paper](doc:2021/02/named_entity_recognition_withou)" +http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l|relatedDoc|http://www.semanlink.net/doc/2021/02/named_entity_recognition_withou +http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l|title|Named Entity Recognition without Labelled Data: A Weak Supervision Approach (2020) (slides) +http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l|bookmarkOf|https://www.nr.no/~plison/pdfs/talks/nfkf2020.pdf +http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l|creationTime|2021-02-24T15:57:08Z +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|creationDate|2020-09-24 +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|tag|http://www.semanlink.net/tag/transfer_learning +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_author|Hengshu Zhu +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_author|Hui Xiong +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_author|Qing He +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_author|Keyu Duan +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_author|Dongbo Xi +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_author|Yongchun Zhu +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_author|Fuzhen Zhuang +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_author|Zhiyuan Qi +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|title|[1911.02685] A Comprehensive Survey on Transfer Learning +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|bookmarkOf|https://arxiv.org/abs/1911.02685 +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|creationTime|2020-09-24T18:41:06Z +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_summary|"Transfer learning aims at improving the performance of target learners on +target domains by transferring the knowledge contained in different but related +source domains. In this way, the dependence on a large number of target domain +data can be reduced for constructing target learners. Due to the wide +application prospects, transfer learning has become a popular and promising +area in machine learning. Although there are already some valuable and +impressive surveys on transfer learning, these surveys introduce approaches in +a relatively isolated way and lack the recent advances in transfer learning. +Due to the rapid expansion of the transfer learning area, it is both necessary +and challenging to comprehensively review the relevant studies. This survey +attempts to connect and systematize the existing transfer learning researches, +as well as to summarize and interpret the mechanisms and the strategies of +transfer learning in a comprehensive way, which may help readers have a better +understanding of the current research status and ideas. 
Unlike previous +surveys, this survey paper reviews more than forty representative transfer +learning approaches, especially homogeneous transfer learning approaches, from +the perspectives of data and model. The applications of transfer learning are +also briefly introduced. In order to show the performance of different transfer +learning models, over twenty representative transfer learning models are used +for experiments. The models are performed on three different datasets, i.e., +Amazon Reviews, Reuters-21578, and Office-31. And the experimental results +demonstrate the importance of selecting appropriate transfer learning models +for different applications in practice." +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_firstAuthor|Fuzhen Zhuang +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_updated|2020-06-23T15:52:46Z +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_title|A Comprehensive Survey on Transfer Learning +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_published|2019-11-07T00:15:02Z +http://www.semanlink.net/doc/2020/09/1911_02685_a_comprehensive_su|arxiv_num|1911.02685 +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|creationDate|2020-07-28 +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|tag|http://www.semanlink.net/tag/critical_evaluation +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|tag|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_author|Zhiqing Sun +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_author|Yiming Yang +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_author|Soumya Sanyal +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_author|Shikhar Vashishth +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_author|Partha Talukdar +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|title|[1911.03903] A Re-evaluation of Knowledge Graph Completion Methods +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|bookmarkOf|https://arxiv.org/abs/1911.03903 +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|creationTime|2020-07-28T11:27:26Z +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_summary|"Knowledge Graph Completion (KGC) aims at automatically predicting missing +links for large-scale knowledge graphs. A vast number of state-of-the-art KGC +techniques have got published at top conferences in several research fields, +including data mining, machine learning, and natural language processing. +However, we notice that several recent papers report very high performance, +which largely outperforms previous state-of-the-art methods. In this paper, we +find that this can be attributed to the inappropriate evaluation protocol used +by them and propose a simple evaluation protocol to address this problem. The +proposed protocol is robust to handle bias in the model, which can +substantially affect the final results. We conduct extensive experiments and +report the performance of several existing methods using our protocol. 
The +reproducible code has been made publicly available" +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_firstAuthor|Zhiqing Sun +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_updated|2020-07-08T19:32:34Z +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_title|A Re-evaluation of Knowledge Graph Completion Methods +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_published|2019-11-10T11:19:08Z +http://www.semanlink.net/doc/2020/07/1911_03903_a_re_evaluation_of|arxiv_num|1911.03903 +http://www.semanlink.net/doc/2020/08/hugging_face_sur_twitter_no_|creationDate|2020-08-12 +http://www.semanlink.net/doc/2020/08/hugging_face_sur_twitter_no_|tag|http://www.semanlink.net/tag/zero_shot +http://www.semanlink.net/doc/2020/08/hugging_face_sur_twitter_no_|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2020/08/hugging_face_sur_twitter_no_|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2020/08/hugging_face_sur_twitter_no_|title|"Hugging Face sur Twitter : ""No labeled data? No problem. The 🤗 Transformers master branch now includes a built-in pipeline for zero-shot text classification..." +http://www.semanlink.net/doc/2020/08/hugging_face_sur_twitter_no_|bookmarkOf|https://twitter.com/huggingface/status/1293240692924452864 +http://www.semanlink.net/doc/2020/08/hugging_face_sur_twitter_no_|creationTime|2020-08-12T17:02:34Z +http://www.semanlink.net/doc/2020/10/chris_mungall_sur_twitter_re|creationDate|2020-10-07 +http://www.semanlink.net/doc/2020/10/chris_mungall_sur_twitter_re|tag|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/doc/2020/10/chris_mungall_sur_twitter_re|tag|http://www.semanlink.net/tag/owl2vec +http://www.semanlink.net/doc/2020/10/chris_mungall_sur_twitter_re|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/10/chris_mungall_sur_twitter_re|tag|http://www.semanlink.net/tag/owl +http://www.semanlink.net/doc/2020/10/chris_mungall_sur_twitter_re|tag|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/doc/2020/10/chris_mungall_sur_twitter_re|title|"Chris Mungall sur Twitter : ""Reading: OWL2Vec*: Embedding of OWL Ontologies""" +http://www.semanlink.net/doc/2020/10/chris_mungall_sur_twitter_re|bookmarkOf|https://twitter.com/chrismungall/status/1313296287861600256 +http://www.semanlink.net/doc/2020/10/chris_mungall_sur_twitter_re|creationTime|2020-10-07T08:36:28Z +http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex|creationDate|2020-11-14 +http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex|tag|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex|tag|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex|tag|http://www.semanlink.net/tag/diffbot +http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex|comment|No more wasted time tracking down data Search or extract anything on the web. Diffbot uses machine learning to transform the internet into accessible, structured data. 
+http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex|title|Knowledge Graph, AI Web Data Extraction and Crawling Diffbot +http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex|bookmarkOf|https://www.diffbot.com/ +http://www.semanlink.net/doc/2020/11/knowledge_graph_ai_web_data_ex|creationTime|2020-11-14T08:52:59Z +http://www.semanlink.net/doc/2021/03/l%E2%80%99hydrogene_une_solution_incer|creationDate|2021-03-27 +http://www.semanlink.net/doc/2021/03/l%E2%80%99hydrogene_une_solution_incer|tag|http://www.semanlink.net/tag/hydrogen_cars +http://www.semanlink.net/doc/2021/03/l%E2%80%99hydrogene_une_solution_incer|tag|http://www.semanlink.net/tag/mobilite +http://www.semanlink.net/doc/2021/03/l%E2%80%99hydrogene_une_solution_incer|tag|http://www.semanlink.net/tag/hydrogen_economy +http://www.semanlink.net/doc/2021/03/l%E2%80%99hydrogene_une_solution_incer|title|L’hydrogène, une solution incertaine pour la mobilité +http://www.semanlink.net/doc/2021/03/l%E2%80%99hydrogene_une_solution_incer|bookmarkOf|https://www.lemonde.fr/economie/article/2021/03/27/l-hydrogene-une-solution-incertaine-pour-la-mobilite_6074676_3234.html +http://www.semanlink.net/doc/2021/03/l%E2%80%99hydrogene_une_solution_incer|creationTime|2021-03-27T14:39:20Z +http://www.semanlink.net/doc/2021/08/agostina_calabrese_sur_twitter_|creationDate|2021-08-02 +http://www.semanlink.net/doc/2021/08/agostina_calabrese_sur_twitter_|tag|http://www.semanlink.net/tag/sentiment_analysis +http://www.semanlink.net/doc/2021/08/agostina_calabrese_sur_twitter_|tag|http://www.semanlink.net/tag/intent_classification_and_slot_filling +http://www.semanlink.net/doc/2021/08/agostina_calabrese_sur_twitter_|tag|http://www.semanlink.net/tag/graph_parsing +http://www.semanlink.net/doc/2021/08/agostina_calabrese_sur_twitter_|title|Agostina Calabrese sur Twitter : Structured Sentiment Analysis as Dependency Graph Parsing +http://www.semanlink.net/doc/2021/08/agostina_calabrese_sur_twitter_|bookmarkOf|https://twitter.com/agostina_cal/status/1421795350252335111 +http://www.semanlink.net/doc/2021/08/agostina_calabrese_sur_twitter_|creationTime|2021-08-02T08:22:05Z +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|creationDate|2020-09-06 +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|tag|http://www.semanlink.net/tag/extreme_multi_label_classification +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|arxiv_author|Rohit Babbar +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|arxiv_author|Bernhard Shoelkopf +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|comment|(WSDM 2017) [Code](https://sites.google.com/site/rohitbabbar/dismec) on author's site (several papers related to XClassification) +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|title|[1609.02521] DiSMEC - Distributed Sparse Machines for Extreme Multi-label Classification +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|bookmarkOf|https://arxiv.org/abs/1609.02521 +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|creationTime|2020-09-06T10:57:36Z +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|arxiv_summary|"Extreme multi-label classification refers to supervised multi-label learning +involving hundreds of thousands or even millions of labels. 
Datasets in extreme +classification exhibit fit to power-law distribution, i.e. a large fraction of +labels have very few positive instances in the data distribution. Most +state-of-the-art approaches for extreme multi-label classification attempt to +capture correlation among labels by embedding the label matrix to a +low-dimensional linear sub-space. However, in the presence of power-law +distributed extremely large and diverse label spaces, structural assumptions +such as low rank can be easily violated. +In this work, we present DiSMEC, which is a large-scale distributed framework +for learning one-versus-rest linear classifiers coupled with explicit capacity +control to control model size. Unlike most state-of-the-art methods, DiSMEC +does not make any low rank assumptions on the label matrix. Using double layer +of parallelization, DiSMEC can learn classifiers for datasets consisting +hundreds of thousands labels within few hours. The explicit capacity control +mechanism filters out spurious parameters which keep the model compact in size, +without losing prediction accuracy. We conduct extensive empirical evaluation +on publicly available real-world datasets consisting upto 670,000 labels. We +compare DiSMEC with recent state-of-the-art approaches, including - SLEEC which +is a leading approach for learning sparse local embeddings, and FastXML which +is a tree-based approach optimizing ranking based loss function. On some of the +datasets, DiSMEC can significantly boost prediction accuracies - 10% better +compared to SLECC and 15% better compared to FastXML, in absolute terms." +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|arxiv_firstAuthor|Rohit Babbar +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|arxiv_updated|2016-09-08T18:17:25Z +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|arxiv_title|DiSMEC - Distributed Sparse Machines for Extreme Multi-label Classification +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|arxiv_published|2016-09-08T18:17:25Z +http://www.semanlink.net/doc/2020/09/1609_02521_dismec_distribut|arxiv_num|1609.02521 +http://www.semanlink.net/doc/2021/07/une_solution_inspiree_du_cervea|creationDate|2021-07-26 +http://www.semanlink.net/doc/2021/07/une_solution_inspiree_du_cervea|tag|http://www.semanlink.net/tag/catastrophic_forgetting +http://www.semanlink.net/doc/2021/07/une_solution_inspiree_du_cervea|title|Une solution inspirée du cerveau pour éviter l’oubli catastrophique des IA INSIS +http://www.semanlink.net/doc/2021/07/une_solution_inspiree_du_cervea|bookmarkOf|https://insis.cnrs.fr/fr/cnrsinfo/une-solution-inspiree-du-cerveau-pour-eviter-loubli-catastrophique-des-ia +http://www.semanlink.net/doc/2021/07/une_solution_inspiree_du_cervea|creationTime|2021-07-26T15:54:47Z +http://www.semanlink.net/doc/2020/10/le_prix_nobel_de_chimie_decerne|creationDate|2020-10-07 +http://www.semanlink.net/doc/2020/10/le_prix_nobel_de_chimie_decerne|tag|http://www.semanlink.net/tag/emmanuelle_charpentier +http://www.semanlink.net/doc/2020/10/le_prix_nobel_de_chimie_decerne|tag|http://www.semanlink.net/tag/prix_nobel +http://www.semanlink.net/doc/2020/10/le_prix_nobel_de_chimie_decerne|tag|http://www.semanlink.net/tag/crispr_cas9 +http://www.semanlink.net/doc/2020/10/le_prix_nobel_de_chimie_decerne|title|Le prix Nobel de chimie décerné à la Française Emmanuelle Charpentier et l’Américaine Jennifer Doudna pour les « ciseaux moléculaires » 
+http://www.semanlink.net/doc/2020/10/le_prix_nobel_de_chimie_decerne|bookmarkOf|https://www.lemonde.fr/sciences/article/2020/10/07/le-prix-nobel-de-chimie-a-la-francaise-emmanuelle-charpentier-et-l-americaine-jennifer-doudna-pour-les-ciseaux-moleculaires_6055125_1650684.html +http://www.semanlink.net/doc/2020/10/le_prix_nobel_de_chimie_decerne|creationTime|2020-10-07T15:48:25Z +http://www.semanlink.net/doc/2021/07/%C2%AB_projet_pegasus_%C2%BB_revelation|creationDate|2021-07-18 +http://www.semanlink.net/doc/2021/07/%C2%AB_projet_pegasus_%C2%BB_revelation|tag|http://www.semanlink.net/tag/nso_pegasus +http://www.semanlink.net/doc/2021/07/%C2%AB_projet_pegasus_%C2%BB_revelation|tag|http://www.semanlink.net/tag/espionnage +http://www.semanlink.net/doc/2021/07/%C2%AB_projet_pegasus_%C2%BB_revelation|title|« Projet Pegasus » : révélations sur un système mondial d’espionnage de téléphones +http://www.semanlink.net/doc/2021/07/%C2%AB_projet_pegasus_%C2%BB_revelation|bookmarkOf|https://www.lemonde.fr/projet-pegasus/article/2021/07/18/projet-pegasus-revelations-sur-un-systeme-mondial-d-espionnage-de-telephones_6088652_6088648.html +http://www.semanlink.net/doc/2021/07/%C2%AB_projet_pegasus_%C2%BB_revelation|creationTime|2021-07-18T23:15:09Z +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|creationDate|2021-05-03 +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|tag|http://www.semanlink.net/tag/entailment +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|tag|http://www.semanlink.net/tag/data_augmentation +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|tag|http://www.semanlink.net/tag/few_shot_learning +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_author|Madian Khabsa +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_author|Sinong Wang +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_author|Han Fang +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_author|Hao Ma +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_author|Hanzi Mao +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|comment|"> a new approach, named as EFL, that can turn small LMs into better few-shot learners. The key idea of this approach is to reformulate potential NLP task into an entailment one, and then fine-tune the model with as little as 8 examples +> +> For instance, we can reformulate a sentiment classification task as a textual entailment one +with an input sentence S1 as +xin = [CLS]S1[SEP]S2[EOS]; where S2 = This indicates positive user sentiment, +and let the language modelMto determine the if input sentence S1 entails the label description S2" +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|title|[2104.14690] Entailment as Few-Shot Learner +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|bookmarkOf|https://arxiv.org/abs/2104.14690 +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|creationTime|2021-05-03T23:05:39Z +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_summary|"Large pre-trained language models (LMs) have demonstrated remarkable ability +as few-shot learners. 
However, their success hinges largely on scaling model +parameters to a degree that makes it challenging to train and serve. In this +paper, we propose a new approach, named as EFL, that can turn small LMs into +better few-shot learners. The key idea of this approach is to reformulate +potential NLP task into an entailment one, and then fine-tune the model with as +little as 8 examples. We further demonstrate our proposed method can be: (i) +naturally combined with an unsupervised contrastive learning-based data +augmentation method; (ii) easily extended to multilingual few-shot learning. A +systematic evaluation on 18 standard NLP tasks demonstrates that this approach +improves the various existing SOTA few-shot learning methods by 12\%, and +yields competitive few-shot performance with 500 times larger models, such as +GPT-3." +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_firstAuthor|Sinong Wang +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_updated|2021-04-29T22:52:26Z +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_title|Entailment as Few-Shot Learner +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_published|2021-04-29T22:52:26Z +http://www.semanlink.net/doc/2021/05/2104_14690_entailment_as_few_|arxiv_num|2104.14690 +http://www.semanlink.net/doc/2021/06/le_surendettement_menace_des_mi|creationDate|2021-06-29 +http://www.semanlink.net/doc/2021/06/le_surendettement_menace_des_mi|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2021/06/le_surendettement_menace_des_mi|tag|http://www.semanlink.net/tag/microcredit +http://www.semanlink.net/doc/2021/06/le_surendettement_menace_des_mi|title|Le surendettement menace des millions d’emprunteurs de microcrédits dans le monde +http://www.semanlink.net/doc/2021/06/le_surendettement_menace_des_mi|bookmarkOf|https://www.lemonde.fr/economie/article/2021/06/28/des-millions-d-emprunteurs-de-microcredits-menaces-par-le-surendettement_6085965_3234.html +http://www.semanlink.net/doc/2021/06/le_surendettement_menace_des_mi|creationTime|2021-06-29T00:38:41Z +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|creationDate|2020-11-03 +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|tag|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|tag|http://www.semanlink.net/tag/link_prediction +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|tag|http://www.semanlink.net/tag/thewebconf_2021 +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|tag|http://www.semanlink.net/tag/attention_knowledge_graphs +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|tag|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|tag|http://www.semanlink.net/tag/ckb +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|arxiv_author|Paul Groth +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|arxiv_author|Michael Cochez +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|arxiv_author|Daniel Daza +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|comment|"BLP 
""BERT for Link Prediction"": using the textual descriptions of entities when computing entity representations (hence not failing with entities unknown in training) + +> a method for **learning representations +of entities**, that uses a **pre-trained Transformer** based +architecture as an entity encoder, and +**link prediction training on a knowledge graph +with textual entity descriptions**. + +> using entity descriptions, +an entity encoder is trained for link prediction in +a knowledge graph. The encoder can then be used +without fine-tuning to obtain features for entity classification +and information retrieval + +Cites [Xie et al](doc:2020/10/representation_learning_of_know) and [Kepler](doc:2020/11/1911_06136_kepler_a_unified_). They claim that their +objective targeted exclusively for link prediction (and not an objective that combines language modeling +and link prediction as Kepler) +performs better than Kepler's more complex one." +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|relatedDoc|http://www.semanlink.net/doc/2020/10/representation_learning_of_know +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|relatedDoc|http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_ +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|title|[2010.03496] Inductive Entity Representations from Text via Link Prediction +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|bookmarkOf|https://arxiv.org/abs/2010.03496 +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|creationTime|2020-11-03T16:38:59Z +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|arxiv_summary|"We present a method for learning representations of entities, that uses a +Transformer-based architecture as an entity encoder, and link prediction +training on a knowledge graph with textual entity descriptions. We demonstrate +that our approach can be applied effectively for link prediction in different +inductive settings involving entities not seen during training, outperforming +related state-of-the-art methods (22% MRR improvement on average). We provide +evidence that the learned representations transfer to other tasks that do not +require fine-tuning the entity encoder. In an entity classification task we +obtain an average improvement of 16% accuracy compared with baselines that also +employ pre-trained models. For an information retrieval task, significant +improvements of up to 8.8% in NDCG@10 were obtained for natural language +queries." 
+http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|arxiv_firstAuthor|Daniel Daza +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|arxiv_updated|2020-10-07T16:04:06Z +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|arxiv_title|Inductive Entity Representations from Text via Link Prediction +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|arxiv_published|2020-10-07T16:04:06Z +http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r|arxiv_num|2010.03496 +http://www.semanlink.net/doc/2021/08/en_lituanie_dans_les_camps_des|creationDate|2021-08-24 +http://www.semanlink.net/doc/2021/08/en_lituanie_dans_les_camps_des|tag|http://www.semanlink.net/tag/bielorussie +http://www.semanlink.net/doc/2021/08/en_lituanie_dans_les_camps_des|tag|http://www.semanlink.net/tag/lituanie +http://www.semanlink.net/doc/2021/08/en_lituanie_dans_les_camps_des|tag|http://www.semanlink.net/tag/immigration +http://www.semanlink.net/doc/2021/08/en_lituanie_dans_les_camps_des|title|En Lituanie, dans les camps des migrants envoyés par Loukachenko +http://www.semanlink.net/doc/2021/08/en_lituanie_dans_les_camps_des|bookmarkOf|https://www.lemonde.fr/international/article/2021/08/24/en-lituanie-dans-les-camps-des-migrants-envoyes-par-loukachenko_6092162_3210.html +http://www.semanlink.net/doc/2021/08/en_lituanie_dans_les_camps_des|creationTime|2021-08-24T15:06:51Z +http://www.semanlink.net/doc/2021/05/a_self_training_approach_for_sh|creationDate|2021-05-20 +http://www.semanlink.net/doc/2021/05/a_self_training_approach_for_sh|tag|http://www.semanlink.net/tag/sif_embeddings +http://www.semanlink.net/doc/2021/05/a_self_training_approach_for_sh|tag|http://www.semanlink.net/tag/short_text_clustering +http://www.semanlink.net/doc/2021/05/a_self_training_approach_for_sh|comment|> The method we propose, learns discriminative features from both an autoencoder and a sentence embedding ([SIF embeddings](tag:sif_embeddings)), then uses assignments from a clustering algorithm as supervision to update weights of the encoder network. +http://www.semanlink.net/doc/2021/05/a_self_training_approach_for_sh|title|A Self-Training Approach for Short Text Clustering - (Hadifar 2019) +http://www.semanlink.net/doc/2021/05/a_self_training_approach_for_sh|bookmarkOf|https://www.aclweb.org/anthology/W19-4322/ +http://www.semanlink.net/doc/2021/05/a_self_training_approach_for_sh|creationTime|2021-05-20T16:42:46Z +http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he|creationDate|2021-10-16 +http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he|tag|http://www.semanlink.net/tag/pointwise_mutual_information +http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he|tag|http://www.semanlink.net/tag/mlm +http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he|comment|"> You can read this paper two ways: +> 1. As a practical speed-up technique for training large LMs. +> 2. Theoretical validation that Transformers are powerful because they ‘learn PMI’." 
+http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he|title|"Seth Stafford sur Twitter : ""Here’s a nice paper (ICLR spotlight) on how to apply masking in LM training...""" +http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he|bookmarkOf|https://twitter.com/seth_stafford/status/1449080449158959106 +http://www.semanlink.net/doc/2021/10/seth_stafford_sur_twitter_he|creationTime|2021-10-16T09:29:03Z +http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin|creationDate|2020-09-17 +http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin|tag|http://www.semanlink.net/tag/nlp_ens +http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin|tag|http://www.semanlink.net/tag/emergence +http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin|tag|http://www.semanlink.net/tag/children_s_language_acquisition +http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin|tag|http://www.semanlink.net/tag/phase_transition +http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin|tag|http://www.semanlink.net/tag/context_free_grammar +http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin|title|From Random Grammars to Learning Language - Département de Physique de l'Ecole Normale supérieure +http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin|bookmarkOf|https://www.phys.ens.fr/spip.php?article4135&lang=en +http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin|creationTime|2020-09-17T23:46:39Z +http://www.semanlink.net/doc/2021/02/garamantes_ancient_civilisation|creationDate|2021-02-17 +http://www.semanlink.net/doc/2021/02/garamantes_ancient_civilisation|tag|http://www.semanlink.net/tag/lybie +http://www.semanlink.net/doc/2021/02/garamantes_ancient_civilisation|tag|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/doc/2021/02/garamantes_ancient_civilisation|tag|http://www.semanlink.net/tag/garamantes +http://www.semanlink.net/doc/2021/02/garamantes_ancient_civilisation|title|Garamantes ancient civilisation +http://www.semanlink.net/doc/2021/02/garamantes_ancient_civilisation|bookmarkOf|http://www.archeolog-home.com/pages/content/libye-explore-garamantes-ancient-civilisation.html +http://www.semanlink.net/doc/2021/02/garamantes_ancient_civilisation|creationTime|2021-02-17T17:27:57Z +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|creationDate|2020-09-01 +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|tag|http://www.semanlink.net/tag/classification_relations_between_classes +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|tag|http://www.semanlink.net/tag/hierarchical_classification_evaluation +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|tag|http://www.semanlink.net/tag/ml_evaluation +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|tag|http://www.semanlink.net/tag/hierarchical_classification +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_author|Aris Kosmopoulos +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_author|Georgios Paliouras +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_author|Eric Gaussier +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_author|Ion Androutsopoulos 
+http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_author|Ioannis Partalas +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|comment|"How to properly evaluate hierarchical classification algorithms? + +> Classification errors in the upper levels of the hierarchy (e.g. when wrongly +classifying a document of the class music into the class food) are more severe +than those in deeper levels (e.g. when classifying a document from progressive +rock as alternative rock)." +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|title|[1306.6802] Evaluation Measures for Hierarchical Classification: a unified view and novel approaches +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|bookmarkOf|https://arxiv.org/abs/1306.6802 +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|creationTime|2020-09-01T23:46:48Z +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_summary|"Hierarchical classification addresses the problem of classifying items into a +hierarchy of classes. An important issue in hierarchical classification is the +evaluation of different classification algorithms, which is complicated by the +hierarchical relations among the classes. Several evaluation measures have been +proposed for hierarchical classification using the hierarchy in different ways. +This paper studies the problem of evaluation in hierarchical classification by +analyzing and abstracting the key components of the existing performance +measures. It also proposes two alternative generic views of hierarchical +evaluation and introduces two corresponding novel measures. The proposed +measures, along with the state-of-the art ones, are empirically tested on three +large datasets from the domain of text classification. The empirical results +illustrate the undesirable behavior of existing approaches and how the proposed +methods overcome most of these methods across a range of cases." 
+http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_firstAuthor|Aris Kosmopoulos +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_updated|2013-07-01T17:33:58Z +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_title|Evaluation Measures for Hierarchical Classification: a unified view and novel approaches +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_published|2013-06-28T11:49:53Z +http://www.semanlink.net/doc/2020/09/1306_6802_evaluation_measures|arxiv_num|1306.6802 +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|creationDate|2021-05-01 +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|tag|http://www.semanlink.net/tag/entity_discovery_and_linking +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_author|Eugene Ie +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_author|Diego Garcia-Olano +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_author|Larry Lansing +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_author|Alessandro Presta +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_author|Jason Baldridge +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_author|Daniel Gillick +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_author|Sayali Kulkarni +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|comment|"> We show that it is feasible to perform **entity +linking by training a dual encoder (two-tower) +model that encodes mentions and entities in +the same dense vector space**, where candidate +entities are retrieved by approximate nearest +neighbor search. Unlike prior work, **this setup +does not rely on an alias table followed by a +re-ranker, and is thus the first fully learned entity +retrieval model**. + +Contributions: + +> - a dual encoder architecture for +learning entity and mention encodings suitable for +retrieval. A key feature of the architecture is that it +employs a modular **hierarchy of sub-encoders that +capture different aspects of mentions and entities** +> - a simple, fully unsupervised **hard negative +mining** strategy that produces massive gains +in retrieval performance, compared to using only +random negatives +> - high +quality candidate entities very efficiently using approximate nearest neighbor search +> - outperforms discrete retrieval +baselines like an alias table or BM25 + +> strong retrieval +performance across all 5.7 million Wikipedia entities in +around 3ms per mention + +> since we are using a two-tower or dual +encoder architecture, **our model cannot use any kind of attention over +both mentions and entities at once**, nor feature-wise +comparisons as done by Francis-Landau et al. (2016). 
+This is a fairly severe constraint – for example, **we cannot +directly compare the mention span to the entity title** +– but it permits retrieval with nearest neighbor search +for the entire context against a single, all encompassing +representation for each entity" +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|title|[1909.10506] Learning Dense Representations for Entity Retrieval +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|bookmarkOf|https://arxiv.org/abs/1909.10506 +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|creationTime|2021-05-01T09:11:15Z +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_summary|"We show that it is feasible to perform entity linking by training a dual +encoder (two-tower) model that encodes mentions and entities in the same dense +vector space, where candidate entities are retrieved by approximate nearest +neighbor search. Unlike prior work, this setup does not rely on an alias table +followed by a re-ranker, and is thus the first fully learned entity retrieval +model. We show that our dual encoder, trained using only anchor-text links in +Wikipedia, outperforms discrete alias table and BM25 baselines, and is +competitive with the best comparable results on the standard TACKBP-2010 +dataset. In addition, it can retrieve candidates extremely fast, and +generalizes well to a new dataset derived from Wikinews. On the modeling side, +we demonstrate the dramatic value of an unsupervised negative mining algorithm +for this task." +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_firstAuthor|Daniel Gillick +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_updated|2019-09-23T17:52:34Z +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_title|Learning Dense Representations for Entity Retrieval +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_published|2019-09-23T17:52:34Z +http://www.semanlink.net/doc/2021/05/1909_10506_learning_dense_rep|arxiv_num|1909.10506 +http://www.semanlink.net/doc/2021/09/creating_interactive_timelines_|creationDate|2021-09-05 +http://www.semanlink.net/doc/2021/09/creating_interactive_timelines_|tag|http://www.semanlink.net/tag/timeline +http://www.semanlink.net/doc/2021/09/creating_interactive_timelines_|tag|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/doc/2021/09/creating_interactive_timelines_|title|Creating Interactive Timelines with JavaScript by Shachee Swadia Nightingale Medium +http://www.semanlink.net/doc/2021/09/creating_interactive_timelines_|bookmarkOf|https://medium.com/nightingale/creating-interactive-timelines-with-javascript-b70b7ded3d13 +http://www.semanlink.net/doc/2021/09/creating_interactive_timelines_|creationTime|2021-09-05T03:21:04Z +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|creationDate|2021-06-03 +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|tag|http://www.semanlink.net/tag/open_domain_question_answering +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|tag|http://www.semanlink.net/tag/dense_passage_retrieval +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|tag|http://www.semanlink.net/tag/emnlp_2020 
+http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_author|Wen-tau Yih +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_author|Sewon Min +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_author|Barlas Oğuz +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_author|Vladimir Karpukhin +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_author|Patrick Lewis +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_author|Danqi Chen +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_author|Sergey Edunov +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_author|Ledell Wu +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|comment|"Uses two BERT models to encode text: one for encoding queries and one for encoding documents. The two models are trained simultaneously in a two-tower configuration to maximize the dot product for passages likely to answer the question + +[Github](https://github.com/facebookresearch/DPR)" +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|title|[2004.04906] Dense Passage Retrieval for Open-Domain Question Answering +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|bookmarkOf|https://arxiv.org/abs/2004.04906 +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|creationTime|2021-06-03T11:06:07Z +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_summary|"Open-domain question answering relies on efficient passage retrieval to +select candidate contexts, where traditional sparse vector space models, such +as TF-IDF or BM25, are the de facto method. In this work, we show that +retrieval can be practically implemented using dense representations alone, +where embeddings are learned from a small number of questions and passages by a +simple dual-encoder framework. When evaluated on a wide range of open-domain QA +datasets, our dense retriever outperforms a strong Lucene-BM25 system largely +by 9%-19% absolute in terms of top-20 passage retrieval accuracy, and helps our +end-to-end QA system establish new state-of-the-art on multiple open-domain QA +benchmarks." 
+http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_firstAuthor|Vladimir Karpukhin +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_updated|2020-09-30T21:27:13Z +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_title|Dense Passage Retrieval for Open-Domain Question Answering +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_published|2020-04-10T04:53:17Z +http://www.semanlink.net/doc/2021/06/2004_04906_dense_passage_retr|arxiv_num|2004.04906 +http://www.semanlink.net/doc/2020/11/ilha_das_flores_curta_metragem_|creationDate|2020-11-27 +http://www.semanlink.net/doc/2020/11/ilha_das_flores_curta_metragem_|tag|http://www.semanlink.net/tag/inegalites +http://www.semanlink.net/doc/2020/11/ilha_das_flores_curta_metragem_|tag|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.semanlink.net/doc/2020/11/ilha_das_flores_curta_metragem_|tag|http://www.semanlink.net/tag/film_bresilien +http://www.semanlink.net/doc/2020/11/ilha_das_flores_curta_metragem_|tag|http://www.semanlink.net/tag/robert +http://www.semanlink.net/doc/2020/11/ilha_das_flores_curta_metragem_|title|Ilha das Flores (curta-metragem) +http://www.semanlink.net/doc/2020/11/ilha_das_flores_curta_metragem_|bookmarkOf|https://pt.wikipedia.org/wiki/Ilha_das_Flores_(curta-metragem) +http://www.semanlink.net/doc/2020/11/ilha_das_flores_curta_metragem_|creationTime|2020-11-27T00:36:52Z +http://www.semanlink.net/doc/2020/10/liebreich_separating_hype_from|creationDate|2020-10-16 +http://www.semanlink.net/doc/2020/10/liebreich_separating_hype_from|tag|http://www.semanlink.net/tag/hydrogen_economy +http://www.semanlink.net/doc/2020/10/liebreich_separating_hype_from|comment|> Part II of my hydrogen deep dive: the demand side. TLDR: Hydrogen will play a vital role as chemical feedstock, including for shipping and aviation fuels, and as guarantor of resilience in a renewables-based power system. EVERYTHING else goes electric. 
+http://www.semanlink.net/doc/2020/10/liebreich_separating_hype_from|title|Liebreich: Separating Hype from Hydrogen – Part Two: The Demand Side BloombergNEF +http://www.semanlink.net/doc/2020/10/liebreich_separating_hype_from|bookmarkOf|https://about.bnef.com/blog/liebreich-separating-hype-from-hydrogen-part-two-the-demand-side/ +http://www.semanlink.net/doc/2020/10/liebreich_separating_hype_from|creationTime|2020-10-16T14:42:10Z +http://www.semanlink.net/doc/2021/05/sinequa_enterprise_search_pla|creationDate|2021-05-19 +http://www.semanlink.net/doc/2021/05/sinequa_enterprise_search_pla|tag|http://www.semanlink.net/tag/enterprise_search +http://www.semanlink.net/doc/2021/05/sinequa_enterprise_search_pla|tag|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/doc/2021/05/sinequa_enterprise_search_pla|title|Sinequa : Enterprise Search Platform +http://www.semanlink.net/doc/2021/05/sinequa_enterprise_search_pla|bookmarkOf|https://www.sinequa.com/ +http://www.semanlink.net/doc/2021/05/sinequa_enterprise_search_pla|creationTime|2021-05-19T01:58:27Z +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|creationDate|2021-04-19 +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|tag|http://www.semanlink.net/tag/anisotropy_in_lm_space +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|tag|http://www.semanlink.net/tag/emnlp_2020 +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|tag|http://www.semanlink.net/tag/bert_and_sentence_embeddings +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_author|Mingxuan Wang +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_author|Bohan Li +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_author|Hao Zhou +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_author|Yiming Yang +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_author|Junxian He +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_author|Lei Li +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|comment|"> **the sentence +embeddings from the pre-trained language +models without fine-tuning have been +found to poorly capture semantic meaning of +sentences.** +> +> We find that **BERT always induces +a non-smooth anisotropic semantic space of +sentences**, which harms its performance of +semantic similarity. To address this issue, +we propose to transform the anisotropic sentence +embedding distribution to a smooth and +isotropic Gaussian distribution through normalizing +flows that are learned with an unsupervised +objective + +> normalizing flows (Dinh et al., 2015): invertible function parameterized by neural networks. 
+> **During +training, only the flow network is optimized +while the BERT parameters remain unchanged** + +> When combined with external supervision from +natural language inference tasks (Bowman et al., +2015; Williams et al., 2018), our method outperforms +the [Sentence-BERT](tag:sbert) embeddings + +[GitHub](https://github.com/bohanli/BERT-flow) +" +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|title|[2011.05864] On the Sentence Embeddings from Pre-trained Language Models +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|bookmarkOf|https://arxiv.org/abs/2011.05864 +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|creationTime|2021-04-19T01:13:25Z +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_summary|"Pre-trained contextual representations like BERT have achieved great success +in natural language processing. However, the sentence embeddings from the +pre-trained language models without fine-tuning have been found to poorly +capture semantic meaning of sentences. In this paper, we argue that the +semantic information in the BERT embeddings is not fully exploited. We first +reveal the theoretical connection between the masked language model +pre-training objective and the semantic similarity task theoretically, and then +analyze the BERT sentence embeddings empirically. We find that BERT always +induces a non-smooth anisotropic semantic space of sentences, which harms its +performance of semantic similarity. To address this issue, we propose to +transform the anisotropic sentence embedding distribution to a smooth and +isotropic Gaussian distribution through normalizing flows that are learned with +an unsupervised objective. Experimental results show that our proposed +BERT-flow method obtains significant performance gains over the +state-of-the-art sentence embeddings on a variety of semantic textual +similarity tasks. The code is available at +https://github.com/bohanli/BERT-flow." 
+http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_firstAuthor|Bohan Li +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_updated|2020-11-02T13:14:57Z +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_title|On the Sentence Embeddings from Pre-trained Language Models +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_published|2020-11-02T13:14:57Z +http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em|arxiv_num|2011.05864 +http://www.semanlink.net/doc/2021/09/cory_doctorow_sur_twitter_f|creationDate|2021-09-22 +http://www.semanlink.net/doc/2021/09/cory_doctorow_sur_twitter_f|tag|http://www.semanlink.net/tag/deletefb +http://www.semanlink.net/doc/2021/09/cory_doctorow_sur_twitter_f|tag|http://www.semanlink.net/tag/cory_doctorow +http://www.semanlink.net/doc/2021/09/cory_doctorow_sur_twitter_f|tag|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/doc/2021/09/cory_doctorow_sur_twitter_f|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/09/cory_doctorow_sur_twitter_f|title|"Cory Doctorow sur Twitter : ""#Facebook is a rotten company, rotten from the top down, its founder, board and top execs are sociopaths...""" +http://www.semanlink.net/doc/2021/09/cory_doctorow_sur_twitter_f|bookmarkOf|https://twitter.com/doctorow/status/1440718917429723141 +http://www.semanlink.net/doc/2021/09/cory_doctorow_sur_twitter_f|creationTime|2021-09-22T23:53:51Z +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|creationDate|2021-06-20 +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|tag|http://www.semanlink.net/tag/virtual_knowledge_graph +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|tag|http://www.semanlink.net/tag/open_domain_question_answering +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_author|Ruslan Salakhutdinov +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_author|Haitian Sun +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_author|Bhuwan Dhingra +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_author|William W. Cohen +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_author|Pat Verga +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|comment|"> a method for constructing **a virtual KB (VKB) trained entirely from text** + +Open Predicate Query Language (OPQL): constructing a virtual knowledge base (VKB) that supports KB reasoning & open-domain QA, tackling the incompleteness of knowledge bases by constructing a virtual KB only from text + +> OPQL constructs +a VKB by **encoding and indexing a set of +relation mentions** in a way that naturally enables +reasoning and can be trained without any structured +supervision. + +> can be used +as an **external memory integrated into a language +model** + +cf. this earlier paper [[2002.10640] Differentiable Reasoning over a Virtual Knowledge Base](doc:2020/07/2002_10640_differentiable_rea). But does not require an initial structured KB for distant +supervision. 
+ +> The key idea in constructing the OPQL VKB is to use a +dual-encoder pre-training process, similar to +[[1906.03158] Matching the Blanks: Distributional Similarity for Relation Learning](doc:2021/05/1906_03158_matching_the_blank) + +Related work section refers to [[1909.04164] Knowledge Enhanced Contextual Word Representations](doc:2020/05/1909_04164_knowledge_enhanced). Also refers to [[2007.00849] Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge](doc:2020/07/2007_00849_facts_as_experts_) (some authors in common)" +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|relatedDoc|http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|relatedDoc|http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_ +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|relatedDoc|http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|relatedDoc|http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|title|[2102.07043] Reasoning Over Virtual Knowledge Bases With Open Predicate Relations +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|bookmarkOf|https://arxiv.org/abs/2102.07043 +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|creationTime|2021-06-20T08:30:31Z +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_summary|"We present the Open Predicate Query Language (OPQL); a method for +constructing a virtual KB (VKB) trained entirely from text. Large Knowledge +Bases (KBs) are indispensable for a wide-range of industry applications such as +question answering and recommendation. Typically, KBs encode world knowledge in +a structured, readily accessible form derived from laborious human annotation +efforts. Unfortunately, while they are extremely high precision, KBs are +inevitably highly incomplete and automated methods for enriching them are far +too inaccurate. Instead, OPQL constructs a VKB by encoding and indexing a set +of relation mentions in a way that naturally enables reasoning and can be +trained without any structured supervision. We demonstrate that OPQL +outperforms prior VKB methods on two different KB reasoning tasks and, +additionally, can be used as an external memory integrated into a language +model (OPQL-LM) leading to improvements on two open-domain question answering +tasks." 
+http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_firstAuthor|Haitian Sun +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_updated|2021-06-14T19:34:42Z +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_title|Reasoning Over Virtual Knowledge Bases With Open Predicate Relations +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_published|2021-02-14T01:29:54Z +http://www.semanlink.net/doc/2021/06/2102_07043_reasoning_over_vir|arxiv_num|2102.07043 +http://www.semanlink.net/doc/2020/07/snorkel_is_a_fundamentally_new_|creationDate|2020-07-15 +http://www.semanlink.net/doc/2020/07/snorkel_is_a_fundamentally_new_|tag|http://www.semanlink.net/tag/snorkel +http://www.semanlink.net/doc/2020/07/snorkel_is_a_fundamentally_new_|title|Snorkel is a fundamentally new interface to ML without hand-labeled training data +http://www.semanlink.net/doc/2020/07/snorkel_is_a_fundamentally_new_|bookmarkOf|https://www.snorkel.ai/07-14-2020-snorkel-ai-launch.html +http://www.semanlink.net/doc/2020/07/snorkel_is_a_fundamentally_new_|creationTime|2020-07-15T08:16:35Z +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|creationDate|2020-10-03 +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|tag|http://www.semanlink.net/tag/hierarchical_clustering +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|tag|http://www.semanlink.net/tag/ai_stanford +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|tag|http://www.semanlink.net/tag/poincare_embeddings +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|arxiv_author|Albert Gu +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|arxiv_author|Ines Chami +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|arxiv_author|Christopher Ré +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|arxiv_author|Vaggos Chatziafratis +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|comment|"> The key idea of our method, HypHC, is showing a direct correspondence from discrete trees to continuous representations (via the hyperbolic embeddings of their leaf nodes) and back (via a decoding algorithm that maps leaf embeddings to a dendrogram), **allowing us to search the space of discrete binary trees with continuous optimization**. + +Cites [Dasgupta: A cost function for similarity-based hierarchical clustering](https://arxiv.org/abs/1510.05043)" +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|title|[2010.00402] From Trees to Continuous Embeddings and Back: Hyperbolic Hierarchical Clustering +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|bookmarkOf|https://arxiv.org/abs/2010.00402 +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|creationTime|2020-10-03T14:46:20Z +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|arxiv_summary|"Similarity-based Hierarchical Clustering (HC) is a classical unsupervised +machine learning algorithm that has traditionally been solved with heuristic +algorithms like Average-Linkage. Recently, Dasgupta reframed HC as a discrete +optimization problem by introducing a global cost function measuring the +quality of a given tree. 
In this work, we provide the first continuous +relaxation of Dasgupta's discrete optimization problem with provable quality +guarantees. The key idea of our method, HypHC, is showing a direct +correspondence from discrete trees to continuous representations (via the +hyperbolic embeddings of their leaf nodes) and back (via a decoding algorithm +that maps leaf embeddings to a dendrogram), allowing us to search the space of +discrete binary trees with continuous optimization. Building on analogies +between trees and hyperbolic space, we derive a continuous analogue for the +notion of lowest common ancestor, which leads to a continuous relaxation of +Dasgupta's discrete objective. We can show that after decoding, the global +minimizer of our continuous relaxation yields a discrete tree with a (1 + +epsilon)-factor approximation for Dasgupta's optimal tree, where epsilon can be +made arbitrarily small and controls optimization challenges. We experimentally +evaluate HypHC on a variety of HC benchmarks and find that even approximate +solutions found with gradient descent have superior clustering quality than +agglomerative heuristics or other gradient based algorithms. Finally, we +highlight the flexibility of HypHC using end-to-end training in a downstream +classification task." +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|arxiv_firstAuthor|Ines Chami +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|arxiv_updated|2020-10-01T13:43:19Z +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|arxiv_title|From Trees to Continuous Embeddings and Back: Hyperbolic Hierarchical Clustering +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|arxiv_published|2020-10-01T13:43:19Z +http://www.semanlink.net/doc/2020/10/2010_00402_from_trees_to_cont|arxiv_num|2010.00402 +http://www.semanlink.net/doc/2020/12/textgraphs_2020|creationDate|2020-12-13 +http://www.semanlink.net/doc/2020/12/textgraphs_2020|tag|http://www.semanlink.net/tag/graphs_nlp +http://www.semanlink.net/doc/2020/12/textgraphs_2020|tag|http://www.semanlink.net/tag/coling2020 +http://www.semanlink.net/doc/2020/12/textgraphs_2020|title|TextGraphs 2020 +http://www.semanlink.net/doc/2020/12/textgraphs_2020|bookmarkOf|https://sites.google.com/view/textgraphs2020/ +http://www.semanlink.net/doc/2020/12/textgraphs_2020|creationTime|2020-12-13T23:54:41Z +http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|creationDate|2020-09-06 +http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|tag|http://www.semanlink.net/tag/cross_lingual_nlp +http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|tag|http://www.semanlink.net/tag/co_training +http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|tag|http://www.semanlink.net/tag/using_literal_descriptions_of_entities +http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|tag|http://www.semanlink.net/tag/entity_alignment +http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_author|Kai-Wei Chang +http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_author|Carlo Zaniolo +http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_author|Muhao Chen 
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_author|Yingtao Tian
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_author|Steven Skiena
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|title|[1806.06478] Co-training Embeddings of Knowledge Graphs and Entity Descriptions for Cross-lingual Entity Alignment
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|bookmarkOf|https://arxiv.org/abs/1806.06478
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|creationTime|2020-09-06T16:59:29Z
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_summary|"Multilingual knowledge graph (KG) embeddings provide latent semantic
+representations of entities and structured knowledge with cross-lingual
+inferences, which benefit various knowledge-driven cross-lingual NLP tasks.
+However, precisely learning such cross-lingual inferences is usually hindered
+by the low coverage of entity alignment in many KGs. Since many multilingual
+KGs also provide literal descriptions of entities, in this paper, we introduce
+an embedding-based approach which leverages a weakly aligned multilingual KG
+for semi-supervised cross-lingual learning using entity descriptions. Our
+approach performs co-training of two embedding models, i.e. a multilingual KG
+embedding model and a multilingual literal description embedding model. The
+models are trained on a large Wikipedia-based trilingual dataset where most
+entity alignment is unknown to training. Experimental results show that the
+performance of the proposed approach on the entity alignment task improves at
+each iteration of co-training, and eventually reaches a stage at which it
+significantly surpasses previous approaches. We also show that our approach has
+promising abilities for zero-shot entity alignment, and cross-lingual KG
+completion."
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_firstAuthor|Muhao Chen
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_updated|2018-06-18T02:06:46Z
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_title|Co-training Embeddings of Knowledge Graphs and Entity Descriptions for Cross-lingual Entity Alignment
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_published|2018-06-18T02:06:46Z
+http://www.semanlink.net/doc/2020/09/1806_06478_co_training_embedd|arxiv_num|1806.06478
+http://www.semanlink.net/doc/2020/11/how_to_allow_deep_learning_on_y|creationDate|2020-11-12
+http://www.semanlink.net/doc/2020/11/how_to_allow_deep_learning_on_y|tag|http://www.semanlink.net/tag/encryption
+http://www.semanlink.net/doc/2020/11/how_to_allow_deep_learning_on_y|tag|http://www.semanlink.net/tag/deep_learning
+http://www.semanlink.net/doc/2020/11/how_to_allow_deep_learning_on_y|tag|http://www.semanlink.net/tag/sanjeev_arora
+http://www.semanlink.net/doc/2020/11/how_to_allow_deep_learning_on_y|comment|"> Our InstaHide allows users and IoT devices to ""encrypt"" data yet allowing deep learning on it. Minor efficiency and accuracy loss. Carlini et al broke our 100-image challenge dataset in 12 to 120 GPU hrs. Does it invalidate intended use? 
[src](https://twitter.com/prfsanjeevarora/status/1326653490261843968?s=20) + +[follow up](https://twitter.com/prfsanjeevarora/status/1326653490261843968?s=20) - ""[Brief response to Nicholas Carlini’s blog post](https://hazelsuko07.github.io/Response_to_carlini_blogpost/)""" +http://www.semanlink.net/doc/2020/11/how_to_allow_deep_learning_on_y|title|How to allow deep learning on your data without revealing the data – Off the convex path +http://www.semanlink.net/doc/2020/11/how_to_allow_deep_learning_on_y|bookmarkOf|http://www.offconvex.org/2020/11/11/instahide/ +http://www.semanlink.net/doc/2020/11/how_to_allow_deep_learning_on_y|creationTime|2020-11-12T00:28:44Z +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|creationDate|2021-07-13 +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|tag|http://www.semanlink.net/tag/bosch +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|tag|http://www.semanlink.net/tag/multilingual_language_models +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|tag|http://www.semanlink.net/tag/low_resource_languages +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_author|Gowtham Ramesh +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_author|Pratyush Kumar +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_author|Sumanth Doddapaneni +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_author|Mitesh M. Khapra +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_author|Anoop Kunchukuttan +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|comment|"> MLLMs are useful for bilingual tasks, particularly +in low resource scenarios. +> +> The surprisingly good performance of +MLLMs in crosslingual transfer as well as +bilingual tasks motivates the hypothesis that +MLLMs are learning universal patterns. However, +our survey of the studies in this space indicates that +there is no consensus yet." +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|title|[2107.00676] A Primer on Pretrained Multilingual Language Models +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|bookmarkOf|https://arxiv.org/abs/2107.00676 +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|creationTime|2021-07-13T13:33:29Z +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_summary|"Multilingual Language Models (MLLMs) such as mBERT, XLM, XLM-R, \textit{etc.} +have emerged as a viable option for bringing the power of pretraining to a +large number of languages. Given their success in zero shot transfer learning, +there has emerged a large body of work in (i) building bigger MLLMs covering a +large number of languages (ii) creating exhaustive benchmarks covering a wider +variety of tasks and languages for evaluating MLLMs (iii) analysing the +performance of MLLMs on monolingual, zero shot crosslingual and bilingual tasks +(iv) understanding the universal language patterns (if any) learnt by MLLMs and +(v) augmenting the (often) limited capacity of MLLMs to improve their +performance on seen or even unseen languages. 
In this survey, we review the +existing literature covering the above broad areas of research pertaining to +MLLMs. Based on our survey, we recommend some promising directions of future +research." +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_firstAuthor|Sumanth Doddapaneni +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_updated|2021-07-01T18:01:46Z +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_title|A Primer on Pretrained Multilingual Language Models +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_published|2021-07-01T18:01:46Z +http://www.semanlink.net/doc/2021/07/2107_00676_a_primer_on_pretra|arxiv_num|2107.00676 +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|creationDate|2021-07-15 +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|tag|http://www.semanlink.net/tag/causal_inference +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|tag|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_author|Nal Kalchbrenner +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_author|Anirudh Goyal +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_author|Bernhard Schölkopf +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_author|Yoshua Bengio +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_author|Francesco Locatello +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_author|Stefan Bauer +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_author|Nan Rosemary Ke +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|comment|"This article reviews fundamental concepts of causal inference and relates them to crucial open problems of machine learning, including transfer learning and generalization, thereby assaying how causality can contribute to modern machine learning research + +Related: [Making sense of raw input](doc:2021/05/making_sense_of_raw_input)" +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|relatedDoc|http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|title|[2102.11107] Towards Causal Representation Learning +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|bookmarkOf|https://arxiv.org/abs/2102.11107 +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|bookmarkOf|https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9363924 +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|creationTime|2021-07-15T00:29:21Z +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_summary|"The two fields of machine learning and graphical causality arose and +developed separately. However, there is now cross-pollination and increasing +interest in both fields to benefit from the advances of the other. 
In the +present paper, we review fundamental concepts of causal inference and relate +them to crucial open problems of machine learning, including transfer and +generalization, thereby assaying how causality can contribute to modern machine +learning research. This also applies in the opposite direction: we note that +most work in causality starts from the premise that the causal variables are +given. A central problem for AI and causality is, thus, causal representation +learning, the discovery of high-level causal variables from low-level +observations. Finally, we delineate some implications of causality for machine +learning and propose key research areas at the intersection of both +communities." +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_firstAuthor|Bernhard Schölkopf +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_updated|2021-02-22T15:26:57Z +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_title|Towards Causal Representation Learning +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_published|2021-02-22T15:26:57Z +http://www.semanlink.net/doc/2021/07/2102_11107_towards_causal_rep|arxiv_num|2102.11107 +http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be|creationDate|2021-07-09 +http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be|tag|http://www.semanlink.net/tag/benchmark +http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be|tag|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be|tag|http://www.semanlink.net/tag/nlp_based_ir +http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be|comment|"> BEIR is a heterogeneous benchmark containing diverse IR tasks. +> Easy to use, evaluate your NLP-based retrieval models across 15+ diverse IR datasets. + +[Paper](doc:2021/07/2104_08663_beir_a_heterogeno)" +http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be|relatedDoc|http://www.semanlink.net/doc/2021/07/2104_08663_beir_a_heterogeno +http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be|title|UKPLab/beir: A Heterogeneous Benchmark for Information Retrieval. +http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be|bookmarkOf|https://github.com/UKPLab/beir +http://www.semanlink.net/doc/2021/07/ukplab_beir_a_heterogeneous_be|creationTime|2021-07-09T12:19:50Z +http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input|creationDate|2021-05-21 +http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input|tag|http://www.semanlink.net/tag/artificial_general_intelligence +http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input|tag|http://www.semanlink.net/tag/google_deepmind +http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input|comment|">... this way we are able to **jointly learn** how to perceive (**mapping raw sensory information to concepts**) and apperceive (**combining concepts into declarative rules**) + +cf. 
[Making sense of sensory input](doc:2021/04/1910_02227_making_sense_of_se)" +http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input|relatedDoc|http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se +http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input|title|Making sense of raw input +http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input|bookmarkOf|https://www.sciencedirect.com/science/article/pii/S0004370221000722 +http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input|creationTime|2021-05-21T12:09:43Z +http://www.semanlink.net/doc/2020/08/les_steles_perdues_d%E2%80%99ethiopie_%7C|creationDate|2020-08-10 +http://www.semanlink.net/doc/2020/08/les_steles_perdues_d%E2%80%99ethiopie_%7C|tag|http://www.semanlink.net/tag/ethiopie +http://www.semanlink.net/doc/2020/08/les_steles_perdues_d%E2%80%99ethiopie_%7C|tag|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/doc/2020/08/les_steles_perdues_d%E2%80%99ethiopie_%7C|title|Les stèles perdues d’Éthiopie CNRS Le journal +http://www.semanlink.net/doc/2020/08/les_steles_perdues_d%E2%80%99ethiopie_%7C|bookmarkOf|https://lejournal.cnrs.fr/articles/les-steles-perdues-dethiopie?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1597035636 +http://www.semanlink.net/doc/2020/08/les_steles_perdues_d%E2%80%99ethiopie_%7C|creationTime|2020-08-10T11:29:28Z +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|creationDate|2020-07-02 +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|tag|http://www.semanlink.net/tag/active_learning +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|tag|http://www.semanlink.net/tag/nearest_neighbor_search +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|tag|http://www.semanlink.net/tag/facebook_fair +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|tag|http://www.semanlink.net/tag/imbalanced_data +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|tag|http://www.semanlink.net/tag/ai_stanford +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_author|Alexander C. Berg +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_author|Edward Chou +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_author|Roshan Sumbaly +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_author|I. Zeki Yalniz +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_author|Peter Bailis +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_author|Matei Zaharia +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_author|Sean Culatana +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_author|Cody Coleman +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|comment|"> Similarity search for Efficient Active Learning and Search (SEALS) + +In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. + +> Our work attacks **both the labeling and computational costs of machine learning**...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to +build accurate classifiers. 
**SEALS does, however, introduce another system component, a similarity +search index, which adds some additional engineering complexity** to build, tune, and maintain. +Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well +out of the box." +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|relatedDoc|http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|title|[2007.00077] Similarity Search for Efficient Active Learning and Search of Rare Concepts +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|bookmarkOf|https://arxiv.org/abs/2007.00077 +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|creationTime|2020-07-02T15:31:34Z +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_summary|"Many active learning and search approaches are intractable for industrial +settings with billions of unlabeled examples. Existing approaches, such as +uncertainty sampling or information density, search globally for the optimal +examples to label, scaling linearly or even quadratically with the unlabeled +data. However, in practice, data is often heavily skewed; only a small fraction +of collected data will be relevant for a given learning task. For example, when +identifying rare classes, detecting malicious content, or debugging model +performance, the ratio of positive to negative examples can be 1 to 1,000 or +more. In this work, we exploit this skew in large training datasets to reduce +the number of unlabeled examples considered in each selection round by only +looking at the nearest neighbors to the labeled examples. Empirically, we +observe that learned representations effectively cluster unseen concepts, +making active learning very effective and substantially reducing the number of +viable unlabeled examples. We evaluate several active learning and search +techniques in this setting on three large-scale datasets: ImageNet, Goodreads +spoiler detection, and OpenImages. For rare classes, active learning methods +need as little as 0.31% of the labeled data to match the average precision of +full supervision. By limiting active learning methods to only consider the +immediate neighbors of the labeled data as candidates for labeling, we need +only process as little as 1% of the unlabeled data while achieving similar +reductions in labeling costs as the traditional global approach. This process +of expanding the candidate pool with the nearest neighbors of the labeled set +can be done efficiently and reduces the computational complexity of selection +by orders of magnitude." 
+http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_firstAuthor|Cody Coleman +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_updated|2020-06-30T19:46:10Z +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_title|Similarity Search for Efficient Active Learning and Search of Rare Concepts +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_published|2020-06-30T19:46:10Z +http://www.semanlink.net/doc/2020/07/2007_00077_similarity_search_|arxiv_num|2007.00077 +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|creationDate|2020-06-06 +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|tag|http://www.semanlink.net/tag/critical_evaluation +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|arxiv_author|Jang Hyun Cho +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|arxiv_author|Bharath Hariharan +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|comment|"Evaluation of the efficacy +of knowledge distillation and its dependence on student +and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 + +> Despite +widespread use, an understanding of when the student can +learn from the teacher is missing. +> +> Our **key finding** +is that knowledge distillation is not a panacea and cannot +succeed when student capacity is too low to successfully +mimic the teacher. We have presented an approach +to mitigate this issue by **stopping teacher training** early" +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|title|[1910.01348] On the Efficacy of Knowledge Distillation +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|bookmarkOf|https://arxiv.org/abs/1910.01348 +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|bookmarkOf|http://openaccess.thecvf.com/content_ICCV_2019/html/Cho_On_the_Efficacy_of_Knowledge_Distillation_ICCV_2019_paper.html +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|creationTime|2020-06-06T17:20:52Z +http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|arxiv_summary|"In this paper, we present a thorough evaluation of the efficacy of knowledge +distillation and its dependence on student and teacher architectures. Starting +with the observation that more accurate teachers often don't make good +teachers, we attempt to tease apart the factors that affect knowledge +distillation performance. We find crucially that larger models do not often +make better teachers. We show that this is a consequence of mismatched +capacity, and that small students are unable to mimic large teachers. We find +typical ways of circumventing this (such as performing a sequence of knowledge +distillation steps) to be ineffective. Finally, we show that this effect can be +mitigated by stopping the teacher's training early. Our results generalize +across datasets and models." 
+http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|arxiv_firstAuthor|Jang Hyun Cho
+http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|arxiv_updated|2019-10-03T08:14:13Z
+http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|arxiv_title|On the Efficacy of Knowledge Distillation
+http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|arxiv_published|2019-10-03T08:14:13Z
+http://www.semanlink.net/doc/2020/06/1910_01348_on_the_efficacy_of|arxiv_num|1910.01348
+http://www.semanlink.net/doc/2020/06/lista_de_espera_waiting_list_|creationDate|2020-06-13
+http://www.semanlink.net/doc/2020/06/lista_de_espera_waiting_list_|tag|http://www.semanlink.net/tag/film_cubain
+http://www.semanlink.net/doc/2020/06/lista_de_espera_waiting_list_|title|Lista de espera (Waiting List) Trailer
+http://www.semanlink.net/doc/2020/06/lista_de_espera_waiting_list_|bookmarkOf|https://www.youtube.com/watch?v=sPOztEsyv2M
+http://www.semanlink.net/doc/2020/06/lista_de_espera_waiting_list_|creationTime|2020-06-13T19:13:46Z
+http://www.semanlink.net/doc/2020/08/triple_classification_using_reg|creationDate|2020-08-30
+http://www.semanlink.net/doc/2020/08/triple_classification_using_reg|tag|http://www.semanlink.net/tag/triple_classification
+http://www.semanlink.net/doc/2020/08/triple_classification_using_reg|title|Triple Classification Using Regions and Fine-Grained Entity Typing (AAAI 2019)
+http://www.semanlink.net/doc/2020/08/triple_classification_using_reg|bookmarkOf|https://www.aaai.org/ojs/index.php/AAAI/article/view/3771
+http://www.semanlink.net/doc/2020/08/triple_classification_using_reg|creationTime|2020-08-30T19:05:59Z
+http://www.semanlink.net/doc/2021/03/asahi417_tner_language_model_f|creationDate|2021-03-04
+http://www.semanlink.net/doc/2021/03/asahi417_tner_language_model_f|tag|http://www.semanlink.net/tag/named_entity_recognition
+http://www.semanlink.net/doc/2021/03/asahi417_tner_language_model_f|tag|http://www.semanlink.net/tag/huggingface_transformers
+http://www.semanlink.net/doc/2021/03/asahi417_tner_language_model_f|tag|http://www.semanlink.net/tag/github_project
+http://www.semanlink.net/doc/2021/03/asahi417_tner_language_model_f|title|asahi417/tner: Language model finetuning on NER
+http://www.semanlink.net/doc/2021/03/asahi417_tner_language_model_f|bookmarkOf|https://github.com/asahi417/tner
+http://www.semanlink.net/doc/2021/03/asahi417_tner_language_model_f|creationTime|2021-03-04T08:19:59Z
+http://www.semanlink.net/doc/2021/08/la_crise_climatique_s%E2%80%99aggrave_p|creationDate|2021-08-09
+http://www.semanlink.net/doc/2021/08/la_crise_climatique_s%E2%80%99aggrave_p|tag|http://www.semanlink.net/tag/giec
+http://www.semanlink.net/doc/2021/08/la_crise_climatique_s%E2%80%99aggrave_p|tag|http://www.semanlink.net/tag/rechauffement_climatique
+http://www.semanlink.net/doc/2021/08/la_crise_climatique_s%E2%80%99aggrave_p|comment|> A journey of no return
+http://www.semanlink.net/doc/2021/08/la_crise_climatique_s%E2%80%99aggrave_p|title|La crise climatique s’aggrave partout, à des niveaux sans précédent, alerte le GIEC
+http://www.semanlink.net/doc/2021/08/la_crise_climatique_s%E2%80%99aggrave_p|bookmarkOf|https://www.lemonde.fr/planete/article/2021/08/09/la-crise-climatique-s-aggrave-partout-a-des-niveaux-sans-precedent-alerte-le-giec_6090961_3244.html
+http://www.semanlink.net/doc/2021/08/la_crise_climatique_s%E2%80%99aggrave_p|creationTime|2021-08-09T11:34:20Z
+http://www.semanlink.net/doc/2020/07/the_top_of_my_todo_list|creationDate|2020-07-03
+http://www.semanlink.net/doc/2020/07/the_top_of_my_todo_list|tag|http://www.semanlink.net/tag/paul_graham
+http://www.semanlink.net/doc/2020/07/the_top_of_my_todo_list|tag|http://www.semanlink.net/tag/todo_list
+http://www.semanlink.net/doc/2020/07/the_top_of_my_todo_list|title|The Top of My Todo List
+http://www.semanlink.net/doc/2020/07/the_top_of_my_todo_list|bookmarkOf|http://www.paulgraham.com/todo.html
+http://www.semanlink.net/doc/2020/07/the_top_of_my_todo_list|creationTime|2020-07-03T00:49:53Z
+http://www.semanlink.net/doc/2020/10/ssp_semantic_space_projection_|creationDate|2020-10-02
+http://www.semanlink.net/doc/2020/10/ssp_semantic_space_projection_|tag|http://www.semanlink.net/tag/roosevelt
+http://www.semanlink.net/doc/2020/10/ssp_semantic_space_projection_|tag|http://www.semanlink.net/tag/text_aware_kg_embedding
+http://www.semanlink.net/doc/2020/10/ssp_semantic_space_projection_|comment|"> jointly learns from the symbolic triples and textual descriptions

> The data involved in our model are the knowledge triples
and the textual descriptions of entities. In experiments, we
adopt the “entity descriptions” of Freebase and the textual
definitions of Wordnet as textual information.

Obvious but very good remark about link prediction in facts-only KG:

> the triple (Anna Roosevelt, Parents, Franklin Roosevelt), indicates “Franklin
Roosevelt” is the parent of “Anna Roosevelt”. However,
it’s quite difficult to infer this fact merely from other symbolic
triples."
+http://www.semanlink.net/doc/2020/10/ssp_semantic_space_projection_|title|SSP: Semantic Space Projection for Knowledge Graph Embedding with Text Descriptions (AAAI 2017)
+http://www.semanlink.net/doc/2020/10/ssp_semantic_space_projection_|bookmarkOf|https://www.aaai.org/Conferences/AAAI/2017/PreliminaryPapers/14-XiaoH-14306.pdf
+http://www.semanlink.net/doc/2020/10/ssp_semantic_space_projection_|creationTime|2020-10-02T00:57:11Z
+http://www.semanlink.net/doc/2020/10/ssp_semantic_space_projection_|bookmarkOf|https://arxiv.org/abs/1604.04835
+http://www.semanlink.net/doc/2020/08/scikit_multilearn_scikit_multil|creationDate|2020-08-12
+http://www.semanlink.net/doc/2020/08/scikit_multilearn_scikit_multil|tag|http://www.semanlink.net/tag/multi_label_classification
+http://www.semanlink.net/doc/2020/08/scikit_multilearn_scikit_multil|tag|http://www.semanlink.net/tag/scikit_learn
+http://www.semanlink.net/doc/2020/08/scikit_multilearn_scikit_multil|title|scikit-multilearn/scikit-multilearn: A scikit-learn based module for multi-label et. al. classification
+http://www.semanlink.net/doc/2020/08/scikit_multilearn_scikit_multil|bookmarkOf|https://github.com/scikit-multilearn/scikit-multilearn
+http://www.semanlink.net/doc/2020/08/scikit_multilearn_scikit_multil|creationTime|2020-08-12T12:47:05Z
+http://www.semanlink.net/doc/2021/01/yuval_noah_harari_quotes|creationDate|2021-01-11
+http://www.semanlink.net/doc/2021/01/yuval_noah_harari_quotes|tag|http://www.semanlink.net/tag/citation
+http://www.semanlink.net/doc/2021/01/yuval_noah_harari_quotes|tag|http://www.semanlink.net/tag/yuval_noah_harari
+http://www.semanlink.net/doc/2021/01/yuval_noah_harari_quotes|comment|"> You could never convince a monkey to give you a banana by promising him limitless bananas after death in monkey heaven.

> how many husbands can say how their wife will behave if they are charged by an enraged mammoth?

(and conversely, how many wives...)
+ +> “This is the essence of the Agricultural Revolution: the ability to keep more people alive under worse conditions.”" +http://www.semanlink.net/doc/2021/01/yuval_noah_harari_quotes|title|Yuval Noah Harari Quotes +http://www.semanlink.net/doc/2021/01/yuval_noah_harari_quotes|bookmarkOf|https://www.goodreads.com/author/quotes/395812.Yuval_Noah_Harari +http://www.semanlink.net/doc/2021/01/yuval_noah_harari_quotes|creationTime|2021-01-11T22:42:02Z +http://www.semanlink.net/doc/2021/10/linguistic_diversity|creationDate|2021-10-03 +http://www.semanlink.net/doc/2021/10/linguistic_diversity|tag|http://www.semanlink.net/tag/acl_2020 +http://www.semanlink.net/doc/2021/10/linguistic_diversity|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2021/10/linguistic_diversity|tag|http://www.semanlink.net/tag/low_resource_languages +http://www.semanlink.net/doc/2021/10/linguistic_diversity|comment|"> We create a consistent data model to complement the existing ACL Anthology Corpus with data from later years and of non-ACL conferences. We do this by augmenting the corpus using Semantic Scholar’s API and scraping ACL Anthology itself. This is a consolidated dataset for 11 conferences with different attributes. Stay tuned :) + +[[2004.09095] The State and Fate of Linguistic Diversity and Inclusion in the NLP World](doc:2021/10/2004_09095_the_state_and_fate)" +http://www.semanlink.net/doc/2021/10/linguistic_diversity|relatedDoc|http://www.semanlink.net/doc/2021/10/2004_09095_the_state_and_fate +http://www.semanlink.net/doc/2021/10/linguistic_diversity|title|Linguistic Diversity +http://www.semanlink.net/doc/2021/10/linguistic_diversity|bookmarkOf|https://microsoft.github.io/linguisticdiversity/ +http://www.semanlink.net/doc/2021/10/linguistic_diversity|creationTime|2021-10-03T12:39:09Z +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|creationDate|2020-10-25 +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|tag|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|tag|http://www.semanlink.net/tag/convolutional_neural_network +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|arxiv_author|Andrew Gordon Wilson +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|arxiv_author|Gregory Benton +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|arxiv_author|Marc Finzi +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|arxiv_author|Pavel Izmailov +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|comment|how to *learn* symmetries -- rotations, translations, scalings, shears -- from training data alone +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|title|[2010.11882] Learning Invariances in Neural Networks +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|bookmarkOf|https://arxiv.org/abs/2010.11882 +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|creationTime|2020-10-25T12:38:17Z +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|arxiv_summary|"Invariances to translations have imbued convolutional neural networks with +powerful generalization properties. However, we often do not know a priori what +invariances are present in the data, or to what extent a model should be +invariant to a given symmetry group. 
We show how to \emph{learn} invariances +and equivariances by parameterizing a distribution over augmentations and +optimizing the training loss simultaneously with respect to the network +parameters and augmentation parameters. With this simple procedure we can +recover the correct set and extent of invariances on image classification, +regression, segmentation, and molecular property prediction from a large space +of augmentations, on training data alone." +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|arxiv_firstAuthor|Gregory Benton +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|arxiv_updated|2020-10-22T17:18:48Z +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|arxiv_title|Learning Invariances in Neural Networks +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|arxiv_published|2020-10-22T17:18:48Z +http://www.semanlink.net/doc/2020/10/2010_11882_learning_invarianc|arxiv_num|2010.11882 +http://www.semanlink.net/doc/2020/09/searching_with_shingles_%7C_elast|creationDate|2020-09-03 +http://www.semanlink.net/doc/2020/09/searching_with_shingles_%7C_elast|tag|http://www.semanlink.net/tag/shingles +http://www.semanlink.net/doc/2020/09/searching_with_shingles_%7C_elast|tag|http://www.semanlink.net/tag/elasticsearch +http://www.semanlink.net/doc/2020/09/searching_with_shingles_%7C_elast|comment|"word n-grams. +> Shingles effectively give you the ability to **pre-bake phrase matching**" +http://www.semanlink.net/doc/2020/09/searching_with_shingles_%7C_elast|title|Searching with Shingles Elastic Blog +http://www.semanlink.net/doc/2020/09/searching_with_shingles_%7C_elast|bookmarkOf|https://www.elastic.co/fr/blog/searching-with-shingles +http://www.semanlink.net/doc/2020/09/searching_with_shingles_%7C_elast|creationTime|2020-09-03T17:57:33Z +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|creationDate|2020-10-05 +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|tag|http://www.semanlink.net/tag/using_word_embedding +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|tag|http://www.semanlink.net/tag/unsupervised_text_classification +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|tag|http://www.semanlink.net/tag/max_halford +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|tag|http://www.semanlink.net/tag/simple_idea +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|comment|Mentions this [paper](doc:2020/10/towards_unsupervised_text_class) +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|relatedDoc|http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|title|Classifying documents without any training data - Max Halford +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|bookmarkOf|https://maxhalford.github.io/blog/document-classification/ +http://www.semanlink.net/doc/2020/10/classifying_documents_without_a|creationTime|2020-10-05T00:09:59Z +http://www.semanlink.net/doc/2021/08/ocasio_cortez_s_response_to_jib|creationDate|2021-08-07 +http://www.semanlink.net/doc/2021/08/ocasio_cortez_s_response_to_jib|tag|http://www.semanlink.net/tag/danse 
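Pre-baking phrase matching with shingles, as the Elastic post above describes, is an index-settings concern; a minimal sketch with the official Python client (index name and connection details are assumptions):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch()  # connection details assumed

# Create an index whose analyzer emits 2-word shingles (word n-grams),
# so phrase matches are materialized at index time.
es.indices.create(
    index="docs",
    body={
        "settings": {
            "analysis": {
                "filter": {
                    "my_shingles": {
                        "type": "shingle",
                        "min_shingle_size": 2,
                        "max_shingle_size": 2,
                    }
                },
                "analyzer": {
                    "shingle_analyzer": {
                        "type": "custom",
                        "tokenizer": "standard",
                        "filter": ["lowercase", "my_shingles"],
                    }
                },
            }
        }
    },
)
```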
+http://www.semanlink.net/doc/2021/08/ocasio_cortez_s_response_to_jib|tag|http://www.semanlink.net/tag/alexandria_ocasio_cortez
+http://www.semanlink.net/doc/2021/08/ocasio_cortez_s_response_to_jib|title|Ocasio-Cortez's response to jibes about college dance video? A congressional dance video The Guardian (2019)
+http://www.semanlink.net/doc/2021/08/ocasio_cortez_s_response_to_jib|bookmarkOf|https://www.theguardian.com/us-news/2019/jan/04/alexandria-ocasio-cortez-college-dance-video-discredit-backfires
+http://www.semanlink.net/doc/2021/08/ocasio_cortez_s_response_to_jib|creationTime|2021-08-07T11:27:00Z
+http://www.semanlink.net/doc/2021/09/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|creationDate|2021-09-28
+http://www.semanlink.net/doc/2021/09/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2021/09/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|tag|http://www.semanlink.net/tag/nlp_problem
+http://www.semanlink.net/doc/2021/09/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|tag|http://www.semanlink.net/tag/yoav_goldberg
+http://www.semanlink.net/doc/2021/09/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|comment|New NLP task: for every pair of base-NPs (Noun Phrases) in the text, decide whether they can be related by a preposition, and if so, which.
+http://www.semanlink.net/doc/2021/09/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|title|"(((ل()(ل() 'yoav))))👾 sur Twitter : ""Text-based NP Enrichment"""
+http://www.semanlink.net/doc/2021/09/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|bookmarkOf|https://twitter.com/yoavgo/status/1442553285534171137
+http://www.semanlink.net/doc/2021/09/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|creationTime|2021-09-28T08:17:14Z
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|creationDate|2021-07-06
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|tag|http://www.semanlink.net/tag/survey
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|tag|http://www.semanlink.net/tag/low_resource_languages
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|tag|http://www.semanlink.net/tag/nlp_low_resource_scenarios
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|tag|http://www.semanlink.net/tag/bosch
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_author|Michael A. Hedderich
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_author|Heike Adel
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_author|Dietrich Klakow
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_author|Jannik Strötgen
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_author|Lukas Lange
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|comment|"Low-resource scenarios: not only low-resource languages, but also non-standard domains and tasks.
+

One key goal of this survey is to highlight the underlying assumptions.
"
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|title|[2010.12309] A Survey on Recent Approaches for Natural Language Processing in Low-Resource Scenarios
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|bookmarkOf|https://arxiv.org/abs/2010.12309
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|creationTime|2021-07-06T13:08:01Z
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_summary|"Deep neural networks and huge language models are becoming omnipresent in
natural language applications. As they are known for requiring large amounts of
training data, there is a growing body of work to improve the performance in
low-resource settings. Motivated by the recent fundamental changes towards
neural models and the popular pre-train and fine-tune paradigm, we survey
promising approaches for low-resource natural language processing. After a
discussion about the different dimensions of data availability, we give a
structured overview of methods that enable learning when training data is
sparse. This includes mechanisms to create additional labeled data like data
augmentation and distant supervision as well as transfer learning settings that
reduce the need for target supervision. A goal of our survey is to explain how
these methods differ in their requirements as understanding them is essential
for choosing a technique suited for a specific low-resource setting. Further
key aspects of this work are to highlight open issues and to outline promising
directions for future research."
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_firstAuthor|Michael A. Hedderich
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_updated|2021-04-09T13:48:02Z
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_title|A Survey on Recent Approaches for Natural Language Processing in Low-Resource Scenarios
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_published|2020-10-23T11:22:01Z
+http://www.semanlink.net/doc/2021/07/2010_12309_a_survey_on_recent|arxiv_num|2010.12309
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|creationDate|2020-12-11
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|tag|http://www.semanlink.net/tag/knowledge_distillation
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|tag|http://www.semanlink.net/tag/nlp_facebook
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|tag|http://www.semanlink.net/tag/open_domain_question_answering
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|tag|http://www.semanlink.net/tag/nlp_ens
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|tag|http://www.semanlink.net/tag/question_answering
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|tag|http://www.semanlink.net/tag/knowledge_augmented_language_models
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|arxiv_author|Edouard Grave
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|arxiv_author|Gautier Izacard
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|comment|"> a method to train an information retrieval module for downstream tasks, **without using pairs of queries and documents as annotations**. + +Uses two models (standard pipeline for open-domain QA): + +- the first one retrieves documents from a large source of knowledge (the retriever) +- the second one processes the support documents to solve the task (the reader). + +> First the retriever selects support passages in a large knowledge +source. Then these passages are processed by the reader, along with the question, to generate an +answer + +Inspired by knowledge distillation: the reader model is the teacher and the retriever is the student. + +> More precisely, we use a sequence-to-sequence model as the reader, and use +the attention activations over the input documents as synthetic labels to train the retriever. +> (**train the retriever by learning to approximate the attention score of the reader**) + +Refers to: + +- [REALM: Retrieval-Augmented Language Model Pre-Training](doc:2020/12/2002_08909_realm_retrieval_a) +- [Dehghani: Neural Ranking Models with Weak Supervision](doc:?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.08803)" +http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|relatedDoc|https://arxiv.org/abs/1704.08803 +http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|relatedDoc|http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a +http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|title|[2012.04584] Distilling Knowledge from Reader to Retriever for Question Answering +http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|bookmarkOf|https://arxiv.org/abs/2012.04584 +http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|creationTime|2020-12-11T16:48:13Z +http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|arxiv_summary|"The task of information retrieval is an important component of many natural +language processing systems, such as open domain question answering. While +traditional methods were based on hand-crafted features, continuous +representations based on neural networks recently obtained competitive results. +A challenge of using such methods is to obtain supervised data to train the +retriever model, corresponding to pairs of query and support documents. In this +paper, we propose a technique to learn retriever models for downstream tasks, +inspired by knowledge distillation, and which does not require annotated pairs +of query and documents. Our approach leverages attention scores of a reader +model, used to solve the task based on retrieved documents, to obtain synthetic +labels for the retriever. We evaluate our method on question answering, +obtaining state-of-the-art results." 
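A minimal sketch of the distillation objective this record describes, assuming the reader's cross-attention has already been aggregated into one score per retrieved passage (tensor shapes are illustrative):

```python
import torch.nn.functional as F

def retriever_distillation_loss(retriever_scores, reader_attention):
    # retriever_scores, reader_attention: (batch, n_passages).
    # The reader (teacher) provides synthetic labels; detach so no
    # gradient flows back into it.
    target = F.softmax(reader_attention, dim=-1).detach()
    log_pred = F.log_softmax(retriever_scores, dim=-1)
    return F.kl_div(log_pred, target, reduction="batchmean")
```

Because the target is detached, gradients only update the retriever, matching the teacher/student split described above.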
+http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|arxiv_firstAuthor|Gautier Izacard +http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|arxiv_updated|2020-12-08T17:36:34Z +http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|arxiv_title|Distilling Knowledge from Reader to Retriever for Question Answering +http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|arxiv_published|2020-12-08T17:36:34Z +http://www.semanlink.net/doc/2020/12/2012_04584_distilling_knowled|arxiv_num|2012.04584 +http://www.semanlink.net/doc/2020/10/en_californie_la_%C2%AB_gig_economy|creationDate|2020-10-14 +http://www.semanlink.net/doc/2020/10/en_californie_la_%C2%AB_gig_economy|tag|http://www.semanlink.net/tag/californie +http://www.semanlink.net/doc/2020/10/en_californie_la_%C2%AB_gig_economy|tag|http://www.semanlink.net/tag/uberisation +http://www.semanlink.net/doc/2020/10/en_californie_la_%C2%AB_gig_economy|tag|http://www.semanlink.net/tag/gig_economy +http://www.semanlink.net/doc/2020/10/en_californie_la_%C2%AB_gig_economy|title|En Californie, la « gig economy » soumise à référendum +http://www.semanlink.net/doc/2020/10/en_californie_la_%C2%AB_gig_economy|bookmarkOf|https://www.lemonde.fr/economie/article/2020/10/14/en-californie-la-gig-economy-soumise-a-referendum_6055954_3234.html +http://www.semanlink.net/doc/2020/10/en_californie_la_%C2%AB_gig_economy|creationTime|2020-10-14T19:26:53Z +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|creationDate|2020-06-29 +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|tag|http://www.semanlink.net/tag/top_k +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_author|Wei Wei +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_author|Minshuo Chen +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_author|Hongyuan Zha +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_author|Bo Dai +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_author|Yujia Xie +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_author|Tomas Pfister +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_author|Hanjun Dai +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_author|Tuo Zhao +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|comment|"> if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator +> ... 
+> We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance" +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|title|[2002.06504] Differentiable Top-k Operator with Optimal Transport +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|bookmarkOf|https://arxiv.org/abs/2002.06504 +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|creationTime|2020-06-29T14:04:10Z +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_summary|"The top-k operation, i.e., finding the k largest or smallest elements from a +collection of scores, is an important model component, which is widely used in +information retrieval, machine learning, and data mining. However, if the top-k +operation is implemented in an algorithmic way, e.g., using bubble algorithm, +the resulting model cannot be trained in an end-to-end way using prevalent +gradient descent algorithms. This is because these implementations typically +involve swapping indices, whose gradient cannot be computed. Moreover, the +corresponding mapping from the input scores to the indicator vector of whether +this element belongs to the top-k set is essentially discontinuous. To address +the issue, we propose a smoothed approximation, namely the SOFT (Scalable +Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT +top-k operator approximates the output of the top-k operation as the solution +of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT +operator can then be efficiently approximated based on the optimality +conditions of EOT problem. We apply the proposed operator to the k-nearest +neighbors and beam search algorithms, and demonstrate improved performance." +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_firstAuthor|Yujia Xie +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_updated|2020-02-18T18:56:09Z +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_title|Differentiable Top-k Operator with Optimal Transport +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_published|2020-02-16T04:57:52Z +http://www.semanlink.net/doc/2020/06/2002_06504_differentiable_top|arxiv_num|2002.06504 +http://www.semanlink.net/doc/2021/06/africanlp_workshop|creationDate|2021-06-30 +http://www.semanlink.net/doc/2021/06/africanlp_workshop|tag|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/doc/2021/06/africanlp_workshop|tag|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/doc/2021/06/africanlp_workshop|title|AfricaNLP Workshop +http://www.semanlink.net/doc/2021/06/africanlp_workshop|bookmarkOf|https://sites.google.com/view/africanlp-workshop +http://www.semanlink.net/doc/2021/06/africanlp_workshop|creationTime|2021-06-30T00:40:19Z +http://www.semanlink.net/doc/2021/07/plotly_the_front_end_for_ml_an|creationDate|2021-07-01 +http://www.semanlink.net/doc/2021/07/plotly_the_front_end_for_ml_an|tag|http://www.semanlink.net/tag/web_app +http://www.semanlink.net/doc/2021/07/plotly_the_front_end_for_ml_an|tag|http://www.semanlink.net/tag/low_code +http://www.semanlink.net/doc/2021/07/plotly_the_front_end_for_ml_an|tag|http://www.semanlink.net/tag/python_tools +http://www.semanlink.net/doc/2021/07/plotly_the_front_end_for_ml_an|comment|"> The premier low-code platform for ML & data science apps. 
+ +[Dash](https://plotly.com/dash/)" +http://www.semanlink.net/doc/2021/07/plotly_the_front_end_for_ml_an|title|Plotly: The front end for ML and data science models +http://www.semanlink.net/doc/2021/07/plotly_the_front_end_for_ml_an|bookmarkOf|https://plotly.com/ +http://www.semanlink.net/doc/2021/07/plotly_the_front_end_for_ml_an|creationTime|2021-07-01T00:23:56Z +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|creationDate|2021-10-18 +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|tag|http://www.semanlink.net/tag/huggingface_bigscience +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|tag|http://www.semanlink.net/tag/zero_shot +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Thomas Wolf +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Gunjan Chhablani +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Andrea Santilli +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Alexander M. Rush +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Mike Tian-Jian Jiang +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Arnaud Stiegler +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|M Saiful Bari +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Jason Alan Fries +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Han Wang +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Nihal Nayak +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Ryan Teehan +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Jonathan Chang +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Canwen Xu +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Sheng Shen +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Tali Bers +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Manan Dey +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Taewoon Kim +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Albert Webson +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Thomas Wang +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Rachel Bawden +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Trishala Neeraj +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Jos Rozen +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Abheesht Sharma +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Harshit Pandey +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Lintang Sutawika +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Zheng Xin Yong +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Zaid Alyafeai +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Leo Gao +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Stella Biderman 
+http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Debajyoti Datta +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Shanya Sharma Sharma +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Colin Raffel +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Matteo Manica +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Antoine Chaffin +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Thibault Fevry +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Victor Sanh +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Eliza Szczechla +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Teven Le Scao +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Urmish Thakker +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Arun Raja +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_author|Stephen H. Bach +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|comment|[Tweet](https://twitter.com/BigscienceW/status/1450084548872744961?s=20) +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|title|[2110.08207] Multitask Prompted Training Enables Zero-Shot Task Generalization +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|bookmarkOf|https://arxiv.org/abs/2110.08207 +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|creationTime|2021-10-18T23:12:20Z +http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_summary|"Large language models have recently been shown to attain reasonable zero-shot +generalization on a diverse set of tasks. It has been hypothesized that this is +a consequence of implicit multitask learning in language model training. Can +zero-shot generalization instead be directly induced by explicit multitask +learning? To test this question at scale, we develop a system for easily +mapping general natural language tasks into a human-readable prompted form. We +convert a large set of supervised datasets, each with multiple prompts using +varying natural language. These prompted datasets allow for benchmarking the +ability of a model to perform completely unseen tasks specified in natural +language. We fine-tune a pretrained encoder-decoder model on this multitask +mixture covering a wide variety of tasks. The model attains strong zero-shot +performance on several standard datasets, often outperforming models 16x its +size. Further, our approach attains strong performance on a subset of tasks +from the BIG-Bench benchmark, outperforming models 6x its size. All prompts and +trained models are available at github.com/bigscience-workshop/promptsource/." 
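Since the prompts and trained models are released, the zero-shot behaviour can be probed directly; a sketch with the transformers library, assuming the `bigscience/T0pp` checkpoint (the prompt wording is an arbitrary example):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bigscience/T0pp")
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp")

# Any natural-language task description works as a prompt.
inputs = tokenizer(
    "Is this review positive or negative? "
    "Review: this is the best cast iron skillet you will ever buy",
    return_tensors="pt",
)
outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```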
+http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_firstAuthor|Victor Sanh
+http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_updated|2021-10-15T17:08:57Z
+http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_title|Multitask Prompted Training Enables Zero-Shot Task Generalization
+http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_published|2021-10-15T17:08:57Z
+http://www.semanlink.net/doc/2021/10/2110_08207_multitask_prompted|arxiv_num|2110.08207
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|creationDate|2021-07-06
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|tag|http://www.semanlink.net/tag/survey
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|tag|http://www.semanlink.net/tag/low_resource_languages
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|arxiv_author|Evan Heetderks
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|arxiv_author|Vincent Carles
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|arxiv_author|Alexandre Magueresse
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|comment|meh
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|title|[2006.07264] Low-resource Languages: A Review of Past Work and Future Challenges
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|bookmarkOf|https://arxiv.org/abs/2006.07264
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|creationTime|2021-07-06T13:07:39Z
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|arxiv_summary|"A current problem in NLP is massaging and processing low-resource languages
which lack useful training attributes such as supervised data, number of native
speakers or experts, etc. This review paper concisely summarizes previous
groundbreaking achievements made towards resolving this problem, and analyzes
potential improvements in the context of the overall future research direction."
+http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|arxiv_firstAuthor|Alexandre Magueresse +http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|arxiv_updated|2020-06-12T15:21:57Z +http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|arxiv_title|Low-resource Languages: A Review of Past Work and Future Challenges +http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|arxiv_published|2020-06-12T15:21:57Z +http://www.semanlink.net/doc/2021/07/2006_07264_low_resource_langu|arxiv_num|2006.07264 +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|creationDate|2020-06-30 +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|tag|http://www.semanlink.net/tag/uncertainty_in_deep_learning +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|arxiv_author|Robin Jia +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|arxiv_author|Amita Kamath +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|arxiv_author|Percy Liang +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|comment|"**How you can get a QA model to abstain from answering when it doesn’t know the answer.** + +> Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely." +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|title|[2006.09462] Selective Question Answering under Domain Shift +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|bookmarkOf|https://arxiv.org/abs/2006.09462 +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|creationTime|2020-06-30T10:59:53Z +http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|arxiv_summary|"To avoid giving wrong answers, question answering (QA) models need to know +when to abstain from answering. Moreover, users often ask questions that +diverge from the model's training data, making errors more likely and thus +abstention more critical. In this work, we propose the setting of selective +question answering under domain shift, in which a QA model is tested on a +mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain +on) as many questions as possible while maintaining high accuracy. Abstention +policies based solely on the model's softmax probabilities fare poorly, since +models are overconfident on out-of-domain inputs. Instead, we train a +calibrator to identify inputs on which the QA model errs, and abstain when it +predicts an error is likely. Crucially, the calibrator benefits from observing +the model's behavior on out-of-domain data, even if from a different domain +than the test data. We combine this method with a SQuAD-trained QA model and +evaluate on mixtures of SQuAD and five other QA datasets. Our method answers +56% of questions while maintaining 80% accuracy; in contrast, directly using +the model's probabilities only answers 48% at 80% accuracy." 
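The abstention policy described above boils down to thresholding a learned error predictor; a hedged sketch (feature set, classifier choice, and threshold are illustrative, not the paper's exact setup):

```python
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier

# X: per-question features (e.g. the QA model's softmax probability,
# input length, ...); y: 1 where the QA model's answer was wrong.
# Placeholder arrays stand in for real calibration data.
X = np.random.rand(1000, 3)
y = (np.random.rand(1000) > 0.7).astype(int)
calibrator = GradientBoostingClassifier().fit(X, y)

def answer_or_abstain(features, answer, threshold=0.5):
    # Abstain when the calibrator predicts an error is likely.
    p_error = calibrator.predict_proba(features.reshape(1, -1))[0, 1]
    return answer if p_error < threshold else None  # None == abstain
```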
+http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|arxiv_firstAuthor|Amita Kamath
+http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|arxiv_updated|2020-06-16T19:13:21Z
+http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|arxiv_title|Selective Question Answering under Domain Shift
+http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|arxiv_published|2020-06-16T19:13:21Z
+http://www.semanlink.net/doc/2020/06/2006_09462_selective_question|arxiv_num|2006.09462
+http://www.semanlink.net/doc/2021/05/hyenes_film_|creationDate|2021-05-29
+http://www.semanlink.net/doc/2021/05/hyenes_film_|tag|http://www.semanlink.net/tag/senegal
+http://www.semanlink.net/doc/2021/05/hyenes_film_|tag|http://www.semanlink.net/tag/film
+http://www.semanlink.net/doc/2021/05/hyenes_film_|comment|"Adaptation of ""Der Besuch der alten Dame"" by Dürrenmatt"
+http://www.semanlink.net/doc/2021/05/hyenes_film_|title|Hyènes (film)
+http://www.semanlink.net/doc/2021/05/hyenes_film_|bookmarkOf|https://fr.wikipedia.org/wiki/Hy%C3%A8nes_(film)
+http://www.semanlink.net/doc/2021/05/hyenes_film_|creationTime|2021-05-29T23:36:24Z
+http://www.semanlink.net/doc/2021/05/large_scale_evaluation_of_keyph|creationDate|2021-05-31
+http://www.semanlink.net/doc/2021/05/large_scale_evaluation_of_keyph|tag|http://www.semanlink.net/tag/nlp_topic_extraction
+http://www.semanlink.net/doc/2021/05/large_scale_evaluation_of_keyph|comment|"> Results indicate that keyphrase extraction is still an open research question, with state-of-the-art neural-based models still challenged by simple baselines on some datasets

[Github](https://github.com/ygorg/JCDL_2020_KPE_Eval)"
+http://www.semanlink.net/doc/2021/05/large_scale_evaluation_of_keyph|title|Large-Scale Evaluation of Keyphrase Extraction Models (2020)
+http://www.semanlink.net/doc/2021/05/large_scale_evaluation_of_keyph|bookmarkOf|https://hal.archives-ouvertes.fr/hal-02878953/document
+http://www.semanlink.net/doc/2021/05/large_scale_evaluation_of_keyph|creationTime|2021-05-31T11:56:12Z
+http://www.semanlink.net/doc/2020/11/paris%E2%80%99s_15_minute_city_could_be|creationDate|2020-11-16
+http://www.semanlink.net/doc/2020/11/paris%E2%80%99s_15_minute_city_could_be|tag|http://www.semanlink.net/tag/portland_or
+http://www.semanlink.net/doc/2020/11/paris%E2%80%99s_15_minute_city_could_be|tag|http://www.semanlink.net/tag/ville_sans_voiture
+http://www.semanlink.net/doc/2020/11/paris%E2%80%99s_15_minute_city_could_be|tag|http://www.semanlink.net/tag/paris
+http://www.semanlink.net/doc/2020/11/paris%E2%80%99s_15_minute_city_could_be|tag|http://www.semanlink.net/tag/anne_hidalgo
+http://www.semanlink.net/doc/2020/11/paris%E2%80%99s_15_minute_city_could_be|title|Paris’s 15-Minute City Could Be Coming to an Urban Area Near You - Bloomberg
+http://www.semanlink.net/doc/2020/11/paris%E2%80%99s_15_minute_city_could_be|bookmarkOf|https://www.bloomberg.com/news/features/2020-11-12/paris-s-15-minute-city-could-be-coming-to-an-urban-area-near-you
+http://www.semanlink.net/doc/2020/11/paris%E2%80%99s_15_minute_city_could_be|creationTime|2020-11-16T12:07:26Z
+http://www.semanlink.net/doc/2020/12/drew_tada_sur_twitter_offici|creationDate|2020-12-10
+http://www.semanlink.net/doc/2020/12/drew_tada_sur_twitter_offici|tag|http://www.semanlink.net/tag/knowledge_graph
+http://www.semanlink.net/doc/2020/12/drew_tada_sur_twitter_offici|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2020/12/drew_tada_sur_twitter_offici|tag|http://www.semanlink.net/tag/knowledge_graph_search_engine +http://www.semanlink.net/doc/2020/12/drew_tada_sur_twitter_offici|comment|"Built on Wikipedia’s link structure, it returns a subgraph of connected pages. + +- +- [Blog post](https://kcollective.substack.com/p/exploration-engines)" +http://www.semanlink.net/doc/2020/12/drew_tada_sur_twitter_offici|relatedDoc|http://www.semanlink.net/doc/2020/12/exploration_engines_the_koodo +http://www.semanlink.net/doc/2020/12/drew_tada_sur_twitter_offici|title|"Drew Tada sur Twitter : ""Officially launching giantgra.ph A search engine for knowledge graphs...""" +http://www.semanlink.net/doc/2020/12/drew_tada_sur_twitter_offici|bookmarkOf|https://twitter.com/drew_tada/status/1335626244365422598?s=20 +http://www.semanlink.net/doc/2020/12/drew_tada_sur_twitter_offici|creationTime|2020-12-10T13:30:11Z +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|creationDate|2021-07-06 +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|tag|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|tag|http://www.semanlink.net/tag/masakhane +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Kelechi Nwaike +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Victor Akinode +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Chris Chinenye Emezue +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Tajuddeen Gwadabe +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Adewale Akinfaderin +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Bonaventure F. P. 
Dossou +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Samuel Oyerinde +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Orevaoghene Ahia +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Constantine Lignos +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Paul Rayson +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Yvonne Wambui +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Emmanuel Anebi +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Blessing Sibanda +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Graham Neubig +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Iroro Orife +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Tendai Marengereke +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Happy Buzaaba +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Chiamaka Chukwuneke +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Nkiruka Odu +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Thierno Ibrahima DIOP +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Shruti Rijhwani +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Julia Kreutzer +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Temilola Oloyede +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Samba Ngom +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Joyce Nakatumba-Nabende +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Tobius Saul Bateesa +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Stephen Mayhew +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Tosin Adewumi +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Sebastian Ruder +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Ayodele Awokoya +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Degaga Wolde +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Ignatius Ezeani +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Abdoulaye Faye +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Clemencia Siro +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Abdoulaye Diallo +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Israel Abebe Azime +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Perez Ogayo +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Rubungo Andre Niyongabo +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Chester Palen-Michel +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Henok Tilaye +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Eric Peter Wairagala +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Deborah Nabagereka 
+http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Daniel D'souza +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Anuoluwapo Aremu +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Maurice Katusiime +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Jonathan Mukiibi +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Derguene Mbaye +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|David Ifeoluwa Adelani +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Davis David +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Seid Muhie Yimam +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Shamsuddeen Muhammad +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Mouhamadane MBOUP +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Catherine Gitau +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Dibora Gebreyohannes +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Mofetoluwa Adeyemi +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Jade Abbott +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Kelechi Ogueji +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Verrah Otiende +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Jesujoba Alabi +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Salomey Osei +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_author|Gerald Muriuki +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|title|[2103.11811] MasakhaNER: Named Entity Recognition for African Languages +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|bookmarkOf|https://arxiv.org/abs/2103.11811 +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|creationTime|2021-07-06T13:08:36Z +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_summary|"We take a step towards addressing the under-representation of the African +continent in NLP research by creating the first large publicly available +high-quality dataset for named entity recognition (NER) in ten African +languages, bringing together a variety of stakeholders. We detail +characteristics of the languages to help researchers understand the challenges +that these languages pose for NER. We analyze our datasets and conduct an +extensive empirical evaluation of state-of-the-art methods across both +supervised and transfer learning settings. We release the data, code, and +models in order to inspire future research on African NLP." 
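Since the models are released, tagging text in the covered languages takes a few lines with transformers; the checkpoint name below is an assumption, so substitute any NER model fine-tuned on the MasakhaNER data:

```python
from transformers import pipeline

# Checkpoint name assumed, not taken from the paper.
ner = pipeline(
    "ner",
    model="Davlan/xlm-roberta-base-masakhaner",
    aggregation_strategy="simple",  # merge word pieces into entity spans
)
print(ner("Samia Suluhu Hassan ni rais wa Tanzania."))  # Swahili example
```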
+http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_firstAuthor|David Ifeoluwa Adelani +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_updated|2021-07-05T15:14:32Z +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_title|MasakhaNER: Named Entity Recognition for African Languages +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_published|2021-03-22T13:12:44Z +http://www.semanlink.net/doc/2021/07/2103_11811_masakhaner_named_|arxiv_num|2103.11811 +http://www.semanlink.net/doc/2020/11/regardless_of_the_us_presidenti|creationDate|2020-11-05 +http://www.semanlink.net/doc/2020/11/regardless_of_the_us_presidenti|tag|http://www.semanlink.net/tag/trump +http://www.semanlink.net/doc/2020/11/regardless_of_the_us_presidenti|tag|http://www.semanlink.net/tag/usa +http://www.semanlink.net/doc/2020/11/regardless_of_the_us_presidenti|tag|http://www.semanlink.net/tag/the_guardian +http://www.semanlink.net/doc/2020/11/regardless_of_the_us_presidenti|tag|http://www.semanlink.net/tag/elections_americaines_2020 +http://www.semanlink.net/doc/2020/11/regardless_of_the_us_presidenti|comment|"> His victory in 2016, it turns out, was no fluke attributable to Vladimir Putin or James Comey. In 2020 his sexism, racism and lie-telling have been legitimised and emboldened. +> When some Americans protested “This is not who we are”, Trump voters replied: “This is exactly who we are – and we’re not going anywhere.”" +http://www.semanlink.net/doc/2020/11/regardless_of_the_us_presidenti|title|Regardless of the US presidential election outcome, Trumpism lives on The Guardian +http://www.semanlink.net/doc/2020/11/regardless_of_the_us_presidenti|bookmarkOf|https://www.theguardian.com/us-news/2020/nov/04/trumpism-us-presidential-election +http://www.semanlink.net/doc/2020/11/regardless_of_the_us_presidenti|creationTime|2020-11-05T14:07:39Z +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|creationDate|2020-10-02 +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|tag|http://www.semanlink.net/tag/text_kg_and_embeddings +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|tag|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|arxiv_author|Ambedkar Dukkipati +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|arxiv_author|K M Annervaz +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|arxiv_author|Somnath Basu Roy Chowdhury +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|comment|"> we propose to enhance learning models with world knowledge in the form of **Knowledge Graph fact triples for NLP tasks**. Our aim is to develop a deep learning model that can extract relevant prior support facts from knowledge graphs depending on the task using attention mechanism. 
+ +Related [blog post](https://medium.com/@anshumanmourya/learning-beyond-datasets-knowledge-graph-augmented-neural-networks-for-natural-language-b937ba49f2e5)" +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|title|[1802.05930] Learning beyond datasets: Knowledge Graph Augmented Neural Networks for Natural language Processing +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|bookmarkOf|https://arxiv.org/abs/1802.05930 +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|creationTime|2020-10-02T01:01:15Z +http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|arxiv_summary|"Machine Learning has been the quintessential solution for many AI problems, +but learning is still heavily dependent on the specific training data. Some +learning models can be incorporated with a prior knowledge in the Bayesian set +up, but these learning models do not have the ability to access any organised +world knowledge on demand. In this work, we propose to enhance learning models +with world knowledge in the form of Knowledge Graph (KG) fact triples for +Natural Language Processing (NLP) tasks. Our aim is to develop a deep learning +model that can extract relevant prior support facts from knowledge graphs +depending on the task using attention mechanism. We introduce a +convolution-based model for learning representations of knowledge graph entity +and relation clusters in order to reduce the attention space. We show that the +proposed method is highly scalable to the amount of prior information that has +to be processed and can be applied to any generic NLP task. Using this method +we show significant improvement in performance for text classification with +News20, DBPedia datasets and natural language inference with Stanford Natural +Language Inference (SNLI) dataset. We also demonstrate that a deep learning +model can be trained well with substantially less amount of labeled training +data, when it has access to organised world knowledge in the form of knowledge +graph." 
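The mechanism this record describes, attending over retrieved fact-triple embeddings with the text encoding as query, has roughly this shape in PyTorch (dimensions illustrative; a sketch of the general pattern, not the paper's exact architecture):

```python
import torch
import torch.nn as nn

class FactAttention(nn.Module):
    """Enrich a text encoding with a pooled KG-fact vector."""
    def __init__(self, d_text, d_fact):
        super().__init__()
        self.query = nn.Linear(d_text, d_fact)

    def forward(self, text_vec, fact_vecs):
        # text_vec: (batch, d_text); fact_vecs: (batch, n_facts, d_fact)
        q = self.query(text_vec).unsqueeze(1)                 # (batch, 1, d_fact)
        weights = torch.softmax((q * fact_vecs).sum(-1), -1)  # (batch, n_facts)
        pooled = (weights.unsqueeze(-1) * fact_vecs).sum(1)   # (batch, d_fact)
        # The task head consumes the text encoding plus KG support.
        return torch.cat([text_vec, pooled], dim=-1)
```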
+http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|arxiv_firstAuthor|K M Annervaz
+http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|arxiv_updated|2018-05-21T03:44:48Z
+http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|arxiv_title|Learning beyond datasets: Knowledge Graph Augmented Neural Networks for Natural language Processing
+http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|arxiv_published|2018-02-16T13:38:00Z
+http://www.semanlink.net/doc/2020/10/1802_05930_learning_beyond_da|arxiv_num|1802.05930
+http://www.semanlink.net/doc/2021/02/sabil_kogl_weogo|creationDate|2021-02-27
+http://www.semanlink.net/doc/2021/02/sabil_kogl_weogo|tag|http://www.semanlink.net/tag/burkina_faso
+http://www.semanlink.net/doc/2021/02/sabil_kogl_weogo|tag|http://www.semanlink.net/tag/music_of_africa
+http://www.semanlink.net/doc/2021/02/sabil_kogl_weogo|title|Sabil Kogl-weogo
+http://www.semanlink.net/doc/2021/02/sabil_kogl_weogo|bookmarkOf|https://www.google.fr/search?q=Sabil+Kogl-weogo
+http://www.semanlink.net/doc/2021/02/sabil_kogl_weogo|creationTime|2021-02-27T19:12:10Z
+http://www.semanlink.net/doc/2021/02/cape_york_meteorite_wikipedia|creationDate|2021-02-09
+http://www.semanlink.net/doc/2021/02/cape_york_meteorite_wikipedia|tag|http://www.semanlink.net/tag/groenland
+http://www.semanlink.net/doc/2021/02/cape_york_meteorite_wikipedia|tag|http://www.semanlink.net/tag/meteorite
+http://www.semanlink.net/doc/2021/02/cape_york_meteorite_wikipedia|title|Cape York meteorite - Wikipedia
+http://www.semanlink.net/doc/2021/02/cape_york_meteorite_wikipedia|bookmarkOf|https://en.wikipedia.org/wiki/Cape_York_meteorite
+http://www.semanlink.net/doc/2021/02/cape_york_meteorite_wikipedia|creationTime|2021-02-09T02:08:16Z
+http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|creationDate|2021-02-23
+http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|tag|http://www.semanlink.net/tag/zero_shot_text_classifier
+http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|tag|http://www.semanlink.net/tag/hugging_face
+http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|tag|http://www.semanlink.net/tag/zero_shot_learning
+http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|tag|http://www.semanlink.net/tag/nli
+http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|tag|http://www.semanlink.net/tag/sbert
+http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|tag|http://www.semanlink.net/tag/nlp_text_classification
+http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|comment|"> state-of-the-art NLP
models for sequence classification without large annotated training
sets.

Simple idea: use a single model (e.g. [Sentence-BERT](tag:sbert)) to embed both the text data and the class names into the same space.

Problem: Sentence-BERT is designed to learn
effective sentence-level, not single- or multi-word representations like our
class names, so the label
embeddings may not be as semantically salient as those from word-level
embedding methods (e.g. word2vec).

Solution 1: learn a projection from sentence-level embeddings of words to word2vec embeddings, and use it for encoding when training the classifier. This can be adapted to few-shot learning.

Solution 2: ""Classification as [#Natural Language Inference](tag:nli)"".
+ +> A method which not only embeds +sequences and labels into the same latent space where their distance can +be measured, but that can actually tell us something about the compatibility +of two distinct sequences out of the box." +http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|title|Zero-Shot Learning in Modern NLP Joe Davison Blog (2020-05) +http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|bookmarkOf|https://joeddav.github.io/blog/2020/05/29/ZSL.html +http://www.semanlink.net/doc/2021/02/zero_shot_learning_in_modern_nl|creationTime|2021-02-23T13:44:34Z +http://www.semanlink.net/doc/2021/03/roam_vs_obsidian_which_one_sh|creationDate|2021-03-13 +http://www.semanlink.net/doc/2021/03/roam_vs_obsidian_which_one_sh|tag|http://www.semanlink.net/tag/roam +http://www.semanlink.net/doc/2021/03/roam_vs_obsidian_which_one_sh|tag|http://www.semanlink.net/tag/obsidian +http://www.semanlink.net/doc/2021/03/roam_vs_obsidian_which_one_sh|title|Roam vs Obsidian - Which one should you use? Medium +http://www.semanlink.net/doc/2021/03/roam_vs_obsidian_which_one_sh|bookmarkOf|https://medium.com/@travischan/roam-vs-obsidian-38c271161d18 +http://www.semanlink.net/doc/2021/03/roam_vs_obsidian_which_one_sh|creationTime|2021-03-13T11:27:36Z +http://www.semanlink.net/doc/2020/10/anvaka_vivagraphjs_graph_drawi|creationDate|2020-10-18 +http://www.semanlink.net/doc/2020/10/anvaka_vivagraphjs_graph_drawi|tag|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/doc/2020/10/anvaka_vivagraphjs_graph_drawi|tag|http://www.semanlink.net/tag/graph_visualization +http://www.semanlink.net/doc/2020/10/anvaka_vivagraphjs_graph_drawi|title|anvaka/VivaGraphJS: Graph drawing library for JavaScript +http://www.semanlink.net/doc/2020/10/anvaka_vivagraphjs_graph_drawi|bookmarkOf|https://github.com/anvaka/VivaGraphJS +http://www.semanlink.net/doc/2020/10/anvaka_vivagraphjs_graph_drawi|creationTime|2020-10-18T23:02:40Z +http://www.semanlink.net/doc/2021/01/graphaware_neo4j_consultancy_|creationDate|2021-01-19 +http://www.semanlink.net/doc/2021/01/graphaware_neo4j_consultancy_|tag|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/doc/2021/01/graphaware_neo4j_consultancy_|tag|http://www.semanlink.net/tag/neo4j +http://www.semanlink.net/doc/2021/01/graphaware_neo4j_consultancy_|comment|"> ""GraphAware are pioneers of graph-powered machine learning""" +http://www.semanlink.net/doc/2021/01/graphaware_neo4j_consultancy_|title|GraphAware - Neo4j consultancy, training, development +http://www.semanlink.net/doc/2021/01/graphaware_neo4j_consultancy_|bookmarkOf|https://graphaware.com/ +http://www.semanlink.net/doc/2021/01/graphaware_neo4j_consultancy_|creationTime|2021-01-19T09:23:04Z +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|creationDate|2021-04-10 +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|tag|http://www.semanlink.net/tag/google_deepmind +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|tag|http://www.semanlink.net/tag/artificial_general_intelligence +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_author|Pushmeet Kohli +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_author|Richard Evans +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_author|Marek Sergot 
+http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_author|Johannes Welbl +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_author|Jose Hernandez-Orallo +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|comment|"> what does it mean to “make sense” +of a sensory sequence? Our answer is that making sense means constructing a symbolic theory containing a set +of objects that persist over time, with attributes that change over time, according to general laws. This theory +must both explain the sensory input, and satisfy unity conditions [the +constituents of our theory – objects, properties, and atoms – must be integrated into a coherent whole] + +Sequel: [Making sense of raw input](doc:2021/05/making_sense_of_raw_input)" +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|relatedDoc|http://www.semanlink.net/doc/2021/05/making_sense_of_raw_input +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|title|[1910.02227] Making sense of sensory input +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|bookmarkOf|https://arxiv.org/abs/1910.02227 +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|creationTime|2021-04-10T19:09:06Z +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_summary|"This paper attempts to answer a central question in unsupervised learning: +what does it mean to ""make sense"" of a sensory sequence? In our formalization, +making sense involves constructing a symbolic causal theory that both explains +the sensory sequence and also satisfies a set of unity conditions. The unity +conditions insist that the constituents of the causal theory -- objects, +properties, and laws -- must be integrated into a coherent whole. On our +account, making sense of sensory input is a type of program synthesis, but it +is unsupervised program synthesis. +Our second contribution is a computer implementation, the Apperception +Engine, that was designed to satisfy the above requirements. Our system is able +to produce interpretable human-readable causal theories from very small amounts +of data, because of the strong inductive bias provided by the unity conditions. +A causal theory produced by our system is able to predict future sensor +readings, as well as retrodict earlier readings, and impute (fill in the blanks +of) missing sensory readings, in any combination. +We tested the engine in a diverse variety of domains, including cellular +automata, rhythms and simple nursery tunes, multi-modal binding problems, +occlusion tasks, and sequence induction intelligence tests. In each domain, we +test our engine's ability to predict future sensor values, retrodict earlier +sensor values, and impute missing sensory data. The engine performs well in all +these domains, significantly out-performing neural net baselines. We note in +particular that in the sequence induction intelligence tests, our system +achieved human-level performance. This is notable because our system is not a +bespoke system designed specifically to solve intelligence tests, but a +general-purpose system that was designed to make sense of any sensory sequence." 
+http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_firstAuthor|Richard Evans +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_updated|2020-07-14T03:16:30Z +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_title|Making sense of sensory input +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_published|2019-10-05T07:48:55Z +http://www.semanlink.net/doc/2021/04/1910_02227_making_sense_of_se|arxiv_num|1910.02227 +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|creationDate|2021-06-03 +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|tag|http://www.semanlink.net/tag/ikuya_yamada +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|tag|http://www.semanlink.net/tag/learning_to_hash +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|tag|http://www.semanlink.net/tag/open_domain_question_answering +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|tag|http://www.semanlink.net/tag/dense_passage_retrieval +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|arxiv_author|Ikuya Yamada +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|arxiv_author|Akari Asai +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|arxiv_author|Hannaneh Hajishirzi +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|comment|> Integrates a learning to hash technique into [DPR](tag:dense_passage_retrieval) to represent passages using compact binary codes rather than continuous vectors. We simultaneously train the encoders and hash functions in an end-to-end manner. +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|title|[2106.00882] Efficient Passage Retrieval with Hashing for Open-domain Question Answering +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|bookmarkOf|https://arxiv.org/abs/2106.00882 +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|creationTime|2021-06-03T11:11:35Z +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|arxiv_summary|"Most state-of-the-art open-domain question answering systems use a neural +retrieval model to encode passages into continuous vectors and extract them +from a knowledge source. However, such retrieval models often require large +memory to run because of the massive size of their passage index. In this +paper, we introduce Binary Passage Retriever (BPR), a memory-efficient neural +retrieval model that integrates a learning-to-hash technique into the +state-of-the-art Dense Passage Retriever (DPR) to represent the passage index +using compact binary codes rather than continuous vectors. BPR is trained with +a multi-task objective over two tasks: efficient candidate generation based on +binary codes and accurate reranking based on continuous vectors. Compared with +DPR, BPR substantially reduces the memory cost from 65GB to 2GB without a loss +of accuracy on two standard open-domain question answering benchmarks: Natural +Questions and TriviaQA. Our code and trained models are available at +https://github.com/studio-ousia/bpr." 
+http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|arxiv_firstAuthor|Ikuya Yamada +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|arxiv_updated|2021-06-02T01:34:42Z +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|arxiv_title|Efficient Passage Retrieval with Hashing for Open-domain Question Answering +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|arxiv_published|2021-06-02T01:34:42Z +http://www.semanlink.net/doc/2021/06/2106_00882_efficient_passage_|arxiv_num|2106.00882 +http://www.semanlink.net/doc/2021/05/clustering_of_semantically_enri|creationDate|2021-05-26 +http://www.semanlink.net/doc/2021/05/clustering_of_semantically_enri|tag|http://www.semanlink.net/tag/good_related_work_section +http://www.semanlink.net/doc/2021/05/clustering_of_semantically_enri|tag|http://www.semanlink.net/tag/nlp_french +http://www.semanlink.net/doc/2021/05/clustering_of_semantically_enri|tag|http://www.semanlink.net/tag/short_text_clustering +http://www.semanlink.net/doc/2021/05/clustering_of_semantically_enri|tag|http://www.semanlink.net/tag/clustering_small_sets_of_short_texts +http://www.semanlink.net/doc/2021/05/clustering_of_semantically_enri|comment|"the issue of clustering small sets of very short texts. Eg. in organizing brain-storming seminars + +> In order to cope with polysemy we adapt the SenseSearcher +algorithm (SnS), by Kozlowski and Rybinski. In addition, we test the possibilities of improving the quality of clustering ultra-short +texts by means of enriching them semantically. We present two approaches, one based on +neural-based distributional models, and the other based on external knowledge resources. + +> It was shown that **only text-oriented clustering methods (STC, [Lingo](tag:lingo) and SnSRC) +give reasonable results for French ultra short texts**, whereas the clustering quality of +Bisecting k-means in these experiments is very low + +> The experiments with the neural network based models (implemented by means of +Word2vec) showed much better results than other semantic enrichment methods for both +algorithms and for both data sets + +(Good related work section)" +http://www.semanlink.net/doc/2021/05/clustering_of_semantically_enri|title|Clustering of semantically enriched short texts (2018) +http://www.semanlink.net/doc/2021/05/clustering_of_semantically_enri|bookmarkOf|https://link.springer.com/article/10.1007/s10844-018-0541-4 +http://www.semanlink.net/doc/2021/05/clustering_of_semantically_enri|creationTime|2021-05-26T17:22:53Z +http://www.semanlink.net/doc/2021/08/how_i_almost_won_an_nlp_competi|creationDate|2021-08-11 +http://www.semanlink.net/doc/2021/08/how_i_almost_won_an_nlp_competi|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2021/08/how_i_almost_won_an_nlp_competi|tag|http://www.semanlink.net/tag/howto +http://www.semanlink.net/doc/2021/08/how_i_almost_won_an_nlp_competi|title|How I almost won an NLP competition without knowing any Machine Learning - DEV Community +http://www.semanlink.net/doc/2021/08/how_i_almost_won_an_nlp_competi|bookmarkOf|https://dev.to/ferdi05/how-i-almost-won-an-nlp-competition-without-knowing-any-machine-learning-24la +http://www.semanlink.net/doc/2021/08/how_i_almost_won_an_nlp_competi|creationTime|2021-08-11T12:57:30Z +http://www.semanlink.net/doc/2020/09/awesome_knowledge_distillation_|creationDate|2020-09-06 
+http://www.semanlink.net/doc/2020/09/awesome_knowledge_distillation_|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/09/awesome_knowledge_distillation_|title|Awesome Knowledge Distillation papers · Seongkyun Han's blog +http://www.semanlink.net/doc/2020/09/awesome_knowledge_distillation_|bookmarkOf|https://seongkyun.github.io/study/2019/04/05/awesome_kd/ +http://www.semanlink.net/doc/2020/09/awesome_knowledge_distillation_|creationTime|2020-09-06T12:01:58Z +http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli|creationDate|2021-07-17 +http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli|tag|http://www.semanlink.net/tag/deletefb +http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli|tag|http://www.semanlink.net/tag/publicite_politique +http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli|tag|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli|tag|http://www.semanlink.net/tag/targeted_ads +http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli|tag|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli|comment|"Oana Goga, chercheuse au +@LIGLab +, a développé, avec son équipe, l'outil AdAnalyst, qui leur a permis d’analyser le ciblage des publicités politiques sur #Facebook + +> Nos mesures ont montré que les publicitaires peuvent sélectionner parmi plus de 250 000 attributs, dont beaucoup sont très spécifiques et parfois sensibles tels que « l’intérêt dans les mouvements anti-avortement » ou « la conscience du cancer »." +http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli|title|Les enjeux de la publicité politique ciblée CNRS Le journal +http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli|bookmarkOf|https://lejournal.cnrs.fr/billets/les-enjeux-de-la-publicite-politique-ciblee +http://www.semanlink.net/doc/2021/07/les_enjeux_de_la_publicite_poli|creationTime|2021-07-17T15:40:27Z +http://www.semanlink.net/doc/2021/01/cory_doctorow_about_gamestop|creationDate|2021-01-29 +http://www.semanlink.net/doc/2021/01/cory_doctorow_about_gamestop|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/01/cory_doctorow_about_gamestop|tag|http://www.semanlink.net/tag/cory_doctorow +http://www.semanlink.net/doc/2021/01/cory_doctorow_about_gamestop|tag|http://www.semanlink.net/tag/speculation +http://www.semanlink.net/doc/2021/01/cory_doctorow_about_gamestop|tag|http://www.semanlink.net/tag/gamestop +http://www.semanlink.net/doc/2021/01/cory_doctorow_about_gamestop|title|Cory Doctorow about Gamestop +http://www.semanlink.net/doc/2021/01/cory_doctorow_about_gamestop|bookmarkOf|https://twitter.com/doctorow/status/1354848494192738304 +http://www.semanlink.net/doc/2021/01/cory_doctorow_about_gamestop|creationTime|2021-01-29T10:15:45Z +http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_kno|creationDate|2020-12-19 +http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_kno|tag|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_kno|tag|http://www.semanlink.net/tag/microsoft_azure +http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_kno|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_kno|tag|http://www.semanlink.net/tag/knowledge_mining 
+http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_kno|title|"Pablo Castro sur Twitter : ""Knowledge mining using the knowledge store feature of #AzureSearch""" +http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_kno|bookmarkOf|https://twitter.com/pmc/status/1177261655794733056?s=20 +http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_kno|creationTime|2020-12-19T11:26:15Z +http://www.semanlink.net/doc/2021/03/weaviate_vector_search_engine_%7C|creationDate|2021-03-30 +http://www.semanlink.net/doc/2021/03/weaviate_vector_search_engine_%7C|tag|http://www.semanlink.net/tag/semantic_search +http://www.semanlink.net/doc/2021/03/weaviate_vector_search_engine_%7C|comment|> Weaviate is an API-based vector search engine with a graph data model that allows users to add data objects as graph nodes and (automatically or manually) add (machine learning) vectors to represent the nodes. Weaviate can be used for use cases ranging from similarity search to filtering out redundant information (i.e., deduplication) and from image search to enterprise NLP-based search. +http://www.semanlink.net/doc/2021/03/weaviate_vector_search_engine_%7C|title|Weaviate Vector Search Engine SeMI Technologies +http://www.semanlink.net/doc/2021/03/weaviate_vector_search_engine_%7C|bookmarkOf|https://www.semi.technology/products/weaviate/ +http://www.semanlink.net/doc/2021/03/weaviate_vector_search_engine_%7C|creationTime|2021-03-30T19:21:25Z +http://www.semanlink.net/doc/2021/02/named_entity_recognition_withou|creationDate|2021-02-24 +http://www.semanlink.net/doc/2021/02/named_entity_recognition_withou|tag|http://www.semanlink.net/tag/weak_supervision +http://www.semanlink.net/doc/2021/02/named_entity_recognition_withou|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2021/02/named_entity_recognition_withou|comment|[Slides](doc:2021/02/developing_nlp_models_without_l), [GitHub](https://github.com/NorskRegnesentral/weak-supervision-for-NER) +http://www.semanlink.net/doc/2021/02/named_entity_recognition_withou|relatedDoc|http://www.semanlink.net/doc/2021/02/developing_nlp_models_without_l +http://www.semanlink.net/doc/2021/02/named_entity_recognition_withou|title|Named Entity Recognition without Labelled Data: A Weak Supervision Approach (2020) +http://www.semanlink.net/doc/2021/02/named_entity_recognition_withou|bookmarkOf|https://www.aclweb.org/anthology/2020.acl-main.139.pdf +http://www.semanlink.net/doc/2021/02/named_entity_recognition_withou|creationTime|2021-02-24T16:24:40Z +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|creationDate|2021-06-23 +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|tag|http://www.semanlink.net/tag/search +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|tag|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|tag|http://www.semanlink.net/tag/yoav_goldberg +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|tag|http://www.semanlink.net/tag/allen_institute_for_ai_a2i +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|arxiv_author|Shauli Ravfogel +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|arxiv_author|Yoav 
Goldberg +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|arxiv_author|Hillel Taub-Tabib +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|comment|"how to extend a +search paradigm we call “**extractive search**” with +neural similarity techniques. + +> some information needs require extracting +and aggregating sub-sentence information +(words, phrases, or entities) from multiple documents +(e.g. a list of all the risk factors for a specific +disease and their number of mentions, or a comprehensive +table of startups and CEOs). + +> extractive search combines +document selection with information extraction. **The query is extended with capture slots**: +these are **search terms that act as variables, whose +values should be extracted**. +> The user +is then presented with the matched documents, each +annotated with the corresponding captured spans, +as well as aggregate information over the captured +spans + +Conclusion : + +> We presented a system for neural extractive search. +While we found our system to be useful for scientific +search, it also has clear limitations and areas +for improvement, both in terms of accuracy (only +72.2% of the returned results are relevant, both the +alignment and similarity models generalize well to +some relations but not to others), and in terms of +scale + +[Video of demo](https://www.youtube.com/watch?v=TtqWi2GgB5A&t=1832s)" +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|title|[2106.04612] Neural Extractive Search +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|bookmarkOf|https://arxiv.org/abs/2106.04612 +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|creationTime|2021-06-23T01:47:35Z +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|arxiv_summary|"Domain experts often need to extract structured information from large +corpora. We advocate for a search paradigm called ``extractive search'', in +which a search query is enriched with capture-slots, to allow for such rapid +extraction. Such an extractive search system can be built around syntactic +structures, resulting in high-precision, low-recall results. We show how the +recall can be improved using neural retrieval and alignment. The goals of this +paper are to concisely introduce the extractive-search paradigm; and to +demonstrate a prototype neural retrieval system for extractive search and its +benefits and potential. Our prototype is available at +\url{https://spike.neural-sim.apps.allenai.org/} and a video demonstration is +available at \url{https://vimeo.com/559586687}." 
+http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|arxiv_firstAuthor|Shauli Ravfogel +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|arxiv_updated|2021-06-08T18:03:31Z +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|arxiv_title|Neural Extractive Search +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|arxiv_published|2021-06-08T18:03:31Z +http://www.semanlink.net/doc/2021/06/2106_04612_neural_extractive_|arxiv_num|2106.04612 +http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp|creationDate|2020-11-24 +http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp|tag|http://www.semanlink.net/tag/emnlp_2020 +http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp|tag|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp|tag|http://www.semanlink.net/tag/knowledge_graph_augmented_language_models +http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp|comment|"About ""Entity Representations in LMs"", refers to: + +- [[2010.01057] LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](doc:2020/11/2010_01057_luke_deep_context) by [Yamada](/showprop.do?pptyuri=http%3A%2F%2Fwww.semanlink.net%2F2001%2F00%2Fsemanlink-schema%23arxiv_author&pptyval=Ikuya%2BYamada) +- Shen et al employ a background KG in +their GLM (Graph-guided Masked Language Model): the graph supplies a +vocabulary of named entities with their connectivity patterns (reachable +entities in k-hops) +- [[2004.07202] Entities as Experts: Sparse Memory Access with Entity Supervision](doc:2020/07/2004_07202_entities_as_expert) +- Poerner et al make use of [Wikipedia2Vec](tag:wikipedia2vec) (by Yamada) in their E-BERT + +Autoregressive KG-augmented LMs: generation process of LMs is conditioned by or enriched with structured knowledge like small subgraphs!" 
+http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp|relatedDoc|http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context +http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp|relatedDoc|http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert +http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp|title|Knowledge Graphs in NLP @ EMNLP 2020 by Michael Galkin Nov, 2020 Medium +http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp|bookmarkOf|https://mgalkin.medium.com/knowledge-graphs-in-nlp-emnlp-2020-2f98ec527738#c2ae +http://www.semanlink.net/doc/2020/11/knowledge_graphs_in_nlp_emnlp|creationTime|2020-11-24T09:46:17Z +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|creationDate|2020-07-11 +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|tag|http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|tag|http://www.semanlink.net/tag/memory_networks +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|tag|http://www.semanlink.net/tag/knowledge_graph_augmented_language_models +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_author|Nicholas FitzGerald +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_author|Livio Baldini Soares +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_author|Tom Kwiatkowski +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_author|Eunsol Choi +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_author|Thibault Févry +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|comment|"> We focus on the problem of **capturing declarative knowledge in the learned parameters of a language model**... + +> Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; + +> To understand the motivation for distinct and +independent entity representations: A traditional Transformer would need to build an internal representation +of Charles Darwin from the words “Charles” +and “Darwin”... Conversely, EAE can access +a dedicated representation of “Charles Darwin”, +which is a memory of all of the contexts in which +this entity has previously been mentioned.... Having retrieved +and re-integrated this memory it is much easier for +EAE to relate the question to the answer + +> EaE's entity representations are learned directly from text. 
Correct identification, and representation, of entities is essential to EaE's performance + +Based on transformer architecture + +Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_)" +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|relatedDoc|http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_ +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|title|[2004.07202] Entities as Experts: Sparse Memory Access with Entity Supervision +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|bookmarkOf|https://arxiv.org/abs/2004.07202 +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|creationTime|2020-07-11T15:09:10Z +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_summary|"We focus on the problem of capturing declarative knowledge in the learned +parameters of a language model. We introduce a new model, Entities as Experts +(EaE), that can access distinct memories of the entities mentioned in a piece +of text. Unlike previous efforts to integrate entity knowledge into sequence +models, EaE's entity representations are learned directly from text. These +representations capture sufficient knowledge to answer TriviaQA questions such +as ""Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, +Eric Roberts?"". EaE outperforms a Transformer model with $30\times$ the +parameters on this task. According to the Lama knowledge probes, EaE also +contains more factual knowledge than a similar sized Bert. We show that +associating parameters with specific entities means that EaE only needs to +access a fraction of its parameters at inference time, and we show that the +correct identification, and representation, of entities is essential to EaE's +performance. We also argue that the discrete and independent entity +representations in EaE make it more modular and interpretable than the +Transformer architecture on which it is based." 
+http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_firstAuthor|Thibault Févry +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_updated|2020-04-15T17:00:05Z +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_title|Entities as Experts: Sparse Memory Access with Entity Supervision +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_published|2020-04-15T17:00:05Z +http://www.semanlink.net/doc/2020/07/2004_07202_entities_as_expert|arxiv_num|2004.07202 +http://www.semanlink.net/doc/2020/07/au_sahel_des_arbres_et_des_bec|creationDate|2020-07-02 +http://www.semanlink.net/doc/2020/07/au_sahel_des_arbres_et_des_bec|tag|http://www.semanlink.net/tag/reboisement +http://www.semanlink.net/doc/2020/07/au_sahel_des_arbres_et_des_bec|tag|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/doc/2020/07/au_sahel_des_arbres_et_des_bec|tag|http://www.semanlink.net/tag/arbres +http://www.semanlink.net/doc/2020/07/au_sahel_des_arbres_et_des_bec|tag|http://www.semanlink.net/tag/desert +http://www.semanlink.net/doc/2020/07/au_sahel_des_arbres_et_des_bec|tag|http://www.semanlink.net/tag/baobab +http://www.semanlink.net/doc/2020/07/au_sahel_des_arbres_et_des_bec|title|Au Sahel, des arbres et des bêches pour lutter contre l’avancée du désert +http://www.semanlink.net/doc/2020/07/au_sahel_des_arbres_et_des_bec|bookmarkOf|https://www.lemonde.fr/afrique/article/2020/06/17/au-sahel-des-arbres-et-des-beches-pour-lutter-contre-l-avancee-du-desert_6043108_3212.html +http://www.semanlink.net/doc/2020/07/au_sahel_des_arbres_et_des_bec|creationTime|2020-07-02T15:12:42Z +http://www.semanlink.net/doc/2021/08/%E2%80%98the_us_should_be_held_accounta|creationDate|2021-08-16 +http://www.semanlink.net/doc/2021/08/%E2%80%98the_us_should_be_held_accounta|tag|http://www.semanlink.net/tag/guantanamo +http://www.semanlink.net/doc/2021/08/%E2%80%98the_us_should_be_held_accounta|title|‘The US should be held accountable’: Guantánamo survivor on the war on terror’s failure Guantánamo Bay The Guardian +http://www.semanlink.net/doc/2021/08/%E2%80%98the_us_should_be_held_accounta|bookmarkOf|https://www.theguardian.com/us-news/2021/aug/16/guantanamo-detainee-mansoor-adayfi +http://www.semanlink.net/doc/2021/08/%E2%80%98the_us_should_be_held_accounta|creationTime|2021-08-16T10:20:46Z +http://www.semanlink.net/doc/2021/01/the_big_short_film_|creationDate|2021-01-16 +http://www.semanlink.net/doc/2021/01/the_big_short_film_|tag|http://www.semanlink.net/tag/crise_des_subprimes +http://www.semanlink.net/doc/2021/01/the_big_short_film_|tag|http://www.semanlink.net/tag/brad_pitt +http://www.semanlink.net/doc/2021/01/the_big_short_film_|tag|http://www.semanlink.net/tag/film_americain +http://www.semanlink.net/doc/2021/01/the_big_short_film_|comment|avec Ryan Gosling et Brad Pitt. 
+http://www.semanlink.net/doc/2021/01/the_big_short_film_|title|The Big Short (film) +http://www.semanlink.net/doc/2021/01/the_big_short_film_|bookmarkOf|https://en.wikipedia.org/wiki/The_Big_Short_(film) +http://www.semanlink.net/doc/2021/01/the_big_short_film_|creationTime|2021-01-16T23:59:52Z +http://www.semanlink.net/doc/2020/08/everything_you_always_wanted_to|creationDate|2020-08-12 +http://www.semanlink.net/doc/2020/08/everything_you_always_wanted_to|tag|http://www.semanlink.net/tag/manik_varma +http://www.semanlink.net/doc/2020/08/everything_you_always_wanted_to|tag|http://www.semanlink.net/tag/microsoft_research +http://www.semanlink.net/doc/2020/08/everything_you_always_wanted_to|tag|http://www.semanlink.net/tag/extreme_multi_label_classification +http://www.semanlink.net/doc/2020/08/everything_you_always_wanted_to|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2020/08/everything_you_always_wanted_to|title|Everything you always wanted to know about extreme classification (but were afraid to ask) - Microsoft Research - 2019 +http://www.semanlink.net/doc/2020/08/everything_you_always_wanted_to|bookmarkOf|https://www.microsoft.com/en-us/research/blog/everything-you-always-wanted-to-know-about-extreme-classification-but-were-afraid-to-ask/ +http://www.semanlink.net/doc/2020/08/everything_you_always_wanted_to|creationTime|2020-08-12T01:08:38Z +http://www.semanlink.net/doc/2021/04/comment_les_mains_coupees_du_co|creationDate|2021-04-28 +http://www.semanlink.net/doc/2021/04/comment_les_mains_coupees_du_co|tag|http://www.semanlink.net/tag/horreur +http://www.semanlink.net/doc/2021/04/comment_les_mains_coupees_du_co|tag|http://www.semanlink.net/tag/histoire_coloniale +http://www.semanlink.net/doc/2021/04/comment_les_mains_coupees_du_co|tag|http://www.semanlink.net/tag/congo_belge +http://www.semanlink.net/doc/2021/04/comment_les_mains_coupees_du_co|title|Comment les mains coupées du Congo ont secoué l’Europe coloniale +http://www.semanlink.net/doc/2021/04/comment_les_mains_coupees_du_co|bookmarkOf|https://www.lemonde.fr/afrique/video/2021/04/25/comment-les-mains-coupees-du-congo-ont-secoue-l-europe-coloniale-flashback-2_6077995_3212.html +http://www.semanlink.net/doc/2021/04/comment_les_mains_coupees_du_co|creationTime|2021-04-28T16:58:55Z +http://www.semanlink.net/doc/2020/12/exploration_engines_the_koodo|creationDate|2020-12-10 +http://www.semanlink.net/doc/2020/12/exploration_engines_the_koodo|tag|http://www.semanlink.net/tag/knowledge_graph_search_engine +http://www.semanlink.net/doc/2020/12/exploration_engines_the_koodo|tag|http://www.semanlink.net/tag/graph_visualization +http://www.semanlink.net/doc/2020/12/exploration_engines_the_koodo|comment|Wikigraph uses the simplest possible algorithm to generate graphs, which is particularly good for making unexpected connection discoveries. 
+http://www.semanlink.net/doc/2020/12/exploration_engines_the_koodo|title|Exploration Engines - the koodos collective +http://www.semanlink.net/doc/2020/12/exploration_engines_the_koodo|bookmarkOf|https://kcollective.substack.com/p/exploration-engines +http://www.semanlink.net/doc/2020/12/exploration_engines_the_koodo|creationTime|2020-12-10T13:37:11Z +http://www.semanlink.net/doc/2021/08/knowledge_graphs_in_natural_lan|creationDate|2021-08-06 +http://www.semanlink.net/doc/2021/08/knowledge_graphs_in_natural_lan|tag|http://www.semanlink.net/tag/acl_2021 +http://www.semanlink.net/doc/2021/08/knowledge_graphs_in_natural_lan|tag|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/doc/2021/08/knowledge_graphs_in_natural_lan|title|Knowledge Graphs in Natural Language Processing @ ACL 2021 by Michael Galkin Aug, 2021 +http://www.semanlink.net/doc/2021/08/knowledge_graphs_in_natural_lan|bookmarkOf|https://towardsdatascience.com/knowledge-graphs-in-natural-language-processing-acl-2021-6cac04f39761 +http://www.semanlink.net/doc/2021/08/knowledge_graphs_in_natural_lan|creationTime|2021-08-06T13:28:57Z +http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_|creationDate|2020-08-01 +http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_|tag|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_|tag|http://www.semanlink.net/tag/cory_doctorow +http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_|tag|http://www.semanlink.net/tag/google +http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_|tag|http://www.semanlink.net/tag/antitrust +http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_|comment|> Historically, companies that were bad at something would lose to companies that were good at it. But in the new Gilded Age, where we no longer enforce antitrust laws, companies that are bad at things can buy up companies that are good at them, a monopolistic tactic. +http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_|title|"Cory Doctorow #BLM sur Twitter : ""Late last June, Google bought out ""North,""..." 
+http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_|bookmarkOf|https://twitter.com/doctorow/status/1288622926850220032 +http://www.semanlink.net/doc/2020/08/cory_doctorow_blm_sur_twitter_|creationTime|2020-08-01T11:57:38Z +http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3|creationDate|2021-01-13 +http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3|tag|http://www.semanlink.net/tag/maxwell_s_demon +http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3|tag|http://www.semanlink.net/tag/probability_distribution +http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3|tag|http://www.semanlink.net/tag/classical_mechanics +http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3|tag|http://www.semanlink.net/tag/optique +http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3|comment|regarding the computer graphics trick: +http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3|title|D ␣ a ␣ n ␣ P ␣ i ␣ p ␣ o ␣ n ␣ i on Twitter: As an undergraduate I took an advanced classical mechanics course and one important result was Liouville's theorem... +http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3|bookmarkOf|https://twitter.com/sigfpe/status/1348653330554253314 +http://www.semanlink.net/doc/2021/01/d_%E2%90%A3_a_%E2%90%A3_n_%E2%90%A3_p_%E2%90%A3_i_%E2%90%A3_p_%E2%90%A3_o_%E2%90%A3_n_%E2%90%A3|creationTime|2021-01-13T11:42:31Z +http://www.semanlink.net/doc/2021/10/2109_04711_pre_train_or_annot|creationDate|2021-10-14 +http://www.semanlink.net/doc/2021/10/2109_04711_pre_train_or_annot|tag|http://www.semanlink.net/tag/annotations +http://www.semanlink.net/doc/2021/10/2109_04711_pre_train_or_annot|tag|http://www.semanlink.net/tag/domain_specific_nlp +http://www.semanlink.net/doc/2021/10/2109_04711_pre_train_or_annot|tag|http://www.semanlink.net/tag/domain_adaptation +http://www.semanlink.net/doc/2021/10/2109_04711_pre_train_or_annot|title|[2109.04711] Pre-train or Annotate? Domain Adaptation with a Constrained Budget +http://www.semanlink.net/doc/2021/10/2109_04711_pre_train_or_annot|bookmarkOf|https://arxiv.org/abs/2109.04711 +http://www.semanlink.net/doc/2021/10/2109_04711_pre_train_or_annot|creationTime|2021-10-14T16:01:19Z +http://www.semanlink.net/doc/2021/02/custom_nlp_approaches_to_data_a|creationDate|2021-02-01 +http://www.semanlink.net/doc/2021/02/custom_nlp_approaches_to_data_a|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2021/02/custom_nlp_approaches_to_data_a|tag|http://www.semanlink.net/tag/nlp_data_anonymization +http://www.semanlink.net/doc/2021/02/custom_nlp_approaches_to_data_a|comment|"> **Presidio**, a production ready open-source service, available for free, for anyone who wishes to address the data privacy problem. 
+> +> Presidio allows any user to create standard and transparent processes for anonymizing PII entities on structured and unstructured data. To do so, it exposes a set of predefined PII recognizers (for common entities like names, credit card numbers and phone numbers), and tools for extending it with new logic for identifying more specific PII entities." +http://www.semanlink.net/doc/2021/02/custom_nlp_approaches_to_data_a|title|Custom NLP Approaches to Data Anonymization by Omri Mendels Towards Data Science +http://www.semanlink.net/doc/2021/02/custom_nlp_approaches_to_data_a|bookmarkOf|https://towardsdatascience.com/nlp-approaches-to-data-anonymization-1fb5bde6b929 +http://www.semanlink.net/doc/2021/02/custom_nlp_approaches_to_data_a|creationTime|2021-02-01T08:13:36Z +http://www.semanlink.net/doc/2021/05/un_partenariat_plantes_champi|creationDate|2021-05-21 +http://www.semanlink.net/doc/2021/05/un_partenariat_plantes_champi|tag|http://www.semanlink.net/tag/histoire_de_la_vie +http://www.semanlink.net/doc/2021/05/un_partenariat_plantes_champi|tag|http://www.semanlink.net/tag/symbiose +http://www.semanlink.net/doc/2021/05/un_partenariat_plantes_champi|tag|http://www.semanlink.net/tag/crispr_cas9 +http://www.semanlink.net/doc/2021/05/un_partenariat_plantes_champi|tag|http://www.semanlink.net/tag/champignon +http://www.semanlink.net/doc/2021/05/un_partenariat_plantes_champi|title|Un partenariat plantes - champignons à l’origine de la végétalisation terrestre CNRS +http://www.semanlink.net/doc/2021/05/un_partenariat_plantes_champi|bookmarkOf|http://www.cnrs.fr/fr/un-partenariat-plantes-champignons-lorigine-de-la-vegetalisation-terrestre +http://www.semanlink.net/doc/2021/05/un_partenariat_plantes_champi|creationTime|2021-05-21T12:00:38Z +http://www.semanlink.net/doc/2021/03/pulling_turtle_rdf_triples_from|creationDate|2021-03-30 +http://www.semanlink.net/doc/2021/03/pulling_turtle_rdf_triples_from|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.semanlink.net/doc/2021/03/pulling_turtle_rdf_triples_from|tag|http://www.semanlink.net/tag/google_knowledge_graph +http://www.semanlink.net/doc/2021/03/pulling_turtle_rdf_triples_from|title|Pulling Turtle RDF triples from the Google Knowledge Graph +http://www.semanlink.net/doc/2021/03/pulling_turtle_rdf_triples_from|bookmarkOf|http://www.bobdc.com/blog/turtlefromgooglekg/ +http://www.semanlink.net/doc/2021/03/pulling_turtle_rdf_triples_from|creationTime|2021-03-30T00:43:13Z +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|creationDate|2021-04-11 +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|tag|http://www.semanlink.net/tag/domain_specific_bert +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|tag|http://www.semanlink.net/tag/biomedical_nlp +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_author|Michael Lucas +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_author|Jianfeng Gao +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_author|Robert Tinn +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_author|Hoifung Poon +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_author|Tristan Naumann 
+http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_author|Xiaodong Liu +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_author|Naoto Usuyama +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_author|Yu Gu +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_author|Hao Cheng +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|comment|> A prevailing assumption is that even domain-specific pretraining can benefit by starting from general-domain language models. In this paper, we challenge this assumption by showing that for domains with abundant unlabeled text, such as biomedicine, pretraining language models from scratch results in substantial gains over continual pretraining of general-domain language models +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|title|[2007.15779] Domain-Specific Language Model Pretraining for Biomedical Natural Language Processing +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|bookmarkOf|https://arxiv.org/abs/2007.15779 +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|creationTime|2021-04-11T16:38:59Z +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_summary|"Pretraining large neural language models, such as BERT, has led to impressive +gains on many natural language processing (NLP) tasks. However, most +pretraining efforts focus on general domain corpora, such as newswire and Web. +A prevailing assumption is that even domain-specific pretraining can benefit by +starting from general-domain language models. In this paper, we challenge this +assumption by showing that for domains with abundant unlabeled text, such as +biomedicine, pretraining language models from scratch results in substantial +gains over continual pretraining of general-domain language models. To +facilitate this investigation, we compile a comprehensive biomedical NLP +benchmark from publicly-available datasets. Our experiments show that +domain-specific pretraining serves as a solid foundation for a wide range of +biomedical NLP tasks, leading to new state-of-the-art results across the board. +Further, in conducting a thorough evaluation of modeling choices, both for +pretraining and task-specific fine-tuning, we discover that some common +practices are unnecessary with BERT models, such as using complex tagging +schemes in named entity recognition (NER). To help accelerate research in +biomedical NLP, we have released our state-of-the-art pretrained and +task-specific models for the community, and created a leaderboard featuring our +BLURB benchmark (short for Biomedical Language Understanding & Reasoning +Benchmark) at https://aka.ms/BLURB." 
+http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_firstAuthor|Yu Gu +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_updated|2021-02-11T19:13:59Z +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_title|Domain-Specific Language Model Pretraining for Biomedical Natural Language Processing +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_published|2020-07-31T00:04:15Z +http://www.semanlink.net/doc/2021/04/2007_15779_domain_specific_la|arxiv_num|2007.15779 +http://www.semanlink.net/doc/2021/09/song_of_lawino|creationDate|2021-09-25 +http://www.semanlink.net/doc/2021/09/song_of_lawino|tag|http://www.semanlink.net/tag/ouganda +http://www.semanlink.net/doc/2021/09/song_of_lawino|tag|http://www.semanlink.net/tag/colonisation +http://www.semanlink.net/doc/2021/09/song_of_lawino|tag|http://www.semanlink.net/tag/livre +http://www.semanlink.net/doc/2021/09/song_of_lawino|title|Song of Lawino +http://www.semanlink.net/doc/2021/09/song_of_lawino|bookmarkOf|https://en.wikipedia.org/wiki/Song_of_Lawino +http://www.semanlink.net/doc/2021/09/song_of_lawino|creationTime|2021-09-25T16:52:04Z +http://www.semanlink.net/doc/2021/03/university_industry_collaborati|creationDate|2021-03-02 +http://www.semanlink.net/doc/2021/03/university_industry_collaborati|tag|http://www.semanlink.net/tag/chiffres +http://www.semanlink.net/doc/2021/03/university_industry_collaborati|tag|http://www.semanlink.net/tag/r_d +http://www.semanlink.net/doc/2021/03/university_industry_collaborati|tag|http://www.semanlink.net/tag/retard_technologique_francais +http://www.semanlink.net/doc/2021/03/university_industry_collaborati|comment|[video][Vaccins contre le Covid-19 : pourquoi la France accuse-t-elle un tel retard ?](doc:2021/03/vaccins_contre_le_covid_19_po) +http://www.semanlink.net/doc/2021/03/university_industry_collaborati|relatedDoc|http://www.semanlink.net/doc/2021/03/vaccins_contre_le_covid_19_po +http://www.semanlink.net/doc/2021/03/university_industry_collaborati|title|University-industry collaboration in R&D - World Economic Forum +http://www.semanlink.net/doc/2021/03/university_industry_collaborati|bookmarkOf|http://reports.weforum.org/pdf/gci-2017-2018-scorecard/WEF_GCI_2017_2018_Scorecard_EOSQ072.pdf +http://www.semanlink.net/doc/2021/03/university_industry_collaborati|creationTime|2021-03-02T12:13:55Z +http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|creationDate|2020-12-17 +http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|tag|http://www.semanlink.net/tag/emnlp_2020 +http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|tag|http://www.semanlink.net/tag/nlp_task_as_qa_problem +http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|tag|http://www.semanlink.net/tag/event_extraction +http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|tag|http://www.semanlink.net/tag/end_to_end_learning +http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|comment|"The event extraction task formulated as a [Question Answering](tag:question_answering)/machine reading comprehension task. 
+
+> Existing work in event argument extraction typically relies heavily on entity recognition as a preprocessing/concurrent step, causing the well-known problem of error propagation. To avoid this issue, we introduce a new paradigm for event extraction by formulating it as a question answering (QA) task that extracts the event arguments in an end-to-end manner
+
+[GitHub](https://github.com/xinyadu/eeqa)
+
+Related to [[1902.10909] BERT for Joint Intent Classification and Slot Filling](doc:2020/01/_1902_10909_bert_for_joint_int)"
+http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|relatedDoc|http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int
+http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|title|Event Extraction by Answering (Almost) Natural Questions
+http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|bookmarkOf|https://www.aclweb.org/anthology/2020.emnlp-main.49.pdf
+http://www.semanlink.net/doc/2020/12/event_extraction_by_answering_|creationTime|2020-12-17T14:39:56Z
+http://www.semanlink.net/doc/2020/08/why_you_should_do_nlp_beyond_en|creationDate|2020-08-01
+http://www.semanlink.net/doc/2020/08/why_you_should_do_nlp_beyond_en|tag|http://www.semanlink.net/tag/sebastian_ruder
+http://www.semanlink.net/doc/2020/08/why_you_should_do_nlp_beyond_en|tag|http://www.semanlink.net/tag/low_resource_languages
+http://www.semanlink.net/doc/2020/08/why_you_should_do_nlp_beyond_en|tag|http://www.semanlink.net/tag/multilingual_nlp
+http://www.semanlink.net/doc/2020/08/why_you_should_do_nlp_beyond_en|comment|"> Only a few hundred languages
+are represented on the web and speakers of minority languages are severely
+limited in the information available to them."
+http://www.semanlink.net/doc/2020/08/why_you_should_do_nlp_beyond_en|title|Why You Should Do NLP Beyond English
+http://www.semanlink.net/doc/2020/08/why_you_should_do_nlp_beyond_en|bookmarkOf|https://ruder.io/nlp-beyond-english/
+http://www.semanlink.net/doc/2020/08/why_you_should_do_nlp_beyond_en|creationTime|2020-08-01T18:50:35Z
+http://www.semanlink.net/doc/2020/12/%C2%AB_la_gestion_du_covid_19_ressem|creationDate|2020-12-05
+http://www.semanlink.net/doc/2020/12/%C2%AB_la_gestion_du_covid_19_ressem|tag|http://www.semanlink.net/tag/procrastination
+http://www.semanlink.net/doc/2020/12/%C2%AB_la_gestion_du_covid_19_ressem|tag|http://www.semanlink.net/tag/rechauffement_climatique
+http://www.semanlink.net/doc/2020/12/%C2%AB_la_gestion_du_covid_19_ressem|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales
+http://www.semanlink.net/doc/2020/12/%C2%AB_la_gestion_du_covid_19_ressem|tag|http://www.semanlink.net/tag/coronavirus
+http://www.semanlink.net/doc/2020/12/%C2%AB_la_gestion_du_covid_19_ressem|comment|"> « La plupart des politiques ne comprennent pas que, dans ce genre de dynamique, c’est quand on passe de 2 cas à 4 cas qu’il faut réagir »
+
+L'exponentielle, c'est plus fort que toi"
+http://www.semanlink.net/doc/2020/12/%C2%AB_la_gestion_du_covid_19_ressem|title|« La gestion du Covid-19 ressemble à celle du réchauffement climatique : même procrastination du pouvoir devant la certitude du désastre »
+http://www.semanlink.net/doc/2020/12/%C2%AB_la_gestion_du_covid_19_ressem|bookmarkOf|https://www.lemonde.fr/idees/article/2020/12/05/la-gestion-du-covid-ressemble-a-celle-du-climat-meme-procrastination-du-pouvoir-devant-la-certitude-du-desastre_6062280_3232.html
+http://www.semanlink.net/doc/2020/12/%C2%AB_la_gestion_du_covid_19_ressem|creationTime|2020-12-05T16:51:35Z +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|creationDate|2021-05-23 +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|tag|http://www.semanlink.net/tag/yoav_goldberg +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|tag|http://www.semanlink.net/tag/grounded_language_learning +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|arxiv_author|Noah A. Smith +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|arxiv_author|William Merrill +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|arxiv_author|Yoav Goldberg +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|arxiv_author|Roy Schwartz +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|title|[2104.10809] Provable Limitations of Acquiring Meaning from Ungrounded Form: What will Future Language Models Understand? +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|bookmarkOf|https://arxiv.org/abs/2104.10809 +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|creationTime|2021-05-23T01:20:07Z +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|arxiv_summary|"Language models trained on billions of tokens have recently led to +unprecedented results on many NLP tasks. This success raises the question of +whether, in principle, a system can ever ""understand"" raw text without access +to some form of grounding. We formally investigate the abilities of ungrounded +systems to acquire meaning. Our analysis focuses on the role of ""assertions"": +contexts within raw text that provide indirect clues about underlying +semantics. We study whether assertions enable a system to emulate +representations preserving semantic relations like equivalence. We find that +assertions enable semantic emulation if all expressions in the language are +referentially transparent. However, if the language uses non-transparent +patterns like variable binding, we show that emulation can become an +uncomputable problem. Finally, we discuss differences between our formal model +and natural language, exploring how our results generalize to a modal setting +and other semantic relations. Together, our results suggest that assertions in +code or language do not provide sufficient signal to fully emulate semantic +representations. We formalize ways in which ungrounded language models appear +to be fundamentally limited in their ability to ""understand""." +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|arxiv_firstAuthor|William Merrill +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|arxiv_updated|2021-04-22T01:00:17Z +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|arxiv_title|Provable Limitations of Acquiring Meaning from Ungrounded Form: What will Future Language Models Understand? 
+http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|arxiv_published|2021-04-22T01:00:17Z +http://www.semanlink.net/doc/2021/05/2104_10809_provable_limitatio|arxiv_num|2104.10809 +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature|creationDate|2020-11-24 +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature|tag|http://www.semanlink.net/tag/allen_institute_for_ai_a2i +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature|tag|http://www.semanlink.net/tag/scientific_information_extraction +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature|tag|http://www.semanlink.net/tag/automatic_summarization +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature|tag|http://www.semanlink.net/tag/research_papers +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature|comment|[Semantic Scholar](doc:2020/11/semantic_scholar_%7C_ai_powered_r) AI-Powered Research Tool +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature|relatedDoc|http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_ai_powered_r +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature|title|Semantic Scholar TLDR Feature +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature|bookmarkOf|https://tldr.semanticscholar.org/ +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature|creationTime|2020-11-24T09:32:38Z +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|creationDate|2020-10-03 +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|tag|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_author|Sami Abu-El-Haija +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_author|Ines Chami +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_author|Bryan Perozzi +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_author|Christopher Ré +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_author|Kevin Murphy +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|comment|> we aim to **bridge the gap between graph neural networks, network embedding and graph regularization models**. We propose a comprehensive taxonomy of representation learning methods for graph-structured data, aiming to unify several disparate bodies of work. Specifically, we propose a Graph Encoder Decoder Model (GRAPHEDM), which generalizes popular algorithms for semi-supervised learning on graphs (e.g. GraphSage, Graph Convolutional Networks, Graph Attention Networks), and unsupervised learning of graph representations (e.g. DeepWalk, node2vec, etc) into a single consistent approach. 
+http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|title|[2005.03675] Machine Learning on Graphs: A Model and Comprehensive Taxonomy +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|bookmarkOf|https://arxiv.org/abs/2005.03675 +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|creationTime|2020-10-03T15:14:22Z +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_summary|"There has been a surge of recent interest in learning representations for +graph-structured data. Graph representation learning methods have generally +fallen into three main categories, based on the availability of labeled data. +The first, network embedding (such as shallow graph embedding or graph +auto-encoders), focuses on learning unsupervised representations of relational +structure. The second, graph regularized neural networks, leverages graphs to +augment neural network losses with a regularization objective for +semi-supervised learning. The third, graph neural networks, aims to learn +differentiable functions over discrete topologies with arbitrary structure. +However, despite the popularity of these areas there has been surprisingly +little work on unifying the three paradigms. Here, we aim to bridge the gap +between graph neural networks, network embedding and graph regularization +models. We propose a comprehensive taxonomy of representation learning methods +for graph-structured data, aiming to unify several disparate bodies of work. +Specifically, we propose a Graph Encoder Decoder Model (GRAPHEDM), which +generalizes popular algorithms for semi-supervised learning on graphs (e.g. +GraphSage, Graph Convolutional Networks, Graph Attention Networks), and +unsupervised learning of graph representations (e.g. DeepWalk, node2vec, etc) +into a single consistent approach. To illustrate the generality of this +approach, we fit over thirty existing methods into this framework. We believe +that this unifying view both provides a solid foundation for understanding the +intuition behind these methods, and enables future research in the area." +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_firstAuthor|Ines Chami +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_updated|2020-05-07T18:00:02Z +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_title|Machine Learning on Graphs: A Model and Comprehensive Taxonomy +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_published|2020-05-07T18:00:02Z +http://www.semanlink.net/doc/2020/10/2005_03675_machine_learning_o|arxiv_num|2005.03675 +http://www.semanlink.net/doc/2021/06/la_fillette_de_denisova_mere_d|creationDate|2021-06-25 +http://www.semanlink.net/doc/2021/06/la_fillette_de_denisova_mere_d|tag|http://www.semanlink.net/tag/denisovan +http://www.semanlink.net/doc/2021/06/la_fillette_de_denisova_mere_d|comment|L'analyse génétique d'une phalange et d'une dent de plus de 50 000 ans complique notre généalogie. 
+http://www.semanlink.net/doc/2021/06/la_fillette_de_denisova_mere_d|title|La fillette de Denisova, mère d'une autre humanité (2010) +http://www.semanlink.net/doc/2021/06/la_fillette_de_denisova_mere_d|bookmarkOf|https://www.lemonde.fr/planete/article/2010/12/24/la-fillette-de-denisova-mere-d-une-autre-humanite_1457433_3244.html +http://www.semanlink.net/doc/2021/06/la_fillette_de_denisova_mere_d|creationTime|2021-06-25T20:02:22Z +http://www.semanlink.net/doc/2020/08/niger_le_grand_fleuve_du_sa|creationDate|2020-08-01 +http://www.semanlink.net/doc/2020/08/niger_le_grand_fleuve_du_sa|tag|http://www.semanlink.net/tag/music_of_africa +http://www.semanlink.net/doc/2020/08/niger_le_grand_fleuve_du_sa|tag|http://www.semanlink.net/tag/niger +http://www.semanlink.net/doc/2020/08/niger_le_grand_fleuve_du_sa|tag|http://www.semanlink.net/tag/france_culture +http://www.semanlink.net/doc/2020/08/niger_le_grand_fleuve_du_sa|tag|http://www.semanlink.net/tag/fleuve_niger +http://www.semanlink.net/doc/2020/08/niger_le_grand_fleuve_du_sa|title|"Niger : le ""Grand fleuve"" du Sahel - France Culture - Ép. 3/5 - Chansons d'eau douce" +http://www.semanlink.net/doc/2020/08/niger_le_grand_fleuve_du_sa|bookmarkOf|https://www.franceculture.fr/emissions/la-serie-musicale-dete/chansons-deau-douce-35-niger-le-grand-fleuve-du-sahel +http://www.semanlink.net/doc/2020/08/niger_le_grand_fleuve_du_sa|creationTime|2020-08-01T21:55:42Z +http://www.semanlink.net/doc/2021/10/masakhaner_named_entity_recogn|creationDate|2021-10-14 +http://www.semanlink.net/doc/2021/10/masakhaner_named_entity_recogn|tag|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/doc/2021/10/masakhaner_named_entity_recogn|tag|http://www.semanlink.net/tag/masakhane +http://www.semanlink.net/doc/2021/10/masakhaner_named_entity_recogn|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2021/10/masakhaner_named_entity_recogn|title|MasakhaNER: Named Entity Recognition for African Languages MIT Press +http://www.semanlink.net/doc/2021/10/masakhaner_named_entity_recogn|bookmarkOf|https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00416/107614/MasakhaNER-Named-Entity-Recognition-for-African +http://www.semanlink.net/doc/2021/10/masakhaner_named_entity_recogn|creationTime|2021-10-14T16:41:22Z +http://www.semanlink.net/doc/2021/10/patrick_artus_%C2%AB_l%E2%80%99economie_de|creationDate|2021-10-03 +http://www.semanlink.net/doc/2021/10/patrick_artus_%C2%AB_l%E2%80%99economie_de|tag|http://www.semanlink.net/tag/bitcoin +http://www.semanlink.net/doc/2021/10/patrick_artus_%C2%AB_l%E2%80%99economie_de|tag|http://www.semanlink.net/tag/economie +http://www.semanlink.net/doc/2021/10/patrick_artus_%C2%AB_l%E2%80%99economie_de|tag|http://www.semanlink.net/tag/speculation +http://www.semanlink.net/doc/2021/10/patrick_artus_%C2%AB_l%E2%80%99economie_de|comment|La faiblesse des rendements des actifs traditionnels pousse les investisseurs à choisir des actifs spéculatifs, au détriment de l’économie productive +http://www.semanlink.net/doc/2021/10/patrick_artus_%C2%AB_l%E2%80%99economie_de|title|Patrick Artus : « L’économie de spéculation est inefficace » +http://www.semanlink.net/doc/2021/10/patrick_artus_%C2%AB_l%E2%80%99economie_de|bookmarkOf|https://www.lemonde.fr/idees/article/2021/10/02/patrick-artus-l-economie-de-speculation-est-inefficace_6096824_3232.html +http://www.semanlink.net/doc/2021/10/patrick_artus_%C2%AB_l%E2%80%99economie_de|creationTime|2021-10-03T11:31:40Z 
+http://www.semanlink.net/doc/2021/04/au_bresil_le_naufrage_de_l%E2%80%99ope|creationDate|2021-04-10 +http://www.semanlink.net/doc/2021/04/au_bresil_le_naufrage_de_l%E2%80%99ope|tag|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/doc/2021/04/au_bresil_le_naufrage_de_l%E2%80%99ope|tag|http://www.semanlink.net/tag/lava_jato +http://www.semanlink.net/doc/2021/04/au_bresil_le_naufrage_de_l%E2%80%99ope|title|Au Brésil, le naufrage de l’opération anticorruption « Lava Jato » +http://www.semanlink.net/doc/2021/04/au_bresil_le_naufrage_de_l%E2%80%99ope|bookmarkOf|https://www.lemonde.fr/international/article/2021/04/09/au-bresil-une-operation-anticorruption-aux-methodes-contestables_6076204_3210.html +http://www.semanlink.net/doc/2021/04/au_bresil_le_naufrage_de_l%E2%80%99ope|creationTime|2021-04-10T23:32:04Z +http://www.semanlink.net/doc/2021/06/africanlp_workshop_%7C_putting_af|creationDate|2021-06-30 +http://www.semanlink.net/doc/2021/06/africanlp_workshop_%7C_putting_af|tag|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/doc/2021/06/africanlp_workshop_%7C_putting_af|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2021/06/africanlp_workshop_%7C_putting_af|tag|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/doc/2021/06/africanlp_workshop_%7C_putting_af|title|AfricaNLP Workshop Putting Africa on the NLP Map. ICLR 2020, Virtual Event +http://www.semanlink.net/doc/2021/06/africanlp_workshop_%7C_putting_af|bookmarkOf|https://africanlp-workshop.github.io/ +http://www.semanlink.net/doc/2021/06/africanlp_workshop_%7C_putting_af|creationTime|2021-06-30T00:44:07Z +http://www.semanlink.net/doc/2021/01/a_criticism_of_stochastic_parro|creationDate|2021-01-23 +http://www.semanlink.net/doc/2021/01/a_criticism_of_stochastic_parro|tag|http://www.semanlink.net/tag/stochastic_parrots +http://www.semanlink.net/doc/2021/01/a_criticism_of_stochastic_parro|tag|http://www.semanlink.net/tag/yoav_goldberg +http://www.semanlink.net/doc/2021/01/a_criticism_of_stochastic_parro|comment|"About [""On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?""](doc:2021/01/emily_m_bender_sur_twitter_) + +> model size is not directly linked to computation efficiency + +> do we want our models to reflect the data as it is, or the world as we believe it should be? +> +> If we take language models as models of human language, do we want the model to be aware of slurs? The paper very clearly +argues that ""no it definitely should not"". But one could easily argue that, yes, we certainly do want the model to be aware of +slurs. Slurs are part of language. 
+ +[Tweet](https://twitter.com/yoavgo/status/1353004612140363780)" +http://www.semanlink.net/doc/2021/01/a_criticism_of_stochastic_parro|relatedDoc|http://www.semanlink.net/doc/2021/01/emily_m_bender_sur_twitter_ +http://www.semanlink.net/doc/2021/01/a_criticism_of_stochastic_parro|title|A criticism of Stochastic Parrots +http://www.semanlink.net/doc/2021/01/a_criticism_of_stochastic_parro|bookmarkOf|https://gist.github.com/yoavg/9fc9be2f98b47c189a513573d902fb27 +http://www.semanlink.net/doc/2021/01/a_criticism_of_stochastic_parro|creationTime|2021-01-23T16:51:11Z +http://www.semanlink.net/doc/2020/08/%C2%AB_les_fous_d%E2%80%99allah_nous_les_ont|creationDate|2020-08-11 +http://www.semanlink.net/doc/2020/08/%C2%AB_les_fous_d%E2%80%99allah_nous_les_ont|tag|http://www.semanlink.net/tag/terrorisme_islamiste +http://www.semanlink.net/doc/2020/08/%C2%AB_les_fous_d%E2%80%99allah_nous_les_ont|tag|http://www.semanlink.net/tag/niger +http://www.semanlink.net/doc/2020/08/%C2%AB_les_fous_d%E2%80%99allah_nous_les_ont|tag|http://www.semanlink.net/tag/koure +http://www.semanlink.net/doc/2020/08/%C2%AB_les_fous_d%E2%80%99allah_nous_les_ont|comment|"> Abdou Kadri, qui présidait l’Association +des guides de girafes de Kouré et accompagnait les humanitaires d’Acted, est l’autre +Nigérien à avoir péri dimanche. + +Que la terre lui soit légère" +http://www.semanlink.net/doc/2020/08/%C2%AB_les_fous_d%E2%80%99allah_nous_les_ont|title|« Les fous d’Allah nous les ont arrachés » : le Niger sous le choc après la mort des humanitaires +http://www.semanlink.net/doc/2020/08/%C2%AB_les_fous_d%E2%80%99allah_nous_les_ont|bookmarkOf|https://www.lemonde.fr/afrique/article/2020/08/11/les-fous-d-allah-nous-les-ont-arraches-le-niger-sous-le-choc-apres-l-assassinat-des-humanitaires_6048681_3212.html +http://www.semanlink.net/doc/2020/08/%C2%AB_les_fous_d%E2%80%99allah_nous_les_ont|creationTime|2020-08-11T14:19:26Z +http://www.semanlink.net/doc/2021/03/renault_group_at_hugging_face|creationDate|2021-03-18 +http://www.semanlink.net/doc/2021/03/renault_group_at_hugging_face|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2021/03/renault_group_at_hugging_face|title|Renault group at Hugging Face +http://www.semanlink.net/doc/2021/03/renault_group_at_hugging_face|bookmarkOf|https://huggingface.co/Renault +http://www.semanlink.net/doc/2021/03/renault_group_at_hugging_face|creationTime|2021-03-18T14:27:39Z +http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|creationDate|2021-01-12 +http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|tag|http://www.semanlink.net/tag/text_kg_and_embeddings +http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|tag|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|arxiv_author|Nina Poerner +http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|arxiv_author|Hinrich Schütze +http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|arxiv_author|Ulli Waltinger +http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|comment|"> way of **injecting factual knowledge about entities into the pretrained BERT model**. 
+
+(Feeding entity vectors into BERT as if they were wordpiece vectors, without additional encoder pretraining)
+
+>
+> **We align [Wikipedia2Vec](tag:wikipedia2vec) entity vectors (Yamada et al., 2016) with BERT's native wordpiece vector space and use the aligned entity vectors as if they were wordpiece vectors**. The resulting entity-enhanced version of BERT (called E-BERT) is similar in spirit to [ERNIE](tag:ernie) (Zhang et al., 2019) and [KnowBert](tag:knowbert) (Peters et al., 2019), but it **requires no expensive further pretraining of the BERT encoder**.
+>
+> Our vector space alignment strategy is inspired by
+cross-lingual word vector alignment
+
+Related work on Entity-enhanced BERT:
+
+> (ERNIE and KnowBert) are based on the design principle
+that BERT be adapted to entity vectors. They introduce
+new encoder layers to feed pretrained entity
+vectors into the Transformer, and they require additional
+pretraining to integrate the new parameters.
+In contrast, E-BERT’s design principle is that entity
+vectors be adapted to BERT.
+>
+> Two other knowledge-enhanced MLMs are [[1911.06136] KEPLER](doc:2020/11/1911_06136_kepler_a_unified_)
+(Wang et al., 2019c) and K-Adapter (Wang
+et al., 2020)... Their factual knowledge
+does not stem from entity vectors – instead, they
+are trained in a multi-task setting on relation classification
+and knowledge base completion.
+
+Not to be confused with [[2009.02835] E-BERT: A Phrase and Product Knowledge Enhanced Language Model for E-commerce](doc:2020/12/2009_02835_e_bert_a_phrase_a)"
http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|relatedDoc|http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_
http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|relatedDoc|http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a
http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|title|[1911.03681] E-BERT: Efficient-Yet-Effective Entity Embeddings for BERT
http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|bookmarkOf|https://arxiv.org/abs/1911.03681
http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|creationTime|2021-01-12T18:31:21Z
http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|arxiv_summary|"We present a novel way of injecting factual knowledge about entities into the
+pretrained BERT model (Devlin et al., 2019): We align Wikipedia2Vec entity
+vectors (Yamada et al., 2016) with BERT's native wordpiece vector space and use
+the aligned entity vectors as if they were wordpiece vectors. The resulting
+entity-enhanced version of BERT (called E-BERT) is similar in spirit to ERNIE
+(Zhang et al., 2019) and KnowBert (Peters et al., 2019), but it requires no
+expensive further pretraining of the BERT encoder. We evaluate E-BERT on
+unsupervised question answering (QA), supervised relation classification (RC)
+and entity linking (EL). On all three tasks, E-BERT outperforms BERT and other
+baselines. We also show quantitatively that the original BERT model is overly
+reliant on the surface form of entity names (e.g., guessing that someone with
+an Italian-sounding name speaks Italian), and that E-BERT mitigates this
+problem."
+http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|arxiv_firstAuthor|Nina Poerner
+http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|arxiv_updated|2020-05-01T09:19:35Z
+http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|arxiv_title|E-BERT: Efficient-Yet-Effective Entity Embeddings for BERT
+http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|arxiv_published|2019-11-09T13:08:25Z
+http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_|arxiv_num|1911.03681
+http://www.semanlink.net/doc/2021/08/l%E2%80%99age_du_fer_en_basse_normandie|creationDate|2021-08-09
+http://www.semanlink.net/doc/2021/08/l%E2%80%99age_du_fer_en_basse_normandie|tag|http://www.semanlink.net/tag/age_du_fer
+http://www.semanlink.net/doc/2021/08/l%E2%80%99age_du_fer_en_basse_normandie|tag|http://www.semanlink.net/tag/crochemelier
+http://www.semanlink.net/doc/2021/08/l%E2%80%99age_du_fer_en_basse_normandie|title|L’âge du Fer en Basse-Normandie. Gestes funéraires en Gaule au Second-Âge du Fer. Volumes I et II - Les éperons barrés et petites enceintes au Bronze final et au Premier Âge du fer en Basse-Normandie - Presses universitaires de Franche-Comté
+http://www.semanlink.net/doc/2021/08/l%E2%80%99age_du_fer_en_basse_normandie|bookmarkOf|https://books.openedition.org/pufc/6382
+http://www.semanlink.net/doc/2021/08/l%E2%80%99age_du_fer_en_basse_normandie|creationTime|2021-08-09T22:49:25Z
+http://www.semanlink.net/doc/2020/12/xkcd_git|creationDate|2020-12-19
+http://www.semanlink.net/doc/2020/12/xkcd_git|tag|http://www.semanlink.net/tag/git
+http://www.semanlink.net/doc/2020/12/xkcd_git|tag|http://www.semanlink.net/tag/rigolo
+http://www.semanlink.net/doc/2020/12/xkcd_git|title|xkcd: Git
+http://www.semanlink.net/doc/2020/12/xkcd_git|bookmarkOf|https://xkcd.com/1597/
+http://www.semanlink.net/doc/2020/12/xkcd_git|creationTime|2020-12-19T11:29:40Z
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|creationDate|2021-10-21
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|tag|http://www.semanlink.net/tag/pre_trained_language_models
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|tag|http://www.semanlink.net/tag/domain_adaptation
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|tag|http://www.semanlink.net/tag/domain_specific_nlp
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|tag|http://www.semanlink.net/tag/knowledge_distillation
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|tag|http://www.semanlink.net/tag/nlp_microsoft
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_author|Li Dong
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_author|Yunzhi Yao
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_author|Wenhui Wang
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_author|Shaohan Huang
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_author|Furu Wei
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|comment|"> adapting the off-the-shelf general pretrained models and performing
+task-agnostic knowledge distillation
+in target domains
+
+> Our findings suggest that
+domain-specific vocabulary and general-domain
+language model play vital roles in domain adaptation
+of a pretrained model"
+http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|title|[2106.13474] Adapt-and-Distill: Developing Small, Fast and Effective Pretrained Language Models for Domains +http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|bookmarkOf|https://arxiv.org/abs/2106.13474 +http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|creationTime|2021-10-21T18:24:46Z +http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_summary|"Large pre-trained models have achieved great success in many natural language +processing tasks. However, when they are applied in specific domains, these +models suffer from domain shift and bring challenges in fine-tuning and online +serving for latency and capacity constraints. In this paper, we present a +general approach to developing small, fast and effective pre-trained models for +specific domains. This is achieved by adapting the off-the-shelf general +pre-trained models and performing task-agnostic knowledge distillation in +target domains. Specifically, we propose domain-specific vocabulary expansion +in the adaptation stage and employ corpus level occurrence probability to +choose the size of incremental vocabulary automatically. Then we systematically +explore different strategies to compress the large pre-trained models for +specific domains. We conduct our experiments in the biomedical and computer +science domain. The experimental results demonstrate that our approach achieves +better performance over the BERT BASE model in domain-specific tasks while 3.3x +smaller and 5.1x faster than BERT BASE. The code and pre-trained models are +available at https://aka.ms/adalm." +http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_firstAuthor|Yunzhi Yao +http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_updated|2021-06-29T05:42:13Z +http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_title|Adapt-and-Distill: Developing Small, Fast and Effective Pretrained Language Models for Domains +http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_published|2021-06-25T07:37:05Z +http://www.semanlink.net/doc/2021/10/2106_13474_adapt_and_distill_|arxiv_num|2106.13474 +http://www.semanlink.net/doc/2020/08/a_novel_multi_label_classificat|creationDate|2020-08-11 +http://www.semanlink.net/doc/2020/08/a_novel_multi_label_classificat|tag|http://www.semanlink.net/tag/random_walk +http://www.semanlink.net/doc/2020/08/a_novel_multi_label_classificat|tag|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/doc/2020/08/a_novel_multi_label_classificat|tag|http://www.semanlink.net/tag/k_nearest_neighbors_algorithm +http://www.semanlink.net/doc/2020/08/a_novel_multi_label_classificat|tag|http://www.semanlink.net/tag/knn_in_mlc +http://www.semanlink.net/doc/2020/08/a_novel_multi_label_classificat|title|A novel multi-label classification algorithm based on K-nearest neighbor and random walk - Zhen-Wu Wang, Si-Kai Wang, Ben-Ting Wan, William Wei Song, 2020 +http://www.semanlink.net/doc/2020/08/a_novel_multi_label_classificat|bookmarkOf|https://journals.sagepub.com/doi/full/10.1177/1550147720911892 +http://www.semanlink.net/doc/2020/08/a_novel_multi_label_classificat|creationTime|2020-08-11T00:54:22Z +http://www.semanlink.net/doc/2021/03/equilibrium_propagation_bridgi|creationDate|2021-03-19 +http://www.semanlink.net/doc/2021/03/equilibrium_propagation_bridgi|tag|http://www.semanlink.net/tag/backpropagation_vs_biology 
+http://www.semanlink.net/doc/2021/03/equilibrium_propagation_bridgi|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/doc/2021/03/equilibrium_propagation_bridgi|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/doc/2021/03/equilibrium_propagation_bridgi|title|Equilibrium Propagation: Bridging the Gap between Energy-Based Models and Backpropagation Frontiers in Computational Neuroscience +http://www.semanlink.net/doc/2021/03/equilibrium_propagation_bridgi|bookmarkOf|https://www.frontiersin.org/articles/10.3389/fncom.2017.00024/full +http://www.semanlink.net/doc/2021/03/equilibrium_propagation_bridgi|creationTime|2021-03-19T13:32:54Z +http://www.semanlink.net/doc/2021/03/le_dit_du_genji_wikipedia|creationDate|2021-03-02 +http://www.semanlink.net/doc/2021/03/le_dit_du_genji_wikipedia|tag|http://www.semanlink.net/tag/livre_a_lire +http://www.semanlink.net/doc/2021/03/le_dit_du_genji_wikipedia|tag|http://www.semanlink.net/tag/japon +http://www.semanlink.net/doc/2021/03/le_dit_du_genji_wikipedia|title|Le Dit du Genji — Wikipédia +http://www.semanlink.net/doc/2021/03/le_dit_du_genji_wikipedia|bookmarkOf|https://fr.wikipedia.org/wiki/Le_Dit_du_Genji +http://www.semanlink.net/doc/2021/03/le_dit_du_genji_wikipedia|creationTime|2021-03-02T23:17:24Z +http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l|creationDate|2021-02-20 +http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l|tag|http://www.semanlink.net/tag/caf +http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l|tag|http://www.semanlink.net/tag/enfer_administratif +http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l|tag|http://www.semanlink.net/tag/france_fiasco_administratif +http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l|tag|http://www.semanlink.net/tag/guaranteed_basic_income +http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l|tag|http://www.semanlink.net/tag/rsa +http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l|comment|Une seule solution : le [Revenu Universel Sans Condition de Ressource](tag:guaranteed_basic_income) +http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l|title|« L’entretien avec l’agent de la CAF a été une humiliation » : les bénéficiaires du RSA dans l’enfer des contrôles +http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l|bookmarkOf|https://www.lemonde.fr/societe/article/2021/02/20/l-entretien-avec-l-agent-de-la-caf-a-ete-une-humiliation-les-beneficiaires-du-rsa-dans-l-enfer-des-controles_6070648_3224.html +http://www.semanlink.net/doc/2021/02/%C2%AB_l%E2%80%99entretien_avec_l%E2%80%99agent_de_l|creationTime|2021-02-20T12:43:49Z +http://www.semanlink.net/doc/2021/07/jacob_desvarieux_leader_du_gro|creationDate|2021-07-31 +http://www.semanlink.net/doc/2021/07/jacob_desvarieux_leader_du_gro|tag|http://www.semanlink.net/tag/rip +http://www.semanlink.net/doc/2021/07/jacob_desvarieux_leader_du_gro|tag|http://www.semanlink.net/tag/kassav +http://www.semanlink.net/doc/2021/07/jacob_desvarieux_leader_du_gro|title|Jacob Desvarieux, leader du groupe antillais Kassav’, est mort 
+http://www.semanlink.net/doc/2021/07/jacob_desvarieux_leader_du_gro|bookmarkOf|https://www.lemonde.fr/disparitions/article/2021/07/31/jacob-desvarieux-leader-du-groupe-antillais-kassav-est-mort_6090102_3382.html +http://www.semanlink.net/doc/2021/07/jacob_desvarieux_leader_du_gro|creationTime|2021-07-31T08:04:50Z +http://www.semanlink.net/doc/2021/01/the_complexity_to_construct_kno|creationDate|2021-01-30 +http://www.semanlink.net/doc/2021/01/the_complexity_to_construct_kno|tag|http://www.semanlink.net/tag/francois_scharffe +http://www.semanlink.net/doc/2021/01/the_complexity_to_construct_kno|tag|http://www.semanlink.net/tag/knowledge_graph_construction +http://www.semanlink.net/doc/2021/01/the_complexity_to_construct_kno|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2021/01/the_complexity_to_construct_kno|title|The complexity to construct knowledge graphs and how low code tools can help or hurt you Medium +http://www.semanlink.net/doc/2021/01/the_complexity_to_construct_kno|bookmarkOf|https://medium.com/kgbase-blog/the-complexity-to-construct-knowledge-graphs-and-how-low-code-tools-can-help-or-hurt-ae7418b2c0ec +http://www.semanlink.net/doc/2021/01/the_complexity_to_construct_kno|creationTime|2021-01-30T13:22:27Z +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|creationDate|2021-06-17 +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|tag|http://www.semanlink.net/tag/paleoclimatologie +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|tag|http://www.semanlink.net/tag/peter_breunig +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|tag|http://www.semanlink.net/tag/age_du_fer +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|tag|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|tag|http://www.semanlink.net/tag/nok +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|comment|"Des fouilles menées au Nigeria ont révélé l'existence de sociétés complexes constituées d'agriculteurs, d'artisans et de commerçants, qui se sont développées en Afrique de l'Ouest un millénaire avant notre ère. 
+ +Author: Peter Breunig" +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|title|Premières sociétés structurées au Nigéria (Pour la Science 2012) +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|bookmarkOf|https://www.pourlascience.fr/sd/prehistoire/premieres-societes-structurees-6870.php +http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_|creationTime|2021-06-17T09:29:44Z +http://www.semanlink.net/doc/2021/09/a_gentle_introduction_to_graph_|creationDate|2021-09-03 +http://www.semanlink.net/doc/2021/09/a_gentle_introduction_to_graph_|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2021/09/a_gentle_introduction_to_graph_|tag|http://www.semanlink.net/tag/google_research +http://www.semanlink.net/doc/2021/09/a_gentle_introduction_to_graph_|tag|http://www.semanlink.net/tag/introduction +http://www.semanlink.net/doc/2021/09/a_gentle_introduction_to_graph_|title|A Gentle Introduction to Graph Neural Networks +http://www.semanlink.net/doc/2021/09/a_gentle_introduction_to_graph_|bookmarkOf|https://distill.pub/2021/gnn-intro/ +http://www.semanlink.net/doc/2021/09/a_gentle_introduction_to_graph_|creationTime|2021-09-03T01:09:03Z +http://www.semanlink.net/doc/2021/01/die_toten_hosen|creationDate|2021-01-30 +http://www.semanlink.net/doc/2021/01/die_toten_hosen|tag|http://www.semanlink.net/tag/allemagne +http://www.semanlink.net/doc/2021/01/die_toten_hosen|tag|http://www.semanlink.net/tag/punk +http://www.semanlink.net/doc/2021/01/die_toten_hosen|title|Die Toten Hosen +http://www.semanlink.net/doc/2021/01/die_toten_hosen|bookmarkOf|https://en.wikipedia.org/wiki/Die_Toten_Hosen +http://www.semanlink.net/doc/2021/01/die_toten_hosen|creationTime|2021-01-30T01:21:12Z +http://www.semanlink.net/doc/2020/12/keyword_extraction_with_bert_%7C_|creationDate|2020-12-06 +http://www.semanlink.net/doc/2020/12/keyword_extraction_with_bert_%7C_|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/doc/2020/12/keyword_extraction_with_bert_%7C_|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/12/keyword_extraction_with_bert_%7C_|comment|"A minimal method for extracting keywords and keyphrases. + +[GitHub](https://github.com/MaartenGr/KeyBERT/) + +> uses BERT-embeddings and simple cosine similarity to find the sub-phrases in a document that are the most similar to the document itself." 
+http://www.semanlink.net/doc/2020/12/keyword_extraction_with_bert_%7C_|title|Keyword Extraction with BERT Towards Data Science +http://www.semanlink.net/doc/2020/12/keyword_extraction_with_bert_%7C_|bookmarkOf|https://towardsdatascience.com/keyword-extraction-with-bert-724efca412ea +http://www.semanlink.net/doc/2020/12/keyword_extraction_with_bert_%7C_|creationTime|2020-12-06T10:07:17Z +http://www.semanlink.net/doc/2021/07/a_moderate_proposal_for_radical|creationDate|2021-07-10 +http://www.semanlink.net/doc/2021/07/a_moderate_proposal_for_radical|tag|http://www.semanlink.net/tag/web_search +http://www.semanlink.net/doc/2021/07/a_moderate_proposal_for_radical|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2021/07/a_moderate_proposal_for_radical|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/doc/2021/07/a_moderate_proposal_for_radical|tag|http://www.semanlink.net/tag/colbert +http://www.semanlink.net/doc/2021/07/a_moderate_proposal_for_radical|title|A Moderate Proposal for Radically Better AI-powered Web Search +http://www.semanlink.net/doc/2021/07/a_moderate_proposal_for_radical|bookmarkOf|https://hai.stanford.edu/news/moderate-proposal-radically-better-ai-powered-web-search?utm_source=twitter&utm_medium=social&utm_content=Stanford%20HAI_twitter_StanfordHAI_202107091202_sf147340637&utm_campaign=&sf147340637=1 +http://www.semanlink.net/doc/2021/07/a_moderate_proposal_for_radical|creationTime|2021-07-10T09:10:20Z +http://www.semanlink.net/doc/2020/10/will_a_half_step_by_macron_be_e|creationDate|2020-10-15 +http://www.semanlink.net/doc/2020/10/will_a_half_step_by_macron_be_e|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales +http://www.semanlink.net/doc/2020/10/will_a_half_step_by_macron_be_e|tag|http://www.semanlink.net/tag/macron +http://www.semanlink.net/doc/2020/10/will_a_half_step_by_macron_be_e|title|Will a Half-Step by Macron Be Enough to Blunt France’s Second Wave? - The New York Times +http://www.semanlink.net/doc/2020/10/will_a_half_step_by_macron_be_e|bookmarkOf|https://www.nytimes.com/2020/10/15/world/europe/france-coronavirus-second-wave.html?action=click&module=Top%20Stories&pgtype=Homepage +http://www.semanlink.net/doc/2020/10/will_a_half_step_by_macron_be_e|creationTime|2020-10-15T23:44:16Z +http://www.semanlink.net/doc/2020/09/representing_text_for_joint_emb|creationDate|2020-09-14 +http://www.semanlink.net/doc/2020/09/representing_text_for_joint_emb|tag|http://www.semanlink.net/tag/text_kg_and_embeddings +http://www.semanlink.net/doc/2020/09/representing_text_for_joint_emb|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2020/09/representing_text_for_joint_emb|comment|"> Models that learn to represent textual and +knowledge base relations in the same continuous +latent space are able to perform +joint inferences among the two kinds of relations +> +> A model that captures +the compositional structure of textual +relations, and jointly optimizes entity, +knowledge base, and textual relation representations. +> +> In this paper we build upon the work of Riedel +et al. (2013), which jointly learns continuous representations +for knowledge base and textual relations. 
+ +[similar subject](doc:?uri=http%3A%2F%2Femnlp2014.org%2Fpapers%2Fpdf%2FEMNLP2014167.pdf), also by Microsoft" +http://www.semanlink.net/doc/2020/09/representing_text_for_joint_emb|relatedDoc|http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf +http://www.semanlink.net/doc/2020/09/representing_text_for_joint_emb|title|Representing Text for Joint Embedding of Text and Knowledge Bases (EMNLP 2015) +http://www.semanlink.net/doc/2020/09/representing_text_for_joint_emb|bookmarkOf|https://www.aclweb.org/anthology/D15-1174/ +http://www.semanlink.net/doc/2020/09/representing_text_for_joint_emb|creationTime|2020-09-14T23:04:28Z +http://www.semanlink.net/doc/2021/03/wikidata_browser|creationDate|2021-03-06 +http://www.semanlink.net/doc/2021/03/wikidata_browser|tag|http://www.semanlink.net/tag/wikidata_browser +http://www.semanlink.net/doc/2021/03/wikidata_browser|comment|[GitHub](doc:2021/03/ringgaard_sling_sling_a_natu) +http://www.semanlink.net/doc/2021/03/wikidata_browser|relatedDoc|http://www.semanlink.net/doc/2021/03/ringgaard_sling_sling_a_natu +http://www.semanlink.net/doc/2021/03/wikidata_browser|title|Wikidata browser +http://www.semanlink.net/doc/2021/03/wikidata_browser|bookmarkOf|https://ringgaard.com/kb/Q466068 +http://www.semanlink.net/doc/2021/03/wikidata_browser|creationTime|2021-03-06T16:15:21Z +http://www.semanlink.net/doc/2021/07/rio_tinto_blasting_of_46_000_ye|creationDate|2021-07-31 +http://www.semanlink.net/doc/2021/07/rio_tinto_blasting_of_46_000_ye|tag|http://www.semanlink.net/tag/rio_tinto +http://www.semanlink.net/doc/2021/07/rio_tinto_blasting_of_46_000_ye|tag|http://www.semanlink.net/tag/aborigenes +http://www.semanlink.net/doc/2021/07/rio_tinto_blasting_of_46_000_ye|tag|http://www.semanlink.net/tag/industrie_miniere +http://www.semanlink.net/doc/2021/07/rio_tinto_blasting_of_46_000_ye|tag|http://www.semanlink.net/tag/destruction_de_vestiges_antiques +http://www.semanlink.net/doc/2021/07/rio_tinto_blasting_of_46_000_ye|tag|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.semanlink.net/doc/2021/07/rio_tinto_blasting_of_46_000_ye|title|Rio Tinto blasting of 46,000-year-old Aboriginal sites compared to Islamic State's destruction in Palmyra - ABC News (May 2020) +http://www.semanlink.net/doc/2021/07/rio_tinto_blasting_of_46_000_ye|bookmarkOf|https://www.abc.net.au/news/2020-05-29/ken-wyatt-says-traditional-owners-tried-to-stop-rio-tinto-blast/12299944 +http://www.semanlink.net/doc/2021/07/rio_tinto_blasting_of_46_000_ye|creationTime|2021-07-31T18:34:37Z +http://www.semanlink.net/doc/2020/09/is_it_possible_to_force_excel_r|creationDate|2020-09-08 +http://www.semanlink.net/doc/2020/09/is_it_possible_to_force_excel_r|tag|http://www.semanlink.net/tag/csv +http://www.semanlink.net/doc/2020/09/is_it_possible_to_force_excel_r|tag|http://www.semanlink.net/tag/stackoverflow_q +http://www.semanlink.net/doc/2020/09/is_it_possible_to_force_excel_r|tag|http://www.semanlink.net/tag/excel +http://www.semanlink.net/doc/2020/09/is_it_possible_to_force_excel_r|comment|> Since I added \uFEFF at the beginning of my CSV file (generated in Java), Excel is able to open them correctly! +http://www.semanlink.net/doc/2020/09/is_it_possible_to_force_excel_r|title|Is it possible to force Excel recognize UTF-8 CSV files automatically? 
- Stack Overflow +http://www.semanlink.net/doc/2020/09/is_it_possible_to_force_excel_r|bookmarkOf|https://stackoverflow.com/questions/6002256/is-it-possible-to-force-excel-recognize-utf-8-csv-files-automatically +http://www.semanlink.net/doc/2020/09/is_it_possible_to_force_excel_r|creationTime|2020-09-08T13:48:34Z +http://www.semanlink.net/doc/2021/01/les_champions_des_energies_reno|creationDate|2021-01-01 +http://www.semanlink.net/doc/2021/01/les_champions_des_energies_reno|tag|http://www.semanlink.net/tag/energies_renouvelables +http://www.semanlink.net/doc/2021/01/les_champions_des_energies_reno|tag|http://www.semanlink.net/tag/compagnies_petrolieres +http://www.semanlink.net/doc/2021/01/les_champions_des_energies_reno|tag|http://www.semanlink.net/tag/bonne_nouvelle +http://www.semanlink.net/doc/2021/01/les_champions_des_energies_reno|title|Les champions des énergies renouvelables rivalisent désormais avec les majors du pétrole et du gaz +http://www.semanlink.net/doc/2021/01/les_champions_des_energies_reno|bookmarkOf|https://www.lemonde.fr/economie/article/2020/12/31/les-nouveaux-geants-de-l-energie-misent-sur-l-eolien-et-le-solaire_6064870_3234.html +http://www.semanlink.net/doc/2021/01/les_champions_des_energies_reno|creationTime|2021-01-01T02:14:54Z +http://www.semanlink.net/doc/2020/08/l%E2%80%99adn_d%E2%80%99un_peuple_inconnu_mille|creationDate|2020-08-21 +http://www.semanlink.net/doc/2020/08/l%E2%80%99adn_d%E2%80%99un_peuple_inconnu_mille|tag|http://www.semanlink.net/tag/paleoanthropology_genetics +http://www.semanlink.net/doc/2020/08/l%E2%80%99adn_d%E2%80%99un_peuple_inconnu_mille|tag|http://www.semanlink.net/tag/cameroun +http://www.semanlink.net/doc/2020/08/l%E2%80%99adn_d%E2%80%99un_peuple_inconnu_mille|title|L’ADN d’un peuple inconnu millénaire découvert au Cameroun - Geo.fr +http://www.semanlink.net/doc/2020/08/l%E2%80%99adn_d%E2%80%99un_peuple_inconnu_mille|bookmarkOf|https://www.geo.fr/histoire/ladn-dun-peuple-inconnu-millenaire-decouvert-au-cameroun-199738 +http://www.semanlink.net/doc/2020/08/l%E2%80%99adn_d%E2%80%99un_peuple_inconnu_mille|creationTime|2020-08-21T17:04:22Z +http://www.semanlink.net/doc/2021/01/2012_15723|creationDate|2021-01-02 +http://www.semanlink.net/doc/2021/01/2012_15723|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/01/2012_15723|tag|http://www.semanlink.net/tag/few_shot_learning +http://www.semanlink.net/doc/2021/01/2012_15723|tag|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/doc/2021/01/2012_15723|arxiv_author|Adam Fisch +http://www.semanlink.net/doc/2021/01/2012_15723|arxiv_author|Tianyu Gao +http://www.semanlink.net/doc/2021/01/2012_15723|arxiv_author|Danqi Chen +http://www.semanlink.net/doc/2021/01/2012_15723|comment|[Tweet](https://twitter.com/adamjfisch/status/1345185238276861953) +http://www.semanlink.net/doc/2021/01/2012_15723|title|[2012.15723] Making Pre-trained Language Models Better Few-shot Learners +http://www.semanlink.net/doc/2021/01/2012_15723|bookmarkOf|https://arxiv.org/abs/2012.15723 +http://www.semanlink.net/doc/2021/01/2012_15723|creationTime|2021-01-02T22:42:12Z +http://www.semanlink.net/doc/2021/01/2012_15723|arxiv_summary|"The recent GPT-3 model (Brown et al., 2020) achieves remarkable few-shot +performance solely by leveraging a natural-language prompt and a few task +demonstrations as input context. 
Inspired by their findings, we study few-shot +learning in a more practical scenario, where we use smaller language models for +which fine-tuning is computationally efficient. We present LM-BFF--better +few-shot fine-tuning of language models--a suite of simple and complementary +techniques for fine-tuning language models on a small number of annotated +examples. Our approach includes (1) prompt-based fine-tuning together with a +novel pipeline for automating prompt generation; and (2) a refined strategy for +dynamically and selectively incorporating demonstrations into each context. +Finally, we present a systematic evaluation for analyzing few-shot performance +on a range of NLP tasks, including classification and regression. Our +experiments demonstrate that our methods combine to dramatically outperform +standard fine-tuning procedures in this low resource setting, achieving up to +30% absolute improvement, and 11% on average across all tasks. Our approach +makes minimal assumptions on task resources and domain expertise, and hence +constitutes a strong task-agnostic method for few-shot learning." +http://www.semanlink.net/doc/2021/01/2012_15723|arxiv_firstAuthor|Tianyu Gao +http://www.semanlink.net/doc/2021/01/2012_15723|arxiv_updated|2020-12-31T17:21:26Z +http://www.semanlink.net/doc/2021/01/2012_15723|arxiv_title|Making Pre-trained Language Models Better Few-shot Learners +http://www.semanlink.net/doc/2021/01/2012_15723|arxiv_published|2020-12-31T17:21:26Z +http://www.semanlink.net/doc/2021/01/2012_15723|arxiv_num|2012.15723 +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|creationDate|2020-12-14 +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|tag|http://www.semanlink.net/tag/domain_specific_bert +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|tag|http://www.semanlink.net/tag/bert_kb +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|tag|http://www.semanlink.net/tag/e_commerce_data +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|tag|http://www.semanlink.net/tag/aspect_detection +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_author|Zixuan Yuan +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_author|Pengyang Wang +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_author|Yanchi Liu +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_author|Hui Xiong +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_author|Denghui Zhang +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_author|Haifeng Chen +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_author|Zuohui Fu +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_author|Fuzhen Zhuang +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|comment|"E-BERT, pre-training framework for product data. + +1. to benefit from phrase-level knowledge: Adaptive Hybrid Masking, a new masking strategy, which allows the model to adaptively switch from learning preliminary word knowledge to learning complex phrases +2. 
leveraging product-level knowledge: training E-BERT to
+predict a product’s associated neighbors (product association)
+
+Resources used:
+
+- description of millions of products from the amazon dataset (title, description, reviews)
+- e-commerce phrases: extracted from above dataset using [AutoPhrase](doc:2020/12/autophrase_automated_phrase_mi)
+- product association graph: pairs of substitutable and complementary products extracted from amazon dataset
+
+Not to be confused with [[1911.03681] E-BERT: Efficient-Yet-Effective Entity Embeddings for BERT](doc:2021/01/1911_03681_e_bert_efficient_)"
http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|relatedDoc|http://www.semanlink.net/doc/2021/01/1911_03681_e_bert_efficient_
http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|relatedDoc|http://www.semanlink.net/doc/2020/12/autophrase_automated_phrase_mi
http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|title|[2009.02835] E-BERT: A Phrase and Product Knowledge Enhanced Language Model for E-commerce
http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|bookmarkOf|https://arxiv.org/abs/2009.02835
http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|creationTime|2020-12-14T11:10:29Z
http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_summary|"Pre-trained language models such as BERT have achieved great success in a
+broad range of natural language processing tasks. However, BERT cannot well
+support E-commerce related tasks due to the lack of two levels of domain
+knowledge, i.e., phrase-level and product-level. On one hand, many E-commerce
+tasks require an accurate understanding of domain phrases, whereas such
+fine-grained phrase-level knowledge is not explicitly modeled by BERT's
+training objective. On the other hand, product-level knowledge like product
+associations can enhance the language modeling of E-commerce, but they are not
+factual knowledge thus using them indiscriminately may introduce noise. To
+tackle the problem, we propose a unified pre-training framework, namely,
+E-BERT. Specifically, to preserve phrase-level knowledge, we introduce Adaptive
+Hybrid Masking, which allows the model to adaptively switch from learning
+preliminary word knowledge to learning complex phrases, based on the fitting
+progress of two modes. To utilize product-level knowledge, we introduce
+Neighbor Product Reconstruction, which trains E-BERT to predict a product's
+associated neighbors with a denoising cross attention layer. Our investigation
+reveals promising results in four downstream tasks, i.e., review-based question
+answering, aspect extraction, aspect sentiment classification, and product
+classification."
+http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_firstAuthor|Denghui Zhang +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_updated|2020-09-10T23:00:16Z +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_title|E-BERT: A Phrase and Product Knowledge Enhanced Language Model for E-commerce +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_published|2020-09-07T00:15:36Z +http://www.semanlink.net/doc/2020/12/2009_02835_e_bert_a_phrase_a|arxiv_num|2009.02835 +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|creationDate|2021-07-29 +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|tag|http://www.semanlink.net/tag/cross_lingual_nlp +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|tag|http://www.semanlink.net/tag/multilingual_embeddings +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_author|Francisco Guzmán +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_author|Edouard Grave +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_author|Guillaume Wenzek +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_author|Naman Goyal +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_author|Alexis Conneau +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_author|Vishrav Chaudhary +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_author|Kartikay Khandelwal +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_author|Veselin Stoyanov +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_author|Myle Ott +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_author|Luke Zettlemoyer +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|comment|Data: [CC-100: Monolingual Datasets from Web Crawl Data](doc:2021/07/cc_100_monolingual_datasets_fr) +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|relatedDoc|http://www.semanlink.net/doc/2021/07/cc_100_monolingual_datasets_fr +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|title|[1911.02116] Unsupervised Cross-lingual Representation Learning at Scale +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|bookmarkOf|https://arxiv.org/abs/1911.02116 +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|creationTime|2021-07-29T00:16:13Z +http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_summary|"This paper shows that pretraining multilingual language models at scale leads +to significant performance gains for a wide range of cross-lingual transfer +tasks. We train a Transformer-based masked language model on one hundred +languages, using more than two terabytes of filtered CommonCrawl data. Our +model, dubbed XLM-R, significantly outperforms multilingual BERT (mBERT) on a +variety of cross-lingual benchmarks, including +14.6% average accuracy on XNLI, ++13% average F1 score on MLQA, and +2.4% F1 score on NER. XLM-R performs +particularly well on low-resource languages, improving 15.7% in XNLI accuracy +for Swahili and 11.4% for Urdu over previous XLM models. 
We also present a
+detailed empirical analysis of the key factors that are required to achieve
+these gains, including the trade-offs between (1) positive transfer and
+capacity dilution and (2) the performance of high and low resource languages at
+scale. Finally, we show, for the first time, the possibility of multilingual
+modeling without sacrificing per-language performance; XLM-R is very
+competitive with strong monolingual models on the GLUE and XNLI benchmarks. We
+will make our code, data and models publicly available."
+http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_firstAuthor|Alexis Conneau
+http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_updated|2020-04-08T01:02:17Z
+http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_title|Unsupervised Cross-lingual Representation Learning at Scale
+http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_published|2019-11-05T22:42:00Z
+http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|arxiv_num|1911.02116
+http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross|bookmarkOf|https://aclanthology.org/2020.acl-main.747.pdf
+http://www.semanlink.net/doc/2021/09/neuml_txtai_build_ai_powered_s|creationDate|2021-09-30
+http://www.semanlink.net/doc/2021/09/neuml_txtai_build_ai_powered_s|tag|http://www.semanlink.net/tag/txtai
+http://www.semanlink.net/doc/2021/09/neuml_txtai_build_ai_powered_s|title|neuml/txtai: Build AI-powered semantic search applications
+http://www.semanlink.net/doc/2021/09/neuml_txtai_build_ai_powered_s|bookmarkOf|https://github.com/neuml/txtai
+http://www.semanlink.net/doc/2021/09/neuml_txtai_build_ai_powered_s|creationTime|2021-09-30T14:39:57Z
+http://www.semanlink.net/doc/2021/08/masakhane_using_ai_to_bring_af|creationDate|2021-08-26
+http://www.semanlink.net/doc/2021/08/masakhane_using_ai_to_bring_af|tag|http://www.semanlink.net/tag/masakhane
+http://www.semanlink.net/doc/2021/08/masakhane_using_ai_to_bring_af|title|Masakhane: Using AI to Bring African Languages Into the Global Conversation
+http://www.semanlink.net/doc/2021/08/masakhane_using_ai_to_bring_af|bookmarkOf|https://en.reset.org/blog/masakhane-using-ai-bring-african-languages-global-conversation-02062020
+http://www.semanlink.net/doc/2021/08/masakhane_using_ai_to_bring_af|creationTime|2021-08-26T15:07:12Z
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|creationDate|2021-07-09
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|tag|http://www.semanlink.net/tag/survey
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|tag|http://www.semanlink.net/tag/nlp_long_documents
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|arxiv_author|Andrew Yates
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|arxiv_author|Rodrigo Nogueira
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|arxiv_author|Jimmy Lin
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|comment|a 155-page paper!
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|title|[2010.06467] Pretrained Transformers for Text Ranking: BERT and Beyond +http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|bookmarkOf|https://arxiv.org/abs/2010.06467 +http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|creationTime|2021-07-09T14:50:44Z +http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|arxiv_summary|"The goal of text ranking is to generate an ordered list of texts retrieved +from a corpus in response to a query. Although the most common formulation of +text ranking is search, instances of the task can also be found in many natural +language processing applications. This survey provides an overview of text +ranking with neural network architectures known as transformers, of which BERT +is the best-known example. The combination of transformers and self-supervised +pretraining has, without exaggeration, revolutionized the fields of natural +language processing (NLP), information retrieval (IR), and beyond. In this +survey, we provide a synthesis of existing work as a single point of entry for +practitioners who wish to gain a better understanding of how to apply +transformers to text ranking problems and researchers who wish to pursue work +in this area. We cover a wide range of modern techniques, grouped into two +high-level categories: transformer models that perform reranking in multi-stage +ranking architectures and learned dense representations that attempt to perform +ranking directly. There are two themes that pervade our survey: techniques for +handling long documents, beyond the typical sentence-by-sentence processing +approaches used in NLP, and techniques for addressing the tradeoff between +effectiveness (result quality) and efficiency (query latency). Although +transformer architectures and pretraining techniques are recent innovations, +many aspects of how they are applied to text ranking are relatively well +understood and represent mature techniques. However, there remain many open +research questions, and thus in addition to laying out the foundations of +pretrained transformers for text ranking, this survey also attempts to +prognosticate where the field is heading." 
+http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|arxiv_firstAuthor|Jimmy Lin +http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|arxiv_updated|2020-10-13T15:20:32Z +http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|arxiv_title|Pretrained Transformers for Text Ranking: BERT and Beyond +http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|arxiv_published|2020-10-13T15:20:32Z +http://www.semanlink.net/doc/2021/07/2010_06467_pretrained_transfo|arxiv_num|2010.06467 +http://www.semanlink.net/doc/2021/05/an_introduction_to_knowledge_gr|creationDate|2021-05-25 +http://www.semanlink.net/doc/2021/05/an_introduction_to_knowledge_gr|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2021/05/an_introduction_to_knowledge_gr|tag|http://www.semanlink.net/tag/introduction +http://www.semanlink.net/doc/2021/05/an_introduction_to_knowledge_gr|title|An Introduction to Knowledge Graphs SAIL Blog +http://www.semanlink.net/doc/2021/05/an_introduction_to_knowledge_gr|bookmarkOf|http://ai.stanford.edu/blog/introduction-to-knowledge-graphs/ +http://www.semanlink.net/doc/2021/05/an_introduction_to_knowledge_gr|creationTime|2021-05-25T23:56:44Z +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|creationDate|2020-08-08 +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|tag|http://www.semanlink.net/tag/jure_leskovec +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|tag|http://www.semanlink.net/tag/node2vec +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|arxiv_author|Jure Leskovec +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|arxiv_author|Aditya Grover +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|comment|> algorithmic framework for learning continuous feature representations for nodes in networks. In node2vec, we learn a mapping of nodes to a low-dimensional space of features that maximizes the likelihood of preserving network neighborhoods of nodes. We define a flexible notion of a node's network neighborhood and design a biased random walk procedure, which efficiently explores diverse neighborhoods. Our algorithm generalizes prior work which is based on rigid notions of network neighborhoods, and we argue that the added flexibility in exploring neighborhoods is the key to learning richer representations. +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|title|[1607.00653] node2vec: Scalable Feature Learning for Networks +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|bookmarkOf|https://arxiv.org/abs/1607.00653 +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|creationTime|2020-08-08T15:57:03Z +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|arxiv_summary|"Prediction tasks over nodes and edges in networks require careful effort in +engineering features used by learning algorithms. Recent research in the +broader field of representation learning has led to significant progress in +automating prediction by learning the features themselves. However, present +feature learning approaches are not expressive enough to capture the diversity +of connectivity patterns observed in networks. Here we propose node2vec, an +algorithmic framework for learning continuous feature representations for nodes +in networks. 
In node2vec, we learn a mapping of nodes to a low-dimensional +space of features that maximizes the likelihood of preserving network +neighborhoods of nodes. We define a flexible notion of a node's network +neighborhood and design a biased random walk procedure, which efficiently +explores diverse neighborhoods. Our algorithm generalizes prior work which is +based on rigid notions of network neighborhoods, and we argue that the added +flexibility in exploring neighborhoods is the key to learning richer +representations. We demonstrate the efficacy of node2vec over existing +state-of-the-art techniques on multi-label classification and link prediction +in several real-world networks from diverse domains. Taken together, our work +represents a new way for efficiently learning state-of-the-art task-independent +representations in complex networks." +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|arxiv_firstAuthor|Aditya Grover +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|arxiv_updated|2016-07-03T16:09:30Z +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|arxiv_title|node2vec: Scalable Feature Learning for Networks +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|arxiv_published|2016-07-03T16:09:30Z +http://www.semanlink.net/doc/2020/08/1607_00653_node2vec_scalable|arxiv_num|1607.00653 +http://www.semanlink.net/doc/2020/11/match_markdown_links_with_advan|creationDate|2020-11-27 +http://www.semanlink.net/doc/2020/11/match_markdown_links_with_advan|tag|http://www.semanlink.net/tag/markdown +http://www.semanlink.net/doc/2020/11/match_markdown_links_with_advan|tag|http://www.semanlink.net/tag/regex +http://www.semanlink.net/doc/2020/11/match_markdown_links_with_advan|title|Match Markdown links with advanced regex features by Michaël Perrin +http://www.semanlink.net/doc/2020/11/match_markdown_links_with_advan|bookmarkOf|http://blog.michaelperrin.fr/2019/02/04/advanced-regular-expressions/ +http://www.semanlink.net/doc/2020/11/match_markdown_links_with_advan|creationTime|2020-11-27T12:28:32Z +http://www.semanlink.net/doc/2021/10/omer_levy_sur_twitter_what_i|creationDate|2021-10-13 +http://www.semanlink.net/doc/2021/10/omer_levy_sur_twitter_what_i|tag|http://www.semanlink.net/tag/gpt_3 +http://www.semanlink.net/doc/2021/10/omer_levy_sur_twitter_what_i|tag|http://www.semanlink.net/tag/language_models_size +http://www.semanlink.net/doc/2021/10/omer_levy_sur_twitter_what_i|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/10/omer_levy_sur_twitter_what_i|title|"Omer Levy sur Twitter : ""What if I told you that fine-tuning T5-Large (0.8B params) on a couple hundred examples could outperform GPT-3 (175B params) on a bunch of tasks?""" +http://www.semanlink.net/doc/2021/10/omer_levy_sur_twitter_what_i|bookmarkOf|https://twitter.com/omerlevy_/status/1448000216511229959?s=20 +http://www.semanlink.net/doc/2021/10/omer_levy_sur_twitter_what_i|creationTime|2021-10-13T12:53:20Z +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|creationDate|2020-08-02 +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|tag|http://www.semanlink.net/tag/contextualised_word_representations +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|tag|http://www.semanlink.net/tag/probing_ml 
+http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|Ellie Pavlick +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|Ian Tenney +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|Benjamin Van Durme +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|Dipanjan Das +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|R Thomas McCoy +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|Berlin Chen +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|Samuel R. Bowman +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|Najoung Kim +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|Adam Poliak +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|Patrick Xia +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_author|Alex Wang +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|comment|> We find that existing models trained on language modeling and translation produce strong representations for syntactic phenomena, but only offer comparably small improvements on semantic tasks over a non-contextual baseline. +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|title|[1905.06316] What do you learn from context? Probing for sentence structure in contextualized word representations +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|bookmarkOf|https://arxiv.org/abs/1905.06316 +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|creationTime|2020-08-02T11:25:38Z +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_summary|"Contextualized representation models such as ELMo (Peters et al., 2018a) and +BERT (Devlin et al., 2018) have recently achieved state-of-the-art results on a +diverse array of downstream NLP tasks. Building on recent token-level probing +work, we introduce a novel edge probing task design and construct a broad suite +of sub-sentence tasks derived from the traditional structured NLP pipeline. We +probe word-level contextual representations from four recent models and +investigate how they encode sentence structure across a range of syntactic, +semantic, local, and long-range phenomena. We find that existing models trained +on language modeling and translation produce strong representations for +syntactic phenomena, but only offer comparably small improvements on semantic +tasks over a non-contextual baseline." +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_firstAuthor|Ian Tenney +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_updated|2019-05-15T17:48:56Z +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_title|What do you learn from context? 
Probing for sentence structure in contextualized word representations +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_published|2019-05-15T17:48:56Z +http://www.semanlink.net/doc/2020/08/1905_06316_what_do_you_learn_|arxiv_num|1905.06316 +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|creationDate|2020-07-10 +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|tag|http://www.semanlink.net/tag/ai_stanford +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|tag|http://www.semanlink.net/tag/human_in_the_loop +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|tag|http://www.semanlink.net/tag/concept_bottleneck_models +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_author|Yew Siang Tang +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_author|Pang Wei Koh +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_author|Percy Liang +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_author|Thao Nguyen +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_author|Stephen Mussmann +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_author|Emma Pierson +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_author|Been Kim +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|comment|"> We seek to **learn models that we can interact with using high-level concepts**... +> +> We revisit the **classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label**. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time." +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|title|[2007.04612] Concept Bottleneck Models +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|bookmarkOf|https://arxiv.org/abs/2007.04612 +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|creationTime|2020-07-10T09:48:19Z +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_summary|"We seek to learn models that we can interact with using high-level concepts: +if the model did not think there was a bone spur in the x-ray, would it still +predict severe arthritis? State-of-the-art models today do not typically +support the manipulation of concepts like ""the existence of bone spurs"", as +they are trained end-to-end to go directly from raw input (e.g., pixels) to +output (e.g., arthritis severity). We revisit the classic idea of first +predicting concepts that are provided at training time, and then using these +concepts to predict the label. By construction, we can intervene on these +\emph{concept bottleneck models} by editing their predicted concept values and +propagating these changes to the final prediction. On x-ray grading and bird +identification, concept bottleneck models achieve competitive accuracy with +standard end-to-end models, while enabling interpretation in terms of +high-level clinical concepts (""bone spurs"") or bird attributes (""wing color""). 
+These models also allow for richer human-model interaction: accuracy improves +significantly if we can correct model mistakes on concepts at test time." +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_firstAuthor|Pang Wei Koh +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_updated|2020-07-09T07:47:28Z +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_title|Concept Bottleneck Models +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_published|2020-07-09T07:47:28Z +http://www.semanlink.net/doc/2020/07/2007_04612_concept_bottleneck|arxiv_num|2007.04612 +http://www.semanlink.net/doc/2021/05/fastai_v2_cheat_sheets|creationDate|2021-05-10 +http://www.semanlink.net/doc/2021/05/fastai_v2_cheat_sheets|tag|http://www.semanlink.net/tag/cheat_sheet +http://www.semanlink.net/doc/2021/05/fastai_v2_cheat_sheets|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2021/05/fastai_v2_cheat_sheets|title|fastai v2 cheat sheets +http://www.semanlink.net/doc/2021/05/fastai_v2_cheat_sheets|bookmarkOf|https://www.cognitivefactory.fr/fastaidocs/ +http://www.semanlink.net/doc/2021/05/fastai_v2_cheat_sheets|creationTime|2021-05-10T08:19:14Z +http://www.semanlink.net/doc/2020/07/a_collection_of_300_survey_pap|creationDate|2020-07-18 +http://www.semanlink.net/doc/2020/07/a_collection_of_300_survey_pap|tag|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/doc/2020/07/a_collection_of_300_survey_pap|tag|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/doc/2020/07/a_collection_of_300_survey_pap|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/07/a_collection_of_300_survey_pap|title|A collection of 300+ survey papers on NLP and ML +http://www.semanlink.net/doc/2020/07/a_collection_of_300_survey_pap|bookmarkOf|https://github.com/NiuTrans/ABigSurvey +http://www.semanlink.net/doc/2020/07/a_collection_of_300_survey_pap|creationTime|2020-07-18T13:28:26Z +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|creationDate|2021-04-12 +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|tag|http://www.semanlink.net/tag/okapi_bm25 +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|tag|http://www.semanlink.net/tag/embeddings_in_ir +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|tag|http://www.semanlink.net/tag/semantic_search +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|arxiv_author|Udhav Sethi +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|arxiv_author|Anup Anand Deshmukh +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|title|[2007.12603] IR-BERT: Leveraging BERT for Semantic Search in Background Linking for News Articles +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|bookmarkOf|https://arxiv.org/abs/2007.12603 +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|creationTime|2021-04-12T18:27:34Z +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|arxiv_summary|"This work describes our two approaches for the background linking task of +TREC 2020 News Track. 
The main objective of this task is to recommend a list of +relevant articles that the reader should refer to in order to understand the +context and gain background information of the query article. Our first +approach focuses on building an effective search query by combining weighted +keywords extracted from the query document and uses BM25 for retrieval. The +second approach leverages the capability of SBERT (Nils Reimers et al.) to +learn contextual representations of the query in order to perform semantic +search over the corpus. We empirically show that employing a language model +benefits our approach in understanding the context as well as the background of +the query article. The proposed approaches are evaluated on the TREC 2018 +Washington Post dataset and our best model outperforms the TREC median as well +as the highest scoring model of 2018 in terms of the nDCG@5 metric. We further +propose a diversity measure to evaluate the effectiveness of the various +approaches in retrieving a diverse set of documents. This would potentially +motivate researchers to work on introducing diversity in their recommended +list. We have open sourced our implementation on Github and plan to submit our +runs for the background linking task in TREC 2020." +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|arxiv_firstAuthor|Anup Anand Deshmukh +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|arxiv_updated|2020-07-24T16:02:14Z +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|arxiv_title|IR-BERT: Leveraging BERT for Semantic Search in Background Linking for News Articles +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|arxiv_published|2020-07-24T16:02:14Z +http://www.semanlink.net/doc/2021/04/2007_12603_ir_bert_leveragin|arxiv_num|2007.12603 +http://www.semanlink.net/doc/2020/07/l%E2%80%99aventure_citoyenne_des_semenc|creationDate|2020-07-31 +http://www.semanlink.net/doc/2020/07/l%E2%80%99aventure_citoyenne_des_semenc|tag|http://www.semanlink.net/tag/semences_paysanes +http://www.semanlink.net/doc/2020/07/l%E2%80%99aventure_citoyenne_des_semenc|title|L’aventure citoyenne des semences paysannes, « commun » nourricier +http://www.semanlink.net/doc/2020/07/l%E2%80%99aventure_citoyenne_des_semenc|bookmarkOf|https://www.lemonde.fr/series-d-ete/article/2020/07/31/l-aventure-citoyenne-des-semences-paysannes-commun-nourricier_6047785_3451060.html +http://www.semanlink.net/doc/2020/07/l%E2%80%99aventure_citoyenne_des_semenc|creationTime|2020-07-31T15:50:11Z +http://www.semanlink.net/doc/2021/01/quantum_neuromorphic_computing_|creationDate|2021-01-04 +http://www.semanlink.net/doc/2021/01/quantum_neuromorphic_computing_|tag|http://www.semanlink.net/tag/brains_in_silicon +http://www.semanlink.net/doc/2021/01/quantum_neuromorphic_computing_|tag|http://www.semanlink.net/tag/quantum_neuromorphic_computing +http://www.semanlink.net/doc/2021/01/quantum_neuromorphic_computing_|tag|http://www.semanlink.net/tag/julie_grollier +http://www.semanlink.net/doc/2021/01/quantum_neuromorphic_computing_|title|Quantum neuromorphic computing: Applied Physics Letters: Vol 117, No 15 +http://www.semanlink.net/doc/2021/01/quantum_neuromorphic_computing_|bookmarkOf|https://aip.scitation.org/doi/10.1063/5.0020014 +http://www.semanlink.net/doc/2021/01/quantum_neuromorphic_computing_|creationTime|2021-01-04T09:36:49Z +http://www.semanlink.net/doc/2021/08/ete_comme_hiver_le_regime_de_p|creationDate|2021-08-16 
+http://www.semanlink.net/doc/2021/08/ete_comme_hiver_le_regime_de_p|tag|http://www.semanlink.net/tag/sahara
+http://www.semanlink.net/doc/2021/08/ete_comme_hiver_le_regime_de_p|tag|http://www.semanlink.net/tag/paleoclimatologie
+http://www.semanlink.net/doc/2021/08/ete_comme_hiver_le_regime_de_p|title|Eté comme hiver, le régime de pluie à l'origine du Sahara vert il y a 9000 ans INEE
+http://www.semanlink.net/doc/2021/08/ete_comme_hiver_le_regime_de_p|bookmarkOf|https://inee.cnrs.fr/fr/cnrsinfo/ete-comme-hiver-le-regime-de-pluie-lorigine-du-sahara-vert-il-y-9000-ans
+http://www.semanlink.net/doc/2021/08/ete_comme_hiver_le_regime_de_p|creationTime|2021-08-16T09:43:45Z
+http://www.semanlink.net/doc/2021/10/james_briggs_sur_twitter_fre|creationDate|2021-10-16
+http://www.semanlink.net/doc/2021/10/james_briggs_sur_twitter_fre|tag|http://www.semanlink.net/tag/nearest_neighbor_search
+http://www.semanlink.net/doc/2021/10/james_briggs_sur_twitter_fre|tag|http://www.semanlink.net/tag/cosine_similarity
+http://www.semanlink.net/doc/2021/10/james_briggs_sur_twitter_fre|tag|http://www.semanlink.net/tag/tutorial
+http://www.semanlink.net/doc/2021/10/james_briggs_sur_twitter_fre|tag|http://www.semanlink.net/tag/faiss
+http://www.semanlink.net/doc/2021/10/james_briggs_sur_twitter_fre|title|"James Briggs sur Twitter : ""*free* course on vector similarity search and Faiss..."""
+http://www.semanlink.net/doc/2021/10/james_briggs_sur_twitter_fre|bookmarkOf|https://twitter.com/jamescalam/status/1449044040310263814
+http://www.semanlink.net/doc/2021/10/james_briggs_sur_twitter_fre|creationTime|2021-10-16T13:39:33Z
+http://www.semanlink.net/doc/2020/09/elastic_transformers_making_be|creationDate|2020-09-08
+http://www.semanlink.net/doc/2020/09/elastic_transformers_making_be|tag|http://www.semanlink.net/tag/huggingface_transformers
+http://www.semanlink.net/doc/2020/09/elastic_transformers_making_be|tag|http://www.semanlink.net/tag/elasticsearch
+http://www.semanlink.net/doc/2020/09/elastic_transformers_making_be|title|Elastic Transformers. Making BERT stretchy — Scalable… by Mihail Dungarov Sep, 2020 Medium
+http://www.semanlink.net/doc/2020/09/elastic_transformers_making_be|bookmarkOf|https://medium.com/@mihail.dungarov/elastic-transformers-ae011e8f5b88
+http://www.semanlink.net/doc/2020/09/elastic_transformers_making_be|creationTime|2020-09-08T19:41:13Z
+http://www.semanlink.net/doc/2021/10/alphafold_2_is_here_what%E2%80%99s_beh|creationDate|2021-10-20
+http://www.semanlink.net/doc/2021/10/alphafold_2_is_here_what%E2%80%99s_beh|tag|http://www.semanlink.net/tag/alphafold
+http://www.semanlink.net/doc/2021/10/alphafold_2_is_here_what%E2%80%99s_beh|comment|> to recap: AlphaFold 2 finds similar sequences to the input, extracts the information using an especial neural network architecture (a transformer), and then passes that information to another neural network that produces a structure.
+http://www.semanlink.net/doc/2021/10/alphafold_2_is_here_what%E2%80%99s_beh|title|AlphaFold 2 is here: what’s behind the structure prediction miracle Oxford Protein Informatics Group +http://www.semanlink.net/doc/2021/10/alphafold_2_is_here_what%E2%80%99s_beh|bookmarkOf|https://www.blopig.com/blog/2021/07/alphafold-2-is-here-whats-behind-the-structure-prediction-miracle/ +http://www.semanlink.net/doc/2021/10/alphafold_2_is_here_what%E2%80%99s_beh|creationTime|2021-10-20T00:31:53Z +http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl|creationDate|2021-09-30 +http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl|tag|http://www.semanlink.net/tag/nlp_princeton +http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl|tag|http://www.semanlink.net/tag/emnlp_2021 +http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl|tag|http://www.semanlink.net/tag/dense_passage_retrieval +http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl|tag|http://www.semanlink.net/tag/acl_2021 +http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl|comment|> DensePhrases is a text retrieval model that can return phrases, sentences, passages, or documents for your natural language inputs. Using billions of dense phrase vectors from the entire Wikipedia, DensePhrases searches phrase-level answers to your questions in real-time or retrieves passages for downstream tasks. +http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl|title|princeton-nlp/DensePhrases: ACL'2021: Learning Dense Representations of Phrases at Scale; EMNLP'2021: Phrase Retrieval Learns Passage Retrieval, Too +http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl|bookmarkOf|https://github.com/princeton-nlp/DensePhrases +http://www.semanlink.net/doc/2021/09/princeton_nlp_densephrases_acl|creationTime|2021-09-30T14:52:17Z +http://www.semanlink.net/doc/2021/09/structuring_your_project_the_|creationDate|2021-09-23 +http://www.semanlink.net/doc/2021/09/structuring_your_project_the_|tag|http://www.semanlink.net/tag/python +http://www.semanlink.net/doc/2021/09/structuring_your_project_the_|title|Structuring Your Project — The Hitchhiker's Guide to Python +http://www.semanlink.net/doc/2021/09/structuring_your_project_the_|bookmarkOf|https://docs.python-guide.org/writing/structure/ +http://www.semanlink.net/doc/2021/09/structuring_your_project_the_|creationTime|2021-09-23T12:58:28Z +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|creationDate|2020-06-29 +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|tag|http://www.semanlink.net/tag/emnlp_2019 +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|tag|http://www.semanlink.net/tag/information_bottleneck_method +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|tag|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|arxiv_author|Xiang Lisa Li +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|arxiv_author|Jason Eisner +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|comment|EMNLP best paper award. 
[Related blog post](doc:2020/06/information_bottleneck_for_nlp_) +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|relatedDoc|http://www.semanlink.net/doc/2020/06/information_bottleneck_for_nlp_ +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|title|[1910.00163] Specializing Word Embeddings (for Parsing) by Information Bottleneck +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|bookmarkOf|https://www.aclweb.org/anthology/D19-1276/ +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|bookmarkOf|https://arxiv.org/abs/1910.00163 +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|creationTime|2020-06-29T10:08:09Z +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|arxiv_summary|"Pre-trained word embeddings like ELMo and BERT contain rich syntactic and +semantic information, resulting in state-of-the-art performance on various +tasks. We propose a very fast variational information bottleneck (VIB) method +to nonlinearly compress these embeddings, keeping only the information that +helps a discriminative parser. We compress each word embedding to either a +discrete tag or a continuous vector. In the discrete version, our automatically +compressed tags form an alternative tag set: we show experimentally that our +tags capture most of the information in traditional POS tag annotations, but +our tag sequences can be parsed more accurately at the same level of tag +granularity. In the continuous version, we show experimentally that moderately +compressing the word embeddings by our method yields a more accurate parser in +8 of 9 languages, unlike simple dimensionality reduction." +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|arxiv_firstAuthor|Xiang Lisa Li +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|arxiv_updated|2019-10-01T00:47:31Z +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|arxiv_title|Specializing Word Embeddings (for Parsing) by Information Bottleneck +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|arxiv_published|2019-10-01T00:47:31Z +http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_|arxiv_num|1910.00163 +http://www.semanlink.net/doc/2021/02/fp_servant_sur_twitter_on_le|creationDate|2021-02-19 +http://www.semanlink.net/doc/2021/02/fp_servant_sur_twitter_on_le|tag|http://www.semanlink.net/tag/fps_tweet +http://www.semanlink.net/doc/2021/02/fp_servant_sur_twitter_on_le|tag|http://www.semanlink.net/tag/neonicotinoides +http://www.semanlink.net/doc/2021/02/fp_servant_sur_twitter_on_le|tag|http://www.semanlink.net/tag/macronie +http://www.semanlink.net/doc/2021/02/fp_servant_sur_twitter_on_le|tag|http://www.semanlink.net/tag/enseignement_francais +http://www.semanlink.net/doc/2021/02/fp_servant_sur_twitter_on_le|comment|"Néonicotinoïdes : des dérogations fondées sur une erreur de calcul + +Les maths à l'école, faudrait vraiment y faire quelque chose" +http://www.semanlink.net/doc/2021/02/fp_servant_sur_twitter_on_le|title|"FP Servant sur Twitter : ""On le savait pourtant qu'ils sont nuls en math, au gouvernement""" +http://www.semanlink.net/doc/2021/02/fp_servant_sur_twitter_on_le|bookmarkOf|https://twitter.com/hyperfp/status/1362861610059055106?s=20 +http://www.semanlink.net/doc/2021/02/fp_servant_sur_twitter_on_le|creationTime|2021-02-19T21:31:12Z +http://www.semanlink.net/doc/2021/05/dirt_ddiscovery_of_inference_ru|creationDate|2021-05-13 
+http://www.semanlink.net/doc/2021/05/dirt_ddiscovery_of_inference_ru|tag|http://www.semanlink.net/tag/relation_learning +http://www.semanlink.net/doc/2021/05/dirt_ddiscovery_of_inference_ru|comment|"> unsupervised method for discovering inference rules from text, such as ""X is author of Y ≈ X wrote Y"", ""X solved Y ≈ X found a solution to Y"", and ""X caused Y ≈ Y is triggered by X"". +> Our algorithm is based on an **extended version of Harris' Distributional Hypothesis**, which states that words that occurred in the same contexts tend to be similar. Instead of using this hypothesis on words, we apply it to paths in the dependency trees of a parsed corpus. + +[Cited by](doc:2021/05/1906_03158_matching_the_blank)" +http://www.semanlink.net/doc/2021/05/dirt_ddiscovery_of_inference_ru|relatedDoc|http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank +http://www.semanlink.net/doc/2021/05/dirt_ddiscovery_of_inference_ru|title|DIRT Discovery of inference rules from text (2001) +http://www.semanlink.net/doc/2021/05/dirt_ddiscovery_of_inference_ru|bookmarkOf|https://dl.acm.org/doi/10.1145/502512.502559 +http://www.semanlink.net/doc/2021/05/dirt_ddiscovery_of_inference_ru|creationTime|2021-05-13T00:56:25Z +http://www.semanlink.net/doc/2020/11/recherche_multilingue_grace_a_l|creationDate|2020-11-26 +http://www.semanlink.net/doc/2020/11/recherche_multilingue_grace_a_l|tag|http://www.semanlink.net/tag/language_identification +http://www.semanlink.net/doc/2020/11/recherche_multilingue_grace_a_l|tag|http://www.semanlink.net/tag/elasticsearch +http://www.semanlink.net/doc/2020/11/recherche_multilingue_grace_a_l|title|Recherche multilingue grâce à la détection de la langue dans Elasticsearch Elastic Blog +http://www.semanlink.net/doc/2020/11/recherche_multilingue_grace_a_l|bookmarkOf|https://www.elastic.co/fr/blog/multilingual-search-using-language-identification-in-elasticsearch +http://www.semanlink.net/doc/2020/11/recherche_multilingue_grace_a_l|creationTime|2020-11-26T18:24:50Z +http://www.semanlink.net/doc/2021/08/vallee_de_l_azawagh|creationDate|2021-08-26 +http://www.semanlink.net/doc/2021/08/vallee_de_l_azawagh|tag|http://www.semanlink.net/tag/archeologie_du_niger +http://www.semanlink.net/doc/2021/08/vallee_de_l_azawagh|title|Vallée de l'Azawagh +http://www.semanlink.net/doc/2021/08/vallee_de_l_azawagh|bookmarkOf|https://horizon.documentation.ird.fr/exl-doc/pleins_textes/divers09-03/010020000.pdf +http://www.semanlink.net/doc/2021/08/vallee_de_l_azawagh|creationTime|2021-08-26T14:57:43Z +http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m|creationDate|2021-09-23 +http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m|tag|http://www.semanlink.net/tag/akkadian_language +http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m|tag|http://www.semanlink.net/tag/low_resource_languages +http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m|tag|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m|comment|[[2109.04513] Filling the Gaps in Ancient Akkadian Texts: A Masked Language Modelling Approach](doc:2021/09/2109_04513_filling_the_gaps_i) +http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m|relatedDoc|http://www.semanlink.net/doc/2021/09/2109_04513_filling_the_gaps_i +http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m|title|"Koren Lazar sur Twitter : ""...Modern 
pre-trained language models are applicable even in extreme low-resource settings as the case of the ancient Akkadian language.""" +http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m|bookmarkOf|https://twitter.com/LazarKoren/status/1440663616722661385 +http://www.semanlink.net/doc/2021/09/koren_lazar_sur_twitter_m|creationTime|2021-09-23T10:42:17Z +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|creationDate|2021-04-11 +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|tag|http://www.semanlink.net/tag/porsche +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|tag|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|tag|http://www.semanlink.net/tag/language_models_as_knowledge_bases +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|arxiv_author|V. D. Viellieber +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|arxiv_author|M. Aßenmacher +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|title|[2012.02558] Pre-trained language models as knowledge bases for Automotive Complaint Analysis +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|bookmarkOf|https://arxiv.org/abs/2012.02558 +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|creationTime|2021-04-11T09:30:04Z +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|arxiv_summary|"Recently it has been shown that large pre-trained language models like BERT +(Devlin et al., 2018) are able to store commonsense factual knowledge captured +in its pre-training corpus (Petroni et al., 2019). In our work we further +evaluate this ability with respect to an application from industry creating a +set of probes specifically designed to reveal technical quality issues captured +as described incidents out of unstructured customer feedback in the automotive +industry. After probing the out-of-the-box versions of the pre-trained models +with fill-in-the-mask tasks we dynamically provide it with more knowledge via +continual pre-training on the Office of Defects Investigation (ODI) Complaints +data set. In our experiments the models exhibit performance regarding queries +on domain-specific topics compared to when queried on factual knowledge itself, +as Petroni et al. (2019) have done. For most of the evaluated architectures the +correct token is predicted with a $Precision@1$ ($P@1$) of above 60\%, while +for $P@5$ and $P@10$ even values of well above 80\% and up to 90\% respectively +are reached. These results show the potential of using language models as a +knowledge base for structured analysis of customer feedback." +http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|arxiv_firstAuthor|V. D. 
Viellieber
+http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|arxiv_updated|2020-12-04T12:49:47Z
+http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|arxiv_title|Pre-trained language models as knowledge bases for Automotive Complaint Analysis
+http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|arxiv_published|2020-12-04T12:49:47Z
+http://www.semanlink.net/doc/2021/04/2012_02558_pre_trained_langua|arxiv_num|2012.02558
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|creationDate|2021-01-14
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|tag|http://www.semanlink.net/tag/entity_linking
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|tag|http://www.semanlink.net/tag/nlp_facebook
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|arxiv_author|Gautier Izacard
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|arxiv_author|Nicola De Cao
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|arxiv_author|Fabio Petroni
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|arxiv_author|Sebastian Riedel
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|comment|"One sentence summary:
+
> We address entity retrieval by generating their unique name identifiers, left to right, in an autoregressive fashion, and conditioned on the context showing SOTA results in more than 20 datasets with a tiny fraction of the memory of recent systems.
+
> a transformer-based architecture, pre-trained
+with a language modeling objective (i.e., we use BART weights from Lewis et al. (2019)) and finetuned
+to generate entity names.
+
- [tweet](https://twitter.com/nicola_decao/status/1349354669643100161)"
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|title|[2010.00904] Autoregressive Entity Retrieval
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|bookmarkOf|https://arxiv.org/abs/2010.00904
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|creationTime|2021-01-14T10:04:01Z
+http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|arxiv_summary|"Entities are at the center of how we represent and aggregate knowledge. For
+instance, Encyclopedias such as Wikipedia are structured by entities (e.g., one
+per article). The ability to retrieve such entities given a query is
+fundamental for knowledge-intensive tasks such as entity linking and
+open-domain question answering. One way to understand current approaches is as
+classifiers among atomic labels, one for each entity. Their weight vectors are
+dense entity representations produced by encoding entity information such as
+descriptions. This approach leads to several shortcomings: i) context and
+entity affinity is mainly captured through a vector dot product, potentially
+missing fine-grained interactions between the two; ii) a large memory footprint
+is needed to store dense representations when considering large entity sets;
+iii) an appropriately hard set of negative data has to be subsampled at
+training time. We propose GENRE, the first system that retrieves entities by
+generating their unique names, left to right, token-by-token in an
+autoregressive fashion, and conditioned on the context.
This enables to +mitigate the aforementioned technical issues: i) the autoregressive formulation +allows us to directly capture relations between context and entity name, +effectively cross encoding both; ii) the memory footprint is greatly reduced +because the parameters of our encoder-decoder architecture scale with +vocabulary size, not entity count; iii) the exact softmax loss can be +efficiently computed without the need to subsample negative data. We show the +efficacy of the approach with more than 20 datasets on entity disambiguation, +end-to-end entity linking and document retrieval tasks, achieving new SOTA, or +very competitive results while using a tiny fraction of the memory of competing +systems. Finally, we demonstrate that new entities can be added by simply +specifying their unambiguous name." +http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|arxiv_firstAuthor|Nicola De Cao +http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|arxiv_updated|2020-10-02T10:13:31Z +http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|arxiv_title|Autoregressive Entity Retrieval +http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|arxiv_published|2020-10-02T10:13:31Z +http://www.semanlink.net/doc/2021/01/2010_00904_autoregressive_ent|arxiv_num|2010.00904 +http://www.semanlink.net/doc/2021/03/decouverte_d_un_artefact_datant|creationDate|2021-03-26 +http://www.semanlink.net/doc/2021/03/decouverte_d_un_artefact_datant|tag|http://www.semanlink.net/tag/pedra_furada +http://www.semanlink.net/doc/2021/03/decouverte_d_un_artefact_datant|title|Découverte d'un artefact datant de 24 000 ans à Vale da Pedra Furada, Piauí, Brésil INSHS +http://www.semanlink.net/doc/2021/03/decouverte_d_un_artefact_datant|bookmarkOf|https://www.inshs.cnrs.fr/fr/cnrsinfo/decouverte-dun-artefact-datant-de-24-000-ans-vale-da-pedra-furada-piaui-bresil +http://www.semanlink.net/doc/2021/03/decouverte_d_un_artefact_datant|creationTime|2021-03-26T09:57:36Z +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|creationDate|2021-10-16 +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|tag|http://www.semanlink.net/tag/self_training +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|tag|http://www.semanlink.net/tag/zero_shot_text_classifier +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|tag|http://www.semanlink.net/tag/text_classification_using_label_names_only +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_author|Yunyi Zhang +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_author|Heng Ji +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_author|Jiawei Han +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_author|Jiaxin Huang +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_author|Chao Zhang +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_author|Yu Meng +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_author|Chenyan Xiong +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|comment|"> In this paper, we explore the potential of only **using the label name of each class** to train classification models on unlabeled data, **without using any labeled documents**. 
We use pre-trained neural language models both as general linguistic knowledge sources for category understanding and as representation learning models for document classification. Our method +> 1. associates semantically related words with the label names, +> 2. finds category-indicative words and trains the model to predict their implied categories, and +> 3. generalizes the model via self-training." +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|title|[2010.07245] Text Classification Using Label Names Only: A Language Model Self-Training Approach +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|bookmarkOf|https://arxiv.org/abs/2010.07245 +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|creationTime|2021-10-16T13:48:25Z +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_summary|"Current text classification methods typically require a good number of +human-labeled documents as training data, which can be costly and difficult to +obtain in real applications. Humans can perform classification without seeing +any labeled examples but only based on a small set of words describing the +categories to be classified. In this paper, we explore the potential of only +using the label name of each class to train classification models on unlabeled +data, without using any labeled documents. We use pre-trained neural language +models both as general linguistic knowledge sources for category understanding +and as representation learning models for document classification. Our method +(1) associates semantically related words with the label names, (2) finds +category-indicative words and trains the model to predict their implied +categories, and (3) generalizes the model via self-training. We show that our +model achieves around 90% accuracy on four benchmark datasets including topic +and sentiment classification without using any labeled documents but learning +from unlabeled data supervised by at most 3 words (1 in most cases) per class +as the label name." +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_firstAuthor|Yu Meng +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_updated|2020-10-14T17:06:41Z +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_title|Text Classification Using Label Names Only: A Language Model Self-Training Approach +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_published|2020-10-14T17:06:41Z +http://www.semanlink.net/doc/2021/10/2010_07245_text_classificatio|arxiv_num|2010.07245 +http://www.semanlink.net/doc/2021/09/www_ingall_niger_org|creationDate|2021-09-05 +http://www.semanlink.net/doc/2021/09/www_ingall_niger_org|tag|http://www.semanlink.net/tag/niger +http://www.semanlink.net/doc/2021/09/www_ingall_niger_org|tag|http://www.semanlink.net/tag/histoire_du_niger +http://www.semanlink.net/doc/2021/09/www_ingall_niger_org|comment|Une Histoire de l'Ighazer et de sa capitale, la petite ville d'In Gall, siège de la Cure Salée, la plus grande transhumance d'Afrique de l'ouest. 
+http://www.semanlink.net/doc/2021/09/www_ingall_niger_org|title|www.ingall-niger.org +http://www.semanlink.net/doc/2021/09/www_ingall_niger_org|bookmarkOf|https://www.ingall-niger.org/ +http://www.semanlink.net/doc/2021/09/www_ingall_niger_org|creationTime|2021-09-05T17:30:02Z +http://www.semanlink.net/doc/2020/12/entre_colere_et_culpabilite_ce|creationDate|2020-12-19 +http://www.semanlink.net/doc/2020/12/entre_colere_et_culpabilite_ce|tag|http://www.semanlink.net/tag/violence_policiere +http://www.semanlink.net/doc/2020/12/entre_colere_et_culpabilite_ce|tag|http://www.semanlink.net/tag/macronie +http://www.semanlink.net/doc/2020/12/entre_colere_et_culpabilite_ce|tag|http://www.semanlink.net/tag/france_police +http://www.semanlink.net/doc/2020/12/entre_colere_et_culpabilite_ce|comment|> « Vous n’aviez qu’à rester chez vous ! », ont répondu les policiers à Cécile, qui cherchait à s’extraire d’une nasse +http://www.semanlink.net/doc/2020/12/entre_colere_et_culpabilite_ce|title|Entre colère et culpabilité, ces Français qui renoncent à manifester par peur des violences +http://www.semanlink.net/doc/2020/12/entre_colere_et_culpabilite_ce|bookmarkOf|https://www.lemonde.fr/police-justice/article/2020/12/19/j-ai-eu-peur-pour-ma-vie-face-a-l-escalade-de-la-violence-ils-ont-renonce-a-manifester_6063917_1653578.html +http://www.semanlink.net/doc/2020/12/entre_colere_et_culpabilite_ce|creationTime|2020-12-19T17:15:56Z +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|creationDate|2020-08-14 +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|tag|http://www.semanlink.net/tag/graph_attention_networks +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|tag|http://www.semanlink.net/tag/classification_relations_between_classes +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|tag|http://www.semanlink.net/tag/text_multi_label_classification +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|arxiv_author|Muru Selvakumar +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|arxiv_author|Ankit Pal +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|arxiv_author|Malaikannan Sankarasubbu +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|comment|"> **Existing methods tend to ignore the relationship among labels**. + +This model employs [Graph Attention Networks](tag:graph_attention_networks) (GAT) to find the correlation between +labels. The generated classifiers are applied to sentence feature vectors obtained from the text feature extraction network (BiLSTM) to enable end-to-end training. + + +> GAT network takes the node features and adjacency +matrix that represents the graph data as inputs. +The adjacency matrix is constructed based on +the samples. **In our case, we do not have a graph +dataset. Instead, we learn the adjacency matrix**, hoping +that the model will determine the graph, thereby +learning the correlation of the labels. +> Our intuition is that by modeling the correlation +among labels as a weighted graph, we force the GAT +network to learn such that the adjacency matrix and +the attention weights together represent the correlation. 
+ +// TODO compare with [this](doc:2019/06/_1905_10070_label_aware_docume)" +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|relatedDoc|http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|title|[2003.11644] MAGNET: Multi-Label Text Classification using Attention-based Graph Neural Network +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|bookmarkOf|https://arxiv.org/abs/2003.11644 +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|creationTime|2020-08-14T16:11:43Z +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|arxiv_summary|"In Multi-Label Text Classification (MLTC), one sample can belong to more than +one class. It is observed that most MLTC tasks, there are dependencies or +correlations among labels. Existing methods tend to ignore the relationship +among labels. In this paper, a graph attention network-based model is proposed +to capture the attentive dependency structure among the labels. The graph +attention network uses a feature matrix and a correlation matrix to capture and +explore the crucial dependencies between the labels and generate classifiers +for the task. The generated classifiers are applied to sentence feature vectors +obtained from the text feature extraction network (BiLSTM) to enable end-to-end +training. Attention allows the system to assign different weights to neighbor +nodes per label, thus allowing it to learn the dependencies among labels +implicitly. The results of the proposed model are validated on five real-world +MLTC datasets. The proposed model achieves similar or better performance +compared to the previous state-of-the-art models." +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|arxiv_firstAuthor|Ankit Pal +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|arxiv_updated|2020-03-22T17:12:43Z +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|arxiv_title|Multi-Label Text Classification using Attention-based Graph Neural Network +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|arxiv_published|2020-03-22T17:12:43Z +http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c|arxiv_num|2003.11644 +http://www.semanlink.net/doc/2021/01/language_models_are_open_knowle|creationDate|2021-01-17 +http://www.semanlink.net/doc/2021/01/language_models_are_open_knowle|tag|http://www.semanlink.net/tag/language_models_as_knowledge_bases +http://www.semanlink.net/doc/2021/01/language_models_are_open_knowle|tag|http://www.semanlink.net/tag/language_models_knowledge +http://www.semanlink.net/doc/2021/01/language_models_are_open_knowle|comment|Related paper: [[2010.11967] Language Models are Open Knowledge Graphs](doc:2020/10/2010_11967_language_models_ar) +http://www.semanlink.net/doc/2021/01/language_models_are_open_knowle|relatedDoc|http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar +http://www.semanlink.net/doc/2021/01/language_models_are_open_knowle|title|Language Models are Open Knowledge Graphs... 
but are hard to mine - Towards Data Science +http://www.semanlink.net/doc/2021/01/language_models_are_open_knowle|bookmarkOf|https://towardsdatascience.com/language-models-are-open-knowledge-graphs-but-are-hard-to-mine-13e128f3d64d +http://www.semanlink.net/doc/2021/01/language_models_are_open_knowle|creationTime|2021-01-17T21:22:52Z +http://www.semanlink.net/doc/2020/12/salmon_run_word_sense_disambig|creationDate|2020-12-01 +http://www.semanlink.net/doc/2020/12/salmon_run_word_sense_disambig|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/12/salmon_run_word_sense_disambig|tag|http://www.semanlink.net/tag/word_sense_disambiguation +http://www.semanlink.net/doc/2020/12/salmon_run_word_sense_disambig|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2020/12/salmon_run_word_sense_disambig|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.semanlink.net/doc/2020/12/salmon_run_word_sense_disambig|title|Salmon Run: Word Sense Disambiguation using BERT as a Language Model +http://www.semanlink.net/doc/2020/12/salmon_run_word_sense_disambig|bookmarkOf|https://sujitpal.blogspot.com/2020/11/word-sense-disambiguation-using-bert-as.html +http://www.semanlink.net/doc/2020/12/salmon_run_word_sense_disambig|creationTime|2020-12-01T15:45:06Z +http://www.semanlink.net/doc/2021/09/%C2%AB_la_decroissance_n%E2%80%99est_ni_un_p|creationDate|2021-09-26 +http://www.semanlink.net/doc/2021/09/%C2%AB_la_decroissance_n%E2%80%99est_ni_un_p|tag|http://www.semanlink.net/tag/decroissance +http://www.semanlink.net/doc/2021/09/%C2%AB_la_decroissance_n%E2%80%99est_ni_un_p|title|« La décroissance n’est ni un programme ni même une théorie, mais une aspiration » +http://www.semanlink.net/doc/2021/09/%C2%AB_la_decroissance_n%E2%80%99est_ni_un_p|bookmarkOf|https://www.lemonde.fr/idees/article/2021/09/24/la-decroissance-n-est-ni-un-programme-ni-meme-une-theorie-mais-une-aspiration_6095900_3232.html +http://www.semanlink.net/doc/2021/09/%C2%AB_la_decroissance_n%E2%80%99est_ni_un_p|creationTime|2021-09-26T16:04:25Z +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|creationDate|2021-03-31 +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|tag|http://www.semanlink.net/tag/nlp_french +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|arxiv_author|Antoine J. -P. 
Tixier +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|arxiv_author|Moussa Kamal Eddine +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|arxiv_author|Michalis Vazirgiannis +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|comment|"[On HuggingFace](doc:2021/03/barthez_transformers_4_5_0_de) ; +[GitHub](https://github.com/moussaKam/BARThez) + +([same author](doc:?uri=https%3A%2F%2Fwww2018.thewebconf.org%2Fprogram%2Ftutorials-track%2Ftutorial-213%2F))" +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|relatedDoc|https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/ +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|relatedDoc|http://www.semanlink.net/doc/2021/03/barthez_transformers_4_5_0_de +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|title|[2010.12321] BARThez: a Skilled Pretrained French Sequence-to-Sequence Model +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|bookmarkOf|https://arxiv.org/abs/2010.12321 +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|creationTime|2021-03-31T19:08:05Z +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|arxiv_summary|"Inductive transfer learning has taken the entire NLP field by storm, with +models such as BERT and BART setting new state of the art on countless NLU +tasks. However, most of the available models and research have been conducted +for English. In this work, we introduce BARThez, the first large-scale +pretrained seq2seq model for French. Being based on BART, BARThez is +particularly well-suited for generative tasks. We evaluate BARThez on five +discriminative tasks from the FLUE benchmark and two generative tasks from a +novel summarization dataset, OrangeSum, that we created for this research. We +show BARThez to be very competitive with state-of-the-art BERT-based French +language models such as CamemBERT and FlauBERT. We also continue the +pretraining of a multilingual BART on BARThez' corpus, and show our resulting +model, mBARThez, to significantly boost BARThez' generative performance. Code, +data and models are publicly available." 
+http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|arxiv_firstAuthor|Moussa Kamal Eddine +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|arxiv_updated|2021-02-09T09:31:57Z +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|arxiv_title|BARThez: a Skilled Pretrained French Sequence-to-Sequence Model +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|arxiv_published|2020-10-23T11:57:33Z +http://www.semanlink.net/doc/2021/03/2010_12321_barthez_a_skilled|arxiv_num|2010.12321 +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|creationDate|2021-03-30 +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|arxiv_author|Xin Qian +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|arxiv_author|Chenyan Xiong +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|arxiv_author|Jordan Boyd-Graber +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|arxiv_author|Chen Zhao +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|comment|> delft builds a free-text knowledge graph from Wikipedia, with entities as nodes and sentences in which entities co-occur as edges +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|title|[2103.12876] Complex Factoid Question Answering with a Free-Text Knowledge Graph +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|bookmarkOf|https://arxiv.org/abs/2103.12876 +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|creationTime|2021-03-30T00:35:13Z +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|arxiv_summary|"We introduce DELFT, a factoid question answering system which combines the +nuance and depth of knowledge graph question answering approaches with the +broader coverage of free-text. DELFT builds a free-text knowledge graph from +Wikipedia, with entities as nodes and sentences in which entities co-occur as +edges. For each question, DELFT finds the subgraph linking question entity +nodes to candidates using text sentences as edges, creating a dense and high +coverage semantic graph. A novel graph neural network reasons over the +free-text graph-combining evidence on the nodes via information along edge +sentences-to select a final answer. Experiments on three question answering +datasets show DELFT can answer entity-rich questions better than machine +reading based models, bert-based answer ranking and memory networks. DELFT's +advantage comes from both the high coverage of its free-text knowledge +graph-more than double that of dbpedia relations-and the novel graph neural +network which reasons on the rich but noisy free-text evidence." 
+http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|arxiv_firstAuthor|Chen Zhao +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|arxiv_updated|2021-03-23T22:53:09Z +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|arxiv_title|Complex Factoid Question Answering with a Free-Text Knowledge Graph +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|arxiv_published|2021-03-23T22:53:09Z +http://www.semanlink.net/doc/2021/03/2103_12876_complex_factoid_qu|arxiv_num|2103.12876 +http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje|creationDate|2021-06-05 +http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje|tag|http://www.semanlink.net/tag/peter_breunig +http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje|tag|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje|tag|http://www.semanlink.net/tag/nok +http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje|comment|cf. [Premières sociétés structurées au Nigéria (Pour la Science 2012)](doc:2021/06/premieres_societes_structurees_) +http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje|relatedDoc|http://www.semanlink.net/doc/2021/06/premieres_societes_structurees_ +http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje|title|Outline of a New Research Project on the Nok Culture of Central Nigeria, West Africa +http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje|bookmarkOf|https://static1.squarespace.com/static/5bd0e66f8d97400eb0099556/t/5bddff4c8a922de6f7b1a874/1541275468604/Nyame+Akuma+Issue+073-breunig.pdf +http://www.semanlink.net/doc/2021/06/outline_of_a_new_research_proje|creationTime|2021-06-05T19:31:33Z +http://www.semanlink.net/doc/2020/08/mulan_a_java_library_for_multi|creationDate|2020-08-30 +http://www.semanlink.net/doc/2020/08/mulan_a_java_library_for_multi|tag|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/doc/2020/08/mulan_a_java_library_for_multi|tag|http://www.semanlink.net/tag/java_library +http://www.semanlink.net/doc/2020/08/mulan_a_java_library_for_multi|comment|[paper](https://www.jmlr.org/papers/volume12/tsoumakas11a/tsoumakas11a.pdf) +http://www.semanlink.net/doc/2020/08/mulan_a_java_library_for_multi|title|Mulan: A Java library for multi-label learning +http://www.semanlink.net/doc/2020/08/mulan_a_java_library_for_multi|bookmarkOf|http://mulan.sourceforge.net/ +http://www.semanlink.net/doc/2020/08/mulan_a_java_library_for_multi|creationTime|2020-08-30T12:17:00Z +http://www.semanlink.net/doc/2021/06/transformer_models_hugging_fa|creationDate|2021-06-15 +http://www.semanlink.net/doc/2021/06/transformer_models_hugging_fa|tag|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/doc/2021/06/transformer_models_hugging_fa|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2021/06/transformer_models_hugging_fa|title|Transformer models - Hugging Face Course +http://www.semanlink.net/doc/2021/06/transformer_models_hugging_fa|bookmarkOf|https://huggingface.co/course/chapter1 +http://www.semanlink.net/doc/2021/06/transformer_models_hugging_fa|creationTime|2021-06-15T09:41:10Z +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|creationDate|2020-08-12 
+http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|tag|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|arxiv_author|Nitesh Chawla +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|arxiv_author|Tomasz Kajdanowicz +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|arxiv_author|Piotr Szymański +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|comment|> low-complexity approach to multi-label classification built on top of two intuitions that **embedding a label space** may improve classification quality and that **label networks are a viable source of information** in multi-label problems +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|title|[1812.02956] LNEMLC: Label Network Embeddings for Multi-Label Classification +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|bookmarkOf|https://arxiv.org/abs/1812.02956 +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|creationTime|2020-08-12T17:07:25Z +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|arxiv_summary|"Multi-label classification aims to classify instances with discrete +non-exclusive labels. Most approaches on multi-label classification focus on +effective adaptation or transformation of existing binary and multi-class +learning approaches but fail in modelling the joint probability of labels or do +not preserve generalization abilities for unseen label combinations. To address +these issues we propose a new multi-label classification scheme, LNEMLC - Label +Network Embedding for Multi-Label Classification, that embeds the label network +and uses it to extend input space in learning and inference of any base +multi-label classifier. The approach allows capturing of labels' joint +probability at low computational complexity providing results comparable to the +best methods reported in the literature. We demonstrate how the method reveals +statistically significant improvements over the simple kNN baseline classifier. +We also provide hints for selecting the robust configuration that works +satisfactorily across data domains." +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|arxiv_firstAuthor|Piotr Szymański +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|arxiv_updated|2019-01-01T21:11:09Z +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|arxiv_title|LNEMLC: Label Network Embeddings for Multi-Label Classification +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|arxiv_published|2018-12-07T09:30:18Z +http://www.semanlink.net/doc/2020/08/1812_02956_lnemlc_label_netw|arxiv_num|1812.02956 +http://www.semanlink.net/doc/2021/10/l%E2%80%99origine_des_chevaux_domestiqu|creationDate|2021-10-20 +http://www.semanlink.net/doc/2021/10/l%E2%80%99origine_des_chevaux_domestiqu|tag|http://www.semanlink.net/tag/genetique_histoire +http://www.semanlink.net/doc/2021/10/l%E2%80%99origine_des_chevaux_domestiqu|tag|http://www.semanlink.net/tag/cheval +http://www.semanlink.net/doc/2021/10/l%E2%80%99origine_des_chevaux_domestiqu|comment|"> Le cheval moderne a été domestiqué environ 2 200 ans avant notre ère, dans le nord du Caucase. + +> ce cheval s’est répandu à travers l’Asie en même temps que les chariots, la roue à rayons et les langues indo-iraniennes. 
En revanche, les migrations vers l’Europe de populations indo-européennes des steppes, au cours du troisième millénaire avant notre ère, n’ont pas pu s’appuyer sur l’usage du cheval, sa domestication et sa diffusion étant postérieures."
+http://www.semanlink.net/doc/2021/10/l%E2%80%99origine_des_chevaux_domestiqu|title|L’origine des chevaux domestiques enfin établie CNRS
+http://www.semanlink.net/doc/2021/10/l%E2%80%99origine_des_chevaux_domestiqu|bookmarkOf|https://www.cnrs.fr/fr/lorigine-des-chevaux-domestiques-enfin-etablie
+http://www.semanlink.net/doc/2021/10/l%E2%80%99origine_des_chevaux_domestiqu|creationTime|2021-10-20T23:05:15Z
+http://www.semanlink.net/doc/2021/10/open_range_2003_film_|creationDate|2021-10-08
+http://www.semanlink.net/doc/2021/10/open_range_2003_film_|tag|http://www.semanlink.net/tag/western
+http://www.semanlink.net/doc/2021/10/open_range_2003_film_|tag|http://www.semanlink.net/tag/film_americain
+http://www.semanlink.net/doc/2021/10/open_range_2003_film_|tag|http://www.semanlink.net/tag/kevin_kostner
+http://www.semanlink.net/doc/2021/10/open_range_2003_film_|comment|De et avec Kevin Kostner, et une bonne gunfight
+http://www.semanlink.net/doc/2021/10/open_range_2003_film_|title|Open Range (2003 film)
+http://www.semanlink.net/doc/2021/10/open_range_2003_film_|bookmarkOf|https://en.wikipedia.org/wiki/Open_Range_(2003_film)
+http://www.semanlink.net/doc/2021/10/open_range_2003_film_|creationTime|2021-10-08T23:22:34Z
+http://www.semanlink.net/doc/2020/12/pemistahl_lingua_%F0%9F%91%84_the_most_a|creationDate|2020-12-12
+http://www.semanlink.net/doc/2020/12/pemistahl_lingua_%F0%9F%91%84_the_most_a|tag|http://www.semanlink.net/tag/language_identification
+http://www.semanlink.net/doc/2020/12/pemistahl_lingua_%F0%9F%91%84_the_most_a|tag|http://www.semanlink.net/tag/github_project
+http://www.semanlink.net/doc/2020/12/pemistahl_lingua_%F0%9F%91%84_the_most_a|tag|http://www.semanlink.net/tag/java
+http://www.semanlink.net/doc/2020/12/pemistahl_lingua_%F0%9F%91%84_the_most_a|title|pemistahl/lingua: natural language detection library for Java suitable for long and short text alike
+http://www.semanlink.net/doc/2020/12/pemistahl_lingua_%F0%9F%91%84_the_most_a|bookmarkOf|https://github.com/pemistahl/lingua
+http://www.semanlink.net/doc/2020/12/pemistahl_lingua_%F0%9F%91%84_the_most_a|creationTime|2020-12-12T01:40:23Z
+http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|creationDate|2021-10-08
+http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|tag|http://www.semanlink.net/tag/retrieval_based_nlp
+http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|tag|http://www.semanlink.net/tag/nlp_stanford
+http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|arxiv_author|Nicola Tonellotto
+http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|arxiv_author|Torsten Suel
+http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|arxiv_author|Antonio Mallia
+http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|arxiv_author|Omar Khattab
+http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|comment|Mentioned in [Building Scalable, Explainable, and Adaptive NLP Models with Retrieval SAIL Blog](doc:2021/10/building_scalable_explainable_) 
+http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|relatedDoc|http://www.semanlink.net/doc/2021/10/building_scalable_explainable_ +http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|title|[2104.12016] Learning Passage Impacts for Inverted Indexes +http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|bookmarkOf|https://arxiv.org/abs/2104.12016 +http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|creationTime|2021-10-08T14:05:42Z +http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|arxiv_summary|"Neural information retrieval systems typically use a cascading pipeline, in +which a first-stage model retrieves a candidate set of documents and one or +more subsequent stages re-rank this set using contextualized language models +such as BERT. In this paper, we propose DeepImpact, a new document +term-weighting scheme suitable for efficient retrieval using a standard +inverted index. Compared to existing methods, DeepImpact improves impact-score +modeling and tackles the vocabulary-mismatch problem. In particular, DeepImpact +leverages DocT5Query to enrich the document collection and, using a +contextualized language model, directly estimates the semantic importance of +tokens in a document, producing a single-value representation for each token in +each document. Our experiments show that DeepImpact significantly outperforms +prior first-stage retrieval approaches by up to 17% on effectiveness metrics +w.r.t. DocT5Query, and, when deployed in a re-ranking scenario, can reach the +same effectiveness of state-of-the-art approaches with up to 5.1x speedup in +efficiency." +http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|arxiv_firstAuthor|Antonio Mallia +http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|arxiv_updated|2021-04-24T20:18:53Z +http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|arxiv_title|Learning Passage Impacts for Inverted Indexes +http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|arxiv_published|2021-04-24T20:18:53Z +http://www.semanlink.net/doc/2021/10/2104_12016_learning_passage_i|arxiv_num|2104.12016 +http://www.semanlink.net/doc/2020/10/wikifier|creationDate|2020-10-11 +http://www.semanlink.net/doc/2020/10/wikifier|tag|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/doc/2020/10/wikifier|tag|http://www.semanlink.net/tag/entity_discovery_and_linking +http://www.semanlink.net/doc/2020/10/wikifier|tag|http://www.semanlink.net/tag/semantic_annotation +http://www.semanlink.net/doc/2020/10/wikifier|comment|> Semantic Annotation Service for 100 Languages +http://www.semanlink.net/doc/2020/10/wikifier|title|Wikifier +http://www.semanlink.net/doc/2020/10/wikifier|bookmarkOf|http://wikifier.org/ +http://www.semanlink.net/doc/2020/10/wikifier|creationTime|2020-10-11T02:13:46Z +http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi|creationDate|2020-08-15 +http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi|tag|http://www.semanlink.net/tag/rare_events +http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi|tag|http://www.semanlink.net/tag/capsule_networks +http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi|tag|http://www.semanlink.net/tag/hierarchical_multi_label_text_classification +http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi|comment|"> Our results confirm the hypothesis that +capsule networks are especially advantageous +for rare events and 
structurally diverse categories,
+which we attribute to their ability to
+combine latent encoded information.
+>
+> For each category in the hierarchy, an associated
+capsule outputs latent information of the category
+in form of a vector as opposed to a single scalar
+value used in traditional neural networks"
+http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi|title|Hierarchical Multi-label Classification of Text with Capsule Networks (2019)
+http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi|bookmarkOf|https://www.aclweb.org/anthology/P19-2045.pdf
+http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi|creationTime|2020-08-15T14:50:39Z
+http://www.semanlink.net/doc/2021/10/valar_nmt_vastly_lacking_resou|creationDate|2021-10-14
+http://www.semanlink.net/doc/2021/10/valar_nmt_vastly_lacking_resou|tag|http://www.semanlink.net/tag/neural_machine_translation
+http://www.semanlink.net/doc/2021/10/valar_nmt_vastly_lacking_resou|tag|http://www.semanlink.net/tag/low_resource_languages
+http://www.semanlink.net/doc/2021/10/valar_nmt_vastly_lacking_resou|tag|http://www.semanlink.net/tag/nlp_stanford
+http://www.semanlink.net/doc/2021/10/valar_nmt_vastly_lacking_resou|comment|> We focus on extremely low-resource setting, where we are **limited to less than 10k parallel data and no mono-lingual corpora**... we create a character decoder-based seq2seq NMT model as a baseline and compare its performance on various levels of data scarcity. Then, we explore the performance benefit of transfer learning by training a model on a different language... Lastly, we use **language models and a noisy dictionary to augment our training data**. Utilizing both transfer learning and data augmentation, we see a 1.5 BLEU score improvement over the baseline
+http://www.semanlink.net/doc/2021/10/valar_nmt_vastly_lacking_resou|title|VaLaR NMT: Vastly Lacking Resources Neural Machine Translation (2019)
+http://www.semanlink.net/doc/2021/10/valar_nmt_vastly_lacking_resou|bookmarkOf|https://web.stanford.edu/class/archive/cs/cs224n/cs224n.1194/reports/custom/15811193.pdf
+http://www.semanlink.net/doc/2021/10/valar_nmt_vastly_lacking_resou|creationTime|2021-10-14T15:46:04Z
+http://www.semanlink.net/doc/2021/10/sahajtomar_french_semantic_%C2%B7_hu|creationDate|2021-10-14
+http://www.semanlink.net/doc/2021/10/sahajtomar_french_semantic_%C2%B7_hu|tag|http://www.semanlink.net/tag/discute_avec_raphael
+http://www.semanlink.net/doc/2021/10/sahajtomar_french_semantic_%C2%B7_hu|tag|http://www.semanlink.net/tag/huggingface_transformers
+http://www.semanlink.net/doc/2021/10/sahajtomar_french_semantic_%C2%B7_hu|tag|http://www.semanlink.net/tag/sbert
+http://www.semanlink.net/doc/2021/10/sahajtomar_french_semantic_%C2%B7_hu|tag|http://www.semanlink.net/tag/nlp_french
+http://www.semanlink.net/doc/2021/10/sahajtomar_french_semantic_%C2%B7_hu|tag|http://www.semanlink.net/tag/camembert_nlp
+http://www.semanlink.net/doc/2021/10/sahajtomar_french_semantic_%C2%B7_hu|title|Sahajtomar/french_semantic · Hugging Face
+http://www.semanlink.net/doc/2021/10/sahajtomar_french_semantic_%C2%B7_hu|bookmarkOf|https://huggingface.co/Sahajtomar/french_semantic
+http://www.semanlink.net/doc/2021/10/sahajtomar_french_semantic_%C2%B7_hu|creationTime|2021-10-14T16:08:39Z
+http://www.semanlink.net/doc/2021/09/dosso_toubal_n_06|creationDate|2021-09-17
+http://www.semanlink.net/doc/2021/09/dosso_toubal_n_06|tag|http://www.semanlink.net/tag/jermakoye
+http://www.semanlink.net/doc/2021/09/dosso_toubal_n_06|tag|http://www.semanlink.net/tag/dosso +http://www.semanlink.net/doc/2021/09/dosso_toubal_n_06|title|Dosso - TOUBAL N 06 +http://www.semanlink.net/doc/2021/09/dosso_toubal_n_06|bookmarkOf|https://docplayer.fr/docview/78/77156430/#file=/storage/78/77156430/77156430.pdf +http://www.semanlink.net/doc/2021/09/dosso_toubal_n_06|creationTime|2021-09-17T14:08:09Z +http://www.semanlink.net/doc/2020/08/malbouffe_et_covid_19_le_cockt|creationDate|2020-08-21 +http://www.semanlink.net/doc/2020/08/malbouffe_et_covid_19_le_cockt|tag|http://www.semanlink.net/tag/malbouffe +http://www.semanlink.net/doc/2020/08/malbouffe_et_covid_19_le_cockt|tag|http://www.semanlink.net/tag/imperialisme_americain +http://www.semanlink.net/doc/2020/08/malbouffe_et_covid_19_le_cockt|tag|http://www.semanlink.net/tag/mexique +http://www.semanlink.net/doc/2020/08/malbouffe_et_covid_19_le_cockt|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/08/malbouffe_et_covid_19_le_cockt|comment|> Coca-colonisation +http://www.semanlink.net/doc/2020/08/malbouffe_et_covid_19_le_cockt|title|Malbouffe et Covid-19, le cocktail mortel mexicain +http://www.semanlink.net/doc/2020/08/malbouffe_et_covid_19_le_cockt|bookmarkOf|https://www.lemonde.fr/planete/article/2020/08/21/malbouffe-et-covid-19-le-cocktail-mortel-mexicain_6049497_3244.html +http://www.semanlink.net/doc/2020/08/malbouffe_et_covid_19_le_cockt|creationTime|2020-08-21T19:47:17Z +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|creationDate|2021-10-16 +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|tag|http://www.semanlink.net/tag/zero_shot_learning +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|tag|http://www.semanlink.net/tag/zero_shot_text_classifier +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|arxiv_author|Muktabh Mayank Srivastava +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|arxiv_author|Pushpankar Kumar Pushp +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|comment|"> The model learns to predict whether a given sentence is related to a tag or not; unlike other classifiers that learn to classify the sentence as one of the possible classes + +input: concatenation of the embedding of text and embedding of tag ; output : related / not related (binary classifier) + +> We can say that this technique learns the concept of relatedness between +a sentence and a word that can be extended beyond datasets. That said, the levels of accuracy leave +a lot of scope for future work." +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|title|[1712.05972] Train Once, Test Anywhere: Zero-Shot Learning for Text Classification +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|bookmarkOf|https://arxiv.org/abs/1712.05972 +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|creationTime|2021-10-16T13:59:40Z +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|arxiv_summary|"Zero-shot Learners are models capable of predicting unseen classes. In this +work, we propose a Zero-shot Learning approach for text categorization. 
Our +method involves training model on a large corpus of sentences to learn the +relationship between a sentence and embedding of sentence's tags. Learning such +relationship makes the model generalize to unseen sentences, tags, and even new +datasets provided they can be put into same embedding space. The model learns +to predict whether a given sentence is related to a tag or not; unlike other +classifiers that learn to classify the sentence as one of the possible classes. +We propose three different neural networks for the task and report their +accuracy on the test set of the dataset used for training them as well as two +other standard datasets for which no retraining was done. We show that our +models generalize well across new unseen classes in both cases. Although the +models do not achieve the accuracy level of the state of the art supervised +models, yet it evidently is a step forward towards general intelligence in +natural language processing." +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|arxiv_firstAuthor|Pushpankar Kumar Pushp +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|arxiv_updated|2017-12-23T20:05:03Z +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|arxiv_title|Train Once, Test Anywhere: Zero-Shot Learning for Text Classification +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|arxiv_published|2017-12-16T15:17:07Z +http://www.semanlink.net/doc/2021/10/1712_05972_train_once_test_a|arxiv_num|1712.05972 +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|creationDate|2020-10-14 +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|tag|http://www.semanlink.net/tag/multimodal_classification +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|arxiv_author|Jong-Seok Lee +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|arxiv_author|Jun-Ho Choi +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|title|[1904.09078] EmbraceNet: A robust deep learning architecture for multimodal classification +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|bookmarkOf|https://arxiv.org/abs/1904.09078 +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|creationTime|2020-10-14T09:55:10Z +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|arxiv_summary|"Classification using multimodal data arises in many machine learning +applications. It is crucial not only to model cross-modal relationship +effectively but also to ensure robustness against loss of part of data or +modalities. In this paper, we propose a novel deep learning-based multimodal +fusion architecture for classification tasks, which guarantees compatibility +with any kind of learning models, deals with cross-modal information carefully, +and prevents performance degradation due to partial absence of data. We employ +two datasets for multimodal classification tasks, build models based on our +architecture and other state-of-the-art models, and analyze their performance +on various situations. The results show that our architecture outperforms the +other multimodal fusion architectures when some parts of data are not +available." 
+http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|arxiv_firstAuthor|Jun-Ho Choi +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|arxiv_updated|2019-04-19T04:46:29Z +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|arxiv_title|EmbraceNet: A robust deep learning architecture for multimodal classification +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|arxiv_published|2019-04-19T04:46:29Z +http://www.semanlink.net/doc/2020/10/1904_09078_embracenet_a_robu|arxiv_num|1904.09078 +http://www.semanlink.net/doc/2020/11/tl_dr_this_ai_summarizes_resea|creationDate|2020-11-24 +http://www.semanlink.net/doc/2020/11/tl_dr_this_ai_summarizes_resea|tag|http://www.semanlink.net/tag/allen_institute_for_ai_a2i +http://www.semanlink.net/doc/2020/11/tl_dr_this_ai_summarizes_resea|tag|http://www.semanlink.net/tag/automatic_summarization +http://www.semanlink.net/doc/2020/11/tl_dr_this_ai_summarizes_resea|tag|http://www.semanlink.net/tag/research_papers +http://www.semanlink.net/doc/2020/11/tl_dr_this_ai_summarizes_resea|comment|Blog post about [Semantic Scholar TLDR](doc:2020/11/semantic_scholar_%7C_tldr_feature) +http://www.semanlink.net/doc/2020/11/tl_dr_this_ai_summarizes_resea|relatedDoc|http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_tldr_feature +http://www.semanlink.net/doc/2020/11/tl_dr_this_ai_summarizes_resea|title|TL;DR: This AI summarizes research papers so you don’t have to +http://www.semanlink.net/doc/2020/11/tl_dr_this_ai_summarizes_resea|bookmarkOf|https://thenextweb.com/neural/2020/11/20/tldr-this-ai-summarizes-research-papers-so-you-dont-have-to/ +http://www.semanlink.net/doc/2020/11/tl_dr_this_ai_summarizes_resea|creationTime|2020-11-24T09:30:22Z +http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|creationDate|2021-04-11 +http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|tag|http://www.semanlink.net/tag/domain_adaptation +http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|tag|http://www.semanlink.net/tag/emnlp_2020 +http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|tag|http://www.semanlink.net/tag/nlp_harvard +http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|tag|http://www.semanlink.net/tag/domain_specific_bert +http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|tag|http://www.semanlink.net/tag/biomedical_nlp +http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|comment|"Focus on the Embedding of Domain-specific Vocabulary. + +> exBERT +adds a new domain-specific vocabulary and the corresponding +embedding layer, as well as a small +extension module to the original unmodified model + +> a pretraining +method allowing **low-cost embedding of +domain-specific vocabulary in the context of an +existing large pre-trained model such as BERT** + +> exBERT... explicitly incorporates +the new domain’s vocabulary, while being able to +**reuse the original pre-trained model’s weights as is** +to reduce required computation and training data. Specifically, exBERT extends BERT by augmenting +its embeddings for the original vocabulary with +new embeddings for the domain-specific vocabulary +via **a learned small “extension” module**. 
**The
+output of the original and extension modules are
+combined via a trainable weighted sum operation**
+
+In a way similar to the concept developed in [[1902.00751] Parameter-Efficient Transfer Learning for NLP](doc:2021/04/1902_00751_parameter_efficien), but not in the fine-tuning paradigm.
+
+[Github](https://github.com/cgmhaicenter/exBERT)"
+http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|relatedDoc|http://www.semanlink.net/doc/2021/04/1902_00751_parameter_efficien
+http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|title|exBERT: Extending Pre-trained Models with Domain-specific Vocabulary Under Constrained Training Resources - ACL Anthology
+http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|bookmarkOf|https://aclanthology.org/2020.findings-emnlp.129/
+http://www.semanlink.net/doc/2021/04/exbert_extending_pre_trained_m|creationTime|2021-04-11T10:13:43Z
+http://www.semanlink.net/doc/2020/12/supporting_content_decision_mak|creationDate|2020-12-11
+http://www.semanlink.net/doc/2020/12/supporting_content_decision_mak|tag|http://www.semanlink.net/tag/transfer_learning
+http://www.semanlink.net/doc/2020/12/supporting_content_decision_mak|tag|http://www.semanlink.net/tag/bert_kb
+http://www.semanlink.net/doc/2020/12/supporting_content_decision_mak|tag|http://www.semanlink.net/tag/netflix
+http://www.semanlink.net/doc/2020/12/supporting_content_decision_mak|tag|http://www.semanlink.net/tag/entity_embeddings
+http://www.semanlink.net/doc/2020/12/supporting_content_decision_mak|tag|http://www.semanlink.net/tag/link_prediction
+http://www.semanlink.net/doc/2020/12/supporting_content_decision_mak|title|Supporting content decision makers with machine learning Dec, 2020 Netflix TechBlog
+http://www.semanlink.net/doc/2020/12/supporting_content_decision_mak|bookmarkOf|https://netflixtechblog.com/supporting-content-decision-makers-with-machine-learning-995b7b76006f
+http://www.semanlink.net/doc/2020/12/supporting_content_decision_mak|creationTime|2020-12-11T13:34:30Z
+http://www.semanlink.net/doc/2021/01/%C2%AB_plus_vite_que_le_vent_%C2%BB_cet|creationDate|2021-01-26
+http://www.semanlink.net/doc/2021/01/%C2%AB_plus_vite_que_le_vent_%C2%BB_cet|tag|http://www.semanlink.net/tag/voile_mer
+http://www.semanlink.net/doc/2021/01/%C2%AB_plus_vite_que_le_vent_%C2%BB_cet|title|« Plus vite que le vent » : cette révolution technique qui fait décoller les bateaux du Vendée Globe
+http://www.semanlink.net/doc/2021/01/%C2%AB_plus_vite_que_le_vent_%C2%BB_cet|bookmarkOf|https://www.lemonde.fr/videos/video/2021/01/25/plus-vite-que-le-vent-cette-revolution-technique-qui-fait-decoller-les-bateaux-du-vendee-globe_6067552_1669088.html
+http://www.semanlink.net/doc/2021/01/%C2%AB_plus_vite_que_le_vent_%C2%BB_cet|creationTime|2021-01-26T00:09:50Z
+http://www.semanlink.net/doc/2021/10/modeling_ai_on_the_language_of_|creationDate|2021-10-20
+http://www.semanlink.net/doc/2021/10/modeling_ai_on_the_language_of_|tag|http://www.semanlink.net/tag/ai_stanford
+http://www.semanlink.net/doc/2021/10/modeling_ai_on_the_language_of_|tag|http://www.semanlink.net/tag/neuroscience_and_ai
+http://www.semanlink.net/doc/2021/10/modeling_ai_on_the_language_of_|comment|"> Using my language metaphor, I would say that AI researchers tend to use letters and jump directly to articles without writing the words and sentences in between. 
+ +> In the brain, a variety of architectures coexist and work together to generate general intelligence, whereas most AI systems rely on a single type of circuit architecture." +http://www.semanlink.net/doc/2021/10/modeling_ai_on_the_language_of_|title|Modeling AI on the Language of Brain Circuits and Architecture Wu Tsai Neurosciences Institute +http://www.semanlink.net/doc/2021/10/modeling_ai_on_the_language_of_|bookmarkOf|https://neuroscience.stanford.edu/news/modeling-ai-language-brain-circuits-and-architecture?sf150768224=1 +http://www.semanlink.net/doc/2021/10/modeling_ai_on_the_language_of_|creationTime|2021-10-20T23:12:28Z +http://www.semanlink.net/doc/2021/08/self_talk_obtain_knowledge_fro|creationDate|2021-08-09 +http://www.semanlink.net/doc/2021/08/self_talk_obtain_knowledge_fro|tag|http://www.semanlink.net/tag/natural_language_generation +http://www.semanlink.net/doc/2021/08/self_talk_obtain_knowledge_fro|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2021/08/self_talk_obtain_knowledge_fro|comment|> Who needs a dictionary when you have a Transformer model? +http://www.semanlink.net/doc/2021/08/self_talk_obtain_knowledge_fro|title|Self-Talk: Obtain Knowledge From Text Generation Transformer Models by Eric Fillion Aug, 2021 Towards Data Science +http://www.semanlink.net/doc/2021/08/self_talk_obtain_knowledge_fro|bookmarkOf|https://towardsdatascience.com/self-talk-obtain-knowledge-from-text-generation-transformer-models-918277dbfc8b +http://www.semanlink.net/doc/2021/08/self_talk_obtain_knowledge_fro|creationTime|2021-08-09T22:03:11Z +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|creationDate|2021-03-26 +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|tag|http://www.semanlink.net/tag/ranking_information_retrieval +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|tag|http://www.semanlink.net/tag/machine_learned_ranking +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|arxiv_author|Rodrigo Nogueira +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|arxiv_author|Kyunghyun Cho +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|comment|"a simple re-implementation of BERT for query-based passage re-ranking + +[""Slides of our WSDM 2021 tutorial ""Pretrained Transformers for Text Ranking: BERT and Beyond""](doc:2021/03/rodrigo_nogueira_sur_twitter_)" +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|relatedDoc|http://www.semanlink.net/doc/2021/03/rodrigo_nogueira_sur_twitter_ +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|title|[1901.04085] Passage Re-ranking with BERT +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|bookmarkOf|https://arxiv.org/abs/1901.04085 +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|creationTime|2021-03-26T01:49:42Z +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|arxiv_summary|"Recently, neural models pretrained on a language modeling task, such as ELMo +(Peters et al., 2017), OpenAI GPT (Radford et al., 2018), and BERT (Devlin et +al., 2018), have achieved impressive results on various 
natural language +processing tasks such as question-answering and natural language inference. In +this paper, we describe a simple re-implementation of BERT for query-based +passage re-ranking. Our system is the state of the art on the TREC-CAR dataset +and the top entry in the leaderboard of the MS MARCO passage retrieval task, +outperforming the previous state of the art by 27% (relative) in MRR@10. The +code to reproduce our results is available at +https://github.com/nyu-dl/dl4marco-bert" +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|arxiv_firstAuthor|Rodrigo Nogueira +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|arxiv_updated|2020-04-14T14:57:40Z +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|arxiv_title|Passage Re-ranking with BERT +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|arxiv_published|2019-01-13T23:27:58Z +http://www.semanlink.net/doc/2021/03/1901_04085_passage_re_ranking|arxiv_num|1901.04085 +http://www.semanlink.net/doc/2021/05/high_depth_african_genomes_info|creationDate|2021-05-08 +http://www.semanlink.net/doc/2021/05/high_depth_african_genomes_info|tag|http://www.semanlink.net/tag/histoire_de_l_afrique +http://www.semanlink.net/doc/2021/05/high_depth_african_genomes_info|tag|http://www.semanlink.net/tag/genetique_histoire +http://www.semanlink.net/doc/2021/05/high_depth_african_genomes_info|title|High-depth African genomes inform human migration and health Nature (2020) +http://www.semanlink.net/doc/2021/05/high_depth_african_genomes_info|bookmarkOf|https://www.nature.com/articles/s41586-020-2859-7 +http://www.semanlink.net/doc/2021/05/high_depth_african_genomes_info|creationTime|2021-05-08T14:02:59Z +http://www.semanlink.net/doc/2020/09/large_scale_network_motif_analy|creationDate|2020-09-14 +http://www.semanlink.net/doc/2020/09/large_scale_network_motif_analy|tag|http://www.semanlink.net/tag/peter_bloem +http://www.semanlink.net/doc/2020/09/large_scale_network_motif_analy|tag|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/doc/2020/09/large_scale_network_motif_analy|title|Large-scale network motif analysis using compression peterbloem.nl +http://www.semanlink.net/doc/2020/09/large_scale_network_motif_analy|bookmarkOf|http://peterbloem.nl/publications/compression-for-motifs +http://www.semanlink.net/doc/2020/09/large_scale_network_motif_analy|creationTime|2020-09-14T16:36:36Z +http://www.semanlink.net/doc/2021/10/neubig_lowresource_nlp_bootcamp|creationDate|2021-10-16 +http://www.semanlink.net/doc/2021/10/neubig_lowresource_nlp_bootcamp|tag|http://www.semanlink.net/tag/low_resource_languages +http://www.semanlink.net/doc/2021/10/neubig_lowresource_nlp_bootcamp|tag|http://www.semanlink.net/tag/nlp_low_resource_scenarios +http://www.semanlink.net/doc/2021/10/neubig_lowresource_nlp_bootcamp|comment|8 lectures (plus exercises) focused on NLP in data-scarse languages +http://www.semanlink.net/doc/2021/10/neubig_lowresource_nlp_bootcamp|title|neubig/lowresource-nlp-bootcamp-2020: The website for the CMU Language Technologies Institute low resource NLP bootcamp 2020 +http://www.semanlink.net/doc/2021/10/neubig_lowresource_nlp_bootcamp|bookmarkOf|https://github.com/neubig/lowresource-nlp-bootcamp-2020 +http://www.semanlink.net/doc/2021/10/neubig_lowresource_nlp_bootcamp|creationTime|2021-10-16T14:54:17Z +http://www.semanlink.net/doc/2021/03/israel_devoile_un_manuscrit_bib|creationDate|2021-03-16 
+http://www.semanlink.net/doc/2021/03/israel_devoile_un_manuscrit_bib|tag|http://www.semanlink.net/tag/israel +http://www.semanlink.net/doc/2021/03/israel_devoile_un_manuscrit_bib|tag|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/doc/2021/03/israel_devoile_un_manuscrit_bib|tag|http://www.semanlink.net/tag/bible +http://www.semanlink.net/doc/2021/03/israel_devoile_un_manuscrit_bib|title|Israël dévoile un manuscrit biblique vieux de deux mille ans +http://www.semanlink.net/doc/2021/03/israel_devoile_un_manuscrit_bib|bookmarkOf|https://www.lemonde.fr/international/article/2021/03/16/israel-devoile-un-manuscrit-biblique-vieux-de-deux-mille-ans_6073283_3210.html +http://www.semanlink.net/doc/2021/03/israel_devoile_un_manuscrit_bib|creationTime|2021-03-16T10:36:10Z +http://www.semanlink.net/doc/2021/06/l%E2%80%99%C2%AB_homme_dragon_%C2%BB_un_crane_ch|creationDate|2021-06-25 +http://www.semanlink.net/doc/2021/06/l%E2%80%99%C2%AB_homme_dragon_%C2%BB_un_crane_ch|tag|http://www.semanlink.net/tag/paleoanthropology +http://www.semanlink.net/doc/2021/06/l%E2%80%99%C2%AB_homme_dragon_%C2%BB_un_crane_ch|tag|http://www.semanlink.net/tag/denisovan +http://www.semanlink.net/doc/2021/06/l%E2%80%99%C2%AB_homme_dragon_%C2%BB_un_crane_ch|title|L’« homme dragon », un crâne chinois miraculeusement préservé +http://www.semanlink.net/doc/2021/06/l%E2%80%99%C2%AB_homme_dragon_%C2%BB_un_crane_ch|bookmarkOf|https://www.lemonde.fr/sciences/article/2021/06/25/l-homme-dragon-un-crane-chinois-miraculeusement-preserve_6085719_1650684.html +http://www.semanlink.net/doc/2021/06/l%E2%80%99%C2%AB_homme_dragon_%C2%BB_un_crane_ch|creationTime|2021-06-25T20:00:19Z +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|creationDate|2020-10-11 +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|tag|http://www.semanlink.net/tag/label_embedding +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|arxiv_author|Fabrizio Sebastiani +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|arxiv_author|Andrea Esuli +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|arxiv_author|Alejandro Moreo +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|comment|"> In supervised tasks such as multiclass +text classification (the focus of this article) it seems appealing to enhance word representations +with ad-hoc embeddings that encode task-specific information. We propose (supervised) word-class +embeddings (WCEs), and show that, when concatenated to (unsupervised) pre-trained word embeddings, +they substantially facilitate the training of deep-learning models in multiclass classification by +topic. +> +> A differentiating aspect of our method is that it keeps the modelling of word-class interactions separate from the +original word embedding. Word-class correlations are confined in a dedicated vector space, whose vectors enhance +(by concatenation) the unsupervised representations. The net effect is an embedding matrix that is better suited to +classification, and imposes no restriction to the network architecture using it. + +[github](https://github.com/AlexMoreo/word-class-embeddings). 
Refers to [LEAM](doc:2020/02/joint_embedding_of_words_and_la) : + +> [in LEAM] Once words and labels are embedded in a common vector space, word-label +compatibility is measured via cosine similarity. Our method instead models these compatibilities directly, without +generating intermediate embeddings for words or labels." +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|relatedDoc|http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|title|[1911.11506] Word-Class Embeddings for Multiclass Text Classification +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|bookmarkOf|https://arxiv.org/abs/1911.11506 +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|creationTime|2020-10-11T19:29:28Z +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|arxiv_summary|"Pre-trained word embeddings encode general word semantics and lexical +regularities of natural language, and have proven useful across many NLP tasks, +including word sense disambiguation, machine translation, and sentiment +analysis, to name a few. In supervised tasks such as multiclass text +classification (the focus of this article) it seems appealing to enhance word +representations with ad-hoc embeddings that encode task-specific information. +We propose (supervised) word-class embeddings (WCEs), and show that, when +concatenated to (unsupervised) pre-trained word embeddings, they substantially +facilitate the training of deep-learning models in multiclass classification by +topic. We show empirical evidence that WCEs yield a consistent improvement in +multiclass classification accuracy, using four popular neural architectures and +six widely used and publicly available datasets for multiclass text +classification. 
Our code that implements WCEs is publicly available at +https://github.com/AlexMoreo/word-class-embeddings" +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|arxiv_firstAuthor|Alejandro Moreo +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|arxiv_updated|2019-11-26T13:11:00Z +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|arxiv_title|Word-Class Embeddings for Multiclass Text Classification +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|arxiv_published|2019-11-26T13:11:00Z +http://www.semanlink.net/doc/2020/10/1911_11506_word_class_embeddi|arxiv_num|1911.11506 +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|creationDate|2021-06-22 +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|tag|http://www.semanlink.net/tag/entity_type_representation +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|tag|http://www.semanlink.net/tag/andrew_mccallum +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|arxiv_author|Greg Durrett +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|arxiv_author|Yasumasa Onoe +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|arxiv_author|Andrew McCallum +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|arxiv_author|Michael Boratko +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|title|[2101.00345] Modeling Fine-Grained Entity Types with Box Embeddings +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|bookmarkOf|https://arxiv.org/abs/2101.00345 +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|creationTime|2021-06-22T13:40:30Z +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|arxiv_summary|"Neural entity typing models typically represent fine-grained entity types as +vectors in a high-dimensional space, but such spaces are not well-suited to +modeling these types' complex interdependencies. We study the ability of box +embeddings, which embed concepts as d-dimensional hyperrectangles, to capture +hierarchies of types even when these relationships are not defined explicitly +in the ontology. Our model represents both types and entity mentions as boxes. +Each mention and its context are fed into a BERT-based model to embed that +mention in our box space; essentially, this model leverages typological clues +present in the surface text to hypothesize a type representation for the +mention. Box containment can then be used to derive both the posterior +probability of a mention exhibiting a given type and the conditional +probability relations between types themselves. We compare our approach with a +vector-based typing model and observe state-of-the-art performance on several +entity typing benchmarks. In addition to competitive typing performance, our +box-based model shows better performance in prediction consistency (predicting +a supertype and a subtype together) and confidence (i.e., calibration), +demonstrating that the box-based model captures the latent type hierarchies +better than the vector-based model does." 
+http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|arxiv_firstAuthor|Yasumasa Onoe +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|arxiv_updated|2021-06-03T05:51:55Z +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|arxiv_title|Modeling Fine-Grained Entity Types with Box Embeddings +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|arxiv_published|2021-01-02T00:59:10Z +http://www.semanlink.net/doc/2021/06/2101_00345_modeling_fine_grai|arxiv_num|2101.00345 +http://www.semanlink.net/doc/2020/09/commission_d%E2%80%99enquete_sur_la_ges|creationDate|2020-09-15 +http://www.semanlink.net/doc/2020/09/commission_d%E2%80%99enquete_sur_la_ges|tag|http://www.semanlink.net/tag/covid19_impreparation +http://www.semanlink.net/doc/2020/09/commission_d%E2%80%99enquete_sur_la_ges|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales +http://www.semanlink.net/doc/2020/09/commission_d%E2%80%99enquete_sur_la_ges|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/09/commission_d%E2%80%99enquete_sur_la_ges|title|Commission d’enquête sur la gestion du Covid-19 : « Les défauts observés lors de la première vague perdurent » +http://www.semanlink.net/doc/2020/09/commission_d%E2%80%99enquete_sur_la_ges|bookmarkOf|https://www.lemonde.fr/politique/article/2020/09/15/commission-d-enquete-sur-la-gestion-du-covid-19-les-defauts-observes-lors-de-la-premiere-vague-perdurent_6052191_823448.html +http://www.semanlink.net/doc/2020/09/commission_d%E2%80%99enquete_sur_la_ges|creationTime|2020-09-15T18:08:12Z +http://www.semanlink.net/doc/2020/08/marcel_frohlich_sur_twitter_|creationDate|2020-08-30 +http://www.semanlink.net/doc/2020/08/marcel_frohlich_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/08/marcel_frohlich_sur_twitter_|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/doc/2020/08/marcel_frohlich_sur_twitter_|tag|http://www.semanlink.net/tag/marcel_frohlich +http://www.semanlink.net/doc/2020/08/marcel_frohlich_sur_twitter_|tag|http://www.semanlink.net/tag/encoding +http://www.semanlink.net/doc/2020/08/marcel_frohlich_sur_twitter_|tag|http://www.semanlink.net/tag/information_theory +http://www.semanlink.net/doc/2020/08/marcel_frohlich_sur_twitter_|title|"Marcel Fröhlich sur Twitter : ""Biology / information theory /physics question: How do encodings emerge?""" +http://www.semanlink.net/doc/2020/08/marcel_frohlich_sur_twitter_|bookmarkOf|https://twitter.com/FroehlichMarcel/status/1297606130248699911 +http://www.semanlink.net/doc/2020/08/marcel_frohlich_sur_twitter_|creationTime|2020-08-30T16:52:04Z +http://www.semanlink.net/doc/2020/07/knowledge_graphs_in_natural_lan|creationDate|2020-07-12 +http://www.semanlink.net/doc/2020/07/knowledge_graphs_in_natural_lan|tag|http://www.semanlink.net/tag/acl_2020 +http://www.semanlink.net/doc/2020/07/knowledge_graphs_in_natural_lan|tag|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/doc/2020/07/knowledge_graphs_in_natural_lan|title|Knowledge Graphs in Natural Language Processing @ ACL 2020 by Michael Galkin +http://www.semanlink.net/doc/2020/07/knowledge_graphs_in_natural_lan|bookmarkOf|https://towardsdatascience.com/knowledge-graphs-in-natural-language-processing-acl-2020-ebb1f0a6e0b1 +http://www.semanlink.net/doc/2020/07/knowledge_graphs_in_natural_lan|creationTime|2020-07-12T01:12:20Z 
+http://www.semanlink.net/doc/2021/02/three_billboards_outside_ebbing|creationDate|2021-02-08
+http://www.semanlink.net/doc/2021/02/three_billboards_outside_ebbing|tag|http://www.semanlink.net/tag/film_americain
+http://www.semanlink.net/doc/2021/02/three_billboards_outside_ebbing|tag|http://www.semanlink.net/tag/police
+http://www.semanlink.net/doc/2021/02/three_billboards_outside_ebbing|comment|"> Mildred Hayes is grieving over the rape and murder of her teenage daughter, Angela, seven months earlier. Angry over the lack of progress in the investigation, Mildred rents three abandoned billboards near her home and posts on them: ""Raped While Dying"", ""And Still No Arrests?"", and ""How Come, Chief Willoughby?"""
+http://www.semanlink.net/doc/2021/02/three_billboards_outside_ebbing|title|Three Billboards Outside Ebbing, Missouri
+http://www.semanlink.net/doc/2021/02/three_billboards_outside_ebbing|bookmarkOf|https://en.wikipedia.org/wiki/Three_Billboards_Outside_Ebbing,_Missouri
+http://www.semanlink.net/doc/2021/02/three_billboards_outside_ebbing|creationTime|2021-02-08T01:03:47Z
+http://www.semanlink.net/doc/2021/04/comment_nos_ancetres_poissons_s|creationDate|2021-04-21
+http://www.semanlink.net/doc/2021/04/comment_nos_ancetres_poissons_s|tag|http://www.semanlink.net/tag/poisson
+http://www.semanlink.net/doc/2021/04/comment_nos_ancetres_poissons_s|tag|http://www.semanlink.net/tag/evolution
+http://www.semanlink.net/doc/2021/04/comment_nos_ancetres_poissons_s|tag|http://www.semanlink.net/tag/histoire_de_la_vie
+http://www.semanlink.net/doc/2021/04/comment_nos_ancetres_poissons_s|title|Comment nos ancêtres poissons sont sortis des eaux pour atteindre le milieu terrestre
+http://www.semanlink.net/doc/2021/04/comment_nos_ancetres_poissons_s|bookmarkOf|https://www.lemonde.fr/sciences/article/2021/04/20/comment-nos-ancetres-sont-sortis-des-eaux_6077455_1650684.html
+http://www.semanlink.net/doc/2021/04/comment_nos_ancetres_poissons_s|creationTime|2021-04-21T00:03:17Z
+http://www.semanlink.net/doc/2021/07/a_la_decouverte_de_la_cryptogra|creationDate|2021-07-16
+http://www.semanlink.net/doc/2021/07/a_la_decouverte_de_la_cryptogra|tag|http://www.semanlink.net/tag/cryptographie_quantique
+http://www.semanlink.net/doc/2021/07/a_la_decouverte_de_la_cryptogra|title|A la découverte de la cryptographie quantique
+http://www.semanlink.net/doc/2021/07/a_la_decouverte_de_la_cryptogra|bookmarkOf|http://images.math.cnrs.fr/A-la-decouverte-de-la-cryptographie-quantique.html
+http://www.semanlink.net/doc/2021/07/a_la_decouverte_de_la_cryptogra|creationTime|2021-07-16T13:37:51Z
+http://www.semanlink.net/doc/2020/12/do_you_love_me_youtube|creationDate|2020-12-30
+http://www.semanlink.net/doc/2020/12/do_you_love_me_youtube|tag|http://www.semanlink.net/tag/youtube_video
+http://www.semanlink.net/doc/2020/12/do_you_love_me_youtube|tag|http://www.semanlink.net/tag/boston_dynamics
+http://www.semanlink.net/doc/2020/12/do_you_love_me_youtube|tag|http://www.semanlink.net/tag/danse
+http://www.semanlink.net/doc/2020/12/do_you_love_me_youtube|comment|would you like to dance with me?
+http://www.semanlink.net/doc/2020/12/do_you_love_me_youtube|title|Do You Love Me? - Boston Dynamics video
+http://www.semanlink.net/doc/2020/12/do_you_love_me_youtube|bookmarkOf|https://www.youtube.com/watch?v=fn3KWM1kuAw
+http://www.semanlink.net/doc/2020/12/do_you_love_me_youtube|creationTime|2020-12-30T13:11:32Z
+http://www.semanlink.net/doc/2020/11/peter_bloem_sur_twitter_large|creationDate|2020-11-06
+http://www.semanlink.net/doc/2020/11/peter_bloem_sur_twitter_large|tag|http://www.semanlink.net/tag/peter_bloem
+http://www.semanlink.net/doc/2020/11/peter_bloem_sur_twitter_large|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2020/11/peter_bloem_sur_twitter_large|tag|http://www.semanlink.net/tag/graphs_machine_learning
+http://www.semanlink.net/doc/2020/11/peter_bloem_sur_twitter_large|title|Peter Bloem sur Twitter : Large-scale network motif analysis using compression
+http://www.semanlink.net/doc/2020/11/peter_bloem_sur_twitter_large|bookmarkOf|https://twitter.com/pbloemesquire/status/1305468256187228166
+http://www.semanlink.net/doc/2020/11/peter_bloem_sur_twitter_large|creationTime|2020-11-06T18:10:41Z
+http://www.semanlink.net/doc/2021/08/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|creationDate|2021-08-30
+http://www.semanlink.net/doc/2021/08/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|tag|http://www.semanlink.net/tag/nlp
+http://www.semanlink.net/doc/2021/08/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|tag|http://www.semanlink.net/tag/evaluation_measures
+http://www.semanlink.net/doc/2021/08/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|tag|http://www.semanlink.net/tag/named_entity_recognition
+http://www.semanlink.net/doc/2021/08/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|tag|http://www.semanlink.net/tag/yoav_goldberg
+http://www.semanlink.net/doc/2021/08/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|tag|http://www.semanlink.net/tag/linguistique
+http://www.semanlink.net/doc/2021/08/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|title|"(((ل()(ل() 'yoav))))👾 sur Twitter : ""my two cents on why NLP as a field is focusing on the ML-ish / algorithmic / leaderboard-ish aspects (incl., now, LLMs) and not on the underlying language phenomena: it is just so much easier, on so many levels."""
+http://www.semanlink.net/doc/2021/08/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|bookmarkOf|https://twitter.com/yoavgo/status/1431284873151528960?s=20
+http://www.semanlink.net/doc/2021/08/%D9%84_%D9%84_yoav_%F0%9F%91%BE_sur_twit|creationTime|2021-08-30T19:06:53Z
+http://www.semanlink.net/doc/2021/07/l_insulte_film_|creationDate|2021-07-21
+http://www.semanlink.net/doc/2021/07/l_insulte_film_|tag|http://www.semanlink.net/tag/film
+http://www.semanlink.net/doc/2021/07/l_insulte_film_|tag|http://www.semanlink.net/tag/liban
+http://www.semanlink.net/doc/2021/07/l_insulte_film_|title|L'Insulte (film)
+http://www.semanlink.net/doc/2021/07/l_insulte_film_|bookmarkOf|https://fr.wikipedia.org/wiki/L'Insulte
+http://www.semanlink.net/doc/2021/07/l_insulte_film_|creationTime|2021-07-21T22:22:01Z
+http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|creationDate|2021-06-10
+http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|tag|http://www.semanlink.net/tag/first_order_logic
+http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|tag|http://www.semanlink.net/tag/guha
+http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings
+http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|arxiv_author|Ramanathan Guha +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|comment|"> **We would like to have systems that are largely learnt, which +we can also teach** + +> We believe that an essential step in bringing logic and +distributed representations closer is to create a model theory based on embeddings. + +> despite our best attempts, +terms and axioms in knowledge based systems end +up having many of the characteristics of natural language -- LOL" +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|title|[1410.5859] Towards a Model Theory for Distributed Representations +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|bookmarkOf|https://arxiv.org/abs/1410.5859 +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|creationTime|2021-06-10T16:30:07Z +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|arxiv_summary|"Distributed representations (such as those based on embeddings) and discrete +representations (such as those based on logic) have complementary strengths. We +explore one possible approach to combining these two kinds of representations. +We present a model theory/semantics for first order logic based on vectors of +reals. We describe the model theory, discuss some interesting properties of +such a system and present a simple approach to query answering." +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|arxiv_firstAuthor|Ramanathan Guha +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|arxiv_updated|2015-02-05T03:06:09Z +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|arxiv_title|Towards a Model Theory for Distributed Representations +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|arxiv_published|2014-10-21T21:15:45Z +http://www.semanlink.net/doc/2021/06/1410_5859_towards_a_model_the|arxiv_num|1410.5859 +http://www.semanlink.net/doc/2021/07/lombok_into_eclipse|creationDate|2021-07-28 +http://www.semanlink.net/doc/2021/07/lombok_into_eclipse|tag|http://www.semanlink.net/tag/eclipse +http://www.semanlink.net/doc/2021/07/lombok_into_eclipse|tag|http://www.semanlink.net/tag/lombok +http://www.semanlink.net/doc/2021/07/lombok_into_eclipse|title|Lombok into eclipse +http://www.semanlink.net/doc/2021/07/lombok_into_eclipse|bookmarkOf|https://projectlombok.org/setup/eclipse +http://www.semanlink.net/doc/2021/07/lombok_into_eclipse|creationTime|2021-07-28T09:25:45Z +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|creationDate|2020-07-11 +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|tag|http://www.semanlink.net/tag/end_to_end_learning +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|tag|http://www.semanlink.net/tag/multi_hop_reasonning +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|tag|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|tag|http://www.semanlink.net/tag/virtual_knowledge_graph 
+http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|tag|http://www.semanlink.net/tag/neural_memory +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|tag|http://www.semanlink.net/tag/differentiable_reasoning_over_text +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_author|Ruslan Salakhutdinov +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_author|Manzil Zaheer +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_author|Vidhisha Balachandran +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_author|Graham Neubig +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_author|William W. Cohen +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_author|Bhuwan Dhingra +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|comment|"> We consider the task of answering complex multi-hop questions **using a corpus as a virtual knowledge base** (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a **special index of contextual representations of the mentions**. This module is **differentiable**, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. + +[(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_)" +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|relatedDoc|http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_ +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|title|[2002.10640] Differentiable Reasoning over a Virtual Knowledge Base +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|bookmarkOf|https://arxiv.org/abs/2002.10640 +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|creationTime|2020-07-11T14:03:19Z +http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_summary|"We consider the task of answering complex multi-hop questions using a corpus +as a virtual knowledge base (KB). In particular, we describe a neural module, +DrKIT, that traverses textual data like a KB, softly following paths of +relations between mentions of entities in the corpus. At each step the module +uses a combination of sparse-matrix TFIDF indices and a maximum inner product +search (MIPS) on a special index of contextual representations of the mentions. +This module is differentiable, so the full system can be trained end-to-end +using gradient based methods, starting from natural language inputs. We also +describe a pretraining scheme for the contextual representation encoder by +generating hard negative examples using existing knowledge bases. We show that +DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, +cutting the gap between text-based and KB-based state-of-the-art by 70%. On +HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking +approach to retrieving the relevant passages required to answer a question. +DrKIT is also very efficient, processing 10-100x more queries per second than +existing multi-hop systems." 
+http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_firstAuthor|Bhuwan Dhingra
+http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_updated|2020-02-25T03:13:32Z
+http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_title|Differentiable Reasoning over a Virtual Knowledge Base
+http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_published|2020-02-25T03:13:32Z
+http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea|arxiv_num|2002.10640
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|creationDate|2020-11-26
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|tag|http://www.semanlink.net/tag/entity_embeddings
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|tag|http://www.semanlink.net/tag/ikuya_yamada
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|tag|http://www.semanlink.net/tag/emnlp_2020
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|tag|http://www.semanlink.net/tag/self_attention
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|tag|http://www.semanlink.net/tag/bert_kb
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|tag|http://www.semanlink.net/tag/text_aware_kg_embedding
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_author|Yuji Matsumoto
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_author|Hideaki Takeda
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_author|Ikuya Yamada
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_author|Akari Asai
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_author|Hiroyuki Shindo
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|comment|"> LUKE is based on bidirectional Transformer, treats words and entities in a text as independent tokens, and outputs contextualized representations of them. The representations can be used to address downstream tasks similarly to BERT. [src](https://twitter.com/ikuyamada/status/1312947499141750786)
+
+> LUKE is trained using a novel pretraining task that involves predicting randomly masked words (equivalent to BERT’s masked language model) and entities in an entity-annotated corpus obtained from Wikipedia.
+
+(Hum, ça me rappelle quelque chose // TODO find where)
+
+> LUKE also uses a new *entity-aware* self-attention mechanism that considers the types of tokens (words or entities) when computing attention scores.
+
+[github](https://github.com/studio-ousia/luke), [at Hugging Face](https://twitter.com/AkariAsai/status/1389428550298525696), [doc](https://huggingface.co/transformers/model_doc/luke.html), [tweet](https://twitter.com/ikuyamada/status/1392742990586683392?s=20)"
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|title|[2010.01057] LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|bookmarkOf|https://arxiv.org/abs/2010.01057
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|creationTime|2020-11-26T16:21:30Z
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_summary|"Entity representations are useful in natural language tasks involving
+entities. In this paper, we propose new pretrained contextualized
+representations of words and entities based on the bidirectional transformer.
+The proposed model treats words and entities in a given text as independent
+tokens, and outputs contextualized representations of them. Our model is
+trained using a new pretraining task based on the masked language model of
+BERT. The task involves predicting randomly masked words and entities in a
+large entity-annotated corpus retrieved from Wikipedia. We also propose an
+entity-aware self-attention mechanism that is an extension of the
+self-attention mechanism of the transformer, and considers the types of tokens
+(words or entities) when computing attention scores. The proposed model
+achieves impressive empirical performance on a wide range of entity-related
+tasks. In particular, it obtains state-of-the-art results on five well-known
+datasets: Open Entity (entity typing), TACRED (relation classification),
+CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering),
+and SQuAD 1.1 (extractive question answering). Our source code and pretrained
+representations are available at https://github.com/studio-ousia/luke."
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_firstAuthor|Ikuya Yamada
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_updated|2020-10-02T15:38:03Z
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_title|LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_published|2020-10-02T15:38:03Z
+http://www.semanlink.net/doc/2020/11/2010_01057_luke_deep_context|arxiv_num|2010.01057
+http://www.semanlink.net/doc/2021/05/la_cite_oubliee_d%E2%80%99ulug_depe_%7C_c|creationDate|2021-05-28
+http://www.semanlink.net/doc/2021/05/la_cite_oubliee_d%E2%80%99ulug_depe_%7C_c|tag|http://www.semanlink.net/tag/archeologie
+http://www.semanlink.net/doc/2021/05/la_cite_oubliee_d%E2%80%99ulug_depe_%7C_c|tag|http://www.semanlink.net/tag/turkmenistan
+http://www.semanlink.net/doc/2021/05/la_cite_oubliee_d%E2%80%99ulug_depe_%7C_c|tag|http://www.semanlink.net/tag/bactria_margiana_archaeological_complex
+http://www.semanlink.net/doc/2021/05/la_cite_oubliee_d%E2%80%99ulug_depe_%7C_c|title|La cité oubliée d’Ulug Dépé CNRS Le journal
+http://www.semanlink.net/doc/2021/05/la_cite_oubliee_d%E2%80%99ulug_depe_%7C_c|bookmarkOf|https://lejournal.cnrs.fr/articles/la-cite-oubliee-dulug-depe?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1622178648
+http://www.semanlink.net/doc/2021/05/la_cite_oubliee_d%E2%80%99ulug_depe_%7C_c|creationTime|2021-05-28T14:23:24Z
+http://www.semanlink.net/doc/2020/10/representation_learning_of_know|creationDate|2020-10-02
+http://www.semanlink.net/doc/2020/10/representation_learning_of_know|tag|http://www.semanlink.net/tag/text_aware_kg_embedding
+http://www.semanlink.net/doc/2020/10/representation_learning_of_know|tag|http://www.semanlink.net/tag/zero_shot
+http://www.semanlink.net/doc/2020/10/representation_learning_of_know|tag|http://www.semanlink.net/tag/dkrl
+http://www.semanlink.net/doc/2020/10/representation_learning_of_know|comment|"""Description-Embodied Knowledge Representation Learning"" (DKRL)
+
+> In most knowledge graphs there are usually concise descriptions for entities, which cannot be well utilized by existing methods... Experimental results on real-world datasets show that, our method outperforms other baselines on the knowledge graph completion and entity classification tasks, especially under the zero-shot setting, which indicates that **our method is capable of building representations for novel entities according to their descriptions**. [Source code on github](https://github.com/xrb92/DKRL).
+
+For fact triples: TransE. Meanwhile, given an entity we will also learn to maximize
+the likelihood of predicting its description (using either CBOW or CNN encoder) (head + relation = tail, also in ""text space"")
+
+Two types of representations for entities: structure-based
+representations and description-based representations. They are learned simultaneously into the same
+vector space but not forced to be unified **so that novel
+entities with only descriptions can be represented**."
+http://www.semanlink.net/doc/2020/10/representation_learning_of_know|title|Representation learning of knowledge graphs with entity descriptions (AAAI 2016)
+http://www.semanlink.net/doc/2020/10/representation_learning_of_know|bookmarkOf|https://dl.acm.org/doi/10.5555/3016100.3016273
+http://www.semanlink.net/doc/2020/10/representation_learning_of_know|creationTime|2020-10-02T00:37:27Z
+http://www.semanlink.net/doc/2021/05/apple_is_eating_our_lunch_go|creationDate|2021-05-31
+http://www.semanlink.net/doc/2021/05/apple_is_eating_our_lunch_go|tag|http://www.semanlink.net/tag/vie_privee
+http://www.semanlink.net/doc/2021/05/apple_is_eating_our_lunch_go|tag|http://www.semanlink.net/tag/google_sucks
+http://www.semanlink.net/doc/2021/05/apple_is_eating_our_lunch_go|title|'Apple is eating our lunch': Google employees admit in lawsuit that the company made it nearly impossible for users to keep their location private
+http://www.semanlink.net/doc/2021/05/apple_is_eating_our_lunch_go|bookmarkOf|https://www.businessinsider.fr/us/unredacted-google-lawsuit-docs-detail-efforts-to-collect-user-location-2021-5
+http://www.semanlink.net/doc/2021/05/apple_is_eating_our_lunch_go|creationTime|2021-05-31T22:10:51Z
+http://www.semanlink.net/doc/2021/07/davlan_david_adelani_hugging|creationDate|2021-07-29
+http://www.semanlink.net/doc/2021/07/davlan_david_adelani_hugging|tag|http://www.semanlink.net/tag/low_resource_languages
+http://www.semanlink.net/doc/2021/07/davlan_david_adelani_hugging|tag|http://www.semanlink.net/tag/haoussa
+http://www.semanlink.net/doc/2021/07/davlan_david_adelani_hugging|tag|http://www.semanlink.net/tag/hugging_face
+http://www.semanlink.net/doc/2021/07/davlan_david_adelani_hugging|comment|includes a [roberta-base-finetuned-hausa](https://huggingface.co/Davlan/xlm-roberta-base-finetuned-hausa) (using data from [CC-100: Monolingual Datasets from Web Crawl Data](doc:2021/07/cc_100_monolingual_datasets_fr))
+http://www.semanlink.net/doc/2021/07/davlan_david_adelani_hugging|relatedDoc|http://www.semanlink.net/doc/2021/07/cc_100_monolingual_datasets_fr
+http://www.semanlink.net/doc/2021/07/davlan_david_adelani_hugging|title|Davlan (David Adelani) @Huggingface
+http://www.semanlink.net/doc/2021/07/davlan_david_adelani_hugging|bookmarkOf|https://huggingface.co/Davlan
+http://www.semanlink.net/doc/2021/07/davlan_david_adelani_hugging|creationTime|2021-07-29T00:01:52Z
+http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo|creationDate|2020-09-17
+http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo|tag|http://www.semanlink.net/tag/computational_neuroscience
+http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo|tag|http://www.semanlink.net/tag/children_s_language_acquisition
+http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo|tag|http://www.semanlink.net/tag/emergence
+http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo|tag|http://www.semanlink.net/tag/linguistique
+http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo|comment|"Le physicien Eric DeGiuli a récemment proposé un modèle statistique d’apprentissage qui renouvelle la linguistique générative
+
+[Papier](doc:2020/09/from_random_grammars_to_learnin)"
+http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo|relatedDoc|http://www.semanlink.net/doc/2020/09/from_random_grammars_to_learnin
+http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo|title|Le langage, une émergence explosive
+http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo|bookmarkOf|https://www.lemonde.fr/sciences/article/2020/05/27/le-langage-une-emergence-explosive_6040877_1650684.html
+http://www.semanlink.net/doc/2020/09/le_langage_une_emergence_explo|creationTime|2020-09-17T23:42:25Z
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|creationDate|2020-06-30
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|tag|http://www.semanlink.net/tag/nlp_pretraining
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|tag|http://www.semanlink.net/tag/nlp_facebook
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|tag|http://www.semanlink.net/tag/sequence_to_sequence_learning
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|tag|http://www.semanlink.net/tag/zero_shot_learning
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_author|Armen Aghajanyan
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_author|Gargi Ghosh
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_author|Marjan Ghazvininejad
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_author|Sida Wang
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_author|Mike Lewis
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_author|Luke Zettlemoyer
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|title|[2006.15020] Pre-training via Paraphrasing
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|bookmarkOf|https://arxiv.org/abs/2006.15020
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|creationTime|2020-06-30T11:32:08Z
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_summary|"We introduce MARGE, a pre-trained sequence-to-sequence model learned with an
+unsupervised multi-lingual multi-document paraphrasing objective. MARGE
+provides an alternative to the dominant masked language modeling paradigm,
+where we self-supervise the reconstruction of target text by retrieving a set
+of related texts (in many languages) and conditioning on them to maximize the
+likelihood of generating the original. We show it is possible to jointly learn
+to do retrieval and reconstruction, given only a random initialization. The
+objective noisily captures aspects of paraphrase,
+translation, multi-document
+summarization, and information retrieval, allowing for strong zero-shot
+performance on several tasks. For example, with no additional task-specific
+training we achieve BLEU scores of up to 35.8 for document translation. We
+further show that fine-tuning gives strong performance on a range of
+discriminative and generative tasks in many languages, making MARGE the most
+generally applicable pre-training method to date."
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_firstAuthor|Mike Lewis
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_updated|2020-06-26T14:43:43Z
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_title|Pre-training via Paraphrasing
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_published|2020-06-26T14:43:43Z
+http://www.semanlink.net/doc/2020/06/2006_15020_pre_training_via_p|arxiv_num|2006.15020
+http://www.semanlink.net/doc/2020/07/aran_komatsuzaki_sur_twitter_|creationDate|2020-07-29
+http://www.semanlink.net/doc/2020/07/aran_komatsuzaki_sur_twitter_|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2020/07/aran_komatsuzaki_sur_twitter_|tag|http://www.semanlink.net/tag/automatic_summarization
+http://www.semanlink.net/doc/2020/07/aran_komatsuzaki_sur_twitter_|tag|http://www.semanlink.net/tag/open_domain_question_answering
+http://www.semanlink.net/doc/2020/07/aran_komatsuzaki_sur_twitter_|tag|http://www.semanlink.net/tag/nlp_long_documents
+http://www.semanlink.net/doc/2020/07/aran_komatsuzaki_sur_twitter_|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+http://www.semanlink.net/doc/2020/07/aran_komatsuzaki_sur_twitter_|title|"Aran Komatsuzaki sur Twitter : ""Big Bird: Transformers for Longer Sequences..."""
+http://www.semanlink.net/doc/2020/07/aran_komatsuzaki_sur_twitter_|bookmarkOf|https://twitter.com/arankomatsuzaki/status/1288280728245923842
+http://www.semanlink.net/doc/2020/07/aran_komatsuzaki_sur_twitter_|creationTime|2020-07-29T08:19:09Z
+http://www.semanlink.net/doc/2021/06/masakhane|creationDate|2021-06-30
+http://www.semanlink.net/doc/2021/06/masakhane|tag|http://www.semanlink.net/tag/nlp_4_africa
+http://www.semanlink.net/doc/2021/06/masakhane|tag|http://www.semanlink.net/tag/masakhane
+http://www.semanlink.net/doc/2021/06/masakhane|comment|A grassroots NLP community for Africa, by Africans. [Twitter @MasakhaneNLP](https://twitter.com/MasakhaneNLP)
+http://www.semanlink.net/doc/2021/06/masakhane|title|Masakhane
+http://www.semanlink.net/doc/2021/06/masakhane|bookmarkOf|https://www.masakhane.io/
+http://www.semanlink.net/doc/2021/06/masakhane|creationTime|2021-06-30T00:46:56Z
+http://www.semanlink.net/doc/2020/12/vaccins_%C2%AB_la_france_doit_d%E2%80%99ur|creationDate|2020-12-17
+http://www.semanlink.net/doc/2020/12/vaccins_%C2%AB_la_france_doit_d%E2%80%99ur|tag|http://www.semanlink.net/tag/retard_technologique_francais
+http://www.semanlink.net/doc/2020/12/vaccins_%C2%AB_la_france_doit_d%E2%80%99ur|tag|http://www.semanlink.net/tag/covid19_vaccin
+http://www.semanlink.net/doc/2020/12/vaccins_%C2%AB_la_france_doit_d%E2%80%99ur|comment|> Le pays de Pasteur se voyait en premier de la classe -- LOL
+http://www.semanlink.net/doc/2020/12/vaccins_%C2%AB_la_france_doit_d%E2%80%99ur|title|Vaccins : « La France doit d’urgence donner à sa recherche les moyens de ses ambitions »
+http://www.semanlink.net/doc/2020/12/vaccins_%C2%AB_la_france_doit_d%E2%80%99ur|bookmarkOf|https://www.lemonde.fr/economie/article/2020/12/16/vaccins-la-france-doit-d-urgence-donner-a-sa-recherche-les-moyens-de-ses-ambitions_6063581_3234.html
+http://www.semanlink.net/doc/2020/12/vaccins_%C2%AB_la_france_doit_d%E2%80%99ur|creationTime|2020-12-17T00:11:20Z
+http://www.semanlink.net/doc/2021/04/le_massif_de_lovo_un_tresor_d_|creationDate|2021-04-09
+http://www.semanlink.net/doc/2021/04/le_massif_de_lovo_un_tresor_d_|tag|http://www.semanlink.net/tag/antiquite_africaine
+http://www.semanlink.net/doc/2021/04/le_massif_de_lovo_un_tresor_d_|tag|http://www.semanlink.net/tag/peinture_rupestre
+http://www.semanlink.net/doc/2021/04/le_massif_de_lovo_un_tresor_d_|tag|http://www.semanlink.net/tag/congo_kinshasa
+http://www.semanlink.net/doc/2021/04/le_massif_de_lovo_un_tresor_d_|title|Le massif de Lovo, un trésor d'art rupestre à préserver CNRS Le journal
+http://www.semanlink.net/doc/2021/04/le_massif_de_lovo_un_tresor_d_|bookmarkOf|https://lejournal.cnrs.fr/articles/le-massif-de-lovo-un-tresor-dart-rupestre-a-preserver?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1617944796
+http://www.semanlink.net/doc/2021/04/le_massif_de_lovo_un_tresor_d_|creationTime|2021-04-09T12:32:48Z
+http://www.semanlink.net/doc/2021/06/librairy|creationDate|2021-06-22
+http://www.semanlink.net/doc/2021/06/librairy|tag|http://www.semanlink.net/tag/cognitive_search
+http://www.semanlink.net/doc/2021/06/librairy|tag|http://www.semanlink.net/tag/nlp_tools
+http://www.semanlink.net/doc/2021/06/librairy|tag|http://www.semanlink.net/tag/concept_hierarchies
+http://www.semanlink.net/doc/2021/06/librairy|comment|"> Combines NLP techniques with Machine Learning algorithms and semantic resources to explore large textual corpora.
+> Analyze your corpus by aggregating services
+
+> A novel hashing algorithm based on [approximate nearest-neighbor](tag:approximate_nearest_neighbor) techniques that uses [hierarchical sets of topics](tag:concept_hierarchies) as hash codes is proposed to explore document collections.
+
+"
+http://www.semanlink.net/doc/2021/06/librairy|title|librAIry
+http://www.semanlink.net/doc/2021/06/librairy|bookmarkOf|https://librairy.github.io/
+http://www.semanlink.net/doc/2021/06/librairy|creationTime|2021-06-22T13:56:47Z
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|creationDate|2021-03-12
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|tag|http://www.semanlink.net/tag/nlp_pretraining
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|tag|http://www.semanlink.net/tag/nlp_facebook
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|tag|http://www.semanlink.net/tag/knowledge_distillation
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|tag|http://www.semanlink.net/tag/self_training
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_author|Alexis Conneau
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_author|Ves Stoyanov
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_author|Vishrav Chaudhary
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_author|Beliz Gunel
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_author|Michael Auli
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_author|Jingfei Du
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_author|Onur Celebi
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_author|Edouard Grave
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|title|[2010.02194] Self-training Improves Pre-training for Natural Language Understanding
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|bookmarkOf|https://arxiv.org/abs/2010.02194
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|creationTime|2021-03-12T06:17:22Z
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_summary|"Unsupervised pre-training has led to much recent progress in natural language
+understanding. In this paper, we study self-training as another way to leverage
+unlabeled data through semi-supervised learning. To obtain additional data for
+a specific task, we introduce SentAugment, a data augmentation method which
+computes task-specific query embeddings from labeled data to retrieve sentences
+from a bank of billions of unlabeled sentences crawled from the web. Unlike
+previous semi-supervised methods, our approach does not require in-domain
+unlabeled data and is therefore more generally applicable. Experiments show
+that self-training is complementary to strong RoBERTa baselines on a variety of
+tasks. Our augmentation approach leads to scalable and effective self-training
+with improvements of up to 2.6% on standard text classification benchmarks.
+Finally, we also show strong gains on knowledge-distillation and few-shot
+learning."
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_firstAuthor|Jingfei Du
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_updated|2020-10-05T17:52:25Z
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_title|Self-training Improves Pre-training for Natural Language Understanding
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_published|2020-10-05T17:52:25Z
+http://www.semanlink.net/doc/2021/03/2010_02194_self_training_impr|arxiv_num|2010.02194
+http://www.semanlink.net/doc/2020/08/le_philosophe_bernard_stiegler_|creationDate|2020-08-07
+http://www.semanlink.net/doc/2020/08/le_philosophe_bernard_stiegler_|tag|http://www.semanlink.net/tag/bernard_stiegler
+http://www.semanlink.net/doc/2020/08/le_philosophe_bernard_stiegler_|comment|> « Une voix singulière et forte, un penseur de la technique et du contemporain hors du commun, qui a cherché à inventer une nouvelle langue et de nouvelles subversions »
+http://www.semanlink.net/doc/2020/08/le_philosophe_bernard_stiegler_|title|Le philosophe Bernard Stiegler est mort à l’âge de 68 ans
+http://www.semanlink.net/doc/2020/08/le_philosophe_bernard_stiegler_|bookmarkOf|https://www.lemonde.fr/disparitions/article/2020/08/07/le-philosophe-bernard-stiegler-est-mort-a-l-age-de-68-ans_6048379_3382.html
+http://www.semanlink.net/doc/2020/08/le_philosophe_bernard_stiegler_|creationTime|2020-08-07T13:19:22Z
+http://www.semanlink.net/doc/2021/04/event_camera|creationDate|2021-04-15
+http://www.semanlink.net/doc/2021/04/event_camera|tag|http://www.semanlink.net/tag/event_camera
+http://www.semanlink.net/doc/2021/04/event_camera|title|Event camera
+http://www.semanlink.net/doc/2021/04/event_camera|bookmarkOf|https://en.wikipedia.org/wiki/Event_camera
+http://www.semanlink.net/doc/2021/04/event_camera|creationTime|2021-04-15T17:13:30Z
+http://www.semanlink.net/doc/2020/10/site_archeologique_de_bura_un|creationDate|2020-10-08
+http://www.semanlink.net/doc/2020/10/site_archeologique_de_bura_un|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest
+http://www.semanlink.net/doc/2020/10/site_archeologique_de_bura_un|tag|http://www.semanlink.net/tag/boura
+http://www.semanlink.net/doc/2020/10/site_archeologique_de_bura_un|title|Site archéologique de Bura - UNESCO World Heritage Centre
+http://www.semanlink.net/doc/2020/10/site_archeologique_de_bura_un|bookmarkOf|https://whc.unesco.org/fr/listesindicatives/5045/
+http://www.semanlink.net/doc/2020/10/site_archeologique_de_bura_un|creationTime|2020-10-08T23:12:39Z
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|creationDate|2020-06-09
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|tag|http://www.semanlink.net/tag/github_project
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|tag|http://www.semanlink.net/tag/faiss
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|tag|http://www.semanlink.net/tag/nearest_neighbor_search
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|tag|http://www.semanlink.net/tag/facebook_fair
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|tag|http://www.semanlink.net/tag/locality_sensitive_hashing
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|tag|http://www.semanlink.net/tag/kd_mkb_biblio
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|comment|"Algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM. Faiss is written in C++ with complete wrappers for Python/numpy.
+
+[paper](https://arxiv.org/abs/1702.08734)"
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|title|facebookresearch/faiss: A library for efficient similarity search and clustering of dense vectors.
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|bookmarkOf|https://github.com/facebookresearch/faiss
+http://www.semanlink.net/doc/2020/06/facebookresearch_faiss_a_libra|creationTime|2020-06-09T20:51:47Z
+http://www.semanlink.net/doc/2021/06/guerre_des_drones_la_menace_d|creationDate|2021-06-26
+http://www.semanlink.net/doc/2021/06/guerre_des_drones_la_menace_d|tag|http://www.semanlink.net/tag/guerre
+http://www.semanlink.net/doc/2021/06/guerre_des_drones_la_menace_d|tag|http://www.semanlink.net/tag/drones
+http://www.semanlink.net/doc/2021/06/guerre_des_drones_la_menace_d|title|Guerre des drones : la menace des essaims
+http://www.semanlink.net/doc/2021/06/guerre_des_drones_la_menace_d|bookmarkOf|https://www.lemonde.fr/international/article/2021/06/25/guerre-des-drones-acte-ii-la-menace-des-essaims_6085696_3210.html
+http://www.semanlink.net/doc/2021/06/guerre_des_drones_la_menace_d|creationTime|2021-06-26T10:09:41Z
+http://www.semanlink.net/doc/2021/08/jeux_olympiques_les_defis_de_|creationDate|2021-08-10
+http://www.semanlink.net/doc/2021/08/jeux_olympiques_les_defis_de_|tag|http://www.semanlink.net/tag/france_delabrement
+http://www.semanlink.net/doc/2021/08/jeux_olympiques_les_defis_de_|tag|http://www.semanlink.net/tag/paris_2024
+http://www.semanlink.net/doc/2021/08/jeux_olympiques_les_defis_de_|title|Jeux olympiques : les défis de Paris 2024
+http://www.semanlink.net/doc/2021/08/jeux_olympiques_les_defis_de_|bookmarkOf|https://www.lemonde.fr/idees/article/2021/08/10/jeux-olympiques-les-defis-de-paris-2024_6091066_3232.html
+http://www.semanlink.net/doc/2021/08/jeux_olympiques_les_defis_de_|creationTime|2021-08-10T12:46:13Z
+http://www.semanlink.net/doc/2020/12/la_sonde_japonaise_hayabusa_2_a|creationDate|2020-12-06
+http://www.semanlink.net/doc/2020/12/la_sonde_japonaise_hayabusa_2_a|tag|http://www.semanlink.net/tag/hayabusa2
+http://www.semanlink.net/doc/2020/12/la_sonde_japonaise_hayabusa_2_a|title|La sonde japonaise Hayabusa-2 a rapporté des échantillons d’astéroïde sur Terre
+http://www.semanlink.net/doc/2020/12/la_sonde_japonaise_hayabusa_2_a|bookmarkOf|https://www.lemonde.fr/sciences/article/2020/12/05/la-sonde-japonaise-hayabusa-2-envoie-sur-terre-des-echantillons-d-asteroide_6062323_1650684.html
+http://www.semanlink.net/doc/2020/12/la_sonde_japonaise_hayabusa_2_a|creationTime|2020-12-06T22:31:06Z
+http://www.semanlink.net/doc/2021/09/la_decouverte_d%E2%80%99empreintes_huma|creationDate|2021-09-24
+http://www.semanlink.net/doc/2021/09/la_decouverte_d%E2%80%99empreintes_huma|tag|http://www.semanlink.net/tag/first_americans
+http://www.semanlink.net/doc/2021/09/la_decouverte_d%E2%80%99empreintes_huma|title|La découverte d’empreintes humaines vieilles de 23 000 ans réécrit l’histoire du peuplement de l’Amérique
+http://www.semanlink.net/doc/2021/09/la_decouverte_d%E2%80%99empreintes_huma|bookmarkOf|https://www.lemonde.fr/sciences/article/2021/09/24/des-empreintes-humaines-vieilles-de-23-000-ans-reecrivent-l-histoire-du-peuplement-de-l-amerique_6095815_1650684.html
+http://www.semanlink.net/doc/2021/09/la_decouverte_d%E2%80%99empreintes_huma|creationTime|2021-09-24T12:28:21Z
+http://www.semanlink.net/doc/2020/07/vice_how_police_secretly_took|creationDate|2020-07-02 +http://www.semanlink.net/doc/2020/07/vice_how_police_secretly_took|tag|http://www.semanlink.net/tag/encryption +http://www.semanlink.net/doc/2020/07/vice_how_police_secretly_took|tag|http://www.semanlink.net/tag/crime +http://www.semanlink.net/doc/2020/07/vice_how_police_secretly_took|tag|http://www.semanlink.net/tag/hack +http://www.semanlink.net/doc/2020/07/vice_how_police_secretly_took|title|VICE - How Police Secretly Took Over a Global Phone Network for Organised Crime +http://www.semanlink.net/doc/2020/07/vice_how_police_secretly_took|bookmarkOf|https://www.vice.com/en_uk/article/3aza95/how-police-secretly-took-over-a-global-phone-network-for-organised-crime +http://www.semanlink.net/doc/2020/07/vice_how_police_secretly_took|creationTime|2020-07-02T22:59:10Z +http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu|creationDate|2021-09-20 +http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu|tag|http://www.semanlink.net/tag/haystack +http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu|tag|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu|comment|[Haystack](doc:2021/09/haystack) +http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu|relatedDoc|http://www.semanlink.net/doc/2021/09/haystack +http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu|title|NLP Solutions to Streamline Neural Search and Question Answering deepset +http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu|bookmarkOf|https://www.deepset.ai/ +http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu|creationTime|2021-09-20T17:00:13Z +http://www.semanlink.net/doc/2021/08/q_a_02_bombs_vs_bugs_by_e|creationDate|2021-08-03 +http://www.semanlink.net/doc/2021/08/q_a_02_bombs_vs_bugs_by_e|tag|http://www.semanlink.net/tag/q_a +http://www.semanlink.net/doc/2021/08/q_a_02_bombs_vs_bugs_by_e|tag|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/doc/2021/08/q_a_02_bombs_vs_bugs_by_e|title|[Q&A #02] Bombs vs. Bugs - by Edward Snowden - Continuing Ed — with Edward Snowden +http://www.semanlink.net/doc/2021/08/q_a_02_bombs_vs_bugs_by_e|bookmarkOf|https://edwardsnowden.substack.com/p/qa02 +http://www.semanlink.net/doc/2021/08/q_a_02_bombs_vs_bugs_by_e|creationTime|2021-08-03T17:27:28Z +http://www.semanlink.net/doc/2021/08/chanson_marisa_monte_beaute_|creationDate|2021-08-13 +http://www.semanlink.net/doc/2021/08/chanson_marisa_monte_beaute_|tag|http://www.semanlink.net/tag/marisa_monte +http://www.semanlink.net/doc/2021/08/chanson_marisa_monte_beaute_|comment|> La chanteuse carioca publie le somptueux « Portas », premier album sous son seul nom depuis dix ans. 
+http://www.semanlink.net/doc/2021/08/chanson_marisa_monte_beaute_|title|Chanson : Marisa Monte, beauté et volupté +http://www.semanlink.net/doc/2021/08/chanson_marisa_monte_beaute_|bookmarkOf|https://www.lemonde.fr/culture/article/2021/08/13/chanson-marisa-monte-beaute-et-volupte_6091322_3246.html +http://www.semanlink.net/doc/2021/08/chanson_marisa_monte_beaute_|creationTime|2021-08-13T11:38:00Z +http://www.semanlink.net/doc/2020/12/economic_sanctions_as_collectiv|creationDate|2020-12-22 +http://www.semanlink.net/doc/2020/12/economic_sanctions_as_collectiv|tag|http://www.semanlink.net/tag/foreign_policy_of_the_united_states +http://www.semanlink.net/doc/2020/12/economic_sanctions_as_collectiv|tag|http://www.semanlink.net/tag/collective_punishment +http://www.semanlink.net/doc/2020/12/economic_sanctions_as_collectiv|tag|http://www.semanlink.net/tag/venezuela +http://www.semanlink.net/doc/2020/12/economic_sanctions_as_collectiv|tag|http://www.semanlink.net/tag/economic_sanctions +http://www.semanlink.net/doc/2020/12/economic_sanctions_as_collectiv|comment|> This paper looks at some of the most important impacts of the economic sanctions imposed on Venezuela by the US government since August of 2017. It finds that most of the impact of these sanctions has not been on the government but on the civilian population. +http://www.semanlink.net/doc/2020/12/economic_sanctions_as_collectiv|title|Economic Sanctions as Collective Punishment: The Case of Venezuela +http://www.semanlink.net/doc/2020/12/economic_sanctions_as_collectiv|bookmarkOf|https://cepr.net/images/stories/reports/venezuela-sanctions-2019-04.pdf +http://www.semanlink.net/doc/2020/12/economic_sanctions_as_collectiv|creationTime|2020-12-22T21:00:32Z +http://www.semanlink.net/doc/2020/07/des_militants_catalans_vises_pa|creationDate|2020-07-14 +http://www.semanlink.net/doc/2020/07/des_militants_catalans_vises_pa|tag|http://www.semanlink.net/tag/cybersurveillance +http://www.semanlink.net/doc/2020/07/des_militants_catalans_vises_pa|tag|http://www.semanlink.net/tag/nso_pegasus +http://www.semanlink.net/doc/2020/07/des_militants_catalans_vises_pa|title|Des militants catalans visés par un logiciel espion ultraperfectionné +http://www.semanlink.net/doc/2020/07/des_militants_catalans_vises_pa|bookmarkOf|https://www.lemonde.fr/pixels/article/2020/07/14/des-militants-catalans-vises-par-un-logiciel-espion-ultraperfectionne_6046138_4408996.html +http://www.semanlink.net/doc/2020/07/des_militants_catalans_vises_pa|creationTime|2020-07-14T12:24:22Z +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|creationDate|2020-11-03 +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|tag|http://www.semanlink.net/tag/text_kg_and_embeddings +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|tag|http://www.semanlink.net/tag/good +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|tag|http://www.semanlink.net/tag/good_related_work_section +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_author|Xiaozhi Wang +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_author|Jian Tang +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_author|Zhaocheng Zhu 
+http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_author|Tianyu Gao +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_author|Juanzi Li +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_author|Zhiyuan Liu +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|comment|"A unified model for knowledge embedding (KE, ie relational facts) and pre-trained language representation (PLM) + +> can not only better integrate +factual knowledge into PLMs but also +effectively learn KE through the abundant +information in text + +> Inspired by [Xie et al. (2016)](doc:2020/10/representation_learning_of_know) ([DKRL](tag:dkrl)), we take **entity descriptions** +to bridge the gap between KE and PLM. + +> We encode the texts +and entities into a unified semantic space with the +same PLM as the encoder, and jointly optimize the +KE and the masked language modeling (MLM) objectives +during pre-training. For the KE objective, +we encode the entity descriptions as their corresponding +entity embeddings, and then learn them +in the same way as conventional KE methods. For +the MLM objective, we follow the approach of existing +PLMs + + +" +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|relatedDoc|http://www.semanlink.net/doc/2020/10/representation_learning_of_know +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|title|[1911.06136] KEPLER: A Unified Model for Knowledge Embedding and Pre-trained Language Representation +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|bookmarkOf|https://arxiv.org/abs/1911.06136 +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|creationTime|2020-11-03T16:41:30Z +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_summary|"Pre-trained language representation models (PLMs) cannot well capture factual +knowledge from text. In contrast, knowledge embedding (KE) methods can +effectively represent the relational facts in knowledge graphs (KGs) with +informative entity embeddings, but conventional KE models do not utilize the +rich text data. In this paper, we propose a unified model for Knowledge +Embedding and Pre-trained LanguagE Representation (KEPLER), which can not only +better integrate factual knowledge into PLMs but also effectively learn KE +through the abundant information in text. In KEPLER, we encode textual +descriptions of entities with a PLM as their embeddings, and then jointly +optimize the KE and language modeling objectives. Experimental results show +that KEPLER achieves state-of-the-art performance on various NLP tasks, and +also works remarkably well as an inductive KE model on the link prediction +task. Furthermore, for pre-training KEPLER and evaluating the KE performance, +we construct Wikidata5M, a large-scale KG dataset with aligned entity +descriptions, and benchmark state-of-the-art KE methods on it. It shall serve +as a new KE benchmark and facilitate the research on large KG, inductive KE, +and KG with text. The dataset can be obtained from +https://deepgraphlearning.github.io/project/wikidata5m." 
+http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_firstAuthor|Xiaozhi Wang +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_updated|2020-02-19T07:46:52Z +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_title|KEPLER: A Unified Model for Knowledge Embedding and Pre-trained Language Representation +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_published|2019-11-13T05:21:45Z +http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_|arxiv_num|1911.06136 +http://www.semanlink.net/doc/2020/07/dr_simon_osindero_sur_twitter_|creationDate|2020-07-02 +http://www.semanlink.net/doc/2020/07/dr_simon_osindero_sur_twitter_|tag|http://www.semanlink.net/tag/recurrent_neural_network +http://www.semanlink.net/doc/2020/07/dr_simon_osindero_sur_twitter_|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/07/dr_simon_osindero_sur_twitter_|comment|Neat! Transformers as RNNs -- linearized attention helps produce transformer-like models with complexity linear (rather than quadratic) complexity in sequence length. +http://www.semanlink.net/doc/2020/07/dr_simon_osindero_sur_twitter_|title|"Dr Simon Osindero sur Twitter : ""Neat! Transformers as RNNs""" +http://www.semanlink.net/doc/2020/07/dr_simon_osindero_sur_twitter_|bookmarkOf|https://twitter.com/sindero/status/1278372867110035458?s=20 +http://www.semanlink.net/doc/2020/07/dr_simon_osindero_sur_twitter_|creationTime|2020-07-02T15:54:14Z +http://www.semanlink.net/doc/2021/10/project_nera_state_attorneys_g|creationDate|2021-10-25 +http://www.semanlink.net/doc/2021/10/project_nera_state_attorneys_g|tag|http://www.semanlink.net/tag/jedi_blue +http://www.semanlink.net/doc/2021/10/project_nera_state_attorneys_g|tag|http://www.semanlink.net/tag/google +http://www.semanlink.net/doc/2021/10/project_nera_state_attorneys_g|tag|http://www.semanlink.net/tag/google_advertising +http://www.semanlink.net/doc/2021/10/project_nera_state_attorneys_g|title|"Project NERA: State Attorneys General claim Google is planning to turn the internet into a ""walled garden"" - MSPoweruser" +http://www.semanlink.net/doc/2021/10/project_nera_state_attorneys_g|bookmarkOf|https://mspoweruser.com/project-nera-state-attorneys-general-claim-google-is-planning-to-turn-the-internet-into-a-walled-garden/ +http://www.semanlink.net/doc/2021/10/project_nera_state_attorneys_g|creationTime|2021-10-25T12:00:56Z +http://www.semanlink.net/doc/2021/05/how_can_synaptic_plasticity_lea|creationDate|2021-05-14 +http://www.semanlink.net/doc/2021/05/how_can_synaptic_plasticity_lea|tag|http://www.semanlink.net/tag/backpropagation_vs_biology +http://www.semanlink.net/doc/2021/05/how_can_synaptic_plasticity_lea|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/doc/2021/05/how_can_synaptic_plasticity_lea|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/05/how_can_synaptic_plasticity_lea|title|How can synaptic plasticity lead to meaningful learning? 
+http://www.semanlink.net/doc/2021/05/how_can_synaptic_plasticity_lea|bookmarkOf|https://twitter.com/tyrell_turing/status/1246814610981892097 +http://www.semanlink.net/doc/2021/05/how_can_synaptic_plasticity_lea|creationTime|2021-05-14T10:08:29Z +http://www.semanlink.net/doc/2020/08/graph_representation_learning_b|creationDate|2020-08-22 +http://www.semanlink.net/doc/2020/08/graph_representation_learning_b|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/08/graph_representation_learning_b|tag|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/doc/2020/08/graph_representation_learning_b|tag|http://www.semanlink.net/tag/ai_book +http://www.semanlink.net/doc/2020/08/graph_representation_learning_b|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/08/graph_representation_learning_b|comment|> This book is my attempt to provide a brief but comprehensive introduction to graph representation learning, including methods for embedding graph data, graph neural networks, and deep generative models of graphs. +http://www.semanlink.net/doc/2020/08/graph_representation_learning_b|title|Graph Representation Learning Book - Will Hamilton +http://www.semanlink.net/doc/2020/08/graph_representation_learning_b|bookmarkOf|https://www.cs.mcgill.ca/~wlh/grl_book/ +http://www.semanlink.net/doc/2020/08/graph_representation_learning_b|creationTime|2020-08-22T15:15:17Z +http://www.semanlink.net/doc/2021/09/contextualized_topic_models|creationDate|2021-09-20 +http://www.semanlink.net/doc/2021/09/contextualized_topic_models|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2021/09/contextualized_topic_models|tag|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/doc/2021/09/contextualized_topic_models|comment|> a family of topic models that use pre-trained representations of language (e.g., BERT) to support topic modeling. 
+http://www.semanlink.net/doc/2021/09/contextualized_topic_models|title|Contextualized Topic Models +http://www.semanlink.net/doc/2021/09/contextualized_topic_models|bookmarkOf|https://contextualized-topic-models.readthedocs.io/en/latest/introduction.html +http://www.semanlink.net/doc/2021/09/contextualized_topic_models|creationTime|2021-09-20T23:12:26Z +http://www.semanlink.net/doc/2020/11/this_know_it_all_ai_learns_by_r|creationDate|2020-11-14 +http://www.semanlink.net/doc/2020/11/this_know_it_all_ai_learns_by_r|tag|http://www.semanlink.net/tag/diffbot +http://www.semanlink.net/doc/2020/11/this_know_it_all_ai_learns_by_r|title|This know-it-all AI learns by reading the entire web nonstop MIT Technology Review +http://www.semanlink.net/doc/2020/11/this_know_it_all_ai_learns_by_r|bookmarkOf|https://www.technologyreview.com/2020/09/04/1008156/knowledge-graph-ai-reads-web-machine-learning-natural-language-processing/?utm_medium=tr_social&utm_campaign=site_visitor.unpaid.engagement&utm_source=Twitter#Echobox=1600097723 +http://www.semanlink.net/doc/2020/11/this_know_it_all_ai_learns_by_r|creationTime|2020-11-14T08:59:49Z +http://www.semanlink.net/doc/2021/06/matching_and_mining_in_knowledg|creationDate|2021-06-10 +http://www.semanlink.net/doc/2021/06/matching_and_mining_in_knowledg|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2021/06/matching_and_mining_in_knowledg|tag|http://www.semanlink.net/tag/phd_thesis +http://www.semanlink.net/doc/2021/06/matching_and_mining_in_knowledg|tag|http://www.semanlink.net/tag/fabien_gandon +http://www.semanlink.net/doc/2021/06/matching_and_mining_in_knowledg|title|Matching and mining in knowledge graphs of the Web of data Applications in pharmacogenomics +http://www.semanlink.net/doc/2021/06/matching_and_mining_in_knowledg|bookmarkOf|https://hal.inria.fr/tel-03122326/document +http://www.semanlink.net/doc/2021/06/matching_and_mining_in_knowledg|creationTime|2021-06-10T16:08:37Z +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|creationDate|2021-05-13 +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|tag|http://www.semanlink.net/tag/relation_learning +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|arxiv_author|Nicholas FitzGerald +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|arxiv_author|Livio Baldini Soares +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|arxiv_author|Tom Kwiatkowski +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|arxiv_author|Jeffrey Ling +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|comment|"> a new method +of learning relation representations directly from +text +> +> First, we study the **ability of the Transformer +neural network architecture (Vaswani et al., 2017) +to encode relations between entity pairs**, and we +identify a method of representation that outperforms +previous work in supervised relation extraction. 
+Then, we present a method of training this relation +representation **without any supervision from +a knowledge graph or human annotators** from widely available distant supervision +in the form of entity linked text +> +> **we assume** access +to a corpus of text in which entities have been +linked to unique identifiers and we define a relation statement to be a block of text containing two +marked entities." +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|title|[1906.03158] Matching the Blanks: Distributional Similarity for Relation Learning +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|bookmarkOf|https://arxiv.org/abs/1906.03158 +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|creationTime|2021-05-13T00:39:03Z +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|arxiv_summary|"General purpose relation extractors, which can model arbitrary relations, are +a core aspiration in information extraction. Efforts have been made to build +general purpose extractors that represent relations with their surface forms, +or which jointly embed surface forms with relations from an existing knowledge +graph. However, both of these approaches are limited in their ability to +generalize. In this paper, we build on extensions of Harris' distributional +hypothesis to relations, as well as recent advances in learning text +representations (specifically, BERT), to build task agnostic relation +representations solely from entity-linked text. We show that these +representations significantly outperform previous work on exemplar based +relation extraction (FewRel) even without using any of that task's training +data. We also show that models initialized with our task agnostic +representations, and then tuned on supervised relation extraction datasets, +significantly outperform the previous methods on SemEval 2010 Task 8, KBP37, +and TACRED." 
+http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|arxiv_firstAuthor|Livio Baldini Soares +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|arxiv_updated|2019-06-07T15:26:50Z +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|arxiv_title|Matching the Blanks: Distributional Similarity for Relation Learning +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|arxiv_published|2019-06-07T15:26:50Z +http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank|arxiv_num|1906.03158 +http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi|creationDate|2020-06-25 +http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi|tag|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi|tag|http://www.semanlink.net/tag/stack_overflow +http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi|tag|http://www.semanlink.net/tag/python_tips +http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi|tag|http://www.semanlink.net/tag/ssl +http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi|tag|http://www.semanlink.net/tag/scraping +http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi|comment|"trying to connect to some https url + +``` +SSLCertVerificationError Traceback (most recent call last) +/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py in do_open(self, http_class, req, **http_conn_args) +1316 h.request(req.get_method(), req.selector, req.data, headers, +-> 1317 encode_chunked=req.has_header('Transfer-encoding')) +1318 except OSError as err: # timeout error +/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py in request(self, method, url, body, headers, encode_chunked) +1228 """"""Send a complete request to the server."""""" +-> 1229 self._send_request(method, url, body, headers, encode_chunked) +1230 +``` + +> go to Macintosh HD > Applications > Python3.6 folder (or whatever version of python you're using) > double click on ""Install Certificates.command"" file + +(pour le trouver, Finder find ""Install Certificates.command)" +http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi|title|python - Scraping: SSL: CERTIFICATE_VERIFY_FAILED - Stack Overflow +http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi|bookmarkOf|https://stackoverflow.com/questions/50236117/scraping-ssl-certificate-verify-failed-error-for-http-en-wikipedia-org +http://www.semanlink.net/doc/2020/06/python_scraping_ssl_certifi|creationTime|2020-06-25T15:35:03Z +http://www.semanlink.net/doc/2021/05/simple_unsupervised_keyphrase_e|creationDate|2021-05-31 +http://www.semanlink.net/doc/2021/05/simple_unsupervised_keyphrase_e|tag|http://www.semanlink.net/tag/acl_2018 +http://www.semanlink.net/doc/2021/05/simple_unsupervised_keyphrase_e|tag|http://www.semanlink.net/tag/unsupervised_keyphrase_extraction +http://www.semanlink.net/doc/2021/05/simple_unsupervised_keyphrase_e|tag|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/doc/2021/05/simple_unsupervised_keyphrase_e|comment|"> unsupervised +method to automatically extract keyphrases from +a document, that **only requires +the document itself** +> +> 1. We extract **candidate phrases +from the text, based on part-of-speech sequences**. +More precisely, we keep only those phrases that +consist of zero or more adjectives followed by one +or multiple nouns (Wan and Xiao, 2008). +> 2. 
We
+use sentence embeddings **to embed
+both the candidate phrases and the document itself
+in the same high-dimensional vector space**
+> 3. We rank the candidate phrases to select
+the output keyphrases. In addition we show how to improve the
+ranking step, by providing a way to tune the diversity
+of the extracted keyphrases."
+http://www.semanlink.net/doc/2021/05/simple_unsupervised_keyphrase_e|title|Simple Unsupervised Keyphrase Extraction using Sentence Embeddings - ACL Anthology (2018)
+http://www.semanlink.net/doc/2021/05/simple_unsupervised_keyphrase_e|bookmarkOf|https://www.aclweb.org/anthology/K18-1022/
+http://www.semanlink.net/doc/2021/05/simple_unsupervised_keyphrase_e|creationTime|2021-05-31T11:47:52Z
+http://www.semanlink.net/doc/2021/07/les_groupes_sanguins_de_neander|creationDate|2021-07-29
+http://www.semanlink.net/doc/2021/07/les_groupes_sanguins_de_neander|tag|http://www.semanlink.net/tag/neandertal
+http://www.semanlink.net/doc/2021/07/les_groupes_sanguins_de_neander|tag|http://www.semanlink.net/tag/denisovan
+http://www.semanlink.net/doc/2021/07/les_groupes_sanguins_de_neander|tag|http://www.semanlink.net/tag/sang
+http://www.semanlink.net/doc/2021/07/les_groupes_sanguins_de_neander|title|Les groupes sanguins de Neandertal et Denisova décryptés CNRS
+http://www.semanlink.net/doc/2021/07/les_groupes_sanguins_de_neander|bookmarkOf|http://www.cnrs.fr/fr/les-groupes-sanguins-de-neandertal-et-denisova-decryptes
+http://www.semanlink.net/doc/2021/07/les_groupes_sanguins_de_neander|creationTime|2021-07-29T00:37:49Z
+http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|creationDate|2020-07-06
+http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|tag|http://www.semanlink.net/tag/end_to_end_learning
+http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|tag|http://www.semanlink.net/tag/phd_thesis
+http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov
+http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation
+http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|tag|http://www.semanlink.net/tag/kg_and_nlp
+http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|comment|"> This thesis develops methods which leverage the strength of both neural and
+symbolic approaches. Specifically, we **augment raw text with symbolic structure
+about entities and their relations from a knowledge graph**, and learn task-specific
+neural embeddings of the combined data structure. We also develop algorithms for
+doing **multi-step reasoning over the embeddings in a differentiable manner**, leading
+to **end-to-end models for answering complex queries**. Along the way we develop
+variants of recurrent and graph neural networks suited to modeling textual and
+multi-relational data, respectively, and use transfer learning to improve generalization.
+ +Related paper by same author: [[2002.10640] Differentiable Reasoning over a Virtual Knowledge Base](doc:2020/07/2002_10640_differentiable_rea)" +http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|relatedDoc|http://www.semanlink.net/doc/2020/07/2002_10640_differentiable_rea +http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|title|End-to-End Learning with Text & Knowledge Bases (Bhuwan Dhingra PhD Thesis) +http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|bookmarkOf|http://www.cs.cmu.edu/~bdhingra/docs/bhuwan_dhingra_thesis.pdf +http://www.semanlink.net/doc/2020/07/end_to_end_learning_with_text_|creationTime|2020-07-06T17:41:29Z +http://www.semanlink.net/doc/2021/06/nandan_thakur_sur_twitter_ho|creationDate|2021-06-03 +http://www.semanlink.net/doc/2021/06/nandan_thakur_sur_twitter_ho|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/06/nandan_thakur_sur_twitter_ho|tag|http://www.semanlink.net/tag/zero_shot +http://www.semanlink.net/doc/2021/06/nandan_thakur_sur_twitter_ho|tag|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/doc/2021/06/nandan_thakur_sur_twitter_ho|title|"Nandan Thakur sur Twitter : ""how to create sentence-embeddings when little or zero in-domain training data is available""" +http://www.semanlink.net/doc/2021/06/nandan_thakur_sur_twitter_ho|bookmarkOf|https://twitter.com/Nthakur20/status/1318231686174355457 +http://www.semanlink.net/doc/2021/06/nandan_thakur_sur_twitter_ho|creationTime|2021-06-03T12:20:01Z +http://www.semanlink.net/doc/2021/08/a_saint_rogatien_les_cancers_p|creationDate|2021-08-12 +http://www.semanlink.net/doc/2021/08/a_saint_rogatien_les_cancers_p|tag|http://www.semanlink.net/tag/cancers_pediatriques +http://www.semanlink.net/doc/2021/08/a_saint_rogatien_les_cancers_p|tag|http://www.semanlink.net/tag/pollution_de_l_eau +http://www.semanlink.net/doc/2021/08/a_saint_rogatien_les_cancers_p|tag|http://www.semanlink.net/tag/france_delabrement +http://www.semanlink.net/doc/2021/08/a_saint_rogatien_les_cancers_p|tag|http://www.semanlink.net/tag/pesticide +http://www.semanlink.net/doc/2021/08/a_saint_rogatien_les_cancers_p|tag|http://www.semanlink.net/tag/agriculture_industrielle +http://www.semanlink.net/doc/2021/08/a_saint_rogatien_les_cancers_p|title|A Saint-Rogatien, les cancers pédiatriques alimentent la suspicion sur les pollutions de l’environnement +http://www.semanlink.net/doc/2021/08/a_saint_rogatien_les_cancers_p|bookmarkOf|https://www.lemonde.fr/planete/article/2021/08/12/a-saint-rogatien-les-cancers-pediatriques-alimentent-la-suspicion-sur-les-pollutions-de-l-environnement_6091226_3244.html +http://www.semanlink.net/doc/2021/08/a_saint_rogatien_les_cancers_p|creationTime|2021-08-12T11:54:52Z +http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|creationDate|2020-07-14 +http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|tag|http://www.semanlink.net/tag/sbert +http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|tag|http://www.semanlink.net/tag/xlnet +http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|comment|[paper](doc:2019/08/_1908_10084_sentence_bert_sen) 
+http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|relatedDoc|http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen +http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|title|UKPLab/sentence-transformers: Sentence Embeddings with BERT & XLNet +http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|bookmarkOf|https://github.com/UKPLab/sentence-transformers +http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|creationTime|2020-07-14T19:08:40Z +http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s|mainDoc|http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen +http://www.semanlink.net/doc/2021/09/has_ai_found_a_new_foundation_|creationDate|2021-09-12 +http://www.semanlink.net/doc/2021/09/has_ai_found_a_new_foundation_|tag|http://www.semanlink.net/tag/foundation_models +http://www.semanlink.net/doc/2021/09/has_ai_found_a_new_foundation_|comment|"> The +report says, unironically, “we do not fully understand the +nature or quality of the foundation that foundation models +provide”, but then why grandiosely call them foundation +models at all?" +http://www.semanlink.net/doc/2021/09/has_ai_found_a_new_foundation_|title|Has AI found a new Foundation? +http://www.semanlink.net/doc/2021/09/has_ai_found_a_new_foundation_|bookmarkOf|https://thegradient.pub/has-ai-found-a-new-foundation/ +http://www.semanlink.net/doc/2021/09/has_ai_found_a_new_foundation_|creationTime|2021-09-12T23:23:12Z +http://www.semanlink.net/doc/2020/07/l%E2%80%99etat_de_l_archeologie_au_nige|creationDate|2020-07-11 +http://www.semanlink.net/doc/2020/07/l%E2%80%99etat_de_l_archeologie_au_nige|tag|http://www.semanlink.net/tag/archeologie_du_niger +http://www.semanlink.net/doc/2020/07/l%E2%80%99etat_de_l_archeologie_au_nige|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.semanlink.net/doc/2020/07/l%E2%80%99etat_de_l_archeologie_au_nige|comment|"Oumarou Amadou Ide (2009) L’état de l'archéologie au Niger, Azania: Archaeological Research in Africa, 44:1, 121-130, DOI: 10.1080/00671990902795798 + +[semanticscholar](https://www.semanticscholar.org/paper/L’état-de-l%27archéologie-au-Niger-Ide/4bd8e7b4694e6ce15ff81937f0475658e9839cbe)" +http://www.semanlink.net/doc/2020/07/l%E2%80%99etat_de_l_archeologie_au_nige|title|L’état de l'archéologie au Niger: Azania: Archaeological Research in Africa (2009) +http://www.semanlink.net/doc/2020/07/l%E2%80%99etat_de_l_archeologie_au_nige|bookmarkOf|https://www.tandfonline.com/doi/abs/10.1080/00671990902795798 +http://www.semanlink.net/doc/2020/07/l%E2%80%99etat_de_l_archeologie_au_nige|creationTime|2020-07-11T17:22:20Z +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|creationDate|2021-09-30 +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|tag|http://www.semanlink.net/tag/fasttext +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|tag|http://www.semanlink.net/tag/okapi_bm25 +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|tag|http://www.semanlink.net/tag/txtai +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|tag|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|comment|"> This [article](https://towardsdatascience.com/building-a-sentence-embedding-index-with-fasttext-and-bm25-f07e7148d240) covers sentence embeddings and how codequestion built **a fastText + BM25 embeddings search**. 
Source code can be found on github. + +Same people as [neuml/txtai: Build AI-powered semantic search applications](doc:2021/09/neuml_txtai_build_ai_powered_s)" +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|relatedDoc|http://www.semanlink.net/doc/2021/09/neuml_txtai_build_ai_powered_s +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|relatedDoc|http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|title|Building a sentence embedding index with fastText and BM25 by David Mezzetti Towards Data Science +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|bookmarkOf|https://github.com/neuml/codequestion +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|bookmarkOf|https://towardsdatascience.com/building-a-sentence-embedding-index-with-fasttext-and-bm25-f07e7148d240 +http://www.semanlink.net/doc/2021/09/building_a_sentence_embedding_i|creationTime|2021-09-30T14:45:22Z +http://www.semanlink.net/doc/2021/06/nils_reimers_sur_twitter_how|creationDate|2021-06-22 +http://www.semanlink.net/doc/2021/06/nils_reimers_sur_twitter_how|tag|http://www.semanlink.net/tag/nils_reimers +http://www.semanlink.net/doc/2021/06/nils_reimers_sur_twitter_how|tag|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/doc/2021/06/nils_reimers_sur_twitter_how|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/06/nils_reimers_sur_twitter_how|tag|http://www.semanlink.net/tag/howto +http://www.semanlink.net/doc/2021/06/nils_reimers_sur_twitter_how|comment|> Adding hard negatives improve performance for search, but not for clustering +http://www.semanlink.net/doc/2021/06/nils_reimers_sur_twitter_how|title|"Nils Reimers sur Twitter : ""How to train state-of-the-art sentence embeddings?""" +http://www.semanlink.net/doc/2021/06/nils_reimers_sur_twitter_how|bookmarkOf|https://twitter.com/Nils_Reimers/status/1407309773238935564?s=20 +http://www.semanlink.net/doc/2021/06/nils_reimers_sur_twitter_how|creationTime|2021-06-22T14:54:27Z +http://www.semanlink.net/doc/2020/09/laura_deming_sur_twitter_is_|creationDate|2020-09-19 +http://www.semanlink.net/doc/2020/09/laura_deming_sur_twitter_is_|tag|http://www.semanlink.net/tag/good_question +http://www.semanlink.net/doc/2020/09/laura_deming_sur_twitter_is_|tag|http://www.semanlink.net/tag/physique +http://www.semanlink.net/doc/2020/09/laura_deming_sur_twitter_is_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/09/laura_deming_sur_twitter_is_|comment|> Linear or quadratic laws often come from a Taylor series expansion around an equilibrium point. Usually the first derivative is non-zero, so you get a linear law. If the first derivative vanishes (e.g. due to a symmetry), you get a quadratic law instead. Rare for both to be zero. [src](https://twitter.com/Kurt_M_Barry/status/1307362324978769921?s=20) +http://www.semanlink.net/doc/2020/09/laura_deming_sur_twitter_is_|title|Laura Deming sur Twitter : Is there a good reason why many basic laws of physics are linear or quadratic (for example, F=ma), not much more complex? 
+http://www.semanlink.net/doc/2020/09/laura_deming_sur_twitter_is_|bookmarkOf|https://twitter.com/LauraDeming/status/1307352888771899392 +http://www.semanlink.net/doc/2020/09/laura_deming_sur_twitter_is_|creationTime|2020-09-19T21:12:27Z +http://www.semanlink.net/doc/2021/02/au_kirghizistan_l%E2%80%99indesirable_|creationDate|2021-02-07 +http://www.semanlink.net/doc/2021/02/au_kirghizistan_l%E2%80%99indesirable_|tag|http://www.semanlink.net/tag/kirghizistan +http://www.semanlink.net/doc/2021/02/au_kirghizistan_l%E2%80%99indesirable_|tag|http://www.semanlink.net/tag/nouvelle_route_de_la_soie +http://www.semanlink.net/doc/2021/02/au_kirghizistan_l%E2%80%99indesirable_|title|Au Kirghizistan, l’indésirable présence de la Chine +http://www.semanlink.net/doc/2021/02/au_kirghizistan_l%E2%80%99indesirable_|bookmarkOf|https://www.lemonde.fr/international/article/2021/02/05/au-kirghizistan-l-indesirable-presence-chinoise_6068910_3210.html +http://www.semanlink.net/doc/2021/02/au_kirghizistan_l%E2%80%99indesirable_|creationTime|2021-02-07T11:59:58Z +http://www.semanlink.net/doc/2020/11/jsomers_net_%7C_i_should_have_lov|creationDate|2020-11-19 +http://www.semanlink.net/doc/2020/11/jsomers_net_%7C_i_should_have_lov|tag|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/doc/2020/11/jsomers_net_%7C_i_should_have_lov|title|jsomers.net I should have loved biology +http://www.semanlink.net/doc/2020/11/jsomers_net_%7C_i_should_have_lov|bookmarkOf|https://jsomers.net/i-should-have-loved-biology/ +http://www.semanlink.net/doc/2020/11/jsomers_net_%7C_i_should_have_lov|creationTime|2020-11-19T13:41:21Z +http://www.semanlink.net/doc/2020/09/nucleaire_les_nouvelles_deriv|creationDate|2020-09-04 +http://www.semanlink.net/doc/2020/09/nucleaire_les_nouvelles_deriv|tag|http://www.semanlink.net/tag/finlande +http://www.semanlink.net/doc/2020/09/nucleaire_les_nouvelles_deriv|tag|http://www.semanlink.net/tag/epr +http://www.semanlink.net/doc/2020/09/nucleaire_les_nouvelles_deriv|comment|"> un certain nombre de matériels souffrent d’un défaut imprévu : même +inutilisés, ils ont vieilli et doivent être remplacés" +http://www.semanlink.net/doc/2020/09/nucleaire_les_nouvelles_deriv|title|Nucléaire : les nouvelles dérives de l’EPR en Finlande pourraient coûter cher à l’Etat français +http://www.semanlink.net/doc/2020/09/nucleaire_les_nouvelles_deriv|bookmarkOf|https://www.lemonde.fr/economie/article/2020/09/04/nucleaire-les-nouvelles-derives-de-l-epr-en-finlande-pourraient-couter-cher-a-l-etat-francais_6050898_3234.html +http://www.semanlink.net/doc/2020/09/nucleaire_les_nouvelles_deriv|creationTime|2020-09-04T09:41:00Z +http://www.semanlink.net/doc/2021/10/sphinx|creationDate|2021-10-15 +http://www.semanlink.net/doc/2021/10/sphinx|tag|http://www.semanlink.net/tag/documentation_tool +http://www.semanlink.net/doc/2021/10/sphinx|tag|http://www.semanlink.net/tag/python_tools +http://www.semanlink.net/doc/2021/10/sphinx|title|Sphinx +http://www.semanlink.net/doc/2021/10/sphinx|bookmarkOf|https://www.sphinx-doc.org/en/master/ +http://www.semanlink.net/doc/2021/10/sphinx|creationTime|2021-10-15T17:18:31Z +http://www.semanlink.net/doc/2021/08/ai_in_africa_teaching_a_bot_to|creationDate|2021-08-26 +http://www.semanlink.net/doc/2021/08/ai_in_africa_teaching_a_bot_to|tag|http://www.semanlink.net/tag/masakhane +http://www.semanlink.net/doc/2021/08/ai_in_africa_teaching_a_bot_to|title|AI in Africa: Teaching a bot to read my mum's texts - BBC News (2020) 
+http://www.semanlink.net/doc/2021/08/ai_in_africa_teaching_a_bot_to|bookmarkOf|https://www.bbc.co.uk/news/amp/world-africa-52411797 +http://www.semanlink.net/doc/2021/08/ai_in_africa_teaching_a_bot_to|creationTime|2021-08-26T14:59:45Z +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|creationDate|2021-09-06 +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|tag|http://www.semanlink.net/tag/multilingual_language_models +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|tag|http://www.semanlink.net/tag/dictionnaire +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|arxiv_author|Aditi Chaudhary +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|arxiv_author|Karthik Raman +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|arxiv_author|Jiecao Chen +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|arxiv_author|Krishna Srinivasan +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|comment|"> Despite the strong representation learning capability enabled by MLM, we demonstrate an inherent limitation of MLM for multilingual representation learning. In particular, by requiring the model to predict the language-specific token, the MLM objective disincentivizes learning a language-agnostic representation -- which is a key goal of multilingual pre-training +> +> DICT-MLM works by incentivizing the model +to be able to predict not just the original +masked word, but potentially any of its crosslingual +synonyms as well." +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|title|[2010.12566] DICT-MLM: Improved Multilingual Pre-Training using Bilingual Dictionaries +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|bookmarkOf|https://arxiv.org/abs/2010.12566 +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|creationTime|2021-09-06T18:27:44Z +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|arxiv_summary|"Pre-trained multilingual language models such as mBERT have shown immense +gains for several natural language processing (NLP) tasks, especially in the +zero-shot cross-lingual setting. Most, if not all, of these pre-trained models +rely on the masked-language modeling (MLM) objective as the key language +learning objective. The principle behind these approaches is that predicting +the masked words with the help of the surrounding text helps learn potent +contextualized representations. Despite the strong representation learning +capability enabled by MLM, we demonstrate an inherent limitation of MLM for +multilingual representation learning. In particular, by requiring the model to +predict the language-specific token, the MLM objective disincentivizes learning +a language-agnostic representation -- which is a key goal of multilingual +pre-training. Therefore to encourage better cross-lingual representation +learning we propose the DICT-MLM method. DICT-MLM works by incentivizing the +model to be able to predict not just the original masked word, but potentially +any of its cross-lingual synonyms as well. Our empirical analysis on multiple +downstream tasks spanning 30+ languages, demonstrates the efficacy of the +proposed approach and its ability to learn better multilingual representations." 
+http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|arxiv_firstAuthor|Aditi Chaudhary +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|arxiv_updated|2020-10-23T17:53:11Z +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|arxiv_title|DICT-MLM: Improved Multilingual Pre-Training using Bilingual Dictionaries +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|arxiv_published|2020-10-23T17:53:11Z +http://www.semanlink.net/doc/2021/09/2010_12566_dict_mlm_improved|arxiv_num|2010.12566 +http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur|creationDate|2021-09-03 +http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur|tag|http://www.semanlink.net/tag/ai_stanford +http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur|tag|http://www.semanlink.net/tag/link_prediction +http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur|tag|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur|comment|"> Many GNN layers have been able to be applied to the link prediction +task directly. But due to some graph structure and graph neural network limitations, +the performance of the neural style link prediction sometimes will be negatively +influenced. To address these issues, we propose a novel approach to implicitly +guide GNN with extracted knowledge." +http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur|title|Link Prediction with Graph Neural Networks and Knowledge Extraction +http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur|bookmarkOf|http://cs230.stanford.edu/projects_spring_2020/reports/38854344.pdf +http://www.semanlink.net/doc/2021/09/link_prediction_with_graph_neur|creationTime|2021-09-03T01:36:16Z +http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_|creationDate|2021-01-26 +http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_|tag|http://www.semanlink.net/tag/nlp_task_as_qa_problem +http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_|tag|http://www.semanlink.net/tag/slides +http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_|tag|http://www.semanlink.net/tag/richard_socher +http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_|tag|http://www.semanlink.net/tag/multi_task_learning +http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_|comment|[cf.](doc:?uri=https%3A%2F%2Fgithub.com%2Fsalesforce%2FdecaNLP) +http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_|relatedDoc|https://github.com/salesforce/decaNLP +http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_|title|The Natural Language Decathlon: Multitask Learning as Question Answering (slides) +http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_|bookmarkOf|https://web.stanford.edu/class/cs224n/slides/cs224n-2019-lecture17-multitask.pdf +http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_|creationTime|2021-01-26T14:43:51Z +http://www.semanlink.net/doc/2020/10/evolution_of_a_crop%E2%80%99s_wild_rela|creationDate|2020-10-16 +http://www.semanlink.net/doc/2020/10/evolution_of_a_crop%E2%80%99s_wild_rela|tag|http://www.semanlink.net/tag/ogm 
+http://www.semanlink.net/doc/2020/10/evolution_of_a_crop%E2%80%99s_wild_rela|tag|http://www.semanlink.net/tag/mais_ogm +http://www.semanlink.net/doc/2020/10/evolution_of_a_crop%E2%80%99s_wild_rela|tag|http://www.semanlink.net/tag/horizontal_gene_transfer +http://www.semanlink.net/doc/2020/10/evolution_of_a_crop%E2%80%99s_wild_rela|comment|Teosinte, a wild relative of maize originating from Mexico recently emerged as an invasive weed in Europe. +http://www.semanlink.net/doc/2020/10/evolution_of_a_crop%E2%80%99s_wild_rela|title|Evolution of a crop’s wild relative into a weed that includes an herbicide resistance gene +http://www.semanlink.net/doc/2020/10/evolution_of_a_crop%E2%80%99s_wild_rela|bookmarkOf|https://twitter.com/hyperfp/status/1317010367487397888?s=20 +http://www.semanlink.net/doc/2020/10/evolution_of_a_crop%E2%80%99s_wild_rela|creationTime|2020-10-16T10:04:20Z +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|creationDate|2020-11-09 +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|tag|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|tag|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|tag|http://www.semanlink.net/tag/ckb +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|tag|http://www.semanlink.net/tag/semanlink_tag_finder +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|comment|Une implémentation de [[2010.03496] Inductive Entity Representations from Text via Link Prediction](doc:2020/11/2010_03496_inductive_entity_r) +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|relatedDoc|http://www.semanlink.net/doc/2020/11/2010_03496_inductive_entity_r +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|title|raphaelsty/ckb: Contextual knowledge bases +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|bookmarkOf|https://github.com/raphaelsty/ckb +http://www.semanlink.net/doc/2020/11/raphaelsty_ckb_contextual_know|creationTime|2020-11-09T16:10:42Z +http://www.semanlink.net/doc/2021/04/cory_doctorow_the_zombie_econo|creationDate|2021-04-04 +http://www.semanlink.net/doc/2021/04/cory_doctorow_the_zombie_econo|tag|http://www.semanlink.net/tag/zombie +http://www.semanlink.net/doc/2021/04/cory_doctorow_the_zombie_econo|tag|http://www.semanlink.net/tag/dette +http://www.semanlink.net/doc/2021/04/cory_doctorow_the_zombie_econo|tag|http://www.semanlink.net/tag/crise_des_subprimes +http://www.semanlink.net/doc/2021/04/cory_doctorow_the_zombie_econo|tag|http://www.semanlink.net/tag/digital_economy +http://www.semanlink.net/doc/2021/04/cory_doctorow_the_zombie_econo|tag|http://www.semanlink.net/tag/cory_doctorow +http://www.semanlink.net/doc/2021/04/cory_doctorow_the_zombie_econo|title|Cory Doctorow: The zombie economy and digital arm-breakers +http://www.semanlink.net/doc/2021/04/cory_doctorow_the_zombie_econo|bookmarkOf|https://pluralistic.net/2021/04/02/innovation-unlocks-markets/#digital-arm-breakers +http://www.semanlink.net/doc/2021/04/cory_doctorow_the_zombie_econo|creationTime|2021-04-04T13:44:14Z +http://www.semanlink.net/doc/2021/02/linking_entities_with_knowledge|creationDate|2021-02-05 +http://www.semanlink.net/doc/2021/02/linking_entities_with_knowledge|tag|http://www.semanlink.net/tag/entity_linking 
+http://www.semanlink.net/doc/2021/02/linking_entities_with_knowledge|tag|http://www.semanlink.net/tag/random_walk +http://www.semanlink.net/doc/2021/02/linking_entities_with_knowledge|tag|http://www.semanlink.net/tag/pagerank +http://www.semanlink.net/doc/2021/02/linking_entities_with_knowledge|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2021/02/linking_entities_with_knowledge|title|Linking Entities with Knowledge Graphs by Sigurd Berglann Strise Medium +http://www.semanlink.net/doc/2021/02/linking_entities_with_knowledge|bookmarkOf|https://medium.com/strise/linking-entities-with-knowledge-graphs-b961090622de +http://www.semanlink.net/doc/2021/02/linking_entities_with_knowledge|creationTime|2021-02-05T10:44:11Z +http://www.semanlink.net/doc/2021/03/barthez_transformers_4_5_0_de|creationDate|2021-03-31 +http://www.semanlink.net/doc/2021/03/barthez_transformers_4_5_0_de|title|BARThez — transformers 4.5.0.dev0 documentation +http://www.semanlink.net/doc/2021/03/barthez_transformers_4_5_0_de|bookmarkOf|https://huggingface.co/transformers/master/model_doc/barthez.html +http://www.semanlink.net/doc/2021/03/barthez_transformers_4_5_0_de|creationTime|2021-03-31T23:00:47Z +http://www.semanlink.net/doc/2020/06/nikola_ou_quand_le_camion_a_hyd|creationDate|2020-06-13 +http://www.semanlink.net/doc/2020/06/nikola_ou_quand_le_camion_a_hyd|tag|http://www.semanlink.net/tag/hydrogen_economy +http://www.semanlink.net/doc/2020/06/nikola_ou_quand_le_camion_a_hyd|tag|http://www.semanlink.net/tag/hydrogen_cars +http://www.semanlink.net/doc/2020/06/nikola_ou_quand_le_camion_a_hyd|tag|http://www.semanlink.net/tag/bourse +http://www.semanlink.net/doc/2020/06/nikola_ou_quand_le_camion_a_hyd|title|Nikola ou quand le camion à hydrogène enflamme la Bourse +http://www.semanlink.net/doc/2020/06/nikola_ou_quand_le_camion_a_hyd|bookmarkOf|https://www.lemonde.fr/economie/article/2020/06/13/nikola-ou-quand-le-camion-a-hydrogene-enflamme-la-bourse_6042740_3234.html +http://www.semanlink.net/doc/2020/06/nikola_ou_quand_le_camion_a_hyd|creationTime|2020-06-13T19:29:59Z +http://www.semanlink.net/doc/2020/08/srdjan_ostojic_sur_twitter_d|creationDate|2020-08-21 +http://www.semanlink.net/doc/2020/08/srdjan_ostojic_sur_twitter_d|tag|http://www.semanlink.net/tag/singular_value_decomposition +http://www.semanlink.net/doc/2020/08/srdjan_ostojic_sur_twitter_d|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/08/srdjan_ostojic_sur_twitter_d|title|"srdjan ostojic sur Twitter : ""During my physics undergrad, I have never heard of Singular Value Decomposition (SVD). 
Why?...""" +http://www.semanlink.net/doc/2020/08/srdjan_ostojic_sur_twitter_d|bookmarkOf|https://twitter.com/ostojic_srdjan/status/1296748282765553669 +http://www.semanlink.net/doc/2020/08/srdjan_ostojic_sur_twitter_d|creationTime|2020-08-21T17:55:38Z +http://www.semanlink.net/doc/2021/09/haystack|creationDate|2021-09-20 +http://www.semanlink.net/doc/2021/09/haystack|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2021/09/haystack|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2021/09/haystack|tag|http://www.semanlink.net/tag/haystack +http://www.semanlink.net/doc/2021/09/haystack|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/doc/2021/09/haystack|tag|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/doc/2021/09/haystack|tag|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/doc/2021/09/haystack|comment|"[deepset](doc:2021/09/nlp_solutions_to_streamline_neu) + +> Haystack is an **open-source framework** for building search systems that work intelligently over large document collections. Recent advances in NLP have enabled the application of question answering, retrieval and summarization to real world settings and Haystack is designed to be the bridge between research and industry." +http://www.semanlink.net/doc/2021/09/haystack|relatedDoc|http://www.semanlink.net/doc/2021/09/nlp_solutions_to_streamline_neu +http://www.semanlink.net/doc/2021/09/haystack|title|Haystack (deepset) +http://www.semanlink.net/doc/2021/09/haystack|bookmarkOf|https://haystack.deepset.ai +http://www.semanlink.net/doc/2021/09/haystack|bookmarkOf|https://haystack.deepset.ai/overview/get-started +http://www.semanlink.net/doc/2021/09/haystack|creationTime|2021-09-20T17:03:13Z +http://www.semanlink.net/doc/2021/07/jeshraghian_snntorch_deep_lear|creationDate|2021-07-26 +http://www.semanlink.net/doc/2021/07/jeshraghian_snntorch_deep_lear|tag|http://www.semanlink.net/tag/python +http://www.semanlink.net/doc/2021/07/jeshraghian_snntorch_deep_lear|tag|http://www.semanlink.net/tag/spiking_neural_network +http://www.semanlink.net/doc/2021/07/jeshraghian_snntorch_deep_lear|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2021/07/jeshraghian_snntorch_deep_lear|comment|a Python package for performing gradient-based learning with spiking neural networks +http://www.semanlink.net/doc/2021/07/jeshraghian_snntorch_deep_lear|title|jeshraghian/snntorch: Deep learning with spiking neural networks in Python +http://www.semanlink.net/doc/2021/07/jeshraghian_snntorch_deep_lear|bookmarkOf|https://github.com/jeshraghian/snntorch +http://www.semanlink.net/doc/2021/07/jeshraghian_snntorch_deep_lear|creationTime|2021-07-26T15:46:20Z +http://www.semanlink.net/doc/2020/12/l%E2%80%99hydrogene_tiendra_t_il_ses_pr|creationDate|2020-12-17 +http://www.semanlink.net/doc/2020/12/l%E2%80%99hydrogene_tiendra_t_il_ses_pr|tag|http://www.semanlink.net/tag/hydrogen_economy +http://www.semanlink.net/doc/2020/12/l%E2%80%99hydrogene_tiendra_t_il_ses_pr|tag|http://www.semanlink.net/tag/hydrogen +http://www.semanlink.net/doc/2020/12/l%E2%80%99hydrogene_tiendra_t_il_ses_pr|title|L’hydrogène tiendra-t-il ses promesses ? 
CNRS Le journal +http://www.semanlink.net/doc/2020/12/l%E2%80%99hydrogene_tiendra_t_il_ses_pr|bookmarkOf|https://lejournal.cnrs.fr/articles/lhydrogene-tiendra-t-il-ses-promesses?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1608207127 +http://www.semanlink.net/doc/2020/12/l%E2%80%99hydrogene_tiendra_t_il_ses_pr|creationTime|2020-12-17T14:10:56Z +http://www.semanlink.net/doc/2020/08/bringing_traditional_ml_to_your|creationDate|2020-08-06 +http://www.semanlink.net/doc/2020/08/bringing_traditional_ml_to_your|tag|http://www.semanlink.net/tag/neo4j +http://www.semanlink.net/doc/2020/08/bringing_traditional_ml_to_your|tag|http://www.semanlink.net/tag/jure_leskovec +http://www.semanlink.net/doc/2020/08/bringing_traditional_ml_to_your|tag|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/doc/2020/08/bringing_traditional_ml_to_your|tag|http://www.semanlink.net/tag/node2vec +http://www.semanlink.net/doc/2020/08/bringing_traditional_ml_to_your|comment|New in Neo4j Graph Data Science library (v1.3): [Graph Embeddings](tag:graph_embeddings). +http://www.semanlink.net/doc/2020/08/bringing_traditional_ml_to_your|title|Bringing traditional ML to your Neo4j Graph with node2vec Dave Voutila +http://www.semanlink.net/doc/2020/08/bringing_traditional_ml_to_your|bookmarkOf|https://www.sisu.io/posts/embeddings/ +http://www.semanlink.net/doc/2020/08/bringing_traditional_ml_to_your|creationTime|2020-08-06T17:45:37Z +http://www.semanlink.net/doc/2020/08/the_rna_world_and_the_origins_o|creationDate|2020-08-28 +http://www.semanlink.net/doc/2020/08/the_rna_world_and_the_origins_o|tag|http://www.semanlink.net/tag/arn +http://www.semanlink.net/doc/2020/08/the_rna_world_and_the_origins_o|tag|http://www.semanlink.net/tag/evolution +http://www.semanlink.net/doc/2020/08/the_rna_world_and_the_origins_o|tag|http://www.semanlink.net/tag/cell +http://www.semanlink.net/doc/2020/08/the_rna_world_and_the_origins_o|tag|http://www.semanlink.net/tag/molecular_biology +http://www.semanlink.net/doc/2020/08/the_rna_world_and_the_origins_o|comment|> To fully understand the processes occurring in present-day living cells, we need to consider how they arose in evolution... 
+http://www.semanlink.net/doc/2020/08/the_rna_world_and_the_origins_o|title|The RNA World and the Origins of Life - Molecular Biology of the Cell +http://www.semanlink.net/doc/2020/08/the_rna_world_and_the_origins_o|bookmarkOf|https://www.ncbi.nlm.nih.gov/books/NBK26876/ +http://www.semanlink.net/doc/2020/08/the_rna_world_and_the_origins_o|creationTime|2020-08-28T13:33:19Z +http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s|creationDate|2021-02-23 +http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s|tag|http://www.semanlink.net/tag/zero_shot_text_classifier +http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s|comment|"You can now train an efficient classifier with unlabeled data This new script lets you **distill our @huggingface zero-shot classifier with your specified class names, speeding up inference by 100x or more** + +[Zero-shot classifier distillation at master · huggingface/transformers](doc:2021/02/zero_shot_classifier_distillati)" +http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s|relatedDoc|http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati +http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s|title|Distill our @huggingface zero-shot classifier with your specified class names +http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s|bookmarkOf|https://twitter.com/joeddav/status/1363543296166002688 +http://www.semanlink.net/doc/2021/02/distill_our_huggingface_zero_s|creationTime|2021-02-23T13:57:46Z +http://www.semanlink.net/doc/2021/05/carrot2_search_results_clusteri|creationDate|2021-05-27 +http://www.semanlink.net/doc/2021/05/carrot2_search_results_clusteri|tag|http://www.semanlink.net/tag/carrot2 +http://www.semanlink.net/doc/2021/05/carrot2_search_results_clusteri|title|Carrot2 search results clustering engine (online) +http://www.semanlink.net/doc/2021/05/carrot2_search_results_clusteri|bookmarkOf|https://search.carrot2.org/#/workbench +http://www.semanlink.net/doc/2021/05/carrot2_search_results_clusteri|creationTime|2021-05-27T15:30:59Z +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|creationDate|2020-07-04 +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|tag|http://www.semanlink.net/tag/bert_kb +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|tag|http://www.semanlink.net/tag/acl_2020 +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|tag|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|tag|http://www.semanlink.net/tag/slot_tagging +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|tag|http://www.semanlink.net/tag/oov +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|tag|http://www.semanlink.net/tag/few_shot_learning +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|comment|"Aim to leverage both contextual representation of input text (deep LMs) and knowledge derived +from curated KBs ([Wordnet](tag:wordnet)) to improve [slot tagging](tag:slot_tagging) in the presence of [out-of-vocab](tag:oov) words 
([few-shot scenario](tag:few_shot_learning)) + +Method: + +1. retrieve potentially relevant KB entities and +encode them into distributed representations that +describe global graph-structured information +2. BERT encoder +layer to capture context-aware representations of +the sequence and attend to the KB embeddings +using multi-level graph attention +3. integrate +BERT embeddings and the KB embeddings +to predict the slot type + +Contributions: + +1. feasibility of applying lexical ontology +to facilitate recognizing OOV words. First to consider the large-scale background +knowledge for enhancing context-aware +slot tagging models. +2. a knowledge integration mechanism that uses multi-level graph +attention to model explicit lexical relations. +3.experiments on two benchmark datasets + +> our method makes a notable difference in a +scenario where samples are linguistically diverse, +and large vocab exists. + +(Better improvements when using RNN than BERT, because BERT already contains a lot of background knowledge)" +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|title|Learning to Tag OOV Tokens by Integrating Contextual Representation and Background Knowledge (ACL Anthology 2020) +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|bookmarkOf|https://www.aclweb.org/anthology/2020.acl-main.58/ +http://www.semanlink.net/doc/2020/07/learning_to_tag_oov_tokens_by_i|creationTime|2020-07-04T11:34:35Z +http://www.semanlink.net/doc/2021/10/les_nouvelles_frontieres_du_viv|creationDate|2021-10-18 +http://www.semanlink.net/doc/2021/10/les_nouvelles_frontieres_du_viv|tag|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/doc/2021/10/les_nouvelles_frontieres_du_viv|title|Les nouvelles frontières du vivant CNRS Le journal +http://www.semanlink.net/doc/2021/10/les_nouvelles_frontieres_du_viv|bookmarkOf|https://lejournal.cnrs.fr/articles/les-nouvelles-frontieres-du-vivant?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1634552638 +http://www.semanlink.net/doc/2021/10/les_nouvelles_frontieres_du_viv|creationTime|2021-10-18T14:31:02Z +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_ai_powered_r|creationDate|2020-11-24 +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_ai_powered_r|tag|http://www.semanlink.net/tag/research_papers +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_ai_powered_r|tag|http://www.semanlink.net/tag/nlp_and_search +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_ai_powered_r|tag|http://www.semanlink.net/tag/allennlp +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_ai_powered_r|comment|Semantic Scholar is a free, AI-powered research tool for scientific literature, based at the Allen Institute for AI. 
+http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_ai_powered_r|title|Semantic Scholar AI-Powered Research Tool +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_ai_powered_r|bookmarkOf|https://www.semanticscholar.org/ +http://www.semanlink.net/doc/2020/11/semantic_scholar_%7C_ai_powered_r|creationTime|2020-11-24T09:28:22Z +http://www.semanlink.net/doc/2021/03/new_pipeline_for_zero_shot_text|creationDate|2021-03-15 +http://www.semanlink.net/doc/2021/03/new_pipeline_for_zero_shot_text|tag|http://www.semanlink.net/tag/zero_shot +http://www.semanlink.net/doc/2021/03/new_pipeline_for_zero_shot_text|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2021/03/new_pipeline_for_zero_shot_text|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2021/03/new_pipeline_for_zero_shot_text|comment|Same author: [Zero-shot classifier distillation at master · huggingface/transformers](doc:2021/02/zero_shot_classifier_distillati) +http://www.semanlink.net/doc/2021/03/new_pipeline_for_zero_shot_text|relatedDoc|http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati +http://www.semanlink.net/doc/2021/03/new_pipeline_for_zero_shot_text|title|New pipeline for zero-shot text classification - 🤗Transformers - Hugging Face Forums +http://www.semanlink.net/doc/2021/03/new_pipeline_for_zero_shot_text|bookmarkOf|https://discuss.huggingface.co/t/new-pipeline-for-zero-shot-text-classification/681 +http://www.semanlink.net/doc/2021/03/new_pipeline_for_zero_shot_text|creationTime|2021-03-15T17:57:01Z +http://www.semanlink.net/doc/2020/07/awslabs_dgl_ke_package_for_lea|creationDate|2020-07-07 +http://www.semanlink.net/doc/2020/07/awslabs_dgl_ke_package_for_lea|tag|http://www.semanlink.net/tag/kg_embeddings_library +http://www.semanlink.net/doc/2020/07/awslabs_dgl_ke_package_for_lea|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/07/awslabs_dgl_ke_package_for_lea|tag|http://www.semanlink.net/tag/ai_amazon +http://www.semanlink.net/doc/2020/07/awslabs_dgl_ke_package_for_lea|title|awslabs/dgl-ke: package for learning large-scale knowledge graph embeddings. +http://www.semanlink.net/doc/2020/07/awslabs_dgl_ke_package_for_lea|bookmarkOf|https://github.com/awslabs/dgl-ke +http://www.semanlink.net/doc/2020/07/awslabs_dgl_ke_package_for_lea|creationTime|2020-07-07T19:15:54Z +http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl|creationDate|2021-07-01 +http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl|tag|http://www.semanlink.net/tag/pesticide +http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl|tag|http://www.semanlink.net/tag/sante +http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl|tag|http://www.semanlink.net/tag/glyphosate +http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl|tag|http://www.semanlink.net/tag/inserm +http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl|comment|Cancers, troubles cognitifs, maladies neurodégénératives, endométriose… Les experts mandatés par l’Institut national de la santé et de la recherche médicale ont dressé le tableau le plus exhaustif à ce jour des effets de l’exposition à ces produits. 
+http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl|title|Pesticides et santé : les conclusions inquiétantes de l’expertise collective de l’Inserm +http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl|bookmarkOf|https://www.lemonde.fr/planete/article/2021/07/01/pesticides-et-sante-les-conclusions-inquietantes-de-l-expertise-collective-de-l-inserm_6086451_3244.html +http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl|creationTime|2021-07-01T12:12:34Z +http://www.semanlink.net/doc/2021/02/une_collaboration_cnrs_atteint_|creationDate|2021-02-25 +http://www.semanlink.net/doc/2021/02/une_collaboration_cnrs_atteint_|tag|http://www.semanlink.net/tag/cnrs +http://www.semanlink.net/doc/2021/02/une_collaboration_cnrs_atteint_|tag|http://www.semanlink.net/tag/quantum_computing +http://www.semanlink.net/doc/2021/02/une_collaboration_cnrs_atteint_|title|Une collaboration CNRS atteint à son tour l’avantage quantique ! CNRS Le journal +http://www.semanlink.net/doc/2021/02/une_collaboration_cnrs_atteint_|bookmarkOf|https://lejournal.cnrs.fr/articles/une-collaboration-cnrs-atteint-a-son-tour-lavantage-quantique?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1614229767 +http://www.semanlink.net/doc/2021/02/une_collaboration_cnrs_atteint_|creationTime|2021-02-25T08:21:57Z +http://www.semanlink.net/doc/2020/12/un_meme_gene_a_permis_%C2%AB_d%E2%80%99inven|creationDate|2020-12-30 +http://www.semanlink.net/doc/2020/12/un_meme_gene_a_permis_%C2%AB_d%E2%80%99inven|tag|http://www.semanlink.net/tag/genetique_et_evolution +http://www.semanlink.net/doc/2020/12/un_meme_gene_a_permis_%C2%AB_d%E2%80%99inven|tag|http://www.semanlink.net/tag/histoire_de_la_vie +http://www.semanlink.net/doc/2020/12/un_meme_gene_a_permis_%C2%AB_d%E2%80%99inven|title|Un même gène a permis « d’inventer » l'hémoglobine plusieurs fois CNRS +http://www.semanlink.net/doc/2020/12/un_meme_gene_a_permis_%C2%AB_d%E2%80%99inven|bookmarkOf|http://www.cnrs.fr/fr/un-meme-gene-permis-dinventer-lhemoglobine-plusieurs-fois +http://www.semanlink.net/doc/2020/12/un_meme_gene_a_permis_%C2%AB_d%E2%80%99inven|creationTime|2020-12-30T13:41:51Z +http://www.semanlink.net/doc/2020/10/relation_societe_milieu_en_doma|creationDate|2020-10-09 +http://www.semanlink.net/doc/2020/10/relation_societe_milieu_en_doma|tag|http://www.semanlink.net/tag/archeologie_du_niger +http://www.semanlink.net/doc/2020/10/relation_societe_milieu_en_doma|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.semanlink.net/doc/2020/10/relation_societe_milieu_en_doma|tag|http://www.semanlink.net/tag/paleoclimatologie +http://www.semanlink.net/doc/2020/10/relation_societe_milieu_en_doma|title|Relation société-milieu en domaine sahélien au Sud-Ouest du Niger au cours des 4 derniers millénaires (Thèse) +http://www.semanlink.net/doc/2020/10/relation_societe_milieu_en_doma|bookmarkOf|http://books.content.datacup.io/Rodrigue_Guillon-Relation_societe-milieu_en_domaine_sahelien_au_sud-ouest_du_Niger_au_cours_des_quatre_derniers_millenaires-DCFR00200005.pdf +http://www.semanlink.net/doc/2020/10/relation_societe_milieu_en_doma|creationTime|2020-10-09T00:56:40Z +http://www.semanlink.net/doc/2021/03/huggingface_awesome_papers_pap|creationDate|2021-03-26 +http://www.semanlink.net/doc/2021/03/huggingface_awesome_papers_pap|tag|http://www.semanlink.net/tag/nlp_papers +http://www.semanlink.net/doc/2021/03/huggingface_awesome_papers_pap|tag|http://www.semanlink.net/tag/hugging_face 
+http://www.semanlink.net/doc/2021/03/huggingface_awesome_papers_pap|title|huggingface/awesome-papers: Papers & presentation materials from Hugging Face's internal science day +http://www.semanlink.net/doc/2021/03/huggingface_awesome_papers_pap|bookmarkOf|https://github.com/huggingface/awesome-papers +http://www.semanlink.net/doc/2021/03/huggingface_awesome_papers_pap|creationTime|2021-03-26T01:58:09Z +http://www.semanlink.net/doc/2021/04/a_survey_of_text_clustering_alg|creationDate|2021-04-20 +http://www.semanlink.net/doc/2021/04/a_survey_of_text_clustering_alg|tag|http://www.semanlink.net/tag/nlp_ibm +http://www.semanlink.net/doc/2021/04/a_survey_of_text_clustering_alg|tag|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/doc/2021/04/a_survey_of_text_clustering_alg|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2021/04/a_survey_of_text_clustering_alg|title|A Survey of Text Clustering Algorithms - C. C. Aggarwal (2012) +http://www.semanlink.net/doc/2021/04/a_survey_of_text_clustering_alg|bookmarkOf|http://www.charuaggarwal.net/text-cluster.pdf +http://www.semanlink.net/doc/2021/04/a_survey_of_text_clustering_alg|creationTime|2021-04-20T01:08:01Z +http://www.semanlink.net/doc/2020/08/a_study_of_multilabel_text_clas|creationDate|2020-08-15 +http://www.semanlink.net/doc/2020/08/a_study_of_multilabel_text_clas|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/08/a_study_of_multilabel_text_clas|tag|http://www.semanlink.net/tag/text_multi_label_classification +http://www.semanlink.net/doc/2020/08/a_study_of_multilabel_text_clas|tag|http://www.semanlink.net/tag/hierarchical_multi_label_text_classification +http://www.semanlink.net/doc/2020/08/a_study_of_multilabel_text_clas|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2020/08/a_study_of_multilabel_text_clas|comment|They have made an implementaion of a Multi-Label Classification algorithm on Tree- and DAG-Structured Label Hierarchies ([GitHub](https://github.com/sushobhannayak/cssag)) +http://www.semanlink.net/doc/2020/08/a_study_of_multilabel_text_clas|title|A Study of multilabel text classification and the effect of label hierarchy (2015) +http://www.semanlink.net/doc/2020/08/a_study_of_multilabel_text_clas|bookmarkOf|https://nlp.stanford.edu/courses/cs224n/2013/reports/nayak.pdf +http://www.semanlink.net/doc/2020/08/a_study_of_multilabel_text_clas|creationTime|2020-08-15T14:43:20Z +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|creationDate|2021-10-21 +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|tag|http://www.semanlink.net/tag/domain_adaptation +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|tag|http://www.semanlink.net/tag/aspect_target_sentiment_classification +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|tag|http://www.semanlink.net/tag/language_model_fine_tuning +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|arxiv_author|Paul Opitz +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|arxiv_author|Sebastian Stabinger +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|arxiv_author|Stefan Engl +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|arxiv_author|Alexander Rietzler +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|title|[1908.11860] Adapt or 
Get Left Behind: Domain Adaptation through BERT Language Model Finetuning for Aspect-Target Sentiment Classification +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|bookmarkOf|https://arxiv.org/abs/1908.11860 +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|creationTime|2021-10-21T12:56:49Z +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|arxiv_summary|"Aspect-Target Sentiment Classification (ATSC) is a subtask of Aspect-Based +Sentiment Analysis (ABSA), which has many applications e.g. in e-commerce, +where data and insights from reviews can be leveraged to create value for +businesses and customers. Recently, deep transfer-learning methods have been +applied successfully to a myriad of Natural Language Processing (NLP) tasks, +including ATSC. Building on top of the prominent BERT language model, we +approach ATSC using a two-step procedure: self-supervised domain-specific BERT +language model finetuning, followed by supervised task-specific finetuning. Our +findings on how to best exploit domain-specific language model finetuning +enable us to produce new state-of-the-art performance on the SemEval 2014 Task +4 restaurants dataset. In addition, to explore the real-world robustness of our +models, we perform cross-domain evaluation. We show that a cross-domain adapted +BERT language model performs significantly better than strong baseline models +like vanilla BERT-base and XLNet-base. Finally, we conduct a case study to +interpret model prediction errors." +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|arxiv_firstAuthor|Alexander Rietzler +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|arxiv_updated|2019-11-19T10:17:52Z +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|arxiv_title|Adapt or Get Left Behind: Domain Adaptation through BERT Language Model Finetuning for Aspect-Target Sentiment Classification +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|arxiv_published|2019-08-30T17:44:30Z +http://www.semanlink.net/doc/2021/10/1908_11860_adapt_or_get_left_|arxiv_num|1908.11860 +http://www.semanlink.net/doc/2021/03/julie_grollier_sur_twitter_e|creationDate|2021-03-19 +http://www.semanlink.net/doc/2021/03/julie_grollier_sur_twitter_e|tag|http://www.semanlink.net/tag/julie_grollier +http://www.semanlink.net/doc/2021/03/julie_grollier_sur_twitter_e|tag|http://www.semanlink.net/tag/spikes +http://www.semanlink.net/doc/2021/03/julie_grollier_sur_twitter_e|tag|http://www.semanlink.net/tag/brains_in_silicon +http://www.semanlink.net/doc/2021/03/julie_grollier_sur_twitter_e|comment|cf. 
[Equilibrium Propagation: Bridging the Gap between Energy-Based Models and Backpropagation](doc:2021/03/equilibrium_propagation_bridgi) +http://www.semanlink.net/doc/2021/03/julie_grollier_sur_twitter_e|relatedDoc|http://www.semanlink.net/doc/2021/03/equilibrium_propagation_bridgi +http://www.semanlink.net/doc/2021/03/julie_grollier_sur_twitter_e|title|"Julie Grollier sur Twitter : ""EqSpike: spike-driven equilibrium propagation for neuromorphic implementations""" +http://www.semanlink.net/doc/2021/03/julie_grollier_sur_twitter_e|bookmarkOf|https://twitter.com/julie_grollier/status/1372883511938539521?s=20 +http://www.semanlink.net/doc/2021/03/julie_grollier_sur_twitter_e|creationTime|2021-03-19T13:36:13Z +http://www.semanlink.net/doc/2021/08/2007_g8_en_grande_bretagne_quan|creationDate|2021-08-05 +http://www.semanlink.net/doc/2021/08/2007_g8_en_grande_bretagne_quan|tag|http://www.semanlink.net/tag/sarkozy +http://www.semanlink.net/doc/2021/08/2007_g8_en_grande_bretagne_quan|tag|http://www.semanlink.net/tag/g8 +http://www.semanlink.net/doc/2021/08/2007_g8_en_grande_bretagne_quan|tag|http://www.semanlink.net/tag/poutine +http://www.semanlink.net/doc/2021/08/2007_g8_en_grande_bretagne_quan|title|2007 G8 en Grande Bretagne Quand Vladimir Poutine humilia Nicolas Sarkozy - YouTube +http://www.semanlink.net/doc/2021/08/2007_g8_en_grande_bretagne_quan|bookmarkOf|https://www.youtube.com/watch?v=dWt-JU3hVXA +http://www.semanlink.net/doc/2021/08/2007_g8_en_grande_bretagne_quan|creationTime|2021-08-05T14:24:05Z +http://www.semanlink.net/doc/2020/07/ruggles_of_red_gap|creationDate|2020-07-05 +http://www.semanlink.net/doc/2020/07/ruggles_of_red_gap|tag|http://www.semanlink.net/tag/film_americain +http://www.semanlink.net/doc/2020/07/ruggles_of_red_gap|tag|http://www.semanlink.net/tag/comedie +http://www.semanlink.net/doc/2020/07/ruggles_of_red_gap|comment|avec Charles Laughton +http://www.semanlink.net/doc/2020/07/ruggles_of_red_gap|title|Ruggles of Red Gap +http://www.semanlink.net/doc/2020/07/ruggles_of_red_gap|bookmarkOf|https://fr.wikipedia.org/wiki/L%27Extravagant_Mr_Ruggles +http://www.semanlink.net/doc/2020/07/ruggles_of_red_gap|creationTime|2020-07-05T23:45:26Z +http://www.semanlink.net/doc/2020/06/thatmuse|creationDate|2020-06-13 +http://www.semanlink.net/doc/2020/06/thatmuse|tag|http://www.semanlink.net/tag/louvre +http://www.semanlink.net/doc/2020/06/thatmuse|tag|http://www.semanlink.net/tag/musee +http://www.semanlink.net/doc/2020/06/thatmuse|comment|We are THATMuse: The Museum Treasure Hunt Experts +http://www.semanlink.net/doc/2020/06/thatmuse|title|THATMuse +http://www.semanlink.net/doc/2020/06/thatmuse|bookmarkOf|https://thatmuse.com/thatmuse/ +http://www.semanlink.net/doc/2020/06/thatmuse|creationTime|2020-06-13T14:00:38Z +http://www.semanlink.net/doc/2021/07/practical_natural_language_proc|creationDate|2021-07-06 +http://www.semanlink.net/doc/2021/07/practical_natural_language_proc|tag|http://www.semanlink.net/tag/low_resource_languages +http://www.semanlink.net/doc/2021/07/practical_natural_language_proc|title|Practical Natural Language Processing for Low-Resource Languages +http://www.semanlink.net/doc/2021/07/practical_natural_language_proc|bookmarkOf|http://www-personal.umich.edu/~benking/resources/papers/thesis.pdf +http://www.semanlink.net/doc/2021/07/practical_natural_language_proc|creationTime|2021-07-06T12:51:20Z +http://www.semanlink.net/doc/2020/12/domain_specific_bert_models_%C2%B7_c|creationDate|2020-12-01 
+http://www.semanlink.net/doc/2020/12/domain_specific_bert_models_%C2%B7_c|tag|http://www.semanlink.net/tag/domain_specific_bert +http://www.semanlink.net/doc/2020/12/domain_specific_bert_models_%C2%B7_c|comment|"Chances are you won’t be able to pre-train BERT on your own dataset, though, for the following reasons. + +1. Pre-training BERT requires a huge corpus +2. Huge Model + Huge Corpus = Lots of GPUs " +http://www.semanlink.net/doc/2020/12/domain_specific_bert_models_%C2%B7_c|title|Domain-Specific BERT Models · Chris McCormick +http://www.semanlink.net/doc/2020/12/domain_specific_bert_models_%C2%B7_c|bookmarkOf|https://mccormickml.com/2020/06/22/domain-specific-bert-tutorial/ +http://www.semanlink.net/doc/2020/12/domain_specific_bert_models_%C2%B7_c|creationTime|2020-12-01T15:08:22Z +http://www.semanlink.net/doc/2021/04/the_nlp_index|creationDate|2021-04-30 +http://www.semanlink.net/doc/2021/04/the_nlp_index|tag|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/doc/2021/04/the_nlp_index|tag|http://www.semanlink.net/tag/nlp_papers +http://www.semanlink.net/doc/2021/04/the_nlp_index|comment|searchable aggregator of NLP repos, including papers and their code +http://www.semanlink.net/doc/2021/04/the_nlp_index|title|The NLP Index +http://www.semanlink.net/doc/2021/04/the_nlp_index|bookmarkOf|https://index.quantumstat.com/ +http://www.semanlink.net/doc/2021/04/the_nlp_index|creationTime|2021-04-30T10:19:03Z +http://www.semanlink.net/doc/2020/07/la_%C2%AB_positive_attitude_%C2%BB_des_di|creationDate|2020-07-07 +http://www.semanlink.net/doc/2020/07/la_%C2%AB_positive_attitude_%C2%BB_des_di|tag|http://www.semanlink.net/tag/covid19_impreparation +http://www.semanlink.net/doc/2020/07/la_%C2%AB_positive_attitude_%C2%BB_des_di|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales +http://www.semanlink.net/doc/2020/07/la_%C2%AB_positive_attitude_%C2%BB_des_di|comment|> [M. Hirsch] a simplement confié son regret de ne pas avoir pu donner des masques aux soignants qui se déplaçaient en métro. +http://www.semanlink.net/doc/2020/07/la_%C2%AB_positive_attitude_%C2%BB_des_di|title|La « positive attitude » des directeurs d’hôpitaux face au coronavirus décontenance la commission d’enquête +http://www.semanlink.net/doc/2020/07/la_%C2%AB_positive_attitude_%C2%BB_des_di|bookmarkOf|https://www.lemonde.fr/societe/article/2020/07/07/coronavirus-la-positive-attitude-des-directeurs-d-hopitaux-decontenance-la-commission-d-enquete_6045418_3224.html +http://www.semanlink.net/doc/2020/07/la_%C2%AB_positive_attitude_%C2%BB_des_di|creationTime|2020-07-07T09:22:34Z +http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat|creationDate|2020-08-13 +http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat|tag|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat|comment|"> a new open-source method for language model pre-training that uses a supplemental knowledge retriever that enables it to perform well on knowledge-intensive tasks without billions of parameters. 
+ +[Paper: REALM: Retrieval-Augmented Language Model Pre-Training](doc:2020/12/2002_08909_realm_retrieval_a)" +http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat|relatedDoc|http://www.semanlink.net/doc/2020/12/2002_08909_realm_retrieval_a +http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat|title|Google AI Blog: REALM: Integrating Retrieval into Language Representation Models +http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat|bookmarkOf|https://ai.googleblog.com/2020/08/realm-integrating-retrieval-into.html +http://www.semanlink.net/doc/2020/08/google_ai_blog_realm_integrat|creationTime|2020-08-13T10:09:38Z +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|creationDate|2021-06-30 +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|tag|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|tag|http://www.semanlink.net/tag/neural_machine_translation +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|arxiv_author|Jade Z. Abbott +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|arxiv_author|Laura Martinus +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|title|[1906.05685] A Focus on Neural Machine Translation for African Languages +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|bookmarkOf|https://arxiv.org/abs/1906.05685 +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|creationTime|2021-06-30T01:03:36Z +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|arxiv_summary|"African languages are numerous, complex and low-resourced. The datasets +required for machine translation are difficult to discover, and existing +research is hard to reproduce. Minimal attention has been given to machine +translation for African languages so there is scant research regarding the +problems that arise when using machine translation techniques. To begin +addressing these problems, we trained models to translate English to five of +the official South African languages (Afrikaans, isiZulu, Northern Sotho, +Setswana, Xitsonga), making use of modern neural machine translation +techniques. The results obtained show the promise of using neural machine +translation techniques for African languages. By providing reproducible +publicly-available data, code and results, this research aims to provide a +starting point for other researchers in African machine translation to compare +to and build upon." 
+http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|arxiv_firstAuthor|Laura Martinus +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|arxiv_updated|2019-06-14T12:48:25Z +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|arxiv_title|A Focus on Neural Machine Translation for African Languages +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|arxiv_published|2019-06-11T15:38:34Z +http://www.semanlink.net/doc/2021/06/1906_05685_a_focus_on_neural_|arxiv_num|1906.05685 +http://www.semanlink.net/doc/2021/04/thunlp_openmatch_an_open_sourc|creationDate|2021-04-06 +http://www.semanlink.net/doc/2021/04/thunlp_openmatch_an_open_sourc|tag|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/doc/2021/04/thunlp_openmatch_an_open_sourc|title|thunlp/OpenMatch: An Open-Source Package for Information Retrieval. +http://www.semanlink.net/doc/2021/04/thunlp_openmatch_an_open_sourc|bookmarkOf|https://github.com/thunlp/OpenMatch +http://www.semanlink.net/doc/2021/04/thunlp_openmatch_an_open_sourc|creationTime|2021-04-06T09:28:03Z +http://www.semanlink.net/doc/2021/01/nucleaire_%C2%AB_l%E2%80%99etat_du_parc_fr|creationDate|2021-01-22 +http://www.semanlink.net/doc/2021/01/nucleaire_%C2%AB_l%E2%80%99etat_du_parc_fr|tag|http://www.semanlink.net/tag/centrales_nucleaires +http://www.semanlink.net/doc/2021/01/nucleaire_%C2%AB_l%E2%80%99etat_du_parc_fr|tag|http://www.semanlink.net/tag/edf +http://www.semanlink.net/doc/2021/01/nucleaire_%C2%AB_l%E2%80%99etat_du_parc_fr|comment|> « la rigueur d’exploitation des centrales d’EDF est en recul » et que le nombre d’événements significatifs « augmente régulièrement depuis plusieurs années ». On peut citer le problème de la fragilité des diesels de secours face au risque de séismes : l’ASN parle d’une anomalie générique – c’est-à-dire qui peut concerner tous les réacteurs –, du mauvais état ou de mauvais montage des ancrages de ces systèmes. Or, si le diesel qui assure l’alimentation en électricité ne démarre pas, c’est l’accident grave. +http://www.semanlink.net/doc/2021/01/nucleaire_%C2%AB_l%E2%80%99etat_du_parc_fr|title|Nucléaire : « L’état du parc français est préoccupant » +http://www.semanlink.net/doc/2021/01/nucleaire_%C2%AB_l%E2%80%99etat_du_parc_fr|bookmarkOf|https://www.lemonde.fr/planete/article/2021/01/22/nucleaire-on-demande-a-une-personne-malade-de-courir-un-marathon_6067145_3244.html +http://www.semanlink.net/doc/2021/01/nucleaire_%C2%AB_l%E2%80%99etat_du_parc_fr|creationTime|2021-01-22T23:35:59Z +http://www.semanlink.net/doc/2021/07/livre_%C2%AB_tout_s%E2%80%99effondre_%C2%BB_un|creationDate|2021-07-14 +http://www.semanlink.net/doc/2021/07/livre_%C2%AB_tout_s%E2%80%99effondre_%C2%BB_un|tag|http://www.semanlink.net/tag/chinua_achebe +http://www.semanlink.net/doc/2021/07/livre_%C2%AB_tout_s%E2%80%99effondre_%C2%BB_un|tag|http://www.semanlink.net/tag/roman +http://www.semanlink.net/doc/2021/07/livre_%C2%AB_tout_s%E2%80%99effondre_%C2%BB_un|comment|"""Things fall apart"" + +> Tant que les lions n’auront pas leurs propres historiens, l’histoire de la chasse glorifiera toujours le chasseur." 
+http://www.semanlink.net/doc/2021/07/livre_%C2%AB_tout_s%E2%80%99effondre_%C2%BB_un|title|Livre : « Tout s’effondre », un hommage à l’Afrique anté-coloniale à l’heure de sa désagrégation +http://www.semanlink.net/doc/2021/07/livre_%C2%AB_tout_s%E2%80%99effondre_%C2%BB_un|bookmarkOf|https://www.lemonde.fr/afrique/article/2021/07/10/livre-tout-s-effondre-un-hommage-a-l-afrique-ante-coloniale-a-l-heure-de-sa-desagregation_6087806_3212.html +http://www.semanlink.net/doc/2021/07/livre_%C2%AB_tout_s%E2%80%99effondre_%C2%BB_un|creationTime|2021-07-14T22:16:37Z +http://www.semanlink.net/doc/2021/03/babelnet_%7C_le_plus_grand_dictio|creationDate|2021-03-09 +http://www.semanlink.net/doc/2021/03/babelnet_%7C_le_plus_grand_dictio|tag|http://www.semanlink.net/tag/entities +http://www.semanlink.net/doc/2021/03/babelnet_%7C_le_plus_grand_dictio|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2021/03/babelnet_%7C_le_plus_grand_dictio|comment|BabelNet 5 classifies synsets (synonym sets) into concepts (e.g. university) and named entities (e.g. Sapienza University of Rome) with manual check of 100k synsets. +http://www.semanlink.net/doc/2021/03/babelnet_%7C_le_plus_grand_dictio|title|BabelNet Le plus grand dictionnaire encyclopédique et réseau sémantique +http://www.semanlink.net/doc/2021/03/babelnet_%7C_le_plus_grand_dictio|bookmarkOf|https://babelnet.org/ +http://www.semanlink.net/doc/2021/03/babelnet_%7C_le_plus_grand_dictio|creationTime|2021-03-09T08:11:17Z +http://www.semanlink.net/doc/2021/08/the_4_biggest_open_problems_in_|creationDate|2021-08-26 +http://www.semanlink.net/doc/2021/08/the_4_biggest_open_problems_in_|tag|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/doc/2021/08/the_4_biggest_open_problems_in_|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2021/08/the_4_biggest_open_problems_in_|tag|http://www.semanlink.net/tag/low_resource_languages +http://www.semanlink.net/doc/2021/08/the_4_biggest_open_problems_in_|title|The 4 Biggest Open Problems in NLP (2019) +http://www.semanlink.net/doc/2021/08/the_4_biggest_open_problems_in_|bookmarkOf|https://ruder.io/4-biggest-open-problems-in-nlp/ +http://www.semanlink.net/doc/2021/08/the_4_biggest_open_problems_in_|creationTime|2021-08-26T15:23:03Z +http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_|creationDate|2020-11-13 +http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_|tag|http://www.semanlink.net/tag/maria +http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_|tag|http://www.semanlink.net/tag/exil +http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_|tag|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_|tag|http://www.semanlink.net/tag/tombouctou +http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_|comment|"> O you who go to Gao, do so by way of Timbuktu and murmur my name to my friends. Give them the fragrant greetings of an exile who sighs after the soil where his friends, his family and his neighbors reside. + +Ahmed Baba" +http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_|title|"Maria sur Twitter : ""O you who go to Gao, do so by way of Timbuktu and murmur my name to my friends. 
Give them ...""" +http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_|bookmarkOf|https://twitter.com/SERVANTMARIA/status/1327275715277049857?s=20 +http://www.semanlink.net/doc/2020/11/maria_sur_twitter_o_you_who_|creationTime|2020-11-13T16:45:32Z +http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class|creationDate|2020-10-05 +http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class|tag|http://www.semanlink.net/tag/text_classification_using_label_names_only +http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class|tag|http://www.semanlink.net/tag/using_word_embedding +http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class|tag|http://www.semanlink.net/tag/unsupervised_text_classification +http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class|tag|http://www.semanlink.net/tag/human_in_the_loop +http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class|tag|http://www.semanlink.net/tag/acl_2019 +http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class|comment|"Unsupervised approach to classify +documents into categories simply described by +a label + +> The proposed method... draws on textual similarity between the most +relevant words in each document and a dictionary +of keywords for each category reflecting +its semantics and lexical field. The novelty +of our method hinges on the enrichment +of the category labels through a combination +of human expertise and language models, both +generic and domain specific. + +> models the task as a +**text similarity problem between two sets of words: +One containing the most relevant words in the document +and another containing keywords derived +from the label of the target category**. While the +key advantage of this approach is its simplicity, its +success hinges on the good definition of a dictionary +of words for each category." +http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class|title|Towards Unsupervised Text Classification Leveraging Experts and Word Embeddings - (ACL 2019) +http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class|bookmarkOf|https://www.aclweb.org/anthology/P19-1036/ +http://www.semanlink.net/doc/2020/10/towards_unsupervised_text_class|creationTime|2020-10-05T00:28:20Z +http://www.semanlink.net/doc/2020/07/javascript_partial_matching_a|creationDate|2020-07-28 +http://www.semanlink.net/doc/2020/07/javascript_partial_matching_a|tag|http://www.semanlink.net/tag/regex +http://www.semanlink.net/doc/2020/07/javascript_partial_matching_a|tag|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/doc/2020/07/javascript_partial_matching_a|tag|http://www.semanlink.net/tag/stackoverflow_q +http://www.semanlink.net/doc/2020/07/javascript_partial_matching_a|comment|"This is a regex feature known as partial matching, it's available in several regex engines such as PCRE, Boost, Java but not in JavaScript. 
: [Regex - check if input still has chances to become matching](https://stackoverflow.com/questions/22483214/regex-check-if-input-still-has-chances-to-become-matching/22489941#22489941) + +(en java: regex utils hitEnd())" +http://www.semanlink.net/doc/2020/07/javascript_partial_matching_a|title|javascript - Partial matching a string against a regex - Stack Overflow +http://www.semanlink.net/doc/2020/07/javascript_partial_matching_a|bookmarkOf|https://stackoverflow.com/questions/42461651/partial-matching-a-string-against-a-regex +http://www.semanlink.net/doc/2020/07/javascript_partial_matching_a|creationTime|2020-07-28T16:12:03Z +http://www.semanlink.net/doc/2021/07/%C2%AB_au_vu_des_forces_economiques_|creationDate|2021-07-04 +http://www.semanlink.net/doc/2021/07/%C2%AB_au_vu_des_forces_economiques_|tag|http://www.semanlink.net/tag/disparition_des_abeilles +http://www.semanlink.net/doc/2021/07/%C2%AB_au_vu_des_forces_economiques_|tag|http://www.semanlink.net/tag/lobbies_economiques +http://www.semanlink.net/doc/2021/07/%C2%AB_au_vu_des_forces_economiques_|title|« Au vu des forces économiques en présence, les abeilles et les pollinisateurs apparaissent indéfendables » +http://www.semanlink.net/doc/2021/07/%C2%AB_au_vu_des_forces_economiques_|bookmarkOf|https://www.lemonde.fr/idees/article/2021/07/03/au-vu-des-forces-economiques-en-presence-les-abeilles-et-les-pollinisateurs-apparaissent-indefendables_6086806_3232.html +http://www.semanlink.net/doc/2021/07/%C2%AB_au_vu_des_forces_economiques_|creationTime|2021-07-04T09:50:12Z +http://www.semanlink.net/doc/2021/01/qanswer_%C2%B7_accessing_your_knowle|creationDate|2021-01-26 +http://www.semanlink.net/doc/2021/01/qanswer_%C2%B7_accessing_your_knowle|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2021/01/qanswer_%C2%B7_accessing_your_knowle|tag|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/doc/2021/01/qanswer_%C2%B7_accessing_your_knowle|comment|"A Platform to Access RDF Data via Natural Language. Dennis Diefenbach, CEO & CTO + +[GitHub](https://github.com/QAnswer) + +[Discussion on LinkedIn](https://www.linkedin.com/feed/?msgControlName=reply_to_sender&msgConversationId=6759814672632938496&msgOverlay=true&trk=false)" +http://www.semanlink.net/doc/2021/01/qanswer_%C2%B7_accessing_your_knowle|title|QAnswer · Accessing your Knowledge via Natural Language +http://www.semanlink.net/doc/2021/01/qanswer_%C2%B7_accessing_your_knowle|bookmarkOf|https://www.qanswer.eu/ +http://www.semanlink.net/doc/2021/01/qanswer_%C2%B7_accessing_your_knowle|creationTime|2021-01-26T13:46:28Z +http://www.semanlink.net/doc/2020/09/initializing_neural_networks_fo|creationDate|2020-09-01 +http://www.semanlink.net/doc/2020/09/initializing_neural_networks_fo|tag|http://www.semanlink.net/tag/hierarchical_multi_label_text_classification +http://www.semanlink.net/doc/2020/09/initializing_neural_networks_fo|tag|http://www.semanlink.net/tag/text_multi_label_classification +http://www.semanlink.net/doc/2020/09/initializing_neural_networks_fo|comment|"> In this paper, we investigate **a simple and computationally fast approach** for multi-label classification with a focus on labels that share a structure, such as a hierarchy (taxonomy). This approach can work with established neural network architectures such as a convolutional neural network (CNN) by simply **initializing the final output layer to leverage the co-occurrences between the labels in the training data**. 
+ +Il faut que les données labellisées contiennent la hiérarchie" +http://www.semanlink.net/doc/2020/09/initializing_neural_networks_fo|title|Initializing neural networks for hierarchical multi-label text classification (2017) +http://www.semanlink.net/doc/2020/09/initializing_neural_networks_fo|bookmarkOf|https://www.aclweb.org/anthology/W17-2339.pdf +http://www.semanlink.net/doc/2020/09/initializing_neural_networks_fo|creationTime|2020-09-01T23:49:27Z +http://www.semanlink.net/doc/2021/08/joey_nmt%E2%80%99s_documentation_|creationDate|2021-08-26 +http://www.semanlink.net/doc/2021/08/joey_nmt%E2%80%99s_documentation_|tag|http://www.semanlink.net/tag/neural_machine_translation +http://www.semanlink.net/doc/2021/08/joey_nmt%E2%80%99s_documentation_|comment|JoeyNMT is a minimalist neural machine translation toolkit for educational purposes. +http://www.semanlink.net/doc/2021/08/joey_nmt%E2%80%99s_documentation_|title|Joey NMT’s documentation! +http://www.semanlink.net/doc/2021/08/joey_nmt%E2%80%99s_documentation_|bookmarkOf|https://joeynmt.readthedocs.io +http://www.semanlink.net/doc/2021/08/joey_nmt%E2%80%99s_documentation_|creationTime|2021-08-26T13:46:11Z +http://www.semanlink.net/doc/2020/12/katalin_kariko_wikipedia|creationDate|2020-12-16 +http://www.semanlink.net/doc/2020/12/katalin_kariko_wikipedia|tag|http://www.semanlink.net/tag/scientifique +http://www.semanlink.net/doc/2020/12/katalin_kariko_wikipedia|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/12/katalin_kariko_wikipedia|title|Katalin Kariko +http://www.semanlink.net/doc/2020/12/katalin_kariko_wikipedia|bookmarkOf|https://en.wikipedia.org/wiki/Katalin_Kariko +http://www.semanlink.net/doc/2020/12/katalin_kariko_wikipedia|creationTime|2020-12-16T01:38:11Z +http://www.semanlink.net/doc/2021/04/camille_lefebvre_%7C_langarchiv|creationDate|2021-04-04 +http://www.semanlink.net/doc/2021/04/camille_lefebvre_%7C_langarchiv|tag|http://www.semanlink.net/tag/zinder +http://www.semanlink.net/doc/2021/04/camille_lefebvre_%7C_langarchiv|title|Zinder (Camille Lefebvre Langarchiv) +http://www.semanlink.net/doc/2021/04/camille_lefebvre_%7C_langarchiv|bookmarkOf|https://langarchiv.hypotheses.org/author/clefebvre +http://www.semanlink.net/doc/2021/04/camille_lefebvre_%7C_langarchiv|creationTime|2021-04-04T19:39:39Z +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|creationDate|2020-10-26 +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|tag|http://www.semanlink.net/tag/language_models_as_knowledge_bases +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|tag|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|arxiv_author|Chenguang Wang +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|arxiv_author|Xiao Liu +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|arxiv_author|Dawn Song +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|title|[2010.11967] Language Models are Open Knowledge Graphs +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|bookmarkOf|https://arxiv.org/abs/2010.11967 +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|creationTime|2020-10-26T17:10:56Z 
+http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|arxiv_summary|"This paper shows how to construct knowledge graphs (KGs) from pre-trained +language models (e.g., BERT, GPT-2/3), without human supervision. Popular KGs +(e.g, Wikidata, NELL) are built in either a supervised or semi-supervised +manner, requiring humans to create knowledge. Recent deep language models +automatically acquire knowledge from large-scale corpora via pre-training. The +stored knowledge has enabled the language models to improve downstream NLP +tasks, e.g., answering questions, and writing code and articles. In this paper, +we propose an unsupervised method to cast the knowledge contained within +language models into KGs. We show that KGs are constructed with a single +forward pass of the pre-trained language models (without fine-tuning) over the +corpora. We demonstrate the quality of the constructed KGs by comparing to two +KGs (Wikidata, TAC KBP) created by humans. Our KGs also provide open factual +knowledge that is new in the existing KGs. Our code and KGs will be made +publicly available." +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|arxiv_firstAuthor|Chenguang Wang +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|arxiv_updated|2020-10-22T18:01:56Z +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|arxiv_title|Language Models are Open Knowledge Graphs +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|arxiv_published|2020-10-22T18:01:56Z +http://www.semanlink.net/doc/2020/10/2010_11967_language_models_ar|arxiv_num|2010.11967 +http://www.semanlink.net/doc/2020/10/sylvain_gugger_sur_twitter_t|creationDate|2020-10-19 +http://www.semanlink.net/doc/2020/10/sylvain_gugger_sur_twitter_t|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2020/10/sylvain_gugger_sur_twitter_t|tag|http://www.semanlink.net/tag/sylvain_gugger +http://www.semanlink.net/doc/2020/10/sylvain_gugger_sur_twitter_t|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2020/10/sylvain_gugger_sur_twitter_t|title|"Sylvain Gugger sur Twitter : ""Training a transformer model for text classification...""" +http://www.semanlink.net/doc/2020/10/sylvain_gugger_sur_twitter_t|bookmarkOf|https://twitter.com/GuggerSylvain/status/1318215507242291202 +http://www.semanlink.net/doc/2020/10/sylvain_gugger_sur_twitter_t|creationTime|2020-10-19T18:44:17Z +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|creationDate|2020-11-22 +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|tag|http://www.semanlink.net/tag/cookie +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|tag|http://www.semanlink.net/tag/accelerated_mobile_pages +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|tag|http://www.semanlink.net/tag/cory_doctorow +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|tag|http://www.semanlink.net/tag/monopolies +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|tag|http://www.semanlink.net/tag/google +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|tag|http://www.semanlink.net/tag/publicite_internet +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|title|"Cory Doctorow: ""Google's moves... really ARE good for users. 
I'd love to find proposals to fix this stuff WITHOUT creating monopolies"" / Twitter" +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|bookmarkOf|https://twitter.com/doctorow/status/1329891756452634626 +http://www.semanlink.net/doc/2020/11/cory_doctorow_ii_google_s_mo|creationTime|2020-11-22T00:01:54Z +http://www.semanlink.net/doc/2020/07/amnesty_international_denonce_l|creationDate|2020-07-14 +http://www.semanlink.net/doc/2020/07/amnesty_international_denonce_l|tag|http://www.semanlink.net/tag/nso_pegasus +http://www.semanlink.net/doc/2020/07/amnesty_international_denonce_l|tag|http://www.semanlink.net/tag/cybersurveillance +http://www.semanlink.net/doc/2020/07/amnesty_international_denonce_l|tag|http://www.semanlink.net/tag/amnesty_international +http://www.semanlink.net/doc/2020/07/amnesty_international_denonce_l|title|Amnesty International dénonce l’espionnage d’un journaliste marocain par une technologie quasi indétectable +http://www.semanlink.net/doc/2020/07/amnesty_international_denonce_l|bookmarkOf|https://www.lemonde.fr/pixels/article/2020/06/22/amnesty-denonce-l-espionnage-d-un-journaliste-marocain-defenseur-des-droits-de-l-homme-par-une-technologie-quasi-indetectable_6043666_4408996.html +http://www.semanlink.net/doc/2020/07/amnesty_international_denonce_l|creationTime|2020-07-14T12:28:59Z +http://www.semanlink.net/doc/2021/06/bura_funerary_urns_niger_terra|creationDate|2021-06-21 +http://www.semanlink.net/doc/2021/06/bura_funerary_urns_niger_terra|tag|http://www.semanlink.net/tag/boura +http://www.semanlink.net/doc/2021/06/bura_funerary_urns_niger_terra|title|Bura Funerary Urns: Niger Terracottas: An Interpretive Limbo? African Arts MIT Press +http://www.semanlink.net/doc/2021/06/bura_funerary_urns_niger_terra|bookmarkOf|https://direct.mit.edu/afar/article/53/1/66/55147/Bura-Funerary-Urns-Niger-Terracottas-An +http://www.semanlink.net/doc/2021/06/bura_funerary_urns_niger_terra|creationTime|2021-06-21T12:10:50Z +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|creationDate|2020-06-15 +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|tag|http://www.semanlink.net/tag/templatic_documents +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|tag|http://www.semanlink.net/tag/2d_nlp +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|comment|"[About this paper](doc:2020/06/representation_learning_for_inf) + +Templatic documents (eg. invoices): such documents do not contain “natural +language” but +instead resemble forms, with data often presented in tables + +> an approach that **uses knowledge of target field types to identify +candidate fields**. These are then scored using **a neural network that +learns a dense representation of each candidate using the words in its +neighborhood**. Experiments on two corpora (invoices and receipts) show +that we’re able to generalize well to unseen layouts. +> +> An understanding of the **two-dimensional layout of text** +on the page is key to understanding such documents. On the other hand, +treating this purely as an image segmentation problem makes it difficult +to take advantage of the semantics of the text. 
+> +> Our approach to this problem allows developers to train and deploy an +extraction system for a given domain (like invoices) using **two inputs — a +target schema (i.e., a list of fields to extract and their corresponding +types) and a small collection of documents labeled with the ground truth +for use as a training set** + +- The input document is first run through an [OCR service](doc:2020/06/detecter_le_texte_dans_les_fich). +- a candidate generator identifies spans of text in the OCR output that might correspond to +an instance of a given field (uses pre-existing +libraries associated with each field type) +- Each candidate is then scored using a neural +network (that is trained as a binary classifier)" +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|relatedDoc|http://www.semanlink.net/doc/2020/06/detecter_le_texte_dans_les_fich +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|relatedDoc|http://www.semanlink.net/doc/2020/06/representation_learning_for_inf +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|title|Google AI Blog: Extracting Structured Data from Templatic Documents (2020) +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|bookmarkOf|https://ai.googleblog.com/2020/06/extracting-structured-data-from.html +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|creationTime|2020-06-15T22:51:23Z +http://www.semanlink.net/doc/2020/06/google_ai_blog_extracting_stru|mainDoc|http://www.semanlink.net/doc/2020/06/representation_learning_for_inf +http://www.semanlink.net/doc/2021/10/l%E2%80%99intelligence_artificielle_ge|creationDate|2021-10-20 +http://www.semanlink.net/doc/2021/10/l%E2%80%99intelligence_artificielle_ge|tag|http://www.semanlink.net/tag/molecular_biology +http://www.semanlink.net/doc/2021/10/l%E2%80%99intelligence_artificielle_ge|tag|http://www.semanlink.net/tag/google_deepmind +http://www.semanlink.net/doc/2021/10/l%E2%80%99intelligence_artificielle_ge|tag|http://www.semanlink.net/tag/alphafold +http://www.semanlink.net/doc/2021/10/l%E2%80%99intelligence_artificielle_ge|title|L’intelligence artificielle, génie de la biologie moléculaire +http://www.semanlink.net/doc/2021/10/l%E2%80%99intelligence_artificielle_ge|bookmarkOf|https://www.lemonde.fr/sciences/article/2021/10/18/l-intelligence-artificielle-genie-de-la-biologie-moleculaire_6098876_1650684.html +http://www.semanlink.net/doc/2021/10/l%E2%80%99intelligence_artificielle_ge|creationTime|2021-10-20T00:26:36Z +http://www.semanlink.net/doc/2021/01/ml_and_nlp_research_highlights_|creationDate|2021-01-19 +http://www.semanlink.net/doc/2021/01/ml_and_nlp_research_highlights_|tag|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/doc/2021/01/ml_and_nlp_research_highlights_|tag|http://www.semanlink.net/tag/2020 +http://www.semanlink.net/doc/2021/01/ml_and_nlp_research_highlights_|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2021/01/ml_and_nlp_research_highlights_|title|ML and NLP Research Highlights of 2020 +http://www.semanlink.net/doc/2021/01/ml_and_nlp_research_highlights_|bookmarkOf|https://ruder.io/research-highlights-2020/ +http://www.semanlink.net/doc/2021/01/ml_and_nlp_research_highlights_|creationTime|2021-01-19T13:57:26Z +http://www.semanlink.net/doc/2021/06/raphaelsty_entitype_predict_e|creationDate|2021-06-14 +http://www.semanlink.net/doc/2021/06/raphaelsty_entitype_predict_e|tag|http://www.semanlink.net/tag/raphaelsty 
+http://www.semanlink.net/doc/2021/06/raphaelsty_entitype_predict_e|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2021/06/raphaelsty_entitype_predict_e|tag|http://www.semanlink.net/tag/entity_type_prediction +http://www.semanlink.net/doc/2021/06/raphaelsty_entitype_predict_e|title|raphaelsty/entitype : Predict entities type in context using transformers. +http://www.semanlink.net/doc/2021/06/raphaelsty_entitype_predict_e|bookmarkOf|https://github.com/raphaelsty/entitype +http://www.semanlink.net/doc/2021/06/raphaelsty_entitype_predict_e|creationTime|2021-06-14T16:23:57Z +http://www.semanlink.net/doc/2021/10/les_bouts_de_bois_de_dieu|creationDate|2021-10-10 +http://www.semanlink.net/doc/2021/10/les_bouts_de_bois_de_dieu|tag|http://www.semanlink.net/tag/colonisation +http://www.semanlink.net/doc/2021/10/les_bouts_de_bois_de_dieu|tag|http://www.semanlink.net/tag/livre_a_lire +http://www.semanlink.net/doc/2021/10/les_bouts_de_bois_de_dieu|tag|http://www.semanlink.net/tag/senegal +http://www.semanlink.net/doc/2021/10/les_bouts_de_bois_de_dieu|tag|http://www.semanlink.net/tag/greve +http://www.semanlink.net/doc/2021/10/les_bouts_de_bois_de_dieu|title|Les Bouts de bois de Dieu +http://www.semanlink.net/doc/2021/10/les_bouts_de_bois_de_dieu|bookmarkOf|https://fr.wikipedia.org/wiki/Les_Bouts_de_bois_de_Dieu +http://www.semanlink.net/doc/2021/10/les_bouts_de_bois_de_dieu|creationTime|2021-10-10T10:55:57Z +http://www.semanlink.net/doc/2020/06/le_principe_de_la_greffe_asso|creationDate|2020-06-08 +http://www.semanlink.net/doc/2020/06/le_principe_de_la_greffe_asso|tag|http://www.semanlink.net/tag/greffe_arbre +http://www.semanlink.net/doc/2020/06/le_principe_de_la_greffe_asso|title|Le principe de la greffe - Association Vergers Vivants +http://www.semanlink.net/doc/2020/06/le_principe_de_la_greffe_asso|bookmarkOf|http://www.vergers-vivants.fr/spip/spip.php?article474 +http://www.semanlink.net/doc/2020/06/le_principe_de_la_greffe_asso|creationTime|2020-06-08T15:49:23Z +http://www.semanlink.net/doc/2021/10/next_gen_sentence_embeddings_wi|creationDate|2021-10-27 +http://www.semanlink.net/doc/2021/10/next_gen_sentence_embeddings_wi|tag|http://www.semanlink.net/tag/sbert +http://www.semanlink.net/doc/2021/10/next_gen_sentence_embeddings_wi|tag|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/doc/2021/10/next_gen_sentence_embeddings_wi|comment|> the world of sentence embeddings was ignited with the introduction of SBERT in 2019. Since then, many more sentence transformers have been introduced. These models quickly made the original SBERT obsolete. How did these newer sentence transformers manage to outperform SBERT so quickly? The answer is multiple negatives ranking (MNR) loss. 
+http://www.semanlink.net/doc/2021/10/next_gen_sentence_embeddings_wi|title|Next-Gen Sentence Embeddings with Multiple Negatives Ranking Loss Pinecone +http://www.semanlink.net/doc/2021/10/next_gen_sentence_embeddings_wi|bookmarkOf|https://www.pinecone.io/learn/fine-tune-sentence-transformers-mnr/ +http://www.semanlink.net/doc/2021/10/next_gen_sentence_embeddings_wi|creationTime|2021-10-27T01:24:49Z +http://www.semanlink.net/doc/2021/10/incremental_clustering_an_ove|creationDate|2021-10-17 +http://www.semanlink.net/doc/2021/10/incremental_clustering_an_ove|tag|http://www.semanlink.net/tag/incremental_clustering +http://www.semanlink.net/doc/2021/10/incremental_clustering_an_ove|title|Incremental Clustering - an overview ScienceDirect Topics +http://www.semanlink.net/doc/2021/10/incremental_clustering_an_ove|bookmarkOf|https://www.sciencedirect.com/topics/computer-science/incremental-clustering +http://www.semanlink.net/doc/2021/10/incremental_clustering_an_ove|creationTime|2021-10-17T10:17:19Z +http://www.semanlink.net/doc/2021/01/nlp_based_information_retrieval|creationDate|2021-01-27 +http://www.semanlink.net/doc/2021/01/nlp_based_information_retrieval|tag|http://www.semanlink.net/tag/nlp_based_ir +http://www.semanlink.net/doc/2021/01/nlp_based_information_retrieval|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.semanlink.net/doc/2021/01/nlp_based_information_retrieval|title|NLP Based Information Retrieval System by Nikhil Sharma Towards Data Science +http://www.semanlink.net/doc/2021/01/nlp_based_information_retrieval|bookmarkOf|https://towardsdatascience.com/nlp-based-information-retrieval-system-answer-key-questions-from-the-scientific-literature-b8e5c3aa5a3e +http://www.semanlink.net/doc/2021/01/nlp_based_information_retrieval|creationTime|2021-01-27T13:36:38Z +http://www.semanlink.net/doc/2021/10/kelechi_sur_twitter_excited_|creationDate|2021-10-11 +http://www.semanlink.net/doc/2021/10/kelechi_sur_twitter_excited_|tag|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/doc/2021/10/kelechi_sur_twitter_excited_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/10/kelechi_sur_twitter_excited_|title|"Kelechi sur Twitter : ""Excited to present AfriBERTa, a multilingual LM pretrained from scratch on 11 African languages with a joint corpus of less than 1GB.""" +http://www.semanlink.net/doc/2021/10/kelechi_sur_twitter_excited_|bookmarkOf|https://twitter.com/Kelechukwu_/status/1446500378955161620 +http://www.semanlink.net/doc/2021/10/kelechi_sur_twitter_excited_|creationTime|2021-10-11T22:37:54Z +http://www.semanlink.net/doc/2021/10/building_scalable_explainable_|creationDate|2021-10-07 +http://www.semanlink.net/doc/2021/10/building_scalable_explainable_|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2021/10/building_scalable_explainable_|tag|http://www.semanlink.net/tag/retrieval_augmented_lm +http://www.semanlink.net/doc/2021/10/building_scalable_explainable_|tag|http://www.semanlink.net/tag/colbert +http://www.semanlink.net/doc/2021/10/building_scalable_explainable_|tag|http://www.semanlink.net/tag/retrieval_based_nlp +http://www.semanlink.net/doc/2021/10/building_scalable_explainable_|comment|"> The black-box nature of large language models like T5 and GPT-3 makes them inefficient to train and deploy, opaque in their knowledge representations and in backing their claims with provenance, and static in facing a constantly evolving world and diverse downstream contexts. 
**This post explores retrieval-based NLP, where models retrieve information pertinent to solving their tasks from a plugged-in text corpus**. +> +> Retrieval-based NLP methods view tasks as “open-book” exams: knowledge +is encoded explicitly in the form of a text corpus like Wikipedia, the medical literature, or a +software’s API documentation. When solving a language task, **the model learns to search for +pertinent passages** and to then use the retrieved information for crafting knowledgeable +responses. In doing so, **retrieval helps decouple the capacity that language models have for +understanding text from how they store knowledge**" +http://www.semanlink.net/doc/2021/10/building_scalable_explainable_|title|Building Scalable, Explainable, and Adaptive NLP Models with Retrieval SAIL Blog +http://www.semanlink.net/doc/2021/10/building_scalable_explainable_|bookmarkOf|https://ai.stanford.edu/blog/retrieval-based-NLP/ +http://www.semanlink.net/doc/2021/10/building_scalable_explainable_|creationTime|2021-10-07T02:08:49Z +http://www.semanlink.net/doc/2021/08/cmd_how_do_i_kill_the_process|creationDate|2021-08-02 +http://www.semanlink.net/doc/2021/08/cmd_how_do_i_kill_the_process|tag|http://www.semanlink.net/tag/stack_overflow +http://www.semanlink.net/doc/2021/08/cmd_how_do_i_kill_the_process|tag|http://www.semanlink.net/tag/dev_tips +http://www.semanlink.net/doc/2021/08/cmd_how_do_i_kill_the_process|tag|http://www.semanlink.net/tag/windows +http://www.semanlink.net/doc/2021/08/cmd_how_do_i_kill_the_process|title|cmd - How do I kill the process currently using a port on localhost in Windows? - Stack Overflow +http://www.semanlink.net/doc/2021/08/cmd_how_do_i_kill_the_process|bookmarkOf|https://stackoverflow.com/questions/39632667/how-do-i-kill-the-process-currently-using-a-port-on-localhost-in-windows +http://www.semanlink.net/doc/2021/08/cmd_how_do_i_kill_the_process|creationTime|2021-08-02T23:11:43Z +http://www.semanlink.net/doc/2021/09/nils_reimers_sur_twitter_int|creationDate|2021-09-20 +http://www.semanlink.net/doc/2021/09/nils_reimers_sur_twitter_int|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/doc/2021/09/nils_reimers_sur_twitter_int|tag|http://www.semanlink.net/tag/nils_reimers +http://www.semanlink.net/doc/2021/09/nils_reimers_sur_twitter_int|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/09/nils_reimers_sur_twitter_int|title|"Nils Reimers sur Twitter : ""Introduction - Neural Search""" +http://www.semanlink.net/doc/2021/09/nils_reimers_sur_twitter_int|bookmarkOf|https://twitter.com/Nils_Reimers/status/1439917604404092963?s=20 +http://www.semanlink.net/doc/2021/09/nils_reimers_sur_twitter_int|creationTime|2021-09-20T16:25:18Z +http://www.semanlink.net/doc/2020/10/librairies_independantes|creationDate|2020-10-31 +http://www.semanlink.net/doc/2020/10/librairies_independantes|tag|http://www.semanlink.net/tag/livre +http://www.semanlink.net/doc/2020/10/librairies_independantes|tag|http://www.semanlink.net/tag/amazon +http://www.semanlink.net/doc/2020/10/librairies_independantes|title|librairies indépendantes +http://www.semanlink.net/doc/2020/10/librairies_independantes|bookmarkOf|https://www.librairiesindependantes.com/ +http://www.semanlink.net/doc/2020/10/librairies_independantes|creationTime|2020-10-31T00:54:14Z +http://www.semanlink.net/doc/2020/10/la_sonde_americaine_osiris_rex_|creationDate|2020-10-21 
+http://www.semanlink.net/doc/2020/10/la_sonde_americaine_osiris_rex_|tag|http://www.semanlink.net/tag/asteroide +http://www.semanlink.net/doc/2020/10/la_sonde_americaine_osiris_rex_|tag|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/doc/2020/10/la_sonde_americaine_osiris_rex_|tag|http://www.semanlink.net/tag/osiris_rex +http://www.semanlink.net/doc/2020/10/la_sonde_americaine_osiris_rex_|title|La sonde américaine Osiris-Rex a réussi sa manœuvre sur l’astéroïde Bénou +http://www.semanlink.net/doc/2020/10/la_sonde_americaine_osiris_rex_|bookmarkOf|https://www.lemonde.fr/sciences/article/2020/10/21/la-sonde-americaine-osiris-rex-a-reussi-sa-man-uvre-sur-l-asteroide-benou_6056763_1650684.html +http://www.semanlink.net/doc/2020/10/la_sonde_americaine_osiris_rex_|creationTime|2020-10-21T04:32:50Z +http://www.semanlink.net/doc/2021/01/derwenai_kglab_graph_based_dat|creationDate|2021-01-30 +http://www.semanlink.net/doc/2021/01/derwenai_kglab_graph_based_dat|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2021/01/derwenai_kglab_graph_based_dat|tag|http://www.semanlink.net/tag/python_library +http://www.semanlink.net/doc/2021/01/derwenai_kglab_graph_based_dat|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2021/01/derwenai_kglab_graph_based_dat|title|DerwenAI/kglab: an abstraction layer in Python for building knowledge graphs +http://www.semanlink.net/doc/2021/01/derwenai_kglab_graph_based_dat|bookmarkOf|https://github.com/DerwenAI/kglab +http://www.semanlink.net/doc/2021/01/derwenai_kglab_graph_based_dat|creationTime|2021-01-30T14:30:27Z +http://www.semanlink.net/doc/2021/10/google_ai_blog_exploring_trans|creationDate|2021-10-13 +http://www.semanlink.net/doc/2021/10/google_ai_blog_exploring_trans|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/doc/2021/10/google_ai_blog_exploring_trans|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2021/10/google_ai_blog_exploring_trans|tag|http://www.semanlink.net/tag/google_ai_blog +http://www.semanlink.net/doc/2021/10/google_ai_blog_exploring_trans|comment|> With T5, we propose reframing all NLP tasks into a unified text-to-text-format where the input and output are always text strings, in contrast to BERT-style models that can only output either a class label or a span of the input. 
Our text-to-text framework allows us to use the same model, loss function, and hyperparameters on any NLP task, including machine translation, document summarization, question answering, and classification tasks +http://www.semanlink.net/doc/2021/10/google_ai_blog_exploring_trans|title|Google AI Blog: Exploring Transfer Learning with T5: the Text-To-Text Transfer Transformer (2020) +http://www.semanlink.net/doc/2021/10/google_ai_blog_exploring_trans|bookmarkOf|https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html +http://www.semanlink.net/doc/2021/10/google_ai_blog_exploring_trans|creationTime|2021-10-13T12:49:44Z +http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_|creationDate|2021-10-25 +http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_|tag|http://www.semanlink.net/tag/google_advertising +http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_|tag|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_|tag|http://www.semanlink.net/tag/jedi_blue +http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_|tag|http://www.semanlink.net/tag/deletefb +http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_|tag|http://www.semanlink.net/tag/google +http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_|title|"fasterthanlime 🌌 sur Twitter : google has a secret deal with facebook called ""Jedi Blue""" +http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_|bookmarkOf|https://twitter.com/fasterthanlime/status/1452053938195341314 +http://www.semanlink.net/doc/2021/10/fasterthanlime_%F0%9F%8C%8C_sur_twitter_|creationTime|2021-10-25T11:56:25Z +http://www.semanlink.net/doc/2021/06/streamlit|creationDate|2021-06-30 +http://www.semanlink.net/doc/2021/06/streamlit|tag|http://www.semanlink.net/tag/web_app +http://www.semanlink.net/doc/2021/06/streamlit|tag|http://www.semanlink.net/tag/python_tools +http://www.semanlink.net/doc/2021/06/streamlit|comment|"> Streamlit turns data scripts into shareable web apps in minutes. All in Python + +> Our entire architecture is such that each connected user has her own session object in the server, and her own separate thread where the app’s source file is executed." 
+http://www.semanlink.net/doc/2021/06/streamlit|title|Streamlit
+http://www.semanlink.net/doc/2021/06/streamlit|bookmarkOf|https://streamlit.io/
+http://www.semanlink.net/doc/2021/06/streamlit|creationTime|2021-06-30T16:39:57Z
+http://www.semanlink.net/doc/2020/07/how_to_use_bert_for_finding_sim|creationDate|2020-07-12
+http://www.semanlink.net/doc/2020/07/how_to_use_bert_for_finding_sim|tag|http://www.semanlink.net/tag/huggingface_transformers
+http://www.semanlink.net/doc/2020/07/how_to_use_bert_for_finding_sim|tag|http://www.semanlink.net/tag/bert_and_sentence_embeddings
+http://www.semanlink.net/doc/2020/07/how_to_use_bert_for_finding_sim|comment|"links to [UKPLab/sentence-transformers](doc:2020/07/ukplab_sentence_transformers_s)
+
+[Another answer](https://github.com/huggingface/transformers/issues/2986)
+
+"
+http://www.semanlink.net/doc/2020/07/how_to_use_bert_for_finding_sim|relatedDoc|http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s
+http://www.semanlink.net/doc/2020/07/how_to_use_bert_for_finding_sim|title|How to use BERT for finding similar sentences or similar news? · Issue #876 · huggingface/transformers
+http://www.semanlink.net/doc/2020/07/how_to_use_bert_for_finding_sim|bookmarkOf|https://github.com/huggingface/transformers/issues/876
+http://www.semanlink.net/doc/2020/07/how_to_use_bert_for_finding_sim|creationTime|2020-07-12T15:26:41Z
+http://www.semanlink.net/doc/2020/11/l_olivier_millenaire_de_kavousi|creationDate|2020-11-14
+http://www.semanlink.net/doc/2020/11/l_olivier_millenaire_de_kavousi|tag|http://www.semanlink.net/tag/arbres_remarquables
+http://www.semanlink.net/doc/2020/11/l_olivier_millenaire_de_kavousi|tag|http://www.semanlink.net/tag/crete
+http://www.semanlink.net/doc/2020/11/l_olivier_millenaire_de_kavousi|tag|http://www.semanlink.net/tag/oliviers
+http://www.semanlink.net/doc/2020/11/l_olivier_millenaire_de_kavousi|title|L'olivier millénaire de Kavousi en Crète
+http://www.semanlink.net/doc/2020/11/l_olivier_millenaire_de_kavousi|bookmarkOf|https://blog.tramier.fr/olivier-millenaire-kavousi-crete-3500-ans-plus-vieil-olivier-monde/
+http://www.semanlink.net/doc/2020/11/l_olivier_millenaire_de_kavousi|creationTime|2020-11-14T15:20:54Z
+http://www.semanlink.net/doc/2021/08/minimal_physicalism_as_a_scale_|creationDate|2021-08-02
+http://www.semanlink.net/doc/2021/08/minimal_physicalism_as_a_scale_|tag|http://www.semanlink.net/tag/neuroscience_of_consciousness
+http://www.semanlink.net/doc/2021/08/minimal_physicalism_as_a_scale_|title|Minimal physicalism as a scale-free substrate for cognition and consciousness Neuroscience of Consciousness Oxford Academic
+http://www.semanlink.net/doc/2021/08/minimal_physicalism_as_a_scale_|bookmarkOf|https://academic.oup.com/nc/article/2021/2/niab013/6334115
+http://www.semanlink.net/doc/2021/08/minimal_physicalism_as_a_scale_|creationTime|2021-08-02T18:24:48Z
+http://www.semanlink.net/doc/2021/06/gobero|creationDate|2021-06-19
+http://www.semanlink.net/doc/2021/06/gobero|tag|http://www.semanlink.net/tag/archeologie_du_niger
+http://www.semanlink.net/doc/2021/06/gobero|tag|http://www.semanlink.net/tag/tenere
+http://www.semanlink.net/doc/2021/06/gobero|comment|"> plus ancien site funéraire connu au Sahara. 8500 av. J.-C.
+
+> Les premiers habitants à Gobero... étaient des chasseurs-pêcheurs, grands et forts. Ils ont laissé des poteries. Ils sont restés là jusque vers 6000 av. J.-C. Les Ténéréens qui se sont installés ensuite dans la région environ 1 000 ans après, sont des pasteurs nomades...
+Ils étaient d'une plus petite corpulence, étaient plus graciles et leur style de poterie est distinct."
+http://www.semanlink.net/doc/2021/06/gobero|title|Gobero
+http://www.semanlink.net/doc/2021/06/gobero|bookmarkOf|https://fr.wikipedia.org/wiki/Gobero
+http://www.semanlink.net/doc/2021/06/gobero|creationTime|2021-06-19T18:55:48Z
+http://www.semanlink.net/doc/2020/09/raphaelsty_abayes_autoregressi|creationDate|2020-09-11
+http://www.semanlink.net/doc/2020/09/raphaelsty_abayes_autoregressi|tag|http://www.semanlink.net/tag/e_learning
+http://www.semanlink.net/doc/2020/09/raphaelsty_abayes_autoregressi|tag|http://www.semanlink.net/tag/raphaelsty
+http://www.semanlink.net/doc/2020/09/raphaelsty_abayes_autoregressi|tag|http://www.semanlink.net/tag/reseaux_bayesiens
+http://www.semanlink.net/doc/2020/09/raphaelsty_abayes_autoregressi|title|raphaelsty/abayes: Autoregressive Bayesian linear model
+http://www.semanlink.net/doc/2020/09/raphaelsty_abayes_autoregressi|bookmarkOf|https://github.com/raphaelsty/abayes
+http://www.semanlink.net/doc/2020/09/raphaelsty_abayes_autoregressi|creationTime|2020-09-11T11:40:34Z
+http://www.semanlink.net/doc/2021/03/vaccins_contre_le_covid_19_po|creationDate|2021-03-02
+http://www.semanlink.net/doc/2021/03/vaccins_contre_le_covid_19_po|tag|http://www.semanlink.net/tag/covid19_vaccin
+http://www.semanlink.net/doc/2021/03/vaccins_contre_le_covid_19_po|tag|http://www.semanlink.net/tag/retard_technologique_francais
+http://www.semanlink.net/doc/2021/03/vaccins_contre_le_covid_19_po|comment|Crédits publics en R&D pour la santé, de 2011 à 2018 : USA +8%, Allemagne +11% (->6 milliards), France -28% (-> un peu plus de 2 milliards) (source OCDE)
+http://www.semanlink.net/doc/2021/03/vaccins_contre_le_covid_19_po|title|Vaccins contre le Covid-19 : pourquoi la France accuse-t-elle un tel retard ?
+http://www.semanlink.net/doc/2021/03/vaccins_contre_le_covid_19_po|bookmarkOf|https://www.lemonde.fr/planete/video/2021/02/21/vaccins-contre-le-covid-19-pourquoi-la-france-accuse-t-elle-un-tel-retard_6070716_3244.html
+http://www.semanlink.net/doc/2021/03/vaccins_contre_le_covid_19_po|creationTime|2021-03-02T12:16:38Z
+http://www.semanlink.net/doc/2020/10/building_a_faster_and_accurate_|creationDate|2020-10-22
+http://www.semanlink.net/doc/2020/10/building_a_faster_and_accurate_|tag|http://www.semanlink.net/tag/nlp_and_search
+http://www.semanlink.net/doc/2020/10/building_a_faster_and_accurate_|tag|http://www.semanlink.net/tag/elasticsearch
+http://www.semanlink.net/doc/2020/10/building_a_faster_and_accurate_|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+http://www.semanlink.net/doc/2020/10/building_a_faster_and_accurate_|title|Building a Faster and Accurate Search Engine on Custom Dataset with Transformers 🤗 by Shivanand Roy Analytics Vidhya Sep, 2020 Medium
+http://www.semanlink.net/doc/2020/10/building_a_faster_and_accurate_|bookmarkOf|https://medium.com/analytics-vidhya/building-a-faster-and-accurate-search-engine-on-custom-dataset-with-transformers-d1277bedff3d
+http://www.semanlink.net/doc/2020/10/building_a_faster_and_accurate_|creationTime|2020-10-22T11:17:43Z
+http://www.semanlink.net/doc/2021/05/yann_lecun_sur_twitter_barlo|creationDate|2021-05-09
+http://www.semanlink.net/doc/2021/05/yann_lecun_sur_twitter_barlo|tag|http://www.semanlink.net/tag/yann_lecun
+http://www.semanlink.net/doc/2021/05/yann_lecun_sur_twitter_barlo|tag|http://www.semanlink.net/tag/self_supervised_learning
+http://www.semanlink.net/doc/2021/05/yann_lecun_sur_twitter_barlo|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2021/05/yann_lecun_sur_twitter_barlo|tag|http://www.semanlink.net/tag/siamese_network
+http://www.semanlink.net/doc/2021/05/yann_lecun_sur_twitter_barlo|title|"Yann LeCun sur Twitter : ""Barlow Twins: a new super-simple self-supervised method to train joint-embedding architectures (aka Siamese nets) non contrastively.
+"""
+http://www.semanlink.net/doc/2021/05/yann_lecun_sur_twitter_barlo|bookmarkOf|https://twitter.com/ylecun/status/1391162771987996674
+http://www.semanlink.net/doc/2021/05/yann_lecun_sur_twitter_barlo|creationTime|2021-05-09T23:49:08Z
+http://www.semanlink.net/doc/2021/06/calling_your_own_javascript_fun|creationDate|2021-06-07
+http://www.semanlink.net/doc/2021/06/calling_your_own_javascript_fun|tag|http://www.semanlink.net/tag/bob_ducharme
+http://www.semanlink.net/doc/2021/06/calling_your_own_javascript_fun|tag|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/doc/2021/06/calling_your_own_javascript_fun|tag|http://www.semanlink.net/tag/javascript
+http://www.semanlink.net/doc/2021/06/calling_your_own_javascript_fun|tag|http://www.semanlink.net/tag/jena
+http://www.semanlink.net/doc/2021/06/calling_your_own_javascript_fun|title|Calling your own JavaScript functions from SPARQL queries
+http://www.semanlink.net/doc/2021/06/calling_your_own_javascript_fun|bookmarkOf|http://www.bobdc.com/blog/arqjavascript/
+http://www.semanlink.net/doc/2021/06/calling_your_own_javascript_fun|creationTime|2021-06-07T23:57:31Z
+http://www.semanlink.net/doc/2020/10/top_6_open_source_pretrained_mo|creationDate|2020-10-11
+http://www.semanlink.net/doc/2020/10/top_6_open_source_pretrained_mo|tag|http://www.semanlink.net/tag/nlp_text_classification
+http://www.semanlink.net/doc/2020/10/top_6_open_source_pretrained_mo|title|Top 6 Open Source Pretrained Models for Text Classification you should use
+http://www.semanlink.net/doc/2020/10/top_6_open_source_pretrained_mo|bookmarkOf|https://www.sunjackson.com/post/7767/
+http://www.semanlink.net/doc/2020/10/top_6_open_source_pretrained_mo|creationTime|2020-10-11T01:12:13Z
+http://www.semanlink.net/doc/2021/06/calenda_advances_and_challeng|creationDate|2021-06-30
+http://www.semanlink.net/doc/2021/06/calenda_advances_and_challeng|tag|http://www.semanlink.net/tag/nlp_4_africa
+http://www.semanlink.net/doc/2021/06/calenda_advances_and_challeng|title|Calenda - Advances and challenges of NLP (Natural Language Processing) for african languages
+http://www.semanlink.net/doc/2021/06/calenda_advances_and_challeng|bookmarkOf|https://calenda.org/837411?lang=en
+http://www.semanlink.net/doc/2021/06/calenda_advances_and_challeng|creationTime|2021-06-30T00:42:45Z
+http://www.semanlink.net/doc/2021/02/openlux_luxembourg_radiograp|creationDate|2021-02-08
+http://www.semanlink.net/doc/2021/02/openlux_luxembourg_radiograp|tag|http://www.semanlink.net/tag/luxembourg
+http://www.semanlink.net/doc/2021/02/openlux_luxembourg_radiograp|tag|http://www.semanlink.net/tag/paradis_fiscaux
+http://www.semanlink.net/doc/2021/02/openlux_luxembourg_radiograp|tag|http://www.semanlink.net/tag/union_europeenne
+http://www.semanlink.net/doc/2021/02/openlux_luxembourg_radiograp|title|OpenLux : Luxembourg, radiographie d’un paradis fiscal
+http://www.semanlink.net/doc/2021/02/openlux_luxembourg_radiograp|bookmarkOf|https://www.lemonde.fr/les-decodeurs/article/2021/02/08/openlux-luxembourg-radiographie-d-un-paradis-fiscal_6069143_4355770.html
+http://www.semanlink.net/doc/2021/02/openlux_luxembourg_radiograp|creationTime|2021-02-08T21:05:46Z
+http://www.semanlink.net/doc/2021/07/thomas_piketty_%C2%AB_face_au_regi|creationDate|2021-07-10
+http://www.semanlink.net/doc/2021/07/thomas_piketty_%C2%AB_face_au_regi|tag|http://www.semanlink.net/tag/chine
+http://www.semanlink.net/doc/2021/07/thomas_piketty_%C2%AB_face_au_regi|tag|http://www.semanlink.net/tag/chine_leadership
+http://www.semanlink.net/doc/2021/07/thomas_piketty_%C2%AB_face_au_regi|tag|http://www.semanlink.net/tag/chine_vs_occident
+http://www.semanlink.net/doc/2021/07/thomas_piketty_%C2%AB_face_au_regi|tag|http://www.semanlink.net/tag/thomas_piketty
+http://www.semanlink.net/doc/2021/07/thomas_piketty_%C2%AB_face_au_regi|title|Thomas Piketty : « Face au régime chinois, la bonne réponse passe par une nouvelle forme de socialisme démocratique et participatif »
+http://www.semanlink.net/doc/2021/07/thomas_piketty_%C2%AB_face_au_regi|bookmarkOf|https://www.lemonde.fr/idees/article/2021/07/10/thomas-piketty-face-au-regime-chinois-la-bonne-reponse-passe-par-une-nouvelle-forme-de-socialisme-democratique-et-participatif_6087784_3232.html
+http://www.semanlink.net/doc/2021/07/thomas_piketty_%C2%AB_face_au_regi|creationTime|2021-07-10T13:38:28Z
+http://www.semanlink.net/doc/2021/05/the_history_of_west_africa_at_a|creationDate|2021-05-03
+http://www.semanlink.net/doc/2021/05/the_history_of_west_africa_at_a|tag|http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest
+http://www.semanlink.net/doc/2021/05/the_history_of_west_africa_at_a|tag|http://www.semanlink.net/tag/timeline
+http://www.semanlink.net/doc/2021/05/the_history_of_west_africa_at_a|tag|http://www.semanlink.net/tag/good
+http://www.semanlink.net/doc/2021/05/the_history_of_west_africa_at_a|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest
+http://www.semanlink.net/doc/2021/05/the_history_of_west_africa_at_a|comment|maps, timeline, bibliographies thématiques
+http://www.semanlink.net/doc/2021/05/the_history_of_west_africa_at_a|title|The History of West Africa at a Glance
+http://www.semanlink.net/doc/2021/05/the_history_of_west_africa_at_a|bookmarkOf|https://www.culturesofwestafrica.com/history-of-west-africa/
+http://www.semanlink.net/doc/2021/05/the_history_of_west_africa_at_a|creationTime|2021-05-03T00:42:29Z
+http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_|creationDate|2020-12-26
+http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_|tag|http://www.semanlink.net/tag/evangelistes
+http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_|tag|http://www.semanlink.net/tag/censorship
+http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_|tag|http://www.semanlink.net/tag/humour
+http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_|tag|http://www.semanlink.net/tag/bolsonaro
+http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_|tag|http://www.semanlink.net/tag/cancel_culture
+http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_|comment|> o “brasileiro só será livre quando o último Bolsonaro for enforcado nas tripas do último pastor da Igreja Universal”.
+http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_|title|Douglas Kennedy : « A l’ère de la “cancel culture” – où un simple bon mot peut chambouler votre carrière –, surveiller ce qu’on dit en public est devenu crucial »
+http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_|bookmarkOf|https://www.lemonde.fr/idees/article/2020/12/26/douglas-kennedy-en-ces-temps-deconcertants-l-ironie-est-un-concept-mouvant_6064528_3232.html
+http://www.semanlink.net/doc/2020/12/douglas_kennedy_%C2%AB_a_l%E2%80%99ere_de_|creationTime|2020-12-26T13:01:23Z
+http://www.semanlink.net/doc/2021/08/jean_latreille_%C2%AB_le_revenu_un|creationDate|2021-08-31
+http://www.semanlink.net/doc/2021/08/jean_latreille_%C2%AB_le_revenu_un|tag|http://www.semanlink.net/tag/guaranteed_basic_income
+http://www.semanlink.net/doc/2021/08/jean_latreille_%C2%AB_le_revenu_un|title|Jean Latreille : « Le revenu universel ne nous fera pas moins travailler, au contraire »
+http://www.semanlink.net/doc/2021/08/jean_latreille_%C2%AB_le_revenu_un|bookmarkOf|https://www.lemonde.fr/idees/article/2021/04/29/jean-latreille-le-revenu-universel-ne-nous-fera-pas-moins-travailler-au-contraire_6078544_3232.html
+http://www.semanlink.net/doc/2021/08/jean_latreille_%C2%AB_le_revenu_un|creationTime|2021-08-31T09:07:33Z
+http://www.semanlink.net/doc/2020/06/search_your_favorited_tweets_an|creationDate|2020-06-15
+http://www.semanlink.net/doc/2020/06/search_your_favorited_tweets_an|tag|http://www.semanlink.net/tag/tools
+http://www.semanlink.net/doc/2020/06/search_your_favorited_tweets_an|tag|http://www.semanlink.net/tag/twitter
+http://www.semanlink.net/doc/2020/06/search_your_favorited_tweets_an|title|Search your favorited tweets and articles with Twitter Discover — Daniel Nouri's Blog
+http://www.semanlink.net/doc/2020/06/search_your_favorited_tweets_an|bookmarkOf|http://danielnouri.org/notes/2020/06/14/search-your-favorited-tweets-and-articles-with-twitter-discover/
+http://www.semanlink.net/doc/2020/06/search_your_favorited_tweets_an|creationTime|2020-06-15T14:36:22Z
+http://www.semanlink.net/doc/2021/01/exploring_the_met_art_collectio|creationDate|2021-01-19
+http://www.semanlink.net/doc/2021/01/exploring_the_met_art_collectio|tag|http://www.semanlink.net/tag/similarity_queries
+http://www.semanlink.net/doc/2021/01/exploring_the_met_art_collectio|tag|http://www.semanlink.net/tag/metropolitan_museum_of_art
+http://www.semanlink.net/doc/2021/01/exploring_the_met_art_collectio|tag|http://www.semanlink.net/tag/knowledge_graph
+http://www.semanlink.net/doc/2021/01/exploring_the_met_art_collectio|comment|"similarity recommendations.
+Based on [GraphAware](doc:2021/01/graphaware_neo4j_consultancy_)"
+http://www.semanlink.net/doc/2021/01/exploring_the_met_art_collectio|relatedDoc|http://www.semanlink.net/doc/2021/01/graphaware_neo4j_consultancy_
+http://www.semanlink.net/doc/2021/01/exploring_the_met_art_collectio|title|Exploring The MET Art Collections with Hume #2 GraphAware
+http://www.semanlink.net/doc/2021/01/exploring_the_met_art_collectio|bookmarkOf|https://graphaware.com/hume/2021/01/06/exploring-the-met-art-collections-2.html
+http://www.semanlink.net/doc/2021/01/exploring_the_met_art_collectio|creationTime|2021-01-19T09:24:50Z
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|creationDate|2020-06-29
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|tag|http://www.semanlink.net/tag/google_research
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|tag|http://www.semanlink.net/tag/reformer
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|arxiv_author|Nikita Kitaev
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|arxiv_author|Łukasz Kaiser
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|arxiv_author|Anselm Levskaya
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|title|[2001.04451] Reformer: The Efficient Transformer
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|bookmarkOf|https://arxiv.org/abs/2001.04451
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|creationTime|2020-06-29T19:04:03Z
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|arxiv_summary|"Large Transformer models routinely achieve state-of-the-art results on a
+number of tasks but training these models can be prohibitively costly,
+especially on long sequences. We introduce two techniques to improve the
+efficiency of Transformers. For one, we replace dot-product attention by one
+that uses locality-sensitive hashing, changing its complexity from O($L^2$) to
+O($L\log L$), where $L$ is the length of the sequence. Furthermore, we use
+reversible residual layers instead of the standard residuals, which allows
+storing activations only once in the training process instead of $N$ times,
+where $N$ is the number of layers. The resulting model, the Reformer, performs
+on par with Transformer models while being much more memory-efficient and much
+faster on long sequences."
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|arxiv_firstAuthor|Nikita Kitaev
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|arxiv_updated|2020-02-18T16:01:18Z
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|arxiv_title|Reformer: The Efficient Transformer
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|arxiv_published|2020-01-13T18:38:28Z
+http://www.semanlink.net/doc/2020/06/2001_04451_reformer_the_effi|arxiv_num|2001.04451
+http://www.semanlink.net/doc/2021/09/paleo_data_search_%7C_search_%7C_na|creationDate|2021-09-26
+http://www.semanlink.net/doc/2021/09/paleo_data_search_%7C_search_%7C_na|tag|http://www.semanlink.net/tag/paleoclimatologie
+http://www.semanlink.net/doc/2021/09/paleo_data_search_%7C_search_%7C_na|title|Paleo Data Search Search National Centers for Environmental Information (NCEI)
+http://www.semanlink.net/doc/2021/09/paleo_data_search_%7C_search_%7C_na|bookmarkOf|https://www.ncdc.noaa.gov/paleo-search/
+http://www.semanlink.net/doc/2021/09/paleo_data_search_%7C_search_%7C_na|creationTime|2021-09-26T13:00:21Z
+http://www.semanlink.net/doc/2021/05/term_based_semantic_clusters_fo|creationDate|2021-05-26
+http://www.semanlink.net/doc/2021/05/term_based_semantic_clusters_fo|tag|http://www.semanlink.net/tag/nlp_short_texts
+http://www.semanlink.net/doc/2021/05/term_based_semantic_clusters_fo|tag|http://www.semanlink.net/tag/nlp_text_classification
+http://www.semanlink.net/doc/2021/05/term_based_semantic_clusters_fo|comment|"> Our aim is to classify short invoice descriptions,
+in such a way that each class reflects a different
+group of products or services
+
+
+> The inherent advantage of embeddings
+in dealing with out-of-vocabulary words
+presents, at the same time, the disadvantage of
+providing a text representation that does not focus
+on the importance of individual terms for the
+classification.
+>
+> a method that combines the advantages of
+word embeddings with conventional term extraction
+techniques
+
+> employs terms to create distinctive semantic concept clusters.
+These clusters are ranked using a semantic similarity function which in turn defines a semantic feature space that can be used for text classification"
+http://www.semanlink.net/doc/2021/05/term_based_semantic_clusters_fo|title|Term Based Semantic Clusters for Very Short Text Classification (2019)
+http://www.semanlink.net/doc/2021/05/term_based_semantic_clusters_fo|bookmarkOf|https://www.aclweb.org/anthology/R19-1102.pdf
+http://www.semanlink.net/doc/2021/05/term_based_semantic_clusters_fo|creationTime|2021-05-26T14:20:11Z
+http://www.semanlink.net/doc/2021/10/application_of_self_organizing_|creationDate|2021-10-17
+http://www.semanlink.net/doc/2021/10/application_of_self_organizing_|tag|http://www.semanlink.net/tag/self_organizing_maps
+http://www.semanlink.net/doc/2021/10/application_of_self_organizing_|tag|http://www.semanlink.net/tag/clustering_of_text_documents
+http://www.semanlink.net/doc/2021/10/application_of_self_organizing_|tag|http://www.semanlink.net/tag/survey
+http://www.semanlink.net/doc/2021/10/application_of_self_organizing_|title|Application of Self-Organizing Maps in Text Clustering: A Review IntechOpen (2012)
+http://www.semanlink.net/doc/2021/10/application_of_self_organizing_|bookmarkOf|https://www.intechopen.com/chapters/37680
+http://www.semanlink.net/doc/2021/10/application_of_self_organizing_|creationTime|2021-10-17T10:49:59Z
+http://www.semanlink.net/doc/2020/10/%C2%AB_la_dispersion_des_graines_a_p|creationDate|2020-10-15
+http://www.semanlink.net/doc/2020/10/%C2%AB_la_dispersion_des_graines_a_p|tag|http://www.semanlink.net/tag/curiosite_naturelle
+http://www.semanlink.net/doc/2020/10/%C2%AB_la_dispersion_des_graines_a_p|tag|http://www.semanlink.net/tag/samare_de_l_erable
+http://www.semanlink.net/doc/2020/10/%C2%AB_la_dispersion_des_graines_a_p|tag|http://www.semanlink.net/tag/evolution
+http://www.semanlink.net/doc/2020/10/%C2%AB_la_dispersion_des_graines_a_p|tag|http://www.semanlink.net/tag/dispersion_des_graines
+http://www.semanlink.net/doc/2020/10/%C2%AB_la_dispersion_des_graines_a_p|title|« La dispersion des graines a permis à Dame Nature de parfaire ses qualités d’ingénieur aéronautique »
+http://www.semanlink.net/doc/2020/10/%C2%AB_la_dispersion_des_graines_a_p|bookmarkOf|https://www.lemonde.fr/sciences/article/2020/10/14/la-dispersion-des-graines-a-permis-a-dame-nature-de-parfaire-ses-qualites-d-ingenieur-aeronautique_6056025_1650684.html
+http://www.semanlink.net/doc/2020/10/%C2%AB_la_dispersion_des_graines_a_p|creationTime|2020-10-15T01:59:30Z
+http://www.semanlink.net/doc/2021/07/du_glyphosate_aux_sdhi_les_res|creationDate|2021-07-01
+http://www.semanlink.net/doc/2021/07/du_glyphosate_aux_sdhi_les_res|tag|http://www.semanlink.net/tag/lobbies_economiques
+http://www.semanlink.net/doc/2021/07/du_glyphosate_aux_sdhi_les_res|tag|http://www.semanlink.net/tag/glyphosate
+http://www.semanlink.net/doc/2021/07/du_glyphosate_aux_sdhi_les_res|tag|http://www.semanlink.net/tag/choose_science
+http://www.semanlink.net/doc/2021/07/du_glyphosate_aux_sdhi_les_res|comment|[Pesticides et santé : les conclusions inquiétantes de l’expertise collective de l’Inserm](doc:2021/07/pesticides_et_sante_les_concl)
+http://www.semanlink.net/doc/2021/07/du_glyphosate_aux_sdhi_les_res|relatedDoc|http://www.semanlink.net/doc/2021/07/pesticides_et_sante_les_concl
+http://www.semanlink.net/doc/2021/07/du_glyphosate_aux_sdhi_les_res|title|Du glyphosate aux SDHI, les ressorts de la controverse
+http://www.semanlink.net/doc/2021/07/du_glyphosate_aux_sdhi_les_res|bookmarkOf|https://www.lemonde.fr/planete/article/2021/07/01/du-glyphosate-aux-sdhi-les-ressorts-de-la-controverse_6086481_3244.html
+http://www.semanlink.net/doc/2021/07/du_glyphosate_aux_sdhi_les_res|creationTime|2021-07-01T13:46:19Z
+http://www.semanlink.net/doc/2020/06/face_a_booking_des_hoteliers_f|creationDate|2020-06-16
+http://www.semanlink.net/doc/2020/06/face_a_booking_des_hoteliers_f|tag|http://www.semanlink.net/tag/booking_com
+http://www.semanlink.net/doc/2020/06/face_a_booking_des_hoteliers_f|title|Face à Booking, des hôteliers français tentent le boycott
+http://www.semanlink.net/doc/2020/06/face_a_booking_des_hoteliers_f|bookmarkOf|https://www.lemonde.fr/economie/article/2020/06/16/face-a-booking-des-hoteliers-francais-tentent-le-boycott_6042968_3234.html
+http://www.semanlink.net/doc/2020/06/face_a_booking_des_hoteliers_f|creationTime|2020-06-16T08:21:54Z
+http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire|creationDate|2020-08-06
+http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire|tag|http://www.semanlink.net/tag/disparition_des_abeilles
+http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire|tag|http://www.semanlink.net/tag/agriculture_biologique
+http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire|tag|http://www.semanlink.net/tag/neonicotinoides
+http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire|tag|http://www.semanlink.net/tag/macron_et_l_ecologie
+http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire|tag|http://www.semanlink.net/tag/insecticide
+http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire|comment|> la betterave cultivée selon le cahier des charges de l’agriculture biologique semble, elle, peu ou pas touchée par la jaunisse
+http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire|title|Le gouvernement va réintroduire les insecticides « tueurs d’abeilles »
+http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire|bookmarkOf|https://www.lemonde.fr/planete/article/2020/08/06/face-a-la-jaunisse-de-la-betterave-sucriere-l-executif-veut-permettre-un-recours-aux-neonicotinoides_6048305_3244.html
+http://www.semanlink.net/doc/2020/08/le_gouvernement_va_reintroduire|creationTime|2020-08-06T22:31:58Z
+http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi_1|creationDate|2020-08-31
+http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi_1|tag|http://www.semanlink.net/tag/hierarchical_multi_label_classification
+http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi_1|comment|> architecture for HMC called HMCN, capable of simultaneously optimizing local and global loss functions for **discovering local hierarchical class-relationships and global information from the entire class hierarchy** while penalizing hierarchical violation
+http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi_1|title|Hierarchical Multi-Label Classification Networks (2018)
+http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi_1|bookmarkOf|http://proceedings.mlr.press/v80/wehrmann18a/wehrmann18a.pdf
+http://www.semanlink.net/doc/2020/08/hierarchical_multi_label_classi_1|creationTime|2020-08-31T09:52:39Z
+http://www.semanlink.net/doc/2021/10/isotropy_in_the_contextual_embe|creationDate|2021-10-26
+http://www.semanlink.net/doc/2021/10/isotropy_in_the_contextual_embe|tag|http://www.semanlink.net/tag/anisotropy_in_lm_space
+http://www.semanlink.net/doc/2021/10/isotropy_in_the_contextual_embe|tag|http://www.semanlink.net/tag/nlp_baidu
+http://www.semanlink.net/doc/2021/10/isotropy_in_the_contextual_embe|title|Isotropy in the Contextual Embedding Space: Clusters and Manifolds OpenReview
+http://www.semanlink.net/doc/2021/10/isotropy_in_the_contextual_embe|bookmarkOf|https://openreview.net/forum?id=xYGNO86OWDH
+http://www.semanlink.net/doc/2021/10/isotropy_in_the_contextual_embe|creationTime|2021-10-26T16:02:15Z
+http://www.semanlink.net/doc/2020/11/hash_uris_%7C_w3c_blog|creationDate|2020-11-07
+http://www.semanlink.net/doc/2020/11/hash_uris_%7C_w3c_blog|tag|http://www.semanlink.net/tag/hash_uris
+http://www.semanlink.net/doc/2020/11/hash_uris_%7C_w3c_blog|tag|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/doc/2020/11/hash_uris_%7C_w3c_blog|tag|http://www.semanlink.net/tag/jeni_tennison
+http://www.semanlink.net/doc/2020/11/hash_uris_%7C_w3c_blog|tag|http://www.semanlink.net/tag/hash_bang_uris
+http://www.semanlink.net/doc/2020/11/hash_uris_%7C_w3c_blog|title|Hash URIs W3C Blog
+http://www.semanlink.net/doc/2020/11/hash_uris_%7C_w3c_blog|bookmarkOf|https://www.w3.org/blog/2011/05/hash-uris/
+http://www.semanlink.net/doc/2020/11/hash_uris_%7C_w3c_blog|creationTime|2020-11-07T11:15:13Z
+http://www.semanlink.net/doc/2021/04/projet_requeter_le_wiktionnaire|creationDate|2021-04-12
+http://www.semanlink.net/doc/2021/04/projet_requeter_le_wiktionnaire|tag|http://www.semanlink.net/tag/wiktionnaire
+http://www.semanlink.net/doc/2021/04/projet_requeter_le_wiktionnaire|tag|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/doc/2021/04/projet_requeter_le_wiktionnaire|comment|"-
+- [AFU](https://fr.wiktionary.org/wiki/assistance_au_freinage_d’urgence)"
+http://www.semanlink.net/doc/2021/04/projet_requeter_le_wiktionnaire|title|Projet:Requêter le Wiktionnaire — Wiktionnaire
+http://www.semanlink.net/doc/2021/04/projet_requeter_le_wiktionnaire|bookmarkOf|https://fr.wiktionary.org/wiki/Projet:Requ%C3%AAter_le_Wiktionnaire#SPARQL_avec_le_Dictionnaire_des_francophones
+http://www.semanlink.net/doc/2021/04/projet_requeter_le_wiktionnaire|creationTime|2021-04-12T17:16:12Z
+http://www.semanlink.net/doc/2020/07/covid19_pourquoi_zero_mort_au|creationDate|2020-07-15
+http://www.semanlink.net/doc/2020/07/covid19_pourquoi_zero_mort_au|tag|http://www.semanlink.net/tag/coronavirus
+http://www.semanlink.net/doc/2020/07/covid19_pourquoi_zero_mort_au|tag|http://www.semanlink.net/tag/vietnam
+http://www.semanlink.net/doc/2020/07/covid19_pourquoi_zero_mort_au|title|Covid19 : pourquoi zéro mort au Vietnam ?
+http://www.semanlink.net/doc/2020/07/covid19_pourquoi_zero_mort_au|bookmarkOf|https://www.lemonde.fr/blog/huet/2020/07/15/covid19-pourquoi-zero-mort-au-vietnam/
+http://www.semanlink.net/doc/2020/07/covid19_pourquoi_zero_mort_au|creationTime|2020-07-15T23:32:29Z
+http://www.semanlink.net/doc/2021/01/industrial_farming_is_one_of_th|creationDate|2021-01-13
+http://www.semanlink.net/doc/2021/01/industrial_farming_is_one_of_th|tag|http://www.semanlink.net/tag/elevage_industriel
+http://www.semanlink.net/doc/2021/01/industrial_farming_is_one_of_th|tag|http://www.semanlink.net/tag/yuval_noah_harari
+http://www.semanlink.net/doc/2021/01/industrial_farming_is_one_of_th|comment|> Tens of billions of sentient beings, each with complex sensations and emotions, live and die on a production line
+http://www.semanlink.net/doc/2021/01/industrial_farming_is_one_of_th|title|Industrial farming is one of the worst crimes in history The Guardian (2015)
+http://www.semanlink.net/doc/2021/01/industrial_farming_is_one_of_th|bookmarkOf|https://www.theguardian.com/books/2015/sep/25/industrial-farming-one-worst-crimes-history-ethical-question
+http://www.semanlink.net/doc/2021/01/industrial_farming_is_one_of_th|creationTime|2021-01-13T00:24:01Z
+http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_ran|creationDate|2020-12-18
+http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_ran|tag|http://www.semanlink.net/tag/word_embedding
+http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_ran|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_ran|tag|http://www.semanlink.net/tag/glove
+http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_ran|title|"Pablo Castro sur Twitter : ""Random finding of the day for word embeddings: vec(""apple"")-vec(""apples"") yields a vector close to ipad, ipod, etc. (apples removes the ""fruitness"" from apple)"
+http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_ran|bookmarkOf|https://twitter.com/pmc/status/1339649235847823361
+http://www.semanlink.net/doc/2020/12/pablo_castro_sur_twitter_ran|creationTime|2020-12-18T05:37:54Z
+http://www.semanlink.net/doc/2020/12/yrjana_rankka_ghard_mastodon_s|creationDate|2020-12-13
+http://www.semanlink.net/doc/2020/12/yrjana_rankka_ghard_mastodon_s|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2020/12/yrjana_rankka_ghard_mastodon_s|tag|http://www.semanlink.net/tag/deletefb
+http://www.semanlink.net/doc/2020/12/yrjana_rankka_ghard_mastodon_s|tag|http://www.semanlink.net/tag/yrjana_rankka
+http://www.semanlink.net/doc/2020/12/yrjana_rankka_ghard_mastodon_s|comment|"> Facebook must be razed to the ground leaving no stone on top of another. Spread salt on the ruins so nothing shall grow where it once was. Erase the very memory of it.
+
+Carthago delenda est"
+http://www.semanlink.net/doc/2020/12/yrjana_rankka_ghard_mastodon_s|title|"Yrjänä Rankka @ghard@mastodon.social sur Twitter : ""Facebook must be razed to the ground..."""
+http://www.semanlink.net/doc/2020/12/yrjana_rankka_ghard_mastodon_s|bookmarkOf|https://twitter.com/ghard/status/1063112819292454912
+http://www.semanlink.net/doc/2020/12/yrjana_rankka_ghard_mastodon_s|creationTime|2020-12-13T11:14:22Z
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|creationDate|2020-09-02
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|tag|http://www.semanlink.net/tag/rdf2vec
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|tag|http://www.semanlink.net/tag/knowledge_graph_completion
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|arxiv_author|Heiko Paulheim
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|arxiv_author|Andreea Iana
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|comment|> we argue that despite the huge body of work devoted on completing missing information in knowledge graphs, such missing implicit information is actually a signal, not a defect
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|title|[2009.00318] More is not Always Better: The Negative Impact of A-box Materialization on RDF2vec Knowledge Graph Embeddings
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|bookmarkOf|https://arxiv.org/abs/2009.00318
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|creationTime|2020-09-02T16:52:32Z
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|arxiv_summary|"RDF2vec is an embedding technique for representing knowledge graph entities
+in a continuous vector space. In this paper, we investigate the effect of
+materializing implicit A-box axioms induced by subproperties, as well as
+symmetric and transitive properties. While it might be a reasonable assumption
+that such a materialization before computing embeddings might lead to better
+embeddings, we conduct a set of experiments on DBpedia which demonstrate that
+the materialization actually has a negative effect on the performance of
+RDF2vec. In our analysis, we argue that despite the huge body of work devoted
+on completing missing information in knowledge graphs, such missing implicit
+information is actually a signal, not a defect, and we show examples
+illustrating that assumption."
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|arxiv_firstAuthor|Andreea Iana
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|arxiv_updated|2020-09-01T09:52:33Z
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|arxiv_title|More is not Always Better: The Negative Impact of A-box Materialization on RDF2vec Knowledge Graph Embeddings
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|arxiv_published|2020-09-01T09:52:33Z
+http://www.semanlink.net/doc/2020/09/2009_00318_more_is_not_always|arxiv_num|2009.00318
+http://www.semanlink.net/doc/2020/12/the_platypuses_were_glowing_|creationDate|2020-12-19
+http://www.semanlink.net/doc/2020/12/the_platypuses_were_glowing_|tag|http://www.semanlink.net/tag/ornithorynque
+http://www.semanlink.net/doc/2020/12/the_platypuses_were_glowing_|title|'The platypuses were glowing': the secret light of Australia's marsupials Science The Guardian
+http://www.semanlink.net/doc/2020/12/the_platypuses_were_glowing_|bookmarkOf|https://www.theguardian.com/science/2020/dec/19/tasmanian-devils-glow-in-the-dark-australian-animals-glowing-platypus-wombat-echidna-bandicoot-scientists-investigate-australia-marsupials-light
+http://www.semanlink.net/doc/2020/12/the_platypuses_were_glowing_|creationTime|2020-12-19T15:13:47Z
+http://www.semanlink.net/doc/2021/07/raphaelsty_rebert_renault_bert|creationDate|2021-07-26
+http://www.semanlink.net/doc/2021/07/raphaelsty_rebert_renault_bert|tag|http://www.semanlink.net/tag/github_project
+http://www.semanlink.net/doc/2021/07/raphaelsty_rebert_renault_bert|tag|http://www.semanlink.net/tag/raphaelsty
+http://www.semanlink.net/doc/2021/07/raphaelsty_rebert_renault_bert|title|raphaelsty/rebert: Renault Bert
+http://www.semanlink.net/doc/2021/07/raphaelsty_rebert_renault_bert|bookmarkOf|https://github.com/raphaelsty/rebert
+http://www.semanlink.net/doc/2021/07/raphaelsty_rebert_renault_bert|creationTime|2021-07-26T16:44:11Z
+http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f|creationDate|2021-02-10
+http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f|tag|http://www.semanlink.net/tag/nlp_juridique
+http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f|tag|http://www.semanlink.net/tag/flair
+http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f|tag|http://www.semanlink.net/tag/spacy
+http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f|tag|http://www.semanlink.net/tag/nlp_data_anonymization
+http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f|comment|[Part 2: NER algo benchmark: spaCy, Flair, m-BERT and camemBERT on anonymizing French commercial legal cases](doc:2019/12/ner_algo_benchmark_spacy_flai)
+http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f|relatedDoc|http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai
+http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f|title|Why we switched from Spacy to Flair to anonymize French case law by Michaël Benesty Towards Data Science
+http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f|bookmarkOf|https://towardsdatascience.com/why-we-switched-from-spacy-to-flair-to-anonymize-french-legal-cases-e7588566825f
+http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f|creationTime|2021-02-10T14:17:23Z
+http://www.semanlink.net/doc/2020/07/raphaelsty_kdmkb|creationDate|2020-07-24
+http://www.semanlink.net/doc/2020/07/raphaelsty_kdmkb|tag|http://www.semanlink.net/tag/kd_mkb
+http://www.semanlink.net/doc/2020/07/raphaelsty_kdmkb|tag|http://www.semanlink.net/tag/github_project
+http://www.semanlink.net/doc/2020/07/raphaelsty_kdmkb|tag|http://www.semanlink.net/tag/raphaelsty
+http://www.semanlink.net/doc/2020/07/raphaelsty_kdmkb|title|raphaelsty/mkb: Knowledge Base Embedding By Cooperative Knowledge Distillation
+http://www.semanlink.net/doc/2020/07/raphaelsty_kdmkb|bookmarkOf|https://github.com/raphaelsty/mkb
+http://www.semanlink.net/doc/2020/07/raphaelsty_kdmkb|creationTime|2020-07-24T23:43:00Z
+http://www.semanlink.net/doc/2020/06/the_softmax_function_and_its_de|creationDate|2020-06-04
+http://www.semanlink.net/doc/2020/06/the_softmax_function_and_its_de|tag|http://www.semanlink.net/tag/softmax
+http://www.semanlink.net/doc/2020/06/the_softmax_function_and_its_de|title|The Softmax function and its derivative - Eli Bendersky's website
+http://www.semanlink.net/doc/2020/06/the_softmax_function_and_its_de|bookmarkOf|https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/
+http://www.semanlink.net/doc/2020/06/the_softmax_function_and_its_de|creationTime|2020-06-04T02:37:00Z
+http://www.semanlink.net/doc/2021/02/what_does_95_covid_19_vaccine_|creationDate|2021-02-20
+http://www.semanlink.net/doc/2021/02/what_does_95_covid_19_vaccine_|tag|http://www.semanlink.net/tag/covid19_vaccin
+http://www.semanlink.net/doc/2021/02/what_does_95_covid_19_vaccine_|tag|http://www.semanlink.net/tag/the_lancet
+http://www.semanlink.net/doc/2021/02/what_does_95_covid_19_vaccine_|title|What does 95% COVID-19 vaccine efficacy really mean? - The Lancet Infectious Diseases
+http://www.semanlink.net/doc/2021/02/what_does_95_covid_19_vaccine_|bookmarkOf|https://www.thelancet.com/journals/laninf/article/PIIS1473-3099(21)00075-X/fulltext
+http://www.semanlink.net/doc/2021/02/what_does_95_covid_19_vaccine_|creationTime|2021-02-20T01:15:05Z
+http://www.semanlink.net/doc/2020/06/on_word_embeddings|creationDate|2020-06-05
+http://www.semanlink.net/doc/2020/06/on_word_embeddings|tag|http://www.semanlink.net/tag/sebastian_ruder
+http://www.semanlink.net/doc/2020/06/on_word_embeddings|tag|http://www.semanlink.net/tag/word2vec
+http://www.semanlink.net/doc/2020/06/on_word_embeddings|tag|http://www.semanlink.net/tag/word_embedding
+http://www.semanlink.net/doc/2020/06/on_word_embeddings|comment|"History of word embeddings in the context of language
+modelling.
+[Next post in series](doc:2020/06/approximating_the_softmax_for_l)"
+http://www.semanlink.net/doc/2020/06/on_word_embeddings|relatedDoc|http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l
+http://www.semanlink.net/doc/2020/06/on_word_embeddings|title|On word embeddings
+http://www.semanlink.net/doc/2020/06/on_word_embeddings|bookmarkOf|https://ruder.io/word-embeddings-1/index.html
+http://www.semanlink.net/doc/2020/06/on_word_embeddings|creationTime|2020-06-05T01:31:14Z
+http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b|creationDate|2020-08-27
+http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b|tag|http://www.semanlink.net/tag/targeted_ads
+http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b|tag|http://www.semanlink.net/tag/privacy_and_internet
+http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b|tag|http://www.semanlink.net/tag/facebook
+http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b|tag|http://www.semanlink.net/tag/deletefb
+http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b|tag|http://www.semanlink.net/tag/apple
+http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b|comment|> Facebook has apologized to its users and advertisers for being forced to respect people’s privacy in an upcoming update to Apple’s mobile operating system – and promised it will do its best to invade their privacy on other platforms.
+http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b|title|Facebook apologizes to users, businesses for Apple’s monstrous efforts to protect its customers' privacy • The Register
+http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b|bookmarkOf|https://www.theregister.com/2020/08/27/facebook_ios_ads/
+http://www.semanlink.net/doc/2020/08/facebook_apologizes_to_users_b|creationTime|2020-08-27T13:44:56Z
+http://www.semanlink.net/doc/2020/12/fp_servant_sur_twitter_const|creationDate|2020-12-03
+http://www.semanlink.net/doc/2020/12/fp_servant_sur_twitter_const|tag|http://www.semanlink.net/tag/fps_tweet
+http://www.semanlink.net/doc/2020/12/fp_servant_sur_twitter_const|tag|http://www.semanlink.net/tag/nlp_4_semanlink
+http://www.semanlink.net/doc/2020/12/fp_servant_sur_twitter_const|tag|http://www.semanlink.net/tag/semanlink_related
+http://www.semanlink.net/doc/2020/12/fp_servant_sur_twitter_const|tag|http://www.semanlink.net/tag/personal_knowledge_graph
+http://www.semanlink.net/doc/2020/12/fp_servant_sur_twitter_const|comment|"> constructing a personal knowledge graph as a support for learning (and a metaphor of the learning experience). From googling, browsing wikipedia/KBs, discovering new words and concepts to organizing all of this into your own concept graph = acquiring knowledge.
+
+Semanlink: my digital twin?"
+http://www.semanlink.net/doc/2020/12/fp_servant_sur_twitter_const|title|"FP Servant sur Twitter : ""constructing a personal knowledge graph as a support for learning (and a metaphor of the learning experience)..."""
+http://www.semanlink.net/doc/2020/12/fp_servant_sur_twitter_const|bookmarkOf|https://twitter.com/hyperfp/status/1317460511991209987
+http://www.semanlink.net/doc/2020/12/fp_servant_sur_twitter_const|creationTime|2020-12-03T01:29:13Z
+http://www.semanlink.net/doc/2020/06/richard_horton_patron_du_%C2%AB_lan|creationDate|2020-06-21
+http://www.semanlink.net/doc/2020/06/richard_horton_patron_du_%C2%AB_lan|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales
+http://www.semanlink.net/doc/2020/06/richard_horton_patron_du_%C2%AB_lan|comment|> La question est : pourquoi le président [français Emmanuel] Macron, le président [du Conseil italien Giuseppe] Conte, pourquoi le premier ministre [britannique Boris] Johnson, pourquoi le président [américain Donald] Trump n’ont-ils rien fait ? Ne comprenaient-ils pas ce qui se passait en Chine ? Ne croyaient-ils pas les Chinois ? N’ont-ils pas demandé à leurs représentations diplomatiques à Pékin d’enquêter ? Je ne comprends pas. Les preuves étaient très claires, dès fin janvier. Donc je pense que les politiciens vont devoir s’expliquer.
+http://www.semanlink.net/doc/2020/06/richard_horton_patron_du_%C2%AB_lan|title|Richard Horton, patron du « Lancet » : « Le Covid-19 montre une faillite catastrophique des gouvernements occidentaux »
+http://www.semanlink.net/doc/2020/06/richard_horton_patron_du_%C2%AB_lan|bookmarkOf|https://www.lemonde.fr/sciences/article/2020/06/20/richard-horton-le-covid-19-montre-une-faillite-catastrophique-des-gouvernements-occidentaux_6043590_1650684.html
+http://www.semanlink.net/doc/2020/06/richard_horton_patron_du_%C2%AB_lan|creationTime|2020-06-21T00:17:37Z
+http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l|creationDate|2020-06-04
+http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l|tag|http://www.semanlink.net/tag/sebastian_ruder
+http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l|tag|http://www.semanlink.net/tag/word_embedding
+http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l|tag|http://www.semanlink.net/tag/softmax
+http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l|comment|"- Softmax-based Approaches
+ - Hierarchical Softmax
+ - Differentiated Softmax
+ - CNN-Softmax
+- Sampling-based Approaches
+ - Importance Sampling
+ - ...
+ - [Noise contrastive estimation](tag:noise_contrastive_estimation)
+ - [Negative Sampling](tag:negative_sampling)
+ - ...
+
+[prev post in series](doc:2020/06/on_word_embeddings)"
+http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l|relatedDoc|http://www.semanlink.net/doc/2020/06/on_word_embeddings
+http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l|title|Approximating the Softmax for Learning Word Embeddings
+http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l|bookmarkOf|https://ruder.io/word-embeddings-softmax/index.html#samplingbasedapproaches
+http://www.semanlink.net/doc/2020/06/approximating_the_softmax_for_l|creationTime|2020-06-04T02:44:09Z
+http://www.semanlink.net/doc/2020/07/ranked_entities_in_search_resul|creationDate|2020-07-02
+http://www.semanlink.net/doc/2020/07/ranked_entities_in_search_resul|tag|http://www.semanlink.net/tag/ranked_entities_in_search_results
+http://www.semanlink.net/doc/2020/07/ranked_entities_in_search_resul|tag|http://www.semanlink.net/tag/patent
+http://www.semanlink.net/doc/2020/07/ranked_entities_in_search_resul|tag|http://www.semanlink.net/tag/google
+http://www.semanlink.net/doc/2020/07/ranked_entities_in_search_resul|tag|http://www.semanlink.net/tag/google_ranking
+http://www.semanlink.net/doc/2020/07/ranked_entities_in_search_resul|comment|A recently approved Google patent sheds light on the search engine's process behind showing ranked lists of entities in the SERPs
+http://www.semanlink.net/doc/2020/07/ranked_entities_in_search_resul|title|Ranked Entities in Search Results at Google
+http://www.semanlink.net/doc/2020/07/ranked_entities_in_search_resul|bookmarkOf|https://www.searchenginejournal.com/ranked-entities-google-search-results/372973/#close
+http://www.semanlink.net/doc/2020/07/ranked_entities_in_search_resul|creationTime|2020-07-02T15:45:01Z
+http://www.semanlink.net/doc/2020/12/autophrase_automated_phrase_mi|creationDate|2020-12-14
+http://www.semanlink.net/doc/2020/12/autophrase_automated_phrase_mi|tag|http://www.semanlink.net/tag/phrase_mining
+http://www.semanlink.net/doc/2020/12/autophrase_automated_phrase_mi|title|AutoPhrase: Automated Phrase Mining from Massive Text Corpora
+http://www.semanlink.net/doc/2020/12/autophrase_automated_phrase_mi|bookmarkOf|https://github.com/shangjingbo1226/AutoPhrase
+http://www.semanlink.net/doc/2020/12/autophrase_automated_phrase_mi|creationTime|2020-12-14T19:15:04Z
+http://www.semanlink.net/doc/2021/03/text_is_the_api_for_humans_|creationDate|2021-03-20
+http://www.semanlink.net/doc/2021/03/text_is_the_api_for_humans_|tag|http://www.semanlink.net/tag/hugging_face
+http://www.semanlink.net/doc/2021/03/text_is_the_api_for_humans_|tag|http://www.semanlink.net/tag/fps_tweet
+http://www.semanlink.net/doc/2021/03/text_is_the_api_for_humans_|title|"""Text is the API for humans"""
+http://www.semanlink.net/doc/2021/03/text_is_the_api_for_humans_|bookmarkOf|https://twitter.com/hyperfp/status/1372547138803077121?s=20
+http://www.semanlink.net/doc/2021/03/text_is_the_api_for_humans_|creationTime|2021-03-20T17:06:34Z
+http://www.semanlink.net/doc/2020/06/detecter_le_texte_dans_les_fich|creationDate|2020-06-15
+http://www.semanlink.net/doc/2020/06/detecter_le_texte_dans_les_fich|tag|http://www.semanlink.net/tag/ocr
+http://www.semanlink.net/doc/2020/06/detecter_le_texte_dans_les_fich|tag|http://www.semanlink.net/tag/pdf_extract
+http://www.semanlink.net/doc/2020/06/detecter_le_texte_dans_les_fich|tag|http://www.semanlink.net/tag/google_cloud_platform
+http://www.semanlink.net/doc/2020/06/detecter_le_texte_dans_les_fich|title|Détecter le texte dans les fichiers (PDF/TIFF) API Cloud Vision
+http://www.semanlink.net/doc/2020/06/detecter_le_texte_dans_les_fich|bookmarkOf|https://cloud.google.com/vision/docs/pdf
+http://www.semanlink.net/doc/2020/06/detecter_le_texte_dans_les_fich|creationTime|2020-06-15T23:46:43Z
+http://www.semanlink.net/doc/2020/07/abstract_wikipedia_july_2020_an|creationDate|2020-07-03
+http://www.semanlink.net/doc/2020/07/abstract_wikipedia_july_2020_an|tag|http://www.semanlink.net/tag/wikipedia
+http://www.semanlink.net/doc/2020/07/abstract_wikipedia_july_2020_an|tag|http://www.semanlink.net/tag/denny_vrandecic
+http://www.semanlink.net/doc/2020/07/abstract_wikipedia_july_2020_an|title|Abstract Wikipedia/July 2020 announcement
+http://www.semanlink.net/doc/2020/07/abstract_wikipedia_july_2020_an|bookmarkOf|https://meta.wikimedia.org/wiki/Abstract_Wikipedia/July_2020_announcement
+http://www.semanlink.net/doc/2020/07/abstract_wikipedia_july_2020_an|creationTime|2020-07-03T00:59:55Z
+http://www.semanlink.net/doc/2020/07/le_plan_de_la_filiere_hydrogene|creationDate|2020-07-23
+http://www.semanlink.net/doc/2020/07/le_plan_de_la_filiere_hydrogene|tag|http://www.semanlink.net/tag/hydrogen_economy
+http://www.semanlink.net/doc/2020/07/le_plan_de_la_filiere_hydrogene|title|Le plan de la filière hydrogène pour accélérer le développement de la technologie
+http://www.semanlink.net/doc/2020/07/le_plan_de_la_filiere_hydrogene|bookmarkOf|https://www.lemonde.fr/economie/article/2020/07/23/le-plan-de-la-filiere-hydrogene-pour-accelerer-le-developpement-de-la-technologie_6047006_3234.html
+http://www.semanlink.net/doc/2020/07/le_plan_de_la_filiere_hydrogene|creationTime|2020-07-23T00:50:05Z
+http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear|creationDate|2021-04-18
+http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear|tag|http://www.semanlink.net/tag/contrastive_learning
+http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear|tag|http://www.semanlink.net/tag/sentence_embeddings
+http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear|tag|http://www.semanlink.net/tag/bert_and_sentence_embeddings
+http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear|comment|"(by one of the authors of [KEPLER](doc:2020/11/1911_06136_kepler_a_unified_))
+
+a contrastive sentence
+embedding framework, which can be used to produce
+sentence embeddings, from either
+unlabeled or labeled data.
+
+> 1. **an unsupervised approach,
+which takes an input sentence and predicts
+itself in a contrastive objective, with only
+standard dropout** used as noise
+> 2. we draw inspiration
+from the recent success of learning sentence
+embeddings from natural language inference
+(NLI) datasets and incorporate annotated
+pairs from NLI datasets into contrastive
+learning by using “entailment” pairs as positives
+and “contradiction” pairs as hard negatives
+
+Cites [[2011.05864] On the Sentence Embeddings from Pre-trained Language Models](doc:2021/04/2011_05864_on_the_sentence_em) (question of the anisotropic semantic space of BERT's sentences)"
+http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear|relatedDoc|http://www.semanlink.net/doc/2021/04/2011_05864_on_the_sentence_em
+http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear|relatedDoc|http://www.semanlink.net/doc/2020/11/1911_06136_kepler_a_unified_
+http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear|title|SimCSE: Simple Contrastive Learning of Sentence Embeddings
+http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear|bookmarkOf|https://gaotianyu1350.github.io/assets/simcse/simcse.pdf
+http://www.semanlink.net/doc/2021/04/simcse_simple_contrastive_lear|creationTime|2021-04-18T18:28:29Z
+http://www.semanlink.net/doc/2021/01/la_vie_aquatique|creationDate|2021-01-20
+http://www.semanlink.net/doc/2021/01/la_vie_aquatique|tag|http://www.semanlink.net/tag/rigolo
+http://www.semanlink.net/doc/2021/01/la_vie_aquatique|tag|http://www.semanlink.net/tag/film
+http://www.semanlink.net/doc/2021/01/la_vie_aquatique|tag|http://www.semanlink.net/tag/comedie
+http://www.semanlink.net/doc/2021/01/la_vie_aquatique|tag|http://www.semanlink.net/tag/commandant_cousteau
+http://www.semanlink.net/doc/2021/01/la_vie_aquatique|title|La Vie Aquatique
+http://www.semanlink.net/doc/2021/01/la_vie_aquatique|bookmarkOf|https://fr.wikipedia.org/wiki/La_Vie_aquatique
+http://www.semanlink.net/doc/2021/01/la_vie_aquatique|creationTime|2021-01-20T22:25:15Z
+http://www.semanlink.net/doc/2021/03/les_secrets_reveles_de_la_bible|creationDate|2021-03-28
+http://www.semanlink.net/doc/2021/03/les_secrets_reveles_de_la_bible|tag|http://www.semanlink.net/tag/documentaire_tv
+http://www.semanlink.net/doc/2021/03/les_secrets_reveles_de_la_bible|tag|http://www.semanlink.net/tag/archeologie
+http://www.semanlink.net/doc/2021/03/les_secrets_reveles_de_la_bible|tag|http://www.semanlink.net/tag/bible
+http://www.semanlink.net/doc/2021/03/les_secrets_reveles_de_la_bible|comment|à l’époque d’Abraham la première religion monothéiste était loin d’être une entité cohérente… La plupart des Juifs ont, par exemple, longtemps adoré des dieux païens. Par ailleurs, nombre d’entre eux pensaient que Dieu avait une épouse qui était elle-même une idole vénérée
+http://www.semanlink.net/doc/2021/03/les_secrets_reveles_de_la_bible|title|Les secrets révélés de la Bible ARTE
+http://www.semanlink.net/doc/2021/03/les_secrets_reveles_de_la_bible|bookmarkOf|https://www.arte.tv/fr/videos/041368-000-A/les-secrets-reveles-de-la-bible/
+http://www.semanlink.net/doc/2021/03/les_secrets_reveles_de_la_bible|creationTime|2021-03-28T16:16:09Z
+http://www.semanlink.net/doc/2021/06/semantic_search_with_s_bert_is_|creationDate|2021-06-05
+http://www.semanlink.net/doc/2021/06/semantic_search_with_s_bert_is_|tag|http://www.semanlink.net/tag/sbert
+http://www.semanlink.net/doc/2021/06/semantic_search_with_s_bert_is_|tag|http://www.semanlink.net/tag/semantic_search
+http://www.semanlink.net/doc/2021/06/semantic_search_with_s_bert_is_|tag|http://www.semanlink.net/tag/howto
+http://www.semanlink.net/doc/2021/06/semantic_search_with_s_bert_is_|comment|> SentenceTransformers is designed in such way that fine-tuning your own sentence / text embeddings models is easy.
+http://www.semanlink.net/doc/2021/06/semantic_search_with_s_bert_is_|title|Semantic Search with S-BERT is all you need
+http://www.semanlink.net/doc/2021/06/semantic_search_with_s_bert_is_|bookmarkOf|https://medium.com/mlearning-ai/semantic-search-with-s-bert-is-all-you-need-951bc710e160
+http://www.semanlink.net/doc/2021/06/semantic_search_with_s_bert_is_|creationTime|2021-06-05T16:02:26Z
+http://www.semanlink.net/doc/2021/10/annee_de_la_biologie_2021_2022|creationDate|2021-10-18
+http://www.semanlink.net/doc/2021/10/annee_de_la_biologie_2021_2022|tag|http://www.semanlink.net/tag/biologie
+http://www.semanlink.net/doc/2021/10/annee_de_la_biologie_2021_2022|title|Année de la biologie 2021-2022
+http://www.semanlink.net/doc/2021/10/annee_de_la_biologie_2021_2022|bookmarkOf|https://anneedelabiologie.cnrs.fr/
+http://www.semanlink.net/doc/2021/10/annee_de_la_biologie_2021_2022|creationTime|2021-10-18T14:30:30Z
+http://www.semanlink.net/doc/2021/02/arogozhnikov_einops_deep_learn|creationDate|2021-02-03
+http://www.semanlink.net/doc/2021/02/arogozhnikov_einops_deep_learn|tag|http://www.semanlink.net/tag/deep_learning_frameworks
+http://www.semanlink.net/doc/2021/02/arogozhnikov_einops_deep_learn|tag|http://www.semanlink.net/tag/discute_avec_raphael
+http://www.semanlink.net/doc/2021/02/arogozhnikov_einops_deep_learn|title|arogozhnikov/einops: Deep learning operations reinvented (for pytorch, tensorflow, jax and others)
+http://www.semanlink.net/doc/2021/02/arogozhnikov_einops_deep_learn|bookmarkOf|https://github.com/arogozhnikov/einops
+http://www.semanlink.net/doc/2021/02/arogozhnikov_einops_deep_learn|creationTime|2021-02-03T15:37:19Z
+http://www.semanlink.net/doc/2021/04/manipulations_politiques_en_lig|creationDate|2021-04-24
+http://www.semanlink.net/doc/2021/04/manipulations_politiques_en_lig|tag|http://www.semanlink.net/tag/manipulations_politiques
+http://www.semanlink.net/doc/2021/04/manipulations_politiques_en_lig|tag|http://www.semanlink.net/tag/facebook
+http://www.semanlink.net/doc/2021/04/manipulations_politiques_en_lig|tag|http://www.semanlink.net/tag/deletefb
+http://www.semanlink.net/doc/2021/04/manipulations_politiques_en_lig|tag|http://www.semanlink.net/tag/whistleblower
+http://www.semanlink.net/doc/2021/04/manipulations_politiques_en_lig|title|Manipulations politiques en ligne : la lanceuse d’alerte Sophie Zhang dénonce les lacunes de Facebook
+http://www.semanlink.net/doc/2021/04/manipulations_politiques_en_lig|bookmarkOf|https://www.lemonde.fr/pixels/article/2021/04/24/manipulations-politiques-en-ligne-la-lanceuse-d-alerte-sophie-zhang-denonce-les-lacunes-de-facebook_6077877_4408996.html
+http://www.semanlink.net/doc/2021/04/manipulations_politiques_en_lig|creationTime|2021-04-24T11:32:13Z
+http://www.semanlink.net/doc/2021/02/recent_advances_in_language_mod|creationDate|2021-02-24
+http://www.semanlink.net/doc/2021/02/recent_advances_in_language_mod|tag|http://www.semanlink.net/tag/sebastian_ruder
+http://www.semanlink.net/doc/2021/02/recent_advances_in_language_mod|tag|http://www.semanlink.net/tag/language_model_fine_tuning
+http://www.semanlink.net/doc/2021/02/recent_advances_in_language_mod|title|Recent Advances in Language Model Fine-tuning
+http://www.semanlink.net/doc/2021/02/recent_advances_in_language_mod|bookmarkOf|https://ruder.io/recent-advances-lm-fine-tuning/
+http://www.semanlink.net/doc/2021/02/recent_advances_in_language_mod|creationTime|2021-02-24T12:17:09Z
+http://www.semanlink.net/doc/2021/01/kgc_2021_call_for_presentations|creationDate|2021-01-30
+http://www.semanlink.net/doc/2021/01/kgc_2021_call_for_presentations|tag|http://www.semanlink.net/tag/the_knowledge_graph_conference
+http://www.semanlink.net/doc/2021/01/kgc_2021_call_for_presentations|title|KGC 2021 Call for Presentations The Knowledge Graph Conference
+http://www.semanlink.net/doc/2021/01/kgc_2021_call_for_presentations|bookmarkOf|https://www.knowledgegraph.tech/conference-2021/kgc-2021-call-for-presentations/
+http://www.semanlink.net/doc/2021/01/kgc_2021_call_for_presentations|creationTime|2021-01-30T13:51:19Z
+http://www.semanlink.net/doc/2021/03/ernest_ilisca_0000_0002_3842_5|creationDate|2021-03-01
+http://www.semanlink.net/doc/2021/03/ernest_ilisca_0000_0002_3842_5|tag|http://www.semanlink.net/tag/ernest
+http://www.semanlink.net/doc/2021/03/ernest_ilisca_0000_0002_3842_5|title|Ernest ILISCA (0000-0002-3842-586X) - ORCID Connecting Research and Researchers
+http://www.semanlink.net/doc/2021/03/ernest_ilisca_0000_0002_3842_5|bookmarkOf|https://orcid.org/0000-0002-3842-586X
+http://www.semanlink.net/doc/2021/03/ernest_ilisca_0000_0002_3842_5|creationTime|2021-03-01T13:26:47Z
+http://www.semanlink.net/doc/2021/10/%C2%AB_le_massacre_de_la_saint_barth|creationDate|2021-10-24
+http://www.semanlink.net/doc/2021/10/%C2%AB_le_massacre_de_la_saint_barth|tag|http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy
+http://www.semanlink.net/doc/2021/10/%C2%AB_le_massacre_de_la_saint_barth|title|« Le massacre de la Saint-Barthélemy s’est joué entre voisins »
+http://www.semanlink.net/doc/2021/10/%C2%AB_le_massacre_de_la_saint_barth|bookmarkOf|https://www.lemonde.fr/le-monde-des-religions/article/2021/10/24/le-massacre-de-la-saint-barthelemy-s-est-joue-entre-voisins_6099676_6038514.html
+http://www.semanlink.net/doc/2021/10/%C2%AB_le_massacre_de_la_saint_barth|creationTime|2021-10-24T11:28:51Z
+http://www.semanlink.net/doc/2020/10/thomas_piketty_%C2%AB_que_faire_de|creationDate|2020-10-10
+http://www.semanlink.net/doc/2020/10/thomas_piketty_%C2%AB_que_faire_de|tag|http://www.semanlink.net/tag/thomas_piketty
+http://www.semanlink.net/doc/2020/10/thomas_piketty_%C2%AB_que_faire_de|tag|http://www.semanlink.net/tag/dette_covid
+http://www.semanlink.net/doc/2020/10/thomas_piketty_%C2%AB_que_faire_de|title|Thomas Piketty : « Que faire de la dette Covid-19 ? »
+http://www.semanlink.net/doc/2020/10/thomas_piketty_%C2%AB_que_faire_de|bookmarkOf|https://www.lemonde.fr/idees/article/2020/10/10/thomas-piketty-que-faire-de-la-dette-covid-19_6055524_3232.html
+http://www.semanlink.net/doc/2020/10/thomas_piketty_%C2%AB_que_faire_de|creationTime|2020-10-10T14:36:49Z
+http://www.semanlink.net/doc/2020/10/villagers_%7C_berlin_live_arte_|creationDate|2020-10-10
+http://www.semanlink.net/doc/2020/10/villagers_%7C_berlin_live_arte_|tag|http://www.semanlink.net/tag/arte
+http://www.semanlink.net/doc/2020/10/villagers_%7C_berlin_live_arte_|tag|http://www.semanlink.net/tag/musique
+http://www.semanlink.net/doc/2020/10/villagers_%7C_berlin_live_arte_|tag|http://www.semanlink.net/tag/youtube_video
+http://www.semanlink.net/doc/2020/10/villagers_%7C_berlin_live_arte_|title|A Trick Of The Light - Villagers Berlin Live – ARTE Concert (2019)
+http://www.semanlink.net/doc/2020/10/villagers_%7C_berlin_live_arte_|bookmarkOf|https://www.youtube.com/watch?v=3_l2pBinTH4&t=41m28s
+http://www.semanlink.net/doc/2020/10/villagers_%7C_berlin_live_arte_|creationTime|2020-10-10T03:07:51Z
+http://www.semanlink.net/doc/2021/03/sentencetransformers_documentat|creationDate|2021-03-25
+http://www.semanlink.net/doc/2021/03/sentencetransformers_documentat|tag|http://www.semanlink.net/tag/documentation
+http://www.semanlink.net/doc/2021/03/sentencetransformers_documentat|tag|http://www.semanlink.net/tag/sbert
+http://www.semanlink.net/doc/2021/03/sentencetransformers_documentat|title|SentenceTransformers Documentation
+http://www.semanlink.net/doc/2021/03/sentencetransformers_documentat|bookmarkOf|https://www.sbert.net/
+http://www.semanlink.net/doc/2021/03/sentencetransformers_documentat|creationTime|2021-03-25T19:05:01Z
+http://www.semanlink.net/doc/2020/06/la_crise_du_coronavirus_montre_|creationDate|2020-06-06
+http://www.semanlink.net/doc/2020/06/la_crise_du_coronavirus_montre_|tag|http://www.semanlink.net/tag/france_bureaucratie
+http://www.semanlink.net/doc/2020/06/la_crise_du_coronavirus_montre_|tag|http://www.semanlink.net/tag/etat_de_la_france
+http://www.semanlink.net/doc/2020/06/la_crise_du_coronavirus_montre_|tag|http://www.semanlink.net/tag/france_fiasco_administratif
+http://www.semanlink.net/doc/2020/06/la_crise_du_coronavirus_montre_|tag|http://www.semanlink.net/tag/coronavirus
+http://www.semanlink.net/doc/2020/06/la_crise_du_coronavirus_montre_|comment|"> L’Etat a présenté son pire visage, soit une
+étroitesse bureaucratique, un côté tatillon, autoritaire, voire persécuteur, sans se
+montrer efficace pour autant."
+http://www.semanlink.net/doc/2020/06/la_crise_du_coronavirus_montre_|title|La crise du coronavirus montre que « nous ne jouons plus dans la cour des grands » +http://www.semanlink.net/doc/2020/06/la_crise_du_coronavirus_montre_|bookmarkOf|https://www.lemonde.fr/idees/article/2020/06/06/marcel-gauchet-nous-ne-jouons-plus-dans-la-cour-des-grands_6041961_3232.html +http://www.semanlink.net/doc/2020/06/la_crise_du_coronavirus_montre_|creationTime|2020-06-06T13:55:03Z +http://www.semanlink.net/doc/2021/08/apple_s_plan_to_think_differen|creationDate|2021-08-06 +http://www.semanlink.net/doc/2021/08/apple_s_plan_to_think_differen|tag|http://www.semanlink.net/tag/encryption +http://www.semanlink.net/doc/2021/08/apple_s_plan_to_think_differen|tag|http://www.semanlink.net/tag/apple +http://www.semanlink.net/doc/2021/08/apple_s_plan_to_think_differen|tag|http://www.semanlink.net/tag/vie_privee +http://www.semanlink.net/doc/2021/08/apple_s_plan_to_think_differen|tag|http://www.semanlink.net/tag/apple_sucks +http://www.semanlink.net/doc/2021/08/apple_s_plan_to_think_differen|tag|http://www.semanlink.net/tag/backdoor +http://www.semanlink.net/doc/2021/08/apple_s_plan_to_think_differen|title|"Apple's Plan to ""Think Different"" About Encryption Opens a Backdoor to Your Private Life Electronic Frontier Foundation" +http://www.semanlink.net/doc/2021/08/apple_s_plan_to_think_differen|bookmarkOf|https://www.eff.org/deeplinks/2021/08/apples-plan-think-different-about-encryption-opens-backdoor-your-private-life +http://www.semanlink.net/doc/2021/08/apple_s_plan_to_think_differen|creationTime|2021-08-06T12:45:22Z +http://www.semanlink.net/doc/2021/03/the_angels_share|creationDate|2021-03-17 +http://www.semanlink.net/doc/2021/03/the_angels_share|tag|http://www.semanlink.net/tag/film_britannique +http://www.semanlink.net/doc/2021/03/the_angels_share|tag|http://www.semanlink.net/tag/ecosse +http://www.semanlink.net/doc/2021/03/the_angels_share|tag|http://www.semanlink.net/tag/whisky +http://www.semanlink.net/doc/2021/03/the_angels_share|comment|film by Ken Loach +http://www.semanlink.net/doc/2021/03/the_angels_share|title|The Angels' Share +http://www.semanlink.net/doc/2021/03/the_angels_share|bookmarkOf|https://en.wikipedia.org/wiki/The_Angels%27_Share +http://www.semanlink.net/doc/2021/03/the_angels_share|creationTime|2021-03-17T22:31:59Z +http://www.semanlink.net/doc/2020/09/le_discours_du_griot_genealogis|creationDate|2020-09-27 +http://www.semanlink.net/doc/2020/09/le_discours_du_griot_genealogis|tag|http://www.semanlink.net/tag/jerma +http://www.semanlink.net/doc/2020/09/le_discours_du_griot_genealogis|title|Le discours du griot généalogiste chez les Zarma du Niger - Sandra Bornand - Google Books +http://www.semanlink.net/doc/2020/09/le_discours_du_griot_genealogis|bookmarkOf|https://books.google.fr/books?id=3wmQrkZL7sUC&pg=PA360&lpg=PA360&dq=kour%C3%A9+niger+histoire&source=bl&ots=e_Oi_3R05L&sig=ACfU3U0_0hxvbCJomz1AY4dKCO77QD-THg&hl=en&sa=X&ved=2ahUKEwj2uPelzonsAhVJDmMBHYfUCIU4WhDoATAJegQIChAB#v=onepage&q=kour%C3%A9%20niger%20histoire&f=false +http://www.semanlink.net/doc/2020/09/le_discours_du_griot_genealogis|creationTime|2020-09-27T17:58:15Z +http://www.semanlink.net/doc/2021/05/inria_paris_nlp_almanach_team_|creationDate|2021-05-04 +http://www.semanlink.net/doc/2021/05/inria_paris_nlp_almanach_team_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/05/inria_paris_nlp_almanach_team_|tag|http://www.semanlink.net/tag/nlp_french 
+http://www.semanlink.net/doc/2021/05/inria_paris_nlp_almanach_team_|tag|http://www.semanlink.net/tag/inria +http://www.semanlink.net/doc/2021/05/inria_paris_nlp_almanach_team_|tag|http://www.semanlink.net/tag/gpt_3 +http://www.semanlink.net/doc/2021/05/inria_paris_nlp_almanach_team_|title|"Inria Paris NLP (ALMAnaCH team) sur Twitter : ""#PAGnol, a new, free, GPT-3-like generative LM for French" +http://www.semanlink.net/doc/2021/05/inria_paris_nlp_almanach_team_|bookmarkOf|https://twitter.com/InriaParisNLP/status/1389688691216560128?s=20 +http://www.semanlink.net/doc/2021/05/inria_paris_nlp_almanach_team_|creationTime|2021-05-04T23:23:44Z +http://www.semanlink.net/doc/2020/06/information_bottleneck_for_nlp_|creationDate|2020-06-29 +http://www.semanlink.net/doc/2020/06/information_bottleneck_for_nlp_|tag|http://www.semanlink.net/tag/emnlp_2019 +http://www.semanlink.net/doc/2020/06/information_bottleneck_for_nlp_|tag|http://www.semanlink.net/tag/information_bottleneck_method +http://www.semanlink.net/doc/2020/06/information_bottleneck_for_nlp_|comment|[Related paper](doc:2020/06/1910_00163_specializing_word_) +http://www.semanlink.net/doc/2020/06/information_bottleneck_for_nlp_|relatedDoc|http://www.semanlink.net/doc/2020/06/1910_00163_specializing_word_ +http://www.semanlink.net/doc/2020/06/information_bottleneck_for_nlp_|title|Information Bottleneck for NLP (parsing & summarization) +http://www.semanlink.net/doc/2020/06/information_bottleneck_for_nlp_|bookmarkOf|https://medium.com/jasonwu0731/information-bottleneck-for-nlp-parsing-summarization-961418fbb697 +http://www.semanlink.net/doc/2020/06/information_bottleneck_for_nlp_|creationTime|2020-06-29T10:15:03Z +http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_sbe|creationDate|2021-04-22 +http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_sbe|tag|http://www.semanlink.net/tag/sbert +http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_sbe|tag|http://www.semanlink.net/tag/nils_reimers +http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_sbe|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_sbe|title|"Nils Reimers sur Twitter : ""SBERT Release v1.1.0""" +http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_sbe|bookmarkOf|https://twitter.com/Nils_Reimers/status/1385145914604630016 +http://www.semanlink.net/doc/2021/04/nils_reimers_sur_twitter_sbe|creationTime|2021-04-22T19:35:49Z +http://www.semanlink.net/doc/2020/11/archeologie_en_crete_sur_les|creationDate|2020-11-14 +http://www.semanlink.net/doc/2020/11/archeologie_en_crete_sur_les|tag|http://www.semanlink.net/tag/grece_mycenienne +http://www.semanlink.net/doc/2020/11/archeologie_en_crete_sur_les|tag|http://www.semanlink.net/tag/crete +http://www.semanlink.net/doc/2020/11/archeologie_en_crete_sur_les|tag|http://www.semanlink.net/tag/late_bronze_age_collapse +http://www.semanlink.net/doc/2020/11/archeologie_en_crete_sur_les|tag|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/doc/2020/11/archeologie_en_crete_sur_les|comment|"> le chaînon manquant, long de six +siècles dits « obscurs », entre la chute des palais mycéniens, à l’âge du bronze, et l’apparition des +cités-Etats, à l’aube de la démocratie grecque." 
+http://www.semanlink.net/doc/2020/11/archeologie_en_crete_sur_les|title|Archéologie : en Crète, sur les traces des « âges obscurs » +http://www.semanlink.net/doc/2020/11/archeologie_en_crete_sur_les|bookmarkOf|https://www.lemonde.fr/sciences/article/2020/11/03/archeologie-en-crete-sur-les-traces-des-ages-obscurs_6058354_1650684.html +http://www.semanlink.net/doc/2020/11/archeologie_en_crete_sur_les|creationTime|2020-11-14T15:10:15Z +http://www.semanlink.net/doc/2020/09/json_web_tokens_jwt_io|creationDate|2020-09-18 +http://www.semanlink.net/doc/2020/09/json_web_tokens_jwt_io|tag|http://www.semanlink.net/tag/access_token +http://www.semanlink.net/doc/2020/09/json_web_tokens_jwt_io|tag|http://www.semanlink.net/tag/securite +http://www.semanlink.net/doc/2020/09/json_web_tokens_jwt_io|title|JSON Web Tokens - jwt.io +http://www.semanlink.net/doc/2020/09/json_web_tokens_jwt_io|bookmarkOf|https://jwt.io/ +http://www.semanlink.net/doc/2020/09/json_web_tokens_jwt_io|creationTime|2020-09-18T15:55:21Z +http://www.semanlink.net/doc/2021/04/cnrs_zinder_1900_2019_paulin|creationDate|2021-04-04 +http://www.semanlink.net/doc/2021/04/cnrs_zinder_1900_2019_paulin|tag|http://www.semanlink.net/tag/zinder +http://www.semanlink.net/doc/2021/04/cnrs_zinder_1900_2019_paulin|comment|En 2018 le CNRS réunit et présente à Zinder une série de photographies d'archives prises dans cette ville vers 1900. +http://www.semanlink.net/doc/2021/04/cnrs_zinder_1900_2019_paulin|title|CNRS- Zinder 1900-2019 - Pauline Rousseau +http://www.semanlink.net/doc/2021/04/cnrs_zinder_1900_2019_paulin|bookmarkOf|http://paulinerousseau.com/index.php/cnrs-zinder-1900-2019/ +http://www.semanlink.net/doc/2021/04/cnrs_zinder_1900_2019_paulin|creationTime|2021-04-04T19:27:15Z +http://www.semanlink.net/doc/2021/04/amazon_reconnue_coupable_d%E2%80%99avoi|creationDate|2021-04-09 +http://www.semanlink.net/doc/2021/04/amazon_reconnue_coupable_d%E2%80%99avoi|tag|http://www.semanlink.net/tag/amazon +http://www.semanlink.net/doc/2021/04/amazon_reconnue_coupable_d%E2%80%99avoi|tag|http://www.semanlink.net/tag/proletaires2_0 +http://www.semanlink.net/doc/2021/04/amazon_reconnue_coupable_d%E2%80%99avoi|comment|La multinationale devra payer 50 millions d’euros après avoir été mise en cause par la Commission fédérale du commerce (FTC) pour avoir mis en place ce mécanisme pendant plus de deux ans et demi. 
+http://www.semanlink.net/doc/2021/04/amazon_reconnue_coupable_d%E2%80%99avoi|title|Amazon reconnue coupable d’avoir gardé une partie des pourboires de ses livreurs aux Etats-Unis +http://www.semanlink.net/doc/2021/04/amazon_reconnue_coupable_d%E2%80%99avoi|bookmarkOf|https://www.lemonde.fr/economie/article/2021/02/03/amazon-debourse-plus-de-50-millions-d-euros-pour-regler-une-affaire-de-pourboires-detournes_6068602_3234.html +http://www.semanlink.net/doc/2021/04/amazon_reconnue_coupable_d%E2%80%99avoi|creationTime|2021-04-09T19:13:42Z +http://www.semanlink.net/doc/2021/07/cc_100_monolingual_datasets_fr|creationDate|2021-07-29 +http://www.semanlink.net/doc/2021/07/cc_100_monolingual_datasets_fr|tag|http://www.semanlink.net/tag/low_resource_languages +http://www.semanlink.net/doc/2021/07/cc_100_monolingual_datasets_fr|comment|Attempt to recreate the dataset used for training XLM-R ([[1911.02116] Unsupervised Cross-lingual Representation Learning at Scale](doc:2021/07/1911_02116_unsupervised_cross)) +http://www.semanlink.net/doc/2021/07/cc_100_monolingual_datasets_fr|relatedDoc|http://www.semanlink.net/doc/2021/07/1911_02116_unsupervised_cross +http://www.semanlink.net/doc/2021/07/cc_100_monolingual_datasets_fr|title|CC-100: Monolingual Datasets from Web Crawl Data +http://www.semanlink.net/doc/2021/07/cc_100_monolingual_datasets_fr|bookmarkOf|http://data.statmt.org/cc-100/ +http://www.semanlink.net/doc/2021/07/cc_100_monolingual_datasets_fr|creationTime|2021-07-29T00:20:28Z +http://www.semanlink.net/doc/2021/05/ctlr_wic_tsv_target_sense_veri|creationDate|2021-05-13 +http://www.semanlink.net/doc/2021/05/ctlr_wic_tsv_target_sense_veri|tag|http://www.semanlink.net/tag/target_sense_verification +http://www.semanlink.net/doc/2021/05/ctlr_wic_tsv_target_sense_veri|tag|http://www.semanlink.net/tag/jose_moreno +http://www.semanlink.net/doc/2021/05/ctlr_wic_tsv_target_sense_veri|comment|Cites [Matching the Blanks: Distributional Similarity for Relation Learning](doc:2021/05/1906_03158_matching_the_blank) +http://www.semanlink.net/doc/2021/05/ctlr_wic_tsv_target_sense_veri|relatedDoc|http://www.semanlink.net/doc/2021/05/1906_03158_matching_the_blank +http://www.semanlink.net/doc/2021/05/ctlr_wic_tsv_target_sense_veri|title|CTLR@WiC-TSV: Target Sense Verification using Marked Inputs and Pre-trained Models (2021) +http://www.semanlink.net/doc/2021/05/ctlr_wic_tsv_target_sense_veri|bookmarkOf|https://paperswithcode.com/paper/ctlr-wic-tsv-target-sense-verification-using +http://www.semanlink.net/doc/2021/05/ctlr_wic_tsv_target_sense_veri|creationTime|2021-05-13T00:29:13Z +http://www.semanlink.net/doc/2020/06/lord_of_the_wiki_ring_importin|creationDate|2020-06-17 +http://www.semanlink.net/doc/2020/06/lord_of_the_wiki_ring_importin|tag|http://www.semanlink.net/tag/wikidata +http://www.semanlink.net/doc/2020/06/lord_of_the_wiki_ring_importin|tag|http://www.semanlink.net/tag/neo4j +http://www.semanlink.net/doc/2020/06/lord_of_the_wiki_ring_importin|title|Lord of the Wiki Ring: Importing Wikidata into Neo4j and analyzing family trees +http://www.semanlink.net/doc/2020/06/lord_of_the_wiki_ring_importin|bookmarkOf|https://towardsdatascience.com/lord-of-the-wiki-ring-importing-wikidata-into-neo4j-and-analyzing-family-trees-da27f64d675e +http://www.semanlink.net/doc/2020/06/lord_of_the_wiki_ring_importin|creationTime|2020-06-17T19:09:37Z +http://www.semanlink.net/doc/2020/10/top_trends_of_graph_machine_lea|creationDate|2020-10-15 
+http://www.semanlink.net/doc/2020/10/top_trends_of_graph_machine_lea|tag|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/doc/2020/10/top_trends_of_graph_machine_lea|title|Top Trends of Graph Machine Learning in 2020 by Sergei Ivanov Towards Data Science +http://www.semanlink.net/doc/2020/10/top_trends_of_graph_machine_lea|bookmarkOf|https://towardsdatascience.com/top-trends-of-graph-machine-learning-in-2020-1194175351a3 +http://www.semanlink.net/doc/2020/10/top_trends_of_graph_machine_lea|creationTime|2020-10-15T17:56:46Z +http://www.semanlink.net/doc/2021/04/simultaneous_categorization_of_|creationDate|2021-04-20 +http://www.semanlink.net/doc/2021/04/simultaneous_categorization_of_|tag|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/doc/2021/04/simultaneous_categorization_of_|title|Simultaneous Categorization of Text Documents And Identification of Cluster-dependent Keywords (2003) +http://www.semanlink.net/doc/2021/04/simultaneous_categorization_of_|bookmarkOf|https://www.researchgate.net/publication/2562519_Simultaneous_Categorization_of_Text_Documents_And_Identification_of_Cluster-dependent_Keywords +http://www.semanlink.net/doc/2021/04/simultaneous_categorization_of_|bookmarkOf|https://link.springer.com/chapter/10.1007%2F978-1-4757-4305-0_3 +http://www.semanlink.net/doc/2021/04/simultaneous_categorization_of_|creationTime|2021-04-20T01:31:31Z +http://www.semanlink.net/doc/2020/09/francois_jean_lefebvre_de_la_ba|creationDate|2020-09-09 +http://www.semanlink.net/doc/2020/09/francois_jean_lefebvre_de_la_ba|tag|http://www.semanlink.net/tag/voltaire +http://www.semanlink.net/doc/2020/09/francois_jean_lefebvre_de_la_ba|tag|http://www.semanlink.net/tag/blaspheme +http://www.semanlink.net/doc/2020/09/francois_jean_lefebvre_de_la_ba|tag|http://www.semanlink.net/tag/torture +http://www.semanlink.net/doc/2020/09/francois_jean_lefebvre_de_la_ba|title|François-Jean Lefebvre de La Barre — Wikipédia +http://www.semanlink.net/doc/2020/09/francois_jean_lefebvre_de_la_ba|bookmarkOf|https://fr.wikipedia.org/wiki/Fran%C3%A7ois-Jean_Lefebvre_de_La_Barre +http://www.semanlink.net/doc/2020/09/francois_jean_lefebvre_de_la_ba|creationTime|2020-09-09T21:26:52Z +http://www.semanlink.net/doc/2021/09/stanfordnlp_stanza_official_st|creationDate|2021-09-20 +http://www.semanlink.net/doc/2021/09/stanfordnlp_stanza_official_st|tag|http://www.semanlink.net/tag/python_library +http://www.semanlink.net/doc/2021/09/stanfordnlp_stanza_official_st|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2021/09/stanfordnlp_stanza_official_st|title|stanfordnlp/stanza: Official Stanford NLP Python Library for Many Human Languages +http://www.semanlink.net/doc/2021/09/stanfordnlp_stanza_official_st|bookmarkOf|https://github.com/stanfordnlp/stanza +http://www.semanlink.net/doc/2021/09/stanfordnlp_stanza_official_st|creationTime|2021-09-20T16:54:01Z +http://www.semanlink.net/doc/2021/02/le_calendrier_mythique_chez_les|creationDate|2021-02-13 +http://www.semanlink.net/doc/2021/02/le_calendrier_mythique_chez_les|tag|http://www.semanlink.net/tag/jean_rouch +http://www.semanlink.net/doc/2021/02/le_calendrier_mythique_chez_les|tag|http://www.semanlink.net/tag/jerma +http://www.semanlink.net/doc/2021/02/le_calendrier_mythique_chez_les|title|Le calendrier mythique chez les Songhay-Zarma (Niger) +http://www.semanlink.net/doc/2021/02/le_calendrier_mythique_chez_les|bookmarkOf|https://journals.openedition.org/span/134 
+http://www.semanlink.net/doc/2021/02/le_calendrier_mythique_chez_les|creationTime|2021-02-13T10:54:38Z +http://www.semanlink.net/doc/2021/01/%C2%AB_il_existe_deja_des_pratiques_|creationDate|2021-01-03 +http://www.semanlink.net/doc/2021/01/%C2%AB_il_existe_deja_des_pratiques_|tag|http://www.semanlink.net/tag/neonicotinoides +http://www.semanlink.net/doc/2021/01/%C2%AB_il_existe_deja_des_pratiques_|tag|http://www.semanlink.net/tag/macronie +http://www.semanlink.net/doc/2021/01/%C2%AB_il_existe_deja_des_pratiques_|tag|http://www.semanlink.net/tag/glyphosate +http://www.semanlink.net/doc/2021/01/%C2%AB_il_existe_deja_des_pratiques_|title|« Il existe déjà des pratiques agricoles alternatives à l’emploi du glyphosate et des néonicotinoïdes » +http://www.semanlink.net/doc/2021/01/%C2%AB_il_existe_deja_des_pratiques_|bookmarkOf|https://www.lemonde.fr/idees/article/2021/01/02/il-existe-deja-des-pratiques-agricoles-alternatives-a-l-emploi-du-glyphosate-et-des-neonicotinoides_6065017_3232.html +http://www.semanlink.net/doc/2021/01/%C2%AB_il_existe_deja_des_pratiques_|creationTime|2021-01-03T14:57:19Z +http://www.semanlink.net/doc/2021/05/enterprise_knowledge_graph_solu|creationDate|2021-05-19 +http://www.semanlink.net/doc/2021/05/enterprise_knowledge_graph_solu|tag|http://www.semanlink.net/tag/eccenca +http://www.semanlink.net/doc/2021/05/enterprise_knowledge_graph_solu|tag|http://www.semanlink.net/tag/enterprise_knowledge_graph +http://www.semanlink.net/doc/2021/05/enterprise_knowledge_graph_solu|title|Enterprise Knowledge Graph Solutions eccenca +http://www.semanlink.net/doc/2021/05/enterprise_knowledge_graph_solu|bookmarkOf|https://eccenca.com/ +http://www.semanlink.net/doc/2021/05/enterprise_knowledge_graph_solu|creationTime|2021-05-19T14:12:58Z +http://www.semanlink.net/doc/2020/07/journal_des_africanistes_%7C_soci|creationDate|2020-07-04 +http://www.semanlink.net/doc/2020/07/journal_des_africanistes_%7C_soci|tag|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/doc/2020/07/journal_des_africanistes_%7C_soci|tag|http://www.semanlink.net/tag/histoire_de_l_afrique +http://www.semanlink.net/doc/2020/07/journal_des_africanistes_%7C_soci|title|Journal des africanistes Société des africanistes +http://www.semanlink.net/doc/2020/07/journal_des_africanistes_%7C_soci|bookmarkOf|http://africanistes.org/revue +http://www.semanlink.net/doc/2020/07/journal_des_africanistes_%7C_soci|creationTime|2020-07-04T13:32:55Z +http://www.semanlink.net/doc/2021/03/un_an_d%E2%80%99epidemie_de_covid_19_|creationDate|2021-03-13 +http://www.semanlink.net/doc/2021/03/un_an_d%E2%80%99epidemie_de_covid_19_|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2021/03/un_an_d%E2%80%99epidemie_de_covid_19_|tag|http://www.semanlink.net/tag/declin_de_la_france +http://www.semanlink.net/doc/2021/03/un_an_d%E2%80%99epidemie_de_covid_19_|comment|"> En France, deux défauts majeurs ont donné des longueurs d’avance à la pandémie : l’effacement de notre culture de la prévention et l’affaissement de notre science. Autant de signes tangibles d’un déclin inquiétant pour l’avenir et le rayonnement de notre pays. + +**Très inquiétant** pour un pays couvert de centrales nucléaires"
+http://www.semanlink.net/doc/2021/03/un_an_d%E2%80%99epidemie_de_covid_19_|title|Un an d’épidémie de Covid-19 : retrouver le sens du long terme +http://www.semanlink.net/doc/2021/03/un_an_d%E2%80%99epidemie_de_covid_19_|bookmarkOf|https://www.lemonde.fr/idees/article/2021/03/12/un-an-d-epidemie-de-covid-19-retrouver-le-sens-du-long-terme_6072879_3232.html +http://www.semanlink.net/doc/2021/03/un_an_d%E2%80%99epidemie_de_covid_19_|creationTime|2021-03-13T09:56:42Z +http://www.semanlink.net/doc/2021/08/hauptsacht_bis_42_1876_bullet|creationDate|2021-08-09 +http://www.semanlink.net/doc/2021/08/hauptsacht_bis_42_1876_bullet|tag|http://www.semanlink.net/tag/crochemelier +http://www.semanlink.net/doc/2021/08/hauptsacht_bis_42_1876_bullet|title|Hauptsacht. bis 42.1876: Bulletin monumental ou Collection de mémoires et d... - Google Books +http://www.semanlink.net/doc/2021/08/hauptsacht_bis_42_1876_bullet|bookmarkOf|https://www.google.fr/books/edition/Hauptsacht_bis_42_1876_Bulletin_monument/M_NEAAAAYAAJ?hl=en&gbpv=1&dq=crochemelier&pg=PA345&printsec=frontcover +http://www.semanlink.net/doc/2021/08/hauptsacht_bis_42_1876_bullet|creationTime|2021-08-09T22:37:43Z +http://www.semanlink.net/doc/2021/04/how_many_data_points_is_a_promp|creationDate|2021-04-08 +http://www.semanlink.net/doc/2021/04/how_many_data_points_is_a_promp|tag|http://www.semanlink.net/tag/language_model_fine_tuning +http://www.semanlink.net/doc/2021/04/how_many_data_points_is_a_promp|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2021/04/how_many_data_points_is_a_promp|comment|"> 1. Write a prompt that a pre-trained LM can complete to give the answer to your problem, GPT-style. +> 2. Use backpropagation on fine-tuning data to learn the correct completions. The model can then draw information from both your task description and the supervised data! ([src](https://twitter.com/huggingface/status/1379805752509005825?s=20))" +http://www.semanlink.net/doc/2021/04/how_many_data_points_is_a_promp|title|How many data points is a prompt worth ? +http://www.semanlink.net/doc/2021/04/how_many_data_points_is_a_promp|bookmarkOf|https://huggingface.co/blog/how_many_data_points/ +http://www.semanlink.net/doc/2021/04/how_many_data_points_is_a_promp|creationTime|2021-04-08T13:58:48Z +http://www.semanlink.net/doc/2021/10/raphaelsty_retrieverreader_fas|creationDate|2021-10-04 +http://www.semanlink.net/doc/2021/10/raphaelsty_retrieverreader_fas|tag|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/doc/2021/10/raphaelsty_retrieverreader_fas|tag|http://www.semanlink.net/tag/semantic_search +http://www.semanlink.net/doc/2021/10/raphaelsty_retrieverreader_fas|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2021/10/raphaelsty_retrieverreader_fas|title|raphaelsty/RetrieverReader: Fast API QA +http://www.semanlink.net/doc/2021/10/raphaelsty_retrieverreader_fas|bookmarkOf|https://github.com/raphaelsty/RetrieverReader +http://www.semanlink.net/doc/2021/10/raphaelsty_retrieverreader_fas|creationTime|2021-10-04T16:35:24Z +http://www.semanlink.net/doc/2020/07/how_we_created_neuromorphic_eng|creationDate|2020-07-23 +http://www.semanlink.net/doc/2020/07/how_we_created_neuromorphic_eng|tag|http://www.semanlink.net/tag/brains_in_silicon +http://www.semanlink.net/doc/2020/07/how_we_created_neuromorphic_eng|comment|Neuromorphic engineering aims to create computing hardware that mimics biological nervous systems. How it began.
+http://www.semanlink.net/doc/2020/07/how_we_created_neuromorphic_eng|title|How we created neuromorphic engineering Nature Electronics +http://www.semanlink.net/doc/2020/07/how_we_created_neuromorphic_eng|bookmarkOf|https://www.nature.com/articles/s41928-020-0448-2.epdf?sharing_token=be5oH-GZGNyxc0EeTPozu9RgN0jAjWel9jnR3ZoTv0MaUfpX8ghK818KUA_VZS97V4dXry2OgXtDS2KqLsDfgLps7I90LC87d4HpF9JhoRY-Cvy1B_IUCfuzolVkmdez180wB26SMocHTSoO52fY9MqQ0dGPWQJkJPXMTvQrsiI%3D +http://www.semanlink.net/doc/2020/07/how_we_created_neuromorphic_eng|creationTime|2020-07-23T00:08:46Z +http://www.semanlink.net/doc/2021/06/scikit_learn_pipelines_meet_kno|creationDate|2021-06-10 +http://www.semanlink.net/doc/2021/06/scikit_learn_pipelines_meet_kno|tag|http://www.semanlink.net/tag/eswc_2021 +http://www.semanlink.net/doc/2021/06/scikit_learn_pipelines_meet_kno|tag|http://www.semanlink.net/tag/knowledge_graph_ml +http://www.semanlink.net/doc/2021/06/scikit_learn_pipelines_meet_kno|tag|http://www.semanlink.net/tag/scikit_learn +http://www.semanlink.net/doc/2021/06/scikit_learn_pipelines_meet_kno|comment|"> The kgextension package allows to access and use Linked Open Data to augment existing datasets for improving a classification/clustering task. + +How to create data analysis pipeline using background knowledge from knowledge graphs + +[Github](https://github.com/om-hb/kgextension/blob/master/examples/book_genre_prediction.ipynb)" +http://www.semanlink.net/doc/2021/06/scikit_learn_pipelines_meet_kno|title|scikit-learn Pipelines meet Knowledge Graphs - The Python kgextension Package ESWC 2021 +http://www.semanlink.net/doc/2021/06/scikit_learn_pipelines_meet_kno|bookmarkOf|https://openreview.net/forum?id=2FQgZhV-LDL +http://www.semanlink.net/doc/2021/06/scikit_learn_pipelines_meet_kno|creationTime|2021-06-10T15:59:47Z +http://www.semanlink.net/doc/2021/06/efficient_open_domain_question_|creationDate|2021-06-06 +http://www.semanlink.net/doc/2021/06/efficient_open_domain_question_|tag|http://www.semanlink.net/tag/dense_passage_retrieval +http://www.semanlink.net/doc/2021/06/efficient_open_domain_question_|title|Efficient open-domain question-answering on Vespa.ai Vespa Blog +http://www.semanlink.net/doc/2021/06/efficient_open_domain_question_|bookmarkOf|https://blog.vespa.ai/efficient-open-domain-question-answering-on-vespa/ +http://www.semanlink.net/doc/2021/06/efficient_open_domain_question_|creationTime|2021-06-06T08:56:08Z +http://www.semanlink.net/doc/2021/03/nomade_des_mers_les_escales_de|creationDate|2021-03-14 +http://www.semanlink.net/doc/2021/03/nomade_des_mers_les_escales_de|tag|http://www.semanlink.net/tag/low_tech +http://www.semanlink.net/doc/2021/03/nomade_des_mers_les_escales_de|tag|http://www.semanlink.net/tag/innovation +http://www.semanlink.net/doc/2021/03/nomade_des_mers_les_escales_de|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.semanlink.net/doc/2021/03/nomade_des_mers_les_escales_de|tag|http://www.semanlink.net/tag/voyage +http://www.semanlink.net/doc/2021/03/nomade_des_mers_les_escales_de|comment|Sur le catamaran Nomade des mers, l'ingénieur Corentin de Chatelperron voyage à travers le monde à la découverte des perspectives de la low-tech.
+http://www.semanlink.net/doc/2021/03/nomade_des_mers_les_escales_de|title|Nomade des mers, les escales de l'innovation +http://www.semanlink.net/doc/2021/03/nomade_des_mers_les_escales_de|bookmarkOf|https://www.arte.tv/fr/videos/062238-004-A/nomade-des-mers-les-escales-de-l-innovation/ +http://www.semanlink.net/doc/2021/03/nomade_des_mers_les_escales_de|creationTime|2021-03-14T17:55:53Z +http://www.semanlink.net/doc/2020/07/finding_similar_documents_with_|creationDate|2020-07-10 +http://www.semanlink.net/doc/2020/07/finding_similar_documents_with_|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2020/07/finding_similar_documents_with_|tag|http://www.semanlink.net/tag/identification_of_similar_documents +http://www.semanlink.net/doc/2020/07/finding_similar_documents_with_|tag|http://www.semanlink.net/tag/nlp_and_search +http://www.semanlink.net/doc/2020/07/finding_similar_documents_with_|tag|http://www.semanlink.net/tag/faiss +http://www.semanlink.net/doc/2020/07/finding_similar_documents_with_|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.semanlink.net/doc/2020/07/finding_similar_documents_with_|title|Finding similar documents with transformers · Codegram +http://www.semanlink.net/doc/2020/07/finding_similar_documents_with_|bookmarkOf|https://www.codegram.com/blog/finding-similar-documents-with-transformers/ +http://www.semanlink.net/doc/2020/07/finding_similar_documents_with_|creationTime|2020-07-10T09:30:37Z +http://www.semanlink.net/doc/2021/01/why_generative_modeling|creationDate|2021-01-12 +http://www.semanlink.net/doc/2021/01/why_generative_modeling|tag|http://www.semanlink.net/tag/deep_generative_modeling +http://www.semanlink.net/doc/2021/01/why_generative_modeling|tag|http://www.semanlink.net/tag/generative_model +http://www.semanlink.net/doc/2021/01/why_generative_modeling|tag|http://www.semanlink.net/tag/uncertainty_in_deep_learning +http://www.semanlink.net/doc/2021/01/why_generative_modeling|comment|> if we want to build AI systems that make reliable decisions and can communicate with us, human beings, they must understand the environment first. For this purpose, they cannot simply learn how to make decisions, but they should be able to quantify their beliefs about their surrounding using the language of probability (Bishop, 2013; Ghahramani, 2015). In order to do that, we claim that estimating the distribution over objects, p(x), is crucial. 
+http://www.semanlink.net/doc/2021/01/why_generative_modeling|title|Why generative modeling +http://www.semanlink.net/doc/2021/01/why_generative_modeling|bookmarkOf|https://jmtomczak.github.io/blog/1_introduction.html +http://www.semanlink.net/doc/2021/01/why_generative_modeling|creationTime|2021-01-12T17:11:52Z +http://www.semanlink.net/doc/2021/01/plantu_2020|creationDate|2021-01-02 +http://www.semanlink.net/doc/2021/01/plantu_2020|tag|http://www.semanlink.net/tag/2020 +http://www.semanlink.net/doc/2021/01/plantu_2020|tag|http://www.semanlink.net/tag/plantu +http://www.semanlink.net/doc/2021/01/plantu_2020|title|Plantu 2020 +http://www.semanlink.net/doc/2021/01/plantu_2020|bookmarkOf|https://pbs.twimg.com/media/Equob8UXMAEVn2w?format=jpg&name=large +http://www.semanlink.net/doc/2021/01/plantu_2020|creationTime|2021-01-02T22:47:45Z +http://www.semanlink.net/doc/2021/08/ige_le_crochemelier|creationDate|2021-08-09 +http://www.semanlink.net/doc/2021/08/ige_le_crochemelier|tag|http://www.semanlink.net/tag/crochemelier +http://www.semanlink.net/doc/2021/08/ige_le_crochemelier|title|Igé – Le Crochemélier +http://www.semanlink.net/doc/2021/08/ige_le_crochemelier|bookmarkOf|https://journals.openedition.org/adlfi/7563 +http://www.semanlink.net/doc/2021/08/ige_le_crochemelier|creationTime|2021-08-09T22:28:50Z +http://www.semanlink.net/doc/2021/06/how_dense_passage_retrievers_d|creationDate|2021-06-23 +http://www.semanlink.net/doc/2021/06/how_dense_passage_retrievers_d|tag|http://www.semanlink.net/tag/dense_passage_retrieval +http://www.semanlink.net/doc/2021/06/how_dense_passage_retrievers_d|title|How Dense Passage Retrievers (DPR) Work Towards Data Science +http://www.semanlink.net/doc/2021/06/how_dense_passage_retrievers_d|bookmarkOf|https://towardsdatascience.com/how-to-create-an-answer-from-a-question-with-dpr-d76e29cc5d60 +http://www.semanlink.net/doc/2021/06/how_dense_passage_retrievers_d|creationTime|2021-06-23T02:38:57Z +http://www.semanlink.net/doc/2020/09/%C2%AB_pourquoi_les_autorites_sont_e|creationDate|2020-09-05 +http://www.semanlink.net/doc/2020/09/%C2%AB_pourquoi_les_autorites_sont_e|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/09/%C2%AB_pourquoi_les_autorites_sont_e|tag|http://www.semanlink.net/tag/risk_analysis +http://www.semanlink.net/doc/2020/09/%C2%AB_pourquoi_les_autorites_sont_e|tag|http://www.semanlink.net/tag/rechauffement_climatique +http://www.semanlink.net/doc/2020/09/%C2%AB_pourquoi_les_autorites_sont_e|comment|cela dit, elles sont nulles dans les 2 cas +http://www.semanlink.net/doc/2020/09/%C2%AB_pourquoi_les_autorites_sont_e|title|« Pourquoi les autorités sont-elles si prudentes pour le Covid-19 et pas pour le réchauffement climatique ? »
+http://www.semanlink.net/doc/2020/09/%C2%AB_pourquoi_les_autorites_sont_e|bookmarkOf|https://www.lemonde.fr/idees/article/2020/09/04/on-peut-s-interroger-sur-l-adoption-de-politiques-de-confinement-qui-paralysent-les-economies_6050976_3232.html +http://www.semanlink.net/doc/2020/09/%C2%AB_pourquoi_les_autorites_sont_e|creationTime|2020-09-05T18:58:54Z +http://www.semanlink.net/doc/2021/10/bigscience_research_workshop_su|creationDate|2021-10-07 +http://www.semanlink.net/doc/2021/10/bigscience_research_workshop_su|tag|http://www.semanlink.net/tag/low_resource_languages +http://www.semanlink.net/doc/2021/10/bigscience_research_workshop_su|tag|http://www.semanlink.net/tag/huggingface_bigscience +http://www.semanlink.net/doc/2021/10/bigscience_research_workshop_su|title|"BigScience Research Workshop sur Twitter : ""Come help us improve language resource visibility over the next week...""" +http://www.semanlink.net/doc/2021/10/bigscience_research_workshop_su|bookmarkOf|https://twitter.com/BigscienceW/status/1446020624455974912 +http://www.semanlink.net/doc/2021/10/bigscience_research_workshop_su|creationTime|2021-10-07T12:05:24Z +http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati|creationDate|2021-02-23 +http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati|tag|http://www.semanlink.net/tag/zero_shot_text_classifier +http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati|comment|This script provides a way to improve the speed and memory performance of a zero-shot classifier by training a more efficient student model from the zero-shot teacher's predictions over an unlabeled dataset.
+http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati|title|Zero-shot classifier distillation at master · huggingface/transformers +http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati|bookmarkOf|https://github.com/huggingface/transformers/tree/master/examples/research_projects/zero-shot-distillation +http://www.semanlink.net/doc/2021/02/zero_shot_classifier_distillati|creationTime|2021-02-23T13:54:22Z +http://www.semanlink.net/doc/2020/12/initiation_aux_etudes_historiqu|creationDate|2020-12-30 +http://www.semanlink.net/doc/2020/12/initiation_aux_etudes_historiqu|tag|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/doc/2020/12/initiation_aux_etudes_historiqu|tag|http://www.semanlink.net/tag/patrick_boucheron +http://www.semanlink.net/doc/2020/12/initiation_aux_etudes_historiqu|title|INITIATION AUX ÉTUDES HISTORIQUES +http://www.semanlink.net/doc/2020/12/initiation_aux_etudes_historiqu|bookmarkOf|https://reader.epubcloudservice.com/process/reader/book.php?ean=9782380941210 +http://www.semanlink.net/doc/2020/12/initiation_aux_etudes_historiqu|creationTime|2020-12-30T11:13:42Z +http://www.semanlink.net/doc/2021/09/nmt_training_through_the_lens_o|creationDate|2021-09-07 +http://www.semanlink.net/doc/2021/09/nmt_training_through_the_lens_o|tag|http://www.semanlink.net/tag/neural_machine_translation +http://www.semanlink.net/doc/2021/09/nmt_training_through_the_lens_o|tag|http://www.semanlink.net/tag/statistical_machine_translation +http://www.semanlink.net/doc/2021/09/nmt_training_through_the_lens_o|comment|[twitter](https://twitter.com/lena_voita/status/1434891467600941056) +http://www.semanlink.net/doc/2021/09/nmt_training_through_the_lens_o|title|NMT Training through the Lens of SMT +http://www.semanlink.net/doc/2021/09/nmt_training_through_the_lens_o|bookmarkOf|https://lena-voita.github.io/posts/nmt_training_through_smt_lens.html +http://www.semanlink.net/doc/2021/09/nmt_training_through_the_lens_o|creationTime|2021-09-07T00:53:42Z +http://www.semanlink.net/doc/2020/12/understanding_graph_embeddings%7C|creationDate|2020-12-06 +http://www.semanlink.net/doc/2020/12/understanding_graph_embeddings%7C|tag|http://www.semanlink.net/tag/recommender_systems +http://www.semanlink.net/doc/2020/12/understanding_graph_embeddings%7C|tag|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/doc/2020/12/understanding_graph_embeddings%7C|comment|"> Graph embeddings are data structures used for fast-comparison of +similar data structures" +http://www.semanlink.net/doc/2020/12/understanding_graph_embeddings%7C|title|Understanding Graph Embeddings by Dan McCreary Nov, 2020 Medium +http://www.semanlink.net/doc/2020/12/understanding_graph_embeddings%7C|bookmarkOf|https://dmccreary.medium.com/understanding-graph-embeddings-79342921a97f +http://www.semanlink.net/doc/2020/12/understanding_graph_embeddings%7C|creationTime|2020-12-06T10:21:10Z +http://www.semanlink.net/doc/2021/10/sentence_embeddings_and_transfo|creationDate|2021-10-23 +http://www.semanlink.net/doc/2021/10/sentence_embeddings_and_transfo|tag|http://www.semanlink.net/tag/sbert +http://www.semanlink.net/doc/2021/10/sentence_embeddings_and_transfo|title|Sentence Embeddings and Transformers Pinecone +http://www.semanlink.net/doc/2021/10/sentence_embeddings_and_transfo|bookmarkOf|https://www.pinecone.io/learn/sentence-embeddings/ +http://www.semanlink.net/doc/2021/10/sentence_embeddings_and_transfo|creationTime|2021-10-23T01:04:37Z 
+http://www.semanlink.net/doc/2021/10/%C2%AB_pandora_papers_%C2%BB_plongee_mo|creationDate|2021-10-03 +http://www.semanlink.net/doc/2021/10/%C2%AB_pandora_papers_%C2%BB_plongee_mo|tag|http://www.semanlink.net/tag/pandora_papers +http://www.semanlink.net/doc/2021/10/%C2%AB_pandora_papers_%C2%BB_plongee_mo|title|« Pandora Papers » : plongée mondiale dans les secrets de la finance offshore +http://www.semanlink.net/doc/2021/10/%C2%AB_pandora_papers_%C2%BB_plongee_mo|bookmarkOf|https://www.lemonde.fr/les-decodeurs/article/2021/10/03/pandora-papers-plongee-mondiale-dans-les-secrets-de-la-finance-offshore_6096967_4355770.html +http://www.semanlink.net/doc/2021/10/%C2%AB_pandora_papers_%C2%BB_plongee_mo|creationTime|2021-10-03T22:39:05Z +http://www.semanlink.net/doc/2021/10/peter_bloem_sur_twitter_clev|creationDate|2021-10-20 +http://www.semanlink.net/doc/2021/10/peter_bloem_sur_twitter_clev|tag|http://www.semanlink.net/tag/data_augmentation +http://www.semanlink.net/doc/2021/10/peter_bloem_sur_twitter_clev|tag|http://www.semanlink.net/tag/peter_bloem +http://www.semanlink.net/doc/2021/10/peter_bloem_sur_twitter_clev|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2021/10/peter_bloem_sur_twitter_clev|title|"Peter Bloem sur Twitter : ""Clever idea. When you use augmentation, why throw away the information of which instances are augmentations of each other?"" / Twitter" +http://www.semanlink.net/doc/2021/10/peter_bloem_sur_twitter_clev|bookmarkOf|https://twitter.com/pbloemesquire/status/1450699484078432258 +http://www.semanlink.net/doc/2021/10/peter_bloem_sur_twitter_clev|creationTime|2021-10-20T08:32:58Z +http://www.semanlink.net/doc/2021/07/%C2%AB_amkoullel_l%E2%80%99enfant_peul_%C2%BB_f|creationDate|2021-07-26 +http://www.semanlink.net/doc/2021/07/%C2%AB_amkoullel_l%E2%80%99enfant_peul_%C2%BB_f|tag|http://www.semanlink.net/tag/fulani +http://www.semanlink.net/doc/2021/07/%C2%AB_amkoullel_l%E2%80%99enfant_peul_%C2%BB_f|tag|http://www.semanlink.net/tag/amadou_hampate_ba +http://www.semanlink.net/doc/2021/07/%C2%AB_amkoullel_l%E2%80%99enfant_peul_%C2%BB_f|title|« Amkoullel, l’enfant peul », fresque vivante d’une jeunesse malienne au début du XXe siècle +http://www.semanlink.net/doc/2021/07/%C2%AB_amkoullel_l%E2%80%99enfant_peul_%C2%BB_f|bookmarkOf|https://www.lemonde.fr/afrique/article/2021/07/24/amkoullel-l-enfant-peul-fresque-vivante-d-une-jeunesse-malienne-au-debut-du-xxe-siecle_6089402_3212.html +http://www.semanlink.net/doc/2021/07/%C2%AB_amkoullel_l%E2%80%99enfant_peul_%C2%BB_f|creationTime|2021-07-26T21:11:08Z +http://www.semanlink.net/doc/2020/12/knowledge_base_embedding_by_coo|creationDate|2020-12-05 +http://www.semanlink.net/doc/2020/12/knowledge_base_embedding_by_coo|tag|http://www.semanlink.net/tag/kd_mkb_paper +http://www.semanlink.net/doc/2020/12/knowledge_base_embedding_by_coo|title|Knowledge Base Embedding By Cooperative Knowledge Distillation - ACL Anthology +http://www.semanlink.net/doc/2020/12/knowledge_base_embedding_by_coo|bookmarkOf|https://www.aclweb.org/anthology/2020.coling-main.489/ +http://www.semanlink.net/doc/2020/12/knowledge_base_embedding_by_coo|creationTime|2020-12-05T11:03:01Z +http://www.semanlink.net/doc/2020/06/ibm_research_addressing_enterpr|creationDate|2020-06-12 +http://www.semanlink.net/doc/2020/06/ibm_research_addressing_enterpr|tag|http://www.semanlink.net/tag/nlp_ibm +http://www.semanlink.net/doc/2020/06/ibm_research_addressing_enterpr|tag|http://www.semanlink.net/tag/nlp_in_enterprise 
+http://www.semanlink.net/doc/2020/06/ibm_research_addressing_enterpr|title|IBM Research addressing Enterprise NLP challenges in 2020 +http://www.semanlink.net/doc/2020/06/ibm_research_addressing_enterpr|bookmarkOf|https://medium.com/@sroukos/ibm-research-addressing-enterprise-nlp-challenges-in-2020-a4b28603c0c +http://www.semanlink.net/doc/2020/06/ibm_research_addressing_enterpr|creationTime|2020-06-12T09:41:21Z +http://www.semanlink.net/doc/2020/10/clarifying_exceptions_and_visua|creationDate|2020-10-07 +http://www.semanlink.net/doc/2020/10/clarifying_exceptions_and_visua|tag|http://www.semanlink.net/tag/debug_deep_learning +http://www.semanlink.net/doc/2020/10/clarifying_exceptions_and_visua|title|Clarifying exceptions and visualizing tensor operations in deep learning code +http://www.semanlink.net/doc/2020/10/clarifying_exceptions_and_visua|bookmarkOf|https://explained.ai/tensor-sensor/index.html +http://www.semanlink.net/doc/2020/10/clarifying_exceptions_and_visua|creationTime|2020-10-07T08:34:39Z +http://www.semanlink.net/doc/2020/09/transfer_learning_machine_lea|creationDate|2020-09-24 +http://www.semanlink.net/doc/2020/09/transfer_learning_machine_lea|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2020/09/transfer_learning_machine_lea|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/09/transfer_learning_machine_lea|tag|http://www.semanlink.net/tag/transfer_learning +http://www.semanlink.net/doc/2020/09/transfer_learning_machine_lea|title|Transfer Learning - Machine Learning's Next Frontier (2017) +http://www.semanlink.net/doc/2020/09/transfer_learning_machine_lea|bookmarkOf|https://ruder.io/transfer-learning/ +http://www.semanlink.net/doc/2020/09/transfer_learning_machine_lea|creationTime|2020-09-24T19:14:14Z +http://www.semanlink.net/doc/2020/08/amit_chaudhary_sur_twitter_h|creationDate|2020-08-23 +http://www.semanlink.net/doc/2020/08/amit_chaudhary_sur_twitter_h|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/08/amit_chaudhary_sur_twitter_h|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/08/amit_chaudhary_sur_twitter_h|title|"Amit Chaudhary sur Twitter : ""How to learn transformers:...""" +http://www.semanlink.net/doc/2020/08/amit_chaudhary_sur_twitter_h|bookmarkOf|https://twitter.com/amitness/status/1297471740012982273?s=20 +http://www.semanlink.net/doc/2020/08/amit_chaudhary_sur_twitter_h|creationTime|2020-08-23T23:44:40Z +http://www.semanlink.net/doc/2020/10/html_javascript_network_visua|creationDate|2020-10-18 +http://www.semanlink.net/doc/2020/10/html_javascript_network_visua|tag|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/doc/2020/10/html_javascript_network_visua|tag|http://www.semanlink.net/tag/graph_visualization +http://www.semanlink.net/doc/2020/10/html_javascript_network_visua|title|html - JavaScript network visualization? - Stack Overflow
+http://www.semanlink.net/doc/2020/10/html_javascript_network_visua|bookmarkOf|https://stackoverflow.com/questions/10886705/javascript-network-visualization +http://www.semanlink.net/doc/2020/10/html_javascript_network_visua|creationTime|2020-10-18T23:03:23Z +http://www.semanlink.net/doc/2020/12/digital_billboards_are_tracking|creationDate|2020-12-06 +http://www.semanlink.net/doc/2020/12/digital_billboards_are_tracking|tag|http://www.semanlink.net/tag/targeted_ads +http://www.semanlink.net/doc/2020/12/digital_billboards_are_tracking|comment|"[Tweet](https://twitter.com/thomasgermain/status/1197201725708476422) + +> the out-of-home advertising business is adopting the model that runs ads on the web + +> Today, the internet is altering the way we experience the physical world + +so it should be hackable" +http://www.semanlink.net/doc/2020/12/digital_billboards_are_tracking|title|Digital Billboards Are Tracking You - Consumer Reports +http://www.semanlink.net/doc/2020/12/digital_billboards_are_tracking|bookmarkOf|https://www.consumerreports.org/privacy/digital-billboards-are-tracking-you-and-they-want-you-to-see-their-ads/ +http://www.semanlink.net/doc/2020/12/digital_billboards_are_tracking|creationTime|2020-12-06T12:21:17Z +http://www.semanlink.net/doc/2020/11/tree_of_life_web_project|creationDate|2020-11-29 +http://www.semanlink.net/doc/2020/11/tree_of_life_web_project|tag|http://www.semanlink.net/tag/tree_of_life +http://www.semanlink.net/doc/2020/11/tree_of_life_web_project|title|Tree of Life Web Project +http://www.semanlink.net/doc/2020/11/tree_of_life_web_project|bookmarkOf|http://tolweb.org/tree/ +http://www.semanlink.net/doc/2020/11/tree_of_life_web_project|creationTime|2020-11-29T10:55:25Z +http://www.semanlink.net/doc/2021/08/cynthia_fleury|creationDate|2021-08-07 +http://www.semanlink.net/doc/2021/08/cynthia_fleury|tag|http://www.semanlink.net/tag/philosophe +http://www.semanlink.net/doc/2021/08/cynthia_fleury|comment|Supporte le [Universal basic income](tag:guaranteed_basic_income) +http://www.semanlink.net/doc/2021/08/cynthia_fleury|title|Cynthia Fleury +http://www.semanlink.net/doc/2021/08/cynthia_fleury|bookmarkOf|https://fr.wikipedia.org/wiki/Cynthia_Fleury +http://www.semanlink.net/doc/2021/08/cynthia_fleury|creationTime|2021-08-07T20:23:19Z +http://www.semanlink.net/doc/2020/12/google_tapas_base_finetuned_wtq|creationDate|2020-12-17 +http://www.semanlink.net/doc/2020/12/google_tapas_base_finetuned_wtq|tag|http://www.semanlink.net/tag/structured_data_embedding +http://www.semanlink.net/doc/2020/12/google_tapas_base_finetuned_wtq|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2020/12/google_tapas_base_finetuned_wtq|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/12/google_tapas_base_finetuned_wtq|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2020/12/google_tapas_base_finetuned_wtq|comment|> a BERT-like transformers model pretrained on a large corpus of English data from Wikipedia in a self-supervised fashion +http://www.semanlink.net/doc/2020/12/google_tapas_base_finetuned_wtq|title|google/tapas-base-finetuned-wtq · Hugging Face +http://www.semanlink.net/doc/2020/12/google_tapas_base_finetuned_wtq|bookmarkOf|https://huggingface.co/google/tapas-base-finetuned-wtq +http://www.semanlink.net/doc/2020/12/google_tapas_base_finetuned_wtq|creationTime|2020-12-17T22:40:56Z +http://www.semanlink.net/doc/2021/02/raphaelsty_kdmlm_combine_knowl|creationDate|2021-02-16
+http://www.semanlink.net/doc/2021/02/raphaelsty_kdmlm_combine_knowl|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2021/02/raphaelsty_kdmlm_combine_knowl|tag|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/doc/2021/02/raphaelsty_kdmlm_combine_knowl|tag|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/doc/2021/02/raphaelsty_kdmlm_combine_knowl|title|raphaelsty/kdmlm: Combine knowledge bases with language models. +http://www.semanlink.net/doc/2021/02/raphaelsty_kdmlm_combine_knowl|bookmarkOf|https://github.com/raphaelsty/kdmlm +http://www.semanlink.net/doc/2021/02/raphaelsty_kdmlm_combine_knowl|creationTime|2021-02-16T10:37:25Z +http://www.semanlink.net/doc/2020/09/%C2%AB_les_neonicotinoides_sont_des_|creationDate|2020-09-13 +http://www.semanlink.net/doc/2020/09/%C2%AB_les_neonicotinoides_sont_des_|tag|http://www.semanlink.net/tag/neonicotinoides +http://www.semanlink.net/doc/2020/09/%C2%AB_les_neonicotinoides_sont_des_|comment|> Les poissons ne vont pas butiner dans les rizières +http://www.semanlink.net/doc/2020/09/%C2%AB_les_neonicotinoides_sont_des_|title|« Les néonicotinoïdes sont des substances trop efficaces et trop persistantes pour que leur usage puisse être contrôlé » +http://www.semanlink.net/doc/2020/09/%C2%AB_les_neonicotinoides_sont_des_|bookmarkOf|https://www.lemonde.fr/idees/article/2020/09/12/les-neonicotinoides-sont-des-substances-trop-efficaces-et-trop-persistantes-pour-que-leur-usage-puisse-etre-controle_6051948_3232.html +http://www.semanlink.net/doc/2020/09/%C2%AB_les_neonicotinoides_sont_des_|creationTime|2020-09-13T09:48:35Z +http://www.semanlink.net/doc/2020/11/visual_rdf|creationDate|2020-11-29 +http://www.semanlink.net/doc/2020/11/visual_rdf|tag|http://www.semanlink.net/tag/graph_visualization +http://www.semanlink.net/doc/2020/11/visual_rdf|tag|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/doc/2020/11/visual_rdf|title|Visual RDF +http://www.semanlink.net/doc/2020/11/visual_rdf|bookmarkOf|http://cltl.nl/visualrdf/?url=http%3A%2F%2Fwww.semanlink.net%2Ftag%2Fniger.rdf +http://www.semanlink.net/doc/2020/11/visual_rdf|creationTime|2020-11-29T09:24:51Z +http://www.semanlink.net/doc/2020/11/chris_olah_sur_twitter_today|creationDate|2020-11-29 +http://www.semanlink.net/doc/2020/11/chris_olah_sur_twitter_today|tag|http://www.semanlink.net/tag/christopher_olah +http://www.semanlink.net/doc/2020/11/chris_olah_sur_twitter_today|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/11/chris_olah_sur_twitter_today|tag|http://www.semanlink.net/tag/tree_of_life +http://www.semanlink.net/doc/2020/11/chris_olah_sur_twitter_today|title|"Chris Olah sur Twitter : ""Today I realized I know almost nothing about the tree of life... What's the best resource for learning?"""
+http://www.semanlink.net/doc/2020/11/chris_olah_sur_twitter_today|bookmarkOf|https://twitter.com/ch402/status/1332505748144721922 +http://www.semanlink.net/doc/2020/11/chris_olah_sur_twitter_today|creationTime|2020-11-29T10:29:08Z +http://www.semanlink.net/doc/2020/09/rdf2vec_org|creationDate|2020-09-02 +http://www.semanlink.net/doc/2020/09/rdf2vec_org|tag|http://www.semanlink.net/tag/rdf2vec +http://www.semanlink.net/doc/2020/09/rdf2vec_org|title|RDF2vec.org +http://www.semanlink.net/doc/2020/09/rdf2vec_org|bookmarkOf|http://www.rdf2vec.org/ +http://www.semanlink.net/doc/2020/09/rdf2vec_org|creationTime|2020-09-02T16:18:07Z +http://www.semanlink.net/doc/2020/10/nasa%E2%80%99s_osiris_rex_spacecraft_co|creationDate|2020-10-26 +http://www.semanlink.net/doc/2020/10/nasa%E2%80%99s_osiris_rex_spacecraft_co|tag|http://www.semanlink.net/tag/osiris_rex +http://www.semanlink.net/doc/2020/10/nasa%E2%80%99s_osiris_rex_spacecraft_co|title|NASA’s OSIRIS-REx Spacecraft Collects Significant Amount of Asteroid NASA +http://www.semanlink.net/doc/2020/10/nasa%E2%80%99s_osiris_rex_spacecraft_co|bookmarkOf|https://www.nasa.gov/press-release/nasa-s-osiris-rex-spacecraft-collects-significant-amount-of-asteroid +http://www.semanlink.net/doc/2020/10/nasa%E2%80%99s_osiris_rex_spacecraft_co|creationTime|2020-10-26T23:39:02Z +http://www.semanlink.net/doc/2020/09/leveraging_just_a_few_keywords_|creationDate|2020-09-06 +http://www.semanlink.net/doc/2020/09/leveraging_just_a_few_keywords_|tag|http://www.semanlink.net/tag/emnlp_2019 +http://www.semanlink.net/doc/2020/09/leveraging_just_a_few_keywords_|tag|http://www.semanlink.net/tag/co_training +http://www.semanlink.net/doc/2020/09/leveraging_just_a_few_keywords_|tag|http://www.semanlink.net/tag/aspect_detection +http://www.semanlink.net/doc/2020/09/leveraging_just_a_few_keywords_|title|Leveraging Just a Few Keywords for Fine-Grained Aspect Detection Through Weakly Supervised Co-Training - ACL Anthology (2019) +http://www.semanlink.net/doc/2020/09/leveraging_just_a_few_keywords_|bookmarkOf|https://www.aclweb.org/anthology/D19-1468/ +http://www.semanlink.net/doc/2020/09/leveraging_just_a_few_keywords_|creationTime|2020-09-06T16:43:10Z +http://www.semanlink.net/doc/2020/11/text_enhanced_representation_le|creationDate|2020-11-03 +http://www.semanlink.net/doc/2020/11/text_enhanced_representation_le|tag|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/doc/2020/11/text_enhanced_representation_le|tag|http://www.semanlink.net/tag/ijcai +http://www.semanlink.net/doc/2020/11/text_enhanced_representation_le|title|Text-Enhanced Representation Learning for Knowledge Graph (IJCAI 2016) +http://www.semanlink.net/doc/2020/11/text_enhanced_representation_le|bookmarkOf|https://www.ijcai.org/Proceedings/16/Papers/187.pdf +http://www.semanlink.net/doc/2020/11/text_enhanced_representation_le|creationTime|2020-11-03T17:50:54Z +http://www.semanlink.net/doc/2021/05/adventures_in_zero_shot_text_cl|creationDate|2021-05-25 +http://www.semanlink.net/doc/2021/05/adventures_in_zero_shot_text_cl|tag|http://www.semanlink.net/tag/zero_shot +http://www.semanlink.net/doc/2021/05/adventures_in_zero_shot_text_cl|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2021/05/adventures_in_zero_shot_text_cl|tag|http://www.semanlink.net/tag/yves_peirsman +http://www.semanlink.net/doc/2021/05/adventures_in_zero_shot_text_cl|title|Adventures in Zero-Shot Text Classification
+http://www.semanlink.net/doc/2021/05/adventures_in_zero_shot_text_cl|bookmarkOf|https://nlp.town/blog/zero-shot-classification/ +http://www.semanlink.net/doc/2021/05/adventures_in_zero_shot_text_cl|creationTime|2021-05-25T16:02:20Z +http://www.semanlink.net/doc/2020/12/le_niger_appele_a_elire_le_succ|creationDate|2020-12-27 +http://www.semanlink.net/doc/2020/12/le_niger_appele_a_elire_le_succ|tag|http://www.semanlink.net/tag/niger +http://www.semanlink.net/doc/2020/12/le_niger_appele_a_elire_le_succ|comment|> **Il est un fait qui devrait relever de la normalité électorale, mais qui distingue pourtant le Niger des autres pays de la région.** Dimanche 27 décembre, le président sortant ne sera pas candidat à sa réélection, pour un troisième mandat à la tête du pays. La Constitution ne l’y autorisait pas. Mahamadou Issoufou – qui fêtera ses 69 ans le 1er janvier – n’a pas essayé de la réécrire pour s’éterniser au pouvoir. +http://www.semanlink.net/doc/2020/12/le_niger_appele_a_elire_le_succ|title|Le Niger appelé à élire le successeur de Mahamadou Issoufou +http://www.semanlink.net/doc/2020/12/le_niger_appele_a_elire_le_succ|bookmarkOf|https://www.lemonde.fr/afrique/article/2020/12/27/le-niger-appele-a-elire-le-successeur-de-mahamadou-issoufou_6064588_3212.html +http://www.semanlink.net/doc/2020/12/le_niger_appele_a_elire_le_succ|creationTime|2020-12-27T11:55:23Z +http://www.semanlink.net/doc/2021/06/quand_l%E2%80%99intelligence_artificiel|creationDate|2021-06-02 +http://www.semanlink.net/doc/2021/06/quand_l%E2%80%99intelligence_artificiel|tag|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/doc/2021/06/quand_l%E2%80%99intelligence_artificiel|tag|http://www.semanlink.net/tag/image_classification +http://www.semanlink.net/doc/2021/06/quand_l%E2%80%99intelligence_artificiel|title|Quand l’intelligence artificielle devient une alliée des archéologues +http://www.semanlink.net/doc/2021/06/quand_l%E2%80%99intelligence_artificiel|bookmarkOf|https://www.clubic.com/technologies-d-avenir/intelligence-artificielle/actualite-372503-quand-l-intelligence-artificielle-devient-une-alliee-des-archeologues.html +http://www.semanlink.net/doc/2021/06/quand_l%E2%80%99intelligence_artificiel|creationTime|2021-06-02T10:10:41Z
diff --git a/ckb/datasets/semanlink/train.csv b/ckb/datasets/semanlink/train.csv
index 1523f76..1fb09f4 100644
--- a/ckb/datasets/semanlink/train.csv
+++ b/ckb/datasets/semanlink/train.csv
@@ -1,6422 +1,78747 @@
-Afrique du Sud|skos:broader|Afrique australe -- Siamese network with two deep sub-models - Projects input and candidate texts into embedding space - Trained by maximizing cosine similarity between correct input-output pairs [source](/doc/2019/08/neural_models_for_information_r)|skos:broader|In practice, many NLP applications rely on a simple sentence embedding: the average of the embeddings of the words in it. We can do better. Ex of use (besides trivial ones such as classification and similarity): use sentence embeddings to cluster sentences in documents, which aids in the automatic extraction of key information from large bodies of text. -Markus Lanthaler|skos:broader|Technical girls and guys -Google Spreadsheets|skos:broader|Spreadsheets -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces.
We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:arxiv_author|Michal Růžička
-Safari|skos:broader|Brouteur
-Cosmic inflation|skos:broader|Cosmologie
-Lost City|skos:broader|Océan
-Linked Data Exploration|skos:broader|Linked Data
-Self-driving car|skos:broader|Robotics Robot
-Catastrophe écologique|skos:broader|Écologie
-Bag-of-words|skos:broader|NLP: Text Representation
-Neural Models for Information Retrieval|skos:broader|Neural networks
-Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:arxiv_firstAuthor|Jan A. Botha
-RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. 
In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|sl:arxiv_author|Zhiqing Sun
-Linking Enterprise Data|skos:broader|Linked Data
-France : bureaucratie|skos:broader|Bureaucratie
-StackOverFlow Q|skos:broader|Stack Overflow
-Named Entity Disambiguation using Deep Learning on Graphs Evaluation of different deep learning techniques to create a context vector from graphs, aimed at high-accuracy NED. (neural approach for entity disambiguation using graphs as background knowledge) We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task... [published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) In NED, the system must be able to generate a context for an entity in a text and an entity in a knowledge base, then correctly link the two. Explore whether representing graphs as triplets is more useful than using the full topological information of the graph We tackle \\ac{NED} by comparing entities in short sentences with \\wikidata{} graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to \\ac{NED}. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task. In addition, a new dataset (\\wikidatadisamb{}) is created to allow a clean and scalable evaluation of \\ac{NED} with \\wikidata{} entries, and to be used as a reference in future research. In the end our results show that a \\ac{Bi-LSTM} encoding of the graph triplets performs best, improving upon the baseline models and scoring an \\rm{F1} value of $91.6\\%$ on the \\wikidatadisamb{} test set|sl:arxiv_author|Alberto Cetoli
-MOAT|skos:broader|RDF Vocabularies
-Ranking SVM|skos:broader|Support vector machine
-LibShortText|skos:broader|NLP tools
-BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. 
We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|sl:tag|tag:bertology -Royaume Uni|skos:broader|Pays d'Europe -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:arxiv_firstAuthor|Shijie Wu -HuggingFace|skos:broader|NLP Groups -Gradient descent|skos:broader|Machine learning: techniques -Reinforcement learning|skos:broader|Machine learning: techniques -Java dev|skos:broader|Java -URI|skos:broader|Web architecture -Stromatolithes|skos:broader|Paléontologie -Sentence Embeddings|skos:broader|Embeddings in NLP -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|sl:tag|tag:tomas_mikolov -Romancier|skos:broader|Littérature -Symmetric matrices related to the Mertens function In this paper we explore a family of congruences over N from which a sequence of symmetric matrices related to the Mertens function is built. From the results of numerical experiments we formulate a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. 
This suggests that matrix analysis methods may play a more important role in this classical and difficult problem. In this paper we explore a family of congruences over $\\N^\\ast$ from which one builds a sequence of symmetric matrices related to the Mertens function. From the results of numerical experiments, we formulate a conjecture about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may come to play a more important role in this classical and difficult problem.|sl:tag|tag:arxiv_doc -css|skos:broader|Dev -Droit à l'information|skos:broader|Justice -- Siamese network with two deep sub-models - Projects input and candidate texts into embedding space - Trained by maximizing cosine similarity between correct input-output pairs [source](/doc/2019/08/neural_models_for_information_r)|skos:broader|Introduced in the early 1990s by Bromley and LeCun to solve signature verification as an image matching problem -Histoire de l'Asie|skos:broader|Histoire -Java tip|skos:broader|Dev tips -Feynman|skos:broader|Prix Nobel de physique -Target Entity Disambiguation|skos:broader|Entity linking -Using Information Content to Evaluate Semantic Similarity in a Taxonomy This paper presents a new measure of semantic similarity in an IS-A taxonomy, based on the notion of information content. Experimental evaluation suggests that the measure performs encouragingly well (a correlation of r = 0.79 with a benchmark set of human similarity judgments, with an upper bound of r = 0.90 for human subjects performing the same task), and significantly better than the traditional edge counting approach (r = 0.66).|sl:arxiv_firstAuthor|Philip Resnik -Xenophon|skos:broader|Grèce antique -iMovie|skos:broader|Digital Video -A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. 
Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|sl:tag|tag:tutorial -Les 100 pièges de l'Anglais|skos:broader|Anglais -RDF-OWL documentation tool|skos:broader|OWL tool -Acronyms (NLP)|skos:broader|Named Entity Recognition -Jena|skos:broader|Java dev -Wikilinks Corpus|skos:broader|Named Entity Recognition -Applet|skos:broader|Java -Génétique + Histoire|skos:broader|Genetics Génétique -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:arxiv_firstAuthor|Łukasz Kaiser -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. 
On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Chong Luo
-Hydrogen Cars|skos:broader|Automobile
-No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity... Traditionally, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer’s output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity. Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point $x$ is similar to a set of positive points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize. A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow. In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. 
These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved. As a result, the proxy-loss improves on state-of-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:arxiv_author|Alexander Toshev -JavaScript|skos:broader|Programming language -NLP + juridique|skos:broader|NLP: use cases -Yago|skos:broader|Linked Data -IBM developerWorks|skos:broader|IBM -KZ|skos:broader|Nazisme -Satori|skos:broader|Microsoft Research -Theoretical Impediments to Machine Learning With Seven Sparks from the Causal Revolution To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks Current machine learning systems operate, almost exclusively, in a statistical, or model-free mode, which entails severe theoretical limits on their power and performance. Such systems cannot reason about interventions and retrospection and, therefore, cannot serve as the basis for strong AI. To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks. To demonstrate the essential role of such models, I will present a summary of seven tasks which are beyond reach of current machine learning systems and which have been accomplished using the tools of causal modeling.|sl:arxiv_firstAuthor|Judea Pearl -Obélisque d'Axoum|skos:broader|Italie -Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study data redundancy (reverse relations), Cartesian product relations A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world In the active research area of employing embedding models for knowledge graph completion, particularly for the task of link prediction, most prior studies used two benchmark datasets FB15k and WN18 in evaluating such models. Most triples in these and other datasets in such studies belong to reverse and duplicate relations which exhibit high data redundancy due to semantic duplication, correlation or data incompleteness. This is a case of excessive data leakage---a model is trained using features that otherwise would not be available when the model needs to be applied for real prediction. There are also Cartesian product relations for which every triple formed by the Cartesian product of applicable subjects and objects is a true fact. Link prediction on the aforementioned relations is easy and can be achieved with even better accuracy using straightforward rules instead of sophisticated embedding models. A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world. This paper is the first systematic study with the main objective of assessing the true effectiveness of embedding models when the unrealistic triples are removed. Our experiment results show these models are much less accurate than what we used to perceive. Their poor accuracy renders link prediction a task without truly effective automated solution. 
Hence, we call for re-investigation of possible effective approaches.|sl:arxiv_firstAuthor|Farahnaz Akrami Department of Computer Science and Engineering, University of Texas at Arlington -Javascript closures|skos:broader|Closure -Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model. Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|sl:tag|tag:transfer_learning -Monotrèmes|skos:broader|Animal -Arxiv Doc|skos:broader|Favoris - Pierre-Yves Vandenbussche|skos:broader|SW guys (and girls) -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|sl:arxiv_author|Kazuya Kawakami -sig.ma|skos:broader|Mashups -What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. 
We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:tag|tag:bert -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Cassandra Xia -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. 
We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:tag|tag:arxiv_doc -Coursera: Deep Learning|skos:broader|Coursera -SQL to RDF mapping|skos:broader|SQL -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|sl:tag|tag:arxiv_doc -SIMILE|skos:broader|Open Source -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. 
The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:tag|tag:deep_learning_attention -OAuth2|skos:broader|OAuth -Critique du libéralisme|skos:broader|Politique -Semantic web company|skos:broader|Semantic Web -Thérapie génique|skos:broader|Genetics Génétique -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:arxiv_author|Marc'Aurelio Ranzato -Conscience artificielle|skos:broader|Technological singularity -Microformats|skos:broader|HTML Data -NLP tools|skos:broader|Tools -Nikolai Vavilov|skos:broader|Grand Homme -OWL-Full|skos:broader|OWL -Football|skos:broader|Sport -Apprendre une langue|skos:broader|Langues -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. 
This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|sl:arxiv_firstAuthor|Omer Levy -Semantic web : présentation|skos:broader|Semantic Web -Désertification|skos:broader|Désert -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:arxiv_author|Marcelo Prates -NLP and humanities|skos:broader|NLP: use cases -Corée du Sud|skos:broader|Corée -Smart contracts|skos:broader|Blockchain -Bibliothèque numérique|skos:broader|Digital Media -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:tag|tag:guillaume_lample -Collège|skos:broader|Ecole -Text preprocessing|skos:broader|Text processing -Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. 
However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|sl:arxiv_firstAuthor|Timothy Niven -Apache Mahout|skos:broader|Hadoop -Frameworks|skos:broader|Programming -A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|sl:arxiv_author|Guodong Long -Memory in deep learning|skos:broader|Mémoire (informatique) -Bombay|skos:broader|Inde -Carrot2|skos:broader|NLP tools -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. 
The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:tag|tag:bert -iTunes|skos:broader|Apple -Egit|skos:broader|Eclipse -INRIA|skos:broader|Recherche -Representing Sentences as Low-Rank Subspaces We observe a simple geometry of sentences -- the word representations of a given sentence roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. A sentence of N words is a matrix (300, N) (if 300 is the dim of the word embeddings space). We take the eg. 4 (hyperparam) heaviest singular values - a subspace with dim 4 Similarity between docs: principal angle between the subspaces (reminiscent of cosine similarity) Sentences are important semantic units of natural language. A generic, distributional representation of sentences that can capture the latent semantics is beneficial to multiple downstream applications. We observe a simple geometry of sentences -- the word representations of a given sentence (on average 10.23 words in all SemEval datasets with a standard deviation 4.84) roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. Such an unsupervised representation is empirically validated via semantic textual similarity tasks on 19 different datasets, where it outperforms the sophisticated neural network models, including skip-thought vectors, by 15% on average.|sl:arxiv_author|Jiaqi Mu -Pékin 2008|skos:broader|Pékin -Roméo Dallaire|skos:broader|Génocide rwandais -Identity Crisis in Linked Data|skos:broader|Linked Data -Karen Blixen|skos:broader|Ecrivain -Imprimantes|skos:broader|Devices -One-Shot Learning|skos:broader|Machine learning -Pablo Neruda|skos:broader|Prix Nobel -Pluton|skos:broader|Système solaire -The web sucks|skos:broader|Web -Semantic web: evangelization|skos:broader|Semantic Web -Messenger|skos:broader|Missions spatiales -Solr and NLP|skos:broader|Solr -Ian Hickson|skos:broader|Technical guys -Jeremy Howard|skos:broader|NLP girls and guys -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. 
Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:arxiv_author|Zhe Zhao -Backtranslation|skos:broader|Data Augmentation -Stacking (ensemble learning)|skos:broader|Ensemble learning -Aster Aweke|skos:broader|Ethiopie -Solr (not english only)|skos:broader|Solr -fps: paper|skos:broader|fps -Arundhati Roy|skos:broader|Inde -Online Course Materials|skos:broader|Online Learning -C2G|skos:broader|Configuration -SOAP vs REST|skos:broader|SOAP -Javascript RDF Parser in IE|skos:broader|Internet Explorer -Astronomie multi-signaux|skos:broader|Astronomie -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|sl:arxiv_author|Ilya Kostrikov -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. 
Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|sl:arxiv_firstAuthor|Alexandre Passos
-Equivalence mining|skos:broader|Linking Open Data
-GINCO (Culture)|skos:broader|Thesaurus
-Relations franco-américaines|skos:broader|France
-Caetano Veloso|skos:broader|Musique brésilienne
-Obamacare|skos:broader|Obama
-Grèce antique|skos:broader|Grèce
-Leningrad|skos:broader|Ville
-HTTP Cache|skos:broader|HTTP
-Bijan Parsia|skos:broader|SW guys (and girls)
-Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|sl:tag|tag:text_multi_label_classification
-Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|sl:tag|tag:entity_linking
-Sebastian Germesin|skos:broader|SW guys (and girls)
-Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|sl:arxiv_firstAuthor|Ning Xie
-Zapata|skos:broader|Personnage historique
-Shallow parsing (Chunking)|skos:broader|General NLP tasks
-Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:tag|tag:ludovic_denoyer
-Google Rich Cards|skos:broader|Google
-Javascript closures|skos:broader|Function closures
-TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification. LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. We also perform a comprehensive analysis to demonstrate great future opportunities. The data and code of the dataset are provided in \\url{https://github.com/wenhuchen/Table-Fact-Checking}.|sl:arxiv_author|Hong Wang
-Synchrotron|skos:broader|Physique
-Damian Steer|skos:broader|SW guys (and girls)
-iPod|skos:broader|Devices
-Maïs OGM|skos:broader|Maïs
-NLP + Sem web|skos:broader|Semantic Web
-Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:tag|tag:kd_mkb_biblio
-Named Entity Recognition with Extremely Limited Data Named Entity Search (NES) We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries. We do not propose this as a replacement for NER, but as something to be used for an ephemeral or contextual class of entity, when it does not make sense to label hundreds or thousands of instances to learn a classifier Traditional information retrieval treats named entity recognition as a pre-indexing corpus annotation task, allowing entity tags to be indexed and used during search. Named entity taggers themselves are typically trained on thousands or tens of thousands of examples labeled by humans. However, there is a long tail of named entities classes, and for these cases, labeled data may be impossible to find or justify financially. We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries.|sl:tag|tag:conditional_random_field
-Memory-prediction framework|skos:broader|Machine learning
-Combining numerical and text features|skos:broader|Combining text and structured data (ML-NLP)
-Contrastive Self-Supervised Learning|skos:broader|Representation learning
-Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|sl:tag|tag:these_irit_renault_biblio_initiale
-NLP Teams|skos:broader|AI teams
-Quora Question Pairs|skos:broader|NLP datasets
-Cohn-Bendit|skos:broader|Politique française
-Google: SEO|skos:broader|Google
-Zouk|skos:broader|Antilles
-Linked Data Platform|skos:broader|W3C Working group
-A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|sl:arxiv_author|Philip S. Yu
-Chris Manning|skos:broader|NLP girls and guys
-Porc|skos:broader|Animal
-Knowledge Graphs and NLP|skos:broader|NLP
-Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|sl:tag|tag:doc2vec
-Entity recommendation|skos:broader|Entities
-Convolutional Knowledge Graph Embeddings|skos:broader|Convolutional neural network
-RDF in files|skos:broader|Semanlink related
-Chine / Afrique|skos:broader|Afrique
-Script tag hack|skos:broader|cross-domain data fetching
-Sequence-To-Sequence Encoder-Decoder Architecture|skos:broader|Encoder-Decoder architecture
-fps: paper|skos:broader|fps pres
-Configuration and SW|skos:broader|Configuration
-Paléontologie|skos:broader|Géologie
-GAN|skos:broader|Deep latent variable models
-Sarkozy : immigration|skos:broader|Immigration
-Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:arxiv_author|Luca Costabello
-A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:tag|tag:cross_lingual_nlp
-Nodalities|skos:broader|Talis
-Verts|skos:broader|Écologie
-SPARQL perfs|skos:broader|SPARQL
-The DAO|skos:broader|Ethereum
-Décroissance|skos:broader|Croissance
-RDFj is a set of conventions for<br/>- constructing JSON objects in such a way that they can easily be interpreted as RDF;<br/>- taking RDF and arriving at canonical JSON objects.|skos:broader|A JavaScript library that provides cross-browser XForms, RDFa, and SMIL support.
-Finlande|skos:broader|Scandinavie -Maven tips|skos:broader|Maven -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:tag|tag:multi_task_learning -Global workspace theory|skos:broader|Neuroscience -Jsonld-java|skos:broader|JSON-LD -Spiking Neural Networks SNN|skos:broader|ANN NN Artificial neural network -RDF performance issues|skos:broader|RDF dev -D2RQ|skos:broader|Richard Cyganiak -A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. 
Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|sl:arxiv_author|David Charte -Tribunal Pénal International|skos:broader|Crime contre l'Humanité -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|sl:arxiv_author|Timothy M. Hospedales -Embeddings in IR|skos:broader|IR -Design pattern|skos:broader|Informatique -Atoll|skos:broader|Océan -Graph Embeddings|skos:broader|Graphs+Machine Learning -Cortical.io|skos:broader|Semantic folding -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. 
We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|sl:tag|tag:nlp_short_texts -NLP: negation|skos:broader|NLP tasks / problems -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|sl:arxiv_author|Songtai Dai -Méroé|skos:broader|Nubie -Origines de l'homme|skos:broader|Préhistoire -AI girls and guys|skos:broader|Artificial Intelligence -Deep Learning: A Critical Appraisal Although deep learning has historical roots going back decades, neither the term deep learning nor the approach was popular just over five years ago, when the field was reignited by papers such as Krizhevsky, Sutskever and Hinton's now classic (2012) deep network model of Imagenet. What has the field discovered in the five subsequent years? 
Against a background of considerable progress in areas such as speech recognition, image recognition, and game playing, and considerable enthusiasm in the popular press, I present ten concerns for deep learning, and suggest that deep learning must be supplemented by other techniques if we are to reach artificial general intelligence.|sl:tag|tag:arxiv_doc -Amérique du sud|skos:broader|Amérique latine -Microsoft Concept Graph|skos:broader|Knowledge Graphs -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Claudio Gutierrez -Cinéma brésilien|skos:broader|Cinéma -SPARQL: shortcomings|skos:broader|SPARQL -Apprendre une langue|skos:broader|Education -Platonov|skos:broader|Littérature russe -War|skos:broader|Horreur -Enterprise Content Management|skos:broader|GED -Pythagore|skos:broader|Grèce antique -Semantic Networks|skos:broader|Semantic Web -Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:arxiv_author|Matteo Palmonari -Good Practice When Generating URIs|skos:broader|Minting URIs -Périclès|skos:broader|Chef d'état -Nicolas Hulot|skos:broader|Écologie -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. 
We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. These information have been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:arxiv_author|Jihyeok Kim -Europeana|skos:broader|Cultural heritage -Axoum|skos:broader|Ethiopie -OWL: Introduction|skos:broader|OWL -Rules language|skos:broader|Rules -Mami Wata|skos:broader|Mythologie -Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. These findings are significant as it suggests a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. 
All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|sl:tag|tag:reasoning -Car Options Ontology|skos:broader|Automotive ontologies -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:arxiv_firstAuthor|Mostafa Dehghani -C2GWeb RDF|skos:broader|C2GWeb -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. 
Our method consistently improves over standard single-task and multi-task training.|sl:arxiv_firstAuthor|Kevin Clark -OS|skos:broader|Software -Memory Embeddings|skos:broader|Memory in deep learning -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:tag|tag:nlp_pretraining -intent classification intent detection|skos:broader|Chatbot -Russie|skos:broader|Asie -Multilingual embeddings|skos:broader|Cross-lingual NLP -Zika|skos:broader|Virus -Semantic CMS|skos:broader|Semantic Web : Application -Digital economy|skos:broader|Economie -Graph database and NLP|skos:broader|NLP techniques -Similarity queries|skos:broader|Recommender Systems -Origines du sida|skos:broader|Sida -Autoencoder|skos:broader|Dimensionality reduction -HTML Dev|skos:broader|HTML -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|sl:arxiv_author|Lei Jimmy Ba -Semantic Web / Web 2.0|skos:broader|Web 2.0 -httpRange-14|skos:broader|Information resources -Question Answering over Knowledge Graphs via Structural Query Patterns Natural language question answering over knowledge graphs is an important and interesting task as it enables common users to gain accurate answers in an easy and intuitive manner. However, it remains a challenge to bridge the gap between unstructured questions and structured knowledge graphs. To address the problem, a natural discipline is building a structured query to represent the input question. Searching the structured query over the knowledge graph can produce answers to the question. Distinct from the existing methods that are based on semantic parsing or templates, we propose an effective approach powered by a novel notion, structural query pattern, in this paper. 
Given an input question, we first generate its query sketch that is compatible with the underlying structure of the knowledge graph. Then, we complete the query graph by labeling the nodes and edges under the guidance of the structural query pattern. Finally, answers can be retrieved by executing the constructed query graph over the knowledge graph. Evaluations on three question answering benchmarks show that our proposed approach outperforms state-of-the-art methods significantly.|sl:tag|tag:arxiv_doc -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|sl:arxiv_author|Piotr Bojanowski -LOD mailing list|skos:broader|Linking Open Data -Nova Spivak|skos:broader|Technical girls and guys -Emmanuel Ledinot|skos:broader|Technical girls and guys -dbpedia|skos:broader|Linking Open Data -Encyclopedia of Life|skos:broader|Encyclopédie collaborative -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. 
In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:tag|tag:memory_in_deep_learning -RDF Access to Relational Databases|skos:broader|Relational Databases and the Semantic Web -Vénus préhistoriques|skos:broader|Vénus (divinité) -evilstreak/markdown-js|skos:broader|GitHub project -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|sl:arxiv_author|Inderjit S. Dhillon -Multi-label Text classification|skos:broader|Multi-label classification -Constitution européenne|skos:broader|Institutions européennes -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. 
In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|sl:tag|tag:huggingface_transformers -RDF and statistics|skos:broader|Web sémantique sw -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:arxiv_author|Xiaoran Xu -Enhancing the Power of Cardinal's Algorithm Cardinal's factorization algorithm of 1996 splits a univariate polynomial into two factors with root sets separated by the imaginary axis, which is an important goal itself and a basic step toward root-finding. The novelty of the algorithm and its potential power have been well recognized by experts immediately, but by 2016, that is, two decades later, its practical value still remains nil, particularly because of the high computational cost of performing its final stage by means of computing approximate greatest common divisor of two polynomials. We briefly recall Cardinal's algorithm and its difficulties, amend it based on some works performed since 1996, extend its power to splitting out factors of a more general class, and reduce the final stage of the algorithm to quite manageable computations with structured matrices. Some of our techniques can be of independent interest for matrix computations.|sl:arxiv_firstAuthor|Victor Y. Pan -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. 
An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|sl:arxiv_author|Nesreen K. Ahmed -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:neuroscience_and_machine_learning -LDOW2008|skos:broader|Linked Data -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. 
Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|sl:arxiv_author|Xipeng Qiu -Tortures américaines|skos:broader|Torture -del.icio.us|skos:broader|Tagging -Lost City|skos:broader|Extrémophiles -Internet tool|skos:broader|Internet -Solid|skos:broader|Privacy and internet -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|sl:arxiv_firstAuthor|Francisco De Sousa Webber -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. 
Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|sl:tag|tag:entity_linking -RDF and database|skos:broader|Web sémantique sw -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:arxiv_author|Zhilin Yang -Kapuscinski|skos:broader|Pologne -Pour les nuls|skos:broader|Howto, tutorial, FAQ -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed- forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:arxiv_author|Emily Pitler -schema.org|skos:broader|Web of data -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. 
The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|sl:arxiv_author|Florent Perronnin -A Review of Relational Machine Learning for Knowledge Graphs Relational machine learning studies methods for the statistical analysis of relational, or graph-structured, data. In this paper, we provide a review of how such statistical models can be trained on large knowledge graphs, and then used to predict new facts about the world (which is equivalent to predicting new edges in the graph). In particular, we discuss two fundamentally different kinds of statistical relational models, both of which can scale to massive datasets. The first is based on latent feature models such as tensor factorization and multiway neural networks. The second is based on mining observable patterns in the graph. We also show how to combine these latent and observable models to get improved modeling power at decreased computational cost. Finally, we discuss how such statistical models of graphs can be combined with text-based information extraction methods for automatically constructing knowledge graphs from the Web. To this end, we also discuss Google's Knowledge Vault project as an example of such combination.|sl:arxiv_author|Evgeniy Gabrilovich -Windows|skos:broader|Microsoft -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|sl:tag|tag:arxiv_doc -OpenAI GPT|skos:broader|OpenAI -Pillage du palais d'été|skos:broader|Guerres coloniales -Terrorisme islamiste|skos:broader|Terrorisme -Hypothèse de Riemann|skos:broader|Nombres premiers -Jean-Claude Ameisen|skos:broader|Darwin -Google: SEO|skos:broader|SEO -Technique of analyzing relationships between a set of documents and the terms they contain, by producing a set of concepts related to the documents and terms. 
LSA assumes that words that are close in meaning will occur in similar pieces of text. LSI transforms documents from either bag-of-words or (preferably) TfIdf-weighted space into a latent space of a lower dimensionality. A matrix containing word counts (in rows) per paragraph (column) is constructed from a large piece of text. [Singular value decomposition (SVD)](singular_value_decomposition) is used to reduce the number of rows while preserving the similarity structure among columns. Similarities between words and/or docs can then be evaluated using cosine distance in the low-dimensional space - pros: - alleviates the problem of synonymy (note: Wikipedia contradicts itself regarding polysemy. I would say LSI cannot solve this problem) - can output topics in a ranked order. - cons: - requires a num_topics parameter. - dimensions have no easily interpretable meaning in natural language - SVD is computation-intensive (still a problem with improved algorithms?) - Wikipedia says that the probabilistic model of LSA does not match observed data: LSA assumes that words and documents form a joint Gaussian model (ergodic hypothesis), while a Poisson distribution has been observed. Thus, a newer alternative is probabilistic latent semantic analysis, based on a multinomial model, which is reported to give better results than standard LSA [Gensim tutorial about transformations](https://markroxor.github.io/gensim/static/notebooks/Topics_and_Transformations.html) says that LSI training is unique in that it can continue at any point, simply by providing more training documents. (LSI or LSA? Truncated SVD applied to document similarity is called Latent Semantic Indexing (LSI), but it is called Latent Semantic Analysis (LSA) when applied to word similarity.) 4 ways of looking at the Truncated SVD ([cf.](http://www.jair.org/media/2934/live-2934-4846-jair.pdf)): - Latent meaning: the truncated SVD creates a low-dimensional linear mapping between words in row space and contexts in column space which captures the hidden (latent) meaning in the words and contexts - Noise reduction: the truncated SVD can be seen as a smoothed version of the original matrix (which captures the signal and leaves out the noise) - A way to discover high-order co-occurrence: when two words appear in similar contexts - Sparsity reduction: the original matrix is sparse, but the truncated SVD is dense. Sparsity may be viewed as a problem of insufficient data and truncated SVD as a way of simulating the missing text [See also Introduction to Information Retrieval, Manning 2008](https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html) |skos:broader|Methods for quantifying and categorizing semantic similarities between linguistic items based on their distributional properties in large samples of language data. Basic idea: the Distributional hypothesis: linguistic items with similar distributions have similar meanings. Basic approach: collect distributional information in high-dimensional vectors, and define similarity in terms of vector similarity. Models: latent semantic analysis (LSA), Hyperspace Analogue to Language (HAL), syntax- or dependency-based models, random indexing, semantic folding and various variants of the topic model. 
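A minimal sketch of the LSI pipeline described above (truncated SVD over a TfIdf-weighted document-term matrix, document similarity via cosine distance in the latent space); the toy corpus and the num_topics value are hypothetical, and scikit-learn is assumed to be available:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity

docs = ["the cat sat on the mat", "cats and dogs", "markets fell sharply today"]  # toy corpus
tfidf = TfidfVectorizer().fit_transform(docs)  # sparse TfIdf document-term matrix
lsi = TruncatedSVD(n_components=2)             # num_topics, the parameter LSI requires
latent = lsi.fit_transform(tfidf)              # dense, low-dimensional representation
print(cosine_similarity(latent))               # pairwise document similarities

Note how the dense latent matrix illustrates the sparsity-reduction view of the truncated SVD mentioned above.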
-ML: conditioning|skos:broader|Machine learning: problems -GloVe|skos:broader|NLP@Stanford -Immigration|skos:broader|Grands problèmes -Folksonomies vs ontologies|skos:broader|Ontologies -Jena dev|skos:broader|Jena -Attention in Graphs|skos:broader|Graphs+Machine Learning -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|sl:arxiv_firstAuthor|Bhuwan Dhingra -Semantic Enterprise Architecture|skos:broader|Semantic Enterprise -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|sl:arxiv_firstAuthor|Jeremy Howard -Guillaume Genthial|skos:broader|NLP girls and guys -Tombe d'amphipolis|skos:broader|Découverte archéologique -Niger : agriculture|skos:broader|Niger -Andrew Ng|skos:broader|AI girls and guys -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. 
We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:arxiv_doc -Pauli|skos:broader|Mécanique quantique -David Peterson|skos:broader|Australie -OAT|skos:broader|XMLHttpRequest -Mandelbrot|skos:broader|Mathématicien -Web 3.0|skos:broader|Semantic Web -Littérature africaine|skos:broader|Littérature -CJNN|skos:broader|Souvenirs -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy text, based on probabilistic tokenization and context-dependent document embeddings. Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|sl:tag|tag:loosely_formatted_text -Lucilie bouchère|skos:broader|Insecte -Zero-shot Entity Linking|skos:broader|Zero-Shot Learning -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. 
Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|sl:arxiv_author|Wayne Xin Zhao -14 juillet|skos:broader|Fête nationale -OPML|skos:broader|Outliner -Mission "Voulet-Chanoine"|skos:broader|Histoire du Niger -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019. Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early. In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|sl:arxiv_author|Bharath Hariharan -Entities to topics|skos:broader|Knowledge Graphs -Machine Learning Course|skos:broader|Online Course Materials -NLP Groups|skos:broader|Natural Language Processing -Périclès|skos:broader|Athènes -Présidentielles 2007|skos:broader|Politique française -Amsterdam|skos:broader|Pays-Bas -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. 
We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|sl:tag|tag:sebastian_ruder -A La Carte Embedding|skos:broader|Word embeddings -Sören Auer|skos:broader|Technical girls and guys -Ontoprise|skos:broader|Semantic web company -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:tag|tag:word2vec -Sense embeddings|skos:broader|Embeddings in NLP -AI, robots and jobs|skos:broader|Robotisation -A suivre|skos:broader|Todo list -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:arxiv_author|Eduard Hovy -NLP: current state|skos:broader|NLP -LDP: implementations|skos:broader|Linked Data Platform -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... 
Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:tag|tag:nlp_google -2eme guerre mondiale|skos:broader|Histoire -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|sl:tag|tag:nlp_facebook -fps AND LDOW2008|skos:broader|fps and WWW 2008 -JavaScript librairies|skos:broader|Library (code) -Egypte antique|skos:broader|Archéologie africaine -GoodRelations|skos:broader|Ontologies -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. 
Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:tag|tag:graph_neural_networks -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:tag|tag:kd_mkb_biblio -Spellchecker|skos:broader|NLP tasks / problems -Etat policier|skos:broader|Ca craint -GNU Octave|skos:broader|GNU -Spiking Neural Network|skos:broader|Neural networks -A Survey Of Cross-lingual Word Embedding Models Cross-lingual representations of words enable us to reason about word meaning in multilingual contexts and are a key facilitator of cross-lingual transfer when developing natural language processing models for low-resource languages. In this survey, we provide a comprehensive typology of cross-lingual word embedding models. We compare their data requirements and objective functions. The recurring theme of the survey is that many of the models presented in the literature optimize for the same objectives, and that seemingly different models are often equivalent modulo optimization strategies, hyper-parameters, and such. We also discuss the different ways cross-lingual word embeddings are evaluated, as well as future challenges and research horizons.|sl:arxiv_firstAuthor|Sebastian Ruder -Conjecture de Poincaré|skos:broader|Poincaré -Angela Merkel|skos:broader|Allemagne -Same architecture as autoencoder, but make strong assumptions concerning the distribution of latent variables. 
They use a variational approach for latent representation learning (the "Stochastic Gradient Variational Bayes" (SGVB) training algorithm)|skos:broader|ANN used for unsupervised learning of efficient codings: learning a representation (encoding) for a set of data, typically for the purpose of dimensionality reduction. An unsupervised neural network which is trained to reconstruct a given input from its latent representation (Bengio, 2009). Unlike principal components analysis, the encoding and decoding steps are not limited to linear transformations (PCA learns an encoding linear transform, while auto-encoders learn an encoding program). -Twine|skos:broader|Nova Spivak -Content negotiation|skos:broader|HTTP -Approximate nearest-neighbor|skos:broader|Nearest neighbor search -How do we do node embeddings? ([source](http://snap.stanford.edu/proj/embeddings-www/index.html#materials)) Intuition: Find embeddings of nodes so that “similar” nodes in the graph have embeddings that are close together. 1. Define an encoder (i.e., a mapping from nodes to embeddings) - Shallow embedding (simplest encoding approach): the encoder is just an embedding-lookup. Ex: [node2vec](/tag/node2vec), DeepWalk, LINE 2. Define a node similarity function, e.g. nodes are similar if: - they are connected? - they share neighbours? - they have structurally similar roles? - ... 3. Optimize the parameters of the encoder so that similarity in the embedding space (e.g., dot product) approximates similarity in the original network. Defining similarity: - Adjacency-based Similarity - Multihop similarity (measure overlap between node neighborhoods); these two methods are expensive. - Random-walk Embeddings (estimate the probability of visiting node v on a random walk starting from node u using some random walk strategy, optimize embeddings to encode random walk statistics). Expressivity (incorporates both local and higher-order neighbourhood information) and efficiency (no need to consider all pairs when training). Which random walk strategy? - fixed-length random walks starting from each node: DeepWalk (Perozzi et al., 2014) - biased random walks that can trade off between local and global views of the network: Node2Vec (micro-view / macro-view of the neighbourhood). No method wins in all the cases. |skos:broader|Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high dimensionality. Network embeddings aim to represent network vertices in a low-dimensional vector space, preserving both network topology structure and node content information. Algorithms are typically unsupervised and can be broadly classified into three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): - matrix factorization - random walks - deep learning approaches (graph neural networks - GNNs) - graph convolution networks (GraphSage) - graph attention networks, - graph auto-encoders (e.g., DNGR and SDNE) - graph generative networks, - graph spatial-temporal networks. Node embeddings (intuition: similar nodes should have similar vectors). - Laplacian EigenMap (an eigenvector-based computation, OK when the matrix is not too large) - LINE (Large-scale Information Network Embedding; most cited paper at WWW2015; breadth-first search) - DeepWalk (Perozzi et al. 2014) (the technique used to learn word embeddings, adapted to nodes: treating nodes as words and generating short random walks as sentences) - Node2Vec (2016) (mixed strategy) etc. 
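A minimal sketch of the shallow, random-walk approach outlined above (DeepWalk-style: fixed-length uniform walks fed to a word2vec-style skip-gram model); the graph, walk length and hyperparameters are hypothetical, and networkx and gensim are assumed to be available:

import random
import networkx as nx
from gensim.models import Word2Vec

g = nx.karate_club_graph()  # toy graph standing in for a real network

def random_walk(start, length=10):
    # fixed-length uniform random walk, as in DeepWalk
    node = start
    walk = [str(node)]
    for _ in range(length - 1):
        node = random.choice(list(g.neighbors(node)))
        walk.append(str(node))
    return walk

# treat walks as sentences; the encoder is then just an embedding lookup
walks = [random_walk(n) for n in g.nodes() for _ in range(20)]
model = Word2Vec(walks, vector_size=64, window=5, min_count=0, sg=1)
print(model.wv.most_similar("0"))  # nodes whose walk statistics resemble node 0's

Biasing the walk (trading off breadth-first against depth-first moves) instead of choosing neighbours uniformly would give the node2vec variant.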
-Boulgakov|skos:broader|Ukraine -Hepp's PropertyValue|skos:broader|Product description -AI cloud service|skos:broader|Cloud -supervised learning models used for classification and regression analysis. An SVM model is a representation of the training examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. Non-probabilistic binary linear classifier (some methods exist to use SVM in a probabilistic classification setting). Can be made non-linear with the kernel trick (implicitly mapping the inputs into high-dimensional feature spaces.) |skos:broader|Class of algorithms for pattern analysis (eg. SVM). Kernel trick: transforming data into another dimension that has a clear dividing margin between classes of data, without computing the coordinates of the data in that space, but the inner products between the images of all pairs of data in the feature space (using a user-defined similarity function, the kernel function) Kernel methods are powerful learning methodologies that provide a simple way to construct nonlinear algorithms from linear ones. Despite their popularity, they suffer from poor scalability in big data scenarios ([src](https://arxiv.org/abs/1706.06296)). Kernel trick: Kernel functions enable to operate in a high-dimensional, implicit feature space without computing the coordinates of the data in that space, by simply computing the inner products between the images of all pairs of data in the feature space. Algorithms capable of operating with kernels include SVM, Gaussian processes,PCA, spectral clustering... Any linear model can be turned into a non-linear model by applying the kernel trick to the model: replacing its features (predictors) by a kernel function. -NLP@Facebook|skos:broader|Facebook -Belzoni|skos:broader|Explorateur -TensorFlow|skos:broader|Google Research -Javascript RDF Parser in IE|skos:broader|Tabulator -AI: books & journals|skos:broader|Livre -SL GUI|skos:broader|Semanlink -Archéologie du Niger|skos:broader|Archéologie africaine -Music of Africa|skos:broader|Afrique -Nigeria|skos:broader|Afrique de l'Ouest -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|sl:arxiv_author|Raphael Tang -Combining Statistics and Semantics|skos:broader|Statistics -de Broglie|skos:broader|Mécanique quantique -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. 
In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm. Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to the [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper. Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:tag|tag:knowledge_distillation
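A minimal sketch of the codistillation loss described above (two peers trained on disjoint data shards, each matching the other's predictions in addition to its own supervised loss); the toy linear models, random data and unit loss weighting are hypothetical, and PyTorch is assumed:

import torch
import torch.nn.functional as F

model_a = torch.nn.Linear(10, 3)  # peer A
model_b = torch.nn.Linear(10, 3)  # peer B
opt = torch.optim.SGD(list(model_a.parameters()) + list(model_b.parameters()), lr=0.1)

for step in range(100):
    xa, ya = torch.randn(32, 10), torch.randint(0, 3, (32,))  # shard A
    xb, yb = torch.randn(32, 10), torch.randint(0, 3, (32,))  # shard B
    la, lb = model_a(xa), model_b(xb)
    with torch.no_grad():  # peer predictions; stale checkpoints in the distributed setting
        ta, tb = model_b(xa), model_a(xb)
    loss = (F.cross_entropy(la, ya) + F.cross_entropy(lb, yb)
            + F.kl_div(F.log_softmax(la, -1), F.softmax(ta, -1), reduction="batchmean")
            + F.kl_div(F.log_softmax(lb, -1), F.softmax(tb, -1), reduction="batchmean"))
    opt.zero_grad()
    loss.backward()
    opt.step()

In the paper's distributed setting the peer's predictions come from a rarely synchronized copy of the other model rather than from the live weights, which is what keeps the extra communication cheap.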
In this paper, we propose an automated patent landscaping model based on deep learning. To analyze the text of patents, the proposed model uses a modified transformer structure. To analyze the metadata of patents, we propose a graph embedding method that uses a diffusion graph called Diff2Vec. Furthermore, we introduce four benchmark datasets for comparing related research studies in patent landscaping. The datasets are produced by querying Google BigQuery, based on a search formula from a Korean patent attorney. The obtained results indicate that the proposed model and datasets can attain state-of-the-art performance, as compared with current patent landscaping models.|sl:arxiv_author|Hyeonju Lee -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:tag|tag:arxiv_doc -Kindle|skos:broader|Amazon -Black hole|skos:broader|Gravity -Color naming|skos:broader|Language -High-frequency trading|skos:broader|Marchés financiers -Dynamic Semantic Publishing|skos:broader|Semantic Web Dev -Moussa Kaka|skos:broader|RFI -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. 
By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|sl:arxiv_author|Yoshiyasu Takefuji -TDB|skos:broader|RDF database -ELMo|skos:broader|Pre-Trained Language Models -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|sl:arxiv_firstAuthor|Terence Parr -Fossile|skos:broader|Paléontologie -Train a model in an unsupervised way on a large amount of data, and then fine-tune it to achieve good performance on many different tasks|skos:broader|In machine learning, unsupervised learning refers to the problem of trying to find hidden structure in unlabeled data -Concept Bottleneck Models|skos:broader|Explainable AI -Socrate|skos:broader|Grèce antique -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|sl:arxiv_firstAuthor|Kevin Musgrave -Garbage Collector|skos:broader|Programming language -Probabilistic relevance model|skos:broader|Similarity queries -BERT-of-Theseus: Compressing BERT by Progressive Module Replacing approach to compress BERT by progressive module replacing. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter [Github](https://github.com/JetRunner/BERT-of-Theseus) In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models, and smooths the training process. 
Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter, liberating human effort from hyper-parameter tuning. Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.|sl:arxiv_author|Furu Wei -Athlétisme|skos:broader|Sport -GBIF|skos:broader|Biodiversity data -Mission Villani sur l'IA|skos:broader|Cédric Villani -Arduino|skos:broader|Open Source -Tree embeddings|skos:broader|Embeddings -Immigration familiale|skos:broader|Immigration -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|sl:tag|tag:semanlink2_related -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. 
Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:arxiv_author|Ping Wang -Semences paysanes|skos:broader|Semencier -DistilBERT|skos:broader|Knowledge distillation -WordPress|skos:broader|Blog software -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:arxiv_author|Luke Zettlemoyer -Cambridge Analytica|skos:broader|Personal data -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|sl:arxiv_author|Fabio Petroni -Meetup Web Sémantique|skos:broader|Meetup -SémanticPédia|skos:broader|dbpedia francophone -Autoencoder|skos:broader|Neural networks -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. 
This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|George Dahl -Peer to peer|skos:broader|Internet Related Technologies -ML technique for regression and classification problems, which produces a prediction model in the form of an ensemble of weak prediction models, typically decision trees. Allows the optimization of an arbitrary differentiable loss function. |skos:broader|ML ensemble meta-algorithm for primarily reducing bias, and also variance in supervised learning, and a family of machine learning algorithms that convert weak learners to strong ones. -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. 
For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:arxiv_author|Luke Zettlemoyer -Cocoon|skos:broader|XML -Pythagore|skos:broader|Géométrie -Industrie du disque|skos:broader|Content industries -Debug|skos:broader|Dev -Guéant|skos:broader|Gouvernement Sarkozy -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based methods are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:arxiv_author|Gustav Grund Pihlgren -New Africa|skos:broader|NTIC et développement -Pauli|skos:broader|Scientifique -Ian Horrocks|skos:broader|SW guys (and girls) -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. 
Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:tag|tag:top_k -Unsupervised keyword/keyphrase extraction algorithm. Creates a graph of the words and relationships between them from a document (using a sliding window), then identifies the most important vertices of the graph (words) based on importance scores calculated recursively from the entire graph. |skos:broader|“the automatic selection of important and topical phrases from the body of a document” (Turney, 2000) -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|sl:tag|tag:kd_mkb_biblio -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. 
A prototypical implementation demonstrates major steps of our approach.|sl:arxiv_author|Christian Jilek -Thrace|skos:broader|Bulgarie -IGN|skos:broader|Géographie -Google|skos:broader|Internet -Abstractions in AI|skos:broader|Artificial general intelligence -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|sl:tag|tag:jeremy_howard -Neural Memory|skos:broader|Memory in deep learning -Representing Sentences as Low-Rank Subspaces We observe a simple geometry of sentences -- the word representations of a given sentence roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. A sentence of N words is a matrix (300, N) (if 300 is the dim of the word embeddings space). We take the eg. 4 (hyperparam) heaviest singular values - a subspace with dim 4 Similarity between docs: principal angle between the subspaces (reminiscent of cosine similarity) Sentences are important semantic units of natural language. A generic, distributional representation of sentences that can capture the latent semantics is beneficial to multiple downstream applications. We observe a simple geometry of sentences -- the word representations of a given sentence (on average 10.23 words in all SemEval datasets with a standard deviation 4.84) roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. Such an unsupervised representation is empirically validated via semantic textual similarity tasks on 19 different datasets, where it outperforms the sophisticated neural network models, including skip-thought vectors, by 15% on average.|sl:arxiv_firstAuthor|Jiaqi Mu -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. 
Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:arxiv_author|Bhuwan Dhingra -Multiword Expressions|skos:broader|NLP tasks / problems -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|sl:tag|tag:graph_embeddings -D2RQ|skos:broader|Chris Bizer -KGE KG embedding Knowledge graph embedding|skos:broader|Network embeddings Representation Learning on Networks Graph representation learning Network Representation Learning -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. 
Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|sl:tag|tag:deep_learning -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|sl:arxiv_firstAuthor|Zeynep Akata -Alexandria|skos:broader|Egypte -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|sl:tag|tag:active_learning -Capitalistes|skos:broader|Capitalisme -Unsupervised keyword/keyphrase extraction algorithm. Creates a graph of the words and relationships between them from a document (using a sliding window), then identifies the most important vertices of the graph (words) based on importance scores calculated recursively from the entire graph. |skos:broader|application of machine learning in the construction of ranking models. Training data consists of lists of items with some partial order specified between items in each list. -Roam|skos:broader|Semanlink related -Chinafrique|skos:broader|Africa -The task of identifying target entities of the same domain|skos:broader|= named entity disambiguation: the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base. 
-Google App Engine|skos:broader|Google -Chine-Europe|skos:broader|Chine -Amsterdam|skos:broader|Ville -Jermakoye|skos:broader|Jerma -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|sl:arxiv_author|Zoubin Ghahramani -Type system|skos:broader|Programming language -Araméen|skos:broader|Langues -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:tag|tag:nlu -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. 
Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|sl:arxiv_author|Sumit Chopra -Global brain|skos:broader|Brain -APIs and Linked Data|skos:broader|Linked Data -Continent de plastique|skos:broader|Pollution des océans -Artificial Human Intelligence|skos:broader|Artificial Intelligence -ISP / Servlet Hosting|skos:broader|Servlet -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:tag|tag:yoshua_bengio -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. 
In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|sl:tag|tag:elmo -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|sl:arxiv_author|James Philbin -Dev tools|skos:broader|Tools -Disco Hyperdata Browser|skos:broader|Linked Data -First Americans|skos:broader|Paléontologie humaine -Symmetric matrices related to the Mertens function In this paper we explore a family of congruences over N from which a sequence of symmetric matrices related to the Mertens function is built. From the results of numerical experiments we formulate a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important role in this classical and difficult problem. In this paper we explore a family of congruences over $\\N^\\ast$ from which one builds a sequence of symmetric matrices related to the Mertens function. From the results of numerical experiments, we formulate a conjecture about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. 
This suggests that matrix analysis methods may come to play a more important role in this classical and difficult problem.|sl:arxiv_firstAuthor|Jean-Paul Cardinal -NLP+Automotive|skos:broader|NLP in enterprise -Kind of supervised learning, where labels can be generated automatically. Uses signals or domain knowledge, intrinsically correlated to the data, as automatic sources of supervision, thus removing the need for humans to label data. Examples include [#autoencoders](/tag/autoencoder) and computation of [#word embeddings](/tag/word_embedding) In self-supervised learning, the system learns to predict part of its input from other parts of it input. ([Lecun](https://www.facebook.com/722677142/posts/10155934004262143/))|skos:broader|Machine learning focuses on prediction, based on known properties learned from the training data. Data mining (which is the analysis step of Knowledge Discovery in Databases) focuses on the discovery of (previously) unknown properties on the data. [Glossary (by google)](https://developers.google.com/machine-learning/glossary/) -Data mining|skos:broader|Data science -Carl Lewis|skos:broader|Athlétisme -Jeni Tennison|skos:broader|SW guys (and girls) -Bacterial to Animal Gene Transfer|skos:broader|Horizontal gene transfer -Rosetta Project|skos:broader|Langues -Daphne Koller|skos:broader|Education -Semantic Web Dev|skos:broader|Dev -RIF|skos:broader|Rules -Intervention française au Mali|skos:broader|France / Afrique -EventKG: A Multilingual Event-Centric Temporal Knowledge Graph 690 thousand contemporary and historical events and over 2.3 million temporal relations One of the key requirements to facilitate semantic analytics of information regarding contemporary and historical events on the Web, in the news and in social media is the availability of reference knowledge repositories containing comprehensive representations of events and temporal relations. Existing knowledge graphs, with popular examples including DBpedia, YAGO and Wikidata, focus mostly on entity-centric information and are insufficient in terms of their coverage and completeness with respect to events and temporal relations. EventKG presented in this paper is a multilingual event-centric temporal knowledge graph that addresses this gap. EventKG incorporates over 690 thousand contemporary and historical events and over 2.3 million temporal relations extracted from several large-scale knowledge graphs and semi-structured sources and makes them available through a canonical representation.|sl:tag|tag:arxiv_doc -A Brief Introduction to Machine Learning for Engineers This monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning. The treatment concentrates on probabilistic models for supervised and unsupervised learning problems. It introduces fundamental concepts and algorithms by building on first principles, while also exposing the reader to more advanced topics with extensive pointers to the literature, within a unified notation and mathematical framework. The material is organized according to clearly defined categories, such as discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models. 
This monograph is meant as an entry point for researchers with a background in probability and linear algebra.|sl:tag|tag:arxiv_doc -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_firstAuthor|Thomas Dean -ELMo|skos:broader|Word embeddings -Patent Landscaping|skos:broader|Patent -Lenka Zdeborová|skos:broader|Physicien -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. 
In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:arxiv_author|Zhengyan Zhang -EMNLP 2018|skos:broader|J'y étais -Boulgakov|skos:broader|Littérature russe -Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|sl:arxiv_author|Timothy Niven -Word2Bits - Quantized Word Vectors We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer Word vectors require significant amounts of memory and storage, posing issues to resource limited devices like mobile phones and GPUs. We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer. We train word vectors on English Wikipedia (2017) and evaluate them on standard word similarity and analogy tasks and on question answering (SQuAD). Our quantized word vectors not only take 8-16x less space than full precision (32 bit) word vectors but also outperform them on word similarity tasks and question answering.|sl:tag|tag:word2vec -Entity discovery and linking|skos:broader|NLP tasks / problems -Cassini|skos:broader|Missions spatiales -Pékin|skos:broader|Chine -Deep Unsupervised Learning|skos:broader|Deep Learning -BNF|skos:broader|Bibliothèque -AI@Google|skos:broader|Google -Semantic Desktop|skos:broader|Semantic Web -Niger : pétrole|skos:broader|Pétrole -Vénus préhistoriques|skos:broader|Sculpture -Deep Learning|skos:broader|Machine learning: techniques -Bush|skos:broader|USA -Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. 
It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|sl:tag|tag:arxiv_doc -Neural coding|skos:broader|Computational Neuroscience -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:arxiv_firstAuthor|Haitian Sun -Siri|skos:broader|Speech-to-Text -Çatalhöyük|skos:broader|Néolithique -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. 
[Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Rahul Kuchhal -Configuration and SW|skos:broader|Semantic Web -Film indien|skos:broader|Inde -Orange (data mining)|skos:broader|Python 4 Data science -Parlement européen|skos:broader|Union européenne -RDF Thrift|skos:broader|RDF/binary -Freie Universität Berlin|skos:broader|Berlin -Système solaire|skos:broader|Astronomie -Triple Pattern Fragment|skos:broader|Linked Data Fragments -Nok|skos:broader|Art d'Afrique -John Sofakolle|skos:broader|Musicien -User Driven Modelling|skos:broader|Informatique -semblog|skos:broader|Jena -Javascript RDF Parser in IE|skos:broader|Compatibilité Javascript -AI Conference|skos:broader|Conférences -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. Specifically, we study two structured distillation schemes: i)pair-wise distillation that distills the pairwise similarities by building a static graph, and ii)holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|sl:tag|tag:pixelwise_dense_prediction -RDFizers|skos:broader|RDF -SW guys (and girls)|skos:broader|Technical girls and guys -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy tex, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. 
Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|sl:arxiv_author|Pete Skomoroch -esa|skos:broader|Exploration spatiale -ML: Sequential data|skos:broader|Machine learning: problems -Hubble|skos:broader|Télescope -Prix Nobel d'économie|skos:broader|Economie -NLP@Stanford|skos:broader|NLP Teams -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|sl:tag|tag:rotate -Crise des banlieues|skos:broader|Banlieue -NLP|skos:broader|Language -Lula|skos:broader|Brésil -HBase™|skos:broader|apache.org -Reproducible Research|skos:broader|Recherche -Graph-based Semi-Supervised Learning|skos:broader|Graphs+Machine Learning -Zouk|skos:broader|Musique -Miriam Makeba|skos:broader|Afrique du Sud -Attention is All You Need Transformer Transformers|skos:broader|Sequence Modeling Seq2Seq -Ben Adida|skos:broader|Technical girls and guys -Unsupervised machine learning|skos:broader|Machine learning -Pékin 2008|skos:broader|Jeux Olympiques -Histoire de l'Europe|skos:broader|Europe -Open Domain Question Answering|skos:broader|Question Answering -Yago|skos:broader|Semantic Web : Application -Consciousness Prior|skos:broader|Human Level AI -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. 
Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|sl:arxiv_author|Thomas S. Huang -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|sl:tag|tag:arxiv_doc -Semanlink todo|skos:broader|Semanlink dev -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. 
By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:arxiv_author|Stanislav Morozov -SGNN|skos:broader|On device NLP -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:arxiv_author|Stephen Mussmann -Multilingual embeddings|skos:broader|Embeddings -C2GWeb, Product description and Makolab|skos:broader|C2GWeb and Product description -Automotive Ontology Community Group|skos:broader|Automotive AND W3C -La France vue de l'étranger|skos:broader|France -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. 
Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:arxiv_author|Omer Levy -QuickTime|skos:broader|Apple -Solr|skos:broader|Text Search -Smart energy grids|skos:broader|Economies d'énergie -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.|sl:arxiv_author|Quoc V. Le -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. 
By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:global_workspace_theory -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|sl:tag|tag:arxiv_doc -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|sl:tag|tag:arxiv_doc -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. 
Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:arxiv_author|Sebastian Riedel -Macintosh|skos:broader|Apple -js|skos:broader|Web app dev -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|sl:arxiv_author|Feiyu Gao -Climate crisis|skos:broader|Changement climatique -WS- vs. POX/HTTP|skos:broader|Web Services -Insecte|skos:broader|Animal -J'ai un petit problème avec mon ordinateur|skos:broader|J'ai un petit problème -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. 
Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_firstAuthor|Hassan Ismail Fawaz -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|sl:tag|tag:arxiv_doc -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. 
We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|sl:arxiv_author|Alp Kucukelbir -Politique française|skos:broader|France -Multitask Learning in NLP|skos:broader|Multi-task learning -W3C Data Activity|skos:broader|W3C -Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|sl:arxiv_author|Yazhe Li -Henri Bergius|skos:broader|SW guys (and girls) -Olivier Grisel|skos:broader|AI girls and guys -Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. 
We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|sl:arxiv_author|Son N. Tran -Singe|skos:broader|Primate -La main à la pâte|skos:broader|Science -Google Web Toolkit|skos:broader|Dev -BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|sl:arxiv_author|Ellie Pavlick -Hydra|skos:broader|HATEOAS -Girafe|skos:broader|Animal -(LOV) Linked Open Vocabularies|skos:broader|Linked Data -Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new "compositional" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as "What languages are spoken by people living in Lisbon?". However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new "compositional" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy.
On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|sl:tag|tag:embeddings -Euro Crisis|skos:broader|Euro -evilstreak/markdown-js|skos:broader|Markown / Javascript -classification decision based on the value of a linear combination of the feature values |skos:broader|the machine learning task of inferring a function from labeled training data. -Franco-Allemand|skos:broader|France -COO|skos:broader|Car ontology -Jena : Introduction|skos:broader|Introduction -Archéologie du Niger|skos:broader|Niger -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:tag|tag:yoshua_bengio -Banque mondiale|skos:broader|Institutions internationales -Fado tropical|skos:broader|Portugal -Howto|skos:broader|Howto, tutorial, FAQ -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. 
Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:arxiv_author|Kihyuk Sohn -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|sl:arxiv_author|Wei-Jen Ko -Rijksmuseum|skos:broader|Amsterdam -A Survey Of Cross-lingual Word Embedding Models Cross-lingual representations of words enable us to reason about word meaning in multilingual contexts and are a key facilitator of cross-lingual transfer when developing natural language processing models for low-resource languages. In this survey, we provide a comprehensive typology of cross-lingual word embedding models. We compare their data requirements and objective functions. The recurring theme of the survey is that many of the models presented in the literature optimize for the same objectives, and that seemingly different models are often equivalent modulo optimization strategies, hyper-parameters, and such. We also discuss the different ways cross-lingual word embeddings are evaluated, as well as future challenges and research horizons.|sl:tag|tag:sebastian_ruder -NLP datasets|skos:broader|NLP -A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. 
It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|sl:arxiv_author|Vincent W. Zheng -Diffa|skos:broader|Niger -Tasmanie|skos:broader|Australie -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Review](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|sl:tag|tag:good -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical.
In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|sl:tag|tag:arxiv_doc -Yves Raymond|skos:broader|SW guys (and girls) -IPython|skos:broader|Python -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|sl:arxiv_author|Jaime G. Carbonell -DistilBERT|skos:broader|BERT -Named Entity Disambiguation using Deep Learning on Graphs Evaluation of different deep learning techniques to create a context vector from graphs, aimed at high-accuracy NED. (neural approach for entity disambiguation using graphs as background knowledge) We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task... [published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) In NED, the system must be able to generate a context for an entity in a text and an entity in a knowledge base, then correctly link the two. Explore whether representing graphs as triplets is more useful than using the full topological information of the graph We tackle \\ac{NED} by comparing entities in short sentences with \\wikidata{} graphs. 
Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to \\ac{NED}. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task. In addition, a new dataset (\\wikidatadisamb{}) is created to allow a clean and scalable evaluation of \\ac{NED} with \\wikidata{} entries, and to be used as a reference in future research. In the end our results show that a \\ac{Bi-LSTM} encoding of the graph triplets performs best, improving upon the baseline models and scoring an \\m{F1} value of $91.6\\%$ on the \\wikidatadisamb{} test set|sl:arxiv_author|Mohammad Akbari -Antiracisme|skos:broader|Racisme -(LOV) Linked Open Vocabularies|skos:broader|Bernard Vatant -ACL|skos:broader|NLP conference -SW: coreferences|skos:broader|URI -Hepp's PropertyValue|skos:broader|Martin Hepp -Boucle ferroviaire d’Afrique de l’Ouest|skos:broader|New Africa -Intent detection|skos:broader|Chatbots -SPARQL Demo|skos:broader|SPARQL -Unsupervised machine translation|skos:broader|Unsupervised machine learning -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:arxiv_author|Haotang Deng
We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|sl:tag|tag:arxiv_doc -Beijing Genomics Institute|skos:broader|Shenzhen -Sida|skos:broader|Immune system Système immunitaire -Fukushima|skos:broader|Catastrophe écologique -Federated SPARQL queries|skos:broader|SPARQL -Micropayments on the web|skos:broader|Payment -“one learning algorithm” hypothesis|skos:broader|Computational Neuroscience -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. 
(Hum, ça me rappelle quelque chose) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:arxiv_firstAuthor|Jiaming Shen -Crète antique|skos:broader|Crète -QuickTime|skos:broader|Apple Software -Indo-européen|skos:broader|Antiquité -t-SNE|skos:broader|Dimensionality reduction -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. 
We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Yujia Li -XML Schema|skos:broader|XML -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|sl:tag|tag:sense_embeddings -Président des USA|skos:broader|USA -IBM|skos:broader|Informatique -GooglePlus|skos:broader|Social Networks -J'y étais|skos:broader|Souvenirs -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. 
Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:arxiv_author|Chun-Liang Li -EMNLP 2019|skos:broader|NLP conference -Alexandre le Grand|skos:broader|Personnage historique -Information retrieval: techniques|skos:broader|Information retrieval -Semanlink2 related|skos:broader|Semanlink related -Chico Buarque|skos:broader|Brésil -Disparition des abeilles|skos:broader|Abeille -Tutorial|skos:broader|Howto, tutorial, FAQ -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|sl:arxiv_author|Sandeep Subramanian -Hugh Glaser|skos:broader|SW guys (and girls) -TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). 
Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification. LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. We also perform a comprehensive analysis to demonstrate great future opportunities. The data and code of the dataset are provided in \\url{https://github.com/wenhuchen/Table-Fact-Checking}.|sl:arxiv_firstAuthor|Wenhu Chen -Eurogroupe|skos:broader|Europe -L'Afrique à la Bastille - 13 juillet 2007|skos:broader|RFI -HADOPI|skos:broader|Loi sur le téléchargement -Carrot2|skos:broader|Clustering of text documents -SDMX|skos:broader|Statistical data -Film turc|skos:broader|Turquie -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|sl:arxiv_author|Hamed Zamani -Peintre|skos:broader|Artiste -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. 
According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:arxiv_author|Thibault Févry -Constraint Programming|skos:broader|Logic -classification method that generalizes logistic regression to multiclass problems. Assumes that a linear combination of the observed features and some problem-specific parameters can be used to determine the probability of each particular outcome of the dependent variable. If you want to assign probabilities to an object being one of several different things, softmax is the thing to do. Even later on, when we train more sophisticated models, the final step will be a layer of softmax. [cf.](http://www.tensorflow.org/tutorials/mnist/beginners/index.md) |skos:broader|predicting a single label among mutually exclusive labels. -Learning Confidence for Out-of-Distribution Detection in Neural Networks Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.|sl:tag|tag:arxiv_doc -Histoire des Jermas|skos:broader|Jerma -Knowledge Graphs + Text KG + NLP|skos:broader|Knowledge Graph KG -NASA|skos:broader|USA -JSP|skos:broader|Java -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. 
- It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|sl:arxiv_author|Wan-Duo Kurt Ma -Roman|skos:broader|Livre -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|sl:arxiv_author|Lajanugen Logeswaran -Language Models + Knowledge|skos:broader|Language model -Blosxom|skos:broader|Blog software -Julie Grollier|skos:broader|AI girls and guys -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. 
Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|sl:tag|tag:attention_knowledge_graphs -Gouvernement français|skos:broader|France -Craig Venter|skos:broader|Scientifique -Pierre de Volvic|skos:broader|Volcan -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|sl:arxiv_firstAuthor|Nils Reimers -DeleteFB|skos:broader|The web sucks -Google Visualization API|skos:broader|Data Visualization Tools -Digital Video|skos:broader|Digital Media -Transformer + Knowledge Graphs|skos:broader|Graph + Transformer -Coreference resolution|skos:broader|NLP tasks / problems -OWL: Introduction|skos:broader|Introduction -URI Synonymity|skos:broader|Synonym URIs -Cross-Origin Resource Sharing|skos:broader|cross-domain data fetching -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. 
In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|sl:tag|tag:knowledge_graph_completion -Thesaurus & Taxonomies |skos:broader|Knowledge Representation -Manuscrits de Tombouctou|skos:broader|Tombouctou -Tagging|skos:broader|Semantic annotation -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Rob Malkin -Deep latent variable models assume a generative process whereby a simple random variable is transformed from the latent space to the observed, output space through a deep neural network. Generative Adversarial Networks (GAN) and Variational Autoencoders (VAE) are two of the most popular variants of this approach|skos:broader|a set of algorithms in machine learning that attempt to model high-level abstractions in data by using architectures composed of multiple non-linear transformations. Deep learning is part of a broader family of machine learning methods based on learning representations of data. One of the promises of deep learning is replacing handcrafted features with efficient algorithms for unsupervised or semi-supervised feature learning and hierarchical feature extraction With Deep Learning, Ng says, you just give the system a lot of data so it can discover by itself what some of the concepts in the world are ([cf.](http://www.wired.com/2013/05/neuro-artificial-intelligence/all/)) -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. 
The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|sl:arxiv_author|Ikuya Yamada -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:arxiv_author|Lukasz Kaiser -SGNN|skos:broader|NLP@Google -Raphaël Troncy|skos:broader|SW guys (and girls) -Semantic Web conferences|skos:broader|Conférences -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. 
We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Jacob Andreas -dbpedia francophone|skos:broader|dbpedia -jersey|skos:broader|Java dev -Fichage génétique|skos:broader|Fichage -Semantic Web Client Library|skos:broader|Chris Bizer -Database to RDF mapping|skos:broader|Relational Databases and the Semantic Web -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|sl:tag|tag:arxiv_doc -JavaScript librairies|skos:broader|JavaScript -Société française Société française|skos:broader|Société -Compagnies pétrolières|skos:broader|Exploitation pétrolière -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! 
Knowledge Graph, which Wikipedia is a core subset of.|sl:arxiv_firstAuthor|Chien-Chun Ni -CNRS|skos:broader|Recherche française -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Michelle Lam -RDFa|skos:broader|RDF -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. 
We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Aidan Hogan -XMLHttpRequest|skos:broader|js -BLINK|skos:broader|NLP@Facebook -Illusion d'optique|skos:broader|Divers -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:tag|tag:dans_deep_averaging_neural_networks -Décroissance|skos:broader|Critique du libéralisme -GoogleTechTalks|skos:broader|Google -Robert McLiam Wilson|skos:broader|Ecrivain -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. 
An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|sl:arxiv_author|Xin Huang -David Peterson|skos:broader|SW guys (and girls) -Cédric Villani|skos:broader|Mathématicien -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations. We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|sl:tag|tag:knowledge_distillation -Tag Clusters|skos:broader|Tagging -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. 
Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:tag|tag:machine_learned_ranking -Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:arxiv_author|Gaetano Rossiello -Apache web server|skos:broader|apache.org -Paris NLP meetup|skos:broader|Meetup -Hash URIs|skos:broader|httpRange-14 -Apple|skos:broader|Entreprise -OntoWiki|skos:broader|PHP -Tombouctou|skos:broader|Sahara -OWL tool|skos:broader|Semantic Web : Tools -Ténéré|skos:broader|Niger -Chatbots|skos:broader|NLP: use cases -Rwanda|skos:broader|Afrique -Chômage|skos:broader|Société -Talis RDF/JSON|skos:broader|Talis -Horreur économique|skos:broader|Critique du libéralisme -phpMyAdmin|skos:broader|MySQL -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|sl:tag|tag:bayesian_deep_learning -Agriculture|skos:broader|Economie -Bulgarie|skos:broader|Europe -Chris Welty|skos:broader|Technical girls and guys -SQL to RDF mapping|skos:broader|RDF -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. 
The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:arxiv_author|Ashish Vaswani -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:arxiv_author|Shijie Wu -What is life ?|skos:broader|Origine de la vie -HP|skos:broader|Entreprise -Lynn Margulis|skos:broader|Femme célèbre (où qui mérite de l'être) -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 
The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:tag|tag:google_research -Spectral clustering|skos:broader|Statistics -Musique en ligne|skos:broader|NTIC -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|sl:arxiv_author|Serge Belongie -Film français|skos:broader|Film -public-hydra@w3.org|skos:broader|Mailing list -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. 
Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|sl:tag|tag:information_theory_and_deep_learning -Mobile apps dev|skos:broader|Mobile apps -the problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations whose category membership is known.|skos:broader|the machine learning task of inferring a function from labeled training data. -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|sl:tag|tag:yahoo -XML|skos:broader|Dev -NER|skos:broader|Sequence Tagging -Semantic Web Outliner|skos:broader|Semantic Web : Application -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. 
We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|sl:arxiv_author|Joshua B. Tenenbaum -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:arxiv_author|Vinaychandran Pondenkandath -Election|skos:broader|Société -SIF embeddings|skos:broader|Sentence Embeddings -Astronomie|skos:broader|Science -Database to RDF mapping|skos:broader|Converting data into RDF -Mobile phone|skos:broader|Mobile device -Leopard|skos:broader|Mac OS X -GNU Octave|skos:broader|Open Source -TEMIS|skos:broader|French Semantic web company -"Why Should I Trust You?": Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks).
We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|sl:tag|tag:arxiv_doc -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|sl:arxiv_author|Mohammad Taher Pilehvar -DSSM (Deep Semantic Similarity Model)|skos:broader|Similarity queries -Plante|skos:broader|Botanique -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|sl:tag|tag:categorical_variables -Android|skos:broader|Google -Combining text and structured data (ML-NLP)|skos:broader|Features (Machine Learning) -RDF2h|skos:broader|Linked Data Browser -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. 
(Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Axel-Cyrille Ngonga Ngomo -fps@EC-Web'14|skos:broader|EC-Web'14 -535|skos:broader|Accident climatique -SDB: A SPARQL Database for Jena|skos:broader|SPARQL AND Jena -SIF embeddings|skos:broader|Sanjeev Arora -Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:tag|tag:survey -Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|sl:arxiv_firstAuthor|Shuang Chen -Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. 
We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new "compositional" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|sl:arxiv_author|John Miller -Une suite de matrices symétriques en rapport avec la fonction de Mertens we explore a class of equivalence relations over N from which is constructed a sequence of symetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important part in this classical and difficult problem. In this paper we explore a class of equivalence relations over $\N^\ast$ from which is constructed a sequence of symetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important part in this classical and difficult problem.|sl:arxiv_firstAuthor|Jean-Paul Cardinal -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT is unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings is not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering.
In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|sl:tag|tag:arxiv_doc -Scandale des écoutes en Allemagne|skos:broader|Allemagne -Semanlink dev|skos:broader|Semanlink -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:tag|tag:emnlp_2018 -Boulgakov|skos:broader|Ecrivain -Génocide|skos:broader|Horreur -Ford|skos:broader|Automobile -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. 
Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|sl:arxiv_firstAuthor|Hsiang-Fu Yu -Liberté de pensée|skos:broader|Liberté -Ethereum|skos:broader|Virtual currency -Intent detection|skos:broader|Search Engines -Patent Infringement|skos:broader|Patent -What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:arxiv_firstAuthor|Kevin Clark -De-extinction|skos:broader|Clonage -Hello World|skos:broader|Sample code -Plantu|skos:broader|Humour -Martynas Jusevicius|skos:broader|SW guys (and girls) -Lesk algorithm|skos:broader|Algorithmes -"Better entity LINKing", @facebookai open-source entity linker. [GitHub](https://github.com/facebookresearch/BLINK)|skos:broader|Entity linking with Wikipedia as the target knowledge base -Back Propagation|skos:broader|ANN NN Artificial neural network -What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:arxiv_author|Christopher D.
Manning -Peace Corps|skos:broader|Coopération -Aster Aweke|skos:broader|Music of Africa -Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study data redundancy (reverse relations), Cartesian product relations A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world In the active research area of employing embedding models for knowledge graph completion, particularly for the task of link prediction, most prior studies used two benchmark datasets FB15k and WN18 in evaluating such models. Most triples in these and other datasets in such studies belong to reverse and duplicate relations which exhibit high data redundancy due to semantic duplication, correlation or data incompleteness. This is a case of excessive data leakage---a model is trained using features that otherwise would not be available when the model needs to be applied for real prediction. There are also Cartesian product relations for which every triple formed by the Cartesian product of applicable subjects and objects is a true fact. Link prediction on the aforementioned relations is easy and can be achieved with even better accuracy using straightforward rules instead of sophisticated embedding models. A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world. This paper is the first systematic study with the main objective of assessing the true effectiveness of embedding models when the unrealistic triples are removed. Our experiment results show these models are much less accurate than what we used to perceive. Their poor accuracy renders link prediction a task without truly effective automated solution. Hence, we call for re-investigation of possible effective approaches.|sl:arxiv_author|Qingheng Zhang State Key Laboratory for Novel Software Technology, Nanjing University -Equivalence mining|skos:broader|Linked Data -C2GWeb, Product description and Makolab|skos:broader|Product description -Three-way decisions|skos:broader|Statistical classification -Apache Marmotta|skos:broader|apache.org -Afrique|skos:broader|Géographie -Chinese|skos:broader|China -Memory-prediction framework|skos:broader|Neuroscience -Markown / Javascript|skos:broader|Markdown -Bigtable|skos:broader|Big Data -Soudan|skos:broader|Afrique de l'Est -PBS program|skos:broader|PBS -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:arxiv_author|Timothy P. 
Lillicrap -data.gouv.fr|skos:broader|Government data -Parthe|skos:broader|Mésopotamie -Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results. This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|sl:tag|tag:cluster_analysis -Andrej Karpathy|skos:broader|AI girls and guys -One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|sl:arxiv_author|Danilo Jimenez Rezende -Décisions en entreprise|skos:broader|Management -Fermi's paradox|skos:broader|Astronomie -Cooperative Knowledge Distillation for Representation Learning Across Multiple Knowledge Bases [GitHub](doc:2020/07/raphaelsty_kdmkb)|skos:broader| Apprentissage profond pour l’accès aux textes et bases de connaissances Apprentissage de représentations d'informations sémantiques, adaptées au Traitement du Langage Naturel et à la Recherche d'Information, à partir de textes et de bases de connaissances formelles du domaine automobile -EventKG: A Multilingual Event-Centric Temporal Knowledge Graph 690 thousand contemporary and historical events and over 2.3 million temporal relations One of the key requirements to facilitate semantic analytics of information regarding contemporary and historical events on the Web, in the news and in social media is the availability of reference knowledge repositories containing comprehensive representations of events and temporal relations. Existing knowledge graphs, with popular examples including DBpedia, YAGO and Wikidata, focus mostly on entity-centric information and are insufficient in terms of their coverage and completeness with respect to events and temporal relations. 
EventKG presented in this paper is a multilingual event-centric temporal knowledge graph that addresses this gap. EventKG incorporates over 690 thousand contemporary and historical events and over 2.3 million temporal relations extracted from several large-scale knowledge graphs and semi-structured sources and makes them available through a canonical representation.|sl:arxiv_author|Elena Demidova -RDF-in-JSON|skos:broader|JSON -Exploration marsienne|skos:broader|Missions spatiales -BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|sl:tag|tag:intent_classification_and_slot_filling -SW online tools|skos:broader|Semantic Web : Tools -One-Shot Learning|skos:broader|Few-shot learning -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. 
We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|sl:arxiv_author|Jeremy Howard -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:arxiv_author|Jaime Carbonell -Taiwan|skos:broader|Asie -Lee Sedol|skos:broader|Go (Game) -AI: books & journals|skos:broader|Artificial Intelligence -Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.|sl:arxiv_author|Dhanush Bekal -Inde moderne|skos:broader|Inde -Criquet|skos:broader|Insecte -Google Hummingbird|skos:broader|Google -Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. 
There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|sl:arxiv_author|Gabrielle Ras -Film italien|skos:broader|Film -Recommended reading |skos:broader|Livre à lire -Flippant|skos:broader|Ca craint -Rare events|skos:broader|Machine learning: problems -Hydrogen Cars|skos:broader|Hydrogen economy -EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. Presented in these [slides](/doc/2019/12/unsupervised_learning_with_text) Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. The presence of a concept is decided from an excerpt i.e. a small sequence of consecutive words in the text. Relevant concepts for the prediction task at hand are automatically defined by our model, avoiding the need for concept-level annotations. To ease interpretability, we enforce that for each concept, the corresponding excerpts share similar semantics and are differentiable from each others. 
We experimentally demonstrate the relevance of our approach on text classification and multi-sentiment analysis tasks.|sl:arxiv_author|Diane Bouchacourt -Separation of man and ape|skos:broader|Origines de l'homme -OKKAM|skos:broader|Semantic Web : Application -FAQ|skos:broader|Howto, tutorial, FAQ -Diacritics in URI|skos:broader|URI -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties we include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in a the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:tag|tag:ai_knowledge_bases -RDF Schema inferencing|skos:broader|Inference -Courtadon|skos:broader|Pierre de Volvic -FN|skos:broader|Extrème droite -NG4J|skos:broader|Named Graphs -Stanford NER|skos:broader|Conditional random fields -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. 
Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:tag|tag:nearest_neighbor_search -Neuroscience|skos:broader|Brain -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. 
This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:tag|tag:arxiv_doc -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|sl:arxiv_author|William W. Cohen -Diplomatie américaine|skos:broader|Diplomatie -Test ADN de filiation|skos:broader|ADN -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:arxiv_author|Xin Jiang -Liberté de la presse|skos:broader|Liberté -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. 
We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:arxiv_author|Rakshit Trivedi -Mladic|skos:broader|Serbie -Poker|skos:broader|Jeux -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|sl:arxiv_author|Piotr Indyk -Deep Patent Landscaping Model Using Transformer and Graph Embedding a transformer encoder for analyzing textual data present in patent documents and a graph convolutional network for analyzing patent metadata. A benchmarking dataset for patent landscaping based on patent trends reports published by the Korean Patent Office. Data acquisition using Google's BigQuery public datasets. 10% improvement comparing to Google’s proposed Automated Patent Landscaping. Empirical analysis of the importance of features (text vs metadata, citations vs classification) Patent landscaping is a method used for searching related patents during a research and development (R&D) project. To avoid the risk of patent infringement and to follow current trends in technology, patent landscaping is a crucial task required during the early stages of an R&D project. As the process of patent landscaping requires advanced resources and can be tedious, the demand for automated patent landscaping has been gradually increasing. 
However, a shortage of well-defined benchmark datasets and comparable models makes it difficult to find related research studies. In this paper, we propose an automated patent landscaping model based on deep learning. To analyze the text of patents, the proposed model uses a modified transformer structure. To analyze the metadata of patents, we propose a graph embedding method that uses a diffusion graph called Diff2Vec. Furthermore, we introduce four benchmark datasets for comparing related research studies in patent landscaping. The datasets are produced by querying Google BigQuery, based on a search formula from a Korean patent attorney. The obtained results indicate that the proposed model and datasets can attain state-of-the-art performance, as compared with current patent landscaping models.|sl:tag|tag:graph_convolutional_networks -Apache web server|skos:broader|Open Source -RDF2RDFa|skos:broader|RDFa -Online Course Materials|skos:broader|MOOC -Mathieu d'Aquin|skos:broader|SW guys (and girls) -Clustering|skos:broader|Machine learning: problems -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:tag|tag:neural_machine_translation -Antiwork|skos:broader|Travail -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. 
Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:arxiv_author|Vage Egiazarian -Belgique|skos:broader|Pays d'Europe -LODr|skos:broader|Semanlink related -Jeopardy|skos:broader|Jeux -Gina Lollobrigida|skos:broader|Actrice -Feature selection|skos:broader|Features (Machine Learning) -general implementation of (arbitrary order) linear chain Conditional Random Field (CRF) sequence models |skos:broader|Class of statistical modelling method for structured prediction ( prediction of structured objects, rather than scalar) that can take context into account; e.g., in NLP, the linear chain CRF predicts sequences of labels for sequences of input samples (take as input a set of features for each token in a sentence, and learn to predict an optimal sequence of labels for the full sentence) Applications in POS Tagging, shallow parsing, named entity recognition, gene finding -QuickTime|skos:broader|Media Player -Ranking SVM|skos:broader|Learning to rank -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|sl:tag|tag:word2vec -k-nearest neighbors algorithm|skos:broader|Algorithmes -Semantic Statistics|skos:broader|RDF -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. 
For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Yun-Hsuan Sung -Wikilinks Corpus|skos:broader|Google Research -Neural networks|skos:broader|Artificial Intelligence -SPARQL AND Jena|skos:broader|Jena -Distant reading|skos:broader|Livre -Semantic Web : Portal|skos:broader|Semantic Web -Archéologie chinoise|skos:broader|Chine -Genetically Engineered Micro and Nanodevices|skos:broader|DNA nanotechnology -Credit default swap|skos:broader|Marchés financiers -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|sl:tag|tag:guillaume_lample -Semantic Web Products|skos:broader|Semantic Web -Multimedia|skos:broader|Technologie -Bijan Parsia|skos:broader|Technical girls and guys -Steve Jobs|skos:broader|Homme célèbre -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. 
After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties we include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in a the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:tag|tag:nn_symbolic_ai_hybridation -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties we include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in a the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. 
Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:tag|tag:knowledge_augmented_language_models -Semantic Web : UI|skos:broader|UI -Driverless car|skos:broader|Automobile -TripleStore|skos:broader|RDF -Consciousness Prior|skos:broader|Representation learning -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_author|Hassan Ismail Fawaz -IKS Workshop Salzburg 2012|skos:broader|Workshop -Duck Typing|skos:broader|Type system -Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. 
In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:arxiv_author|Federico Bianchi -Messenger|skos:broader|NASA -Disque à retrouver|skos:broader|Musique -LODr|skos:broader|Linked Data -RDF editor|skos:broader|RDF Tools -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|sl:arxiv_author|German I. Parisi -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. 
We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Peter W. Battaglia -A Theoretical Analysis of Contrastive Unsupervised Representation Learning [blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) Recent empirical works have successfully used unlabeled data to learn feature representations that are broadly useful in downstream classification tasks. Several of these methods are reminiscent of the well-known word2vec embedding algorithm: leveraging availability of pairs of semantically similar data points and negative samples, the learner forces the inner product of representations of similar pairs with each other to be higher on average than with negative samples. The current paper uses the term contrastive learning for such algorithms and presents a theoretical framework for analyzing them by introducing latent classes and hypothesizing that semantically similar points are sampled from the same latent class. This framework allows us to show provable guarantees on the performance of the learned representations on the average classification task that is comprised of a subset of the same set of latent classes. Our generalization bound also shows that learned representations can reduce (labeled) sample complexity on downstream tasks. We conduct controlled experiments in both the text and image domains to support the theory.|sl:arxiv_author|Nikunj Saunshi -Le Pen|skos:broader|Immigration -Sustainable materials lifecycle|skos:broader|Économie écologique -ElasticSearch: annotated text field|skos:broader|ElasticSearch -Insecticide|skos:broader|Insecte -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. 
It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:arxiv_author|Ming-Wei Chang -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommandé par [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|sl:arxiv_author|Dan Svenstrup -fast.ai|skos:broader|Online Course Materials -Graph database|skos:broader|Database -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties we include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in a the symbolic knowledge graph. 
TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:arxiv_firstAuthor|Pat Verga -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|sl:arxiv_author|Rami Al-Rfou -Jardinage|skos:broader|Divers -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hum, j'ai déjà vu ça quelque part) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|sl:tag|tag:arxiv_doc -Recherche française|skos:broader|Recherche -Deep Patent Landscaping Model Using Transformer and Graph Embedding a transformer encoder for analyzing textual data present in patent documents and a graph convolutional network for analyzing patent metadata. A benchmarking dataset for patent landscaping based on patent trends reports published by the Korean Patent Office. 
Data acquisition using Google's BigQuery public datasets. A 10% improvement compared with Google’s proposed Automated Patent Landscaping. Empirical analysis of the importance of features (text vs metadata, citations vs classification). Patent landscaping is a method used for searching related patents during a research and development (R&D) project. To avoid the risk of patent infringement and to follow current trends in technology, patent landscaping is a crucial task required during the early stages of an R&D project. As the process of patent landscaping requires advanced resources and can be tedious, the demand for automated patent landscaping has been gradually increasing. However, a shortage of well-defined benchmark datasets and comparable models makes it difficult to find related research studies. In this paper, we propose an automated patent landscaping model based on deep learning. To analyze the text of patents, the proposed model uses a modified transformer structure. To analyze the metadata of patents, we propose a graph embedding method that uses a diffusion graph called Diff2Vec. Furthermore, we introduce four benchmark datasets for comparing related research studies in patent landscaping. The datasets are produced by querying Google BigQuery, based on a search formula from a Korean patent attorney. The obtained results indicate that the proposed model and datasets can attain state-of-the-art performance, as compared with current patent landscaping models.|sl:arxiv_firstAuthor|Seokkyu Choi -Javascript RDF|skos:broader|JavaScript -Distilling the Knowledge in a Neural Network a different kind of training, which we call “distillation”, to transfer the knowledge from the cumbersome model to a small model that is more suitable for deployment. Caruana and his collaborators have shown that it is possible to compress the knowledge in an [#ensemble](/tag/ensemble_learning.html) into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST. A very simple way to improve the performance of almost any machine learning algorithm is to train many different models on the same data and then to average their predictions. Unfortunately, making predictions using a whole ensemble of models is cumbersome and may be too computationally expensive to allow deployment to a large number of users, especially if the individual models are large neural nets. Caruana and his collaborators have shown that it is possible to compress the knowledge in an ensemble into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST and we show that we can significantly improve the acoustic model of a heavily used commercial system by distilling the knowledge in an ensemble of models into a single model. We also introduce a new type of ensemble composed of one or more full models and many specialist models which learn to distinguish fine-grained classes that the full models confuse. Unlike a mixture of experts, these specialist models can be trained rapidly and in parallel.|sl:arxiv_firstAuthor|Geoffrey Hinton -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award.
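The distillation entry above comes down to a single loss: soft targets from the teacher at a temperature T, mixed with the usual hard-label cross-entropy. A minimal PyTorch sketch (the values of `T` and `alpha` are illustrative assumptions):

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels, T=4.0, alpha=0.5):
    """Hinton-style distillation sketch: KL between temperature-softened
    teacher and student distributions, plus ordinary cross-entropy on labels."""
    soft_targets = F.softmax(teacher_logits / T, dim=-1)
    soft_loss = F.kl_div(
        F.log_softmax(student_logits / T, dim=-1),
        soft_targets,
        reduction="batchmean",
    ) * (T * T)  # scale by T^2 so gradient magnitudes stay comparable
    hard_loss = F.cross_entropy(student_logits, labels)
    return alpha * soft_loss + (1 - alpha) * hard_loss
```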
[Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|sl:arxiv_author|Xiang Lisa Li -Ondes gravitationnelles|skos:broader|Gravitation -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|sl:arxiv_author|Zhiguo Wang -Etudes scientifiques|skos:broader|Enseignement -Chine : technologie|skos:broader|Chine -Ex URSS URSS|skos:broader|Europe -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. 
For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made a Python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|sl:tag|tag:regex -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:arxiv_author|Sujay K. Jauhar -Blogs Le Monde|skos:broader|Blog -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts.
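The retrofitting entry above has a simple iterative update behind it: each word vector is pulled toward its lexicon neighbours while staying anchored to its original embedding. A numpy sketch of that update, assuming uniform weights (the paper allows per-edge weights):

```python
import numpy as np

def retrofit(vectors, lexicon, iterations=10):
    """Sketch of the retrofitting update from Faruqui et al.
    vectors: {word: np.ndarray}, lexicon: {word: [semantically linked words]}."""
    new_vecs = {w: v.copy() for w, v in vectors.items()}
    for _ in range(iterations):
        for word, neighbours in lexicon.items():
            neighbours = [n for n in neighbours if n in new_vecs]
            if word not in new_vecs or not neighbours:
                continue
            # linked words pull the vector; the original embedding anchors it
            neighbour_sum = np.sum([new_vecs[n] for n in neighbours], axis=0)
            new_vecs[word] = (neighbour_sum + len(neighbours) * vectors[word]) / (
                2 * len(neighbours)
            )
    return new_vecs
```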
To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.|sl:arxiv_author|Minh-Thang Luong -NLP: Français|skos:broader|Natural Language Processing -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. Decouples learning contexts and words: instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|sl:arxiv_author|Patrick H. Chen -Peinture rupestre|skos:broader|Rockart -Beijing Genomics Institute|skos:broader|Clonage -WSDL|skos:broader|Web Services -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks.
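One way to realize the softmax-bypass idea from the entry above (Efficient Contextual Representation Learning) is to regress the frozen pretrained embedding of the missing word instead of predicting a distribution over the vocabulary. A sketch; the cosine objective and all names here are assumptions, not the paper's exact SEMFIT formulation:

```python
import torch
import torch.nn.functional as F

def embedding_prediction_loss(context_vecs, target_ids, pretrained_emb):
    """Instead of a softmax over the full vocabulary, predict the (frozen)
    pretrained embedding of the missing word and minimise a cosine loss."""
    targets = pretrained_emb[target_ids].detach()  # frozen pretrained word vectors
    return (1 - F.cosine_similarity(context_vecs, targets, dim=-1)).mean()
```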
With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. Mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture. We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_firstAuthor|Daniel Cer -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|sl:arxiv_author|Jiateng Xie -Tandja|skos:broader|Niger -Lexical Resource Text corpus Text corpora|skos:broader|Natural Language Processing -Java 8 lambdas|skos:broader|Java 8 -XTech 2007|skos:broader|XTech -Néolithique|skos:broader|Archéologie -Juan Sequeda|skos:broader|SW guys (and girls) -Aho–Corasick algorithm|skos:broader|String-searching algorithm -Stefan Zweig|skos:broader|Grand Homme -Tomas Mikolov|skos:broader|AI girls and guys -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories.
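The Universal Sentence Encoder entry above notes the pre-trained models are published on TF Hub; a minimal usage sketch, assuming the commonly documented module path for the v4 model:

```python
import tensorflow_hub as hub

# Load the pre-trained encoder from TF Hub (URL assumed from public docs).
embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
vectors = embed(["How old are you?", "What is your age?"])  # shape (2, 512)
# Cosine similarity between the two rows gives a transfer-ready similarity score.
```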
Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:arxiv_author|Artur Garcez -Réparation automobile|skos:broader|Automobile -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|sl:tag|tag:automl -Duck Typing|skos:broader|Programming -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. 
Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:arxiv_author|Artem Babenko -BBC semantic publishing|skos:broader|semantic web sites -Logiciel libre|skos:broader|Open Source -Wikipedia page to concept|skos:broader|Wikipedia -Tomcat|skos:broader|apache.org -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent results in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English datasets. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent results in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English datasets. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|sl:tag|tag:lstm_networks -What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors.
We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:tag|tag:deep_learning_attention -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|sl:tag|tag:survey -RFID passports|skos:broader|Big Brother -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|sl:arxiv_author|Hugo Cui -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. 
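The kind of attention analysis described in the What Does BERT Look At? entry above starts from the raw attention maps, which the Hugging Face transformers library exposes directly. A small sketch (model name and printed shape are illustrative):

```python
import torch
from transformers import AutoModel, AutoTokenizer

# Inspect BERT's per-layer, per-head attention maps.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased", output_attentions=True)

inputs = tokenizer("The dog chased the ball.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# outputs.attentions: one tensor per layer, each (batch, heads, seq_len, seq_len)
layer0 = outputs.attentions[0]
print(layer0.shape)  # e.g. torch.Size([1, 12, 8, 8])
```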
[Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance. Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748). Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, this reminds me of something.) Assumes each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, it only attempts to find a single parent node for each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept.
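The grouped InfoNCE objective in the TaxoExpan entry above amounts to a softmax cross-entropy over one positive anchor and sampled negatives for each query concept. A minimal sketch; the dot-product scorer and the positive-at-index-0 convention are assumptions:

```python
import torch
import torch.nn.functional as F

def grouped_infonce_loss(query_vec, anchor_vecs):
    """For one query concept, score one positive anchor (index 0 by convention
    here) against sampled negatives and apply cross-entropy over the group."""
    scores = anchor_vecs @ query_vec          # (1 + num_negatives,) matching scores
    target = torch.tensor(0)                  # the positive anchor sits at index 0
    return F.cross_entropy(scores.unsqueeze(0), target.unsqueeze(0))
```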
We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:arxiv_author|Kuansan Wang -Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. Two methods: 1. Category Embedding model: it replaces the entities in the context with their directly labeled categories to build categories’ context; 2. Hierarchical Category Embedding: it further incorporates all ancestor categories of the context entities to utilize the hierarchical information. Due to the lack of structured knowledge applied in learning distributed representation of categories, existing work cannot incorporate category hierarchies into entity information. We propose a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. The framework allows to compute meaningful semantic relatedness between entities and categories. Our framework can handle both single-word concepts and multiple-word concepts with superior performance on concept categorization and yield state of the art results on dataless hierarchical classification.|sl:arxiv_author|Tian Tian -Lynda Tamine|skos:broader|NLP girls and guys -Variabilité du génome humain|skos:broader|Génétique humaine -Using word embeddings|skos:broader|Word embeddings -Hollywood|skos:broader|Content industries -Conjecture de Poincaré|skos:broader|Grands problèmes mathématiques -Machine learning: problems|skos:broader|Machine learning -Doc by Google|skos:broader|Google -Prix Nobel de physique|skos:broader|Physique -Pre-Trained Language Models|skos:broader|NLP techniques -Goldman Sachs|skos:broader|Finance -W3C Note|skos:broader|W3C -Nombres premiers|skos:broader|Mathématiques -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing.
This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:arxiv_author|Luis Lamb -Internet en Afrique|skos:broader|Internet -Rosetta|skos:broader|Missions spatiales -Object Oriented Programming|skos:broader|Dev -Decision tree learning|skos:broader|Machine learning: techniques -Mutual information|skos:broader|Information theory -Government data|skos:broader|Public data -Pollueurs payeurs|skos:broader|Économie écologique -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|sl:arxiv_author|Sebastian Riedel -Scientologie|skos:broader|Manipulation -Fado tropical|skos:broader|Conquistadores -Emmanuelle Bernes|skos:broader|SW guys (and girls) -Sarraounia Mangou|skos:broader|Empire colonial français -A Review of Relational Machine Learning for Knowledge Graphs Relational machine learning studies methods for the statistical analysis of relational, or graph-structured, data. In this paper, we provide a review of how such statistical models can be trained on large knowledge graphs, and then used to predict new facts about the world (which is equivalent to predicting new edges in the graph). In particular, we discuss two fundamentally different kinds of statistical relational models, both of which can scale to massive datasets. The first is based on latent feature models such as tensor factorization and multiway neural networks. The second is based on mining observable patterns in the graph. We also show how to combine these latent and observable models to get improved modeling power at decreased computational cost. Finally, we discuss how such statistical models of graphs can be combined with text-based information extraction methods for automatically constructing knowledge graphs from the Web. To this end, we also discuss Google's Knowledge Vault project as an example of such combination.|sl:tag|tag:statistical_relational_learning -Minimum wage|skos:broader|Bas salaires -Scientifique|skos:broader|Science -FastText|skos:broader|Text Classification -Axel Ngonga|skos:broader|SW guys (and girls) -Hypermedia driven web APIs. The basic idea behind Hydra is to provide a **vocabulary which enables a server to advertise valid state transitions to a client**.
Since all the information about the valid state transitions is exchanged in a machine-processable way at runtime instead of being hardcoded into the client at design time, clients can be decoupled from the server and adapt to changes more easily.|skos:broader|Hypermedia as the Engine of Application State -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks. In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:tag|tag:multi_task_learning -Histoire de la Chine|skos:broader|Histoire -k-means clustering|skos:broader|Clustering -IMS VDEX|skos:broader|Thesaurus -LD|skos:broader|Web sémantique sw -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme used in Word2Vec. Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in natural language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|sl:arxiv_author|Xin-Yu Dai -Semantic Web Services vs SOAP|skos:broader|SOAP -Krakatoa|skos:broader|Indonésie -Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results.
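The relational-ML review entry above mentions latent feature models such as tensor factorization; as one concrete instance, a DistMult-style trilinear scorer for triple plausibility, which is also the kind of scoring function this repository's models implement. A sketch with illustrative sizes:

```python
import torch

# Latent-feature (tensor factorization) scorer in the DistMult family.
num_entities, num_relations, dim = 1000, 40, 64  # illustrative sizes
entity_emb = torch.nn.Embedding(num_entities, dim)
relation_emb = torch.nn.Embedding(num_relations, dim)

def score(h, r, t):
    """Plausibility of the triple (h, r, t): trilinear product <e_h, w_r, e_t>."""
    return (entity_emb(h) * relation_emb(r) * entity_emb(t)).sum(dim=-1)
```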
This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|sl:arxiv_firstAuthor|Michael Fop -AI 4 IP|skos:broader|IA/ML: domaines d'application -Terrorisme islamiste|skos:broader|Guerres de religion -Océan indien|skos:broader|Océan -js|skos:broader|Langage de programmation -BRAIN Initiative|skos:broader|Neuroscience -Steve Cayzer|skos:broader|Technical girls and guys -Dynamic Semantic Publishing|skos:broader|Linked Data -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. 
This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:tag|tag:facebook_fair -H5N1|skos:broader|Virus -Outlier Detection|skos:broader|Machine learning: problems -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Jessica B. Hamrick -Bart van Leeuwen|skos:broader|Firefighter -Relational inductive bias|skos:broader|Learning bias -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. 
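The SEALS entry above restricts each active-learning round to the neighbours of the labeled set instead of scanning all unlabeled data. A sketch with sklearn's NearestNeighbors; `k` and the index choice are illustrative assumptions (the paper mentions Annoy and Faiss for scale):

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors

def seals_candidates(embeddings, labeled_idx, k=100):
    """SEALS-style candidate selection: only the nearest neighbours of
    already-labeled examples enter the pool scored by the acquisition function."""
    index = NearestNeighbors(n_neighbors=k).fit(embeddings)
    _, nbrs = index.kneighbors(embeddings[labeled_idx])
    return np.setdiff1d(np.unique(nbrs), labeled_idx)  # unlabeled neighbours only
```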
In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|sl:arxiv_author|Volker Tresp -Identification of similar documents|skos:broader|Text Similarity -Apes|skos:broader|Endangered Species -SIMILE|skos:broader|Semantic Web : Application -Cliqz|skos:broader|Search Engines -RapidMiner|skos:broader|Analyse sémantique -Semantic Web: CRM|skos:broader|Semantic Web -Laurent Lafforgue|skos:broader|Médaille Fields -Turquie|skos:broader|Asie -Jermakoye|skos:broader|Dosso -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling. In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|sl:tag|tag:language_model -A Survey Of Cross-lingual Word Embedding Models Cross-lingual representations of words enable us to reason about word meaning in multilingual contexts and are a key facilitator of cross-lingual transfer when developing natural language processing models for low-resource languages. In this survey, we provide a comprehensive typology of cross-lingual word embedding models. We compare their data requirements and objective functions. The recurring theme of the survey is that many of the models presented in the literature optimize for the same objectives, and that seemingly different models are often equivalent modulo optimization strategies, hyper-parameters, and such. We also discuss the different ways cross-lingual word embeddings are evaluated, as well as future challenges and research horizons.|sl:arxiv_author|Ivan Vulić -Text feature extraction|skos:broader|General NLP tasks -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverages entity linkage and builds an efficient multi-task training procedure. We posit that combining the graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks. Knowledge graphs have emerged as an important model for studying complex multi-relational data.
This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverages entity linkage and builds an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:arxiv_author|Christos Faloutsos -Deep Unsupervised Learning|skos:broader|Unsupervised machine learning -Catastrophe naturelle|skos:broader|Catastrophe -NLP: Reading Comprehension|skos:broader|NLP tasks / problems -email classification|skos:broader|email -FBI v. Apple|skos:broader|Tim Cook -Zika|skos:broader|Moustique -China's Social Credit System|skos:broader|Reputation system -Cloud based LOD platform|skos:broader|Linked Data Platform -General NLP tasks|skos:broader|NLP tasks / problems -One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|sl:tag|tag:arxiv_doc -Syngenta|skos:broader|Biotech industry -Fichage génétique|skos:broader|Big Brother -TopQuadrant|skos:broader|Semantic web company -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF system. Most state-of-the-art approaches for named-entity recognition (NER) use semi-supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings.
In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|sl:arxiv_author|Vineet Kumar -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|sl:tag|tag:arxiv_doc -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:tag|tag:axel_polleres -Self-Taught Hashing for Fast Similarity Search Emphasises the following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Proposes the following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem.
In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|sl:arxiv_author|Deng Cai -Bayesian analysis|skos:broader|Artificial Intelligence -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|sl:arxiv_firstAuthor|Hamed Zamani -the task of grouping a set of objects in such a way that objects in the same group (cluster) are more similar (in some sense or another) to each other than to those in other groups. |skos:broader|In machine learning, unsupervised learning refers to the problem of trying to find hidden structure in unlabeled data -Industrie nucléaire|skos:broader|Energie -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|sl:tag|tag:conditional_random_field -Term Frequency-Inverse Document Frequency. major limitations: - It computes document similarity directly in the word-count space, which could be slow for large vocabularies. - It assumes that the counts of different words provide independent evidence of similarity. - It makes no use of semantic similarities between words. |skos:broader|Algebraic model for representing text documents as vectors of identifiers such as index terms. Documents and queries are represented as vectors. Each dimension corresponds to a separate term. If a term occurs in the document, its value in the vector is non-zero.
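The Self-Taught Hashing record above is a two-stage recipe: unsupervised $l$-bit binary codes for the corpus, then $l$ binary classifiers to hash queries unseen at indexing time. A minimal sketch of that recipe, assuming scikit-learn is available (SpectralEmbedding and LinearSVC stand in for the paper's binarised Laplacian Eigenmap and linear SVM; corpus and bit count are toy values):

```python
# Sketch of Self-Taught Hashing: (1) unsupervised l-bit codes for the corpus,
# (2) l binary classifiers that predict each bit for unseen documents.
# SpectralEmbedding approximates the Laplacian Eigenmap step; toy corpus.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import SpectralEmbedding
from sklearn.svm import LinearSVC

corpus = ["fast similarity search", "semantic hashing of documents",
          "binary codes for retrieval", "hamming distance search",
          "unsupervised learning of codes", "support vector machines"]
n_bits = 4

vec = TfidfVectorizer()
X = vec.fit_transform(corpus).toarray()

# Stage 1: unsupervised low-dimensional embedding, binarised at the per-bit median.
emb = SpectralEmbedding(n_components=n_bits, affinity="rbf").fit_transform(X)
codes = (emb > np.median(emb, axis=0)).astype(int)

# Stage 2: one linear classifier per bit hashes any unseen query document.
classifiers = [LinearSVC().fit(X, codes[:, b]) for b in range(n_bits)]

def hash_query(text):
    x = vec.transform([text]).toarray()
    return np.array([clf.predict(x)[0] for clf in classifiers])

print(hash_query("similarity search with binary codes"))
```

Median binarisation keeps each bit balanced over the corpus, which is what makes the per-bit classification problems well posed.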
One way of computing the value: TF-IDF -CNN 4 NLP|skos:broader|Convolutional neural network -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:arxiv_author|Dan Jurafsky -Socrate|skos:broader|Grand Homme -EC-Web'14|skos:broader|Munich -Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning.
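The kNN-LM record above comes down to one interpolation, p(y|x) = λ·p_kNN(y|x) + (1−λ)·p_LM(y|x), where p_kNN is a softmax over negative distances from the query context embedding to stored (context, next-token) pairs. A toy numpy sketch of that computation (the datastore, embeddings and λ below are made-up stand-ins, not the paper's Wikitext-103 setup):

```python
# Toy kNN-LM: interpolate a parametric LM distribution with a kNN distribution
# computed from a datastore of (context embedding, next-token) pairs.
import numpy as np

vocab_size, k, lam = 5, 3, 0.25
rng = np.random.default_rng(0)

keys = rng.normal(size=(10, 8))            # stored context embeddings
values = rng.integers(0, vocab_size, 10)   # token that followed each context

def knn_lm_probs(query, p_lm):
    d = np.linalg.norm(keys - query, axis=1)   # distance to every stored context
    nn = np.argsort(d)[:k]                     # k nearest neighbors
    w = np.exp(-d[nn]); w /= w.sum()           # softmax over negative distances
    p_knn = np.zeros(vocab_size)
    for weight, token in zip(w, values[nn]):   # aggregate mass per token
        p_knn[token] += weight
    return lam * p_knn + (1 - lam) * p_lm      # the kNN-LM interpolation

p_lm = np.full(vocab_size, 1 / vocab_size)     # pretend LM output
print(knn_lm_probs(rng.normal(size=8), p_lm))
```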
The guide is designed as an easy-to-digest starting point for those just embarking in the field.|sl:tag|tag:survey -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:tag|tag:bert -HTTP Cache|skos:broader|Cache -No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity... Traditionally, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer’s output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity.
Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point $x$ is similar to a set of positive points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize. A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow. In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved. As a result, the proxy-loss improves on state-of-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:arxiv_author|Yair Movshovitz-Attias -NLP@Microsoft|skos:broader|NLP Teams -Programming language|skos:broader|Dev -Sparse coding|skos:broader|Feature learning -Sommet de Copenhague|skos:broader|Négociations climat -Mai 68|skos:broader|Contestation -KBPedia|skos:broader|Knowledge-based AI -Knowledge Graph + Deep Learning|skos:broader|Knowledge Graphs -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|sl:arxiv_author|Zhilin Yang -Google über alles|skos:broader|Google -Deep Learning and the Information Bottleneck Principle Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN. Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. 
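The proxies record that closes above replaces triplet mining with one learned proxy per class; a common statement of the Proxy-NCA loss is −log(exp(−d(x, p_y)) / Σ_{z≠y} exp(−d(x, p_z))). A PyTorch sketch under that formulation (the normalisation and dimensions are illustrative choices, not necessarily the paper's exact recipe):

```python
# Sketch of a Proxy-NCA style loss: one trainable proxy per class; an anchor
# is pulled toward its class proxy and pushed from all other proxies.
import torch
import torch.nn.functional as F

class ProxyNCA(torch.nn.Module):
    def __init__(self, n_classes, dim):
        super().__init__()
        self.proxies = torch.nn.Parameter(torch.randn(n_classes, dim))

    def forward(self, embeddings, labels):
        x = F.normalize(embeddings, dim=1)
        p = F.normalize(self.proxies, dim=1)
        d = torch.cdist(x, p) ** 2                   # squared distance to every proxy
        idx = torch.arange(len(labels))
        pos = d[idx, labels]                         # distance to the true-class proxy
        neg = d.clone()
        neg[idx, labels] = float("inf")              # exclude the positive proxy
        # loss = -log( exp(-d_pos) / sum_z exp(-d_neg_z) )
        return (pos + torch.logsumexp(-neg, dim=1)).mean()

loss_fn = ProxyNCA(n_classes=10, dim=64)
emb = torch.randn(32, 64, requires_grad=True)
labels = torch.randint(0, 10, (32,))
loss = loss_fn(emb, labels)
loss.backward()
```

Because the proxies are trained jointly, no resampling of informative triplets is needed after each update, which is the efficiency argument the record makes.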
We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN and obtain finite sample generalization bounds. The advantage of getting closer to the theoretical limit is quantifiable both by the generalization bound and by the network's simplicity. We argue that both the optimal architecture, number of layers and features/connections at each layer, are related to the bifurcation points of the information bottleneck tradeoff, namely, relevant compression of the input layer with respect to the output layer. The hierarchical representations at the layered network naturally correspond to the structural phase transitions along the information curve. We believe that this new insight can lead to new optimality bounds and deep learning algorithms.|sl:tag|tag:arxiv_doc -Intent detection|skos:broader|NLP tasks / problems -Bart van Leeuwen|skos:broader|SW guys (and girls) -Distributional semantics|skos:broader|Analyse sémantique -TAP|skos:broader|Guha -gnowsis|skos:broader|Leo Sauermann -JUnit|skos:broader|Java dev -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:tag|tag:zero_shot_learning -BlackboxNLP (2018 workshop)|skos:broader|Blackbox NLP -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. 
We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Chris Dyer -Neural machine translation|skos:broader|Machine translation -Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study data redundancy (reverse relations), Cartesian product relations A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world In the active research area of employing embedding models for knowledge graph completion, particularly for the task of link prediction, most prior studies used two benchmark datasets FB15k and WN18 in evaluating such models. Most triples in these and other datasets in such studies belong to reverse and duplicate relations which exhibit high data redundancy due to semantic duplication, correlation or data incompleteness. This is a case of excessive data leakage---a model is trained using features that otherwise would not be available when the model needs to be applied for real prediction. There are also Cartesian product relations for which every triple formed by the Cartesian product of applicable subjects and objects is a true fact. Link prediction on the aforementioned relations is easy and can be achieved with even better accuracy using straightforward rules instead of sophisticated embedding models. A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world. This paper is the first systematic study with the main objective of assessing the true effectiveness of embedding models when the unrealistic triples are removed. Our experiment results show these models are much less accurate than what we used to perceive. Their poor accuracy renders link prediction a task without truly effective automated solution. Hence, we call for re-investigation of possible effective approaches.|sl:arxiv_author|Farahnaz Akrami Department of Computer Science and Engineering, University of Texas at Arlington -Deep Learning and the Information Bottleneck Principle Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. 
Using this representation we can calculate the optimal information theoretic limits of the DNN. Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN and obtain finite sample generalization bounds. The advantage of getting closer to the theoretical limit is quantifiable both by the generalization bound and by the network's simplicity. We argue that both the optimal architecture, number of layers and features/connections at each layer, are related to the bifurcation points of the information bottleneck tradeoff, namely, relevant compression of the input layer with respect to the output layer. The hierarchical representations at the layered network naturally correspond to the structural phase transitions along the information curve. We believe that this new insight can lead to new optimality bounds and deep learning algorithms.|sl:tag|tag:information_theory_and_deep_learning -SWEO: Renault use case|skos:broader|SWEO Interest Group -Learning Sparse, Distributed Representations using the Hebbian Principle The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|sl:arxiv_firstAuthor|Aseem Wadhwa -Semantic Web : Business|skos:broader|Semantic Web -CKAN|skos:broader|Open Source -Parrot|skos:broader|OWL tool -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities.
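The Hebbian record above builds on competitive, winner-take-all Hebbian updates. The sketch below shows only that generic ingredient (the winning unit moves toward the input, Δw = η(x − w)), not the paper's Adaptive Hebbian Learning algorithm itself:

```python
# Generic winner-take-all competitive Hebbian learning (not the paper's AHL):
# each unit owns a weight vector; the unit closest to an input moves toward it,
# and the resulting one-hot activations form a sparse code.
import numpy as np

rng = np.random.default_rng(0)
n_units, dim, lr = 8, 16, 0.1
W = rng.normal(size=(n_units, dim))

for step in range(1000):
    x = rng.normal(size=dim)                           # online stream of inputs
    winner = np.argmin(np.linalg.norm(W - x, axis=1))  # competition
    W[winner] += lr * (x - W[winner])                  # Hebbian move toward input

def code(x):
    c = np.zeros(n_units)
    c[np.argmin(np.linalg.norm(W - x, axis=1))] = 1.0
    return c

print(code(rng.normal(size=dim)))
```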
After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties. We include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:arxiv_author|William W. Cohen -Crise financière|skos:broader|Finance -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed.
Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|sl:arxiv_firstAuthor|Zhiguo Wang -Drupal|skos:broader|CMS -jsFiddle|skos:broader|Javascript tool -Afrique australe|skos:broader|Afrique -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:arxiv_author|Emily Reif -Hierarchical text classification|skos:broader|Text Classification -Berkeley|skos:broader|Universités américaines -Cameroun|skos:broader|Afrique -Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study data redundancy (reverse relations), Cartesian product relations A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world In the active research area of employing embedding models for knowledge graph completion, particularly for the task of link prediction, most prior studies used two benchmark datasets FB15k and WN18 in evaluating such models. Most triples in these and other datasets in such studies belong to reverse and duplicate relations which exhibit high data redundancy due to semantic duplication, correlation or data incompleteness. This is a case of excessive data leakage---a model is trained using features that otherwise would not be available when the model needs to be applied for real prediction. There are also Cartesian product relations for which every triple formed by the Cartesian product of applicable subjects and objects is a true fact. Link prediction on the aforementioned relations is easy and can be achieved with even better accuracy using straightforward rules instead of sophisticated embedding models. A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world. This paper is the first systematic study with the main objective of assessing the true effectiveness of embedding models when the unrealistic triples are removed. Our experiment results show these models are much less accurate than what we used to perceive. Their poor accuracy renders link prediction a task without truly effective automated solution. 
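The short-text clustering record above spells out an explicit three-step alternating loop. A PyTorch sketch of steps (1)-(3) with toy stand-ins for the text encoder and features (the paper's neural text networks and labeled subset are not reproduced here):

```python
# Toy version of the three-step loop: (1) assign to nearest centroid,
# (2) re-estimate centroids, (3) update the encoder with both kept fixed.
import torch

n, dim, k = 200, 32, 4
X = torch.randn(n, dim)                    # stand-in for text features
encoder = torch.nn.Linear(dim, 16)         # stand-in for the neural text encoder
centroids = torch.randn(k, 16)
opt = torch.optim.Adam(encoder.parameters(), lr=1e-3)

for epoch in range(20):
    with torch.no_grad():
        z = encoder(X)
        assign = torch.cdist(z, centroids).argmin(dim=1)   # step (1)
        for j in range(k):                                 # step (2)
            if (assign == j).any():
                centroids[j] = z[assign == j].mean(dim=0)
    for _ in range(5):                                     # step (3)
        opt.zero_grad()
        loss = ((encoder(X) - centroids[assign]) ** 2).sum(dim=1).mean()
        loss.backward()
        opt.step()
```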
Hence, we call for re-investigation of possible effective approaches.|sl:tag|tag:link_prediction -Woody Allen|skos:broader|Réalisateur -Lynn Margulis|skos:broader|Biology -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Andrew Ballard -Embedding evaluation|skos:broader|ML: evaluation -Computes embeddings for the vertices of unlabeled graphs. DeepWalk bridges the gap between network embeddings and word embeddings by treating nodes as words and generating short random walks as sentences. Then, neural language models such as Skip-gram can be applied on these random walks to obtain network embedding.|skos:broader|How do we do node embeddings? ([source](http://snap.stanford.edu/proj/embeddings-www/index.html#materials)) Intuition: Find embedding of nodes so that “similar” nodes in the graph have embeddings that are close together. 1. Define an encoder (i.e., a mapping from nodes to embeddings) - Shallow embedding (simplest encoding approach): encoder is just an embedding-lookup. Ex: [node2vec](/tag/node2vec), DeepWalk, LINE 2. Define a node similarity function, eg. nodes are similar if: - they are connected? - they share neighbours? - have structural similar roles? - ... 3. 
Optimize the parameters of the encoder so that similarity in the embedding space (e.g., dot product) approximates similarity in the original network Defining similarity: - Adjacency-based Similarity - Multihop similarity (measure overlap between node neighborhoods) these two methods are expensive. - Random-walk Embeddings (Estimate probability of visiting node v on a random walk starting from node u using some random walk strategy, optimize embeddings to encode random walk statistics). Expressivity (incorporates both local and higher-order neighbourhood information) and efficiency (do not need to consider all pairs when training) Which random walk strategy? - fixed-length random walks starting from each node: DeepWalk (Perozzi et al., 2013) - biased random walks that can trade off between local and global views of the network: Node2Vec (Micro-view / marco-view of neighbourhood) No method wins in all the cases -SL Feature Request|skos:broader|SL todo -Learning to rank|skos:broader|Supervised machine learning -Sequence Modeling: CNN vs RNN|skos:broader|Sequence-to-sequence learning -Uncertainty in Deep Learning|skos:broader|Accountable AI -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. 
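The node-embedding entry that closes above treats truncated random walks as sentences for a skip-gram model, which is the DeepWalk construction it describes. A sketch assuming networkx and gensim, with all hyperparameters illustrative:

```python
# DeepWalk-style node embeddings: uniform random walks become "sentences"
# for a skip-gram Word2Vec model. Hyperparameters are illustrative.
import random
import networkx as nx
from gensim.models import Word2Vec

G = nx.karate_club_graph()

def random_walk(g, start, length=10):
    walk = [start]
    for _ in range(length - 1):
        walk.append(random.choice(list(g.neighbors(walk[-1]))))
    return [str(n) for n in walk]

walks = [random_walk(G, node) for node in G.nodes() for _ in range(20)]
model = Word2Vec(walks, vector_size=32, window=5, min_count=0, sg=1, epochs=5)

# nodes that co-occur on walks end up close in embedding space
print(model.wv.most_similar("0", topn=3))
```

Biasing the walk strategy instead of sampling neighbors uniformly recovers the Node2Vec variant the entry mentions.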
The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:tag|tag:consciousness_prior -Enhancing the Power of Cardinal's Algorithm Cardinal's factorization algorithm of 1996 splits a univariate polynomial into two factors with root sets separated by the imaginary axis, which is an important goal itself and a basic step toward root-finding. The novelty of the algorithm and its potential power have been well recognized by experts immediately, but by 2016, that is, two decades later, its practical value still remains nil, particularly because of the high computational cost of performing its final stage by means of computing approximate greatest common divisor of two polynomials. We briefly recall Cardinal's algorithm and its difficulties, amend it based on some works performed since 1996, extend its power to splitting out factors of a more general class, and reduce the final stage of the algorithm to quite manageable computations with structured matrices. Some of our techniques can be of independent interest for matrix computations.|sl:tag|tag:arxiv_doc -Displaying XML with css|skos:broader|XML -Cloud and Linked Data|skos:broader|Linked Data -RDFj|skos:broader|RDF-in-JSON -Recherche française|skos:broader|France -Multimodal Models|skos:broader|Machine learning: problems -REST|skos:broader|Roy T. Fielding -Clerezza|skos:broader|apache.org -Python sample code|skos:broader|Python -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. 
We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:arxiv_author|Charles Tapley Hoyt -Hypothèse de Riemann|skos:broader|Grands problèmes mathématiques -Slime mold|skos:broader|Curiosité naturelle -Read-Write Linked Data|skos:broader|Linked Data -A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|sl:tag|tag:knowledge -Libéralisme|skos:broader|Capitalisme -Alexandre Passant|skos:broader|SW guys (and girls) -de Broglie|skos:broader|Scientifique -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|sl:tag|tag:alibaba -Shoira Otabekova|skos:broader|Musique -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan.
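The PyKEEN benchmarking record above argues that model architecture, training approach and loss function matter jointly. As a concrete reference point, here is a minimal TransE scorer with the margin ranking loss it is classically paired with, in plain PyTorch (a toy sketch, not PyKEEN's implementation; their code lives at the linked repositories):

```python
# Minimal TransE: score(h, r, t) = -||h + r - t||, trained with a margin
# ranking loss against corrupted triples. Toy tensors, not PyKEEN's code.
import torch

n_entities, n_relations, dim, margin = 100, 10, 32, 1.0
E = torch.nn.Embedding(n_entities, dim)
R = torch.nn.Embedding(n_relations, dim)
opt = torch.optim.Adam(list(E.parameters()) + list(R.parameters()), lr=1e-3)

def score(h, r, t):
    return -(E(h) + R(r) - E(t)).norm(p=2, dim=1)

h = torch.randint(0, n_entities, (64,))
r = torch.randint(0, n_relations, (64,))
t = torch.randint(0, n_entities, (64,))
t_neg = torch.randint(0, n_entities, (64,))   # corrupted tails (negative sampling)

opt.zero_grad()
loss = torch.relu(margin - score(h, r, t) + score(h, r, t_neg)).mean()
loss.backward()
opt.step()
```

Swapping the loss, the negative sampler, or adding explicit inverse relations around this same scorer is exactly the kind of variation the benchmark shows can dominate the choice of architecture.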
(Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Claudia d'Amato -ESWC 2019|skos:broader|ESWC -PFIA 2018|skos:broader|AI Conference -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_author|François Petitjean -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective.
Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. Decouples learning contexts and words: instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|sl:arxiv_author|Liunian Harold Li -Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised "skeleton" table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|sl:arxiv_author|Mark Rowan -Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) meh More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines.
In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|sl:arxiv_author|Klaus-Robert Müller -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. [conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperform several popular clustering methods when tested on three public short text datasets.|sl:arxiv_author|Peng Wang
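The prior-art record above compares a patent application's full text against a corpus of existing patents. A natural baseline for such "feature extraction plus document comparison" is TF-IDF with cosine similarity, which is also the computation named in the vector-space-model entry a few lines up. A scikit-learn sketch over a made-up corpus:

```python
# Baseline full-text similarity search: TF-IDF vectors + cosine similarity,
# ranking a toy "patent corpus" against a query document.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

corpus = [
    "method for charging an electric vehicle battery",
    "apparatus for wireless power transmission",
    "battery management system for electric vehicles",
    "neural network accelerator hardware",
]
query = "electric vehicle battery charging system"

vec = TfidfVectorizer(stop_words="english")
D = vec.fit_transform(corpus)
q = vec.transform([query])

sims = cosine_similarity(q, D).ravel()
for i in sims.argsort()[::-1]:
    print(f"{sims[i]:.3f}  {corpus[i]}")
```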
Most triples in these and other datasets in such studies belong to reverse and duplicate relations which exhibit high data redundancy due to semantic duplication, correlation or data incompleteness. This is a case of excessive data leakage---a model is trained using features that otherwise would not be available when the model needs to be applied for real prediction. There are also Cartesian product relations for which every triple formed by the Cartesian product of applicable subjects and objects is a true fact. Link prediction on the aforementioned relations is easy and can be achieved with even better accuracy using straightforward rules instead of sophisticated embedding models. A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world. This paper is the first systematic study with the main objective of assessing the true effectiveness of embedding models when the unrealistic triples are removed. Our experiment results show these models are much less accurate than what we used to perceive. Their poor accuracy renders link prediction a task without truly effective automated solution. Hence, we call for re-investigation of possible effective approaches.|sl:tag|tag:embedding_evaluation -Bacterial to Animal Gene Transfer|skos:broader|Bacteria -Javascript RDF Parser|skos:broader|RDF Parser -Money|skos:broader|Finance -Antiquité iranienne|skos:broader|Antiquité -Genetics Génétique|skos:broader|Science -Jeux en ligne|skos:broader|Jeux -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:arxiv_author|Hervé Jégou -Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. 
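The word-translation record above aligns two monolingual embedding spaces. Given anchor pairs (which the paper ultimately obtains without any parallel data, via adversarial training plus refinement), the alignment step itself is orthogonal Procrustes. A numpy sketch with synthetic vectors:

```python
# Orthogonal Procrustes alignment of two embedding spaces: the refinement
# step applied once anchor pairs exist. Synthetic vectors for the demo.
import numpy as np

rng = np.random.default_rng(0)
d, n_pairs = 8, 50

X = rng.normal(size=(n_pairs, d))                  # source-language vectors
R_true, _ = np.linalg.qr(rng.normal(size=(d, d)))  # hidden rotation (demo only)
Y = X @ R_true                                     # corresponding target vectors

# argmin_R ||X R - Y||_F with R orthogonal: R = U V^T where U S V^T = SVD(X^T Y)
U, _, Vt = np.linalg.svd(X.T @ Y)
R = U @ Vt

print(np.allclose(X @ R, Y))   # the recovered mapping aligns the two spaces
```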
We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain up to 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|sl:tag|tag:arxiv_doc -Tag ontology|skos:broader|Semanlink related -Punk|skos:broader|Musique -Enterprise Knowledge Graph|skos:broader|Knowledge Graphs -Hip Hop|skos:broader|Musique -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluating whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|sl:arxiv_firstAuthor|Victor Sanh -matplotlib|skos:broader|Python -iphone|skos:broader|Smartphone -Cambridge Analytica|skos:broader|Privacy and internet -Java 1.5 Mac OS X|skos:broader|Mac OS X -Compatibilité Javascript|skos:broader|JavaScript -Génétique + Histoire|skos:broader|Histoire -VSO|skos:broader|GoodRelations -Open Source|skos:broader|Software -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding.
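An illustrative aside on the continuous-output idea in the von Mises-Fisher entry above: train the decoder to emit word *embeddings* rather than a softmax over the vocabulary. For brevity this sketch, assuming PyTorch, uses a plain cosine loss as a stand-in; the paper's actual loss is a probabilistic vMF loss, and all tensors here are toy placeholders.

```python
# Predict a pre-trained embedding instead of a distribution over the vocabulary.
import torch
import torch.nn.functional as F

vocab_size, emb_dim, hidden = 1000, 64, 128
pretrained = F.normalize(torch.randn(vocab_size, emb_dim), dim=-1)  # frozen table
out_proj = torch.nn.Linear(hidden, emb_dim)

decoder_state = torch.randn(32, hidden)           # toy decoder hidden states
target_ids = torch.randint(0, vocab_size, (32,))

pred = F.normalize(out_proj(decoder_state), dim=-1)
loss = (1 - F.cosine_similarity(pred, pretrained[target_ids])).mean()
loss.backward()

# Decoding: nearest neighbour of the predicted vector in the embedding table,
# avoiding the softmax over the full vocabulary entirely.
next_ids = (pred @ pretrained.T).argmax(dim=-1)
```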
Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:tag|tag:overfitting -Google Visualization API|skos:broader|Google -Income inequality|skos:broader|Inégalités -Insect collapse|skos:broader|Insecte -Acteur|skos:broader|Cinéma -PyTorch|skos:broader|Deep Learning frameworks -Stefano Mazzocchi|skos:broader|Technical girls and guys -Tiger|skos:broader|Mac OS X -Stack Overflow|skos:broader|FAQ -Masse manquante|skos:broader|Physique -Alexandria Ocasio-Cortez|skos:broader|USA -Antarctique|skos:broader|Régions polaires -Hypercard|skos:broader|Software -Sicile|skos:broader|Italie -Conjecture de Goldbach|skos:broader|Nombres premiers -[Proceedings](https://aclanthology.coli.uni-saarland.de/events/ws-2018#W18-54) the introduction of neural networks has typically come at the cost of our understanding of the system: what are the representations and computations that the network learns? The goal of this workshop is to bring together people who are attempting to peek inside the neural network black box, taking inspiration from machine learning, psychology, linguistics and neuroscience.|skos:broader|Conference on Empirical Methods in Natural Language Processing. [Proceedings](https://aclanthology.coli.uni-saarland.de/events/emnlp-2018) -A Theoretical Analysis of Contrastive Unsupervised Representation Learning [blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) Recent empirical works have successfully used unlabeled data to learn feature representations that are broadly useful in downstream classification tasks. Several of these methods are reminiscent of the well-known word2vec embedding algorithm: leveraging availability of pairs of semantically similar data points and negative samples, the learner forces the inner product of representations of similar pairs with each other to be higher on average than with negative samples. The current paper uses the term contrastive learning for such algorithms and presents a theoretical framework for analyzing them by introducing latent classes and hypothesizing that semantically similar points are sampled from the same latent class. This framework allows us to show provable guarantees on the performance of the learned representations on the average classification task that is comprised of a subset of the same set of latent classes. Our generalization bound also shows that learned representations can reduce (labeled) sample complexity on downstream tasks. 
We conduct controlled experiments in both the text and image domains to support the theory.|sl:tag|tag:arxiv_doc -ISWC|skos:broader|Semantic Web conferences -Google Patents|skos:broader|AI 4 IP -Pétrole et corruption|skos:broader|Pétrole -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|sl:tag|tag:mathematiques -Fossile vivant|skos:broader|Evolution -Struts|skos:broader|MVC -Sagesse du langage|skos:broader|Language -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|sl:tag|tag:arxiv_doc -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new *table position self-attention* to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types.
Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:arxiv_author|Kevin Knight -Mac Mini|skos:broader|Macintosh -Ramanujan|skos:broader|Mathématicien -Accident climatique|skos:broader|Climat -Astrophysique|skos:broader|Physique -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Problem with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. Two main branches of sense representation: unsupervised, and knowledge-based. Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|sl:arxiv_author|Mohammad Taher Pilehvar -Eclipse project|skos:broader|fps dev -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models vanishes and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation.
The token identity then gets recreated at the top MLM layers.|sl:arxiv_author|Elena Voita -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:tag|tag:minimum_description_length_principle -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. 
In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:arxiv_author|I. Zeki Yalniz -Private equity|skos:broader|Capitalisme financier -Propriété intellectuelle|skos:broader|Juridique -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|sl:tag|tag:recurrent_neural_network -HTTP PATCH|skos:broader|HTTP -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). Recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embedding vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training.
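An illustrative aside on the core SEALS trick from the entry above: restrict the active-learning candidate pool to the nearest neighbours of the labeled set instead of scoring every unlabeled example. The sketch assumes Faiss (one of the libraries the entry mentions); the embeddings are random placeholders.

```python
# Build the SEALS candidate pool: neighbours of the labeled examples only.
import numpy as np
import faiss

d = 128
unlabeled = np.random.randn(10_000, d).astype("float32")
labeled = np.random.randn(50, d).astype("float32")

index = faiss.IndexFlatIP(d)      # exact inner-product index, fine for a sketch
index.add(unlabeled)

# Candidate pool = union of the k nearest unlabeled neighbours of each labeled
# example; only these are scored by the selection strategy each round.
_, neighbour_ids = index.search(labeled, 100)
candidate_pool = np.unique(neighbour_ids)
print(f"scoring {candidate_pool.size} of {unlabeled.shape[0]} examples")
```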
We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|sl:arxiv_author|Jonas Meinertz Hansen -Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. These findings are significant as it suggests a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|sl:arxiv_author|Peter Clark -Protection de l'environnement|skos:broader|Environnement -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN).
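An illustrative aside on the hash-embedding idea from the entry above, heavily simplified: each token gets k component vectors chosen by k hash functions from a small shared pool of B vectors, combined by k token-specific importance weights. All sizes and the hash scheme here are arbitrary choices for the sketch, not the paper's settings.

```python
# Toy hash embedding: weighted sum of k pool vectors selected by hashing.
import numpy as np

B, k, d = 1000, 2, 50                 # shared pool size, hash count, embedding dim
pool = np.random.randn(B, k, d) * 0.1
num_weight_buckets = 10_000           # importance weights are also hash-addressed
weights = np.random.randn(num_weight_buckets, k) * 0.1

def hash_embedding(token: str) -> np.ndarray:
    # k "different" hash functions via seed-salted hashing; a real
    # implementation would use stable hashes such as murmurhash.
    components = np.stack(
        [pool[hash((seed, token)) % B, seed] for seed in range(k)]
    )                                  # shape (k, d)
    w = weights[hash(token) % num_weight_buckets]   # shape (k,)
    return w @ components              # weighted sum -> shape (d,)

vec = hash_embedding("dimensionality")
print(vec.shape)                       # (50,)
```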
It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made a Python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|sl:arxiv_firstAuthor|Vikash Singh -Time in RDF|skos:broader|Temps -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent results in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English datasets. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent results in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English datasets. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|sl:arxiv_author|Ke Tran -Bob DuCharme|skos:broader|Technical girls and guys -AdaBoost|skos:broader|Boosting -Denisovan|skos:broader|Paléontologie humaine -End-To-End Memory Networks Neural network with a recurrent attention model over a possibly large external memory. Cited by [#A. Bordes](/tag/antoine_bordes) at [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html) We introduce a neural network with a recurrent attention model over a possibly large external memory. The architecture is a form of Memory Network (Weston et al., 2015) but unlike the model in that work, it is trained end-to-end, and hence requires significantly less supervision during training, making it more generally applicable in realistic settings. It can also be seen as an extension of RNNsearch to the case where multiple computational steps (hops) are performed per output symbol.
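An illustrative aside showing the FlashText behaviour described in the entry above, using the authors' open-source `flashtext` package (`pip install flashtext`); the sample strings come from the entry itself.

```python
# Whole-word, longest-match-first keyword extraction and replacement in O(N).
from flashtext import KeywordProcessor

kp = KeywordProcessor()
kp.add_keyword("Machine")
kp.add_keyword("Learning")
kp.add_keyword("Machine learning")

# Longest match wins, and only whole words match -- a keyword 'Apple'
# would not match inside 'Pineapple'.
print(kp.extract_keywords("I like Machine learning"))   # ['Machine learning']

# Replacement works the same way, still one pass over the document.
kp_replace = KeywordProcessor()
kp_replace.add_keyword("Machine learning", "ML")
print(kp_replace.replace_keywords("I like Machine learning"))  # 'I like ML'
```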
The flexibility of the model allows us to apply it to tasks as diverse as (synthetic) question answering and to language modeling. For the former our approach is competitive with Memory Networks, but with less supervision. For the latter, on the Penn TreeBank and Text8 datasets our approach demonstrates comparable performance to RNNs and LSTMs. In both cases we show that the key concept of multiple computational hops yields improved results.|sl:tag|tag:deep_learning_attention -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new *table position self-attention* to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:arxiv_author|Lifu Huang -in cosine similarity (on binary attribute vectors), the number of common attributes is divided by the geometric mean of the two objects' attribute counts, whereas in Jaccard similarity, the number of common attributes is divided by the number of attributes that exist in at least one of the two objects.|skos:broader|Finding items that are similar to a given query is the core aspect of search and retrieval systems, as well as of recommendation engines. -DARPA Grand Challenge|skos:broader|Driverless car -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverages entity linkage and build an efficient multi-task training procedure. We posit that combining the graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks. Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverages entity linkage and build an efficient multi-task training procedure.
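An illustrative aside making the cosine-vs-Jaccard distinction from the entry above concrete, for objects represented as sets of attributes (i.e., binary vectors); the attribute sets are invented for the example.

```python
# Two set-based similarity measures over attribute sets.
import math

def cosine(a: set, b: set) -> float:
    # common attributes / geometric mean of the two attribute counts
    return len(a & b) / math.sqrt(len(a) * len(b))

def jaccard(a: set, b: set) -> float:
    # common attributes / attributes present in at least one of the objects
    return len(a & b) / len(a | b)

x = {"semantic", "web", "rdf"}
y = {"semantic", "web", "owl", "sparql"}
print(cosine(x, y))    # 2 / sqrt(3 * 4) ~ 0.577
print(jaccard(x, y))   # 2 / 5 = 0.4
```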
Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:arxiv_author|Hongyuan Zha -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:arxiv_author|Jan Rygl -EventKG: A Multilingual Event-Centric Temporal Knowledge Graph 690 thousand contemporary and historical events and over 2.3 million temporal relations One of the key requirements to facilitate semantic analytics of information regarding contemporary and historical events on the Web, in the news and in social media is the availability of reference knowledge repositories containing comprehensive representations of events and temporal relations. Existing knowledge graphs, with popular examples including DBpedia, YAGO and Wikidata, focus mostly on entity-centric information and are insufficient in terms of their coverage and completeness with respect to events and temporal relations. EventKG presented in this paper is a multilingual event-centric temporal knowledge graph that addresses this gap. EventKG incorporates over 690 thousand contemporary and historical events and over 2.3 million temporal relations extracted from several large-scale knowledge graphs and semi-structured sources and makes them available through a canonical representation.|sl:tag|tag:rdf -Ethiopie|skos:broader|Afrique de l'Est -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. 
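An illustrative aside on the "Semantic Vector Encoding" entry above: one simple way to encode a dense vector as text tokens so a fulltext engine such as Elasticsearch can index it. The paper evaluates several such encodings; this rounding scheme is just an illustration, and the vector values are made up.

```python
# Encode a dense vector as fulltext tokens, one token per rounded dimension.
import numpy as np

def vector_to_tokens(vec: np.ndarray, precision: int = 1) -> str:
    # Similar vectors share many tokens, so BM25-style lexical scoring
    # approximates vector similarity on the encoded field.
    return " ".join(f"d{i}_{round(float(v), precision)}" for i, v in enumerate(vec))

doc_vector = np.array([0.71, -0.12, 0.33])
print(vector_to_tokens(doc_vector))   # 'd0_0.7 d1_-0.1 d2_0.3'
# The resulting string is indexed as an ordinary text field; queries are
# encoded the same way and issued as ordinary fulltext queries.
```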
While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:arxiv_author|Guillaume Lample -Struts|skos:broader|JSP -Causal inference|skos:broader|Inference -Seyni Kountché|skos:broader|Histoire du Niger -Suède|skos:broader|Pays d'Europe -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|sl:tag|tag:knowledge_graph_embeddings -Cyborg|skos:broader|Robotique -Robot humanoïde|skos:broader|Robotique -public-hydra@w3.org|skos:broader|Hydra -Distilling the Knowledge in a Neural Network a different kind of training, which we call “distillation” to transfer the knowledge from the cumbersome model to a small model that is more suitable for deployment Caruana and his collaborators have shown that it is possible to compress the knowledge in an [#ensemble](/tag/ensemble_learning.html) into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST. A very simple way to improve the performance of almost any machine learning algorithm is to train many different models on the same data and then to average their predictions. Unfortunately, making predictions using a whole ensemble of models is cumbersome and may be too computationally expensive to allow deployment to a large number of users, especially if the individual models are large neural nets. 
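An illustrative aside on the RotatE entry above: a relation is modeled as a rotation in complex space, with score -||h ∘ r - t|| and the rotation parameterized by phases so each relation coordinate has modulus 1. Toy tensors only, and a plain norm stands in for the paper's exact distance.

```python
# RotatE-style triple scoring: rotate the head embedding, compare to the tail.
import math
import torch

dim = 100                                   # complex embedding dimension
h = torch.randn(dim, 2)                     # head entity: (real, imag) pairs
t = torch.randn(dim, 2)                     # tail entity
phase = torch.rand(dim) * 2 * math.pi       # relation = per-dimension angles
r = torch.stack([torch.cos(phase), torch.sin(phase)], dim=-1)  # |r_i| = 1

# Complex elementwise product h o r.
rotated_real = h[:, 0] * r[:, 0] - h[:, 1] * r[:, 1]
rotated_imag = h[:, 0] * r[:, 1] + h[:, 1] * r[:, 0]
rotated = torch.stack([rotated_real, rotated_imag], dim=-1)

score = -torch.linalg.norm(rotated - t)     # higher = more plausible triple
```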
Caruana and his collaborators have shown that it is possible to compress the knowledge in an ensemble into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST and we show that we can significantly improve the acoustic model of a heavily used commercial system by distilling the knowledge in an ensemble of models into a single model. We also introduce a new type of ensemble composed of one or more full models and many specialist models which learn to distinguish fine-grained classes that the full models confuse. Unlike a mixture of experts, these specialist models can be trained rapidly and in parallel.|sl:arxiv_author|Jeff Dean -Evolution|skos:broader|Science -Museum d'Histoire Naturelle|skos:broader|Musée -Wembedder: Wikidata entity embedding web service web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk I present a web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk. A REST API is implemented. Together with the Wikidata API the web service exposes a multilingual resource for over 600'000 Wikidata items and properties.|sl:arxiv_author|Finn Årup Nielsen -Semanlink|skos:broader|Personal-information management -Musique|skos:broader|Art -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. 
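An illustrative aside on the distillation entry above: match the student's temperature-softened predictions to the teacher's, plus the usual cross-entropy on the true labels. The sketch assumes PyTorch, and the logits are toy placeholders rather than outputs of real models.

```python
# Knowledge-distillation loss: soft teacher targets + hard label targets.
import torch
import torch.nn.functional as F

T, alpha = 4.0, 0.5
teacher_logits = torch.randn(32, 10)
student_logits = torch.randn(32, 10, requires_grad=True)
labels = torch.randint(0, 10, (32,))

soft_loss = F.kl_div(
    F.log_softmax(student_logits / T, dim=-1),
    F.softmax(teacher_logits / T, dim=-1),
    reduction="batchmean",
) * (T * T)                                  # T^2 keeps the gradient scale stable
hard_loss = F.cross_entropy(student_logits, labels)
loss = alpha * soft_loss + (1 - alpha) * hard_loss
loss.backward()
```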
Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:arxiv_author|G P Shrivatsa Bhargav -JsonLD + MongoDB|skos:broader|MongoDB -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|sl:arxiv_author|Will Grathwohl -JavaScript Tutorial|skos:broader|Tutorial -Aho–Corasick algorithm|skos:broader|Algorithmes -StarSpace|skos:broader|Embeddings -Concise Bounded Description|skos:broader|RDF graphs -Deep Learning frameworks|skos:broader|Machine Learning tool -W3C Data Activity|skos:broader|Semantic Web -Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems.
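An illustrative aside on the reinterpretation at the heart of the "Your Classifier is Secretly an Energy Based Model" entry above: the logits of any standard classifier define an unnormalized log-density over inputs via a log-sum-exp. The logits here are toy placeholders.

```python
# A classifier's logits read as a joint energy-based model.
import torch

logits = torch.randn(32, 10)                 # outputs of any classifier

# p(y|x) is the usual softmax over the logits...
p_y_given_x = logits.softmax(dim=-1)

# ...and the same logits give an unnormalized log p(x):
# log p(x) = logsumexp(logits) - log Z, i.e. energy(x) = -logsumexp(logits).
energy = -torch.logsumexp(logits, dim=-1)    # lower energy = more likely input
```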
The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|sl:tag|tag:arxiv_doc -Patricia Highsmith|skos:broader|Thriller -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Francis Song -Neurala: Lifelong-DNN|skos:broader|Continual Learning -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using an existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations.
[conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to the sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using an existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperforms several popular clustering methods when tested on three public short text datasets.|sl:tag|tag:nlp_short_texts -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|sl:arxiv_author|Jeremy Howard -Self-Taught Hashing for Fast Similarity Search Emphasises the following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Proposes the following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains a very challenging problem.
In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|sl:arxiv_firstAuthor|Dell Zhang -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|sl:arxiv_author|Denis Krompaß -Business Intelligence and Semantic Web|skos:broader|Semantic Web -Roy T. Fielding|skos:broader|REST -NLP event|skos:broader|NLP -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. 
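An illustrative aside on the two-stage Self-Taught Hashing recipe from the entry above, approximated with scikit-learn: the paper uses binarised Laplacian Eigenmaps, replaced here by truncated SVD for brevity, followed by one linear SVM per bit to hash unseen documents. The corpus strings are invented.

```python
# Stage 1: unsupervised binary codes; stage 2: one classifier per bit.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.svm import LinearSVC

corpus = ["fast similarity search", "semantic hashing of documents",
          "binary codes for retrieval", "hashing for nearest neighbours",
          "document retrieval at scale", "learning compact binary codes"]
n_bits = 3

vec = TfidfVectorizer()
X = vec.fit_transform(corpus)
Z = TruncatedSVD(n_components=n_bits, random_state=0).fit_transform(X)
codes = (Z > np.median(Z, axis=0)).astype(int)   # stage 1: median-binarized bits

# Stage 2: each classifier predicts one bit of the code for unseen documents.
classifiers = [LinearSVC().fit(X, codes[:, b]) for b in range(n_bits)]

def hash_document(text: str) -> list:
    x = vec.transform([text])
    return [int(clf.predict(x)[0]) for clf in classifiers]

print(hash_document("compact codes for fast document search"))
```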
We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:tag|tag:vector_space_model -Rembrandt|skos:broader|Peintre -AdSense|skos:broader|Google -Francis Pisani|skos:broader|Technical girls and guys -Statistical classification|skos:broader|Supervised machine learning -Markets|skos:broader|Finance -Charlie Hebdo|skos:broader|Presse -log4j|skos:broader|Java dev -ANN: introduction|skos:broader|Neural networks -Apache Spark|skos:broader|Big Data -Monsanto|skos:broader|Semencier -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance. Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748). Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, that reminds me of something) Assumes each concept (in the existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically.
However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of ⟨query concept, anchor concept⟩ pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:tag|tag:arxiv_doc -backplanejs|skos:broader|RDFa -Anticolonialisme|skos:broader|Colonisation -Named Graphs|skos:broader|RDF -Einstein|skos:broader|Grand Homme -Chatbot|skos:broader|NLP: applications -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|sl:tag|tag:transfer_learning -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as "the tissue is 75% likely to be tumorous" or "the chemical is 25% likely to be toxic" are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include "don't know" that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an "Uncertain" answer similar to a human reply of "don't know" or "50:50" in the examples we refer to earlier, this would translate to actions such as "operate on tumour" or "remove compound from use" where the models give a "more true than not" answer. Where the models judge the result "Uncertain" the practical decision might be "carry out more detailed laboratory testing of compound" or "commission new tissue analyses".
The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating "Uncertain" from binary predictions and finally, we provide arguments for its use in practice.|sl:arxiv_author|Ljubomir Buturovic -Terrorisme|skos:broader|Grands problèmes -Brain vs Deep Learning|skos:broader|Brain -AI@Google|skos:broader|AI teams -Jean-Claude Juncker|skos:broader|Eurogroupe -W3C Recommendation|skos:broader|W3C -semblog|skos:broader|Steve Cayzer -FBI v. Apple|skos:broader|iphone -fps@EC-Web'14|skos:broader|schema.org -Ranking (information retrieval)|skos:broader|Information retrieval: techniques -Twine|skos:broader|Semantic Web : Application -Mali|skos:broader|Afrique de l'Ouest -ANN used for unsupervised learning of efficient codings: learning a representation (encoding) for a set of data, typically for the purpose of dimensionality reduction. an unsupervised neural network which is trained to reconstruct a given input from its latent representation (Bengio, 2009). Unlike principal components analysis, the encoding and decoding steps are not limited to linear transformations (PCA learns an encoding linear transform, while auto-encoders learn an encoding program). |skos:broader|In machine learning, unsupervised learning refers to the problem of trying to find hidden structure in unlabeled data -EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. Presented in these [slides](/doc/2019/12/unsupervised_learning_with_text) Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. The presence of a concept is decided from an excerpt i.e. a small sequence of consecutive words in the text. Relevant concepts for the prediction task at hand are automatically defined by our model, avoiding the need for concept-level annotations. To ease interpretability, we enforce that for each concept, the corresponding excerpts share similar semantics and are differentiable from each other. We experimentally demonstrate the relevance of our approach on text classification and multi-sentiment analysis tasks.|sl:arxiv_author|Ludovic Denoyer -nbdev.fast.ai|skos:broader|fast.ai -faiss|skos:broader|Facebook FAIR -Deep Learning: Optimization methods|skos:broader|Deep Learning -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms.
This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|sl:tag|tag:arxiv_doc -Yves Peirsman|skos:broader|NLP girls and guys -W3C Community Group|skos:broader|W3C -LDOW2008|skos:broader|Workshop -Calais|skos:broader|Linking Open Data -Word embeddings|skos:broader|NLP techniques -France / Afrique|skos:broader|France -Three Mile Island|skos:broader|Industrie nucléaire -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|sl:arxiv_author|J. Zico Kolter -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations.
We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:arxiv_author|Kathryn Mazaitis -Ng|skos:broader|Technical guys -Neural-Symbolic Computing|skos:broader|NN / Symbolic AI hybridation -Stanford classifier|skos:broader|NLP@Stanford -Maidsafe|skos:broader|Peer to peer -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:arxiv_author|Ruslan Salakhutdinov -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. 
The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|sl:tag|tag:out_of_distribution_detection -Memory leak|skos:broader|Mémoire (informatique) -Rosetta|skos:broader|esa -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|sl:arxiv_author|Yinchong Yang -Decentralized social network|skos:broader|Social Networks -RDF Tools|skos:broader|RDF -SKOS W3C document|skos:broader|W3C -Edvige|skos:broader|Fichage -RFID passports|skos:broader|Etat policier -Colza transgénique|skos:broader|Colza -Intervention française au Mali|skos:broader|Mali -Encyclopedia of Life|skos:broader|Biodiversity data -AllenNLP|skos:broader|NLP tools -Semantic web: training|skos:broader|Semantic Web -Link Prediction|skos:broader|Knowledge Graph Completion -Piratage des œuvres|skos:broader|Propriété intellectuelle -Information visualization|skos:broader|GUI -Google|skos:broader|Entreprise -Using Information Content to Evaluate Semantic Similarity in a Taxonomy This paper presents a new measure of semantic similarity in an IS-A taxonomy, based on the notion of information content. Experimental evaluation suggests that the measure performs encouragingly well (a correlation of r = 0.79 with a benchmark set of human similarity judgments, with an upper bound of r = 0.90 for human subjects performing the same task), and significantly better than the traditional edge counting approach (r = 0.66).|sl:arxiv_author|Philip Resnik -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as "the tissue is 75% likely to be tumorous" or "the chemical is 25% likely to be toxic" are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include "don't know" that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed.
Where models produce an "Uncertain" answer similar to a human reply of "don't know" or "50:50" in the examples we refer to earlier, this would translate to actions such as "operate on tumour" or "remove compound from use" where the models give a "more true than not" answer. Where the models judge the result "Uncertain" the practical decision might be "carry out more detailed laboratory testing of compound" or "commission new tissue analyses". The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating "Uncertain" from binary predictions and finally, we provide arguments for its use in practice.|sl:tag|tag:three_way_decisions -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hum, j'ai déjà vu ça quelque part) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|sl:arxiv_author|Yuan Luo -bash|skos:broader|Unix -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT's cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction.
We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:arxiv_author|Alfio Gliozzo -Ford|skos:broader|Entreprise -Brad Pitt|skos:broader|Acteur -Tika|skos:broader|apache.org -Theoretical Impediments to Machine Learning With Seven Sparks from the Causal Revolution To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks Current machine learning systems operate, almost exclusively, in a statistical, or model-free mode, which entails severe theoretical limits on their power and performance. Such systems cannot reason about interventions and retrospection and, therefore, cannot serve as the basis for strong AI. To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks. To demonstrate the essential role of such models, I will present a summary of seven tasks which are beyond reach of current machine learning systems and which have been accomplished using the tools of causal modeling.|sl:tag|tag:arxiv_doc -Orange (data mining)|skos:broader|Data mining tools -Linked Learning|skos:broader|Linked Data -Technique de l'insecte stérile|skos:broader|Nous vivons une époque moderne -Championnat du monde d'athlétisme|skos:broader|Athlétisme -DuckDuckGo|skos:broader|Search Engines -Glue|skos:broader|Social Networks -Knowledge Graph + Deep Learning|skos:broader|Graph neural networks -Uranium|skos:broader|Matière première -LD-PATCH|skos:broader|LDP: updates -Coursera: Introduction to Data Science|skos:broader|Coursera -Cancer|skos:broader|Grands problèmes -Windows|skos:broader|OS -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|sl:arxiv_author|Aditya Siddhant -Hittites|skos:broader|Anatolie -Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings.
Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:tag|tag:explainable_ai -Semanlink|skos:broader|Personal Knowledge Management -Attention + Knowledge Graphs|skos:broader|Knowledge Graph + Deep Learning -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|sl:arxiv_firstAuthor|Chandramouli Shama Sastry -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. 
With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Sheng-yi Kong -Online Security|skos:broader|Cybersecurity Sécurité informatique -Drupal modules|skos:broader|Drupal -Nok|skos:broader|Nigeria -Artificial general intelligence|skos:broader|Artificial Intelligence -Wiki|skos:broader|Collaborative editing -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|sl:arxiv_author|Roberto Silveira -AI + Knowledge Bases|skos:broader|Knowledge bases -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model.
We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|sl:arxiv_author|Victor Sanh -Chevènement|skos:broader|Homme politique -SL todo|skos:broader|Todo -Ensemble learning|skos:broader|Machine learning: techniques -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:arxiv_author|Alexander H. Miller -Amazon|skos:broader|Internet -Ultralingua|skos:broader|Online dictionary -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more.
In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:arxiv_author|Peter Bailis -Musée de Niamey|skos:broader|Niamey -Death of Hyperlink|skos:broader|Hyperlinks -Savant|skos:broader|sciences -Naomi Klein|skos:broader|Critique de la société occidentale -Kullback–Leibler divergence|skos:broader|Information theory -Ecriture|skos:broader|Divers -Europeana|skos:broader|Bibliothèque numérique -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation.
KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:arxiv_author|Roy Schwartz -Texaco|skos:broader|Compagnies pétrolières -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:arxiv_firstAuthor|Tim Kraska -Danemark|skos:broader|Europe -LIME|skos:broader|Statistical classification -Turtle|skos:broader|RDF -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|sl:tag|tag:word_embedding -JDD Apple|skos:broader|Apple -EventKG: A Multilingual Event-Centric Temporal Knowledge Graph 690 thousand contemporary and historical events and over 2.3 million temporal relations One of the key requirements to facilitate semantic analytics of information regarding contemporary and historical events on the Web, in the news and in social media is the availability of reference knowledge repositories containing comprehensive representations of events and temporal relations. Existing knowledge graphs, with popular examples including DBpedia, YAGO and Wikidata, focus mostly on entity-centric information and are insufficient in terms of their coverage and completeness with respect to events and temporal relations.
EventKG presented in this paper is a multilingual event-centric temporal knowledge graph that addresses this gap. EventKG incorporates over 690 thousand contemporary and historical events and over 2.3 million temporal relations extracted from several large-scale knowledge graphs and semi-structured sources and makes them available through a canonical representation.|sl:arxiv_firstAuthor|Simon Gottschalk -Semantic Web Client Library|skos:broader|SPARQL -Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|sl:tag|tag:contextualised_word_representations -Feynman|skos:broader|Scientifique -Eclipse project|skos:broader|Eclipse -Europeana|skos:broader|European project - general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multirelational graphs, and learning word, sentence or document level embeddings [Github](https://github.com/facebookresearch/starSpace) (seems to be the solution for [#Multi-Label classification](/tag/multi_label_classification) that [#FastText](/tag/fasttext) doesn't support very well) |skos:broader|The objective of embedding methods is to organize symbolic objects (e.g., words, entities, concepts) in a way such that their similarity in the embedding space reflects their semantic or functional similarity -Rada Mihalcea|skos:broader|NLP girls and guys -BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding.
In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|sl:tag|tag:arxiv_doc -SOAP vs REST|skos:broader|REST -Disparition des abeilles|skos:broader|Catastrophe écologique -Tepuys|skos:broader|Venezuela -Configuration ontology|skos:broader|C2GWeb RDF -Deep Learning: A Critical Appraisal Although deep learning has historical roots going back decades, neither the term deep learning nor the approach was popular just over five years ago, when the field was reignited by papers such as Krizhevsky, Sutskever and Hinton's now classic (2012) deep network model of Imagenet. What has the field discovered in the five subsequent years? Against a background of considerable progress in areas such as speech recognition, image recognition, and game playing, and considerable enthusiasm in the popular press, I present ten concerns for deep learning, and suggest that deep learning must be supplemented by other techniques if we are to reach artificial general intelligence.|sl:tag|tag:nn_symbolic_ai_hybridation -Nuclear war|skos:broader|War -Functional programming|skos:broader|Programming language -Stack Overflow|skos:broader|Dev tools -RDF Validator|skos:broader|Validator -Richard Stallman|skos:broader|Technical girls and guys -Vector space model|skos:broader|Information retrieval: techniques -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|sl:tag|tag:arxiv_doc -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries.
This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|sl:tag|tag:arxiv_doc -Intelligence collective|skos:broader|Intelligence -Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. These findings are significant as it suggests a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|sl:arxiv_author|Kyle Richardson -open-source framework for gradient boosting (java, python, etc)|skos:broader|ML technique for regression and classification problems, which produces a prediction model in the form of an ensemble of weak prediction models, typically decision trees. Allows the optimization of an arbitrary differentiable loss function. -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations.
[conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperforms several popular clustering methods when tested on three public short text datasets.|sl:arxiv_author|Bo Xu -delicious api|skos:broader|API -Deep Learning: A Critical Appraisal Although deep learning has historical roots going back decades, neither the term deep learning nor the approach was popular just over five years ago, when the field was reignited by papers such as Krizhevsky, Sutskever and Hinton's now classic (2012) deep network model of Imagenet. What has the field discovered in the five subsequent years? Against a background of considerable progress in areas such as speech recognition, image recognition, and game playing, and considerable enthusiasm in the popular press, I present ten concerns for deep learning, and suggest that deep learning must be supplemented by other techniques if we are to reach artificial general intelligence.|sl:arxiv_firstAuthor|Gary Marcus -Encyclopedia of Life|skos:broader|Biology -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification.
We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:arxiv_author|William W. Cohen -fpservant@slideshare|skos:broader|slides fps -Gilberto Gil|skos:broader|Musique brésilienne -RDF dev|skos:broader|RDF -SemWeb Pro|skos:broader|Conférences -Panthéon (Paris)|skos:broader|Paris -Consciousness Prior|skos:broader|NN / Symbolic AI hybridation -scikit-learn|skos:broader|Python 4 Data science -Dev tools|skos:broader|Dev -Brain-to-Brain Interface|skos:broader|Brain -Penseur|skos:broader|Divers -Euro 2016|skos:broader|Football -Jeux en ligne|skos:broader|Computer game -Antiquité|skos:broader|Histoire -Flair|skos:broader|NLP tools -TensorFlow|skos:broader|Deep Learning frameworks -Dremel|skos:broader|Google -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Ari Holtzman -Web Services|skos:broader|Dev -Google car|skos:broader|Google -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|sl:arxiv_author|Ning Dai -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data.
Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:arxiv_author|Allen Schmaltz -Minimum wage|skos:broader|Salaire -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space. We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:arxiv_firstAuthor|Urvashi Khandelwal -REST Security|skos:broader|REST -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grandparents” of the query concept).
Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grandparents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance. Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748). Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, this reminds me of something.) Assumes each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, it only attempts to find a single parent node for each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:tag|tag:taxonomies -ATerm|skos:broader|Dev -Cross-Entropy|skos:broader|Information theory -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem.
Specifically, we study two structured distillation schemes: i) pair-wise distillation that distills the pairwise similarities by building a static graph, and ii) holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|sl:arxiv_author|Yifan Liu -C2GWeb and Product description|skos:broader|C2GWeb -Learning Confidence for Out-of-Distribution Detection in Neural Networks Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.|sl:arxiv_author|Graham W. Taylor -k-means clustering|skos:broader|Unsupervised machine learning -RDF Framework|skos:broader|RDF -Hidden Markov model|skos:broader|Machine learning: techniques -New Horizons|skos:broader|Missions spatiales -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x. User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|sl:arxiv_author|Angeliki Metallinou -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together.
The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks. Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:arxiv_author|Armand Joulin -matplotlib|skos:broader|Data Visualization Tools -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages. Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|sl:arxiv_author|Andrew McCallum -LOD mailing list|skos:broader|Mailing list -Langues|skos:broader|Language -Keras|skos:broader|Deep Learning frameworks -Linked Data: application|skos:broader|Linked Data -One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts.
In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|sl:tag|tag:machine_learning -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly fewer operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation. We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|sl:arxiv_firstAuthor|Wan-Duo Kurt Ma -RW Linked Data|skos:broader|LD -Java 1.5 Mac OS X|skos:broader|Java 5 -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU.
On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:arxiv_author|Llion Jones -Twittérature|skos:broader|Twitter -Common Tag|skos:broader|RDFa -Ora Lassila|skos:broader|Technical girls and guys -Relation Extraction|skos:broader|NLP tasks / problems -Daimler|skos:broader|Automobile -Docker-Tomcat|skos:broader|Docker -Spam|skos:broader|Internet -Droit et internet|skos:broader|Droit -ACL 2019|skos:broader|ACL -Embeddings in NLP|skos:broader|Embeddings -Sun Microsystems|skos:broader|Entreprise -WWW 2015|skos:broader|TheWebConf -1ere guerre mondiale|skos:broader|War -Gabon|skos:broader|Afrique équatoriale -Dan Connolly|skos:broader|Technical girls and guys -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:tag|tag:memory_in_deep_learning -gnowsis|skos:broader|Semanlink related -OWLED|skos:broader|OWL -Blair|skos:broader|UK -The Web is dying|skos:broader|Web -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverages entity linkage and build an efficient multi-task training procedure. We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks. Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverages entity linkage and build an efficient multi-task training procedure.
Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:tag|tag:graph_embeddings -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|sl:arxiv_author|Wei Xu -Europe and UK|skos:broader|Union européenne -N-gram|skos:broader|Language Modeling Statistical Language Model -Neural network interpretability|skos:broader|Neural networks -Bayesian analysis|skos:broader|Uncertainty Reasoning -Knowledge Discovery|skos:broader|Knowledge Engineering -Iapetus|skos:broader|Saturne -TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence. The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification. LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. We also perform a comprehensive analysis to demonstrate great future opportunities. The data and code of the dataset are provided in https://github.com/wenhuchen/Table-Fact-Checking.|sl:tag|tag:table_based_fact_verification -Text in KG embeddings|skos:broader|Knowledge Graph Embeddings -Question Answering over Knowledge Graphs via Structural Query Patterns Natural language question answering over knowledge graphs is an important and interesting task as it enables common users to gain accurate answers in an easy and intuitive manner.
However, it remains a challenge to bridge the gap between unstructured questions and structured knowledge graphs. To address the problem, a natural discipline is building a structured query to represent the input question. Searching the structured query over the knowledge graph can produce answers to the question. Distinct from the existing methods that are based on semantic parsing or templates, we propose an effective approach powered by a novel notion, structural query pattern, in this paper. Given an input question, we first generate its query sketch that is compatible with the underlying structure of the knowledge graph. Then, we complete the query graph by labeling the nodes and edges under the guidance of the structural query pattern. Finally, answers can be retrieved by executing the constructed query graph over the knowledge graph. Evaluations on three question answering benchmarks show that our proposed approach outperforms state-of-the-art methods significantly.|sl:arxiv_firstAuthor|Weiguo Zheng -GAN|skos:broader|Neural networks -About RDF|skos:broader|RDF -Google Maps|skos:broader|Carte -KD-MKB biblio|skos:broader|KD-MKB -RDFa tool|skos:broader|Dev tools -Deutsch@de|skos:broader|Langues vivantes -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|sl:tag|tag:rigolo -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems. We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|sl:tag|tag:sequence_labeling -OWLED 2007|skos:broader|OWLED -Stemming|skos:broader|General NLP tasks -Deep Learning and the Information Bottleneck Principle Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN. Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle.
We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN and obtain finite sample generalization bounds. The advantage of getting closer to the theoretical limit is quantifiable both by the generalization bound and by the network's simplicity. We argue that both the optimal architecture, number of layers and features/connections at each layer, are related to the bifurcation points of the information bottleneck tradeoff, namely, relevant compression of the input layer with respect to the output layer. The hierarchical representations at the layered network naturally correspond to the structural phase transitions along the information curve. We believe that this new insight can lead to new optimality bounds and deep learning algorithms.|sl:arxiv_author|Naftali Tishby -Seq2Seq with Attention|skos:broader|Attention mechanism -A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|sl:arxiv_firstAuthor|Andrew M. Saxe -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|sl:tag|tag:metric_learning -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. 
This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|sl:arxiv_author|Arturs Backurs -NLP: Reading Comprehension|skos:broader|NLU -Paludisme|skos:broader|Grands problèmes -Les petites cases|skos:broader|Gautier Poupeau -Semantic Web Dev|skos:broader|Semantic Web -Web architecture|skos:broader|Test of independent invention -Spatial search|skos:broader|Algorithmes -Bing|skos:broader|Search Engines -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. 
Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall, the complex tuning procedures, etc., can be elegantly overcome by applying Semantic Folding.|sl:tag|tag:arxiv_doc -Film de guerre|skos:broader|Film -Graph Convolutional Networks|skos:broader|Convolutional neural network -Proposed in a paper at EMNLP 2018: use Odd-Man-Out puzzles|skos:broader|[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words. Parameterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. "Plongement lexical" in French. Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus. The mapping may be generated by training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The best-known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available on the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. -Jean Rouch|skos:broader|Afrique -Museum d'Histoire Naturelle|skos:broader|Biology -Adolescents|skos:broader|Société -URI dereferencing|skos:broader|URI -Relation Extraction|skos:broader|Knowledge Extraction -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup.
Macaw is distributed under the MIT License.|sl:tag|tag:question_answering -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:arxiv_author|Noam Shazeer -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. 
Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|sl:tag|tag:semi_supervised_learning -Topic Models + Word embedding|skos:broader|Topic Modeling -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critique here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|sl:arxiv_author|Huchuan Lu -TimBL TBL|skos:broader|Technical guys -Text mining|skos:broader|Data mining -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space. We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge.
Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:tag|tag:facebook_fair -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE). With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|sl:arxiv_author|Quoc V. Le -Smalltalk|skos:broader|Programming language -Semantic Wiki|skos:broader|Wiki -Sud des Etats-Unis|skos:broader|USA -Self-Taught Hashing for Fast Similarity Search Emphasises the following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Proposes the following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|sl:arxiv_author|Jinsong Lu -Swift|skos:broader|Programming language -Mussolini|skos:broader|Fascisme -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs.
A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|sl:tag|tag:arxiv_doc -Cheval|skos:broader|Animal -Graph Convolutional Network GCN|skos:broader|CNN Convnet Convnets Convolutional neural networks -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|sl:arxiv_author|Jose L. Part -Paradoxe Einstein-Podolsky-Rosen|skos:broader|Einstein -SPARQL Tips|skos:broader|SPARQL -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Problem with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. Two main branches of sense representation: - unsupervised - knowledge-based. Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications.
This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|sl:arxiv_firstAuthor|Jose Camacho-Collados -Phishing|skos:broader|Internet -Natural Language Processing|skos:broader|favorites -Cringely|skos:broader|PBS -Deep Learning: A Critical Appraisal Although deep learning has historical roots going back decades, neither the term deep learning nor the approach was popular just over five years ago, when the field was reignited by papers such as Krizhevsky, Sutskever and Hinton's now classic (2012) deep network model of Imagenet. What has the field discovered in the five subsequent years? Against a background of considerable progress in areas such as speech recognition, image recognition, and game playing, and considerable enthusiasm in the popular press, I present ten concerns for deep learning, and suggest that deep learning must be supplemented by other techniques if we are to reach artificial general intelligence.|sl:arxiv_author|Gary Marcus -Catastrophe industrielle|skos:broader|Catastrophe -Vidéosurveillance|skos:broader|Big Brother -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|sl:tag|tag:andrew_mccallum -Named Entity Recognition with Extremely Limited Data Named Entity Search (NES) We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries. 
We do not propose this as a replacement for NER, but as something to be used for an ephemeral or contextual class of entity, when it does not make sense to label hundreds or thousands of instances to learn a classifier. Traditional information retrieval treats named entity recognition as a pre-indexing corpus annotation task, allowing entity tags to be indexed and used during search. Named entity taggers themselves are typically trained on thousands or tens of thousands of examples labeled by humans. However, there is a long tail of named entity classes, and for these cases, labeled data may be impossible to find or justify financially. We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries.|sl:arxiv_firstAuthor|John Foley -Tesla|skos:broader|Automotive -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems. We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|sl:tag|tag:arxiv_doc
Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|sl:arxiv_author|Jatin Chauhan -Tony Blair|skos:broader|Royaume Uni -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|sl:arxiv_author|Hugo Larochelle -Squeak|skos:broader|Open Source -Cocoon|skos:broader|Dev -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. 
Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|sl:arxiv_firstAuthor|Genet Asefa Gesese -cross-domain data fetching|skos:broader|Web dev -Nazisme|skos:broader|Fascisme -Censure et maltraitance animale|skos:broader|Droit à l'information -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|sl:tag|tag:word_embedding -URI Template|skos:broader|URI -Question Answering over Knowledge Graphs via Structural Query Patterns Natural language question answering over knowledge graphs is an important and interesting task as it enables common users to gain accurate answers in an easy and intuitive manner. However, it remains a challenge to bridge the gap between unstructured questions and structured knowledge graphs. To address the problem, a natural discipline is building a structured query to represent the input question. Searching the structured query over the knowledge graph can produce answers to the question. Distinct from the existing methods that are based on semantic parsing or templates, we propose an effective approach powered by a novel notion, structural query pattern, in this paper. Given an input question, we first generate its query sketch that is compatible with the underlying structure of the knowledge graph. Then, we complete the query graph by labeling the nodes and edges under the guidance of the structural query pattern. Finally, answers can be retrieved by executing the constructed query graph over the knowledge graph. Evaluations on three question answering benchmarks show that our proposed approach outperforms state-of-the-art methods significantly.|sl:arxiv_author|Mei Zhang -dajobe Dave Beckett|skos:broader|Technical guys -Marie-Jo Pérec|skos:broader|Athlétisme -Mac dev|skos:broader|Dev -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. 
We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|sl:tag|tag:pre_trained_language_models -Loi sur le voile|skos:broader|Société -Web Services for JavaScript|skos:broader|Web Services -Loropéni|skos:broader|Archéologie africaine -Knowledge-augmented language models|skos:broader|Language Models + Knowledge -Automotive AND W3C|skos:broader|Automobile -Principal component analysis|skos:broader|Dimensionality reduction -Deutsch@de|skos:broader|Allemagne -Graph-based Semi-Supervised Learning|skos:broader|Graph -Empire romain|skos:broader|Antiquité romaine -Stefan Zweig|skos:broader|Autriche -LDP @ W3C|skos:broader|W3C -Eclipse|skos:broader|Dev tools -Supervised learning techniques that also make use of unlabeled data for training – typically a small amount of labeled data with a large amount of unlabeled data.|skos:broader|Machine learning focuses on prediction, based on known properties learned from the training data. Data mining (which is the analysis step of Knowledge Discovery in Databases) focuses on the discovery of (previously) unknown properties on the data. [Glossary (by google)](https://developers.google.com/machine-learning/glossary/) -Journaliste|skos:broader|Journalisme -Une suite de matrices symétriques en rapport avec la fonction de Mertens we explore a class of equivalence relations over N from which is constructed a sequence of symetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important part in this classical and difficult problem. In this paper we explore a class of equivalence relations over $\\N^\\ast$ from which is constructed a sequence of symetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important part in this classical and difficult problem.|sl:arxiv_author|Jean-Paul Cardinal -New Horizons|skos:broader|Pluton -Liberté, égalité, fraternité|skos:broader|Révolution française -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objectiv. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. 
When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|sl:tag|tag:arxiv_doc -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:arxiv_author|Qingyun Wang -Miriam Makeba|skos:broader|Musicien -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. 
Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:tag|tag:nlp_facebook -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|sl:tag|tag:arxiv_doc -John Sofakolle|skos:broader|Ami -Product Knowledge Graph|skos:broader|Knowledge Graphs -Provocation policière|skos:broader|Police -Topic Modeling|skos:broader|Analyse sémantique -Guerres de religion|skos:broader|Religion -Walmart|skos:broader|Entreprise -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. 
More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:tag|tag:learned_index_structures -huggingface/transformers|skos:broader|GitHub project -Obélisque|skos:broader|Sculpture -Alexandre Bertails|skos:broader|SW guys (and girls) -NLP in enterprise|skos:broader|NLP -Honda|skos:broader|Entreprise -A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|sl:arxiv_author|Olga Kovaleva -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|sl:arxiv_author|Christopher Pal -Mars 2004|skos:broader|Exploration marsienne -Finlande|skos:broader|Europe -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. 
In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|sl:arxiv_firstAuthor|Arturs Backurs -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:arxiv_author|Sean Culatana -Dictionary learning, or sparse coding, tries to learn a sparse linear code to represent the given data succinctly. Unsupervised learning algo. 
Images - edge detection (similar to primary visual cortex) |skos:broader|In machine learning, unsupervised learning refers to the problem of trying to find hidden structure in unlabeled data -Microblogs|skos:broader|Blog -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:tag|tag:unsupervised_machine_translation -Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|sl:tag|tag:bert -Samba|skos:broader|Danse -Lynn Margulis|skos:broader|Scientifique -dowhatimean.net|skos:broader|Technical guys -Intel|skos:broader|Technologie -Support vector machine|skos:broader|Supervised machine learning -Amphibiens|skos:broader|Animal -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. 
We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|sl:arxiv_firstAuthor|Lajanugen Logeswaran -Eric Baetens|skos:broader|Ecole des Mines -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:arxiv_firstAuthor|Michael Glass -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. 
Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:tag|tag:manaal_faruqui -François Yvon|skos:broader|NLP girls and guys -Industrie nucléaire|skos:broader|industrie -Learning Sparse, Distributed Representations using the Hebbian Principle The \fire together, wire together\ Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The fire together, wire together Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|sl:tag|tag:brain_vs_deep_learning -Jamendo|skos:broader|Musique -Feynman|skos:broader|Mécanique quantique -foaf|skos:broader|RDF Vocabularies -Tag ontology|skos:broader|Tagging -Clonage|skos:broader|Genetics Génétique -Civilisation de l'Indus|skos:broader|Antiquité du Pakistan -Banksy|skos:broader|Street art -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. 
We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|sl:tag|tag:arxiv_doc -Arbres remarquables|skos:broader|Arbres -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|sl:arxiv_author|Hiroyuki Shindo -Dark matter|skos:broader|Masse manquante -FaceNet: A Unified Embedding for Face Recognition and Clustering Learns a Euclidean embedding per image Uses a deep CNN trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. state-of-the-art face recognition performance using only 128-bytes per face. Despite significant recent advances in the field of face recognition, implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors. 
Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. Our system cuts the error rate in comparison to the best published result by 30% on both datasets. We also introduce the concept of harmonic embeddings, and a harmonic triplet loss, which describe different versions of face embeddings (produced by different networks) that are compatible to each other and allow for direct comparison between each other.|sl:arxiv_author|Dmitry Kalenichenko -Electric car|skos:broader|Automobile -Maladie|skos:broader|Médecine -Renato Matos|skos:broader|Brésil -Pseudo relevance feedback|skos:broader|Information retrieval -Google Colab|skos:broader|AI cloud service -Siamese networks|skos:broader|Similarity learning -Wembedder: Wikidata entity embedding web service web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk I present a web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk. A REST API is implemented. Together with the Wikidata API the web service exposes a multilingual resource for over 600'000 Wikidata items and properties.|sl:tag|tag:knowledge_graph -Calais (jungle)|skos:broader|Honteux -Volcan|skos:broader|Géologie -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. 
A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:arxiv_author|Yoshua Bengio -Mladic|skos:broader|Bosnie -fps' post|skos:broader|fps -Davos|skos:broader|Economie -QuickTime|skos:broader|Multimedia -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self- taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. [conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [gitgub repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperform several popular clustering methods when tested on three public short text datasets.|sl:tag|tag:arxiv_doc -NSA spying scandal|skos:broader|NSA -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. 
After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:tag|tag:knowledge_graph -DBTune|skos:broader|Yves Raymond -Écologie|skos:broader|Grands problèmes -Pubby|skos:broader|Linked Data -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations. We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|sl:arxiv_author|Vladimir Vapnik -PoolParty|skos:broader|SKOS editor -Samba|skos:broader|Musique -Notes on Cardinal's Matrices These notes are motivated by the work of Jean-Paul Cardinal on symmetric matrices related to the Mertens function. He showed that certain norm bounds on his matrices implied the Riemann hypothesis. Using a different matrix norm we show an equivalence of the Riemann hypothesis to suitable norm bounds on his matrices in the new norm. Then we specify a deformed version of his Mertens function matrices that unconditionally satisfies a norm bound that is of the same strength as his Riemann hypothesis bound.|sl:arxiv_author|Jeffrey C. Lagarias -Toyota|skos:broader|Automobile -Relations Europe-USA|skos:broader|USA -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that used in Word2Vec Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in nature language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. 
The experimental results show that Topic2Vec achieves interesting and meaningful results.|sl:arxiv_author|Li-Qiang Niu -Semantic startup|skos:broader|Startups -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|sl:tag|tag:kd_mkb_biblio -Alpinisme|skos:broader|Montagne -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|sl:arxiv_author|Zeynep Akata -Web services : critique|skos:broader|Web Services -OpenLink Ajax Toolkit (OAT)|skos:broader|Ajax -Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. 
Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. These findings are significant as it suggests a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|sl:tag|tag:arxiv_doc -Privacy and internet|skos:broader|Vie privée -Séquençage du génome|skos:broader|Génome -Smoothed Inverse Frequency: a linear representation of a sentence which is better than the simple average of the embeddings of its words 2 ideas: - assign to each word a weighting that depends on the frequency of the word it the corpus (reminiscent of TF-IDF) - some denoising (removing the component from the top singular direction) Todo (?): check implementation as a [sklearn Vectorizer](https://github.com/ChristophAlt/embedding_vectorizer) |skos:broader|best known for his work on probabilistically checkable proofs and, in particular, the PCP theorem. [Off the convex path](http://www.offconvex.org/) -Wembedder: Wikidata entity embedding web service web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk I present a web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk. A REST API is implemented. Together with the Wikidata API the web service exposes a multilingual resource for over 600'000 Wikidata items and properties.|sl:arxiv_firstAuthor|Finn Årup Nielsen -Révolution française|skos:broader|Révolution -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. 
This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Alvaro Sanchez-Gonzalez -Aho–Corasick algorithm|skos:broader|Text processing -The Limits to Growth|skos:broader|Crise écologique -Cringely|skos:broader|Technical girls and guys -Python install|skos:broader|Python -Browser : back button|skos:broader|Brouteur -Symmetric matrices related to the Mertens function In this paper we explore a family of congruences over N from which a sequence of symmetric matrices related to the Mertens function is built. From the results of numerical experiments we formulate a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important role in this classical and difficult problem. In this paper we explore a family of congruences over $\\N^\\ast$ from which one builds a sequence of symmetric matrices related to the Mertens function. From the results of numerical experiments, we formulate a conjecture about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may come to play a more important role in this classical and difficult problem.|sl:tag|tag:jean_paul -Personal ontology|skos:broader|Ontologies -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. 
With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:tag|tag:arxiv_doc -create.js|skos:broader|Henri Bergius -Extinction des dinosaures|skos:broader|Extinction de masse -Universal basic income|skos:broader|Travail -Distilling the Knowledge in a Neural Network a different kind of training, which we call “distillation” to transfer the knowledge from the cumbersome model to a small model that is more suitable for deployment Caruana and his collaborators have shown that it is possible to compress the knowledge in an [#ensemble](/tag/ensemble_learning.html) into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST. A very simple way to improve the performance of almost any machine learning algorithm is to train many different models on the same data and then to average their predictions. Unfortunately, making predictions using a whole ensemble of models is cumbersome and may be too computationally expensive to allow deployment to a large number of users, especially if the individual models are large neural nets. Caruana and his collaborators have shown that it is possible to compress the knowledge in an ensemble into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST and we show that we can significantly improve the acoustic model of a heavily used commercial system by distilling the knowledge in an ensemble of models into a single model. We also introduce a new type of ensemble composed of one or more full models and many specialist models which learn to distinguish fine-grained classes that the full models confuse. 
Unlike a mixture of experts, these specialist models can be trained rapidly and in parallel.|sl:tag|tag:knowledge_distillation -Machine Reading Comprehension Reading Comprehension|skos:broader|Natural Language Understanding -DSSM (Deep Semantic Similarity Model)|skos:broader|Web search -EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. Presented in these [slides](/doc/2019/12/unsupervised_learning_with_text) Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. The presence of a concept is decided from an excerpt i.e. a small sequence of consecutive words in the text. Relevant concepts for the prediction task at hand are automatically defined by our model, avoiding the need for concept-level annotations. To ease interpretability, we enforce that for each concept, the corresponding excerpts share similar semantics and are differentiable from each others. We experimentally demonstrate the relevance of our approach on text classification and multi-sentiment analysis tasks.|sl:tag|tag:ludovic_denoyer -Ouïgour|skos:broader|Peuples -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:arxiv_author|Pavel Kuksa -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. 
We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:arxiv_author|Martin Wattenberg -SPARQL Update|skos:broader|SPARQL -booking.com|skos:broader|Uberisation -Délocalisations|skos:broader|Economie -jsFiddle|skos:broader|Sample code -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:arxiv_author|Marc'Aurelio Ranzato -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. 
Finally, this paper discusses several possibilities of future work in this topic.|sl:tag|tag:arxiv_doc -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:arxiv_author|Manzil Zaheer -Watson Speech-to-Text|skos:broader|Speech-to-Text -SIMILE|skos:broader|MIT -Linked Data|skos:broader|Semantic Web -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. 
We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|sl:arxiv_author|Ledell Wu -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:arxiv_firstAuthor|Alexis Conneau -Knowledge Graph Embeddings|skos:broader|Knowledge Representation -Generative model|skos:broader|Machine learning: techniques -Driverless car|skos:broader|Robotique -Tchad|skos:broader|Afrique -COO|skos:broader|VW -FBI v. Apple|skos:broader|Big Brother -Deep NLP|skos:broader|Deep Learning -Patent finding|skos:broader|AI 4 IP -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. 
Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Ray Kurzweil -Poincaré|skos:broader|Mathématicien -Empire colonial français|skos:broader|Histoire de France -fps blog|skos:broader|Blogger -Toyota|skos:broader|Japon -Justice américaine|skos:broader|USA -Hong Kong|skos:broader|Ville -scikit-learn|skos:broader|Machine Learning library -Technology Enhanced Learning|skos:broader|Education -Java web dev|skos:broader|Java dev -Normale Sup|skos:broader|Enseignement supérieur -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|sl:tag|tag:similarity_learning -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. 
Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:tag|tag:question_answering -Antilles|skos:broader|Amérique -Collaborative ontologie creation|skos:broader|Linked Data / collaborative editing -Open Education|skos:broader|Education -Information resources|skos:broader|Linked Data -iPod|skos:broader|Musique -Poésie|skos:broader|Littérature -Denny Britz|skos:broader|AI girls and guys -Apple CarPlay|skos:broader|Automobile 2.0 -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|sl:arxiv_author|Samuel J. Gershman -De-extinction|skos:broader|Nous vivons une époque moderne -Vary Header|skos:broader|HTTP -Personnage historique|skos:broader|Homme célèbre -Python tips|skos:broader|Dev tips -Yoshua Bengio|skos:broader|AI girls and guys -sindice|skos:broader|Linked Data -Google Maps|skos:broader|Google -Riemann|skos:broader|Mathématicien -Scientific information extraction|skos:broader|Information extraction -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. 
In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|sl:tag|tag:nlp_short_texts -Chelsea Manning|skos:broader|Whistleblower -Active learning|skos:broader|Training data -Ranking (information retrieval)|skos:broader|Information retrieval -Jean-Jacques Annaud|skos:broader|Réalisateur -Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|sl:tag|tag:sent2vec -Séquençage du génome|skos:broader|Biotechnologies Biotechnologies -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:tag|tag:ruslan_salakhutdinov -HttpUnit|skos:broader|JUnit -Accountable AI|skos:broader|Artificial Intelligence -OGM|skos:broader|Grands problèmes -Panama papers|skos:broader|Leaks -Norilsk|skos:broader|Industrie minière -Livre à lire|skos:broader|Livre -Autriche|skos:broader|Pays d'Europe -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... 
Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:tag|tag:knowledge_augmented_language_models -James Stewart|skos:broader|Acteur -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. 
The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:arxiv_author|Xiaoman Pan -Leigh Dodds|skos:broader|SW guys (and girls) -WWW 2008|skos:broader|Pékin -2D-NLP|skos:broader|NLP -Ressources halieutiques|skos:broader|Pêche -Crise financière|skos:broader|Money -Internet Related Technologies|skos:broader|Internet -Deep Learning frameworks|skos:broader|Machine Learning library -Gaulois|skos:broader|Histoire de France -Government data|skos:broader|Site web gouvernemental -Bill Gates|skos:broader|Technical girls and guys -Film américain|skos:broader|Cinéma américain -Darwin|skos:broader|Explorateur -Jersey Cache-Control|skos:broader|HTTP Cache -MyCarEvent|skos:broader|Réparation automobile -Conceptual modeling|skos:broader|Knowledge Representation -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|sl:arxiv_author|Juan Luis Suárez -Egypte|skos:broader|Afrique du Nord -KD-MKB|skos:broader|Thèse IRIT-Renault NLP-KB -End-To-End Memory Networks Neural network with a recurrent attention model over a possibly large external memory. cité par [#A. Bordes](/tag/antoine_bordes) à [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html) We introduce a neural network with a recurrent attention model over a possibly large external memory. The architecture is a form of Memory Network (Weston et al., 2015) but unlike the model in that work, it is trained end-to-end, and hence requires significantly less supervision during training, making it more generally applicable in realistic settings. It can also be seen as an extension of RNNsearch to the case where multiple computational steps (hops) are performed per output symbol. The flexibility of the model allows us to apply it to tasks as diverse as (synthetic) question answering and to language modeling. For the former our approach is competitive with Memory Networks, but with less supervision. For the latter, on the Penn TreeBank and Text8 datasets our approach demonstrates comparable performance to RNNs and LSTMs. In both cases we show that the key concept of multiple computational hops yields improved results.|sl:arxiv_firstAuthor|Sainbayar Sukhbaatar -TF-IDF|skos:broader|Vector space model -NLP techniques|skos:broader|NLP -V.S. Naipaul V. S. Naipaul|skos:broader|Prix Nobel -RSS extensions|skos:broader|RSS -Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. 
(méthode résumée [ici](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|sl:tag|tag:nearest_neighbor_search -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties we include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in a the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. 
We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:arxiv_author|Pat Verga -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:tag|tag:ai_stanford -Elias Torres|skos:broader|Technical girls and guys -Niklas Lindström|skos:broader|SW guys (and girls) -1984|skos:broader|Big Brother -OWL|skos:broader|Semantic Web -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. 
In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:nlp_using_knowledge_graphs -Knowledge Graph Embeddings|skos:broader|Embeddings -Apple-Intel|skos:broader|Apple -Gore Vidal|skos:broader|Intellectuel -Parenté à plaisanterie|skos:broader|Afrique de l'Ouest -TensorFlow|skos:broader|AI@Google -SSL|skos:broader|Access Control -Ebola|skos:broader|Épidémie -Nokia|skos:broader|Entreprise -KG Embeddings Library|skos:broader|Library (code) -Françafrique|skos:broader|France -JSONLD|skos:broader|LD -Tyrannical exploitation of nature by mankind|skos:broader|Crise écologique -Hixie|skos:broader|Technical girls and guys -Touareg|skos:broader|Peuples -Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincar\\'e ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincar\\'e embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|sl:tag|tag:poincare_embeddings -Gene therapy|skos:broader|Manipulations génétiques -Word Embedding Compositionality|skos:broader|Word embeddings -Fourmi|skos:broader|Insecte -Marlon Brando|skos:broader|Acteur -ONU|skos:broader|Institutions internationales -Huge RDF data source|skos:broader|RDF -Aventure|skos:broader|I like I like -Humour noir|skos:broader|Humour -Ex URSS URSS|skos:broader|Asie -AI & IR|skos:broader|Information retrieval -Voir [notes](/sl/doc/2015/09/semanlink2Notes.md) |skos:broader|Semantic Web, Semantic Me. -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. 
Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:arxiv_author|Haitian Sun -A Survey on Deep Learning for Named Entity Recognition mainly focus on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding stat-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|sl:tag|tag:survey -Backpropagation|skos:broader|Neural networks -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. 
This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|sl:arxiv_author|Harald Sack -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:tag|tag:cross_lingual_nlp -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. These information have been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. 
Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:tag|tag:categorical_variables -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|sl:arxiv_author|Oriol Vinyals -RDF and database|skos:broader|RDF -RDF Net API|skos:broader|RDF -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. 
KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:allen_institute_for_ai_a2i -IRD|skos:broader|Développement -Olivier Rossel|skos:broader|Technical girls and guys -Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study data redundancy (reverse relations), Cartesian product relations A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world In the active research area of employing embedding models for knowledge graph completion, particularly for the task of link prediction, most prior studies used two benchmark datasets FB15k and WN18 in evaluating such models. Most triples in these and other datasets in such studies belong to reverse and duplicate relations which exhibit high data redundancy due to semantic duplication, correlation or data incompleteness. This is a case of excessive data leakage---a model is trained using features that otherwise would not be available when the model needs to be applied for real prediction. There are also Cartesian product relations for which every triple formed by the Cartesian product of applicable subjects and objects is a true fact. Link prediction on the aforementioned relations is easy and can be achieved with even better accuracy using straightforward rules instead of sophisticated embedding models. A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world. This paper is the first systematic study with the main objective of assessing the true effectiveness of embedding models when the unrealistic triples are removed. Our experiment results show these models are much less accurate than what we used to perceive. Their poor accuracy renders link prediction a task without truly effective automated solution. Hence, we call for re-investigation of possible effective approaches.|sl:tag|tag:arxiv_doc -Archéologue|skos:broader|Archéologie -Catastrophe humanitaire|skos:broader|Catastrophe -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. 
We open-source our pretrained models and code.|sl:tag|tag:nlp_text_classification
-Yann LeCun|skos:broader|AI girls and guys
-Internet of Things|skos:broader|Internet
-Wikidata|skos:broader|Wikipedia
-Undecidability|skos:broader|Inference
-Cellule souche Stem cell|skos:broader|Médecine
-Word2vec|skos:broader|NLP@Google
-Hortefeux|skos:broader|Gouvernement Sarkozy
-Factory farming|skos:broader|Agriculture industrielle
- Deep contextualized word representations each word is assigned a representation which is a function of the entire sentence to which it belongs. The embeddings are computed from the internal states of a two-layer bidirectional Language Model, hence the name “ELMo”: Embeddings from Language Models. [Github](https://github.com/allenai/bilm-tf) |skos:broader|replacement of the vectorial representation of words with a matrix representation where each word’s representation includes information about its context Embedding words through a language model Language-model-based encoders The key idea underneath is to train a contextual encoder with a language model objective on a large unannotated text corpus. During the training, part of the text is masked and the goal is to encode the remaining context and predict the missing part. ([source](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1902.11269))
-Explainable AI|skos:broader|Artificial Intelligence
-Ministère de l'enseignement supérieur et de la recherche|skos:broader|Gouvernement
-Fukushima|skos:broader|Industrie nucléaire
-Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content charactering, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model comparing with the state-of-the-art models.|sl:arxiv_author|Ping Chen
-Information bottleneck method|skos:broader|Information theory
-Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model.
proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:tag|tag:explainable_ai -Ouïgour|skos:broader|Chine -Semantic folding|skos:broader|Semantic fingerprints -Henry Story|skos:broader|SW guys (and girls) -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|sl:arxiv_firstAuthor|Zhangyang Wang -Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|sl:arxiv_author|Liangzhen Lai -Annotation tools|skos:broader|Labeling data -BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. 
Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|sl:arxiv_author|Wen Wang
-IA: limites|skos:broader|IA AI
-Nelson Mandela|skos:broader|Afrique du Sud
-Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|sl:tag|tag:arxiv_doc
-A Theoretical Analysis of Contrastive Unsupervised Representation Learning [blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) Recent empirical works have successfully used unlabeled data to learn feature representations that are broadly useful in downstream classification tasks. Several of these methods are reminiscent of the well-known word2vec embedding algorithm: leveraging availability of pairs of semantically similar data points and negative samples, the learner forces the inner product of representations of similar pairs with each other to be higher on average than with negative samples. The current paper uses the term contrastive learning for such algorithms and presents a theoretical framework for analyzing them by introducing latent classes and hypothesizing that semantically similar points are sampled from the same latent class. This framework allows us to show provable guarantees on the performance of the learned representations on the average classification task that is comprised of a subset of the same set of latent classes. Our generalization bound also shows that learned representations can reduce (labeled) sample complexity on downstream tasks.
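The contrastive objective described in the entry above, pushing the inner product of similar pairs above that of negative samples, can be written down in a few lines. A minimal NumPy sketch with toy data; the function name, dimensions and sample values are invented for illustration, this is not code from the cited paper:

import numpy as np

def contrastive_loss(anchor, positive, negatives):
    """Toy contrastive objective: softmax cross-entropy where the
    positive pair must out-score the negative samples by inner product."""
    pos_score = anchor @ positive            # similar pair: want a high score
    neg_scores = negatives @ anchor          # negative samples: want low scores
    scores = np.concatenate(([pos_score], neg_scores))
    scores -= scores.max()                   # numerical stability
    return -scores[0] + np.log(np.exp(scores).sum())

rng = np.random.default_rng(0)
anchor = rng.normal(size=8)
positive = anchor + 0.1 * rng.normal(size=8)   # point from the same latent class
negatives = rng.normal(size=(5, 8))            # points from other latent classes
print(contrastive_loss(anchor, positive, negatives))

The loss is simply softmax cross-entropy with the positive pair treated as the correct class, which is also the shape of the InfoNCE objective mentioned further down in this hunk.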
We conduct controlled experiments in both the text and image domains to support the theory.|sl:arxiv_author|Orestis Plevrakis -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:arxiv_author|Ruslan Salakhutdinov -Mission Villani sur l'IA|skos:broader|Artificial Intelligence -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. 
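As an aside on the knowledge-distillation entry above: the soft-target objective such papers evaluate is commonly (Hinton-style) a cross-entropy between temperature-softened teacher and student distributions. A toy sketch under that assumption, with made-up logits, not the authors' code:

import numpy as np

def softmax(z, T=1.0):
    z = np.asarray(z, dtype=float) / T   # T is the distillation temperature
    z -= z.max()                         # numerical stability
    e = np.exp(z)
    return e / e.sum()

def distillation_loss(student_logits, teacher_logits, T=4.0):
    """Cross-entropy between the temperature-softened teacher and student
    distributions (often rescaled by T**2 when mixed with the hard-label loss)."""
    p_teacher = softmax(teacher_logits, T)
    p_student = softmax(student_logits, T)
    return -(p_teacher * np.log(p_student + 1e-12)).sum()

teacher_logits = [4.0, 1.0, -2.0]   # large, accurate teacher
student_logits = [2.5, 0.5, -1.0]   # smaller student mimicking it
print(distillation_loss(student_logits, teacher_logits))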
Finally, we show that this effect can be mitigated by stopping the teacher's training early. Our results generalize across datasets and models.|sl:arxiv_firstAuthor|Jang Hyun Cho -Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|sl:arxiv_author|Ruslan Salakhutdinov -Historic images|skos:broader|Photo -Belém|skos:broader|Ville -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:tag|tag:attention_is_all_you_need -Semi-supervised learning|skos:broader|Supervised machine learning -Humour|skos:broader|Rigolo -Cortical.io|skos:broader|NLP tools -Coursera: Computational Neuroscience|skos:broader|Computational Neuroscience -RDFa tool|skos:broader|RDFa -Platonov|skos:broader|Ecrivain -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. 
This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties. We include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:tag|tag:arxiv_doc
-SKOS W3C document|skos:broader|SKOS
-Brain vs Deep Learning|skos:broader|Deep Learning
-DBpedia Mobile|skos:broader|dbpedia
-Semantic Web Services vs SOAP|skos:broader|Web Services
-Apache web server|skos:broader|HTTP
-Musée de Niamey|skos:broader|Musées africains
-NoSQL and eventual consistency|skos:broader|Distributed computing
-pandas|skos:broader|Python 4 Data science
-Arduino|skos:broader|Robotique
-Patti Smith|skos:broader|Rock
-RDF Framework|skos:broader|Frameworks
-A Primer on Neural Network Models for Natural Language Processing Over the past few years, neural networks have re-emerged as powerful machine-learning models, yielding state-of-the-art results in fields such as image recognition and speech processing. More recently, neural network models started to be applied also to textual natural language signals, again with very promising results. This tutorial surveys neural network models from the perspective of natural language processing research, in an attempt to bring natural-language researchers up to speed with the neural techniques.
The tutorial covers input encoding for natural language tasks, feed-forward networks, convolutional networks, recurrent networks and recursive networks, as well as the computation graph abstraction for automatic gradient computation.|sl:arxiv_firstAuthor|Yoav Goldberg
-Kingsley Idehen|skos:broader|SW guys (and girls)
-Keyword/keyphrase extraction|skos:broader|NLP tasks / problems
-Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy text, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|sl:arxiv_author|Sam Shah
-The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|sl:tag|tag:jeremy_howard
-RDF Next Steps|skos:broader|RDF
-WWW 2013|skos:broader|J'y étais
-Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains.
By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|sl:arxiv_author|Sungchul Kim -TextRank|skos:broader|Learning to rank -Dark matter|skos:broader|Physique -One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|sl:arxiv_firstAuthor|Danilo Jimenez Rezende -Toutankhamon|skos:broader|Pharaon -XBRL|skos:broader|Financial Data -Taxe carbone|skos:broader|Pollueurs payeurs -Apache OpenNLP|skos:broader|apache.org -HTML Data|skos:broader|Web of data -Biohackers|skos:broader|Hackers -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. 
Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|sl:tag|tag:machine_learning -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:tag|tag:question_answering -JavaScript Tutorial|skos:broader|JavaScript -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.|sl:arxiv_author|Christopher D. 
Manning -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|sl:tag|tag:arxiv_doc -Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.|sl:tag|tag:arxiv_doc -FBI v. Apple|skos:broader|FBI -war on drugs|skos:broader|Prohibition des narcotiques -Attention mechanism relating different positions of a sequence in order to compute a representation of the same sequence. Useful in machine reading, abstractive summarization, or image description generation |skos:broader|Good explanation is this [blog post by D. Britz](/doc/?uri=http%3A%2F%2Fwww.wildml.com%2F2016%2F01%2Fattention-and-memory-in-deep-learning-and-nlp%2F). (But the best explanation related to attention is to be found in this [post](/doc/2019/08/transformers_from_scratch_%7C_pet) about Self-Attention.) While simple Seq2Seq builds a single context vector out of the encoder’s last hidden state, attention creates shortcuts between the context vector and the entire source input: the context vector has access to the entire input sequence. The decoder can “attend” to different parts of the source sentence at each step of the output generation, and the model learns what to attend to based on the input sentence and what it has produced so far. 
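A minimal sketch of the dot-product attention step just described: score every encoder state against the current decoder state, softmax the scores into weights, and take the weighted sum as the context vector. Toy NumPy code with invented shapes and values, for illustration only:

import numpy as np

def softmax(z):
    z = z - z.max()
    e = np.exp(z)
    return e / e.sum()

def attend(decoder_state, encoder_states):
    """Dot-product attention: the context vector has access to the whole
    input sequence, weighted by relevance to the current decoder state."""
    scores = encoder_states @ decoder_state   # one score per input position
    weights = softmax(scores)                 # attention weights (inspectable)
    context = weights @ encoder_states        # weighted sum over the inputs
    return context, weights

rng = np.random.default_rng(0)
encoder_states = rng.normal(size=(6, 4))      # 6 input positions, hidden size 4
decoder_state = rng.normal(size=4)
context, weights = attend(decoder_state, encoder_states)
print(weights)   # one row of the attention weight matrix

The weights printed here are one row of the attention weight matrix that, as the entry goes on to note, can be inspected to interpret what the model is doing.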
Possible to interpret what the model is doing by looking at the attention weight matrix. Cost: We need to calculate an attention value for each combination of input and output word (D. Britz: - attention is a bit of a misnomer: we look at everything in detail before deciding what to focus on)
-Sentence Embeddings|skos:broader|Document embeddings
-RBM|skos:broader|ANN NN Artificial neural network
-fps ontologies|skos:broader|fps dev
-Guerres coloniales|skos:broader|Colonisation
-Médecins sans frontières|skos:broader|Humanitaire
-A statistical model for discovering the abstract topics that occur in a collection of documents. |skos:broader|Methods for quantifying and categorizing semantic similarities between linguistic items based on their distributional properties in large samples of language data. Basic idea: the Distributional hypothesis: linguistic items with similar distributions have similar meanings. Basic approach: collect distributional information in high-dimensional vectors, and define similarity in terms of vector similarity Models: latent semantic analysis (LSA), Hyperspace Analogue to Language (HAL), syntax- or dependency-based models, random indexing, semantic folding and various variants of the topic model.
-Named Entity Disambiguation using Deep Learning on Graphs Evaluation of different deep learning techniques to create a context vector from graphs, aimed at high-accuracy NED. (neural approach for entity disambiguation using graphs as background knowledge) We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task... [published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) In NED, the system must be able to generate a context for an entity in a text and an entity in a knowledge base, then correctly link the two. Explore whether representing graphs as triplets is more useful than using the full topological information of the graph We tackle \\ac{NED} by comparing entities in short sentences with \\wikidata{} graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to \\ac{NED}. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task. In addition, a new dataset (\\wikidatadisamb{}) is created to allow a clean and scalable evaluation of \\ac{NED} with \\wikidata{} entries, and to be used as a reference in future research. In the end our results show that a \\ac{Bi-LSTM} encoding of the graph triplets performs best, improving upon the baseline models and scoring an F1 value of $91.6\\%$ on the \\wikidatadisamb{} test set|sl:tag|tag:nlp_short_texts
-Calvin|skos:broader|Fanatisme
-Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer.
We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:tag|tag:nlp_facebook -Enigmes de la physique|skos:broader|Enigme -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|sl:tag|tag:label_embedding -Apple CarPlay|skos:broader|iphone -Espagne|skos:broader|Europe -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). 
To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance. Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, that reminds me of something) Assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:arxiv_author|Zhihong Shen
-Maladie contagieuse|skos:broader|Maladie
-Joseki|skos:broader|SPARQL AND Jena
-RDF repository|skos:broader|RDF
-Apple Developer Connection|skos:broader|Dev
-External memory algorithm|skos:broader|Scaling
-Grèce antique|skos:broader|Antiquité
-Phil Archer|skos:broader|SW guys (and girls)
-Yagán|skos:broader|Amérindien
-Tensor2Tensor|skos:broader|TensorFlow
-Attentats 13-11-2015|skos:broader|Paris
-delicious java|skos:broader|del.icio.us
-Restricted Boltzmann machine|skos:broader|Neural networks
-Anaconda|skos:broader|Python
-Magnétisme terrestre|skos:broader|Magnétisme
-DITA|skos:broader|Technical documentation
-Leo Sauermann|skos:broader|SW guys (and girls)
-Property Graphs|skos:broader|Graph database
-Paris|skos:broader|France
-Image classification|skos:broader|Statistical classification
-OSEMA 2011|skos:broader|ESWC 2011
-François Chollet|skos:broader|AI girls and guys
-classification method that generalizes logistic regression to multiclass problems.
Assumes that a linear combination of the observed features and some problem-specific parameters can be used to determine the probability of each particular outcome of the dependent variable. If you want to assign probabilities to an object being one of several different things, softmax is the thing to do. Even later on, when we train more sophisticated models, the final step will be a layer of softmax. [cf.](http://www.tensorflow.org/tutorials/mnist/beginners/index.md) |skos:broader|regression model where the dependent variable is categorical. -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Justin Gilmer -MINOS Neutrino Experiment|skos:broader|Expérience scientifique -Tree of life|skos:broader|Biology -Dalai Lama|skos:broader|Tibet -Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. 
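The softmax / multinomial logistic regression entry just completed above translates directly into code: class probabilities come from a softmax over a linear combination of the observed features. A toy sketch with invented weights and features, for illustration only:

import numpy as np

def softmax(z):
    z = z - z.max()
    e = np.exp(z)
    return e / e.sum()

def predict_proba(x, W, b):
    """Multinomial logistic regression: softmax over a linear
    combination of the observed features."""
    return softmax(W @ x + b)

x = np.array([1.0, 2.0, 0.5])           # observed features
W = np.array([[0.2, -0.1, 0.4],         # one row of parameters per class
              [-0.3, 0.2, 0.1],
              [0.1, 0.1, -0.2]])
b = np.zeros(3)
print(predict_proba(x, W, b))           # class probabilities, sums to 1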
Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.|sl:arxiv_author|Yi Luan
-classification decision based on the value of a linear combination of the feature values |skos:broader|the problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations whose category membership is known.
-RDF forms|skos:broader|Linked Data
-Bibliothèque numérique|skos:broader|Bibliothèque
-Natural Language Processing|skos:broader|Langage
-Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having too many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|sl:arxiv_author|Andreas Dengel
-Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty.
Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|sl:tag|tag:dropout -Country ontologies|skos:broader|Ontologies -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:tag|tag:embeddings -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. 
By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|sl:arxiv_author|Hideaki Takeda -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|sl:tag|tag:elmo -Anglais|skos:broader|Langues -LDOW2008|skos:broader|WWW 2008 -Spark (Java web framework)|skos:broader|Web dev framework -AI black box|skos:broader|Artificial Intelligence -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. 
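The nearest-neighbor candidate expansion described in the SEALS entry above can be sketched with brute-force cosine similarity; the systems the entry cites would use an approximate index such as Annoy or Faiss instead, and all data below is toy and invented:

import numpy as np

def expand_candidates(labeled, unlabeled, k=3):
    """Indices of unlabeled examples that are among the k nearest
    neighbors (cosine similarity) of any labeled example."""
    L = labeled / np.linalg.norm(labeled, axis=1, keepdims=True)
    U = unlabeled / np.linalg.norm(unlabeled, axis=1, keepdims=True)
    sims = L @ U.T                              # (n_labeled, n_unlabeled)
    nearest = np.argsort(-sims, axis=1)[:, :k]  # k best per labeled example
    return np.unique(nearest)

rng = np.random.default_rng(0)
labeled = rng.normal(size=(4, 16))       # embeddings of the labeled set
unlabeled = rng.normal(size=(1000, 16))  # heavily skewed unlabeled pool
pool = expand_candidates(labeled, unlabeled)
print(pool.size, "candidates instead of", len(unlabeled))

Each selection round then scores only this small pool rather than the full unlabeled set, which is the source of the claimed computational savings.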
Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:tag|tag:active_learning -Documentaire TV|skos:broader|Documentaire -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. 
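The SEALS record above rests on one operation: restrict each selection round to the nearest neighbors of the labeled set rather than scoring all unlabeled data. A minimal sketch of that candidate expansion, assuming unit-normalized float32 embeddings and using Faiss (which the record itself mentions); the function name, dimensions, and pool sizes are illustrative, not from the paper:

```python
# Sketch of SEALS-style candidate expansion with Faiss (illustrative, not the paper's code).
import numpy as np
import faiss

d = 512
unlabeled = np.random.rand(100_000, d).astype("float32")
faiss.normalize_L2(unlabeled)            # cosine similarity == inner product after normalization

index = faiss.IndexFlatIP(d)             # exact inner-product index over the unlabeled pool
index.add(unlabeled)

def expand_candidates(labeled_vecs: np.ndarray, k: int = 100) -> set:
    """Indices of the k nearest unlabeled neighbors of each labeled example."""
    faiss.normalize_L2(labeled_vecs)
    _, neighbors = index.search(labeled_vecs, k)
    return set(neighbors.ravel())

# Each active-learning round then scores only this small pool, not all 100k examples.
labeled = np.random.rand(50, d).astype("float32")
candidate_pool = expand_candidates(labeled)
```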
By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:personal_assistant -Innoraise|skos:broader|Semantic Web : Application -FAO|skos:broader|Institutions internationales -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words. Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|sl:tag|tag:elmo -public-lod@w3.org|skos:broader|Linked Data -TransE|skos:broader|Entity embeddings -Topic modelling for humans ; Python framework for fast Vector Space Modelling |skos:broader|Algebraic model for representing text documents as vectors of identifiers such as index terms. Documents and queries are represented as vectors. Each dimension corresponds to a separate term. If a term occurs in the document, its value in the vector is non-zero. One way of computing the value: TF-IDF -LDOW2008|skos:broader|Linking Open Data -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods.
In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|sl:arxiv_author|Ser-Nam Lim -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:tag|tag:ml_google -OGM|skos:broader|Biotechnologies Biotechnologies -BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. 
Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|sl:arxiv_author|Zhu Zhuo -Phoenix Mars Lander|skos:broader|Exploration marsienne -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:tag|tag:occam_s_razor -Knowledge Graphs and NLP|skos:broader|Knowledge Graphs -Synonym URIs|skos:broader|Linked Data -ARIMA|skos:broader|Time Series -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. 
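The joint BERT record above boils down to a single encoder with two classification heads: an intent head on the sentence-level representation and a slot head on each token. A minimal sketch of that architecture with the Hugging Face transformers library; the head shapes and training details are assumptions, not the paper's exact configuration:

```python
# Sketch of a joint intent-classification + slot-filling model on top of BERT.
import torch.nn as nn
from transformers import BertModel

class JointBert(nn.Module):
    def __init__(self, n_intents: int, n_slots: int, name: str = "bert-base-uncased"):
        super().__init__()
        self.bert = BertModel.from_pretrained(name)
        h = self.bert.config.hidden_size
        self.intent_head = nn.Linear(h, n_intents)  # one label per utterance
        self.slot_head = nn.Linear(h, n_slots)      # one label per token

    def forward(self, input_ids, attention_mask):
        out = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        intent_logits = self.intent_head(out.pooler_output)    # (batch, n_intents)
        slot_logits = self.slot_head(out.last_hidden_state)    # (batch, seq_len, n_slots)
        return intent_logits, slot_logits

# Training would sum a cross-entropy loss over intents and one over slot tags,
# which is the joint objective the record describes.
```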
On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the-art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|sl:arxiv_author|Sungjin Ahn -GoodRelations|skos:broader|Semantic SEO -similar items are clustered into classes, an n-gram language model for the class tokens is generated, and then the probabilities for words in a class are distributed according to the smoothed relative unigram frequencies of the words.|skos:broader|Language modeling: task of predicting the next word in a text given the previous words. Example of concrete practical applications: intelligent keyboards Language model: probability distribution over sequences of words. Statistical language models try to learn the probability of the next word given its previous words. Models rely on an auto-regressive factorization of the joint probability of a corpus using different approaches, from n-gram models to RNNs (SOTA as of 2018-01) ([source](https://arxiv.org/abs/1801.06146)) -Wordnet|skos:broader|Anglais -Littérature africaine|skos:broader|Afrique -Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results. This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|sl:tag|tag:arxiv_doc -Erreur judiciaire|skos:broader|Justice -Cory Doctorow|skos:broader|Ecrivain -Richard Socher|skos:broader|NLP girls and guys -finding clusters which are defined by only a subset of dimensions (agreement across all N features is not required)|skos:broader|the task of grouping a set of objects in such a way that objects in the same group (cluster) are more similar (in some sense or another) to each other than to those in other groups. -The information bottleneck method We define the relevant information in a signal x ∈ X as being the information that this signal provides about another signal y ∈ Y. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal x requires more than just predicting y, it also requires specifying which features of X play a role in the prediction.
We formalize this problem as that of finding a short code for X that preserves the maximum information about Y. That is, we squeeze the information that X provides about Y through a ‘bottleneck’ formed by a limited set of codewords X̃... This approach yields an exact set of self consistent equations for the coding rules X → X̃ and X̃ → Y. (from the intro): how to define meaningful / relevant information? An issue left out of information theory by Shannon (the focus being on the problem of transmitting information rather than judging its value to the recipient), which leads to considering statistical and information-theoretic principles as almost irrelevant for the question of meaning. In contrast, we argue here that information theory, in particular lossy source compression, provides a natural quantitative approach to the question of “relevant information.” Specifically, we formulate a variational principle for the extraction or efficient representation of relevant information. We define the relevant information in a signal $x \in X$ as being the information that this signal provides about another signal $y \in Y$. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal $x$ requires more than just predicting $y$, it also requires specifying which features of $X$ play a role in the prediction. We formalize this problem as that of finding a short code for $X$ that preserves the maximum information about $Y$. That is, we squeeze the information that $X$ provides about $Y$ through a ‘bottleneck’ formed by a limited set of codewords $\tilde{X}$. This constrained optimization problem can be seen as a generalization of rate distortion theory in which the distortion measure $d(x,\tilde{x})$ emerges from the joint statistics of $X$ and $Y$. This approach yields an exact set of self consistent equations for the coding rules $X \to \tilde{X}$ and $\tilde{X} \to Y$. Solutions to these equations can be found by a convergent re-estimation method that generalizes the Blahut-Arimoto algorithm. Our variational principle provides a surprisingly rich framework for discussing a variety of problems in signal processing and learning, as will be described in detail elsewhere.|sl:arxiv_firstAuthor|Naftali Tishby Hebrew University and NEC Research Institute -Zemanta|skos:broader|Keep new -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|sl:arxiv_author|David Duvenaud -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts...
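The information bottleneck record above can be condensed into one variational objective; a standard statement of it in the paper's notation, where the Lagrange multiplier β trades compression of X against preserved information about Y:

```latex
% Information bottleneck Lagrangian: find a compressed representation \tilde{X}
% of X that keeps as much information about Y as possible.
\min_{p(\tilde{x} \mid x)} \; \mathcal{L} \;=\; I(X; \tilde{X}) \;-\; \beta \, I(\tilde{X}; Y)
```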
We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:arxiv_author|Percy Liang -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them.
We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:tag|tag:k_nearest_neighbors_algorithm -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only a few training documents from which to build a classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in the same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|sl:arxiv_author|Liping Jing -SPARQL Extension Functions|skos:broader|SPARQL -Word embedding: evaluation|skos:broader|Embedding evaluation -Equateur|skos:broader|Amérique du sud -Niger : festival de la jeunesse|skos:broader|Niger -CFPM|skos:broader|Niamey -Linked Data in enterprise|skos:broader|LD -Feature learning|skos:broader|Machine learning: problems -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|sl:tag|tag:facebook_fair -LOD & museum|skos:broader|Culture et sem web -WWW 2008|skos:broader|TheWebConf -Société française|skos:broader|France -Mur de Berlin|skos:broader|Berlin -BlackboxNLP (2018 workshop)|skos:broader|EMNLP 2018 -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks.
Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:arxiv_author|Tim Rocktäschel -PCA|skos:broader|Representation learning -Prohibition des narcotiques|skos:broader|Trafic de drogue -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:tag|tag:aidan_hogan -A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged.
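The "Language Models as Knowledge Bases?" record above probes factual recall with fill-in-the-blank cloze statements. A minimal sketch of such a probe with the transformers fill-mask pipeline; the query sentence is an illustrative example, not one drawn from the LAMA benchmark:

```python
# Sketch of a LAMA-style cloze probe: ask a pretrained masked LM to fill the blank
# and inspect its ranked guesses (no fine-tuning involved).
from transformers import pipeline

unmasker = pipeline("fill-mask", model="bert-base-uncased")
for pred in unmasker("The capital of France is [MASK]."):
    print(pred["token_str"], round(pred["score"], 3))
```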
In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|sl:tag|tag:survey -Knowledge Graph Construction|skos:broader|Knowledge Graph Completion -Swoogle|skos:broader|Semantic Web search engine -Named Entity Recognition with Extremely Limited Data Named Entity Search (NES) We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries. We do not propose this as a replacement for NER, but as something to be used for an ephemeral or contextual class of entity, when it does not make sense to label hundreds or thousands of instances to learn a classifier Traditional information retrieval treats named entity recognition as a pre-indexing corpus annotation task, allowing entity tags to be indexed and used during search. Named entity taggers themselves are typically trained on thousands or tens of thousands of examples labeled by humans. However, there is a long tail of named entities classes, and for these cases, labeled data may be impossible to find or justify financially. We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries.|sl:arxiv_author|John Foley -Archéologie amazonienne|skos:broader|Amazonie -Politique économique française|skos:broader|Economie française -Unit test|skos:broader|Tests -Stanford classifier|skos:broader|NLP tools -Turing|skos:broader|Mathématicien -LibShortText|skos:broader|Text Classification -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. 
a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance. Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, this reminds me of something) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of ⟨query concept, anchor concept⟩ pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:arxiv_author|Chenyan Xiong -Tombe d'amphipolis|skos:broader|Grèce antique -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations (a short usage sketch follows below).
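As a concrete illustration of the fastText baseline whose record begins just above (and continues below), a minimal supervised-classification sketch with the official fasttext Python bindings; the file name, labels, and hyperparameters are assumptions:

```python
# Sketch of fastText supervised text classification (illustrative setup).
import fasttext

# fastText expects one example per line, labels prefixed with "__label__", e.g.:
#   __label__positive I loved this movie
model = fasttext.train_supervised(input="train.txt", epoch=5, wordNgrams=2)

labels, probs = model.predict("the film was surprisingly good")
print(labels, probs)
```

The speed claims in the record come from exactly this recipe: averaged word (and word n-gram) embeddings feeding a linear classifier, so training is a single pass of cheap updates rather than deep network backpropagation.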
Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute.|sl:tag|tag:arxiv_doc -Affaires de Gado à Niamey|skos:broader|Niamey -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Yousef Hindy -Hackers|skos:broader|Hack -Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. two methods: 1.
Category Embedding model: it replaces the entities in the context with their directly labeled categories to build categories’ context; 2. Hierarchical Category Embedding: it further incorporates all ancestor categories of the context entities to utilize the hierarchical information. Due to the lack of structured knowledge applied in learning distributed representation of categories, existing work cannot incorporate category hierarchies into entity information. We propose a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. The framework allows to compute meaningful semantic relatedness between entities and categories. Our framework can handle both single-word concepts and multiple-word concepts with superior performance on concept categorization and yield state of the art results on dataless hierarchical classification.|sl:arxiv_author|Rahul Iyer -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:tag|tag:geometry_of_language_embeddings -Benjamin Nowack|skos:broader|SW guys (and girls) -ElasticSearch: nearest neighbor(s)|skos:broader|Nearest neighbor search -Dean Allemang|skos:broader|SW guys (and girls) -Thucydide|skos:broader|Grèce antique -RDF Application|skos:broader|RDF -Honda|skos:broader|Automobile -Troubleshooting|skos:broader|J'ai un petit problème avec mon ordinateur -Scala|skos:broader|Java -PageRank|skos:broader|Google ranking -Pierre de Volvic|skos:broader|Volvic -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al.
Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Juan Sequeda -Shoah|skos:broader|Génocide -Cornell|skos:broader|Universités américaines -The Limits to Growth|skos:broader|Croissance -Cancer|skos:broader|Maladie -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language models' ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:tag|tag:arxiv_doc -John Sofakolle|skos:broader|CFPM -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network.
We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|sl:arxiv_firstAuthor|Hugo Cui -Seevl|skos:broader|Linked Data: application -KIWI project|skos:broader|Semantic Wiki -Turquie|skos:broader|Pays d'Europe -Tasmanian devil|skos:broader|Cancer -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:arxiv_author|Ronan Collobert -SPARQL: sample code|skos:broader|SPARQL -Antiquité|skos:broader|Archéologie -Ranked Entities in Search Results|skos:broader|NLP and Search -Long short-term memory: recurrent neural network architecture well-suited for time series with long time lags between important events. (cf. the problem of long-term dependencies, such as when you want to predict the next word in "I grew up in France… I speak fluent [?]"). A solution to the vanishing gradient problem in RNNs |skos:broader|the natural architecture of NN to deal with sequences. NN where connections between units form a directed cycle. This creates an internal state of the network which allows it to exhibit dynamic temporal behavior. Unlike feedforward neural networks, RNNs can use their internal memory to process arbitrary sequences of inputs. This makes them applicable to tasks such as unsegmented connected handwriting recognition or speech recognition. 2 broad classes: finite impulse and infinite impulse (a finite impulse RNN can be unrolled and replaced with a strictly feedforward neural network) Problems with RNNs: - they suffer from the vanishing gradient problem that prevents them from learning long-range dependencies. [#LSTMs](/tag/lstm_networks) improve upon this by using a gating mechanism that allows for explicit memory deletes and updates.
- inherently sequential computation which prevents parallelization across elements of the input sequence. RNN in NLP: - Goal: representing a sequence of words as dense vectors - input: seq of words (or chars) - output: a seq of hidden states, each a representation of the seq from the beginning to a specific position - advantages: encoding sequential relationships and dependency among words -Virtuoso|skos:broader|Semantic Web Platform -Document embeddings|skos:broader|Text Embeddings -Peter Mika|skos:broader|SW guys (and girls) -Forms|skos:broader|HTML Dev -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:arxiv_author|Gargi Ghosh -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|sl:arxiv_firstAuthor|Sandeep Subramanian -Linked Data in enterprise|skos:broader|Enterprise Semantic Web Semantic Web in the enterprise Corporate Semantic Web -VoCamp|skos:broader|Semantic Web -TouchGraph|skos:broader|Applet -A machine learning model that models some of the structural and algorithmic properties of the neocortex. HTM is a biomimetic model based on the memory-prediction theory of brain function described by Jeff Hawkins. HTM is a method for discovering and inferring the high-level causes of observed input patterns and sequences, thus building an increasingly complex model of the world. |skos:broader|a theory of brain function created by Jeff Hawkins about mammalian neocortex. Role of the mammalian neocortex in matching sensory inputs to stored memory patterns, and how this process leads to predictions of what will happen in the future.
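The RNN-for-NLP note above (input: a word sequence; output: one hidden state per position) maps directly onto a few lines of PyTorch. A minimal sketch, with arbitrary vocabulary and dimension choices, using an LSTM since the note credits its gating with mitigating the vanishing-gradient problem:

```python
# Sketch of encoding a word sequence with an RNN (illustrative sizes).
import torch
import torch.nn as nn

vocab_size, emb_dim, hidden_dim = 10_000, 128, 256
embed = nn.Embedding(vocab_size, emb_dim)
rnn = nn.LSTM(emb_dim, hidden_dim, batch_first=True)  # gated variant of the plain RNN

word_ids = torch.randint(0, vocab_size, (1, 12))      # a batch of one 12-word sentence
states, (h_n, _) = rnn(embed(word_ids))
# states: (1, 12, 256) -- a representation of the prefix ending at each position
# h_n[-1]: (1, 256)    -- a dense vector summarizing the whole sequence
```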
-France : dysfonctionnement administratif|skos:broader|France : dysfonctionnement des institutions -David Ricardo|skos:broader|Capitalisme -gensim|skos:broader|Vector space model -BBC semantic publishing|skos:broader|RDF Application -Le Pen|skos:broader|FN -Deep Patent Landscaping Model Using Transformer and Graph Embedding a transformer encoder for analyzing textual data present in patent documents and a graph convolutional network for analyzing patent metadata. A benchmarking dataset for patent landscaping based on patent trends reports published by the Korean Patent Office. Data acquisition using Google's BigQuery public datasets. 10% improvement compared to Google’s proposed Automated Patent Landscaping. Empirical analysis of the importance of features (text vs metadata, citations vs classification) Patent landscaping is a method used for searching related patents during a research and development (R&D) project. To avoid the risk of patent infringement and to follow current trends in technology, patent landscaping is a crucial task required during the early stages of an R&D project. As the process of patent landscaping requires advanced resources and can be tedious, the demand for automated patent landscaping has been gradually increasing. However, a shortage of well-defined benchmark datasets and comparable models makes it difficult to find related research studies. In this paper, we propose an automated patent landscaping model based on deep learning. To analyze the text of patents, the proposed model uses a modified transformer structure. To analyze the metadata of patents, we propose a graph embedding method that uses a diffusion graph called Diff2Vec. Furthermore, we introduce four benchmark datasets for comparing related research studies in patent landscaping. The datasets are produced by querying Google BigQuery, based on a search formula from a Korean patent attorney. The obtained results indicate that the proposed model and datasets can attain state-of-the-art performance, as compared with current patent landscaping models.|sl:arxiv_author|Seokkyu Choi -Fast.ai course|skos:broader|MOOC -UNIX Tips|skos:broader|Unix -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only a few training documents from which to build a classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in the same latent space.
An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|sl:arxiv_firstAuthor|Xin Huang -SDMX-RDF|skos:broader|RDF -Google Colab|skos:broader|Google Research -Hymne à la joie|skos:broader|Beethoven -todo à voir : https://www.researchgate.net/publication/315590093_Cost-sensitive_sequential_three-way_decision_modeling_using_a_deep_neural_network https://arxiv.org/pdf/1611.05134.pdf https://www.researchgate.net/publication/261379944_Three-way_decisions_with_artificial_neural_networks |skos:broader|the problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations whose category membership is known. -Taxe carbone|skos:broader|Économie écologique -Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content charactering, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantically representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model comparing with the state-of-the-art models.|sl:tag|tag:topic_modeling_over_short_texts -WWW 2018|skos:broader|WWW Conference -Javascript RDF Parser in IE|skos:broader|Javascript RDF Parser -Data Augmentation|skos:broader|Machine learning: techniques -EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts.
Presented in these [slides](/doc/2019/12/unsupervised_learning_with_text) Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. The presence of a concept is decided from an excerpt i.e. a small sequence of consecutive words in the text. Relevant concepts for the prediction task at hand are automatically defined by our model, avoiding the need for concept-level annotations. To ease interpretability, we enforce that for each concept, the corresponding excerpts share similar semantics and are differentiable from each others. We experimentally demonstrate the relevance of our approach on text classification and multi-sentiment analysis tasks.|sl:tag|tag:arxiv_doc -Graph Attention Networks |skos:broader|Attention in Graphs -Internet Explorer|skos:broader|Microsoft -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|sl:tag|tag:string_searching_algorithm -jersey/RDF|skos:broader|jersey -A Survey Of Cross-lingual Word Embedding Models Cross-lingual representations of words enable us to reason about word meaning in multilingual contexts and are a key facilitator of cross-lingual transfer when developing natural language processing models for low-resource languages. In this survey, we provide a comprehensive typology of cross-lingual word embedding models. We compare their data requirements and objective functions. 
The recurring theme of the survey is that many of the models presented in the literature optimize for the same objectives, and that seemingly different models are often equivalent modulo optimization strategies, hyper-parameters, and such. We also discuss the different ways cross-lingual word embeddings are evaluated, as well as future challenges and research horizons.|sl:arxiv_author|Anders Søgaard -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:tag|tag:elasticsearch -Moyen-âge|skos:broader|Histoire -A La Carte Embedding|skos:broader|N-grams -Ontology|skos:broader|Web sémantique sw -JSON 2 JSON-LD|skos:broader|JSON-LD -Read-Write Secure Data Web|skos:broader|Read-Write Linked Data -A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. 
We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|sl:tag|tag:arxiv_doc -Poète|skos:broader|Ecrivain -WWW 2012|skos:broader|J'y étais -Missions spatiales|skos:broader|Exploration spatiale -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|sl:arxiv_author|Gerald Tesauro -data.gouv.fr|skos:broader|Gouvernement français -Wikipedia|skos:broader|Encyclopédie collaborative -Aldous Huxley|skos:broader|Ecrivain -Snorkel|skos:broader|Weak supervision -CommonTag|skos:broader|RDF/A -m2eclipse|skos:broader|Maven -LIME|skos:broader|AI black box -LDP|skos:broader|RW Linked Data -Iguane|skos:broader|Reptile -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. 
Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|sl:arxiv_author|Jason Weston -Corruption|skos:broader|Grands problèmes -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|sl:tag|tag:nn_symbolic_ai_hybridation -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. 
However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:tag|tag:ml_google -Docker-Volumes|skos:broader|Docker -Bitcoin|skos:broader|Cryptocurrency -Film|skos:broader|Cinéma -Eglise catholique|skos:broader|Catholicisme -Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|sl:arxiv_author|Yulia Tsvetkov -ML Engineering|skos:broader|Machine learning -Querying Remote SPARQL Services|skos:broader|SPARQL -Antisémitisme|skos:broader|Racisme -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. 
Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:tag|tag:memory_networks -Education and Linked Data|skos:broader|Technology Enhanced Learning -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. 
On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|sl:arxiv_firstAuthor|Ke Tran -Portland (OR)|skos:broader|Oregon -iPod|skos:broader|Digital entertainment -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Nicolas Heess -Apache Spark|skos:broader|jma -OWL|skos:broader|Knowledge Representation -IJCAI|skos:broader|AI Conference -Stephen Wolfram|skos:broader|Technical girls and guys -IBM SPSS Text Analytics for Surveys|skos:broader|Survey analysis -Meaning in NLP|skos:broader|NLP tasks / problems -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. 
a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:tag|tag:guillaume_lample -Norilsk|skos:broader|Pollution -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. 
For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:arxiv_author|Edward Chou -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:tag|tag:arxiv_doc -Mike Bergman|skos:broader|SW guys (and girls) -LDP|skos:broader|LD -Areva|skos:broader|Industrie nucléaire -R2RML|skos:broader|Database to RDF mapping -Flask|skos:broader|Python tools -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques.
We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Axel Polleres -Google Structured Data Testing Tool|skos:broader|Web tools -Document embeddings|skos:broader|Embeddings -Security and REST|skos:broader|REST -NTIC|skos:broader|Technologie -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|sl:tag|tag:named_entity_recognition -Fado tropical|skos:broader|Chanson -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels.
Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|sl:tag|tag:information_bottleneck_method -Recommender Systems|skos:broader|Machine learning: problems -Jiroft|skos:broader|Antiquité iranienne -Supraconductivité|skos:broader|Mécanique quantique -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|sl:tag|tag:jeremy_howard -Stanbol|skos:broader|Interactive Knowledge Stack -Blogs Le Monde|skos:broader|Journal Le Monde -Jean-Paul Cardinal|skos:broader|Mathématicien -Mladic|skos:broader|Guerre de Yougoslavie -Paradoxe Einstein-Podolsky-Rosen|skos:broader|Photons corrélés -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. 
CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|sl:arxiv_author|Esteban Real -Écologie|skos:broader|Nature -Nike|skos:broader|Entreprise -Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. two methods: 1. Category Embedding model: it replaces the entities in the context with their directly labeled categories to build categories’ context; 2. Hierarchical Category Embedding: it further incorporates all ancestor categories of the context entities to utilize the hierarchical information. Due to the lack of structured knowledge applied in learning distributed representation of categories, existing work cannot incorporate category hierarchies into entity information. We propose a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. The framework allows to compute meaningful semantic relatedness between entities and categories. Our framework can handle both single-word concepts and multiple-word concepts with superior performance on concept categorization and yield state of the art results on dataless hierarchical classification.|sl:arxiv_author|Yuezhang Li -Saturne|skos:broader|Système solaire -OwlSight|skos:broader|Clark and Parsia -Makolab Semantic Day|skos:broader|Makolab -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|sl:tag|tag:nlp_facebook -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages.
However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|sl:tag|tag:arxiv_doc -RAKE|skos:broader|Python-NLP -KGAT: Knowledge Graph Attention Network for Recommendation To provide more accurate, diverse, and explainable recommendation, it is compulsory to go beyond modeling user-item interactions and take side information into account. Traditional methods like factorization machine (FM) cast it as a supervised learning problem, which assumes each interaction as an independent instance with side information encoded. Due to the overlook of the relations among instances or items (e.g., the director of a movie is also an actor of another movie), these methods are insufficient to distill the collaborative signal from the collective behaviors of users. In this work, we investigate the utility of knowledge graph (KG), which breaks down the independent interaction assumption by linking items with their attributes. We argue that in such a hybrid structure of KG and user-item graph, high-order relations --- which connect two items with one or multiple linked attributes --- are an essential factor for successful recommendation. We propose a new method named Knowledge Graph Attention Network (KGAT) which explicitly models the high-order connectivities in KG in an end-to-end fashion. It recursively propagates the embeddings from a node's neighbors (which can be users, items, or attributes) to refine the node's embedding, and employs an attention mechanism to discriminate the importance of the neighbors. Our KGAT is conceptually advantageous to existing KG-based recommendation methods, which either exploit high-order relations by extracting paths or implicitly modeling them with regularization. Empirical results on three public benchmarks show that KGAT significantly outperforms state-of-the-art methods like Neural FM and RippleNet. Further studies verify the efficacy of embedding propagation for high-order relation modeling and the interpretability benefits brought by the attention mechanism.|sl:tag|tag:recommender_systems -Primate|skos:broader|Animal -Amour|skos:broader|Sentiment -AI risks|skos:broader|IA AI -KnowBert|skos:broader|Contextualized word representations -RDA|skos:broader|Communisme -Social bookmarking|skos:broader|Tagging -Scandinavie|skos:broader|Europe -Markets are efficient if and only if P = NP Hmm wow I prove that if markets are weak-form efficient, meaning current prices fully reflect all information available in past prices, then P = NP, meaning every computational problem whose solution can be verified in polynomial time can also be solved in polynomial time. 
I also prove the converse by showing how we can program the market to solve NP-complete problems. Since P probably does not equal NP, markets are probably not efficient. Specifically, markets become increasingly inefficient as the time series lengthens or becomes more frequent. An illustration by way of partitioning the excess returns to momentum strategies based on data availability confirms this prediction.|sl:arxiv_firstAuthor|Philip Maymin -Knowledge Graphs and NLP|skos:broader|AI + Knowledge Bases -Déforestation|skos:broader|Écologie -EC-Web'14|skos:broader|EC-Web -Civilisations précolombiennes|skos:broader|Amérindien -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:tag|tag:bertology -ATerm|skos:broader|Dev tools -Coursera: NLP class|skos:broader|Stanford -Patent|skos:broader|Propriété intellectuelle -JavaScript librairies|skos:broader|Web dev framework -Neo4j|skos:broader|Graph database -Censure et maltraitance animale|skos:broader|Animal rights -Femme célèbre (où qui mérite de l'être)|skos:broader|Femme -Musique du Niger|skos:broader|Niger -Category Embedding|skos:broader|Entity embeddings -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. 
We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|sl:tag|tag:sentence_embeddings -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:arxiv_author|Tim Kraska -GitHub project|skos:broader|GitHub -Mussolini|skos:broader|Dictature -Google Web Toolkit|skos:broader|JavaScript librairies -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:arxiv_author|Marjan Ghazvininejad -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. 
The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:arxiv_author|Ludovic Denoyer -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|sl:arxiv_firstAuthor|Mohammad Taher Pilehvar -Covid19|skos:broader|Virus -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity.
For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|sl:arxiv_author|Cedric De Boom -Docker-Python|skos:broader|Docker -Plantation d'arbres|skos:broader|Arbres -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. This information has been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail categories). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:tag|tag:nlp_text_classification -Caroline Fourest|skos:broader|Journaliste -Information theory AND Deep Learning|skos:broader|Information theory -C2GWeb and Product description|skos:broader|Product description -Génétique + Histoire|skos:broader|ADN -Attention mechanism|skos:broader|Deep Learning -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism.
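In the Transformer, the attention mechanism this row refers to is scaled dot-product attention, Attention(Q, K, V) = softmax(QK^T / sqrt(d_k)) V, as defined in the paper; a single-head plain-NumPy version:

```python
# Scaled dot-product attention, single head, plain NumPy.
import numpy as np

def attention(Q, K, V):
    d_k = Q.shape[-1]
    scores = Q @ K.T / np.sqrt(d_k)                  # query-key compatibility
    w = np.exp(scores - scores.max(-1, keepdims=True))
    w = w / w.sum(-1, keepdims=True)                 # row-wise softmax
    return w @ V                                     # weighted sum of values

rng = np.random.default_rng(0)
Q, K, V = rng.normal(size=(3, 8)), rng.normal(size=(5, 8)), rng.normal(size=(5, 8))
print(attention(Q, K, V).shape)  # (3, 8): one output per query
```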
We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:arxiv_author|Illia Polosukhin -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:tag|tag:ruslan_salakhutdinov -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. 
In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:tag|tag:arxiv_doc -Google Cloud|skos:broader|Google -Philosophe|skos:broader|Philosophie -Google Rich Snippets|skos:broader|SEO -A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|sl:arxiv_author|Surya Ganguli -Thucydide|skos:broader|Historien -Cafard|skos:broader|Insecte -Cosmologie|skos:broader|Science -Amazon Alexa|skos:broader|Enceintes connectées -Antiquité de l'Inde|skos:broader|Inde -Introduced in the early 1990s by Bromley and LeCun to solve signature verification as an image matching problem|skos:broader|The goal is to learn from examples a similarity function that measures how similar or related two objects are. Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity. Distance metric learning is a major tool for a variety of problems in computer vision. It has successfully been employed for image retrieval, near duplicate detection, clustering and zero-shot learning. ([src](doc/2020/02/_1703_07464_no_fuss_distance_m)) -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. 
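A minimal sketch of that weak-supervision recipe, here using the rank_bm25 package as the unsupervised teacher; the tiny corpus, the pairwise framing, and all names below are illustrative assumptions, not code from the paper:

```python
# Illustrative weak-label generation with rank_bm25 (pip install rank_bm25).
from rank_bm25 import BM25Okapi

documents = ["deep learning for ranking",
             "bm25 is a strong lexical baseline",
             "recurrent neural networks"]
bm25 = BM25Okapi([doc.split() for doc in documents])

def weak_preference(query, i, j):
    """Pseudo-label from the weak teacher: 1.0 iff doc i outranks doc j."""
    scores = bm25.get_scores(query.split())
    return 1.0 if scores[i] > scores[j] else 0.0

# (query, doc_i, doc_j, weak_preference(...)) tuples would then be fed to a
# pairwise neural ranker in place of human relevance judgments
print(weak_preference("neural ranking", 0, 2))
```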
See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate them on two standard collections: a homogeneous news collection (Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:arxiv_author|W. Bruce Croft -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered.
We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Aleksandr Nisnevich -Chine / Afrique|skos:broader|Chine -Fado tropical|skos:broader|Poésie -Google Patents|skos:broader|Patent finding -Semantic indexing|skos:broader|Semantic Web -Word2Bits - Quantized Word Vectors We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer Word vectors require significant amounts of memory and storage, posing issues to resource limited devices like mobile phones and GPUs. We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer. We train word vectors on English Wikipedia (2017) and evaluate them on standard word similarity and analogy tasks and on question answering (SQuAD). Our quantized word vectors not only take 8-16x less space than full precision (32 bit) word vectors but also outperform them on word similarity tasks and question answering.|sl:arxiv_firstAuthor|Maximilian Lam -Deep Learning and the Information Bottleneck Principle Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN. Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN and obtain finite sample generalization bounds. The advantage of getting closer to the theoretical limit is quantifiable both by the generalization bound and by the network's simplicity. We argue that both the optimal architecture, number of layers and features/connections at each layer, are related to the bifurcation points of the information bottleneck tradeoff, namely, relevant compression of the input layer with respect to the output layer. The hierarchical representations at the layered network naturally correspond to the structural phase transitions along the information curve. We believe that this new insight can lead to new optimality bounds and deep learning algorithms.|sl:tag|tag:naftali_tishby -Lobby nucléaire|skos:broader|Industrie nucléaire -AI: startups|skos:broader|Startups -Maroc|skos:broader|Afrique du Nord -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Unlike the Aho-Corasick algorithm, it doesn't match substrings. This algorithm is also designed to go for the longest match first.
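A usage sketch of the open-source flashtext package implementing the algorithm described in this row; matching is word-boundary aware and longest-match-first, as described:

```python
# Usage of the flashtext package (pip install flashtext).
from flashtext import KeywordProcessor

kp = KeywordProcessor()  # case-insensitive by default
for kw in ["Machine", "Learning", "Machine Learning"]:
    kp.add_keyword(kw)

print(kp.extract_keywords("I like Machine Learning"))  # ['Machine Learning'] (longest match wins)
print(kp.extract_keywords("I like Pineapple"))         # [] - no substring matching
```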
For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made a Python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|sl:tag|tag:arxiv_doc -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent results in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) networks on three large German, Italian, and English datasets. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|sl:tag|tag:rnn_based_language_model -Hierarchical tags|skos:broader|Tagging -sindice|skos:broader|Linking Open Data -semantic-web@w3.org|skos:broader|Semantic Web -Concise Bounded Description|skos:broader|SPARQL -Économie écologique|skos:broader|Economie -SPARQL Demo|skos:broader|SW demo -Vint Cerf|skos:broader|Technical girls and guys -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:tag|tag:normale_sup -Astrophysique|skos:broader|Science -HTTP|skos:broader|Internet -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms.
We also provide a comparison with the performance of some other popular active learning strategies.|sl:arxiv_author|Luca Saglietti -Dinosaures|skos:broader|Paléontologie -Real-Time Communications|skos:broader|Real-Time -Solid|skos:broader|Linked Data: application -IRD|skos:broader|Zones intertropicales -Harry Halpin|skos:broader|Technical girls and guys -Amazon|skos:broader|Entreprise -Chimie|skos:broader|Science -Knowledge Graphs|skos:broader|Knowledge bases -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations—as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Leveraging data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|sl:arxiv_firstAuthor|Cedric De Boom -predicting a single label among mutually exclusive labels.|skos:broader|the problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations whose category membership is known. -One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention.
These two characteristics lead to generative models that are among the state-of-the-art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|sl:arxiv_author|Daan Wierstra -SparqlPress|skos:broader|SPARQL -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|sl:arxiv_author|Kawin Ethayarajh -Genome editing|skos:broader|Gene editing -Chine : écologie|skos:broader|Chine -Espace|skos:broader|Astronomie -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy text, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems.
In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|sl:tag|tag:entity_linking -Linked Data / collaborative editing|skos:broader|Collaborative editing -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|sl:tag|tag:xlnet -Voyager|skos:broader|Exploit -Ecrivain|skos:broader|Littérature -Virus|skos:broader|Biology -Un ivrogne dans la brousse|skos:broader|Littérature africaine -Semantic Web: Life Sciences|skos:broader|Semantic web : Use cases -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering.
Our code is available at https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE.|sl:arxiv_firstAuthor|Quan Wang -Knowledge Graph KG|skos:broader|KR -Semanlink|skos:broader|Favoris -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|sl:tag|tag:good -Multi-language support|skos:broader|Language -Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) meh More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention to be patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time-consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|sl:arxiv_firstAuthor|Lea Helmers -Musicien|skos:broader|Musique -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm.
Please see the updated text for more details.|sl:arxiv_author|Shuai Huang -Self-Taught Hashing for Fast Similarity Search Emphasises the following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Proposes the following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (method summarized [here](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing, which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|sl:tag|tag:arxiv_doc -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|sl:tag|tag:sequence_labeling -Richard Stallman|skos:broader|Logiciel libre -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs.
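A compressed sketch of the two-stage Self-Taught Hashing recipe summarized a few rows above, with assumptions marked in the comments: sklearn's SpectralEmbedding stands in for the binarised Laplacian Eigenmap of the paper, and one linear SVM is trained per bit.

```python
# STH sketch under stated assumptions; not the authors' implementation.
import numpy as np
from sklearn.manifold import SpectralEmbedding  # stand-in for LapEig
from sklearn.svm import LinearSVC

def self_taught_hashing(X_train, X_query, n_bits=8):
    # Stage 1 (unsupervised): spectral embedding of the corpus, each
    # dimension binarised at its median -> l-bit codes for known documents
    emb = SpectralEmbedding(n_components=n_bits).fit_transform(X_train)
    codes = (emb > np.median(emb, axis=0)).astype(int)
    # Stage 2 (supervised): one linear SVM per bit, so that codes can be
    # predicted for query documents never seen during stage 1
    clfs = [LinearSVC().fit(X_train, codes[:, b]) for b in range(n_bits)]
    return np.stack([c.predict(X_query) for c in clfs], axis=1)
```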
We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Anisa Rula -Data mining tools|skos:broader|Data mining -spaCy|skos:broader|Python-NLP -Zika|skos:broader|Maladie -Einstein|skos:broader|Physicien -Egypte antique|skos:broader|Egypte -RapidMiner|skos:broader|NLP tools -Inference|skos:broader|Artificial Intelligence -NLP: use cases|skos:broader|NLP -RDF vs XML|skos:broader|RDF -TF1|skos:broader|Télévision -Bio inspired computing devices|skos:broader|Nous vivons une époque moderne -Capitalisme|skos:broader|Economie -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations. We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|sl:arxiv_author|Léon Bottou -Text processing|skos:broader|NLP techniques -Word embeddings with lexical resources|skos:broader|Word embeddings -Reporters sans frontières|skos:broader|Liberté de la presse -Fukushima|skos:broader|Catastrophe industrielle -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. 
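For the distillation half of the generalized-distillation row above, a generic Hinton-style distillation loss (the standard formulation, not the paper's generalized variant; temperature T and mixing weight alpha are assumed hyperparameters):

```python
# Standard knowledge-distillation loss; hyperparameter values are assumptions.
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels, T=2.0, alpha=0.5):
    soft = F.kl_div(
        F.log_softmax(student_logits / T, dim=-1),
        F.softmax(teacher_logits / T, dim=-1),
        reduction="batchmean",
    ) * (T * T)                      # rescale gradients for the temperature
    hard = F.cross_entropy(student_logits, labels)
    return alpha * soft + (1.0 - alpha) * hard
```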
By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|sl:tag|tag:named_entity_recognition -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity's semantic properties. We include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:arxiv_author|Livio Baldini Soares -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance.
In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:arxiv_firstAuthor|Kihyuk Sohn -Miriam Makeba|skos:broader|Music of Africa -Hérodote|skos:broader|Géographie -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:arxiv_author|Tuo Zhao -RDF embeddings|skos:broader|Knowledge Graph Embeddings -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. 
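A pseudocode-level PyTorch sketch of the FixMatch unlabeled-data update described above; the model, the weak/strong augmentation functions, and the threshold value are all assumed given, not taken from the released code:

```python
# FixMatch-style unlabeled loss; names and defaults are assumptions.
import torch
import torch.nn.functional as F

def fixmatch_unlabeled_loss(model, x_unlabeled, weak, strong, tau=0.95):
    with torch.no_grad():                            # teacher pass, no gradients
        probs = F.softmax(model(weak(x_unlabeled)), dim=-1)
        confidence, pseudo_labels = probs.max(dim=-1)
        mask = (confidence >= tau).float()           # keep confident predictions only
    logits = model(strong(x_unlabeled))              # student pass on strong augmentation
    loss = F.cross_entropy(logits, pseudo_labels, reduction="none")
    return (mask * loss).mean()
```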
Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. Specifically, we study two structured distillation schemes: i) pair-wise distillation that distills the pairwise similarities by building a static graph, and ii) holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|sl:arxiv_author|Chunhua Shen -semblog|skos:broader|Semantic Blog -Erta Ale|skos:broader|Volcan -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluating whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|sl:tag|tag:embeddings -Zune|skos:broader|Microsoft -Separation of man and ape|skos:broader|Grands Singes -Dave Winer|skos:broader|Technical girls and guys -Réalisateur|skos:broader|Cinéma -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs.
We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Sabrina Kirrane -Lutte traditionnelle|skos:broader|Sport de combat -ESWC 2012|skos:broader|ESWC -Nikolai Vavilov|skos:broader|Genetics Génétique -No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity... Traditionally, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer's output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity. Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point $x$ is similar to a set of positive points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize. A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow. In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well.
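A minimal PyTorch sketch of the proxy idea from the row above: one learned proxy per class, with each example pulled toward its class proxy and pushed away from the rest. The normalization and scaling choices below are assumptions; the paper's exact Proxy-NCA formulation differs in details.

```python
# Proxy-based metric learning loss sketch; design choices are assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ProxyLoss(nn.Module):
    def __init__(self, n_classes, dim):
        super().__init__()
        self.proxies = nn.Parameter(torch.randn(n_classes, dim))

    def forward(self, embeddings, labels):
        e = F.normalize(embeddings, dim=-1)
        p = F.normalize(self.proxies, dim=-1)
        dist = torch.cdist(e, p) ** 2      # squared distance to every proxy
        # NCA-style objective: be close to your class proxy, far from others;
        # note the resemblance to cross-entropy over (negative) distances,
        # which is exactly the observation quoted from the paper's conclusion
        return F.cross_entropy(-dist, labels)
```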
These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved. As a result, the proxy-loss improves on state-of-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:tag|tag:triplet_loss -DSSM (Deep Semantic Similarity Model)|skos:broader|Microsoft Research -Venus Express|skos:broader|esa -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|sl:tag|tag:arxiv_doc -foaf|skos:broader|Social Semantic Web -SPARQL Clipboard|skos:broader|Live Clipboard -R|skos:broader|Data science -Edvige|skos:broader|Etat policier -Latent Dirichlet allocation|skos:broader|Topic Modeling -SVD|skos:broader|Algèbre linéaire -(LOV) Linked Open Vocabularies|skos:broader|Mondeca -Artificial neurons|skos:broader|Bio inspired computing devices -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|sl:tag|tag:entity_recommendation -NTIC et développement|skos:broader|NTIC -David Peterson|skos:broader|Technical girls and guys -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. 
Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|sl:tag|tag:sentence_embeddings -Pape François|skos:broader|Pape -SemTechBiz Berlin 2012|skos:broader|SemTechBiz -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|sl:tag|tag:backpropagation_vs_biology -Mussolini|skos:broader|Italie -The information bottleneck method We define the relevant information in a signal x ∈ X as being the information that this signal provides about another signal y ∈ Y. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal x requires more than just predicting y, it also requires specifying which features of X play a role in the prediction. We formalize this problem as that of finding a short code for X that preserves the maximum information about Y. 
That is, we squeeze the information that X provides about Y through a ‘bottleneck’ formed by a limited set of codewords X ̃... This approach yields an exact set of self consistent equations for the coding rules X → X ̃ and X ̃ → Y . (from the intro) : how to define meaningful / relevant information? An issue left out of information theory by Shannon (focus on the problem of transmitting information rather than judging its value to the recipient) -leads to consider statistical and information theoretic principles as almost irrelevant for the question of meaning. In contrast, we argue here that information theory, in particular lossy source compression, provides a natural quantitative approach to the question of “relevant information.” Specifically, we formulate a variational principle for the extraction or efficient representation of relevant information. We define the relevant information in a signal $x\\in X$ as being the information that this signal provides about another signal $y\\in \\Y$. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal $x$ requires more than just predicting $y$, it also requires specifying which features of $\\X$ play a role in the prediction. We formalize this problem as that of finding a short code for $\\X$ that preserves the maximum information about $\\Y$. That is, we squeeze the information that $\\X$ provides about $\\Y$ through a `bottleneck' formed by a limited set of codewords $\\tX$. This constrained optimization problem can be seen as a generalization of rate distortion theory in which the distortion measure $d(x,\\x)$ emerges from the joint statistics of $\\X$ and $\\Y$. This approach yields an exact set of self consistent equations for the coding rules $X \\to \\tX$ and $\\tX \\to \\Y$. Solutions to these equations can be found by a convergent re-estimation method that generalizes the Blahut-Arimoto algorithm. Our variational principle provides a surprisingly rich framework for discussing a variety of problems in signal processing and learning, as will be described in detail elsewhere.|sl:arxiv_author|William Bialek NEC Research Institute -Manuscrits de Tombouctou|skos:broader|Manuscrits -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. 
We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Yonatan Bisk -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:tag|tag:arxiv_doc -Photons corrélés|skos:broader|Photon -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. 
Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|sl:tag|tag:stacking_ensemble_learning -Entity linking with Wikipedia as the target knowledge base|skos:broader|= named entity disambiguation: the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base. - Deep contextualized word representations each word is assigned a representation which is a function of the entire corpus sentences to which they belong. The embeddings are computed from the internal states of a two-layers bidirectional Language Model, hence the name “ELMo”: Embeddings from Language Models. [Github](https://github.com/allenai/bilm-tf) |skos:broader|open-source NLP research library, built on PyTorch -Droit d'auteur|skos:broader|Propriété intellectuelle -KnowBert|skos:broader|Knowledge Graphs in NLP -Slime mold|skos:broader|Biology -Poincaré|skos:broader|Scientifique -How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |skos:broader|[Surveys](/tag/?and=knowledge_graph&and=survey) (see also [surveys about graphs](/tag/?and=graph&and=survey)) -James Hendler|skos:broader|SW guys (and girls) -Ivan Herman|skos:broader|Technical girls and guys -Slot filling|skos:broader|Natural Language Understanding -Rumba|skos:broader|Musique -AngularJS|skos:broader|Google -SIMILE Exhibit|skos:broader|JSON -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|sl:arxiv_author|Achyudh Ram -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that used in Word2Vec Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in nature language processing and machine learning areas. 
However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|sl:tag|tag:arxiv_doc -Ours|skos:broader|Animal -Safari|skos:broader|Apple Software -Médecine|skos:broader|Santé -Extrémisme islamique|skos:broader|Grands problèmes -BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|sl:tag|tag:bert -Nokia|skos:broader|Finlande -Multitask Learning in NLP|skos:broader|NLP tasks / problems -LOD2|skos:broader|Linking Open Data -AI cloud service|skos:broader|Artificial Intelligence -AI3:::Adaptive Information|skos:broader|Technical guys -Bayesian Deep Learning|skos:broader|Uncertainty in Deep Learning -Database to RDF mapping|skos:broader|RDF and database -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. 
Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:tag|tag:imbalanced_data -Archéologie|skos:broader|Favoris -Integrating Tomcat with Apache|skos:broader|Tomcat -SIgnets|skos:broader|Browser -Critique du libéralisme|skos:broader|Critique du capitalisme -Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. 
We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|sl:tag|tag:google_deepmind -Linked Data Fragments|skos:broader|Linked Data -Royaume Uni|skos:broader|Europe -Search Engines|skos:broader|Informatique -Dialogs in javascript|skos:broader|JavaScript -J'ai un petit problème|skos:broader|Howto -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:arxiv_firstAuthor|Jan Rygl -Assemblée nationale|skos:broader|France -NoSQL pour les nuls|skos:broader|NOSQL -Rare events|skos:broader|Imbalanced Data -Information retrieval|skos:broader|Technologie -Extinction des dinosaures|skos:broader|Météorite -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. 
This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|sl:arxiv_author|Yunfan Shao -Economie française|skos:broader|France -Pre-Trained Language Models|skos:broader|Unsupervised deep pre-training -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|sl:arxiv_author|Chris Dyer -IKS|skos:broader|LD -Julian Assange|skos:broader|Wikileaks -RDFa 1.1 Lite|skos:broader|RDFa 1.1 -MyFaces|skos:broader|apache.org -Wiki|skos:broader|Social Networks -Question Answering over Knowledge Graphs via Structural Query Patterns Natural language question answering over knowledge graphs is an important and interesting task as it enables common users to gain accurate answers in an easy and intuitive manner. However, it remains a challenge to bridge the gap between unstructured questions and structured knowledge graphs. To address the problem, a natural discipline is building a structured query to represent the input question. Searching the structured query over the knowledge graph can produce answers to the question. Distinct from the existing methods that are based on semantic parsing or templates, we propose an effective approach powered by a novel notion, structural query pattern, in this paper. Given an input question, we first generate its query sketch that is compatible with the underlying structure of the knowledge graph. Then, we complete the query graph by labeling the nodes and edges under the guidance of the structural query pattern. Finally, answers can be retrieved by executing the constructed query graph over the knowledge graph. Evaluations on three question answering benchmarks show that our proposed approach outperforms state-of-the-art methods significantly.|sl:tag|tag:knowledge_graph -DAO attack|skos:broader|Hack -SPARQL en javascript|skos:broader|Javascript RDF -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. 
Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|sl:arxiv_author|Markus Schröder -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:arxiv_author|Denis Mazur -IHM|skos:broader|Informatique -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may beused and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, andout-of-distribution detection while also enabling our models to generate samplesrivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and presentan approach which adds little overhead compared to standard classification training. 
Our approach is the first to achieve performance rivaling the state-of-the-artin both generative and discriminative learning within one hybrid model."|sl:arxiv_author|Kevin Swersky -Approach to machine translation in which a large neural network is trained to maximize translation performance. It is a radical departure from the phrase-based statistical translation approaches, in which a translation system consists of subcomponents that are separately optimized. A bidirectional recurrent neural network (RNN), known as an encoder, is used by the neural network to encode a source sentence for a second RNN, known as a decoder, that is used to predict words in the target language |skos:broader|sub-field of computational linguistics that investigates the use of software to translate text or speech from one language to another -[#Word sense disambiguation](/tag/word_sense_disambiguation) algorithm based on the assumption that words in a given neighborhood (section of text) tend to share a common topic |skos:broader|Methods for quantifying and categorizing semantic similarities between linguistic items based on their distributional properties in large samples of language data. Basic idea: the Distributional hypothesis: linguistic items with similar distributions have similar meanings. Basic approach: collect distributional information in high-dimensional vectors, and define similarity in terms of vector similarity Models: latent semantic analysis (LSA), Hyperspace Analogue to Language (HAL), syntax- or dependency-based models, random indexing, semantic folding and various variants of the topic model. -NLP girls and guys|skos:broader|NLP -EC-Web|skos:broader|Conférences -QOTD|skos:broader|Citation -Relation Extraction|skos:broader|Entities -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|sl:arxiv_author|Thomas Wolf -Same architecture as autoencoder, but make strong assumptions concerning the distribution of latent variables. 
They use variational approach for latent representation learning (\Stochastic Gradient Variational Bayes\ (SGVB) training algorithm)|skos:broader|Deep latent variable models assume a generative process whereby a simple random variable is transformed from the latent space to the observed, output space through a deep neural network. Generative Adversarial Networks (GAN) and Variational Autoencoders (VAE) are two of the most popular variants of this approach -Land Degradation|skos:broader|Crise écologique -GUI|skos:broader|UI -Geoffrey Hinton|skos:broader|AI girls and guys -Mars|skos:broader|Système solaire -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:arxiv_author|Foteini Simistira -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|sl:tag|tag:arxiv_doc -Web Pollution|skos:broader|Web -Ghana Empire|skos:broader|Histoire de l'Afrique -Linked Data Cache|skos:broader|Cache -Grounded Language Learning|skos:broader|NLU -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. 
The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:arxiv_author|David Berthelot -Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|sl:arxiv_author|Franziska Horn -Mobile apps dev|skos:broader|Dev -GAO|skos:broader|Automotive ontologies -HATEOAS|skos:broader|REST -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. 
Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:arxiv_author|Asja Fischer -Enswers|skos:broader|Digital Video -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may beused and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, andout-of-distribution detection while also enabling our models to generate samplesrivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and presentan approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-artin both generative and discriminative learning within one hybrid model."|sl:arxiv_author|Jörn-Henrik Jacobsen -Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. 
We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|sl:arxiv_author|Luke Vilnis -Archéologie|skos:broader|Histoire -Ecocide|skos:broader|Écologie -BERT-of-Theseus: Compressing BERT by Progressive Module Replacing approach to compress BERT by progressive module replacing. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter [Github](https://github.com/JetRunner/BERT-of-Theseus) In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models, and smooths the training process. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter, liberating human effort from hyper-parameter tuning. Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.|sl:arxiv_author|Wangchunshu Zhou -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. 
Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|sl:tag|tag:arxiv_doc -Representing Sentences as Low-Rank Subspaces We observe a simple geometry of sentences -- the word representations of a given sentence roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. A sentence of N words is a matrix (300, N) (if 300 is the dim of the word embeddings space). We take the eg. 4 (hyperparam) heaviest singular values - a subspace with dim 4 Similarity between docs: principal angle between the subspaces (reminiscent of cosine similarity) Sentences are important semantic units of natural language. A generic, distributional representation of sentences that can capture the latent semantics is beneficial to multiple downstream applications. We observe a simple geometry of sentences -- the word representations of a given sentence (on average 10.23 words in all SemEval datasets with a standard deviation 4.84) roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. Such an unsupervised representation is empirically validated via semantic textual similarity tasks on 19 different datasets, where it outperforms the sophisticated neural network models, including skip-thought vectors, by 15% on average.|sl:tag|tag:sentence_embeddings -Bayesian networks bayesian Réseaux bayésiens|skos:broader|IA AI -Union européenne|skos:broader|Institutions européennes -RDF Framework|skos:broader|Semantic Web : Tools -Ranked Entities in Search Results|skos:broader|Search Engines -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. 
By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|sl:tag|tag:convolutional_neural_network -Bruxelles|skos:broader|Ville -Deep Learning and the Information Bottleneck Principle Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN. Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN and obtain finite sample generalization bounds. The advantage of getting closer to the theoretical limit is quantifiable both by the generalization bound and by the network's simplicity. We argue that both the optimal architecture, number of layers and features/connections at each layer, are related to the bifurcation points of the information bottleneck tradeoff, namely, relevant compression of the input layer with respect to the output layer. The hierarchical representations at the layered network naturally correspond to the structural phase transitions along the information curve. We believe that this new insight can lead to new optimality bounds and deep learning algorithms.|sl:tag|tag:information_bottleneck_method -Ouzbékistan|skos:broader|Ex URSS URSS -ESWC 2007|skos:broader|ESWC -Europe|skos:broader|Géographie -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. 
The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:tag|tag:nlp_facebook -Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images Deep neural networks (DNNs) have recently been achieving state-of-the-art performance on a variety of pattern-recognition tasks, most notably visual classification problems. Given that DNNs are now able to classify objects in images with near-human-level performance, questions naturally arise as to what differences remain between computer and human vision. A recent study revealed that changing an image (e.g. of a lion) in a way imperceptible to humans can cause a DNN to label the image as something else entirely (e.g. mislabeling a lion a library). Here we show a related result: it is easy to produce images that are completely unrecognizable to humans, but that state-of-the-art DNNs believe to be recognizable objects with 99.99% confidence (e.g. labeling with certainty that white noise static is a lion). Specifically, we take convolutional neural networks trained to perform well on either the ImageNet or MNIST datasets and then find images with evolutionary algorithms or gradient ascent that DNNs label with high confidence as belonging to each dataset class. It is possible to produce images totally unrecognizable to human eyes that DNNs believe with near certainty are familiar objects, which we call fooling images (more generally, fooling examples). Our results shed light on interesting differences between human vision and current DNNs, and raise questions about the generality of DNN computer vision.|sl:arxiv_author|Jason Yosinski -Sculpture|skos:broader|Art -Guha|skos:broader|Technical girls and guys -Jena: assembler|skos:broader|Jena dev -Knowledge Graph Embeddings Library|skos:broader|KGE KG embedding Knowledge graph embedding -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. 
Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:arxiv_author|Bo Dai -Blackbox NLP|skos:broader|AI black box -LODr|skos:broader|Tagging -Javascript RDF Parser|skos:broader|JavaScript -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|sl:arxiv_author|Genet Asefa Gesese -Kingsley Idehen|skos:broader|OpenLink Software -Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|sl:tag|tag:word_embedding_compositionality -Censure et maltraitance animale|skos:broader|Vive le capitalisme !
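The SOFT top-k record above frames the smoothed top-k operator as the solution of an entropic optimal transport (EOT) problem between the n scores and a pair of anchor points. Below, a minimal numpy sketch of that idea, assuming anchors at the min and max score and plain Sinkhorn iterations; the paper's exact parameterization and its gradient approximation via the EOT optimality conditions are not reproduced here (in practice this would live in an autodiff framework):

```python
import numpy as np

def soft_top_k(scores, k, eps=0.1, n_iter=200):
    """Smoothed top-k indicator via entropic optimal transport.

    Transports uniform mass on the n scores onto two anchors (the min and
    max score); the mass each score sends to the 'max' anchor is a smooth
    stand-in for the hard top-k indicator. Sketch for clarity, not
    numerical robustness.
    """
    x = np.asarray(scores, dtype=float)
    n = x.shape[0]
    anchors = np.array([x.min(), x.max()])      # two target points
    C = (x[:, None] - anchors[None, :]) ** 2    # (n, 2) cost matrix
    a = np.full(n, 1.0 / n)                     # uniform source masses
    b = np.array([(n - k) / n, k / n])          # target masses: n-k "out", k "in"
    K = np.exp(-C / eps)                        # Gibbs kernel
    u = np.ones(n)
    for _ in range(n_iter):                     # Sinkhorn iterations
        v = b / (K.T @ u)
        u = a / (K @ v)
    gamma = u[:, None] * K * v[None, :]         # transport plan, shape (n, 2)
    return n * gamma[:, 1]                      # soft indicator, entries in [0, 1]

print(soft_top_k([0.1, 2.0, -0.5, 1.5, 0.3], k=2))  # ~[0, 1, 0, 1, 0]
```

As eps shrinks, the output approaches the hard 0/1 indicator; a larger eps gives a smoother selection with more informative gradients.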
-Bioterrorisme|skos:broader|Terrorisme -Feedly|skos:broader|RSS -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|sl:arxiv_author|Vladlen Koltun -Enterprise Semantic Web Semantic Web in the enterprise Corporate Semantic Web|skos:broader|Web sémantique sw -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_author|Daniel F. 
Schmidt -Cognition|skos:broader|Divers -Distilling the Knowledge in a Neural Network a different kind of training, which we call “distillation” to transfer the knowledge from the cumbersome model to a small model that is more suitable for deployment Caruana and his collaborators have shown that it is possible to compress the knowledge in an [#ensemble](/tag/ensemble_learning.html) into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST. A very simple way to improve the performance of almost any machine learning algorithm is to train many different models on the same data and then to average their predictions. Unfortunately, making predictions using a whole ensemble of models is cumbersome and may be too computationally expensive to allow deployment to a large number of users, especially if the individual models are large neural nets. Caruana and his collaborators have shown that it is possible to compress the knowledge in an ensemble into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST and we show that we can significantly improve the acoustic model of a heavily used commercial system by distilling the knowledge in an ensemble of models into a single model. We also introduce a new type of ensemble composed of one or more full models and many specialist models which learn to distinguish fine-grained classes that the full models confuse. Unlike a mixture of experts, these specialist models can be trained rapidly and in parallel.|sl:tag|tag:arxiv_doc -Contextualized word representations|skos:broader|NLP techniques -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.|sl:tag|tag:chris_manning -Javadoc|skos:broader|Documentation tool -Text feature extraction|skos:broader|NLP techniques -Genetic data|skos:broader|Genetics Génétique -- Siamese network with two deep sub-models - Projects input and candidate texts into embedding space - Trained by maximizing cosine similarity between correct input-output pairs [source](/doc/2019/08/neural_models_for_information_r)|skos:broader|Finding items that are similar to a given query is the core aspect of search and retrieval systems, as well as of recommendation engines. 
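The "Distilling the Knowledge in a Neural Network" record above describes transferring the knowledge of a cumbersome model (or ensemble) into a small student by training it on the teacher's temperature-softened class probabilities. A minimal PyTorch sketch of that loss; the temperature T and mixing weight alpha are illustrative assumptions, not values prescribed by the paper:

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels, T=4.0, alpha=0.9):
    """Hinton-style distillation: KL between temperature-softened teacher and
    student distributions, mixed with the usual hard-label cross-entropy."""
    soft_targets = F.softmax(teacher_logits / T, dim=-1)
    log_student = F.log_softmax(student_logits / T, dim=-1)
    # T**2 rescales the soft term so its gradient magnitude stays comparable
    # across temperatures (as noted in the paper).
    kd = F.kl_div(log_student, soft_targets, reduction="batchmean") * (T ** 2)
    ce = F.cross_entropy(student_logits, labels)
    return alpha * kd + (1.0 - alpha) * ce

# Toy usage: 8 examples, 10 classes.
student_logits = torch.randn(8, 10, requires_grad=True)
teacher_logits = torch.randn(8, 10)
labels = torch.randint(0, 10, (8,))
distillation_loss(student_logits, teacher_logits, labels).backward()
```

The loss drops in wherever the record's ensemble-to-single-model compression is wanted: run the frozen teacher forward, then backpropagate only through the student.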
-Lobby agroalimentaire|skos:broader|Agriculture -Schema.org roles|skos:broader|schema.org -Linking Open Data|skos:broader|Open Data -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. These information have been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:arxiv_author|Reinald Kim Amplayo -Angela Merkel|skos:broader|Homme politique -IBM|skos:broader|NTIC -Doc2Vec|skos:broader|Word2vec -the bottleneck of getting labeled training data|skos:broader|Machine learning focuses on prediction, based on known properties learned from the training data. Data mining (which is the analysis step of Knowledge Discovery in Databases) focuses on the discovery of (previously) unknown properties on the data. [Glossary (by google)](https://developers.google.com/machine-learning/glossary/) -Science fiction|skos:broader|Fiction -Guerres puniques|skos:broader|Antiquité romaine -Question Answering for complex questions is often modeled as a graph construction or traversal task, where a solver must build or traverse a graph of facts that answer and explain a given question.|skos:broader|For a description of the variants of this task, see this [paper](/doc/2020/02/how_much_knowledge_can_you_pack) - reading comprehesion - open-domain QA - open-book exam - open-book exam -iTunes|skos:broader|Music store -Biologie|skos:broader|sciences -gensim|skos:broader|Topic Modeling -Fado tropical|skos:broader|Chico Buarque -Rébellion touarègue|skos:broader|Niger -CIMBA|skos:broader|Personal data -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. 
Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|sl:arxiv_author|Steven Bohez -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L\\log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|sl:arxiv_author|Nikita Kitaev -AI Chip|skos:broader|Chip -Regroupement familial et test ADN de filiation|skos:broader|Test ADN de filiation -Snippet Manager|skos:broader|Semanlink related -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based are dependent on vocabularies. 
One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:arxiv_firstAuthor|Kumar Shridhar -Explainable AI|skos:broader|Accountable AI -Silicon Valley|skos:broader|Californie -Semantic Statistics|skos:broader|Semantic Web -Restful semantic web services|skos:broader|REST -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L\\log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|sl:tag|tag:arxiv_doc -Soleil|skos:broader|Système solaire -Microsoft Concept Graph|skos:broader|Knowledge Extraction -DNA nanotechnology|skos:broader|ADN -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. 
In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:tag|tag:rare_events -Google Refine|skos:broader|OpenRefine -Liberté d'expression|skos:broader|Liberté -1ere guerre mondiale|skos:broader|Histoire -Newton|skos:broader|Physique -Manu Dibango|skos:broader|Jazz -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:arxiv_firstAuthor|Yujia Xie -Croisade des enfants|skos:broader|Croisades -Antiquité du Pakistan|skos:broader|Pakistan -Construction européenne|skos:broader|Europe -Triple-store powered site|skos:broader|TripleStore -Linked Learning|skos:broader|Online Learning -Wikipedia page to concept|skos:broader|Information resources -Sida|skos:broader|Grands problèmes -Sônia Braga|skos:broader|Actrice -C2GWeb|skos:broader|SW at Renault -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. 
With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Mario Guajardo-Cespedes -Conjecture de Goldbach|skos:broader|Grands problèmes mathématiques -Louis Jouvet|skos:broader|Cinéma français -KD-MKR biblio|skos:broader|KD-MKR -Mésopotamie|skos:broader|Antiquité -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:tag|tag:sequence_to_sequence_learning -Tanis-KT|skos:broader|Extinction des dinosaures -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. 
This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|sl:tag|tag:cross_lingual_nlp -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|sl:tag|tag:arxiv_doc -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. 
Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:arxiv_firstAuthor|Cody Coleman -Ben Adida|skos:broader|SW guys (and girls) -Shelley Powers|skos:broader|Technical girls and guys -Discounted cumulative gain|skos:broader|Ranking (information retrieval) -HATEOAS|skos:broader|Hypermedia -Geometry of language embeddings|skos:broader|Text Embeddings -Guerre de Yougoslavie|skos:broader|War -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:tag|tag:arxiv_doc -Afrique de l'Est|skos:broader|Afrique -Coupe du monde 2010|skos:broader|Coupe du monde de football -Net Neutrality|skos:broader|Internet -Mirek Sopek|skos:broader|SW guys (and girls) -Facebook Open Graph|skos:broader|Facebook -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. 
[Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hum, ça me rappelle quelque chose) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 
We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:tag|tag:knowledge_graph_completion -NLP@Google|skos:broader|AI@Google -Abeille|skos:broader|Insecte -Conditional random fields|skos:broader|Probabilistic Graphical Models -Information resources|skos:broader|URI -Backpropagation vs Biology|skos:broader|Brain vs Deep Learning -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:arxiv_author|Volker Tresp -Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study data redundancy (reverse relations), Cartesian product relations A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world In the active research area of employing embedding models for knowledge graph completion, particularly for the task of link prediction, most prior studies used two benchmark datasets FB15k and WN18 in evaluating such models. Most triples in these and other datasets in such studies belong to reverse and duplicate relations which exhibit high data redundancy due to semantic duplication, correlation or data incompleteness. This is a case of excessive data leakage---a model is trained using features that otherwise would not be available when the model needs to be applied for real prediction. There are also Cartesian product relations for which every triple formed by the Cartesian product of applicable subjects and objects is a true fact. 
Link prediction on the aforementioned relations is easy and can be achieved with even better accuracy using straightforward rules instead of sophisticated embedding models. A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world. This paper is the first systematic study with the main objective of assessing the true effectiveness of embedding models when the unrealistic triples are removed. Our experiment results show these models are much less accurate than what we used to perceive. Their poor accuracy renders link prediction a task without truly effective automated solution. Hence, we call for re-investigation of possible effective approaches.|sl:arxiv_author|Wei Hu State Key Laboratory for Novel Software Technology, Nanjing University -Publishing RDF Vocabularies|skos:broader|RDF Vocabularies -Shelley Powers|skos:broader|SW guys (and girls) -Smartphone|skos:broader|Mobile device -Coursera: Deep Learning|skos:broader|Andrew Ng -Bricolage Mac|skos:broader|Bricolage -Clustering of text documents|skos:broader|NLP tasks / problems -Film argentin|skos:broader|Argentine -Linux hosting|skos:broader|Linux -Hierarchical text classification|skos:broader|Hierarchical Categories -Transfer learning|skos:broader|Machine learning -Learning Sparse, Distributed Representations using the Hebbian Principle The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|sl:tag|tag:neuroscience_and_machine_learning -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions.
In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|sl:arxiv_author|Eunyee Koh -Paul Graham|skos:broader|Technical girls and guys -Principal component analysis|skos:broader|Unsupervised machine learning -Industrie textile|skos:broader|industrie -Disque à retrouver|skos:broader|Souvenirs -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:tag|tag:arxiv_doc -Word sense / Lexical ambiguity|skos:broader|Ambiguity (NLP) -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. 
Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Chris Tar -Word2vec|skos:broader|Word embeddings -DRM in HTML 5|skos:broader|DRM -Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|sl:arxiv_author|Franziska Biegler -jQuery|skos:broader|JavaScript librairies -Government data as Linked Data|skos:broader|Government data -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. 
Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:arxiv_author|Yuan Zhang -BERT Rediscovers the Classical NLP Pipeline We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.|sl:arxiv_author|Ian Tenney -Silk Road|skos:broader|Dark Web -Protection de la nature|skos:broader|Nature -Maven tips|skos:broader|Tips -Pre-Trained Language Models|skos:broader|Language model -Memory requirements in NN|skos:broader|Mémoire (informatique) -RDF and social networks|skos:broader|RDF Application -Pologne|skos:broader|Pays d'Europe -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. These information have been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. 
Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:tag|tag:category_embedding -Dictionary learning, or sparse coding, tries to learn a sparse linear code to represent the given data succinctly. Unsupervised learning algo. Images - edge detection (similar to primary visual cortex) |skos:broader|techniques (mostly unsupervised learning algorithms) that learn a feature: a transformation of raw data input to a representation that can be effectively exploited in machine learning tasks (= aim at discovering better representations of the inputs provided during training. Classical examples include principal components analysis and cluster analysis. Representation learning algorithms often attempt to preserve the information in their input but transform it in a way that makes it useful) -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based methods are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:arxiv_author|Kumar Shridhar -Thèse IRIT-Renault: biblio initiale|skos:broader|Thèse IRIT-Renault: biblio -Calais|skos:broader|Thomson Reuters -aims to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean (Most k-means-type algorithms require the number of clusters – k – to be specified in advance)|skos:broader|In machine learning, unsupervised learning refers to the problem of trying to find hidden structure in unlabeled data -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning.
We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:arxiv_author|Anna Potapenko -TopBraid|skos:broader|Semantic Web : Tools -(aka paragraph2vec, aka sentence embeddings) extends word2vec algorithm to larger blocks of text (sentences, paragraphs or entire documents). Represents each document by a dense vector which is trained to predict words in the document. Paragraph Vectors is the name of the model proposed by Le and Mikolov to generate unsupervised representations of sentences, paragraphs, or entire documents without losing local word order. Implemented in [gensim](/tag/gensim) |skos:broader|In practice, many NLP applications rely on a simple sentence embedding: the average of the embeddings of the words in it. We can do better. Ex of use (besides trivial ones such as classification and similarity): use sentence embeddings to cluster sentences in documents, which aids in the automatic extraction of key information from large bodies of text. -Millennium Goal|skos:broader|Pauvreté -RDFJS|skos:broader|Javascript RDF -Enseignement scientifique|skos:broader|Education -Multi-hop reasonning|skos:broader|Question Answering -Plastic|skos:broader|Grands problèmes -VIE Vienna IKS Editables|skos:broader|JavaScript librairies -Boube Gado|skos:broader|Archéologue -RNN|skos:broader|ANN NN Artificial neural network -Littérature russe|skos:broader|Littérature -Sorbonne|skos:broader|Universités françaises -DeepWalk|skos:broader|Node Embeddings -delicious api|skos:broader|del.icio.us -Google Refine|skos:broader|Google -EMNLP 2018|skos:broader|NLP conference -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. 
We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:arxiv_author|William W. Cohen -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly fewer operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|sl:tag|tag:neuroscience_and_machine_learning -Google Cloud Platform|skos:broader|Google Cloud -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network.
The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:tag|tag:nlp_facebook -TV advertising|skos:broader|Publicité -Shallow parsing (Chunking)|skos:broader|NLP tasks / problems -Linked Learning 2012|skos:broader|WWW 2012 -Traders|skos:broader|Finance -Semantic markup in HTML|skos:broader|Semantic Web -Bijan Parsia|skos:broader|Clark and Parsia -Fossile vivant|skos:broader|Paléontologie -Wikipedia page to concept|skos:broader|dbpedia -GRDDL|skos:broader|XSLT -Knowledge Graphs in NLP|skos:broader|Knowledge Graphs and NLP -Ouzbékistan|skos:broader|Asie centrale -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT is unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, SBERT sentence embeddings are not meant to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT.
We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|sl:arxiv_author|Iryna Gurevych -Facebook FAIR|skos:broader|AI@Facebook -Insectes fossiles|skos:broader|Insecte -Virtuoso Open-Source Edition|skos:broader|OpenLink Software -Brown Corpus|skos:broader|Text Corpora and Lexical Resources -Integrating Tomcat with Apache|skos:broader|Apache web server -A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in the English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|sl:arxiv_author|Jianglei Han -Cinéma américain|skos:broader|Cinéma -EventKG: A Multilingual Event-Centric Temporal Knowledge Graph 690 thousand contemporary and historical events and over 2.3 million temporal relations One of the key requirements to facilitate semantic analytics of information regarding contemporary and historical events on the Web, in the news and in social media is the availability of reference knowledge repositories containing comprehensive representations of events and temporal relations. Existing knowledge graphs, with popular examples including DBpedia, YAGO and Wikidata, focus mostly on entity-centric information and are insufficient in terms of their coverage and completeness with respect to events and temporal relations. EventKG presented in this paper is a multilingual event-centric temporal knowledge graph that addresses this gap. EventKG incorporates over 690 thousand contemporary and historical events and over 2.3 million temporal relations extracted from several large-scale knowledge graphs and semi-structured sources and makes them available through a canonical representation.|sl:arxiv_author|Simon Gottschalk -PIMO|skos:broader|gnowsis -Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property.
For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincaré ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincaré embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|sl:arxiv_author|Maximilian Nickel -Sursauts gamma|skos:broader|Explosions cosmiques -Quoc V. Le|skos:broader|AI girls and guys -Pompe à eau|skos:broader|Irrigation -Faim|skos:broader|Alimentation -Deep Patent Landscaping Model Using Transformer and Graph Embedding a transformer encoder for analyzing textual data present in patent documents and a graph convolutional network for analyzing patent metadata. A benchmarking dataset for patent landscaping based on patent trends reports published by the Korean Patent Office. Data acquisition using Google's BigQuery public datasets. 10% improvement compared to Google’s proposed Automated Patent Landscaping. Empirical analysis of the importance of features (text vs metadata, citations vs classification) Patent landscaping is a method used for searching related patents during a research and development (R&D) project. To avoid the risk of patent infringement and to follow current trends in technology, patent landscaping is a crucial task required during the early stages of an R&D project. As the process of patent landscaping requires advanced resources and can be tedious, the demand for automated patent landscaping has been gradually increasing. However, a shortage of well-defined benchmark datasets and comparable models makes it difficult to find related research studies. In this paper, we propose an automated patent landscaping model based on deep learning. To analyze the text of patents, the proposed model uses a modified transformer structure. To analyze the metadata of patents, we propose a graph embedding method that uses a diffusion graph called Diff2Vec. Furthermore, we introduce four benchmark datasets for comparing related research studies in patent landscaping. The datasets are produced by querying Google BigQuery, based on a search formula from a Korean patent attorney.
The obtained results indicate that the proposed model and datasets can attain state-of-the-art performance, as compared with current patent landscaping models.|sl:arxiv_author|Eunjeong Lucy Park -Bourse|skos:broader|Economie -Volkswagen|skos:broader|Automobile -Relational Databases and the Semantic Web|skos:broader|RDF -Theoretical Impediments to Machine Learning With Seven Sparks from the Causal Revolution To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks Current machine learning systems operate, almost exclusively, in a statistical, or model-free mode, which entails severe theoretical limits on their power and performance. Such systems cannot reason about interventions and retrospection and, therefore, cannot serve as the basis for strong AI. To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks. To demonstrate the essential role of such models, I will present a summary of seven tasks which are beyond reach of current machine learning systems and which have been accomplished using the tools of causal modeling.|sl:tag|tag:human_level_ai -Sécheresse|skos:broader|Eau -New Yorker|skos:broader|Presse -Universités américaines|skos:broader|Université -A Dual Embedding Space Model for Document Ranking Investigate neural word embeddings as a source of evidence in document ranking. Presented in [this Stanford course on IR](/doc/?uri=https%3A%2F%2Fweb.stanford.edu%2Fclass%2Fcs276%2Fhandouts%2Flecture20-distributed-representations.pdf) by Chris Manning (starting slide 44) They train a word2vec model, but retain both the input and the output projections. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives A fundamental goal of search engines is to identify, given a query, documents that have relevant text. This is intrinsically difficult because the query and the document may use different vocabulary, or the document may contain query words without being relevant. We investigate neural word embeddings as a source of evidence in document ranking. We train a word2vec embedding model on a large unlabelled query corpus, but in contrast to how the model is commonly used, we retain both the input and the output projections, allowing us to leverage both the embedding spaces to derive richer distributional relationships. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. We postulate that the proposed Dual Embedding Space Model (DESM) captures evidence on whether a document is about a query term in addition to what is modelled by traditional term-frequency based approaches. Our experiments show that the DESM can re-rank top documents returned by a commercial Web search engine, like Bing, better than a term-matching based signal like TF-IDF. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives, retrieving documents that are only loosely related to the query. 
We demonstrate that this problem can be solved effectively by ranking based on a linear mixture of the DESM and the word counting features.|sl:tag|tag:arxiv_doc -Ronan Collobert|skos:broader|NLP girls and guys -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:tag|tag:acl_2019 -Bookmarks|skos:broader|Brouteur -Stardust|skos:broader|Comet Wild 2 -A La Carte Embedding|skos:broader|Rare words (NLP) -SW in Technical Automotive Documentation|skos:broader|Semantic Web : Application -Entity alignment@de|skos:broader|Entities -Hierarchical linear model|skos:broader|Regression analysis -DAO attack|skos:broader|The DAO -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense of a fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text.
After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:arxiv_author|Noah A. Smith -DANs (Deep Averaging Neural Networks)|skos:broader|Neural Bag of Words -ElasticSearch|skos:broader|Text Search -Flickr|skos:broader|photos online -Spotlight (OSX)|skos:broader|Mac OS X -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|sl:arxiv_author|Yoav Goldberg -Turing test|skos:broader|Turing -Java 5|skos:broader|Java -Google Knowledge Graph|skos:broader|Semantic Web -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|sl:arxiv_firstAuthor|Juan Luis Suárez -Linked Data demo|skos:broader|Linked Data -CamemBERT|skos:broader|INRIA -Voice recognition Speech recognition|skos:broader|Sequence Modeling Seq2Seq -Dave Reynolds|skos:broader|Technical girls and guys -Peter Mika|skos:broader|Yahoo! 
-RDF graphs|skos:broader|RDF -Tchernobyl|skos:broader|Catastrophe écologique -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:arxiv_author|Neoklis Polyzotis -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|sl:tag|tag:nlp_reading_comprehension -summly|skos:broader|iphone app -Self-driving car|skos:broader|Automotive -css/html templates|skos:broader|css -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context.
These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language models' ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:tag|tag:language_model -iPod|skos:broader|Apple -Ranked Entities in Search Results|skos:broader|Entities -Cassini|skos:broader|NASA -A Theoretical Analysis of Contrastive Unsupervised Representation Learning [blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) Recent empirical works have successfully used unlabeled data to learn feature representations that are broadly useful in downstream classification tasks. Several of these methods are reminiscent of the well-known word2vec embedding algorithm: leveraging availability of pairs of semantically similar data points and negative samples, the learner forces the inner product of representations of similar pairs with each other to be higher on average than with negative samples. The current paper uses the term contrastive learning for such algorithms and presents a theoretical framework for analyzing them by introducing latent classes and hypothesizing that semantically similar points are sampled from the same latent class. This framework allows us to show provable guarantees on the performance of the learned representations on the average classification task that is comprised of a subset of the same set of latent classes. Our generalization bound also shows that learned representations can reduce (labeled) sample complexity on downstream tasks. We conduct controlled experiments in both the text and image domains to support the theory.|sl:tag|tag:embeddings -Philae|skos:broader|Rosetta -Datao|skos:broader|Semantic Web search engine -Phrase embeddings|skos:broader|Phrases (NLP) -data.gouv.fr|skos:broader|Open Data -Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures.
DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|sl:arxiv_firstAuthor|Yundong Zhang -Histropedia|skos:broader|Histoire -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|sl:tag|tag:blink -Minoen|skos:broader|Langues -Named Entity Disambiguation using Deep Learning on Graphs Evaluation of different deep learning techniques to create a context vector from graphs, aimed at high-accuracy NED. (neural approach for entity disambiguation using graphs as background knowledge) We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task... [published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) In NED, the system must be able to generate a context for an entity in a text and an entity in a knowledge base, then correctly link the two. Explore whether representing graphs as triplets is more useful than using the full topological information of the graph We tackle NED by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task. In addition, a new dataset (WikidataDisamb) is created to allow a clean and scalable evaluation of NED with Wikidata entries, and to be used as a reference in future research. In the end our results show that a Bi-LSTM encoding of the graph triplets performs best, improving upon the baseline models and scoring an F1 value of 91.6% on the WikidataDisamb test set|sl:tag|tag:entity_linking -Docker-Mac|skos:broader|Docker -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty.
In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|sl:arxiv_firstAuthor|Yarin Gal -Java 8 lambdas|skos:broader|Lambda calculus -KDE|skos:broader|Linux -SWRL|skos:broader|Rules -Musique en ligne|skos:broader|Digital Media -LDA|skos:broader|Topic model -LD-PATCH|skos:broader|HTTP PATCH -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|sl:arxiv_author|Christian S. Perone -Roosevelt|skos:broader|Président des USA -Censure et maltraitance animale|skos:broader|Justice -Uriqr|skos:broader|Linking Open Data -Latent Semantic Analysis|skos:broader|NLP techniques -PyDev|skos:broader|Python -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. 
[blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:tag|tag:arxiv_doc -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|sl:tag|tag:emnlp_2019 -A good explanation is this [blog post by D. Britz](/doc/?uri=http%3A%2F%2Fwww.wildml.com%2F2016%2F01%2Fattention-and-memory-in-deep-learning-and-nlp%2F). (But the best explanation related to attention is to be found in this [post](/doc/2019/08/transformers_from_scratch_%7C_pet) about Self-Attention.) While simple Seq2Seq builds a single context vector out of the encoder’s last hidden state, attention creates shortcuts between the context vector and the entire source input: the context vector has access to the entire input sequence. The decoder can “attend” to different parts of the source sentence at each step of the output generation, and the model learns what to attend to based on the input sentence and what it has produced so far. Possible to interpret what the model is doing by looking at the Attention weight matrix Cost: We need to calculate an attention value for each combination of input and output word (D. Britz: - attention is a bit of a misnomer: we look at everything in detail before deciding what to focus on) |skos:broader|a set of algorithms in machine learning that attempt to model high-level abstractions in data by using architectures composed of multiple non-linear transformations. Deep learning is part of a broader family of machine learning methods based on learning representations of data.
One of the promises of deep learning is replacing handcrafted features with efficient algorithms for unsupervised or semi-supervised feature learning and hierarchical feature extraction With Deep Learning, Ng says, you just give the system a lot of data so it can discover by itself what some of the concepts in the world are ([cf.](http://www.wired.com/2013/05/neuro-artificial-intelligence/all/)) -Dependency Injection|skos:broader|Inversion of Control -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:tag|tag:transfer_learning -URIs within URIs|skos:broader|URI -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets.
More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:tag|tag:ml_google -Brain-computer interface|skos:broader|Nous vivons une époque moderne -AI: dangers|skos:broader|Artificial Intelligence -A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Locally Linear Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there are also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|sl:arxiv_author|Salvador García -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances.
Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:arxiv_author|Xu Shi -Knowledge Graph + Deep Learning|skos:broader|AI + Knowledge Bases -Extrémophiles|skos:broader|Curiosité naturelle -KIWI project|skos:broader|Commission européenne -A Theoretical Analysis of Contrastive Unsupervised Representation Learning [blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) Recent empirical works have successfully used unlabeled data to learn feature representations that are broadly useful in downstream classification tasks. Several of these methods are reminiscent of the well-known word2vec embedding algorithm: leveraging availability of pairs of semantically similar data points and negative samples, the learner forces the inner product of representations of similar pairs with each other to be higher on average than with negative samples. The current paper uses the term contrastive learning for such algorithms and presents a theoretical framework for analyzing them by introducing latent classes and hypothesizing that semantically similar points are sampled from the same latent class. This framework allows us to show provable guarantees on the performance of the learned representations on the average classification task that is comprised of a subset of the same set of latent classes. Our generalization bound also shows that learned representations can reduce (labeled) sample complexity on downstream tasks. We conduct controlled experiments in both the text and image domains to support the theory.|sl:tag|tag:noise_contrastive_estimation -Économie écologique|skos:broader|Écologie -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. 
We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|sl:arxiv_author|Drew A. Hudson -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|sl:arxiv_author|Zihang Dai -Brains in silicon Neuromorphique Neuromorphic engineering|skos:broader|Cerveau -String theory|skos:broader|Physics -Leonardo da Vinci|skos:broader|Scientifique -Question Answering over Knowledge Graphs via Structural Query Patterns Natural language question answering over knowledge graphs is an important and interesting task as it enables common users to gain accurate answers in an easy and intuitive manner. However, it remains a challenge to bridge the gap between unstructured questions and structured knowledge graphs. To address the problem, a natural discipline is building a structured query to represent the input question. Searching the structured query over the knowledge graph can produce answers to the question. Distinct from the existing methods that are based on semantic parsing or templates, we propose an effective approach powered by a novel notion, structural query pattern, in this paper. Given an input question, we first generate its query sketch that is compatible with the underlying structure of the knowledge graph. Then, we complete the query graph by labeling the nodes and edges under the guidance of the structural query pattern. Finally, answers can be retrieved by executing the constructed query graph over the knowledge graph. Evaluations on three question answering benchmarks show that our proposed approach outperforms state-of-the-art methods significantly.|sl:tag|tag:question_answering -Katie Portwin|skos:broader|Technical girls and guys -Hierarchical clustering of text documents|skos:broader|Clustering of text documents -HTTPS|skos:broader|Cybersecurity Sécurité informatique -OSEMA/DERI-Renault paper|skos:broader|OSEMA 2011 -Londres|skos:broader|Royaume Uni -Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. 
Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised `skeleton' table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|sl:arxiv_author|Nataliya Le Vine -Film danois|skos:broader|Film -CWL|skos:broader|Web sémantique sw -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|sl:arxiv_author|Tianxiang Sun -RDF Access to Relational Databases|skos:broader|RDF and database -Matière noire|skos:broader|Physics -Mauritanie|skos:broader|Afrique de l'Ouest -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:tag|tag:arxiv_doc -Aaron Swartz|skos:broader|Technical girls and guys -Photo numérique|skos:broader|Photo -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. 
The primary contribution of our paper is improved baselines that can provide the foundation for future work.|sl:tag|tag:bert -Cambridge Analytica|skos:broader|DeleteFB -Cross-Origin Resource Sharing|skos:broader|JavaScript -Mac OS X|skos:broader|OS -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|sl:tag|tag:knowledge_graph_embeddings -Pape|skos:broader|Catholicisme -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|sl:arxiv_author|Kin Sum Liu -Abel Prize|skos:broader|Mathématiques -Street art|skos:broader|Art -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. 
We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:tag|tag:arxiv_doc -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|sl:tag|tag:arxiv_doc -Semanlink|skos:broader|Dev -RDF123|skos:broader|Excel and SW -JSONP|skos:broader|cross-domain data fetching -Virtual knowledge graph|skos:broader|Knowledge Graphs -NLP Teams|skos:broader|NLP -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. 
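The Snorkel DryBell row just completed describes converting heterogeneous organizational resources into weak training labels. A toy numpy-only sketch of the underlying labeling-function idea, combined here by simple majority vote; this illustrates weak supervision in general and is not Snorkel's actual API (all function names are made up):

```python
import numpy as np

ABSTAIN, NEG, POS = -1, 0, 1

# Each labeling function encodes one noisy knowledge resource as a heuristic.
def lf_keyword(text):
    return POS if "refund" in text.lower() else ABSTAIN

def lf_length(text):
    return NEG if len(text.split()) < 3 else ABSTAIN

def lf_exclamation(text):
    return POS if "!" in text else ABSTAIN

def weak_label(texts, lfs):
    """Majority vote over non-abstaining labeling functions."""
    votes = np.array([[lf(t) for lf in lfs] for t in texts])
    labels = []
    for row in votes:
        valid = row[row != ABSTAIN]
        labels.append(ABSTAIN if valid.size == 0 else np.bincount(valid).argmax())
    return np.array(labels)

texts = ["I want a refund now!", "ok", "Please process my refund"]
print(weak_label(texts, [lf_keyword, lf_length, lf_exclamation]))  # [1 0 1]
```

Snorkel itself replaces the majority vote with a learned generative model of labeling-function accuracies, but the vote conveys the core idea.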
In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:knowbert -Classification under the restriction that we may only observe a single example of each possible class before making a prediction about a test instance.|skos:broader|Machine learning focuses on prediction, based on known properties learned from the training data. Data mining (which is the analysis step of Knowledge Discovery in Databases) focuses on the discovery of (previously) unknown properties on the data. [Glossary (by google)](https://developers.google.com/machine-learning/glossary/) -Guillaume Lample|skos:broader|NLP girls and guys -Fascisme|skos:broader|Extrème droite -CERN|skos:broader|Recherche -DNA nanotechnology|skos:broader|Nanotechnologies -Aïchi|skos:broader|Exposition universelle -Covid19: incompétence gouvernementale|skos:broader|Covid19 -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|sl:arxiv_author|Honghui Shi -Entity alignment@de|skos:broader|Combining knowledge graphs -Antiquité romaine|skos:broader|Rome -SOAP|skos:broader|XML -Part Of Speech Tagging|skos:broader|Sequence labeling -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. 
However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:tag|tag:ai_stanford -Rosée|skos:broader|Eau -Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.|sl:arxiv_author|Hannaneh Hajishirzi -Bricolage Mac|skos:broader|Macintosh -RDF/binary|skos:broader|RDF -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. 
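The SEALS row completed above restricts active-learning candidates to the nearest neighbors of the labeled set, so uncertainty sampling never touches the full unlabeled pool. A rough numpy-only sketch of that selection step (the paper uses an ANN index such as Annoy or Faiss; brute-force distances keep this example self-contained, and `seals_candidates` is an invented helper name):

```python
import numpy as np

rng = np.random.default_rng(0)
embeddings = rng.normal(size=(10_000, 64))   # learned representations
labeled_idx = np.array([3, 42, 977])         # tiny labeled seed set

def seals_candidates(embeddings, labeled_idx, k=50):
    """Candidate pool = union of k nearest neighbors of each labeled point."""
    pool = set()
    for i in labeled_idx:
        d = np.linalg.norm(embeddings - embeddings[i], axis=1)
        d[i] = np.inf                        # exclude the point itself
        pool.update(np.argpartition(d, k)[:k].tolist())
    return np.array(sorted(pool - set(labeled_idx.tolist())))

candidates = seals_candidates(embeddings, labeled_idx)
# Uncertainty sampling etc. now scores only len(candidates) points,
# not all 10,000 unlabeled examples.
print(len(candidates))
```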
We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|sl:tag|tag:christopher_olah -Cinéma africain|skos:broader|Cinéma -My old things|skos:broader|fps -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:tag|tag:arxiv_doc -A La Carte Embedding|skos:broader|Sentence Embeddings -Richard Cyganiak|skos:broader|SW guys (and girls) -TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification. LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. 
We also perform a comprehensive analysis to demonstrate great future opportunities. The data and code of the dataset are provided in \\url{https://github.com/wenhuchen/Table-Fact-Checking}.|sl:arxiv_author|Shiyang Li -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|sl:tag|tag:arxiv_doc -Wiki markup|skos:broader|Text tools -Lesk algorithm|skos:broader|Distributional semantics -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Braden Hancock -SPARQL Update|skos:broader|Read-Write Linked Data -foaf|skos:broader|RDF and social networks -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. 
Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|sl:tag|tag:arxiv_doc -Yahoo - My Web 2.0|skos:broader|Yahoo! -D2RQ|skos:broader|SQL to RDF mapping -[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Paramaterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. |skos:broader|Methods for quantifying and categorizing semantic similarities between linguistic items based on their distributional properties in large samples of language data. Basic idea: the Distributional hypothesis: linguistic items with similar distributions have similar meanings. Basic approach: collect distributional information in high-dimensional vectors, and define similarity in terms of vector similarity Models: latent semantic analysis (LSA), Hyperspace Analogue to Language (HAL), syntax- or dependency-based models, random indexing, semantic folding and various variants of the topic model. -KnowBert|skos:broader|Knowledge-driven embeddings -Semantic Web project|skos:broader|Semantic Web -Ludovic Denoyer|skos:broader|NLP girls and guys -Python-NLP|skos:broader|NLP tools -Darwin|skos:broader|Evolution -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. 
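The word-embedding overview above mentions skip-gram and CBOW training and cosine similarity between word vectors. A minimal gensim usage sketch, assuming gensim >= 4 (where the dimensionality parameter is `vector_size`); the three-sentence corpus is a stand-in for real training data:

```python
from gensim.models import Word2Vec

# Toy corpus; in practice this would be millions of tokenized sentences.
sentences = [
    ["knowledge", "graph", "embedding"],
    ["word", "embedding", "captures", "semantic", "similarity"],
    ["graph", "embedding", "preserves", "structure"],
]

# sg=1 selects skip-gram (predict context from word); sg=0 would be CBOW.
model = Word2Vec(sentences, vector_size=50, window=2, min_count=1, sg=1, epochs=50)

# Cosine similarity between two word vectors, as described above.
print(model.wv.similarity("graph", "word"))
print(model.wv.most_similar("embedding", topn=2))
```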
Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:tag|tag:medical_data -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:arxiv_author|Kaiming He -SWEO: Renault use case|skos:broader|SW in Technical Automotive Documentation -Phrase embeddings|skos:broader|Embeddings in NLP -Philippe Cudré-Mauroux|skos:broader|SW guys (and girls) -Configuration ontology|skos:broader|fps ontologies -Cinéma africain|skos:broader|Art d'Afrique -Galileo (spacecraft)|skos:broader|Missions spatiales -Attention + Knowledge Graphs|skos:broader|Attention in Graphs -Knowledge Graphs in NLP|skos:broader|NLP: using Knowledge -A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. 
Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|sl:tag|tag:graph_embeddings -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|sl:arxiv_author|Martin Josifoski -Reagan|skos:broader|Chef d'état -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. 
To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:arxiv_firstAuthor|Yoshua Bengio -Cyc|skos:broader|Artificial Intelligence -Java profiling|skos:broader|Java dev -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|sl:tag|tag:uncertainty_in_deep_learning -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. 
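The "Dropout as a Bayesian Approximation" row in this batch underlies so-called MC dropout: keep dropout active at test time and average stochastic forward passes to estimate predictive uncertainty. A small PyTorch sketch of that test-time procedure only (the model and data are placeholders; the paper's derivation via deep Gaussian processes is not reproduced here):

```python
import torch
from torch import nn

model = nn.Sequential(nn.Linear(8, 32), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(32, 1))
x = torch.randn(16, 8)

model.train()  # keep dropout stochastic at inference, unlike the usual eval()
with torch.no_grad():
    samples = torch.stack([model(x) for _ in range(100)])  # 100 MC passes

mean = samples.mean(dim=0)   # predictive mean
std = samples.std(dim=0)     # per-example uncertainty estimate
```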
In addition, it is robust and has less dependence on word embedding as compared to previous observations.|sl:tag|tag:named_entity_recognition -Cloud based LOD platform|skos:broader|Cloud and Linked Data -AI 4 IP|skos:broader|NLP: use cases -Knowledge Vault|skos:broader|Google -Freedom Box|skos:broader|Internet libre -SL|skos:broader|favorites -Ténéré|skos:broader|Désert -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:tag|tag:survey -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. 
We release our code for reproducibility purposes.|sl:tag|tag:attention_is_all_you_need -HTTP Redirect|skos:broader|HTTP -Marklogic|skos:broader|NOSQL -Dev|skos:broader|Technologie -faiss|skos:broader|Machine Learning library -Chômage|skos:broader|Economie -Disco Hyperdata Browser|skos:broader|RDF browser -Publicité Internet|skos:broader|Publicité -NLP + Sem web|skos:broader|NLP -Président des USA|skos:broader|Chef d'état -YouTube video|skos:broader|YouTube -Génocide|skos:broader|Crime contre l'Humanité -Ontologies: use cases|skos:broader|Ontologies -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:arxiv_author|Aliaksei Severyn -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. 
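The product-keys row that closes just above achieves fast exact nearest-neighbor search by splitting each query into two halves and scoring two small sub-key codebooks instead of one large key table: the top-k over summed half-scores is guaranteed to lie in the k x k candidate grid. A numpy sketch of that candidate-generation trick (sizes are toy, and the real memory layer is trained end to end inside a transformer):

```python
import numpy as np

rng = np.random.default_rng(1)
n, half = 32, 8                     # 32 sub-keys per half => 32*32 = 1024 product keys
keys1 = rng.normal(size=(n, half))  # codebook for the first half of the query
keys2 = rng.normal(size=(n, half))  # codebook for the second half

def product_key_topk(query, k=4):
    q1, q2 = query[:half], query[half:]
    s1, s2 = keys1 @ q1, keys2 @ q2           # score each half independently
    top1 = np.argsort(-s1)[:k]                # top-k sub-keys per half
    top2 = np.argsort(-s2)[:k]
    # Score only the k*k candidate product keys instead of all n*n.
    cand = [(s1[i] + s2[j], i * n + j) for i in top1 for j in top2]
    cand.sort(reverse=True)
    return [idx for _, idx in cand[:k]]       # indices into the n*n memory values

print(product_key_topk(rng.normal(size=2 * half)))
```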
We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:arxiv_author|Graham Neubig -Folksonomies vs ontologies|skos:broader|Folksonomy -SDMX-RDF|skos:broader|SDMX -China's Social Credit System|skos:broader|Big Brother -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. 
Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|sl:arxiv_author|Prateek Jain -Lord's Resistance Army|skos:broader|Ouganda -Mobile phone|skos:broader|Téléphone -Antibiotic resistance|skos:broader|Antibiotiques -JAX-RS|skos:broader|Java -Extinction de masse de la fin du permien|skos:broader|Paléontologie -Genetic Programming|skos:broader|Evolution -Semantic Web Services|skos:broader|Semantic Web -NMT|skos:broader|Traduction automatique -Energies renouvelables|skos:broader|Energie -Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|sl:arxiv_author|Chin-Yew Lin -Monsanto|skos:broader|Entreprise -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Sabbir M. Rashid -Private wiki|skos:broader|Wiki -KGE KG embedding Knowledge graph embedding|skos:broader|KR -Bookmarks|skos:broader|Tagging -Google Visualization API|skos:broader|API -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. 
Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|sl:arxiv_author|Bart Dhoedt -Tim Cook|skos:broader|Apple -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. 
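The very-short-text row above proposes weighting word embeddings by tf-idf before averaging, so uninformative terms contribute less to the fragment vector. A compact sketch using scikit-learn idf weights (for fragments this short, tf is roughly 1, so idf weighting approximates tf-idf) and random stand-in embeddings; a trained word2vec model would replace them in practice:

```python
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["cheap flight tickets", "flight delayed again", "cheap hotel deal"]
vec = TfidfVectorizer().fit(docs)
vocab = vec.get_feature_names_out()
idf = dict(zip(vocab, vec.idf_))

rng = np.random.default_rng(0)
emb = {w: rng.normal(size=50) for w in vocab}   # stand-in word embeddings

def fragment_vector(text):
    """Idf-weighted mean of word vectors; informative terms dominate."""
    words = [w for w in text.lower().split() if w in emb]
    weights = np.array([idf[w] for w in words])
    return (weights[:, None] * np.array([emb[w] for w in words])).sum(0) / weights.sum()

def cosine(a, b):
    return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

print(cosine(fragment_vector(docs[0]), fragment_vector(docs[1])))
```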
The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:tag|tag:nlp_using_knowledge_graphs -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:tag|tag:arxiv_doc -Eric Schmidt|skos:broader|Google -Configuration and SW|skos:broader|Constraints in the SW -VoCamp|skos:broader|Barcamp -(aka paragraph2vec, aka sentence embeddings) extends word2vec algorithm to larger blocks of text (sentences, paragraphs or entire documents). Represents each document by a dense vector which is trained to predict words in the document. Paragraph Vectors is the name of the model proposed by Le and Mikolov to generate unsupervised representations of sentences, paragraphs, or entire documents without losing local word order. Implemented in [gensim](/tag/gensim) |skos:broader|group of related models that are used to produce word embeddings -Mécanique quantique|skos:broader|Physique -Kaguya|skos:broader|Lune -Court métrage|skos:broader|Cinéma -Googling|skos:broader|Google -Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). 
Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|sl:tag|tag:arxiv_doc -CIMBA|skos:broader|Linked Data Platform -Redland|skos:broader|RDF Tools -Magie noire|skos:broader|Magie -Europe écologie|skos:broader|Verts -Grands Singes|skos:broader|Singe -Extrème droite|skos:broader|Politique -Auvergne|skos:broader|France -Einstein|skos:broader|Physique -LDA2vec|skos:broader|Latent Dirichlet allocation -Graph-based Text Representations|skos:broader|Graph -Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images Deep neural networks (DNNs) have recently been achieving state-of-the-art performance on a variety of pattern-recognition tasks, most notably visual classification problems. Given that DNNs are now able to classify objects in images with near-human-level performance, questions naturally arise as to what differences remain between computer and human vision. A recent study revealed that changing an image (e.g. of a lion) in a way imperceptible to humans can cause a DNN to label the image as something else entirely (e.g. mislabeling a lion a library). Here we show a related result: it is easy to produce images that are completely unrecognizable to humans, but that state-of-the-art DNNs believe to be recognizable objects with 99.99% confidence (e.g. labeling with certainty that white noise static is a lion). Specifically, we take convolutional neural networks trained to perform well on either the ImageNet or MNIST datasets and then find images with evolutionary algorithms or gradient ascent that DNNs label with high confidence as belonging to each dataset class. It is possible to produce images totally unrecognizable to human eyes that DNNs believe with near certainty are familiar objects, which we call fooling images (more generally, fooling examples). Our results shed light on interesting differences between human vision and current DNNs, and raise questions about the generality of DNN computer vision.|sl:tag|tag:image_recognition -TF-IDF|skos:broader|Probabilistic relevance model -HTML Dev|skos:broader|Dev -Requin|skos:broader|Poisson -Automotive AND W3C|skos:broader|Automobile 2.0 -Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model. 
Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|sl:arxiv_author|Alessandro Moschitti -Theoretical Impediments to Machine Learning With Seven Sparks from the Causal Revolution To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks Current machine learning systems operate, almost exclusively, in a statistical, or model-free mode, which entails severe theoretical limits on their power and performance. Such systems cannot reason about interventions and retrospection and, therefore, cannot serve as the basis for strong AI. To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks. To demonstrate the essential role of such models, I will present a summary of seven tasks which are beyond reach of current machine learning systems and which have been accomplished using the tools of causal modeling.|sl:tag|tag:judea_pearl -fps AND LDOW2008|skos:broader|Linking Enterprise Data -Île Maurice|skos:broader|Afrique -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|sl:tag|tag:attention_is_all_you_need -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. 
These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|sl:arxiv_author|Haifeng Wang -Boucle ferroviaire d’Afrique de l’Ouest|skos:broader|Train -Mur de Berlin|skos:broader|Communisme -Sem web: context|skos:broader|Semantic Web Dev -Piggy Bank|skos:broader|Ajax -SPARQL Tutorial|skos:broader|Tutorial -Alexandre le Grand|skos:broader|Grèce antique -Film japonais|skos:broader|Film -Burkina Faso|skos:broader|Afrique de l'Ouest -BERT-of-Theseus: Compressing BERT by Progressive Module Replacing approach to compress BERT by progressive module replacing. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter [Github](https://github.com/JetRunner/BERT-of-Theseus) In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models, and smooths the training process. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter, liberating human effort from hyper-parameter tuning. Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.|sl:arxiv_author|Ming Zhou -Thought alone controlled device|skos:broader|Pensée -Fujitsu|skos:broader|Entreprise -NLTK|skos:broader|NLP tools -Inverse-functional properties|skos:broader|OWL -Apache Hive|skos:broader|Big Data Tools -Spark (Java web framework)|skos:broader|RESTful Web Services -Pédagogie numérique|skos:broader|Enseignement -Axoum|skos:broader|Archéologie africaine -Ivan Herman|skos:broader|W3C -ARQ|skos:broader|Andy Seaborne -Music source separation|skos:broader|IA/ML: domaines d'application -Linking Enterprise Data|skos:broader|Enterprise Data -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). 
Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:tag|tag:emnlp_2018 -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|sl:arxiv_author|Thomas S. Paula -Australia's evolutionary history|skos:broader|Evolution -\Better entity LINKing\, @facebookai open-source entity linker. [GitHub](https://github.com/facebookresearch/BLINK)|skos:broader|each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the linking decisions. 
-Panama papers|skos:broader|Paradis fiscaux -Anzo|skos:broader|TripleStore -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|sl:arxiv_author|Tomer D. Ullman -BERT-of-Theseus: Compressing BERT by Progressive Module Replacing approach to compress BERT by progressive module replacing. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter [Github](https://github.com/JetRunner/BERT-of-Theseus) In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models, and smooths the training process. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter, liberating human effort from hyper-parameter tuning. 
Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.|sl:tag|tag:bert -Semantic Web: Life Sciences|skos:broader|Biology -Chimpanzé|skos:broader|Grands Singes -Darwin|skos:broader|Scientifique -Chico Buarque|skos:broader|Musicien -Semantic SEO|skos:broader|SEO -Glue|skos:broader|Firefox extension -Mexique|skos:broader|Amérique -Juliana Rotich|skos:broader|Afrique -Java Server Faces|skos:broader|Java -GloVe|skos:broader|Global Semantic Context -AI@Amazon|skos:broader|Artificial Intelligence -delicious java|skos:broader|Tagging -TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification. LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. We also perform a comprehensive analysis to demonstrate great future opportunities. The data and code of the dataset are provided in \\url{https://github.com/wenhuchen/Table-Fact-Checking}.|sl:arxiv_author|Hongmin Wang -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. 
We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|sl:arxiv_author|Ruslan Salakhutdinov -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|sl:tag|tag:arxiv_doc -Andrew Ng|skos:broader|Technical girls and guys -Grèce|skos:broader|Pays d'Europe -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:tag|tag:arxiv_doc -Russie|skos:broader|Pays d'Europe -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. 
[Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|sl:arxiv_author|Jing Liu -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:arxiv_author|Jianfeng Gao -Ajax|skos:broader|XML -ANN NN Artificial neural network|skos:broader|IA AI -Nicolas Hulot|skos:broader|Télévision -Commission européenne|skos:broader|Europe -localization|skos:broader|Dev -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. 
However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|David Raposo -Tesla, Inc|skos:broader|Automobile -Chris Manning|skos:broader|AI girls and guys -Singular value decomposition|skos:broader|Linear algebra -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:tag|tag:bertology -Logistic regression|skos:broader|Machine learning: techniques -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. 
In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:arxiv_author|Rohan Anil -or grammatical tagging, or word-category disambiguation: the process of marking up a word in a text as corresponding to a particular part of speech|skos:broader|pattern recognition task that involves the assignment of a categorical label to each member of a sequence of observed values. Eg: POS tagging -Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (méthode résumée [ici](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). 
Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|sl:arxiv_author|Dell Zhang -Gautier Poupeau|skos:broader|Technical girls and guys -Business case: semantic web|skos:broader|Semantic Web : Business -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|sl:arxiv_firstAuthor|Brenden M. Lake -Physicien|skos:broader|Physique -Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \compositional\ training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. 
Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|sl:arxiv_author|Kelvin Guu -Subword embeddings|skos:broader|Word embeddings -Machine Learning Course|skos:broader|Machine learning -Censure et maltraitance animale|skos:broader|Maltraitance animale -Graphviz|skos:broader|Graph visualization -Smartphone|skos:broader|Mobile phone -Canal+|skos:broader|Télévision -David Ricardo|skos:broader|Economiste -OWL ontology browser|skos:broader|OWL -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. 
We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:arxiv_author|Livio Baldini Soares -Coursera: R Programming|skos:broader|Coursera -Insectes fossiles|skos:broader|Fossile -Pakistan|skos:broader|Asie -Frederick Giasson|skos:broader|SW guys (and girls) -Apple|skos:broader|Technologie - Create delightful python projects using Jupyter Notebooks|skos:broader|- shift-tab (de 1 à 3 fois) - ?xxx eg. ?learn.predict - doc - ??xxx eg. `??learn.predict - source code - H - liste des raccourcis -IKS Workshop Salzburg 2012|skos:broader|Salzburg -Backpropagation vs Biology|skos:broader|Backpropagation -Nikolai Vavilov|skos:broader|Ex URSS URSS -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|sl:arxiv_firstAuthor|Zhiheng Huang -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. 
Empirical tests are used to verify and support the theory.|sl:arxiv_author|Tengyu Ma -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|sl:tag|tag:continual_learning -Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images Deep neural networks (DNNs) have recently been achieving state-of-the-art performance on a variety of pattern-recognition tasks, most notably visual classification problems. Given that DNNs are now able to classify objects in images with near-human-level performance, questions naturally arise as to what differences remain between computer and human vision. A recent study revealed that changing an image (e.g. of a lion) in a way imperceptible to humans can cause a DNN to label the image as something else entirely (e.g. mislabeling a lion a library). Here we show a related result: it is easy to produce images that are completely unrecognizable to humans, but that state-of-the-art DNNs believe to be recognizable objects with 99.99% confidence (e.g. labeling with certainty that white noise static is a lion). Specifically, we take convolutional neural networks trained to perform well on either the ImageNet or MNIST datasets and then find images with evolutionary algorithms or gradient ascent that DNNs label with high confidence as belonging to each dataset class. It is possible to produce images totally unrecognizable to human eyes that DNNs believe with near certainty are familiar objects, which we call fooling images (more generally, fooling examples). 
Our results shed light on interesting differences between human vision and current DNNs, and raise questions about the generality of DNN computer vision.|sl:tag|tag:artificial_neural_network -Wiki Software|skos:broader|Wiki -Rock|skos:broader|Musique -African languages|skos:broader|Langues -Explosion cambrienne|skos:broader|Histoire de la vie -SemTechBiz Berlin 2012|skos:broader|Berlin -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hum, ça me rappelle quelque chose) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. 
In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:tag|tag:thewebconf_2020 -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|sl:arxiv_author|Graham Neubig -URI Reference|skos:broader|URI -Newton|skos:broader|Physicien -Synonymy|skos:broader|NLP tasks / problems -Zhang Qian|skos:broader|Explorateur -Lumières|skos:broader|Philosophie -RDF Schema querying|skos:broader|RDF Schema -ExoMars|skos:broader|Exploration marsienne -Relativité|skos:broader|Physique -Embeddings in IR|skos:broader|embedding -Cour européenne de justice|skos:broader|Institutions européennes -Sparse coding|skos:broader|Representation learning -Slot tagging|skos:broader|NLU -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! 
[Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:tag|tag:arxiv_doc -Attentats 13-11-2015|skos:broader|Etat islamique -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:tag|tag:weak_supervision -Global brain|skos:broader|NTIC -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. 
Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|sl:tag|tag:recommender_systems -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|sl:arxiv_author|François Charton -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L\\log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|sl:tag|tag:google_research -Clint Eastwood|skos:broader|Acteur -Elliotte Rusty Harold|skos:broader|Technical girls and guys -FBI v. Apple|skos:broader|Terrorisme -AJAR|skos:broader|Ajax -Novartis|skos:broader|Entreprise -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|sl:tag|tag:ulmfit -rdfQuery|skos:broader|jQuery -Graph visualization|skos:broader|Graph -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. 
An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:arxiv_author|Sebastian Ruder -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommandé par [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|sl:tag|tag:arxiv_doc -Thought alone controlled device|skos:broader|Robotique -Pays-Bas|skos:broader|Pays d'Europe -DRM|skos:broader|Content industries -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. 
Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_author|Jonathan Weber -Personal Archive|skos:broader|PIM -A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|sl:tag|tag:bertology -Nuxeo|skos:broader|Enterprise Content Management -Resources-Oriented Web Services|skos:broader|Linked Data -An ontology for linking product descriptions and business entities on the Web|skos:broader|An ontology is a specification of a conceptualization. -Evolutionary algorithm|skos:broader|Evolutionary computation -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|sl:arxiv_author|Steven Skiena -Cognition-as-a-Service|skos:broader|Cognition -Apache Spark|skos:broader|In-memory computing -JAX-RS|skos:broader|REST -Manu Sporny|skos:broader|SW guys (and girls) -Stemming|skos:broader|Text preprocessing -Internet tool|skos:broader|Tools -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. 
In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|sl:arxiv_author|Zhi-Hong Deng -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:arxiv_author|Nathan P. Palmer -blojsom|skos:broader|Blog software -Civilisation de l'Indus|skos:broader|Antiquité de l'Inde -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:arxiv_author|Andrew L. Beam -OS X Unix|skos:broader|Mac OS X -Knowledge bases|skos:broader|Knowledge -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. 
The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:tag|tag:fasttext -Boris Johnson|skos:broader|Royaume Uni -Paradise Papers|skos:broader|Paradis fiscaux -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:tag|tag:grounded_language_learning -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. 
We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|sl:tag|tag:recurrent_neural_network -Detroit|skos:broader|USA -Excel and SW|skos:broader|Excel -Intent classification and slot filling|skos:broader|Slot tagging -Web services: document vs RPC Style|skos:broader|Web Services -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|sl:arxiv_author|Iz Beltagy -Graph database and NLP|skos:broader|Graph database -Photon|skos:broader|Physique des particules -Knowledge Compilation|skos:broader|Artificial Intelligence -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. 
We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|sl:arxiv_author|Bhuwan Dhingra -Word2vec: howto|skos:broader|Word2vec -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:arxiv_author|Isaac S. Kohane -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:arxiv_author|Fabio Petroni -Histoire de la vie|skos:broader|Evolution -Reformer|skos:broader|AI@Google -Word + Entity embeddings|skos:broader|Word embeddings -scikit-learn|skos:broader|Python -Musées africains|skos:broader|Musée -Fossile|skos:broader|Histoire de la vie -[Vaswani, et al. 2017 paper](https://arxiv.org/abs/1706.03762): Attention is all you need. [#seq2seq](/tag/sequence_to_sequence_learning) using only improved self-attention units (multi-head self-attention mechanism), without any RNN. 
|skos:broader|Attention mechanism relating different positions of a sequence in order to compute a representation of the same sequence. Useful in machine reading, abstractive summarization, or image description generation -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:tag|tag:graph_embeddings -paggr|skos:broader|SPARQL -Russie|skos:broader|Europe -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L\\log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|sl:arxiv_author|Łukasz Kaiser -A Review of Relational Machine Learning for Knowledge Graphs Relational machine learning studies methods for the statistical analysis of relational, or graph-structured, data. In this paper, we provide a review of how such statistical models can be trained on large knowledge graphs, and then used to predict new facts about the world (which is equivalent to predicting new edges in the graph). In particular, we discuss two fundamentally different kinds of statistical relational models, both of which can scale to massive datasets. The first is based on latent feature models such as tensor factorization and multiway neural networks. 
The second is based on mining observable patterns in the graph. We also show how to combine these latent and observable models to get improved modeling power at decreased computational cost. Finally, we discuss how such statistical models of graphs can be combined with text-based information extraction methods for automatically constructing knowledge graphs from the Web. To this end, we also discuss Google's Knowledge Vault project as an example of such combination.|sl:arxiv_author|Volker Tresp -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. [conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations.
Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperform several popular clustering methods when tested on three public short text datasets.|sl:arxiv_author|Jiaming Xu -Rosetta Project|skos:broader|Disparition de langues vivantes -Justice internationale|skos:broader|Justice -NLP sample code|skos:broader|Sample code -Nanotechnologies|skos:broader|Technologie -Carrot2|skos:broader|Open Source -OSEMA/DERI-Renault paper|skos:broader|Fadi Badra -UNIX Tips|skos:broader|Howto -Denny Britz|skos:broader|NLP girls and guys -Pangolin|skos:broader|Espèces menacées -Generative adversarial networks Generative adversarial network|skos:broader|ANN NN Artificial neural network -Sense embeddings|skos:broader|Embeddings -Mémoire (informatique)|skos:broader|Mémoire -OpenLink Software|skos:broader|Semantic Web -Jena|skos:broader|apache.org -W3C Incubator Group Report|skos:broader|W3C -Zitgist|skos:broader|RDF browser -H5N1|skos:broader|Grippe aviaire -Graphs+Machine Learning|skos:broader|Machine learning -Afrique équatoriale|skos:broader|Afrique -A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|sl:tag|tag:arxiv_doc -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. 
On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:tag|tag:snorkel -Yahoo!|skos:broader|Search Engines -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommandé par [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|sl:tag|tag:feature_hashing -Ofir|skos:broader|Gastronomie -Dan Brickley|skos:broader|SW guys (and girls) -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. 
On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Alexander Ratner -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:tag|tag:arxiv_doc -Topic Modeling|skos:broader|Distributional semantics -A Dual Embedding Space Model for Document Ranking Investigate neural word embeddings as a source of evidence in document ranking. Presented in [this Stanford course on IR](/doc/?uri=https%3A%2F%2Fweb.stanford.edu%2Fclass%2Fcs276%2Fhandouts%2Flecture20-distributed-representations.pdf) by Chris Manning (starting slide 44) They train a word2vec model, but retain both the input and the output projections. 
During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives A fundamental goal of search engines is to identify, given a query, documents that have relevant text. This is intrinsically difficult because the query and the document may use different vocabulary, or the document may contain query words without being relevant. We investigate neural word embeddings as a source of evidence in document ranking. We train a word2vec embedding model on a large unlabelled query corpus, but in contrast to how the model is commonly used, we retain both the input and the output projections, allowing us to leverage both the embedding spaces to derive richer distributional relationships. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. We postulate that the proposed Dual Embedding Space Model (DESM) captures evidence on whether a document is about a query term in addition to what is modelled by traditional term-frequency based approaches. Our experiments show that the DESM can re-rank top documents returned by a commercial Web search engine, like Bing, better than a term-matching based signal like TF-IDF. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives, retrieving documents that are only loosely related to the query. We demonstrate that this problem can be solved effectively by ranking based on a linear mixture of the DESM and the word counting features.|sl:arxiv_author|Rich Caruana -Musique brésilienne|skos:broader|Brésil -Samba|skos:broader|Brésil -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. 
Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:arxiv_author|Zihang Dai -Talis RDF/JSON|skos:broader|RDF-in-JSON -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|sl:arxiv_author|Marco Tulio Ribeiro -Traduction automatique|skos:broader|Natural Language Processing -Eléphant|skos:broader|Animal -Transnets|skos:broader|NTIC -Gaulois|skos:broader|Antiquité -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly less operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. 
We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|sl:arxiv_author|J. P. Lewis -Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|sl:tag|tag:arxiv_doc -Binary classification models with \Uncertain\ predictions Binary classification models which can assign probabilities to categories such as the tissue is 75% likely to be tumorous or the chemical is 25% likely to be toxic are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include don't know that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an Uncertain answer similar to a human reply of don't know or 50:50 in the examples we refer to earlier, this would translate to actions such as operate on tumour or remove compound from use where the models give a more true than not answer. Where the models judge the result Uncertain the practical decision might be carry out more detailed laboratory testing of compound or commission new tissue analyses. 
The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating Uncertain from binary predictions and finally, we provide arguments for its use in practice.|sl:arxiv_author|Damjan Krstajic -Poincaré Embeddings|skos:broader|Poincaré -535|skos:broader|Eruption volcanique -Zune|skos:broader|Musique en ligne -Watson IBM's Watson|skos:broader|IA AI -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|sl:arxiv_firstAuthor|Kawin Ethayarajh -BERT|skos:broader|NLP@Google -KD-MKB related|skos:broader|KD-MKB -HypiosVoCampParisMay2010|skos:broader|Hypios -Semantic Web|skos:broader|Semantic technology -Biodiversité : effondrement|skos:broader|Grands problèmes -Word2vec: howto|skos:broader|Using word embeddings -Jena and database|skos:broader|Semantic Web: databases -Trust in the Web of Data|skos:broader|Trust -Image recognition|skos:broader|Machine learning: problems -Politique française|skos:broader|Politique -Label Embedding|skos:broader|Embeddings -KGAT: Knowledge Graph Attention Network for Recommendation To provide more accurate, diverse, and explainable recommendation, it is compulsory to go beyond modeling user-item interactions and take side information into account. Traditional methods like factorization machine (FM) cast it as a supervised learning problem, which assumes each interaction as an independent instance with side information encoded. Due to the overlook of the relations among instances or items (e.g., the director of a movie is also an actor of another movie), these methods are insufficient to distill the collaborative signal from the collective behaviors of users. In this work, we investigate the utility of knowledge graph (KG), which breaks down the independent interaction assumption by linking items with their attributes. We argue that in such a hybrid structure of KG and user-item graph, high-order relations --- which connect two items with one or multiple linked attributes --- are an essential factor for successful recommendation. We propose a new method named Knowledge Graph Attention Network (KGAT) which explicitly models the high-order connectivities in KG in an end-to-end fashion. It recursively propagates the embeddings from a node's neighbors (which can be users, items, or attributes) to refine the node's embedding, and employs an attention mechanism to discriminate the importance of the neighbors. Our KGAT is conceptually advantageous to existing KG-based recommendation methods, which either exploit high-order relations by extracting paths or implicitly modeling them with regularization. 
Empirical results on three public benchmarks show that KGAT significantly outperforms state-of-the-art methods like Neural FM and RippleNet. Further studies verify the efficacy of embedding propagation for high-order relation modeling and the interpretability benefits brought by the attention mechanism.|sl:arxiv_author|Xiang Wang -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|sl:tag|tag:arxiv_doc -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. 
Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|sl:arxiv_author|Alexandre Passos -Brains in silicon Neuromorphique Neuromorphic engineering|skos:broader|C'est déjà demain -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:tag|tag:k_nearest_neighbors_algorithm -Pollution|skos:broader|Grands problèmes -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|sl:arxiv_author|Zhiheng Huang -AI & IR|skos:broader|Artificial Intelligence -Toutankhamon|skos:broader|Egypte antique -Javascript tips|skos:broader|JavaScript -Rocard|skos:broader|Politique française -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. 
We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Joyce Chai -Cache buster|skos:broader|Cache -Gravitation|skos:broader|Physique -Alphago|skos:broader|Deep Learning -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:arxiv_author|Alex Beutel -Semantic framework|skos:broader|Semantic Web Dev -Cour européenne de justice|skos:broader|Union européenne -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. 
Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|sl:arxiv_author|David Duvenaud -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|sl:tag|tag:arxiv_doc -XTech|skos:broader|NTIC -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. 
This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:tag|tag:these_irit_renault_biblio_initiale -Film danois|skos:broader|Danemark -Ajax|skos:broader|Dev -Global brain|skos:broader|Conscience artificielle -Virtuoso Open-Source Edition|skos:broader|Open Source -Graph database|skos:broader|Graph -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|sl:tag|tag:ulmfit -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. 
Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:arxiv_author|Zhiruo Wang -Credit card|skos:broader|Banque -Oum Kalsoum|skos:broader|Musicien -Text Editor|skos:broader|Text tools -Comet Wild 2|skos:broader|Comète -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|sl:arxiv_author|Yue Zhang -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:arxiv_author|Alexandre Sablayrolles -Périclès|skos:broader|Grand Homme -Génétique et Évolution|skos:broader|Genetics Génétique -Earth map|skos:broader|Carte -Digital Audio|skos:broader|Musique -SIMILE Exhibit|skos:broader|SIMILE -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. 
Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|sl:tag|tag:nlp_tools -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_author|Geoffrey I. Webb -Regroupement familial et test ADN de filiation|skos:broader|Sarkozy : immigration -Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. 
We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.|sl:arxiv_author|Rik Koncel-Kedziorski -Bura|skos:broader|African art -Solr documentation|skos:broader|Solr -Flask|skos:broader|Web dev -RDC|skos:broader|Afrique Centrale -JCS - Java Caching System|skos:broader|Java dev -Relations franco-américaines|skos:broader|USA -SourceForge|skos:broader|Software -When a learning algorithm is able to interactively query the user to obtain the label of a data point (pb: estimate which points are more valuable to solicit labels for) Active learning deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. The goal of active learning: to reduce the cost of labeling. To this end, the learning algorithm is allowed to choose which data to label based on uncertainty (e.g., the entropy of predicted class probabilities) or other heuristics ([src](doc:2020/07/2007_00077_similarity_search_))|skos:broader|the bottleneck of getting labeled training data -J'ai un petit problème avec mon ordinateur|skos:broader|Ca craint -Saudade@pt|skos:broader|Souvenirs -Alexandria|skos:broader|Ville -Herschel telescope|skos:broader|Télescope -Louvre|skos:broader|Musée -Variational Bayesian methods|skos:broader|Bayesian Reasoning -Critique du capitalisme|skos:broader|Capitalisme -Kingsley Idehen|skos:broader|Technical girls and guys -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. 
We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|sl:arxiv_author|Christopher D. Manning -Subventions agricoles|skos:broader|Agriculture -Dean Allemang|skos:broader|TopQuadrant -France Télécom|skos:broader|Entreprise -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|sl:arxiv_author|Ross Girshick -RDF data visualization|skos:broader|RDF -Egypte antique|skos:broader|Antiquité -CIA|skos:broader|USA -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.|sl:tag|tag:knowledge_distillation -Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. two methods: 1. Category Embedding model: it replaces the entities in the context with their directly labeled categories to build categories’ context; 2. Hierarchical Category Embedding: it further incorporates all ancestor categories of the context entities to utilize the hierarchical information. 
Due to the lack of structured knowledge applied in learning distributed representation of categories, existing work cannot incorporate category hierarchies into entity information. We propose a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. The framework allows to compute meaningful semantic relatedness between entities and categories. Our framework can handle both single-word concepts and multiple-word concepts with superior performance on concept categorization and yield state of the art results on dataless hierarchical classification.|sl:arxiv_author|Zhiting Hu -Obélisque d'Axoum|skos:broader|Obélisque -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Antoine Zimmermann -Test of independent invention|skos:broader|Web architecture -Markets are efficient if and only if P = NP Hmm wow I prove that if markets are weak-form efficient, meaning current prices fully reflect all information available in past prices, then P = NP, meaning every computational problem whose solution can be verified in polynomial time can also be solved in polynomial time. I also prove the converse by showing how we can program the market to solve NP-complete problems. Since P probably does not equal NP, markets are probably not efficient. Specifically, markets become increasingly inefficient as the time series lengthens or becomes more frequent. An illustration by way of partitioning the excess returns to momentum strategies based on data availability confirms this prediction.|sl:tag|tag:p_np -log4j|skos:broader|apache.org -RDF search engine|skos:broader|Moteur de recherche -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). 
MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:arxiv_author|Xiaodong Liu -Bayesian analysis|skos:broader|Probabilistic Graphical Models -Combining Statistics and Semantics|skos:broader|Semantics -Paul Krugman|skos:broader|Economiste -Jean Rouch|skos:broader|Ethnologie -Plante|skos:broader|Nature -Read-Write Secure Data Web|skos:broader|Security and REST -Thèse IRIT-Renault NLP-KB|skos:broader|Knowledge Graph + Deep Learning -End-To-End Memory Networks Neural network with a recurrent attention model over a possibly large external memory. cited by [#A. Bordes](/tag/antoine_bordes) at [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html) We introduce a neural network with a recurrent attention model over a possibly large external memory. The architecture is a form of Memory Network (Weston et al., 2015) but unlike the model in that work, it is trained end-to-end, and hence requires significantly less supervision during training, making it more generally applicable in realistic settings. It can also be seen as an extension of RNNsearch to the case where multiple computational steps (hops) are performed per output symbol. The flexibility of the model allows us to apply it to tasks as diverse as (synthetic) question answering and to language modeling. For the former our approach is competitive with Memory Networks, but with less supervision. For the latter, on the Penn TreeBank and Text8 datasets our approach demonstrates comparable performance to RNNs and LSTMs. In both cases we show that the key concept of multiple computational hops yields improved results.|sl:tag|tag:france_is_ai_2018 -Do Deep Nets Really Need to be Deep? Currently, deep neural networks are the state of the art on problems such as speech recognition and computer vision. In this extended abstract, we show that shallow feed-forward networks can learn the complex functions previously learned by deep nets and achieve accuracies previously only achievable with deep models. Moreover, in some cases the shallow neural nets can learn these deep functions using a total number of parameters similar to the original deep model. We evaluate our method on the TIMIT phoneme recognition task and are able to train shallow fully-connected nets that perform similarly to complex, well-engineered, deep convolutional architectures. Our success in training shallow neural nets to mimic deeper models suggests that there probably exist better algorithms for training shallow feed-forward nets than those currently available.|sl:tag|tag:arxiv_doc -Lalibela|skos:broader|Ethiopie -Mladic|skos:broader|Crime contre l'Humanité -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. 
Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|sl:arxiv_author|Nikolaos Kolitsas -Property Graphs|skos:broader|Graph -Automotive Ontology Working Group|skos:broader|Ontologies -Enhancing the Power of Cardinal's Algorithm Cardinal's factorization algorithm of 1996 splits a univariate polynomial into two factors with root sets separated by the imaginary axis, which is an important goal itself and a basic step toward root-finding. The novelty of the algorithm and its potential power have been well recognized by experts immediately, but by 2016, that is, two decades later, its practical value still remains nil, particularly because of the high computational cost of performing its final stage by means of computing approximate greatest common divisor of two polynomials. We briefly recall Cardinal's algorithm and its difficulties, amend it based on some works performed since 1996, extend its power to splitting out factors of a more general class, and reduce the final stage of the algorithm to quite manageable computations with structured matrices. Some of our techniques can be of independent interest for matrix computations.|sl:tag|tag:jean_paul -gnowsis|skos:broader|Semantic Web project -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. 
Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|sl:arxiv_author|David R. So -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute.|sl:tag|tag:nlp_text_classification -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. 
When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|sl:arxiv_author|Cho-Jui Hsieh -Canada|skos:broader|Amérique -Constitution européenne|skos:broader|Europe -Machine Learning + Semantic Web|skos:broader|Semantic Web -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:tag|tag:neural_symbolic_computing -Tabulator|skos:broader|Linked Data -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:tag|tag:time_series -Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. 
Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model. Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|sl:tag|tag:arxiv_doc -Privacy and internet|skos:broader|Cybersecurity Sécurité informatique -Poincaré Embeddings|skos:broader|Embeddings -Paraphrase identification|skos:broader|Identification of similar documents -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Ryan Faulkner -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. 
Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:tag|tag:graphs_machine_learning -Mike Bergman|skos:broader|Technical girls and guys -Pará|skos:broader|Brésil -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|sl:arxiv_firstAuthor|Ledell Wu -Handwriting recognition|skos:broader|OCR -Clandestins|skos:broader|Immigration -Neural machine translation|skos:broader|Neural networks -StarSpace|skos:broader|Entity embeddings -Seq2Seq Encoder-Decoder|skos:broader|Sequence Modeling Seq2Seq -Pêche|skos:broader|Poisson -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. 
The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|sl:arxiv_author|Lenka Zdeborová -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy text, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|sl:arxiv_author|Michael Conover -Négociations climat |skos:broader|Changement climatique -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. 
We propose a novel approach to 'vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:arxiv_author|Radim Řehůřek -Airport|skos:broader|Apple -ONU|skos:broader|Etat du monde -Metric Learning|skos:broader|Similarity learning -GNN|skos:broader|ANN NN Artificial neural network -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Review](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. 
Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|sl:arxiv_author|Chenguang Zhu -Phrases (NLP)|skos:broader|General NLP tasks -Bug Brother|skos:broader|Big Brother -Apache Shiro|skos:broader|Security -Allemagne|skos:broader|Pays d'Europe -iMac|skos:broader|Macintosh -Nginx|skos:broader|Web server -Named Entity Recognition|skos:broader|Sequence labeling -Sense2vec|skos:broader|Sense embeddings -Italie|skos:broader|Pays d'Europe -Lesk algorithm|skos:broader|Word-sense disambiguation -Judea Pearl|skos:broader|AI girls and guys -Reinhard Mey|skos:broader|Allemagne -Venus Express|skos:broader|Missions spatiales -Brésil|skos:broader|Amérique -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|sl:arxiv_author|Tal Wagner -Cookies|skos:broader|Web dev -Encoding|skos:broader|Dev -aims to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean (Most k-means-type algorithms require the number of clusters – k – to be specified in advance)|skos:broader|the task of grouping a set of objects in such a way that objects in the same group (cluster) are more similar (in some sense or another) to each other than to those in other groups. -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. 
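A minimal numpy sketch of the orthogonal-basis idea behind the GEM entry above (function and variable names are mine, not the paper's): project a word vector onto the subspace spanned by its context words, and keep the orthogonal residual as the word's "novel meaning" component.

import numpy as np

def novelty_component(word_vec, context_vecs):
    # Orthonormal basis of the subspace spanned by the context word vectors
    # (QR decomposition is numerically equivalent to Gram-Schmidt here)
    basis, _ = np.linalg.qr(np.stack(context_vecs, axis=1))
    projection = basis @ (basis.T @ word_vec)
    # Residual orthogonal to the context subspace = the new basis direction
    return word_vec - projection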
For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|sl:arxiv_author|Daniel Salas -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:tag|tag:knowledge_resources -RotatE|skos:broader|Knowledge Graph Embeddings -Lauryn Hill|skos:broader|Musique -Imbalanced Data|skos:broader|Machine learning: problems -Origine de la vie|skos:broader|Histoire de la vie -Europe : aberrations|skos:broader|Construction européenne -Franco-Allemand|skos:broader|Allemagne -Synthetic life|skos:broader|Biology -Industrie de l'armement|skos:broader|Armement -Apple-Intel|skos:broader|Intel -Exploration marsienne|skos:broader|Exploration spatiale -Oregon|skos:broader|USA -Convolutional Knowledge Graph Embeddings|skos:broader|Knowledge Graph Embeddings -PocketSphinx|skos:broader|Speech-to-Text -Karen Blixen|skos:broader|Danemark -Cogema|skos:broader|Nucléaire -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. 
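A minimal sketch of the conjugate-Bayesian encoding described in the WeWork entry above, assuming a binary target and a Beta prior; the pandas interface and default prior parameters are illustrative, not from the paper.

import pandas as pd

def beta_encode(df, cat_col, target_col, alpha=1.0, beta=1.0):
    # Update a Beta(alpha, beta) prior with the Bernoulli counts observed
    # for each level of the categorical feature, then encode each row
    # with the posterior mean of its level.
    stats = df.groupby(cat_col)[target_col].agg(["sum", "count"])
    posterior_mean = (stats["sum"] + alpha) / (stats["count"] + alpha + beta)
    return df[cat_col].map(posterior_mean)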
However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|sl:arxiv_firstAuthor|Aditi Chaudhary -BERT + Sentence Embeddings|skos:broader|BERT -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:arxiv_author|Avirup Sil -Jsonld/Jena|skos:broader|JSON-LD -MyCarEvent|skos:broader|OWL -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). 
We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new table position self-attention to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:tag|tag:knowledge_base -No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity... Traditionally, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer’s output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity. Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point $x$ is similar to a set of positive points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize.
A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow. In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved. As a result, the proxy-loss improves on state-of-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:arxiv_firstAuthor|Yair Movshovitz-Attias -Palmyra|skos:broader|Archéologie -Semantic tagging|skos:broader|Tagging -k-nearest neighbors algorithm|skos:broader|Nearest neighbor search -Liberté, liberté chérie|skos:broader|Liberté -Ian Davis|skos:broader|Technical girls and guys -Hierarchical Memory Networks|skos:broader|Memory in deep learning -Filme brasileiro@pt|skos:broader|Brésil -SWEO Interest Group|skos:broader|W3C -Extreme classification|skos:broader|Multi-label classification -KG Embeddings Library|skos:broader|Knowledge Graph Embeddings -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.|sl:arxiv_author|Rico Sennrich -VoIP|skos:broader|Internet -Film cubain|skos:broader|Film -Mozilla|skos:broader|Dev -Lobbies économiques|skos:broader|Lobby -Logistic regression|skos:broader|Regression analysis -TAP|skos:broader|Semantic Web : Application -Cambridge Analytica|skos:broader|Social manipulation -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. 
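As the 'No Fuss' entry above notes, the Proxy-NCA loss reduces to a cross-entropy over (negative squared) distances to one learned proxy per class. A minimal PyTorch sketch; the L2 normalization and tensor layout are my assumptions.

import torch
import torch.nn.functional as F

def proxy_nca_loss(embeddings, labels, proxies):
    # Squared distances between L2-normalized embeddings and class proxies
    distances = torch.cdist(F.normalize(embeddings, dim=-1),
                            F.normalize(proxies, dim=-1)) ** 2
    # Pull each point toward its own class proxy, push it from the others
    return F.cross_entropy(-distances, labels)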
Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:arxiv_author|Ofir Nachum -Sem web: future|skos:broader|Semantic Web -Tensor|skos:broader|Machine learning: techniques -Beatles|skos:broader|Musique -Benjamin Heinzerling|skos:broader|NLP girls and guys -Markets are efficient if and only if P = NP Hmm wow I prove that if markets are weak-form efficient, meaning current prices fully reflect all information available in past prices, then P = NP, meaning every computational problem whose solution can be verified in polynomial time can also be solved in polynomial time. I also prove the converse by showing how we can program the market to solve NP-complete problems. Since P probably does not equal NP, markets are probably not efficient. Specifically, markets become increasingly inefficient as the time series lengthens or becomes more frequent. An illustration by way of partitioning the excess returns to momentum strategies based on data availability confirms this prediction.|sl:arxiv_author|Philip Maymin -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. 
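A rough sketch of the key-value nearest-neighbor lookup at the heart of the 'Learning to Remember Rare Events' module above; the cosine-similarity lookup follows the description, but the memory update rule is omitted and the interface is mine.

import torch
import torch.nn.functional as F

def memory_lookup(query, mem_keys, mem_values, k=4):
    # Cosine similarity between the query and every memory key
    sims = F.normalize(query, dim=-1) @ F.normalize(mem_keys, dim=-1).T
    top = sims.topk(k, dim=-1)
    # Return the values (e.g. labels) of the k nearest memory slots
    return mem_values[top.indices], top.values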
We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|sl:tag|tag:ruslan_salakhutdinov -Shanghaï|skos:broader|Ville -Frog|skos:broader|Animal -Mac OS X 10.4|skos:broader|OSX OS X -Bitcoin|skos:broader|Money -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Peter Lu -Knowledge distillation|skos:broader|Machines teaching machines -MinHash|skos:broader|Algorithmes -OWL 2|skos:broader|OWL -Préhistoire|skos:broader|Histoire -Entity linking|skos:broader|Entity discovery and linking -Text Search|skos:broader|Information retrieval -Identity Crisis in Linked Data|skos:broader|URI -Clerezza|skos:broader|Restful semantic web services -Linux|skos:broader|Unix -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. 
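A sketch of one codistillation step as characterized in the entry above: each replica minimizes its task loss plus a distillation term toward a (possibly stale, detached) copy of a peer's predictions. The weighting alpha is an assumed hyperparameter, not from the paper.

import torch.nn.functional as F

def codistillation_loss(logits, peer_logits, targets, alpha=0.5):
    # Usual task loss plus agreement with the peer model's predictions
    task = F.cross_entropy(logits, targets)
    agree = F.kl_div(F.log_softmax(logits, dim=-1),
                     F.softmax(peer_logits.detach(), dim=-1),
                     reduction="batchmean")
    return task + alpha * agree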
In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm. Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:arxiv_author|Alexandre Passos -On device NLP|skos:broader|NLP -Visualization Tools|skos:broader|Tools -Learning Confidence for Out-of-Distribution Detection in Neural Networks Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.|sl:arxiv_author|Terrance DeVries -War|skos:broader|Conflits -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata...
are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. This information has been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:arxiv_author|Seung-won Hwang -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:arxiv_author|Ryan McDonald -Learning english|skos:broader|Anglais -A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer from high computation and space costs. Graph embedding is an effective yet efficient way to solve the graph analytics problem.
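The basis-vector idea from the 'Categorical Metadata Representation' entry above can be sketched as follows: each (possibly rare) category vector is a softmax mixture of a small set of shared basis vectors rather than a free embedding, which cuts parameters when the number of categories is large. Sizes and the softmax choice are my assumptions.

import torch
import torch.nn as nn

class CategoryBasis(nn.Module):
    def __init__(self, n_categories, n_basis, dim):
        super().__init__()
        self.mixture = nn.Embedding(n_categories, n_basis)
        self.basis = nn.Parameter(torch.randn(n_basis, dim))

    def forward(self, category_ids):
        # Each category vector is a convex combination of shared basis vectors
        return torch.softmax(self.mixture(category_ids), dim=-1) @ self.basis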
It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|sl:arxiv_author|Kevin Chen-Chuan Chang -Aïchi|skos:broader|Japon -Histoire coloniale|skos:broader|Colonisation -FaceNet: A Unified Embedding for Face Recognition and Clustering Learns a Euclidean embedding per image Uses a deep CNN trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. state-of-the-art face recognition performance using only 128-bytes per face. Despite significant recent advances in the field of face recognition, implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors. Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. Our system cuts the error rate in comparison to the best published result by 30% on both datasets. We also introduce the concept of harmonic embeddings, and a harmonic triplet loss, which describe different versions of face embeddings (produced by different networks) that are compatible to each other and allow for direct comparison between each other.|sl:tag|tag:arxiv_doc -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. 
Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|sl:tag|tag:oov -Manu Dibango|skos:broader|Music of Africa -Carbon sequestration|skos:broader|Climate crisis -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|sl:arxiv_firstAuthor|Jialong Han -Le gouvernement Chirac est trop con|skos:broader|Con de Chirac -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|sl:arxiv_author|Jian Tang -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. 
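Since RotatE is directly relevant to knowledge graph embedding libraries like this one, here is a minimal sketch of its scoring function as described above: each relation acts as a rotation in complex space, and plausible triples leave the rotated head close to the tail. The margin gamma and tensor layout are assumptions.

import torch

def rotate_score(head, rel_phase, tail, gamma=12.0):
    # Split real/imaginary parts; the relation is a unit-modulus rotation
    h_re, h_im = head.chunk(2, dim=-1)
    t_re, t_im = tail.chunk(2, dim=-1)
    r_re, r_im = torch.cos(rel_phase), torch.sin(rel_phase)
    # h ∘ r - t in complex arithmetic
    re = h_re * r_re - h_im * r_im - t_re
    im = h_re * r_im + h_im * r_re - t_im
    # High score = small distance between the rotated head and the tail
    return gamma - torch.sqrt(re ** 2 + im ** 2).sum(dim=-1)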
It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:tag|tag:language_model -foaf+ssl|skos:broader|WebID -Phrase embeddings|skos:broader|Embeddings -English-grammar|skos:broader|Anglais -ESWC 2014|skos:broader|ESWC -Pierre Fresnay|skos:broader|Acteur -Bidirectional Encoder Representations from Transformers: pretraining technique for NLP. [Google AI blog post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) BERT is designed to pre-train deep bidirectional representations by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT representations can be fine-tuned with just one additional output layer BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction (but it has been shown in the RoBERTa paper that this training objective doesn’t help that much). The general BERT adaptation approach is to alter the model used for pre-training while retaining the transformer encoder layers. The model discards the layers used for the final prediction in the pre-training tasks and adds layers to predict the target task. All parameters are then fine tuned on the target task Builds on [#The Transformer](/tag/attention_is_all_you_need) Code and pre-trained models open-sourced on Nov 3rd, 2018.|skos:broader|replacement of the vectorial representation of words with a matrix representation where each word’s representation includes information about its context Embedding words through a language model Language-model-based encoders The key idea underneath is to train a contextual encoder with a language model objective on a large unannotated text corpus. During the training, part of the text is masked and the goal is to encode the remaining context and predict the missing part. During the training, part of the text is masked and the goal is to encode the remaining context and predict the missing part. ([source](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1902.11269)) -Dallol|skos:broader|Ethiopie -Musubi|skos:broader|Social Networks -Terre de Feu|skos:broader|Chili -fps ontologies|skos:broader|Ontologies -Semantic Web Services vs SOAP|skos:broader|Semantic Web Services -Paul Miller|skos:broader|Technical girls and guys -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. 
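A toy sketch of how a span-selection training instance as described in the entry above might be built: blank out one term of the query sentence and keep a retrieved passage only if it contains the answer. corpus.search is a stand-in for a BM25 retriever, not a real API.

import random
import re

def make_span_selection_instance(sentence, corpus):
    # Blank out one term; the blanked term becomes the answer to recover
    terms = re.findall(r"\w+", sentence)
    answer = random.choice(terms)
    query = sentence.replace(answer, "[BLANK]", 1)
    # Keep a retrieved passage only if it is answer-bearing
    passage = next(p for p in corpus.search(query) if answer in p)
    return query, passage, answer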
We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:arxiv_author|Rishav Chakravarti -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|sl:tag|tag:uncertainty_in_deep_learning -RDF Validator|skos:broader|RDF Tools -Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. 
We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|sl:arxiv_author|Hung-Yu Kao -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance. Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, this reminds me of something) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand.
In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:tag|tag:nlp_microsoft -AdSense|skos:broader|Publicité Internet -Bernard Vatant|skos:broader|Technical girls and guys -Linked Data Platform|skos:broader|Linked Data -Denisovan|skos:broader|Origines de l'homme -Banque|skos:broader|Finance -A Review of Relational Machine Learning for Knowledge Graphs Relational machine learning studies methods for the statistical analysis of relational, or graph-structured, data. In this paper, we provide a review of how such statistical models can be trained on large knowledge graphs, and then used to predict new facts about the world (which is equivalent to predicting new edges in the graph). In particular, we discuss two fundamentally different kinds of statistical relational models, both of which can scale to massive datasets. The first is based on latent feature models such as tensor factorization and multiway neural networks. The second is based on mining observable patterns in the graph. We also show how to combine these latent and observable models to get improved modeling power at decreased computational cost. Finally, we discuss how such statistical models of graphs can be combined with text-based information extraction methods for automatically constructing knowledge graphs from the Web. To this end, we also discuss Google's Knowledge Vault project as an example of such combination.|sl:arxiv_author|Maximilian Nickel -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. 
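The noise-robust objective mentioned in the TaxoExpan entry above is an InfoNCE-style loss: select the one positive ⟨query concept, anchor concept⟩ pair among sampled negatives. A minimal PyTorch sketch; the score layout (positive in column 0) is an assumption.

import torch
import torch.nn.functional as F

def info_nce_loss(scores):
    # scores: (batch, 1 + n_negatives); column 0 scores the positive
    # <query concept, anchor concept> pair, the rest score negatives.
    targets = torch.zeros(scores.size(0), dtype=torch.long)
    return F.cross_entropy(scores, targets)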
We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|sl:tag|tag:arxiv_doc -Einstein|skos:broader|Scientifique -Drupal/RDF|skos:broader|RDF -eRDF|skos:broader|RDF -Web tools|skos:broader|Tools -Medical Information Search|skos:broader|Medical Data -Microsoft|skos:broader|Software -Hayabusa|skos:broader|Japon -Knowledge Graph Completion|skos:broader|Knowledge Discovery -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:grounded_language_learning -Google Rich Snippets|skos:broader|Google: SEO -CEA, LIST|skos:broader|CEA -bengee|skos:broader|Technical guys -Learning to rank|skos:broader|Machine learning: problems -TopBraid/SPIN|skos:broader|Inference -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. 
For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|sl:arxiv_author|Vikash Singh -eClassOWL|skos:broader|GoodRelations -University of Maryland|skos:broader|Universités américaines -SW demo|skos:broader|Demo -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:arxiv_author|Pang Wei Koh -Lobby agroalimentaire|skos:broader|Lobby -Méroé|skos:broader|Archéologie africaine -Amérindien|skos:broader|Amérique -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks.
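The complete-word, longest-match-first behavior described in the FlashText entry above can be tried directly with the open-source flashtext package; a short usage sketch:

from flashtext import KeywordProcessor  # pip install flashtext

processor = KeywordProcessor()
processor.add_keyword("Machine")
processor.add_keyword("Machine learning")
# Longest match wins, and only complete words are matched
print(processor.extract_keywords("I like Machine learning"))  # ['Machine learning']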
-eClassOWL|skos:broader|GoodRelations
-University of Maryland|skos:broader|Universités américaines
-SW demo|skos:broader|Demo
-Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:arxiv_author|Pang Wei Koh
-Lobby agroalimentaire|skos:broader|Lobby
-Méroé|skos:broader|Archéologie africaine
-Amérindien|skos:broader|Amérique
-GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:tag|tag:yann_lecun
-FaceNet: A Unified Embedding for Face Recognition and Clustering Learns a Euclidean embedding per image Uses a deep CNN trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. state-of-the-art face recognition performance using only 128-bytes per face. Despite significant recent advances in the field of face recognition, implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors. Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. Our system cuts the error rate in comparison to the best published result by 30% on both datasets. We also introduce the concept of harmonic embeddings, and a harmonic triplet loss, which describe different versions of face embeddings (produced by different networks) that are compatible to each other and allow for direct comparison between each other.|sl:tag|tag:face_recognition
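A hedged sketch of the triplet loss the FaceNet row above refers to (illustration only; margin=0.2 is an illustrative value, and this is not the paper's exact training setup):

import numpy as np

def l2norm(x):
    return x / np.linalg.norm(x, axis=-1, keepdims=True)

def triplet_loss(anchor, positive, negative, margin=0.2):
    """Sketch of a FaceNet-style triplet loss: push the anchor-positive
    distance below the anchor-negative distance by at least `margin`."""
    d_pos = np.sum((anchor - positive) ** 2, axis=-1)
    d_neg = np.sum((anchor - negative) ** 2, axis=-1)
    return np.maximum(d_pos - d_neg + margin, 0.0).mean()

# toy batch of 128-dimensional, L2-normalized embeddings
rng = np.random.default_rng(0)
a, p, n = (l2norm(rng.normal(size=(4, 128))) for _ in range(3))
print(triplet_loss(a, p, n))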
-Zotero|skos:broader|Open Source
-Supervised machine learning|skos:broader|Machine learning
-Text Embeddings|skos:broader|Embeddings in NLP
-A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|sl:arxiv_author|Andrew M. Saxe
-Semantic Web : Tools|skos:broader|Semantic Web
-Semantic Statistics|skos:broader|Statistical data
-Pêche|skos:broader|Océan
-Enhancing the Power of Cardinal's Algorithm Cardinal's factorization algorithm of 1996 splits a univariate polynomial into two factors with root sets separated by the imaginary axis, which is an important goal itself and a basic step toward root-finding. The novelty of the algorithm and its potential power have been well recognized by experts immediately, but by 2016, that is, two decades later, its practical value still remains nil, particularly because of the high computational cost of performing its final stage by means of computing approximate greatest common divisor of two polynomials. We briefly recall Cardinal's algorithm and its difficulties, amend it based on some works performed since 1996, extend its power to splitting out factors of a more general class, and reduce the final stage of the algorithm to quite manageable computations with structured matrices. Some of our techniques can be of independent interest for matrix computations.|sl:tag|tag:algorithmes
-Meetup Web Sémantique|skos:broader|Paris
-Cinéma|skos:broader|Art
-Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|sl:tag|tag:arxiv_doc
-Patent Landscaping|skos:broader|AI 4 IP
-Lisp|skos:broader|Programming language
-Uber|skos:broader|Automobile
-Ted Nelson|skos:broader|Technical girls and guys
-Jeu d'échecs|skos:broader|Divers
-Concept Extraction / Linking|skos:broader|Keyword/keyphrase extraction
-Automotive and web technologies|skos:broader|Automobile 2.0
-Nissan|skos:broader|Automobile
-Python sample code|skos:broader|Sample code
-Seyni Kountché|skos:broader|Niger
-Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|sl:arxiv_firstAuthor|Matteo Pagliardini
-Tabulator|skos:broader|Linking Open Data
-RDF Parser|skos:broader|RDF
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties we include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in a the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:arxiv_author|Haitian Sun
-Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|sl:tag|tag:arxiv_doc
-fps dev|skos:broader|fps
-Conversational AI|skos:broader|NLP tasks / problems
-Treeview|skos:broader|Dev
-Vidéo Ina.fr|skos:broader|Video
-Al-Qaida|skos:broader|Terrorisme islamiste
-An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|sl:tag|tag:conditional_random_field
-Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Matt Botvinick
-Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|sl:arxiv_author|Shuang Chen
-Feature learning|skos:broader|Representation learning
-Word2Bits - Quantized Word Vectors We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer Word vectors require significant amounts of memory and storage, posing issues to resource limited devices like mobile phones and GPUs. We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer. We train word vectors on English Wikipedia (2017) and evaluate them on standard word similarity and analogy tasks and on question answering (SQuAD). Our quantized word vectors not only take 8-16x less space than full precision (32 bit) word vectors but also outperform them on word similarity tasks and question answering.|sl:tag|tag:word_embedding
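A hedged sketch of the kind of quantization function the Word2Bits row above mentions; the sign-based mapping and the +/- 1/3 output levels follow common descriptions of the 1-bit variant but should be treated as illustrative, not authoritative:

import numpy as np

def quantize_1bit(x):
    # Sketch: every parameter is mapped to one of two values based on its
    # sign, so it can be stored in 1 bit instead of 32. The levels
    # (+/- 1/3) are illustrative assumptions, not necessarily the paper's.
    return np.where(x >= 0, 1 / 3, -1 / 3)

v = np.array([0.42, -0.07, 0.0, -1.3])
print(quantize_1bit(v))  # [ 0.333... -0.333...  0.333... -0.333...]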
-Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|sl:tag|tag:phrase_embeddings
-Arbres|skos:broader|Nature
-Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:arxiv_author|Pedro Avelar
-Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|sl:arxiv_author|Zhilin Yang
-Missing Matter|skos:broader|Physics
-Wolfram|skos:broader|Technical guys
-Jeu d'échecs|skos:broader|Jeux
-Fulani|skos:broader|Afrique de l'Ouest
-Seevl|skos:broader|Alexandre Passant
-Extinction d'espèces|skos:broader|Écologie
-Variational autoencoder (VAE)|skos:broader|Autoencoder
-FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:arxiv_author|Zizhao Zhang
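A hedged, framework-agnostic sketch of the two-step FixMatch recipe described in the row above; `model`, `weak_augment` and `strong_augment` are assumed placeholders (model.predict is taken to return a probability vector over classes), and the 0.95 threshold is an illustrative confidence cutoff:

import numpy as np

def fixmatch_unlabeled_loss(model, batch, weak_augment, strong_augment,
                            threshold=0.95):
    """Sketch of FixMatch on one unlabeled batch:
    1) pseudo-label from the weakly augmented view,
    2) keep the label only if the model is confident,
    3) train the strongly augmented view to predict that label."""
    losses = []
    for x in batch:
        probs = model.predict(weak_augment(x))
        if probs.max() < threshold:
            continue                      # low confidence: no pseudo-label
        pseudo = int(probs.argmax())      # pseudo-label from the weak view
        probs_strong = model.predict(strong_augment(x))
        losses.append(-np.log(probs_strong[pseudo]))  # cross-entropy
    return float(np.mean(losses)) if losses else 0.0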
-Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|sl:arxiv_author|Brenden M. Lake
-Gouvernement Sarkozy|skos:broader|Sarkozy
-The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:tag|tag:representation_learning
-Semantic Web|skos:broader|Web of data
-Egit|skos:broader|Git
-Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images Deep neural networks (DNNs) have recently been achieving state-of-the-art performance on a variety of pattern-recognition tasks, most notably visual classification problems. Given that DNNs are now able to classify objects in images with near-human-level performance, questions naturally arise as to what differences remain between computer and human vision. A recent study revealed that changing an image (e.g. of a lion) in a way imperceptible to humans can cause a DNN to label the image as something else entirely (e.g. mislabeling a lion a library). Here we show a related result: it is easy to produce images that are completely unrecognizable to humans, but that state-of-the-art DNNs believe to be recognizable objects with 99.99% confidence (e.g. labeling with certainty that white noise static is a lion). Specifically, we take convolutional neural networks trained to perform well on either the ImageNet or MNIST datasets and then find images with evolutionary algorithms or gradient ascent that DNNs label with high confidence as belonging to each dataset class. It is possible to produce images totally unrecognizable to human eyes that DNNs believe with near certainty are familiar objects, which we call fooling images (more generally, fooling examples). Our results shed light on interesting differences between human vision and current DNNs, and raise questions about the generality of DNN computer vision.|sl:arxiv_author|Anh Nguyen
-Cathares|skos:broader|Chrétienté
-Unix|skos:broader|OS
-Le Pen|skos:broader|Extrème droite
-SPARQL|skos:broader|Semantic Web
-Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks. We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|sl:tag|tag:embedding_evaluation
-Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images Deep neural networks (DNNs) have recently been achieving state-of-the-art performance on a variety of pattern-recognition tasks, most notably visual classification problems. Given that DNNs are now able to classify objects in images with near-human-level performance, questions naturally arise as to what differences remain between computer and human vision. A recent study revealed that changing an image (e.g. of a lion) in a way imperceptible to humans can cause a DNN to label the image as something else entirely (e.g. mislabeling a lion a library). Here we show a related result: it is easy to produce images that are completely unrecognizable to humans, but that state-of-the-art DNNs believe to be recognizable objects with 99.99% confidence (e.g. labeling with certainty that white noise static is a lion). Specifically, we take convolutional neural networks trained to perform well on either the ImageNet or MNIST datasets and then find images with evolutionary algorithms or gradient ascent that DNNs label with high confidence as belonging to each dataset class. It is possible to produce images totally unrecognizable to human eyes that DNNs believe with near certainty are familiar objects, which we call fooling images (more generally, fooling examples). Our results shed light on interesting differences between human vision and current DNNs, and raise questions about the generality of DNN computer vision.|sl:arxiv_firstAuthor|Anh Nguyen
-Accident climatique|skos:broader|Catastrophe naturelle
-Lyrics|skos:broader|Musique
-OSEMA/DERI-Renault paper|skos:broader|fps: paper
-What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:tag|tag:arxiv_doc
-Hydra/Templated Links|skos:broader|Hydra
-each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the linking decisions. |skos:broader|learn a classifier f : X → Y that must predict novel values of Y that were omitted from the training set (classification under the restriction that the model cannot look at any examples from the target classes)
-Boura|skos:broader|Terre cuite
-Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|sl:tag|tag:backpropagation
-Linked Data publishing|skos:broader|RDF
-Gorille|skos:broader|Grands Singes
-Zinder : alimentation en eau|skos:broader|Chine / Afrique
-Redis|skos:broader|NOSQL
-Documentaire télé|skos:broader|TV
-One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|sl:arxiv_author|Shakir Mohamed
-Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:arxiv_firstAuthor|Andy Coenen
-GNU|skos:broader|Open Source
-Glaciologie|skos:broader|Glacier
-Vie sur Mars|skos:broader|Vie extraterrestre
-Rockart|skos:broader|Archéologie
-Government data as Linked Data|skos:broader|Linked Data
-Notes on Cardinal's Matrices These notes are motivated by the work of Jean-Paul Cardinal on symmetric matrices related to the Mertens function. He showed that certain norm bounds on his matrices implied the Riemann hypothesis. Using a different matrix norm we show an equivalence of the Riemann hypothesis to suitable norm bounds on his matrices in the new norm. Then we specify a deformed version of his Mertens function matrices that unconditionally satisfies a norm bound that is of the same strength as his Riemann hypothesis bound.|sl:tag|tag:arxiv_doc
-Google Cloud|skos:broader|Cloud
-Antibiotic resistance|skos:broader|Drug-resistant germs
-Semantic markup in HTML|skos:broader|HTML
-Blind relevance feedback|skos:broader|IR
-Word Representations via Gaussian Embedding Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space. Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages, including better capturing uncertainty about a representation and its relationships, expressing asymmetries more naturally than dot product or cosine similarity, and enabling more expressive parameterization of decision boundaries. This paper advocates for density-based distributed embeddings and presents a method for learning representations in the space of Gaussian distributions. We compare performance on various word embedding benchmarks, investigate the ability of these embeddings to model entailment and other asymmetric relationships, and explore novel properties of the representation.|sl:tag|tag:word_embedding
-From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|sl:arxiv_author|Jose Camacho-Collados
-Taxonomy expansion task|skos:broader|Knowledge Graph Completion
-Winch 5|skos:broader|Francis Pisani
-Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:arxiv_author|Fernanda Viégas
-Boris Johnson|skos:broader|Homme politique
-Tree of life|skos:broader|Evolution
-Looks like everything (up to 2020-07-14) refers to this [github project](doc:2020/07/ukplab_sentence_transformers_s), [paper (Sentence-BERT)](doc:2019/08/_1908_10084_sentence_bert_sen)|skos:broader|Bidirectional Encoder Representations from Transformers: pretraining technique for NLP. [Google AI blog post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) BERT is designed to pre-train deep bidirectional representations by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT representations can be fine-tuned with just one additional output layer BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction (but it has been shown in the RoBERTa paper that this training objective doesn’t help that much). The general BERT adaptation approach is to alter the model used for pre-training while retaining the transformer encoder layers. The model discards the layers used for the final prediction in the pre-training tasks and adds layers to predict the target task. All parameters are then fine tuned on the target task Builds on [#The Transformer](/tag/attention_is_all_you_need) Code and pre-trained models open-sourced on Nov 3rd, 2018.
-SemanticCampParis|skos:broader|Web sémantique sw
-Wordnet|skos:broader|Text Corpora and Lexical Resources
-Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. two methods: 1. Category Embedding model): it replaces the entities in the context with their directly labeled categories to build categories’ context; 2. Hierarchical Category Embedding: it further incorporates all ancestor categories of the context entities to utilize the hierarchical information. Due to the lack of structured knowledge applied in learning distributed representation of categories, existing work cannot incorporate category hierarchies into entity information. We propose a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. The framework allows to compute meaningful semantic relatedness between entities and categories. Our framework can handle both single-word concepts and multiple-word concepts with superior performance on concept categorization and yield state of the art results on dataless hierarchical classification.|sl:arxiv_firstAuthor|Yuezhang Li
-Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties we include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in a the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:tag|tag:google_research
-A Theoretical Analysis of Contrastive Unsupervised Representation Learning [blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) Recent empirical works have successfully used unlabeled data to learn feature representations that are broadly useful in downstream classification tasks. Several of these methods are reminiscent of the well-known word2vec embedding algorithm: leveraging availability of pairs of semantically similar data points and negative samples, the learner forces the inner product of representations of similar pairs with each other to be higher on average than with negative samples. The current paper uses the term contrastive learning for such algorithms and presents a theoretical framework for analyzing them by introducing latent classes and hypothesizing that semantically similar points are sampled from the same latent class. This framework allows us to show provable guarantees on the performance of the learned representations on the average classification task that is comprised of a subset of the same set of latent classes. Our generalization bound also shows that learned representations can reduce (labeled) sample complexity on downstream tasks. We conduct controlled experiments in both the text and image domains to support the theory.|sl:arxiv_firstAuthor|Sanjeev Arora
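A hedged sketch of the contrastive objective the row above analyzes (a similar pair's inner product scored against negative samples via a softmax); this is one common instantiation, not the paper's only formulation:

import numpy as np

def contrastive_loss(x, x_pos, x_negs):
    """Sketch: -log softmax probability of the positive pair among the
    negatives. Shapes: x and x_pos are (d,), x_negs is (k, d)."""
    logits = np.concatenate(([x @ x_pos], x_negs @ x))
    return float(np.log(np.exp(logits).sum()) - logits[0])

rng = np.random.default_rng(1)
unit = lambda v: v / np.linalg.norm(v)
x, x_pos = unit(rng.normal(size=64)), unit(rng.normal(size=64))
negs = rng.normal(size=(5, 64))
negs /= np.linalg.norm(negs, axis=1, keepdims=True)
print(contrastive_loss(x, x_pos, negs))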
-Covid19 : impréparation|skos:broader|Covid19
-Robotique|skos:broader|Technologie
-Loudness war|skos:broader|Musique
-Maria-Fac|skos:broader|Maria
-Commercialising the Semantic Web|skos:broader|Semantic Web
-Coursera|skos:broader|Online Course Materials
-ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:tag|tag:language_model
-Semantic Web blog|skos:broader|Blog
-Sylvain Gugger|skos:broader|AI girls and guys
-Madame Bovary|skos:broader|Flaubert
-Mami Wata|skos:broader|Afrique
-Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|sl:arxiv_author|Zhangyang Wang
-Cheatsheet|skos:broader|Dev tip
-Semantic Web propaganda|skos:broader|Semantic Web
-Minoen|skos:broader|Crète antique
-NLP: using Knowledge|skos:broader|NLP techniques
-Internet Explorer|skos:broader|Brouteur
-A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|sl:tag|tag:deep_learning
-Jena User Conference|skos:broader|Jena
-Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Caglar Gulcehre
-Java microframeworks|skos:broader|Java web dev
-ranking function used by search engines to rank matching documents according to their relevance to a given search query. Bag-of-words based. Algorithm used by default in [Elasticsearch](elasticsearch) and [Lucene](lucene) |skos:broader|formalism of information retrieval useful to derive functions that rank matching documents according to their relevance to a given search query.
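The row above describes BM25, the default ranking function in Lucene and Elasticsearch. A hedged sketch of the classic formula follows (k1=1.2 and b=0.75 are the commonly cited defaults; the exact IDF variant differs between implementations):

import math

def bm25_score(query_terms, doc_terms, doc_freq, n_docs, avg_len,
               k1=1.2, b=0.75):
    """Sketch of the classic BM25 formula. doc_freq maps a term to the
    number of documents containing it; avg_len is the mean doc length."""
    score = 0.0
    for t in query_terms:
        tf = doc_terms.count(t)
        if tf == 0:
            continue
        df = doc_freq.get(t, 0)
        idf = math.log(1 + (n_docs - df + 0.5) / (df + 0.5))
        length_norm = 1 - b + b * len(doc_terms) / avg_len
        score += idf * tf * (k1 + 1) / (tf + k1 * length_norm)
    return score

doc = "the quick brown fox jumps".split()
print(bm25_score(["fox", "lazy"], doc, {"fox": 12, "lazy": 40}, 1000, 20.0))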
We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Pushmeet Kohli
-Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:tag|tag:transfer_learning
-GBIF|skos:broader|Biodiversité
-Solr - autocomplete|skos:broader|Solr
-Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence.
As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:arxiv_doc -SparqlPress is a project. Primary ingredients are WordPress and SPARQL The goal for SparqlPress is easy-to-use, low-barrier-of-entry, access to the linked data web. There are two, intimately-related sides to the idea: producing data, and consuming it. One goal is to make it easy for Wordpress to expose more data in SPARQL-friendly form. Another is to make it easier to use a Wordpress installation as a personal, perhaps even private, local aggregation of such data.|skos:broader|Backed by the flexibility of the RDF data model, and consisting of both a query language and data access protocol SPARQL has the potential to become a key component in Web 2.0 applications. SPARQL could provide a common query language for all Web 2.0 applications. -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. 
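A minimal sketch of the two similarity views contrasted in the "Learning Semantic Similarity for Very Short Texts" entry above: sparse tf-idf matching, which scores zero when two short fragments share no words, versus averaged word vectors, which can still match them. scikit-learn and NumPy are assumed available; the random vectors stand in for trained embeddings.

```python
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

a, b = "cheap flight deals", "low cost air tickets"

# Term-matching view: no word overlap, so the tf-idf cosine is 0.
tfidf = TfidfVectorizer().fit_transform([a, b])
print(cosine_similarity(tfidf[0], tfidf[1])[0, 0])  # 0.0

# Dense view: average word vectors (random stand-ins for trained embeddings).
rng = np.random.default_rng(0)
emb = {w: rng.normal(size=50) for w in (a + " " + b).split()}
va = np.mean([emb[w] for w in a.split()], axis=0)
vb = np.mean([emb[w] for w in b.split()], axis=0)
print(float(va @ vb / (np.linalg.norm(va) * np.linalg.norm(vb))))
```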
Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|sl:tag|tag:tf_idf
-Classifier on top of a sentence2vec model. Main idea: the morphological structure of a word carries important information about the meaning of the word, which is not taken into account by traditional [word embeddings](/tag/word_embedding). This is especially significant for morphologically rich languages (German, Turkish) in which a single word can have a large number of morphological forms, each of which might occur rarely, thus making it hard to train good word embeddings. FastText attempts to solve this by treating each word as the aggregation of its subwords (uses character n-grams as features - avoids the OOV (out of vocabulary) problem) (FastText represents words as the sum of their n-gram representations trained with a skip-gram model) Embeddings learned using FastText (trained on wikipedia) are available in [many languages](https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md) |skos:broader|[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Parameterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear.
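A sketch of the subword idea in the FastText entry above: each word is wrapped in boundary markers and decomposed into character n-grams, and the word vector is the sum of the vectors of these units. The n-gram range mirrors fastText's common 3-to-6 default; the function name is ours.

```python
def char_ngrams(word, n_min=3, n_max=6):
    """Subword units in the FastText style: the word is wrapped in
    boundary markers and split into character n-grams; the word vector
    is then the sum of the vectors of these units."""
    w = f"<{word}>"
    grams = [w[i:i + n] for n in range(n_min, n_max + 1)
             for i in range(len(w) - n + 1)]
    return grams + [w]  # the full word is kept as one extra unit

print(char_ngrams("where", 3, 4))
# ['<wh', 'whe', 'her', 'ere', 're>', '<whe', 'wher', 'here', 'ere>', '<where>']
```

Because any unseen word still decomposes into known n-grams, this construction sidesteps the out-of-vocabulary problem the entry mentions.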
In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus. The mapping may be generated by training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The best-known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available on the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis.
-Yahoo - My Web 2.0|skos:broader|Web 2.0
-Haoussa|skos:broader|Peuples
-Tribunal Pénal International|skos:broader|Justice internationale
-OK, but where are the data about VW range? Partial stuff here: http://www.volkswagen.co.uk/new/polo-v/which-model/compare/interior |skos:broader|An ontology for linking product descriptions and business entities on the Web
-Web 2.0 application|skos:broader|Web 2.0
-Bas salaires|skos:broader|Economie
-GooglePlus|skos:broader|Google
-Mathématiques|skos:broader|Science
-Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:arxiv_firstAuthor|Denis Mazur
-Library of Alexandria|skos:broader|Alexandria
-SPARQL Tutorial|skos:broader|SPARQL
-JavaScript Promises|skos:broader|JavaScript
-Terrorisme islamiste|skos:broader|Extrémisme islamique
-Islam|skos:broader|Religion
-Cassini-Huygens|skos:broader|Cassini
-Linked Data Cache|skos:broader|Linked Data
-answering arbitrary context-independent questions (e.g.
well-known facts or historical details). Typically assumed that the model can access an external collection of knowledge (e.g. a structured knowledge base or unstructured text corpus) (~open-book exam)|skos:broader|For a description of the variants of this task, see this [paper](/doc/2020/02/how_much_knowledge_can_you_pack) - reading comprehension - open-domain QA - open-book exam
-Foxconn|skos:broader|Entreprise
-Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to 'vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:arxiv_author|Petr Sojka
-regression model where the dependent variable is categorical.|skos:broader|a statistical process for estimating the relationships among variables.
-Ajax applications|skos:broader|Ajax
-Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding.
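A toy version of the probabilistic contrastive loss described in the Contrastive Predictive Coding entry above, assuming PyTorch is available. This is a generic InfoNCE-style objective, not the authors' exact implementation: the anchor must score higher with its positive (e.g. a true future sample) than with negatives.

```python
import torch
import torch.nn.functional as F

def info_nce(anchor, positive, negatives, temperature=0.1):
    """InfoNCE-style contrastive loss: classify the positive against
    K negative samples, which is what makes the objective tractable."""
    pos = (anchor * positive).sum(-1, keepdim=True)          # (B, 1)
    neg = anchor @ negatives.t()                             # (B, K)
    logits = torch.cat([pos, neg], dim=-1) / temperature
    labels = torch.zeros(anchor.size(0), dtype=torch.long)   # positive is class 0
    return F.cross_entropy(logits, labels)

b, k, d = 8, 16, 128
loss = info_nce(torch.randn(b, d), torch.randn(b, d), torch.randn(k, d))
```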
The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|sl:arxiv_firstAuthor|Aaron van den Oord -Jena|skos:broader|RDF -Thrace|skos:broader|Antiquité -Jure Leskovec|skos:broader|NLP girls and guys -OS X app|skos:broader|Mac OS X -Unsupervised machine translation|skos:broader|Machine translation -OPML|skos:broader|Dave Winer -Oiseau|skos:broader|Animal -TripleStore|skos:broader|RDF and database -Tim Berners-Lee|skos:broader|SW guys (and girls) -Dallol|skos:broader|Volcan -Stanford POS Tagger|skos:broader|NLP tools -Big Data Tools|skos:broader|Big Data -République Tchèque|skos:broader|Pays d'Europe -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_author|Benjamin Lucas -What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). 
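The kind of head-by-head inspection performed in the "What Does BERT Look At?" entry above can be reproduced with the Hugging Face transformers library (assumed installed; weights are downloaded on first use, and the layer and head indices below are arbitrary picks, not the paper's).

```python
import torch
from transformers import AutoModel, AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased", output_attentions=True)

inputs = tok("The keys to the cabinet are on the table.", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)

# out.attentions: one (batch, heads, seq, seq) tensor per layer
layer, head = 8, 10  # arbitrary; the paper examines every head
att = out.attentions[layer][0, head]
tokens = tok.convert_ids_to_tokens(inputs["input_ids"][0])
for i, t in enumerate(tokens):
    print(t, "->", tokens[int(att[i].argmax())])  # most-attended token
```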
Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:arxiv_author|Omer Levy -BBC - Programmes|skos:broader|BBC semantic publishing -Javadoc|skos:broader|Developer documentation -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:tag|tag:unsupervised_machine_translation -Semantic CMS|skos:broader|Semantic technology -Vary Header|skos:broader|HTTP Cache -Plastic waste trade|skos:broader|Plastic -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. 
Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE.|sl:arxiv_author|Quan Wang
-KBPedia|skos:broader|Mike Bergman
-Fourier|skos:broader|Mathématicien
-PRISM|skos:broader|Big Brother
-Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:arxiv_author|Andy Coenen
-Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018.
The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Marcus Gomez
-iphone|skos:broader|Apple
-Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce kNN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a k-nearest neighbors (kNN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our kNN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:arxiv_author|Mike Lewis
-Database to RDF mapping|skos:broader|RDF
-FaceNet: A Unified Embedding for Face Recognition and Clustering Learns a Euclidean embedding per image Uses a deep CNN trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. state-of-the-art face recognition performance using only 128-bytes per face. Despite significant recent advances in the field of face recognition, implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors.
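A compact sketch of the triplet objective described in the FaceNet entry above, assuming PyTorch. Real training hinges on the online triplet mining the abstract mentions, which is omitted here; the random tensors stand in for embeddings produced by the CNN.

```python
import torch
import torch.nn.functional as F

def triplet_loss(anchor, positive, negative, margin=0.2):
    """FaceNet-style triplet loss: pull an embedding towards a matching
    face patch and push it away from a non-matching one, up to a margin."""
    d_pos = (anchor - positive).pow(2).sum(-1)
    d_neg = (anchor - negative).pow(2).sum(-1)
    return F.relu(d_pos - d_neg + margin).mean()

emb = lambda x: F.normalize(x, dim=-1)  # embeddings live on the unit sphere
a, p, n = (emb(torch.randn(32, 128)) for _ in range(3))
print(triplet_loss(a, p, n))
```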
Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. Our system cuts the error rate in comparison to the best published result by 30% on both datasets. We also introduce the concept of harmonic embeddings, and a harmonic triplet loss, which describe different versions of face embeddings (produced by different networks) that are compatible to each other and allow for direct comparison between each other.|sl:tag|tag:ml_google
-Berlin|skos:broader|Allemagne
-ML: evaluation|skos:broader|Machine learning: problems
-Scandale des écoutes en Allemagne|skos:broader|Espionnage
-fpservant@slideshare|skos:broader|fps
-Twitter|skos:broader|Microblogs
-AngularJS|skos:broader|Javascript framework
-Bacteria|skos:broader|Biology
-Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:tag|tag:arxiv_doc
-National Taiwan University|skos:broader|Université
-What's encoded by a NN|skos:broader|Neural networks
-Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincaré ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity.
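The distance underlying the Poincaré embeddings entry above, in a few lines of NumPy: points near the ball's boundary end up far from everything, which is how vector norm can encode depth in a latent hierarchy. The example points are illustrative.

```python
import numpy as np

def poincare_distance(u, v, eps=1e-9):
    """Distance in the Poincaré ball:
    d(u, v) = arcosh(1 + 2 ||u - v||^2 / ((1 - ||u||^2) (1 - ||v||^2)))."""
    uu = np.linalg.norm(u) ** 2
    vv = np.linalg.norm(v) ** 2
    uv = np.linalg.norm(u - v) ** 2
    return np.arccosh(1 + 2 * uv / ((1 - uu) * (1 - vv) + eps))

root, leaf = np.array([0.0, 0.05]), np.array([0.0, 0.95])
print(poincare_distance(root, leaf))  # ~3.6, far more than the 0.9 Euclidean gap
```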
We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincaré embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|sl:arxiv_firstAuthor|Maximilian Nickel
-Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|sl:arxiv_author|Graeme Hirst
-Livesearch|skos:broader|Ajax
-nonlinear dimensionality reduction technique that is particularly well suited for embedding high-dimensional data into a space of two or three dimensions, which can then be visualized in a scatter plot. Specifically, it models each high-dimensional object by a two- or three-dimensional point in such a way that similar objects are modeled by nearby points and dissimilar objects are modeled by distant points.|skos:broader|process of reducing the number of random variables under consideration. Can be divided into feature selection and feature extraction.
-Uncertainty Reasoning AND Semantic Web|skos:broader|Semantic Web
-C2GWeb, Product description and Makolab|skos:broader|Mirek Sopek
-NASA|skos:broader|Exploration spatiale
-Programming language|skos:broader|Informatique
-CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein.
Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE.|sl:tag|tag:link_prediction
-Erta Ale|skos:broader|Lac de lave
-FIBO|skos:broader|Financial Data
-Google car|skos:broader|Driverless car
-Juliana Rotich|skos:broader|New Africa
-Entities to topics|skos:broader|Entities
-Universités françaises|skos:broader|Enseignement supérieur
-Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|sl:tag|tag:arxiv_doc
-Intel|skos:broader|Entreprise
-De-extinction|skos:broader|Disparition d'espèces
-AI: limits|skos:broader|Artificial Intelligence
-Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity.
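The three-step loop in the "Semi-supervised Clustering for Short Text" entry above alternates assignment, centroid re-estimation, and an encoder update. A NumPy sketch of the first two steps, with random vectors standing in for the learned text representations and step (3) only indicated:

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 64))                       # stand-in text representations
C = X[rng.choice(len(X), 5, replace=False)].copy()   # 5 initial centroids

for _ in range(10):
    # (1) assign each short text to its nearest centroid
    d = ((X[:, None, :] - C[None, :, :]) ** 2).sum(-1)
    assign = d.argmin(axis=1)
    # (2) re-estimate the cluster centroids from the assignments
    for k in range(len(C)):
        if (assign == k).any():
            C[k] = X[assign == k].mean(axis=0)
    # (3) in the paper, the encoder network would be updated here,
    #     keeping centroids and assignments fixed; omitted in this sketch
```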
In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|sl:arxiv_firstAuthor|Xiang Lisa Li
-Sigma.js|skos:broader|JavaScript
-French Semantic web company|skos:broader|Semantic web company
-Notes d'install|skos:broader|fps notes
-Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't). Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|sl:arxiv_author|Ido Dagan
-Universités françaises|skos:broader|Université
-Satori|skos:broader|Semantic Web
-LinkTo Semanlink|skos:broader|About Semanlink
-Building Machines That Learn and Think Like People we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations.
We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.|sl:tag|tag:human_like_ai -AI 4 IP|skos:broader|NLP + juridique -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:tag|tag:ronan_collobert -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|sl:arxiv_author|Quoc V. Le -Trou noir|skos:broader|Gravitation -RSS Dev|skos:broader|Web dev -gensim|skos:broader|NLP tools -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. 
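The retrofitting entry above describes a post-processing loop over a lexicon graph. A small sketch of the usual update rule, in which each new vector is a weighted average of its original embedding and the current vectors of its lexicon neighbours (the hyperparameters alpha and beta are illustrative):

```python
import numpy as np

def retrofit(vectors, lexicon, iterations=10, alpha=1.0, beta=1.0):
    """Iteratively pull each vector towards its lexicon neighbours while
    anchoring it to its original (pre-trained) embedding."""
    new = {w: v.copy() for w, v in vectors.items()}
    for _ in range(iterations):
        for word, neighbours in lexicon.items():
            nbrs = [n for n in neighbours if n in new]
            if word not in vectors or not nbrs:
                continue
            new[word] = (alpha * vectors[word]
                         + beta * sum(new[n] for n in nbrs)) / (alpha + beta * len(nbrs))
    return new

vecs = {"happy": np.array([1.0, 0.0]), "glad": np.array([0.0, 1.0])}
print(retrofit(vecs, {"happy": ["glad"], "glad": ["happy"]})["happy"])
```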
This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:tag|tag:arxiv_doc -IPython notebook|skos:broader|IPython -Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. These findings are significant as it suggests a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|sl:arxiv_firstAuthor|Peter Clark -WSDL|skos:broader|Service description -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. 
It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|sl:tag|tag:nlp_microsoft -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|sl:arxiv_firstAuthor|Junyuan Xie -Reto Bachmann-Gmür|skos:broader|SW guys (and girls) -Amazon Mechanical Turk|skos:broader|Web marchand -Rijksmuseum|skos:broader|Musée -Enseignement français|skos:broader|Education -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. 
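Once the "Word Translation Without Parallel Data" entry above has aligned two monolingual embedding spaces adversarially, the mapping is typically refined with orthogonal Procrustes. A NumPy sketch, where the synthetic rotation stands in for a real source/target embedding pair:

```python
import numpy as np

def procrustes(X, Y):
    """Orthogonal Procrustes: the best rotation W mapping source vectors X
    onto target vectors Y in the least-squares sense. In the unsupervised
    setting, the dictionary pairing the rows of X and Y comes from
    adversarial alignment rather than from parallel data."""
    U, _, Vt = np.linalg.svd(Y.T @ X)
    return U @ Vt

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 300))             # source-language embeddings
R = np.linalg.qr(rng.normal(size=(300, 300)))[0]
Y = X @ R.T                                  # target = rotated source
W = procrustes(X, Y)
print(np.allclose(X @ W.T, Y, atol=1e-6))    # True: the rotation is recovered
```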
Our code, embeddings and dictionaries are publicly available.|sl:tag|tag:machine_translation -Histoire du monde|skos:broader|Histoire -Graph-based Semi-Supervised Learning|skos:broader|Semi-supervised learning -Héliosphère|skos:broader|Système solaire -Solid|skos:broader|Data ownership -SemWeb Pro 2011|skos:broader|SemWeb Pro -Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|sl:arxiv_firstAuthor|Artur d'Avila Garcez -Semantic Web Outliner|skos:broader|Semantic Web : Tools -NLP conference|skos:broader|NLP event -XSPARQL|skos:broader|XQuery -ISP|skos:broader|Internet -Extinction de masse|skos:broader|Extinction d'espèces -Internet en Afrique|skos:broader|Afrique -Christine Golbreich|skos:broader|Technical girls and guys -Crustacé|skos:broader|Animal -Belgique|skos:broader|Europe -Carte d'identité|skos:broader|Divers -Hash-bang URIs|skos:broader|Hash URIs -Peter Patel-Schneider|skos:broader|Technical girls and guys -RDF and social networks|skos:broader|Social Networks -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? 
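A minimal PyTorch sketch of the architecture named in the Concept Bottleneck Models entry above: the label is predicted from an interpretable concept layer, which can be overwritten at test time. The class and argument names are ours, not the authors'.

```python
import torch
import torch.nn as nn

class ConceptBottleneck(nn.Module):
    """Inputs are first mapped to human-interpretable concept scores, and
    the label is predicted from those concepts alone, so a wrong concept
    can be corrected at test time (an 'intervention')."""
    def __init__(self, n_in, n_concepts, n_labels):
        super().__init__()
        self.to_concepts = nn.Linear(n_in, n_concepts)
        self.to_label = nn.Linear(n_concepts, n_labels)

    def forward(self, x, concept_override=None):
        c = torch.sigmoid(self.to_concepts(x))
        if concept_override is not None:  # test-time intervention
            c = concept_override
        return self.to_label(c), c

model = ConceptBottleneck(64, 10, 4)
y, c = model(torch.randn(2, 64))
```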
State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:tag|tag:human_in_the_loop -Learning to rank|skos:broader|Ranking (information retrieval) -Recette de cuisine|skos:broader|Gastronomie -Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincaré ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincaré embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|sl:tag|tag:arxiv_doc -Nebra Sky Disc|skos:broader|Archéologie européenne -Brouteur|skos:broader|Web -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time.
We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:arxiv_author|Jens Lehmann -Hindu/Muslim riots|skos:broader|Guerres de religion -Voûte nubienne|skos:broader|Architecture en terre -SKOS|skos:broader|RDF Vocabularies -Lune|skos:broader|Système solaire -Music source separation|skos:broader|Digital Audio -What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:tag|tag:chris_manning -Technique de l'insecte stérile|skos:broader|Insecte -RDF browser|skos:broader|RDF -Theoretical Impediments to Machine Learning With Seven Sparks from the Causal Revolution To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks Current machine learning systems operate, almost exclusively, in a statistical, or model-free mode, which entails severe theoretical limits on their power and performance. Such systems cannot reason about interventions and retrospection and, therefore, cannot serve as the basis for strong AI. To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks. To demonstrate the essential role of such models, I will present a summary of seven tasks which are beyond reach of current machine learning systems and which have been accomplished using the tools of causal modeling.|sl:tag|tag:machine_learning -John Sofakolle|skos:broader|Musique du Niger -inductive biases which impose constraints on relationships and interactions among entities in a learning process |skos:broader|learning bias: the set of assumptions that a model makes in order to generalize to new inputs. 
An inductive bias allows a learning algorithm to prioritize one solution (or interpretation) over another, independent of the observed data (Mitchell, 1980). In a Bayesian model, inductive biases are typically expressed through the choice and parameterization of the prior distribution [source](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1806.01261) -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:arxiv_author|Siddhant M. Jayakumar -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|sl:arxiv_author|Keith Adams -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. 
Finally, we highlight several challenges in the area and discuss promising directions for future work.|sl:tag|tag:attention_in_graphs -Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|sl:arxiv_author|Feng Jiang -ATOM (Text editor)|skos:broader|Text Editor -TheWebConf 2018|skos:broader|TheWebConf -Minéralogie|skos:broader|Géologie -NN tips|skos:broader|Neural networks -Tsunami|skos:broader|Catastrophe naturelle -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. 
Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:tag|tag:arxiv_doc -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|sl:arxiv_author|Christopher Olah -Maxent models|skos:broader|NLP techniques -Apache Spark|skos:broader|Machine Learning tool -css example|skos:broader|css -A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. 
Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|sl:arxiv_firstAuthor|David Charte -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:tag|tag:thought_vector -Software|skos:broader|Technologie -Metropolitan Museum of Art|skos:broader|Musée -Désobéissance civile|skos:broader|Esprit de résistance -Part Of Speech Tagging|skos:broader|NLP tasks / problems -Championnat du monde|skos:broader|Sport -Jena TDB|skos:broader|TripleStore -Text to SQL|skos:broader|NLP tasks / problems -Edgar Morin|skos:broader|Intellectuel -EventKG: A Multilingual Event-Centric Temporal Knowledge Graph 690 thousand contemporary and historical events and over 2.3 million temporal relations One of the key requirements to facilitate semantic analytics of information regarding contemporary and historical events on the Web, in the news and in social media is the availability of reference knowledge repositories containing comprehensive representations of events and temporal relations. Existing knowledge graphs, with popular examples including DBpedia, YAGO and Wikidata, focus mostly on entity-centric information and are insufficient in terms of their coverage and completeness with respect to events and temporal relations. 
EventKG presented in this paper is a multilingual event-centric temporal knowledge graph that addresses this gap. EventKG incorporates over 690 thousand contemporary and historical events and over 2.3 million temporal relations extracted from several large-scale knowledge graphs and semi-structured sources and makes them available through a canonical representation.|sl:tag|tag:knowledge_graph -Knowledge Graphs|skos:broader|Knowledge Representation -Bush|skos:broader|Président des USA -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O(L^2) to O(L log L), where L is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of N times, where N is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|sl:arxiv_author|Anselm Levskaya -CamemBERT|skos:broader|NLP: French -Kleenex|skos:broader|Société de consommation -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|sl:arxiv_author|Shuming Shi -Javadoc|skos:broader|Java -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|sl:tag|tag:named_entity_recognition -Gene editing|skos:broader|Manipulations génétiques -Pre-Trained Language Models|skos:broader|NLP: pretraining -XSL|skos:broader|XML -No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity...
Traditionally, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer’s output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity. Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize. A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow. In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved.
As a result, the proxy-loss improves on state-of-the-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:arxiv_author|Saurabh Singh -Hugo|skos:broader|Ecrivain -Indiens du Brésil|skos:broader|Brésil -Microdata|skos:broader|HTML Data -GINCO (Culture)|skos:broader|Semantic Web : Application -Mapping data from spreadsheets to RDF|skos:broader|Converting data into RDF -DeleteFB|skos:broader|Facebook -Thomas Piketty|skos:broader|Economiste -Jupiter|skos:broader|Système solaire -Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content characterizing, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model comparing with the state-of-the-art models.|sl:arxiv_author|Tong Wang -Offshore leaks|skos:broader|Paradis fiscaux -Named Entity Recognition with Extremely Limited Data Named Entity Search (NES) We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries. We do not propose this as a replacement for NER, but as something to be used for an ephemeral or contextual class of entity, when it does not make sense to label hundreds or thousands of instances to learn a classifier Traditional information retrieval treats named entity recognition as a pre-indexing corpus annotation task, allowing entity tags to be indexed and used during search. Named entity taggers themselves are typically trained on thousands or tens of thousands of examples labeled by humans. However, there is a long tail of named entities classes, and for these cases, labeled data may be impossible to find or justify financially.
We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries.|sl:tag|tag:named_entity_recognition -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT is unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|sl:tag|tag:bert_and_sentence_embeddings -Hindu/Muslim riots|skos:broader|Inde -Quantum computing|skos:broader|Mécanique quantique -Yahoo!|skos:broader|Internet -RDF in HTML|skos:broader|Web sémantique sw -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining.
Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|sl:arxiv_author|Yiming Yang -Géométrie|skos:broader|Mathématiques -Genome editing|skos:broader|Manipulations génétiques -Coupe du monde 2018|skos:broader|Coupe du monde de football -Lucene|skos:broader|Text Search -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_firstAuthor|Aidan Hogan -Extrémisme islamique|skos:broader|Fondamentalisme islamique -RFID passports|skos:broader|RFID -Patricia Highsmith|skos:broader|Ecrivain -Baobab|skos:broader|Arbres -Semantic Overflow|skos:broader|Semantic Web -Feature hashing ("Hashing trick")|skos:broader|Machine learning: techniques -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval.
We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|sl:arxiv_author|Luke Zettlemoyer -Vidéosurveillance|skos:broader|Etat policier -Gaël de Chalendar|skos:broader|NLP girls and guys -Semantic Web P2P|skos:broader|Semantic Web : Application -Embedding evaluation|skos:broader|Embeddings -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|sl:arxiv_author|Shuailong Liang -Quantum biology|skos:broader|Mécanique quantique -Feature extraction|skos:broader|Machine learning: techniques -ReactJS|skos:broader|JavaScript framework -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:tag|tag:survey -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture.
Our approach thus shows promise in integrating logical structures within deep learning.|sl:arxiv_author|Priya L. Donti -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|sl:tag|tag:word_embedding -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|sl:tag|tag:embeddings -Dan Connolly|skos:broader|SW guys (and girls) -RNN based Language Model|skos:broader|Recurrent neural network -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations.
We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|sl:arxiv_firstAuthor|David Lopez-Paz -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|sl:arxiv_author|Kuan-Chieh Wang -Energie du vide|skos:broader|Enigmes de la physique -Ajax|skos:broader|Web dev -Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|sl:arxiv_author|William W. Cohen -303-redirect|skos:broader|Concept's URI -Cédric Villani|skos:broader|Médaille Fields -Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL).
Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|sl:tag|tag:nlp_microsoft -Maïs OGM|skos:broader|OGM -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:arxiv_author|Ann Yuan -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:arxiv_author|Ji Ma -Semantic Blog|skos:broader|Blog -Moteur de recherche|skos:broader|IR -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation.
Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|sl:arxiv_firstAuthor|Ying Zhang -Slot tagging|skos:broader|NLP tasks / problems -D3js|skos:broader|Data Visualization Tools -Google App Engine|skos:broader|Web dev -Le Pen|skos:broader|Homme politique -AI: business perspectives|skos:broader|Artificial Intelligence -Chirac ami des Africains|skos:broader|Chirac -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:tag|tag:frequently_cited_paper -Théatre|skos:broader|Art -Paris NLP meetup|skos:broader|Paris -Variational Bayesian methods|skos:broader|Machine learning: techniques -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. 
SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:arxiv_author|Cody Coleman -Histropedia|skos:broader|Timeline -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture.
Our approach thus shows promise in integrating logical structures within deep learning.|sl:arxiv_firstAuthor|Po-Wei Wang -Brexit|skos:broader|Europe and UK -KnowBert|skos:broader|Allen Institute for AI (A2I) -Cookie|skos:broader|Web app dev -Norilsk|skos:broader|Arctique -OGM|skos:broader|Manipulations génétiques -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Charles Nash -Photo|skos:broader|Divers -Java library|skos:broader|Java -Sportif|skos:broader|Sport -Moussa Poussi|skos:broader|Musique du Niger -Open Knowledge Network|skos:broader|Knowledge Graphs -Leonardo da Vinci|skos:broader|Peintre -Entity Linking often relies on rich structures and properties in the target knowledge base (KB). However, in many applications, the KB may be as simple and sparse as lists of names of the same type (e.g., lists of products) - the List-only entity linking problem |skos:broader|= named entity disambiguation: the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base.
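(Aside on the data format, for readers skimming this patch: every row above is a single head|relation|tail triple, with skos:broader encoding the tag hierarchy and sl:* properties carrying arXiv metadata. Below is a minimal, hypothetical Python sketch of how such rows can be consumed; parse_triples and the file path are illustrative and not part of this patch. Since a head field holding a full abstract may itself contain '|', the sketch splits from the right, assuming the relation and tail never do.)

```python
import collections

def parse_triples(path):
    # Hypothetical helper (not part of this patch): yields (head, relation, tail)
    # from a '|'-separated triple file. Splitting from the right assumes the
    # relation and tail contain no '|'; the head (e.g. a full abstract) may.
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                head, relation, tail = line.rsplit("|", 2)
                yield head, relation, tail

# Example: distribution of relations in a triple file (path is illustrative).
counts = collections.Counter(r for _, r, _ in parse_triples("train.csv"))
print(counts.most_common(5))
```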
-fast.ai|skos:broader|Deep Learning -Yagán|skos:broader|Peuples -Bombe atomique|skos:broader|Armement -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|sl:tag|tag:multi_label_classification -Faim|skos:broader|Grands problèmes -Semantic Hashing|skos:broader|NN 4 NLP -Logic|skos:broader|Mathématiques -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:tag|tag:geometry_of_language_embeddings -BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. 
They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|sl:arxiv_author|Qian Chen -A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|sl:arxiv_author|María J. del Jesus -KBPedia|skos:broader|Knowledge Graphs -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). 
Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:tag|tag:sparse_dictionary_learning -Média conversationnel|skos:broader|Médias -Stefan Zweig|skos:broader|Intellectuel -Second Life|skos:broader|Massively multiplayer online games -Jena : Introduction|skos:broader|Jena -Le gouvernement Chirac est trop con|skos:broader|Gouvernement Chirac -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objectiv. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. 
Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|sl:arxiv_firstAuthor|Liunian Harold Li -Theoretical Impediments to Machine Learning With Seven Sparks from the Causal Revolution To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks Current machine learning systems operate, almost exclusively, in a statistical, or model-free mode, which entails severe theoretical limits on their power and performance. Such systems cannot reason about interventions and retrospection and, therefore, cannot serve as the basis for strong AI. To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks. To demonstrate the essential role of such models, I will present a summary of seven tasks which are beyond reach of current machine learning systems and which have been accomplished using the tools of causal modeling.|sl:tag|tag:artificial_general_intelligence -AI black box|skos:broader|Explainable AI -Apache OpenNLP|skos:broader|NLP tools -Representation Learning for NLP|skos:broader|Representation learning -Maximum Entropy Classifier Softmax regression|skos:broader|Multiclass classification -GINCO (Culture)|skos:broader|Ministère de la culture -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred. Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|sl:arxiv_author|Tobias Weyand -Zitgist|skos:broader|Linked Data -Pont couvert|skos:broader|Pont -Pauvreté|skos:broader|Grands problèmes -fps@EC-Web'14|skos:broader|C2GWeb and Product description -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. 
Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|sl:arxiv_author|Ilya Razenshteyn -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:arxiv_author|Anton Bakhtin -Resources-Oriented Web Services|skos:broader|Schema.org Actions -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. 
Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:arxiv_author|Roshan Sumbaly -NLTK|skos:broader|Python-NLP -Classification du vivant|skos:broader|Biologie -Mali|skos:broader|Sahel -BERT|skos:broader|Pre-Trained Language Models -Philosophe|skos:broader|Penseur -Film allemand|skos:broader|Film -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. 
In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|sl:tag|tag:knowledge_distillation -Semanlink: archives|skos:broader|Semanlink -FAQ|skos:broader|Q&A -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:tag|tag:semantic_hashing -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. These information have been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. 
We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:arxiv_author|Minji Seo -ATOM (Text editor)|skos:broader|GitHub -WWW 2013|skos:broader|Rio de Janeiro -MOOC|skos:broader|Online Learning -Mapping data from spreadsheets to RDF|skos:broader|Spreadsheets -xgboost|skos:broader|Gradient boosting -Épidémie|skos:broader|Maladie -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|sl:arxiv_author|Chien-Chun Ni -CIA|skos:broader|Services secrets -Deep Learning frameworks|skos:broader|Deep Learning -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. 
In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:arxiv_author|Sameer Singh -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|sl:arxiv_author|Huasha Zhao -Perceptron|skos:broader|Linear classifier -Jeux Olympiques|skos:broader|Sport -Relativité générale|skos:broader|Relativité -Maven-Eclipse on My mac|skos:broader|Eclipse -Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.|sl:arxiv_firstAuthor|Rik Koncel-Kedziorski -Deep Patent Landscaping Model Using Transformer and Graph Embedding a transformer encoder for analyzing textual data present in patent documents and a graph convolutional network for analyzing patent metadata. A benchmarking dataset for patent landscaping based on patent trends reports published by the Korean Patent Office. 
Data acquisition using Google's BigQuery public datasets. 10% improvement comparing to Google’s proposed Automated Patent Landscaping. Empirical analysis of the importance of features (text vs metadata, citations vs classification) Patent landscaping is a method used for searching related patents during a research and development (R&D) project. To avoid the risk of patent infringement and to follow current trends in technology, patent landscaping is a crucial task required during the early stages of an R&D project. As the process of patent landscaping requires advanced resources and can be tedious, the demand for automated patent landscaping has been gradually increasing. However, a shortage of well-defined benchmark datasets and comparable models makes it difficult to find related research studies. In this paper, we propose an automated patent landscaping model based on deep learning. To analyze the text of patents, the proposed model uses a modified transformer structure. To analyze the metadata of patents, we propose a graph embedding method that uses a diffusion graph called Diff2Vec. Furthermore, we introduce four benchmark datasets for comparing related research studies in patent landscaping. The datasets are produced by querying Google BigQuery, based on a search formula from a Korean patent attorney. The obtained results indicate that the proposed model and datasets can attain state-of-the-art performance, as compared with current patent landscaping models.|sl:tag|tag:patent_landscaping -RDF forms|skos:broader|Semantic Web Services -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. 
Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Daniel Cer -Semantic Text Matching|skos:broader|Identification of similar documents -Object Oriented Programming|skos:broader|Programming -Automobile 2.0|skos:broader|Automobile -Semanlink2 related|skos:broader|Semanlink2 -Big Brother|skos:broader|Etat policier -Google Groups|skos:broader|Google -Drupal/RDF|skos:broader|Linked Data -- [Home Page](https://www.fast.ai/) - [MOOC](https://course.fast.ai/) - [Github](https://github.com/fastai/fastai) - [Forum](https://forums.fast.ai/) - [docs.fast.ai](https://docs.fast.ai/) |skos:broader|a set of algorithms in machine learning that attempt to model high-level abstractions in data by using architectures composed of multiple non-linear transformations. Deep learning is part of a broader family of machine learning methods based on learning representations of data. One of the promises of deep learning is replacing handcrafted features with efficient algorithms for unsupervised or semi-supervised feature learning and hierarchical feature extraction With Deep Learning, Ng says, you just give the system a lot of data so it can discover by itself what some of the concepts in the world are ([cf.](http://www.wired.com/2013/05/neuro-artificial-intelligence/all/)) -Crimes de l'église catholique|skos:broader|Horreur -Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|sl:tag|tag:language_model -Coursera: Machine Learning|skos:broader|Coursera - Deep contextualized word representations each word is assigned a representation which is a function of the entire corpus sentences to which they belong. The embeddings are computed from the internal states of a two-layers bidirectional Language Model, hence the name “ELMo”: Embeddings from Language Models. [Github](https://github.com/allenai/bilm-tf) |skos:broader|[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. 
A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Parameterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus. The mapping may be generated by training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The best-known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available on the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|sl:arxiv_author|Quoc V. Le -Changement climatique|skos:broader|Climat -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry.
Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:tag|tag:vector_space_model -Guêpe|skos:broader|Insecte -jersey|skos:broader|RESTful Web Services -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|sl:tag|tag:information_retrieval -Servlet 3.0|skos:broader|Servlet -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. 
Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:tag|tag:attention_is_all_you_need -Semi-supervised learning|skos:broader|Machine learning -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. 
Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:arxiv_author|Pedro Alonso -Metagenomics|skos:broader|Genetics Génétique -Administration française|skos:broader|France -Windows Media Player|skos:broader|Media Player -Vie extraterrestre|skos:broader|Biology -Perse|skos:broader|Antiquité iranienne -Google Brain|skos:broader|AI@Google -Hydra|skos:broader|Service description -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:arxiv_author|Colin Raffel -Croisades|skos:broader|Moyen-âge -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hum, j'ai déjà vu ça quelque part) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|sl:tag|tag:knowledge_graph_completion -Deep Patent Landscaping Model Using Transformer and Graph Embedding a transformer encoder for analyzing textual data present in patent documents and a graph convolutional network for analyzing patent metadata. A benchmarking dataset for patent landscaping based on patent trends reports published by the Korean Patent Office. 
Data acquisition using Google's BigQuery public datasets. 10% improvement comparing to Google’s proposed Automated Patent Landscaping. Empirical analysis of the importance of features (text vs metadata, citations vs classification) Patent landscaping is a method used for searching related patents during a research and development (R&D) project. To avoid the risk of patent infringement and to follow current trends in technology, patent landscaping is a crucial task required during the early stages of an R&D project. As the process of patent landscaping requires advanced resources and can be tedious, the demand for automated patent landscaping has been gradually increasing. However, a shortage of well-defined benchmark datasets and comparable models makes it difficult to find related research studies. In this paper, we propose an automated patent landscaping model based on deep learning. To analyze the text of patents, the proposed model uses a modified transformer structure. To analyze the metadata of patents, we propose a graph embedding method that uses a diffusion graph called Diff2Vec. Furthermore, we introduce four benchmark datasets for comparing related research studies in patent landscaping. The datasets are produced by querying Google BigQuery, based on a search formula from a Korean patent attorney. The obtained results indicate that the proposed model and datasets can attain state-of-the-art performance, as compared with current patent landscaping models.|sl:tag|tag:arxiv_doc -paggr|skos:broader|RDF Application -AI Conference|skos:broader|Artificial Intelligence -Deep Learning Book|skos:broader|Deep Learning -mSpace|skos:broader|Semantic Web : Application -Markets are efficient if and only if P = NP Hmm wow I prove that if markets are weak-form efficient, meaning current prices fully reflect all information available in past prices, then P = NP, meaning every computational problem whose solution can be verified in polynomial time can also be solved in polynomial time. I also prove the converse by showing how we can program the market to solve NP-complete problems. Since P probably does not equal NP, markets are probably not efficient. Specifically, markets become increasingly inefficient as the time series lengthens or becomes more frequent. An illustration by way of partitioning the excess returns to momentum strategies based on data availability confirms this prediction.|sl:tag|tag:markets -TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. 
To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification. LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. We also perform a comprehensive analysis to demonstrate great future opportunities. The data and code of the dataset are provided in \\url{https://github.com/wenhuchen/Table-Fact-Checking}.|sl:arxiv_author|Xiyou Zhou -Uriqr|skos:broader|Linked Data -Antiquité du Pakistan|skos:broader|Antiquité -Immune system Système immunitaire|skos:broader|Biology -Configuration as Linked Data|skos:broader|Linked Data: application -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hum, j'ai déjà vu ça quelque part) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|sl:tag|tag:pre_trained_language_models -Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content charactering, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this prob- lem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn se- mantically representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo- texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. 
The experiments on real-world datasets validate the effectiveness of our model comparing with the state-of-the-art models.|sl:tag|tag:arxiv_doc -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|sl:tag|tag:arxiv_doc -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. 
In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:arxiv_author|Hervé Jégou -C2GWeb-JS|skos:broader|C2GWeb -Semantic web and AI|skos:broader|Artificial Intelligence -HTML parsing|skos:broader|HTML Dev -Google Spreadsheets|skos:broader|Google -Capsule networks|skos:broader|Neural networks -Linked Data Service|skos:broader|Linked Data -FN|skos:broader|Politique française -JSON-LD frame|skos:broader|JSON-LD -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:tag|tag:attention_is_all_you_need -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. 
We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|sl:arxiv_author|Ronald Kemker -Berlin|skos:broader|Ville -Social Networks|skos:broader|Internet -Download & Execute Javascript|skos:broader|JavaScript -Physique|skos:broader|Science -Pre-Trained Language Models|skos:broader|Transfer learning in NLP -Écologie|skos:broader|Environnement -Poincaré Embeddings|skos:broader|NLP@Facebook -A Theoretical Analysis of Contrastive Unsupervised Representation Learning [blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) Recent empirical works have successfully used unlabeled data to learn feature representations that are broadly useful in downstream classification tasks. Several of these methods are reminiscent of the well-known word2vec embedding algorithm: leveraging availability of pairs of semantically similar data points and negative samples, the learner forces the inner product of representations of similar pairs with each other to be higher on average than with negative samples. The current paper uses the term contrastive learning for such algorithms and presents a theoretical framework for analyzing them by introducing latent classes and hypothesizing that semantically similar points are sampled from the same latent class. This framework allows us to show provable guarantees on the performance of the learned representations on the average classification task that is comprised of a subset of the same set of latent classes. Our generalization bound also shows that learned representations can reduce (labeled) sample complexity on downstream tasks. We conduct controlled experiments in both the text and image domains to support the theory.|sl:arxiv_author|Sanjeev Arora -Steve Jobs|skos:broader|Apple -C2GWeb on the web|skos:broader|C2GWeb -1789|skos:broader|Révolution française -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties we include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in a the symbolic knowledge graph. 
TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:tag|tag:knowledge_graph_deep_learning -Photo journalisme|skos:broader|Photo -RDF Application|skos:broader|Semantic Web : Application -Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \compositional\ training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|sl:tag|tag:arxiv_doc -HTML|skos:broader|Dev -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. 
Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:arxiv_author|Sahand Sharifzadeh -Calais|skos:broader|Semantic Web : Application -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:tag|tag:word_embedding -Transnets|skos:broader|Blog -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. 
Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|sl:tag|tag:arxiv_doc -Mallet|skos:broader|NLP tools -EDF|skos:broader|Entreprise -slides fps|skos:broader|Slides -Patel-Schneider|skos:broader|Technical guys -AI@Stanford|skos:broader|Stanford -Michael Rakowitz|skos:broader|Artiste -Knowledge Graphs + Text KG + NLP|skos:broader|Natural Language Processing -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|sl:arxiv_firstAuthor|Rafal Jozefowicz -Semantic Web blog|skos:broader|Semantic Web -Prix Nobel d'économie|skos:broader|Prix Nobel -Histoire anglaise|skos:broader|Histoire -Film espagnol|skos:broader|Espagne -snorql|skos:broader|dbpedia -A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|sl:tag|tag:survey -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. 
Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:arxiv_author|Nicholas Carlini -Cocoon|skos:broader|Java -Kristallnacht|skos:broader|Nazisme -Voiture à hydrogène|skos:broader|Automotive -faiss|skos:broader|Library (code) -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed- forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:tag|tag:arxiv_doc -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. 
Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|sl:arxiv_author|Purushottam Kar -Planet under pressure|skos:broader|Écologie -Car Options Ontology|skos:broader|GoodRelations -Java tip|skos:broader|Java -Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|sl:arxiv_author|Michael Spranger -Afrique médiévale|skos:broader|Histoire de l'Afrique -Etudes scientifiques|skos:broader|sciences -INRIA|skos:broader|France -Jena rules|skos:broader|Jena -Génomique|skos:broader|Biotechnologies Biotechnologies -FaceNet: A Unified Embedding for Face Recognition and Clustering Learns a Euclidean embedding per image Uses a deep CNN trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. state-of-the-art face recognition performance using only 128-bytes per face. Despite significant recent advances in the field of face recognition, implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors. Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. 
To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. Our system cuts the error rate in comparison to the best published result by 30% on both datasets. We also introduce the concept of harmonic embeddings, and a harmonic triplet loss, which describe different versions of face embeddings (produced by different networks) that are compatible to each other and allow for direct comparison between each other.|sl:tag|tag:siamese_network -What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:tag|tag:what_s_encoded_by_a_nn -Snorkel|skos:broader|Training Data (NLP) -TripleStore|skos:broader|Semantic Web: databases -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:tag|tag:google_deepmind -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. 
In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:arxiv_author|George E. Dahl -First Americans|skos:broader|Civilisations précolombiennes -OpenLink Ajax Toolkit (OAT)|skos:broader|Kingsley Idehen -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. 
We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|sl:tag|tag:ml_google -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Victoria Langston -Drupal|skos:broader|Website: creation -General Motors|skos:broader|Entreprise -Grèce|skos:broader|Europe -Google Patents|skos:broader|Patent -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. 
Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:arxiv_author|Han Zhang -Semantic Web : critique|skos:broader|Semantic Web -Supernova|skos:broader|Explosions cosmiques -Topic Modeling|skos:broader|NLP techniques -Uncertainty in Deep Learning|skos:broader|Uncertainty Reasoning -Macaw: An Extensible Conversational Information Seeking Platform Conversational information seeking (CIS) has been recognized as a major emerging research area in information retrieval. Such research will require data and tools, to allow the implementation and study of conversational systems. This paper introduces Macaw, an open-source framework with a modular architecture for CIS research. Macaw supports multi-turn, multi-modal, and mixed-initiative interactions, and enables research for tasks such as document retrieval, question answering, recommendation, and structured data exploration. It has a modular design to encourage the study of new CIS algorithms, which can be evaluated in batch mode. It can also integrate with a user interface, which allows user studies and data collection in an interactive mode, where the back end can be fully algorithmic or a wizard of oz setup. Macaw is distributed under the MIT License.|sl:tag|tag:chatbot -Samy Bengio|skos:broader|AI girls and guys -Word + Entity embeddings|skos:broader|Entity embeddings -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. 
Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|sl:arxiv_author|Aditi Chaudhary -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:arxiv_firstAuthor|Robert L. Logan IV -Dean Allemang|skos:broader|Technical girls and guys -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. 
Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|sl:tag|tag:categorical_variables -Ian Horrocks|skos:broader|Technical girls and guys -URI Identity|skos:broader|URI -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:arxiv_author|Matei Zaharia -[Vaswani, et al. 2017 paper](https://arxiv.org/abs/1706.03762): Attention is all you need. [#seq2seq](/tag/sequence_to_sequence_learning) using only improved self-attention units (multi-head self-attention mechanism), without any RNN. |skos:broader|Training models to convert sequences from one domain (e.g. 
sentences in English) to sequences in another domain (e.g. the same sentences translated to French). Example of transformation: translation from one language to another one (text or audio), QA answering, parsing sentences into grammar tree. The seq2seq model generally uses an encoder-decoder architecture, where both encoder and decoder are RNN: - the encoder encodes the input as a fixed length vector (the context vector) - the decoder is initialized with the context vector to emit the output Problems: - fixed-length context vector is unable to remember long sentences. [#Attention mechanism](/tag/deep_learning_attention) allows to solve this problem - since RNN-based seq2seq model are sequential models, they cannot be parallelized. [#The Transformer](/tag/attention_is_all_you_need) solves this -Memory requirements in NN|skos:broader|Neural networks -fps and WWW 2008|skos:broader|fps -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|sl:arxiv_author|Chih-Kuan Yeh -Musique africaine African music|skos:broader|Africa -Foxconn|skos:broader|Chine : technologie -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. 
We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|sl:tag|tag:statistical_classification -Named Entity Recognition|skos:broader|Entity discovery and linking -Salzburg|skos:broader|Autriche -Impôt|skos:broader|Société -Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|sl:tag|tag:arxiv_doc -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. 
Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|sl:tag|tag:using_word_embedding -AI 4 IP|skos:broader|Propriété intellectuelle -Amy Winehouse|skos:broader|Musicien -NEPOMUK|skos:broader|Semantic Web project -Antidot|skos:broader|French Semantic web company -Bhaskar Mitra|skos:broader|Microsoft Research -TheWebConf|skos:broader|Conférences -A Dual Embedding Space Model for Document Ranking Investigate neural word embeddings as a source of evidence in document ranking. Presented in [this Stanford course on IR](/doc/?uri=https%3A%2F%2Fweb.stanford.edu%2Fclass%2Fcs276%2Fhandouts%2Flecture20-distributed-representations.pdf) by Chris Manning (starting slide 44) They train a word2vec model, but retain both the input and the output projections. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives A fundamental goal of search engines is to identify, given a query, documents that have relevant text. This is intrinsically difficult because the query and the document may use different vocabulary, or the document may contain query words without being relevant. We investigate neural word embeddings as a source of evidence in document ranking. We train a word2vec embedding model on a large unlabelled query corpus, but in contrast to how the model is commonly used, we retain both the input and the output projections, allowing us to leverage both the embedding spaces to derive richer distributional relationships. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. We postulate that the proposed Dual Embedding Space Model (DESM) captures evidence on whether a document is about a query term in addition to what is modelled by traditional term-frequency based approaches. Our experiments show that the DESM can re-rank top documents returned by a commercial Web search engine, like Bing, better than a term-matching based signal like TF-IDF. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives, retrieving documents that are only loosely related to the query. We demonstrate that this problem can be solved effectively by ranking based on a linear mixture of the DESM and the word counting features.|sl:tag|tag:embeddings_in_ir -Catholicisme|skos:broader|Chrétienté -TV advertising|skos:broader|Télévision -Internet|skos:broader|NTIC -Ciao Vito|skos:broader|Vito -Moussa Kaka|skos:broader|Niger -CERN|skos:broader|Physique des particules -Riz|skos:broader|Agriculture -Vector space model|skos:broader|NLP techniques -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. 
Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:arxiv_author|Patrick Lewis -Personal Knowledge Graph|skos:broader|Knowledge Graphs -Reformer|skos:broader|Transformers -MaxEnt classifier (Multinomial logistic regression)|skos:broader|Multi-class classification -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|sl:arxiv_author|Jimmy Lin -HATEOAS|skos:broader|API -Maxent models|skos:broader|Maximum Entropy -NLP@Facebook|skos:broader|NLP Teams -Lost Boy|skos:broader|Technical guys -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. 
Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention-entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|sl:tag|tag:arxiv_doc -A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|sl:tag|tag:survey -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining.
Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|sl:arxiv_author|Steven Van Canneyt -"Rapid Automatic Keyword Extraction"|skos:broader|“the automatic selection of important and topical phrases from the body of a document” (Turney, 2000) -Vie sur Mars|skos:broader|Mars -Sanjeev Arora|skos:broader|Technical girls and guys -Web 2.0|skos:broader|Internet -Pologne|skos:broader|Europe -Mac software|skos:broader|Macintosh -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|sl:tag|tag:arxiv_doc -Armée américaine|skos:broader|USA -Semantic Integration Hub|skos:broader|Semantic Web -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification.
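A rough sketch of the weighted-aggregation idea from the short-text entry above, using idf as the per-word weight rather than the paper's learned, median-loss-trained weights; the `vectors` and `idf` lookup tables are assumed inputs, not part of the paper's code.

```python
import numpy as np

def short_text_embedding(tokens, vectors, idf, default_idf=1.0):
    """Weighted average of word vectors: rarer words (higher idf) weigh more.
    `vectors` maps token -> np.ndarray, `idf` maps token -> float."""
    vecs, weights = [], []
    for t in tokens:
        if t in vectors:
            vecs.append(vectors[t])
            weights.append(idf.get(t, default_idf))
    if not vecs:
        return None  # no known word in the text
    w = np.asarray(weights)[:, None]
    return (np.asarray(vecs) * w).sum(axis=0) / w.sum()
```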
We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:arxiv_author|Yann LeCun -Çatalhöyük|skos:broader|Asie mineure -Audio classification|skos:broader|Statistical classification -Plantu|skos:broader|Journal Le Monde -Configuration ontology|skos:broader|Configuration as Linked Data -Deep Learning|skos:broader|Artificial Intelligence -Corent|skos:broader|Gaulois -Outsourcing|skos:broader|Délocalisations -Dynamic Object Model Pattern|skos:broader|Design pattern -Le Pen|skos:broader|Politique française -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:tag|tag:knowledge_graph_deep_learning -Good|skos:broader|I like I like -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. 
This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:tag|tag:knowledge_base -Acoustique|skos:broader|Physique -Google ranking|skos:broader|Ranking (information retrieval) -Hayabusa|skos:broader|Missions spatiales -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|sl:arxiv_author|Bryan Perozzi -Mallet|skos:broader|Machine Learning library -Filets à nuages|skos:broader|Eau -Using Information Content to Evaluate Semantic Similarity in a Taxonomy This paper presents a new measure of semantic similarity in an IS-A taxonomy, based on the notion of information content. Experimental evaluation suggests that the measure performs encouragingly well (a correlation of r = 0.79 with a benchmark set of human similarity judgments, with an upper bound of r = 0.90 for human subjects performing the same task), and significantly better than the traditional edge counting approach (r = 0.66).|sl:tag|tag:arxiv_doc -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. 
It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:tag|tag:bert -Apache Hive|skos:broader|apache.org -France is AI 2018|skos:broader|AI Conference -Mbilia Bel|skos:broader|Musicien -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grandparents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grandparents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance. Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, that reminds me of something) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge.
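The cloze-style probing used in the "Language Models as Knowledge Bases?" entry above (and enabled by BERT's masked-LM pretraining) can be reproduced in a few lines, assuming the HuggingFace transformers library is installed; the model choice and prompt are illustrative.

```python
from transformers import pipeline  # assumes transformers is installed

# BERT fills in [MASK] from its pretraining alone: LAMA-style factual probing.
fill = pipeline("fill-mask", model="bert-base-uncased")
for pred in fill("Paris is the capital of [MASK]."):
    print(f"{pred['token_str']:>10}  {pred['score']:.3f}")
```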
Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of ⟨query concept, anchor concept⟩ pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:tag|tag:graph_neural_networks -slides fps|skos:broader|fps -Météorite|skos:broader|Astronomie -Film argentin|skos:broader|Film -W3C TAG|skos:broader|W3C -Hadoop|skos:broader|Distributed computing -Cheat sheet|skos:broader|Dev tips -Apache on my mac|skos:broader|Notes d'install -JSONP|skos:broader|Script tag hack -Semantic web and AI|skos:broader|Semantic Web -A Survey on Deep Learning for Named Entity Recognition mainly focuses on generic NEs in the English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|sl:arxiv_author|Chenliang Li -Multimedia + LD|skos:broader|Multimedia -ARQ|skos:broader|SPARQL AND Jena -LDOW2012|skos:broader|LDOW -Gaussian embedding|skos:broader|Embeddings -Une suite de matrices symétriques en rapport avec la fonction de Mertens we explore a class of equivalence relations over N from which is constructed a sequence of symmetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important part in this classical and difficult problem.
In this paper we explore a class of equivalence relations over $\\N^\\ast$ from which is constructed a sequence of symmetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important part in this classical and difficult problem.|sl:tag|tag:hypothese_de_riemann -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|sl:arxiv_author|Thomas Demeester -Math|skos:broader|sciences -Vehicular communication systems|skos:broader|Automobile -KnowBert|skos:broader|BERT -Solid|skos:broader|Linked Data -Google Rich Snippets|skos:broader|Google -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute.|sl:arxiv_author|Tomas Mikolov -Archéologie africaine|skos:broader|Histoire de l'Afrique -Enhancing the Power of Cardinal's Algorithm Cardinal's factorization algorithm of 1996 splits a univariate polynomial into two factors with root sets separated by the imaginary axis, which is an important goal itself and a basic step toward root-finding.
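The fastText baseline described in the "Bag of Tricks" entry above averages word features into a sentence representation and feeds them to a linear classifier. A minimal scoring-only sketch of that idea (no hierarchical softmax, no n-gram hashing, and the random weights stand in for trained ones):

```python
import numpy as np

class AveragedBagClassifier:
    """fastText-flavoured baseline: mean of word embeddings + linear softmax."""
    def __init__(self, emb, W, b):
        self.emb, self.W, self.b = emb, W, b   # emb: (V, d), W: (d, K), b: (K,)

    def predict(self, token_ids):
        x = self.emb[token_ids].mean(axis=0)   # sentence = mean of word features
        logits = x @ self.W + self.b
        e = np.exp(logits - logits.max())
        return e / e.sum()                     # class probabilities

rng = np.random.default_rng(0)
clf = AveragedBagClassifier(rng.normal(size=(100, 10)),
                            rng.normal(size=(10, 3)), np.zeros(3))
print(clf.predict([4, 7, 42]))                 # sums to 1 over 3 classes
```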
The novelty of the algorithm and its potential power have been well recognized by experts immediately, but by 2016, that is, two decades later, its practical value still remains nil, particularly because of the high computational cost of performing its final stage by means of computing approximate greatest common divisor of two polynomials. We briefly recall Cardinal's algorithm and its difficulties, amend it based on some works performed since 1996, extend its power to splitting out factors of a more general class, and reduce the final stage of the algorithm to quite manageable computations with structured matrices. Some of our techniques can be of independent interest for matrix computations.|sl:arxiv_author|Victor Y. Pan -Nikolai Vavilov|skos:broader|Botanique -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:tag|tag:language_model -NSA|skos:broader|USA -Oum Kalsoum|skos:broader|Egypte -Extractive Text Summarization|skos:broader|Text Summarization -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. 
This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:ai_stanford -Mark Birbeck|skos:broader|SW guys (and girls) -sequence labelling tasks where the goal is to identify the names of entities in a sentence. Named entities can be proper nouns (locations, people, organizations...), or can be much more domain-specific, such as diseases or genes in biomedical NLP.|skos:broader|pattern recognition task that involves the assignment of a categorical label to each member of a sequence of observed values. Eg: POS tagging -Eminem|skos:broader|Rap -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as "the tissue is 75% likely to be tumorous" or "the chemical is 25% likely to be toxic" are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer. It is the last case, where the potential outcomes of the model explicitly include "don't know" that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an "Uncertain" answer similar to a human reply of "don't know" or "50:50" in the examples we refer to earlier, this would translate to actions such as "operate on tumour" or "remove compound from use" where the models give a "more true than not" answer. Where the models judge the result "Uncertain" the practical decision might be "carry out more detailed laboratory testing of compound" or "commission new tissue analyses". The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating "Uncertain" from binary predictions and finally, we provide arguments for its use in practice.|sl:arxiv_author|David E Leahy -Graph Convolutional Networks|skos:broader|Graph neural networks -Common Tag|skos:broader|Semantic tagging -PlaNet - Photo Geolocation with Convolutional Neural Networks Is it possible to build a system to determine the location where a photo was taken using just its pixels? In general, the problem seems exceptionally difficult: it is trivial to construct situations where no location can be inferred.
Yet images often contain informative cues such as landmarks, weather patterns, vegetation, road markings, and architectural details, which in combination may allow one to determine an approximate location and occasionally an exact location. Websites such as GeoGuessr and View from your Window suggest that humans are relatively good at integrating these cues to geolocate images, especially en-masse. In computer vision, the photo geolocation problem is usually approached using image retrieval methods. In contrast, we pose the problem as one of classification by subdividing the surface of the earth into thousands of multi-scale geographic cells, and train a deep network using millions of geotagged images. While previous approaches only recognize landmarks or perform approximate matching using global image descriptors, our model is able to use and integrate multiple visible cues. We show that the resulting model, called PlaNet, outperforms previous approaches and even attains superhuman levels of accuracy in some cases. Moreover, we extend our model to photo albums by combining it with a long short-term memory (LSTM) architecture. By learning to exploit temporal coherence to geolocate uncertain photos, we demonstrate that this model achieves a 50% performance improvement over the single-image model.|sl:arxiv_firstAuthor|Tobias Weyand -Ciao Vito|skos:broader|Restaurant -Philosophe|skos:broader|Homme célèbre -Tulipe|skos:broader|Fleur -Manipulations génétiques|skos:broader|Genetics Génétique -Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new "compositional" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as "What languages are spoken by people living in Lisbon?". However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|sl:arxiv_author|Percy Liang -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates.
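For the "Traversing Knowledge Graphs in Vector Space" entry above, a toy illustration of answering a 2-hop path query by additive composition in embedding space. TransE is one of the base models that paper composes; all vectors here are random placeholders, not trained embeddings.

```python
import numpy as np

def transe_path_score(h, relations, t):
    """TransE-style path query: h + r1 + ... + rk should land near t.
    Returns a negative distance, so higher means a more plausible answer."""
    return -np.linalg.norm(h + np.sum(relations, axis=0) - t)

rng = np.random.default_rng(0)
h, t = rng.normal(size=8), rng.normal(size=8)
r1, r2 = rng.normal(size=8), rng.normal(size=8)  # e.g. lives_in, then speaks
print(transe_path_score(h, [r1, r2], t))
```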
Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Joseph Turian -Tomcat tips|skos:broader|Tips -Bactéries|skos:broader|Biologie -LibShortText|skos:broader|Python-NLP -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|sl:arxiv_author|Jeremy Howard -Tim Berners-Lee|skos:broader|Grand Homme -Subtitles|skos:broader|Digital Video -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.|sl:tag|tag:multitask_learning_in_nlp -Supervised learning techniques that also make use of unlabeled data for training – typically a small amount of labeled data with a large amount of unlabeled data.|skos:broader|the machine learning task of inferring a function from labeled training data. 
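The distillation objective behind the BAM! entry above can be sketched as follows; the temperature value and the annealing comment reflect common practice and the paper's description, not its exact hyperparameters.

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, T=2.0):
    """Soft-target distillation: KL between temperature-softened teacher and
    student distributions, scaled by T^2 as is conventional."""
    p_teacher = F.softmax(teacher_logits / T, dim=-1)
    log_p_student = F.log_softmax(student_logits / T, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * T * T

# Teacher annealing (BAM's twist): interpolate the target between teacher
# predictions and gold labels, with the mixing weight moving toward the gold
# labels over the course of training, so the student can surpass its teachers.
```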
-Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy text, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|sl:arxiv_firstAuthor|Michael Conover -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks.
Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|sl:tag|tag:nlp_long_documents -Amazon Mechanical Turk|skos:broader|Délocalisation des services -Java library|skos:broader|Library (code) -LDOW2011|skos:broader|LDOW -Distributional semantics|skos:broader|NLP techniques -Cassini|skos:broader|Saturne -NLU is hard|skos:broader|NLU -Prohibition des narcotiques|skos:broader|Prohibition -fpservant@slideshare|skos:broader|SlideShare -Louvre|skos:broader|Paris -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:tag|tag:ruslan_salakhutdinov -Go (Game)|skos:broader|Jeu -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. 
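The local windowed attention that the Longformer entry above describes can be visualized with a banded mask; a minimal sketch (the window size is an illustrative parameter, and real implementations also add the task-motivated global attention positions).

```python
import torch

def local_attention_mask(seq_len, window=2):
    """Boolean mask letting each position attend only to neighbours within
    `window` positions: the 'local' half of Longformer-style attention."""
    i = torch.arange(seq_len)
    return (i[:, None] - i[None, :]).abs() <= window

print(local_attention_mask(8, window=2).int())
# In practice, scores outside the band are set to -inf before the softmax,
# and a few designated tokens additionally receive full 'global' attention.
```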
We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:arxiv_author|Maosong Sun -Âge du bronze|skos:broader|Préhistoire -Dengue|skos:broader|Maladie -Hierarchical clustering of text documents|skos:broader|Hierarchical clustering -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|sl:arxiv_author|Jon D. McAuliffe -Named Entity Disambiguation using Deep Learning on Graphs Evaluation of different deep learning techniques to create a context vector from graphs, aimed at high-accuracy NED. (neural approach for entity disambiguation using graphs as background knowledge) We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task... [published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) In NED, the system must be able to generate a context for an entity in a text and an entity in a knowledge base, then correctly link the two. Explore whether representing graphs as triplets is more useful than using the full topological information of the graph. We tackle NED by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task.
In addition, a new dataset (Wikidata-Disamb) is created to allow a clean and scalable evaluation of NED with Wikidata entries, and to be used as a reference in future research. In the end our results show that a Bi-LSTM encoding of the graph triplets performs best, improving upon the baseline models and scoring an F1 value of 91.6% on the Wikidata-Disamb test set.|sl:arxiv_author|Andrew D. O'Harney -XML|skos:broader|Data Interchange Format -Connectionist vs symbolic debate|skos:broader|Artificial Intelligence -Explorateur|skos:broader|Grand voyageur -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L \log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|sl:tag|tag:reformer -A method to map documents to a code (e.g., 32-bit memory address) so documents with semantically close content are mapped to close addresses. Method introduced by Ruslan Salakhutdinov and Geoffrey Hinton in this [paper](/doc/?uri=http%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fpii%2FS0888613X08001813) |skos:broader|Finding items that are similar to a given query is the core aspect of search and retrieval systems, as well as of recommendation engines. -Henri Verdier|skos:broader|Technical girls and guys -Touareg|skos:broader|Sahara -Linux|skos:broader|Open Source -Xavier Bertrand|skos:broader|Gouvernement Sarkozy -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|sl:arxiv_author|Rafal Jozefowicz -Darwin|skos:broader|Grand Homme -Information visualization|skos:broader|Information -Film cubain|skos:broader|Cuba -Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property.
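Both the semantic-hashing description and Reformer's locality-sensitive-hashing attention above rest on the same primitive: hashing vectors so that nearby inputs land in the same bucket. A minimal sketch with random hyperplanes (an LSH family for cosine similarity); the bit width and seed are arbitrary choices.

```python
import numpy as np

def hyperplane_hash(X, n_bits=16, seed=0):
    """Map row vectors to short binary codes via random hyperplanes: vectors
    with high cosine similarity tend to receive the same code (bucket)."""
    rng = np.random.default_rng(seed)
    H = rng.normal(size=(X.shape[1], n_bits))
    bits = (X @ H) > 0                       # one sign bit per hyperplane
    return bits @ (1 << np.arange(n_bits))   # pack the bits into an integer

X = np.random.default_rng(1).normal(size=(4, 32))
print(hyperplane_hash(X))                    # one bucket id per input vector
```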
For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space. Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincaré ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincaré embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|sl:arxiv_author|Douwe Kiela -Targeted ads|skos:broader|Publicité -Exoplanètes|skos:broader|Astronomie -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures: one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|sl:arxiv_firstAuthor|Guillaume Lample -Homme de Florès|skos:broader|Indonésie -Semantic framework|skos:broader|Frameworks -Extinction des dinosaures|skos:broader|Catastrophe naturelle -Voir com.hp.hpl.jena.sparql.engine.QueryExecutionBase|skos:broader|ARQ - A SPARQL Processor for Jena -Zitgist|skos:broader|Semantic Web : Application -StarSpace|skos:broader|Antoine Bordes -Deep Learning and the Information Bottleneck Principle Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN. Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables.
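The hyperbolic geometry behind the Poincaré embeddings entry above comes down to one distance function on the open unit ball, d(u, v) = arcosh(1 + 2 ||u - v||² / ((1 - ||u||²)(1 - ||v||²))); a direct NumPy transcription (the epsilon guard and example points are mine):

```python
import numpy as np

def poincare_distance(u, v, eps=1e-9):
    """Hyperbolic distance in the Poincare ball; requires ||u||, ||v|| < 1.
    Distances blow up near the boundary, which is what lets the ball hold
    tree-like (hierarchical) structure in few dimensions."""
    uu, vv = np.dot(u, u), np.dot(v, v)
    duv = np.dot(u - v, u - v)
    x = 1.0 + 2.0 * duv / ((1.0 - uu) * (1.0 - vv) + eps)
    return np.arccosh(x)

u, v = np.array([0.1, 0.2]), np.array([0.5, -0.3])
print(poincare_distance(u, v))
```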
Using this representation we can calculate the optimal information theoretic limits of the DNN and obtain finite sample generalization bounds. The advantage of getting closer to the theoretical limit is quantifiable both by the generalization bound and by the network's simplicity. We argue that both the optimal architecture, number of layers and features/connections at each layer, are related to the bifurcation points of the information bottleneck tradeoff, namely, relevant compression of the input layer with respect to the output layer. The hierarchical representations at the layered network naturally correspond to the structural phase transitions along the information curve. We believe that this new insight can lead to new optimality bounds and deep learning algorithms.|sl:arxiv_firstAuthor|Naftali Tishby -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:arxiv_author|Yingyu Liang -Deep Learning: A Critical Appraisal Although deep learning has historical roots going back decades, neither the term deep learning nor the approach was popular just over five years ago, when the field was reignited by papers such as Krizhevsky, Sutskever and Hinton's now classic (2012) deep network model of Imagenet. What has the field discovered in the five subsequent years? 
Against a background of considerable progress in areas such as speech recognition, image recognition, and game playing, and considerable enthusiasm in the popular press, I present ten concerns for deep learning, and suggest that deep learning must be supplemented by other techniques if we are to reach artificial general intelligence.|sl:tag|tag:deep_learning -Memory in deep learning|skos:broader|Deep Learning -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based methods are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:arxiv_author|Marcus Liwicki -Irak|skos:broader|Asie -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set.
Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|sl:arxiv_author|Tomas Mikolov -NLP and Search|skos:broader|NLP: use cases -Apple Software|skos:broader|Apple -Displaying XML with css|skos:broader|css -Voûte nubienne|skos:broader|Sahel -Pérou|skos:broader|Amérique -SDB: A SPARQL Database for Jena|skos:broader|Jena and database -Affaires de Gado à Niamey|skos:broader|Gado -NLP girls and guys|skos:broader|AI girls and guys -VIE Vienna IKS Editables|skos:broader|Interactive Knowledge Stack -Semantic Web Platform|skos:broader|Semantic Web -Webmasters @ Google|skos:broader|Google -SPARQL en javascript|skos:broader|SPARQL -Bob Dylan|skos:broader|Musicien -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:human_ai_collaboration -Crise des migrants|skos:broader|Immigration -Google Fusion Tables|skos:broader|Google -Parthénogenèse|skos:broader|Sexe -Antonin Artaud|skos:broader|Folie -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. 
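The two architectures proposed in the record above (CBOW and skip-gram) are the ones implemented by gensim's Word2Vec, so a short sketch can show the intended usage. Assumes gensim >= 4 and a toy corpus; the syntactic/semantic regularities reported in the paper only emerge with corpora orders of magnitude larger.

```python
from gensim.models import Word2Vec

corpus = [["king", "rules", "the", "kingdom"],
          ["queen", "rules", "the", "kingdom"],
          ["man", "walks", "outside"],
          ["woman", "walks", "outside"]]

# sg=1 selects the skip-gram architecture; sg=0 would select CBOW.
model = Word2Vec(sentences=corpus, vector_size=50, window=2,
                 min_count=1, sg=1, epochs=100, seed=1)

print(model.wv.similarity("king", "queen"))
# Analogy-style query (only meaningful with a large training corpus):
print(model.wv.most_similar(positive=["king", "woman"], negative=["man"], topn=3))
```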
The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:tag|tag:nlp_facebook -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:tag|tag:combining_word_and_entity_embeddings -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. 
The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:tag|tag:bert -RDF and Property Graphs|skos:broader|Property Graphs -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Jonathan May -Thermodynamique|skos:broader|Physique -URI encoding|skos:broader|Encoding -SQL to RDF mapping|skos:broader|Database to RDF mapping -AI@Facebook|skos:broader|AI teams -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. [conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations.
Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperform several popular clustering methods when tested on three public short text datasets.|sl:arxiv_author|Suncong Zheng -Beijing Genomics Institute|skos:broader|Séquençage du génome -Israël|skos:broader|Juifs -Graph Convolutional Network GCN|skos:broader|GNN -Enfance|skos:broader|Jeunesse -Crise de la dette|skos:broader|Crise financière -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:tag|tag:memory_in_deep_learning -Watson Speech-to-Text|skos:broader|IBM Watson -Medical IR, ML, IA|skos:broader|Santé -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. 
Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|sl:arxiv_author|Jian-Yun Nie -Génome|skos:broader|Genetics Génétique -DRM in HTML 5|skos:broader|HTML5 -Semantic Web search engine|skos:broader|Semantic Web : Tools -Développement durable|skos:broader|Écologie -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:tag|tag:language_model -Automotive ontologies|skos:broader|Automotive and web technologies -A Dual Embedding Space Model for Document Ranking Investigate neural word embeddings as a source of evidence in document ranking. Presented in [this Stanford course on IR](/doc/?uri=https%3A%2F%2Fweb.stanford.edu%2Fclass%2Fcs276%2Fhandouts%2Flecture20-distributed-representations.pdf) by Chris Manning (starting slide 44) They train a word2vec model, but retain both the input and the output projections. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives A fundamental goal of search engines is to identify, given a query, documents that have relevant text. This is intrinsically difficult because the query and the document may use different vocabulary, or the document may contain query words without being relevant. We investigate neural word embeddings as a source of evidence in document ranking. We train a word2vec embedding model on a large unlabelled query corpus, but in contrast to how the model is commonly used, we retain both the input and the output projections, allowing us to leverage both the embedding spaces to derive richer distributional relationships. 
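To make the RotatE record above concrete: each relation is an element-wise rotation in complex space, and a triple (h, r, t) scores high when the head rotated by the relation lands near the tail. A minimal numpy sketch of the scoring function only; the paper's self-adversarial negative sampling and training loop are omitted.

```python
import numpy as np

rng = np.random.default_rng(0)
k = 8                                                       # embedding dimension
h = rng.standard_normal(k) + 1j * rng.standard_normal(k)   # head entity
r = np.exp(1j * rng.uniform(0.0, 2.0 * np.pi, k))          # relation: |r_i| = 1, a pure rotation
t = h * r                                                   # construct a "true" tail for the demo

def rotate_score(h, r, t):
    """RotatE plausibility: negative distance between rotated head and tail."""
    return -np.linalg.norm(h * r - t)

corrupted = rng.standard_normal(k) + 1j * rng.standard_normal(k)
print(rotate_score(h, r, t))          # ~0.0: the rotation maps h exactly onto t
print(rotate_score(h, r, corrupted))  # clearly lower for a random tail
```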
During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. We postulate that the proposed Dual Embedding Space Model (DESM) captures evidence on whether a document is about a query term in addition to what is modelled by traditional term-frequency based approaches. Our experiments show that the DESM can re-rank top documents returned by a commercial Web search engine, like Bing, better than a term-matching based signal like TF-IDF. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives, retrieving documents that are only loosely related to the query. We demonstrate that this problem can be solved effectively by ranking based on a linear mixture of the DESM and the word counting features.|sl:tag|tag:ranking_information_retrieval -Film espagnol|skos:broader|Film -Automatic tagging|skos:broader|Tagging -Cohn-Bendit|skos:broader|I like I like -Sparse coding|skos:broader|Unsupervised machine learning -Spritz|skos:broader|Reading -dbpedia|skos:broader|Linked Data -Industrie pharmaceutique|skos:broader|Santé -Knowledge Graph Completion|skos:broader|Knowledge Graphs -Musée de Niamey|skos:broader|Musée -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem-specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes.
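The DESM record above scores a document by aggregating cosine similarities across all query-document word pairs, with query words in the IN (input-projection) space and document words in the OUT (output-projection) space. A sketch under the assumption that both embedding matrices are already available as numpy arrays; `desm_score` is an invented helper name, not from the paper's code.

```python
import numpy as np

def desm_score(q_in, d_out):
    """Mean cosine similarity over all query-document word pairs.

    q_in:  (n_query_words, dim) IN embeddings of the query words.
    d_out: (n_doc_words, dim) OUT embeddings of the document words.
    """
    q = q_in / np.linalg.norm(q_in, axis=1, keepdims=True)
    d = d_out / np.linalg.norm(d_out, axis=1, keepdims=True)
    return float(np.mean(q @ d.T))   # all pairwise cosines, averaged

rng = np.random.default_rng(0)
query_vecs = rng.standard_normal((3, 300))   # 3 query words, 300-d embeddings
doc_vecs = rng.standard_normal((120, 300))   # 120 document words
print(desm_score(query_vecs, doc_vecs))
```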
Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|sl:arxiv_firstAuthor|Austin Slakey -salesforce|skos:broader|Entreprise -The Description Length of Deep Learning Models Solomonoff's general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:arxiv_author|Léonard Blier -Part Of Speech Tagging|skos:broader|General NLP tasks -Loi Renseignement|skos:broader|Big Brother -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations. We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|sl:arxiv_author|David Lopez-Paz -Representing Sentences as Low-Rank Subspaces We observe a simple geometry of sentences -- the word representations of a given sentence roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. A sentence of N words is a matrix (300, N) (if 300 is the dim of the word embeddings space). We take e.g. the
4 (hyperparam) heaviest singular values - a subspace with dim 4 Similarity between docs: principal angle between the subspaces (reminiscent of cosine similarity) Sentences are important semantic units of natural language. A generic, distributional representation of sentences that can capture the latent semantics is beneficial to multiple downstream applications. We observe a simple geometry of sentences -- the word representations of a given sentence (on average 10.23 words in all SemEval datasets with a standard deviation 4.84) roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. Such an unsupervised representation is empirically validated via semantic textual similarity tasks on 19 different datasets, where it outperforms the sophisticated neural network models, including skip-thought vectors, by 15% on average.|sl:tag|tag:arxiv_doc -BERT-of-Theseus: Compressing BERT by Progressive Module Replacing approach to compress BERT by progressive module replacing. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter [Github](https://github.com/JetRunner/BERT-of-Theseus) In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models, and smooths the training process. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter, liberating human effort from hyper-parameter tuning. Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.|sl:tag|tag:knowledge_distillation -SWEO Interest Group|skos:broader|Semantic web: evangelization -Hippopotame|skos:broader|Animal -SL|skos:broader|PIM -Les 100 pièges de l'Anglais|skos:broader|Learning english -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). 
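A worked version of the note above: a sentence becomes the rank-4 subspace spanned by its word vectors, and two sentences are compared through the principal angles between their subspaces (the singular values of the product of orthonormal bases are the cosines of those angles, which is what makes this "reminiscent of cosine similarity"). A sketch with random vectors standing in for real word embeddings; `sentence_subspace` and `subspace_similarity` are invented names.

```python
import numpy as np

def sentence_subspace(word_vecs, rank=4):
    """word_vecs: (dim, n_words). Returns a (dim, rank) orthonormal basis
    spanned by the `rank` heaviest left singular vectors."""
    u, s, vt = np.linalg.svd(word_vecs, full_matrices=False)
    return u[:, :rank]

def subspace_similarity(a, b):
    """Mean cosine of the principal angles between two orthonormal bases."""
    cosines = np.linalg.svd(a.T @ b, compute_uv=False)
    return float(np.mean(np.clip(cosines, 0.0, 1.0)))

rng = np.random.default_rng(0)
s1 = rng.standard_normal((300, 10))   # a 10-word sentence, 300-d embeddings
s2 = rng.standard_normal((300, 12))   # a 12-word sentence
print(subspace_similarity(sentence_subspace(s1), sentence_subspace(s2)))
```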
To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:arxiv_author|Hamed Zamani -Civilisations précolombiennes|skos:broader|Amérique -SW: coreferences|skos:broader|Linked Data -NTIC et développement|skos:broader|Innovation -Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|sl:arxiv_firstAuthor|Zhilin Yang -OWL DL|skos:broader|OWL -Shanghaï|skos:broader|Chine -RDFa 1.1 Lite|skos:broader|RDFa Lite -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. 
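The weak-supervision recipe in the ranking record above can be mimicked in a few lines: let BM25 produce free training labels, then fit a student ranker to them. An illustrative sketch assuming the rank_bm25 package; the linear student and the two toy features stand in for the paper's feed-forward neural rankers and are not from the paper.

```python
import numpy as np
from rank_bm25 import BM25Okapi
from sklearn.linear_model import LinearRegression

docs = [["deep", "learning", "for", "ranking"],
        ["bm25", "is", "a", "strong", "baseline"],
        ["weak", "supervision", "needs", "no", "annotators"]]
bm25 = BM25Okapi(docs)

query = ["weak", "supervision", "ranking"]
weak_labels = bm25.get_scores(query)   # teacher scores: no human annotation needed

# Toy query-document features; the student learns to reproduce the teacher's
# ordering and, with richer features, can generalize beyond it.
features = np.array([[len(set(query) & set(d)), len(d)] for d in docs])
student = LinearRegression().fit(features, weak_labels)
print(student.predict(features))
```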
We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:arxiv_author|Robert L. Logan IV -Microsoft Concept Graph|skos:broader|NLP@Microsoft -Baidu|skos:broader|Chine : technologie -NLP: using Knowledge|skos:broader|Knowledge -Hydra|skos:broader|APIs and Linked Data -Wiki|skos:broader|Internet -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:arxiv_firstAuthor|Ronan Collobert -Mission \"Voulet-Chanoine\"|skos:broader|Tchad -Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:tag|tag:arxiv_doc -Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer.
Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|sl:tag|tag:sequence_to_sequence_learning -Flaubert|skos:broader|Ecrivain -Notes on Cardinal's Matrices These notes are motivated by the work of Jean-Paul Cardinal on symmetric matrices related to the Mertens function. He showed that certain norm bounds on his matrices implied the Riemann hypothesis. Using a different matrix norm we show an equivalence of the Riemann hypothesis to suitable norm bounds on his matrices in the new norm. Then we specify a deformed version of his Mertens function matrices that unconditionally satisfies a norm bound that is of the same strength as his Riemann hypothesis bound.|sl:tag|tag:hypothese_de_riemann -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|sl:arxiv_author|Shiyu Chang -Loi sur le voile|skos:broader|Islam -AI: startups|skos:broader|Artificial Intelligence -Tchernobyl|skos:broader|Industrie nucléaire -Empires d'Afrique de l'Ouest|skos:broader|Afrique de l'Ouest -OWL editor|skos:broader|OWL -DSSM (Deep Semantic Similarity Model)|skos:broader|Document embeddings -Hittite|skos:broader|Asie mineure -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. 
We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:arxiv_author|Matt Gardner -Fleur|skos:broader|Plante -Semantic Web Client Library|skos:broader|Linked Data -Good idea|skos:broader|Good -CCFD|skos:broader|Catholicisme -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:arxiv_author|Leon Bottou -Computer vision|skos:broader|Artificial Intelligence -Burkina Faso|skos:broader|Sahel -Crise écologique|skos:broader|Écologie -Crime contre l'Humanité|skos:broader|Horreur -Ora Lassila|skos:broader|Nokia -Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. 
The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|sl:tag|tag:contrastive_self_supervised_learning -Freedom Box|skos:broader|Internet -Multi-label Text classification|skos:broader|Text Classification -Graph Convolutional Networks (GCNs) strike a balance between modeling the full structure of the graph dynamically, as the tensor model does, and modeling the local neighbourhood structure through extracted features (as substructure counting methods and RDF2Vec do). ([source](/doc/2019/08/the_knowledge_graph_as_the_defa)) |skos:broader|Neural networks that operate on graphs -Ontology|skos:broader|KR -Unsupervised method to learn sentence representations. Conceptually, the model can be interpreted as a natural extension of the word-contexts from C-BOW to a larger sentence context, with the sentence words being specifically optimized towards additive combination over the sentence, by means of the unsupervised objective function |skos:broader|In practice, many NLP applications rely on a simple sentence embedding: the average of the embeddings of the words in it. We can do better. Ex of use (besides trivial ones such as classification and similarity): use sentence embeddings to cluster sentences in documents, which aids in the automatic extraction of key information from large bodies of text. -AI black box|skos:broader|Deep Learning -Entity discovery and linking|skos:broader|Entities -Lobby nucléaire|skos:broader|Lobby -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions. 
Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|sl:tag|tag:image_classification -Nemrud|skos:broader|Asie mineure -Semantic web company|skos:broader|Entreprise -Business case: semantic web|skos:broader|Business case -Knowledge Vault|skos:broader|Knowledge bases -France : bureaucratie|skos:broader|France -Con de Chirac|skos:broader|Chirac -SIOC|skos:broader|RDF and social networks -BERT-of-Theseus: Compressing BERT by Progressive Module Replacing approach to compress BERT by progressive module replacing. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter [Github](https://github.com/JetRunner/BERT-of-Theseus) In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models, and smooths the training process. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter, liberating human effort from hyper-parameter tuning. Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.|sl:arxiv_firstAuthor|Canwen Xu -Economie allemande|skos:broader|Economie -Desktop search|skos:broader|Informatique -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. 
More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:tag|tag:semantic_hashing -Lobby agroalimentaire|skos:broader|Lobbies économiques -NSA|skos:broader|Services secrets -Haskell|skos:broader|Functional programming -Roam|skos:broader|Note taking app -Java: JNI|skos:broader|Java dev -Kassav'|skos:broader|Zouk -Text Summarization|skos:broader|Information extraction -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|sl:tag|tag:arxiv_doc -Natural selection|skos:broader|Evolution -NLP@Microsoft|skos:broader|Microsoft -hyperdoc2vec: Distributed Representations of Hypertext Documents Hypertext documents, such as web pages and academic papers, are of great importance in delivering information in our daily life. Although being effective on plain documents, conventional text embedding methods suffer from information loss if directly adapted to hyper-documents. In this paper, we propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, along with four criteria characterizing necessary information that hyper-document embedding models should preserve. Systematic comparisons are conducted between hyperdoc2vec and several competitors on two tasks, i.e., paper classification and citation recommendation, in the academic paper domain. Analyses and experiments both validate the superiority of hyperdoc2vec to other models w.r.t. the four criteria.|sl:arxiv_author|Yan Song -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. 
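The learned-index record above treats a B-Tree as a model from key to position in a sorted array. A toy sketch of that idea: fit a regression to predict positions, record its maximum error, and answer lookups with a bounded local search around the prediction. A single linear model stands in here for the paper's staged ("recursive model index") neural models.

```python
import numpy as np

keys = np.sort(np.random.default_rng(0).uniform(0, 1e6, 100_000))
pos = np.arange(len(keys))

# "Train" the index: fit position as a function of key (ordinary least squares),
# then bound the model's worst-case error over the training keys.
slope, intercept = np.polyfit(keys, pos, 1)
max_err = int(np.max(np.abs(slope * keys + intercept - pos))) + 1

def lookup(key):
    guess = int(slope * key + intercept)
    lo = max(0, guess - max_err)
    hi = min(len(keys), guess + max_err + 1)
    # Search only the error window instead of the whole array.
    return lo + int(np.searchsorted(keys[lo:hi], key))

i = lookup(keys[1234])
print(i, keys[i] == keys[1234])   # 1234 True
```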
In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:arxiv_firstAuthor|Tomas Mikolov -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Michael Cochez -Web dev framework|skos:broader|Web dev -Quora Question Pairs|skos:broader|Kaggle -POWDER|skos:broader|W3C Working Draft -Digital Media|skos:broader|Technologie -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases.
We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:tag|tag:arxiv_doc -Learning Sparse, Distributed Representations using the Hebbian Principle The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|sl:arxiv_author|Upamanyu Madhow -Belo Horizonte|skos:broader|Ville -Analyse sémantique|skos:broader|Semantic technology -Learning Confidence for Out-of-Distribution Detection in Neural Networks Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.|sl:tag|tag:out_of_distribution_detection -Livre|skos:broader|Reading -Machine translation|skos:broader|NLP -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention.
In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:multiple_knowledge_bases -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|sl:arxiv_firstAuthor|Haochen Chen -République Tchèque|skos:broader|Europe - Create delightful python projects using Jupyter Notebooks|skos:broader|- [Home Page](https://www.fast.ai/) - [MOOC](https://course.fast.ai/) - [Github](https://github.com/fastai/fastai) - [Forum](https://forums.fast.ai/) - [docs.fast.ai](https://docs.fast.ai/) -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. 
Our method consistently improves over standard single-task and multi-task training.|sl:arxiv_author|Kevin Clark -Publishing RDF Vocabularies|skos:broader|Semanlink related -Multimedia + LD|skos:broader|Linked Data -Chili|skos:broader|Amérique du sud -Loi sur le téléchargement|skos:broader|Industrie du disque -Azawad|skos:broader|Touareg -Delon|skos:broader|Acteur -IPv6|skos:broader|IP address -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. We build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Review](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|sl:tag|tag:nlp_microsoft -Truffe|skos:broader|Curiosité naturelle -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state.
This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:tag|tag:human_level_ai -Wembedder: Wikidata entity embedding web service web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk I present a web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk. A REST API is implemented. Together with the Wikidata API the web service exposes a multilingual resource for over 600'000 Wikidata items and properties.|sl:tag|tag:gensim -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. 
It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:tag|tag:arxiv_doc -Finding items that are similar to a given query is the core aspect of search and retrieval systems, as well as of recommendation engines.|skos:broader|predict the "rating" a user would give to an item -Cassini-Huygens|skos:broader|Titan -apache.org|skos:broader|Open Source -Evolution|skos:broader|Biology -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning.
As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Victor Bapst -Knowledge Graph Embeddings|skos:broader|Knowledge Graphs -Tools|skos:broader|Dev -Doc2Vec|skos:broader|Sentence Embeddings -ARN|skos:broader|Genetics Génétique -RAKE|skos:broader|Keyword/keyphrase extraction -Entity linking|skos:broader|Entities -AllenNLP|skos:broader|Allen Institute for AI (A2I) -delicious api|skos:broader|Tagging -Knowledge-driven embeddings|skos:broader|Embeddings -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|sl:arxiv_firstAuthor|Tomas Mikolov -Jean-Claude Ameisen|skos:broader|Hérédité -Plastic print|skos:broader|Imprimantes -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|sl:tag|tag:arxiv_doc -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. 
Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|sl:tag|tag:nlp_stanford -Médecins sans frontières|skos:broader|Prix Nobel -Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. These findings are significant as it suggests a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|sl:tag|tag:rules -Biotechnologies Biotechnologies|skos:broader|Biology -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. 
This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|sl:tag|tag:labeled_data -Afrique de l'Ouest|skos:broader|Afrique -Bayesian Deep Learning|skos:broader|Bayesian Reasoning -Musique brésilienne|skos:broader|Musique -ML ensemble meta-algorithm designed to improve the stability and accuracy of ML algorithms used in classification and regression (by combining classifications of randomly generated training sets) Usually applied to decision tree methods, but can be used with any type of method. Special case of the model averaging approach. Reduces variance, and hence the risk of overfitting. |skos:broader|Methods that use multiple learning algorithms to obtain better predictive performance -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates.
By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:artificial_human_intelligence -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:arxiv_author|Jun Ma -Présidentielles 2012|skos:broader|Politique française -Norilsk|skos:broader|Russie -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. 
We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Mirella Lapata -Jiroft|skos:broader|Âge du bronze -Thérapie génique|skos:broader|Médecine -Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:tag|tag:good -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. 
Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:tag|tag:weak_supervision -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:arxiv_author|Chris Dyer -Ian Goodfellow|skos:broader|AI girls and guys -Tiers-Monde|skos:broader|Grands problèmes -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. 
Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:arxiv_firstAuthor|Manaal Faruqui -Yandex|skos:broader|Search Engines -Constraint Programming|skos:broader|Artificial Intelligence -Conquête spatiale|skos:broader|Espace -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Michael Smith -Sida|skos:broader|Maladie -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity.
For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|sl:tag|tag:nlp_short_texts -One-Shot Generalization|skos:broader|ML -Alexandria Ocasio-Cortez|skos:broader|Homme politique -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. decouples learning contexts and words Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary.
When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|sl:tag|tag:contextualised_word_representations -for instance in image recognition using siamese networks, triplet loss function tries to maximize the distance between anchor image and negative image while minimizing the distance between anchor image and positive image, thereby learning to differentiate similar images to non similar ones|skos:broader|Introduced in the early 1990s by Bromley and LeCun to solve signature verification as an image matching problem -Jena User Conference|skos:broader|Semantic Web conferences -supervised learning models used for classification and regression analysis. An SVM model is a representation of the training examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. Non-probabilistic binary linear classifier (some methods exist to use SVM in a probabilistic classification setting). Can be made non-linear with the kernel trick (implicitly mapping the inputs into high-dimensional feature spaces.) |skos:broader|the problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations whose category membership is known. -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:arxiv_author|Alex Kurakin -Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. 
This motivates a new "compositional" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as "What languages are spoken by people living in Lisbon?". However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|sl:tag|tag:knowledge_graph -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:arxiv_author|Yan Zhang -Wikilinks Corpus|skos:broader|Big Data -Personal assistant|skos:broader|Human-AI collaboration -Loi sur les oeuvres indisponibles|skos:broader|Propriété intellectuelle -Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model.
Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|sl:arxiv_firstAuthor|Lingzhen Chen -Japon|skos:broader|Asie -Richesses sous-marines|skos:broader|Océan -URL|skos:broader|URI -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:arxiv_author|Kristina Toutanova -Pensée|skos:broader|Brain -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|sl:tag|tag:arxiv_doc -Enterprise System|skos:broader|Information System -Nietzsche|skos:broader|Philosophe -Pollution des océans|skos:broader|Pollution -Yagán|skos:broader|Terre de Feu -Biotech industry|skos:broader|Entreprise -httpRange-14|skos:broader|HTTP -Mai 68|skos:broader|France -Manu Dibango|skos:broader|I like I like -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. 
While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_author|Germain Forestier -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. 
We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|sl:tag|tag:evolutionary_algorithm -Hongrie|skos:broader|Pays d'Europe -Knowledge Extraction|skos:broader|Artificial Intelligence -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:arxiv_firstAuthor|Léonard Blier -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. 
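The AutoML-Zero entry above relies on regularized evolution: mutate a tournament winner, append the child, evict the oldest individual. A self-contained caricature with a stand-in fitness function (the real system evaluates whole machine-learning programs):

```python
# Hedged caricature of regularized evolution; `fitness` is a placeholder.
import random

def fitness(program):
    return -abs(sum(program) - 10)  # stand-in objective

def mutate(program):
    child = list(program)
    child[random.randrange(len(child))] += random.choice([-1, 1])
    return child

population = [[random.randint(0, 5) for _ in range(4)] for _ in range(20)]
for _ in range(500):
    sample = random.sample(population, 5)   # tournament selection
    parent = max(sample, key=fitness)
    population.append(mutate(parent))       # add the child
    population.pop(0)                       # age-based removal of the oldest

print(max(population, key=fitness))
```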
After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:kd_mkb_biblio -General Motors|skos:broader|Automobile -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based methods are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:tag|tag:arxiv_doc -AQMI|skos:broader|Al-Qaida -Françafrique|skos:broader|Afrique -Semantic Web: CRM|skos:broader|CRM -Ouganda|skos:broader|Afrique -Natural Language Processing|skos:broader|IA AI -Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) meh More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert.
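A rough sketch of the subword hashing idea in the intent-classification entry above: tokens are decomposed into character trigrams, so misspellings still share most features with the correct word. The `#` padding convention is an assumption:

```python
# Hedged sketch of subword semantic hashing: character trigrams as features.
def semhash_tokens(text):
    features = []
    for token in text.lower().split():
        padded = f"#{token}#"  # mark word boundaries
        features.extend(padded[i:i + 3] for i in range(len(padded) - 2))
    return features

print(semhash_tokens("restart ubuntu"))
# ['#re', 'res', 'est', 'sta', 'tar', 'art', 'rt#', '#ub', 'ubu', 'bun', 'unt', 'ntu', 'tu#']
```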
The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|sl:tag|tag:arxiv_doc -MyFaces|skos:broader|Java Server Faces -Howto|skos:broader|Dev -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|sl:arxiv_author|Deepak Nathani -Bigtable|skos:broader|Google -Knowledge Graph KG|skos:broader|Knowledge Base -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance Regarding point 2: uses InfoNCE loss, cf. 
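An illustrative full-text baseline for the prior-art entry above, comparing a patent application to existing patents with TF-IDF cosine similarity; the toy corpus is invented for the example, and the paper evaluates several richer representations:

```python
# Hedged baseline: full-text similarity via TF-IDF + cosine similarity.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

corpus = ["method for wireless power transfer", "battery charging circuit"]
application = ["inductive charging of a battery over a wireless link"]

vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(corpus)       # index the existing patents
q = vectorizer.transform(application)      # embed the new application
print(cosine_similarity(q, X))             # similarity to each prior patent
```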
[Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, this reminds me of something) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:tag|tag:microsoft_research -Singular value decomposition|skos:broader|Dimensionality reduction -HBase™|skos:broader|NOSQL -booking.com|skos:broader|Hôtel -Technical girls and guys|skos:broader|Technologie -Belzoni|skos:broader|Archéologue -New York Times|skos:broader|Presse -Graph visualization|skos:broader|Visualization Tools -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality.
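The TaxoExpan entry above trains with a grouped InfoNCE-style objective: the positive ⟨query, anchor⟩ pair is selected against negatives from the same group. A minimal PyTorch sketch, with the scores as placeholders:

```python
# Hedged sketch of a grouped InfoNCE objective; scores are placeholders.
import torch
import torch.nn.functional as F

def info_nce(scores):
    # scores: (batch, 1 + n_negatives); column 0 holds the positive pair
    labels = torch.zeros(scores.size(0), dtype=torch.long)
    return F.cross_entropy(scores, labels)  # pick the positive among the group

scores = torch.tensor([[2.5, 0.3, -1.2, 0.0]])  # positive first, then negatives
print(info_nce(scores))
```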
Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|sl:arxiv_firstAuthor|Andrew M. Dai -HTML Editor|skos:broader|HTML -Internet en Afrique|skos:broader|NTIC et développement -Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|sl:tag|tag:semi_supervised_learning -Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) meh More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|sl:arxiv_author|Tim Oppermann -Topic Modeling|skos:broader|NLP tasks / problems -Elda|skos:broader|Linked Data API -NG4J|skos:broader|Jena -Concept linking Concept extraction|skos:broader|Topic extraction Keyword extraction Keyphrase extraction -AI, robots and jobs|skos:broader|Travail -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction.
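A short gensim `Doc2Vec` sketch matching the Paragraph Vectors entry above; the two-document corpus stands in for the Wikipedia and arXiv collections used in the paper:

```python
# Hedged sketch: learning paragraph vectors with gensim's Doc2Vec.
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

docs = [
    TaggedDocument(words=["deep", "learning", "for", "text"], tags=["doc0"]),
    TaggedDocument(words=["shallow", "models", "for", "text"], tags=["doc1"]),
]
model = Doc2Vec(docs, vector_size=50, min_count=1, epochs=20)
vector = model.infer_vector(["neural", "text", "models"])  # embed an unseen document
```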
CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|sl:arxiv_author|Andrew McCallum -Lac de lave|skos:broader|Volcan -Portugal|skos:broader|Europe -End-To-End Memory Networks Neural network with a recurrent attention model over a possibly large external memory. cited by [#A. Bordes](/tag/antoine_bordes) at [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html) We introduce a neural network with a recurrent attention model over a possibly large external memory. The architecture is a form of Memory Network (Weston et al., 2015) but unlike the model in that work, it is trained end-to-end, and hence requires significantly less supervision during training, making it more generally applicable in realistic settings. It can also be seen as an extension of RNNsearch to the case where multiple computational steps (hops) are performed per output symbol. The flexibility of the model allows us to apply it to tasks as diverse as (synthetic) question answering and to language modeling. For the former our approach is competitive with Memory Networks, but with less supervision. For the latter, on the Penn TreeBank and Text8 datasets our approach demonstrates comparable performance to RNNs and LSTMs. In both cases we show that the key concept of multiple computational hops yields improved results.|sl:tag|tag:nlp_facebook -ML as a service|skos:broader|Machine learning -Hannibal|skos:broader|Guerres puniques -Bhaskar Mitra|skos:broader|NLP girls and guys -Memory networks|skos:broader|Memory in deep learning -First introduced by vivisimo (bought by IBM in 2012 - "Now, Vivisimo Velocity Platform is IBM Watson Explorer")|skos:broader|cluster analysis which seeks to build a hierarchy of clusters. 2 kinds: - Agglomerative - Divisive -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018.
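For the CRF tutorial entry above, a compact linear-chain CRF for sequence labeling using the `sklearn-crfsuite` package; the token features and labels are toy values:

```python
# Hedged sketch: a linear-chain CRF for sequence labeling (e.g. NER).
import sklearn_crfsuite

# One sentence; each token is a dict of features.
X_train = [[{"word": "Paris", "is_cap": True}, {"word": "is", "is_cap": False}]]
y_train = [["B-LOC", "O"]]

crf = sklearn_crfsuite.CRF(algorithm="lbfgs", max_iterations=50)
crf.fit(X_train, y_train)
print(crf.predict(X_train))  # predicted label sequence per sentence
```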
The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:ml_google -Désert|skos:broader|Géographie -KGAT: Knowledge Graph Attention Network for Recommendation To provide more accurate, diverse, and explainable recommendation, it is compulsory to go beyond modeling user-item interactions and take side information into account. Traditional methods like factorization machine (FM) cast it as a supervised learning problem, which assumes each interaction as an independent instance with side information encoded. Due to the overlook of the relations among instances or items (e.g., the director of a movie is also an actor of another movie), these methods are insufficient to distill the collaborative signal from the collective behaviors of users. In this work, we investigate the utility of knowledge graph (KG), which breaks down the independent interaction assumption by linking items with their attributes. We argue that in such a hybrid structure of KG and user-item graph, high-order relations --- which connect two items with one or multiple linked attributes --- are an essential factor for successful recommendation. We propose a new method named Knowledge Graph Attention Network (KGAT) which explicitly models the high-order connectivities in KG in an end-to-end fashion. It recursively propagates the embeddings from a node's neighbors (which can be users, items, or attributes) to refine the node's embedding, and employs an attention mechanism to discriminate the importance of the neighbors. Our KGAT is conceptually advantageous to existing KG-based recommendation methods, which either exploit high-order relations by extracting paths or implicitly modeling them with regularization. Empirical results on three public benchmarks show that KGAT significantly outperforms state-of-the-art methods like Neural FM and RippleNet. Further studies verify the efficacy of embedding propagation for high-order relation modeling and the interpretability benefits brought by the attention mechanism.|sl:arxiv_firstAuthor|Xiang Wang -foaf+ssl|skos:broader|SSL -Déclin de l'Europe|skos:broader|Europe -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. 
The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:computational_neuroscience -Robots and society Jobbotization|skos:broader|IA AI -Cryptography|skos:broader|Cryptage -"Why Should I Trust You?": Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks).
We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|sl:tag|tag:lime -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Adam Santoro -Text Similarity|skos:broader|NLP tasks / problems -Concept Extraction / Linking|skos:broader|NLP tasks / problems -Ruslan Salakhutdinov|skos:broader|AI girls and guys -Machine learning|skos:broader|Artificial Intelligence -A single decision tree is a highly non-linear classifier with typically low bias but high variance. Random forests address the problem of high variance by establishing a committee (i.e. average) of identically distributed single decision trees.|skos:broader|Methods that use multiple learning algorithms to obtain better predictive performance -Mur de Berlin|skos:broader|Allemagne -Validation: XML vs RDF|skos:broader|Validator -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. 
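A hedged usage sketch for the LIME entry above, with the reference `lime` package; the TF-IDF + logistic-regression pipeline and the tiny training set are illustrative:

```python
# Hedged sketch: explaining a text classifier's prediction with LIME.
from lime.lime_text import LimeTextExplainer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

train_texts = ["great movie", "terrible movie", "great plot", "terrible plot"]
train_labels = [1, 0, 1, 0]
pipeline = make_pipeline(TfidfVectorizer(), LogisticRegression())
pipeline.fit(train_texts, train_labels)

explainer = LimeTextExplainer(class_names=["neg", "pos"])
exp = explainer.explain_instance("great terrible movie",
                                 pipeline.predict_proba, num_features=3)
print(exp.as_list())  # locally important words and their weights
```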
(Hmm, I've seen this somewhere before) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|sl:tag|tag:attention_is_all_you_need -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:arxiv_author|Jan A. Botha -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|sl:arxiv_firstAuthor|Jie Yang -Graph database and NLP|skos:broader|Knowledge Representation -Taxe carbone|skos:broader|Climate crisis -Peter Bloem|skos:broader|NLP girls and guys -Pre-Trained Language Models|skos:broader|Deep NLP -NLP datasets|skos:broader|NLP tools -Deep learning: implementing|skos:broader|Deep Learning -A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level.
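The KG-BERT entry above scores a triple by serializing it as text for a BERT sequence classifier. A minimal sketch; the checkpoint and the way the relation and tail are packed into the second segment are assumptions, not the paper's exact input format:

```python
# Hedged sketch of the KG-BERT idea: score a serialized triple with BERT.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=2
)

head, relation, tail = "Japon", "skos:broader", "Asie"
inputs = tokenizer(head, f"{relation} {tail}", return_tensors="pt")
with torch.no_grad():
    plausibility = model(**inputs).logits.softmax(-1)[0, 1]  # P(triple is valid)
```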
A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there are also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|sl:tag|tag:autoencoder -Maladie|skos:broader|Santé -Personnage historique|skos:broader|Histoire -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:tag|tag:ai_facebook -Stanford NER|skos:broader|Named Entity Recognition -Theoretical Impediments to Machine Learning With Seven Sparks from the Causal Revolution To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks Current machine learning systems operate, almost exclusively, in a statistical, or model-free mode, which entails severe theoretical limits on their power and performance. Such systems cannot reason about interventions and retrospection and, therefore, cannot serve as the basis for strong AI. To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks.
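A bare-bones undercomplete autoencoder in Keras, illustrating the feature-fusion use surveyed in the autoencoder entry above: the bottleneck activations serve as the fused low-dimensional features. The layer sizes are arbitrary:

```python
# Hedged sketch: undercomplete autoencoder for nonlinear feature fusion.
import tensorflow as tf
from tensorflow.keras import layers

inp = layers.Input(shape=(30,))                    # e.g. tabular features
code = layers.Dense(8, activation="relu")(inp)     # fused low-dim representation
out = layers.Dense(30, activation="linear")(code)  # reconstruction

autoencoder = tf.keras.Model(inp, out)
encoder = tf.keras.Model(inp, code)                # extracts the fused features
autoencoder.compile(optimizer="adam", loss="mse")
```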
To demonstrate the essential role of such models, I will present a summary of seven tasks which are beyond reach of current machine learning systems and which have been accomplished using the tools of causal modeling.|sl:arxiv_author|Judea Pearl -Musubi|skos:broader|Téléphone -Hugo|skos:broader|Grand Homme -RDF and SOA|skos:broader|SOA -Conquistadores|skos:broader|Histoire -RDFLib|skos:broader|Python -2D-NLP|skos:broader|Information extraction -React.js|skos:broader|JavaScript librairies -Jena|skos:broader|Semantic Web Dev -Orri Erling|skos:broader|SW guys (and girls) -Hervé Kempf|skos:broader|Journal Le Monde -Fake Blogs|skos:broader|Blog -SW guys (and girls)|skos:broader|Semantic Web -Cons de Français|skos:broader|France -A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|sl:arxiv_author|Anna Rogers -Ciao Vito|skos:broader|Portland (OR) -Henry Story|skos:broader|Technical girls and guys -Fact-checking|skos:broader|Vérité -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the-art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|sl:arxiv_author|Sarath Chandar -k-means clustering|skos:broader|Data mining -Coursera: The Data Scientist’s Toolbox|skos:broader|Coursera -Une suite de matrices symétriques en rapport avec la fonction de Mertens we explore a class of equivalence relations over N from which is constructed a sequence of symmetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis.
This suggests that matrix analysis methods may play a more important part in this classical and difficult problem. In this paper we explore a class of equivalence relations over $\mathbb{N}^{\ast}$ from which is constructed a sequence of symmetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important part in this classical and difficult problem.|sl:tag|tag:arxiv_doc -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:tag|tag:tomas_mikolov -African art|skos:broader|Africa -Shoah|skos:broader|Nazisme -NIPS 2017|skos:broader|Deep Learning -Industrie du disque|skos:broader|Musique -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|sl:tag|tag:word_mover_s_distance -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass.
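Standard background for the Mertens-function entry above (well-known definitions, not taken from the paper itself):

```latex
% Mertens function: partial sums of the Moebius function mu.
M(n) = \sum_{k=1}^{n} \mu(k)
% Classical equivalence with the Riemann hypothesis:
\mathrm{RH} \iff M(n) = O\!\left(n^{1/2+\varepsilon}\right) \quad \text{for every } \varepsilon > 0
```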
We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|sl:arxiv_author|Bryan Wilder -One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the-art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|sl:arxiv_author|Karol Gregor -Pharaon|skos:broader|Egypte antique -RDFLib|skos:broader|RDF Tools -Enseignement supérieur|skos:broader|Education -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:arxiv_author|Quoc V. Le -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics.
This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem-specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|sl:arxiv_author|Yoni Schamroth -RDFa|skos:broader|XHTML -Java|skos:broader|Dev -Information retrieval|skos:broader|Informatique -Ecole Montessori|skos:broader|Ecole -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths.
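A worked Beta-Binomial version of the conjugate encoding described in the entry above: each level of a categorical feature is replaced by the posterior mean of its target rate, so rare levels shrink toward the prior. The prior parameters and counts are invented for the example:

```python
# Hedged sketch: conjugate Beta-Binomial encoding of a categorical feature.
alpha, beta = 1.0, 1.0  # Beta(1, 1) prior on the conversion rate

def encode(successes, trials):
    # Posterior mean of a Beta-Binomial model, used as the numeric encoding.
    return (alpha + successes) / (alpha + beta + trials)

stats = {"NYC": (30, 100), "SF": (2, 5), "rare_city": (0, 1)}
encoded = {level: encode(s, n) for level, (s, n) in stats.items()}
print(encoded)  # rare levels shrink toward the prior mean 0.5
```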
We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Razvan Pascanu -Histoire de France|skos:broader|Histoire -A Primer on Neural Network Models for Natural Language Processing Over the past few years, neural networks have re-emerged as powerful machine-learning models, yielding state-of-the-art results in fields such as image recognition and speech processing. More recently, neural network models started to be applied also to textual natural language signals, again with very promising results. This tutorial surveys neural network models from the perspective of natural language processing research, in an attempt to bring natural-language researchers up to speed with the neural techniques. The tutorial covers input encoding for natural language tasks, feed-forward networks, convolutional networks, recurrent networks and recursive networks, as well as the computation graph abstraction for automatic gradient computation.|sl:tag|tag:nn_4_nlp -Modeling car diversity|skos:broader|Car diversity -Topic Modeling over Short Texts|skos:broader|NLP: short texts -Keras embedding layer|skos:broader|Keras -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. 
We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|sl:arxiv_firstAuthor|Esteban Real -Sequence-To-Sequence Encoder-Decoder Architecture|skos:broader|Sequence-to-sequence learning -A Survey Of Cross-lingual Word Embedding Models Cross-lingual representations of words enable us to reason about word meaning in multilingual contexts and are a key facilitator of cross-lingual transfer when developing natural language processing models for low-resource languages. In this survey, we provide a comprehensive typology of cross-lingual word embedding models. We compare their data requirements and objective functions. The recurring theme of the survey is that many of the models presented in the literature optimize for the same objectives, and that seemingly different models are often equivalent modulo optimization strategies, hyper-parameters, and such. We also discuss the different ways cross-lingual word embeddings are evaluated, as well as future challenges and research horizons.|sl:tag|tag:survey -Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results. This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|sl:arxiv_author|Thomas Brendan Murphy -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. 
This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:tag|tag:arxiv_doc -Lingo|skos:broader|Clustering of text documents -Amazon Mechanical Turk|skos:broader|Artificial, Artificial Intelligence -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|sl:tag|tag:arxiv_doc -Innoraise|skos:broader|RDF and social networks -Pétrole et corruption|skos:broader|Corruption -Bitcoin|skos:broader|Virtual currency -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:arxiv_firstAuthor|Zihang Dai -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. 
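As a pocket-sized companion to the variational-inference review above: the ELBO it describes, E_q[log p(x, z)] - E_q[log q(z)], can be estimated by Monte Carlo for a toy conjugate model where the exact posterior is known. Everything below (model, observation, variational family) is an illustrative assumption:

```python
import numpy as np

rng = np.random.default_rng(0)
x_obs = 1.5  # a single toy observation

# Model: z ~ N(0, 1), x | z ~ N(z, 1). Variational family: q(z) = N(mu, sigma^2).
def log_norm(v, mean, std):
    return -0.5 * np.log(2 * np.pi * std**2) - (v - mean) ** 2 / (2 * std**2)

def elbo(mu, sigma, n_samples=10000):
    z = rng.normal(mu, sigma, size=n_samples)            # samples from q
    log_joint = log_norm(z, 0.0, 1.0) + log_norm(x_obs, z, 1.0)
    log_q = log_norm(z, mu, sigma)
    return np.mean(log_joint - log_q)                    # E_q[log p(x, z) - log q(z)]

# For this conjugate toy model the exact posterior is N(x_obs / 2, 1/2),
# so the ELBO should peak near mu = 0.75, sigma = sqrt(0.5).
for mu in (0.0, 0.75, 1.5):
    print(mu, elbo(mu, np.sqrt(0.5)))
```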
We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|sl:tag|tag:survey -Fabien Gandon|skos:broader|SW guys (and girls) -OWLED 2007 AND fps|skos:broader|OWLED 2007 -LOD use case|skos:broader|Linking Open Data -L'Afrique à la Bastille - 13 juillet 2007|skos:broader|Music of Africa -Hong Kong|skos:broader|Chine -Génomique|skos:broader|Genetics Génétique -Client side XSLT|skos:broader|XSLT -Martin Hepp|skos:broader|Technical girls and guys -Société de consommation|skos:broader|Capitalisme -Bag-of-words|skos:broader|NLP techniques -general implementation of (arbitrary order) linear chain Conditional Random Field (CRF) sequence models |skos:broader|sequence labelling tasks where the goal is to identify the names of entities in a sentence. Named entities can be proper nouns (locations, people, organizations...), or can be much more domain-specific, such as diseases or genes in biomedical NLP. -Transformers|skos:broader|NLP@Google -China's Social Credit System|skos:broader|Chine -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|sl:tag|tag:tutorial -NN 4 NLP|skos:broader|Neural networks -Efficient Contextual Representation Learning Without Softmax Layer how to accelerate contextual representation learning. Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity. We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.
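The network-embeddings tutorial summarized above covers random-walk methods in the DeepWalk family; the sketch below is not any one published method but conveys the flavor: sample walks, count window co-occurrences as skip-gram would, and factorize a PMI-style matrix. The graph, walk counts and dimensions are toy assumptions:

```python
import numpy as np

rng = np.random.default_rng(0)
# A tiny undirected graph as an adjacency list (toy data).
graph = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2, 4, 5], 4: [3, 5], 5: [3, 4]}
n = len(graph)

cooc = np.zeros((n, n))
for _ in range(500):                       # sample short random walks
    node = rng.integers(n)
    walk = [node]
    for _ in range(10):
        node = rng.choice(graph[node])
        walk.append(node)
    for i, u in enumerate(walk):           # count pairs within a window, as skip-gram would
        for v in walk[max(0, i - 2): i + 3]:
            cooc[u, v] += 1

# PMI-like reweighting, then truncated SVD gives low-dimensional node embeddings.
c = cooc + 1.0                             # smoothing to avoid log(0)
pmi = np.log(c * c.sum() / (c.sum(0, keepdims=True) * c.sum(1, keepdims=True)))
u, s, _ = np.linalg.svd(pmi)
emb = u[:, :2] * s[:2]                     # rank-2 node embeddings
print(emb)                                 # neighbors in the graph end up close in this space
```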
decouples learning contexts and words Instead of using a softmax layer to predict the distribution of the missing word, we utilize and extend the SEMFIT layer (Kumar and Tsvetkov, 2018) to predict the embedding of the missing word. Contextual representation models have achieved great success in improving various downstream tasks. However, these language-model-based encoders are difficult to train due to the large parameter sizes and high computational complexity. By carefully examining the training procedure, we find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. Therefore, we redesign the learning objective and propose an efficient framework for training contextual representation models. Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks.|sl:arxiv_author|Kai-Wei Chang -Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|sl:tag|tag:arxiv_doc -Shenzhen|skos:broader|Chine -Wuhan|skos:broader|Chine -k-nearest neighbors algorithm|skos:broader|Machine learning: techniques -Delip Rao|skos:broader|AI girls and guys -Consciousness Prior|skos:broader|Yoshua Bengio -Espèces menacées|skos:broader|Écologie -REST|skos:broader|Informatique -Blog software|skos:broader|Blog -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. 
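The softmax-bypass idea in the entry above (predict the missing word's pretrained embedding rather than a distribution over the whole vocabulary) can be sketched as a regression loss. The cosine loss, shapes and random vectors below are illustrative assumptions, not the paper's exact SEMFIT objective:

```python
import numpy as np

rng = np.random.default_rng(0)
vocab_size, dim = 5000, 300
pretrained = rng.normal(size=(vocab_size, dim))         # frozen word embeddings (toy stand-in)

def cosine_loss(predicted, target_ids):
    # Instead of a softmax over the vocabulary, regress onto the target word's embedding.
    tgt = pretrained[target_ids]
    num = (predicted * tgt).sum(-1)
    den = np.linalg.norm(predicted, axis=-1) * np.linalg.norm(tgt, axis=-1)
    return (1.0 - num / den).mean()

hidden = rng.normal(size=(8, dim))                       # contextual states for 8 positions
targets = rng.integers(vocab_size, size=8)               # ids of the missing words
print(cosine_loss(hidden, targets))
# The output layer now costs O(dim) per token rather than O(vocab_size).
```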
Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|sl:arxiv_author|Matthew E. Peters -Neutrino|skos:broader|Physique des particules -XSS|skos:broader|Malicious code -TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification. LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. We also perform a comprehensive analysis to demonstrate great future opportunities. 
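A minimal sketch of the Longformer attention pattern described above: a sliding local window plus a few global tokens, so the number of allowed query-key pairs grows linearly rather than quadratically with sequence length. The window size and global positions below are toy choices:

```python
import numpy as np

def longformer_mask(seq_len, window, global_idx=()):
    """Boolean attention mask: True at (i, j) means position i may attend to j."""
    i = np.arange(seq_len)[:, None]
    j = np.arange(seq_len)[None, :]
    mask = np.abs(i - j) <= window // 2          # local sliding window
    for g in global_idx:                         # global tokens attend / are attended everywhere
        mask[g, :] = True
        mask[:, g] = True
    return mask

m = longformer_mask(seq_len=12, window=4, global_idx=(0,))
print(m.astype(int))
print("entries kept:", m.sum(), "of", m.size)    # grows ~linearly with seq_len, not quadratically
```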
The data and code of the dataset are provided in \\url{https://github.com/wenhuchen/Table-Fact-Checking}.|sl:arxiv_author|Wenhu Chen -Coding|skos:broader|Software -Agriculture française|skos:broader|Agriculture -Venus de Brassempouy|skos:broader|Paléolithique -Océanie|skos:broader|Géographie -Sentence Embeddings|skos:broader|Embeddings -Arts premiers|skos:broader|Art -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:arxiv_author|Weijie Liu -Lord of the Flies|skos:broader|L'humanité mérite de disparaître -String-searching algorithm|skos:broader|Algorithmes -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. 
We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance. Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, this reminds me of something) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:tag|tag:taxonomy_expansion_task -Pulsar|skos:broader|Astrophysique -Concept Extraction / Linking|skos:broader|Automatic tagging -Grippe aviaire|skos:broader|Maladie -Zinder|skos:broader|Niger -Coursera: A History of the World since 1300|skos:broader|Coursera -End-To-End Memory Networks Neural network with a recurrent attention model over a possibly large external memory. cited by [#A. Bordes](/tag/antoine_bordes) at [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html) We introduce a neural network with a recurrent attention model over a possibly large external memory. The architecture is a form of Memory Network (Weston et al., 2015) but unlike the model in that work, it is trained end-to-end, and hence requires significantly less supervision during training, making it more generally applicable in realistic settings. It can also be seen as an extension of RNNsearch to the case where multiple computational steps (hops) are performed per output symbol. The flexibility of the model allows us to apply it to tasks as diverse as (synthetic) question answering and to language modeling.
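The "select the positive pair among the group's negatives" objective mentioned in the TaxoExpan notes above is the InfoNCE loss; a minimal sketch over made-up scores:

```python
import numpy as np

def info_nce(scores):
    """scores: (n_groups, 1 + n_negatives); column 0 is the positive pair's score.
    Cross-entropy of picking the positive among the group's candidates."""
    scores = scores - scores.max(axis=1, keepdims=True)   # numerical stability
    log_softmax = scores - np.log(np.exp(scores).sum(axis=1, keepdims=True))
    return -log_softmax[:, 0].mean()

rng = np.random.default_rng(0)
scores = rng.normal(size=(4, 6))        # 4 query concepts, each with 1 positive + 5 negative anchors
scores[:, 0] += 3.0                     # make positives score higher: the loss should be small
print(info_nce(scores))
```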
For the former our approach is competitive with Memory Networks, but with less supervision. For the latter, on the Penn TreeBank and Text8 datasets our approach demonstrates comparable performance to RNNs and LSTMs. In both cases we show that the key concept of multiple computational hops yields improved results.|sl:tag|tag:arxiv_doc -ESWC 2008|skos:broader|ESWC -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:arxiv_author|Ruslan Salakhutdinov -Montagne|skos:broader|Géographie -Graph-based Text Representations|skos:broader|NLP: Text Representation -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). 
These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:tag|tag:concept_bottleneck_models -Cinéma français|skos:broader|France -End-To-End Memory Networks Neural network with a recurrent attention model over a possibly large external memory. cited by [#A. Bordes](/tag/antoine_bordes) at [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html) We introduce a neural network with a recurrent attention model over a possibly large external memory. The architecture is a form of Memory Network (Weston et al., 2015) but unlike the model in that work, it is trained end-to-end, and hence requires significantly less supervision during training, making it more generally applicable in realistic settings. It can also be seen as an extension of RNNsearch to the case where multiple computational steps (hops) are performed per output symbol. The flexibility of the model allows us to apply it to tasks as diverse as (synthetic) question answering and to language modeling. For the former our approach is competitive with Memory Networks, but with less supervision. For the latter, on the Penn TreeBank and Text8 datasets our approach demonstrates comparable performance to RNNs and LSTMs. In both cases we show that the key concept of multiple computational hops yields improved results.|sl:arxiv_author|Rob Fergus -Word embedding: evaluation|skos:broader|Word embeddings -A Review of Relational Machine Learning for Knowledge Graphs Relational machine learning studies methods for the statistical analysis of relational, or graph-structured, data. In this paper, we provide a review of how such statistical models can be trained on large knowledge graphs, and then used to predict new facts about the world (which is equivalent to predicting new edges in the graph). In particular, we discuss two fundamentally different kinds of statistical relational models, both of which can scale to massive datasets. The first is based on latent feature models such as tensor factorization and multiway neural networks. The second is based on mining observable patterns in the graph. We also show how to combine these latent and observable models to get improved modeling power at decreased computational cost. Finally, we discuss how such statistical models of graphs can be combined with text-based information extraction methods for automatically constructing knowledge graphs from the Web. To this end, we also discuss Google's Knowledge Vault project as an example of such combination.|sl:tag|tag:arxiv_doc -Danemark|skos:broader|Pays d'Europe -Apple Java|skos:broader|Java -Parc du W|skos:broader|Niger -Mac OS X|skos:broader|Apple Software -FastText|skos:broader|NLP@Facebook -Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and using them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news item.
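A minimal sketch of the concept bottleneck structure described above: the input is mapped to human-interpretable concept predictions, and the label is predicted only from those concepts, so editing a concept at test time propagates to the prediction. The weights below are random stand-ins, not a trained model:

```python
import numpy as np

rng = np.random.default_rng(0)
n_features, n_concepts, n_classes = 10, 3, 2

W_concept = rng.normal(size=(n_features, n_concepts))   # x -> predicted concepts
W_label = rng.normal(size=(n_concepts, n_classes))      # concepts -> label (the bottleneck)

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def predict(x, concept_override=None):
    concepts = sigmoid(x @ W_concept)                    # e.g. "bone spur present?"
    if concept_override is not None:
        concepts = concept_override                      # test-time human intervention
    logits = concepts @ W_label
    return concepts, logits.argmax(-1)

x = rng.normal(size=(1, n_features))
c_hat, y_hat = predict(x)
print("predicted concepts:", c_hat, "label:", y_hat)
# A clinician who disagrees with a concept can edit it and propagate the change:
_, y_fixed = predict(x, concept_override=np.array([[1.0, 0.0, 1.0]]))
print("label after intervention:", y_fixed)
```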
Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|sl:tag|tag:wikipedia -URI Synonymity|skos:broader|Linked Data -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. 
By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Sophia Sanchez -OWL RL|skos:broader|OWL -Semantic Interoperability|skos:broader|Semantic Web -Knowledge-based AI|skos:broader|Artificial Intelligence -Edvige|skos:broader|Sarkozy -CamemBERT|skos:broader|BERT -Voyage en Chine|skos:broader|Chine -Logiciel libre|skos:broader|Informatique -AI + Knowledge Bases|skos:broader|Artificial Intelligence -Espagne|skos:broader|Pays d'Europe -Google Patents|skos:broader|Google -Stanford|skos:broader|Universités américaines -DSSM (Deep Semantic Similarity Model)|skos:broader|Sentence Embeddings -Burningbird Mad Techie Woman|skos:broader|Technical guys -Cybersecurity Sécurité informatique|skos:broader|Sécurité -Lagos|skos:broader|Ville -LOD: Limitations on browseable data|skos:broader|Linked Data -Nick Clegg|skos:broader|Grande-Bretagne -Ecocide|skos:broader|Crime -Keras embedding layer|skos:broader|Embeddings -Mercure (Planète)|skos:broader|Système solaire -Bertrand Sajus|skos:broader|Ministère de la culture -Bernard Stiegler|skos:broader|Philosophe -Shoira Otabekova|skos:broader|Ouzbékistan -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. 
By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Maurice Chiang -Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|sl:tag|tag:entity_linking -Vive le capitalisme !|skos:broader|Capitalisme -CKAN|skos:broader|Data portal -Drupal/RDF|skos:broader|Semantic CMS -NLP|skos:broader|Artificial Intelligence -Benjamin Nowack|skos:broader|Technical girls and guys -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Eva Blomqvist -Révolution française|skos:broader|Histoire de France -Afrique francophone|skos:broader|Afrique -Missoula Floods|skos:broader|Catastrophe naturelle -Word embeddings with lexical resources|skos:broader|Sense embeddings -France|skos:broader|Pays d'Europe -Novartis|skos:broader|Industrie pharmaceutique -- Intent classification: predicting the intent of a query - slot filling extracts semantic concepts in the query (a sequence labeling task that tags the input word sequence). For example the user query could be “Find me an action movie by Steven Spielberg”. 
The intent here is “find_movie” while the slots are “genre” with value “action” and “directed_by” with value “Steven Spielberg”.|skos:broader|extracting semantic concepts in a query (a sequence labeling task that tags the input word sequence). -HTML5|skos:broader|HTML -Pedro Almodóvar|skos:broader|Espagne -Metadata indexing|skos:broader|Semantic Web -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|sl:tag|tag:word_embedding -Representing Sentences as Low-Rank Subspaces We observe a simple geometry of sentences -- the word representations of a given sentence roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. A sentence of N words is a matrix (300, N) (if 300 is the dim of the word embeddings space). We take the eg. 4 (hyperparam) heaviest singular values - a subspace with dim 4 Similarity between docs: principal angle between the subspaces (reminiscent of cosine similarity) Sentences are important semantic units of natural language. A generic, distributional representation of sentences that can capture the latent semantics is beneficial to multiple downstream applications. We observe a simple geometry of sentences -- the word representations of a given sentence (on average 10.23 words in all SemEval datasets with a standard deviation 4.84) roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. Such an unsupervised representation is empirically validated via semantic textual similarity tasks on 19 different datasets, where it outperforms the sophisticated neural network models, including skip-thought vectors, by 15% on average.|sl:arxiv_author|Suma Bhat -Athènes|skos:broader|Grèce -Web Pollution|skos:broader|Grands problèmes -Acoustique musicale|skos:broader|Acoustique -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. 
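The intent-classification / slot-filling entry above is easiest to see on its own example query; below is a toy illustration of the usual BIO tagging convention and of decoding tags back into slot-value pairs (labels are made up to match the example):

```python
# Toy illustration of the intent / slot-filling data format described above.
query = ["Find", "me", "an", "action", "movie", "by", "Steven", "Spielberg"]
intent = "find_movie"
slots = ["O", "O", "O", "B-genre", "O", "O", "B-directed_by", "I-directed_by"]

def decode_slots(tokens, tags):
    """Collect contiguous B-/I- spans into (slot, value) pairs."""
    spans, current, label = [], [], None
    for tok, tag in list(zip(tokens, tags)) + [("", "O")]:   # sentinel flushes the last span
        if tag.startswith("B-"):
            if current:
                spans.append((label, " ".join(current)))
            current, label = [tok], tag[2:]
        elif tag.startswith("I-") and current:
            current.append(tok)
        else:
            if current:
                spans.append((label, " ".join(current)))
            current, label = [], None
    return spans

print(intent, decode_slots(query, slots))
# -> find_movie [('genre', 'action'), ('directed_by', 'Steven Spielberg')]
```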
In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|sl:tag|tag:2d_nlp -Pandémie|skos:broader|Maladie contagieuse -Enseignement scientifique|skos:broader|Science -Notes d'install|skos:broader|Installing apps -Ambiguity (NLP)|skos:broader|NLP tasks / problems -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:tag|tag:information_theory_and_deep_learning -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2. The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. 
For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:tag|tag:arxiv_doc -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed- forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:tag|tag:nn_4_nlp -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|sl:arxiv_author|Ashutosh Adhikari -Cocteau|skos:broader|Réalisateur -Forêt|skos:broader|Arbres -SonarQube|skos:broader|Dev tools -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. 
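The two-step FixMatch recipe described above (pseudo-label from the weakly-augmented view, train the strongly-augmented view against it, keep only confident pseudo-labels) reduces to a few lines; the logits below are random stand-ins for model outputs:

```python
import numpy as np

rng = np.random.default_rng(0)
n_classes, tau = 10, 0.95          # tau: confidence threshold for retaining pseudo-labels

def softmax(z):
    z = z - z.max(-1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(-1, keepdims=True)

# Stand-ins for model outputs on two augmentations of the same unlabeled batch.
logits_weak = rng.normal(size=(5, n_classes)) * 3       # weak augmentation -> pseudo-labels
logits_strong = rng.normal(size=(5, n_classes))         # strong augmentation -> prediction target

probs = softmax(logits_weak)
pseudo = probs.argmax(-1)
keep = probs.max(-1) >= tau                              # only confident pseudo-labels count

log_p_strong = np.log(softmax(logits_strong))
loss = -(log_p_strong[np.arange(5), pseudo] * keep).sum() / max(keep.sum(), 1)
print("retained:", keep.sum(), "unsupervised loss:", loss)
```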
In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:tag|tag:kd_mkb_related -Zaïre|skos:broader|RDC -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:tag|tag:tree_embeddings -Grande-Bretagne|skos:broader|Royaume Uni -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. 
We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:arxiv_firstAuthor|Ashish Vaswani -RDF Data source|skos:broader|RDF -Multnomah Falls|skos:broader|Oregon -Etat islamique|skos:broader|Terrorisme islamiste -Human-AI collaboration|skos:broader|Artificial Intelligence -Périclès|skos:broader|Personnage historique -JDBC|skos:broader|SQL -One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|sl:arxiv_author|Ivo Danihelka -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:tag|tag:arxiv_doc -Bio inspired computing devices|skos:broader|Brain -A Survey on Deep Learning for Named Entity Recognition mainly focus on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. 
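The mechanism at the core of the Transformer entry above is scaled dot-product attention, softmax(QK^T / sqrt(d_k)) V; a single-head sketch with toy shapes:

```python
import numpy as np

def attention(Q, K, V):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V."""
    d_k = Q.shape[-1]
    scores = Q @ K.T / np.sqrt(d_k)
    scores -= scores.max(-1, keepdims=True)              # numerical stability
    weights = np.exp(scores)
    weights /= weights.sum(-1, keepdims=True)
    return weights @ V

rng = np.random.default_rng(0)
Q, K, V = (rng.normal(size=(4, 8)) for _ in range(3))    # 4 positions, d_k = 8
print(attention(Q, K, V).shape)                          # (4, 8)
```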
In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|sl:arxiv_author|Jing Li -Gilles Lepin|skos:broader|Ami -Honda|skos:broader|Japon -Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupt path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new "compositional" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as "What languages are spoken by people living in Lisbon?". However, knowledge graphs often have missing facts (edges) which disrupt path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|sl:tag|tag:knowledge_graph_embeddings -Pétrole|skos:broader|Matières premières -Multilinguisme|skos:broader|Langues -C2GWeb: SEO|skos:broader|C2GWeb -RDF-driven web sites|skos:broader|RDF -Mbilia Bel|skos:broader|Zaïre -Label-Embedding for Image Classification Attributes act as intermediate representations that enable parameter sharing between classes, a must when training data is scarce. We propose to view attribute-based image classification as a label-embedding problem: each class is embedded in the space of attribute vectors. We introduce a function that measures the compatibility between an image and a label embedding. The parameters of this function are learned on a training set of labeled samples to ensure that, given an image, the correct classes rank higher than the incorrect ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets show that the proposed framework outperforms the standard Direct Attribute Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a built-in ability to leverage alternative sources of information instead of or in addition to attributes, such as e.g. class hierarchies or textual descriptions.
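The compositional path queries in the entry above can be sketched with TransE-style additive embeddings (one of the base models the paper considers): compose relation vectors along the path and rank entities by distance to the result. The vectors below are rigged by hand rather than trained:

```python
import numpy as np

rng = np.random.default_rng(0)
dim = 50
# Toy TransE-style embeddings for a path query like
# "lisbon -> lives_in^-1 -> speaks", answered by adding relation vectors.
entities = {name: rng.normal(size=dim) for name in ["lisbon", "ana", "portuguese"]}
relations = {name: rng.normal(size=dim) for name in ["lives_in_inv", "speaks"]}

# Pretend training worked: make the composed path land on the right answer.
entities["portuguese"] = entities["lisbon"] + relations["lives_in_inv"] + relations["speaks"]

query = entities["lisbon"] + relations["lives_in_inv"] + relations["speaks"]
scores = {e: -np.linalg.norm(query - v) for e, v in entities.items()}
print(max(scores, key=scores.get))   # -> portuguese
```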
Moreover, label embedding encompasses the whole range of learning settings from zero-shot learning to regular learning with a large number of labeled examples.|sl:arxiv_author|Cordelia Schmid -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:nn_symbolic_ai_hybridation -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. 
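The compatibility function in the label-embedding entry above is typically bilinear, F(x, y) = θ(x)ᵀ W φ(y), scoring an image feature against each class's attribute vector; a toy, untrained sketch:

```python
import numpy as np

rng = np.random.default_rng(0)
d_image, d_attr, n_classes = 64, 12, 5

W = rng.normal(size=(d_image, d_attr))                    # bilinear compatibility (toy, untrained)
class_attributes = rng.normal(size=(n_classes, d_attr))   # per-class attribute vectors phi(y)

def predict(image_feat):
    # F(x, y) = theta(x)^T W phi(y): score every class by compatibility, take the best.
    scores = image_feat @ W @ class_attributes.T
    return scores.argmax()

print(predict(rng.normal(size=d_image)))
# Zero-shot flavor: adding a new class only requires its attribute vector phi(y),
# not retraining W.
```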
Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|sl:arxiv_author|Jaime Carbonell -Winch 5|skos:broader|Livre -Naive Bayes classifier|skos:broader|Bayesian classification -Deep Learning Book|skos:broader|AI: books & journals -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called the knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models simply by being equipped with a KG, without pre-training by itself, because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:tag|tag:nlp_using_knowledge_graphs -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets.
Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:tag|tag:bert -Variable Selection Methods for Model-based Clustering Model-based clustering is a popular approach for clustering multivariate data which has seen applications in numerous fields. Nowadays, high-dimensional data are more and more common and the model-based clustering approach has adapted to deal with the increasing dimensionality. In particular, the development of variable selection techniques has received a lot of attention and research effort in recent years. Even for small size problems, variable selection has been advocated to facilitate the interpretation of the clustering results. This review provides a summary of the methods developed for variable selection in model-based clustering. Existing R packages implementing the different methods are indicated and illustrated in application to two data analysis examples.|sl:arxiv_author|Michael Fop -Paris NLP meetup|skos:broader|NLP event -Cryptage|skos:broader|Cybersecurity Sécurité informatique -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT is unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings. However, the purpose of SBERT sentence embeddings is not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT.
We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|sl:arxiv_author|Nils Reimers -Keyword/keyphrase extraction|skos:broader|Keywords -Question Answering over Knowledge Graphs via Structural Query Patterns Natural language question answering over knowledge graphs is an important and interesting task as it enables common users to gain accurate answers in an easy and intuitive manner. However, it remains a challenge to bridge the gap between unstructured questions and structured knowledge graphs. To address the problem, a natural discipline is building a structured query to represent the input question. Searching the structured query over the knowledge graph can produce answers to the question. Distinct from the existing methods that are based on semantic parsing or templates, we propose an effective approach powered by a novel notion, structural query pattern, in this paper. Given an input question, we first generate its query sketch that is compatible with the underlying structure of the knowledge graph. Then, we complete the query graph by labeling the nodes and edges under the guidance of the structural query pattern. Finally, answers can be retrieved by executing the constructed query graph over the knowledge graph. Evaluations on three question answering benchmarks show that our proposed approach outperforms state-of-the-art methods significantly.|sl:arxiv_author|Weiguo Zheng -Brain implants|skos:broader|Brain -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|sl:tag|tag:variational_bayesian_methods -Programming language|skos:broader|Programming -Multi-label classification|skos:broader|Statistical classification -fps AND LDOW2008|skos:broader|LDOW2008 -Google Fusion Tables|skos:broader|Tables -Semantic Wiki|skos:broader|Semantic Web : Application -Calvin|skos:broader|Religion -fps@EC-Web'14|skos:broader|fps: paper -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. 
In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:arxiv_author|Xu Han -No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity... Traditionally, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer’s output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity.
Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point $x$ is similar to a set of positive points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize. A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow. In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved. As a result, the proxy-loss improves on state-of-the-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:tag|tag:arxiv_doc -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection (Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections.
Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:arxiv_author|Mostafa Dehghani -Semanlink|skos:broader|Semantic tagging -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:arxiv_author|Emma Pierson -PCA is a statistical procedure that converts a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components. - PCA is based on extracting the axes on which the data shows the highest variability. PCA can be done by eigenvalue decomposition of a data covariance matrix or singular value decomposition of a data matrix, usually after mean centering and normalizing the data matrix for each attribute|skos:broader|process of reducing the number of random variables under consideration. Can be divided into feature selection and feature extraction. -Named Entity Disambiguation using Deep Learning on Graphs Evaluation of different deep learning techniques to create a context vector from graphs, aimed at high-accuracy NED. (neural approach for entity disambiguation using graphs as background knowledge) We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task... [published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) In NED, the system must be able to generate a context for an entity in a text and an entity in a knowledge base, then correctly link the two.
Explore whether representing graphs as triplets is more useful than using the full topological information of the graph We tackle NED by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task. In addition, a new dataset (WikidataDisamb) is created to allow a clean and scalable evaluation of NED with Wikidata entries, and to be used as a reference in future research. In the end our results show that a Bi-LSTM encoding of the graph triplets performs best, improving upon the baseline models and scoring an F1 value of 91.6% on the WikidataDisamb test set|sl:arxiv_author|Stefano Bragaglia -ML|skos:broader|IA AI -Hugo|skos:broader|Poète -Placentaires, marsupiaux et monotrèmes|skos:broader|Marsupiaux -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:arxiv_author|Piotr Bojanowski -Akhênaton|skos:broader|Egypte antique -Pillage de vestiges antiques|skos:broader|Archéologie -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network.
We explore the use of various state-of-the-art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|sl:tag|tag:arxiv_doc -Cerveau|skos:broader|Biologie -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|sl:arxiv_author|Cheng Guo -Clustering of text documents|skos:broader|Clustering -Botanique|skos:broader|Biology -Fungal infections|skos:broader|Problèmes sanitaires -Feynman|skos:broader|Physique -Turtle|skos:broader|David Beckett -VW|skos:broader|Automotive -LOD cloud|skos:broader|Linking Open Data -RDF embeddings|skos:broader|Embeddings -Chinois|skos:broader|Chine -semblog|skos:broader|Blog software -JPA|skos:broader|Java -Question Answering|skos:broader|NLP tasks / problems -Jeux en ligne|skos:broader|Internet -SPARQL Clipboard|skos:broader|SPARQL -Quentin Tarantino|skos:broader|Réalisateur -C2GWeb-JS|skos:broader|Configuration ontology -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU.
Our findings suggest unsupervised pre-training on a large corpus of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|sl:arxiv_firstAuthor|Aditya Siddhant -Grands problèmes|skos:broader|Etat du monde -Franco-Allemand|skos:broader|Europe -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet.|sl:tag|tag:kg_and_nlp -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful.
It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:arxiv_firstAuthor|Jacob Devlin -Niklas Lindström|skos:broader|Technical girls and guys -Linking Open Data|skos:broader|Linked Data -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance. Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, that reminds me of something) Assumes each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, it only attempts to find a single parent node for each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge.
Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of ⟨query concept, anchor concept⟩ pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:tag|tag:text_in_kg_embeddings -OWL DL|skos:broader|Description Logic -Eclipse|skos:broader|Dev -Bernhard Haslhofer|skos:broader|SW guys (and girls) -Automatic tagging|skos:broader|NLP tasks / problems -Michel Serres|skos:broader|Intellectuel -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|sl:tag|tag:deep_learning -Ian Davis|skos:broader|SW guys (and girls) -application of machine learning in the construction of ranking models. Training data consists of lists of items with some partial order specified between items in each list. |skos:broader|the machine learning task of inferring a function from labeled training data. -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks.
Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|sl:arxiv_author|Arman Cohan -Bayesian classification|skos:broader|Statistical classification -Jean Rouch|skos:broader|Niger -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:tag|tag:good -Genetics Génétique|skos:broader|Biology -Python 4 Data science|skos:broader|Data science -A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. 
The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Locally Linear Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there are also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|sl:tag|tag:arxiv_doc -Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity. Suppose you want to implement a ML-based search engine that, given a query, ranks a variable number of documents by relevance. Metric learning is essentially learning a function that, given two inputs, tells you how relevant they are. [src](https://twitter.com/ericjang11/status/1259207970916667392?s=20)|skos:broader|The goal is to learn from examples a similarity function that measures how similar or related two objects are. Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity. Distance metric learning is a major tool for a variety of problems in computer vision. It has successfully been employed for image retrieval, near duplicate detection, clustering and zero-shot learning. ([src](doc/2020/02/_1703_07464_no_fuss_distance_m)) -MaxEnt for NLP|skos:broader|Maxent models -Culture et sem web|skos:broader|Culture -Encoder-Decoder architecture|skos:broader|Machine learning: techniques -KGAT: Knowledge Graph Attention Network for Recommendation To provide more accurate, diverse, and explainable recommendation, it is compulsory to go beyond modeling user-item interactions and take side information into account. Traditional methods like factorization machine (FM) cast it as a supervised learning problem, which assumes each interaction as an independent instance with side information encoded. Due to the overlook of the relations among instances or items (e.g., the director of a movie is also an actor of another movie), these methods are insufficient to distill the collaborative signal from the collective behaviors of users. In this work, we investigate the utility of knowledge graph (KG), which breaks down the independent interaction assumption by linking items with their attributes. We argue that in such a hybrid structure of KG and user-item graph, high-order relations --- which connect two items with one or multiple linked attributes --- are an essential factor for successful recommendation. We propose a new method named Knowledge Graph Attention Network (KGAT) which explicitly models the high-order connectivities in KG in an end-to-end fashion.
It recursively propagates the embeddings from a node's neighbors (which can be users, items, or attributes) to refine the node's embedding, and employs an attention mechanism to discriminate the importance of the neighbors. Our KGAT is conceptually advantageous to existing KG-based recommendation methods, which either exploit high-order relations by extracting paths or implicitly modeling them with regularization. Empirical results on three public benchmarks show that KGAT significantly outperforms state-of-the-art methods like Neural FM and RippleNet. Further studies verify the efficacy of embedding propagation for high-order relation modeling and the interpretability benefits brought by the attention mechanism.|sl:arxiv_author|Tat-Seng Chua -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|sl:tag|tag:guillaume_lample -Documentaire TV|skos:broader|Télévision -Paléontologie humaine|skos:broader|Paléontologie -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|sl:tag|tag:information_bottleneck_method -RDF Vocabularies|skos:broader|RDF -Mur de Berlin|skos:broader|RDA -Robotisation|skos:broader|Robotique -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. 
These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:tag|tag:allen_institute_for_ai_a2i -Biology|skos:broader|Science -Microsoft Media Center|skos:broader|Microsoft -Politique de l'enfant unique|skos:broader|Chine -Extrémophiles|skos:broader|Biology -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Revieww](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|sl:tag|tag:sentence_embeddings -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. 
Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:arxiv_author|Yuxiang Wu -Category Embedding|skos:broader|Embeddings -Tabulator|skos:broader|RDF browser -Knowledge Graph Conference 2019|skos:broader|Conférences -Deep Patent Landscaping Model Using Transformer and Graph Embedding a transformer encoder for analyzing textual data present in patent documents and a graph convolutional network for analyzing patent metadata. A benchmarking dataset for patent landscaping based on patent trends reports published by the Korean Patent Office. Data acquisition using Google's BigQuery public datasets. 10% improvement comparing to Google’s proposed Automated Patent Landscaping. Empirical analysis of the importance of features (text vs metadata, citations vs classification) Patent landscaping is a method used for searching related patents during a research and development (R&D) project. To avoid the risk of patent infringement and to follow current trends in technology, patent landscaping is a crucial task required during the early stages of an R&D project. As the process of patent landscaping requires advanced resources and can be tedious, the demand for automated patent landscaping has been gradually increasing. However, a shortage of well-defined benchmark datasets and comparable models makes it difficult to find related research studies. In this paper, we propose an automated patent landscaping model based on deep learning. To analyze the text of patents, the proposed model uses a modified transformer structure. To analyze the metadata of patents, we propose a graph embedding method that uses a diffusion graph called Diff2Vec. Furthermore, we introduce four benchmark datasets for comparing related research studies in patent landscaping. The datasets are produced by querying Google BigQuery, based on a search formula from a Korean patent attorney. The obtained results indicate that the proposed model and datasets can attain state-of-the-art performance, as compared with current patent landscaping models.|sl:arxiv_author|Sungchul Choi -Contestation|skos:broader|I like I like -Toumaï|skos:broader|Origines de l'homme -Makolab|skos:broader|Entreprise -V.S. Naipaul V. S. Naipaul|skos:broader|Ecrivain -Covid19|skos:broader|Pandémie -Yoav Goldberg|skos:broader|NLP girls and guys -Notes on Cardinal's Matrices These notes are motivated by the work of Jean-Paul Cardinal on symmetric matrices related to the Mertens function. He showed that certain norm bounds on his matrices implied the Riemann hypothesis. 
Using a different matrix norm we show an equivalence of the Riemann hypothesis to suitable norm bounds on his matrices in the new norm. Then we specify a deformed version of his Mertens function matrices that unconditionally satisfies a norm bound that is of the same strength as his Riemann hypothesis bound.|sl:arxiv_firstAuthor|Jeffrey C. Lagarias -Maven|skos:broader|Dev tools -Semanlink Feature Request|skos:broader|Semanlink todo -RSS Dev|skos:broader|RSS -Indonésie|skos:broader|Asie -Web architecture|skos:broader|Internet -Photo aérienne|skos:broader|Photo -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|sl:tag|tag:arxiv_doc -Coursera: NLP class|skos:broader|NLP -Cross-validation|skos:broader|Machine learning: techniques -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer. EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance. Based on transformer architecture. Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text.
These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:arxiv_author|Eunsol Choi -François Scharffe|skos:broader|SW guys (and girls) -John Steinbeck|skos:broader|Ecrivain -Implementing a Jena Graph|skos:broader|Jena dev -ML ensemble meta-algorithm for primarily reducing bias, and also variance in supervised learning, and a family of machine learning algorithms that convert weak learners to strong ones. |skos:broader|Methods that use multiple learning algorithms to obtain better predictive performance -Falashas|skos:broader|Juifs -Knowledge Graph Construction|skos:broader|Knowledge Graphs -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:arxiv_author|Been Kim -Neuroevolution|skos:broader|Neural networks -Jena|skos:broader|RDF Tools -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. 
In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|sl:tag|tag:arxiv_doc -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_author|Lhassane Idoumghar -A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|sl:arxiv_firstAuthor|Anna Rogers -TF-IDF|skos:broader|Information retrieval: techniques -MP3|skos:broader|Musique -535|skos:broader|Catastrophe naturelle -backplanejs|skos:broader|JavaScript librairies -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. 
[Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Houman Alborzi -Leigh Dodds|skos:broader|Technical girls and guys -List-only Entity Linking|skos:broader|Entity linking -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. These information have been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. 
Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:arxiv_author|Kyungjae Lee -Hypercard|skos:broader|Apple -OpenStructs|skos:broader|Semantic framework -Hugh Glaser|skos:broader|Technical girls and guys -Sesame|skos:broader|RDF Framework -Arbres|skos:broader|Plante -JSON-LD APIs|skos:broader|JSON-LD -Unsupervised deep pre-training|skos:broader|Unsupervised machine learning -Text in KG embeddings|skos:broader|Knowledge Graphs and NLP -Language model|skos:broader|NLP techniques -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:tag|tag:nlp_microsoft -Falashas|skos:broader|Ethiopie -SWAD-E|skos:broader|Semantic Web -Bombay|skos:broader|Ville -Tomcat 7|skos:broader|Tomcat -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. we reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations. We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|sl:tag|tag:sentence_embeddings -Audi|skos:broader|Automobile -No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity... 
Traditionally, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer’s output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity. Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point $x$ is similar to a set of positive points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize. A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow. In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved. As a result, the proxy-loss improves on state-of-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:tag|tag:similarity_learning -Solid|skos:broader|Decentralized social network -Irlande|skos:broader|Europe -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs.
A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|sl:arxiv_author|Stephan Baier -Dan Jurafsky|skos:broader|NLP girls and guys -Finding RDF documents|skos:broader|RDF -Alex Allauzen|skos:broader|NLP girls and guys -RDF and Property Graphs|skos:broader|RDF graphs -Blé|skos:broader|Agriculture -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. 
By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:connectionist_vs_symbolic_debate -Time Series|skos:broader|Machine learning: problems -Diacritics in URI|skos:broader|Encoding -Cognition-as-a-Service|skos:broader|Cognitive computing -C2GWeb|skos:broader|C2G -RNN based Language Model|skos:broader|Language model -many early knowledge graph embeddings do not use literal attributes, only structure of the graph... eg.: Description-Embodied Knowledge Representation Learning (DKRL) learns a structure-based representation (as TransE) and a description-based representation that can be used in an integrated scoring function, thus combining the relative information coming from both text and facts.|skos:broader|How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. -Pretrained models|skos:broader|Machine learning: techniques -BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. 
Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|sl:arxiv_firstAuthor|Qian Chen -Drug-resistant germs|skos:broader|Grands problèmes -Survey analysis|skos:broader|NLP: use cases -Bolsonaro|skos:broader|Neo-fascites -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|sl:arxiv_firstAuthor|Will Grathwohl -Matthew Honnibal|skos:broader|NLP girls and guys -Micropayments on the web|skos:broader|Web -Google Play|skos:broader|Google -A Survey on Deep Learning for Named Entity Recognition mainly focus on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding state-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|sl:arxiv_firstAuthor|Jing Li -Juliana Rotich|skos:broader|Technical girls and guys -Sahel|skos:broader|Afrique -TopBraid/SPIN|skos:broader|TopBraid -Génocide rwandais|skos:broader|Génocide -Laure Soulier|skos:broader|NLP girls and guys -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e.
the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:tag|tag:pre_trained_language_models -Censure et maltraitance animale|skos:broader|Cruauté -Pic de Hubbert|skos:broader|Pétrole -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|sl:arxiv_author|Ledell Wu -OWL|skos:broader|Ontologies -ensemble learning technique that combines multiple classification or regression models via a meta-classifier or a meta-regressor. The base level models are trained based on a complete training set, then the meta-model is trained on the outputs of the base level model as features. ([source](/doc/2019/07/ensemble_learning_to_improve_ma)) |skos:broader|Methods that use multiple learning algorithms to obtain better predictive performance -Tony Blair|skos:broader|Chef d'état -Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. 
A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|sl:arxiv_author|Marcel van Gerven -General Motors|skos:broader|USA -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:tag|tag:embeddings -NoSQL pour les nuls|skos:broader|Pour les nuls -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. 
fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|sl:tag|tag:api - “a good cluster—or document grouping—is one, which possesses a good, readable description”.|skos:broader|Open Source Search Results Clustering Engine. It can automatically organize small collections of documents into thematic categories. -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:arxiv_author|Jacob Devlin -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. 
In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:arxiv_firstAuthor|Matthew E. Peters -BNF|skos:broader|France -Adaptative boosting (Authors won Gödel Prize for their work) output of the 'weak learners' is combined into a weighted sum that represents the final output of the boosted classifier. Sensitive to noisy data and outliers (says wikipedia) AdaBoost (with decision trees as the weak learners) is often referred to as the best out-of-the-box classifier |skos:broader|ML ensemble meta-algorithm for primarily reducing bias, and also variance in supervised learning, and a family of machine learning algorithms that convert weak learners to strong ones. -Monsanto|skos:broader|Biotech industry -Médicaments génériques|skos:broader|Propriété intellectuelle -JSP|skos:broader|Internet Related Technologies -Semi-supervised learning|skos:broader|Machine learning: techniques -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|sl:tag|tag:lexical_ambiguity -Paul Krugman|skos:broader|Prix Nobel d'économie -Restful semantic web services|skos:broader|Semantic Web Services -httpRange-14 (solution)|skos:broader|httpRange-14 -535|skos:broader|Histoire -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. 
This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|sl:tag|tag:knowledge_graph_embeddings -LHC|skos:broader|Expérience scientifique -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:arxiv_firstAuthor|Zhengyan Zhang -Kadhafi|skos:broader|Lybie -LSTM|skos:broader|Recurrent neural network -Insecte|skos:broader|Arthropodes -HypiosVoCampParisMay2010|skos:broader|VoCamp -Ontologie visualization|skos:broader|Ontologies -Afripedia|skos:broader|Wikimedia -Naomi Klein|skos:broader|Critique du capitalisme -GRDDL|skos:broader|XHTML -A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. 
We then outline the directions for further research.|sl:tag|tag:bert -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|sl:tag|tag:arxiv_doc -Africa's Last Wild Places|skos:broader|Afrique -Civilisations précolombiennes|skos:broader|Archéologie -Semanlink|skos:broader|Personal Knowledge Graph -Steve Cayzer|skos:broader|SW guys (and girls) -construction of a decision tree from class-labeled training tuples frequent problem: overfitting (=high variance) |skos:broader|the machine learning task of inferring a function from labeled training data. -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. 
We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:arxiv_author|Samy Bengio -Spritz|skos:broader|GUI -Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.|sl:arxiv_author|Mirella Lapata -Machine Learning Basics|skos:broader|Machine learning -Capitalism Surveillance|skos:broader|Big Brother -Tours|skos:broader|Ville -Homme politique|skos:broader|Politique -W3C Working Draft|skos:broader|W3C -Conscience artificielle|skos:broader|Anticipation -Mondeca|skos:broader|French Semantic web company -OWLED 2007|skos:broader|OWL -Medical Data|skos:broader|Santé -Negative Sampling|skos:broader|Machine learning: techniques -Simple idea|skos:broader|Good idea -Configuration ontology|skos:broader|C2GWeb -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. 
Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:arxiv_author|Jesse Dodge -Autoencoder|skos:broader|Unsupervised machine learning -Olaf Hartig|skos:broader|SW guys (and girls) -Baïkal|skos:broader|Russie -Chris Bizer|skos:broader|Freie Universität Berlin -ElasticSearch: nearest neighbor(s)|skos:broader|ElasticSearch -Cinéma français|skos:broader|Cinéma -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:arxiv_author|Manzil Zaheer -Apigee|skos:broader|API management -SOA|skos:broader|Web Services -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|sl:arxiv_author|Roberto Navigli -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. 
Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|sl:arxiv_author|Steven Van Canneyt -Pyramide|skos:broader|Architecture -Turtle in HTML|skos:broader|Turtle -Antibiotic resistance|skos:broader|Problèmes sanitaires -Bulgarie|skos:broader|Pays d'Europe -LDP: updates|skos:broader|Linked Data Platform -Information theory|skos:broader|Information -Alpinisme|skos:broader|Sport -Semantic Camp Paris|skos:broader|Paris -A generative model that allows sets of observations to be explained by unobserved groups that explain why some parts of the data are similar. Models the intuition that the topic of a document will probabilistically influence the author’s choice of words when writing the document. Documents are interpreted as a mixture of topics (a probability distribution over topics), and topics as a probability distribution over words. Encodes the intuition that documents cover a small number of topics and that topics often use a small number of words LDA is an extension of [LSI/pLSI](latent_semantic_analysis) |skos:broader|A statistical model for discovering the abstract topics that occur in a collection of documents. -SemWeb China|skos:broader|Chine : technologie -A Primer on Neural Network Models for Natural Language Processing Over the past few years, neural networks have re-emerged as powerful machine-learning models, yielding state-of-the-art results in fields such as image recognition and speech processing. More recently, neural network models started to be applied also to textual natural language signals, again with very promising results. This tutorial surveys neural network models from the perspective of natural language processing research, in an attempt to bring natural-language researchers up to speed with the neural techniques. 
The tutorial covers input encoding for natural language tasks, feed-forward networks, convolutional networks, recurrent networks and recursive networks, as well as the computation graph abstraction for automatic gradient computation.|sl:arxiv_author|Yoav Goldberg -Reader mode (browsers)|skos:broader|Brouteur -Guha|skos:broader|SW guys (and girls) -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.|sl:tag|tag:quoc_le -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. 
(Hmm, that reminds me of something) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:arxiv_author|Jiaming Shen -Cybersex|skos:broader|Internet -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task.
According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:tag|tag:arxiv_doc -Toumaï|skos:broader|Separation of man and ape -NLP + Human Resources|skos:broader|NLP: use cases -Chirac|skos:broader|Chef d'état -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Souvik Sen -The information bottleneck method We define the relevant information in a signal x ∈ X as being the information that this signal provides about another signal y ∈ Y. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal x requires more than just predicting y, it also requires specifying which features of X play a role in the prediction. We formalize this problem as that of finding a short code for X that preserves the maximum information about Y. That is, we squeeze the information that X provides about Y through a ‘bottleneck’ formed by a limited set of codewords X̃... This approach yields an exact set of self consistent equations for the coding rules X → X̃ and X̃ → Y. (from the intro): how to define meaningful / relevant information? An issue left out of information theory by Shannon (focus on the problem of transmitting information rather than judging its value to the recipient) - which leads one to consider statistical and information theoretic principles as almost irrelevant for the question of meaning.
In contrast, we argue here that information theory, in particular lossy source compression, provides a natural quantitative approach to the question of “relevant information.” Specifically, we formulate a variational principle for the extraction or efficient representation of relevant information. We define the relevant information in a signal $x\\in X$ as being the information that this signal provides about another signal $y\\in Y$. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal $x$ requires more than just predicting $y$, it also requires specifying which features of $X$ play a role in the prediction. We formalize this problem as that of finding a short code for $X$ that preserves the maximum information about $Y$. That is, we squeeze the information that $X$ provides about $Y$ through a ‘bottleneck’ formed by a limited set of codewords $\\tilde{X}$. This constrained optimization problem can be seen as a generalization of rate distortion theory in which the distortion measure $d(x,\\tilde{x})$ emerges from the joint statistics of $X$ and $Y$. This approach yields an exact set of self consistent equations for the coding rules $X \\to \\tilde{X}$ and $\\tilde{X} \\to Y$. Solutions to these equations can be found by a convergent re-estimation method that generalizes the Blahut-Arimoto algorithm. Our variational principle provides a surprisingly rich framework for discussing a variety of problems in signal processing and learning, as will be described in detail elsewhere.|sl:tag|tag:arxiv_doc -sig.ma|skos:broader|Linked Data Browser -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:tag|tag:nlp_google -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem.
Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:tag|tag:graph_embeddings -M3 Multi Media Museum|skos:broader|Digital Collections -Windows Vista|skos:broader|Windows -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:arxiv_author|Matthew E. Peters -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. 
This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|sl:arxiv_author|Sebastian Ruder -NSA spying scandal|skos:broader|Big Brother -GPL|skos:broader|Open Source -Horizontal gene transfer|skos:broader|Genetics Génétique -The goal is to learn from examples a similarity function that measures how similar or related two objects are. Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity. Distance metric learning is a major tool for a variety of problems in computer vision. It has successfully been employed for image retrieval, near duplicate detection, clustering and zero-shot learning. ([src](doc/2020/02/_1703_07464_no_fuss_distance_m))|skos:broader|the machine learning task of inferring a function from labeled training data. -René Vautier|skos:broader|Anticolonialisme -Sibérie|skos:broader|Russie -Sahara|skos:broader|Désert -MusicBrainz|skos:broader|Musique -Wembedder: Wikidata entity embedding web service web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk I present a web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk. A REST API is implemented. Together with the Wikidata API the web service exposes a multilingual resource for over 600'000 Wikidata items and properties.|sl:tag|tag:word2vec -Multi-Document Summarization|skos:broader|Text Summarization -Conférences|skos:broader|Event -Python tips|skos:broader|Python -LDOW2013|skos:broader|LDOW -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. 
In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:arxiv_author|Geoffrey E. Hinton -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:arxiv_author|Xin Luna Dong -Marchés financiers|skos:broader|Economie -Knowledge Representation|skos:broader|Artificial Intelligence -Linked Data demo|skos:broader|SW demo -Lutte anti-terroriste|skos:broader|Terrorisme -MOAT|skos:broader|Tagging -Reasoning|skos:broader|Logic -Euro|skos:broader|Money -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. 
DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|sl:tag|tag:arxiv_doc -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:tag|tag:word_embedding -Open University|skos:broader|Université -GraphDB|skos:broader|Graph database -Extinction de masse de la fin du permien|skos:broader|Extinction de masse -Mots/expressions remarquables|skos:broader|Langues -Enswers|skos:broader|Winch 5 -NLP@Google|skos:broader|NLP Teams -AppleScript|skos:broader|Mac dev -Graph Attention Networks |skos:broader|Graph neural networks -Fukushima|skos:broader|Japon -Named Entity Recognition with Extremely Limited Data Named Entity Search (NES) We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? 
This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries. We do not propose this as a replacement for NER, but as something to be used for an ephemeral or contextual class of entity, when it does not make sense to label hundreds or thousands of instances to learn a classifier Traditional information retrieval treats named entity recognition as a pre-indexing corpus annotation task, allowing entity tags to be indexed and used during search. Named entity taggers themselves are typically trained on thousands or tens of thousands of examples labeled by humans. However, there is a long tail of named entities classes, and for these cases, labeled data may be impossible to find or justify financially. We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries.|sl:tag|tag:arxiv_doc -Sylvain|skos:broader|Ami -Boucle ferroviaire d’Afrique de l’Ouest|skos:broader|Afrique de l'Ouest -Jussieu|skos:broader|Sorbonne -Cascade|skos:broader|Eau -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. 
For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:arxiv_author|Alexander C. Berg -Squeak|skos:broader|Smalltalk -Championnat du monde d'athlétisme|skos:broader|Championnat du monde -Data Visualization Tools|skos:broader|Data visualisation -A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers Most state-of-the-art models for named entity recognition (NER) rely on the availability of large amounts of labeled data, making them challenging to extend to new, lower-resourced languages. However, there are now several proposed approaches involving either cross-lingual transfer learning, which learns from other highly resourced languages, or active learning, which efficiently selects effective training data based on model predictions. This paper poses the question: given this recent progress, and limited human annotation, what is the most effective method for efficiently creating high-quality entity recognizers in under-resourced languages? Based on extensive experimentation using both simulated and real human annotation, we find a dual-strategy approach best, starting with a cross-lingual transferred model, then performing targeted annotation of only uncertain entity spans in the target language, minimizing annotator effort. Results demonstrate that cross-lingual transfer is a powerful tool when very little data can be annotated, but an entity-targeted annotation strategy can achieve competitive accuracy quickly, with just one-tenth of training data.|sl:arxiv_author|Zaid Sheikh -Biotechnologies Biotechnologies|skos:broader|Technologie -Linear algebra|skos:broader|Algèbre -Technique of analyzing relationships between a set of documents and the terms they contain, by producing a set of concepts related to the documents and terms. LSA assumes that words that are close in meaning will occur in similar pieces of text. LSI transforms documents from either bag-of-words or (preferably) TfIdf-weighted space into a latent space of a lower dimensionality. A matrix containing word counts (in rows) per paragraph (in columns) is constructed from a large piece of text. [Singular value decomposition (SVD)](singular_value_decomposition) is used to reduce the number of rows while preserving the similarity structure among columns. Similarities between words and/or docs can then be evaluated using cosine-distance in the low-dimensional space - pros: - alleviates the problem of synonymy (note: Wikipedia contradicts itself regarding polysemy; I would say LSI cannot solve this problem) - can output topics in a ranked order. - cons: - requires a num_topics parameter. - dimensions have no easily interpretable meaning in natural language - SVD is computation intensive (still a problem with improved algorithms?) - Wikipedia says that the probabilistic model of LSA does not match observed data: LSA assumes that words and documents form a joint Gaussian model (ergodic hypothesis), while a Poisson distribution has been observed.
Thus, a newer alternative is probabilistic latent semantic analysis, based on a multinomial model, which is reported to give better results than standard LSA. [Gensim tutorial about transformations](https://markroxor.github.io/gensim/static/notebooks/Topics_and_Transformations.html) says that LSI training is unique in that it can continue at any point, simply by providing more training documents. (LSI or LSA? Truncated SVD applied to document similarity is called Latent Semantic Indexing (LSI), but it is called Latent Semantic Analysis (LSA) when applied to word similarity.) 4 ways of looking at the Truncated SVD ([cf.](http://www.jair.org/media/2934/live-2934-4846-jair.pdf)): - Latent meaning: the truncated SVD creates a low-dimensional linear mapping between words in row space and contexts in column space which captures the hidden (latent) meaning in the words and contexts - Noise reduction: the truncated SVD can be seen as a smoothed version of the original matrix (which captures the signal and leaves out the noise) - A way to discover high-order co-occurrence: when 2 words appear in similar contexts - Sparsity reduction: the original matrix is sparse, but the truncated SVD is dense. Sparsity may be viewed as a problem of insufficient data and truncated SVD as a way of simulating the missing text [See also Introduction to Information Retrieval Manning 2008](https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html) |skos:broader|Algebraic model for representing text documents as vectors of identifiers such as index terms. Documents and queries are represented as vectors. Each dimension corresponds to a separate term. If a term occurs in the document, its value in the vector is non-zero. One way of computing the value: TF-IDF -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|sl:tag|tag:arxiv_doc -Allen Institute for AI (AI2)|skos:broader|NLP Teams -Few-shot learning|skos:broader|Machine learning -Rayons cosmiques|skos:broader|Astronomie -Archéologie|skos:broader|Science -Impôt|skos:broader|Prélèvements obligatoires -Seq2Seq with Attention|skos:broader|Sequence-to-sequence learning -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model.
We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.|sl:arxiv_author|Urvashi Khandelwal -JavaScript framework|skos:broader|js -Romancier|skos:broader|Intellectuel -XMLHttpRequest|skos:broader|Web app dev -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. 
We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:arxiv_firstAuthor|Thibault Févry -Benjamin Franklin|skos:broader|Grand Homme -Stanford POS Tagger|skos:broader|NLP@Stanford -Accelerated Mobile Pages (AMP)|skos:broader|Google -Antibiotic resistance|skos:broader|Grands problèmes -Archéologie amazonienne|skos:broader|Indiens du Brésil -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Daan Wierstra -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. 
We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Daniel Rodriguez -Tim Bray|skos:broader|Technical girls and guys -Finlande|skos:broader|Pays d'Europe -Snorkel|skos:broader|Labeled Data -Thriller|skos:broader|Film -Sumer|skos:broader|Mésopotamie -NLP: book|skos:broader|AI: books & journals -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE .|sl:arxiv_author|Pingping Huang -String-searching algorithm|skos:broader|Text processing -Programming|skos:broader|Dev -Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study data redundancy (reverse relations), Cartesian product relations A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world In the active research area of employing embedding models for knowledge graph completion, particularly for the task of link prediction, most prior studies used two benchmark datasets FB15k and WN18 in evaluating such models.
Most triples in these and other datasets in such studies belong to reverse and duplicate relations which exhibit high data redundancy due to semantic duplication, correlation or data incompleteness. This is a case of excessive data leakage---a model is trained using features that otherwise would not be available when the model needs to be applied for real prediction. There are also Cartesian product relations for which every triple formed by the Cartesian product of applicable subjects and objects is a true fact. Link prediction on the aforementioned relations is easy and can be achieved with even better accuracy using straightforward rules instead of sophisticated embedding models. A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world. This paper is the first systematic study with the main objective of assessing the true effectiveness of embedding models when the unrealistic triples are removed. Our experiment results show these models are much less accurate than what we used to perceive. Their poor accuracy renders link prediction a task without truly effective automated solution. Hence, we call for re-investigation of possible effective approaches.|sl:arxiv_author|Mohammed Samiul Saeef (Department of Computer Science and Engineering, University of Texas at Arlington) -Scandale des écoutes en Allemagne|skos:broader|Angela Merkel -Semantic Web Services|skos:broader|Web Services -Pays-Bas|skos:broader|Europe -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_firstAuthor|Yonatan Bisk -Cambridge Analytica|skos:broader|Facebook -No more DRM|skos:broader|DRM -Sahara|skos:broader|Afrique -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.
We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:arxiv_author|Alex Salcianu -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to ‘vector similarity searching’ over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:arxiv_author|Jan Pomikálek -Logic and semantic web|skos:broader|Semantic Web -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g.
CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|sl:tag|tag:backpropagation_vs_biology -Banlieue|skos:broader|Société -public-lod@w3.org|skos:broader|Mailing list -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|sl:arxiv_author|Guillaume Lample -Orson Welles|skos:broader|Réalisateur -Zero-shot Entity Linking|skos:broader|Entity linking -Orri Erling|skos:broader|Technical girls and guys -Virtuoso Open-Source Edition|skos:broader|Virtuoso -LOD & museum|skos:broader|Linking Open Data -Open University|skos:broader|Education -4store|skos:broader|TripleStore -Global brain|skos:broader|Anticipation -Gravity|skos:broader|Physics -Sarkozy et extrème droite|skos:broader|Sarkozyland -P=NP|skos:broader|Grands problèmes mathématiques -Automotive Ontology Working Group|skos:broader|Automobile -RDF embeddings|skos:broader|RDF -Synthetic life|skos:broader|Genetics Génétique -Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images Deep neural networks (DNNs) have recently been achieving state-of-the-art performance on a variety of pattern-recognition tasks, most notably visual classification problems. Given that DNNs are now able to classify objects in images with near-human-level performance, questions naturally arise as to what differences remain between computer and human vision. A recent study revealed that changing an image (e.g. of a lion) in a way imperceptible to humans can cause a DNN to label the image as something else entirely (e.g. mislabeling a lion a library). Here we show a related result: it is easy to produce images that are completely unrecognizable to humans, but that state-of-the-art DNNs believe to be recognizable objects with 99.99% confidence (e.g. labeling with certainty that white noise static is a lion). Specifically, we take convolutional neural networks trained to perform well on either the ImageNet or MNIST datasets and then find images with evolutionary algorithms or gradient ascent that DNNs label with high confidence as belonging to each dataset class. It is possible to produce images totally unrecognizable to human eyes that DNNs believe with near certainty are familiar objects, which we call fooling images (more generally, fooling examples). 
Our results shed light on interesting differences between human vision and current DNNs, and raise questions about the generality of DNN computer vision.|sl:arxiv_author|Jeff Clune -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:arxiv_author|Hanjun Dai -DBTune|skos:broader|Musique -Venus Express|skos:broader|Vénus -Identifying triples|skos:broader|RDF -Dereferencing HTTP URIs|skos:broader|URI dereferencing -Semantic Web : entreprise Semantic Web: enterprise|skos:broader|Web sémantique sw -Histoire des Jermas|skos:broader|Histoire de l'Afrique -Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content charactering, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts.
This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn se- mantically representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo- texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model comparing with the state-of-the-art models.|sl:tag|tag:topic_models_word_embedding -Recurrent Memory Networks for Language Modeling Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. We demonstrate the power of RMN on language modeling and sentence completion tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin.|sl:arxiv_author|Christof Monz -Marchés financiers|skos:broader|Finance -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. 
Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|sl:tag|tag:information_extraction -Deep Patent Landscaping Model Using Transformer and Graph Embedding a transformer encoder for analyzing textual data present in patent documents and a graph convolutional network for analyzing patent metadata. A benchmarking dataset for patent landscaping based on patent trends reports published by the Korean Patent Office. Data acquisition using Google's BigQuery public datasets. 10% improvement compared with Google’s proposed Automated Patent Landscaping. Empirical analysis of the importance of features (text vs metadata, citations vs classification) Patent landscaping is a method used for searching related patents during a research and development (R&D) project. To avoid the risk of patent infringement and to follow current trends in technology, patent landscaping is a crucial task required during the early stages of an R&D project. As the process of patent landscaping requires advanced resources and can be tedious, the demand for automated patent landscaping has been gradually increasing. However, a shortage of well-defined benchmark datasets and comparable models makes it difficult to find related research studies. In this paper, we propose an automated patent landscaping model based on deep learning. To analyze the text of patents, the proposed model uses a modified transformer structure. To analyze the metadata of patents, we propose a graph embedding method that uses a diffusion graph called Diff2Vec. Furthermore, we introduce four benchmark datasets for comparing related research studies in patent landscaping. The datasets are produced by querying Google BigQuery, based on a search formula from a Korean patent attorney. The obtained results indicate that the proposed model and datasets can attain state-of-the-art performance, as compared with current patent landscaping models.|sl:tag|tag:attention_is_all_you_need -Word embeddings|skos:broader|Distributional semantics -Industrie de l'armement|skos:broader|industrie -Missing Labels (ML)|skos:broader|Machine learning: problems -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB).
In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:arxiv_firstAuthor|Bhuwan Dhingra -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:arxiv_author|Yew Siang Tang -Named Entity Recognition|skos:broader|NLP tasks / problems -The information bottleneck method We define the relevant information in a signal x ∈ X as being the information that this signal provides about another signal y ∈ Y. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal x requires more than just predicting y, it also requires specifying which features of X play a role in the prediction. We formalize this problem as that of finding a short code for X that preserves the maximum information about Y. 
That is, we squeeze the information that X provides about Y through a ‘bottleneck’ formed by a limited set of codewords X̃... This approach yields an exact set of self consistent equations for the coding rules X → X̃ and X̃ → Y. (from the intro): how to define meaningful/relevant information? An issue left out of information theory by Shannon (focus on the problem of transmitting information rather than judging its value to the recipient), which leads to considering statistical and information-theoretic principles as almost irrelevant to the question of meaning. In contrast, we argue here that information theory, in particular lossy source compression, provides a natural quantitative approach to the question of “relevant information.” Specifically, we formulate a variational principle for the extraction or efficient representation of relevant information. We define the relevant information in a signal $x\\in X$ as being the information that this signal provides about another signal $y\\in Y$. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal $x$ requires more than just predicting $y$, it also requires specifying which features of $X$ play a role in the prediction. We formalize this problem as that of finding a short code for $X$ that preserves the maximum information about $Y$. That is, we squeeze the information that $X$ provides about $Y$ through a ‘bottleneck’ formed by a limited set of codewords $\\tilde{X}$. This constrained optimization problem can be seen as a generalization of rate distortion theory in which the distortion measure $d(x,\\tilde{x})$ emerges from the joint statistics of $X$ and $Y$. This approach yields an exact set of self consistent equations for the coding rules $X \\to \\tilde{X}$ and $\\tilde{X} \\to Y$. Solutions to these equations can be found by a convergent re-estimation method that generalizes the Blahut-Arimoto algorithm. Our variational principle provides a surprisingly rich framework for discussing a variety of problems in signal processing and learning, as will be described in detail elsewhere.|sl:arxiv_author|Fernando C. Pereira (AT&T Shannon Laboratory) -Spectral clustering|skos:broader|Clustering -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer.
With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:tag|tag:google_research -Ethnologie|skos:broader|Géographie -Times|skos:broader|Presse -PCA is a statistical procedure that converts a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components. - PCA is based on extracting the axes on which the data shows the highest variability. PCA can be done by eigenvalue decomposition of a data covariance matrix or singular value decomposition of a data matrix, usually after mean centering and normalizing the data matrix for each attribute|skos:broader|In machine learning, unsupervised learning refers to the problem of trying to find hidden structure in unlabeled data -Orri Erling|skos:broader|OpenLink Software -Quora Question Pairs|skos:broader|Paraphrase identification -SWSE|skos:broader|Semantic Search -Mbilia Bel|skos:broader|Music of Africa -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations—as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Leveraging data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms.
Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|sl:arxiv_author|Thomas Demeester -Charlie Hebdo|skos:broader|Rigolo -Apache Shiro|skos:broader|apache.org -Ammonite|skos:broader|Fossile -Tag Clusters|skos:broader|Semanlink related -BERT-of-Theseus: Compressing BERT by Progressive Module Replacing approach to compress BERT by progressive module replacing. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter [Github](https://github.com/JetRunner/BERT-of-Theseus) In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models, and smooths the training process. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter, liberating human effort from hyper-parameter tuning. Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.|sl:tag|tag:arxiv_doc -NLP: Text Representation|skos:broader|NLP techniques -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:tag|tag:meaning_in_nlp -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. 
Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|sl:arxiv_firstAuthor|Xiaojing Liu -Crise financière|skos:broader|Marchés financiers -Glaciologie|skos:broader|Science -instead of just processing the words in a sentence from left to right, also go from right to left, allowing later words to help disambiguate the meaning of earlier words and phrases|skos:broader|Long short-term memory: recurrent neural network architecture well-suited for time series with long time lags between important events. (cf the problem of long time dependencies, such as when you want to predict the next word in I grew up in France… I speak fluent [?]). A solution to the vanishing gradient problem in RNNs -Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|sl:arxiv_author|Yundong Zhang -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. 
In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:tag|tag:arxiv_doc -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Yintao Liu -fps@EC-Web'14|skos:broader|Semantic SEO -RDF|skos:broader|Semantic Web -Sparte|skos:broader|Grèce antique -Sarkozy et extrème droite|skos:broader|Sarkozy -Learning Confidence for Out-of-Distribution Detection in Neural Networks Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. 
Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.|sl:arxiv_firstAuthor|Terrance DeVries -Urbanisation|skos:broader|Monde moderne -SPARQL Construct|skos:broader|SPARQL -Apache Hive|skos:broader|Database -Patrick Gallinari|skos:broader|AI girls and guys -Telelamarna|skos:broader|Akhênaton -L'Afrique à la Bastille - 13 juillet 2007|skos:broader|Liberté, égalité, fraternité -Apache Hive|skos:broader|Data Warehouse -Belzoni|skos:broader|Egypte antique -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|sl:tag|tag:knowledge_graph_embeddings -Smart energy grids|skos:broader|Energie -Machine Learning tool|skos:broader|Machine learning -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:tag|tag:arxiv_doc -Words|skos:broader|Langues -Predicting numeric values from text|skos:broader|NLP tasks / problems -SQL to RDF mapping|skos:broader|Relational Databases and the Semantic Web -Semantic Web and OOP|skos:broader|Semantic Web -POS POS Tagging|skos:broader|Sequence Tagging -Linked Data Platform|skos:broader|Read-Write Linked Data -Calais|skos:broader|Semantic Web : Tools -Semantically searchable distributed repository|skos:broader|Semantic Web : Application -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. 
Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:tag|tag:poincare_embeddings -Steve Cayzer|skos:broader|HP -OWL 1.1|skos:broader|OWL -Lalibela|skos:broader|Histoire de l'Afrique -Labeling data|skos:broader|Labeled Data -Mladic|skos:broader|Génocide -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. 
We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|sl:arxiv_author|Stefan Wermter -Java 1.5 Mac OS X|skos:broader|Apple Java -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_firstAuthor|Peter W. Battaglia -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. 
We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|sl:tag|tag:quoc_le -Gilles Taddei|skos:broader|Ami -Beijing Genomics Institute|skos:broader|Génomique -Socrate|skos:broader|Philosophe -GWT|skos:broader|XMLHttpRequest -Mac dev|skos:broader|Macintosh -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. [conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperforms several popular clustering methods when tested on three public short text datasets.|sl:tag|tag:surprises_me -Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Represents each document by a dense vector which is trained to predict words in the document.
Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantics of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|sl:tag|tag:arxiv_doc -Etat de la France|skos:broader|France -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|sl:arxiv_author|Percy Liang -Catastrophe écologique|skos:broader|Catastrophe -create.js|skos:broader|JavaScript -Virtuoso Universal Server|skos:broader|Virtuoso -93|skos:broader|Banlieue -Good identifiers for product types based on Wikipedia. GoodRelations-compatible OWL DL class definitions for ca.
300,000 types of product or services that have an entry in the English Wikipedia |skos:broader|An ontology for linking product descriptions and business entities on the Web -Python|skos:broader|Programming language -NLP|skos:broader|Favoris -End-To-End Entity Linking|skos:broader|Entity discovery and linking -Pubby|skos:broader|SPARQL endpoint -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|sl:arxiv_author|Charu Sharma -Celera ou Craig Venter|skos:broader|Biotechnologies Biotechnologies -Keyword/keyphrase extraction|skos:broader|Automatic tagging -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. 
Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Lucy Wang -RDF Schema|skos:broader|RDF Vocabularies -Transductive Learning|skos:broader|Machine learning: techniques -Common Web Language|skos:broader|Semantic Web -Rigolo|skos:broader|Fun -Ukraine|skos:broader|Ex URSS URSS -Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|sl:arxiv_author|Aaron van den Oord -David Cameron|skos:broader|Royaume Uni -Multiple Knowledge Bases|skos:broader|Knowledge bases -JavaScript DOM|skos:broader|JavaScript -A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications Graph is an important data representation which appears in a wide diversity of real-world scenarios. Effective graph analytics provides users a deeper understanding of what is behind the data, and thus can benefit a lot of useful applications such as node classification, node recommendation, link prediction, etc. However, most graph analytics methods suffer the high computation and space cost. Graph embedding is an effective yet efficient way to solve the graph analytics problem. It converts the graph data into a low dimensional space in which the graph structural information and graph properties are maximally preserved. 
In this survey, we conduct a comprehensive review of the literature in graph embedding. We first introduce the formal definition of graph embedding as well as the related concepts. After that, we propose two taxonomies of graph embedding which correspond to what challenges exist in different graph embedding problem settings and how the existing work address these challenges in their solutions. Finally, we summarize the applications that graph embedding enables and suggest four promising future research directions in terms of computation efficiency, problem settings, techniques and application scenarios.|sl:arxiv_firstAuthor|Hongyun Cai -huggingface/transformers|skos:broader|Hugging Face -TheWebConf 2018|skos:broader|J'y étais -Relational Databases and the Semantic Web|skos:broader|Database -Manipulations génétiques|skos:broader|Biotechnologies Biotechnologies -Tombouctou|skos:broader|Mali -Deep Learning: A Critical Appraisal Although deep learning has historical roots going back decades, neither the term deep learning nor the approach was popular just over five years ago, when the field was reignited by papers such as Krizhevsky, Sutskever and Hinton's now classic (2012) deep network model of Imagenet. What has the field discovered in the five subsequent years? Against a background of considerable progress in areas such as speech recognition, image recognition, and game playing, and considerable enthusiasm in the popular press, I present ten concerns for deep learning, and suggest that deep learning must be supplemented by other techniques if we are to reach artificial general intelligence.|sl:tag|tag:ia_limites -HBase™|skos:broader|Big Data -Nasca|skos:broader|Pérou -Antifascisme|skos:broader|Fascisme -SOAP vs REST|skos:broader|Web services : critique -Embeddings|skos:broader|Machine learning: techniques -NoSQL and eventual consistency|skos:broader|NOSQL -Référentiel des opérations|skos:broader|SW in Technical Automotive Documentation -RDF forms|skos:broader|Forms -Tasmanian devil|skos:broader|Espèces menacées -Hierarchical temporal memory|skos:broader|Machine learning: techniques -Overfitting/Generalization|skos:broader|Machine learning: problems -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. 
Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall, the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|sl:tag|tag:semantic_folding -Extend SVMs with the aim of max-margin classification while ensuring that there are as few unlabelled observations near the margin as possible |skos:broader|Reasoning from observed, specific (training) cases to specific (test) cases. In contrast, induction is reasoning from observed training cases to general rules, which are then applied to the test cases -DAO attack|skos:broader|Ethereum -This specification defines a merge of SPARQL and XQuery, and has the potential to bring XML and RDF closer together. XSPARQL provides concise and intuitive solutions for mapping between XML and RDF in either direction, addressing both the use cases of GRDDL and SAWSDL. |skos:broader|Backed by the flexibility of the RDF data model, and consisting of both a query language and data access protocol SPARQL has the potential to become a key component in Web 2.0 applications. SPARQL could provide a common query language for all Web 2.0 applications. -Yamana|skos:broader|Native americans -Artificial Intelligence|skos:broader|Intelligence -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods.
The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:tag|tag:natural_language_generation -Archéologie africaine|skos:broader|Archéologie -Talis|skos:broader|Semantic web company -RESTful Web Services|skos:broader|Web Services -OKFN Datahub|skos:broader|Data management platform -Paul Miller|skos:broader|SW guys (and girls) -Paléontologie|skos:broader|Science -iphoto|skos:broader|OS X app -JSONP|skos:broader|JSON -J'ai un petit problème avec mon ordinateur|skos:broader|Computers -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. We open-source our pretrained models and code.|sl:tag|tag:transfer_learning_in_nlp -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:tag|tag:embeddings -Models of consciousness|skos:broader|Conscience -Rio de Janeiro|skos:broader|Ville -Numérisation des œuvres indisponibles|skos:broader|Bibliothèque numérique -BERT-of-Theseus: Compressing BERT by Progressive Module Replacing approach to compress BERT by progressive module replacing. 
Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter [Github](https://github.com/JetRunner/BERT-of-Theseus) In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models, and smooths the training process. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter, liberating human effort from hyper-parameter tuning. Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.|sl:arxiv_author|Canwen Xu -Principal component analysis|skos:broader|Feature learning -Large Memory Layers with Product Keys a structured memory which can be easily integrated into a neural network. The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! [Implementation](/doc/2019/08/product_key_memory_pkm_minima) This paper introduces a structured memory which can be easily integrated into a neural network. The memory is very large by design and significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on product keys, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. This memory layer allows us to tackle very large scale language modeling tasks. In our experiments we consider a dataset with up to 30 billion words, and we plug our memory layer in a state-of-the-art transformer-based architecture. In particular, we found that a memory augmented model with only 12 layers outperforms a baseline transformer model with 24 layers, while being twice faster at inference time. We release our code for reproducibility purposes.|sl:arxiv_firstAuthor|Guillaume Lample -Continual Lifelong Learning with Neural Networks: A Review Humans and animals have the ability to continually acquire, fine-tune, and transfer knowledge and skills throughout their lifespan. 
This ability, referred to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms that together contribute to the development and specialization of our sensorimotor skills as well as to long-term memory consolidation and retrieval. Consequently, lifelong learning capabilities are crucial for autonomous agents interacting in the real world and processing continuous streams of information. However, lifelong learning remains a long-standing challenge for machine learning and neural network models since the continual acquisition of incrementally available information from non-stationary data distributions generally leads to catastrophic forgetting or interference. This limitation represents a major drawback for state-of-the-art deep neural network models that typically learn representations from stationary batches of training data, thus without accounting for situations in which information becomes incrementally available over time. In this review, we critically summarize the main challenges linked to lifelong learning for artificial learning systems and compare existing neural network approaches that alleviate, to different extents, catastrophic forgetting. We discuss well-established and emerging research motivated by lifelong learning factors in biological systems such as structural plasticity, memory replay, curriculum and transfer learning, intrinsic motivation, and multisensory integration.|sl:tag|tag:arxiv_doc -Histoire du Niger|skos:broader|Niger -URI Synonymity|skos:broader|URI -Gautier Poupeau|skos:broader|SW guys (and girls) -Brinxmat|skos:broader|SW guys (and girls) -Mer|skos:broader|Géographie -Cost of Linked Data|skos:broader|Linked Data -Feature extraction|skos:broader|Features (Machine Learning) -Délocalisation des services|skos:broader|Délocalisations -Meetup Web Sémantique|skos:broader|Semantic Web -Commission européenne|skos:broader|Institutions européennes -Semantic data|skos:broader|Semantic Web -Swoogle|skos:broader|RDF Data source -USA|skos:broader|Amérique -KGE KG embedding Knowledge graph embedding|skos:broader|Knowledge Graph KG -AI girls and guys|skos:broader|Technical girls and guys -Microsoft|skos:broader|Entreprise -An efficient framework for learning sentence representations Quick Thoughts. Framework for learning sentence representations from unlabelled data. We reformulate the problem of predicting the context in which a sentence appears as a classification problem. In this work we propose a simple and efficient framework for learning sentence representations from unlabelled data. Drawing inspiration from the distributional hypothesis and recent work on learning sentence representations, we reformulate the problem of predicting the context in which a sentence appears as a classification problem. Given a sentence and its context, a classifier distinguishes context sentences from other contrastive sentences based on their vector representations. This allows us to efficiently learn different types of encoding functions, and we show that the model learns high-quality sentence representations.
We demonstrate that our sentence representations outperform state-of-the-art unsupervised and supervised representation learning methods on several downstream NLP tasks that involve understanding sentence semantics while achieving an order of magnitude speedup in training time.|sl:tag|tag:nlp_google -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|sl:tag|tag:survey -Hérédité|skos:broader|Genetics Génétique -Chine-Europe|skos:broader|Europe -A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|sl:tag|tag:arxiv_doc -Zinder : alimentation en eau|skos:broader|Zinder -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense of a fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text.
After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:arxiv_author|Matthew E. Peters -SW Wiki|skos:broader|Wiki -css|skos:broader|Web dev -OWLED 2007 AND fps|skos:broader|SW in Technical Automotive Documentation -Rap|skos:broader|Musique -Niger|skos:broader|Afrique de l'Ouest -Vidéo Ina.fr|skos:broader|INA -Périodes glacières|skos:broader|Climat -African land grab|skos:broader|Terres agricoles -C2GWeb-JS|skos:broader|C2GWeb RDF -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:tag|tag:arxiv_doc -Election|skos:broader|Démocratie -Zhang Qian|skos:broader|Histoire de la Chine -Word sense / Lexical ambiguity|skos:broader|NLP tasks / problems -Binary classification models with "Uncertain" predictions Binary classification models which can assign probabilities to categories such as "the tissue is 75% likely to be tumorous" or "the chemical is 25% likely to be toxic" are well understood statistically, but their utility as an input to decision making is less well explored. We argue that users need to know which is the most probable outcome, how likely that is to be true and, in addition, whether the model is capable enough to provide an answer.
It is the last case, where the potential outcomes of the model explicitly include "don't know" that is addressed in this paper. Including this outcome would better separate those predictions that can lead directly to a decision from those where more data is needed. Where models produce an "Uncertain" answer similar to a human reply of "don't know" or "50:50" in the examples we refer to earlier, this would translate to actions such as "operate on tumour" or "remove compound from use" where the models give a "more true than not" answer. Where the models judge the result "Uncertain" the practical decision might be "carry out more detailed laboratory testing of compound" or "commission new tissue analyses". The paper presents several examples where we first analyse the effect of its introduction, then present a methodology for separating "Uncertain" from binary predictions and finally, we provide arguments for its use in practice.|sl:arxiv_firstAuthor|Damjan Krstajic -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|sl:tag|tag:nlp_facebook -PhD Thesis|skos:broader|PhD -Chalutage en eaux profondes|skos:broader|Pêche -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:arxiv_firstAuthor|Mikel Artetxe -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass.
We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|sl:arxiv_author|Po-Wei Wang -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:arxiv_author|Armen Aghajanyan -Matière noire|skos:broader|Missing Matter -NLP tasks / problems|skos:broader|NLP -SW at Renault|skos:broader|Semantic Web -Parrot|skos:broader|RIF -ERNIE: Enhanced Language Representation with Informative Entities We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The source code of this paper can be obtained from https://github.com/thunlp/ERNIE.|sl:arxiv_author|Qun Liu -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era.
In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|sl:arxiv_author|Xuanjing Huang -Docker-Tomcat|skos:broader|Tomcat -No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity... Traditionally, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer’s output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity. Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point $x$ is similar to a set of positive points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize. A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow.
In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved. As a result, the proxy-loss improves on state-of-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:arxiv_author|Sergey Ioffe -Chrome|skos:broader|Brouteur -Attention Models in Graphs: A Survey An attention mechanism aids a model by allowing it to focus on the most relevant parts of the input to make decisions Graph-structured data arise naturally in many different application domains. By representing data as graphs, we can capture entities (i.e., nodes) as well as their relationships (i.e., edges) with each other. Many useful insights can be derived from graph-structured data as demonstrated by an ever-growing body of work focused on graph mining. However, in the real-world, graphs can be both large - with many complex patterns - and noisy which can pose a problem for effective graph mining. An effective way to deal with this issue is to incorporate attention into graph mining solutions. An attention mechanism allows a method to focus on task-relevant parts of the graph, helping it to make better decisions. In this work, we conduct a comprehensive and focused survey of the literature on the emerging field of graph attention models. We introduce three intuitive taxonomies to group existing work. These are based on problem setting (type of input and output), the type of attention mechanism used, and the task (e.g., graph classification, link prediction, etc.). We motivate our taxonomies through detailed examples and use each to survey competing approaches from a unique standpoint. Finally, we highlight several challenges in the area and discuss promising directions for future work.|sl:arxiv_firstAuthor|John Boaz Lee -k-means clustering|skos:broader|Machine learning: techniques -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense of a fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention.
In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:good -No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity... Traditionally, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer’s output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity. Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point $x$ is similar to a set of positive points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize. A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow.
In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved. As a result, the proxy-loss improves on state-of-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:tag|tag:google_research -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|sl:tag|tag:arxiv_doc -Regroupement familial et test ADN de filiation|skos:broader|Regroupement familial -Film turc|skos:broader|Film -Uranium|skos:broader|Energie -Learning Sparse, Distributed Representations using the Hebbian Principle The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|sl:arxiv_author|Aseem Wadhwa -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks.
For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:arxiv_author|Mike Lewis -Semantic Web|skos:broader|Internet -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Christopher Ré -Labeled Data|skos:broader|Machine learning: problems -public-linked-json@w3.org|skos:broader|Mailing list -JVisualVM|skos:broader|Java dev -Marie-Jo Pérec|skos:broader|Sportif -Les petites cases|skos:broader|Semantic Web blog -France Inter|skos:broader|Radio -Disparition des abeilles|skos:broader|Insect collapse -Loi sur le voile|skos:broader|France -Solr + RDF|skos:broader|Solr -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. 
This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:arxiv_author|Vidhisha Balachandran -Sônia Braga|skos:broader|Brésil -Amazon Alexa|skos:broader|Amazon -Crimes de l'église catholique|skos:broader|Catholicisme -Java|skos:broader|Programming language -Sani Aboussa|skos:broader|Musique du Niger -Taxi|skos:broader|Automobile -Equitation|skos:broader|Sport -Youtube tutorial|skos:broader|Tutorial -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:tag|tag:sanjeev_arora -Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. 
Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised "skeleton" table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|sl:arxiv_author|Matthew Zeigenfuse -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:arxiv_author|Anthony Ferritto -Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, "powerful", "strong" and "Paris" are equally distant.
In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|sl:arxiv_firstAuthor|Quoc V. Le -Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|sl:tag|tag:graph_embeddings -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|sl:arxiv_firstAuthor|Charles Sutton -jsFiddle|skos:broader|Web tools -Social bookmarking|skos:broader|Social Content Services -Enigmes de la physique|skos:broader|Physique -Sites du Patrimoine mondial de l'Unesco|skos:broader|UNESCO -Information theory AND Deep Learning|skos:broader|Deep Learning -Shoah|skos:broader|Juifs -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification.
Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute.|sl:arxiv_author|Edouard Grave -Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output of DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|sl:tag|tag:explainable_ai -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction.
We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Yoshua Bengio -Tom Heath|skos:broader|SW guys (and girls) -La communauté internationale est une garce|skos:broader|Garce -Newton|skos:broader|Grand Homme -Encrypted Media Extensions|skos:broader|DRM in HTML 5 -Portugal|skos:broader|Pays d'Europe -New Africa|skos:broader|Afrique -Wikipedia|skos:broader|Wiki -Caetano Veloso|skos:broader|Musicien -Physique des particules : modèle standard|skos:broader|Physique des particules -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Haidong Shao -Andrew McCallum|skos:broader|NLP girls and guys -Sarraounia Mangou|skos:broader|Histoire du Niger -Online Learning|skos:broader|NTIC -Word-sense disambiguation|skos:broader|Word sense / Lexical ambiguity -Triplet Loss|skos:broader|Siamese networks -RDF Template|skos:broader|RDF -Sentence Similarity|skos:broader|Text Similarity -Fact-checking|skos:broader|Médias -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. Mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance.
Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Yinfei Yang -ELMo|skos:broader|Contextualized word representations -Dev tips|skos:broader|Dev -Sem web demo|skos:broader|Semantic Web -Graphs+Machine Learning|skos:broader|Graph -CRISPR|skos:broader|Gene editing -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:arxiv_author|Eneko Agirre -techniques that make use of the spectrum (eigenvalues) of the similarity matrix of the data to perform dimensionality reduction before clustering in fewer dimensions.|skos:broader|the task of grouping a set of objects in such a way that objects in the same group (cluster) are more similar (in some sense or another) to each other than to those in other groups. -Jerma|skos:broader|Niger -Antiwork|skos:broader|Travailler moins -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory.
Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|sl:tag|tag:hierarchical_memory_networks -Ofir|skos:broader|Ami -Colza transgénique|skos:broader|OGM -Filme brasileiro@pt|skos:broader|Cinéma brésilien -Musée archéologique de Bagdad|skos:broader|Musée -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:arxiv_firstAuthor|Sanjeev Arora -SIOC|skos:broader|RDF Vocabularies -email classification|skos:broader|Text Classification -Jupyter|skos:broader|Python tools -IBM developerWorks|skos:broader|Dev -Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. 
Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.|sl:tag|tag:natural_language_generation -Semantic Web / Web 2.0|skos:broader|Semantic Web -Enceladus|skos:broader|Saturn -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|sl:arxiv_author|Ruslan Salakhutdinov -Research papers|skos:broader|Recherche -Collaborative ontologie creation|skos:broader|Ontologies -Chatbots|skos:broader|Conversational AI -Antiquité iranienne|skos:broader|Iran -Biodiversité : effondrement|skos:broader|Biodiversité -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. 
Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:arxiv_firstAuthor|Weijie Liu -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. [conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperforms several popular clustering methods when tested on three public short text datasets.|sl:arxiv_author|Guanhua Tian -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here.
See related articles at http://explained.ai|sl:arxiv_author|Terence Parr -Blob|skos:broader|Biologie -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critique here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|sl:arxiv_author|Ying Zhang -W3C Working group|skos:broader|W3C -Latent Semantic Analysis|skos:broader|Vector space model -Agriculture africaine|skos:broader|Afrique -C2GWeb|skos:broader|Configuration as Linked Data -RFID|skos:broader|NTIC -RDF data visualization|skos:broader|RDF Tools -Cosmic microwave background|skos:broader|Big bang -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data.
We open-source our pretrained models and code.|sl:tag|tag:sebastian_ruder -NSA|skos:broader|Cybersurveillance -Pétrole|skos:broader|Energie -EMNLP 2018|skos:broader|Bruxelles -Musicien|skos:broader|Artiste -Film français|skos:broader|France -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:tag|tag:knowledge_graph_embeddings -Exalead|skos:broader|Search Engines -Patent finding|skos:broader|Patent -End-To-End Memory Networks Neural network with a recurrent attention model over a possibly large external memory. Cited by [#A. Bordes](/tag/antoine_bordes) at [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html) We introduce a neural network with a recurrent attention model over a possibly large external memory. The architecture is a form of Memory Network (Weston et al., 2015) but unlike the model in that work, it is trained end-to-end, and hence requires significantly less supervision during training, making it more generally applicable in realistic settings. It can also be seen as an extension of RNNsearch to the case where multiple computational steps (hops) are performed per output symbol. The flexibility of the model allows us to apply it to tasks as diverse as (synthetic) question answering and to language modeling. For the former our approach is competitive with Memory Networks, but with less supervision. For the latter, on the Penn TreeBank and Text8 datasets our approach demonstrates comparable performance to RNNs and LSTMs. In both cases we show that the key concept of multiple computational hops yields improved results.|sl:tag|tag:memory_networks -Culture et sem web|skos:broader|Semantic Web -Ópera do Malandro|skos:broader|Filme brasileiro@pt -Google Research|skos:broader|Google -Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. two methods: 1.
Category Embedding model: it replaces the entities in the context with their directly labeled categories to build categories' context; 2. Hierarchical Category Embedding: it further incorporates all ancestor categories of the context entities to utilize the hierarchical information. Due to the lack of structured knowledge applied in learning distributed representation of categories, existing work cannot incorporate category hierarchies into entity information. We propose a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. The framework allows to compute meaningful semantic relatedness between entities and categories. Our framework can handle both single-word concepts and multiple-word concepts with superior performance on concept categorization and yield state of the art results on dataless hierarchical classification.|sl:arxiv_author|Ronghuo Zheng -Text Clustering|skos:broader|Data clustering Cluster analysis -Big data & semantic web|skos:broader|Big Data -Lenka Zdeborová|skos:broader|AI girls and guys -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn't know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|sl:arxiv_author|Robin Jia -Rome|skos:broader|Italie -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts.
To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:arxiv_author|Inbar Fried -Gouvernement Chirac|skos:broader|Chirac -SQL to RDF mapping|skos:broader|RDF and database -New Horizons|skos:broader|NASA -Cazuza|skos:broader|Brésil -Jean Dujardin|skos:broader|Acteur -sindice|skos:broader|Semantic Web search engine -Film italien|skos:broader|Italie -Pellet|skos:broader|Clark and Parsia -Training Data (NLP)|skos:broader|Training data -BERT|skos:broader|Contextualized word representations -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:arxiv_author|Laurent Vermue -Online Course Materials|skos:broader|Education -Semantic mashups|skos:broader|Mashups -Jena GRDDL Reader|skos:broader|GRDDL -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly fewer operations.
- It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|sl:tag|tag:backpropagation -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|sl:arxiv_author|Yoshua Bengio -Uruguay|skos:broader|Amérique du sud -Text Generation from Knowledge Graphs with Graph Transformers Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. 
Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.|sl:tag|tag:kg_and_nlp -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations. We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|sl:tag|tag:ai_facebook -A voir|skos:broader|Todo list -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|sl:arxiv_author|Boli Chen -Siamese network|skos:broader|ANN NN Artificial neural network -Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. 
However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|sl:arxiv_author|Marco Gori -GitHub Pages|skos:broader|GitHub -RDFa|skos:broader|HTML Data -Language modeling: task of predicting the next word in a text given the previous words. Example of concrete practical applications: intelligent keyboards Language model: probability distribution over sequences of words. Statistical language models try to learn the probability of the next word given its previous words. Models rely on an auto-regressive factorization of the joint probability of a corpus using different approaches, from n-gram models to RNNs (SOTA as of 2018-01) ([source](https://arxiv.org/abs/1801.06146))|skos:broader|In machine learning, unsupervised learning refers to the problem of trying to find hidden structure in unlabeled data -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. 
For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:arxiv_author|Alexis Conneau -Sigma.js|skos:broader|Graph visualization -Géologie|skos:broader|Science -Episodic Memory|skos:broader|Mémoire humaine -SKOS editor|skos:broader|SKOS -Poincaré Embeddings|skos:broader|Entity embeddings -Ora Lassila|skos:broader|SW guys (and girls) -Leningrad|skos:broader|Ex URSS URSS -TransE|skos:broader|Knowledge Graph Embeddings -OWLlink Protocol|skos:broader|OWL -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:arxiv_author|Koray Kavukcuoglu -African languages|skos:broader|Afrique -Newton|skos:broader|Scientifique -N-grams|skos:broader|Language model -Technorati|skos:broader|Tagging -Tony Blair|skos:broader|Homme politique -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|sl:tag|tag:arxiv_doc -Semantic Web: databases|skos:broader|Semantic Web -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks.
Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. Specifically, we study two structured distillation schemes: i)pair-wise distillation that distills the pairwise similarities by building a static graph, and ii)holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|sl:tag|tag:arxiv_doc -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|sl:tag|tag:zero_shot_entity_linking -Modèles économiques|skos:broader|Economie -ExxonMobil|skos:broader|Compagnies pétrolières -Topic embeddings|skos:broader|Embeddings in NLP -Semantic feature extraction|skos:broader|Semantic technology -Leo Sauermann|skos:broader|Semantic Desktop -To see|skos:broader|Todo list -David Cameron|skos:broader|Homme politique -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. 
However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:arxiv_author|Minshuo Chen -Sarkozyland|skos:broader|Sarkozy -Apache web server|skos:broader|Web server -Semantic feature extraction|skos:broader|Feature extraction -Clint Eastwood|skos:broader|Réalisateur -Datao|skos:broader|Olivier Rossel -Pape|skos:broader|Eglise catholique -Restful semantic web services|skos:broader|RESTful Web Services -Firefighter|skos:broader|Fire -Voyager|skos:broader|NASA -Grands Singes|skos:broader|Espèces menacées -Craig Venter Institute|skos:broader|Celera ou Craig Venter -Turing|skos:broader|Scientifique -Tahar Ben Jelloun|skos:broader|Maroc -Loi sur le téléchargement|skos:broader|Piratage des œuvres -TopBraid|skos:broader|TopQuadrant -DeepType: Multilingual Entity Linking by Neural Type System Evolution The wealth of structured (e.g. Wikidata) and unstructured data about the world available today presents an incredible opportunity for tomorrow's Artificial Intelligence. So far, integration of these two different modalities is a difficult process, involving many decisions concerning how best to represent the information so that it will be captured or useful, and hand-labeling large amounts of data. DeepType overcomes this challenge by explicitly integrating symbolic information into the reasoning process of a neural network with a type system. First we construct a type system, and second, we use it to constrain the outputs of a neural network to respect the symbolic structure. We achieve this by reformulating the design problem into a mixed integer problem: create a type system and subsequently train a neural network with it. In this reformulation discrete variables select which parent-child relations from an ontology are types within the type system, while continuous variables control a classifier fit to the type system. The original problem cannot be solved exactly, so we propose a 2-step algorithm: 1) heuristic search or stochastic optimization over discrete variables that define a type system informed by an Oracle and a Learnability heuristic, 2) gradient descent to fit classifier parameters. We apply DeepType to the problem of Entity Linking on three standard datasets (i.e. 
WikiDisamb30, CoNLL (YAGO), TAC KBP 2010) and find that it outperforms all existing solutions by a wide margin, including approaches that rely on a human-designed type system or recent deep learning-based entity embeddings, while explicitly using symbolic information lets it integrate new entities without retraining.|sl:arxiv_firstAuthor|Jonathan Raiman -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. An overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene's version of the theory combined with Yoshua Bengio's concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggests a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:tag|tag:consciousness_prior -Mac OS X Tip|skos:broader|Tips -Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantics of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words.
For example, "powerful", "strong" and "Paris" are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|sl:arxiv_author|Quoc V. Le -RDF Service|skos:broader|RDF -Enswers|skos:broader|Corée du Sud -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|sl:tag|tag:yoshua_bengio -Using Information Content to Evaluate Semantic Similarity in a Taxonomy This paper presents a new measure of semantic similarity in an IS-A taxonomy, based on the notion of information content. Experimental evaluation suggests that the measure performs encouragingly well (a correlation of r = 0.79 with a benchmark set of human similarity judgments, with an upper bound of r = 0.90 for human subjects performing the same task), and significantly better than the traditional edge counting approach (r = 0.66).|sl:tag|tag:taxonomies -OGM|skos:broader|Genetics Génétique -Alexandre Monnin|skos:broader|SW guys (and girls) -Polluted places|skos:broader|Pollution -XSPARQL|skos:broader|SPARQL -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers.
As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:arxiv_author|Kenton Lee -Topic modelling for humans ; Python framework for fast Vector Space Modelling |skos:broader|A statistical model for discovering the abstract topics that occur in a collection of documents. -Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content charactering, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model comparing with the state-of-the-art models.|sl:arxiv_author|Jipeng Qiang -Erich Maria Remarque|skos:broader|Ecrivain -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models.
Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:arxiv_author|Slav Petrov -Clustering|skos:broader|Unsupervised machine learning -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|sl:arxiv_author|Bart Dhoedt -Chrétienté|skos:broader|Religion -PAC|skos:broader|Subventions agricoles -Ajax|skos:broader|JavaScript -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them.
We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:tag|tag:arxiv_doc -Apache on my mac|skos:broader|Apache web server -Verger de Gado à Niamey|skos:broader|Jardin -Bootstrap aggregating (Bagging)|skos:broader|Ensemble learning -Hubble|skos:broader|Exploration spatiale -Ville|skos:broader|Géographie -Coursera: Deep Learning|skos:broader|Deep Learning -Bourbaki|skos:broader|Mathématiques -TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification. LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. We also perform a comprehensive analysis to demonstrate great future opportunities. The data and code of the dataset are provided in \\url{https://github.com/wenhuchen/Table-Fact-Checking}.|sl:tag|tag:arxiv_doc -Origine de la vie|skos:broader|Science -Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. 
We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|sl:arxiv_author|Vikas Chandra -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hum, j'ai déjà vu ça quelque part) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|sl:arxiv_author|Liang Yao -ATOM (Text editor)|skos:broader|GitHub project -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:arxiv_firstAuthor|Luis Lamb -GWT|skos:broader|JavaScript framework -Transition énergétique|skos:broader|Energie -Semanlink dev|skos:broader|Dev -Art d'Afrique|skos:broader|Afrique -Singe|skos:broader|Animal -Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. 
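A sketch of the KG-BERT recipe from the row above: render a triple as a text pair and score it with a sequence classifier. It assumes the HuggingFace transformers library (downloads a model on first run); the classification head below is randomly initialized, so scores are meaningless until fine-tuned on labeled triples:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=2)  # head is untrained here

head, relation, tail = "Steve Jobs", "founder of", "Apple Inc."
# Entity/relation *descriptions* can be substituted for the raw labels.
inputs = tok(f"{head} [SEP] {relation}", tail, return_tensors="pt")
with torch.no_grad():
    score = model(**inputs).logits.softmax(-1)[0, 1]
print(float(score))  # plausibility of the triple, once fine-tuned
```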
Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|sl:arxiv_firstAuthor|Sachin Kumar -Jeu|skos:broader|Jeux -Nelson Mandela|skos:broader|Grand Homme -KGAT: Knowledge Graph Attention Network for Recommendation To provide more accurate, diverse, and explainable recommendation, it is compulsory to go beyond modeling user-item interactions and take side information into account. Traditional methods like factorization machine (FM) cast it as a supervised learning problem, which assumes each interaction as an independent instance with side information encoded. Due to the overlook of the relations among instances or items (e.g., the director of a movie is also an actor of another movie), these methods are insufficient to distill the collaborative signal from the collective behaviors of users. In this work, we investigate the utility of knowledge graph (KG), which breaks down the independent interaction assumption by linking items with their attributes. We argue that in such a hybrid structure of KG and user-item graph, high-order relations --- which connect two items with one or multiple linked attributes --- are an essential factor for successful recommendation. We propose a new method named Knowledge Graph Attention Network (KGAT) which explicitly models the high-order connectivities in KG in an end-to-end fashion. It recursively propagates the embeddings from a node's neighbors (which can be users, items, or attributes) to refine the node's embedding, and employs an attention mechanism to discriminate the importance of the neighbors. Our KGAT is conceptually advantageous to existing KG-based recommendation methods, which either exploit high-order relations by extracting paths or implicitly modeling them with regularization. Empirical results on three public benchmarks show that KGAT significantly outperforms state-of-the-art methods like Neural FM and RippleNet. Further studies verify the efficacy of embedding propagation for high-order relation modeling and the interpretability benefits brought by the attention mechanism.|sl:tag|tag:arxiv_doc -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. 
The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:arxiv_author|Tomas Mikolov -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:arxiv_author|Alexis Conneau -Tombe d'amphipolis|skos:broader|Alexandre le Grand -Piggy Bank|skos:broader|SIMILE -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|sl:arxiv_author|Nigel Collier -Industrie pharmaceutique|skos:broader|industrie -Antoine Bordes|skos:broader|NLP girls and guys -Homme de Florès|skos:broader|Paléontologie humaine -Woody Allen|skos:broader|Cinéma américain -Amazon Mechanical Turk|skos:broader|Amazon -Freie Universität Berlin|skos:broader|Université -Tasmanian devil|skos:broader|Tasmanie -NMT|skos:broader|ANN NN Artificial neural network -Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. 
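One possible way to make a standard inverted-index engine store dense vectors, in the spirit of the fulltext-search row above; this quantization is an illustration, not the paper's actual encoding:

```python
# Encode a dense vector as fulltext tokens for an inverted index.
import numpy as np

def vector_to_tokens(vec, levels=8):
    """Quantize each dimension of a [-1, 1] vector into a token like 'd12_5'."""
    q = np.clip(((vec + 1.0) / 2.0 * levels).astype(int), 0, levels - 1)
    return [f"d{i}_{v}" for i, v in enumerate(q)]

vec = np.tanh(np.random.default_rng(1).normal(size=16))
doc_text = " ".join(vector_to_tokens(vec))
print(doc_text)  # index this string as a field in Elasticsearch/Lucene
```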
The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. [blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) Vector representations and vector space modeling (VSM) play a central role in modern machine learning. We propose a novel approach to `vector similarity searching' over dense semantic representations of words and documents that can be deployed on top of traditional inverted-index-based fulltext engines, taking advantage of their robustness, stability, scalability and ubiquity. We show that this approach allows the indexing and querying of dense vectors in text domains. This opens up exciting avenues for major efficiency gains, along with simpler deployment, scaling and monitoring. The end result is a fast and scalable vector database with a tunable trade-off between vector search performance and quality, backed by a standard fulltext engine such as Elasticsearch. We empirically demonstrate its querying performance and quality by applying this solution to the task of semantic searching over a dense vector representation of the entire English Wikipedia.|sl:arxiv_author|Vít Novotný -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|sl:tag|tag:emnlp_2019 -Solar storm|skos:broader|Soleil -Knowledge Graph Embedding by Relational Rotation in Complex Space|skos:broader|How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. 
Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. -Deep Learning and the Information Bottleneck Principle Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN. Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN and obtain finite sample generalization bounds. The advantage of getting closer to the theoretical limit is quantifiable both by the generalization bound and by the network's simplicity. We argue that both the optimal architecture, number of layers and features/connections at each layer, are related to the bifurcation points of the information bottleneck tradeoff, namely, relevant compression of the input layer with respect to the output layer. The hierarchical representations at the layered network naturally correspond to the structural phase transitions along the information curve. We believe that this new insight can lead to new optimality bounds and deep learning algorithms.|sl:arxiv_author|Noga Zaslavsky -blogmarks|skos:broader|Tagging -Le Pen|skos:broader|Méchant -Paradis fiscaux|skos:broader|Finance -FBI v. Apple|skos:broader|Apple -Histoire des sciences|skos:broader|Science -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hum, j'ai déjà vu ça quelque part) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. 
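A toy numpy illustration of "relation as translation" (TransE, as cited above); entities, relation and vectors are made up and untrained:

```python
# TransE-style scoring: a relation translates the head toward the tail,
# so h + r ≈ t for true triples.
import numpy as np

rng = np.random.default_rng(42)
dim = 20
entities = {e: rng.normal(size=dim) for e in ["paris", "france", "tokyo"]}
relations = {"capital_of": rng.normal(size=dim)}

def transe_score(h, r, t):
    """Higher (less negative) means a more plausible triple."""
    return -np.linalg.norm(entities[h] + relations[r] - entities[t])

print(transe_score("paris", "capital_of", "france"))
print(transe_score("tokyo", "capital_of", "france"))
# With random vectors both scores are arbitrary; training minimizes a
# margin loss so that true triples outscore corrupted ones.
```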
Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|sl:arxiv_author|Chengsheng Mao -Google Knowledge Graph|skos:broader|Google -Product Knowledge Graph|skos:broader|Product description -Sentiment analysis|skos:broader|Data mining -Manaal Faruqui|skos:broader|NLP girls and guys -Grillon|skos:broader|Insecte -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:tag|tag:arxiv_doc -Théorie des cordes|skos:broader|Physique -A Survey Of Cross-lingual Word Embedding Models Cross-lingual representations of words enable us to reason about word meaning in multilingual contexts and are a key facilitator of cross-lingual transfer when developing natural language processing models for low-resource languages. In this survey, we provide a comprehensive typology of cross-lingual word embedding models. We compare their data requirements and objective functions. The recurring theme of the survey is that many of the models presented in the literature optimize for the same objectives, and that seemingly different models are often equivalent modulo optimization strategies, hyper-parameters, and such. We also discuss the different ways cross-lingual word embeddings are evaluated, as well as future challenges and research horizons.|sl:tag|tag:cross_lingual_word_embeddings -Search Engines|skos:broader|Information retrieval -API management|skos:broader|API -Robotic imitation|skos:broader|Robotique -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... 
are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. These information have been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:arxiv_author|Sua Sung -sig.ma|skos:broader|Richard Cyganiak -Nature (journal)|skos:broader|Publication scientifique -Alphago|skos:broader|Go (Game) -Eau de Mars|skos:broader|Mars -Technological singularity|skos:broader|Technologie -Human-like AI|skos:broader|Artificial Intelligence -Apple sucks|skos:broader|Apple -Representation learning|skos:broader|Machine learning: problems -Thomas Wolf|skos:broader|NLP girls and guys -Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study data redundancy (reverse relations), Cartesian product relations A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world In the active research area of employing embedding models for knowledge graph completion, particularly for the task of link prediction, most prior studies used two benchmark datasets FB15k and WN18 in evaluating such models. Most triples in these and other datasets in such studies belong to reverse and duplicate relations which exhibit high data redundancy due to semantic duplication, correlation or data incompleteness. This is a case of excessive data leakage---a model is trained using features that otherwise would not be available when the model needs to be applied for real prediction. There are also Cartesian product relations for which every triple formed by the Cartesian product of applicable subjects and objects is a true fact. Link prediction on the aforementioned relations is easy and can be achieved with even better accuracy using straightforward rules instead of sophisticated embedding models. A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world. 
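A sketch of the simple baseline the categorical-metadata row mentions: concatenating a learned category embedding to the text representation in the final layer. All dimensions and names below are illustrative:

```python
import torch
import torch.nn as nn

class TextWithCategory(nn.Module):
    def __init__(self, text_dim=128, n_categories=10, cat_dim=16, n_classes=2):
        super().__init__()
        self.cat_emb = nn.Embedding(n_categories, cat_dim)
        self.out = nn.Linear(text_dim + cat_dim, n_classes)

    def forward(self, text_repr, category_id):
        # concatenate categorical features just before the output layer
        z = torch.cat([text_repr, self.cat_emb(category_id)], dim=-1)
        return self.out(z)

model = TextWithCategory()
text_repr = torch.randn(4, 128)          # stand-in for a sentence encoder
category = torch.tensor([0, 3, 3, 7])    # e.g. user or product ids
print(model(text_repr, category).shape)  # torch.Size([4, 2])
```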
This paper is the first systematic study with the main objective of assessing the true effectiveness of embedding models when the unrealistic triples are removed. Our experiment results show these models are much less accurate than what we used to perceive. Their poor accuracy renders link prediction a task without truly effective automated solution. Hence, we call for re-investigation of possible effective approaches.|sl:tag|tag:knowledge_graph_completion -Semantic Text Matching|skos:broader|Text Similarity -limule|skos:broader|Espèces menacées -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|José Emilio Labra Gayo -GNU Octave|skos:broader|Mathématiques -Variational autoencoder (VAE)|skos:broader|Deep latent variable models -Solr|skos:broader|apache.org -Privacy and internet|skos:broader|Internet -Junk DNA|skos:broader|ADN -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:arxiv_author|Dani Yogatama -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. 
With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Nicole Limtiaco -EDF|skos:broader|Energie -Coursera: R Programming|skos:broader|R -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Nicolas Pinto -SDMX-RDF|skos:broader|Semantic Statistics -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. 
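The transfer pattern from the Universal Sentence Encoder row above, as a sketch: freeze a sentence encoder, then train a tiny classifier on its embeddings. The `encode` function here is a stand-in (a random projection of hashed bag-of-words), not the actual USE model:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
proj = rng.normal(size=(2 ** 12, 64))

def encode(sentence):
    """Placeholder sentence encoder; swap in a real pretrained model."""
    ids = [hash(w) % 2 ** 12 for w in sentence.lower().split()]
    return proj[ids].mean(axis=0)

X = np.stack([encode(s) for s in ["great movie", "terrible plot",
                                  "loved it", "waste of time"]])
y = np.array([1, 0, 1, 0])
clf = LogisticRegression().fit(X, y)   # small supervised head on top
print(clf.predict([encode("really great")]))
```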
To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:arxiv_author|Nelson F. Liu -Graph Embeddings|skos:broader|Embeddings -Koskas|skos:broader|KODE -Reboisement|skos:broader|Plantation d'arbres -Afrique francophone|skos:broader|Francophonie -JDBC|skos:broader|Java dev -Microsoft Research|skos:broader|Microsoft -Publicité Internet|skos:broader|Internet -markdown-it|skos:broader|Markown / Javascript -David Beckett|skos:broader|Technical girls and guys -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|sl:tag|tag:word_embedding -Ours polaire|skos:broader|Ours -A Review of Relational Machine Learning for Knowledge Graphs Relational machine learning studies methods for the statistical analysis of relational, or graph-structured, data. 
In this paper, we provide a review of how such statistical models can be trained on large knowledge graphs, and then used to predict new facts about the world (which is equivalent to predicting new edges in the graph). In particular, we discuss two fundamentally different kinds of statistical relational models, both of which can scale to massive datasets. The first is based on latent feature models such as tensor factorization and multiway neural networks. The second is based on mining observable patterns in the graph. We also show how to combine these latent and observable models to get improved modeling power at decreased computational cost. Finally, we discuss how such statistical models of graphs can be combined with text-based information extraction methods for automatically constructing knowledge graphs from the Web. To this end, we also discuss Google's Knowledge Vault project as an example of such combination.|sl:arxiv_author|Kevin Murphy -Moussa Kaka|skos:broader|Journaliste -Musée archéologique de Bagdad|skos:broader|Irak -TouchGraph|skos:broader|Graph visualization -FBI v. Apple|skos:broader|Vie privée -TripleStore|skos:broader|Relational Databases and the Semantic Web -backplanejs|skos:broader|Unobtrusive JavaScript -Java in python|skos:broader|Java -spaCy|skos:broader|NLP tools -Pedro Almodóvar|skos:broader|Réalisateur -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. 
As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Ashish Vaswani -photos online|skos:broader|Photo -Graph Editor|skos:broader|Graph -Python 4 Data science|skos:broader|Python -huggingface/transformers|skos:broader|Transformers -Embeddings|skos:broader|Representation learning -spurl|skos:broader|Social bookmarking -Maxent models|skos:broader|Machine learning: techniques -Elda|skos:broader|Epimorphics -Tchernobyl|skos:broader|Ex URSS URSS -Lycée|skos:broader|Education -Film indien|skos:broader|Film -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|sl:arxiv_author|Tao Xiang -Hérodote|skos:broader|Grèce antique -Automotive AND W3C|skos:broader|Automotive and web technologies -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. 
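A sketch of the two word-level information sources named in the NER row above: a word embedding concatenated with a character-derived representation, feeding a BiLSTM tagger. Sizes are arbitrary and the CRF decoding layer from the paper is omitted for brevity:

```python
import torch
import torch.nn as nn

class CharWordTagger(nn.Module):
    def __init__(self, n_words=1000, n_chars=100, n_tags=5):
        super().__init__()
        self.word_emb = nn.Embedding(n_words, 32)
        self.char_emb = nn.Embedding(n_chars, 8)
        self.char_lstm = nn.LSTM(8, 8, bidirectional=True, batch_first=True)
        self.lstm = nn.LSTM(32 + 16, 32, bidirectional=True, batch_first=True)
        self.out = nn.Linear(64, n_tags)

    def forward(self, words, chars):
        # chars: (batch, seq, chars_per_word) -> one vector per word
        b, s, c = chars.shape
        _, (h, _) = self.char_lstm(self.char_emb(chars.view(b * s, c)))
        char_repr = h.permute(1, 0, 2).reshape(b, s, -1)
        x = torch.cat([self.word_emb(words), char_repr], dim=-1)
        h_seq, _ = self.lstm(x)
        return self.out(h_seq)  # per-token tag scores

tagger = CharWordTagger()
scores = tagger(torch.randint(0, 1000, (2, 7)),
                torch.randint(0, 100, (2, 7, 6)))
print(scores.shape)  # torch.Size([2, 7, 5])
```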
Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|sl:arxiv_author|Guillaume Lample -Concept's URI|skos:broader|LD -BLINK|skos:broader|Wikification -Archéologie chinoise|skos:broader|Archéologie -Sani Aboussa|skos:broader|Musicien -Mines d'or|skos:broader|Industrie minière -SVD factorizes the word-context co-occurrence matrix into the product of three matrices UΣV where U and V are orthonormal matrices (i.e. square matrices whose rows and columns are orthogonal unit vectors) and Σ is a diagonal matrix of eigenvalues in decreasing order. |skos:broader|process of reducing the number of random variables under consideration. Can be divided into feature selection and feature extraction. -Stemming|skos:broader|NLP tasks / problems -XLNet: Generalized Autoregressive Pretraining for Language Understanding a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.|sl:arxiv_firstAuthor|Zhilin Yang -several representations are proposed to extend word representation for phrases ([Yin and Schütze, 2014](/doc/?uri=http%3A%2F%2Faclweb.org%2Fanthology%2FP14-3006); Yu and Dredze, 2015; Passos et al., 2014). However, they don’t use structured knowledge to derive phrase representations (as said [here](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1607.07956)) [Sabastian Ruder](/tag/sebastian_ruder) in 2017 says the [following](http://ruder.io/word-embeddings-2017/index.html#phrasesandmultiwordexpressions): explicitly modelling phrases has so far not shown significant improvements on downstream tasks that would justify the additional complexity (but hum: what's about NER - in particular if using external knowledge such as lexicons?) 
|skos:broader|The objective of embedding methods is to organize symbolic objects (e.g., words, entities, concepts) in a way such that their similarity in the embedding space reflects their semantic or functional similarity -RDF-OWL documentation tool|skos:broader|Documentation tool -A Survey on Deep Learning for Named Entity Recognition mainly focus on generic NEs in English language Named entity recognition (NER) is the task to identify mentions of rigid designators from text belonging to predefined semantic types such as person, location, organization etc. NER always serves as the foundation for many natural language applications such as question answering, text summarization, and machine translation. Early NER systems got a huge success in achieving good performance with the cost of human engineering in designing domain-specific features and rules. In recent years, deep learning, empowered by continuous real-valued vector representations and semantic composition through nonlinear processing, has been employed in NER systems, yielding stat-of-the-art performance. In this paper, we provide a comprehensive review on existing deep learning techniques for NER. We first introduce NER resources, including tagged NER corpora and off-the-shelf NER tools. Then, we systematically categorize existing works based on a taxonomy along three axes: distributed representations for input, context encoder, and tag decoder. Next, we survey the most representative methods for recent applied techniques of deep learning in new NER problem settings and applications. Finally, we present readers with the challenges faced by NER systems and outline future directions in this area.|sl:tag|tag:arxiv_doc -Kapuscinski|skos:broader|Littérature -GAO|skos:broader|fps ontologies -SHACL|skos:broader|Semantic Web -No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity... Traditionnaly, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y , and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. 
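A compact sketch of a Proxy-NCA-style loss for the row above; the cross-entropy form mirrors the paper's own remark, though details such as scaling and excluding the positive from the denominator vary across implementations:

```python
import torch
import torch.nn.functional as F

def proxy_nca_loss(embeddings, labels, proxies):
    """embeddings: (B, D); labels: (B,); proxies: (C, D), one per class."""
    e = F.normalize(embeddings, dim=-1)
    p = F.normalize(proxies, dim=-1)
    dist = torch.cdist(e, p) ** 2          # squared distance to every proxy
    # softmax over negative distances pulls anchors toward their class proxy
    return F.cross_entropy(-dist, labels)

proxies = torch.randn(10, 64, requires_grad=True)  # learned with the model
emb = torch.randn(8, 64, requires_grad=True)
loss = proxy_nca_loss(emb, torch.randint(0, 10, (8,)), proxies)
loss.backward()
print(float(loss))
```

No triplet mining is needed: the proxies stand in for positives and negatives, which is the point of the method.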
As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer’s output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity. Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point $x$ is similar to a set of positive points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize. A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow. In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved. As a result, the proxy-loss improves on state-of-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:tag|tag:zero_shot_learning -Deep NLP|skos:broader|NLP -Afripedia|skos:broader|Francophonie -Songhaï|skos:broader|Peuples -Reptile|skos:broader|Animal -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. 
Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:tag|tag:attention_is_all_you_need -PIMO|skos:broader|Personal ontology -Backbone.js|skos:broader|JavaScript librairies -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|sl:tag|tag:siamese_network -Yves Roth|skos:broader|Souvenirs -Celte|skos:broader|Archéologie européenne -Mars Express|skos:broader|esa -SolrCloud|skos:broader|Solr -Peter Patel-Schneider|skos:broader|SW guys (and girls) -Self-Supervised Learning|skos:broader|Machine learning -URI opacity|skos:broader|URI -Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. 
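A usage sketch for the Sentence-BERT row above, assuming the sentence-transformers package is installed (it downloads a pretrained model on first use); the model name is one common choice, not prescribed by the paper:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")
sentences = ["A man is eating food.", "Someone is having a meal.",
             "The sky is blue."]
emb = model.encode(sentences, convert_to_tensor=True)
# cosine similarity is meaningful because SBERT is trained for it
print(util.pytorch_cos_sim(emb[0], emb[1:]))
```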
The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|sl:tag|tag:unsupervised_machine_learning -NLP: applications|skos:broader|Natural Language Processing -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|sl:arxiv_author|Jonathan Pilault -Evolution|skos:broader|Histoire de la vie -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. 
Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_author|Pierre-Alain Muller -The HSIC Bottleneck: Deep Learning without Back-Propagation we show that it is possible to learn classification tasks at near competitive accuracy without backpropagation, by maximizing a surrogate of the mutual information between hidden representations and labels and simultaneously minimizing the mutual dependency between hidden representations and the inputs... the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy can be obtained by freezing the network trained without backpropagation and appending and training a one-layer network using conventional SGD to convert the representation to the desired format. The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). Advantages: - The method facilitates parallel processing and requires significantly fewer operations. - It does not suffer from exploding or vanishing gradients. - It is biologically more plausible than Backpropagation. We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for training deep neural networks. The HSIC bottleneck is an alternative to the conventional cross-entropy loss and backpropagation that has a number of distinct advantages. It mitigates exploding and vanishing gradients, resulting in the ability to learn very deep networks without skip connections. There is no requirement for symmetric feedback or update locking. We find that the HSIC bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification comparable to backpropagation with a cross-entropy target, even when the system is not encouraged to make the output resemble the classification labels. Appending a single layer trained with SGD (without backpropagation) to reformat the information further improves performance.|sl:arxiv_author|W. Bastiaan Kleijn -Natural Language Processing with Small Feed-Forward Networks google guys: We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. We show that small and shallow feed-forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models.
Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget.|sl:arxiv_author|Anton Bakalov -ESWC 2011|skos:broader|ESWC -Semantic browsing|skos:broader|Semanlink related -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. [conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperforms several popular clustering methods when tested on three public short text datasets.|sl:arxiv_author|Jun Zhao -Catastrophic forgetting|skos:broader|Deep Learning -Relations franco-américaines|skos:broader|Relations Europe-USA -Parasitisme|skos:broader|Biology -Named Entity Recognition with Extremely Limited Data Named Entity Search (NES) We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries. We do not propose this as a replacement for NER, but as something to be used for an ephemeral or contextual class of entity, when it does not make sense to label hundreds or thousands of instances to learn a classifier. Traditional information retrieval treats named entity recognition as a pre-indexing corpus annotation task, allowing entity tags to be indexed and used during search. Named entity taggers themselves are typically trained on thousands or tens of thousands of examples labeled by humans.
However, there is a long tail of named entities classes, and for these cases, labeled data may be impossible to find or justify financially. We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries.|sl:arxiv_author|Sheikh Muhammad Sarwar -FastText|skos:broader|Word embeddings -Jupiter/Europe|skos:broader|Jupiter -NLP: French|skos:broader|NLP -FBI v. Apple|skos:broader|Backdoor -Nokia|skos:broader|Téléphone -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|sl:tag|tag:entity_embeddings -MPAA|skos:broader|Propriété intellectuelle -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. 
We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|sl:tag|tag:arxiv_doc -Dave Reynolds|skos:broader|SW guys (and girls) -Selective Question Answering under Domain Shift How you can get a QA model to abstain from answering when it doesn’t know the answer. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.|sl:arxiv_author|Amita Kamath -Crimes de l'église catholique|skos:broader|Crime -Topic Modeling over Short Texts|skos:broader|Topic Modeling -A system for rapidly creating training sets with weak supervision The System for Programmatically Building and Managing Training Data|skos:broader|data labeling is usually the bottleneck in developing NLP applications. Pbs of shifting contexts on social networks -[Doc](https://huggingface.co/transformers/)|skos:broader|[Vaswani, et al. 2017 paper](https://arxiv.org/abs/1706.03762): Attention is all you need. [#seq2seq](/tag/sequence_to_sequence_learning) using only improved self-attention units (multi-head self-attention mechanism), without any RNN. -XSL|skos:broader|Dev -Tomcat|skos:broader|Servlet -A Survey Of Cross-lingual Word Embedding Models Cross-lingual representations of words enable us to reason about word meaning in multilingual contexts and are a key facilitator of cross-lingual transfer when developing natural language processing models for low-resource languages. In this survey, we provide a comprehensive typology of cross-lingual word embedding models. We compare their data requirements and objective functions. The recurring theme of the survey is that many of the models presented in the literature optimize for the same objectives, and that seemingly different models are often equivalent modulo optimization strategies, hyper-parameters, and such. 
We also discuss the different ways cross-lingual word embeddings are evaluated, as well as future challenges and research horizons.|sl:tag|tag:arxiv_doc -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|sl:tag|tag:sequence_modeling_cnn_vs_rnn -jersey/RDF|skos:broader|Semantic Web Services -Memory leak|skos:broader|Dev -Scala|skos:broader|Programming language -Apple CarPlay|skos:broader|Automobile -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:tag|tag:dan_jurafsky -Google ranking|skos:broader|Search Engines -RSS Dev|skos:broader|Dev -Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. 
ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content characterizing, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model compared with the state-of-the-art models.|sl:arxiv_author|Xindong Wu -Langues anciennes|skos:broader|Langues -Liberté, égalité, fraternité|skos:broader|Liberté -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|sl:tag|tag:nlp_text_classification -Transfer learning in NLP|skos:broader|Transfer learning -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense of a fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities.
We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:arxiv_author|Vidur Joshi -Blog|skos:broader|Internet -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However, such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|sl:arxiv_author|Yarin Gal -Une suite de matrices symétriques en rapport avec la fonction de Mertens we explore a class of equivalence relations over N from which is constructed a sequence of symmetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important part in this classical and difficult problem. In this paper we explore a class of equivalence relations over $\\N^\\ast$ from which is constructed a sequence of symmetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis.
This suggests that matrix analysis methods may play a more important part in this classical and difficult problem.|sl:tag|tag:jean_paul -Sense2vec|skos:broader|Word sense / Lexical ambiguity -Disco Hyperdata Browser|skos:broader|Chris Bizer -Transfer learning in NLP|skos:broader|NLP tasks / problems -OpenLink Ajax Toolkit (OAT)|skos:broader|OpenLink Software -FranceConnect|skos:broader|France -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:tag|tag:survey -fps @ LDOW 2013|skos:broader|C2GWeb -Crète|skos:broader|Grèce -NLP: introduction|skos:broader|NLP -The Web Ontology for Products and Services: OWL Representation of the eCl@ss Classification Standard|skos:broader|An ontology for linking product descriptions and business entities on the Web -What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy.
Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:tag|tag:bertology -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:arxiv_author|Nicholas FitzGerald -Text Corpora and Lexical Resources|skos:broader|NLP -NLP+Automotive|skos:broader|NLP: use cases -DARPA Grand Challenge|skos:broader|DARPA -LDOW2013|skos:broader|WWW 2013 -Memory Embeddings|skos:broader|Embeddings -Argentine|skos:broader|Amérique latine -The Consciousness Prior consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., consciousness as awareness at a particular time instant: the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. [YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs) A new prior is proposed for learning representations of high-level concepts of the kind we manipulate with language. This prior can be combined with other priors in order to help disentangling abstract factors from each other. 
It is inspired by cognitive neuroscience theories of consciousness, seen as a bottleneck through which just a few elements, after having been selected by attention from a broader pool, are then broadcast and condition further processing, both in perception and decision-making. The set of recently selected elements one becomes aware of is seen as forming a low-dimensional conscious state. This conscious state is combining the few concepts constituting a conscious thought, i.e., what one is immediately conscious of at a particular moment. We claim that this architectural and information-processing constraint corresponds to assumptions about the joint distribution between high-level concepts. To the extent that these assumptions are generally true (and the form of natural language seems consistent with them), they can form a useful prior for representation learning. A low-dimensional thought or conscious state is analogous to a sentence: it involves only a few variables and yet can make a statement with very high probability of being true. This is consistent with a joint distribution (over high-level concepts) which has the form of a sparse factor graph, i.e., where the dependencies captured by each factor of the factor graph involve only very few variables while creating a strong dip in the overall energy function. The consciousness prior also makes it natural to map conscious states to natural language utterances or to express classical AI knowledge in a form similar to facts and rules, albeit capturing uncertainty as well as efficient search mechanisms implemented by attention mechanisms.|sl:tag|tag:conscience_artificielle -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|sl:tag|tag:nn_symbolic_ai_hybridation -Panthéon (Paris)|skos:broader|Monuments historiques -TheWebConf 2020|skos:broader|TheWebConf -Film policier|skos:broader|Film -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models.
The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|sl:arxiv_firstAuthor|Ikuya Yamada -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. 
Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:tag|tag:language_model -Online dictionary|skos:broader|Langues -Okapi BM25|skos:broader|Probabilistic relevance model -Quantum computing|skos:broader|Computers -NLP|skos:broader|Semantic technology -Driverless car|skos:broader|Automobile 2.0 -Eclipse tip|skos:broader|Dev tips -Content industries|skos:broader|Capitalistes -Méthodes agiles|skos:broader|Management -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|sl:arxiv_author|Salvador García -Database to RDF mapping|skos:broader|Database -Rosetta|skos:broader|Comète -Similarity queries|skos:broader|Information retrieval: techniques -R|skos:broader|Data mining tools -Astrophysique|skos:broader|Astronomie -C2GWeb and Product description|skos:broader|C2GWeb: SEO -Okapi BM25|skos:broader|Ranking (information retrieval) -spurl|skos:broader|Tagging -KODE|skos:broader|Radix trees -Giovanni Tummarello|skos:broader|Technical girls and guys -Generative models that can be used to analyze the evolution of (unobserved) topics of a collection of documents over time. Extension to Latent Dirichlet Allocation (LDA) that can handle sequential documents|skos:broader|A statistical model for discovering the abstract topics that occur in a collection of documents. -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having too many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant.
A prototypical implementation demonstrates major steps of our approach.|sl:tag|tag:arxiv_doc -Visually rich documents|skos:broader|2D-NLP -Self-Attention|skos:broader|Attention mechanism -A Theoretical Analysis of Contrastive Unsupervised Representation Learning [blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) Recent empirical works have successfully used unlabeled data to learn feature representations that are broadly useful in downstream classification tasks. Several of these methods are reminiscent of the well-known word2vec embedding algorithm: leveraging availability of pairs of semantically similar data points and negative samples, the learner forces the inner product of representations of similar pairs with each other to be higher on average than with negative samples. The current paper uses the term contrastive learning for such algorithms and presents a theoretical framework for analyzing them by introducing latent classes and hypothesizing that semantically similar points are sampled from the same latent class. This framework allows us to show provable guarantees on the performance of the learned representations on the average classification task that is comprised of a subset of the same set of latent classes. Our generalization bound also shows that learned representations can reduce (labeled) sample complexity on downstream tasks. We conduct controlled experiments in both the text and image domains to support the theory.|sl:tag|tag:sanjeev_arora -GINCO (Culture)|skos:broader|Culture et sem web -IPython|skos:broader|Jupyter -MongoDB|skos:broader|NOSQL -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy text, based on probabilistic tokenization and context-dependent document embeddings. Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas). Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text. Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems.
In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|sl:arxiv_author|Scott Blackburn -Verger de Gado à Niamey|skos:broader|Affaires de Gado à Niamey -KGE KG embedding Knowledge graph embedding|skos:broader|embedding -Excel|skos:broader|Spreadsheets -OWL ontology|skos:broader|Ontologies -Afrique du Nord|skos:broader|Afrique -Support vector machine|skos:broader|Statistical classification - bobdc.blog|skos:broader|Technical guys -Capitalisme financier|skos:broader|Finance -Similarity learning|skos:broader|Supervised machine learning -Locality Sensitive Hashing|skos:broader|Similarity queries -Blackbox NLP|skos:broader|Explainable NLP -del.icio.us|skos:broader|Social bookmarking -SOAP vs REST|skos:broader|Web Services -Carte|skos:broader|Géographie -Description Logic|skos:broader|Formal knowledge representation language -IBM|skos:broader|Entreprise -gnizr|skos:broader|Social bookmarking -Biterm Topic Model|skos:broader|Topic Modeling over Short Texts -Microsoft Concept Graph|skos:broader|Knowledge Graphs in NLP -Topic Modeling over Short Texts by Incorporating Word Embeddings New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. Inferring topics from the overwhelming amount of short texts becomes a critical but challenging task for many content analysis tasks, such as content characterizing, user interest profiling, and emerging topic detecting. Existing methods such as probabilistic latent semantic analysis (PLSA) and latent Dirichlet allocation (LDA) cannot solve this problem very well since only very limited word co-occurrence information is available in short texts. This paper studies how to incorporate the external word correlation knowledge into short texts to improve the coherence of topic modeling. Based on recent results in word embeddings that learn semantic representations for words from a large corpus, we introduce a novel method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic. The experiments on real-world datasets validate the effectiveness of our model compared with the state-of-the-art models.|sl:arxiv_firstAuthor|Jipeng Qiang -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I, Zhangyang Wang, would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm.
Please see the updated text for more details.|sl:arxiv_author|Qing Ling -Linked Data API|skos:broader|API -Latent Semantic Analysis|skos:broader|Distributional semantics -LOD mailing list|skos:broader|Linked Data -Elevage porcin|skos:broader|Agriculture -Physique des particules|skos:broader|Physique -IA/ML: domaines d'application|skos:broader|Machine learning -AJAR|skos:broader|RDF -Origines de l'homme|skos:broader|Evolution -W3C|skos:broader|Internet -DeepType: Multilingual Entity Linking by Neural Type System Evolution The wealth of structured (e.g. Wikidata) and unstructured data about the world available today presents an incredible opportunity for tomorrow's Artificial Intelligence. So far, integration of these two different modalities is a difficult process, involving many decisions concerning how best to represent the information so that it will be captured or useful, and hand-labeling large amounts of data. DeepType overcomes this challenge by explicitly integrating symbolic information into the reasoning process of a neural network with a type system. First we construct a type system, and second, we use it to constrain the outputs of a neural network to respect the symbolic structure. We achieve this by reformulating the design problem into a mixed integer problem: create a type system and subsequently train a neural network with it. In this reformulation discrete variables select which parent-child relations from an ontology are types within the type system, while continuous variables control a classifier fit to the type system. The original problem cannot be solved exactly, so we propose a 2-step algorithm: 1) heuristic search or stochastic optimization over discrete variables that define a type system informed by an Oracle and a Learnability heuristic, 2) gradient descent to fit classifier parameters. We apply DeepType to the problem of Entity Linking on three standard datasets (i.e. WikiDisamb30, CoNLL (YAGO), TAC KBP 2010) and find that it outperforms all existing solutions by a wide margin, including approaches that rely on a human-designed type system or recent deep learning-based entity embeddings, while explicitly using symbolic information lets it integrate new entities without retraining.|sl:arxiv_author|Jonathan Raiman -Named Entity Disambiguation using Deep Learning on Graphs Evaluation of different deep learning techniques to create a context vector from graphs, aimed at high-accuracy NED. (neural approach for entity disambiguation using graphs as background knowledge) We tackle Named Entity Disambiguation (NED) by comparing entities in short sentences with Wikidata graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to NED. Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task... [published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) In NED, the system must be able to generate a context for an entity in a text and an entity in a knowledge base, then correctly link the two. Explore whether representing graphs as triplets is more useful than using the full topological information of the graph We tackle \\ac{NED} by comparing entities in short sentences with \\wikidata{} graphs. Creating a context vector from graphs through deep learning is a challenging problem that has never been applied to \\ac{NED}. 
Our main contribution is to present an experimental study of recent neural techniques, as well as a discussion about which graph features are most important for the disambiguation task. In addition, a new dataset (\\wikidatadisamb{}) is created to allow a clean and scalable evaluation of \\ac{NED} with \\wikidata{} entries, and to be used as a reference in future research. In the end, our results show that a \\ac{Bi-LSTM} encoding of the graph triplets performs best, improving upon the baseline models and scoring an \\ac{F1} value of $91.6\\%$ on the \\wikidatadisamb{} test set.|sl:arxiv_firstAuthor|Alberto Cetoli -Text Classification|skos:broader|Statistical classification -Conditional random fields|skos:broader|Machine learning: techniques -group of related models that are used to produce word embeddings|skos:broader|[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Parameterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus. The mapping may be generated by training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The best known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available on the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. -Distilling the Knowledge in a Neural Network a different kind of training, which we call “distillation” to transfer the knowledge from the cumbersome model to a small model that is more suitable for deployment. Caruana and his collaborators have shown that it is possible to compress the knowledge in an [#ensemble](/tag/ensemble_learning.html) into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST. A very simple way to improve the performance of almost any machine learning algorithm is to train many different models on the same data and then to average their predictions.
Unfortunately, making predictions using a whole ensemble of models is cumbersome and may be too computationally expensive to allow deployment to a large number of users, especially if the individual models are large neural nets. Caruana and his collaborators have shown that it is possible to compress the knowledge in an ensemble into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST and we show that we can significantly improve the acoustic model of a heavily used commercial system by distilling the knowledge in an ensemble of models into a single model. We also introduce a new type of ensemble composed of one or more full models and many specialist models which learn to distinguish fine-grained classes that the full models confuse. Unlike a mixture of experts, these specialist models can be trained rapidly and in parallel.|sl:arxiv_author|Geoffrey Hinton -The Description Length of Deep Learning Models Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. Solomonoff's general theory of inference and the Minimum Description Length principle formalize Occam's razor, and hold that a good model of data is a model that is good at losslessly compressing the data, including the cost of describing the model itself. Deep neural networks might seem to go against this principle given the large number of parameters to be encoded. We demonstrate experimentally the ability of deep neural networks to compress the training data even when accounting for parameter encoding. The compression viewpoint originally motivated the use of variational methods in neural networks. Unexpectedly, we found that these variational methods provide surprisingly poor compression bounds, despite being explicitly built to minimize such bounds. This might explain the relatively poor practical performance of variational methods in deep learning. On the other hand, simple incremental encoding methods yield excellent compression values on deep networks, vindicating Solomonoff's approach.|sl:arxiv_author|Yann Ollivier -RDF data visualization|skos:broader|Data Visualization Tools -Notes on Cardinal's Matrices These notes are motivated by the work of Jean-Paul Cardinal on symmetric matrices related to the Mertens function. He showed that certain norm bounds on his matrices implied the Riemann hypothesis. Using a different matrix norm we show an equivalence of the Riemann hypothesis to suitable norm bounds on his matrices in the new norm. 
Then we specify a deformed version of his Mertens function matrices that unconditionally satisfies a norm bound that is of the same strength as his Riemann hypothesis bound.|sl:tag|tag:jean_paul -Workshop|skos:broader|Event -URI encoding|skos:broader|URI -Mafia|skos:broader|Grands problèmes -Torture|skos:broader|Horreur -Terre cuite|skos:broader|Archéologie -Word embedding technique (unsupervised learning algorithm for obtaining vector representations for words) based on factorizing a matrix of word co-occurrence statistics (Training is performed on aggregated global word-word co-occurrence statistics from a corpus). Resulting representations showcase interesting linear substructures of the word vector space. |skos:broader|[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Parameterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated by training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. -Extinction d'espèces|skos:broader|Grands problèmes -HTTP GET vs POST|skos:broader|Web architecture -Hongkong|skos:broader|China -JCS - Java Caching System|skos:broader|apache.org -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering.
In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|sl:tag|tag:cluster_analysis -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, that reminds me of something) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge.
Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:arxiv_author|Jiawei Han -MaxEnt classifier (Multinomial logistic regression)|skos:broader|Maxent models -Don't waste my time|skos:broader|Temps -Latent semantic indexing LSA LSI|skos:broader|Vectorial semantics -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:arxiv_firstAuthor|Qingyun Wang -Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. two methods: 1. Category Embedding model: it replaces the entities in the context with their directly labeled categories to build categories’ context; 2. Hierarchical Category Embedding: it further incorporates all ancestor categories of the context entities to utilize the hierarchical information. Due to the lack of structured knowledge applied in learning distributed representation of categories, existing work cannot incorporate category hierarchies into entity information. We propose a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. The framework allows to compute meaningful semantic relatedness between entities and categories. 
Our framework can handle both single-word concepts and multiple-word concepts with superior performance on concept categorization and yield state of the art results on dataless hierarchical classification.|sl:tag|tag:nlp_hierarchical_text_classification -Knowledge Graphs|skos:broader|Graph -fast.ai|skos:broader|Machine Learning Course -Dimensionality reduction|skos:broader|Machine learning: techniques -Kadhafi|skos:broader|Dictateur -RDF Net API|skos:broader|API -Embeddings in Information Retrieval|skos:broader|Neural Models for Information Retrieval -How can we use knowledge graph in computing? A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |skos:broader|Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high-dimensionality. Network embeddings aim to represent network vertices into a low-dimensional vector space, by preserving both network topology structure and node content information. Algorithms are typically unsupervised and can be broadly classified into three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): - matrix factorization - random walks - deep learning approaches (graph neural networks - GNNs) - graph convolution networks (GraphSage) - graph attention networks, - graph auto-encoders (e.g., DNGR and SDNE) - graph generative networks, - graph spatial-temporal networks. Node embeddings (intuition: similar nodes should have similar vectors). - Laplacian EigenMap (an eigenvector based computation, OK when matrix is not too large) - LINE Large-scale Information Network Embedding, most cited paper at WWW2015; Breadth first search - DeepWalk (Perozzi et al. 2014) (the technique to learn word embeddings adapted to nodes: treating nodes as words and generating short random walks as sentences) - Node2Vec (2016) (mixed strategy) etc. -Catastrophe humanitaire|skos:broader|Horreur -Terre de Feu|skos:broader|Argentine -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. 
This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|sl:arxiv_author|Anders Søgaard -NOSQL|skos:broader|Database -A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|sl:tag|tag:arxiv_doc -Mashups|skos:broader|Web 2.0 -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). 
These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:tag|tag:arxiv_doc -RapidMiner|skos:broader|Data mining tools -Rébellion touarègue|skos:broader|Touareg -Pre-trained Models for Natural Language Processing: A Survey Recently, the emergence of pre-trained models (PTMs) has brought natural language processing (NLP) to a new era. In this survey, we provide a comprehensive review of PTMs for NLP. We first briefly introduce language representation learning and its research progress. Then we systematically categorize existing PTMs based on a taxonomy with four perspectives. Next, we describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, we outline some potential directions of PTMs for future research. This survey is purposed to be a hands-on guide for understanding, using, and developing PTMs for various NLP tasks.|sl:tag|tag:survey -Mandelbrot|skos:broader|Fractales -Resources-Oriented Web Services|skos:broader|HATEOAS -Concept Bottleneck Models|skos:broader|Machine learning: techniques -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|sl:tag|tag:yoav_goldberg -Littérature russe|skos:broader|Russie -Vue.js|skos:broader|Javascript framework -create.js|skos:broader|Interactive Knowledge Stack -Longformer: The Long-Document Transformer Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. 
Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.|sl:arxiv_firstAuthor|Iz Beltagy -Natural language generation|skos:broader|NLP tasks / problems -Sphere packing|skos:broader|Grands problèmes mathématiques -Aggregating word embeddings through a mean, max, min... function is still one of the most easy and widely used techniques to derive sentence embeddings, often in combination with an MLP or convolutional network (Weston et al. (2014); dos Santos and Gatti (2014); Yin and Schütze (2015); Collobert et al. (2011)). On one hand, the word order is lost, which can be important in e.g. paraphrase identification. On the other hand, the methods are simple, out-of-the-box and do not require a fixed length input.|skos:broader|[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder) Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity. A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low dimensional space, relative to the vocabulary size. ~ Context-predicting models ~ Latent feature representations of words Parameterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space with much lower dimension. Plongement lexical in French Word embedding of a word: a succinct representation of the distribution of other words around this word. Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear. In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus The mapping may be generated by training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip gram model). The context is a window of surrounding words. The most known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available in the word2vec code.google page. Applications: - search document ranking - boost the performance in NLP tasks such as syntactic parsing and sentiment analysis. -ML and physics|skos:broader|Machine learning -Certificat de nationalité|skos:broader|Administration française -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. 
While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:arxiv_author|Charlotte Pelletier -Desktop applications|skos:broader|Informatique -R (programming language)|skos:broader|Data analysis -Named Entity Recognition with Extremely Limited Data Named Entity Search (NES) We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries. We do not propose this as a replacement for NER, but as something to be used for an ephemeral or contextual class of entity, when it does not make sense to label hundreds or thousands of instances to learn a classifier Traditional information retrieval treats named entity recognition as a pre-indexing corpus annotation task, allowing entity tags to be indexed and used during search. Named entity taggers themselves are typically trained on thousands or tens of thousands of examples labeled by humans. However, there is a long tail of named entities classes, and for these cases, labeled data may be impossible to find or justify financially. We propose exploring named entity recognition as a search task, where the named entity class of interest is a query, and entities of that class are the relevant documents. What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries.|sl:tag|tag:information_retrieval -Learning Sparse, Distributed Representations using the Hebbian Principle The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. 
In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|sl:tag|tag:arxiv_doc -Laser|skos:broader|Physique -SparqlPress|skos:broader|WordPress -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:tag|tag:kg_and_nlp -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. 
Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:arxiv_author|Max Berrendorf -Semantic Negotiation|skos:broader|Semantic Web -Web 2.0 businesses|skos:broader|Web 2.0 -Etat policier|skos:broader|Société -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:arxiv_author|Jason Weston -Blogger|skos:broader|Blog -Nissan|skos:broader|Entreprise -Croisade des Albigeois|skos:broader|Croisades -Passage AI|skos:broader|Chatbots -Paradoxe Einstein-Podolsky-Rosen|skos:broader|Mécanique quantique -Vito|skos:broader|Ami -Architecture|skos:broader|Art -Arctique|skos:broader|Régions polaires -Musée archéologique de Bagdad|skos:broader|Mésopotamie -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. 
In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudoku problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|sl:tag|tag:constraint_satisfaction_problem -James Hendler|skos:broader|Technical girls and guys -Brexit|skos:broader|Royaume Uni -Bill de hÓra|skos:broader|Technical girls and guys -Coursera|skos:broader|MOOC -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|sl:arxiv_firstAuthor|Jeremy Howard -Guillaume Lample|skos:broader|NLP@Facebook -Dev tips|skos:broader|Tips -Langue française|skos:broader|Langues -Italie|skos:broader|Europe -Loi sur le téléchargement|skos:broader|Le gouvernement Chirac est trop con -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. 
Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|sl:tag|tag:brad_pitt -Tribunal Pénal International|skos:broader|Institutions internationales -France : dysfonctionnement administratif|skos:broader|France : bureaucratie -DeepType: Multilingual Entity Linking by Neural Type System Evolution The wealth of structured (e.g. Wikidata) and unstructured data about the world available today presents an incredible opportunity for tomorrow's Artificial Intelligence. So far, integration of these two different modalities is a difficult process, involving many decisions concerning how best to represent the information so that it will be captured or useful, and hand-labeling large amounts of data. DeepType overcomes this challenge by explicitly integrating symbolic information into the reasoning process of a neural network with a type system. First we construct a type system, and second, we use it to constrain the outputs of a neural network to respect the symbolic structure. We achieve this by reformulating the design problem into a mixed integer problem: create a type system and subsequently train a neural network with it. In this reformulation discrete variables select which parent-child relations from an ontology are types within the type system, while continuous variables control a classifier fit to the type system. The original problem cannot be solved exactly, so we propose a 2-step algorithm: 1) heuristic search or stochastic optimization over discrete variables that define a type system informed by an Oracle and a Learnability heuristic, 2) gradient descent to fit classifier parameters. We apply DeepType to the problem of Entity Linking on three standard datasets (i.e. WikiDisamb30, CoNLL (YAGO), TAC KBP 2010) and find that it outperforms all existing solutions by a wide margin, including approaches that rely on a human-designed type system or recent deep learning-based entity embeddings, while explicitly using symbolic information lets it integrate new entities without retraining.|sl:tag|tag:entity_linking -Irlande|skos:broader|Pays d'Europe -Learning Deep Latent Spaces for Multi-Label Classification Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to learn a latent subspace from both feature and label domains for multi-label classification. (several implementations on github) Multi-label classification is a practical yet challenging task in machine learning related fields, since it requires the prediction of more than one label category for each input instance. We propose a novel deep neural networks (DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this task. Aiming at better relating feature and label domain data for improved classification, we uniquely perform joint feature and label embedding by deriving a deep latent space, followed by the introduction of label-correlation sensitive loss function for recovering the predicted label outputs. Our C2AE is achieved by integrating the DNN architectures of canonical correlation analysis and autoencoder, which allows end-to-end learning and prediction with the ability to exploit label dependency. 
Moreover, our C2AE can be easily extended to address the learning problem with missing labels. Our experiments on multiple datasets with different scales confirm the effectiveness and robustness of our proposed method, which is shown to perform favorably against state-of-the-art methods for multi-label classification.|sl:arxiv_author|Wei-Chieh Wu -Orange (data mining)|skos:broader|Python -formalism of information retrieval useful to derive functions that rank matching documents according to their relevance to a given search query.|skos:broader|Finding items that are similar to a given query is the core aspect of search and retrieval systems, as well as of recommendation engines. -Epimorphics json-rdf|skos:broader|RDF-in-JSON -A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|sl:arxiv_author|Chengqi Zhang -Mac OS X Tip|skos:broader|Mac OS X -React.js|skos:broader|Javascript framework -Fungal infections|skos:broader|Champignon -ML|skos:broader|Data analysis -No Fuss Distance Metric Learning using Proxies We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity... Traditionally, supervision is expressed in the form of sets of points that follow an ordinal relationship – an anchor point x is similar to a set of positive points Y, and dissimilar to a set of negative points Z, and a loss defined over these distances is minimized. Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets). We propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): Proxy based triplet learning: instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. 
In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. Near the conclusion: Our formulation of Proxy-NCA loss produces a loss very similar to the standard cross-entropy loss used in classification. However, we arrive at our formulation from a different direction: we are not interested in the actual classifier and indeed discard the proxies once the model has been trained. Instead, the proxies are auxiliary variables, enabling more effective optimization of the embedding model parameters. As such, our formulation not only enables us to surpass the state of the art in zero-shot learning, but also offers an explanation to the effectiveness of the standard trick of training a classifier, and using its penultimate layer’s output as the embedding. We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity. Traditionally, for this problem supervision is expressed in the form of sets of points that follow an ordinal relationship -- an anchor point $x$ is similar to a set of positive points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined over these distances is minimized. While the specifics of the optimization differ, in this work we collectively call this type of supervision Triplets and all methods that follow this pattern Triplet-Based methods. These methods are challenging to optimize. A main issue is the need for finding informative triplets, which is usually achieved by a variety of tricks such as increasing the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, the convergence rate of such methods is slow. In this paper we propose to optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss. This proxy-based loss is empirically better behaved. As a result, the proxy-loss improves on state-of-art results for three standard zero-shot learning datasets, by up to 15% points, while converging three times as fast as other triplet-based losses.|sl:arxiv_author|Thomas K. Leung -Memristor|skos:broader|Bio inspired computing devices -Reformer: The Efficient Transformer Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O($L^2$) to O($L\\log L$), where $L$ is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of $N$ times, where $N$ is the number of layers. 
The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.|sl:arxiv_firstAuthor|Nikita Kitaev -African origin of modern humans|skos:broader|Origines de l'homme -Eclipse tip|skos:broader|Eclipse -Empires d'Afrique de l'Ouest|skos:broader|Histoire de l'Afrique -Sarkozy|skos:broader|Homme politique -Gradient descent|skos:broader|Algorithmes -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:arxiv_firstAuthor|Andrew L. Beam -IBM SPSS Text Analytics for Surveys|skos:broader|IBM -Wordnet|skos:broader|Princeton -RDF and SOA|skos:broader|RDF -Christine Golbreich|skos:broader|SW guys (and girls) -Web|skos:broader|Internet -Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images Deep neural networks (DNNs) have recently been achieving state-of-the-art performance on a variety of pattern-recognition tasks, most notably visual classification problems. Given that DNNs are now able to classify objects in images with near-human-level performance, questions naturally arise as to what differences remain between computer and human vision. A recent study revealed that changing an image (e.g. of a lion) in a way imperceptible to humans can cause a DNN to label the image as something else entirely (e.g. mislabeling a lion a library). Here we show a related result: it is easy to produce images that are completely unrecognizable to humans, but that state-of-the-art DNNs believe to be recognizable objects with 99.99% confidence (e.g. labeling with certainty that white noise static is a lion). Specifically, we take convolutional neural networks trained to perform well on either the ImageNet or MNIST datasets and then find images with evolutionary algorithms or gradient ascent that DNNs label with high confidence as belonging to each dataset class. It is possible to produce images totally unrecognizable to human eyes that DNNs believe with near certainty are familiar objects, which we call fooling images (more generally, fooling examples). Our results shed light on interesting differences between human vision and current DNNs, and raise questions about the generality of DNN computer vision.|sl:tag|tag:arxiv_doc -Amanuensis: The Programmer's Apprentice The use of natural language to facilitate communication between the expert programmer and apprentice AI system. 
an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving. This document provides an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems that leverage and extend the state of the art in machine learning by integrating human and machine intelligence. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. Over time these savants learn cognitive strategies (domain-relevant problem solving skills) and develop intuitions (heuristics and the experience necessary for applying them) by learning from their expert associates. By doing so these savants elevate their innate analytical skills allowing them to partner on an equal footing as versatile collaborators - effectively serving as cognitive extensions and digital prostheses, thereby amplifying and emulating their human partner's conceptually-flexible thinking patterns and enabling improved access to and control over powerful computing resources.|sl:arxiv_author|Nate Gruver -sig.ma|skos:broader|DERI -SIMILE Exhibit|skos:broader|JavaScript -DSSM (Deep Semantic Similarity Model)|skos:broader|Embeddings in Information Retrieval -Shanghai Expo 2010|skos:broader|Shanghaï -Neurones|skos:broader|Brain -Combining knowledge graphs|skos:broader|Knowledge Graphs -Neural Models for Information Retrieval|skos:broader|Information retrieval -TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network how to add a set of new concepts to an existing taxonomy. [Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) we study the taxonomy expansion task: given an existing taxonomy and a set of new emerging concepts, we aim to automatically expand the taxonomy to incorporate these new concepts (without changing the existing relations in the given taxonomy). To the best of our knowledge, this is the first study on how to expand an existing directed acyclic graph (as we model a taxonomy as a DAG) using self-supervised learning. Self-supervised framework, the existing taxonomy being used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept. 2 techniques: 1. a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, 2. 
a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ego network of concepts (potential “siblings” and “grand parents” of the query concept). Regular GNNs fail to distinguish nodes with different relative positions to the query (i.e., some nodes are grand parents of the query while the others are siblings of the query). To address this limitation, we present a simple but effective enhancement to inject such position information into GNNs using position embedding. We show that such embedding can be easily integrated with existing GNN architectures (e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the prediction performance Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) Instead of predicting whether each individual ⟨query concept, anchor concept⟩ pair is positive or not, we first group all pairs sharing the same query concept into a single training instance and learn a model to select the positive pair among other negative ones from the group. (Hmm, that reminds me of something) assume each concept (in existing taxonomy + set of new concepts) has an initial embedding vector learned from some text associated with this concept. To keep things tractable, only attempts to find a single parent node of each new concept. Taxonomies consist of machine-interpretable semantics and provide valuable knowledge for many web applications. For example, online retailers (e.g., Amazon and eBay) use taxonomies for product recommendation, and web search engines (e.g., Google and Bing) leverage taxonomies to enhance query understanding. Enormous efforts have been made on constructing taxonomies either manually or semi-automatically. However, with the fast-growing volume of web content, existing taxonomies will become outdated and fail to capture emerging knowledge. Therefore, in many applications, dynamic expansions of an existing taxonomy are in great demand. In this paper, we study how to expand an existing taxonomy by adding a set of new concepts. We propose a novel self-supervised framework, named TaxoExpan, which automatically generates a set of query concept, anchor concept pairs from the existing taxonomy as training data. Using such self-supervision data, TaxoExpan learns a model to predict whether a query concept is the direct hyponym of an anchor concept. We develop two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural network that encodes the local structure of an anchor concept in the existing taxonomy, and (2) a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data. 
Extensive experiments on three large-scale datasets from different domains demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy expansion.|sl:arxiv_author|Chi Wang -Calais (jungle)|skos:broader|Immigration -Coursera: Web Intelligence and Big Data|skos:broader|Big Data -Coursera: A History of the World since 1300|skos:broader|Histoire du monde -Transposon|skos:broader|Genetics Génétique -Link to me|skos:broader|fps -Semantic Web Client Library|skos:broader|GRDDL -Neural coding|skos:broader|Neuroscience -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:tag|tag:arxiv_doc -ANN used for unsupervised learning of efficient codings: learning a representation (encoding) for a set of data, typically for the purpose of dimensionality reduction. an unsupervised neural network which is trained to reconstruct a given input from its latent representation (Bengio, 2009). Unlike principal components analysis, the encoding and decoding steps are not limited to linear transformations (PCA learns an encoding linear transform, while auto-encoders learn an encoding program). |skos:broader|process of reducing the number of random variables under consideration. Can be divided into feature selection and feature extraction. -Biodiversity|skos:broader|Biologie -Continual Learning|skos:broader|Machine learning: techniques -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks.
Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:tag|tag:ruslan_salakhutdinov -Virtuoso: review|skos:broader|Virtuoso -Semanlink|skos:broader|fps -Digital Video|skos:broader|Technologie -Jeffrey T. Pollock|skos:broader|Technical girls and guys -Cliqz|skos:broader|Brouteur -Constraints in the SW|skos:broader|Semantic Web -Paradise Papers|skos:broader|Leaks -Synonym URIs|skos:broader|URI -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|sl:tag|tag:arxiv_doc -Volkswagate|skos:broader|Volkswagen -Conscience artificielle|skos:broader|Conscience -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. 
Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:arxiv_author|Ludovic Denoyer -Euro Crisis|skos:broader|Crise financière -Andrew McCallum|skos:broader|AI girls and guys -JUnit|skos:broader|Unit test -DARPA|skos:broader|Armée américaine -Semantic Overflow|skos:broader|Q&A -Musique en ligne|skos:broader|Musique -Apple Developer Connection|skos:broader|Apple -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:arxiv_author|Łukasz Kaiser -Marchands d'arme|skos:broader|Industrie de l'armement -Martin Hepp|skos:broader|SW guys (and girls) -Exponential Organizations|skos:broader|Technological singularity -Keyword/keyphrase extraction|skos:broader|Phrases (NLP) -Social bookmarking|skos:broader|Social Networks -Jacqueline de Romilly|skos:broader|Grèce antique -Docker-Python|skos:broader|Python -TheWebConf 2019|skos:broader|TheWebConf -Linear classifier|skos:broader|Supervised machine learning -Eau de Mars|skos:broader|Eau extraterrestre -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora.
Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|sl:arxiv_author|Sandeep Subramanian -Luis Buñuel|skos:broader|Réalisateur -gnizr|skos:broader|Semanlink related -Inductive bias|skos:broader|Bias -rdfQuery|skos:broader|Javascript RDF -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:arxiv_author|Heng Ji -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call "retrofitting." Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed.
Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:arxiv_author|Manaal Faruqui -Google ranking|skos:broader|Google -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:tag|tag:arxiv_doc -Boosting|skos:broader|Ensemble learning -\Why Should I Trust You?\: Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|sl:arxiv_author|Carlos Guestrin -Basic|skos:broader|Programming language -N3|skos:broader|RDF -BlackboxNLP (2018 workshop)|skos:broader|Workshop -Apache Marmotta|skos:broader|LDP: implementations -Histoire anglaise|skos:broader|Royaume Uni -GoodRelations/Renault|skos:broader|GoodRelations -How can we use knowledge graph in computing? 
A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. Generally, each entity is represented as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. transE, a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity embedding encodes both local and global connectivity patterns of the original graph. |skos:broader|The objective of embedding methods is to organize symbolic objects (e.g., words, entities, concepts) in a way such that their similarity in the embedding space reflects their semantic or functional similarity -Text: dimension reduction|skos:broader|Dimensionality reduction -folksonomies ontologies|skos:broader|Tagging -Truffe|skos:broader|Gastronomie -Knowledge Graph Conference 2019|skos:broader|Enterprise Knowledge Graph -NLP: pretraining|skos:broader|NLP techniques -Eclipse Juno|skos:broader|Eclipse -URIBurner.com|skos:broader|OpenLink Software -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT's cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point.
Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:tag|tag:memory_in_deep_learning -Postman|skos:broader|REST -Business Intelligence and Semantic Web|skos:broader|Business intelligence -National Geographic|skos:broader|Géographie -Siri|skos:broader|Voice AI -Cloud and Linked Data|skos:broader|Cloud and Linked Data -Delip Rao|skos:broader|NLP girls and guys -A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|sl:arxiv_firstAuthor|Zonghan Wu -Darfour|skos:broader|Catastrophe humanitaire -Jersey Cache-Control|skos:broader|jersey -SDB: A SPARQL Database for Jena|skos:broader|SQL -Tag ontology|skos:broader|Ontologies -Computational Neuroscience|skos:broader|Neuroscience -Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space.
In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:tag|tag:knowledge_graph_embeddings -Maïs|skos:broader|Agriculture -Matières premières|skos:broader|Economie -Barnaby Jack|skos:broader|Hackers -summly|skos:broader|Text Summarization -Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain upto 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|sl:arxiv_author|Sachin Kumar -Relation Extraction|skos:broader|Knowledge Graph Completion -Meta Content Framework|skos:broader|Apple -LODr|skos:broader|Alexandre Passant -Ethereum|skos:broader|Blockchain -Chinafrique|skos:broader|China -Astrophysique|skos:broader|Cosmologie -m2eclipse|skos:broader|Eclipse -Locality Sensitive Hashing|skos:broader|Big Data -NG4J|skos:broader|Chris Bizer -Compagnies pétrolières|skos:broader|Entreprise -Labeled Data|skos:broader|Training data -Node Embeddings|skos:broader|Graph Embeddings -Semantic mashups|skos:broader|Semantic Web : Application -Mission Villani sur l'IA|skos:broader|Politique économique française -Knowledge Graph + Deep Learning|skos:broader|Domain Knowledge + Deep Learning -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations.
We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|sl:tag|tag:arxiv_doc -Probabilistic relevance model|skos:broader|Information retrieval: techniques -NLP tools|skos:broader|NLP -JavaOne|skos:broader|Conférences -Random forest|skos:broader|Machine learning: techniques -Nikolai Vavilov|skos:broader|Scientifique -Regroupement familial et test ADN de filiation|skos:broader|Ca craint -Unifying distillation and privileged information A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) Distillation (Hinton et al., 2015) and privileged information (Vapnik & Izmailov, 2015) are two techniques that enable machines to learn from other machines. This paper unifies these two techniques into generalized distillation, a framework to learn from multiple machines and data representations. We provide theoretical and causal insight about the inner workings of generalized distillation, extend it to unsupervised, semisupervised and multitask learning scenarios, and illustrate its efficacy on a variety of numerical simulations on both synthetic and real-world data.|sl:arxiv_author|Bernhard Schölkopf -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:tag|tag:arxiv_doc -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that used in Word2Vec Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in nature language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations.
Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|sl:arxiv_firstAuthor|Li-Qiang Niu -Multilingual embeddings|skos:broader|Embeddings in NLP -nbdev.fast.ai|skos:broader|Jupyter -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|sl:tag|tag:survey -Zapata|skos:broader|Mexique -Number of neurons|skos:broader|Brain -Apple Java|skos:broader|Apple -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. 
Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:arxiv_author|Peng Zhou -RDF database|skos:broader|RDF and database -Akhênaton|skos:broader|Pharaon -Text: dimension reduction|skos:broader|NLP techniques -Dan Brickley|skos:broader|Technical girls and guys -Hubble|skos:broader|NASA -Relational inductive biases|skos:broader|Inductive bias -Diable de Tasmanie|skos:broader|Endangered Species -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT's cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:arxiv_author|Lin Pan -Tortures américaines|skos:broader|USA -Receiver operating characteristic. Plot used to diagnostic ability of a binary classifier as its discrimination threshold is varied. Plotting the true positive rate (TPR: recall) against the false positive rate (FPR: fall-out or probability of false alarm ) at various threshold settings. |skos:broader|the problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations whose category membership is known.
-Thesaurus|skos:broader|Thesaurus & Taxonomies -Antiquité de l'Inde|skos:broader|Antiquité -Energie sombre|skos:broader|Physique -Jena|skos:broader|RDF Framework -Wikification|skos:broader|Entity linking -1ere guerre mondiale|skos:broader|Histoire du XXe siècle -Doc2Vec|skos:broader|Document embeddings -Knowledge Graphs|skos:broader|Knowledge -Google Structured Data Testing Tool|skos:broader|Google: SEO -KG-BERT: BERT for Knowledge Graph Completion Pre-trained language models for knowledge graph completion. Triples are treated as textual sequences. (Hum, j'ai déjà vu ça quelque part) Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification, link prediction and relation prediction tasks.|sl:arxiv_firstAuthor|Liang Yao -Sarkozy|skos:broader|Politique française -Tahar Ben Jelloun|skos:broader|Ecrivain -PRISM|skos:broader|NSA -Passage AI|skos:broader|AI: startups -Jardinage|skos:broader|Jardin -N-ary Relations on the Semantic Web|skos:broader|Semantic Web -Metagenomics|skos:broader|Biodiversité -node.js|skos:broader|JavaScript -Roméo Dallaire|skos:broader|ONU -Pillage du palais d'été|skos:broader|Pékin -SPIN functions|skos:broader|TopBraid/SPIN -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Review](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks.
Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|sl:arxiv_author|Weizhu Chen -Californie|skos:broader|USA -Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|sl:arxiv_author|Luciano Serafini -Javascript RDF|skos:broader|RDF -Semantic Web, Semantic Me.|skos:broader|Hi, I am http://www.semanlink.net/tag/fps -Learned Index Structures|skos:broader|Machine learning: techniques -SVM|skos:broader|Kernel trick Kernel method -Empire colonial français|skos:broader|Colonisation -Pará|skos:broader|Amazonie -Zitgist|skos:broader|Semantic Web Services -Yahoo!|skos:broader|Entreprise -Mladic|skos:broader|Méchant -Perceptron|skos:broader|Supervised machine learning -Gore Vidal|skos:broader|Romancier -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train.
Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:arxiv_author|Niki Parmar -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|sl:tag|tag:named_entity_recognition -Knowledge-driven embeddings|skos:broader|Knowledge bases -LinkTo Semanlink|skos:broader|Link to me -Belo Horizonte|skos:broader|Brésil -Economies d'énergie|skos:broader|Energie -Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model. Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|sl:tag|tag:sequence_labeling -Belém|skos:broader|Pará -John Pereira|skos:broader|SW guys (and girls) -Mona Lisa|skos:broader|Peinture -Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation An embedding method specifically designed for NED that jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models.
The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.|sl:tag|tag:combining_word_and_entity_embeddings -Mur de Berlin|skos:broader|Histoire -Conventional topic models implicitly capture the document-level word co-occurrence patterns to reveal topics. This may not work well on short texts, because of data sparsity. Compared with long texts, topic discovery from short texts has the following three challenges: - only very limited word co-occurrence information is available, - the frequency of words plays a less discriminative role, - and the limited contexts make it more difficult to identify the senses of ambiguous words |skos:broader|A statistical model for discovering the abstract topics that occur in a collection of documents. -Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. two methods: 1. Category Embedding model): it replaces the entities in the context with their directly labeled categories to build categories' context; 2. Hierarchical Category Embedding: it further incorporates all ancestor categories of the context entities to utilize the hierarchical information. Due to the lack of structured knowledge applied in learning distributed representation of categories, existing work cannot incorporate category hierarchies into entity information. We propose a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. The framework allows to compute meaningful semantic relatedness between entities and categories.
Our framework can handle both single-word concepts and multiple-word concepts with superior performance on concept categorization and yield state of the art results on dataless hierarchical classification.|sl:arxiv_author|Katia Sycara -PlaneteAfrique|skos:broader|Afrique -Film fantastique|skos:broader|Film -Pixelwise dense prediction|skos:broader|Computer vision -OwlSight|skos:broader|Google Web Toolkit -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 "discourse atoms" that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:tag|tag:lexical_ambiguity -RDFa 1.1|skos:broader|RDFa -Érythrée|skos:broader|Afrique de l'Est -Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy. Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|sl:tag|tag:bertology -Firefox extension|skos:broader|Firefox -Medical Information Search|skos:broader|Medical IR, ML, IA -Music of Africa|skos:broader|Musique -[Notes](/doc/2020/01/Semanlink%20dev%20notes.md)|skos:broader|Semantic Web, Semantic Me.
-Shanghai Expo 2010|skos:broader|Exposition universelle -JSONP|skos:broader|Ajax -Lula|skos:broader|Chef d'état -Decision tree learning|skos:broader|Supervised machine learning -The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives [blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We focus on the Transformers for our analysis as they have been shown effective on various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and how this process depends on the choice of learning objective. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.|sl:tag|tag:arxiv_doc -Ofir|skos:broader|Amazonie -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods.
The results show the superiority of our proposed LAHA method, especially on the tail labels.|sl:tag|tag:extreme_classification -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:arxiv_author|Michael Karlen -Metropolitan Museum of Art|skos:broader|New York -Multi-task learning|skos:broader|Machine learning: techniques -Tchernobyl|skos:broader|Catastrophe industrielle -Zotero|skos:broader|Firefox extension -Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|sl:tag|tag:named_entity_recognition -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. 
Specifically, we study two structured distillation schemes: i) pair-wise distillation that distills the pairwise similarities by building a static graph, and ii) holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|sl:arxiv_firstAuthor|Yifan Liu -A Brief Introduction to Machine Learning for Engineers This monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning. The treatment concentrates on probabilistic models for supervised and unsupervised learning problems. It introduces fundamental concepts and algorithms by building on first principles, while also exposing the reader to more advanced topics with extensive pointers to the literature, within a unified notation and mathematical framework. The material is organized according to clearly defined categories, such as discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models. This monograph is meant as an entry point for researchers with a background in probability and linear algebra.|sl:tag|tag:machine_learning -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:tag|tag:sentence_embeddings -"Why Should I Trust You?": Explaining the Predictions of Any Classifier technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model.
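The Universal Sentence Encoder entry above notes that the pre-trained models are published on TF Hub; a hedged usage sketch follows. The module handle is the commonly used v4 one and may need adjusting.

```python
import numpy as np
import tensorflow_hub as hub

embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
vectors = embed(["The quick brown fox.", "A fast auburn fox."]).numpy()  # (2, 512)

# Cosine similarity between the two sentence embeddings
a, b = vectors
print(float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b))))
```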
Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.|sl:arxiv_author|Sameer Singh -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|sl:tag|tag:arxiv_doc -Chanson|skos:broader|Musique -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. 
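A runnable sketch of the LIME entry above, using the `lime` package around a toy scikit-learn text classifier (the corpus is made up for illustration):

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from lime.lime_text import LimeTextExplainer

# Toy sentiment classifier standing in for an arbitrary black box
texts = ["great movie", "awful film", "loved it", "terrible plot"] * 10
labels = [1, 0, 1, 0] * 10
pipe = make_pipeline(TfidfVectorizer(), LogisticRegression())
pipe.fit(texts, labels)

# LIME perturbs the input text and fits an interpretable linear model
# locally around this one prediction.
explainer = LimeTextExplainer(class_names=["neg", "pos"])
exp = explainer.explain_instance("a great but terrible movie",
                                 pipe.predict_proba, num_features=4)
print(exp.as_list())  # (token, local weight) pairs
```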
We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:arxiv_firstAuthor|Mike Lewis -Génétique humaine|skos:broader|Génome -RDA|skos:broader|Allemagne -Feature selection|skos:broader|Machine learning: techniques -Banque Centrale Européenne|skos:broader|Institutions européennes -ML/NLP blog|skos:broader|NLP -Championnats du monde à Paris-Saint Denis, 2003|skos:broader|Championnat du monde d'athlétisme -Apache Shiro|skos:broader|Java dev -Iran|skos:broader|Asie -Python-NLP|skos:broader|Python -Apache Hive|skos:broader|Hadoop -Mars/Curiosity|skos:broader|Exploration marsienne -Corent|skos:broader|Auvergne -Google Visualization API|skos:broader|JavaScript -Robert McLiam Wilson|skos:broader|Irlande du Nord -Youtube tutorial|skos:broader|YouTube video -Wikification|skos:broader|Wikipedia -Ant|skos:broader|Dev tools -Délocalisations|skos:broader|Mondialisation -Fast.ai course|skos:broader|fast.ai -Deep Mutual Learning In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. [critic here](doc:2020/06/1804_03235_large_scale_distri): Zhang et al. (2017) reported a benefit in quality over basic distillation, but they compare distilling model M1 into model M2 with training model M1 and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 and M2 into model M3. ... we can achieve the 70.7% they report for online distillation using traditional offline distillation. Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, that is better suited to low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy where, rather than one way transfer between a static pre-defined teacher and a student, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on CIFAR-100 recognition and Market-1501 person re-identification benchmarks. Surprisingly, it is revealed that no prior powerful teacher network is necessary -- mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.|sl:tag|tag:arxiv_doc -Bidirectional LSTM-CRF Models for Sequence Tagging In this paper, we propose a variety of Long Short-Term Memory (LSTM) based models for sequence tagging. These models include LSTM networks, bidirectional LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer (LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model can efficiently use both past and future input features thanks to a bidirectional LSTM component. It can also use sentence level tag information thanks to a CRF layer. 
The BI-LSTM-CRF model can produce state of the art (or close to) accuracy on POS, chunking and NER data sets. In addition, it is robust and has less dependence on word embedding as compared to previous observations.|sl:arxiv_author|Kai Yu -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Sebastian Neumaier -Bertrand Sajus|skos:broader|SW guys (and girls) -AQMI|skos:broader|Sahel -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|sl:tag|tag:deep_learning -Pfizer|skos:broader|Industrie pharmaceutique -Peter Chilson|skos:broader|Peace Corps -Web Application Threats|skos:broader|Web dev -Bibliothéconomie|skos:broader|Bibliothèque -IPv6|skos:broader|Internet -Prix Nobel|skos:broader|Science -Union européenne|skos:broader|Europe -Google Web Toolkit|skos:broader|Google -Statistical classification|skos:broader|Machine learning: problems -SKOS|skos:broader|Thesaurus & Taxonomies -Deep pre-training in NLP|skos:broader|Language Modeling Statistical Language Model -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention.
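For the "Bidirectional LSTM-CRF Models for Sequence Tagging" entry above, here is a minimal PyTorch sketch built on the third-party `pytorch-crf` package; all sizes are illustrative assumptions.

```python
import torch
import torch.nn as nn
from torchcrf import CRF  # pip install pytorch-crf

class BiLSTMCRF(nn.Module):
    def __init__(self, vocab_size=1000, num_tags=9, emb=64, hidden=64):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, emb)
        self.lstm = nn.LSTM(emb, hidden // 2, bidirectional=True, batch_first=True)
        self.proj = nn.Linear(hidden, num_tags)      # per-token emission scores
        self.crf = CRF(num_tags, batch_first=True)   # sentence-level tag transitions

    def loss(self, tokens, tags):
        emissions = self.proj(self.lstm(self.emb(tokens))[0])
        return -self.crf(emissions, tags)            # negative log-likelihood

    def decode(self, tokens):
        emissions = self.proj(self.lstm(self.emb(tokens))[0])
        return self.crf.decode(emissions)            # Viterbi best tag paths

model = BiLSTMCRF()
x = torch.randint(0, 1000, (2, 7))                   # 2 sentences of 7 tokens
y = torch.randint(0, 9, (2, 7))
print(model.loss(x, y).item(), model.decode(x))
```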
Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|sl:arxiv_author|Hua Wu -Tandja|skos:broader|Chef d'état -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|sl:arxiv_author|Yonghui Wu -Talis platform|skos:broader|Semantic Web Dev -Text in KG embeddings|skos:broader|Entity embeddings -Mission "Voulet-Chanoine"|skos:broader|Empire colonial français -Folksonomy|skos:broader|Tagging -Evaluation of sentence embeddings in downstream and linguistic probing tasks a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. Despite the fast developmental pace of new sentence embedding methods, it is still challenging to find comprehensive evaluations of these different techniques. In the past years, we saw significant improvements in the field of sentence embeddings and especially towards the development of universal sentence encoders that could provide inductive transfer to a wide variety of downstream tasks. In this work, we perform a comprehensive evaluation of recent methods using a wide variety of downstream and linguistic feature probing tasks.
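A rough sketch of the CoKE idea above: treat an edge as the sequence (head, relation, [MASK]), run a Transformer encoder over it, and predict the masked tail entity. Dimensions, the tiny vocabulary, and all names are assumptions for illustration, not the released PaddlePaddle code.

```python
import torch
import torch.nn as nn

n_ent, n_rel, dim = 100, 10, 32
embed = nn.Embedding(n_ent + n_rel + 1, dim)  # entities, relations, [MASK]
MASK = n_ent + n_rel
encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=dim, nhead=4, batch_first=True),
    num_layers=2)
score_tail = nn.Linear(dim, n_ent)            # score candidate tail entities

h = torch.tensor([[3]])                       # head entity id
r = torch.tensor([[n_ent + 2]])               # relation id, offset past entities
seq = torch.cat([h, r, torch.tensor([[MASK]])], dim=1)  # the edge as a sequence
ctx = encoder(embed(seq))                     # contextualized representations
print(score_tail(ctx[:, 2]).argmax(dim=-1))   # predicted tail at the [MASK] slot
```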
We show that a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets. We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks.|sl:arxiv_firstAuthor|Christian S. Perone -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|sl:arxiv_author|Omer Levy -HAL|skos:broader|RESTful Web Services -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:arxiv_author|Hanning Zhou -Poutine|skos:broader|Méchant -Krakatoa|skos:broader|Volcan -Tim Berners-Lee|skos:broader|Technical girls and guys -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. 
In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:arxiv_author|Ruslan Salakhutdinov -Topic embeddings|skos:broader|Embeddings -Learning Semantic Similarity for Very Short Texts In order to pair short text fragments—as a concatenation of separate words—an adequate distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that combines the strength of dense distributed representations— as opposed to sparse term matching—with the strength of tf-idf based methods. The combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments. Levering data on social media, such as Twitter and Facebook, requires information retrieval algorithms to become able to relate very short text fragments to each other. Traditional text similarity methods such as tf-idf cosine-similarity, based on word overlap, mostly fail to produce good results in this case, since word overlap is little or non-existent. Recently, distributed word representations, or word embeddings, have been shown to successfully allow words to match on the semantic level. In order to pair short text fragments - as a concatenation of separate words - an adequate distributed sentence representation is needed, in existing literature often obtained by naively combining the individual word representations. We therefore investigated several text representations as a combination of word embeddings in the context of semantic pair matching. This paper investigates the effectiveness of several such naive techniques, as well as traditional tf-idf similarity, for fragments of different lengths. Our main contribution is a first step towards a hybrid method that combines the strength of dense distributed representations - as opposed to sparse term matching - with the strength of tf-idf based methods to automatically reduce the impact of less informative terms. Our new approach outperforms the existing techniques in a toy experimental set-up, leading to the conclusion that the combination of word embeddings and tf-idf information might lead to a better model for semantic content within very short text fragments.|sl:arxiv_author|Cedric De Boom -URIBurner.com|skos:broader|Linked Data -Java concurrency|skos:broader|Java -Maven-Eclipse on My mac|skos:broader|Maven -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). 
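The "Learning Semantic Similarity for Very Short Texts" entry above proposes combining word embeddings with tf-idf weighting; a minimal sketch of that hybrid follows, with random stand-in vectors and made-up idf values:

```python
import numpy as np

vocab = {"fox": 0, "quick": 1, "the": 2, "auburn": 3, "fast": 4, "a": 5}
rng = np.random.default_rng(0)
vectors = rng.normal(size=(len(vocab), 50))   # stand-in word embeddings
idf = {"the": 0.1, "a": 0.1, "fox": 2.0, "quick": 1.5, "fast": 1.5, "auburn": 2.5}

def embed(text):
    words = [w for w in text.lower().split() if w in vocab]
    weights = np.array([idf[w] for w in words])   # down-weight uninformative terms
    return weights @ vectors[[vocab[w] for w in words]] / weights.sum()

a, b = embed("the quick fox"), embed("a fast auburn fox")
print(float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b))))
```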
We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:arxiv_author|Boliang Zhang -NLP 4 Requirements Engineering|skos:broader|NLP: use cases -An Introduction to Conditional Random Fields Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields.|sl:arxiv_author|Charles Sutton -Representation Learning with Contrastive Predictive Coding a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video... While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. 
While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.|sl:tag|tag:representation_learning -Industrie textile|skos:broader|Economie -Gaz de schiste|skos:broader|Energies fossiles "non conventionnelles" -Japonais|skos:broader|Langues vivantes -PAC|skos:broader|Union européenne -XSLT|skos:broader|XSL -Industrie minière|skos:broader|industrie -Learning Sparse, Distributed Representations using the Hebbian Principle The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning The "fire together, wire together" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning (AHL). We illustrate the distributed nature of the learned representations via output entropy computations for synthetic data, and demonstrate superior performance, compared to standard alternatives such as autoencoders, in training a deep convolutional net on standard image datasets.|sl:tag|tag:hebbian_theory -EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. Presented in these [slides](/doc/2019/12/unsupervised_learning_with_text) Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. The presence of a concept is decided from an excerpt i.e. a small sequence of consecutive words in the text. Relevant concepts for the prediction task at hand are automatically defined by our model, avoiding the need for concept-level annotations. To ease interpretability, we enforce that for each concept, the corresponding excerpts share similar semantics and are differentiable from each others.
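The core of the Contrastive Predictive Coding entry above is the probabilistic contrastive (InfoNCE) loss: the matching (context, future) pair must score higher than negatives drawn from the rest of the batch. A minimal PyTorch rendering, with random tensors standing in for the encoder and autoregressive model outputs:

```python
import torch
import torch.nn.functional as F

batch, dim = 8, 16
context = torch.randn(batch, dim)   # c_t: summary of the past
future = torch.randn(batch, dim)    # z_{t+k}: encoding of a future sample

# Score every (context_i, future_j) pair; diagonal entries are positives,
# off-diagonal entries act as negative samples.
logits = context @ future.t()
labels = torch.arange(batch)
print(F.cross_entropy(logits, labels).item())  # InfoNCE as cross-entropy
```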
We experimentally demonstrate the relevance of our approach on text classification and multi-sentiment analysis tasks.|sl:tag|tag:explainable_nlp -Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. These findings are significant as it suggests a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|sl:tag|tag:attention_is_all_you_need -KGAT: Knowledge Graph Attention Network for Recommendation To provide more accurate, diverse, and explainable recommendation, it is compulsory to go beyond modeling user-item interactions and take side information into account. Traditional methods like factorization machine (FM) cast it as a supervised learning problem, which assumes each interaction as an independent instance with side information encoded. Due to the overlook of the relations among instances or items (e.g., the director of a movie is also an actor of another movie), these methods are insufficient to distill the collaborative signal from the collective behaviors of users. In this work, we investigate the utility of knowledge graph (KG), which breaks down the independent interaction assumption by linking items with their attributes. We argue that in such a hybrid structure of KG and user-item graph, high-order relations --- which connect two items with one or multiple linked attributes --- are an essential factor for successful recommendation. We propose a new method named Knowledge Graph Attention Network (KGAT) which explicitly models the high-order connectivities in KG in an end-to-end fashion. It recursively propagates the embeddings from a node's neighbors (which can be users, items, or attributes) to refine the node's embedding, and employs an attention mechanism to discriminate the importance of the neighbors. Our KGAT is conceptually advantageous to existing KG-based recommendation methods, which either exploit high-order relations by extracting paths or implicitly modeling them with regularization. 
Empirical results on three public benchmarks show that KGAT significantly outperforms state-of-the-art methods like Neural FM and RippleNet. Further studies verify the efficacy of embedding propagation for high-order relation modeling and the interpretability benefits brought by the attention mechanism.|sl:arxiv_author|Meng Liu -Périodes glacières|skos:broader|Géologie -Emerging Cross-lingual Structure in Pretrained Language Models We study the problem of multilingual masked language modeling, i.e. the training of a single model on concatenated text from multiple languages, and present a detailed study of several factors that influence why these models are so effective for cross-lingual transfer. We show, contrary to what was previously hypothesized, that transfer is possible even when there is no shared vocabulary across the monolingual corpora and also when the text comes from very different domains. The only requirement is that there are some shared parameters in the top layers of the multi-lingual encoder. To better understand this result, we also show that representations from independently trained models in different languages can be aligned post-hoc quite effectively, strongly suggesting that, much like for non-contextual word embeddings, there are universal latent symmetries in the learned embedding spaces. For multilingual masked language modeling, these symmetries seem to be automatically discovered and aligned during the joint training process.|sl:arxiv_author|Veselin Stoyanov -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|sl:arxiv_author|Mike Schuster -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. 
Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:arxiv_author|Jeffrey Dean -Crime contre l'Humanité|skos:broader|Crime -Towards Understanding Linear Word Analogies A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.|sl:tag|tag:word_embedding -Relational Databases and the Semantic Web|skos:broader|Semantic Web -Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|sl:tag|tag:conditional_random_field -Leonardo da Vinci|skos:broader|Renaissance -Symmetric matrices related to the Mertens function In this paper we explore a family of congruences over N from which a sequence of symmetric matrices related to the Mertens function is built. From the results of numerical experiments we formulate a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important role in this classical and difficult problem. In this paper we explore a family of congruences over $\\N^\\ast$ from which one builds a sequence of symmetric matrices related to the Mertens function. From the results of numerical experiments, we formulate a conjecture about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. 
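A toy rendering of the learned-index idea in "The Case for Learned Index Structures" above: fit a model mapping a key to its position in a sorted array, then correct the bounded model error with a local search. Plain least squares stands in for the paper's neural models.

```python
import numpy as np

keys = np.sort(np.random.default_rng(0).uniform(0, 1e6, size=100_000))

# "Model": fit position as a function of key (an approximation of the CDF)
a, b = np.polyfit(keys, np.arange(len(keys)), deg=1)
pred = np.clip((a * keys + b).astype(int), 0, len(keys) - 1)
max_err = int(np.max(np.abs(pred - np.arange(len(keys)))))  # worst-case error

def lookup(k):
    guess = int(np.clip(a * k + b, 0, len(keys) - 1))
    lo = max(0, guess - max_err)
    hi = min(len(keys), guess + max_err + 1)
    return lo + int(np.searchsorted(keys[lo:hi], k))  # search only the error window

assert keys[lookup(keys[1234])] == keys[1234]
```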
This suggests that matrix analysis methods may come to play a more important role in this classical and difficult problem.|sl:tag|tag:hypothese_de_riemann -SourceForge|skos:broader|Open Source -Knowledge Graph Embeddings|skos:broader|Graph Embeddings -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:tag|tag:these_irit_renault_biblio -IBM Watson|skos:broader|IBM -Mémoire humaine|skos:broader|Mémoire -In multi-label classification, each sample can be associated with a set of class labels. It is distinct from multi-class classification which aims to predict a single mutually exclusive label. Methods (non exhaustive list): - Dividing the original multi-label classification problem into multiple independent binary classification tasks - computationally expensive - cannot identify the correlation between label information - Label embedding based approaches (deriving a latent label space with reduced dimensionality) - correlation between the labels can be implicitly exploited eg. replace the final softmax layer with a Sigmoid layer and use Binary Cross Entropy loss function to optimize the model.|skos:broader|the problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations whose category membership is known. -Dynamic Object Model Pattern|skos:broader|Programming -Courtadon|skos:broader|Sculpture -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). 
We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:arxiv_firstAuthor|Xiaodong Liu -Biohackers|skos:broader|OGM -OpenLink Software|skos:broader|Semantic web company -Déforestation|skos:broader|Forêt -Exploring the Limits of Language Modeling recent advances in Recurrent Neural Networks for large scale Language Modeling In this work we explore recent advances in Recurrent Neural Networks for large scale Language Modeling, a task central to language understanding. We extend current models to deal with two key challenges present in this task: corpora and vocabulary sizes, and complex, long term structure of language. We perform an exhaustive study on techniques such as character Convolutional Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. Our best single model significantly improves state-of-the-art perplexity from 51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), while an ensemble of models sets a new record by improving perplexity from 41.0 down to 23.7. We also release these models for the NLP and ML community to study and improve upon.|sl:arxiv_author|Noam Shazeer -Amazonie|skos:broader|Brésil -Machine learning: techniques|skos:broader|Machine learning -Kernel methods|skos:broader|Algorithmes -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:tag|tag:combining_knowledge_graphs -Christopher Olah|skos:broader|AI girls and guys -Messenger|skos:broader|Mercure (Planète) -FaceNet: A Unified Embedding for Face Recognition and Clustering Learns a Euclidean embedding per image Uses a deep CNN trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. 
state-of-the-art face recognition performance using only 128-bytes per face. Despite significant recent advances in the field of face recognition, implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure of face similarity. Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors. Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. Our system cuts the error rate in comparison to the best published result by 30% on both datasets. We also introduce the concept of harmonic embeddings, and a harmonic triplet loss, which describe different versions of face embeddings (produced by different networks) that are compatible to each other and allow for direct comparison between each other.|sl:arxiv_author|Florian Schroff -nbdev.fast.ai|skos:broader|Python tools -Architecture en terre|skos:broader|Architecture -AutoML-Zero: Evolving Machine Learning Algorithms From Scratch Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. Can evolution be the “Master Algorithm”? ;) Machine learning research has advanced in multiple aspects, including model structures and learning methods. The effort to automate such research, known as AutoML, has also made significant progress. However, this progress has largely focused on the architecture of neural networks, where it has relied on sophisticated expert-designed layers as building blocks---or similarly restrictive search spaces. Our goal is to show that AutoML can go further: it is possible today to automatically discover complete machine learning algorithms just using basic mathematical operations as building blocks. We demonstrate this by introducing a novel framework that significantly reduces human bias through a generic search space. Despite the vastness of this space, evolutionary search can still discover two-layer neural networks trained by backpropagation. These simple neural networks can then be surpassed by evolving directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques emerge in the top algorithms, such as bilinear interactions, normalized gradients, and weight averaging. Moreover, evolution adapts algorithms to different task types: e.g., dropout-like techniques appear when little data is available. 
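The FaceNet entry above trains embeddings with a triplet loss; PyTorch ships one as `nn.TripletMarginLoss`, so a minimal sketch (random 128-d embeddings standing in for network outputs) is:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

anchor = F.normalize(torch.randn(4, 128), dim=1)    # 128-d, as in the paper
positive = F.normalize(torch.randn(4, 128), dim=1)  # same identity as anchor
negative = F.normalize(torch.randn(4, 128), dim=1)  # different identity

# Pull anchor toward positive, push it from negative, with a margin.
print(nn.TripletMarginLoss(margin=0.2)(anchor, positive, negative).item())
```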
We believe these preliminary successes in discovering machine learning algorithms from scratch indicate a promising new direction for the field.|sl:arxiv_author|Chen Liang -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|sl:tag|tag:arxiv_doc -Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|sl:arxiv_author|Artur d'Avila Garcez -On Extractive and Abstractive Neural Document Summarization with Transformer Language Models Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. We present a method to produce abstractive summaries of long documents that exceed several thousand words via neural abstractive summarization. We perform a simple extractive step before generating a summary, which is then used to condition the transformer language model on relevant information before being tasked with generating a summary. We show that this extractive step significantly improves summarization results. We also show that this approach produces more abstractive summaries compared to prior work that employs a copy mechanism while still achieving higher rouge scores. 
Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper.|sl:tag|tag:automatic_summarization -ANN: introduction|skos:broader|Introduction -RDF graph versioning|skos:broader|RDF dev -OSEMA/DERI-Renault paper|skos:broader|Configuration and SW -Zemanta|skos:broader|Semantic tagging -A Dual Embedding Space Model for Document Ranking Investigate neural word embeddings as a source of evidence in document ranking. Presented in [this Stanford course on IR](/doc/?uri=https%3A%2F%2Fweb.stanford.edu%2Fclass%2Fcs276%2Fhandouts%2Flecture20-distributed-representations.pdf) by Chris Manning (starting slide 44) They train a word2vec model, but retain both the input and the output projections. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives A fundamental goal of search engines is to identify, given a query, documents that have relevant text. This is intrinsically difficult because the query and the document may use different vocabulary, or the document may contain query words without being relevant. We investigate neural word embeddings as a source of evidence in document ranking. We train a word2vec embedding model on a large unlabelled query corpus, but in contrast to how the model is commonly used, we retain both the input and the output projections, allowing us to leverage both the embedding spaces to derive richer distributional relationships. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. We postulate that the proposed Dual Embedding Space Model (DESM) captures evidence on whether a document is about a query term in addition to what is modelled by traditional term-frequency based approaches. Our experiments show that the DESM can re-rank top documents returned by a commercial Web search engine, like Bing, better than a term-matching based signal like TF-IDF. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives, retrieving documents that are only loosely related to the query. We demonstrate that this problem can be solved effectively by ranking based on a linear mixture of the DESM and the word counting features.|sl:arxiv_firstAuthor|Bhaskar Mitra -Reinhard Mey|skos:broader|Chanson -Loosely formatted text|skos:broader|NLP tasks / problems -Magnétisme|skos:broader|Physique -Lagos|skos:broader|Nigeria -iOS|skos:broader|Apple -Luis Buñuel|skos:broader|Espagne -Probabilités|skos:broader|Mathématiques -Linked Learning 2012|skos:broader|Linked Learning -Censure et maltraitance animale|skos:broader|Agriculture industrielle -Chrome extension|skos:broader|Chrome -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. 
See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:tag|tag:okapi_bm25 -Multi-class classification|skos:broader|Statistical classification -Spark (Java web framework)|skos:broader|Java microframeworks -Big data & semantic web|skos:broader|Semantic Web -rplug|skos:broader|C2GWeb -Energie sombre|skos:broader|Masse manquante -A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged. In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. 
We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|sl:arxiv_author|Fengwen Chen -African land grab|skos:broader|Agriculture africaine -Neural Ranking Models with Weak Supervision Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F): This is truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25 itself! Despite the impressive improvements achieved by unsupervised deep neural networks in computer vision and NLP tasks, such improvements have not yet been observed in ranking for information retrieval. The reason may be the complexity of the ranking problem, as it is not obvious how to learn from queries and documents when no supervised signal is available. Hence, in this paper, we propose to train a neural ranking model using weak supervision, where labels are obtained automatically without human annotators or any external resources (e.g., click data). To this aim, we use the output of an unsupervised ranking model, such as BM25, as a weak supervision signal. We further train a set of simple yet effective ranking models based on feed-forward neural networks. We study their effectiveness under various learning scenarios (point-wise and pair-wise models) and using different input representations (i.e., from encoding query-document pairs into dense/sparse vectors to using word embedding representation). We train our networks using tens of millions of training instances and evaluate it on two standard collections: a homogeneous news collection(Robust) and a heterogeneous large-scale web collection (ClueWeb). Our experiments indicate that employing proper objective functions and letting the networks to learn the input representation based on weakly supervised data leads to impressive performance, with over 13% and 35% MAP improvements over the BM25 model on the Robust and the ClueWeb collections. Our findings also suggest that supervised neural ranking models can greatly benefit from pre-training on large amounts of weakly labeled data that can be easily obtained from unsupervised IR models.|sl:tag|tag:these_irit_renault_biblio_initiale -Corée|skos:broader|Asie -Web search|skos:broader|Search Engines -Jiroft|skos:broader|Découverte archéologique -A Brief Introduction to Machine Learning for Engineers This monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning. The treatment concentrates on probabilistic models for supervised and unsupervised learning problems. It introduces fundamental concepts and algorithms by building on first principles, while also exposing the reader to more advanced topics with extensive pointers to the literature, within a unified notation and mathematical framework. The material is organized according to clearly defined categories, such as discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models. 
This monograph is meant as an entry point for researchers with a background in probability and linear algebra.|sl:arxiv_firstAuthor|Osvaldo Simeone -Corse|skos:broader|France -A Dual Embedding Space Model for Document Ranking Investigate neural word embeddings as a source of evidence in document ranking. Presented in [this Stanford course on IR](/doc/?uri=https%3A%2F%2Fweb.stanford.edu%2Fclass%2Fcs276%2Fhandouts%2Flecture20-distributed-representations.pdf) by Chris Manning (starting slide 44) They train a word2vec model, but retain both the input and the output projections. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives A fundamental goal of search engines is to identify, given a query, documents that have relevant text. This is intrinsically difficult because the query and the document may use different vocabulary, or the document may contain query words without being relevant. We investigate neural word embeddings as a source of evidence in document ranking. We train a word2vec embedding model on a large unlabelled query corpus, but in contrast to how the model is commonly used, we retain both the input and the output projections, allowing us to leverage both the embedding spaces to derive richer distributional relationships. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. We postulate that the proposed Dual Embedding Space Model (DESM) captures evidence on whether a document is about a query term in addition to what is modelled by traditional term-frequency based approaches. Our experiments show that the DESM can re-rank top documents returned by a commercial Web search engine, like Bing, better than a term-matching based signal like TF-IDF. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives, retrieving documents that are only loosely related to the query. We demonstrate that this problem can be solved effectively by ranking based on a linear mixture of the DESM and the word counting features.|sl:arxiv_author|Nick Craswell -Pangloss: Fast Entity Linking in Noisy Text Environments a production system for entity disambiguation on messy text, based on probabilistic tokenization and context-dependent document embeddings Probabilistic tokenization: uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas) Entity linking is the task of mapping potentially ambiguous terms in text to their constituent entities in a knowledge base like Wikipedia. This is useful for organizing content, extracting structured data from textual documents, and in machine learning relevance applications like semantic search, knowledge graph construction, and question answering. Traditionally, this work has focused on text that has been well-formed, like news articles, but in common real world datasets such as messaging, resumes, or short-form social media, non-grammatical, loosely-structured text adds a new dimension to this problem. This paper presents Pangloss, a production system for entity disambiguation on noisy text.
Pangloss combines a probabilistic linear-time key phrase identification algorithm with a semantic similarity engine based on context-dependent document embeddings to achieve better than state-of-the-art results (5% in F1) compared to other research or commercially available systems. In addition, Pangloss leverages a local embedded database with a tiered architecture to house its statistics and metadata, which allows rapid disambiguation in streaming contexts and on-device disambiguation in low-memory environments such as mobile phones.|sl:tag|tag:arxiv_doc -Guerre chimique|skos:broader|War -Pfizer|skos:broader|Entreprise -Technological singularity|skos:broader|Anticipation -About Semanlink|skos:broader|Semanlink -Hypothèse de Riemann|skos:broader|Riemann -Social Web|skos:broader|Social Semantic Web -Hayabusa|skos:broader|Astéroïde -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:tag|tag:google_deepmind -New Africa|skos:broader|Favoris -Greffe de tête|skos:broader|Brain -Differentiable Top-k Operator with Optimal Transport if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. 
This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator ... We apply the proposed operator to the [k-nearest neighbors](tag:k_nearest_neighbors_algorithm) and [beam search](tag:beam_search) algorithms, and demonstrate improved performance The top-k operation, i.e., finding the k largest or smallest elements from a collection of scores, is an important model component, which is widely used in information retrieval, machine learning, and data mining. However, if the top-k operation is implemented in an algorithmic way, e.g., using bubble algorithm, the resulting model cannot be trained in an end-to-end way using prevalent gradient descent algorithms. This is because these implementations typically involve swapping indices, whose gradient cannot be computed. Moreover, the corresponding mapping from the input scores to the indicator vector of whether this element belongs to the top-k set is essentially discontinuous. To address the issue, we propose a smoothed approximation, namely the SOFT (Scalable Optimal transport-based diFferenTiable) top-k operator. Specifically, our SOFT top-k operator approximates the output of the top-k operation as the solution of an Entropic Optimal Transport (EOT) problem. The gradient of the SOFT operator can then be efficiently approximated based on the optimality conditions of EOT problem. We apply the proposed operator to the k-nearest neighbors and beam search algorithms, and demonstrate improved performance.|sl:arxiv_author|Yujia Xie -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:arxiv_author|Stephen H. Bach -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... 
Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:arxiv_author|Tom Kwiatkowski -Personal-information management|skos:broader|Informatique -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. 
Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|sl:arxiv_author|Jeffrey Dean -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Steve Yuan -URI dereferencing|skos:broader|Web architecture -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|sl:tag|tag:arxiv_doc -cluster analysis which seeks to build a hierarchy of clusters. 2 kinds: - Agglomerative - Divisive|skos:broader|the task of grouping a set of objects in such a way that objects in the same group (cluster) are more similar (in some sense or another) to each other than to those in other groups. -Machines teaching machines|skos:broader|Machine learning -Histoire de l'astronomie|skos:broader|Astronomie -Semantic Web P2P|skos:broader|Peer to peer -KODE|skos:broader|Database -SIMILE Timeline|skos:broader|Timeline -Solr and NLP|skos:broader|NLP tools -IHM web|skos:broader|Web dev -TopBraid/SPIN|skos:broader|SPARQL -NER|skos:broader|Entity Analysis -Hierarchical Memory Networks hybrid between hard and soft attention memory networks. 
The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory Memory networks are neural networks with an explicit memory component that can be both read and written to by the network. The memory is often addressed in a soft way using a softmax function, making end-to-end training with backpropagation possible. However, this is not computationally scalable for applications which require the network to read from extremely large memories. On the other hand, it is well known that hard attention mechanisms based on reinforcement learning are challenging to train successfully. In this paper, we explore a form of hierarchical memory network, which can be considered as a hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory. Specifically, we propose to incorporate Maximum Inner Product Search (MIPS) in the training and inference procedures for our hierarchical memory network. We explore the use of various state-of-the art approximate MIPS techniques and report results on SimpleQuestions, a challenging large scale factoid question answering task.|sl:arxiv_author|Pascal Vincent -Virtuoso|skos:broader|TripleStore -Word Mover’s Distance|skos:broader|Text Similarity -fps and WWW 2008|skos:broader|WWW 2008 -Resources-Oriented Web Services|skos:broader|Restful semantic web services -RDF bus|skos:broader|RDF -A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments distance metric learning, a branch of machine learning that aims to learn distances from the data Distance metric learning is a branch of machine learning that aims to learn distances from the data. Distance metric learning can be useful to improve similarity learning algorithms, and also has applications in dimensionality reduction. This paper describes the distance metric learning problem and analyzes its main mathematical foundations. In addition, it also discusses some of the most popular distance metric learning techniques used in classification, showing their goals and the required information to understand and use them. Furthermore, some experiments to evaluate the performance of the different algorithms are also provided. Finally, this paper discusses several possibilities of future work in this topic.|sl:arxiv_author|Francisco Herrera -fps notes|skos:broader|fps -RDF bus|skos:broader|Semantic Integration Hub -Niamey|skos:broader|Ville -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. 
The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|sl:arxiv_firstAuthor|Nikolaos Kolitsas -Pillage du palais d'été|skos:broader|Histoire de la Chine -Virtuoso:doc|skos:broader|Virtuoso -Integrating Tomcat with Apache|skos:broader|Developer documentation -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:tag|tag:open_domain_question_answering -snorql|skos:broader|SPARQL en javascript -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). 
However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|sl:tag|tag:sentence_similarity -Makolab|skos:broader|Pologne -National Taiwan University|skos:broader|Taiwan -Ópera do Malandro|skos:broader|Chico Buarque -External memory algorithm|skos:broader|Mémoire (informatique) -Tech company|skos:broader|Entreprise -fps AND LDOW2008|skos:broader|SW in Technical Automotive Documentation -Loi sur le voile|skos:broader|Con de Chirac -Visualizing and Measuring the Geometry of BERT At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations Transformer architectures show significant promise for natural language processing. Given that a single pretrained model can be fine-tuned to perform well on many different tasks, these networks appear to extract generally useful linguistic features. A natural question is how such networks represent this information internally. This paper describes qualitative and quantitative investigations of one particularly effective model, BERT. At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations.|sl:tag|tag:arxiv_doc -Agriculture|skos:broader|Grands problèmes -Parameter-free Sentence Embedding via Orthogonal Basis training-free approach for building sentence representations, Geometric Embedding (GEM), based on the geometric structure of word embedding space. we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. 
The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace [on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/) [Open Review](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358) We propose a simple and robust non-parameterized approach for building sentence representations. Inspired by the Gram-Schmidt Process in geometric theory, we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. We model the semantic meaning of a word in a sentence based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. The other is the word's novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace. Following this motivation, we develop an innovative method based on orthogonal basis to combine pre-trained word embeddings into sentence representations. This approach requires zero parameters, along with efficient inference performance. We evaluate our approach on 11 downstream NLP tasks. Our model shows superior performance compared with non-parameterized alternatives and it is competitive to other approaches relying on either large amounts of labelled data or prolonged training time.|sl:arxiv_author|Ziyi Yang -Big Data Tools|skos:broader|Tools -Eau extraterrestre|skos:broader|Eau -OpenAI|skos:broader|Artificial Intelligence -hyperSOLutions|skos:broader|My old things -Google car|skos:broader|AI@Google -NLP conference|skos:broader|AI Conference -RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. The success of such a task heavily relies on the ability of modeling and inferring the patterns of (or between) the relations. In this paper, we present a new approach for knowledge graph embedding called RotatE, which is able to model and infer various relation patterns including: symmetry/antisymmetry, inversion, and composition. Specifically, the RotatE model defines each relation as a rotation from the source entity to the target entity in the complex vector space. In addition, we propose a novel self-adversarial negative sampling technique for efficiently and effectively training the RotatE model. Experimental results on multiple benchmark knowledge graphs show that the proposed RotatE model is not only scalable, but also able to infer and model various relation patterns and significantly outperform existing state-of-the-art models for link prediction.|sl:tag|tag:arxiv_doc -jersey|skos:broader|Open Source -Rural India|skos:broader|Inde -Similarity learning|skos:broader|Machine learning: problems -Semantic Web Crawler|skos:broader|Semantic Web : Tools -ISP / Servlet Hosting|skos:broader|ISP -RDF browser|skos:broader|Linked Data -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al.
Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Lukas Schmelzeisen -Configuration as Linked Data|skos:broader|Configuration -Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. two methods: 1. Category Embedding model: it replaces the entities in the context with their directly labeled categories to build categories’ context; 2. Hierarchical Category Embedding: it further incorporates all ancestor categories of the context entities to utilize the hierarchical information. Due to the lack of structured knowledge applied in learning distributed representation of categories, existing work cannot incorporate category hierarchies into entity information. We propose a framework that embeds entities and categories into a semantic space by integrating structured knowledge and taxonomy hierarchy from large knowledge bases. The framework allows to compute meaningful semantic relatedness between entities and categories. Our framework can handle both single-word concepts and multiple-word concepts with superior performance on concept categorization and yield state of the art results on dataless hierarchical classification.|sl:tag|tag:entity_embeddings -OWL|skos:broader|Semantic Web Dev -Transformers|skos:broader|Self-Attention -Obama|skos:broader|Président des USA -Polynesians|skos:broader|Peuples -Virtuoso|skos:broader|OpenLink Software -LOD & museum|skos:broader|Musée -Banksy|skos:broader|Peintre -End-To-End Memory Networks Neural network with a recurrent attention model over a possibly large external memory. cited by [#A. Bordes](/tag/antoine_bordes) at [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html) We introduce a neural network with a recurrent attention model over a possibly large external memory. The architecture is a form of Memory Network (Weston et al., 2015) but unlike the model in that work, it is trained end-to-end, and hence requires significantly less supervision during training, making it more generally applicable in realistic settings. It can also be seen as an extension of RNNsearch to the case where multiple computational steps (hops) are performed per output symbol. The flexibility of the model allows us to apply it to tasks as diverse as (synthetic) question answering and to language modeling.
For the former our approach is competitive with Memory Networks, but with less supervision. For the latter, on the Penn TreeBank and Text8 datasets our approach demonstrates comparable performance to RNNs and LSTMs. In both cases we show that the key concept of multiple computational hops yields improved results.|sl:arxiv_author|Sainbayar Sukhbaatar -Coursera: NLP class|skos:broader|Coursera -Amérique profonde|skos:broader|USA -Espèces menacées|skos:broader|Grands problèmes -Greenpeace|skos:broader|ONG -GRDDL|skos:broader|Semantic Web -CFPM|skos:broader|Musique du Niger -Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high-dimensionality. Network embeddings aim to represent network vertices into a low-dimensional vector space, by preserving both network topology structure and node content information. Algorithms are typically unsupervised and can be broadly classified into three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): - matrix factorization - random walks - deep learning approaches (graph neural networks - GNNs) - graph convolution networks (GraphSage) - graph attention networks, - graph auto-encoders (e.g., DNGR and SDNE) - graph generative networks, - graph spatial-temporal networks. Node embeddings (intuition: similar nodes should have similar vectors). - Laplacian EigenMap (an eigenvector based computation, OK when matrix is not too large) - LINE Large-scale Information Network Embedding, most cited paper at WWW2015; Breadth first search - DeepWalk (Perozzi et al. 2014) (the technique to learn word embeddings adapted to nodes: treating nodes as words and generating short random walks as sentences) - Node2Vec (2016) (mixed strategy) etc. |skos:broader|The objective of embedding methods is to organize symbolic objects (e.g., words, entities, concepts) in a way such that their similarity in the embedding space reflects their semantic or functional similarity -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommended by [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding.
Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|sl:arxiv_firstAuthor|Dan Svenstrup -BERT for Joint Intent Classification and Slot Filling Experimental results show that our proposed joint BERT model outperforms BERT models modeling intent classification and slot filling separately, demonstrating the efficacy of exploiting the relationship between the two tasks. Adding a CRF on top of the model doesn't improve the results. Intent classification and slot filling are two essential tasks for natural language understanding. They often suffer from small-scale human-labeled training data, resulting in poor generalization capability, especially for rare words. Recently a new language representation model, BERT (Bidirectional Encoder Representations from Transformers), facilitates pre-training deep bidirectional representations on large-scale unlabeled corpora, and has created state-of-the-art models for a wide variety of natural language processing tasks after simple fine-tuning. However, there has not been much effort on exploring BERT for natural language understanding. In this work, we propose a joint intent classification and slot filling model based on BERT. Experimental results demonstrate that our proposed model achieves significant improvement on intent classification accuracy, slot filling F1, and sentence-level semantic frame accuracy on several public benchmark datasets, compared to the attention-based recurrent neural network models and slot-gated models.|sl:tag|tag:alibaba +http://www.semanlink.net/tag/tomcat_tips|creationTime|2013-01-10T18:13:59Z +http://www.semanlink.net/tag/tomcat_tips|prefLabel|Tomcat tips +http://www.semanlink.net/tag/tomcat_tips|broader|http://www.semanlink.net/tag/tomcat +http://www.semanlink.net/tag/tomcat_tips|broader|http://www.semanlink.net/tag/tips +http://www.semanlink.net/tag/tomcat_tips|creationDate|2013-01-10 +http://www.semanlink.net/tag/tomcat_tips|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tomcat_tips|uri|http://www.semanlink.net/tag/tomcat_tips +http://www.semanlink.net/tag/tomcat_tips|broader_prefLabel|Tomcat +http://www.semanlink.net/tag/tomcat_tips|broader_prefLabel|Tips +http://www.semanlink.net/tag/memoire_informatique|creationTime|2008-08-29T18:45:29Z +http://www.semanlink.net/tag/memoire_informatique|prefLabel|Mémoire (informatique) +http://www.semanlink.net/tag/memoire_informatique|broader|http://www.semanlink.net/tag/memoire +http://www.semanlink.net/tag/memoire_informatique|creationDate|2008-08-29 +http://www.semanlink.net/tag/memoire_informatique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/memoire_informatique|uri|http://www.semanlink.net/tag/memoire_informatique +http://www.semanlink.net/tag/memoire_informatique|broader_prefLabel|Mémoire +http://www.semanlink.net/tag/css|prefLabel|css +http://www.semanlink.net/tag/css|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/css|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/css|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/css|uri|http://www.semanlink.net/tag/css +http://www.semanlink.net/tag/css|broader_prefLabel|Web dev +http://www.semanlink.net/tag/css|broader_prefLabel|Dev 
+http://www.semanlink.net/tag/css|broader_altLabel|Web app dev +http://www.semanlink.net/tag/medicaments|creationTime|2014-04-13T19:38:33Z +http://www.semanlink.net/tag/medicaments|prefLabel|Médicaments +http://www.semanlink.net/tag/medicaments|creationDate|2014-04-13 +http://www.semanlink.net/tag/medicaments|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/medicaments|uri|http://www.semanlink.net/tag/medicaments +http://www.semanlink.net/tag/daimler|creationTime|2010-07-30T15:30:45Z +http://www.semanlink.net/tag/daimler|prefLabel|Daimler +http://www.semanlink.net/tag/daimler|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/daimler|creationDate|2010-07-30 +http://www.semanlink.net/tag/daimler|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/daimler|uri|http://www.semanlink.net/tag/daimler +http://www.semanlink.net/tag/daimler|broader_prefLabel|Automobile +http://www.semanlink.net/tag/daimler|broader_altLabel|Automotive +http://www.semanlink.net/tag/nouvelle_route_de_la_soie|creationTime|2018-02-11T10:40:42Z +http://www.semanlink.net/tag/nouvelle_route_de_la_soie|prefLabel|Nouvelle Route de la Soie +http://www.semanlink.net/tag/nouvelle_route_de_la_soie|creationDate|2018-02-11 +http://www.semanlink.net/tag/nouvelle_route_de_la_soie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nouvelle_route_de_la_soie|describedBy|https://fr.wikipedia.org/wiki/Nouvelle_route_de_la_soie +http://www.semanlink.net/tag/nouvelle_route_de_la_soie|altLabel|Belt and Road Initiative +http://www.semanlink.net/tag/nouvelle_route_de_la_soie|uri|http://www.semanlink.net/tag/nouvelle_route_de_la_soie +http://www.semanlink.net/tag/mechant|prefLabel|Méchant +http://www.semanlink.net/tag/mechant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mechant|uri|http://www.semanlink.net/tag/mechant +http://www.semanlink.net/tag/peter_chilson|creationTime|2016-08-20T11:54:03Z +http://www.semanlink.net/tag/peter_chilson|prefLabel|Peter Chilson +http://www.semanlink.net/tag/peter_chilson|broader|http://www.semanlink.net/tag/peace_corps +http://www.semanlink.net/tag/peter_chilson|related|http://www.semanlink.net/tag/vito +http://www.semanlink.net/tag/peter_chilson|creationDate|2016-08-20 +http://www.semanlink.net/tag/peter_chilson|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/peter_chilson|homepage|http://www.peterchilson.com +http://www.semanlink.net/tag/peter_chilson|uri|http://www.semanlink.net/tag/peter_chilson +http://www.semanlink.net/tag/peter_chilson|broader_prefLabel|Peace Corps +http://www.semanlink.net/tag/scala|creationTime|2010-04-27T10:45:51Z +http://www.semanlink.net/tag/scala|prefLabel|Scala +http://www.semanlink.net/tag/scala|broader|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/scala|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/scala|creationDate|2010-04-27 +http://www.semanlink.net/tag/scala|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/scala|homepage|http://www.scala-lang.org/ +http://www.semanlink.net/tag/scala|describedBy|https://en.wikipedia.org/wiki/Scala_(programming_language) +http://www.semanlink.net/tag/scala|uri|http://www.semanlink.net/tag/scala +http://www.semanlink.net/tag/scala|broader_prefLabel|Programming language 
+http://www.semanlink.net/tag/scala|broader_prefLabel|Java +http://www.semanlink.net/tag/scala|broader_altLabel|Langage de programmation +http://www.semanlink.net/tag/jsonld_java|creationTime|2016-04-09T18:55:51Z +http://www.semanlink.net/tag/jsonld_java|prefLabel|Jsonld-java +http://www.semanlink.net/tag/jsonld_java|broader|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/jsonld_java|creationDate|2016-04-09 +http://www.semanlink.net/tag/jsonld_java|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jsonld_java|describedBy|https://github.com/jsonld-java +http://www.semanlink.net/tag/jsonld_java|uri|http://www.semanlink.net/tag/jsonld_java +http://www.semanlink.net/tag/jsonld_java|broader_prefLabel|JSON-LD +http://www.semanlink.net/tag/jsonld_java|broader_altLabel|JSONLD +http://www.semanlink.net/tag/biterm_topic_model|creationTime|2017-06-07T18:41:06Z +http://www.semanlink.net/tag/biterm_topic_model|prefLabel|Biterm Topic Model +http://www.semanlink.net/tag/biterm_topic_model|broader|http://www.semanlink.net/tag/topic_modeling_over_short_texts +http://www.semanlink.net/tag/biterm_topic_model|related|http://www.semanlink.net/tag/www_2013 +http://www.semanlink.net/tag/biterm_topic_model|creationDate|2017-06-07 +http://www.semanlink.net/tag/biterm_topic_model|comment|"Word co-occurrence based topic model that learns topics by modeling word-word co-occurrences patterns. (In constrast, LDA and PLSA are word-document co-occurrence topic models) + +A biterm consists of two words co-occurring in the same context (short text). BTM models the biterm occurrences in a corpus. + +Conventional topic models exploit word co-occurrence patterns to reveal the latent semantic structure of a corpus in an implicit way by modeling the generation of words in each document. These approaches are sensitive to the shortness of documents since the word co- occurrence patterns in a single short document are sparse and not reliable. Instead, if we aggregate all the word co-occurrence patterns in the corpus, their frequencies are more stable and more clearly reveals the correlation between the words. With this idea, we developed the biterm topic model, which takes a novel way to reveal the latent topic components in a corpus by directly modeling the generation of word co-occurrence patterns." 
+http://www.semanlink.net/tag/biterm_topic_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/biterm_topic_model|uri|http://www.semanlink.net/tag/biterm_topic_model +http://www.semanlink.net/tag/biterm_topic_model|broader_prefLabel|Topic Modeling over Short Texts +http://www.semanlink.net/tag/rupert_westenthaler|creationTime|2012-06-14T15:03:22Z +http://www.semanlink.net/tag/rupert_westenthaler|prefLabel|Rupert Westenthaler +http://www.semanlink.net/tag/rupert_westenthaler|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/rupert_westenthaler|related|http://www.semanlink.net/tag/interactive_knowledge_stack +http://www.semanlink.net/tag/rupert_westenthaler|creationDate|2012-06-14 +http://www.semanlink.net/tag/rupert_westenthaler|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rupert_westenthaler|describedBy|http://www.iks-project.eu/content/rupert-westenthaler +http://www.semanlink.net/tag/rupert_westenthaler|uri|http://www.semanlink.net/tag/rupert_westenthaler +http://www.semanlink.net/tag/rupert_westenthaler|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/gouvernement_sarkozy|creationTime|2008-10-25T00:31:53Z +http://www.semanlink.net/tag/gouvernement_sarkozy|prefLabel|Gouvernement Sarkozy +http://www.semanlink.net/tag/gouvernement_sarkozy|broader|http://www.semanlink.net/tag/sarkozy +http://www.semanlink.net/tag/gouvernement_sarkozy|creationDate|2008-10-25 +http://www.semanlink.net/tag/gouvernement_sarkozy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gouvernement_sarkozy|uri|http://www.semanlink.net/tag/gouvernement_sarkozy +http://www.semanlink.net/tag/gouvernement_sarkozy|broader_prefLabel|Sarkozy +http://www.semanlink.net/tag/belleme|creationTime|2012-11-18T01:36:49Z +http://www.semanlink.net/tag/belleme|prefLabel|Bellême +http://www.semanlink.net/tag/belleme|creationDate|2012-11-18 +http://www.semanlink.net/tag/belleme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/belleme|uri|http://www.semanlink.net/tag/belleme +http://www.semanlink.net/tag/json_2_json_ld|creationTime|2017-01-05T17:09:08Z +http://www.semanlink.net/tag/json_2_json_ld|prefLabel|JSON 2 JSON-LD +http://www.semanlink.net/tag/json_2_json_ld|broader|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/json_2_json_ld|creationDate|2017-01-05 +http://www.semanlink.net/tag/json_2_json_ld|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/json_2_json_ld|uri|http://www.semanlink.net/tag/json_2_json_ld +http://www.semanlink.net/tag/json_2_json_ld|broader_prefLabel|JSON-LD +http://www.semanlink.net/tag/json_2_json_ld|broader_altLabel|JSONLD +http://www.semanlink.net/tag/grande_bretagne|prefLabel|Grande-Bretagne +http://www.semanlink.net/tag/grande_bretagne|broader|http://www.semanlink.net/tag/royaume_uni +http://www.semanlink.net/tag/grande_bretagne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grande_bretagne|uri|http://www.semanlink.net/tag/grande_bretagne +http://www.semanlink.net/tag/grande_bretagne|broader_prefLabel|Royaume Uni +http://www.semanlink.net/tag/grande_bretagne|broader_altLabel|UK +http://www.semanlink.net/tag/birmanie|creationTime|2017-10-20T15:37:44Z +http://www.semanlink.net/tag/birmanie|prefLabel|Birmanie +http://www.semanlink.net/tag/birmanie|creationDate|2017-10-20 
+http://www.semanlink.net/tag/birmanie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/birmanie|uri|http://www.semanlink.net/tag/birmanie +http://www.semanlink.net/tag/google|prefLabel|Google +http://www.semanlink.net/tag/google|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/google|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/google|related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/google|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google|uri|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/google|broader_prefLabel|Internet +http://www.semanlink.net/tag/securite|creationTime|2007-07-15T11:23:34Z +http://www.semanlink.net/tag/securite|prefLabel|Sécurité +http://www.semanlink.net/tag/securite|creationDate|2007-07-15 +http://www.semanlink.net/tag/securite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/securite|uri|http://www.semanlink.net/tag/securite +http://www.semanlink.net/tag/category_embedding|creationTime|2019-08-02T10:45:28Z +http://www.semanlink.net/tag/category_embedding|prefLabel|Category Embedding +http://www.semanlink.net/tag/category_embedding|broader|http://www.semanlink.net/tag/categorical_variables +http://www.semanlink.net/tag/category_embedding|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/category_embedding|broader|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/category_embedding|broader|http://www.semanlink.net/tag/label_embedding +http://www.semanlink.net/tag/category_embedding|creationDate|2019-08-02 +http://www.semanlink.net/tag/category_embedding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/category_embedding|uri|http://www.semanlink.net/tag/category_embedding +http://www.semanlink.net/tag/category_embedding|broader_prefLabel|Categorical Variables +http://www.semanlink.net/tag/category_embedding|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/category_embedding|broader_prefLabel|Entity embeddings +http://www.semanlink.net/tag/category_embedding|broader_prefLabel|Label Embedding +http://www.semanlink.net/tag/category_embedding|broader_altLabel|embedding +http://www.semanlink.net/tag/category_embedding|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/category_embedding|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/category_embedding|broader_related|http://www.semanlink.net/tag/fasttext +http://www.semanlink.net/tag/entity_recommendation|creationTime|2020-04-17T19:18:40Z +http://www.semanlink.net/tag/entity_recommendation|prefLabel|Entity recommendation +http://www.semanlink.net/tag/entity_recommendation|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/entity_recommendation|related|http://www.semanlink.net/tag/recommender_systems +http://www.semanlink.net/tag/entity_recommendation|creationDate|2020-04-17 +http://www.semanlink.net/tag/entity_recommendation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_recommendation|uri|http://www.semanlink.net/tag/entity_recommendation +http://www.semanlink.net/tag/entity_recommendation|broader_prefLabel|Entities 
+http://www.semanlink.net/tag/iswc|creationTime|2011-11-14T23:00:17Z +http://www.semanlink.net/tag/iswc|prefLabel|ISWC +http://www.semanlink.net/tag/iswc|broader|http://www.semanlink.net/tag/semantic_web_conferences +http://www.semanlink.net/tag/iswc|creationDate|2011-11-14 +http://www.semanlink.net/tag/iswc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/iswc|uri|http://www.semanlink.net/tag/iswc +http://www.semanlink.net/tag/iswc|broader_prefLabel|Semantic Web conferences +http://www.semanlink.net/tag/ml_domaines_d_application|creationTime|2019-02-11T00:39:19Z +http://www.semanlink.net/tag/ml_domaines_d_application|prefLabel|IA/ML: domaines d'application +http://www.semanlink.net/tag/ml_domaines_d_application|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/ml_domaines_d_application|creationDate|2019-02-11 +http://www.semanlink.net/tag/ml_domaines_d_application|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ml_domaines_d_application|uri|http://www.semanlink.net/tag/ml_domaines_d_application +http://www.semanlink.net/tag/ml_domaines_d_application|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/ml_domaines_d_application|broader_altLabel|ML +http://www.semanlink.net/tag/ml_domaines_d_application|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/ml_domaines_d_application|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/phishing|prefLabel|Phishing +http://www.semanlink.net/tag/phishing|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/phishing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/phishing|uri|http://www.semanlink.net/tag/phishing +http://www.semanlink.net/tag/phishing|broader_prefLabel|Internet +http://www.semanlink.net/tag/salaire|creationTime|2007-11-21T15:18:17Z +http://www.semanlink.net/tag/salaire|prefLabel|Salaire +http://www.semanlink.net/tag/salaire|creationDate|2007-11-21 +http://www.semanlink.net/tag/salaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/salaire|uri|http://www.semanlink.net/tag/salaire +http://www.semanlink.net/tag/hydra_templated_links|creationTime|2015-02-19T14:03:59Z +http://www.semanlink.net/tag/hydra_templated_links|prefLabel|Hydra/Templated Links +http://www.semanlink.net/tag/hydra_templated_links|broader|http://www.semanlink.net/tag/hydra +http://www.semanlink.net/tag/hydra_templated_links|creationDate|2015-02-19 +http://www.semanlink.net/tag/hydra_templated_links|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hydra_templated_links|uri|http://www.semanlink.net/tag/hydra_templated_links +http://www.semanlink.net/tag/hydra_templated_links|broader_prefLabel|Hydra +http://www.semanlink.net/tag/hydra_templated_links|broader_related|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/overfitting|creationTime|2019-10-11T02:27:13Z +http://www.semanlink.net/tag/overfitting|prefLabel|Overfitting/Generalization +http://www.semanlink.net/tag/overfitting|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/overfitting|creationDate|2019-10-11 +http://www.semanlink.net/tag/overfitting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/overfitting|uri|http://www.semanlink.net/tag/overfitting 
+http://www.semanlink.net/tag/overfitting|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/rdf_tools|prefLabel|RDF Tools +http://www.semanlink.net/tag/rdf_tools|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_tools|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/rdf_tools|creationDate|2007-01-19 +http://www.semanlink.net/tag/rdf_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_tools|uri|http://www.semanlink.net/tag/rdf_tools +http://www.semanlink.net/tag/rdf_tools|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_tools|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/rdf_tools|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_tools|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_tools|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_tools|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_tools|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/diacritics|creationTime|2017-06-06T11:29:04Z +http://www.semanlink.net/tag/diacritics|prefLabel|Diacritics +http://www.semanlink.net/tag/diacritics|creationDate|2017-06-06 +http://www.semanlink.net/tag/diacritics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/diacritics|uri|http://www.semanlink.net/tag/diacritics +http://www.semanlink.net/tag/gilles_taddei|creationTime|2014-07-20T22:29:24Z +http://www.semanlink.net/tag/gilles_taddei|prefLabel|Gilles Taddei +http://www.semanlink.net/tag/gilles_taddei|broader|http://www.semanlink.net/tag/ami +http://www.semanlink.net/tag/gilles_taddei|creationDate|2014-07-20 +http://www.semanlink.net/tag/gilles_taddei|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gilles_taddei|uri|http://www.semanlink.net/tag/gilles_taddei +http://www.semanlink.net/tag/gilles_taddei|broader_prefLabel|Ami +http://www.semanlink.net/tag/lava_jato|creationTime|2017-03-26T10:52:51Z +http://www.semanlink.net/tag/lava_jato|prefLabel|Lava-jato +http://www.semanlink.net/tag/lava_jato|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/lava_jato|broader|http://www.semanlink.net/tag/petrobras +http://www.semanlink.net/tag/lava_jato|creationDate|2017-03-26 +http://www.semanlink.net/tag/lava_jato|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lava_jato|uri|http://www.semanlink.net/tag/lava_jato +http://www.semanlink.net/tag/lava_jato|broader_prefLabel|Brésil +http://www.semanlink.net/tag/lava_jato|broader_prefLabel|Petrobras +http://www.semanlink.net/tag/lava_jato|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/c2gweb_js|creationTime|2013-11-22T11:11:13Z +http://www.semanlink.net/tag/c2gweb_js|prefLabel|C2GWeb-JS +http://www.semanlink.net/tag/c2gweb_js|broader|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/c2gweb_js|broader|http://www.semanlink.net/tag/configuration_ontology +http://www.semanlink.net/tag/c2gweb_js|broader|http://www.semanlink.net/tag/c2gweb_rdf +http://www.semanlink.net/tag/c2gweb_js|creationDate|2013-11-22 +http://www.semanlink.net/tag/c2gweb_js|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/c2gweb_js|homepage|http://doc.rplug.renault.com/car-configurator/cold/coldjs/tutorial.html +http://www.semanlink.net/tag/c2gweb_js|uri|http://www.semanlink.net/tag/c2gweb_js +http://www.semanlink.net/tag/c2gweb_js|broader_prefLabel|C2GWeb +http://www.semanlink.net/tag/c2gweb_js|broader_prefLabel|Configuration ontology +http://www.semanlink.net/tag/c2gweb_js|broader_prefLabel|C2GWeb RDF +http://www.semanlink.net/tag/latex|creationTime|2011-12-06T23:56:58Z +http://www.semanlink.net/tag/latex|prefLabel|LaTeX +http://www.semanlink.net/tag/latex|creationDate|2011-12-06 +http://www.semanlink.net/tag/latex|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/latex|uri|http://www.semanlink.net/tag/latex +http://www.semanlink.net/tag/ruby_on_rails|prefLabel|Ruby on Rails +http://www.semanlink.net/tag/ruby_on_rails|broader|http://www.semanlink.net/tag/ruby +http://www.semanlink.net/tag/ruby_on_rails|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ruby_on_rails|uri|http://www.semanlink.net/tag/ruby_on_rails +http://www.semanlink.net/tag/ruby_on_rails|broader_prefLabel|Ruby +http://www.semanlink.net/tag/film_turc|creationTime|2020-06-13T19:16:47Z +http://www.semanlink.net/tag/film_turc|prefLabel|Film turc +http://www.semanlink.net/tag/film_turc|broader|http://www.semanlink.net/tag/turquie +http://www.semanlink.net/tag/film_turc|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_turc|creationDate|2020-06-13 +http://www.semanlink.net/tag/film_turc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_turc|uri|http://www.semanlink.net/tag/film_turc +http://www.semanlink.net/tag/film_turc|broader_prefLabel|Turquie +http://www.semanlink.net/tag/film_turc|broader_prefLabel|Film +http://www.semanlink.net/tag/ant|creationTime|2007-04-25T22:13:38Z +http://www.semanlink.net/tag/ant|prefLabel|Ant +http://www.semanlink.net/tag/ant|broader|http://www.semanlink.net/tag/dev_tools +http://www.semanlink.net/tag/ant|creationDate|2007-04-25 +http://www.semanlink.net/tag/ant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ant|homepage|http://ant.apache.org/ +http://www.semanlink.net/tag/ant|uri|http://www.semanlink.net/tag/ant +http://www.semanlink.net/tag/ant|broader_prefLabel|Dev tools +http://www.semanlink.net/tag/toutankhamon|prefLabel|Toutankhamon +http://www.semanlink.net/tag/toutankhamon|broader|http://www.semanlink.net/tag/egypte_antique +http://www.semanlink.net/tag/toutankhamon|broader|http://www.semanlink.net/tag/pharaon +http://www.semanlink.net/tag/toutankhamon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/toutankhamon|uri|http://www.semanlink.net/tag/toutankhamon +http://www.semanlink.net/tag/toutankhamon|broader_prefLabel|Egypte antique +http://www.semanlink.net/tag/toutankhamon|broader_prefLabel|Pharaon +http://www.semanlink.net/tag/liberte_d_expression|prefLabel|Liberté d'expression +http://www.semanlink.net/tag/liberte_d_expression|broader|http://www.semanlink.net/tag/liberte +http://www.semanlink.net/tag/liberte_d_expression|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/liberte_d_expression|uri|http://www.semanlink.net/tag/liberte_d_expression +http://www.semanlink.net/tag/liberte_d_expression|broader_prefLabel|Liberté 
+http://www.semanlink.net/tag/liberte_d_expression|broader_related|http://www.semanlink.net/tag/etat_policier
+http://www.semanlink.net/tag/uriqr|creationTime|2007-06-13T23:12:04Z
+http://www.semanlink.net/tag/uriqr|prefLabel|Uriqr
+http://www.semanlink.net/tag/uriqr|broader|http://www.semanlink.net/tag/linked_data
+http://www.semanlink.net/tag/uriqr|broader|http://www.semanlink.net/tag/linking_open_data
+http://www.semanlink.net/tag/uriqr|broader|http://www.semanlink.net/tag/tom_heath
+http://www.semanlink.net/tag/uriqr|creationDate|2007-06-13
+http://www.semanlink.net/tag/uriqr|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/uriqr|uri|http://www.semanlink.net/tag/uriqr
+http://www.semanlink.net/tag/uriqr|broader_prefLabel|Linked Data
+http://www.semanlink.net/tag/uriqr|broader_prefLabel|Linking Open Data
+http://www.semanlink.net/tag/uriqr|broader_prefLabel|Tom Heath
+http://www.semanlink.net/tag/uriqr|broader_altLabel|LD
+http://www.semanlink.net/tag/uriqr|broader_altLabel|LOD
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/yves_raymond
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/frederick_giasson
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/chris_bizer
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/jamendo
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/richard_cyganiak
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/httprange_14
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/musicbrainz
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/kingsley_idehen
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/giovanni_tummarello
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/tom_heath
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/rdf_data_source
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/tim_berners_lee
+http://www.semanlink.net/tag/uriqr|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.semanlink.net/tag/googleplus|creationTime|2012-01-02T12:06:27Z
+http://www.semanlink.net/tag/googleplus|prefLabel|GooglePlus
+http://www.semanlink.net/tag/googleplus|broader|http://www.semanlink.net/tag/social_networks
+http://www.semanlink.net/tag/googleplus|broader|http://www.semanlink.net/tag/google
+http://www.semanlink.net/tag/googleplus|creationDate|2012-01-02
+http://www.semanlink.net/tag/googleplus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/googleplus|uri|http://www.semanlink.net/tag/googleplus
+http://www.semanlink.net/tag/googleplus|broader_prefLabel|Social Networks
+http://www.semanlink.net/tag/googleplus|broader_prefLabel|Google
+http://www.semanlink.net/tag/googleplus|broader_related|http://www.semanlink.net/tag/search_engines
+http://www.semanlink.net/tag/ai_cloud_service|creationTime|2018-05-31T08:29:20Z
+http://www.semanlink.net/tag/ai_cloud_service|prefLabel|AI cloud service
+http://www.semanlink.net/tag/ai_cloud_service|broader|http://www.semanlink.net/tag/artificial_intelligence
+http://www.semanlink.net/tag/ai_cloud_service|broader|http://www.semanlink.net/tag/cloud
+http://www.semanlink.net/tag/ai_cloud_service|creationDate|2018-05-31
+http://www.semanlink.net/tag/ai_cloud_service|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ai_cloud_service|uri|http://www.semanlink.net/tag/ai_cloud_service
+http://www.semanlink.net/tag/ai_cloud_service|broader_prefLabel|Artificial Intelligence
+http://www.semanlink.net/tag/ai_cloud_service|broader_prefLabel|Cloud
+http://www.semanlink.net/tag/ai_cloud_service|broader_altLabel|Intelligence Artificielle
+http://www.semanlink.net/tag/ai_cloud_service|broader_altLabel|AI
+http://www.semanlink.net/tag/ai_cloud_service|broader_altLabel|IA
+http://www.semanlink.net/tag/ai_cloud_service|broader_altLabel|Cloud computing
+http://www.semanlink.net/tag/ai_cloud_service|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens
+http://www.semanlink.net/tag/javascript_promises|creationTime|2015-03-05T10:54:50Z
+http://www.semanlink.net/tag/javascript_promises|prefLabel|JavaScript Promises
+http://www.semanlink.net/tag/javascript_promises|broader|http://www.semanlink.net/tag/javascript
+http://www.semanlink.net/tag/javascript_promises|creationDate|2015-03-05
+http://www.semanlink.net/tag/javascript_promises|comment|The callback approach is called an “inversion of control”. A function that accepts a callback instead of a return value is saying, “Don’t call me, I’ll call you.” Promises un-invert the inversion, cleanly separating the input arguments from control flow arguments. This simplifies the use and creation of APIs.
+http://www.semanlink.net/tag/javascript_promises|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/javascript_promises|uri|http://www.semanlink.net/tag/javascript_promises
+http://www.semanlink.net/tag/javascript_promises|broader_prefLabel|JavaScript
+http://www.semanlink.net/tag/javascript_promises|broader_altLabel|js
+http://www.semanlink.net/tag/mbilia_bel|creationTime|2008-12-29T22:34:27Z
+http://www.semanlink.net/tag/mbilia_bel|prefLabel|Mbilia Bel
+http://www.semanlink.net/tag/mbilia_bel|broader|http://www.semanlink.net/tag/musicien
+http://www.semanlink.net/tag/mbilia_bel|broader|http://www.semanlink.net/tag/music_of_africa
+http://www.semanlink.net/tag/mbilia_bel|broader|http://www.semanlink.net/tag/zaire
+http://www.semanlink.net/tag/mbilia_bel|creationDate|2008-12-29
+http://www.semanlink.net/tag/mbilia_bel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mbilia_bel|type|http://purl.org/ontology/mo/MusicArtist
+http://www.semanlink.net/tag/mbilia_bel|uri|http://www.semanlink.net/tag/mbilia_bel
+http://www.semanlink.net/tag/mbilia_bel|broader_prefLabel|Musicien
+http://www.semanlink.net/tag/mbilia_bel|broader_prefLabel|Music of Africa
+http://www.semanlink.net/tag/mbilia_bel|broader_prefLabel|Zaïre
+http://www.semanlink.net/tag/mbilia_bel|broader_altLabel|Musique africaine
+http://www.semanlink.net/tag/mbilia_bel|broader_altLabel|African music
+http://www.semanlink.net/tag/youtube_tutorial|creationTime|2015-09-04T22:36:06Z
+http://www.semanlink.net/tag/youtube_tutorial|prefLabel|Youtube tutorial
+http://www.semanlink.net/tag/youtube_tutorial|broader|http://www.semanlink.net/tag/youtube_video
+http://www.semanlink.net/tag/youtube_tutorial|broader|http://www.semanlink.net/tag/tutorial
+http://www.semanlink.net/tag/youtube_tutorial|creationDate|2015-09-04
+http://www.semanlink.net/tag/youtube_tutorial|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/youtube_tutorial|uri|http://www.semanlink.net/tag/youtube_tutorial
+http://www.semanlink.net/tag/youtube_tutorial|broader_prefLabel|YouTube video
+http://www.semanlink.net/tag/youtube_tutorial|broader_prefLabel|Tutorial
+http://www.semanlink.net/tag/david_ricardo|creationTime|2017-08-10T00:03:41Z
+http://www.semanlink.net/tag/david_ricardo|prefLabel|David Ricardo
+http://www.semanlink.net/tag/david_ricardo|broader|http://www.semanlink.net/tag/capitalisme
+http://www.semanlink.net/tag/david_ricardo|broader|http://www.semanlink.net/tag/economiste
+http://www.semanlink.net/tag/david_ricardo|creationDate|2017-08-10
+http://www.semanlink.net/tag/david_ricardo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/david_ricardo|describedBy|https://en.wikipedia.org/wiki/David_Ricardo
+http://www.semanlink.net/tag/david_ricardo|uri|http://www.semanlink.net/tag/david_ricardo
+http://www.semanlink.net/tag/david_ricardo|broader_prefLabel|Capitalisme
+http://www.semanlink.net/tag/david_ricardo|broader_prefLabel|Economiste
+http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|creationTime|2020-02-13T23:58:13Z
+http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|prefLabel|ElasticSearch: nearest neighbor(s)
+http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|broader|http://www.semanlink.net/tag/nearest_neighbor_search
+http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|broader|http://www.semanlink.net/tag/elasticsearch +http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|creationDate|2020-02-13 +http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|uri|http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s +http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|broader_prefLabel|Nearest neighbor search +http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|broader_prefLabel|ElasticSearch +http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|broader_altLabel|Similarity search +http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s|broader_related|http://www.semanlink.net/tag/lucene +http://www.semanlink.net/tag/random_forest|creationTime|2017-06-19T11:17:07Z +http://www.semanlink.net/tag/random_forest|prefLabel|Random forest +http://www.semanlink.net/tag/random_forest|broader|http://www.semanlink.net/tag/ensemble_learning +http://www.semanlink.net/tag/random_forest|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/random_forest|related|http://www.semanlink.net/tag/decision_tree_learning +http://www.semanlink.net/tag/random_forest|related|http://www.semanlink.net/tag/bootstrap_aggregating_bagging +http://www.semanlink.net/tag/random_forest|creationDate|2017-06-19 +http://www.semanlink.net/tag/random_forest|comment|A single decision tree is a highly non-linear classifier with typically low bias but high variance. Random forests address the problem of high variance by establishing a committee (i.e. average) of identically distributed single decision trees. 
+http://www.semanlink.net/tag/random_forest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/random_forest|describedBy|https://en.wikipedia.org/wiki/Random_forest +http://www.semanlink.net/tag/random_forest|uri|http://www.semanlink.net/tag/random_forest +http://www.semanlink.net/tag/random_forest|broader_prefLabel|Ensemble learning +http://www.semanlink.net/tag/random_forest|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/guerres_puniques|prefLabel|Guerres puniques +http://www.semanlink.net/tag/guerres_puniques|broader|http://www.semanlink.net/tag/antiquite_romaine +http://www.semanlink.net/tag/guerres_puniques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/guerres_puniques|uri|http://www.semanlink.net/tag/guerres_puniques +http://www.semanlink.net/tag/guerres_puniques|broader_prefLabel|Antiquité romaine +http://www.semanlink.net/tag/offshore_leaks|creationTime|2013-04-04T22:58:17Z +http://www.semanlink.net/tag/offshore_leaks|prefLabel|Offshore leaks +http://www.semanlink.net/tag/offshore_leaks|broader|http://www.semanlink.net/tag/leaks +http://www.semanlink.net/tag/offshore_leaks|broader|http://www.semanlink.net/tag/paradis_fiscaux +http://www.semanlink.net/tag/offshore_leaks|creationDate|2013-04-04 +http://www.semanlink.net/tag/offshore_leaks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/offshore_leaks|uri|http://www.semanlink.net/tag/offshore_leaks +http://www.semanlink.net/tag/offshore_leaks|broader_prefLabel|Leaks +http://www.semanlink.net/tag/offshore_leaks|broader_prefLabel|Paradis fiscaux +http://www.semanlink.net/tag/offshore_leaks|broader_altLabel|Tax Haven +http://www.semanlink.net/tag/offshore_leaks|broader_altLabel|Paradis fiscal +http://www.semanlink.net/tag/offshore_leaks|broader_related|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/tag/offshore_leaks|broader_related|http://www.semanlink.net/tag/luxembourg +http://www.semanlink.net/tag/c2gweb|creationTime|2012-03-29T18:17:31Z +http://www.semanlink.net/tag/c2gweb|prefLabel|C2GWeb +http://www.semanlink.net/tag/c2gweb|broader|http://www.semanlink.net/tag/c2g +http://www.semanlink.net/tag/c2gweb|broader|http://www.semanlink.net/tag/sw_at_renault +http://www.semanlink.net/tag/c2gweb|broader|http://www.semanlink.net/tag/configuration_as_linked_data +http://www.semanlink.net/tag/c2gweb|creationDate|2012-03-29 +http://www.semanlink.net/tag/c2gweb|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/c2gweb|uri|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/c2gweb|broader_prefLabel|C2G +http://www.semanlink.net/tag/c2gweb|broader_prefLabel|SW at Renault +http://www.semanlink.net/tag/c2gweb|broader_prefLabel|Configuration as Linked Data +http://www.semanlink.net/tag/c2gweb|broader_altLabel|Semantic Web@Renault +http://www.semanlink.net/tag/how_much_information_in_a_language|creationTime|2019-10-11T00:33:20Z +http://www.semanlink.net/tag/how_much_information_in_a_language|prefLabel|How much information in a language? 
+http://www.semanlink.net/tag/how_much_information_in_a_language|broader|http://www.semanlink.net/tag/langage +http://www.semanlink.net/tag/how_much_information_in_a_language|related|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/how_much_information_in_a_language|creationDate|2019-10-11 +http://www.semanlink.net/tag/how_much_information_in_a_language|comment|Réfléchir à mettre en relation avec la taille des modèles genre BERT - et du coup avec ce [papier](/doc/2019/09/_1909_04120_span_selection_pre) +http://www.semanlink.net/tag/how_much_information_in_a_language|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/how_much_information_in_a_language|uri|http://www.semanlink.net/tag/how_much_information_in_a_language +http://www.semanlink.net/tag/how_much_information_in_a_language|broader_prefLabel|Language +http://www.semanlink.net/tag/how_much_information_in_a_language|broader_altLabel|Langage +http://www.semanlink.net/tag/anne_hidalgo|creationTime|2020-11-16T12:10:57Z +http://www.semanlink.net/tag/anne_hidalgo|prefLabel|Anne Hidalgo +http://www.semanlink.net/tag/anne_hidalgo|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/anne_hidalgo|related|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/anne_hidalgo|creationDate|2020-11-16 +http://www.semanlink.net/tag/anne_hidalgo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/anne_hidalgo|uri|http://www.semanlink.net/tag/anne_hidalgo +http://www.semanlink.net/tag/anne_hidalgo|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/apache_org|prefLabel|apache.org +http://www.semanlink.net/tag/apache_org|broader|http://www.semanlink.net/tag/software +http://www.semanlink.net/tag/apache_org|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/apache_org|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apache_org|uri|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/apache_org|broader_prefLabel|Software +http://www.semanlink.net/tag/apache_org|broader_prefLabel|Open Source +http://www.semanlink.net/tag/fichage_genetique|creationTime|2007-05-07T11:42:43Z +http://www.semanlink.net/tag/fichage_genetique|prefLabel|Fichage génétique +http://www.semanlink.net/tag/fichage_genetique|broader|http://www.semanlink.net/tag/big_brother +http://www.semanlink.net/tag/fichage_genetique|broader|http://www.semanlink.net/tag/fichage +http://www.semanlink.net/tag/fichage_genetique|broader|http://www.semanlink.net/tag/etat_policier +http://www.semanlink.net/tag/fichage_genetique|creationDate|2007-05-07 +http://www.semanlink.net/tag/fichage_genetique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fichage_genetique|uri|http://www.semanlink.net/tag/fichage_genetique +http://www.semanlink.net/tag/fichage_genetique|broader_prefLabel|Big Brother +http://www.semanlink.net/tag/fichage_genetique|broader_prefLabel|Fichage +http://www.semanlink.net/tag/fichage_genetique|broader_prefLabel|Etat policier +http://www.semanlink.net/tag/fichage_genetique|broader_related|http://www.semanlink.net/tag/securite +http://www.semanlink.net/tag/shanghai_expo_2010|creationTime|2010-05-24T22:28:58Z +http://www.semanlink.net/tag/shanghai_expo_2010|prefLabel|Shanghai Expo 2010 +http://www.semanlink.net/tag/shanghai_expo_2010|broader|http://www.semanlink.net/tag/shanghai 
+http://www.semanlink.net/tag/shanghai_expo_2010|broader|http://www.semanlink.net/tag/exposition_universelle +http://www.semanlink.net/tag/shanghai_expo_2010|creationDate|2010-05-24 +http://www.semanlink.net/tag/shanghai_expo_2010|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/shanghai_expo_2010|uri|http://www.semanlink.net/tag/shanghai_expo_2010 +http://www.semanlink.net/tag/shanghai_expo_2010|broader_prefLabel|Shanghaï +http://www.semanlink.net/tag/shanghai_expo_2010|broader_prefLabel|Exposition universelle +http://www.semanlink.net/tag/cryptography|creationTime|2014-02-03T22:31:57Z +http://www.semanlink.net/tag/cryptography|prefLabel|Cryptography +http://www.semanlink.net/tag/cryptography|broader|http://www.semanlink.net/tag/encryption +http://www.semanlink.net/tag/cryptography|creationDate|2014-02-03 +http://www.semanlink.net/tag/cryptography|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cryptography|uri|http://www.semanlink.net/tag/cryptography +http://www.semanlink.net/tag/cryptography|broader_prefLabel|Encryption +http://www.semanlink.net/tag/cryptography|broader_altLabel|Cryptage +http://www.semanlink.net/tag/cryptography|broader_related|http://www.semanlink.net/tag/vie_privee +http://www.semanlink.net/tag/mobile_device|creationTime|2013-05-25T15:01:27Z +http://www.semanlink.net/tag/mobile_device|prefLabel|Mobile device +http://www.semanlink.net/tag/mobile_device|creationDate|2013-05-25 +http://www.semanlink.net/tag/mobile_device|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mobile_device|uri|http://www.semanlink.net/tag/mobile_device +http://www.semanlink.net/tag/dystopia|creationTime|2014-12-15T09:52:56Z +http://www.semanlink.net/tag/dystopia|prefLabel|Dystopia +http://www.semanlink.net/tag/dystopia|creationDate|2014-12-15 +http://www.semanlink.net/tag/dystopia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dystopia|uri|http://www.semanlink.net/tag/dystopia +http://www.semanlink.net/tag/coling2020|creationTime|2020-10-02T03:14:36Z +http://www.semanlink.net/tag/coling2020|prefLabel|COLING2020 +http://www.semanlink.net/tag/coling2020|broader|http://www.semanlink.net/tag/nlp_conference +http://www.semanlink.net/tag/coling2020|creationDate|2020-10-02 +http://www.semanlink.net/tag/coling2020|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coling2020|uri|http://www.semanlink.net/tag/coling2020 +http://www.semanlink.net/tag/coling2020|broader_prefLabel|NLP conference +http://www.semanlink.net/tag/bijan_parsia|creationTime|2007-08-23T23:58:55Z +http://www.semanlink.net/tag/bijan_parsia|prefLabel|Bijan Parsia +http://www.semanlink.net/tag/bijan_parsia|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/bijan_parsia|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/bijan_parsia|broader|http://www.semanlink.net/tag/clark_and_parsia +http://www.semanlink.net/tag/bijan_parsia|related|http://www.semanlink.net/tag/ian_horrocks +http://www.semanlink.net/tag/bijan_parsia|creationDate|2007-08-23 +http://www.semanlink.net/tag/bijan_parsia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bijan_parsia|uri|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/bijan_parsia|broader_prefLabel|Technical girls and guys 
+http://www.semanlink.net/tag/bijan_parsia|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/bijan_parsia|broader_prefLabel|Clark and Parsia +http://www.semanlink.net/tag/bijan_parsia|broader_altLabel|Technical guys +http://www.semanlink.net/tag/bijan_parsia|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/attention_in_graphs|creationTime|2018-11-14T02:12:13Z +http://www.semanlink.net/tag/attention_in_graphs|prefLabel|Attention in Graphs +http://www.semanlink.net/tag/attention_in_graphs|broader|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/tag/attention_in_graphs|broader|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/tag/attention_in_graphs|related|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/attention_in_graphs|creationDate|2018-11-14 +http://www.semanlink.net/tag/attention_in_graphs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/attention_in_graphs|altLabel|Graph + Transformer +http://www.semanlink.net/tag/attention_in_graphs|uri|http://www.semanlink.net/tag/attention_in_graphs +http://www.semanlink.net/tag/attention_in_graphs|broader_prefLabel|Attention mechanism +http://www.semanlink.net/tag/attention_in_graphs|broader_prefLabel|Graphs+Machine Learning +http://www.semanlink.net/tag/attention_in_graphs|broader_altLabel|Graph Machine Learning +http://www.semanlink.net/tag/luis_bunuel|creationTime|2014-03-04T01:47:12Z +http://www.semanlink.net/tag/luis_bunuel|prefLabel|Luis Buñuel +http://www.semanlink.net/tag/luis_bunuel|broader|http://www.semanlink.net/tag/realisateur +http://www.semanlink.net/tag/luis_bunuel|broader|http://www.semanlink.net/tag/espagne +http://www.semanlink.net/tag/luis_bunuel|creationDate|2014-03-04 +http://www.semanlink.net/tag/luis_bunuel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/luis_bunuel|describedBy|https://fr.wikipedia.org/wiki/Luis_Bu%C3%B1uel +http://www.semanlink.net/tag/luis_bunuel|uri|http://www.semanlink.net/tag/luis_bunuel +http://www.semanlink.net/tag/luis_bunuel|broader_prefLabel|Réalisateur +http://www.semanlink.net/tag/luis_bunuel|broader_prefLabel|Espagne +http://www.semanlink.net/tag/luis_bunuel|broader_altLabel|Cinéaste +http://www.semanlink.net/tag/gorille|prefLabel|Gorille +http://www.semanlink.net/tag/gorille|broader|http://www.semanlink.net/tag/grands_singes +http://www.semanlink.net/tag/gorille|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gorille|uri|http://www.semanlink.net/tag/gorille +http://www.semanlink.net/tag/gorille|broader_prefLabel|Grands Singes +http://www.semanlink.net/tag/gorille|broader_altLabel|Apes +http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|creationTime|2016-01-17T23:48:14Z +http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|prefLabel|k-nearest neighbors algorithm +http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|broader|http://www.semanlink.net/tag/nearest_neighbor_search +http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|broader|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|creationDate|2016-01-17 +http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|comment|"non-parametric method used for classification and 
regression: **take the k nearest neighbors (in the feature space) of a point you want to make a prediction about. For regression, average their values; for classification, majority vote.**
+
+**Instance-based learning**, or lazy learning: **the function is only approximated locally** and all computation is deferred until classification. Makes predictions without having to maintain an abstraction (or model) derived from data. **Among the simplest of all machine learning algorithms.**"
+http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|describedBy|https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
+http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|altLabel|KNN
+http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|uri|http://www.semanlink.net/tag/k_nearest_neighbors_algorithm
+http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|broader_prefLabel|Nearest neighbor search
+http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|broader_prefLabel|Algorithmes
+http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|broader_prefLabel|Machine learning: techniques
+http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|broader_altLabel|Similarity search
+http://www.semanlink.net/tag/k_nearest_neighbors_algorithm|broader_related|http://www.semanlink.net/tag/similarity_queries
+http://www.semanlink.net/tag/machine_learning|creationTime|2011-12-28T13:21:20Z
+http://www.semanlink.net/tag/machine_learning|prefLabel|Machine learning
+http://www.semanlink.net/tag/machine_learning|broader|http://www.semanlink.net/tag/artificial_intelligence
+http://www.semanlink.net/tag/machine_learning|broader|http://www.semanlink.net/tag/data_science
+http://www.semanlink.net/tag/machine_learning|related|http://www.semanlink.net/tag/ng
+http://www.semanlink.net/tag/machine_learning|related|http://www.semanlink.net/tag/data_mining
+http://www.semanlink.net/tag/machine_learning|creationDate|2011-12-28
+http://www.semanlink.net/tag/machine_learning|comment|"Machine learning focuses on prediction, based on known properties learned from the training data. Data mining (which is the analysis step of Knowledge Discovery in Databases) focuses on the discovery of (previously) unknown properties in the data.
+ +[Glossary (by google)](https://developers.google.com/machine-learning/glossary/)" +http://www.semanlink.net/tag/machine_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/machine_learning|describedBy|https://en.wikipedia.org/wiki/Machine_learning +http://www.semanlink.net/tag/machine_learning|altLabel|ML +http://www.semanlink.net/tag/machine_learning|uri|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/machine_learning|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/machine_learning|broader_prefLabel|Data science +http://www.semanlink.net/tag/machine_learning|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/machine_learning|broader_altLabel|AI +http://www.semanlink.net/tag/machine_learning|broader_altLabel|IA +http://www.semanlink.net/tag/machine_learning|broader_altLabel|Data analysis +http://www.semanlink.net/tag/machine_learning|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/neoliberalism|creationTime|2016-07-12T13:07:17Z +http://www.semanlink.net/tag/neoliberalism|prefLabel|Neoliberalism +http://www.semanlink.net/tag/neoliberalism|creationDate|2016-07-12 +http://www.semanlink.net/tag/neoliberalism|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neoliberalism|uri|http://www.semanlink.net/tag/neoliberalism +http://www.semanlink.net/tag/rideshare|creationTime|2014-04-16T01:09:38Z +http://www.semanlink.net/tag/rideshare|prefLabel|Rideshare +http://www.semanlink.net/tag/rideshare|creationDate|2014-04-16 +http://www.semanlink.net/tag/rideshare|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rideshare|uri|http://www.semanlink.net/tag/rideshare +http://www.semanlink.net/tag/youtube|prefLabel|YouTube +http://www.semanlink.net/tag/youtube|creationDate|2007-01-07 +http://www.semanlink.net/tag/youtube|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/youtube|homepage|http://www.youtube.com +http://www.semanlink.net/tag/youtube|uri|http://www.semanlink.net/tag/youtube +http://www.semanlink.net/tag/turtle|creationTime|2007-05-18T22:02:53Z +http://www.semanlink.net/tag/turtle|prefLabel|Turtle +http://www.semanlink.net/tag/turtle|broader|http://www.semanlink.net/tag/david_beckett +http://www.semanlink.net/tag/turtle|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/turtle|creationDate|2007-05-18 +http://www.semanlink.net/tag/turtle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/turtle|uri|http://www.semanlink.net/tag/turtle +http://www.semanlink.net/tag/turtle|broader_prefLabel|David Beckett +http://www.semanlink.net/tag/turtle|broader_prefLabel|RDF +http://www.semanlink.net/tag/turtle|broader_altLabel|dajobe +http://www.semanlink.net/tag/turtle|broader_altLabel|Dave Beckett +http://www.semanlink.net/tag/turtle|broader_related|http://www.semanlink.net/tag/yahoo +http://www.semanlink.net/tag/turtle|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/turtle|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/turtle|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/turtle|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/turtle|broader_related|http://www.semanlink.net/tag/grddl 
+http://www.semanlink.net/tag/rdf_fails|creationTime|2016-07-21T18:13:20Z +http://www.semanlink.net/tag/rdf_fails|prefLabel|RDF Fails +http://www.semanlink.net/tag/rdf_fails|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_fails|creationDate|2016-07-21 +http://www.semanlink.net/tag/rdf_fails|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_fails|uri|http://www.semanlink.net/tag/rdf_fails +http://www.semanlink.net/tag/rdf_fails|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_fails|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_fails|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_fails|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_fails|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_fails|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/semantic_web_project|creationTime|2011-10-11T00:28:37Z +http://www.semanlink.net/tag/semantic_web_project|prefLabel|Semantic Web project +http://www.semanlink.net/tag/semantic_web_project|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_project|creationDate|2011-10-11 +http://www.semanlink.net/tag/semantic_web_project|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_project|uri|http://www.semanlink.net/tag/semantic_web_project +http://www.semanlink.net/tag/semantic_web_project|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_project|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_project|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/gnu_octave|creationTime|2012-08-19T15:41:12Z +http://www.semanlink.net/tag/gnu_octave|prefLabel|GNU Octave +http://www.semanlink.net/tag/gnu_octave|broader|http://www.semanlink.net/tag/gnu +http://www.semanlink.net/tag/gnu_octave|broader|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/gnu_octave|broader|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/gnu_octave|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/gnu_octave|related|http://www.semanlink.net/tag/matlab +http://www.semanlink.net/tag/gnu_octave|related|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/gnu_octave|creationDate|2012-08-19 +http://www.semanlink.net/tag/gnu_octave|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gnu_octave|homepage|http://www.gnu.org/software/octave/ +http://www.semanlink.net/tag/gnu_octave|describedBy|https://en.wikipedia.org/wiki/GNU_Octave +http://www.semanlink.net/tag/gnu_octave|uri|http://www.semanlink.net/tag/gnu_octave +http://www.semanlink.net/tag/gnu_octave|broader_prefLabel|GNU +http://www.semanlink.net/tag/gnu_octave|broader_prefLabel|Programming language +http://www.semanlink.net/tag/gnu_octave|broader_prefLabel|Mathématiques +http://www.semanlink.net/tag/gnu_octave|broader_prefLabel|Open Source +http://www.semanlink.net/tag/gnu_octave|broader_altLabel|Langage de programmation +http://www.semanlink.net/tag/gnu_octave|broader_altLabel|Math +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|creationTime|2007-09-19T14:19:11Z +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|prefLabel|LOD: Limitations on browseable data 
+http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|creationDate|2007-09-19 +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|uri|http://www.semanlink.net/tag/lod_limitations_on_browseable_data +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_altLabel|LD +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/lod_limitations_on_browseable_data|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/summly|creationTime|2011-12-28T13:14:13Z +http://www.semanlink.net/tag/summly|prefLabel|summly +http://www.semanlink.net/tag/summly|broader|http://www.semanlink.net/tag/iphone_app +http://www.semanlink.net/tag/summly|broader|http://www.semanlink.net/tag/automatic_summarization +http://www.semanlink.net/tag/summly|related|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/summly|creationDate|2011-12-28 +http://www.semanlink.net/tag/summly|comment|Summly is an iPhone app which summarises and simplifies the content of web pages and search results. 
+http://www.semanlink.net/tag/summly|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/summly|homepage|http://www.summly.com +http://www.semanlink.net/tag/summly|uri|http://www.semanlink.net/tag/summly +http://www.semanlink.net/tag/summly|broader_prefLabel|iphone app +http://www.semanlink.net/tag/summly|broader_prefLabel|Text Summarization +http://www.semanlink.net/tag/summly|broader_altLabel|Automatic summarization +http://www.semanlink.net/tag/summly|broader_related|http://www.semanlink.net/tag/nlp_long_documents +http://www.semanlink.net/tag/dropbox|creationTime|2013-03-13T19:30:22Z +http://www.semanlink.net/tag/dropbox|prefLabel|Dropbox +http://www.semanlink.net/tag/dropbox|creationDate|2013-03-13 +http://www.semanlink.net/tag/dropbox|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dropbox|uri|http://www.semanlink.net/tag/dropbox +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|creationTime|2019-11-24T17:10:16Z +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|prefLabel|ElasticSearch: annotated text field +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|broader|http://www.semanlink.net/tag/elasticsearch +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|related|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|creationDate|2019-11-24 +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|uri|http://www.semanlink.net/tag/elasticsearch_annotated_text_field +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|broader_prefLabel|ElasticSearch +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|broader_prefLabel|Entities +http://www.semanlink.net/tag/elasticsearch_annotated_text_field|broader_related|http://www.semanlink.net/tag/lucene +http://www.semanlink.net/tag/film_italien|prefLabel|Film italien +http://www.semanlink.net/tag/film_italien|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_italien|broader|http://www.semanlink.net/tag/italie +http://www.semanlink.net/tag/film_italien|creationDate|2006-12-19 +http://www.semanlink.net/tag/film_italien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_italien|uri|http://www.semanlink.net/tag/film_italien +http://www.semanlink.net/tag/film_italien|broader_prefLabel|Film +http://www.semanlink.net/tag/film_italien|broader_prefLabel|Italie +http://www.semanlink.net/tag/croisades|prefLabel|Croisades +http://www.semanlink.net/tag/croisades|broader|http://www.semanlink.net/tag/moyen_age +http://www.semanlink.net/tag/croisades|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/croisades|uri|http://www.semanlink.net/tag/croisades +http://www.semanlink.net/tag/croisades|broader_prefLabel|Moyen-âge +http://www.semanlink.net/tag/apprendre|creationTime|2014-12-17T23:45:36Z +http://www.semanlink.net/tag/apprendre|prefLabel|Apprendre +http://www.semanlink.net/tag/apprendre|creationDate|2014-12-17 +http://www.semanlink.net/tag/apprendre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apprendre|uri|http://www.semanlink.net/tag/apprendre 
+http://www.semanlink.net/tag/mandelbrot|prefLabel|Mandelbrot +http://www.semanlink.net/tag/mandelbrot|broader|http://www.semanlink.net/tag/fractales +http://www.semanlink.net/tag/mandelbrot|broader|http://www.semanlink.net/tag/mathematicien +http://www.semanlink.net/tag/mandelbrot|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mandelbrot|uri|http://www.semanlink.net/tag/mandelbrot +http://www.semanlink.net/tag/mandelbrot|broader_prefLabel|Fractales +http://www.semanlink.net/tag/mandelbrot|broader_prefLabel|Mathématicien +http://www.semanlink.net/tag/intelligence_collective|creationTime|2010-07-17T15:32:31Z +http://www.semanlink.net/tag/intelligence_collective|prefLabel|Intelligence collective +http://www.semanlink.net/tag/intelligence_collective|broader|http://www.semanlink.net/tag/intelligence +http://www.semanlink.net/tag/intelligence_collective|creationDate|2010-07-17 +http://www.semanlink.net/tag/intelligence_collective|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/intelligence_collective|uri|http://www.semanlink.net/tag/intelligence_collective +http://www.semanlink.net/tag/intelligence_collective|broader_prefLabel|Intelligence +http://www.semanlink.net/tag/knowledge_graph_search_engine|creationTime|2020-12-10T13:39:28Z +http://www.semanlink.net/tag/knowledge_graph_search_engine|prefLabel|Knowledge graph search engine +http://www.semanlink.net/tag/knowledge_graph_search_engine|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/knowledge_graph_search_engine|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/knowledge_graph_search_engine|creationDate|2020-12-10 +http://www.semanlink.net/tag/knowledge_graph_search_engine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_graph_search_engine|uri|http://www.semanlink.net/tag/knowledge_graph_search_engine +http://www.semanlink.net/tag/knowledge_graph_search_engine|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/knowledge_graph_search_engine|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/knowledge_graph_search_engine|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/knowledge_graph_search_engine|broader_altLabel|KG +http://www.semanlink.net/tag/knowledge_graph_search_engine|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/lenka_zdeborova|creationTime|2018-10-18T13:27:16Z +http://www.semanlink.net/tag/lenka_zdeborova|prefLabel|Lenka Zdeborová +http://www.semanlink.net/tag/lenka_zdeborova|broader|http://www.semanlink.net/tag/ml_and_physics +http://www.semanlink.net/tag/lenka_zdeborova|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/lenka_zdeborova|broader|http://www.semanlink.net/tag/statistical_physics +http://www.semanlink.net/tag/lenka_zdeborova|broader|http://www.semanlink.net/tag/physicien +http://www.semanlink.net/tag/lenka_zdeborova|related|http://www.semanlink.net/tag/france_is_ai_2018 +http://www.semanlink.net/tag/lenka_zdeborova|related|http://www.semanlink.net/tag/cea +http://www.semanlink.net/tag/lenka_zdeborova|creationDate|2018-10-18 +http://www.semanlink.net/tag/lenka_zdeborova|comment|"Statistical Physics Studies of Machine Learning Problems: +> What makes problems studied in machine and statistical physics related? How can this relation be used to understand better the performance and limitations of machine learning systems? 
What happens when a phase transition is found in a computational problem? How do phase transitions influence algorithmic hardness? + +[Using Physical Insights for ML](http://www.ipam.ucla.edu/programs/workshops/workshop-iv-using-physical-insights-for-machine-learning/?tab=overview)" +http://www.semanlink.net/tag/lenka_zdeborova|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lenka_zdeborova|homepage|http://artax.karlin.mff.cuni.cz/~zdebl9am/ +http://www.semanlink.net/tag/lenka_zdeborova|uri|http://www.semanlink.net/tag/lenka_zdeborova +http://www.semanlink.net/tag/lenka_zdeborova|broader_prefLabel|ML and physics +http://www.semanlink.net/tag/lenka_zdeborova|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/lenka_zdeborova|broader_prefLabel|Statistical physics +http://www.semanlink.net/tag/lenka_zdeborova|broader_prefLabel|Physicien +http://www.semanlink.net/tag/histoire_de_l_astronomie|creationTime|2008-05-17T16:53:47Z +http://www.semanlink.net/tag/histoire_de_l_astronomie|prefLabel|Histoire de l'astronomie +http://www.semanlink.net/tag/histoire_de_l_astronomie|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/histoire_de_l_astronomie|creationDate|2008-05-17 +http://www.semanlink.net/tag/histoire_de_l_astronomie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_de_l_astronomie|uri|http://www.semanlink.net/tag/histoire_de_l_astronomie +http://www.semanlink.net/tag/histoire_de_l_astronomie|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/drupal_modules|creationTime|2012-01-23T11:52:24Z +http://www.semanlink.net/tag/drupal_modules|prefLabel|Drupal modules +http://www.semanlink.net/tag/drupal_modules|broader|http://www.semanlink.net/tag/drupal +http://www.semanlink.net/tag/drupal_modules|creationDate|2012-01-23 +http://www.semanlink.net/tag/drupal_modules|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/drupal_modules|uri|http://www.semanlink.net/tag/drupal_modules +http://www.semanlink.net/tag/drupal_modules|broader_prefLabel|Drupal +http://www.semanlink.net/tag/talis_platform|creationTime|2008-04-14T14:19:44Z +http://www.semanlink.net/tag/talis_platform|prefLabel|Talis platform +http://www.semanlink.net/tag/talis_platform|broader|http://www.semanlink.net/tag/talis +http://www.semanlink.net/tag/talis_platform|broader|http://www.semanlink.net/tag/semantic_web_dev +http://www.semanlink.net/tag/talis_platform|broader|http://www.semanlink.net/tag/semantic_web_platform +http://www.semanlink.net/tag/talis_platform|creationDate|2008-04-14 +http://www.semanlink.net/tag/talis_platform|comment|The Talis Platform is an environment for building next generation applications and services based on Semantic Web technologies. It is a hosted system which provides an efficient, robust storage infrastructure. Both arbitrary documents and RDF-based semantic content are supported, with sophisticated query, indexing and search features. 
+http://www.semanlink.net/tag/talis_platform|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/talis_platform|homepage|http://www.talis.com/platform/ +http://www.semanlink.net/tag/talis_platform|uri|http://www.semanlink.net/tag/talis_platform +http://www.semanlink.net/tag/talis_platform|broader_prefLabel|Talis +http://www.semanlink.net/tag/talis_platform|broader_prefLabel|Semantic Web Dev +http://www.semanlink.net/tag/talis_platform|broader_prefLabel|Semantic Web Platform +http://www.semanlink.net/tag/talis_platform|broader_related|http://www.semanlink.net/tag/leigh_dodds +http://www.semanlink.net/tag/talis_platform|broader_related|http://www.semanlink.net/tag/danny_ayers +http://www.semanlink.net/tag/talis_platform|broader_related|http://www.semanlink.net/tag/paul_miller +http://www.semanlink.net/tag/talis_platform|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/angela_merkel|prefLabel|Angela Merkel +http://www.semanlink.net/tag/angela_merkel|broader|http://www.semanlink.net/tag/allemagne +http://www.semanlink.net/tag/angela_merkel|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/angela_merkel|broader|http://www.semanlink.net/tag/chef_d_etat +http://www.semanlink.net/tag/angela_merkel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/angela_merkel|uri|http://www.semanlink.net/tag/angela_merkel +http://www.semanlink.net/tag/angela_merkel|broader_prefLabel|Allemagne +http://www.semanlink.net/tag/angela_merkel|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/angela_merkel|broader_prefLabel|Chef d'état +http://www.semanlink.net/tag/angela_merkel|broader_altLabel|Germany +http://www.semanlink.net/tag/angela_merkel|broader_altLabel|Deutschland +http://www.semanlink.net/tag/porto_rico|creationTime|2019-01-15T13:25:42Z +http://www.semanlink.net/tag/porto_rico|prefLabel|Porto Rico +http://www.semanlink.net/tag/porto_rico|creationDate|2019-01-15 +http://www.semanlink.net/tag/porto_rico|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/porto_rico|uri|http://www.semanlink.net/tag/porto_rico +http://www.semanlink.net/tag/web_application_threats|creationTime|2007-03-16T17:47:08Z +http://www.semanlink.net/tag/web_application_threats|prefLabel|Web Application Threats +http://www.semanlink.net/tag/web_application_threats|broader|http://www.semanlink.net/tag/securite_informatique +http://www.semanlink.net/tag/web_application_threats|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/web_application_threats|creationDate|2007-03-16 +http://www.semanlink.net/tag/web_application_threats|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_application_threats|uri|http://www.semanlink.net/tag/web_application_threats +http://www.semanlink.net/tag/web_application_threats|broader_prefLabel|Cybersecurity +http://www.semanlink.net/tag/web_application_threats|broader_prefLabel|Sécurité informatique +http://www.semanlink.net/tag/web_application_threats|broader_prefLabel|Web dev +http://www.semanlink.net/tag/web_application_threats|broader_altLabel|Web app dev +http://www.semanlink.net/tag/knowledge_base|creationTime|2012-04-19T14:38:37Z +http://www.semanlink.net/tag/knowledge_base|prefLabel|Knowledge bases +http://www.semanlink.net/tag/knowledge_base|broader|http://www.semanlink.net/tag/knowledge 
+http://www.semanlink.net/tag/knowledge_base|creationDate|2012-04-19 +http://www.semanlink.net/tag/knowledge_base|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_base|describedBy|https://en.wikipedia.org/wiki/Knowledge_base +http://www.semanlink.net/tag/knowledge_base|altLabel|Knowledge Base +http://www.semanlink.net/tag/knowledge_base|uri|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/knowledge_base|broader_prefLabel|Knowledge +http://www.semanlink.net/tag/sinai|creationTime|2015-08-22T23:52:12Z +http://www.semanlink.net/tag/sinai|prefLabel|Sinaï +http://www.semanlink.net/tag/sinai|broader|http://www.semanlink.net/tag/egypte +http://www.semanlink.net/tag/sinai|creationDate|2015-08-22 +http://www.semanlink.net/tag/sinai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sinai|describedBy|https://fr.wikipedia.org/wiki/Sina%C3%AF +http://www.semanlink.net/tag/sinai|uri|http://www.semanlink.net/tag/sinai +http://www.semanlink.net/tag/sinai|broader_prefLabel|Egypte +http://www.semanlink.net/tag/ian_horrocks|prefLabel|Ian Horrocks +http://www.semanlink.net/tag/ian_horrocks|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/ian_horrocks|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/ian_horrocks|related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/ian_horrocks|creationDate|2006-12-16 +http://www.semanlink.net/tag/ian_horrocks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ian_horrocks|uri|http://www.semanlink.net/tag/ian_horrocks +http://www.semanlink.net/tag/ian_horrocks|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/ian_horrocks|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/ian_horrocks|broader_altLabel|Technical guys +http://www.semanlink.net/tag/architecture_of_the_world_wide_web|prefLabel|Web architecture +http://www.semanlink.net/tag/architecture_of_the_world_wide_web|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/architecture_of_the_world_wide_web|broader|http://www.semanlink.net/tag/test_of_independent_invention +http://www.semanlink.net/tag/architecture_of_the_world_wide_web|creationDate|2006-12-31 +http://www.semanlink.net/tag/architecture_of_the_world_wide_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/architecture_of_the_world_wide_web|uri|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.semanlink.net/tag/architecture_of_the_world_wide_web|broader_prefLabel|Internet +http://www.semanlink.net/tag/architecture_of_the_world_wide_web|broader_prefLabel|Test of independent invention +http://www.semanlink.net/tag/semantic_web_platform|creationTime|2008-06-05T23:24:31Z +http://www.semanlink.net/tag/semantic_web_platform|prefLabel|Semantic Web Platform +http://www.semanlink.net/tag/semantic_web_platform|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_platform|creationDate|2008-06-05 +http://www.semanlink.net/tag/semantic_web_platform|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_platform|uri|http://www.semanlink.net/tag/semantic_web_platform +http://www.semanlink.net/tag/semantic_web_platform|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_platform|broader_altLabel|sw 
+http://www.semanlink.net/tag/semantic_web_platform|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/d2rq|creationTime|2007-06-23T13:43:25Z +http://www.semanlink.net/tag/d2rq|prefLabel|D2RQ +http://www.semanlink.net/tag/d2rq|broader|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/d2rq|broader|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/d2rq|broader|http://www.semanlink.net/tag/sql_to_rdf_mapping +http://www.semanlink.net/tag/d2rq|creationDate|2007-06-23 +http://www.semanlink.net/tag/d2rq|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/d2rq|homepage|http://www4.wiwiss.fu-berlin.de/bizer/d2rq/ +http://www.semanlink.net/tag/d2rq|uri|http://www.semanlink.net/tag/d2rq +http://www.semanlink.net/tag/d2rq|broader_prefLabel|Chris Bizer +http://www.semanlink.net/tag/d2rq|broader_prefLabel|Richard Cyganiak +http://www.semanlink.net/tag/d2rq|broader_prefLabel|SQL to RDF mapping +http://www.semanlink.net/tag/d2rq|broader_altLabel|dowhatimean.net +http://www.semanlink.net/tag/musee|prefLabel|Musée +http://www.semanlink.net/tag/musee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/musee|uri|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/espace|prefLabel|Espace +http://www.semanlink.net/tag/espace|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/espace|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/espace|altLabel|Space +http://www.semanlink.net/tag/espace|uri|http://www.semanlink.net/tag/espace +http://www.semanlink.net/tag/espace|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/ane|creationTime|2017-06-12T00:56:06Z +http://www.semanlink.net/tag/ane|prefLabel|Âne +http://www.semanlink.net/tag/ane|creationDate|2017-06-12 +http://www.semanlink.net/tag/ane|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ane|uri|http://www.semanlink.net/tag/ane +http://www.semanlink.net/tag/data_interchange_format|creationTime|2008-07-16T13:00:00Z +http://www.semanlink.net/tag/data_interchange_format|prefLabel|Data Interchange Format +http://www.semanlink.net/tag/data_interchange_format|creationDate|2008-07-16 +http://www.semanlink.net/tag/data_interchange_format|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_interchange_format|uri|http://www.semanlink.net/tag/data_interchange_format +http://www.semanlink.net/tag/ml_sequential_data|creationTime|2014-04-28T15:53:30Z +http://www.semanlink.net/tag/ml_sequential_data|prefLabel|ML: Sequential data +http://www.semanlink.net/tag/ml_sequential_data|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/ml_sequential_data|related|http://www.semanlink.net/tag/time_series +http://www.semanlink.net/tag/ml_sequential_data|creationDate|2014-04-28 +http://www.semanlink.net/tag/ml_sequential_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ml_sequential_data|uri|http://www.semanlink.net/tag/ml_sequential_data +http://www.semanlink.net/tag/ml_sequential_data|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/word_embedding|creationTime|2015-10-16T11:17:38Z +http://www.semanlink.net/tag/word_embedding|prefLabel|Word embeddings +http://www.semanlink.net/tag/word_embedding|broader|http://www.semanlink.net/tag/nlp_techniques 
+http://www.semanlink.net/tag/word_embedding|broader|http://www.semanlink.net/tag/distributional_semantics
+http://www.semanlink.net/tag/word_embedding|broader|http://www.semanlink.net/tag/embeddings_in_nlp
+http://www.semanlink.net/tag/word_embedding|broader|http://www.semanlink.net/tag/nn_4_nlp
+http://www.semanlink.net/tag/word_embedding|related|http://www.semanlink.net/tag/tomas_mikolov
+http://www.semanlink.net/tag/word_embedding|related|http://www.semanlink.net/tag/yoshua_bengio
+http://www.semanlink.net/tag/word_embedding|related|http://www.semanlink.net/tag/sense_embeddings
+http://www.semanlink.net/tag/word_embedding|creationDate|2015-10-16
+http://www.semanlink.net/tag/word_embedding|comment|"[Best presentation](doc:2020/06/on_word_embeddings) about word embeddings, by [Sebastian Ruder](tag:sebastian_ruder)
+
+Capture the idea that one can express “meaning” of words using a vector, so that the cosine of the angle between the vectors captures semantic similarity.
+
+A set of language modeling and feature learning techniques where words from the vocabulary (and possibly phrases thereof) are mapped to vectors of real numbers in a low-dimensional space, relative to the vocabulary size.
+
+~ Context-predicting models
+
+~ Latent feature representations of words
+
+Parameterized function mapping words in some language to vectors (perhaps 200 to 500 dimensions). Conceptually it involves a mathematical embedding from a space with one dimension per word to a continuous vector space of much lower dimension.
+
+""Plongement lexical"" in French
+
+Word embedding of a word: a succinct representation of the distribution of other words around this word.
+
+Methods to generate the mapping include neural networks, dimensionality reduction on the word co-occurrence matrix, probabilistic models, and explicit representation in terms of the context in which words appear.
+
+In the new generation of models, the vector estimation problem is handled as a supervised task, where the weights in a word vector are set to maximize the probability of the contexts in which the word is observed in the corpus.
+
+The mapping may be generated by training a neural network on a large corpus to predict a word given a context (Continuous Bag Of Words model) or to predict the context given a word (skip-gram model). The context is a window of surrounding words.
+
+The best-known software to produce word embeddings is Tomas Mikolov's Word2vec. Pre-trained word embeddings are also available on the word2vec code.google page.
+
+Applications:
+
+- document ranking in search
+- boosting the performance in NLP tasks such as syntactic parsing and sentiment analysis.
+ + + + +" +http://www.semanlink.net/tag/word_embedding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/word_embedding|describedBy|https://en.wikipedia.org/wiki/Word_embedding +http://www.semanlink.net/tag/word_embedding|altLabel|Word Embedding +http://www.semanlink.net/tag/word_embedding|altLabel|Plongement lexical +http://www.semanlink.net/tag/word_embedding|uri|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/word_embedding|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/word_embedding|broader_prefLabel|Distributional semantics +http://www.semanlink.net/tag/word_embedding|broader_prefLabel|Embeddings in NLP +http://www.semanlink.net/tag/word_embedding|broader_prefLabel|NN 4 NLP +http://www.semanlink.net/tag/word_embedding|broader_related|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/word_embedding|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/word_embedding|broader_related|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/tag/semantic_web_sites|creationTime|2007-06-12T00:57:40Z +http://www.semanlink.net/tag/semantic_web_sites|prefLabel|semantic web sites +http://www.semanlink.net/tag/semantic_web_sites|creationDate|2007-06-12 +http://www.semanlink.net/tag/semantic_web_sites|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_sites|uri|http://www.semanlink.net/tag/semantic_web_sites +http://www.semanlink.net/tag/coursera_machine_learning|creationTime|2014-03-03T23:03:17Z +http://www.semanlink.net/tag/coursera_machine_learning|prefLabel|Coursera: Machine Learning +http://www.semanlink.net/tag/coursera_machine_learning|broader|http://www.semanlink.net/tag/machine_learning_course +http://www.semanlink.net/tag/coursera_machine_learning|broader|http://www.semanlink.net/tag/coursera +http://www.semanlink.net/tag/coursera_machine_learning|broader|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/coursera_machine_learning|creationDate|2014-03-03 +http://www.semanlink.net/tag/coursera_machine_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coursera_machine_learning|homepage|https://class.coursera.org/ml-005 +http://www.semanlink.net/tag/coursera_machine_learning|uri|http://www.semanlink.net/tag/coursera_machine_learning +http://www.semanlink.net/tag/coursera_machine_learning|broader_prefLabel|Machine Learning Course +http://www.semanlink.net/tag/coursera_machine_learning|broader_prefLabel|Coursera +http://www.semanlink.net/tag/coursera_machine_learning|broader_prefLabel|Andrew Ng +http://www.semanlink.net/tag/coursera_machine_learning|broader_altLabel|Ng +http://www.semanlink.net/tag/coursera_machine_learning|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/coursera_machine_learning|broader_related|http://www.semanlink.net/tag/coursera_machine_learning +http://www.semanlink.net/tag/coursera_machine_learning|broader_related|http://www.semanlink.net/tag/baidu +http://www.semanlink.net/tag/coursera_machine_learning|broader_related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/pfizer|creationTime|2010-08-24T23:56:46Z +http://www.semanlink.net/tag/pfizer|prefLabel|Pfizer +http://www.semanlink.net/tag/pfizer|broader|http://www.semanlink.net/tag/industrie_pharmaceutique +http://www.semanlink.net/tag/pfizer|broader|http://www.semanlink.net/tag/entreprise 
+http://www.semanlink.net/tag/pfizer|creationDate|2010-08-24 +http://www.semanlink.net/tag/pfizer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pfizer|uri|http://www.semanlink.net/tag/pfizer +http://www.semanlink.net/tag/pfizer|broader_prefLabel|Industrie pharmaceutique +http://www.semanlink.net/tag/pfizer|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/uncertainty_in_deep_learning|creationTime|2018-11-05T09:39:06Z +http://www.semanlink.net/tag/uncertainty_in_deep_learning|prefLabel|Uncertainty in Deep Learning +http://www.semanlink.net/tag/uncertainty_in_deep_learning|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/uncertainty_in_deep_learning|broader|http://www.semanlink.net/tag/accountable_ai +http://www.semanlink.net/tag/uncertainty_in_deep_learning|broader|http://www.semanlink.net/tag/uncertainty_reasoning +http://www.semanlink.net/tag/uncertainty_in_deep_learning|related|http://www.semanlink.net/tag/accountable_ai +http://www.semanlink.net/tag/uncertainty_in_deep_learning|creationDate|2018-11-05 +http://www.semanlink.net/tag/uncertainty_in_deep_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uncertainty_in_deep_learning|uri|http://www.semanlink.net/tag/uncertainty_in_deep_learning +http://www.semanlink.net/tag/uncertainty_in_deep_learning|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/uncertainty_in_deep_learning|broader_prefLabel|Accountable AI +http://www.semanlink.net/tag/uncertainty_in_deep_learning|broader_prefLabel|Uncertainty Reasoning +http://www.semanlink.net/tag/uncertainty_in_deep_learning|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/uncertainty_in_deep_learning|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/cruaute|creationTime|2013-04-11T21:42:26Z +http://www.semanlink.net/tag/cruaute|prefLabel|Cruauté +http://www.semanlink.net/tag/cruaute|creationDate|2013-04-11 +http://www.semanlink.net/tag/cruaute|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cruaute|uri|http://www.semanlink.net/tag/cruaute +http://www.semanlink.net/tag/leopard|creationTime|2007-12-24T11:39:31Z +http://www.semanlink.net/tag/leopard|prefLabel|Leopard +http://www.semanlink.net/tag/leopard|broader|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/tag/leopard|creationDate|2007-12-24 +http://www.semanlink.net/tag/leopard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/leopard|uri|http://www.semanlink.net/tag/leopard +http://www.semanlink.net/tag/leopard|broader_prefLabel|Mac OS X +http://www.semanlink.net/tag/leopard|broader_altLabel|OS X +http://www.semanlink.net/tag/leopard|broader_altLabel|OSX +http://www.semanlink.net/tag/okapi_bm25|creationTime|2017-06-14T00:00:07Z +http://www.semanlink.net/tag/okapi_bm25|prefLabel|BM25 +http://www.semanlink.net/tag/okapi_bm25|broader|http://www.semanlink.net/tag/ranking_information_retrieval +http://www.semanlink.net/tag/okapi_bm25|broader|http://www.semanlink.net/tag/probabilistic_relevance_model +http://www.semanlink.net/tag/okapi_bm25|related|http://www.semanlink.net/tag/tf_idf +http://www.semanlink.net/tag/okapi_bm25|related|http://www.semanlink.net/tag/bag_of_words +http://www.semanlink.net/tag/okapi_bm25|creationDate|2017-06-14 +http://www.semanlink.net/tag/okapi_bm25|comment|"ranking function used by search engines to rank 
matching documents according to their relevance to a given search query. Bag-of-words based. Algorithm used by default in [Elasticsearch](elasticsearch) and [Lucene](lucene) + +(ranks documents based on the query terms appearing in each document, regardless of their proximity within the document)" +http://www.semanlink.net/tag/okapi_bm25|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/okapi_bm25|describedBy|https://en.wikipedia.org/wiki/Okapi_BM25 +http://www.semanlink.net/tag/okapi_bm25|altLabel|Okapi BM25 +http://www.semanlink.net/tag/okapi_bm25|uri|http://www.semanlink.net/tag/okapi_bm25 +http://www.semanlink.net/tag/okapi_bm25|broader_prefLabel|Ranking (information retrieval) +http://www.semanlink.net/tag/okapi_bm25|broader_prefLabel|Probabilistic relevance model +http://www.semanlink.net/tag/aichi|prefLabel|Aïchi +http://www.semanlink.net/tag/aichi|broader|http://www.semanlink.net/tag/exposition_universelle +http://www.semanlink.net/tag/aichi|broader|http://www.semanlink.net/tag/japon +http://www.semanlink.net/tag/aichi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aichi|uri|http://www.semanlink.net/tag/aichi +http://www.semanlink.net/tag/aichi|broader_prefLabel|Exposition universelle +http://www.semanlink.net/tag/aichi|broader_prefLabel|Japon +http://www.semanlink.net/tag/aichi|broader_altLabel|Japan +http://www.semanlink.net/tag/ambre|creationTime|2008-04-10T10:36:09Z +http://www.semanlink.net/tag/ambre|prefLabel|Ambre +http://www.semanlink.net/tag/ambre|related|http://www.semanlink.net/tag/insectes_fossiles +http://www.semanlink.net/tag/ambre|creationDate|2008-04-10 +http://www.semanlink.net/tag/ambre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ambre|uri|http://www.semanlink.net/tag/ambre +http://www.semanlink.net/tag/transductive_learning|creationTime|2018-03-03T13:40:16Z +http://www.semanlink.net/tag/transductive_learning|prefLabel|Transductive Learning +http://www.semanlink.net/tag/transductive_learning|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/transductive_learning|broader|http://www.semanlink.net/tag/missing_labels_ml +http://www.semanlink.net/tag/transductive_learning|related|http://www.semanlink.net/tag/semi_supervised_learning +http://www.semanlink.net/tag/transductive_learning|creationDate|2018-03-03 +http://www.semanlink.net/tag/transductive_learning|comment|Reasoning from observed, specific (training) cases to specific (test) cases. 
In contrast, induction is reasoning from observed training cases to general rules, which are then applied to the test cases +http://www.semanlink.net/tag/transductive_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/transductive_learning|describedBy|https://en.wikipedia.org/wiki/Transduction_(machine_learning) +http://www.semanlink.net/tag/transductive_learning|uri|http://www.semanlink.net/tag/transductive_learning +http://www.semanlink.net/tag/transductive_learning|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/transductive_learning|broader_prefLabel|Missing Labels (ML) +http://www.semanlink.net/tag/embeddings|creationTime|2017-09-09T14:04:11Z +http://www.semanlink.net/tag/embeddings|prefLabel|Embeddings +http://www.semanlink.net/tag/embeddings|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/embeddings|broader|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/tag/embeddings|related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/embeddings|related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/embeddings|creationDate|2017-09-09 +http://www.semanlink.net/tag/embeddings|comment|"The objective of embedding methods is to organize symbolic objects (e.g., words, entities, concepts) in a way such that their similarity in the embedding space reflects their semantic or functional similarity +" +http://www.semanlink.net/tag/embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/embeddings|altLabel|embedding +http://www.semanlink.net/tag/embeddings|uri|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/embeddings|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/embeddings|broader_prefLabel|Representation learning +http://www.semanlink.net/tag/embeddings|broader_related|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/arxiv|creationTime|2018-01-22T18:07:33Z +http://www.semanlink.net/tag/arxiv|prefLabel|Arxiv +http://www.semanlink.net/tag/arxiv|creationDate|2018-01-22 +http://www.semanlink.net/tag/arxiv|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arxiv|uri|http://www.semanlink.net/tag/arxiv +http://www.semanlink.net/tag/www2007|creationTime|2007-05-24T13:13:13Z +http://www.semanlink.net/tag/www2007|prefLabel|WWW 2007 +http://www.semanlink.net/tag/www2007|broader|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/www2007|creationDate|2007-05-24 +http://www.semanlink.net/tag/www2007|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/www2007|uri|http://www.semanlink.net/tag/www2007 +http://www.semanlink.net/tag/www2007|broader_prefLabel|TheWebConf +http://www.semanlink.net/tag/www2007|broader_altLabel|WWW Conference +http://www.semanlink.net/tag/le_pen|prefLabel|Le Pen +http://www.semanlink.net/tag/le_pen|broader|http://www.semanlink.net/tag/immigration +http://www.semanlink.net/tag/le_pen|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/le_pen|broader|http://www.semanlink.net/tag/mechant +http://www.semanlink.net/tag/le_pen|broader|http://www.semanlink.net/tag/extreme_droite +http://www.semanlink.net/tag/le_pen|broader|http://www.semanlink.net/tag/politique_francaise +http://www.semanlink.net/tag/le_pen|broader|http://www.semanlink.net/tag/fn 
+http://www.semanlink.net/tag/le_pen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/le_pen|uri|http://www.semanlink.net/tag/le_pen +http://www.semanlink.net/tag/le_pen|broader_prefLabel|Immigration +http://www.semanlink.net/tag/le_pen|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/le_pen|broader_prefLabel|Méchant +http://www.semanlink.net/tag/le_pen|broader_prefLabel|Extrème droite +http://www.semanlink.net/tag/le_pen|broader_prefLabel|Politique française +http://www.semanlink.net/tag/le_pen|broader_prefLabel|FN +http://www.semanlink.net/tag/semantic_web_crm|creationTime|2009-01-15T18:20:28Z +http://www.semanlink.net/tag/semantic_web_crm|prefLabel|Semantic Web: CRM +http://www.semanlink.net/tag/semantic_web_crm|broader|http://www.semanlink.net/tag/crm +http://www.semanlink.net/tag/semantic_web_crm|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_crm|creationDate|2009-01-15 +http://www.semanlink.net/tag/semantic_web_crm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_crm|uri|http://www.semanlink.net/tag/semantic_web_crm +http://www.semanlink.net/tag/semantic_web_crm|broader_prefLabel|CRM +http://www.semanlink.net/tag/semantic_web_crm|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_crm|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_crm|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/reinforcement_learning|creationTime|2016-01-09T00:48:15Z +http://www.semanlink.net/tag/reinforcement_learning|prefLabel|Reinforcement learning +http://www.semanlink.net/tag/reinforcement_learning|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/reinforcement_learning|creationDate|2016-01-09 +http://www.semanlink.net/tag/reinforcement_learning|comment|An area of machine learning inspired by behaviorist psychology, concerned with how software agents ought to take actions in an environment so as to maximize some notion of cumulative reward. 
+http://www.semanlink.net/tag/reinforcement_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reinforcement_learning|describedBy|https://en.wikipedia.org/wiki/Reinforcement_learning +http://www.semanlink.net/tag/reinforcement_learning|altLabel|RL +http://www.semanlink.net/tag/reinforcement_learning|uri|http://www.semanlink.net/tag/reinforcement_learning +http://www.semanlink.net/tag/reinforcement_learning|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/rdf_net_api|creationTime|2008-01-10T00:00:08Z +http://www.semanlink.net/tag/rdf_net_api|prefLabel|RDF Net API +http://www.semanlink.net/tag/rdf_net_api|broader|http://www.semanlink.net/tag/api +http://www.semanlink.net/tag/rdf_net_api|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_net_api|creationDate|2008-01-10 +http://www.semanlink.net/tag/rdf_net_api|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_net_api|uri|http://www.semanlink.net/tag/rdf_net_api +http://www.semanlink.net/tag/rdf_net_api|broader_prefLabel|API +http://www.semanlink.net/tag/rdf_net_api|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_net_api|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_net_api|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_net_api|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_net_api|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_net_api|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/periodes_glacieres|creationTime|2008-08-25T14:09:52Z +http://www.semanlink.net/tag/periodes_glacieres|prefLabel|Périodes glacières +http://www.semanlink.net/tag/periodes_glacieres|broader|http://www.semanlink.net/tag/climat +http://www.semanlink.net/tag/periodes_glacieres|broader|http://www.semanlink.net/tag/geologie +http://www.semanlink.net/tag/periodes_glacieres|related|http://www.semanlink.net/tag/glacier +http://www.semanlink.net/tag/periodes_glacieres|creationDate|2008-08-25 +http://www.semanlink.net/tag/periodes_glacieres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/periodes_glacieres|uri|http://www.semanlink.net/tag/periodes_glacieres +http://www.semanlink.net/tag/periodes_glacieres|broader_prefLabel|Climat +http://www.semanlink.net/tag/periodes_glacieres|broader_prefLabel|Géologie +http://www.semanlink.net/tag/taliban|prefLabel|Taliban +http://www.semanlink.net/tag/taliban|broader|http://www.semanlink.net/tag/afghanistan +http://www.semanlink.net/tag/taliban|broader|http://www.semanlink.net/tag/extremisme_islamique +http://www.semanlink.net/tag/taliban|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/taliban|uri|http://www.semanlink.net/tag/taliban +http://www.semanlink.net/tag/taliban|broader_prefLabel|Afghanistan +http://www.semanlink.net/tag/taliban|broader_prefLabel|Extrémisme islamique +http://www.semanlink.net/tag/multidevice|creationTime|2013-05-23T14:18:52Z +http://www.semanlink.net/tag/multidevice|prefLabel|Multidevice +http://www.semanlink.net/tag/multidevice|creationDate|2013-05-23 +http://www.semanlink.net/tag/multidevice|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multidevice|uri|http://www.semanlink.net/tag/multidevice 
+http://www.semanlink.net/tag/knowledge_distillation|creationTime|2019-08-28T22:49:27Z
+http://www.semanlink.net/tag/knowledge_distillation|prefLabel|Knowledge distillation
+http://www.semanlink.net/tag/knowledge_distillation|broader|http://www.semanlink.net/tag/machines_teaching_machines
+http://www.semanlink.net/tag/knowledge_distillation|broader|http://www.semanlink.net/tag/artificial_neural_network
+http://www.semanlink.net/tag/knowledge_distillation|related|http://www.semanlink.net/tag/mutual_learning
+http://www.semanlink.net/tag/knowledge_distillation|related|http://www.semanlink.net/tag/ensemble_learning
+http://www.semanlink.net/tag/knowledge_distillation|related|http://www.semanlink.net/tag/on_device_nlp
+http://www.semanlink.net/tag/knowledge_distillation|creationDate|2019-08-28
+http://www.semanlink.net/tag/knowledge_distillation|comment|"Knowledge distillation (or teacher-student learning) is a compression technique in which a small model is trained to reproduce the behavior of a larger model (or an ensemble of models).
+
+A key idea behind knowledge distillation is that the soft probabilities output by a trained “teacher” network contain a lot more information about a data point than just the class label. ([source](doc:2020/06/1910_01348_on_the_efficacy_of))"
+http://www.semanlink.net/tag/knowledge_distillation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/knowledge_distillation|altLabel|Teacher-student learning
+http://www.semanlink.net/tag/knowledge_distillation|uri|http://www.semanlink.net/tag/knowledge_distillation
+http://www.semanlink.net/tag/knowledge_distillation|broader_prefLabel|Machines teaching machines
+http://www.semanlink.net/tag/knowledge_distillation|broader_prefLabel|Neural networks
+http://www.semanlink.net/tag/knowledge_distillation|broader_altLabel|Artificial neural network
+http://www.semanlink.net/tag/knowledge_distillation|broader_altLabel|ANN
+http://www.semanlink.net/tag/knowledge_distillation|broader_altLabel|NN
+http://www.semanlink.net/tag/recommender_systems|creationTime|2013-02-16T14:39:48Z
+http://www.semanlink.net/tag/recommender_systems|prefLabel|Recommender Systems
+http://www.semanlink.net/tag/recommender_systems|broader|http://www.semanlink.net/tag/machine_learning_problems
+http://www.semanlink.net/tag/recommender_systems|creationDate|2013-02-16
+http://www.semanlink.net/tag/recommender_systems|comment|"predict the ""rating"" a user would give to an item"
+http://www.semanlink.net/tag/recommender_systems|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/recommender_systems|describedBy|https://en.wikipedia.org/wiki/Recommender_system
+http://www.semanlink.net/tag/recommender_systems|altLabel|Recommandation system
+http://www.semanlink.net/tag/recommender_systems|altLabel|Système de recommandation
+http://www.semanlink.net/tag/recommender_systems|uri|http://www.semanlink.net/tag/recommender_systems
+http://www.semanlink.net/tag/recommender_systems|broader_prefLabel|Machine learning: problems
+http://www.semanlink.net/tag/del_icio_us|prefLabel|del.icio.us
+http://www.semanlink.net/tag/del_icio_us|broader|http://www.semanlink.net/tag/social_bookmarking
+http://www.semanlink.net/tag/del_icio_us|broader|http://www.semanlink.net/tag/tagging
+http://www.semanlink.net/tag/del_icio_us|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/del_icio_us|altLabel|delicious
+http://www.semanlink.net/tag/del_icio_us|uri|http://www.semanlink.net/tag/del_icio_us +http://www.semanlink.net/tag/del_icio_us|broader_prefLabel|Social bookmarking +http://www.semanlink.net/tag/del_icio_us|broader_prefLabel|Tagging +http://www.semanlink.net/tag/clustering_small_sets_of_short_texts|creationTime|2021-05-26T17:27:31Z +http://www.semanlink.net/tag/clustering_small_sets_of_short_texts|prefLabel|Clustering small sets of short texts +http://www.semanlink.net/tag/clustering_small_sets_of_short_texts|broader|http://www.semanlink.net/tag/short_text_clustering +http://www.semanlink.net/tag/clustering_small_sets_of_short_texts|creationDate|2021-05-26 +http://www.semanlink.net/tag/clustering_small_sets_of_short_texts|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/clustering_small_sets_of_short_texts|uri|http://www.semanlink.net/tag/clustering_small_sets_of_short_texts +http://www.semanlink.net/tag/clustering_small_sets_of_short_texts|broader_prefLabel|Short Text Clustering +http://www.semanlink.net/tag/encoding|prefLabel|Encoding +http://www.semanlink.net/tag/encoding|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/encoding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/encoding|uri|http://www.semanlink.net/tag/encoding +http://www.semanlink.net/tag/encoding|broader_prefLabel|Dev +http://www.semanlink.net/tag/adn_mitochondrial|prefLabel|ADN mitochondrial +http://www.semanlink.net/tag/adn_mitochondrial|broader|http://www.semanlink.net/tag/adn +http://www.semanlink.net/tag/adn_mitochondrial|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/adn_mitochondrial|uri|http://www.semanlink.net/tag/adn_mitochondrial +http://www.semanlink.net/tag/adn_mitochondrial|broader_prefLabel|ADN +http://www.semanlink.net/tag/adn_mitochondrial|broader_altLabel|DNA +http://www.semanlink.net/tag/google_web_toolkit|creationTime|2007-06-24T21:46:36Z +http://www.semanlink.net/tag/google_web_toolkit|prefLabel|Google Web Toolkit +http://www.semanlink.net/tag/google_web_toolkit|broader|http://www.semanlink.net/tag/javascript_librairies +http://www.semanlink.net/tag/google_web_toolkit|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/google_web_toolkit|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_web_toolkit|broader|http://www.semanlink.net/tag/ajax +http://www.semanlink.net/tag/google_web_toolkit|creationDate|2007-06-24 +http://www.semanlink.net/tag/google_web_toolkit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_web_toolkit|altLabel|GWT +http://www.semanlink.net/tag/google_web_toolkit|uri|http://www.semanlink.net/tag/google_web_toolkit +http://www.semanlink.net/tag/google_web_toolkit|broader_prefLabel|JavaScript librairies +http://www.semanlink.net/tag/google_web_toolkit|broader_prefLabel|Dev +http://www.semanlink.net/tag/google_web_toolkit|broader_prefLabel|Google +http://www.semanlink.net/tag/google_web_toolkit|broader_prefLabel|Ajax +http://www.semanlink.net/tag/google_web_toolkit|broader_altLabel|JavaScript framework +http://www.semanlink.net/tag/google_web_toolkit|broader_altLabel|XMLHttpRequest +http://www.semanlink.net/tag/google_web_toolkit|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/aidan_hogan|creationTime|2020-03-08T12:08:47Z +http://www.semanlink.net/tag/aidan_hogan|prefLabel|Aidan Hogan 
+http://www.semanlink.net/tag/aidan_hogan|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/aidan_hogan|related|http://www.semanlink.net/tag/chili +http://www.semanlink.net/tag/aidan_hogan|creationDate|2020-03-08 +http://www.semanlink.net/tag/aidan_hogan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aidan_hogan|uri|http://www.semanlink.net/tag/aidan_hogan +http://www.semanlink.net/tag/aidan_hogan|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/caetano_veloso|prefLabel|Caetano Veloso +http://www.semanlink.net/tag/caetano_veloso|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/caetano_veloso|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/caetano_veloso|broader|http://www.semanlink.net/tag/musique_bresilienne +http://www.semanlink.net/tag/caetano_veloso|type|http://purl.org/ontology/mo/MusicArtist +http://www.semanlink.net/tag/caetano_veloso|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/caetano_veloso|homepage|http://www.caetanoveloso.com.br/ +http://www.semanlink.net/tag/caetano_veloso|uri|http://www.semanlink.net/tag/caetano_veloso +http://www.semanlink.net/tag/caetano_veloso|broader_prefLabel|Brésil +http://www.semanlink.net/tag/caetano_veloso|broader_prefLabel|Musicien +http://www.semanlink.net/tag/caetano_veloso|broader_prefLabel|Musique brésilienne +http://www.semanlink.net/tag/caetano_veloso|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/ouganda|prefLabel|Ouganda +http://www.semanlink.net/tag/ouganda|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/ouganda|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ouganda|uri|http://www.semanlink.net/tag/ouganda +http://www.semanlink.net/tag/ouganda|broader_prefLabel|Afrique +http://www.semanlink.net/tag/ouganda|broader_altLabel|Africa +http://www.semanlink.net/tag/public_key_cryptography_in_browsers|creationTime|2015-09-17T23:01:45Z +http://www.semanlink.net/tag/public_key_cryptography_in_browsers|prefLabel|public key cryptography in browsers +http://www.semanlink.net/tag/public_key_cryptography_in_browsers|creationDate|2015-09-17 +http://www.semanlink.net/tag/public_key_cryptography_in_browsers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/public_key_cryptography_in_browsers|uri|http://www.semanlink.net/tag/public_key_cryptography_in_browsers +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|creationTime|2013-04-04T10:36:43Z +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|prefLabel|Coursera: Web Intelligence and Big Data +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|broader|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|broader|http://www.semanlink.net/tag/web_intelligence +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|broader|http://www.semanlink.net/tag/coursera +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|related|http://www.semanlink.net/tag/information_retrieval_techniques +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|creationDate|2013-04-04 
+http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|homepage|https://class.coursera.org/bigdata-002 +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|uri|http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|broader_prefLabel|Big Data +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|broader_prefLabel|Web Intelligence +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|broader_prefLabel|Coursera +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/reporters_sans_frontieres|creationTime|2008-01-10T01:08:44Z +http://www.semanlink.net/tag/reporters_sans_frontieres|prefLabel|Reporters sans frontières +http://www.semanlink.net/tag/reporters_sans_frontieres|broader|http://www.semanlink.net/tag/liberte_de_la_presse +http://www.semanlink.net/tag/reporters_sans_frontieres|creationDate|2008-01-10 +http://www.semanlink.net/tag/reporters_sans_frontieres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reporters_sans_frontieres|uri|http://www.semanlink.net/tag/reporters_sans_frontieres +http://www.semanlink.net/tag/reporters_sans_frontieres|broader_prefLabel|Liberté de la presse +http://www.semanlink.net/tag/communisme|prefLabel|Communisme +http://www.semanlink.net/tag/communisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/communisme|uri|http://www.semanlink.net/tag/communisme +http://www.semanlink.net/tag/www_2012|creationTime|2011-11-08T11:24:01Z +http://www.semanlink.net/tag/www_2012|prefLabel|WWW 2012 +http://www.semanlink.net/tag/www_2012|broader|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/www_2012|broader|http://www.semanlink.net/tag/j_y_etais +http://www.semanlink.net/tag/www_2012|creationDate|2011-11-08 +http://www.semanlink.net/tag/www_2012|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/www_2012|homepage|http://www2012.wwwconference.org/ +http://www.semanlink.net/tag/www_2012|uri|http://www.semanlink.net/tag/www_2012 +http://www.semanlink.net/tag/www_2012|broader_prefLabel|TheWebConf +http://www.semanlink.net/tag/www_2012|broader_prefLabel|J'y étais +http://www.semanlink.net/tag/www_2012|broader_altLabel|WWW Conference +http://www.semanlink.net/tag/shenzhen|creationTime|2014-03-23T23:13:00Z +http://www.semanlink.net/tag/shenzhen|prefLabel|Shenzhen +http://www.semanlink.net/tag/shenzhen|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/shenzhen|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/shenzhen|creationDate|2014-03-23 +http://www.semanlink.net/tag/shenzhen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/shenzhen|describedBy|https://en.wikipedia.org/wiki/Shenzhen +http://www.semanlink.net/tag/shenzhen|uri|http://www.semanlink.net/tag/shenzhen +http://www.semanlink.net/tag/shenzhen|broader_prefLabel|Ville +http://www.semanlink.net/tag/shenzhen|broader_prefLabel|Chine +http://www.semanlink.net/tag/shenzhen|broader_altLabel|China 
+http://www.semanlink.net/tag/meryl_streep|prefLabel|Meryl Streep +http://www.semanlink.net/tag/meryl_streep|creationDate|2007-01-14 +http://www.semanlink.net/tag/meryl_streep|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/meryl_streep|uri|http://www.semanlink.net/tag/meryl_streep +http://www.semanlink.net/tag/cassandra|creationTime|2013-02-14T11:30:37Z +http://www.semanlink.net/tag/cassandra|prefLabel|Cassandra +http://www.semanlink.net/tag/cassandra|broader|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/cassandra|broader|http://www.semanlink.net/tag/database +http://www.semanlink.net/tag/cassandra|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/cassandra|creationDate|2013-02-14 +http://www.semanlink.net/tag/cassandra|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cassandra|homepage|http://cassandra.apache.org/ +http://www.semanlink.net/tag/cassandra|uri|http://www.semanlink.net/tag/cassandra +http://www.semanlink.net/tag/cassandra|broader_prefLabel|Big Data +http://www.semanlink.net/tag/cassandra|broader_prefLabel|Database +http://www.semanlink.net/tag/cassandra|broader_prefLabel|apache.org +http://www.semanlink.net/tag/cassandra|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/semantic_web_client_library|creationTime|2011-02-09T00:20:56Z +http://www.semanlink.net/tag/semantic_web_client_library|prefLabel|Semantic Web Client Library +http://www.semanlink.net/tag/semantic_web_client_library|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/semantic_web_client_library|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/semantic_web_client_library|broader|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/semantic_web_client_library|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/semantic_web_client_library|broader|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/semantic_web_client_library|broader|http://www.semanlink.net/tag/freie_universitat_berlin +http://www.semanlink.net/tag/semantic_web_client_library|broader|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/semantic_web_client_library|creationDate|2011-02-09 +http://www.semanlink.net/tag/semantic_web_client_library|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_client_library|describedBy|http://www4.wiwiss.fu-berlin.de/bizer/ng4j/semwebclient/ +http://www.semanlink.net/tag/semantic_web_client_library|uri|http://www.semanlink.net/tag/semantic_web_client_library +http://www.semanlink.net/tag/semantic_web_client_library|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/semantic_web_client_library|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/semantic_web_client_library|broader_prefLabel|Chris Bizer +http://www.semanlink.net/tag/semantic_web_client_library|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/semantic_web_client_library|broader_prefLabel|Richard Cyganiak +http://www.semanlink.net/tag/semantic_web_client_library|broader_prefLabel|Freie Universität Berlin +http://www.semanlink.net/tag/semantic_web_client_library|broader_prefLabel|GRDDL +http://www.semanlink.net/tag/semantic_web_client_library|broader_altLabel|LD +http://www.semanlink.net/tag/semantic_web_client_library|broader_altLabel|dowhatimean.net 
+http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/semantic_web_client_library|broader_related|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/configuration_and_sw|creationTime|2012-02-13T10:21:04Z +http://www.semanlink.net/tag/configuration_and_sw|prefLabel|Configuration and SW +http://www.semanlink.net/tag/configuration_and_sw|broader|http://www.semanlink.net/tag/configuration +http://www.semanlink.net/tag/configuration_and_sw|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/configuration_and_sw|broader|http://www.semanlink.net/tag/constraints_in_the_sw +http://www.semanlink.net/tag/configuration_and_sw|creationDate|2012-02-13 +http://www.semanlink.net/tag/configuration_and_sw|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/configuration_and_sw|uri|http://www.semanlink.net/tag/configuration_and_sw +http://www.semanlink.net/tag/configuration_and_sw|broader_prefLabel|Configuration +http://www.semanlink.net/tag/configuration_and_sw|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/configuration_and_sw|broader_prefLabel|Constraints in the SW +http://www.semanlink.net/tag/configuration_and_sw|broader_altLabel|sw +http://www.semanlink.net/tag/configuration_and_sw|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/configuration_and_sw|broader_related|http://www.semanlink.net/tag/constraint_programming +http://www.semanlink.net/tag/bon_prof|creationTime|2021-01-12T12:09:47Z +http://www.semanlink.net/tag/bon_prof|prefLabel|Bon prof +http://www.semanlink.net/tag/bon_prof|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/bon_prof|creationDate|2021-01-12 
+http://www.semanlink.net/tag/bon_prof|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bon_prof|uri|http://www.semanlink.net/tag/bon_prof +http://www.semanlink.net/tag/bon_prof|broader_prefLabel|Education +http://www.semanlink.net/tag/bon_prof|broader_altLabel|Enseignement +http://www.semanlink.net/tag/documentation_tool|creationTime|2012-09-02T10:32:41Z +http://www.semanlink.net/tag/documentation_tool|prefLabel|Documentation tool +http://www.semanlink.net/tag/documentation_tool|broader|http://www.semanlink.net/tag/dev_tools +http://www.semanlink.net/tag/documentation_tool|creationDate|2012-09-02 +http://www.semanlink.net/tag/documentation_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/documentation_tool|uri|http://www.semanlink.net/tag/documentation_tool +http://www.semanlink.net/tag/documentation_tool|broader_prefLabel|Dev tools +http://www.semanlink.net/tag/plastic_print|creationTime|2008-04-10T10:39:46Z +http://www.semanlink.net/tag/plastic_print|prefLabel|Plastic print +http://www.semanlink.net/tag/plastic_print|broader|http://www.semanlink.net/tag/3d +http://www.semanlink.net/tag/plastic_print|broader|http://www.semanlink.net/tag/imprimantes +http://www.semanlink.net/tag/plastic_print|creationDate|2008-04-10 +http://www.semanlink.net/tag/plastic_print|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/plastic_print|uri|http://www.semanlink.net/tag/plastic_print +http://www.semanlink.net/tag/plastic_print|broader_prefLabel|3D +http://www.semanlink.net/tag/plastic_print|broader_prefLabel|Imprimantes +http://www.semanlink.net/tag/robert_mcliam_wilson|creationTime|2016-06-12T09:37:38Z +http://www.semanlink.net/tag/robert_mcliam_wilson|prefLabel|Robert McLiam Wilson +http://www.semanlink.net/tag/robert_mcliam_wilson|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/robert_mcliam_wilson|broader|http://www.semanlink.net/tag/irlande_du_nord +http://www.semanlink.net/tag/robert_mcliam_wilson|creationDate|2016-06-12 +http://www.semanlink.net/tag/robert_mcliam_wilson|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/robert_mcliam_wilson|describedBy|https://en.wikipedia.org/wiki/Robert_McLiam_Wilson +http://www.semanlink.net/tag/robert_mcliam_wilson|uri|http://www.semanlink.net/tag/robert_mcliam_wilson +http://www.semanlink.net/tag/robert_mcliam_wilson|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/robert_mcliam_wilson|broader_prefLabel|Irlande du Nord +http://www.semanlink.net/tag/open_data|creationTime|2011-09-10T22:49:09Z +http://www.semanlink.net/tag/open_data|prefLabel|Open Data +http://www.semanlink.net/tag/open_data|related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/open_data|creationDate|2011-09-10 +http://www.semanlink.net/tag/open_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/open_data|uri|http://www.semanlink.net/tag/open_data +http://www.semanlink.net/tag/histoire_anglaise|creationTime|2017-08-10T00:06:28Z +http://www.semanlink.net/tag/histoire_anglaise|prefLabel|Histoire anglaise +http://www.semanlink.net/tag/histoire_anglaise|broader|http://www.semanlink.net/tag/royaume_uni +http://www.semanlink.net/tag/histoire_anglaise|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/histoire_anglaise|creationDate|2017-08-10 
+http://www.semanlink.net/tag/histoire_anglaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/histoire_anglaise|uri|http://www.semanlink.net/tag/histoire_anglaise
+http://www.semanlink.net/tag/histoire_anglaise|broader_prefLabel|Royaume Uni
+http://www.semanlink.net/tag/histoire_anglaise|broader_prefLabel|Histoire
+http://www.semanlink.net/tag/histoire_anglaise|broader_altLabel|UK
+http://www.semanlink.net/tag/pablo_neruda|creationTime|2020-05-07T01:00:09Z
+http://www.semanlink.net/tag/pablo_neruda|prefLabel|Pablo Neruda
+http://www.semanlink.net/tag/pablo_neruda|broader|http://www.semanlink.net/tag/poete
+http://www.semanlink.net/tag/pablo_neruda|broader|http://www.semanlink.net/tag/chili
+http://www.semanlink.net/tag/pablo_neruda|broader|http://www.semanlink.net/tag/prix_nobel
+http://www.semanlink.net/tag/pablo_neruda|creationDate|2020-05-07
+http://www.semanlink.net/tag/pablo_neruda|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/pablo_neruda|describedBy|https://fr.wikipedia.org/wiki/Pablo_Neruda
+http://www.semanlink.net/tag/pablo_neruda|uri|http://www.semanlink.net/tag/pablo_neruda
+http://www.semanlink.net/tag/pablo_neruda|broader_prefLabel|Poète
+http://www.semanlink.net/tag/pablo_neruda|broader_prefLabel|Chili
+http://www.semanlink.net/tag/pablo_neruda|broader_prefLabel|Prix Nobel
+http://www.semanlink.net/tag/search|creationTime|2021-05-19T01:14:37Z
+http://www.semanlink.net/tag/search|prefLabel|Search
+http://www.semanlink.net/tag/search|creationDate|2021-05-19
+http://www.semanlink.net/tag/search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/search|uri|http://www.semanlink.net/tag/search
+http://www.semanlink.net/tag/latent_semantic_analysis|creationTime|2013-09-09T16:56:32Z
+http://www.semanlink.net/tag/latent_semantic_analysis|prefLabel|Latent Semantic Analysis
+http://www.semanlink.net/tag/latent_semantic_analysis|broader|http://www.semanlink.net/tag/vector_space_model
+http://www.semanlink.net/tag/latent_semantic_analysis|broader|http://www.semanlink.net/tag/distributional_semantics
+http://www.semanlink.net/tag/latent_semantic_analysis|broader|http://www.semanlink.net/tag/nlp_techniques
+http://www.semanlink.net/tag/latent_semantic_analysis|related|http://www.semanlink.net/tag/dimensionality_reduction
+http://www.semanlink.net/tag/latent_semantic_analysis|related|http://www.semanlink.net/tag/singular_value_decomposition
+http://www.semanlink.net/tag/latent_semantic_analysis|related|http://www.semanlink.net/tag/principal_component_analysis
+http://www.semanlink.net/tag/latent_semantic_analysis|related|http://www.semanlink.net/tag/topic_modeling
+http://www.semanlink.net/tag/latent_semantic_analysis|creationDate|2013-09-09
+http://www.semanlink.net/tag/latent_semantic_analysis|comment|"Technique of analyzing relationships between a set of documents and the terms they contain, by producing a set of concepts related to the documents and terms. LSA assumes that words that are close in meaning will occur in similar pieces of text.
+
+LSI transforms documents from either bag-of-words or (preferably) TfIdf-weighted space into a latent space of a lower dimensionality.
+
+A matrix containing word counts (rows) per paragraph (columns) is constructed from a large piece of text. [Singular value decomposition (SVD)](singular_value_decomposition) is used to reduce the number of rows while preserving the similarity structure among columns.
Similarities between words and/or docs can then be evaluated using cosine-distance in the low-dimensional space
+
+- pros:
+    - alleviates the problem of **synonymy** (note: wikipedia contradicts itself regarding polysemy; I would say that LSI cannot solve this problem)
+    - can output topics in a **ranked order**.
+- cons:
+    - **requires a num_topics parameter**.
+    - dimensions have no easily interpretable meaning in natural language
+    - SVD is computationally intensive (still a problem with improved algorithms?)
+    - wikipedia says that the probabilistic model of LSA does not match observed data: LSA assumes that words and documents form a joint Gaussian model (ergodic hypothesis), while a Poisson distribution has been observed. Thus, a newer alternative is probabilistic latent semantic analysis, based on a multinomial model, which is reported to give better results than standard LSA
+
+[Gensim tutorial about transformations](https://markroxor.github.io/gensim/static/notebooks/Topics_and_Transformations.html) says that ""LSI training is unique in that it can continue at any point, simply by providing more training documents.""
+
+(LSI or LSA? Truncated SVD applied to document similarity is called Latent Semantic Indexing (LSI), but it is called Latent Semantic Analysis (LSA) when applied to word similarity.)
+
+4 ways of looking at the Truncated SVD ([cf.](http://www.jair.org/media/2934/live-2934-4846-jair.pdf)):
+
+- Latent meaning: the truncated SVD creates a low-dimensional linear mapping between words in row space and contexts in columns which captures the hidden (latent) meaning in the words and contexts
+
+- Noise reduction: the truncated SVD can be seen as a smoothed version of the original matrix (which captures the signal and leaves out the noise)
+
+- A way to discover high-order co-occurrence: when two words appear in similar contexts
+
+- Sparsity reduction: the original matrix is sparse, but the truncated SVD is dense.
Sparsity may be viewed as a problem of insufficient data and truncated SVD as a way of simulating the missing text
+
+[See also ""Introduction to Information Retrieval"" Manning 2008](https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html)
+"
+http://www.semanlink.net/tag/latent_semantic_analysis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/latent_semantic_analysis|describedBy|https://en.wikipedia.org/wiki/Latent_semantic_analysis
+http://www.semanlink.net/tag/latent_semantic_analysis|altLabel|LSI
+http://www.semanlink.net/tag/latent_semantic_analysis|altLabel|Latent semantic indexing
+http://www.semanlink.net/tag/latent_semantic_analysis|altLabel|LSA
+http://www.semanlink.net/tag/latent_semantic_analysis|uri|http://www.semanlink.net/tag/latent_semantic_analysis
+http://www.semanlink.net/tag/latent_semantic_analysis|broader_prefLabel|Vector space model
+http://www.semanlink.net/tag/latent_semantic_analysis|broader_prefLabel|Distributional semantics
+http://www.semanlink.net/tag/latent_semantic_analysis|broader_prefLabel|NLP techniques
+http://www.semanlink.net/tag/latent_semantic_analysis|broader_altLabel|Vectorial semantics
+http://www.semanlink.net/tag/latent_semantic_analysis|broader_related|http://www.semanlink.net/tag/clustering_of_text_documents
+http://www.semanlink.net/tag/latent_semantic_analysis|broader_related|http://www.semanlink.net/tag/word_embedding
+http://www.semanlink.net/tag/industrie_pharmaceutique|prefLabel|Industrie pharmaceutique
+http://www.semanlink.net/tag/industrie_pharmaceutique|broader|http://www.semanlink.net/tag/industrie
+http://www.semanlink.net/tag/industrie_pharmaceutique|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/industrie_pharmaceutique|broader|http://www.semanlink.net/tag/sante
+http://www.semanlink.net/tag/industrie_pharmaceutique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/industrie_pharmaceutique|uri|http://www.semanlink.net/tag/industrie_pharmaceutique
+http://www.semanlink.net/tag/industrie_pharmaceutique|broader_prefLabel|industrie
+http://www.semanlink.net/tag/industrie_pharmaceutique|broader_prefLabel|Economie
+http://www.semanlink.net/tag/industrie_pharmaceutique|broader_prefLabel|Santé
+http://www.semanlink.net/tag/industrie_pharmaceutique|broader_related|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/tf_idf|creationTime|2013-05-31T14:39:27Z
+http://www.semanlink.net/tag/tf_idf|prefLabel|TF-IDF
+http://www.semanlink.net/tag/tf_idf|broader|http://www.semanlink.net/tag/vector_space_model
+http://www.semanlink.net/tag/tf_idf|broader|http://www.semanlink.net/tag/probabilistic_relevance_model
+http://www.semanlink.net/tag/tf_idf|broader|http://www.semanlink.net/tag/information_retrieval_techniques
+http://www.semanlink.net/tag/tf_idf|creationDate|2013-05-31
+http://www.semanlink.net/tag/tf_idf|comment|"Term Frequency-Inverse Document Frequency.
+
+major limitations:
+
+- It computes document similarity directly in the word-count space, which could be slow for large vocabularies.
+- It assumes that the counts of different words provide independent evidence of similarity.
+- It makes no use of semantic similarities between words.
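+
+A minimal sketch of TF-IDF document similarity with scikit-learn, for illustration (the `sklearn` package and the toy corpus are assumptions, not part of this note; `TfidfVectorizer` L2-normalizes rows by default, so the dot product below yields cosine similarities, computed directly in the word-count space as noted above):
+
+    from sklearn.feature_extraction.text import TfidfVectorizer
+
+    docs = ["the cat sat", "the dog sat", "cats and dogs"]  # toy corpus
+    X = TfidfVectorizer().fit_transform(docs)  # sparse document-term matrix, TF-IDF weighted
+    print((X @ X.T).toarray())  # pairwise document cosine similarities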
+
+"
+http://www.semanlink.net/tag/tf_idf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/tf_idf|describedBy|https://en.wikipedia.org/wiki/Tf-idf
+http://www.semanlink.net/tag/tf_idf|uri|http://www.semanlink.net/tag/tf_idf
+http://www.semanlink.net/tag/tf_idf|broader_prefLabel|Vector space model
+http://www.semanlink.net/tag/tf_idf|broader_prefLabel|Probabilistic relevance model
+http://www.semanlink.net/tag/tf_idf|broader_prefLabel|Information retrieval: techniques
+http://www.semanlink.net/tag/tf_idf|broader_altLabel|Vectorial semantics
+http://www.semanlink.net/tag/cross_validation|creationTime|2016-01-11T17:51:02Z
+http://www.semanlink.net/tag/cross_validation|prefLabel|Cross-validation
+http://www.semanlink.net/tag/cross_validation|broader|http://www.semanlink.net/tag/machine_learning_techniques
+http://www.semanlink.net/tag/cross_validation|creationDate|2016-01-11
+http://www.semanlink.net/tag/cross_validation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cross_validation|describedBy|https://en.wikipedia.org/wiki/Cross-validation_(statistics)
+http://www.semanlink.net/tag/cross_validation|uri|http://www.semanlink.net/tag/cross_validation
+http://www.semanlink.net/tag/cross_validation|broader_prefLabel|Machine learning: techniques
+http://www.semanlink.net/tag/asie_mineure|prefLabel|Asie mineure
+http://www.semanlink.net/tag/asie_mineure|broader|http://www.semanlink.net/tag/antiquite
+http://www.semanlink.net/tag/asie_mineure|broader|http://www.semanlink.net/tag/turquie
+http://www.semanlink.net/tag/asie_mineure|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/asie_mineure|altLabel|Anatolie
+http://www.semanlink.net/tag/asie_mineure|uri|http://www.semanlink.net/tag/asie_mineure
+http://www.semanlink.net/tag/asie_mineure|broader_prefLabel|Antiquité
+http://www.semanlink.net/tag/asie_mineure|broader_prefLabel|Turquie
+http://www.semanlink.net/tag/juridique|creationTime|2019-01-24T17:14:21Z
+http://www.semanlink.net/tag/juridique|prefLabel|Juridique
+http://www.semanlink.net/tag/juridique|creationDate|2019-01-24
+http://www.semanlink.net/tag/juridique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/juridique|uri|http://www.semanlink.net/tag/juridique
+http://www.semanlink.net/tag/convolutional_neural_network|creationTime|2015-11-08T11:55:21Z
+http://www.semanlink.net/tag/convolutional_neural_network|prefLabel|Convolutional neural network
+http://www.semanlink.net/tag/convolutional_neural_network|broader|http://www.semanlink.net/tag/artificial_neural_network
+http://www.semanlink.net/tag/convolutional_neural_network|related|http://www.semanlink.net/tag/yann_lecun
+http://www.semanlink.net/tag/convolutional_neural_network|creationDate|2015-11-08
+http://www.semanlink.net/tag/convolutional_neural_network|comment|"Feed-forward artificial neural network where the individual neurons are tiled in such a way that they respond to overlapping regions in the visual field. CNNs use convolutions over the input layer to compute the output. Widely used models for image and video recognition.
+
+Main assumption: data are compositional; they are formed of patterns that are:
+
+- Local
+- Stationary
+- Multi-scale (hierarchical)
+
+ConvNets leverage the compositionality structure: they extract compositional features and feed them to a classifier, recommender, etc. (end-to-end)."
+http://www.semanlink.net/tag/convolutional_neural_network|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/convolutional_neural_network|describedBy|https://en.wikipedia.org/wiki/Convolutional_neural_network +http://www.semanlink.net/tag/convolutional_neural_network|altLabel|Convnets +http://www.semanlink.net/tag/convolutional_neural_network|altLabel|CNN +http://www.semanlink.net/tag/convolutional_neural_network|altLabel|Convolutional neural networks +http://www.semanlink.net/tag/convolutional_neural_network|altLabel|Convnet +http://www.semanlink.net/tag/convolutional_neural_network|uri|http://www.semanlink.net/tag/convolutional_neural_network +http://www.semanlink.net/tag/convolutional_neural_network|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/convolutional_neural_network|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/convolutional_neural_network|broader_altLabel|ANN +http://www.semanlink.net/tag/convolutional_neural_network|broader_altLabel|NN +http://www.semanlink.net/tag/torture|prefLabel|Torture +http://www.semanlink.net/tag/torture|broader|http://www.semanlink.net/tag/horreur +http://www.semanlink.net/tag/torture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/torture|uri|http://www.semanlink.net/tag/torture +http://www.semanlink.net/tag/torture|broader_prefLabel|Horreur +http://www.semanlink.net/tag/mussolini|prefLabel|Mussolini +http://www.semanlink.net/tag/mussolini|broader|http://www.semanlink.net/tag/italie +http://www.semanlink.net/tag/mussolini|broader|http://www.semanlink.net/tag/dictature +http://www.semanlink.net/tag/mussolini|broader|http://www.semanlink.net/tag/fascisme +http://www.semanlink.net/tag/mussolini|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mussolini|uri|http://www.semanlink.net/tag/mussolini +http://www.semanlink.net/tag/mussolini|broader_prefLabel|Italie +http://www.semanlink.net/tag/mussolini|broader_prefLabel|Dictature +http://www.semanlink.net/tag/mussolini|broader_prefLabel|Fascisme +http://www.semanlink.net/tag/functional_programming|creationTime|2014-10-07T16:11:39Z +http://www.semanlink.net/tag/functional_programming|prefLabel|Functional programming +http://www.semanlink.net/tag/functional_programming|broader|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/functional_programming|creationDate|2014-10-07 +http://www.semanlink.net/tag/functional_programming|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/functional_programming|describedBy|https://en.wikipedia.org/wiki/Functional_programming +http://www.semanlink.net/tag/functional_programming|uri|http://www.semanlink.net/tag/functional_programming +http://www.semanlink.net/tag/functional_programming|broader_prefLabel|Programming language +http://www.semanlink.net/tag/functional_programming|broader_altLabel|Langage de programmation +http://www.semanlink.net/tag/sdmx|creationTime|2010-07-16T14:13:56Z +http://www.semanlink.net/tag/sdmx|prefLabel|SDMX +http://www.semanlink.net/tag/sdmx|broader|http://www.semanlink.net/tag/statistical_data +http://www.semanlink.net/tag/sdmx|creationDate|2010-07-16 +http://www.semanlink.net/tag/sdmx|comment|"""Statistical Data and Metadata Exchange""
+SDMX is an initiative to foster standards for the exchange of statistical information." +http://www.semanlink.net/tag/sdmx|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sdmx|describedBy|http://sdmx.org/ +http://www.semanlink.net/tag/sdmx|uri|http://www.semanlink.net/tag/sdmx +http://www.semanlink.net/tag/sdmx|broader_prefLabel|Statistical data +http://www.semanlink.net/tag/stem_cell|creationTime|2013-09-12T00:37:56Z +http://www.semanlink.net/tag/stem_cell|prefLabel|Cellule souche +http://www.semanlink.net/tag/stem_cell|prefLabel|Stem cell +http://www.semanlink.net/tag/stem_cell|broader|http://www.semanlink.net/tag/medecine +http://www.semanlink.net/tag/stem_cell|creationDate|2013-09-12 +http://www.semanlink.net/tag/stem_cell|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stem_cell|describedBy|https://en.wikipedia.org/wiki/Stem_cell +http://www.semanlink.net/tag/stem_cell|uri|http://www.semanlink.net/tag/stem_cell +http://www.semanlink.net/tag/stem_cell|broader_prefLabel|Médecine +http://www.semanlink.net/tag/baidu|creationTime|2010-08-30T14:17:13Z +http://www.semanlink.net/tag/baidu|prefLabel|Baidu +http://www.semanlink.net/tag/baidu|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/baidu|broader|http://www.semanlink.net/tag/chine_technologie +http://www.semanlink.net/tag/baidu|creationDate|2010-08-30 +http://www.semanlink.net/tag/baidu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/baidu|describedBy|https://en.wikipedia.org/wiki/Baidu +http://www.semanlink.net/tag/baidu|uri|http://www.semanlink.net/tag/baidu +http://www.semanlink.net/tag/baidu|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/baidu|broader_prefLabel|Chine : technologie +http://www.semanlink.net/tag/baidu|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/aspect_detection|creationTime|2020-09-06T16:43:41Z +http://www.semanlink.net/tag/aspect_detection|prefLabel|Aspect Detection +http://www.semanlink.net/tag/aspect_detection|broader|http://www.semanlink.net/tag/aspect_nlp +http://www.semanlink.net/tag/aspect_detection|related|http://www.semanlink.net/tag/intent_detection +http://www.semanlink.net/tag/aspect_detection|creationDate|2020-09-06 +http://www.semanlink.net/tag/aspect_detection|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aspect_detection|uri|http://www.semanlink.net/tag/aspect_detection +http://www.semanlink.net/tag/aspect_detection|broader_prefLabel|Aspect (NLP) +http://www.semanlink.net/tag/politique|prefLabel|Politique +http://www.semanlink.net/tag/politique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/politique|uri|http://www.semanlink.net/tag/politique +http://www.semanlink.net/tag/transition_energetique|creationTime|2013-03-30T10:22:59Z +http://www.semanlink.net/tag/transition_energetique|prefLabel|Transition énergétique +http://www.semanlink.net/tag/transition_energetique|broader|http://www.semanlink.net/tag/energie +http://www.semanlink.net/tag/transition_energetique|related|http://www.semanlink.net/tag/pic_de_hubbert +http://www.semanlink.net/tag/transition_energetique|creationDate|2013-03-30 +http://www.semanlink.net/tag/transition_energetique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/transition_energetique|uri|http://www.semanlink.net/tag/transition_energetique 
+http://www.semanlink.net/tag/transition_energetique|broader_prefLabel|Energie +http://www.semanlink.net/tag/ml_nlp_blog|creationTime|2018-09-09T15:35:59Z +http://www.semanlink.net/tag/ml_nlp_blog|prefLabel|ML/NLP blog +http://www.semanlink.net/tag/ml_nlp_blog|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/ml_nlp_blog|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/ml_nlp_blog|broader|http://www.semanlink.net/tag/blog +http://www.semanlink.net/tag/ml_nlp_blog|creationDate|2018-09-09 +http://www.semanlink.net/tag/ml_nlp_blog|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ml_nlp_blog|uri|http://www.semanlink.net/tag/ml_nlp_blog +http://www.semanlink.net/tag/ml_nlp_blog|broader_prefLabel|NLP +http://www.semanlink.net/tag/ml_nlp_blog|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/ml_nlp_blog|broader_prefLabel|Blog +http://www.semanlink.net/tag/ml_nlp_blog|broader_altLabel|TALN +http://www.semanlink.net/tag/ml_nlp_blog|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/ml_nlp_blog|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/ml_nlp_blog|broader_altLabel|ML +http://www.semanlink.net/tag/ml_nlp_blog|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/ml_nlp_blog|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/talis|creationTime|2008-03-04T23:01:09Z +http://www.semanlink.net/tag/talis|prefLabel|Talis +http://www.semanlink.net/tag/talis|broader|http://www.semanlink.net/tag/semantic_web_company +http://www.semanlink.net/tag/talis|related|http://www.semanlink.net/tag/leigh_dodds +http://www.semanlink.net/tag/talis|related|http://www.semanlink.net/tag/danny_ayers +http://www.semanlink.net/tag/talis|related|http://www.semanlink.net/tag/paul_miller +http://www.semanlink.net/tag/talis|related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/talis|creationDate|2008-03-04 +http://www.semanlink.net/tag/talis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/talis|homepage|http://www.talis.com/ +http://www.semanlink.net/tag/talis|uri|http://www.semanlink.net/tag/talis +http://www.semanlink.net/tag/talis|broader_prefLabel|Semantic web company +http://www.semanlink.net/tag/talis|broader_altLabel|Semantic Web : entreprise +http://www.semanlink.net/tag/talis|broader_altLabel|Semantic Web: enterprise +http://www.semanlink.net/tag/a_voir|creationTime|2007-04-03T23:40:55Z +http://www.semanlink.net/tag/a_voir|prefLabel|A voir +http://www.semanlink.net/tag/a_voir|broader|http://www.semanlink.net/tag/todo_list +http://www.semanlink.net/tag/a_voir|creationDate|2007-04-03 +http://www.semanlink.net/tag/a_voir|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/a_voir|uri|http://www.semanlink.net/tag/a_voir +http://www.semanlink.net/tag/a_voir|broader_prefLabel|Todo list +http://www.semanlink.net/tag/semantic_pingback|creationTime|2010-04-26T11:37:56Z +http://www.semanlink.net/tag/semantic_pingback|prefLabel|Semantic pingback +http://www.semanlink.net/tag/semantic_pingback|creationDate|2010-04-26 +http://www.semanlink.net/tag/semantic_pingback|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_pingback|uri|http://www.semanlink.net/tag/semantic_pingback 
+http://www.semanlink.net/tag/ranked_entities_in_search_results|creationTime|2020-07-02T15:46:32Z +http://www.semanlink.net/tag/ranked_entities_in_search_results|prefLabel|Ranked Entities in Search Results +http://www.semanlink.net/tag/ranked_entities_in_search_results|broader|http://www.semanlink.net/tag/nlp_and_search +http://www.semanlink.net/tag/ranked_entities_in_search_results|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/ranked_entities_in_search_results|broader|http://www.semanlink.net/tag/ranking_information_retrieval +http://www.semanlink.net/tag/ranked_entities_in_search_results|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/ranked_entities_in_search_results|creationDate|2020-07-02 +http://www.semanlink.net/tag/ranked_entities_in_search_results|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ranked_entities_in_search_results|uri|http://www.semanlink.net/tag/ranked_entities_in_search_results +http://www.semanlink.net/tag/ranked_entities_in_search_results|broader_prefLabel|NLP and Search +http://www.semanlink.net/tag/ranked_entities_in_search_results|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/ranked_entities_in_search_results|broader_prefLabel|Ranking (information retrieval) +http://www.semanlink.net/tag/ranked_entities_in_search_results|broader_prefLabel|Entities +http://www.semanlink.net/tag/ranked_entities_in_search_results|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/bombe_atomique|prefLabel|Bombe atomique +http://www.semanlink.net/tag/bombe_atomique|broader|http://www.semanlink.net/tag/armement +http://www.semanlink.net/tag/bombe_atomique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bombe_atomique|uri|http://www.semanlink.net/tag/bombe_atomique +http://www.semanlink.net/tag/bombe_atomique|broader_prefLabel|Armement +http://www.semanlink.net/tag/support_vector_machine|creationTime|2013-06-06T14:53:17Z +http://www.semanlink.net/tag/support_vector_machine|prefLabel|Support vector machine +http://www.semanlink.net/tag/support_vector_machine|broader|http://www.semanlink.net/tag/supervised_machine_learning +http://www.semanlink.net/tag/support_vector_machine|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/support_vector_machine|broader|http://www.semanlink.net/tag/kernel_method +http://www.semanlink.net/tag/support_vector_machine|creationDate|2013-06-06 +http://www.semanlink.net/tag/support_vector_machine|comment|"supervised learning models used for classification and regression analysis. + +An SVM model is a representation of the training examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. + +Non-probabilistic binary linear classifier (some methods exist to use SVM in a probabilistic classification setting). Can be made non-linear with the ""kernel trick"" (implicitly mapping the inputs into high-dimensional feature spaces.) 
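+
+A minimal sketch of the kernel trick on data that is not linearly separable, with scikit-learn, for illustration (the `sklearn` package, the toy data and the RBF kernel choice are assumptions, not part of this note):
+
+    from sklearn.svm import SVC
+
+    X = [[0, 0], [1, 1], [1, 0], [0, 1]]  # toy XOR-like data: not linearly separable
+    y = [0, 1, 1, 0]
+    clf = SVC(kernel="rbf").fit(X, y)  # kernel trick: implicit mapping to a high-dimensional feature space
+    print(clf.predict([[0.9, 0.9]]))  # classify a new point close to (1, 1)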
+"
+http://www.semanlink.net/tag/support_vector_machine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/support_vector_machine|describedBy|https://en.wikipedia.org/wiki/Support_vector_machine
+http://www.semanlink.net/tag/support_vector_machine|altLabel|SVM
+http://www.semanlink.net/tag/support_vector_machine|uri|http://www.semanlink.net/tag/support_vector_machine
+http://www.semanlink.net/tag/support_vector_machine|broader_prefLabel|Supervised machine learning
+http://www.semanlink.net/tag/support_vector_machine|broader_prefLabel|Classification
+http://www.semanlink.net/tag/support_vector_machine|broader_prefLabel|Kernel methods
+http://www.semanlink.net/tag/support_vector_machine|broader_altLabel|Kernel trick
+http://www.semanlink.net/tag/support_vector_machine|broader_altLabel|Kernel method
+http://www.semanlink.net/tag/forms|creationTime|2008-06-02T22:15:29Z
+http://www.semanlink.net/tag/forms|prefLabel|Forms
+http://www.semanlink.net/tag/forms|broader|http://www.semanlink.net/tag/html_dev
+http://www.semanlink.net/tag/forms|creationDate|2008-06-02
+http://www.semanlink.net/tag/forms|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/forms|uri|http://www.semanlink.net/tag/forms
+http://www.semanlink.net/tag/forms|broader_prefLabel|HTML Dev
+http://www.semanlink.net/tag/web_tv|creationTime|2010-06-26T11:23:26Z
+http://www.semanlink.net/tag/web_tv|prefLabel|Web TV
+http://www.semanlink.net/tag/web_tv|creationDate|2010-06-26
+http://www.semanlink.net/tag/web_tv|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/web_tv|uri|http://www.semanlink.net/tag/web_tv
+http://www.semanlink.net/tag/dense_passage_retrieval|creationTime|2021-06-03T11:06:49Z
+http://www.semanlink.net/tag/dense_passage_retrieval|prefLabel|Dense Passage Retrieval
+http://www.semanlink.net/tag/dense_passage_retrieval|broader|http://www.semanlink.net/tag/open_domain_question_answering
+http://www.semanlink.net/tag/dense_passage_retrieval|related|http://www.semanlink.net/tag/okapi_bm25
+http://www.semanlink.net/tag/dense_passage_retrieval|creationDate|2021-06-03
+http://www.semanlink.net/tag/dense_passage_retrieval|comment|"As opposed to sparse retrievers such as TF-IDF and BM25
+
+> Open-domain question answering relies on efficient passage retrieval to select candidate contexts, where traditional sparse vector space models, such as TF-IDF or BM25, are the de facto method. [In this work](doc:2021/06/2004_04906_dense_passage_retr), we show that retrieval can be practically implemented using dense representations alone, where embeddings are learned from a small number of questions and passages by a simple dual-encoder framework.
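+
+(Concretely, the dual-encoder scores a question q against a passage p with a dot product of dense vectors, sim(q, p) = E_Q(q) · E_P(p), where E_Q and E_P are two BERT-style encoders; retrieval is then a nearest-neighbor search over precomputed passage vectors.)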
+ +[GitHub](https://github.com/facebookresearch/DPR) ; [at HuggingFace](https://huggingface.co/transformers/model_doc/dpr.html) +" +http://www.semanlink.net/tag/dense_passage_retrieval|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dense_passage_retrieval|altLabel|DPR +http://www.semanlink.net/tag/dense_passage_retrieval|uri|http://www.semanlink.net/tag/dense_passage_retrieval +http://www.semanlink.net/tag/dense_passage_retrieval|broader_prefLabel|Open Domain Question Answering +http://www.semanlink.net/tag/henri_iv|creationTime|2013-10-13T23:24:48Z +http://www.semanlink.net/tag/henri_iv|prefLabel|Henri IV +http://www.semanlink.net/tag/henri_iv|creationDate|2013-10-13 +http://www.semanlink.net/tag/henri_iv|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/henri_iv|describedBy|https://fr.wikipedia.org/wiki/Henri_IV_de_France +http://www.semanlink.net/tag/henri_iv|uri|http://www.semanlink.net/tag/henri_iv +http://www.semanlink.net/tag/negative_sampling|creationTime|2020-04-25T16:53:40Z +http://www.semanlink.net/tag/negative_sampling|prefLabel|Negative Sampling +http://www.semanlink.net/tag/negative_sampling|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/negative_sampling|related|http://www.semanlink.net/tag/word2vec +http://www.semanlink.net/tag/negative_sampling|creationDate|2020-04-25 +http://www.semanlink.net/tag/negative_sampling|comment|Technique used in Word2Vec, which causes each training sample to update only a small percentage of the model’s weights. [src](doc:?uri=http%3A%2F%2Fmccormickml.com%2F2017%2F01%2F11%2Fword2vec-tutorial-part-2-negative-sampling%2F) +http://www.semanlink.net/tag/negative_sampling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/negative_sampling|uri|http://www.semanlink.net/tag/negative_sampling +http://www.semanlink.net/tag/negative_sampling|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/fps_ec_web_14|creationTime|2014-04-18T23:40:15Z +http://www.semanlink.net/tag/fps_ec_web_14|prefLabel|fps@EC-Web'14 +http://www.semanlink.net/tag/fps_ec_web_14|broader|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/fps_ec_web_14|broader|http://www.semanlink.net/tag/ec_web_14 +http://www.semanlink.net/tag/fps_ec_web_14|broader|http://www.semanlink.net/tag/c2gweb_and_product_description +http://www.semanlink.net/tag/fps_ec_web_14|broader|http://www.semanlink.net/tag/semantic_seo +http://www.semanlink.net/tag/fps_ec_web_14|broader|http://www.semanlink.net/tag/fps_paper +http://www.semanlink.net/tag/fps_ec_web_14|creationDate|2014-04-18 +http://www.semanlink.net/tag/fps_ec_web_14|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps_ec_web_14|uri|http://www.semanlink.net/tag/fps_ec_web_14 +http://www.semanlink.net/tag/fps_ec_web_14|broader_prefLabel|schema.org +http://www.semanlink.net/tag/fps_ec_web_14|broader_prefLabel|EC-Web'14 +http://www.semanlink.net/tag/fps_ec_web_14|broader_prefLabel|C2GWeb and Product description +http://www.semanlink.net/tag/fps_ec_web_14|broader_prefLabel|Semantic SEO +http://www.semanlink.net/tag/fps_ec_web_14|broader_prefLabel|fps: paper +http://www.semanlink.net/tag/fps_ec_web_14|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/fps_ec_web_14|broader_related|http://www.semanlink.net/tag/yahoo 
+http://www.semanlink.net/tag/fps_ec_web_14|broader_related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/fps_ec_web_14|broader_related|http://www.semanlink.net/tag/yandex +http://www.semanlink.net/tag/fps_ec_web_14|broader_related|http://www.semanlink.net/tag/microdata +http://www.semanlink.net/tag/fps_ec_web_14|broader_related|http://www.semanlink.net/tag/dan_brickley +http://www.semanlink.net/tag/fps_ec_web_14|broader_related|http://www.semanlink.net/tag/bing +http://www.semanlink.net/tag/fps_ec_web_14|broader_related|http://www.semanlink.net/tag/configuration_ontology +http://www.semanlink.net/tag/entity_type_prediction|creationTime|2021-06-14T16:24:23Z +http://www.semanlink.net/tag/entity_type_prediction|prefLabel|Entity type prediction +http://www.semanlink.net/tag/entity_type_prediction|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/entity_type_prediction|broader|http://www.semanlink.net/tag/entity_type +http://www.semanlink.net/tag/entity_type_prediction|creationDate|2021-06-14 +http://www.semanlink.net/tag/entity_type_prediction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_type_prediction|altLabel|Entity typing +http://www.semanlink.net/tag/entity_type_prediction|uri|http://www.semanlink.net/tag/entity_type_prediction +http://www.semanlink.net/tag/entity_type_prediction|broader_prefLabel|Entities +http://www.semanlink.net/tag/entity_type_prediction|broader_prefLabel|Entity type +http://www.semanlink.net/tag/virtual_knowledge_graph|creationTime|2019-08-03T20:02:05Z +http://www.semanlink.net/tag/virtual_knowledge_graph|prefLabel|Virtual knowledge graph +http://www.semanlink.net/tag/virtual_knowledge_graph|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/virtual_knowledge_graph|creationDate|2019-08-03 +http://www.semanlink.net/tag/virtual_knowledge_graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/virtual_knowledge_graph|uri|http://www.semanlink.net/tag/virtual_knowledge_graph +http://www.semanlink.net/tag/virtual_knowledge_graph|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/virtual_knowledge_graph|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/virtual_knowledge_graph|broader_altLabel|KG +http://www.semanlink.net/tag/freie_universitat_berlin|creationTime|2007-04-20T21:01:31Z +http://www.semanlink.net/tag/freie_universitat_berlin|prefLabel|Freie Universität Berlin +http://www.semanlink.net/tag/freie_universitat_berlin|broader|http://www.semanlink.net/tag/universite +http://www.semanlink.net/tag/freie_universitat_berlin|broader|http://www.semanlink.net/tag/berlin +http://www.semanlink.net/tag/freie_universitat_berlin|related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/freie_universitat_berlin|creationDate|2007-04-20 +http://www.semanlink.net/tag/freie_universitat_berlin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/freie_universitat_berlin|uri|http://www.semanlink.net/tag/freie_universitat_berlin +http://www.semanlink.net/tag/freie_universitat_berlin|broader_prefLabel|Université +http://www.semanlink.net/tag/freie_universitat_berlin|broader_prefLabel|Berlin +http://www.semanlink.net/tag/pantheon_paris|creationTime|2017-02-04T15:09:42Z +http://www.semanlink.net/tag/pantheon_paris|prefLabel|Panthéon (Paris) 
+http://www.semanlink.net/tag/pantheon_paris|broader|http://www.semanlink.net/tag/monuments_historiques +http://www.semanlink.net/tag/pantheon_paris|broader|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/pantheon_paris|creationDate|2017-02-04 +http://www.semanlink.net/tag/pantheon_paris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pantheon_paris|describedBy|https://fr.wikipedia.org/wiki/Panth%C3%A9on_(Paris) +http://www.semanlink.net/tag/pantheon_paris|uri|http://www.semanlink.net/tag/pantheon_paris +http://www.semanlink.net/tag/pantheon_paris|broader_prefLabel|Monuments historiques +http://www.semanlink.net/tag/pantheon_paris|broader_prefLabel|Paris +http://www.semanlink.net/tag/good_practice_when_generating_uris|creationTime|2007-11-17T16:11:20Z +http://www.semanlink.net/tag/good_practice_when_generating_uris|prefLabel|Good Practice When Generating URIs +http://www.semanlink.net/tag/good_practice_when_generating_uris|broader|http://www.semanlink.net/tag/minting_uris +http://www.semanlink.net/tag/good_practice_when_generating_uris|related|http://www.semanlink.net/tag/uri_reference +http://www.semanlink.net/tag/good_practice_when_generating_uris|creationDate|2007-11-17 +http://www.semanlink.net/tag/good_practice_when_generating_uris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/good_practice_when_generating_uris|uri|http://www.semanlink.net/tag/good_practice_when_generating_uris +http://www.semanlink.net/tag/good_practice_when_generating_uris|broader_prefLabel|Minting URIs +http://www.semanlink.net/tag/domain_knowledge_deep_learning|creationTime|2019-03-03T09:41:55Z +http://www.semanlink.net/tag/domain_knowledge_deep_learning|prefLabel|Domain Knowledge + Deep Learning +http://www.semanlink.net/tag/domain_knowledge_deep_learning|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/domain_knowledge_deep_learning|broader|http://www.semanlink.net/tag/ai_knowledge +http://www.semanlink.net/tag/domain_knowledge_deep_learning|creationDate|2019-03-03 +http://www.semanlink.net/tag/domain_knowledge_deep_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/domain_knowledge_deep_learning|uri|http://www.semanlink.net/tag/domain_knowledge_deep_learning +http://www.semanlink.net/tag/domain_knowledge_deep_learning|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/domain_knowledge_deep_learning|broader_prefLabel|AI + Knowledge +http://www.semanlink.net/tag/domain_knowledge_deep_learning|broader_altLabel|Domain Knowledge in AI +http://www.semanlink.net/tag/domain_knowledge_deep_learning|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/domain_knowledge_deep_learning|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/domain_knowledge_deep_learning|broader_related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/domain_knowledge_deep_learning|broader_related|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/notre_dame_de_paris|creationTime|2019-04-15T23:08:42Z +http://www.semanlink.net/tag/notre_dame_de_paris|prefLabel|Notre-Dame de Paris +http://www.semanlink.net/tag/notre_dame_de_paris|creationDate|2019-04-15 +http://www.semanlink.net/tag/notre_dame_de_paris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/notre_dame_de_paris|describedBy|https://fr.wikipedia.org/wiki/Cath%C3%A9drale_Notre-Dame_de_Paris +http://www.semanlink.net/tag/notre_dame_de_paris|uri|http://www.semanlink.net/tag/notre_dame_de_paris +http://www.semanlink.net/tag/sonia_braga|prefLabel|Sônia Braga +http://www.semanlink.net/tag/sonia_braga|broader|http://www.semanlink.net/tag/actrice +http://www.semanlink.net/tag/sonia_braga|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/sonia_braga|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sonia_braga|uri|http://www.semanlink.net/tag/sonia_braga +http://www.semanlink.net/tag/sonia_braga|broader_prefLabel|Actrice +http://www.semanlink.net/tag/sonia_braga|broader_prefLabel|Brésil +http://www.semanlink.net/tag/sonia_braga|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/musubi|creationTime|2012-05-30T15:13:33Z +http://www.semanlink.net/tag/musubi|prefLabel|Musubi +http://www.semanlink.net/tag/musubi|broader|http://www.semanlink.net/tag/social_networks +http://www.semanlink.net/tag/musubi|broader|http://www.semanlink.net/tag/telephone +http://www.semanlink.net/tag/musubi|creationDate|2012-05-30 +http://www.semanlink.net/tag/musubi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/musubi|uri|http://www.semanlink.net/tag/musubi +http://www.semanlink.net/tag/musubi|broader_prefLabel|Social Networks +http://www.semanlink.net/tag/musubi|broader_prefLabel|Téléphone +http://www.semanlink.net/tag/musubi|broader_altLabel|Téléphonie +http://www.semanlink.net/tag/biohackers|creationTime|2013-07-08T16:05:13Z +http://www.semanlink.net/tag/biohackers|prefLabel|Biohackers +http://www.semanlink.net/tag/biohackers|broader|http://www.semanlink.net/tag/hackers +http://www.semanlink.net/tag/biohackers|broader|http://www.semanlink.net/tag/ogm +http://www.semanlink.net/tag/biohackers|creationDate|2013-07-08 +http://www.semanlink.net/tag/biohackers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/biohackers|uri|http://www.semanlink.net/tag/biohackers +http://www.semanlink.net/tag/biohackers|broader_prefLabel|Hackers +http://www.semanlink.net/tag/biohackers|broader_prefLabel|GMO +http://www.semanlink.net/tag/biohackers|broader_altLabel|OGM +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|creationTime|2018-02-25T18:09:51Z +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|prefLabel|Word embeddings with lexical resources +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|broader|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|broader|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|related|http://www.semanlink.net/tag/text_corpora_and_lexical_resources +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|related|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|related|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|creationDate|2018-02-25 
+http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|uri|http://www.semanlink.net/tag/word_embeddings_with_lexical_resources +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|broader_prefLabel|Sense embeddings +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|broader_prefLabel|Word embeddings +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|broader_altLabel|Word Embedding +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|broader_altLabel|Plongement lexical +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|broader_related|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|broader_related|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/word_embeddings_with_lexical_resources|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/publication_scientifique|prefLabel|Publication scientifique +http://www.semanlink.net/tag/publication_scientifique|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/publication_scientifique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/publication_scientifique|uri|http://www.semanlink.net/tag/publication_scientifique +http://www.semanlink.net/tag/publication_scientifique|broader_prefLabel|Science +http://www.semanlink.net/tag/publication_scientifique|broader_altLabel|sciences +http://www.semanlink.net/tag/googling|creationTime|2007-12-20T15:25:11Z +http://www.semanlink.net/tag/googling|prefLabel|Googling +http://www.semanlink.net/tag/googling|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/googling|creationDate|2007-12-20 +http://www.semanlink.net/tag/googling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/googling|uri|http://www.semanlink.net/tag/googling +http://www.semanlink.net/tag/googling|broader_prefLabel|Google +http://www.semanlink.net/tag/googling|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/cms|creationTime|2010-12-17T00:20:16Z +http://www.semanlink.net/tag/cms|prefLabel|CMS +http://www.semanlink.net/tag/cms|creationDate|2010-12-17 +http://www.semanlink.net/tag/cms|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cms|uri|http://www.semanlink.net/tag/cms +http://www.semanlink.net/tag/linked_data|creationTime|2008-02-01T15:15:24Z +http://www.semanlink.net/tag/linked_data|prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/richard_cyganiak 
+http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data|related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/linked_data|creationDate|2007-01-02 +http://www.semanlink.net/tag/linked_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data|altLabel|LD +http://www.semanlink.net/tag/linked_data|uri|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/linked_data|broader_altLabel|sw +http://www.semanlink.net/tag/linked_data|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/nlp_short_texts|creationTime|2017-06-09T14:39:13Z +http://www.semanlink.net/tag/nlp_short_texts|prefLabel|NLP: short texts +http://www.semanlink.net/tag/nlp_short_texts|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/nlp_short_texts|creationDate|2017-06-09 +http://www.semanlink.net/tag/nlp_short_texts|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_short_texts|uri|http://www.semanlink.net/tag/nlp_short_texts +http://www.semanlink.net/tag/nlp_short_texts|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/chretiente|creationTime|2007-10-14T23:11:00Z +http://www.semanlink.net/tag/chretiente|prefLabel|Chrétienté +http://www.semanlink.net/tag/chretiente|broader|http://www.semanlink.net/tag/religion +http://www.semanlink.net/tag/chretiente|creationDate|2007-10-14 +http://www.semanlink.net/tag/chretiente|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chretiente|uri|http://www.semanlink.net/tag/chretiente +http://www.semanlink.net/tag/chretiente|broader_prefLabel|Religion +http://www.semanlink.net/tag/ldow2012|creationTime|2012-04-14T12:12:55Z +http://www.semanlink.net/tag/ldow2012|prefLabel|LDOW2012 +http://www.semanlink.net/tag/ldow2012|broader|http://www.semanlink.net/tag/www_2012 +http://www.semanlink.net/tag/ldow2012|broader|http://www.semanlink.net/tag/ldow +http://www.semanlink.net/tag/ldow2012|creationDate|2012-04-14 +http://www.semanlink.net/tag/ldow2012|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ldow2012|homepage|http://events.linkeddata.org/ldow2012/ +http://www.semanlink.net/tag/ldow2012|uri|http://www.semanlink.net/tag/ldow2012 +http://www.semanlink.net/tag/ldow2012|broader_prefLabel|WWW 2012 +http://www.semanlink.net/tag/ldow2012|broader_prefLabel|LDOW +http://www.semanlink.net/tag/kg_embeddings_library|creationTime|2020-07-07T19:17:33Z +http://www.semanlink.net/tag/kg_embeddings_library|prefLabel|KG Embeddings Library +http://www.semanlink.net/tag/kg_embeddings_library|broader|http://www.semanlink.net/tag/machine_learning_library 
+http://www.semanlink.net/tag/kg_embeddings_library|broader|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/kg_embeddings_library|broader|http://www.semanlink.net/tag/library_code +http://www.semanlink.net/tag/kg_embeddings_library|creationDate|2020-07-07 +http://www.semanlink.net/tag/kg_embeddings_library|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kg_embeddings_library|altLabel|Knowledge Graph Embeddings Library +http://www.semanlink.net/tag/kg_embeddings_library|uri|http://www.semanlink.net/tag/kg_embeddings_library +http://www.semanlink.net/tag/kg_embeddings_library|broader_prefLabel|Machine Learning library +http://www.semanlink.net/tag/kg_embeddings_library|broader_prefLabel|Knowledge Graph Embeddings +http://www.semanlink.net/tag/kg_embeddings_library|broader_prefLabel|Library (code) +http://www.semanlink.net/tag/kg_embeddings_library|broader_altLabel|KGE +http://www.semanlink.net/tag/kg_embeddings_library|broader_altLabel|Knowledge graph embedding +http://www.semanlink.net/tag/kg_embeddings_library|broader_altLabel|KG embedding +http://www.semanlink.net/tag/kg_embeddings_library|broader_related|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/kg_embeddings_library|broader_related|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/kg_embeddings_library|broader_related|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/kg_embeddings_library|broader_related|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/kg_embeddings_library|broader_related|http://www.semanlink.net/tag/frameworks +http://www.semanlink.net/tag/heinrich_barth|creationTime|2020-07-05T16:21:16Z +http://www.semanlink.net/tag/heinrich_barth|prefLabel|Heinrich Barth +http://www.semanlink.net/tag/heinrich_barth|broader|http://www.semanlink.net/tag/allemagne +http://www.semanlink.net/tag/heinrich_barth|broader|http://www.semanlink.net/tag/histoire_de_l_afrique +http://www.semanlink.net/tag/heinrich_barth|broader|http://www.semanlink.net/tag/explorateur +http://www.semanlink.net/tag/heinrich_barth|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/heinrich_barth|related|http://www.semanlink.net/tag/sahara +http://www.semanlink.net/tag/heinrich_barth|related|http://www.semanlink.net/tag/tombouctou +http://www.semanlink.net/tag/heinrich_barth|related|http://www.semanlink.net/tag/zinder +http://www.semanlink.net/tag/heinrich_barth|related|http://www.semanlink.net/tag/haoussa +http://www.semanlink.net/tag/heinrich_barth|creationDate|2020-07-05 +http://www.semanlink.net/tag/heinrich_barth|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/heinrich_barth|describedBy|https://de.wikipedia.org/wiki/Heinrich_Barth +http://www.semanlink.net/tag/heinrich_barth|uri|http://www.semanlink.net/tag/heinrich_barth +http://www.semanlink.net/tag/heinrich_barth|broader_prefLabel|Allemagne +http://www.semanlink.net/tag/heinrich_barth|broader_prefLabel|Histoire de l'Afrique +http://www.semanlink.net/tag/heinrich_barth|broader_prefLabel|Explorateur +http://www.semanlink.net/tag/heinrich_barth|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/heinrich_barth|broader_altLabel|Germany +http://www.semanlink.net/tag/heinrich_barth|broader_altLabel|Deutschland +http://www.semanlink.net/tag/denisovan|creationTime|2013-11-28T23:17:23Z 
+http://www.semanlink.net/tag/denisovan|prefLabel|Denisovan
+http://www.semanlink.net/tag/denisovan|broader|http://www.semanlink.net/tag/paleoanthropology
+http://www.semanlink.net/tag/denisovan|broader|http://www.semanlink.net/tag/origines_de_l_homme
+http://www.semanlink.net/tag/denisovan|creationDate|2013-11-28
+http://www.semanlink.net/tag/denisovan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/denisovan|uri|http://www.semanlink.net/tag/denisovan
+http://www.semanlink.net/tag/denisovan|broader_prefLabel|Paleoanthropology
+http://www.semanlink.net/tag/denisovan|broader_prefLabel|Origines de l'homme
+http://www.semanlink.net/tag/denisovan|broader_altLabel|Paléontologie humaine
+http://www.semanlink.net/tag/spring_boot|creationTime|2018-10-10T14:18:19Z
+http://www.semanlink.net/tag/spring_boot|prefLabel|Spring-Boot
+http://www.semanlink.net/tag/spring_boot|creationDate|2018-10-10
+http://www.semanlink.net/tag/spring_boot|comment|"In Eclipse: maven run with the goal spring-boot:run
+
+jersey @Path -> springboot @RequestMapping
+jersey @PathParam -> springboot @PathVariable
+jersey @QueryParam -> springboot @RequestParam
+"
+http://www.semanlink.net/tag/spring_boot|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/spring_boot|homepage|http://spring.io/projects/spring-boot
+http://www.semanlink.net/tag/spring_boot|altLabel|springboot
+http://www.semanlink.net/tag/spring_boot|uri|http://www.semanlink.net/tag/spring_boot
+http://www.semanlink.net/tag/web_search|creationTime|2010-04-26T13:20:54Z
+http://www.semanlink.net/tag/web_search|prefLabel|Web search
+http://www.semanlink.net/tag/web_search|broader|http://www.semanlink.net/tag/search_engines
+http://www.semanlink.net/tag/web_search|creationDate|2010-04-26
+http://www.semanlink.net/tag/web_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/web_search|uri|http://www.semanlink.net/tag/web_search
+http://www.semanlink.net/tag/web_search|broader_prefLabel|Search Engines
+http://www.semanlink.net/tag/web_search|broader_altLabel|Moteur de recherche
+http://www.semanlink.net/tag/rest|prefLabel|REST
+http://www.semanlink.net/tag/rest|broader|http://www.semanlink.net/tag/roy_t_fielding
+http://www.semanlink.net/tag/rest|broader|http://www.semanlink.net/tag/informatique
+http://www.semanlink.net/tag/rest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rest|uri|http://www.semanlink.net/tag/rest
+http://www.semanlink.net/tag/rest|broader_prefLabel|Roy T. Fielding
+http://www.semanlink.net/tag/rest|broader_prefLabel|Informatique
+http://www.semanlink.net/tag/apres_guerre|creationTime|2016-05-25T23:49:10Z
+http://www.semanlink.net/tag/apres_guerre|prefLabel|Après guerre
+http://www.semanlink.net/tag/apres_guerre|creationDate|2016-05-25
+http://www.semanlink.net/tag/apres_guerre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/apres_guerre|uri|http://www.semanlink.net/tag/apres_guerre
+http://www.semanlink.net/tag/cancer|prefLabel|Cancer
+http://www.semanlink.net/tag/cancer|broader|http://www.semanlink.net/tag/maladie
+http://www.semanlink.net/tag/cancer|broader|http://www.semanlink.net/tag/grands_problemes
+http://www.semanlink.net/tag/cancer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cancer|uri|http://www.semanlink.net/tag/cancer
+http://www.semanlink.net/tag/cancer|broader_prefLabel|Maladie
+http://www.semanlink.net/tag/cancer|broader_prefLabel|Grands problèmes
+http://www.semanlink.net/tag/jena|prefLabel|Jena
+http://www.semanlink.net/tag/jena|broader|http://www.semanlink.net/tag/rdf_tools
+http://www.semanlink.net/tag/jena|broader|http://www.semanlink.net/tag/semantic_web_dev
+http://www.semanlink.net/tag/jena|broader|http://www.semanlink.net/tag/rdf_framework
+http://www.semanlink.net/tag/jena|broader|http://www.semanlink.net/tag/apache_org
+http://www.semanlink.net/tag/jena|broader|http://www.semanlink.net/tag/java_dev
+http://www.semanlink.net/tag/jena|broader|http://www.semanlink.net/tag/rdf
+http://www.semanlink.net/tag/jena|broader|http://www.semanlink.net/tag/semantic_web_tools
+http://www.semanlink.net/tag/jena|related|http://www.semanlink.net/tag/hp
+http://www.semanlink.net/tag/jena|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/jena|homepage|http://jena.apache.org/
+http://www.semanlink.net/tag/jena|uri|http://www.semanlink.net/tag/jena
+http://www.semanlink.net/tag/jena|broader_prefLabel|RDF Tools
+http://www.semanlink.net/tag/jena|broader_prefLabel|Semantic Web Dev
+http://www.semanlink.net/tag/jena|broader_prefLabel|RDF Framework
+http://www.semanlink.net/tag/jena|broader_prefLabel|apache.org
+http://www.semanlink.net/tag/jena|broader_prefLabel|Java dev
+http://www.semanlink.net/tag/jena|broader_prefLabel|RDF
+http://www.semanlink.net/tag/jena|broader_prefLabel|Semantic Web : Tools
+http://www.semanlink.net/tag/jena|broader_related|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/jena|broader_related|http://www.semanlink.net/tag/ora_lassila
+http://www.semanlink.net/tag/jena|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/jena|broader_related|http://www.semanlink.net/tag/guha
+http://www.semanlink.net/tag/jena|broader_related|http://www.semanlink.net/tag/grddl
+http://www.semanlink.net/tag/bernard_vatant|creationTime|2008-05-15T23:25:24Z
+http://www.semanlink.net/tag/bernard_vatant|prefLabel|Bernard Vatant
+http://www.semanlink.net/tag/bernard_vatant|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/bernard_vatant|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/bernard_vatant|related|http://www.semanlink.net/tag/linked_data
+http://www.semanlink.net/tag/bernard_vatant|creationDate|2008-05-15
+http://www.semanlink.net/tag/bernard_vatant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/bernard_vatant|uri|http://www.semanlink.net/tag/bernard_vatant +http://www.semanlink.net/tag/bernard_vatant|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/bernard_vatant|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/bernard_vatant|broader_altLabel|Technical guys +http://www.semanlink.net/tag/ernie|creationTime|2020-12-15T14:07:36Z +http://www.semanlink.net/tag/ernie|prefLabel|ERNIE +http://www.semanlink.net/tag/ernie|broader|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/ernie|broader|http://www.semanlink.net/tag/knowledge_graph_augmented_language_models +http://www.semanlink.net/tag/ernie|broader|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/tag/ernie|related|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/ernie|creationDate|2020-12-15 +http://www.semanlink.net/tag/ernie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ernie|uri|http://www.semanlink.net/tag/ernie +http://www.semanlink.net/tag/ernie|broader_prefLabel|Pre-Trained Language Models +http://www.semanlink.net/tag/ernie|broader_prefLabel|KG-augmented Language Models +http://www.semanlink.net/tag/ernie|broader_prefLabel|Knowledge Graphs in NLP +http://www.semanlink.net/tag/ernie|broader_altLabel|PreTrained Language Models +http://www.semanlink.net/tag/ernie|broader_altLabel|Deep pre-training in NLP +http://www.semanlink.net/tag/ernie|broader_altLabel|Knowledge Graph-augmented Language Models +http://www.semanlink.net/tag/ernie|broader_altLabel|KG-augmented LMs +http://www.semanlink.net/tag/ernie|broader_related|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/ernie|broader_related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/sorting|creationTime|2021-03-25T17:24:53Z +http://www.semanlink.net/tag/sorting|prefLabel|Sorting +http://www.semanlink.net/tag/sorting|creationDate|2021-03-25 +http://www.semanlink.net/tag/sorting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sorting|uri|http://www.semanlink.net/tag/sorting +http://www.semanlink.net/tag/535|prefLabel|535 +http://www.semanlink.net/tag/535|broader|http://www.semanlink.net/tag/krakatoa +http://www.semanlink.net/tag/535|broader|http://www.semanlink.net/tag/catastrophe_naturelle +http://www.semanlink.net/tag/535|broader|http://www.semanlink.net/tag/eruption_volcanique +http://www.semanlink.net/tag/535|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/535|broader|http://www.semanlink.net/tag/accident_climatique +http://www.semanlink.net/tag/535|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/535|describedBy|https://en.wikipedia.org/wiki/Climate_changes_of_535%E2%80%93536 +http://www.semanlink.net/tag/535|uri|http://www.semanlink.net/tag/535 +http://www.semanlink.net/tag/535|broader_prefLabel|Krakatoa +http://www.semanlink.net/tag/535|broader_prefLabel|Catastrophe naturelle +http://www.semanlink.net/tag/535|broader_prefLabel|Eruption volcanique +http://www.semanlink.net/tag/535|broader_prefLabel|Histoire +http://www.semanlink.net/tag/535|broader_prefLabel|Accident climatique +http://www.semanlink.net/tag/genetique_histoire|creationTime|2019-03-24T19:51:38Z +http://www.semanlink.net/tag/genetique_histoire|prefLabel|Génétique + Histoire 
+http://www.semanlink.net/tag/genetique_histoire|broader|http://www.semanlink.net/tag/adn +http://www.semanlink.net/tag/genetique_histoire|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/genetique_histoire|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/genetique_histoire|creationDate|2019-03-24 +http://www.semanlink.net/tag/genetique_histoire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/genetique_histoire|uri|http://www.semanlink.net/tag/genetique_histoire +http://www.semanlink.net/tag/genetique_histoire|broader_prefLabel|ADN +http://www.semanlink.net/tag/genetique_histoire|broader_prefLabel|Genetics +http://www.semanlink.net/tag/genetique_histoire|broader_prefLabel|Génétique +http://www.semanlink.net/tag/genetique_histoire|broader_prefLabel|Histoire +http://www.semanlink.net/tag/genetique_histoire|broader_altLabel|DNA +http://www.semanlink.net/tag/roger_penrose|creationTime|2016-06-03T22:12:15Z +http://www.semanlink.net/tag/roger_penrose|prefLabel|Roger Penrose +http://www.semanlink.net/tag/roger_penrose|creationDate|2016-06-03 +http://www.semanlink.net/tag/roger_penrose|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/roger_penrose|describedBy|https://en.wikipedia.org/wiki/Roger_Penrose +http://www.semanlink.net/tag/roger_penrose|uri|http://www.semanlink.net/tag/roger_penrose +http://www.semanlink.net/tag/erta_ale|creationTime|2008-11-21T23:25:29Z +http://www.semanlink.net/tag/erta_ale|prefLabel|Erta Ale +http://www.semanlink.net/tag/erta_ale|broader|http://www.semanlink.net/tag/volcan +http://www.semanlink.net/tag/erta_ale|broader|http://www.semanlink.net/tag/lac_de_lave +http://www.semanlink.net/tag/erta_ale|broader|http://www.semanlink.net/tag/ethiopie +http://www.semanlink.net/tag/erta_ale|creationDate|2008-11-21 +http://www.semanlink.net/tag/erta_ale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/erta_ale|describedBy|https://fr.wikipedia.org/wiki/Erta_Ale +http://www.semanlink.net/tag/erta_ale|uri|http://www.semanlink.net/tag/erta_ale +http://www.semanlink.net/tag/erta_ale|broader_prefLabel|Volcan +http://www.semanlink.net/tag/erta_ale|broader_prefLabel|Lac de lave +http://www.semanlink.net/tag/erta_ale|broader_prefLabel|Ethiopie +http://www.semanlink.net/tag/linked_data_platform|creationTime|2012-07-30T23:52:43Z +http://www.semanlink.net/tag/linked_data_platform|prefLabel|Linked Data Platform +http://www.semanlink.net/tag/linked_data_platform|broader|http://www.semanlink.net/tag/read_write_linked_data +http://www.semanlink.net/tag/linked_data_platform|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_platform|broader|http://www.semanlink.net/tag/w3c_working_group +http://www.semanlink.net/tag/linked_data_platform|creationDate|2012-07-30 +http://www.semanlink.net/tag/linked_data_platform|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_platform|altLabel|LDP +http://www.semanlink.net/tag/linked_data_platform|uri|http://www.semanlink.net/tag/linked_data_platform +http://www.semanlink.net/tag/linked_data_platform|broader_prefLabel|Read-Write Linked Data +http://www.semanlink.net/tag/linked_data_platform|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_platform|broader_prefLabel|W3C Working group +http://www.semanlink.net/tag/linked_data_platform|broader_altLabel|RW Linked Data 
+http://www.semanlink.net/tag/linked_data_platform|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data_platform|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/artificial_neural_network|creationTime|2014-03-26T10:49:02Z +http://www.semanlink.net/tag/artificial_neural_network|prefLabel|Neural networks +http://www.semanlink.net/tag/artificial_neural_network|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/artificial_neural_network|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/artificial_neural_network|broader|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/artificial_neural_network|creationDate|2014-03-26 +http://www.semanlink.net/tag/artificial_neural_network|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/artificial_neural_network|describedBy|https://en.wikipedia.org/wiki/Neural_networks +http://www.semanlink.net/tag/artificial_neural_network|describedBy|https://en.wikipedia.org/wiki/Artificial_neural_network +http://www.semanlink.net/tag/artificial_neural_network|altLabel|Artificial neural network +http://www.semanlink.net/tag/artificial_neural_network|altLabel|ANN +http://www.semanlink.net/tag/artificial_neural_network|altLabel|NN +http://www.semanlink.net/tag/artificial_neural_network|uri|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/artificial_neural_network|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/artificial_neural_network|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/artificial_neural_network|broader_prefLabel|Data mining +http://www.semanlink.net/tag/artificial_neural_network|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/artificial_neural_network|broader_altLabel|AI +http://www.semanlink.net/tag/artificial_neural_network|broader_altLabel|IA 
+http://www.semanlink.net/tag/artificial_neural_network|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/referentiel_des_operations|creationTime|2007-03-20T21:40:11Z +http://www.semanlink.net/tag/referentiel_des_operations|prefLabel|Référentiel des opérations +http://www.semanlink.net/tag/referentiel_des_operations|broader|http://www.semanlink.net/tag/apv_evolution +http://www.semanlink.net/tag/referentiel_des_operations|broader|http://www.semanlink.net/tag/sw_in_technical_automotive_documentation +http://www.semanlink.net/tag/referentiel_des_operations|creationDate|2007-03-20 +http://www.semanlink.net/tag/referentiel_des_operations|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/referentiel_des_operations|uri|http://www.semanlink.net/tag/referentiel_des_operations +http://www.semanlink.net/tag/referentiel_des_operations|broader_prefLabel|APV evolution +http://www.semanlink.net/tag/referentiel_des_operations|broader_prefLabel|SW in Technical Automotive Documentation +http://www.semanlink.net/tag/euro_2016|creationTime|2016-06-12T09:22:26Z +http://www.semanlink.net/tag/euro_2016|prefLabel|Euro 2016 +http://www.semanlink.net/tag/euro_2016|broader|http://www.semanlink.net/tag/football +http://www.semanlink.net/tag/euro_2016|creationDate|2016-06-12 +http://www.semanlink.net/tag/euro_2016|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/euro_2016|uri|http://www.semanlink.net/tag/euro_2016 +http://www.semanlink.net/tag/euro_2016|broader_prefLabel|Football +http://www.semanlink.net/tag/chalutage_en_eaux_profondes|creationTime|2013-12-10T14:32:12Z +http://www.semanlink.net/tag/chalutage_en_eaux_profondes|prefLabel|Chalutage en eaux profondes +http://www.semanlink.net/tag/chalutage_en_eaux_profondes|broader|http://www.semanlink.net/tag/peche +http://www.semanlink.net/tag/chalutage_en_eaux_profondes|creationDate|2013-12-10 +http://www.semanlink.net/tag/chalutage_en_eaux_profondes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chalutage_en_eaux_profondes|altLabel|Pêche profonde +http://www.semanlink.net/tag/chalutage_en_eaux_profondes|uri|http://www.semanlink.net/tag/chalutage_en_eaux_profondes +http://www.semanlink.net/tag/chalutage_en_eaux_profondes|broader_prefLabel|Pêche +http://www.semanlink.net/tag/www08|creationTime|2008-02-01T15:14:14Z +http://www.semanlink.net/tag/www08|prefLabel|WWW 2008 +http://www.semanlink.net/tag/www08|broader|http://www.semanlink.net/tag/j_y_etais +http://www.semanlink.net/tag/www08|broader|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/www08|broader|http://www.semanlink.net/tag/pekin +http://www.semanlink.net/tag/www08|creationDate|2008-02-01 +http://www.semanlink.net/tag/www08|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/www08|homepage|http://www2008.org/ +http://www.semanlink.net/tag/www08|uri|http://www.semanlink.net/tag/www08 +http://www.semanlink.net/tag/www08|broader_prefLabel|J'y étais +http://www.semanlink.net/tag/www08|broader_prefLabel|TheWebConf +http://www.semanlink.net/tag/www08|broader_prefLabel|Pékin +http://www.semanlink.net/tag/www08|broader_altLabel|WWW Conference +http://www.semanlink.net/tag/http_get_vs_post|creationTime|2010-06-04T09:56:01Z +http://www.semanlink.net/tag/http_get_vs_post|prefLabel|HTTP GET vs POST 
+http://www.semanlink.net/tag/http_get_vs_post|broader|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.semanlink.net/tag/http_get_vs_post|creationDate|2010-06-04 +http://www.semanlink.net/tag/http_get_vs_post|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/http_get_vs_post|uri|http://www.semanlink.net/tag/http_get_vs_post +http://www.semanlink.net/tag/http_get_vs_post|broader_prefLabel|Web architecture +http://www.semanlink.net/tag/web_dev|prefLabel|Web dev +http://www.semanlink.net/tag/web_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_dev|altLabel|Web app dev +http://www.semanlink.net/tag/web_dev|uri|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/graph_convolutional_networks|creationTime|2019-01-30T12:55:04Z +http://www.semanlink.net/tag/graph_convolutional_networks|prefLabel|Graph Convolutional Networks +http://www.semanlink.net/tag/graph_convolutional_networks|broader|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/tag/graph_convolutional_networks|broader|http://www.semanlink.net/tag/convolutional_neural_network +http://www.semanlink.net/tag/graph_convolutional_networks|related|http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings +http://www.semanlink.net/tag/graph_convolutional_networks|creationDate|2019-01-30 +http://www.semanlink.net/tag/graph_convolutional_networks|comment|"Graph Convolutional Networks (GCNs) strike a balance between modeling the full structure of the +graph dynamically, as the tensor model does, and modeling the local neighbourhood structure through +extracted features (as substructure counting methods and RDF2Vec do). ([source](/doc/2019/08/the_knowledge_graph_as_the_defa)) +" +http://www.semanlink.net/tag/graph_convolutional_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_convolutional_networks|altLabel|Graph Convolutional Network +http://www.semanlink.net/tag/graph_convolutional_networks|altLabel|GCN +http://www.semanlink.net/tag/graph_convolutional_networks|uri|http://www.semanlink.net/tag/graph_convolutional_networks +http://www.semanlink.net/tag/graph_convolutional_networks|broader_prefLabel|Graph neural networks +http://www.semanlink.net/tag/graph_convolutional_networks|broader_prefLabel|Convolutional neural network +http://www.semanlink.net/tag/graph_convolutional_networks|broader_altLabel|GNN +http://www.semanlink.net/tag/graph_convolutional_networks|broader_altLabel|Convnets +http://www.semanlink.net/tag/graph_convolutional_networks|broader_altLabel|CNN +http://www.semanlink.net/tag/graph_convolutional_networks|broader_altLabel|Convolutional neural networks +http://www.semanlink.net/tag/graph_convolutional_networks|broader_altLabel|Convnet +http://www.semanlink.net/tag/graph_convolutional_networks|broader_related|http://www.semanlink.net/tag/yann_lecun +http://www.semanlink.net/tag/ulrike_sattler|creationTime|2010-09-06T22:06:18Z +http://www.semanlink.net/tag/ulrike_sattler|prefLabel|Ulrike Sattler +http://www.semanlink.net/tag/ulrike_sattler|related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/ulrike_sattler|related|http://www.semanlink.net/tag/owled_2007_and_fps +http://www.semanlink.net/tag/ulrike_sattler|creationDate|2010-09-06 +http://www.semanlink.net/tag/ulrike_sattler|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/ulrike_sattler|uri|http://www.semanlink.net/tag/ulrike_sattler +http://www.semanlink.net/tag/fast_ai_course|creationTime|2019-01-13T11:14:14Z +http://www.semanlink.net/tag/fast_ai_course|prefLabel|Fast.ai course +http://www.semanlink.net/tag/fast_ai_course|broader|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/tag/fast_ai_course|broader|http://www.semanlink.net/tag/mooc +http://www.semanlink.net/tag/fast_ai_course|creationDate|2019-01-13 +http://www.semanlink.net/tag/fast_ai_course|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fast_ai_course|homepage|https://course.fast.ai/ +http://www.semanlink.net/tag/fast_ai_course|uri|http://www.semanlink.net/tag/fast_ai_course +http://www.semanlink.net/tag/fast_ai_course|broader_prefLabel|fast.ai +http://www.semanlink.net/tag/fast_ai_course|broader_prefLabel|MOOC +http://www.semanlink.net/tag/fast_ai_course|broader_altLabel|fastai +http://www.semanlink.net/tag/bosch|creationTime|2021-08-24T02:40:03Z +http://www.semanlink.net/tag/bosch|prefLabel|Bosch +http://www.semanlink.net/tag/bosch|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/bosch|creationDate|2021-08-24 +http://www.semanlink.net/tag/bosch|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bosch|uri|http://www.semanlink.net/tag/bosch +http://www.semanlink.net/tag/bosch|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/intent_detection|creationTime|2019-12-14T01:06:49Z +http://www.semanlink.net/tag/intent_detection|prefLabel|Intent detection +http://www.semanlink.net/tag/intent_detection|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/intent_detection|broader|http://www.semanlink.net/tag/chatbot +http://www.semanlink.net/tag/intent_detection|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/intent_detection|creationDate|2019-12-14 +http://www.semanlink.net/tag/intent_detection|comment|Intent classification: predicting the intent of a query +http://www.semanlink.net/tag/intent_detection|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/intent_detection|altLabel|intent detection +http://www.semanlink.net/tag/intent_detection|altLabel|intent classification +http://www.semanlink.net/tag/intent_detection|uri|http://www.semanlink.net/tag/intent_detection +http://www.semanlink.net/tag/intent_detection|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/intent_detection|broader_prefLabel|Chatbots +http://www.semanlink.net/tag/intent_detection|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/intent_detection|broader_altLabel|Chatbot +http://www.semanlink.net/tag/intent_detection|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/television|prefLabel|Télévision +http://www.semanlink.net/tag/television|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/television|altLabel|TV +http://www.semanlink.net/tag/television|uri|http://www.semanlink.net/tag/television +http://www.semanlink.net/tag/rdf_next_steps|creationTime|2010-08-20T13:14:35Z +http://www.semanlink.net/tag/rdf_next_steps|prefLabel|RDF Next Steps +http://www.semanlink.net/tag/rdf_next_steps|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_next_steps|creationDate|2010-08-20 +http://www.semanlink.net/tag/rdf_next_steps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/rdf_next_steps|uri|http://www.semanlink.net/tag/rdf_next_steps +http://www.semanlink.net/tag/rdf_next_steps|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_next_steps|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_next_steps|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_next_steps|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_next_steps|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_next_steps|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/machine_learning_course|creationTime|2018-03-12T12:53:12Z +http://www.semanlink.net/tag/machine_learning_course|prefLabel|Machine Learning Course +http://www.semanlink.net/tag/machine_learning_course|broader|http://www.semanlink.net/tag/online_course_materials +http://www.semanlink.net/tag/machine_learning_course|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/machine_learning_course|creationDate|2018-03-12 +http://www.semanlink.net/tag/machine_learning_course|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/machine_learning_course|uri|http://www.semanlink.net/tag/machine_learning_course +http://www.semanlink.net/tag/machine_learning_course|broader_prefLabel|Online Course Materials +http://www.semanlink.net/tag/machine_learning_course|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/machine_learning_course|broader_altLabel|ML +http://www.semanlink.net/tag/machine_learning_course|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/machine_learning_course|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/semantic_fingerprints|creationTime|2017-07-10T15:06:10Z +http://www.semanlink.net/tag/semantic_fingerprints|prefLabel|Semantic fingerprints +http://www.semanlink.net/tag/semantic_fingerprints|creationDate|2017-07-10 +http://www.semanlink.net/tag/semantic_fingerprints|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_fingerprints|uri|http://www.semanlink.net/tag/semantic_fingerprints +http://www.semanlink.net/tag/logistic_regression|creationTime|2016-01-11T16:50:52Z +http://www.semanlink.net/tag/logistic_regression|prefLabel|Logistic regression +http://www.semanlink.net/tag/logistic_regression|broader|http://www.semanlink.net/tag/regression_analysis +http://www.semanlink.net/tag/logistic_regression|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/logistic_regression|related|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/logistic_regression|creationDate|2016-01-11 +http://www.semanlink.net/tag/logistic_regression|comment|regression model where the dependent variable is categorical. 
+http://www.semanlink.net/tag/logistic_regression|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/logistic_regression|describedBy|https://en.wikipedia.org/wiki/Logistic_regression +http://www.semanlink.net/tag/logistic_regression|uri|http://www.semanlink.net/tag/logistic_regression +http://www.semanlink.net/tag/logistic_regression|broader_prefLabel|Regression analysis +http://www.semanlink.net/tag/logistic_regression|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/fado_tropical|creationTime|2007-02-22T23:18:57Z +http://www.semanlink.net/tag/fado_tropical|prefLabel|Fado tropical +http://www.semanlink.net/tag/fado_tropical|broader|http://www.semanlink.net/tag/portugal +http://www.semanlink.net/tag/fado_tropical|broader|http://www.semanlink.net/tag/chico_buarque +http://www.semanlink.net/tag/fado_tropical|broader|http://www.semanlink.net/tag/conquistadores +http://www.semanlink.net/tag/fado_tropical|broader|http://www.semanlink.net/tag/poesie +http://www.semanlink.net/tag/fado_tropical|broader|http://www.semanlink.net/tag/chanson +http://www.semanlink.net/tag/fado_tropical|creationDate|2007-02-22 +http://www.semanlink.net/tag/fado_tropical|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fado_tropical|uri|http://www.semanlink.net/tag/fado_tropical +http://www.semanlink.net/tag/fado_tropical|broader_prefLabel|Portugal +http://www.semanlink.net/tag/fado_tropical|broader_prefLabel|Chico Buarque +http://www.semanlink.net/tag/fado_tropical|broader_prefLabel|Conquistadores +http://www.semanlink.net/tag/fado_tropical|broader_prefLabel|Poésie +http://www.semanlink.net/tag/fado_tropical|broader_prefLabel|Chanson +http://www.semanlink.net/tag/fado_tropical|broader_related|http://www.semanlink.net/tag/amerique_latine +http://www.semanlink.net/tag/fado_tropical|broader_related|http://www.semanlink.net/tag/espagne +http://www.semanlink.net/tag/kapuscinski|creationTime|2007-02-23T21:15:05Z +http://www.semanlink.net/tag/kapuscinski|prefLabel|Kapuscinski +http://www.semanlink.net/tag/kapuscinski|broader|http://www.semanlink.net/tag/pologne +http://www.semanlink.net/tag/kapuscinski|broader|http://www.semanlink.net/tag/journaliste +http://www.semanlink.net/tag/kapuscinski|broader|http://www.semanlink.net/tag/litterature +http://www.semanlink.net/tag/kapuscinski|creationDate|2007-02-23 +http://www.semanlink.net/tag/kapuscinski|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kapuscinski|uri|http://www.semanlink.net/tag/kapuscinski +http://www.semanlink.net/tag/kapuscinski|broader_prefLabel|Pologne +http://www.semanlink.net/tag/kapuscinski|broader_prefLabel|Journaliste +http://www.semanlink.net/tag/kapuscinski|broader_prefLabel|Littérature +http://www.semanlink.net/tag/kapuscinski|broader_related|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/ciao_vito|prefLabel|Ciao Vito +http://www.semanlink.net/tag/ciao_vito|broader|http://www.semanlink.net/tag/restaurant +http://www.semanlink.net/tag/ciao_vito|broader|http://www.semanlink.net/tag/portland_or +http://www.semanlink.net/tag/ciao_vito|broader|http://www.semanlink.net/tag/vito +http://www.semanlink.net/tag/ciao_vito|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ciao_vito|homepage|http://www.ciaovito.net/ +http://www.semanlink.net/tag/ciao_vito|uri|http://www.semanlink.net/tag/ciao_vito +http://www.semanlink.net/tag/ciao_vito|broader_prefLabel|Restaurant 
+http://www.semanlink.net/tag/ciao_vito|broader_prefLabel|Portland (OR) +http://www.semanlink.net/tag/ciao_vito|broader_prefLabel|Vito +http://www.semanlink.net/tag/genetic_algorithm|creationTime|2018-03-30T13:57:52Z +http://www.semanlink.net/tag/genetic_algorithm|prefLabel|Genetic algorithm +http://www.semanlink.net/tag/genetic_algorithm|creationDate|2018-03-30 +http://www.semanlink.net/tag/genetic_algorithm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/genetic_algorithm|describedBy|https://en.wikipedia.org/wiki/Genetic_algorithm +http://www.semanlink.net/tag/genetic_algorithm|uri|http://www.semanlink.net/tag/genetic_algorithm +http://www.semanlink.net/tag/acl_2019|creationTime|2019-07-25T10:48:45Z +http://www.semanlink.net/tag/acl_2019|prefLabel|ACL 2019 +http://www.semanlink.net/tag/acl_2019|broader|http://www.semanlink.net/tag/acl +http://www.semanlink.net/tag/acl_2019|creationDate|2019-07-25 +http://www.semanlink.net/tag/acl_2019|comment|[Program](http://www.acl2019.org/EN/program.xhtml) +http://www.semanlink.net/tag/acl_2019|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/acl_2019|uri|http://www.semanlink.net/tag/acl_2019 +http://www.semanlink.net/tag/acl_2019|broader_prefLabel|ACL +http://www.semanlink.net/tag/provocation_policiere|creationTime|2007-05-08T10:23:06Z +http://www.semanlink.net/tag/provocation_policiere|prefLabel|Provocation policière +http://www.semanlink.net/tag/provocation_policiere|broader|http://www.semanlink.net/tag/police +http://www.semanlink.net/tag/provocation_policiere|creationDate|2007-05-08 +http://www.semanlink.net/tag/provocation_policiere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/provocation_policiere|uri|http://www.semanlink.net/tag/provocation_policiere +http://www.semanlink.net/tag/provocation_policiere|broader_prefLabel|Police +http://www.semanlink.net/tag/coupe_du_monde_1998|creationTime|2014-05-27T17:29:29Z +http://www.semanlink.net/tag/coupe_du_monde_1998|prefLabel|Coupe du monde 1998 +http://www.semanlink.net/tag/coupe_du_monde_1998|broader|http://www.semanlink.net/tag/coupe_du_monde_de_football +http://www.semanlink.net/tag/coupe_du_monde_1998|creationDate|2014-05-27 +http://www.semanlink.net/tag/coupe_du_monde_1998|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coupe_du_monde_1998|uri|http://www.semanlink.net/tag/coupe_du_monde_1998 +http://www.semanlink.net/tag/coupe_du_monde_1998|broader_prefLabel|Coupe du monde de football +http://www.semanlink.net/tag/first_americans|prefLabel|First Americans +http://www.semanlink.net/tag/first_americans|broader|http://www.semanlink.net/tag/civilisations_precolombiennes +http://www.semanlink.net/tag/first_americans|broader|http://www.semanlink.net/tag/paleoanthropology +http://www.semanlink.net/tag/first_americans|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/first_americans|describedBy|https://en.wikipedia.org/wiki/Indigenous_peoples_of_the_Americas +http://www.semanlink.net/tag/first_americans|uri|http://www.semanlink.net/tag/first_americans +http://www.semanlink.net/tag/first_americans|broader_prefLabel|Civilisations précolombiennes +http://www.semanlink.net/tag/first_americans|broader_prefLabel|Paleoanthropology +http://www.semanlink.net/tag/first_americans|broader_altLabel|Paléontologie humaine +http://www.semanlink.net/tag/rdf_and_database|creationTime|2009-02-10T22:39:21Z 
+http://www.semanlink.net/tag/rdf_and_database|prefLabel|RDF and database +http://www.semanlink.net/tag/rdf_and_database|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_and_database|broader|http://www.semanlink.net/tag/semantic_web_databases +http://www.semanlink.net/tag/rdf_and_database|creationDate|2009-02-10 +http://www.semanlink.net/tag/rdf_and_database|uri|http://www.semanlink.net/tag/rdf_and_database +http://www.semanlink.net/tag/rdf_and_database|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_and_database|broader_prefLabel|Semantic Web: databases +http://www.semanlink.net/tag/rdf_and_database|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_and_database|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_and_database|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_and_database|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_and_database|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/semtechbiz_berlin_2012|creationTime|2011-12-12T17:50:53Z +http://www.semanlink.net/tag/semtechbiz_berlin_2012|prefLabel|SemTechBiz Berlin 2012 +http://www.semanlink.net/tag/semtechbiz_berlin_2012|broader|http://www.semanlink.net/tag/berlin +http://www.semanlink.net/tag/semtechbiz_berlin_2012|broader|http://www.semanlink.net/tag/semtechbiz +http://www.semanlink.net/tag/semtechbiz_berlin_2012|creationDate|2011-12-12 +http://www.semanlink.net/tag/semtechbiz_berlin_2012|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semtechbiz_berlin_2012|homepage|http://semtechbizberlin2012.semanticweb.com/ +http://www.semanlink.net/tag/semtechbiz_berlin_2012|uri|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://www.semanlink.net/tag/semtechbiz_berlin_2012|broader_prefLabel|Berlin +http://www.semanlink.net/tag/semtechbiz_berlin_2012|broader_prefLabel|SemTechBiz +http://www.semanlink.net/tag/knowledge_compilation|creationTime|2009-01-06T22:28:01Z +http://www.semanlink.net/tag/knowledge_compilation|prefLabel|Knowledge Compilation +http://www.semanlink.net/tag/knowledge_compilation|broader|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/tag/knowledge_compilation|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/knowledge_compilation|creationDate|2009-01-06 +http://www.semanlink.net/tag/knowledge_compilation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_compilation|uri|http://www.semanlink.net/tag/knowledge_compilation +http://www.semanlink.net/tag/knowledge_compilation|broader_prefLabel|Knowledge Representation +http://www.semanlink.net/tag/knowledge_compilation|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/knowledge_compilation|broader_altLabel|KR +http://www.semanlink.net/tag/knowledge_compilation|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/knowledge_compilation|broader_altLabel|AI +http://www.semanlink.net/tag/knowledge_compilation|broader_altLabel|IA +http://www.semanlink.net/tag/knowledge_compilation|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/fps_pres|creationTime|2016-04-10T10:46:33Z +http://www.semanlink.net/tag/fps_pres|prefLabel|fps pres +http://www.semanlink.net/tag/fps_pres|broader|http://www.semanlink.net/tag/fps 
+http://www.semanlink.net/tag/fps_pres|creationDate|2016-04-10 +http://www.semanlink.net/tag/fps_pres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps_pres|uri|http://www.semanlink.net/tag/fps_pres +http://www.semanlink.net/tag/fps_pres|broader_prefLabel|fps +http://www.semanlink.net/tag/linked_data_api|creationTime|2010-02-25T13:18:24Z +http://www.semanlink.net/tag/linked_data_api|prefLabel|Linked Data API +http://www.semanlink.net/tag/linked_data_api|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_api|broader|http://www.semanlink.net/tag/api +http://www.semanlink.net/tag/linked_data_api|creationDate|2010-02-25 +http://www.semanlink.net/tag/linked_data_api|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_api|homepage|http://code.google.com/p/linked-data-api/ +http://www.semanlink.net/tag/linked_data_api|uri|http://www.semanlink.net/tag/linked_data_api +http://www.semanlink.net/tag/linked_data_api|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_api|broader_prefLabel|API +http://www.semanlink.net/tag/linked_data_api|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data_api|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/amazon_alexa|creationTime|2018-11-01T21:23:54Z +http://www.semanlink.net/tag/amazon_alexa|prefLabel|Amazon Alexa +http://www.semanlink.net/tag/amazon_alexa|broader|http://www.semanlink.net/tag/amazon +http://www.semanlink.net/tag/amazon_alexa|broader|http://www.semanlink.net/tag/enceintes_connectees +http://www.semanlink.net/tag/amazon_alexa|creationDate|2018-11-01 +http://www.semanlink.net/tag/amazon_alexa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amazon_alexa|altLabel|Alexa (was a semantic search engine) +http://www.semanlink.net/tag/amazon_alexa|altLabel|Alexa +http://www.semanlink.net/tag/amazon_alexa|uri|http://www.semanlink.net/tag/amazon_alexa +http://www.semanlink.net/tag/amazon_alexa|broader_prefLabel|Amazon 
+http://www.semanlink.net/tag/amazon_alexa|broader_prefLabel|Enceintes connectées +http://www.semanlink.net/tag/deep_learning|creationTime|2012-11-30T22:49:11Z +http://www.semanlink.net/tag/deep_learning|prefLabel|Deep Learning +http://www.semanlink.net/tag/deep_learning|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/deep_learning|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/deep_learning|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/deep_learning|related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/deep_learning|related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/deep_learning|creationDate|2012-11-30 +http://www.semanlink.net/tag/deep_learning|comment|"a set of algorithms in machine learning that attempt to model high-level abstractions in data by using architectures composed of multiple non-linear transformations. Deep learning is part of a broader family of machine learning methods based on learning representations of data. + +One of the promises of deep learning is replacing handcrafted features with efficient algorithms for unsupervised or semi-supervised feature learning and hierarchical feature extraction + +With Deep Learning, Ng says, you just give the system a lot of data ""so it can discover by itself what some of the concepts in the world are ([cf.](http://www.wired.com/2013/05/neuro-artificial-intelligence/all/)) +" +http://www.semanlink.net/tag/deep_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deep_learning|describedBy|https://en.wikipedia.org/wiki/Deep_learning +http://www.semanlink.net/tag/deep_learning|uri|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/deep_learning|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/deep_learning|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/deep_learning|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/deep_learning|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/deep_learning|broader_altLabel|AI +http://www.semanlink.net/tag/deep_learning|broader_altLabel|IA +http://www.semanlink.net/tag/deep_learning|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/deep_learning|broader_altLabel|ANN +http://www.semanlink.net/tag/deep_learning|broader_altLabel|NN +http://www.semanlink.net/tag/deep_learning|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/securite_informatique|creationTime|2017-11-20T09:01:04Z +http://www.semanlink.net/tag/securite_informatique|prefLabel|Cybersecurity +http://www.semanlink.net/tag/securite_informatique|prefLabel|Sécurité informatique +http://www.semanlink.net/tag/securite_informatique|broader|http://www.semanlink.net/tag/security +http://www.semanlink.net/tag/securite_informatique|broader|http://www.semanlink.net/tag/securite +http://www.semanlink.net/tag/securite_informatique|creationDate|2017-11-20 +http://www.semanlink.net/tag/securite_informatique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/securite_informatique|uri|http://www.semanlink.net/tag/securite_informatique +http://www.semanlink.net/tag/securite_informatique|broader_prefLabel|Security +http://www.semanlink.net/tag/securite_informatique|broader_prefLabel|Sécurité 
+http://www.semanlink.net/tag/etat_de_la_france|prefLabel|Etat de la France +http://www.semanlink.net/tag/etat_de_la_france|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/etat_de_la_france|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/etat_de_la_france|uri|http://www.semanlink.net/tag/etat_de_la_france +http://www.semanlink.net/tag/etat_de_la_france|broader_prefLabel|France +http://www.semanlink.net/tag/turquie|prefLabel|Turquie +http://www.semanlink.net/tag/turquie|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/turquie|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/turquie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/turquie|uri|http://www.semanlink.net/tag/turquie +http://www.semanlink.net/tag/turquie|broader_prefLabel|Asie +http://www.semanlink.net/tag/turquie|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/programming|creationTime|2007-11-15T01:25:36Z +http://www.semanlink.net/tag/programming|prefLabel|Programming +http://www.semanlink.net/tag/programming|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/programming|creationDate|2007-11-15 +http://www.semanlink.net/tag/programming|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/programming|uri|http://www.semanlink.net/tag/programming +http://www.semanlink.net/tag/programming|broader_prefLabel|Dev +http://www.semanlink.net/tag/nso_pegasus|creationTime|2020-07-14T12:32:26Z +http://www.semanlink.net/tag/nso_pegasus|prefLabel|NSO/Pegasus +http://www.semanlink.net/tag/nso_pegasus|broader|http://www.semanlink.net/tag/cybersurveillance +http://www.semanlink.net/tag/nso_pegasus|broader|http://www.semanlink.net/tag/leaks +http://www.semanlink.net/tag/nso_pegasus|related|http://www.semanlink.net/tag/israel +http://www.semanlink.net/tag/nso_pegasus|creationDate|2020-07-14 +http://www.semanlink.net/tag/nso_pegasus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nso_pegasus|uri|http://www.semanlink.net/tag/nso_pegasus +http://www.semanlink.net/tag/nso_pegasus|broader_prefLabel|Cybersurveillance +http://www.semanlink.net/tag/nso_pegasus|broader_prefLabel|Leaks +http://www.semanlink.net/tag/nso_pegasus|broader_related|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/tag/guerre|creationTime|2018-01-14T19:19:10Z +http://www.semanlink.net/tag/guerre|prefLabel|War +http://www.semanlink.net/tag/guerre|broader|http://www.semanlink.net/tag/conflits +http://www.semanlink.net/tag/guerre|broader|http://www.semanlink.net/tag/horreur +http://www.semanlink.net/tag/guerre|creationDate|2018-01-14 +http://www.semanlink.net/tag/guerre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/guerre|altLabel|Guerre +http://www.semanlink.net/tag/guerre|uri|http://www.semanlink.net/tag/guerre +http://www.semanlink.net/tag/guerre|broader_prefLabel|Conflits +http://www.semanlink.net/tag/guerre|broader_prefLabel|Horreur +http://www.semanlink.net/tag/artificial_intelligence|creationTime|2009-01-06T22:30:58Z +http://www.semanlink.net/tag/artificial_intelligence|prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/artificial_intelligence|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/artificial_intelligence|broader|http://www.semanlink.net/tag/intelligence 
+http://www.semanlink.net/tag/artificial_intelligence|related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/artificial_intelligence|creationDate|2009-01-06 +http://www.semanlink.net/tag/artificial_intelligence|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/artificial_intelligence|altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/artificial_intelligence|altLabel|AI +http://www.semanlink.net/tag/artificial_intelligence|altLabel|IA +http://www.semanlink.net/tag/artificial_intelligence|uri|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/artificial_intelligence|broader_prefLabel|Informatique +http://www.semanlink.net/tag/artificial_intelligence|broader_prefLabel|Intelligence +http://www.semanlink.net/tag/snorql|creationTime|2007-10-13T00:05:58Z +http://www.semanlink.net/tag/snorql|prefLabel|snorql +http://www.semanlink.net/tag/snorql|broader|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/snorql|broader|http://www.semanlink.net/tag/sparql_en_javascript +http://www.semanlink.net/tag/snorql|creationDate|2007-10-13 +http://www.semanlink.net/tag/snorql|comment|SPARQL explorer +http://www.semanlink.net/tag/snorql|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/snorql|uri|http://www.semanlink.net/tag/snorql +http://www.semanlink.net/tag/snorql|broader_prefLabel|dbpedia +http://www.semanlink.net/tag/snorql|broader_prefLabel|SPARQL en javascript +http://www.semanlink.net/tag/snorql|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/snorql|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/semantic_web_databases|creationTime|2008-03-18T08:16:04Z +http://www.semanlink.net/tag/semantic_web_databases|prefLabel|Semantic Web: databases +http://www.semanlink.net/tag/semantic_web_databases|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_databases|creationDate|2008-03-18 +http://www.semanlink.net/tag/semantic_web_databases|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_databases|uri|http://www.semanlink.net/tag/semantic_web_databases +http://www.semanlink.net/tag/semantic_web_databases|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_databases|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_databases|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/tim_bray|prefLabel|Tim Bray +http://www.semanlink.net/tag/tim_bray|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/tim_bray|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tim_bray|uri|http://www.semanlink.net/tag/tim_bray +http://www.semanlink.net/tag/tim_bray|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/tim_bray|broader_altLabel|Technical guys +http://www.semanlink.net/tag/fermi_paradox|creationTime|2007-08-08T17:33:08Z +http://www.semanlink.net/tag/fermi_paradox|prefLabel|Fermi's paradox +http://www.semanlink.net/tag/fermi_paradox|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/fermi_paradox|creationDate|2007-08-08 +http://www.semanlink.net/tag/fermi_paradox|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fermi_paradox|uri|http://www.semanlink.net/tag/fermi_paradox 
+http://www.semanlink.net/tag/fermi_paradox|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/esa|prefLabel|esa +http://www.semanlink.net/tag/esa|broader|http://www.semanlink.net/tag/exploration_spatiale +http://www.semanlink.net/tag/esa|broader|http://www.semanlink.net/tag/union_europeenne +http://www.semanlink.net/tag/esa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/esa|uri|http://www.semanlink.net/tag/esa +http://www.semanlink.net/tag/esa|broader_prefLabel|Exploration spatiale +http://www.semanlink.net/tag/esa|broader_prefLabel|Union européenne +http://www.semanlink.net/tag/esa|broader_altLabel|UE +http://www.semanlink.net/tag/darpa|prefLabel|DARPA +http://www.semanlink.net/tag/darpa|broader|http://www.semanlink.net/tag/armee_americaine +http://www.semanlink.net/tag/darpa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/darpa|uri|http://www.semanlink.net/tag/darpa +http://www.semanlink.net/tag/darpa|broader_prefLabel|Armée américaine +http://www.semanlink.net/tag/apache_hive|creationTime|2013-03-12T11:31:09Z +http://www.semanlink.net/tag/apache_hive|prefLabel|Apache Hive +http://www.semanlink.net/tag/apache_hive|broader|http://www.semanlink.net/tag/big_data_tools +http://www.semanlink.net/tag/apache_hive|broader|http://www.semanlink.net/tag/hadoop +http://www.semanlink.net/tag/apache_hive|broader|http://www.semanlink.net/tag/data_warehouse +http://www.semanlink.net/tag/apache_hive|broader|http://www.semanlink.net/tag/database +http://www.semanlink.net/tag/apache_hive|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/apache_hive|creationDate|2013-03-12 +http://www.semanlink.net/tag/apache_hive|comment|The Apache Hive ™ data warehouse software facilitates reading, writing, and managing large datasets residing in distributed storage using SQL. Structure can be projected onto data already in storage. A command line tool and JDBC driver are provided to connect users to Hive. 
+http://www.semanlink.net/tag/apache_hive|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apache_hive|homepage|http://hive.apache.org/ +http://www.semanlink.net/tag/apache_hive|altLabel|Hive +http://www.semanlink.net/tag/apache_hive|uri|http://www.semanlink.net/tag/apache_hive +http://www.semanlink.net/tag/apache_hive|broader_prefLabel|Big Data Tools +http://www.semanlink.net/tag/apache_hive|broader_prefLabel|Hadoop +http://www.semanlink.net/tag/apache_hive|broader_prefLabel|Data Warehouse +http://www.semanlink.net/tag/apache_hive|broader_prefLabel|Database +http://www.semanlink.net/tag/apache_hive|broader_prefLabel|apache.org +http://www.semanlink.net/tag/apache_hive|broader_related|http://www.semanlink.net/tag/map_reduce +http://www.semanlink.net/tag/apache_hive|broader_related|http://www.semanlink.net/tag/etl +http://www.semanlink.net/tag/http|prefLabel|HTTP +http://www.semanlink.net/tag/http|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/http|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/http|uri|http://www.semanlink.net/tag/http +http://www.semanlink.net/tag/http|broader_prefLabel|Internet +http://www.semanlink.net/tag/rdf_template|creationTime|2014-05-17T17:44:21Z +http://www.semanlink.net/tag/rdf_template|prefLabel|RDF Template +http://www.semanlink.net/tag/rdf_template|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_template|creationDate|2014-05-17 +http://www.semanlink.net/tag/rdf_template|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_template|uri|http://www.semanlink.net/tag/rdf_template +http://www.semanlink.net/tag/rdf_template|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_template|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_template|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_template|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_template|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_template|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/microsoft_research|creationTime|2013-03-25T13:11:28Z +http://www.semanlink.net/tag/microsoft_research|prefLabel|Microsoft Research +http://www.semanlink.net/tag/microsoft_research|broader|http://www.semanlink.net/tag/microsoft +http://www.semanlink.net/tag/microsoft_research|creationDate|2013-03-25 +http://www.semanlink.net/tag/microsoft_research|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/microsoft_research|uri|http://www.semanlink.net/tag/microsoft_research +http://www.semanlink.net/tag/microsoft_research|broader_prefLabel|Microsoft +http://www.semanlink.net/tag/microsoft_research|broader_related|http://www.semanlink.net/tag/bill_gates +http://www.semanlink.net/tag/concept_learning|creationTime|2018-01-04T14:57:36Z +http://www.semanlink.net/tag/concept_learning|prefLabel|Concept learning +http://www.semanlink.net/tag/concept_learning|broader|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/concept_learning|related|http://www.semanlink.net/tag/one_shot_generalization +http://www.semanlink.net/tag/concept_learning|creationDate|2018-01-04 +http://www.semanlink.net/tag/concept_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/concept_learning|uri|http://www.semanlink.net/tag/concept_learning +http://www.semanlink.net/tag/concept_learning|broader_prefLabel|Unsupervised machine learning +http://www.semanlink.net/tag/immune_system|creationTime|2012-12-30T13:25:25Z +http://www.semanlink.net/tag/immune_system|prefLabel|Système immunitaire +http://www.semanlink.net/tag/immune_system|prefLabel|Immune system +http://www.semanlink.net/tag/immune_system|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/immune_system|related|http://www.semanlink.net/tag/virus +http://www.semanlink.net/tag/immune_system|creationDate|2012-12-30 +http://www.semanlink.net/tag/immune_system|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/immune_system|describedBy|https://en.wikipedia.org/wiki/Immune_system +http://www.semanlink.net/tag/immune_system|uri|http://www.semanlink.net/tag/immune_system +http://www.semanlink.net/tag/immune_system|broader_prefLabel|Biology +http://www.semanlink.net/tag/immune_system|broader_altLabel|Biologie +http://www.semanlink.net/tag/w3c_data_activity|creationTime|2014-01-06T13:50:34Z +http://www.semanlink.net/tag/w3c_data_activity|prefLabel|W3C Data Activity +http://www.semanlink.net/tag/w3c_data_activity|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/w3c_data_activity|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/w3c_data_activity|creationDate|2014-01-06 +http://www.semanlink.net/tag/w3c_data_activity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/w3c_data_activity|uri|http://www.semanlink.net/tag/w3c_data_activity +http://www.semanlink.net/tag/w3c_data_activity|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/w3c_data_activity|broader_prefLabel|W3C +http://www.semanlink.net/tag/w3c_data_activity|broader_altLabel|sw +http://www.semanlink.net/tag/w3c_data_activity|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/w3c_data_activity|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/w3c_data_activity|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/stardog|creationTime|2018-06-14T13:21:37Z +http://www.semanlink.net/tag/stardog|prefLabel|Stardog +http://www.semanlink.net/tag/stardog|broader|http://www.semanlink.net/tag/enterprise_knowledge_graph_platform +http://www.semanlink.net/tag/stardog|creationDate|2018-06-14 +http://www.semanlink.net/tag/stardog|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stardog|homepage|https://www.stardog.com +http://www.semanlink.net/tag/stardog|uri|http://www.semanlink.net/tag/stardog +http://www.semanlink.net/tag/stardog|broader_prefLabel|Enterprise Knowledge Graph Platform +http://www.semanlink.net/tag/stardog|broader_related|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/tag/tanis_kt|creationTime|2019-04-01T17:43:17Z +http://www.semanlink.net/tag/tanis_kt|prefLabel|Tanis-KT +http://www.semanlink.net/tag/tanis_kt|broader|http://www.semanlink.net/tag/extinction_des_dinosaures +http://www.semanlink.net/tag/tanis_kt|creationDate|2019-04-01 +http://www.semanlink.net/tag/tanis_kt|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tanis_kt|describedBy|https://en.wikipedia.org/wiki/Tanis_(fossil_site) +http://www.semanlink.net/tag/tanis_kt|uri|http://www.semanlink.net/tag/tanis_kt 
+http://www.semanlink.net/tag/tanis_kt|broader_prefLabel|Extinction des dinosaures +http://www.semanlink.net/tag/tom_heath|creationTime|2007-06-13T23:10:42Z +http://www.semanlink.net/tag/tom_heath|prefLabel|Tom Heath +http://www.semanlink.net/tag/tom_heath|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/tom_heath|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/tom_heath|creationDate|2007-06-13 +http://www.semanlink.net/tag/tom_heath|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tom_heath|uri|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/tom_heath|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/tom_heath|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/tom_heath|broader_altLabel|Technical guys +http://www.semanlink.net/tag/driverless_car|creationTime|2012-11-30T22:32:42Z +http://www.semanlink.net/tag/driverless_car|prefLabel|Driverless car +http://www.semanlink.net/tag/driverless_car|broader|http://www.semanlink.net/tag/automobile_2_0 +http://www.semanlink.net/tag/driverless_car|broader|http://www.semanlink.net/tag/robotique +http://www.semanlink.net/tag/driverless_car|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/driverless_car|creationDate|2012-11-30 +http://www.semanlink.net/tag/driverless_car|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/driverless_car|altLabel|Self-driving car +http://www.semanlink.net/tag/driverless_car|uri|http://www.semanlink.net/tag/driverless_car +http://www.semanlink.net/tag/driverless_car|broader_prefLabel|Automobile 2.0 +http://www.semanlink.net/tag/driverless_car|broader_prefLabel|Robotique +http://www.semanlink.net/tag/driverless_car|broader_prefLabel|Automobile +http://www.semanlink.net/tag/driverless_car|broader_altLabel|Robotics +http://www.semanlink.net/tag/driverless_car|broader_altLabel|Robot +http://www.semanlink.net/tag/driverless_car|broader_altLabel|Automotive +http://www.semanlink.net/tag/knowledge_graph_deep_learning|creationTime|2018-12-09T10:34:31Z +http://www.semanlink.net/tag/knowledge_graph_deep_learning|prefLabel|Knowledge Graph + Deep Learning +http://www.semanlink.net/tag/knowledge_graph_deep_learning|broader|http://www.semanlink.net/tag/knowledge_graph_ml +http://www.semanlink.net/tag/knowledge_graph_deep_learning|broader|http://www.semanlink.net/tag/ai_knowledge_bases +http://www.semanlink.net/tag/knowledge_graph_deep_learning|broader|http://www.semanlink.net/tag/domain_knowledge_deep_learning +http://www.semanlink.net/tag/knowledge_graph_deep_learning|related|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/tag/knowledge_graph_deep_learning|creationDate|2018-12-09 +http://www.semanlink.net/tag/knowledge_graph_deep_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_graph_deep_learning|uri|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/knowledge_graph_deep_learning|broader_prefLabel|Knowledge Graph + ML +http://www.semanlink.net/tag/knowledge_graph_deep_learning|broader_prefLabel|AI + Knowledge Bases +http://www.semanlink.net/tag/knowledge_graph_deep_learning|broader_prefLabel|Domain Knowledge + Deep Learning +http://www.semanlink.net/tag/knowledge_graph_deep_learning|broader_related|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation 
+http://www.semanlink.net/tag/annees_50|creationTime|2019-06-17T22:57:21Z +http://www.semanlink.net/tag/annees_50|prefLabel|Années 50 +http://www.semanlink.net/tag/annees_50|creationDate|2019-06-17 +http://www.semanlink.net/tag/annees_50|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/annees_50|uri|http://www.semanlink.net/tag/annees_50 +http://www.semanlink.net/tag/retard_technologique_francais|prefLabel|Retard technologique français +http://www.semanlink.net/tag/retard_technologique_francais|broader|http://www.semanlink.net/tag/declin_de_la_france +http://www.semanlink.net/tag/retard_technologique_francais|broader|http://www.semanlink.net/tag/france_delabrement +http://www.semanlink.net/tag/retard_technologique_francais|broader|http://www.semanlink.net/tag/retard_technologique_europeen +http://www.semanlink.net/tag/retard_technologique_francais|creationDate|2006-11-20 +http://www.semanlink.net/tag/retard_technologique_francais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/retard_technologique_francais|uri|http://www.semanlink.net/tag/retard_technologique_francais +http://www.semanlink.net/tag/retard_technologique_francais|broader_prefLabel|Déclin de la France +http://www.semanlink.net/tag/retard_technologique_francais|broader_prefLabel|France : délabrement +http://www.semanlink.net/tag/retard_technologique_francais|broader_prefLabel|Retard technologique européen +http://www.semanlink.net/tag/retard_technologique_francais|broader_altLabel|La France ne marche pas +http://www.semanlink.net/tag/general_nlp_tasks|creationTime|2017-06-29T18:37:58Z +http://www.semanlink.net/tag/general_nlp_tasks|prefLabel|General NLP tasks +http://www.semanlink.net/tag/general_nlp_tasks|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/general_nlp_tasks|creationDate|2017-06-29 +http://www.semanlink.net/tag/general_nlp_tasks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/general_nlp_tasks|uri|http://www.semanlink.net/tag/general_nlp_tasks +http://www.semanlink.net/tag/general_nlp_tasks|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/big_brother|creationTime|2009-02-18T01:03:45Z +http://www.semanlink.net/tag/big_brother|prefLabel|Big Brother +http://www.semanlink.net/tag/big_brother|broader|http://www.semanlink.net/tag/etat_policier +http://www.semanlink.net/tag/big_brother|creationDate|2009-02-18 +http://www.semanlink.net/tag/big_brother|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/big_brother|uri|http://www.semanlink.net/tag/big_brother +http://www.semanlink.net/tag/big_brother|broader_prefLabel|Etat policier +http://www.semanlink.net/tag/big_brother|broader_related|http://www.semanlink.net/tag/securite +http://www.semanlink.net/tag/openstructs|creationTime|2012-05-10T00:11:31Z +http://www.semanlink.net/tag/openstructs|prefLabel|OpenStructs +http://www.semanlink.net/tag/openstructs|broader|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/openstructs|broader|http://www.semanlink.net/tag/semantic_framework +http://www.semanlink.net/tag/openstructs|creationDate|2012-05-10 +http://www.semanlink.net/tag/openstructs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/openstructs|homepage|http://openstructs.org/ +http://www.semanlink.net/tag/openstructs|uri|http://www.semanlink.net/tag/openstructs 
+http://www.semanlink.net/tag/openstructs|broader_prefLabel|Frederick Giasson +http://www.semanlink.net/tag/openstructs|broader_prefLabel|Semantic framework +http://www.semanlink.net/tag/openstructs|broader_related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/openstructs|broader_related|http://www.semanlink.net/tag/umbel +http://www.semanlink.net/tag/apache_spark|creationTime|2015-02-12T16:42:40Z +http://www.semanlink.net/tag/apache_spark|prefLabel|Apache Spark +http://www.semanlink.net/tag/apache_spark|broader|http://www.semanlink.net/tag/machine_learning_tool +http://www.semanlink.net/tag/apache_spark|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/apache_spark|broader|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/apache_spark|broader|http://www.semanlink.net/tag/in_memory_computing +http://www.semanlink.net/tag/apache_spark|related|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/apache_spark|related|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/apache_spark|related|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/apache_spark|creationDate|2015-02-12 +http://www.semanlink.net/tag/apache_spark|comment|"**cluster computing framework** based on a **distributed memory architecture** (in contrast to Hadoop's two-stage disk-based MapReduce paradigm) +" +http://www.semanlink.net/tag/apache_spark|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apache_spark|homepage|https://spark.apache.org/ +http://www.semanlink.net/tag/apache_spark|uri|http://www.semanlink.net/tag/apache_spark +http://www.semanlink.net/tag/apache_spark|broader_prefLabel|Machine Learning tool +http://www.semanlink.net/tag/apache_spark|broader_prefLabel|apache.org +http://www.semanlink.net/tag/apache_spark|broader_prefLabel|Big Data +http://www.semanlink.net/tag/apache_spark|broader_prefLabel|In-memory computing +http://www.semanlink.net/tag/apache_spark|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/apache_spark|broader_related|http://www.semanlink.net/tag/bitmap_index +http://www.semanlink.net/tag/wiki_service|prefLabel|Wiki service +http://www.semanlink.net/tag/wiki_service|broader|http://www.semanlink.net/tag/wiki +http://www.semanlink.net/tag/wiki_service|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wiki_service|uri|http://www.semanlink.net/tag/wiki_service +http://www.semanlink.net/tag/wiki_service|broader_prefLabel|Wiki +http://www.semanlink.net/tag/seq2seq_encoder_decoder|creationTime|2018-11-13T17:20:16Z +http://www.semanlink.net/tag/seq2seq_encoder_decoder|prefLabel|Sequence-To-Sequence Encoder-Decoder Architecture +http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader|http://www.semanlink.net/tag/encoder_decoder_architecture +http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/tag/seq2seq_encoder_decoder|creationDate|2018-11-13 +http://www.semanlink.net/tag/seq2seq_encoder_decoder|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/seq2seq_encoder_decoder|altLabel|Seq2Seq Encoder-Decoder +http://www.semanlink.net/tag/seq2seq_encoder_decoder|uri|http://www.semanlink.net/tag/seq2seq_encoder_decoder +http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader_prefLabel|Encoder-Decoder architecture 
+http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader_prefLabel|Sequence-to-sequence learning +http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader_altLabel|Sequence Modeling +http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader_altLabel|Seq2Seq +http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader_related|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader_related|http://www.semanlink.net/tag/conditional_random_field +http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader_related|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader_related|http://www.semanlink.net/tag/machine_translation +http://www.semanlink.net/tag/seq2seq_encoder_decoder|broader_related|http://www.semanlink.net/tag/finite_state_transducer +http://www.semanlink.net/tag/christine_golbreich|creationTime|2007-07-13T18:50:18Z +http://www.semanlink.net/tag/christine_golbreich|prefLabel|Christine Golbreich +http://www.semanlink.net/tag/christine_golbreich|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/christine_golbreich|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/christine_golbreich|creationDate|2007-07-13 +http://www.semanlink.net/tag/christine_golbreich|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/christine_golbreich|uri|http://www.semanlink.net/tag/christine_golbreich +http://www.semanlink.net/tag/christine_golbreich|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/christine_golbreich|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/christine_golbreich|broader_altLabel|Technical guys +http://www.semanlink.net/tag/ami|prefLabel|Ami +http://www.semanlink.net/tag/ami|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ami|uri|http://www.semanlink.net/tag/ami +http://www.semanlink.net/tag/tensorflow|creationTime|2015-11-09T18:48:44Z +http://www.semanlink.net/tag/tensorflow|prefLabel|TensorFlow +http://www.semanlink.net/tag/tensorflow|broader|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/tag/tensorflow|broader|http://www.semanlink.net/tag/deep_learning_frameworks +http://www.semanlink.net/tag/tensorflow|broader|http://www.semanlink.net/tag/google_research +http://www.semanlink.net/tag/tensorflow|related|http://www.semanlink.net/tag/christopher_olah +http://www.semanlink.net/tag/tensorflow|creationDate|2015-11-09 +http://www.semanlink.net/tag/tensorflow|comment|"TensorFlow™ is an open source software library for numerical computation using data flow graphs. + +Nodes in the graph represent mathematical operations, while the graph edges represent the multidimensional data arrays (tensors) communicated between them." 
+http://www.semanlink.net/tag/tensorflow|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tensorflow|homepage|http://tensorflow.org/ +http://www.semanlink.net/tag/tensorflow|uri|http://www.semanlink.net/tag/tensorflow +http://www.semanlink.net/tag/tensorflow|broader_prefLabel|AI@Google +http://www.semanlink.net/tag/tensorflow|broader_prefLabel|Deep Learning frameworks +http://www.semanlink.net/tag/tensorflow|broader_prefLabel|Google Research +http://www.semanlink.net/tag/hdmi|prefLabel|HDMI +http://www.semanlink.net/tag/hdmi|broader|http://www.semanlink.net/tag/drm +http://www.semanlink.net/tag/hdmi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hdmi|uri|http://www.semanlink.net/tag/hdmi +http://www.semanlink.net/tag/hdmi|broader_prefLabel|DRM +http://www.semanlink.net/tag/ouigour|prefLabel|Ouïgour +http://www.semanlink.net/tag/ouigour|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/ouigour|broader|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/ouigour|creationDate|2006-11-14 +http://www.semanlink.net/tag/ouigour|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ouigour|altLabel|Xinjiang +http://www.semanlink.net/tag/ouigour|uri|http://www.semanlink.net/tag/ouigour +http://www.semanlink.net/tag/ouigour|broader_prefLabel|Chine +http://www.semanlink.net/tag/ouigour|broader_prefLabel|Peuples +http://www.semanlink.net/tag/ouigour|broader_altLabel|China +http://www.semanlink.net/tag/francois_scharffe|creationTime|2012-11-10T01:59:58Z +http://www.semanlink.net/tag/francois_scharffe|prefLabel|François Scharffe +http://www.semanlink.net/tag/francois_scharffe|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/francois_scharffe|related|http://www.semanlink.net/tag/datalift +http://www.semanlink.net/tag/francois_scharffe|creationDate|2012-11-10 +http://www.semanlink.net/tag/francois_scharffe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/francois_scharffe|uri|http://www.semanlink.net/tag/francois_scharffe +http://www.semanlink.net/tag/francois_scharffe|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/url|prefLabel|URL +http://www.semanlink.net/tag/url|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/url|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/url|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/url|uri|http://www.semanlink.net/tag/url +http://www.semanlink.net/tag/url|broader_prefLabel|Dev +http://www.semanlink.net/tag/url|broader_prefLabel|URI +http://www.semanlink.net/tag/osema_deri_renault_paper|creationTime|2011-04-04T14:01:43Z +http://www.semanlink.net/tag/osema_deri_renault_paper|prefLabel|OSEMA/DERI-Renault paper +http://www.semanlink.net/tag/osema_deri_renault_paper|broader|http://www.semanlink.net/tag/fps_paper +http://www.semanlink.net/tag/osema_deri_renault_paper|broader|http://www.semanlink.net/tag/fadi_badra +http://www.semanlink.net/tag/osema_deri_renault_paper|broader|http://www.semanlink.net/tag/configuration_and_sw +http://www.semanlink.net/tag/osema_deri_renault_paper|broader|http://www.semanlink.net/tag/osema_2011 +http://www.semanlink.net/tag/osema_deri_renault_paper|creationDate|2011-04-04 +http://www.semanlink.net/tag/osema_deri_renault_paper|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/osema_deri_renault_paper|uri|http://www.semanlink.net/tag/osema_deri_renault_paper +http://www.semanlink.net/tag/osema_deri_renault_paper|broader_prefLabel|fps: paper +http://www.semanlink.net/tag/osema_deri_renault_paper|broader_prefLabel|Fadi Badra +http://www.semanlink.net/tag/osema_deri_renault_paper|broader_prefLabel|Configuration and SW +http://www.semanlink.net/tag/osema_deri_renault_paper|broader_prefLabel|OSEMA 2011 +http://www.semanlink.net/tag/osema_deri_renault_paper|broader_related|http://www.semanlink.net/tag/osema_deri_renault_paper +http://www.semanlink.net/tag/internet_regulation|creationTime|2017-08-17T13:20:34Z +http://www.semanlink.net/tag/internet_regulation|prefLabel|Internet regulation +http://www.semanlink.net/tag/internet_regulation|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/internet_regulation|creationDate|2017-08-17 +http://www.semanlink.net/tag/internet_regulation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/internet_regulation|uri|http://www.semanlink.net/tag/internet_regulation +http://www.semanlink.net/tag/internet_regulation|broader_prefLabel|Internet +http://www.semanlink.net/tag/fabien_gandon|creationTime|2013-05-14T18:35:19Z +http://www.semanlink.net/tag/fabien_gandon|prefLabel|Fabien Gandon +http://www.semanlink.net/tag/fabien_gandon|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/fabien_gandon|creationDate|2013-05-14 +http://www.semanlink.net/tag/fabien_gandon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fabien_gandon|uri|http://www.semanlink.net/tag/fabien_gandon +http://www.semanlink.net/tag/fabien_gandon|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/petrole|prefLabel|Pétrole +http://www.semanlink.net/tag/petrole|broader|http://www.semanlink.net/tag/energie +http://www.semanlink.net/tag/petrole|broader|http://www.semanlink.net/tag/matieres_premieres +http://www.semanlink.net/tag/petrole|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/petrole|uri|http://www.semanlink.net/tag/petrole +http://www.semanlink.net/tag/petrole|broader_prefLabel|Energie +http://www.semanlink.net/tag/petrole|broader_prefLabel|Matières premières +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|creationTime|2018-08-05T10:44:45Z +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|prefLabel|Sequence Modeling: CNN vs RNN +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|broader|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|creationDate|2018-08-05 +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|uri|http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|broader_prefLabel|Sequence-to-sequence learning +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|broader_altLabel|Sequence Modeling +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|broader_altLabel|Seq2Seq +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|broader_related|http://www.semanlink.net/tag/conditional_random_field +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|broader_related|http://www.semanlink.net/tag/deep_learning_attention 
+http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|broader_related|http://www.semanlink.net/tag/machine_translation +http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn|broader_related|http://www.semanlink.net/tag/finite_state_transducer +http://www.semanlink.net/tag/tech_company|creationTime|2014-04-30T17:18:36Z +http://www.semanlink.net/tag/tech_company|prefLabel|Tech company +http://www.semanlink.net/tag/tech_company|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/tech_company|creationDate|2014-04-30 +http://www.semanlink.net/tag/tech_company|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tech_company|uri|http://www.semanlink.net/tag/tech_company +http://www.semanlink.net/tag/tech_company|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/europe_aberrations|creationTime|2014-03-26T13:36:50Z +http://www.semanlink.net/tag/europe_aberrations|prefLabel|Europe : aberrations +http://www.semanlink.net/tag/europe_aberrations|broader|http://www.semanlink.net/tag/construction_europeenne +http://www.semanlink.net/tag/europe_aberrations|creationDate|2014-03-26 +http://www.semanlink.net/tag/europe_aberrations|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/europe_aberrations|uri|http://www.semanlink.net/tag/europe_aberrations +http://www.semanlink.net/tag/europe_aberrations|broader_prefLabel|Construction européenne +http://www.semanlink.net/tag/antiquite_de_l_inde|prefLabel|Antiquité de l'Inde +http://www.semanlink.net/tag/antiquite_de_l_inde|broader|http://www.semanlink.net/tag/inde +http://www.semanlink.net/tag/antiquite_de_l_inde|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/antiquite_de_l_inde|broader|http://www.semanlink.net/tag/histoire_de_l_inde +http://www.semanlink.net/tag/antiquite_de_l_inde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antiquite_de_l_inde|uri|http://www.semanlink.net/tag/antiquite_de_l_inde +http://www.semanlink.net/tag/antiquite_de_l_inde|broader_prefLabel|Inde +http://www.semanlink.net/tag/antiquite_de_l_inde|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/antiquite_de_l_inde|broader_prefLabel|Histoire de l'Inde +http://www.semanlink.net/tag/antonin_artaud|creationTime|2009-09-22T00:13:48Z +http://www.semanlink.net/tag/antonin_artaud|prefLabel|Antonin Artaud +http://www.semanlink.net/tag/antonin_artaud|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/antonin_artaud|broader|http://www.semanlink.net/tag/folie +http://www.semanlink.net/tag/antonin_artaud|creationDate|2009-09-22 +http://www.semanlink.net/tag/antonin_artaud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antonin_artaud|describedBy|https://fr.wikipedia.org/wiki/Antonin_Artaud +http://www.semanlink.net/tag/antonin_artaud|uri|http://www.semanlink.net/tag/antonin_artaud +http://www.semanlink.net/tag/antonin_artaud|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/antonin_artaud|broader_prefLabel|Folie +http://www.semanlink.net/tag/web_dev_framework|creationTime|2008-09-09T14:51:07Z +http://www.semanlink.net/tag/web_dev_framework|prefLabel|Web dev framework +http://www.semanlink.net/tag/web_dev_framework|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/web_dev_framework|creationDate|2008-09-09 +http://www.semanlink.net/tag/web_dev_framework|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/web_dev_framework|uri|http://www.semanlink.net/tag/web_dev_framework +http://www.semanlink.net/tag/web_dev_framework|broader_prefLabel|Web dev +http://www.semanlink.net/tag/web_dev_framework|broader_altLabel|Web app dev +http://www.semanlink.net/tag/modernisation_de_l_etat|creationTime|2014-06-26T13:35:57Z +http://www.semanlink.net/tag/modernisation_de_l_etat|prefLabel|Modernisation de l'état +http://www.semanlink.net/tag/modernisation_de_l_etat|creationDate|2014-06-26 +http://www.semanlink.net/tag/modernisation_de_l_etat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/modernisation_de_l_etat|uri|http://www.semanlink.net/tag/modernisation_de_l_etat +http://www.semanlink.net/tag/lycee|creationTime|2009-12-12T01:13:09Z +http://www.semanlink.net/tag/lycee|prefLabel|Lycée +http://www.semanlink.net/tag/lycee|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/lycee|creationDate|2009-12-12 +http://www.semanlink.net/tag/lycee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lycee|uri|http://www.semanlink.net/tag/lycee +http://www.semanlink.net/tag/lycee|broader_prefLabel|Education +http://www.semanlink.net/tag/lycee|broader_altLabel|Enseignement +http://www.semanlink.net/tag/semanlink_tag_finder|creationTime|2020-09-15T18:23:48Z +http://www.semanlink.net/tag/semanlink_tag_finder|prefLabel|Semanlink Tag Finder +http://www.semanlink.net/tag/semanlink_tag_finder|broader|http://www.semanlink.net/tag/nlp_4_semanlink +http://www.semanlink.net/tag/semanlink_tag_finder|broader|http://www.semanlink.net/tag/hierarchical_multi_label_text_classification +http://www.semanlink.net/tag/semanlink_tag_finder|creationDate|2020-09-15 +http://www.semanlink.net/tag/semanlink_tag_finder|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semanlink_tag_finder|uri|http://www.semanlink.net/tag/semanlink_tag_finder +http://www.semanlink.net/tag/semanlink_tag_finder|broader_prefLabel|NLP 4 Semanlink +http://www.semanlink.net/tag/semanlink_tag_finder|broader_prefLabel|Hierarchical multi-label text classification +http://www.semanlink.net/tag/semanlink_tag_finder|broader_related|http://www.semanlink.net/tag/personal_knowledge_graph +http://www.semanlink.net/tag/carte_d_identite|prefLabel|Carte d'identité +http://www.semanlink.net/tag/carte_d_identite|broader|http://www.semanlink.net/tag/divers +http://www.semanlink.net/tag/carte_d_identite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/carte_d_identite|uri|http://www.semanlink.net/tag/carte_d_identite +http://www.semanlink.net/tag/carte_d_identite|broader_prefLabel|Divers +http://www.semanlink.net/tag/roam|creationTime|2020-05-15T16:41:48Z +http://www.semanlink.net/tag/roam|prefLabel|Roam +http://www.semanlink.net/tag/roam|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/roam|broader|http://www.semanlink.net/tag/note_taking_app +http://www.semanlink.net/tag/roam|creationDate|2020-05-15 +http://www.semanlink.net/tag/roam|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/roam|homepage|https://roamresearch.com/ +http://www.semanlink.net/tag/roam|uri|http://www.semanlink.net/tag/roam +http://www.semanlink.net/tag/roam|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/roam|broader_prefLabel|Note taking app +http://www.semanlink.net/tag/lumieres|creationTime|2016-03-14T22:04:47Z 
+http://www.semanlink.net/tag/lumieres|prefLabel|Lumières +http://www.semanlink.net/tag/lumieres|broader|http://www.semanlink.net/tag/philosophie +http://www.semanlink.net/tag/lumieres|related|http://www.semanlink.net/tag/revolution_francaise +http://www.semanlink.net/tag/lumieres|creationDate|2016-03-14 +http://www.semanlink.net/tag/lumieres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lumieres|describedBy|https://fr.wikipedia.org/wiki/Lumi%C3%A8res_(philosophie) +http://www.semanlink.net/tag/lumieres|uri|http://www.semanlink.net/tag/lumieres +http://www.semanlink.net/tag/lumieres|broader_prefLabel|Philosophie +http://www.semanlink.net/tag/lumieres|broader_altLabel|Philosophy +http://www.semanlink.net/tag/perche|creationTime|2007-02-18T22:32:12Z +http://www.semanlink.net/tag/perche|prefLabel|Perche +http://www.semanlink.net/tag/perche|creationDate|2007-02-18 +http://www.semanlink.net/tag/perche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/perche|uri|http://www.semanlink.net/tag/perche +http://www.semanlink.net/tag/ornithorynque|prefLabel|Ornithorynque +http://www.semanlink.net/tag/ornithorynque|broader|http://www.semanlink.net/tag/monotremes +http://www.semanlink.net/tag/ornithorynque|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/ornithorynque|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ornithorynque|altLabel|Platypus +http://www.semanlink.net/tag/ornithorynque|uri|http://www.semanlink.net/tag/ornithorynque +http://www.semanlink.net/tag/ornithorynque|broader_prefLabel|Monotrèmes +http://www.semanlink.net/tag/ornithorynque|broader_prefLabel|Animal +http://www.semanlink.net/tag/berkeley|creationTime|2007-10-04T21:30:32Z +http://www.semanlink.net/tag/berkeley|prefLabel|Berkeley +http://www.semanlink.net/tag/berkeley|broader|http://www.semanlink.net/tag/universites_americaines +http://www.semanlink.net/tag/berkeley|creationDate|2007-10-04 +http://www.semanlink.net/tag/berkeley|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/berkeley|uri|http://www.semanlink.net/tag/berkeley +http://www.semanlink.net/tag/berkeley|broader_prefLabel|Universités américaines +http://www.semanlink.net/tag/k_means_clustering|creationTime|2014-10-11T11:42:18Z +http://www.semanlink.net/tag/k_means_clustering|prefLabel|k-means clustering +http://www.semanlink.net/tag/k_means_clustering|broader|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/k_means_clustering|broader|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/k_means_clustering|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/k_means_clustering|broader|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/tag/k_means_clustering|broader|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/k_means_clustering|creationDate|2014-10-11 +http://www.semanlink.net/tag/k_means_clustering|comment|aims to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean (Most k-means-type algorithms require the number of clusters – k – to be specified in advance) +http://www.semanlink.net/tag/k_means_clustering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/k_means_clustering|describedBy|https://en.wikipedia.org/wiki/K-means_clustering 
+http://www.semanlink.net/tag/k_means_clustering|uri|http://www.semanlink.net/tag/k_means_clustering +http://www.semanlink.net/tag/k_means_clustering|broader_prefLabel|Data mining +http://www.semanlink.net/tag/k_means_clustering|broader_prefLabel|Unsupervised machine learning +http://www.semanlink.net/tag/k_means_clustering|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/k_means_clustering|broader_prefLabel|Clustering +http://www.semanlink.net/tag/k_means_clustering|broader_prefLabel|Algorithmes +http://www.semanlink.net/tag/k_means_clustering|broader_altLabel|Data clustering +http://www.semanlink.net/tag/k_means_clustering|broader_altLabel|Cluster analysis +http://www.semanlink.net/tag/k_means_clustering|broader_related|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/politique_francaise|prefLabel|Politique française +http://www.semanlink.net/tag/politique_francaise|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/politique_francaise|broader|http://www.semanlink.net/tag/politique +http://www.semanlink.net/tag/politique_francaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/politique_francaise|uri|http://www.semanlink.net/tag/politique_francaise +http://www.semanlink.net/tag/politique_francaise|broader_prefLabel|France +http://www.semanlink.net/tag/politique_francaise|broader_prefLabel|Politique +http://www.semanlink.net/tag/nlp_pretraining|creationTime|2020-06-30T11:33:52Z +http://www.semanlink.net/tag/nlp_pretraining|prefLabel|NLP: pretraining +http://www.semanlink.net/tag/nlp_pretraining|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/nlp_pretraining|creationDate|2020-06-30 +http://www.semanlink.net/tag/nlp_pretraining|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_pretraining|altLabel|nlp: pre-training +http://www.semanlink.net/tag/nlp_pretraining|uri|http://www.semanlink.net/tag/nlp_pretraining +http://www.semanlink.net/tag/nlp_pretraining|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/bill_de_hora|prefLabel|Bill de hÓra +http://www.semanlink.net/tag/bill_de_hora|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/bill_de_hora|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bill_de_hora|uri|http://www.semanlink.net/tag/bill_de_hora +http://www.semanlink.net/tag/bill_de_hora|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/bill_de_hora|broader_altLabel|Technical guys +http://www.semanlink.net/tag/phrases_nlp|creationTime|2020-05-01T17:40:43Z +http://www.semanlink.net/tag/phrases_nlp|prefLabel|Phrases (NLP) +http://www.semanlink.net/tag/phrases_nlp|broader|http://www.semanlink.net/tag/general_nlp_tasks +http://www.semanlink.net/tag/phrases_nlp|related|http://www.semanlink.net/tag/n_gram +http://www.semanlink.net/tag/phrases_nlp|creationDate|2020-05-01 +http://www.semanlink.net/tag/phrases_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/phrases_nlp|uri|http://www.semanlink.net/tag/phrases_nlp +http://www.semanlink.net/tag/phrases_nlp|broader_prefLabel|General NLP tasks +http://www.semanlink.net/tag/molecular_biology|creationTime|2020-08-28T13:35:21Z +http://www.semanlink.net/tag/molecular_biology|prefLabel|Molecular Biology 
+http://www.semanlink.net/tag/molecular_biology|broader|http://www.semanlink.net/tag/biologie
+http://www.semanlink.net/tag/molecular_biology|creationDate|2020-08-28
+http://www.semanlink.net/tag/molecular_biology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/molecular_biology|altLabel|Biologie moléculaire
+http://www.semanlink.net/tag/molecular_biology|uri|http://www.semanlink.net/tag/molecular_biology
+http://www.semanlink.net/tag/molecular_biology|broader_prefLabel|Biology
+http://www.semanlink.net/tag/molecular_biology|broader_altLabel|Biologie
+http://www.semanlink.net/tag/tattoo|creationTime|2013-08-27T13:42:24Z
+http://www.semanlink.net/tag/tattoo|prefLabel|Tattoo
+http://www.semanlink.net/tag/tattoo|creationDate|2013-08-27
+http://www.semanlink.net/tag/tattoo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/tattoo|uri|http://www.semanlink.net/tag/tattoo
+http://www.semanlink.net/tag/rdf_and_soa|creationTime|2007-03-12T23:31:34Z
+http://www.semanlink.net/tag/rdf_and_soa|prefLabel|RDF and SOA
+http://www.semanlink.net/tag/rdf_and_soa|broader|http://www.semanlink.net/tag/soa
+http://www.semanlink.net/tag/rdf_and_soa|broader|http://www.semanlink.net/tag/rdf
+http://www.semanlink.net/tag/rdf_and_soa|creationDate|2007-03-12
+http://www.semanlink.net/tag/rdf_and_soa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rdf_and_soa|uri|http://www.semanlink.net/tag/rdf_and_soa
+http://www.semanlink.net/tag/rdf_and_soa|broader_prefLabel|SOA
+http://www.semanlink.net/tag/rdf_and_soa|broader_prefLabel|RDF
+http://www.semanlink.net/tag/rdf_and_soa|broader_related|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/rdf_and_soa|broader_related|http://www.semanlink.net/tag/ora_lassila
+http://www.semanlink.net/tag/rdf_and_soa|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/rdf_and_soa|broader_related|http://www.semanlink.net/tag/guha
+http://www.semanlink.net/tag/rdf_and_soa|broader_related|http://www.semanlink.net/tag/grddl
+http://www.semanlink.net/tag/semantic_hashing|creationTime|2017-11-07T14:40:54Z
+http://www.semanlink.net/tag/semantic_hashing|prefLabel|Semantic Hashing
+http://www.semanlink.net/tag/semantic_hashing|broader|http://www.semanlink.net/tag/similarity_queries
+http://www.semanlink.net/tag/semantic_hashing|broader|http://www.semanlink.net/tag/nn_4_nlp
+http://www.semanlink.net/tag/semantic_hashing|related|http://www.semanlink.net/tag/locality_sensitive_hashing
+http://www.semanlink.net/tag/semantic_hashing|related|http://www.semanlink.net/tag/latent_semantic_analysis
+http://www.semanlink.net/tag/semantic_hashing|creationDate|2017-11-07
+http://www.semanlink.net/tag/semantic_hashing|comment|"A method to map documents to a code (e.g., 32-bit memory address) so documents with semantically close content are mapped to close addresses. 
Method introduced by Ruslan Salakhutdinov and Geoffrey Hinton in this [paper](/doc/?uri=http%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fpii%2FS0888613X08001813) + +" +http://www.semanlink.net/tag/semantic_hashing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_hashing|uri|http://www.semanlink.net/tag/semantic_hashing +http://www.semanlink.net/tag/semantic_hashing|broader_prefLabel|Similarity queries +http://www.semanlink.net/tag/semantic_hashing|broader_prefLabel|NN 4 NLP +http://www.semanlink.net/tag/semantic_hashing|broader_altLabel|Vector similarity search +http://www.semanlink.net/tag/semantic_hashing|broader_related|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/hierarchical_classification_evaluation|creationTime|2021-01-10T12:30:52Z +http://www.semanlink.net/tag/hierarchical_classification_evaluation|prefLabel|Hierarchical classification: evaluation +http://www.semanlink.net/tag/hierarchical_classification_evaluation|broader|http://www.semanlink.net/tag/hierarchical_classification +http://www.semanlink.net/tag/hierarchical_classification_evaluation|creationDate|2021-01-10 +http://www.semanlink.net/tag/hierarchical_classification_evaluation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hierarchical_classification_evaluation|uri|http://www.semanlink.net/tag/hierarchical_classification_evaluation +http://www.semanlink.net/tag/hierarchical_classification_evaluation|broader_prefLabel|Hierarchical Classification +http://www.semanlink.net/tag/nosql_pour_les_nuls|creationTime|2013-03-13T11:52:52Z +http://www.semanlink.net/tag/nosql_pour_les_nuls|prefLabel|NoSQL pour les nuls +http://www.semanlink.net/tag/nosql_pour_les_nuls|broader|http://www.semanlink.net/tag/pour_les_nuls +http://www.semanlink.net/tag/nosql_pour_les_nuls|broader|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/nosql_pour_les_nuls|creationDate|2013-03-13 +http://www.semanlink.net/tag/nosql_pour_les_nuls|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nosql_pour_les_nuls|uri|http://www.semanlink.net/tag/nosql_pour_les_nuls +http://www.semanlink.net/tag/nosql_pour_les_nuls|broader_prefLabel|Pour les nuls +http://www.semanlink.net/tag/nosql_pour_les_nuls|broader_prefLabel|NOSQL +http://www.semanlink.net/tag/nosql_pour_les_nuls|broader_related|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/jussieu|creationTime|2019-12-03T09:36:58Z +http://www.semanlink.net/tag/jussieu|prefLabel|Jussieu +http://www.semanlink.net/tag/jussieu|broader|http://www.semanlink.net/tag/sorbonne +http://www.semanlink.net/tag/jussieu|creationDate|2019-12-03 +http://www.semanlink.net/tag/jussieu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jussieu|uri|http://www.semanlink.net/tag/jussieu +http://www.semanlink.net/tag/jussieu|broader_prefLabel|Sorbonne +http://www.semanlink.net/tag/nips_2017|creationTime|2017-12-12T11:02:54Z +http://www.semanlink.net/tag/nips_2017|prefLabel|NIPS 2017 +http://www.semanlink.net/tag/nips_2017|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/nips_2017|creationDate|2017-12-12 +http://www.semanlink.net/tag/nips_2017|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nips_2017|uri|http://www.semanlink.net/tag/nips_2017 +http://www.semanlink.net/tag/nips_2017|broader_prefLabel|Deep Learning 
+http://www.semanlink.net/tag/nips_2017|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/nips_2017|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/rdf_browser|prefLabel|RDF browser +http://www.semanlink.net/tag/rdf_browser|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_browser|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/rdf_browser|broader|http://www.semanlink.net/tag/linked_data_browser +http://www.semanlink.net/tag/rdf_browser|related|http://www.semanlink.net/tag/virtuoso +http://www.semanlink.net/tag/rdf_browser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_browser|uri|http://www.semanlink.net/tag/rdf_browser +http://www.semanlink.net/tag/rdf_browser|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_browser|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/rdf_browser|broader_prefLabel|Linked Data Browser +http://www.semanlink.net/tag/rdf_browser|broader_altLabel|LD +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/rdf_browser|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/la_france_est_un_pays_regicide|creationTime|2009-04-04T19:08:08Z +http://www.semanlink.net/tag/la_france_est_un_pays_regicide|prefLabel|La France est un pays régicide +http://www.semanlink.net/tag/la_france_est_un_pays_regicide|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/la_france_est_un_pays_regicide|related|http://www.semanlink.net/tag/sarkozy +http://www.semanlink.net/tag/la_france_est_un_pays_regicide|related|http://www.semanlink.net/tag/revolution_francaise 
+http://www.semanlink.net/tag/la_france_est_un_pays_regicide|creationDate|2009-04-04 +http://www.semanlink.net/tag/la_france_est_un_pays_regicide|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/la_france_est_un_pays_regicide|uri|http://www.semanlink.net/tag/la_france_est_un_pays_regicide +http://www.semanlink.net/tag/la_france_est_un_pays_regicide|broader_prefLabel|France +http://www.semanlink.net/tag/incipit|creationTime|2020-08-24T00:54:43Z +http://www.semanlink.net/tag/incipit|prefLabel|Incipit +http://www.semanlink.net/tag/incipit|broader|http://www.semanlink.net/tag/citation +http://www.semanlink.net/tag/incipit|creationDate|2020-08-24 +http://www.semanlink.net/tag/incipit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/incipit|uri|http://www.semanlink.net/tag/incipit +http://www.semanlink.net/tag/incipit|broader_prefLabel|Quote +http://www.semanlink.net/tag/incipit|broader_altLabel|Citation +http://www.semanlink.net/tag/pseudo_relevance_feedback|creationTime|2018-02-13T10:53:47Z +http://www.semanlink.net/tag/pseudo_relevance_feedback|prefLabel|Pseudo relevance feedback +http://www.semanlink.net/tag/pseudo_relevance_feedback|broader|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/pseudo_relevance_feedback|creationDate|2018-02-13 +http://www.semanlink.net/tag/pseudo_relevance_feedback|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pseudo_relevance_feedback|altLabel|Blind relevance feedback +http://www.semanlink.net/tag/pseudo_relevance_feedback|uri|http://www.semanlink.net/tag/pseudo_relevance_feedback +http://www.semanlink.net/tag/pseudo_relevance_feedback|broader_prefLabel|Information retrieval +http://www.semanlink.net/tag/pseudo_relevance_feedback|broader_altLabel|IR +http://www.semanlink.net/tag/desktop_search|prefLabel|Desktop search +http://www.semanlink.net/tag/desktop_search|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/desktop_search|broader|http://www.semanlink.net/tag/search +http://www.semanlink.net/tag/desktop_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/desktop_search|uri|http://www.semanlink.net/tag/desktop_search +http://www.semanlink.net/tag/desktop_search|broader_prefLabel|Informatique +http://www.semanlink.net/tag/desktop_search|broader_prefLabel|Search +http://www.semanlink.net/tag/syntax_trees|creationTime|2019-06-10T00:05:17Z +http://www.semanlink.net/tag/syntax_trees|prefLabel|Syntax trees +http://www.semanlink.net/tag/syntax_trees|related|http://www.semanlink.net/tag/part_of_speech_tagging +http://www.semanlink.net/tag/syntax_trees|creationDate|2019-06-10 +http://www.semanlink.net/tag/syntax_trees|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/syntax_trees|uri|http://www.semanlink.net/tag/syntax_trees +http://www.semanlink.net/tag/andrew_mccallum|creationTime|2014-04-24T01:20:00Z +http://www.semanlink.net/tag/andrew_mccallum|prefLabel|Andrew McCallum +http://www.semanlink.net/tag/andrew_mccallum|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/andrew_mccallum|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/andrew_mccallum|creationDate|2014-04-24 +http://www.semanlink.net/tag/andrew_mccallum|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/andrew_mccallum|describedBy|https://en.wikipedia.org/wiki/Andrew_McCallum +http://www.semanlink.net/tag/andrew_mccallum|uri|http://www.semanlink.net/tag/andrew_mccallum +http://www.semanlink.net/tag/andrew_mccallum|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/andrew_mccallum|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/distributional_semantics|creationTime|2017-05-26T00:49:17Z +http://www.semanlink.net/tag/distributional_semantics|prefLabel|Distributional semantics +http://www.semanlink.net/tag/distributional_semantics|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/distributional_semantics|broader|http://www.semanlink.net/tag/analyse_semantique +http://www.semanlink.net/tag/distributional_semantics|related|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/distributional_semantics|related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/distributional_semantics|creationDate|2017-05-26 +http://www.semanlink.net/tag/distributional_semantics|comment|"Methods for quantifying and categorizing semantic similarities between linguistic items based on their distributional properties in large samples of language data. + +Basic idea: the **Distributional hypothesis**: linguistic items with similar distributions have similar meanings. + +Basic approach: collect distributional information in high-dimensional vectors, and define similarity in terms of vector similarity + +Models: latent semantic analysis (LSA), Hyperspace Analogue to Language (HAL), syntax- or dependency-based models, random indexing, semantic folding and various variants of the topic model. + + +" +http://www.semanlink.net/tag/distributional_semantics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/distributional_semantics|describedBy|https://en.wikipedia.org/wiki/Distributional_semantics +http://www.semanlink.net/tag/distributional_semantics|uri|http://www.semanlink.net/tag/distributional_semantics +http://www.semanlink.net/tag/distributional_semantics|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/distributional_semantics|broader_prefLabel|Analyse sémantique +http://www.semanlink.net/tag/browserify|creationTime|2020-10-18T00:57:55Z +http://www.semanlink.net/tag/browserify|prefLabel|browserify +http://www.semanlink.net/tag/browserify|broader|http://www.semanlink.net/tag/javascript_tool +http://www.semanlink.net/tag/browserify|creationDate|2020-10-18 +http://www.semanlink.net/tag/browserify|comment|> Browserify lets you require('modules') in the browser by bundling up all of your dependencies. 
+http://www.semanlink.net/tag/browserify|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/browserify|homepage|http://browserify.org/ +http://www.semanlink.net/tag/browserify|uri|http://www.semanlink.net/tag/browserify +http://www.semanlink.net/tag/browserify|broader_prefLabel|Javascript tool +http://www.semanlink.net/tag/exploit|prefLabel|Exploit +http://www.semanlink.net/tag/exploit|broader|http://www.semanlink.net/tag/divers +http://www.semanlink.net/tag/exploit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exploit|uri|http://www.semanlink.net/tag/exploit +http://www.semanlink.net/tag/exploit|broader_prefLabel|Divers +http://www.semanlink.net/tag/disco_hyperdata_browser|creationTime|2007-03-29T00:36:06Z +http://www.semanlink.net/tag/disco_hyperdata_browser|prefLabel|Disco Hyperdata Browser +http://www.semanlink.net/tag/disco_hyperdata_browser|broader|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/disco_hyperdata_browser|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/disco_hyperdata_browser|broader|http://www.semanlink.net/tag/rdf_browser +http://www.semanlink.net/tag/disco_hyperdata_browser|creationDate|2007-03-29 +http://www.semanlink.net/tag/disco_hyperdata_browser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/disco_hyperdata_browser|uri|http://www.semanlink.net/tag/disco_hyperdata_browser +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_prefLabel|Chris Bizer +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_prefLabel|RDF browser +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_altLabel|LD +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/disco_hyperdata_browser|broader_related|http://www.semanlink.net/tag/virtuoso 
+http://www.semanlink.net/tag/paul_miller|creationTime|2008-05-17T23:13:02Z +http://www.semanlink.net/tag/paul_miller|prefLabel|Paul Miller +http://www.semanlink.net/tag/paul_miller|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/paul_miller|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/paul_miller|creationDate|2008-05-17 +http://www.semanlink.net/tag/paul_miller|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paul_miller|uri|http://www.semanlink.net/tag/paul_miller +http://www.semanlink.net/tag/paul_miller|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/paul_miller|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/paul_miller|broader_altLabel|Technical guys +http://www.semanlink.net/tag/taxe_carbone|creationTime|2010-03-24T15:01:55Z +http://www.semanlink.net/tag/taxe_carbone|prefLabel|Taxe carbone +http://www.semanlink.net/tag/taxe_carbone|broader|http://www.semanlink.net/tag/rechauffement_climatique +http://www.semanlink.net/tag/taxe_carbone|broader|http://www.semanlink.net/tag/economie_ecologique +http://www.semanlink.net/tag/taxe_carbone|broader|http://www.semanlink.net/tag/pollueurs_payeurs +http://www.semanlink.net/tag/taxe_carbone|creationDate|2010-03-24 +http://www.semanlink.net/tag/taxe_carbone|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/taxe_carbone|uri|http://www.semanlink.net/tag/taxe_carbone +http://www.semanlink.net/tag/taxe_carbone|broader_prefLabel|Climate crisis +http://www.semanlink.net/tag/taxe_carbone|broader_prefLabel|Économie écologique +http://www.semanlink.net/tag/taxe_carbone|broader_prefLabel|Pollueurs payeurs +http://www.semanlink.net/tag/taxe_carbone|broader_altLabel|Réchauffement climatique +http://www.semanlink.net/tag/taxe_carbone|broader_altLabel|Global warming +http://www.semanlink.net/tag/taxe_carbone|broader_related|http://www.semanlink.net/tag/anthropocene +http://www.semanlink.net/tag/vso|creationTime|2011-03-24T18:27:37Z +http://www.semanlink.net/tag/vso|prefLabel|VSO +http://www.semanlink.net/tag/vso|broader|http://www.semanlink.net/tag/goodrelations +http://www.semanlink.net/tag/vso|broader|http://www.semanlink.net/tag/automotive_ontologies +http://www.semanlink.net/tag/vso|creationDate|2011-03-24 +http://www.semanlink.net/tag/vso|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vso|homepage|http://www.heppnetz.de/ontologies/vso/ns +http://www.semanlink.net/tag/vso|uri|http://www.semanlink.net/tag/vso +http://www.semanlink.net/tag/vso|broader_prefLabel|GoodRelations +http://www.semanlink.net/tag/vso|broader_prefLabel|Automotive ontologies +http://www.semanlink.net/tag/vso|broader_altLabel|Car ontology +http://www.semanlink.net/tag/vso|broader_related|http://www.semanlink.net/tag/martin_hepp +http://www.semanlink.net/tag/semantic_web|prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web|broader|http://www.semanlink.net/tag/semantic_technology +http://www.semanlink.net/tag/semantic_web|broader|http://www.semanlink.net/tag/data_web +http://www.semanlink.net/tag/semantic_web|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web|altLabel|sw +http://www.semanlink.net/tag/semantic_web|altLabel|Web sémantique 
+http://www.semanlink.net/tag/semantic_web|uri|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web|broader_prefLabel|Semantic technology +http://www.semanlink.net/tag/semantic_web|broader_prefLabel|Web of data +http://www.semanlink.net/tag/semantic_web|broader_prefLabel|Internet +http://www.semanlink.net/tag/ebooks|creationTime|2013-08-18T13:02:37Z +http://www.semanlink.net/tag/ebooks|prefLabel|ebooks +http://www.semanlink.net/tag/ebooks|creationDate|2013-08-18 +http://www.semanlink.net/tag/ebooks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ebooks|uri|http://www.semanlink.net/tag/ebooks +http://www.semanlink.net/tag/para|creationTime|2007-09-11T21:27:12Z +http://www.semanlink.net/tag/para|prefLabel|Pará +http://www.semanlink.net/tag/para|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/para|broader|http://www.semanlink.net/tag/amazonie +http://www.semanlink.net/tag/para|creationDate|2007-09-11 +http://www.semanlink.net/tag/para|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/para|uri|http://www.semanlink.net/tag/para +http://www.semanlink.net/tag/para|broader_prefLabel|Brésil +http://www.semanlink.net/tag/para|broader_prefLabel|Amazonie +http://www.semanlink.net/tag/para|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/configurator|creationTime|2013-04-17T14:54:02Z +http://www.semanlink.net/tag/configurator|prefLabel|Configurator +http://www.semanlink.net/tag/configurator|creationDate|2013-04-17 +http://www.semanlink.net/tag/configurator|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/configurator|uri|http://www.semanlink.net/tag/configurator +http://www.semanlink.net/tag/atom_github|creationTime|2014-10-05T17:23:01Z +http://www.semanlink.net/tag/atom_github|prefLabel|ATOM (Text editor) +http://www.semanlink.net/tag/atom_github|broader|http://www.semanlink.net/tag/text_editor +http://www.semanlink.net/tag/atom_github|broader|http://www.semanlink.net/tag/github +http://www.semanlink.net/tag/atom_github|broader|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/tag/atom_github|creationDate|2014-10-05 +http://www.semanlink.net/tag/atom_github|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/atom_github|uri|http://www.semanlink.net/tag/atom_github +http://www.semanlink.net/tag/atom_github|broader_prefLabel|Text Editor +http://www.semanlink.net/tag/atom_github|broader_prefLabel|GitHub +http://www.semanlink.net/tag/atom_github|broader_prefLabel|GitHub project +http://www.semanlink.net/tag/bibliotheconomie|prefLabel|Bibliothéconomie +http://www.semanlink.net/tag/bibliotheconomie|broader|http://www.semanlink.net/tag/bibliotheque +http://www.semanlink.net/tag/bibliotheconomie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bibliotheconomie|uri|http://www.semanlink.net/tag/bibliotheconomie +http://www.semanlink.net/tag/bibliotheconomie|broader_prefLabel|Bibliothèque +http://www.semanlink.net/tag/filets_a_nuages|creationTime|2008-04-08T13:06:03Z +http://www.semanlink.net/tag/filets_a_nuages|prefLabel|Filets à nuages +http://www.semanlink.net/tag/filets_a_nuages|broader|http://www.semanlink.net/tag/eau +http://www.semanlink.net/tag/filets_a_nuages|creationDate|2008-04-08 +http://www.semanlink.net/tag/filets_a_nuages|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/filets_a_nuages|uri|http://www.semanlink.net/tag/filets_a_nuages +http://www.semanlink.net/tag/filets_a_nuages|broader_prefLabel|Eau +http://www.semanlink.net/tag/tenere|prefLabel|Ténéré +http://www.semanlink.net/tag/tenere|broader|http://www.semanlink.net/tag/sahara +http://www.semanlink.net/tag/tenere|broader|http://www.semanlink.net/tag/desert +http://www.semanlink.net/tag/tenere|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/tenere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tenere|uri|http://www.semanlink.net/tag/tenere +http://www.semanlink.net/tag/tenere|broader_prefLabel|Sahara +http://www.semanlink.net/tag/tenere|broader_prefLabel|Désert +http://www.semanlink.net/tag/tenere|broader_prefLabel|Niger +http://www.semanlink.net/tag/tenere|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/tenere|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/tenere|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/nosql|creationTime|2009-07-07T19:26:57Z +http://www.semanlink.net/tag/nosql|prefLabel|NOSQL +http://www.semanlink.net/tag/nosql|broader|http://www.semanlink.net/tag/database +http://www.semanlink.net/tag/nosql|related|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/nosql|creationDate|2009-07-07 +http://www.semanlink.net/tag/nosql|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nosql|uri|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/nosql|broader_prefLabel|Database +http://www.semanlink.net/tag/debug|prefLabel|Debug +http://www.semanlink.net/tag/debug|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/debug|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/debug|uri|http://www.semanlink.net/tag/debug +http://www.semanlink.net/tag/debug|broader_prefLabel|Dev +http://www.semanlink.net/tag/cia|prefLabel|CIA +http://www.semanlink.net/tag/cia|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/cia|broader|http://www.semanlink.net/tag/services_secrets +http://www.semanlink.net/tag/cia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cia|uri|http://www.semanlink.net/tag/cia +http://www.semanlink.net/tag/cia|broader_prefLabel|USA +http://www.semanlink.net/tag/cia|broader_prefLabel|Services secrets +http://www.semanlink.net/tag/cia|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/cia|broader_altLabel|United States +http://www.semanlink.net/tag/rdf_bus|prefLabel|RDF bus +http://www.semanlink.net/tag/rdf_bus|broader|http://www.semanlink.net/tag/semantic_integration_hub +http://www.semanlink.net/tag/rdf_bus|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_bus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_bus|altLabel|RDF-based data integration +http://www.semanlink.net/tag/rdf_bus|uri|http://www.semanlink.net/tag/rdf_bus +http://www.semanlink.net/tag/rdf_bus|broader_prefLabel|Semantic Integration Hub +http://www.semanlink.net/tag/rdf_bus|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_bus|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_bus|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_bus|broader_related|http://www.semanlink.net/tag/sparql 
+http://www.semanlink.net/tag/rdf_bus|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_bus|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/obamacare|creationTime|2010-01-12T14:49:48Z +http://www.semanlink.net/tag/obamacare|prefLabel|Obamacare +http://www.semanlink.net/tag/obamacare|broader|http://www.semanlink.net/tag/obama +http://www.semanlink.net/tag/obamacare|creationDate|2010-01-12 +http://www.semanlink.net/tag/obamacare|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/obamacare|uri|http://www.semanlink.net/tag/obamacare +http://www.semanlink.net/tag/obamacare|broader_prefLabel|Obama +http://www.semanlink.net/tag/rdf_in_json|creationTime|2010-12-06T18:07:42Z +http://www.semanlink.net/tag/rdf_in_json|prefLabel|RDF-in-JSON +http://www.semanlink.net/tag/rdf_in_json|broader|http://www.semanlink.net/tag/json +http://www.semanlink.net/tag/rdf_in_json|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_in_json|creationDate|2010-12-06 +http://www.semanlink.net/tag/rdf_in_json|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_in_json|uri|http://www.semanlink.net/tag/rdf_in_json +http://www.semanlink.net/tag/rdf_in_json|broader_prefLabel|JSON +http://www.semanlink.net/tag/rdf_in_json|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_in_json|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_in_json|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_in_json|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_in_json|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_in_json|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/osema_2011|creationTime|2011-03-07T09:04:05Z +http://www.semanlink.net/tag/osema_2011|prefLabel|OSEMA 2011 +http://www.semanlink.net/tag/osema_2011|broader|http://www.semanlink.net/tag/eswc_2011 +http://www.semanlink.net/tag/osema_2011|broader|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/osema_2011|related|http://www.semanlink.net/tag/osema_deri_renault_paper +http://www.semanlink.net/tag/osema_2011|creationDate|2011-03-07 +http://www.semanlink.net/tag/osema_2011|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/osema_2011|homepage|http://www.osema.org.ve/ +http://www.semanlink.net/tag/osema_2011|uri|http://www.semanlink.net/tag/osema_2011 +http://www.semanlink.net/tag/osema_2011|broader_prefLabel|ESWC 2011 +http://www.semanlink.net/tag/osema_2011|broader_prefLabel|Workshop +http://www.semanlink.net/tag/snorkel|creationTime|2019-05-22T00:29:55Z +http://www.semanlink.net/tag/snorkel|prefLabel|Snorkel +http://www.semanlink.net/tag/snorkel|broader|http://www.semanlink.net/tag/weak_supervision +http://www.semanlink.net/tag/snorkel|broader|http://www.semanlink.net/tag/labeled_data +http://www.semanlink.net/tag/snorkel|broader|http://www.semanlink.net/tag/training_data +http://www.semanlink.net/tag/snorkel|broader|http://www.semanlink.net/tag/training_data_nlp +http://www.semanlink.net/tag/snorkel|creationDate|2019-05-22 +http://www.semanlink.net/tag/snorkel|comment|"A system for rapidly creating training sets with weak supervision + +> The System for Programmatically Building and Managing Training Data" 
+http://www.semanlink.net/tag/snorkel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/snorkel|altLabel|Snorkel MeTaL +http://www.semanlink.net/tag/snorkel|uri|http://www.semanlink.net/tag/snorkel +http://www.semanlink.net/tag/snorkel|broader_prefLabel|Weak supervision +http://www.semanlink.net/tag/snorkel|broader_prefLabel|Labeled Data +http://www.semanlink.net/tag/snorkel|broader_prefLabel|Training data +http://www.semanlink.net/tag/snorkel|broader_prefLabel|Training Data (NLP) +http://www.semanlink.net/tag/knowledge_graph_conference_2019|creationTime|2019-05-09T23:19:13Z +http://www.semanlink.net/tag/knowledge_graph_conference_2019|prefLabel|Knowledge Graph Conference 2019 +http://www.semanlink.net/tag/knowledge_graph_conference_2019|broader|http://www.semanlink.net/tag/the_knowledge_graph_conference +http://www.semanlink.net/tag/knowledge_graph_conference_2019|broader|http://www.semanlink.net/tag/enterprise_knowledge_graph +http://www.semanlink.net/tag/knowledge_graph_conference_2019|related|http://www.semanlink.net/tag/francois_scharffe +http://www.semanlink.net/tag/knowledge_graph_conference_2019|creationDate|2019-05-09 +http://www.semanlink.net/tag/knowledge_graph_conference_2019|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_graph_conference_2019|uri|http://www.semanlink.net/tag/knowledge_graph_conference_2019 +http://www.semanlink.net/tag/knowledge_graph_conference_2019|broader_prefLabel|The Knowledge Graph Conference +http://www.semanlink.net/tag/knowledge_graph_conference_2019|broader_prefLabel|Enterprise Knowledge Graph +http://www.semanlink.net/tag/knowledge_graph_conference_2019|broader_related|http://www.semanlink.net/tag/francois_scharffe +http://www.semanlink.net/tag/alexandria|creationTime|2015-07-14T23:50:24Z +http://www.semanlink.net/tag/alexandria|prefLabel|Alexandria +http://www.semanlink.net/tag/alexandria|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/alexandria|broader|http://www.semanlink.net/tag/egypte +http://www.semanlink.net/tag/alexandria|creationDate|2015-07-14 +http://www.semanlink.net/tag/alexandria|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alexandria|describedBy|https://en.wikipedia.org/wiki/Alexandria +http://www.semanlink.net/tag/alexandria|uri|http://www.semanlink.net/tag/alexandria +http://www.semanlink.net/tag/alexandria|broader_prefLabel|Ville +http://www.semanlink.net/tag/alexandria|broader_prefLabel|Egypte +http://www.semanlink.net/tag/paradise_papers|creationTime|2017-11-06T12:05:10Z +http://www.semanlink.net/tag/paradise_papers|prefLabel|Paradise Papers +http://www.semanlink.net/tag/paradise_papers|broader|http://www.semanlink.net/tag/paradis_fiscaux +http://www.semanlink.net/tag/paradise_papers|broader|http://www.semanlink.net/tag/leaks +http://www.semanlink.net/tag/paradise_papers|creationDate|2017-11-06 +http://www.semanlink.net/tag/paradise_papers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paradise_papers|uri|http://www.semanlink.net/tag/paradise_papers +http://www.semanlink.net/tag/paradise_papers|broader_prefLabel|Paradis fiscaux +http://www.semanlink.net/tag/paradise_papers|broader_prefLabel|Leaks +http://www.semanlink.net/tag/paradise_papers|broader_altLabel|Tax Haven +http://www.semanlink.net/tag/paradise_papers|broader_altLabel|Paradis fiscal 
+http://www.semanlink.net/tag/paradise_papers|broader_related|http://www.semanlink.net/tag/luxembourg +http://www.semanlink.net/tag/paradise_papers|broader_related|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/tag/open_university|prefLabel|Open University +http://www.semanlink.net/tag/open_university|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/open_university|broader|http://www.semanlink.net/tag/universite +http://www.semanlink.net/tag/open_university|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/open_university|uri|http://www.semanlink.net/tag/open_university +http://www.semanlink.net/tag/open_university|broader_prefLabel|Education +http://www.semanlink.net/tag/open_university|broader_prefLabel|Université +http://www.semanlink.net/tag/open_university|broader_altLabel|Enseignement +http://www.semanlink.net/tag/michael_rakowitz|creationTime|2019-03-23T01:24:12Z +http://www.semanlink.net/tag/michael_rakowitz|prefLabel|Michael Rakowitz +http://www.semanlink.net/tag/michael_rakowitz|broader|http://www.semanlink.net/tag/artiste +http://www.semanlink.net/tag/michael_rakowitz|related|http://www.semanlink.net/tag/irak +http://www.semanlink.net/tag/michael_rakowitz|creationDate|2019-03-23 +http://www.semanlink.net/tag/michael_rakowitz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/michael_rakowitz|homepage|http://www.michaelrakowitz.com +http://www.semanlink.net/tag/michael_rakowitz|describedBy|https://en.wikipedia.org/wiki/Michael_Rakowitz +http://www.semanlink.net/tag/michael_rakowitz|uri|http://www.semanlink.net/tag/michael_rakowitz +http://www.semanlink.net/tag/michael_rakowitz|broader_prefLabel|Artiste +http://www.semanlink.net/tag/conjecture_de_goldbach|creationTime|2012-05-20T23:01:18Z +http://www.semanlink.net/tag/conjecture_de_goldbach|prefLabel|Conjecture de Goldbach +http://www.semanlink.net/tag/conjecture_de_goldbach|broader|http://www.semanlink.net/tag/grands_problemes_mathematiques +http://www.semanlink.net/tag/conjecture_de_goldbach|broader|http://www.semanlink.net/tag/nombres_premiers +http://www.semanlink.net/tag/conjecture_de_goldbach|creationDate|2012-05-20 +http://www.semanlink.net/tag/conjecture_de_goldbach|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conjecture_de_goldbach|uri|http://www.semanlink.net/tag/conjecture_de_goldbach +http://www.semanlink.net/tag/conjecture_de_goldbach|broader_prefLabel|Grands problèmes mathématiques +http://www.semanlink.net/tag/conjecture_de_goldbach|broader_prefLabel|Nombres premiers +http://www.semanlink.net/tag/computational_neuroscience|creationTime|2013-11-30T21:52:07Z +http://www.semanlink.net/tag/computational_neuroscience|prefLabel|Computational Neuroscience +http://www.semanlink.net/tag/computational_neuroscience|broader|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/computational_neuroscience|related|http://www.semanlink.net/tag/brain_vs_deep_learning +http://www.semanlink.net/tag/computational_neuroscience|creationDate|2013-11-30 +http://www.semanlink.net/tag/computational_neuroscience|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/computational_neuroscience|uri|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/computational_neuroscience|broader_prefLabel|Neuroscience +http://www.semanlink.net/tag/cazuza|creationTime|2008-09-12T18:55:07Z 
+http://www.semanlink.net/tag/cazuza|prefLabel|Cazuza
+http://www.semanlink.net/tag/cazuza|broader|http://www.semanlink.net/tag/musique
+http://www.semanlink.net/tag/cazuza|broader|http://www.semanlink.net/tag/bresil
+http://www.semanlink.net/tag/cazuza|creationDate|2008-09-12
+http://www.semanlink.net/tag/cazuza|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cazuza|describedBy|https://pt.wikipedia.org/wiki/Cazuza
+http://www.semanlink.net/tag/cazuza|uri|http://www.semanlink.net/tag/cazuza
+http://www.semanlink.net/tag/cazuza|broader_prefLabel|Musique
+http://www.semanlink.net/tag/cazuza|broader_prefLabel|Brésil
+http://www.semanlink.net/tag/cazuza|broader_altLabel|Music
+http://www.semanlink.net/tag/cazuza|broader_related|http://www.semanlink.net/tag/saudade
+http://www.semanlink.net/tag/multi_task_learning|creationTime|2018-01-20T13:08:36Z
+http://www.semanlink.net/tag/multi_task_learning|prefLabel|Multi-task learning
+http://www.semanlink.net/tag/multi_task_learning|broader|http://www.semanlink.net/tag/machine_learning_techniques
+http://www.semanlink.net/tag/multi_task_learning|related|http://www.semanlink.net/tag/transfer_learning
+http://www.semanlink.net/tag/multi_task_learning|creationDate|2018-01-20
+http://www.semanlink.net/tag/multi_task_learning|comment|general method for sharing parameters between models that are trained on multiple tasks, solving several tasks at once
+http://www.semanlink.net/tag/multi_task_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/multi_task_learning|describedBy|https://en.wikipedia.org/wiki/Multi-task_learning
+http://www.semanlink.net/tag/multi_task_learning|altLabel|Multitask Learning
+http://www.semanlink.net/tag/multi_task_learning|altLabel|Joint learning
+http://www.semanlink.net/tag/multi_task_learning|altLabel|Joint models
+http://www.semanlink.net/tag/multi_task_learning|altLabel|MTL
+http://www.semanlink.net/tag/multi_task_learning|uri|http://www.semanlink.net/tag/multi_task_learning
+http://www.semanlink.net/tag/multi_task_learning|broader_prefLabel|Machine learning: techniques
+http://www.semanlink.net/tag/jeff_hawkins|creationTime|2013-05-25T17:58:33Z
+http://www.semanlink.net/tag/jeff_hawkins|prefLabel|Jeff Hawkins
+http://www.semanlink.net/tag/jeff_hawkins|broader|http://www.semanlink.net/tag/ai_girls_and_guys
+http://www.semanlink.net/tag/jeff_hawkins|related|http://www.semanlink.net/tag/one_learning_algorithm_hypothesis
+http://www.semanlink.net/tag/jeff_hawkins|related|http://www.semanlink.net/tag/neuroscience
+http://www.semanlink.net/tag/jeff_hawkins|related|http://www.semanlink.net/tag/cerveau
+http://www.semanlink.net/tag/jeff_hawkins|related|http://www.semanlink.net/tag/neocortex
+http://www.semanlink.net/tag/jeff_hawkins|creationDate|2013-05-25
+http://www.semanlink.net/tag/jeff_hawkins|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/jeff_hawkins|describedBy|https://en.wikipedia.org/wiki/Jeff_Hawkins
+http://www.semanlink.net/tag/jeff_hawkins|uri|http://www.semanlink.net/tag/jeff_hawkins
+http://www.semanlink.net/tag/jeff_hawkins|broader_prefLabel|AI girls and guys
+http://www.semanlink.net/tag/ldow2013|creationTime|2013-03-02T16:44:41Z
+http://www.semanlink.net/tag/ldow2013|prefLabel|LDOW2013
+http://www.semanlink.net/tag/ldow2013|broader|http://www.semanlink.net/tag/www_2013
+http://www.semanlink.net/tag/ldow2013|broader|http://www.semanlink.net/tag/ldow
+http://www.semanlink.net/tag/ldow2013|creationDate|2013-03-02 +http://www.semanlink.net/tag/ldow2013|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ldow2013|homepage|http://events.linkeddata.org/ldow2013/ +http://www.semanlink.net/tag/ldow2013|uri|http://www.semanlink.net/tag/ldow2013 +http://www.semanlink.net/tag/ldow2013|broader_prefLabel|WWW 2013 +http://www.semanlink.net/tag/ldow2013|broader_prefLabel|LDOW +http://www.semanlink.net/tag/lod_museum|creationTime|2016-09-05T11:45:34Z +http://www.semanlink.net/tag/lod_museum|prefLabel|LOD & museum +http://www.semanlink.net/tag/lod_museum|broader|http://www.semanlink.net/tag/culture_et_sem_web +http://www.semanlink.net/tag/lod_museum|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/lod_museum|broader|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/lod_museum|creationDate|2016-09-05 +http://www.semanlink.net/tag/lod_museum|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lod_museum|uri|http://www.semanlink.net/tag/lod_museum +http://www.semanlink.net/tag/lod_museum|broader_prefLabel|Culture et sem web +http://www.semanlink.net/tag/lod_museum|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/lod_museum|broader_prefLabel|Musée +http://www.semanlink.net/tag/lod_museum|broader_altLabel|LOD +http://www.semanlink.net/tag/lod_museum|broader_related|http://www.semanlink.net/tag/bertrand_sajus +http://www.semanlink.net/tag/lod_museum|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/lod_museum|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/lod_museum|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/lod_museum|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/firefighter|creationTime|2013-10-18T22:36:42Z +http://www.semanlink.net/tag/firefighter|prefLabel|Firefighter +http://www.semanlink.net/tag/firefighter|broader|http://www.semanlink.net/tag/fire +http://www.semanlink.net/tag/firefighter|creationDate|2013-10-18 +http://www.semanlink.net/tag/firefighter|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/firefighter|uri|http://www.semanlink.net/tag/firefighter +http://www.semanlink.net/tag/firefighter|broader_prefLabel|Fire +http://www.semanlink.net/tag/semanlink_feature_request|creationTime|2007-04-07T23:14:12Z +http://www.semanlink.net/tag/semanlink_feature_request|prefLabel|Semanlink Feature Request +http://www.semanlink.net/tag/semanlink_feature_request|broader|http://www.semanlink.net/tag/semanlink_todo +http://www.semanlink.net/tag/semanlink_feature_request|creationDate|2007-04-07 +http://www.semanlink.net/tag/semanlink_feature_request|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semanlink_feature_request|altLabel|SL Feature Request +http://www.semanlink.net/tag/semanlink_feature_request|uri|http://www.semanlink.net/tag/semanlink_feature_request +http://www.semanlink.net/tag/semanlink_feature_request|broader_prefLabel|Semanlink todo +http://www.semanlink.net/tag/semanlink_feature_request|broader_altLabel|SL todo +http://www.semanlink.net/tag/chine_usa|creationTime|2021-11-03T12:29:42Z +http://www.semanlink.net/tag/chine_usa|prefLabel|Chine-USA +http://www.semanlink.net/tag/chine_usa|creationDate|2021-11-03 
+http://www.semanlink.net/tag/chine_usa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chine_usa|uri|http://www.semanlink.net/tag/chine_usa +http://www.semanlink.net/tag/swrl|creationTime|2007-08-23T23:59:14Z +http://www.semanlink.net/tag/swrl|prefLabel|SWRL +http://www.semanlink.net/tag/swrl|broader|http://www.semanlink.net/tag/rules +http://www.semanlink.net/tag/swrl|related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/swrl|creationDate|2007-08-23 +http://www.semanlink.net/tag/swrl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/swrl|uri|http://www.semanlink.net/tag/swrl +http://www.semanlink.net/tag/swrl|broader_prefLabel|Rules +http://www.semanlink.net/tag/pythagore|creationTime|2009-05-03T13:20:36Z +http://www.semanlink.net/tag/pythagore|prefLabel|Pythagore +http://www.semanlink.net/tag/pythagore|broader|http://www.semanlink.net/tag/geometrie +http://www.semanlink.net/tag/pythagore|broader|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/tag/pythagore|creationDate|2009-05-03 +http://www.semanlink.net/tag/pythagore|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pythagore|uri|http://www.semanlink.net/tag/pythagore +http://www.semanlink.net/tag/pythagore|broader_prefLabel|Géométrie +http://www.semanlink.net/tag/pythagore|broader_prefLabel|Grèce antique +http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes|creationTime|2007-05-23T01:23:30Z +http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes|prefLabel|Placentaires, marsupiaux et monotrèmes +http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes|broader|http://www.semanlink.net/tag/monotremes +http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes|broader|http://www.semanlink.net/tag/marsupiaux +http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes|creationDate|2007-05-23 +http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes|uri|http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes +http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes|broader_prefLabel|Monotrèmes +http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes|broader_prefLabel|Marsupiaux +http://www.semanlink.net/tag/acoustique_musicale|creationTime|2007-11-28T01:31:53Z +http://www.semanlink.net/tag/acoustique_musicale|prefLabel|Acoustique musicale +http://www.semanlink.net/tag/acoustique_musicale|broader|http://www.semanlink.net/tag/acoustique +http://www.semanlink.net/tag/acoustique_musicale|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/acoustique_musicale|creationDate|2007-11-28 +http://www.semanlink.net/tag/acoustique_musicale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/acoustique_musicale|uri|http://www.semanlink.net/tag/acoustique_musicale +http://www.semanlink.net/tag/acoustique_musicale|broader_prefLabel|Acoustique +http://www.semanlink.net/tag/acoustique_musicale|broader_prefLabel|Musique +http://www.semanlink.net/tag/acoustique_musicale|broader_altLabel|Music +http://www.semanlink.net/tag/semantic_search|prefLabel|Semantic Search +http://www.semanlink.net/tag/semantic_search|broader|http://www.semanlink.net/tag/search 
+http://www.semanlink.net/tag/semantic_search|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_search|related|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/tag/semantic_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_search|uri|http://www.semanlink.net/tag/semantic_search +http://www.semanlink.net/tag/semantic_search|broader_prefLabel|Search +http://www.semanlink.net/tag/semantic_search|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_search|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_search|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/hepp_s_propertyvalue|creationTime|2014-12-09T16:55:50Z +http://www.semanlink.net/tag/hepp_s_propertyvalue|prefLabel|Hepp's PropertyValue +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader|http://www.semanlink.net/tag/martin_hepp +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader|http://www.semanlink.net/tag/product_description +http://www.semanlink.net/tag/hepp_s_propertyvalue|creationDate|2014-12-09 +http://www.semanlink.net/tag/hepp_s_propertyvalue|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hepp_s_propertyvalue|uri|http://www.semanlink.net/tag/hepp_s_propertyvalue +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_prefLabel|schema.org +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_prefLabel|Martin Hepp +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_prefLabel|Product description +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_related|http://www.semanlink.net/tag/yahoo +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_related|http://www.semanlink.net/tag/yandex +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_related|http://www.semanlink.net/tag/microdata +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_related|http://www.semanlink.net/tag/dan_brickley +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_related|http://www.semanlink.net/tag/bing +http://www.semanlink.net/tag/hepp_s_propertyvalue|broader_related|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/jdbc|creationTime|2008-10-20T15:36:17Z +http://www.semanlink.net/tag/jdbc|prefLabel|JDBC +http://www.semanlink.net/tag/jdbc|broader|http://www.semanlink.net/tag/java_dev +http://www.semanlink.net/tag/jdbc|broader|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/jdbc|creationDate|2008-10-20 +http://www.semanlink.net/tag/jdbc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jdbc|uri|http://www.semanlink.net/tag/jdbc +http://www.semanlink.net/tag/jdbc|broader_prefLabel|Java dev +http://www.semanlink.net/tag/jdbc|broader_prefLabel|SQL +http://www.semanlink.net/tag/resources_oriented_web_services|creationTime|2014-10-21T13:41:57Z +http://www.semanlink.net/tag/resources_oriented_web_services|prefLabel|Resources-Oriented Web Services +http://www.semanlink.net/tag/resources_oriented_web_services|broader|http://www.semanlink.net/tag/hateoas 
+http://www.semanlink.net/tag/resources_oriented_web_services|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/resources_oriented_web_services|broader|http://www.semanlink.net/tag/restful_semantic_web_services +http://www.semanlink.net/tag/resources_oriented_web_services|broader|http://www.semanlink.net/tag/schema_org_actions +http://www.semanlink.net/tag/resources_oriented_web_services|creationDate|2014-10-21 +http://www.semanlink.net/tag/resources_oriented_web_services|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/resources_oriented_web_services|uri|http://www.semanlink.net/tag/resources_oriented_web_services +http://www.semanlink.net/tag/resources_oriented_web_services|broader_prefLabel|HATEOAS +http://www.semanlink.net/tag/resources_oriented_web_services|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/resources_oriented_web_services|broader_prefLabel|Restful semantic web services +http://www.semanlink.net/tag/resources_oriented_web_services|broader_prefLabel|Schema.org Actions +http://www.semanlink.net/tag/resources_oriented_web_services|broader_altLabel|Hypermedia API +http://www.semanlink.net/tag/resources_oriented_web_services|broader_altLabel|LD +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/samuel_goto +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/resources_oriented_web_services|broader_related|http://www.semanlink.net/tag/hydra +http://www.semanlink.net/tag/backpropagation_vs_biology|creationTime|2019-10-31T13:34:59Z +http://www.semanlink.net/tag/backpropagation_vs_biology|prefLabel|Backpropagation vs Biology +http://www.semanlink.net/tag/backpropagation_vs_biology|broader|http://www.semanlink.net/tag/brain_vs_deep_learning 
+http://www.semanlink.net/tag/backpropagation_vs_biology|broader|http://www.semanlink.net/tag/backpropagation +http://www.semanlink.net/tag/backpropagation_vs_biology|broader|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/backpropagation_vs_biology|creationDate|2019-10-31 +http://www.semanlink.net/tag/backpropagation_vs_biology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/backpropagation_vs_biology|uri|http://www.semanlink.net/tag/backpropagation_vs_biology +http://www.semanlink.net/tag/backpropagation_vs_biology|broader_prefLabel|Brain vs Deep Learning +http://www.semanlink.net/tag/backpropagation_vs_biology|broader_prefLabel|Backpropagation +http://www.semanlink.net/tag/backpropagation_vs_biology|broader_prefLabel|Computational Neuroscience +http://www.semanlink.net/tag/backpropagation_vs_biology|broader_altLabel|Back Propagation +http://www.semanlink.net/tag/backpropagation_vs_biology|broader_related|http://www.semanlink.net/tag/brain_vs_deep_learning +http://www.semanlink.net/tag/swoogle|prefLabel|Swoogle +http://www.semanlink.net/tag/swoogle|broader|http://www.semanlink.net/tag/semantic_web_search_engine +http://www.semanlink.net/tag/swoogle|broader|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/swoogle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/swoogle|uri|http://www.semanlink.net/tag/swoogle +http://www.semanlink.net/tag/swoogle|broader_prefLabel|Semantic Web search engine +http://www.semanlink.net/tag/swoogle|broader_prefLabel|RDF Data source +http://www.semanlink.net/tag/swoogle|broader_altLabel|RDF search engine +http://www.semanlink.net/tag/textblob|creationTime|2017-10-25T23:34:53Z +http://www.semanlink.net/tag/textblob|prefLabel|TextBlob +http://www.semanlink.net/tag/textblob|broader|http://www.semanlink.net/tag/nltk +http://www.semanlink.net/tag/textblob|broader|http://www.semanlink.net/tag/python_nlp +http://www.semanlink.net/tag/textblob|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/textblob|related|http://www.semanlink.net/tag/spellchecker +http://www.semanlink.net/tag/textblob|creationDate|2017-10-25 +http://www.semanlink.net/tag/textblob|comment|"Python (2 and 3) library for processing textual data. API for diving into common NLP tasks such as part-of-speech tagging, noun phrase extraction, sentiment analysis, classification, translation, and more. 
Based on NLTK +" +http://www.semanlink.net/tag/textblob|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/textblob|homepage|https://textblob.readthedocs.io +http://www.semanlink.net/tag/textblob|uri|http://www.semanlink.net/tag/textblob +http://www.semanlink.net/tag/textblob|broader_prefLabel|NLTK +http://www.semanlink.net/tag/textblob|broader_prefLabel|Python-NLP +http://www.semanlink.net/tag/textblob|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/textblob|broader_related|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/textblob|broader_related|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/tag/big_bang|prefLabel|Big bang +http://www.semanlink.net/tag/big_bang|broader|http://www.semanlink.net/tag/astrophysique +http://www.semanlink.net/tag/big_bang|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/big_bang|uri|http://www.semanlink.net/tag/big_bang +http://www.semanlink.net/tag/big_bang|broader_prefLabel|Astrophysique +http://www.semanlink.net/tag/www_2009|creationTime|2009-03-05T10:41:46Z +http://www.semanlink.net/tag/www_2009|prefLabel|WWW 2009 +http://www.semanlink.net/tag/www_2009|broader|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/www_2009|creationDate|2009-03-05 +http://www.semanlink.net/tag/www_2009|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/www_2009|uri|http://www.semanlink.net/tag/www_2009 +http://www.semanlink.net/tag/www_2009|broader_prefLabel|TheWebConf +http://www.semanlink.net/tag/www_2009|broader_altLabel|WWW Conference +http://www.semanlink.net/tag/jsonld_mongodb|creationTime|2017-01-05T16:06:51Z +http://www.semanlink.net/tag/jsonld_mongodb|prefLabel|JsonLD + MongoDB +http://www.semanlink.net/tag/jsonld_mongodb|broader|http://www.semanlink.net/tag/mongodb +http://www.semanlink.net/tag/jsonld_mongodb|broader|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/jsonld_mongodb|creationDate|2017-01-05 +http://www.semanlink.net/tag/jsonld_mongodb|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jsonld_mongodb|uri|http://www.semanlink.net/tag/jsonld_mongodb +http://www.semanlink.net/tag/jsonld_mongodb|broader_prefLabel|MongoDB +http://www.semanlink.net/tag/jsonld_mongodb|broader_prefLabel|JSON-LD +http://www.semanlink.net/tag/jsonld_mongodb|broader_altLabel|JSONLD +http://www.semanlink.net/tag/choose_science|creationTime|2021-07-01T13:47:30Z +http://www.semanlink.net/tag/choose_science|prefLabel|Choose science +http://www.semanlink.net/tag/choose_science|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/choose_science|creationDate|2021-07-01 +http://www.semanlink.net/tag/choose_science|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/choose_science|uri|http://www.semanlink.net/tag/choose_science +http://www.semanlink.net/tag/choose_science|broader_prefLabel|Science +http://www.semanlink.net/tag/choose_science|broader_altLabel|sciences +http://www.semanlink.net/tag/sparql_sample_code|creationTime|2008-09-24T22:54:17Z +http://www.semanlink.net/tag/sparql_sample_code|prefLabel|SPARQL: sample code +http://www.semanlink.net/tag/sparql_sample_code|broader|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/tag/sparql_sample_code|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_sample_code|creationDate|2008-09-24 
+http://www.semanlink.net/tag/sparql_sample_code|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_sample_code|uri|http://www.semanlink.net/tag/sparql_sample_code +http://www.semanlink.net/tag/sparql_sample_code|broader_prefLabel|Sample code +http://www.semanlink.net/tag/sparql_sample_code|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/droit_et_internet|creationTime|2008-03-29T15:08:35Z +http://www.semanlink.net/tag/droit_et_internet|prefLabel|Droit et internet +http://www.semanlink.net/tag/droit_et_internet|broader|http://www.semanlink.net/tag/droit +http://www.semanlink.net/tag/droit_et_internet|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/droit_et_internet|creationDate|2008-03-29 +http://www.semanlink.net/tag/droit_et_internet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/droit_et_internet|uri|http://www.semanlink.net/tag/droit_et_internet +http://www.semanlink.net/tag/droit_et_internet|broader_prefLabel|Droit +http://www.semanlink.net/tag/droit_et_internet|broader_prefLabel|Internet +http://www.semanlink.net/tag/droit_et_internet|broader_related|http://www.semanlink.net/tag/justice +http://www.semanlink.net/tag/information_extraction|creationTime|2018-11-04T18:15:22Z +http://www.semanlink.net/tag/information_extraction|prefLabel|Information extraction +http://www.semanlink.net/tag/information_extraction|related|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/tag/information_extraction|creationDate|2018-11-04 +http://www.semanlink.net/tag/information_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/information_extraction|uri|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/ogm|prefLabel|GMO +http://www.semanlink.net/tag/ogm|broader|http://www.semanlink.net/tag/biotechnologies +http://www.semanlink.net/tag/ogm|broader|http://www.semanlink.net/tag/manipulations_genetiques +http://www.semanlink.net/tag/ogm|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/ogm|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/ogm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ogm|altLabel|OGM +http://www.semanlink.net/tag/ogm|uri|http://www.semanlink.net/tag/ogm +http://www.semanlink.net/tag/ogm|broader_prefLabel|Biotechnologies +http://www.semanlink.net/tag/ogm|broader_prefLabel|Biotechnologies +http://www.semanlink.net/tag/ogm|broader_prefLabel|Manipulations génétiques +http://www.semanlink.net/tag/ogm|broader_prefLabel|Genetics +http://www.semanlink.net/tag/ogm|broader_prefLabel|Génétique +http://www.semanlink.net/tag/ogm|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/solr_documentation|creationTime|2015-03-09T11:20:20Z +http://www.semanlink.net/tag/solr_documentation|prefLabel|Solr documentation +http://www.semanlink.net/tag/solr_documentation|broader|http://www.semanlink.net/tag/solr +http://www.semanlink.net/tag/solr_documentation|creationDate|2015-03-09 +http://www.semanlink.net/tag/solr_documentation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/solr_documentation|uri|http://www.semanlink.net/tag/solr_documentation +http://www.semanlink.net/tag/solr_documentation|broader_prefLabel|Solr +http://www.semanlink.net/tag/linked_data_collaborative_editing|creationTime|2013-04-17T10:15:30Z 
+http://www.semanlink.net/tag/linked_data_collaborative_editing|prefLabel|Linked Data / collaborative editing +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader|http://www.semanlink.net/tag/collaborative_editing +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_collaborative_editing|creationDate|2013-04-17 +http://www.semanlink.net/tag/linked_data_collaborative_editing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_collaborative_editing|uri|http://www.semanlink.net/tag/linked_data_collaborative_editing +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_prefLabel|Collaborative editing +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data_collaborative_editing|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/portland_or|creationTime|2007-09-11T21:38:49Z +http://www.semanlink.net/tag/portland_or|prefLabel|Portland (OR) +http://www.semanlink.net/tag/portland_or|broader|http://www.semanlink.net/tag/oregon +http://www.semanlink.net/tag/portland_or|creationDate|2007-09-11 +http://www.semanlink.net/tag/portland_or|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/portland_or|uri|http://www.semanlink.net/tag/portland_or +http://www.semanlink.net/tag/portland_or|broader_prefLabel|Oregon +http://www.semanlink.net/tag/culture_et_sem_web|creationTime|2012-11-19T15:54:43Z +http://www.semanlink.net/tag/culture_et_sem_web|prefLabel|Culture et sem web +http://www.semanlink.net/tag/culture_et_sem_web|broader|http://www.semanlink.net/tag/culture 
+http://www.semanlink.net/tag/culture_et_sem_web|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/culture_et_sem_web|related|http://www.semanlink.net/tag/bertrand_sajus +http://www.semanlink.net/tag/culture_et_sem_web|creationDate|2012-11-19 +http://www.semanlink.net/tag/culture_et_sem_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/culture_et_sem_web|uri|http://www.semanlink.net/tag/culture_et_sem_web +http://www.semanlink.net/tag/culture_et_sem_web|broader_prefLabel|Culture +http://www.semanlink.net/tag/culture_et_sem_web|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/culture_et_sem_web|broader_altLabel|sw +http://www.semanlink.net/tag/culture_et_sem_web|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/sweo_renault_use_case|creationTime|2008-09-23T15:04:04Z +http://www.semanlink.net/tag/sweo_renault_use_case|prefLabel|SWEO: Renault use case +http://www.semanlink.net/tag/sweo_renault_use_case|broader|http://www.semanlink.net/tag/sweo_interest_group +http://www.semanlink.net/tag/sweo_renault_use_case|broader|http://www.semanlink.net/tag/sw_in_technical_automotive_documentation +http://www.semanlink.net/tag/sweo_renault_use_case|creationDate|2008-09-23 +http://www.semanlink.net/tag/sweo_renault_use_case|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sweo_renault_use_case|uri|http://www.semanlink.net/tag/sweo_renault_use_case +http://www.semanlink.net/tag/sweo_renault_use_case|broader_prefLabel|SWEO Interest Group +http://www.semanlink.net/tag/sweo_renault_use_case|broader_prefLabel|SW in Technical Automotive Documentation +http://www.semanlink.net/tag/sweo_renault_use_case|broader_related|http://www.semanlink.net/tag/ivan_herman +http://www.semanlink.net/tag/mais_ogm|prefLabel|Maïs OGM +http://www.semanlink.net/tag/mais_ogm|broader|http://www.semanlink.net/tag/ogm +http://www.semanlink.net/tag/mais_ogm|broader|http://www.semanlink.net/tag/mais +http://www.semanlink.net/tag/mais_ogm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mais_ogm|uri|http://www.semanlink.net/tag/mais_ogm +http://www.semanlink.net/tag/mais_ogm|broader_prefLabel|GMO +http://www.semanlink.net/tag/mais_ogm|broader_prefLabel|Maïs +http://www.semanlink.net/tag/mais_ogm|broader_altLabel|OGM +http://www.semanlink.net/tag/trusted_computing|prefLabel|Trusted Computing +http://www.semanlink.net/tag/trusted_computing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/trusted_computing|altLabel|TCPA +http://www.semanlink.net/tag/trusted_computing|uri|http://www.semanlink.net/tag/trusted_computing +http://www.semanlink.net/tag/pericles|creationTime|2009-03-05T01:37:08Z +http://www.semanlink.net/tag/pericles|prefLabel|Périclès +http://www.semanlink.net/tag/pericles|broader|http://www.semanlink.net/tag/chef_d_etat +http://www.semanlink.net/tag/pericles|broader|http://www.semanlink.net/tag/personnage_historique +http://www.semanlink.net/tag/pericles|broader|http://www.semanlink.net/tag/athenes +http://www.semanlink.net/tag/pericles|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/pericles|broader|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/tag/pericles|creationDate|2009-03-05 +http://www.semanlink.net/tag/pericles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/pericles|describedBy|https://en.wikipedia.org/wiki/Pericles +http://www.semanlink.net/tag/pericles|uri|http://www.semanlink.net/tag/pericles +http://www.semanlink.net/tag/pericles|broader_prefLabel|Chef d'état +http://www.semanlink.net/tag/pericles|broader_prefLabel|Personnage historique +http://www.semanlink.net/tag/pericles|broader_prefLabel|Athènes +http://www.semanlink.net/tag/pericles|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/pericles|broader_prefLabel|Grèce antique +http://www.semanlink.net/tag/java_dev|prefLabel|Java dev +http://www.semanlink.net/tag/java_dev|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/java_dev|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_dev|uri|http://www.semanlink.net/tag/java_dev +http://www.semanlink.net/tag/java_dev|broader_prefLabel|Dev +http://www.semanlink.net/tag/java_dev|broader_prefLabel|Java +http://www.semanlink.net/tag/rdf_forms|creationTime|2007-03-20T21:21:14Z +http://www.semanlink.net/tag/rdf_forms|prefLabel|RDF forms +http://www.semanlink.net/tag/rdf_forms|broader|http://www.semanlink.net/tag/semantic_web_services +http://www.semanlink.net/tag/rdf_forms|broader|http://www.semanlink.net/tag/forms +http://www.semanlink.net/tag/rdf_forms|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/rdf_forms|related|http://www.semanlink.net/tag/hydra +http://www.semanlink.net/tag/rdf_forms|creationDate|2007-03-20 +http://www.semanlink.net/tag/rdf_forms|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_forms|uri|http://www.semanlink.net/tag/rdf_forms +http://www.semanlink.net/tag/rdf_forms|broader_prefLabel|Semantic Web Services +http://www.semanlink.net/tag/rdf_forms|broader_prefLabel|Forms +http://www.semanlink.net/tag/rdf_forms|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/rdf_forms|broader_altLabel|LD +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/rdf_forms|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/distilbert|creationTime|2020-02-14T00:26:15Z 
+http://www.semanlink.net/tag/distilbert|prefLabel|DistilBERT +http://www.semanlink.net/tag/distilbert|broader|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/distilbert|broader|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/tag/distilbert|creationDate|2020-02-14 +http://www.semanlink.net/tag/distilbert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/distilbert|uri|http://www.semanlink.net/tag/distilbert +http://www.semanlink.net/tag/distilbert|broader_prefLabel|BERT +http://www.semanlink.net/tag/distilbert|broader_prefLabel|Knowledge distillation +http://www.semanlink.net/tag/distilbert|broader_altLabel|Teacher-student learning +http://www.semanlink.net/tag/distilbert|broader_related|http://www.semanlink.net/tag/mutual_learning +http://www.semanlink.net/tag/distilbert|broader_related|http://www.semanlink.net/tag/ensemble_learning +http://www.semanlink.net/tag/distilbert|broader_related|http://www.semanlink.net/tag/on_device_nlp +http://www.semanlink.net/tag/curl|creationTime|2016-03-31T11:05:02Z +http://www.semanlink.net/tag/curl|prefLabel|cURL +http://www.semanlink.net/tag/curl|creationDate|2016-03-31 +http://www.semanlink.net/tag/curl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/curl|describedBy|https://en.wikipedia.org/wiki/CURL +http://www.semanlink.net/tag/curl|uri|http://www.semanlink.net/tag/curl +http://www.semanlink.net/tag/inductive_bias|creationTime|2018-11-09T00:59:12Z +http://www.semanlink.net/tag/inductive_bias|prefLabel|Inductive bias +http://www.semanlink.net/tag/inductive_bias|broader|http://www.semanlink.net/tag/bias +http://www.semanlink.net/tag/inductive_bias|creationDate|2018-11-09 +http://www.semanlink.net/tag/inductive_bias|comment|"learning bias: the set of assumptions that a model makes in order to generalize to new inputs. + +An inductive bias allows a learning algorithm to prioritize +one solution (or interpretation) over another, independent of the observed data (Mitchell, +1980). 
In a Bayesian model, inductive biases are typically expressed through the choice and +parameterization of the prior distribution [source](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1806.01261)" +http://www.semanlink.net/tag/inductive_bias|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/inductive_bias|describedBy|https://en.wikipedia.org/wiki/Inductive_bias +http://www.semanlink.net/tag/inductive_bias|altLabel|Learning bias +http://www.semanlink.net/tag/inductive_bias|uri|http://www.semanlink.net/tag/inductive_bias +http://www.semanlink.net/tag/inductive_bias|broader_prefLabel|Bias +http://www.semanlink.net/tag/microblogs|creationTime|2010-12-20T15:31:36Z +http://www.semanlink.net/tag/microblogs|prefLabel|Microblogs +http://www.semanlink.net/tag/microblogs|broader|http://www.semanlink.net/tag/blog +http://www.semanlink.net/tag/microblogs|creationDate|2010-12-20 +http://www.semanlink.net/tag/microblogs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/microblogs|altLabel|Microblogging +http://www.semanlink.net/tag/microblogs|uri|http://www.semanlink.net/tag/microblogs +http://www.semanlink.net/tag/microblogs|broader_prefLabel|Blog +http://www.semanlink.net/tag/developpement|prefLabel|Développement +http://www.semanlink.net/tag/developpement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/developpement|uri|http://www.semanlink.net/tag/developpement +http://www.semanlink.net/tag/java_5|prefLabel|Java 5 +http://www.semanlink.net/tag/java_5|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java_5|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_5|altLabel|Java 1.5 +http://www.semanlink.net/tag/java_5|uri|http://www.semanlink.net/tag/java_5 +http://www.semanlink.net/tag/java_5|broader_prefLabel|Java +http://www.semanlink.net/tag/normale_sup|creationTime|2011-08-28T01:41:56Z +http://www.semanlink.net/tag/normale_sup|prefLabel|Normale Sup +http://www.semanlink.net/tag/normale_sup|broader|http://www.semanlink.net/tag/enseignement_superieur +http://www.semanlink.net/tag/normale_sup|creationDate|2011-08-28 +http://www.semanlink.net/tag/normale_sup|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/normale_sup|altLabel|ENS +http://www.semanlink.net/tag/normale_sup|uri|http://www.semanlink.net/tag/normale_sup +http://www.semanlink.net/tag/normale_sup|broader_prefLabel|Enseignement supérieur +http://www.semanlink.net/tag/end_to_end_learning|creationTime|2020-07-06T17:41:54Z +http://www.semanlink.net/tag/end_to_end_learning|prefLabel|End-to-End Learning +http://www.semanlink.net/tag/end_to_end_learning|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/end_to_end_learning|creationDate|2020-07-06 +http://www.semanlink.net/tag/end_to_end_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/end_to_end_learning|uri|http://www.semanlink.net/tag/end_to_end_learning +http://www.semanlink.net/tag/end_to_end_learning|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/end_to_end_learning|broader_altLabel|ML +http://www.semanlink.net/tag/end_to_end_learning|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/end_to_end_learning|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/musee_de_niamey|creationTime|2010-05-17T12:13:23Z 
+http://www.semanlink.net/tag/musee_de_niamey|prefLabel|Musée de Niamey +http://www.semanlink.net/tag/musee_de_niamey|broader|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/musee_de_niamey|broader|http://www.semanlink.net/tag/niamey +http://www.semanlink.net/tag/musee_de_niamey|broader|http://www.semanlink.net/tag/musees_africains +http://www.semanlink.net/tag/musee_de_niamey|creationDate|2010-05-17 +http://www.semanlink.net/tag/musee_de_niamey|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/musee_de_niamey|uri|http://www.semanlink.net/tag/musee_de_niamey +http://www.semanlink.net/tag/musee_de_niamey|broader_prefLabel|Musée +http://www.semanlink.net/tag/musee_de_niamey|broader_prefLabel|Niamey +http://www.semanlink.net/tag/musee_de_niamey|broader_prefLabel|Musées africains +http://www.semanlink.net/tag/hierarchical_multi_label_classification|creationTime|2020-08-15T11:53:54Z +http://www.semanlink.net/tag/hierarchical_multi_label_classification|prefLabel|Hierarchical multi-label classification +http://www.semanlink.net/tag/hierarchical_multi_label_classification|broader|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/tag/hierarchical_multi_label_classification|broader|http://www.semanlink.net/tag/hierarchical_classification +http://www.semanlink.net/tag/hierarchical_multi_label_classification|creationDate|2020-08-15 +http://www.semanlink.net/tag/hierarchical_multi_label_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hierarchical_multi_label_classification|uri|http://www.semanlink.net/tag/hierarchical_multi_label_classification +http://www.semanlink.net/tag/hierarchical_multi_label_classification|broader_prefLabel|Multi-label classification +http://www.semanlink.net/tag/hierarchical_multi_label_classification|broader_prefLabel|Hierarchical Classification +http://www.semanlink.net/tag/hierarchical_multi_label_classification|broader_altLabel|Multilabel classification +http://www.semanlink.net/tag/hierarchical_multi_label_classification|broader_related|http://www.semanlink.net/tag/nlp_4_semanlink +http://www.semanlink.net/tag/scikit_learn|creationTime|2015-10-16T11:14:27Z +http://www.semanlink.net/tag/scikit_learn|prefLabel|scikit-learn +http://www.semanlink.net/tag/scikit_learn|broader|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/tag/scikit_learn|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/scikit_learn|broader|http://www.semanlink.net/tag/machine_learning_library +http://www.semanlink.net/tag/scikit_learn|creationDate|2015-10-16 +http://www.semanlink.net/tag/scikit_learn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/scikit_learn|homepage|http://scikit-learn.org/ +http://www.semanlink.net/tag/scikit_learn|altLabel|sklearn +http://www.semanlink.net/tag/scikit_learn|uri|http://www.semanlink.net/tag/scikit_learn +http://www.semanlink.net/tag/scikit_learn|broader_prefLabel|Python 4 Data science +http://www.semanlink.net/tag/scikit_learn|broader_prefLabel|Python +http://www.semanlink.net/tag/scikit_learn|broader_prefLabel|Machine Learning library +http://www.semanlink.net/tag/tchernobyl|prefLabel|Tchernobyl +http://www.semanlink.net/tag/tchernobyl|broader|http://www.semanlink.net/tag/ukraine +http://www.semanlink.net/tag/tchernobyl|broader|http://www.semanlink.net/tag/industrie_nucleaire 
+http://www.semanlink.net/tag/tchernobyl|broader|http://www.semanlink.net/tag/catastrophe_ecologique +http://www.semanlink.net/tag/tchernobyl|broader|http://www.semanlink.net/tag/urss +http://www.semanlink.net/tag/tchernobyl|broader|http://www.semanlink.net/tag/catastrophe_industrielle +http://www.semanlink.net/tag/tchernobyl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tchernobyl|uri|http://www.semanlink.net/tag/tchernobyl +http://www.semanlink.net/tag/tchernobyl|broader_prefLabel|Ukraine +http://www.semanlink.net/tag/tchernobyl|broader_prefLabel|Industrie nucléaire +http://www.semanlink.net/tag/tchernobyl|broader_prefLabel|Catastrophe écologique +http://www.semanlink.net/tag/tchernobyl|broader_prefLabel|Ex URSS +http://www.semanlink.net/tag/tchernobyl|broader_prefLabel|URSS +http://www.semanlink.net/tag/tchernobyl|broader_prefLabel|Catastrophe industrielle +http://www.semanlink.net/tag/tchernobyl|broader_altLabel|Nucléaire +http://www.semanlink.net/tag/tchernobyl|broader_altLabel|Désastre écologique +http://www.semanlink.net/tag/attentats_13_11_2015|creationTime|2015-11-15T10:58:34Z +http://www.semanlink.net/tag/attentats_13_11_2015|prefLabel|Attentats 13-11-2015 +http://www.semanlink.net/tag/attentats_13_11_2015|broader|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/attentats_13_11_2015|broader|http://www.semanlink.net/tag/terrorisme_islamiste +http://www.semanlink.net/tag/attentats_13_11_2015|broader|http://www.semanlink.net/tag/ei +http://www.semanlink.net/tag/attentats_13_11_2015|creationDate|2015-11-15 +http://www.semanlink.net/tag/attentats_13_11_2015|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/attentats_13_11_2015|uri|http://www.semanlink.net/tag/attentats_13_11_2015 +http://www.semanlink.net/tag/attentats_13_11_2015|broader_prefLabel|Paris +http://www.semanlink.net/tag/attentats_13_11_2015|broader_prefLabel|Terrorisme islamiste +http://www.semanlink.net/tag/attentats_13_11_2015|broader_prefLabel|Etat islamique +http://www.semanlink.net/tag/attentats_13_11_2015|broader_altLabel|EIIL +http://www.semanlink.net/tag/attentats_13_11_2015|broader_related|http://www.semanlink.net/tag/syrie +http://www.semanlink.net/tag/attentats_13_11_2015|broader_related|http://www.semanlink.net/tag/irak +http://www.semanlink.net/tag/chevenement|creationTime|2013-07-07T00:51:25Z +http://www.semanlink.net/tag/chevenement|prefLabel|Chevènement +http://www.semanlink.net/tag/chevenement|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/chevenement|creationDate|2013-07-07 +http://www.semanlink.net/tag/chevenement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chevenement|uri|http://www.semanlink.net/tag/chevenement +http://www.semanlink.net/tag/chevenement|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|creationTime|2017-10-21T17:23:13Z +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|prefLabel|Knowledge Graphs in NLP +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|broader|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|broader|http://www.semanlink.net/tag/nlp_using_knowledge +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|creationDate|2017-10-21 
+http://www.semanlink.net/tag/nlp_using_knowledge_graphs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|uri|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|broader_prefLabel|Knowledge Graphs and NLP +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|broader_prefLabel|NLP: using Knowledge +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|broader_altLabel|KG + NLP +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|broader_altLabel|Knowledge Graphs + Text +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|broader_related|http://www.semanlink.net/tag/nlp_using_knowledge +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|broader_related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|broader_related|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp +http://www.semanlink.net/tag/nlp_using_knowledge_graphs|broader_related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/iphone|creationTime|2008-07-08T22:50:43Z +http://www.semanlink.net/tag/iphone|prefLabel|iphone +http://www.semanlink.net/tag/iphone|broader|http://www.semanlink.net/tag/smartphone +http://www.semanlink.net/tag/iphone|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/iphone|creationDate|2008-07-08 +http://www.semanlink.net/tag/iphone|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/iphone|uri|http://www.semanlink.net/tag/iphone +http://www.semanlink.net/tag/iphone|broader_prefLabel|Smartphone +http://www.semanlink.net/tag/iphone|broader_prefLabel|Apple +http://www.semanlink.net/tag/iphone|broader_altLabel|Téléphone portable +http://www.semanlink.net/tag/annotations|creationTime|2010-05-20T01:25:02Z +http://www.semanlink.net/tag/annotations|prefLabel|Annotations +http://www.semanlink.net/tag/annotations|creationDate|2010-05-20 +http://www.semanlink.net/tag/annotations|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/annotations|altLabel|Annotate +http://www.semanlink.net/tag/annotations|uri|http://www.semanlink.net/tag/annotations +http://www.semanlink.net/tag/html_parsing|prefLabel|HTML parsing +http://www.semanlink.net/tag/html_parsing|broader|http://www.semanlink.net/tag/html_dev +http://www.semanlink.net/tag/html_parsing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/html_parsing|uri|http://www.semanlink.net/tag/html_parsing +http://www.semanlink.net/tag/html_parsing|broader_prefLabel|HTML Dev +http://www.semanlink.net/tag/tensor|creationTime|2019-01-04T22:06:22Z +http://www.semanlink.net/tag/tensor|prefLabel|Tensor +http://www.semanlink.net/tag/tensor|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/tensor|creationDate|2019-01-04 +http://www.semanlink.net/tag/tensor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tensor|uri|http://www.semanlink.net/tag/tensor +http://www.semanlink.net/tag/tensor|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/beethoven|creationTime|2016-12-05T16:17:48Z +http://www.semanlink.net/tag/beethoven|prefLabel|Beethoven +http://www.semanlink.net/tag/beethoven|broader|http://www.semanlink.net/tag/musicien 
+http://www.semanlink.net/tag/beethoven|creationDate|2016-12-05 +http://www.semanlink.net/tag/beethoven|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/beethoven|describedBy|https://en.wikipedia.org/wiki/Ludwig_van_Beethoven +http://www.semanlink.net/tag/beethoven|uri|http://www.semanlink.net/tag/beethoven +http://www.semanlink.net/tag/beethoven|broader_prefLabel|Musicien +http://www.semanlink.net/tag/sem_web_future|creationTime|2012-12-01T13:46:35Z +http://www.semanlink.net/tag/sem_web_future|prefLabel|Sem web: future +http://www.semanlink.net/tag/sem_web_future|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/sem_web_future|creationDate|2012-12-01 +http://www.semanlink.net/tag/sem_web_future|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sem_web_future|uri|http://www.semanlink.net/tag/sem_web_future +http://www.semanlink.net/tag/sem_web_future|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/sem_web_future|broader_altLabel|sw +http://www.semanlink.net/tag/sem_web_future|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/abstractions_in_ai|creationTime|2019-02-09T01:53:21Z +http://www.semanlink.net/tag/abstractions_in_ai|prefLabel|Abstractions in AI +http://www.semanlink.net/tag/abstractions_in_ai|broader|http://www.semanlink.net/tag/artificial_general_intelligence +http://www.semanlink.net/tag/abstractions_in_ai|creationDate|2019-02-09 +http://www.semanlink.net/tag/abstractions_in_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/abstractions_in_ai|uri|http://www.semanlink.net/tag/abstractions_in_ai +http://www.semanlink.net/tag/abstractions_in_ai|broader_prefLabel|Artificial general intelligence +http://www.semanlink.net/tag/abstractions_in_ai|broader_altLabel|AGI +http://www.semanlink.net/tag/abstractions_in_ai|broader_related|http://www.semanlink.net/tag/combinatorial_generalization +http://www.semanlink.net/tag/optimisation_fiscale|creationTime|2012-11-12T22:09:47Z +http://www.semanlink.net/tag/optimisation_fiscale|prefLabel|Optimisation fiscale +http://www.semanlink.net/tag/optimisation_fiscale|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/optimisation_fiscale|creationDate|2012-11-12 +http://www.semanlink.net/tag/optimisation_fiscale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/optimisation_fiscale|uri|http://www.semanlink.net/tag/optimisation_fiscale +http://www.semanlink.net/tag/optimisation_fiscale|broader_prefLabel|Finance +http://www.semanlink.net/tag/linked_data_publishing|creationTime|2013-03-13T19:30:15Z +http://www.semanlink.net/tag/linked_data_publishing|prefLabel|Linked Data publishing +http://www.semanlink.net/tag/linked_data_publishing|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/linked_data_publishing|creationDate|2013-03-13 +http://www.semanlink.net/tag/linked_data_publishing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_publishing|altLabel|RDF Data publishing +http://www.semanlink.net/tag/linked_data_publishing|uri|http://www.semanlink.net/tag/linked_data_publishing +http://www.semanlink.net/tag/linked_data_publishing|broader_prefLabel|RDF +http://www.semanlink.net/tag/linked_data_publishing|broader_related|http://www.semanlink.net/tag/w3c 
+http://www.semanlink.net/tag/linked_data_publishing|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/linked_data_publishing|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/linked_data_publishing|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/linked_data_publishing|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/sparql_demo|creationTime|2008-05-19T18:52:31Z +http://www.semanlink.net/tag/sparql_demo|prefLabel|SPARQL Demo +http://www.semanlink.net/tag/sparql_demo|broader|http://www.semanlink.net/tag/sw_demo +http://www.semanlink.net/tag/sparql_demo|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_demo|creationDate|2008-05-19 +http://www.semanlink.net/tag/sparql_demo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_demo|uri|http://www.semanlink.net/tag/sparql_demo +http://www.semanlink.net/tag/sparql_demo|broader_prefLabel|SW demo +http://www.semanlink.net/tag/sparql_demo|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/smalltalk|creationTime|2007-09-10T19:49:14Z +http://www.semanlink.net/tag/smalltalk|prefLabel|Smalltalk +http://www.semanlink.net/tag/smalltalk|broader|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/smalltalk|creationDate|2007-09-10 +http://www.semanlink.net/tag/smalltalk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/smalltalk|uri|http://www.semanlink.net/tag/smalltalk +http://www.semanlink.net/tag/smalltalk|broader_prefLabel|Programming language +http://www.semanlink.net/tag/smalltalk|broader_altLabel|Langage de programmation +http://www.semanlink.net/tag/arameen|prefLabel|Araméen +http://www.semanlink.net/tag/arameen|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/arameen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arameen|uri|http://www.semanlink.net/tag/arameen +http://www.semanlink.net/tag/arameen|broader_prefLabel|Langues +http://www.semanlink.net/tag/memory_requirements_in_nn|creationTime|2019-05-14T23:14:14Z +http://www.semanlink.net/tag/memory_requirements_in_nn|prefLabel|Memory requirements in NN +http://www.semanlink.net/tag/memory_requirements_in_nn|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/memory_requirements_in_nn|broader|http://www.semanlink.net/tag/memoire_informatique +http://www.semanlink.net/tag/memory_requirements_in_nn|creationDate|2019-05-14 +http://www.semanlink.net/tag/memory_requirements_in_nn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/memory_requirements_in_nn|uri|http://www.semanlink.net/tag/memory_requirements_in_nn +http://www.semanlink.net/tag/memory_requirements_in_nn|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/memory_requirements_in_nn|broader_prefLabel|Mémoire (informatique) +http://www.semanlink.net/tag/memory_requirements_in_nn|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/memory_requirements_in_nn|broader_altLabel|ANN +http://www.semanlink.net/tag/memory_requirements_in_nn|broader_altLabel|NN +http://www.semanlink.net/tag/myfaces|prefLabel|MyFaces +http://www.semanlink.net/tag/myfaces|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/myfaces|broader|http://www.semanlink.net/tag/java_server_faces 
+http://www.semanlink.net/tag/myfaces|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/myfaces|uri|http://www.semanlink.net/tag/myfaces +http://www.semanlink.net/tag/myfaces|broader_prefLabel|apache.org +http://www.semanlink.net/tag/myfaces|broader_prefLabel|Java Server Faces +http://www.semanlink.net/tag/myfaces|broader_altLabel|jsf +http://www.semanlink.net/tag/resteasy|creationTime|2016-04-01T10:42:20Z +http://www.semanlink.net/tag/resteasy|prefLabel|RESTEasy +http://www.semanlink.net/tag/resteasy|creationDate|2016-04-01 +http://www.semanlink.net/tag/resteasy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/resteasy|uri|http://www.semanlink.net/tag/resteasy +http://www.semanlink.net/tag/real_time_communications|creationTime|2014-01-08T14:15:36Z +http://www.semanlink.net/tag/real_time_communications|prefLabel|Real-Time Communications +http://www.semanlink.net/tag/real_time_communications|broader|http://www.semanlink.net/tag/real_time +http://www.semanlink.net/tag/real_time_communications|creationDate|2014-01-08 +http://www.semanlink.net/tag/real_time_communications|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/real_time_communications|uri|http://www.semanlink.net/tag/real_time_communications +http://www.semanlink.net/tag/real_time_communications|broader_prefLabel|Real-Time +http://www.semanlink.net/tag/wtp|creationTime|2007-11-10T03:50:17Z +http://www.semanlink.net/tag/wtp|prefLabel|WTP +http://www.semanlink.net/tag/wtp|broader|http://www.semanlink.net/tag/eclipse +http://www.semanlink.net/tag/wtp|creationDate|2007-11-10 +http://www.semanlink.net/tag/wtp|comment|" +" +http://www.semanlink.net/tag/wtp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wtp|uri|http://www.semanlink.net/tag/wtp +http://www.semanlink.net/tag/wtp|broader_prefLabel|Eclipse +http://www.semanlink.net/tag/text_feature_extraction|creationTime|2016-01-12T00:54:30Z +http://www.semanlink.net/tag/text_feature_extraction|prefLabel|Text feature extraction +http://www.semanlink.net/tag/text_feature_extraction|broader|http://www.semanlink.net/tag/general_nlp_tasks +http://www.semanlink.net/tag/text_feature_extraction|broader|http://www.semanlink.net/tag/text_dim_reduction +http://www.semanlink.net/tag/text_feature_extraction|broader|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/text_feature_extraction|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/text_feature_extraction|creationDate|2016-01-12 +http://www.semanlink.net/tag/text_feature_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_feature_extraction|uri|http://www.semanlink.net/tag/text_feature_extraction +http://www.semanlink.net/tag/text_feature_extraction|broader_prefLabel|General NLP tasks +http://www.semanlink.net/tag/text_feature_extraction|broader_prefLabel|Text: dimension reduction +http://www.semanlink.net/tag/text_feature_extraction|broader_prefLabel|Feature extraction +http://www.semanlink.net/tag/text_feature_extraction|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/text_feature_extraction|broader_related|http://www.semanlink.net/tag/dimensionality_reduction +http://www.semanlink.net/tag/jure_leskovec|creationTime|2018-05-10T14:28:27Z +http://www.semanlink.net/tag/jure_leskovec|prefLabel|Jure Leskovec 
+http://www.semanlink.net/tag/jure_leskovec|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/jure_leskovec|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/jure_leskovec|related|http://www.semanlink.net/tag/node2vec +http://www.semanlink.net/tag/jure_leskovec|related|http://www.semanlink.net/tag/stanford +http://www.semanlink.net/tag/jure_leskovec|creationDate|2018-05-10 +http://www.semanlink.net/tag/jure_leskovec|comment|"Co-Author of [Node2Vec](/tag/node2vec) paper +" +http://www.semanlink.net/tag/jure_leskovec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jure_leskovec|describedBy|https://cs.stanford.edu/%7Ejure/ +http://www.semanlink.net/tag/jure_leskovec|uri|http://www.semanlink.net/tag/jure_leskovec +http://www.semanlink.net/tag/jure_leskovec|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/jure_leskovec|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/yves_peirsman|creationTime|2019-02-07T00:48:52Z +http://www.semanlink.net/tag/yves_peirsman|prefLabel|Yves Peirsman +http://www.semanlink.net/tag/yves_peirsman|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/yves_peirsman|creationDate|2019-02-07 +http://www.semanlink.net/tag/yves_peirsman|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yves_peirsman|uri|http://www.semanlink.net/tag/yves_peirsman +http://www.semanlink.net/tag/yves_peirsman|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/beatles|creationTime|2013-05-21T08:49:34Z +http://www.semanlink.net/tag/beatles|prefLabel|Beatles +http://www.semanlink.net/tag/beatles|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/beatles|creationDate|2013-05-21 +http://www.semanlink.net/tag/beatles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/beatles|uri|http://www.semanlink.net/tag/beatles +http://www.semanlink.net/tag/beatles|broader_prefLabel|Musique +http://www.semanlink.net/tag/beatles|broader_altLabel|Music +http://www.semanlink.net/tag/self_organizing_maps|creationTime|2021-10-17T10:50:31Z +http://www.semanlink.net/tag/self_organizing_maps|prefLabel|Self-Organizing Maps +http://www.semanlink.net/tag/self_organizing_maps|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/self_organizing_maps|broader|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/tag/self_organizing_maps|broader|http://www.semanlink.net/tag/incremental_clustering +http://www.semanlink.net/tag/self_organizing_maps|creationDate|2021-10-17 +http://www.semanlink.net/tag/self_organizing_maps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/self_organizing_maps|describedBy|https://en.wikipedia.org/wiki/Self-organizing_map +http://www.semanlink.net/tag/self_organizing_maps|uri|http://www.semanlink.net/tag/self_organizing_maps +http://www.semanlink.net/tag/self_organizing_maps|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/self_organizing_maps|broader_prefLabel|Clustering +http://www.semanlink.net/tag/self_organizing_maps|broader_prefLabel|Incremental Clustering +http://www.semanlink.net/tag/self_organizing_maps|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/self_organizing_maps|broader_altLabel|ANN +http://www.semanlink.net/tag/self_organizing_maps|broader_altLabel|NN 
+http://www.semanlink.net/tag/self_organizing_maps|broader_altLabel|Data clustering +http://www.semanlink.net/tag/self_organizing_maps|broader_altLabel|Cluster analysis +http://www.semanlink.net/tag/self_organizing_maps|broader_related|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/docker_python|creationTime|2018-03-26T08:34:16Z +http://www.semanlink.net/tag/docker_python|prefLabel|Docker-Python +http://www.semanlink.net/tag/docker_python|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/docker_python|broader|http://www.semanlink.net/tag/docker +http://www.semanlink.net/tag/docker_python|creationDate|2018-03-26 +http://www.semanlink.net/tag/docker_python|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/docker_python|uri|http://www.semanlink.net/tag/docker_python +http://www.semanlink.net/tag/docker_python|broader_prefLabel|Python +http://www.semanlink.net/tag/docker_python|broader_prefLabel|Docker +http://www.semanlink.net/tag/open_endedness|creationTime|2018-03-30T13:56:01Z +http://www.semanlink.net/tag/open_endedness|prefLabel|Open-endedness +http://www.semanlink.net/tag/open_endedness|related|http://www.semanlink.net/tag/genetic_programming +http://www.semanlink.net/tag/open_endedness|related|http://www.semanlink.net/tag/genetic_algorithm +http://www.semanlink.net/tag/open_endedness|creationDate|2018-03-30 +http://www.semanlink.net/tag/open_endedness|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/open_endedness|uri|http://www.semanlink.net/tag/open_endedness +http://www.semanlink.net/tag/justice_internationale|prefLabel|Justice internationale +http://www.semanlink.net/tag/justice_internationale|broader|http://www.semanlink.net/tag/justice +http://www.semanlink.net/tag/justice_internationale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/justice_internationale|uri|http://www.semanlink.net/tag/justice_internationale +http://www.semanlink.net/tag/justice_internationale|broader_prefLabel|Justice +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|creationTime|2008-04-01T15:06:04Z +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|prefLabel|Javascript RDF Parser in IE +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|broader|http://www.semanlink.net/tag/javascript_rdf_parser +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|broader|http://www.semanlink.net/tag/tabulator +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|broader|http://www.semanlink.net/tag/internet_explorer +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|broader|http://www.semanlink.net/tag/compatibilite_javascript +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|creationDate|2008-04-01 +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|uri|http://www.semanlink.net/tag/javascript_rdf_parser_in_ie +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|broader_prefLabel|Javascript RDF Parser +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|broader_prefLabel|Tabulator +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|broader_prefLabel|Internet Explorer +http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|broader_prefLabel|Compatibilité Javascript 
+http://www.semanlink.net/tag/javascript_rdf_parser_in_ie|broader_related|http://www.semanlink.net/tag/ajar +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|creationTime|2007-09-13T22:10:29Z +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|prefLabel|Bacterial to Animal Gene Transfer +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|broader|http://www.semanlink.net/tag/horizontal_gene_transfer +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|broader|http://www.semanlink.net/tag/bacteries +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|creationDate|2007-09-13 +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|uri|http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|broader_prefLabel|Horizontal gene transfer +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|broader_prefLabel|Bacteria +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|broader_prefLabel|Genetics +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|broader_prefLabel|Génétique +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|broader_altLabel|Lateral gene transfer +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|broader_altLabel|Bactéries +http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer|broader_related|http://www.semanlink.net/tag/ogm +http://www.semanlink.net/tag/tag_ontology|prefLabel|Tag ontology +http://www.semanlink.net/tag/tag_ontology|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/tag_ontology|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/tag_ontology|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/tag_ontology|creationDate|2007-01-09 +http://www.semanlink.net/tag/tag_ontology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tag_ontology|uri|http://www.semanlink.net/tag/tag_ontology +http://www.semanlink.net/tag/tag_ontology|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/tag_ontology|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/tag_ontology|broader_prefLabel|Tagging +http://www.semanlink.net/tag/tag_ontology|broader_altLabel|Ontology +http://www.semanlink.net/tag/government_data_as_linked_data|creationTime|2010-07-30T14:08:21Z +http://www.semanlink.net/tag/government_data_as_linked_data|prefLabel|Government data as Linked Data +http://www.semanlink.net/tag/government_data_as_linked_data|broader|http://www.semanlink.net/tag/government_data +http://www.semanlink.net/tag/government_data_as_linked_data|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/government_data_as_linked_data|creationDate|2010-07-30 +http://www.semanlink.net/tag/government_data_as_linked_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/government_data_as_linked_data|uri|http://www.semanlink.net/tag/government_data_as_linked_data +http://www.semanlink.net/tag/government_data_as_linked_data|broader_prefLabel|Government data +http://www.semanlink.net/tag/government_data_as_linked_data|broader_prefLabel|Linked Data 
+http://www.semanlink.net/tag/government_data_as_linked_data|broader_altLabel|LD +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/government_data_as_linked_data|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/markus_lanthaler|creationTime|2014-10-29T01:07:22Z +http://www.semanlink.net/tag/markus_lanthaler|prefLabel|Markus Lanthaler +http://www.semanlink.net/tag/markus_lanthaler|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/markus_lanthaler|related|http://www.semanlink.net/tag/hydra +http://www.semanlink.net/tag/markus_lanthaler|related|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/markus_lanthaler|creationDate|2014-10-29 +http://www.semanlink.net/tag/markus_lanthaler|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/markus_lanthaler|uri|http://www.semanlink.net/tag/markus_lanthaler +http://www.semanlink.net/tag/markus_lanthaler|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/markus_lanthaler|broader_altLabel|Technical guys +http://www.semanlink.net/tag/john_sofakolle|prefLabel|John Sofakolle +http://www.semanlink.net/tag/john_sofakolle|broader|http://www.semanlink.net/tag/cfpm +http://www.semanlink.net/tag/john_sofakolle|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/john_sofakolle|broader|http://www.semanlink.net/tag/musique_du_niger +http://www.semanlink.net/tag/john_sofakolle|broader|http://www.semanlink.net/tag/ami +http://www.semanlink.net/tag/john_sofakolle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/john_sofakolle|altLabel|Sofakolle +http://www.semanlink.net/tag/john_sofakolle|uri|http://www.semanlink.net/tag/john_sofakolle +http://www.semanlink.net/tag/john_sofakolle|broader_prefLabel|CFPM +http://www.semanlink.net/tag/john_sofakolle|broader_prefLabel|Musicien 
+http://www.semanlink.net/tag/john_sofakolle|broader_prefLabel|Musique du Niger +http://www.semanlink.net/tag/john_sofakolle|broader_prefLabel|Ami +http://www.semanlink.net/tag/ben_adida|creationTime|2008-04-21T15:38:35Z +http://www.semanlink.net/tag/ben_adida|prefLabel|Ben Adida +http://www.semanlink.net/tag/ben_adida|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/ben_adida|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/ben_adida|related|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/ben_adida|creationDate|2008-04-21 +http://www.semanlink.net/tag/ben_adida|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ben_adida|homepage|http://ben.adida.net/ +http://www.semanlink.net/tag/ben_adida|uri|http://www.semanlink.net/tag/ben_adida +http://www.semanlink.net/tag/ben_adida|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/ben_adida|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/ben_adida|broader_altLabel|Technical guys +http://www.semanlink.net/tag/anaconda|creationTime|2017-05-28T18:56:25Z +http://www.semanlink.net/tag/anaconda|prefLabel|Anaconda +http://www.semanlink.net/tag/anaconda|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/anaconda|broader|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/tag/anaconda|creationDate|2017-05-28 +http://www.semanlink.net/tag/anaconda|comment|"""data science platform powered by Python. The open source version of Anaconda is a high performance distribution of Python and R and includes over 100 of the most popular Python, R and Scala packages for data science.""" +http://www.semanlink.net/tag/anaconda|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/anaconda|uri|http://www.semanlink.net/tag/anaconda +http://www.semanlink.net/tag/anaconda|broader_prefLabel|Python +http://www.semanlink.net/tag/anaconda|broader_prefLabel|Python 4 Data science +http://www.semanlink.net/tag/chameau|prefLabel|Chameau +http://www.semanlink.net/tag/chameau|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chameau|uri|http://www.semanlink.net/tag/chameau +http://www.semanlink.net/tag/global_brain|prefLabel|Global brain +http://www.semanlink.net/tag/global_brain|broader|http://www.semanlink.net/tag/anticipation +http://www.semanlink.net/tag/global_brain|broader|http://www.semanlink.net/tag/ntic +http://www.semanlink.net/tag/global_brain|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/global_brain|broader|http://www.semanlink.net/tag/conscience_artificielle +http://www.semanlink.net/tag/global_brain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/global_brain|uri|http://www.semanlink.net/tag/global_brain +http://www.semanlink.net/tag/global_brain|broader_prefLabel|Anticipation +http://www.semanlink.net/tag/global_brain|broader_prefLabel|NTIC +http://www.semanlink.net/tag/global_brain|broader_prefLabel|Brain +http://www.semanlink.net/tag/global_brain|broader_prefLabel|Conscience artificielle +http://www.semanlink.net/tag/global_brain|broader_altLabel|Cerveau +http://www.semanlink.net/tag/global_brain|broader_altLabel|Machine consciousness +http://www.semanlink.net/tag/global_brain|broader_related|http://www.semanlink.net/tag/computational_neuroscience 
+http://www.semanlink.net/tag/ntic_et_developpement|creationTime|2015-11-01T20:52:12Z +http://www.semanlink.net/tag/ntic_et_developpement|prefLabel|NTIC et développement +http://www.semanlink.net/tag/ntic_et_developpement|broader|http://www.semanlink.net/tag/ntic +http://www.semanlink.net/tag/ntic_et_developpement|broader|http://www.semanlink.net/tag/innovation +http://www.semanlink.net/tag/ntic_et_developpement|creationDate|2015-11-01 +http://www.semanlink.net/tag/ntic_et_developpement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ntic_et_developpement|altLabel|Tech / developing world +http://www.semanlink.net/tag/ntic_et_developpement|uri|http://www.semanlink.net/tag/ntic_et_developpement +http://www.semanlink.net/tag/ntic_et_developpement|broader_prefLabel|NTIC +http://www.semanlink.net/tag/ntic_et_developpement|broader_prefLabel|Innovation +http://www.semanlink.net/tag/ckb|creationTime|2021-04-13T11:58:02Z +http://www.semanlink.net/tag/ckb|prefLabel|CKB +http://www.semanlink.net/tag/ckb|broader|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/tag/ckb|creationDate|2021-04-13 +http://www.semanlink.net/tag/ckb|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ckb|uri|http://www.semanlink.net/tag/ckb +http://www.semanlink.net/tag/ckb|broader_prefLabel|Raphaël Sourty +http://www.semanlink.net/tag/ckb|broader_altLabel|raphaelsty +http://www.semanlink.net/tag/ckb|broader_related|http://www.semanlink.net/tag/kd_mkb +http://www.semanlink.net/tag/ckb|broader_related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/attali|prefLabel|Attali +http://www.semanlink.net/tag/attali|broader|http://www.semanlink.net/tag/intellectuel +http://www.semanlink.net/tag/attali|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/attali|sameAs|http://dbpedia.org/resource/Jacques_Attali +http://www.semanlink.net/tag/attali|uri|http://www.semanlink.net/tag/attali +http://www.semanlink.net/tag/attali|broader_prefLabel|Intellectuel +http://www.semanlink.net/tag/kde|creationTime|2008-06-04T23:10:45Z +http://www.semanlink.net/tag/kde|prefLabel|KDE +http://www.semanlink.net/tag/kde|broader|http://www.semanlink.net/tag/linux +http://www.semanlink.net/tag/kde|creationDate|2008-06-04 +http://www.semanlink.net/tag/kde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kde|uri|http://www.semanlink.net/tag/kde +http://www.semanlink.net/tag/kde|broader_prefLabel|Linux +http://www.semanlink.net/tag/olaf_hartig|creationTime|2013-05-18T22:32:21Z +http://www.semanlink.net/tag/olaf_hartig|prefLabel|Olaf Hartig +http://www.semanlink.net/tag/olaf_hartig|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/olaf_hartig|related|http://www.semanlink.net/tag/www_2013 +http://www.semanlink.net/tag/olaf_hartig|creationDate|2013-05-18 +http://www.semanlink.net/tag/olaf_hartig|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/olaf_hartig|uri|http://www.semanlink.net/tag/olaf_hartig +http://www.semanlink.net/tag/olaf_hartig|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/stack_overflow|creationTime|2013-07-07T17:13:35Z +http://www.semanlink.net/tag/stack_overflow|prefLabel|Stack Overflow +http://www.semanlink.net/tag/stack_overflow|broader|http://www.semanlink.net/tag/dev_tools 
+http://www.semanlink.net/tag/stack_overflow|broader|http://www.semanlink.net/tag/q_a +http://www.semanlink.net/tag/stack_overflow|broader|http://www.semanlink.net/tag/faq +http://www.semanlink.net/tag/stack_overflow|creationDate|2013-07-07 +http://www.semanlink.net/tag/stack_overflow|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stack_overflow|uri|http://www.semanlink.net/tag/stack_overflow +http://www.semanlink.net/tag/stack_overflow|broader_prefLabel|Dev tools +http://www.semanlink.net/tag/stack_overflow|broader_prefLabel|Q&A +http://www.semanlink.net/tag/stack_overflow|broader_prefLabel|FAQ +http://www.semanlink.net/tag/texas|prefLabel|Texas +http://www.semanlink.net/tag/texas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/texas|uri|http://www.semanlink.net/tag/texas +http://www.semanlink.net/tag/jena_fuseki|creationTime|2013-08-12T12:02:43Z +http://www.semanlink.net/tag/jena_fuseki|prefLabel|Jena Fuseki +http://www.semanlink.net/tag/jena_fuseki|broader|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/jena_fuseki|creationDate|2013-08-12 +http://www.semanlink.net/tag/jena_fuseki|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jena_fuseki|describedBy|http://jena.apache.org/documentation/fuseki2/ +http://www.semanlink.net/tag/jena_fuseki|uri|http://www.semanlink.net/tag/jena_fuseki +http://www.semanlink.net/tag/jena_fuseki|broader_prefLabel|Jena +http://www.semanlink.net/tag/jena_fuseki|broader_related|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/les_100_pieges_de_l_anglais|creationTime|2007-02-17T01:52:28Z +http://www.semanlink.net/tag/les_100_pieges_de_l_anglais|prefLabel|Les 100 pièges de l'Anglais +http://www.semanlink.net/tag/les_100_pieges_de_l_anglais|broader|http://www.semanlink.net/tag/learning_english +http://www.semanlink.net/tag/les_100_pieges_de_l_anglais|broader|http://www.semanlink.net/tag/anglais +http://www.semanlink.net/tag/les_100_pieges_de_l_anglais|creationDate|2007-02-17 +http://www.semanlink.net/tag/les_100_pieges_de_l_anglais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/les_100_pieges_de_l_anglais|uri|http://www.semanlink.net/tag/les_100_pieges_de_l_anglais +http://www.semanlink.net/tag/les_100_pieges_de_l_anglais|broader_prefLabel|Learning english +http://www.semanlink.net/tag/les_100_pieges_de_l_anglais|broader_prefLabel|Anglais +http://www.semanlink.net/tag/les_100_pieges_de_l_anglais|broader_altLabel|English +http://www.semanlink.net/tag/minimum_wage|creationTime|2014-07-29T19:21:32Z +http://www.semanlink.net/tag/minimum_wage|prefLabel|Minimum wage +http://www.semanlink.net/tag/minimum_wage|broader|http://www.semanlink.net/tag/bas_salaires +http://www.semanlink.net/tag/minimum_wage|broader|http://www.semanlink.net/tag/salaire +http://www.semanlink.net/tag/minimum_wage|creationDate|2014-07-29 +http://www.semanlink.net/tag/minimum_wage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/minimum_wage|uri|http://www.semanlink.net/tag/minimum_wage +http://www.semanlink.net/tag/minimum_wage|broader_prefLabel|Bas salaires +http://www.semanlink.net/tag/minimum_wage|broader_prefLabel|Salaire +http://www.semanlink.net/tag/www_conference|creationTime|2013-03-02T16:47:09Z +http://www.semanlink.net/tag/www_conference|prefLabel|TheWebConf +http://www.semanlink.net/tag/www_conference|broader|http://www.semanlink.net/tag/conferences 
+http://www.semanlink.net/tag/www_conference|creationDate|2013-03-02 +http://www.semanlink.net/tag/www_conference|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/www_conference|altLabel|WWW Conference +http://www.semanlink.net/tag/www_conference|uri|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/www_conference|broader_prefLabel|Conférences +http://www.semanlink.net/tag/www_conference|broader_related|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/regression_analysis|creationTime|2014-04-24T13:34:28Z +http://www.semanlink.net/tag/regression_analysis|prefLabel|Regression analysis +http://www.semanlink.net/tag/regression_analysis|creationDate|2014-04-24 +http://www.semanlink.net/tag/regression_analysis|comment|"a statistical process for estimating the relationships among variables. +" +http://www.semanlink.net/tag/regression_analysis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/regression_analysis|describedBy|https://en.wikipedia.org/wiki/Regression_analysis +http://www.semanlink.net/tag/regression_analysis|uri|http://www.semanlink.net/tag/regression_analysis +http://www.semanlink.net/tag/humour_noir|creationTime|2012-10-23T00:22:06Z +http://www.semanlink.net/tag/humour_noir|prefLabel|Humour noir +http://www.semanlink.net/tag/humour_noir|broader|http://www.semanlink.net/tag/humour +http://www.semanlink.net/tag/humour_noir|creationDate|2012-10-23 +http://www.semanlink.net/tag/humour_noir|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/humour_noir|uri|http://www.semanlink.net/tag/humour_noir +http://www.semanlink.net/tag/humour_noir|broader_prefLabel|Humour +http://www.semanlink.net/tag/peche|prefLabel|Pêche +http://www.semanlink.net/tag/peche|broader|http://www.semanlink.net/tag/poisson +http://www.semanlink.net/tag/peche|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/peche|broader|http://www.semanlink.net/tag/ocean +http://www.semanlink.net/tag/peche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/peche|uri|http://www.semanlink.net/tag/peche +http://www.semanlink.net/tag/peche|broader_prefLabel|Poisson +http://www.semanlink.net/tag/peche|broader_prefLabel|Economie +http://www.semanlink.net/tag/peche|broader_prefLabel|Océan +http://www.semanlink.net/tag/peche|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/poutine|prefLabel|Poutine +http://www.semanlink.net/tag/poutine|broader|http://www.semanlink.net/tag/mechant +http://www.semanlink.net/tag/poutine|broader|http://www.semanlink.net/tag/russie +http://www.semanlink.net/tag/poutine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/poutine|altLabel|Putin +http://www.semanlink.net/tag/poutine|uri|http://www.semanlink.net/tag/poutine +http://www.semanlink.net/tag/poutine|broader_prefLabel|Méchant +http://www.semanlink.net/tag/poutine|broader_prefLabel|Russie +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|creationTime|2018-11-06T09:57:23Z +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|prefLabel|BlackboxNLP (2018 workshop) +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|broader|http://www.semanlink.net/tag/emnlp_2018 +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|broader|http://www.semanlink.net/tag/blackbox_nlp 
+http://www.semanlink.net/tag/blackboxnlp_workshop_2018|broader|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|related|http://www.semanlink.net/tag/yoav_goldberg +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|creationDate|2018-11-06 +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|comment|"[Proceedings](https://aclanthology.coli.uni-saarland.de/events/ws-2018#W18-54) + +> the introduction of neural networks has typically come at the cost of our understanding of the system: what are the representations and computations that the network learns? The goal of this workshop is to bring together people who are attempting to peek inside the neural network black box, taking inspiration from machine learning, psychology, linguistics and neuroscience." +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|homepage|https://blackboxnlp.github.io/ +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|uri|http://www.semanlink.net/tag/blackboxnlp_workshop_2018 +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|broader_prefLabel|EMNLP 2018 +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|broader_prefLabel|Blackbox NLP +http://www.semanlink.net/tag/blackboxnlp_workshop_2018|broader_prefLabel|Workshop +http://www.semanlink.net/tag/sylvain|prefLabel|Sylvain +http://www.semanlink.net/tag/sylvain|broader|http://www.semanlink.net/tag/ami +http://www.semanlink.net/tag/sylvain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sylvain|uri|http://www.semanlink.net/tag/sylvain +http://www.semanlink.net/tag/sylvain|broader_prefLabel|Ami +http://www.semanlink.net/tag/semantic_feature_extraction|creationTime|2014-03-08T15:40:52Z +http://www.semanlink.net/tag/semantic_feature_extraction|prefLabel|Semantic feature extraction +http://www.semanlink.net/tag/semantic_feature_extraction|broader|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/semantic_feature_extraction|broader|http://www.semanlink.net/tag/semantic_technology +http://www.semanlink.net/tag/semantic_feature_extraction|creationDate|2014-03-08 +http://www.semanlink.net/tag/semantic_feature_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_feature_extraction|uri|http://www.semanlink.net/tag/semantic_feature_extraction +http://www.semanlink.net/tag/semantic_feature_extraction|broader_prefLabel|Feature extraction +http://www.semanlink.net/tag/semantic_feature_extraction|broader_prefLabel|Semantic technology +http://www.semanlink.net/tag/semantic_feature_extraction|broader_related|http://www.semanlink.net/tag/dimensionality_reduction +http://www.semanlink.net/tag/thriller|creationTime|2017-10-07T01:14:30Z +http://www.semanlink.net/tag/thriller|prefLabel|Thriller +http://www.semanlink.net/tag/thriller|creationDate|2017-10-07 +http://www.semanlink.net/tag/thriller|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thriller|uri|http://www.semanlink.net/tag/thriller +http://www.semanlink.net/tag/barnaby_jack|creationTime|2013-07-29T10:41:04Z +http://www.semanlink.net/tag/barnaby_jack|prefLabel|Barnaby Jack +http://www.semanlink.net/tag/barnaby_jack|broader|http://www.semanlink.net/tag/hackers +http://www.semanlink.net/tag/barnaby_jack|creationDate|2013-07-29 
+http://www.semanlink.net/tag/barnaby_jack|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/barnaby_jack|uri|http://www.semanlink.net/tag/barnaby_jack +http://www.semanlink.net/tag/barnaby_jack|broader_prefLabel|Hackers +http://www.semanlink.net/tag/cosmic_inflation|creationTime|2014-06-21T18:18:35Z +http://www.semanlink.net/tag/cosmic_inflation|prefLabel|Cosmic inflation +http://www.semanlink.net/tag/cosmic_inflation|broader|http://www.semanlink.net/tag/cosmologie +http://www.semanlink.net/tag/cosmic_inflation|related|http://www.semanlink.net/tag/big_bang +http://www.semanlink.net/tag/cosmic_inflation|creationDate|2014-06-21 +http://www.semanlink.net/tag/cosmic_inflation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cosmic_inflation|uri|http://www.semanlink.net/tag/cosmic_inflation +http://www.semanlink.net/tag/cosmic_inflation|broader_prefLabel|Cosmologie +http://www.semanlink.net/tag/document_embeddings|creationTime|2017-10-21T16:21:53Z +http://www.semanlink.net/tag/document_embeddings|prefLabel|Document embeddings +http://www.semanlink.net/tag/document_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/document_embeddings|broader|http://www.semanlink.net/tag/embeddings_in_nlp +http://www.semanlink.net/tag/document_embeddings|broader|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/document_embeddings|creationDate|2017-10-21 +http://www.semanlink.net/tag/document_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/document_embeddings|uri|http://www.semanlink.net/tag/document_embeddings +http://www.semanlink.net/tag/document_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/document_embeddings|broader_prefLabel|Embeddings in NLP +http://www.semanlink.net/tag/document_embeddings|broader_prefLabel|Text Embeddings +http://www.semanlink.net/tag/document_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/document_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/document_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/document_embeddings|broader_related|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/tag/document_embeddings|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/emnlp_2021|creationTime|2021-09-03T00:41:59Z +http://www.semanlink.net/tag/emnlp_2021|prefLabel|EMNLP 2021 +http://www.semanlink.net/tag/emnlp_2021|broader|http://www.semanlink.net/tag/emnlp +http://www.semanlink.net/tag/emnlp_2021|creationDate|2021-09-03 +http://www.semanlink.net/tag/emnlp_2021|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/emnlp_2021|uri|http://www.semanlink.net/tag/emnlp_2021 +http://www.semanlink.net/tag/emnlp_2021|broader_prefLabel|EMNLP +http://www.semanlink.net/tag/rio_tinto|creationTime|2021-07-31T18:42:45Z +http://www.semanlink.net/tag/rio_tinto|prefLabel|Rio Tinto +http://www.semanlink.net/tag/rio_tinto|broader|http://www.semanlink.net/tag/industrie_miniere +http://www.semanlink.net/tag/rio_tinto|broader|http://www.semanlink.net/tag/australie +http://www.semanlink.net/tag/rio_tinto|creationDate|2021-07-31 +http://www.semanlink.net/tag/rio_tinto|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/rio_tinto|describedBy|https://fr.wikipedia.org/wiki/Rio_Tinto_(entreprise)#Nouveau_scandale_en_2020_en_Australie +http://www.semanlink.net/tag/rio_tinto|uri|http://www.semanlink.net/tag/rio_tinto +http://www.semanlink.net/tag/rio_tinto|broader_prefLabel|Industrie minière +http://www.semanlink.net/tag/rio_tinto|broader_prefLabel|Australie +http://www.semanlink.net/tag/syrie|prefLabel|Syrie +http://www.semanlink.net/tag/syrie|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/syrie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/syrie|uri|http://www.semanlink.net/tag/syrie +http://www.semanlink.net/tag/syrie|broader_prefLabel|Asie +http://www.semanlink.net/tag/applet|prefLabel|Applet +http://www.semanlink.net/tag/applet|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/applet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/applet|uri|http://www.semanlink.net/tag/applet +http://www.semanlink.net/tag/applet|broader_prefLabel|Java +http://www.semanlink.net/tag/liberalisme|prefLabel|Libéralisme +http://www.semanlink.net/tag/liberalisme|broader|http://www.semanlink.net/tag/capitalisme +http://www.semanlink.net/tag/liberalisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/liberalisme|altLabel|Libre-échange +http://www.semanlink.net/tag/liberalisme|uri|http://www.semanlink.net/tag/liberalisme +http://www.semanlink.net/tag/liberalisme|broader_prefLabel|Capitalisme +http://www.semanlink.net/tag/paleolithique|creationTime|2007-11-21T15:42:20Z +http://www.semanlink.net/tag/paleolithique|prefLabel|Paléolithique +http://www.semanlink.net/tag/paleolithique|creationDate|2007-11-21 +http://www.semanlink.net/tag/paleolithique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paleolithique|uri|http://www.semanlink.net/tag/paleolithique +http://www.semanlink.net/tag/children_s_language_acquisition|creationTime|2020-09-17T23:54:45Z +http://www.semanlink.net/tag/children_s_language_acquisition|prefLabel|Children’s language acquisition +http://www.semanlink.net/tag/children_s_language_acquisition|broader|http://www.semanlink.net/tag/language_learning +http://www.semanlink.net/tag/children_s_language_acquisition|creationDate|2020-09-17 +http://www.semanlink.net/tag/children_s_language_acquisition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/children_s_language_acquisition|uri|http://www.semanlink.net/tag/children_s_language_acquisition +http://www.semanlink.net/tag/children_s_language_acquisition|broader_prefLabel|Language learning +http://www.semanlink.net/tag/cheat_sheet|creationTime|2011-06-29T18:05:17Z +http://www.semanlink.net/tag/cheat_sheet|prefLabel|Cheat sheet +http://www.semanlink.net/tag/cheat_sheet|broader|http://www.semanlink.net/tag/dev_tips +http://www.semanlink.net/tag/cheat_sheet|creationDate|2011-06-29 +http://www.semanlink.net/tag/cheat_sheet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cheat_sheet|altLabel|Cheatsheet +http://www.semanlink.net/tag/cheat_sheet|uri|http://www.semanlink.net/tag/cheat_sheet +http://www.semanlink.net/tag/cheat_sheet|broader_prefLabel|Dev tips +http://www.semanlink.net/tag/cheat_sheet|broader_altLabel|Dev tip +http://www.semanlink.net/tag/tandja|creationTime|2009-07-14T13:12:16Z +http://www.semanlink.net/tag/tandja|prefLabel|Tandja 
+http://www.semanlink.net/tag/tandja|broader|http://www.semanlink.net/tag/chef_d_etat +http://www.semanlink.net/tag/tandja|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/tandja|creationDate|2009-07-14 +http://www.semanlink.net/tag/tandja|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tandja|uri|http://www.semanlink.net/tag/tandja +http://www.semanlink.net/tag/tandja|broader_prefLabel|Chef d'état +http://www.semanlink.net/tag/tandja|broader_prefLabel|Niger +http://www.semanlink.net/tag/tandja|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/tandja|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/tandja|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/bioinformatics|creationTime|2015-01-04T19:30:41Z +http://www.semanlink.net/tag/bioinformatics|prefLabel|Bioinformatics +http://www.semanlink.net/tag/bioinformatics|creationDate|2015-01-04 +http://www.semanlink.net/tag/bioinformatics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bioinformatics|describedBy|https://en.wikipedia.org/wiki/Bioinformatics +http://www.semanlink.net/tag/bioinformatics|altLabel|Bioinformatique +http://www.semanlink.net/tag/bioinformatics|uri|http://www.semanlink.net/tag/bioinformatics +http://www.semanlink.net/tag/facebook_open_graph|creationTime|2014-10-29T02:36:53Z +http://www.semanlink.net/tag/facebook_open_graph|prefLabel|Facebook Open Graph +http://www.semanlink.net/tag/facebook_open_graph|broader|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/facebook_open_graph|creationDate|2014-10-29 +http://www.semanlink.net/tag/facebook_open_graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/facebook_open_graph|uri|http://www.semanlink.net/tag/facebook_open_graph +http://www.semanlink.net/tag/facebook_open_graph|broader_prefLabel|Facebook +http://www.semanlink.net/tag/facebook_open_graph|broader_altLabel|FB +http://www.semanlink.net/tag/cloud_and_linked_data|creationTime|2014-03-07T11:43:02Z +http://www.semanlink.net/tag/cloud_and_linked_data|prefLabel|Cloud and Linked Data +http://www.semanlink.net/tag/cloud_and_linked_data|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/cloud_and_linked_data|broader|http://www.semanlink.net/tag/cloud_and_linked_data +http://www.semanlink.net/tag/cloud_and_linked_data|creationDate|2014-03-07 +http://www.semanlink.net/tag/cloud_and_linked_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cloud_and_linked_data|uri|http://www.semanlink.net/tag/cloud_and_linked_data +http://www.semanlink.net/tag/cloud_and_linked_data|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/cloud_and_linked_data|broader_prefLabel|Cloud and Linked Data +http://www.semanlink.net/tag/cloud_and_linked_data|broader_altLabel|LD +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/chris_bizer 
+http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/cloud_and_linked_data|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/ivan_herman|creationTime|2007-04-03T23:05:50Z +http://www.semanlink.net/tag/ivan_herman|prefLabel|Ivan Herman +http://www.semanlink.net/tag/ivan_herman|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/ivan_herman|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/ivan_herman|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/ivan_herman|creationDate|2007-04-03 +http://www.semanlink.net/tag/ivan_herman|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ivan_herman|uri|http://www.semanlink.net/tag/ivan_herman +http://www.semanlink.net/tag/ivan_herman|broader_prefLabel|W3C +http://www.semanlink.net/tag/ivan_herman|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/ivan_herman|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/ivan_herman|broader_altLabel|Technical guys +http://www.semanlink.net/tag/ivan_herman|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/ivan_herman|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/multi_label_classification|creationTime|2014-04-08T17:25:08Z +http://www.semanlink.net/tag/multi_label_classification|prefLabel|Multi-label classification +http://www.semanlink.net/tag/multi_label_classification|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/multi_label_classification|related|http://www.semanlink.net/tag/nlp_4_semanlink +http://www.semanlink.net/tag/multi_label_classification|creationDate|2014-04-08 +http://www.semanlink.net/tag/multi_label_classification|comment|"In multi-label classification, each sample can be associated with a set of class labels. It is distinct from multi-class classification which aims to predict a single mutually exclusive label. + +Performance evaluation of multi-label learning system: [see here](doc:?uri=http%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.89.7148) + +Methods (non-exhaustive list): + +- Dividing the original multi-label classification problem into multiple independent binary classification tasks + - computationally expensive + - cannot identify the correlation between label information +- KNN based. +- Label embedding based approaches (deriving a latent label space with reduced dimensionality). 
Ex : [SLEEC](doc:2020/08/sparse_local_embeddings_for_ext) +- Tree-based methods: a tree structure to divide the document set recursively, so that documents in each leaf node share similar label distribution. Ex: FastXML + - correlation between the labels can be implicitly exploited +- Deep Learning based methods: cf. successes of deep learning for text representation +- Combining label embeddings and deep learning on text to construct label-aware representation of documents. Ex [LAHA](doc:2019/06/_1905_10070_label_aware_docume)" +http://www.semanlink.net/tag/multi_label_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multi_label_classification|describedBy|https://en.wikipedia.org/wiki/Multi-label_classification +http://www.semanlink.net/tag/multi_label_classification|altLabel|Multilabel classification +http://www.semanlink.net/tag/multi_label_classification|uri|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/tag/multi_label_classification|broader_prefLabel|Classification +http://www.semanlink.net/tag/keras|creationTime|2017-09-16T02:22:49Z +http://www.semanlink.net/tag/keras|prefLabel|Keras +http://www.semanlink.net/tag/keras|broader|http://www.semanlink.net/tag/deep_learning_frameworks +http://www.semanlink.net/tag/keras|creationDate|2017-09-16 +http://www.semanlink.net/tag/keras|comment|high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano. +http://www.semanlink.net/tag/keras|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/keras|describedBy|https://keras.io/ +http://www.semanlink.net/tag/keras|uri|http://www.semanlink.net/tag/keras +http://www.semanlink.net/tag/keras|broader_prefLabel|Deep Learning frameworks +http://www.semanlink.net/tag/brain_machine_interface|creationTime|2015-09-19T03:23:39Z +http://www.semanlink.net/tag/brain_machine_interface|prefLabel|Brain-Machine Interface +http://www.semanlink.net/tag/brain_machine_interface|broader|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/brain_machine_interface|creationDate|2015-09-19 +http://www.semanlink.net/tag/brain_machine_interface|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/brain_machine_interface|uri|http://www.semanlink.net/tag/brain_machine_interface +http://www.semanlink.net/tag/brain_machine_interface|broader_prefLabel|Neuroscience +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|creationTime|2020-12-12T13:33:08Z +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|prefLabel|KG-augmented Language Models +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|creationDate|2020-12-12 +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|altLabel|Knowledge Graph-augmented Language Models 
+http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|altLabel|KG-augmented LMs +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|uri|http://www.semanlink.net/tag/knowledge_graph_augmented_language_models +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader_prefLabel|Knowledge Graph + Deep Learning +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader_prefLabel|Knowledge-augmented Language Models +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader_prefLabel|Knowledge Graphs in NLP +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader_altLabel|KG-Augmented LMs +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader_altLabel|Injecting knowledge into LMs +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader_altLabel|Injecting knowledge into Language Models +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader_related|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader_related|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/tag/knowledge_graph_augmented_language_models|broader_related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/australia_s_evolutionary_history|creationTime|2007-05-23T01:20:13Z +http://www.semanlink.net/tag/australia_s_evolutionary_history|prefLabel|Australia's evolutionary history +http://www.semanlink.net/tag/australia_s_evolutionary_history|broader|http://www.semanlink.net/tag/australie +http://www.semanlink.net/tag/australia_s_evolutionary_history|broader|http://www.semanlink.net/tag/evolution +http://www.semanlink.net/tag/australia_s_evolutionary_history|related|http://www.semanlink.net/tag/monotremes +http://www.semanlink.net/tag/australia_s_evolutionary_history|related|http://www.semanlink.net/tag/marsupiaux +http://www.semanlink.net/tag/australia_s_evolutionary_history|creationDate|2007-05-23 +http://www.semanlink.net/tag/australia_s_evolutionary_history|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/australia_s_evolutionary_history|uri|http://www.semanlink.net/tag/australia_s_evolutionary_history +http://www.semanlink.net/tag/australia_s_evolutionary_history|broader_prefLabel|Australie +http://www.semanlink.net/tag/australia_s_evolutionary_history|broader_prefLabel|Evolution +http://www.semanlink.net/tag/vaccin|creationTime|2015-08-06T23:33:14Z +http://www.semanlink.net/tag/vaccin|prefLabel|Vaccin +http://www.semanlink.net/tag/vaccin|broader|http://www.semanlink.net/tag/medecine +http://www.semanlink.net/tag/vaccin|creationDate|2015-08-06 +http://www.semanlink.net/tag/vaccin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vaccin|uri|http://www.semanlink.net/tag/vaccin +http://www.semanlink.net/tag/vaccin|broader_prefLabel|Médecine +http://www.semanlink.net/tag/vint_cerf|creationTime|2018-04-28T16:18:06Z +http://www.semanlink.net/tag/vint_cerf|prefLabel|Vint Cerf +http://www.semanlink.net/tag/vint_cerf|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/vint_cerf|related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/vint_cerf|creationDate|2018-04-28 
+http://www.semanlink.net/tag/vint_cerf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vint_cerf|describedBy|https://en.wikipedia.org/wiki/Vint_Cerf +http://www.semanlink.net/tag/vint_cerf|uri|http://www.semanlink.net/tag/vint_cerf +http://www.semanlink.net/tag/vint_cerf|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/vint_cerf|broader_altLabel|Technical guys +http://www.semanlink.net/tag/service_description|creationTime|2014-10-29T02:33:32Z +http://www.semanlink.net/tag/service_description|prefLabel|Service description +http://www.semanlink.net/tag/service_description|creationDate|2014-10-29 +http://www.semanlink.net/tag/service_description|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/service_description|uri|http://www.semanlink.net/tag/service_description +http://www.semanlink.net/tag/readwriteweb_com|creationTime|2010-07-30T14:47:39Z +http://www.semanlink.net/tag/readwriteweb_com|prefLabel|ReadWriteWeb.com +http://www.semanlink.net/tag/readwriteweb_com|creationDate|2010-07-30 +http://www.semanlink.net/tag/readwriteweb_com|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/readwriteweb_com|uri|http://www.semanlink.net/tag/readwriteweb_com +http://www.semanlink.net/tag/yago|creationTime|2007-05-23T21:33:08Z +http://www.semanlink.net/tag/yago|prefLabel|Yago +http://www.semanlink.net/tag/yago|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/yago|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/yago|related|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/yago|related|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/tag/yago|related|http://www.semanlink.net/tag/wordnet +http://www.semanlink.net/tag/yago|creationDate|2007-05-23 +http://www.semanlink.net/tag/yago|comment|"A Core of Semantic Knowledge Unifying WordNet and Wikipedia +" +http://www.semanlink.net/tag/yago|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yago|homepage|http://www.mpi-inf.mpg.de/yago-naga/yago/ +http://www.semanlink.net/tag/yago|uri|http://www.semanlink.net/tag/yago +http://www.semanlink.net/tag/yago|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/yago|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/yago|broader_altLabel|LD +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/tom_heath 
+http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/yago|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/bruxelles|prefLabel|Bruxelles +http://www.semanlink.net/tag/bruxelles|broader|http://www.semanlink.net/tag/belgique +http://www.semanlink.net/tag/bruxelles|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/bruxelles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bruxelles|uri|http://www.semanlink.net/tag/bruxelles +http://www.semanlink.net/tag/bruxelles|broader_prefLabel|Belgique +http://www.semanlink.net/tag/bruxelles|broader_prefLabel|Ville +http://www.semanlink.net/tag/litterature|prefLabel|Littérature +http://www.semanlink.net/tag/litterature|broader|http://www.semanlink.net/tag/art +http://www.semanlink.net/tag/litterature|related|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/litterature|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/litterature|uri|http://www.semanlink.net/tag/litterature +http://www.semanlink.net/tag/litterature|broader_prefLabel|Art +http://www.semanlink.net/tag/doc_by_google|creationTime|2020-01-21T17:14:44Z +http://www.semanlink.net/tag/doc_by_google|prefLabel|Doc by Google +http://www.semanlink.net/tag/doc_by_google|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/doc_by_google|creationDate|2020-01-21 +http://www.semanlink.net/tag/doc_by_google|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/doc_by_google|uri|http://www.semanlink.net/tag/doc_by_google +http://www.semanlink.net/tag/doc_by_google|broader_prefLabel|Google +http://www.semanlink.net/tag/doc_by_google|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/bertology|creationTime|2019-10-31T08:28:59Z +http://www.semanlink.net/tag/bertology|prefLabel|Bertology +http://www.semanlink.net/tag/bertology|broader|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/bertology|creationDate|2019-10-31 +http://www.semanlink.net/tag/bertology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bertology|uri|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/tag/bertology|broader_prefLabel|BERT +http://www.semanlink.net/tag/word_sense_disambiguation|creationTime|2019-04-12T11:15:01Z +http://www.semanlink.net/tag/word_sense_disambiguation|prefLabel|Word-sense disambiguation +http://www.semanlink.net/tag/word_sense_disambiguation|broader|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/word_sense_disambiguation|related|http://www.semanlink.net/tag/entity_linking +http://www.semanlink.net/tag/word_sense_disambiguation|creationDate|2019-04-12 +http://www.semanlink.net/tag/word_sense_disambiguation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/word_sense_disambiguation|describedBy|https://en.wikipedia.org/wiki/Word-sense_disambiguation +http://www.semanlink.net/tag/word_sense_disambiguation|uri|http://www.semanlink.net/tag/word_sense_disambiguation +http://www.semanlink.net/tag/word_sense_disambiguation|broader_prefLabel|Word sense / Lexical ambiguity +http://www.semanlink.net/tag/word_sense_disambiguation|broader_altLabel|Polysemy 
+http://www.semanlink.net/tag/word_sense_disambiguation|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/client_side_xslt|prefLabel|Client side XSLT +http://www.semanlink.net/tag/client_side_xslt|broader|http://www.semanlink.net/tag/xslt +http://www.semanlink.net/tag/client_side_xslt|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/client_side_xslt|uri|http://www.semanlink.net/tag/client_side_xslt +http://www.semanlink.net/tag/client_side_xslt|broader_prefLabel|XSLT +http://www.semanlink.net/tag/digital_economy|creationTime|2013-01-07T13:10:47Z +http://www.semanlink.net/tag/digital_economy|prefLabel|Digital economy +http://www.semanlink.net/tag/digital_economy|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/digital_economy|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/digital_economy|creationDate|2013-01-07 +http://www.semanlink.net/tag/digital_economy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/digital_economy|altLabel|Economie numérique +http://www.semanlink.net/tag/digital_economy|uri|http://www.semanlink.net/tag/digital_economy +http://www.semanlink.net/tag/digital_economy|broader_prefLabel|Internet +http://www.semanlink.net/tag/digital_economy|broader_prefLabel|Economie +http://www.semanlink.net/tag/digital_economy|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/embeddings_in_nlp|creationTime|2020-03-20T00:34:10Z +http://www.semanlink.net/tag/embeddings_in_nlp|prefLabel|Embeddings in NLP +http://www.semanlink.net/tag/embeddings_in_nlp|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/embeddings_in_nlp|related|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/tag/embeddings_in_nlp|creationDate|2020-03-20 +http://www.semanlink.net/tag/embeddings_in_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/embeddings_in_nlp|uri|http://www.semanlink.net/tag/embeddings_in_nlp +http://www.semanlink.net/tag/embeddings_in_nlp|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/embeddings_in_nlp|broader_altLabel|embedding +http://www.semanlink.net/tag/embeddings_in_nlp|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/embeddings_in_nlp|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/chinua_achebe|creationTime|2020-02-11T01:13:36Z +http://www.semanlink.net/tag/chinua_achebe|prefLabel|Chinua Achebe +http://www.semanlink.net/tag/chinua_achebe|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/chinua_achebe|broader|http://www.semanlink.net/tag/nigeria +http://www.semanlink.net/tag/chinua_achebe|creationDate|2020-02-11 +http://www.semanlink.net/tag/chinua_achebe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chinua_achebe|describedBy|https://en.wikipedia.org/wiki/Chinua_Achebe +http://www.semanlink.net/tag/chinua_achebe|uri|http://www.semanlink.net/tag/chinua_achebe +http://www.semanlink.net/tag/chinua_achebe|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/chinua_achebe|broader_prefLabel|Nigeria +http://www.semanlink.net/tag/tests|creationTime|2013-09-03T16:16:20Z +http://www.semanlink.net/tag/tests|prefLabel|Tests +http://www.semanlink.net/tag/tests|creationDate|2013-09-03 +http://www.semanlink.net/tag/tests|comment|"un lien vers 
un [doc](/doc/2020/01/test_md), et vers un [Tag](/tag/nlp) +" +http://www.semanlink.net/tag/tests|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tests|altLabel|Test +http://www.semanlink.net/tag/tests|subject|bla bla dc subject +http://www.semanlink.net/tag/tests|uri|http://www.semanlink.net/tag/tests +http://www.semanlink.net/tag/semi_supervised_learning|creationTime|2016-01-19T16:00:01Z +http://www.semanlink.net/tag/semi_supervised_learning|prefLabel|Semi-supervised learning +http://www.semanlink.net/tag/semi_supervised_learning|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/semi_supervised_learning|broader|http://www.semanlink.net/tag/supervised_machine_learning +http://www.semanlink.net/tag/semi_supervised_learning|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/semi_supervised_learning|related|http://www.semanlink.net/tag/weak_supervision +http://www.semanlink.net/tag/semi_supervised_learning|creationDate|2016-01-19 +http://www.semanlink.net/tag/semi_supervised_learning|comment|Supervised learning techniques that also make use of unlabeled data for training – typically a small amount of labeled data with a large amount of unlabeled data. +http://www.semanlink.net/tag/semi_supervised_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semi_supervised_learning|describedBy|https://en.wikipedia.org/wiki/Semi-supervised_learning +http://www.semanlink.net/tag/semi_supervised_learning|uri|http://www.semanlink.net/tag/semi_supervised_learning +http://www.semanlink.net/tag/semi_supervised_learning|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/semi_supervised_learning|broader_prefLabel|Supervised machine learning +http://www.semanlink.net/tag/semi_supervised_learning|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/semi_supervised_learning|broader_altLabel|ML +http://www.semanlink.net/tag/semi_supervised_learning|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/semi_supervised_learning|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/jeux_en_ligne|prefLabel|Jeux en ligne +http://www.semanlink.net/tag/jeux_en_ligne|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/jeux_en_ligne|broader|http://www.semanlink.net/tag/computer_game +http://www.semanlink.net/tag/jeux_en_ligne|broader|http://www.semanlink.net/tag/jeux +http://www.semanlink.net/tag/jeux_en_ligne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeux_en_ligne|uri|http://www.semanlink.net/tag/jeux_en_ligne +http://www.semanlink.net/tag/jeux_en_ligne|broader_prefLabel|Internet +http://www.semanlink.net/tag/jeux_en_ligne|broader_prefLabel|Computer game +http://www.semanlink.net/tag/jeux_en_ligne|broader_prefLabel|Jeux +http://www.semanlink.net/tag/camembert_nlp|creationTime|2020-01-23T22:51:23Z +http://www.semanlink.net/tag/camembert_nlp|prefLabel|CamemBERT +http://www.semanlink.net/tag/camembert_nlp|broader|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/camembert_nlp|broader|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/camembert_nlp|broader|http://www.semanlink.net/tag/nlp_french +http://www.semanlink.net/tag/camembert_nlp|broader|http://www.semanlink.net/tag/inria +http://www.semanlink.net/tag/camembert_nlp|creationDate|2020-01-23 
+http://www.semanlink.net/tag/camembert_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/camembert_nlp|uri|http://www.semanlink.net/tag/camembert_nlp +http://www.semanlink.net/tag/camembert_nlp|broader_prefLabel|BERT +http://www.semanlink.net/tag/camembert_nlp|broader_prefLabel|NLP@Facebook +http://www.semanlink.net/tag/camembert_nlp|broader_prefLabel|NLP: French +http://www.semanlink.net/tag/camembert_nlp|broader_prefLabel|INRIA +http://www.semanlink.net/tag/camembert_nlp|broader_altLabel|NLP: Français +http://www.semanlink.net/tag/glove|creationTime|2017-05-18T22:49:52Z +http://www.semanlink.net/tag/glove|prefLabel|GloVe +http://www.semanlink.net/tag/glove|broader|http://www.semanlink.net/tag/global_semantic_context +http://www.semanlink.net/tag/glove|broader|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/glove|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/glove|broader|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/tag/glove|related|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/tag/glove|related|http://www.semanlink.net/tag/richard_socher +http://www.semanlink.net/tag/glove|related|http://www.semanlink.net/tag/word2vec +http://www.semanlink.net/tag/glove|creationDate|2017-05-18 +http://www.semanlink.net/tag/glove|comment|"Word embedding technique (unsupervised learning algorithm for obtaining vector representations for words) based on factorizing a matrix of word co-occurence statistics (**Training is performed on aggregated global word-word co-occurrence statistics from a corpus**). +Resulting representations showcase interesting linear substructures of the word vector space. + +GloVe learns the word vectors that better reconstruct the probabilities of co-occurrence between pairs +of terms as estimated via their dot product + + +" +http://www.semanlink.net/tag/glove|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/glove|homepage|https://nlp.stanford.edu/projects/glove/ +http://www.semanlink.net/tag/glove|uri|http://www.semanlink.net/tag/glove +http://www.semanlink.net/tag/glove|broader_prefLabel|Global Semantic Context +http://www.semanlink.net/tag/glove|broader_prefLabel|Word embeddings +http://www.semanlink.net/tag/glove|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/glove|broader_prefLabel|NLP@Stanford +http://www.semanlink.net/tag/glove|broader_altLabel|Word Embedding +http://www.semanlink.net/tag/glove|broader_altLabel|Plongement lexical +http://www.semanlink.net/tag/glove|broader_related|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/glove|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/glove|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/glove|broader_related|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/tag/glove|broader_related|http://www.semanlink.net/tag/dan_jurafsky +http://www.semanlink.net/tag/tribunal_penal_international|prefLabel|Tribunal Pénal International +http://www.semanlink.net/tag/tribunal_penal_international|broader|http://www.semanlink.net/tag/crime_contre_l_humanite +http://www.semanlink.net/tag/tribunal_penal_international|broader|http://www.semanlink.net/tag/institutions_internationales +http://www.semanlink.net/tag/tribunal_penal_international|broader|http://www.semanlink.net/tag/justice_internationale 
+http://www.semanlink.net/tag/tribunal_penal_international|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tribunal_penal_international|altLabel|TPI +http://www.semanlink.net/tag/tribunal_penal_international|uri|http://www.semanlink.net/tag/tribunal_penal_international +http://www.semanlink.net/tag/tribunal_penal_international|broader_prefLabel|Crime contre l'Humanité +http://www.semanlink.net/tag/tribunal_penal_international|broader_prefLabel|Institutions internationales +http://www.semanlink.net/tag/tribunal_penal_international|broader_prefLabel|Justice internationale +http://www.semanlink.net/tag/knowledge_augmented_language_models|creationTime|2020-07-11T16:04:53Z +http://www.semanlink.net/tag/knowledge_augmented_language_models|prefLabel|Knowledge-augmented Language Models +http://www.semanlink.net/tag/knowledge_augmented_language_models|broader|http://www.semanlink.net/tag/language_models_knowledge +http://www.semanlink.net/tag/knowledge_augmented_language_models|related|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/tag/knowledge_augmented_language_models|creationDate|2020-07-11 +http://www.semanlink.net/tag/knowledge_augmented_language_models|comment|"[biblio](doc:2020/07/2004_07202_entities_as_expert): Ahn +et al., 2016; Yang et al., 2016; Logan et al., 2019; +Zhang et al., 2019; Levine et al., 2019; Xiong +et al., 2019a; Peters et al., 2019; Poerner et al., +2019; Wang et al., 2020" +http://www.semanlink.net/tag/knowledge_augmented_language_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_augmented_language_models|altLabel|KG-Augmented LMs +http://www.semanlink.net/tag/knowledge_augmented_language_models|altLabel|Injecting knowledge into LMs +http://www.semanlink.net/tag/knowledge_augmented_language_models|altLabel|Injecting knowledge into Language Models +http://www.semanlink.net/tag/knowledge_augmented_language_models|uri|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/tag/knowledge_augmented_language_models|broader_prefLabel|Language Models + Knowledge +http://www.semanlink.net/tag/knowledge_augmented_language_models|broader_related|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/eurogroupe|creationTime|2007-09-18T22:27:31Z +http://www.semanlink.net/tag/eurogroupe|prefLabel|Eurogroupe +http://www.semanlink.net/tag/eurogroupe|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/eurogroupe|creationDate|2007-09-18 +http://www.semanlink.net/tag/eurogroupe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eurogroupe|uri|http://www.semanlink.net/tag/eurogroupe +http://www.semanlink.net/tag/eurogroupe|broader_prefLabel|Europe +http://www.semanlink.net/tag/plastic_waste_trade|creationTime|2019-04-23T11:51:40Z +http://www.semanlink.net/tag/plastic_waste_trade|prefLabel|Plastic waste trade +http://www.semanlink.net/tag/plastic_waste_trade|broader|http://www.semanlink.net/tag/plastic +http://www.semanlink.net/tag/plastic_waste_trade|creationDate|2019-04-23 +http://www.semanlink.net/tag/plastic_waste_trade|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/plastic_waste_trade|uri|http://www.semanlink.net/tag/plastic_waste_trade +http://www.semanlink.net/tag/plastic_waste_trade|broader_prefLabel|Plastic +http://www.semanlink.net/tag/plastic_waste_trade|broader_altLabel|Plastique 
+http://www.semanlink.net/tag/cvs|creationTime|2007-04-25T22:05:26Z +http://www.semanlink.net/tag/cvs|prefLabel|CVS +http://www.semanlink.net/tag/cvs|creationDate|2007-04-25 +http://www.semanlink.net/tag/cvs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cvs|uri|http://www.semanlink.net/tag/cvs +http://www.semanlink.net/tag/litterature_russe|creationTime|2010-10-30T21:56:09Z +http://www.semanlink.net/tag/litterature_russe|prefLabel|Littérature russe +http://www.semanlink.net/tag/litterature_russe|broader|http://www.semanlink.net/tag/litterature +http://www.semanlink.net/tag/litterature_russe|broader|http://www.semanlink.net/tag/russie +http://www.semanlink.net/tag/litterature_russe|creationDate|2010-10-30 +http://www.semanlink.net/tag/litterature_russe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/litterature_russe|uri|http://www.semanlink.net/tag/litterature_russe +http://www.semanlink.net/tag/litterature_russe|broader_prefLabel|Littérature +http://www.semanlink.net/tag/litterature_russe|broader_prefLabel|Russie +http://www.semanlink.net/tag/litterature_russe|broader_related|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/tables|creationTime|2013-08-20T16:21:55Z +http://www.semanlink.net/tag/tables|prefLabel|Tables +http://www.semanlink.net/tag/tables|creationDate|2013-08-20 +http://www.semanlink.net/tag/tables|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tables|uri|http://www.semanlink.net/tag/tables +http://www.semanlink.net/tag/inverse_functional_properties|creationTime|2008-12-17T16:54:08Z +http://www.semanlink.net/tag/inverse_functional_properties|prefLabel|Inverse-functional properties +http://www.semanlink.net/tag/inverse_functional_properties|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/inverse_functional_properties|related|http://www.semanlink.net/tag/smushing +http://www.semanlink.net/tag/inverse_functional_properties|creationDate|2008-12-17 +http://www.semanlink.net/tag/inverse_functional_properties|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/inverse_functional_properties|uri|http://www.semanlink.net/tag/inverse_functional_properties +http://www.semanlink.net/tag/inverse_functional_properties|broader_prefLabel|OWL +http://www.semanlink.net/tag/inverse_functional_properties|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/gilets_jaunes|creationTime|2018-12-06T09:56:29Z +http://www.semanlink.net/tag/gilets_jaunes|prefLabel|Gilets jaunes +http://www.semanlink.net/tag/gilets_jaunes|broader|http://www.semanlink.net/tag/france_delabrement +http://www.semanlink.net/tag/gilets_jaunes|broader|http://www.semanlink.net/tag/societe_francaise +http://www.semanlink.net/tag/gilets_jaunes|creationDate|2018-12-06 +http://www.semanlink.net/tag/gilets_jaunes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gilets_jaunes|uri|http://www.semanlink.net/tag/gilets_jaunes +http://www.semanlink.net/tag/gilets_jaunes|broader_prefLabel|France : délabrement +http://www.semanlink.net/tag/gilets_jaunes|broader_prefLabel|Société française +http://www.semanlink.net/tag/gilets_jaunes|broader_altLabel|La France ne marche pas +http://www.semanlink.net/tag/hypiosvocampparismay2010|creationTime|2010-05-14T10:15:28Z 
+http://www.semanlink.net/tag/hypiosvocampparismay2010|prefLabel|HypiosVoCampParisMay2010 +http://www.semanlink.net/tag/hypiosvocampparismay2010|broader|http://www.semanlink.net/tag/hypios +http://www.semanlink.net/tag/hypiosvocampparismay2010|broader|http://www.semanlink.net/tag/vocamp +http://www.semanlink.net/tag/hypiosvocampparismay2010|creationDate|2010-05-14 +http://www.semanlink.net/tag/hypiosvocampparismay2010|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hypiosvocampparismay2010|uri|http://www.semanlink.net/tag/hypiosvocampparismay2010 +http://www.semanlink.net/tag/hypiosvocampparismay2010|broader_prefLabel|Hypios +http://www.semanlink.net/tag/hypiosvocampparismay2010|broader_prefLabel|VoCamp +http://www.semanlink.net/tag/siren|creationTime|2015-03-06T15:29:41Z +http://www.semanlink.net/tag/siren|prefLabel|SIREn +http://www.semanlink.net/tag/siren|related|http://www.semanlink.net/tag/elasticsearch +http://www.semanlink.net/tag/siren|related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/siren|related|http://www.semanlink.net/tag/solr +http://www.semanlink.net/tag/siren|related|http://www.semanlink.net/tag/solr_rdf +http://www.semanlink.net/tag/siren|creationDate|2015-03-06 +http://www.semanlink.net/tag/siren|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/siren|uri|http://www.semanlink.net/tag/siren +http://www.semanlink.net/tag/yves_raymond|creationTime|2007-06-13T23:21:09Z +http://www.semanlink.net/tag/yves_raymond|prefLabel|Yves Raymond +http://www.semanlink.net/tag/yves_raymond|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/yves_raymond|creationDate|2007-06-13 +http://www.semanlink.net/tag/yves_raymond|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yves_raymond|uri|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/yves_raymond|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/linkedin|creationTime|2008-10-16T23:30:43Z +http://www.semanlink.net/tag/linkedin|prefLabel|LinkedIn +http://www.semanlink.net/tag/linkedin|creationDate|2008-10-16 +http://www.semanlink.net/tag/linkedin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linkedin|uri|http://www.semanlink.net/tag/linkedin +http://www.semanlink.net/tag/allen_institute_for_ai_a2i|creationTime|2020-02-17T09:09:17Z +http://www.semanlink.net/tag/allen_institute_for_ai_a2i|prefLabel|Allen Institute for AI (A2I) +http://www.semanlink.net/tag/allen_institute_for_ai_a2i|broader|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/tag/allen_institute_for_ai_a2i|creationDate|2020-02-17 +http://www.semanlink.net/tag/allen_institute_for_ai_a2i|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/allen_institute_for_ai_a2i|uri|http://www.semanlink.net/tag/allen_institute_for_ai_a2i +http://www.semanlink.net/tag/allen_institute_for_ai_a2i|broader_prefLabel|NLP Teams +http://www.semanlink.net/tag/allen_institute_for_ai_a2i|broader_altLabel|NLP Groups +http://www.semanlink.net/tag/okkam|creationTime|2008-04-05T00:26:10Z +http://www.semanlink.net/tag/okkam|prefLabel|OKKAM +http://www.semanlink.net/tag/okkam|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/okkam|related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/okkam|creationDate|2008-04-05 
+http://www.semanlink.net/tag/okkam|comment|"OKKAM - Enabling the Web of Entities
The overall goal of the OKKAM initiative at the University of Trento is to enable the Web of Entities, a global digital space for publishing and managing information about entities, where every entity is uniquely identified, and links between entities can be explicitly specified and exploited in a variety of scenarios. Compared to the WWW, the main differences are that the domain of entities is extended beyond the realm of digital resources to include objects in other realms like products, organizations, associations, countries, events, publications, hotels or people; and that links between entities are extended beyond hyperlinks to include virtually any type of relation. + + + +" +http://www.semanlink.net/tag/okkam|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/okkam|describedBy|http://www.okkam.org/ +http://www.semanlink.net/tag/okkam|uri|http://www.semanlink.net/tag/okkam +http://www.semanlink.net/tag/okkam|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/aho_corasick_algorithm|creationTime|2019-04-24T11:46:07Z +http://www.semanlink.net/tag/aho_corasick_algorithm|prefLabel|Aho–Corasick algorithm +http://www.semanlink.net/tag/aho_corasick_algorithm|broader|http://www.semanlink.net/tag/text_processing +http://www.semanlink.net/tag/aho_corasick_algorithm|broader|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/aho_corasick_algorithm|broader|http://www.semanlink.net/tag/string_searching_algorithm +http://www.semanlink.net/tag/aho_corasick_algorithm|creationDate|2019-04-24 +http://www.semanlink.net/tag/aho_corasick_algorithm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aho_corasick_algorithm|describedBy|https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm +http://www.semanlink.net/tag/aho_corasick_algorithm|altLabel|Aho–Corasick +http://www.semanlink.net/tag/aho_corasick_algorithm|uri|http://www.semanlink.net/tag/aho_corasick_algorithm +http://www.semanlink.net/tag/aho_corasick_algorithm|broader_prefLabel|Text processing +http://www.semanlink.net/tag/aho_corasick_algorithm|broader_prefLabel|Algorithmes +http://www.semanlink.net/tag/aho_corasick_algorithm|broader_prefLabel|String-searching algorithm +http://www.semanlink.net/tag/quentin_tarantino|creationTime|2017-01-16T00:27:48Z +http://www.semanlink.net/tag/quentin_tarantino|prefLabel|Quentin Tarantino +http://www.semanlink.net/tag/quentin_tarantino|broader|http://www.semanlink.net/tag/realisateur +http://www.semanlink.net/tag/quentin_tarantino|creationDate|2017-01-16 +http://www.semanlink.net/tag/quentin_tarantino|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/quentin_tarantino|describedBy|https://en.wikipedia.org/wiki/Quentin_Tarantino +http://www.semanlink.net/tag/quentin_tarantino|uri|http://www.semanlink.net/tag/quentin_tarantino +http://www.semanlink.net/tag/quentin_tarantino|broader_prefLabel|Réalisateur +http://www.semanlink.net/tag/quentin_tarantino|broader_altLabel|Cinéaste +http://www.semanlink.net/tag/relation_learning|creationTime|2021-05-13T00:42:19Z +http://www.semanlink.net/tag/relation_learning|prefLabel|Relation Learning +http://www.semanlink.net/tag/relation_learning|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/relation_learning|creationDate|2021-05-13 +http://www.semanlink.net/tag/relation_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/relation_learning|uri|http://www.semanlink.net/tag/relation_learning +http://www.semanlink.net/tag/relation_learning|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/reshaping|creationTime|2019-09-14T11:30:09Z +http://www.semanlink.net/tag/reshaping|prefLabel|Reshaping +http://www.semanlink.net/tag/reshaping|creationDate|2019-09-14 +http://www.semanlink.net/tag/reshaping|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reshaping|uri|http://www.semanlink.net/tag/reshaping +http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys|creationTime|2017-07-13T10:37:31Z +http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys|prefLabel|IBM SPSS Text Analytics for Surveys +http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys|broader|http://www.semanlink.net/tag/ibm +http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys|broader|http://www.semanlink.net/tag/survey_analysis +http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys|creationDate|2017-07-13 +http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys|uri|http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys +http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys|broader_prefLabel|IBM +http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys|broader_prefLabel|Survey analysis +http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys|broader_related|http://www.semanlink.net/tag/topic_modeling_over_short_texts +http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles|creationTime|2013-03-24T10:17:49Z +http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles|prefLabel|Loi sur les oeuvres indisponibles +http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles|broader|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles|creationDate|2013-03-24 +http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles|uri|http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles +http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles|broader_prefLabel|Propriété intellectuelle +http://www.semanlink.net/tag/cons_de_francais|prefLabel|Cons de Français +http://www.semanlink.net/tag/cons_de_francais|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/cons_de_francais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cons_de_francais|uri|http://www.semanlink.net/tag/cons_de_francais +http://www.semanlink.net/tag/cons_de_francais|broader_prefLabel|France +http://www.semanlink.net/tag/stackoverflow_q|creationTime|2017-01-05T00:38:23Z +http://www.semanlink.net/tag/stackoverflow_q|prefLabel|StackOverFlow Q +http://www.semanlink.net/tag/stackoverflow_q|broader|http://www.semanlink.net/tag/stack_overflow +http://www.semanlink.net/tag/stackoverflow_q|creationDate|2017-01-05 +http://www.semanlink.net/tag/stackoverflow_q|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stackoverflow_q|uri|http://www.semanlink.net/tag/stackoverflow_q +http://www.semanlink.net/tag/stackoverflow_q|broader_prefLabel|Stack Overflow 
+http://www.semanlink.net/tag/tyrannical_exploitation_of_nature_by_mankind|creationTime|2015-06-14T14:33:56Z +http://www.semanlink.net/tag/tyrannical_exploitation_of_nature_by_mankind|prefLabel|Tyrannical exploitation of nature by mankind +http://www.semanlink.net/tag/tyrannical_exploitation_of_nature_by_mankind|broader|http://www.semanlink.net/tag/crise_ecologique +http://www.semanlink.net/tag/tyrannical_exploitation_of_nature_by_mankind|creationDate|2015-06-14 +http://www.semanlink.net/tag/tyrannical_exploitation_of_nature_by_mankind|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tyrannical_exploitation_of_nature_by_mankind|uri|http://www.semanlink.net/tag/tyrannical_exploitation_of_nature_by_mankind +http://www.semanlink.net/tag/tyrannical_exploitation_of_nature_by_mankind|broader_prefLabel|Crise écologique +http://www.semanlink.net/tag/nltk|creationTime|2015-10-21T18:43:50Z +http://www.semanlink.net/tag/nltk|prefLabel|NLTK +http://www.semanlink.net/tag/nltk|broader|http://www.semanlink.net/tag/python_nlp +http://www.semanlink.net/tag/nltk|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/nltk|creationDate|2015-10-21 +http://www.semanlink.net/tag/nltk|comment|"NLP for Python + +> Provides easy-to-use interfaces to over 50 corpora and lexical resources such as WordNet, along with a suite of text processing libraries for classification, tokenization, stemming, tagging, parsing, and semantic reasoning, wrappers for industrial-strength NLP libraries, and an active discussion forum. + +Install the data on mac: ``/Applications/Python 3.6/Install Certificates.command``(cf ssl), then ``import nltk ; nltk.download()`` + + + + + +" +http://www.semanlink.net/tag/nltk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nltk|homepage|http://www.nltk.org/ +http://www.semanlink.net/tag/nltk|uri|http://www.semanlink.net/tag/nltk +http://www.semanlink.net/tag/nltk|broader_prefLabel|Python-NLP +http://www.semanlink.net/tag/nltk|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/nltk|broader_related|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/nltk|broader_related|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/tag/benjamin_nowack|creationTime|2007-12-31T17:10:28Z +http://www.semanlink.net/tag/benjamin_nowack|prefLabel|Benjamin Nowack +http://www.semanlink.net/tag/benjamin_nowack|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/benjamin_nowack|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/benjamin_nowack|creationDate|2007-12-31 +http://www.semanlink.net/tag/benjamin_nowack|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/benjamin_nowack|altLabel|bengee +http://www.semanlink.net/tag/benjamin_nowack|uri|http://www.semanlink.net/tag/benjamin_nowack +http://www.semanlink.net/tag/benjamin_nowack|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/benjamin_nowack|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/benjamin_nowack|broader_altLabel|Technical guys +http://www.semanlink.net/tag/schema_org_roles|creationTime|2014-12-09T16:55:12Z +http://www.semanlink.net/tag/schema_org_roles|prefLabel|Schema.org roles +http://www.semanlink.net/tag/schema_org_roles|broader|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/schema_org_roles|creationDate|2014-12-09 
+http://www.semanlink.net/tag/schema_org_roles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/schema_org_roles|uri|http://www.semanlink.net/tag/schema_org_roles +http://www.semanlink.net/tag/schema_org_roles|broader_prefLabel|schema.org +http://www.semanlink.net/tag/schema_org_roles|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/schema_org_roles|broader_related|http://www.semanlink.net/tag/yahoo +http://www.semanlink.net/tag/schema_org_roles|broader_related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/schema_org_roles|broader_related|http://www.semanlink.net/tag/yandex +http://www.semanlink.net/tag/schema_org_roles|broader_related|http://www.semanlink.net/tag/microdata +http://www.semanlink.net/tag/schema_org_roles|broader_related|http://www.semanlink.net/tag/dan_brickley +http://www.semanlink.net/tag/schema_org_roles|broader_related|http://www.semanlink.net/tag/bing +http://www.semanlink.net/tag/sagesse_du_langage|creationTime|2009-12-18T19:50:48Z +http://www.semanlink.net/tag/sagesse_du_langage|prefLabel|Sagesse du langage +http://www.semanlink.net/tag/sagesse_du_langage|broader|http://www.semanlink.net/tag/langage +http://www.semanlink.net/tag/sagesse_du_langage|creationDate|2009-12-18 +http://www.semanlink.net/tag/sagesse_du_langage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sagesse_du_langage|uri|http://www.semanlink.net/tag/sagesse_du_langage +http://www.semanlink.net/tag/sagesse_du_langage|broader_prefLabel|Language +http://www.semanlink.net/tag/sagesse_du_langage|broader_altLabel|Langage +http://www.semanlink.net/tag/aster_aweke|creationTime|2007-09-26T01:01:16Z +http://www.semanlink.net/tag/aster_aweke|prefLabel|Aster Aweke +http://www.semanlink.net/tag/aster_aweke|broader|http://www.semanlink.net/tag/ethiopie +http://www.semanlink.net/tag/aster_aweke|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/aster_aweke|broader|http://www.semanlink.net/tag/music_of_africa +http://www.semanlink.net/tag/aster_aweke|creationDate|2007-09-26 +http://www.semanlink.net/tag/aster_aweke|type|http://purl.org/ontology/mo/MusicArtist +http://www.semanlink.net/tag/aster_aweke|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aster_aweke|describedBy|https://en.wikipedia.org/wiki/Aster_Aweke +http://www.semanlink.net/tag/aster_aweke|linkToMusicBrainz|http://musicbrainz.org/artist/a9a3acea-e026-44ac-90d5-7768eaf183ed.html +http://www.semanlink.net/tag/aster_aweke|wikipage-en|https://en.wikipedia.org/wiki/Aster_Aweke +http://www.semanlink.net/tag/aster_aweke|uri|http://www.semanlink.net/tag/aster_aweke +http://www.semanlink.net/tag/aster_aweke|broader_prefLabel|Ethiopie +http://www.semanlink.net/tag/aster_aweke|broader_prefLabel|Musicien +http://www.semanlink.net/tag/aster_aweke|broader_prefLabel|Music of Africa +http://www.semanlink.net/tag/aster_aweke|broader_altLabel|Musique africaine +http://www.semanlink.net/tag/aster_aweke|broader_altLabel|African music +http://www.semanlink.net/tag/tomcat_7|creationTime|2012-04-10T12:48:23Z +http://www.semanlink.net/tag/tomcat_7|prefLabel|Tomcat 7 +http://www.semanlink.net/tag/tomcat_7|broader|http://www.semanlink.net/tag/tomcat +http://www.semanlink.net/tag/tomcat_7|creationDate|2012-04-10 +http://www.semanlink.net/tag/tomcat_7|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/tomcat_7|uri|http://www.semanlink.net/tag/tomcat_7 +http://www.semanlink.net/tag/tomcat_7|broader_prefLabel|Tomcat +http://www.semanlink.net/tag/principle_of_least_power|creationTime|2009-04-14T01:20:54Z +http://www.semanlink.net/tag/principle_of_least_power|prefLabel|Principle of least power +http://www.semanlink.net/tag/principle_of_least_power|related|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.semanlink.net/tag/principle_of_least_power|creationDate|2009-04-14 +http://www.semanlink.net/tag/principle_of_least_power|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/principle_of_least_power|uri|http://www.semanlink.net/tag/principle_of_least_power +http://www.semanlink.net/tag/ml_conditioning|creationTime|2019-10-12T11:12:51Z +http://www.semanlink.net/tag/ml_conditioning|prefLabel|ML: conditioning +http://www.semanlink.net/tag/ml_conditioning|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/ml_conditioning|related|http://www.semanlink.net/tag/multi_task_learning +http://www.semanlink.net/tag/ml_conditioning|creationDate|2019-10-12 +http://www.semanlink.net/tag/ml_conditioning|comment|processing one source of information in the context of another +http://www.semanlink.net/tag/ml_conditioning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ml_conditioning|uri|http://www.semanlink.net/tag/ml_conditioning +http://www.semanlink.net/tag/ml_conditioning|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/9_3|prefLabel|93 +http://www.semanlink.net/tag/9_3|broader|http://www.semanlink.net/tag/banlieue +http://www.semanlink.net/tag/9_3|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/9_3|altLabel|Seine-Saint-Denis +http://www.semanlink.net/tag/9_3|uri|http://www.semanlink.net/tag/9_3 +http://www.semanlink.net/tag/9_3|broader_prefLabel|Banlieue +http://www.semanlink.net/tag/perceptron|creationTime|2014-03-28T00:48:24Z +http://www.semanlink.net/tag/perceptron|prefLabel|Perceptron +http://www.semanlink.net/tag/perceptron|broader|http://www.semanlink.net/tag/linear_classifier +http://www.semanlink.net/tag/perceptron|broader|http://www.semanlink.net/tag/supervised_machine_learning +http://www.semanlink.net/tag/perceptron|creationDate|2014-03-28 +http://www.semanlink.net/tag/perceptron|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/perceptron|describedBy|https://en.wikipedia.org/wiki/Perceptron +http://www.semanlink.net/tag/perceptron|uri|http://www.semanlink.net/tag/perceptron +http://www.semanlink.net/tag/perceptron|broader_prefLabel|Linear classifier +http://www.semanlink.net/tag/perceptron|broader_prefLabel|Supervised machine learning +http://www.semanlink.net/tag/trump|creationTime|2016-07-24T14:08:26Z +http://www.semanlink.net/tag/trump|prefLabel|Trump +http://www.semanlink.net/tag/trump|broader|http://www.semanlink.net/tag/president_des_usa +http://www.semanlink.net/tag/trump|creationDate|2016-07-24 +http://www.semanlink.net/tag/trump|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/trump|uri|http://www.semanlink.net/tag/trump +http://www.semanlink.net/tag/trump|broader_prefLabel|Président des USA +http://www.semanlink.net/tag/uri_dereferencing|prefLabel|URI dereferencing 
+http://www.semanlink.net/tag/uri_dereferencing|broader|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.semanlink.net/tag/uri_dereferencing|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/uri_dereferencing|creationDate|2006-12-31 +http://www.semanlink.net/tag/uri_dereferencing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uri_dereferencing|uri|http://www.semanlink.net/tag/uri_dereferencing +http://www.semanlink.net/tag/uri_dereferencing|broader_prefLabel|Web architecture +http://www.semanlink.net/tag/uri_dereferencing|broader_prefLabel|URI +http://www.semanlink.net/tag/velo|creationTime|2020-07-12T21:02:47Z +http://www.semanlink.net/tag/velo|prefLabel|Vélo +http://www.semanlink.net/tag/velo|creationDate|2020-07-12 +http://www.semanlink.net/tag/velo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/velo|uri|http://www.semanlink.net/tag/velo +http://www.semanlink.net/tag/neocortex|creationTime|2013-09-10T01:50:16Z +http://www.semanlink.net/tag/neocortex|prefLabel|Neocortex +http://www.semanlink.net/tag/neocortex|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/neocortex|creationDate|2013-09-10 +http://www.semanlink.net/tag/neocortex|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neocortex|uri|http://www.semanlink.net/tag/neocortex +http://www.semanlink.net/tag/neocortex|broader_prefLabel|Brain +http://www.semanlink.net/tag/neocortex|broader_altLabel|Cerveau +http://www.semanlink.net/tag/neocortex|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/class_based_language_models|creationTime|2018-07-07T14:49:26Z +http://www.semanlink.net/tag/class_based_language_models|prefLabel|Class based language models +http://www.semanlink.net/tag/class_based_language_models|broader|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/tag/class_based_language_models|creationDate|2018-07-07 +http://www.semanlink.net/tag/class_based_language_models|comment|similar items are clustered into classes, an n-gram language model for the class tokens is generated, and then the probabilities for words in a class are distributed according to the smoothed relative unigram frequencies of the words. 
+http://www.semanlink.net/tag/class_based_language_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/class_based_language_models|uri|http://www.semanlink.net/tag/class_based_language_models +http://www.semanlink.net/tag/class_based_language_models|broader_prefLabel|Language model +http://www.semanlink.net/tag/class_based_language_models|broader_altLabel|Language Modeling +http://www.semanlink.net/tag/class_based_language_models|broader_altLabel|LM +http://www.semanlink.net/tag/class_based_language_models|broader_altLabel|Statistical Language Model +http://www.semanlink.net/tag/class_based_language_models|broader_related|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/class_based_language_models|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/byzantine_fault_tolerance|creationTime|2013-12-16T11:02:40Z +http://www.semanlink.net/tag/byzantine_fault_tolerance|prefLabel|Byzantine fault tolerance +http://www.semanlink.net/tag/byzantine_fault_tolerance|related|http://www.semanlink.net/tag/bitcoin +http://www.semanlink.net/tag/byzantine_fault_tolerance|creationDate|2013-12-16 +http://www.semanlink.net/tag/byzantine_fault_tolerance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/byzantine_fault_tolerance|describedBy|https://en.wikipedia.org/wiki/Byzantine_fault_tolerance +http://www.semanlink.net/tag/byzantine_fault_tolerance|uri|http://www.semanlink.net/tag/byzantine_fault_tolerance +http://www.semanlink.net/tag/limule|creationTime|2014-02-27T01:05:50Z +http://www.semanlink.net/tag/limule|prefLabel|limule +http://www.semanlink.net/tag/limule|broader|http://www.semanlink.net/tag/curiosite_naturelle +http://www.semanlink.net/tag/limule|broader|http://www.semanlink.net/tag/especes_menacees +http://www.semanlink.net/tag/limule|creationDate|2014-02-27 +http://www.semanlink.net/tag/limule|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/limule|describedBy|https://fr.wikipedia.org/wiki/Limulidae +http://www.semanlink.net/tag/limule|uri|http://www.semanlink.net/tag/limule +http://www.semanlink.net/tag/limule|broader_prefLabel|Curiosités naturelles +http://www.semanlink.net/tag/limule|broader_prefLabel|Espèces menacées +http://www.semanlink.net/tag/limule|broader_altLabel|Endangered Species +http://www.semanlink.net/tag/limule|broader_related|http://www.semanlink.net/tag/disparition_d_especes +http://www.semanlink.net/tag/points_of_interest|creationTime|2013-04-16T15:10:44Z +http://www.semanlink.net/tag/points_of_interest|prefLabel|Points of Interest +http://www.semanlink.net/tag/points_of_interest|creationDate|2013-04-16 +http://www.semanlink.net/tag/points_of_interest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/points_of_interest|uri|http://www.semanlink.net/tag/points_of_interest +http://www.semanlink.net/tag/eric_schmidt|creationTime|2013-04-28T00:10:19Z +http://www.semanlink.net/tag/eric_schmidt|prefLabel|Eric Schmidt +http://www.semanlink.net/tag/eric_schmidt|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/eric_schmidt|creationDate|2013-04-28 +http://www.semanlink.net/tag/eric_schmidt|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eric_schmidt|uri|http://www.semanlink.net/tag/eric_schmidt +http://www.semanlink.net/tag/eric_schmidt|broader_prefLabel|Google 
+http://www.semanlink.net/tag/eric_schmidt|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/thomas_wolf|creationTime|2020-01-05T18:29:27Z +http://www.semanlink.net/tag/thomas_wolf|prefLabel|Thomas Wolf +http://www.semanlink.net/tag/thomas_wolf|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/thomas_wolf|related|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/tag/thomas_wolf|related|http://www.semanlink.net/tag/huggingface_bigscience +http://www.semanlink.net/tag/thomas_wolf|creationDate|2020-01-05 +http://www.semanlink.net/tag/thomas_wolf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thomas_wolf|uri|http://www.semanlink.net/tag/thomas_wolf +http://www.semanlink.net/tag/thomas_wolf|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/evangelistes|creationTime|2020-08-22T14:32:25Z +http://www.semanlink.net/tag/evangelistes|prefLabel|Evangélistes +http://www.semanlink.net/tag/evangelistes|broader|http://www.semanlink.net/tag/chretiente +http://www.semanlink.net/tag/evangelistes|creationDate|2020-08-22 +http://www.semanlink.net/tag/evangelistes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/evangelistes|uri|http://www.semanlink.net/tag/evangelistes +http://www.semanlink.net/tag/evangelistes|broader_prefLabel|Chrétienté +http://www.semanlink.net/tag/exxonmobil|creationTime|2008-02-20T23:57:46Z +http://www.semanlink.net/tag/exxonmobil|prefLabel|ExxonMobil +http://www.semanlink.net/tag/exxonmobil|broader|http://www.semanlink.net/tag/compagnies_petrolieres +http://www.semanlink.net/tag/exxonmobil|creationDate|2008-02-20 +http://www.semanlink.net/tag/exxonmobil|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exxonmobil|uri|http://www.semanlink.net/tag/exxonmobil +http://www.semanlink.net/tag/exxonmobil|broader_prefLabel|Compagnies pétrolières +http://www.semanlink.net/tag/science_fiction|creationTime|2009-09-25T23:54:28Z +http://www.semanlink.net/tag/science_fiction|prefLabel|Science fiction +http://www.semanlink.net/tag/science_fiction|broader|http://www.semanlink.net/tag/fiction +http://www.semanlink.net/tag/science_fiction|related|http://www.semanlink.net/tag/anticipation +http://www.semanlink.net/tag/science_fiction|creationDate|2009-09-25 +http://www.semanlink.net/tag/science_fiction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/science_fiction|uri|http://www.semanlink.net/tag/science_fiction +http://www.semanlink.net/tag/science_fiction|broader_prefLabel|Fiction +http://www.semanlink.net/tag/graphs_machine_learning|creationTime|2019-06-18T10:13:04Z +http://www.semanlink.net/tag/graphs_machine_learning|prefLabel|Graphs+Machine Learning +http://www.semanlink.net/tag/graphs_machine_learning|broader|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/graphs_machine_learning|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/graphs_machine_learning|creationDate|2019-06-18 +http://www.semanlink.net/tag/graphs_machine_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graphs_machine_learning|altLabel|Graph Machine Learning +http://www.semanlink.net/tag/graphs_machine_learning|uri|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/tag/graphs_machine_learning|broader_prefLabel|Graph 
+http://www.semanlink.net/tag/graphs_machine_learning|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/graphs_machine_learning|broader_altLabel|ML +http://www.semanlink.net/tag/graphs_machine_learning|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/graphs_machine_learning|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/java_tool|creationTime|2012-04-10T02:07:58Z +http://www.semanlink.net/tag/java_tool|prefLabel|Java tool +http://www.semanlink.net/tag/java_tool|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java_tool|creationDate|2012-04-10 +http://www.semanlink.net/tag/java_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_tool|uri|http://www.semanlink.net/tag/java_tool +http://www.semanlink.net/tag/java_tool|broader_prefLabel|Java +http://www.semanlink.net/tag/cassini_huygens|prefLabel|Cassini-Huygens +http://www.semanlink.net/tag/cassini_huygens|broader|http://www.semanlink.net/tag/cassini +http://www.semanlink.net/tag/cassini_huygens|broader|http://www.semanlink.net/tag/titan +http://www.semanlink.net/tag/cassini_huygens|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cassini_huygens|uri|http://www.semanlink.net/tag/cassini_huygens +http://www.semanlink.net/tag/cassini_huygens|broader_prefLabel|Cassini +http://www.semanlink.net/tag/cassini_huygens|broader_prefLabel|Titan +http://www.semanlink.net/tag/owled_2007|creationTime|2007-03-04T17:07:36Z +http://www.semanlink.net/tag/owled_2007|prefLabel|OWLED 2007 +http://www.semanlink.net/tag/owled_2007|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owled_2007|broader|http://www.semanlink.net/tag/owled +http://www.semanlink.net/tag/owled_2007|creationDate|2007-03-04 +http://www.semanlink.net/tag/owled_2007|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owled_2007|uri|http://www.semanlink.net/tag/owled_2007 +http://www.semanlink.net/tag/owled_2007|broader_prefLabel|OWL +http://www.semanlink.net/tag/owled_2007|broader_prefLabel|OWLED +http://www.semanlink.net/tag/owled_2007|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/parthenogenese|creationTime|2020-04-08T21:08:53Z +http://www.semanlink.net/tag/parthenogenese|prefLabel|Parthénogenèse +http://www.semanlink.net/tag/parthenogenese|broader|http://www.semanlink.net/tag/sexe +http://www.semanlink.net/tag/parthenogenese|creationDate|2020-04-08 +http://www.semanlink.net/tag/parthenogenese|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/parthenogenese|describedBy|https://fr.wikipedia.org/wiki/Parth%C3%A9nogen%C3%A8se +http://www.semanlink.net/tag/parthenogenese|uri|http://www.semanlink.net/tag/parthenogenese +http://www.semanlink.net/tag/parthenogenese|broader_prefLabel|Sexe +http://www.semanlink.net/tag/exponential_organizations|creationTime|2015-09-13T14:50:35Z +http://www.semanlink.net/tag/exponential_organizations|prefLabel|Exponential Organizations +http://www.semanlink.net/tag/exponential_organizations|broader|http://www.semanlink.net/tag/technological_singularity +http://www.semanlink.net/tag/exponential_organizations|creationDate|2015-09-13 +http://www.semanlink.net/tag/exponential_organizations|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/exponential_organizations|uri|http://www.semanlink.net/tag/exponential_organizations +http://www.semanlink.net/tag/exponential_organizations|broader_prefLabel|Technological singularity +http://www.semanlink.net/tag/sparqlpress|creationTime|2008-02-17T02:24:43Z +http://www.semanlink.net/tag/sparqlpress|prefLabel|SparqlPress +http://www.semanlink.net/tag/sparqlpress|broader|http://www.semanlink.net/tag/wordpress +http://www.semanlink.net/tag/sparqlpress|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparqlpress|related|http://www.semanlink.net/tag/dan_brickley +http://www.semanlink.net/tag/sparqlpress|related|http://www.semanlink.net/tag/foaf +http://www.semanlink.net/tag/sparqlpress|creationDate|2008-02-17 +http://www.semanlink.net/tag/sparqlpress|comment|SparqlPress is a project. Primary ingredients are WordPress and SPARQL The goal for SparqlPress is easy-to-use, low-barrier-of-entry, access to the linked data web. There are two, intimately-related sides to the idea: producing data, and consuming it. One goal is to make it easy for Wordpress to expose more data in SPARQL-friendly form. Another is to make it easier to use a Wordpress installation as a personal, perhaps even private, local aggregation of such data. +http://www.semanlink.net/tag/sparqlpress|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparqlpress|describedBy|http://wiki.foaf-project.org/SparqlPress +http://www.semanlink.net/tag/sparqlpress|uri|http://www.semanlink.net/tag/sparqlpress +http://www.semanlink.net/tag/sparqlpress|broader_prefLabel|WordPress +http://www.semanlink.net/tag/sparqlpress|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/bulgarie|prefLabel|Bulgarie +http://www.semanlink.net/tag/bulgarie|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/bulgarie|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/bulgarie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bulgarie|uri|http://www.semanlink.net/tag/bulgarie +http://www.semanlink.net/tag/bulgarie|broader_prefLabel|Europe +http://www.semanlink.net/tag/bulgarie|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/hippies|prefLabel|Hippies +http://www.semanlink.net/tag/hippies|broader|http://www.semanlink.net/tag/i_like +http://www.semanlink.net/tag/hippies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hippies|uri|http://www.semanlink.net/tag/hippies +http://www.semanlink.net/tag/hippies|broader_prefLabel|I like +http://www.semanlink.net/tag/hippies|broader_prefLabel|I like +http://www.semanlink.net/tag/instant_messaging|creationTime|2008-04-10T10:30:22Z +http://www.semanlink.net/tag/instant_messaging|prefLabel|Instant Messaging +http://www.semanlink.net/tag/instant_messaging|creationDate|2008-04-10 +http://www.semanlink.net/tag/instant_messaging|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/instant_messaging|uri|http://www.semanlink.net/tag/instant_messaging +http://www.semanlink.net/tag/belzoni|creationTime|2008-01-04T01:47:27Z +http://www.semanlink.net/tag/belzoni|prefLabel|Belzoni +http://www.semanlink.net/tag/belzoni|broader|http://www.semanlink.net/tag/explorateur +http://www.semanlink.net/tag/belzoni|broader|http://www.semanlink.net/tag/archeologue +http://www.semanlink.net/tag/belzoni|broader|http://www.semanlink.net/tag/egypte_antique 
+http://www.semanlink.net/tag/belzoni|creationDate|2008-01-04 +http://www.semanlink.net/tag/belzoni|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/belzoni|uri|http://www.semanlink.net/tag/belzoni +http://www.semanlink.net/tag/belzoni|broader_prefLabel|Explorateur +http://www.semanlink.net/tag/belzoni|broader_prefLabel|Archéologue +http://www.semanlink.net/tag/belzoni|broader_prefLabel|Egypte antique +http://www.semanlink.net/tag/garce|prefLabel|Garce +http://www.semanlink.net/tag/garce|creationDate|2006-11-21 +http://www.semanlink.net/tag/garce|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/garce|uri|http://www.semanlink.net/tag/garce +http://www.semanlink.net/tag/subspace_clustering|creationTime|2019-12-11T03:29:35Z +http://www.semanlink.net/tag/subspace_clustering|prefLabel|Subspace clustering +http://www.semanlink.net/tag/subspace_clustering|broader|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/tag/subspace_clustering|creationDate|2019-12-11 +http://www.semanlink.net/tag/subspace_clustering|comment|finding clusters which are defined by only a subset of dimensions (it is not needed to have the agreement of all N features) +http://www.semanlink.net/tag/subspace_clustering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/subspace_clustering|uri|http://www.semanlink.net/tag/subspace_clustering +http://www.semanlink.net/tag/subspace_clustering|broader_prefLabel|Clustering +http://www.semanlink.net/tag/subspace_clustering|broader_altLabel|Data clustering +http://www.semanlink.net/tag/subspace_clustering|broader_altLabel|Cluster analysis +http://www.semanlink.net/tag/subspace_clustering|broader_related|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/text_kg_and_embeddings|creationTime|2020-05-14T01:22:41Z +http://www.semanlink.net/tag/text_kg_and_embeddings|prefLabel|Text, KG and embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|broader|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/text_kg_and_embeddings|broader|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|creationDate|2020-05-14 +http://www.semanlink.net/tag/text_kg_and_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_kg_and_embeddings|altLabel|Embeddings of Text + Knowledge Graphs +http://www.semanlink.net/tag/text_kg_and_embeddings|altLabel|Embeddings of Text + Knowledge Bases +http://www.semanlink.net/tag/text_kg_and_embeddings|altLabel|Embeddings of text + KB +http://www.semanlink.net/tag/text_kg_and_embeddings|uri|http://www.semanlink.net/tag/text_kg_and_embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_prefLabel|Knowledge Graphs and NLP +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_prefLabel|Knowledge Graph Embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_altLabel|KG + NLP +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_altLabel|Knowledge Graphs + Text 
+http://www.semanlink.net/tag/text_kg_and_embeddings|broader_altLabel|KGE +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_altLabel|Knowledge graph embedding +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_altLabel|KG embedding +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_related|http://www.semanlink.net/tag/nlp_using_knowledge +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_related|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_related|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/text_kg_and_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/dynamic_object_model_pattern|creationTime|2007-03-09T02:05:34Z +http://www.semanlink.net/tag/dynamic_object_model_pattern|prefLabel|Dynamic Object Model Pattern +http://www.semanlink.net/tag/dynamic_object_model_pattern|broader|http://www.semanlink.net/tag/programming +http://www.semanlink.net/tag/dynamic_object_model_pattern|broader|http://www.semanlink.net/tag/design_pattern +http://www.semanlink.net/tag/dynamic_object_model_pattern|creationDate|2007-03-09 +http://www.semanlink.net/tag/dynamic_object_model_pattern|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dynamic_object_model_pattern|uri|http://www.semanlink.net/tag/dynamic_object_model_pattern +http://www.semanlink.net/tag/dynamic_object_model_pattern|broader_prefLabel|Programming +http://www.semanlink.net/tag/dynamic_object_model_pattern|broader_prefLabel|Design pattern +http://www.semanlink.net/tag/dynamic_object_model_pattern|broader_altLabel|Patterns +http://www.semanlink.net/tag/machine_learning_business|creationTime|2016-08-19T12:32:41Z +http://www.semanlink.net/tag/machine_learning_business|prefLabel|Machine Learning: business +http://www.semanlink.net/tag/machine_learning_business|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/machine_learning_business|creationDate|2016-08-19 +http://www.semanlink.net/tag/machine_learning_business|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/machine_learning_business|uri|http://www.semanlink.net/tag/machine_learning_business +http://www.semanlink.net/tag/machine_learning_business|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/machine_learning_business|broader_altLabel|ML +http://www.semanlink.net/tag/machine_learning_business|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/machine_learning_business|broader_related|http://www.semanlink.net/tag/data_mining 
+http://www.semanlink.net/tag/apple_photos|creationTime|2018-04-30T13:06:10Z +http://www.semanlink.net/tag/apple_photos|prefLabel|Apple Photos +http://www.semanlink.net/tag/apple_photos|creationDate|2018-04-30 +http://www.semanlink.net/tag/apple_photos|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apple_photos|uri|http://www.semanlink.net/tag/apple_photos +http://www.semanlink.net/tag/mughal_empire|creationTime|2021-04-03T12:00:51Z +http://www.semanlink.net/tag/mughal_empire|prefLabel|Empire moghol +http://www.semanlink.net/tag/mughal_empire|broader|http://www.semanlink.net/tag/histoire_de_l_inde +http://www.semanlink.net/tag/mughal_empire|creationDate|2021-04-03 +http://www.semanlink.net/tag/mughal_empire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mughal_empire|describedBy|https://en.wikipedia.org/wiki/Mughal_Empire +http://www.semanlink.net/tag/mughal_empire|altLabel|Mughal Empire +http://www.semanlink.net/tag/mughal_empire|uri|http://www.semanlink.net/tag/mughal_empire +http://www.semanlink.net/tag/mughal_empire|broader_prefLabel|Histoire de l'Inde +http://www.semanlink.net/tag/training_data_nlp|creationTime|2019-06-28T00:41:30Z +http://www.semanlink.net/tag/training_data_nlp|prefLabel|Training Data (NLP) +http://www.semanlink.net/tag/training_data_nlp|broader|http://www.semanlink.net/tag/training_data +http://www.semanlink.net/tag/training_data_nlp|creationDate|2019-06-28 +http://www.semanlink.net/tag/training_data_nlp|comment|data labeling is usually the bottleneck in developing NLP applications. Pbs of shifting contexts on social networks +http://www.semanlink.net/tag/training_data_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/training_data_nlp|uri|http://www.semanlink.net/tag/training_data_nlp +http://www.semanlink.net/tag/training_data_nlp|broader_prefLabel|Training data +http://www.semanlink.net/tag/sw_coreferences|creationTime|2008-05-15T22:24:28Z +http://www.semanlink.net/tag/sw_coreferences|prefLabel|SW: coreferences +http://www.semanlink.net/tag/sw_coreferences|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/sw_coreferences|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/sw_coreferences|creationDate|2008-05-15 +http://www.semanlink.net/tag/sw_coreferences|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sw_coreferences|uri|http://www.semanlink.net/tag/sw_coreferences +http://www.semanlink.net/tag/sw_coreferences|broader_prefLabel|URI +http://www.semanlink.net/tag/sw_coreferences|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/sw_coreferences|broader_altLabel|LD +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/httprange_14 
+http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/sw_coreferences|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/expression|creationTime|2012-09-01T19:18:15Z +http://www.semanlink.net/tag/expression|prefLabel|Expression +http://www.semanlink.net/tag/expression|creationDate|2012-09-01 +http://www.semanlink.net/tag/expression|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/expression|uri|http://www.semanlink.net/tag/expression +http://www.semanlink.net/tag/rdf_embeddings|creationTime|2018-01-03T17:07:29Z +http://www.semanlink.net/tag/rdf_embeddings|prefLabel|RDF embeddings +http://www.semanlink.net/tag/rdf_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/rdf_embeddings|broader|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/rdf_embeddings|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_embeddings|creationDate|2018-01-03 +http://www.semanlink.net/tag/rdf_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_embeddings|uri|http://www.semanlink.net/tag/rdf_embeddings +http://www.semanlink.net/tag/rdf_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/rdf_embeddings|broader_prefLabel|Knowledge Graph Embeddings +http://www.semanlink.net/tag/rdf_embeddings|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/rdf_embeddings|broader_altLabel|KGE +http://www.semanlink.net/tag/rdf_embeddings|broader_altLabel|Knowledge graph embedding +http://www.semanlink.net/tag/rdf_embeddings|broader_altLabel|KG embedding +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_embeddings|broader_related|http://www.semanlink.net/tag/grddl 
+http://www.semanlink.net/tag/semantic_web_and_ai|creationTime|2007-02-06T21:56:12Z +http://www.semanlink.net/tag/semantic_web_and_ai|prefLabel|Semantic web and AI +http://www.semanlink.net/tag/semantic_web_and_ai|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/semantic_web_and_ai|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_and_ai|creationDate|2007-02-06 +http://www.semanlink.net/tag/semantic_web_and_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_and_ai|uri|http://www.semanlink.net/tag/semantic_web_and_ai +http://www.semanlink.net/tag/semantic_web_and_ai|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/semantic_web_and_ai|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_and_ai|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/semantic_web_and_ai|broader_altLabel|AI +http://www.semanlink.net/tag/semantic_web_and_ai|broader_altLabel|IA +http://www.semanlink.net/tag/semantic_web_and_ai|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_and_ai|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/semantic_web_and_ai|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|creationTime|2018-01-03T15:54:08Z +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|prefLabel|NN / Symbolic AI hybridation +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|related|http://www.semanlink.net/tag/connectionist_vs_symbolic_debate +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|creationDate|2018-01-03 +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|uri|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|broader_altLabel|AI +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|broader_altLabel|IA +http://www.semanlink.net/tag/nn_symbolic_ai_hybridation|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/w3c_incubator_group_report|creationTime|2008-05-04T15:57:18Z +http://www.semanlink.net/tag/w3c_incubator_group_report|prefLabel|W3C Incubator Group Report +http://www.semanlink.net/tag/w3c_incubator_group_report|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/w3c_incubator_group_report|creationDate|2008-05-04 +http://www.semanlink.net/tag/w3c_incubator_group_report|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/w3c_incubator_group_report|uri|http://www.semanlink.net/tag/w3c_incubator_group_report +http://www.semanlink.net/tag/w3c_incubator_group_report|broader_prefLabel|W3C +http://www.semanlink.net/tag/w3c_incubator_group_report|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/w3c_incubator_group_report|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/few_shot_learning|creationTime|2018-12-02T10:17:02Z 
+http://www.semanlink.net/tag/few_shot_learning|prefLabel|Few-shot learning +http://www.semanlink.net/tag/few_shot_learning|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/few_shot_learning|creationDate|2018-12-02 +http://www.semanlink.net/tag/few_shot_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/few_shot_learning|uri|http://www.semanlink.net/tag/few_shot_learning +http://www.semanlink.net/tag/few_shot_learning|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/few_shot_learning|broader_altLabel|ML +http://www.semanlink.net/tag/few_shot_learning|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/few_shot_learning|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/encoder_decoder_architecture|creationTime|2018-11-07T00:05:33Z +http://www.semanlink.net/tag/encoder_decoder_architecture|prefLabel|Encoder-Decoder architecture +http://www.semanlink.net/tag/encoder_decoder_architecture|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/encoder_decoder_architecture|related|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/tag/encoder_decoder_architecture|creationDate|2018-11-07 +http://www.semanlink.net/tag/encoder_decoder_architecture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/encoder_decoder_architecture|uri|http://www.semanlink.net/tag/encoder_decoder_architecture +http://www.semanlink.net/tag/encoder_decoder_architecture|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/jpl|creationTime|2008-05-17T13:32:16Z +http://www.semanlink.net/tag/jpl|prefLabel|JPL +http://www.semanlink.net/tag/jpl|broader|http://www.semanlink.net/tag/nasa +http://www.semanlink.net/tag/jpl|creationDate|2008-05-17 +http://www.semanlink.net/tag/jpl|comment|Jet Propulsion Laboratory, California Institute of Technology +http://www.semanlink.net/tag/jpl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jpl|describedBy|http://www.jpl.nasa.gov/ +http://www.semanlink.net/tag/jpl|uri|http://www.semanlink.net/tag/jpl +http://www.semanlink.net/tag/jpl|broader_prefLabel|NASA +http://www.semanlink.net/tag/site_web_gouvernemental|creationTime|2010-07-28T00:07:50Z +http://www.semanlink.net/tag/site_web_gouvernemental|prefLabel|Site web gouvernemental +http://www.semanlink.net/tag/site_web_gouvernemental|creationDate|2010-07-28 +http://www.semanlink.net/tag/site_web_gouvernemental|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/site_web_gouvernemental|uri|http://www.semanlink.net/tag/site_web_gouvernemental +http://www.semanlink.net/tag/ai_knowledge_bases|creationTime|2020-07-09T23:58:23Z +http://www.semanlink.net/tag/ai_knowledge_bases|prefLabel|AI + Knowledge Bases +http://www.semanlink.net/tag/ai_knowledge_bases|broader|http://www.semanlink.net/tag/ai_knowledge +http://www.semanlink.net/tag/ai_knowledge_bases|broader|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/ai_knowledge_bases|related|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/tag/ai_knowledge_bases|creationDate|2020-07-09 +http://www.semanlink.net/tag/ai_knowledge_bases|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/ai_knowledge_bases|uri|http://www.semanlink.net/tag/ai_knowledge_bases +http://www.semanlink.net/tag/ai_knowledge_bases|broader_prefLabel|AI + Knowledge +http://www.semanlink.net/tag/ai_knowledge_bases|broader_prefLabel|Knowledge bases +http://www.semanlink.net/tag/ai_knowledge_bases|broader_altLabel|Domain Knowledge in AI +http://www.semanlink.net/tag/ai_knowledge_bases|broader_altLabel|Knowledge Base +http://www.semanlink.net/tag/ai_knowledge_bases|broader_related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/ai_knowledge_bases|broader_related|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/sarkozy_immigration|creationTime|2008-04-07T21:32:22Z +http://www.semanlink.net/tag/sarkozy_immigration|prefLabel|Sarkozy : immigration +http://www.semanlink.net/tag/sarkozy_immigration|broader|http://www.semanlink.net/tag/immigration +http://www.semanlink.net/tag/sarkozy_immigration|broader|http://www.semanlink.net/tag/sarkozy +http://www.semanlink.net/tag/sarkozy_immigration|broader|http://www.semanlink.net/tag/sarkozy_et_extreme_droite +http://www.semanlink.net/tag/sarkozy_immigration|creationDate|2008-04-07 +http://www.semanlink.net/tag/sarkozy_immigration|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sarkozy_immigration|uri|http://www.semanlink.net/tag/sarkozy_immigration +http://www.semanlink.net/tag/sarkozy_immigration|broader_prefLabel|Immigration +http://www.semanlink.net/tag/sarkozy_immigration|broader_prefLabel|Sarkozy +http://www.semanlink.net/tag/sarkozy_immigration|broader_prefLabel|Sarkozy et extrème droite +http://www.semanlink.net/tag/grand_challenge|prefLabel|DARPA Grand Challenge +http://www.semanlink.net/tag/grand_challenge|broader|http://www.semanlink.net/tag/darpa +http://www.semanlink.net/tag/grand_challenge|broader|http://www.semanlink.net/tag/driverless_car +http://www.semanlink.net/tag/grand_challenge|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grand_challenge|uri|http://www.semanlink.net/tag/grand_challenge +http://www.semanlink.net/tag/grand_challenge|broader_prefLabel|DARPA +http://www.semanlink.net/tag/grand_challenge|broader_prefLabel|Driverless car +http://www.semanlink.net/tag/grand_challenge|broader_altLabel|Self-driving car +http://www.semanlink.net/tag/emnlp|creationTime|2020-11-24T09:46:58Z +http://www.semanlink.net/tag/emnlp|prefLabel|EMNLP +http://www.semanlink.net/tag/emnlp|broader|http://www.semanlink.net/tag/nlp_conference +http://www.semanlink.net/tag/emnlp|creationDate|2020-11-24 +http://www.semanlink.net/tag/emnlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/emnlp|uri|http://www.semanlink.net/tag/emnlp +http://www.semanlink.net/tag/emnlp|broader_prefLabel|NLP conference +http://www.semanlink.net/tag/akkadian_language|creationTime|2021-09-23T10:47:42Z +http://www.semanlink.net/tag/akkadian_language|prefLabel|Akkadian language +http://www.semanlink.net/tag/akkadian_language|broader|http://www.semanlink.net/tag/langues_anciennes +http://www.semanlink.net/tag/akkadian_language|broader|http://www.semanlink.net/tag/mesopotamie +http://www.semanlink.net/tag/akkadian_language|creationDate|2021-09-23 +http://www.semanlink.net/tag/akkadian_language|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/akkadian_language|describedBy|https://en.wikipedia.org/wiki/Akkadian_language 
+http://www.semanlink.net/tag/akkadian_language|uri|http://www.semanlink.net/tag/akkadian_language +http://www.semanlink.net/tag/akkadian_language|broader_prefLabel|Langues anciennes +http://www.semanlink.net/tag/akkadian_language|broader_prefLabel|Mésopotamie +http://www.semanlink.net/tag/detroit|prefLabel|Detroit +http://www.semanlink.net/tag/detroit|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/detroit|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/detroit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/detroit|uri|http://www.semanlink.net/tag/detroit +http://www.semanlink.net/tag/detroit|broader_prefLabel|USA +http://www.semanlink.net/tag/detroit|broader_prefLabel|Ville +http://www.semanlink.net/tag/detroit|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/detroit|broader_altLabel|United States +http://www.semanlink.net/tag/france_telecom|prefLabel|France Télécom +http://www.semanlink.net/tag/france_telecom|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/france_telecom|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/france_telecom|uri|http://www.semanlink.net/tag/france_telecom +http://www.semanlink.net/tag/france_telecom|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/retrieval_augmented_lm|creationTime|2021-02-23T11:36:03Z +http://www.semanlink.net/tag/retrieval_augmented_lm|prefLabel|Retrieval augmented LM +http://www.semanlink.net/tag/retrieval_augmented_lm|broader|http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model +http://www.semanlink.net/tag/retrieval_augmented_lm|broader|http://www.semanlink.net/tag/retrieval_based_nlp +http://www.semanlink.net/tag/retrieval_augmented_lm|broader|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/tag/retrieval_augmented_lm|related|http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model +http://www.semanlink.net/tag/retrieval_augmented_lm|related|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/tag/retrieval_augmented_lm|creationDate|2021-02-23 +http://www.semanlink.net/tag/retrieval_augmented_lm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/retrieval_augmented_lm|uri|http://www.semanlink.net/tag/retrieval_augmented_lm +http://www.semanlink.net/tag/retrieval_augmented_lm|broader_prefLabel|Not Encoding Factual Knowledge in Language Model +http://www.semanlink.net/tag/retrieval_augmented_lm|broader_prefLabel|Retrieval-based NLP +http://www.semanlink.net/tag/retrieval_augmented_lm|broader_prefLabel|Knowledge-augmented Language Models +http://www.semanlink.net/tag/retrieval_augmented_lm|broader_altLabel|KG-Augmented LMs +http://www.semanlink.net/tag/retrieval_augmented_lm|broader_altLabel|Injecting knowledge into LMs +http://www.semanlink.net/tag/retrieval_augmented_lm|broader_altLabel|Injecting knowledge into Language Models +http://www.semanlink.net/tag/retrieval_augmented_lm|broader_related|http://www.semanlink.net/tag/language_models_as_knowledge_bases +http://www.semanlink.net/tag/retrieval_augmented_lm|broader_related|http://www.semanlink.net/tag/how_much_information_in_a_language +http://www.semanlink.net/tag/retrieval_augmented_lm|broader_related|http://www.semanlink.net/tag/dense_passage_retrieval 
+http://www.semanlink.net/tag/retrieval_augmented_lm|broader_related|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/tag/comete|prefLabel|Comète +http://www.semanlink.net/tag/comete|broader|http://www.semanlink.net/tag/asteroide +http://www.semanlink.net/tag/comete|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/comete|uri|http://www.semanlink.net/tag/comete +http://www.semanlink.net/tag/comete|broader_prefLabel|Astéroïde +http://www.semanlink.net/tag/miriam_makeba|creationTime|2008-11-10T10:21:46Z +http://www.semanlink.net/tag/miriam_makeba|prefLabel|Miriam Makeba +http://www.semanlink.net/tag/miriam_makeba|broader|http://www.semanlink.net/tag/music_of_africa +http://www.semanlink.net/tag/miriam_makeba|broader|http://www.semanlink.net/tag/afrique_du_sud +http://www.semanlink.net/tag/miriam_makeba|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/miriam_makeba|creationDate|2008-11-10 +http://www.semanlink.net/tag/miriam_makeba|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/miriam_makeba|uri|http://www.semanlink.net/tag/miriam_makeba +http://www.semanlink.net/tag/miriam_makeba|broader_prefLabel|Music of Africa +http://www.semanlink.net/tag/miriam_makeba|broader_prefLabel|Afrique du Sud +http://www.semanlink.net/tag/miriam_makeba|broader_prefLabel|Musicien +http://www.semanlink.net/tag/miriam_makeba|broader_altLabel|Musique africaine +http://www.semanlink.net/tag/miriam_makeba|broader_altLabel|African music +http://www.semanlink.net/tag/enfer_administratif|creationTime|2021-02-20T12:44:11Z +http://www.semanlink.net/tag/enfer_administratif|prefLabel|Enfer administratif +http://www.semanlink.net/tag/enfer_administratif|broader|http://www.semanlink.net/tag/administration +http://www.semanlink.net/tag/enfer_administratif|creationDate|2021-02-20 +http://www.semanlink.net/tag/enfer_administratif|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enfer_administratif|uri|http://www.semanlink.net/tag/enfer_administratif +http://www.semanlink.net/tag/enfer_administratif|broader_prefLabel|Administration +http://www.semanlink.net/tag/encryption|creationTime|2013-01-06T23:39:42Z +http://www.semanlink.net/tag/encryption|prefLabel|Encryption +http://www.semanlink.net/tag/encryption|broader|http://www.semanlink.net/tag/securite_informatique +http://www.semanlink.net/tag/encryption|related|http://www.semanlink.net/tag/vie_privee +http://www.semanlink.net/tag/encryption|creationDate|2013-01-06 +http://www.semanlink.net/tag/encryption|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/encryption|altLabel|Cryptage +http://www.semanlink.net/tag/encryption|uri|http://www.semanlink.net/tag/encryption +http://www.semanlink.net/tag/encryption|broader_prefLabel|Cybersecurity +http://www.semanlink.net/tag/encryption|broader_prefLabel|Sécurité informatique +http://www.semanlink.net/tag/insecte|prefLabel|Insecte +http://www.semanlink.net/tag/insecte|broader|http://www.semanlink.net/tag/arthropodes +http://www.semanlink.net/tag/insecte|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/insecte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/insecte|uri|http://www.semanlink.net/tag/insecte +http://www.semanlink.net/tag/insecte|broader_prefLabel|Arthropodes +http://www.semanlink.net/tag/insecte|broader_prefLabel|Animal 
+http://www.semanlink.net/tag/grillon|creationTime|2013-08-05T14:42:01Z
+http://www.semanlink.net/tag/grillon|prefLabel|Grillon
+http://www.semanlink.net/tag/grillon|broader|http://www.semanlink.net/tag/insecte
+http://www.semanlink.net/tag/grillon|creationDate|2013-08-05
+http://www.semanlink.net/tag/grillon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/grillon|uri|http://www.semanlink.net/tag/grillon
+http://www.semanlink.net/tag/grillon|broader_prefLabel|Insecte
+http://www.semanlink.net/tag/jena_grddl_reader|creationTime|2007-07-17T23:10:12Z
+http://www.semanlink.net/tag/jena_grddl_reader|prefLabel|Jena GRDDL Reader
+http://www.semanlink.net/tag/jena_grddl_reader|broader|http://www.semanlink.net/tag/jena
+http://www.semanlink.net/tag/jena_grddl_reader|broader|http://www.semanlink.net/tag/grddl
+http://www.semanlink.net/tag/jena_grddl_reader|creationDate|2007-07-17
+http://www.semanlink.net/tag/jena_grddl_reader|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/jena_grddl_reader|uri|http://www.semanlink.net/tag/jena_grddl_reader
+http://www.semanlink.net/tag/jena_grddl_reader|broader_prefLabel|Jena
+http://www.semanlink.net/tag/jena_grddl_reader|broader_prefLabel|GRDDL
+http://www.semanlink.net/tag/jena_grddl_reader|broader_related|http://www.semanlink.net/tag/hp
+http://www.semanlink.net/tag/jena_grddl_reader|broader_related|http://www.semanlink.net/tag/rdfa
+http://www.semanlink.net/tag/christian_faure|creationTime|2011-01-09T21:40:33Z
+http://www.semanlink.net/tag/christian_faure|prefLabel|Christian Fauré
+http://www.semanlink.net/tag/christian_faure|creationDate|2011-01-09
+http://www.semanlink.net/tag/christian_faure|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/christian_faure|uri|http://www.semanlink.net/tag/christian_faure
+http://www.semanlink.net/tag/davos|prefLabel|Davos
+http://www.semanlink.net/tag/davos|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/davos|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/davos|uri|http://www.semanlink.net/tag/davos
+http://www.semanlink.net/tag/davos|broader_prefLabel|Economie
+http://www.semanlink.net/tag/davos|broader_related|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/news_website|prefLabel|News website
+http://www.semanlink.net/tag/news_website|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/news_website|uri|http://www.semanlink.net/tag/news_website
+http://www.semanlink.net/tag/de_extinction|creationTime|2013-03-23T01:07:57Z
+http://www.semanlink.net/tag/de_extinction|prefLabel|De-extinction
+http://www.semanlink.net/tag/de_extinction|broader|http://www.semanlink.net/tag/clonage
+http://www.semanlink.net/tag/de_extinction|broader|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne
+http://www.semanlink.net/tag/de_extinction|broader|http://www.semanlink.net/tag/disparition_d_especes
+http://www.semanlink.net/tag/de_extinction|creationDate|2013-03-23
+http://www.semanlink.net/tag/de_extinction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/de_extinction|uri|http://www.semanlink.net/tag/de_extinction
+http://www.semanlink.net/tag/de_extinction|broader_prefLabel|Clonage
+http://www.semanlink.net/tag/de_extinction|broader_prefLabel|Nous vivons une époque moderne
+http://www.semanlink.net/tag/de_extinction|broader_prefLabel|Disparition d'espèces
+http://www.semanlink.net/tag/de_extinction|broader_altLabel|C'est déjà demain
+http://www.semanlink.net/tag/de_extinction|broader_altLabel|Espèces disparues
+http://www.semanlink.net/tag/linking_open_data|creationTime|2007-03-20T21:25:13Z
+http://www.semanlink.net/tag/linking_open_data|prefLabel|Linking Open Data
+http://www.semanlink.net/tag/linking_open_data|broader|http://www.semanlink.net/tag/open_data
+http://www.semanlink.net/tag/linking_open_data|broader|http://www.semanlink.net/tag/linked_data
+http://www.semanlink.net/tag/linking_open_data|related|http://www.semanlink.net/tag/huge_rdf_data_source
+http://www.semanlink.net/tag/linking_open_data|related|http://www.semanlink.net/tag/chris_bizer
+http://www.semanlink.net/tag/linking_open_data|related|http://www.semanlink.net/tag/richard_cyganiak
+http://www.semanlink.net/tag/linking_open_data|related|http://www.semanlink.net/tag/tim_berners_lee
+http://www.semanlink.net/tag/linking_open_data|creationDate|2007-03-20
+http://www.semanlink.net/tag/linking_open_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/linking_open_data|homepage|http://esw.w3.org/topic/SweoIG/TaskForces/CommunityProjects/LinkingOpenData
+http://www.semanlink.net/tag/linking_open_data|altLabel|LOD
+http://www.semanlink.net/tag/linking_open_data|uri|http://www.semanlink.net/tag/linking_open_data
+http://www.semanlink.net/tag/linking_open_data|broader_prefLabel|Open Data
+http://www.semanlink.net/tag/linking_open_data|broader_prefLabel|Linked Data
+http://www.semanlink.net/tag/linking_open_data|broader_altLabel|LD
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/linked_data
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/yves_raymond
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/frederick_giasson
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/chris_bizer
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/jamendo
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/richard_cyganiak
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/httprange_14
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/musicbrainz
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/kingsley_idehen
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/giovanni_tummarello
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/tom_heath
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/rdf_data_source
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/tim_berners_lee
+http://www.semanlink.net/tag/linking_open_data|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.semanlink.net/tag/cosmologie|creationTime|2008-08-28T13:45:53Z
+http://www.semanlink.net/tag/cosmologie|prefLabel|Cosmologie
+http://www.semanlink.net/tag/cosmologie|broader|http://www.semanlink.net/tag/science
+http://www.semanlink.net/tag/cosmologie|creationDate|2008-08-28
+http://www.semanlink.net/tag/cosmologie|comment|Even at the speed of light, you’ll never reach these galaxies. +http://www.semanlink.net/tag/cosmologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cosmologie|uri|http://www.semanlink.net/tag/cosmologie +http://www.semanlink.net/tag/cosmologie|broader_prefLabel|Science +http://www.semanlink.net/tag/cosmologie|broader_altLabel|sciences +http://www.semanlink.net/tag/ldp_updates|creationTime|2014-10-12T22:47:32Z +http://www.semanlink.net/tag/ldp_updates|prefLabel|LDP: updates +http://www.semanlink.net/tag/ldp_updates|broader|http://www.semanlink.net/tag/linked_data_platform +http://www.semanlink.net/tag/ldp_updates|creationDate|2014-10-12 +http://www.semanlink.net/tag/ldp_updates|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ldp_updates|uri|http://www.semanlink.net/tag/ldp_updates +http://www.semanlink.net/tag/ldp_updates|broader_prefLabel|Linked Data Platform +http://www.semanlink.net/tag/ldp_updates|broader_altLabel|LDP +http://www.semanlink.net/tag/statistics|creationTime|2014-11-27T13:37:23Z +http://www.semanlink.net/tag/statistics|prefLabel|Statistics +http://www.semanlink.net/tag/statistics|creationDate|2014-11-27 +http://www.semanlink.net/tag/statistics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/statistics|altLabel|stats +http://www.semanlink.net/tag/statistics|altLabel|Statistiques +http://www.semanlink.net/tag/statistics|uri|http://www.semanlink.net/tag/statistics +http://www.semanlink.net/tag/bat|creationTime|2012-12-30T13:23:44Z +http://www.semanlink.net/tag/bat|prefLabel|Bat +http://www.semanlink.net/tag/bat|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/bat|creationDate|2012-12-30 +http://www.semanlink.net/tag/bat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bat|describedBy|https://en.wikipedia.org/wiki/Bat +http://www.semanlink.net/tag/bat|altLabel|Chauve-souris +http://www.semanlink.net/tag/bat|uri|http://www.semanlink.net/tag/bat +http://www.semanlink.net/tag/bat|broader_prefLabel|Animal +http://www.semanlink.net/tag/virtuoso|creationTime|2008-06-05T23:25:58Z +http://www.semanlink.net/tag/virtuoso|prefLabel|Virtuoso +http://www.semanlink.net/tag/virtuoso|broader|http://www.semanlink.net/tag/triplestore +http://www.semanlink.net/tag/virtuoso|broader|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.semanlink.net/tag/virtuoso|broader|http://www.semanlink.net/tag/federated_database_system +http://www.semanlink.net/tag/virtuoso|broader|http://www.semanlink.net/tag/openlink +http://www.semanlink.net/tag/virtuoso|broader|http://www.semanlink.net/tag/semantic_web_platform +http://www.semanlink.net/tag/virtuoso|creationDate|2008-06-05 +http://www.semanlink.net/tag/virtuoso|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/virtuoso|homepage|http://virtuoso.openlinksw.com/ +http://www.semanlink.net/tag/virtuoso|uri|http://www.semanlink.net/tag/virtuoso +http://www.semanlink.net/tag/virtuoso|broader_prefLabel|TripleStore +http://www.semanlink.net/tag/virtuoso|broader_prefLabel|Relational Databases and the Semantic Web +http://www.semanlink.net/tag/virtuoso|broader_prefLabel|Federated database system +http://www.semanlink.net/tag/virtuoso|broader_prefLabel|OpenLink Software +http://www.semanlink.net/tag/virtuoso|broader_prefLabel|Semantic Web Platform 
+http://www.semanlink.net/tag/virtuoso|broader_altLabel|RDF database +http://www.semanlink.net/tag/virtuoso|broader_altLabel|RDF and database +http://www.semanlink.net/tag/virtuoso|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/virtuoso|broader_related|http://www.semanlink.net/tag/yrjana_rankka +http://www.semanlink.net/tag/virtuoso|broader_related|http://www.semanlink.net/tag/orri_erling +http://www.semanlink.net/tag/virtuoso|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/cancers_pediatriques|creationTime|2021-08-12T11:55:23Z +http://www.semanlink.net/tag/cancers_pediatriques|prefLabel|Cancers pédiatriques +http://www.semanlink.net/tag/cancers_pediatriques|broader|http://www.semanlink.net/tag/cancer +http://www.semanlink.net/tag/cancers_pediatriques|creationDate|2021-08-12 +http://www.semanlink.net/tag/cancers_pediatriques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cancers_pediatriques|uri|http://www.semanlink.net/tag/cancers_pediatriques +http://www.semanlink.net/tag/cancers_pediatriques|broader_prefLabel|Cancer +http://www.semanlink.net/tag/graph_attention_networks|creationTime|2020-03-01T02:37:32Z +http://www.semanlink.net/tag/graph_attention_networks|prefLabel|Graph Attention Networks +http://www.semanlink.net/tag/graph_attention_networks|broader|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/tag/graph_attention_networks|broader|http://www.semanlink.net/tag/attention_in_graphs +http://www.semanlink.net/tag/graph_attention_networks|creationDate|2020-03-01 +http://www.semanlink.net/tag/graph_attention_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_attention_networks|uri|http://www.semanlink.net/tag/graph_attention_networks +http://www.semanlink.net/tag/graph_attention_networks|broader_prefLabel|Graph neural networks +http://www.semanlink.net/tag/graph_attention_networks|broader_prefLabel|Attention in Graphs +http://www.semanlink.net/tag/graph_attention_networks|broader_altLabel|GNN +http://www.semanlink.net/tag/graph_attention_networks|broader_altLabel|Graph + Transformer +http://www.semanlink.net/tag/graph_attention_networks|broader_related|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/film_noir|creationTime|2008-05-15T22:30:46Z +http://www.semanlink.net/tag/film_noir|prefLabel|Film noir +http://www.semanlink.net/tag/film_noir|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_noir|creationDate|2008-05-15 +http://www.semanlink.net/tag/film_noir|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_noir|uri|http://www.semanlink.net/tag/film_noir +http://www.semanlink.net/tag/film_noir|broader_prefLabel|Film +http://www.semanlink.net/tag/lobby|creationTime|2013-06-08T09:36:35Z +http://www.semanlink.net/tag/lobby|prefLabel|Lobby +http://www.semanlink.net/tag/lobby|creationDate|2013-06-08 +http://www.semanlink.net/tag/lobby|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lobby|uri|http://www.semanlink.net/tag/lobby +http://www.semanlink.net/tag/the_knowledge_graph_conference|creationTime|2021-01-30T13:52:23Z +http://www.semanlink.net/tag/the_knowledge_graph_conference|prefLabel|The Knowledge Graph Conference +http://www.semanlink.net/tag/the_knowledge_graph_conference|broader|http://www.semanlink.net/tag/knowledge_graph 
+http://www.semanlink.net/tag/the_knowledge_graph_conference|broader|http://www.semanlink.net/tag/conferences +http://www.semanlink.net/tag/the_knowledge_graph_conference|related|http://www.semanlink.net/tag/francois_scharffe +http://www.semanlink.net/tag/the_knowledge_graph_conference|creationDate|2021-01-30 +http://www.semanlink.net/tag/the_knowledge_graph_conference|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/the_knowledge_graph_conference|uri|http://www.semanlink.net/tag/the_knowledge_graph_conference +http://www.semanlink.net/tag/the_knowledge_graph_conference|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/the_knowledge_graph_conference|broader_prefLabel|Conférences +http://www.semanlink.net/tag/the_knowledge_graph_conference|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/the_knowledge_graph_conference|broader_altLabel|KG +http://www.semanlink.net/tag/the_knowledge_graph_conference|broader_related|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/japon|prefLabel|Japon +http://www.semanlink.net/tag/japon|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/japon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/japon|altLabel|Japan +http://www.semanlink.net/tag/japon|uri|http://www.semanlink.net/tag/japon +http://www.semanlink.net/tag/japon|broader_prefLabel|Asie +http://www.semanlink.net/tag/sense2vec|creationTime|2018-04-08T15:28:28Z +http://www.semanlink.net/tag/sense2vec|prefLabel|Sense2vec +http://www.semanlink.net/tag/sense2vec|broader|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/sense2vec|broader|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/sense2vec|related|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/sense2vec|creationDate|2018-04-08 +http://www.semanlink.net/tag/sense2vec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sense2vec|uri|http://www.semanlink.net/tag/sense2vec +http://www.semanlink.net/tag/sense2vec|broader_prefLabel|Sense embeddings +http://www.semanlink.net/tag/sense2vec|broader_prefLabel|Word sense / Lexical ambiguity +http://www.semanlink.net/tag/sense2vec|broader_altLabel|Polysemy +http://www.semanlink.net/tag/sense2vec|broader_related|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/sense2vec|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/aubrac|creationTime|2013-07-31T10:57:33Z +http://www.semanlink.net/tag/aubrac|prefLabel|Aubrac +http://www.semanlink.net/tag/aubrac|creationDate|2013-07-31 +http://www.semanlink.net/tag/aubrac|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aubrac|uri|http://www.semanlink.net/tag/aubrac +http://www.semanlink.net/tag/configuration_ontology|creationTime|2012-08-20T11:35:53Z +http://www.semanlink.net/tag/configuration_ontology|prefLabel|Configuration ontology +http://www.semanlink.net/tag/configuration_ontology|broader|http://www.semanlink.net/tag/configuration_as_linked_data +http://www.semanlink.net/tag/configuration_ontology|broader|http://www.semanlink.net/tag/fps_ontologies +http://www.semanlink.net/tag/configuration_ontology|broader|http://www.semanlink.net/tag/c2gweb_rdf +http://www.semanlink.net/tag/configuration_ontology|broader|http://www.semanlink.net/tag/ontologies 
+http://www.semanlink.net/tag/configuration_ontology|broader|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/configuration_ontology|creationDate|2012-08-20 +http://www.semanlink.net/tag/configuration_ontology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/configuration_ontology|homepage|http://purl.org/configurationontology +http://www.semanlink.net/tag/configuration_ontology|seeAlso|http://purl.org/configurationontology +http://www.semanlink.net/tag/configuration_ontology|uri|http://www.semanlink.net/tag/configuration_ontology +http://www.semanlink.net/tag/configuration_ontology|broader_prefLabel|Configuration as Linked Data +http://www.semanlink.net/tag/configuration_ontology|broader_prefLabel|fps ontologies +http://www.semanlink.net/tag/configuration_ontology|broader_prefLabel|C2GWeb RDF +http://www.semanlink.net/tag/configuration_ontology|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/configuration_ontology|broader_prefLabel|C2GWeb +http://www.semanlink.net/tag/configuration_ontology|broader_altLabel|Ontology +http://www.semanlink.net/tag/microcredit|creationTime|2021-06-29T00:38:50Z +http://www.semanlink.net/tag/microcredit|prefLabel|Microcrédit +http://www.semanlink.net/tag/microcredit|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/microcredit|broader|http://www.semanlink.net/tag/pauvrete +http://www.semanlink.net/tag/microcredit|creationDate|2021-06-29 +http://www.semanlink.net/tag/microcredit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/microcredit|uri|http://www.semanlink.net/tag/microcredit +http://www.semanlink.net/tag/microcredit|broader_prefLabel|Finance +http://www.semanlink.net/tag/microcredit|broader_prefLabel|Pauvreté +http://www.semanlink.net/tag/bulle_speculative|prefLabel|Bulle spéculative +http://www.semanlink.net/tag/bulle_speculative|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/bulle_speculative|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bulle_speculative|uri|http://www.semanlink.net/tag/bulle_speculative +http://www.semanlink.net/tag/bulle_speculative|broader_prefLabel|Economie +http://www.semanlink.net/tag/bulle_speculative|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/nlp_4_africa|creationTime|2021-06-30T00:27:22Z +http://www.semanlink.net/tag/nlp_4_africa|prefLabel|NLP 4 Africa +http://www.semanlink.net/tag/nlp_4_africa|broader|http://www.semanlink.net/tag/african_languages +http://www.semanlink.net/tag/nlp_4_africa|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_4_africa|creationDate|2021-06-30 +http://www.semanlink.net/tag/nlp_4_africa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_4_africa|altLabel|Africa - NLP +http://www.semanlink.net/tag/nlp_4_africa|altLabel|Afrique - NLP +http://www.semanlink.net/tag/nlp_4_africa|uri|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/tag/nlp_4_africa|broader_prefLabel|African languages +http://www.semanlink.net/tag/nlp_4_africa|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_4_africa|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_4_africa|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_4_africa|broader_altLabel|Natural Language Processing 
+http://www.semanlink.net/tag/neuroscience_of_consciousness|creationTime|2021-08-02T18:25:16Z +http://www.semanlink.net/tag/neuroscience_of_consciousness|prefLabel|Neuroscience of Consciousness +http://www.semanlink.net/tag/neuroscience_of_consciousness|broader|http://www.semanlink.net/tag/models_of_consciousness +http://www.semanlink.net/tag/neuroscience_of_consciousness|broader|http://www.semanlink.net/tag/conscience +http://www.semanlink.net/tag/neuroscience_of_consciousness|broader|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/neuroscience_of_consciousness|creationDate|2021-08-02 +http://www.semanlink.net/tag/neuroscience_of_consciousness|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neuroscience_of_consciousness|uri|http://www.semanlink.net/tag/neuroscience_of_consciousness +http://www.semanlink.net/tag/neuroscience_of_consciousness|broader_prefLabel|Models of consciousness +http://www.semanlink.net/tag/neuroscience_of_consciousness|broader_prefLabel|Consciousness +http://www.semanlink.net/tag/neuroscience_of_consciousness|broader_prefLabel|Neuroscience +http://www.semanlink.net/tag/neuroscience_of_consciousness|broader_altLabel|Conscience +http://www.semanlink.net/tag/neuroscience_of_consciousness|broader_related|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/messenger|creationTime|2008-01-15T14:03:29Z +http://www.semanlink.net/tag/messenger|prefLabel|Messenger +http://www.semanlink.net/tag/messenger|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/messenger|broader|http://www.semanlink.net/tag/nasa +http://www.semanlink.net/tag/messenger|broader|http://www.semanlink.net/tag/mercure +http://www.semanlink.net/tag/messenger|creationDate|2008-01-15 +http://www.semanlink.net/tag/messenger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/messenger|homepage|http://messenger.jhuapl.edu/ +http://www.semanlink.net/tag/messenger|uri|http://www.semanlink.net/tag/messenger +http://www.semanlink.net/tag/messenger|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/messenger|broader_prefLabel|NASA +http://www.semanlink.net/tag/messenger|broader_prefLabel|Mercure (Planète) +http://www.semanlink.net/tag/mutual_learning|creationTime|2020-09-06T12:07:49Z +http://www.semanlink.net/tag/mutual_learning|prefLabel|Mutual Learning +http://www.semanlink.net/tag/mutual_learning|broader|http://www.semanlink.net/tag/machines_teaching_machines +http://www.semanlink.net/tag/mutual_learning|creationDate|2020-09-06 +http://www.semanlink.net/tag/mutual_learning|comment|// TODO see collaborative learning / co-training +http://www.semanlink.net/tag/mutual_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mutual_learning|altLabel|Collaborative learning +http://www.semanlink.net/tag/mutual_learning|uri|http://www.semanlink.net/tag/mutual_learning +http://www.semanlink.net/tag/mutual_learning|broader_prefLabel|Machines teaching machines +http://www.semanlink.net/tag/nosql_vs_sql|creationTime|2013-03-12T16:51:50Z +http://www.semanlink.net/tag/nosql_vs_sql|prefLabel|NOSQL vs SQL +http://www.semanlink.net/tag/nosql_vs_sql|broader|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/nosql_vs_sql|creationDate|2013-03-12 +http://www.semanlink.net/tag/nosql_vs_sql|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/nosql_vs_sql|uri|http://www.semanlink.net/tag/nosql_vs_sql +http://www.semanlink.net/tag/nosql_vs_sql|broader_prefLabel|NOSQL +http://www.semanlink.net/tag/nosql_vs_sql|broader_related|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/fact_verification|creationTime|2019-12-01T13:21:40Z +http://www.semanlink.net/tag/fact_verification|prefLabel|Fact verification +http://www.semanlink.net/tag/fact_verification|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/fact_verification|creationDate|2019-12-01 +http://www.semanlink.net/tag/fact_verification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fact_verification|uri|http://www.semanlink.net/tag/fact_verification +http://www.semanlink.net/tag/fact_verification|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/database_to_rdf_mapping|creationTime|2007-04-19T22:46:41Z +http://www.semanlink.net/tag/database_to_rdf_mapping|prefLabel|Database to RDF mapping +http://www.semanlink.net/tag/database_to_rdf_mapping|broader|http://www.semanlink.net/tag/database +http://www.semanlink.net/tag/database_to_rdf_mapping|broader|http://www.semanlink.net/tag/converting_data_into_rdf +http://www.semanlink.net/tag/database_to_rdf_mapping|broader|http://www.semanlink.net/tag/rdf_and_database +http://www.semanlink.net/tag/database_to_rdf_mapping|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/database_to_rdf_mapping|broader|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.semanlink.net/tag/database_to_rdf_mapping|related|http://www.semanlink.net/tag/virtuoso +http://www.semanlink.net/tag/database_to_rdf_mapping|creationDate|2007-04-19 +http://www.semanlink.net/tag/database_to_rdf_mapping|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/database_to_rdf_mapping|uri|http://www.semanlink.net/tag/database_to_rdf_mapping +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_prefLabel|Database +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_prefLabel|Converting data into RDF +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_prefLabel|RDF and database +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_prefLabel|RDF +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_prefLabel|Relational Databases and the Semantic Web +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_altLabel|RDF and database +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/database_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/cybersurveillance|creationTime|2009-04-22T18:01:08Z +http://www.semanlink.net/tag/cybersurveillance|prefLabel|Cybersurveillance +http://www.semanlink.net/tag/cybersurveillance|creationDate|2009-04-22 +http://www.semanlink.net/tag/cybersurveillance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/cybersurveillance|uri|http://www.semanlink.net/tag/cybersurveillance +http://www.semanlink.net/tag/uruguay|creationTime|2010-07-03T11:32:11Z +http://www.semanlink.net/tag/uruguay|prefLabel|Uruguay +http://www.semanlink.net/tag/uruguay|broader|http://www.semanlink.net/tag/amerique_du_sud +http://www.semanlink.net/tag/uruguay|creationDate|2010-07-03 +http://www.semanlink.net/tag/uruguay|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uruguay|uri|http://www.semanlink.net/tag/uruguay +http://www.semanlink.net/tag/uruguay|broader_prefLabel|Amérique du sud +http://www.semanlink.net/tag/thesaurus_taxonomies|creationTime|2020-05-22T16:17:42Z +http://www.semanlink.net/tag/thesaurus_taxonomies|prefLabel|Thesaurus & Taxonomies +http://www.semanlink.net/tag/thesaurus_taxonomies|broader|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/tag/thesaurus_taxonomies|broader|http://www.semanlink.net/tag/semantic_hierarchies +http://www.semanlink.net/tag/thesaurus_taxonomies|related|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/thesaurus_taxonomies|creationDate|2020-05-22 +http://www.semanlink.net/tag/thesaurus_taxonomies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thesaurus_taxonomies|uri|http://www.semanlink.net/tag/thesaurus_taxonomies +http://www.semanlink.net/tag/thesaurus_taxonomies|broader_prefLabel|Knowledge Representation +http://www.semanlink.net/tag/thesaurus_taxonomies|broader_prefLabel|Semantic hierarchies +http://www.semanlink.net/tag/thesaurus_taxonomies|broader_altLabel|KR +http://www.semanlink.net/tag/thesaurus_taxonomies|broader_related|http://www.semanlink.net/tag/semanlink_tag_finder +http://www.semanlink.net/tag/wordnet|prefLabel|Wordnet +http://www.semanlink.net/tag/wordnet|broader|http://www.semanlink.net/tag/semantic_networks +http://www.semanlink.net/tag/wordnet|broader|http://www.semanlink.net/tag/text_corpora_and_lexical_resources +http://www.semanlink.net/tag/wordnet|broader|http://www.semanlink.net/tag/anglais +http://www.semanlink.net/tag/wordnet|broader|http://www.semanlink.net/tag/nlp_princeton +http://www.semanlink.net/tag/wordnet|broader|http://www.semanlink.net/tag/princeton +http://www.semanlink.net/tag/wordnet|comment|A lexical database for the English language +http://www.semanlink.net/tag/wordnet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wordnet|homepage|https://wordnet.princeton.edu +http://www.semanlink.net/tag/wordnet|uri|http://www.semanlink.net/tag/wordnet +http://www.semanlink.net/tag/wordnet|broader_prefLabel|Semantic Networks +http://www.semanlink.net/tag/wordnet|broader_prefLabel|Text Corpora and Lexical Resources +http://www.semanlink.net/tag/wordnet|broader_prefLabel|Anglais +http://www.semanlink.net/tag/wordnet|broader_prefLabel|NLP@Princeton +http://www.semanlink.net/tag/wordnet|broader_prefLabel|Princeton +http://www.semanlink.net/tag/wordnet|broader_altLabel|Text corpora +http://www.semanlink.net/tag/wordnet|broader_altLabel|Lexical Resource +http://www.semanlink.net/tag/wordnet|broader_altLabel|Text corpus +http://www.semanlink.net/tag/wordnet|broader_altLabel|English +http://www.semanlink.net/tag/wordnet|broader_related|http://www.semanlink.net/tag/part_of_speech_tagging +http://www.semanlink.net/tag/wordnet|broader_related|http://www.semanlink.net/tag/sanjeev_arora +http://www.semanlink.net/tag/mali|prefLabel|Mali 
+http://www.semanlink.net/tag/mali|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/mali|broader|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/mali|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mali|uri|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/mali|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/mali|broader_prefLabel|Sahel +http://www.semanlink.net/tag/parente_a_plaisanterie|creationTime|2020-01-19T17:21:23Z +http://www.semanlink.net/tag/parente_a_plaisanterie|prefLabel|Parenté à plaisanterie +http://www.semanlink.net/tag/parente_a_plaisanterie|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/parente_a_plaisanterie|broader|http://www.semanlink.net/tag/rigolo +http://www.semanlink.net/tag/parente_a_plaisanterie|creationDate|2020-01-19 +http://www.semanlink.net/tag/parente_a_plaisanterie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/parente_a_plaisanterie|describedBy|https://fr.wikipedia.org/wiki/Parent%C3%A9_%C3%A0_plaisanterie +http://www.semanlink.net/tag/parente_a_plaisanterie|uri|http://www.semanlink.net/tag/parente_a_plaisanterie +http://www.semanlink.net/tag/parente_a_plaisanterie|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/parente_a_plaisanterie|broader_prefLabel|Rigolo +http://www.semanlink.net/tag/co_|prefLabel|Gaz carbonique +http://www.semanlink.net/tag/co_|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/co_|altLabel|CO2 +http://www.semanlink.net/tag/co_|uri|http://www.semanlink.net/tag/co_ +http://www.semanlink.net/tag/merisier|creationTime|2011-11-14T21:04:51Z +http://www.semanlink.net/tag/merisier|prefLabel|Merisier +http://www.semanlink.net/tag/merisier|broader|http://www.semanlink.net/tag/arbres +http://www.semanlink.net/tag/merisier|creationDate|2011-11-14 +http://www.semanlink.net/tag/merisier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/merisier|uri|http://www.semanlink.net/tag/merisier +http://www.semanlink.net/tag/merisier|broader_prefLabel|Arbres +http://www.semanlink.net/tag/java_server_faces|prefLabel|Java Server Faces +http://www.semanlink.net/tag/java_server_faces|broader|http://www.semanlink.net/tag/servlet +http://www.semanlink.net/tag/java_server_faces|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java_server_faces|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_server_faces|altLabel|jsf +http://www.semanlink.net/tag/java_server_faces|uri|http://www.semanlink.net/tag/java_server_faces +http://www.semanlink.net/tag/java_server_faces|broader_prefLabel|Servlet +http://www.semanlink.net/tag/java_server_faces|broader_prefLabel|Java +http://www.semanlink.net/tag/sea_peoples|creationTime|2016-05-23T08:46:04Z +http://www.semanlink.net/tag/sea_peoples|prefLabel|Sea Peoples +http://www.semanlink.net/tag/sea_peoples|creationDate|2016-05-23 +http://www.semanlink.net/tag/sea_peoples|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sea_peoples|describedBy|https://en.wikipedia.org/wiki/Sea_Peoples +http://www.semanlink.net/tag/sea_peoples|uri|http://www.semanlink.net/tag/sea_peoples +http://www.semanlink.net/tag/cognitive_computing|creationTime|2014-02-03T23:21:55Z +http://www.semanlink.net/tag/cognitive_computing|prefLabel|Cognitive computing 
+http://www.semanlink.net/tag/cognitive_computing|broader|http://www.semanlink.net/tag/ai_knowledge +http://www.semanlink.net/tag/cognitive_computing|creationDate|2014-02-03 +http://www.semanlink.net/tag/cognitive_computing|comment|not very well defined buzzword +http://www.semanlink.net/tag/cognitive_computing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cognitive_computing|describedBy|https://en.wikipedia.org/wiki/Cognitive_computing +http://www.semanlink.net/tag/cognitive_computing|uri|http://www.semanlink.net/tag/cognitive_computing +http://www.semanlink.net/tag/cognitive_computing|broader_prefLabel|AI + Knowledge +http://www.semanlink.net/tag/cognitive_computing|broader_altLabel|Domain Knowledge in AI +http://www.semanlink.net/tag/cognitive_computing|broader_related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/cognitive_computing|broader_related|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/mainstream_media|creationTime|2017-02-26T16:22:10Z +http://www.semanlink.net/tag/mainstream_media|prefLabel|Mainstream media +http://www.semanlink.net/tag/mainstream_media|creationDate|2017-02-26 +http://www.semanlink.net/tag/mainstream_media|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mainstream_media|uri|http://www.semanlink.net/tag/mainstream_media +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|creationTime|2017-12-30T02:16:09Z +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|prefLabel|DSSM (Deep Semantic Similarity Model) +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader|http://www.semanlink.net/tag/web_search +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader|http://www.semanlink.net/tag/document_embeddings +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader|http://www.semanlink.net/tag/embeddings_in_ir +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader|http://www.semanlink.net/tag/microsoft_research +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|creationDate|2017-12-30 +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|comment|"- Siamese network with two deep sub-models +- Projects input and candidate texts into embedding space +- Trained by maximizing cosine similarity between correct input-output pairs + +[source](/doc/2019/08/neural_models_for_information_r)" +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|uri|http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_prefLabel|Sentence Embeddings +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_prefLabel|Siamese networks +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_prefLabel|Similarity queries +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_prefLabel|Web search 
+http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_prefLabel|Document embeddings +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_prefLabel|Embeddings in Information Retrieval +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_prefLabel|Microsoft Research +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_altLabel|Siamese network +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_altLabel|Vector similarity search +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_altLabel|Embeddings in IR +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_related|http://www.semanlink.net/tag/face_recognition +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_related|http://www.semanlink.net/tag/one_shot_generalization +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_related|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model|broader_related|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/paradis_fiscaux|creationTime|2013-04-04T22:58:34Z +http://www.semanlink.net/tag/paradis_fiscaux|prefLabel|Paradis fiscaux +http://www.semanlink.net/tag/paradis_fiscaux|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/paradis_fiscaux|broader|http://www.semanlink.net/tag/optimisation_fiscale +http://www.semanlink.net/tag/paradis_fiscaux|related|http://www.semanlink.net/tag/luxembourg +http://www.semanlink.net/tag/paradis_fiscaux|creationDate|2013-04-04 +http://www.semanlink.net/tag/paradis_fiscaux|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paradis_fiscaux|altLabel|Tax Haven +http://www.semanlink.net/tag/paradis_fiscaux|altLabel|Paradis fiscal +http://www.semanlink.net/tag/paradis_fiscaux|uri|http://www.semanlink.net/tag/paradis_fiscaux +http://www.semanlink.net/tag/paradis_fiscaux|broader_prefLabel|Finance +http://www.semanlink.net/tag/paradis_fiscaux|broader_prefLabel|Optimisation fiscale +http://www.semanlink.net/tag/hip_hop|creationTime|2008-06-07T14:51:40Z +http://www.semanlink.net/tag/hip_hop|prefLabel|Hip Hop +http://www.semanlink.net/tag/hip_hop|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/hip_hop|related|http://www.semanlink.net/tag/rap +http://www.semanlink.net/tag/hip_hop|creationDate|2008-06-07 +http://www.semanlink.net/tag/hip_hop|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hip_hop|uri|http://www.semanlink.net/tag/hip_hop +http://www.semanlink.net/tag/hip_hop|broader_prefLabel|Musique +http://www.semanlink.net/tag/hip_hop|broader_altLabel|Music +http://www.semanlink.net/tag/enseignement_en_afrique|creationTime|2018-01-23T18:33:20Z +http://www.semanlink.net/tag/enseignement_en_afrique|prefLabel|Enseignement en Afrique +http://www.semanlink.net/tag/enseignement_en_afrique|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/enseignement_en_afrique|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/enseignement_en_afrique|creationDate|2018-01-23 +http://www.semanlink.net/tag/enseignement_en_afrique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enseignement_en_afrique|uri|http://www.semanlink.net/tag/enseignement_en_afrique 
+http://www.semanlink.net/tag/enseignement_en_afrique|broader_prefLabel|Afrique +http://www.semanlink.net/tag/enseignement_en_afrique|broader_prefLabel|Education +http://www.semanlink.net/tag/enseignement_en_afrique|broader_altLabel|Africa +http://www.semanlink.net/tag/enseignement_en_afrique|broader_altLabel|Enseignement +http://www.semanlink.net/tag/del_icio|creationTime|2007-06-12T00:57:39Z +http://www.semanlink.net/tag/del_icio|prefLabel|del icio +http://www.semanlink.net/tag/del_icio|creationDate|2007-06-12 +http://www.semanlink.net/tag/del_icio|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/del_icio|uri|http://www.semanlink.net/tag/del_icio +http://www.semanlink.net/tag/ontologie_visualization|creationTime|2014-12-30T15:02:42Z +http://www.semanlink.net/tag/ontologie_visualization|prefLabel|Ontologie visualization +http://www.semanlink.net/tag/ontologie_visualization|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/ontologie_visualization|creationDate|2014-12-30 +http://www.semanlink.net/tag/ontologie_visualization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ontologie_visualization|uri|http://www.semanlink.net/tag/ontologie_visualization +http://www.semanlink.net/tag/ontologie_visualization|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/ontologie_visualization|broader_altLabel|Ontology +http://www.semanlink.net/tag/contrastive_self_supervised_learning|creationTime|2019-04-01T16:31:49Z +http://www.semanlink.net/tag/contrastive_self_supervised_learning|prefLabel|Contrastive Self-Supervised Learning +http://www.semanlink.net/tag/contrastive_self_supervised_learning|broader|http://www.semanlink.net/tag/contrastive_learning +http://www.semanlink.net/tag/contrastive_self_supervised_learning|broader|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/tag/contrastive_self_supervised_learning|broader|http://www.semanlink.net/tag/self_supervised_learning +http://www.semanlink.net/tag/contrastive_self_supervised_learning|related|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/tag/contrastive_self_supervised_learning|related|http://www.semanlink.net/tag/word2vec +http://www.semanlink.net/tag/contrastive_self_supervised_learning|related|http://www.semanlink.net/tag/noise_contrastive_estimation +http://www.semanlink.net/tag/contrastive_self_supervised_learning|related|http://www.semanlink.net/tag/negative_sampling +http://www.semanlink.net/tag/contrastive_self_supervised_learning|creationDate|2019-04-01 +http://www.semanlink.net/tag/contrastive_self_supervised_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/contrastive_self_supervised_learning|altLabel|Contrastive Unsupervised Representation Learning +http://www.semanlink.net/tag/contrastive_self_supervised_learning|uri|http://www.semanlink.net/tag/contrastive_self_supervised_learning +http://www.semanlink.net/tag/contrastive_self_supervised_learning|broader_prefLabel|Contrastive Learning +http://www.semanlink.net/tag/contrastive_self_supervised_learning|broader_prefLabel|Representation learning +http://www.semanlink.net/tag/contrastive_self_supervised_learning|broader_prefLabel|Self-Supervised Learning +http://www.semanlink.net/tag/contrastive_self_supervised_learning|broader_related|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/solr|creationTime|2011-11-13T14:26:29Z 
+http://www.semanlink.net/tag/solr|prefLabel|Solr +http://www.semanlink.net/tag/solr|broader|http://www.semanlink.net/tag/text_search +http://www.semanlink.net/tag/solr|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/solr|creationDate|2011-11-13 +http://www.semanlink.net/tag/solr|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/solr|homepage|http://lucene.apache.org/solr/ +http://www.semanlink.net/tag/solr|uri|http://www.semanlink.net/tag/solr +http://www.semanlink.net/tag/solr|broader_prefLabel|Text Search +http://www.semanlink.net/tag/solr|broader_prefLabel|apache.org +http://www.semanlink.net/tag/solr|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/belgique|prefLabel|Belgique +http://www.semanlink.net/tag/belgique|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/belgique|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/belgique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/belgique|uri|http://www.semanlink.net/tag/belgique +http://www.semanlink.net/tag/belgique|broader_prefLabel|Europe +http://www.semanlink.net/tag/belgique|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/encyclopedia_of_life|creationTime|2007-05-11T00:13:14Z +http://www.semanlink.net/tag/encyclopedia_of_life|prefLabel|Encyclopedia of Life +http://www.semanlink.net/tag/encyclopedia_of_life|broader|http://www.semanlink.net/tag/encyclopedie_collaborative +http://www.semanlink.net/tag/encyclopedia_of_life|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/encyclopedia_of_life|broader|http://www.semanlink.net/tag/biodiversite +http://www.semanlink.net/tag/encyclopedia_of_life|broader|http://www.semanlink.net/tag/biodiversity_data +http://www.semanlink.net/tag/encyclopedia_of_life|related|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/encyclopedia_of_life|related|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/encyclopedia_of_life|creationDate|2007-05-11 +http://www.semanlink.net/tag/encyclopedia_of_life|comment|EOL is meant to be the Wikipedia of all 1.8 million known living species +http://www.semanlink.net/tag/encyclopedia_of_life|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/encyclopedia_of_life|homepage|http://www.eol.org/ +http://www.semanlink.net/tag/encyclopedia_of_life|uri|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/encyclopedia_of_life|broader_prefLabel|Encyclopédie collaborative +http://www.semanlink.net/tag/encyclopedia_of_life|broader_prefLabel|Biology +http://www.semanlink.net/tag/encyclopedia_of_life|broader_prefLabel|Biodiversité +http://www.semanlink.net/tag/encyclopedia_of_life|broader_prefLabel|Biodiversity data +http://www.semanlink.net/tag/encyclopedia_of_life|broader_altLabel|Biologie +http://www.semanlink.net/tag/encyclopedia_of_life|broader_altLabel|Biodiversity +http://www.semanlink.net/tag/encyclopedia_of_life|broader_related|http://www.semanlink.net/tag/web_2_0 +http://www.semanlink.net/tag/rdf_in_files|prefLabel|RDF in files +http://www.semanlink.net/tag/rdf_in_files|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/rdf_in_files|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_in_files|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/rdf_in_files|uri|http://www.semanlink.net/tag/rdf_in_files +http://www.semanlink.net/tag/rdf_in_files|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/rdf_in_files|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_in_files|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_in_files|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_in_files|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_in_files|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_in_files|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/educational_resources|creationTime|2019-01-27T15:44:03Z +http://www.semanlink.net/tag/educational_resources|prefLabel|Educational resources +http://www.semanlink.net/tag/educational_resources|creationDate|2019-01-27 +http://www.semanlink.net/tag/educational_resources|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/educational_resources|uri|http://www.semanlink.net/tag/educational_resources +http://www.semanlink.net/tag/ml_and_physics|creationTime|2019-06-11T11:52:45Z +http://www.semanlink.net/tag/ml_and_physics|prefLabel|ML and physics +http://www.semanlink.net/tag/ml_and_physics|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/ml_and_physics|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/ml_and_physics|creationDate|2019-06-11 +http://www.semanlink.net/tag/ml_and_physics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ml_and_physics|uri|http://www.semanlink.net/tag/ml_and_physics +http://www.semanlink.net/tag/ml_and_physics|broader_prefLabel|Physique +http://www.semanlink.net/tag/ml_and_physics|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/ml_and_physics|broader_altLabel|Physics +http://www.semanlink.net/tag/ml_and_physics|broader_altLabel|ML +http://www.semanlink.net/tag/ml_and_physics|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/ml_and_physics|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/dynamic_semantic_publishing|creationTime|2010-08-12T16:11:33Z +http://www.semanlink.net/tag/dynamic_semantic_publishing|prefLabel|Dynamic Semantic Publishing +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader|http://www.semanlink.net/tag/semantic_web_dev +http://www.semanlink.net/tag/dynamic_semantic_publishing|related|http://www.semanlink.net/tag/bbc +http://www.semanlink.net/tag/dynamic_semantic_publishing|creationDate|2010-08-12 +http://www.semanlink.net/tag/dynamic_semantic_publishing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dynamic_semantic_publishing|uri|http://www.semanlink.net/tag/dynamic_semantic_publishing +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_prefLabel|Semantic Web Dev +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_altLabel|LD +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/yves_raymond 
+http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/dynamic_semantic_publishing|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/phoenix_mars_lander|creationTime|2008-05-17T13:00:14Z +http://www.semanlink.net/tag/phoenix_mars_lander|prefLabel|Phoenix Mars Lander +http://www.semanlink.net/tag/phoenix_mars_lander|broader|http://www.semanlink.net/tag/exploration_marsienne +http://www.semanlink.net/tag/phoenix_mars_lander|creationDate|2008-05-17 +http://www.semanlink.net/tag/phoenix_mars_lander|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/phoenix_mars_lander|describedBy|http://www.nasa.gov/mission_pages/phoenix/main/index.html +http://www.semanlink.net/tag/phoenix_mars_lander|uri|http://www.semanlink.net/tag/phoenix_mars_lander +http://www.semanlink.net/tag/phoenix_mars_lander|broader_prefLabel|Exploration marsienne +http://www.semanlink.net/tag/sud_des_etats_unis|creationTime|2007-07-18T19:48:52Z +http://www.semanlink.net/tag/sud_des_etats_unis|prefLabel|Sud des Etats-Unis +http://www.semanlink.net/tag/sud_des_etats_unis|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/sud_des_etats_unis|creationDate|2007-07-18 +http://www.semanlink.net/tag/sud_des_etats_unis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sud_des_etats_unis|uri|http://www.semanlink.net/tag/sud_des_etats_unis +http://www.semanlink.net/tag/sud_des_etats_unis|broader_prefLabel|USA +http://www.semanlink.net/tag/sud_des_etats_unis|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/sud_des_etats_unis|broader_altLabel|United States +http://www.semanlink.net/tag/hierarchical_classification|creationTime|2020-08-15T12:14:44Z +http://www.semanlink.net/tag/hierarchical_classification|prefLabel|Hierarchical Classification +http://www.semanlink.net/tag/hierarchical_classification|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/hierarchical_classification|broader|http://www.semanlink.net/tag/semantic_hierarchies 
+http://www.semanlink.net/tag/hierarchical_classification|broader|http://www.semanlink.net/tag/hierarchies_in_ml +http://www.semanlink.net/tag/hierarchical_classification|creationDate|2020-08-15 +http://www.semanlink.net/tag/hierarchical_classification|comment|the problem of classifying items into a hierarchy of classes. +http://www.semanlink.net/tag/hierarchical_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hierarchical_classification|uri|http://www.semanlink.net/tag/hierarchical_classification +http://www.semanlink.net/tag/hierarchical_classification|broader_prefLabel|Classification +http://www.semanlink.net/tag/hierarchical_classification|broader_prefLabel|Semantic hierarchies +http://www.semanlink.net/tag/hierarchical_classification|broader_prefLabel|Hierarchies in ML +http://www.semanlink.net/tag/hierarchical_classification|broader_related|http://www.semanlink.net/tag/semanlink_tag_finder +http://www.semanlink.net/tag/diamant|creationTime|2008-12-13T12:31:49Z +http://www.semanlink.net/tag/diamant|prefLabel|Diamant +http://www.semanlink.net/tag/diamant|creationDate|2008-12-13 +http://www.semanlink.net/tag/diamant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/diamant|uri|http://www.semanlink.net/tag/diamant +http://www.semanlink.net/tag/git|creationTime|2012-08-07T20:20:54Z +http://www.semanlink.net/tag/git|prefLabel|Git +http://www.semanlink.net/tag/git|related|http://www.semanlink.net/tag/github +http://www.semanlink.net/tag/git|related|http://www.semanlink.net/tag/maven +http://www.semanlink.net/tag/git|creationDate|2012-08-07 +http://www.semanlink.net/tag/git|comment|"[le chapitre de la doc](https://git-scm.com/book/en/v2/Git-Basics-Getting-a-Git-Repository) + +[cheat sheet](https://www.atlassian.com/git/tutorials/atlassian-git-cheatsheet) +" +http://www.semanlink.net/tag/git|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/git|homepage|http://git-scm.com/ +http://www.semanlink.net/tag/git|uri|http://www.semanlink.net/tag/git +http://www.semanlink.net/tag/http_redirect|creationTime|2017-10-19T22:35:56Z +http://www.semanlink.net/tag/http_redirect|prefLabel|HTTP Redirect +http://www.semanlink.net/tag/http_redirect|broader|http://www.semanlink.net/tag/http +http://www.semanlink.net/tag/http_redirect|creationDate|2017-10-19 +http://www.semanlink.net/tag/http_redirect|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/http_redirect|uri|http://www.semanlink.net/tag/http_redirect +http://www.semanlink.net/tag/http_redirect|broader_prefLabel|HTTP +http://www.semanlink.net/tag/florence_aubenas|creationTime|2021-04-30T15:39:18Z +http://www.semanlink.net/tag/florence_aubenas|prefLabel|Florence Aubenas +http://www.semanlink.net/tag/florence_aubenas|broader|http://www.semanlink.net/tag/journaliste +http://www.semanlink.net/tag/florence_aubenas|creationDate|2021-04-30 +http://www.semanlink.net/tag/florence_aubenas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/florence_aubenas|uri|http://www.semanlink.net/tag/florence_aubenas +http://www.semanlink.net/tag/florence_aubenas|broader_prefLabel|Journaliste +http://www.semanlink.net/tag/consciousness_prior|creationTime|2018-09-29T00:35:53Z +http://www.semanlink.net/tag/consciousness_prior|prefLabel|Consciousness Prior +http://www.semanlink.net/tag/consciousness_prior|broader|http://www.semanlink.net/tag/conscience_artificielle 
+http://www.semanlink.net/tag/consciousness_prior|broader|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/consciousness_prior|broader|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/tag/consciousness_prior|broader|http://www.semanlink.net/tag/human_level_ai +http://www.semanlink.net/tag/consciousness_prior|broader|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/tag/consciousness_prior|related|http://www.semanlink.net/tag/thought_vector +http://www.semanlink.net/tag/consciousness_prior|creationDate|2018-09-29 +http://www.semanlink.net/tag/consciousness_prior|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/consciousness_prior|uri|http://www.semanlink.net/tag/consciousness_prior +http://www.semanlink.net/tag/consciousness_prior|broader_prefLabel|Conscience artificielle +http://www.semanlink.net/tag/consciousness_prior|broader_prefLabel|Yoshua Bengio +http://www.semanlink.net/tag/consciousness_prior|broader_prefLabel|Representation learning +http://www.semanlink.net/tag/consciousness_prior|broader_prefLabel|Human Level AI +http://www.semanlink.net/tag/consciousness_prior|broader_prefLabel|NN / Symbolic AI hybridation +http://www.semanlink.net/tag/consciousness_prior|broader_altLabel|Machine consciousness +http://www.semanlink.net/tag/consciousness_prior|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/consciousness_prior|broader_related|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/consciousness_prior|broader_related|http://www.semanlink.net/tag/connectionist_vs_symbolic_debate +http://www.semanlink.net/tag/generative_adversarial_network|creationTime|2017-06-09T17:47:12Z +http://www.semanlink.net/tag/generative_adversarial_network|prefLabel|GAN +http://www.semanlink.net/tag/generative_adversarial_network|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/generative_adversarial_network|broader|http://www.semanlink.net/tag/deep_latent_variable_models +http://www.semanlink.net/tag/generative_adversarial_network|broader|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/generative_adversarial_network|related|http://www.semanlink.net/tag/ian_goodfellow +http://www.semanlink.net/tag/generative_adversarial_network|related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/generative_adversarial_network|creationDate|2017-06-09 +http://www.semanlink.net/tag/generative_adversarial_network|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/generative_adversarial_network|describedBy|https://en.wikipedia.org/wiki/Generative_adversarial_network +http://www.semanlink.net/tag/generative_adversarial_network|altLabel|Generative adversarial networks +http://www.semanlink.net/tag/generative_adversarial_network|altLabel|Generative adversarial network +http://www.semanlink.net/tag/generative_adversarial_network|uri|http://www.semanlink.net/tag/generative_adversarial_network +http://www.semanlink.net/tag/generative_adversarial_network|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/generative_adversarial_network|broader_prefLabel|Deep latent variable models +http://www.semanlink.net/tag/generative_adversarial_network|broader_prefLabel|Unsupervised machine learning +http://www.semanlink.net/tag/generative_adversarial_network|broader_altLabel|Artificial neural network 
+http://www.semanlink.net/tag/generative_adversarial_network|broader_altLabel|ANN +http://www.semanlink.net/tag/generative_adversarial_network|broader_altLabel|NN +http://www.semanlink.net/tag/memory_prediction_framework|creationTime|2013-05-25T18:08:23Z +http://www.semanlink.net/tag/memory_prediction_framework|prefLabel|Memory-prediction framework +http://www.semanlink.net/tag/memory_prediction_framework|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/memory_prediction_framework|broader|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/memory_prediction_framework|related|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/memory_prediction_framework|related|http://www.semanlink.net/tag/prediction +http://www.semanlink.net/tag/memory_prediction_framework|related|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/memory_prediction_framework|related|http://www.semanlink.net/tag/jeff_hawkins +http://www.semanlink.net/tag/memory_prediction_framework|creationDate|2013-05-25 +http://www.semanlink.net/tag/memory_prediction_framework|comment|a theory of brain function created by Jeff Hawkins about mammalian neocortex. Role of the mammalian neocortex in matching sensory inputs to stored memory patterns, and how this process leads to predictions of what will happen in the future. +http://www.semanlink.net/tag/memory_prediction_framework|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/memory_prediction_framework|describedBy|https://en.wikipedia.org/wiki/Memory-prediction_framework +http://www.semanlink.net/tag/memory_prediction_framework|uri|http://www.semanlink.net/tag/memory_prediction_framework +http://www.semanlink.net/tag/memory_prediction_framework|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/memory_prediction_framework|broader_prefLabel|Neuroscience +http://www.semanlink.net/tag/memory_prediction_framework|broader_altLabel|ML +http://www.semanlink.net/tag/memory_prediction_framework|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/memory_prediction_framework|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/nlp_facebook|creationTime|2017-06-28T01:16:28Z +http://www.semanlink.net/tag/nlp_facebook|prefLabel|NLP@Facebook +http://www.semanlink.net/tag/nlp_facebook|broader|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/tag/nlp_facebook|broader|http://www.semanlink.net/tag/ai_facebook +http://www.semanlink.net/tag/nlp_facebook|broader|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/nlp_facebook|creationDate|2017-06-28 +http://www.semanlink.net/tag/nlp_facebook|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_facebook|uri|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/nlp_facebook|broader_prefLabel|NLP Teams +http://www.semanlink.net/tag/nlp_facebook|broader_prefLabel|AI@Facebook +http://www.semanlink.net/tag/nlp_facebook|broader_prefLabel|Facebook +http://www.semanlink.net/tag/nlp_facebook|broader_altLabel|NLP Groups +http://www.semanlink.net/tag/nlp_facebook|broader_altLabel|FB +http://www.semanlink.net/tag/soap_vs_rest|prefLabel|SOAP vs REST +http://www.semanlink.net/tag/soap_vs_rest|broader|http://www.semanlink.net/tag/web_services +http://www.semanlink.net/tag/soap_vs_rest|broader|http://www.semanlink.net/tag/rest 
+http://www.semanlink.net/tag/soap_vs_rest|broader|http://www.semanlink.net/tag/web_services_critique +http://www.semanlink.net/tag/soap_vs_rest|broader|http://www.semanlink.net/tag/soap +http://www.semanlink.net/tag/soap_vs_rest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/soap_vs_rest|uri|http://www.semanlink.net/tag/soap_vs_rest +http://www.semanlink.net/tag/soap_vs_rest|broader_prefLabel|Web Services +http://www.semanlink.net/tag/soap_vs_rest|broader_prefLabel|REST +http://www.semanlink.net/tag/soap_vs_rest|broader_prefLabel|Web services : critique +http://www.semanlink.net/tag/soap_vs_rest|broader_prefLabel|SOAP +http://www.semanlink.net/tag/soap_vs_rest|broader_altLabel|WS +http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox|creationTime|2015-01-06T14:53:56Z +http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox|prefLabel|Coursera: The Data Scientist’s Toolbox +http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox|broader|http://www.semanlink.net/tag/coursera +http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox|creationDate|2015-01-06 +http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox|homepage|https://class.coursera.org/datascitoolbox-010 +http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox|uri|http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox +http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox|broader_prefLabel|Coursera +http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/gael_de_chalendar|creationTime|2017-07-17T00:22:17Z +http://www.semanlink.net/tag/gael_de_chalendar|prefLabel|Gaël de Chalendar +http://www.semanlink.net/tag/gael_de_chalendar|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/gael_de_chalendar|creationDate|2017-07-17 +http://www.semanlink.net/tag/gael_de_chalendar|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gael_de_chalendar|uri|http://www.semanlink.net/tag/gael_de_chalendar +http://www.semanlink.net/tag/gael_de_chalendar|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/table_based_fact_verification|creationTime|2019-12-01T13:22:30Z +http://www.semanlink.net/tag/table_based_fact_verification|prefLabel|Table-based Fact Verification +http://www.semanlink.net/tag/table_based_fact_verification|broader|http://www.semanlink.net/tag/fact_verification +http://www.semanlink.net/tag/table_based_fact_verification|creationDate|2019-12-01 +http://www.semanlink.net/tag/table_based_fact_verification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/table_based_fact_verification|uri|http://www.semanlink.net/tag/table_based_fact_verification +http://www.semanlink.net/tag/table_based_fact_verification|broader_prefLabel|Fact verification +http://www.semanlink.net/tag/hierarchical_temporal_memory|creationTime|2013-05-25T18:21:27Z +http://www.semanlink.net/tag/hierarchical_temporal_memory|prefLabel|Hierarchical temporal memory +http://www.semanlink.net/tag/hierarchical_temporal_memory|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/hierarchical_temporal_memory|broader|http://www.semanlink.net/tag/memory_prediction_framework 
+http://www.semanlink.net/tag/hierarchical_temporal_memory|related|http://www.semanlink.net/tag/jeff_hawkins +http://www.semanlink.net/tag/hierarchical_temporal_memory|related|http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data +http://www.semanlink.net/tag/hierarchical_temporal_memory|creationDate|2013-05-25 +http://www.semanlink.net/tag/hierarchical_temporal_memory|comment|"A machine learning model that models some of the structural and algorithmic properties of the neocortex. HTM is a biomimetic model based on the memory-prediction theory of brain function described by Jeff Hawkins. HTM is a method for discovering and inferring the high-level causes of observed input patterns and sequences, thus building an increasingly complex model of the world. +" +http://www.semanlink.net/tag/hierarchical_temporal_memory|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hierarchical_temporal_memory|describedBy|https://en.wikipedia.org/wiki/Hierarchical_temporal_memory +http://www.semanlink.net/tag/hierarchical_temporal_memory|uri|http://www.semanlink.net/tag/hierarchical_temporal_memory +http://www.semanlink.net/tag/hierarchical_temporal_memory|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/hierarchical_temporal_memory|broader_prefLabel|Memory-prediction framework +http://www.semanlink.net/tag/hierarchical_temporal_memory|broader_related|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/hierarchical_temporal_memory|broader_related|http://www.semanlink.net/tag/prediction +http://www.semanlink.net/tag/hierarchical_temporal_memory|broader_related|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/hierarchical_temporal_memory|broader_related|http://www.semanlink.net/tag/jeff_hawkins +http://www.semanlink.net/tag/christiane_taubira|creationTime|2013-12-06T01:14:36Z +http://www.semanlink.net/tag/christiane_taubira|prefLabel|Christiane Taubira +http://www.semanlink.net/tag/christiane_taubira|creationDate|2013-12-06 +http://www.semanlink.net/tag/christiane_taubira|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/christiane_taubira|uri|http://www.semanlink.net/tag/christiane_taubira +http://www.semanlink.net/tag/semantic_tagging|creationTime|2007-06-23T15:04:40Z +http://www.semanlink.net/tag/semantic_tagging|prefLabel|Semantic tagging +http://www.semanlink.net/tag/semantic_tagging|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/semantic_tagging|creationDate|2007-06-23 +http://www.semanlink.net/tag/semantic_tagging|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_tagging|uri|http://www.semanlink.net/tag/semantic_tagging +http://www.semanlink.net/tag/semantic_tagging|broader_prefLabel|Tagging +http://www.semanlink.net/tag/ludovic_denoyer|creationTime|2019-12-05T15:05:54Z +http://www.semanlink.net/tag/ludovic_denoyer|prefLabel|Ludovic Denoyer +http://www.semanlink.net/tag/ludovic_denoyer|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/ludovic_denoyer|related|http://www.semanlink.net/tag/facebook_fair +http://www.semanlink.net/tag/ludovic_denoyer|related|http://www.semanlink.net/tag/sorbonne +http://www.semanlink.net/tag/ludovic_denoyer|creationDate|2019-12-05 +http://www.semanlink.net/tag/ludovic_denoyer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/ludovic_denoyer|uri|http://www.semanlink.net/tag/ludovic_denoyer +http://www.semanlink.net/tag/ludovic_denoyer|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/lod2|creationTime|2010-09-06T09:30:47Z +http://www.semanlink.net/tag/lod2|prefLabel|LOD2 +http://www.semanlink.net/tag/lod2|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/lod2|broader|http://www.semanlink.net/tag/european_project +http://www.semanlink.net/tag/lod2|creationDate|2010-09-06 +http://www.semanlink.net/tag/lod2|comment|"""Creating Knowledge out of Interlinked Data""" +http://www.semanlink.net/tag/lod2|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lod2|describedBy|http://lod2.eu/Welcome.html +http://www.semanlink.net/tag/lod2|uri|http://www.semanlink.net/tag/lod2 +http://www.semanlink.net/tag/lod2|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/lod2|broader_prefLabel|European project +http://www.semanlink.net/tag/lod2|broader_altLabel|LOD +http://www.semanlink.net/tag/lod2|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/lod2|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/lod2|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/lod2|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/hydrogen_economy|creationTime|2014-07-03T14:29:17Z +http://www.semanlink.net/tag/hydrogen_economy|prefLabel|Hydrogen economy +http://www.semanlink.net/tag/hydrogen_economy|broader|http://www.semanlink.net/tag/hydrogen +http://www.semanlink.net/tag/hydrogen_economy|creationDate|2014-07-03 +http://www.semanlink.net/tag/hydrogen_economy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hydrogen_economy|describedBy|https://en.wikipedia.org/wiki/Hydrogen_economy +http://www.semanlink.net/tag/hydrogen_economy|uri|http://www.semanlink.net/tag/hydrogen_economy +http://www.semanlink.net/tag/hydrogen_economy|broader_prefLabel|Hydrogen +http://www.semanlink.net/tag/hydrogen_economy|broader_related|http://www.semanlink.net/tag/transition_energetique +http://www.semanlink.net/tag/knowledge_vault|creationTime|2014-08-27T14:59:47Z +http://www.semanlink.net/tag/knowledge_vault|prefLabel|Knowledge Vault +http://www.semanlink.net/tag/knowledge_vault|broader|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/knowledge_vault|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/knowledge_vault|related|http://www.semanlink.net/tag/google_knowledge_graph +http://www.semanlink.net/tag/knowledge_vault|creationDate|2014-08-27 +http://www.semanlink.net/tag/knowledge_vault|comment|knowledge base created by Google +http://www.semanlink.net/tag/knowledge_vault|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_vault|describedBy|https://en.wikipedia.org/wiki/Knowledge_Vault +http://www.semanlink.net/tag/knowledge_vault|uri|http://www.semanlink.net/tag/knowledge_vault +http://www.semanlink.net/tag/knowledge_vault|broader_prefLabel|Knowledge bases +http://www.semanlink.net/tag/knowledge_vault|broader_prefLabel|Google +http://www.semanlink.net/tag/knowledge_vault|broader_altLabel|Knowledge Base +http://www.semanlink.net/tag/knowledge_vault|broader_related|http://www.semanlink.net/tag/search_engines 
+http://www.semanlink.net/tag/parthe|creationTime|2015-04-04T16:55:34Z +http://www.semanlink.net/tag/parthe|prefLabel|Parthe +http://www.semanlink.net/tag/parthe|broader|http://www.semanlink.net/tag/mesopotamie +http://www.semanlink.net/tag/parthe|creationDate|2015-04-04 +http://www.semanlink.net/tag/parthe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/parthe|describedBy|https://en.wikipedia.org/wiki/Parthian_Empire +http://www.semanlink.net/tag/parthe|altLabel|Parthian Empire +http://www.semanlink.net/tag/parthe|uri|http://www.semanlink.net/tag/parthe +http://www.semanlink.net/tag/parthe|broader_prefLabel|Mésopotamie +http://www.semanlink.net/tag/google_cloud_platform|creationTime|2017-07-12T15:37:52Z +http://www.semanlink.net/tag/google_cloud_platform|prefLabel|Google Cloud Platform +http://www.semanlink.net/tag/google_cloud_platform|broader|http://www.semanlink.net/tag/google_cloud +http://www.semanlink.net/tag/google_cloud_platform|creationDate|2017-07-12 +http://www.semanlink.net/tag/google_cloud_platform|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_cloud_platform|altLabel|GCP +http://www.semanlink.net/tag/google_cloud_platform|uri|http://www.semanlink.net/tag/google_cloud_platform +http://www.semanlink.net/tag/google_cloud_platform|broader_prefLabel|Google Cloud +http://www.semanlink.net/tag/salsa|creationTime|2007-04-18T13:22:16Z +http://www.semanlink.net/tag/salsa|prefLabel|Salsa +http://www.semanlink.net/tag/salsa|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/salsa|creationDate|2007-04-18 +http://www.semanlink.net/tag/salsa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/salsa|uri|http://www.semanlink.net/tag/salsa +http://www.semanlink.net/tag/salsa|broader_prefLabel|Musique +http://www.semanlink.net/tag/salsa|broader_altLabel|Music +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|creationTime|2020-09-06T17:24:34Z +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|prefLabel|Using literal descriptions of entities +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|broader|http://www.semanlink.net/tag/text_kg_and_embeddings +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|related|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|creationDate|2020-09-06 +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|uri|http://www.semanlink.net/tag/using_literal_descriptions_of_entities +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|broader_prefLabel|Text, KG and embeddings +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|broader_prefLabel|Entities +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|broader_altLabel|Embeddings of Text + Knowledge Graphs +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|broader_altLabel|Embeddings of Text + Knowledge Bases 
+http://www.semanlink.net/tag/using_literal_descriptions_of_entities|broader_altLabel|Embeddings of text + KB +http://www.semanlink.net/tag/using_literal_descriptions_of_entities|broader_related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/plantu|prefLabel|Plantu +http://www.semanlink.net/tag/plantu|broader|http://www.semanlink.net/tag/journal_le_monde +http://www.semanlink.net/tag/plantu|broader|http://www.semanlink.net/tag/humour +http://www.semanlink.net/tag/plantu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/plantu|uri|http://www.semanlink.net/tag/plantu +http://www.semanlink.net/tag/plantu|broader_prefLabel|Journal Le Monde +http://www.semanlink.net/tag/plantu|broader_prefLabel|Humour +http://www.semanlink.net/tag/grand_homme|prefLabel|Grand Homme +http://www.semanlink.net/tag/grand_homme|broader|http://www.semanlink.net/tag/homme_celebre +http://www.semanlink.net/tag/grand_homme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grand_homme|uri|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/grand_homme|broader_prefLabel|Homme célèbre +http://www.semanlink.net/tag/navier_stokes|creationTime|2020-10-31T12:36:07Z +http://www.semanlink.net/tag/navier_stokes|prefLabel|Navier–Stokes +http://www.semanlink.net/tag/navier_stokes|broader|http://www.semanlink.net/tag/partial_differential_equations +http://www.semanlink.net/tag/navier_stokes|broader|http://www.semanlink.net/tag/grands_problemes_mathematiques +http://www.semanlink.net/tag/navier_stokes|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/navier_stokes|creationDate|2020-10-31 +http://www.semanlink.net/tag/navier_stokes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/navier_stokes|describedBy|https://en.wikipedia.org/wiki/Navier%E2%80%93Stokes_equations +http://www.semanlink.net/tag/navier_stokes|uri|http://www.semanlink.net/tag/navier_stokes +http://www.semanlink.net/tag/navier_stokes|broader_prefLabel|Partial differential equations +http://www.semanlink.net/tag/navier_stokes|broader_prefLabel|Grands problèmes mathématiques +http://www.semanlink.net/tag/navier_stokes|broader_prefLabel|Physique +http://www.semanlink.net/tag/navier_stokes|broader_altLabel|Physics +http://www.semanlink.net/tag/marseillaise|prefLabel|Marseillaise +http://www.semanlink.net/tag/marseillaise|broader|http://www.semanlink.net/tag/hymne_national +http://www.semanlink.net/tag/marseillaise|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/marseillaise|creationDate|2006-08-31 +http://www.semanlink.net/tag/marseillaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/marseillaise|uri|http://www.semanlink.net/tag/marseillaise +http://www.semanlink.net/tag/marseillaise|broader_prefLabel|Hymne national +http://www.semanlink.net/tag/marseillaise|broader_prefLabel|France +http://www.semanlink.net/tag/amadou_hampate_ba|creationTime|2021-07-26T21:12:18Z +http://www.semanlink.net/tag/amadou_hampate_ba|prefLabel|Amadou Hampâté Bâ +http://www.semanlink.net/tag/amadou_hampate_ba|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/amadou_hampate_ba|broader|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/amadou_hampate_ba|creationDate|2021-07-26 +http://www.semanlink.net/tag/amadou_hampate_ba|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/amadou_hampate_ba|describedBy|https://fr.wikipedia.org/wiki/Amadou_Hamp%C3%A2t%C3%A9_B%C3%A2 +http://www.semanlink.net/tag/amadou_hampate_ba|uri|http://www.semanlink.net/tag/amadou_hampate_ba +http://www.semanlink.net/tag/amadou_hampate_ba|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/amadou_hampate_ba|broader_prefLabel|Mali +http://www.semanlink.net/tag/darfour|prefLabel|Darfour +http://www.semanlink.net/tag/darfour|broader|http://www.semanlink.net/tag/genocide +http://www.semanlink.net/tag/darfour|broader|http://www.semanlink.net/tag/soudan +http://www.semanlink.net/tag/darfour|broader|http://www.semanlink.net/tag/catastrophe_humanitaire +http://www.semanlink.net/tag/darfour|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/darfour|uri|http://www.semanlink.net/tag/darfour +http://www.semanlink.net/tag/darfour|broader_prefLabel|Génocide +http://www.semanlink.net/tag/darfour|broader_prefLabel|Soudan +http://www.semanlink.net/tag/darfour|broader_prefLabel|Catastrophe humanitaire +http://www.semanlink.net/tag/eclipse_project|creationTime|2007-10-19T10:39:02Z +http://www.semanlink.net/tag/eclipse_project|prefLabel|Eclipse project +http://www.semanlink.net/tag/eclipse_project|broader|http://www.semanlink.net/tag/fps_dev +http://www.semanlink.net/tag/eclipse_project|broader|http://www.semanlink.net/tag/eclipse +http://www.semanlink.net/tag/eclipse_project|creationDate|2007-10-19 +http://www.semanlink.net/tag/eclipse_project|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eclipse_project|uri|http://www.semanlink.net/tag/eclipse_project +http://www.semanlink.net/tag/eclipse_project|broader_prefLabel|fps dev +http://www.semanlink.net/tag/eclipse_project|broader_prefLabel|Eclipse +http://www.semanlink.net/tag/rdf_schema_querying|prefLabel|RDF Schema querying +http://www.semanlink.net/tag/rdf_schema_querying|broader|http://www.semanlink.net/tag/rdf_schema +http://www.semanlink.net/tag/rdf_schema_querying|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_schema_querying|uri|http://www.semanlink.net/tag/rdf_schema_querying +http://www.semanlink.net/tag/rdf_schema_querying|broader_prefLabel|RDF Schema +http://www.semanlink.net/tag/rdf_schema_querying|broader_altLabel|RDF-S +http://www.semanlink.net/tag/rdf_schema_querying|broader_altLabel|RDFS +http://www.semanlink.net/tag/pape|creationTime|2007-12-05T18:54:01Z +http://www.semanlink.net/tag/pape|prefLabel|Pape +http://www.semanlink.net/tag/pape|broader|http://www.semanlink.net/tag/eglise_catholique +http://www.semanlink.net/tag/pape|broader|http://www.semanlink.net/tag/catholicisme +http://www.semanlink.net/tag/pape|creationDate|2007-12-05 +http://www.semanlink.net/tag/pape|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pape|uri|http://www.semanlink.net/tag/pape +http://www.semanlink.net/tag/pape|broader_prefLabel|Eglise catholique +http://www.semanlink.net/tag/pape|broader_prefLabel|Catholicisme +http://www.semanlink.net/tag/pape|broader_altLabel|Catholique +http://www.semanlink.net/tag/axel_ngonga|creationTime|2014-12-15T15:04:41Z +http://www.semanlink.net/tag/axel_ngonga|prefLabel|Axel Ngonga +http://www.semanlink.net/tag/axel_ngonga|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/axel_ngonga|creationDate|2014-12-15 +http://www.semanlink.net/tag/axel_ngonga|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/axel_ngonga|uri|http://www.semanlink.net/tag/axel_ngonga +http://www.semanlink.net/tag/axel_ngonga|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/propriete_privee|creationTime|2008-08-15T11:51:59Z +http://www.semanlink.net/tag/propriete_privee|prefLabel|Propriété privée +http://www.semanlink.net/tag/propriete_privee|creationDate|2008-08-15 +http://www.semanlink.net/tag/propriete_privee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/propriete_privee|uri|http://www.semanlink.net/tag/propriete_privee +http://www.semanlink.net/tag/user_driven_modelling|prefLabel|User Driven Modelling +http://www.semanlink.net/tag/user_driven_modelling|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/user_driven_modelling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/user_driven_modelling|uri|http://www.semanlink.net/tag/user_driven_modelling +http://www.semanlink.net/tag/user_driven_modelling|broader_prefLabel|Informatique +http://www.semanlink.net/tag/antiracisme|prefLabel|Antiracisme +http://www.semanlink.net/tag/antiracisme|broader|http://www.semanlink.net/tag/racisme +http://www.semanlink.net/tag/antiracisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antiracisme|uri|http://www.semanlink.net/tag/antiracisme +http://www.semanlink.net/tag/antiracisme|broader_prefLabel|Racisme +http://www.semanlink.net/tag/lexical_ambiguity|creationTime|2017-10-21T17:08:58Z +http://www.semanlink.net/tag/lexical_ambiguity|prefLabel|Word sense / Lexical ambiguity +http://www.semanlink.net/tag/lexical_ambiguity|broader|http://www.semanlink.net/tag/ambiguity_nlp +http://www.semanlink.net/tag/lexical_ambiguity|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/lexical_ambiguity|related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/lexical_ambiguity|creationDate|2017-10-21 +http://www.semanlink.net/tag/lexical_ambiguity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lexical_ambiguity|altLabel|Polysemy +http://www.semanlink.net/tag/lexical_ambiguity|uri|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/lexical_ambiguity|broader_prefLabel|Ambiguity (NLP) +http://www.semanlink.net/tag/lexical_ambiguity|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/mladic|creationTime|2007-09-10T19:35:55Z +http://www.semanlink.net/tag/mladic|prefLabel|Mladic +http://www.semanlink.net/tag/mladic|broader|http://www.semanlink.net/tag/crime_contre_l_humanite +http://www.semanlink.net/tag/mladic|broader|http://www.semanlink.net/tag/bosnie +http://www.semanlink.net/tag/mladic|broader|http://www.semanlink.net/tag/serbie +http://www.semanlink.net/tag/mladic|broader|http://www.semanlink.net/tag/mechant +http://www.semanlink.net/tag/mladic|broader|http://www.semanlink.net/tag/guerre_de_yougoslavie +http://www.semanlink.net/tag/mladic|broader|http://www.semanlink.net/tag/genocide +http://www.semanlink.net/tag/mladic|creationDate|2007-09-10 +http://www.semanlink.net/tag/mladic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mladic|uri|http://www.semanlink.net/tag/mladic +http://www.semanlink.net/tag/mladic|broader_prefLabel|Crime contre l'Humanité +http://www.semanlink.net/tag/mladic|broader_prefLabel|Bosnie +http://www.semanlink.net/tag/mladic|broader_prefLabel|Serbie 
+http://www.semanlink.net/tag/mladic|broader_prefLabel|Méchant +http://www.semanlink.net/tag/mladic|broader_prefLabel|Guerre de Yougoslavie +http://www.semanlink.net/tag/mladic|broader_prefLabel|Génocide +http://www.semanlink.net/tag/princeton|prefLabel|Princeton +http://www.semanlink.net/tag/princeton|broader|http://www.semanlink.net/tag/universites_americaines +http://www.semanlink.net/tag/princeton|related|http://www.semanlink.net/tag/sanjeev_arora +http://www.semanlink.net/tag/princeton|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/princeton|uri|http://www.semanlink.net/tag/princeton +http://www.semanlink.net/tag/princeton|broader_prefLabel|Universités américaines +http://www.semanlink.net/tag/don_t_waste_my_time|creationTime|2018-12-19T13:09:36Z +http://www.semanlink.net/tag/don_t_waste_my_time|prefLabel|Don't waste my time +http://www.semanlink.net/tag/don_t_waste_my_time|broader|http://www.semanlink.net/tag/temps +http://www.semanlink.net/tag/don_t_waste_my_time|creationDate|2018-12-19 +http://www.semanlink.net/tag/don_t_waste_my_time|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/don_t_waste_my_time|uri|http://www.semanlink.net/tag/don_t_waste_my_time +http://www.semanlink.net/tag/don_t_waste_my_time|broader_prefLabel|Temps +http://www.semanlink.net/tag/dean_allemang|creationTime|2010-08-24T22:40:11Z +http://www.semanlink.net/tag/dean_allemang|prefLabel|Dean Allemang +http://www.semanlink.net/tag/dean_allemang|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/dean_allemang|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/dean_allemang|broader|http://www.semanlink.net/tag/topquadrant +http://www.semanlink.net/tag/dean_allemang|creationDate|2010-08-24 +http://www.semanlink.net/tag/dean_allemang|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dean_allemang|describedBy|http://www.semanticuniverse.com/profiles/dean-allemang +http://www.semanlink.net/tag/dean_allemang|uri|http://www.semanlink.net/tag/dean_allemang +http://www.semanlink.net/tag/dean_allemang|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/dean_allemang|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/dean_allemang|broader_prefLabel|TopQuadrant +http://www.semanlink.net/tag/dean_allemang|broader_altLabel|Technical guys +http://www.semanlink.net/tag/dean_allemang|broader_altLabel|www.topquadrant.com +http://www.semanlink.net/tag/technology_enhanced_learning|creationTime|2011-12-22T22:56:07Z +http://www.semanlink.net/tag/technology_enhanced_learning|prefLabel|Technology Enhanced Learning +http://www.semanlink.net/tag/technology_enhanced_learning|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/technology_enhanced_learning|creationDate|2011-12-22 +http://www.semanlink.net/tag/technology_enhanced_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/technology_enhanced_learning|uri|http://www.semanlink.net/tag/technology_enhanced_learning +http://www.semanlink.net/tag/technology_enhanced_learning|broader_prefLabel|Education +http://www.semanlink.net/tag/technology_enhanced_learning|broader_altLabel|Enseignement +http://www.semanlink.net/tag/hash_uris|creationTime|2007-04-03T23:29:07Z +http://www.semanlink.net/tag/hash_uris|prefLabel|Hash URIs 
+http://www.semanlink.net/tag/hash_uris|broader|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/hash_uris|creationDate|2007-04-03 +http://www.semanlink.net/tag/hash_uris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hash_uris|uri|http://www.semanlink.net/tag/hash_uris +http://www.semanlink.net/tag/hash_uris|broader_prefLabel|httpRange-14 +http://www.semanlink.net/tag/hash_uris|broader_altLabel|303-redirect +http://www.semanlink.net/tag/hash_uris|broader_related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/hash_uris|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/cloud|creationTime|2011-03-08T09:04:53Z +http://www.semanlink.net/tag/cloud|prefLabel|Cloud +http://www.semanlink.net/tag/cloud|creationDate|2011-03-08 +http://www.semanlink.net/tag/cloud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cloud|altLabel|Cloud computing +http://www.semanlink.net/tag/cloud|uri|http://www.semanlink.net/tag/cloud +http://www.semanlink.net/tag/dan_brickley|prefLabel|Dan Brickley +http://www.semanlink.net/tag/dan_brickley|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/dan_brickley|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/dan_brickley|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/dan_brickley|related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/dan_brickley|related|http://www.semanlink.net/tag/foaf +http://www.semanlink.net/tag/dan_brickley|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dan_brickley|altLabel|danbri +http://www.semanlink.net/tag/dan_brickley|uri|http://www.semanlink.net/tag/dan_brickley +http://www.semanlink.net/tag/dan_brickley|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/dan_brickley|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/dan_brickley|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/dan_brickley|broader_altLabel|Technical guys +http://www.semanlink.net/tag/dan_brickley|broader_altLabel|sw +http://www.semanlink.net/tag/dan_brickley|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/semantics_of_skos_concept|creationTime|2008-05-12T19:23:52Z +http://www.semanlink.net/tag/semantics_of_skos_concept|prefLabel|Semantics of skos:Concept +http://www.semanlink.net/tag/semantics_of_skos_concept|broader|http://www.semanlink.net/tag/skos +http://www.semanlink.net/tag/semantics_of_skos_concept|related|http://www.semanlink.net/tag/skos_owl +http://www.semanlink.net/tag/semantics_of_skos_concept|creationDate|2008-05-12 +http://www.semanlink.net/tag/semantics_of_skos_concept|comment|"The class skos:Concept is the class of SKOS conceptual resources. + +A conceptual resource can be viewed as an idea or notion; a unit of thought. However, what constitutes a ""unit of thought"" is subjective, and this definition is meant to be suggestive, rather than restrictive. 
+(see http://www.w3.org/TR/2008/WD-skos-reference-20080125/#L1289)" +http://www.semanlink.net/tag/semantics_of_skos_concept|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantics_of_skos_concept|uri|http://www.semanlink.net/tag/semantics_of_skos_concept +http://www.semanlink.net/tag/semantics_of_skos_concept|broader_prefLabel|SKOS +http://www.semanlink.net/tag/semantics_of_skos_concept|broader_related|http://www.semanlink.net/tag/thesaurus +http://www.semanlink.net/tag/vue_js|creationTime|2019-11-19T11:35:51Z +http://www.semanlink.net/tag/vue_js|prefLabel|Vue.js +http://www.semanlink.net/tag/vue_js|broader|http://www.semanlink.net/tag/javascript_frameork +http://www.semanlink.net/tag/vue_js|creationDate|2019-11-19 +http://www.semanlink.net/tag/vue_js|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vue_js|homepage|https://vuejs.org/ +http://www.semanlink.net/tag/vue_js|altLabel|vuejs +http://www.semanlink.net/tag/vue_js|uri|http://www.semanlink.net/tag/vue_js +http://www.semanlink.net/tag/vue_js|broader_prefLabel|Javascript framework +http://www.semanlink.net/tag/machine_learned_ranking|creationTime|2017-06-27T11:11:59Z +http://www.semanlink.net/tag/machine_learned_ranking|prefLabel|Learning to rank +http://www.semanlink.net/tag/machine_learned_ranking|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/machine_learned_ranking|broader|http://www.semanlink.net/tag/supervised_machine_learning +http://www.semanlink.net/tag/machine_learned_ranking|broader|http://www.semanlink.net/tag/ranking_information_retrieval +http://www.semanlink.net/tag/machine_learned_ranking|related|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/tag/machine_learned_ranking|related|http://www.semanlink.net/tag/okapi_bm25 +http://www.semanlink.net/tag/machine_learned_ranking|related|http://www.semanlink.net/tag/pagerank +http://www.semanlink.net/tag/machine_learned_ranking|creationDate|2017-06-27 +http://www.semanlink.net/tag/machine_learned_ranking|comment|application of machine learning in the construction of ranking models. Training data consists of lists of items with some partial order specified between items in each list. 
+http://www.semanlink.net/tag/machine_learned_ranking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/machine_learned_ranking|describedBy|https://en.wikipedia.org/wiki/Learning_to_rank +http://www.semanlink.net/tag/machine_learned_ranking|altLabel|Machine learned ranking +http://www.semanlink.net/tag/machine_learned_ranking|uri|http://www.semanlink.net/tag/machine_learned_ranking +http://www.semanlink.net/tag/machine_learned_ranking|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/machine_learned_ranking|broader_prefLabel|Supervised machine learning +http://www.semanlink.net/tag/machine_learned_ranking|broader_prefLabel|Ranking (information retrieval) +http://www.semanlink.net/tag/bert_kb|creationTime|2020-10-17T12:26:56Z +http://www.semanlink.net/tag/bert_kb|prefLabel|BERT + KB +http://www.semanlink.net/tag/bert_kb|broader|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/tag/bert_kb|broader|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/bert_kb|broader|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/bert_kb|broader|http://www.semanlink.net/tag/language_models_knowledge +http://www.semanlink.net/tag/bert_kb|creationDate|2020-10-17 +http://www.semanlink.net/tag/bert_kb|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bert_kb|altLabel|BERT + Knowledge Graphs +http://www.semanlink.net/tag/bert_kb|altLabel|BERT + Knowledge Bases +http://www.semanlink.net/tag/bert_kb|uri|http://www.semanlink.net/tag/bert_kb +http://www.semanlink.net/tag/bert_kb|broader_prefLabel|Knowledge Graphs in NLP +http://www.semanlink.net/tag/bert_kb|broader_prefLabel|BERT +http://www.semanlink.net/tag/bert_kb|broader_prefLabel|Knowledge Graphs and NLP +http://www.semanlink.net/tag/bert_kb|broader_prefLabel|Language Models + Knowledge +http://www.semanlink.net/tag/bert_kb|broader_altLabel|KG + NLP +http://www.semanlink.net/tag/bert_kb|broader_altLabel|Knowledge Graphs + Text +http://www.semanlink.net/tag/bert_kb|broader_related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/bert_kb|broader_related|http://www.semanlink.net/tag/nlp_using_knowledge +http://www.semanlink.net/tag/bert_kb|broader_related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/bert_kb|broader_related|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp +http://www.semanlink.net/tag/bert_kb|broader_related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/bert_kb|broader_related|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/livre_a_lire|creationTime|2007-03-20T21:48:50Z +http://www.semanlink.net/tag/livre_a_lire|prefLabel|Livre à lire +http://www.semanlink.net/tag/livre_a_lire|broader|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/livre_a_lire|creationDate|2007-03-20 +http://www.semanlink.net/tag/livre_a_lire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/livre_a_lire|uri|http://www.semanlink.net/tag/livre_a_lire +http://www.semanlink.net/tag/livre_a_lire|broader_prefLabel|Livre +http://www.semanlink.net/tag/livre_a_lire|broader_altLabel|Livres +http://www.semanlink.net/tag/relational_inductive_biases|creationTime|2019-08-25T12:55:59Z +http://www.semanlink.net/tag/relational_inductive_biases|prefLabel|Relational inductive biases 
+http://www.semanlink.net/tag/relational_inductive_biases|broader|http://www.semanlink.net/tag/inductive_bias +http://www.semanlink.net/tag/relational_inductive_biases|creationDate|2019-08-25 +http://www.semanlink.net/tag/relational_inductive_biases|comment|"inductive biases which impose constraints on relationships and interactions among entities in a learning process +" +http://www.semanlink.net/tag/relational_inductive_biases|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/relational_inductive_biases|altLabel|Relational inductive bias +http://www.semanlink.net/tag/relational_inductive_biases|uri|http://www.semanlink.net/tag/relational_inductive_biases +http://www.semanlink.net/tag/relational_inductive_biases|broader_prefLabel|Inductive bias +http://www.semanlink.net/tag/relational_inductive_biases|broader_altLabel|Learning bias +http://www.semanlink.net/tag/information_retrieval|prefLabel|Information retrieval +http://www.semanlink.net/tag/information_retrieval|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/information_retrieval|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/information_retrieval|comment|"process of searching +and returning relevant documents for a query from +a collection" +http://www.semanlink.net/tag/information_retrieval|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/information_retrieval|describedBy|https://en.wikipedia.org/wiki/Information_retrieval +http://www.semanlink.net/tag/information_retrieval|altLabel|IR +http://www.semanlink.net/tag/information_retrieval|uri|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/information_retrieval|broader_prefLabel|Informatique +http://www.semanlink.net/tag/information_retrieval|broader_prefLabel|Technologie +http://www.semanlink.net/tag/outliner|prefLabel|Outliner +http://www.semanlink.net/tag/outliner|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/outliner|uri|http://www.semanlink.net/tag/outliner +http://www.semanlink.net/tag/prohibition_des_narcotiques|creationTime|2013-01-22T22:23:18Z +http://www.semanlink.net/tag/prohibition_des_narcotiques|prefLabel|Prohibition des narcotiques +http://www.semanlink.net/tag/prohibition_des_narcotiques|broader|http://www.semanlink.net/tag/drogues +http://www.semanlink.net/tag/prohibition_des_narcotiques|broader|http://www.semanlink.net/tag/trafic_de_drogue +http://www.semanlink.net/tag/prohibition_des_narcotiques|broader|http://www.semanlink.net/tag/prohibition +http://www.semanlink.net/tag/prohibition_des_narcotiques|creationDate|2013-01-22 +http://www.semanlink.net/tag/prohibition_des_narcotiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/prohibition_des_narcotiques|uri|http://www.semanlink.net/tag/prohibition_des_narcotiques +http://www.semanlink.net/tag/prohibition_des_narcotiques|broader_prefLabel|Drogues +http://www.semanlink.net/tag/prohibition_des_narcotiques|broader_prefLabel|Trafic de drogue +http://www.semanlink.net/tag/prohibition_des_narcotiques|broader_prefLabel|Prohibition +http://www.semanlink.net/tag/manu_dibango|creationTime|2009-11-05T19:09:42Z +http://www.semanlink.net/tag/manu_dibango|prefLabel|Manu Dibango +http://www.semanlink.net/tag/manu_dibango|broader|http://www.semanlink.net/tag/i_like +http://www.semanlink.net/tag/manu_dibango|broader|http://www.semanlink.net/tag/musicien 
+http://www.semanlink.net/tag/manu_dibango|broader|http://www.semanlink.net/tag/jazz +http://www.semanlink.net/tag/manu_dibango|broader|http://www.semanlink.net/tag/music_of_africa +http://www.semanlink.net/tag/manu_dibango|creationDate|2009-11-05 +http://www.semanlink.net/tag/manu_dibango|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/manu_dibango|uri|http://www.semanlink.net/tag/manu_dibango +http://www.semanlink.net/tag/manu_dibango|broader_prefLabel|I like +http://www.semanlink.net/tag/manu_dibango|broader_prefLabel|I like +http://www.semanlink.net/tag/manu_dibango|broader_prefLabel|Musicien +http://www.semanlink.net/tag/manu_dibango|broader_prefLabel|Jazz +http://www.semanlink.net/tag/manu_dibango|broader_prefLabel|Music of Africa +http://www.semanlink.net/tag/manu_dibango|broader_altLabel|Musique africaine +http://www.semanlink.net/tag/manu_dibango|broader_altLabel|African music +http://www.semanlink.net/tag/dao_attack|creationTime|2016-06-20T10:32:05Z +http://www.semanlink.net/tag/dao_attack|prefLabel|DAO attack +http://www.semanlink.net/tag/dao_attack|broader|http://www.semanlink.net/tag/ethereum +http://www.semanlink.net/tag/dao_attack|broader|http://www.semanlink.net/tag/hack +http://www.semanlink.net/tag/dao_attack|broader|http://www.semanlink.net/tag/the_dao +http://www.semanlink.net/tag/dao_attack|creationDate|2016-06-20 +http://www.semanlink.net/tag/dao_attack|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dao_attack|uri|http://www.semanlink.net/tag/dao_attack +http://www.semanlink.net/tag/dao_attack|broader_prefLabel|Ethereum +http://www.semanlink.net/tag/dao_attack|broader_prefLabel|Hack +http://www.semanlink.net/tag/dao_attack|broader_prefLabel|The DAO +http://www.semanlink.net/tag/dao_attack|broader_related|http://www.semanlink.net/tag/bitcoin +http://www.semanlink.net/tag/dao_attack|broader_related|http://www.semanlink.net/tag/dao_attack +http://www.semanlink.net/tag/linked_data_dev|creationTime|2011-12-27T19:18:13Z +http://www.semanlink.net/tag/linked_data_dev|prefLabel|Linked Data Dev +http://www.semanlink.net/tag/linked_data_dev|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_dev|creationDate|2011-12-27 +http://www.semanlink.net/tag/linked_data_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_dev|uri|http://www.semanlink.net/tag/linked_data_dev +http://www.semanlink.net/tag/linked_data_dev|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_dev|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/musicbrainz 
+http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data_dev|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/metropolitan_museum_of_art|prefLabel|Metropolitan Museum of Art +http://www.semanlink.net/tag/metropolitan_museum_of_art|broader|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/metropolitan_museum_of_art|broader|http://www.semanlink.net/tag/new_york +http://www.semanlink.net/tag/metropolitan_museum_of_art|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/metropolitan_museum_of_art|describedBy|https://en.wikipedia.org/wiki/Metropolitan_Museum_of_Art +http://www.semanlink.net/tag/metropolitan_museum_of_art|altLabel|The MET +http://www.semanlink.net/tag/metropolitan_museum_of_art|uri|http://www.semanlink.net/tag/metropolitan_museum_of_art +http://www.semanlink.net/tag/metropolitan_museum_of_art|broader_prefLabel|Musée +http://www.semanlink.net/tag/metropolitan_museum_of_art|broader_prefLabel|New York +http://www.semanlink.net/tag/google_structured_data_testing_tool|creationTime|2017-09-26T15:49:05Z +http://www.semanlink.net/tag/google_structured_data_testing_tool|prefLabel|Google Structured Data Testing Tool +http://www.semanlink.net/tag/google_structured_data_testing_tool|broader|http://www.semanlink.net/tag/web_tools +http://www.semanlink.net/tag/google_structured_data_testing_tool|broader|http://www.semanlink.net/tag/seo +http://www.semanlink.net/tag/google_structured_data_testing_tool|broader|http://www.semanlink.net/tag/google_seo +http://www.semanlink.net/tag/google_structured_data_testing_tool|related|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/google_structured_data_testing_tool|creationDate|2017-09-26 +http://www.semanlink.net/tag/google_structured_data_testing_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_structured_data_testing_tool|homepage|https://search.google.com/structured-data/testing-tool +http://www.semanlink.net/tag/google_structured_data_testing_tool|uri|http://www.semanlink.net/tag/google_structured_data_testing_tool +http://www.semanlink.net/tag/google_structured_data_testing_tool|broader_prefLabel|Web tools +http://www.semanlink.net/tag/google_structured_data_testing_tool|broader_prefLabel|SEO +http://www.semanlink.net/tag/google_structured_data_testing_tool|broader_prefLabel|Google: SEO +http://www.semanlink.net/tag/google_structured_data_testing_tool|broader_related|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/russie|prefLabel|Russie +http://www.semanlink.net/tag/russie|broader|http://www.semanlink.net/tag/urss +http://www.semanlink.net/tag/russie|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/russie|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/russie|broader|http://www.semanlink.net/tag/europe 
+http://www.semanlink.net/tag/russie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/russie|uri|http://www.semanlink.net/tag/russie +http://www.semanlink.net/tag/russie|broader_prefLabel|Ex URSS +http://www.semanlink.net/tag/russie|broader_prefLabel|URSS +http://www.semanlink.net/tag/russie|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/russie|broader_prefLabel|Asie +http://www.semanlink.net/tag/russie|broader_prefLabel|Europe +http://www.semanlink.net/tag/age_du_bronze|prefLabel|Âge du bronze +http://www.semanlink.net/tag/age_du_bronze|broader|http://www.semanlink.net/tag/prehistoire +http://www.semanlink.net/tag/age_du_bronze|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/age_du_bronze|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/age_du_bronze|uri|http://www.semanlink.net/tag/age_du_bronze +http://www.semanlink.net/tag/age_du_bronze|broader_prefLabel|Préhistoire +http://www.semanlink.net/tag/age_du_bronze|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/peace_corps|prefLabel|Peace Corps +http://www.semanlink.net/tag/peace_corps|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/peace_corps|broader|http://www.semanlink.net/tag/cooperation +http://www.semanlink.net/tag/peace_corps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/peace_corps|uri|http://www.semanlink.net/tag/peace_corps +http://www.semanlink.net/tag/peace_corps|broader_prefLabel|USA +http://www.semanlink.net/tag/peace_corps|broader_prefLabel|Coopération +http://www.semanlink.net/tag/peace_corps|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/peace_corps|broader_altLabel|United States +http://www.semanlink.net/tag/open_knowledge_network|creationTime|2017-10-21T11:44:58Z +http://www.semanlink.net/tag/open_knowledge_network|prefLabel|Open Knowledge Network +http://www.semanlink.net/tag/open_knowledge_network|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/open_knowledge_network|related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/open_knowledge_network|creationDate|2017-10-21 +http://www.semanlink.net/tag/open_knowledge_network|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/open_knowledge_network|uri|http://www.semanlink.net/tag/open_knowledge_network +http://www.semanlink.net/tag/open_knowledge_network|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/open_knowledge_network|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/open_knowledge_network|broader_altLabel|KG +http://www.semanlink.net/tag/chef_d_etat|prefLabel|Chef d'état +http://www.semanlink.net/tag/chef_d_etat|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/chef_d_etat|broader|http://www.semanlink.net/tag/homme_celebre +http://www.semanlink.net/tag/chef_d_etat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chef_d_etat|uri|http://www.semanlink.net/tag/chef_d_etat +http://www.semanlink.net/tag/chef_d_etat|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/chef_d_etat|broader_prefLabel|Homme célèbre +http://www.semanlink.net/tag/owl_editor|prefLabel|OWL editor +http://www.semanlink.net/tag/owl_editor|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owl_editor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/owl_editor|uri|http://www.semanlink.net/tag/owl_editor +http://www.semanlink.net/tag/owl_editor|broader_prefLabel|OWL +http://www.semanlink.net/tag/owl_editor|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/library_of_congress|creationTime|2008-06-12T08:21:44Z +http://www.semanlink.net/tag/library_of_congress|prefLabel|Library of Congress +http://www.semanlink.net/tag/library_of_congress|creationDate|2008-06-12 +http://www.semanlink.net/tag/library_of_congress|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/library_of_congress|uri|http://www.semanlink.net/tag/library_of_congress +http://www.semanlink.net/tag/word2vec_howto|creationTime|2017-07-20T13:44:48Z +http://www.semanlink.net/tag/word2vec_howto|prefLabel|Word2vec: howto +http://www.semanlink.net/tag/word2vec_howto|broader|http://www.semanlink.net/tag/using_word_embedding +http://www.semanlink.net/tag/word2vec_howto|broader|http://www.semanlink.net/tag/word2vec +http://www.semanlink.net/tag/word2vec_howto|creationDate|2017-07-20 +http://www.semanlink.net/tag/word2vec_howto|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/word2vec_howto|uri|http://www.semanlink.net/tag/word2vec_howto +http://www.semanlink.net/tag/word2vec_howto|broader_prefLabel|Using word embeddings +http://www.semanlink.net/tag/word2vec_howto|broader_prefLabel|Word2vec +http://www.semanlink.net/tag/word2vec_howto|broader_related|http://www.semanlink.net/tag/latent_semantic_analysis +http://www.semanlink.net/tag/language_models_size|creationTime|2021-01-23T16:49:51Z +http://www.semanlink.net/tag/language_models_size|prefLabel|Language Models: size +http://www.semanlink.net/tag/language_models_size|broader|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/tag/language_models_size|related|http://www.semanlink.net/tag/stochastic_parrots +http://www.semanlink.net/tag/language_models_size|creationDate|2021-01-23 +http://www.semanlink.net/tag/language_models_size|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/language_models_size|uri|http://www.semanlink.net/tag/language_models_size +http://www.semanlink.net/tag/language_models_size|broader_prefLabel|Language model +http://www.semanlink.net/tag/language_models_size|broader_altLabel|Language Modeling +http://www.semanlink.net/tag/language_models_size|broader_altLabel|LM +http://www.semanlink.net/tag/language_models_size|broader_altLabel|Statistical Language Model +http://www.semanlink.net/tag/language_models_size|broader_related|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/language_models_size|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/galileo|prefLabel|Galileo +http://www.semanlink.net/tag/galileo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/galileo|uri|http://www.semanlink.net/tag/galileo +http://www.semanlink.net/tag/rdf_driven_web_sites|creationTime|2007-05-31T01:20:57Z +http://www.semanlink.net/tag/rdf_driven_web_sites|prefLabel|RDF-driven web sites +http://www.semanlink.net/tag/rdf_driven_web_sites|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_driven_web_sites|related|http://www.semanlink.net/tag/semantic_markup_in_html +http://www.semanlink.net/tag/rdf_driven_web_sites|creationDate|2007-05-31 
+http://www.semanlink.net/tag/rdf_driven_web_sites|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_driven_web_sites|uri|http://www.semanlink.net/tag/rdf_driven_web_sites +http://www.semanlink.net/tag/rdf_driven_web_sites|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_driven_web_sites|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_driven_web_sites|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_driven_web_sites|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_driven_web_sites|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_driven_web_sites|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/film_danois|prefLabel|Film danois +http://www.semanlink.net/tag/film_danois|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_danois|broader|http://www.semanlink.net/tag/danemark +http://www.semanlink.net/tag/film_danois|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_danois|uri|http://www.semanlink.net/tag/film_danois +http://www.semanlink.net/tag/film_danois|broader_prefLabel|Film +http://www.semanlink.net/tag/film_danois|broader_prefLabel|Danemark +http://www.semanlink.net/tag/14_juillet|creationTime|2007-07-14T00:54:38Z +http://www.semanlink.net/tag/14_juillet|prefLabel|14 juillet +http://www.semanlink.net/tag/14_juillet|broader|http://www.semanlink.net/tag/revolution_francaise +http://www.semanlink.net/tag/14_juillet|broader|http://www.semanlink.net/tag/fete_nationale +http://www.semanlink.net/tag/14_juillet|creationDate|2007-07-14 +http://www.semanlink.net/tag/14_juillet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/14_juillet|uri|http://www.semanlink.net/tag/14_juillet +http://www.semanlink.net/tag/14_juillet|broader_prefLabel|Révolution française +http://www.semanlink.net/tag/14_juillet|broader_prefLabel|Fête nationale +http://www.semanlink.net/tag/genetic_data|prefLabel|Genetic data +http://www.semanlink.net/tag/genetic_data|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/genetic_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/genetic_data|uri|http://www.semanlink.net/tag/genetic_data +http://www.semanlink.net/tag/genetic_data|broader_prefLabel|Genetics +http://www.semanlink.net/tag/genetic_data|broader_prefLabel|Génétique +http://www.semanlink.net/tag/identity_crisis_in_linked_data|creationTime|2011-12-17T15:21:13Z +http://www.semanlink.net/tag/identity_crisis_in_linked_data|prefLabel|Identity Crisis in Linked Data +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/identity_crisis_in_linked_data|creationDate|2011-12-17 +http://www.semanlink.net/tag/identity_crisis_in_linked_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/identity_crisis_in_linked_data|uri|http://www.semanlink.net/tag/identity_crisis_in_linked_data +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_prefLabel|URI +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_prefLabel|Linked Data 
+http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_altLabel|LD +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/identity_crisis_in_linked_data|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/online_course_materials|creationTime|2007-10-05T21:12:59Z +http://www.semanlink.net/tag/online_course_materials|prefLabel|Online Course Materials +http://www.semanlink.net/tag/online_course_materials|broader|http://www.semanlink.net/tag/e_learning +http://www.semanlink.net/tag/online_course_materials|broader|http://www.semanlink.net/tag/mooc +http://www.semanlink.net/tag/online_course_materials|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/online_course_materials|creationDate|2007-10-05 +http://www.semanlink.net/tag/online_course_materials|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/online_course_materials|uri|http://www.semanlink.net/tag/online_course_materials +http://www.semanlink.net/tag/online_course_materials|broader_prefLabel|Online Learning +http://www.semanlink.net/tag/online_course_materials|broader_prefLabel|MOOC +http://www.semanlink.net/tag/online_course_materials|broader_prefLabel|Education +http://www.semanlink.net/tag/online_course_materials|broader_altLabel|Pédagogie numérique +http://www.semanlink.net/tag/online_course_materials|broader_altLabel|Enseignement +http://www.semanlink.net/tag/horizontal_gene_transfer|creationTime|2011-04-04T15:15:35Z +http://www.semanlink.net/tag/horizontal_gene_transfer|prefLabel|Horizontal gene transfer +http://www.semanlink.net/tag/horizontal_gene_transfer|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/horizontal_gene_transfer|broader|http://www.semanlink.net/tag/genetique_et_evolution +http://www.semanlink.net/tag/horizontal_gene_transfer|related|http://www.semanlink.net/tag/ogm +http://www.semanlink.net/tag/horizontal_gene_transfer|creationDate|2011-04-04 
+http://www.semanlink.net/tag/horizontal_gene_transfer|comment|Movement of genes between unrelated species. Transfer of genes between organisms in a manner other than traditional reproduction +http://www.semanlink.net/tag/horizontal_gene_transfer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/horizontal_gene_transfer|describedBy|https://en.wikipedia.org/wiki/Horizontal_gene_transfer +http://www.semanlink.net/tag/horizontal_gene_transfer|altLabel|Lateral gene transfer +http://www.semanlink.net/tag/horizontal_gene_transfer|uri|http://www.semanlink.net/tag/horizontal_gene_transfer +http://www.semanlink.net/tag/horizontal_gene_transfer|broader_prefLabel|Genetics +http://www.semanlink.net/tag/horizontal_gene_transfer|broader_prefLabel|Génétique +http://www.semanlink.net/tag/horizontal_gene_transfer|broader_prefLabel|Génétique et Évolution +http://www.semanlink.net/tag/querying_remote_sparql_services|creationTime|2008-01-04T01:23:31Z +http://www.semanlink.net/tag/querying_remote_sparql_services|prefLabel|Querying Remote SPARQL Services +http://www.semanlink.net/tag/querying_remote_sparql_services|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/querying_remote_sparql_services|creationDate|2008-01-04 +http://www.semanlink.net/tag/querying_remote_sparql_services|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/querying_remote_sparql_services|uri|http://www.semanlink.net/tag/querying_remote_sparql_services +http://www.semanlink.net/tag/querying_remote_sparql_services|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/twitterature|creationTime|2014-05-27T14:08:01Z +http://www.semanlink.net/tag/twitterature|prefLabel|Twittérature +http://www.semanlink.net/tag/twitterature|broader|http://www.semanlink.net/tag/twitter +http://www.semanlink.net/tag/twitterature|creationDate|2014-05-27 +http://www.semanlink.net/tag/twitterature|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/twitterature|uri|http://www.semanlink.net/tag/twitterature +http://www.semanlink.net/tag/twitterature|broader_prefLabel|Twitter +http://www.semanlink.net/tag/rev|creationTime|2009-11-27T11:14:14Z +http://www.semanlink.net/tag/rev|prefLabel|Rev +http://www.semanlink.net/tag/rev|related|http://www.semanlink.net/tag/hypercard +http://www.semanlink.net/tag/rev|creationDate|2009-11-27 +http://www.semanlink.net/tag/rev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rev|uri|http://www.semanlink.net/tag/rev +http://www.semanlink.net/tag/judaisme|creationTime|2020-06-06T21:40:57Z +http://www.semanlink.net/tag/judaisme|prefLabel|Judaïsme +http://www.semanlink.net/tag/judaisme|broader|http://www.semanlink.net/tag/religion +http://www.semanlink.net/tag/judaisme|broader|http://www.semanlink.net/tag/juif +http://www.semanlink.net/tag/judaisme|creationDate|2020-06-06 +http://www.semanlink.net/tag/judaisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/judaisme|uri|http://www.semanlink.net/tag/judaisme +http://www.semanlink.net/tag/judaisme|broader_prefLabel|Religion +http://www.semanlink.net/tag/judaisme|broader_prefLabel|Juifs +http://www.semanlink.net/tag/azawad|creationTime|2013-01-22T22:27:04Z +http://www.semanlink.net/tag/azawad|prefLabel|Azawad +http://www.semanlink.net/tag/azawad|broader|http://www.semanlink.net/tag/touareg +http://www.semanlink.net/tag/azawad|creationDate|2013-01-22 
+http://www.semanlink.net/tag/azawad|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/azawad|uri|http://www.semanlink.net/tag/azawad +http://www.semanlink.net/tag/azawad|broader_prefLabel|Touareg +http://www.semanlink.net/tag/finder|creationTime|2011-07-18T23:08:46Z +http://www.semanlink.net/tag/finder|prefLabel|Finder +http://www.semanlink.net/tag/finder|creationDate|2011-07-18 +http://www.semanlink.net/tag/finder|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/finder|uri|http://www.semanlink.net/tag/finder +http://www.semanlink.net/tag/java_1_5_mac_os_x|prefLabel|Java 1.5 Mac OS X +http://www.semanlink.net/tag/java_1_5_mac_os_x|broader|http://www.semanlink.net/tag/apple_java +http://www.semanlink.net/tag/java_1_5_mac_os_x|broader|http://www.semanlink.net/tag/java_5 +http://www.semanlink.net/tag/java_1_5_mac_os_x|broader|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/tag/java_1_5_mac_os_x|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_1_5_mac_os_x|uri|http://www.semanlink.net/tag/java_1_5_mac_os_x +http://www.semanlink.net/tag/java_1_5_mac_os_x|broader_prefLabel|Apple Java +http://www.semanlink.net/tag/java_1_5_mac_os_x|broader_prefLabel|Java 5 +http://www.semanlink.net/tag/java_1_5_mac_os_x|broader_prefLabel|Mac OS X +http://www.semanlink.net/tag/java_1_5_mac_os_x|broader_altLabel|Java 1.5 +http://www.semanlink.net/tag/java_1_5_mac_os_x|broader_altLabel|OS X +http://www.semanlink.net/tag/java_1_5_mac_os_x|broader_altLabel|OSX +http://www.semanlink.net/tag/elmo|creationTime|2018-05-30T23:58:46Z +http://www.semanlink.net/tag/elmo|prefLabel|ELMo +http://www.semanlink.net/tag/elmo|broader|http://www.semanlink.net/tag/contextualised_word_representations +http://www.semanlink.net/tag/elmo|broader|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/elmo|broader|http://www.semanlink.net/tag/allennlp +http://www.semanlink.net/tag/elmo|broader|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/elmo|broader|http://www.semanlink.net/tag/deep_nlp +http://www.semanlink.net/tag/elmo|broader|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/elmo|related|http://www.semanlink.net/tag/bi_lstm +http://www.semanlink.net/tag/elmo|related|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/tag/elmo|creationDate|2018-05-30 +http://www.semanlink.net/tag/elmo|comment|"> Deep contextualized word representations + +each word is assigned a representation which is a function of the +entire corpus sentences to which they belong. The embeddings are +computed from the internal states of a two-layers bidirectional Language +Model, hence the name “ELMo”: Embeddings from Language +Models. 
+ +[Github](https://github.com/allenai/bilm-tf) + +" +http://www.semanlink.net/tag/elmo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elmo|homepage|https://allennlp.org/elmo +http://www.semanlink.net/tag/elmo|uri|http://www.semanlink.net/tag/elmo +http://www.semanlink.net/tag/elmo|broader_prefLabel|Contextualized word representations +http://www.semanlink.net/tag/elmo|broader_prefLabel|Pre-Trained Language Models +http://www.semanlink.net/tag/elmo|broader_prefLabel|AllenNLP +http://www.semanlink.net/tag/elmo|broader_prefLabel|Word embeddings +http://www.semanlink.net/tag/elmo|broader_prefLabel|Deep NLP +http://www.semanlink.net/tag/elmo|broader_prefLabel|Word sense / Lexical ambiguity +http://www.semanlink.net/tag/elmo|broader_altLabel|Contextualized word embeddings +http://www.semanlink.net/tag/elmo|broader_altLabel|PreTrained Language Models +http://www.semanlink.net/tag/elmo|broader_altLabel|Deep pre-training in NLP +http://www.semanlink.net/tag/elmo|broader_altLabel|Word Embedding +http://www.semanlink.net/tag/elmo|broader_altLabel|Plongement lexical +http://www.semanlink.net/tag/elmo|broader_altLabel|Polysemy +http://www.semanlink.net/tag/elmo|broader_related|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/elmo|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/elmo|broader_related|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/elmo|broader_related|http://www.semanlink.net/tag/pytorch +http://www.semanlink.net/tag/elmo|broader_related|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/elmo|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/elmo|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/elmo|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/zaire|prefLabel|Zaïre +http://www.semanlink.net/tag/zaire|broader|http://www.semanlink.net/tag/congo_kinshasa +http://www.semanlink.net/tag/zaire|creationDate|2006-11-24 +http://www.semanlink.net/tag/zaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zaire|uri|http://www.semanlink.net/tag/zaire +http://www.semanlink.net/tag/zaire|broader_prefLabel|RDC +http://www.semanlink.net/tag/zaire|broader_altLabel|République démocratique du Congo +http://www.semanlink.net/tag/eigenvectors|creationTime|2019-11-14T19:22:24Z +http://www.semanlink.net/tag/eigenvectors|prefLabel|EigenVectors +http://www.semanlink.net/tag/eigenvectors|broader|http://www.semanlink.net/tag/linear_algebra +http://www.semanlink.net/tag/eigenvectors|creationDate|2019-11-14 +http://www.semanlink.net/tag/eigenvectors|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eigenvectors|uri|http://www.semanlink.net/tag/eigenvectors +http://www.semanlink.net/tag/eigenvectors|broader_prefLabel|Linear algebra +http://www.semanlink.net/tag/eigenvectors|broader_altLabel|Algèbre linéaire +http://www.semanlink.net/tag/nlp_data_anonymization|creationTime|2021-02-01T08:14:00Z +http://www.semanlink.net/tag/nlp_data_anonymization|prefLabel|NLP: Data Anonymization +http://www.semanlink.net/tag/nlp_data_anonymization|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/nlp_data_anonymization|broader|http://www.semanlink.net/tag/named_entity_recognition 
+http://www.semanlink.net/tag/nlp_data_anonymization|creationDate|2021-02-01
+http://www.semanlink.net/tag/nlp_data_anonymization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nlp_data_anonymization|uri|http://www.semanlink.net/tag/nlp_data_anonymization
+http://www.semanlink.net/tag/nlp_data_anonymization|broader_prefLabel|NLP tasks / problems
+http://www.semanlink.net/tag/nlp_data_anonymization|broader_prefLabel|Named Entity Recognition
+http://www.semanlink.net/tag/nlp_data_anonymization|broader_altLabel|NER
+http://www.semanlink.net/tag/ui|creationTime|2012-09-20T14:38:17Z
+http://www.semanlink.net/tag/ui|prefLabel|UI
+http://www.semanlink.net/tag/ui|creationDate|2012-09-20
+http://www.semanlink.net/tag/ui|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ui|uri|http://www.semanlink.net/tag/ui
+http://www.semanlink.net/tag/dialogs_in_javascript|prefLabel|Dialogs in javascript
+http://www.semanlink.net/tag/dialogs_in_javascript|broader|http://www.semanlink.net/tag/javascript
+http://www.semanlink.net/tag/dialogs_in_javascript|creationDate|2006-10-11
+http://www.semanlink.net/tag/dialogs_in_javascript|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/dialogs_in_javascript|uri|http://www.semanlink.net/tag/dialogs_in_javascript
+http://www.semanlink.net/tag/dialogs_in_javascript|broader_prefLabel|JavaScript
+http://www.semanlink.net/tag/dialogs_in_javascript|broader_altLabel|js
+http://www.semanlink.net/tag/crise_financiere|creationTime|2007-09-10T19:28:44Z
+http://www.semanlink.net/tag/crise_financiere|prefLabel|Crise financière
+http://www.semanlink.net/tag/crise_financiere|broader|http://www.semanlink.net/tag/finance
+http://www.semanlink.net/tag/crise_financiere|broader|http://www.semanlink.net/tag/marches_financiers
+http://www.semanlink.net/tag/crise_financiere|broader|http://www.semanlink.net/tag/money
+http://www.semanlink.net/tag/crise_financiere|creationDate|2007-09-10
+http://www.semanlink.net/tag/crise_financiere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/crise_financiere|uri|http://www.semanlink.net/tag/crise_financiere
+http://www.semanlink.net/tag/crise_financiere|broader_prefLabel|Finance
+http://www.semanlink.net/tag/crise_financiere|broader_prefLabel|Marchés financiers
+http://www.semanlink.net/tag/crise_financiere|broader_prefLabel|Money
+http://www.semanlink.net/tag/crise_financiere|broader_altLabel|Monnaie
+http://www.semanlink.net/tag/venus_de_brassempouy|creationTime|2007-11-26T01:19:58Z
+http://www.semanlink.net/tag/venus_de_brassempouy|prefLabel|Venus de Brassempouy
+http://www.semanlink.net/tag/venus_de_brassempouy|broader|http://www.semanlink.net/tag/paleolithique
+http://www.semanlink.net/tag/venus_de_brassempouy|broader|http://www.semanlink.net/tag/arts_premiers
+http://www.semanlink.net/tag/venus_de_brassempouy|broader|http://www.semanlink.net/tag/venus_prehistoriques
+http://www.semanlink.net/tag/venus_de_brassempouy|creationDate|2007-11-26
+http://www.semanlink.net/tag/venus_de_brassempouy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/venus_de_brassempouy|uri|http://www.semanlink.net/tag/venus_de_brassempouy
+http://www.semanlink.net/tag/venus_de_brassempouy|broader_prefLabel|Paléolithique
+http://www.semanlink.net/tag/venus_de_brassempouy|broader_prefLabel|Arts premiers
+http://www.semanlink.net/tag/venus_de_brassempouy|broader_prefLabel|Vénus préhistoriques
+http://www.semanlink.net/tag/data_scientists|creationTime|2018-06-17T12:27:49Z
+http://www.semanlink.net/tag/data_scientists|prefLabel|Data Scientists
+http://www.semanlink.net/tag/data_scientists|broader|http://www.semanlink.net/tag/data_science
+http://www.semanlink.net/tag/data_scientists|creationDate|2018-06-17
+http://www.semanlink.net/tag/data_scientists|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/data_scientists|uri|http://www.semanlink.net/tag/data_scientists
+http://www.semanlink.net/tag/data_scientists|broader_prefLabel|Data science
+http://www.semanlink.net/tag/data_scientists|broader_altLabel|Data analysis
+http://www.semanlink.net/tag/folie|prefLabel|Folie
+http://www.semanlink.net/tag/folie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/folie|uri|http://www.semanlink.net/tag/folie
+http://www.semanlink.net/tag/sebastian_ruder|creationTime|2017-09-18T18:47:16Z
+http://www.semanlink.net/tag/sebastian_ruder|prefLabel|Sebastian Ruder
+http://www.semanlink.net/tag/sebastian_ruder|broader|http://www.semanlink.net/tag/nlp_girls_and_guys
+http://www.semanlink.net/tag/sebastian_ruder|creationDate|2017-09-18
+http://www.semanlink.net/tag/sebastian_ruder|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sebastian_ruder|homepage|http://ruder.io/
+http://www.semanlink.net/tag/sebastian_ruder|uri|http://www.semanlink.net/tag/sebastian_ruder
+http://www.semanlink.net/tag/sebastian_ruder|broader_prefLabel|NLP girls and guys
+http://www.semanlink.net/tag/brouteur|prefLabel|Brouteur
+http://www.semanlink.net/tag/brouteur|broader|http://www.semanlink.net/tag/web
+http://www.semanlink.net/tag/brouteur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/brouteur|altLabel|Browser
+http://www.semanlink.net/tag/brouteur|uri|http://www.semanlink.net/tag/brouteur
+http://www.semanlink.net/tag/brouteur|broader_prefLabel|Web
+http://www.semanlink.net/tag/stromatolithes|prefLabel|Stromatolithes
+http://www.semanlink.net/tag/stromatolithes|broader|http://www.semanlink.net/tag/paleontologie
+http://www.semanlink.net/tag/stromatolithes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/stromatolithes|uri|http://www.semanlink.net/tag/stromatolithes
+http://www.semanlink.net/tag/stromatolithes|broader_prefLabel|Paléontologie
+http://www.semanlink.net/tag/apache_mahout|creationTime|2013-09-09T15:23:19Z
+http://www.semanlink.net/tag/apache_mahout|prefLabel|Apache Mahout
+http://www.semanlink.net/tag/apache_mahout|broader|http://www.semanlink.net/tag/data_mining_tools
+http://www.semanlink.net/tag/apache_mahout|broader|http://www.semanlink.net/tag/machine_learning_tool
+http://www.semanlink.net/tag/apache_mahout|broader|http://www.semanlink.net/tag/hadoop
+http://www.semanlink.net/tag/apache_mahout|creationDate|2013-09-09
+http://www.semanlink.net/tag/apache_mahout|comment|Apache Mahout: Scalable machine learning and data mining
+http://www.semanlink.net/tag/apache_mahout|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/apache_mahout|describedBy|http://mahout.apache.org/
+http://www.semanlink.net/tag/apache_mahout|uri|http://www.semanlink.net/tag/apache_mahout
+http://www.semanlink.net/tag/apache_mahout|broader_prefLabel|Data mining tools
+http://www.semanlink.net/tag/apache_mahout|broader_prefLabel|Machine Learning tool
+http://www.semanlink.net/tag/apache_mahout|broader_prefLabel|Hadoop
+http://www.semanlink.net/tag/apache_mahout|broader_related|http://www.semanlink.net/tag/map_reduce
+http://www.semanlink.net/tag/dave_winer|creationTime|2007-07-17T23:14:04Z
+http://www.semanlink.net/tag/dave_winer|prefLabel|Dave Winer
+http://www.semanlink.net/tag/dave_winer|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/dave_winer|creationDate|2007-07-17
+http://www.semanlink.net/tag/dave_winer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/dave_winer|uri|http://www.semanlink.net/tag/dave_winer
+http://www.semanlink.net/tag/dave_winer|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/dave_winer|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/fleuve|prefLabel|Fleuve
+http://www.semanlink.net/tag/fleuve|broader|http://www.semanlink.net/tag/eau
+http://www.semanlink.net/tag/fleuve|broader|http://www.semanlink.net/tag/geographie
+http://www.semanlink.net/tag/fleuve|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fleuve|uri|http://www.semanlink.net/tag/fleuve
+http://www.semanlink.net/tag/fleuve|broader_prefLabel|Eau
+http://www.semanlink.net/tag/fleuve|broader_prefLabel|Géographie
+http://www.semanlink.net/tag/extreme_multi_label_classification|creationTime|2020-09-06T11:21:26Z
+http://www.semanlink.net/tag/extreme_multi_label_classification|prefLabel|Extreme Multi-label Classification
+http://www.semanlink.net/tag/extreme_multi_label_classification|broader|http://www.semanlink.net/tag/multi_label_classification
+http://www.semanlink.net/tag/extreme_multi_label_classification|broader|http://www.semanlink.net/tag/extreme_classification
+http://www.semanlink.net/tag/extreme_multi_label_classification|creationDate|2020-09-06
+http://www.semanlink.net/tag/extreme_multi_label_classification|comment|> Datasets in extreme classification exhibit fit to power-law distribution, i.e. a large fraction of labels have very few positive instances in the data distribution. Most state-of-the-art approaches for extreme multi-label classification attempt to capture correlation among labels by embedding the label matrix to a low-dimensional linear sub-space. However, in the presence of power-law distributed extremely large and diverse label spaces, structural assumptions such as low rank can be easily violated. [src](doc:2020/09/1609_02521_dismec_distribut)
+http://www.semanlink.net/tag/extreme_multi_label_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/extreme_multi_label_classification|uri|http://www.semanlink.net/tag/extreme_multi_label_classification
+http://www.semanlink.net/tag/extreme_multi_label_classification|broader_prefLabel|Multi-label classification
+http://www.semanlink.net/tag/extreme_multi_label_classification|broader_prefLabel|Extreme classification
+http://www.semanlink.net/tag/extreme_multi_label_classification|broader_altLabel|Multilabel classification
+http://www.semanlink.net/tag/extreme_multi_label_classification|broader_related|http://www.semanlink.net/tag/nlp_4_semanlink
+http://www.semanlink.net/tag/relations_franco_americaines|prefLabel|Relations franco-américaines
+http://www.semanlink.net/tag/relations_franco_americaines|broader|http://www.semanlink.net/tag/usa
+http://www.semanlink.net/tag/relations_franco_americaines|broader|http://www.semanlink.net/tag/france
+http://www.semanlink.net/tag/relations_franco_americaines|broader|http://www.semanlink.net/tag/relations_europe_usa
+http://www.semanlink.net/tag/relations_franco_americaines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/relations_franco_americaines|uri|http://www.semanlink.net/tag/relations_franco_americaines
+http://www.semanlink.net/tag/relations_franco_americaines|broader_prefLabel|USA
+http://www.semanlink.net/tag/relations_franco_americaines|broader_prefLabel|France
+http://www.semanlink.net/tag/relations_franco_americaines|broader_prefLabel|Relations Europe-USA
+http://www.semanlink.net/tag/relations_franco_americaines|broader_altLabel|ÉTATS-UNIS
+http://www.semanlink.net/tag/relations_franco_americaines|broader_altLabel|United States
+http://www.semanlink.net/tag/krakatoa|prefLabel|Krakatoa
+http://www.semanlink.net/tag/krakatoa|broader|http://www.semanlink.net/tag/volcan
+http://www.semanlink.net/tag/krakatoa|broader|http://www.semanlink.net/tag/indonesie
+http://www.semanlink.net/tag/krakatoa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/krakatoa|uri|http://www.semanlink.net/tag/krakatoa
+http://www.semanlink.net/tag/krakatoa|broader_prefLabel|Volcan
+http://www.semanlink.net/tag/krakatoa|broader_prefLabel|Indonésie
+http://www.semanlink.net/tag/mike_bergman|creationTime|2008-05-16T00:19:46Z
+http://www.semanlink.net/tag/mike_bergman|prefLabel|Mike Bergman
+http://www.semanlink.net/tag/mike_bergman|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/mike_bergman|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/mike_bergman|related|http://www.semanlink.net/tag/umbel
+http://www.semanlink.net/tag/mike_bergman|related|http://www.semanlink.net/tag/ldow2008
+http://www.semanlink.net/tag/mike_bergman|creationDate|2008-05-16
+http://www.semanlink.net/tag/mike_bergman|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mike_bergman|homepage|http://www.mkbergman.com/
+http://www.semanlink.net/tag/mike_bergman|altLabel|AI3:::Adaptive Information
+http://www.semanlink.net/tag/mike_bergman|uri|http://www.semanlink.net/tag/mike_bergman
+http://www.semanlink.net/tag/mike_bergman|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/mike_bergman|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/mike_bergman|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/fps_ldow_2013|creationTime|2013-03-24T10:34:59Z
+http://www.semanlink.net/tag/fps_ldow_2013|prefLabel|fps @ LDOW 2013
+http://www.semanlink.net/tag/fps_ldow_2013|broader|http://www.semanlink.net/tag/ldow2013
+http://www.semanlink.net/tag/fps_ldow_2013|broader|http://www.semanlink.net/tag/fps_paper
+http://www.semanlink.net/tag/fps_ldow_2013|broader|http://www.semanlink.net/tag/c2gweb
+http://www.semanlink.net/tag/fps_ldow_2013|creationDate|2013-03-24
+http://www.semanlink.net/tag/fps_ldow_2013|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fps_ldow_2013|uri|http://www.semanlink.net/tag/fps_ldow_2013
+http://www.semanlink.net/tag/fps_ldow_2013|broader_prefLabel|LDOW2013
+http://www.semanlink.net/tag/fps_ldow_2013|broader_prefLabel|fps: paper
+http://www.semanlink.net/tag/fps_ldow_2013|broader_prefLabel|C2GWeb
+http://www.semanlink.net/tag/freebase|creationTime|2010-07-18T19:29:26Z
+http://www.semanlink.net/tag/freebase|prefLabel|Freebase
+http://www.semanlink.net/tag/freebase|creationDate|2010-07-18
+http://www.semanlink.net/tag/freebase|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/freebase|describedBy|http://www.freebase.com/
+http://www.semanlink.net/tag/freebase|uri|http://www.semanlink.net/tag/freebase
+http://www.semanlink.net/tag/productivite|prefLabel|Productivité
+http://www.semanlink.net/tag/productivite|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/productivite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/productivite|uri|http://www.semanlink.net/tag/productivite
+http://www.semanlink.net/tag/productivite|broader_prefLabel|Economie
+http://www.semanlink.net/tag/productivite|broader_related|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/berlin|creationTime|2007-04-25T22:07:48Z
+http://www.semanlink.net/tag/berlin|prefLabel|Berlin
+http://www.semanlink.net/tag/berlin|broader|http://www.semanlink.net/tag/ville
+http://www.semanlink.net/tag/berlin|broader|http://www.semanlink.net/tag/allemagne
+http://www.semanlink.net/tag/berlin|creationDate|2007-04-25
+http://www.semanlink.net/tag/berlin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/berlin|uri|http://www.semanlink.net/tag/berlin
+http://www.semanlink.net/tag/berlin|broader_prefLabel|Ville
+http://www.semanlink.net/tag/berlin|broader_prefLabel|Allemagne
+http://www.semanlink.net/tag/berlin|broader_altLabel|Germany
+http://www.semanlink.net/tag/berlin|broader_altLabel|Deutschland
+http://www.semanlink.net/tag/rap|prefLabel|Rap
+http://www.semanlink.net/tag/rap|broader|http://www.semanlink.net/tag/musique
+http://www.semanlink.net/tag/rap|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rap|uri|http://www.semanlink.net/tag/rap
+http://www.semanlink.net/tag/rap|broader_prefLabel|Musique
+http://www.semanlink.net/tag/rap|broader_altLabel|Music
+http://www.semanlink.net/tag/verger_de_gado_a_niamey|prefLabel|Verger de Gado à Niamey
+http://www.semanlink.net/tag/verger_de_gado_a_niamey|broader|http://www.semanlink.net/tag/jardin
+http://www.semanlink.net/tag/verger_de_gado_a_niamey|broader|http://www.semanlink.net/tag/affaires_de_gado_a_niamey
+http://www.semanlink.net/tag/verger_de_gado_a_niamey|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/verger_de_gado_a_niamey|uri|http://www.semanlink.net/tag/verger_de_gado_a_niamey +http://www.semanlink.net/tag/verger_de_gado_a_niamey|broader_prefLabel|Jardin +http://www.semanlink.net/tag/verger_de_gado_a_niamey|broader_prefLabel|Affaires de Gado à Niamey +http://www.semanlink.net/tag/sparql_extensions|creationTime|2009-11-12T14:06:40Z +http://www.semanlink.net/tag/sparql_extensions|prefLabel|SPARQL extensions +http://www.semanlink.net/tag/sparql_extensions|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_extensions|creationDate|2009-11-12 +http://www.semanlink.net/tag/sparql_extensions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_extensions|uri|http://www.semanlink.net/tag/sparql_extensions +http://www.semanlink.net/tag/sparql_extensions|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/huggingface_transformers|creationTime|2020-07-14T18:46:31Z +http://www.semanlink.net/tag/huggingface_transformers|prefLabel|huggingface/transformers +http://www.semanlink.net/tag/huggingface_transformers|broader|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/tag/huggingface_transformers|broader|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/tag/huggingface_transformers|broader|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/tag/huggingface_transformers|creationDate|2020-07-14 +http://www.semanlink.net/tag/huggingface_transformers|comment|[Doc](https://huggingface.co/transformers/) +http://www.semanlink.net/tag/huggingface_transformers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/huggingface_transformers|homepage|https://github.com/huggingface/transformers +http://www.semanlink.net/tag/huggingface_transformers|uri|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/tag/huggingface_transformers|broader_prefLabel|Transformers +http://www.semanlink.net/tag/huggingface_transformers|broader_prefLabel|Hugging Face +http://www.semanlink.net/tag/huggingface_transformers|broader_prefLabel|GitHub project +http://www.semanlink.net/tag/huggingface_transformers|broader_altLabel|Transformer +http://www.semanlink.net/tag/huggingface_transformers|broader_altLabel|Transformers +http://www.semanlink.net/tag/huggingface_transformers|broader_altLabel|Attention is All You Need +http://www.semanlink.net/tag/huggingface_transformers|broader_altLabel|HuggingFace +http://www.semanlink.net/tag/huggingface_transformers|broader_related|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/tag/ng4j|creationTime|2007-07-14T01:24:02Z +http://www.semanlink.net/tag/ng4j|prefLabel|NG4J +http://www.semanlink.net/tag/ng4j|broader|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/ng4j|broader|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/ng4j|broader|http://www.semanlink.net/tag/named_graphs +http://www.semanlink.net/tag/ng4j|creationDate|2007-07-14 +http://www.semanlink.net/tag/ng4j|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ng4j|uri|http://www.semanlink.net/tag/ng4j +http://www.semanlink.net/tag/ng4j|broader_prefLabel|Chris Bizer +http://www.semanlink.net/tag/ng4j|broader_prefLabel|Jena +http://www.semanlink.net/tag/ng4j|broader_prefLabel|Named Graphs +http://www.semanlink.net/tag/ng4j|broader_related|http://www.semanlink.net/tag/hp 
+http://www.semanlink.net/tag/backbone_js|creationTime|2012-08-28T15:40:51Z +http://www.semanlink.net/tag/backbone_js|prefLabel|Backbone.js +http://www.semanlink.net/tag/backbone_js|broader|http://www.semanlink.net/tag/javascript_librairies +http://www.semanlink.net/tag/backbone_js|related|http://www.semanlink.net/tag/vie_vienna_iks_editables +http://www.semanlink.net/tag/backbone_js|creationDate|2012-08-28 +http://www.semanlink.net/tag/backbone_js|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/backbone_js|homepage|http://backbonejs.org/ +http://www.semanlink.net/tag/backbone_js|uri|http://www.semanlink.net/tag/backbone_js +http://www.semanlink.net/tag/backbone_js|broader_prefLabel|JavaScript librairies +http://www.semanlink.net/tag/backbone_js|broader_altLabel|JavaScript framework +http://www.semanlink.net/tag/neuroevolution|creationTime|2018-01-06T15:04:47Z +http://www.semanlink.net/tag/neuroevolution|prefLabel|Neuroevolution +http://www.semanlink.net/tag/neuroevolution|broader|http://www.semanlink.net/tag/evolutionary_computation +http://www.semanlink.net/tag/neuroevolution|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/neuroevolution|creationDate|2018-01-06 +http://www.semanlink.net/tag/neuroevolution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neuroevolution|describedBy|https://en.wikipedia.org/wiki/Neuroevolution +http://www.semanlink.net/tag/neuroevolution|uri|http://www.semanlink.net/tag/neuroevolution +http://www.semanlink.net/tag/neuroevolution|broader_prefLabel|Evolutionary computation +http://www.semanlink.net/tag/neuroevolution|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/neuroevolution|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/neuroevolution|broader_altLabel|ANN +http://www.semanlink.net/tag/neuroevolution|broader_altLabel|NN +http://www.semanlink.net/tag/neural_bag_of_words|creationTime|2018-05-29T16:53:18Z +http://www.semanlink.net/tag/neural_bag_of_words|prefLabel|Neural Bag of Words +http://www.semanlink.net/tag/neural_bag_of_words|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/neural_bag_of_words|broader|http://www.semanlink.net/tag/bag_of_words +http://www.semanlink.net/tag/neural_bag_of_words|creationDate|2018-05-29 +http://www.semanlink.net/tag/neural_bag_of_words|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neural_bag_of_words|uri|http://www.semanlink.net/tag/neural_bag_of_words +http://www.semanlink.net/tag/neural_bag_of_words|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/neural_bag_of_words|broader_prefLabel|Bag-of-words +http://www.semanlink.net/tag/pekin_2008|creationTime|2008-08-08T18:27:06Z +http://www.semanlink.net/tag/pekin_2008|prefLabel|Pékin 2008 +http://www.semanlink.net/tag/pekin_2008|broader|http://www.semanlink.net/tag/jeux_olympiques +http://www.semanlink.net/tag/pekin_2008|broader|http://www.semanlink.net/tag/pekin +http://www.semanlink.net/tag/pekin_2008|creationDate|2008-08-08 +http://www.semanlink.net/tag/pekin_2008|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pekin_2008|uri|http://www.semanlink.net/tag/pekin_2008 +http://www.semanlink.net/tag/pekin_2008|broader_prefLabel|Jeux Olympiques +http://www.semanlink.net/tag/pekin_2008|broader_prefLabel|Pékin 
+http://www.semanlink.net/tag/cross_lingual_nlp|creationTime|2017-11-12T02:37:45Z +http://www.semanlink.net/tag/cross_lingual_nlp|prefLabel|Cross-lingual NLP +http://www.semanlink.net/tag/cross_lingual_nlp|broader|http://www.semanlink.net/tag/multilingual_nlp +http://www.semanlink.net/tag/cross_lingual_nlp|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/cross_lingual_nlp|related|http://www.semanlink.net/tag/nlp_french +http://www.semanlink.net/tag/cross_lingual_nlp|creationDate|2017-11-12 +http://www.semanlink.net/tag/cross_lingual_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cross_lingual_nlp|uri|http://www.semanlink.net/tag/cross_lingual_nlp +http://www.semanlink.net/tag/cross_lingual_nlp|broader_prefLabel|Multilingual NLP +http://www.semanlink.net/tag/cross_lingual_nlp|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/dbpedia_francophone|creationTime|2012-11-19T14:07:39Z +http://www.semanlink.net/tag/dbpedia_francophone|prefLabel|dbpedia francophone +http://www.semanlink.net/tag/dbpedia_francophone|broader|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/dbpedia_francophone|creationDate|2012-11-19 +http://www.semanlink.net/tag/dbpedia_francophone|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dbpedia_francophone|uri|http://www.semanlink.net/tag/dbpedia_francophone +http://www.semanlink.net/tag/dbpedia_francophone|broader_prefLabel|dbpedia +http://www.semanlink.net/tag/dbpedia_francophone|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/dbpedia_francophone|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/java_profiling|creationTime|2010-02-11T15:55:53Z +http://www.semanlink.net/tag/java_profiling|prefLabel|Java profiling +http://www.semanlink.net/tag/java_profiling|broader|http://www.semanlink.net/tag/java_dev +http://www.semanlink.net/tag/java_profiling|creationDate|2010-02-11 +http://www.semanlink.net/tag/java_profiling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_profiling|uri|http://www.semanlink.net/tag/java_profiling +http://www.semanlink.net/tag/java_profiling|broader_prefLabel|Java dev +http://www.semanlink.net/tag/jsfiddle|creationTime|2012-08-08T15:58:18Z +http://www.semanlink.net/tag/jsfiddle|prefLabel|jsFiddle +http://www.semanlink.net/tag/jsfiddle|broader|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/tag/jsfiddle|broader|http://www.semanlink.net/tag/web_tools +http://www.semanlink.net/tag/jsfiddle|broader|http://www.semanlink.net/tag/javascript_tool +http://www.semanlink.net/tag/jsfiddle|creationDate|2012-08-08 +http://www.semanlink.net/tag/jsfiddle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jsfiddle|homepage|http://jsfiddle.net/ +http://www.semanlink.net/tag/jsfiddle|uri|http://www.semanlink.net/tag/jsfiddle +http://www.semanlink.net/tag/jsfiddle|broader_prefLabel|Sample code +http://www.semanlink.net/tag/jsfiddle|broader_prefLabel|Web tools +http://www.semanlink.net/tag/jsfiddle|broader_prefLabel|Javascript tool +http://www.semanlink.net/tag/al_gore|creationTime|2013-02-28T19:07:11Z +http://www.semanlink.net/tag/al_gore|prefLabel|Al Gore +http://www.semanlink.net/tag/al_gore|creationDate|2013-02-28 +http://www.semanlink.net/tag/al_gore|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/al_gore|uri|http://www.semanlink.net/tag/al_gore +http://www.semanlink.net/tag/jail|creationTime|2013-07-11T01:27:40Z +http://www.semanlink.net/tag/jail|prefLabel|Jail +http://www.semanlink.net/tag/jail|creationDate|2013-07-11 +http://www.semanlink.net/tag/jail|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jail|uri|http://www.semanlink.net/tag/jail +http://www.semanlink.net/tag/big_data_tools|creationTime|2014-12-18T13:10:02Z +http://www.semanlink.net/tag/big_data_tools|prefLabel|Big Data Tools +http://www.semanlink.net/tag/big_data_tools|broader|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/big_data_tools|broader|http://www.semanlink.net/tag/tools +http://www.semanlink.net/tag/big_data_tools|creationDate|2014-12-18 +http://www.semanlink.net/tag/big_data_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/big_data_tools|uri|http://www.semanlink.net/tag/big_data_tools +http://www.semanlink.net/tag/big_data_tools|broader_prefLabel|Big Data +http://www.semanlink.net/tag/big_data_tools|broader_prefLabel|Tools +http://www.semanlink.net/tag/big_data_tools|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/graph_embeddings|creationTime|2018-01-27T15:18:45Z +http://www.semanlink.net/tag/graph_embeddings|prefLabel|Graph Embeddings +http://www.semanlink.net/tag/graph_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/graph_embeddings|broader|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/tag/graph_embeddings|related|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/tag/graph_embeddings|creationDate|2018-01-27 +http://www.semanlink.net/tag/graph_embeddings|comment|"Traditionally, networks are usually represented as adjacency matrices. This suffers from data sparsity and high-dimensionality. Network embeddings aim to **represent network +vertices into a low-dimensional vector space, by preserving +both network topology structure and node content information**. + +Algorithms are typically unsupervised +and can be broadly classified into +three groups ([source](/doc/2019/07/_1901_00596_a_comprehensive_su)): + +- matrix factorization +- random walks +- deep learning approaches (graph neural networks - GNNs) + - graph convolution networks (GraphSage) + - graph attention networks, + - graph auto-encoders (e.g., DNGR and SDNE) + - graph generative networks, + - graph spatial-temporal networks. + +Node embeddings (intuition: similar nodes should have similar vectors). + +- Laplacian EigenMap (an eigenvector based computation, OK when matrix is not too large) +- LINE Large-scale Information Network Embedding, most cited paper at WWW2015; Breadth first search +- DeepWalk (Perozzi et al. 2014) (the technique to learn word embeddings adapted to nodes: treating nodes as words and generating short random walks as sentences) +- Node2Vec (2016) (mixed strategy) + +etc. 
+" +http://www.semanlink.net/tag/graph_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_embeddings|altLabel|Representation Learning on Networks +http://www.semanlink.net/tag/graph_embeddings|altLabel|Graph representation learning +http://www.semanlink.net/tag/graph_embeddings|altLabel|Network Representation Learning +http://www.semanlink.net/tag/graph_embeddings|altLabel|Network embeddings +http://www.semanlink.net/tag/graph_embeddings|uri|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/tag/graph_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/graph_embeddings|broader_prefLabel|Graphs+Machine Learning +http://www.semanlink.net/tag/graph_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/graph_embeddings|broader_altLabel|Graph Machine Learning +http://www.semanlink.net/tag/graph_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/graph_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/wikileaks|creationTime|2010-06-02T21:43:06Z +http://www.semanlink.net/tag/wikileaks|prefLabel|Wikileaks +http://www.semanlink.net/tag/wikileaks|broader|http://www.semanlink.net/tag/leaks +http://www.semanlink.net/tag/wikileaks|related|http://www.semanlink.net/tag/chelsea_manning +http://www.semanlink.net/tag/wikileaks|creationDate|2010-06-02 +http://www.semanlink.net/tag/wikileaks|comment|a media insurgency +http://www.semanlink.net/tag/wikileaks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wikileaks|describedBy|https://en.wikipedia.org/wiki/Wikileaks +http://www.semanlink.net/tag/wikileaks|uri|http://www.semanlink.net/tag/wikileaks +http://www.semanlink.net/tag/wikileaks|broader_prefLabel|Leaks +http://www.semanlink.net/tag/wikileaks|broader_related|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/tag/munich|creationTime|2014-09-02T19:27:26Z +http://www.semanlink.net/tag/munich|prefLabel|Munich +http://www.semanlink.net/tag/munich|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/munich|broader|http://www.semanlink.net/tag/allemagne +http://www.semanlink.net/tag/munich|creationDate|2014-09-02 +http://www.semanlink.net/tag/munich|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/munich|describedBy|https://en.wikipedia.org/wiki/Munich +http://www.semanlink.net/tag/munich|uri|http://www.semanlink.net/tag/munich +http://www.semanlink.net/tag/munich|broader_prefLabel|Ville +http://www.semanlink.net/tag/munich|broader_prefLabel|Allemagne +http://www.semanlink.net/tag/munich|broader_altLabel|Germany +http://www.semanlink.net/tag/munich|broader_altLabel|Deutschland +http://www.semanlink.net/tag/europe_ecologie|creationTime|2009-11-21T17:38:03Z +http://www.semanlink.net/tag/europe_ecologie|prefLabel|Europe écologie +http://www.semanlink.net/tag/europe_ecologie|broader|http://www.semanlink.net/tag/verts +http://www.semanlink.net/tag/europe_ecologie|related|http://www.semanlink.net/tag/cohn_bendit +http://www.semanlink.net/tag/europe_ecologie|creationDate|2009-11-21 +http://www.semanlink.net/tag/europe_ecologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/europe_ecologie|uri|http://www.semanlink.net/tag/europe_ecologie +http://www.semanlink.net/tag/europe_ecologie|broader_prefLabel|Verts 
+http://www.semanlink.net/tag/cea_list|creationTime|2018-01-03T15:24:38Z
+http://www.semanlink.net/tag/cea_list|prefLabel|CEA, LIST
+http://www.semanlink.net/tag/cea_list|broader|http://www.semanlink.net/tag/cea
+http://www.semanlink.net/tag/cea_list|creationDate|2018-01-03
+http://www.semanlink.net/tag/cea_list|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cea_list|uri|http://www.semanlink.net/tag/cea_list
+http://www.semanlink.net/tag/cea_list|broader_prefLabel|CEA
+http://www.semanlink.net/tag/spreadsheets|creationTime|2007-08-22T00:21:06Z
+http://www.semanlink.net/tag/spreadsheets|prefLabel|Spreadsheets
+http://www.semanlink.net/tag/spreadsheets|creationDate|2007-08-22
+http://www.semanlink.net/tag/spreadsheets|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/spreadsheets|altLabel|Spreadsheet
+http://www.semanlink.net/tag/spreadsheets|uri|http://www.semanlink.net/tag/spreadsheets
+http://www.semanlink.net/tag/similarity_learning|creationTime|2019-06-17T23:53:25Z
+http://www.semanlink.net/tag/similarity_learning|prefLabel|Similarity learning
+http://www.semanlink.net/tag/similarity_learning|broader|http://www.semanlink.net/tag/machine_learning_problems
+http://www.semanlink.net/tag/similarity_learning|broader|http://www.semanlink.net/tag/supervised_machine_learning
+http://www.semanlink.net/tag/similarity_learning|related|http://www.semanlink.net/tag/machine_learned_ranking
+http://www.semanlink.net/tag/similarity_learning|related|http://www.semanlink.net/tag/locality_sensitive_hashing
+http://www.semanlink.net/tag/similarity_learning|creationDate|2019-06-17
+http://www.semanlink.net/tag/similarity_learning|comment|"The goal is to learn from examples a similarity function that measures how similar or related two objects are.
+
+Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity.
+
+> Distance metric learning is a major tool for a variety
+of problems in computer vision. It has successfully
+been employed for image retrieval, near duplicate detection, clustering and zero-shot learning. ([src](doc/2020/02/_1703_07464_no_fuss_distance_m))"
+http://www.semanlink.net/tag/similarity_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/similarity_learning|describedBy|https://en.wikipedia.org/wiki/Similarity_learning
+http://www.semanlink.net/tag/similarity_learning|uri|http://www.semanlink.net/tag/similarity_learning
+http://www.semanlink.net/tag/similarity_learning|broader_prefLabel|Machine learning: problems
+http://www.semanlink.net/tag/similarity_learning|broader_prefLabel|Supervised machine learning
+http://www.semanlink.net/tag/feedly|creationTime|2019-01-29T00:49:47Z
+http://www.semanlink.net/tag/feedly|prefLabel|Feedly
+http://www.semanlink.net/tag/feedly|broader|http://www.semanlink.net/tag/rss
+http://www.semanlink.net/tag/feedly|creationDate|2019-01-29
+http://www.semanlink.net/tag/feedly|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/feedly|uri|http://www.semanlink.net/tag/feedly
+http://www.semanlink.net/tag/feedly|broader_prefLabel|RSS
+http://www.semanlink.net/tag/sl_gui|creationTime|2008-01-06T02:46:56Z
+http://www.semanlink.net/tag/sl_gui|prefLabel|SL GUI
+http://www.semanlink.net/tag/sl_gui|broader|http://www.semanlink.net/tag/semanlink
+http://www.semanlink.net/tag/sl_gui|creationDate|2008-01-06
+http://www.semanlink.net/tag/sl_gui|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sl_gui|uri|http://www.semanlink.net/tag/sl_gui
+http://www.semanlink.net/tag/sl_gui|broader_prefLabel|Semanlink
+http://www.semanlink.net/tag/sl_gui|broader_altLabel|SL
+http://www.semanlink.net/tag/sl_gui|broader_related|http://www.semanlink.net/tag/semanlink_related
+http://www.semanlink.net/tag/peche_industrielle|creationTime|2014-01-30T23:12:18Z
+http://www.semanlink.net/tag/peche_industrielle|prefLabel|Pêche industrielle
+http://www.semanlink.net/tag/peche_industrielle|broader|http://www.semanlink.net/tag/peche
+http://www.semanlink.net/tag/peche_industrielle|creationDate|2014-01-30
+http://www.semanlink.net/tag/peche_industrielle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/peche_industrielle|uri|http://www.semanlink.net/tag/peche_industrielle
+http://www.semanlink.net/tag/peche_industrielle|broader_prefLabel|Pêche
+http://www.semanlink.net/tag/slides_fps|creationTime|2011-05-19T23:37:20Z
+http://www.semanlink.net/tag/slides_fps|prefLabel|slides fps
+http://www.semanlink.net/tag/slides_fps|broader|http://www.semanlink.net/tag/fps
+http://www.semanlink.net/tag/slides_fps|broader|http://www.semanlink.net/tag/slides
+http://www.semanlink.net/tag/slides_fps|creationDate|2011-05-19
+http://www.semanlink.net/tag/slides_fps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/slides_fps|uri|http://www.semanlink.net/tag/slides_fps
+http://www.semanlink.net/tag/slides_fps|broader_prefLabel|fps
+http://www.semanlink.net/tag/slides_fps|broader_prefLabel|Slides
+http://www.semanlink.net/tag/monopolies|creationTime|2020-11-22T00:02:47Z
+http://www.semanlink.net/tag/monopolies|prefLabel|Monopolies
+http://www.semanlink.net/tag/monopolies|broader|http://www.semanlink.net/tag/vive_le_capitalisme
+http://www.semanlink.net/tag/monopolies|creationDate|2020-11-22
+http://www.semanlink.net/tag/monopolies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/monopolies|uri|http://www.semanlink.net/tag/monopolies
+http://www.semanlink.net/tag/monopolies|broader_prefLabel|Vive le capitalisme ! +http://www.semanlink.net/tag/monopolies|broader_altLabel|Capitalisme de merde +http://www.semanlink.net/tag/latent_variable_model|creationTime|2018-10-31T23:09:33Z +http://www.semanlink.net/tag/latent_variable_model|prefLabel|Latent variable model +http://www.semanlink.net/tag/latent_variable_model|creationDate|2018-10-31 +http://www.semanlink.net/tag/latent_variable_model|comment|"statistical model that relates a set of observable variables (""manifest variables"") to a set of latent variables + +> A latent variable is one which is not directly observed but which is assumed to affect observed variables. Latent variable models therefore attempt to model the underlying structure of the observed variables, offering an explanation of the dependencies between observed variables which are then seen as conditionally independent, given the latent variable(s). ([source](https://supernlp.github.io/2018/11/10/emnlp-2018/)) +" +http://www.semanlink.net/tag/latent_variable_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/latent_variable_model|describedBy|https://en.wikipedia.org/wiki/Latent_variable_model +http://www.semanlink.net/tag/latent_variable_model|uri|http://www.semanlink.net/tag/latent_variable_model +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|creationTime|2014-05-18T11:32:51Z +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|prefLabel|Hierarchical text classification +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|broader|http://www.semanlink.net/tag/hierarchical_categories +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|broader|http://www.semanlink.net/tag/hierarchical_classification +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|broader|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|creationDate|2014-05-18 +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|comment|assignment of one or more suitable categories from a hierarchical category space to a document +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|uri|http://www.semanlink.net/tag/nlp_hierarchical_text_classification +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|broader_prefLabel|Hierarchical Categories +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|broader_prefLabel|Hierarchical Classification +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|broader_prefLabel|Text Classification +http://www.semanlink.net/tag/nlp_hierarchical_text_classification|broader_related|http://www.semanlink.net/tag/hierarchical_tags +http://www.semanlink.net/tag/string_searching_algorithm|creationTime|2020-01-09T16:34:37Z +http://www.semanlink.net/tag/string_searching_algorithm|prefLabel|String-searching algorithm +http://www.semanlink.net/tag/string_searching_algorithm|broader|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/string_searching_algorithm|broader|http://www.semanlink.net/tag/text_processing +http://www.semanlink.net/tag/string_searching_algorithm|creationDate|2020-01-09 +http://www.semanlink.net/tag/string_searching_algorithm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/string_searching_algorithm|describedBy|https://en.wikipedia.org/wiki/String-searching_algorithm +http://www.semanlink.net/tag/string_searching_algorithm|uri|http://www.semanlink.net/tag/string_searching_algorithm +http://www.semanlink.net/tag/string_searching_algorithm|broader_prefLabel|Algorithmes +http://www.semanlink.net/tag/string_searching_algorithm|broader_prefLabel|Text processing +http://www.semanlink.net/tag/inversion_of_control|creationTime|2018-08-04T22:44:40Z +http://www.semanlink.net/tag/inversion_of_control|prefLabel|Inversion of Control +http://www.semanlink.net/tag/inversion_of_control|creationDate|2018-08-04 +http://www.semanlink.net/tag/inversion_of_control|comment| Don't call us, we'll call you +http://www.semanlink.net/tag/inversion_of_control|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/inversion_of_control|describedBy|https://en.wikipedia.org/wiki/Inversion_of_control +http://www.semanlink.net/tag/inversion_of_control|uri|http://www.semanlink.net/tag/inversion_of_control +http://www.semanlink.net/tag/acteur|prefLabel|Acteur +http://www.semanlink.net/tag/acteur|broader|http://www.semanlink.net/tag/cinema +http://www.semanlink.net/tag/acteur|creationDate|2007-01-14 +http://www.semanlink.net/tag/acteur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/acteur|uri|http://www.semanlink.net/tag/acteur +http://www.semanlink.net/tag/acteur|broader_prefLabel|Cinéma +http://www.semanlink.net/tag/workshop|prefLabel|Workshop +http://www.semanlink.net/tag/workshop|broader|http://www.semanlink.net/tag/event +http://www.semanlink.net/tag/workshop|creationDate|2006-12-23 +http://www.semanlink.net/tag/workshop|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/workshop|uri|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/workshop|broader_prefLabel|Event +http://www.semanlink.net/tag/cour_europeenne_de_justice|prefLabel|Cour européenne de justice +http://www.semanlink.net/tag/cour_europeenne_de_justice|broader|http://www.semanlink.net/tag/institutions_europeennes +http://www.semanlink.net/tag/cour_europeenne_de_justice|broader|http://www.semanlink.net/tag/union_europeenne +http://www.semanlink.net/tag/cour_europeenne_de_justice|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cour_europeenne_de_justice|uri|http://www.semanlink.net/tag/cour_europeenne_de_justice +http://www.semanlink.net/tag/cour_europeenne_de_justice|broader_prefLabel|Institutions européennes +http://www.semanlink.net/tag/cour_europeenne_de_justice|broader_prefLabel|Union européenne +http://www.semanlink.net/tag/cour_europeenne_de_justice|broader_altLabel|UE +http://www.semanlink.net/tag/coal_seam_fire|creationTime|2016-11-17T00:11:28Z +http://www.semanlink.net/tag/coal_seam_fire|prefLabel|Coal seam fire +http://www.semanlink.net/tag/coal_seam_fire|broader|http://www.semanlink.net/tag/fire +http://www.semanlink.net/tag/coal_seam_fire|broader|http://www.semanlink.net/tag/charbon +http://www.semanlink.net/tag/coal_seam_fire|creationDate|2016-11-17 +http://www.semanlink.net/tag/coal_seam_fire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coal_seam_fire|describedBy|https://en.wikipedia.org/wiki/Coal_seam_fire#Germany +http://www.semanlink.net/tag/coal_seam_fire|uri|http://www.semanlink.net/tag/coal_seam_fire +http://www.semanlink.net/tag/coal_seam_fire|broader_prefLabel|Fire 
+http://www.semanlink.net/tag/coal_seam_fire|broader_prefLabel|Charbon +http://www.semanlink.net/tag/biomedical_data|creationTime|2017-09-04T21:11:32Z +http://www.semanlink.net/tag/biomedical_data|prefLabel|Biomedical data +http://www.semanlink.net/tag/biomedical_data|broader|http://www.semanlink.net/tag/sante +http://www.semanlink.net/tag/biomedical_data|creationDate|2017-09-04 +http://www.semanlink.net/tag/biomedical_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/biomedical_data|uri|http://www.semanlink.net/tag/biomedical_data +http://www.semanlink.net/tag/biomedical_data|broader_prefLabel|Santé +http://www.semanlink.net/tag/ronan_collobert|creationTime|2018-01-17T18:44:40Z +http://www.semanlink.net/tag/ronan_collobert|prefLabel|Ronan Collobert +http://www.semanlink.net/tag/ronan_collobert|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/ronan_collobert|related|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/ronan_collobert|creationDate|2018-01-17 +http://www.semanlink.net/tag/ronan_collobert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ronan_collobert|homepage|https://ronan.collobert.com/ +http://www.semanlink.net/tag/ronan_collobert|uri|http://www.semanlink.net/tag/ronan_collobert +http://www.semanlink.net/tag/ronan_collobert|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/differentiable_programming|creationTime|2018-01-14T19:20:34Z +http://www.semanlink.net/tag/differentiable_programming|prefLabel|Differentiable Programming +http://www.semanlink.net/tag/differentiable_programming|creationDate|2018-01-14 +http://www.semanlink.net/tag/differentiable_programming|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/differentiable_programming|uri|http://www.semanlink.net/tag/differentiable_programming +http://www.semanlink.net/tag/declaration_des_droits_de_l_homme|creationTime|2011-02-02T01:38:02Z +http://www.semanlink.net/tag/declaration_des_droits_de_l_homme|prefLabel|Déclaration des droits de l'homme +http://www.semanlink.net/tag/declaration_des_droits_de_l_homme|creationDate|2011-02-02 +http://www.semanlink.net/tag/declaration_des_droits_de_l_homme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/declaration_des_droits_de_l_homme|uri|http://www.semanlink.net/tag/declaration_des_droits_de_l_homme +http://www.semanlink.net/tag/wiki|prefLabel|Wiki +http://www.semanlink.net/tag/wiki|broader|http://www.semanlink.net/tag/collaborative_editing +http://www.semanlink.net/tag/wiki|broader|http://www.semanlink.net/tag/software +http://www.semanlink.net/tag/wiki|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/wiki|broader|http://www.semanlink.net/tag/social_networks +http://www.semanlink.net/tag/wiki|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wiki|uri|http://www.semanlink.net/tag/wiki +http://www.semanlink.net/tag/wiki|broader_prefLabel|Collaborative editing +http://www.semanlink.net/tag/wiki|broader_prefLabel|Software +http://www.semanlink.net/tag/wiki|broader_prefLabel|Internet +http://www.semanlink.net/tag/wiki|broader_prefLabel|Social Networks +http://www.semanlink.net/tag/xtech_2006|prefLabel|XTech 2006 +http://www.semanlink.net/tag/xtech_2006|broader|http://www.semanlink.net/tag/xtech 
+http://www.semanlink.net/tag/xtech_2006|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xtech_2006|uri|http://www.semanlink.net/tag/xtech_2006 +http://www.semanlink.net/tag/xtech_2006|broader_prefLabel|XTech +http://www.semanlink.net/tag/jeffrey_t_pollock|creationTime|2010-07-31T14:35:32Z +http://www.semanlink.net/tag/jeffrey_t_pollock|prefLabel|Jeffrey T. Pollock +http://www.semanlink.net/tag/jeffrey_t_pollock|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/jeffrey_t_pollock|related|http://www.semanlink.net/tag/oracle +http://www.semanlink.net/tag/jeffrey_t_pollock|creationDate|2010-07-31 +http://www.semanlink.net/tag/jeffrey_t_pollock|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeffrey_t_pollock|describedBy|http://www.linkedin.com/in/jtpollock +http://www.semanlink.net/tag/jeffrey_t_pollock|uri|http://www.semanlink.net/tag/jeffrey_t_pollock +http://www.semanlink.net/tag/jeffrey_t_pollock|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/jeffrey_t_pollock|broader_altLabel|Technical guys +http://www.semanlink.net/tag/invasion_d_especes_etrangeres|prefLabel|Espèces invasives +http://www.semanlink.net/tag/invasion_d_especes_etrangeres|creationDate|2006-11-25 +http://www.semanlink.net/tag/invasion_d_especes_etrangeres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/invasion_d_especes_etrangeres|uri|http://www.semanlink.net/tag/invasion_d_especes_etrangeres +http://www.semanlink.net/tag/ipod|prefLabel|iPod +http://www.semanlink.net/tag/ipod|broader|http://www.semanlink.net/tag/digital_entertainment +http://www.semanlink.net/tag/ipod|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/ipod|broader|http://www.semanlink.net/tag/devices +http://www.semanlink.net/tag/ipod|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/ipod|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ipod|uri|http://www.semanlink.net/tag/ipod +http://www.semanlink.net/tag/ipod|broader_prefLabel|Digital entertainment +http://www.semanlink.net/tag/ipod|broader_prefLabel|Apple +http://www.semanlink.net/tag/ipod|broader_prefLabel|Devices +http://www.semanlink.net/tag/ipod|broader_prefLabel|Musique +http://www.semanlink.net/tag/ipod|broader_altLabel|Music +http://www.semanlink.net/tag/design_pattern|creationTime|2008-12-11T01:03:11Z +http://www.semanlink.net/tag/design_pattern|prefLabel|Design pattern +http://www.semanlink.net/tag/design_pattern|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/design_pattern|creationDate|2008-12-11 +http://www.semanlink.net/tag/design_pattern|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/design_pattern|describedBy|https://en.wikipedia.org/wiki/Design_pattern_(computer_science) +http://www.semanlink.net/tag/design_pattern|altLabel|Patterns +http://www.semanlink.net/tag/design_pattern|uri|http://www.semanlink.net/tag/design_pattern +http://www.semanlink.net/tag/design_pattern|broader_prefLabel|Informatique +http://www.semanlink.net/tag/retrovirus|creationTime|2011-03-27T14:35:54Z +http://www.semanlink.net/tag/retrovirus|prefLabel|Rétrovirus +http://www.semanlink.net/tag/retrovirus|broader|http://www.semanlink.net/tag/virus +http://www.semanlink.net/tag/retrovirus|creationDate|2011-03-27 
+http://www.semanlink.net/tag/retrovirus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/retrovirus|uri|http://www.semanlink.net/tag/retrovirus +http://www.semanlink.net/tag/retrovirus|broader_prefLabel|Virus +http://www.semanlink.net/tag/cassini|prefLabel|Cassini +http://www.semanlink.net/tag/cassini|broader|http://www.semanlink.net/tag/nasa +http://www.semanlink.net/tag/cassini|broader|http://www.semanlink.net/tag/saturne +http://www.semanlink.net/tag/cassini|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/cassini|comment|"Cassini-Huygens: mission to Saturn & Titan +" +http://www.semanlink.net/tag/cassini|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cassini|homepage|http://saturn.jpl.nasa.gov +http://www.semanlink.net/tag/cassini|uri|http://www.semanlink.net/tag/cassini +http://www.semanlink.net/tag/cassini|broader_prefLabel|NASA +http://www.semanlink.net/tag/cassini|broader_prefLabel|Saturne +http://www.semanlink.net/tag/cassini|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/cassini|broader_altLabel|Saturn +http://www.semanlink.net/tag/hadoop|creationTime|2013-02-18T16:11:52Z +http://www.semanlink.net/tag/hadoop|prefLabel|Hadoop +http://www.semanlink.net/tag/hadoop|broader|http://www.semanlink.net/tag/distributed_computing +http://www.semanlink.net/tag/hadoop|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/hadoop|broader|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/hadoop|related|http://www.semanlink.net/tag/map_reduce +http://www.semanlink.net/tag/hadoop|creationDate|2013-02-18 +http://www.semanlink.net/tag/hadoop|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hadoop|homepage|http://hadoop.apache.org/ +http://www.semanlink.net/tag/hadoop|uri|http://www.semanlink.net/tag/hadoop +http://www.semanlink.net/tag/hadoop|broader_prefLabel|Distributed computing +http://www.semanlink.net/tag/hadoop|broader_prefLabel|apache.org +http://www.semanlink.net/tag/hadoop|broader_prefLabel|Big Data +http://www.semanlink.net/tag/hadoop|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/sanjeev_arora|creationTime|2018-05-26T10:33:58Z +http://www.semanlink.net/tag/sanjeev_arora|prefLabel|Sanjeev Arora +http://www.semanlink.net/tag/sanjeev_arora|broader|http://www.semanlink.net/tag/nlp_princeton +http://www.semanlink.net/tag/sanjeev_arora|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/sanjeev_arora|creationDate|2018-05-26 +http://www.semanlink.net/tag/sanjeev_arora|comment|"best known for his work on probabilistically checkable proofs and, in particular, the PCP theorem. 
+ +[Off the convex path](http://www.offconvex.org/) +" +http://www.semanlink.net/tag/sanjeev_arora|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sanjeev_arora|describedBy|https://en.wikipedia.org/wiki/Sanjeev_Arora +http://www.semanlink.net/tag/sanjeev_arora|uri|http://www.semanlink.net/tag/sanjeev_arora +http://www.semanlink.net/tag/sanjeev_arora|broader_prefLabel|NLP@Princeton +http://www.semanlink.net/tag/sanjeev_arora|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/lambda_calculus|creationTime|2016-05-03T23:16:33Z +http://www.semanlink.net/tag/lambda_calculus|prefLabel|Lambda calculus +http://www.semanlink.net/tag/lambda_calculus|creationDate|2016-05-03 +http://www.semanlink.net/tag/lambda_calculus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lambda_calculus|describedBy|https://en.wikipedia.org/wiki/Lambda_calculus +http://www.semanlink.net/tag/lambda_calculus|uri|http://www.semanlink.net/tag/lambda_calculus +http://www.semanlink.net/tag/alex_allauzen|creationTime|2018-07-07T12:41:14Z +http://www.semanlink.net/tag/alex_allauzen|prefLabel|Alex Allauzen +http://www.semanlink.net/tag/alex_allauzen|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/alex_allauzen|related|http://www.semanlink.net/tag/pfia_2018 +http://www.semanlink.net/tag/alex_allauzen|creationDate|2018-07-07 +http://www.semanlink.net/tag/alex_allauzen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alex_allauzen|describedBy|https://allauzen.github.io/ +http://www.semanlink.net/tag/alex_allauzen|altLabel|Alexandre Allauzen +http://www.semanlink.net/tag/alex_allauzen|uri|http://www.semanlink.net/tag/alex_allauzen +http://www.semanlink.net/tag/alex_allauzen|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/hypothes_is|creationTime|2017-08-23T18:40:16Z +http://www.semanlink.net/tag/hypothes_is|prefLabel|Hypothes.is +http://www.semanlink.net/tag/hypothes_is|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/hypothes_is|broader|http://www.semanlink.net/tag/tagged +http://www.semanlink.net/tag/hypothes_is|creationDate|2017-08-23 +http://www.semanlink.net/tag/hypothes_is|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hypothes_is|homepage|https://web.hypothes.is/ +http://www.semanlink.net/tag/hypothes_is|describedBy|https://en.wikipedia.org/wiki/Hypothes.is +http://www.semanlink.net/tag/hypothes_is|uri|http://www.semanlink.net/tag/hypothes_is +http://www.semanlink.net/tag/hypothes_is|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/hypothes_is|broader_prefLabel|Tagged +http://www.semanlink.net/tag/claude_hartmann|creationTime|2018-02-18T09:21:37Z +http://www.semanlink.net/tag/claude_hartmann|prefLabel|Claude Hartmann +http://www.semanlink.net/tag/claude_hartmann|creationDate|2018-02-18 +http://www.semanlink.net/tag/claude_hartmann|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/claude_hartmann|homepage|http://claudehartmann1.wixsite.com/clhartmann +http://www.semanlink.net/tag/claude_hartmann|uri|http://www.semanlink.net/tag/claude_hartmann +http://www.semanlink.net/tag/wikification|creationTime|2020-05-02T11:36:52Z +http://www.semanlink.net/tag/wikification|prefLabel|Wikification +http://www.semanlink.net/tag/wikification|broader|http://www.semanlink.net/tag/entity_linking 
+http://www.semanlink.net/tag/wikification|broader|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/wikification|creationDate|2020-05-02 +http://www.semanlink.net/tag/wikification|comment|Entity linking with Wikipedia as the target knowledge base +http://www.semanlink.net/tag/wikification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wikification|uri|http://www.semanlink.net/tag/wikification +http://www.semanlink.net/tag/wikification|broader_prefLabel|Entity linking +http://www.semanlink.net/tag/wikification|broader_prefLabel|Wikipedia +http://www.semanlink.net/tag/wikification|broader_altLabel|Named entity disambiguation +http://www.semanlink.net/tag/wikification|broader_related|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/wikification|broader_related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/services_publics|creationTime|2007-05-28T21:19:57Z +http://www.semanlink.net/tag/services_publics|prefLabel|Services publics +http://www.semanlink.net/tag/services_publics|creationDate|2007-05-28 +http://www.semanlink.net/tag/services_publics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/services_publics|altLabel|Service public +http://www.semanlink.net/tag/services_publics|uri|http://www.semanlink.net/tag/services_publics +http://www.semanlink.net/tag/african_origin_of_modern_humans|creationTime|2016-09-24T21:39:40Z +http://www.semanlink.net/tag/african_origin_of_modern_humans|prefLabel|African origin of modern humans +http://www.semanlink.net/tag/african_origin_of_modern_humans|broader|http://www.semanlink.net/tag/origines_de_l_homme +http://www.semanlink.net/tag/african_origin_of_modern_humans|creationDate|2016-09-24 +http://www.semanlink.net/tag/african_origin_of_modern_humans|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/african_origin_of_modern_humans|describedBy|https://en.wikipedia.org/wiki/Recent_African_origin_of_modern_humans +http://www.semanlink.net/tag/african_origin_of_modern_humans|uri|http://www.semanlink.net/tag/african_origin_of_modern_humans +http://www.semanlink.net/tag/african_origin_of_modern_humans|broader_prefLabel|Origines de l'homme +http://www.semanlink.net/tag/mondeca|creationTime|2012-07-05T16:07:53Z +http://www.semanlink.net/tag/mondeca|prefLabel|Mondeca +http://www.semanlink.net/tag/mondeca|broader|http://www.semanlink.net/tag/french_semantic_web_company +http://www.semanlink.net/tag/mondeca|related|http://www.semanlink.net/tag/bernard_vatant +http://www.semanlink.net/tag/mondeca|creationDate|2012-07-05 +http://www.semanlink.net/tag/mondeca|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mondeca|homepage|http://www.mondeca.com/ +http://www.semanlink.net/tag/mondeca|uri|http://www.semanlink.net/tag/mondeca +http://www.semanlink.net/tag/mondeca|broader_prefLabel|French Semantic web company +http://www.semanlink.net/tag/wikipedia_page_to_concept|creationTime|2007-10-13T00:29:39Z +http://www.semanlink.net/tag/wikipedia_page_to_concept|prefLabel|Wikipedia page to concept +http://www.semanlink.net/tag/wikipedia_page_to_concept|broader|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/wikipedia_page_to_concept|broader|http://www.semanlink.net/tag/information_resources +http://www.semanlink.net/tag/wikipedia_page_to_concept|broader|http://www.semanlink.net/tag/dbpedia 
+http://www.semanlink.net/tag/wikipedia_page_to_concept|creationDate|2007-10-13 +http://www.semanlink.net/tag/wikipedia_page_to_concept|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wikipedia_page_to_concept|uri|http://www.semanlink.net/tag/wikipedia_page_to_concept +http://www.semanlink.net/tag/wikipedia_page_to_concept|broader_prefLabel|Wikipedia +http://www.semanlink.net/tag/wikipedia_page_to_concept|broader_prefLabel|Information resources +http://www.semanlink.net/tag/wikipedia_page_to_concept|broader_prefLabel|dbpedia +http://www.semanlink.net/tag/wikipedia_page_to_concept|broader_altLabel|Concept's URI +http://www.semanlink.net/tag/wikipedia_page_to_concept|broader_related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/wikipedia_page_to_concept|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/wikipedia_page_to_concept|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/tag_clusters|prefLabel|Tag Clusters +http://www.semanlink.net/tag/tag_clusters|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/tag_clusters|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/tag_clusters|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tag_clusters|uri|http://www.semanlink.net/tag/tag_clusters +http://www.semanlink.net/tag/tag_clusters|broader_prefLabel|Tagging +http://www.semanlink.net/tag/tag_clusters|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/rdfa_tool|creationTime|2011-03-24T22:53:25Z +http://www.semanlink.net/tag/rdfa_tool|prefLabel|RDFa tool +http://www.semanlink.net/tag/rdfa_tool|broader|http://www.semanlink.net/tag/dev_tools +http://www.semanlink.net/tag/rdfa_tool|broader|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/rdfa_tool|creationDate|2011-03-24 +http://www.semanlink.net/tag/rdfa_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdfa_tool|uri|http://www.semanlink.net/tag/rdfa_tool +http://www.semanlink.net/tag/rdfa_tool|broader_prefLabel|Dev tools +http://www.semanlink.net/tag/rdfa_tool|broader_prefLabel|RDFa +http://www.semanlink.net/tag/rdfa_tool|broader_altLabel|RDF/A +http://www.semanlink.net/tag/rdfa_tool|broader_related|http://www.semanlink.net/tag/elias_torres +http://www.semanlink.net/tag/rdfa_tool|broader_related|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanlink.net/tag/rdfa_tool|broader_related|http://www.semanlink.net/tag/microformats +http://www.semanlink.net/tag/doc2vec|creationTime|2017-07-10T16:14:43Z +http://www.semanlink.net/tag/doc2vec|prefLabel|Doc2Vec +http://www.semanlink.net/tag/doc2vec|broader|http://www.semanlink.net/tag/word2vec +http://www.semanlink.net/tag/doc2vec|broader|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/tag/doc2vec|broader|http://www.semanlink.net/tag/document_embeddings +http://www.semanlink.net/tag/doc2vec|creationDate|2017-07-10 +http://www.semanlink.net/tag/doc2vec|comment|"(aka paragraph2vec, aka sentence embeddings) extends word2vec algorithm to larger blocks of text (sentences, paragraphs or entire documents). Represents each document by a dense vector which is trained to predict words in the document. 
+ +Paragraph Vectors is the name of the model proposed by Le and Mikolov to generate unsupervised representations of sentences, paragraphs, or entire documents without losing local word order. + +Implemented in [gensim](/tag/gensim) +" +http://www.semanlink.net/tag/doc2vec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/doc2vec|altLabel|paragraph2vec +http://www.semanlink.net/tag/doc2vec|altLabel|Paragraph Vectors +http://www.semanlink.net/tag/doc2vec|uri|http://www.semanlink.net/tag/doc2vec +http://www.semanlink.net/tag/doc2vec|broader_prefLabel|Word2vec +http://www.semanlink.net/tag/doc2vec|broader_prefLabel|Sentence Embeddings +http://www.semanlink.net/tag/doc2vec|broader_prefLabel|Document embeddings +http://www.semanlink.net/tag/doc2vec|broader_related|http://www.semanlink.net/tag/latent_semantic_analysis +http://www.semanlink.net/tag/m3_multi_media_museum|creationTime|2010-05-17T12:14:13Z +http://www.semanlink.net/tag/m3_multi_media_museum|prefLabel|M3 Multi Media Museum +http://www.semanlink.net/tag/m3_multi_media_museum|broader|http://www.semanlink.net/tag/hypersolutions +http://www.semanlink.net/tag/m3_multi_media_museum|broader|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/m3_multi_media_museum|broader|http://www.semanlink.net/tag/digital_collections +http://www.semanlink.net/tag/m3_multi_media_museum|broader|http://www.semanlink.net/tag/my_old_things +http://www.semanlink.net/tag/m3_multi_media_museum|creationDate|2010-05-17 +http://www.semanlink.net/tag/m3_multi_media_museum|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/m3_multi_media_museum|uri|http://www.semanlink.net/tag/m3_multi_media_museum +http://www.semanlink.net/tag/m3_multi_media_museum|broader_prefLabel|hyperSOLutions +http://www.semanlink.net/tag/m3_multi_media_museum|broader_prefLabel|Musée +http://www.semanlink.net/tag/m3_multi_media_museum|broader_prefLabel|Digital Collections +http://www.semanlink.net/tag/m3_multi_media_museum|broader_prefLabel|My old things +http://www.semanlink.net/tag/m3_multi_media_museum|broader_related|http://www.semanlink.net/tag/cms +http://www.semanlink.net/tag/semantic_annotation|prefLabel|Semantic annotation +http://www.semanlink.net/tag/semantic_annotation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_annotation|uri|http://www.semanlink.net/tag/semantic_annotation +http://www.semanlink.net/tag/sarraounia_mangou|prefLabel|Sarraounia Mangou +http://www.semanlink.net/tag/sarraounia_mangou|broader|http://www.semanlink.net/tag/empire_colonial_francais +http://www.semanlink.net/tag/sarraounia_mangou|broader|http://www.semanlink.net/tag/esprit_de_resistance +http://www.semanlink.net/tag/sarraounia_mangou|broader|http://www.semanlink.net/tag/histoire_du_niger +http://www.semanlink.net/tag/sarraounia_mangou|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sarraounia_mangou|uri|http://www.semanlink.net/tag/sarraounia_mangou +http://www.semanlink.net/tag/sarraounia_mangou|broader_prefLabel|Empire colonial français +http://www.semanlink.net/tag/sarraounia_mangou|broader_prefLabel|Esprit de résistance +http://www.semanlink.net/tag/sarraounia_mangou|broader_prefLabel|Histoire du Niger +http://www.semanlink.net/tag/bootstrap_aggregating_bagging|creationTime|2018-10-28T00:45:24Z +http://www.semanlink.net/tag/bootstrap_aggregating_bagging|prefLabel|Bootstrap aggregating (Bagging)
+http://www.semanlink.net/tag/bootstrap_aggregating_bagging|broader|http://www.semanlink.net/tag/ensemble_learning +http://www.semanlink.net/tag/bootstrap_aggregating_bagging|related|http://www.semanlink.net/tag/imbalanced_data +http://www.semanlink.net/tag/bootstrap_aggregating_bagging|creationDate|2018-10-28 +http://www.semanlink.net/tag/bootstrap_aggregating_bagging|comment|"ML ensemble meta-algorithm designed to improve the stability and accuracy of ML algorithms used in classification and regression (by combining classifications of randomly generated training sets) + +Usually applied to decision tree methods, but can be used with any type of method. Special case of the model averaging approach. + +Reduces variance, and hence the risk of overfitting. + +" +http://www.semanlink.net/tag/bootstrap_aggregating_bagging|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bootstrap_aggregating_bagging|describedBy|https://en.wikipedia.org/wiki/Bootstrap_aggregating +http://www.semanlink.net/tag/bootstrap_aggregating_bagging|uri|http://www.semanlink.net/tag/bootstrap_aggregating_bagging +http://www.semanlink.net/tag/bootstrap_aggregating_bagging|broader_prefLabel|Ensemble learning +http://www.semanlink.net/tag/thomas_piketty|creationTime|2014-04-23T21:58:21Z +http://www.semanlink.net/tag/thomas_piketty|prefLabel|Thomas Piketty +http://www.semanlink.net/tag/thomas_piketty|broader|http://www.semanlink.net/tag/economiste +http://www.semanlink.net/tag/thomas_piketty|creationDate|2014-04-23 +http://www.semanlink.net/tag/thomas_piketty|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thomas_piketty|homepage|http://piketty.pse.ens.fr/en/ +http://www.semanlink.net/tag/thomas_piketty|uri|http://www.semanlink.net/tag/thomas_piketty +http://www.semanlink.net/tag/thomas_piketty|broader_prefLabel|Economiste +http://www.semanlink.net/tag/rdf_schema|prefLabel|RDF Schema +http://www.semanlink.net/tag/rdf_schema|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_schema|broader|http://www.semanlink.net/tag/rdf_vocabularies +http://www.semanlink.net/tag/rdf_schema|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_schema|altLabel|RDF-S +http://www.semanlink.net/tag/rdf_schema|altLabel|RDFS +http://www.semanlink.net/tag/rdf_schema|uri|http://www.semanlink.net/tag/rdf_schema +http://www.semanlink.net/tag/rdf_schema|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_schema|broader_prefLabel|RDF Vocabularies +http://www.semanlink.net/tag/rdf_schema|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_schema|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_schema|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_schema|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_schema|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/salesforce|creationTime|2017-09-18T15:09:26Z +http://www.semanlink.net/tag/salesforce|prefLabel|salesforce +http://www.semanlink.net/tag/salesforce|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/salesforce|creationDate|2017-09-18 +http://www.semanlink.net/tag/salesforce|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/salesforce|describedBy|https://en.wikipedia.org/wiki/Salesforce.com
+http://www.semanlink.net/tag/salesforce|uri|http://www.semanlink.net/tag/salesforce +http://www.semanlink.net/tag/salesforce|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/hippopotame|prefLabel|Hippopotame +http://www.semanlink.net/tag/hippopotame|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/hippopotame|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hippopotame|uri|http://www.semanlink.net/tag/hippopotame +http://www.semanlink.net/tag/hippopotame|broader_prefLabel|Animal +http://www.semanlink.net/tag/contestation|prefLabel|Contestation +http://www.semanlink.net/tag/contestation|broader|http://www.semanlink.net/tag/i_like +http://www.semanlink.net/tag/contestation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/contestation|uri|http://www.semanlink.net/tag/contestation +http://www.semanlink.net/tag/contestation|broader_prefLabel|I like +http://www.semanlink.net/tag/communaute_internationale|prefLabel|Communauté internationale +http://www.semanlink.net/tag/communaute_internationale|creationDate|2006-11-21 +http://www.semanlink.net/tag/communaute_internationale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/communaute_internationale|uri|http://www.semanlink.net/tag/communaute_internationale +http://www.semanlink.net/tag/samare_de_l_erable|creationTime|2020-10-15T02:01:32Z +http://www.semanlink.net/tag/samare_de_l_erable|prefLabel|Samare de l'érable +http://www.semanlink.net/tag/samare_de_l_erable|broader|http://www.semanlink.net/tag/curiosite_naturelle +http://www.semanlink.net/tag/samare_de_l_erable|broader|http://www.semanlink.net/tag/dispersion_des_graines +http://www.semanlink.net/tag/samare_de_l_erable|creationDate|2020-10-15 +http://www.semanlink.net/tag/samare_de_l_erable|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/samare_de_l_erable|uri|http://www.semanlink.net/tag/samare_de_l_erable +http://www.semanlink.net/tag/samare_de_l_erable|broader_prefLabel|Curiosités naturelles +http://www.semanlink.net/tag/samare_de_l_erable|broader_prefLabel|Dispersion des graines +http://www.semanlink.net/tag/champignon|prefLabel|Champignons +http://www.semanlink.net/tag/champignon|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/champignon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/champignon|uri|http://www.semanlink.net/tag/champignon +http://www.semanlink.net/tag/champignon|broader_prefLabel|Biology +http://www.semanlink.net/tag/champignon|broader_altLabel|Biologie +http://www.semanlink.net/tag/lod_mailing_list|creationTime|2007-09-19T14:13:12Z +http://www.semanlink.net/tag/lod_mailing_list|prefLabel|LOD mailing list +http://www.semanlink.net/tag/lod_mailing_list|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/lod_mailing_list|broader|http://www.semanlink.net/tag/mailing_list +http://www.semanlink.net/tag/lod_mailing_list|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/lod_mailing_list|creationDate|2007-09-19 +http://www.semanlink.net/tag/lod_mailing_list|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lod_mailing_list|homepage|http://lists.w3.org/Archives/Public/public-lod/
+http://www.semanlink.net/tag/lod_mailing_list|uri|http://www.semanlink.net/tag/lod_mailing_list +http://www.semanlink.net/tag/lod_mailing_list|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/lod_mailing_list|broader_prefLabel|Mailing list +http://www.semanlink.net/tag/lod_mailing_list|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/lod_mailing_list|broader_altLabel|LOD +http://www.semanlink.net/tag/lod_mailing_list|broader_altLabel|LD +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/lod_mailing_list|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/ei|creationTime|2014-10-03T15:24:28Z +http://www.semanlink.net/tag/ei|prefLabel|Etat islamique +http://www.semanlink.net/tag/ei|broader|http://www.semanlink.net/tag/terrorisme_islamiste +http://www.semanlink.net/tag/ei|related|http://www.semanlink.net/tag/syrie +http://www.semanlink.net/tag/ei|related|http://www.semanlink.net/tag/irak +http://www.semanlink.net/tag/ei|creationDate|2014-10-03 +http://www.semanlink.net/tag/ei|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ei|altLabel|EIIL +http://www.semanlink.net/tag/ei|uri|http://www.semanlink.net/tag/ei +http://www.semanlink.net/tag/ei|broader_prefLabel|Terrorisme islamiste +http://www.semanlink.net/tag/francois_chollet|creationTime|2017-10-23T01:03:18Z +http://www.semanlink.net/tag/francois_chollet|prefLabel|François Chollet +http://www.semanlink.net/tag/francois_chollet|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/francois_chollet|related|http://www.semanlink.net/tag/keras +http://www.semanlink.net/tag/francois_chollet|creationDate|2017-10-23
+http://www.semanlink.net/tag/francois_chollet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/francois_chollet|uri|http://www.semanlink.net/tag/francois_chollet +http://www.semanlink.net/tag/francois_chollet|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/manu_sporny|creationTime|2012-02-20T22:49:47Z +http://www.semanlink.net/tag/manu_sporny|prefLabel|Manu Sporny +http://www.semanlink.net/tag/manu_sporny|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/manu_sporny|related|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/manu_sporny|creationDate|2012-02-20 +http://www.semanlink.net/tag/manu_sporny|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/manu_sporny|uri|http://www.semanlink.net/tag/manu_sporny +http://www.semanlink.net/tag/manu_sporny|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/j_hallucine|creationTime|2007-05-07T11:43:55Z +http://www.semanlink.net/tag/j_hallucine|prefLabel|J'hallucine +http://www.semanlink.net/tag/j_hallucine|creationDate|2007-05-07 +http://www.semanlink.net/tag/j_hallucine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/j_hallucine|uri|http://www.semanlink.net/tag/j_hallucine +http://www.semanlink.net/tag/these_renault_embeddings|creationTime|2018-05-28T22:35:38Z +http://www.semanlink.net/tag/these_renault_embeddings|prefLabel|Thèse IRIT-Renault NLP-KB +http://www.semanlink.net/tag/these_renault_embeddings|broader|http://www.semanlink.net/tag/irit +http://www.semanlink.net/tag/these_renault_embeddings|broader|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/these_renault_embeddings|related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/these_renault_embeddings|related|http://www.semanlink.net/tag/nlp_using_knowledge +http://www.semanlink.net/tag/these_renault_embeddings|related|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/these_renault_embeddings|related|http://www.semanlink.net/tag/jose_moreno +http://www.semanlink.net/tag/these_renault_embeddings|related|http://www.semanlink.net/tag/irit +http://www.semanlink.net/tag/these_renault_embeddings|related|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/these_renault_embeddings|creationDate|2018-05-28 +http://www.semanlink.net/tag/these_renault_embeddings|comment|"> Apprentissage profond pour l’accès aux textes et bases de connaissances + +Apprentissage de représentations d'informations sémantiques, adaptées au Traitement du Langage Naturel et à la Recherche d'Information, à partir de textes et de bases de connaissances formelles du domaine automobile " +http://www.semanlink.net/tag/these_renault_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/these_renault_embeddings|altLabel|Thèse IRIT-Renault +http://www.semanlink.net/tag/these_renault_embeddings|publish|true +http://www.semanlink.net/tag/these_renault_embeddings|uri|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/these_renault_embeddings|broader_prefLabel|IRIT +http://www.semanlink.net/tag/these_renault_embeddings|broader_prefLabel|Knowledge Graph + Deep Learning +http://www.semanlink.net/tag/these_renault_embeddings|broader_related|http://www.semanlink.net/tag/graph_neural_networks 
+http://www.semanlink.net/tag/drug_resistant_germs|creationTime|2019-04-10T00:02:29Z +http://www.semanlink.net/tag/drug_resistant_germs|prefLabel|Drug-resistant germs +http://www.semanlink.net/tag/drug_resistant_germs|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/drug_resistant_germs|creationDate|2019-04-10 +http://www.semanlink.net/tag/drug_resistant_germs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/drug_resistant_germs|uri|http://www.semanlink.net/tag/drug_resistant_germs +http://www.semanlink.net/tag/drug_resistant_germs|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/semanlink2|creationTime|2012-12-09T16:06:15Z +http://www.semanlink.net/tag/semanlink2|prefLabel|Semanlink2 +http://www.semanlink.net/tag/semanlink2|broader|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/semanlink2|creationDate|2012-12-09 +http://www.semanlink.net/tag/semanlink2|comment|"Voir [notes](/sl/doc/2015/09/semanlink2Notes.md) +" +http://www.semanlink.net/tag/semanlink2|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semanlink2|uri|http://www.semanlink.net/tag/semanlink2 +http://www.semanlink.net/tag/semanlink2|broader_prefLabel|Semanlink +http://www.semanlink.net/tag/semanlink2|broader_altLabel|SL +http://www.semanlink.net/tag/semanlink2|broader_related|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|creationTime|2019-10-30T00:48:21Z +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|prefLabel|Watson Speech-to-Text +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader|http://www.semanlink.net/tag/i_b_m_s_watson +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|creationDate|2019-10-30 +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|uri|http://www.semanlink.net/tag/ibm_watson_and_speech_to_text +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader_prefLabel|Speech-to-Text +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader_prefLabel|IBM Watson +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader_altLabel|Speech recognition +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader_altLabel|Voice recognition +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader_altLabel|IBM's Watson +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader_altLabel|Watson +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader_related|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader_related|http://www.semanlink.net/tag/audio_classification +http://www.semanlink.net/tag/ibm_watson_and_speech_to_text|broader_related|http://www.semanlink.net/tag/chris_welty +http://www.semanlink.net/tag/japonais|creationTime|2016-05-07T00:19:02Z +http://www.semanlink.net/tag/japonais|prefLabel|Japonais +http://www.semanlink.net/tag/japonais|broader|http://www.semanlink.net/tag/langues_vivantes +http://www.semanlink.net/tag/japonais|broader|http://www.semanlink.net/tag/japon +http://www.semanlink.net/tag/japonais|creationDate|2016-05-07 
+http://www.semanlink.net/tag/japonais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/japonais|uri|http://www.semanlink.net/tag/japonais +http://www.semanlink.net/tag/japonais|broader_prefLabel|Langues vivantes +http://www.semanlink.net/tag/japonais|broader_prefLabel|Japon +http://www.semanlink.net/tag/japonais|broader_altLabel|Japan +http://www.semanlink.net/tag/apache_shiro|creationTime|2018-08-05T18:51:02Z +http://www.semanlink.net/tag/apache_shiro|prefLabel|Apache Shiro +http://www.semanlink.net/tag/apache_shiro|broader|http://www.semanlink.net/tag/security +http://www.semanlink.net/tag/apache_shiro|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/apache_shiro|broader|http://www.semanlink.net/tag/java_dev +http://www.semanlink.net/tag/apache_shiro|creationDate|2018-08-05 +http://www.semanlink.net/tag/apache_shiro|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apache_shiro|homepage|https://shiro.apache.org/ +http://www.semanlink.net/tag/apache_shiro|uri|http://www.semanlink.net/tag/apache_shiro +http://www.semanlink.net/tag/apache_shiro|broader_prefLabel|Security +http://www.semanlink.net/tag/apache_shiro|broader_prefLabel|apache.org +http://www.semanlink.net/tag/apache_shiro|broader_prefLabel|Java dev +http://www.semanlink.net/tag/good_question|creationTime|2015-03-04T17:12:55Z +http://www.semanlink.net/tag/good_question|prefLabel|Good question +http://www.semanlink.net/tag/good_question|creationDate|2015-03-04 +http://www.semanlink.net/tag/good_question|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/good_question|uri|http://www.semanlink.net/tag/good_question +http://www.semanlink.net/tag/mercure|prefLabel|Mercure (Planète) +http://www.semanlink.net/tag/mercure|broader|http://www.semanlink.net/tag/systeme_solaire +http://www.semanlink.net/tag/mercure|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mercure|uri|http://www.semanlink.net/tag/mercure +http://www.semanlink.net/tag/mercure|broader_prefLabel|Système solaire +http://www.semanlink.net/tag/chine_technologie|prefLabel|Chine : technologie +http://www.semanlink.net/tag/chine_technologie|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/chine_technologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chine_technologie|uri|http://www.semanlink.net/tag/chine_technologie +http://www.semanlink.net/tag/chine_technologie|broader_prefLabel|Chine +http://www.semanlink.net/tag/chine_technologie|broader_altLabel|China +http://www.semanlink.net/tag/vary_header|creationTime|2015-05-14T15:23:33Z +http://www.semanlink.net/tag/vary_header|prefLabel|Vary Header +http://www.semanlink.net/tag/vary_header|broader|http://www.semanlink.net/tag/http_cache +http://www.semanlink.net/tag/vary_header|broader|http://www.semanlink.net/tag/http +http://www.semanlink.net/tag/vary_header|creationDate|2015-05-14 +http://www.semanlink.net/tag/vary_header|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vary_header|uri|http://www.semanlink.net/tag/vary_header +http://www.semanlink.net/tag/vary_header|broader_prefLabel|HTTP Cache +http://www.semanlink.net/tag/vary_header|broader_prefLabel|HTTP +http://www.semanlink.net/tag/1ere_guerre_mondiale|prefLabel|1ere guerre mondiale +http://www.semanlink.net/tag/1ere_guerre_mondiale|broader|http://www.semanlink.net/tag/guerre 
+http://www.semanlink.net/tag/1ere_guerre_mondiale|broader|http://www.semanlink.net/tag/histoire_du_xxe_siecle +http://www.semanlink.net/tag/1ere_guerre_mondiale|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/1ere_guerre_mondiale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/1ere_guerre_mondiale|uri|http://www.semanlink.net/tag/1ere_guerre_mondiale +http://www.semanlink.net/tag/1ere_guerre_mondiale|broader_prefLabel|War +http://www.semanlink.net/tag/1ere_guerre_mondiale|broader_prefLabel|Histoire du XXe siècle +http://www.semanlink.net/tag/1ere_guerre_mondiale|broader_prefLabel|Histoire +http://www.semanlink.net/tag/1ere_guerre_mondiale|broader_altLabel|Guerre +http://www.semanlink.net/tag/restaurant|creationTime|2007-02-22T08:37:49Z +http://www.semanlink.net/tag/restaurant|prefLabel|Restaurant +http://www.semanlink.net/tag/restaurant|broader|http://www.semanlink.net/tag/gastronomie +http://www.semanlink.net/tag/restaurant|creationDate|2007-02-22 +http://www.semanlink.net/tag/restaurant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/restaurant|uri|http://www.semanlink.net/tag/restaurant +http://www.semanlink.net/tag/restaurant|broader_prefLabel|Gastronomie +http://www.semanlink.net/tag/restaurant|broader_altLabel|Cuisine +http://www.semanlink.net/tag/multi_class_classification|creationTime|2018-03-04T17:22:09Z +http://www.semanlink.net/tag/multi_class_classification|prefLabel|Multi-class classification +http://www.semanlink.net/tag/multi_class_classification|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/multi_class_classification|related|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/tag/multi_class_classification|creationDate|2018-03-04 +http://www.semanlink.net/tag/multi_class_classification|comment|predicting a single label among mutually exclusive labels. 
+http://www.semanlink.net/tag/multi_class_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multi_class_classification|altLabel|Multiclass classification +http://www.semanlink.net/tag/multi_class_classification|uri|http://www.semanlink.net/tag/multi_class_classification +http://www.semanlink.net/tag/multi_class_classification|broader_prefLabel|Classification +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|creationTime|2007-08-22T00:20:29Z +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|prefLabel|Mapping data from spreadsheets to RDF +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader|http://www.semanlink.net/tag/spreadsheets +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader|http://www.semanlink.net/tag/converting_data_into_rdf +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|creationDate|2007-08-22 +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|uri|http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader_prefLabel|Spreadsheets +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader_prefLabel|Converting data into RDF +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader_prefLabel|RDF +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader_altLabel|Spreadsheet +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/mpaa|prefLabel|MPAA +http://www.semanlink.net/tag/mpaa|broader|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.semanlink.net/tag/mpaa|broader|http://www.semanlink.net/tag/content_industries +http://www.semanlink.net/tag/mpaa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mpaa|uri|http://www.semanlink.net/tag/mpaa +http://www.semanlink.net/tag/mpaa|broader_prefLabel|Propriété intellectuelle +http://www.semanlink.net/tag/mpaa|broader_prefLabel|Content industries +http://www.semanlink.net/tag/peter_bloem|creationTime|2019-08-21T22:05:09Z +http://www.semanlink.net/tag/peter_bloem|prefLabel|Peter Bloem +http://www.semanlink.net/tag/peter_bloem|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/peter_bloem|related|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/peter_bloem|creationDate|2019-08-21 +http://www.semanlink.net/tag/peter_bloem|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/peter_bloem|homepage|http://www.peterbloem.nl/ 
+http://www.semanlink.net/tag/peter_bloem|uri|http://www.semanlink.net/tag/peter_bloem +http://www.semanlink.net/tag/peter_bloem|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/manik_varma|creationTime|2020-08-14T01:44:36Z +http://www.semanlink.net/tag/manik_varma|prefLabel|Manik Varma +http://www.semanlink.net/tag/manik_varma|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/manik_varma|related|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/tag/manik_varma|creationDate|2020-08-14 +http://www.semanlink.net/tag/manik_varma|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/manik_varma|describedBy|https://www.microsoft.com/en-us/research/people/manik/ +http://www.semanlink.net/tag/manik_varma|uri|http://www.semanlink.net/tag/manik_varma +http://www.semanlink.net/tag/manik_varma|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/phil_archer|creationTime|2014-11-05T20:27:37Z +http://www.semanlink.net/tag/phil_archer|prefLabel|Phil Archer +http://www.semanlink.net/tag/phil_archer|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/phil_archer|related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/phil_archer|related|http://www.semanlink.net/tag/ivan_herman +http://www.semanlink.net/tag/phil_archer|creationDate|2014-11-05 +http://www.semanlink.net/tag/phil_archer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/phil_archer|uri|http://www.semanlink.net/tag/phil_archer +http://www.semanlink.net/tag/phil_archer|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/json_ld|creationTime|2011-06-09T16:27:10Z +http://www.semanlink.net/tag/json_ld|prefLabel|JSON-LD +http://www.semanlink.net/tag/json_ld|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/json_ld|broader|http://www.semanlink.net/tag/rdf_in_json +http://www.semanlink.net/tag/json_ld|creationDate|2011-06-09 +http://www.semanlink.net/tag/json_ld|comment|JavaScript Object Notation for Linking Data +http://www.semanlink.net/tag/json_ld|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/json_ld|homepage|http://json-ld.org/ +http://www.semanlink.net/tag/json_ld|altLabel|JSONLD +http://www.semanlink.net/tag/json_ld|uri|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/json_ld|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/json_ld|broader_prefLabel|RDF-in-JSON +http://www.semanlink.net/tag/json_ld|broader_altLabel|LD +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/kingsley_idehen 
+http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/json_ld|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/volvic|creationTime|2016-06-09T22:43:29Z +http://www.semanlink.net/tag/volvic|prefLabel|Volvic +http://www.semanlink.net/tag/volvic|creationDate|2016-06-09 +http://www.semanlink.net/tag/volvic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/volvic|describedBy|https://fr.wikipedia.org/wiki/Volvic +http://www.semanlink.net/tag/volvic|uri|http://www.semanlink.net/tag/volvic +http://www.semanlink.net/tag/moussa_kaka|creationTime|2008-01-10T01:08:28Z +http://www.semanlink.net/tag/moussa_kaka|prefLabel|Moussa Kaka +http://www.semanlink.net/tag/moussa_kaka|broader|http://www.semanlink.net/tag/rfi +http://www.semanlink.net/tag/moussa_kaka|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/moussa_kaka|broader|http://www.semanlink.net/tag/journaliste +http://www.semanlink.net/tag/moussa_kaka|creationDate|2008-01-10 +http://www.semanlink.net/tag/moussa_kaka|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/moussa_kaka|uri|http://www.semanlink.net/tag/moussa_kaka +http://www.semanlink.net/tag/moussa_kaka|broader_prefLabel|RFI +http://www.semanlink.net/tag/moussa_kaka|broader_prefLabel|Niger +http://www.semanlink.net/tag/moussa_kaka|broader_prefLabel|Journaliste +http://www.semanlink.net/tag/moussa_kaka|broader_altLabel|Radio France Internationale +http://www.semanlink.net/tag/moussa_kaka|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/moussa_kaka|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/moussa_kaka|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/webmasters_google|creationTime|2013-08-23T13:49:27Z +http://www.semanlink.net/tag/webmasters_google|prefLabel|Webmasters @ Google +http://www.semanlink.net/tag/webmasters_google|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/webmasters_google|creationDate|2013-08-23 +http://www.semanlink.net/tag/webmasters_google|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/webmasters_google|describedBy|http://www.google.com/webmasters/ +http://www.semanlink.net/tag/webmasters_google|uri|http://www.semanlink.net/tag/webmasters_google +http://www.semanlink.net/tag/webmasters_google|broader_prefLabel|Google +http://www.semanlink.net/tag/webmasters_google|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/computational_universe|creationTime|2020-04-15T11:24:27Z +http://www.semanlink.net/tag/computational_universe|prefLabel|Computational Universe +http://www.semanlink.net/tag/computational_universe|creationDate|2020-04-15 +http://www.semanlink.net/tag/computational_universe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/computational_universe|describedBy|https://en.wikipedia.org/wiki/Digital_physics 
+http://www.semanlink.net/tag/computational_universe|uri|http://www.semanlink.net/tag/computational_universe +http://www.semanlink.net/tag/hydra|creationTime|2014-10-29T01:29:19Z +http://www.semanlink.net/tag/hydra|prefLabel|Hydra +http://www.semanlink.net/tag/hydra|broader|http://www.semanlink.net/tag/rest +http://www.semanlink.net/tag/hydra|broader|http://www.semanlink.net/tag/service_description +http://www.semanlink.net/tag/hydra|broader|http://www.semanlink.net/tag/hateoas +http://www.semanlink.net/tag/hydra|broader|http://www.semanlink.net/tag/apis_and_linked_data +http://www.semanlink.net/tag/hydra|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/hydra|related|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/hydra|creationDate|2014-10-29 +http://www.semanlink.net/tag/hydra|comment|"Hypermedia driven web APIs.
+The basic idea behind Hydra is to provide a vocabulary which enables a server to advertise valid state transitions to a client. A client can then use this information to construct HTTP requests which modify the server’s state so that a certain desired goal is achieved. Since all the information about the valid state transitions is exchanged in a machine-processable way at runtime instead of being hardcoded into the client at design time, clients can be decoupled from the server and adapt to changes more easily." +http://www.semanlink.net/tag/hydra|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hydra|homepage|http://www.hydra-cg.com/ +http://www.semanlink.net/tag/hydra|uri|http://www.semanlink.net/tag/hydra +http://www.semanlink.net/tag/hydra|broader_prefLabel|REST +http://www.semanlink.net/tag/hydra|broader_prefLabel|Service description +http://www.semanlink.net/tag/hydra|broader_prefLabel|HATEOAS +http://www.semanlink.net/tag/hydra|broader_prefLabel|APIs and Linked Data +http://www.semanlink.net/tag/hydra|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/hydra|broader_altLabel|Hypermedia API +http://www.semanlink.net/tag/hydra|broader_altLabel|LD +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/samuel_goto +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/hydra|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/best_practices|creationTime|2017-01-14T12:42:44Z +http://www.semanlink.net/tag/best_practices|prefLabel|Best Practices +http://www.semanlink.net/tag/best_practices|creationDate|2017-01-14 +http://www.semanlink.net/tag/best_practices|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/best_practices|uri|http://www.semanlink.net/tag/best_practices +http://www.semanlink.net/tag/gpt_2|creationTime|2020-12-15T17:57:46Z +http://www.semanlink.net/tag/gpt_2|prefLabel|GPT-2 +http://www.semanlink.net/tag/gpt_2|broader|http://www.semanlink.net/tag/openai_gpt +http://www.semanlink.net/tag/gpt_2|creationDate|2020-12-15 +http://www.semanlink.net/tag/gpt_2|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gpt_2|uri|http://www.semanlink.net/tag/gpt_2 
+http://www.semanlink.net/tag/gpt_2|broader_prefLabel|OpenAI GPT +http://www.semanlink.net/tag/lhc|creationTime|2008-09-09T18:22:52Z +http://www.semanlink.net/tag/lhc|prefLabel|LHC +http://www.semanlink.net/tag/lhc|broader|http://www.semanlink.net/tag/experience_scientifique +http://www.semanlink.net/tag/lhc|broader|http://www.semanlink.net/tag/physique_des_particules +http://www.semanlink.net/tag/lhc|broader|http://www.semanlink.net/tag/cern +http://www.semanlink.net/tag/lhc|creationDate|2008-09-09 +http://www.semanlink.net/tag/lhc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lhc|uri|http://www.semanlink.net/tag/lhc +http://www.semanlink.net/tag/lhc|broader_prefLabel|Expérience scientifique +http://www.semanlink.net/tag/lhc|broader_prefLabel|Physique des particules +http://www.semanlink.net/tag/lhc|broader_prefLabel|CERN +http://www.semanlink.net/tag/bert|creationTime|2018-11-05T14:45:44Z +http://www.semanlink.net/tag/bert|prefLabel|BERT +http://www.semanlink.net/tag/bert|broader|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/tag/bert|broader|http://www.semanlink.net/tag/contextualised_word_representations +http://www.semanlink.net/tag/bert|broader|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/bert|broader|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/tag/bert|creationDate|2018-11-05 +http://www.semanlink.net/tag/bert|comment|"""Bidirectional Encoder Representations from Transformers"": pretraining technique for NLP. + +[Google AI blog post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) + +> BERT is designed to pre-train +deep bidirectional representations by jointly +conditioning on both left and right context in +all layers. As a result, the pre-trained BERT +representations can be fine-tuned with just one +additional output layer + +BERT is pre-trained on two auxiliary tasks: **Masked Language Model** and +**Next Sentence Prediction** (but it has been shown in the RoBERTa paper that this +training objective doesn’t help that much). + +The general BERT adaptation approach is to alter the model used for pre-training while retaining the transformer +encoder layers. The model discards the layers used for the final prediction in the pre-training tasks and adds layers to +predict the target task. All parameters are then fine tuned on the target task + +Builds on [#The Transformer](/tag/attention_is_all_you_need) + +Code and pre-trained models open-sourced on Nov 3rd, 2018." 
+http://www.semanlink.net/tag/bert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bert|uri|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/bert|broader_prefLabel|Transformers +http://www.semanlink.net/tag/bert|broader_prefLabel|Contextualized word representations +http://www.semanlink.net/tag/bert|broader_prefLabel|Pre-Trained Language Models +http://www.semanlink.net/tag/bert|broader_prefLabel|NLP@Google +http://www.semanlink.net/tag/bert|broader_altLabel|Transformer +http://www.semanlink.net/tag/bert|broader_altLabel|Transformers +http://www.semanlink.net/tag/bert|broader_altLabel|Attention is All You Need +http://www.semanlink.net/tag/bert|broader_altLabel|Contextualized word embeddings +http://www.semanlink.net/tag/bert|broader_altLabel|PreTrained Language Models +http://www.semanlink.net/tag/bert|broader_altLabel|Deep pre-training in NLP +http://www.semanlink.net/tag/bert|broader_related|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/tag/bert|broader_related|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/bert|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/bert|broader_related|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/social_semantic_web|creationTime|2010-12-06T16:25:41Z +http://www.semanlink.net/tag/social_semantic_web|prefLabel|Social Semantic Web +http://www.semanlink.net/tag/social_semantic_web|creationDate|2010-12-06 +http://www.semanlink.net/tag/social_semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/social_semantic_web|uri|http://www.semanlink.net/tag/social_semantic_web +http://www.semanlink.net/tag/information_bottleneck_method|creationTime|2019-08-15T10:08:08Z +http://www.semanlink.net/tag/information_bottleneck_method|prefLabel|Information bottleneck method +http://www.semanlink.net/tag/information_bottleneck_method|broader|http://www.semanlink.net/tag/information_theory +http://www.semanlink.net/tag/information_bottleneck_method|creationDate|2019-08-15 +http://www.semanlink.net/tag/information_bottleneck_method|comment|"Designed for finding the best tradeoff between accuracy and complexity (compression) when summarizing a random variable X, given a joint probability distribution p(X,Y) between X and an observed relevant variable Y. + +The method **aims to extract a compressed representation of an +input which retains as much as possible information about an output**. It maximizes mutual +information with the output while penalizing the MI with the input ([source](/doc/2019/09/evolution_of_representations_in)). Intuitively, the IB principle preserves the information of the hidden representations about the label while compressing information about the input data. ([source](/doc/2019/08/_1908_01580_the_hsic_bottlenec)) + +**Information Bottleneck Theory of Deep Learning**: the goal of a network is to optimize the IB tradeoff between compression and prediction. 
Contested in [this 2018 paper](https://openreview.net/pdf?id=ry_WPG-A-)" +http://www.semanlink.net/tag/information_bottleneck_method|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/information_bottleneck_method|describedBy|https://en.wikipedia.org/wiki/Information_bottleneck_method +http://www.semanlink.net/tag/information_bottleneck_method|altLabel|Information bottleneck principle +http://www.semanlink.net/tag/information_bottleneck_method|uri|http://www.semanlink.net/tag/information_bottleneck_method +http://www.semanlink.net/tag/information_bottleneck_method|broader_prefLabel|Information theory +http://www.semanlink.net/tag/wolfram_language|creationTime|2014-03-03T00:05:12Z +http://www.semanlink.net/tag/wolfram_language|prefLabel|Wolfram Language +http://www.semanlink.net/tag/wolfram_language|broader|http://www.semanlink.net/tag/wolfram +http://www.semanlink.net/tag/wolfram_language|related|http://www.semanlink.net/tag/knowledge_based_ai +http://www.semanlink.net/tag/wolfram_language|creationDate|2014-03-03 +http://www.semanlink.net/tag/wolfram_language|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wolfram_language|uri|http://www.semanlink.net/tag/wolfram_language +http://www.semanlink.net/tag/wolfram_language|broader_prefLabel|Stephen Wolfram +http://www.semanlink.net/tag/wolfram_language|broader_altLabel|Wolfram +http://www.semanlink.net/tag/cryptographie_quantique|creationTime|2021-07-16T13:38:09Z +http://www.semanlink.net/tag/cryptographie_quantique|prefLabel|Cryptographie quantique +http://www.semanlink.net/tag/cryptographie_quantique|broader|http://www.semanlink.net/tag/cryptography +http://www.semanlink.net/tag/cryptographie_quantique|broader|http://www.semanlink.net/tag/quantum_computing +http://www.semanlink.net/tag/cryptographie_quantique|creationDate|2021-07-16 +http://www.semanlink.net/tag/cryptographie_quantique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cryptographie_quantique|uri|http://www.semanlink.net/tag/cryptographie_quantique +http://www.semanlink.net/tag/cryptographie_quantique|broader_prefLabel|Cryptography +http://www.semanlink.net/tag/cryptographie_quantique|broader_prefLabel|Quantum computing +http://www.semanlink.net/tag/cryptographie_quantique|broader_altLabel|Quantum computer +http://www.semanlink.net/tag/cryptographie_quantique|broader_altLabel|Ordinateur quantique +http://www.semanlink.net/tag/deri|creationTime|2008-04-25T11:59:18Z +http://www.semanlink.net/tag/deri|prefLabel|DERI +http://www.semanlink.net/tag/deri|related|http://www.semanlink.net/tag/edward_curry +http://www.semanlink.net/tag/deri|related|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/deri|related|http://www.semanlink.net/tag/axel_polleres +http://www.semanlink.net/tag/deri|related|http://www.semanlink.net/tag/fadi_badra +http://www.semanlink.net/tag/deri|related|http://www.semanlink.net/tag/alexandre_passant +http://www.semanlink.net/tag/deri|creationDate|2008-04-25 +http://www.semanlink.net/tag/deri|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deri|uri|http://www.semanlink.net/tag/deri +http://www.semanlink.net/tag/pollueurs_payeurs|creationTime|2015-11-28T16:26:03Z +http://www.semanlink.net/tag/pollueurs_payeurs|prefLabel|Pollueurs payeurs +http://www.semanlink.net/tag/pollueurs_payeurs|broader|http://www.semanlink.net/tag/economie_ecologique 
+http://www.semanlink.net/tag/pollueurs_payeurs|broader|http://www.semanlink.net/tag/pollution +http://www.semanlink.net/tag/pollueurs_payeurs|creationDate|2015-11-28 +http://www.semanlink.net/tag/pollueurs_payeurs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pollueurs_payeurs|uri|http://www.semanlink.net/tag/pollueurs_payeurs +http://www.semanlink.net/tag/pollueurs_payeurs|broader_prefLabel|Économie écologique +http://www.semanlink.net/tag/pollueurs_payeurs|broader_prefLabel|Pollution +http://www.semanlink.net/tag/pays_d_europe|creationTime|2007-07-08T15:16:33Z +http://www.semanlink.net/tag/pays_d_europe|prefLabel|Pays d'Europe +http://www.semanlink.net/tag/pays_d_europe|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/pays_d_europe|creationDate|2007-07-08 +http://www.semanlink.net/tag/pays_d_europe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pays_d_europe|uri|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/pays_d_europe|broader_prefLabel|Europe +http://www.semanlink.net/tag/graph_database|creationTime|2013-03-12T15:00:24Z +http://www.semanlink.net/tag/graph_database|prefLabel|Graph database +http://www.semanlink.net/tag/graph_database|broader|http://www.semanlink.net/tag/database +http://www.semanlink.net/tag/graph_database|broader|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/graph_database|creationDate|2013-03-12 +http://www.semanlink.net/tag/graph_database|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_database|uri|http://www.semanlink.net/tag/graph_database +http://www.semanlink.net/tag/graph_database|broader_prefLabel|Database +http://www.semanlink.net/tag/graph_database|broader_prefLabel|Graph +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|creationTime|2008-09-10T18:47:49Z +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|prefLabel|SDB: A SPARQL Database for Jena +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|broader|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|broader|http://www.semanlink.net/tag/sparql_and_jena +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|broader|http://www.semanlink.net/tag/jena_and_database +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|creationDate|2008-09-10 +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|describedBy|http://jena.sourceforge.net/SDB/ +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|altLabel|SDB +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|uri|http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|broader_prefLabel|SQL +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|broader_prefLabel|SPARQL AND Jena +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|broader_prefLabel|Jena and database +http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena|broader_related|http://www.semanlink.net/tag/andy_seaborne +http://www.semanlink.net/tag/zotero|creationTime|2007-06-09T00:11:01Z +http://www.semanlink.net/tag/zotero|prefLabel|Zotero +http://www.semanlink.net/tag/zotero|broader|http://www.semanlink.net/tag/firefox_extension 
+http://www.semanlink.net/tag/zotero|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/zotero|creationDate|2007-06-09 +http://www.semanlink.net/tag/zotero|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zotero|uri|http://www.semanlink.net/tag/zotero +http://www.semanlink.net/tag/zotero|broader_prefLabel|Firefox extension +http://www.semanlink.net/tag/zotero|broader_prefLabel|Open Source +http://www.semanlink.net/tag/malware|creationTime|2019-08-16T13:50:54Z +http://www.semanlink.net/tag/malware|prefLabel|Malware +http://www.semanlink.net/tag/malware|broader|http://www.semanlink.net/tag/securite_informatique +http://www.semanlink.net/tag/malware|creationDate|2019-08-16 +http://www.semanlink.net/tag/malware|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/malware|uri|http://www.semanlink.net/tag/malware +http://www.semanlink.net/tag/malware|broader_prefLabel|Cybersecurity +http://www.semanlink.net/tag/malware|broader_prefLabel|Sécurité informatique +http://www.semanlink.net/tag/human_level_ai|creationTime|2018-09-28T21:28:03Z +http://www.semanlink.net/tag/human_level_ai|prefLabel|Human Level AI +http://www.semanlink.net/tag/human_level_ai|broader|http://www.semanlink.net/tag/artificial_general_intelligence +http://www.semanlink.net/tag/human_level_ai|creationDate|2018-09-28 +http://www.semanlink.net/tag/human_level_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/human_level_ai|uri|http://www.semanlink.net/tag/human_level_ai +http://www.semanlink.net/tag/human_level_ai|broader_prefLabel|Artificial general intelligence +http://www.semanlink.net/tag/human_level_ai|broader_altLabel|AGI +http://www.semanlink.net/tag/human_level_ai|broader_related|http://www.semanlink.net/tag/combinatorial_generalization +http://www.semanlink.net/tag/swift|creationTime|2019-03-07T09:07:06Z +http://www.semanlink.net/tag/swift|prefLabel|Swift +http://www.semanlink.net/tag/swift|broader|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/swift|creationDate|2019-03-07 +http://www.semanlink.net/tag/swift|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/swift|uri|http://www.semanlink.net/tag/swift +http://www.semanlink.net/tag/swift|broader_prefLabel|Programming language +http://www.semanlink.net/tag/swift|broader_altLabel|Langage de programmation +http://www.semanlink.net/tag/feature_learning|creationTime|2015-10-16T10:52:01Z +http://www.semanlink.net/tag/feature_learning|prefLabel|Feature learning +http://www.semanlink.net/tag/feature_learning|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/feature_learning|broader|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/tag/feature_learning|related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/feature_learning|creationDate|2015-10-16 +http://www.semanlink.net/tag/feature_learning|comment|"techniques (mostly unsupervised learning algorithms) that learn a feature: a transformation of raw data input to a representation that can be effectively exploited in machine learning tasks + +(= aim at discovering better representations of the inputs provided during training. Classical examples include principal components analysis and cluster analysis. 
Representation learning algorithms often attempt to preserve the information in their input but transform it in a way that makes it useful) +" +http://www.semanlink.net/tag/feature_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/feature_learning|describedBy|https://en.wikipedia.org/wiki/Feature_learning +http://www.semanlink.net/tag/feature_learning|altLabel|Representation learning +http://www.semanlink.net/tag/feature_learning|uri|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/feature_learning|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/feature_learning|broader_prefLabel|Representation learning +http://www.semanlink.net/tag/feature_learning|broader_related|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/sang|creationTime|2021-07-29T00:38:19Z +http://www.semanlink.net/tag/sang|prefLabel|Sang +http://www.semanlink.net/tag/sang|creationDate|2021-07-29 +http://www.semanlink.net/tag/sang|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sang|uri|http://www.semanlink.net/tag/sang +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|creationTime|2012-05-12T09:45:23Z +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|prefLabel|IKS Workshop Salzburg 2012 +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|broader|http://www.semanlink.net/tag/salzburg +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|broader|http://www.semanlink.net/tag/interactive_knowledge_stack +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|broader|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|creationDate|2012-05-12 +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|homepage|http://wiki.iks-project.eu/index.php/Workshops/Salzburg2012 +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|uri|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|broader_prefLabel|Salzburg +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|broader_prefLabel|Interactive Knowledge Stack +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|broader_prefLabel|Workshop +http://www.semanlink.net/tag/iks_workshop_salzburg_2012|broader_altLabel|IKS +http://www.semanlink.net/tag/swing|prefLabel|Swing +http://www.semanlink.net/tag/swing|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/swing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/swing|uri|http://www.semanlink.net/tag/swing +http://www.semanlink.net/tag/swing|broader_prefLabel|Java +http://www.semanlink.net/tag/deep_latent_variable_models|creationTime|2018-10-31T22:53:17Z +http://www.semanlink.net/tag/deep_latent_variable_models|prefLabel|Deep latent variable models +http://www.semanlink.net/tag/deep_latent_variable_models|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/deep_latent_variable_models|broader|http://www.semanlink.net/tag/latent_variable_model +http://www.semanlink.net/tag/deep_latent_variable_models|creationDate|2018-10-31 +http://www.semanlink.net/tag/deep_latent_variable_models|comment|Deep latent variable models assume a generative process whereby a simple random variable is transformed from the latent space to the observed, output space through a deep 
neural network. Generative Adversarial Networks (GAN) and Variational Autoencoders (VAE) are two of the most popular variants of this approach +http://www.semanlink.net/tag/deep_latent_variable_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deep_latent_variable_models|uri|http://www.semanlink.net/tag/deep_latent_variable_models +http://www.semanlink.net/tag/deep_latent_variable_models|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/deep_latent_variable_models|broader_prefLabel|Latent variable model +http://www.semanlink.net/tag/deep_latent_variable_models|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/deep_latent_variable_models|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/coupe_du_monde_2018|creationTime|2018-07-07T15:15:08Z +http://www.semanlink.net/tag/coupe_du_monde_2018|prefLabel|Coupe du monde 2018 +http://www.semanlink.net/tag/coupe_du_monde_2018|broader|http://www.semanlink.net/tag/coupe_du_monde_de_football +http://www.semanlink.net/tag/coupe_du_monde_2018|creationDate|2018-07-07 +http://www.semanlink.net/tag/coupe_du_monde_2018|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coupe_du_monde_2018|uri|http://www.semanlink.net/tag/coupe_du_monde_2018 +http://www.semanlink.net/tag/coupe_du_monde_2018|broader_prefLabel|Coupe du monde de football +http://www.semanlink.net/tag/google_brain|creationTime|2016-11-06T01:56:50Z +http://www.semanlink.net/tag/google_brain|prefLabel|Google Brain +http://www.semanlink.net/tag/google_brain|broader|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/tag/google_brain|creationDate|2016-11-06 +http://www.semanlink.net/tag/google_brain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_brain|describedBy|https://en.wikipedia.org/wiki/Google_Brain +http://www.semanlink.net/tag/google_brain|uri|http://www.semanlink.net/tag/google_brain +http://www.semanlink.net/tag/google_brain|broader_prefLabel|AI@Google +http://www.semanlink.net/tag/microdata|creationTime|2011-06-07T13:45:37Z +http://www.semanlink.net/tag/microdata|prefLabel|Microdata +http://www.semanlink.net/tag/microdata|broader|http://www.semanlink.net/tag/html_data +http://www.semanlink.net/tag/microdata|related|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/microdata|creationDate|2011-06-07 +http://www.semanlink.net/tag/microdata|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/microdata|uri|http://www.semanlink.net/tag/microdata +http://www.semanlink.net/tag/microdata|broader_prefLabel|HTML Data +http://www.semanlink.net/tag/first_order_logic|creationTime|2013-06-05T00:31:06Z +http://www.semanlink.net/tag/first_order_logic|prefLabel|First-order logic +http://www.semanlink.net/tag/first_order_logic|broader|http://www.semanlink.net/tag/logic +http://www.semanlink.net/tag/first_order_logic|creationDate|2013-06-05 +http://www.semanlink.net/tag/first_order_logic|comment|then order drinks +http://www.semanlink.net/tag/first_order_logic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/first_order_logic|describedBy|https://en.wikipedia.org/wiki/First-order_logic +http://www.semanlink.net/tag/first_order_logic|uri|http://www.semanlink.net/tag/first_order_logic +http://www.semanlink.net/tag/first_order_logic|broader_prefLabel|Logic 
+http://www.semanlink.net/tag/relativite|prefLabel|Relativité +http://www.semanlink.net/tag/relativite|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/relativite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/relativite|uri|http://www.semanlink.net/tag/relativite +http://www.semanlink.net/tag/relativite|broader_prefLabel|Physique +http://www.semanlink.net/tag/relativite|broader_altLabel|Physics +http://www.semanlink.net/tag/acl_2018|creationTime|2021-07-08T00:09:40Z +http://www.semanlink.net/tag/acl_2018|prefLabel|ACL 2018 +http://www.semanlink.net/tag/acl_2018|broader|http://www.semanlink.net/tag/acl +http://www.semanlink.net/tag/acl_2018|creationDate|2021-07-08 +http://www.semanlink.net/tag/acl_2018|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/acl_2018|uri|http://www.semanlink.net/tag/acl_2018 +http://www.semanlink.net/tag/acl_2018|broader_prefLabel|ACL +http://www.semanlink.net/tag/cultural_heritage|creationTime|2009-05-05T16:20:48Z +http://www.semanlink.net/tag/cultural_heritage|prefLabel|Cultural heritage +http://www.semanlink.net/tag/cultural_heritage|related|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/cultural_heritage|creationDate|2009-05-05 +http://www.semanlink.net/tag/cultural_heritage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cultural_heritage|uri|http://www.semanlink.net/tag/cultural_heritage +http://www.semanlink.net/tag/stefan_zweig|creationTime|2019-08-25T19:37:49Z +http://www.semanlink.net/tag/stefan_zweig|prefLabel|Stefan Zweig +http://www.semanlink.net/tag/stefan_zweig|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/stefan_zweig|broader|http://www.semanlink.net/tag/autriche +http://www.semanlink.net/tag/stefan_zweig|broader|http://www.semanlink.net/tag/intellectuel +http://www.semanlink.net/tag/stefan_zweig|creationDate|2019-08-25 +http://www.semanlink.net/tag/stefan_zweig|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stefan_zweig|describedBy|https://fr.wikipedia.org/wiki/Stefan_Zweig +http://www.semanlink.net/tag/stefan_zweig|uri|http://www.semanlink.net/tag/stefan_zweig +http://www.semanlink.net/tag/stefan_zweig|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/stefan_zweig|broader_prefLabel|Autriche +http://www.semanlink.net/tag/stefan_zweig|broader_prefLabel|Intellectuel +http://www.semanlink.net/tag/langues_anciennes|creationTime|2010-08-21T12:31:56Z +http://www.semanlink.net/tag/langues_anciennes|prefLabel|Langues anciennes +http://www.semanlink.net/tag/langues_anciennes|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/langues_anciennes|creationDate|2010-08-21 +http://www.semanlink.net/tag/langues_anciennes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/langues_anciennes|uri|http://www.semanlink.net/tag/langues_anciennes +http://www.semanlink.net/tag/langues_anciennes|broader_prefLabel|Langues +http://www.semanlink.net/tag/feynman|prefLabel|Feynman +http://www.semanlink.net/tag/feynman|broader|http://www.semanlink.net/tag/prix_nobel_de_physique +http://www.semanlink.net/tag/feynman|broader|http://www.semanlink.net/tag/scientifique +http://www.semanlink.net/tag/feynman|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/feynman|broader|http://www.semanlink.net/tag/physicien 
+http://www.semanlink.net/tag/feynman|broader|http://www.semanlink.net/tag/mecanique_quantique +http://www.semanlink.net/tag/feynman|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/feynman|uri|http://www.semanlink.net/tag/feynman +http://www.semanlink.net/tag/feynman|broader_prefLabel|Prix Nobel de physique +http://www.semanlink.net/tag/feynman|broader_prefLabel|Scientifique +http://www.semanlink.net/tag/feynman|broader_prefLabel|Physique +http://www.semanlink.net/tag/feynman|broader_prefLabel|Physicien +http://www.semanlink.net/tag/feynman|broader_prefLabel|Mécanique quantique +http://www.semanlink.net/tag/feynman|broader_altLabel|Savant +http://www.semanlink.net/tag/feynman|broader_altLabel|Physics +http://www.semanlink.net/tag/semanlink2_related|creationTime|2014-05-17T17:41:35Z +http://www.semanlink.net/tag/semanlink2_related|prefLabel|Semanlink2 related +http://www.semanlink.net/tag/semanlink2_related|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/semanlink2_related|broader|http://www.semanlink.net/tag/semanlink2 +http://www.semanlink.net/tag/semanlink2_related|related|http://www.semanlink.net/tag/concept_extraction +http://www.semanlink.net/tag/semanlink2_related|creationDate|2014-05-17 +http://www.semanlink.net/tag/semanlink2_related|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semanlink2_related|uri|http://www.semanlink.net/tag/semanlink2_related +http://www.semanlink.net/tag/semanlink2_related|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/semanlink2_related|broader_prefLabel|Semanlink2 +http://www.semanlink.net/tag/mark_birbeck|creationTime|2012-03-19T22:56:47Z +http://www.semanlink.net/tag/mark_birbeck|prefLabel|Mark Birbeck +http://www.semanlink.net/tag/mark_birbeck|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/mark_birbeck|creationDate|2012-03-19 +http://www.semanlink.net/tag/mark_birbeck|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mark_birbeck|uri|http://www.semanlink.net/tag/mark_birbeck +http://www.semanlink.net/tag/mark_birbeck|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/r_d|creationTime|2021-03-02T12:14:22Z +http://www.semanlink.net/tag/r_d|prefLabel|R&D +http://www.semanlink.net/tag/r_d|creationDate|2021-03-02 +http://www.semanlink.net/tag/r_d|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/r_d|uri|http://www.semanlink.net/tag/r_d +http://www.semanlink.net/tag/jeanne_d_arc|creationTime|2009-09-22T00:12:46Z +http://www.semanlink.net/tag/jeanne_d_arc|prefLabel|Jeanne d'Arc +http://www.semanlink.net/tag/jeanne_d_arc|creationDate|2009-09-22 +http://www.semanlink.net/tag/jeanne_d_arc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeanne_d_arc|describedBy|https://fr.wikipedia.org/wiki/Jeanne_d'Arc +http://www.semanlink.net/tag/jeanne_d_arc|uri|http://www.semanlink.net/tag/jeanne_d_arc +http://www.semanlink.net/tag/language_models_knowledge|creationTime|2020-07-11T15:10:34Z +http://www.semanlink.net/tag/language_models_knowledge|prefLabel|Language Models + Knowledge +http://www.semanlink.net/tag/language_models_knowledge|broader|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/language_models_knowledge|broader|http://www.semanlink.net/tag/language_model 
+http://www.semanlink.net/tag/language_models_knowledge|broader|http://www.semanlink.net/tag/ai_knowledge_bases +http://www.semanlink.net/tag/language_models_knowledge|related|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/language_models_knowledge|creationDate|2020-07-11 +http://www.semanlink.net/tag/language_models_knowledge|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/language_models_knowledge|uri|http://www.semanlink.net/tag/language_models_knowledge +http://www.semanlink.net/tag/language_models_knowledge|broader_prefLabel|Knowledge bases +http://www.semanlink.net/tag/language_models_knowledge|broader_prefLabel|Language model +http://www.semanlink.net/tag/language_models_knowledge|broader_prefLabel|AI + Knowledge Bases +http://www.semanlink.net/tag/language_models_knowledge|broader_altLabel|Knowledge Base +http://www.semanlink.net/tag/language_models_knowledge|broader_altLabel|Language Modeling +http://www.semanlink.net/tag/language_models_knowledge|broader_altLabel|LM +http://www.semanlink.net/tag/language_models_knowledge|broader_altLabel|Statistical Language Model +http://www.semanlink.net/tag/language_models_knowledge|broader_related|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/language_models_knowledge|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/language_models_knowledge|broader_related|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/tag/web_of_needs|creationTime|2013-05-14T18:38:06Z +http://www.semanlink.net/tag/web_of_needs|prefLabel|Web of Needs +http://www.semanlink.net/tag/web_of_needs|creationDate|2013-05-14 +http://www.semanlink.net/tag/web_of_needs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_of_needs|uri|http://www.semanlink.net/tag/web_of_needs +http://www.semanlink.net/tag/bayesian_reasoning|creationTime|2015-11-20T17:28:08Z +http://www.semanlink.net/tag/bayesian_reasoning|prefLabel|Bayesian Reasoning +http://www.semanlink.net/tag/bayesian_reasoning|creationDate|2015-11-20 +http://www.semanlink.net/tag/bayesian_reasoning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bayesian_reasoning|uri|http://www.semanlink.net/tag/bayesian_reasoning +http://www.semanlink.net/tag/rss_dev|prefLabel|RSS Dev +http://www.semanlink.net/tag/rss_dev|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/rss_dev|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/rss_dev|broader|http://www.semanlink.net/tag/rss +http://www.semanlink.net/tag/rss_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rss_dev|uri|http://www.semanlink.net/tag/rss_dev +http://www.semanlink.net/tag/rss_dev|broader_prefLabel|Web dev +http://www.semanlink.net/tag/rss_dev|broader_prefLabel|Dev +http://www.semanlink.net/tag/rss_dev|broader_prefLabel|RSS +http://www.semanlink.net/tag/rss_dev|broader_altLabel|Web app dev +http://www.semanlink.net/tag/evolutionary_computation|creationTime|2018-01-06T15:07:41Z +http://www.semanlink.net/tag/evolutionary_computation|prefLabel|Evolutionary computation +http://www.semanlink.net/tag/evolutionary_computation|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/evolutionary_computation|creationDate|2018-01-06 +http://www.semanlink.net/tag/evolutionary_computation|comment|family of 
algorithms for global optimization inspired by biological evolution +http://www.semanlink.net/tag/evolutionary_computation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/evolutionary_computation|describedBy|https://en.wikipedia.org/wiki/Evolutionary_computation +http://www.semanlink.net/tag/evolutionary_computation|uri|http://www.semanlink.net/tag/evolutionary_computation +http://www.semanlink.net/tag/evolutionary_computation|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/evolutionary_computation|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/evolutionary_computation|broader_altLabel|AI +http://www.semanlink.net/tag/evolutionary_computation|broader_altLabel|IA +http://www.semanlink.net/tag/evolutionary_computation|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/eswc_2011|creationTime|2011-02-16T01:42:24Z +http://www.semanlink.net/tag/eswc_2011|prefLabel|ESWC 2011 +http://www.semanlink.net/tag/eswc_2011|broader|http://www.semanlink.net/tag/eswc +http://www.semanlink.net/tag/eswc_2011|creationDate|2011-02-16 +http://www.semanlink.net/tag/eswc_2011|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eswc_2011|homepage|http://www.eswc2011.org +http://www.semanlink.net/tag/eswc_2011|uri|http://www.semanlink.net/tag/eswc_2011 +http://www.semanlink.net/tag/eswc_2011|broader_prefLabel|ESWC +http://www.semanlink.net/tag/insect_collapse|creationTime|2019-01-15T13:25:21Z +http://www.semanlink.net/tag/insect_collapse|prefLabel|Insect collapse +http://www.semanlink.net/tag/insect_collapse|broader|http://www.semanlink.net/tag/biodiversite_declin +http://www.semanlink.net/tag/insect_collapse|broader|http://www.semanlink.net/tag/insecte +http://www.semanlink.net/tag/insect_collapse|creationDate|2019-01-15 +http://www.semanlink.net/tag/insect_collapse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/insect_collapse|uri|http://www.semanlink.net/tag/insect_collapse +http://www.semanlink.net/tag/insect_collapse|broader_prefLabel|Biodiversité : effondrement +http://www.semanlink.net/tag/insect_collapse|broader_prefLabel|Insecte +http://www.semanlink.net/tag/okfn_datahub|creationTime|2013-09-02T11:08:52Z +http://www.semanlink.net/tag/okfn_datahub|prefLabel|OKFN Datahub +http://www.semanlink.net/tag/okfn_datahub|broader|http://www.semanlink.net/tag/open_data +http://www.semanlink.net/tag/okfn_datahub|broader|http://www.semanlink.net/tag/open_knowledge_foundation +http://www.semanlink.net/tag/okfn_datahub|broader|http://www.semanlink.net/tag/data_management_platform +http://www.semanlink.net/tag/okfn_datahub|creationDate|2013-09-02 +http://www.semanlink.net/tag/okfn_datahub|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/okfn_datahub|uri|http://www.semanlink.net/tag/okfn_datahub +http://www.semanlink.net/tag/okfn_datahub|broader_prefLabel|Open Data +http://www.semanlink.net/tag/okfn_datahub|broader_prefLabel|Open Knowledge Foundation +http://www.semanlink.net/tag/okfn_datahub|broader_prefLabel|Data management platform +http://www.semanlink.net/tag/okfn_datahub|broader_related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/jena_introduction|prefLabel|Jena : Introduction +http://www.semanlink.net/tag/jena_introduction|broader|http://www.semanlink.net/tag/jena 
+http://www.semanlink.net/tag/jena_introduction|broader|http://www.semanlink.net/tag/introduction +http://www.semanlink.net/tag/jena_introduction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jena_introduction|uri|http://www.semanlink.net/tag/jena_introduction +http://www.semanlink.net/tag/jena_introduction|broader_prefLabel|Jena +http://www.semanlink.net/tag/jena_introduction|broader_prefLabel|Introduction +http://www.semanlink.net/tag/jena_introduction|broader_related|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/verts|prefLabel|Verts +http://www.semanlink.net/tag/verts|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/verts|broader|http://www.semanlink.net/tag/politique +http://www.semanlink.net/tag/verts|broader|http://www.semanlink.net/tag/politique_et_environnement +http://www.semanlink.net/tag/verts|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/verts|uri|http://www.semanlink.net/tag/verts +http://www.semanlink.net/tag/verts|broader_prefLabel|Écologie +http://www.semanlink.net/tag/verts|broader_prefLabel|Politique +http://www.semanlink.net/tag/verts|broader_prefLabel|Politique et environnement +http://www.semanlink.net/tag/verts|broader_related|http://www.semanlink.net/tag/verts +http://www.semanlink.net/tag/elasticsearch|creationTime|2015-03-06T15:31:08Z +http://www.semanlink.net/tag/elasticsearch|prefLabel|ElasticSearch +http://www.semanlink.net/tag/elasticsearch|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/elasticsearch|broader|http://www.semanlink.net/tag/text_search +http://www.semanlink.net/tag/elasticsearch|related|http://www.semanlink.net/tag/lucene +http://www.semanlink.net/tag/elasticsearch|creationDate|2015-03-06 +http://www.semanlink.net/tag/elasticsearch|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elasticsearch|homepage|https://www.elastic.co +http://www.semanlink.net/tag/elasticsearch|uri|http://www.semanlink.net/tag/elasticsearch +http://www.semanlink.net/tag/elasticsearch|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/elasticsearch|broader_prefLabel|Text Search +http://www.semanlink.net/tag/elasticsearch|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/elasticsearch|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/semantic_overflow|creationTime|2010-06-23T16:06:38Z +http://www.semanlink.net/tag/semantic_overflow|prefLabel|Semantic Overflow +http://www.semanlink.net/tag/semantic_overflow|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_overflow|broader|http://www.semanlink.net/tag/howto +http://www.semanlink.net/tag/semantic_overflow|broader|http://www.semanlink.net/tag/q_a +http://www.semanlink.net/tag/semantic_overflow|creationDate|2010-06-23 +http://www.semanlink.net/tag/semantic_overflow|comment|For questions about semantic web techniques and technologies. 
+http://www.semanlink.net/tag/semantic_overflow|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_overflow|homepage|http://www.semanticoverflow.com/ +http://www.semanlink.net/tag/semantic_overflow|uri|http://www.semanlink.net/tag/semantic_overflow +http://www.semanlink.net/tag/semantic_overflow|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_overflow|broader_prefLabel|Howto +http://www.semanlink.net/tag/semantic_overflow|broader_prefLabel|Q&A +http://www.semanlink.net/tag/semantic_overflow|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_overflow|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/protege|prefLabel|Protégé +http://www.semanlink.net/tag/protege|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/protege|broader|http://www.semanlink.net/tag/owl_tool +http://www.semanlink.net/tag/protege|broader|http://www.semanlink.net/tag/stanford +http://www.semanlink.net/tag/protege|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/protege|uri|http://www.semanlink.net/tag/protege +http://www.semanlink.net/tag/protege|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/protege|broader_prefLabel|OWL tool +http://www.semanlink.net/tag/protege|broader_prefLabel|Stanford +http://www.semanlink.net/tag/protege|broader_altLabel|Ontology +http://www.semanlink.net/tag/semantic_hierarchies|creationTime|2021-05-17T15:21:54Z +http://www.semanlink.net/tag/semantic_hierarchies|prefLabel|Semantic hierarchies +http://www.semanlink.net/tag/semantic_hierarchies|broader|http://www.semanlink.net/tag/hierarchies_in_ml +http://www.semanlink.net/tag/semantic_hierarchies|related|http://www.semanlink.net/tag/semanlink_tag_finder +http://www.semanlink.net/tag/semantic_hierarchies|creationDate|2021-05-17 +http://www.semanlink.net/tag/semantic_hierarchies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_hierarchies|uri|http://www.semanlink.net/tag/semantic_hierarchies +http://www.semanlink.net/tag/semantic_hierarchies|broader_prefLabel|Hierarchies in ML +http://www.semanlink.net/tag/diy|creationTime|2011-10-13T23:44:38Z +http://www.semanlink.net/tag/diy|prefLabel|DIY +http://www.semanlink.net/tag/diy|creationDate|2011-10-13 +http://www.semanlink.net/tag/diy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/diy|uri|http://www.semanlink.net/tag/diy +http://www.semanlink.net/tag/test_adn_de_filiation|creationTime|2007-09-18T21:54:17Z +http://www.semanlink.net/tag/test_adn_de_filiation|prefLabel|Test ADN de filiation +http://www.semanlink.net/tag/test_adn_de_filiation|broader|http://www.semanlink.net/tag/adn +http://www.semanlink.net/tag/test_adn_de_filiation|creationDate|2007-09-18 +http://www.semanlink.net/tag/test_adn_de_filiation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/test_adn_de_filiation|uri|http://www.semanlink.net/tag/test_adn_de_filiation +http://www.semanlink.net/tag/test_adn_de_filiation|broader_prefLabel|ADN +http://www.semanlink.net/tag/test_adn_de_filiation|broader_altLabel|DNA +http://www.semanlink.net/tag/ville|prefLabel|Ville +http://www.semanlink.net/tag/ville|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/ville|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ville|uri|http://www.semanlink.net/tag/ville 
+http://www.semanlink.net/tag/ville|broader_prefLabel|Géographie +http://www.semanlink.net/tag/11_septembre_2001|prefLabel|11 septembre 2001 +http://www.semanlink.net/tag/11_septembre_2001|broader|http://www.semanlink.net/tag/al_qaida +http://www.semanlink.net/tag/11_septembre_2001|broader|http://www.semanlink.net/tag/ben_laden +http://www.semanlink.net/tag/11_septembre_2001|broader|http://www.semanlink.net/tag/terrorisme_islamiste +http://www.semanlink.net/tag/11_septembre_2001|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/11_septembre_2001|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/11_septembre_2001|altLabel|11 septembre +http://www.semanlink.net/tag/11_septembre_2001|uri|http://www.semanlink.net/tag/11_septembre_2001 +http://www.semanlink.net/tag/11_septembre_2001|broader_prefLabel|Al-Qaida +http://www.semanlink.net/tag/11_septembre_2001|broader_prefLabel|Ben Laden +http://www.semanlink.net/tag/11_septembre_2001|broader_prefLabel|Terrorisme islamiste +http://www.semanlink.net/tag/11_septembre_2001|broader_prefLabel|USA +http://www.semanlink.net/tag/11_septembre_2001|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/11_septembre_2001|broader_altLabel|United States +http://www.semanlink.net/tag/thinking_tools|prefLabel|Thinking tools +http://www.semanlink.net/tag/thinking_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thinking_tools|uri|http://www.semanlink.net/tag/thinking_tools +http://www.semanlink.net/tag/intervention_francaise_au_mali|creationTime|2013-01-22T22:30:47Z +http://www.semanlink.net/tag/intervention_francaise_au_mali|prefLabel|Intervention française au Mali +http://www.semanlink.net/tag/intervention_francaise_au_mali|broader|http://www.semanlink.net/tag/france_politique_etrangere +http://www.semanlink.net/tag/intervention_francaise_au_mali|broader|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/intervention_francaise_au_mali|broader|http://www.semanlink.net/tag/france_afrique +http://www.semanlink.net/tag/intervention_francaise_au_mali|creationDate|2013-01-22 +http://www.semanlink.net/tag/intervention_francaise_au_mali|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/intervention_francaise_au_mali|uri|http://www.semanlink.net/tag/intervention_francaise_au_mali +http://www.semanlink.net/tag/intervention_francaise_au_mali|broader_prefLabel|France : politique étrangère +http://www.semanlink.net/tag/intervention_francaise_au_mali|broader_prefLabel|Mali +http://www.semanlink.net/tag/intervention_francaise_au_mali|broader_prefLabel|France / Afrique +http://www.semanlink.net/tag/famine|prefLabel|Famine +http://www.semanlink.net/tag/famine|broader|http://www.semanlink.net/tag/faim +http://www.semanlink.net/tag/famine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/famine|altLabel|Crise alimentaire +http://www.semanlink.net/tag/famine|uri|http://www.semanlink.net/tag/famine +http://www.semanlink.net/tag/famine|broader_prefLabel|Faim +http://www.semanlink.net/tag/intel|prefLabel|Intel +http://www.semanlink.net/tag/intel|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/intel|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/intel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/intel|uri|http://www.semanlink.net/tag/intel 
+http://www.semanlink.net/tag/intel|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/intel|broader_prefLabel|Technologie +http://www.semanlink.net/tag/h5n1|creationTime|2011-12-21T22:50:22Z +http://www.semanlink.net/tag/h5n1|prefLabel|H5N1 +http://www.semanlink.net/tag/h5n1|broader|http://www.semanlink.net/tag/grippe_aviaire +http://www.semanlink.net/tag/h5n1|broader|http://www.semanlink.net/tag/virus +http://www.semanlink.net/tag/h5n1|creationDate|2011-12-21 +http://www.semanlink.net/tag/h5n1|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/h5n1|uri|http://www.semanlink.net/tag/h5n1 +http://www.semanlink.net/tag/h5n1|broader_prefLabel|Grippe aviaire +http://www.semanlink.net/tag/h5n1|broader_prefLabel|Virus +http://www.semanlink.net/tag/xsparql|creationTime|2009-06-24T21:59:01Z +http://www.semanlink.net/tag/xsparql|prefLabel|XSPARQL +http://www.semanlink.net/tag/xsparql|broader|http://www.semanlink.net/tag/xquery +http://www.semanlink.net/tag/xsparql|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/xsparql|related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/xsparql|creationDate|2009-06-24 +http://www.semanlink.net/tag/xsparql|comment|"This specification defines a merge of SPARQL and XQuery, and has the potential to bring XML and RDF closer together. XSPARQL provides concise and intuitive solutions for mapping between XML and RDF in either direction, addressing both the use cases of GRDDL and SAWSDL. +" +http://www.semanlink.net/tag/xsparql|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xsparql|uri|http://www.semanlink.net/tag/xsparql +http://www.semanlink.net/tag/xsparql|broader_prefLabel|XQuery +http://www.semanlink.net/tag/xsparql|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/starspace|creationTime|2018-09-11T00:16:12Z +http://www.semanlink.net/tag/starspace|prefLabel|StarSpace +http://www.semanlink.net/tag/starspace|broader|http://www.semanlink.net/tag/ai_facebook +http://www.semanlink.net/tag/starspace|broader|http://www.semanlink.net/tag/antoine_bordes +http://www.semanlink.net/tag/starspace|broader|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/starspace|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/starspace|broader|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/starspace|related|http://www.semanlink.net/tag/transe +http://www.semanlink.net/tag/starspace|related|http://www.semanlink.net/tag/fasttext +http://www.semanlink.net/tag/starspace|related|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/tag/starspace|creationDate|2018-09-11 +http://www.semanlink.net/tag/starspace|comment|"> general-purpose neural embedding +model that can solve a wide variety of problems: labeling +tasks such as text classification, ranking tasks such as information +retrieval/web search, collaborative filtering-based +or content-based recommendation, embedding of multirelational +graphs, and learning word, sentence or document +level embeddings + +[Github](https://github.com/facebookresearch/starSpace) + +(seems to be the solution for [#Multi-Label classification](/tag/multi_label_classification) that [#FastText](/tag/fasttext) doesn't support very well) +" +http://www.semanlink.net/tag/starspace|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/starspace|uri|http://www.semanlink.net/tag/starspace 
+http://www.semanlink.net/tag/starspace|broader_prefLabel|AI@Facebook +http://www.semanlink.net/tag/starspace|broader_prefLabel|Antoine Bordes +http://www.semanlink.net/tag/starspace|broader_prefLabel|Entity embeddings +http://www.semanlink.net/tag/starspace|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/starspace|broader_prefLabel|NLP@Facebook +http://www.semanlink.net/tag/starspace|broader_altLabel|embedding +http://www.semanlink.net/tag/starspace|broader_related|http://www.semanlink.net/tag/facebook_fair +http://www.semanlink.net/tag/starspace|broader_related|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/starspace|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/starspace|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/diacritics_in_uri|prefLabel|Diacritics in URI +http://www.semanlink.net/tag/diacritics_in_uri|broader|http://www.semanlink.net/tag/encoding +http://www.semanlink.net/tag/diacritics_in_uri|broader|http://www.semanlink.net/tag/uri_encoding +http://www.semanlink.net/tag/diacritics_in_uri|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/diacritics_in_uri|broader|http://www.semanlink.net/tag/diacritics +http://www.semanlink.net/tag/diacritics_in_uri|creationDate|2006-08-23 +http://www.semanlink.net/tag/diacritics_in_uri|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/diacritics_in_uri|uri|http://www.semanlink.net/tag/diacritics_in_uri +http://www.semanlink.net/tag/diacritics_in_uri|broader_prefLabel|Encoding +http://www.semanlink.net/tag/diacritics_in_uri|broader_prefLabel|URI encoding +http://www.semanlink.net/tag/diacritics_in_uri|broader_prefLabel|URI +http://www.semanlink.net/tag/diacritics_in_uri|broader_prefLabel|Diacritics +http://www.semanlink.net/tag/feature_extraction|creationTime|2015-10-21T17:01:34Z +http://www.semanlink.net/tag/feature_extraction|prefLabel|Feature extraction +http://www.semanlink.net/tag/feature_extraction|broader|http://www.semanlink.net/tag/features_machine_learning +http://www.semanlink.net/tag/feature_extraction|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/feature_extraction|related|http://www.semanlink.net/tag/dimensionality_reduction +http://www.semanlink.net/tag/feature_extraction|creationDate|2015-10-21 +http://www.semanlink.net/tag/feature_extraction|comment|feature extraction starts from an initial set of measured data and builds derived values (features) intended to be informative, non redundant, facilitating the subsequent learning and generalization steps, in some cases leading to better human interpretations. Feature extraction is related to dimensionality reduction. 
+http://www.semanlink.net/tag/feature_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/feature_extraction|describedBy|https://en.wikipedia.org/wiki/Feature_extraction +http://www.semanlink.net/tag/feature_extraction|uri|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/feature_extraction|broader_prefLabel|Features (Machine Learning) +http://www.semanlink.net/tag/feature_extraction|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/unobtrusive_javascript|creationTime|2012-03-02T01:31:43Z +http://www.semanlink.net/tag/unobtrusive_javascript|prefLabel|Unobtrusive JavaScript +http://www.semanlink.net/tag/unobtrusive_javascript|related|http://www.semanlink.net/tag/jquery +http://www.semanlink.net/tag/unobtrusive_javascript|creationDate|2012-03-02 +http://www.semanlink.net/tag/unobtrusive_javascript|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/unobtrusive_javascript|describedBy|https://en.wikipedia.org/wiki/Unobtrusive_JavaScript +http://www.semanlink.net/tag/unobtrusive_javascript|uri|http://www.semanlink.net/tag/unobtrusive_javascript +http://www.semanlink.net/tag/rdf2vec|creationTime|2018-01-03T16:50:43Z +http://www.semanlink.net/tag/rdf2vec|prefLabel|RDF2VEC +http://www.semanlink.net/tag/rdf2vec|broader|http://www.semanlink.net/tag/rdf_embeddings +http://www.semanlink.net/tag/rdf2vec|related|http://www.semanlink.net/tag/deepwalk +http://www.semanlink.net/tag/rdf2vec|creationDate|2018-01-03 +http://www.semanlink.net/tag/rdf2vec|comment|"principle: extract short random walks starting at the instance vertices, and feed these as sentences to the Word2Vec algorithm. (a relational version of the idea behind [#DeepWalk](/tag/deepwalk)). A vertex +is modeled by its context and a vertex’s context is defined by the vertices up to a given number of steps away. + +[GitHub](https://github.com/IBCNServices/pyRDF2Vec)" +http://www.semanlink.net/tag/rdf2vec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf2vec|describedBy|http://www.rdf2vec.org/ +http://www.semanlink.net/tag/rdf2vec|uri|http://www.semanlink.net/tag/rdf2vec +http://www.semanlink.net/tag/rdf2vec|broader_prefLabel|RDF embeddings +http://www.semanlink.net/tag/semanlink|prefLabel|Semanlink +http://www.semanlink.net/tag/semanlink|broader|http://www.semanlink.net/tag/semantic_tagging +http://www.semanlink.net/tag/semanlink|broader|http://www.semanlink.net/tag/fps +http://www.semanlink.net/tag/semanlink|broader|http://www.semanlink.net/tag/personal_knowledge_graph +http://www.semanlink.net/tag/semanlink|broader|http://www.semanlink.net/tag/personal_information_management +http://www.semanlink.net/tag/semanlink|broader|http://www.semanlink.net/tag/personal_knowledge_management +http://www.semanlink.net/tag/semanlink|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/semanlink|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/semanlink|broader|http://www.semanlink.net/tag/rdf_application +http://www.semanlink.net/tag/semanlink|broader|http://www.semanlink.net/tag/favoris +http://www.semanlink.net/tag/semanlink|related|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/semanlink|comment|Semantic Web, Semantic Me. 
+http://www.semanlink.net/tag/semanlink|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semanlink|homepage|http://www.semanlink.net +http://www.semanlink.net/tag/semanlink|altLabel|SL +http://www.semanlink.net/tag/semanlink|uri|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/semanlink|broader_prefLabel|Semantic tagging +http://www.semanlink.net/tag/semanlink|broader_prefLabel|fps +http://www.semanlink.net/tag/semanlink|broader_prefLabel|Personal Knowledge Graph +http://www.semanlink.net/tag/semanlink|broader_prefLabel|Personal-information management +http://www.semanlink.net/tag/semanlink|broader_prefLabel|Personal Knowledge Management +http://www.semanlink.net/tag/semanlink|broader_prefLabel|Dev +http://www.semanlink.net/tag/semanlink|broader_prefLabel|Tagging +http://www.semanlink.net/tag/semanlink|broader_prefLabel|RDF Application +http://www.semanlink.net/tag/semanlink|broader_prefLabel|Favoris +http://www.semanlink.net/tag/semanlink|broader_altLabel|PIM +http://www.semanlink.net/tag/semanlink|broader_altLabel|favorites +http://www.semanlink.net/tag/semanlink|broader_related|http://www.semanlink.net/tag/personal_knowledge_management +http://www.semanlink.net/tag/graphql|creationTime|2016-10-08T15:00:05Z +http://www.semanlink.net/tag/graphql|prefLabel|GraphQL +http://www.semanlink.net/tag/graphql|broader|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/graphql|related|http://www.semanlink.net/tag/rest +http://www.semanlink.net/tag/graphql|related|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/graphql|creationDate|2016-10-08 +http://www.semanlink.net/tag/graphql|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graphql|describedBy|https://en.wikipedia.org/wiki/GraphQL +http://www.semanlink.net/tag/graphql|uri|http://www.semanlink.net/tag/graphql +http://www.semanlink.net/tag/graphql|broader_prefLabel|Facebook +http://www.semanlink.net/tag/graphql|broader_altLabel|FB +http://www.semanlink.net/tag/cimba|creationTime|2014-11-05T22:15:08Z +http://www.semanlink.net/tag/cimba|prefLabel|CIMBA +http://www.semanlink.net/tag/cimba|broader|http://www.semanlink.net/tag/microblogs +http://www.semanlink.net/tag/cimba|broader|http://www.semanlink.net/tag/personal_data +http://www.semanlink.net/tag/cimba|broader|http://www.semanlink.net/tag/linked_data_platform +http://www.semanlink.net/tag/cimba|related|http://www.semanlink.net/tag/semweb_pro +http://www.semanlink.net/tag/cimba|creationDate|2014-11-05 +http://www.semanlink.net/tag/cimba|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cimba|uri|http://www.semanlink.net/tag/cimba +http://www.semanlink.net/tag/cimba|broader_prefLabel|Microblogs +http://www.semanlink.net/tag/cimba|broader_prefLabel|Personal data +http://www.semanlink.net/tag/cimba|broader_prefLabel|Linked Data Platform +http://www.semanlink.net/tag/cimba|broader_altLabel|Microblogging +http://www.semanlink.net/tag/cimba|broader_altLabel|LDP +http://www.semanlink.net/tag/requirements_engineering|creationTime|2021-06-08T00:16:41Z +http://www.semanlink.net/tag/requirements_engineering|prefLabel|Requirements Engineering +http://www.semanlink.net/tag/requirements_engineering|creationDate|2021-06-08 +http://www.semanlink.net/tag/requirements_engineering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/requirements_engineering|altLabel|Ingénierie des exigences 
+http://www.semanlink.net/tag/requirements_engineering|uri|http://www.semanlink.net/tag/requirements_engineering +http://www.semanlink.net/tag/falashas|creationTime|2012-02-27T22:57:37Z +http://www.semanlink.net/tag/falashas|prefLabel|Falashas +http://www.semanlink.net/tag/falashas|broader|http://www.semanlink.net/tag/juif +http://www.semanlink.net/tag/falashas|broader|http://www.semanlink.net/tag/ethiopie +http://www.semanlink.net/tag/falashas|creationDate|2012-02-27 +http://www.semanlink.net/tag/falashas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/falashas|uri|http://www.semanlink.net/tag/falashas +http://www.semanlink.net/tag/falashas|broader_prefLabel|Juifs +http://www.semanlink.net/tag/falashas|broader_prefLabel|Ethiopie +http://www.semanlink.net/tag/cocteau|creationTime|2008-07-04T22:34:12Z +http://www.semanlink.net/tag/cocteau|prefLabel|Cocteau +http://www.semanlink.net/tag/cocteau|broader|http://www.semanlink.net/tag/realisateur +http://www.semanlink.net/tag/cocteau|broader|http://www.semanlink.net/tag/poete +http://www.semanlink.net/tag/cocteau|creationDate|2008-07-04 +http://www.semanlink.net/tag/cocteau|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cocteau|uri|http://www.semanlink.net/tag/cocteau +http://www.semanlink.net/tag/cocteau|broader_prefLabel|Réalisateur +http://www.semanlink.net/tag/cocteau|broader_prefLabel|Poète +http://www.semanlink.net/tag/cocteau|broader_altLabel|Cinéaste +http://www.semanlink.net/tag/automobile|prefLabel|Automobile +http://www.semanlink.net/tag/automobile|broader|http://www.semanlink.net/tag/transport +http://www.semanlink.net/tag/automobile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/automobile|altLabel|Automotive +http://www.semanlink.net/tag/automobile|uri|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/automobile|broader_prefLabel|Transport +http://www.semanlink.net/tag/public_lod_w3_org|creationTime|2013-08-06T20:11:42Z +http://www.semanlink.net/tag/public_lod_w3_org|prefLabel|public-lod@w3.org +http://www.semanlink.net/tag/public_lod_w3_org|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/public_lod_w3_org|broader|http://www.semanlink.net/tag/mailing_list +http://www.semanlink.net/tag/public_lod_w3_org|creationDate|2013-08-06 +http://www.semanlink.net/tag/public_lod_w3_org|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/public_lod_w3_org|uri|http://www.semanlink.net/tag/public_lod_w3_org +http://www.semanlink.net/tag/public_lod_w3_org|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/public_lod_w3_org|broader_prefLabel|Mailing list +http://www.semanlink.net/tag/public_lod_w3_org|broader_altLabel|LD +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/richard_cyganiak 
+http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/public_lod_w3_org|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/combining_statistics_and_semantics|creationTime|2014-11-27T13:37:51Z +http://www.semanlink.net/tag/combining_statistics_and_semantics|prefLabel|Combining Statistics and Semantics +http://www.semanlink.net/tag/combining_statistics_and_semantics|broader|http://www.semanlink.net/tag/semantics +http://www.semanlink.net/tag/combining_statistics_and_semantics|broader|http://www.semanlink.net/tag/statistics +http://www.semanlink.net/tag/combining_statistics_and_semantics|creationDate|2014-11-27 +http://www.semanlink.net/tag/combining_statistics_and_semantics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/combining_statistics_and_semantics|uri|http://www.semanlink.net/tag/combining_statistics_and_semantics +http://www.semanlink.net/tag/combining_statistics_and_semantics|broader_prefLabel|Semantics +http://www.semanlink.net/tag/combining_statistics_and_semantics|broader_prefLabel|Statistics +http://www.semanlink.net/tag/combining_statistics_and_semantics|broader_altLabel|stats +http://www.semanlink.net/tag/combining_statistics_and_semantics|broader_altLabel|Statistiques +http://www.semanlink.net/tag/music_source_separation|creationTime|2020-03-08T12:12:00Z +http://www.semanlink.net/tag/music_source_separation|prefLabel|Music source separation +http://www.semanlink.net/tag/music_source_separation|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/music_source_separation|broader|http://www.semanlink.net/tag/digital_audio +http://www.semanlink.net/tag/music_source_separation|broader|http://www.semanlink.net/tag/ml_domaines_d_application +http://www.semanlink.net/tag/music_source_separation|creationDate|2020-03-08 +http://www.semanlink.net/tag/music_source_separation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/music_source_separation|uri|http://www.semanlink.net/tag/music_source_separation +http://www.semanlink.net/tag/music_source_separation|broader_prefLabel|Musique +http://www.semanlink.net/tag/music_source_separation|broader_prefLabel|Digital Audio +http://www.semanlink.net/tag/music_source_separation|broader_prefLabel|IA/ML: domaines d'application +http://www.semanlink.net/tag/music_source_separation|broader_altLabel|Music +http://www.semanlink.net/tag/j_ai_un_petit_probleme|creationTime|2016-10-22T15:02:06Z +http://www.semanlink.net/tag/j_ai_un_petit_probleme|prefLabel|J'ai un petit problème +http://www.semanlink.net/tag/j_ai_un_petit_probleme|broader|http://www.semanlink.net/tag/howto +http://www.semanlink.net/tag/j_ai_un_petit_probleme|creationDate|2016-10-22 
+http://www.semanlink.net/tag/j_ai_un_petit_probleme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/j_ai_un_petit_probleme|uri|http://www.semanlink.net/tag/j_ai_un_petit_probleme +http://www.semanlink.net/tag/j_ai_un_petit_probleme|broader_prefLabel|Howto +http://www.semanlink.net/tag/troll|creationTime|2018-11-05T18:27:02Z +http://www.semanlink.net/tag/troll|prefLabel|Troll +http://www.semanlink.net/tag/troll|creationDate|2018-11-05 +http://www.semanlink.net/tag/troll|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/troll|uri|http://www.semanlink.net/tag/troll +http://www.semanlink.net/tag/africa_s_last_wild_places|creationTime|2007-07-14T01:13:18Z +http://www.semanlink.net/tag/africa_s_last_wild_places|prefLabel|Africa's Last Wild Places +http://www.semanlink.net/tag/africa_s_last_wild_places|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/africa_s_last_wild_places|creationDate|2007-07-14 +http://www.semanlink.net/tag/africa_s_last_wild_places|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/africa_s_last_wild_places|uri|http://www.semanlink.net/tag/africa_s_last_wild_places +http://www.semanlink.net/tag/africa_s_last_wild_places|broader_prefLabel|Afrique +http://www.semanlink.net/tag/africa_s_last_wild_places|broader_altLabel|Africa +http://www.semanlink.net/tag/mutual_information|creationTime|2019-08-09T01:44:08Z +http://www.semanlink.net/tag/mutual_information|prefLabel|Mutual information +http://www.semanlink.net/tag/mutual_information|broader|http://www.semanlink.net/tag/information_theory +http://www.semanlink.net/tag/mutual_information|related|http://www.semanlink.net/tag/pointwise_mutual_information +http://www.semanlink.net/tag/mutual_information|creationDate|2019-08-09 +http://www.semanlink.net/tag/mutual_information|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mutual_information|describedBy|https://en.wikipedia.org/wiki/Mutual_information +http://www.semanlink.net/tag/mutual_information|uri|http://www.semanlink.net/tag/mutual_information +http://www.semanlink.net/tag/mutual_information|broader_prefLabel|Information theory +http://www.semanlink.net/tag/parti_socialiste|creationTime|2012-03-16T00:53:13Z +http://www.semanlink.net/tag/parti_socialiste|prefLabel|Parti socialiste +http://www.semanlink.net/tag/parti_socialiste|creationDate|2012-03-16 +http://www.semanlink.net/tag/parti_socialiste|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/parti_socialiste|uri|http://www.semanlink.net/tag/parti_socialiste +http://www.semanlink.net/tag/human_like_ai|creationTime|2018-10-28T17:08:22Z +http://www.semanlink.net/tag/human_like_ai|prefLabel|Human-like AI +http://www.semanlink.net/tag/human_like_ai|broader|http://www.semanlink.net/tag/artificial_general_intelligence +http://www.semanlink.net/tag/human_like_ai|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/human_like_ai|creationDate|2018-10-28 +http://www.semanlink.net/tag/human_like_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/human_like_ai|uri|http://www.semanlink.net/tag/human_like_ai +http://www.semanlink.net/tag/human_like_ai|broader_prefLabel|Artificial general intelligence +http://www.semanlink.net/tag/human_like_ai|broader_prefLabel|Artificial Intelligence 
+http://www.semanlink.net/tag/human_like_ai|broader_altLabel|AGI +http://www.semanlink.net/tag/human_like_ai|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/human_like_ai|broader_altLabel|AI +http://www.semanlink.net/tag/human_like_ai|broader_altLabel|IA +http://www.semanlink.net/tag/human_like_ai|broader_related|http://www.semanlink.net/tag/combinatorial_generalization +http://www.semanlink.net/tag/human_like_ai|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/zero_shot_learning|creationTime|2020-02-07T18:16:25Z +http://www.semanlink.net/tag/zero_shot_learning|prefLabel|Zero-Shot Learning +http://www.semanlink.net/tag/zero_shot_learning|broader|http://www.semanlink.net/tag/zero_shot +http://www.semanlink.net/tag/zero_shot_learning|related|http://www.semanlink.net/tag/few_shot_learning +http://www.semanlink.net/tag/zero_shot_learning|creationDate|2020-02-07 +http://www.semanlink.net/tag/zero_shot_learning|comment|"learn a classifier f : X → Y that must predict novel values of Y that were omitted from the training set (classification under the +restriction that the model cannot look +at any examples from the target classes) " +http://www.semanlink.net/tag/zero_shot_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zero_shot_learning|uri|http://www.semanlink.net/tag/zero_shot_learning +http://www.semanlink.net/tag/zero_shot_learning|broader_prefLabel|Zero shot +http://www.semanlink.net/tag/fourier|prefLabel|Fourier +http://www.semanlink.net/tag/fourier|broader|http://www.semanlink.net/tag/mathematicien +http://www.semanlink.net/tag/fourier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fourier|uri|http://www.semanlink.net/tag/fourier +http://www.semanlink.net/tag/fourier|broader_prefLabel|Mathématicien +http://www.semanlink.net/tag/grece|prefLabel|Grèce +http://www.semanlink.net/tag/grece|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/grece|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/grece|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grece|uri|http://www.semanlink.net/tag/grece +http://www.semanlink.net/tag/grece|broader_prefLabel|Europe +http://www.semanlink.net/tag/grece|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/hyperlinks|prefLabel|Hyperlinks +http://www.semanlink.net/tag/hyperlinks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hyperlinks|uri|http://www.semanlink.net/tag/hyperlinks +http://www.semanlink.net/tag/taiwan|creationTime|2014-03-26T09:44:07Z +http://www.semanlink.net/tag/taiwan|prefLabel|Taiwan +http://www.semanlink.net/tag/taiwan|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/taiwan|related|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/taiwan|creationDate|2014-03-26 +http://www.semanlink.net/tag/taiwan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/taiwan|describedBy|https://en.wikipedia.org/wiki/Taiwan +http://www.semanlink.net/tag/taiwan|uri|http://www.semanlink.net/tag/taiwan +http://www.semanlink.net/tag/taiwan|broader_prefLabel|Asie +http://www.semanlink.net/tag/scientologie|prefLabel|Scientologie +http://www.semanlink.net/tag/scientologie|broader|http://www.semanlink.net/tag/manipulation 
+http://www.semanlink.net/tag/scientologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/scientologie|uri|http://www.semanlink.net/tag/scientologie +http://www.semanlink.net/tag/scientologie|broader_prefLabel|Manipulation +http://www.semanlink.net/tag/v_s_naipaul|creationTime|2013-11-28T23:00:17Z +http://www.semanlink.net/tag/v_s_naipaul|prefLabel|V.S. Naipaul +http://www.semanlink.net/tag/v_s_naipaul|prefLabel|V. S. Naipaul +http://www.semanlink.net/tag/v_s_naipaul|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/v_s_naipaul|broader|http://www.semanlink.net/tag/prix_nobel +http://www.semanlink.net/tag/v_s_naipaul|creationDate|2013-11-28 +http://www.semanlink.net/tag/v_s_naipaul|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/v_s_naipaul|uri|http://www.semanlink.net/tag/v_s_naipaul +http://www.semanlink.net/tag/v_s_naipaul|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/v_s_naipaul|broader_prefLabel|Prix Nobel +http://www.semanlink.net/tag/kg_and_nlp|creationTime|2019-01-27T14:53:45Z +http://www.semanlink.net/tag/kg_and_nlp|prefLabel|Knowledge Graphs and NLP +http://www.semanlink.net/tag/kg_and_nlp|broader|http://www.semanlink.net/tag/ai_knowledge_bases +http://www.semanlink.net/tag/kg_and_nlp|broader|http://www.semanlink.net/tag/graphs_nlp +http://www.semanlink.net/tag/kg_and_nlp|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/kg_and_nlp|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/kg_and_nlp|related|http://www.semanlink.net/tag/nlp_using_knowledge +http://www.semanlink.net/tag/kg_and_nlp|related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/kg_and_nlp|related|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp +http://www.semanlink.net/tag/kg_and_nlp|related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/kg_and_nlp|creationDate|2019-01-27 +http://www.semanlink.net/tag/kg_and_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kg_and_nlp|altLabel|KG + NLP +http://www.semanlink.net/tag/kg_and_nlp|altLabel|Knowledge Graphs + Text +http://www.semanlink.net/tag/kg_and_nlp|uri|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/kg_and_nlp|broader_prefLabel|AI + Knowledge Bases +http://www.semanlink.net/tag/kg_and_nlp|broader_prefLabel|Graphs + NLP +http://www.semanlink.net/tag/kg_and_nlp|broader_prefLabel|NLP +http://www.semanlink.net/tag/kg_and_nlp|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/kg_and_nlp|broader_altLabel|TALN +http://www.semanlink.net/tag/kg_and_nlp|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/kg_and_nlp|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/kg_and_nlp|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/kg_and_nlp|broader_altLabel|KG +http://www.semanlink.net/tag/kg_and_nlp|broader_related|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/tag/erreur_judiciaire|creationTime|2010-12-11T13:19:07Z +http://www.semanlink.net/tag/erreur_judiciaire|prefLabel|Erreur judiciaire +http://www.semanlink.net/tag/erreur_judiciaire|broader|http://www.semanlink.net/tag/justice +http://www.semanlink.net/tag/erreur_judiciaire|creationDate|2010-12-11 
+http://www.semanlink.net/tag/erreur_judiciaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/erreur_judiciaire|uri|http://www.semanlink.net/tag/erreur_judiciaire +http://www.semanlink.net/tag/erreur_judiciaire|broader_prefLabel|Justice +http://www.semanlink.net/tag/hindu_muslim_riots|creationTime|2007-10-18T01:32:21Z +http://www.semanlink.net/tag/hindu_muslim_riots|prefLabel|Hindu/Muslim riots +http://www.semanlink.net/tag/hindu_muslim_riots|broader|http://www.semanlink.net/tag/guerres_de_religion +http://www.semanlink.net/tag/hindu_muslim_riots|broader|http://www.semanlink.net/tag/inde +http://www.semanlink.net/tag/hindu_muslim_riots|creationDate|2007-10-18 +http://www.semanlink.net/tag/hindu_muslim_riots|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hindu_muslim_riots|uri|http://www.semanlink.net/tag/hindu_muslim_riots +http://www.semanlink.net/tag/hindu_muslim_riots|broader_prefLabel|Guerres de religion +http://www.semanlink.net/tag/hindu_muslim_riots|broader_prefLabel|Inde +http://www.semanlink.net/tag/email|creationTime|2010-05-04T08:57:30Z +http://www.semanlink.net/tag/email|prefLabel|email +http://www.semanlink.net/tag/email|creationDate|2010-05-04 +http://www.semanlink.net/tag/email|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/email|uri|http://www.semanlink.net/tag/email +http://www.semanlink.net/tag/developing_countries|creationTime|2011-01-02T16:07:57Z +http://www.semanlink.net/tag/developing_countries|prefLabel|Developing countries +http://www.semanlink.net/tag/developing_countries|related|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/developing_countries|creationDate|2011-01-02 +http://www.semanlink.net/tag/developing_countries|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/developing_countries|uri|http://www.semanlink.net/tag/developing_countries +http://www.semanlink.net/tag/symbiose|creationTime|2007-12-28T02:11:17Z +http://www.semanlink.net/tag/symbiose|prefLabel|Symbiose +http://www.semanlink.net/tag/symbiose|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/symbiose|creationDate|2007-12-28 +http://www.semanlink.net/tag/symbiose|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/symbiose|uri|http://www.semanlink.net/tag/symbiose +http://www.semanlink.net/tag/symbiose|broader_prefLabel|Biology +http://www.semanlink.net/tag/symbiose|broader_altLabel|Biologie +http://www.semanlink.net/tag/ihm_web|prefLabel|IHM web +http://www.semanlink.net/tag/ihm_web|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/ihm_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ihm_web|uri|http://www.semanlink.net/tag/ihm_web +http://www.semanlink.net/tag/ihm_web|broader_prefLabel|Web dev +http://www.semanlink.net/tag/ihm_web|broader_altLabel|Web app dev +http://www.semanlink.net/tag/semanticpedia|creationTime|2012-11-19T14:07:04Z +http://www.semanlink.net/tag/semanticpedia|prefLabel|SémanticPédia +http://www.semanlink.net/tag/semanticpedia|broader|http://www.semanlink.net/tag/dbpedia_francophone +http://www.semanlink.net/tag/semanticpedia|related|http://www.semanlink.net/tag/wikidata +http://www.semanlink.net/tag/semanticpedia|creationDate|2012-11-19 +http://www.semanlink.net/tag/semanticpedia|comment|"""en quelle langue voulons-nous que les machines pensent ?""" 
+http://www.semanlink.net/tag/semanticpedia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semanticpedia|homepage|http://www.semanticpedia.org +http://www.semanlink.net/tag/semanticpedia|uri|http://www.semanlink.net/tag/semanticpedia +http://www.semanlink.net/tag/semanticpedia|broader_prefLabel|dbpedia francophone +http://www.semanlink.net/tag/shallow_parsing_chunking|creationTime|2017-05-24T17:24:40Z +http://www.semanlink.net/tag/shallow_parsing_chunking|prefLabel|Shallow parsing (Chunking) +http://www.semanlink.net/tag/shallow_parsing_chunking|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/shallow_parsing_chunking|broader|http://www.semanlink.net/tag/general_nlp_tasks +http://www.semanlink.net/tag/shallow_parsing_chunking|related|http://www.semanlink.net/tag/part_of_speech_tagging +http://www.semanlink.net/tag/shallow_parsing_chunking|creationDate|2017-05-24 +http://www.semanlink.net/tag/shallow_parsing_chunking|comment|"analysis of a sentence which first identifies constituent parts of sentences (nouns, verbs, adjectives, etc.) and then links them to higher order units that have discrete grammatical meanings (noun groups or phrases, verb groups, etc.). +" +http://www.semanlink.net/tag/shallow_parsing_chunking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/shallow_parsing_chunking|describedBy|https://en.wikipedia.org/wiki/Shallow_parsing +http://www.semanlink.net/tag/shallow_parsing_chunking|uri|http://www.semanlink.net/tag/shallow_parsing_chunking +http://www.semanlink.net/tag/shallow_parsing_chunking|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/shallow_parsing_chunking|broader_prefLabel|General NLP tasks +http://www.semanlink.net/tag/immigration|prefLabel|Immigration +http://www.semanlink.net/tag/immigration|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/immigration|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/immigration|uri|http://www.semanlink.net/tag/immigration +http://www.semanlink.net/tag/immigration|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/os|prefLabel|OS +http://www.semanlink.net/tag/os|broader|http://www.semanlink.net/tag/software +http://www.semanlink.net/tag/os|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/os|uri|http://www.semanlink.net/tag/os +http://www.semanlink.net/tag/os|broader_prefLabel|Software +http://www.semanlink.net/tag/yandex|creationTime|2018-01-04T23:34:50Z +http://www.semanlink.net/tag/yandex|prefLabel|Yandex +http://www.semanlink.net/tag/yandex|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/yandex|creationDate|2018-01-04 +http://www.semanlink.net/tag/yandex|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yandex|uri|http://www.semanlink.net/tag/yandex +http://www.semanlink.net/tag/yandex|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/yandex|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/eclassowl|creationTime|2013-03-02T21:29:58Z +http://www.semanlink.net/tag/eclassowl|prefLabel|eClassOWL +http://www.semanlink.net/tag/eclassowl|broader|http://www.semanlink.net/tag/goodrelations +http://www.semanlink.net/tag/eclassowl|broader|http://www.semanlink.net/tag/product_description +http://www.semanlink.net/tag/eclassowl|creationDate|2013-03-02 
+http://www.semanlink.net/tag/eclassowl|comment|"""The Web Ontology for Products and Services""
+OWL Representation of the eCl@ss Classification Standard" +http://www.semanlink.net/tag/eclassowl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eclassowl|homepage|http://www.heppnetz.de/projects/eclassowl/ +http://www.semanlink.net/tag/eclassowl|uri|http://www.semanlink.net/tag/eclassowl +http://www.semanlink.net/tag/eclassowl|broader_prefLabel|GoodRelations +http://www.semanlink.net/tag/eclassowl|broader_prefLabel|Product description +http://www.semanlink.net/tag/eclassowl|broader_related|http://www.semanlink.net/tag/martin_hepp +http://www.semanlink.net/tag/eclassowl|broader_related|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/exalead|prefLabel|Exalead +http://www.semanlink.net/tag/exalead|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/exalead|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exalead|uri|http://www.semanlink.net/tag/exalead +http://www.semanlink.net/tag/exalead|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/exalead|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/sigma_js|creationTime|2015-08-29T19:13:49Z +http://www.semanlink.net/tag/sigma_js|prefLabel|Sigma.js +http://www.semanlink.net/tag/sigma_js|broader|http://www.semanlink.net/tag/graph_visualization +http://www.semanlink.net/tag/sigma_js|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/sigma_js|creationDate|2015-08-29 +http://www.semanlink.net/tag/sigma_js|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sigma_js|uri|http://www.semanlink.net/tag/sigma_js +http://www.semanlink.net/tag/sigma_js|broader_prefLabel|Graph visualization +http://www.semanlink.net/tag/sigma_js|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/sigma_js|broader_altLabel|js +http://www.semanlink.net/tag/demo|creationTime|2008-09-12T14:28:04Z +http://www.semanlink.net/tag/demo|prefLabel|Demo +http://www.semanlink.net/tag/demo|creationDate|2008-09-12 +http://www.semanlink.net/tag/demo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/demo|uri|http://www.semanlink.net/tag/demo +http://www.semanlink.net/tag/banlieue|prefLabel|Banlieue +http://www.semanlink.net/tag/banlieue|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/banlieue|broader|http://www.semanlink.net/tag/societe +http://www.semanlink.net/tag/banlieue|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/banlieue|uri|http://www.semanlink.net/tag/banlieue +http://www.semanlink.net/tag/banlieue|broader_prefLabel|Ville +http://www.semanlink.net/tag/banlieue|broader_prefLabel|Société +http://www.semanlink.net/tag/artificial_neurons|creationTime|2017-04-22T18:25:32Z +http://www.semanlink.net/tag/artificial_neurons|prefLabel|Artificial neurons +http://www.semanlink.net/tag/artificial_neurons|broader|http://www.semanlink.net/tag/brains_in_silicon +http://www.semanlink.net/tag/artificial_neurons|creationDate|2017-04-22 +http://www.semanlink.net/tag/artificial_neurons|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/artificial_neurons|uri|http://www.semanlink.net/tag/artificial_neurons +http://www.semanlink.net/tag/artificial_neurons|broader_prefLabel|Bio inspired computing devices +http://www.semanlink.net/tag/artificial_neurons|broader_altLabel|Neuromorphic engineering 
+http://www.semanlink.net/tag/artificial_neurons|broader_altLabel|Brains in silicon +http://www.semanlink.net/tag/artificial_neurons|broader_altLabel|Neuromorphique +http://www.semanlink.net/tag/linked_media_framework|creationTime|2012-06-14T15:42:04Z +http://www.semanlink.net/tag/linked_media_framework|prefLabel|Linked Media Framework +http://www.semanlink.net/tag/linked_media_framework|related|http://www.semanlink.net/tag/interactive_knowledge_stack +http://www.semanlink.net/tag/linked_media_framework|related|http://www.semanlink.net/tag/google_refine +http://www.semanlink.net/tag/linked_media_framework|creationDate|2012-06-14 +http://www.semanlink.net/tag/linked_media_framework|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_media_framework|describedBy|http://code.google.com/p/lmf/ +http://www.semanlink.net/tag/linked_media_framework|altLabel|LMF +http://www.semanlink.net/tag/linked_media_framework|uri|http://www.semanlink.net/tag/linked_media_framework +http://www.semanlink.net/tag/job_title_normalization|creationTime|2020-01-23T01:15:30Z +http://www.semanlink.net/tag/job_title_normalization|prefLabel|Job title normalization +http://www.semanlink.net/tag/job_title_normalization|broader|http://www.semanlink.net/tag/nlp_human_resources +http://www.semanlink.net/tag/job_title_normalization|creationDate|2020-01-23 +http://www.semanlink.net/tag/job_title_normalization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/job_title_normalization|uri|http://www.semanlink.net/tag/job_title_normalization +http://www.semanlink.net/tag/job_title_normalization|broader_prefLabel|NLP + Human Resources +http://www.semanlink.net/tag/mer|prefLabel|Mer +http://www.semanlink.net/tag/mer|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/mer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mer|uri|http://www.semanlink.net/tag/mer +http://www.semanlink.net/tag/mer|broader_prefLabel|Géographie +http://www.semanlink.net/tag/video|creationTime|2007-05-19T14:36:13Z +http://www.semanlink.net/tag/video|prefLabel|Video +http://www.semanlink.net/tag/video|creationDate|2007-05-19 +http://www.semanlink.net/tag/video|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/video|uri|http://www.semanlink.net/tag/video +http://www.semanlink.net/tag/nn_dev|creationTime|2019-01-27T12:42:52Z +http://www.semanlink.net/tag/nn_dev|prefLabel|NN dev +http://www.semanlink.net/tag/nn_dev|creationDate|2019-01-27 +http://www.semanlink.net/tag/nn_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nn_dev|uri|http://www.semanlink.net/tag/nn_dev +http://www.semanlink.net/tag/three_way_decisions|creationTime|2019-02-02T15:23:25Z +http://www.semanlink.net/tag/three_way_decisions|prefLabel|Three-way decisions +http://www.semanlink.net/tag/three_way_decisions|broader|http://www.semanlink.net/tag/selective_classification +http://www.semanlink.net/tag/three_way_decisions|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/three_way_decisions|creationDate|2019-02-02 +http://www.semanlink.net/tag/three_way_decisions|comment|"todo à voir : +https://www.researchgate.net/publication/315590093_Cost-sensitive_sequential_three-way_decision_modeling_using_a_deep_neural_network +https://arxiv.org/pdf/1611.05134.pdf 
+https://www.researchgate.net/publication/261379944_Three-way_decisions_with_artificial_neural_networks +" +http://www.semanlink.net/tag/three_way_decisions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/three_way_decisions|altLabel|yes/no/don't know classification +http://www.semanlink.net/tag/three_way_decisions|uri|http://www.semanlink.net/tag/three_way_decisions +http://www.semanlink.net/tag/three_way_decisions|broader_prefLabel|Selective Classification +http://www.semanlink.net/tag/three_way_decisions|broader_prefLabel|Classification +http://www.semanlink.net/tag/benchmark|creationTime|2008-09-24T22:52:14Z +http://www.semanlink.net/tag/benchmark|prefLabel|Benchmark +http://www.semanlink.net/tag/benchmark|creationDate|2008-09-24 +http://www.semanlink.net/tag/benchmark|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/benchmark|uri|http://www.semanlink.net/tag/benchmark +http://www.semanlink.net/tag/mobile_apps|creationTime|2013-07-11T00:52:44Z +http://www.semanlink.net/tag/mobile_apps|prefLabel|Mobile apps +http://www.semanlink.net/tag/mobile_apps|creationDate|2013-07-11 +http://www.semanlink.net/tag/mobile_apps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mobile_apps|uri|http://www.semanlink.net/tag/mobile_apps +http://www.semanlink.net/tag/management|creationTime|2012-04-26T00:41:34Z +http://www.semanlink.net/tag/management|prefLabel|Management +http://www.semanlink.net/tag/management|creationDate|2012-04-26 +http://www.semanlink.net/tag/management|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/management|uri|http://www.semanlink.net/tag/management +http://www.semanlink.net/tag/violence|creationTime|2009-10-18T22:42:14Z +http://www.semanlink.net/tag/violence|prefLabel|Violence +http://www.semanlink.net/tag/violence|creationDate|2009-10-18 +http://www.semanlink.net/tag/violence|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/violence|uri|http://www.semanlink.net/tag/violence +http://www.semanlink.net/tag/freedom_box|creationTime|2011-09-09T10:57:37Z +http://www.semanlink.net/tag/freedom_box|prefLabel|Freedom Box +http://www.semanlink.net/tag/freedom_box|broader|http://www.semanlink.net/tag/internet_libre +http://www.semanlink.net/tag/freedom_box|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/freedom_box|creationDate|2011-09-09 +http://www.semanlink.net/tag/freedom_box|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/freedom_box|uri|http://www.semanlink.net/tag/freedom_box +http://www.semanlink.net/tag/freedom_box|broader_prefLabel|Internet libre +http://www.semanlink.net/tag/freedom_box|broader_prefLabel|Internet +http://www.semanlink.net/tag/rapidminer|creationTime|2013-09-05T00:21:12Z +http://www.semanlink.net/tag/rapidminer|prefLabel|RapidMiner +http://www.semanlink.net/tag/rapidminer|broader|http://www.semanlink.net/tag/data_mining_tools +http://www.semanlink.net/tag/rapidminer|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/rapidminer|broader|http://www.semanlink.net/tag/analyse_semantique +http://www.semanlink.net/tag/rapidminer|creationDate|2013-09-05 +http://www.semanlink.net/tag/rapidminer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rapidminer|homepage|http://rapid-i.com/content/view/181/190/ 
+http://www.semanlink.net/tag/rapidminer|uri|http://www.semanlink.net/tag/rapidminer +http://www.semanlink.net/tag/rapidminer|broader_prefLabel|Data mining tools +http://www.semanlink.net/tag/rapidminer|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/rapidminer|broader_prefLabel|Analyse sémantique +http://www.semanlink.net/tag/therapie_genique|prefLabel|Thérapie génique +http://www.semanlink.net/tag/therapie_genique|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/therapie_genique|broader|http://www.semanlink.net/tag/medecine +http://www.semanlink.net/tag/therapie_genique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/therapie_genique|uri|http://www.semanlink.net/tag/therapie_genique +http://www.semanlink.net/tag/therapie_genique|broader_prefLabel|Genetics +http://www.semanlink.net/tag/therapie_genique|broader_prefLabel|Génétique +http://www.semanlink.net/tag/therapie_genique|broader_prefLabel|Médecine +http://www.semanlink.net/tag/natural_selection|creationTime|2015-01-01T15:59:15Z +http://www.semanlink.net/tag/natural_selection|prefLabel|Natural selection +http://www.semanlink.net/tag/natural_selection|broader|http://www.semanlink.net/tag/evolution +http://www.semanlink.net/tag/natural_selection|creationDate|2015-01-01 +http://www.semanlink.net/tag/natural_selection|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/natural_selection|describedBy|https://en.wikipedia.org/wiki/Natural_selection +http://www.semanlink.net/tag/natural_selection|altLabel|Sélection naturelle +http://www.semanlink.net/tag/natural_selection|uri|http://www.semanlink.net/tag/natural_selection +http://www.semanlink.net/tag/natural_selection|broader_prefLabel|Evolution +http://www.semanlink.net/tag/link_prediction|creationTime|2018-03-26T08:48:01Z +http://www.semanlink.net/tag/link_prediction|prefLabel|Link Prediction +http://www.semanlink.net/tag/link_prediction|broader|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/link_prediction|creationDate|2018-03-26 +http://www.semanlink.net/tag/link_prediction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/link_prediction|uri|http://www.semanlink.net/tag/link_prediction +http://www.semanlink.net/tag/link_prediction|broader_prefLabel|Knowledge Graph Completion +http://www.semanlink.net/tag/link_prediction|broader_related|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/samba|creationTime|2011-07-28T13:37:29Z +http://www.semanlink.net/tag/samba|prefLabel|Samba +http://www.semanlink.net/tag/samba|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/samba|broader|http://www.semanlink.net/tag/danse +http://www.semanlink.net/tag/samba|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/samba|creationDate|2011-07-28 +http://www.semanlink.net/tag/samba|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/samba|uri|http://www.semanlink.net/tag/samba +http://www.semanlink.net/tag/samba|broader_prefLabel|Brésil +http://www.semanlink.net/tag/samba|broader_prefLabel|Danse +http://www.semanlink.net/tag/samba|broader_prefLabel|Musique +http://www.semanlink.net/tag/samba|broader_altLabel|Music +http://www.semanlink.net/tag/samba|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/cross_domain_data_fetching|creationTime|2009-05-20T23:25:43Z 
+http://www.semanlink.net/tag/cross_domain_data_fetching|prefLabel|cross-domain data fetching +http://www.semanlink.net/tag/cross_domain_data_fetching|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/cross_domain_data_fetching|creationDate|2009-05-20 +http://www.semanlink.net/tag/cross_domain_data_fetching|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cross_domain_data_fetching|uri|http://www.semanlink.net/tag/cross_domain_data_fetching +http://www.semanlink.net/tag/cross_domain_data_fetching|broader_prefLabel|Web dev +http://www.semanlink.net/tag/cross_domain_data_fetching|broader_altLabel|Web app dev +http://www.semanlink.net/tag/saudade|creationTime|2013-02-22T14:12:51Z +http://www.semanlink.net/tag/saudade|prefLabel|Saudade +http://www.semanlink.net/tag/saudade|broader|http://www.semanlink.net/tag/souvenirs +http://www.semanlink.net/tag/saudade|creationDate|2013-02-22 +http://www.semanlink.net/tag/saudade|comment|a presença da ausência (Tristão de Ataíde) +http://www.semanlink.net/tag/saudade|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/saudade|uri|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/saudade|broader_prefLabel|Souvenirs +http://www.semanlink.net/tag/saudade|broader_altLabel|Souvenir +http://www.semanlink.net/tag/hack|prefLabel|Hack +http://www.semanlink.net/tag/hack|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hack|uri|http://www.semanlink.net/tag/hack +http://www.semanlink.net/tag/rdfa_1_1|creationTime|2010-05-14T21:25:19Z +http://www.semanlink.net/tag/rdfa_1_1|prefLabel|RDFa 1.1 +http://www.semanlink.net/tag/rdfa_1_1|broader|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/rdfa_1_1|creationDate|2010-05-14 +http://www.semanlink.net/tag/rdfa_1_1|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdfa_1_1|uri|http://www.semanlink.net/tag/rdfa_1_1 +http://www.semanlink.net/tag/rdfa_1_1|broader_prefLabel|RDFa +http://www.semanlink.net/tag/rdfa_1_1|broader_altLabel|RDF/A +http://www.semanlink.net/tag/rdfa_1_1|broader_related|http://www.semanlink.net/tag/elias_torres +http://www.semanlink.net/tag/rdfa_1_1|broader_related|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanlink.net/tag/rdfa_1_1|broader_related|http://www.semanlink.net/tag/microformats +http://www.semanlink.net/tag/ajar|prefLabel|AJAR +http://www.semanlink.net/tag/ajar|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/ajar|broader|http://www.semanlink.net/tag/ajax +http://www.semanlink.net/tag/ajar|creationDate|2007-01-03 +http://www.semanlink.net/tag/ajar|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ajar|uri|http://www.semanlink.net/tag/ajar +http://www.semanlink.net/tag/ajar|broader_prefLabel|RDF +http://www.semanlink.net/tag/ajar|broader_prefLabel|Ajax +http://www.semanlink.net/tag/ajar|broader_altLabel|XMLHttpRequest +http://www.semanlink.net/tag/ajar|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/ajar|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/ajar|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/ajar|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/ajar|broader_related|http://www.semanlink.net/tag/grddl 
+http://www.semanlink.net/tag/salzburg|creationTime|2012-05-12T09:46:59Z +http://www.semanlink.net/tag/salzburg|prefLabel|Salzburg +http://www.semanlink.net/tag/salzburg|broader|http://www.semanlink.net/tag/autriche +http://www.semanlink.net/tag/salzburg|creationDate|2012-05-12 +http://www.semanlink.net/tag/salzburg|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/salzburg|uri|http://www.semanlink.net/tag/salzburg +http://www.semanlink.net/tag/salzburg|broader_prefLabel|Autriche +http://www.semanlink.net/tag/yagan|creationTime|2007-03-28T23:45:47Z +http://www.semanlink.net/tag/yagan|prefLabel|Yagán +http://www.semanlink.net/tag/yagan|broader|http://www.semanlink.net/tag/amerindien +http://www.semanlink.net/tag/yagan|broader|http://www.semanlink.net/tag/terre_de_feu +http://www.semanlink.net/tag/yagan|broader|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/yagan|creationDate|2007-03-28 +http://www.semanlink.net/tag/yagan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yagan|altLabel|Yamana +http://www.semanlink.net/tag/yagan|uri|http://www.semanlink.net/tag/yagan +http://www.semanlink.net/tag/yagan|broader_prefLabel|Amérindien +http://www.semanlink.net/tag/yagan|broader_prefLabel|Terre de Feu +http://www.semanlink.net/tag/yagan|broader_prefLabel|Peuples +http://www.semanlink.net/tag/yagan|broader_altLabel|Native americans +http://www.semanlink.net/tag/artificial_human_intelligence|creationTime|2020-01-16T01:33:39Z +http://www.semanlink.net/tag/artificial_human_intelligence|prefLabel|Artificial Human Intelligence +http://www.semanlink.net/tag/artificial_human_intelligence|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/artificial_human_intelligence|related|http://www.semanlink.net/tag/artificial_general_intelligence +http://www.semanlink.net/tag/artificial_human_intelligence|creationDate|2020-01-16 +http://www.semanlink.net/tag/artificial_human_intelligence|comment|AI systems that employ architectures modeled after the human brain, +http://www.semanlink.net/tag/artificial_human_intelligence|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/artificial_human_intelligence|uri|http://www.semanlink.net/tag/artificial_human_intelligence +http://www.semanlink.net/tag/artificial_human_intelligence|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/artificial_human_intelligence|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/artificial_human_intelligence|broader_altLabel|AI +http://www.semanlink.net/tag/artificial_human_intelligence|broader_altLabel|IA +http://www.semanlink.net/tag/artificial_human_intelligence|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/uri_synonymity|creationTime|2008-05-15T21:57:35Z +http://www.semanlink.net/tag/uri_synonymity|prefLabel|URI Synonymity +http://www.semanlink.net/tag/uri_synonymity|broader|http://www.semanlink.net/tag/synonym_uris +http://www.semanlink.net/tag/uri_synonymity|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/uri_synonymity|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/uri_synonymity|related|http://www.semanlink.net/tag/sw_coreferences +http://www.semanlink.net/tag/uri_synonymity|creationDate|2008-05-15 +http://www.semanlink.net/tag/uri_synonymity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/uri_synonymity|uri|http://www.semanlink.net/tag/uri_synonymity +http://www.semanlink.net/tag/uri_synonymity|broader_prefLabel|Synonym URIs +http://www.semanlink.net/tag/uri_synonymity|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/uri_synonymity|broader_prefLabel|URI +http://www.semanlink.net/tag/uri_synonymity|broader_altLabel|LD +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/uri_synonymity|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/allennlp|creationTime|2018-03-17T13:02:32Z +http://www.semanlink.net/tag/allennlp|prefLabel|AllenNLP +http://www.semanlink.net/tag/allennlp|broader|http://www.semanlink.net/tag/allen_institute_for_ai_a2i +http://www.semanlink.net/tag/allennlp|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/allennlp|related|http://www.semanlink.net/tag/pytorch +http://www.semanlink.net/tag/allennlp|creationDate|2018-03-17 +http://www.semanlink.net/tag/allennlp|comment|open-source NLP research library, built on PyTorch +http://www.semanlink.net/tag/allennlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/allennlp|homepage|http://allennlp.org/ +http://www.semanlink.net/tag/allennlp|uri|http://www.semanlink.net/tag/allennlp +http://www.semanlink.net/tag/allennlp|broader_prefLabel|Allen Institute for AI (A2I) +http://www.semanlink.net/tag/allennlp|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/facebook_graph_search|creationTime|2013-02-09T20:31:26Z +http://www.semanlink.net/tag/facebook_graph_search|prefLabel|Facebook Graph Search +http://www.semanlink.net/tag/facebook_graph_search|broader|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/facebook_graph_search|creationDate|2013-02-09 +http://www.semanlink.net/tag/facebook_graph_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/facebook_graph_search|uri|http://www.semanlink.net/tag/facebook_graph_search +http://www.semanlink.net/tag/facebook_graph_search|broader_prefLabel|Facebook +http://www.semanlink.net/tag/facebook_graph_search|broader_altLabel|FB 
+http://www.semanlink.net/tag/multimedia|prefLabel|Multimedia
+http://www.semanlink.net/tag/multimedia|broader|http://www.semanlink.net/tag/technologie
+http://www.semanlink.net/tag/multimedia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/multimedia|uri|http://www.semanlink.net/tag/multimedia
+http://www.semanlink.net/tag/multimedia|broader_prefLabel|Technologie
+http://www.semanlink.net/tag/egit|creationTime|2012-09-06T14:55:03Z
+http://www.semanlink.net/tag/egit|prefLabel|Egit
+http://www.semanlink.net/tag/egit|broader|http://www.semanlink.net/tag/eclipse
+http://www.semanlink.net/tag/egit|broader|http://www.semanlink.net/tag/git
+http://www.semanlink.net/tag/egit|creationDate|2012-09-06
+http://www.semanlink.net/tag/egit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/egit|uri|http://www.semanlink.net/tag/egit
+http://www.semanlink.net/tag/egit|broader_prefLabel|Eclipse
+http://www.semanlink.net/tag/egit|broader_prefLabel|Git
+http://www.semanlink.net/tag/egit|broader_related|http://www.semanlink.net/tag/github
+http://www.semanlink.net/tag/egit|broader_related|http://www.semanlink.net/tag/maven
+http://www.semanlink.net/tag/word2vec|creationTime|2016-01-17T12:38:54Z
+http://www.semanlink.net/tag/word2vec|prefLabel|Word2vec
+http://www.semanlink.net/tag/word2vec|broader|http://www.semanlink.net/tag/word_embedding
+http://www.semanlink.net/tag/word2vec|broader|http://www.semanlink.net/tag/nlp_google
+http://www.semanlink.net/tag/word2vec|related|http://www.semanlink.net/tag/latent_semantic_analysis
+http://www.semanlink.net/tag/word2vec|creationDate|2016-01-17
+http://www.semanlink.net/tag/word2vec|comment|group of related models that are used to produce word embeddings
+http://www.semanlink.net/tag/word2vec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/word2vec|describedBy|https://en.wikipedia.org/wiki/Word2vec
+http://www.semanlink.net/tag/word2vec|uri|http://www.semanlink.net/tag/word2vec
+http://www.semanlink.net/tag/word2vec|broader_prefLabel|Word embeddings
+http://www.semanlink.net/tag/word2vec|broader_prefLabel|NLP@Google
+http://www.semanlink.net/tag/word2vec|broader_altLabel|Word Embedding
+http://www.semanlink.net/tag/word2vec|broader_altLabel|Plongement lexical
+http://www.semanlink.net/tag/word2vec|broader_related|http://www.semanlink.net/tag/tomas_mikolov
+http://www.semanlink.net/tag/word2vec|broader_related|http://www.semanlink.net/tag/yoshua_bengio
+http://www.semanlink.net/tag/word2vec|broader_related|http://www.semanlink.net/tag/sense_embeddings
+http://www.semanlink.net/tag/backdoor|creationTime|2016-02-18T00:04:53Z
+http://www.semanlink.net/tag/backdoor|prefLabel|Backdoor
+http://www.semanlink.net/tag/backdoor|creationDate|2016-02-18
+http://www.semanlink.net/tag/backdoor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/backdoor|uri|http://www.semanlink.net/tag/backdoor
+http://www.semanlink.net/tag/roc_curve|creationTime|2019-02-15T16:04:12Z
+http://www.semanlink.net/tag/roc_curve|prefLabel|ROC Curve
+http://www.semanlink.net/tag/roc_curve|broader|http://www.semanlink.net/tag/statistical_classification
+http://www.semanlink.net/tag/roc_curve|creationDate|2019-02-15
+http://www.semanlink.net/tag/roc_curve|comment|"""Receiver operating characteristic"". Plot used to show the diagnostic ability of a binary classifier as its discrimination threshold is varied.
+
+Plotting the true positive rate (TPR: recall) against the false positive rate (FPR: fall-out or probability of false alarm) at various threshold settings.
+"
+http://www.semanlink.net/tag/roc_curve|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/roc_curve|describedBy|https://en.wikipedia.org/wiki/Receiver_operating_characteristic
+http://www.semanlink.net/tag/roc_curve|uri|http://www.semanlink.net/tag/roc_curve
+http://www.semanlink.net/tag/roc_curve|broader_prefLabel|Classification
+http://www.semanlink.net/tag/labeling_data|creationTime|2019-12-17T15:20:19Z
+http://www.semanlink.net/tag/labeling_data|prefLabel|Labeling data
+http://www.semanlink.net/tag/labeling_data|broader|http://www.semanlink.net/tag/labeled_data
+http://www.semanlink.net/tag/labeling_data|creationDate|2019-12-17
+http://www.semanlink.net/tag/labeling_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/labeling_data|altLabel|Labelling data
+http://www.semanlink.net/tag/labeling_data|altLabel|Annotating data
+http://www.semanlink.net/tag/labeling_data|uri|http://www.semanlink.net/tag/labeling_data
+http://www.semanlink.net/tag/labeling_data|broader_prefLabel|Labeled Data
+http://www.semanlink.net/tag/coreference_resolution|creationTime|2019-05-28T16:11:07Z
+http://www.semanlink.net/tag/coreference_resolution|prefLabel|Coreference resolution
+http://www.semanlink.net/tag/coreference_resolution|broader|http://www.semanlink.net/tag/nlp_problem
+http://www.semanlink.net/tag/coreference_resolution|creationDate|2019-05-28
+http://www.semanlink.net/tag/coreference_resolution|comment|coreference: the fact that two or more expressions in a text – like pronouns or nouns – link to the same entity
+http://www.semanlink.net/tag/coreference_resolution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/coreference_resolution|uri|http://www.semanlink.net/tag/coreference_resolution
+http://www.semanlink.net/tag/coreference_resolution|broader_prefLabel|NLP tasks / problems
+http://www.semanlink.net/tag/hbase|creationTime|2013-02-14T11:29:21Z
+http://www.semanlink.net/tag/hbase|prefLabel|HBase™
+http://www.semanlink.net/tag/hbase|broader|http://www.semanlink.net/tag/big_data
+http://www.semanlink.net/tag/hbase|broader|http://www.semanlink.net/tag/apache_org
+http://www.semanlink.net/tag/hbase|broader|http://www.semanlink.net/tag/nosql
+http://www.semanlink.net/tag/hbase|related|http://www.semanlink.net/tag/bigtable
+http://www.semanlink.net/tag/hbase|creationDate|2013-02-14
+http://www.semanlink.net/tag/hbase|comment|"Apache HBase™ is the Hadoop database, a distributed, scalable, big data store.
+Just as Bigtable leverages the distributed data storage provided by the Google File System, Apache HBase provides Bigtable-like capabilities on top of Hadoop and HDFS." +http://www.semanlink.net/tag/hbase|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hbase|homepage|http://hbase.apache.org/ +http://www.semanlink.net/tag/hbase|uri|http://www.semanlink.net/tag/hbase +http://www.semanlink.net/tag/hbase|broader_prefLabel|Big Data +http://www.semanlink.net/tag/hbase|broader_prefLabel|apache.org +http://www.semanlink.net/tag/hbase|broader_prefLabel|NOSQL +http://www.semanlink.net/tag/hbase|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/hbase|broader_related|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/ccfd|creationTime|2007-04-09T23:34:22Z +http://www.semanlink.net/tag/ccfd|prefLabel|CCFD +http://www.semanlink.net/tag/ccfd|broader|http://www.semanlink.net/tag/catholicisme +http://www.semanlink.net/tag/ccfd|creationDate|2007-04-09 +http://www.semanlink.net/tag/ccfd|comment|Comité catholique contre la Faim et pour le Développement +http://www.semanlink.net/tag/ccfd|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ccfd|uri|http://www.semanlink.net/tag/ccfd +http://www.semanlink.net/tag/ccfd|broader_prefLabel|Catholicisme +http://www.semanlink.net/tag/ccfd|broader_altLabel|Catholique +http://www.semanlink.net/tag/acl|creationTime|2020-06-15T23:10:27Z +http://www.semanlink.net/tag/acl|prefLabel|ACL +http://www.semanlink.net/tag/acl|broader|http://www.semanlink.net/tag/nlp_conference +http://www.semanlink.net/tag/acl|creationDate|2020-06-15 +http://www.semanlink.net/tag/acl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/acl|uri|http://www.semanlink.net/tag/acl +http://www.semanlink.net/tag/acl|broader_prefLabel|NLP conference +http://www.semanlink.net/tag/delocalisations|prefLabel|Délocalisations +http://www.semanlink.net/tag/delocalisations|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/delocalisations|broader|http://www.semanlink.net/tag/mondialisation +http://www.semanlink.net/tag/delocalisations|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/delocalisations|uri|http://www.semanlink.net/tag/delocalisations +http://www.semanlink.net/tag/delocalisations|broader_prefLabel|Economie +http://www.semanlink.net/tag/delocalisations|broader_prefLabel|Mondialisation +http://www.semanlink.net/tag/delocalisations|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/spikes|creationTime|2021-03-19T13:37:11Z +http://www.semanlink.net/tag/spikes|prefLabel|Spikes +http://www.semanlink.net/tag/spikes|creationDate|2021-03-19 +http://www.semanlink.net/tag/spikes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/spikes|uri|http://www.semanlink.net/tag/spikes +http://www.semanlink.net/tag/richard_cyganiak|prefLabel|Richard Cyganiak +http://www.semanlink.net/tag/richard_cyganiak|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/richard_cyganiak|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/richard_cyganiak|creationDate|2007-01-03 +http://www.semanlink.net/tag/richard_cyganiak|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/richard_cyganiak|type|http://xmlns.com/foaf/0.1/Person 
+http://www.semanlink.net/tag/richard_cyganiak|altLabel|dowhatimean.net +http://www.semanlink.net/tag/richard_cyganiak|uri|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/richard_cyganiak|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/richard_cyganiak|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/richard_cyganiak|broader_altLabel|Technical guys +http://www.semanlink.net/tag/affaires_de_gado_a_niamey|creationTime|2007-07-27T17:41:31Z +http://www.semanlink.net/tag/affaires_de_gado_a_niamey|prefLabel|Affaires de Gado à Niamey +http://www.semanlink.net/tag/affaires_de_gado_a_niamey|broader|http://www.semanlink.net/tag/niamey +http://www.semanlink.net/tag/affaires_de_gado_a_niamey|broader|http://www.semanlink.net/tag/gado +http://www.semanlink.net/tag/affaires_de_gado_a_niamey|creationDate|2007-07-27 +http://www.semanlink.net/tag/affaires_de_gado_a_niamey|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/affaires_de_gado_a_niamey|uri|http://www.semanlink.net/tag/affaires_de_gado_a_niamey +http://www.semanlink.net/tag/affaires_de_gado_a_niamey|broader_prefLabel|Niamey +http://www.semanlink.net/tag/affaires_de_gado_a_niamey|broader_prefLabel|Gado +http://www.semanlink.net/tag/apigee|creationTime|2017-01-06T13:22:57Z +http://www.semanlink.net/tag/apigee|prefLabel|Apigee +http://www.semanlink.net/tag/apigee|broader|http://www.semanlink.net/tag/api_management +http://www.semanlink.net/tag/apigee|creationDate|2017-01-06 +http://www.semanlink.net/tag/apigee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apigee|describedBy|https://en.wikipedia.org/wiki/Apigee +http://www.semanlink.net/tag/apigee|uri|http://www.semanlink.net/tag/apigee +http://www.semanlink.net/tag/apigee|broader_prefLabel|API management +http://www.semanlink.net/tag/michael_moore|creationTime|2008-11-04T00:49:40Z +http://www.semanlink.net/tag/michael_moore|prefLabel|Michael Moore +http://www.semanlink.net/tag/michael_moore|creationDate|2008-11-04 +http://www.semanlink.net/tag/michael_moore|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/michael_moore|describedBy|https://en.wikipedia.org/wiki/Michael_Moore +http://www.semanlink.net/tag/michael_moore|uri|http://www.semanlink.net/tag/michael_moore +http://www.semanlink.net/tag/file_convert|creationTime|2020-01-18T23:45:56Z +http://www.semanlink.net/tag/file_convert|prefLabel|File convert +http://www.semanlink.net/tag/file_convert|creationDate|2020-01-18 +http://www.semanlink.net/tag/file_convert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/file_convert|uri|http://www.semanlink.net/tag/file_convert +http://www.semanlink.net/tag/ast_workshop|creationTime|2014-03-12T22:54:16Z +http://www.semanlink.net/tag/ast_workshop|prefLabel|AST workshop +http://www.semanlink.net/tag/ast_workshop|broader|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/ast_workshop|creationDate|2014-03-12 +http://www.semanlink.net/tag/ast_workshop|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ast_workshop|uri|http://www.semanlink.net/tag/ast_workshop +http://www.semanlink.net/tag/ast_workshop|broader_prefLabel|Workshop +http://www.semanlink.net/tag/odf|prefLabel|ODF +http://www.semanlink.net/tag/odf|creationDate|2006-10-06 +http://www.semanlink.net/tag/odf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/odf|uri|http://www.semanlink.net/tag/odf +http://www.semanlink.net/tag/innoraise|creationTime|2008-10-28T16:02:16Z +http://www.semanlink.net/tag/innoraise|prefLabel|Innoraise +http://www.semanlink.net/tag/innoraise|broader|http://www.semanlink.net/tag/social_networks +http://www.semanlink.net/tag/innoraise|broader|http://www.semanlink.net/tag/rdf_and_social_networks +http://www.semanlink.net/tag/innoraise|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/innoraise|creationDate|2008-10-28 +http://www.semanlink.net/tag/innoraise|comment|The STI community is an international network of experts in Semantic technologies. Innoraise combines search and social networks to keep you up-to-date on your peers of interest and allows you to discover new interesting peers based on their contributions in the Web and in published work. +http://www.semanlink.net/tag/innoraise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/innoraise|describedBy|http://sti.innoraise.com/ +http://www.semanlink.net/tag/innoraise|uri|http://www.semanlink.net/tag/innoraise +http://www.semanlink.net/tag/innoraise|broader_prefLabel|Social Networks +http://www.semanlink.net/tag/innoraise|broader_prefLabel|RDF and social networks +http://www.semanlink.net/tag/innoraise|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/innoraise|broader_related|http://www.semanlink.net/tag/foaf +http://www.semanlink.net/tag/innoraise|broader_related|http://www.semanlink.net/tag/henry_story +http://www.semanlink.net/tag/artificial_general_intelligence|creationTime|2016-01-13T23:03:32Z +http://www.semanlink.net/tag/artificial_general_intelligence|prefLabel|Artificial general intelligence +http://www.semanlink.net/tag/artificial_general_intelligence|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/artificial_general_intelligence|related|http://www.semanlink.net/tag/combinatorial_generalization +http://www.semanlink.net/tag/artificial_general_intelligence|creationDate|2016-01-13 +http://www.semanlink.net/tag/artificial_general_intelligence|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/artificial_general_intelligence|describedBy|https://en.wikipedia.org/wiki/Artificial_general_intelligence +http://www.semanlink.net/tag/artificial_general_intelligence|altLabel|AGI +http://www.semanlink.net/tag/artificial_general_intelligence|uri|http://www.semanlink.net/tag/artificial_general_intelligence +http://www.semanlink.net/tag/artificial_general_intelligence|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/artificial_general_intelligence|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/artificial_general_intelligence|broader_altLabel|AI +http://www.semanlink.net/tag/artificial_general_intelligence|broader_altLabel|IA +http://www.semanlink.net/tag/artificial_general_intelligence|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/hadopi_riposte_graduee|creationTime|2009-02-18T01:12:28Z +http://www.semanlink.net/tag/hadopi_riposte_graduee|prefLabel|HADOPI +http://www.semanlink.net/tag/hadopi_riposte_graduee|broader|http://www.semanlink.net/tag/loi_sur_le_telechargement +http://www.semanlink.net/tag/hadopi_riposte_graduee|creationDate|2009-02-18 +http://www.semanlink.net/tag/hadopi_riposte_graduee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/hadopi_riposte_graduee|uri|http://www.semanlink.net/tag/hadopi_riposte_graduee +http://www.semanlink.net/tag/hadopi_riposte_graduee|broader_prefLabel|Loi sur le téléchargement +http://www.semanlink.net/tag/logic|creationTime|2007-03-04T17:28:22Z +http://www.semanlink.net/tag/logic|prefLabel|Logic +http://www.semanlink.net/tag/logic|broader|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/logic|creationDate|2007-03-04 +http://www.semanlink.net/tag/logic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/logic|uri|http://www.semanlink.net/tag/logic +http://www.semanlink.net/tag/logic|broader_prefLabel|Mathématiques +http://www.semanlink.net/tag/logic|broader_altLabel|Math +http://www.semanlink.net/tag/innovation|prefLabel|Innovation +http://www.semanlink.net/tag/innovation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/innovation|uri|http://www.semanlink.net/tag/innovation +http://www.semanlink.net/tag/championnat_du_monde_d_athletisme|creationTime|2013-08-15T10:22:14Z +http://www.semanlink.net/tag/championnat_du_monde_d_athletisme|prefLabel|Championnat du monde d'athlétisme +http://www.semanlink.net/tag/championnat_du_monde_d_athletisme|broader|http://www.semanlink.net/tag/championnat_du_monde +http://www.semanlink.net/tag/championnat_du_monde_d_athletisme|broader|http://www.semanlink.net/tag/athletisme +http://www.semanlink.net/tag/championnat_du_monde_d_athletisme|creationDate|2013-08-15 +http://www.semanlink.net/tag/championnat_du_monde_d_athletisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/championnat_du_monde_d_athletisme|uri|http://www.semanlink.net/tag/championnat_du_monde_d_athletisme +http://www.semanlink.net/tag/championnat_du_monde_d_athletisme|broader_prefLabel|Championnat du monde +http://www.semanlink.net/tag/championnat_du_monde_d_athletisme|broader_prefLabel|Athlétisme +http://www.semanlink.net/tag/read_write_linked_data|creationTime|2011-03-08T09:49:21Z +http://www.semanlink.net/tag/read_write_linked_data|prefLabel|Read-Write Linked Data +http://www.semanlink.net/tag/read_write_linked_data|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/read_write_linked_data|creationDate|2011-03-08 +http://www.semanlink.net/tag/read_write_linked_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/read_write_linked_data|altLabel|RW Linked Data +http://www.semanlink.net/tag/read_write_linked_data|uri|http://www.semanlink.net/tag/read_write_linked_data +http://www.semanlink.net/tag/read_write_linked_data|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/read_write_linked_data|broader_altLabel|LD +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/richard_cyganiak 
+http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/read_write_linked_data|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/jpa|creationTime|2010-07-06T14:01:22Z +http://www.semanlink.net/tag/jpa|prefLabel|JPA +http://www.semanlink.net/tag/jpa|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/jpa|creationDate|2010-07-06 +http://www.semanlink.net/tag/jpa|comment|"Java Persistence API +" +http://www.semanlink.net/tag/jpa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jpa|describedBy|http://java.sun.com/developer/technicalArticles/J2EE/jpa/ +http://www.semanlink.net/tag/jpa|uri|http://www.semanlink.net/tag/jpa +http://www.semanlink.net/tag/jpa|broader_prefLabel|Java +http://www.semanlink.net/tag/car_diversity|creationTime|2008-10-24T18:23:14Z +http://www.semanlink.net/tag/car_diversity|prefLabel|Car diversity +http://www.semanlink.net/tag/car_diversity|creationDate|2008-10-24 +http://www.semanlink.net/tag/car_diversity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/car_diversity|uri|http://www.semanlink.net/tag/car_diversity +http://www.semanlink.net/tag/google_hummingbird|creationTime|2013-10-14T11:38:32Z +http://www.semanlink.net/tag/google_hummingbird|prefLabel|Google Hummingbird +http://www.semanlink.net/tag/google_hummingbird|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_hummingbird|creationDate|2013-10-14 +http://www.semanlink.net/tag/google_hummingbird|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_hummingbird|uri|http://www.semanlink.net/tag/google_hummingbird +http://www.semanlink.net/tag/google_hummingbird|broader_prefLabel|Google +http://www.semanlink.net/tag/google_hummingbird|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/slow_food|creationTime|2011-02-01T12:07:31Z +http://www.semanlink.net/tag/slow_food|prefLabel|Slow food +http://www.semanlink.net/tag/slow_food|creationDate|2011-02-01 +http://www.semanlink.net/tag/slow_food|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/slow_food|uri|http://www.semanlink.net/tag/slow_food +http://www.semanlink.net/tag/kd_mkb_related|creationTime|2020-05-12T11:12:24Z +http://www.semanlink.net/tag/kd_mkb_related|prefLabel|KD-MKB related +http://www.semanlink.net/tag/kd_mkb_related|broader|http://www.semanlink.net/tag/kd_mkb +http://www.semanlink.net/tag/kd_mkb_related|related|http://www.semanlink.net/tag/kd_mkb_biblio +http://www.semanlink.net/tag/kd_mkb_related|creationDate|2020-05-12 
+http://www.semanlink.net/tag/kd_mkb_related|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kd_mkb_related|uri|http://www.semanlink.net/tag/kd_mkb_related +http://www.semanlink.net/tag/kd_mkb_related|broader_prefLabel|KD-MKB +http://www.semanlink.net/tag/kd_mkb_related|broader_altLabel|KDMKB +http://www.semanlink.net/tag/kd_mkb_related|broader_altLabel|KD-MKR +http://www.semanlink.net/tag/kd_mkb_related|broader_related|http://www.semanlink.net/tag/multiple_knowledge_bases +http://www.semanlink.net/tag/kd_mkb_related|broader_related|http://www.semanlink.net/tag/coling2020 +http://www.semanlink.net/tag/ai_facebook|creationTime|2019-05-12T12:24:58Z +http://www.semanlink.net/tag/ai_facebook|prefLabel|AI@Facebook +http://www.semanlink.net/tag/ai_facebook|broader|http://www.semanlink.net/tag/ai_teams +http://www.semanlink.net/tag/ai_facebook|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_facebook|broader|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/ai_facebook|creationDate|2019-05-12 +http://www.semanlink.net/tag/ai_facebook|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_facebook|uri|http://www.semanlink.net/tag/ai_facebook +http://www.semanlink.net/tag/ai_facebook|broader_prefLabel|AI teams +http://www.semanlink.net/tag/ai_facebook|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_facebook|broader_prefLabel|Facebook +http://www.semanlink.net/tag/ai_facebook|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_facebook|broader_altLabel|AI +http://www.semanlink.net/tag/ai_facebook|broader_altLabel|IA +http://www.semanlink.net/tag/ai_facebook|broader_altLabel|FB +http://www.semanlink.net/tag/ai_facebook|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/bookmarklet|prefLabel|Bookmarklet +http://www.semanlink.net/tag/bookmarklet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bookmarklet|uri|http://www.semanlink.net/tag/bookmarklet +http://www.semanlink.net/tag/text_tools|creationTime|2014-10-05T17:04:23Z +http://www.semanlink.net/tag/text_tools|prefLabel|Text tools +http://www.semanlink.net/tag/text_tools|creationDate|2014-10-05 +http://www.semanlink.net/tag/text_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_tools|uri|http://www.semanlink.net/tag/text_tools +http://www.semanlink.net/tag/archeologie|prefLabel|Archéologie +http://www.semanlink.net/tag/archeologie|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/archeologie|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/archeologie|broader|http://www.semanlink.net/tag/favoris +http://www.semanlink.net/tag/archeologie|related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/archeologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/archeologie|uri|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/archeologie|broader_prefLabel|Science +http://www.semanlink.net/tag/archeologie|broader_prefLabel|Histoire +http://www.semanlink.net/tag/archeologie|broader_prefLabel|Favoris +http://www.semanlink.net/tag/archeologie|broader_altLabel|sciences +http://www.semanlink.net/tag/archeologie|broader_altLabel|favorites +http://www.semanlink.net/tag/java_in_python|creationTime|2020-02-20T14:09:53Z 
+http://www.semanlink.net/tag/java_in_python|prefLabel|Java in python +http://www.semanlink.net/tag/java_in_python|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java_in_python|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/java_in_python|creationDate|2020-02-20 +http://www.semanlink.net/tag/java_in_python|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_in_python|uri|http://www.semanlink.net/tag/java_in_python +http://www.semanlink.net/tag/java_in_python|broader_prefLabel|Java +http://www.semanlink.net/tag/java_in_python|broader_prefLabel|Python +http://www.semanlink.net/tag/handwriting_recognition|creationTime|2015-12-20T14:35:30Z +http://www.semanlink.net/tag/handwriting_recognition|prefLabel|Handwriting recognition +http://www.semanlink.net/tag/handwriting_recognition|broader|http://www.semanlink.net/tag/ocr +http://www.semanlink.net/tag/handwriting_recognition|creationDate|2015-12-20 +http://www.semanlink.net/tag/handwriting_recognition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/handwriting_recognition|uri|http://www.semanlink.net/tag/handwriting_recognition +http://www.semanlink.net/tag/handwriting_recognition|broader_prefLabel|OCR +http://www.semanlink.net/tag/uriburner_com|creationTime|2010-06-17T01:22:52Z +http://www.semanlink.net/tag/uriburner_com|prefLabel|URIBurner +http://www.semanlink.net/tag/uriburner_com|broader|http://www.semanlink.net/tag/openlink +http://www.semanlink.net/tag/uriburner_com|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/uriburner_com|creationDate|2010-06-17 +http://www.semanlink.net/tag/uriburner_com|comment|"A service from [OpenLink Software](tag:openlink) that enables anyone to generate structured descriptions -on the fly- for resources that are already published to HTTP based networks. 
+ +[Example](https://linkeddata.uriburner.com/about/html/https://linkeddata.uriburner.com/about/id/entity/https/www.technologyreview.com/2020/11/12/1011944/artificial-intelligence-replication-crisis-science-big-tech-google-deepmind-facebook-openai/)" +http://www.semanlink.net/tag/uriburner_com|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uriburner_com|describedBy|http://uriburner.com/ +http://www.semanlink.net/tag/uriburner_com|uri|http://www.semanlink.net/tag/uriburner_com +http://www.semanlink.net/tag/uriburner_com|broader_prefLabel|OpenLink Software +http://www.semanlink.net/tag/uriburner_com|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/uriburner_com|broader_altLabel|LD +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/yrjana_rankka +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/orri_erling +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/uriburner_com|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/giovanni_tummarello|creationTime|2007-11-15T08:48:03Z +http://www.semanlink.net/tag/giovanni_tummarello|prefLabel|Giovanni Tummarello +http://www.semanlink.net/tag/giovanni_tummarello|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/giovanni_tummarello|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/giovanni_tummarello|related|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/giovanni_tummarello|creationDate|2007-11-15 +http://www.semanlink.net/tag/giovanni_tummarello|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/giovanni_tummarello|uri|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/giovanni_tummarello|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/giovanni_tummarello|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/giovanni_tummarello|broader_altLabel|Technical guys 
+http://www.semanlink.net/tag/duckduckgo|creationTime|2013-06-13T00:14:38Z +http://www.semanlink.net/tag/duckduckgo|prefLabel|DuckDuckGo +http://www.semanlink.net/tag/duckduckgo|broader|http://www.semanlink.net/tag/privacy_and_internet +http://www.semanlink.net/tag/duckduckgo|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/duckduckgo|creationDate|2013-06-13 +http://www.semanlink.net/tag/duckduckgo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/duckduckgo|describedBy|https://duckduckgo.com/ +http://www.semanlink.net/tag/duckduckgo|uri|http://www.semanlink.net/tag/duckduckgo +http://www.semanlink.net/tag/duckduckgo|broader_prefLabel|Privacy and internet +http://www.semanlink.net/tag/duckduckgo|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/duckduckgo|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/duckduckgo|broader_related|http://www.semanlink.net/tag/cybersurveillance +http://www.semanlink.net/tag/sharepoint|creationTime|2008-06-19T01:53:39Z +http://www.semanlink.net/tag/sharepoint|prefLabel|Sharepoint +http://www.semanlink.net/tag/sharepoint|broader|http://www.semanlink.net/tag/microsoft +http://www.semanlink.net/tag/sharepoint|creationDate|2008-06-19 +http://www.semanlink.net/tag/sharepoint|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sharepoint|uri|http://www.semanlink.net/tag/sharepoint +http://www.semanlink.net/tag/sharepoint|broader_prefLabel|Microsoft +http://www.semanlink.net/tag/sharepoint|broader_related|http://www.semanlink.net/tag/bill_gates +http://www.semanlink.net/tag/grounded_language_learning|creationTime|2018-10-26T00:33:37Z +http://www.semanlink.net/tag/grounded_language_learning|prefLabel|Grounded Language Learning +http://www.semanlink.net/tag/grounded_language_learning|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/grounded_language_learning|broader|http://www.semanlink.net/tag/nlu +http://www.semanlink.net/tag/grounded_language_learning|related|http://www.semanlink.net/tag/multimodal_models +http://www.semanlink.net/tag/grounded_language_learning|creationDate|2018-10-26 +http://www.semanlink.net/tag/grounded_language_learning|comment|"Language is grounded in experience. Unlike dictionaries which define words in terms of other words, humans understand many basic words in terms of associations with sensory-motor experiences. People must interact physically with their world to grasp the essence of words like ""red,"" ""heavy,"" and ""above."" + +Abstract words are acquired only in relation to more concretely grounded terms. Grounding is thus a fundamental aspect of spoken language, which enables humans to acquire and to use words and sentences in context. + +The antithesis of grounded language is inferred language. Inferred language derives meaning from words themselves rather than what they represent. 
+" +http://www.semanlink.net/tag/grounded_language_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grounded_language_learning|uri|http://www.semanlink.net/tag/grounded_language_learning +http://www.semanlink.net/tag/grounded_language_learning|broader_prefLabel|NLP +http://www.semanlink.net/tag/grounded_language_learning|broader_prefLabel|NLU +http://www.semanlink.net/tag/grounded_language_learning|broader_altLabel|TALN +http://www.semanlink.net/tag/grounded_language_learning|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/grounded_language_learning|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/grounded_language_learning|broader_altLabel|Natural Language Understanding +http://www.semanlink.net/tag/david_cameron|creationTime|2014-06-27T00:44:16Z +http://www.semanlink.net/tag/david_cameron|prefLabel|David Cameron +http://www.semanlink.net/tag/david_cameron|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/david_cameron|broader|http://www.semanlink.net/tag/royaume_uni +http://www.semanlink.net/tag/david_cameron|creationDate|2014-06-27 +http://www.semanlink.net/tag/david_cameron|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/david_cameron|uri|http://www.semanlink.net/tag/david_cameron +http://www.semanlink.net/tag/david_cameron|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/david_cameron|broader_prefLabel|Royaume Uni +http://www.semanlink.net/tag/david_cameron|broader_altLabel|UK +http://www.semanlink.net/tag/new_horizons|prefLabel|New Horizons +http://www.semanlink.net/tag/new_horizons|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/new_horizons|broader|http://www.semanlink.net/tag/nasa +http://www.semanlink.net/tag/new_horizons|broader|http://www.semanlink.net/tag/pluton +http://www.semanlink.net/tag/new_horizons|comment|New Horizons ira là où on n'est jamais allé auparavant, à la frontière de la science planétaire +http://www.semanlink.net/tag/new_horizons|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/new_horizons|uri|http://www.semanlink.net/tag/new_horizons +http://www.semanlink.net/tag/new_horizons|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/new_horizons|broader_prefLabel|NASA +http://www.semanlink.net/tag/new_horizons|broader_prefLabel|Pluton +http://www.semanlink.net/tag/jena_and_database|creationTime|2008-02-12T14:45:05Z +http://www.semanlink.net/tag/jena_and_database|prefLabel|Jena and database +http://www.semanlink.net/tag/jena_and_database|broader|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/jena_and_database|broader|http://www.semanlink.net/tag/semantic_web_databases +http://www.semanlink.net/tag/jena_and_database|creationDate|2008-02-12 +http://www.semanlink.net/tag/jena_and_database|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jena_and_database|uri|http://www.semanlink.net/tag/jena_and_database +http://www.semanlink.net/tag/jena_and_database|broader_prefLabel|Jena +http://www.semanlink.net/tag/jena_and_database|broader_prefLabel|Semantic Web: databases +http://www.semanlink.net/tag/jena_and_database|broader_related|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/entity_type|creationTime|2021-06-14T16:26:56Z +http://www.semanlink.net/tag/entity_type|prefLabel|Entity type 
+http://www.semanlink.net/tag/entity_type|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/entity_type|creationDate|2021-06-14 +http://www.semanlink.net/tag/entity_type|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_type|uri|http://www.semanlink.net/tag/entity_type +http://www.semanlink.net/tag/entity_type|broader_prefLabel|Entities +http://www.semanlink.net/tag/david_beckett|prefLabel|David Beckett +http://www.semanlink.net/tag/david_beckett|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/david_beckett|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/david_beckett|related|http://www.semanlink.net/tag/yahoo +http://www.semanlink.net/tag/david_beckett|comment|"""The semantic web is: a webby way to link data. That is all.""
+""The reason I got involved with the Semantic Web was...I wanted control of my data."" +" +http://www.semanlink.net/tag/david_beckett|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/david_beckett|homepage|http://www.dajobe.org/ +http://www.semanlink.net/tag/david_beckett|altLabel|dajobe +http://www.semanlink.net/tag/david_beckett|altLabel|Dave Beckett +http://www.semanlink.net/tag/david_beckett|weblog|http://journal.dajobe.org/ +http://www.semanlink.net/tag/david_beckett|uri|http://www.semanlink.net/tag/david_beckett +http://www.semanlink.net/tag/david_beckett|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/david_beckett|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/david_beckett|broader_altLabel|Technical guys +http://www.semanlink.net/tag/bittorrent|prefLabel|BitTorrent +http://www.semanlink.net/tag/bittorrent|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bittorrent|uri|http://www.semanlink.net/tag/bittorrent +http://www.semanlink.net/tag/multilevel_model|creationTime|2017-02-08T13:22:48Z +http://www.semanlink.net/tag/multilevel_model|prefLabel|Hierarchical linear model +http://www.semanlink.net/tag/multilevel_model|broader|http://www.semanlink.net/tag/regression_analysis +http://www.semanlink.net/tag/multilevel_model|creationDate|2017-02-08 +http://www.semanlink.net/tag/multilevel_model|comment|"Multilevel models (also known as hierarchical linear models, nested data models, mixed models, random coefficient, random-effects models, random parameter models, or split-plot designs) are statistical models of parameters that vary at more than one level. An example could be a model of student performance that contains measures for individual students as well as measures for classrooms within which the students are grouped. These models can be seen as generalizations of linear models (in particular, linear regression), although they can also extend to non-linear models. + +" +http://www.semanlink.net/tag/multilevel_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multilevel_model|describedBy|https://en.wikipedia.org/wiki/Multilevel_model +http://www.semanlink.net/tag/multilevel_model|altLabel|Multilevel model +http://www.semanlink.net/tag/multilevel_model|uri|http://www.semanlink.net/tag/multilevel_model +http://www.semanlink.net/tag/multilevel_model|broader_prefLabel|Regression analysis +http://www.semanlink.net/tag/maxent_classifier|creationTime|2014-04-08T19:18:48Z +http://www.semanlink.net/tag/maxent_classifier|prefLabel|MaxEnt classifier (Multinomial logistic regression) +http://www.semanlink.net/tag/maxent_classifier|broader|http://www.semanlink.net/tag/multi_class_classification +http://www.semanlink.net/tag/maxent_classifier|broader|http://www.semanlink.net/tag/maxent_models +http://www.semanlink.net/tag/maxent_classifier|broader|http://www.semanlink.net/tag/logistic_regression +http://www.semanlink.net/tag/maxent_classifier|related|http://www.semanlink.net/tag/logistic_regression +http://www.semanlink.net/tag/maxent_classifier|related|http://www.semanlink.net/tag/softmax +http://www.semanlink.net/tag/maxent_classifier|creationDate|2014-04-08 +http://www.semanlink.net/tag/maxent_classifier|comment|"classification method that generalizes logistic regression to multiclass problems. 
+ +Assumes that a linear combination of the observed features and some problem-specific parameters can be used to determine the probability of each particular outcome of the dependent variable. + +> ""If you want to assign probabilities to an object being one of several different things, softmax is the thing to do. Even later on, when we train more sophisticated models, the final step will be a layer of softmax."" [cf.](http://www.tensorflow.org/tutorials/mnist/beginners/index.md) +" +http://www.semanlink.net/tag/maxent_classifier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maxent_classifier|describedBy|https://en.wikipedia.org/wiki/Multinomial_logistic_regression +http://www.semanlink.net/tag/maxent_classifier|describedBy|https://en.wikipedia.org/wiki/Maxent_model +http://www.semanlink.net/tag/maxent_classifier|altLabel|Maximum Entropy Classifier +http://www.semanlink.net/tag/maxent_classifier|altLabel|Softmax regression +http://www.semanlink.net/tag/maxent_classifier|uri|http://www.semanlink.net/tag/maxent_classifier +http://www.semanlink.net/tag/maxent_classifier|broader_prefLabel|Multi-class classification +http://www.semanlink.net/tag/maxent_classifier|broader_prefLabel|Maxent models +http://www.semanlink.net/tag/maxent_classifier|broader_prefLabel|Logistic regression +http://www.semanlink.net/tag/maxent_classifier|broader_altLabel|Multiclass classification +http://www.semanlink.net/tag/maxent_classifier|broader_related|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/tag/maxent_classifier|broader_related|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/apache|prefLabel|Apache web server +http://www.semanlink.net/tag/apache|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/apache|broader|http://www.semanlink.net/tag/http +http://www.semanlink.net/tag/apache|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/apache|broader|http://www.semanlink.net/tag/web_server +http://www.semanlink.net/tag/apache|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apache|uri|http://www.semanlink.net/tag/apache +http://www.semanlink.net/tag/apache|broader_prefLabel|Open Source +http://www.semanlink.net/tag/apache|broader_prefLabel|HTTP +http://www.semanlink.net/tag/apache|broader_prefLabel|apache.org +http://www.semanlink.net/tag/apache|broader_prefLabel|Web server +http://www.semanlink.net/tag/loosely_formatted_text|creationTime|2019-07-15T12:33:13Z +http://www.semanlink.net/tag/loosely_formatted_text|prefLabel|Loosely formatted text +http://www.semanlink.net/tag/loosely_formatted_text|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/loosely_formatted_text|related|http://www.semanlink.net/tag/nlp_short_texts +http://www.semanlink.net/tag/loosely_formatted_text|creationDate|2019-07-15 +http://www.semanlink.net/tag/loosely_formatted_text|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/loosely_formatted_text|altLabel|Messy text +http://www.semanlink.net/tag/loosely_formatted_text|uri|http://www.semanlink.net/tag/loosely_formatted_text +http://www.semanlink.net/tag/loosely_formatted_text|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/identification_of_similar_documents|creationTime|2019-07-02T01:21:07Z +http://www.semanlink.net/tag/identification_of_similar_documents|prefLabel|Identification of similar 
documents +http://www.semanlink.net/tag/identification_of_similar_documents|broader|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/identification_of_similar_documents|creationDate|2019-07-02 +http://www.semanlink.net/tag/identification_of_similar_documents|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/identification_of_similar_documents|uri|http://www.semanlink.net/tag/identification_of_similar_documents +http://www.semanlink.net/tag/identification_of_similar_documents|broader_prefLabel|Text Similarity +http://www.semanlink.net/tag/identification_of_similar_documents|broader_related|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/tag/identification_of_similar_documents|broader_related|http://www.semanlink.net/tag/nlp_and_search +http://www.semanlink.net/tag/identification_of_similar_documents|broader_related|http://www.semanlink.net/tag/okapi_bm25 +http://www.semanlink.net/tag/neural_memory|creationTime|2020-07-09T23:56:14Z +http://www.semanlink.net/tag/neural_memory|prefLabel|Neural Memory +http://www.semanlink.net/tag/neural_memory|broader|http://www.semanlink.net/tag/memory_in_deep_learning +http://www.semanlink.net/tag/neural_memory|creationDate|2020-07-09 +http://www.semanlink.net/tag/neural_memory|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neural_memory|uri|http://www.semanlink.net/tag/neural_memory +http://www.semanlink.net/tag/neural_memory|broader_prefLabel|Memory in deep learning +http://www.semanlink.net/tag/neural_memory|broader_related|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/tag/external_memory_algorithm|creationTime|2018-09-15T18:39:47Z +http://www.semanlink.net/tag/external_memory_algorithm|prefLabel|External memory algorithm +http://www.semanlink.net/tag/external_memory_algorithm|broader|http://www.semanlink.net/tag/scaling +http://www.semanlink.net/tag/external_memory_algorithm|broader|http://www.semanlink.net/tag/memoire_informatique +http://www.semanlink.net/tag/external_memory_algorithm|creationDate|2018-09-15 +http://www.semanlink.net/tag/external_memory_algorithm|comment|"algorithms designed to process data that is too large to fit into memory +" +http://www.semanlink.net/tag/external_memory_algorithm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/external_memory_algorithm|describedBy|https://en.wikipedia.org/wiki/External_memory_algorithm +http://www.semanlink.net/tag/external_memory_algorithm|altLabel|out-of-core algorithms +http://www.semanlink.net/tag/external_memory_algorithm|uri|http://www.semanlink.net/tag/external_memory_algorithm +http://www.semanlink.net/tag/external_memory_algorithm|broader_prefLabel|Scaling +http://www.semanlink.net/tag/external_memory_algorithm|broader_prefLabel|Mémoire (informatique) +http://www.semanlink.net/tag/wikidata|creationTime|2012-02-08T13:42:03Z +http://www.semanlink.net/tag/wikidata|prefLabel|Wikidata +http://www.semanlink.net/tag/wikidata|broader|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/wikidata|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/wikidata|related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/wikidata|creationDate|2012-02-08 +http://www.semanlink.net/tag/wikidata|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wikidata|homepage|http://www.wikidata.org/ 
+http://www.semanlink.net/tag/wikidata|uri|http://www.semanlink.net/tag/wikidata +http://www.semanlink.net/tag/wikidata|broader_prefLabel|Wikipedia +http://www.semanlink.net/tag/wikidata|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/wikidata|broader_altLabel|LD +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/wikidata|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/violence_policiere|creationTime|2007-11-29T23:03:01Z +http://www.semanlink.net/tag/violence_policiere|prefLabel|Violence policière +http://www.semanlink.net/tag/violence_policiere|broader|http://www.semanlink.net/tag/violence +http://www.semanlink.net/tag/violence_policiere|broader|http://www.semanlink.net/tag/police +http://www.semanlink.net/tag/violence_policiere|creationDate|2007-11-29 +http://www.semanlink.net/tag/violence_policiere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/violence_policiere|uri|http://www.semanlink.net/tag/violence_policiere +http://www.semanlink.net/tag/violence_policiere|broader_prefLabel|Violence +http://www.semanlink.net/tag/violence_policiere|broader_prefLabel|Police +http://www.semanlink.net/tag/autoencoder|creationTime|2017-10-03T13:46:44Z +http://www.semanlink.net/tag/autoencoder|prefLabel|Autoencoder +http://www.semanlink.net/tag/autoencoder|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/autoencoder|broader|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/autoencoder|broader|http://www.semanlink.net/tag/dimensionality_reduction +http://www.semanlink.net/tag/autoencoder|related|http://www.semanlink.net/tag/encoder_decoder_architecture +http://www.semanlink.net/tag/autoencoder|related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/autoencoder|creationDate|2017-10-03 +http://www.semanlink.net/tag/autoencoder|comment|"ANN used for unsupervised learning of efficient codings: learning a representation (encoding) for a set of data, typically for the purpose of dimensionality reduction. 
+
+an unsupervised neural network which is trained to reconstruct a given input from its latent representation (Bengio, 2009).
+
+Unlike principal components analysis, the encoding and decoding steps are not limited to linear transformations (PCA learns an ""encoding linear transform"", while auto-encoders learn an ""encoding program"").
+"
+http://www.semanlink.net/tag/autoencoder|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/autoencoder|describedBy|https://en.wikipedia.org/wiki/Autoencoder
+http://www.semanlink.net/tag/autoencoder|uri|http://www.semanlink.net/tag/autoencoder
+http://www.semanlink.net/tag/autoencoder|broader_prefLabel|Neural networks
+http://www.semanlink.net/tag/autoencoder|broader_prefLabel|Unsupervised machine learning
+http://www.semanlink.net/tag/autoencoder|broader_prefLabel|Dimensionality reduction
+http://www.semanlink.net/tag/autoencoder|broader_altLabel|Artificial neural network
+http://www.semanlink.net/tag/autoencoder|broader_altLabel|ANN
+http://www.semanlink.net/tag/autoencoder|broader_altLabel|NN
+http://www.semanlink.net/tag/knowledge_extraction|creationTime|2007-05-23T21:46:21Z
+http://www.semanlink.net/tag/knowledge_extraction|prefLabel|Knowledge Extraction
+http://www.semanlink.net/tag/knowledge_extraction|broader|http://www.semanlink.net/tag/artificial_intelligence
+http://www.semanlink.net/tag/knowledge_extraction|broader|http://www.semanlink.net/tag/information_extraction
+http://www.semanlink.net/tag/knowledge_extraction|creationDate|2007-05-23
+http://www.semanlink.net/tag/knowledge_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/knowledge_extraction|uri|http://www.semanlink.net/tag/knowledge_extraction
+http://www.semanlink.net/tag/knowledge_extraction|broader_prefLabel|Artificial Intelligence
+http://www.semanlink.net/tag/knowledge_extraction|broader_prefLabel|Information extraction
+http://www.semanlink.net/tag/knowledge_extraction|broader_altLabel|Intelligence Artificielle
+http://www.semanlink.net/tag/knowledge_extraction|broader_altLabel|AI
+http://www.semanlink.net/tag/knowledge_extraction|broader_altLabel|IA
+http://www.semanlink.net/tag/knowledge_extraction|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens
+http://www.semanlink.net/tag/knowledge_extraction|broader_related|http://www.semanlink.net/tag/knowledge_extraction
+http://www.semanlink.net/tag/leonardo_da_vinci|creationTime|2008-12-10T14:58:44Z
+http://www.semanlink.net/tag/leonardo_da_vinci|prefLabel|Leonardo da Vinci
+http://www.semanlink.net/tag/leonardo_da_vinci|broader|http://www.semanlink.net/tag/scientifique
+http://www.semanlink.net/tag/leonardo_da_vinci|broader|http://www.semanlink.net/tag/peintre
+http://www.semanlink.net/tag/leonardo_da_vinci|broader|http://www.semanlink.net/tag/renaissance
+http://www.semanlink.net/tag/leonardo_da_vinci|creationDate|2008-12-10
+http://www.semanlink.net/tag/leonardo_da_vinci|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/leonardo_da_vinci|describedBy|https://en.wikipedia.org/wiki/Da_vinci
+http://www.semanlink.net/tag/leonardo_da_vinci|uri|http://www.semanlink.net/tag/leonardo_da_vinci
+http://www.semanlink.net/tag/leonardo_da_vinci|broader_prefLabel|Scientifique
+http://www.semanlink.net/tag/leonardo_da_vinci|broader_prefLabel|Peintre
+http://www.semanlink.net/tag/leonardo_da_vinci|broader_prefLabel|Renaissance
+http://www.semanlink.net/tag/leonardo_da_vinci|broader_altLabel|Savant
+http://www.semanlink.net/tag/belem|creationTime|2007-09-11T21:32:25Z +http://www.semanlink.net/tag/belem|prefLabel|Belém +http://www.semanlink.net/tag/belem|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/belem|broader|http://www.semanlink.net/tag/para +http://www.semanlink.net/tag/belem|creationDate|2007-09-11 +http://www.semanlink.net/tag/belem|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/belem|uri|http://www.semanlink.net/tag/belem +http://www.semanlink.net/tag/belem|broader_prefLabel|Ville +http://www.semanlink.net/tag/belem|broader_prefLabel|Pará +http://www.semanlink.net/tag/de_broglie|prefLabel|de Broglie +http://www.semanlink.net/tag/de_broglie|broader|http://www.semanlink.net/tag/scientifique +http://www.semanlink.net/tag/de_broglie|broader|http://www.semanlink.net/tag/mecanique_quantique +http://www.semanlink.net/tag/de_broglie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/de_broglie|uri|http://www.semanlink.net/tag/de_broglie +http://www.semanlink.net/tag/de_broglie|broader_prefLabel|Scientifique +http://www.semanlink.net/tag/de_broglie|broader_prefLabel|Mécanique quantique +http://www.semanlink.net/tag/de_broglie|broader_altLabel|Savant +http://www.semanlink.net/tag/france_afrique|creationTime|2007-07-28T17:18:23Z +http://www.semanlink.net/tag/france_afrique|prefLabel|France / Afrique +http://www.semanlink.net/tag/france_afrique|broader|http://www.semanlink.net/tag/afrique_francophone +http://www.semanlink.net/tag/france_afrique|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/france_afrique|creationDate|2007-07-28 +http://www.semanlink.net/tag/france_afrique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/france_afrique|uri|http://www.semanlink.net/tag/france_afrique +http://www.semanlink.net/tag/france_afrique|broader_prefLabel|Afrique francophone +http://www.semanlink.net/tag/france_afrique|broader_prefLabel|France +http://www.semanlink.net/tag/automobile_and_w3c|creationTime|2012-11-23T13:34:45Z +http://www.semanlink.net/tag/automobile_and_w3c|prefLabel|Automotive AND W3C +http://www.semanlink.net/tag/automobile_and_w3c|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/automobile_and_w3c|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/automobile_and_w3c|broader|http://www.semanlink.net/tag/automotive_and_web_technologies +http://www.semanlink.net/tag/automobile_and_w3c|broader|http://www.semanlink.net/tag/automobile_2_0 +http://www.semanlink.net/tag/automobile_and_w3c|creationDate|2012-11-23 +http://www.semanlink.net/tag/automobile_and_w3c|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/automobile_and_w3c|uri|http://www.semanlink.net/tag/automobile_and_w3c +http://www.semanlink.net/tag/automobile_and_w3c|broader_prefLabel|W3C +http://www.semanlink.net/tag/automobile_and_w3c|broader_prefLabel|Automobile +http://www.semanlink.net/tag/automobile_and_w3c|broader_prefLabel|Automotive and web technologies +http://www.semanlink.net/tag/automobile_and_w3c|broader_prefLabel|Automobile 2.0 +http://www.semanlink.net/tag/automobile_and_w3c|broader_altLabel|Automotive +http://www.semanlink.net/tag/automobile_and_w3c|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/automobile_and_w3c|broader_related|http://www.semanlink.net/tag/sparql 
+http://www.semanlink.net/tag/national_taiwan_university|creationTime|2014-03-26T09:42:48Z +http://www.semanlink.net/tag/national_taiwan_university|prefLabel|National Taiwan University +http://www.semanlink.net/tag/national_taiwan_university|broader|http://www.semanlink.net/tag/taiwan +http://www.semanlink.net/tag/national_taiwan_university|broader|http://www.semanlink.net/tag/universite +http://www.semanlink.net/tag/national_taiwan_university|creationDate|2014-03-26 +http://www.semanlink.net/tag/national_taiwan_university|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/national_taiwan_university|uri|http://www.semanlink.net/tag/national_taiwan_university +http://www.semanlink.net/tag/national_taiwan_university|broader_prefLabel|Taiwan +http://www.semanlink.net/tag/national_taiwan_university|broader_prefLabel|Université +http://www.semanlink.net/tag/national_taiwan_university|broader_related|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/neurones|creationTime|2013-12-25T23:05:24Z +http://www.semanlink.net/tag/neurones|prefLabel|Neurones +http://www.semanlink.net/tag/neurones|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/neurones|creationDate|2013-12-25 +http://www.semanlink.net/tag/neurones|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neurones|uri|http://www.semanlink.net/tag/neurones +http://www.semanlink.net/tag/neurones|broader_prefLabel|Brain +http://www.semanlink.net/tag/neurones|broader_altLabel|Cerveau +http://www.semanlink.net/tag/neurones|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/entity_alignment|creationTime|2020-03-19T22:34:54Z +http://www.semanlink.net/tag/entity_alignment|prefLabel|Entity alignment +http://www.semanlink.net/tag/entity_alignment|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/entity_alignment|broader|http://www.semanlink.net/tag/combining_knowledge_graphs +http://www.semanlink.net/tag/entity_alignment|creationDate|2020-03-19 +http://www.semanlink.net/tag/entity_alignment|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_alignment|uri|http://www.semanlink.net/tag/entity_alignment +http://www.semanlink.net/tag/entity_alignment|broader_prefLabel|Entities +http://www.semanlink.net/tag/entity_alignment|broader_prefLabel|Combining knowledge graphs +http://www.semanlink.net/tag/dan_connolly|prefLabel|Dan Connolly +http://www.semanlink.net/tag/dan_connolly|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/dan_connolly|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/dan_connolly|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dan_connolly|uri|http://www.semanlink.net/tag/dan_connolly +http://www.semanlink.net/tag/dan_connolly|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/dan_connolly|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/dan_connolly|broader_altLabel|Technical guys +http://www.semanlink.net/tag/postman|creationTime|2019-10-24T23:37:54Z +http://www.semanlink.net/tag/postman|prefLabel|Postman +http://www.semanlink.net/tag/postman|broader|http://www.semanlink.net/tag/rest +http://www.semanlink.net/tag/postman|creationDate|2019-10-24 +http://www.semanlink.net/tag/postman|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/postman|uri|http://www.semanlink.net/tag/postman +http://www.semanlink.net/tag/postman|broader_prefLabel|REST +http://www.semanlink.net/tag/guerres_coloniales|prefLabel|Guerres coloniales +http://www.semanlink.net/tag/guerres_coloniales|broader|http://www.semanlink.net/tag/guerre +http://www.semanlink.net/tag/guerres_coloniales|broader|http://www.semanlink.net/tag/colonisation +http://www.semanlink.net/tag/guerres_coloniales|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/guerres_coloniales|uri|http://www.semanlink.net/tag/guerres_coloniales +http://www.semanlink.net/tag/guerres_coloniales|broader_prefLabel|War +http://www.semanlink.net/tag/guerres_coloniales|broader_prefLabel|Colonisation +http://www.semanlink.net/tag/guerres_coloniales|broader_altLabel|Guerre +http://www.semanlink.net/tag/guerres_coloniales|broader_altLabel|Colonialisme +http://www.semanlink.net/tag/non_negative_matrix_factorization|creationTime|2017-11-13T11:25:49Z +http://www.semanlink.net/tag/non_negative_matrix_factorization|prefLabel|Non-negative matrix factorization +http://www.semanlink.net/tag/non_negative_matrix_factorization|related|http://www.semanlink.net/tag/latent_semantic_analysis +http://www.semanlink.net/tag/non_negative_matrix_factorization|related|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/tag/non_negative_matrix_factorization|creationDate|2017-11-13 +http://www.semanlink.net/tag/non_negative_matrix_factorization|comment|algorithms where a matrix V is factorized into (usually) two matrices W and H, with the property that all three matrices have no negative elements. +http://www.semanlink.net/tag/non_negative_matrix_factorization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/non_negative_matrix_factorization|describedBy|https://en.wikipedia.org/wiki/Non-negative_matrix_factorization +http://www.semanlink.net/tag/non_negative_matrix_factorization|uri|http://www.semanlink.net/tag/non_negative_matrix_factorization +http://www.semanlink.net/tag/amour|prefLabel|Amour +http://www.semanlink.net/tag/amour|broader|http://www.semanlink.net/tag/sentiment +http://www.semanlink.net/tag/amour|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amour|uri|http://www.semanlink.net/tag/amour +http://www.semanlink.net/tag/amour|broader_prefLabel|Sentiment +http://www.semanlink.net/tag/scaling|creationTime|2018-09-15T18:40:20Z +http://www.semanlink.net/tag/scaling|prefLabel|Scaling +http://www.semanlink.net/tag/scaling|creationDate|2018-09-15 +http://www.semanlink.net/tag/scaling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/scaling|uri|http://www.semanlink.net/tag/scaling +http://www.semanlink.net/tag/attention_knowledge_graphs|creationTime|2019-08-23T00:45:10Z +http://www.semanlink.net/tag/attention_knowledge_graphs|prefLabel|Attention + Knowledge Graphs +http://www.semanlink.net/tag/attention_knowledge_graphs|broader|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/attention_knowledge_graphs|broader|http://www.semanlink.net/tag/attention_in_graphs +http://www.semanlink.net/tag/attention_knowledge_graphs|creationDate|2019-08-23 +http://www.semanlink.net/tag/attention_knowledge_graphs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/attention_knowledge_graphs|altLabel|Transformer + Knowledge Graphs 
+http://www.semanlink.net/tag/attention_knowledge_graphs|uri|http://www.semanlink.net/tag/attention_knowledge_graphs +http://www.semanlink.net/tag/attention_knowledge_graphs|broader_prefLabel|Knowledge Graph + Deep Learning +http://www.semanlink.net/tag/attention_knowledge_graphs|broader_prefLabel|Attention in Graphs +http://www.semanlink.net/tag/attention_knowledge_graphs|broader_altLabel|Graph + Transformer +http://www.semanlink.net/tag/attention_knowledge_graphs|broader_related|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/tag/attention_knowledge_graphs|broader_related|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/latent_dirichlet_allocation|creationTime|2013-08-22T11:22:59Z +http://www.semanlink.net/tag/latent_dirichlet_allocation|prefLabel|Latent Dirichlet allocation +http://www.semanlink.net/tag/latent_dirichlet_allocation|broader|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/latent_dirichlet_allocation|related|http://www.semanlink.net/tag/david_blei +http://www.semanlink.net/tag/latent_dirichlet_allocation|related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/latent_dirichlet_allocation|related|http://www.semanlink.net/tag/latent_semantic_analysis +http://www.semanlink.net/tag/latent_dirichlet_allocation|creationDate|2013-08-22 +http://www.semanlink.net/tag/latent_dirichlet_allocation|comment|"A generative model that allows sets of observations to be explained by unobserved groups that explain why some parts of the data are similar. + +Models the intuition that the topic of a document will probabilistically influence the author’s choice of words when writing the document. Documents are interpreted as a mixture of topics (a probability distribution over topics), and topics as a probability distribution over words. 
+ +Encodes the intuition that documents cover a small number of topics and that topics often use a small number of words + +LDA is an extension of [LSI/pLSI](latent_semantic_analysis) + + + + + +" +http://www.semanlink.net/tag/latent_dirichlet_allocation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/latent_dirichlet_allocation|describedBy|https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation +http://www.semanlink.net/tag/latent_dirichlet_allocation|altLabel|LDA +http://www.semanlink.net/tag/latent_dirichlet_allocation|uri|http://www.semanlink.net/tag/latent_dirichlet_allocation +http://www.semanlink.net/tag/latent_dirichlet_allocation|broader_prefLabel|Topic Modeling +http://www.semanlink.net/tag/latent_dirichlet_allocation|broader_altLabel|Topic model +http://www.semanlink.net/tag/latent_dirichlet_allocation|broader_related|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/latent_dirichlet_allocation|broader_related|http://www.semanlink.net/tag/andrew_mccallum +http://www.semanlink.net/tag/latent_dirichlet_allocation|broader_related|http://www.semanlink.net/tag/nlp_and_humanities +http://www.semanlink.net/tag/latent_dirichlet_allocation|broader_related|http://www.semanlink.net/tag/mallet +http://www.semanlink.net/tag/programming_language|prefLabel|Programming language +http://www.semanlink.net/tag/programming_language|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/programming_language|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/programming_language|broader|http://www.semanlink.net/tag/programming +http://www.semanlink.net/tag/programming_language|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/programming_language|altLabel|Langage de programmation +http://www.semanlink.net/tag/programming_language|uri|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/programming_language|broader_prefLabel|Informatique +http://www.semanlink.net/tag/programming_language|broader_prefLabel|Dev +http://www.semanlink.net/tag/programming_language|broader_prefLabel|Programming +http://www.semanlink.net/tag/crise_ecologique|creationTime|2009-01-31T20:36:16Z +http://www.semanlink.net/tag/crise_ecologique|prefLabel|Crise écologique +http://www.semanlink.net/tag/crise_ecologique|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/crise_ecologique|creationDate|2009-01-31 +http://www.semanlink.net/tag/crise_ecologique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/crise_ecologique|uri|http://www.semanlink.net/tag/crise_ecologique +http://www.semanlink.net/tag/crise_ecologique|broader_prefLabel|Écologie +http://www.semanlink.net/tag/loudness_war|creationTime|2013-02-10T22:09:57Z +http://www.semanlink.net/tag/loudness_war|prefLabel|Loudness war +http://www.semanlink.net/tag/loudness_war|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/loudness_war|related|http://www.semanlink.net/tag/robert +http://www.semanlink.net/tag/loudness_war|creationDate|2013-02-10 +http://www.semanlink.net/tag/loudness_war|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/loudness_war|uri|http://www.semanlink.net/tag/loudness_war +http://www.semanlink.net/tag/loudness_war|broader_prefLabel|Musique +http://www.semanlink.net/tag/loudness_war|broader_altLabel|Music 
+http://www.semanlink.net/tag/target_entity_disambiguation|creationTime|2019-05-07T18:36:28Z +http://www.semanlink.net/tag/target_entity_disambiguation|prefLabel|Target Entity Disambiguation +http://www.semanlink.net/tag/target_entity_disambiguation|broader|http://www.semanlink.net/tag/entity_linking +http://www.semanlink.net/tag/target_entity_disambiguation|creationDate|2019-05-07 +http://www.semanlink.net/tag/target_entity_disambiguation|comment|The task of identifying target entities of the same domain +http://www.semanlink.net/tag/target_entity_disambiguation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/target_entity_disambiguation|uri|http://www.semanlink.net/tag/target_entity_disambiguation +http://www.semanlink.net/tag/target_entity_disambiguation|broader_prefLabel|Entity linking +http://www.semanlink.net/tag/target_entity_disambiguation|broader_altLabel|Named entity disambiguation +http://www.semanlink.net/tag/target_entity_disambiguation|broader_related|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/marcel_frohlich|creationTime|2020-08-30T16:54:26Z +http://www.semanlink.net/tag/marcel_frohlich|prefLabel|Marcel Fröhlich +http://www.semanlink.net/tag/marcel_frohlich|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/marcel_frohlich|creationDate|2020-08-30 +http://www.semanlink.net/tag/marcel_frohlich|comment|[twitter](https://twitter.com/FroehlichMarcel) +http://www.semanlink.net/tag/marcel_frohlich|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/marcel_frohlich|uri|http://www.semanlink.net/tag/marcel_frohlich +http://www.semanlink.net/tag/marcel_frohlich|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/marcel_frohlich|broader_altLabel|Technical guys +http://www.semanlink.net/tag/dev_tools|creationTime|2007-10-19T10:35:12Z +http://www.semanlink.net/tag/dev_tools|prefLabel|Dev tools +http://www.semanlink.net/tag/dev_tools|broader|http://www.semanlink.net/tag/tools +http://www.semanlink.net/tag/dev_tools|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/dev_tools|creationDate|2007-10-19 +http://www.semanlink.net/tag/dev_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dev_tools|uri|http://www.semanlink.net/tag/dev_tools +http://www.semanlink.net/tag/dev_tools|broader_prefLabel|Tools +http://www.semanlink.net/tag/dev_tools|broader_prefLabel|Dev +http://www.semanlink.net/tag/zune|prefLabel|Zune +http://www.semanlink.net/tag/zune|broader|http://www.semanlink.net/tag/microsoft +http://www.semanlink.net/tag/zune|broader|http://www.semanlink.net/tag/musique_en_ligne +http://www.semanlink.net/tag/zune|creationDate|2006-09-19 +http://www.semanlink.net/tag/zune|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zune|uri|http://www.semanlink.net/tag/zune +http://www.semanlink.net/tag/zune|broader_prefLabel|Microsoft +http://www.semanlink.net/tag/zune|broader_prefLabel|Musique en ligne +http://www.semanlink.net/tag/zune|broader_related|http://www.semanlink.net/tag/bill_gates +http://www.semanlink.net/tag/jacqueline_de_romilly|creationTime|2009-03-05T01:20:35Z +http://www.semanlink.net/tag/jacqueline_de_romilly|prefLabel|Jacqueline de Romilly +http://www.semanlink.net/tag/jacqueline_de_romilly|broader|http://www.semanlink.net/tag/grece_antique 
+http://www.semanlink.net/tag/jacqueline_de_romilly|related|http://www.semanlink.net/tag/thucydide +http://www.semanlink.net/tag/jacqueline_de_romilly|creationDate|2009-03-05 +http://www.semanlink.net/tag/jacqueline_de_romilly|comment|"Elle se définit comme professeur, et littéraire, plus qu'historienne.
+Depuis un demi-siècle, on a oublié que le but de l'enseignement devait être le développement des qualités humaines (dit-elle, en substance, vers 2005) + +" +http://www.semanlink.net/tag/jacqueline_de_romilly|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jacqueline_de_romilly|describedBy|https://fr.wikipedia.org/wiki/Jacqueline_de_Romilly +http://www.semanlink.net/tag/jacqueline_de_romilly|uri|http://www.semanlink.net/tag/jacqueline_de_romilly +http://www.semanlink.net/tag/jacqueline_de_romilly|broader_prefLabel|Grèce antique +http://www.semanlink.net/tag/nlp_event|creationTime|2018-07-26T00:27:53Z +http://www.semanlink.net/tag/nlp_event|prefLabel|NLP event +http://www.semanlink.net/tag/nlp_event|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_event|broader|http://www.semanlink.net/tag/ai_event +http://www.semanlink.net/tag/nlp_event|creationDate|2018-07-26 +http://www.semanlink.net/tag/nlp_event|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_event|uri|http://www.semanlink.net/tag/nlp_event +http://www.semanlink.net/tag/nlp_event|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_event|broader_prefLabel|AI Event +http://www.semanlink.net/tag/nlp_event|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_event|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_event|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/text_search|creationTime|2011-11-13T14:26:51Z +http://www.semanlink.net/tag/text_search|prefLabel|Text Search +http://www.semanlink.net/tag/text_search|broader|http://www.semanlink.net/tag/search +http://www.semanlink.net/tag/text_search|broader|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/text_search|related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/text_search|creationDate|2011-11-13 +http://www.semanlink.net/tag/text_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_search|uri|http://www.semanlink.net/tag/text_search +http://www.semanlink.net/tag/text_search|broader_prefLabel|Search +http://www.semanlink.net/tag/text_search|broader_prefLabel|Information retrieval +http://www.semanlink.net/tag/text_search|broader_altLabel|IR +http://www.semanlink.net/tag/openai_gpt|creationTime|2019-01-24T15:54:34Z +http://www.semanlink.net/tag/openai_gpt|prefLabel|OpenAI GPT +http://www.semanlink.net/tag/openai_gpt|broader|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/tag/openai_gpt|broader|http://www.semanlink.net/tag/openai +http://www.semanlink.net/tag/openai_gpt|broader|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/openai_gpt|creationDate|2019-01-24 +http://www.semanlink.net/tag/openai_gpt|comment|"GPT: Generative Pre-Training +" +http://www.semanlink.net/tag/openai_gpt|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/openai_gpt|uri|http://www.semanlink.net/tag/openai_gpt +http://www.semanlink.net/tag/openai_gpt|broader_prefLabel|Transformers +http://www.semanlink.net/tag/openai_gpt|broader_prefLabel|OpenAI +http://www.semanlink.net/tag/openai_gpt|broader_prefLabel|Pre-Trained Language Models +http://www.semanlink.net/tag/openai_gpt|broader_altLabel|Transformer +http://www.semanlink.net/tag/openai_gpt|broader_altLabel|Transformers 
+http://www.semanlink.net/tag/openai_gpt|broader_altLabel|Attention is All You Need +http://www.semanlink.net/tag/openai_gpt|broader_altLabel|PreTrained Language Models +http://www.semanlink.net/tag/openai_gpt|broader_altLabel|Deep pre-training in NLP +http://www.semanlink.net/tag/openai_gpt|broader_related|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/tag/openai_gpt|broader_related|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/tpu|creationTime|2018-10-05T08:19:38Z +http://www.semanlink.net/tag/tpu|prefLabel|TPU +http://www.semanlink.net/tag/tpu|creationDate|2018-10-05 +http://www.semanlink.net/tag/tpu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tpu|uri|http://www.semanlink.net/tag/tpu +http://www.semanlink.net/tag/fix_it|creationTime|2007-11-09T13:21:06Z +http://www.semanlink.net/tag/fix_it|prefLabel|Fix it +http://www.semanlink.net/tag/fix_it|creationDate|2007-11-09 +http://www.semanlink.net/tag/fix_it|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fix_it|uri|http://www.semanlink.net/tag/fix_it +http://www.semanlink.net/tag/transfer_learning|creationTime|2017-09-18T11:55:58Z +http://www.semanlink.net/tag/transfer_learning|prefLabel|Transfer learning +http://www.semanlink.net/tag/transfer_learning|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/transfer_learning|related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/transfer_learning|related|http://www.semanlink.net/tag/multi_task_learning +http://www.semanlink.net/tag/transfer_learning|creationDate|2017-09-18 +http://www.semanlink.net/tag/transfer_learning|comment|performance on a target task is improved by leveraging model parameters learned on a separate but related source task +http://www.semanlink.net/tag/transfer_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/transfer_learning|describedBy|https://en.wikipedia.org/wiki/Transfer_learning +http://www.semanlink.net/tag/transfer_learning|uri|http://www.semanlink.net/tag/transfer_learning +http://www.semanlink.net/tag/transfer_learning|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/transfer_learning|broader_altLabel|ML +http://www.semanlink.net/tag/transfer_learning|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/transfer_learning|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/boura|prefLabel|Boura +http://www.semanlink.net/tag/boura|broader|http://www.semanlink.net/tag/terre_cuite +http://www.semanlink.net/tag/boura|broader|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/boura|broader|http://www.semanlink.net/tag/archeologie_du_niger +http://www.semanlink.net/tag/boura|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/boura|altLabel|Bura +http://www.semanlink.net/tag/boura|uri|http://www.semanlink.net/tag/boura +http://www.semanlink.net/tag/boura|broader_prefLabel|Terre cuite +http://www.semanlink.net/tag/boura|broader_prefLabel|Art d'Afrique +http://www.semanlink.net/tag/boura|broader_prefLabel|Archéologie du Niger +http://www.semanlink.net/tag/boura|broader_altLabel|African art +http://www.semanlink.net/tag/boura|broader_related|http://www.semanlink.net/tag/anne_haour +http://www.semanlink.net/tag/evolutionary_algorithm|creationTime|2019-06-27T23:45:30Z 
+http://www.semanlink.net/tag/evolutionary_algorithm|prefLabel|Evolutionary algorithm +http://www.semanlink.net/tag/evolutionary_algorithm|broader|http://www.semanlink.net/tag/evolutionary_computation +http://www.semanlink.net/tag/evolutionary_algorithm|creationDate|2019-06-27 +http://www.semanlink.net/tag/evolutionary_algorithm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/evolutionary_algorithm|describedBy|https://en.wikipedia.org/wiki/Evolutionary_algorithm +http://www.semanlink.net/tag/evolutionary_algorithm|uri|http://www.semanlink.net/tag/evolutionary_algorithm +http://www.semanlink.net/tag/evolutionary_algorithm|broader_prefLabel|Evolutionary computation +http://www.semanlink.net/tag/darwin|prefLabel|Darwin +http://www.semanlink.net/tag/darwin|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/darwin|broader|http://www.semanlink.net/tag/explorateur +http://www.semanlink.net/tag/darwin|broader|http://www.semanlink.net/tag/evolution +http://www.semanlink.net/tag/darwin|broader|http://www.semanlink.net/tag/scientifique +http://www.semanlink.net/tag/darwin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/darwin|uri|http://www.semanlink.net/tag/darwin +http://www.semanlink.net/tag/darwin|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/darwin|broader_prefLabel|Explorateur +http://www.semanlink.net/tag/darwin|broader_prefLabel|Evolution +http://www.semanlink.net/tag/darwin|broader_prefLabel|Scientifique +http://www.semanlink.net/tag/darwin|broader_altLabel|Savant +http://www.semanlink.net/tag/physicien|creationTime|2018-10-18T13:29:44Z +http://www.semanlink.net/tag/physicien|prefLabel|Physicien +http://www.semanlink.net/tag/physicien|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/physicien|creationDate|2018-10-18 +http://www.semanlink.net/tag/physicien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/physicien|uri|http://www.semanlink.net/tag/physicien +http://www.semanlink.net/tag/physicien|broader_prefLabel|Physique +http://www.semanlink.net/tag/physicien|broader_altLabel|Physics +http://www.semanlink.net/tag/histoire_de_la_chine|prefLabel|Histoire de la Chine +http://www.semanlink.net/tag/histoire_de_la_chine|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/histoire_de_la_chine|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/histoire_de_la_chine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_de_la_chine|uri|http://www.semanlink.net/tag/histoire_de_la_chine +http://www.semanlink.net/tag/histoire_de_la_chine|broader_prefLabel|Chine +http://www.semanlink.net/tag/histoire_de_la_chine|broader_prefLabel|Histoire +http://www.semanlink.net/tag/histoire_de_la_chine|broader_altLabel|China +http://www.semanlink.net/tag/duplicate_detection|creationTime|2019-07-02T01:10:36Z +http://www.semanlink.net/tag/duplicate_detection|prefLabel|Duplicate Detection +http://www.semanlink.net/tag/duplicate_detection|broader|http://www.semanlink.net/tag/identification_of_similar_documents +http://www.semanlink.net/tag/duplicate_detection|creationDate|2019-07-02 +http://www.semanlink.net/tag/duplicate_detection|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/duplicate_detection|altLabel|Paraphrase identification +http://www.semanlink.net/tag/duplicate_detection|altLabel|Duplicate search 
+http://www.semanlink.net/tag/duplicate_detection|uri|http://www.semanlink.net/tag/duplicate_detection +http://www.semanlink.net/tag/duplicate_detection|broader_prefLabel|Identification of similar documents +http://www.semanlink.net/tag/ldp_implementations|creationTime|2014-10-12T22:42:03Z +http://www.semanlink.net/tag/ldp_implementations|prefLabel|LDP: implementations +http://www.semanlink.net/tag/ldp_implementations|broader|http://www.semanlink.net/tag/linked_data_platform +http://www.semanlink.net/tag/ldp_implementations|creationDate|2014-10-12 +http://www.semanlink.net/tag/ldp_implementations|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ldp_implementations|uri|http://www.semanlink.net/tag/ldp_implementations +http://www.semanlink.net/tag/ldp_implementations|broader_prefLabel|Linked Data Platform +http://www.semanlink.net/tag/ldp_implementations|broader_altLabel|LDP +http://www.semanlink.net/tag/gaussian_embedding|creationTime|2018-01-28T17:32:57Z +http://www.semanlink.net/tag/gaussian_embedding|prefLabel|Gaussian embedding +http://www.semanlink.net/tag/gaussian_embedding|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/gaussian_embedding|creationDate|2018-01-28 +http://www.semanlink.net/tag/gaussian_embedding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gaussian_embedding|uri|http://www.semanlink.net/tag/gaussian_embedding +http://www.semanlink.net/tag/gaussian_embedding|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/gaussian_embedding|broader_altLabel|embedding +http://www.semanlink.net/tag/gaussian_embedding|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/gaussian_embedding|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/wii|creationTime|2008-03-19T22:18:10Z +http://www.semanlink.net/tag/wii|prefLabel|Wii +http://www.semanlink.net/tag/wii|creationDate|2008-03-19 +http://www.semanlink.net/tag/wii|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wii|uri|http://www.semanlink.net/tag/wii +http://www.semanlink.net/tag/dbpedia|creationTime|2007-04-04T22:26:22Z +http://www.semanlink.net/tag/dbpedia|prefLabel|dbpedia +http://www.semanlink.net/tag/dbpedia|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/dbpedia|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/dbpedia|related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/dbpedia|related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/dbpedia|creationDate|2007-04-04 +http://www.semanlink.net/tag/dbpedia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dbpedia|homepage|http://dbpedia.org/ +http://www.semanlink.net/tag/dbpedia|uri|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/dbpedia|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/dbpedia|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/dbpedia|broader_altLabel|LOD +http://www.semanlink.net/tag/dbpedia|broader_altLabel|LD +http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/richard_cyganiak 
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/tim_berners_lee
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/yves_raymond
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/frederick_giasson
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/chris_bizer
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/jamendo
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/richard_cyganiak
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/httprange_14
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/musicbrainz
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/kingsley_idehen
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/giovanni_tummarello
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/tom_heath
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/rdf_data_source
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/tim_berners_lee
+http://www.semanlink.net/tag/dbpedia|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.semanlink.net/tag/clark_and_parsia|creationTime|2008-04-11T15:56:17Z
+http://www.semanlink.net/tag/clark_and_parsia|prefLabel|Clark and Parsia
+http://www.semanlink.net/tag/clark_and_parsia|related|http://www.semanlink.net/tag/owl
+http://www.semanlink.net/tag/clark_and_parsia|creationDate|2008-04-11
+http://www.semanlink.net/tag/clark_and_parsia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/clark_and_parsia|uri|http://www.semanlink.net/tag/clark_and_parsia
+http://www.semanlink.net/tag/active_learning|creationTime|2019-04-19T16:53:26Z
+http://www.semanlink.net/tag/active_learning|prefLabel|Active learning
+http://www.semanlink.net/tag/active_learning|broader|http://www.semanlink.net/tag/machine_learning_techniques
+http://www.semanlink.net/tag/active_learning|broader|http://www.semanlink.net/tag/training_data
+http://www.semanlink.net/tag/active_learning|broader|http://www.semanlink.net/tag/labeling_data
+http://www.semanlink.net/tag/active_learning|creationDate|2019-04-19
+http://www.semanlink.net/tag/active_learning|comment|"When a learning algorithm is able to interactively query the user to obtain the label of a data point (problem: estimate which points are more valuable to solicit labels for)
+
+Active learning deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning.
+
+The goal of active learning: to **reduce the cost of labeling**.
To this end, the learning algorithm is +allowed to choose which data to label based on uncertainty (e.g., the entropy of predicted class +probabilities) or other heuristics ([src](doc:2020/07/2007_00077_similarity_search_))" +http://www.semanlink.net/tag/active_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/active_learning|describedBy|https://en.wikipedia.org/wiki/Active_learning_(machine_learning) +http://www.semanlink.net/tag/active_learning|uri|http://www.semanlink.net/tag/active_learning +http://www.semanlink.net/tag/active_learning|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/active_learning|broader_prefLabel|Training data +http://www.semanlink.net/tag/active_learning|broader_prefLabel|Labeling data +http://www.semanlink.net/tag/active_learning|broader_altLabel|Labelling data +http://www.semanlink.net/tag/active_learning|broader_altLabel|Annotating data +http://www.semanlink.net/tag/sable|creationTime|2017-08-01T19:02:21Z +http://www.semanlink.net/tag/sable|prefLabel|Sable +http://www.semanlink.net/tag/sable|creationDate|2017-08-01 +http://www.semanlink.net/tag/sable|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sable|uri|http://www.semanlink.net/tag/sable +http://www.semanlink.net/tag/galileo_spacecraft|creationTime|2017-09-22T01:32:22Z +http://www.semanlink.net/tag/galileo_spacecraft|prefLabel|Galileo (spacecraft) +http://www.semanlink.net/tag/galileo_spacecraft|broader|http://www.semanlink.net/tag/jupiter +http://www.semanlink.net/tag/galileo_spacecraft|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/galileo_spacecraft|creationDate|2017-09-22 +http://www.semanlink.net/tag/galileo_spacecraft|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/galileo_spacecraft|describedBy|https://en.wikipedia.org/wiki/Galileo_(spacecraft) +http://www.semanlink.net/tag/galileo_spacecraft|uri|http://www.semanlink.net/tag/galileo_spacecraft +http://www.semanlink.net/tag/galileo_spacecraft|broader_prefLabel|Jupiter +http://www.semanlink.net/tag/galileo_spacecraft|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/representation_learning_for_nlp|creationTime|2019-08-12T10:04:24Z +http://www.semanlink.net/tag/representation_learning_for_nlp|prefLabel|Representation Learning for NLP +http://www.semanlink.net/tag/representation_learning_for_nlp|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/representation_learning_for_nlp|broader|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/tag/representation_learning_for_nlp|creationDate|2019-08-12 +http://www.semanlink.net/tag/representation_learning_for_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/representation_learning_for_nlp|uri|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/representation_learning_for_nlp|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/representation_learning_for_nlp|broader_prefLabel|Representation learning +http://www.semanlink.net/tag/representation_learning_for_nlp|broader_related|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/similarity_queries|creationTime|2017-07-10T19:15:35Z +http://www.semanlink.net/tag/similarity_queries|prefLabel|Similarity queries 
+http://www.semanlink.net/tag/similarity_queries|broader|http://www.semanlink.net/tag/information_retrieval_techniques +http://www.semanlink.net/tag/similarity_queries|broader|http://www.semanlink.net/tag/recommender_systems +http://www.semanlink.net/tag/similarity_queries|related|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/similarity_queries|creationDate|2017-07-10 +http://www.semanlink.net/tag/similarity_queries|comment|"Finding items that are similar to a given query is the core +aspect of search and retrieval systems, as well as of +recommendation engines." +http://www.semanlink.net/tag/similarity_queries|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/similarity_queries|altLabel|Vector similarity search +http://www.semanlink.net/tag/similarity_queries|uri|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/similarity_queries|broader_prefLabel|Information retrieval: techniques +http://www.semanlink.net/tag/similarity_queries|broader_prefLabel|Recommender Systems +http://www.semanlink.net/tag/similarity_queries|broader_altLabel|Recommandation system +http://www.semanlink.net/tag/similarity_queries|broader_altLabel|Système de recommandation +http://www.semanlink.net/tag/frequently_cited_paper|creationTime|2017-11-21T00:51:29Z +http://www.semanlink.net/tag/frequently_cited_paper|prefLabel|Frequently cited paper +http://www.semanlink.net/tag/frequently_cited_paper|creationDate|2017-11-21 +http://www.semanlink.net/tag/frequently_cited_paper|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/frequently_cited_paper|uri|http://www.semanlink.net/tag/frequently_cited_paper +http://www.semanlink.net/tag/zero_shot_entity_linking|creationTime|2020-05-02T11:39:30Z +http://www.semanlink.net/tag/zero_shot_entity_linking|prefLabel|Zero-shot Entity Linking +http://www.semanlink.net/tag/zero_shot_entity_linking|broader|http://www.semanlink.net/tag/entity_linking +http://www.semanlink.net/tag/zero_shot_entity_linking|broader|http://www.semanlink.net/tag/zero_shot_learning +http://www.semanlink.net/tag/zero_shot_entity_linking|creationDate|2020-05-02 +http://www.semanlink.net/tag/zero_shot_entity_linking|comment|each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the linking decisions. 
+http://www.semanlink.net/tag/zero_shot_entity_linking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zero_shot_entity_linking|uri|http://www.semanlink.net/tag/zero_shot_entity_linking +http://www.semanlink.net/tag/zero_shot_entity_linking|broader_prefLabel|Entity linking +http://www.semanlink.net/tag/zero_shot_entity_linking|broader_prefLabel|Zero-Shot Learning +http://www.semanlink.net/tag/zero_shot_entity_linking|broader_altLabel|Named entity disambiguation +http://www.semanlink.net/tag/zero_shot_entity_linking|broader_related|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/zero_shot_entity_linking|broader_related|http://www.semanlink.net/tag/few_shot_learning +http://www.semanlink.net/tag/chine_afrique|prefLabel|Chine / Afrique +http://www.semanlink.net/tag/chine_afrique|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/chine_afrique|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/chine_afrique|creationDate|2006-11-10 +http://www.semanlink.net/tag/chine_afrique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chine_afrique|altLabel|Chinafrique +http://www.semanlink.net/tag/chine_afrique|uri|http://www.semanlink.net/tag/chine_afrique +http://www.semanlink.net/tag/chine_afrique|broader_prefLabel|Chine +http://www.semanlink.net/tag/chine_afrique|broader_prefLabel|Afrique +http://www.semanlink.net/tag/chine_afrique|broader_altLabel|China +http://www.semanlink.net/tag/chine_afrique|broader_altLabel|Africa +http://www.semanlink.net/tag/topic_modeling_over_short_texts|creationTime|2017-06-07T18:13:43Z +http://www.semanlink.net/tag/topic_modeling_over_short_texts|prefLabel|Topic Modeling over Short Texts +http://www.semanlink.net/tag/topic_modeling_over_short_texts|broader|http://www.semanlink.net/tag/nlp_short_texts +http://www.semanlink.net/tag/topic_modeling_over_short_texts|broader|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/topic_modeling_over_short_texts|creationDate|2017-06-07 +http://www.semanlink.net/tag/topic_modeling_over_short_texts|comment|"Conventional topic models implicitly capture the document-level word co-occurrence patterns to reveal topics. This may not work well on short texts, because of data sparsity. 
+
+Compared with long texts, topic discovery from short
+texts has the following three challenges:
+
+- only very limited word co-occurrence information is available,
+- the frequency of words plays a less discriminative role,
+- and the limited contexts make it more difficult to identify the senses of ambiguous words
+
+"
+http://www.semanlink.net/tag/topic_modeling_over_short_texts|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/topic_modeling_over_short_texts|uri|http://www.semanlink.net/tag/topic_modeling_over_short_texts
+http://www.semanlink.net/tag/topic_modeling_over_short_texts|broader_prefLabel|NLP: short texts
+http://www.semanlink.net/tag/topic_modeling_over_short_texts|broader_prefLabel|Topic Modeling
+http://www.semanlink.net/tag/topic_modeling_over_short_texts|broader_altLabel|Topic model
+http://www.semanlink.net/tag/topic_modeling_over_short_texts|broader_related|http://www.semanlink.net/tag/clustering_of_text_documents
+http://www.semanlink.net/tag/topic_modeling_over_short_texts|broader_related|http://www.semanlink.net/tag/andrew_mccallum
+http://www.semanlink.net/tag/topic_modeling_over_short_texts|broader_related|http://www.semanlink.net/tag/nlp_and_humanities
+http://www.semanlink.net/tag/topic_modeling_over_short_texts|broader_related|http://www.semanlink.net/tag/mallet
+http://www.semanlink.net/tag/note_taking_app|creationTime|2020-05-19T22:51:53Z
+http://www.semanlink.net/tag/note_taking_app|prefLabel|Note taking app
+http://www.semanlink.net/tag/note_taking_app|creationDate|2020-05-19
+http://www.semanlink.net/tag/note_taking_app|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/note_taking_app|uri|http://www.semanlink.net/tag/note_taking_app
+http://www.semanlink.net/tag/hateoas|creationTime|2011-09-20T14:14:03Z
+http://www.semanlink.net/tag/hateoas|prefLabel|HATEOAS
+http://www.semanlink.net/tag/hateoas|broader|http://www.semanlink.net/tag/rest
+http://www.semanlink.net/tag/hateoas|broader|http://www.semanlink.net/tag/api
+http://www.semanlink.net/tag/hateoas|broader|http://www.semanlink.net/tag/hypermedia
+http://www.semanlink.net/tag/hateoas|related|http://www.semanlink.net/tag/samuel_goto
+http://www.semanlink.net/tag/hateoas|creationDate|2011-09-20
+http://www.semanlink.net/tag/hateoas|comment|"Hypermedia as the Engine of Application State
+"
+http://www.semanlink.net/tag/hateoas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/hateoas|describedBy|https://en.wikipedia.org/wiki/HATEOAS
+http://www.semanlink.net/tag/hateoas|altLabel|Hypermedia API
+http://www.semanlink.net/tag/hateoas|uri|http://www.semanlink.net/tag/hateoas
+http://www.semanlink.net/tag/hateoas|broader_prefLabel|REST
+http://www.semanlink.net/tag/hateoas|broader_prefLabel|API
+http://www.semanlink.net/tag/hateoas|broader_prefLabel|Hypermedia
+http://www.semanlink.net/tag/collaborative_editing|creationTime|2013-04-17T10:45:32Z
+http://www.semanlink.net/tag/collaborative_editing|prefLabel|Collaborative editing
+http://www.semanlink.net/tag/collaborative_editing|creationDate|2013-04-17
+http://www.semanlink.net/tag/collaborative_editing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/collaborative_editing|uri|http://www.semanlink.net/tag/collaborative_editing
+http://www.semanlink.net/tag/sequence_labeling|creationTime|2018-02-16T00:25:55Z
+http://www.semanlink.net/tag/sequence_labeling|prefLabel|Sequence labeling
+http://www.semanlink.net/tag/sequence_labeling|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/sequence_labeling|related|http://www.semanlink.net/tag/ml_sequential_data +http://www.semanlink.net/tag/sequence_labeling|creationDate|2018-02-16 +http://www.semanlink.net/tag/sequence_labeling|comment|pattern recognition task that involves the assignment of a categorical label to each member of a sequence of observed values. Eg: POS tagging +http://www.semanlink.net/tag/sequence_labeling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sequence_labeling|describedBy|https://en.wikipedia.org/wiki/Sequence_labeling +http://www.semanlink.net/tag/sequence_labeling|altLabel|Sequence Tagging +http://www.semanlink.net/tag/sequence_labeling|uri|http://www.semanlink.net/tag/sequence_labeling +http://www.semanlink.net/tag/sequence_labeling|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/irrigation|prefLabel|Irrigation +http://www.semanlink.net/tag/irrigation|broader|http://www.semanlink.net/tag/eau +http://www.semanlink.net/tag/irrigation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/irrigation|uri|http://www.semanlink.net/tag/irrigation +http://www.semanlink.net/tag/irrigation|broader_prefLabel|Eau +http://www.semanlink.net/tag/linked_data_cache|creationTime|2013-01-07T01:12:57Z +http://www.semanlink.net/tag/linked_data_cache|prefLabel|Linked Data Cache +http://www.semanlink.net/tag/linked_data_cache|broader|http://www.semanlink.net/tag/cache +http://www.semanlink.net/tag/linked_data_cache|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_cache|creationDate|2013-01-07 +http://www.semanlink.net/tag/linked_data_cache|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_cache|uri|http://www.semanlink.net/tag/linked_data_cache +http://www.semanlink.net/tag/linked_data_cache|broader_prefLabel|Cache +http://www.semanlink.net/tag/linked_data_cache|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_cache|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/rdf_data_source 
+http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/tim_berners_lee
+http://www.semanlink.net/tag/linked_data_cache|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.semanlink.net/tag/lalibela|creationTime|2019-03-11T19:51:22Z
+http://www.semanlink.net/tag/lalibela|prefLabel|Lalibela
+http://www.semanlink.net/tag/lalibela|broader|http://www.semanlink.net/tag/ethiopie
+http://www.semanlink.net/tag/lalibela|broader|http://www.semanlink.net/tag/histoire_de_l_afrique
+http://www.semanlink.net/tag/lalibela|creationDate|2019-03-11
+http://www.semanlink.net/tag/lalibela|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/lalibela|describedBy|https://en.wikipedia.org/wiki/Lalibela
+http://www.semanlink.net/tag/lalibela|uri|http://www.semanlink.net/tag/lalibela
+http://www.semanlink.net/tag/lalibela|broader_prefLabel|Ethiopie
+http://www.semanlink.net/tag/lalibela|broader_prefLabel|Histoire de l'Afrique
+http://www.semanlink.net/tag/question_answering|creationTime|2018-06-21T12:55:54Z
+http://www.semanlink.net/tag/question_answering|prefLabel|Question Answering
+http://www.semanlink.net/tag/question_answering|broader|http://www.semanlink.net/tag/nlp_problem
+http://www.semanlink.net/tag/question_answering|related|http://www.semanlink.net/tag/chatbot
+http://www.semanlink.net/tag/question_answering|creationDate|2018-06-21
+http://www.semanlink.net/tag/question_answering|comment|"For a description of the variants of this task, see this [paper](/doc/2020/02/how_much_knowledge_can_you_pack)
+
+- reading comprehension
+- open-domain QA
+  - open-book exam
+  - closed-book exam
+
+> Many real world question answering systems start by retrieving a set of support documents from a
+large source of knowledge such as Wikipedia. Then, a finer-grained model processes these documents
+to extract the answer.
[src](doc:2020/12/2012_04584_distilling_knowled)" +http://www.semanlink.net/tag/question_answering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/question_answering|altLabel|QA +http://www.semanlink.net/tag/question_answering|uri|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/tag/question_answering|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/windows|prefLabel|Windows +http://www.semanlink.net/tag/windows|broader|http://www.semanlink.net/tag/microsoft +http://www.semanlink.net/tag/windows|broader|http://www.semanlink.net/tag/os +http://www.semanlink.net/tag/windows|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/windows|uri|http://www.semanlink.net/tag/windows +http://www.semanlink.net/tag/windows|broader_prefLabel|Microsoft +http://www.semanlink.net/tag/windows|broader_prefLabel|OS +http://www.semanlink.net/tag/windows|broader_related|http://www.semanlink.net/tag/bill_gates +http://www.semanlink.net/tag/ruslan_salakhutdinov|creationTime|2017-08-28T00:20:03Z +http://www.semanlink.net/tag/ruslan_salakhutdinov|prefLabel|Ruslan Salakhutdinov +http://www.semanlink.net/tag/ruslan_salakhutdinov|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/ruslan_salakhutdinov|related|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/ruslan_salakhutdinov|creationDate|2017-08-28 +http://www.semanlink.net/tag/ruslan_salakhutdinov|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ruslan_salakhutdinov|altLabel|Russ Salakhutdinov +http://www.semanlink.net/tag/ruslan_salakhutdinov|uri|http://www.semanlink.net/tag/ruslan_salakhutdinov +http://www.semanlink.net/tag/ruslan_salakhutdinov|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/declin_de_la_france|prefLabel|Déclin de la France +http://www.semanlink.net/tag/declin_de_la_france|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/declin_de_la_france|broader|http://www.semanlink.net/tag/declin_de_l_europe +http://www.semanlink.net/tag/declin_de_la_france|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/declin_de_la_france|uri|http://www.semanlink.net/tag/declin_de_la_france +http://www.semanlink.net/tag/declin_de_la_france|broader_prefLabel|France +http://www.semanlink.net/tag/declin_de_la_france|broader_prefLabel|Déclin de l'Europe +http://www.semanlink.net/tag/taxonomies|prefLabel|Taxonomies +http://www.semanlink.net/tag/taxonomies|broader|http://www.semanlink.net/tag/thesaurus_taxonomies +http://www.semanlink.net/tag/taxonomies|related|http://www.semanlink.net/tag/skos +http://www.semanlink.net/tag/taxonomies|related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/taxonomies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/taxonomies|altLabel|Taxonomy +http://www.semanlink.net/tag/taxonomies|uri|http://www.semanlink.net/tag/taxonomies +http://www.semanlink.net/tag/taxonomies|broader_prefLabel|Thesaurus & Taxonomies +http://www.semanlink.net/tag/taxonomies|broader_related|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/poisson|prefLabel|Poisson +http://www.semanlink.net/tag/poisson|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/poisson|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/poisson|uri|http://www.semanlink.net/tag/poisson +http://www.semanlink.net/tag/poisson|broader_prefLabel|Animal +http://www.semanlink.net/tag/thomson_reuters|creationTime|2010-05-31T13:40:45Z +http://www.semanlink.net/tag/thomson_reuters|prefLabel|Thomson Reuters +http://www.semanlink.net/tag/thomson_reuters|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/thomson_reuters|creationDate|2010-05-31 +http://www.semanlink.net/tag/thomson_reuters|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thomson_reuters|describedBy|https://en.wikipedia.org/wiki/Thomson_Reuters +http://www.semanlink.net/tag/thomson_reuters|uri|http://www.semanlink.net/tag/thomson_reuters +http://www.semanlink.net/tag/thomson_reuters|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/axoum|prefLabel|Axoum +http://www.semanlink.net/tag/axoum|broader|http://www.semanlink.net/tag/ethiopie +http://www.semanlink.net/tag/axoum|broader|http://www.semanlink.net/tag/histoire_de_l_afrique +http://www.semanlink.net/tag/axoum|broader|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/tag/axoum|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/axoum|describedBy|https://fr.wikipedia.org/wiki/Royaume_d'Aksoum +http://www.semanlink.net/tag/axoum|altLabel|Aksoum +http://www.semanlink.net/tag/axoum|altLabel|Royaume d'Aksoum +http://www.semanlink.net/tag/axoum|uri|http://www.semanlink.net/tag/axoum +http://www.semanlink.net/tag/axoum|broader_prefLabel|Ethiopie +http://www.semanlink.net/tag/axoum|broader_prefLabel|Histoire de l'Afrique +http://www.semanlink.net/tag/axoum|broader_prefLabel|Archéologie africaine +http://www.semanlink.net/tag/axoum|broader_related|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/numerisation_des_oeuvres_indisponibles|creationTime|2012-02-20T18:59:27Z +http://www.semanlink.net/tag/numerisation_des_oeuvres_indisponibles|prefLabel|Numérisation des œuvres indisponibles +http://www.semanlink.net/tag/numerisation_des_oeuvres_indisponibles|broader|http://www.semanlink.net/tag/bibliotheque_numerique +http://www.semanlink.net/tag/numerisation_des_oeuvres_indisponibles|creationDate|2012-02-20 +http://www.semanlink.net/tag/numerisation_des_oeuvres_indisponibles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/numerisation_des_oeuvres_indisponibles|uri|http://www.semanlink.net/tag/numerisation_des_oeuvres_indisponibles +http://www.semanlink.net/tag/numerisation_des_oeuvres_indisponibles|broader_prefLabel|Bibliothèque numérique +http://www.semanlink.net/tag/safari|prefLabel|Safari +http://www.semanlink.net/tag/safari|broader|http://www.semanlink.net/tag/apple_software +http://www.semanlink.net/tag/safari|broader|http://www.semanlink.net/tag/brouteur +http://www.semanlink.net/tag/safari|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/safari|uri|http://www.semanlink.net/tag/safari +http://www.semanlink.net/tag/safari|broader_prefLabel|Apple Software +http://www.semanlink.net/tag/safari|broader_prefLabel|Brouteur +http://www.semanlink.net/tag/safari|broader_altLabel|Browser +http://www.semanlink.net/tag/4store|creationTime|2010-09-30T23:22:58Z +http://www.semanlink.net/tag/4store|prefLabel|4store +http://www.semanlink.net/tag/4store|broader|http://www.semanlink.net/tag/triplestore +http://www.semanlink.net/tag/4store|creationDate|2010-09-30 
+http://www.semanlink.net/tag/4store|comment|Scalable RDF storage +http://www.semanlink.net/tag/4store|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/4store|describedBy|http://4store.org/ +http://www.semanlink.net/tag/4store|uri|http://www.semanlink.net/tag/4store +http://www.semanlink.net/tag/4store|broader_prefLabel|TripleStore +http://www.semanlink.net/tag/4store|broader_altLabel|RDF database +http://www.semanlink.net/tag/banque|creationTime|2008-09-08T21:59:13Z +http://www.semanlink.net/tag/banque|prefLabel|Banque +http://www.semanlink.net/tag/banque|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/banque|creationDate|2008-09-08 +http://www.semanlink.net/tag/banque|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/banque|uri|http://www.semanlink.net/tag/banque +http://www.semanlink.net/tag/banque|broader_prefLabel|Finance +http://www.semanlink.net/tag/datalift|creationTime|2011-10-11T00:27:49Z +http://www.semanlink.net/tag/datalift|prefLabel|Datalift +http://www.semanlink.net/tag/datalift|broader|http://www.semanlink.net/tag/semantic_web_project +http://www.semanlink.net/tag/datalift|creationDate|2011-10-11 +http://www.semanlink.net/tag/datalift|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/datalift|homepage|http://datalift.org +http://www.semanlink.net/tag/datalift|uri|http://www.semanlink.net/tag/datalift +http://www.semanlink.net/tag/datalift|broader_prefLabel|Semantic Web project +http://www.semanlink.net/tag/theorie_des_cordes|prefLabel|Théorie des cordes +http://www.semanlink.net/tag/theorie_des_cordes|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/theorie_des_cordes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/theorie_des_cordes|altLabel|String theory +http://www.semanlink.net/tag/theorie_des_cordes|uri|http://www.semanlink.net/tag/theorie_des_cordes +http://www.semanlink.net/tag/theorie_des_cordes|broader_prefLabel|Physique +http://www.semanlink.net/tag/theorie_des_cordes|broader_altLabel|Physics +http://www.semanlink.net/tag/170_rue_de_lourmel|prefLabel|170, rue de Lourmel +http://www.semanlink.net/tag/170_rue_de_lourmel|broader|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/170_rue_de_lourmel|creationDate|2006-09-23 +http://www.semanlink.net/tag/170_rue_de_lourmel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/170_rue_de_lourmel|uri|http://www.semanlink.net/tag/170_rue_de_lourmel +http://www.semanlink.net/tag/170_rue_de_lourmel|broader_prefLabel|Paris +http://www.semanlink.net/tag/dremel|creationTime|2014-09-08T22:07:17Z +http://www.semanlink.net/tag/dremel|prefLabel|Dremel +http://www.semanlink.net/tag/dremel|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/dremel|broader|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/dremel|creationDate|2014-09-08 +http://www.semanlink.net/tag/dremel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dremel|describedBy|https://en.wikipedia.org/wiki/Dremel_(software) +http://www.semanlink.net/tag/dremel|uri|http://www.semanlink.net/tag/dremel +http://www.semanlink.net/tag/dremel|broader_prefLabel|Google +http://www.semanlink.net/tag/dremel|broader_prefLabel|Big Data +http://www.semanlink.net/tag/dremel|broader_related|http://www.semanlink.net/tag/search_engines 
+http://www.semanlink.net/tag/dremel|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/persistent_data_structure|creationTime|2017-01-03T17:51:03Z +http://www.semanlink.net/tag/persistent_data_structure|prefLabel|Persistent data structure +http://www.semanlink.net/tag/persistent_data_structure|related|http://www.semanlink.net/tag/functional_programming +http://www.semanlink.net/tag/persistent_data_structure|creationDate|2017-01-03 +http://www.semanlink.net/tag/persistent_data_structure|comment|a data structure that always preserves the previous version of itself when it is modified. Such data structures are effectively immutable, as their operations do not (visibly) update the structure in-place, but instead always yield a new updated structure. +http://www.semanlink.net/tag/persistent_data_structure|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/persistent_data_structure|describedBy|https://en.wikipedia.org/wiki/Persistent_data_structure +http://www.semanlink.net/tag/persistent_data_structure|uri|http://www.semanlink.net/tag/persistent_data_structure +http://www.semanlink.net/tag/indo_europeen|prefLabel|Indo-européen +http://www.semanlink.net/tag/indo_europeen|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/indo_europeen|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/indo_europeen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/indo_europeen|uri|http://www.semanlink.net/tag/indo_europeen +http://www.semanlink.net/tag/indo_europeen|broader_prefLabel|Langues +http://www.semanlink.net/tag/indo_europeen|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/evolvable_systems|creationTime|2007-09-27T22:22:30Z +http://www.semanlink.net/tag/evolvable_systems|prefLabel|Evolvable systems +http://www.semanlink.net/tag/evolvable_systems|creationDate|2007-09-27 +http://www.semanlink.net/tag/evolvable_systems|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/evolvable_systems|altLabel|Evolutivité +http://www.semanlink.net/tag/evolvable_systems|uri|http://www.semanlink.net/tag/evolvable_systems +http://www.semanlink.net/tag/publicite|prefLabel|Publicité +http://www.semanlink.net/tag/publicite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/publicite|altLabel|Advertising +http://www.semanlink.net/tag/publicite|altLabel|Pub +http://www.semanlink.net/tag/publicite|uri|http://www.semanlink.net/tag/publicite +http://www.semanlink.net/tag/music_of_africa|creationTime|2011-01-03T20:56:34Z +http://www.semanlink.net/tag/music_of_africa|prefLabel|Music of Africa +http://www.semanlink.net/tag/music_of_africa|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/music_of_africa|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/music_of_africa|creationDate|2011-01-03 +http://www.semanlink.net/tag/music_of_africa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/music_of_africa|describedBy|https://en.wikipedia.org/wiki/Music_of_Africa +http://www.semanlink.net/tag/music_of_africa|altLabel|Musique africaine +http://www.semanlink.net/tag/music_of_africa|altLabel|African music +http://www.semanlink.net/tag/music_of_africa|uri|http://www.semanlink.net/tag/music_of_africa +http://www.semanlink.net/tag/music_of_africa|broader_prefLabel|Musique 
+http://www.semanlink.net/tag/music_of_africa|broader_prefLabel|Afrique +http://www.semanlink.net/tag/music_of_africa|broader_altLabel|Music +http://www.semanlink.net/tag/music_of_africa|broader_altLabel|Africa +http://www.semanlink.net/tag/turing_test|creationTime|2008-11-21T23:44:11Z +http://www.semanlink.net/tag/turing_test|prefLabel|Turing test +http://www.semanlink.net/tag/turing_test|broader|http://www.semanlink.net/tag/turing +http://www.semanlink.net/tag/turing_test|creationDate|2008-11-21 +http://www.semanlink.net/tag/turing_test|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/turing_test|uri|http://www.semanlink.net/tag/turing_test +http://www.semanlink.net/tag/turing_test|broader_prefLabel|Turing +http://www.semanlink.net/tag/siri|creationTime|2011-11-15T21:19:26Z +http://www.semanlink.net/tag/siri|prefLabel|Siri +http://www.semanlink.net/tag/siri|broader|http://www.semanlink.net/tag/apple_software +http://www.semanlink.net/tag/siri|broader|http://www.semanlink.net/tag/voice_ai +http://www.semanlink.net/tag/siri|broader|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/tag/siri|creationDate|2011-11-15 +http://www.semanlink.net/tag/siri|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/siri|uri|http://www.semanlink.net/tag/siri +http://www.semanlink.net/tag/siri|broader_prefLabel|Apple Software +http://www.semanlink.net/tag/siri|broader_prefLabel|Voice AI +http://www.semanlink.net/tag/siri|broader_prefLabel|Speech-to-Text +http://www.semanlink.net/tag/siri|broader_altLabel|Speech recognition +http://www.semanlink.net/tag/siri|broader_altLabel|Voice recognition +http://www.semanlink.net/tag/siri|broader_related|http://www.semanlink.net/tag/siri +http://www.semanlink.net/tag/siri|broader_related|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/siri|broader_related|http://www.semanlink.net/tag/audio_classification +http://www.semanlink.net/tag/money|prefLabel|Money +http://www.semanlink.net/tag/money|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/money|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/money|altLabel|Monnaie +http://www.semanlink.net/tag/money|uri|http://www.semanlink.net/tag/money +http://www.semanlink.net/tag/money|broader_prefLabel|Finance +http://www.semanlink.net/tag/gaulois|prefLabel|Gaulois +http://www.semanlink.net/tag/gaulois|broader|http://www.semanlink.net/tag/celte +http://www.semanlink.net/tag/gaulois|broader|http://www.semanlink.net/tag/histoire_de_france +http://www.semanlink.net/tag/gaulois|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/gaulois|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gaulois|uri|http://www.semanlink.net/tag/gaulois +http://www.semanlink.net/tag/gaulois|broader_prefLabel|Celte +http://www.semanlink.net/tag/gaulois|broader_prefLabel|Histoire de France +http://www.semanlink.net/tag/gaulois|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/reasoning|creationTime|2008-04-23T11:16:00Z +http://www.semanlink.net/tag/reasoning|prefLabel|Reasoning +http://www.semanlink.net/tag/reasoning|broader|http://www.semanlink.net/tag/logic +http://www.semanlink.net/tag/reasoning|creationDate|2008-04-23 +http://www.semanlink.net/tag/reasoning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/reasoning|uri|http://www.semanlink.net/tag/reasoning +http://www.semanlink.net/tag/reasoning|broader_prefLabel|Logic +http://www.semanlink.net/tag/matlab|creationTime|2014-04-02T18:37:55Z +http://www.semanlink.net/tag/matlab|prefLabel|Matlab +http://www.semanlink.net/tag/matlab|creationDate|2014-04-02 +http://www.semanlink.net/tag/matlab|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/matlab|uri|http://www.semanlink.net/tag/matlab +http://www.semanlink.net/tag/global_workspace_theory|creationTime|2019-11-12T16:26:50Z +http://www.semanlink.net/tag/global_workspace_theory|prefLabel|Global workspace theory +http://www.semanlink.net/tag/global_workspace_theory|broader|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/global_workspace_theory|related|http://www.semanlink.net/tag/stanislas_dehaene +http://www.semanlink.net/tag/global_workspace_theory|related|http://www.semanlink.net/tag/models_of_consciousness +http://www.semanlink.net/tag/global_workspace_theory|creationDate|2019-11-12 +http://www.semanlink.net/tag/global_workspace_theory|comment|In the Global Workspace Theory developed by Bernard Baars and extended by [#Dehaene](/tag/stanislas_dehaene), sensory data is initially processed in the primary sensory areas located in posterior cortex, propagates forward and is further processed in increasingly-abstract multi-modal association areas. Even as information flows forward toward the front of the brain, the results of abstract computations performed in the association areas are fed back toward the primary sensory cortex. This basic pattern of activity is common in all mammals ([src](https://web.stanford.edu/class/cs379c/calendar_invited_talks/lectures/04/04/slides/CS379C_Thomas_Dean_Lecture_04_02_19.pdf)) +http://www.semanlink.net/tag/global_workspace_theory|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/global_workspace_theory|describedBy|https://en.wikipedia.org/wiki/Global_workspace_theory +http://www.semanlink.net/tag/global_workspace_theory|uri|http://www.semanlink.net/tag/global_workspace_theory +http://www.semanlink.net/tag/global_workspace_theory|broader_prefLabel|Neuroscience +http://www.semanlink.net/tag/arthropodes|creationTime|2019-11-09T13:36:33Z +http://www.semanlink.net/tag/arthropodes|prefLabel|Arthropodes +http://www.semanlink.net/tag/arthropodes|creationDate|2019-11-09 +http://www.semanlink.net/tag/arthropodes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arthropodes|describedBy|https://fr.wikipedia.org/wiki/Arthropodes +http://www.semanlink.net/tag/arthropodes|uri|http://www.semanlink.net/tag/arthropodes +http://www.semanlink.net/tag/samy_bengio|creationTime|2018-03-17T00:01:26Z +http://www.semanlink.net/tag/samy_bengio|prefLabel|Samy Bengio +http://www.semanlink.net/tag/samy_bengio|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/samy_bengio|related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/samy_bengio|creationDate|2018-03-17 +http://www.semanlink.net/tag/samy_bengio|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/samy_bengio|uri|http://www.semanlink.net/tag/samy_bengio +http://www.semanlink.net/tag/samy_bengio|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/xavier_bertrand|creationTime|2009-01-31T20:35:31Z +http://www.semanlink.net/tag/xavier_bertrand|prefLabel|Xavier 
Bertrand +http://www.semanlink.net/tag/xavier_bertrand|broader|http://www.semanlink.net/tag/gouvernement_sarkozy +http://www.semanlink.net/tag/xavier_bertrand|creationDate|2009-01-31 +http://www.semanlink.net/tag/xavier_bertrand|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xavier_bertrand|uri|http://www.semanlink.net/tag/xavier_bertrand +http://www.semanlink.net/tag/xavier_bertrand|broader_prefLabel|Gouvernement Sarkozy +http://www.semanlink.net/tag/passage_ai|creationTime|2019-03-26T08:45:27Z +http://www.semanlink.net/tag/passage_ai|prefLabel|Passage AI +http://www.semanlink.net/tag/passage_ai|broader|http://www.semanlink.net/tag/chatbot +http://www.semanlink.net/tag/passage_ai|broader|http://www.semanlink.net/tag/ai_startups +http://www.semanlink.net/tag/passage_ai|creationDate|2019-03-26 +http://www.semanlink.net/tag/passage_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/passage_ai|homepage|https://passage.ai/ +http://www.semanlink.net/tag/passage_ai|uri|http://www.semanlink.net/tag/passage_ai +http://www.semanlink.net/tag/passage_ai|broader_prefLabel|Chatbots +http://www.semanlink.net/tag/passage_ai|broader_prefLabel|AI: startups +http://www.semanlink.net/tag/passage_ai|broader_altLabel|Chatbot +http://www.semanlink.net/tag/list_only_entity_linking|creationTime|2019-04-24T15:22:44Z +http://www.semanlink.net/tag/list_only_entity_linking|prefLabel|List-only Entity Linking +http://www.semanlink.net/tag/list_only_entity_linking|broader|http://www.semanlink.net/tag/entity_linking +http://www.semanlink.net/tag/list_only_entity_linking|creationDate|2019-04-24 +http://www.semanlink.net/tag/list_only_entity_linking|comment|"Entity Linking often relies on rich structures and properties in the target knowledge base (KB).
However, **in many applications, the KB may be as simple and sparse as lists of names of the same type** (e.g., **lists of products**) -> the List-only entity linking problem +" +http://www.semanlink.net/tag/list_only_entity_linking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/list_only_entity_linking|uri|http://www.semanlink.net/tag/list_only_entity_linking +http://www.semanlink.net/tag/list_only_entity_linking|broader_prefLabel|Entity linking +http://www.semanlink.net/tag/list_only_entity_linking|broader_altLabel|Named entity disambiguation +http://www.semanlink.net/tag/list_only_entity_linking|broader_related|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen|prefLabel|Paradoxe Einstein-Podolsky-Rosen +http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen|broader|http://www.semanlink.net/tag/mecanique_quantique +http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen|broader|http://www.semanlink.net/tag/einstein +http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen|broader|http://www.semanlink.net/tag/photons_correles +http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen|uri|http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen +http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen|broader_prefLabel|Mécanique quantique +http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen|broader_prefLabel|Einstein +http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen|broader_prefLabel|Photons corrélés +http://www.semanlink.net/tag/simile_timeline|prefLabel|SIMILE Timeline +http://www.semanlink.net/tag/simile_timeline|broader|http://www.semanlink.net/tag/simile +http://www.semanlink.net/tag/simile_timeline|broader|http://www.semanlink.net/tag/timeline +http://www.semanlink.net/tag/simile_timeline|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/simile_timeline|uri|http://www.semanlink.net/tag/simile_timeline +http://www.semanlink.net/tag/simile_timeline|broader_prefLabel|SIMILE +http://www.semanlink.net/tag/simile_timeline|broader_prefLabel|Timeline +http://www.semanlink.net/tag/sparse_distributed_memory|creationTime|2013-04-06T02:46:14Z +http://www.semanlink.net/tag/sparse_distributed_memory|prefLabel|Sparse distributed memory +http://www.semanlink.net/tag/sparse_distributed_memory|related|http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data +http://www.semanlink.net/tag/sparse_distributed_memory|related|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/sparse_distributed_memory|creationDate|2013-04-06 +http://www.semanlink.net/tag/sparse_distributed_memory|comment|"a mathematical model of human long-term memory. + +It is a generalized random-access memory (RAM) for long (e.g., 1,000 bit) binary words. These words serve as both addresses to and data for the memory. The main attribute of the memory is sensitivity to similarity, meaning that a word can be read back not only by giving the original write address but also by giving one close to it, as measured by the number of mismatched bits (i.e., the Hamming distance between memory addresses). + +cf. Observation that the distances between points of a high-dimensional space resemble the proximity relations between concepts in human memory. 
+ +" +http://www.semanlink.net/tag/sparse_distributed_memory|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparse_distributed_memory|describedBy|https://en.wikipedia.org/wiki/Sparse_distributed_memory +http://www.semanlink.net/tag/sparse_distributed_memory|uri|http://www.semanlink.net/tag/sparse_distributed_memory +http://www.semanlink.net/tag/cryptocurrency|creationTime|2016-09-17T15:03:48Z +http://www.semanlink.net/tag/cryptocurrency|prefLabel|Cryptocurrency +http://www.semanlink.net/tag/cryptocurrency|creationDate|2016-09-17 +http://www.semanlink.net/tag/cryptocurrency|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cryptocurrency|describedBy|https://en.wikipedia.org/wiki/Cryptocurrency +http://www.semanlink.net/tag/cryptocurrency|uri|http://www.semanlink.net/tag/cryptocurrency +http://www.semanlink.net/tag/cybersex|prefLabel|Cybersex +http://www.semanlink.net/tag/cybersex|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/cybersex|broader|http://www.semanlink.net/tag/sexe +http://www.semanlink.net/tag/cybersex|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cybersex|uri|http://www.semanlink.net/tag/cybersex +http://www.semanlink.net/tag/cybersex|broader_prefLabel|Internet +http://www.semanlink.net/tag/cybersex|broader_prefLabel|Sexe +http://www.semanlink.net/tag/coupling|creationTime|2016-01-04T11:52:04Z +http://www.semanlink.net/tag/coupling|prefLabel|Coupling +http://www.semanlink.net/tag/coupling|creationDate|2016-01-04 +http://www.semanlink.net/tag/coupling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coupling|uri|http://www.semanlink.net/tag/coupling +http://www.semanlink.net/tag/cognition_as_a_service|creationTime|2014-02-03T22:57:06Z +http://www.semanlink.net/tag/cognition_as_a_service|prefLabel|Cognition-as-a-Service +http://www.semanlink.net/tag/cognition_as_a_service|broader|http://www.semanlink.net/tag/cognition +http://www.semanlink.net/tag/cognition_as_a_service|broader|http://www.semanlink.net/tag/cognitive_computing +http://www.semanlink.net/tag/cognition_as_a_service|related|http://www.semanlink.net/tag/i_b_m_s_watson +http://www.semanlink.net/tag/cognition_as_a_service|creationDate|2014-02-03 +http://www.semanlink.net/tag/cognition_as_a_service|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cognition_as_a_service|uri|http://www.semanlink.net/tag/cognition_as_a_service +http://www.semanlink.net/tag/cognition_as_a_service|broader_prefLabel|Cognition +http://www.semanlink.net/tag/cognition_as_a_service|broader_prefLabel|Cognitive computing +http://www.semanlink.net/tag/semantic_folding|creationTime|2017-11-19T15:57:31Z +http://www.semanlink.net/tag/semantic_folding|prefLabel|Semantic folding +http://www.semanlink.net/tag/semantic_folding|broader|http://www.semanlink.net/tag/semantic_fingerprints +http://www.semanlink.net/tag/semantic_folding|related|http://www.semanlink.net/tag/hierarchical_temporal_memory +http://www.semanlink.net/tag/semantic_folding|related|http://www.semanlink.net/tag/sparse_distributed_memory +http://www.semanlink.net/tag/semantic_folding|creationDate|2017-11-19 +http://www.semanlink.net/tag/semantic_folding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_folding|describedBy|https://en.wikipedia.org/wiki/Semantic_folding 
+http://www.semanlink.net/tag/semantic_folding|uri|http://www.semanlink.net/tag/semantic_folding +http://www.semanlink.net/tag/semantic_folding|broader_prefLabel|Semantic fingerprints +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|creationTime|2018-01-03T17:28:43Z +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|prefLabel|Word + Entity embeddings +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader|http://www.semanlink.net/tag/text_kg_and_embeddings +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|related|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|creationDate|2018-01-03 +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|uri|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_prefLabel|Entity embeddings +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_prefLabel|Word embeddings +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_prefLabel|Text, KG and embeddings +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_altLabel|Word Embedding +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_altLabel|Plongement lexical +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_altLabel|Embeddings of Text + Knowledge Graphs +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_altLabel|Embeddings of Text + Knowledge Bases +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_altLabel|Embeddings of text + KB +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_related|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/combining_word_and_entity_embeddings|broader_related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/one_shot_generalization|creationTime|2016-03-18T00:02:45Z +http://www.semanlink.net/tag/one_shot_generalization|prefLabel|One-Shot Learning +http://www.semanlink.net/tag/one_shot_generalization|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/one_shot_generalization|broader|http://www.semanlink.net/tag/few_shot_learning +http://www.semanlink.net/tag/one_shot_generalization|related|http://www.semanlink.net/tag/zero_shot_learning +http://www.semanlink.net/tag/one_shot_generalization|creationDate|2016-03-18 +http://www.semanlink.net/tag/one_shot_generalization|comment|"Classification under the +restriction that we may only observe a single example of +each possible class before making a prediction about a test +instance." 
+http://www.semanlink.net/tag/one_shot_generalization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/one_shot_generalization|altLabel|One-Shot Generalization +http://www.semanlink.net/tag/one_shot_generalization|uri|http://www.semanlink.net/tag/one_shot_generalization +http://www.semanlink.net/tag/one_shot_generalization|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/one_shot_generalization|broader_prefLabel|Few-shot learning +http://www.semanlink.net/tag/one_shot_generalization|broader_altLabel|ML +http://www.semanlink.net/tag/one_shot_generalization|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/one_shot_generalization|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/texaco|creationTime|2009-03-10T23:05:23Z +http://www.semanlink.net/tag/texaco|prefLabel|Texaco +http://www.semanlink.net/tag/texaco|broader|http://www.semanlink.net/tag/compagnies_petrolieres +http://www.semanlink.net/tag/texaco|creationDate|2009-03-10 +http://www.semanlink.net/tag/texaco|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/texaco|uri|http://www.semanlink.net/tag/texaco +http://www.semanlink.net/tag/texaco|broader_prefLabel|Compagnies pétrolières +http://www.semanlink.net/tag/selective_classification|creationTime|2021-10-16T09:13:59Z +http://www.semanlink.net/tag/selective_classification|prefLabel|Selective Classification +http://www.semanlink.net/tag/selective_classification|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/selective_classification|creationDate|2021-10-16 +http://www.semanlink.net/tag/selective_classification|comment|In Selective classification, models can abstain when they are unsure about a prediction +http://www.semanlink.net/tag/selective_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/selective_classification|uri|http://www.semanlink.net/tag/selective_classification +http://www.semanlink.net/tag/selective_classification|broader_prefLabel|Classification +http://www.semanlink.net/tag/patent_landscaping|creationTime|2019-03-17T23:24:49Z +http://www.semanlink.net/tag/patent_landscaping|prefLabel|Patent Landscaping +http://www.semanlink.net/tag/patent_landscaping|broader|http://www.semanlink.net/tag/patent +http://www.semanlink.net/tag/patent_landscaping|broader|http://www.semanlink.net/tag/ip_ir_ml_ia +http://www.semanlink.net/tag/patent_landscaping|related|http://www.semanlink.net/tag/scientific_information_extraction +http://www.semanlink.net/tag/patent_landscaping|creationDate|2019-03-17 +http://www.semanlink.net/tag/patent_landscaping|comment|a method for searching related patents during the process of an R&D project.
+http://www.semanlink.net/tag/patent_landscaping|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/patent_landscaping|uri|http://www.semanlink.net/tag/patent_landscaping +http://www.semanlink.net/tag/patent_landscaping|broader_prefLabel|Patent +http://www.semanlink.net/tag/patent_landscaping|broader_prefLabel|AI 4 IP +http://www.semanlink.net/tag/patent_landscaping|broader_altLabel|Brevet +http://www.semanlink.net/tag/supraconductivite|creationTime|2011-10-21T23:14:11Z +http://www.semanlink.net/tag/supraconductivite|prefLabel|Supraconductivité +http://www.semanlink.net/tag/supraconductivite|broader|http://www.semanlink.net/tag/mecanique_quantique +http://www.semanlink.net/tag/supraconductivite|creationDate|2011-10-21 +http://www.semanlink.net/tag/supraconductivite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/supraconductivite|uri|http://www.semanlink.net/tag/supraconductivite +http://www.semanlink.net/tag/supraconductivite|broader_prefLabel|Mécanique quantique +http://www.semanlink.net/tag/asimov|creationTime|2012-11-30T22:27:28Z +http://www.semanlink.net/tag/asimov|prefLabel|Asimov +http://www.semanlink.net/tag/asimov|creationDate|2012-11-30 +http://www.semanlink.net/tag/asimov|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/asimov|uri|http://www.semanlink.net/tag/asimov +http://www.semanlink.net/tag/gastronomie|creationTime|2007-03-08T00:32:24Z +http://www.semanlink.net/tag/gastronomie|prefLabel|Gastronomie +http://www.semanlink.net/tag/gastronomie|creationDate|2007-03-08 +http://www.semanlink.net/tag/gastronomie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gastronomie|altLabel|Cuisine +http://www.semanlink.net/tag/gastronomie|uri|http://www.semanlink.net/tag/gastronomie +http://www.semanlink.net/tag/supervised_machine_learning|creationTime|2013-06-06T14:56:45Z +http://www.semanlink.net/tag/supervised_machine_learning|prefLabel|Supervised machine learning +http://www.semanlink.net/tag/supervised_machine_learning|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/supervised_machine_learning|creationDate|2013-06-06 +http://www.semanlink.net/tag/supervised_machine_learning|comment|the machine learning task of inferring a function from labeled training data. 
+http://www.semanlink.net/tag/supervised_machine_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/supervised_machine_learning|describedBy|https://en.wikipedia.org/wiki/Supervised_learning +http://www.semanlink.net/tag/supervised_machine_learning|uri|http://www.semanlink.net/tag/supervised_machine_learning +http://www.semanlink.net/tag/supervised_machine_learning|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/supervised_machine_learning|broader_altLabel|ML +http://www.semanlink.net/tag/supervised_machine_learning|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/supervised_machine_learning|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/semantic_web_portal|prefLabel|Semantic Web : Portal +http://www.semanlink.net/tag/semantic_web_portal|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_portal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_portal|uri|http://www.semanlink.net/tag/semantic_web_portal +http://www.semanlink.net/tag/semantic_web_portal|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_portal|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_portal|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/rnn_based_language_model|creationTime|2018-11-06T00:10:11Z +http://www.semanlink.net/tag/rnn_based_language_model|prefLabel|RNN based Language Model +http://www.semanlink.net/tag/rnn_based_language_model|broader|http://www.semanlink.net/tag/recurrent_neural_network +http://www.semanlink.net/tag/rnn_based_language_model|broader|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/tag/rnn_based_language_model|creationDate|2018-11-06 +http://www.semanlink.net/tag/rnn_based_language_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rnn_based_language_model|altLabel|RNN-LM +http://www.semanlink.net/tag/rnn_based_language_model|uri|http://www.semanlink.net/tag/rnn_based_language_model +http://www.semanlink.net/tag/rnn_based_language_model|broader_prefLabel|Recurrent neural network +http://www.semanlink.net/tag/rnn_based_language_model|broader_prefLabel|Language model +http://www.semanlink.net/tag/rnn_based_language_model|broader_altLabel|RNN +http://www.semanlink.net/tag/rnn_based_language_model|broader_altLabel|Language Modeling +http://www.semanlink.net/tag/rnn_based_language_model|broader_altLabel|LM +http://www.semanlink.net/tag/rnn_based_language_model|broader_altLabel|Statistical Language Model +http://www.semanlink.net/tag/rnn_based_language_model|broader_related|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/tag/rnn_based_language_model|broader_related|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/tag/rnn_based_language_model|broader_related|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/rnn_based_language_model|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/neural_coding|creationTime|2017-12-04T08:52:44Z +http://www.semanlink.net/tag/neural_coding|prefLabel|Neural coding +http://www.semanlink.net/tag/neural_coding|broader|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/neural_coding|broader|http://www.semanlink.net/tag/neuroscience 
+http://www.semanlink.net/tag/neural_coding|creationDate|2017-12-04 +http://www.semanlink.net/tag/neural_coding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neural_coding|describedBy|https://en.wikipedia.org/wiki/Neural_coding +http://www.semanlink.net/tag/neural_coding|uri|http://www.semanlink.net/tag/neural_coding +http://www.semanlink.net/tag/neural_coding|broader_prefLabel|Computational Neuroscience +http://www.semanlink.net/tag/neural_coding|broader_prefLabel|Neuroscience +http://www.semanlink.net/tag/neural_coding|broader_related|http://www.semanlink.net/tag/brain_vs_deep_learning +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|creationTime|2008-10-16T22:00:46Z +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|prefLabel|Uncertainty Reasoning AND Semantic Web +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|broader|http://www.semanlink.net/tag/uncertainty_reasoning +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|creationDate|2008-10-16 +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|uri|http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|broader_prefLabel|Uncertainty Reasoning +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|broader_altLabel|sw +http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/datao|creationTime|2014-11-08T07:40:51Z +http://www.semanlink.net/tag/datao|prefLabel|Datao +http://www.semanlink.net/tag/datao|broader|http://www.semanlink.net/tag/semantic_web_search_engine +http://www.semanlink.net/tag/datao|broader|http://www.semanlink.net/tag/olivier_rossel +http://www.semanlink.net/tag/datao|creationDate|2014-11-08 +http://www.semanlink.net/tag/datao|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/datao|uri|http://www.semanlink.net/tag/datao +http://www.semanlink.net/tag/datao|broader_prefLabel|Semantic Web search engine +http://www.semanlink.net/tag/datao|broader_prefLabel|Olivier Rossel +http://www.semanlink.net/tag/datao|broader_altLabel|RDF search engine +http://www.semanlink.net/tag/terre_de_feu|creationTime|2007-03-28T23:49:10Z +http://www.semanlink.net/tag/terre_de_feu|prefLabel|Terre de Feu +http://www.semanlink.net/tag/terre_de_feu|broader|http://www.semanlink.net/tag/argentine +http://www.semanlink.net/tag/terre_de_feu|broader|http://www.semanlink.net/tag/chili +http://www.semanlink.net/tag/terre_de_feu|creationDate|2007-03-28 +http://www.semanlink.net/tag/terre_de_feu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/terre_de_feu|uri|http://www.semanlink.net/tag/terre_de_feu +http://www.semanlink.net/tag/terre_de_feu|broader_prefLabel|Argentine +http://www.semanlink.net/tag/terre_de_feu|broader_prefLabel|Chili +http://www.semanlink.net/tag/volkswagen|creationTime|2011-03-24T16:38:27Z +http://www.semanlink.net/tag/volkswagen|prefLabel|Volkswagen 
+http://www.semanlink.net/tag/volkswagen|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/volkswagen|creationDate|2011-03-24 +http://www.semanlink.net/tag/volkswagen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/volkswagen|altLabel|VW +http://www.semanlink.net/tag/volkswagen|uri|http://www.semanlink.net/tag/volkswagen +http://www.semanlink.net/tag/volkswagen|broader_prefLabel|Automobile +http://www.semanlink.net/tag/volkswagen|broader_altLabel|Automotive +http://www.semanlink.net/tag/dev_tips|creationTime|2015-02-25T15:57:22Z +http://www.semanlink.net/tag/dev_tips|prefLabel|Dev tips +http://www.semanlink.net/tag/dev_tips|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/dev_tips|broader|http://www.semanlink.net/tag/tips +http://www.semanlink.net/tag/dev_tips|creationDate|2015-02-25 +http://www.semanlink.net/tag/dev_tips|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dev_tips|altLabel|Dev tip +http://www.semanlink.net/tag/dev_tips|uri|http://www.semanlink.net/tag/dev_tips +http://www.semanlink.net/tag/dev_tips|broader_prefLabel|Dev +http://www.semanlink.net/tag/dev_tips|broader_prefLabel|Tips +http://www.semanlink.net/tag/alexandre_monnin|creationTime|2012-10-09T11:31:35Z +http://www.semanlink.net/tag/alexandre_monnin|prefLabel|Alexandre Monnin +http://www.semanlink.net/tag/alexandre_monnin|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/alexandre_monnin|creationDate|2012-10-09 +http://www.semanlink.net/tag/alexandre_monnin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alexandre_monnin|uri|http://www.semanlink.net/tag/alexandre_monnin +http://www.semanlink.net/tag/alexandre_monnin|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/tiger|prefLabel|Tiger +http://www.semanlink.net/tag/tiger|broader|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/tag/tiger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tiger|altLabel|Mac OS X 10.4 +http://www.semanlink.net/tag/tiger|uri|http://www.semanlink.net/tag/tiger +http://www.semanlink.net/tag/tiger|broader_prefLabel|Mac OS X +http://www.semanlink.net/tag/tiger|broader_altLabel|OS X +http://www.semanlink.net/tag/tiger|broader_altLabel|OSX +http://www.semanlink.net/tag/ird|prefLabel|IRD +http://www.semanlink.net/tag/ird|broader|http://www.semanlink.net/tag/recherche +http://www.semanlink.net/tag/ird|broader|http://www.semanlink.net/tag/developpement +http://www.semanlink.net/tag/ird|broader|http://www.semanlink.net/tag/zones_intertropicales +http://www.semanlink.net/tag/ird|comment|Projets scientifiques centrés sur les relations entre l'homme et son environnement dans les zones intertropicales.
+http://www.semanlink.net/tag/ird|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ird|uri|http://www.semanlink.net/tag/ird +http://www.semanlink.net/tag/ird|broader_prefLabel|Recherche +http://www.semanlink.net/tag/ird|broader_prefLabel|Développement +http://www.semanlink.net/tag/ird|broader_prefLabel|Zones intertropicales +http://www.semanlink.net/tag/ird|broader_altLabel|Research +http://www.semanlink.net/tag/m2eclipse|creationTime|2012-09-06T15:00:59Z +http://www.semanlink.net/tag/m2eclipse|prefLabel|m2eclipse +http://www.semanlink.net/tag/m2eclipse|broader|http://www.semanlink.net/tag/eclipse +http://www.semanlink.net/tag/m2eclipse|broader|http://www.semanlink.net/tag/maven +http://www.semanlink.net/tag/m2eclipse|creationDate|2012-09-06 +http://www.semanlink.net/tag/m2eclipse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/m2eclipse|uri|http://www.semanlink.net/tag/m2eclipse +http://www.semanlink.net/tag/m2eclipse|broader_prefLabel|Eclipse +http://www.semanlink.net/tag/m2eclipse|broader_prefLabel|Maven +http://www.semanlink.net/tag/mac_os_x_web_serving|prefLabel|Mac OS X Web serving +http://www.semanlink.net/tag/mac_os_x_web_serving|broader|http://www.semanlink.net/tag/web_serving +http://www.semanlink.net/tag/mac_os_x_web_serving|broader|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/tag/mac_os_x_web_serving|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mac_os_x_web_serving|uri|http://www.semanlink.net/tag/mac_os_x_web_serving +http://www.semanlink.net/tag/mac_os_x_web_serving|broader_prefLabel|Web Serving +http://www.semanlink.net/tag/mac_os_x_web_serving|broader_prefLabel|Mac OS X +http://www.semanlink.net/tag/mac_os_x_web_serving|broader_altLabel|OS X +http://www.semanlink.net/tag/mac_os_x_web_serving|broader_altLabel|OSX +http://www.semanlink.net/tag/rdflib|creationTime|2013-09-06T18:28:00Z +http://www.semanlink.net/tag/rdflib|prefLabel|RDFLib +http://www.semanlink.net/tag/rdflib|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/rdflib|broader|http://www.semanlink.net/tag/rdf_tools +http://www.semanlink.net/tag/rdflib|creationDate|2013-09-06 +http://www.semanlink.net/tag/rdflib|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdflib|uri|http://www.semanlink.net/tag/rdflib +http://www.semanlink.net/tag/rdflib|broader_prefLabel|Python +http://www.semanlink.net/tag/rdflib|broader_prefLabel|RDF Tools +http://www.semanlink.net/tag/bhaskar_mitra|creationTime|2017-09-18T19:01:07Z +http://www.semanlink.net/tag/bhaskar_mitra|prefLabel|Bhaskar Mitra +http://www.semanlink.net/tag/bhaskar_mitra|broader|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/tag/bhaskar_mitra|broader|http://www.semanlink.net/tag/microsoft_research +http://www.semanlink.net/tag/bhaskar_mitra|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/bhaskar_mitra|related|http://www.semanlink.net/tag/bing +http://www.semanlink.net/tag/bhaskar_mitra|creationDate|2017-09-18 +http://www.semanlink.net/tag/bhaskar_mitra|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bhaskar_mitra|describedBy|https://www.microsoft.com/en-us/research/people/bmitra/ +http://www.semanlink.net/tag/bhaskar_mitra|uri|http://www.semanlink.net/tag/bhaskar_mitra +http://www.semanlink.net/tag/bhaskar_mitra|broader_prefLabel|NLP@Microsoft 
+http://www.semanlink.net/tag/bhaskar_mitra|broader_prefLabel|Microsoft Research +http://www.semanlink.net/tag/bhaskar_mitra|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/film_a_voir|creationTime|2020-11-08T19:58:16Z +http://www.semanlink.net/tag/film_a_voir|prefLabel|Film à voir +http://www.semanlink.net/tag/film_a_voir|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_a_voir|creationDate|2020-11-08 +http://www.semanlink.net/tag/film_a_voir|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_a_voir|uri|http://www.semanlink.net/tag/film_a_voir +http://www.semanlink.net/tag/film_a_voir|broader_prefLabel|Film +http://www.semanlink.net/tag/film_bresilien|prefLabel|Filme brasileiro +http://www.semanlink.net/tag/film_bresilien|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/film_bresilien|broader|http://www.semanlink.net/tag/cinema_bresilien +http://www.semanlink.net/tag/film_bresilien|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_bresilien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_bresilien|altLabel|Film brésilien +http://www.semanlink.net/tag/film_bresilien|uri|http://www.semanlink.net/tag/film_bresilien +http://www.semanlink.net/tag/film_bresilien|broader_prefLabel|Brésil +http://www.semanlink.net/tag/film_bresilien|broader_prefLabel|Cinéma brésilien +http://www.semanlink.net/tag/film_bresilien|broader_prefLabel|Film +http://www.semanlink.net/tag/film_bresilien|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/film_argentin|prefLabel|Film argentin +http://www.semanlink.net/tag/film_argentin|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_argentin|broader|http://www.semanlink.net/tag/argentine +http://www.semanlink.net/tag/film_argentin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_argentin|uri|http://www.semanlink.net/tag/film_argentin +http://www.semanlink.net/tag/film_argentin|broader_prefLabel|Film +http://www.semanlink.net/tag/film_argentin|broader_prefLabel|Argentine +http://www.semanlink.net/tag/google_sucks|creationTime|2018-09-28T22:52:58Z +http://www.semanlink.net/tag/google_sucks|prefLabel|Google sucks +http://www.semanlink.net/tag/google_sucks|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_sucks|creationDate|2018-09-28 +http://www.semanlink.net/tag/google_sucks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_sucks|uri|http://www.semanlink.net/tag/google_sucks +http://www.semanlink.net/tag/google_sucks|broader_prefLabel|Google +http://www.semanlink.net/tag/google_sucks|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/thewebconf_2021|creationTime|2021-04-08T17:59:54Z +http://www.semanlink.net/tag/thewebconf_2021|prefLabel|TheWebConf 2021 +http://www.semanlink.net/tag/thewebconf_2021|broader|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/thewebconf_2021|creationDate|2021-04-08 +http://www.semanlink.net/tag/thewebconf_2021|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thewebconf_2021|uri|http://www.semanlink.net/tag/thewebconf_2021 +http://www.semanlink.net/tag/thewebconf_2021|broader_prefLabel|TheWebConf +http://www.semanlink.net/tag/thewebconf_2021|broader_altLabel|WWW Conference 
+http://www.semanlink.net/tag/google_advertising|creationTime|2021-03-06T00:01:09Z +http://www.semanlink.net/tag/google_advertising|prefLabel|Google + Advertising +http://www.semanlink.net/tag/google_advertising|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_advertising|broader|http://www.semanlink.net/tag/publicite_internet +http://www.semanlink.net/tag/google_advertising|creationDate|2021-03-06 +http://www.semanlink.net/tag/google_advertising|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_advertising|uri|http://www.semanlink.net/tag/google_advertising +http://www.semanlink.net/tag/google_advertising|broader_prefLabel|Google +http://www.semanlink.net/tag/google_advertising|broader_prefLabel|Publicité Internet +http://www.semanlink.net/tag/google_advertising|broader_altLabel|Online advertising +http://www.semanlink.net/tag/google_advertising|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/semantic_framework|creationTime|2010-08-12T16:20:29Z +http://www.semanlink.net/tag/semantic_framework|prefLabel|Semantic framework +http://www.semanlink.net/tag/semantic_framework|broader|http://www.semanlink.net/tag/semantic_web_dev +http://www.semanlink.net/tag/semantic_framework|broader|http://www.semanlink.net/tag/frameworks +http://www.semanlink.net/tag/semantic_framework|creationDate|2010-08-12 +http://www.semanlink.net/tag/semantic_framework|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_framework|uri|http://www.semanlink.net/tag/semantic_framework +http://www.semanlink.net/tag/semantic_framework|broader_prefLabel|Semantic Web Dev +http://www.semanlink.net/tag/semantic_framework|broader_prefLabel|Frameworks +http://www.semanlink.net/tag/herschel_telescope|creationTime|2009-05-06T23:43:39Z +http://www.semanlink.net/tag/herschel_telescope|prefLabel|Herschel telescope +http://www.semanlink.net/tag/herschel_telescope|broader|http://www.semanlink.net/tag/telescope +http://www.semanlink.net/tag/herschel_telescope|creationDate|2009-05-06 +http://www.semanlink.net/tag/herschel_telescope|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/herschel_telescope|uri|http://www.semanlink.net/tag/herschel_telescope +http://www.semanlink.net/tag/herschel_telescope|broader_prefLabel|Télescope +http://www.semanlink.net/tag/homere|creationTime|2007-10-11T02:26:04Z +http://www.semanlink.net/tag/homere|prefLabel|Homère +http://www.semanlink.net/tag/homere|creationDate|2007-10-11 +http://www.semanlink.net/tag/homere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/homere|uri|http://www.semanlink.net/tag/homere +http://www.semanlink.net/tag/nsa_spying_scandal|creationTime|2013-06-11T10:31:07Z +http://www.semanlink.net/tag/nsa_spying_scandal|prefLabel|NSA spying scandal +http://www.semanlink.net/tag/nsa_spying_scandal|broader|http://www.semanlink.net/tag/leaks +http://www.semanlink.net/tag/nsa_spying_scandal|broader|http://www.semanlink.net/tag/prism_surveillance_program +http://www.semanlink.net/tag/nsa_spying_scandal|broader|http://www.semanlink.net/tag/nsa +http://www.semanlink.net/tag/nsa_spying_scandal|broader|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/tag/nsa_spying_scandal|broader|http://www.semanlink.net/tag/big_brother +http://www.semanlink.net/tag/nsa_spying_scandal|related|http://www.semanlink.net/tag/the_guardian
+http://www.semanlink.net/tag/nsa_spying_scandal|creationDate|2013-06-11 +http://www.semanlink.net/tag/nsa_spying_scandal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nsa_spying_scandal|uri|http://www.semanlink.net/tag/nsa_spying_scandal +http://www.semanlink.net/tag/nsa_spying_scandal|broader_prefLabel|Leaks +http://www.semanlink.net/tag/nsa_spying_scandal|broader_prefLabel|PRISM +http://www.semanlink.net/tag/nsa_spying_scandal|broader_prefLabel|NSA +http://www.semanlink.net/tag/nsa_spying_scandal|broader_prefLabel|Edward Snowden +http://www.semanlink.net/tag/nsa_spying_scandal|broader_prefLabel|Big Brother +http://www.semanlink.net/tag/nsa_spying_scandal|broader_related|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/tag/nsa_spying_scandal|broader_related|http://www.semanlink.net/tag/nsa +http://www.semanlink.net/tag/nlp_reading_comprehension|creationTime|2017-08-28T00:24:58Z +http://www.semanlink.net/tag/nlp_reading_comprehension|prefLabel|NLP: Reading Comprehension +http://www.semanlink.net/tag/nlp_reading_comprehension|broader|http://www.semanlink.net/tag/nlu +http://www.semanlink.net/tag/nlp_reading_comprehension|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/nlp_reading_comprehension|related|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/tag/nlp_reading_comprehension|creationDate|2017-08-28 +http://www.semanlink.net/tag/nlp_reading_comprehension|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_reading_comprehension|altLabel|Reading Comprehension +http://www.semanlink.net/tag/nlp_reading_comprehension|altLabel|Machine Reading Comprehension +http://www.semanlink.net/tag/nlp_reading_comprehension|uri|http://www.semanlink.net/tag/nlp_reading_comprehension +http://www.semanlink.net/tag/nlp_reading_comprehension|broader_prefLabel|NLU +http://www.semanlink.net/tag/nlp_reading_comprehension|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/nlp_reading_comprehension|broader_altLabel|Natural Language Understanding +http://www.semanlink.net/tag/antimilitarisme|prefLabel|Antimilitarisme +http://www.semanlink.net/tag/antimilitarisme|broader|http://www.semanlink.net/tag/militaire +http://www.semanlink.net/tag/antimilitarisme|creationDate|2006-08-19 +http://www.semanlink.net/tag/antimilitarisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antimilitarisme|uri|http://www.semanlink.net/tag/antimilitarisme +http://www.semanlink.net/tag/antimilitarisme|broader_prefLabel|Militaire +http://www.semanlink.net/tag/stefano_mazzocchi|prefLabel|Stefano Mazzocchi +http://www.semanlink.net/tag/stefano_mazzocchi|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/stefano_mazzocchi|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/stefano_mazzocchi|related|http://www.semanlink.net/tag/simile +http://www.semanlink.net/tag/stefano_mazzocchi|related|http://www.semanlink.net/tag/piggy_bank +http://www.semanlink.net/tag/stefano_mazzocchi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stefano_mazzocchi|homepage|http://www.betaversion.org/~stefano/ +http://www.semanlink.net/tag/stefano_mazzocchi|uri|http://www.semanlink.net/tag/stefano_mazzocchi +http://www.semanlink.net/tag/stefano_mazzocchi|broader_prefLabel|Technical girls and guys 
+http://www.semanlink.net/tag/stefano_mazzocchi|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/stefano_mazzocchi|broader_altLabel|Technical guys +http://www.semanlink.net/tag/afrique_subsaharienne|prefLabel|Afrique subsaharienne +http://www.semanlink.net/tag/afrique_subsaharienne|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/afrique_subsaharienne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afrique_subsaharienne|uri|http://www.semanlink.net/tag/afrique_subsaharienne +http://www.semanlink.net/tag/afrique_subsaharienne|broader_prefLabel|Afrique +http://www.semanlink.net/tag/afrique_subsaharienne|broader_altLabel|Africa +http://www.semanlink.net/tag/ntic|prefLabel|NTIC +http://www.semanlink.net/tag/ntic|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/ntic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ntic|uri|http://www.semanlink.net/tag/ntic +http://www.semanlink.net/tag/ntic|broader_prefLabel|Technologie +http://www.semanlink.net/tag/artificial_life|prefLabel|Artificial life +http://www.semanlink.net/tag/artificial_life|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/artificial_life|uri|http://www.semanlink.net/tag/artificial_life +http://www.semanlink.net/tag/googletechtalks|creationTime|2014-10-06T00:53:19Z +http://www.semanlink.net/tag/googletechtalks|prefLabel|GoogleTechTalks +http://www.semanlink.net/tag/googletechtalks|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/googletechtalks|creationDate|2014-10-06 +http://www.semanlink.net/tag/googletechtalks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/googletechtalks|uri|http://www.semanlink.net/tag/googletechtalks +http://www.semanlink.net/tag/googletechtalks|broader_prefLabel|Google +http://www.semanlink.net/tag/googletechtalks|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/hayabusa|prefLabel|Hayabusa +http://www.semanlink.net/tag/hayabusa|broader|http://www.semanlink.net/tag/asteroide +http://www.semanlink.net/tag/hayabusa|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/hayabusa|broader|http://www.semanlink.net/tag/japon +http://www.semanlink.net/tag/hayabusa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hayabusa|describedBy|https://en.wikipedia.org/wiki/Hayabusa +http://www.semanlink.net/tag/hayabusa|uri|http://www.semanlink.net/tag/hayabusa +http://www.semanlink.net/tag/hayabusa|broader_prefLabel|Astéroïde +http://www.semanlink.net/tag/hayabusa|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/hayabusa|broader_prefLabel|Japon +http://www.semanlink.net/tag/hayabusa|broader_altLabel|Japan +http://www.semanlink.net/tag/solr_rdf|creationTime|2015-03-06T11:46:30Z +http://www.semanlink.net/tag/solr_rdf|prefLabel|Solr + RDF +http://www.semanlink.net/tag/solr_rdf|broader|http://www.semanlink.net/tag/solr +http://www.semanlink.net/tag/solr_rdf|creationDate|2015-03-06 +http://www.semanlink.net/tag/solr_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/solr_rdf|uri|http://www.semanlink.net/tag/solr_rdf +http://www.semanlink.net/tag/solr_rdf|broader_prefLabel|Solr +http://www.semanlink.net/tag/skos|prefLabel|SKOS 
+http://www.semanlink.net/tag/skos|broader|http://www.semanlink.net/tag/thesaurus_taxonomies
+http://www.semanlink.net/tag/skos|broader|http://www.semanlink.net/tag/rdf_vocabularies
+http://www.semanlink.net/tag/skos|broader|http://www.semanlink.net/tag/semanlink_related
+http://www.semanlink.net/tag/skos|related|http://www.semanlink.net/tag/thesaurus
+http://www.semanlink.net/tag/skos|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/skos|homepage|http://www.w3.org/2004/02/skos/
+http://www.semanlink.net/tag/skos|uri|http://www.semanlink.net/tag/skos
+http://www.semanlink.net/tag/skos|broader_prefLabel|Thesaurus & Taxonomies
+http://www.semanlink.net/tag/skos|broader_prefLabel|RDF Vocabularies
+http://www.semanlink.net/tag/skos|broader_prefLabel|Semanlink related
+http://www.semanlink.net/tag/skos|broader_related|http://www.semanlink.net/tag/ontologies
+http://www.semanlink.net/tag/statistical_machine_translation|creationTime|2016-01-03T14:40:51Z
+http://www.semanlink.net/tag/statistical_machine_translation|prefLabel|Statistical machine translation
+http://www.semanlink.net/tag/statistical_machine_translation|broader|http://www.semanlink.net/tag/machine_translation
+http://www.semanlink.net/tag/statistical_machine_translation|creationDate|2016-01-03
+http://www.semanlink.net/tag/statistical_machine_translation|comment|"machine translation paradigm where translations are generated on the basis of statistical models whose parameters are derived from the analysis of bilingual text corpora. The statistical approach contrasts with the rule-based approaches to machine translation as well as with example-based machine translation
+
+"
+http://www.semanlink.net/tag/statistical_machine_translation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/statistical_machine_translation|describedBy|https://en.wikipedia.org/wiki/Statistical_machine_translation
+http://www.semanlink.net/tag/statistical_machine_translation|uri|http://www.semanlink.net/tag/statistical_machine_translation
+http://www.semanlink.net/tag/statistical_machine_translation|broader_prefLabel|Machine translation
+http://www.semanlink.net/tag/statistical_machine_translation|broader_altLabel|Traduction automatique
+http://www.semanlink.net/tag/pagerank|creationTime|2017-06-27T11:02:36Z
+http://www.semanlink.net/tag/pagerank|prefLabel|PageRank
+http://www.semanlink.net/tag/pagerank|broader|http://www.semanlink.net/tag/google_ranking
+http://www.semanlink.net/tag/pagerank|creationDate|2017-06-27
+http://www.semanlink.net/tag/pagerank|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/pagerank|describedBy|https://en.wikipedia.org/wiki/PageRank
+http://www.semanlink.net/tag/pagerank|uri|http://www.semanlink.net/tag/pagerank
+http://www.semanlink.net/tag/pagerank|broader_prefLabel|Google ranking
+http://www.semanlink.net/tag/sw_wiki|prefLabel|SW Wiki
+http://www.semanlink.net/tag/sw_wiki|broader|http://www.semanlink.net/tag/wiki
+http://www.semanlink.net/tag/sw_wiki|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/sw_wiki|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sw_wiki|uri|http://www.semanlink.net/tag/sw_wiki
+http://www.semanlink.net/tag/sw_wiki|broader_prefLabel|Wiki
+http://www.semanlink.net/tag/sw_wiki|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/sw_wiki|broader_altLabel|sw
+http://www.semanlink.net/tag/sw_wiki|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/sony|prefLabel|Sony
+http://www.semanlink.net/tag/sony|broader|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/sony|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sony|uri|http://www.semanlink.net/tag/sony
+http://www.semanlink.net/tag/sony|broader_prefLabel|Entreprise
+http://www.semanlink.net/tag/age_du_fer|creationTime|2021-06-17T09:58:59Z
+http://www.semanlink.net/tag/age_du_fer|prefLabel|Âge du fer
+http://www.semanlink.net/tag/age_du_fer|broader|http://www.semanlink.net/tag/prehistoire
+http://www.semanlink.net/tag/age_du_fer|creationDate|2021-06-17
+http://www.semanlink.net/tag/age_du_fer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/age_du_fer|uri|http://www.semanlink.net/tag/age_du_fer
+http://www.semanlink.net/tag/age_du_fer|broader_prefLabel|Préhistoire
+http://www.semanlink.net/tag/css_example|prefLabel|css example
+http://www.semanlink.net/tag/css_example|broader|http://www.semanlink.net/tag/css
+http://www.semanlink.net/tag/css_example|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/css_example|uri|http://www.semanlink.net/tag/css_example
+http://www.semanlink.net/tag/css_example|broader_prefLabel|css
+http://www.semanlink.net/tag/corent|prefLabel|Corent
+http://www.semanlink.net/tag/corent|broader|http://www.semanlink.net/tag/gaulois
+http://www.semanlink.net/tag/corent|broader|http://www.semanlink.net/tag/auvergne
+http://www.semanlink.net/tag/corent|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/corent|uri|http://www.semanlink.net/tag/corent
+http://www.semanlink.net/tag/corent|broader_prefLabel|Gaulois
+http://www.semanlink.net/tag/corent|broader_prefLabel|Auvergne
+http://www.semanlink.net/tag/archeologue|prefLabel|Archéologue
+http://www.semanlink.net/tag/archeologue|broader|http://www.semanlink.net/tag/archeologie
+http://www.semanlink.net/tag/archeologue|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/archeologue|uri|http://www.semanlink.net/tag/archeologue
+http://www.semanlink.net/tag/archeologue|broader_prefLabel|Archéologie
+http://www.semanlink.net/tag/archeologue|broader_related|http://www.semanlink.net/tag/antiquite
+http://www.semanlink.net/tag/scientifique|prefLabel|Scientifique
+http://www.semanlink.net/tag/scientifique|broader|http://www.semanlink.net/tag/homme_celebre
+http://www.semanlink.net/tag/scientifique|broader|http://www.semanlink.net/tag/science
+http://www.semanlink.net/tag/scientifique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/scientifique|altLabel|Savant
+http://www.semanlink.net/tag/scientifique|uri|http://www.semanlink.net/tag/scientifique
+http://www.semanlink.net/tag/scientifique|broader_prefLabel|Homme célèbre
+http://www.semanlink.net/tag/scientifique|broader_prefLabel|Science
+http://www.semanlink.net/tag/scientifique|broader_altLabel|sciences
+http://www.semanlink.net/tag/ouzbekistan|creationTime|2010-06-24T02:26:17Z
+http://www.semanlink.net/tag/ouzbekistan|prefLabel|Ouzbékistan
+http://www.semanlink.net/tag/ouzbekistan|broader|http://www.semanlink.net/tag/urss
+http://www.semanlink.net/tag/ouzbekistan|broader|http://www.semanlink.net/tag/asie_centrale
+http://www.semanlink.net/tag/ouzbekistan|creationDate|2010-06-24
+http://www.semanlink.net/tag/ouzbekistan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ouzbekistan|describedBy|https://en.wikipedia.org/wiki/Uzbekistan +http://www.semanlink.net/tag/ouzbekistan|altLabel|Uzbekistan +http://www.semanlink.net/tag/ouzbekistan|uri|http://www.semanlink.net/tag/ouzbekistan +http://www.semanlink.net/tag/ouzbekistan|broader_prefLabel|Ex URSS +http://www.semanlink.net/tag/ouzbekistan|broader_prefLabel|URSS +http://www.semanlink.net/tag/ouzbekistan|broader_prefLabel|Asie centrale +http://www.semanlink.net/tag/rome|prefLabel|Rome +http://www.semanlink.net/tag/rome|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/rome|broader|http://www.semanlink.net/tag/italie +http://www.semanlink.net/tag/rome|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rome|uri|http://www.semanlink.net/tag/rome +http://www.semanlink.net/tag/rome|broader_prefLabel|Ville +http://www.semanlink.net/tag/rome|broader_prefLabel|Italie +http://www.semanlink.net/tag/vieux|prefLabel|Vieux +http://www.semanlink.net/tag/vieux|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vieux|uri|http://www.semanlink.net/tag/vieux +http://www.semanlink.net/tag/venus_express|prefLabel|Venus Express +http://www.semanlink.net/tag/venus_express|broader|http://www.semanlink.net/tag/venus +http://www.semanlink.net/tag/venus_express|broader|http://www.semanlink.net/tag/esa +http://www.semanlink.net/tag/venus_express|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/venus_express|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/venus_express|uri|http://www.semanlink.net/tag/venus_express +http://www.semanlink.net/tag/venus_express|broader_prefLabel|Vénus +http://www.semanlink.net/tag/venus_express|broader_prefLabel|esa +http://www.semanlink.net/tag/venus_express|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/ulmfit|creationTime|2018-06-19T10:11:31Z +http://www.semanlink.net/tag/ulmfit|prefLabel|ULMFiT +http://www.semanlink.net/tag/ulmfit|broader|http://www.semanlink.net/tag/contextualised_word_representations +http://www.semanlink.net/tag/ulmfit|broader|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/ulmfit|broader|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/ulmfit|related|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/tag/ulmfit|related|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/tag/ulmfit|creationDate|2018-06-19 +http://www.semanlink.net/tag/ulmfit|comment|"Approach that relies on fine-tuning a Language Model to the task at hand + +3 stages: + +- General domain language model pre-training +- Target task language model fine-tuning +- Target task classifier fine-tuning +" +http://www.semanlink.net/tag/ulmfit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ulmfit|uri|http://www.semanlink.net/tag/ulmfit +http://www.semanlink.net/tag/ulmfit|broader_prefLabel|Contextualized word representations +http://www.semanlink.net/tag/ulmfit|broader_prefLabel|Pre-Trained Language Models +http://www.semanlink.net/tag/ulmfit|broader_prefLabel|Text Embeddings +http://www.semanlink.net/tag/ulmfit|broader_altLabel|Contextualized word embeddings +http://www.semanlink.net/tag/ulmfit|broader_altLabel|PreTrained Language Models +http://www.semanlink.net/tag/ulmfit|broader_altLabel|Deep pre-training in NLP 
+http://www.semanlink.net/tag/ulmfit|broader_related|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/ulmfit|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/ulmfit|broader_related|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/ulmfit|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/lstm_networks|creationTime|2015-10-16T14:21:06Z +http://www.semanlink.net/tag/lstm_networks|prefLabel|LSTM +http://www.semanlink.net/tag/lstm_networks|broader|http://www.semanlink.net/tag/recurrent_neural_network +http://www.semanlink.net/tag/lstm_networks|related|http://www.semanlink.net/tag/time_series +http://www.semanlink.net/tag/lstm_networks|related|http://www.semanlink.net/tag/vanishing_gradient +http://www.semanlink.net/tag/lstm_networks|creationDate|2015-10-16 +http://www.semanlink.net/tag/lstm_networks|comment|"""Long short-term memory"": recurrent neural network architecture well-suited for **time series with long time lags between important events**. +(cf the problem of long time dependencies, such as when you want to predict the next word in ""I grew up in France… I speak fluent [?]""). + +A solution to the vanishing gradient problem in RNNs + + + + +" +http://www.semanlink.net/tag/lstm_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lstm_networks|describedBy|https://en.wikipedia.org/wiki/Long_short-term_memory +http://www.semanlink.net/tag/lstm_networks|uri|http://www.semanlink.net/tag/lstm_networks +http://www.semanlink.net/tag/lstm_networks|broader_prefLabel|Recurrent neural network +http://www.semanlink.net/tag/lstm_networks|broader_altLabel|RNN +http://www.semanlink.net/tag/lstm_networks|broader_related|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/tag/lstm_networks|broader_related|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/tag/mathieu_d_aquin|creationTime|2014-01-21T19:08:01Z +http://www.semanlink.net/tag/mathieu_d_aquin|prefLabel|Mathieu d'Aquin +http://www.semanlink.net/tag/mathieu_d_aquin|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/mathieu_d_aquin|creationDate|2014-01-21 +http://www.semanlink.net/tag/mathieu_d_aquin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mathieu_d_aquin|uri|http://www.semanlink.net/tag/mathieu_d_aquin +http://www.semanlink.net/tag/mathieu_d_aquin|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/impot|prefLabel|Impôt +http://www.semanlink.net/tag/impot|broader|http://www.semanlink.net/tag/societe +http://www.semanlink.net/tag/impot|broader|http://www.semanlink.net/tag/prelevements_obligatoires +http://www.semanlink.net/tag/impot|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/impot|uri|http://www.semanlink.net/tag/impot +http://www.semanlink.net/tag/impot|broader_prefLabel|Société +http://www.semanlink.net/tag/impot|broader_prefLabel|Prélèvements obligatoires +http://www.semanlink.net/tag/constraints_in_the_sw|creationTime|2010-12-16T15:41:29Z +http://www.semanlink.net/tag/constraints_in_the_sw|prefLabel|Constraints in the SW +http://www.semanlink.net/tag/constraints_in_the_sw|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/constraints_in_the_sw|related|http://www.semanlink.net/tag/constraint_programming 
+http://www.semanlink.net/tag/constraints_in_the_sw|creationDate|2010-12-16 +http://www.semanlink.net/tag/constraints_in_the_sw|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/constraints_in_the_sw|uri|http://www.semanlink.net/tag/constraints_in_the_sw +http://www.semanlink.net/tag/constraints_in_the_sw|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/constraints_in_the_sw|broader_altLabel|sw +http://www.semanlink.net/tag/constraints_in_the_sw|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/three_mile_island|creationTime|2020-04-18T13:34:11Z +http://www.semanlink.net/tag/three_mile_island|prefLabel|Three Mile Island +http://www.semanlink.net/tag/three_mile_island|broader|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/tag/three_mile_island|creationDate|2020-04-18 +http://www.semanlink.net/tag/three_mile_island|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/three_mile_island|describedBy|https://en.wikipedia.org/wiki/Three_Mile_Island_accident +http://www.semanlink.net/tag/three_mile_island|uri|http://www.semanlink.net/tag/three_mile_island +http://www.semanlink.net/tag/three_mile_island|broader_prefLabel|Industrie nucléaire +http://www.semanlink.net/tag/three_mile_island|broader_altLabel|Nucléaire +http://www.semanlink.net/tag/nn_4_nlp|creationTime|2017-07-20T13:23:20Z +http://www.semanlink.net/tag/nn_4_nlp|prefLabel|NN 4 NLP +http://www.semanlink.net/tag/nn_4_nlp|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/nn_4_nlp|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/nn_4_nlp|creationDate|2017-07-20 +http://www.semanlink.net/tag/nn_4_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nn_4_nlp|uri|http://www.semanlink.net/tag/nn_4_nlp +http://www.semanlink.net/tag/nn_4_nlp|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/nn_4_nlp|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/nn_4_nlp|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/nn_4_nlp|broader_altLabel|ANN +http://www.semanlink.net/tag/nn_4_nlp|broader_altLabel|NN +http://www.semanlink.net/tag/xviie_siecle|creationTime|2021-04-03T11:37:49Z +http://www.semanlink.net/tag/xviie_siecle|prefLabel|XVIIe siècle +http://www.semanlink.net/tag/xviie_siecle|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/xviie_siecle|creationDate|2021-04-03 +http://www.semanlink.net/tag/xviie_siecle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xviie_siecle|uri|http://www.semanlink.net/tag/xviie_siecle +http://www.semanlink.net/tag/xviie_siecle|broader_prefLabel|Histoire +http://www.semanlink.net/tag/shingles|creationTime|2020-09-03T17:58:09Z +http://www.semanlink.net/tag/shingles|prefLabel|Shingles +http://www.semanlink.net/tag/shingles|broader|http://www.semanlink.net/tag/n_gram +http://www.semanlink.net/tag/shingles|broader|http://www.semanlink.net/tag/information_retrieval_techniques +http://www.semanlink.net/tag/shingles|related|http://www.semanlink.net/tag/duplicate_detection +http://www.semanlink.net/tag/shingles|creationDate|2020-09-03 +http://www.semanlink.net/tag/shingles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/shingles|altLabel|Shingling +http://www.semanlink.net/tag/shingles|uri|http://www.semanlink.net/tag/shingles 
+http://www.semanlink.net/tag/shingles|broader_prefLabel|N-grams +http://www.semanlink.net/tag/shingles|broader_prefLabel|Information retrieval: techniques +http://www.semanlink.net/tag/shingles|broader_altLabel|N-gram +http://www.semanlink.net/tag/amnesty_international|creationTime|2016-06-16T23:41:37Z +http://www.semanlink.net/tag/amnesty_international|prefLabel|Amnesty International +http://www.semanlink.net/tag/amnesty_international|creationDate|2016-06-16 +http://www.semanlink.net/tag/amnesty_international|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amnesty_international|describedBy|https://en.wikipedia.org/wiki/Amnesty_International +http://www.semanlink.net/tag/amnesty_international|uri|http://www.semanlink.net/tag/amnesty_international +http://www.semanlink.net/tag/quantum_neuromorphic_computing|creationTime|2021-01-04T09:37:30Z +http://www.semanlink.net/tag/quantum_neuromorphic_computing|prefLabel|Quantum neuromorphic computing +http://www.semanlink.net/tag/quantum_neuromorphic_computing|broader|http://www.semanlink.net/tag/quantum_computing +http://www.semanlink.net/tag/quantum_neuromorphic_computing|related|http://www.semanlink.net/tag/julie_grollier +http://www.semanlink.net/tag/quantum_neuromorphic_computing|creationDate|2021-01-04 +http://www.semanlink.net/tag/quantum_neuromorphic_computing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/quantum_neuromorphic_computing|uri|http://www.semanlink.net/tag/quantum_neuromorphic_computing +http://www.semanlink.net/tag/quantum_neuromorphic_computing|broader_prefLabel|Quantum computing +http://www.semanlink.net/tag/quantum_neuromorphic_computing|broader_altLabel|Quantum computer +http://www.semanlink.net/tag/quantum_neuromorphic_computing|broader_altLabel|Ordinateur quantique +http://www.semanlink.net/tag/web_2_0|prefLabel|Web 2.0 +http://www.semanlink.net/tag/web_2_0|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/web_2_0|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_2_0|uri|http://www.semanlink.net/tag/web_2_0 +http://www.semanlink.net/tag/web_2_0|broader_prefLabel|Internet +http://www.semanlink.net/tag/sursauts_gamma|prefLabel|Sursauts gamma +http://www.semanlink.net/tag/sursauts_gamma|broader|http://www.semanlink.net/tag/explosions_cosmiques +http://www.semanlink.net/tag/sursauts_gamma|broader|http://www.semanlink.net/tag/rayons_cosmiques +http://www.semanlink.net/tag/sursauts_gamma|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sursauts_gamma|uri|http://www.semanlink.net/tag/sursauts_gamma +http://www.semanlink.net/tag/sursauts_gamma|broader_prefLabel|Explosions cosmiques +http://www.semanlink.net/tag/sursauts_gamma|broader_prefLabel|Rayons cosmiques +http://www.semanlink.net/tag/ajax_applications|prefLabel|Ajax applications +http://www.semanlink.net/tag/ajax_applications|broader|http://www.semanlink.net/tag/ajax +http://www.semanlink.net/tag/ajax_applications|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ajax_applications|uri|http://www.semanlink.net/tag/ajax_applications +http://www.semanlink.net/tag/ajax_applications|broader_prefLabel|Ajax +http://www.semanlink.net/tag/ajax_applications|broader_altLabel|XMLHttpRequest +http://www.semanlink.net/tag/erythree|creationTime|2015-08-22T23:50:37Z +http://www.semanlink.net/tag/erythree|prefLabel|Érythrée 
+http://www.semanlink.net/tag/erythree|broader|http://www.semanlink.net/tag/afrique_de_l_est
+http://www.semanlink.net/tag/erythree|creationDate|2015-08-22
+http://www.semanlink.net/tag/erythree|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/erythree|describedBy|https://fr.wikipedia.org/wiki/%C3%89rythr%C3%A9e
+http://www.semanlink.net/tag/erythree|uri|http://www.semanlink.net/tag/erythree
+http://www.semanlink.net/tag/erythree|broader_prefLabel|Afrique de l'Est
+http://www.semanlink.net/tag/chris_manning|creationTime|2012-04-15T14:30:36Z
+http://www.semanlink.net/tag/chris_manning|prefLabel|Chris Manning
+http://www.semanlink.net/tag/chris_manning|broader|http://www.semanlink.net/tag/ai_girls_and_guys
+http://www.semanlink.net/tag/chris_manning|broader|http://www.semanlink.net/tag/nlp_girls_and_guys
+http://www.semanlink.net/tag/chris_manning|creationDate|2012-04-15
+http://www.semanlink.net/tag/chris_manning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/chris_manning|altLabel|Christopher Manning
+http://www.semanlink.net/tag/chris_manning|uri|http://www.semanlink.net/tag/chris_manning
+http://www.semanlink.net/tag/chris_manning|broader_prefLabel|AI girls and guys
+http://www.semanlink.net/tag/chris_manning|broader_prefLabel|NLP girls and guys
+http://www.semanlink.net/tag/litterature_africaine|prefLabel|Littérature africaine
+http://www.semanlink.net/tag/litterature_africaine|broader|http://www.semanlink.net/tag/litterature
+http://www.semanlink.net/tag/litterature_africaine|broader|http://www.semanlink.net/tag/afrique
+http://www.semanlink.net/tag/litterature_africaine|creationDate|2006-12-28
+http://www.semanlink.net/tag/litterature_africaine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/litterature_africaine|uri|http://www.semanlink.net/tag/litterature_africaine
+http://www.semanlink.net/tag/litterature_africaine|broader_prefLabel|Littérature
+http://www.semanlink.net/tag/litterature_africaine|broader_prefLabel|Afrique
+http://www.semanlink.net/tag/litterature_africaine|broader_altLabel|Africa
+http://www.semanlink.net/tag/litterature_africaine|broader_related|http://www.semanlink.net/tag/livre
+http://www.semanlink.net/tag/bertrand_sajus|creationTime|2013-10-19T18:20:54Z
+http://www.semanlink.net/tag/bertrand_sajus|prefLabel|Bertrand Sajus
+http://www.semanlink.net/tag/bertrand_sajus|broader|http://www.semanlink.net/tag/ministere_de_la_culture
+http://www.semanlink.net/tag/bertrand_sajus|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/bertrand_sajus|creationDate|2013-10-19
+http://www.semanlink.net/tag/bertrand_sajus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/bertrand_sajus|uri|http://www.semanlink.net/tag/bertrand_sajus
+http://www.semanlink.net/tag/bertrand_sajus|broader_prefLabel|Ministère de la culture
+http://www.semanlink.net/tag/bertrand_sajus|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/reine|creationTime|2019-03-04T08:42:10Z
+http://www.semanlink.net/tag/reine|prefLabel|Reine
+http://www.semanlink.net/tag/reine|creationDate|2019-03-04
+http://www.semanlink.net/tag/reine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/reine|uri|http://www.semanlink.net/tag/reine
+http://www.semanlink.net/tag/linear_classifier|creationTime|2014-03-26T09:47:13Z
+http://www.semanlink.net/tag/linear_classifier|prefLabel|Linear classifier
+http://www.semanlink.net/tag/linear_classifier|broader|http://www.semanlink.net/tag/statistical_classification
+http://www.semanlink.net/tag/linear_classifier|broader|http://www.semanlink.net/tag/supervised_machine_learning
+http://www.semanlink.net/tag/linear_classifier|creationDate|2014-03-26
+http://www.semanlink.net/tag/linear_classifier|comment|"classification decision based on the value of a linear combination of the feature values
+"
+http://www.semanlink.net/tag/linear_classifier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/linear_classifier|describedBy|https://en.wikipedia.org/wiki/Linear_classifier
+http://www.semanlink.net/tag/linear_classifier|uri|http://www.semanlink.net/tag/linear_classifier
+http://www.semanlink.net/tag/linear_classifier|broader_prefLabel|Classification
+http://www.semanlink.net/tag/linear_classifier|broader_prefLabel|Supervised machine learning
+http://www.semanlink.net/tag/histoire_de_la_vie|creationTime|2011-10-20T00:32:08Z
+http://www.semanlink.net/tag/histoire_de_la_vie|prefLabel|Histoire de la vie
+http://www.semanlink.net/tag/histoire_de_la_vie|broader|http://www.semanlink.net/tag/evolution
+http://www.semanlink.net/tag/histoire_de_la_vie|creationDate|2011-10-20
+http://www.semanlink.net/tag/histoire_de_la_vie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/histoire_de_la_vie|uri|http://www.semanlink.net/tag/histoire_de_la_vie
+http://www.semanlink.net/tag/histoire_de_la_vie|broader_prefLabel|Evolution
+http://www.semanlink.net/tag/as400|creationTime|2009-04-08T15:18:16Z
+http://www.semanlink.net/tag/as400|prefLabel|AS400
+http://www.semanlink.net/tag/as400|creationDate|2009-04-08
+http://www.semanlink.net/tag/as400|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/as400|uri|http://www.semanlink.net/tag/as400
+http://www.semanlink.net/tag/semantic_browsing|prefLabel|Semantic browsing
+http://www.semanlink.net/tag/semantic_browsing|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/semantic_browsing|broader|http://www.semanlink.net/tag/semanlink_related
+http://www.semanlink.net/tag/semantic_browsing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_browsing|uri|http://www.semanlink.net/tag/semantic_browsing
+http://www.semanlink.net/tag/semantic_browsing|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semantic_browsing|broader_prefLabel|Semanlink related
+http://www.semanlink.net/tag/semantic_browsing|broader_altLabel|sw
+http://www.semanlink.net/tag/semantic_browsing|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/openoffice|creationTime|2007-02-13T21:26:30Z
+http://www.semanlink.net/tag/openoffice|prefLabel|OpenOffice
+http://www.semanlink.net/tag/openoffice|creationDate|2007-02-13
+http://www.semanlink.net/tag/openoffice|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/openoffice|uri|http://www.semanlink.net/tag/openoffice
+http://www.semanlink.net/tag/chirac|prefLabel|Chirac
+http://www.semanlink.net/tag/chirac|broader|http://www.semanlink.net/tag/chef_d_etat
+http://www.semanlink.net/tag/chirac|broader|http://www.semanlink.net/tag/politique_francaise
+http://www.semanlink.net/tag/chirac|broader|http://www.semanlink.net/tag/cons_de_francais
+http://www.semanlink.net/tag/chirac|broader|http://www.semanlink.net/tag/homme_politique
+http://www.semanlink.net/tag/chirac|comment|Quel con +http://www.semanlink.net/tag/chirac|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chirac|uri|http://www.semanlink.net/tag/chirac +http://www.semanlink.net/tag/chirac|broader_prefLabel|Chef d'état +http://www.semanlink.net/tag/chirac|broader_prefLabel|Politique française +http://www.semanlink.net/tag/chirac|broader_prefLabel|Cons de Français +http://www.semanlink.net/tag/chirac|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/rsa|creationTime|2021-02-20T12:44:17Z +http://www.semanlink.net/tag/rsa|prefLabel|RSA +http://www.semanlink.net/tag/rsa|creationDate|2021-02-20 +http://www.semanlink.net/tag/rsa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rsa|uri|http://www.semanlink.net/tag/rsa +http://www.semanlink.net/tag/medicaments_generiques|prefLabel|Médicaments génériques +http://www.semanlink.net/tag/medicaments_generiques|broader|http://www.semanlink.net/tag/medicaments +http://www.semanlink.net/tag/medicaments_generiques|broader|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.semanlink.net/tag/medicaments_generiques|broader|http://www.semanlink.net/tag/industrie_pharmaceutique +http://www.semanlink.net/tag/medicaments_generiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/medicaments_generiques|uri|http://www.semanlink.net/tag/medicaments_generiques +http://www.semanlink.net/tag/medicaments_generiques|broader_prefLabel|Médicaments +http://www.semanlink.net/tag/medicaments_generiques|broader_prefLabel|Propriété intellectuelle +http://www.semanlink.net/tag/medicaments_generiques|broader_prefLabel|Industrie pharmaceutique +http://www.semanlink.net/tag/rdfa|creationTime|2007-04-10T23:17:00Z +http://www.semanlink.net/tag/rdfa|prefLabel|RDFa +http://www.semanlink.net/tag/rdfa|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdfa|broader|http://www.semanlink.net/tag/html_data +http://www.semanlink.net/tag/rdfa|broader|http://www.semanlink.net/tag/xhtml +http://www.semanlink.net/tag/rdfa|related|http://www.semanlink.net/tag/elias_torres +http://www.semanlink.net/tag/rdfa|related|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanlink.net/tag/rdfa|related|http://www.semanlink.net/tag/microformats +http://www.semanlink.net/tag/rdfa|creationDate|2007-04-10 +http://www.semanlink.net/tag/rdfa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdfa|homepage|http://rdfa.info/ +http://www.semanlink.net/tag/rdfa|altLabel|RDF/A +http://www.semanlink.net/tag/rdfa|uri|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/rdfa|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdfa|broader_prefLabel|HTML Data +http://www.semanlink.net/tag/rdfa|broader_prefLabel|XHTML +http://www.semanlink.net/tag/rdfa|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdfa|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdfa|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdfa|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdfa|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/p_np|creationTime|2012-04-27T23:32:28Z +http://www.semanlink.net/tag/p_np|prefLabel|P=NP +http://www.semanlink.net/tag/p_np|broader|http://www.semanlink.net/tag/grands_problemes_mathematiques 
+http://www.semanlink.net/tag/p_np|creationDate|2012-04-27 +http://www.semanlink.net/tag/p_np|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/p_np|uri|http://www.semanlink.net/tag/p_np +http://www.semanlink.net/tag/p_np|broader_prefLabel|Grands problèmes mathématiques +http://www.semanlink.net/tag/dkrl|creationTime|2020-10-02T00:43:11Z +http://www.semanlink.net/tag/dkrl|prefLabel|DKRL +http://www.semanlink.net/tag/dkrl|broader|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/tag/dkrl|related|http://www.semanlink.net/tag/transe +http://www.semanlink.net/tag/dkrl|creationDate|2020-10-02 +http://www.semanlink.net/tag/dkrl|comment|"In the DKRL model, the embedding +of an entity is responsible for both modeling the corresponding +fact triples and modeling its description: extends the translation based +embedding methods from the triple-specific one to the +“Text-Aware” model" +http://www.semanlink.net/tag/dkrl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dkrl|uri|http://www.semanlink.net/tag/dkrl +http://www.semanlink.net/tag/dkrl|broader_prefLabel|Text-Aware KG embedding +http://www.semanlink.net/tag/dkrl|broader_altLabel|Text in Knowledge Graph embeddings +http://www.semanlink.net/tag/dkrl|broader_altLabel|Text in KG embeddings +http://www.semanlink.net/tag/dkrl|broader_related|http://www.semanlink.net/tag/rdf_embeddings +http://www.semanlink.net/tag/lod_use_case|creationTime|2012-10-09T11:29:58Z +http://www.semanlink.net/tag/lod_use_case|prefLabel|LOD use case +http://www.semanlink.net/tag/lod_use_case|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/lod_use_case|creationDate|2012-10-09 +http://www.semanlink.net/tag/lod_use_case|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lod_use_case|uri|http://www.semanlink.net/tag/lod_use_case +http://www.semanlink.net/tag/lod_use_case|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/lod_use_case|broader_altLabel|LOD +http://www.semanlink.net/tag/lod_use_case|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/lod_use_case|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/lod_use_case|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/lod_use_case|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/hosting|creationTime|2008-07-04T14:10:24Z +http://www.semanlink.net/tag/hosting|prefLabel|Hosting +http://www.semanlink.net/tag/hosting|creationDate|2008-07-04 +http://www.semanlink.net/tag/hosting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hosting|uri|http://www.semanlink.net/tag/hosting +http://www.semanlink.net/tag/topic_embeddings|creationTime|2017-12-03T17:40:43Z +http://www.semanlink.net/tag/topic_embeddings|prefLabel|Topic embeddings +http://www.semanlink.net/tag/topic_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/topic_embeddings|broader|http://www.semanlink.net/tag/embeddings_in_nlp +http://www.semanlink.net/tag/topic_embeddings|related|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/topic_embeddings|creationDate|2017-12-03 +http://www.semanlink.net/tag/topic_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/topic_embeddings|uri|http://www.semanlink.net/tag/topic_embeddings +http://www.semanlink.net/tag/topic_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/topic_embeddings|broader_prefLabel|Embeddings in NLP +http://www.semanlink.net/tag/topic_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/topic_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/topic_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/topic_embeddings|broader_related|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/tag/short_text_clustering|creationTime|2021-05-20T16:44:11Z +http://www.semanlink.net/tag/short_text_clustering|prefLabel|Short Text Clustering +http://www.semanlink.net/tag/short_text_clustering|broader|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/short_text_clustering|broader|http://www.semanlink.net/tag/nlp_short_texts +http://www.semanlink.net/tag/short_text_clustering|creationDate|2021-05-20 +http://www.semanlink.net/tag/short_text_clustering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/short_text_clustering|uri|http://www.semanlink.net/tag/short_text_clustering +http://www.semanlink.net/tag/short_text_clustering|broader_prefLabel|Clustering of text documents +http://www.semanlink.net/tag/short_text_clustering|broader_prefLabel|NLP: short texts +http://www.semanlink.net/tag/short_text_clustering|broader_altLabel|Text Clustering +http://www.semanlink.net/tag/short_text_clustering|broader_related|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/linked_data_fragments|creationTime|2014-10-28T22:44:56Z +http://www.semanlink.net/tag/linked_data_fragments|prefLabel|Linked Data Fragments +http://www.semanlink.net/tag/linked_data_fragments|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_fragments|related|http://www.semanlink.net/tag/ruben_verborgh +http://www.semanlink.net/tag/linked_data_fragments|creationDate|2014-10-28 +http://www.semanlink.net/tag/linked_data_fragments|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_fragments|homepage|http://linkeddatafragments.org/ +http://www.semanlink.net/tag/linked_data_fragments|uri|http://www.semanlink.net/tag/linked_data_fragments +http://www.semanlink.net/tag/linked_data_fragments|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_fragments|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/musicbrainz 
+http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data_fragments|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/reader_mode_browsers|creationTime|2017-05-27T13:07:59Z +http://www.semanlink.net/tag/reader_mode_browsers|prefLabel|Reader mode (browsers) +http://www.semanlink.net/tag/reader_mode_browsers|broader|http://www.semanlink.net/tag/brouteur +http://www.semanlink.net/tag/reader_mode_browsers|creationDate|2017-05-27 +http://www.semanlink.net/tag/reader_mode_browsers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reader_mode_browsers|uri|http://www.semanlink.net/tag/reader_mode_browsers +http://www.semanlink.net/tag/reader_mode_browsers|broader_prefLabel|Brouteur +http://www.semanlink.net/tag/reader_mode_browsers|broader_altLabel|Browser +http://www.semanlink.net/tag/systeme_solaire|prefLabel|Système solaire +http://www.semanlink.net/tag/systeme_solaire|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/systeme_solaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/systeme_solaire|uri|http://www.semanlink.net/tag/systeme_solaire +http://www.semanlink.net/tag/systeme_solaire|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/ile_maurice|prefLabel|Île Maurice +http://www.semanlink.net/tag/ile_maurice|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/ile_maurice|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ile_maurice|uri|http://www.semanlink.net/tag/ile_maurice +http://www.semanlink.net/tag/ile_maurice|broader_prefLabel|Afrique +http://www.semanlink.net/tag/ile_maurice|broader_altLabel|Africa +http://www.semanlink.net/tag/public_vocabs_w3_org|creationTime|2013-07-06T23:59:36Z +http://www.semanlink.net/tag/public_vocabs_w3_org|prefLabel|public-vocabs@w3.org +http://www.semanlink.net/tag/public_vocabs_w3_org|broader|http://www.semanlink.net/tag/mailing_list +http://www.semanlink.net/tag/public_vocabs_w3_org|broader|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/public_vocabs_w3_org|broader|http://www.semanlink.net/tag/web_schemas_task_force +http://www.semanlink.net/tag/public_vocabs_w3_org|creationDate|2013-07-06 +http://www.semanlink.net/tag/public_vocabs_w3_org|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/public_vocabs_w3_org|homepage|http://lists.w3.org/Archives/Public/public-vocabs/ +http://www.semanlink.net/tag/public_vocabs_w3_org|uri|http://www.semanlink.net/tag/public_vocabs_w3_org +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_prefLabel|Mailing list +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_prefLabel|schema.org +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_prefLabel|Web Schemas Task Force +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_related|http://www.semanlink.net/tag/guha 
+http://www.semanlink.net/tag/public_vocabs_w3_org|broader_related|http://www.semanlink.net/tag/yahoo +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_related|http://www.semanlink.net/tag/yandex +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_related|http://www.semanlink.net/tag/microdata +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_related|http://www.semanlink.net/tag/dan_brickley +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_related|http://www.semanlink.net/tag/bing +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_related|http://www.semanlink.net/tag/dan_brickley +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_related|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/public_vocabs_w3_org|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/web_schemas_task_force|creationTime|2013-06-28T11:11:29Z +http://www.semanlink.net/tag/web_schemas_task_force|prefLabel|Web Schemas Task Force +http://www.semanlink.net/tag/web_schemas_task_force|related|http://www.semanlink.net/tag/dan_brickley +http://www.semanlink.net/tag/web_schemas_task_force|related|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/web_schemas_task_force|related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/web_schemas_task_force|creationDate|2013-06-28 +http://www.semanlink.net/tag/web_schemas_task_force|comment|"schema.org is a site for documenting the vocabulary that it's conveners are +interested in/support. The goal is to provide a single place where someone +could find all the documentation for adding markup for consumption by the +schema.org sponsors. +
+It is one of many many namespaces that will be used for structured data +vocabulary. +
+This discussion group has much broader scope and should be used for +discussing vocabularies that are well outside the scope of schema.org +
+Guha" +http://www.semanlink.net/tag/web_schemas_task_force|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_schemas_task_force|homepage|http://www.w3.org/wiki/WebSchemas +http://www.semanlink.net/tag/web_schemas_task_force|uri|http://www.semanlink.net/tag/web_schemas_task_force +http://www.semanlink.net/tag/low_code|creationTime|2021-07-01T00:24:25Z +http://www.semanlink.net/tag/low_code|prefLabel|Low-code +http://www.semanlink.net/tag/low_code|creationDate|2021-07-01 +http://www.semanlink.net/tag/low_code|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/low_code|uri|http://www.semanlink.net/tag/low_code +http://www.semanlink.net/tag/data_augmentation|creationTime|2019-03-26T00:43:47Z +http://www.semanlink.net/tag/data_augmentation|prefLabel|Data Augmentation +http://www.semanlink.net/tag/data_augmentation|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/data_augmentation|creationDate|2019-03-26 +http://www.semanlink.net/tag/data_augmentation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_augmentation|uri|http://www.semanlink.net/tag/data_augmentation +http://www.semanlink.net/tag/data_augmentation|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/end_to_end_entity_linking|creationTime|2020-01-09T15:12:14Z +http://www.semanlink.net/tag/end_to_end_entity_linking|prefLabel|End-To-End Entity Linking +http://www.semanlink.net/tag/end_to_end_entity_linking|broader|http://www.semanlink.net/tag/end_to_end_learning +http://www.semanlink.net/tag/end_to_end_entity_linking|broader|http://www.semanlink.net/tag/entity_discovery_and_linking +http://www.semanlink.net/tag/end_to_end_entity_linking|broader|http://www.semanlink.net/tag/entity_linking +http://www.semanlink.net/tag/end_to_end_entity_linking|creationDate|2020-01-09 +http://www.semanlink.net/tag/end_to_end_entity_linking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/end_to_end_entity_linking|uri|http://www.semanlink.net/tag/end_to_end_entity_linking +http://www.semanlink.net/tag/end_to_end_entity_linking|broader_prefLabel|End-to-End Learning +http://www.semanlink.net/tag/end_to_end_entity_linking|broader_prefLabel|Entity discovery and linking +http://www.semanlink.net/tag/end_to_end_entity_linking|broader_prefLabel|Entity linking +http://www.semanlink.net/tag/end_to_end_entity_linking|broader_altLabel|Entity Retrieval +http://www.semanlink.net/tag/end_to_end_entity_linking|broader_altLabel|Entity Analysis +http://www.semanlink.net/tag/end_to_end_entity_linking|broader_altLabel|Named entity disambiguation +http://www.semanlink.net/tag/end_to_end_entity_linking|broader_related|http://www.semanlink.net/tag/relation_extraction +http://www.semanlink.net/tag/end_to_end_entity_linking|broader_related|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/bioterrorisme|creationTime|2012-06-30T01:00:21Z +http://www.semanlink.net/tag/bioterrorisme|prefLabel|Bioterrorisme +http://www.semanlink.net/tag/bioterrorisme|broader|http://www.semanlink.net/tag/terrorisme +http://www.semanlink.net/tag/bioterrorisme|creationDate|2012-06-30 +http://www.semanlink.net/tag/bioterrorisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bioterrorisme|uri|http://www.semanlink.net/tag/bioterrorisme 
+http://www.semanlink.net/tag/bioterrorisme|broader_prefLabel|Terrorisme +http://www.semanlink.net/tag/asie|prefLabel|Asie +http://www.semanlink.net/tag/asie|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/asie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/asie|uri|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/asie|broader_prefLabel|Géographie +http://www.semanlink.net/tag/language_models_as_knowledge_bases|creationTime|2020-10-26T17:20:15Z +http://www.semanlink.net/tag/language_models_as_knowledge_bases|prefLabel|Language Models as Knowledge Bases +http://www.semanlink.net/tag/language_models_as_knowledge_bases|broader|http://www.semanlink.net/tag/language_models_knowledge +http://www.semanlink.net/tag/language_models_as_knowledge_bases|broader|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/language_models_as_knowledge_bases|creationDate|2020-10-26 +http://www.semanlink.net/tag/language_models_as_knowledge_bases|comment|In contrast to this, see an [effort to avoid encoding general knowledge in the transformer network](doc:2019/09/_1909_04120_span_selection_pre) +http://www.semanlink.net/tag/language_models_as_knowledge_bases|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/language_models_as_knowledge_bases|uri|http://www.semanlink.net/tag/language_models_as_knowledge_bases +http://www.semanlink.net/tag/language_models_as_knowledge_bases|broader_prefLabel|Language Models + Knowledge +http://www.semanlink.net/tag/language_models_as_knowledge_bases|broader_prefLabel|Knowledge Graph + Deep Learning +http://www.semanlink.net/tag/language_models_as_knowledge_bases|broader_related|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/language_models_as_knowledge_bases|broader_related|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/tag/aaron_swartz|creationTime|2008-09-02T14:18:43Z +http://www.semanlink.net/tag/aaron_swartz|prefLabel|Aaron Swartz +http://www.semanlink.net/tag/aaron_swartz|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/aaron_swartz|related|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/aaron_swartz|related|http://www.semanlink.net/tag/rss +http://www.semanlink.net/tag/aaron_swartz|creationDate|2008-09-02 +http://www.semanlink.net/tag/aaron_swartz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aaron_swartz|describedBy|http://www.aaronsw.com/ +http://www.semanlink.net/tag/aaron_swartz|uri|http://www.semanlink.net/tag/aaron_swartz +http://www.semanlink.net/tag/aaron_swartz|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/aaron_swartz|broader_altLabel|Technical guys +http://www.semanlink.net/tag/makolab_semantic_day|creationTime|2013-09-19T23:38:08Z +http://www.semanlink.net/tag/makolab_semantic_day|prefLabel|Makolab Semantic Day +http://www.semanlink.net/tag/makolab_semantic_day|broader|http://www.semanlink.net/tag/makolab +http://www.semanlink.net/tag/makolab_semantic_day|related|http://www.semanlink.net/tag/martin_hepp +http://www.semanlink.net/tag/makolab_semantic_day|creationDate|2013-09-19 +http://www.semanlink.net/tag/makolab_semantic_day|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/makolab_semantic_day|uri|http://www.semanlink.net/tag/makolab_semantic_day 
+http://www.semanlink.net/tag/makolab_semantic_day|broader_prefLabel|Makolab +http://www.semanlink.net/tag/makolab_semantic_day|broader_related|http://www.semanlink.net/tag/c2g +http://www.semanlink.net/tag/technical_girls_and_guys|prefLabel|Technical girls and guys +http://www.semanlink.net/tag/technical_girls_and_guys|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/technical_girls_and_guys|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/technical_girls_and_guys|altLabel|Technical guys +http://www.semanlink.net/tag/technical_girls_and_guys|uri|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/technical_girls_and_guys|broader_prefLabel|Technologie +http://www.semanlink.net/tag/engelbart|prefLabel|Engelbart +http://www.semanlink.net/tag/engelbart|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/engelbart|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/engelbart|uri|http://www.semanlink.net/tag/engelbart +http://www.semanlink.net/tag/engelbart|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/engelbart|broader_altLabel|Technical guys +http://www.semanlink.net/tag/mitochondries|creationTime|2016-04-26T00:42:07Z +http://www.semanlink.net/tag/mitochondries|prefLabel|Mitochondries +http://www.semanlink.net/tag/mitochondries|related|http://www.semanlink.net/tag/symbiose +http://www.semanlink.net/tag/mitochondries|creationDate|2016-04-26 +http://www.semanlink.net/tag/mitochondries|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mitochondries|uri|http://www.semanlink.net/tag/mitochondries +http://www.semanlink.net/tag/nazisme|creationTime|2014-05-20T23:07:20Z +http://www.semanlink.net/tag/nazisme|prefLabel|Nazisme +http://www.semanlink.net/tag/nazisme|broader|http://www.semanlink.net/tag/allemagne +http://www.semanlink.net/tag/nazisme|broader|http://www.semanlink.net/tag/fascisme +http://www.semanlink.net/tag/nazisme|broader|http://www.semanlink.net/tag/mechant +http://www.semanlink.net/tag/nazisme|related|http://www.semanlink.net/tag/2eme_guerre_mondiale +http://www.semanlink.net/tag/nazisme|related|http://www.semanlink.net/tag/antisemitisme +http://www.semanlink.net/tag/nazisme|creationDate|2014-05-20 +http://www.semanlink.net/tag/nazisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nazisme|altLabel|Nazi +http://www.semanlink.net/tag/nazisme|uri|http://www.semanlink.net/tag/nazisme +http://www.semanlink.net/tag/nazisme|broader_prefLabel|Allemagne +http://www.semanlink.net/tag/nazisme|broader_prefLabel|Fascisme +http://www.semanlink.net/tag/nazisme|broader_prefLabel|Méchant +http://www.semanlink.net/tag/nazisme|broader_altLabel|Germany +http://www.semanlink.net/tag/nazisme|broader_altLabel|Deutschland +http://www.semanlink.net/tag/distant_supervision|creationTime|2019-05-30T12:41:27Z +http://www.semanlink.net/tag/distant_supervision|prefLabel|Distant Supervision +http://www.semanlink.net/tag/distant_supervision|creationDate|2019-05-30 +http://www.semanlink.net/tag/distant_supervision|comment|"> Heuristically mapping an external knowledge base onto the input data to generate noisy labels ([source](https://hazyresearch.github.io/snorkel/blog/ws_blog_post.html)) +" +http://www.semanlink.net/tag/distant_supervision|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/distant_supervision|uri|http://www.semanlink.net/tag/distant_supervision
+http://www.semanlink.net/tag/marches_financiers|prefLabel|Marchés financiers
+http://www.semanlink.net/tag/marches_financiers|broader|http://www.semanlink.net/tag/finance
+http://www.semanlink.net/tag/marches_financiers|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/marches_financiers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/marches_financiers|uri|http://www.semanlink.net/tag/marches_financiers
+http://www.semanlink.net/tag/marches_financiers|broader_prefLabel|Finance
+http://www.semanlink.net/tag/marches_financiers|broader_prefLabel|Economie
+http://www.semanlink.net/tag/marches_financiers|broader_related|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/france_police|creationTime|2020-12-17T12:48:39Z
+http://www.semanlink.net/tag/france_police|prefLabel|France : police
+http://www.semanlink.net/tag/france_police|broader|http://www.semanlink.net/tag/france
+http://www.semanlink.net/tag/france_police|broader|http://www.semanlink.net/tag/police
+http://www.semanlink.net/tag/france_police|creationDate|2020-12-17
+http://www.semanlink.net/tag/france_police|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/france_police|uri|http://www.semanlink.net/tag/france_police
+http://www.semanlink.net/tag/france_police|broader_prefLabel|France
+http://www.semanlink.net/tag/france_police|broader_prefLabel|Police
+http://www.semanlink.net/tag/bookmarks|prefLabel|Bookmarks
+http://www.semanlink.net/tag/bookmarks|broader|http://www.semanlink.net/tag/tagging
+http://www.semanlink.net/tag/bookmarks|broader|http://www.semanlink.net/tag/brouteur
+http://www.semanlink.net/tag/bookmarks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/bookmarks|altLabel|Signets
+http://www.semanlink.net/tag/bookmarks|uri|http://www.semanlink.net/tag/bookmarks
+http://www.semanlink.net/tag/bookmarks|broader_prefLabel|Tagging
+http://www.semanlink.net/tag/bookmarks|broader_prefLabel|Brouteur
+http://www.semanlink.net/tag/bookmarks|broader_altLabel|Browser
+http://www.semanlink.net/tag/hongrie|prefLabel|Hongrie
+http://www.semanlink.net/tag/hongrie|broader|http://www.semanlink.net/tag/pays_d_europe
+http://www.semanlink.net/tag/hongrie|broader|http://www.semanlink.net/tag/europe
+http://www.semanlink.net/tag/hongrie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/hongrie|uri|http://www.semanlink.net/tag/hongrie
+http://www.semanlink.net/tag/hongrie|broader_prefLabel|Pays d'Europe
+http://www.semanlink.net/tag/hongrie|broader_prefLabel|Europe
+http://www.semanlink.net/tag/javaone|creationTime|2008-04-08T20:37:55Z
+http://www.semanlink.net/tag/javaone|prefLabel|JavaOne
+http://www.semanlink.net/tag/javaone|broader|http://www.semanlink.net/tag/conferences
+http://www.semanlink.net/tag/javaone|broader|http://www.semanlink.net/tag/java
+http://www.semanlink.net/tag/javaone|creationDate|2008-04-08
+http://www.semanlink.net/tag/javaone|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/javaone|uri|http://www.semanlink.net/tag/javaone
+http://www.semanlink.net/tag/javaone|broader_prefLabel|Conférences
+http://www.semanlink.net/tag/javaone|broader_prefLabel|Java
+http://www.semanlink.net/tag/javaone|broader_related|http://www.semanlink.net/tag/workshop
+http://www.semanlink.net/tag/continual_learning|creationTime|2020-01-01T12:12:36Z +http://www.semanlink.net/tag/continual_learning|prefLabel|Continual Learning +http://www.semanlink.net/tag/continual_learning|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/continual_learning|creationDate|2020-01-01 +http://www.semanlink.net/tag/continual_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/continual_learning|altLabel|Online Machine Learning +http://www.semanlink.net/tag/continual_learning|altLabel|Incremental learning +http://www.semanlink.net/tag/continual_learning|altLabel|Lifelong learning +http://www.semanlink.net/tag/continual_learning|uri|http://www.semanlink.net/tag/continual_learning +http://www.semanlink.net/tag/continual_learning|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/radix_trees|prefLabel|Radix trees +http://www.semanlink.net/tag/radix_trees|creationDate|2006-10-17 +http://www.semanlink.net/tag/radix_trees|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/radix_trees|uri|http://www.semanlink.net/tag/radix_trees +http://www.semanlink.net/tag/mots_expressions_remarquables|creationTime|2019-07-19T21:50:17Z +http://www.semanlink.net/tag/mots_expressions_remarquables|prefLabel|Mots/expressions remarquables +http://www.semanlink.net/tag/mots_expressions_remarquables|broader|http://www.semanlink.net/tag/expression +http://www.semanlink.net/tag/mots_expressions_remarquables|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/mots_expressions_remarquables|creationDate|2019-07-19 +http://www.semanlink.net/tag/mots_expressions_remarquables|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mots_expressions_remarquables|uri|http://www.semanlink.net/tag/mots_expressions_remarquables +http://www.semanlink.net/tag/mots_expressions_remarquables|broader_prefLabel|Expression +http://www.semanlink.net/tag/mots_expressions_remarquables|broader_prefLabel|Langues +http://www.semanlink.net/tag/videosurveillance|creationTime|2007-04-30T01:51:55Z +http://www.semanlink.net/tag/videosurveillance|prefLabel|Vidéosurveillance +http://www.semanlink.net/tag/videosurveillance|broader|http://www.semanlink.net/tag/etat_policier +http://www.semanlink.net/tag/videosurveillance|broader|http://www.semanlink.net/tag/big_brother +http://www.semanlink.net/tag/videosurveillance|creationDate|2007-04-30 +http://www.semanlink.net/tag/videosurveillance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/videosurveillance|uri|http://www.semanlink.net/tag/videosurveillance +http://www.semanlink.net/tag/videosurveillance|broader_prefLabel|Etat policier +http://www.semanlink.net/tag/videosurveillance|broader_prefLabel|Big Brother +http://www.semanlink.net/tag/videosurveillance|broader_related|http://www.semanlink.net/tag/securite +http://www.semanlink.net/tag/ai_conference|creationTime|2018-07-07T12:44:17Z +http://www.semanlink.net/tag/ai_conference|prefLabel|AI Conference +http://www.semanlink.net/tag/ai_conference|broader|http://www.semanlink.net/tag/ai_event +http://www.semanlink.net/tag/ai_conference|broader|http://www.semanlink.net/tag/conferences +http://www.semanlink.net/tag/ai_conference|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_conference|creationDate|2018-07-07 
+http://www.semanlink.net/tag/ai_conference|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_conference|uri|http://www.semanlink.net/tag/ai_conference +http://www.semanlink.net/tag/ai_conference|broader_prefLabel|AI Event +http://www.semanlink.net/tag/ai_conference|broader_prefLabel|Conférences +http://www.semanlink.net/tag/ai_conference|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_conference|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_conference|broader_altLabel|AI +http://www.semanlink.net/tag/ai_conference|broader_altLabel|IA +http://www.semanlink.net/tag/ai_conference|broader_related|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/ai_conference|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/sport|prefLabel|Sport +http://www.semanlink.net/tag/sport|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sport|uri|http://www.semanlink.net/tag/sport +http://www.semanlink.net/tag/libshorttext|creationTime|2014-03-26T10:20:32Z +http://www.semanlink.net/tag/libshorttext|prefLabel|LibShortText +http://www.semanlink.net/tag/libshorttext|broader|http://www.semanlink.net/tag/national_taiwan_university +http://www.semanlink.net/tag/libshorttext|broader|http://www.semanlink.net/tag/python_nlp +http://www.semanlink.net/tag/libshorttext|broader|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/tag/libshorttext|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/libshorttext|creationDate|2014-03-26 +http://www.semanlink.net/tag/libshorttext|comment|Tool for **short-text** classification and analysis. +http://www.semanlink.net/tag/libshorttext|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/libshorttext|homepage|http://www.csie.ntu.edu.tw/~cjlin/libshorttext/ +http://www.semanlink.net/tag/libshorttext|uri|http://www.semanlink.net/tag/libshorttext +http://www.semanlink.net/tag/libshorttext|broader_prefLabel|National Taiwan University +http://www.semanlink.net/tag/libshorttext|broader_prefLabel|Python-NLP +http://www.semanlink.net/tag/libshorttext|broader_prefLabel|Text Classification +http://www.semanlink.net/tag/libshorttext|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/libshorttext|broader_related|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/libshorttext|broader_related|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/tag/inserm|prefLabel|INSERM +http://www.semanlink.net/tag/inserm|broader|http://www.semanlink.net/tag/medecine +http://www.semanlink.net/tag/inserm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/inserm|uri|http://www.semanlink.net/tag/inserm +http://www.semanlink.net/tag/inserm|broader_prefLabel|Médecine +http://www.semanlink.net/tag/langage|prefLabel|Language +http://www.semanlink.net/tag/langage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/langage|altLabel|Langage +http://www.semanlink.net/tag/langage|uri|http://www.semanlink.net/tag/langage +http://www.semanlink.net/tag/powder|creationTime|2008-05-14T21:21:22Z +http://www.semanlink.net/tag/powder|prefLabel|POWDER +http://www.semanlink.net/tag/powder|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/powder|broader|http://www.semanlink.net/tag/w3c_working_draft 
+http://www.semanlink.net/tag/powder|creationDate|2008-05-14
+http://www.semanlink.net/tag/powder|comment|The purpose of the Protocol for Web Description Resources (POWDER) is to provide a means for individuals or organizations to describe a group of resources through the publication of machine-readable metadata
+http://www.semanlink.net/tag/powder|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/powder|uri|http://www.semanlink.net/tag/powder
+http://www.semanlink.net/tag/powder|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/powder|broader_prefLabel|W3C Working Draft
+http://www.semanlink.net/tag/powder|broader_altLabel|sw
+http://www.semanlink.net/tag/powder|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/xss|prefLabel|XSS
+http://www.semanlink.net/tag/xss|broader|http://www.semanlink.net/tag/malicious_code
+http://www.semanlink.net/tag/xss|creationDate|2006-12-11
+http://www.semanlink.net/tag/xss|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/xss|uri|http://www.semanlink.net/tag/xss
+http://www.semanlink.net/tag/xss|broader_prefLabel|Malicious code
+http://www.semanlink.net/tag/semantics|creationTime|2011-01-14T15:53:24Z
+http://www.semanlink.net/tag/semantics|prefLabel|Semantics
+http://www.semanlink.net/tag/semantics|creationDate|2011-01-14
+http://www.semanlink.net/tag/semantics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantics|uri|http://www.semanlink.net/tag/semantics
+http://www.semanlink.net/tag/foxconn|creationTime|2012-05-04T01:14:17Z
+http://www.semanlink.net/tag/foxconn|prefLabel|Foxconn
+http://www.semanlink.net/tag/foxconn|broader|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/foxconn|broader|http://www.semanlink.net/tag/chine_technologie
+http://www.semanlink.net/tag/foxconn|broader|http://www.semanlink.net/tag/chine
+http://www.semanlink.net/tag/foxconn|creationDate|2012-05-04
+http://www.semanlink.net/tag/foxconn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/foxconn|uri|http://www.semanlink.net/tag/foxconn
+http://www.semanlink.net/tag/foxconn|broader_prefLabel|Entreprise
+http://www.semanlink.net/tag/foxconn|broader_prefLabel|Chine : technologie
+http://www.semanlink.net/tag/foxconn|broader_prefLabel|Chine
+http://www.semanlink.net/tag/foxconn|broader_altLabel|China
+http://www.semanlink.net/tag/nerds|creationTime|2021-04-19T18:39:24Z
+http://www.semanlink.net/tag/nerds|prefLabel|nerds
+http://www.semanlink.net/tag/nerds|creationDate|2021-04-19
+http://www.semanlink.net/tag/nerds|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nerds|uri|http://www.semanlink.net/tag/nerds
+http://www.semanlink.net/tag/covid19_conneries_gouvernementales|creationTime|2020-04-09T16:06:00Z
+http://www.semanlink.net/tag/covid19_conneries_gouvernementales|prefLabel|Covid19 : incurie gouvernementale
+http://www.semanlink.net/tag/covid19_conneries_gouvernementales|broader|http://www.semanlink.net/tag/coronavirus
+http://www.semanlink.net/tag/covid19_conneries_gouvernementales|creationDate|2020-04-09
+http://www.semanlink.net/tag/covid19_conneries_gouvernementales|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/covid19_conneries_gouvernementales|uri|http://www.semanlink.net/tag/covid19_conneries_gouvernementales
+http://www.semanlink.net/tag/covid19_conneries_gouvernementales|broader_prefLabel|Covid19
+http://www.semanlink.net/tag/covid19_conneries_gouvernementales|broader_altLabel|covid-19
+http://www.semanlink.net/tag/covid19_conneries_gouvernementales|broader_altLabel|Covid
+http://www.semanlink.net/tag/covid19_conneries_gouvernementales|broader_altLabel|Coronavirus
+http://www.semanlink.net/tag/semantic_blog|prefLabel|Semantic Blog
+http://www.semanlink.net/tag/semantic_blog|broader|http://www.semanlink.net/tag/blog
+http://www.semanlink.net/tag/semantic_blog|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/semantic_blog|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_blog|uri|http://www.semanlink.net/tag/semantic_blog
+http://www.semanlink.net/tag/semantic_blog|broader_prefLabel|Blog
+http://www.semanlink.net/tag/semantic_blog|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semantic_blog|broader_altLabel|sw
+http://www.semanlink.net/tag/semantic_blog|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/node_embeddings|creationTime|2018-05-10T14:33:26Z
+http://www.semanlink.net/tag/node_embeddings|prefLabel|Node Embeddings
+http://www.semanlink.net/tag/node_embeddings|broader|http://www.semanlink.net/tag/graph_embeddings
+http://www.semanlink.net/tag/node_embeddings|creationDate|2018-05-10
+http://www.semanlink.net/tag/node_embeddings|comment|"How do we do node embeddings? ([source](http://snap.stanford.edu/proj/embeddings-www/index.html#materials))
+
+Intuition: Find embedding of nodes so that “similar” nodes in the graph have embeddings that are close together.
+
+1. Define an encoder (i.e., a mapping from nodes to embeddings)
+    - Shallow embedding (simplest encoding approach): encoder is just an embedding-lookup. Ex: [node2vec](/tag/node2vec), DeepWalk, LINE
+2. Define a node similarity function, e.g. nodes are similar if:
+    - they are connected?
+    - they share neighbours?
+    - have structurally similar roles?
+    - ...
+3. Optimize the parameters of the encoder so that similarity in the embedding space (e.g., dot product) approximates similarity in the original network
+
+Defining similarity:
+
+- Adjacency-based Similarity
+- ""Multihop"" similarity (measure overlap between node neighborhoods)
+
+These two methods are expensive.
+-> **Random-walk Embeddings** (Estimate probability of visiting node v on a random walk starting from node u using some random walk strategy, optimize embeddings to encode random walk statistics). Expressivity (incorporates both local and higher-order neighbourhood information) and efficiency (do not need to consider all pairs when training)
+
+Which random walk strategy?
+
+- fixed-length random walks starting from each node: **DeepWalk** (Perozzi et al., 2013)
+- ""biased random walks"" that can trade off between local and global views of the network: **Node2Vec** (Micro-view / macro-view of neighbourhood)
+
+No method wins in all cases.
+
+> We can now view the popular ""node embedding"" methods as
+well-understood extensions of classic work on dimensionality reduction [src](doc:2020/08/graph_representation_learning_b)
+"
+http://www.semanlink.net/tag/node_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/node_embeddings|uri|http://www.semanlink.net/tag/node_embeddings
+http://www.semanlink.net/tag/node_embeddings|broader_prefLabel|Graph Embeddings
+http://www.semanlink.net/tag/node_embeddings|broader_altLabel|Representation Learning on Networks
+http://www.semanlink.net/tag/node_embeddings|broader_altLabel|Graph representation learning
+http://www.semanlink.net/tag/node_embeddings|broader_altLabel|Network Representation Learning
+http://www.semanlink.net/tag/node_embeddings|broader_altLabel|Network embeddings
+http://www.semanlink.net/tag/node_embeddings|broader_related|http://www.semanlink.net/tag/graph_neural_networks
+http://www.semanlink.net/tag/intelligence|creationTime|2010-07-17T15:31:54Z
+http://www.semanlink.net/tag/intelligence|prefLabel|Intelligence
+http://www.semanlink.net/tag/intelligence|creationDate|2010-07-17
+http://www.semanlink.net/tag/intelligence|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/intelligence|uri|http://www.semanlink.net/tag/intelligence
+http://www.semanlink.net/tag/distant_reading|creationTime|2013-08-20T16:32:51Z
+http://www.semanlink.net/tag/distant_reading|prefLabel|Distant reading
+http://www.semanlink.net/tag/distant_reading|broader|http://www.semanlink.net/tag/livre
+http://www.semanlink.net/tag/distant_reading|creationDate|2013-08-20
+http://www.semanlink.net/tag/distant_reading|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/distant_reading|uri|http://www.semanlink.net/tag/distant_reading
+http://www.semanlink.net/tag/distant_reading|broader_prefLabel|Livre
+http://www.semanlink.net/tag/distant_reading|broader_altLabel|Livres
+http://www.semanlink.net/tag/aol|prefLabel|AOL
+http://www.semanlink.net/tag/aol|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/aol|uri|http://www.semanlink.net/tag/aol
+http://www.semanlink.net/tag/roy_t_fielding|prefLabel|Roy T. Fielding
+http://www.semanlink.net/tag/roy_t_fielding|broader|http://www.semanlink.net/tag/rest
+http://www.semanlink.net/tag/roy_t_fielding|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/roy_t_fielding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/roy_t_fielding|uri|http://www.semanlink.net/tag/roy_t_fielding
+http://www.semanlink.net/tag/roy_t_fielding|broader_prefLabel|REST
+http://www.semanlink.net/tag/roy_t_fielding|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/roy_t_fielding|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/kubernetes|creationTime|2018-10-07T12:51:15Z
+http://www.semanlink.net/tag/kubernetes|prefLabel|Kubernetes
+http://www.semanlink.net/tag/kubernetes|creationDate|2018-10-07
+http://www.semanlink.net/tag/kubernetes|comment|container-orchestration system for automating deployment, scaling and management of containerized applications
+http://www.semanlink.net/tag/kubernetes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/kubernetes|describedBy|https://en.wikipedia.org/wiki/Kubernetes
+http://www.semanlink.net/tag/kubernetes|uri|http://www.semanlink.net/tag/kubernetes
+http://www.semanlink.net/tag/fps_tweet|creationTime|2020-12-03T01:29:37Z
+http://www.semanlink.net/tag/fps_tweet|prefLabel|fps' tweet
+http://www.semanlink.net/tag/fps_tweet|broader|http://www.semanlink.net/tag/fps
+http://www.semanlink.net/tag/fps_tweet|broader|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/tag/fps_tweet|creationDate|2020-12-03
+http://www.semanlink.net/tag/fps_tweet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fps_tweet|uri|http://www.semanlink.net/tag/fps_tweet
+http://www.semanlink.net/tag/fps_tweet|broader_prefLabel|fps
+http://www.semanlink.net/tag/fps_tweet|broader_prefLabel|Tweet
+http://www.semanlink.net/tag/knowledge_engineering|creationTime|2018-04-10T17:46:38Z
+http://www.semanlink.net/tag/knowledge_engineering|prefLabel|Knowledge Engineering
+http://www.semanlink.net/tag/knowledge_engineering|broader|http://www.semanlink.net/tag/knowledge
+http://www.semanlink.net/tag/knowledge_engineering|creationDate|2018-04-10
+http://www.semanlink.net/tag/knowledge_engineering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/knowledge_engineering|uri|http://www.semanlink.net/tag/knowledge_engineering
+http://www.semanlink.net/tag/knowledge_engineering|broader_prefLabel|Knowledge
+http://www.semanlink.net/tag/procrastination|creationTime|2011-01-08T00:57:16Z
+http://www.semanlink.net/tag/procrastination|prefLabel|Procrastination
+http://www.semanlink.net/tag/procrastination|creationDate|2011-01-08
+http://www.semanlink.net/tag/procrastination|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/procrastination|uri|http://www.semanlink.net/tag/procrastination
+http://www.semanlink.net/tag/multiword_expressions|creationTime|2018-06-08T07:46:57Z
+http://www.semanlink.net/tag/multiword_expressions|prefLabel|Multiword Expressions
+http://www.semanlink.net/tag/multiword_expressions|broader|http://www.semanlink.net/tag/nlp_problem
+http://www.semanlink.net/tag/multiword_expressions|creationDate|2018-06-08
+http://www.semanlink.net/tag/multiword_expressions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/multiword_expressions|uri|http://www.semanlink.net/tag/multiword_expressions +http://www.semanlink.net/tag/multiword_expressions|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/nlp_sample_code|creationTime|2019-02-07T00:49:41Z +http://www.semanlink.net/tag/nlp_sample_code|prefLabel|NLP sample code +http://www.semanlink.net/tag/nlp_sample_code|broader|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/tag/nlp_sample_code|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_sample_code|creationDate|2019-02-07 +http://www.semanlink.net/tag/nlp_sample_code|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_sample_code|uri|http://www.semanlink.net/tag/nlp_sample_code +http://www.semanlink.net/tag/nlp_sample_code|broader_prefLabel|Sample code +http://www.semanlink.net/tag/nlp_sample_code|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_sample_code|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_sample_code|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_sample_code|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/desobeissance_civile|creationTime|2019-06-18T23:10:18Z +http://www.semanlink.net/tag/desobeissance_civile|prefLabel|Désobéissance civile +http://www.semanlink.net/tag/desobeissance_civile|broader|http://www.semanlink.net/tag/esprit_de_resistance +http://www.semanlink.net/tag/desobeissance_civile|related|http://www.semanlink.net/tag/liberte_liberte_cherie +http://www.semanlink.net/tag/desobeissance_civile|creationDate|2019-06-18 +http://www.semanlink.net/tag/desobeissance_civile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/desobeissance_civile|uri|http://www.semanlink.net/tag/desobeissance_civile +http://www.semanlink.net/tag/desobeissance_civile|broader_prefLabel|Esprit de résistance +http://www.semanlink.net/tag/rosetta|prefLabel|Rosetta +http://www.semanlink.net/tag/rosetta|broader|http://www.semanlink.net/tag/esa +http://www.semanlink.net/tag/rosetta|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/rosetta|broader|http://www.semanlink.net/tag/comete +http://www.semanlink.net/tag/rosetta|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rosetta|uri|http://www.semanlink.net/tag/rosetta +http://www.semanlink.net/tag/rosetta|broader_prefLabel|esa +http://www.semanlink.net/tag/rosetta|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/rosetta|broader_prefLabel|Comète +http://www.semanlink.net/tag/pocketsphinx|creationTime|2019-05-27T19:18:42Z +http://www.semanlink.net/tag/pocketsphinx|prefLabel|PocketSphinx +http://www.semanlink.net/tag/pocketsphinx|broader|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/tag/pocketsphinx|creationDate|2019-05-27 +http://www.semanlink.net/tag/pocketsphinx|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pocketsphinx|uri|http://www.semanlink.net/tag/pocketsphinx +http://www.semanlink.net/tag/pocketsphinx|broader_prefLabel|Speech-to-Text +http://www.semanlink.net/tag/pocketsphinx|broader_altLabel|Speech recognition +http://www.semanlink.net/tag/pocketsphinx|broader_altLabel|Voice recognition +http://www.semanlink.net/tag/pocketsphinx|broader_related|http://www.semanlink.net/tag/deep_learning 
+http://www.semanlink.net/tag/pocketsphinx|broader_related|http://www.semanlink.net/tag/audio_classification +http://www.semanlink.net/tag/rdf|prefLabel|RDF +http://www.semanlink.net/tag/rdf|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/rdf|broader|http://www.semanlink.net/tag/data_interchange_format +http://www.semanlink.net/tag/rdf|related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf|related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf|related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf|related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf|related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf|homepage|http://www.w3.org/RDF/ +http://www.semanlink.net/tag/rdf|uri|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/rdf|broader_prefLabel|Data Interchange Format +http://www.semanlink.net/tag/rdf|broader_altLabel|sw +http://www.semanlink.net/tag/rdf|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/metagenomics|creationTime|2016-01-19T15:54:58Z +http://www.semanlink.net/tag/metagenomics|prefLabel|Metagenomics +http://www.semanlink.net/tag/metagenomics|broader|http://www.semanlink.net/tag/biodiversite +http://www.semanlink.net/tag/metagenomics|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/metagenomics|creationDate|2016-01-19 +http://www.semanlink.net/tag/metagenomics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/metagenomics|describedBy|https://en.wikipedia.org/wiki/Metagenomics +http://www.semanlink.net/tag/metagenomics|uri|http://www.semanlink.net/tag/metagenomics +http://www.semanlink.net/tag/metagenomics|broader_prefLabel|Biodiversité +http://www.semanlink.net/tag/metagenomics|broader_prefLabel|Genetics +http://www.semanlink.net/tag/metagenomics|broader_prefLabel|Génétique +http://www.semanlink.net/tag/metagenomics|broader_altLabel|Biodiversity +http://www.semanlink.net/tag/linked_data_exploration|creationTime|2013-09-07T09:43:05Z +http://www.semanlink.net/tag/linked_data_exploration|prefLabel|Linked Data Exploration +http://www.semanlink.net/tag/linked_data_exploration|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_exploration|creationDate|2013-09-07 +http://www.semanlink.net/tag/linked_data_exploration|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_exploration|uri|http://www.semanlink.net/tag/linked_data_exploration +http://www.semanlink.net/tag/linked_data_exploration|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_exploration|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/jamendo 
+http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data_exploration|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/afghanistan|prefLabel|Afghanistan +http://www.semanlink.net/tag/afghanistan|broader|http://www.semanlink.net/tag/asie_centrale +http://www.semanlink.net/tag/afghanistan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afghanistan|uri|http://www.semanlink.net/tag/afghanistan +http://www.semanlink.net/tag/afghanistan|broader_prefLabel|Asie centrale +http://www.semanlink.net/tag/voice_ai|creationTime|2018-11-01T21:20:02Z +http://www.semanlink.net/tag/voice_ai|prefLabel|Voice AI +http://www.semanlink.net/tag/voice_ai|related|http://www.semanlink.net/tag/siri +http://www.semanlink.net/tag/voice_ai|creationDate|2018-11-01 +http://www.semanlink.net/tag/voice_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/voice_ai|uri|http://www.semanlink.net/tag/voice_ai +http://www.semanlink.net/tag/knowledge_discovery|creationTime|2018-04-10T17:43:55Z +http://www.semanlink.net/tag/knowledge_discovery|prefLabel|Knowledge Discovery +http://www.semanlink.net/tag/knowledge_discovery|broader|http://www.semanlink.net/tag/knowledge_engineering +http://www.semanlink.net/tag/knowledge_discovery|creationDate|2018-04-10 +http://www.semanlink.net/tag/knowledge_discovery|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_discovery|uri|http://www.semanlink.net/tag/knowledge_discovery +http://www.semanlink.net/tag/knowledge_discovery|broader_prefLabel|Knowledge Engineering +http://www.semanlink.net/tag/mouchard|prefLabel|Mouchard +http://www.semanlink.net/tag/mouchard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mouchard|uri|http://www.semanlink.net/tag/mouchard +http://www.semanlink.net/tag/nlp_4_semanlink|creationTime|2020-07-28T01:40:47Z +http://www.semanlink.net/tag/nlp_4_semanlink|prefLabel|NLP 4 Semanlink +http://www.semanlink.net/tag/nlp_4_semanlink|broader|http://www.semanlink.net/tag/semanlink2 +http://www.semanlink.net/tag/nlp_4_semanlink|broader|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/nlp_4_semanlink|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_4_semanlink|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/nlp_4_semanlink|related|http://www.semanlink.net/tag/personal_knowledge_graph +http://www.semanlink.net/tag/nlp_4_semanlink|creationDate|2020-07-28 
+http://www.semanlink.net/tag/nlp_4_semanlink|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_4_semanlink|uri|http://www.semanlink.net/tag/nlp_4_semanlink +http://www.semanlink.net/tag/nlp_4_semanlink|broader_prefLabel|Semanlink2 +http://www.semanlink.net/tag/nlp_4_semanlink|broader_prefLabel|Semanlink +http://www.semanlink.net/tag/nlp_4_semanlink|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_4_semanlink|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/nlp_4_semanlink|broader_altLabel|SL +http://www.semanlink.net/tag/nlp_4_semanlink|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_4_semanlink|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_4_semanlink|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/nlp_4_semanlink|broader_related|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/jose_moreno|creationTime|2018-06-08T17:52:27Z +http://www.semanlink.net/tag/jose_moreno|prefLabel|José Moreno +http://www.semanlink.net/tag/jose_moreno|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/jose_moreno|related|http://www.semanlink.net/tag/irit +http://www.semanlink.net/tag/jose_moreno|creationDate|2018-06-08 +http://www.semanlink.net/tag/jose_moreno|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jose_moreno|uri|http://www.semanlink.net/tag/jose_moreno +http://www.semanlink.net/tag/jose_moreno|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/destination_prediction|creationTime|2013-02-28T13:58:48Z +http://www.semanlink.net/tag/destination_prediction|prefLabel|Destination prediction +http://www.semanlink.net/tag/destination_prediction|creationDate|2013-02-28 +http://www.semanlink.net/tag/destination_prediction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/destination_prediction|uri|http://www.semanlink.net/tag/destination_prediction +http://www.semanlink.net/tag/quantum_biology|creationTime|2013-12-27T14:30:52Z +http://www.semanlink.net/tag/quantum_biology|prefLabel|Quantum biology +http://www.semanlink.net/tag/quantum_biology|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/quantum_biology|broader|http://www.semanlink.net/tag/mecanique_quantique +http://www.semanlink.net/tag/quantum_biology|creationDate|2013-12-27 +http://www.semanlink.net/tag/quantum_biology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/quantum_biology|uri|http://www.semanlink.net/tag/quantum_biology +http://www.semanlink.net/tag/quantum_biology|broader_prefLabel|Biology +http://www.semanlink.net/tag/quantum_biology|broader_prefLabel|Mécanique quantique +http://www.semanlink.net/tag/quantum_biology|broader_altLabel|Biologie +http://www.semanlink.net/tag/astrophysique|prefLabel|Astrophysique +http://www.semanlink.net/tag/astrophysique|broader|http://www.semanlink.net/tag/cosmologie +http://www.semanlink.net/tag/astrophysique|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/astrophysique|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/astrophysique|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/astrophysique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/astrophysique|uri|http://www.semanlink.net/tag/astrophysique 
+http://www.semanlink.net/tag/astrophysique|broader_prefLabel|Cosmologie +http://www.semanlink.net/tag/astrophysique|broader_prefLabel|Science +http://www.semanlink.net/tag/astrophysique|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/astrophysique|broader_prefLabel|Physique +http://www.semanlink.net/tag/astrophysique|broader_altLabel|sciences +http://www.semanlink.net/tag/astrophysique|broader_altLabel|Physics +http://www.semanlink.net/tag/drogues|prefLabel|Drogues +http://www.semanlink.net/tag/drogues|creationDate|2006-11-13 +http://www.semanlink.net/tag/drogues|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/drogues|uri|http://www.semanlink.net/tag/drogues +http://www.semanlink.net/tag/francophonie|prefLabel|Francophonie +http://www.semanlink.net/tag/francophonie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/francophonie|uri|http://www.semanlink.net/tag/francophonie +http://www.semanlink.net/tag/equitation|creationTime|2012-08-05T00:18:44Z +http://www.semanlink.net/tag/equitation|prefLabel|Equitation +http://www.semanlink.net/tag/equitation|broader|http://www.semanlink.net/tag/cheval +http://www.semanlink.net/tag/equitation|broader|http://www.semanlink.net/tag/sport +http://www.semanlink.net/tag/equitation|creationDate|2012-08-05 +http://www.semanlink.net/tag/equitation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/equitation|uri|http://www.semanlink.net/tag/equitation +http://www.semanlink.net/tag/equitation|broader_prefLabel|Cheval +http://www.semanlink.net/tag/equitation|broader_prefLabel|Sport +http://www.semanlink.net/tag/ontologies_use_cases|creationTime|2020-03-18T12:06:14Z +http://www.semanlink.net/tag/ontologies_use_cases|prefLabel|Ontologies: use cases +http://www.semanlink.net/tag/ontologies_use_cases|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/ontologies_use_cases|creationDate|2020-03-18 +http://www.semanlink.net/tag/ontologies_use_cases|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ontologies_use_cases|uri|http://www.semanlink.net/tag/ontologies_use_cases +http://www.semanlink.net/tag/ontologies_use_cases|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/ontologies_use_cases|broader_altLabel|Ontology +http://www.semanlink.net/tag/rdf_repository|creationTime|2007-04-20T20:50:29Z +http://www.semanlink.net/tag/rdf_repository|prefLabel|RDF repository +http://www.semanlink.net/tag/rdf_repository|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_repository|creationDate|2007-04-20 +http://www.semanlink.net/tag/rdf_repository|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_repository|uri|http://www.semanlink.net/tag/rdf_repository +http://www.semanlink.net/tag/rdf_repository|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_repository|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_repository|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_repository|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_repository|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_repository|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/word_embedding_evaluation|creationTime|2017-07-20T15:44:03Z 
+http://www.semanlink.net/tag/word_embedding_evaluation|prefLabel|Word embedding: evaluation
+http://www.semanlink.net/tag/word_embedding_evaluation|broader|http://www.semanlink.net/tag/embedding_evaluation
+http://www.semanlink.net/tag/word_embedding_evaluation|broader|http://www.semanlink.net/tag/word_embedding
+http://www.semanlink.net/tag/word_embedding_evaluation|creationDate|2017-07-20
+http://www.semanlink.net/tag/word_embedding_evaluation|comment|Proposed in a paper at EMNLP 2018: use Odd-Man-Out puzzles
+http://www.semanlink.net/tag/word_embedding_evaluation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/word_embedding_evaluation|uri|http://www.semanlink.net/tag/word_embedding_evaluation
+http://www.semanlink.net/tag/word_embedding_evaluation|broader_prefLabel|Embedding evaluation
+http://www.semanlink.net/tag/word_embedding_evaluation|broader_prefLabel|Word embeddings
+http://www.semanlink.net/tag/word_embedding_evaluation|broader_altLabel|Word Embedding
+http://www.semanlink.net/tag/word_embedding_evaluation|broader_altLabel|Plongement lexical
+http://www.semanlink.net/tag/word_embedding_evaluation|broader_related|http://www.semanlink.net/tag/tomas_mikolov
+http://www.semanlink.net/tag/word_embedding_evaluation|broader_related|http://www.semanlink.net/tag/yoshua_bengio
+http://www.semanlink.net/tag/word_embedding_evaluation|broader_related|http://www.semanlink.net/tag/sense_embeddings
+http://www.semanlink.net/tag/basic|creationTime|2011-07-01T14:38:22Z
+http://www.semanlink.net/tag/basic|prefLabel|Basic
+http://www.semanlink.net/tag/basic|broader|http://www.semanlink.net/tag/programming_language
+http://www.semanlink.net/tag/basic|creationDate|2011-07-01
+http://www.semanlink.net/tag/basic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/basic|uri|http://www.semanlink.net/tag/basic
+http://www.semanlink.net/tag/basic|broader_prefLabel|Programming language
+http://www.semanlink.net/tag/basic|broader_altLabel|Langage de programmation
+http://www.semanlink.net/tag/reproducible_research|creationTime|2019-06-11T11:39:07Z
+http://www.semanlink.net/tag/reproducible_research|prefLabel|Reproducible Research
+http://www.semanlink.net/tag/reproducible_research|broader|http://www.semanlink.net/tag/recherche
+http://www.semanlink.net/tag/reproducible_research|creationDate|2019-06-11
+http://www.semanlink.net/tag/reproducible_research|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/reproducible_research|uri|http://www.semanlink.net/tag/reproducible_research
+http://www.semanlink.net/tag/reproducible_research|broader_prefLabel|Recherche
+http://www.semanlink.net/tag/reproducible_research|broader_altLabel|Research
+http://www.semanlink.net/tag/ramanujan|creationTime|2016-04-10T18:42:06Z
+http://www.semanlink.net/tag/ramanujan|prefLabel|Ramanujan
+http://www.semanlink.net/tag/ramanujan|broader|http://www.semanlink.net/tag/mathematicien
+http://www.semanlink.net/tag/ramanujan|creationDate|2016-04-10
+http://www.semanlink.net/tag/ramanujan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ramanujan|describedBy|https://en.wikipedia.org/wiki/Srinivasa_Ramanujan
+http://www.semanlink.net/tag/ramanujan|uri|http://www.semanlink.net/tag/ramanujan
+http://www.semanlink.net/tag/ramanujan|broader_prefLabel|Mathématicien
+http://www.semanlink.net/tag/paolo_castagna|creationTime|2012-05-04T00:42:19Z
+http://www.semanlink.net/tag/paolo_castagna|prefLabel|Paolo Castagna +http://www.semanlink.net/tag/paolo_castagna|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/paolo_castagna|creationDate|2012-05-04 +http://www.semanlink.net/tag/paolo_castagna|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paolo_castagna|uri|http://www.semanlink.net/tag/paolo_castagna +http://www.semanlink.net/tag/paolo_castagna|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/w3c_submission|creationTime|2007-09-19T01:06:10Z +http://www.semanlink.net/tag/w3c_submission|prefLabel|W3C Submission +http://www.semanlink.net/tag/w3c_submission|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/w3c_submission|creationDate|2007-09-19 +http://www.semanlink.net/tag/w3c_submission|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/w3c_submission|uri|http://www.semanlink.net/tag/w3c_submission +http://www.semanlink.net/tag/w3c_submission|broader_prefLabel|W3C +http://www.semanlink.net/tag/w3c_submission|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/w3c_submission|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/commonsense_question_answering|creationTime|2021-02-08T13:50:45Z +http://www.semanlink.net/tag/commonsense_question_answering|prefLabel|Commonsense Question Answering +http://www.semanlink.net/tag/commonsense_question_answering|broader|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/tag/commonsense_question_answering|creationDate|2021-02-08 +http://www.semanlink.net/tag/commonsense_question_answering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/commonsense_question_answering|uri|http://www.semanlink.net/tag/commonsense_question_answering +http://www.semanlink.net/tag/commonsense_question_answering|broader_prefLabel|Question Answering +http://www.semanlink.net/tag/commonsense_question_answering|broader_altLabel|QA +http://www.semanlink.net/tag/commonsense_question_answering|broader_related|http://www.semanlink.net/tag/chatbot +http://www.semanlink.net/tag/commerce_mondial|prefLabel|Commerce mondial +http://www.semanlink.net/tag/commerce_mondial|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/commerce_mondial|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/commerce_mondial|uri|http://www.semanlink.net/tag/commerce_mondial +http://www.semanlink.net/tag/commerce_mondial|broader_prefLabel|Economie +http://www.semanlink.net/tag/commerce_mondial|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/embedding_evaluation|creationTime|2018-05-07T16:13:14Z +http://www.semanlink.net/tag/embedding_evaluation|prefLabel|Embedding evaluation +http://www.semanlink.net/tag/embedding_evaluation|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/embedding_evaluation|broader|http://www.semanlink.net/tag/ml_evaluation +http://www.semanlink.net/tag/embedding_evaluation|creationDate|2018-05-07 +http://www.semanlink.net/tag/embedding_evaluation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/embedding_evaluation|uri|http://www.semanlink.net/tag/embedding_evaluation +http://www.semanlink.net/tag/embedding_evaluation|broader_prefLabel|Embeddings 
+http://www.semanlink.net/tag/embedding_evaluation|broader_prefLabel|ML: evaluation +http://www.semanlink.net/tag/embedding_evaluation|broader_altLabel|embedding +http://www.semanlink.net/tag/embedding_evaluation|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/embedding_evaluation|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/linguistique|prefLabel|Linguistique +http://www.semanlink.net/tag/linguistique|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/linguistique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linguistique|uri|http://www.semanlink.net/tag/linguistique +http://www.semanlink.net/tag/linguistique|broader_prefLabel|Science +http://www.semanlink.net/tag/linguistique|broader_altLabel|sciences +http://www.semanlink.net/tag/struts|prefLabel|Struts +http://www.semanlink.net/tag/struts|broader|http://www.semanlink.net/tag/jsp +http://www.semanlink.net/tag/struts|broader|http://www.semanlink.net/tag/mvc +http://www.semanlink.net/tag/struts|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/struts|uri|http://www.semanlink.net/tag/struts +http://www.semanlink.net/tag/struts|broader_prefLabel|JSP +http://www.semanlink.net/tag/struts|broader_prefLabel|MVC +http://www.semanlink.net/tag/junit|prefLabel|JUnit +http://www.semanlink.net/tag/junit|broader|http://www.semanlink.net/tag/java_dev +http://www.semanlink.net/tag/junit|broader|http://www.semanlink.net/tag/unit_test +http://www.semanlink.net/tag/junit|creationDate|2006-07-22 +http://www.semanlink.net/tag/junit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/junit|describedBy|http://www.junit.org/ +http://www.semanlink.net/tag/junit|uri|http://www.semanlink.net/tag/junit +http://www.semanlink.net/tag/junit|broader_prefLabel|Java dev +http://www.semanlink.net/tag/junit|broader_prefLabel|Unit test +http://www.semanlink.net/tag/magnetisme_terrestre|creationTime|2014-10-05T23:55:32Z +http://www.semanlink.net/tag/magnetisme_terrestre|prefLabel|Magnétisme terrestre +http://www.semanlink.net/tag/magnetisme_terrestre|broader|http://www.semanlink.net/tag/magnetisme +http://www.semanlink.net/tag/magnetisme_terrestre|creationDate|2014-10-05 +http://www.semanlink.net/tag/magnetisme_terrestre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/magnetisme_terrestre|uri|http://www.semanlink.net/tag/magnetisme_terrestre +http://www.semanlink.net/tag/magnetisme_terrestre|broader_prefLabel|Magnétisme +http://www.semanlink.net/tag/tesla_inc|creationTime|2018-03-07T23:29:37Z +http://www.semanlink.net/tag/tesla_inc|prefLabel|Tesla, Inc +http://www.semanlink.net/tag/tesla_inc|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/tesla_inc|related|http://www.semanlink.net/tag/andrej_karpathy +http://www.semanlink.net/tag/tesla_inc|creationDate|2018-03-07 +http://www.semanlink.net/tag/tesla_inc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tesla_inc|describedBy|https://en.wikipedia.org/wiki/Tesla,_Inc. 
+http://www.semanlink.net/tag/tesla_inc|altLabel|Tesla +http://www.semanlink.net/tag/tesla_inc|uri|http://www.semanlink.net/tag/tesla_inc +http://www.semanlink.net/tag/tesla_inc|broader_prefLabel|Automobile +http://www.semanlink.net/tag/tesla_inc|broader_altLabel|Automotive +http://www.semanlink.net/tag/film_japonais|creationTime|2007-03-03T00:34:00Z +http://www.semanlink.net/tag/film_japonais|prefLabel|Film japonais +http://www.semanlink.net/tag/film_japonais|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_japonais|broader|http://www.semanlink.net/tag/japon +http://www.semanlink.net/tag/film_japonais|creationDate|2007-03-03 +http://www.semanlink.net/tag/film_japonais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_japonais|uri|http://www.semanlink.net/tag/film_japonais +http://www.semanlink.net/tag/film_japonais|broader_prefLabel|Film +http://www.semanlink.net/tag/film_japonais|broader_prefLabel|Japon +http://www.semanlink.net/tag/film_japonais|broader_altLabel|Japan +http://www.semanlink.net/tag/rumba|creationTime|2007-04-18T13:23:06Z +http://www.semanlink.net/tag/rumba|prefLabel|Rumba +http://www.semanlink.net/tag/rumba|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/rumba|creationDate|2007-04-18 +http://www.semanlink.net/tag/rumba|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rumba|uri|http://www.semanlink.net/tag/rumba +http://www.semanlink.net/tag/rumba|broader_prefLabel|Musique +http://www.semanlink.net/tag/rumba|broader_altLabel|Music +http://www.semanlink.net/tag/code|prefLabel|Code +http://www.semanlink.net/tag/code|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/code|uri|http://www.semanlink.net/tag/code +http://www.semanlink.net/tag/histropedia|creationTime|2017-11-01T13:57:35Z +http://www.semanlink.net/tag/histropedia|prefLabel|Histropedia +http://www.semanlink.net/tag/histropedia|broader|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/histropedia|broader|http://www.semanlink.net/tag/timeline +http://www.semanlink.net/tag/histropedia|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/histropedia|related|http://www.semanlink.net/tag/wikidata +http://www.semanlink.net/tag/histropedia|creationDate|2017-11-01 +http://www.semanlink.net/tag/histropedia|comment|> Transforming Wikipedia and Wikidata into the world's first timeline of everything +http://www.semanlink.net/tag/histropedia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histropedia|homepage|http://www.histropedia.com/ +http://www.semanlink.net/tag/histropedia|uri|http://www.semanlink.net/tag/histropedia +http://www.semanlink.net/tag/histropedia|broader_prefLabel|Wikipedia +http://www.semanlink.net/tag/histropedia|broader_prefLabel|Timeline +http://www.semanlink.net/tag/histropedia|broader_prefLabel|Histoire +http://www.semanlink.net/tag/histropedia|broader_related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/semantic_web_services|prefLabel|Semantic Web Services +http://www.semanlink.net/tag/semantic_web_services|broader|http://www.semanlink.net/tag/web_services +http://www.semanlink.net/tag/semantic_web_services|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_services|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/semantic_web_services|uri|http://www.semanlink.net/tag/semantic_web_services
+http://www.semanlink.net/tag/semantic_web_services|broader_prefLabel|Web Services
+http://www.semanlink.net/tag/semantic_web_services|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semantic_web_services|broader_altLabel|WS
+http://www.semanlink.net/tag/semantic_web_services|broader_altLabel|sw
+http://www.semanlink.net/tag/semantic_web_services|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/private_equity|creationTime|2012-04-26T00:47:05Z
+http://www.semanlink.net/tag/private_equity|prefLabel|Private equity
+http://www.semanlink.net/tag/private_equity|broader|http://www.semanlink.net/tag/capitalisme_financier
+http://www.semanlink.net/tag/private_equity|creationDate|2012-04-26
+http://www.semanlink.net/tag/private_equity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/private_equity|describedBy|https://en.wikipedia.org/wiki/Private_equity
+http://www.semanlink.net/tag/private_equity|uri|http://www.semanlink.net/tag/private_equity
+http://www.semanlink.net/tag/private_equity|broader_prefLabel|Capitalisme financier
+http://www.semanlink.net/tag/fascisme|prefLabel|Fascisme
+http://www.semanlink.net/tag/fascisme|broader|http://www.semanlink.net/tag/extreme_droite
+http://www.semanlink.net/tag/fascisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fascisme|uri|http://www.semanlink.net/tag/fascisme
+http://www.semanlink.net/tag/fascisme|broader_prefLabel|Extrême droite
+http://www.semanlink.net/tag/java_web_dev|creationTime|2017-05-16T02:08:47Z
+http://www.semanlink.net/tag/java_web_dev|prefLabel|Java web dev
+http://www.semanlink.net/tag/java_web_dev|broader|http://www.semanlink.net/tag/java_dev
+http://www.semanlink.net/tag/java_web_dev|creationDate|2017-05-16
+http://www.semanlink.net/tag/java_web_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/java_web_dev|uri|http://www.semanlink.net/tag/java_web_dev
+http://www.semanlink.net/tag/java_web_dev|broader_prefLabel|Java dev
+http://www.semanlink.net/tag/photo_aerienne|creationTime|2013-09-14T00:30:34Z
+http://www.semanlink.net/tag/photo_aerienne|prefLabel|Photo aérienne
+http://www.semanlink.net/tag/photo_aerienne|broader|http://www.semanlink.net/tag/photo
+http://www.semanlink.net/tag/photo_aerienne|creationDate|2013-09-14
+http://www.semanlink.net/tag/photo_aerienne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/photo_aerienne|uri|http://www.semanlink.net/tag/photo_aerienne
+http://www.semanlink.net/tag/photo_aerienne|broader_prefLabel|Photo
+http://www.semanlink.net/tag/photo_aerienne|broader_altLabel|Images
+http://www.semanlink.net/tag/prix_nobel|prefLabel|Prix Nobel
+http://www.semanlink.net/tag/prix_nobel|broader|http://www.semanlink.net/tag/science
+http://www.semanlink.net/tag/prix_nobel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/prix_nobel|uri|http://www.semanlink.net/tag/prix_nobel
+http://www.semanlink.net/tag/prix_nobel|broader_prefLabel|Science
+http://www.semanlink.net/tag/prix_nobel|broader_altLabel|sciences
+http://www.semanlink.net/tag/patrick_gallinari|creationTime|2019-01-27T14:17:46Z
+http://www.semanlink.net/tag/patrick_gallinari|prefLabel|Patrick Gallinari
+http://www.semanlink.net/tag/patrick_gallinari|broader|http://www.semanlink.net/tag/ai_girls_and_guys
+http://www.semanlink.net/tag/patrick_gallinari|creationDate|2019-01-27 +http://www.semanlink.net/tag/patrick_gallinari|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/patrick_gallinari|uri|http://www.semanlink.net/tag/patrick_gallinari +http://www.semanlink.net/tag/patrick_gallinari|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/infini|creationTime|2013-08-07T00:44:24Z +http://www.semanlink.net/tag/infini|prefLabel|Infini +http://www.semanlink.net/tag/infini|creationDate|2013-08-07 +http://www.semanlink.net/tag/infini|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/infini|uri|http://www.semanlink.net/tag/infini +http://www.semanlink.net/tag/topic_models_word_embedding|creationTime|2017-05-20T14:48:44Z +http://www.semanlink.net/tag/topic_models_word_embedding|prefLabel|Topic Models + Word embedding +http://www.semanlink.net/tag/topic_models_word_embedding|broader|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/topic_models_word_embedding|broader|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/topic_models_word_embedding|creationDate|2017-05-20 +http://www.semanlink.net/tag/topic_models_word_embedding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/topic_models_word_embedding|uri|http://www.semanlink.net/tag/topic_models_word_embedding +http://www.semanlink.net/tag/topic_models_word_embedding|broader_prefLabel|Topic Modeling +http://www.semanlink.net/tag/topic_models_word_embedding|broader_prefLabel|Word embeddings +http://www.semanlink.net/tag/topic_models_word_embedding|broader_altLabel|Topic model +http://www.semanlink.net/tag/topic_models_word_embedding|broader_altLabel|Word Embedding +http://www.semanlink.net/tag/topic_models_word_embedding|broader_altLabel|Plongement lexical +http://www.semanlink.net/tag/topic_models_word_embedding|broader_related|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/topic_models_word_embedding|broader_related|http://www.semanlink.net/tag/andrew_mccallum +http://www.semanlink.net/tag/topic_models_word_embedding|broader_related|http://www.semanlink.net/tag/nlp_and_humanities +http://www.semanlink.net/tag/topic_models_word_embedding|broader_related|http://www.semanlink.net/tag/mallet +http://www.semanlink.net/tag/topic_models_word_embedding|broader_related|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/topic_models_word_embedding|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/topic_models_word_embedding|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/ecrevisse|creationTime|2018-02-06T11:48:28Z +http://www.semanlink.net/tag/ecrevisse|prefLabel|Écrevisse +http://www.semanlink.net/tag/ecrevisse|creationDate|2018-02-06 +http://www.semanlink.net/tag/ecrevisse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ecrevisse|describedBy|https://fr.wikipedia.org/wiki/%C3%89crevisse +http://www.semanlink.net/tag/ecrevisse|altLabel|Crayfish +http://www.semanlink.net/tag/ecrevisse|uri|http://www.semanlink.net/tag/ecrevisse +http://www.semanlink.net/tag/joseki|creationTime|2008-01-04T01:40:30Z +http://www.semanlink.net/tag/joseki|prefLabel|Joseki +http://www.semanlink.net/tag/joseki|broader|http://www.semanlink.net/tag/sparql_and_jena 
+http://www.semanlink.net/tag/joseki|related|http://www.semanlink.net/tag/arq +http://www.semanlink.net/tag/joseki|creationDate|2008-01-04 +http://www.semanlink.net/tag/joseki|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/joseki|homepage|http://www.joseki.org/ +http://www.semanlink.net/tag/joseki|uri|http://www.semanlink.net/tag/joseki +http://www.semanlink.net/tag/joseki|broader_prefLabel|SPARQL AND Jena +http://www.semanlink.net/tag/joseki|broader_related|http://www.semanlink.net/tag/andy_seaborne +http://www.semanlink.net/tag/feature_selection|creationTime|2015-10-21T17:06:46Z +http://www.semanlink.net/tag/feature_selection|prefLabel|Feature selection +http://www.semanlink.net/tag/feature_selection|broader|http://www.semanlink.net/tag/features_machine_learning +http://www.semanlink.net/tag/feature_selection|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/feature_selection|related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/feature_selection|related|http://www.semanlink.net/tag/dimensionality_reduction +http://www.semanlink.net/tag/feature_selection|creationDate|2015-10-21 +http://www.semanlink.net/tag/feature_selection|comment|process of selecting a subset of relevant features for use in model construction +http://www.semanlink.net/tag/feature_selection|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/feature_selection|describedBy|https://en.wikipedia.org/wiki/Feature_selection +http://www.semanlink.net/tag/feature_selection|uri|http://www.semanlink.net/tag/feature_selection +http://www.semanlink.net/tag/feature_selection|broader_prefLabel|Features (Machine Learning) +http://www.semanlink.net/tag/feature_selection|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/virtuoso_open_source_edition|creationTime|2008-08-26T11:54:06Z +http://www.semanlink.net/tag/virtuoso_open_source_edition|prefLabel|Virtuoso Open-Source Edition +http://www.semanlink.net/tag/virtuoso_open_source_edition|broader|http://www.semanlink.net/tag/openlink +http://www.semanlink.net/tag/virtuoso_open_source_edition|broader|http://www.semanlink.net/tag/virtuoso +http://www.semanlink.net/tag/virtuoso_open_source_edition|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/virtuoso_open_source_edition|creationDate|2008-08-26 +http://www.semanlink.net/tag/virtuoso_open_source_edition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/virtuoso_open_source_edition|uri|http://www.semanlink.net/tag/virtuoso_open_source_edition +http://www.semanlink.net/tag/virtuoso_open_source_edition|broader_prefLabel|OpenLink Software +http://www.semanlink.net/tag/virtuoso_open_source_edition|broader_prefLabel|Virtuoso +http://www.semanlink.net/tag/virtuoso_open_source_edition|broader_prefLabel|Open Source +http://www.semanlink.net/tag/virtuoso_open_source_edition|broader_related|http://www.semanlink.net/tag/yrjana_rankka +http://www.semanlink.net/tag/virtuoso_open_source_edition|broader_related|http://www.semanlink.net/tag/orri_erling +http://www.semanlink.net/tag/virtuoso_open_source_edition|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/robobees|creationTime|2014-04-29T01:30:21Z +http://www.semanlink.net/tag/robobees|prefLabel|Robobees +http://www.semanlink.net/tag/robobees|related|http://www.semanlink.net/tag/abeille 
+http://www.semanlink.net/tag/robobees|creationDate|2014-04-29 +http://www.semanlink.net/tag/robobees|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/robobees|uri|http://www.semanlink.net/tag/robobees +http://www.semanlink.net/tag/rdfa_1_1_lite|creationTime|2011-10-23T17:54:46Z +http://www.semanlink.net/tag/rdfa_1_1_lite|prefLabel|RDFa 1.1 Lite +http://www.semanlink.net/tag/rdfa_1_1_lite|broader|http://www.semanlink.net/tag/rdfa_1_1 +http://www.semanlink.net/tag/rdfa_1_1_lite|broader|http://www.semanlink.net/tag/rdfa_lite +http://www.semanlink.net/tag/rdfa_1_1_lite|related|http://www.semanlink.net/tag/ben_adida +http://www.semanlink.net/tag/rdfa_1_1_lite|creationDate|2011-10-23 +http://www.semanlink.net/tag/rdfa_1_1_lite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdfa_1_1_lite|homepage|http://www.w3.org/TR/rdfa-lite/ +http://www.semanlink.net/tag/rdfa_1_1_lite|uri|http://www.semanlink.net/tag/rdfa_1_1_lite +http://www.semanlink.net/tag/rdfa_1_1_lite|broader_prefLabel|RDFa 1.1 +http://www.semanlink.net/tag/rdfa_1_1_lite|broader_prefLabel|RDFa Lite +http://www.semanlink.net/tag/gina_lollobrigida|prefLabel|Gina Lollobrigida +http://www.semanlink.net/tag/gina_lollobrigida|broader|http://www.semanlink.net/tag/actrice +http://www.semanlink.net/tag/gina_lollobrigida|creationDate|2006-12-19 +http://www.semanlink.net/tag/gina_lollobrigida|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gina_lollobrigida|uri|http://www.semanlink.net/tag/gina_lollobrigida +http://www.semanlink.net/tag/gina_lollobrigida|broader_prefLabel|Actrice +http://www.semanlink.net/tag/word_mover_s_distance|creationTime|2017-11-12T02:53:02Z +http://www.semanlink.net/tag/word_mover_s_distance|prefLabel|Word Mover’s Distance +http://www.semanlink.net/tag/word_mover_s_distance|broader|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/word_mover_s_distance|broader|http://www.semanlink.net/tag/using_word_embedding +http://www.semanlink.net/tag/word_mover_s_distance|creationDate|2017-11-12 +http://www.semanlink.net/tag/word_mover_s_distance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/word_mover_s_distance|uri|http://www.semanlink.net/tag/word_mover_s_distance +http://www.semanlink.net/tag/word_mover_s_distance|broader_prefLabel|Text Similarity +http://www.semanlink.net/tag/word_mover_s_distance|broader_prefLabel|Using word embeddings +http://www.semanlink.net/tag/word_mover_s_distance|broader_related|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/tag/word_mover_s_distance|broader_related|http://www.semanlink.net/tag/nlp_and_search +http://www.semanlink.net/tag/word_mover_s_distance|broader_related|http://www.semanlink.net/tag/okapi_bm25 +http://www.semanlink.net/tag/lee_feigenbaum|creationTime|2007-05-18T21:53:44Z +http://www.semanlink.net/tag/lee_feigenbaum|prefLabel|Lee Feigenbaum +http://www.semanlink.net/tag/lee_feigenbaum|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/lee_feigenbaum|related|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/lee_feigenbaum|related|http://www.semanlink.net/tag/elias_torres +http://www.semanlink.net/tag/lee_feigenbaum|creationDate|2007-05-18 +http://www.semanlink.net/tag/lee_feigenbaum|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lee_feigenbaum|altLabel|TechnicaLee Speaking 
+http://www.semanlink.net/tag/lee_feigenbaum|uri|http://www.semanlink.net/tag/lee_feigenbaum +http://www.semanlink.net/tag/lee_feigenbaum|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/content_sharing|prefLabel|Content Sharing +http://www.semanlink.net/tag/content_sharing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/content_sharing|uri|http://www.semanlink.net/tag/content_sharing +http://www.semanlink.net/tag/big_data_semantic_web|creationTime|2013-08-08T23:59:33Z +http://www.semanlink.net/tag/big_data_semantic_web|prefLabel|Big data & semantic web +http://www.semanlink.net/tag/big_data_semantic_web|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/big_data_semantic_web|broader|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/big_data_semantic_web|creationDate|2013-08-08 +http://www.semanlink.net/tag/big_data_semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/big_data_semantic_web|uri|http://www.semanlink.net/tag/big_data_semantic_web +http://www.semanlink.net/tag/big_data_semantic_web|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/big_data_semantic_web|broader_prefLabel|Big Data +http://www.semanlink.net/tag/big_data_semantic_web|broader_altLabel|sw +http://www.semanlink.net/tag/big_data_semantic_web|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/big_data_semantic_web|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/nsa|creationTime|2011-02-04T17:08:38Z +http://www.semanlink.net/tag/nsa|prefLabel|NSA +http://www.semanlink.net/tag/nsa|broader|http://www.semanlink.net/tag/services_secrets +http://www.semanlink.net/tag/nsa|broader|http://www.semanlink.net/tag/cybersurveillance +http://www.semanlink.net/tag/nsa|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/nsa|creationDate|2011-02-04 +http://www.semanlink.net/tag/nsa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nsa|uri|http://www.semanlink.net/tag/nsa +http://www.semanlink.net/tag/nsa|broader_prefLabel|Services secrets +http://www.semanlink.net/tag/nsa|broader_prefLabel|Cybersurveillance +http://www.semanlink.net/tag/nsa|broader_prefLabel|USA +http://www.semanlink.net/tag/nsa|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/nsa|broader_altLabel|United States +http://www.semanlink.net/tag/flaubert|creationTime|2008-07-04T22:40:04Z +http://www.semanlink.net/tag/flaubert|prefLabel|Flaubert +http://www.semanlink.net/tag/flaubert|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/flaubert|creationDate|2008-07-04 +http://www.semanlink.net/tag/flaubert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/flaubert|uri|http://www.semanlink.net/tag/flaubert +http://www.semanlink.net/tag/flaubert|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/oleoduc|prefLabel|Oléoduc +http://www.semanlink.net/tag/oleoduc|broader|http://www.semanlink.net/tag/petrole +http://www.semanlink.net/tag/oleoduc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/oleoduc|altLabel|Pipeline +http://www.semanlink.net/tag/oleoduc|uri|http://www.semanlink.net/tag/oleoduc +http://www.semanlink.net/tag/oleoduc|broader_prefLabel|Pétrole +http://www.semanlink.net/tag/yoshua_bengio|creationTime|2017-08-20T23:41:00Z +http://www.semanlink.net/tag/yoshua_bengio|prefLabel|Yoshua Bengio 
+http://www.semanlink.net/tag/yoshua_bengio|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/yoshua_bengio|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/yoshua_bengio|related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/yoshua_bengio|creationDate|2017-08-20 +http://www.semanlink.net/tag/yoshua_bengio|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yoshua_bengio|homepage|http://www.iro.umontreal.ca/~bengioy/yoshua_en/index.html +http://www.semanlink.net/tag/yoshua_bengio|describedBy|https://fr.wikipedia.org/wiki/Yoshua_Bengio +http://www.semanlink.net/tag/yoshua_bengio|weblog|https://yoshuabengio.org/ +http://www.semanlink.net/tag/yoshua_bengio|uri|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/yoshua_bengio|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/yoshua_bengio|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/chute_de_l_empire_romain|creationTime|2021-08-18T10:56:23Z +http://www.semanlink.net/tag/chute_de_l_empire_romain|prefLabel|Chute de l'empire romain +http://www.semanlink.net/tag/chute_de_l_empire_romain|broader|http://www.semanlink.net/tag/empire_romain +http://www.semanlink.net/tag/chute_de_l_empire_romain|related|http://www.semanlink.net/tag/grandes_invasions +http://www.semanlink.net/tag/chute_de_l_empire_romain|creationDate|2021-08-18 +http://www.semanlink.net/tag/chute_de_l_empire_romain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chute_de_l_empire_romain|uri|http://www.semanlink.net/tag/chute_de_l_empire_romain +http://www.semanlink.net/tag/chute_de_l_empire_romain|broader_prefLabel|Empire romain +http://www.semanlink.net/tag/nlp_text_classification|creationTime|2014-03-15T17:42:01Z +http://www.semanlink.net/tag/nlp_text_classification|prefLabel|Text Classification +http://www.semanlink.net/tag/nlp_text_classification|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/nlp_text_classification|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/nlp_text_classification|creationDate|2014-03-15 +http://www.semanlink.net/tag/nlp_text_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_text_classification|uri|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/tag/nlp_text_classification|broader_prefLabel|Classification +http://www.semanlink.net/tag/nlp_text_classification|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/elevage|creationTime|2013-02-14T08:44:55Z +http://www.semanlink.net/tag/elevage|prefLabel|Elevage +http://www.semanlink.net/tag/elevage|creationDate|2013-02-14 +http://www.semanlink.net/tag/elevage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elevage|uri|http://www.semanlink.net/tag/elevage +http://www.semanlink.net/tag/subtitles|creationTime|2007-09-15T23:06:59Z +http://www.semanlink.net/tag/subtitles|prefLabel|Subtitles +http://www.semanlink.net/tag/subtitles|broader|http://www.semanlink.net/tag/digital_video +http://www.semanlink.net/tag/subtitles|creationDate|2007-09-15 +http://www.semanlink.net/tag/subtitles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/subtitles|uri|http://www.semanlink.net/tag/subtitles 
+http://www.semanlink.net/tag/subtitles|broader_prefLabel|Digital Video +http://www.semanlink.net/tag/google_spreadsheets|creationTime|2008-10-18T10:26:23Z +http://www.semanlink.net/tag/google_spreadsheets|prefLabel|Google Spreadsheets +http://www.semanlink.net/tag/google_spreadsheets|broader|http://www.semanlink.net/tag/spreadsheets +http://www.semanlink.net/tag/google_spreadsheets|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_spreadsheets|creationDate|2008-10-18 +http://www.semanlink.net/tag/google_spreadsheets|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_spreadsheets|uri|http://www.semanlink.net/tag/google_spreadsheets +http://www.semanlink.net/tag/google_spreadsheets|broader_prefLabel|Spreadsheets +http://www.semanlink.net/tag/google_spreadsheets|broader_prefLabel|Google +http://www.semanlink.net/tag/google_spreadsheets|broader_altLabel|Spreadsheet +http://www.semanlink.net/tag/google_spreadsheets|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/langues_vivantes|prefLabel|Langues vivantes +http://www.semanlink.net/tag/langues_vivantes|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/langues_vivantes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/langues_vivantes|uri|http://www.semanlink.net/tag/langues_vivantes +http://www.semanlink.net/tag/langues_vivantes|broader_prefLabel|Langues +http://www.semanlink.net/tag/bigtable|creationTime|2013-02-18T11:21:48Z +http://www.semanlink.net/tag/bigtable|prefLabel|Bigtable +http://www.semanlink.net/tag/bigtable|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/bigtable|broader|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/bigtable|creationDate|2013-02-18 +http://www.semanlink.net/tag/bigtable|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bigtable|uri|http://www.semanlink.net/tag/bigtable +http://www.semanlink.net/tag/bigtable|broader_prefLabel|Google +http://www.semanlink.net/tag/bigtable|broader_prefLabel|Big Data +http://www.semanlink.net/tag/bigtable|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/bigtable|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/adolescents|prefLabel|Adolescents +http://www.semanlink.net/tag/adolescents|broader|http://www.semanlink.net/tag/jeunesse +http://www.semanlink.net/tag/adolescents|broader|http://www.semanlink.net/tag/societe +http://www.semanlink.net/tag/adolescents|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/adolescents|uri|http://www.semanlink.net/tag/adolescents +http://www.semanlink.net/tag/adolescents|broader_prefLabel|Jeunesse +http://www.semanlink.net/tag/adolescents|broader_prefLabel|Société +http://www.semanlink.net/tag/liban|prefLabel|Liban +http://www.semanlink.net/tag/liban|creationDate|2006-09-22 +http://www.semanlink.net/tag/liban|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/liban|uri|http://www.semanlink.net/tag/liban +http://www.semanlink.net/tag/java_8_lambdas|creationTime|2016-11-17T02:38:10Z +http://www.semanlink.net/tag/java_8_lambdas|prefLabel|Java 8 lambdas +http://www.semanlink.net/tag/java_8_lambdas|broader|http://www.semanlink.net/tag/lambda_calculus +http://www.semanlink.net/tag/java_8_lambdas|broader|http://www.semanlink.net/tag/java_8 
+http://www.semanlink.net/tag/java_8_lambdas|creationDate|2016-11-17 +http://www.semanlink.net/tag/java_8_lambdas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_8_lambdas|uri|http://www.semanlink.net/tag/java_8_lambdas +http://www.semanlink.net/tag/java_8_lambdas|broader_prefLabel|Lambda calculus +http://www.semanlink.net/tag/java_8_lambdas|broader_prefLabel|Java 8 +http://www.semanlink.net/tag/especes_menacees|prefLabel|Espèces menacées +http://www.semanlink.net/tag/especes_menacees|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/especes_menacees|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/especes_menacees|related|http://www.semanlink.net/tag/disparition_d_especes +http://www.semanlink.net/tag/especes_menacees|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/especes_menacees|altLabel|Endangered Species +http://www.semanlink.net/tag/especes_menacees|uri|http://www.semanlink.net/tag/especes_menacees +http://www.semanlink.net/tag/especes_menacees|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/especes_menacees|broader_prefLabel|Écologie +http://www.semanlink.net/tag/thewebconf_2018|creationTime|2018-01-27T15:10:26Z +http://www.semanlink.net/tag/thewebconf_2018|prefLabel|TheWebConf 2018 +http://www.semanlink.net/tag/thewebconf_2018|broader|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/thewebconf_2018|broader|http://www.semanlink.net/tag/j_y_etais +http://www.semanlink.net/tag/thewebconf_2018|creationDate|2018-01-27 +http://www.semanlink.net/tag/thewebconf_2018|comment|">Bridging natural and artificial intelligence worldwide + +- [keynotes](https://www2018.thewebconf.org/keynotes/) +- [panels](https://www2018.thewebconf.org/plenary-panels/) +- [video](http://thewebconf.webcastor.tv) + + + +" +http://www.semanlink.net/tag/thewebconf_2018|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thewebconf_2018|homepage|https://www2018.thewebconf.org/ +http://www.semanlink.net/tag/thewebconf_2018|altLabel|WWW 2018 +http://www.semanlink.net/tag/thewebconf_2018|uri|http://www.semanlink.net/tag/thewebconf_2018 +http://www.semanlink.net/tag/thewebconf_2018|broader_prefLabel|TheWebConf +http://www.semanlink.net/tag/thewebconf_2018|broader_prefLabel|J'y étais +http://www.semanlink.net/tag/thewebconf_2018|broader_altLabel|WWW Conference +http://www.semanlink.net/tag/tchad|prefLabel|Tchad +http://www.semanlink.net/tag/tchad|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/tchad|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tchad|uri|http://www.semanlink.net/tag/tchad +http://www.semanlink.net/tag/tchad|broader_prefLabel|Afrique +http://www.semanlink.net/tag/tchad|broader_altLabel|Africa +http://www.semanlink.net/tag/markov_model|creationTime|2018-11-15T01:03:08Z +http://www.semanlink.net/tag/markov_model|prefLabel|Markov model +http://www.semanlink.net/tag/markov_model|broader|http://www.semanlink.net/tag/markov +http://www.semanlink.net/tag/markov_model|creationDate|2018-11-15 +http://www.semanlink.net/tag/markov_model|comment|Stochastic model used to model randomly changing systems, such that future states depend only on the current state, not on the events that occurred before it (Markov property). 
+http://www.semanlink.net/tag/markov_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/markov_model|describedBy|https://en.wikipedia.org/wiki/Markov_model +http://www.semanlink.net/tag/markov_model|uri|http://www.semanlink.net/tag/markov_model +http://www.semanlink.net/tag/markov_model|broader_prefLabel|Markov +http://www.semanlink.net/tag/architecture|prefLabel|Architecture +http://www.semanlink.net/tag/architecture|broader|http://www.semanlink.net/tag/art +http://www.semanlink.net/tag/architecture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/architecture|uri|http://www.semanlink.net/tag/architecture +http://www.semanlink.net/tag/architecture|broader_prefLabel|Art +http://www.semanlink.net/tag/sem_web_context|creationTime|2008-11-07T13:48:43Z +http://www.semanlink.net/tag/sem_web_context|prefLabel|Sem web: context +http://www.semanlink.net/tag/sem_web_context|broader|http://www.semanlink.net/tag/semantic_web_dev +http://www.semanlink.net/tag/sem_web_context|creationDate|2008-11-07 +http://www.semanlink.net/tag/sem_web_context|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sem_web_context|uri|http://www.semanlink.net/tag/sem_web_context +http://www.semanlink.net/tag/sem_web_context|broader_prefLabel|Semantic Web Dev +http://www.semanlink.net/tag/philippe_cudre_mauroux|creationTime|2012-05-31T12:12:40Z +http://www.semanlink.net/tag/philippe_cudre_mauroux|prefLabel|Philippe Cudré-Mauroux +http://www.semanlink.net/tag/philippe_cudre_mauroux|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/philippe_cudre_mauroux|creationDate|2012-05-31 +http://www.semanlink.net/tag/philippe_cudre_mauroux|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/philippe_cudre_mauroux|uri|http://www.semanlink.net/tag/philippe_cudre_mauroux +http://www.semanlink.net/tag/philippe_cudre_mauroux|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/bible|prefLabel|Bible +http://www.semanlink.net/tag/bible|broader|http://www.semanlink.net/tag/judaisme +http://www.semanlink.net/tag/bible|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bible|uri|http://www.semanlink.net/tag/bible +http://www.semanlink.net/tag/bible|broader_prefLabel|Judaïsme +http://www.semanlink.net/tag/carrot2|creationTime|2017-05-23T12:15:27Z +http://www.semanlink.net/tag/carrot2|prefLabel|Carrot2 +http://www.semanlink.net/tag/carrot2|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/carrot2|broader|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/carrot2|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/carrot2|related|http://www.semanlink.net/tag/conceptual_clustering +http://www.semanlink.net/tag/carrot2|related|http://www.semanlink.net/tag/data_visualization_tools +http://www.semanlink.net/tag/carrot2|creationDate|2017-05-23 +http://www.semanlink.net/tag/carrot2|comment|"Open Source Search Results Clustering Engine. + +It can automatically organize small collections of documents into thematic categories." 
+http://www.semanlink.net/tag/carrot2|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/carrot2|homepage|http://project.carrot2.org +http://www.semanlink.net/tag/carrot2|uri|http://www.semanlink.net/tag/carrot2 +http://www.semanlink.net/tag/carrot2|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/carrot2|broader_prefLabel|Clustering of text documents +http://www.semanlink.net/tag/carrot2|broader_prefLabel|Open Source +http://www.semanlink.net/tag/carrot2|broader_altLabel|Text Clustering +http://www.semanlink.net/tag/carrot2|broader_related|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/models_of_consciousness|creationTime|2013-11-30T21:59:21Z +http://www.semanlink.net/tag/models_of_consciousness|prefLabel|Models of consciousness +http://www.semanlink.net/tag/models_of_consciousness|broader|http://www.semanlink.net/tag/conscience +http://www.semanlink.net/tag/models_of_consciousness|creationDate|2013-11-30 +http://www.semanlink.net/tag/models_of_consciousness|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/models_of_consciousness|uri|http://www.semanlink.net/tag/models_of_consciousness +http://www.semanlink.net/tag/models_of_consciousness|broader_prefLabel|Consciousness +http://www.semanlink.net/tag/models_of_consciousness|broader_altLabel|Conscience +http://www.semanlink.net/tag/models_of_consciousness|broader_related|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/consensus|creationTime|2020-01-24T13:25:00Z +http://www.semanlink.net/tag/consensus|prefLabel|Consensus +http://www.semanlink.net/tag/consensus|creationDate|2020-01-24 +http://www.semanlink.net/tag/consensus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/consensus|uri|http://www.semanlink.net/tag/consensus +http://www.semanlink.net/tag/reptile|creationTime|2020-05-12T12:52:44Z +http://www.semanlink.net/tag/reptile|prefLabel|Reptile +http://www.semanlink.net/tag/reptile|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/reptile|creationDate|2020-05-12 +http://www.semanlink.net/tag/reptile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reptile|uri|http://www.semanlink.net/tag/reptile +http://www.semanlink.net/tag/reptile|broader_prefLabel|Animal +http://www.semanlink.net/tag/sida|prefLabel|Sida +http://www.semanlink.net/tag/sida|broader|http://www.semanlink.net/tag/maladie +http://www.semanlink.net/tag/sida|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/sida|broader|http://www.semanlink.net/tag/immune_system +http://www.semanlink.net/tag/sida|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sida|altLabel|VIH +http://www.semanlink.net/tag/sida|altLabel|AIDS +http://www.semanlink.net/tag/sida|altLabel|HIV +http://www.semanlink.net/tag/sida|uri|http://www.semanlink.net/tag/sida +http://www.semanlink.net/tag/sida|broader_prefLabel|Maladie +http://www.semanlink.net/tag/sida|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/sida|broader_prefLabel|Système immunitaire +http://www.semanlink.net/tag/sida|broader_prefLabel|Immune system +http://www.semanlink.net/tag/sida|broader_related|http://www.semanlink.net/tag/virus +http://www.semanlink.net/tag/w3c_working_group|creationTime|2012-07-30T23:53:12Z +http://www.semanlink.net/tag/w3c_working_group|prefLabel|W3C Working group 
+http://www.semanlink.net/tag/w3c_working_group|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/w3c_working_group|creationDate|2012-07-30 +http://www.semanlink.net/tag/w3c_working_group|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/w3c_working_group|uri|http://www.semanlink.net/tag/w3c_working_group +http://www.semanlink.net/tag/w3c_working_group|broader_prefLabel|W3C +http://www.semanlink.net/tag/w3c_working_group|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/w3c_working_group|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/gpt_3|creationTime|2020-11-08T10:18:00Z +http://www.semanlink.net/tag/gpt_3|prefLabel|GPT-3 +http://www.semanlink.net/tag/gpt_3|broader|http://www.semanlink.net/tag/openai_gpt +http://www.semanlink.net/tag/gpt_3|creationDate|2020-11-08 +http://www.semanlink.net/tag/gpt_3|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gpt_3|uri|http://www.semanlink.net/tag/gpt_3 +http://www.semanlink.net/tag/gpt_3|broader_prefLabel|OpenAI GPT +http://www.semanlink.net/tag/realite_virtuelle|prefLabel|Réalité virtuelle +http://www.semanlink.net/tag/realite_virtuelle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/realite_virtuelle|uri|http://www.semanlink.net/tag/realite_virtuelle +http://www.semanlink.net/tag/sparse_dictionary_learning|creationTime|2014-10-06T00:58:02Z +http://www.semanlink.net/tag/sparse_dictionary_learning|prefLabel|Sparse coding +http://www.semanlink.net/tag/sparse_dictionary_learning|broader|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/sparse_dictionary_learning|broader|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/tag/sparse_dictionary_learning|broader|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/sparse_dictionary_learning|related|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/sparse_dictionary_learning|creationDate|2014-10-06 +http://www.semanlink.net/tag/sparse_dictionary_learning|comment|"Dictionary learning, or sparse coding, tries to learn a sparse linear code to represent the +given data succinctly. + +Unsupervised learning algo. 
Images -> edge detection (similar to primary visual cortex) + +" +http://www.semanlink.net/tag/sparse_dictionary_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparse_dictionary_learning|describedBy|https://en.wikipedia.org/wiki/Sparse_dictionary_learning +http://www.semanlink.net/tag/sparse_dictionary_learning|altLabel|Sparse dictionary learning +http://www.semanlink.net/tag/sparse_dictionary_learning|uri|http://www.semanlink.net/tag/sparse_dictionary_learning +http://www.semanlink.net/tag/sparse_dictionary_learning|broader_prefLabel|Unsupervised machine learning +http://www.semanlink.net/tag/sparse_dictionary_learning|broader_prefLabel|Representation learning +http://www.semanlink.net/tag/sparse_dictionary_learning|broader_prefLabel|Feature learning +http://www.semanlink.net/tag/sparse_dictionary_learning|broader_altLabel|Representation learning +http://www.semanlink.net/tag/sparse_dictionary_learning|broader_related|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/sparse_dictionary_learning|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/sorcellerie|prefLabel|Sorcellerie +http://www.semanlink.net/tag/sorcellerie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sorcellerie|uri|http://www.semanlink.net/tag/sorcellerie +http://www.semanlink.net/tag/roman|creationTime|2008-05-31T14:54:58Z +http://www.semanlink.net/tag/roman|prefLabel|Roman +http://www.semanlink.net/tag/roman|broader|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/roman|creationDate|2008-05-31 +http://www.semanlink.net/tag/roman|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/roman|uri|http://www.semanlink.net/tag/roman +http://www.semanlink.net/tag/roman|broader_prefLabel|Livre +http://www.semanlink.net/tag/roman|broader_altLabel|Livres +http://www.semanlink.net/tag/earth_map|prefLabel|Earth map +http://www.semanlink.net/tag/earth_map|broader|http://www.semanlink.net/tag/la_terre_vue_du_ciel +http://www.semanlink.net/tag/earth_map|broader|http://www.semanlink.net/tag/carte +http://www.semanlink.net/tag/earth_map|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/earth_map|uri|http://www.semanlink.net/tag/earth_map +http://www.semanlink.net/tag/earth_map|broader_prefLabel|La Terre vue du ciel +http://www.semanlink.net/tag/earth_map|broader_prefLabel|Carte +http://www.semanlink.net/tag/variational_autoencoder_vae|creationTime|2018-05-29T15:06:15Z +http://www.semanlink.net/tag/variational_autoencoder_vae|prefLabel|Variational autoencoder (VAE) +http://www.semanlink.net/tag/variational_autoencoder_vae|broader|http://www.semanlink.net/tag/autoencoder +http://www.semanlink.net/tag/variational_autoencoder_vae|broader|http://www.semanlink.net/tag/deep_latent_variable_models +http://www.semanlink.net/tag/variational_autoencoder_vae|creationDate|2018-05-29 +http://www.semanlink.net/tag/variational_autoencoder_vae|comment|"Same architecture as autoencoder, but make strong assumptions concerning the distribution of latent variables. 
They use variational approach for latent representation learning (""Stochastic Gradient Variational Bayes"" (SGVB) training algorithm)" +http://www.semanlink.net/tag/variational_autoencoder_vae|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/variational_autoencoder_vae|describedBy|https://en.wikipedia.org/wiki/Autoencoder#Variational_autoencoder_(VAE) +http://www.semanlink.net/tag/variational_autoencoder_vae|altLabel|VAE +http://www.semanlink.net/tag/variational_autoencoder_vae|uri|http://www.semanlink.net/tag/variational_autoencoder_vae +http://www.semanlink.net/tag/variational_autoencoder_vae|broader_prefLabel|Autoencoder +http://www.semanlink.net/tag/variational_autoencoder_vae|broader_prefLabel|Deep latent variable models +http://www.semanlink.net/tag/variational_autoencoder_vae|broader_related|http://www.semanlink.net/tag/encoder_decoder_architecture +http://www.semanlink.net/tag/variational_autoencoder_vae|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/actualite|creationTime|2007-11-30T14:16:46Z +http://www.semanlink.net/tag/actualite|prefLabel|Actualité +http://www.semanlink.net/tag/actualite|creationDate|2007-11-30 +http://www.semanlink.net/tag/actualite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/actualite|uri|http://www.semanlink.net/tag/actualite +http://www.semanlink.net/tag/syrian_civil_war|creationTime|2014-11-08T14:11:00Z +http://www.semanlink.net/tag/syrian_civil_war|prefLabel|Syrian Civil War +http://www.semanlink.net/tag/syrian_civil_war|broader|http://www.semanlink.net/tag/syrie +http://www.semanlink.net/tag/syrian_civil_war|creationDate|2014-11-08 +http://www.semanlink.net/tag/syrian_civil_war|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/syrian_civil_war|describedBy|https://en.wikipedia.org/wiki/Syrian_Civil_War +http://www.semanlink.net/tag/syrian_civil_war|uri|http://www.semanlink.net/tag/syrian_civil_war +http://www.semanlink.net/tag/syrian_civil_war|broader_prefLabel|Syrie +http://www.semanlink.net/tag/genocide|prefLabel|Génocide +http://www.semanlink.net/tag/genocide|broader|http://www.semanlink.net/tag/crime_contre_l_humanite +http://www.semanlink.net/tag/genocide|broader|http://www.semanlink.net/tag/horreur +http://www.semanlink.net/tag/genocide|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/genocide|uri|http://www.semanlink.net/tag/genocide +http://www.semanlink.net/tag/genocide|broader_prefLabel|Crime contre l'Humanité +http://www.semanlink.net/tag/genocide|broader_prefLabel|Horreur +http://www.semanlink.net/tag/jazz|creationTime|2009-11-05T19:10:31Z +http://www.semanlink.net/tag/jazz|prefLabel|Jazz +http://www.semanlink.net/tag/jazz|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/jazz|creationDate|2009-11-05 +http://www.semanlink.net/tag/jazz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jazz|uri|http://www.semanlink.net/tag/jazz +http://www.semanlink.net/tag/jazz|broader_prefLabel|Musique +http://www.semanlink.net/tag/jazz|broader_altLabel|Music +http://www.semanlink.net/tag/finance|creationTime|2009-08-19T21:58:35Z +http://www.semanlink.net/tag/finance|prefLabel|Finance +http://www.semanlink.net/tag/finance|creationDate|2009-08-19 +http://www.semanlink.net/tag/finance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/finance|uri|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/nlp_harvard|creationTime|2021-04-11T16:12:06Z +http://www.semanlink.net/tag/nlp_harvard|prefLabel|NLP@Harvard +http://www.semanlink.net/tag/nlp_harvard|broader|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/tag/nlp_harvard|broader|http://www.semanlink.net/tag/harvard +http://www.semanlink.net/tag/nlp_harvard|creationDate|2021-04-11 +http://www.semanlink.net/tag/nlp_harvard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_harvard|uri|http://www.semanlink.net/tag/nlp_harvard +http://www.semanlink.net/tag/nlp_harvard|broader_prefLabel|NLP Teams +http://www.semanlink.net/tag/nlp_harvard|broader_prefLabel|Harvard +http://www.semanlink.net/tag/nlp_harvard|broader_altLabel|NLP Groups +http://www.semanlink.net/tag/zemanta|creationTime|2008-12-10T14:13:59Z +http://www.semanlink.net/tag/zemanta|prefLabel|Zemanta +http://www.semanlink.net/tag/zemanta|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/zemanta|broader|http://www.semanlink.net/tag/keep_new +http://www.semanlink.net/tag/zemanta|broader|http://www.semanlink.net/tag/semantic_tagging +http://www.semanlink.net/tag/zemanta|related|http://www.semanlink.net/tag/calais +http://www.semanlink.net/tag/zemanta|related|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/zemanta|creationDate|2008-12-10 +http://www.semanlink.net/tag/zemanta|comment|Zemanta is a tool that looks over your shoulder while you blog and gives you tips and advice, suggests related content and pictures and makes sure your posts get promoted as they deserve to be. We at Zemanta are thinking hard to help make blogging easier for you. We're engineering better creative tools to help you get the most out of your blogging time. 
+http://www.semanlink.net/tag/zemanta|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zemanta|describedBy|http://www.zemanta.com/ +http://www.semanlink.net/tag/zemanta|uri|http://www.semanlink.net/tag/zemanta +http://www.semanlink.net/tag/zemanta|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/zemanta|broader_prefLabel|Keep new +http://www.semanlink.net/tag/zemanta|broader_prefLabel|Semantic tagging +http://www.semanlink.net/tag/java_8|creationTime|2013-03-28T23:49:50Z +http://www.semanlink.net/tag/java_8|prefLabel|Java 8 +http://www.semanlink.net/tag/java_8|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java_8|creationDate|2013-03-28 +http://www.semanlink.net/tag/java_8|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_8|uri|http://www.semanlink.net/tag/java_8 +http://www.semanlink.net/tag/java_8|broader_prefLabel|Java +http://www.semanlink.net/tag/pandas|creationTime|2016-02-09T11:23:04Z +http://www.semanlink.net/tag/pandas|prefLabel|pandas +http://www.semanlink.net/tag/pandas|broader|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/tag/pandas|creationDate|2016-02-09 +http://www.semanlink.net/tag/pandas|comment|"""Python Data Analysis Library"" + +2 key data structures: + +- Series (1 dimensional labelled / indexed array) +- Dataframe (~ Excel workbook: column names referring to columns and rows, which can be accessed with use of row numbers) +" +http://www.semanlink.net/tag/pandas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pandas|describedBy|http://pandas.pydata.org/ +http://www.semanlink.net/tag/pandas|uri|http://www.semanlink.net/tag/pandas +http://www.semanlink.net/tag/pandas|broader_prefLabel|Python 4 Data science +http://www.semanlink.net/tag/physique|prefLabel|Physique +http://www.semanlink.net/tag/physique|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/physique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/physique|altLabel|Physics +http://www.semanlink.net/tag/physique|uri|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/physique|broader_prefLabel|Science +http://www.semanlink.net/tag/physique|broader_altLabel|sciences +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|creationTime|2007-05-22T23:02:54Z +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|prefLabel|Un ivrogne dans la brousse +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|broader|http://www.semanlink.net/tag/litterature_africaine +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|broader|http://www.semanlink.net/tag/incipit +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|broader|http://www.semanlink.net/tag/nigeria +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|creationDate|2007-05-22 +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|comment|"Amos Tutuola + +> I was a palm-wine drinkard since I was a boy of ten years of age. I had no other work more than to drink palm-wine in my life. 
In those days we did not know other money except cowries, so that everything was very cheap, and my father was the richest man in town" +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|uri|http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|broader_prefLabel|Littérature africaine +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|broader_prefLabel|Incipit +http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse|broader_prefLabel|Nigeria +http://www.semanlink.net/tag/image_recognition|creationTime|2017-08-24T00:49:06Z +http://www.semanlink.net/tag/image_recognition|prefLabel|Image recognition +http://www.semanlink.net/tag/image_recognition|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/image_recognition|creationDate|2017-08-24 +http://www.semanlink.net/tag/image_recognition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/image_recognition|uri|http://www.semanlink.net/tag/image_recognition +http://www.semanlink.net/tag/image_recognition|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/delicious_java|prefLabel|delicious java +http://www.semanlink.net/tag/delicious_java|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/delicious_java|broader|http://www.semanlink.net/tag/del_icio_us +http://www.semanlink.net/tag/delicious_java|creationDate|2006-09-25 +http://www.semanlink.net/tag/delicious_java|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/delicious_java|uri|http://www.semanlink.net/tag/delicious_java +http://www.semanlink.net/tag/delicious_java|broader_prefLabel|Tagging +http://www.semanlink.net/tag/delicious_java|broader_prefLabel|del.icio.us +http://www.semanlink.net/tag/delicious_java|broader_altLabel|delicious +http://www.semanlink.net/tag/semweb_pro|creationTime|2011-01-18T12:30:26Z +http://www.semanlink.net/tag/semweb_pro|prefLabel|SemWeb Pro +http://www.semanlink.net/tag/semweb_pro|broader|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/semweb_pro|broader|http://www.semanlink.net/tag/semantic_web_conferences +http://www.semanlink.net/tag/semweb_pro|creationDate|2011-01-18 +http://www.semanlink.net/tag/semweb_pro|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semweb_pro|homepage|http://www.semweb.pro/ +http://www.semanlink.net/tag/semweb_pro|uri|http://www.semanlink.net/tag/semweb_pro +http://www.semanlink.net/tag/semweb_pro|broader_prefLabel|Paris +http://www.semanlink.net/tag/semweb_pro|broader_prefLabel|Semantic Web conferences +http://www.semanlink.net/tag/yann_lecun|creationTime|2016-02-20T14:40:01Z +http://www.semanlink.net/tag/yann_lecun|prefLabel|Yann LeCun +http://www.semanlink.net/tag/yann_lecun|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/yann_lecun|related|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/yann_lecun|creationDate|2016-02-20 +http://www.semanlink.net/tag/yann_lecun|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yann_lecun|describedBy|https://en.wikipedia.org/wiki/Yann_LeCun +http://www.semanlink.net/tag/yann_lecun|uri|http://www.semanlink.net/tag/yann_lecun +http://www.semanlink.net/tag/yann_lecun|broader_prefLabel|AI girls and guys 
+http://www.semanlink.net/tag/conway_s_game_of_life|creationTime|2015-11-30T12:33:28Z +http://www.semanlink.net/tag/conway_s_game_of_life|prefLabel|Conway's Game of Life +http://www.semanlink.net/tag/conway_s_game_of_life|creationDate|2015-11-30 +http://www.semanlink.net/tag/conway_s_game_of_life|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conway_s_game_of_life|describedBy|https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life +http://www.semanlink.net/tag/conway_s_game_of_life|uri|http://www.semanlink.net/tag/conway_s_game_of_life +http://www.semanlink.net/tag/explosions_cosmiques|prefLabel|Explosions cosmiques +http://www.semanlink.net/tag/explosions_cosmiques|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/explosions_cosmiques|broader|http://www.semanlink.net/tag/rayons_cosmiques +http://www.semanlink.net/tag/explosions_cosmiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/explosions_cosmiques|uri|http://www.semanlink.net/tag/explosions_cosmiques +http://www.semanlink.net/tag/explosions_cosmiques|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/explosions_cosmiques|broader_prefLabel|Rayons cosmiques +http://www.semanlink.net/tag/discounted_cumulative_gain|creationTime|2018-07-26T00:47:11Z +http://www.semanlink.net/tag/discounted_cumulative_gain|prefLabel|Discounted cumulative gain +http://www.semanlink.net/tag/discounted_cumulative_gain|broader|http://www.semanlink.net/tag/ranking_information_retrieval +http://www.semanlink.net/tag/discounted_cumulative_gain|related|http://www.semanlink.net/tag/machine_learned_ranking +http://www.semanlink.net/tag/discounted_cumulative_gain|creationDate|2018-07-26 +http://www.semanlink.net/tag/discounted_cumulative_gain|comment|"a measure of ranking quality. DCG measures the usefulness, or gain, of a document based on its position in the result list. 
+" +http://www.semanlink.net/tag/discounted_cumulative_gain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/discounted_cumulative_gain|describedBy|https://en.wikipedia.org/wiki/Discounted_cumulative_gain +http://www.semanlink.net/tag/discounted_cumulative_gain|uri|http://www.semanlink.net/tag/discounted_cumulative_gain +http://www.semanlink.net/tag/discounted_cumulative_gain|broader_prefLabel|Ranking (information retrieval) +http://www.semanlink.net/tag/nlp_conference|creationTime|2018-07-11T13:43:06Z +http://www.semanlink.net/tag/nlp_conference|prefLabel|NLP conference +http://www.semanlink.net/tag/nlp_conference|broader|http://www.semanlink.net/tag/ai_conference +http://www.semanlink.net/tag/nlp_conference|broader|http://www.semanlink.net/tag/nlp_event +http://www.semanlink.net/tag/nlp_conference|creationDate|2018-07-11 +http://www.semanlink.net/tag/nlp_conference|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_conference|uri|http://www.semanlink.net/tag/nlp_conference +http://www.semanlink.net/tag/nlp_conference|broader_prefLabel|AI Conference +http://www.semanlink.net/tag/nlp_conference|broader_prefLabel|NLP event +http://www.semanlink.net/tag/wiki_software|prefLabel|Wiki Software +http://www.semanlink.net/tag/wiki_software|broader|http://www.semanlink.net/tag/wiki +http://www.semanlink.net/tag/wiki_software|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wiki_software|uri|http://www.semanlink.net/tag/wiki_software +http://www.semanlink.net/tag/wiki_software|broader_prefLabel|Wiki +http://www.semanlink.net/tag/mobile_apps_dev|creationTime|2015-06-13T13:17:21Z +http://www.semanlink.net/tag/mobile_apps_dev|prefLabel|Mobile apps dev +http://www.semanlink.net/tag/mobile_apps_dev|broader|http://www.semanlink.net/tag/mobile_apps +http://www.semanlink.net/tag/mobile_apps_dev|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/mobile_apps_dev|creationDate|2015-06-13 +http://www.semanlink.net/tag/mobile_apps_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mobile_apps_dev|uri|http://www.semanlink.net/tag/mobile_apps_dev +http://www.semanlink.net/tag/mobile_apps_dev|broader_prefLabel|Mobile apps +http://www.semanlink.net/tag/mobile_apps_dev|broader_prefLabel|Dev +http://www.semanlink.net/tag/unsupervised_deep_pre_training|creationTime|2018-11-18T10:50:39Z +http://www.semanlink.net/tag/unsupervised_deep_pre_training|prefLabel|Unsupervised deep pre-training +http://www.semanlink.net/tag/unsupervised_deep_pre_training|broader|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/unsupervised_deep_pre_training|broader|http://www.semanlink.net/tag/pretrained_models +http://www.semanlink.net/tag/unsupervised_deep_pre_training|creationDate|2018-11-18 +http://www.semanlink.net/tag/unsupervised_deep_pre_training|comment|Train a model in an unsupervised way on a large amount of data, and then fine-tune it to achieve good performance on many different tasks +http://www.semanlink.net/tag/unsupervised_deep_pre_training|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/unsupervised_deep_pre_training|uri|http://www.semanlink.net/tag/unsupervised_deep_pre_training +http://www.semanlink.net/tag/unsupervised_deep_pre_training|broader_prefLabel|Unsupervised machine learning 
+http://www.semanlink.net/tag/unsupervised_deep_pre_training|broader_prefLabel|Pretrained models +http://www.semanlink.net/tag/irit|creationTime|2018-01-03T15:43:18Z +http://www.semanlink.net/tag/irit|prefLabel|IRIT +http://www.semanlink.net/tag/irit|creationDate|2018-01-03 +http://www.semanlink.net/tag/irit|comment|Institut de Recherche en Informatique de Toulouse +http://www.semanlink.net/tag/irit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/irit|homepage|https://www.irit.fr +http://www.semanlink.net/tag/irit|uri|http://www.semanlink.net/tag/irit +http://www.semanlink.net/tag/owl|prefLabel|OWL +http://www.semanlink.net/tag/owl|broader|http://www.semanlink.net/tag/reasoning +http://www.semanlink.net/tag/owl|broader|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/tag/owl|broader|http://www.semanlink.net/tag/semantic_web_dev +http://www.semanlink.net/tag/owl|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/owl|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/owl|related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/owl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owl|homepage|http://www.w3.org/2004/OWL/ +http://www.semanlink.net/tag/owl|uri|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owl|broader_prefLabel|Reasoning +http://www.semanlink.net/tag/owl|broader_prefLabel|Knowledge Representation +http://www.semanlink.net/tag/owl|broader_prefLabel|Semantic Web Dev +http://www.semanlink.net/tag/owl|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/owl|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/owl|broader_altLabel|KR +http://www.semanlink.net/tag/owl|broader_altLabel|sw +http://www.semanlink.net/tag/owl|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/owl|broader_altLabel|Ontology +http://www.semanlink.net/tag/nlp_problem|creationTime|2015-10-16T11:20:59Z +http://www.semanlink.net/tag/nlp_problem|prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/nlp_problem|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_problem|creationDate|2015-10-16 +http://www.semanlink.net/tag/nlp_problem|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_problem|uri|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/nlp_problem|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_problem|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_problem|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_problem|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/antibiotic_resistance|creationTime|2013-07-15T10:04:33Z +http://www.semanlink.net/tag/antibiotic_resistance|prefLabel|Antibiotic resistance +http://www.semanlink.net/tag/antibiotic_resistance|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/antibiotic_resistance|broader|http://www.semanlink.net/tag/problemes_sanitaires +http://www.semanlink.net/tag/antibiotic_resistance|broader|http://www.semanlink.net/tag/antibiotiques +http://www.semanlink.net/tag/antibiotic_resistance|broader|http://www.semanlink.net/tag/drug_resistant_germs +http://www.semanlink.net/tag/antibiotic_resistance|creationDate|2013-07-15 +http://www.semanlink.net/tag/antibiotic_resistance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/antibiotic_resistance|describedBy|https://en.wikipedia.org/wiki/Antimicrobial_resistance +http://www.semanlink.net/tag/antibiotic_resistance|altLabel|Résistance aux antibiotiques +http://www.semanlink.net/tag/antibiotic_resistance|altLabel|Superbug +http://www.semanlink.net/tag/antibiotic_resistance|altLabel|Antimicrobial resistance +http://www.semanlink.net/tag/antibiotic_resistance|uri|http://www.semanlink.net/tag/antibiotic_resistance +http://www.semanlink.net/tag/antibiotic_resistance|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/antibiotic_resistance|broader_prefLabel|Problèmes sanitaires +http://www.semanlink.net/tag/antibiotic_resistance|broader_prefLabel|Antibiotiques +http://www.semanlink.net/tag/antibiotic_resistance|broader_prefLabel|Drug-resistant germs +http://www.semanlink.net/tag/content_industries|prefLabel|Content industries +http://www.semanlink.net/tag/content_industries|broader|http://www.semanlink.net/tag/capitalistes +http://www.semanlink.net/tag/content_industries|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/content_industries|uri|http://www.semanlink.net/tag/content_industries +http://www.semanlink.net/tag/content_industries|broader_prefLabel|Capitalistes +http://www.semanlink.net/tag/free_will|creationTime|2014-03-01T21:50:32Z +http://www.semanlink.net/tag/free_will|prefLabel|Free will +http://www.semanlink.net/tag/free_will|broader|http://www.semanlink.net/tag/liberte +http://www.semanlink.net/tag/free_will|creationDate|2014-03-01 +http://www.semanlink.net/tag/free_will|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/free_will|uri|http://www.semanlink.net/tag/free_will +http://www.semanlink.net/tag/free_will|broader_prefLabel|Liberté +http://www.semanlink.net/tag/free_will|broader_related|http://www.semanlink.net/tag/etat_policier +http://www.semanlink.net/tag/petrobras|creationTime|2017-03-26T10:52:28Z +http://www.semanlink.net/tag/petrobras|prefLabel|Petrobras +http://www.semanlink.net/tag/petrobras|creationDate|2017-03-26 +http://www.semanlink.net/tag/petrobras|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/petrobras|uri|http://www.semanlink.net/tag/petrobras +http://www.semanlink.net/tag/institutions_europeennes|creationTime|2007-06-27T21:35:46Z +http://www.semanlink.net/tag/institutions_europeennes|prefLabel|Institutions européennes +http://www.semanlink.net/tag/institutions_europeennes|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/institutions_europeennes|creationDate|2007-06-27 +http://www.semanlink.net/tag/institutions_europeennes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/institutions_europeennes|uri|http://www.semanlink.net/tag/institutions_europeennes +http://www.semanlink.net/tag/institutions_europeennes|broader_prefLabel|Europe +http://www.semanlink.net/tag/rdf_framework|creationTime|2008-10-06T00:01:22Z +http://www.semanlink.net/tag/rdf_framework|prefLabel|RDF Framework +http://www.semanlink.net/tag/rdf_framework|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_framework|broader|http://www.semanlink.net/tag/frameworks +http://www.semanlink.net/tag/rdf_framework|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/rdf_framework|creationDate|2008-10-06 
+http://www.semanlink.net/tag/rdf_framework|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_framework|uri|http://www.semanlink.net/tag/rdf_framework +http://www.semanlink.net/tag/rdf_framework|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_framework|broader_prefLabel|Frameworks +http://www.semanlink.net/tag/rdf_framework|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/rdf_framework|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_framework|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_framework|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_framework|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_framework|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/netflix|creationTime|2015-06-28T09:31:07Z +http://www.semanlink.net/tag/netflix|prefLabel|Netflix +http://www.semanlink.net/tag/netflix|creationDate|2015-06-28 +http://www.semanlink.net/tag/netflix|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/netflix|uri|http://www.semanlink.net/tag/netflix +http://www.semanlink.net/tag/publicite_politique|creationTime|2021-07-17T15:41:11Z +http://www.semanlink.net/tag/publicite_politique|prefLabel|Publicité politique +http://www.semanlink.net/tag/publicite_politique|broader|http://www.semanlink.net/tag/publicite +http://www.semanlink.net/tag/publicite_politique|creationDate|2021-07-17 +http://www.semanlink.net/tag/publicite_politique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/publicite_politique|uri|http://www.semanlink.net/tag/publicite_politique +http://www.semanlink.net/tag/publicite_politique|broader_prefLabel|Publicité +http://www.semanlink.net/tag/publicite_politique|broader_altLabel|Advertising +http://www.semanlink.net/tag/publicite_politique|broader_altLabel|Pub +http://www.semanlink.net/tag/pac|prefLabel|PAC +http://www.semanlink.net/tag/pac|broader|http://www.semanlink.net/tag/union_europeenne +http://www.semanlink.net/tag/pac|broader|http://www.semanlink.net/tag/subventions_agricoles +http://www.semanlink.net/tag/pac|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pac|uri|http://www.semanlink.net/tag/pac +http://www.semanlink.net/tag/pac|broader_prefLabel|Union européenne +http://www.semanlink.net/tag/pac|broader_prefLabel|Subventions agricoles +http://www.semanlink.net/tag/pac|broader_altLabel|UE +http://www.semanlink.net/tag/r2rml|creationTime|2010-11-07T13:02:44Z +http://www.semanlink.net/tag/r2rml|prefLabel|R2RML +http://www.semanlink.net/tag/r2rml|broader|http://www.semanlink.net/tag/database_to_rdf_mapping +http://www.semanlink.net/tag/r2rml|broader|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.semanlink.net/tag/r2rml|creationDate|2010-11-07 +http://www.semanlink.net/tag/r2rml|comment|RDB to RDF Mapping Language +http://www.semanlink.net/tag/r2rml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/r2rml|uri|http://www.semanlink.net/tag/r2rml +http://www.semanlink.net/tag/r2rml|broader_prefLabel|Database to RDF mapping +http://www.semanlink.net/tag/r2rml|broader_prefLabel|Relational Databases and the Semantic Web +http://www.semanlink.net/tag/r2rml|broader_altLabel|RDF and database 
+http://www.semanlink.net/tag/r2rml|broader_related|http://www.semanlink.net/tag/virtuoso +http://www.semanlink.net/tag/r2rml|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/uranium|prefLabel|Uranium +http://www.semanlink.net/tag/uranium|broader|http://www.semanlink.net/tag/energie +http://www.semanlink.net/tag/uranium|broader|http://www.semanlink.net/tag/matiere_premiere +http://www.semanlink.net/tag/uranium|related|http://www.semanlink.net/tag/areva +http://www.semanlink.net/tag/uranium|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uranium|uri|http://www.semanlink.net/tag/uranium +http://www.semanlink.net/tag/uranium|broader_prefLabel|Energie +http://www.semanlink.net/tag/uranium|broader_prefLabel|Matière première +http://www.semanlink.net/tag/contrastive_learning|creationTime|2021-04-19T02:13:49Z +http://www.semanlink.net/tag/contrastive_learning|prefLabel|Contrastive Learning +http://www.semanlink.net/tag/contrastive_learning|broader|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/tag/contrastive_learning|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/contrastive_learning|creationDate|2021-04-19 +http://www.semanlink.net/tag/contrastive_learning|comment|"Contrastive learning: a class of methods for learning representations +by contrasting pairs of related data examples against pairs of unrelated data +examples. + +Methods that build representations by learning to encode what makes two things similar or different." +http://www.semanlink.net/tag/contrastive_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/contrastive_learning|uri|http://www.semanlink.net/tag/contrastive_learning +http://www.semanlink.net/tag/contrastive_learning|broader_prefLabel|Representation learning +http://www.semanlink.net/tag/contrastive_learning|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/contrastive_learning|broader_related|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/patti_smith|prefLabel|Patti Smith +http://www.semanlink.net/tag/patti_smith|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/patti_smith|broader|http://www.semanlink.net/tag/rock +http://www.semanlink.net/tag/patti_smith|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/patti_smith|uri|http://www.semanlink.net/tag/patti_smith +http://www.semanlink.net/tag/patti_smith|broader_prefLabel|Musicien +http://www.semanlink.net/tag/patti_smith|broader_prefLabel|Rock +http://www.semanlink.net/tag/pollution_des_oceans|prefLabel|Pollution des océans +http://www.semanlink.net/tag/pollution_des_oceans|broader|http://www.semanlink.net/tag/pollution +http://www.semanlink.net/tag/pollution_des_oceans|creationDate|2006-11-05 +http://www.semanlink.net/tag/pollution_des_oceans|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pollution_des_oceans|uri|http://www.semanlink.net/tag/pollution_des_oceans +http://www.semanlink.net/tag/pollution_des_oceans|broader_prefLabel|Pollution +http://www.semanlink.net/tag/post_verite|creationTime|2016-09-18T11:27:27Z +http://www.semanlink.net/tag/post_verite|prefLabel|"""Post-Vérité""" +http://www.semanlink.net/tag/post_verite|broader|http://www.semanlink.net/tag/verite +http://www.semanlink.net/tag/post_verite|related|http://www.semanlink.net/tag/social_networks 
+http://www.semanlink.net/tag/post_verite|related|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/post_verite|related|http://www.semanlink.net/tag/fact_checking +http://www.semanlink.net/tag/post_verite|related|http://www.semanlink.net/tag/brexit +http://www.semanlink.net/tag/post_verite|creationDate|2016-09-18 +http://www.semanlink.net/tag/post_verite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/post_verite|uri|http://www.semanlink.net/tag/post_verite +http://www.semanlink.net/tag/post_verite|broader_prefLabel|Vérité +http://www.semanlink.net/tag/gafa|creationTime|2018-02-11T01:04:49Z +http://www.semanlink.net/tag/gafa|prefLabel|GAFA +http://www.semanlink.net/tag/gafa|creationDate|2018-02-11 +http://www.semanlink.net/tag/gafa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gafa|uri|http://www.semanlink.net/tag/gafa +http://www.semanlink.net/tag/niger_festival_de_la_jeunesse|creationTime|2016-03-26T12:02:55Z +http://www.semanlink.net/tag/niger_festival_de_la_jeunesse|prefLabel|Niger : festival de la jeunesse +http://www.semanlink.net/tag/niger_festival_de_la_jeunesse|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/niger_festival_de_la_jeunesse|creationDate|2016-03-26 +http://www.semanlink.net/tag/niger_festival_de_la_jeunesse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/niger_festival_de_la_jeunesse|uri|http://www.semanlink.net/tag/niger_festival_de_la_jeunesse +http://www.semanlink.net/tag/niger_festival_de_la_jeunesse|broader_prefLabel|Niger +http://www.semanlink.net/tag/niger_festival_de_la_jeunesse|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/niger_festival_de_la_jeunesse|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/niger_festival_de_la_jeunesse|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/haoussa|prefLabel|Hausa +http://www.semanlink.net/tag/haoussa|broader|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/haoussa|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/haoussa|broader|http://www.semanlink.net/tag/african_languages +http://www.semanlink.net/tag/haoussa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/haoussa|altLabel|Haoussa +http://www.semanlink.net/tag/haoussa|altLabel|Hawsa +http://www.semanlink.net/tag/haoussa|uri|http://www.semanlink.net/tag/haoussa +http://www.semanlink.net/tag/haoussa|broader_prefLabel|Peuples +http://www.semanlink.net/tag/haoussa|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/haoussa|broader_prefLabel|African languages +http://www.semanlink.net/tag/championnat_du_monde|prefLabel|Championnat du monde +http://www.semanlink.net/tag/championnat_du_monde|broader|http://www.semanlink.net/tag/sport +http://www.semanlink.net/tag/championnat_du_monde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/championnat_du_monde|uri|http://www.semanlink.net/tag/championnat_du_monde +http://www.semanlink.net/tag/championnat_du_monde|broader_prefLabel|Sport +http://www.semanlink.net/tag/deep_learning_frameworks|creationTime|2017-09-09T13:49:44Z +http://www.semanlink.net/tag/deep_learning_frameworks|prefLabel|Deep Learning frameworks +http://www.semanlink.net/tag/deep_learning_frameworks|broader|http://www.semanlink.net/tag/deep_learning 
+http://www.semanlink.net/tag/deep_learning_frameworks|broader|http://www.semanlink.net/tag/frameworks
+http://www.semanlink.net/tag/deep_learning_frameworks|broader|http://www.semanlink.net/tag/machine_learning_library
+http://www.semanlink.net/tag/deep_learning_frameworks|broader|http://www.semanlink.net/tag/machine_learning_tool
+http://www.semanlink.net/tag/deep_learning_frameworks|creationDate|2017-09-09
+http://www.semanlink.net/tag/deep_learning_frameworks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/deep_learning_frameworks|uri|http://www.semanlink.net/tag/deep_learning_frameworks
+http://www.semanlink.net/tag/deep_learning_frameworks|broader_prefLabel|Deep Learning
+http://www.semanlink.net/tag/deep_learning_frameworks|broader_prefLabel|Frameworks
+http://www.semanlink.net/tag/deep_learning_frameworks|broader_prefLabel|Machine Learning library
+http://www.semanlink.net/tag/deep_learning_frameworks|broader_prefLabel|Machine Learning tool
+http://www.semanlink.net/tag/deep_learning_frameworks|broader_related|http://www.semanlink.net/tag/feature_extraction
+http://www.semanlink.net/tag/deep_learning_frameworks|broader_related|http://www.semanlink.net/tag/feature_learning
+http://www.semanlink.net/tag/guerre_chimique|prefLabel|Guerre chimique
+http://www.semanlink.net/tag/guerre_chimique|broader|http://www.semanlink.net/tag/guerre
+http://www.semanlink.net/tag/guerre_chimique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/guerre_chimique|uri|http://www.semanlink.net/tag/guerre_chimique
+http://www.semanlink.net/tag/guerre_chimique|broader_prefLabel|War
+http://www.semanlink.net/tag/guerre_chimique|broader_altLabel|Guerre
+http://www.semanlink.net/tag/hayabusa2|creationTime|2021-01-09T14:22:21Z
+http://www.semanlink.net/tag/hayabusa2|prefLabel|Hayabusa-2
+http://www.semanlink.net/tag/hayabusa2|broader|http://www.semanlink.net/tag/missions_spatiales
+http://www.semanlink.net/tag/hayabusa2|broader|http://www.semanlink.net/tag/japon
+http://www.semanlink.net/tag/hayabusa2|broader|http://www.semanlink.net/tag/asteroide
+http://www.semanlink.net/tag/hayabusa2|related|http://www.semanlink.net/tag/hayabusa
+http://www.semanlink.net/tag/hayabusa2|creationDate|2021-01-09
+http://www.semanlink.net/tag/hayabusa2|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/hayabusa2|describedBy|https://en.wikipedia.org/wiki/Hayabusa2
+http://www.semanlink.net/tag/hayabusa2|altLabel|Hayabusa2
+http://www.semanlink.net/tag/hayabusa2|uri|http://www.semanlink.net/tag/hayabusa2
+http://www.semanlink.net/tag/hayabusa2|broader_prefLabel|Missions spatiales
+http://www.semanlink.net/tag/hayabusa2|broader_prefLabel|Japon
+http://www.semanlink.net/tag/hayabusa2|broader_prefLabel|Astéroïde
+http://www.semanlink.net/tag/hayabusa2|broader_altLabel|Japan
+http://www.semanlink.net/tag/npm|creationTime|2017-09-29T01:37:17Z
+http://www.semanlink.net/tag/npm|prefLabel|npm
+http://www.semanlink.net/tag/npm|related|http://www.semanlink.net/tag/node_js
+http://www.semanlink.net/tag/npm|creationDate|2017-09-29
+http://www.semanlink.net/tag/npm|comment|a package manager for javascript
+http://www.semanlink.net/tag/npm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/npm|describedBy|https://en.wikipedia.org/wiki/Npm_(software)
+http://www.semanlink.net/tag/npm|uri|http://www.semanlink.net/tag/npm
+http://www.semanlink.net/tag/xcode|creationTime|2013-05-30T02:07:40Z
+http://www.semanlink.net/tag/xcode|prefLabel|XCode +http://www.semanlink.net/tag/xcode|creationDate|2013-05-30 +http://www.semanlink.net/tag/xcode|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xcode|uri|http://www.semanlink.net/tag/xcode +http://www.semanlink.net/tag/sentiment_analysis|creationTime|2013-09-11T16:57:10Z +http://www.semanlink.net/tag/sentiment_analysis|prefLabel|Sentiment analysis +http://www.semanlink.net/tag/sentiment_analysis|broader|http://www.semanlink.net/tag/sentiment +http://www.semanlink.net/tag/sentiment_analysis|broader|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/sentiment_analysis|creationDate|2013-09-11 +http://www.semanlink.net/tag/sentiment_analysis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sentiment_analysis|uri|http://www.semanlink.net/tag/sentiment_analysis +http://www.semanlink.net/tag/sentiment_analysis|broader_prefLabel|Sentiment +http://www.semanlink.net/tag/sentiment_analysis|broader_prefLabel|Data mining +http://www.semanlink.net/tag/journaliste|prefLabel|Journaliste +http://www.semanlink.net/tag/journaliste|broader|http://www.semanlink.net/tag/journalisme +http://www.semanlink.net/tag/journaliste|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/journaliste|uri|http://www.semanlink.net/tag/journaliste +http://www.semanlink.net/tag/journaliste|broader_prefLabel|Journalisme +http://www.semanlink.net/tag/bitcoin|creationTime|2011-06-15T15:58:36Z +http://www.semanlink.net/tag/bitcoin|prefLabel|Bitcoin +http://www.semanlink.net/tag/bitcoin|broader|http://www.semanlink.net/tag/money +http://www.semanlink.net/tag/bitcoin|broader|http://www.semanlink.net/tag/peer_to_peer +http://www.semanlink.net/tag/bitcoin|broader|http://www.semanlink.net/tag/virtual_currency +http://www.semanlink.net/tag/bitcoin|broader|http://www.semanlink.net/tag/cryptocurrency +http://www.semanlink.net/tag/bitcoin|creationDate|2011-06-15 +http://www.semanlink.net/tag/bitcoin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bitcoin|describedBy|https://en.wikipedia.org/wiki/Bitcoin +http://www.semanlink.net/tag/bitcoin|uri|http://www.semanlink.net/tag/bitcoin +http://www.semanlink.net/tag/bitcoin|broader_prefLabel|Money +http://www.semanlink.net/tag/bitcoin|broader_prefLabel|Peer to peer +http://www.semanlink.net/tag/bitcoin|broader_prefLabel|Digital currency +http://www.semanlink.net/tag/bitcoin|broader_prefLabel|Cryptocurrency +http://www.semanlink.net/tag/bitcoin|broader_altLabel|Monnaie +http://www.semanlink.net/tag/bitcoin|broader_altLabel|P2P +http://www.semanlink.net/tag/bitcoin|broader_altLabel|Virtual currency +http://www.semanlink.net/tag/bitcoin|broader_altLabel|Monnaie virtuelle +http://www.semanlink.net/tag/tim_berners_lee|prefLabel|Tim Berners-Lee +http://www.semanlink.net/tag/tim_berners_lee|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/tim_berners_lee|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/tim_berners_lee|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/tim_berners_lee|related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/tim_berners_lee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tim_berners_lee|altLabel|TBL +http://www.semanlink.net/tag/tim_berners_lee|altLabel|TimBL 
+http://www.semanlink.net/tag/tim_berners_lee|uri|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/tim_berners_lee|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/tim_berners_lee|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/tim_berners_lee|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/tim_berners_lee|broader_altLabel|Technical guys +http://www.semanlink.net/tag/orri_erling|creationTime|2008-05-04T14:47:29Z +http://www.semanlink.net/tag/orri_erling|prefLabel|Orri Erling +http://www.semanlink.net/tag/orri_erling|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/orri_erling|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/orri_erling|related|http://www.semanlink.net/tag/www08 +http://www.semanlink.net/tag/orri_erling|creationDate|2008-05-04 +http://www.semanlink.net/tag/orri_erling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/orri_erling|uri|http://www.semanlink.net/tag/orri_erling +http://www.semanlink.net/tag/orri_erling|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/orri_erling|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/orri_erling|broader_altLabel|Technical guys +http://www.semanlink.net/tag/slot_tagging|creationTime|2020-01-09T00:53:50Z +http://www.semanlink.net/tag/slot_tagging|prefLabel|Slot tagging +http://www.semanlink.net/tag/slot_tagging|broader|http://www.semanlink.net/tag/nlu +http://www.semanlink.net/tag/slot_tagging|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/slot_tagging|broader|http://www.semanlink.net/tag/sequence_labeling +http://www.semanlink.net/tag/slot_tagging|broader|http://www.semanlink.net/tag/chatbot +http://www.semanlink.net/tag/slot_tagging|related|http://www.semanlink.net/tag/sequence_labeling +http://www.semanlink.net/tag/slot_tagging|related|http://www.semanlink.net/tag/entity_discovery_and_linking +http://www.semanlink.net/tag/slot_tagging|related|http://www.semanlink.net/tag/intent_detection +http://www.semanlink.net/tag/slot_tagging|creationDate|2020-01-09 +http://www.semanlink.net/tag/slot_tagging|comment|extracting semantic concepts in a query (a sequence labeling task that tags the input word sequence). 
+http://www.semanlink.net/tag/slot_tagging|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/slot_tagging|altLabel|Slot filling +http://www.semanlink.net/tag/slot_tagging|uri|http://www.semanlink.net/tag/slot_tagging +http://www.semanlink.net/tag/slot_tagging|broader_prefLabel|NLU +http://www.semanlink.net/tag/slot_tagging|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/slot_tagging|broader_prefLabel|Sequence labeling +http://www.semanlink.net/tag/slot_tagging|broader_prefLabel|Chatbots +http://www.semanlink.net/tag/slot_tagging|broader_altLabel|Natural Language Understanding +http://www.semanlink.net/tag/slot_tagging|broader_altLabel|Sequence Tagging +http://www.semanlink.net/tag/slot_tagging|broader_altLabel|Chatbot +http://www.semanlink.net/tag/slot_tagging|broader_related|http://www.semanlink.net/tag/ml_sequential_data +http://www.semanlink.net/tag/film_fantastique|creationTime|2008-03-11T00:40:04Z +http://www.semanlink.net/tag/film_fantastique|prefLabel|Film fantastique +http://www.semanlink.net/tag/film_fantastique|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_fantastique|creationDate|2008-03-11 +http://www.semanlink.net/tag/film_fantastique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_fantastique|uri|http://www.semanlink.net/tag/film_fantastique +http://www.semanlink.net/tag/film_fantastique|broader_prefLabel|Film +http://www.semanlink.net/tag/occam_s_razor|creationTime|2019-10-11T01:57:16Z +http://www.semanlink.net/tag/occam_s_razor|prefLabel|Occam's razor +http://www.semanlink.net/tag/occam_s_razor|creationDate|2019-10-11 +http://www.semanlink.net/tag/occam_s_razor|comment|when presented with competing hypotheses that make the same predictions, one should select the solution with the fewest assumptions +http://www.semanlink.net/tag/occam_s_razor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/occam_s_razor|describedBy|https://en.wikipedia.org/wiki/Occam%27s_razor +http://www.semanlink.net/tag/occam_s_razor|uri|http://www.semanlink.net/tag/occam_s_razor +http://www.semanlink.net/tag/quantum_computing|creationTime|2018-02-19T00:34:34Z +http://www.semanlink.net/tag/quantum_computing|prefLabel|Quantum computing +http://www.semanlink.net/tag/quantum_computing|broader|http://www.semanlink.net/tag/computers +http://www.semanlink.net/tag/quantum_computing|broader|http://www.semanlink.net/tag/ntic +http://www.semanlink.net/tag/quantum_computing|broader|http://www.semanlink.net/tag/mecanique_quantique +http://www.semanlink.net/tag/quantum_computing|creationDate|2018-02-19 +http://www.semanlink.net/tag/quantum_computing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/quantum_computing|describedBy|https://en.wikipedia.org/wiki/Quantum_computing +http://www.semanlink.net/tag/quantum_computing|altLabel|Quantum computer +http://www.semanlink.net/tag/quantum_computing|altLabel|Ordinateur quantique +http://www.semanlink.net/tag/quantum_computing|uri|http://www.semanlink.net/tag/quantum_computing +http://www.semanlink.net/tag/quantum_computing|broader_prefLabel|Computers +http://www.semanlink.net/tag/quantum_computing|broader_prefLabel|NTIC +http://www.semanlink.net/tag/quantum_computing|broader_prefLabel|Mécanique quantique +http://www.semanlink.net/tag/shoira_otabekova|creationTime|2010-06-24T23:56:56Z +http://www.semanlink.net/tag/shoira_otabekova|prefLabel|Shoira Otabekova 
+http://www.semanlink.net/tag/shoira_otabekova|broader|http://www.semanlink.net/tag/ouzbekistan +http://www.semanlink.net/tag/shoira_otabekova|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/shoira_otabekova|creationDate|2010-06-24 +http://www.semanlink.net/tag/shoira_otabekova|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/shoira_otabekova|uri|http://www.semanlink.net/tag/shoira_otabekova +http://www.semanlink.net/tag/shoira_otabekova|broader_prefLabel|Ouzbékistan +http://www.semanlink.net/tag/shoira_otabekova|broader_prefLabel|Musique +http://www.semanlink.net/tag/shoira_otabekova|broader_altLabel|Uzbekistan +http://www.semanlink.net/tag/shoira_otabekova|broader_altLabel|Music +http://www.semanlink.net/tag/microscope|creationTime|2013-03-15T12:30:49Z +http://www.semanlink.net/tag/microscope|prefLabel|Microscope +http://www.semanlink.net/tag/microscope|creationDate|2013-03-15 +http://www.semanlink.net/tag/microscope|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/microscope|uri|http://www.semanlink.net/tag/microscope +http://www.semanlink.net/tag/cinema_africain|creationTime|2007-08-24T23:38:21Z +http://www.semanlink.net/tag/cinema_africain|prefLabel|Cinéma africain +http://www.semanlink.net/tag/cinema_africain|broader|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/cinema_africain|broader|http://www.semanlink.net/tag/cinema +http://www.semanlink.net/tag/cinema_africain|creationDate|2007-08-24 +http://www.semanlink.net/tag/cinema_africain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cinema_africain|uri|http://www.semanlink.net/tag/cinema_africain +http://www.semanlink.net/tag/cinema_africain|broader_prefLabel|Art d'Afrique +http://www.semanlink.net/tag/cinema_africain|broader_prefLabel|Cinéma +http://www.semanlink.net/tag/cinema_africain|broader_altLabel|African art +http://www.semanlink.net/tag/ai_event|creationTime|2021-01-27T13:44:14Z +http://www.semanlink.net/tag/ai_event|prefLabel|AI Event +http://www.semanlink.net/tag/ai_event|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_event|broader|http://www.semanlink.net/tag/event +http://www.semanlink.net/tag/ai_event|creationDate|2021-01-27 +http://www.semanlink.net/tag/ai_event|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_event|uri|http://www.semanlink.net/tag/ai_event +http://www.semanlink.net/tag/ai_event|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_event|broader_prefLabel|Event +http://www.semanlink.net/tag/ai_event|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_event|broader_altLabel|AI +http://www.semanlink.net/tag/ai_event|broader_altLabel|IA +http://www.semanlink.net/tag/ai_event|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/automatic_summarization|creationTime|2017-05-24T17:08:11Z +http://www.semanlink.net/tag/automatic_summarization|prefLabel|Text Summarization +http://www.semanlink.net/tag/automatic_summarization|broader|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/automatic_summarization|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/automatic_summarization|related|http://www.semanlink.net/tag/nlp_long_documents +http://www.semanlink.net/tag/automatic_summarization|creationDate|2017-05-24 
+http://www.semanlink.net/tag/automatic_summarization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/automatic_summarization|describedBy|https://en.wikipedia.org/wiki/Automatic_summarization +http://www.semanlink.net/tag/automatic_summarization|altLabel|Automatic summarization +http://www.semanlink.net/tag/automatic_summarization|uri|http://www.semanlink.net/tag/automatic_summarization +http://www.semanlink.net/tag/automatic_summarization|broader_prefLabel|Information extraction +http://www.semanlink.net/tag/automatic_summarization|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/automatic_summarization|broader_related|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/tag/greffe_arbre|creationTime|2014-09-02T23:07:35Z +http://www.semanlink.net/tag/greffe_arbre|prefLabel|Greffe (arbre) +http://www.semanlink.net/tag/greffe_arbre|broader|http://www.semanlink.net/tag/arbres +http://www.semanlink.net/tag/greffe_arbre|creationDate|2014-09-02 +http://www.semanlink.net/tag/greffe_arbre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/greffe_arbre|uri|http://www.semanlink.net/tag/greffe_arbre +http://www.semanlink.net/tag/greffe_arbre|broader_prefLabel|Arbres +http://www.semanlink.net/tag/cell|creationTime|2020-08-28T13:35:04Z +http://www.semanlink.net/tag/cell|prefLabel|Cell +http://www.semanlink.net/tag/cell|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/cell|creationDate|2020-08-28 +http://www.semanlink.net/tag/cell|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cell|uri|http://www.semanlink.net/tag/cell +http://www.semanlink.net/tag/cell|broader_prefLabel|Biology +http://www.semanlink.net/tag/cell|broader_altLabel|Biologie +http://www.semanlink.net/tag/fpservant_slideshare|creationTime|2012-06-03T17:04:26Z +http://www.semanlink.net/tag/fpservant_slideshare|prefLabel|fpservant@slideshare +http://www.semanlink.net/tag/fpservant_slideshare|broader|http://www.semanlink.net/tag/slideshare +http://www.semanlink.net/tag/fpservant_slideshare|broader|http://www.semanlink.net/tag/slides_fps +http://www.semanlink.net/tag/fpservant_slideshare|broader|http://www.semanlink.net/tag/fps +http://www.semanlink.net/tag/fpservant_slideshare|creationDate|2012-06-03 +http://www.semanlink.net/tag/fpservant_slideshare|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fpservant_slideshare|uri|http://www.semanlink.net/tag/fpservant_slideshare +http://www.semanlink.net/tag/fpservant_slideshare|broader_prefLabel|SlideShare +http://www.semanlink.net/tag/fpservant_slideshare|broader_prefLabel|slides fps +http://www.semanlink.net/tag/fpservant_slideshare|broader_prefLabel|fps +http://www.semanlink.net/tag/ecrivain|prefLabel|Ecrivain +http://www.semanlink.net/tag/ecrivain|broader|http://www.semanlink.net/tag/homme_celebre +http://www.semanlink.net/tag/ecrivain|broader|http://www.semanlink.net/tag/litterature +http://www.semanlink.net/tag/ecrivain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ecrivain|uri|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/ecrivain|broader_prefLabel|Homme célèbre +http://www.semanlink.net/tag/ecrivain|broader_prefLabel|Littérature +http://www.semanlink.net/tag/ecrivain|broader_related|http://www.semanlink.net/tag/livre 
+http://www.semanlink.net/tag/triple_store_powered_site|creationTime|2010-07-16T14:42:51Z +http://www.semanlink.net/tag/triple_store_powered_site|prefLabel|Triple-store powered site +http://www.semanlink.net/tag/triple_store_powered_site|broader|http://www.semanlink.net/tag/triplestore +http://www.semanlink.net/tag/triple_store_powered_site|creationDate|2010-07-16 +http://www.semanlink.net/tag/triple_store_powered_site|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/triple_store_powered_site|uri|http://www.semanlink.net/tag/triple_store_powered_site +http://www.semanlink.net/tag/triple_store_powered_site|broader_prefLabel|TripleStore +http://www.semanlink.net/tag/triple_store_powered_site|broader_altLabel|RDF database +http://www.semanlink.net/tag/virtual_personal_assistant|creationTime|2010-07-01T16:13:04Z +http://www.semanlink.net/tag/virtual_personal_assistant|prefLabel|Virtual Personal Assistant +http://www.semanlink.net/tag/virtual_personal_assistant|creationDate|2010-07-01 +http://www.semanlink.net/tag/virtual_personal_assistant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/virtual_personal_assistant|uri|http://www.semanlink.net/tag/virtual_personal_assistant +http://www.semanlink.net/tag/radio|creationTime|2011-02-06T19:36:20Z +http://www.semanlink.net/tag/radio|prefLabel|Radio +http://www.semanlink.net/tag/radio|creationDate|2011-02-06 +http://www.semanlink.net/tag/radio|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/radio|uri|http://www.semanlink.net/tag/radio +http://www.semanlink.net/tag/nlp_ibm|creationTime|2018-11-10T15:39:10Z +http://www.semanlink.net/tag/nlp_ibm|prefLabel|NLP@IBM +http://www.semanlink.net/tag/nlp_ibm|broader|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/tag/nlp_ibm|broader|http://www.semanlink.net/tag/ai_ibm +http://www.semanlink.net/tag/nlp_ibm|related|http://www.semanlink.net/tag/i_b_m_s_watson +http://www.semanlink.net/tag/nlp_ibm|creationDate|2018-11-10 +http://www.semanlink.net/tag/nlp_ibm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_ibm|uri|http://www.semanlink.net/tag/nlp_ibm +http://www.semanlink.net/tag/nlp_ibm|broader_prefLabel|NLP Teams +http://www.semanlink.net/tag/nlp_ibm|broader_prefLabel|AI@IBM +http://www.semanlink.net/tag/nlp_ibm|broader_altLabel|NLP Groups +http://www.semanlink.net/tag/lilian_weng|creationTime|2019-02-10T19:21:59Z +http://www.semanlink.net/tag/lilian_weng|prefLabel|Lilian Weng +http://www.semanlink.net/tag/lilian_weng|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/lilian_weng|related|http://www.semanlink.net/tag/openai +http://www.semanlink.net/tag/lilian_weng|creationDate|2019-02-10 +http://www.semanlink.net/tag/lilian_weng|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lilian_weng|homepage|https://lilianweng.github.io/lil-log/ +http://www.semanlink.net/tag/lilian_weng|weblog|https://lilianweng.github.io/lil-log/ +http://www.semanlink.net/tag/lilian_weng|uri|http://www.semanlink.net/tag/lilian_weng +http://www.semanlink.net/tag/lilian_weng|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/disparition_d_especes|creationTime|2013-03-18T22:07:35Z +http://www.semanlink.net/tag/disparition_d_especes|prefLabel|Disparition d'espèces +http://www.semanlink.net/tag/disparition_d_especes|creationDate|2013-03-18 
+http://www.semanlink.net/tag/disparition_d_especes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/disparition_d_especes|altLabel|Espèces disparues +http://www.semanlink.net/tag/disparition_d_especes|uri|http://www.semanlink.net/tag/disparition_d_especes +http://www.semanlink.net/tag/foaf_ssl|creationTime|2010-05-04T08:56:47Z +http://www.semanlink.net/tag/foaf_ssl|prefLabel|foaf+ssl +http://www.semanlink.net/tag/foaf_ssl|broader|http://www.semanlink.net/tag/foaf +http://www.semanlink.net/tag/foaf_ssl|broader|http://www.semanlink.net/tag/ssl +http://www.semanlink.net/tag/foaf_ssl|broader|http://www.semanlink.net/tag/webid +http://www.semanlink.net/tag/foaf_ssl|creationDate|2010-05-04 +http://www.semanlink.net/tag/foaf_ssl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/foaf_ssl|homepage|http://esw.w3.org/Foaf%2Bssl +http://www.semanlink.net/tag/foaf_ssl|uri|http://www.semanlink.net/tag/foaf_ssl +http://www.semanlink.net/tag/foaf_ssl|broader_prefLabel|foaf +http://www.semanlink.net/tag/foaf_ssl|broader_prefLabel|SSL +http://www.semanlink.net/tag/foaf_ssl|broader_prefLabel|WebID +http://www.semanlink.net/tag/foaf_ssl|broader_related|http://www.semanlink.net/tag/henry_story +http://www.semanlink.net/tag/android|creationTime|2013-11-28T23:50:28Z +http://www.semanlink.net/tag/android|prefLabel|Android +http://www.semanlink.net/tag/android|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/android|creationDate|2013-11-28 +http://www.semanlink.net/tag/android|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/android|uri|http://www.semanlink.net/tag/android +http://www.semanlink.net/tag/android|broader_prefLabel|Google +http://www.semanlink.net/tag/android|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/diffbot|creationTime|2020-11-14T08:53:56Z +http://www.semanlink.net/tag/diffbot|prefLabel|Diffbot +http://www.semanlink.net/tag/diffbot|broader|http://www.semanlink.net/tag/ai_application +http://www.semanlink.net/tag/diffbot|creationDate|2020-11-14 +http://www.semanlink.net/tag/diffbot|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/diffbot|homepage|https://www.diffbot.com/ +http://www.semanlink.net/tag/diffbot|uri|http://www.semanlink.net/tag/diffbot +http://www.semanlink.net/tag/diffbot|broader_prefLabel|AI Application +http://www.semanlink.net/tag/rdf_star|creationTime|2020-12-22T18:16:28Z +http://www.semanlink.net/tag/rdf_star|prefLabel|RDF* +http://www.semanlink.net/tag/rdf_star|broader|http://www.semanlink.net/tag/rdf_and_property_graphs +http://www.semanlink.net/tag/rdf_star|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_star|creationDate|2020-12-22 +http://www.semanlink.net/tag/rdf_star|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_star|uri|http://www.semanlink.net/tag/rdf_star +http://www.semanlink.net/tag/rdf_star|broader_prefLabel|RDF and Property Graphs +http://www.semanlink.net/tag/rdf_star|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_star|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_star|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_star|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_star|broader_related|http://www.semanlink.net/tag/guha 
+http://www.semanlink.net/tag/rdf_star|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/semantic_startup|creationTime|2015-03-09T11:30:29Z +http://www.semanlink.net/tag/semantic_startup|prefLabel|Semantic startup +http://www.semanlink.net/tag/semantic_startup|broader|http://www.semanlink.net/tag/startups +http://www.semanlink.net/tag/semantic_startup|creationDate|2015-03-09 +http://www.semanlink.net/tag/semantic_startup|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_startup|uri|http://www.semanlink.net/tag/semantic_startup +http://www.semanlink.net/tag/semantic_startup|broader_prefLabel|Startups +http://www.semanlink.net/tag/semantic_startup|broader_altLabel|Startup +http://www.semanlink.net/tag/concept_bottleneck_models|creationTime|2020-07-10T09:48:57Z +http://www.semanlink.net/tag/concept_bottleneck_models|prefLabel|Concept Bottleneck Models +http://www.semanlink.net/tag/concept_bottleneck_models|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/concept_bottleneck_models|broader|http://www.semanlink.net/tag/explainable_ai +http://www.semanlink.net/tag/concept_bottleneck_models|creationDate|2020-07-10 +http://www.semanlink.net/tag/concept_bottleneck_models|comment|first predicting concepts that are provided at training time, and then using these concepts to predict the label. +http://www.semanlink.net/tag/concept_bottleneck_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/concept_bottleneck_models|uri|http://www.semanlink.net/tag/concept_bottleneck_models +http://www.semanlink.net/tag/concept_bottleneck_models|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/concept_bottleneck_models|broader_prefLabel|Explainable AI +http://www.semanlink.net/tag/niger|prefLabel|Niger +http://www.semanlink.net/tag/niger|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/niger|broader|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/niger|broader|http://www.semanlink.net/tag/favoris +http://www.semanlink.net/tag/niger|related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/niger|related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/niger|related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/niger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/niger|uri|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/niger|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/niger|broader_prefLabel|Sahel +http://www.semanlink.net/tag/niger|broader_prefLabel|Favoris +http://www.semanlink.net/tag/niger|broader_altLabel|favorites +http://www.semanlink.net/tag/fps_and_ldow2008|creationTime|2008-05-08T14:26:47Z +http://www.semanlink.net/tag/fps_and_ldow2008|prefLabel|fps AND LDOW2008 +http://www.semanlink.net/tag/fps_and_ldow2008|broader|http://www.semanlink.net/tag/semantic_enterprise +http://www.semanlink.net/tag/fps_and_ldow2008|broader|http://www.semanlink.net/tag/linking_enterprise_data +http://www.semanlink.net/tag/fps_and_ldow2008|broader|http://www.semanlink.net/tag/fps +http://www.semanlink.net/tag/fps_and_ldow2008|broader|http://www.semanlink.net/tag/ldow2008 +http://www.semanlink.net/tag/fps_and_ldow2008|broader|http://www.semanlink.net/tag/fps_and_www_2008 
+http://www.semanlink.net/tag/fps_and_ldow2008|broader|http://www.semanlink.net/tag/sw_in_technical_automotive_documentation +http://www.semanlink.net/tag/fps_and_ldow2008|related|http://www.semanlink.net/tag/hugh_glaser +http://www.semanlink.net/tag/fps_and_ldow2008|related|http://www.semanlink.net/tag/rdf_forms +http://www.semanlink.net/tag/fps_and_ldow2008|related|http://www.semanlink.net/tag/linking_enterprise_data +http://www.semanlink.net/tag/fps_and_ldow2008|creationDate|2008-05-08 +http://www.semanlink.net/tag/fps_and_ldow2008|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps_and_ldow2008|uri|http://www.semanlink.net/tag/fps_and_ldow2008 +http://www.semanlink.net/tag/fps_and_ldow2008|broader_prefLabel|Semantic Enterprise +http://www.semanlink.net/tag/fps_and_ldow2008|broader_prefLabel|Linking Enterprise Data +http://www.semanlink.net/tag/fps_and_ldow2008|broader_prefLabel|fps +http://www.semanlink.net/tag/fps_and_ldow2008|broader_prefLabel|LDOW2008 +http://www.semanlink.net/tag/fps_and_ldow2008|broader_prefLabel|fps and WWW 2008 +http://www.semanlink.net/tag/fps_and_ldow2008|broader_prefLabel|SW in Technical Automotive Documentation +http://www.semanlink.net/tag/fps_and_ldow2008|broader_altLabel|Enterprise Semantic Web +http://www.semanlink.net/tag/fps_and_ldow2008|broader_altLabel|Corporate Semantic Web +http://www.semanlink.net/tag/fps_and_ldow2008|broader_altLabel|Semantic Web in the enterprise +http://www.semanlink.net/tag/fps_and_ldow2008|broader_altLabel|Linked Data in enterprise +http://www.semanlink.net/tag/civilisations_precolombiennes|prefLabel|Civilisations précolombiennes +http://www.semanlink.net/tag/civilisations_precolombiennes|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/civilisations_precolombiennes|broader|http://www.semanlink.net/tag/amerique +http://www.semanlink.net/tag/civilisations_precolombiennes|broader|http://www.semanlink.net/tag/amerindien +http://www.semanlink.net/tag/civilisations_precolombiennes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/civilisations_precolombiennes|uri|http://www.semanlink.net/tag/civilisations_precolombiennes +http://www.semanlink.net/tag/civilisations_precolombiennes|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/civilisations_precolombiennes|broader_prefLabel|Amérique +http://www.semanlink.net/tag/civilisations_precolombiennes|broader_prefLabel|Amérindien +http://www.semanlink.net/tag/civilisations_precolombiennes|broader_altLabel|Native americans +http://www.semanlink.net/tag/civilisations_precolombiennes|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/new_yorker|creationTime|2012-08-17T12:37:00Z +http://www.semanlink.net/tag/new_yorker|prefLabel|New Yorker +http://www.semanlink.net/tag/new_yorker|broader|http://www.semanlink.net/tag/journal +http://www.semanlink.net/tag/new_yorker|creationDate|2012-08-17 +http://www.semanlink.net/tag/new_yorker|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/new_yorker|uri|http://www.semanlink.net/tag/new_yorker +http://www.semanlink.net/tag/new_yorker|broader_prefLabel|Presse +http://www.semanlink.net/tag/new_yorker|broader_altLabel|Journal +http://www.semanlink.net/tag/peinture_rupestre|prefLabel|Peinture rupestre +http://www.semanlink.net/tag/peinture_rupestre|broader|http://www.semanlink.net/tag/rockart 
+http://www.semanlink.net/tag/peinture_rupestre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/peinture_rupestre|uri|http://www.semanlink.net/tag/peinture_rupestre +http://www.semanlink.net/tag/peinture_rupestre|broader_prefLabel|Rockart +http://www.semanlink.net/tag/economies_d_energie|prefLabel|Economies d'énergie +http://www.semanlink.net/tag/economies_d_energie|broader|http://www.semanlink.net/tag/energie +http://www.semanlink.net/tag/economies_d_energie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/economies_d_energie|uri|http://www.semanlink.net/tag/economies_d_energie +http://www.semanlink.net/tag/economies_d_energie|broader_prefLabel|Energie +http://www.semanlink.net/tag/text_multi_label_classification|creationTime|2018-08-06T17:48:23Z +http://www.semanlink.net/tag/text_multi_label_classification|prefLabel|Multi-label Text classification +http://www.semanlink.net/tag/text_multi_label_classification|broader|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/tag/text_multi_label_classification|broader|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/tag/text_multi_label_classification|creationDate|2018-08-06 +http://www.semanlink.net/tag/text_multi_label_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_multi_label_classification|uri|http://www.semanlink.net/tag/text_multi_label_classification +http://www.semanlink.net/tag/text_multi_label_classification|broader_prefLabel|Multi-label classification +http://www.semanlink.net/tag/text_multi_label_classification|broader_prefLabel|Text Classification +http://www.semanlink.net/tag/text_multi_label_classification|broader_altLabel|Multilabel classification +http://www.semanlink.net/tag/text_multi_label_classification|broader_related|http://www.semanlink.net/tag/nlp_4_semanlink +http://www.semanlink.net/tag/paris|prefLabel|Paris +http://www.semanlink.net/tag/paris|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/paris|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/paris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paris|uri|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/paris|broader_prefLabel|Ville +http://www.semanlink.net/tag/paris|broader_prefLabel|France +http://www.semanlink.net/tag/machine_learning_techniques|creationTime|2015-10-16T11:01:41Z +http://www.semanlink.net/tag/machine_learning_techniques|prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/machine_learning_techniques|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/machine_learning_techniques|creationDate|2015-10-16 +http://www.semanlink.net/tag/machine_learning_techniques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/machine_learning_techniques|uri|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/machine_learning_techniques|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/machine_learning_techniques|broader_altLabel|ML +http://www.semanlink.net/tag/machine_learning_techniques|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/machine_learning_techniques|broader_related|http://www.semanlink.net/tag/data_mining 
+http://www.semanlink.net/tag/converting_data_into_rdf|creationTime|2008-10-29T17:55:25Z +http://www.semanlink.net/tag/converting_data_into_rdf|prefLabel|Converting data into RDF +http://www.semanlink.net/tag/converting_data_into_rdf|creationDate|2008-10-29 +http://www.semanlink.net/tag/converting_data_into_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/converting_data_into_rdf|uri|http://www.semanlink.net/tag/converting_data_into_rdf +http://www.semanlink.net/tag/astronomie_multi_signaux|creationTime|2017-10-18T13:44:14Z +http://www.semanlink.net/tag/astronomie_multi_signaux|prefLabel|Astronomie multi-signaux +http://www.semanlink.net/tag/astronomie_multi_signaux|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/astronomie_multi_signaux|creationDate|2017-10-18 +http://www.semanlink.net/tag/astronomie_multi_signaux|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/astronomie_multi_signaux|uri|http://www.semanlink.net/tag/astronomie_multi_signaux +http://www.semanlink.net/tag/astronomie_multi_signaux|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/graphs_nlp|creationTime|2020-12-13T23:55:54Z +http://www.semanlink.net/tag/graphs_nlp|prefLabel|Graphs + NLP +http://www.semanlink.net/tag/graphs_nlp|broader|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/tag/graphs_nlp|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/graphs_nlp|broader|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/graphs_nlp|creationDate|2020-12-13 +http://www.semanlink.net/tag/graphs_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graphs_nlp|uri|http://www.semanlink.net/tag/graphs_nlp +http://www.semanlink.net/tag/graphs_nlp|broader_prefLabel|Graphs+Machine Learning +http://www.semanlink.net/tag/graphs_nlp|broader_prefLabel|NLP +http://www.semanlink.net/tag/graphs_nlp|broader_prefLabel|Graph +http://www.semanlink.net/tag/graphs_nlp|broader_altLabel|Graph Machine Learning +http://www.semanlink.net/tag/graphs_nlp|broader_altLabel|TALN +http://www.semanlink.net/tag/graphs_nlp|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/graphs_nlp|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/boolean|creationTime|2010-11-12T15:27:53Z +http://www.semanlink.net/tag/boolean|prefLabel|Boolean +http://www.semanlink.net/tag/boolean|creationDate|2010-11-12 +http://www.semanlink.net/tag/boolean|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/boolean|uri|http://www.semanlink.net/tag/boolean +http://www.semanlink.net/tag/jupiter_europe|prefLabel|Jupiter/Europe +http://www.semanlink.net/tag/jupiter_europe|broader|http://www.semanlink.net/tag/jupiter +http://www.semanlink.net/tag/jupiter_europe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jupiter_europe|uri|http://www.semanlink.net/tag/jupiter_europe +http://www.semanlink.net/tag/jupiter_europe|broader_prefLabel|Jupiter +http://www.semanlink.net/tag/zitgist|creationTime|2007-05-23T21:11:50Z +http://www.semanlink.net/tag/zitgist|prefLabel|Zitgist +http://www.semanlink.net/tag/zitgist|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/zitgist|broader|http://www.semanlink.net/tag/rdf_browser +http://www.semanlink.net/tag/zitgist|broader|http://www.semanlink.net/tag/linked_data 
+http://www.semanlink.net/tag/zitgist|broader|http://www.semanlink.net/tag/semantic_web_services +http://www.semanlink.net/tag/zitgist|related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/zitgist|creationDate|2007-05-23 +http://www.semanlink.net/tag/zitgist|comment|"Zitgist (pronounced ""zeitgeist"") is an industry standards compliant Semantic Web Query Service. Its goal is to help Web users locate data, information, and knowledge on the Web. +
+Zitgist is based on a new search paradigm: users describe characteristics of their search target, instead of relying entirely on content keywords" +http://www.semanlink.net/tag/zitgist|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zitgist|uri|http://www.semanlink.net/tag/zitgist +http://www.semanlink.net/tag/zitgist|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/zitgist|broader_prefLabel|RDF browser +http://www.semanlink.net/tag/zitgist|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/zitgist|broader_prefLabel|Semantic Web Services +http://www.semanlink.net/tag/zitgist|broader_altLabel|LD +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/virtuoso +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/zitgist|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/biopiles|creationTime|2014-07-25T15:55:56Z +http://www.semanlink.net/tag/biopiles|prefLabel|Biopiles +http://www.semanlink.net/tag/biopiles|broader|http://www.semanlink.net/tag/energies_renouvelables +http://www.semanlink.net/tag/biopiles|related|http://www.semanlink.net/tag/batteries +http://www.semanlink.net/tag/biopiles|creationDate|2014-07-25 +http://www.semanlink.net/tag/biopiles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/biopiles|uri|http://www.semanlink.net/tag/biopiles +http://www.semanlink.net/tag/biopiles|broader_prefLabel|Energies renouvelables +http://www.semanlink.net/tag/nlp_french|creationTime|2017-06-20T13:46:05Z +http://www.semanlink.net/tag/nlp_french|prefLabel|NLP: French +http://www.semanlink.net/tag/nlp_french|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_french|creationDate|2017-06-20 +http://www.semanlink.net/tag/nlp_french|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_french|altLabel|NLP: Français +http://www.semanlink.net/tag/nlp_french|uri|http://www.semanlink.net/tag/nlp_french +http://www.semanlink.net/tag/nlp_french|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_french|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_french|broader_altLabel|Traitement Automatique du Langage Naturel 
+http://www.semanlink.net/tag/nlp_french|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/rip|creationTime|2013-07-29T10:06:10Z +http://www.semanlink.net/tag/rip|prefLabel|RIP +http://www.semanlink.net/tag/rip|creationDate|2013-07-29 +http://www.semanlink.net/tag/rip|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rip|uri|http://www.semanlink.net/tag/rip +http://www.semanlink.net/tag/film_francais|prefLabel|Film français +http://www.semanlink.net/tag/film_francais|broader|http://www.semanlink.net/tag/cinema_francais +http://www.semanlink.net/tag/film_francais|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/film_francais|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_francais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_francais|uri|http://www.semanlink.net/tag/film_francais +http://www.semanlink.net/tag/film_francais|broader_prefLabel|Cinéma français +http://www.semanlink.net/tag/film_francais|broader_prefLabel|France +http://www.semanlink.net/tag/film_francais|broader_prefLabel|Film +http://www.semanlink.net/tag/web_intelligence|creationTime|2013-04-02T01:23:50Z +http://www.semanlink.net/tag/web_intelligence|prefLabel|Web Intelligence +http://www.semanlink.net/tag/web_intelligence|creationDate|2013-04-02 +http://www.semanlink.net/tag/web_intelligence|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_intelligence|uri|http://www.semanlink.net/tag/web_intelligence +http://www.semanlink.net/tag/uri_reference|creationTime|2007-11-17T16:01:27Z +http://www.semanlink.net/tag/uri_reference|prefLabel|URI Reference +http://www.semanlink.net/tag/uri_reference|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/uri_reference|creationDate|2007-11-17 +http://www.semanlink.net/tag/uri_reference|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uri_reference|uri|http://www.semanlink.net/tag/uri_reference +http://www.semanlink.net/tag/uri_reference|broader_prefLabel|URI +http://www.semanlink.net/tag/kullback_leibler_divergence|creationTime|2020-05-31T10:31:48Z +http://www.semanlink.net/tag/kullback_leibler_divergence|prefLabel|Kullback–Leibler divergence +http://www.semanlink.net/tag/kullback_leibler_divergence|broader|http://www.semanlink.net/tag/information_theory +http://www.semanlink.net/tag/kullback_leibler_divergence|related|http://www.semanlink.net/tag/cross_entropy +http://www.semanlink.net/tag/kullback_leibler_divergence|creationDate|2020-05-31 +http://www.semanlink.net/tag/kullback_leibler_divergence|comment|measure of how one probability distribution is different from a second, reference one. Equal to the difference between cross entropy and entropy. 
+http://www.semanlink.net/tag/kullback_leibler_divergence|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/kullback_leibler_divergence|describedBy|https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
+http://www.semanlink.net/tag/kullback_leibler_divergence|altLabel|KL divergence
+http://www.semanlink.net/tag/kullback_leibler_divergence|uri|http://www.semanlink.net/tag/kullback_leibler_divergence
+http://www.semanlink.net/tag/kullback_leibler_divergence|broader_prefLabel|Information theory
+http://www.semanlink.net/tag/optimization|creationTime|2019-05-27T09:42:35Z
+http://www.semanlink.net/tag/optimization|prefLabel|Optimization
+http://www.semanlink.net/tag/optimization|creationDate|2019-05-27
+http://www.semanlink.net/tag/optimization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/optimization|uri|http://www.semanlink.net/tag/optimization
+http://www.semanlink.net/tag/film_policier|prefLabel|Film policier
+http://www.semanlink.net/tag/film_policier|broader|http://www.semanlink.net/tag/film
+http://www.semanlink.net/tag/film_policier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/film_policier|uri|http://www.semanlink.net/tag/film_policier
+http://www.semanlink.net/tag/film_policier|broader_prefLabel|Film
+http://www.semanlink.net/tag/microsoft|prefLabel|Microsoft
+http://www.semanlink.net/tag/microsoft|broader|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/microsoft|broader|http://www.semanlink.net/tag/software
+http://www.semanlink.net/tag/microsoft|related|http://www.semanlink.net/tag/bill_gates
+http://www.semanlink.net/tag/microsoft|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/microsoft|uri|http://www.semanlink.net/tag/microsoft
+http://www.semanlink.net/tag/microsoft|broader_prefLabel|Entreprise
+http://www.semanlink.net/tag/microsoft|broader_prefLabel|Software
+http://www.semanlink.net/tag/adaboost|creationTime|2018-10-28T00:50:47Z
+http://www.semanlink.net/tag/adaboost|prefLabel|AdaBoost
+http://www.semanlink.net/tag/adaboost|broader|http://www.semanlink.net/tag/boosting
+http://www.semanlink.net/tag/adaboost|creationDate|2018-10-28
+http://www.semanlink.net/tag/adaboost|comment|"""Adaptive boosting""
+
+(Authors won Gödel Prize for their work)
+
+output of the 'weak learners' is combined into a weighted sum that represents the final output of the boosted classifier. 
Sensitive to noisy data and outliers (says wikipedia) + +> AdaBoost (with decision trees as the weak learners) is often referred to as the best out-of-the-box classifier +" +http://www.semanlink.net/tag/adaboost|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/adaboost|describedBy|https://en.wikipedia.org/wiki/AdaBoost +http://www.semanlink.net/tag/adaboost|uri|http://www.semanlink.net/tag/adaboost +http://www.semanlink.net/tag/adaboost|broader_prefLabel|Boosting +http://www.semanlink.net/tag/adaboost|broader_related|http://www.semanlink.net/tag/bootstrap_aggregating_bagging +http://www.semanlink.net/tag/antoine_bordes|creationTime|2018-01-05T14:47:25Z +http://www.semanlink.net/tag/antoine_bordes|prefLabel|Antoine Bordes +http://www.semanlink.net/tag/antoine_bordes|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/antoine_bordes|related|http://www.semanlink.net/tag/facebook_fair +http://www.semanlink.net/tag/antoine_bordes|related|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/antoine_bordes|creationDate|2018-01-05 +http://www.semanlink.net/tag/antoine_bordes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antoine_bordes|uri|http://www.semanlink.net/tag/antoine_bordes +http://www.semanlink.net/tag/antoine_bordes|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/localization|creationTime|2013-07-03T13:43:29Z +http://www.semanlink.net/tag/localization|prefLabel|localization +http://www.semanlink.net/tag/localization|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/localization|creationDate|2013-07-03 +http://www.semanlink.net/tag/localization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/localization|uri|http://www.semanlink.net/tag/localization +http://www.semanlink.net/tag/localization|broader_prefLabel|Dev +http://www.semanlink.net/tag/blues|creationTime|2013-02-22T14:11:53Z +http://www.semanlink.net/tag/blues|prefLabel|Blues +http://www.semanlink.net/tag/blues|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/blues|creationDate|2013-02-22 +http://www.semanlink.net/tag/blues|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blues|uri|http://www.semanlink.net/tag/blues +http://www.semanlink.net/tag/blues|broader_prefLabel|Musique +http://www.semanlink.net/tag/blues|broader_altLabel|Music +http://www.semanlink.net/tag/google_colab|creationTime|2018-05-31T08:31:12Z +http://www.semanlink.net/tag/google_colab|prefLabel|Google Colab +http://www.semanlink.net/tag/google_colab|broader|http://www.semanlink.net/tag/ai_cloud_service +http://www.semanlink.net/tag/google_colab|broader|http://www.semanlink.net/tag/google_research +http://www.semanlink.net/tag/google_colab|creationDate|2018-05-31 +http://www.semanlink.net/tag/google_colab|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_colab|homepage|https://colab.research.google.com/ +http://www.semanlink.net/tag/google_colab|altLabel|Google Colaboratory +http://www.semanlink.net/tag/google_colab|uri|http://www.semanlink.net/tag/google_colab +http://www.semanlink.net/tag/google_colab|broader_prefLabel|AI cloud service +http://www.semanlink.net/tag/google_colab|broader_prefLabel|Google Research +http://www.semanlink.net/tag/hidden_markov_model|creationTime|2016-01-31T13:13:12Z 
+http://www.semanlink.net/tag/hidden_markov_model|prefLabel|Hidden Markov model +http://www.semanlink.net/tag/hidden_markov_model|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/hidden_markov_model|broader|http://www.semanlink.net/tag/markov +http://www.semanlink.net/tag/hidden_markov_model|creationDate|2016-01-31 +http://www.semanlink.net/tag/hidden_markov_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hidden_markov_model|describedBy|https://en.wikipedia.org/wiki/Hidden_Markov_model +http://www.semanlink.net/tag/hidden_markov_model|uri|http://www.semanlink.net/tag/hidden_markov_model +http://www.semanlink.net/tag/hidden_markov_model|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/hidden_markov_model|broader_prefLabel|Markov +http://www.semanlink.net/tag/swad_e|prefLabel|SWAD-E +http://www.semanlink.net/tag/swad_e|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/swad_e|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/swad_e|uri|http://www.semanlink.net/tag/swad_e +http://www.semanlink.net/tag/swad_e|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/swad_e|broader_altLabel|sw +http://www.semanlink.net/tag/swad_e|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/african_languages|creationTime|2019-08-29T23:08:13Z +http://www.semanlink.net/tag/african_languages|prefLabel|African languages +http://www.semanlink.net/tag/african_languages|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/african_languages|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/african_languages|creationDate|2019-08-29 +http://www.semanlink.net/tag/african_languages|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/african_languages|uri|http://www.semanlink.net/tag/african_languages +http://www.semanlink.net/tag/african_languages|broader_prefLabel|Afrique +http://www.semanlink.net/tag/african_languages|broader_prefLabel|Langues +http://www.semanlink.net/tag/african_languages|broader_altLabel|Africa +http://www.semanlink.net/tag/google_car|creationTime|2012-11-30T22:28:13Z +http://www.semanlink.net/tag/google_car|prefLabel|Google car +http://www.semanlink.net/tag/google_car|broader|http://www.semanlink.net/tag/driverless_car +http://www.semanlink.net/tag/google_car|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_car|broader|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/tag/google_car|creationDate|2012-11-30 +http://www.semanlink.net/tag/google_car|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_car|uri|http://www.semanlink.net/tag/google_car +http://www.semanlink.net/tag/google_car|broader_prefLabel|Driverless car +http://www.semanlink.net/tag/google_car|broader_prefLabel|Google +http://www.semanlink.net/tag/google_car|broader_prefLabel|AI@Google +http://www.semanlink.net/tag/google_car|broader_altLabel|Self-driving car +http://www.semanlink.net/tag/google_car|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/servlet|prefLabel|Servlet +http://www.semanlink.net/tag/servlet|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/servlet|broader|http://www.semanlink.net/tag/web_app +http://www.semanlink.net/tag/servlet|broader|http://www.semanlink.net/tag/java_dev 
+http://www.semanlink.net/tag/servlet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/servlet|uri|http://www.semanlink.net/tag/servlet +http://www.semanlink.net/tag/servlet|broader_prefLabel|Java +http://www.semanlink.net/tag/servlet|broader_prefLabel|Web app +http://www.semanlink.net/tag/servlet|broader_prefLabel|Java dev +http://www.semanlink.net/tag/pytorch|creationTime|2017-10-16T14:39:07Z +http://www.semanlink.net/tag/pytorch|prefLabel|PyTorch +http://www.semanlink.net/tag/pytorch|broader|http://www.semanlink.net/tag/deep_learning_frameworks +http://www.semanlink.net/tag/pytorch|related|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/tag/pytorch|creationDate|2017-10-16 +http://www.semanlink.net/tag/pytorch|comment|"- A replacement for [#NumPy](/tag/numpy) to use the power of GPUs +- a deep learning research platform +" +http://www.semanlink.net/tag/pytorch|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pytorch|homepage|http://pytorch.org/ +http://www.semanlink.net/tag/pytorch|uri|http://www.semanlink.net/tag/pytorch +http://www.semanlink.net/tag/pytorch|broader_prefLabel|Deep Learning frameworks +http://www.semanlink.net/tag/volkswagate|creationTime|2015-10-03T10:17:55Z +http://www.semanlink.net/tag/volkswagate|prefLabel|Volkswagate +http://www.semanlink.net/tag/volkswagate|broader|http://www.semanlink.net/tag/volkswagen +http://www.semanlink.net/tag/volkswagate|creationDate|2015-10-03 +http://www.semanlink.net/tag/volkswagate|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/volkswagate|uri|http://www.semanlink.net/tag/volkswagate +http://www.semanlink.net/tag/volkswagate|broader_prefLabel|Volkswagen +http://www.semanlink.net/tag/volkswagate|broader_altLabel|VW +http://www.semanlink.net/tag/nepomuk|creationTime|2008-06-04T23:07:59Z +http://www.semanlink.net/tag/nepomuk|prefLabel|NEPOMUK +http://www.semanlink.net/tag/nepomuk|broader|http://www.semanlink.net/tag/semantic_web_project +http://www.semanlink.net/tag/nepomuk|broader|http://www.semanlink.net/tag/semantic_desktop +http://www.semanlink.net/tag/nepomuk|related|http://www.semanlink.net/tag/leo_sauermann +http://www.semanlink.net/tag/nepomuk|creationDate|2008-06-04 +http://www.semanlink.net/tag/nepomuk|comment|Social Semantic Desktop +http://www.semanlink.net/tag/nepomuk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nepomuk|describedBy|http://nepomuk.semanticdesktop.org/xwiki/bin/view/Main1/ +http://www.semanlink.net/tag/nepomuk|describedBy|http://nepomuk.semanticdesktop.org +http://www.semanlink.net/tag/nepomuk|uri|http://www.semanlink.net/tag/nepomuk +http://www.semanlink.net/tag/nepomuk|broader_prefLabel|Semantic Web project +http://www.semanlink.net/tag/nepomuk|broader_prefLabel|Semantic Desktop +http://www.semanlink.net/tag/nepomuk|broader_related|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/vehicular_communication_systems|creationTime|2014-02-03T22:11:43Z +http://www.semanlink.net/tag/vehicular_communication_systems|prefLabel|Vehicular communication systems +http://www.semanlink.net/tag/vehicular_communication_systems|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/vehicular_communication_systems|creationDate|2014-02-03 +http://www.semanlink.net/tag/vehicular_communication_systems|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/vehicular_communication_systems|describedBy|https://en.wikipedia.org/wiki/Vehicular_communication_systems +http://www.semanlink.net/tag/vehicular_communication_systems|altLabel|V2V +http://www.semanlink.net/tag/vehicular_communication_systems|uri|http://www.semanlink.net/tag/vehicular_communication_systems +http://www.semanlink.net/tag/vehicular_communication_systems|broader_prefLabel|Automobile +http://www.semanlink.net/tag/vehicular_communication_systems|broader_altLabel|Automotive +http://www.semanlink.net/tag/fasttext|creationTime|2017-06-28T01:04:05Z +http://www.semanlink.net/tag/fasttext|prefLabel|FastText +http://www.semanlink.net/tag/fasttext|broader|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/fasttext|broader|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/tag/fasttext|broader|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/fasttext|related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/fasttext|creationDate|2017-06-28 +http://www.semanlink.net/tag/fasttext|comment|"Classifier on top of a sentence2vec model. + +Main idea: the morphological structure of a word carries important information about the meaning of the word, which is not taken into account by traditional [word embeddings](/tag/word_embedding). This is especially significant for morphologically rich languages (German, Turkish) in which a single word can have a large number of morphological forms, each of which might occur rarely, thus making it hard to train good word embeddings. + +FastText attempts to solve this by treating each word as the aggregation of its subwords (uses character n-grams as features -> avoids the OOV (out of vocabulary) problem) + +(FastText represents words as the sum of their n-gram representations trained with a skip-gram model) + +Embeddings learned using FastText (trained on wikipedia) are available in [many languages](https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md) + + + + + +" +http://www.semanlink.net/tag/fasttext|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fasttext|homepage|https://fasttext.cc +http://www.semanlink.net/tag/fasttext|uri|http://www.semanlink.net/tag/fasttext +http://www.semanlink.net/tag/fasttext|broader_prefLabel|Word embeddings +http://www.semanlink.net/tag/fasttext|broader_prefLabel|Text Classification +http://www.semanlink.net/tag/fasttext|broader_prefLabel|NLP@Facebook +http://www.semanlink.net/tag/fasttext|broader_altLabel|Word Embedding +http://www.semanlink.net/tag/fasttext|broader_altLabel|Plongement lexical +http://www.semanlink.net/tag/fasttext|broader_related|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/fasttext|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/fasttext|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/pierre_de_volvic|creationTime|2016-06-09T22:44:20Z +http://www.semanlink.net/tag/pierre_de_volvic|prefLabel|Pierre de Volvic +http://www.semanlink.net/tag/pierre_de_volvic|broader|http://www.semanlink.net/tag/volvic +http://www.semanlink.net/tag/pierre_de_volvic|broader|http://www.semanlink.net/tag/volcan +http://www.semanlink.net/tag/pierre_de_volvic|creationDate|2016-06-09 +http://www.semanlink.net/tag/pierre_de_volvic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/pierre_de_volvic|uri|http://www.semanlink.net/tag/pierre_de_volvic +http://www.semanlink.net/tag/pierre_de_volvic|broader_prefLabel|Volvic +http://www.semanlink.net/tag/pierre_de_volvic|broader_prefLabel|Volcan +http://www.semanlink.net/tag/it_failures|creationTime|2010-09-06T21:45:59Z +http://www.semanlink.net/tag/it_failures|prefLabel|IT failures +http://www.semanlink.net/tag/it_failures|creationDate|2010-09-06 +http://www.semanlink.net/tag/it_failures|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/it_failures|uri|http://www.semanlink.net/tag/it_failures +http://www.semanlink.net/tag/identifying_triples|creationTime|2007-12-31T16:26:14Z +http://www.semanlink.net/tag/identifying_triples|prefLabel|Identifying triples +http://www.semanlink.net/tag/identifying_triples|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/identifying_triples|creationDate|2007-12-31 +http://www.semanlink.net/tag/identifying_triples|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/identifying_triples|uri|http://www.semanlink.net/tag/identifying_triples +http://www.semanlink.net/tag/identifying_triples|broader_prefLabel|RDF +http://www.semanlink.net/tag/identifying_triples|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/identifying_triples|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/identifying_triples|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/identifying_triples|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/identifying_triples|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/milliardaire|prefLabel|Milliardaire +http://www.semanlink.net/tag/milliardaire|broader|http://www.semanlink.net/tag/money +http://www.semanlink.net/tag/milliardaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/milliardaire|uri|http://www.semanlink.net/tag/milliardaire +http://www.semanlink.net/tag/milliardaire|broader_prefLabel|Money +http://www.semanlink.net/tag/milliardaire|broader_altLabel|Monnaie +http://www.semanlink.net/tag/ai_dangers|creationTime|2017-12-19T13:51:38Z +http://www.semanlink.net/tag/ai_dangers|prefLabel|AI: dangers +http://www.semanlink.net/tag/ai_dangers|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_dangers|creationDate|2017-12-19 +http://www.semanlink.net/tag/ai_dangers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_dangers|altLabel|AI risks +http://www.semanlink.net/tag/ai_dangers|uri|http://www.semanlink.net/tag/ai_dangers +http://www.semanlink.net/tag/ai_dangers|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_dangers|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_dangers|broader_altLabel|AI +http://www.semanlink.net/tag/ai_dangers|broader_altLabel|IA +http://www.semanlink.net/tag/ai_dangers|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/recette_de_cuisine|creationTime|2012-08-15T18:28:51Z +http://www.semanlink.net/tag/recette_de_cuisine|prefLabel|Recette de cuisine +http://www.semanlink.net/tag/recette_de_cuisine|broader|http://www.semanlink.net/tag/gastronomie +http://www.semanlink.net/tag/recette_de_cuisine|creationDate|2012-08-15 
+http://www.semanlink.net/tag/recette_de_cuisine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/recette_de_cuisine|uri|http://www.semanlink.net/tag/recette_de_cuisine +http://www.semanlink.net/tag/recette_de_cuisine|broader_prefLabel|Gastronomie +http://www.semanlink.net/tag/recette_de_cuisine|broader_altLabel|Cuisine +http://www.semanlink.net/tag/zapata|prefLabel|Zapata +http://www.semanlink.net/tag/zapata|broader|http://www.semanlink.net/tag/mexique +http://www.semanlink.net/tag/zapata|broader|http://www.semanlink.net/tag/personnage_historique +http://www.semanlink.net/tag/zapata|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zapata|uri|http://www.semanlink.net/tag/zapata +http://www.semanlink.net/tag/zapata|broader_prefLabel|Mexique +http://www.semanlink.net/tag/zapata|broader_prefLabel|Personnage historique +http://www.semanlink.net/tag/delon|creationTime|2020-05-06T23:23:29Z +http://www.semanlink.net/tag/delon|prefLabel|Delon +http://www.semanlink.net/tag/delon|broader|http://www.semanlink.net/tag/acteur +http://www.semanlink.net/tag/delon|creationDate|2020-05-06 +http://www.semanlink.net/tag/delon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/delon|uri|http://www.semanlink.net/tag/delon +http://www.semanlink.net/tag/delon|broader_prefLabel|Acteur +http://www.semanlink.net/tag/portugal|prefLabel|Portugal +http://www.semanlink.net/tag/portugal|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/portugal|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/portugal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/portugal|uri|http://www.semanlink.net/tag/portugal +http://www.semanlink.net/tag/portugal|broader_prefLabel|Europe +http://www.semanlink.net/tag/portugal|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/nokia|prefLabel|Nokia +http://www.semanlink.net/tag/nokia|broader|http://www.semanlink.net/tag/telephone +http://www.semanlink.net/tag/nokia|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/nokia|broader|http://www.semanlink.net/tag/finlande +http://www.semanlink.net/tag/nokia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nokia|uri|http://www.semanlink.net/tag/nokia +http://www.semanlink.net/tag/nokia|broader_prefLabel|Téléphone +http://www.semanlink.net/tag/nokia|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/nokia|broader_prefLabel|Finlande +http://www.semanlink.net/tag/nokia|broader_altLabel|Téléphonie +http://www.semanlink.net/tag/ai_knowledge|creationTime|2019-08-07T01:21:13Z +http://www.semanlink.net/tag/ai_knowledge|prefLabel|AI + Knowledge +http://www.semanlink.net/tag/ai_knowledge|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_knowledge|broader|http://www.semanlink.net/tag/knowledge +http://www.semanlink.net/tag/ai_knowledge|related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/ai_knowledge|related|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/ai_knowledge|creationDate|2019-08-07 +http://www.semanlink.net/tag/ai_knowledge|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_knowledge|altLabel|Domain Knowledge in AI +http://www.semanlink.net/tag/ai_knowledge|uri|http://www.semanlink.net/tag/ai_knowledge 
+http://www.semanlink.net/tag/ai_knowledge|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_knowledge|broader_prefLabel|Knowledge +http://www.semanlink.net/tag/ai_knowledge|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_knowledge|broader_altLabel|AI +http://www.semanlink.net/tag/ai_knowledge|broader_altLabel|IA +http://www.semanlink.net/tag/ai_knowledge|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/nlp_human_resources|creationTime|2020-01-23T01:18:54Z +http://www.semanlink.net/tag/nlp_human_resources|prefLabel|NLP + Human Resources +http://www.semanlink.net/tag/nlp_human_resources|broader|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/tag/nlp_human_resources|creationDate|2020-01-23 +http://www.semanlink.net/tag/nlp_human_resources|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_human_resources|uri|http://www.semanlink.net/tag/nlp_human_resources +http://www.semanlink.net/tag/nlp_human_resources|broader_prefLabel|NLP: use cases +http://www.semanlink.net/tag/nlp_human_resources|broader_altLabel|NLP: applications +http://www.semanlink.net/tag/goldman_sachs|creationTime|2010-05-04T09:36:48Z +http://www.semanlink.net/tag/goldman_sachs|prefLabel|Goldman Sachs +http://www.semanlink.net/tag/goldman_sachs|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/goldman_sachs|creationDate|2010-05-04 +http://www.semanlink.net/tag/goldman_sachs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/goldman_sachs|uri|http://www.semanlink.net/tag/goldman_sachs +http://www.semanlink.net/tag/goldman_sachs|broader_prefLabel|Finance +http://www.semanlink.net/tag/chirac_ami_des_africains|creationTime|2007-06-09T11:29:13Z +http://www.semanlink.net/tag/chirac_ami_des_africains|prefLabel|Chirac ami des Africains +http://www.semanlink.net/tag/chirac_ami_des_africains|broader|http://www.semanlink.net/tag/chirac +http://www.semanlink.net/tag/chirac_ami_des_africains|creationDate|2007-06-09 +http://www.semanlink.net/tag/chirac_ami_des_africains|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chirac_ami_des_africains|uri|http://www.semanlink.net/tag/chirac_ami_des_africains +http://www.semanlink.net/tag/chirac_ami_des_africains|broader_prefLabel|Chirac +http://www.semanlink.net/tag/dbpedia_mobile|creationTime|2008-04-23T13:53:03Z +http://www.semanlink.net/tag/dbpedia_mobile|prefLabel|DBpedia Mobile +http://www.semanlink.net/tag/dbpedia_mobile|broader|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/dbpedia_mobile|creationDate|2008-04-23 +http://www.semanlink.net/tag/dbpedia_mobile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dbpedia_mobile|describedBy|http://wiki.dbpedia.org/DBpediaMobile +http://www.semanlink.net/tag/dbpedia_mobile|uri|http://www.semanlink.net/tag/dbpedia_mobile +http://www.semanlink.net/tag/dbpedia_mobile|broader_prefLabel|dbpedia +http://www.semanlink.net/tag/dbpedia_mobile|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/dbpedia_mobile|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/nlp_google|creationTime|2018-10-12T18:24:26Z +http://www.semanlink.net/tag/nlp_google|prefLabel|NLP@Google +http://www.semanlink.net/tag/nlp_google|broader|http://www.semanlink.net/tag/ml_google 
+http://www.semanlink.net/tag/nlp_google|broader|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/tag/nlp_google|creationDate|2018-10-12 +http://www.semanlink.net/tag/nlp_google|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_google|uri|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/tag/nlp_google|broader_prefLabel|AI@Google +http://www.semanlink.net/tag/nlp_google|broader_prefLabel|NLP Teams +http://www.semanlink.net/tag/nlp_google|broader_altLabel|NLP Groups +http://www.semanlink.net/tag/the_dao|creationTime|2016-06-30T14:30:19Z +http://www.semanlink.net/tag/the_dao|prefLabel|The DAO +http://www.semanlink.net/tag/the_dao|broader|http://www.semanlink.net/tag/ethereum +http://www.semanlink.net/tag/the_dao|related|http://www.semanlink.net/tag/dao_attack +http://www.semanlink.net/tag/the_dao|creationDate|2016-06-30 +http://www.semanlink.net/tag/the_dao|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/the_dao|describedBy|https://en.wikipedia.org/wiki/The_DAO_(organization) +http://www.semanlink.net/tag/the_dao|uri|http://www.semanlink.net/tag/the_dao +http://www.semanlink.net/tag/the_dao|broader_prefLabel|Ethereum +http://www.semanlink.net/tag/the_dao|broader_related|http://www.semanlink.net/tag/bitcoin +http://www.semanlink.net/tag/regions_polaires|prefLabel|Régions polaires +http://www.semanlink.net/tag/regions_polaires|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/regions_polaires|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/regions_polaires|uri|http://www.semanlink.net/tag/regions_polaires +http://www.semanlink.net/tag/regions_polaires|broader_prefLabel|Géographie +http://www.semanlink.net/tag/guerre_de_yougoslavie|prefLabel|Guerre de Yougoslavie +http://www.semanlink.net/tag/guerre_de_yougoslavie|broader|http://www.semanlink.net/tag/yougoslavie +http://www.semanlink.net/tag/guerre_de_yougoslavie|broader|http://www.semanlink.net/tag/guerre +http://www.semanlink.net/tag/guerre_de_yougoslavie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/guerre_de_yougoslavie|uri|http://www.semanlink.net/tag/guerre_de_yougoslavie +http://www.semanlink.net/tag/guerre_de_yougoslavie|broader_prefLabel|Yougoslavie +http://www.semanlink.net/tag/guerre_de_yougoslavie|broader_prefLabel|Ex Yougoslavie +http://www.semanlink.net/tag/guerre_de_yougoslavie|broader_prefLabel|War +http://www.semanlink.net/tag/guerre_de_yougoslavie|broader_altLabel|Guerre +http://www.semanlink.net/tag/patent|prefLabel|Patent +http://www.semanlink.net/tag/patent|broader|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.semanlink.net/tag/patent|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/patent|altLabel|Brevet +http://www.semanlink.net/tag/patent|uri|http://www.semanlink.net/tag/patent +http://www.semanlink.net/tag/patent|broader_prefLabel|Propriété intellectuelle +http://www.semanlink.net/tag/inference|prefLabel|Inference +http://www.semanlink.net/tag/inference|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/inference|creationDate|2006-12-01 +http://www.semanlink.net/tag/inference|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/inference|uri|http://www.semanlink.net/tag/inference +http://www.semanlink.net/tag/inference|broader_prefLabel|Artificial Intelligence 
+http://www.semanlink.net/tag/inference|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/inference|broader_altLabel|AI +http://www.semanlink.net/tag/inference|broader_altLabel|IA +http://www.semanlink.net/tag/inference|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/antiquite|prefLabel|Antiquité +http://www.semanlink.net/tag/antiquite|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/antiquite|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/antiquite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antiquite|uri|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/antiquite|broader_prefLabel|Histoire +http://www.semanlink.net/tag/antiquite|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/antiquite|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/exomars|creationTime|2016-03-27T13:32:07Z +http://www.semanlink.net/tag/exomars|prefLabel|ExoMars +http://www.semanlink.net/tag/exomars|broader|http://www.semanlink.net/tag/exploration_marsienne +http://www.semanlink.net/tag/exomars|creationDate|2016-03-27 +http://www.semanlink.net/tag/exomars|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exomars|describedBy|https://en.wikipedia.org/wiki/ExoMars +http://www.semanlink.net/tag/exomars|uri|http://www.semanlink.net/tag/exomars +http://www.semanlink.net/tag/exomars|broader_prefLabel|Exploration marsienne +http://www.semanlink.net/tag/ghana_empire|creationTime|2013-01-06T18:55:30Z +http://www.semanlink.net/tag/ghana_empire|prefLabel|Ghana Empire +http://www.semanlink.net/tag/ghana_empire|broader|http://www.semanlink.net/tag/histoire_de_l_afrique +http://www.semanlink.net/tag/ghana_empire|broader|http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest +http://www.semanlink.net/tag/ghana_empire|creationDate|2013-01-06 +http://www.semanlink.net/tag/ghana_empire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ghana_empire|describedBy|https://en.wikipedia.org/wiki/Ghana_Empire +http://www.semanlink.net/tag/ghana_empire|uri|http://www.semanlink.net/tag/ghana_empire +http://www.semanlink.net/tag/ghana_empire|broader_prefLabel|Histoire de l'Afrique +http://www.semanlink.net/tag/ghana_empire|broader_prefLabel|Empires d'Afrique de l'Ouest +http://www.semanlink.net/tag/mac_os_x_10_8|creationTime|2012-11-05T08:03:45Z +http://www.semanlink.net/tag/mac_os_x_10_8|prefLabel|Mac OS X 10.8 +http://www.semanlink.net/tag/mac_os_x_10_8|creationDate|2012-11-05 +http://www.semanlink.net/tag/mac_os_x_10_8|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mac_os_x_10_8|uri|http://www.semanlink.net/tag/mac_os_x_10_8 +http://www.semanlink.net/tag/real_time|creationTime|2014-12-18T11:39:32Z +http://www.semanlink.net/tag/real_time|prefLabel|Real-Time +http://www.semanlink.net/tag/real_time|creationDate|2014-12-18 +http://www.semanlink.net/tag/real_time|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/real_time|uri|http://www.semanlink.net/tag/real_time +http://www.semanlink.net/tag/nlu_is_hard|creationTime|2019-12-18T14:48:00Z +http://www.semanlink.net/tag/nlu_is_hard|prefLabel|NLU is hard +http://www.semanlink.net/tag/nlu_is_hard|broader|http://www.semanlink.net/tag/nlu +http://www.semanlink.net/tag/nlu_is_hard|creationDate|2019-12-18 
+http://www.semanlink.net/tag/nlu_is_hard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlu_is_hard|uri|http://www.semanlink.net/tag/nlu_is_hard +http://www.semanlink.net/tag/nlu_is_hard|broader_prefLabel|NLU +http://www.semanlink.net/tag/nlu_is_hard|broader_altLabel|Natural Language Understanding +http://www.semanlink.net/tag/fps_notes|creationTime|2007-09-18T15:05:43Z +http://www.semanlink.net/tag/fps_notes|prefLabel|fps notes +http://www.semanlink.net/tag/fps_notes|broader|http://www.semanlink.net/tag/fps +http://www.semanlink.net/tag/fps_notes|creationDate|2007-09-18 +http://www.semanlink.net/tag/fps_notes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps_notes|uri|http://www.semanlink.net/tag/fps_notes +http://www.semanlink.net/tag/fps_notes|broader_prefLabel|fps +http://www.semanlink.net/tag/neolithique|prefLabel|Néolithique +http://www.semanlink.net/tag/neolithique|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/neolithique|broader|http://www.semanlink.net/tag/prehistoire +http://www.semanlink.net/tag/neolithique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neolithique|uri|http://www.semanlink.net/tag/neolithique +http://www.semanlink.net/tag/neolithique|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/neolithique|broader_prefLabel|Préhistoire +http://www.semanlink.net/tag/neolithique|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/data_interoperability|creationTime|2014-07-25T01:43:23Z +http://www.semanlink.net/tag/data_interoperability|prefLabel|Data Interoperability +http://www.semanlink.net/tag/data_interoperability|creationDate|2014-07-25 +http://www.semanlink.net/tag/data_interoperability|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_interoperability|uri|http://www.semanlink.net/tag/data_interoperability +http://www.semanlink.net/tag/allemagne|prefLabel|Allemagne +http://www.semanlink.net/tag/allemagne|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/allemagne|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/allemagne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/allemagne|altLabel|Germany +http://www.semanlink.net/tag/allemagne|altLabel|Deutschland +http://www.semanlink.net/tag/allemagne|uri|http://www.semanlink.net/tag/allemagne +http://www.semanlink.net/tag/allemagne|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/allemagne|broader_prefLabel|Europe +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|creationTime|2013-02-01T15:50:13Z +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|prefLabel|C2GWeb, Product description and Makolab +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader|http://www.semanlink.net/tag/makolab +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader|http://www.semanlink.net/tag/product_description +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader|http://www.semanlink.net/tag/mirek_sopek +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader|http://www.semanlink.net/tag/c2gweb_and_product_description +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|creationDate|2013-02-01 
+http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|uri|http://www.semanlink.net/tag/c2gweb_product_description_and_makolab +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader_prefLabel|Makolab +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader_prefLabel|Product description +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader_prefLabel|Mirek Sopek +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader_prefLabel|C2GWeb and Product description +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader_related|http://www.semanlink.net/tag/c2g +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader_related|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader_related|http://www.semanlink.net/tag/makolab +http://www.semanlink.net/tag/c2gweb_product_description_and_makolab|broader_related|http://www.semanlink.net/tag/configuration_ontology +http://www.semanlink.net/tag/good_idea|creationTime|2013-07-11T01:32:11Z +http://www.semanlink.net/tag/good_idea|prefLabel|Good idea +http://www.semanlink.net/tag/good_idea|broader|http://www.semanlink.net/tag/good +http://www.semanlink.net/tag/good_idea|creationDate|2013-07-11 +http://www.semanlink.net/tag/good_idea|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/good_idea|uri|http://www.semanlink.net/tag/good_idea +http://www.semanlink.net/tag/good_idea|broader_prefLabel|Good +http://www.semanlink.net/tag/nlp|creationTime|2008-07-19T18:25:29Z +http://www.semanlink.net/tag/nlp|prefLabel|NLP +http://www.semanlink.net/tag/nlp|broader|http://www.semanlink.net/tag/langage +http://www.semanlink.net/tag/nlp|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/nlp|broader|http://www.semanlink.net/tag/semantic_technology +http://www.semanlink.net/tag/nlp|broader|http://www.semanlink.net/tag/favoris +http://www.semanlink.net/tag/nlp|creationDate|2008-07-19 +http://www.semanlink.net/tag/nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp|describedBy|https://en.wikipedia.org/wiki/Natural_language_processing +http://www.semanlink.net/tag/nlp|altLabel|TALN +http://www.semanlink.net/tag/nlp|altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp|altLabel|Natural Language Processing +http://www.semanlink.net/tag/nlp|uri|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp|broader_prefLabel|Language +http://www.semanlink.net/tag/nlp|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/nlp|broader_prefLabel|Semantic technology +http://www.semanlink.net/tag/nlp|broader_prefLabel|Favoris +http://www.semanlink.net/tag/nlp|broader_altLabel|Langage +http://www.semanlink.net/tag/nlp|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/nlp|broader_altLabel|AI +http://www.semanlink.net/tag/nlp|broader_altLabel|IA +http://www.semanlink.net/tag/nlp|broader_altLabel|favorites +http://www.semanlink.net/tag/nlp|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/survey|creationTime|2018-05-30T23:47:37Z +http://www.semanlink.net/tag/survey|prefLabel|Survey / Review 
+http://www.semanlink.net/tag/survey|creationDate|2018-05-30 +http://www.semanlink.net/tag/survey|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/survey|altLabel|Surveys +http://www.semanlink.net/tag/survey|uri|http://www.semanlink.net/tag/survey +http://www.semanlink.net/tag/cite_des_sciences_et_de_l_industrie|prefLabel|Cité des sciences et de l'industrie +http://www.semanlink.net/tag/cite_des_sciences_et_de_l_industrie|broader|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/cite_des_sciences_et_de_l_industrie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cite_des_sciences_et_de_l_industrie|uri|http://www.semanlink.net/tag/cite_des_sciences_et_de_l_industrie +http://www.semanlink.net/tag/cite_des_sciences_et_de_l_industrie|broader_prefLabel|Musée +http://www.semanlink.net/tag/constraint_programming|creationTime|2010-09-04T14:25:27Z +http://www.semanlink.net/tag/constraint_programming|prefLabel|Constraint Programming +http://www.semanlink.net/tag/constraint_programming|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/constraint_programming|broader|http://www.semanlink.net/tag/logic +http://www.semanlink.net/tag/constraint_programming|creationDate|2010-09-04 +http://www.semanlink.net/tag/constraint_programming|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/constraint_programming|uri|http://www.semanlink.net/tag/constraint_programming +http://www.semanlink.net/tag/constraint_programming|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/constraint_programming|broader_prefLabel|Logic +http://www.semanlink.net/tag/constraint_programming|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/constraint_programming|broader_altLabel|AI +http://www.semanlink.net/tag/constraint_programming|broader_altLabel|IA +http://www.semanlink.net/tag/constraint_programming|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/nlu|creationTime|2018-10-26T00:38:21Z +http://www.semanlink.net/tag/nlu|prefLabel|NLU +http://www.semanlink.net/tag/nlu|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/nlu|creationDate|2018-10-26 +http://www.semanlink.net/tag/nlu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlu|altLabel|Natural Language Understanding +http://www.semanlink.net/tag/nlu|uri|http://www.semanlink.net/tag/nlu +http://www.semanlink.net/tag/nlu|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/decroissance|prefLabel|Décroissance +http://www.semanlink.net/tag/decroissance|broader|http://www.semanlink.net/tag/croissance +http://www.semanlink.net/tag/decroissance|broader|http://www.semanlink.net/tag/critique_du_liberalisme +http://www.semanlink.net/tag/decroissance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/decroissance|uri|http://www.semanlink.net/tag/decroissance +http://www.semanlink.net/tag/decroissance|broader_prefLabel|Croissance +http://www.semanlink.net/tag/decroissance|broader_prefLabel|Critique du libéralisme +http://www.semanlink.net/tag/browser_back_button|prefLabel|Browser : back button +http://www.semanlink.net/tag/browser_back_button|broader|http://www.semanlink.net/tag/brouteur +http://www.semanlink.net/tag/browser_back_button|broader|http://www.semanlink.net/tag/web_dev 
+http://www.semanlink.net/tag/browser_back_button|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/browser_back_button|uri|http://www.semanlink.net/tag/browser_back_button +http://www.semanlink.net/tag/browser_back_button|broader_prefLabel|Brouteur +http://www.semanlink.net/tag/browser_back_button|broader_prefLabel|Web dev +http://www.semanlink.net/tag/browser_back_button|broader_altLabel|Browser +http://www.semanlink.net/tag/browser_back_button|broader_altLabel|Web app dev +http://www.semanlink.net/tag/multimedia_ld|creationTime|2012-04-16T09:30:12Z +http://www.semanlink.net/tag/multimedia_ld|prefLabel|Multimedia + LD +http://www.semanlink.net/tag/multimedia_ld|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/multimedia_ld|broader|http://www.semanlink.net/tag/multimedia +http://www.semanlink.net/tag/multimedia_ld|creationDate|2012-04-16 +http://www.semanlink.net/tag/multimedia_ld|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multimedia_ld|uri|http://www.semanlink.net/tag/multimedia_ld +http://www.semanlink.net/tag/multimedia_ld|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/multimedia_ld|broader_prefLabel|Multimedia +http://www.semanlink.net/tag/multimedia_ld|broader_altLabel|LD +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/multimedia_ld|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/proletaires2_0|creationTime|2021-04-09T19:14:15Z +http://www.semanlink.net/tag/proletaires2_0|prefLabel|Prolétaires2.0 +http://www.semanlink.net/tag/proletaires2_0|broader|http://www.semanlink.net/tag/proletaire +http://www.semanlink.net/tag/proletaires2_0|broader|http://www.semanlink.net/tag/digital_economy +http://www.semanlink.net/tag/proletaires2_0|creationDate|2021-04-09 +http://www.semanlink.net/tag/proletaires2_0|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/proletaires2_0|uri|http://www.semanlink.net/tag/proletaires2_0 +http://www.semanlink.net/tag/proletaires2_0|broader_prefLabel|Prolétaire +http://www.semanlink.net/tag/proletaires2_0|broader_prefLabel|Digital economy 
+http://www.semanlink.net/tag/proletaires2_0|broader_altLabel|Economie numérique
+http://www.semanlink.net/tag/contextualised_word_representations|creationTime|2018-11-27T11:35:49Z
+http://www.semanlink.net/tag/contextualised_word_representations|prefLabel|Contextualized word representations
+http://www.semanlink.net/tag/contextualised_word_representations|broader|http://www.semanlink.net/tag/nlp_techniques
+http://www.semanlink.net/tag/contextualised_word_representations|related|http://www.semanlink.net/tag/lexical_ambiguity
+http://www.semanlink.net/tag/contextualised_word_representations|related|http://www.semanlink.net/tag/word_embedding
+http://www.semanlink.net/tag/contextualised_word_representations|creationDate|2018-11-27
+http://www.semanlink.net/tag/contextualised_word_representations|comment|"replacement of the vectorial representation of words with a matrix representation where each word’s representation includes information about its context
+
+Embedding words through a language model
+
+Language-model-based encoders
+
+> The key idea underneath is to train a contextual encoder with a language model objective on a large unannotated text corpus. During the training, part of the text is masked and the goal is to encode the remaining context and predict the missing part. ([source](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1902.11269))
+
+"
+http://www.semanlink.net/tag/contextualised_word_representations|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/contextualised_word_representations|altLabel|Contextualized word embeddings
+http://www.semanlink.net/tag/contextualised_word_representations|uri|http://www.semanlink.net/tag/contextualised_word_representations
+http://www.semanlink.net/tag/contextualised_word_representations|broader_prefLabel|NLP techniques
+http://www.semanlink.net/tag/michel_servet|creationTime|2021-04-11T12:31:58Z
+http://www.semanlink.net/tag/michel_servet|prefLabel|Michel Servet
+http://www.semanlink.net/tag/michel_servet|broader|http://www.semanlink.net/tag/xvie_siecle
+http://www.semanlink.net/tag/michel_servet|broader|http://www.semanlink.net/tag/grand_homme
+http://www.semanlink.net/tag/michel_servet|related|http://www.semanlink.net/tag/stefan_zweig
+http://www.semanlink.net/tag/michel_servet|related|http://www.semanlink.net/tag/john_calvin
+http://www.semanlink.net/tag/michel_servet|creationDate|2021-04-11
+http://www.semanlink.net/tag/michel_servet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/michel_servet|describedBy|https://fr.wikipedia.org/wiki/Michel_Servet
+http://www.semanlink.net/tag/michel_servet|uri|http://www.semanlink.net/tag/michel_servet
+http://www.semanlink.net/tag/michel_servet|broader_prefLabel|XVIe siècle
+http://www.semanlink.net/tag/michel_servet|broader_prefLabel|Grand Homme
+http://www.semanlink.net/tag/michel_servet|broader_related|http://www.semanlink.net/tag/renaissance
+http://www.semanlink.net/tag/explainable_nlp|creationTime|2019-12-05T15:26:18Z
+http://www.semanlink.net/tag/explainable_nlp|prefLabel|Explainable NLP
+http://www.semanlink.net/tag/explainable_nlp|broader|http://www.semanlink.net/tag/explainable_ai
+http://www.semanlink.net/tag/explainable_nlp|broader|http://www.semanlink.net/tag/deep_nlp
+http://www.semanlink.net/tag/explainable_nlp|creationDate|2019-12-05
+http://www.semanlink.net/tag/explainable_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/explainable_nlp|uri|http://www.semanlink.net/tag/explainable_nlp +http://www.semanlink.net/tag/explainable_nlp|broader_prefLabel|Explainable AI +http://www.semanlink.net/tag/explainable_nlp|broader_prefLabel|Deep NLP +http://www.semanlink.net/tag/video_ina_fr|creationTime|2014-02-17T00:42:09Z +http://www.semanlink.net/tag/video_ina_fr|prefLabel|Vidéo Ina.fr +http://www.semanlink.net/tag/video_ina_fr|broader|http://www.semanlink.net/tag/ina +http://www.semanlink.net/tag/video_ina_fr|broader|http://www.semanlink.net/tag/video +http://www.semanlink.net/tag/video_ina_fr|creationDate|2014-02-17 +http://www.semanlink.net/tag/video_ina_fr|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/video_ina_fr|uri|http://www.semanlink.net/tag/video_ina_fr +http://www.semanlink.net/tag/video_ina_fr|broader_prefLabel|INA +http://www.semanlink.net/tag/video_ina_fr|broader_prefLabel|Video +http://www.semanlink.net/tag/cosine_similarity|creationTime|2017-07-21T11:44:37Z +http://www.semanlink.net/tag/cosine_similarity|prefLabel|Cosine similarity +http://www.semanlink.net/tag/cosine_similarity|broader|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/cosine_similarity|related|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/cosine_similarity|creationDate|2017-07-21 +http://www.semanlink.net/tag/cosine_similarity|comment|in cosine similarity, the number of common attributes is divided by the total number of possible attributes. Whereas in Jaccard Similarity, the number of common attributes is divided by the number of attributes that exist in at least one of the two objects. 
+http://www.semanlink.net/tag/cosine_similarity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cosine_similarity|describedBy|https://en.wikipedia.org/wiki/Cosine_similarity +http://www.semanlink.net/tag/cosine_similarity|uri|http://www.semanlink.net/tag/cosine_similarity +http://www.semanlink.net/tag/cosine_similarity|broader_prefLabel|Similarity queries +http://www.semanlink.net/tag/cosine_similarity|broader_altLabel|Vector similarity search +http://www.semanlink.net/tag/cosine_similarity|broader_related|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/text_to_semantic_data|creationTime|2010-07-30T14:39:16Z +http://www.semanlink.net/tag/text_to_semantic_data|prefLabel|Text to semantic data +http://www.semanlink.net/tag/text_to_semantic_data|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/text_to_semantic_data|creationDate|2010-07-30 +http://www.semanlink.net/tag/text_to_semantic_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_to_semantic_data|uri|http://www.semanlink.net/tag/text_to_semantic_data +http://www.semanlink.net/tag/text_to_semantic_data|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/text_to_semantic_data|broader_altLabel|sw +http://www.semanlink.net/tag/text_to_semantic_data|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/europeana|creationTime|2009-05-05T16:21:38Z +http://www.semanlink.net/tag/europeana|prefLabel|Europeana +http://www.semanlink.net/tag/europeana|broader|http://www.semanlink.net/tag/cultural_heritage +http://www.semanlink.net/tag/europeana|broader|http://www.semanlink.net/tag/bibliotheque_numerique +http://www.semanlink.net/tag/europeana|broader|http://www.semanlink.net/tag/european_project +http://www.semanlink.net/tag/europeana|creationDate|2009-05-05 +http://www.semanlink.net/tag/europeana|comment|Paintings, music, films and books from Europe's galleries, libraries, archives and museums. 
+http://www.semanlink.net/tag/europeana|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/europeana|homepage|http://www.europeana.eu/portal/ +http://www.semanlink.net/tag/europeana|uri|http://www.semanlink.net/tag/europeana +http://www.semanlink.net/tag/europeana|broader_prefLabel|Cultural heritage +http://www.semanlink.net/tag/europeana|broader_prefLabel|Bibliothèque numérique +http://www.semanlink.net/tag/europeana|broader_prefLabel|European project +http://www.semanlink.net/tag/europeana|broader_related|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/os_x_unix|prefLabel|OS X Unix +http://www.semanlink.net/tag/os_x_unix|broader|http://www.semanlink.net/tag/unix +http://www.semanlink.net/tag/os_x_unix|broader|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/tag/os_x_unix|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/os_x_unix|uri|http://www.semanlink.net/tag/os_x_unix +http://www.semanlink.net/tag/os_x_unix|broader_prefLabel|Unix +http://www.semanlink.net/tag/os_x_unix|broader_prefLabel|Mac OS X +http://www.semanlink.net/tag/os_x_unix|broader_altLabel|OS X +http://www.semanlink.net/tag/os_x_unix|broader_altLabel|OSX +http://www.semanlink.net/tag/alphago|creationTime|2016-03-11T21:01:58Z +http://www.semanlink.net/tag/alphago|prefLabel|Alphago +http://www.semanlink.net/tag/alphago|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/alphago|broader|http://www.semanlink.net/tag/go_game +http://www.semanlink.net/tag/alphago|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/alphago|creationDate|2016-03-11 +http://www.semanlink.net/tag/alphago|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alphago|uri|http://www.semanlink.net/tag/alphago +http://www.semanlink.net/tag/alphago|broader_prefLabel|Google +http://www.semanlink.net/tag/alphago|broader_prefLabel|Go (Game) +http://www.semanlink.net/tag/alphago|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/alphago|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/alphago|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/alphago|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/ibm_developerworks|prefLabel|IBM developerWorks +http://www.semanlink.net/tag/ibm_developerworks|broader|http://www.semanlink.net/tag/ibm +http://www.semanlink.net/tag/ibm_developerworks|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/ibm_developerworks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ibm_developerworks|uri|http://www.semanlink.net/tag/ibm_developerworks +http://www.semanlink.net/tag/ibm_developerworks|broader_prefLabel|IBM +http://www.semanlink.net/tag/ibm_developerworks|broader_prefLabel|Dev +http://www.semanlink.net/tag/crowd_sourcing|creationTime|2011-09-20T09:00:19Z +http://www.semanlink.net/tag/crowd_sourcing|prefLabel|Crowd sourcing +http://www.semanlink.net/tag/crowd_sourcing|creationDate|2011-09-20 +http://www.semanlink.net/tag/crowd_sourcing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/crowd_sourcing|altLabel|Crowdsourcing +http://www.semanlink.net/tag/crowd_sourcing|uri|http://www.semanlink.net/tag/crowd_sourcing +http://www.semanlink.net/tag/ford|creationTime|2008-03-25T11:47:41Z 
+http://www.semanlink.net/tag/ford|prefLabel|Ford +http://www.semanlink.net/tag/ford|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/ford|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/ford|creationDate|2008-03-25 +http://www.semanlink.net/tag/ford|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ford|uri|http://www.semanlink.net/tag/ford +http://www.semanlink.net/tag/ford|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/ford|broader_prefLabel|Automobile +http://www.semanlink.net/tag/ford|broader_altLabel|Automotive +http://www.semanlink.net/tag/natural_language_semantic_search|creationTime|2018-06-02T10:24:32Z +http://www.semanlink.net/tag/natural_language_semantic_search|prefLabel|Natural Language Semantic Search +http://www.semanlink.net/tag/natural_language_semantic_search|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/natural_language_semantic_search|broader|http://www.semanlink.net/tag/semantic_search +http://www.semanlink.net/tag/natural_language_semantic_search|creationDate|2018-06-02 +http://www.semanlink.net/tag/natural_language_semantic_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/natural_language_semantic_search|uri|http://www.semanlink.net/tag/natural_language_semantic_search +http://www.semanlink.net/tag/natural_language_semantic_search|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/natural_language_semantic_search|broader_prefLabel|Semantic Search +http://www.semanlink.net/tag/natural_language_semantic_search|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/natural_language_semantic_search|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/natural_language_semantic_search|broader_related|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/tag/alexandre_passant|creationTime|2008-03-30T20:23:56Z +http://www.semanlink.net/tag/alexandre_passant|prefLabel|Alexandre Passant +http://www.semanlink.net/tag/alexandre_passant|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/alexandre_passant|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/alexandre_passant|related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/alexandre_passant|creationDate|2008-03-30 +http://www.semanlink.net/tag/alexandre_passant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alexandre_passant|homepage|http://apassant.net/ +http://www.semanlink.net/tag/alexandre_passant|uri|http://www.semanlink.net/tag/alexandre_passant +http://www.semanlink.net/tag/alexandre_passant|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/alexandre_passant|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/alexandre_passant|broader_altLabel|Technical guys +http://www.semanlink.net/tag/common_web_language|prefLabel|Common Web Language +http://www.semanlink.net/tag/common_web_language|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/common_web_language|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/common_web_language|creationDate|2006-12-01 +http://www.semanlink.net/tag/common_web_language|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/common_web_language|altLabel|CWL +http://www.semanlink.net/tag/common_web_language|uri|http://www.semanlink.net/tag/common_web_language +http://www.semanlink.net/tag/common_web_language|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/common_web_language|broader_prefLabel|W3C +http://www.semanlink.net/tag/common_web_language|broader_altLabel|sw +http://www.semanlink.net/tag/common_web_language|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/common_web_language|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/common_web_language|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/organizer|creationTime|2012-01-17T01:09:38Z +http://www.semanlink.net/tag/organizer|prefLabel|Organizer +http://www.semanlink.net/tag/organizer|creationDate|2012-01-17 +http://www.semanlink.net/tag/organizer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/organizer|uri|http://www.semanlink.net/tag/organizer +http://www.semanlink.net/tag/phrase_mining|creationTime|2020-12-14T19:11:07Z +http://www.semanlink.net/tag/phrase_mining|prefLabel|Phrase mining +http://www.semanlink.net/tag/phrase_mining|broader|http://www.semanlink.net/tag/phrases_nlp +http://www.semanlink.net/tag/phrase_mining|creationDate|2020-12-14 +http://www.semanlink.net/tag/phrase_mining|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/phrase_mining|uri|http://www.semanlink.net/tag/phrase_mining +http://www.semanlink.net/tag/phrase_mining|broader_prefLabel|Phrases (NLP) +http://www.semanlink.net/tag/phrase_mining|broader_related|http://www.semanlink.net/tag/n_gram +http://www.semanlink.net/tag/seyni_kountche|creationTime|2009-01-14T22:50:14Z +http://www.semanlink.net/tag/seyni_kountche|prefLabel|Seyni Kountché +http://www.semanlink.net/tag/seyni_kountche|broader|http://www.semanlink.net/tag/histoire_du_niger +http://www.semanlink.net/tag/seyni_kountche|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/seyni_kountche|creationDate|2009-01-14 +http://www.semanlink.net/tag/seyni_kountche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/seyni_kountche|uri|http://www.semanlink.net/tag/seyni_kountche +http://www.semanlink.net/tag/seyni_kountche|broader_prefLabel|Histoire du Niger +http://www.semanlink.net/tag/seyni_kountche|broader_prefLabel|Niger +http://www.semanlink.net/tag/seyni_kountche|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/seyni_kountche|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/seyni_kountche|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/prohibition|creationTime|2008-12-10T15:01:21Z +http://www.semanlink.net/tag/prohibition|prefLabel|Prohibition +http://www.semanlink.net/tag/prohibition|creationDate|2008-12-10 +http://www.semanlink.net/tag/prohibition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/prohibition|uri|http://www.semanlink.net/tag/prohibition +http://www.semanlink.net/tag/label_embedding|creationTime|2020-02-18T15:00:36Z +http://www.semanlink.net/tag/label_embedding|prefLabel|Label Embedding +http://www.semanlink.net/tag/label_embedding|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/label_embedding|broader|http://www.semanlink.net/tag/embeddings 
+http://www.semanlink.net/tag/label_embedding|related|http://www.semanlink.net/tag/fasttext +http://www.semanlink.net/tag/label_embedding|creationDate|2020-02-18 +http://www.semanlink.net/tag/label_embedding|comment|"How to embed (describe) classes (in classification)? See related work section of this [paper](doc:2020/02/joint_embedding_of_words_and_la) + +> [FastText](tag:fasttext) generates both word +embeddings and label embeddings. It seeks to predict one of the document’s labels (instead of the central +word) ([src](doc:2020/10/1911_11506_word_class_embeddi))" +http://www.semanlink.net/tag/label_embedding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/label_embedding|uri|http://www.semanlink.net/tag/label_embedding +http://www.semanlink.net/tag/label_embedding|broader_prefLabel|Classification +http://www.semanlink.net/tag/label_embedding|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/label_embedding|broader_altLabel|embedding +http://www.semanlink.net/tag/label_embedding|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/label_embedding|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/hashtag|creationTime|2017-05-26T16:47:54Z +http://www.semanlink.net/tag/hashtag|prefLabel|Hashtag +http://www.semanlink.net/tag/hashtag|creationDate|2017-05-26 +http://www.semanlink.net/tag/hashtag|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hashtag|uri|http://www.semanlink.net/tag/hashtag +http://www.semanlink.net/tag/molecular_clock|creationTime|2018-02-22T00:33:44Z +http://www.semanlink.net/tag/molecular_clock|prefLabel|Molecular clock +http://www.semanlink.net/tag/molecular_clock|creationDate|2018-02-22 +http://www.semanlink.net/tag/molecular_clock|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/molecular_clock|describedBy|https://en.wikipedia.org/wiki/Molecular_clock +http://www.semanlink.net/tag/molecular_clock|uri|http://www.semanlink.net/tag/molecular_clock +http://www.semanlink.net/tag/labeled_data|creationTime|2019-08-07T00:51:53Z +http://www.semanlink.net/tag/labeled_data|prefLabel|Labeled Data +http://www.semanlink.net/tag/labeled_data|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/labeled_data|broader|http://www.semanlink.net/tag/training_data +http://www.semanlink.net/tag/labeled_data|creationDate|2019-08-07 +http://www.semanlink.net/tag/labeled_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/labeled_data|uri|http://www.semanlink.net/tag/labeled_data +http://www.semanlink.net/tag/labeled_data|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/labeled_data|broader_prefLabel|Training data +http://www.semanlink.net/tag/social_democracy|creationTime|2015-09-20T10:26:19Z +http://www.semanlink.net/tag/social_democracy|prefLabel|Social democracy +http://www.semanlink.net/tag/social_democracy|creationDate|2015-09-20 +http://www.semanlink.net/tag/social_democracy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/social_democracy|uri|http://www.semanlink.net/tag/social_democracy +http://www.semanlink.net/tag/mycarevent|creationTime|2007-10-05T21:16:33Z +http://www.semanlink.net/tag/mycarevent|prefLabel|MyCarEvent +http://www.semanlink.net/tag/mycarevent|broader|http://www.semanlink.net/tag/owl 
+http://www.semanlink.net/tag/mycarevent|broader|http://www.semanlink.net/tag/reparation_automobile +http://www.semanlink.net/tag/mycarevent|creationDate|2007-10-05 +http://www.semanlink.net/tag/mycarevent|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mycarevent|uri|http://www.semanlink.net/tag/mycarevent +http://www.semanlink.net/tag/mycarevent|broader_prefLabel|OWL +http://www.semanlink.net/tag/mycarevent|broader_prefLabel|Réparation automobile +http://www.semanlink.net/tag/mycarevent|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/roosevelt|creationTime|2008-08-26T22:53:00Z +http://www.semanlink.net/tag/roosevelt|prefLabel|Roosevelt +http://www.semanlink.net/tag/roosevelt|broader|http://www.semanlink.net/tag/president_des_usa +http://www.semanlink.net/tag/roosevelt|creationDate|2008-08-26 +http://www.semanlink.net/tag/roosevelt|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/roosevelt|uri|http://www.semanlink.net/tag/roosevelt +http://www.semanlink.net/tag/roosevelt|broader_prefLabel|Président des USA +http://www.semanlink.net/tag/infringing_material|prefLabel|Infringing material +http://www.semanlink.net/tag/infringing_material|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/infringing_material|uri|http://www.semanlink.net/tag/infringing_material +http://www.semanlink.net/tag/neurala_lifelong_dnn|creationTime|2020-01-01T12:08:22Z +http://www.semanlink.net/tag/neurala_lifelong_dnn|prefLabel|Neurala: Lifelong-DNN +http://www.semanlink.net/tag/neurala_lifelong_dnn|broader|http://www.semanlink.net/tag/continual_learning +http://www.semanlink.net/tag/neurala_lifelong_dnn|related|http://www.semanlink.net/tag/backpropagation +http://www.semanlink.net/tag/neurala_lifelong_dnn|creationDate|2020-01-01 +http://www.semanlink.net/tag/neurala_lifelong_dnn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neurala_lifelong_dnn|uri|http://www.semanlink.net/tag/neurala_lifelong_dnn +http://www.semanlink.net/tag/neurala_lifelong_dnn|broader_prefLabel|Continual Learning +http://www.semanlink.net/tag/neurala_lifelong_dnn|broader_altLabel|Online Machine Learning +http://www.semanlink.net/tag/neurala_lifelong_dnn|broader_altLabel|Incremental learning +http://www.semanlink.net/tag/neurala_lifelong_dnn|broader_altLabel|Lifelong learning +http://www.semanlink.net/tag/gouvernement_chirac|prefLabel|Gouvernement Chirac +http://www.semanlink.net/tag/gouvernement_chirac|broader|http://www.semanlink.net/tag/chirac +http://www.semanlink.net/tag/gouvernement_chirac|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gouvernement_chirac|uri|http://www.semanlink.net/tag/gouvernement_chirac +http://www.semanlink.net/tag/gouvernement_chirac|broader_prefLabel|Chirac +http://www.semanlink.net/tag/greenpeace|prefLabel|Greenpeace +http://www.semanlink.net/tag/greenpeace|broader|http://www.semanlink.net/tag/ong +http://www.semanlink.net/tag/greenpeace|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/greenpeace|uri|http://www.semanlink.net/tag/greenpeace +http://www.semanlink.net/tag/greenpeace|broader_prefLabel|ONG +http://www.semanlink.net/tag/webcomponents|creationTime|2014-11-23T12:42:09Z +http://www.semanlink.net/tag/webcomponents|prefLabel|Web Components +http://www.semanlink.net/tag/webcomponents|related|http://www.semanlink.net/tag/html5 
+http://www.semanlink.net/tag/webcomponents|creationDate|2014-11-23 +http://www.semanlink.net/tag/webcomponents|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/webcomponents|describedBy|https://en.wikipedia.org/wiki/Web_Components +http://www.semanlink.net/tag/webcomponents|altLabel|WebComponents +http://www.semanlink.net/tag/webcomponents|uri|http://www.semanlink.net/tag/webcomponents +http://www.semanlink.net/tag/lip6|creationTime|2018-01-23T14:43:20Z +http://www.semanlink.net/tag/lip6|prefLabel|LIP6 +http://www.semanlink.net/tag/lip6|related|http://www.semanlink.net/tag/patrick_gallinari +http://www.semanlink.net/tag/lip6|creationDate|2018-01-23 +http://www.semanlink.net/tag/lip6|comment|Laboratoire d'Informatique de Paris 6 +http://www.semanlink.net/tag/lip6|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lip6|homepage|https://www.lip6.fr +http://www.semanlink.net/tag/lip6|uri|http://www.semanlink.net/tag/lip6 +http://www.semanlink.net/tag/biomedical_nlp|creationTime|2021-10-21T14:28:04Z +http://www.semanlink.net/tag/biomedical_nlp|prefLabel|Biomedical NLP +http://www.semanlink.net/tag/biomedical_nlp|broader|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/tag/biomedical_nlp|related|http://www.semanlink.net/tag/biomedical_data +http://www.semanlink.net/tag/biomedical_nlp|creationDate|2021-10-21 +http://www.semanlink.net/tag/biomedical_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/biomedical_nlp|uri|http://www.semanlink.net/tag/biomedical_nlp +http://www.semanlink.net/tag/biomedical_nlp|broader_prefLabel|NLP: use cases +http://www.semanlink.net/tag/biomedical_nlp|broader_altLabel|NLP: applications +http://www.semanlink.net/tag/captcha|creationTime|2013-05-15T15:56:02Z +http://www.semanlink.net/tag/captcha|prefLabel|Captcha +http://www.semanlink.net/tag/captcha|related|http://www.semanlink.net/tag/crowd_sourcing +http://www.semanlink.net/tag/captcha|related|http://www.semanlink.net/tag/luis_von_ahn +http://www.semanlink.net/tag/captcha|creationDate|2013-05-15 +http://www.semanlink.net/tag/captcha|comment|"""A program that can generate and grade tests that most human can pass and current computers cannot pass"" (Luis von Ahn) +""Running a computation in people's brains instead of silicon processors"" +" +http://www.semanlink.net/tag/captcha|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/captcha|uri|http://www.semanlink.net/tag/captcha +http://www.semanlink.net/tag/gueant|creationTime|2012-01-15T13:00:37Z +http://www.semanlink.net/tag/gueant|prefLabel|Guéant +http://www.semanlink.net/tag/gueant|broader|http://www.semanlink.net/tag/gouvernement_sarkozy +http://www.semanlink.net/tag/gueant|broader|http://www.semanlink.net/tag/sarkozy_immigration +http://www.semanlink.net/tag/gueant|creationDate|2012-01-15 +http://www.semanlink.net/tag/gueant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gueant|uri|http://www.semanlink.net/tag/gueant +http://www.semanlink.net/tag/gueant|broader_prefLabel|Gouvernement Sarkozy +http://www.semanlink.net/tag/gueant|broader_prefLabel|Sarkozy : immigration +http://www.semanlink.net/tag/world_bank|creationTime|2010-11-16T17:28:12Z +http://www.semanlink.net/tag/world_bank|prefLabel|World Bank +http://www.semanlink.net/tag/world_bank|creationDate|2010-11-16 
+http://www.semanlink.net/tag/world_bank|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/world_bank|uri|http://www.semanlink.net/tag/world_bank +http://www.semanlink.net/tag/ner_unseen_mentions|creationTime|2020-10-01T11:44:09Z +http://www.semanlink.net/tag/ner_unseen_mentions|prefLabel|NER: unseen mentions +http://www.semanlink.net/tag/ner_unseen_mentions|broader|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/ner_unseen_mentions|related|http://www.semanlink.net/tag/zero_shot_entity_linking +http://www.semanlink.net/tag/ner_unseen_mentions|creationDate|2020-10-01 +http://www.semanlink.net/tag/ner_unseen_mentions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ner_unseen_mentions|uri|http://www.semanlink.net/tag/ner_unseen_mentions +http://www.semanlink.net/tag/ner_unseen_mentions|broader_prefLabel|Named Entity Recognition +http://www.semanlink.net/tag/ner_unseen_mentions|broader_altLabel|NER +http://www.semanlink.net/tag/obama|creationTime|2008-10-28T22:39:42Z +http://www.semanlink.net/tag/obama|prefLabel|Obama +http://www.semanlink.net/tag/obama|broader|http://www.semanlink.net/tag/president_des_usa +http://www.semanlink.net/tag/obama|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/obama|creationDate|2008-10-28 +http://www.semanlink.net/tag/obama|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/obama|type|http://xmlns.com/foaf/0.1/Person +http://www.semanlink.net/tag/obama|sameAs|http://dbpedia.org/resource/Barack_Obama +http://www.semanlink.net/tag/obama|uri|http://www.semanlink.net/tag/obama +http://www.semanlink.net/tag/obama|broader_prefLabel|Président des USA +http://www.semanlink.net/tag/obama|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/anzo|creationTime|2008-06-05T23:21:22Z +http://www.semanlink.net/tag/anzo|prefLabel|Anzo +http://www.semanlink.net/tag/anzo|broader|http://www.semanlink.net/tag/semantic_web_platform +http://www.semanlink.net/tag/anzo|broader|http://www.semanlink.net/tag/triplestore +http://www.semanlink.net/tag/anzo|related|http://www.semanlink.net/tag/lee_feigenbaum +http://www.semanlink.net/tag/anzo|creationDate|2008-06-05 +http://www.semanlink.net/tag/anzo|comment|Anzo is an open source enterprise-featured RDF store and middleware platform that provides support for multiple users, distributed clients, offline work, real-time notification, named-graph modularization, versioning, access controls, and transactions with preconditions. Java developers will discover a host of features we believe are necessary for the creation of sophisticated Semantic technology based applications. 
+http://www.semanlink.net/tag/anzo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/anzo|describedBy|http://www.openanzo.org/ +http://www.semanlink.net/tag/anzo|uri|http://www.semanlink.net/tag/anzo +http://www.semanlink.net/tag/anzo|broader_prefLabel|Semantic Web Platform +http://www.semanlink.net/tag/anzo|broader_prefLabel|TripleStore +http://www.semanlink.net/tag/anzo|broader_altLabel|RDF database +http://www.semanlink.net/tag/paul_krugman|creationTime|2010-01-13T21:08:30Z +http://www.semanlink.net/tag/paul_krugman|prefLabel|Paul Krugman +http://www.semanlink.net/tag/paul_krugman|broader|http://www.semanlink.net/tag/prix_nobel_d_economie +http://www.semanlink.net/tag/paul_krugman|broader|http://www.semanlink.net/tag/economiste +http://www.semanlink.net/tag/paul_krugman|creationDate|2010-01-13 +http://www.semanlink.net/tag/paul_krugman|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paul_krugman|uri|http://www.semanlink.net/tag/paul_krugman +http://www.semanlink.net/tag/paul_krugman|broader_prefLabel|Prix Nobel d'économie +http://www.semanlink.net/tag/paul_krugman|broader_prefLabel|Economiste +http://www.semanlink.net/tag/validation_xml_vs_rdf|creationTime|2008-01-25T15:50:46Z +http://www.semanlink.net/tag/validation_xml_vs_rdf|prefLabel|Validation: XML vs RDF +http://www.semanlink.net/tag/validation_xml_vs_rdf|broader|http://www.semanlink.net/tag/rdf_vs_xml +http://www.semanlink.net/tag/validation_xml_vs_rdf|broader|http://www.semanlink.net/tag/validation +http://www.semanlink.net/tag/validation_xml_vs_rdf|creationDate|2008-01-25 +http://www.semanlink.net/tag/validation_xml_vs_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/validation_xml_vs_rdf|uri|http://www.semanlink.net/tag/validation_xml_vs_rdf +http://www.semanlink.net/tag/validation_xml_vs_rdf|broader_prefLabel|RDF vs XML +http://www.semanlink.net/tag/validation_xml_vs_rdf|broader_prefLabel|Validator +http://www.semanlink.net/tag/ubuntu|creationTime|2014-02-18T00:04:31Z +http://www.semanlink.net/tag/ubuntu|prefLabel|Ubuntu +http://www.semanlink.net/tag/ubuntu|broader|http://www.semanlink.net/tag/linux +http://www.semanlink.net/tag/ubuntu|creationDate|2014-02-18 +http://www.semanlink.net/tag/ubuntu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ubuntu|uri|http://www.semanlink.net/tag/ubuntu +http://www.semanlink.net/tag/ubuntu|broader_prefLabel|Linux +http://www.semanlink.net/tag/economie_de_la_gratuite|creationTime|2015-01-08T15:56:11Z +http://www.semanlink.net/tag/economie_de_la_gratuite|prefLabel|Economie de la gratuité +http://www.semanlink.net/tag/economie_de_la_gratuite|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/economie_de_la_gratuite|creationDate|2015-01-08 +http://www.semanlink.net/tag/economie_de_la_gratuite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/economie_de_la_gratuite|uri|http://www.semanlink.net/tag/economie_de_la_gratuite +http://www.semanlink.net/tag/economie_de_la_gratuite|broader_prefLabel|Economie +http://www.semanlink.net/tag/economie_de_la_gratuite|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/ukraine|prefLabel|Ukraine +http://www.semanlink.net/tag/ukraine|broader|http://www.semanlink.net/tag/urss +http://www.semanlink.net/tag/ukraine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/ukraine|uri|http://www.semanlink.net/tag/ukraine
+http://www.semanlink.net/tag/ukraine|broader_prefLabel|Ex URSS
+http://www.semanlink.net/tag/ukraine|broader_prefLabel|URSS
+http://www.semanlink.net/tag/sindice|creationTime|2007-06-13T23:42:24Z
+http://www.semanlink.net/tag/sindice|prefLabel|sindice
+http://www.semanlink.net/tag/sindice|broader|http://www.semanlink.net/tag/semantic_web_tools
+http://www.semanlink.net/tag/sindice|broader|http://www.semanlink.net/tag/semantic_web_search_engine
+http://www.semanlink.net/tag/sindice|broader|http://www.semanlink.net/tag/linked_data
+http://www.semanlink.net/tag/sindice|broader|http://www.semanlink.net/tag/linking_open_data
+http://www.semanlink.net/tag/sindice|related|http://www.semanlink.net/tag/giovanni_tummarello
+http://www.semanlink.net/tag/sindice|creationDate|2007-06-13
+http://www.semanlink.net/tag/sindice|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sindice|uri|http://www.semanlink.net/tag/sindice
+http://www.semanlink.net/tag/sindice|broader_prefLabel|Semantic Web : Tools
+http://www.semanlink.net/tag/sindice|broader_prefLabel|Semantic Web search engine
+http://www.semanlink.net/tag/sindice|broader_prefLabel|Linked Data
+http://www.semanlink.net/tag/sindice|broader_prefLabel|Linking Open Data
+http://www.semanlink.net/tag/sindice|broader_altLabel|RDF search engine
+http://www.semanlink.net/tag/sindice|broader_altLabel|LD
+http://www.semanlink.net/tag/sindice|broader_altLabel|LOD
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/yves_raymond
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/frederick_giasson
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/chris_bizer
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/jamendo
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/richard_cyganiak
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/httprange_14
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/musicbrainz
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/kingsley_idehen
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/giovanni_tummarello
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/tom_heath
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/rdf_data_source
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/tim_berners_lee
+http://www.semanlink.net/tag/sindice|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.semanlink.net/tag/kigali|creationTime|2017-08-06T10:47:38Z
+http://www.semanlink.net/tag/kigali|prefLabel|Kigali
+http://www.semanlink.net/tag/kigali|creationDate|2017-08-06
+http://www.semanlink.net/tag/kigali|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kigali|describedBy|https://en.wikipedia.org/wiki/Kigali +http://www.semanlink.net/tag/kigali|uri|http://www.semanlink.net/tag/kigali +http://www.semanlink.net/tag/openstreetmap|creationTime|2013-03-20T15:07:51Z +http://www.semanlink.net/tag/openstreetmap|prefLabel|OpenStreetMap +http://www.semanlink.net/tag/openstreetmap|related|http://www.semanlink.net/tag/destination_prediction +http://www.semanlink.net/tag/openstreetmap|creationDate|2013-03-20 +http://www.semanlink.net/tag/openstreetmap|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/openstreetmap|uri|http://www.semanlink.net/tag/openstreetmap +http://www.semanlink.net/tag/vocamp|creationTime|2010-05-10T09:23:54Z +http://www.semanlink.net/tag/vocamp|prefLabel|VoCamp +http://www.semanlink.net/tag/vocamp|broader|http://www.semanlink.net/tag/barcamp +http://www.semanlink.net/tag/vocamp|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/vocamp|creationDate|2010-05-10 +http://www.semanlink.net/tag/vocamp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vocamp|describedBy|http://vocamp.org/wiki/Main_Page +http://www.semanlink.net/tag/vocamp|uri|http://www.semanlink.net/tag/vocamp +http://www.semanlink.net/tag/vocamp|broader_prefLabel|Barcamp +http://www.semanlink.net/tag/vocamp|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/vocamp|broader_altLabel|sw +http://www.semanlink.net/tag/vocamp|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/bob_ducharme|creationTime|2008-04-07T16:58:12Z +http://www.semanlink.net/tag/bob_ducharme|prefLabel|Bob DuCharme +http://www.semanlink.net/tag/bob_ducharme|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/bob_ducharme|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/bob_ducharme|creationDate|2008-04-07 +http://www.semanlink.net/tag/bob_ducharme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bob_ducharme|altLabel| bobdc.blog +http://www.semanlink.net/tag/bob_ducharme|uri|http://www.semanlink.net/tag/bob_ducharme +http://www.semanlink.net/tag/bob_ducharme|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/bob_ducharme|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/bob_ducharme|broader_altLabel|Technical guys +http://www.semanlink.net/tag/electricite|creationTime|2012-12-10T20:03:10Z +http://www.semanlink.net/tag/electricite|prefLabel|Electricité +http://www.semanlink.net/tag/electricite|broader|http://www.semanlink.net/tag/energie +http://www.semanlink.net/tag/electricite|creationDate|2012-12-10 +http://www.semanlink.net/tag/electricite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/electricite|uri|http://www.semanlink.net/tag/electricite +http://www.semanlink.net/tag/electricite|broader_prefLabel|Energie +http://www.semanlink.net/tag/neo_fascites|creationTime|2018-10-03T23:43:44Z +http://www.semanlink.net/tag/neo_fascites|prefLabel|Neo-fascites +http://www.semanlink.net/tag/neo_fascites|broader|http://www.semanlink.net/tag/fascisme +http://www.semanlink.net/tag/neo_fascites|creationDate|2018-10-03 +http://www.semanlink.net/tag/neo_fascites|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/neo_fascites|uri|http://www.semanlink.net/tag/neo_fascites +http://www.semanlink.net/tag/neo_fascites|broader_prefLabel|Fascisme +http://www.semanlink.net/tag/automotive_ontology_working_group|creationTime|2014-07-24T15:07:15Z +http://www.semanlink.net/tag/automotive_ontology_working_group|prefLabel|Automotive Ontology Working Group +http://www.semanlink.net/tag/automotive_ontology_working_group|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/automotive_ontology_working_group|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/automotive_ontology_working_group|related|http://www.semanlink.net/tag/mirek_sopek +http://www.semanlink.net/tag/automotive_ontology_working_group|related|http://www.semanlink.net/tag/martin_hepp +http://www.semanlink.net/tag/automotive_ontology_working_group|creationDate|2014-07-24 +http://www.semanlink.net/tag/automotive_ontology_working_group|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/automotive_ontology_working_group|uri|http://www.semanlink.net/tag/automotive_ontology_working_group +http://www.semanlink.net/tag/automotive_ontology_working_group|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/automotive_ontology_working_group|broader_prefLabel|Automobile +http://www.semanlink.net/tag/automotive_ontology_working_group|broader_altLabel|Ontology +http://www.semanlink.net/tag/automotive_ontology_working_group|broader_altLabel|Automotive +http://www.semanlink.net/tag/cocoon|prefLabel|Cocoon +http://www.semanlink.net/tag/cocoon|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/cocoon|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/cocoon|broader|http://www.semanlink.net/tag/xml +http://www.semanlink.net/tag/cocoon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cocoon|uri|http://www.semanlink.net/tag/cocoon +http://www.semanlink.net/tag/cocoon|broader_prefLabel|Java +http://www.semanlink.net/tag/cocoon|broader_prefLabel|Dev +http://www.semanlink.net/tag/cocoon|broader_prefLabel|XML +http://www.semanlink.net/tag/tombe_d_amphipolis|creationTime|2014-10-18T09:30:24Z +http://www.semanlink.net/tag/tombe_d_amphipolis|prefLabel|Tombe d'amphipolis +http://www.semanlink.net/tag/tombe_d_amphipolis|broader|http://www.semanlink.net/tag/alexandre_le_grand +http://www.semanlink.net/tag/tombe_d_amphipolis|broader|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/tag/tombe_d_amphipolis|broader|http://www.semanlink.net/tag/decouverte_archeologique +http://www.semanlink.net/tag/tombe_d_amphipolis|creationDate|2014-10-18 +http://www.semanlink.net/tag/tombe_d_amphipolis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tombe_d_amphipolis|uri|http://www.semanlink.net/tag/tombe_d_amphipolis +http://www.semanlink.net/tag/tombe_d_amphipolis|broader_prefLabel|Alexandre le Grand +http://www.semanlink.net/tag/tombe_d_amphipolis|broader_prefLabel|Grèce antique +http://www.semanlink.net/tag/tombe_d_amphipolis|broader_prefLabel|Découverte archéologique +http://www.semanlink.net/tag/elliotte_rusty_harold|creationTime|2012-07-12T12:21:07Z +http://www.semanlink.net/tag/elliotte_rusty_harold|prefLabel|Elliotte Rusty Harold +http://www.semanlink.net/tag/elliotte_rusty_harold|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/elliotte_rusty_harold|creationDate|2012-07-12 
+http://www.semanlink.net/tag/elliotte_rusty_harold|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elliotte_rusty_harold|uri|http://www.semanlink.net/tag/elliotte_rusty_harold +http://www.semanlink.net/tag/elliotte_rusty_harold|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/elliotte_rusty_harold|broader_altLabel|Technical guys +http://www.semanlink.net/tag/porc|prefLabel|Porc +http://www.semanlink.net/tag/porc|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/porc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/porc|uri|http://www.semanlink.net/tag/porc +http://www.semanlink.net/tag/porc|broader_prefLabel|Animal +http://www.semanlink.net/tag/nlp_using_knowledge|creationTime|2019-06-28T00:35:14Z +http://www.semanlink.net/tag/nlp_using_knowledge|prefLabel|NLP: using Knowledge +http://www.semanlink.net/tag/nlp_using_knowledge|broader|http://www.semanlink.net/tag/knowledge +http://www.semanlink.net/tag/nlp_using_knowledge|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/nlp_using_knowledge|creationDate|2019-06-28 +http://www.semanlink.net/tag/nlp_using_knowledge|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_using_knowledge|uri|http://www.semanlink.net/tag/nlp_using_knowledge +http://www.semanlink.net/tag/nlp_using_knowledge|broader_prefLabel|Knowledge +http://www.semanlink.net/tag/nlp_using_knowledge|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/agriculture_industrielle|creationTime|2007-09-18T22:04:39Z +http://www.semanlink.net/tag/agriculture_industrielle|prefLabel|Agriculture industrielle +http://www.semanlink.net/tag/agriculture_industrielle|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/agriculture_industrielle|creationDate|2007-09-18 +http://www.semanlink.net/tag/agriculture_industrielle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/agriculture_industrielle|uri|http://www.semanlink.net/tag/agriculture_industrielle +http://www.semanlink.net/tag/agriculture_industrielle|broader_prefLabel|Agriculture +http://www.semanlink.net/tag/opml|creationTime|2007-07-17T23:13:52Z +http://www.semanlink.net/tag/opml|prefLabel|OPML +http://www.semanlink.net/tag/opml|broader|http://www.semanlink.net/tag/dave_winer +http://www.semanlink.net/tag/opml|broader|http://www.semanlink.net/tag/outliner +http://www.semanlink.net/tag/opml|creationDate|2007-07-17 +http://www.semanlink.net/tag/opml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/opml|uri|http://www.semanlink.net/tag/opml +http://www.semanlink.net/tag/opml|broader_prefLabel|Dave Winer +http://www.semanlink.net/tag/opml|broader_prefLabel|Outliner +http://www.semanlink.net/tag/douglas_rushkoff|creationTime|2016-03-27T12:03:39Z +http://www.semanlink.net/tag/douglas_rushkoff|prefLabel|Douglas Rushkoff +http://www.semanlink.net/tag/douglas_rushkoff|creationDate|2016-03-27 +http://www.semanlink.net/tag/douglas_rushkoff|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/douglas_rushkoff|describedBy|https://en.wikipedia.org/wiki/Douglas_Rushkoff +http://www.semanlink.net/tag/douglas_rushkoff|uri|http://www.semanlink.net/tag/douglas_rushkoff +http://www.semanlink.net/tag/hash_bang_uris|creationTime|2011-08-11T11:29:47Z +http://www.semanlink.net/tag/hash_bang_uris|prefLabel|Hash-bang 
URIs +http://www.semanlink.net/tag/hash_bang_uris|broader|http://www.semanlink.net/tag/hash_uris +http://www.semanlink.net/tag/hash_bang_uris|creationDate|2011-08-11 +http://www.semanlink.net/tag/hash_bang_uris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hash_bang_uris|uri|http://www.semanlink.net/tag/hash_bang_uris +http://www.semanlink.net/tag/hash_bang_uris|broader_prefLabel|Hash URIs +http://www.semanlink.net/tag/romancier|prefLabel|Romancier +http://www.semanlink.net/tag/romancier|broader|http://www.semanlink.net/tag/intellectuel +http://www.semanlink.net/tag/romancier|broader|http://www.semanlink.net/tag/litterature +http://www.semanlink.net/tag/romancier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/romancier|uri|http://www.semanlink.net/tag/romancier +http://www.semanlink.net/tag/romancier|broader_prefLabel|Intellectuel +http://www.semanlink.net/tag/romancier|broader_prefLabel|Littérature +http://www.semanlink.net/tag/romancier|broader_related|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/niger_petrole|creationTime|2008-06-20T23:53:32Z +http://www.semanlink.net/tag/niger_petrole|prefLabel|Niger : pétrole +http://www.semanlink.net/tag/niger_petrole|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/niger_petrole|broader|http://www.semanlink.net/tag/petrole +http://www.semanlink.net/tag/niger_petrole|creationDate|2008-06-20 +http://www.semanlink.net/tag/niger_petrole|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/niger_petrole|uri|http://www.semanlink.net/tag/niger_petrole +http://www.semanlink.net/tag/niger_petrole|broader_prefLabel|Niger +http://www.semanlink.net/tag/niger_petrole|broader_prefLabel|Pétrole +http://www.semanlink.net/tag/niger_petrole|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/niger_petrole|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/niger_petrole|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/fungal_infections|creationTime|2016-08-28T15:56:53Z +http://www.semanlink.net/tag/fungal_infections|prefLabel|Fungal infections +http://www.semanlink.net/tag/fungal_infections|broader|http://www.semanlink.net/tag/champignon +http://www.semanlink.net/tag/fungal_infections|broader|http://www.semanlink.net/tag/problemes_sanitaires +http://www.semanlink.net/tag/fungal_infections|creationDate|2016-08-28 +http://www.semanlink.net/tag/fungal_infections|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fungal_infections|uri|http://www.semanlink.net/tag/fungal_infections +http://www.semanlink.net/tag/fungal_infections|broader_prefLabel|Champignons +http://www.semanlink.net/tag/fungal_infections|broader_prefLabel|Problèmes sanitaires +http://www.semanlink.net/tag/rdf_service|creationTime|2007-04-25T15:39:40Z +http://www.semanlink.net/tag/rdf_service|prefLabel|RDF Service +http://www.semanlink.net/tag/rdf_service|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_service|creationDate|2007-04-25 +http://www.semanlink.net/tag/rdf_service|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_service|uri|http://www.semanlink.net/tag/rdf_service +http://www.semanlink.net/tag/rdf_service|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_service|broader_related|http://www.semanlink.net/tag/w3c 
+http://www.semanlink.net/tag/rdf_service|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_service|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_service|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_service|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/sentiment|creationTime|2011-05-29T20:44:45Z +http://www.semanlink.net/tag/sentiment|prefLabel|Sentiment +http://www.semanlink.net/tag/sentiment|creationDate|2011-05-29 +http://www.semanlink.net/tag/sentiment|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sentiment|uri|http://www.semanlink.net/tag/sentiment +http://www.semanlink.net/tag/cognitive_search|creationTime|2020-12-19T11:26:57Z +http://www.semanlink.net/tag/cognitive_search|prefLabel|Cognitive Search +http://www.semanlink.net/tag/cognitive_search|broader|http://www.semanlink.net/tag/ai_knowledge +http://www.semanlink.net/tag/cognitive_search|broader|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/cognitive_search|broader|http://www.semanlink.net/tag/search +http://www.semanlink.net/tag/cognitive_search|broader|http://www.semanlink.net/tag/cognitive_computing +http://www.semanlink.net/tag/cognitive_search|related|http://www.semanlink.net/tag/stardog +http://www.semanlink.net/tag/cognitive_search|related|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/tag/cognitive_search|related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/cognitive_search|creationDate|2020-12-19 +http://www.semanlink.net/tag/cognitive_search|comment|"(enterprise) search platform that uses AI technologies (NLP, ML, [Knowledge Graphs](tag:knowledge_graph)) to ingest, organize and query content from multiple sources. + +- NLP pipeline to extract, augment and index data + +- solutions: + - IBM Watson + - [Sinequa](https://www.sinequa.com) (insight platform) + - Microsoft azure + - [Goldfire](tag:goldfire) + - [Enterprise Knowledge Graph Platforms](tag:enterprise_knowledge_graph_platform) (?) 
+ +Notes + +- text search - cf apache lucene +- NLP pipeline + - connectors to data sources + - ""reading"" documents (first, understanding their structure) + - embeddings (ML requires numeric representations) + - Entity recognition/linking, relation extraction + - Knowledge Graph +- Knowledge + - general / domain related ; extern / intern ; formal or not + - text / document embeddings +- Database + - elasticsearch, graph DB (neo4j) + - cloud + - and all questions related to DB management + - access control" +http://www.semanlink.net/tag/cognitive_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cognitive_search|uri|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/tag/cognitive_search|broader_prefLabel|AI + Knowledge +http://www.semanlink.net/tag/cognitive_search|broader_prefLabel|Information retrieval +http://www.semanlink.net/tag/cognitive_search|broader_prefLabel|Search +http://www.semanlink.net/tag/cognitive_search|broader_prefLabel|Cognitive computing +http://www.semanlink.net/tag/cognitive_search|broader_altLabel|Domain Knowledge in AI +http://www.semanlink.net/tag/cognitive_search|broader_altLabel|IR +http://www.semanlink.net/tag/cognitive_search|broader_related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/cognitive_search|broader_related|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/agriculture_biologique|creationTime|2014-02-23T13:16:30Z +http://www.semanlink.net/tag/agriculture_biologique|prefLabel|Agriculture biologique +http://www.semanlink.net/tag/agriculture_biologique|creationDate|2014-02-23 +http://www.semanlink.net/tag/agriculture_biologique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/agriculture_biologique|uri|http://www.semanlink.net/tag/agriculture_biologique +http://www.semanlink.net/tag/panama_papers|creationTime|2016-04-04T12:41:59Z +http://www.semanlink.net/tag/panama_papers|prefLabel|Panama papers +http://www.semanlink.net/tag/panama_papers|broader|http://www.semanlink.net/tag/paradis_fiscaux +http://www.semanlink.net/tag/panama_papers|broader|http://www.semanlink.net/tag/leaks +http://www.semanlink.net/tag/panama_papers|creationDate|2016-04-04 +http://www.semanlink.net/tag/panama_papers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/panama_papers|uri|http://www.semanlink.net/tag/panama_papers +http://www.semanlink.net/tag/panama_papers|broader_prefLabel|Paradis fiscaux +http://www.semanlink.net/tag/panama_papers|broader_prefLabel|Leaks +http://www.semanlink.net/tag/panama_papers|broader_altLabel|Tax Haven +http://www.semanlink.net/tag/panama_papers|broader_altLabel|Paradis fiscal +http://www.semanlink.net/tag/panama_papers|broader_related|http://www.semanlink.net/tag/luxembourg +http://www.semanlink.net/tag/panama_papers|broader_related|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/tag/banksy|creationTime|2014-03-03T23:00:54Z +http://www.semanlink.net/tag/banksy|prefLabel|Banksy +http://www.semanlink.net/tag/banksy|broader|http://www.semanlink.net/tag/peintre +http://www.semanlink.net/tag/banksy|broader|http://www.semanlink.net/tag/street_art +http://www.semanlink.net/tag/banksy|creationDate|2014-03-03 +http://www.semanlink.net/tag/banksy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/banksy|describedBy|https://en.wikipedia.org/wiki/Banksy 
+http://www.semanlink.net/tag/banksy|uri|http://www.semanlink.net/tag/banksy +http://www.semanlink.net/tag/banksy|broader_prefLabel|Peintre +http://www.semanlink.net/tag/banksy|broader_prefLabel|Street art +http://www.semanlink.net/tag/sphere_packing|creationTime|2016-04-03T13:42:32Z +http://www.semanlink.net/tag/sphere_packing|prefLabel|Sphere packing +http://www.semanlink.net/tag/sphere_packing|broader|http://www.semanlink.net/tag/grands_problemes_mathematiques +http://www.semanlink.net/tag/sphere_packing|creationDate|2016-04-03 +http://www.semanlink.net/tag/sphere_packing|comment|a sphere packing is an arrangement of non-overlapping spheres within a containing space +http://www.semanlink.net/tag/sphere_packing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sphere_packing|describedBy|https://en.wikipedia.org/wiki/Sphere_packing +http://www.semanlink.net/tag/sphere_packing|uri|http://www.semanlink.net/tag/sphere_packing +http://www.semanlink.net/tag/sphere_packing|broader_prefLabel|Grands problèmes mathématiques +http://www.semanlink.net/tag/african_land_grab|creationTime|2014-07-22T18:26:47Z +http://www.semanlink.net/tag/african_land_grab|prefLabel|African land grab +http://www.semanlink.net/tag/african_land_grab|broader|http://www.semanlink.net/tag/accaparement_des_terres_agricoles +http://www.semanlink.net/tag/african_land_grab|broader|http://www.semanlink.net/tag/terres_agricoles +http://www.semanlink.net/tag/african_land_grab|broader|http://www.semanlink.net/tag/agriculture_africaine +http://www.semanlink.net/tag/african_land_grab|creationDate|2014-07-22 +http://www.semanlink.net/tag/african_land_grab|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/african_land_grab|uri|http://www.semanlink.net/tag/african_land_grab +http://www.semanlink.net/tag/african_land_grab|broader_prefLabel|Accaparement des terres agricoles +http://www.semanlink.net/tag/african_land_grab|broader_prefLabel|Terres agricoles +http://www.semanlink.net/tag/african_land_grab|broader_prefLabel|Agriculture africaine +http://www.semanlink.net/tag/virus|prefLabel|Virus +http://www.semanlink.net/tag/virus|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/virus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/virus|uri|http://www.semanlink.net/tag/virus +http://www.semanlink.net/tag/virus|broader_prefLabel|Biology +http://www.semanlink.net/tag/virus|broader_altLabel|Biologie +http://www.semanlink.net/tag/cern|prefLabel|CERN +http://www.semanlink.net/tag/cern|broader|http://www.semanlink.net/tag/recherche +http://www.semanlink.net/tag/cern|broader|http://www.semanlink.net/tag/physique_des_particules +http://www.semanlink.net/tag/cern|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cern|uri|http://www.semanlink.net/tag/cern +http://www.semanlink.net/tag/cern|broader_prefLabel|Recherche +http://www.semanlink.net/tag/cern|broader_prefLabel|Physique des particules +http://www.semanlink.net/tag/cern|broader_altLabel|Research +http://www.semanlink.net/tag/developpement_humain|prefLabel|"""Développement humain""" +http://www.semanlink.net/tag/developpement_humain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/developpement_humain|uri|http://www.semanlink.net/tag/developpement_humain +http://www.semanlink.net/tag/semantic_web_life_sciences|creationTime|2008-06-12T08:30:39Z 
+http://www.semanlink.net/tag/semantic_web_life_sciences|prefLabel|Semantic Web: Life Sciences +http://www.semanlink.net/tag/semantic_web_life_sciences|broader|http://www.semanlink.net/tag/semantic_web_use_cases +http://www.semanlink.net/tag/semantic_web_life_sciences|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/semantic_web_life_sciences|creationDate|2008-06-12 +http://www.semanlink.net/tag/semantic_web_life_sciences|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_life_sciences|uri|http://www.semanlink.net/tag/semantic_web_life_sciences +http://www.semanlink.net/tag/semantic_web_life_sciences|broader_prefLabel|Semantic web : Use cases +http://www.semanlink.net/tag/semantic_web_life_sciences|broader_prefLabel|Biology +http://www.semanlink.net/tag/semantic_web_life_sciences|broader_altLabel|Biologie +http://www.semanlink.net/tag/google_patents|creationTime|2019-02-09T00:52:11Z +http://www.semanlink.net/tag/google_patents|prefLabel|Google Patents +http://www.semanlink.net/tag/google_patents|broader|http://www.semanlink.net/tag/ip_ir_ml_ia +http://www.semanlink.net/tag/google_patents|broader|http://www.semanlink.net/tag/patent_finding +http://www.semanlink.net/tag/google_patents|broader|http://www.semanlink.net/tag/patent +http://www.semanlink.net/tag/google_patents|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_patents|creationDate|2019-02-09 +http://www.semanlink.net/tag/google_patents|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_patents|homepage|https://patents.google.com +http://www.semanlink.net/tag/google_patents|uri|http://www.semanlink.net/tag/google_patents +http://www.semanlink.net/tag/google_patents|broader_prefLabel|AI 4 IP +http://www.semanlink.net/tag/google_patents|broader_prefLabel|Patent finding +http://www.semanlink.net/tag/google_patents|broader_prefLabel|Patent +http://www.semanlink.net/tag/google_patents|broader_prefLabel|Google +http://www.semanlink.net/tag/google_patents|broader_altLabel|Brevet +http://www.semanlink.net/tag/google_patents|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/ontowiki|creationTime|2013-09-10T01:26:24Z +http://www.semanlink.net/tag/ontowiki|prefLabel|OntoWiki +http://www.semanlink.net/tag/ontowiki|broader|http://www.semanlink.net/tag/semantic_data_wiki +http://www.semanlink.net/tag/ontowiki|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/ontowiki|broader|http://www.semanlink.net/tag/linked_data_publishing +http://www.semanlink.net/tag/ontowiki|broader|http://www.semanlink.net/tag/php +http://www.semanlink.net/tag/ontowiki|related|http://www.semanlink.net/tag/soren_auer +http://www.semanlink.net/tag/ontowiki|creationDate|2013-09-10 +http://www.semanlink.net/tag/ontowiki|comment|Semantic data wiki as well as Linked Data publishing engine +http://www.semanlink.net/tag/ontowiki|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ontowiki|uri|http://www.semanlink.net/tag/ontowiki +http://www.semanlink.net/tag/ontowiki|broader_prefLabel|Semantic data wiki +http://www.semanlink.net/tag/ontowiki|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/ontowiki|broader_prefLabel|Linked Data publishing +http://www.semanlink.net/tag/ontowiki|broader_prefLabel|PHP +http://www.semanlink.net/tag/ontowiki|broader_altLabel|RDF Data publishing 
+http://www.semanlink.net/tag/proletaire|creationTime|2015-07-19T17:35:38Z +http://www.semanlink.net/tag/proletaire|prefLabel|Prolétaire +http://www.semanlink.net/tag/proletaire|broader|http://www.semanlink.net/tag/proletarisation +http://www.semanlink.net/tag/proletaire|creationDate|2015-07-19 +http://www.semanlink.net/tag/proletaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/proletaire|uri|http://www.semanlink.net/tag/proletaire +http://www.semanlink.net/tag/proletaire|broader_prefLabel|Prolétarisation +http://www.semanlink.net/tag/proletaire|broader_related|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.semanlink.net/tag/courtadon|creationTime|2016-06-09T22:42:50Z +http://www.semanlink.net/tag/courtadon|prefLabel|Courtadon +http://www.semanlink.net/tag/courtadon|broader|http://www.semanlink.net/tag/sculpture +http://www.semanlink.net/tag/courtadon|broader|http://www.semanlink.net/tag/pierre_de_volvic +http://www.semanlink.net/tag/courtadon|creationDate|2016-06-09 +http://www.semanlink.net/tag/courtadon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/courtadon|uri|http://www.semanlink.net/tag/courtadon +http://www.semanlink.net/tag/courtadon|broader_prefLabel|Sculpture +http://www.semanlink.net/tag/courtadon|broader_prefLabel|Pierre de Volvic +http://www.semanlink.net/tag/courtadon|broader_altLabel|Statuaire +http://www.semanlink.net/tag/to_do|creationTime|2009-05-14T00:39:03Z +http://www.semanlink.net/tag/to_do|prefLabel|To do +http://www.semanlink.net/tag/to_do|creationDate|2009-05-14 +http://www.semanlink.net/tag/to_do|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/to_do|altLabel|Todo +http://www.semanlink.net/tag/to_do|uri|http://www.semanlink.net/tag/to_do +http://www.semanlink.net/tag/synonymy|creationTime|2019-01-26T01:28:27Z +http://www.semanlink.net/tag/synonymy|prefLabel|Synonymy +http://www.semanlink.net/tag/synonymy|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/synonymy|creationDate|2019-01-26 +http://www.semanlink.net/tag/synonymy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/synonymy|uri|http://www.semanlink.net/tag/synonymy +http://www.semanlink.net/tag/synonymy|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/python_tips|creationTime|2017-06-19T09:49:53Z +http://www.semanlink.net/tag/python_tips|prefLabel|Python tips +http://www.semanlink.net/tag/python_tips|broader|http://www.semanlink.net/tag/dev_tips +http://www.semanlink.net/tag/python_tips|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/python_tips|creationDate|2017-06-19 +http://www.semanlink.net/tag/python_tips|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/python_tips|uri|http://www.semanlink.net/tag/python_tips +http://www.semanlink.net/tag/python_tips|broader_prefLabel|Dev tips +http://www.semanlink.net/tag/python_tips|broader_prefLabel|Python +http://www.semanlink.net/tag/python_tips|broader_altLabel|Dev tip +http://www.semanlink.net/tag/pesticide|creationTime|2010-12-14T23:32:04Z +http://www.semanlink.net/tag/pesticide|prefLabel|Pesticides +http://www.semanlink.net/tag/pesticide|broader|http://www.semanlink.net/tag/agriculture_industrielle +http://www.semanlink.net/tag/pesticide|creationDate|2010-12-14 +http://www.semanlink.net/tag/pesticide|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/pesticide|uri|http://www.semanlink.net/tag/pesticide +http://www.semanlink.net/tag/pesticide|broader_prefLabel|Agriculture industrielle +http://www.semanlink.net/tag/martynas_jusevicius|creationTime|2020-03-13T10:39:19Z +http://www.semanlink.net/tag/martynas_jusevicius|prefLabel|Martynas Jusevicius +http://www.semanlink.net/tag/martynas_jusevicius|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/martynas_jusevicius|creationDate|2020-03-13 +http://www.semanlink.net/tag/martynas_jusevicius|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/martynas_jusevicius|uri|http://www.semanlink.net/tag/martynas_jusevicius +http://www.semanlink.net/tag/martynas_jusevicius|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/satori|creationTime|2013-03-25T13:12:16Z +http://www.semanlink.net/tag/satori|prefLabel|Satori +http://www.semanlink.net/tag/satori|broader|http://www.semanlink.net/tag/microsoft_research +http://www.semanlink.net/tag/satori|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/satori|related|http://www.semanlink.net/tag/google_knowledge_graph +http://www.semanlink.net/tag/satori|creationDate|2013-03-25 +http://www.semanlink.net/tag/satori|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/satori|uri|http://www.semanlink.net/tag/satori +http://www.semanlink.net/tag/satori|broader_prefLabel|Microsoft Research +http://www.semanlink.net/tag/satori|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/satori|broader_altLabel|sw +http://www.semanlink.net/tag/satori|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/minting_uris|creationTime|2010-04-27T10:07:52Z +http://www.semanlink.net/tag/minting_uris|prefLabel|Minting URIs +http://www.semanlink.net/tag/minting_uris|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/minting_uris|creationDate|2010-04-27 +http://www.semanlink.net/tag/minting_uris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/minting_uris|uri|http://www.semanlink.net/tag/minting_uris +http://www.semanlink.net/tag/minting_uris|broader_prefLabel|URI +http://www.semanlink.net/tag/gene_editing|creationTime|2016-08-19T12:39:31Z +http://www.semanlink.net/tag/gene_editing|prefLabel|Gene editing +http://www.semanlink.net/tag/gene_editing|broader|http://www.semanlink.net/tag/manipulations_genetiques +http://www.semanlink.net/tag/gene_editing|creationDate|2016-08-19 +http://www.semanlink.net/tag/gene_editing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gene_editing|uri|http://www.semanlink.net/tag/gene_editing +http://www.semanlink.net/tag/gene_editing|broader_prefLabel|Manipulations génétiques +http://www.semanlink.net/tag/stanford_pos_tagger|creationTime|2017-07-11T15:36:54Z +http://www.semanlink.net/tag/stanford_pos_tagger|prefLabel|Stanford POS Tagger +http://www.semanlink.net/tag/stanford_pos_tagger|broader|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/tag/stanford_pos_tagger|broader|http://www.semanlink.net/tag/part_of_speech_tagging +http://www.semanlink.net/tag/stanford_pos_tagger|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/stanford_pos_tagger|creationDate|2017-07-11 +http://www.semanlink.net/tag/stanford_pos_tagger|comment|"Java 1.8. Trained tagger models for English, Arabic, Chinese, French, German. 
The tagger can be retrained on any language, given POS-annotated training text for the language. Uses the Penn Treebank tag set. [NLTK interface](http://www.nltk.org/api/nltk.tag.html#module-nltk.tag.stanford) +" +http://www.semanlink.net/tag/stanford_pos_tagger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stanford_pos_tagger|homepage|https://nlp.stanford.edu/software/tagger.shtml +http://www.semanlink.net/tag/stanford_pos_tagger|uri|http://www.semanlink.net/tag/stanford_pos_tagger +http://www.semanlink.net/tag/stanford_pos_tagger|broader_prefLabel|NLP@Stanford +http://www.semanlink.net/tag/stanford_pos_tagger|broader_prefLabel|Part Of Speech Tagging +http://www.semanlink.net/tag/stanford_pos_tagger|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/stanford_pos_tagger|broader_altLabel|POS +http://www.semanlink.net/tag/stanford_pos_tagger|broader_altLabel|POS Tagging +http://www.semanlink.net/tag/stanford_pos_tagger|broader_related|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/tag/stanford_pos_tagger|broader_related|http://www.semanlink.net/tag/dan_jurafsky +http://www.semanlink.net/tag/schema_org|creationTime|2011-06-07T14:04:08Z +http://www.semanlink.net/tag/schema_org|prefLabel|schema.org +http://www.semanlink.net/tag/schema_org|broader|http://www.semanlink.net/tag/data_web +http://www.semanlink.net/tag/schema_org|related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/schema_org|related|http://www.semanlink.net/tag/yahoo +http://www.semanlink.net/tag/schema_org|related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/schema_org|related|http://www.semanlink.net/tag/yandex +http://www.semanlink.net/tag/schema_org|related|http://www.semanlink.net/tag/microdata +http://www.semanlink.net/tag/schema_org|related|http://www.semanlink.net/tag/dan_brickley +http://www.semanlink.net/tag/schema_org|related|http://www.semanlink.net/tag/bing +http://www.semanlink.net/tag/schema_org|creationDate|2011-06-07 +http://www.semanlink.net/tag/schema_org|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/schema_org|homepage|http://schema.org/ +http://www.semanlink.net/tag/schema_org|uri|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/schema_org|broader_prefLabel|Web of data +http://www.semanlink.net/tag/elites|creationTime|2013-12-28T18:49:11Z +http://www.semanlink.net/tag/elites|prefLabel|Elites +http://www.semanlink.net/tag/elites|creationDate|2013-12-28 +http://www.semanlink.net/tag/elites|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elites|uri|http://www.semanlink.net/tag/elites +http://www.semanlink.net/tag/tibet|creationTime|2007-08-06T17:23:32Z +http://www.semanlink.net/tag/tibet|prefLabel|Tibet +http://www.semanlink.net/tag/tibet|related|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/tibet|creationDate|2007-08-06 +http://www.semanlink.net/tag/tibet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tibet|describedBy|https://en.wikipedia.org/wiki/Tibet +http://www.semanlink.net/tag/tibet|uri|http://www.semanlink.net/tag/tibet +http://www.semanlink.net/tag/university_of_maryland|prefLabel|University of Maryland +http://www.semanlink.net/tag/university_of_maryland|broader|http://www.semanlink.net/tag/universites_americaines +http://www.semanlink.net/tag/university_of_maryland|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/university_of_maryland|uri|http://www.semanlink.net/tag/university_of_maryland +http://www.semanlink.net/tag/university_of_maryland|broader_prefLabel|Universités américaines +http://www.semanlink.net/tag/election|prefLabel|Election +http://www.semanlink.net/tag/election|broader|http://www.semanlink.net/tag/politique +http://www.semanlink.net/tag/election|broader|http://www.semanlink.net/tag/societe +http://www.semanlink.net/tag/election|broader|http://www.semanlink.net/tag/democratie +http://www.semanlink.net/tag/election|related|http://www.semanlink.net/tag/facebook_cambridge_analytica +http://www.semanlink.net/tag/election|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/election|uri|http://www.semanlink.net/tag/election +http://www.semanlink.net/tag/election|broader_prefLabel|Politique +http://www.semanlink.net/tag/election|broader_prefLabel|Société +http://www.semanlink.net/tag/election|broader_prefLabel|Démocratie +http://www.semanlink.net/tag/create_js|creationTime|2012-06-13T11:43:50Z +http://www.semanlink.net/tag/create_js|prefLabel|create.js +http://www.semanlink.net/tag/create_js|broader|http://www.semanlink.net/tag/henri_bergius +http://www.semanlink.net/tag/create_js|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/create_js|broader|http://www.semanlink.net/tag/interactive_knowledge_stack +http://www.semanlink.net/tag/create_js|creationDate|2012-06-13 +http://www.semanlink.net/tag/create_js|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/create_js|uri|http://www.semanlink.net/tag/create_js +http://www.semanlink.net/tag/create_js|broader_prefLabel|Henri Bergius +http://www.semanlink.net/tag/create_js|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/create_js|broader_prefLabel|Interactive Knowledge Stack +http://www.semanlink.net/tag/create_js|broader_altLabel|js +http://www.semanlink.net/tag/create_js|broader_altLabel|IKS +http://www.semanlink.net/tag/create_js|broader_related|http://www.semanlink.net/tag/interactive_knowledge_stack +http://www.semanlink.net/tag/create_js|broader_related|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://www.semanlink.net/tag/create_js|broader_related|http://www.semanlink.net/tag/finlande +http://www.semanlink.net/tag/geste_ecologique|creationTime|2009-11-21T18:09:31Z +http://www.semanlink.net/tag/geste_ecologique|prefLabel|Geste écologique +http://www.semanlink.net/tag/geste_ecologique|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/geste_ecologique|creationDate|2009-11-21 +http://www.semanlink.net/tag/geste_ecologique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/geste_ecologique|uri|http://www.semanlink.net/tag/geste_ecologique +http://www.semanlink.net/tag/geste_ecologique|broader_prefLabel|Écologie +http://www.semanlink.net/tag/refugies|creationTime|2008-03-11T21:07:05Z +http://www.semanlink.net/tag/refugies|prefLabel|Réfugiés +http://www.semanlink.net/tag/refugies|broader|http://www.semanlink.net/tag/migrations_humaines +http://www.semanlink.net/tag/refugies|creationDate|2008-03-11 +http://www.semanlink.net/tag/refugies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/refugies|uri|http://www.semanlink.net/tag/refugies +http://www.semanlink.net/tag/refugies|broader_prefLabel|Migrations humaines 
+http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|creationTime|2007-08-06T17:32:39Z +http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|prefLabel|Massacre de la Saint-Barthélemy +http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|broader|http://www.semanlink.net/tag/histoire_de_france +http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|broader|http://www.semanlink.net/tag/guerres_de_religion +http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|creationDate|2007-08-06 +http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|describedBy|https://fr.wikipedia.org/wiki/Massacre_de_la_Saint-Barth%C3%A9lemy +http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|altLabel|Saint-Barthélemy +http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|uri|http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy +http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|broader_prefLabel|Histoire de France +http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy|broader_prefLabel|Guerres de religion +http://www.semanlink.net/tag/football|prefLabel|Football +http://www.semanlink.net/tag/football|broader|http://www.semanlink.net/tag/sport +http://www.semanlink.net/tag/football|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/football|uri|http://www.semanlink.net/tag/football +http://www.semanlink.net/tag/football|broader_prefLabel|Sport +http://www.semanlink.net/tag/nginx|creationTime|2016-04-04T16:34:08Z +http://www.semanlink.net/tag/nginx|prefLabel|Nginx +http://www.semanlink.net/tag/nginx|broader|http://www.semanlink.net/tag/web_server +http://www.semanlink.net/tag/nginx|related|http://www.semanlink.net/tag/apache +http://www.semanlink.net/tag/nginx|creationDate|2016-04-04 +http://www.semanlink.net/tag/nginx|comment|"Nginx (pronounced ""engine x"") is a web server. It can act as a reverse proxy server for HTTP, HTTPS, SMTP, POP3, and IMAP protocols, as well as a load balancer and an HTTP cache." 
+http://www.semanlink.net/tag/nginx|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nginx|describedBy|https://en.wikipedia.org/wiki/Nginx +http://www.semanlink.net/tag/nginx|uri|http://www.semanlink.net/tag/nginx +http://www.semanlink.net/tag/nginx|broader_prefLabel|Web server +http://www.semanlink.net/tag/cnil|creationTime|2008-11-10T00:57:56Z +http://www.semanlink.net/tag/cnil|prefLabel|cnil +http://www.semanlink.net/tag/cnil|creationDate|2008-11-10 +http://www.semanlink.net/tag/cnil|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cnil|uri|http://www.semanlink.net/tag/cnil +http://www.semanlink.net/tag/entailment|creationTime|2007-02-06T23:28:22Z +http://www.semanlink.net/tag/entailment|prefLabel|Entailment +http://www.semanlink.net/tag/entailment|creationDate|2007-02-06 +http://www.semanlink.net/tag/entailment|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entailment|uri|http://www.semanlink.net/tag/entailment +http://www.semanlink.net/tag/taxonomy_expansion_task|creationTime|2020-04-25T10:33:13Z +http://www.semanlink.net/tag/taxonomy_expansion_task|prefLabel|Taxonomy expansion task +http://www.semanlink.net/tag/taxonomy_expansion_task|broader|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/taxonomy_expansion_task|broader|http://www.semanlink.net/tag/taxonomies +http://www.semanlink.net/tag/taxonomy_expansion_task|creationDate|2020-04-25 +http://www.semanlink.net/tag/taxonomy_expansion_task|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/taxonomy_expansion_task|uri|http://www.semanlink.net/tag/taxonomy_expansion_task +http://www.semanlink.net/tag/taxonomy_expansion_task|broader_prefLabel|Knowledge Graph Completion +http://www.semanlink.net/tag/taxonomy_expansion_task|broader_prefLabel|Taxonomies +http://www.semanlink.net/tag/taxonomy_expansion_task|broader_altLabel|Taxonomy +http://www.semanlink.net/tag/taxonomy_expansion_task|broader_related|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/taxonomy_expansion_task|broader_related|http://www.semanlink.net/tag/skos +http://www.semanlink.net/tag/taxonomy_expansion_task|broader_related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/eclipse_tip|creationTime|2008-01-03T14:56:16Z +http://www.semanlink.net/tag/eclipse_tip|prefLabel|Eclipse tip +http://www.semanlink.net/tag/eclipse_tip|broader|http://www.semanlink.net/tag/dev_tips +http://www.semanlink.net/tag/eclipse_tip|broader|http://www.semanlink.net/tag/eclipse +http://www.semanlink.net/tag/eclipse_tip|creationDate|2008-01-03 +http://www.semanlink.net/tag/eclipse_tip|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eclipse_tip|uri|http://www.semanlink.net/tag/eclipse_tip +http://www.semanlink.net/tag/eclipse_tip|broader_prefLabel|Dev tips +http://www.semanlink.net/tag/eclipse_tip|broader_prefLabel|Eclipse +http://www.semanlink.net/tag/eclipse_tip|broader_altLabel|Dev tip +http://www.semanlink.net/tag/internet_en_afrique|prefLabel|Internet en Afrique +http://www.semanlink.net/tag/internet_en_afrique|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/internet_en_afrique|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/internet_en_afrique|broader|http://www.semanlink.net/tag/ntic_et_developpement 
+http://www.semanlink.net/tag/internet_en_afrique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/internet_en_afrique|uri|http://www.semanlink.net/tag/internet_en_afrique +http://www.semanlink.net/tag/internet_en_afrique|broader_prefLabel|Afrique +http://www.semanlink.net/tag/internet_en_afrique|broader_prefLabel|Internet +http://www.semanlink.net/tag/internet_en_afrique|broader_prefLabel|NTIC et développement +http://www.semanlink.net/tag/internet_en_afrique|broader_altLabel|Africa +http://www.semanlink.net/tag/internet_en_afrique|broader_altLabel|Tech / developing world +http://www.semanlink.net/tag/gouvernement|creationTime|2013-10-10T01:36:28Z +http://www.semanlink.net/tag/gouvernement|prefLabel|Gouvernement +http://www.semanlink.net/tag/gouvernement|creationDate|2013-10-10 +http://www.semanlink.net/tag/gouvernement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gouvernement|uri|http://www.semanlink.net/tag/gouvernement +http://www.semanlink.net/tag/humanitaire|prefLabel|Humanitaire +http://www.semanlink.net/tag/humanitaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/humanitaire|uri|http://www.semanlink.net/tag/humanitaire +http://www.semanlink.net/tag/poker|creationTime|2007-07-28T17:59:03Z +http://www.semanlink.net/tag/poker|prefLabel|Poker +http://www.semanlink.net/tag/poker|broader|http://www.semanlink.net/tag/jeux +http://www.semanlink.net/tag/poker|creationDate|2007-07-28 +http://www.semanlink.net/tag/poker|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/poker|uri|http://www.semanlink.net/tag/poker +http://www.semanlink.net/tag/poker|broader_prefLabel|Jeux +http://www.semanlink.net/tag/statistical_relational_learning|creationTime|2017-10-24T14:41:47Z +http://www.semanlink.net/tag/statistical_relational_learning|prefLabel|Statistical relational learning +http://www.semanlink.net/tag/statistical_relational_learning|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/statistical_relational_learning|related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/statistical_relational_learning|creationDate|2017-10-24 +http://www.semanlink.net/tag/statistical_relational_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/statistical_relational_learning|describedBy|https://en.wikipedia.org/wiki/Statistical_relational_learning +http://www.semanlink.net/tag/statistical_relational_learning|altLabel|Relational Machine Learning +http://www.semanlink.net/tag/statistical_relational_learning|uri|http://www.semanlink.net/tag/statistical_relational_learning +http://www.semanlink.net/tag/statistical_relational_learning|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/alibaba|creationTime|2019-12-06T10:38:36Z +http://www.semanlink.net/tag/alibaba|prefLabel|Alibaba +http://www.semanlink.net/tag/alibaba|creationDate|2019-12-06 +http://www.semanlink.net/tag/alibaba|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alibaba|uri|http://www.semanlink.net/tag/alibaba +http://www.semanlink.net/tag/umbel|creationTime|2008-04-25T08:57:11Z +http://www.semanlink.net/tag/umbel|prefLabel|Umbel +http://www.semanlink.net/tag/umbel|related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/umbel|creationDate|2008-04-25 
+http://www.semanlink.net/tag/umbel|comment|"A Lightweight Subject Concept Structure for the Web
+UMBEL (Upper-level Mapping and Binding Exchange Layer) is a lightweight reference structure for placing Web content and data in context with other data. It is comprised of about 21,000 subject concepts and their relationships — with one another and with external vocabularies and named entities. +" +http://www.semanlink.net/tag/umbel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/umbel|homepage|http://umbel.org/ +http://www.semanlink.net/tag/umbel|uri|http://www.semanlink.net/tag/umbel +http://www.semanlink.net/tag/macronie|creationTime|2020-12-19T17:16:43Z +http://www.semanlink.net/tag/macronie|prefLabel|Macronie +http://www.semanlink.net/tag/macronie|broader|http://www.semanlink.net/tag/macron +http://www.semanlink.net/tag/macronie|creationDate|2020-12-19 +http://www.semanlink.net/tag/macronie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/macronie|uri|http://www.semanlink.net/tag/macronie +http://www.semanlink.net/tag/macronie|broader_prefLabel|Macron +http://www.semanlink.net/tag/etat_policier|prefLabel|Etat policier +http://www.semanlink.net/tag/etat_policier|broader|http://www.semanlink.net/tag/police +http://www.semanlink.net/tag/etat_policier|broader|http://www.semanlink.net/tag/ca_craint +http://www.semanlink.net/tag/etat_policier|broader|http://www.semanlink.net/tag/societe +http://www.semanlink.net/tag/etat_policier|related|http://www.semanlink.net/tag/securite +http://www.semanlink.net/tag/etat_policier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/etat_policier|uri|http://www.semanlink.net/tag/etat_policier +http://www.semanlink.net/tag/etat_policier|broader_prefLabel|Police +http://www.semanlink.net/tag/etat_policier|broader_prefLabel|Ca craint +http://www.semanlink.net/tag/etat_policier|broader_prefLabel|Société +http://www.semanlink.net/tag/lobby_nucleaire|creationTime|2016-02-09T23:41:28Z +http://www.semanlink.net/tag/lobby_nucleaire|prefLabel|Lobby nucléaire +http://www.semanlink.net/tag/lobby_nucleaire|broader|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/tag/lobby_nucleaire|broader|http://www.semanlink.net/tag/lobby +http://www.semanlink.net/tag/lobby_nucleaire|creationDate|2016-02-09 +http://www.semanlink.net/tag/lobby_nucleaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lobby_nucleaire|uri|http://www.semanlink.net/tag/lobby_nucleaire +http://www.semanlink.net/tag/lobby_nucleaire|broader_prefLabel|Industrie nucléaire +http://www.semanlink.net/tag/lobby_nucleaire|broader_prefLabel|Lobby +http://www.semanlink.net/tag/lobby_nucleaire|broader_altLabel|Nucléaire +http://www.semanlink.net/tag/product_knowledge_graph|creationTime|2020-04-09T21:19:40Z +http://www.semanlink.net/tag/product_knowledge_graph|prefLabel|Product Knowledge Graph +http://www.semanlink.net/tag/product_knowledge_graph|broader|http://www.semanlink.net/tag/product_description +http://www.semanlink.net/tag/product_knowledge_graph|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/product_knowledge_graph|creationDate|2020-04-09 +http://www.semanlink.net/tag/product_knowledge_graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/product_knowledge_graph|uri|http://www.semanlink.net/tag/product_knowledge_graph +http://www.semanlink.net/tag/product_knowledge_graph|broader_prefLabel|Product description 
+http://www.semanlink.net/tag/product_knowledge_graph|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/product_knowledge_graph|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/product_knowledge_graph|broader_altLabel|KG +http://www.semanlink.net/tag/product_knowledge_graph|broader_related|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/google_visualization_api|creationTime|2010-08-27T17:42:13Z +http://www.semanlink.net/tag/google_visualization_api|prefLabel|Google Visualization API +http://www.semanlink.net/tag/google_visualization_api|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_visualization_api|broader|http://www.semanlink.net/tag/data_visualization_tools +http://www.semanlink.net/tag/google_visualization_api|broader|http://www.semanlink.net/tag/api +http://www.semanlink.net/tag/google_visualization_api|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/google_visualization_api|creationDate|2010-08-27 +http://www.semanlink.net/tag/google_visualization_api|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_visualization_api|uri|http://www.semanlink.net/tag/google_visualization_api +http://www.semanlink.net/tag/google_visualization_api|broader_prefLabel|Google +http://www.semanlink.net/tag/google_visualization_api|broader_prefLabel|Visualization Tools +http://www.semanlink.net/tag/google_visualization_api|broader_prefLabel|API +http://www.semanlink.net/tag/google_visualization_api|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/google_visualization_api|broader_altLabel|Data Visualization Tools +http://www.semanlink.net/tag/google_visualization_api|broader_altLabel|js +http://www.semanlink.net/tag/google_visualization_api|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/google_visualization_api|broader_related|http://www.semanlink.net/tag/information_visualization +http://www.semanlink.net/tag/gene_therapy|creationTime|2016-08-02T10:07:04Z +http://www.semanlink.net/tag/gene_therapy|prefLabel|Gene therapy +http://www.semanlink.net/tag/gene_therapy|broader|http://www.semanlink.net/tag/manipulations_genetiques +http://www.semanlink.net/tag/gene_therapy|creationDate|2016-08-02 +http://www.semanlink.net/tag/gene_therapy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gene_therapy|describedBy|https://en.wikipedia.org/wiki/Gene_therapy +http://www.semanlink.net/tag/gene_therapy|uri|http://www.semanlink.net/tag/gene_therapy +http://www.semanlink.net/tag/gene_therapy|broader_prefLabel|Manipulations génétiques +http://www.semanlink.net/tag/semantic_web_blog|prefLabel|Semantic Web blog +http://www.semanlink.net/tag/semantic_web_blog|broader|http://www.semanlink.net/tag/blog +http://www.semanlink.net/tag/semantic_web_blog|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_blog|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_blog|uri|http://www.semanlink.net/tag/semantic_web_blog +http://www.semanlink.net/tag/semantic_web_blog|broader_prefLabel|Blog +http://www.semanlink.net/tag/semantic_web_blog|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_blog|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_blog|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/terres_agricoles|creationTime|2008-11-20T21:47:12Z 
+http://www.semanlink.net/tag/terres_agricoles|prefLabel|Terres agricoles +http://www.semanlink.net/tag/terres_agricoles|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/terres_agricoles|creationDate|2008-11-20 +http://www.semanlink.net/tag/terres_agricoles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/terres_agricoles|uri|http://www.semanlink.net/tag/terres_agricoles +http://www.semanlink.net/tag/terres_agricoles|broader_prefLabel|Agriculture +http://www.semanlink.net/tag/audio_classification|creationTime|2019-06-29T10:16:12Z +http://www.semanlink.net/tag/audio_classification|prefLabel|Audio classification +http://www.semanlink.net/tag/audio_classification|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/audio_classification|creationDate|2019-06-29 +http://www.semanlink.net/tag/audio_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/audio_classification|uri|http://www.semanlink.net/tag/audio_classification +http://www.semanlink.net/tag/audio_classification|broader_prefLabel|Classification +http://www.semanlink.net/tag/open_knowledge_foundation|creationTime|2013-09-02T11:05:36Z +http://www.semanlink.net/tag/open_knowledge_foundation|prefLabel|Open Knowledge Foundation +http://www.semanlink.net/tag/open_knowledge_foundation|creationDate|2013-09-02 +http://www.semanlink.net/tag/open_knowledge_foundation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/open_knowledge_foundation|homepage|http://okfn.org +http://www.semanlink.net/tag/open_knowledge_foundation|uri|http://www.semanlink.net/tag/open_knowledge_foundation +http://www.semanlink.net/tag/amazon_mechanical_turk|prefLabel|Amazon Mechanical Turk +http://www.semanlink.net/tag/amazon_mechanical_turk|broader|http://www.semanlink.net/tag/amazon +http://www.semanlink.net/tag/amazon_mechanical_turk|broader|http://www.semanlink.net/tag/web_marchand +http://www.semanlink.net/tag/amazon_mechanical_turk|broader|http://www.semanlink.net/tag/delocalisation_des_services +http://www.semanlink.net/tag/amazon_mechanical_turk|broader|http://www.semanlink.net/tag/artificial_artificial_intelligence +http://www.semanlink.net/tag/amazon_mechanical_turk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amazon_mechanical_turk|altLabel|mturc +http://www.semanlink.net/tag/amazon_mechanical_turk|uri|http://www.semanlink.net/tag/amazon_mechanical_turk +http://www.semanlink.net/tag/amazon_mechanical_turk|broader_prefLabel|Amazon +http://www.semanlink.net/tag/amazon_mechanical_turk|broader_prefLabel|Web marchand +http://www.semanlink.net/tag/amazon_mechanical_turk|broader_prefLabel|Délocalisation des services +http://www.semanlink.net/tag/amazon_mechanical_turk|broader_prefLabel|Artificial, Artificial Intelligence +http://www.semanlink.net/tag/fujitsu|creationTime|2012-04-25T14:06:19Z +http://www.semanlink.net/tag/fujitsu|prefLabel|Fujitsu +http://www.semanlink.net/tag/fujitsu|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/fujitsu|creationDate|2012-04-25 +http://www.semanlink.net/tag/fujitsu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fujitsu|uri|http://www.semanlink.net/tag/fujitsu +http://www.semanlink.net/tag/fujitsu|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/crise_des_migrants|creationTime|2016-01-03T12:23:05Z 
+http://www.semanlink.net/tag/crise_des_migrants|prefLabel|Crise des migrants +http://www.semanlink.net/tag/crise_des_migrants|broader|http://www.semanlink.net/tag/refugies +http://www.semanlink.net/tag/crise_des_migrants|broader|http://www.semanlink.net/tag/immigration +http://www.semanlink.net/tag/crise_des_migrants|related|http://www.semanlink.net/tag/ei +http://www.semanlink.net/tag/crise_des_migrants|creationDate|2016-01-03 +http://www.semanlink.net/tag/crise_des_migrants|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/crise_des_migrants|uri|http://www.semanlink.net/tag/crise_des_migrants +http://www.semanlink.net/tag/crise_des_migrants|broader_prefLabel|Réfugiés +http://www.semanlink.net/tag/crise_des_migrants|broader_prefLabel|Immigration +http://www.semanlink.net/tag/sw_in_technical_automotive_documentation|creationTime|2008-09-23T14:31:35Z +http://www.semanlink.net/tag/sw_in_technical_automotive_documentation|prefLabel|SW in Technical Automotive Documentation +http://www.semanlink.net/tag/sw_in_technical_automotive_documentation|broader|http://www.semanlink.net/tag/technical_documentation +http://www.semanlink.net/tag/sw_in_technical_automotive_documentation|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/sw_in_technical_automotive_documentation|creationDate|2008-09-23 +http://www.semanlink.net/tag/sw_in_technical_automotive_documentation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sw_in_technical_automotive_documentation|uri|http://www.semanlink.net/tag/sw_in_technical_automotive_documentation +http://www.semanlink.net/tag/sw_in_technical_automotive_documentation|broader_prefLabel|Technical documentation +http://www.semanlink.net/tag/sw_in_technical_automotive_documentation|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/travail|creationTime|2007-11-21T15:18:10Z +http://www.semanlink.net/tag/travail|prefLabel|Travail +http://www.semanlink.net/tag/travail|creationDate|2007-11-21 +http://www.semanlink.net/tag/travail|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/travail|uri|http://www.semanlink.net/tag/travail +http://www.semanlink.net/tag/evgeny_morozov|creationTime|2014-02-01T10:07:39Z +http://www.semanlink.net/tag/evgeny_morozov|prefLabel|Evgeny Morozov +http://www.semanlink.net/tag/evgeny_morozov|creationDate|2014-02-01 +http://www.semanlink.net/tag/evgeny_morozov|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/evgeny_morozov|homepage|http://www.evgenymorozov.com +http://www.semanlink.net/tag/evgeny_morozov|describedBy|https://en.wikipedia.org/wiki/Evgeny_Morozov +http://www.semanlink.net/tag/evgeny_morozov|uri|http://www.semanlink.net/tag/evgeny_morozov +http://www.semanlink.net/tag/memory_leak|creationTime|2007-10-31T15:51:17Z +http://www.semanlink.net/tag/memory_leak|prefLabel|Memory leak +http://www.semanlink.net/tag/memory_leak|broader|http://www.semanlink.net/tag/memoire_informatique +http://www.semanlink.net/tag/memory_leak|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/memory_leak|creationDate|2007-10-31 +http://www.semanlink.net/tag/memory_leak|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/memory_leak|uri|http://www.semanlink.net/tag/memory_leak +http://www.semanlink.net/tag/memory_leak|broader_prefLabel|Mémoire (informatique) 
+http://www.semanlink.net/tag/memory_leak|broader_prefLabel|Dev +http://www.semanlink.net/tag/ray_kurzweil|creationTime|2008-11-21T23:55:42Z +http://www.semanlink.net/tag/ray_kurzweil|prefLabel|Ray Kurzweil +http://www.semanlink.net/tag/ray_kurzweil|related|http://www.semanlink.net/tag/technological_singularity +http://www.semanlink.net/tag/ray_kurzweil|creationDate|2008-11-21 +http://www.semanlink.net/tag/ray_kurzweil|type|http://xmlns.com/foaf/0.1/Person +http://www.semanlink.net/tag/ray_kurzweil|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ray_kurzweil|describedBy|https://en.wikipedia.org/wiki/Raymond_Kurzweil +http://www.semanlink.net/tag/ray_kurzweil|uri|http://www.semanlink.net/tag/ray_kurzweil +http://www.semanlink.net/tag/bernard_maris|creationTime|2015-12-21T13:59:54Z +http://www.semanlink.net/tag/bernard_maris|prefLabel|Bernard Maris +http://www.semanlink.net/tag/bernard_maris|related|http://www.semanlink.net/tag/charlie_hebdo +http://www.semanlink.net/tag/bernard_maris|creationDate|2015-12-21 +http://www.semanlink.net/tag/bernard_maris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bernard_maris|uri|http://www.semanlink.net/tag/bernard_maris +http://www.semanlink.net/tag/language_model|creationTime|2012-03-24T09:01:10Z +http://www.semanlink.net/tag/language_model|prefLabel|Language model +http://www.semanlink.net/tag/language_model|broader|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/language_model|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/language_model|related|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/language_model|related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/language_model|creationDate|2012-03-24 +http://www.semanlink.net/tag/language_model|comment|"Language modeling: task of predicting the next word in a text given the previous words. Example of concrete practical applications: intelligent keyboards + +Language model: probability distribution over sequences of words. Statistical language models try to learn the probability of the next word given its previous words. 
+ +> Models rely on an auto-regressive factorization of the joint probability of a corpus using different approaches, from n-gram models to RNNs (SOTA as of 2018-01) ([source](https://arxiv.org/abs/1801.06146))" +http://www.semanlink.net/tag/language_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/language_model|describedBy|https://en.wikipedia.org/wiki/Language_model +http://www.semanlink.net/tag/language_model|altLabel|Language Modeling +http://www.semanlink.net/tag/language_model|altLabel|LM +http://www.semanlink.net/tag/language_model|altLabel|Statistical Language Model +http://www.semanlink.net/tag/language_model|uri|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/tag/language_model|broader_prefLabel|Unsupervised machine learning +http://www.semanlink.net/tag/language_model|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/tcp|creationTime|2013-07-22T09:52:54Z +http://www.semanlink.net/tag/tcp|prefLabel|TCP +http://www.semanlink.net/tag/tcp|creationDate|2013-07-22 +http://www.semanlink.net/tag/tcp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tcp|uri|http://www.semanlink.net/tag/tcp +http://www.semanlink.net/tag/ann_introduction|creationTime|2016-09-17T19:01:57Z +http://www.semanlink.net/tag/ann_introduction|prefLabel|ANN: introduction +http://www.semanlink.net/tag/ann_introduction|broader|http://www.semanlink.net/tag/introduction +http://www.semanlink.net/tag/ann_introduction|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/ann_introduction|creationDate|2016-09-17 +http://www.semanlink.net/tag/ann_introduction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ann_introduction|uri|http://www.semanlink.net/tag/ann_introduction +http://www.semanlink.net/tag/ann_introduction|broader_prefLabel|Introduction +http://www.semanlink.net/tag/ann_introduction|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/ann_introduction|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/ann_introduction|broader_altLabel|ANN +http://www.semanlink.net/tag/ann_introduction|broader_altLabel|NN +http://www.semanlink.net/tag/the_web_sucks|creationTime|2020-01-20T00:06:30Z +http://www.semanlink.net/tag/the_web_sucks|prefLabel|The web sucks +http://www.semanlink.net/tag/the_web_sucks|broader|http://www.semanlink.net/tag/web +http://www.semanlink.net/tag/the_web_sucks|creationDate|2020-01-20 +http://www.semanlink.net/tag/the_web_sucks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/the_web_sucks|uri|http://www.semanlink.net/tag/the_web_sucks +http://www.semanlink.net/tag/the_web_sucks|broader_prefLabel|Web +http://www.semanlink.net/tag/rdfa_parser|creationTime|2013-07-05T14:00:10Z +http://www.semanlink.net/tag/rdfa_parser|prefLabel|RDFa parser +http://www.semanlink.net/tag/rdfa_parser|broader|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/rdfa_parser|creationDate|2013-07-05 +http://www.semanlink.net/tag/rdfa_parser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdfa_parser|uri|http://www.semanlink.net/tag/rdfa_parser +http://www.semanlink.net/tag/rdfa_parser|broader_prefLabel|RDFa +http://www.semanlink.net/tag/rdfa_parser|broader_altLabel|RDF/A +http://www.semanlink.net/tag/rdfa_parser|broader_related|http://www.semanlink.net/tag/elias_torres 
+http://www.semanlink.net/tag/rdfa_parser|broader_related|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanlink.net/tag/rdfa_parser|broader_related|http://www.semanlink.net/tag/microformats +http://www.semanlink.net/tag/times|creationTime|2008-07-08T21:22:32Z +http://www.semanlink.net/tag/times|prefLabel|Times +http://www.semanlink.net/tag/times|broader|http://www.semanlink.net/tag/journal +http://www.semanlink.net/tag/times|creationDate|2008-07-08 +http://www.semanlink.net/tag/times|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/times|uri|http://www.semanlink.net/tag/times +http://www.semanlink.net/tag/times|broader_prefLabel|Presse +http://www.semanlink.net/tag/times|broader_altLabel|Journal +http://www.semanlink.net/tag/rdf_templating|creationTime|2007-07-04T22:23:39Z +http://www.semanlink.net/tag/rdf_templating|prefLabel|RDF Templating +http://www.semanlink.net/tag/rdf_templating|creationDate|2007-07-04 +http://www.semanlink.net/tag/rdf_templating|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_templating|uri|http://www.semanlink.net/tag/rdf_templating +http://www.semanlink.net/tag/knowledge|creationTime|2016-08-28T15:51:05Z +http://www.semanlink.net/tag/knowledge|prefLabel|Knowledge +http://www.semanlink.net/tag/knowledge|creationDate|2016-08-28 +http://www.semanlink.net/tag/knowledge|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge|uri|http://www.semanlink.net/tag/knowledge +http://www.semanlink.net/tag/rosetta_project|creationTime|2009-07-20T18:52:20Z +http://www.semanlink.net/tag/rosetta_project|prefLabel|Rosetta Project +http://www.semanlink.net/tag/rosetta_project|broader|http://www.semanlink.net/tag/disparition_de_langues_vivantes +http://www.semanlink.net/tag/rosetta_project|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/rosetta_project|creationDate|2009-07-20 +http://www.semanlink.net/tag/rosetta_project|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rosetta_project|describedBy|http://www.rosettaproject.org/ +http://www.semanlink.net/tag/rosetta_project|uri|http://www.semanlink.net/tag/rosetta_project +http://www.semanlink.net/tag/rosetta_project|broader_prefLabel|Disparition de langues vivantes +http://www.semanlink.net/tag/rosetta_project|broader_prefLabel|Langues +http://www.semanlink.net/tag/musicbrainz|creationTime|2007-05-23T18:46:35Z +http://www.semanlink.net/tag/musicbrainz|prefLabel|MusicBrainz +http://www.semanlink.net/tag/musicbrainz|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/musicbrainz|related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/musicbrainz|related|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/musicbrainz|creationDate|2007-05-23 +http://www.semanlink.net/tag/musicbrainz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/musicbrainz|homepage|http://musicbrainz.org/ +http://www.semanlink.net/tag/musicbrainz|uri|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/musicbrainz|broader_prefLabel|Musique +http://www.semanlink.net/tag/musicbrainz|broader_altLabel|Music +http://www.semanlink.net/tag/fossile_vivant|prefLabel|Fossile vivant +http://www.semanlink.net/tag/fossile_vivant|broader|http://www.semanlink.net/tag/paleontologie 
+http://www.semanlink.net/tag/fossile_vivant|broader|http://www.semanlink.net/tag/evolution +http://www.semanlink.net/tag/fossile_vivant|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/fossile_vivant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fossile_vivant|uri|http://www.semanlink.net/tag/fossile_vivant +http://www.semanlink.net/tag/fossile_vivant|broader_prefLabel|Paléontologie +http://www.semanlink.net/tag/fossile_vivant|broader_prefLabel|Evolution +http://www.semanlink.net/tag/fossile_vivant|broader_prefLabel|Biology +http://www.semanlink.net/tag/fossile_vivant|broader_altLabel|Biologie +http://www.semanlink.net/tag/arduino|creationTime|2013-04-19T14:16:48Z +http://www.semanlink.net/tag/arduino|prefLabel|Arduino +http://www.semanlink.net/tag/arduino|broader|http://www.semanlink.net/tag/electronics +http://www.semanlink.net/tag/arduino|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/arduino|broader|http://www.semanlink.net/tag/robotique +http://www.semanlink.net/tag/arduino|creationDate|2013-04-19 +http://www.semanlink.net/tag/arduino|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arduino|homepage|http://www.arduino.cc +http://www.semanlink.net/tag/arduino|uri|http://www.semanlink.net/tag/arduino +http://www.semanlink.net/tag/arduino|broader_prefLabel|Electronics +http://www.semanlink.net/tag/arduino|broader_prefLabel|Open Source +http://www.semanlink.net/tag/arduino|broader_prefLabel|Robotique +http://www.semanlink.net/tag/arduino|broader_altLabel|Robotics +http://www.semanlink.net/tag/arduino|broader_altLabel|Robot +http://www.semanlink.net/tag/histoire_du_niger|prefLabel|Histoire du Niger +http://www.semanlink.net/tag/histoire_du_niger|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/histoire_du_niger|broader|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.semanlink.net/tag/histoire_du_niger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_du_niger|uri|http://www.semanlink.net/tag/histoire_du_niger +http://www.semanlink.net/tag/histoire_du_niger|broader_prefLabel|Niger +http://www.semanlink.net/tag/histoire_du_niger|broader_prefLabel|Afrique de l'ouest : histoire +http://www.semanlink.net/tag/histoire_du_niger|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/histoire_du_niger|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/histoire_du_niger|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/censure_et_maltraitance_animale|creationTime|2013-09-29T13:29:41Z +http://www.semanlink.net/tag/censure_et_maltraitance_animale|prefLabel|Censure et maltraitance animale +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader|http://www.semanlink.net/tag/censorship +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader|http://www.semanlink.net/tag/factory_farming +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader|http://www.semanlink.net/tag/justice +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader|http://www.semanlink.net/tag/cruaute +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader|http://www.semanlink.net/tag/droit_a_l_information 
+http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader|http://www.semanlink.net/tag/animal_rights +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader|http://www.semanlink.net/tag/maltraitance_animale +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader|http://www.semanlink.net/tag/agriculture_industrielle +http://www.semanlink.net/tag/censure_et_maltraitance_animale|creationDate|2013-09-29 +http://www.semanlink.net/tag/censure_et_maltraitance_animale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/censure_et_maltraitance_animale|uri|http://www.semanlink.net/tag/censure_et_maltraitance_animale +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_prefLabel|Censorship +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_prefLabel|Factory farming +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_prefLabel|Vive le capitalisme ! +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_prefLabel|Justice +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_prefLabel|Cruauté +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_prefLabel|Droit à l'information +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_prefLabel|Animal rights +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_prefLabel|Maltraitance animale +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_prefLabel|Agriculture industrielle +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_altLabel|Censure +http://www.semanlink.net/tag/censure_et_maltraitance_animale|broader_altLabel|Capitalisme de merde +http://www.semanlink.net/tag/vie_privee|creationTime|2013-06-08T09:36:14Z +http://www.semanlink.net/tag/vie_privee|prefLabel|Privacy +http://www.semanlink.net/tag/vie_privee|creationDate|2013-06-08 +http://www.semanlink.net/tag/vie_privee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vie_privee|altLabel|Vie privée +http://www.semanlink.net/tag/vie_privee|uri|http://www.semanlink.net/tag/vie_privee +http://www.semanlink.net/tag/sigmund_freud|creationTime|2013-08-27T13:47:34Z +http://www.semanlink.net/tag/sigmund_freud|prefLabel|Sigmund Freud +http://www.semanlink.net/tag/sigmund_freud|creationDate|2013-08-27 +http://www.semanlink.net/tag/sigmund_freud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sigmund_freud|describedBy|https://en.wikipedia.org/wiki/Freud +http://www.semanlink.net/tag/sigmund_freud|uri|http://www.semanlink.net/tag/sigmund_freud +http://www.semanlink.net/tag/discute_avec_raphael|creationTime|2020-10-10T01:52:31Z +http://www.semanlink.net/tag/discute_avec_raphael|prefLabel|Discuté avec Raphaël +http://www.semanlink.net/tag/discute_avec_raphael|related|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/tag/discute_avec_raphael|creationDate|2020-10-10 +http://www.semanlink.net/tag/discute_avec_raphael|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/discute_avec_raphael|uri|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/tag/brain_vs_deep_learning|creationTime|2019-10-11T11:44:18Z +http://www.semanlink.net/tag/brain_vs_deep_learning|prefLabel|Brain vs Deep Learning +http://www.semanlink.net/tag/brain_vs_deep_learning|broader|http://www.semanlink.net/tag/deep_learning 
+http://www.semanlink.net/tag/brain_vs_deep_learning|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/brain_vs_deep_learning|broader|http://www.semanlink.net/tag/neuroscience_and_ai +http://www.semanlink.net/tag/brain_vs_deep_learning|creationDate|2019-10-11 +http://www.semanlink.net/tag/brain_vs_deep_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/brain_vs_deep_learning|uri|http://www.semanlink.net/tag/brain_vs_deep_learning +http://www.semanlink.net/tag/brain_vs_deep_learning|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/brain_vs_deep_learning|broader_prefLabel|Brain +http://www.semanlink.net/tag/brain_vs_deep_learning|broader_prefLabel|Neuroscience AND AI +http://www.semanlink.net/tag/brain_vs_deep_learning|broader_altLabel|Cerveau +http://www.semanlink.net/tag/brain_vs_deep_learning|broader_altLabel|Neuroscience AND Machine learning +http://www.semanlink.net/tag/brain_vs_deep_learning|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/brain_vs_deep_learning|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/brain_vs_deep_learning|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/brain_vs_deep_learning|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/ecologie|prefLabel|Écologie +http://www.semanlink.net/tag/ecologie|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/ecologie|broader|http://www.semanlink.net/tag/nature +http://www.semanlink.net/tag/ecologie|broader|http://www.semanlink.net/tag/environnement +http://www.semanlink.net/tag/ecologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ecologie|uri|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/ecologie|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/ecologie|broader_prefLabel|Nature +http://www.semanlink.net/tag/ecologie|broader_prefLabel|Environnement +http://www.semanlink.net/tag/javascript_patterns|creationTime|2012-10-18T17:44:46Z +http://www.semanlink.net/tag/javascript_patterns|prefLabel|Javascript patterns +http://www.semanlink.net/tag/javascript_patterns|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/javascript_patterns|broader|http://www.semanlink.net/tag/design_pattern +http://www.semanlink.net/tag/javascript_patterns|creationDate|2012-10-18 +http://www.semanlink.net/tag/javascript_patterns|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/javascript_patterns|uri|http://www.semanlink.net/tag/javascript_patterns +http://www.semanlink.net/tag/javascript_patterns|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/javascript_patterns|broader_prefLabel|Design pattern +http://www.semanlink.net/tag/javascript_patterns|broader_altLabel|js +http://www.semanlink.net/tag/javascript_patterns|broader_altLabel|Patterns +http://www.semanlink.net/tag/elections_americaines_2020|creationTime|2020-11-04T17:49:53Z +http://www.semanlink.net/tag/elections_americaines_2020|prefLabel|Elections américaines 2020 +http://www.semanlink.net/tag/elections_americaines_2020|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/elections_americaines_2020|broader|http://www.semanlink.net/tag/trump 
+http://www.semanlink.net/tag/elections_americaines_2020|broader|http://www.semanlink.net/tag/2020 +http://www.semanlink.net/tag/elections_americaines_2020|creationDate|2020-11-04 +http://www.semanlink.net/tag/elections_americaines_2020|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elections_americaines_2020|uri|http://www.semanlink.net/tag/elections_americaines_2020 +http://www.semanlink.net/tag/elections_americaines_2020|broader_prefLabel|USA +http://www.semanlink.net/tag/elections_americaines_2020|broader_prefLabel|Trump +http://www.semanlink.net/tag/elections_americaines_2020|broader_prefLabel|2020 +http://www.semanlink.net/tag/elections_americaines_2020|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/elections_americaines_2020|broader_altLabel|United States +http://www.semanlink.net/tag/pregnancy|creationTime|2014-08-28T15:20:25Z +http://www.semanlink.net/tag/pregnancy|prefLabel|Pregnancy +http://www.semanlink.net/tag/pregnancy|creationDate|2014-08-28 +http://www.semanlink.net/tag/pregnancy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pregnancy|uri|http://www.semanlink.net/tag/pregnancy +http://www.semanlink.net/tag/part_of_speech_tagging|creationTime|2014-06-18T09:34:21Z +http://www.semanlink.net/tag/part_of_speech_tagging|prefLabel|Part Of Speech Tagging +http://www.semanlink.net/tag/part_of_speech_tagging|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/part_of_speech_tagging|broader|http://www.semanlink.net/tag/sequence_labeling +http://www.semanlink.net/tag/part_of_speech_tagging|broader|http://www.semanlink.net/tag/general_nlp_tasks +http://www.semanlink.net/tag/part_of_speech_tagging|creationDate|2014-06-18 +http://www.semanlink.net/tag/part_of_speech_tagging|comment|or grammatical tagging, or word-category disambiguation: the process of marking up a word in a text as corresponding to a particular part of speech +http://www.semanlink.net/tag/part_of_speech_tagging|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/part_of_speech_tagging|describedBy|https://en.wikipedia.org/wiki/Part-of-speech_tagging +http://www.semanlink.net/tag/part_of_speech_tagging|altLabel|POS +http://www.semanlink.net/tag/part_of_speech_tagging|altLabel|POS Tagging +http://www.semanlink.net/tag/part_of_speech_tagging|uri|http://www.semanlink.net/tag/part_of_speech_tagging +http://www.semanlink.net/tag/part_of_speech_tagging|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/part_of_speech_tagging|broader_prefLabel|Sequence labeling +http://www.semanlink.net/tag/part_of_speech_tagging|broader_prefLabel|General NLP tasks +http://www.semanlink.net/tag/part_of_speech_tagging|broader_altLabel|Sequence Tagging +http://www.semanlink.net/tag/part_of_speech_tagging|broader_related|http://www.semanlink.net/tag/ml_sequential_data +http://www.semanlink.net/tag/magnetisme|creationTime|2009-11-08T12:13:59Z +http://www.semanlink.net/tag/magnetisme|prefLabel|Magnétisme +http://www.semanlink.net/tag/magnetisme|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/magnetisme|creationDate|2009-11-08 +http://www.semanlink.net/tag/magnetisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/magnetisme|uri|http://www.semanlink.net/tag/magnetisme +http://www.semanlink.net/tag/magnetisme|broader_prefLabel|Physique +http://www.semanlink.net/tag/magnetisme|broader_altLabel|Physics 
+http://www.semanlink.net/tag/metadata_indexing|prefLabel|Metadata indexing +http://www.semanlink.net/tag/metadata_indexing|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/metadata_indexing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/metadata_indexing|uri|http://www.semanlink.net/tag/metadata_indexing +http://www.semanlink.net/tag/metadata_indexing|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/metadata_indexing|broader_altLabel|sw +http://www.semanlink.net/tag/metadata_indexing|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/sense_embeddings|creationTime|2018-04-30T18:48:27Z +http://www.semanlink.net/tag/sense_embeddings|prefLabel|Sense embeddings +http://www.semanlink.net/tag/sense_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/sense_embeddings|broader|http://www.semanlink.net/tag/embeddings_in_nlp +http://www.semanlink.net/tag/sense_embeddings|related|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/sense_embeddings|creationDate|2018-04-30 +http://www.semanlink.net/tag/sense_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sense_embeddings|uri|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/sense_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/sense_embeddings|broader_prefLabel|Embeddings in NLP +http://www.semanlink.net/tag/sense_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/sense_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/sense_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/sense_embeddings|broader_related|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/tag/uberisation|creationTime|2015-10-30T10:33:44Z +http://www.semanlink.net/tag/uberisation|prefLabel|Uberisation +http://www.semanlink.net/tag/uberisation|broader|http://www.semanlink.net/tag/gig_economy +http://www.semanlink.net/tag/uberisation|broader|http://www.semanlink.net/tag/uber +http://www.semanlink.net/tag/uberisation|creationDate|2015-10-30 +http://www.semanlink.net/tag/uberisation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uberisation|uri|http://www.semanlink.net/tag/uberisation +http://www.semanlink.net/tag/uberisation|broader_prefLabel|Gig economy +http://www.semanlink.net/tag/uberisation|broader_prefLabel|Uber +http://www.semanlink.net/tag/nlp_topic_extraction|creationTime|2017-05-24T18:08:08Z +http://www.semanlink.net/tag/nlp_topic_extraction|prefLabel|Keyword/keyphrase extraction +http://www.semanlink.net/tag/nlp_topic_extraction|broader|http://www.semanlink.net/tag/keywords +http://www.semanlink.net/tag/nlp_topic_extraction|broader|http://www.semanlink.net/tag/automatic_tagging +http://www.semanlink.net/tag/nlp_topic_extraction|broader|http://www.semanlink.net/tag/phrases_nlp +http://www.semanlink.net/tag/nlp_topic_extraction|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/nlp_topic_extraction|related|http://www.semanlink.net/tag/phrase_mining +http://www.semanlink.net/tag/nlp_topic_extraction|related|http://www.semanlink.net/tag/phrase_embeddings +http://www.semanlink.net/tag/nlp_topic_extraction|related|http://www.semanlink.net/tag/automatic_summarization 
+http://www.semanlink.net/tag/nlp_topic_extraction|related|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/nlp_topic_extraction|creationDate|2017-05-24 +http://www.semanlink.net/tag/nlp_topic_extraction|comment|"“the automatic +selection of important and topical phrases +from the body of a document” (Turney, 2000)" +http://www.semanlink.net/tag/nlp_topic_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_topic_extraction|altLabel|Topic extraction +http://www.semanlink.net/tag/nlp_topic_extraction|altLabel|Keyword extraction +http://www.semanlink.net/tag/nlp_topic_extraction|altLabel|Keyphrase extraction +http://www.semanlink.net/tag/nlp_topic_extraction|uri|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/tag/nlp_topic_extraction|broader_prefLabel|Keywords +http://www.semanlink.net/tag/nlp_topic_extraction|broader_prefLabel|Automatic tagging +http://www.semanlink.net/tag/nlp_topic_extraction|broader_prefLabel|Phrases (NLP) +http://www.semanlink.net/tag/nlp_topic_extraction|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/nlp_topic_extraction|broader_related|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/nlp_topic_extraction|broader_related|http://www.semanlink.net/tag/n_gram +http://www.semanlink.net/tag/silicon_valley|creationTime|2013-05-11T11:24:41Z +http://www.semanlink.net/tag/silicon_valley|prefLabel|Silicon Valley +http://www.semanlink.net/tag/silicon_valley|broader|http://www.semanlink.net/tag/californie +http://www.semanlink.net/tag/silicon_valley|creationDate|2013-05-11 +http://www.semanlink.net/tag/silicon_valley|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/silicon_valley|uri|http://www.semanlink.net/tag/silicon_valley +http://www.semanlink.net/tag/silicon_valley|broader_prefLabel|Californie +http://www.semanlink.net/tag/ontoprise|creationTime|2008-10-17T18:26:59Z +http://www.semanlink.net/tag/ontoprise|prefLabel|Ontoprise +http://www.semanlink.net/tag/ontoprise|broader|http://www.semanlink.net/tag/semantic_web_company +http://www.semanlink.net/tag/ontoprise|creationDate|2008-10-17 +http://www.semanlink.net/tag/ontoprise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ontoprise|uri|http://www.semanlink.net/tag/ontoprise +http://www.semanlink.net/tag/ontoprise|broader_prefLabel|Semantic web company +http://www.semanlink.net/tag/ontoprise|broader_altLabel|Semantic Web : entreprise +http://www.semanlink.net/tag/ontoprise|broader_altLabel|Semantic Web: enterprise +http://www.semanlink.net/tag/html_data|creationTime|2012-01-04T21:45:22Z +http://www.semanlink.net/tag/html_data|prefLabel|HTML Data +http://www.semanlink.net/tag/html_data|broader|http://www.semanlink.net/tag/data_web +http://www.semanlink.net/tag/html_data|creationDate|2012-01-04 +http://www.semanlink.net/tag/html_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/html_data|uri|http://www.semanlink.net/tag/html_data +http://www.semanlink.net/tag/html_data|broader_prefLabel|Web of data +http://www.semanlink.net/tag/jvisualvm|creationTime|2015-02-09T21:22:26Z +http://www.semanlink.net/tag/jvisualvm|prefLabel|JVisualVM +http://www.semanlink.net/tag/jvisualvm|broader|http://www.semanlink.net/tag/java_profiling +http://www.semanlink.net/tag/jvisualvm|broader|http://www.semanlink.net/tag/java_dev 
+http://www.semanlink.net/tag/jvisualvm|creationDate|2015-02-09 +http://www.semanlink.net/tag/jvisualvm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jvisualvm|uri|http://www.semanlink.net/tag/jvisualvm +http://www.semanlink.net/tag/jvisualvm|broader_prefLabel|Java profiling +http://www.semanlink.net/tag/jvisualvm|broader_prefLabel|Java dev +http://www.semanlink.net/tag/minimum_description_length_principle|creationTime|2018-12-06T10:01:53Z +http://www.semanlink.net/tag/minimum_description_length_principle|prefLabel|Minimum Description Length Principle +http://www.semanlink.net/tag/minimum_description_length_principle|broader|http://www.semanlink.net/tag/information_theory +http://www.semanlink.net/tag/minimum_description_length_principle|related|http://www.semanlink.net/tag/occam_s_razor +http://www.semanlink.net/tag/minimum_description_length_principle|creationDate|2018-12-06 +http://www.semanlink.net/tag/minimum_description_length_principle|comment|"the best hypothesis (a model and its parameters) for a given set of data is the one that leads to the best compression of the data. + +> In information theory and Minimum Description Length (MDL), learning a good model of the data +is recast as using the model to losslessly transmit the data in as few bits as possible. ([source](/doc/2019/10/_1802_07044_the_description_le)) +" +http://www.semanlink.net/tag/minimum_description_length_principle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/minimum_description_length_principle|describedBy|https://en.wikipedia.org/wiki/Minimum_description_length +http://www.semanlink.net/tag/minimum_description_length_principle|uri|http://www.semanlink.net/tag/minimum_description_length_principle +http://www.semanlink.net/tag/minimum_description_length_principle|broader_prefLabel|Information theory +http://www.semanlink.net/tag/ontology_based_data_access|creationTime|2021-01-29T13:38:21Z +http://www.semanlink.net/tag/ontology_based_data_access|prefLabel|Ontology-based Data Access +http://www.semanlink.net/tag/ontology_based_data_access|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/ontology_based_data_access|creationDate|2021-01-29 +http://www.semanlink.net/tag/ontology_based_data_access|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ontology_based_data_access|uri|http://www.semanlink.net/tag/ontology_based_data_access +http://www.semanlink.net/tag/ontology_based_data_access|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/ontology_based_data_access|broader_altLabel|Ontology +http://www.semanlink.net/tag/compagnies_petrolieres|prefLabel|Compagnies pétrolières +http://www.semanlink.net/tag/compagnies_petrolieres|broader|http://www.semanlink.net/tag/petrole +http://www.semanlink.net/tag/compagnies_petrolieres|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/compagnies_petrolieres|broader|http://www.semanlink.net/tag/exploitation_petroliere +http://www.semanlink.net/tag/compagnies_petrolieres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/compagnies_petrolieres|uri|http://www.semanlink.net/tag/compagnies_petrolieres +http://www.semanlink.net/tag/compagnies_petrolieres|broader_prefLabel|Pétrole +http://www.semanlink.net/tag/compagnies_petrolieres|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/compagnies_petrolieres|broader_prefLabel|Exploitation pétrolière 
+http://www.semanlink.net/tag/security|creationTime|2008-09-05T21:22:31Z +http://www.semanlink.net/tag/security|prefLabel|Security +http://www.semanlink.net/tag/security|creationDate|2008-09-05 +http://www.semanlink.net/tag/security|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/security|uri|http://www.semanlink.net/tag/security +http://www.semanlink.net/tag/madonna|creationTime|2018-08-04T01:21:21Z +http://www.semanlink.net/tag/madonna|prefLabel|Madonna +http://www.semanlink.net/tag/madonna|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/madonna|creationDate|2018-08-04 +http://www.semanlink.net/tag/madonna|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/madonna|describedBy|https://en.wikipedia.org/wiki/Madonna_(entertainer) +http://www.semanlink.net/tag/madonna|uri|http://www.semanlink.net/tag/madonna +http://www.semanlink.net/tag/madonna|broader_prefLabel|Musicien +http://www.semanlink.net/tag/lynda_tamine|creationTime|2018-02-12T17:01:20Z +http://www.semanlink.net/tag/lynda_tamine|prefLabel|Lynda Tamine +http://www.semanlink.net/tag/lynda_tamine|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/lynda_tamine|related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/lynda_tamine|related|http://www.semanlink.net/tag/irit +http://www.semanlink.net/tag/lynda_tamine|creationDate|2018-02-12 +http://www.semanlink.net/tag/lynda_tamine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lynda_tamine|homepage|https://www.irit.fr/~Lynda.Tamine-Lechani/ +http://www.semanlink.net/tag/lynda_tamine|uri|http://www.semanlink.net/tag/lynda_tamine +http://www.semanlink.net/tag/lynda_tamine|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/kristallnacht|creationTime|2010-01-07T01:26:14Z +http://www.semanlink.net/tag/kristallnacht|prefLabel|Kristallnacht +http://www.semanlink.net/tag/kristallnacht|broader|http://www.semanlink.net/tag/nazisme +http://www.semanlink.net/tag/kristallnacht|related|http://www.semanlink.net/tag/shoah +http://www.semanlink.net/tag/kristallnacht|creationDate|2010-01-07 +http://www.semanlink.net/tag/kristallnacht|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kristallnacht|describedBy|https://fr.wikipedia.org/wiki/Nuit_de_Cristal +http://www.semanlink.net/tag/kristallnacht|uri|http://www.semanlink.net/tag/kristallnacht +http://www.semanlink.net/tag/kristallnacht|broader_prefLabel|Nazisme +http://www.semanlink.net/tag/kristallnacht|broader_altLabel|Nazi +http://www.semanlink.net/tag/kristallnacht|broader_related|http://www.semanlink.net/tag/2eme_guerre_mondiale +http://www.semanlink.net/tag/kristallnacht|broader_related|http://www.semanlink.net/tag/antisemitisme +http://www.semanlink.net/tag/conquistadores|prefLabel|Conquistadores +http://www.semanlink.net/tag/conquistadores|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/conquistadores|related|http://www.semanlink.net/tag/amerique_latine +http://www.semanlink.net/tag/conquistadores|related|http://www.semanlink.net/tag/espagne +http://www.semanlink.net/tag/conquistadores|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conquistadores|uri|http://www.semanlink.net/tag/conquistadores +http://www.semanlink.net/tag/conquistadores|broader_prefLabel|Histoire 
+http://www.semanlink.net/tag/github_project|creationTime|2014-09-09T14:35:35Z +http://www.semanlink.net/tag/github_project|prefLabel|GitHub project +http://www.semanlink.net/tag/github_project|broader|http://www.semanlink.net/tag/github +http://www.semanlink.net/tag/github_project|creationDate|2014-09-09 +http://www.semanlink.net/tag/github_project|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/github_project|uri|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/tag/github_project|broader_prefLabel|GitHub +http://www.semanlink.net/tag/partial_differential_equations|creationTime|2020-10-31T12:33:39Z +http://www.semanlink.net/tag/partial_differential_equations|prefLabel|Partial differential equations +http://www.semanlink.net/tag/partial_differential_equations|broader|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/partial_differential_equations|creationDate|2020-10-31 +http://www.semanlink.net/tag/partial_differential_equations|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/partial_differential_equations|uri|http://www.semanlink.net/tag/partial_differential_equations +http://www.semanlink.net/tag/partial_differential_equations|broader_prefLabel|Mathématiques +http://www.semanlink.net/tag/partial_differential_equations|broader_altLabel|Math +http://www.semanlink.net/tag/singular_value_decomposition|creationTime|2017-07-10T18:11:09Z +http://www.semanlink.net/tag/singular_value_decomposition|prefLabel|Singular Value Decomposition +http://www.semanlink.net/tag/singular_value_decomposition|broader|http://www.semanlink.net/tag/dimensionality_reduction +http://www.semanlink.net/tag/singular_value_decomposition|broader|http://www.semanlink.net/tag/linear_algebra +http://www.semanlink.net/tag/singular_value_decomposition|related|http://www.semanlink.net/tag/principal_component_analysis +http://www.semanlink.net/tag/singular_value_decomposition|creationDate|2017-07-10 +http://www.semanlink.net/tag/singular_value_decomposition|comment|"SVD factorizes the word-context co-occurrence matrix into the product of three matrices UΣV where U and V are orthonormal matrices (i.e. square matrices whose rows and columns are orthogonal unit vectors) and Σ is a diagonal matrix of eigenvalues in decreasing order. 
+ +" +http://www.semanlink.net/tag/singular_value_decomposition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/singular_value_decomposition|describedBy|https://en.wikipedia.org/wiki/Singular_value_decomposition +http://www.semanlink.net/tag/singular_value_decomposition|altLabel|SVD +http://www.semanlink.net/tag/singular_value_decomposition|uri|http://www.semanlink.net/tag/singular_value_decomposition +http://www.semanlink.net/tag/singular_value_decomposition|broader_prefLabel|Dimensionality reduction +http://www.semanlink.net/tag/singular_value_decomposition|broader_prefLabel|Linear algebra +http://www.semanlink.net/tag/singular_value_decomposition|broader_altLabel|Algèbre linéaire +http://www.semanlink.net/tag/classification_relations_between_classes|creationTime|2021-01-10T12:13:08Z +http://www.semanlink.net/tag/classification_relations_between_classes|prefLabel|Classification: dependencies between labels +http://www.semanlink.net/tag/classification_relations_between_classes|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/classification_relations_between_classes|creationDate|2021-01-10 +http://www.semanlink.net/tag/classification_relations_between_classes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/classification_relations_between_classes|altLabel|Classification: relations between labels +http://www.semanlink.net/tag/classification_relations_between_classes|uri|http://www.semanlink.net/tag/classification_relations_between_classes +http://www.semanlink.net/tag/classification_relations_between_classes|broader_prefLabel|Classification +http://www.semanlink.net/tag/georges_brassens|creationTime|2011-06-09T23:19:27Z +http://www.semanlink.net/tag/georges_brassens|prefLabel|Georges Brassens +http://www.semanlink.net/tag/georges_brassens|creationDate|2011-06-09 +http://www.semanlink.net/tag/georges_brassens|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/georges_brassens|uri|http://www.semanlink.net/tag/georges_brassens +http://www.semanlink.net/tag/video_games|creationTime|2014-09-26T16:38:36Z +http://www.semanlink.net/tag/video_games|prefLabel|Video games +http://www.semanlink.net/tag/video_games|creationDate|2014-09-26 +http://www.semanlink.net/tag/video_games|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/video_games|uri|http://www.semanlink.net/tag/video_games +http://www.semanlink.net/tag/hymne_national|prefLabel|Hymne national +http://www.semanlink.net/tag/hymne_national|creationDate|2006-09-01 +http://www.semanlink.net/tag/hymne_national|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hymne_national|uri|http://www.semanlink.net/tag/hymne_national +http://www.semanlink.net/tag/armee_americaine|prefLabel|Armée américaine +http://www.semanlink.net/tag/armee_americaine|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/armee_americaine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/armee_americaine|uri|http://www.semanlink.net/tag/armee_americaine +http://www.semanlink.net/tag/armee_americaine|broader_prefLabel|USA +http://www.semanlink.net/tag/armee_americaine|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/armee_americaine|broader_altLabel|United States +http://www.semanlink.net/tag/self_supervised_learning|creationTime|2018-10-27T14:41:10Z 
+http://www.semanlink.net/tag/self_supervised_learning|prefLabel|Self-Supervised Learning +http://www.semanlink.net/tag/self_supervised_learning|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/self_supervised_learning|creationDate|2018-10-27 +http://www.semanlink.net/tag/self_supervised_learning|comment|"Kind of supervised learning, where labels can be generated automatically. Uses signals or domain knowledge, intrinsically correlated to the data, as automatic sources of supervision, thus removing the need for humans to label data. + +Examples include [#autoencoders](/tag/autoencoder) and computation of [#word embeddings](/tag/word_embedding) + + +> In self-supervised learning, the system learns to predict part of its input from other parts of it input. ([Lecun](https://www.facebook.com/722677142/posts/10155934004262143/))" +http://www.semanlink.net/tag/self_supervised_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/self_supervised_learning|uri|http://www.semanlink.net/tag/self_supervised_learning +http://www.semanlink.net/tag/self_supervised_learning|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/self_supervised_learning|broader_altLabel|ML +http://www.semanlink.net/tag/self_supervised_learning|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/self_supervised_learning|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/google_rich_cards|creationTime|2016-05-18T23:25:43Z +http://www.semanlink.net/tag/google_rich_cards|prefLabel|Google Rich Cards +http://www.semanlink.net/tag/google_rich_cards|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_rich_cards|related|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanlink.net/tag/google_rich_cards|creationDate|2016-05-18 +http://www.semanlink.net/tag/google_rich_cards|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_rich_cards|uri|http://www.semanlink.net/tag/google_rich_cards +http://www.semanlink.net/tag/google_rich_cards|broader_prefLabel|Google +http://www.semanlink.net/tag/google_rich_cards|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/pekin|prefLabel|Pékin +http://www.semanlink.net/tag/pekin|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/pekin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pekin|uri|http://www.semanlink.net/tag/pekin +http://www.semanlink.net/tag/pekin|broader_prefLabel|Chine +http://www.semanlink.net/tag/pekin|broader_altLabel|China +http://www.semanlink.net/tag/deepwalk|creationTime|2019-08-25T11:43:18Z +http://www.semanlink.net/tag/deepwalk|prefLabel|DeepWalk +http://www.semanlink.net/tag/deepwalk|broader|http://www.semanlink.net/tag/node_embeddings +http://www.semanlink.net/tag/deepwalk|related|http://www.semanlink.net/tag/random_walk +http://www.semanlink.net/tag/deepwalk|creationDate|2019-08-25 +http://www.semanlink.net/tag/deepwalk|comment|"Computes embeddings +for the vertices of unlabeled graphs. DeepWalk bridges the gap between network +embeddings and word embeddings by treating **nodes as words** and generating **short random walks +as sentences**. Then, neural language models such as Skip-gram can be applied on these random +walks to obtain network embedding." 
+http://www.semanlink.net/tag/deepwalk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deepwalk|uri|http://www.semanlink.net/tag/deepwalk +http://www.semanlink.net/tag/deepwalk|broader_prefLabel|Node Embeddings +http://www.semanlink.net/tag/smartphone|creationTime|2013-05-23T14:19:39Z +http://www.semanlink.net/tag/smartphone|prefLabel|Smartphone +http://www.semanlink.net/tag/smartphone|broader|http://www.semanlink.net/tag/mobile_device +http://www.semanlink.net/tag/smartphone|broader|http://www.semanlink.net/tag/mobile_phone +http://www.semanlink.net/tag/smartphone|creationDate|2013-05-23 +http://www.semanlink.net/tag/smartphone|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/smartphone|altLabel|Téléphone portable +http://www.semanlink.net/tag/smartphone|uri|http://www.semanlink.net/tag/smartphone +http://www.semanlink.net/tag/smartphone|broader_prefLabel|Mobile device +http://www.semanlink.net/tag/smartphone|broader_prefLabel|Mobile phone +http://www.semanlink.net/tag/about_rdf|prefLabel|About RDF +http://www.semanlink.net/tag/about_rdf|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/about_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/about_rdf|uri|http://www.semanlink.net/tag/about_rdf +http://www.semanlink.net/tag/about_rdf|broader_prefLabel|RDF +http://www.semanlink.net/tag/about_rdf|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/about_rdf|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/about_rdf|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/about_rdf|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/about_rdf|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/jena_rules|prefLabel|Jena rules +http://www.semanlink.net/tag/jena_rules|broader|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/jena_rules|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jena_rules|uri|http://www.semanlink.net/tag/jena_rules +http://www.semanlink.net/tag/jena_rules|broader_prefLabel|Jena +http://www.semanlink.net/tag/jena_rules|broader_related|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/google_ranking|prefLabel|Google ranking +http://www.semanlink.net/tag/google_ranking|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_ranking|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/google_ranking|broader|http://www.semanlink.net/tag/ranking_information_retrieval +http://www.semanlink.net/tag/google_ranking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_ranking|uri|http://www.semanlink.net/tag/google_ranking +http://www.semanlink.net/tag/google_ranking|broader_prefLabel|Google +http://www.semanlink.net/tag/google_ranking|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/google_ranking|broader_prefLabel|Ranking (information retrieval) +http://www.semanlink.net/tag/google_ranking|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/google_ranking|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/test_of_independent_invention|creationTime|2009-04-14T01:12:39Z +http://www.semanlink.net/tag/test_of_independent_invention|prefLabel|Test of independent invention 
+http://www.semanlink.net/tag/test_of_independent_invention|broader|http://www.semanlink.net/tag/architecture_of_the_world_wide_web
+http://www.semanlink.net/tag/test_of_independent_invention|creationDate|2009-04-14
+http://www.semanlink.net/tag/test_of_independent_invention|comment|"""It is modularity inside-out: designing a system not to be modular in itself, but to be a part of an as-yet unspecified larger system."" TBL"
+http://www.semanlink.net/tag/test_of_independent_invention|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/test_of_independent_invention|uri|http://www.semanlink.net/tag/test_of_independent_invention
+http://www.semanlink.net/tag/test_of_independent_invention|broader_prefLabel|Web architecture
+http://www.semanlink.net/tag/stemming|creationTime|2012-04-14T12:08:12Z
+http://www.semanlink.net/tag/stemming|prefLabel|Stemming
+http://www.semanlink.net/tag/stemming|broader|http://www.semanlink.net/tag/text_preprocessing
+http://www.semanlink.net/tag/stemming|broader|http://www.semanlink.net/tag/nlp_techniques
+http://www.semanlink.net/tag/stemming|broader|http://www.semanlink.net/tag/nlp_problem
+http://www.semanlink.net/tag/stemming|broader|http://www.semanlink.net/tag/general_nlp_tasks
+http://www.semanlink.net/tag/stemming|related|http://www.semanlink.net/tag/lucene
+http://www.semanlink.net/tag/stemming|creationDate|2012-04-14
+http://www.semanlink.net/tag/stemming|comment|stemming is the process for reducing inflected (or sometimes derived) words to their stem, base or root form—generally a written word form
+http://www.semanlink.net/tag/stemming|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/stemming|describedBy|https://en.wikipedia.org/wiki/Porter_Stemmer
+http://www.semanlink.net/tag/stemming|uri|http://www.semanlink.net/tag/stemming
+http://www.semanlink.net/tag/stemming|broader_prefLabel|Text preprocessing
+http://www.semanlink.net/tag/stemming|broader_prefLabel|NLP techniques
+http://www.semanlink.net/tag/stemming|broader_prefLabel|NLP tasks / problems
+http://www.semanlink.net/tag/stemming|broader_prefLabel|General NLP tasks
+http://www.semanlink.net/tag/nlp_low_resource_scenarios|creationTime|2021-08-24T02:47:09Z
+http://www.semanlink.net/tag/nlp_low_resource_scenarios|prefLabel|Low-Resource NLP
+http://www.semanlink.net/tag/nlp_low_resource_scenarios|broader|http://www.semanlink.net/tag/nlp_problem
+http://www.semanlink.net/tag/nlp_low_resource_scenarios|creationDate|2021-08-24
+http://www.semanlink.net/tag/nlp_low_resource_scenarios|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nlp_low_resource_scenarios|uri|http://www.semanlink.net/tag/nlp_low_resource_scenarios
+http://www.semanlink.net/tag/nlp_low_resource_scenarios|broader_prefLabel|NLP tasks / problems
+http://www.semanlink.net/tag/judea_pearl|creationTime|2013-05-21T13:05:47Z
+http://www.semanlink.net/tag/judea_pearl|prefLabel|Judea Pearl
+http://www.semanlink.net/tag/judea_pearl|broader|http://www.semanlink.net/tag/ai_girls_and_guys
+http://www.semanlink.net/tag/judea_pearl|related|http://www.semanlink.net/tag/reseaux_bayesiens
+http://www.semanlink.net/tag/judea_pearl|creationDate|2013-05-21
+http://www.semanlink.net/tag/judea_pearl|comment|> Hate killed my son. Therefore I am determined to fight hate
+http://www.semanlink.net/tag/judea_pearl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/judea_pearl|describedBy|https://en.wikipedia.org/wiki/Judea_Pearl
+http://www.semanlink.net/tag/judea_pearl|uri|http://www.semanlink.net/tag/judea_pearl
+http://www.semanlink.net/tag/judea_pearl|broader_prefLabel|AI girls and guys
+http://www.semanlink.net/tag/disparition_de_langues_vivantes|prefLabel|Disparition de langues vivantes
+http://www.semanlink.net/tag/disparition_de_langues_vivantes|broader|http://www.semanlink.net/tag/langues_vivantes
+http://www.semanlink.net/tag/disparition_de_langues_vivantes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/disparition_de_langues_vivantes|uri|http://www.semanlink.net/tag/disparition_de_langues_vivantes
+http://www.semanlink.net/tag/disparition_de_langues_vivantes|broader_prefLabel|Langues vivantes
+http://www.semanlink.net/tag/ckan|creationTime|2012-10-23T00:56:22Z
+http://www.semanlink.net/tag/ckan|prefLabel|CKAN
+http://www.semanlink.net/tag/ckan|broader|http://www.semanlink.net/tag/open_source
+http://www.semanlink.net/tag/ckan|broader|http://www.semanlink.net/tag/data_portal
+http://www.semanlink.net/tag/ckan|related|http://www.semanlink.net/tag/open_data
+http://www.semanlink.net/tag/ckan|creationDate|2012-10-23
+http://www.semanlink.net/tag/ckan|comment|data management system that makes data accessible – by providing tools to streamline publishing, sharing, finding and using data. CKAN is aimed at data publishers (national and regional governments, companies and organizations) wanting to make their data open and available.
+http://www.semanlink.net/tag/ckan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ckan|homepage|http://ckan.org
+http://www.semanlink.net/tag/ckan|describedBy|https://en.wikipedia.org/wiki/CKAN
+http://www.semanlink.net/tag/ckan|uri|http://www.semanlink.net/tag/ckan
+http://www.semanlink.net/tag/ckan|broader_prefLabel|Open Source
+http://www.semanlink.net/tag/ckan|broader_prefLabel|Data portal
+http://www.semanlink.net/tag/colbert|creationTime|2021-07-10T09:15:57Z
+http://www.semanlink.net/tag/colbert|prefLabel|ColBERT
+http://www.semanlink.net/tag/colbert|broader|http://www.semanlink.net/tag/neural_models_for_information_retrieval
+http://www.semanlink.net/tag/colbert|broader|http://www.semanlink.net/tag/bert
+http://www.semanlink.net/tag/colbert|broader|http://www.semanlink.net/tag/retrieval_augmented_lm
+http://www.semanlink.net/tag/colbert|creationDate|2021-07-10
+http://www.semanlink.net/tag/colbert|comment|[Github](https://github.com/stanford-futuredata/ColBERT)
+http://www.semanlink.net/tag/colbert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/colbert|uri|http://www.semanlink.net/tag/colbert
+http://www.semanlink.net/tag/colbert|broader_prefLabel|Neural Search
+http://www.semanlink.net/tag/colbert|broader_prefLabel|BERT
+http://www.semanlink.net/tag/colbert|broader_prefLabel|Retrieval augmented LM
+http://www.semanlink.net/tag/colbert|broader_altLabel|Neural IR models
+http://www.semanlink.net/tag/colbert|broader_altLabel|Neural retrieval
+http://www.semanlink.net/tag/colbert|broader_altLabel|Neural Models for Information Retrieval
+http://www.semanlink.net/tag/colbert|broader_related|http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model
+http://www.semanlink.net/tag/colbert|broader_related|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/tag/emnlp_2018|creationTime|2018-10-01T10:55:27Z +http://www.semanlink.net/tag/emnlp_2018|prefLabel|EMNLP 2018 +http://www.semanlink.net/tag/emnlp_2018|broader|http://www.semanlink.net/tag/emnlp +http://www.semanlink.net/tag/emnlp_2018|broader|http://www.semanlink.net/tag/bruxelles +http://www.semanlink.net/tag/emnlp_2018|broader|http://www.semanlink.net/tag/j_y_etais +http://www.semanlink.net/tag/emnlp_2018|creationDate|2018-10-01 +http://www.semanlink.net/tag/emnlp_2018|comment|Conference on Empirical Methods in Natural Language Processing. [Proceedings](https://www.aclweb.org/anthology/D18-1000/) ([was](https://aclanthology.coli.uni-saarland.de/events/emnlp-2018)) +http://www.semanlink.net/tag/emnlp_2018|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/emnlp_2018|homepage|http://emnlp2018.org/ +http://www.semanlink.net/tag/emnlp_2018|uri|http://www.semanlink.net/tag/emnlp_2018 +http://www.semanlink.net/tag/emnlp_2018|broader_prefLabel|EMNLP +http://www.semanlink.net/tag/emnlp_2018|broader_prefLabel|Bruxelles +http://www.semanlink.net/tag/emnlp_2018|broader_prefLabel|J'y étais +http://www.semanlink.net/tag/owllink_protocol|creationTime|2010-07-20T00:12:08Z +http://www.semanlink.net/tag/owllink_protocol|prefLabel|OWLlink Protocol +http://www.semanlink.net/tag/owllink_protocol|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owllink_protocol|creationDate|2010-07-20 +http://www.semanlink.net/tag/owllink_protocol|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owllink_protocol|uri|http://www.semanlink.net/tag/owllink_protocol +http://www.semanlink.net/tag/owllink_protocol|broader_prefLabel|OWL +http://www.semanlink.net/tag/owllink_protocol|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/visual_search|creationTime|2015-07-14T23:43:31Z +http://www.semanlink.net/tag/visual_search|prefLabel|Visual search +http://www.semanlink.net/tag/visual_search|broader|http://www.semanlink.net/tag/search +http://www.semanlink.net/tag/visual_search|creationDate|2015-07-14 +http://www.semanlink.net/tag/visual_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/visual_search|uri|http://www.semanlink.net/tag/visual_search +http://www.semanlink.net/tag/visual_search|broader_prefLabel|Search +http://www.semanlink.net/tag/sony_hack|creationTime|2014-12-18T22:58:33Z +http://www.semanlink.net/tag/sony_hack|prefLabel|Sony Hack +http://www.semanlink.net/tag/sony_hack|broader|http://www.semanlink.net/tag/sony +http://www.semanlink.net/tag/sony_hack|broader|http://www.semanlink.net/tag/hack +http://www.semanlink.net/tag/sony_hack|creationDate|2014-12-18 +http://www.semanlink.net/tag/sony_hack|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sony_hack|uri|http://www.semanlink.net/tag/sony_hack +http://www.semanlink.net/tag/sony_hack|broader_prefLabel|Sony +http://www.semanlink.net/tag/sony_hack|broader_prefLabel|Hack +http://www.semanlink.net/tag/restricted_boltzmann_machine|creationTime|2017-10-30T12:33:51Z +http://www.semanlink.net/tag/restricted_boltzmann_machine|prefLabel|Restricted Boltzmann machine +http://www.semanlink.net/tag/restricted_boltzmann_machine|broader|http://www.semanlink.net/tag/artificial_neural_network 
+http://www.semanlink.net/tag/restricted_boltzmann_machine|related|http://www.semanlink.net/tag/autoencoder +http://www.semanlink.net/tag/restricted_boltzmann_machine|related|http://www.semanlink.net/tag/geoffrey_hinton +http://www.semanlink.net/tag/restricted_boltzmann_machine|related|http://www.semanlink.net/tag/recommender_systems +http://www.semanlink.net/tag/restricted_boltzmann_machine|creationDate|2017-10-30 +http://www.semanlink.net/tag/restricted_boltzmann_machine|comment|"a generative stochastic neural network that can **learn a probability distribution over its set of inputs**. ""**Energy based model**"": tries to minimize a predefined energy function. +" +http://www.semanlink.net/tag/restricted_boltzmann_machine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/restricted_boltzmann_machine|describedBy|https://en.wikipedia.org/wiki/Restricted_Boltzmann_machine +http://www.semanlink.net/tag/restricted_boltzmann_machine|altLabel|RBM +http://www.semanlink.net/tag/restricted_boltzmann_machine|uri|http://www.semanlink.net/tag/restricted_boltzmann_machine +http://www.semanlink.net/tag/restricted_boltzmann_machine|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/restricted_boltzmann_machine|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/restricted_boltzmann_machine|broader_altLabel|ANN +http://www.semanlink.net/tag/restricted_boltzmann_machine|broader_altLabel|NN +http://www.semanlink.net/tag/hector_lavoe|creationTime|2010-02-13T01:57:10Z +http://www.semanlink.net/tag/hector_lavoe|prefLabel|Héctor Lavoe +http://www.semanlink.net/tag/hector_lavoe|broader|http://www.semanlink.net/tag/salsa +http://www.semanlink.net/tag/hector_lavoe|creationDate|2010-02-13 +http://www.semanlink.net/tag/hector_lavoe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hector_lavoe|describedBy|https://en.wikipedia.org/wiki/H%C3%A9ctor_Lavoe +http://www.semanlink.net/tag/hector_lavoe|uri|http://www.semanlink.net/tag/hector_lavoe +http://www.semanlink.net/tag/hector_lavoe|broader_prefLabel|Salsa +http://www.semanlink.net/tag/naftali_tishby|creationTime|2019-08-15T17:05:26Z +http://www.semanlink.net/tag/naftali_tishby|prefLabel|Naftali Tishby +http://www.semanlink.net/tag/naftali_tishby|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/naftali_tishby|related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/naftali_tishby|creationDate|2019-08-15 +http://www.semanlink.net/tag/naftali_tishby|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/naftali_tishby|describedBy|https://en.wikipedia.org/wiki/Naftali_Tishby +http://www.semanlink.net/tag/naftali_tishby|uri|http://www.semanlink.net/tag/naftali_tishby +http://www.semanlink.net/tag/naftali_tishby|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/java|prefLabel|Java +http://www.semanlink.net/tag/java|broader|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/java|broader|http://www.semanlink.net/tag/sun_microsystems +http://www.semanlink.net/tag/java|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/java|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java|uri|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java|broader_prefLabel|Programming language +http://www.semanlink.net/tag/java|broader_prefLabel|Sun Microsystems 
+http://www.semanlink.net/tag/java|broader_prefLabel|Dev +http://www.semanlink.net/tag/java|broader_altLabel|Langage de programmation +http://www.semanlink.net/tag/java|broader_altLabel|sun +http://www.semanlink.net/tag/kaguya|creationTime|2007-11-09T13:24:32Z +http://www.semanlink.net/tag/kaguya|prefLabel|Kaguya +http://www.semanlink.net/tag/kaguya|broader|http://www.semanlink.net/tag/lune +http://www.semanlink.net/tag/kaguya|broader|http://www.semanlink.net/tag/exploration_spatiale +http://www.semanlink.net/tag/kaguya|broader|http://www.semanlink.net/tag/japon +http://www.semanlink.net/tag/kaguya|creationDate|2007-11-09 +http://www.semanlink.net/tag/kaguya|comment|The Kaguya mission consists of a main orbiter and two smaller satellites in a 100-km-high, polar orbit. +http://www.semanlink.net/tag/kaguya|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kaguya|uri|http://www.semanlink.net/tag/kaguya +http://www.semanlink.net/tag/kaguya|broader_prefLabel|Lune +http://www.semanlink.net/tag/kaguya|broader_prefLabel|Exploration spatiale +http://www.semanlink.net/tag/kaguya|broader_prefLabel|Japon +http://www.semanlink.net/tag/kaguya|broader_altLabel|Japan +http://www.semanlink.net/tag/anne_haour|creationTime|2021-02-11T16:48:58Z +http://www.semanlink.net/tag/anne_haour|prefLabel| Anne Haour +http://www.semanlink.net/tag/anne_haour|broader|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/tag/anne_haour|broader|http://www.semanlink.net/tag/archeologue +http://www.semanlink.net/tag/anne_haour|creationDate|2021-02-11 +http://www.semanlink.net/tag/anne_haour|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/anne_haour|uri|http://www.semanlink.net/tag/anne_haour +http://www.semanlink.net/tag/anne_haour|broader_prefLabel|Archéologie africaine +http://www.semanlink.net/tag/anne_haour|broader_prefLabel|Archéologue +http://www.semanlink.net/tag/anne_haour|broader_related|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/sparql_construct|creationTime|2008-08-27T17:53:48Z +http://www.semanlink.net/tag/sparql_construct|prefLabel|SPARQL Construct +http://www.semanlink.net/tag/sparql_construct|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_construct|creationDate|2008-08-27 +http://www.semanlink.net/tag/sparql_construct|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_construct|uri|http://www.semanlink.net/tag/sparql_construct +http://www.semanlink.net/tag/sparql_construct|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/semantic_web_outliner|prefLabel|Semantic Web Outliner +http://www.semanlink.net/tag/semantic_web_outliner|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/semantic_web_outliner|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/semantic_web_outliner|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_outliner|uri|http://www.semanlink.net/tag/semantic_web_outliner +http://www.semanlink.net/tag/semantic_web_outliner|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/semantic_web_outliner|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/text_processing|creationTime|2019-04-24T11:36:15Z +http://www.semanlink.net/tag/text_processing|prefLabel|Text processing 
+http://www.semanlink.net/tag/text_processing|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/text_processing|creationDate|2019-04-24 +http://www.semanlink.net/tag/text_processing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_processing|uri|http://www.semanlink.net/tag/text_processing +http://www.semanlink.net/tag/text_processing|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/pre_trained_models|creationTime|2021-02-16T20:29:31Z +http://www.semanlink.net/tag/pre_trained_models|prefLabel|Pre-trained Models +http://www.semanlink.net/tag/pre_trained_models|creationDate|2021-02-16 +http://www.semanlink.net/tag/pre_trained_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pre_trained_models|uri|http://www.semanlink.net/tag/pre_trained_models +http://www.semanlink.net/tag/synaptic_web|creationTime|2014-02-03T22:54:41Z +http://www.semanlink.net/tag/synaptic_web|prefLabel|Synaptic Web +http://www.semanlink.net/tag/synaptic_web|creationDate|2014-02-03 +http://www.semanlink.net/tag/synaptic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/synaptic_web|uri|http://www.semanlink.net/tag/synaptic_web +http://www.semanlink.net/tag/secheresse|creationTime|2007-07-08T02:43:38Z +http://www.semanlink.net/tag/secheresse|prefLabel|Sécheresse +http://www.semanlink.net/tag/secheresse|broader|http://www.semanlink.net/tag/eau +http://www.semanlink.net/tag/secheresse|creationDate|2007-07-08 +http://www.semanlink.net/tag/secheresse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/secheresse|uri|http://www.semanlink.net/tag/secheresse +http://www.semanlink.net/tag/secheresse|broader_prefLabel|Eau +http://www.semanlink.net/tag/zero_shot_text_classifier|creationTime|2021-02-24T01:49:09Z +http://www.semanlink.net/tag/zero_shot_text_classifier|prefLabel|Zero-shot Text Classifier +http://www.semanlink.net/tag/zero_shot_text_classifier|broader|http://www.semanlink.net/tag/zero_shot_learning +http://www.semanlink.net/tag/zero_shot_text_classifier|broader|http://www.semanlink.net/tag/unsupervised_text_classification +http://www.semanlink.net/tag/zero_shot_text_classifier|creationDate|2021-02-24 +http://www.semanlink.net/tag/zero_shot_text_classifier|comment|"learn a classifier on one set of labels and then evaluate on a +different set of labels that the classifier has never seen before" +http://www.semanlink.net/tag/zero_shot_text_classifier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zero_shot_text_classifier|uri|http://www.semanlink.net/tag/zero_shot_text_classifier +http://www.semanlink.net/tag/zero_shot_text_classifier|broader_prefLabel|Zero-Shot Learning +http://www.semanlink.net/tag/zero_shot_text_classifier|broader_prefLabel|Unsupervised Text Classification +http://www.semanlink.net/tag/zero_shot_text_classifier|broader_altLabel|Dataless Text Classication +http://www.semanlink.net/tag/zero_shot_text_classifier|broader_related|http://www.semanlink.net/tag/few_shot_learning +http://www.semanlink.net/tag/r|creationTime|2013-05-08T15:06:29Z +http://www.semanlink.net/tag/r|prefLabel|R +http://www.semanlink.net/tag/r|broader|http://www.semanlink.net/tag/data_mining_tools +http://www.semanlink.net/tag/r|broader|http://www.semanlink.net/tag/data_science +http://www.semanlink.net/tag/r|creationDate|2013-05-08 
+http://www.semanlink.net/tag/r|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/r|homepage|http://www.r-project.org/ +http://www.semanlink.net/tag/r|describedBy|https://en.wikipedia.org/wiki/R_(programming_language) +http://www.semanlink.net/tag/r|altLabel|R (programming language) +http://www.semanlink.net/tag/r|uri|http://www.semanlink.net/tag/r +http://www.semanlink.net/tag/r|broader_prefLabel|Data mining tools +http://www.semanlink.net/tag/r|broader_prefLabel|Data science +http://www.semanlink.net/tag/r|broader_altLabel|Data analysis +http://www.semanlink.net/tag/multilinguisme|creationTime|2007-09-14T13:37:38Z +http://www.semanlink.net/tag/multilinguisme|prefLabel|Multilinguisme +http://www.semanlink.net/tag/multilinguisme|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/multilinguisme|creationDate|2007-09-14 +http://www.semanlink.net/tag/multilinguisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multilinguisme|uri|http://www.semanlink.net/tag/multilinguisme +http://www.semanlink.net/tag/multilinguisme|broader_prefLabel|Langues +http://www.semanlink.net/tag/dalai_lama|creationTime|2015-09-22T22:43:01Z +http://www.semanlink.net/tag/dalai_lama|prefLabel|Dalai Lama +http://www.semanlink.net/tag/dalai_lama|broader|http://www.semanlink.net/tag/tibet +http://www.semanlink.net/tag/dalai_lama|broader|http://www.semanlink.net/tag/boudhisme +http://www.semanlink.net/tag/dalai_lama|creationDate|2015-09-22 +http://www.semanlink.net/tag/dalai_lama|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dalai_lama|describedBy|https://en.wikipedia.org/wiki/Dalai_Lama +http://www.semanlink.net/tag/dalai_lama|uri|http://www.semanlink.net/tag/dalai_lama +http://www.semanlink.net/tag/dalai_lama|broader_prefLabel|Tibet +http://www.semanlink.net/tag/dalai_lama|broader_prefLabel|Boudhisme +http://www.semanlink.net/tag/dalai_lama|broader_related|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/sonarqube|creationTime|2013-11-29T11:32:33Z +http://www.semanlink.net/tag/sonarqube|prefLabel|SonarQube +http://www.semanlink.net/tag/sonarqube|broader|http://www.semanlink.net/tag/dev_tools +http://www.semanlink.net/tag/sonarqube|creationDate|2013-11-29 +http://www.semanlink.net/tag/sonarqube|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sonarqube|uri|http://www.semanlink.net/tag/sonarqube +http://www.semanlink.net/tag/sonarqube|broader_prefLabel|Dev tools +http://www.semanlink.net/tag/civilisation_elamite|prefLabel|Civilisation élamite +http://www.semanlink.net/tag/civilisation_elamite|broader|http://www.semanlink.net/tag/antiquite_iranienne +http://www.semanlink.net/tag/civilisation_elamite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/civilisation_elamite|uri|http://www.semanlink.net/tag/civilisation_elamite +http://www.semanlink.net/tag/civilisation_elamite|broader_prefLabel|Antiquité iranienne +http://www.semanlink.net/tag/exploration_marsienne|prefLabel|Exploration marsienne +http://www.semanlink.net/tag/exploration_marsienne|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/exploration_marsienne|broader|http://www.semanlink.net/tag/exploration_spatiale +http://www.semanlink.net/tag/exploration_marsienne|broader|http://www.semanlink.net/tag/mars 
+http://www.semanlink.net/tag/exploration_marsienne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exploration_marsienne|uri|http://www.semanlink.net/tag/exploration_marsienne +http://www.semanlink.net/tag/exploration_marsienne|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/exploration_marsienne|broader_prefLabel|Exploration spatiale +http://www.semanlink.net/tag/exploration_marsienne|broader_prefLabel|Mars +http://www.semanlink.net/tag/openlink|creationTime|2008-03-15T00:24:18Z +http://www.semanlink.net/tag/openlink|prefLabel|OpenLink Software +http://www.semanlink.net/tag/openlink|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/openlink|broader|http://www.semanlink.net/tag/semantic_web_company +http://www.semanlink.net/tag/openlink|related|http://www.semanlink.net/tag/yrjana_rankka +http://www.semanlink.net/tag/openlink|related|http://www.semanlink.net/tag/orri_erling +http://www.semanlink.net/tag/openlink|related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/openlink|creationDate|2008-03-15 +http://www.semanlink.net/tag/openlink|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/openlink|homepage|http://www.openlinksw.com/ +http://www.semanlink.net/tag/openlink|uri|http://www.semanlink.net/tag/openlink +http://www.semanlink.net/tag/openlink|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/openlink|broader_prefLabel|Semantic web company +http://www.semanlink.net/tag/openlink|broader_altLabel|sw +http://www.semanlink.net/tag/openlink|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/openlink|broader_altLabel|Semantic Web : entreprise +http://www.semanlink.net/tag/openlink|broader_altLabel|Semantic Web: enterprise +http://www.semanlink.net/tag/alphafold|creationTime|2021-10-20T00:33:21Z +http://www.semanlink.net/tag/alphafold|prefLabel|AlphaFold +http://www.semanlink.net/tag/alphafold|broader|http://www.semanlink.net/tag/molecular_biology +http://www.semanlink.net/tag/alphafold|broader|http://www.semanlink.net/tag/google_deepmind +http://www.semanlink.net/tag/alphafold|creationDate|2021-10-20 +http://www.semanlink.net/tag/alphafold|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alphafold|uri|http://www.semanlink.net/tag/alphafold +http://www.semanlink.net/tag/alphafold|broader_prefLabel|Molecular Biology +http://www.semanlink.net/tag/alphafold|broader_prefLabel|DeepMind +http://www.semanlink.net/tag/alphafold|broader_altLabel|Biologie moléculaire +http://www.semanlink.net/tag/alphafold|broader_related|http://www.semanlink.net/tag/reinforcement_learning +http://www.semanlink.net/tag/medecins_sans_frontieres|prefLabel|Médecins sans frontières +http://www.semanlink.net/tag/medecins_sans_frontieres|broader|http://www.semanlink.net/tag/ong +http://www.semanlink.net/tag/medecins_sans_frontieres|broader|http://www.semanlink.net/tag/prix_nobel +http://www.semanlink.net/tag/medecins_sans_frontieres|broader|http://www.semanlink.net/tag/humanitaire +http://www.semanlink.net/tag/medecins_sans_frontieres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/medecins_sans_frontieres|altLabel|MSF +http://www.semanlink.net/tag/medecins_sans_frontieres|uri|http://www.semanlink.net/tag/medecins_sans_frontieres +http://www.semanlink.net/tag/medecins_sans_frontieres|broader_prefLabel|ONG 
+http://www.semanlink.net/tag/medecins_sans_frontieres|broader_prefLabel|Prix Nobel +http://www.semanlink.net/tag/medecins_sans_frontieres|broader_prefLabel|Humanitaire +http://www.semanlink.net/tag/deep_learning_attention|creationTime|2016-01-07T00:58:24Z +http://www.semanlink.net/tag/deep_learning_attention|prefLabel|Attention mechanism +http://www.semanlink.net/tag/deep_learning_attention|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/deep_learning_attention|creationDate|2016-01-07 +http://www.semanlink.net/tag/deep_learning_attention|comment|"Good explanation is this [blog post by D. Britz](/doc/?uri=http%3A%2F%2Fwww.wildml.com%2F2016%2F01%2Fattention-and-memory-in-deep-learning-and-nlp%2F). (But the best explanation related to attention is to be found in this [post](/doc/2019/08/transformers_from_scratch_%7C_pet) about Self-Attention.) + +While simple Seq2Seq builds a single context vector out of the encoder’s last hidden state, attention creates +shortcuts between the context vector and the entire source input: the context vector has access to the entire input sequence. +The decoder can “attend” to different parts of the source sentence at each step of the output generation, and the model learns what to attend to based on the input sentence and what it has produced so far. + +Possible to interpret what the model is doing by looking at the Attention weight matrix + +Cost: We need to calculate an attention value for each combination of input and output word (D. Britz: -> ""attention is a bit of a misnomer: we look at everything in details before deciding what to focus on"") + + + + + + + + + +" +http://www.semanlink.net/tag/deep_learning_attention|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deep_learning_attention|uri|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/tag/deep_learning_attention|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/deep_learning_attention|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/deep_learning_attention|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/meetup|creationTime|2011-01-09T21:39:47Z +http://www.semanlink.net/tag/meetup|prefLabel|Meetup +http://www.semanlink.net/tag/meetup|creationDate|2011-01-09 +http://www.semanlink.net/tag/meetup|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/meetup|uri|http://www.semanlink.net/tag/meetup +http://www.semanlink.net/tag/disque_a_retrouver|creationTime|2008-12-29T20:18:40Z +http://www.semanlink.net/tag/disque_a_retrouver|prefLabel|Disque à retrouver +http://www.semanlink.net/tag/disque_a_retrouver|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/disque_a_retrouver|broader|http://www.semanlink.net/tag/souvenirs +http://www.semanlink.net/tag/disque_a_retrouver|creationDate|2008-12-29 +http://www.semanlink.net/tag/disque_a_retrouver|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/disque_a_retrouver|uri|http://www.semanlink.net/tag/disque_a_retrouver +http://www.semanlink.net/tag/disque_a_retrouver|broader_prefLabel|Musique +http://www.semanlink.net/tag/disque_a_retrouver|broader_prefLabel|Souvenirs +http://www.semanlink.net/tag/disque_a_retrouver|broader_altLabel|Music +http://www.semanlink.net/tag/disque_a_retrouver|broader_altLabel|Souvenir +http://www.semanlink.net/tag/monsanto|prefLabel|Monsanto 
+http://www.semanlink.net/tag/monsanto|broader|http://www.semanlink.net/tag/semencier +http://www.semanlink.net/tag/monsanto|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/monsanto|broader|http://www.semanlink.net/tag/biotech_industry +http://www.semanlink.net/tag/monsanto|related|http://www.semanlink.net/tag/ogm +http://www.semanlink.net/tag/monsanto|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/monsanto|describedBy|https://en.wikipedia.org/wiki/Monsanto +http://www.semanlink.net/tag/monsanto|uri|http://www.semanlink.net/tag/monsanto +http://www.semanlink.net/tag/monsanto|broader_prefLabel|Semencier +http://www.semanlink.net/tag/monsanto|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/monsanto|broader_prefLabel|Biotech industry +http://www.semanlink.net/tag/monsanto|broader_altLabel|Firme biotechnologique +http://www.semanlink.net/tag/western|creationTime|2021-10-08T23:22:50Z +http://www.semanlink.net/tag/western|prefLabel|Western +http://www.semanlink.net/tag/western|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/western|creationDate|2021-10-08 +http://www.semanlink.net/tag/western|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/western|uri|http://www.semanlink.net/tag/western +http://www.semanlink.net/tag/western|broader_prefLabel|Film +http://www.semanlink.net/tag/txtai|creationTime|2021-10-04T16:38:44Z +http://www.semanlink.net/tag/txtai|prefLabel|txtai +http://www.semanlink.net/tag/txtai|broader|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/tag/txtai|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/txtai|broader|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/tag/txtai|broader|http://www.semanlink.net/tag/semantic_search +http://www.semanlink.net/tag/txtai|creationDate|2021-10-04 +http://www.semanlink.net/tag/txtai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/txtai|uri|http://www.semanlink.net/tag/txtai +http://www.semanlink.net/tag/txtai|broader_prefLabel|Neural Search +http://www.semanlink.net/tag/txtai|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/txtai|broader_prefLabel|Discuté avec Raphaël +http://www.semanlink.net/tag/txtai|broader_prefLabel|Semantic Search +http://www.semanlink.net/tag/txtai|broader_altLabel|Neural IR models +http://www.semanlink.net/tag/txtai|broader_altLabel|Neural retrieval +http://www.semanlink.net/tag/txtai|broader_altLabel|Neural Models for Information Retrieval +http://www.semanlink.net/tag/txtai|broader_related|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/tag/txtai|broader_related|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/tag/credit_card|creationTime|2010-12-27T13:54:00Z +http://www.semanlink.net/tag/credit_card|prefLabel|Credit card +http://www.semanlink.net/tag/credit_card|broader|http://www.semanlink.net/tag/banque +http://www.semanlink.net/tag/credit_card|creationDate|2010-12-27 +http://www.semanlink.net/tag/credit_card|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/credit_card|uri|http://www.semanlink.net/tag/credit_card +http://www.semanlink.net/tag/credit_card|broader_prefLabel|Banque +http://www.semanlink.net/tag/sarkozy_et_la_recherche|creationTime|2009-02-18T21:52:01Z +http://www.semanlink.net/tag/sarkozy_et_la_recherche|prefLabel|Sarkozy et la 
recherche +http://www.semanlink.net/tag/sarkozy_et_la_recherche|broader|http://www.semanlink.net/tag/recherche_francaise +http://www.semanlink.net/tag/sarkozy_et_la_recherche|broader|http://www.semanlink.net/tag/sarkozy +http://www.semanlink.net/tag/sarkozy_et_la_recherche|creationDate|2009-02-18 +http://www.semanlink.net/tag/sarkozy_et_la_recherche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sarkozy_et_la_recherche|uri|http://www.semanlink.net/tag/sarkozy_et_la_recherche +http://www.semanlink.net/tag/sarkozy_et_la_recherche|broader_prefLabel|France : recherche +http://www.semanlink.net/tag/sarkozy_et_la_recherche|broader_prefLabel|Sarkozy +http://www.semanlink.net/tag/sarkozy_et_la_recherche|broader_altLabel|Recherche française +http://www.semanlink.net/tag/coursera_r_programming|creationTime|2015-01-06T17:52:11Z +http://www.semanlink.net/tag/coursera_r_programming|prefLabel|Coursera: R Programming +http://www.semanlink.net/tag/coursera_r_programming|broader|http://www.semanlink.net/tag/r +http://www.semanlink.net/tag/coursera_r_programming|broader|http://www.semanlink.net/tag/coursera +http://www.semanlink.net/tag/coursera_r_programming|creationDate|2015-01-06 +http://www.semanlink.net/tag/coursera_r_programming|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coursera_r_programming|homepage|https://class.coursera.org/rprog-010 +http://www.semanlink.net/tag/coursera_r_programming|uri|http://www.semanlink.net/tag/coursera_r_programming +http://www.semanlink.net/tag/coursera_r_programming|broader_prefLabel|R +http://www.semanlink.net/tag/coursera_r_programming|broader_prefLabel|Coursera +http://www.semanlink.net/tag/coursera_r_programming|broader_altLabel|R (programming language) +http://www.semanlink.net/tag/coursera_r_programming|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/linked_data_application|creationTime|2008-06-14T00:31:58Z +http://www.semanlink.net/tag/linked_data_application|prefLabel|Linked Data: application +http://www.semanlink.net/tag/linked_data_application|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_application|creationDate|2008-06-14 +http://www.semanlink.net/tag/linked_data_application|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_application|uri|http://www.semanlink.net/tag/linked_data_application +http://www.semanlink.net/tag/linked_data_application|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_application|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/musicbrainz 
+http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data_application|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/ios|creationTime|2016-03-27T18:17:50Z +http://www.semanlink.net/tag/ios|prefLabel|iOS +http://www.semanlink.net/tag/ios|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/ios|related|http://www.semanlink.net/tag/iphone +http://www.semanlink.net/tag/ios|creationDate|2016-03-27 +http://www.semanlink.net/tag/ios|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ios|uri|http://www.semanlink.net/tag/ios +http://www.semanlink.net/tag/ios|broader_prefLabel|Apple +http://www.semanlink.net/tag/semantic_web_p2p|prefLabel|Semantic Web P2P +http://www.semanlink.net/tag/semantic_web_p2p|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/semantic_web_p2p|broader|http://www.semanlink.net/tag/peer_to_peer +http://www.semanlink.net/tag/semantic_web_p2p|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_p2p|uri|http://www.semanlink.net/tag/semantic_web_p2p +http://www.semanlink.net/tag/semantic_web_p2p|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/semantic_web_p2p|broader_prefLabel|Peer to peer +http://www.semanlink.net/tag/semantic_web_p2p|broader_altLabel|P2P +http://www.semanlink.net/tag/huggingface_bigscience|creationTime|2021-03-31T16:53:05Z +http://www.semanlink.net/tag/huggingface_bigscience|prefLabel|HuggingFace BigScience +http://www.semanlink.net/tag/huggingface_bigscience|broader|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/tag/huggingface_bigscience|creationDate|2021-03-31 +http://www.semanlink.net/tag/huggingface_bigscience|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/huggingface_bigscience|homepage|https://bigscience.huggingface.co +http://www.semanlink.net/tag/huggingface_bigscience|uri|http://www.semanlink.net/tag/huggingface_bigscience +http://www.semanlink.net/tag/huggingface_bigscience|broader_prefLabel|Hugging Face +http://www.semanlink.net/tag/huggingface_bigscience|broader_altLabel|HuggingFace +http://www.semanlink.net/tag/fps_ontologies|creationTime|2013-02-01T16:02:53Z +http://www.semanlink.net/tag/fps_ontologies|prefLabel|fps ontologies +http://www.semanlink.net/tag/fps_ontologies|broader|http://www.semanlink.net/tag/fps_dev +http://www.semanlink.net/tag/fps_ontologies|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/fps_ontologies|creationDate|2013-02-01 +http://www.semanlink.net/tag/fps_ontologies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps_ontologies|uri|http://www.semanlink.net/tag/fps_ontologies +http://www.semanlink.net/tag/fps_ontologies|broader_prefLabel|fps dev +http://www.semanlink.net/tag/fps_ontologies|broader_prefLabel|Ontologies 
+http://www.semanlink.net/tag/fps_ontologies|broader_altLabel|Ontology +http://www.semanlink.net/tag/italie|prefLabel|Italie +http://www.semanlink.net/tag/italie|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/italie|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/italie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/italie|uri|http://www.semanlink.net/tag/italie +http://www.semanlink.net/tag/italie|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/italie|broader_prefLabel|Europe +http://www.semanlink.net/tag/edd_dumbill|prefLabel|Edd Dumbill +http://www.semanlink.net/tag/edd_dumbill|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/edd_dumbill|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/edd_dumbill|uri|http://www.semanlink.net/tag/edd_dumbill +http://www.semanlink.net/tag/edd_dumbill|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/edd_dumbill|broader_altLabel|Technical guys +http://www.semanlink.net/tag/federated_sparql_queries|creationTime|2012-04-16T11:45:33Z +http://www.semanlink.net/tag/federated_sparql_queries|prefLabel|Federated SPARQL queries +http://www.semanlink.net/tag/federated_sparql_queries|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/federated_sparql_queries|creationDate|2012-04-16 +http://www.semanlink.net/tag/federated_sparql_queries|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/federated_sparql_queries|uri|http://www.semanlink.net/tag/federated_sparql_queries +http://www.semanlink.net/tag/federated_sparql_queries|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/to_see|prefLabel|To see +http://www.semanlink.net/tag/to_see|broader|http://www.semanlink.net/tag/todo_list +http://www.semanlink.net/tag/to_see|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/to_see|uri|http://www.semanlink.net/tag/to_see +http://www.semanlink.net/tag/to_see|broader_prefLabel|Todo list +http://www.semanlink.net/tag/technological_singularity|creationTime|2008-08-17T14:51:06Z +http://www.semanlink.net/tag/technological_singularity|prefLabel|Technological singularity +http://www.semanlink.net/tag/technological_singularity|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/technological_singularity|broader|http://www.semanlink.net/tag/anticipation +http://www.semanlink.net/tag/technological_singularity|creationDate|2008-08-17 +http://www.semanlink.net/tag/technological_singularity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/technological_singularity|describedBy|https://en.wikipedia.org/wiki/Technological_singularity +http://www.semanlink.net/tag/technological_singularity|uri|http://www.semanlink.net/tag/technological_singularity +http://www.semanlink.net/tag/technological_singularity|broader_prefLabel|Technologie +http://www.semanlink.net/tag/technological_singularity|broader_prefLabel|Anticipation +http://www.semanlink.net/tag/multimodal_classification|creationTime|2020-10-14T09:55:36Z +http://www.semanlink.net/tag/multimodal_classification|prefLabel|Multimodal classification +http://www.semanlink.net/tag/multimodal_classification|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/multimodal_classification|creationDate|2020-10-14 
+http://www.semanlink.net/tag/multimodal_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multimodal_classification|uri|http://www.semanlink.net/tag/multimodal_classification +http://www.semanlink.net/tag/multimodal_classification|broader_prefLabel|Classification +http://www.semanlink.net/tag/microsoft_azure|creationTime|2020-12-19T11:28:32Z +http://www.semanlink.net/tag/microsoft_azure|prefLabel|Microsoft Azure +http://www.semanlink.net/tag/microsoft_azure|broader|http://www.semanlink.net/tag/microsoft +http://www.semanlink.net/tag/microsoft_azure|creationDate|2020-12-19 +http://www.semanlink.net/tag/microsoft_azure|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/microsoft_azure|uri|http://www.semanlink.net/tag/microsoft_azure +http://www.semanlink.net/tag/microsoft_azure|broader_prefLabel|Microsoft +http://www.semanlink.net/tag/microsoft_azure|broader_related|http://www.semanlink.net/tag/bill_gates +http://www.semanlink.net/tag/entities_to_topics|creationTime|2019-05-03T20:01:05Z +http://www.semanlink.net/tag/entities_to_topics|prefLabel|Entities to topics +http://www.semanlink.net/tag/entities_to_topics|broader|http://www.semanlink.net/tag/kg_tasks +http://www.semanlink.net/tag/entities_to_topics|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/entities_to_topics|creationDate|2019-05-03 +http://www.semanlink.net/tag/entities_to_topics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entities_to_topics|uri|http://www.semanlink.net/tag/entities_to_topics +http://www.semanlink.net/tag/entities_to_topics|broader_prefLabel|KG: tasks +http://www.semanlink.net/tag/entities_to_topics|broader_prefLabel|Entities +http://www.semanlink.net/tag/entities_to_topics|broader_altLabel|Knowledge graphs: tasks +http://www.semanlink.net/tag/spin_functions|creationTime|2011-01-09T23:48:46Z +http://www.semanlink.net/tag/spin_functions|prefLabel|SPIN functions +http://www.semanlink.net/tag/spin_functions|broader|http://www.semanlink.net/tag/topbraid_spin +http://www.semanlink.net/tag/spin_functions|creationDate|2011-01-09 +http://www.semanlink.net/tag/spin_functions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/spin_functions|uri|http://www.semanlink.net/tag/spin_functions +http://www.semanlink.net/tag/spin_functions|broader_prefLabel|TopBraid/SPIN +http://www.semanlink.net/tag/chene|prefLabel|Chêne +http://www.semanlink.net/tag/chene|broader|http://www.semanlink.net/tag/arbres +http://www.semanlink.net/tag/chene|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chene|uri|http://www.semanlink.net/tag/chene +http://www.semanlink.net/tag/chene|broader_prefLabel|Arbres +http://www.semanlink.net/tag/guantanamo|prefLabel|Guantanamo +http://www.semanlink.net/tag/guantanamo|broader|http://www.semanlink.net/tag/11_septembre_2001 +http://www.semanlink.net/tag/guantanamo|broader|http://www.semanlink.net/tag/bush +http://www.semanlink.net/tag/guantanamo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/guantanamo|uri|http://www.semanlink.net/tag/guantanamo +http://www.semanlink.net/tag/guantanamo|broader_prefLabel|11 septembre 2001 +http://www.semanlink.net/tag/guantanamo|broader_prefLabel|Bush +http://www.semanlink.net/tag/guantanamo|broader_altLabel|11 septembre +http://www.semanlink.net/tag/john_steinbeck|creationTime|2017-02-05T00:58:55Z 
+http://www.semanlink.net/tag/john_steinbeck|prefLabel|John Steinbeck +http://www.semanlink.net/tag/john_steinbeck|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/john_steinbeck|creationDate|2017-02-05 +http://www.semanlink.net/tag/john_steinbeck|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/john_steinbeck|describedBy|https://en.wikipedia.org/wiki/John_Steinbeck +http://www.semanlink.net/tag/john_steinbeck|uri|http://www.semanlink.net/tag/john_steinbeck +http://www.semanlink.net/tag/john_steinbeck|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/json|prefLabel|JSON +http://www.semanlink.net/tag/json|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/json|homepage|http://www.json.org/ +http://www.semanlink.net/tag/json|uri|http://www.semanlink.net/tag/json +http://www.semanlink.net/tag/keras_embedding_layer|creationTime|2017-10-25T15:40:51Z +http://www.semanlink.net/tag/keras_embedding_layer|prefLabel|Keras embedding layer +http://www.semanlink.net/tag/keras_embedding_layer|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/keras_embedding_layer|broader|http://www.semanlink.net/tag/keras +http://www.semanlink.net/tag/keras_embedding_layer|creationDate|2017-10-25 +http://www.semanlink.net/tag/keras_embedding_layer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/keras_embedding_layer|uri|http://www.semanlink.net/tag/keras_embedding_layer +http://www.semanlink.net/tag/keras_embedding_layer|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/keras_embedding_layer|broader_prefLabel|Keras +http://www.semanlink.net/tag/keras_embedding_layer|broader_altLabel|embedding +http://www.semanlink.net/tag/keras_embedding_layer|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/keras_embedding_layer|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/rayons_cosmiques|prefLabel|Rayons cosmiques +http://www.semanlink.net/tag/rayons_cosmiques|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/rayons_cosmiques|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/rayons_cosmiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rayons_cosmiques|uri|http://www.semanlink.net/tag/rayons_cosmiques +http://www.semanlink.net/tag/rayons_cosmiques|broader_prefLabel|Physique +http://www.semanlink.net/tag/rayons_cosmiques|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/rayons_cosmiques|broader_altLabel|Physics +http://www.semanlink.net/tag/a_la_carte_embedding|creationTime|2018-09-18T18:16:06Z +http://www.semanlink.net/tag/a_la_carte_embedding|prefLabel|A La Carte Embedding +http://www.semanlink.net/tag/a_la_carte_embedding|broader|http://www.semanlink.net/tag/nlp_rare_words +http://www.semanlink.net/tag/a_la_carte_embedding|broader|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/tag/a_la_carte_embedding|broader|http://www.semanlink.net/tag/n_gram +http://www.semanlink.net/tag/a_la_carte_embedding|broader|http://www.semanlink.net/tag/sanjeev_arora +http://www.semanlink.net/tag/a_la_carte_embedding|broader|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/a_la_carte_embedding|related|http://www.semanlink.net/tag/sif_embeddings +http://www.semanlink.net/tag/a_la_carte_embedding|creationDate|2018-09-18 
+http://www.semanlink.net/tag/a_la_carte_embedding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/a_la_carte_embedding|uri|http://www.semanlink.net/tag/a_la_carte_embedding +http://www.semanlink.net/tag/a_la_carte_embedding|broader_prefLabel|Rare words (NLP) +http://www.semanlink.net/tag/a_la_carte_embedding|broader_prefLabel|Sentence Embeddings +http://www.semanlink.net/tag/a_la_carte_embedding|broader_prefLabel|N-grams +http://www.semanlink.net/tag/a_la_carte_embedding|broader_prefLabel|Sanjeev Arora +http://www.semanlink.net/tag/a_la_carte_embedding|broader_prefLabel|Word embeddings +http://www.semanlink.net/tag/a_la_carte_embedding|broader_altLabel|N-gram +http://www.semanlink.net/tag/a_la_carte_embedding|broader_altLabel|Word Embedding +http://www.semanlink.net/tag/a_la_carte_embedding|broader_altLabel|Plongement lexical +http://www.semanlink.net/tag/a_la_carte_embedding|broader_related|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/a_la_carte_embedding|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/a_la_carte_embedding|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/bertrand_russell|creationTime|2017-12-16T15:04:05Z +http://www.semanlink.net/tag/bertrand_russell|prefLabel|Bertrand Russell +http://www.semanlink.net/tag/bertrand_russell|broader|http://www.semanlink.net/tag/prix_nobel +http://www.semanlink.net/tag/bertrand_russell|creationDate|2017-12-16 +http://www.semanlink.net/tag/bertrand_russell|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bertrand_russell|describedBy|https://en.wikipedia.org/wiki/Bertrand_Russell +http://www.semanlink.net/tag/bertrand_russell|uri|http://www.semanlink.net/tag/bertrand_russell +http://www.semanlink.net/tag/bertrand_russell|broader_prefLabel|Prix Nobel +http://www.semanlink.net/tag/finite_state_transducer|creationTime|2018-11-11T13:13:17Z +http://www.semanlink.net/tag/finite_state_transducer|prefLabel|Finite-state transducer +http://www.semanlink.net/tag/finite_state_transducer|creationDate|2018-11-11 +http://www.semanlink.net/tag/finite_state_transducer|comment|a type of finite-state automaton that maps between two sets of symbols +http://www.semanlink.net/tag/finite_state_transducer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/finite_state_transducer|describedBy|https://en.wikipedia.org/wiki/Finite-state_transducer +http://www.semanlink.net/tag/finite_state_transducer|uri|http://www.semanlink.net/tag/finite_state_transducer +http://www.semanlink.net/tag/pre_trained_language_models|creationTime|2019-01-24T16:01:02Z +http://www.semanlink.net/tag/pre_trained_language_models|prefLabel|Pre-Trained Language Models +http://www.semanlink.net/tag/pre_trained_language_models|broader|http://www.semanlink.net/tag/unsupervised_deep_pre_training +http://www.semanlink.net/tag/pre_trained_language_models|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/pre_trained_language_models|broader|http://www.semanlink.net/tag/nlp_pretraining +http://www.semanlink.net/tag/pre_trained_language_models|broader|http://www.semanlink.net/tag/pre_trained_models +http://www.semanlink.net/tag/pre_trained_language_models|broader|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/tag/pre_trained_language_models|broader|http://www.semanlink.net/tag/deep_nlp 
+http://www.semanlink.net/tag/pre_trained_language_models|related|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/pre_trained_language_models|creationDate|2019-01-24 +http://www.semanlink.net/tag/pre_trained_language_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pre_trained_language_models|altLabel|PreTrained Language Models +http://www.semanlink.net/tag/pre_trained_language_models|altLabel|Deep pre-training in NLP +http://www.semanlink.net/tag/pre_trained_language_models|uri|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/pre_trained_language_models|broader_prefLabel|Unsupervised deep pre-training +http://www.semanlink.net/tag/pre_trained_language_models|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/pre_trained_language_models|broader_prefLabel|NLP: pretraining +http://www.semanlink.net/tag/pre_trained_language_models|broader_prefLabel|Pre-trained Models +http://www.semanlink.net/tag/pre_trained_language_models|broader_prefLabel|Language model +http://www.semanlink.net/tag/pre_trained_language_models|broader_prefLabel|Deep NLP +http://www.semanlink.net/tag/pre_trained_language_models|broader_altLabel|nlp: pre-training +http://www.semanlink.net/tag/pre_trained_language_models|broader_altLabel|Language Modeling +http://www.semanlink.net/tag/pre_trained_language_models|broader_altLabel|LM +http://www.semanlink.net/tag/pre_trained_language_models|broader_altLabel|Statistical Language Model +http://www.semanlink.net/tag/pre_trained_language_models|broader_related|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/pre_trained_language_models|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/steve_cayzer|creationTime|2007-04-03T23:21:52Z +http://www.semanlink.net/tag/steve_cayzer|prefLabel|Steve Cayzer +http://www.semanlink.net/tag/steve_cayzer|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/steve_cayzer|broader|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/steve_cayzer|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/steve_cayzer|related|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/steve_cayzer|creationDate|2007-04-03 +http://www.semanlink.net/tag/steve_cayzer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/steve_cayzer|uri|http://www.semanlink.net/tag/steve_cayzer +http://www.semanlink.net/tag/steve_cayzer|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/steve_cayzer|broader_prefLabel|HP +http://www.semanlink.net/tag/steve_cayzer|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/steve_cayzer|broader_altLabel|Technical guys +http://www.semanlink.net/tag/nlp_rare_words|creationTime|2018-09-18T18:08:21Z +http://www.semanlink.net/tag/nlp_rare_words|prefLabel|Rare words (NLP) +http://www.semanlink.net/tag/nlp_rare_words|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/nlp_rare_words|creationDate|2018-09-18 +http://www.semanlink.net/tag/nlp_rare_words|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_rare_words|uri|http://www.semanlink.net/tag/nlp_rare_words +http://www.semanlink.net/tag/nlp_rare_words|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/hierarchical_memory_networks|creationTime|2018-11-14T01:42:14Z 
+http://www.semanlink.net/tag/hierarchical_memory_networks|prefLabel|Hierarchical Memory Networks +http://www.semanlink.net/tag/hierarchical_memory_networks|broader|http://www.semanlink.net/tag/memory_in_deep_learning +http://www.semanlink.net/tag/hierarchical_memory_networks|related|http://www.semanlink.net/tag/memory_networks +http://www.semanlink.net/tag/hierarchical_memory_networks|creationDate|2018-11-14 +http://www.semanlink.net/tag/hierarchical_memory_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hierarchical_memory_networks|uri|http://www.semanlink.net/tag/hierarchical_memory_networks +http://www.semanlink.net/tag/hierarchical_memory_networks|broader_prefLabel|Memory in deep learning +http://www.semanlink.net/tag/hierarchical_memory_networks|broader_related|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/tag/paggr|creationTime|2008-11-03T10:15:21Z +http://www.semanlink.net/tag/paggr|prefLabel|paggr +http://www.semanlink.net/tag/paggr|broader|http://www.semanlink.net/tag/rdf_application +http://www.semanlink.net/tag/paggr|broader|http://www.semanlink.net/tag/data_web +http://www.semanlink.net/tag/paggr|broader|http://www.semanlink.net/tag/benjamin_nowack +http://www.semanlink.net/tag/paggr|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/paggr|creationDate|2008-11-03 +http://www.semanlink.net/tag/paggr|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paggr|uri|http://www.semanlink.net/tag/paggr +http://www.semanlink.net/tag/paggr|broader_prefLabel|RDF Application +http://www.semanlink.net/tag/paggr|broader_prefLabel|Web of data +http://www.semanlink.net/tag/paggr|broader_prefLabel|Benjamin Nowack +http://www.semanlink.net/tag/paggr|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/paggr|broader_altLabel|bengee +http://www.semanlink.net/tag/recyclage|creationTime|2007-11-09T13:20:54Z +http://www.semanlink.net/tag/recyclage|prefLabel|Recyclage +http://www.semanlink.net/tag/recyclage|creationDate|2007-11-09 +http://www.semanlink.net/tag/recyclage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/recyclage|uri|http://www.semanlink.net/tag/recyclage +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|creationTime|2015-08-14T15:15:47Z +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|prefLabel|Boucle ferroviaire d’Afrique de l’Ouest +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|broader|http://www.semanlink.net/tag/train +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|broader|http://www.semanlink.net/tag/new_africa +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|broader|http://www.semanlink.net/tag/bollore +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|creationDate|2015-08-14 +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|uri|http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|broader_prefLabel|Train 
+http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|broader_prefLabel|New Africa +http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest|broader_prefLabel|Bolloré +http://www.semanlink.net/tag/cinema_americain|creationTime|2011-12-28T22:14:23Z +http://www.semanlink.net/tag/cinema_americain|prefLabel|Cinéma américain +http://www.semanlink.net/tag/cinema_americain|broader|http://www.semanlink.net/tag/cinema +http://www.semanlink.net/tag/cinema_americain|creationDate|2011-12-28 +http://www.semanlink.net/tag/cinema_americain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cinema_americain|uri|http://www.semanlink.net/tag/cinema_americain +http://www.semanlink.net/tag/cinema_americain|broader_prefLabel|Cinéma +http://www.semanlink.net/tag/malicious_code|prefLabel|Malicious code +http://www.semanlink.net/tag/malicious_code|creationDate|2006-12-10 +http://www.semanlink.net/tag/malicious_code|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/malicious_code|uri|http://www.semanlink.net/tag/malicious_code +http://www.semanlink.net/tag/programmers|creationTime|2013-01-18T00:52:43Z +http://www.semanlink.net/tag/programmers|prefLabel|Programmers +http://www.semanlink.net/tag/programmers|related|http://www.semanlink.net/tag/hackers +http://www.semanlink.net/tag/programmers|creationDate|2013-01-18 +http://www.semanlink.net/tag/programmers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/programmers|uri|http://www.semanlink.net/tag/programmers +http://www.semanlink.net/tag/film_cubain|creationTime|2020-06-13T19:14:08Z +http://www.semanlink.net/tag/film_cubain|prefLabel|Film cubain +http://www.semanlink.net/tag/film_cubain|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_cubain|broader|http://www.semanlink.net/tag/cuba +http://www.semanlink.net/tag/film_cubain|creationDate|2020-06-13 +http://www.semanlink.net/tag/film_cubain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_cubain|uri|http://www.semanlink.net/tag/film_cubain +http://www.semanlink.net/tag/film_cubain|broader_prefLabel|Film +http://www.semanlink.net/tag/film_cubain|broader_prefLabel|Cuba +http://www.semanlink.net/tag/kids|creationTime|2013-12-09T15:17:29Z +http://www.semanlink.net/tag/kids|prefLabel|Kids +http://www.semanlink.net/tag/kids|creationDate|2013-12-09 +http://www.semanlink.net/tag/kids|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kids|uri|http://www.semanlink.net/tag/kids +http://www.semanlink.net/tag/kirghizistan|creationTime|2021-02-07T12:01:30Z +http://www.semanlink.net/tag/kirghizistan|prefLabel|Kirghizistan +http://www.semanlink.net/tag/kirghizistan|broader|http://www.semanlink.net/tag/asie_centrale +http://www.semanlink.net/tag/kirghizistan|creationDate|2021-02-07 +http://www.semanlink.net/tag/kirghizistan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kirghizistan|describedBy|https://fr.wikipedia.org/wiki/Kirghizistan +http://www.semanlink.net/tag/kirghizistan|altLabel|Kyrgyzstan +http://www.semanlink.net/tag/kirghizistan|uri|http://www.semanlink.net/tag/kirghizistan +http://www.semanlink.net/tag/kirghizistan|broader_prefLabel|Asie centrale +http://www.semanlink.net/tag/text_dim_reduction|creationTime|2017-06-26T09:46:03Z +http://www.semanlink.net/tag/text_dim_reduction|prefLabel|Text: dimension reduction 
+http://www.semanlink.net/tag/text_dim_reduction|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/text_dim_reduction|broader|http://www.semanlink.net/tag/dimensionality_reduction +http://www.semanlink.net/tag/text_dim_reduction|creationDate|2017-06-26 +http://www.semanlink.net/tag/text_dim_reduction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_dim_reduction|uri|http://www.semanlink.net/tag/text_dim_reduction +http://www.semanlink.net/tag/text_dim_reduction|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/text_dim_reduction|broader_prefLabel|Dimensionality reduction +http://www.semanlink.net/tag/algebre|creationTime|2012-03-20T23:49:32Z +http://www.semanlink.net/tag/algebre|prefLabel|Algèbre +http://www.semanlink.net/tag/algebre|creationDate|2012-03-20 +http://www.semanlink.net/tag/algebre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/algebre|uri|http://www.semanlink.net/tag/algebre +http://www.semanlink.net/tag/enigmes_de_la_physique|prefLabel|Enigmes de la physique +http://www.semanlink.net/tag/enigmes_de_la_physique|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/enigmes_de_la_physique|broader|http://www.semanlink.net/tag/enigme +http://www.semanlink.net/tag/enigmes_de_la_physique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enigmes_de_la_physique|uri|http://www.semanlink.net/tag/enigmes_de_la_physique +http://www.semanlink.net/tag/enigmes_de_la_physique|broader_prefLabel|Physique +http://www.semanlink.net/tag/enigmes_de_la_physique|broader_prefLabel|Enigme +http://www.semanlink.net/tag/enigmes_de_la_physique|broader_altLabel|Physics +http://www.semanlink.net/tag/danemark|prefLabel|Danemark +http://www.semanlink.net/tag/danemark|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/danemark|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/danemark|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/danemark|uri|http://www.semanlink.net/tag/danemark +http://www.semanlink.net/tag/danemark|broader_prefLabel|Europe +http://www.semanlink.net/tag/danemark|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/pandora_papers|creationTime|2021-10-03T22:39:19Z +http://www.semanlink.net/tag/pandora_papers|prefLabel|Pandora Papers +http://www.semanlink.net/tag/pandora_papers|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/pandora_papers|broader|http://www.semanlink.net/tag/leaks +http://www.semanlink.net/tag/pandora_papers|broader|http://www.semanlink.net/tag/paradis_fiscaux +http://www.semanlink.net/tag/pandora_papers|creationDate|2021-10-03 +http://www.semanlink.net/tag/pandora_papers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pandora_papers|uri|http://www.semanlink.net/tag/pandora_papers +http://www.semanlink.net/tag/pandora_papers|broader_prefLabel|Finance +http://www.semanlink.net/tag/pandora_papers|broader_prefLabel|Leaks +http://www.semanlink.net/tag/pandora_papers|broader_prefLabel|Paradis fiscaux +http://www.semanlink.net/tag/pandora_papers|broader_altLabel|Tax Haven +http://www.semanlink.net/tag/pandora_papers|broader_altLabel|Paradis fiscal +http://www.semanlink.net/tag/pandora_papers|broader_related|http://www.semanlink.net/tag/edward_snowden 
+http://www.semanlink.net/tag/pandora_papers|broader_related|http://www.semanlink.net/tag/luxembourg +http://www.semanlink.net/tag/lord_s_resistance_army|prefLabel|Lord's Resistance Army +http://www.semanlink.net/tag/lord_s_resistance_army|broader|http://www.semanlink.net/tag/ouganda +http://www.semanlink.net/tag/lord_s_resistance_army|broader|http://www.semanlink.net/tag/horreur +http://www.semanlink.net/tag/lord_s_resistance_army|creationDate|2006-09-15 +http://www.semanlink.net/tag/lord_s_resistance_army|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lord_s_resistance_army|altLabel|LRA +http://www.semanlink.net/tag/lord_s_resistance_army|uri|http://www.semanlink.net/tag/lord_s_resistance_army +http://www.semanlink.net/tag/lord_s_resistance_army|broader_prefLabel|Ouganda +http://www.semanlink.net/tag/lord_s_resistance_army|broader_prefLabel|Horreur +http://www.semanlink.net/tag/facebook_fair|creationTime|2019-05-12T12:25:13Z +http://www.semanlink.net/tag/facebook_fair|prefLabel|Facebook FAIR +http://www.semanlink.net/tag/facebook_fair|broader|http://www.semanlink.net/tag/ai_facebook +http://www.semanlink.net/tag/facebook_fair|creationDate|2019-05-12 +http://www.semanlink.net/tag/facebook_fair|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/facebook_fair|uri|http://www.semanlink.net/tag/facebook_fair +http://www.semanlink.net/tag/facebook_fair|broader_prefLabel|AI@Facebook +http://www.semanlink.net/tag/elephant|prefLabel|Eléphant +http://www.semanlink.net/tag/elephant|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/elephant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elephant|uri|http://www.semanlink.net/tag/elephant +http://www.semanlink.net/tag/elephant|broader_prefLabel|Animal +http://www.semanlink.net/tag/kd_mkb_biblio|creationTime|2020-05-11T18:57:12Z +http://www.semanlink.net/tag/kd_mkb_biblio|prefLabel|KD-MKB biblio +http://www.semanlink.net/tag/kd_mkb_biblio|broader|http://www.semanlink.net/tag/kd_mkb +http://www.semanlink.net/tag/kd_mkb_biblio|creationDate|2020-05-11 +http://www.semanlink.net/tag/kd_mkb_biblio|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kd_mkb_biblio|altLabel|KD-MKR biblio +http://www.semanlink.net/tag/kd_mkb_biblio|uri|http://www.semanlink.net/tag/kd_mkb_biblio +http://www.semanlink.net/tag/kd_mkb_biblio|broader_prefLabel|KD-MKB +http://www.semanlink.net/tag/kd_mkb_biblio|broader_altLabel|KDMKB +http://www.semanlink.net/tag/kd_mkb_biblio|broader_altLabel|KD-MKR +http://www.semanlink.net/tag/kd_mkb_biblio|broader_related|http://www.semanlink.net/tag/multiple_knowledge_bases +http://www.semanlink.net/tag/kd_mkb_biblio|broader_related|http://www.semanlink.net/tag/coling2020 +http://www.semanlink.net/tag/masse_manquante|prefLabel|Masse manquante +http://www.semanlink.net/tag/masse_manquante|broader|http://www.semanlink.net/tag/enigmes_de_la_physique +http://www.semanlink.net/tag/masse_manquante|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/masse_manquante|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/masse_manquante|altLabel|Missing Matter +http://www.semanlink.net/tag/masse_manquante|uri|http://www.semanlink.net/tag/masse_manquante +http://www.semanlink.net/tag/masse_manquante|broader_prefLabel|Enigmes de la physique +http://www.semanlink.net/tag/masse_manquante|broader_prefLabel|Physique 
+http://www.semanlink.net/tag/masse_manquante|broader_altLabel|Physics +http://www.semanlink.net/tag/jersey_cache_control|creationTime|2015-02-11T16:30:45Z +http://www.semanlink.net/tag/jersey_cache_control|prefLabel|Jersey Cache-Control +http://www.semanlink.net/tag/jersey_cache_control|broader|http://www.semanlink.net/tag/jersey +http://www.semanlink.net/tag/jersey_cache_control|broader|http://www.semanlink.net/tag/http_cache +http://www.semanlink.net/tag/jersey_cache_control|creationDate|2015-02-11 +http://www.semanlink.net/tag/jersey_cache_control|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jersey_cache_control|uri|http://www.semanlink.net/tag/jersey_cache_control +http://www.semanlink.net/tag/jersey_cache_control|broader_prefLabel|jersey +http://www.semanlink.net/tag/jersey_cache_control|broader_prefLabel|HTTP Cache +http://www.semanlink.net/tag/perelman|prefLabel|Perelman +http://www.semanlink.net/tag/perelman|broader|http://www.semanlink.net/tag/medaille_fields +http://www.semanlink.net/tag/perelman|broader|http://www.semanlink.net/tag/conjecture_de_poincare +http://www.semanlink.net/tag/perelman|creationDate|2006-08-28 +http://www.semanlink.net/tag/perelman|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/perelman|uri|http://www.semanlink.net/tag/perelman +http://www.semanlink.net/tag/perelman|broader_prefLabel|Médaille Fields +http://www.semanlink.net/tag/perelman|broader_prefLabel|Conjecture de Poincaré +http://www.semanlink.net/tag/microsoft_concept_graph|creationTime|2019-05-28T16:54:25Z +http://www.semanlink.net/tag/microsoft_concept_graph|prefLabel|Microsoft Concept Graph +http://www.semanlink.net/tag/microsoft_concept_graph|broader|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/tag/microsoft_concept_graph|broader|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/tag/microsoft_concept_graph|broader|http://www.semanlink.net/tag/nlu +http://www.semanlink.net/tag/microsoft_concept_graph|broader|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/tag/microsoft_concept_graph|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/microsoft_concept_graph|broader|http://www.semanlink.net/tag/microsoft_research +http://www.semanlink.net/tag/microsoft_concept_graph|creationDate|2019-05-28 +http://www.semanlink.net/tag/microsoft_concept_graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/microsoft_concept_graph|uri|http://www.semanlink.net/tag/microsoft_concept_graph +http://www.semanlink.net/tag/microsoft_concept_graph|broader_prefLabel|Knowledge Graphs in NLP +http://www.semanlink.net/tag/microsoft_concept_graph|broader_prefLabel|NLP@Microsoft +http://www.semanlink.net/tag/microsoft_concept_graph|broader_prefLabel|NLU +http://www.semanlink.net/tag/microsoft_concept_graph|broader_prefLabel|Knowledge Extraction +http://www.semanlink.net/tag/microsoft_concept_graph|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/microsoft_concept_graph|broader_prefLabel|Microsoft Research +http://www.semanlink.net/tag/microsoft_concept_graph|broader_altLabel|Natural Language Understanding +http://www.semanlink.net/tag/microsoft_concept_graph|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/microsoft_concept_graph|broader_altLabel|KG 
+http://www.semanlink.net/tag/microsoft_concept_graph|broader_related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/pubby|creationTime|2007-06-23T15:15:57Z +http://www.semanlink.net/tag/pubby|prefLabel|Pubby +http://www.semanlink.net/tag/pubby|broader|http://www.semanlink.net/tag/sparql_endpoint +http://www.semanlink.net/tag/pubby|broader|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/pubby|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/pubby|broader|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/pubby|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/pubby|related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/pubby|creationDate|2007-06-23 +http://www.semanlink.net/tag/pubby|comment|"A Linked Data Frontend for SPARQL Endpoints
Much Semantic Web data lives inside triple stores and can be accessed only by sending SPARQL queries to a SPARQL endpoint. Pubby can be used to add a Linked Data interface to those SPARQL endpoints. +" +http://www.semanlink.net/tag/pubby|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pubby|homepage|http://www4.wiwiss.fu-berlin.de/pubby/ +http://www.semanlink.net/tag/pubby|uri|http://www.semanlink.net/tag/pubby +http://www.semanlink.net/tag/pubby|broader_prefLabel|SPARQL endpoint +http://www.semanlink.net/tag/pubby|broader_prefLabel|Chris Bizer +http://www.semanlink.net/tag/pubby|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/pubby|broader_prefLabel|Richard Cyganiak +http://www.semanlink.net/tag/pubby|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/pubby|broader_altLabel|LD +http://www.semanlink.net/tag/pubby|broader_altLabel|dowhatimean.net +http://www.semanlink.net/tag/pubby|broader_altLabel|LOD +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/pubby|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/civilisation_de_l_indus|prefLabel|Civilisation de l'Indus +http://www.semanlink.net/tag/civilisation_de_l_indus|broader|http://www.semanlink.net/tag/antiquite_de_l_inde +http://www.semanlink.net/tag/civilisation_de_l_indus|broader|http://www.semanlink.net/tag/antiquite_du_pakistan +http://www.semanlink.net/tag/civilisation_de_l_indus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/civilisation_de_l_indus|uri|http://www.semanlink.net/tag/civilisation_de_l_indus +http://www.semanlink.net/tag/civilisation_de_l_indus|broader_prefLabel|Antiquité de l'Inde +http://www.semanlink.net/tag/civilisation_de_l_indus|broader_prefLabel|Antiquité du Pakistan +http://www.semanlink.net/tag/personal_archives|creationTime|2012-09-26T15:12:22Z 
+http://www.semanlink.net/tag/personal_archives|prefLabel|Personal archives +http://www.semanlink.net/tag/personal_archives|broader|http://www.semanlink.net/tag/personal_information_management +http://www.semanlink.net/tag/personal_archives|related|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/personal_archives|creationDate|2012-09-26 +http://www.semanlink.net/tag/personal_archives|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/personal_archives|altLabel|Personal Archive +http://www.semanlink.net/tag/personal_archives|uri|http://www.semanlink.net/tag/personal_archives +http://www.semanlink.net/tag/personal_archives|broader_prefLabel|Personal-information management +http://www.semanlink.net/tag/personal_archives|broader_altLabel|PIM +http://www.semanlink.net/tag/personal_archives|broader_related|http://www.semanlink.net/tag/personal_knowledge_management +http://www.semanlink.net/tag/soap|prefLabel|SOAP +http://www.semanlink.net/tag/soap|broader|http://www.semanlink.net/tag/xml +http://www.semanlink.net/tag/soap|broader|http://www.semanlink.net/tag/web_services +http://www.semanlink.net/tag/soap|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/soap|uri|http://www.semanlink.net/tag/soap +http://www.semanlink.net/tag/soap|broader_prefLabel|XML +http://www.semanlink.net/tag/soap|broader_prefLabel|Web Services +http://www.semanlink.net/tag/soap|broader_altLabel|WS +http://www.semanlink.net/tag/benin|prefLabel|Bénin +http://www.semanlink.net/tag/benin|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/benin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/benin|uri|http://www.semanlink.net/tag/benin +http://www.semanlink.net/tag/benin|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/capsule_networks|creationTime|2018-04-26T14:21:34Z +http://www.semanlink.net/tag/capsule_networks|prefLabel|Capsule networks +http://www.semanlink.net/tag/capsule_networks|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/capsule_networks|creationDate|2018-04-26 +http://www.semanlink.net/tag/capsule_networks|comment|"Capsule networks encapsulate +features into groups of neurons, so-called +capsules (Hinton et al., 2011; Sabour et al., 2017). +Originally introduced for a handwritten digit image +classification task where each digit has been +associated with a capsule." 
+http://www.semanlink.net/tag/capsule_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/capsule_networks|uri|http://www.semanlink.net/tag/capsule_networks +http://www.semanlink.net/tag/capsule_networks|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/capsule_networks|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/capsule_networks|broader_altLabel|ANN +http://www.semanlink.net/tag/capsule_networks|broader_altLabel|NN +http://www.semanlink.net/tag/emotions|creationTime|2019-11-19T09:15:15Z +http://www.semanlink.net/tag/emotions|prefLabel|Emotions +http://www.semanlink.net/tag/emotions|creationDate|2019-11-19 +http://www.semanlink.net/tag/emotions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/emotions|uri|http://www.semanlink.net/tag/emotions +http://www.semanlink.net/tag/loi_sur_le_telechargement|prefLabel|Loi sur le téléchargement +http://www.semanlink.net/tag/loi_sur_le_telechargement|broader|http://www.semanlink.net/tag/industrie_du_disque +http://www.semanlink.net/tag/loi_sur_le_telechargement|broader|http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con +http://www.semanlink.net/tag/loi_sur_le_telechargement|broader|http://www.semanlink.net/tag/droit_et_internet +http://www.semanlink.net/tag/loi_sur_le_telechargement|broader|http://www.semanlink.net/tag/piratage_des_oeuvres +http://www.semanlink.net/tag/loi_sur_le_telechargement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/loi_sur_le_telechargement|uri|http://www.semanlink.net/tag/loi_sur_le_telechargement +http://www.semanlink.net/tag/loi_sur_le_telechargement|broader_prefLabel|Industrie du disque +http://www.semanlink.net/tag/loi_sur_le_telechargement|broader_prefLabel|Le gouvernement Chirac est trop con +http://www.semanlink.net/tag/loi_sur_le_telechargement|broader_prefLabel|Droit et internet +http://www.semanlink.net/tag/loi_sur_le_telechargement|broader_prefLabel|Piratage des œuvres +http://www.semanlink.net/tag/loi_sur_le_telechargement|broader_altLabel|Record industry +http://www.semanlink.net/tag/loi_sur_le_telechargement|broader_altLabel|Industrie musicale +http://www.semanlink.net/tag/javascript_tips|creationTime|2008-06-14T13:53:57Z +http://www.semanlink.net/tag/javascript_tips|prefLabel|Javascript tips +http://www.semanlink.net/tag/javascript_tips|broader|http://www.semanlink.net/tag/dev_tips +http://www.semanlink.net/tag/javascript_tips|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/javascript_tips|creationDate|2008-06-14 +http://www.semanlink.net/tag/javascript_tips|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/javascript_tips|uri|http://www.semanlink.net/tag/javascript_tips +http://www.semanlink.net/tag/javascript_tips|broader_prefLabel|Dev tips +http://www.semanlink.net/tag/javascript_tips|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/javascript_tips|broader_altLabel|Dev tip +http://www.semanlink.net/tag/javascript_tips|broader_altLabel|js +http://www.semanlink.net/tag/cinema_francais|creationTime|2007-12-27T22:19:36Z +http://www.semanlink.net/tag/cinema_francais|prefLabel|Cinéma français +http://www.semanlink.net/tag/cinema_francais|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/cinema_francais|broader|http://www.semanlink.net/tag/cinema +http://www.semanlink.net/tag/cinema_francais|creationDate|2007-12-27 
+http://www.semanlink.net/tag/cinema_francais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cinema_francais|uri|http://www.semanlink.net/tag/cinema_francais
+http://www.semanlink.net/tag/cinema_francais|broader_prefLabel|France
+http://www.semanlink.net/tag/cinema_francais|broader_prefLabel|Cinéma
+http://www.semanlink.net/tag/sauver_la_planete|creationTime|2009-07-20T16:36:17Z
+http://www.semanlink.net/tag/sauver_la_planete|prefLabel|Sauver la planète
+http://www.semanlink.net/tag/sauver_la_planete|creationDate|2009-07-20
+http://www.semanlink.net/tag/sauver_la_planete|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sauver_la_planete|uri|http://www.semanlink.net/tag/sauver_la_planete
+http://www.semanlink.net/tag/ofir|creationTime|2007-09-11T21:26:46Z
+http://www.semanlink.net/tag/ofir|prefLabel|Ofir
+http://www.semanlink.net/tag/ofir|broader|http://www.semanlink.net/tag/gastronomie
+http://www.semanlink.net/tag/ofir|broader|http://www.semanlink.net/tag/ami
+http://www.semanlink.net/tag/ofir|broader|http://www.semanlink.net/tag/amazonie
+http://www.semanlink.net/tag/ofir|creationDate|2007-09-11
+http://www.semanlink.net/tag/ofir|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ofir|uri|http://www.semanlink.net/tag/ofir
+http://www.semanlink.net/tag/ofir|broader_prefLabel|Gastronomie
+http://www.semanlink.net/tag/ofir|broader_prefLabel|Ami
+http://www.semanlink.net/tag/ofir|broader_prefLabel|Amazonie
+http://www.semanlink.net/tag/ofir|broader_altLabel|Cuisine
+http://www.semanlink.net/tag/intellectuel|prefLabel|Intellectuel
+http://www.semanlink.net/tag/intellectuel|broader|http://www.semanlink.net/tag/penseur
+http://www.semanlink.net/tag/intellectuel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/intellectuel|uri|http://www.semanlink.net/tag/intellectuel
+http://www.semanlink.net/tag/intellectuel|broader_prefLabel|Penseur
+http://www.semanlink.net/tag/deep_learning_book|creationTime|2017-12-16T14:25:26Z
+http://www.semanlink.net/tag/deep_learning_book|prefLabel|Deep Learning Book
+http://www.semanlink.net/tag/deep_learning_book|broader|http://www.semanlink.net/tag/ai_book
+http://www.semanlink.net/tag/deep_learning_book|broader|http://www.semanlink.net/tag/deep_learning
+http://www.semanlink.net/tag/deep_learning_book|creationDate|2017-12-16
+http://www.semanlink.net/tag/deep_learning_book|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/deep_learning_book|uri|http://www.semanlink.net/tag/deep_learning_book
+http://www.semanlink.net/tag/deep_learning_book|broader_prefLabel|AI: books & journals
+http://www.semanlink.net/tag/deep_learning_book|broader_prefLabel|Deep Learning
+http://www.semanlink.net/tag/deep_learning_book|broader_related|http://www.semanlink.net/tag/feature_extraction
+http://www.semanlink.net/tag/deep_learning_book|broader_related|http://www.semanlink.net/tag/feature_learning
+http://www.semanlink.net/tag/telelamarna|prefLabel|Telelamarna
+http://www.semanlink.net/tag/telelamarna|broader|http://www.semanlink.net/tag/akhenaton
+http://www.semanlink.net/tag/telelamarna|comment|Capitale de Akhênaton
+http://www.semanlink.net/tag/telelamarna|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/telelamarna|uri|http://www.semanlink.net/tag/telelamarna
+http://www.semanlink.net/tag/telelamarna|broader_prefLabel|Akhênaton
+http://www.semanlink.net/tag/semantic_web_propaganda|creationTime|2011-09-27T11:10:03Z
+http://www.semanlink.net/tag/semantic_web_propaganda|prefLabel|Semantic Web propaganda
+http://www.semanlink.net/tag/semantic_web_propaganda|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/semantic_web_propaganda|creationDate|2011-09-27
+http://www.semanlink.net/tag/semantic_web_propaganda|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_web_propaganda|uri|http://www.semanlink.net/tag/semantic_web_propaganda
+http://www.semanlink.net/tag/semantic_web_propaganda|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semantic_web_propaganda|broader_altLabel|sw
+http://www.semanlink.net/tag/semantic_web_propaganda|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/matrix_calculus|creationTime|2020-02-19T21:53:00Z
+http://www.semanlink.net/tag/matrix_calculus|prefLabel|Matrix calculus
+http://www.semanlink.net/tag/matrix_calculus|creationDate|2020-02-19
+http://www.semanlink.net/tag/matrix_calculus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/matrix_calculus|describedBy|https://en.wikipedia.org/wiki/Matrix_calculus
+http://www.semanlink.net/tag/matrix_calculus|uri|http://www.semanlink.net/tag/matrix_calculus
+http://www.semanlink.net/tag/stanford_ner|creationTime|2018-05-20T22:39:14Z
+http://www.semanlink.net/tag/stanford_ner|prefLabel|Stanford NER
+http://www.semanlink.net/tag/stanford_ner|broader|http://www.semanlink.net/tag/nlp_stanford
+http://www.semanlink.net/tag/stanford_ner|broader|http://www.semanlink.net/tag/conditional_random_field
+http://www.semanlink.net/tag/stanford_ner|broader|http://www.semanlink.net/tag/named_entity_recognition
+http://www.semanlink.net/tag/stanford_ner|creationDate|2018-05-20
+http://www.semanlink.net/tag/stanford_ner|comment|general implementation of (arbitrary order) linear chain Conditional Random Field (CRF) sequence models
+http://www.semanlink.net/tag/stanford_ner|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/stanford_ner|uri|http://www.semanlink.net/tag/stanford_ner
+http://www.semanlink.net/tag/stanford_ner|broader_prefLabel|NLP@Stanford
+http://www.semanlink.net/tag/stanford_ner|broader_prefLabel|Conditional random fields
+http://www.semanlink.net/tag/stanford_ner|broader_prefLabel|Named Entity Recognition
+http://www.semanlink.net/tag/stanford_ner|broader_altLabel|CRF
+http://www.semanlink.net/tag/stanford_ner|broader_altLabel|NER
+http://www.semanlink.net/tag/stanford_ner|broader_related|http://www.semanlink.net/tag/chris_manning
+http://www.semanlink.net/tag/stanford_ner|broader_related|http://www.semanlink.net/tag/dan_jurafsky
+http://www.semanlink.net/tag/stanford_ner|broader_related|http://www.semanlink.net/tag/andrew_mccallum
+http://www.semanlink.net/tag/stanford_ner|broader_related|http://www.semanlink.net/tag/part_of_speech_tagging
+http://www.semanlink.net/tag/stanford_ner|broader_related|http://www.semanlink.net/tag/hidden_markov_model
+http://www.semanlink.net/tag/stanford_ner|broader_related|http://www.semanlink.net/tag/sequence_labeling
+http://www.semanlink.net/tag/pauli|prefLabel|Pauli
+http://www.semanlink.net/tag/pauli|broader|http://www.semanlink.net/tag/mecanique_quantique
+http://www.semanlink.net/tag/pauli|broader|http://www.semanlink.net/tag/scientifique
+http://www.semanlink.net/tag/pauli|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/pauli|uri|http://www.semanlink.net/tag/pauli
+http://www.semanlink.net/tag/pauli|broader_prefLabel|Mécanique quantique
+http://www.semanlink.net/tag/pauli|broader_prefLabel|Scientifique
+http://www.semanlink.net/tag/pauli|broader_altLabel|Savant
+http://www.semanlink.net/tag/semantic_web_training|creationTime|2010-08-24T22:45:23Z
+http://www.semanlink.net/tag/semantic_web_training|prefLabel|Semantic web: training
+http://www.semanlink.net/tag/semantic_web_training|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/semantic_web_training|creationDate|2010-08-24
+http://www.semanlink.net/tag/semantic_web_training|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_web_training|uri|http://www.semanlink.net/tag/semantic_web_training
+http://www.semanlink.net/tag/semantic_web_training|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semantic_web_training|broader_altLabel|sw
+http://www.semanlink.net/tag/semantic_web_training|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/hittite|creationTime|2016-05-23T08:44:40Z
+http://www.semanlink.net/tag/hittite|prefLabel|Hittite
+http://www.semanlink.net/tag/hittite|broader|http://www.semanlink.net/tag/asie_mineure
+http://www.semanlink.net/tag/hittite|creationDate|2016-05-23
+http://www.semanlink.net/tag/hittite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/hittite|describedBy|https://en.wikipedia.org/wiki/Hittites
+http://www.semanlink.net/tag/hittite|altLabel|Hittites
+http://www.semanlink.net/tag/hittite|uri|http://www.semanlink.net/tag/hittite
+http://www.semanlink.net/tag/hittite|broader_prefLabel|Asie mineure
+http://www.semanlink.net/tag/hittite|broader_altLabel|Anatolie
+http://www.semanlink.net/tag/i_b_m_s_watson|creationTime|2010-06-23T00:28:45Z
+http://www.semanlink.net/tag/i_b_m_s_watson|prefLabel|IBM Watson
+http://www.semanlink.net/tag/i_b_m_s_watson|broader|http://www.semanlink.net/tag/ibm
+http://www.semanlink.net/tag/i_b_m_s_watson|broader|http://www.semanlink.net/tag/artificial_intelligence
+http://www.semanlink.net/tag/i_b_m_s_watson|related|http://www.semanlink.net/tag/chris_welty
+http://www.semanlink.net/tag/i_b_m_s_watson|creationDate|2010-06-23
+http://www.semanlink.net/tag/i_b_m_s_watson|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/i_b_m_s_watson|altLabel|IBM's Watson
+http://www.semanlink.net/tag/i_b_m_s_watson|altLabel|Watson
+http://www.semanlink.net/tag/i_b_m_s_watson|uri|http://www.semanlink.net/tag/i_b_m_s_watson
+http://www.semanlink.net/tag/i_b_m_s_watson|broader_prefLabel|IBM
+http://www.semanlink.net/tag/i_b_m_s_watson|broader_prefLabel|Artificial Intelligence
+http://www.semanlink.net/tag/i_b_m_s_watson|broader_altLabel|Intelligence Artificielle
+http://www.semanlink.net/tag/i_b_m_s_watson|broader_altLabel|AI
+http://www.semanlink.net/tag/i_b_m_s_watson|broader_altLabel|IA
+http://www.semanlink.net/tag/i_b_m_s_watson|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens
+http://www.semanlink.net/tag/conjecture_de_poincare|prefLabel|Conjecture de Poincaré
+http://www.semanlink.net/tag/conjecture_de_poincare|broader|http://www.semanlink.net/tag/grands_problemes_mathematiques
+http://www.semanlink.net/tag/conjecture_de_poincare|broader|http://www.semanlink.net/tag/poincare
+http://www.semanlink.net/tag/conjecture_de_poincare|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/conjecture_de_poincare|uri|http://www.semanlink.net/tag/conjecture_de_poincare
+http://www.semanlink.net/tag/conjecture_de_poincare|broader_prefLabel|Grands problèmes mathématiques
+http://www.semanlink.net/tag/conjecture_de_poincare|broader_prefLabel|Poincaré
+http://www.semanlink.net/tag/caterpillar|creationTime|2021-01-28T12:19:57Z
+http://www.semanlink.net/tag/caterpillar|prefLabel|Caterpillar
+http://www.semanlink.net/tag/caterpillar|broader|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/caterpillar|creationDate|2021-01-28
+http://www.semanlink.net/tag/caterpillar|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/caterpillar|describedBy|https://en.wikipedia.org/wiki/Caterpillar_Inc.
+http://www.semanlink.net/tag/caterpillar|uri|http://www.semanlink.net/tag/caterpillar
+http://www.semanlink.net/tag/caterpillar|broader_prefLabel|Entreprise
+http://www.semanlink.net/tag/link_to_me|creationTime|2008-03-04T22:53:04Z
+http://www.semanlink.net/tag/link_to_me|prefLabel|Link to me
+http://www.semanlink.net/tag/link_to_me|broader|http://www.semanlink.net/tag/fps
+http://www.semanlink.net/tag/link_to_me|creationDate|2008-03-04
+http://www.semanlink.net/tag/link_to_me|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/link_to_me|uri|http://www.semanlink.net/tag/link_to_me
+http://www.semanlink.net/tag/link_to_me|broader_prefLabel|fps
+http://www.semanlink.net/tag/simple_idea|creationTime|2019-02-09T01:48:17Z
+http://www.semanlink.net/tag/simple_idea|prefLabel|Simple idea
+http://www.semanlink.net/tag/simple_idea|broader|http://www.semanlink.net/tag/good_idea
+http://www.semanlink.net/tag/simple_idea|creationDate|2019-02-09
+http://www.semanlink.net/tag/simple_idea|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/simple_idea|uri|http://www.semanlink.net/tag/simple_idea
+http://www.semanlink.net/tag/simple_idea|broader_prefLabel|Good idea
+http://www.semanlink.net/tag/venus|prefLabel|Vénus
+http://www.semanlink.net/tag/venus|broader|http://www.semanlink.net/tag/systeme_solaire
+http://www.semanlink.net/tag/venus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/venus|uri|http://www.semanlink.net/tag/venus
+http://www.semanlink.net/tag/venus|broader_prefLabel|Système solaire
+http://www.semanlink.net/tag/cortical_io|creationTime|2017-07-10T15:01:34Z
+http://www.semanlink.net/tag/cortical_io|prefLabel|Cortical.io
+http://www.semanlink.net/tag/cortical_io|broader|http://www.semanlink.net/tag/semantic_folding
+http://www.semanlink.net/tag/cortical_io|broader|http://www.semanlink.net/tag/nlp_tools
+http://www.semanlink.net/tag/cortical_io|related|http://www.semanlink.net/tag/jeff_hawkins
+http://www.semanlink.net/tag/cortical_io|related|http://www.semanlink.net/tag/sparse_distributed_memory
+http://www.semanlink.net/tag/cortical_io|related|http://www.semanlink.net/tag/hierarchical_temporal_memory
+http://www.semanlink.net/tag/cortical_io|creationDate|2017-07-10
+http://www.semanlink.net/tag/cortical_io|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cortical_io|homepage|http://www.cortical.io/
+http://www.semanlink.net/tag/cortical_io|uri|http://www.semanlink.net/tag/cortical_io
+http://www.semanlink.net/tag/cortical_io|broader_prefLabel|Semantic folding
+http://www.semanlink.net/tag/cortical_io|broader_prefLabel|NLP tools
+http://www.semanlink.net/tag/cortical_io|broader_related|http://www.semanlink.net/tag/hierarchical_temporal_memory
+http://www.semanlink.net/tag/cortical_io|broader_related|http://www.semanlink.net/tag/sparse_distributed_memory
+http://www.semanlink.net/tag/short_sales|creationTime|2010-05-21T21:24:10Z
+http://www.semanlink.net/tag/short_sales|prefLabel|Short selling
+http://www.semanlink.net/tag/short_sales|broader|http://www.semanlink.net/tag/marches_financiers
+http://www.semanlink.net/tag/short_sales|broader|http://www.semanlink.net/tag/finance
+http://www.semanlink.net/tag/short_sales|broader|http://www.semanlink.net/tag/speculation
+http://www.semanlink.net/tag/short_sales|creationDate|2010-05-21
+http://www.semanlink.net/tag/short_sales|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/short_sales|altLabel|Ventes à découvert
+http://www.semanlink.net/tag/short_sales|altLabel|Short sales
+http://www.semanlink.net/tag/short_sales|uri|http://www.semanlink.net/tag/short_sales
+http://www.semanlink.net/tag/short_sales|broader_prefLabel|Marchés financiers
+http://www.semanlink.net/tag/short_sales|broader_prefLabel|Finance
+http://www.semanlink.net/tag/short_sales|broader_prefLabel|Spéculation
+http://www.semanlink.net/tag/what_s_encoded_by_a_nn|creationTime|2019-06-21T21:50:22Z
+http://www.semanlink.net/tag/what_s_encoded_by_a_nn|prefLabel|What's encoded by a NN
+http://www.semanlink.net/tag/what_s_encoded_by_a_nn|broader|http://www.semanlink.net/tag/artificial_neural_network
+http://www.semanlink.net/tag/what_s_encoded_by_a_nn|creationDate|2019-06-21
+http://www.semanlink.net/tag/what_s_encoded_by_a_nn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/what_s_encoded_by_a_nn|uri|http://www.semanlink.net/tag/what_s_encoded_by_a_nn
+http://www.semanlink.net/tag/what_s_encoded_by_a_nn|broader_prefLabel|Neural networks
+http://www.semanlink.net/tag/what_s_encoded_by_a_nn|broader_altLabel|Artificial neural network
+http://www.semanlink.net/tag/what_s_encoded_by_a_nn|broader_altLabel|ANN
+http://www.semanlink.net/tag/what_s_encoded_by_a_nn|broader_altLabel|NN
+http://www.semanlink.net/tag/skos_owl|creationTime|2008-05-12T19:30:54Z
+http://www.semanlink.net/tag/skos_owl|prefLabel|SKOS/OWL
+http://www.semanlink.net/tag/skos_owl|broader|http://www.semanlink.net/tag/skos
+http://www.semanlink.net/tag/skos_owl|creationDate|2008-05-12
+http://www.semanlink.net/tag/skos_owl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/skos_owl|uri|http://www.semanlink.net/tag/skos_owl
+http://www.semanlink.net/tag/skos_owl|broader_prefLabel|SKOS
+http://www.semanlink.net/tag/skos_owl|broader_related|http://www.semanlink.net/tag/thesaurus
+http://www.semanlink.net/tag/mathematica|creationTime|2014-09-16T10:07:16Z
+http://www.semanlink.net/tag/mathematica|prefLabel|Mathematica
+http://www.semanlink.net/tag/mathematica|broader|http://www.semanlink.net/tag/wolfram
+http://www.semanlink.net/tag/mathematica|creationDate|2014-09-16
+http://www.semanlink.net/tag/mathematica|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mathematica|uri|http://www.semanlink.net/tag/mathematica
+http://www.semanlink.net/tag/mathematica|broader_prefLabel|Stephen Wolfram
+http://www.semanlink.net/tag/mathematica|broader_altLabel|Wolfram
+http://www.semanlink.net/tag/autriche|prefLabel|Autriche
+http://www.semanlink.net/tag/autriche|broader|http://www.semanlink.net/tag/pays_d_europe
+http://www.semanlink.net/tag/autriche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/autriche|uri|http://www.semanlink.net/tag/autriche
+http://www.semanlink.net/tag/autriche|broader_prefLabel|Pays d'Europe
+http://www.semanlink.net/tag/marsupiaux|creationTime|2007-05-23T01:21:13Z
+http://www.semanlink.net/tag/marsupiaux|prefLabel|Marsupiaux
+http://www.semanlink.net/tag/marsupiaux|creationDate|2007-05-23
+http://www.semanlink.net/tag/marsupiaux|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/marsupiaux|uri|http://www.semanlink.net/tag/marsupiaux
+http://www.semanlink.net/tag/chiffres|prefLabel|Chiffres
+http://www.semanlink.net/tag/chiffres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/chiffres|uri|http://www.semanlink.net/tag/chiffres
+http://www.semanlink.net/tag/empire_romain|creationTime|2012-04-18T11:08:08Z
+http://www.semanlink.net/tag/empire_romain|prefLabel|Empire romain
+http://www.semanlink.net/tag/empire_romain|broader|http://www.semanlink.net/tag/antiquite_romaine
+http://www.semanlink.net/tag/empire_romain|creationDate|2012-04-18
+http://www.semanlink.net/tag/empire_romain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/empire_romain|uri|http://www.semanlink.net/tag/empire_romain
+http://www.semanlink.net/tag/empire_romain|broader_prefLabel|Antiquité romaine
+http://www.semanlink.net/tag/guha|prefLabel|Guha
+http://www.semanlink.net/tag/guha|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/guha|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/guha|related|http://www.semanlink.net/tag/schema_org
+http://www.semanlink.net/tag/guha|related|http://www.semanlink.net/tag/google
+http://www.semanlink.net/tag/guha|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/guha|describedBy|https://en.wikipedia.org/wiki/Ramanathan_V._Guha
+http://www.semanlink.net/tag/guha|uri|http://www.semanlink.net/tag/guha
+http://www.semanlink.net/tag/guha|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/guha|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/guha|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/primate|prefLabel|Primate
+http://www.semanlink.net/tag/primate|broader|http://www.semanlink.net/tag/animal
+http://www.semanlink.net/tag/primate|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/primate|uri|http://www.semanlink.net/tag/primate
+http://www.semanlink.net/tag/primate|broader_prefLabel|Animal
+http://www.semanlink.net/tag/spec|prefLabel|Spec
+http://www.semanlink.net/tag/spec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/spec|uri|http://www.semanlink.net/tag/spec
+http://www.semanlink.net/tag/rdf_binary|creationTime|2014-09-08T18:09:06Z
+http://www.semanlink.net/tag/rdf_binary|prefLabel|RDF/binary
+http://www.semanlink.net/tag/rdf_binary|broader|http://www.semanlink.net/tag/rdf
+http://www.semanlink.net/tag/rdf_binary|creationDate|2014-09-08
+http://www.semanlink.net/tag/rdf_binary|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rdf_binary|uri|http://www.semanlink.net/tag/rdf_binary
+http://www.semanlink.net/tag/rdf_binary|broader_prefLabel|RDF
+http://www.semanlink.net/tag/rdf_binary|broader_related|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/rdf_binary|broader_related|http://www.semanlink.net/tag/ora_lassila
+http://www.semanlink.net/tag/rdf_binary|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/rdf_binary|broader_related|http://www.semanlink.net/tag/guha
+http://www.semanlink.net/tag/rdf_binary|broader_related|http://www.semanlink.net/tag/grddl
+http://www.semanlink.net/tag/statistical_classification|creationTime|2014-04-24T13:29:38Z
+http://www.semanlink.net/tag/statistical_classification|prefLabel|Classification
+http://www.semanlink.net/tag/statistical_classification|broader|http://www.semanlink.net/tag/supervised_machine_learning
+http://www.semanlink.net/tag/statistical_classification|broader|http://www.semanlink.net/tag/machine_learning_problems
+http://www.semanlink.net/tag/statistical_classification|creationDate|2014-04-24
+http://www.semanlink.net/tag/statistical_classification|comment|the problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations whose category membership is known.
+http://www.semanlink.net/tag/statistical_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/statistical_classification|describedBy|https://en.wikipedia.org/wiki/Statistical_classification
+http://www.semanlink.net/tag/statistical_classification|uri|http://www.semanlink.net/tag/statistical_classification
+http://www.semanlink.net/tag/statistical_classification|broader_prefLabel|Supervised machine learning
+http://www.semanlink.net/tag/statistical_classification|broader_prefLabel|Machine learning: problems
+http://www.semanlink.net/tag/judo|creationTime|2012-07-25T18:12:47Z
+http://www.semanlink.net/tag/judo|prefLabel|Judo
+http://www.semanlink.net/tag/judo|creationDate|2012-07-25
+http://www.semanlink.net/tag/judo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/judo|uri|http://www.semanlink.net/tag/judo
+http://www.semanlink.net/tag/explorateur|prefLabel|Explorateur
+http://www.semanlink.net/tag/explorateur|broader|http://www.semanlink.net/tag/grand_voyageur
+http://www.semanlink.net/tag/explorateur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/explorateur|uri|http://www.semanlink.net/tag/explorateur
+http://www.semanlink.net/tag/explorateur|broader_prefLabel|Grand voyageur
+http://www.semanlink.net/tag/macron|creationTime|2016-06-05T00:20:23Z
+http://www.semanlink.net/tag/macron|prefLabel|Macron
+http://www.semanlink.net/tag/macron|broader|http://www.semanlink.net/tag/homme_politique
+http://www.semanlink.net/tag/macron|creationDate|2016-06-05
+http://www.semanlink.net/tag/macron|comment|inactivisme climatique, procrastivaccination, quoi qu'il en coûte.
+http://www.semanlink.net/tag/macron|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/macron|uri|http://www.semanlink.net/tag/macron
+http://www.semanlink.net/tag/macron|broader_prefLabel|Homme politique
+http://www.semanlink.net/tag/construction_europeenne|prefLabel|Construction européenne
+http://www.semanlink.net/tag/construction_europeenne|broader|http://www.semanlink.net/tag/europe
+http://www.semanlink.net/tag/construction_europeenne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/construction_europeenne|uri|http://www.semanlink.net/tag/construction_europeenne
+http://www.semanlink.net/tag/construction_europeenne|broader_prefLabel|Europe
+http://www.semanlink.net/tag/chris_bizer|creationTime|2007-03-12T23:40:22Z
+http://www.semanlink.net/tag/chris_bizer|prefLabel|Chris Bizer
+http://www.semanlink.net/tag/chris_bizer|broader|http://www.semanlink.net/tag/freie_universitat_berlin
+http://www.semanlink.net/tag/chris_bizer|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/chris_bizer|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/chris_bizer|creationDate|2007-03-12
+http://www.semanlink.net/tag/chris_bizer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/chris_bizer|uri|http://www.semanlink.net/tag/chris_bizer
+http://www.semanlink.net/tag/chris_bizer|broader_prefLabel|Freie Universität Berlin
+http://www.semanlink.net/tag/chris_bizer|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/chris_bizer|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/chris_bizer|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/chris_bizer|broader_related|http://www.semanlink.net/tag/richard_cyganiak
+http://www.semanlink.net/tag/aspect_based_sentiment_analysis|creationTime|2021-10-21T15:40:58Z
+http://www.semanlink.net/tag/aspect_based_sentiment_analysis|prefLabel|Aspect-Based Sentiment Analysis
+http://www.semanlink.net/tag/aspect_based_sentiment_analysis|broader|http://www.semanlink.net/tag/sentiment_analysis
+http://www.semanlink.net/tag/aspect_based_sentiment_analysis|broader|http://www.semanlink.net/tag/aspect_nlp
+http://www.semanlink.net/tag/aspect_based_sentiment_analysis|creationDate|2021-10-21
+http://www.semanlink.net/tag/aspect_based_sentiment_analysis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/aspect_based_sentiment_analysis|uri|http://www.semanlink.net/tag/aspect_based_sentiment_analysis
+http://www.semanlink.net/tag/aspect_based_sentiment_analysis|broader_prefLabel|Sentiment analysis
+http://www.semanlink.net/tag/aspect_based_sentiment_analysis|broader_prefLabel|Aspect (NLP)
+http://www.semanlink.net/tag/nuxeo|creationTime|2012-08-02T09:58:48Z
+http://www.semanlink.net/tag/nuxeo|prefLabel|Nuxeo
+http://www.semanlink.net/tag/nuxeo|broader|http://www.semanlink.net/tag/enterprise_content_management
+http://www.semanlink.net/tag/nuxeo|related|http://www.semanlink.net/tag/interactive_knowledge_stack
+http://www.semanlink.net/tag/nuxeo|related|http://www.semanlink.net/tag/olivier_grisel
+http://www.semanlink.net/tag/nuxeo|creationDate|2012-08-02
+http://www.semanlink.net/tag/nuxeo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nuxeo|homepage|http://www.nuxeo.com
+http://www.semanlink.net/tag/nuxeo|uri|http://www.semanlink.net/tag/nuxeo
+http://www.semanlink.net/tag/nuxeo|broader_prefLabel|Enterprise Content Management
+http://www.semanlink.net/tag/nuxeo|broader_altLabel|ECM
+http://www.semanlink.net/tag/marisa_monte|creationTime|2021-08-13T11:38:22Z
+http://www.semanlink.net/tag/marisa_monte|prefLabel|Marisa Monte
+http://www.semanlink.net/tag/marisa_monte|broader|http://www.semanlink.net/tag/musique_bresilienne
+http://www.semanlink.net/tag/marisa_monte|creationDate|2021-08-13
+http://www.semanlink.net/tag/marisa_monte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/marisa_monte|uri|http://www.semanlink.net/tag/marisa_monte
+http://www.semanlink.net/tag/marisa_monte|broader_prefLabel|Musique brésilienne
+http://www.semanlink.net/tag/projet_pharaonique|creationTime|2020-09-10T20:52:03Z
+http://www.semanlink.net/tag/projet_pharaonique|prefLabel|Projet pharaonique
+http://www.semanlink.net/tag/projet_pharaonique|creationDate|2020-09-10
+http://www.semanlink.net/tag/projet_pharaonique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/projet_pharaonique|uri|http://www.semanlink.net/tag/projet_pharaonique
+http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque|creationTime|2012-05-08T11:51:31Z
+http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque|prefLabel|Crise de la dette publique grecque
+http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque|broader|http://www.semanlink.net/tag/crise_de_la_dette
+http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque|creationDate|2012-05-08
+http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque|describedBy|https://fr.wikipedia.org/wiki/Crise_de_la_dette_publique_grecque
+http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque|uri|http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque
+http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque|broader_prefLabel|Crise de la dette
+http://www.semanlink.net/tag/sarkozyland|creationTime|2009-06-05T23:28:49Z
+http://www.semanlink.net/tag/sarkozyland|prefLabel|Sarkozyland
+http://www.semanlink.net/tag/sarkozyland|broader|http://www.semanlink.net/tag/sarkozy
+http://www.semanlink.net/tag/sarkozyland|creationDate|2009-06-05
+http://www.semanlink.net/tag/sarkozyland|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sarkozyland|uri|http://www.semanlink.net/tag/sarkozyland
+http://www.semanlink.net/tag/sarkozyland|broader_prefLabel|Sarkozy
+http://www.semanlink.net/tag/economie_allemande|prefLabel|Economie allemande
+http://www.semanlink.net/tag/economie_allemande|broader|http://www.semanlink.net/tag/allemagne
+http://www.semanlink.net/tag/economie_allemande|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/economie_allemande|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/economie_allemande|uri|http://www.semanlink.net/tag/economie_allemande
+http://www.semanlink.net/tag/economie_allemande|broader_prefLabel|Allemagne
+http://www.semanlink.net/tag/economie_allemande|broader_prefLabel|Economie
+http://www.semanlink.net/tag/economie_allemande|broader_altLabel|Germany
+http://www.semanlink.net/tag/economie_allemande|broader_altLabel|Deutschland
+http://www.semanlink.net/tag/economie_allemande|broader_related|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/richard_socher|creationTime|2017-09-18T15:06:27Z
+http://www.semanlink.net/tag/richard_socher|prefLabel|Richard Socher
+http://www.semanlink.net/tag/richard_socher|broader|http://www.semanlink.net/tag/nlp_girls_and_guys
+http://www.semanlink.net/tag/richard_socher|related|http://www.semanlink.net/tag/nlp_stanford
+http://www.semanlink.net/tag/richard_socher|related|http://www.semanlink.net/tag/salesforce
+http://www.semanlink.net/tag/richard_socher|related|http://www.semanlink.net/tag/chris_manning
+http://www.semanlink.net/tag/richard_socher|creationDate|2017-09-18
+http://www.semanlink.net/tag/richard_socher|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/richard_socher|homepage|http://www.socher.org/
+http://www.semanlink.net/tag/richard_socher|uri|http://www.semanlink.net/tag/richard_socher
+http://www.semanlink.net/tag/richard_socher|broader_prefLabel|NLP girls and guys
+http://www.semanlink.net/tag/brains_in_silicon|creationTime|2017-09-07T13:04:47Z
+http://www.semanlink.net/tag/brains_in_silicon|prefLabel|Bio inspired computing devices
+http://www.semanlink.net/tag/brains_in_silicon|broader|http://www.semanlink.net/tag/cerveau
+http://www.semanlink.net/tag/brains_in_silicon|broader|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne
+http://www.semanlink.net/tag/brains_in_silicon|creationDate|2017-09-07
+http://www.semanlink.net/tag/brains_in_silicon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/brains_in_silicon|altLabel|Neuromorphic engineering
+http://www.semanlink.net/tag/brains_in_silicon|altLabel|Brains in silicon
+http://www.semanlink.net/tag/brains_in_silicon|altLabel|Neuromorphique
+http://www.semanlink.net/tag/brains_in_silicon|uri|http://www.semanlink.net/tag/brains_in_silicon
+http://www.semanlink.net/tag/brains_in_silicon|broader_prefLabel|Brain
+http://www.semanlink.net/tag/brains_in_silicon|broader_prefLabel|Nous vivons une époque moderne
+http://www.semanlink.net/tag/brains_in_silicon|broader_altLabel|Cerveau
+http://www.semanlink.net/tag/brains_in_silicon|broader_altLabel|C'est déjà demain
+http://www.semanlink.net/tag/brains_in_silicon|broader_related|http://www.semanlink.net/tag/computational_neuroscience
+http://www.semanlink.net/tag/google_fusion_tables|creationTime|2012-12-07T10:56:42Z
+http://www.semanlink.net/tag/google_fusion_tables|prefLabel|Google Fusion Tables
+http://www.semanlink.net/tag/google_fusion_tables|broader|http://www.semanlink.net/tag/tables
+http://www.semanlink.net/tag/google_fusion_tables|broader|http://www.semanlink.net/tag/google
+http://www.semanlink.net/tag/google_fusion_tables|creationDate|2012-12-07
+http://www.semanlink.net/tag/google_fusion_tables|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/google_fusion_tables|uri|http://www.semanlink.net/tag/google_fusion_tables
+http://www.semanlink.net/tag/google_fusion_tables|broader_prefLabel|Tables
+http://www.semanlink.net/tag/google_fusion_tables|broader_prefLabel|Google
+http://www.semanlink.net/tag/google_fusion_tables|broader_related|http://www.semanlink.net/tag/search_engines
+http://www.semanlink.net/tag/jena_tdb|creationTime|2008-10-05T22:39:03Z
+http://www.semanlink.net/tag/jena_tdb|prefLabel|Jena TDB
+http://www.semanlink.net/tag/jena_tdb|broader|http://www.semanlink.net/tag/sparql_and_jena
+http://www.semanlink.net/tag/jena_tdb|broader|http://www.semanlink.net/tag/jena
+http://www.semanlink.net/tag/jena_tdb|broader|http://www.semanlink.net/tag/jena_and_database
+http://www.semanlink.net/tag/jena_tdb|broader|http://www.semanlink.net/tag/triplestore
+http://www.semanlink.net/tag/jena_tdb|creationDate|2008-10-05
+http://www.semanlink.net/tag/jena_tdb|comment|"TDB is a component of Jena for RDF storage and query, as well as the full range of Jena APIs.
+TDB can be used as a high performance, non-transactional, RDF store on a single machine.
+"
+http://www.semanlink.net/tag/jena_tdb|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/jena_tdb|describedBy|http://jena.sourceforge.net/TDB/
+http://www.semanlink.net/tag/jena_tdb|altLabel|TDB
+http://www.semanlink.net/tag/jena_tdb|uri|http://www.semanlink.net/tag/jena_tdb
+http://www.semanlink.net/tag/jena_tdb|broader_prefLabel|SPARQL AND Jena
+http://www.semanlink.net/tag/jena_tdb|broader_prefLabel|Jena
+http://www.semanlink.net/tag/jena_tdb|broader_prefLabel|Jena and database
+http://www.semanlink.net/tag/jena_tdb|broader_prefLabel|TripleStore
+http://www.semanlink.net/tag/jena_tdb|broader_altLabel|RDF database
+http://www.semanlink.net/tag/jena_tdb|broader_related|http://www.semanlink.net/tag/andy_seaborne
+http://www.semanlink.net/tag/jena_tdb|broader_related|http://www.semanlink.net/tag/hp
+http://www.semanlink.net/tag/ambiguity_nlp|creationTime|2019-05-28T16:42:54Z
+http://www.semanlink.net/tag/ambiguity_nlp|prefLabel|Ambiguity (NLP)
+http://www.semanlink.net/tag/ambiguity_nlp|broader|http://www.semanlink.net/tag/nlp_problem
+http://www.semanlink.net/tag/ambiguity_nlp|creationDate|2019-05-28
+http://www.semanlink.net/tag/ambiguity_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ambiguity_nlp|uri|http://www.semanlink.net/tag/ambiguity_nlp
+http://www.semanlink.net/tag/ambiguity_nlp|broader_prefLabel|NLP tasks / problems
+http://www.semanlink.net/tag/peter_mika|creationTime|2012-05-10T08:51:20Z
+http://www.semanlink.net/tag/peter_mika|prefLabel|Peter Mika
+http://www.semanlink.net/tag/peter_mika|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/peter_mika|broader|http://www.semanlink.net/tag/yahoo
+http://www.semanlink.net/tag/peter_mika|creationDate|2012-05-10
+http://www.semanlink.net/tag/peter_mika|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/peter_mika|uri|http://www.semanlink.net/tag/peter_mika
+http://www.semanlink.net/tag/peter_mika|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/peter_mika|broader_prefLabel|Yahoo!
+http://www.semanlink.net/tag/atoll|creationTime|2013-06-01T21:39:15Z
+http://www.semanlink.net/tag/atoll|prefLabel|Atoll
+http://www.semanlink.net/tag/atoll|broader|http://www.semanlink.net/tag/ocean
+http://www.semanlink.net/tag/atoll|creationDate|2013-06-01
+http://www.semanlink.net/tag/atoll|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/atoll|uri|http://www.semanlink.net/tag/atoll
+http://www.semanlink.net/tag/atoll|broader_prefLabel|Océan
+http://www.semanlink.net/tag/plante|prefLabel|Plante
+http://www.semanlink.net/tag/plante|broader|http://www.semanlink.net/tag/nature
+http://www.semanlink.net/tag/plante|broader|http://www.semanlink.net/tag/botanique
+http://www.semanlink.net/tag/plante|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/plante|uri|http://www.semanlink.net/tag/plante
+http://www.semanlink.net/tag/plante|broader_prefLabel|Nature
+http://www.semanlink.net/tag/plante|broader_prefLabel|Botanique
+http://www.semanlink.net/tag/chomage|prefLabel|Chômage
+http://www.semanlink.net/tag/chomage|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/chomage|broader|http://www.semanlink.net/tag/societe
+http://www.semanlink.net/tag/chomage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/chomage|uri|http://www.semanlink.net/tag/chomage
+http://www.semanlink.net/tag/chomage|broader_prefLabel|Economie
+http://www.semanlink.net/tag/chomage|broader_prefLabel|Société
+http://www.semanlink.net/tag/chomage|broader_related|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/bayer|creationTime|2021-03-27T21:10:24Z
+http://www.semanlink.net/tag/bayer|prefLabel|Bayer
+http://www.semanlink.net/tag/bayer|creationDate|2021-03-27
+http://www.semanlink.net/tag/bayer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/bayer|uri|http://www.semanlink.net/tag/bayer
+http://www.semanlink.net/tag/second_life|prefLabel|Second Life
+http://www.semanlink.net/tag/second_life|broader|http://www.semanlink.net/tag/massively_multiplayer_online_games
+http://www.semanlink.net/tag/second_life|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/second_life|uri|http://www.semanlink.net/tag/second_life
+http://www.semanlink.net/tag/second_life|broader_prefLabel|Massively multiplayer online games
+http://www.semanlink.net/tag/second_life|broader_altLabel|MMOG
+http://www.semanlink.net/tag/sentence_similarity|creationTime|2018-05-07T16:04:09Z
+http://www.semanlink.net/tag/sentence_similarity|prefLabel|Sentence Similarity
+http://www.semanlink.net/tag/sentence_similarity|broader|http://www.semanlink.net/tag/text_similarity
+http://www.semanlink.net/tag/sentence_similarity|related|http://www.semanlink.net/tag/doc2vec
+http://www.semanlink.net/tag/sentence_similarity|related|http://www.semanlink.net/tag/sentence_embeddings
+http://www.semanlink.net/tag/sentence_similarity|creationDate|2018-05-07
+http://www.semanlink.net/tag/sentence_similarity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sentence_similarity|uri|http://www.semanlink.net/tag/sentence_similarity
+http://www.semanlink.net/tag/sentence_similarity|broader_prefLabel|Text Similarity
+http://www.semanlink.net/tag/sentence_similarity|broader_related|http://www.semanlink.net/tag/nlp_text_classification
+http://www.semanlink.net/tag/sentence_similarity|broader_related|http://www.semanlink.net/tag/nlp_and_search
+http://www.semanlink.net/tag/sentence_similarity|broader_related|http://www.semanlink.net/tag/okapi_bm25
+http://www.semanlink.net/tag/phase_transition|creationTime|2020-09-17T23:50:10Z
+http://www.semanlink.net/tag/phase_transition|prefLabel|Phase transition
+http://www.semanlink.net/tag/phase_transition|broader|http://www.semanlink.net/tag/physique
+http://www.semanlink.net/tag/phase_transition|broader|http://www.semanlink.net/tag/statistical_physics
+http://www.semanlink.net/tag/phase_transition|creationDate|2020-09-17
+http://www.semanlink.net/tag/phase_transition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/phase_transition|uri|http://www.semanlink.net/tag/phase_transition
+http://www.semanlink.net/tag/phase_transition|broader_prefLabel|Physique
+http://www.semanlink.net/tag/phase_transition|broader_prefLabel|Statistical physics
+http://www.semanlink.net/tag/phase_transition|broader_altLabel|Physics
+http://www.semanlink.net/tag/chatbot|creationTime|2016-05-14T19:45:54Z
+http://www.semanlink.net/tag/chatbot|prefLabel|Chatbots
+http://www.semanlink.net/tag/chatbot|broader|http://www.semanlink.net/tag/nlp_use_cases
+http://www.semanlink.net/tag/chatbot|broader|http://www.semanlink.net/tag/conversational_ai
+http://www.semanlink.net/tag/chatbot|creationDate|2016-05-14
+http://www.semanlink.net/tag/chatbot|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/chatbot|describedBy|https://en.wikipedia.org/wiki/Chatbot
+http://www.semanlink.net/tag/chatbot|altLabel|Chatbot
+http://www.semanlink.net/tag/chatbot|uri|http://www.semanlink.net/tag/chatbot
+http://www.semanlink.net/tag/chatbot|broader_prefLabel|NLP: use cases
+http://www.semanlink.net/tag/chatbot|broader_prefLabel|Conversational AI
+http://www.semanlink.net/tag/chatbot|broader_altLabel|NLP: applications
+http://www.semanlink.net/tag/deep_nlp|creationTime|2017-02-07T13:59:42Z
+http://www.semanlink.net/tag/deep_nlp|prefLabel|Deep NLP
+http://www.semanlink.net/tag/deep_nlp|broader|http://www.semanlink.net/tag/nn_4_nlp
+http://www.semanlink.net/tag/deep_nlp|broader|http://www.semanlink.net/tag/nlp
+http://www.semanlink.net/tag/deep_nlp|broader|http://www.semanlink.net/tag/deep_learning
+http://www.semanlink.net/tag/deep_nlp|creationDate|2017-02-07
+http://www.semanlink.net/tag/deep_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/deep_nlp|uri|http://www.semanlink.net/tag/deep_nlp
+http://www.semanlink.net/tag/deep_nlp|broader_prefLabel|NN 4 NLP
+http://www.semanlink.net/tag/deep_nlp|broader_prefLabel|NLP
+http://www.semanlink.net/tag/deep_nlp|broader_prefLabel|Deep Learning
+http://www.semanlink.net/tag/deep_nlp|broader_altLabel|TALN
+http://www.semanlink.net/tag/deep_nlp|broader_altLabel|Traitement Automatique du Langage Naturel
+http://www.semanlink.net/tag/deep_nlp|broader_altLabel|Natural Language Processing
+http://www.semanlink.net/tag/deep_nlp|broader_related|http://www.semanlink.net/tag/feature_extraction
+http://www.semanlink.net/tag/deep_nlp|broader_related|http://www.semanlink.net/tag/feature_learning
+http://www.semanlink.net/tag/re_decentralize_the_web|creationTime|2017-12-21T00:42:33Z
+http://www.semanlink.net/tag/re_decentralize_the_web|prefLabel|(Re-)decentralize the Web
+http://www.semanlink.net/tag/re_decentralize_the_web|creationDate|2017-12-21
+http://www.semanlink.net/tag/re_decentralize_the_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/re_decentralize_the_web|uri|http://www.semanlink.net/tag/re_decentralize_the_web
+http://www.semanlink.net/tag/hydrogen_cars|creationTime|2014-11-18T13:16:53Z
+http://www.semanlink.net/tag/hydrogen_cars|prefLabel|Hydrogen Cars
+http://www.semanlink.net/tag/hydrogen_cars|broader|http://www.semanlink.net/tag/hydrogen_economy
+http://www.semanlink.net/tag/hydrogen_cars|broader|http://www.semanlink.net/tag/automobile
+http://www.semanlink.net/tag/hydrogen_cars|creationDate|2014-11-18
+http://www.semanlink.net/tag/hydrogen_cars|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/hydrogen_cars|altLabel|Voiture à hydrogène
+http://www.semanlink.net/tag/hydrogen_cars|uri|http://www.semanlink.net/tag/hydrogen_cars
+http://www.semanlink.net/tag/hydrogen_cars|broader_prefLabel|Hydrogen economy
+http://www.semanlink.net/tag/hydrogen_cars|broader_prefLabel|Automobile
+http://www.semanlink.net/tag/hydrogen_cars|broader_altLabel|Automotive
+http://www.semanlink.net/tag/gaussian_process|creationTime|2019-02-11T11:43:40Z
+http://www.semanlink.net/tag/gaussian_process|prefLabel|Gaussian process
+http://www.semanlink.net/tag/gaussian_process|creationDate|2019-02-11
+http://www.semanlink.net/tag/gaussian_process|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/gaussian_process|describedBy|https://en.wikipedia.org/wiki/Gaussian_process
+http://www.semanlink.net/tag/gaussian_process|uri|http://www.semanlink.net/tag/gaussian_process
+http://www.semanlink.net/tag/semantic_enterprise|creationTime|2009-01-27T20:57:55Z
+http://www.semanlink.net/tag/semantic_enterprise|prefLabel|Semantic Enterprise
+http://www.semanlink.net/tag/semantic_enterprise|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/semantic_enterprise|creationDate|2009-01-27
+http://www.semanlink.net/tag/semantic_enterprise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_enterprise|altLabel|Enterprise Semantic Web
+http://www.semanlink.net/tag/semantic_enterprise|altLabel|Corporate Semantic Web
+http://www.semanlink.net/tag/semantic_enterprise|altLabel|Semantic Web in the enterprise
+http://www.semanlink.net/tag/semantic_enterprise|uri|http://www.semanlink.net/tag/semantic_enterprise
+http://www.semanlink.net/tag/semantic_enterprise|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semantic_enterprise|broader_altLabel|sw
+http://www.semanlink.net/tag/semantic_enterprise|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/homme_politique|prefLabel|Homme politique
+http://www.semanlink.net/tag/homme_politique|broader|http://www.semanlink.net/tag/politique
+http://www.semanlink.net/tag/homme_politique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/homme_politique|uri|http://www.semanlink.net/tag/homme_politique
+http://www.semanlink.net/tag/homme_politique|broader_prefLabel|Politique
+http://www.semanlink.net/tag/neandertal|prefLabel|Néandertal
+http://www.semanlink.net/tag/neandertal|broader|http://www.semanlink.net/tag/origines_de_l_homme
+http://www.semanlink.net/tag/neandertal|broader|http://www.semanlink.net/tag/paleoanthropology
+http://www.semanlink.net/tag/neandertal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/neandertal|uri|http://www.semanlink.net/tag/neandertal
+http://www.semanlink.net/tag/neandertal|broader_prefLabel|Origines de l'homme
+http://www.semanlink.net/tag/neandertal|broader_prefLabel|Paleoanthropology
+http://www.semanlink.net/tag/neandertal|broader_altLabel|Paléontologie humaine
+http://www.semanlink.net/tag/car_options_ontology|creationTime|2011-03-24T16:39:57Z
+http://www.semanlink.net/tag/car_options_ontology|prefLabel|Car Options Ontology
+http://www.semanlink.net/tag/car_options_ontology|broader|http://www.semanlink.net/tag/goodrelations
+http://www.semanlink.net/tag/car_options_ontology|broader|http://www.semanlink.net/tag/volkswagen
+http://www.semanlink.net/tag/car_options_ontology|broader|http://www.semanlink.net/tag/automotive_ontologies
+http://www.semanlink.net/tag/car_options_ontology|creationDate|2011-03-24
+http://www.semanlink.net/tag/car_options_ontology|comment|"OK, but where are the data about VW range? Partial stuff here: http://www.volkswagen.co.uk/new/polo-v/which-model/compare/interior
+"
+http://www.semanlink.net/tag/car_options_ontology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/car_options_ontology|altLabel|COO
+http://www.semanlink.net/tag/car_options_ontology|uri|http://www.semanlink.net/tag/car_options_ontology
+http://www.semanlink.net/tag/car_options_ontology|broader_prefLabel|GoodRelations
+http://www.semanlink.net/tag/car_options_ontology|broader_prefLabel|Volkswagen
+http://www.semanlink.net/tag/car_options_ontology|broader_prefLabel|Automotive ontologies
+http://www.semanlink.net/tag/car_options_ontology|broader_altLabel|VW
+http://www.semanlink.net/tag/car_options_ontology|broader_altLabel|Car ontology
+http://www.semanlink.net/tag/car_options_ontology|broader_related|http://www.semanlink.net/tag/martin_hepp
+http://www.semanlink.net/tag/droit_d_auteur|prefLabel|Droit d'auteur
+http://www.semanlink.net/tag/droit_d_auteur|broader|http://www.semanlink.net/tag/propriete_intellectuelle
+http://www.semanlink.net/tag/droit_d_auteur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/droit_d_auteur|uri|http://www.semanlink.net/tag/droit_d_auteur
+http://www.semanlink.net/tag/droit_d_auteur|broader_prefLabel|Propriété intellectuelle
+http://www.semanlink.net/tag/palestine|creationTime|2018-03-04T10:41:22Z
+http://www.semanlink.net/tag/palestine|prefLabel|Palestine
+http://www.semanlink.net/tag/palestine|creationDate|2018-03-04
+http://www.semanlink.net/tag/palestine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/palestine|uri|http://www.semanlink.net/tag/palestine
+http://www.semanlink.net/tag/criquet|prefLabel|Criquet
+http://www.semanlink.net/tag/criquet|broader|http://www.semanlink.net/tag/insecte
+http://www.semanlink.net/tag/criquet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/criquet|uri|http://www.semanlink.net/tag/criquet
+http://www.semanlink.net/tag/criquet|broader_prefLabel|Insecte
+http://www.semanlink.net/tag/denny_britz|creationTime|2017-11-06T18:43:20Z
+http://www.semanlink.net/tag/denny_britz|prefLabel|Denny Britz
+http://www.semanlink.net/tag/denny_britz|broader|http://www.semanlink.net/tag/nlp_girls_and_guys
+http://www.semanlink.net/tag/denny_britz|broader|http://www.semanlink.net/tag/ai_girls_and_guys
+http://www.semanlink.net/tag/denny_britz|related|http://www.semanlink.net/tag/apache_spark
+http://www.semanlink.net/tag/denny_britz|related|http://www.semanlink.net/tag/google_brain
+http://www.semanlink.net/tag/denny_britz|creationDate|2017-11-06
+http://www.semanlink.net/tag/denny_britz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/denny_britz|weblog|http://www.wildml.com
+http://www.semanlink.net/tag/denny_britz|uri|http://www.semanlink.net/tag/denny_britz
+http://www.semanlink.net/tag/denny_britz|broader_prefLabel|NLP girls and guys
+http://www.semanlink.net/tag/denny_britz|broader_prefLabel|AI girls and guys
+http://www.semanlink.net/tag/alistair_miles|creationTime|2007-11-20T21:51:53Z
+http://www.semanlink.net/tag/alistair_miles|prefLabel|Alistair Miles
+http://www.semanlink.net/tag/alistair_miles|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/alistair_miles|related|http://www.semanlink.net/tag/skos
+http://www.semanlink.net/tag/alistair_miles|creationDate|2007-11-20
+http://www.semanlink.net/tag/alistair_miles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/alistair_miles|uri|http://www.semanlink.net/tag/alistair_miles
+http://www.semanlink.net/tag/alistair_miles|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/what_could_go_wrong|creationTime|2018-05-20T18:18:42Z
+http://www.semanlink.net/tag/what_could_go_wrong|prefLabel|What could go wrong?
+http://www.semanlink.net/tag/what_could_go_wrong|creationDate|2018-05-20
+http://www.semanlink.net/tag/what_could_go_wrong|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/what_could_go_wrong|uri|http://www.semanlink.net/tag/what_could_go_wrong
+http://www.semanlink.net/tag/reinhard_mey|creationTime|2014-11-11T23:31:40Z
+http://www.semanlink.net/tag/reinhard_mey|prefLabel|Reinhard Mey
+http://www.semanlink.net/tag/reinhard_mey|broader|http://www.semanlink.net/tag/chanson
+http://www.semanlink.net/tag/reinhard_mey|broader|http://www.semanlink.net/tag/allemagne
+http://www.semanlink.net/tag/reinhard_mey|related|http://www.semanlink.net/tag/yves_roth
+http://www.semanlink.net/tag/reinhard_mey|creationDate|2014-11-11
+http://www.semanlink.net/tag/reinhard_mey|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/reinhard_mey|describedBy|https://de.wikipedia.org/wiki/Reinhard_Mey
+http://www.semanlink.net/tag/reinhard_mey|uri|http://www.semanlink.net/tag/reinhard_mey
+http://www.semanlink.net/tag/reinhard_mey|broader_prefLabel|Chanson
+http://www.semanlink.net/tag/reinhard_mey|broader_prefLabel|Allemagne
+http://www.semanlink.net/tag/reinhard_mey|broader_altLabel|Germany
+http://www.semanlink.net/tag/reinhard_mey|broader_altLabel|Deutschland
+http://www.semanlink.net/tag/continent_de_plastique|creationTime|2013-04-04T01:14:56Z
+http://www.semanlink.net/tag/continent_de_plastique|prefLabel|Continent de plastique
+http://www.semanlink.net/tag/continent_de_plastique|broader|http://www.semanlink.net/tag/pollution_des_oceans
+http://www.semanlink.net/tag/continent_de_plastique|broader|http://www.semanlink.net/tag/plastic
+http://www.semanlink.net/tag/continent_de_plastique|creationDate|2013-04-04
+http://www.semanlink.net/tag/continent_de_plastique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/continent_de_plastique|uri|http://www.semanlink.net/tag/continent_de_plastique
+http://www.semanlink.net/tag/continent_de_plastique|broader_prefLabel|Pollution des océans
+http://www.semanlink.net/tag/continent_de_plastique|broader_prefLabel|Plastic
+http://www.semanlink.net/tag/continent_de_plastique|broader_altLabel|Plastique
+http://www.semanlink.net/tag/antilles|prefLabel|Antilles
+http://www.semanlink.net/tag/antilles|broader|http://www.semanlink.net/tag/amerique
+http://www.semanlink.net/tag/antilles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/antilles|uri|http://www.semanlink.net/tag/antilles
+http://www.semanlink.net/tag/antilles|broader_prefLabel|Amérique
+http://www.semanlink.net/tag/ijcai|creationTime|2018-07-23T11:33:01Z
+http://www.semanlink.net/tag/ijcai|prefLabel|IJCAI
+http://www.semanlink.net/tag/ijcai|broader|http://www.semanlink.net/tag/ai_conference
+http://www.semanlink.net/tag/ijcai|creationDate|2018-07-23
+http://www.semanlink.net/tag/ijcai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ijcai|uri|http://www.semanlink.net/tag/ijcai
+http://www.semanlink.net/tag/ijcai|broader_prefLabel|AI Conference
+http://www.semanlink.net/tag/supernova|prefLabel|Supernova
+http://www.semanlink.net/tag/supernova|broader|http://www.semanlink.net/tag/explosions_cosmiques
+http://www.semanlink.net/tag/supernova|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/supernova|uri|http://www.semanlink.net/tag/supernova
+http://www.semanlink.net/tag/supernova|broader_prefLabel|Explosions cosmiques
+http://www.semanlink.net/tag/gouvernement_francais|creationTime|2014-03-27T16:19:36Z
+http://www.semanlink.net/tag/gouvernement_francais|prefLabel|Gouvernement français
+http://www.semanlink.net/tag/gouvernement_francais|broader|http://www.semanlink.net/tag/france
+http://www.semanlink.net/tag/gouvernement_francais|creationDate|2014-03-27
+http://www.semanlink.net/tag/gouvernement_francais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/gouvernement_francais|uri|http://www.semanlink.net/tag/gouvernement_francais
+http://www.semanlink.net/tag/gouvernement_francais|broader_prefLabel|France
+http://www.semanlink.net/tag/stacking_ensemble_learning|creationTime|2019-07-04T01:40:11Z
+http://www.semanlink.net/tag/stacking_ensemble_learning|prefLabel|Stacking (ensemble learning)
+http://www.semanlink.net/tag/stacking_ensemble_learning|broader|http://www.semanlink.net/tag/ensemble_learning
+http://www.semanlink.net/tag/stacking_ensemble_learning|creationDate|2019-07-04
+http://www.semanlink.net/tag/stacking_ensemble_learning|comment|"ensemble learning technique that combines multiple classification or regression models via a meta-classifier or a meta-regressor. The base level models are trained based on a complete training set, then the meta-model is trained on the outputs of the base level model as features.
([source](/doc/2019/07/ensemble_learning_to_improve_ma)) +" +http://www.semanlink.net/tag/stacking_ensemble_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stacking_ensemble_learning|uri|http://www.semanlink.net/tag/stacking_ensemble_learning +http://www.semanlink.net/tag/stacking_ensemble_learning|broader_prefLabel|Ensemble learning +http://www.semanlink.net/tag/meta_content_framework|prefLabel|Meta Content Framework +http://www.semanlink.net/tag/meta_content_framework|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/meta_content_framework|broader|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/meta_content_framework|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/meta_content_framework|altLabel|MCF +http://www.semanlink.net/tag/meta_content_framework|uri|http://www.semanlink.net/tag/meta_content_framework +http://www.semanlink.net/tag/meta_content_framework|broader_prefLabel|Apple +http://www.semanlink.net/tag/meta_content_framework|broader_prefLabel|Guha +http://www.semanlink.net/tag/meta_content_framework|broader_related|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/meta_content_framework|broader_related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/w3c|prefLabel|W3C +http://www.semanlink.net/tag/w3c|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/w3c|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/w3c|related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/w3c|related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/w3c|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/w3c|homepage|http://www.w3.org +http://www.semanlink.net/tag/w3c|uri|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/w3c|broader_prefLabel|Technologie +http://www.semanlink.net/tag/w3c|broader_prefLabel|Internet +http://www.semanlink.net/tag/henri_bergius|creationTime|2012-02-07T20:58:51Z +http://www.semanlink.net/tag/henri_bergius|prefLabel|Henri Bergius +http://www.semanlink.net/tag/henri_bergius|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/henri_bergius|related|http://www.semanlink.net/tag/interactive_knowledge_stack +http://www.semanlink.net/tag/henri_bergius|related|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://www.semanlink.net/tag/henri_bergius|related|http://www.semanlink.net/tag/finlande +http://www.semanlink.net/tag/henri_bergius|creationDate|2012-02-07 +http://www.semanlink.net/tag/henri_bergius|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/henri_bergius|homepage|http://bergie.iki.fi/ +http://www.semanlink.net/tag/henri_bergius|weblog|http://bergie.iki.fi/ +http://www.semanlink.net/tag/henri_bergius|uri|http://www.semanlink.net/tag/henri_bergius +http://www.semanlink.net/tag/henri_bergius|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/ml_engineering|creationTime|2020-01-21T16:40:37Z +http://www.semanlink.net/tag/ml_engineering|prefLabel|ML Engineering +http://www.semanlink.net/tag/ml_engineering|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/ml_engineering|creationDate|2020-01-21 +http://www.semanlink.net/tag/ml_engineering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/ml_engineering|uri|http://www.semanlink.net/tag/ml_engineering +http://www.semanlink.net/tag/ml_engineering|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/ml_engineering|broader_altLabel|ML +http://www.semanlink.net/tag/ml_engineering|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/ml_engineering|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/redis|creationTime|2018-07-12T23:23:31Z +http://www.semanlink.net/tag/redis|prefLabel|Redis +http://www.semanlink.net/tag/redis|broader|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/redis|creationDate|2018-07-12 +http://www.semanlink.net/tag/redis|comment|open-source in-memory database project implementing a distributed, in-memory key-value store with optional durability +http://www.semanlink.net/tag/redis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/redis|homepage|https://redis.io/ +http://www.semanlink.net/tag/redis|describedBy|https://en.wikipedia.org/wiki/Redis +http://www.semanlink.net/tag/redis|uri|http://www.semanlink.net/tag/redis +http://www.semanlink.net/tag/redis|broader_prefLabel|NOSQL +http://www.semanlink.net/tag/redis|broader_related|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/baikal|prefLabel|Baïkal +http://www.semanlink.net/tag/baikal|broader|http://www.semanlink.net/tag/eau +http://www.semanlink.net/tag/baikal|broader|http://www.semanlink.net/tag/russie +http://www.semanlink.net/tag/baikal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/baikal|uri|http://www.semanlink.net/tag/baikal +http://www.semanlink.net/tag/baikal|broader_prefLabel|Eau +http://www.semanlink.net/tag/baikal|broader_prefLabel|Russie +http://www.semanlink.net/tag/knowledge_graph_completion|creationTime|2018-01-27T13:30:38Z +http://www.semanlink.net/tag/knowledge_graph_completion|prefLabel|Knowledge Graph Completion +http://www.semanlink.net/tag/knowledge_graph_completion|broader|http://www.semanlink.net/tag/knowledge_discovery +http://www.semanlink.net/tag/knowledge_graph_completion|broader|http://www.semanlink.net/tag/kg_tasks +http://www.semanlink.net/tag/knowledge_graph_completion|related|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/knowledge_graph_completion|creationDate|2018-01-27 +http://www.semanlink.net/tag/knowledge_graph_completion|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_graph_completion|uri|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/knowledge_graph_completion|broader_prefLabel|Knowledge Discovery +http://www.semanlink.net/tag/knowledge_graph_completion|broader_prefLabel|KG: tasks +http://www.semanlink.net/tag/knowledge_graph_completion|broader_altLabel|Knowledge graphs: tasks +http://www.semanlink.net/tag/pain|creationTime|2014-10-26T22:08:16Z +http://www.semanlink.net/tag/pain|prefLabel|Pain +http://www.semanlink.net/tag/pain|creationDate|2014-10-26 +http://www.semanlink.net/tag/pain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pain|uri|http://www.semanlink.net/tag/pain +http://www.semanlink.net/tag/todo_read|creationTime|2020-10-15T01:38:55Z +http://www.semanlink.net/tag/todo_read|prefLabel|TODO-READ! 
+http://www.semanlink.net/tag/todo_read|creationDate|2020-10-15 +http://www.semanlink.net/tag/todo_read|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/todo_read|uri|http://www.semanlink.net/tag/todo_read +http://www.semanlink.net/tag/conflits|creationTime|2007-08-07T10:01:04Z +http://www.semanlink.net/tag/conflits|prefLabel|Conflits +http://www.semanlink.net/tag/conflits|creationDate|2007-08-07 +http://www.semanlink.net/tag/conflits|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conflits|uri|http://www.semanlink.net/tag/conflits +http://www.semanlink.net/tag/satellite_images|creationTime|2012-03-20T23:06:10Z +http://www.semanlink.net/tag/satellite_images|prefLabel|Satellite images +http://www.semanlink.net/tag/satellite_images|broader|http://www.semanlink.net/tag/photo +http://www.semanlink.net/tag/satellite_images|related|http://www.semanlink.net/tag/google_maps +http://www.semanlink.net/tag/satellite_images|creationDate|2012-03-20 +http://www.semanlink.net/tag/satellite_images|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/satellite_images|uri|http://www.semanlink.net/tag/satellite_images +http://www.semanlink.net/tag/satellite_images|broader_prefLabel|Photo +http://www.semanlink.net/tag/satellite_images|broader_altLabel|Images +http://www.semanlink.net/tag/lord_of_the_flies|creationTime|2020-05-10T16:59:07Z +http://www.semanlink.net/tag/lord_of_the_flies|prefLabel|Lord of the Flies +http://www.semanlink.net/tag/lord_of_the_flies|broader|http://www.semanlink.net/tag/roman +http://www.semanlink.net/tag/lord_of_the_flies|broader|http://www.semanlink.net/tag/l_humanite_merite_de_disparaitre +http://www.semanlink.net/tag/lord_of_the_flies|creationDate|2020-05-10 +http://www.semanlink.net/tag/lord_of_the_flies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lord_of_the_flies|uri|http://www.semanlink.net/tag/lord_of_the_flies +http://www.semanlink.net/tag/lord_of_the_flies|broader_prefLabel|Roman +http://www.semanlink.net/tag/lord_of_the_flies|broader_prefLabel|L'humanité mérite de disparaître +http://www.semanlink.net/tag/ldap|creationTime|2008-10-28T22:40:58Z +http://www.semanlink.net/tag/ldap|prefLabel|LDAP +http://www.semanlink.net/tag/ldap|broader|http://www.semanlink.net/tag/database +http://www.semanlink.net/tag/ldap|creationDate|2008-10-28 +http://www.semanlink.net/tag/ldap|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ldap|uri|http://www.semanlink.net/tag/ldap +http://www.semanlink.net/tag/ldap|broader_prefLabel|Database +http://www.semanlink.net/tag/abuse_of_power|creationTime|2020-03-08T22:50:34Z +http://www.semanlink.net/tag/abuse_of_power|prefLabel|Abuse of power +http://www.semanlink.net/tag/abuse_of_power|creationDate|2020-03-08 +http://www.semanlink.net/tag/abuse_of_power|comment|comes as no surprise +http://www.semanlink.net/tag/abuse_of_power|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/abuse_of_power|uri|http://www.semanlink.net/tag/abuse_of_power +http://www.semanlink.net/tag/rdf_performance_issues|prefLabel|RDF performance issues +http://www.semanlink.net/tag/rdf_performance_issues|broader|http://www.semanlink.net/tag/rdf_dev +http://www.semanlink.net/tag/rdf_performance_issues|broader|http://www.semanlink.net/tag/rdf 
+http://www.semanlink.net/tag/rdf_performance_issues|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_performance_issues|uri|http://www.semanlink.net/tag/rdf_performance_issues +http://www.semanlink.net/tag/rdf_performance_issues|broader_prefLabel|RDF dev +http://www.semanlink.net/tag/rdf_performance_issues|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_performance_issues|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_performance_issues|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_performance_issues|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_performance_issues|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_performance_issues|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/friedrich_nietzsche|creationTime|2016-10-02T12:09:06Z +http://www.semanlink.net/tag/friedrich_nietzsche|prefLabel|Nietzsche +http://www.semanlink.net/tag/friedrich_nietzsche|broader|http://www.semanlink.net/tag/philosophe +http://www.semanlink.net/tag/friedrich_nietzsche|creationDate|2016-10-02 +http://www.semanlink.net/tag/friedrich_nietzsche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/friedrich_nietzsche|describedBy|https://fr.wikipedia.org/wiki/Friedrich_Nietzsche +http://www.semanlink.net/tag/friedrich_nietzsche|uri|http://www.semanlink.net/tag/friedrich_nietzsche +http://www.semanlink.net/tag/friedrich_nietzsche|broader_prefLabel|Philosophe +http://www.semanlink.net/tag/entity_linking|creationTime|2010-08-12T16:24:23Z +http://www.semanlink.net/tag/entity_linking|prefLabel|Entity linking +http://www.semanlink.net/tag/entity_linking|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/entity_linking|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/entity_linking|broader|http://www.semanlink.net/tag/entity_discovery_and_linking +http://www.semanlink.net/tag/entity_linking|related|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/entity_linking|creationDate|2010-08-12 +http://www.semanlink.net/tag/entity_linking|comment|= named entity disambiguation: the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base. 
+http://www.semanlink.net/tag/entity_linking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_linking|describedBy|https://en.wikipedia.org/wiki/Entity_linking +http://www.semanlink.net/tag/entity_linking|altLabel|Named entity disambiguation +http://www.semanlink.net/tag/entity_linking|uri|http://www.semanlink.net/tag/entity_linking +http://www.semanlink.net/tag/entity_linking|broader_prefLabel|Entities +http://www.semanlink.net/tag/entity_linking|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/entity_linking|broader_prefLabel|Entity discovery and linking +http://www.semanlink.net/tag/entity_linking|broader_altLabel|Entity Retrieval +http://www.semanlink.net/tag/entity_linking|broader_altLabel|Entity Analysis +http://www.semanlink.net/tag/entity_linking|broader_related|http://www.semanlink.net/tag/relation_extraction +http://www.semanlink.net/tag/void_vocabulary_of_interlinked_datasets|creationTime|2012-04-16T11:45:00Z +http://www.semanlink.net/tag/void_vocabulary_of_interlinked_datasets|prefLabel|VoID (Vocabulary of Interlinked Datasets) +http://www.semanlink.net/tag/void_vocabulary_of_interlinked_datasets|creationDate|2012-04-16 +http://www.semanlink.net/tag/void_vocabulary_of_interlinked_datasets|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/void_vocabulary_of_interlinked_datasets|describedBy|http://semanticweb.org/wiki/VoiD#World_Bank +http://www.semanlink.net/tag/void_vocabulary_of_interlinked_datasets|uri|http://www.semanlink.net/tag/void_vocabulary_of_interlinked_datasets +http://www.semanlink.net/tag/ora_lassila|prefLabel|Ora Lassila +http://www.semanlink.net/tag/ora_lassila|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/ora_lassila|broader|http://www.semanlink.net/tag/nokia +http://www.semanlink.net/tag/ora_lassila|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/ora_lassila|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ora_lassila|uri|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/ora_lassila|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/ora_lassila|broader_prefLabel|Nokia +http://www.semanlink.net/tag/ora_lassila|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/ora_lassila|broader_altLabel|Technical guys +http://www.semanlink.net/tag/rplug|creationTime|2012-10-24T13:25:46Z +http://www.semanlink.net/tag/rplug|prefLabel|rplug +http://www.semanlink.net/tag/rplug|broader|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/rplug|creationDate|2012-10-24 +http://www.semanlink.net/tag/rplug|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rplug|uri|http://www.semanlink.net/tag/rplug +http://www.semanlink.net/tag/rplug|broader_prefLabel|C2GWeb +http://www.semanlink.net/tag/topbraid_spin|creationTime|2010-12-13T17:23:10Z +http://www.semanlink.net/tag/topbraid_spin|prefLabel|TopBraid/SPIN +http://www.semanlink.net/tag/topbraid_spin|broader|http://www.semanlink.net/tag/topbraid +http://www.semanlink.net/tag/topbraid_spin|broader|http://www.semanlink.net/tag/inference +http://www.semanlink.net/tag/topbraid_spin|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/topbraid_spin|creationDate|2010-12-13 +http://www.semanlink.net/tag/topbraid_spin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/topbraid_spin|homepage|http://spinrdf.org/ +http://www.semanlink.net/tag/topbraid_spin|uri|http://www.semanlink.net/tag/topbraid_spin +http://www.semanlink.net/tag/topbraid_spin|broader_prefLabel|TopBraid +http://www.semanlink.net/tag/topbraid_spin|broader_prefLabel|Inference +http://www.semanlink.net/tag/topbraid_spin|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/evilstreak_markdown_js|creationTime|2015-10-11T10:49:20Z +http://www.semanlink.net/tag/evilstreak_markdown_js|prefLabel|evilstreak/markdown-js +http://www.semanlink.net/tag/evilstreak_markdown_js|broader|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/tag/evilstreak_markdown_js|broader|http://www.semanlink.net/tag/markown_javascript +http://www.semanlink.net/tag/evilstreak_markdown_js|creationDate|2015-10-11 +http://www.semanlink.net/tag/evilstreak_markdown_js|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/evilstreak_markdown_js|homepage|https://github.com/evilstreak/markdown-js +http://www.semanlink.net/tag/evilstreak_markdown_js|uri|http://www.semanlink.net/tag/evilstreak_markdown_js +http://www.semanlink.net/tag/evilstreak_markdown_js|broader_prefLabel|GitHub project +http://www.semanlink.net/tag/evilstreak_markdown_js|broader_prefLabel|Markown / Javascript +http://www.semanlink.net/tag/neuroscience|creationTime|2013-05-25T15:07:14Z +http://www.semanlink.net/tag/neuroscience|prefLabel|Neuroscience +http://www.semanlink.net/tag/neuroscience|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/neuroscience|creationDate|2013-05-25 +http://www.semanlink.net/tag/neuroscience|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neuroscience|describedBy|https://en.wikipedia.org/wiki/Neuroscience +http://www.semanlink.net/tag/neuroscience|uri|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/neuroscience|broader_prefLabel|Brain +http://www.semanlink.net/tag/neuroscience|broader_altLabel|Cerveau +http://www.semanlink.net/tag/neuroscience|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/explainable_ai|creationTime|2018-05-26T10:51:34Z +http://www.semanlink.net/tag/explainable_ai|prefLabel|Explainable AI +http://www.semanlink.net/tag/explainable_ai|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/explainable_ai|broader|http://www.semanlink.net/tag/accountable_ai +http://www.semanlink.net/tag/explainable_ai|creationDate|2018-05-26 +http://www.semanlink.net/tag/explainable_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/explainable_ai|uri|http://www.semanlink.net/tag/explainable_ai +http://www.semanlink.net/tag/explainable_ai|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/explainable_ai|broader_prefLabel|Accountable AI +http://www.semanlink.net/tag/explainable_ai|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/explainable_ai|broader_altLabel|AI +http://www.semanlink.net/tag/explainable_ai|broader_altLabel|IA +http://www.semanlink.net/tag/explainable_ai|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/ballmer|prefLabel|Ballmer +http://www.semanlink.net/tag/ballmer|broader|http://www.semanlink.net/tag/microsoft +http://www.semanlink.net/tag/ballmer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/ballmer|uri|http://www.semanlink.net/tag/ballmer +http://www.semanlink.net/tag/ballmer|broader_prefLabel|Microsoft +http://www.semanlink.net/tag/ballmer|broader_related|http://www.semanlink.net/tag/bill_gates +http://www.semanlink.net/tag/uri_encoding|prefLabel|URI encoding +http://www.semanlink.net/tag/uri_encoding|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/uri_encoding|broader|http://www.semanlink.net/tag/encoding +http://www.semanlink.net/tag/uri_encoding|creationDate|2006-09-12 +http://www.semanlink.net/tag/uri_encoding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uri_encoding|uri|http://www.semanlink.net/tag/uri_encoding +http://www.semanlink.net/tag/uri_encoding|broader_prefLabel|URI +http://www.semanlink.net/tag/uri_encoding|broader_prefLabel|Encoding +http://www.semanlink.net/tag/politique_de_l_innovation|prefLabel|Politique de l'innovation +http://www.semanlink.net/tag/politique_de_l_innovation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/politique_de_l_innovation|uri|http://www.semanlink.net/tag/politique_de_l_innovation +http://www.semanlink.net/tag/david_peterson|creationTime|2008-05-04T13:53:48Z +http://www.semanlink.net/tag/david_peterson|prefLabel|David Peterson +http://www.semanlink.net/tag/david_peterson|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/david_peterson|broader|http://www.semanlink.net/tag/australie +http://www.semanlink.net/tag/david_peterson|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/david_peterson|related|http://www.semanlink.net/tag/www08 +http://www.semanlink.net/tag/david_peterson|creationDate|2008-05-04 +http://www.semanlink.net/tag/david_peterson|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/david_peterson|type|http://xmlns.com/foaf/0.1/Person +http://www.semanlink.net/tag/david_peterson|homepage|http://www.sitepoint.com/articlelist/497 +http://www.semanlink.net/tag/david_peterson|uri|http://www.semanlink.net/tag/david_peterson +http://www.semanlink.net/tag/david_peterson|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/david_peterson|broader_prefLabel|Australie +http://www.semanlink.net/tag/david_peterson|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/david_peterson|broader_altLabel|Technical guys +http://www.semanlink.net/tag/ontologies|prefLabel|Ontologies +http://www.semanlink.net/tag/ontologies|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/ontologies|broader|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/tag/ontologies|comment|An ontology is a specification of a conceptualization. 
+http://www.semanlink.net/tag/ontologies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ontologies|altLabel|Ontology +http://www.semanlink.net/tag/ontologies|uri|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/ontologies|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/ontologies|broader_prefLabel|Knowledge Representation +http://www.semanlink.net/tag/ontologies|broader_altLabel|sw +http://www.semanlink.net/tag/ontologies|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/ontologies|broader_altLabel|KR +http://www.semanlink.net/tag/film_indien|creationTime|2007-10-18T01:30:13Z +http://www.semanlink.net/tag/film_indien|prefLabel|Film indien +http://www.semanlink.net/tag/film_indien|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_indien|broader|http://www.semanlink.net/tag/inde +http://www.semanlink.net/tag/film_indien|creationDate|2007-10-18 +http://www.semanlink.net/tag/film_indien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_indien|uri|http://www.semanlink.net/tag/film_indien +http://www.semanlink.net/tag/film_indien|broader_prefLabel|Film +http://www.semanlink.net/tag/film_indien|broader_prefLabel|Inde +http://www.semanlink.net/tag/arte|prefLabel|Arte +http://www.semanlink.net/tag/arte|broader|http://www.semanlink.net/tag/television +http://www.semanlink.net/tag/arte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arte|uri|http://www.semanlink.net/tag/arte +http://www.semanlink.net/tag/arte|broader_prefLabel|Télévision +http://www.semanlink.net/tag/arte|broader_altLabel|TV +http://www.semanlink.net/tag/n3|prefLabel|N3 +http://www.semanlink.net/tag/n3|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/n3|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/n3|uri|http://www.semanlink.net/tag/n3 +http://www.semanlink.net/tag/n3|broader_prefLabel|RDF +http://www.semanlink.net/tag/n3|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/n3|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/n3|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/n3|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/n3|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/critique_du_liberalisme|prefLabel|Critique du libéralisme +http://www.semanlink.net/tag/critique_du_liberalisme|broader|http://www.semanlink.net/tag/politique +http://www.semanlink.net/tag/critique_du_liberalisme|broader|http://www.semanlink.net/tag/critique_du_capitalisme +http://www.semanlink.net/tag/critique_du_liberalisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/critique_du_liberalisme|uri|http://www.semanlink.net/tag/critique_du_liberalisme +http://www.semanlink.net/tag/critique_du_liberalisme|broader_prefLabel|Politique +http://www.semanlink.net/tag/critique_du_liberalisme|broader_prefLabel|Critique du capitalisme +http://www.semanlink.net/tag/sebastian_schaffert|creationTime|2012-11-19T14:14:08Z +http://www.semanlink.net/tag/sebastian_schaffert|prefLabel|Sebastian Schaffert +http://www.semanlink.net/tag/sebastian_schaffert|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/sebastian_schaffert|related|http://www.semanlink.net/tag/interactive_knowledge_stack 
+http://www.semanlink.net/tag/sebastian_schaffert|related|http://www.semanlink.net/tag/linked_media_framework +http://www.semanlink.net/tag/sebastian_schaffert|creationDate|2012-11-19 +http://www.semanlink.net/tag/sebastian_schaffert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sebastian_schaffert|uri|http://www.semanlink.net/tag/sebastian_schaffert +http://www.semanlink.net/tag/sebastian_schaffert|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/aterm|creationTime|2012-02-15T00:12:01Z +http://www.semanlink.net/tag/aterm|prefLabel|ATerm +http://www.semanlink.net/tag/aterm|broader|http://www.semanlink.net/tag/dev_tools +http://www.semanlink.net/tag/aterm|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/aterm|related|http://www.semanlink.net/tag/fadi_badra +http://www.semanlink.net/tag/aterm|creationDate|2012-02-15 +http://www.semanlink.net/tag/aterm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aterm|uri|http://www.semanlink.net/tag/aterm +http://www.semanlink.net/tag/aterm|broader_prefLabel|Dev tools +http://www.semanlink.net/tag/aterm|broader_prefLabel|Dev +http://www.semanlink.net/tag/sparte|creationTime|2017-07-02T13:15:38Z +http://www.semanlink.net/tag/sparte|prefLabel|Sparte +http://www.semanlink.net/tag/sparte|broader|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/tag/sparte|creationDate|2017-07-02 +http://www.semanlink.net/tag/sparte|comment|> Etranger, va dire à Sparte que nous gisons ici, fidèles à ses lois +http://www.semanlink.net/tag/sparte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparte|describedBy|https://en.wikipedia.org/wiki/Sparta +http://www.semanlink.net/tag/sparte|uri|http://www.semanlink.net/tag/sparte +http://www.semanlink.net/tag/sparte|broader_prefLabel|Grèce antique +http://www.semanlink.net/tag/drupal|creationTime|2008-04-25T08:45:36Z +http://www.semanlink.net/tag/drupal|prefLabel|Drupal +http://www.semanlink.net/tag/drupal|broader|http://www.semanlink.net/tag/website_creation +http://www.semanlink.net/tag/drupal|broader|http://www.semanlink.net/tag/cms +http://www.semanlink.net/tag/drupal|creationDate|2008-04-25 +http://www.semanlink.net/tag/drupal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/drupal|homepage|http://drupal.org/ +http://www.semanlink.net/tag/drupal|describedBy|https://en.wikipedia.org/wiki/Drupal +http://www.semanlink.net/tag/drupal|uri|http://www.semanlink.net/tag/drupal +http://www.semanlink.net/tag/drupal|broader_prefLabel|Website: creation +http://www.semanlink.net/tag/drupal|broader_prefLabel|CMS +http://www.semanlink.net/tag/origines_de_l_homme|prefLabel|Origines de l'homme +http://www.semanlink.net/tag/origines_de_l_homme|broader|http://www.semanlink.net/tag/prehistoire +http://www.semanlink.net/tag/origines_de_l_homme|broader|http://www.semanlink.net/tag/paleoanthropology +http://www.semanlink.net/tag/origines_de_l_homme|broader|http://www.semanlink.net/tag/evolution +http://www.semanlink.net/tag/origines_de_l_homme|broader|http://www.semanlink.net/tag/histoire_de_la_vie +http://www.semanlink.net/tag/origines_de_l_homme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/origines_de_l_homme|uri|http://www.semanlink.net/tag/origines_de_l_homme +http://www.semanlink.net/tag/origines_de_l_homme|broader_prefLabel|Préhistoire 
+http://www.semanlink.net/tag/origines_de_l_homme|broader_prefLabel|Paleoanthropology +http://www.semanlink.net/tag/origines_de_l_homme|broader_prefLabel|Evolution +http://www.semanlink.net/tag/origines_de_l_homme|broader_prefLabel|Histoire de la vie +http://www.semanlink.net/tag/origines_de_l_homme|broader_altLabel|Paléontologie humaine +http://www.semanlink.net/tag/mars_curiosity|creationTime|2012-08-06T08:51:23Z +http://www.semanlink.net/tag/mars_curiosity|prefLabel|Mars/Curiosity +http://www.semanlink.net/tag/mars_curiosity|broader|http://www.semanlink.net/tag/exploration_marsienne +http://www.semanlink.net/tag/mars_curiosity|creationDate|2012-08-06 +http://www.semanlink.net/tag/mars_curiosity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mars_curiosity|uri|http://www.semanlink.net/tag/mars_curiosity +http://www.semanlink.net/tag/mars_curiosity|broader_prefLabel|Exploration marsienne +http://www.semanlink.net/tag/french_semantic_web_company|creationTime|2013-11-23T00:01:30Z +http://www.semanlink.net/tag/french_semantic_web_company|prefLabel|French Semantic web company +http://www.semanlink.net/tag/french_semantic_web_company|broader|http://www.semanlink.net/tag/semantic_web_company +http://www.semanlink.net/tag/french_semantic_web_company|creationDate|2013-11-23 +http://www.semanlink.net/tag/french_semantic_web_company|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/french_semantic_web_company|uri|http://www.semanlink.net/tag/french_semantic_web_company +http://www.semanlink.net/tag/french_semantic_web_company|broader_prefLabel|Semantic web company +http://www.semanlink.net/tag/french_semantic_web_company|broader_altLabel|Semantic Web : entreprise +http://www.semanlink.net/tag/french_semantic_web_company|broader_altLabel|Semantic Web: enterprise +http://www.semanlink.net/tag/flickr|prefLabel|Flickr +http://www.semanlink.net/tag/flickr|broader|http://www.semanlink.net/tag/photos_online +http://www.semanlink.net/tag/flickr|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/flickr|uri|http://www.semanlink.net/tag/flickr +http://www.semanlink.net/tag/flickr|broader_prefLabel|photos online +http://www.semanlink.net/tag/right_to_explanation|creationTime|2017-11-23T00:05:15Z +http://www.semanlink.net/tag/right_to_explanation|prefLabel|Right to explanation +http://www.semanlink.net/tag/right_to_explanation|creationDate|2017-11-23 +http://www.semanlink.net/tag/right_to_explanation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/right_to_explanation|uri|http://www.semanlink.net/tag/right_to_explanation +http://www.semanlink.net/tag/creve_coeur|creationTime|2009-01-13T01:22:55Z +http://www.semanlink.net/tag/creve_coeur|prefLabel|Crève cœur +http://www.semanlink.net/tag/creve_coeur|creationDate|2009-01-13 +http://www.semanlink.net/tag/creve_coeur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/creve_coeur|uri|http://www.semanlink.net/tag/creve_coeur +http://www.semanlink.net/tag/rant|creationTime|2018-01-03T15:22:35Z +http://www.semanlink.net/tag/rant|prefLabel|Rant +http://www.semanlink.net/tag/rant|creationDate|2018-01-03 +http://www.semanlink.net/tag/rant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rant|uri|http://www.semanlink.net/tag/rant +http://www.semanlink.net/tag/conceptual_clustering|creationTime|2021-05-26T00:56:09Z 
+http://www.semanlink.net/tag/conceptual_clustering|prefLabel|Conceptual clustering +http://www.semanlink.net/tag/conceptual_clustering|broader|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/tag/conceptual_clustering|creationDate|2021-05-26 +http://www.semanlink.net/tag/conceptual_clustering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conceptual_clustering|describedBy|https://en.wikipedia.org/wiki/Conceptual_clustering +http://www.semanlink.net/tag/conceptual_clustering|uri|http://www.semanlink.net/tag/conceptual_clustering +http://www.semanlink.net/tag/conceptual_clustering|broader_prefLabel|Clustering +http://www.semanlink.net/tag/conceptual_clustering|broader_altLabel|Data clustering +http://www.semanlink.net/tag/conceptual_clustering|broader_altLabel|Cluster analysis +http://www.semanlink.net/tag/conceptual_clustering|broader_related|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/grands_singes|prefLabel|Grands Singes +http://www.semanlink.net/tag/grands_singes|broader|http://www.semanlink.net/tag/especes_menacees +http://www.semanlink.net/tag/grands_singes|broader|http://www.semanlink.net/tag/singe +http://www.semanlink.net/tag/grands_singes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grands_singes|altLabel|Apes +http://www.semanlink.net/tag/grands_singes|uri|http://www.semanlink.net/tag/grands_singes +http://www.semanlink.net/tag/grands_singes|broader_prefLabel|Espèces menacées +http://www.semanlink.net/tag/grands_singes|broader_prefLabel|Singe +http://www.semanlink.net/tag/grands_singes|broader_altLabel|Endangered Species +http://www.semanlink.net/tag/grands_singes|broader_related|http://www.semanlink.net/tag/disparition_d_especes +http://www.semanlink.net/tag/arq|creationTime|2007-07-15T19:06:19Z +http://www.semanlink.net/tag/arq|prefLabel|ARQ +http://www.semanlink.net/tag/arq|broader|http://www.semanlink.net/tag/andy_seaborne +http://www.semanlink.net/tag/arq|broader|http://www.semanlink.net/tag/sparql_and_jena +http://www.semanlink.net/tag/arq|creationDate|2007-07-15 +http://www.semanlink.net/tag/arq|comment|ARQ - A SPARQL Processor for Jena +http://www.semanlink.net/tag/arq|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arq|homepage|http://jena.sourceforge.net/ARQ/ +http://www.semanlink.net/tag/arq|uri|http://www.semanlink.net/tag/arq +http://www.semanlink.net/tag/arq|broader_prefLabel|Andy Seaborne +http://www.semanlink.net/tag/arq|broader_prefLabel|SPARQL AND Jena +http://www.semanlink.net/tag/arq|broader_related|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/arq|broader_related|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/arq|broader_related|http://www.semanlink.net/tag/andy_seaborne +http://www.semanlink.net/tag/digital_media|prefLabel|Digital Media +http://www.semanlink.net/tag/digital_media|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/digital_media|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/digital_media|uri|http://www.semanlink.net/tag/digital_media +http://www.semanlink.net/tag/digital_media|broader_prefLabel|Technologie +http://www.semanlink.net/tag/enterprise_content_management|creationTime|2012-08-02T10:00:56Z +http://www.semanlink.net/tag/enterprise_content_management|prefLabel|Enterprise Content Management 
+http://www.semanlink.net/tag/enterprise_content_management|broader|http://www.semanlink.net/tag/ged +http://www.semanlink.net/tag/enterprise_content_management|creationDate|2012-08-02 +http://www.semanlink.net/tag/enterprise_content_management|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enterprise_content_management|altLabel|ECM +http://www.semanlink.net/tag/enterprise_content_management|uri|http://www.semanlink.net/tag/enterprise_content_management +http://www.semanlink.net/tag/enterprise_content_management|broader_prefLabel|GED +http://www.semanlink.net/tag/owlsight|creationTime|2008-04-11T15:53:06Z +http://www.semanlink.net/tag/owlsight|prefLabel|OwlSight +http://www.semanlink.net/tag/owlsight|broader|http://www.semanlink.net/tag/clark_and_parsia +http://www.semanlink.net/tag/owlsight|broader|http://www.semanlink.net/tag/owl_ontology_browser +http://www.semanlink.net/tag/owlsight|broader|http://www.semanlink.net/tag/google_web_toolkit +http://www.semanlink.net/tag/owlsight|creationDate|2008-04-11 +http://www.semanlink.net/tag/owlsight|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owlsight|uri|http://www.semanlink.net/tag/owlsight +http://www.semanlink.net/tag/owlsight|broader_prefLabel|Clark and Parsia +http://www.semanlink.net/tag/owlsight|broader_prefLabel|OWL ontology browser +http://www.semanlink.net/tag/owlsight|broader_prefLabel|Google Web Toolkit +http://www.semanlink.net/tag/owlsight|broader_altLabel|GWT +http://www.semanlink.net/tag/owlsight|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/hello_world|creationTime|2019-11-19T16:13:21Z +http://www.semanlink.net/tag/hello_world|prefLabel|Hello World +http://www.semanlink.net/tag/hello_world|broader|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/tag/hello_world|creationDate|2019-11-19 +http://www.semanlink.net/tag/hello_world|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hello_world|uri|http://www.semanlink.net/tag/hello_world +http://www.semanlink.net/tag/hello_world|broader_prefLabel|Sample code +http://www.semanlink.net/tag/backpropagation|creationTime|2016-01-03T16:00:09Z +http://www.semanlink.net/tag/backpropagation|prefLabel|Backpropagation +http://www.semanlink.net/tag/backpropagation|broader|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/backpropagation|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/backpropagation|creationDate|2016-01-03 +http://www.semanlink.net/tag/backpropagation|comment|"abbreviation for ""backward propagation of errors"". + +common method of training artificial neural networks used in conjunction with an optimization method such as gradient descent. The method calculates the gradient of a loss function with respect to all the weights in the network. The gradient is fed to the optimization method which in turn uses it to update the weights, in an attempt to minimize the loss function. + +Backpropagation requires a known, desired output for each input value in order to calculate the loss function gradient. It is therefore usually considered to be a supervised learning method. Backpropagation requires that the activation function used by the artificial neurons be differentiable." 
+http://www.semanlink.net/tag/backpropagation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/backpropagation|describedBy|https://en.wikipedia.org/wiki/Backpropagation +http://www.semanlink.net/tag/backpropagation|altLabel|Back Propagation +http://www.semanlink.net/tag/backpropagation|uri|http://www.semanlink.net/tag/backpropagation +http://www.semanlink.net/tag/backpropagation|broader_prefLabel|Algorithmes +http://www.semanlink.net/tag/backpropagation|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/backpropagation|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/backpropagation|broader_altLabel|ANN +http://www.semanlink.net/tag/backpropagation|broader_altLabel|NN +http://www.semanlink.net/tag/rapidminer_java|creationTime|2013-09-11T01:13:41Z +http://www.semanlink.net/tag/rapidminer_java|prefLabel|RapidMiner/Java +http://www.semanlink.net/tag/rapidminer_java|broader|http://www.semanlink.net/tag/rapidminer +http://www.semanlink.net/tag/rapidminer_java|creationDate|2013-09-11 +http://www.semanlink.net/tag/rapidminer_java|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rapidminer_java|uri|http://www.semanlink.net/tag/rapidminer_java +http://www.semanlink.net/tag/rapidminer_java|broader_prefLabel|RapidMiner +http://www.semanlink.net/tag/naive_bayes_classifier|creationTime|2015-10-10T14:57:00Z +http://www.semanlink.net/tag/naive_bayes_classifier|prefLabel|Naive Bayes classifier +http://www.semanlink.net/tag/naive_bayes_classifier|broader|http://www.semanlink.net/tag/bayesian_classification +http://www.semanlink.net/tag/naive_bayes_classifier|creationDate|2015-10-10 +http://www.semanlink.net/tag/naive_bayes_classifier|comment|"naive Bayes classifiers assume that the value of a particular feature is independent of the value of any other feature, **given the class variable** +" +http://www.semanlink.net/tag/naive_bayes_classifier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/naive_bayes_classifier|describedBy|https://en.wikipedia.org/wiki/Naive_Bayes_classifier +http://www.semanlink.net/tag/naive_bayes_classifier|uri|http://www.semanlink.net/tag/naive_bayes_classifier +http://www.semanlink.net/tag/naive_bayes_classifier|broader_prefLabel|Bayesian classification +http://www.semanlink.net/tag/naive_bayes_classifier|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/jena_dev|prefLabel|Jena dev +http://www.semanlink.net/tag/jena_dev|broader|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/jena_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jena_dev|uri|http://www.semanlink.net/tag/jena_dev +http://www.semanlink.net/tag/jena_dev|broader_prefLabel|Jena +http://www.semanlink.net/tag/jena_dev|broader_related|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/mesh_network|creationTime|2009-05-11T22:52:11Z +http://www.semanlink.net/tag/mesh_network|prefLabel|Mesh network +http://www.semanlink.net/tag/mesh_network|creationDate|2009-05-11 +http://www.semanlink.net/tag/mesh_network|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mesh_network|describedBy|https://en.wikipedia.org/wiki/Mesh_network +http://www.semanlink.net/tag/mesh_network|uri|http://www.semanlink.net/tag/mesh_network +http://www.semanlink.net/tag/xenophon|creationTime|2007-06-04T18:55:55Z 
+http://www.semanlink.net/tag/xenophon|prefLabel|Xenophon +http://www.semanlink.net/tag/xenophon|broader|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/tag/xenophon|creationDate|2007-06-04 +http://www.semanlink.net/tag/xenophon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xenophon|uri|http://www.semanlink.net/tag/xenophon +http://www.semanlink.net/tag/xenophon|broader_prefLabel|Grèce antique +http://www.semanlink.net/tag/neural_machine_translation|creationTime|2016-01-03T14:38:26Z +http://www.semanlink.net/tag/neural_machine_translation|prefLabel|Neural machine translation +http://www.semanlink.net/tag/neural_machine_translation|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/neural_machine_translation|broader|http://www.semanlink.net/tag/machine_translation +http://www.semanlink.net/tag/neural_machine_translation|creationDate|2016-01-03 +http://www.semanlink.net/tag/neural_machine_translation|comment|"Approach to machine translation in which a large neural network is trained to maximize translation performance. It is a radical departure from the phrase-based statistical translation approaches, in which a translation system consists of subcomponents that are separately optimized. + +A bidirectional recurrent neural network (RNN), known as an encoder, is used by the neural network to encode a source sentence for a second RNN, known as a decoder, that is used to predict words in the target language + +" +http://www.semanlink.net/tag/neural_machine_translation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neural_machine_translation|describedBy|https://en.wikipedia.org/wiki/Neural_machine_translation +http://www.semanlink.net/tag/neural_machine_translation|altLabel|NMT +http://www.semanlink.net/tag/neural_machine_translation|uri|http://www.semanlink.net/tag/neural_machine_translation +http://www.semanlink.net/tag/neural_machine_translation|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/neural_machine_translation|broader_prefLabel|Machine translation +http://www.semanlink.net/tag/neural_machine_translation|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/neural_machine_translation|broader_altLabel|ANN +http://www.semanlink.net/tag/neural_machine_translation|broader_altLabel|NN +http://www.semanlink.net/tag/neural_machine_translation|broader_altLabel|Traduction automatique +http://www.semanlink.net/tag/ai_application|creationTime|2020-11-14T09:03:45Z +http://www.semanlink.net/tag/ai_application|prefLabel|AI Application +http://www.semanlink.net/tag/ai_application|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_application|creationDate|2020-11-14 +http://www.semanlink.net/tag/ai_application|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_application|uri|http://www.semanlink.net/tag/ai_application +http://www.semanlink.net/tag/ai_application|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_application|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_application|broader_altLabel|AI +http://www.semanlink.net/tag/ai_application|broader_altLabel|IA +http://www.semanlink.net/tag/ai_application|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/la_main_a_la_pate|prefLabel|La main à la pâte 
+http://www.semanlink.net/tag/la_main_a_la_pate|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/la_main_a_la_pate|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/la_main_a_la_pate|related|http://www.semanlink.net/tag/jean_paul +http://www.semanlink.net/tag/la_main_a_la_pate|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/la_main_a_la_pate|uri|http://www.semanlink.net/tag/la_main_a_la_pate +http://www.semanlink.net/tag/la_main_a_la_pate|broader_prefLabel|Education +http://www.semanlink.net/tag/la_main_a_la_pate|broader_prefLabel|Science +http://www.semanlink.net/tag/la_main_a_la_pate|broader_altLabel|Enseignement +http://www.semanlink.net/tag/la_main_a_la_pate|broader_altLabel|sciences +http://www.semanlink.net/tag/markets|creationTime|2013-05-11T11:18:47Z +http://www.semanlink.net/tag/markets|prefLabel|Markets +http://www.semanlink.net/tag/markets|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/markets|creationDate|2013-05-11 +http://www.semanlink.net/tag/markets|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/markets|uri|http://www.semanlink.net/tag/markets +http://www.semanlink.net/tag/markets|broader_prefLabel|Finance +http://www.semanlink.net/tag/html_editor|creationTime|2012-08-06T14:29:40Z +http://www.semanlink.net/tag/html_editor|prefLabel|HTML Editor +http://www.semanlink.net/tag/html_editor|broader|http://www.semanlink.net/tag/html +http://www.semanlink.net/tag/html_editor|creationDate|2012-08-06 +http://www.semanlink.net/tag/html_editor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/html_editor|uri|http://www.semanlink.net/tag/html_editor +http://www.semanlink.net/tag/html_editor|broader_prefLabel|HTML +http://www.semanlink.net/tag/matthew_honnibal|creationTime|2018-04-12T23:41:02Z +http://www.semanlink.net/tag/matthew_honnibal|prefLabel|Matthew Honnibal +http://www.semanlink.net/tag/matthew_honnibal|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/matthew_honnibal|related|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/tag/matthew_honnibal|creationDate|2018-04-12 +http://www.semanlink.net/tag/matthew_honnibal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/matthew_honnibal|uri|http://www.semanlink.net/tag/matthew_honnibal +http://www.semanlink.net/tag/matthew_honnibal|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/python_tools|creationTime|2017-06-28T23:29:32Z +http://www.semanlink.net/tag/python_tools|prefLabel|Python tools +http://www.semanlink.net/tag/python_tools|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/python_tools|broader|http://www.semanlink.net/tag/dev_tools +http://www.semanlink.net/tag/python_tools|creationDate|2017-06-28 +http://www.semanlink.net/tag/python_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/python_tools|uri|http://www.semanlink.net/tag/python_tools +http://www.semanlink.net/tag/python_tools|broader_prefLabel|Python +http://www.semanlink.net/tag/python_tools|broader_prefLabel|Dev tools +http://www.semanlink.net/tag/owl2vec|creationTime|2020-10-07T08:37:19Z +http://www.semanlink.net/tag/owl2vec|prefLabel|OWL2Vec +http://www.semanlink.net/tag/owl2vec|broader|http://www.semanlink.net/tag/owl 
+http://www.semanlink.net/tag/owl2vec|broader|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/tag/owl2vec|creationDate|2020-10-07 +http://www.semanlink.net/tag/owl2vec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owl2vec|uri|http://www.semanlink.net/tag/owl2vec +http://www.semanlink.net/tag/owl2vec|broader_prefLabel|OWL +http://www.semanlink.net/tag/owl2vec|broader_prefLabel|Graph Embeddings +http://www.semanlink.net/tag/owl2vec|broader_altLabel|Representation Learning on Networks +http://www.semanlink.net/tag/owl2vec|broader_altLabel|Graph representation learning +http://www.semanlink.net/tag/owl2vec|broader_altLabel|Network Representation Learning +http://www.semanlink.net/tag/owl2vec|broader_altLabel|Network embeddings +http://www.semanlink.net/tag/owl2vec|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/owl2vec|broader_related|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/tag/xsl|prefLabel|XSL +http://www.semanlink.net/tag/xsl|broader|http://www.semanlink.net/tag/xml +http://www.semanlink.net/tag/xsl|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/xsl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xsl|uri|http://www.semanlink.net/tag/xsl +http://www.semanlink.net/tag/xsl|broader_prefLabel|XML +http://www.semanlink.net/tag/xsl|broader_prefLabel|Dev +http://www.semanlink.net/tag/kingsley_idehen|creationTime|2007-04-03T22:58:44Z +http://www.semanlink.net/tag/kingsley_idehen|prefLabel|Kingsley Idehen +http://www.semanlink.net/tag/kingsley_idehen|broader|http://www.semanlink.net/tag/openlink +http://www.semanlink.net/tag/kingsley_idehen|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/kingsley_idehen|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/kingsley_idehen|related|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/kingsley_idehen|creationDate|2007-04-03 +http://www.semanlink.net/tag/kingsley_idehen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kingsley_idehen|uri|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/kingsley_idehen|broader_prefLabel|OpenLink Software +http://www.semanlink.net/tag/kingsley_idehen|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/kingsley_idehen|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/kingsley_idehen|broader_altLabel|Technical guys +http://www.semanlink.net/tag/kingsley_idehen|broader_related|http://www.semanlink.net/tag/yrjana_rankka +http://www.semanlink.net/tag/kingsley_idehen|broader_related|http://www.semanlink.net/tag/orri_erling +http://www.semanlink.net/tag/kingsley_idehen|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/personal_cloud|creationTime|2013-09-22T13:16:35Z +http://www.semanlink.net/tag/personal_cloud|prefLabel|Personal cloud +http://www.semanlink.net/tag/personal_cloud|creationDate|2013-09-22 +http://www.semanlink.net/tag/personal_cloud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/personal_cloud|uri|http://www.semanlink.net/tag/personal_cloud +http://www.semanlink.net/tag/dsi|creationTime|2012-12-06T23:44:30Z +http://www.semanlink.net/tag/dsi|prefLabel|DSI +http://www.semanlink.net/tag/dsi|creationDate|2012-12-06 
+http://www.semanlink.net/tag/dsi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dsi|uri|http://www.semanlink.net/tag/dsi +http://www.semanlink.net/tag/mac_os_x|prefLabel|Mac OS X +http://www.semanlink.net/tag/mac_os_x|broader|http://www.semanlink.net/tag/apple_software +http://www.semanlink.net/tag/mac_os_x|broader|http://www.semanlink.net/tag/os +http://www.semanlink.net/tag/mac_os_x|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/mac_os_x|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mac_os_x|altLabel|OS X +http://www.semanlink.net/tag/mac_os_x|altLabel|OSX +http://www.semanlink.net/tag/mac_os_x|uri|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/tag/mac_os_x|broader_prefLabel|Apple Software +http://www.semanlink.net/tag/mac_os_x|broader_prefLabel|OS +http://www.semanlink.net/tag/mac_os_x|broader_prefLabel|Apple +http://www.semanlink.net/tag/automobile_manuals|creationTime|2019-07-28T10:48:05Z +http://www.semanlink.net/tag/automobile_manuals|prefLabel|Automobile manuals +http://www.semanlink.net/tag/automobile_manuals|creationDate|2019-07-28 +http://www.semanlink.net/tag/automobile_manuals|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/automobile_manuals|uri|http://www.semanlink.net/tag/automobile_manuals +http://www.semanlink.net/tag/bbc_semantic_publishing|creationTime|2011-02-04T21:35:17Z +http://www.semanlink.net/tag/bbc_semantic_publishing|prefLabel|BBC semantic publishing +http://www.semanlink.net/tag/bbc_semantic_publishing|broader|http://www.semanlink.net/tag/dynamic_semantic_publishing +http://www.semanlink.net/tag/bbc_semantic_publishing|broader|http://www.semanlink.net/tag/good +http://www.semanlink.net/tag/bbc_semantic_publishing|broader|http://www.semanlink.net/tag/rdf_application +http://www.semanlink.net/tag/bbc_semantic_publishing|broader|http://www.semanlink.net/tag/semantic_web_sites +http://www.semanlink.net/tag/bbc_semantic_publishing|related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/bbc_semantic_publishing|creationDate|2011-02-04 +http://www.semanlink.net/tag/bbc_semantic_publishing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bbc_semantic_publishing|uri|http://www.semanlink.net/tag/bbc_semantic_publishing +http://www.semanlink.net/tag/bbc_semantic_publishing|broader_prefLabel|Dynamic Semantic Publishing +http://www.semanlink.net/tag/bbc_semantic_publishing|broader_prefLabel|Good +http://www.semanlink.net/tag/bbc_semantic_publishing|broader_prefLabel|RDF Application +http://www.semanlink.net/tag/bbc_semantic_publishing|broader_prefLabel|semantic web sites +http://www.semanlink.net/tag/bbc_semantic_publishing|broader_related|http://www.semanlink.net/tag/bbc +http://www.semanlink.net/tag/dette|creationTime|2009-04-03T23:26:54Z +http://www.semanlink.net/tag/dette|prefLabel|Debt +http://www.semanlink.net/tag/dette|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/dette|creationDate|2009-04-03 +http://www.semanlink.net/tag/dette|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dette|altLabel|Dette +http://www.semanlink.net/tag/dette|uri|http://www.semanlink.net/tag/dette +http://www.semanlink.net/tag/dette|broader_prefLabel|Economie +http://www.semanlink.net/tag/dette|broader_related|http://www.semanlink.net/tag/entreprise 
+http://www.semanlink.net/tag/common_sense|creationTime|2018-07-23T12:52:39Z
+http://www.semanlink.net/tag/common_sense|prefLabel|Common Sense
+http://www.semanlink.net/tag/common_sense|creationDate|2018-07-23
+http://www.semanlink.net/tag/common_sense|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/common_sense|uri|http://www.semanlink.net/tag/common_sense
+http://www.semanlink.net/tag/good_related_work_section|creationTime|2021-06-15T01:03:51Z
+http://www.semanlink.net/tag/good_related_work_section|prefLabel|Good related work section
+http://www.semanlink.net/tag/good_related_work_section|broader|http://www.semanlink.net/tag/research_papers
+http://www.semanlink.net/tag/good_related_work_section|related|http://www.semanlink.net/tag/good
+http://www.semanlink.net/tag/good_related_work_section|related|http://www.semanlink.net/tag/survey
+http://www.semanlink.net/tag/good_related_work_section|creationDate|2021-06-15
+http://www.semanlink.net/tag/good_related_work_section|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/good_related_work_section|uri|http://www.semanlink.net/tag/good_related_work_section
+http://www.semanlink.net/tag/good_related_work_section|broader_prefLabel|Research papers
+http://www.semanlink.net/tag/france_politique_etrangere|creationTime|2020-08-11T14:43:06Z
+http://www.semanlink.net/tag/france_politique_etrangere|prefLabel|France : politique étrangère
+http://www.semanlink.net/tag/france_politique_etrangere|creationDate|2020-08-11
+http://www.semanlink.net/tag/france_politique_etrangere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/france_politique_etrangere|uri|http://www.semanlink.net/tag/france_politique_etrangere
+http://www.semanlink.net/tag/crise_des_banlieues|prefLabel|Crise des banlieues
+http://www.semanlink.net/tag/crise_des_banlieues|broader|http://www.semanlink.net/tag/societe_francaise
+http://www.semanlink.net/tag/crise_des_banlieues|broader|http://www.semanlink.net/tag/banlieue
+http://www.semanlink.net/tag/crise_des_banlieues|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/crise_des_banlieues|uri|http://www.semanlink.net/tag/crise_des_banlieues
+http://www.semanlink.net/tag/crise_des_banlieues|broader_prefLabel|Société française
+http://www.semanlink.net/tag/crise_des_banlieues|broader_prefLabel|Banlieue
+http://www.semanlink.net/tag/speech_recognition|creationTime|2010-07-01T16:17:46Z
+http://www.semanlink.net/tag/speech_recognition|prefLabel|Speech-to-Text
+http://www.semanlink.net/tag/speech_recognition|broader|http://www.semanlink.net/tag/sequence_to_sequence_learning
+http://www.semanlink.net/tag/speech_recognition|related|http://www.semanlink.net/tag/deep_learning
+http://www.semanlink.net/tag/speech_recognition|related|http://www.semanlink.net/tag/audio_classification
+http://www.semanlink.net/tag/speech_recognition|creationDate|2010-07-01
+http://www.semanlink.net/tag/speech_recognition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/speech_recognition|describedBy|https://en.wikipedia.org/wiki/Speech_recognition
+http://www.semanlink.net/tag/speech_recognition|altLabel|Speech recognition
+http://www.semanlink.net/tag/speech_recognition|altLabel|Voice recognition
+http://www.semanlink.net/tag/speech_recognition|uri|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/tag/speech_recognition|broader_prefLabel|Sequence-to-sequence learning +http://www.semanlink.net/tag/speech_recognition|broader_altLabel|Sequence Modeling +http://www.semanlink.net/tag/speech_recognition|broader_altLabel|Seq2Seq +http://www.semanlink.net/tag/speech_recognition|broader_related|http://www.semanlink.net/tag/conditional_random_field +http://www.semanlink.net/tag/speech_recognition|broader_related|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/tag/speech_recognition|broader_related|http://www.semanlink.net/tag/machine_translation +http://www.semanlink.net/tag/speech_recognition|broader_related|http://www.semanlink.net/tag/finite_state_transducer +http://www.semanlink.net/tag/cea|creationTime|2009-06-20T11:20:29Z +http://www.semanlink.net/tag/cea|prefLabel|CEA +http://www.semanlink.net/tag/cea|broader|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/tag/cea|creationDate|2009-06-20 +http://www.semanlink.net/tag/cea|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cea|uri|http://www.semanlink.net/tag/cea +http://www.semanlink.net/tag/cea|broader_prefLabel|Industrie nucléaire +http://www.semanlink.net/tag/cea|broader_altLabel|Nucléaire +http://www.semanlink.net/tag/land_degradation|creationTime|2018-03-26T23:23:00Z +http://www.semanlink.net/tag/land_degradation|prefLabel|Land Degradation +http://www.semanlink.net/tag/land_degradation|broader|http://www.semanlink.net/tag/crise_ecologique +http://www.semanlink.net/tag/land_degradation|creationDate|2018-03-26 +http://www.semanlink.net/tag/land_degradation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/land_degradation|uri|http://www.semanlink.net/tag/land_degradation +http://www.semanlink.net/tag/land_degradation|broader_prefLabel|Crise écologique +http://www.semanlink.net/tag/botanique|prefLabel|Botanique +http://www.semanlink.net/tag/botanique|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/botanique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/botanique|uri|http://www.semanlink.net/tag/botanique +http://www.semanlink.net/tag/botanique|broader_prefLabel|Biology +http://www.semanlink.net/tag/botanique|broader_altLabel|Biologie +http://www.semanlink.net/tag/semantic_camp_paris|creationTime|2008-04-04T08:19:34Z +http://www.semanlink.net/tag/semantic_camp_paris|prefLabel|Semantic Camp Paris +http://www.semanlink.net/tag/semantic_camp_paris|broader|http://www.semanlink.net/tag/barcamp +http://www.semanlink.net/tag/semantic_camp_paris|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_camp_paris|broader|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/semantic_camp_paris|creationDate|2008-04-04 +http://www.semanlink.net/tag/semantic_camp_paris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_camp_paris|altLabel|SemanticCampParis +http://www.semanlink.net/tag/semantic_camp_paris|uri|http://www.semanlink.net/tag/semantic_camp_paris +http://www.semanlink.net/tag/semantic_camp_paris|broader_prefLabel|Barcamp +http://www.semanlink.net/tag/semantic_camp_paris|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_camp_paris|broader_prefLabel|Paris 
+http://www.semanlink.net/tag/semantic_camp_paris|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_camp_paris|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/markdown_ittt|creationTime|2017-04-01T00:41:34Z +http://www.semanlink.net/tag/markdown_ittt|prefLabel|markdown-it +http://www.semanlink.net/tag/markdown_ittt|broader|http://www.semanlink.net/tag/markown_javascript +http://www.semanlink.net/tag/markdown_ittt|creationDate|2017-04-01 +http://www.semanlink.net/tag/markdown_ittt|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/markdown_ittt|homepage|https://github.com/markdown-it/markdown-it +http://www.semanlink.net/tag/markdown_ittt|uri|http://www.semanlink.net/tag/markdown_ittt +http://www.semanlink.net/tag/markdown_ittt|broader_prefLabel|Markown / Javascript +http://www.semanlink.net/tag/olivier_rossel|creationTime|2014-11-08T07:37:53Z +http://www.semanlink.net/tag/olivier_rossel|prefLabel|Olivier Rossel +http://www.semanlink.net/tag/olivier_rossel|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/olivier_rossel|creationDate|2014-11-08 +http://www.semanlink.net/tag/olivier_rossel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/olivier_rossel|uri|http://www.semanlink.net/tag/olivier_rossel +http://www.semanlink.net/tag/olivier_rossel|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/olivier_rossel|broader_altLabel|Technical guys +http://www.semanlink.net/tag/language_identification|creationTime|2020-11-19T14:31:37Z +http://www.semanlink.net/tag/language_identification|prefLabel|Language Identification +http://www.semanlink.net/tag/language_identification|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/language_identification|creationDate|2020-11-19 +http://www.semanlink.net/tag/language_identification|comment|"In javascript: + +- +- [demo](http://richtr.github.io/guessLanguage.js/)" +http://www.semanlink.net/tag/language_identification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/language_identification|altLabel|Language recognition +http://www.semanlink.net/tag/language_identification|altLabel|Language Detection +http://www.semanlink.net/tag/language_identification|uri|http://www.semanlink.net/tag/language_identification +http://www.semanlink.net/tag/language_identification|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/new_york|prefLabel|New York +http://www.semanlink.net/tag/new_york|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/new_york|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/new_york|uri|http://www.semanlink.net/tag/new_york +http://www.semanlink.net/tag/new_york|broader_prefLabel|Ville +http://www.semanlink.net/tag/realite_augmentee|creationTime|2012-08-04T11:43:45Z +http://www.semanlink.net/tag/realite_augmentee|prefLabel|Réalité augmentée +http://www.semanlink.net/tag/realite_augmentee|broader|http://www.semanlink.net/tag/realite_virtuelle +http://www.semanlink.net/tag/realite_augmentee|creationDate|2012-08-04 +http://www.semanlink.net/tag/realite_augmentee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/realite_augmentee|uri|http://www.semanlink.net/tag/realite_augmentee +http://www.semanlink.net/tag/realite_augmentee|broader_prefLabel|Réalité virtuelle 
+http://www.semanlink.net/tag/croisade_des_enfants|prefLabel|Croisade des enfants +http://www.semanlink.net/tag/croisade_des_enfants|broader|http://www.semanlink.net/tag/croisades +http://www.semanlink.net/tag/croisade_des_enfants|creationDate|2006-07-26 +http://www.semanlink.net/tag/croisade_des_enfants|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/croisade_des_enfants|uri|http://www.semanlink.net/tag/croisade_des_enfants +http://www.semanlink.net/tag/croisade_des_enfants|broader_prefLabel|Croisades +http://www.semanlink.net/tag/nlp_girls_and_guys|creationTime|2017-07-17T00:23:09Z +http://www.semanlink.net/tag/nlp_girls_and_guys|prefLabel|NLP girls and guys +http://www.semanlink.net/tag/nlp_girls_and_guys|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_girls_and_guys|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/nlp_girls_and_guys|creationDate|2017-07-17 +http://www.semanlink.net/tag/nlp_girls_and_guys|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_girls_and_guys|uri|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/nlp_girls_and_guys|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_girls_and_guys|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/nlp_girls_and_guys|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_girls_and_guys|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_girls_and_guys|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/livres_audio|creationTime|2009-08-19T18:36:00Z +http://www.semanlink.net/tag/livres_audio|prefLabel|Livres audio +http://www.semanlink.net/tag/livres_audio|broader|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/livres_audio|creationDate|2009-08-19 +http://www.semanlink.net/tag/livres_audio|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/livres_audio|uri|http://www.semanlink.net/tag/livres_audio +http://www.semanlink.net/tag/livres_audio|broader_prefLabel|Livre +http://www.semanlink.net/tag/livres_audio|broader_altLabel|Livres +http://www.semanlink.net/tag/entity_embeddings|creationTime|2017-11-27T16:41:15Z +http://www.semanlink.net/tag/entity_embeddings|prefLabel|Entity embeddings +http://www.semanlink.net/tag/entity_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/entity_embeddings|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/entity_embeddings|creationDate|2017-11-27 +http://www.semanlink.net/tag/entity_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_embeddings|uri|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/entity_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/entity_embeddings|broader_prefLabel|Entities +http://www.semanlink.net/tag/entity_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/entity_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/entity_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/loropeni|creationTime|2016-09-16T16:19:22Z +http://www.semanlink.net/tag/loropeni|prefLabel|Loropéni +http://www.semanlink.net/tag/loropeni|broader|http://www.semanlink.net/tag/antiquite_africaine 
+http://www.semanlink.net/tag/loropeni|broader|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.semanlink.net/tag/loropeni|broader|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/loropeni|creationDate|2016-09-16 +http://www.semanlink.net/tag/loropeni|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/loropeni|describedBy|https://fr.wikipedia.org/wiki/Lorop%C3%A9ni +http://www.semanlink.net/tag/loropeni|uri|http://www.semanlink.net/tag/loropeni +http://www.semanlink.net/tag/loropeni|broader_prefLabel|Archéologie africaine +http://www.semanlink.net/tag/loropeni|broader_prefLabel|Afrique de l'ouest : histoire +http://www.semanlink.net/tag/loropeni|broader_prefLabel|Burkina Faso +http://www.semanlink.net/tag/loropeni|broader_related|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/type_system|creationTime|2017-07-05T00:15:03Z +http://www.semanlink.net/tag/type_system|prefLabel|Type system +http://www.semanlink.net/tag/type_system|broader|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/type_system|creationDate|2017-07-05 +http://www.semanlink.net/tag/type_system|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/type_system|describedBy|https://en.wikipedia.org/wiki/Type_system +http://www.semanlink.net/tag/type_system|uri|http://www.semanlink.net/tag/type_system +http://www.semanlink.net/tag/type_system|broader_prefLabel|Programming language +http://www.semanlink.net/tag/type_system|broader_altLabel|Langage de programmation +http://www.semanlink.net/tag/web_services|prefLabel|Web Services +http://www.semanlink.net/tag/web_services|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/web_services|broader|http://www.semanlink.net/tag/web +http://www.semanlink.net/tag/web_services|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_services|altLabel|WS +http://www.semanlink.net/tag/web_services|uri|http://www.semanlink.net/tag/web_services +http://www.semanlink.net/tag/web_services|broader_prefLabel|Dev +http://www.semanlink.net/tag/web_services|broader_prefLabel|Web +http://www.semanlink.net/tag/prediction|creationTime|2011-12-21T23:12:04Z +http://www.semanlink.net/tag/prediction|prefLabel|Prediction +http://www.semanlink.net/tag/prediction|creationDate|2011-12-21 +http://www.semanlink.net/tag/prediction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/prediction|uri|http://www.semanlink.net/tag/prediction +http://www.semanlink.net/tag/niamey|prefLabel|Niamey +http://www.semanlink.net/tag/niamey|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/niamey|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/niamey|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/niamey|uri|http://www.semanlink.net/tag/niamey +http://www.semanlink.net/tag/niamey|broader_prefLabel|Niger +http://www.semanlink.net/tag/niamey|broader_prefLabel|Ville +http://www.semanlink.net/tag/niamey|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/niamey|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/niamey|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/semantic_web_evangelization|creationTime|2007-02-06T21:48:41Z 
+http://www.semanlink.net/tag/semantic_web_evangelization|prefLabel|Semantic web: evangelization +http://www.semanlink.net/tag/semantic_web_evangelization|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_evangelization|creationDate|2007-02-06 +http://www.semanlink.net/tag/semantic_web_evangelization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_evangelization|uri|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.semanlink.net/tag/semantic_web_evangelization|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_evangelization|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_evangelization|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/stanislas_dehaene|creationTime|2019-11-12T16:31:30Z +http://www.semanlink.net/tag/stanislas_dehaene|prefLabel|Stanislas Dehaene +http://www.semanlink.net/tag/stanislas_dehaene|broader|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/stanislas_dehaene|creationDate|2019-11-12 +http://www.semanlink.net/tag/stanislas_dehaene|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stanislas_dehaene|describedBy|https://fr.wikipedia.org/wiki/Stanislas_Dehaene +http://www.semanlink.net/tag/stanislas_dehaene|uri|http://www.semanlink.net/tag/stanislas_dehaene +http://www.semanlink.net/tag/stanislas_dehaene|broader_prefLabel|Neuroscience +http://www.semanlink.net/tag/causal_inference|creationTime|2020-03-08T11:53:19Z +http://www.semanlink.net/tag/causal_inference|prefLabel|Causal inference +http://www.semanlink.net/tag/causal_inference|broader|http://www.semanlink.net/tag/inference +http://www.semanlink.net/tag/causal_inference|creationDate|2020-03-08 +http://www.semanlink.net/tag/causal_inference|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/causal_inference|uri|http://www.semanlink.net/tag/causal_inference +http://www.semanlink.net/tag/causal_inference|broader_prefLabel|Inference +http://www.semanlink.net/tag/data_gouv_fr|creationTime|2013-09-22T12:23:09Z +http://www.semanlink.net/tag/data_gouv_fr|prefLabel|data.gouv.fr +http://www.semanlink.net/tag/data_gouv_fr|broader|http://www.semanlink.net/tag/government_data +http://www.semanlink.net/tag/data_gouv_fr|broader|http://www.semanlink.net/tag/open_data +http://www.semanlink.net/tag/data_gouv_fr|broader|http://www.semanlink.net/tag/gouvernement_francais +http://www.semanlink.net/tag/data_gouv_fr|creationDate|2013-09-22 +http://www.semanlink.net/tag/data_gouv_fr|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_gouv_fr|uri|http://www.semanlink.net/tag/data_gouv_fr +http://www.semanlink.net/tag/data_gouv_fr|broader_prefLabel|Government data +http://www.semanlink.net/tag/data_gouv_fr|broader_prefLabel|Open Data +http://www.semanlink.net/tag/data_gouv_fr|broader_prefLabel|Gouvernement français +http://www.semanlink.net/tag/data_gouv_fr|broader_related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/sioc|creationTime|2008-03-30T20:26:03Z +http://www.semanlink.net/tag/sioc|prefLabel|SIOC +http://www.semanlink.net/tag/sioc|broader|http://www.semanlink.net/tag/rdf_and_social_networks +http://www.semanlink.net/tag/sioc|broader|http://www.semanlink.net/tag/rdf_vocabularies +http://www.semanlink.net/tag/sioc|creationDate|2008-03-30 
+http://www.semanlink.net/tag/sioc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sioc|uri|http://www.semanlink.net/tag/sioc +http://www.semanlink.net/tag/sioc|broader_prefLabel|RDF and social networks +http://www.semanlink.net/tag/sioc|broader_prefLabel|RDF Vocabularies +http://www.semanlink.net/tag/sioc|broader_related|http://www.semanlink.net/tag/foaf +http://www.semanlink.net/tag/sioc|broader_related|http://www.semanlink.net/tag/henry_story +http://www.semanlink.net/tag/drupal_rdf|creationTime|2011-09-15T13:56:35Z +http://www.semanlink.net/tag/drupal_rdf|prefLabel|Drupal/RDF +http://www.semanlink.net/tag/drupal_rdf|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/drupal_rdf|broader|http://www.semanlink.net/tag/drupal +http://www.semanlink.net/tag/drupal_rdf|broader|http://www.semanlink.net/tag/semantic_cms +http://www.semanlink.net/tag/drupal_rdf|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/drupal_rdf|creationDate|2011-09-15 +http://www.semanlink.net/tag/drupal_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/drupal_rdf|uri|http://www.semanlink.net/tag/drupal_rdf +http://www.semanlink.net/tag/drupal_rdf|broader_prefLabel|RDF +http://www.semanlink.net/tag/drupal_rdf|broader_prefLabel|Drupal +http://www.semanlink.net/tag/drupal_rdf|broader_prefLabel|Semantic CMS +http://www.semanlink.net/tag/drupal_rdf|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/drupal_rdf|broader_altLabel|LD +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/drupal_rdf|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|creationTime|2021-05-31T17:03:00Z 
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|prefLabel|Unsupervised keyphrase extraction
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader|http://www.semanlink.net/tag/nlp_topic_extraction
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader|http://www.semanlink.net/tag/unsupervised_machine_learning
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|creationDate|2021-05-31
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|uri|http://www.semanlink.net/tag/unsupervised_keyphrase_extraction
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader_prefLabel|Keyword/keyphrase extraction
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader_prefLabel|Unsupervised machine learning
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader_altLabel|Topic extraction
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader_altLabel|Keyword extraction
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader_altLabel|Keyphrase extraction
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader_related|http://www.semanlink.net/tag/phrase_mining
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader_related|http://www.semanlink.net/tag/phrase_embeddings
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader_related|http://www.semanlink.net/tag/automatic_summarization
+http://www.semanlink.net/tag/unsupervised_keyphrase_extraction|broader_related|http://www.semanlink.net/tag/topic_modeling
+http://www.semanlink.net/tag/matiere_noire|prefLabel|Dark matter
+http://www.semanlink.net/tag/matiere_noire|broader|http://www.semanlink.net/tag/masse_manquante
+http://www.semanlink.net/tag/matiere_noire|broader|http://www.semanlink.net/tag/physique
+http://www.semanlink.net/tag/matiere_noire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/matiere_noire|altLabel|Matière noire
+http://www.semanlink.net/tag/matiere_noire|uri|http://www.semanlink.net/tag/matiere_noire
+http://www.semanlink.net/tag/matiere_noire|broader_prefLabel|Masse manquante
+http://www.semanlink.net/tag/matiere_noire|broader_prefLabel|Physique
+http://www.semanlink.net/tag/matiere_noire|broader_altLabel|Missing Matter
+http://www.semanlink.net/tag/matiere_noire|broader_altLabel|Physics
+http://www.semanlink.net/tag/fanatisme|creationTime|2019-08-25T19:41:13Z
+http://www.semanlink.net/tag/fanatisme|prefLabel|Fanatisme
+http://www.semanlink.net/tag/fanatisme|creationDate|2019-08-25
+http://www.semanlink.net/tag/fanatisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fanatisme|uri|http://www.semanlink.net/tag/fanatisme
+http://www.semanlink.net/tag/gensim|creationTime|2017-05-23T10:19:24Z
+http://www.semanlink.net/tag/gensim|prefLabel|gensim
+http://www.semanlink.net/tag/gensim|broader|http://www.semanlink.net/tag/python_nlp
+http://www.semanlink.net/tag/gensim|broader|http://www.semanlink.net/tag/nlp_tools
+http://www.semanlink.net/tag/gensim|broader|http://www.semanlink.net/tag/topic_modeling
+http://www.semanlink.net/tag/gensim|broader|http://www.semanlink.net/tag/vector_space_model
+http://www.semanlink.net/tag/gensim|creationDate|2017-05-23
+http://www.semanlink.net/tag/gensim|comment|"""Topic modelling for humans"" ; ""Python framework for fast Vector Space Modelling""
+
+"
+http://www.semanlink.net/tag/gensim|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/gensim|homepage|https://radimrehurek.com/gensim/
+http://www.semanlink.net/tag/gensim|uri|http://www.semanlink.net/tag/gensim
+http://www.semanlink.net/tag/gensim|broader_prefLabel|Python-NLP
+http://www.semanlink.net/tag/gensim|broader_prefLabel|NLP tools
+http://www.semanlink.net/tag/gensim|broader_prefLabel|Topic Modeling
+http://www.semanlink.net/tag/gensim|broader_prefLabel|Vector space model
+http://www.semanlink.net/tag/gensim|broader_altLabel|Topic model
+http://www.semanlink.net/tag/gensim|broader_altLabel|Vectorial semantics
+http://www.semanlink.net/tag/gensim|broader_related|http://www.semanlink.net/tag/python
+http://www.semanlink.net/tag/gensim|broader_related|http://www.semanlink.net/tag/python_4_data_science
+http://www.semanlink.net/tag/gensim|broader_related|http://www.semanlink.net/tag/clustering_of_text_documents
+http://www.semanlink.net/tag/gensim|broader_related|http://www.semanlink.net/tag/andrew_mccallum
+http://www.semanlink.net/tag/gensim|broader_related|http://www.semanlink.net/tag/nlp_and_humanities
+http://www.semanlink.net/tag/gensim|broader_related|http://www.semanlink.net/tag/mallet
+http://www.semanlink.net/tag/owl_dl|creationTime|2007-06-05T07:30:46Z
+http://www.semanlink.net/tag/owl_dl|prefLabel|OWL DL
+http://www.semanlink.net/tag/owl_dl|broader|http://www.semanlink.net/tag/description_logic
+http://www.semanlink.net/tag/owl_dl|broader|http://www.semanlink.net/tag/owl
+http://www.semanlink.net/tag/owl_dl|creationDate|2007-06-05
+http://www.semanlink.net/tag/owl_dl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/owl_dl|uri|http://www.semanlink.net/tag/owl_dl
+http://www.semanlink.net/tag/owl_dl|broader_prefLabel|Description Logic
+http://www.semanlink.net/tag/owl_dl|broader_prefLabel|OWL
+http://www.semanlink.net/tag/owl_dl|broader_related|http://www.semanlink.net/tag/first_order_logic
+http://www.semanlink.net/tag/owl_dl|broader_related|http://www.semanlink.net/tag/owl_dl
+http://www.semanlink.net/tag/owl_dl|broader_related|http://www.semanlink.net/tag/bijan_parsia
+http://www.semanlink.net/tag/foaf|prefLabel|foaf
+http://www.semanlink.net/tag/foaf|broader|http://www.semanlink.net/tag/social_semantic_web
+http://www.semanlink.net/tag/foaf|broader|http://www.semanlink.net/tag/rdf_and_social_networks
+http://www.semanlink.net/tag/foaf|broader|http://www.semanlink.net/tag/rdf_vocabularies
+http://www.semanlink.net/tag/foaf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/foaf|homepage|http://www.foaf-project.org/
+http://www.semanlink.net/tag/foaf|uri|http://www.semanlink.net/tag/foaf
+http://www.semanlink.net/tag/foaf|broader_prefLabel|Social Semantic Web
+http://www.semanlink.net/tag/foaf|broader_prefLabel|RDF and social networks
+http://www.semanlink.net/tag/foaf|broader_prefLabel|RDF Vocabularies
+http://www.semanlink.net/tag/foaf|broader_related|http://www.semanlink.net/tag/foaf
+http://www.semanlink.net/tag/foaf|broader_related|http://www.semanlink.net/tag/henry_story
+http://www.semanlink.net/tag/web_apis|creationTime|2017-03-02T12:26:36Z
+http://www.semanlink.net/tag/web_apis|prefLabel|Web APIs
+http://www.semanlink.net/tag/web_apis|creationDate|2017-03-02
+http://www.semanlink.net/tag/web_apis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/web_apis|uri|http://www.semanlink.net/tag/web_apis +http://www.semanlink.net/tag/top_k|creationTime|2020-06-29T14:04:36Z +http://www.semanlink.net/tag/top_k|prefLabel|Top-k +http://www.semanlink.net/tag/top_k|related|http://www.semanlink.net/tag/kd_mkb +http://www.semanlink.net/tag/top_k|related|http://www.semanlink.net/tag/nearest_neighbor_search +http://www.semanlink.net/tag/top_k|creationDate|2020-06-29 +http://www.semanlink.net/tag/top_k|comment|eg. finding the k largest or smallest elements from a collection of scores +http://www.semanlink.net/tag/top_k|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/top_k|uri|http://www.semanlink.net/tag/top_k +http://www.semanlink.net/tag/bielorussie|prefLabel|Biélorussie +http://www.semanlink.net/tag/bielorussie|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/bielorussie|broader|http://www.semanlink.net/tag/urss +http://www.semanlink.net/tag/bielorussie|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/bielorussie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bielorussie|describedBy|https://fr.wikipedia.org/wiki/Bi%C3%A9lorussie +http://www.semanlink.net/tag/bielorussie|altLabel|Belarus +http://www.semanlink.net/tag/bielorussie|uri|http://www.semanlink.net/tag/bielorussie +http://www.semanlink.net/tag/bielorussie|broader_prefLabel|Europe +http://www.semanlink.net/tag/bielorussie|broader_prefLabel|Ex URSS +http://www.semanlink.net/tag/bielorussie|broader_prefLabel|URSS +http://www.semanlink.net/tag/bielorussie|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/crise_des_subprimes|creationTime|2008-09-18T21:19:40Z +http://www.semanlink.net/tag/crise_des_subprimes|prefLabel|Crise des subprimes +http://www.semanlink.net/tag/crise_des_subprimes|broader|http://www.semanlink.net/tag/crise_financiere +http://www.semanlink.net/tag/crise_des_subprimes|creationDate|2008-09-18 +http://www.semanlink.net/tag/crise_des_subprimes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/crise_des_subprimes|uri|http://www.semanlink.net/tag/crise_des_subprimes +http://www.semanlink.net/tag/crise_des_subprimes|broader_prefLabel|Crise financière +http://www.semanlink.net/tag/java_library|creationTime|2014-10-12T22:58:32Z +http://www.semanlink.net/tag/java_library|prefLabel|Java library +http://www.semanlink.net/tag/java_library|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java_library|broader|http://www.semanlink.net/tag/library_code +http://www.semanlink.net/tag/java_library|creationDate|2014-10-12 +http://www.semanlink.net/tag/java_library|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_library|uri|http://www.semanlink.net/tag/java_library +http://www.semanlink.net/tag/java_library|broader_prefLabel|Java +http://www.semanlink.net/tag/java_library|broader_prefLabel|Library (code) +http://www.semanlink.net/tag/java_library|broader_related|http://www.semanlink.net/tag/frameworks +http://www.semanlink.net/tag/guerre_civile|creationTime|2009-01-13T01:22:31Z +http://www.semanlink.net/tag/guerre_civile|prefLabel|Guerre civile +http://www.semanlink.net/tag/guerre_civile|creationDate|2009-01-13 +http://www.semanlink.net/tag/guerre_civile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/guerre_civile|uri|http://www.semanlink.net/tag/guerre_civile 
+http://www.semanlink.net/tag/administration|prefLabel|Administration +http://www.semanlink.net/tag/administration|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/administration|uri|http://www.semanlink.net/tag/administration +http://www.semanlink.net/tag/tips|creationTime|2007-11-09T13:07:16Z +http://www.semanlink.net/tag/tips|prefLabel|Tips +http://www.semanlink.net/tag/tips|creationDate|2007-11-09 +http://www.semanlink.net/tag/tips|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tips|uri|http://www.semanlink.net/tag/tips +http://www.semanlink.net/tag/nlp_and_humanities|creationTime|2017-05-22T13:32:43Z +http://www.semanlink.net/tag/nlp_and_humanities|prefLabel|NLP and humanities +http://www.semanlink.net/tag/nlp_and_humanities|broader|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/tag/nlp_and_humanities|creationDate|2017-05-22 +http://www.semanlink.net/tag/nlp_and_humanities|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_and_humanities|uri|http://www.semanlink.net/tag/nlp_and_humanities +http://www.semanlink.net/tag/nlp_and_humanities|broader_prefLabel|NLP: use cases +http://www.semanlink.net/tag/nlp_and_humanities|broader_altLabel|NLP: applications +http://www.semanlink.net/tag/script_tag_hack|creationTime|2009-05-20T23:41:32Z +http://www.semanlink.net/tag/script_tag_hack|prefLabel|Script tag hack +http://www.semanlink.net/tag/script_tag_hack|broader|http://www.semanlink.net/tag/cross_domain_data_fetching +http://www.semanlink.net/tag/script_tag_hack|creationDate|2009-05-20 +http://www.semanlink.net/tag/script_tag_hack|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/script_tag_hack|uri|http://www.semanlink.net/tag/script_tag_hack +http://www.semanlink.net/tag/script_tag_hack|broader_prefLabel|cross-domain data fetching +http://www.semanlink.net/tag/afrique|prefLabel|Afrique +http://www.semanlink.net/tag/afrique|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/afrique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afrique|altLabel|Africa +http://www.semanlink.net/tag/afrique|uri|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/afrique|broader_prefLabel|Géographie +http://www.semanlink.net/tag/frameworks|creationTime|2018-12-19T13:10:28Z +http://www.semanlink.net/tag/frameworks|prefLabel|Frameworks +http://www.semanlink.net/tag/frameworks|broader|http://www.semanlink.net/tag/programming +http://www.semanlink.net/tag/frameworks|creationDate|2018-12-19 +http://www.semanlink.net/tag/frameworks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/frameworks|uri|http://www.semanlink.net/tag/frameworks +http://www.semanlink.net/tag/frameworks|broader_prefLabel|Programming +http://www.semanlink.net/tag/metaweb|creationTime|2010-07-18T19:30:06Z +http://www.semanlink.net/tag/metaweb|prefLabel|Metaweb +http://www.semanlink.net/tag/metaweb|creationDate|2010-07-18 +http://www.semanlink.net/tag/metaweb|comment|"""Connect your site to the web's best sources""" +http://www.semanlink.net/tag/metaweb|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/metaweb|describedBy|http://www.metaweb.com/ +http://www.semanlink.net/tag/metaweb|uri|http://www.semanlink.net/tag/metaweb +http://www.semanlink.net/tag/multimodal_models|creationTime|2018-11-15T01:44:27Z 
+http://www.semanlink.net/tag/multimodal_models|prefLabel|Multimodal Models +http://www.semanlink.net/tag/multimodal_models|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/multimodal_models|creationDate|2018-11-15 +http://www.semanlink.net/tag/multimodal_models|comment|multimodal learning involves learning representations that can process and relate information from multiple modalities, such as text and perceptual information (e.g., images, sounds) +http://www.semanlink.net/tag/multimodal_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multimodal_models|altLabel|Multimodal learning +http://www.semanlink.net/tag/multimodal_models|uri|http://www.semanlink.net/tag/multimodal_models +http://www.semanlink.net/tag/multimodal_models|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/lov_linked_open_vocabularies|creationTime|2012-07-05T16:07:17Z +http://www.semanlink.net/tag/lov_linked_open_vocabularies|prefLabel|(LOV) Linked Open Vocabularies +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader|http://www.semanlink.net/tag/mondeca +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader|http://www.semanlink.net/tag/bernard_vatant +http://www.semanlink.net/tag/lov_linked_open_vocabularies|related|http://www.semanlink.net/tag/bernard_vatant +http://www.semanlink.net/tag/lov_linked_open_vocabularies|creationDate|2012-07-05 +http://www.semanlink.net/tag/lov_linked_open_vocabularies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lov_linked_open_vocabularies|describedBy|http://lov.okfn.org/ +http://www.semanlink.net/tag/lov_linked_open_vocabularies|uri|http://www.semanlink.net/tag/lov_linked_open_vocabularies +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_prefLabel|Mondeca +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_prefLabel|Bernard Vatant +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_altLabel|LD +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/giovanni_tummarello 
+http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/tom_heath
+http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/rdf_data_source
+http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/tim_berners_lee
+http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/bernard_vatant
+http://www.semanlink.net/tag/lov_linked_open_vocabularies|broader_related|http://www.semanlink.net/tag/linked_data
+http://www.semanlink.net/tag/eclipse|prefLabel|Eclipse
+http://www.semanlink.net/tag/eclipse|broader|http://www.semanlink.net/tag/dev_tools
+http://www.semanlink.net/tag/eclipse|broader|http://www.semanlink.net/tag/dev
+http://www.semanlink.net/tag/eclipse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/eclipse|uri|http://www.semanlink.net/tag/eclipse
+http://www.semanlink.net/tag/eclipse|broader_prefLabel|Dev tools
+http://www.semanlink.net/tag/eclipse|broader_prefLabel|Dev
+http://www.semanlink.net/tag/bart_van_leeuwen|creationTime|2013-10-18T22:34:57Z
+http://www.semanlink.net/tag/bart_van_leeuwen|prefLabel|Bart van Leeuwen
+http://www.semanlink.net/tag/bart_van_leeuwen|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/bart_van_leeuwen|broader|http://www.semanlink.net/tag/firefighter
+http://www.semanlink.net/tag/bart_van_leeuwen|related|http://www.semanlink.net/tag/amsterdam
+http://www.semanlink.net/tag/bart_van_leeuwen|creationDate|2013-10-18
+http://www.semanlink.net/tag/bart_van_leeuwen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/bart_van_leeuwen|uri|http://www.semanlink.net/tag/bart_van_leeuwen
+http://www.semanlink.net/tag/bart_van_leeuwen|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/bart_van_leeuwen|broader_prefLabel|Firefighter
+http://www.semanlink.net/tag/servlet_3_0|creationTime|2011-02-03T23:08:51Z
+http://www.semanlink.net/tag/servlet_3_0|prefLabel|Servlet 3.0
+http://www.semanlink.net/tag/servlet_3_0|broader|http://www.semanlink.net/tag/servlet
+http://www.semanlink.net/tag/servlet_3_0|creationDate|2011-02-03
+http://www.semanlink.net/tag/servlet_3_0|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/servlet_3_0|uri|http://www.semanlink.net/tag/servlet_3_0
+http://www.semanlink.net/tag/servlet_3_0|broader_prefLabel|Servlet
+http://www.semanlink.net/tag/hypothese_de_riemann|prefLabel|Riemann Hypothesis
+http://www.semanlink.net/tag/hypothese_de_riemann|broader|http://www.semanlink.net/tag/grands_problemes_mathematiques
+http://www.semanlink.net/tag/hypothese_de_riemann|broader|http://www.semanlink.net/tag/nombres_premiers
+http://www.semanlink.net/tag/hypothese_de_riemann|broader|http://www.semanlink.net/tag/riemann
+http://www.semanlink.net/tag/hypothese_de_riemann|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/hypothese_de_riemann|describedBy|https://en.wikipedia.org/wiki/Riemann_hypothesis
+http://www.semanlink.net/tag/hypothese_de_riemann|altLabel|Hypothèse de Riemann
+http://www.semanlink.net/tag/hypothese_de_riemann|uri|http://www.semanlink.net/tag/hypothese_de_riemann
+http://www.semanlink.net/tag/hypothese_de_riemann|broader_prefLabel|Grands problèmes mathématiques
+http://www.semanlink.net/tag/hypothese_de_riemann|broader_prefLabel|Nombres premiers
+http://www.semanlink.net/tag/hypothese_de_riemann|broader_prefLabel|Riemann
+http://www.semanlink.net/tag/chine|prefLabel|Chine
+http://www.semanlink.net/tag/chine|broader|http://www.semanlink.net/tag/asie
+http://www.semanlink.net/tag/chine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/chine|altLabel|China
+http://www.semanlink.net/tag/chine|uri|http://www.semanlink.net/tag/chine
+http://www.semanlink.net/tag/chine|broader_prefLabel|Asie
+http://www.semanlink.net/tag/sweo_interest_group|creationTime|2007-04-25T15:23:04Z
+http://www.semanlink.net/tag/sweo_interest_group|prefLabel|SWEO Interest Group
+http://www.semanlink.net/tag/sweo_interest_group|broader|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/sweo_interest_group|broader|http://www.semanlink.net/tag/semantic_web_evangelization
+http://www.semanlink.net/tag/sweo_interest_group|related|http://www.semanlink.net/tag/ivan_herman
+http://www.semanlink.net/tag/sweo_interest_group|creationDate|2007-04-25
+http://www.semanlink.net/tag/sweo_interest_group|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sweo_interest_group|uri|http://www.semanlink.net/tag/sweo_interest_group
+http://www.semanlink.net/tag/sweo_interest_group|broader_prefLabel|W3C
+http://www.semanlink.net/tag/sweo_interest_group|broader_prefLabel|Semantic web: evangelization
+http://www.semanlink.net/tag/sweo_interest_group|broader_related|http://www.semanlink.net/tag/owl
+http://www.semanlink.net/tag/sweo_interest_group|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/royaume_uni|prefLabel|Royaume Uni
+http://www.semanlink.net/tag/royaume_uni|broader|http://www.semanlink.net/tag/europe
+http://www.semanlink.net/tag/royaume_uni|broader|http://www.semanlink.net/tag/pays_d_europe
+http://www.semanlink.net/tag/royaume_uni|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/royaume_uni|altLabel|UK
+http://www.semanlink.net/tag/royaume_uni|uri|http://www.semanlink.net/tag/royaume_uni
+http://www.semanlink.net/tag/royaume_uni|broader_prefLabel|Europe
+http://www.semanlink.net/tag/royaume_uni|broader_prefLabel|Pays d'Europe
+http://www.semanlink.net/tag/traders|creationTime|2014-01-20T11:36:31Z
+http://www.semanlink.net/tag/traders|prefLabel|Traders
+http://www.semanlink.net/tag/traders|broader|http://www.semanlink.net/tag/finance
+http://www.semanlink.net/tag/traders|creationDate|2014-01-20
+http://www.semanlink.net/tag/traders|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/traders|uri|http://www.semanlink.net/tag/traders
+http://www.semanlink.net/tag/traders|broader_prefLabel|Finance
+http://www.semanlink.net/tag/concept_search|creationTime|2017-09-10T17:26:10Z
+http://www.semanlink.net/tag/concept_search|prefLabel|Concept Search
+http://www.semanlink.net/tag/concept_search|broader|http://www.semanlink.net/tag/search
+http://www.semanlink.net/tag/concept_search|creationDate|2017-09-10
+http://www.semanlink.net/tag/concept_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/concept_search|uri|http://www.semanlink.net/tag/concept_search
+http://www.semanlink.net/tag/concept_search|broader_prefLabel|Search
+http://www.semanlink.net/tag/laure_soulier|creationTime|2018-07-11T13:07:03Z
+http://www.semanlink.net/tag/laure_soulier|prefLabel|Laure Soulier +http://www.semanlink.net/tag/laure_soulier|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/laure_soulier|related|http://www.semanlink.net/tag/lip6 +http://www.semanlink.net/tag/laure_soulier|creationDate|2018-07-11 +http://www.semanlink.net/tag/laure_soulier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/laure_soulier|uri|http://www.semanlink.net/tag/laure_soulier +http://www.semanlink.net/tag/laure_soulier|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/cost_of_linked_data|creationTime|2010-07-30T14:06:20Z +http://www.semanlink.net/tag/cost_of_linked_data|prefLabel|Cost of Linked Data +http://www.semanlink.net/tag/cost_of_linked_data|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/cost_of_linked_data|creationDate|2010-07-30 +http://www.semanlink.net/tag/cost_of_linked_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cost_of_linked_data|uri|http://www.semanlink.net/tag/cost_of_linked_data +http://www.semanlink.net/tag/cost_of_linked_data|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/cost_of_linked_data|broader_altLabel|LD +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/cost_of_linked_data|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/chimie|creationTime|2013-04-11T09:13:24Z +http://www.semanlink.net/tag/chimie|prefLabel|Chimie +http://www.semanlink.net/tag/chimie|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/chimie|creationDate|2013-04-11 +http://www.semanlink.net/tag/chimie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chimie|uri|http://www.semanlink.net/tag/chimie +http://www.semanlink.net/tag/chimie|broader_prefLabel|Science +http://www.semanlink.net/tag/chimie|broader_altLabel|sciences +http://www.semanlink.net/tag/kernel_method|creationTime|2018-11-06T10:37:50Z +http://www.semanlink.net/tag/kernel_method|prefLabel|Kernel methods 
+http://www.semanlink.net/tag/kernel_method|broader|http://www.semanlink.net/tag/algorithmes
+http://www.semanlink.net/tag/kernel_method|creationDate|2018-11-06
+http://www.semanlink.net/tag/kernel_method|comment|"Class of algorithms for pattern analysis (eg. SVM). **Kernel trick**: transforming data into another dimension that has a clear dividing margin between classes of data, without computing the coordinates of the data in that space, but the inner products between the images of all pairs of data in the feature space (using a user-defined similarity function, the ""kernel function"")
+
+Kernel methods are powerful learning methodologies that provide **a simple way to construct nonlinear algorithms from linear ones**. Despite their popularity, they suffer from **poor scalability in big data scenarios** ([src](https://arxiv.org/abs/1706.06296)).
+
+**Kernel trick**: Kernel functions make it possible to operate in a high-dimensional, implicit feature space without computing the coordinates of the data in that space, by simply computing the inner products between the images of all pairs of data in the feature space.
+
+Algorithms capable of operating with kernels include SVM, Gaussian processes, PCA, spectral clustering... Any linear model can be turned into a non-linear model by applying the kernel trick to the model: replacing its features (predictors) by a kernel function.
+"
+http://www.semanlink.net/tag/kernel_method|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/kernel_method|describedBy|https://en.wikipedia.org/wiki/Kernel_method
+http://www.semanlink.net/tag/kernel_method|altLabel|Kernel trick
+http://www.semanlink.net/tag/kernel_method|altLabel|Kernel method
+http://www.semanlink.net/tag/kernel_method|uri|http://www.semanlink.net/tag/kernel_method
+http://www.semanlink.net/tag/kernel_method|broader_prefLabel|Algorithmes
+http://www.semanlink.net/tag/garamantes|creationTime|2021-02-17T17:29:47Z
+http://www.semanlink.net/tag/garamantes|prefLabel|Garamantes
+http://www.semanlink.net/tag/garamantes|broader|http://www.semanlink.net/tag/antiquite_africaine
+http://www.semanlink.net/tag/garamantes|creationDate|2021-02-17
+http://www.semanlink.net/tag/garamantes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/garamantes|describedBy|https://fr.wikipedia.org/wiki/Garamantes
+http://www.semanlink.net/tag/garamantes|uri|http://www.semanlink.net/tag/garamantes
+http://www.semanlink.net/tag/garamantes|broader_prefLabel|Archéologie africaine
+http://www.semanlink.net/tag/garamantes|broader_related|http://www.semanlink.net/tag/art_d_afrique
+http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|creationTime|2007-09-18T22:08:44Z
+http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|prefLabel|Regroupement familial et test ADN de filiation
+http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|broader|http://www.semanlink.net/tag/test_adn_de_filiation
+http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|broader|http://www.semanlink.net/tag/sarkozy_immigration
+http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|broader|http://www.semanlink.net/tag/regroupement_familial
+http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|broader|http://www.semanlink.net/tag/ca_craint
+http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|creationDate|2007-09-18
+http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|uri|http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation +http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|broader_prefLabel|Test ADN de filiation +http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|broader_prefLabel|Sarkozy : immigration +http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|broader_prefLabel|Regroupement familial +http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation|broader_prefLabel|Ca craint +http://www.semanlink.net/tag/nlp_princeton|creationTime|2021-09-30T14:53:08Z +http://www.semanlink.net/tag/nlp_princeton|prefLabel|NLP@Princeton +http://www.semanlink.net/tag/nlp_princeton|broader|http://www.semanlink.net/tag/princeton +http://www.semanlink.net/tag/nlp_princeton|creationDate|2021-09-30 +http://www.semanlink.net/tag/nlp_princeton|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_princeton|uri|http://www.semanlink.net/tag/nlp_princeton +http://www.semanlink.net/tag/nlp_princeton|broader_prefLabel|Princeton +http://www.semanlink.net/tag/nlp_princeton|broader_related|http://www.semanlink.net/tag/sanjeev_arora +http://www.semanlink.net/tag/knowbert|creationTime|2020-05-13T02:02:25Z +http://www.semanlink.net/tag/knowbert|prefLabel|KnowBert +http://www.semanlink.net/tag/knowbert|broader|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/knowbert|broader|http://www.semanlink.net/tag/allen_institute_for_ai_a2i +http://www.semanlink.net/tag/knowbert|broader|http://www.semanlink.net/tag/bert_kb +http://www.semanlink.net/tag/knowbert|broader|http://www.semanlink.net/tag/contextualised_word_representations +http://www.semanlink.net/tag/knowbert|broader|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/knowbert|broader|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/tag/knowbert|creationDate|2020-05-13 +http://www.semanlink.net/tag/knowbert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowbert|uri|http://www.semanlink.net/tag/knowbert +http://www.semanlink.net/tag/knowbert|broader_prefLabel|Knowledge-driven embeddings +http://www.semanlink.net/tag/knowbert|broader_prefLabel|Allen Institute for AI (A2I) +http://www.semanlink.net/tag/knowbert|broader_prefLabel|BERT + KB +http://www.semanlink.net/tag/knowbert|broader_prefLabel|Contextualized word representations +http://www.semanlink.net/tag/knowbert|broader_prefLabel|BERT +http://www.semanlink.net/tag/knowbert|broader_prefLabel|Knowledge Graphs in NLP +http://www.semanlink.net/tag/knowbert|broader_altLabel|BERT + Knowledge Graphs +http://www.semanlink.net/tag/knowbert|broader_altLabel|BERT + Knowledge Bases +http://www.semanlink.net/tag/knowbert|broader_altLabel|Contextualized word embeddings +http://www.semanlink.net/tag/knowbert|broader_related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/knowbert|broader_related|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/tag/knowbert|broader_related|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/knowbert|broader_related|http://www.semanlink.net/tag/word_embedding 
+http://www.semanlink.net/tag/knowbert|broader_related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/pedro_almodovar|creationTime|2009-01-31T23:48:43Z +http://www.semanlink.net/tag/pedro_almodovar|prefLabel|Pedro Almodóvar +http://www.semanlink.net/tag/pedro_almodovar|broader|http://www.semanlink.net/tag/realisateur +http://www.semanlink.net/tag/pedro_almodovar|broader|http://www.semanlink.net/tag/espagne +http://www.semanlink.net/tag/pedro_almodovar|creationDate|2009-01-31 +http://www.semanlink.net/tag/pedro_almodovar|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pedro_almodovar|type|http://xmlns.com/foaf/0.1/Person +http://www.semanlink.net/tag/pedro_almodovar|describedBy|https://es.wikipedia.org/wiki/Pedro_Almod%C3%B3var +http://www.semanlink.net/tag/pedro_almodovar|uri|http://www.semanlink.net/tag/pedro_almodovar +http://www.semanlink.net/tag/pedro_almodovar|broader_prefLabel|Réalisateur +http://www.semanlink.net/tag/pedro_almodovar|broader_prefLabel|Espagne +http://www.semanlink.net/tag/pedro_almodovar|broader_altLabel|Cinéaste +http://www.semanlink.net/tag/credit_default_swap|creationTime|2012-11-02T00:41:45Z +http://www.semanlink.net/tag/credit_default_swap|prefLabel|Credit default swap +http://www.semanlink.net/tag/credit_default_swap|broader|http://www.semanlink.net/tag/marches_financiers +http://www.semanlink.net/tag/credit_default_swap|creationDate|2012-11-02 +http://www.semanlink.net/tag/credit_default_swap|comment|produit financier assurant contre le risque de non-remboursement d'un agent économique +http://www.semanlink.net/tag/credit_default_swap|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/credit_default_swap|uri|http://www.semanlink.net/tag/credit_default_swap +http://www.semanlink.net/tag/credit_default_swap|broader_prefLabel|Marchés financiers +http://www.semanlink.net/tag/biotech_industry|prefLabel|Biotech industry +http://www.semanlink.net/tag/biotech_industry|broader|http://www.semanlink.net/tag/biotechnologies +http://www.semanlink.net/tag/biotech_industry|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/biotech_industry|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/biotech_industry|altLabel|Firme biotechnologique +http://www.semanlink.net/tag/biotech_industry|uri|http://www.semanlink.net/tag/biotech_industry +http://www.semanlink.net/tag/biotech_industry|broader_prefLabel|Biotechnologies +http://www.semanlink.net/tag/biotech_industry|broader_prefLabel|Biotechnologies +http://www.semanlink.net/tag/biotech_industry|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/masakhane|creationTime|2021-08-26T14:59:55Z +http://www.semanlink.net/tag/masakhane|prefLabel|Masakhane +http://www.semanlink.net/tag/masakhane|broader|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/tag/masakhane|related|http://www.semanlink.net/tag/neural_machine_translation +http://www.semanlink.net/tag/masakhane|creationDate|2021-08-26 +http://www.semanlink.net/tag/masakhane|comment|> A grassroots NLP community for Africa, by Africans +http://www.semanlink.net/tag/masakhane|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/masakhane|homepage|https://www.masakhane.io +http://www.semanlink.net/tag/masakhane|uri|http://www.semanlink.net/tag/masakhane +http://www.semanlink.net/tag/masakhane|broader_prefLabel|NLP 4 Africa 
+http://www.semanlink.net/tag/masakhane|broader_altLabel|Africa - NLP +http://www.semanlink.net/tag/masakhane|broader_altLabel|Afrique - NLP +http://www.semanlink.net/tag/owl_ontology|creationTime|2014-10-26T12:49:05Z +http://www.semanlink.net/tag/owl_ontology|prefLabel|OWL ontology +http://www.semanlink.net/tag/owl_ontology|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/owl_ontology|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owl_ontology|creationDate|2014-10-26 +http://www.semanlink.net/tag/owl_ontology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owl_ontology|uri|http://www.semanlink.net/tag/owl_ontology +http://www.semanlink.net/tag/owl_ontology|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/owl_ontology|broader_prefLabel|OWL +http://www.semanlink.net/tag/owl_ontology|broader_altLabel|Ontology +http://www.semanlink.net/tag/owl_ontology|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/clerezza|creationTime|2011-09-09T21:51:42Z +http://www.semanlink.net/tag/clerezza|prefLabel|Clerezza +http://www.semanlink.net/tag/clerezza|broader|http://www.semanlink.net/tag/restful_semantic_web_services +http://www.semanlink.net/tag/clerezza|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/clerezza|related|http://www.semanlink.net/tag/henry_story +http://www.semanlink.net/tag/clerezza|creationDate|2011-09-09 +http://www.semanlink.net/tag/clerezza|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/clerezza|uri|http://www.semanlink.net/tag/clerezza +http://www.semanlink.net/tag/clerezza|broader_prefLabel|Restful semantic web services +http://www.semanlink.net/tag/clerezza|broader_prefLabel|apache.org +http://www.semanlink.net/tag/coronavirus|creationTime|2020-02-16T11:12:55Z +http://www.semanlink.net/tag/coronavirus|prefLabel|Covid19 +http://www.semanlink.net/tag/coronavirus|broader|http://www.semanlink.net/tag/pandemie +http://www.semanlink.net/tag/coronavirus|broader|http://www.semanlink.net/tag/virus +http://www.semanlink.net/tag/coronavirus|creationDate|2020-02-16 +http://www.semanlink.net/tag/coronavirus|comment|La vengeance du pangolin, et de Roseline Bachelot +http://www.semanlink.net/tag/coronavirus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coronavirus|altLabel|covid-19 +http://www.semanlink.net/tag/coronavirus|altLabel|Covid +http://www.semanlink.net/tag/coronavirus|altLabel|Coronavirus +http://www.semanlink.net/tag/coronavirus|uri|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/tag/coronavirus|broader_prefLabel|Pandémie +http://www.semanlink.net/tag/coronavirus|broader_prefLabel|Virus +http://www.semanlink.net/tag/suede|prefLabel|Suède +http://www.semanlink.net/tag/suede|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/suede|broader|http://www.semanlink.net/tag/scandinavie +http://www.semanlink.net/tag/suede|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/suede|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/suede|uri|http://www.semanlink.net/tag/suede +http://www.semanlink.net/tag/suede|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/suede|broader_prefLabel|Scandinavie +http://www.semanlink.net/tag/suede|broader_prefLabel|Europe 
+http://www.semanlink.net/tag/surprises_me|creationTime|2017-11-21T00:32:34Z +http://www.semanlink.net/tag/surprises_me|prefLabel|Surprising +http://www.semanlink.net/tag/surprises_me|creationDate|2017-11-21 +http://www.semanlink.net/tag/surprises_me|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/surprises_me|uri|http://www.semanlink.net/tag/surprises_me +http://www.semanlink.net/tag/mac_dev|prefLabel|Mac dev +http://www.semanlink.net/tag/mac_dev|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/mac_dev|broader|http://www.semanlink.net/tag/macintosh +http://www.semanlink.net/tag/mac_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mac_dev|uri|http://www.semanlink.net/tag/mac_dev +http://www.semanlink.net/tag/mac_dev|broader_prefLabel|Dev +http://www.semanlink.net/tag/mac_dev|broader_prefLabel|Macintosh +http://www.semanlink.net/tag/low_tech|creationTime|2021-03-14T17:56:12Z +http://www.semanlink.net/tag/low_tech|prefLabel|Low-Tech +http://www.semanlink.net/tag/low_tech|creationDate|2021-03-14 +http://www.semanlink.net/tag/low_tech|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/low_tech|uri|http://www.semanlink.net/tag/low_tech +http://www.semanlink.net/tag/transfer_learning_in_nlp|creationTime|2018-06-12T09:17:29Z +http://www.semanlink.net/tag/transfer_learning_in_nlp|prefLabel|Transfer learning in NLP +http://www.semanlink.net/tag/transfer_learning_in_nlp|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/transfer_learning_in_nlp|broader|http://www.semanlink.net/tag/transfer_learning +http://www.semanlink.net/tag/transfer_learning_in_nlp|related|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/transfer_learning_in_nlp|related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/transfer_learning_in_nlp|creationDate|2018-06-12 +http://www.semanlink.net/tag/transfer_learning_in_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/transfer_learning_in_nlp|uri|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/tag/transfer_learning_in_nlp|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/transfer_learning_in_nlp|broader_prefLabel|Transfer learning +http://www.semanlink.net/tag/transfer_learning_in_nlp|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/transfer_learning_in_nlp|broader_related|http://www.semanlink.net/tag/multi_task_learning +http://www.semanlink.net/tag/hebbian_theory|creationTime|2017-04-28T22:54:00Z +http://www.semanlink.net/tag/hebbian_theory|prefLabel|Hebb's rule +http://www.semanlink.net/tag/hebbian_theory|broader|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/hebbian_theory|creationDate|2017-04-28 +http://www.semanlink.net/tag/hebbian_theory|comment|fire together, wire together +http://www.semanlink.net/tag/hebbian_theory|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hebbian_theory|describedBy|https://en.wikipedia.org/wiki/Hebbian_theory +http://www.semanlink.net/tag/hebbian_theory|altLabel|Hebbian learning +http://www.semanlink.net/tag/hebbian_theory|altLabel|Hebbian theory +http://www.semanlink.net/tag/hebbian_theory|uri|http://www.semanlink.net/tag/hebbian_theory +http://www.semanlink.net/tag/hebbian_theory|broader_prefLabel|Neuroscience 
+http://www.semanlink.net/tag/treeview|prefLabel|Treeview +http://www.semanlink.net/tag/treeview|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/treeview|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/treeview|uri|http://www.semanlink.net/tag/treeview +http://www.semanlink.net/tag/treeview|broader_prefLabel|Dev +http://www.semanlink.net/tag/analyse_semantique|creationTime|2013-08-19T16:34:25Z +http://www.semanlink.net/tag/analyse_semantique|prefLabel|Analyse sémantique +http://www.semanlink.net/tag/analyse_semantique|broader|http://www.semanlink.net/tag/semantic_technology +http://www.semanlink.net/tag/analyse_semantique|creationDate|2013-08-19 +http://www.semanlink.net/tag/analyse_semantique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/analyse_semantique|uri|http://www.semanlink.net/tag/analyse_semantique +http://www.semanlink.net/tag/analyse_semantique|broader_prefLabel|Semantic technology +http://www.semanlink.net/tag/wikipedia|prefLabel|Wikipedia +http://www.semanlink.net/tag/wikipedia|broader|http://www.semanlink.net/tag/encyclopedie_collaborative +http://www.semanlink.net/tag/wikipedia|broader|http://www.semanlink.net/tag/wiki +http://www.semanlink.net/tag/wikipedia|broader|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/wikipedia|related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/wikipedia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wikipedia|uri|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/wikipedia|broader_prefLabel|Encyclopédie collaborative +http://www.semanlink.net/tag/wikipedia|broader_prefLabel|Wiki +http://www.semanlink.net/tag/wikipedia|broader_prefLabel|Knowledge bases +http://www.semanlink.net/tag/wikipedia|broader_altLabel|Knowledge Base +http://www.semanlink.net/tag/wikipedia|broader_related|http://www.semanlink.net/tag/web_2_0 +http://www.semanlink.net/tag/souverainete_numerique|creationTime|2014-07-03T11:49:42Z +http://www.semanlink.net/tag/souverainete_numerique|prefLabel|Souveraineté numérique +http://www.semanlink.net/tag/souverainete_numerique|creationDate|2014-07-03 +http://www.semanlink.net/tag/souverainete_numerique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/souverainete_numerique|uri|http://www.semanlink.net/tag/souverainete_numerique +http://www.semanlink.net/tag/chrome|creationTime|2010-11-23T14:08:39Z +http://www.semanlink.net/tag/chrome|prefLabel|Chrome +http://www.semanlink.net/tag/chrome|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/chrome|broader|http://www.semanlink.net/tag/brouteur +http://www.semanlink.net/tag/chrome|creationDate|2010-11-23 +http://www.semanlink.net/tag/chrome|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chrome|uri|http://www.semanlink.net/tag/chrome +http://www.semanlink.net/tag/chrome|broader_prefLabel|Google +http://www.semanlink.net/tag/chrome|broader_prefLabel|Brouteur +http://www.semanlink.net/tag/chrome|broader_altLabel|Browser +http://www.semanlink.net/tag/chrome|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/rene_vautier|creationTime|2009-03-11T01:13:37Z +http://www.semanlink.net/tag/rene_vautier|prefLabel|René Vautier +http://www.semanlink.net/tag/rene_vautier|broader|http://www.semanlink.net/tag/anticolonialisme 
+http://www.semanlink.net/tag/rene_vautier|creationDate|2009-03-11 +http://www.semanlink.net/tag/rene_vautier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rene_vautier|type|http://xmlns.com/foaf/0.1/Person +http://www.semanlink.net/tag/rene_vautier|describedBy|https://fr.wikipedia.org/wiki/Rene_Vautier +http://www.semanlink.net/tag/rene_vautier|uri|http://www.semanlink.net/tag/rene_vautier +http://www.semanlink.net/tag/rene_vautier|broader_prefLabel|Anticolonialisme +http://www.semanlink.net/tag/industrie_nucleaire|prefLabel|Industrie nucléaire +http://www.semanlink.net/tag/industrie_nucleaire|broader|http://www.semanlink.net/tag/energie +http://www.semanlink.net/tag/industrie_nucleaire|broader|http://www.semanlink.net/tag/industrie +http://www.semanlink.net/tag/industrie_nucleaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/industrie_nucleaire|altLabel|Nucléaire +http://www.semanlink.net/tag/industrie_nucleaire|uri|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/tag/industrie_nucleaire|broader_prefLabel|Energie +http://www.semanlink.net/tag/industrie_nucleaire|broader_prefLabel|industrie +http://www.semanlink.net/tag/skos_editor|creationTime|2010-05-07T11:52:30Z +http://www.semanlink.net/tag/skos_editor|prefLabel|SKOS editor +http://www.semanlink.net/tag/skos_editor|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/skos_editor|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/skos_editor|broader|http://www.semanlink.net/tag/skos +http://www.semanlink.net/tag/skos_editor|creationDate|2010-05-07 +http://www.semanlink.net/tag/skos_editor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/skos_editor|uri|http://www.semanlink.net/tag/skos_editor +http://www.semanlink.net/tag/skos_editor|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/skos_editor|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/skos_editor|broader_prefLabel|SKOS +http://www.semanlink.net/tag/skos_editor|broader_related|http://www.semanlink.net/tag/thesaurus +http://www.semanlink.net/tag/moat|creationTime|2008-01-20T15:43:12Z +http://www.semanlink.net/tag/moat|prefLabel|MOAT +http://www.semanlink.net/tag/moat|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/moat|broader|http://www.semanlink.net/tag/rdf_vocabularies +http://www.semanlink.net/tag/moat|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/moat|broader|http://www.semanlink.net/tag/alexandre_passant +http://www.semanlink.net/tag/moat|creationDate|2008-01-20 +http://www.semanlink.net/tag/moat|comment|"Semantic Web framework to publish semantically-annotated content from free-tagging. +Provides a way for users to define meaning(s) of their tag(s) using URIs of Semantic Web resources (such as URIs from dbpedia, geonames … or any knowledge base), and then annotate content with those URIs rather than free-text tags. Tag meanings can be shared between people, providing an architecture of participation to define and exchange potential meanings of tags within a community of users." 
+http://www.semanlink.net/tag/moat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/moat|homepage|http://moat-project.org/ +http://www.semanlink.net/tag/moat|uri|http://www.semanlink.net/tag/moat +http://www.semanlink.net/tag/moat|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/moat|broader_prefLabel|RDF Vocabularies +http://www.semanlink.net/tag/moat|broader_prefLabel|Tagging +http://www.semanlink.net/tag/moat|broader_prefLabel|Alexandre Passant +http://www.semanlink.net/tag/moat|broader_altLabel|LD +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/moat|broader_related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/generative_model|creationTime|2017-08-15T21:01:08Z +http://www.semanlink.net/tag/generative_model|prefLabel|Generative model +http://www.semanlink.net/tag/generative_model|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/generative_model|creationDate|2017-08-15 +http://www.semanlink.net/tag/generative_model|comment|a model for randomly generating observable data values, typically given some hidden parameters. It specifies a joint probability distribution over observation and label sequences. 
+http://www.semanlink.net/tag/generative_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/generative_model|describedBy|https://en.wikipedia.org/wiki/Generative_model +http://www.semanlink.net/tag/generative_model|altLabel|Generative modeling +http://www.semanlink.net/tag/generative_model|uri|http://www.semanlink.net/tag/generative_model +http://www.semanlink.net/tag/generative_model|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/multilingual_nlp|creationTime|2020-08-01T18:54:00Z +http://www.semanlink.net/tag/multilingual_nlp|prefLabel|Multilingual NLP +http://www.semanlink.net/tag/multilingual_nlp|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/multilingual_nlp|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/multilingual_nlp|creationDate|2020-08-01 +http://www.semanlink.net/tag/multilingual_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multilingual_nlp|uri|http://www.semanlink.net/tag/multilingual_nlp +http://www.semanlink.net/tag/multilingual_nlp|broader_prefLabel|Langues +http://www.semanlink.net/tag/multilingual_nlp|broader_prefLabel|NLP +http://www.semanlink.net/tag/multilingual_nlp|broader_altLabel|TALN +http://www.semanlink.net/tag/multilingual_nlp|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/multilingual_nlp|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/chimere|creationTime|2013-09-22T11:34:04Z +http://www.semanlink.net/tag/chimere|prefLabel|Chimère +http://www.semanlink.net/tag/chimere|creationDate|2013-09-22 +http://www.semanlink.net/tag/chimere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chimere|uri|http://www.semanlink.net/tag/chimere +http://www.semanlink.net/tag/fbi|creationTime|2013-08-06T11:06:09Z +http://www.semanlink.net/tag/fbi|prefLabel|FBI +http://www.semanlink.net/tag/fbi|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/fbi|creationDate|2013-08-06 +http://www.semanlink.net/tag/fbi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fbi|uri|http://www.semanlink.net/tag/fbi +http://www.semanlink.net/tag/fbi|broader_prefLabel|USA +http://www.semanlink.net/tag/fbi|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/fbi|broader_altLabel|United States +http://www.semanlink.net/tag/ip_ir_ml_ia|creationTime|2019-02-09T11:21:49Z +http://www.semanlink.net/tag/ip_ir_ml_ia|prefLabel|AI 4 IP +http://www.semanlink.net/tag/ip_ir_ml_ia|broader|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.semanlink.net/tag/ip_ir_ml_ia|broader|http://www.semanlink.net/tag/nlp_juridique +http://www.semanlink.net/tag/ip_ir_ml_ia|broader|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/tag/ip_ir_ml_ia|broader|http://www.semanlink.net/tag/ml_domaines_d_application +http://www.semanlink.net/tag/ip_ir_ml_ia|creationDate|2019-02-09 +http://www.semanlink.net/tag/ip_ir_ml_ia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ip_ir_ml_ia|uri|http://www.semanlink.net/tag/ip_ir_ml_ia +http://www.semanlink.net/tag/ip_ir_ml_ia|broader_prefLabel|Propriété intellectuelle +http://www.semanlink.net/tag/ip_ir_ml_ia|broader_prefLabel|NLP + juridique +http://www.semanlink.net/tag/ip_ir_ml_ia|broader_prefLabel|NLP: use cases +http://www.semanlink.net/tag/ip_ir_ml_ia|broader_prefLabel|IA/ML: 
domaines d'application +http://www.semanlink.net/tag/ip_ir_ml_ia|broader_altLabel|NLP: applications +http://www.semanlink.net/tag/risk_analysis|creationTime|2020-09-05T18:59:26Z +http://www.semanlink.net/tag/risk_analysis|prefLabel|Risk analysis +http://www.semanlink.net/tag/risk_analysis|creationDate|2020-09-05 +http://www.semanlink.net/tag/risk_analysis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/risk_analysis|uri|http://www.semanlink.net/tag/risk_analysis +http://www.semanlink.net/tag/pensee|prefLabel|Pensée +http://www.semanlink.net/tag/pensee|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/pensee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pensee|uri|http://www.semanlink.net/tag/pensee +http://www.semanlink.net/tag/pensee|broader_prefLabel|Brain +http://www.semanlink.net/tag/pensee|broader_altLabel|Cerveau +http://www.semanlink.net/tag/pensee|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/blackbox_nlp|creationTime|2018-11-01T16:58:57Z +http://www.semanlink.net/tag/blackbox_nlp|prefLabel|Blackbox NLP +http://www.semanlink.net/tag/blackbox_nlp|broader|http://www.semanlink.net/tag/ai_black_box +http://www.semanlink.net/tag/blackbox_nlp|broader|http://www.semanlink.net/tag/explainable_nlp +http://www.semanlink.net/tag/blackbox_nlp|creationDate|2018-11-01 +http://www.semanlink.net/tag/blackbox_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blackbox_nlp|uri|http://www.semanlink.net/tag/blackbox_nlp +http://www.semanlink.net/tag/blackbox_nlp|broader_prefLabel|AI black box +http://www.semanlink.net/tag/blackbox_nlp|broader_prefLabel|Explainable NLP +http://www.semanlink.net/tag/learning|creationTime|2019-09-07T11:09:21Z +http://www.semanlink.net/tag/learning|prefLabel|Learning +http://www.semanlink.net/tag/learning|creationDate|2019-09-07 +http://www.semanlink.net/tag/learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/learning|uri|http://www.semanlink.net/tag/learning +http://www.semanlink.net/tag/semantic_web_critique|prefLabel|Semantic Web : critique +http://www.semanlink.net/tag/semantic_web_critique|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_critique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_critique|uri|http://www.semanlink.net/tag/semantic_web_critique +http://www.semanlink.net/tag/semantic_web_critique|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_critique|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_critique|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/feature_hashing|creationTime|2015-10-20T15:42:17Z +http://www.semanlink.net/tag/feature_hashing|prefLabel|"Feature hashing (""Hashing trick"")" +http://www.semanlink.net/tag/feature_hashing|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/feature_hashing|creationDate|2015-10-20 +http://www.semanlink.net/tag/feature_hashing|comment|"fast and space-efficient way of vectorizing categorical features. 
Applies a hash function to the features to determine their column index + + + + + +" +http://www.semanlink.net/tag/feature_hashing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/feature_hashing|describedBy|https://en.wikipedia.org/wiki/Feature_hashing +http://www.semanlink.net/tag/feature_hashing|altLabel|Hashing trick +http://www.semanlink.net/tag/feature_hashing|altLabel|Feature hashing +http://www.semanlink.net/tag/feature_hashing|uri|http://www.semanlink.net/tag/feature_hashing +http://www.semanlink.net/tag/feature_hashing|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/w3c_working_draft|creationTime|2007-11-07T17:02:55Z +http://www.semanlink.net/tag/w3c_working_draft|prefLabel|W3C Working Draft +http://www.semanlink.net/tag/w3c_working_draft|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/w3c_working_draft|creationDate|2007-11-07 +http://www.semanlink.net/tag/w3c_working_draft|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/w3c_working_draft|uri|http://www.semanlink.net/tag/w3c_working_draft +http://www.semanlink.net/tag/w3c_working_draft|broader_prefLabel|W3C +http://www.semanlink.net/tag/w3c_working_draft|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/w3c_working_draft|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/mathematiques|prefLabel|Mathématiques +http://www.semanlink.net/tag/mathematiques|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/mathematiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mathematiques|altLabel|Math +http://www.semanlink.net/tag/mathematiques|uri|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/mathematiques|broader_prefLabel|Science +http://www.semanlink.net/tag/mathematiques|broader_altLabel|sciences +http://www.semanlink.net/tag/pillage_du_palais_d_ete|creationTime|2013-04-29T01:19:47Z +http://www.semanlink.net/tag/pillage_du_palais_d_ete|prefLabel|Pillage du palais d'été +http://www.semanlink.net/tag/pillage_du_palais_d_ete|broader|http://www.semanlink.net/tag/guerres_coloniales +http://www.semanlink.net/tag/pillage_du_palais_d_ete|broader|http://www.semanlink.net/tag/pekin +http://www.semanlink.net/tag/pillage_du_palais_d_ete|broader|http://www.semanlink.net/tag/histoire_coloniale +http://www.semanlink.net/tag/pillage_du_palais_d_ete|broader|http://www.semanlink.net/tag/histoire_de_la_chine +http://www.semanlink.net/tag/pillage_du_palais_d_ete|creationDate|2013-04-29 +http://www.semanlink.net/tag/pillage_du_palais_d_ete|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pillage_du_palais_d_ete|uri|http://www.semanlink.net/tag/pillage_du_palais_d_ete +http://www.semanlink.net/tag/pillage_du_palais_d_ete|broader_prefLabel|Guerres coloniales +http://www.semanlink.net/tag/pillage_du_palais_d_ete|broader_prefLabel|Pékin +http://www.semanlink.net/tag/pillage_du_palais_d_ete|broader_prefLabel|Histoire coloniale +http://www.semanlink.net/tag/pillage_du_palais_d_ete|broader_prefLabel|Histoire de la Chine +http://www.semanlink.net/tag/sw_at_renault|creationTime|2007-04-03T22:45:49Z +http://www.semanlink.net/tag/sw_at_renault|prefLabel|SW at Renault +http://www.semanlink.net/tag/sw_at_renault|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/sw_at_renault|creationDate|2007-04-03 
+http://www.semanlink.net/tag/sw_at_renault|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sw_at_renault|altLabel|Semantic Web@Renault +http://www.semanlink.net/tag/sw_at_renault|uri|http://www.semanlink.net/tag/sw_at_renault +http://www.semanlink.net/tag/sw_at_renault|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/sw_at_renault|broader_altLabel|sw +http://www.semanlink.net/tag/sw_at_renault|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/xlnet|creationTime|2019-08-02T17:46:26Z +http://www.semanlink.net/tag/xlnet|prefLabel|XLNet +http://www.semanlink.net/tag/xlnet|broader|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/xlnet|related|http://www.semanlink.net/tag/ruslan_salakhutdinov +http://www.semanlink.net/tag/xlnet|creationDate|2019-08-02 +http://www.semanlink.net/tag/xlnet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xlnet|uri|http://www.semanlink.net/tag/xlnet +http://www.semanlink.net/tag/xlnet|broader_prefLabel|Pre-Trained Language Models +http://www.semanlink.net/tag/xlnet|broader_altLabel|PreTrained Language Models +http://www.semanlink.net/tag/xlnet|broader_altLabel|Deep pre-training in NLP +http://www.semanlink.net/tag/xlnet|broader_related|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/grands_problemes|prefLabel|Grands problèmes +http://www.semanlink.net/tag/grands_problemes|broader|http://www.semanlink.net/tag/etat_du_monde +http://www.semanlink.net/tag/grands_problemes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grands_problemes|uri|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/grands_problemes|broader_prefLabel|Etat du monde +http://www.semanlink.net/tag/commercialising_the_semantic_web|creationTime|2008-05-17T23:12:34Z +http://www.semanlink.net/tag/commercialising_the_semantic_web|prefLabel|Commercialising the Semantic Web +http://www.semanlink.net/tag/commercialising_the_semantic_web|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/commercialising_the_semantic_web|creationDate|2008-05-17 +http://www.semanlink.net/tag/commercialising_the_semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/commercialising_the_semantic_web|uri|http://www.semanlink.net/tag/commercialising_the_semantic_web +http://www.semanlink.net/tag/commercialising_the_semantic_web|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/commercialising_the_semantic_web|broader_altLabel|sw +http://www.semanlink.net/tag/commercialising_the_semantic_web|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/topic_modeling|creationTime|2012-09-20T10:47:34Z +http://www.semanlink.net/tag/topic_modeling|prefLabel|Topic Modeling +http://www.semanlink.net/tag/topic_modeling|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/topic_modeling|broader|http://www.semanlink.net/tag/analyse_semantique +http://www.semanlink.net/tag/topic_modeling|broader|http://www.semanlink.net/tag/distributional_semantics +http://www.semanlink.net/tag/topic_modeling|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/topic_modeling|related|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/topic_modeling|related|http://www.semanlink.net/tag/andrew_mccallum 
+http://www.semanlink.net/tag/topic_modeling|related|http://www.semanlink.net/tag/nlp_and_humanities +http://www.semanlink.net/tag/topic_modeling|related|http://www.semanlink.net/tag/mallet +http://www.semanlink.net/tag/topic_modeling|creationDate|2012-09-20 +http://www.semanlink.net/tag/topic_modeling|comment|"A statistical model for discovering the abstract ""topics"" that occur in a collection of documents. + + +" +http://www.semanlink.net/tag/topic_modeling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/topic_modeling|describedBy|https://en.wikipedia.org/wiki/Topic_model +http://www.semanlink.net/tag/topic_modeling|altLabel|Topic model +http://www.semanlink.net/tag/topic_modeling|uri|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/topic_modeling|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/topic_modeling|broader_prefLabel|Analyse sémantique +http://www.semanlink.net/tag/topic_modeling|broader_prefLabel|Distributional semantics +http://www.semanlink.net/tag/topic_modeling|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/topic_modeling|broader_related|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/topic_modeling|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/knowledge_graph_construction|creationTime|2018-11-04T18:11:23Z +http://www.semanlink.net/tag/knowledge_graph_construction|prefLabel|Knowledge Graph Construction +http://www.semanlink.net/tag/knowledge_graph_construction|broader|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/knowledge_graph_construction|broader|http://www.semanlink.net/tag/kg_tasks +http://www.semanlink.net/tag/knowledge_graph_construction|creationDate|2018-11-04 +http://www.semanlink.net/tag/knowledge_graph_construction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_graph_construction|uri|http://www.semanlink.net/tag/knowledge_graph_construction +http://www.semanlink.net/tag/knowledge_graph_construction|broader_prefLabel|Knowledge Graph Completion +http://www.semanlink.net/tag/knowledge_graph_construction|broader_prefLabel|KG: tasks +http://www.semanlink.net/tag/knowledge_graph_construction|broader_altLabel|Knowledge graphs: tasks +http://www.semanlink.net/tag/knowledge_graph_construction|broader_related|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/hoax|creationTime|2007-05-09T00:02:02Z +http://www.semanlink.net/tag/hoax|prefLabel|Hoax +http://www.semanlink.net/tag/hoax|creationDate|2007-05-09 +http://www.semanlink.net/tag/hoax|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hoax|uri|http://www.semanlink.net/tag/hoax +http://www.semanlink.net/tag/tahar_ben_jelloun|creationTime|2010-04-10T15:29:27Z +http://www.semanlink.net/tag/tahar_ben_jelloun|prefLabel|Tahar Ben Jelloun +http://www.semanlink.net/tag/tahar_ben_jelloun|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/tahar_ben_jelloun|broader|http://www.semanlink.net/tag/maroc +http://www.semanlink.net/tag/tahar_ben_jelloun|creationDate|2010-04-10 +http://www.semanlink.net/tag/tahar_ben_jelloun|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tahar_ben_jelloun|describedBy|https://fr.wikipedia.org/wiki/Tahar_Ben_Jelloun 
+http://www.semanlink.net/tag/tahar_ben_jelloun|uri|http://www.semanlink.net/tag/tahar_ben_jelloun +http://www.semanlink.net/tag/tahar_ben_jelloun|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/tahar_ben_jelloun|broader_prefLabel|Maroc +http://www.semanlink.net/tag/devops|creationTime|2014-01-24T14:36:23Z +http://www.semanlink.net/tag/devops|prefLabel|DevOps +http://www.semanlink.net/tag/devops|creationDate|2014-01-24 +http://www.semanlink.net/tag/devops|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/devops|uri|http://www.semanlink.net/tag/devops +http://www.semanlink.net/tag/calais|creationTime|2008-06-04T22:44:16Z +http://www.semanlink.net/tag/calais|prefLabel|Calais +http://www.semanlink.net/tag/calais|broader|http://www.semanlink.net/tag/thomson_reuters +http://www.semanlink.net/tag/calais|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/calais|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/calais|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/calais|creationDate|2008-06-04 +http://www.semanlink.net/tag/calais|comment|The web service is an API that accepts unstructured text (like news articles, blog postings, etc.), processes them using natural language processing and machine learning algorithms, and returns RDF-formatted entities, facts and events. +http://www.semanlink.net/tag/calais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/calais|homepage|http://www.opencalais.com/ +http://www.semanlink.net/tag/calais|describedBy|http://www.opencalais.com/ +http://www.semanlink.net/tag/calais|uri|http://www.semanlink.net/tag/calais +http://www.semanlink.net/tag/calais|broader_prefLabel|Thomson Reuters +http://www.semanlink.net/tag/calais|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/calais|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/calais|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/calais|broader_altLabel|LOD +http://www.semanlink.net/tag/calais|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/calais|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/calais|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/calais|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/xvie_siecle|creationTime|2021-04-11T12:34:31Z +http://www.semanlink.net/tag/xvie_siecle|prefLabel|XVIe siècle +http://www.semanlink.net/tag/xvie_siecle|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/xvie_siecle|related|http://www.semanlink.net/tag/renaissance +http://www.semanlink.net/tag/xvie_siecle|creationDate|2021-04-11 +http://www.semanlink.net/tag/xvie_siecle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xvie_siecle|uri|http://www.semanlink.net/tag/xvie_siecle +http://www.semanlink.net/tag/xvie_siecle|broader_prefLabel|Histoire +http://www.semanlink.net/tag/krill|creationTime|2018-03-13T14:12:03Z +http://www.semanlink.net/tag/krill|prefLabel|Krill +http://www.semanlink.net/tag/krill|broader|http://www.semanlink.net/tag/crustace +http://www.semanlink.net/tag/krill|creationDate|2018-03-13 +http://www.semanlink.net/tag/krill|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/krill|describedBy|https://en.wikipedia.org/wiki/Krill +http://www.semanlink.net/tag/krill|uri|http://www.semanlink.net/tag/krill +http://www.semanlink.net/tag/krill|broader_prefLabel|Crustacé +http://www.semanlink.net/tag/voyage_en_chine|creationTime|2008-04-12T00:26:08Z +http://www.semanlink.net/tag/voyage_en_chine|prefLabel|Voyage en Chine +http://www.semanlink.net/tag/voyage_en_chine|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/voyage_en_chine|creationDate|2008-04-12 +http://www.semanlink.net/tag/voyage_en_chine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/voyage_en_chine|uri|http://www.semanlink.net/tag/voyage_en_chine +http://www.semanlink.net/tag/voyage_en_chine|broader_prefLabel|Chine +http://www.semanlink.net/tag/voyage_en_chine|broader_altLabel|China +http://www.semanlink.net/tag/greasemonkey|prefLabel|Greasemonkey +http://www.semanlink.net/tag/greasemonkey|broader|http://www.semanlink.net/tag/firefox +http://www.semanlink.net/tag/greasemonkey|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/greasemonkey|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/greasemonkey|uri|http://www.semanlink.net/tag/greasemonkey +http://www.semanlink.net/tag/greasemonkey|broader_prefLabel|Firefox +http://www.semanlink.net/tag/greasemonkey|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/greasemonkey|broader_altLabel|js +http://www.semanlink.net/tag/encelade|prefLabel|Encelade +http://www.semanlink.net/tag/encelade|broader|http://www.semanlink.net/tag/saturne +http://www.semanlink.net/tag/encelade|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/encelade|altLabel|Enceladus +http://www.semanlink.net/tag/encelade|uri|http://www.semanlink.net/tag/encelade +http://www.semanlink.net/tag/encelade|broader_prefLabel|Saturne +http://www.semanlink.net/tag/encelade|broader_altLabel|Saturn +http://www.semanlink.net/tag/virtual_currency|creationTime|2016-03-28T12:50:27Z +http://www.semanlink.net/tag/virtual_currency|prefLabel|Digital currency +http://www.semanlink.net/tag/virtual_currency|broader|http://www.semanlink.net/tag/money +http://www.semanlink.net/tag/virtual_currency|creationDate|2016-03-28 +http://www.semanlink.net/tag/virtual_currency|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/virtual_currency|altLabel|Virtual currency +http://www.semanlink.net/tag/virtual_currency|altLabel|Monnaie virtuelle +http://www.semanlink.net/tag/virtual_currency|uri|http://www.semanlink.net/tag/virtual_currency +http://www.semanlink.net/tag/virtual_currency|broader_prefLabel|Money +http://www.semanlink.net/tag/virtual_currency|broader_altLabel|Monnaie +http://www.semanlink.net/tag/automotive_ontology_community_group|creationTime|2015-05-14T12:28:44Z +http://www.semanlink.net/tag/automotive_ontology_community_group|prefLabel|Automotive Ontology Community Group +http://www.semanlink.net/tag/automotive_ontology_community_group|broader|http://www.semanlink.net/tag/automobile_and_w3c +http://www.semanlink.net/tag/automotive_ontology_community_group|related|http://www.semanlink.net/tag/gao +http://www.semanlink.net/tag/automotive_ontology_community_group|related|http://www.semanlink.net/tag/martin_hepp +http://www.semanlink.net/tag/automotive_ontology_community_group|related|http://www.semanlink.net/tag/mirek_sopek 
+http://www.semanlink.net/tag/automotive_ontology_community_group|creationDate|2015-05-14 +http://www.semanlink.net/tag/automotive_ontology_community_group|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/automotive_ontology_community_group|describedBy|https://www.w3.org/community/gao/ +http://www.semanlink.net/tag/automotive_ontology_community_group|uri|http://www.semanlink.net/tag/automotive_ontology_community_group +http://www.semanlink.net/tag/automotive_ontology_community_group|broader_prefLabel|Automotive AND W3C +http://www.semanlink.net/tag/ftp|creationTime|2007-03-16T01:23:58Z +http://www.semanlink.net/tag/ftp|prefLabel|FTP +http://www.semanlink.net/tag/ftp|creationDate|2007-03-16 +http://www.semanlink.net/tag/ftp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ftp|uri|http://www.semanlink.net/tag/ftp +http://www.semanlink.net/tag/edf|creationTime|2008-06-12T08:17:08Z +http://www.semanlink.net/tag/edf|prefLabel|EDF +http://www.semanlink.net/tag/edf|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/edf|broader|http://www.semanlink.net/tag/energie +http://www.semanlink.net/tag/edf|creationDate|2008-06-12 +http://www.semanlink.net/tag/edf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/edf|uri|http://www.semanlink.net/tag/edf +http://www.semanlink.net/tag/edf|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/edf|broader_prefLabel|Energie +http://www.semanlink.net/tag/semantic_media_wiki|creationTime|2008-07-04T14:10:15Z +http://www.semanlink.net/tag/semantic_media_wiki|prefLabel|Semantic Media Wiki +http://www.semanlink.net/tag/semantic_media_wiki|broader|http://www.semanlink.net/tag/semantic_wiki +http://www.semanlink.net/tag/semantic_media_wiki|creationDate|2008-07-04 +http://www.semanlink.net/tag/semantic_media_wiki|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_media_wiki|describedBy|http://semantic-mediawiki.org/wiki/Semantic_MediaWiki +http://www.semanlink.net/tag/semantic_media_wiki|uri|http://www.semanlink.net/tag/semantic_media_wiki +http://www.semanlink.net/tag/semantic_media_wiki|broader_prefLabel|Semantic Wiki +http://www.semanlink.net/tag/publicite_internet|prefLabel|Publicité Internet +http://www.semanlink.net/tag/publicite_internet|broader|http://www.semanlink.net/tag/publicite +http://www.semanlink.net/tag/publicite_internet|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/publicite_internet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/publicite_internet|altLabel|Online adverstising +http://www.semanlink.net/tag/publicite_internet|uri|http://www.semanlink.net/tag/publicite_internet +http://www.semanlink.net/tag/publicite_internet|broader_prefLabel|Publicité +http://www.semanlink.net/tag/publicite_internet|broader_prefLabel|Internet +http://www.semanlink.net/tag/publicite_internet|broader_altLabel|Advertising +http://www.semanlink.net/tag/publicite_internet|broader_altLabel|Pub +http://www.semanlink.net/tag/howto|prefLabel|Howto +http://www.semanlink.net/tag/howto|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/howto|broader|http://www.semanlink.net/tag/howto_tutorial_faq +http://www.semanlink.net/tag/howto|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/howto|uri|http://www.semanlink.net/tag/howto 
+http://www.semanlink.net/tag/howto|broader_prefLabel|Dev +http://www.semanlink.net/tag/howto|broader_prefLabel|Howto, tutorial, FAQ +http://www.semanlink.net/tag/text_preprocessing|creationTime|2018-04-09T13:28:32Z +http://www.semanlink.net/tag/text_preprocessing|prefLabel|Text preprocessing +http://www.semanlink.net/tag/text_preprocessing|broader|http://www.semanlink.net/tag/text_processing +http://www.semanlink.net/tag/text_preprocessing|creationDate|2018-04-09 +http://www.semanlink.net/tag/text_preprocessing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_preprocessing|uri|http://www.semanlink.net/tag/text_preprocessing +http://www.semanlink.net/tag/text_preprocessing|broader_prefLabel|Text processing +http://www.semanlink.net/tag/espionnage|creationTime|2015-05-12T02:05:20Z +http://www.semanlink.net/tag/espionnage|prefLabel|Espionnage +http://www.semanlink.net/tag/espionnage|creationDate|2015-05-12 +http://www.semanlink.net/tag/espionnage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/espionnage|uri|http://www.semanlink.net/tag/espionnage +http://www.semanlink.net/tag/crete_antique|prefLabel|Crète antique +http://www.semanlink.net/tag/crete_antique|broader|http://www.semanlink.net/tag/grece +http://www.semanlink.net/tag/crete_antique|broader|http://www.semanlink.net/tag/crete +http://www.semanlink.net/tag/crete_antique|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/crete_antique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/crete_antique|uri|http://www.semanlink.net/tag/crete_antique +http://www.semanlink.net/tag/crete_antique|broader_prefLabel|Grèce +http://www.semanlink.net/tag/crete_antique|broader_prefLabel|Crète +http://www.semanlink.net/tag/crete_antique|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/revolution_francaise|prefLabel|Révolution française +http://www.semanlink.net/tag/revolution_francaise|broader|http://www.semanlink.net/tag/histoire_de_france +http://www.semanlink.net/tag/revolution_francaise|broader|http://www.semanlink.net/tag/revolution +http://www.semanlink.net/tag/revolution_francaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/revolution_francaise|uri|http://www.semanlink.net/tag/revolution_francaise +http://www.semanlink.net/tag/revolution_francaise|broader_prefLabel|Histoire de France +http://www.semanlink.net/tag/revolution_francaise|broader_prefLabel|Révolution +http://www.semanlink.net/tag/semblog|prefLabel|semblog +http://www.semanlink.net/tag/semblog|broader|http://www.semanlink.net/tag/steve_cayzer +http://www.semanlink.net/tag/semblog|broader|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/semblog|broader|http://www.semanlink.net/tag/blog_software +http://www.semanlink.net/tag/semblog|broader|http://www.semanlink.net/tag/semantic_blog +http://www.semanlink.net/tag/semblog|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semblog|uri|http://www.semanlink.net/tag/semblog +http://www.semanlink.net/tag/semblog|broader_prefLabel|Steve Cayzer +http://www.semanlink.net/tag/semblog|broader_prefLabel|Jena +http://www.semanlink.net/tag/semblog|broader_prefLabel|Blog software +http://www.semanlink.net/tag/semblog|broader_prefLabel|Semantic Blog +http://www.semanlink.net/tag/semblog|broader_related|http://www.semanlink.net/tag/jena 
+http://www.semanlink.net/tag/semblog|broader_related|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/tasmanie|prefLabel|Tasmanie +http://www.semanlink.net/tag/tasmanie|broader|http://www.semanlink.net/tag/australie +http://www.semanlink.net/tag/tasmanie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tasmanie|uri|http://www.semanlink.net/tag/tasmanie +http://www.semanlink.net/tag/tasmanie|broader_prefLabel|Australie +http://www.semanlink.net/tag/itunes|prefLabel|iTunes +http://www.semanlink.net/tag/itunes|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/itunes|broader|http://www.semanlink.net/tag/music_store +http://www.semanlink.net/tag/itunes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/itunes|uri|http://www.semanlink.net/tag/itunes +http://www.semanlink.net/tag/itunes|broader_prefLabel|Apple +http://www.semanlink.net/tag/itunes|broader_prefLabel|Music store +http://www.semanlink.net/tag/france_culture|creationTime|2020-08-01T21:58:40Z +http://www.semanlink.net/tag/france_culture|prefLabel|France Culture +http://www.semanlink.net/tag/france_culture|broader|http://www.semanlink.net/tag/radio +http://www.semanlink.net/tag/france_culture|creationDate|2020-08-01 +http://www.semanlink.net/tag/france_culture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/france_culture|uri|http://www.semanlink.net/tag/france_culture +http://www.semanlink.net/tag/france_culture|broader_prefLabel|Radio +http://www.semanlink.net/tag/xhtml|prefLabel|XHTML +http://www.semanlink.net/tag/xhtml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xhtml|uri|http://www.semanlink.net/tag/xhtml +http://www.semanlink.net/tag/faq|prefLabel|FAQ +http://www.semanlink.net/tag/faq|broader|http://www.semanlink.net/tag/q_a +http://www.semanlink.net/tag/faq|broader|http://www.semanlink.net/tag/howto_tutorial_faq +http://www.semanlink.net/tag/faq|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/faq|uri|http://www.semanlink.net/tag/faq +http://www.semanlink.net/tag/faq|broader_prefLabel|Q&A +http://www.semanlink.net/tag/faq|broader_prefLabel|Howto, tutorial, FAQ +http://www.semanlink.net/tag/slime_mold|creationTime|2017-06-01T23:54:55Z +http://www.semanlink.net/tag/slime_mold|prefLabel|Slime mold +http://www.semanlink.net/tag/slime_mold|broader|http://www.semanlink.net/tag/curiosite_naturelle +http://www.semanlink.net/tag/slime_mold|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/slime_mold|creationDate|2017-06-01 +http://www.semanlink.net/tag/slime_mold|comment|"- myxomicette +- unicellulaire. 
+- Présent sur terre depuis 1 milliard d'années +- Il se déplace vitesse 1cm/heure (jusqu'à 4cm/heure s'il a faim) +- peut rester en ""dormance"" (asséché) pendant 2 ans" +http://www.semanlink.net/tag/slime_mold|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/slime_mold|describedBy|https://en.wikipedia.org/wiki/Slime_mold +http://www.semanlink.net/tag/slime_mold|altLabel|Blob +http://www.semanlink.net/tag/slime_mold|uri|http://www.semanlink.net/tag/slime_mold +http://www.semanlink.net/tag/slime_mold|broader_prefLabel|Curiosités naturelles +http://www.semanlink.net/tag/slime_mold|broader_prefLabel|Biology +http://www.semanlink.net/tag/slime_mold|broader_altLabel|Biologie +http://www.semanlink.net/tag/zika|creationTime|2016-01-27T13:58:03Z +http://www.semanlink.net/tag/zika|prefLabel|Zika +http://www.semanlink.net/tag/zika|broader|http://www.semanlink.net/tag/virus +http://www.semanlink.net/tag/zika|broader|http://www.semanlink.net/tag/maladie +http://www.semanlink.net/tag/zika|broader|http://www.semanlink.net/tag/moustique +http://www.semanlink.net/tag/zika|creationDate|2016-01-27 +http://www.semanlink.net/tag/zika|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zika|uri|http://www.semanlink.net/tag/zika +http://www.semanlink.net/tag/zika|broader_prefLabel|Virus +http://www.semanlink.net/tag/zika|broader_prefLabel|Maladie +http://www.semanlink.net/tag/zika|broader_prefLabel|Moustique +http://www.semanlink.net/tag/zika|broader_related|http://www.semanlink.net/tag/dengue +http://www.semanlink.net/tag/zika|broader_related|http://www.semanlink.net/tag/paludisme +http://www.semanlink.net/tag/spatial_search|creationTime|2019-08-29T00:34:48Z +http://www.semanlink.net/tag/spatial_search|prefLabel|Spatial search +http://www.semanlink.net/tag/spatial_search|broader|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/spatial_search|broader|http://www.semanlink.net/tag/search +http://www.semanlink.net/tag/spatial_search|creationDate|2019-08-29 +http://www.semanlink.net/tag/spatial_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/spatial_search|uri|http://www.semanlink.net/tag/spatial_search +http://www.semanlink.net/tag/spatial_search|broader_prefLabel|Algorithmes +http://www.semanlink.net/tag/spatial_search|broader_prefLabel|Search +http://www.semanlink.net/tag/master_data_management|creationTime|2010-08-24T23:37:27Z +http://www.semanlink.net/tag/master_data_management|prefLabel|Master Data Management +http://www.semanlink.net/tag/master_data_management|creationDate|2010-08-24 +http://www.semanlink.net/tag/master_data_management|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/master_data_management|altLabel|MDM +http://www.semanlink.net/tag/master_data_management|uri|http://www.semanlink.net/tag/master_data_management +http://www.semanlink.net/tag/reading|creationTime|2014-03-01T15:08:37Z +http://www.semanlink.net/tag/reading|prefLabel|Reading +http://www.semanlink.net/tag/reading|creationDate|2014-03-01 +http://www.semanlink.net/tag/reading|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reading|uri|http://www.semanlink.net/tag/reading +http://www.semanlink.net/tag/semantic_web_ui|prefLabel|Semantic Web : UI +http://www.semanlink.net/tag/semantic_web_ui|broader|http://www.semanlink.net/tag/ui +http://www.semanlink.net/tag/semantic_web_ui|creationDate|2006-12-01 
+http://www.semanlink.net/tag/semantic_web_ui|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_ui|uri|http://www.semanlink.net/tag/semantic_web_ui +http://www.semanlink.net/tag/semantic_web_ui|broader_prefLabel|UI +http://www.semanlink.net/tag/apache_opennlp|creationTime|2012-03-15T13:43:29Z +http://www.semanlink.net/tag/apache_opennlp|prefLabel|Apache OpenNLP +http://www.semanlink.net/tag/apache_opennlp|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/apache_opennlp|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/apache_opennlp|related|http://www.semanlink.net/tag/maxent_classifier +http://www.semanlink.net/tag/apache_opennlp|creationDate|2012-03-15 +http://www.semanlink.net/tag/apache_opennlp|comment|"The Apache OpenNLP library is a machine learning based toolkit for the processing of natural language text. +
+It supports the most common NLP tasks, such as tokenization, sentence segmentation, part-of-speech tagging, named entity extraction, chunking, parsing, and coreference resolution. These tasks are usually required to build more advanced text processing services. OpenNLP also includes maximum entropy and perceptron based machine learning." +http://www.semanlink.net/tag/apache_opennlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apache_opennlp|describedBy|http://opennlp.apache.org/ +http://www.semanlink.net/tag/apache_opennlp|uri|http://www.semanlink.net/tag/apache_opennlp +http://www.semanlink.net/tag/apache_opennlp|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/apache_opennlp|broader_prefLabel|apache.org +http://www.semanlink.net/tag/tombouctou|prefLabel|Tombouctou +http://www.semanlink.net/tag/tombouctou|broader|http://www.semanlink.net/tag/sahara +http://www.semanlink.net/tag/tombouctou|broader|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/tombouctou|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tombouctou|describedBy|https://en.wikipedia.org/wiki/Timbuktu +http://www.semanlink.net/tag/tombouctou|altLabel|Timbuktu +http://www.semanlink.net/tag/tombouctou|uri|http://www.semanlink.net/tag/tombouctou +http://www.semanlink.net/tag/tombouctou|broader_prefLabel|Sahara +http://www.semanlink.net/tag/tombouctou|broader_prefLabel|Mali +http://www.semanlink.net/tag/prix_nobel_de_physique|creationTime|2007-10-09T21:31:33Z +http://www.semanlink.net/tag/prix_nobel_de_physique|prefLabel|Prix Nobel de physique +http://www.semanlink.net/tag/prix_nobel_de_physique|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/prix_nobel_de_physique|broader|http://www.semanlink.net/tag/prix_nobel +http://www.semanlink.net/tag/prix_nobel_de_physique|creationDate|2007-10-09 +http://www.semanlink.net/tag/prix_nobel_de_physique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/prix_nobel_de_physique|uri|http://www.semanlink.net/tag/prix_nobel_de_physique +http://www.semanlink.net/tag/prix_nobel_de_physique|broader_prefLabel|Physique +http://www.semanlink.net/tag/prix_nobel_de_physique|broader_prefLabel|Prix Nobel +http://www.semanlink.net/tag/prix_nobel_de_physique|broader_altLabel|Physics +http://www.semanlink.net/tag/siamese_network|creationTime|2019-06-28T18:51:21Z +http://www.semanlink.net/tag/siamese_network|prefLabel|Siamese networks +http://www.semanlink.net/tag/siamese_network|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/siamese_network|broader|http://www.semanlink.net/tag/similarity_learning +http://www.semanlink.net/tag/siamese_network|related|http://www.semanlink.net/tag/face_recognition +http://www.semanlink.net/tag/siamese_network|related|http://www.semanlink.net/tag/one_shot_generalization +http://www.semanlink.net/tag/siamese_network|related|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.semanlink.net/tag/siamese_network|creationDate|2019-06-28 +http://www.semanlink.net/tag/siamese_network|comment|Introduced in the early 1990s by Bromley and LeCun to solve signature verification as an image matching problem +http://www.semanlink.net/tag/siamese_network|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/siamese_network|describedBy|https://en.wikipedia.org/wiki/Siamese_neural_network 
+http://www.semanlink.net/tag/siamese_network|altLabel|Siamese network +http://www.semanlink.net/tag/siamese_network|uri|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/tag/siamese_network|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/siamese_network|broader_prefLabel|Similarity learning +http://www.semanlink.net/tag/siamese_network|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/siamese_network|broader_altLabel|ANN +http://www.semanlink.net/tag/siamese_network|broader_altLabel|NN +http://www.semanlink.net/tag/siamese_network|broader_related|http://www.semanlink.net/tag/machine_learned_ranking +http://www.semanlink.net/tag/siamese_network|broader_related|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.semanlink.net/tag/principal_component_analysis|creationTime|2015-10-16T22:42:33Z +http://www.semanlink.net/tag/principal_component_analysis|prefLabel|Principal component analysis +http://www.semanlink.net/tag/principal_component_analysis|broader|http://www.semanlink.net/tag/dimensionality_reduction +http://www.semanlink.net/tag/principal_component_analysis|broader|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/principal_component_analysis|broader|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/principal_component_analysis|creationDate|2015-10-16 +http://www.semanlink.net/tag/principal_component_analysis|comment|"PCA is a statistical procedure that converts a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components. + +-> PCA is based on extracting the axes on which the data shows the highest variability. + +PCA can be done by eigenvalue decomposition of a data covariance matrix or singular value decomposition of a data matrix, usually after mean centering and normalizing the data matrix for each attribute" +http://www.semanlink.net/tag/principal_component_analysis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/principal_component_analysis|describedBy|https://en.wikipedia.org/wiki/Principal_component_analysis +http://www.semanlink.net/tag/principal_component_analysis|altLabel|PCA +http://www.semanlink.net/tag/principal_component_analysis|uri|http://www.semanlink.net/tag/principal_component_analysis +http://www.semanlink.net/tag/principal_component_analysis|broader_prefLabel|Dimensionality reduction +http://www.semanlink.net/tag/principal_component_analysis|broader_prefLabel|Feature learning +http://www.semanlink.net/tag/principal_component_analysis|broader_prefLabel|Unsupervised machine learning +http://www.semanlink.net/tag/principal_component_analysis|broader_altLabel|Representation learning +http://www.semanlink.net/tag/principal_component_analysis|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce|prefLabel|La communauté internationale est une garce +http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce|broader|http://www.semanlink.net/tag/garce +http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce|broader|http://www.semanlink.net/tag/communaute_internationale +http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce|creationDate|2006-11-21 +http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce|uri|http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce +http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce|broader_prefLabel|Garce +http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce|broader_prefLabel|Communauté internationale +http://www.semanlink.net/tag/revolution|creationTime|2007-11-08T10:20:39Z +http://www.semanlink.net/tag/revolution|prefLabel|Révolution +http://www.semanlink.net/tag/revolution|creationDate|2007-11-08 +http://www.semanlink.net/tag/revolution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/revolution|uri|http://www.semanlink.net/tag/revolution +http://www.semanlink.net/tag/everipedia|creationTime|2017-12-17T11:56:15Z +http://www.semanlink.net/tag/everipedia|prefLabel|Everipedia +http://www.semanlink.net/tag/everipedia|related|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/everipedia|related|http://www.semanlink.net/tag/blockchain +http://www.semanlink.net/tag/everipedia|creationDate|2017-12-17 +http://www.semanlink.net/tag/everipedia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/everipedia|uri|http://www.semanlink.net/tag/everipedia +http://www.semanlink.net/tag/the_guardian|creationTime|2013-03-19T23:14:56Z +http://www.semanlink.net/tag/the_guardian|prefLabel|The Guardian +http://www.semanlink.net/tag/the_guardian|broader|http://www.semanlink.net/tag/journal +http://www.semanlink.net/tag/the_guardian|creationDate|2013-03-19 +http://www.semanlink.net/tag/the_guardian|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/the_guardian|homepage|http://www.guardian.co.uk +http://www.semanlink.net/tag/the_guardian|uri|http://www.semanlink.net/tag/the_guardian +http://www.semanlink.net/tag/the_guardian|broader_prefLabel|Presse +http://www.semanlink.net/tag/the_guardian|broader_altLabel|Journal +http://www.semanlink.net/tag/evaluation_measures|creationTime|2020-10-02T02:02:42Z +http://www.semanlink.net/tag/evaluation_measures|prefLabel|Evaluation measures +http://www.semanlink.net/tag/evaluation_measures|creationDate|2020-10-02 +http://www.semanlink.net/tag/evaluation_measures|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/evaluation_measures|describedBy|https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval) +http://www.semanlink.net/tag/evaluation_measures|uri|http://www.semanlink.net/tag/evaluation_measures +http://www.semanlink.net/tag/penseur|prefLabel|Penseur +http://www.semanlink.net/tag/penseur|broader|http://www.semanlink.net/tag/divers +http://www.semanlink.net/tag/penseur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/penseur|uri|http://www.semanlink.net/tag/penseur +http://www.semanlink.net/tag/penseur|broader_prefLabel|Divers +http://www.semanlink.net/tag/adn|prefLabel|ADN +http://www.semanlink.net/tag/adn|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/adn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/adn|altLabel|DNA +http://www.semanlink.net/tag/adn|uri|http://www.semanlink.net/tag/adn +http://www.semanlink.net/tag/adn|broader_prefLabel|Genetics +http://www.semanlink.net/tag/adn|broader_prefLabel|Génétique +http://www.semanlink.net/tag/concept_extraction|creationTime|2018-08-12T18:31:31Z 
+http://www.semanlink.net/tag/concept_extraction|prefLabel|Concept Extraction / Linking +http://www.semanlink.net/tag/concept_extraction|broader|http://www.semanlink.net/tag/automatic_tagging +http://www.semanlink.net/tag/concept_extraction|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/concept_extraction|broader|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/tag/concept_extraction|related|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/concept_extraction|related|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/tag/concept_extraction|creationDate|2018-08-12 +http://www.semanlink.net/tag/concept_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/concept_extraction|altLabel|Concept linking +http://www.semanlink.net/tag/concept_extraction|altLabel|Concept extraction +http://www.semanlink.net/tag/concept_extraction|uri|http://www.semanlink.net/tag/concept_extraction +http://www.semanlink.net/tag/concept_extraction|broader_prefLabel|Automatic tagging +http://www.semanlink.net/tag/concept_extraction|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/concept_extraction|broader_prefLabel|Keyword/keyphrase extraction +http://www.semanlink.net/tag/concept_extraction|broader_altLabel|Topic extraction +http://www.semanlink.net/tag/concept_extraction|broader_altLabel|Keyword extraction +http://www.semanlink.net/tag/concept_extraction|broader_altLabel|Keyphrase extraction +http://www.semanlink.net/tag/concept_extraction|broader_related|http://www.semanlink.net/tag/phrase_mining +http://www.semanlink.net/tag/concept_extraction|broader_related|http://www.semanlink.net/tag/phrase_embeddings +http://www.semanlink.net/tag/concept_extraction|broader_related|http://www.semanlink.net/tag/automatic_summarization +http://www.semanlink.net/tag/concept_extraction|broader_related|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/serverless|creationTime|2018-08-29T16:17:41Z +http://www.semanlink.net/tag/serverless|prefLabel|Serverless +http://www.semanlink.net/tag/serverless|creationDate|2018-08-29 +http://www.semanlink.net/tag/serverless|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/serverless|uri|http://www.semanlink.net/tag/serverless +http://www.semanlink.net/tag/geolocalisation|creationTime|2014-01-24T00:22:55Z +http://www.semanlink.net/tag/geolocalisation|prefLabel|Geolocalisation +http://www.semanlink.net/tag/geolocalisation|creationDate|2014-01-24 +http://www.semanlink.net/tag/geolocalisation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/geolocalisation|uri|http://www.semanlink.net/tag/geolocalisation +http://www.semanlink.net/tag/os_x_app|prefLabel|OS X app +http://www.semanlink.net/tag/os_x_app|broader|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/tag/os_x_app|creationDate|2006-10-06 +http://www.semanlink.net/tag/os_x_app|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/os_x_app|uri|http://www.semanlink.net/tag/os_x_app +http://www.semanlink.net/tag/os_x_app|broader_prefLabel|Mac OS X +http://www.semanlink.net/tag/os_x_app|broader_altLabel|OS X +http://www.semanlink.net/tag/os_x_app|broader_altLabel|OSX +http://www.semanlink.net/tag/linked_data_browser|creationTime|2009-03-31T14:16:04Z +http://www.semanlink.net/tag/linked_data_browser|prefLabel|Linked 
Data Browser +http://www.semanlink.net/tag/linked_data_browser|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/linked_data_browser|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_browser|creationDate|2009-03-31 +http://www.semanlink.net/tag/linked_data_browser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_browser|uri|http://www.semanlink.net/tag/linked_data_browser +http://www.semanlink.net/tag/linked_data_browser|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/linked_data_browser|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_browser|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data_browser|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/color_naming|creationTime|2019-08-15T17:40:12Z +http://www.semanlink.net/tag/color_naming|prefLabel|Color naming +http://www.semanlink.net/tag/color_naming|broader|http://www.semanlink.net/tag/langage +http://www.semanlink.net/tag/color_naming|creationDate|2019-08-15 +http://www.semanlink.net/tag/color_naming|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/color_naming|uri|http://www.semanlink.net/tag/color_naming +http://www.semanlink.net/tag/color_naming|broader_prefLabel|Language +http://www.semanlink.net/tag/color_naming|broader_altLabel|Langage +http://www.semanlink.net/tag/genetic_programming|creationTime|2008-12-10T14:56:25Z +http://www.semanlink.net/tag/genetic_programming|prefLabel|Genetic Programming +http://www.semanlink.net/tag/genetic_programming|broader|http://www.semanlink.net/tag/evolution +http://www.semanlink.net/tag/genetic_programming|broader|http://www.semanlink.net/tag/programming +http://www.semanlink.net/tag/genetic_programming|creationDate|2008-12-10 +http://www.semanlink.net/tag/genetic_programming|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/genetic_programming|uri|http://www.semanlink.net/tag/genetic_programming +http://www.semanlink.net/tag/genetic_programming|broader_prefLabel|Evolution +http://www.semanlink.net/tag/genetic_programming|broader_prefLabel|Programming +http://www.semanlink.net/tag/pierre_rebour|prefLabel|Pierre Rebour +http://www.semanlink.net/tag/pierre_rebour|broader|http://www.semanlink.net/tag/ami +http://www.semanlink.net/tag/pierre_rebour|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pierre_rebour|uri|http://www.semanlink.net/tag/pierre_rebour +http://www.semanlink.net/tag/pierre_rebour|broader_prefLabel|Ami +http://www.semanlink.net/tag/giec|creationTime|2021-08-09T11:34:31Z +http://www.semanlink.net/tag/giec|prefLabel|GIEC +http://www.semanlink.net/tag/giec|creationDate|2021-08-09 +http://www.semanlink.net/tag/giec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/giec|uri|http://www.semanlink.net/tag/giec +http://www.semanlink.net/tag/jean_paul|prefLabel|Jean-Paul Cardinal +http://www.semanlink.net/tag/jean_paul|broader|http://www.semanlink.net/tag/mathematicien +http://www.semanlink.net/tag/jean_paul|broader|http://www.semanlink.net/tag/ami +http://www.semanlink.net/tag/jean_paul|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jean_paul|uri|http://www.semanlink.net/tag/jean_paul +http://www.semanlink.net/tag/jean_paul|broader_prefLabel|Mathématicien +http://www.semanlink.net/tag/jean_paul|broader_prefLabel|Ami +http://www.semanlink.net/tag/paleontologie|prefLabel|Paléontologie +http://www.semanlink.net/tag/paleontologie|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/paleontologie|broader|http://www.semanlink.net/tag/geologie +http://www.semanlink.net/tag/paleontologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paleontologie|uri|http://www.semanlink.net/tag/paleontologie +http://www.semanlink.net/tag/paleontologie|broader_prefLabel|Science +http://www.semanlink.net/tag/paleontologie|broader_prefLabel|Géologie +http://www.semanlink.net/tag/paleontologie|broader_altLabel|sciences +http://www.semanlink.net/tag/edith_piaf|creationTime|2009-06-25T08:33:18Z +http://www.semanlink.net/tag/edith_piaf|prefLabel|Edith Piaf +http://www.semanlink.net/tag/edith_piaf|creationDate|2009-06-25 +http://www.semanlink.net/tag/edith_piaf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/edith_piaf|uri|http://www.semanlink.net/tag/edith_piaf +http://www.semanlink.net/tag/prejuges|creationTime|2021-04-14T18:05:23Z +http://www.semanlink.net/tag/prejuges|prefLabel|Préjugés +http://www.semanlink.net/tag/prejuges|creationDate|2021-04-14 +http://www.semanlink.net/tag/prejuges|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/prejuges|uri|http://www.semanlink.net/tag/prejuges +http://www.semanlink.net/tag/islande|creationTime|2008-06-21T00:18:43Z +http://www.semanlink.net/tag/islande|prefLabel|Islande +http://www.semanlink.net/tag/islande|creationDate|2008-06-21 +http://www.semanlink.net/tag/islande|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/islande|uri|http://www.semanlink.net/tag/islande +http://www.semanlink.net/tag/phpmyadmin|creationTime|2008-10-20T14:36:46Z +http://www.semanlink.net/tag/phpmyadmin|prefLabel|phpMyAdmin 
+http://www.semanlink.net/tag/phpmyadmin|broader|http://www.semanlink.net/tag/mysql +http://www.semanlink.net/tag/phpmyadmin|creationDate|2008-10-20 +http://www.semanlink.net/tag/phpmyadmin|comment|MySQL Database Administration Tool +http://www.semanlink.net/tag/phpmyadmin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/phpmyadmin|describedBy|http://www.phpmyadmin.net/ +http://www.semanlink.net/tag/phpmyadmin|uri|http://www.semanlink.net/tag/phpmyadmin +http://www.semanlink.net/tag/phpmyadmin|broader_prefLabel|MySQL +http://www.semanlink.net/tag/yrjana_rankka|creationTime|2010-02-03T13:57:34Z +http://www.semanlink.net/tag/yrjana_rankka|prefLabel|Yrjänä Rankka +http://www.semanlink.net/tag/yrjana_rankka|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/yrjana_rankka|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/yrjana_rankka|related|http://www.semanlink.net/tag/ldow2008 +http://www.semanlink.net/tag/yrjana_rankka|creationDate|2010-02-03 +http://www.semanlink.net/tag/yrjana_rankka|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yrjana_rankka|uri|http://www.semanlink.net/tag/yrjana_rankka +http://www.semanlink.net/tag/yrjana_rankka|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/yrjana_rankka|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/yrjana_rankka|broader_altLabel|Technical guys +http://www.semanlink.net/tag/semantic_web_crawler|prefLabel|Semantic Web Crawler +http://www.semanlink.net/tag/semantic_web_crawler|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/semantic_web_crawler|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_crawler|uri|http://www.semanlink.net/tag/semantic_web_crawler +http://www.semanlink.net/tag/semantic_web_crawler|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/bullshit_web|creationTime|2018-08-05T15:49:40Z +http://www.semanlink.net/tag/bullshit_web|prefLabel|Bullshit Web +http://www.semanlink.net/tag/bullshit_web|creationDate|2018-08-05 +http://www.semanlink.net/tag/bullshit_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bullshit_web|uri|http://www.semanlink.net/tag/bullshit_web +http://www.semanlink.net/tag/arts_premiers|prefLabel|Arts premiers +http://www.semanlink.net/tag/arts_premiers|broader|http://www.semanlink.net/tag/art +http://www.semanlink.net/tag/arts_premiers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arts_premiers|uri|http://www.semanlink.net/tag/arts_premiers +http://www.semanlink.net/tag/arts_premiers|broader_prefLabel|Art +http://www.semanlink.net/tag/carl_lewis|creationTime|2008-08-14T02:43:03Z +http://www.semanlink.net/tag/carl_lewis|prefLabel|Carl Lewis +http://www.semanlink.net/tag/carl_lewis|broader|http://www.semanlink.net/tag/sportif +http://www.semanlink.net/tag/carl_lewis|broader|http://www.semanlink.net/tag/athletisme +http://www.semanlink.net/tag/carl_lewis|creationDate|2008-08-14 +http://www.semanlink.net/tag/carl_lewis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/carl_lewis|uri|http://www.semanlink.net/tag/carl_lewis +http://www.semanlink.net/tag/carl_lewis|broader_prefLabel|Sportif +http://www.semanlink.net/tag/carl_lewis|broader_prefLabel|Athlétisme 
+http://www.semanlink.net/tag/spectral_clustering|creationTime|2018-05-05T12:18:19Z +http://www.semanlink.net/tag/spectral_clustering|prefLabel|Spectral clustering +http://www.semanlink.net/tag/spectral_clustering|broader|http://www.semanlink.net/tag/dimensionality_reduction +http://www.semanlink.net/tag/spectral_clustering|broader|http://www.semanlink.net/tag/statistics +http://www.semanlink.net/tag/spectral_clustering|broader|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/tag/spectral_clustering|creationDate|2018-05-05 +http://www.semanlink.net/tag/spectral_clustering|comment|techniques that make use of the spectrum (eigenvalues) of the similarity matrix of the data to perform dimensionality reduction before clustering in fewer dimensions. +http://www.semanlink.net/tag/spectral_clustering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/spectral_clustering|describedBy|https://en.wikipedia.org/wiki/Spectral_clustering +http://www.semanlink.net/tag/spectral_clustering|uri|http://www.semanlink.net/tag/spectral_clustering +http://www.semanlink.net/tag/spectral_clustering|broader_prefLabel|Dimensionality reduction +http://www.semanlink.net/tag/spectral_clustering|broader_prefLabel|Statistics +http://www.semanlink.net/tag/spectral_clustering|broader_prefLabel|Clustering +http://www.semanlink.net/tag/spectral_clustering|broader_altLabel|stats +http://www.semanlink.net/tag/spectral_clustering|broader_altLabel|Statistiques +http://www.semanlink.net/tag/spectral_clustering|broader_altLabel|Data clustering +http://www.semanlink.net/tag/spectral_clustering|broader_altLabel|Cluster analysis +http://www.semanlink.net/tag/spectral_clustering|broader_related|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/tap|prefLabel|TAP +http://www.semanlink.net/tag/tap|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/tap|broader|http://www.semanlink.net/tag/stanford +http://www.semanlink.net/tag/tap|broader|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/tap|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tap|homepage|http://tap.stanford.edu/ +http://www.semanlink.net/tag/tap|uri|http://www.semanlink.net/tag/tap +http://www.semanlink.net/tag/tap|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/tap|broader_prefLabel|Stanford +http://www.semanlink.net/tag/tap|broader_prefLabel|Guha +http://www.semanlink.net/tag/tap|broader_related|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/tap|broader_related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/jeremy_howard|creationTime|2018-01-20T10:51:12Z +http://www.semanlink.net/tag/jeremy_howard|prefLabel|Jeremy Howard +http://www.semanlink.net/tag/jeremy_howard|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/jeremy_howard|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/jeremy_howard|related|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/tag/jeremy_howard|related|http://www.semanlink.net/tag/kaggle +http://www.semanlink.net/tag/jeremy_howard|creationDate|2018-01-20 +http://www.semanlink.net/tag/jeremy_howard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeremy_howard|describedBy|https://en.wikipedia.org/wiki/Jeremy_Howard_(entrepreneur) 
+http://www.semanlink.net/tag/jeremy_howard|uri|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/tag/jeremy_howard|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/jeremy_howard|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/poolparty|creationTime|2014-07-28T01:49:48Z +http://www.semanlink.net/tag/poolparty|prefLabel|PoolParty +http://www.semanlink.net/tag/poolparty|broader|http://www.semanlink.net/tag/skos_editor +http://www.semanlink.net/tag/poolparty|creationDate|2014-07-28 +http://www.semanlink.net/tag/poolparty|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/poolparty|uri|http://www.semanlink.net/tag/poolparty +http://www.semanlink.net/tag/poolparty|broader_prefLabel|SKOS editor +http://www.semanlink.net/tag/nli|creationTime|2020-02-14T11:32:34Z +http://www.semanlink.net/tag/nli|prefLabel|Natural Language Inference +http://www.semanlink.net/tag/nli|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/nli|related|http://www.semanlink.net/tag/nlu +http://www.semanlink.net/tag/nli|creationDate|2020-02-14 +http://www.semanlink.net/tag/nli|comment|"considering two sentences: a ""premise"" and a ""hypothesis"", the task of determining whether the “hypothesis” is true (entailment), false (contradiction), or undetermined (neutral) given the “premise”. + +> When using transformer architectures like BERT, NLI datasets are typically +modeled via sequence-pair classification. That is, we feed both the premise +and the hypothesis through the model together as distinct segments and +learn a classification head predicting one of [contradiction, neutral, entailment] . [src](doc:2021/02/zero_shot_learning_in_modern_nl)" +http://www.semanlink.net/tag/nli|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nli|altLabel|NLI +http://www.semanlink.net/tag/nli|uri|http://www.semanlink.net/tag/nli +http://www.semanlink.net/tag/nli|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/maali_mnasri|creationTime|2017-07-17T00:24:44Z +http://www.semanlink.net/tag/maali_mnasri|prefLabel|Maâli Mnasri +http://www.semanlink.net/tag/maali_mnasri|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/maali_mnasri|related|http://www.semanlink.net/tag/cea +http://www.semanlink.net/tag/maali_mnasri|creationDate|2017-07-17 +http://www.semanlink.net/tag/maali_mnasri|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maali_mnasri|uri|http://www.semanlink.net/tag/maali_mnasri +http://www.semanlink.net/tag/maali_mnasri|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/insectes_fossiles|creationTime|2008-04-10T10:37:09Z +http://www.semanlink.net/tag/insectes_fossiles|prefLabel|Insectes fossiles +http://www.semanlink.net/tag/insectes_fossiles|broader|http://www.semanlink.net/tag/fossile +http://www.semanlink.net/tag/insectes_fossiles|broader|http://www.semanlink.net/tag/insecte +http://www.semanlink.net/tag/insectes_fossiles|creationDate|2008-04-10 +http://www.semanlink.net/tag/insectes_fossiles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/insectes_fossiles|uri|http://www.semanlink.net/tag/insectes_fossiles +http://www.semanlink.net/tag/insectes_fossiles|broader_prefLabel|Fossile +http://www.semanlink.net/tag/insectes_fossiles|broader_prefLabel|Insecte 
+http://www.semanlink.net/tag/tensorflow_2_0|creationTime|2019-03-12T22:49:50Z +http://www.semanlink.net/tag/tensorflow_2_0|prefLabel|TensorFlow 2.0 +http://www.semanlink.net/tag/tensorflow_2_0|broader|http://www.semanlink.net/tag/tensorflow +http://www.semanlink.net/tag/tensorflow_2_0|creationDate|2019-03-12 +http://www.semanlink.net/tag/tensorflow_2_0|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tensorflow_2_0|uri|http://www.semanlink.net/tag/tensorflow_2_0 +http://www.semanlink.net/tag/tensorflow_2_0|broader_prefLabel|TensorFlow +http://www.semanlink.net/tag/tensorflow_2_0|broader_related|http://www.semanlink.net/tag/christopher_olah +http://www.semanlink.net/tag/arbres|prefLabel|Arbres +http://www.semanlink.net/tag/arbres|broader|http://www.semanlink.net/tag/plante +http://www.semanlink.net/tag/arbres|broader|http://www.semanlink.net/tag/nature +http://www.semanlink.net/tag/arbres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arbres|uri|http://www.semanlink.net/tag/arbres +http://www.semanlink.net/tag/arbres|broader_prefLabel|Plante +http://www.semanlink.net/tag/arbres|broader_prefLabel|Nature +http://www.semanlink.net/tag/anonymous|creationTime|2011-07-18T13:06:59Z +http://www.semanlink.net/tag/anonymous|prefLabel|Anonymous +http://www.semanlink.net/tag/anonymous|creationDate|2011-07-18 +http://www.semanlink.net/tag/anonymous|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/anonymous|uri|http://www.semanlink.net/tag/anonymous +http://www.semanlink.net/tag/virtuoso_universal_server|creationTime|2009-05-18T09:19:57Z +http://www.semanlink.net/tag/virtuoso_universal_server|prefLabel|Virtuoso Universal Server +http://www.semanlink.net/tag/virtuoso_universal_server|broader|http://www.semanlink.net/tag/virtuoso +http://www.semanlink.net/tag/virtuoso_universal_server|creationDate|2009-05-18 +http://www.semanlink.net/tag/virtuoso_universal_server|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/virtuoso_universal_server|uri|http://www.semanlink.net/tag/virtuoso_universal_server +http://www.semanlink.net/tag/virtuoso_universal_server|broader_prefLabel|Virtuoso +http://www.semanlink.net/tag/naomi_klein|prefLabel|Naomi Klein +http://www.semanlink.net/tag/naomi_klein|broader|http://www.semanlink.net/tag/critique_du_capitalisme +http://www.semanlink.net/tag/naomi_klein|broader|http://www.semanlink.net/tag/critique_de_la_societe_occidentale +http://www.semanlink.net/tag/naomi_klein|comment|No Logo +http://www.semanlink.net/tag/naomi_klein|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/naomi_klein|uri|http://www.semanlink.net/tag/naomi_klein +http://www.semanlink.net/tag/naomi_klein|broader_prefLabel|Critique du capitalisme +http://www.semanlink.net/tag/naomi_klein|broader_prefLabel|Critique de la société occidentale +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|creationTime|2019-06-18T08:33:54Z +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|prefLabel|Graph-based Semi-Supervised Learning +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|broader|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|broader|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|broader|http://www.semanlink.net/tag/semi_supervised_learning 
+http://www.semanlink.net/tag/graph_based_semi_supervised_learning|creationDate|2019-06-18 +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|uri|http://www.semanlink.net/tag/graph_based_semi_supervised_learning +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|broader_prefLabel|Graphs+Machine Learning +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|broader_prefLabel|Graph +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|broader_prefLabel|Semi-supervised learning +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|broader_altLabel|Graph Machine Learning +http://www.semanlink.net/tag/graph_based_semi_supervised_learning|broader_related|http://www.semanlink.net/tag/weak_supervision +http://www.semanlink.net/tag/hong_kong|prefLabel|Hong Kong +http://www.semanlink.net/tag/hong_kong|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/hong_kong|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/hong_kong|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hong_kong|altLabel|Hongkong +http://www.semanlink.net/tag/hong_kong|uri|http://www.semanlink.net/tag/hong_kong +http://www.semanlink.net/tag/hong_kong|broader_prefLabel|Ville +http://www.semanlink.net/tag/hong_kong|broader_prefLabel|Chine +http://www.semanlink.net/tag/hong_kong|broader_altLabel|China +http://www.semanlink.net/tag/woody_allen|creationTime|2015-12-03T00:50:46Z +http://www.semanlink.net/tag/woody_allen|prefLabel|Woody Allen +http://www.semanlink.net/tag/woody_allen|broader|http://www.semanlink.net/tag/realisateur +http://www.semanlink.net/tag/woody_allen|broader|http://www.semanlink.net/tag/cinema_americain +http://www.semanlink.net/tag/woody_allen|creationDate|2015-12-03 +http://www.semanlink.net/tag/woody_allen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/woody_allen|describedBy|https://en.wikipedia.org/wiki/Woody_Allen +http://www.semanlink.net/tag/woody_allen|uri|http://www.semanlink.net/tag/woody_allen +http://www.semanlink.net/tag/woody_allen|broader_prefLabel|Réalisateur +http://www.semanlink.net/tag/woody_allen|broader_prefLabel|Cinéma américain +http://www.semanlink.net/tag/woody_allen|broader_altLabel|Cinéaste +http://www.semanlink.net/tag/reagan|prefLabel|Reagan +http://www.semanlink.net/tag/reagan|broader|http://www.semanlink.net/tag/president_des_usa +http://www.semanlink.net/tag/reagan|broader|http://www.semanlink.net/tag/chef_d_etat +http://www.semanlink.net/tag/reagan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reagan|uri|http://www.semanlink.net/tag/reagan +http://www.semanlink.net/tag/reagan|broader_prefLabel|Président des USA +http://www.semanlink.net/tag/reagan|broader_prefLabel|Chef d'état +http://www.semanlink.net/tag/tabulator|prefLabel|Tabulator +http://www.semanlink.net/tag/tabulator|broader|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/tabulator|broader|http://www.semanlink.net/tag/rdf_browser +http://www.semanlink.net/tag/tabulator|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/tabulator|broader|http://www.semanlink.net/tag/javascript_rdf +http://www.semanlink.net/tag/tabulator|broader|http://www.semanlink.net/tag/ajar 
+http://www.semanlink.net/tag/tabulator|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/tabulator|creationDate|2007-01-02 +http://www.semanlink.net/tag/tabulator|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tabulator|homepage|http://www.w3.org/2005/ajar/tab +http://www.semanlink.net/tag/tabulator|uri|http://www.semanlink.net/tag/tabulator +http://www.semanlink.net/tag/tabulator|broader_prefLabel|Tim Berners-Lee +http://www.semanlink.net/tag/tabulator|broader_prefLabel|RDF browser +http://www.semanlink.net/tag/tabulator|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/tabulator|broader_prefLabel|Javascript RDF +http://www.semanlink.net/tag/tabulator|broader_prefLabel|AJAR +http://www.semanlink.net/tag/tabulator|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/tabulator|broader_altLabel|TBL +http://www.semanlink.net/tag/tabulator|broader_altLabel|TimBL +http://www.semanlink.net/tag/tabulator|broader_altLabel|LD +http://www.semanlink.net/tag/tabulator|broader_altLabel|LOD +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/virtuoso +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/rdf_in_json +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/tabulator|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/nova_spivak|creationTime|2008-04-08T20:41:04Z +http://www.semanlink.net/tag/nova_spivak|prefLabel|Nova Spivak +http://www.semanlink.net/tag/nova_spivak|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/nova_spivak|broader|http://www.semanlink.net/tag/sw_guys 
+http://www.semanlink.net/tag/nova_spivak|creationDate|2008-04-08 +http://www.semanlink.net/tag/nova_spivak|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nova_spivak|uri|http://www.semanlink.net/tag/nova_spivak +http://www.semanlink.net/tag/nova_spivak|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/nova_spivak|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/nova_spivak|broader_altLabel|Technical guys +http://www.semanlink.net/tag/tagging|prefLabel|Tagging +http://www.semanlink.net/tag/tagging|broader|http://www.semanlink.net/tag/semantic_annotation +http://www.semanlink.net/tag/tagging|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/tagging|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tagging|uri|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/tagging|broader_prefLabel|Semantic annotation +http://www.semanlink.net/tag/tagging|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/palmyra|creationTime|2015-06-21T22:51:38Z +http://www.semanlink.net/tag/palmyra|prefLabel|Palmyra +http://www.semanlink.net/tag/palmyra|broader|http://www.semanlink.net/tag/syrie +http://www.semanlink.net/tag/palmyra|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/palmyra|creationDate|2015-06-21 +http://www.semanlink.net/tag/palmyra|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/palmyra|describedBy|https://en.wikipedia.org/wiki/Palmyra +http://www.semanlink.net/tag/palmyra|uri|http://www.semanlink.net/tag/palmyra +http://www.semanlink.net/tag/palmyra|broader_prefLabel|Syrie +http://www.semanlink.net/tag/palmyra|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/palmyra|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/cyborg|creationTime|2009-11-13T21:27:27Z +http://www.semanlink.net/tag/cyborg|prefLabel|Cyborg +http://www.semanlink.net/tag/cyborg|broader|http://www.semanlink.net/tag/robotique +http://www.semanlink.net/tag/cyborg|creationDate|2009-11-13 +http://www.semanlink.net/tag/cyborg|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cyborg|uri|http://www.semanlink.net/tag/cyborg +http://www.semanlink.net/tag/cyborg|broader_prefLabel|Robotique +http://www.semanlink.net/tag/cyborg|broader_altLabel|Robotics +http://www.semanlink.net/tag/cyborg|broader_altLabel|Robot +http://www.semanlink.net/tag/hixie|creationTime|2011-06-10T00:18:23Z +http://www.semanlink.net/tag/hixie|prefLabel|Hixie +http://www.semanlink.net/tag/hixie|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/hixie|related|http://www.semanlink.net/tag/html5 +http://www.semanlink.net/tag/hixie|related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/hixie|related|http://www.semanlink.net/tag/microdata +http://www.semanlink.net/tag/hixie|creationDate|2011-06-10 +http://www.semanlink.net/tag/hixie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hixie|describedBy|https://en.wikipedia.org/wiki/Ian_Hickson +http://www.semanlink.net/tag/hixie|altLabel|Ian Hickson +http://www.semanlink.net/tag/hixie|uri|http://www.semanlink.net/tag/hixie +http://www.semanlink.net/tag/hixie|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/hixie|broader_altLabel|Technical guys 
+http://www.semanlink.net/tag/context_free_grammar|creationTime|2020-09-17T23:58:04Z +http://www.semanlink.net/tag/context_free_grammar|prefLabel|Context-free grammar +http://www.semanlink.net/tag/context_free_grammar|broader|http://www.semanlink.net/tag/linguistique +http://www.semanlink.net/tag/context_free_grammar|creationDate|2020-09-17 +http://www.semanlink.net/tag/context_free_grammar|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/context_free_grammar|describedBy|https://en.wikipedia.org/wiki/Context-free_grammar +http://www.semanlink.net/tag/context_free_grammar|uri|http://www.semanlink.net/tag/context_free_grammar +http://www.semanlink.net/tag/context_free_grammar|broader_prefLabel|Linguistique +http://www.semanlink.net/tag/fruit|creationTime|2008-06-10T21:04:22Z +http://www.semanlink.net/tag/fruit|prefLabel|Fruit +http://www.semanlink.net/tag/fruit|creationDate|2008-06-10 +http://www.semanlink.net/tag/fruit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fruit|uri|http://www.semanlink.net/tag/fruit +http://www.semanlink.net/tag/attention_is_all_you_need|creationTime|2018-10-12T19:05:45Z +http://www.semanlink.net/tag/attention_is_all_you_need|prefLabel|Transformers +http://www.semanlink.net/tag/attention_is_all_you_need|broader|http://www.semanlink.net/tag/self_attention +http://www.semanlink.net/tag/attention_is_all_you_need|broader|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/tag/attention_is_all_you_need|related|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/tag/attention_is_all_you_need|creationDate|2018-10-12 +http://www.semanlink.net/tag/attention_is_all_you_need|comment|"[Vaswani, et al. 2017 paper](https://arxiv.org/abs/1706.03762): ""Attention is all you need"". + +[#seq2seq](/tag/sequence_to_sequence_learning) using only improved self-attention units (""multi-head self-attention +mechanism""), without any RNN. 
+ +Best explanation: [Transformers from scratch Peter Bloem](doc:2019/08/transformers_from_scratch_%7C_pet)" +http://www.semanlink.net/tag/attention_is_all_you_need|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/attention_is_all_you_need|altLabel|Transformer +http://www.semanlink.net/tag/attention_is_all_you_need|altLabel|Transformers +http://www.semanlink.net/tag/attention_is_all_you_need|altLabel|Attention is All You Need +http://www.semanlink.net/tag/attention_is_all_you_need|uri|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/tag/attention_is_all_you_need|broader_prefLabel|Self-Attention +http://www.semanlink.net/tag/attention_is_all_you_need|broader_prefLabel|Sequence-to-sequence learning +http://www.semanlink.net/tag/attention_is_all_you_need|broader_altLabel|Sequence Modeling +http://www.semanlink.net/tag/attention_is_all_you_need|broader_altLabel|Seq2Seq +http://www.semanlink.net/tag/attention_is_all_you_need|broader_related|http://www.semanlink.net/tag/conditional_random_field +http://www.semanlink.net/tag/attention_is_all_you_need|broader_related|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/tag/attention_is_all_you_need|broader_related|http://www.semanlink.net/tag/machine_translation +http://www.semanlink.net/tag/attention_is_all_you_need|broader_related|http://www.semanlink.net/tag/finite_state_transducer +http://www.semanlink.net/tag/souvenirs|creationTime|2012-01-15T00:53:10Z +http://www.semanlink.net/tag/souvenirs|prefLabel|Souvenirs +http://www.semanlink.net/tag/souvenirs|creationDate|2012-01-15 +http://www.semanlink.net/tag/souvenirs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/souvenirs|altLabel|Souvenir +http://www.semanlink.net/tag/souvenirs|uri|http://www.semanlink.net/tag/souvenirs +http://www.semanlink.net/tag/industrie_textile|prefLabel|Industrie textile +http://www.semanlink.net/tag/industrie_textile|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/industrie_textile|broader|http://www.semanlink.net/tag/industrie +http://www.semanlink.net/tag/industrie_textile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/industrie_textile|uri|http://www.semanlink.net/tag/industrie_textile +http://www.semanlink.net/tag/industrie_textile|broader_prefLabel|Economie +http://www.semanlink.net/tag/industrie_textile|broader_prefLabel|industrie +http://www.semanlink.net/tag/industrie_textile|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/graph_neural_networks|creationTime|2018-06-13T13:38:36Z +http://www.semanlink.net/tag/graph_neural_networks|prefLabel|Graph neural networks +http://www.semanlink.net/tag/graph_neural_networks|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/graph_neural_networks|broader|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/tag/graph_neural_networks|creationDate|2018-06-13 +http://www.semanlink.net/tag/graph_neural_networks|comment|"Neural networks that operate on graphs + +> a general framework for +defining deep neural networks on graph data. The key idea is that we want to +generate representations of nodes that actually depend on the structure of the +graph, as well as any feature information we might have. 
[src](doc:2020/09/latent_graph_neural_networks_m)" +http://www.semanlink.net/tag/graph_neural_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_neural_networks|altLabel|GNN +http://www.semanlink.net/tag/graph_neural_networks|uri|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/tag/graph_neural_networks|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/graph_neural_networks|broader_prefLabel|Graphs+Machine Learning +http://www.semanlink.net/tag/graph_neural_networks|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/graph_neural_networks|broader_altLabel|ANN +http://www.semanlink.net/tag/graph_neural_networks|broader_altLabel|NN +http://www.semanlink.net/tag/graph_neural_networks|broader_altLabel|Graph Machine Learning +http://www.semanlink.net/tag/fps_paper|creationTime|2008-02-19T01:04:37Z +http://www.semanlink.net/tag/fps_paper|prefLabel|fps: paper +http://www.semanlink.net/tag/fps_paper|broader|http://www.semanlink.net/tag/fps +http://www.semanlink.net/tag/fps_paper|broader|http://www.semanlink.net/tag/fps_pres +http://www.semanlink.net/tag/fps_paper|creationDate|2008-02-19 +http://www.semanlink.net/tag/fps_paper|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps_paper|uri|http://www.semanlink.net/tag/fps_paper +http://www.semanlink.net/tag/fps_paper|broader_prefLabel|fps +http://www.semanlink.net/tag/fps_paper|broader_prefLabel|fps pres +http://www.semanlink.net/tag/cathares|prefLabel|Cathares +http://www.semanlink.net/tag/cathares|broader|http://www.semanlink.net/tag/religion +http://www.semanlink.net/tag/cathares|broader|http://www.semanlink.net/tag/chretiente +http://www.semanlink.net/tag/cathares|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cathares|uri|http://www.semanlink.net/tag/cathares +http://www.semanlink.net/tag/cathares|broader_prefLabel|Religion +http://www.semanlink.net/tag/cathares|broader_prefLabel|Chrétienté +http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche|creationTime|2013-10-10T01:35:39Z +http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche|prefLabel|Ministère de l'enseignement supérieur et de la recherche +http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche|broader|http://www.semanlink.net/tag/enseignement_superieur +http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche|broader|http://www.semanlink.net/tag/gouvernement +http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche|creationDate|2013-10-10 +http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche|uri|http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche +http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche|broader_prefLabel|Enseignement supérieur +http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche|broader_prefLabel|Gouvernement +http://www.semanlink.net/tag/millennium_goal|prefLabel|Millennium Goal +http://www.semanlink.net/tag/millennium_goal|broader|http://www.semanlink.net/tag/pauvrete 
+http://www.semanlink.net/tag/millennium_goal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/millennium_goal|altLabel|Objectifs du millénaire +http://www.semanlink.net/tag/millennium_goal|uri|http://www.semanlink.net/tag/millennium_goal +http://www.semanlink.net/tag/millennium_goal|broader_prefLabel|Pauvreté +http://www.semanlink.net/tag/ldow2011|creationTime|2013-03-02T13:10:54Z +http://www.semanlink.net/tag/ldow2011|prefLabel|LDOW2011 +http://www.semanlink.net/tag/ldow2011|broader|http://www.semanlink.net/tag/ldow +http://www.semanlink.net/tag/ldow2011|creationDate|2013-03-02 +http://www.semanlink.net/tag/ldow2011|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ldow2011|uri|http://www.semanlink.net/tag/ldow2011 +http://www.semanlink.net/tag/ldow2011|broader_prefLabel|LDOW +http://www.semanlink.net/tag/multnomah_falls|creationTime|2008-08-25T14:13:30Z +http://www.semanlink.net/tag/multnomah_falls|prefLabel|Multnomah Falls +http://www.semanlink.net/tag/multnomah_falls|broader|http://www.semanlink.net/tag/oregon +http://www.semanlink.net/tag/multnomah_falls|related|http://www.semanlink.net/tag/missoula_floods +http://www.semanlink.net/tag/multnomah_falls|creationDate|2008-08-25 +http://www.semanlink.net/tag/multnomah_falls|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multnomah_falls|describedBy|https://en.wikipedia.org/wiki/Multnomah_Falls +http://www.semanlink.net/tag/multnomah_falls|uri|http://www.semanlink.net/tag/multnomah_falls +http://www.semanlink.net/tag/multnomah_falls|broader_prefLabel|Oregon +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|creationTime|2007-08-07T10:23:47Z +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|prefLabel|OpenLink Ajax Toolkit (OAT) +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader|http://www.semanlink.net/tag/ajax +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader|http://www.semanlink.net/tag/openlink +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|creationDate|2007-08-07 +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|altLabel|OAT +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|uri|http://www.semanlink.net/tag/openlink_ajax_toolkit_oat +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader_prefLabel|Kingsley Idehen +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader_prefLabel|Ajax +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader_prefLabel|OpenLink Software +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader_altLabel|XMLHttpRequest +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader_related|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader_related|http://www.semanlink.net/tag/yrjana_rankka +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader_related|http://www.semanlink.net/tag/orri_erling +http://www.semanlink.net/tag/openlink_ajax_toolkit_oat|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/andy_seaborne|creationTime|2007-07-07T13:50:43Z +http://www.semanlink.net/tag/andy_seaborne|prefLabel|Andy Seaborne 
+http://www.semanlink.net/tag/andy_seaborne|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/andy_seaborne|related|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/andy_seaborne|related|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/andy_seaborne|creationDate|2007-07-07 +http://www.semanlink.net/tag/andy_seaborne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/andy_seaborne|uri|http://www.semanlink.net/tag/andy_seaborne +http://www.semanlink.net/tag/andy_seaborne|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/auvergne|prefLabel|Auvergne +http://www.semanlink.net/tag/auvergne|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/auvergne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/auvergne|uri|http://www.semanlink.net/tag/auvergne +http://www.semanlink.net/tag/auvergne|broader_prefLabel|France +http://www.semanlink.net/tag/web_2_0_businesses|prefLabel|Web 2.0 businesses +http://www.semanlink.net/tag/web_2_0_businesses|broader|http://www.semanlink.net/tag/web_2_0 +http://www.semanlink.net/tag/web_2_0_businesses|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_2_0_businesses|uri|http://www.semanlink.net/tag/web_2_0_businesses +http://www.semanlink.net/tag/web_2_0_businesses|broader_prefLabel|Web 2.0 +http://www.semanlink.net/tag/delip_rao|creationTime|2017-12-13T11:19:02Z +http://www.semanlink.net/tag/delip_rao|prefLabel|Delip Rao +http://www.semanlink.net/tag/delip_rao|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/delip_rao|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/delip_rao|creationDate|2017-12-13 +http://www.semanlink.net/tag/delip_rao|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/delip_rao|homepage|http://deliprao.com/ +http://www.semanlink.net/tag/delip_rao|uri|http://www.semanlink.net/tag/delip_rao +http://www.semanlink.net/tag/delip_rao|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/delip_rao|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/paris_2024|creationTime|2021-08-10T12:46:27Z +http://www.semanlink.net/tag/paris_2024|prefLabel|Paris 2024 +http://www.semanlink.net/tag/paris_2024|broader|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/paris_2024|broader|http://www.semanlink.net/tag/jeux_olympiques +http://www.semanlink.net/tag/paris_2024|creationDate|2021-08-10 +http://www.semanlink.net/tag/paris_2024|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paris_2024|uri|http://www.semanlink.net/tag/paris_2024 +http://www.semanlink.net/tag/paris_2024|broader_prefLabel|Paris +http://www.semanlink.net/tag/paris_2024|broader_prefLabel|Jeux Olympiques +http://www.semanlink.net/tag/minhash|creationTime|2017-09-12T15:15:45Z +http://www.semanlink.net/tag/minhash|prefLabel|MinHash +http://www.semanlink.net/tag/minhash|broader|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/minhash|related|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.semanlink.net/tag/minhash|creationDate|2017-09-12 +http://www.semanlink.net/tag/minhash|comment|technique for quickly estimating how similar two sets are +http://www.semanlink.net/tag/minhash|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/minhash|describedBy|https://en.wikipedia.org/wiki/MinHash +http://www.semanlink.net/tag/minhash|uri|http://www.semanlink.net/tag/minhash +http://www.semanlink.net/tag/minhash|broader_prefLabel|Algorithmes +http://www.semanlink.net/tag/units_of_measure|creationTime|2013-02-15T11:42:55Z +http://www.semanlink.net/tag/units_of_measure|prefLabel|Units of measure +http://www.semanlink.net/tag/units_of_measure|creationDate|2013-02-15 +http://www.semanlink.net/tag/units_of_measure|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/units_of_measure|uri|http://www.semanlink.net/tag/units_of_measure +http://www.semanlink.net/tag/semantic_text_matching|creationTime|2020-01-23T10:22:31Z +http://www.semanlink.net/tag/semantic_text_matching|prefLabel|Semantic Text Matching +http://www.semanlink.net/tag/semantic_text_matching|broader|http://www.semanlink.net/tag/identification_of_similar_documents +http://www.semanlink.net/tag/semantic_text_matching|broader|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/semantic_text_matching|creationDate|2020-01-23 +http://www.semanlink.net/tag/semantic_text_matching|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_text_matching|uri|http://www.semanlink.net/tag/semantic_text_matching +http://www.semanlink.net/tag/semantic_text_matching|broader_prefLabel|Identification of similar documents +http://www.semanlink.net/tag/semantic_text_matching|broader_prefLabel|Text Similarity +http://www.semanlink.net/tag/semantic_text_matching|broader_related|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/tag/semantic_text_matching|broader_related|http://www.semanlink.net/tag/nlp_and_search +http://www.semanlink.net/tag/semantic_text_matching|broader_related|http://www.semanlink.net/tag/okapi_bm25 +http://www.semanlink.net/tag/imperialisme_americain|creationTime|2020-08-22T14:37:12Z +http://www.semanlink.net/tag/imperialisme_americain|prefLabel|Impérialisme américain +http://www.semanlink.net/tag/imperialisme_americain|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/imperialisme_americain|broader|http://www.semanlink.net/tag/imperialisme +http://www.semanlink.net/tag/imperialisme_americain|creationDate|2020-08-22 +http://www.semanlink.net/tag/imperialisme_americain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/imperialisme_americain|uri|http://www.semanlink.net/tag/imperialisme_americain +http://www.semanlink.net/tag/imperialisme_americain|broader_prefLabel|USA +http://www.semanlink.net/tag/imperialisme_americain|broader_prefLabel|Impérialisme +http://www.semanlink.net/tag/imperialisme_americain|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/imperialisme_americain|broader_altLabel|United States +http://www.semanlink.net/tag/jeux_olympiques|prefLabel|Jeux Olympiques +http://www.semanlink.net/tag/jeux_olympiques|broader|http://www.semanlink.net/tag/sport +http://www.semanlink.net/tag/jeux_olympiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeux_olympiques|uri|http://www.semanlink.net/tag/jeux_olympiques +http://www.semanlink.net/tag/jeux_olympiques|broader_prefLabel|Sport +http://www.semanlink.net/tag/object_oriented_programming|prefLabel|Object Oriented Programming +http://www.semanlink.net/tag/object_oriented_programming|broader|http://www.semanlink.net/tag/dev 
+http://www.semanlink.net/tag/object_oriented_programming|broader|http://www.semanlink.net/tag/programming
+http://www.semanlink.net/tag/object_oriented_programming|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/object_oriented_programming|altLabel|OOP
+http://www.semanlink.net/tag/object_oriented_programming|uri|http://www.semanlink.net/tag/object_oriented_programming
+http://www.semanlink.net/tag/object_oriented_programming|broader_prefLabel|Dev
+http://www.semanlink.net/tag/object_oriented_programming|broader_prefLabel|Programming
+http://www.semanlink.net/tag/cohn_bendit|prefLabel|Cohn-Bendit
+http://www.semanlink.net/tag/cohn_bendit|broader|http://www.semanlink.net/tag/i_like
+http://www.semanlink.net/tag/cohn_bendit|broader|http://www.semanlink.net/tag/homme_politique
+http://www.semanlink.net/tag/cohn_bendit|broader|http://www.semanlink.net/tag/politique_francaise
+http://www.semanlink.net/tag/cohn_bendit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cohn_bendit|uri|http://www.semanlink.net/tag/cohn_bendit
+http://www.semanlink.net/tag/cohn_bendit|broader_prefLabel|I like
+http://www.semanlink.net/tag/cohn_bendit|broader_prefLabel|Homme politique
+http://www.semanlink.net/tag/cohn_bendit|broader_prefLabel|Politique française
+http://www.semanlink.net/tag/novartis|prefLabel|Novartis
+http://www.semanlink.net/tag/novartis|broader|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/novartis|broader|http://www.semanlink.net/tag/industrie_pharmaceutique
+http://www.semanlink.net/tag/novartis|creationDate|2006-12-29
+http://www.semanlink.net/tag/novartis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/novartis|uri|http://www.semanlink.net/tag/novartis
+http://www.semanlink.net/tag/novartis|broader_prefLabel|Entreprise
+http://www.semanlink.net/tag/novartis|broader_prefLabel|Industrie pharmaceutique
+http://www.semanlink.net/tag/congo_kinshasa|prefLabel|RDC
+http://www.semanlink.net/tag/congo_kinshasa|broader|http://www.semanlink.net/tag/afrique_centrale
+http://www.semanlink.net/tag/congo_kinshasa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/congo_kinshasa|altLabel|République démocratique du Congo
+http://www.semanlink.net/tag/congo_kinshasa|uri|http://www.semanlink.net/tag/congo_kinshasa
+http://www.semanlink.net/tag/congo_kinshasa|broader_prefLabel|Afrique Centrale
+http://www.semanlink.net/tag/julien_cardinal|creationTime|2017-07-21T02:04:55Z
+http://www.semanlink.net/tag/julien_cardinal|prefLabel|Julien Cardinal
+http://www.semanlink.net/tag/julien_cardinal|creationDate|2017-07-21
+http://www.semanlink.net/tag/julien_cardinal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/julien_cardinal|uri|http://www.semanlink.net/tag/julien_cardinal
+http://www.semanlink.net/tag/nlp_task_as_qa_problem|creationTime|2021-01-26T15:19:38Z
+http://www.semanlink.net/tag/nlp_task_as_qa_problem|prefLabel|NLP task as a QA problem
+http://www.semanlink.net/tag/nlp_task_as_qa_problem|broader|http://www.semanlink.net/tag/question_answering
+http://www.semanlink.net/tag/nlp_task_as_qa_problem|broader|http://www.semanlink.net/tag/nlp_problem
+http://www.semanlink.net/tag/nlp_task_as_qa_problem|creationDate|2021-01-26
+http://www.semanlink.net/tag/nlp_task_as_qa_problem|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_task_as_qa_problem|uri|http://www.semanlink.net/tag/nlp_task_as_qa_problem +http://www.semanlink.net/tag/nlp_task_as_qa_problem|broader_prefLabel|Question Answering +http://www.semanlink.net/tag/nlp_task_as_qa_problem|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/nlp_task_as_qa_problem|broader_altLabel|QA +http://www.semanlink.net/tag/nlp_task_as_qa_problem|broader_related|http://www.semanlink.net/tag/chatbot +http://www.semanlink.net/tag/euro|prefLabel|Euro +http://www.semanlink.net/tag/euro|broader|http://www.semanlink.net/tag/money +http://www.semanlink.net/tag/euro|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/euro|uri|http://www.semanlink.net/tag/euro +http://www.semanlink.net/tag/euro|broader_prefLabel|Money +http://www.semanlink.net/tag/euro|broader_altLabel|Monnaie +http://www.semanlink.net/tag/arbres_remarquables|creationTime|2017-10-06T22:17:47Z +http://www.semanlink.net/tag/arbres_remarquables|prefLabel|Arbres remarquables +http://www.semanlink.net/tag/arbres_remarquables|broader|http://www.semanlink.net/tag/arbres +http://www.semanlink.net/tag/arbres_remarquables|creationDate|2017-10-06 +http://www.semanlink.net/tag/arbres_remarquables|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arbres_remarquables|uri|http://www.semanlink.net/tag/arbres_remarquables +http://www.semanlink.net/tag/arbres_remarquables|broader_prefLabel|Arbres +http://www.semanlink.net/tag/akhenaton|prefLabel|Akhênaton +http://www.semanlink.net/tag/akhenaton|broader|http://www.semanlink.net/tag/pharaon +http://www.semanlink.net/tag/akhenaton|broader|http://www.semanlink.net/tag/egypte_antique +http://www.semanlink.net/tag/akhenaton|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/akhenaton|uri|http://www.semanlink.net/tag/akhenaton +http://www.semanlink.net/tag/akhenaton|broader_prefLabel|Pharaon +http://www.semanlink.net/tag/akhenaton|broader_prefLabel|Egypte antique +http://www.semanlink.net/tag/predicting_numeric_values_from_text|creationTime|2018-02-06T16:43:42Z +http://www.semanlink.net/tag/predicting_numeric_values_from_text|prefLabel|Predicting numeric values from text +http://www.semanlink.net/tag/predicting_numeric_values_from_text|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/predicting_numeric_values_from_text|creationDate|2018-02-06 +http://www.semanlink.net/tag/predicting_numeric_values_from_text|comment|"could use Regression based on k-nearest neighbors ([sci-kit implementation](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html)) +" +http://www.semanlink.net/tag/predicting_numeric_values_from_text|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/predicting_numeric_values_from_text|uri|http://www.semanlink.net/tag/predicting_numeric_values_from_text +http://www.semanlink.net/tag/predicting_numeric_values_from_text|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/politique_economique_francaise|creationTime|2014-04-11T23:47:58Z +http://www.semanlink.net/tag/politique_economique_francaise|prefLabel|Politique économique française +http://www.semanlink.net/tag/politique_economique_francaise|broader|http://www.semanlink.net/tag/economie_francaise 
+http://www.semanlink.net/tag/politique_economique_francaise|creationDate|2014-04-11 +http://www.semanlink.net/tag/politique_economique_francaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/politique_economique_francaise|uri|http://www.semanlink.net/tag/politique_economique_francaise +http://www.semanlink.net/tag/politique_economique_francaise|broader_prefLabel|Economie française +http://www.semanlink.net/tag/politique_economique_francaise|broader_altLabel|Economie France +http://www.semanlink.net/tag/rdfj|creationTime|2012-03-20T17:39:16Z +http://www.semanlink.net/tag/rdfj|prefLabel|RDFj +http://www.semanlink.net/tag/rdfj|broader|http://www.semanlink.net/tag/rdf_in_json +http://www.semanlink.net/tag/rdfj|broader|http://www.semanlink.net/tag/backplanejs +http://www.semanlink.net/tag/rdfj|creationDate|2012-03-20 +http://www.semanlink.net/tag/rdfj|comment|"RDFj is a set of conventions for
+- constructing JSON objects in such a way that they can easily be interpreted as RDF;
+- taking RDF and arriving at canonical JSON objects." +http://www.semanlink.net/tag/rdfj|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdfj|uri|http://www.semanlink.net/tag/rdfj +http://www.semanlink.net/tag/rdfj|broader_prefLabel|RDF-in-JSON +http://www.semanlink.net/tag/rdfj|broader_prefLabel|backplanejs +http://www.semanlink.net/tag/rdfj|broader_related|http://www.semanlink.net/tag/mark_birbeck +http://www.semanlink.net/tag/mars_2004|prefLabel|Mars 2004 +http://www.semanlink.net/tag/mars_2004|broader|http://www.semanlink.net/tag/exploration_marsienne +http://www.semanlink.net/tag/mars_2004|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mars_2004|uri|http://www.semanlink.net/tag/mars_2004 +http://www.semanlink.net/tag/mars_2004|broader_prefLabel|Exploration marsienne +http://www.semanlink.net/tag/my_old_things|creationTime|2010-05-19T16:49:21Z +http://www.semanlink.net/tag/my_old_things|prefLabel|My old things +http://www.semanlink.net/tag/my_old_things|broader|http://www.semanlink.net/tag/fps +http://www.semanlink.net/tag/my_old_things|creationDate|2010-05-19 +http://www.semanlink.net/tag/my_old_things|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/my_old_things|uri|http://www.semanlink.net/tag/my_old_things +http://www.semanlink.net/tag/my_old_things|broader_prefLabel|fps +http://www.semanlink.net/tag/mark_zuckerberg|creationTime|2015-12-04T00:12:39Z +http://www.semanlink.net/tag/mark_zuckerberg|prefLabel|Zuckerberg +http://www.semanlink.net/tag/mark_zuckerberg|related|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/mark_zuckerberg|creationDate|2015-12-04 +http://www.semanlink.net/tag/mark_zuckerberg|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mark_zuckerberg|describedBy|https://en.wikipedia.org/wiki/Mark_Zuckerberg +http://www.semanlink.net/tag/mark_zuckerberg|uri|http://www.semanlink.net/tag/mark_zuckerberg +http://www.semanlink.net/tag/docker_tomcat|creationTime|2016-04-06T14:58:58Z +http://www.semanlink.net/tag/docker_tomcat|prefLabel|Docker-Tomcat +http://www.semanlink.net/tag/docker_tomcat|broader|http://www.semanlink.net/tag/docker +http://www.semanlink.net/tag/docker_tomcat|broader|http://www.semanlink.net/tag/tomcat +http://www.semanlink.net/tag/docker_tomcat|creationDate|2016-04-06 +http://www.semanlink.net/tag/docker_tomcat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/docker_tomcat|uri|http://www.semanlink.net/tag/docker_tomcat +http://www.semanlink.net/tag/docker_tomcat|broader_prefLabel|Docker +http://www.semanlink.net/tag/docker_tomcat|broader_prefLabel|Tomcat +http://www.semanlink.net/tag/meaning_in_nlp|creationTime|2018-08-14T22:02:30Z +http://www.semanlink.net/tag/meaning_in_nlp|prefLabel|Meaning in NLP +http://www.semanlink.net/tag/meaning_in_nlp|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/meaning_in_nlp|related|http://www.semanlink.net/tag/grounded_language_learning +http://www.semanlink.net/tag/meaning_in_nlp|creationDate|2018-08-14 +http://www.semanlink.net/tag/meaning_in_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/meaning_in_nlp|uri|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/tag/meaning_in_nlp|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/esprit_de_resistance|prefLabel|Esprit de résistance 
+http://www.semanlink.net/tag/esprit_de_resistance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/esprit_de_resistance|uri|http://www.semanlink.net/tag/esprit_de_resistance +http://www.semanlink.net/tag/oceanie|prefLabel|Océanie +http://www.semanlink.net/tag/oceanie|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/oceanie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/oceanie|uri|http://www.semanlink.net/tag/oceanie +http://www.semanlink.net/tag/oceanie|broader_prefLabel|Géographie +http://www.semanlink.net/tag/cedric_villani|creationTime|2013-03-21T19:36:34Z +http://www.semanlink.net/tag/cedric_villani|prefLabel|Cédric Villani +http://www.semanlink.net/tag/cedric_villani|broader|http://www.semanlink.net/tag/medaille_fields +http://www.semanlink.net/tag/cedric_villani|broader|http://www.semanlink.net/tag/mathematicien +http://www.semanlink.net/tag/cedric_villani|creationDate|2013-03-21 +http://www.semanlink.net/tag/cedric_villani|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cedric_villani|uri|http://www.semanlink.net/tag/cedric_villani +http://www.semanlink.net/tag/cedric_villani|broader_prefLabel|Médaille Fields +http://www.semanlink.net/tag/cedric_villani|broader_prefLabel|Mathématicien +http://www.semanlink.net/tag/madame_bovary|creationTime|2015-03-24T09:38:30Z +http://www.semanlink.net/tag/madame_bovary|prefLabel|Madame Bovary +http://www.semanlink.net/tag/madame_bovary|broader|http://www.semanlink.net/tag/flaubert +http://www.semanlink.net/tag/madame_bovary|creationDate|2015-03-24 +http://www.semanlink.net/tag/madame_bovary|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/madame_bovary|uri|http://www.semanlink.net/tag/madame_bovary +http://www.semanlink.net/tag/madame_bovary|broader_prefLabel|Flaubert +http://www.semanlink.net/tag/peter_patel_schneider|creationTime|2011-02-14T16:11:49Z +http://www.semanlink.net/tag/peter_patel_schneider|prefLabel|Peter Patel-Schneider +http://www.semanlink.net/tag/peter_patel_schneider|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/peter_patel_schneider|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/peter_patel_schneider|creationDate|2011-02-14 +http://www.semanlink.net/tag/peter_patel_schneider|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/peter_patel_schneider|altLabel|Patel-Schneider +http://www.semanlink.net/tag/peter_patel_schneider|uri|http://www.semanlink.net/tag/peter_patel_schneider +http://www.semanlink.net/tag/peter_patel_schneider|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/peter_patel_schneider|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/peter_patel_schneider|broader_altLabel|Technical guys +http://www.semanlink.net/tag/variabilite_du_genome_humain|prefLabel|Variabilité du génome humain +http://www.semanlink.net/tag/variabilite_du_genome_humain|broader|http://www.semanlink.net/tag/genetique_humaine +http://www.semanlink.net/tag/variabilite_du_genome_humain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/variabilite_du_genome_humain|uri|http://www.semanlink.net/tag/variabilite_du_genome_humain +http://www.semanlink.net/tag/variabilite_du_genome_humain|broader_prefLabel|Génétique humaine 
+http://www.semanlink.net/tag/variabilite_du_genome_humain|broader_altLabel|Génome humain +http://www.semanlink.net/tag/fps_post|creationTime|2007-09-25T22:22:53Z +http://www.semanlink.net/tag/fps_post|prefLabel|fps' post +http://www.semanlink.net/tag/fps_post|broader|http://www.semanlink.net/tag/fps +http://www.semanlink.net/tag/fps_post|creationDate|2007-09-25 +http://www.semanlink.net/tag/fps_post|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps_post|uri|http://www.semanlink.net/tag/fps_post +http://www.semanlink.net/tag/fps_post|broader_prefLabel|fps +http://www.semanlink.net/tag/finding_rdf_documents|prefLabel|Finding RDF documents +http://www.semanlink.net/tag/finding_rdf_documents|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/finding_rdf_documents|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/finding_rdf_documents|uri|http://www.semanlink.net/tag/finding_rdf_documents +http://www.semanlink.net/tag/finding_rdf_documents|broader_prefLabel|RDF +http://www.semanlink.net/tag/finding_rdf_documents|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/finding_rdf_documents|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/finding_rdf_documents|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/finding_rdf_documents|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/finding_rdf_documents|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/emnlp_2019|creationTime|2019-09-16T22:03:38Z +http://www.semanlink.net/tag/emnlp_2019|prefLabel|EMNLP 2019 +http://www.semanlink.net/tag/emnlp_2019|broader|http://www.semanlink.net/tag/emnlp +http://www.semanlink.net/tag/emnlp_2019|creationDate|2019-09-16 +http://www.semanlink.net/tag/emnlp_2019|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/emnlp_2019|uri|http://www.semanlink.net/tag/emnlp_2019 +http://www.semanlink.net/tag/emnlp_2019|broader_prefLabel|EMNLP +http://www.semanlink.net/tag/knowledge_graph_embeddings|creationTime|2018-01-03T16:42:41Z +http://www.semanlink.net/tag/knowledge_graph_embeddings|prefLabel|Knowledge Graph Embeddings +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/tag/knowledge_graph_embeddings|related|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/knowledge_graph_embeddings|related|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/knowledge_graph_embeddings|related|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/knowledge_graph_embeddings|related|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/knowledge_graph_embeddings|creationDate|2018-01-03 +http://www.semanlink.net/tag/knowledge_graph_embeddings|comment|"How can we use knowledge graph in computing? 
+ +A knowledge graph is a symbolic and logical system but applications often involve numerical computing in continuous spaces. Formal logic is neither tractable nor robust when dealing with knowledge graph. Hence the idea of Knowledge graph embeddings. + +Generally, each entity is represented +as a point in that space while each relation is interpreted as an operation over entity embeddings (eg. in Bordes et al. ""[TransE](tag:transe)"", a translation). The embedding representations are usually learnt by minimizing a global loss function involving all entities and relations so that each entity +embedding encodes both local and global connectivity patterns of the original graph. +" +http://www.semanlink.net/tag/knowledge_graph_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_graph_embeddings|altLabel|KGE +http://www.semanlink.net/tag/knowledge_graph_embeddings|altLabel|Knowledge graph embedding +http://www.semanlink.net/tag/knowledge_graph_embeddings|altLabel|KG embedding +http://www.semanlink.net/tag/knowledge_graph_embeddings|uri|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_prefLabel|Graph Embeddings +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_prefLabel|Knowledge Representation +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_altLabel|KG +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_altLabel|Representation Learning on Networks +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_altLabel|Graph representation learning +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_altLabel|Network Representation Learning +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_altLabel|Network embeddings +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_altLabel|KR +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/tag/natural_language_generation|creationTime|2017-06-09T10:56:54Z +http://www.semanlink.net/tag/natural_language_generation|prefLabel|Natural language generation +http://www.semanlink.net/tag/natural_language_generation|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/natural_language_generation|creationDate|2017-06-09 +http://www.semanlink.net/tag/natural_language_generation|comment|task of generating natural language from a machine representation system such as a knowledge base or a logical form +http://www.semanlink.net/tag/natural_language_generation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/natural_language_generation|describedBy|https://en.wikipedia.org/wiki/Natural_language_generation +http://www.semanlink.net/tag/natural_language_generation|altLabel|Text generation 
+http://www.semanlink.net/tag/natural_language_generation|altLabel|NLG +http://www.semanlink.net/tag/natural_language_generation|altLabel|Language production +http://www.semanlink.net/tag/natural_language_generation|uri|http://www.semanlink.net/tag/natural_language_generation +http://www.semanlink.net/tag/natural_language_generation|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|creationTime|2021-05-17T15:13:07Z +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|prefLabel|Hierarchy-Aware KG Embeddings +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader|http://www.semanlink.net/tag/hierarchical_categories +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader|http://www.semanlink.net/tag/hierarchies_in_ml +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader|http://www.semanlink.net/tag/semantic_hierarchies +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|related|http://www.semanlink.net/tag/hierarchical_tags +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|related|http://www.semanlink.net/tag/hierarchical_classification +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|creationDate|2021-05-17 +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|altLabel|Hierarchy-Aware Knowledge Graph Embeddings +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|uri|http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_prefLabel|Hierarchical Categories +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_prefLabel|Hierarchies in ML +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_prefLabel|Knowledge Graph Embeddings +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_prefLabel|Semantic hierarchies +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_altLabel|KGE +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_altLabel|Knowledge graph embedding +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_altLabel|KG embedding +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/hierarchical_tags +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/semanlink_tag_finder 
+http://www.semanlink.net/tag/isp|prefLabel|ISP +http://www.semanlink.net/tag/isp|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/isp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/isp|uri|http://www.semanlink.net/tag/isp +http://www.semanlink.net/tag/isp|broader_prefLabel|Internet +http://www.semanlink.net/tag/yougoslavie|prefLabel|Yougoslavie +http://www.semanlink.net/tag/yougoslavie|prefLabel|Ex Yougoslavie +http://www.semanlink.net/tag/yougoslavie|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/yougoslavie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yougoslavie|uri|http://www.semanlink.net/tag/yougoslavie +http://www.semanlink.net/tag/yougoslavie|broader_prefLabel|Europe +http://www.semanlink.net/tag/cool|prefLabel|Cool +http://www.semanlink.net/tag/cool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cool|uri|http://www.semanlink.net/tag/cool +http://www.semanlink.net/tag/equateur|creationTime|2009-03-10T23:06:05Z +http://www.semanlink.net/tag/equateur|prefLabel|Equateur +http://www.semanlink.net/tag/equateur|broader|http://www.semanlink.net/tag/amerique_du_sud +http://www.semanlink.net/tag/equateur|creationDate|2009-03-10 +http://www.semanlink.net/tag/equateur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/equateur|uri|http://www.semanlink.net/tag/equateur +http://www.semanlink.net/tag/equateur|broader_prefLabel|Amérique du sud +http://www.semanlink.net/tag/c2gweb_and_product_description|creationTime|2013-01-24T01:21:29Z +http://www.semanlink.net/tag/c2gweb_and_product_description|prefLabel|C2GWeb and Product description +http://www.semanlink.net/tag/c2gweb_and_product_description|broader|http://www.semanlink.net/tag/product_description +http://www.semanlink.net/tag/c2gweb_and_product_description|broader|http://www.semanlink.net/tag/e_commerce_data +http://www.semanlink.net/tag/c2gweb_and_product_description|broader|http://www.semanlink.net/tag/c2gweb_seo +http://www.semanlink.net/tag/c2gweb_and_product_description|broader|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/c2gweb_and_product_description|related|http://www.semanlink.net/tag/configuration_ontology +http://www.semanlink.net/tag/c2gweb_and_product_description|creationDate|2013-01-24 +http://www.semanlink.net/tag/c2gweb_and_product_description|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/c2gweb_and_product_description|uri|http://www.semanlink.net/tag/c2gweb_and_product_description +http://www.semanlink.net/tag/c2gweb_and_product_description|broader_prefLabel|Product description +http://www.semanlink.net/tag/c2gweb_and_product_description|broader_prefLabel|e-commerce data +http://www.semanlink.net/tag/c2gweb_and_product_description|broader_prefLabel|C2GWeb: SEO +http://www.semanlink.net/tag/c2gweb_and_product_description|broader_prefLabel|C2GWeb +http://www.semanlink.net/tag/c2gweb_and_product_description|broader_related|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/prelevements_obligatoires|prefLabel|Prélèvements obligatoires +http://www.semanlink.net/tag/prelevements_obligatoires|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/prelevements_obligatoires|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/prelevements_obligatoires|uri|http://www.semanlink.net/tag/prelevements_obligatoires +http://www.semanlink.net/tag/prelevements_obligatoires|broader_prefLabel|Economie +http://www.semanlink.net/tag/prelevements_obligatoires|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/chlorinated_chicken|creationTime|2017-07-30T01:23:07Z +http://www.semanlink.net/tag/chlorinated_chicken|prefLabel|Chlorinated chicken +http://www.semanlink.net/tag/chlorinated_chicken|creationDate|2017-07-30 +http://www.semanlink.net/tag/chlorinated_chicken|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chlorinated_chicken|uri|http://www.semanlink.net/tag/chlorinated_chicken +http://www.semanlink.net/tag/owled|creationTime|2007-07-13T18:49:36Z +http://www.semanlink.net/tag/owled|prefLabel|OWLED +http://www.semanlink.net/tag/owled|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owled|creationDate|2007-07-13 +http://www.semanlink.net/tag/owled|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owled|uri|http://www.semanlink.net/tag/owled +http://www.semanlink.net/tag/owled|broader_prefLabel|OWL +http://www.semanlink.net/tag/owled|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/loi_renseignement|creationTime|2015-07-11T00:37:45Z +http://www.semanlink.net/tag/loi_renseignement|prefLabel|Loi Renseignement +http://www.semanlink.net/tag/loi_renseignement|broader|http://www.semanlink.net/tag/big_brother +http://www.semanlink.net/tag/loi_renseignement|creationDate|2015-07-11 +http://www.semanlink.net/tag/loi_renseignement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/loi_renseignement|uri|http://www.semanlink.net/tag/loi_renseignement +http://www.semanlink.net/tag/loi_renseignement|broader_prefLabel|Big Brother +http://www.semanlink.net/tag/fn|prefLabel|FN +http://www.semanlink.net/tag/fn|broader|http://www.semanlink.net/tag/extreme_droite +http://www.semanlink.net/tag/fn|broader|http://www.semanlink.net/tag/politique_francaise +http://www.semanlink.net/tag/fn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fn|uri|http://www.semanlink.net/tag/fn +http://www.semanlink.net/tag/fn|broader_prefLabel|Extrème droite +http://www.semanlink.net/tag/fn|broader_prefLabel|Politique française +http://www.semanlink.net/tag/installing_apps|creationTime|2007-07-07T15:22:29Z +http://www.semanlink.net/tag/installing_apps|prefLabel|Installing apps +http://www.semanlink.net/tag/installing_apps|creationDate|2007-07-07 +http://www.semanlink.net/tag/installing_apps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/installing_apps|uri|http://www.semanlink.net/tag/installing_apps +http://www.semanlink.net/tag/kaggle|creationTime|2018-01-20T10:52:54Z +http://www.semanlink.net/tag/kaggle|prefLabel|Kaggle +http://www.semanlink.net/tag/kaggle|creationDate|2018-01-20 +http://www.semanlink.net/tag/kaggle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kaggle|describedBy|https://en.wikipedia.org/wiki/Kaggle +http://www.semanlink.net/tag/kaggle|uri|http://www.semanlink.net/tag/kaggle +http://www.semanlink.net/tag/philosophe|prefLabel|Philosophe +http://www.semanlink.net/tag/philosophe|broader|http://www.semanlink.net/tag/homme_celebre 
+http://www.semanlink.net/tag/philosophe|broader|http://www.semanlink.net/tag/philosophie +http://www.semanlink.net/tag/philosophe|broader|http://www.semanlink.net/tag/penseur +http://www.semanlink.net/tag/philosophe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/philosophe|uri|http://www.semanlink.net/tag/philosophe +http://www.semanlink.net/tag/philosophe|broader_prefLabel|Homme célèbre +http://www.semanlink.net/tag/philosophe|broader_prefLabel|Philosophie +http://www.semanlink.net/tag/philosophe|broader_prefLabel|Penseur +http://www.semanlink.net/tag/philosophe|broader_altLabel|Philosophy +http://www.semanlink.net/tag/w3c_note|creationTime|2008-09-01T13:40:05Z +http://www.semanlink.net/tag/w3c_note|prefLabel|W3C Note +http://www.semanlink.net/tag/w3c_note|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/w3c_note|creationDate|2008-09-01 +http://www.semanlink.net/tag/w3c_note|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/w3c_note|uri|http://www.semanlink.net/tag/w3c_note +http://www.semanlink.net/tag/w3c_note|broader_prefLabel|W3C +http://www.semanlink.net/tag/w3c_note|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/w3c_note|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/keras_functional_api|creationTime|2018-12-12T11:36:10Z +http://www.semanlink.net/tag/keras_functional_api|prefLabel|Keras Functional API +http://www.semanlink.net/tag/keras_functional_api|broader|http://www.semanlink.net/tag/keras +http://www.semanlink.net/tag/keras_functional_api|creationDate|2018-12-12 +http://www.semanlink.net/tag/keras_functional_api|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/keras_functional_api|uri|http://www.semanlink.net/tag/keras_functional_api +http://www.semanlink.net/tag/keras_functional_api|broader_prefLabel|Keras +http://www.semanlink.net/tag/theatre|prefLabel|Théatre +http://www.semanlink.net/tag/theatre|broader|http://www.semanlink.net/tag/art +http://www.semanlink.net/tag/theatre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/theatre|uri|http://www.semanlink.net/tag/theatre +http://www.semanlink.net/tag/theatre|broader_prefLabel|Art +http://www.semanlink.net/tag/film_americain|prefLabel|Film américain +http://www.semanlink.net/tag/film_americain|broader|http://www.semanlink.net/tag/cinema_americain +http://www.semanlink.net/tag/film_americain|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_americain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_americain|uri|http://www.semanlink.net/tag/film_americain +http://www.semanlink.net/tag/film_americain|broader_prefLabel|Cinéma américain +http://www.semanlink.net/tag/film_americain|broader_prefLabel|Film +http://www.semanlink.net/tag/ernest|creationTime|2014-07-03T14:25:31Z +http://www.semanlink.net/tag/ernest|prefLabel|Ernest Ilisca +http://www.semanlink.net/tag/ernest|broader|http://www.semanlink.net/tag/ami +http://www.semanlink.net/tag/ernest|related|http://www.semanlink.net/tag/hydrogen_economy +http://www.semanlink.net/tag/ernest|creationDate|2014-07-03 +http://www.semanlink.net/tag/ernest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ernest|altLabel|Ernest +http://www.semanlink.net/tag/ernest|uri|http://www.semanlink.net/tag/ernest 
+http://www.semanlink.net/tag/ernest|broader_prefLabel|Ami +http://www.semanlink.net/tag/science|prefLabel|Science +http://www.semanlink.net/tag/science|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/science|altLabel|sciences +http://www.semanlink.net/tag/science|uri|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/haskell|creationTime|2015-01-06T12:25:16Z +http://www.semanlink.net/tag/haskell|prefLabel|Haskell +http://www.semanlink.net/tag/haskell|broader|http://www.semanlink.net/tag/functional_programming +http://www.semanlink.net/tag/haskell|creationDate|2015-01-06 +http://www.semanlink.net/tag/haskell|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/haskell|uri|http://www.semanlink.net/tag/haskell +http://www.semanlink.net/tag/haskell|broader_prefLabel|Functional programming +http://www.semanlink.net/tag/lobbies_economiques|creationTime|2013-07-05T23:57:22Z +http://www.semanlink.net/tag/lobbies_economiques|prefLabel|Lobbies économiques +http://www.semanlink.net/tag/lobbies_economiques|broader|http://www.semanlink.net/tag/lobby +http://www.semanlink.net/tag/lobbies_economiques|creationDate|2013-07-05 +http://www.semanlink.net/tag/lobbies_economiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lobbies_economiques|uri|http://www.semanlink.net/tag/lobbies_economiques +http://www.semanlink.net/tag/lobbies_economiques|broader_prefLabel|Lobby +http://www.semanlink.net/tag/walmart|creationTime|2020-04-09T21:21:56Z +http://www.semanlink.net/tag/walmart|prefLabel|Walmart +http://www.semanlink.net/tag/walmart|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/walmart|creationDate|2020-04-09 +http://www.semanlink.net/tag/walmart|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/walmart|describedBy|https://en.wikipedia.org/wiki/Walmart +http://www.semanlink.net/tag/walmart|altLabel|Wal-Mart +http://www.semanlink.net/tag/walmart|uri|http://www.semanlink.net/tag/walmart +http://www.semanlink.net/tag/walmart|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/dereferencing_http_uris|creationTime|2007-10-13T19:26:48Z +http://www.semanlink.net/tag/dereferencing_http_uris|prefLabel|Dereferencing HTTP URIs +http://www.semanlink.net/tag/dereferencing_http_uris|broader|http://www.semanlink.net/tag/uri_dereferencing +http://www.semanlink.net/tag/dereferencing_http_uris|creationDate|2007-10-13 +http://www.semanlink.net/tag/dereferencing_http_uris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dereferencing_http_uris|uri|http://www.semanlink.net/tag/dereferencing_http_uris +http://www.semanlink.net/tag/dereferencing_http_uris|broader_prefLabel|URI dereferencing +http://www.semanlink.net/tag/httprange_14|prefLabel|httpRange-14 +http://www.semanlink.net/tag/httprange_14|broader|http://www.semanlink.net/tag/dereferencing_http_uris +http://www.semanlink.net/tag/httprange_14|broader|http://www.semanlink.net/tag/http +http://www.semanlink.net/tag/httprange_14|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/httprange_14|broader|http://www.semanlink.net/tag/information_resources +http://www.semanlink.net/tag/httprange_14|related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/httprange_14|related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/httprange_14|creationDate|2007-01-02 
+http://www.semanlink.net/tag/httprange_14|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/httprange_14|altLabel|303-redirect +http://www.semanlink.net/tag/httprange_14|uri|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/httprange_14|broader_prefLabel|Dereferencing HTTP URIs +http://www.semanlink.net/tag/httprange_14|broader_prefLabel|HTTP +http://www.semanlink.net/tag/httprange_14|broader_prefLabel|URI +http://www.semanlink.net/tag/httprange_14|broader_prefLabel|Information resources +http://www.semanlink.net/tag/httprange_14|broader_altLabel|Concept's URI +http://www.semanlink.net/tag/hierarchical_tags|creationTime|2009-06-23T08:56:08Z +http://www.semanlink.net/tag/hierarchical_tags|prefLabel|Hierarchical tags +http://www.semanlink.net/tag/hierarchical_tags|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/hierarchical_tags|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/hierarchical_tags|broader|http://www.semanlink.net/tag/concept_hierarchies +http://www.semanlink.net/tag/hierarchical_tags|related|http://www.semanlink.net/tag/skos +http://www.semanlink.net/tag/hierarchical_tags|creationDate|2009-06-23 +http://www.semanlink.net/tag/hierarchical_tags|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hierarchical_tags|uri|http://www.semanlink.net/tag/hierarchical_tags +http://www.semanlink.net/tag/hierarchical_tags|broader_prefLabel|Tagging +http://www.semanlink.net/tag/hierarchical_tags|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/hierarchical_tags|broader_prefLabel|Concept hierarchies +http://www.semanlink.net/tag/hierarchical_tags|broader_related|http://www.semanlink.net/tag/skos +http://www.semanlink.net/tag/liberte_de_pensee|prefLabel|Liberté de pensée +http://www.semanlink.net/tag/liberte_de_pensee|broader|http://www.semanlink.net/tag/liberte +http://www.semanlink.net/tag/liberte_de_pensee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/liberte_de_pensee|uri|http://www.semanlink.net/tag/liberte_de_pensee +http://www.semanlink.net/tag/liberte_de_pensee|broader_prefLabel|Liberté +http://www.semanlink.net/tag/liberte_de_pensee|broader_related|http://www.semanlink.net/tag/etat_policier +http://www.semanlink.net/tag/exploration_spatiale|prefLabel|Exploration spatiale +http://www.semanlink.net/tag/exploration_spatiale|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/exploration_spatiale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exploration_spatiale|uri|http://www.semanlink.net/tag/exploration_spatiale +http://www.semanlink.net/tag/exploration_spatiale|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/faiss|creationTime|2020-07-10T09:34:44Z +http://www.semanlink.net/tag/faiss|prefLabel|faiss +http://www.semanlink.net/tag/faiss|broader|http://www.semanlink.net/tag/facebook_fair +http://www.semanlink.net/tag/faiss|broader|http://www.semanlink.net/tag/nearest_neighbor_search +http://www.semanlink.net/tag/faiss|broader|http://www.semanlink.net/tag/library_code +http://www.semanlink.net/tag/faiss|broader|http://www.semanlink.net/tag/machine_learning_library +http://www.semanlink.net/tag/faiss|creationDate|2020-07-10 +http://www.semanlink.net/tag/faiss|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/faiss|uri|http://www.semanlink.net/tag/faiss 
+http://www.semanlink.net/tag/faiss|broader_prefLabel|Facebook FAIR +http://www.semanlink.net/tag/faiss|broader_prefLabel|Nearest neighbor search +http://www.semanlink.net/tag/faiss|broader_prefLabel|Library (code) +http://www.semanlink.net/tag/faiss|broader_prefLabel|Machine Learning library +http://www.semanlink.net/tag/faiss|broader_altLabel|Similarity search +http://www.semanlink.net/tag/faiss|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/faiss|broader_related|http://www.semanlink.net/tag/frameworks +http://www.semanlink.net/tag/approximate_nearest_neighbor|creationTime|2020-01-11T02:39:51Z +http://www.semanlink.net/tag/approximate_nearest_neighbor|prefLabel|Approximate nearest-neighbor +http://www.semanlink.net/tag/approximate_nearest_neighbor|broader|http://www.semanlink.net/tag/nearest_neighbor_search +http://www.semanlink.net/tag/approximate_nearest_neighbor|creationDate|2020-01-11 +http://www.semanlink.net/tag/approximate_nearest_neighbor|comment|"There are ([src](/doc/2020/01/building_a_real_time_embeddings)) two main approaches for approximate similarity +matching: + +- **tree-based approaches** (recursively partition the data, putting similar vectors +near each other in the tree) +- and **hashing-based approaches** (learn a model that +converts an item into a code, where similar items +produce the same or similar code). + +> Three popular libraries for approximate nearest neighbor are Annoy, FAISS, and hnswlib. ([src](https://www.sbert.net/examples/applications/semantic-search/README.html#approximate-nearest-neighbor))" +http://www.semanlink.net/tag/approximate_nearest_neighbor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/approximate_nearest_neighbor|uri|http://www.semanlink.net/tag/approximate_nearest_neighbor +http://www.semanlink.net/tag/approximate_nearest_neighbor|broader_prefLabel|Nearest neighbor search +http://www.semanlink.net/tag/approximate_nearest_neighbor|broader_altLabel|Similarity search +http://www.semanlink.net/tag/approximate_nearest_neighbor|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/nlp_class|creationTime|2012-03-07T13:09:29Z +http://www.semanlink.net/tag/nlp_class|prefLabel|Coursera: NLP class +http://www.semanlink.net/tag/nlp_class|broader|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/tag/nlp_class|broader|http://www.semanlink.net/tag/coursera +http://www.semanlink.net/tag/nlp_class|broader|http://www.semanlink.net/tag/stanford +http://www.semanlink.net/tag/nlp_class|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_class|related|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/tag/nlp_class|related|http://www.semanlink.net/tag/dan_jurafsky +http://www.semanlink.net/tag/nlp_class|creationDate|2012-03-07 +http://www.semanlink.net/tag/nlp_class|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_class|homepage|https://www.coursera.org/course/nlp +http://www.semanlink.net/tag/nlp_class|uri|http://www.semanlink.net/tag/nlp_class +http://www.semanlink.net/tag/nlp_class|broader_prefLabel|NLP@Stanford +http://www.semanlink.net/tag/nlp_class|broader_prefLabel|Coursera +http://www.semanlink.net/tag/nlp_class|broader_prefLabel|Stanford +http://www.semanlink.net/tag/nlp_class|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_class|broader_altLabel|TALN 
+http://www.semanlink.net/tag/nlp_class|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_class|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/nlp_class|broader_related|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/tag/nlp_class|broader_related|http://www.semanlink.net/tag/dan_jurafsky +http://www.semanlink.net/tag/nlp_class|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/james_stewart|creationTime|2011-12-31T02:09:41Z +http://www.semanlink.net/tag/james_stewart|prefLabel|James Stewart +http://www.semanlink.net/tag/james_stewart|broader|http://www.semanlink.net/tag/acteur +http://www.semanlink.net/tag/james_stewart|creationDate|2011-12-31 +http://www.semanlink.net/tag/james_stewart|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/james_stewart|describedBy|https://en.wikipedia.org/wiki/James_Stewart +http://www.semanlink.net/tag/james_stewart|uri|http://www.semanlink.net/tag/james_stewart +http://www.semanlink.net/tag/james_stewart|broader_prefLabel|Acteur +http://www.semanlink.net/tag/orne|creationTime|2011-09-26T14:41:05Z +http://www.semanlink.net/tag/orne|prefLabel|Orne +http://www.semanlink.net/tag/orne|creationDate|2011-09-26 +http://www.semanlink.net/tag/orne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/orne|uri|http://www.semanlink.net/tag/orne +http://www.semanlink.net/tag/iran|prefLabel|Iran +http://www.semanlink.net/tag/iran|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/iran|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/iran|uri|http://www.semanlink.net/tag/iran +http://www.semanlink.net/tag/iran|broader_prefLabel|Asie +http://www.semanlink.net/tag/faceted_search|creationTime|2009-01-16T21:50:40Z +http://www.semanlink.net/tag/faceted_search|prefLabel|Faceted Search +http://www.semanlink.net/tag/faceted_search|creationDate|2009-01-16 +http://www.semanlink.net/tag/faceted_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/faceted_search|altLabel|Facets +http://www.semanlink.net/tag/faceted_search|uri|http://www.semanlink.net/tag/faceted_search +http://www.semanlink.net/tag/nlp_use_cases|creationTime|2014-03-26T13:14:46Z +http://www.semanlink.net/tag/nlp_use_cases|prefLabel|NLP: use cases +http://www.semanlink.net/tag/nlp_use_cases|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_use_cases|creationDate|2014-03-26 +http://www.semanlink.net/tag/nlp_use_cases|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_use_cases|altLabel|NLP: applications +http://www.semanlink.net/tag/nlp_use_cases|uri|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/tag/nlp_use_cases|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_use_cases|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_use_cases|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_use_cases|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/fastai_nbdev|creationTime|2020-02-20T23:13:08Z +http://www.semanlink.net/tag/fastai_nbdev|prefLabel|nbdev.fast.ai +http://www.semanlink.net/tag/fastai_nbdev|broader|http://www.semanlink.net/tag/jupyter +http://www.semanlink.net/tag/fastai_nbdev|broader|http://www.semanlink.net/tag/python_tools 
+http://www.semanlink.net/tag/fastai_nbdev|broader|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/tag/fastai_nbdev|creationDate|2020-02-20 +http://www.semanlink.net/tag/fastai_nbdev|comment|> Create delightful python projects using Jupyter Notebooks +http://www.semanlink.net/tag/fastai_nbdev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fastai_nbdev|homepage|https://nbdev.fast.ai/ +http://www.semanlink.net/tag/fastai_nbdev|uri|http://www.semanlink.net/tag/fastai_nbdev +http://www.semanlink.net/tag/fastai_nbdev|broader_prefLabel|Jupyter +http://www.semanlink.net/tag/fastai_nbdev|broader_prefLabel|Python tools +http://www.semanlink.net/tag/fastai_nbdev|broader_prefLabel|fast.ai +http://www.semanlink.net/tag/fastai_nbdev|broader_altLabel|fastai +http://www.semanlink.net/tag/automobile_2_0|creationTime|2014-04-08T09:40:05Z +http://www.semanlink.net/tag/automobile_2_0|prefLabel|Automobile 2.0 +http://www.semanlink.net/tag/automobile_2_0|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/automobile_2_0|creationDate|2014-04-08 +http://www.semanlink.net/tag/automobile_2_0|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/automobile_2_0|uri|http://www.semanlink.net/tag/automobile_2_0 +http://www.semanlink.net/tag/automobile_2_0|broader_prefLabel|Automobile +http://www.semanlink.net/tag/automobile_2_0|broader_altLabel|Automotive +http://www.semanlink.net/tag/mvc|prefLabel|MVC +http://www.semanlink.net/tag/mvc|broader|http://www.semanlink.net/tag/design_pattern +http://www.semanlink.net/tag/mvc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mvc|uri|http://www.semanlink.net/tag/mvc +http://www.semanlink.net/tag/mvc|broader_prefLabel|Design pattern +http://www.semanlink.net/tag/mvc|broader_altLabel|Patterns +http://www.semanlink.net/tag/accountable_ai|creationTime|2018-11-05T09:40:12Z +http://www.semanlink.net/tag/accountable_ai|prefLabel|Accountable AI +http://www.semanlink.net/tag/accountable_ai|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/accountable_ai|creationDate|2018-11-05 +http://www.semanlink.net/tag/accountable_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/accountable_ai|uri|http://www.semanlink.net/tag/accountable_ai +http://www.semanlink.net/tag/accountable_ai|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/accountable_ai|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/accountable_ai|broader_altLabel|AI +http://www.semanlink.net/tag/accountable_ai|broader_altLabel|IA +http://www.semanlink.net/tag/accountable_ai|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/xtech_2007|creationTime|2007-05-02T21:57:10Z +http://www.semanlink.net/tag/xtech_2007|prefLabel|XTech 2007 +http://www.semanlink.net/tag/xtech_2007|broader|http://www.semanlink.net/tag/xtech +http://www.semanlink.net/tag/xtech_2007|creationDate|2007-05-02 +http://www.semanlink.net/tag/xtech_2007|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xtech_2007|uri|http://www.semanlink.net/tag/xtech_2007 +http://www.semanlink.net/tag/xtech_2007|broader_prefLabel|XTech +http://www.semanlink.net/tag/machines_teaching_machines|creationTime|2020-05-31T10:43:32Z +http://www.semanlink.net/tag/machines_teaching_machines|prefLabel|Machines teaching machines 
+http://www.semanlink.net/tag/machines_teaching_machines|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/machines_teaching_machines|creationDate|2020-05-31 +http://www.semanlink.net/tag/machines_teaching_machines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/machines_teaching_machines|uri|http://www.semanlink.net/tag/machines_teaching_machines +http://www.semanlink.net/tag/machines_teaching_machines|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/machines_teaching_machines|broader_altLabel|ML +http://www.semanlink.net/tag/machines_teaching_machines|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/machines_teaching_machines|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/markup|creationTime|2020-01-18T23:45:42Z +http://www.semanlink.net/tag/markup|prefLabel|Markup +http://www.semanlink.net/tag/markup|creationDate|2020-01-18 +http://www.semanlink.net/tag/markup|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/markup|uri|http://www.semanlink.net/tag/markup +http://www.semanlink.net/tag/user_manuals|creationTime|2019-07-28T10:48:23Z +http://www.semanlink.net/tag/user_manuals|prefLabel|User manuals +http://www.semanlink.net/tag/user_manuals|creationDate|2019-07-28 +http://www.semanlink.net/tag/user_manuals|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/user_manuals|uri|http://www.semanlink.net/tag/user_manuals +http://www.semanlink.net/tag/swse|creationTime|2007-07-20T00:48:16Z +http://www.semanlink.net/tag/swse|prefLabel|SWSE +http://www.semanlink.net/tag/swse|broader|http://www.semanlink.net/tag/semantic_search +http://www.semanlink.net/tag/swse|creationDate|2007-07-20 +http://www.semanlink.net/tag/swse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/swse|uri|http://www.semanlink.net/tag/swse +http://www.semanlink.net/tag/swse|broader_prefLabel|Semantic Search +http://www.semanlink.net/tag/swse|broader_related|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/tag/jsonp|creationTime|2009-05-20T23:17:47Z +http://www.semanlink.net/tag/jsonp|prefLabel|JSONP +http://www.semanlink.net/tag/jsonp|broader|http://www.semanlink.net/tag/cross_domain_data_fetching +http://www.semanlink.net/tag/jsonp|broader|http://www.semanlink.net/tag/json +http://www.semanlink.net/tag/jsonp|broader|http://www.semanlink.net/tag/script_tag_hack +http://www.semanlink.net/tag/jsonp|broader|http://www.semanlink.net/tag/ajax +http://www.semanlink.net/tag/jsonp|creationDate|2009-05-20 +http://www.semanlink.net/tag/jsonp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jsonp|uri|http://www.semanlink.net/tag/jsonp +http://www.semanlink.net/tag/jsonp|broader_prefLabel|cross-domain data fetching +http://www.semanlink.net/tag/jsonp|broader_prefLabel|JSON +http://www.semanlink.net/tag/jsonp|broader_prefLabel|Script tag hack +http://www.semanlink.net/tag/jsonp|broader_prefLabel|Ajax +http://www.semanlink.net/tag/jsonp|broader_altLabel|XMLHttpRequest +http://www.semanlink.net/tag/semantic_web_services_vs_soap|prefLabel|Semantic Web Services vs SOAP +http://www.semanlink.net/tag/semantic_web_services_vs_soap|broader|http://www.semanlink.net/tag/semantic_web_services +http://www.semanlink.net/tag/semantic_web_services_vs_soap|broader|http://www.semanlink.net/tag/web_services 
+http://www.semanlink.net/tag/semantic_web_services_vs_soap|broader|http://www.semanlink.net/tag/soap
+http://www.semanlink.net/tag/semantic_web_services_vs_soap|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_web_services_vs_soap|uri|http://www.semanlink.net/tag/semantic_web_services_vs_soap
+http://www.semanlink.net/tag/semantic_web_services_vs_soap|broader_prefLabel|Semantic Web Services
+http://www.semanlink.net/tag/semantic_web_services_vs_soap|broader_prefLabel|Web Services
+http://www.semanlink.net/tag/semantic_web_services_vs_soap|broader_prefLabel|SOAP
+http://www.semanlink.net/tag/semantic_web_services_vs_soap|broader_altLabel|WS
+http://www.semanlink.net/tag/tolerance|creationTime|2015-04-02T22:17:55Z
+http://www.semanlink.net/tag/tolerance|prefLabel|Tolérance
+http://www.semanlink.net/tag/tolerance|broader|http://www.semanlink.net/tag/prejuges
+http://www.semanlink.net/tag/tolerance|creationDate|2015-04-02
+http://www.semanlink.net/tag/tolerance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/tolerance|uri|http://www.semanlink.net/tag/tolerance
+http://www.semanlink.net/tag/tolerance|broader_prefLabel|Préjugés
+http://www.semanlink.net/tag/gui|creationTime|2007-07-10T23:04:22Z
+http://www.semanlink.net/tag/gui|prefLabel|GUI
+http://www.semanlink.net/tag/gui|broader|http://www.semanlink.net/tag/ui
+http://www.semanlink.net/tag/gui|creationDate|2007-07-10
+http://www.semanlink.net/tag/gui|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/gui|uri|http://www.semanlink.net/tag/gui
+http://www.semanlink.net/tag/gui|broader_prefLabel|UI
+http://www.semanlink.net/tag/mort|prefLabel|Mort
+http://www.semanlink.net/tag/mort|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mort|uri|http://www.semanlink.net/tag/mort
+http://www.semanlink.net/tag/presentation_tool|creationTime|2014-01-08T14:12:02Z
+http://www.semanlink.net/tag/presentation_tool|prefLabel|Presentation tool
+http://www.semanlink.net/tag/presentation_tool|creationDate|2014-01-08
+http://www.semanlink.net/tag/presentation_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/presentation_tool|uri|http://www.semanlink.net/tag/presentation_tool
+http://www.semanlink.net/tag/arundhati_roy|creationTime|2020-04-06T19:32:44Z
+http://www.semanlink.net/tag/arundhati_roy|prefLabel|Arundhati Roy
+http://www.semanlink.net/tag/arundhati_roy|broader|http://www.semanlink.net/tag/ecrivain
+http://www.semanlink.net/tag/arundhati_roy|broader|http://www.semanlink.net/tag/inde
+http://www.semanlink.net/tag/arundhati_roy|creationDate|2020-04-06
+http://www.semanlink.net/tag/arundhati_roy|comment|"""The God of Small Things"" (Le Dieu des Petits Riens)"
+http://www.semanlink.net/tag/arundhati_roy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/arundhati_roy|describedBy|https://en.wikipedia.org/wiki/Arundhati_Roy
+http://www.semanlink.net/tag/arundhati_roy|uri|http://www.semanlink.net/tag/arundhati_roy
+http://www.semanlink.net/tag/arundhati_roy|broader_prefLabel|Ecrivain
+http://www.semanlink.net/tag/arundhati_roy|broader_prefLabel|Inde
+http://www.semanlink.net/tag/validation|prefLabel|Validator
+http://www.semanlink.net/tag/validation|creationDate|2006-12-16
+http://www.semanlink.net/tag/validation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/validation|uri|http://www.semanlink.net/tag/validation
+http://www.semanlink.net/tag/sicile|creationTime|2015-04-02T22:18:07Z
+http://www.semanlink.net/tag/sicile|prefLabel|Sicile
+http://www.semanlink.net/tag/sicile|broader|http://www.semanlink.net/tag/italie
+http://www.semanlink.net/tag/sicile|creationDate|2015-04-02
+http://www.semanlink.net/tag/sicile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sicile|uri|http://www.semanlink.net/tag/sicile
+http://www.semanlink.net/tag/sicile|broader_prefLabel|Italie
+http://www.semanlink.net/tag/weak_supervision|creationTime|2019-01-29T01:28:53Z
+http://www.semanlink.net/tag/weak_supervision|prefLabel|Weak supervision
+http://www.semanlink.net/tag/weak_supervision|broader|http://www.semanlink.net/tag/machine_learning_techniques
+http://www.semanlink.net/tag/weak_supervision|creationDate|2019-01-29
+http://www.semanlink.net/tag/weak_supervision|comment|"Noisy, limited, or imprecise sources are used to provide a supervision signal for labeling large amounts of training data in a supervised learning setting.
+
+Programmatic or otherwise more efficient but noisier ways of generating training labels.
+"
+http://www.semanlink.net/tag/weak_supervision|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/weak_supervision|describedBy|https://en.wikipedia.org/wiki/Weak_supervision
+http://www.semanlink.net/tag/weak_supervision|uri|http://www.semanlink.net/tag/weak_supervision
+http://www.semanlink.net/tag/weak_supervision|broader_prefLabel|Machine learning: techniques
+http://www.semanlink.net/tag/fourmi|prefLabel|Fourmi
+http://www.semanlink.net/tag/fourmi|broader|http://www.semanlink.net/tag/insecte
+http://www.semanlink.net/tag/fourmi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fourmi|uri|http://www.semanlink.net/tag/fourmi
+http://www.semanlink.net/tag/fourmi|broader_prefLabel|Insecte
+http://www.semanlink.net/tag/rif|creationTime|2007-11-07T17:03:50Z
+http://www.semanlink.net/tag/rif|prefLabel|RIF
+http://www.semanlink.net/tag/rif|broader|http://www.semanlink.net/tag/rules
+http://www.semanlink.net/tag/rif|creationDate|2007-11-07
+http://www.semanlink.net/tag/rif|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rif|uri|http://www.semanlink.net/tag/rif
+http://www.semanlink.net/tag/rif|broader_prefLabel|Rules
+http://www.semanlink.net/tag/notes_d_install|prefLabel|Notes d'install
+http://www.semanlink.net/tag/notes_d_install|broader|http://www.semanlink.net/tag/installing_apps
+http://www.semanlink.net/tag/notes_d_install|broader|http://www.semanlink.net/tag/fps_notes
+http://www.semanlink.net/tag/notes_d_install|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/notes_d_install|uri|http://www.semanlink.net/tag/notes_d_install
+http://www.semanlink.net/tag/notes_d_install|broader_prefLabel|Installing apps
+http://www.semanlink.net/tag/notes_d_install|broader_prefLabel|fps notes
+http://www.semanlink.net/tag/access_token|creationTime|2020-10-23T11:47:53Z
+http://www.semanlink.net/tag/access_token|prefLabel|Access Token
+http://www.semanlink.net/tag/access_token|broader|http://www.semanlink.net/tag/securite
+http://www.semanlink.net/tag/access_token|broader|http://www.semanlink.net/tag/authentication
+http://www.semanlink.net/tag/access_token|creationDate|2020-10-23
+http://www.semanlink.net/tag/access_token|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/access_token|uri|http://www.semanlink.net/tag/access_token +http://www.semanlink.net/tag/access_token|broader_prefLabel|Sécurité +http://www.semanlink.net/tag/access_token|broader_prefLabel|Authentication +http://www.semanlink.net/tag/sarkozy_et_extreme_droite|creationTime|2012-05-09T22:32:17Z +http://www.semanlink.net/tag/sarkozy_et_extreme_droite|prefLabel|Sarkozy et extrème droite +http://www.semanlink.net/tag/sarkozy_et_extreme_droite|broader|http://www.semanlink.net/tag/sarkozyland +http://www.semanlink.net/tag/sarkozy_et_extreme_droite|broader|http://www.semanlink.net/tag/sarkozy +http://www.semanlink.net/tag/sarkozy_et_extreme_droite|creationDate|2012-05-09 +http://www.semanlink.net/tag/sarkozy_et_extreme_droite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sarkozy_et_extreme_droite|uri|http://www.semanlink.net/tag/sarkozy_et_extreme_droite +http://www.semanlink.net/tag/sarkozy_et_extreme_droite|broader_prefLabel|Sarkozyland +http://www.semanlink.net/tag/sarkozy_et_extreme_droite|broader_prefLabel|Sarkozy +http://www.semanlink.net/tag/hackers|prefLabel|Hackers +http://www.semanlink.net/tag/hackers|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/hackers|broader|http://www.semanlink.net/tag/hack +http://www.semanlink.net/tag/hackers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hackers|uri|http://www.semanlink.net/tag/hackers +http://www.semanlink.net/tag/hackers|broader_prefLabel|Informatique +http://www.semanlink.net/tag/hackers|broader_prefLabel|Hack +http://www.semanlink.net/tag/boko_haram|creationTime|2014-03-18T23:59:12Z +http://www.semanlink.net/tag/boko_haram|prefLabel|Boko Haram +http://www.semanlink.net/tag/boko_haram|broader|http://www.semanlink.net/tag/terrorisme_islamiste +http://www.semanlink.net/tag/boko_haram|broader|http://www.semanlink.net/tag/nigeria +http://www.semanlink.net/tag/boko_haram|related|http://www.semanlink.net/tag/aqmi +http://www.semanlink.net/tag/boko_haram|creationDate|2014-03-18 +http://www.semanlink.net/tag/boko_haram|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/boko_haram|uri|http://www.semanlink.net/tag/boko_haram +http://www.semanlink.net/tag/boko_haram|broader_prefLabel|Terrorisme islamiste +http://www.semanlink.net/tag/boko_haram|broader_prefLabel|Nigeria +http://www.semanlink.net/tag/w3c_community_group|creationTime|2015-02-19T01:16:29Z +http://www.semanlink.net/tag/w3c_community_group|prefLabel|W3C Community Group +http://www.semanlink.net/tag/w3c_community_group|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/w3c_community_group|creationDate|2015-02-19 +http://www.semanlink.net/tag/w3c_community_group|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/w3c_community_group|uri|http://www.semanlink.net/tag/w3c_community_group +http://www.semanlink.net/tag/w3c_community_group|broader_prefLabel|W3C +http://www.semanlink.net/tag/w3c_community_group|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/w3c_community_group|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/semantic_seo|creationTime|2011-12-17T12:05:49Z +http://www.semanlink.net/tag/semantic_seo|prefLabel|Semantic SEO 
+http://www.semanlink.net/tag/semantic_seo|broader|http://www.semanlink.net/tag/seo +http://www.semanlink.net/tag/semantic_seo|creationDate|2011-12-17 +http://www.semanlink.net/tag/semantic_seo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_seo|uri|http://www.semanlink.net/tag/semantic_seo +http://www.semanlink.net/tag/semantic_seo|broader_prefLabel|SEO +http://www.semanlink.net/tag/data_visualization_tools|creationTime|2008-02-15T23:50:23Z +http://www.semanlink.net/tag/data_visualization_tools|prefLabel|Visualization Tools +http://www.semanlink.net/tag/data_visualization_tools|broader|http://www.semanlink.net/tag/tools +http://www.semanlink.net/tag/data_visualization_tools|broader|http://www.semanlink.net/tag/data_visualisation +http://www.semanlink.net/tag/data_visualization_tools|related|http://www.semanlink.net/tag/information_visualization +http://www.semanlink.net/tag/data_visualization_tools|creationDate|2008-02-15 +http://www.semanlink.net/tag/data_visualization_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_visualization_tools|altLabel|Data Visualization Tools +http://www.semanlink.net/tag/data_visualization_tools|uri|http://www.semanlink.net/tag/data_visualization_tools +http://www.semanlink.net/tag/data_visualization_tools|broader_prefLabel|Tools +http://www.semanlink.net/tag/data_visualization_tools|broader_prefLabel|Data visualisation +http://www.semanlink.net/tag/charlton_heston|creationTime|2008-05-15T22:35:29Z +http://www.semanlink.net/tag/charlton_heston|prefLabel|Charlton Heston +http://www.semanlink.net/tag/charlton_heston|creationDate|2008-05-15 +http://www.semanlink.net/tag/charlton_heston|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/charlton_heston|describedBy|https://en.wikipedia.org/wiki/Charlton_Heston +http://www.semanlink.net/tag/charlton_heston|uri|http://www.semanlink.net/tag/charlton_heston +http://www.semanlink.net/tag/obsidian|creationTime|2021-03-13T11:27:50Z +http://www.semanlink.net/tag/obsidian|prefLabel|Obsidian +http://www.semanlink.net/tag/obsidian|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/obsidian|broader|http://www.semanlink.net/tag/note_taking_app +http://www.semanlink.net/tag/obsidian|creationDate|2021-03-13 +http://www.semanlink.net/tag/obsidian|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/obsidian|homepage|https://obsidian.md/ +http://www.semanlink.net/tag/obsidian|uri|http://www.semanlink.net/tag/obsidian +http://www.semanlink.net/tag/obsidian|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/obsidian|broader_prefLabel|Note taking app +http://www.semanlink.net/tag/syngenta|prefLabel|Syngenta +http://www.semanlink.net/tag/syngenta|broader|http://www.semanlink.net/tag/suisse +http://www.semanlink.net/tag/syngenta|broader|http://www.semanlink.net/tag/biotech_industry +http://www.semanlink.net/tag/syngenta|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/syngenta|uri|http://www.semanlink.net/tag/syngenta +http://www.semanlink.net/tag/syngenta|broader_prefLabel|Suisse +http://www.semanlink.net/tag/syngenta|broader_prefLabel|Biotech industry +http://www.semanlink.net/tag/syngenta|broader_altLabel|Firme biotechnologique +http://www.semanlink.net/tag/pape_francois|creationTime|2015-06-14T14:32:01Z +http://www.semanlink.net/tag/pape_francois|prefLabel|Pape François 
+http://www.semanlink.net/tag/pape_francois|broader|http://www.semanlink.net/tag/pape +http://www.semanlink.net/tag/pape_francois|creationDate|2015-06-14 +http://www.semanlink.net/tag/pape_francois|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pape_francois|uri|http://www.semanlink.net/tag/pape_francois +http://www.semanlink.net/tag/pape_francois|broader_prefLabel|Pape +http://www.semanlink.net/tag/responsive_design|creationTime|2013-01-26T11:44:46Z +http://www.semanlink.net/tag/responsive_design|prefLabel|Responsive Design +http://www.semanlink.net/tag/responsive_design|creationDate|2013-01-26 +http://www.semanlink.net/tag/responsive_design|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/responsive_design|uri|http://www.semanlink.net/tag/responsive_design +http://www.semanlink.net/tag/gradient_boosting|creationTime|2018-11-13T18:30:39Z +http://www.semanlink.net/tag/gradient_boosting|prefLabel|Gradient boosting +http://www.semanlink.net/tag/gradient_boosting|broader|http://www.semanlink.net/tag/boosting +http://www.semanlink.net/tag/gradient_boosting|creationDate|2018-11-13 +http://www.semanlink.net/tag/gradient_boosting|comment|"ML technique for regression and classification problems, which **produces a prediction model in the form of an ensemble of weak prediction models, typically decision trees**. Allows the optimization of an arbitrary differentiable loss function. +" +http://www.semanlink.net/tag/gradient_boosting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gradient_boosting|describedBy|https://en.wikipedia.org/wiki/Gradient_boosting +http://www.semanlink.net/tag/gradient_boosting|uri|http://www.semanlink.net/tag/gradient_boosting +http://www.semanlink.net/tag/gradient_boosting|broader_prefLabel|Boosting +http://www.semanlink.net/tag/gradient_boosting|broader_related|http://www.semanlink.net/tag/bootstrap_aggregating_bagging +http://www.semanlink.net/tag/jax_rs|creationTime|2015-02-11T16:23:54Z +http://www.semanlink.net/tag/jax_rs|prefLabel|JAX-RS +http://www.semanlink.net/tag/jax_rs|broader|http://www.semanlink.net/tag/rest +http://www.semanlink.net/tag/jax_rs|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/jax_rs|creationDate|2015-02-11 +http://www.semanlink.net/tag/jax_rs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jax_rs|uri|http://www.semanlink.net/tag/jax_rs +http://www.semanlink.net/tag/jax_rs|broader_prefLabel|REST +http://www.semanlink.net/tag/jax_rs|broader_prefLabel|Java +http://www.semanlink.net/tag/celte|prefLabel|Celte +http://www.semanlink.net/tag/celte|broader|http://www.semanlink.net/tag/archeologie_europeenne +http://www.semanlink.net/tag/celte|creationDate|2006-08-29 +http://www.semanlink.net/tag/celte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/celte|uri|http://www.semanlink.net/tag/celte +http://www.semanlink.net/tag/celte|broader_prefLabel|Archéologie européenne +http://www.semanlink.net/tag/nikolai_vavilov|creationTime|2020-02-12T00:41:11Z +http://www.semanlink.net/tag/nikolai_vavilov|prefLabel|Nikolai Vavilov +http://www.semanlink.net/tag/nikolai_vavilov|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/nikolai_vavilov|broader|http://www.semanlink.net/tag/urss +http://www.semanlink.net/tag/nikolai_vavilov|broader|http://www.semanlink.net/tag/scientifique 
+http://www.semanlink.net/tag/nikolai_vavilov|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/nikolai_vavilov|broader|http://www.semanlink.net/tag/botanique +http://www.semanlink.net/tag/nikolai_vavilov|creationDate|2020-02-12 +http://www.semanlink.net/tag/nikolai_vavilov|comment|Soviet agronomist, botanist and geneticist, criticized by Lysenko, sentenced to death, died of starvation in prison in 1943. +http://www.semanlink.net/tag/nikolai_vavilov|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nikolai_vavilov|describedBy|https://en.wikipedia.org/wiki/Nikolai_Vavilov +http://www.semanlink.net/tag/nikolai_vavilov|uri|http://www.semanlink.net/tag/nikolai_vavilov +http://www.semanlink.net/tag/nikolai_vavilov|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/nikolai_vavilov|broader_prefLabel|Ex URSS +http://www.semanlink.net/tag/nikolai_vavilov|broader_prefLabel|URSS +http://www.semanlink.net/tag/nikolai_vavilov|broader_prefLabel|Scientifique +http://www.semanlink.net/tag/nikolai_vavilov|broader_prefLabel|Genetics +http://www.semanlink.net/tag/nikolai_vavilov|broader_prefLabel|Génétique +http://www.semanlink.net/tag/nikolai_vavilov|broader_prefLabel|Botanique +http://www.semanlink.net/tag/nikolai_vavilov|broader_altLabel|Savant +http://www.semanlink.net/tag/lauryn_hill|creationTime|2018-11-22T09:46:35Z +http://www.semanlink.net/tag/lauryn_hill|prefLabel|Lauryn Hill +http://www.semanlink.net/tag/lauryn_hill|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/lauryn_hill|creationDate|2018-11-22 +http://www.semanlink.net/tag/lauryn_hill|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lauryn_hill|uri|http://www.semanlink.net/tag/lauryn_hill +http://www.semanlink.net/tag/lauryn_hill|broader_prefLabel|Musique +http://www.semanlink.net/tag/lauryn_hill|broader_altLabel|Music +http://www.semanlink.net/tag/presidentielles_2007|creationTime|2007-04-09T23:03:52Z +http://www.semanlink.net/tag/presidentielles_2007|prefLabel|Présidentielles 2007 +http://www.semanlink.net/tag/presidentielles_2007|broader|http://www.semanlink.net/tag/politique_francaise +http://www.semanlink.net/tag/presidentielles_2007|creationDate|2007-04-09 +http://www.semanlink.net/tag/presidentielles_2007|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/presidentielles_2007|uri|http://www.semanlink.net/tag/presidentielles_2007 +http://www.semanlink.net/tag/presidentielles_2007|broader_prefLabel|Politique française +http://www.semanlink.net/tag/chip|creationTime|2018-02-22T00:42:02Z +http://www.semanlink.net/tag/chip|prefLabel|Chip +http://www.semanlink.net/tag/chip|creationDate|2018-02-22 +http://www.semanlink.net/tag/chip|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chip|uri|http://www.semanlink.net/tag/chip +http://www.semanlink.net/tag/geologie|prefLabel|Géologie +http://www.semanlink.net/tag/geologie|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/geologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/geologie|uri|http://www.semanlink.net/tag/geologie +http://www.semanlink.net/tag/geologie|broader_prefLabel|Science +http://www.semanlink.net/tag/geologie|broader_altLabel|sciences +http://www.semanlink.net/tag/debarquement|prefLabel|Débarquement +http://www.semanlink.net/tag/debarquement|broader|http://www.semanlink.net/tag/2eme_guerre_mondiale 
+http://www.semanlink.net/tag/debarquement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/debarquement|uri|http://www.semanlink.net/tag/debarquement +http://www.semanlink.net/tag/debarquement|broader_prefLabel|2eme guerre mondiale +http://www.semanlink.net/tag/nebra_sky_disc|prefLabel|Nebra Sky Disc +http://www.semanlink.net/tag/nebra_sky_disc|broader|http://www.semanlink.net/tag/archeologie_europeenne +http://www.semanlink.net/tag/nebra_sky_disc|broader|http://www.semanlink.net/tag/age_du_bronze +http://www.semanlink.net/tag/nebra_sky_disc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nebra_sky_disc|uri|http://www.semanlink.net/tag/nebra_sky_disc +http://www.semanlink.net/tag/nebra_sky_disc|broader_prefLabel|Archéologie européenne +http://www.semanlink.net/tag/nebra_sky_disc|broader_prefLabel|Âge du bronze +http://www.semanlink.net/tag/anticolonialisme|creationTime|2009-03-11T01:15:23Z +http://www.semanlink.net/tag/anticolonialisme|prefLabel|Anticolonialisme +http://www.semanlink.net/tag/anticolonialisme|broader|http://www.semanlink.net/tag/colonisation +http://www.semanlink.net/tag/anticolonialisme|creationDate|2009-03-11 +http://www.semanlink.net/tag/anticolonialisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/anticolonialisme|uri|http://www.semanlink.net/tag/anticolonialisme +http://www.semanlink.net/tag/anticolonialisme|broader_prefLabel|Colonisation +http://www.semanlink.net/tag/anticolonialisme|broader_altLabel|Colonialisme +http://www.semanlink.net/tag/implementing_a_jena_graph|creationTime|2009-09-18T19:19:35Z +http://www.semanlink.net/tag/implementing_a_jena_graph|prefLabel|Implementing a Jena Graph +http://www.semanlink.net/tag/implementing_a_jena_graph|broader|http://www.semanlink.net/tag/jena_dev +http://www.semanlink.net/tag/implementing_a_jena_graph|creationDate|2009-09-18 +http://www.semanlink.net/tag/implementing_a_jena_graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/implementing_a_jena_graph|uri|http://www.semanlink.net/tag/implementing_a_jena_graph +http://www.semanlink.net/tag/implementing_a_jena_graph|broader_prefLabel|Jena dev +http://www.semanlink.net/tag/researchgate|creationTime|2017-10-24T10:22:08Z +http://www.semanlink.net/tag/researchgate|prefLabel|ResearchGate +http://www.semanlink.net/tag/researchgate|creationDate|2017-10-24 +http://www.semanlink.net/tag/researchgate|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/researchgate|uri|http://www.semanlink.net/tag/researchgate +http://www.semanlink.net/tag/pydev|creationTime|2013-04-23T21:39:00Z +http://www.semanlink.net/tag/pydev|prefLabel|PyDev +http://www.semanlink.net/tag/pydev|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/pydev|creationDate|2013-04-23 +http://www.semanlink.net/tag/pydev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pydev|uri|http://www.semanlink.net/tag/pydev +http://www.semanlink.net/tag/pydev|broader_prefLabel|Python +http://www.semanlink.net/tag/ikuya_yamada|creationTime|2021-06-03T11:12:12Z +http://www.semanlink.net/tag/ikuya_yamada|prefLabel|Ikuya Yamada +http://www.semanlink.net/tag/ikuya_yamada|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/ikuya_yamada|creationDate|2021-06-03 
+http://www.semanlink.net/tag/ikuya_yamada|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ikuya_yamada|uri|http://www.semanlink.net/tag/ikuya_yamada +http://www.semanlink.net/tag/ikuya_yamada|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/n_gram|creationTime|2012-03-24T09:04:23Z +http://www.semanlink.net/tag/n_gram|prefLabel|N-grams +http://www.semanlink.net/tag/n_gram|broader|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/tag/n_gram|creationDate|2012-03-24 +http://www.semanlink.net/tag/n_gram|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/n_gram|describedBy|https://en.wikipedia.org/wiki/N-gram +http://www.semanlink.net/tag/n_gram|altLabel|N-gram +http://www.semanlink.net/tag/n_gram|uri|http://www.semanlink.net/tag/n_gram +http://www.semanlink.net/tag/n_gram|broader_prefLabel|Language model +http://www.semanlink.net/tag/n_gram|broader_altLabel|Language Modeling +http://www.semanlink.net/tag/n_gram|broader_altLabel|LM +http://www.semanlink.net/tag/n_gram|broader_altLabel|Statistical Language Model +http://www.semanlink.net/tag/n_gram|broader_related|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/n_gram|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/javascript_tool|creationTime|2011-01-10T01:32:25Z +http://www.semanlink.net/tag/javascript_tool|prefLabel|Javascript tool +http://www.semanlink.net/tag/javascript_tool|broader|http://www.semanlink.net/tag/dev_tools +http://www.semanlink.net/tag/javascript_tool|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/javascript_tool|creationDate|2011-01-10 +http://www.semanlink.net/tag/javascript_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/javascript_tool|uri|http://www.semanlink.net/tag/javascript_tool +http://www.semanlink.net/tag/javascript_tool|broader_prefLabel|Dev tools +http://www.semanlink.net/tag/javascript_tool|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/javascript_tool|broader_altLabel|js +http://www.semanlink.net/tag/cory_doctorow|creationTime|2008-02-02T20:40:56Z +http://www.semanlink.net/tag/cory_doctorow|prefLabel|Cory Doctorow +http://www.semanlink.net/tag/cory_doctorow|broader|http://www.semanlink.net/tag/anticipation +http://www.semanlink.net/tag/cory_doctorow|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/cory_doctorow|creationDate|2008-02-02 +http://www.semanlink.net/tag/cory_doctorow|comment|" +" +http://www.semanlink.net/tag/cory_doctorow|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cory_doctorow|uri|http://www.semanlink.net/tag/cory_doctorow +http://www.semanlink.net/tag/cory_doctorow|broader_prefLabel|Anticipation +http://www.semanlink.net/tag/cory_doctorow|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/triplestore|prefLabel|TripleStore +http://www.semanlink.net/tag/triplestore|broader|http://www.semanlink.net/tag/rdf_and_database +http://www.semanlink.net/tag/triplestore|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/triplestore|broader|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.semanlink.net/tag/triplestore|broader|http://www.semanlink.net/tag/semantic_web_databases +http://www.semanlink.net/tag/triplestore|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/triplestore|altLabel|RDF database +http://www.semanlink.net/tag/triplestore|uri|http://www.semanlink.net/tag/triplestore +http://www.semanlink.net/tag/triplestore|broader_prefLabel|RDF and database +http://www.semanlink.net/tag/triplestore|broader_prefLabel|RDF +http://www.semanlink.net/tag/triplestore|broader_prefLabel|Relational Databases and the Semantic Web +http://www.semanlink.net/tag/triplestore|broader_prefLabel|Semantic Web: databases +http://www.semanlink.net/tag/triplestore|broader_altLabel|RDF and database +http://www.semanlink.net/tag/triplestore|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/triplestore|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/triplestore|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/triplestore|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/triplestore|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/triplestore|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/apple|prefLabel|Apple +http://www.semanlink.net/tag/apple|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/apple|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/apple|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apple|uri|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/apple|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/apple|broader_prefLabel|Technologie +http://www.semanlink.net/tag/ec_web|creationTime|2014-04-18T23:35:13Z +http://www.semanlink.net/tag/ec_web|prefLabel|EC-Web +http://www.semanlink.net/tag/ec_web|broader|http://www.semanlink.net/tag/conferences +http://www.semanlink.net/tag/ec_web|creationDate|2014-04-18 +http://www.semanlink.net/tag/ec_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ec_web|uri|http://www.semanlink.net/tag/ec_web +http://www.semanlink.net/tag/ec_web|broader_prefLabel|Conférences +http://www.semanlink.net/tag/ec_web|broader_related|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/medical_ir_ml_ia|creationTime|2019-02-09T11:35:09Z +http://www.semanlink.net/tag/medical_ir_ml_ia|prefLabel|Medical IR, ML, IA +http://www.semanlink.net/tag/medical_ir_ml_ia|broader|http://www.semanlink.net/tag/ml_domaines_d_application +http://www.semanlink.net/tag/medical_ir_ml_ia|broader|http://www.semanlink.net/tag/sante +http://www.semanlink.net/tag/medical_ir_ml_ia|creationDate|2019-02-09 +http://www.semanlink.net/tag/medical_ir_ml_ia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/medical_ir_ml_ia|uri|http://www.semanlink.net/tag/medical_ir_ml_ia +http://www.semanlink.net/tag/medical_ir_ml_ia|broader_prefLabel|IA/ML: domaines d'application +http://www.semanlink.net/tag/medical_ir_ml_ia|broader_prefLabel|Santé +http://www.semanlink.net/tag/tulipe|creationTime|2008-01-25T15:06:00Z +http://www.semanlink.net/tag/tulipe|prefLabel|Tulipe +http://www.semanlink.net/tag/tulipe|broader|http://www.semanlink.net/tag/fleur +http://www.semanlink.net/tag/tulipe|creationDate|2008-01-25 +http://www.semanlink.net/tag/tulipe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tulipe|uri|http://www.semanlink.net/tag/tulipe +http://www.semanlink.net/tag/tulipe|broader_prefLabel|Fleur 
+http://www.semanlink.net/tag/triplet_loss|creationTime|2019-10-13T19:01:16Z
+http://www.semanlink.net/tag/triplet_loss|prefLabel|Triplet Loss
+http://www.semanlink.net/tag/triplet_loss|broader|http://www.semanlink.net/tag/siamese_network
+http://www.semanlink.net/tag/triplet_loss|creationDate|2019-10-13
+http://www.semanlink.net/tag/triplet_loss|comment|For instance in image recognition using siamese networks, the triplet loss function tries to maximize the distance between the anchor image and the negative image while minimizing the distance between the anchor image and the positive image, thereby learning to differentiate similar images from non-similar ones
+http://www.semanlink.net/tag/triplet_loss|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/triplet_loss|uri|http://www.semanlink.net/tag/triplet_loss
+http://www.semanlink.net/tag/triplet_loss|broader_prefLabel|Siamese networks
+http://www.semanlink.net/tag/triplet_loss|broader_altLabel|Siamese network
+http://www.semanlink.net/tag/triplet_loss|broader_related|http://www.semanlink.net/tag/face_recognition
+http://www.semanlink.net/tag/triplet_loss|broader_related|http://www.semanlink.net/tag/one_shot_generalization
+http://www.semanlink.net/tag/triplet_loss|broader_related|http://www.semanlink.net/tag/locality_sensitive_hashing
+http://www.semanlink.net/tag/nils_reimers|creationTime|2021-04-17T10:07:45Z
+http://www.semanlink.net/tag/nils_reimers|prefLabel|Nils Reimers
+http://www.semanlink.net/tag/nils_reimers|broader|http://www.semanlink.net/tag/nlp_girls_and_guys
+http://www.semanlink.net/tag/nils_reimers|related|http://www.semanlink.net/tag/sbert
+http://www.semanlink.net/tag/nils_reimers|creationDate|2021-04-17
+http://www.semanlink.net/tag/nils_reimers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nils_reimers|uri|http://www.semanlink.net/tag/nils_reimers
+http://www.semanlink.net/tag/nils_reimers|broader_prefLabel|NLP girls and guys
+http://www.semanlink.net/tag/these_irit_renault_biblio|creationTime|2019-01-27T15:05:14Z
+http://www.semanlink.net/tag/these_irit_renault_biblio|prefLabel|Thèse IRIT-Renault: biblio
+http://www.semanlink.net/tag/these_irit_renault_biblio|related|http://www.semanlink.net/tag/kd_mkb_biblio
+http://www.semanlink.net/tag/these_irit_renault_biblio|related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings
+http://www.semanlink.net/tag/these_irit_renault_biblio|related|http://www.semanlink.net/tag/kg_and_nlp
+http://www.semanlink.net/tag/these_irit_renault_biblio|creationDate|2019-01-27
+http://www.semanlink.net/tag/these_irit_renault_biblio|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/these_irit_renault_biblio|uri|http://www.semanlink.net/tag/these_irit_renault_biblio
+http://www.semanlink.net/tag/archeologie_percheronne|creationTime|2019-06-01T15:22:39Z
+http://www.semanlink.net/tag/archeologie_percheronne|prefLabel|Archeologie percheronne
+http://www.semanlink.net/tag/archeologie_percheronne|broader|http://www.semanlink.net/tag/orne
+http://www.semanlink.net/tag/archeologie_percheronne|broader|http://www.semanlink.net/tag/archeologie
+http://www.semanlink.net/tag/archeologie_percheronne|creationDate|2019-06-01
+http://www.semanlink.net/tag/archeologie_percheronne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/archeologie_percheronne|uri|http://www.semanlink.net/tag/archeologie_percheronne
+http://www.semanlink.net/tag/archeologie_percheronne|broader_prefLabel|Orne
+http://www.semanlink.net/tag/archeologie_percheronne|broader_prefLabel|Archéologie
+http://www.semanlink.net/tag/archeologie_percheronne|broader_related|http://www.semanlink.net/tag/antiquite
+http://www.semanlink.net/tag/aventure|prefLabel|Aventure
+http://www.semanlink.net/tag/aventure|broader|http://www.semanlink.net/tag/i_like
+http://www.semanlink.net/tag/aventure|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/aventure|uri|http://www.semanlink.net/tag/aventure
+http://www.semanlink.net/tag/aventure|broader_prefLabel|I like
+http://www.semanlink.net/tag/image_classification|creationTime|2018-03-04T16:56:56Z
+http://www.semanlink.net/tag/image_classification|prefLabel|Image classification
+http://www.semanlink.net/tag/image_classification|broader|http://www.semanlink.net/tag/statistical_classification
+http://www.semanlink.net/tag/image_classification|creationDate|2018-03-04
+http://www.semanlink.net/tag/image_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/image_classification|uri|http://www.semanlink.net/tag/image_classification
+http://www.semanlink.net/tag/image_classification|broader_prefLabel|Classification
+http://www.semanlink.net/tag/rotate|creationTime|2020-04-25T16:13:12Z
+http://www.semanlink.net/tag/rotate|prefLabel|RotatE
+http://www.semanlink.net/tag/rotate|broader|http://www.semanlink.net/tag/knowledge_graph_embeddings
+http://www.semanlink.net/tag/rotate|broader|http://www.semanlink.net/tag/knowledge_graph_completion
+http://www.semanlink.net/tag/rotate|creationDate|2020-04-25
+http://www.semanlink.net/tag/rotate|comment|Knowledge Graph Embedding by Relational Rotation in Complex Space
+http://www.semanlink.net/tag/rotate|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rotate|uri|http://www.semanlink.net/tag/rotate
+http://www.semanlink.net/tag/rotate|broader_prefLabel|Knowledge Graph Embeddings
+http://www.semanlink.net/tag/rotate|broader_prefLabel|Knowledge Graph Completion
+http://www.semanlink.net/tag/rotate|broader_altLabel|KGE
+http://www.semanlink.net/tag/rotate|broader_altLabel|Knowledge graph embedding
+http://www.semanlink.net/tag/rotate|broader_altLabel|KG embedding
+http://www.semanlink.net/tag/rotate|broader_related|http://www.semanlink.net/tag/knowledge_graph_completion
+http://www.semanlink.net/tag/rotate|broader_related|http://www.semanlink.net/tag/knowledge_driven_embeddings
+http://www.semanlink.net/tag/rotate|broader_related|http://www.semanlink.net/tag/entity_embeddings
+http://www.semanlink.net/tag/rotate|broader_related|http://www.semanlink.net/tag/knowledge_graph_deep_learning
+http://www.semanlink.net/tag/rotate|broader_related|http://www.semanlink.net/tag/information_extraction
+http://www.semanlink.net/tag/spam|prefLabel|Spam
+http://www.semanlink.net/tag/spam|broader|http://www.semanlink.net/tag/internet
+http://www.semanlink.net/tag/spam|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/spam|uri|http://www.semanlink.net/tag/spam
+http://www.semanlink.net/tag/spam|broader_prefLabel|Internet
+http://www.semanlink.net/tag/moyen_age|prefLabel|Moyen-âge
+http://www.semanlink.net/tag/moyen_age|broader|http://www.semanlink.net/tag/histoire
+http://www.semanlink.net/tag/moyen_age|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/moyen_age|uri|http://www.semanlink.net/tag/moyen_age +http://www.semanlink.net/tag/moyen_age|broader_prefLabel|Histoire +http://www.semanlink.net/tag/cringely|prefLabel|Cringely +http://www.semanlink.net/tag/cringely|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/cringely|broader|http://www.semanlink.net/tag/pbs +http://www.semanlink.net/tag/cringely|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cringely|uri|http://www.semanlink.net/tag/cringely +http://www.semanlink.net/tag/cringely|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/cringely|broader_prefLabel|PBS +http://www.semanlink.net/tag/cringely|broader_altLabel|Technical guys +http://www.semanlink.net/tag/economic_sanctions|creationTime|2020-12-22T21:04:49Z +http://www.semanlink.net/tag/economic_sanctions|prefLabel|Economic sanctions +http://www.semanlink.net/tag/economic_sanctions|creationDate|2020-12-22 +http://www.semanlink.net/tag/economic_sanctions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/economic_sanctions|describedBy|https://en.wikipedia.org/wiki/Economic_sanctions +http://www.semanlink.net/tag/economic_sanctions|uri|http://www.semanlink.net/tag/economic_sanctions +http://www.semanlink.net/tag/time_series|creationTime|2014-04-28T15:41:23Z +http://www.semanlink.net/tag/time_series|prefLabel|Time Series +http://www.semanlink.net/tag/time_series|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/time_series|creationDate|2014-04-28 +http://www.semanlink.net/tag/time_series|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/time_series|uri|http://www.semanlink.net/tag/time_series +http://www.semanlink.net/tag/time_series|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/femme|creationTime|2007-12-01T23:14:33Z +http://www.semanlink.net/tag/femme|prefLabel|Femme +http://www.semanlink.net/tag/femme|creationDate|2007-12-01 +http://www.semanlink.net/tag/femme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/femme|uri|http://www.semanlink.net/tag/femme +http://www.semanlink.net/tag/sql_to_rdf_mapping|creationTime|2007-06-14T21:53:55Z +http://www.semanlink.net/tag/sql_to_rdf_mapping|prefLabel|SQL to RDF mapping +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader|http://www.semanlink.net/tag/rdf_and_database +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader|http://www.semanlink.net/tag/database_to_rdf_mapping +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/sql_to_rdf_mapping|creationDate|2007-06-14 +http://www.semanlink.net/tag/sql_to_rdf_mapping|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sql_to_rdf_mapping|uri|http://www.semanlink.net/tag/sql_to_rdf_mapping +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_prefLabel|RDF and database +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_prefLabel|Database to RDF mapping 
+http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_prefLabel|RDF +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_prefLabel|Relational Databases and the Semantic Web +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_prefLabel|SQL +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_altLabel|RDF and database +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/virtuoso +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/sql_to_rdf_mapping|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/enswers|creationTime|2013-08-25T12:59:36Z +http://www.semanlink.net/tag/enswers|prefLabel|Enswers +http://www.semanlink.net/tag/enswers|broader|http://www.semanlink.net/tag/coree_du_sud +http://www.semanlink.net/tag/enswers|broader|http://www.semanlink.net/tag/digital_video +http://www.semanlink.net/tag/enswers|broader|http://www.semanlink.net/tag/winch5 +http://www.semanlink.net/tag/enswers|creationDate|2013-08-25 +http://www.semanlink.net/tag/enswers|comment|automatic content recognition through audiovisual fingerprinting, signal processing, large-scale clustering, and computer vision. +http://www.semanlink.net/tag/enswers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enswers|uri|http://www.semanlink.net/tag/enswers +http://www.semanlink.net/tag/enswers|broader_prefLabel|Corée du Sud +http://www.semanlink.net/tag/enswers|broader_prefLabel|Digital Video +http://www.semanlink.net/tag/enswers|broader_prefLabel|Winch 5 +http://www.semanlink.net/tag/enswers|broader_altLabel|South Korea +http://www.semanlink.net/tag/information_retrieval_techniques|creationTime|2013-05-31T14:40:09Z +http://www.semanlink.net/tag/information_retrieval_techniques|prefLabel|Information retrieval: techniques +http://www.semanlink.net/tag/information_retrieval_techniques|broader|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/information_retrieval_techniques|creationDate|2013-05-31 +http://www.semanlink.net/tag/information_retrieval_techniques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/information_retrieval_techniques|uri|http://www.semanlink.net/tag/information_retrieval_techniques +http://www.semanlink.net/tag/information_retrieval_techniques|broader_prefLabel|Information retrieval +http://www.semanlink.net/tag/information_retrieval_techniques|broader_altLabel|IR +http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne|creationTime|2007-04-10T23:12:09Z +http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne|prefLabel|Nous vivons une époque moderne +http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne|creationDate|2007-01-20 +http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne|altLabel|C'est déjà 
demain +http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne|uri|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne|broader_prefLabel|Technologie +http://www.semanlink.net/tag/linked_learning|creationTime|2011-12-17T12:29:21Z +http://www.semanlink.net/tag/linked_learning|prefLabel|Linked Learning +http://www.semanlink.net/tag/linked_learning|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_learning|broader|http://www.semanlink.net/tag/e_learning +http://www.semanlink.net/tag/linked_learning|creationDate|2011-12-17 +http://www.semanlink.net/tag/linked_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_learning|uri|http://www.semanlink.net/tag/linked_learning +http://www.semanlink.net/tag/linked_learning|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_learning|broader_prefLabel|Online Learning +http://www.semanlink.net/tag/linked_learning|broader_altLabel|LD +http://www.semanlink.net/tag/linked_learning|broader_altLabel|Pédagogie numérique +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_learning|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/multilingual_embeddings|creationTime|2018-05-11T22:47:46Z +http://www.semanlink.net/tag/multilingual_embeddings|prefLabel|Multilingual embeddings +http://www.semanlink.net/tag/multilingual_embeddings|broader|http://www.semanlink.net/tag/cross_lingual_nlp +http://www.semanlink.net/tag/multilingual_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/multilingual_embeddings|broader|http://www.semanlink.net/tag/embeddings_in_nlp +http://www.semanlink.net/tag/multilingual_embeddings|creationDate|2018-05-11 +http://www.semanlink.net/tag/multilingual_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multilingual_embeddings|uri|http://www.semanlink.net/tag/multilingual_embeddings +http://www.semanlink.net/tag/multilingual_embeddings|broader_prefLabel|Cross-lingual NLP 
+http://www.semanlink.net/tag/multilingual_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/multilingual_embeddings|broader_prefLabel|Embeddings in NLP +http://www.semanlink.net/tag/multilingual_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/multilingual_embeddings|broader_related|http://www.semanlink.net/tag/nlp_french +http://www.semanlink.net/tag/multilingual_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/multilingual_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/multilingual_embeddings|broader_related|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/tag/liberte_de_la_presse|creationTime|2008-01-10T01:10:05Z +http://www.semanlink.net/tag/liberte_de_la_presse|prefLabel|Liberté de la presse +http://www.semanlink.net/tag/liberte_de_la_presse|broader|http://www.semanlink.net/tag/liberte +http://www.semanlink.net/tag/liberte_de_la_presse|creationDate|2008-01-10 +http://www.semanlink.net/tag/liberte_de_la_presse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/liberte_de_la_presse|uri|http://www.semanlink.net/tag/liberte_de_la_presse +http://www.semanlink.net/tag/liberte_de_la_presse|broader_prefLabel|Liberté +http://www.semanlink.net/tag/liberte_de_la_presse|broader_related|http://www.semanlink.net/tag/etat_policier +http://www.semanlink.net/tag/telephone|prefLabel|Téléphone +http://www.semanlink.net/tag/telephone|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/telephone|altLabel|Téléphonie +http://www.semanlink.net/tag/telephone|uri|http://www.semanlink.net/tag/telephone +http://www.semanlink.net/tag/vector_space_model|creationTime|2013-05-31T14:46:32Z +http://www.semanlink.net/tag/vector_space_model|prefLabel|Vector space model +http://www.semanlink.net/tag/vector_space_model|broader|http://www.semanlink.net/tag/information_retrieval_techniques +http://www.semanlink.net/tag/vector_space_model|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/vector_space_model|creationDate|2013-05-31 +http://www.semanlink.net/tag/vector_space_model|comment|"Algebraic model for representing text documents as vectors of identifiers such as index terms.
+Documents and queries are represented as vectors.
+Each dimension corresponds to a separate term. If a term occurs in the document, its value in the vector is non-zero. One way of computing the value: TF-IDF
+
+
+
+
+"
+http://www.semanlink.net/tag/vector_space_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/vector_space_model|describedBy|https://en.wikipedia.org/wiki/Vector_space_model
+http://www.semanlink.net/tag/vector_space_model|describedBy|https://en.wikipedia.org/wiki/Vectorial_semantics
+http://www.semanlink.net/tag/vector_space_model|altLabel|Vectorial semantics
+http://www.semanlink.net/tag/vector_space_model|uri|http://www.semanlink.net/tag/vector_space_model
+http://www.semanlink.net/tag/vector_space_model|broader_prefLabel|Information retrieval: techniques
+http://www.semanlink.net/tag/vector_space_model|broader_prefLabel|NLP techniques
+http://www.semanlink.net/tag/qotd|creationTime|2007-09-27T22:22:51Z
+http://www.semanlink.net/tag/qotd|prefLabel|QOTD
+http://www.semanlink.net/tag/qotd|broader|http://www.semanlink.net/tag/citation
+http://www.semanlink.net/tag/qotd|creationDate|2007-09-27
+http://www.semanlink.net/tag/qotd|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/qotd|uri|http://www.semanlink.net/tag/qotd
+http://www.semanlink.net/tag/qotd|broader_prefLabel|Quote
+http://www.semanlink.net/tag/qotd|broader_altLabel|Citation
+http://www.semanlink.net/tag/firefox|prefLabel|Firefox
+http://www.semanlink.net/tag/firefox|broader|http://www.semanlink.net/tag/brouteur
+http://www.semanlink.net/tag/firefox|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/firefox|uri|http://www.semanlink.net/tag/firefox
+http://www.semanlink.net/tag/firefox|broader_prefLabel|Brouteur
+http://www.semanlink.net/tag/firefox|broader_altLabel|Browser
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|creationTime|2018-03-28T23:58:26Z
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|prefLabel|Mission Villani sur l'IA
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|broader|http://www.semanlink.net/tag/artificial_intelligence
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|broader|http://www.semanlink.net/tag/cedric_villani
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|broader|http://www.semanlink.net/tag/politique_economique_francaise
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|creationDate|2018-03-28
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|altLabel|Rapport Villani
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|uri|http://www.semanlink.net/tag/rapport_villani_sur_l_ia
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|broader_prefLabel|Artificial Intelligence
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|broader_prefLabel|Cédric Villani
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|broader_prefLabel|Politique économique française
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|broader_altLabel|Intelligence Artificielle
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|broader_altLabel|AI
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|broader_altLabel|IA
+http://www.semanlink.net/tag/rapport_villani_sur_l_ia|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens
+http://www.semanlink.net/tag/deutsch|creationTime|2010-06-30T00:42:55Z
+http://www.semanlink.net/tag/deutsch|prefLabel|Deutsch +http://www.semanlink.net/tag/deutsch|broader|http://www.semanlink.net/tag/langues_vivantes +http://www.semanlink.net/tag/deutsch|broader|http://www.semanlink.net/tag/allemagne +http://www.semanlink.net/tag/deutsch|related|http://www.semanlink.net/tag/yves_roth +http://www.semanlink.net/tag/deutsch|creationDate|2010-06-30 +http://www.semanlink.net/tag/deutsch|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deutsch|altLabel|Allemand +http://www.semanlink.net/tag/deutsch|uri|http://www.semanlink.net/tag/deutsch +http://www.semanlink.net/tag/deutsch|broader_prefLabel|Langues vivantes +http://www.semanlink.net/tag/deutsch|broader_prefLabel|Allemagne +http://www.semanlink.net/tag/deutsch|broader_altLabel|Germany +http://www.semanlink.net/tag/deutsch|broader_altLabel|Deutschland +http://www.semanlink.net/tag/episodic_memory|creationTime|2019-09-08T13:43:20Z +http://www.semanlink.net/tag/episodic_memory|prefLabel|Episodic Memory +http://www.semanlink.net/tag/episodic_memory|broader|http://www.semanlink.net/tag/memoire_humaine +http://www.semanlink.net/tag/episodic_memory|creationDate|2019-09-08 +http://www.semanlink.net/tag/episodic_memory|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/episodic_memory|describedBy|https://en.wikipedia.org/wiki/Episodic_memory +http://www.semanlink.net/tag/episodic_memory|uri|http://www.semanlink.net/tag/episodic_memory +http://www.semanlink.net/tag/episodic_memory|broader_prefLabel|Mémoire humaine +http://www.semanlink.net/tag/hierarchical_clustering|creationTime|2017-07-18T16:11:10Z +http://www.semanlink.net/tag/hierarchical_clustering|prefLabel|Hierarchical Clustering +http://www.semanlink.net/tag/hierarchical_clustering|broader|http://www.semanlink.net/tag/hierarchies_in_ml +http://www.semanlink.net/tag/hierarchical_clustering|broader|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/tag/hierarchical_clustering|creationDate|2017-07-18 +http://www.semanlink.net/tag/hierarchical_clustering|comment|"Cluster analysis which seeks to build a hierarchy of clusters: given datapoints and their +pairwise similarities, the goal is to construct a hierarchy over clusters, in the form of a tree whose leaves +correspond to datapoints and internal nodes correspond to clusters + +2 kinds: + +- Agglomerative +- Divisive" +http://www.semanlink.net/tag/hierarchical_clustering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hierarchical_clustering|describedBy|https://en.wikipedia.org/wiki/Hierarchical_clustering +http://www.semanlink.net/tag/hierarchical_clustering|uri|http://www.semanlink.net/tag/hierarchical_clustering +http://www.semanlink.net/tag/hierarchical_clustering|broader_prefLabel|Hierarchies in ML +http://www.semanlink.net/tag/hierarchical_clustering|broader_prefLabel|Clustering +http://www.semanlink.net/tag/hierarchical_clustering|broader_altLabel|Data clustering +http://www.semanlink.net/tag/hierarchical_clustering|broader_altLabel|Cluster analysis +http://www.semanlink.net/tag/hierarchical_clustering|broader_related|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/social_content_services|prefLabel|Social Content Services +http://www.semanlink.net/tag/social_content_services|broader|http://www.semanlink.net/tag/web_2_0 +http://www.semanlink.net/tag/social_content_services|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/social_content_services|uri|http://www.semanlink.net/tag/social_content_services +http://www.semanlink.net/tag/social_content_services|broader_prefLabel|Web 2.0 +http://www.semanlink.net/tag/conversational_ai|creationTime|2019-12-13T23:10:02Z +http://www.semanlink.net/tag/conversational_ai|prefLabel|Conversational AI +http://www.semanlink.net/tag/conversational_ai|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/conversational_ai|broader|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/tag/conversational_ai|creationDate|2019-12-13 +http://www.semanlink.net/tag/conversational_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conversational_ai|uri|http://www.semanlink.net/tag/conversational_ai +http://www.semanlink.net/tag/conversational_ai|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/conversational_ai|broader_prefLabel|NLP: use cases +http://www.semanlink.net/tag/conversational_ai|broader_altLabel|NLP: applications +http://www.semanlink.net/tag/oauth2|creationTime|2015-11-16T11:53:35Z +http://www.semanlink.net/tag/oauth2|prefLabel|OAuth2 +http://www.semanlink.net/tag/oauth2|broader|http://www.semanlink.net/tag/oauth +http://www.semanlink.net/tag/oauth2|creationDate|2015-11-16 +http://www.semanlink.net/tag/oauth2|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/oauth2|uri|http://www.semanlink.net/tag/oauth2 +http://www.semanlink.net/tag/oauth2|broader_prefLabel|OAuth +http://www.semanlink.net/tag/peintre|prefLabel|Peintre +http://www.semanlink.net/tag/peintre|broader|http://www.semanlink.net/tag/artiste +http://www.semanlink.net/tag/peintre|broader|http://www.semanlink.net/tag/peinture +http://www.semanlink.net/tag/peintre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/peintre|uri|http://www.semanlink.net/tag/peintre +http://www.semanlink.net/tag/peintre|broader_prefLabel|Artiste +http://www.semanlink.net/tag/peintre|broader_prefLabel|Painting +http://www.semanlink.net/tag/peintre|broader_altLabel|Peinture +http://www.semanlink.net/tag/puceron|creationTime|2020-05-09T21:01:14Z +http://www.semanlink.net/tag/puceron|prefLabel|Puceron +http://www.semanlink.net/tag/puceron|broader|http://www.semanlink.net/tag/insecte +http://www.semanlink.net/tag/puceron|creationDate|2020-05-09 +http://www.semanlink.net/tag/puceron|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/puceron|uri|http://www.semanlink.net/tag/puceron +http://www.semanlink.net/tag/puceron|broader_prefLabel|Insecte +http://www.semanlink.net/tag/jean_rouch|prefLabel|Jean Rouch +http://www.semanlink.net/tag/jean_rouch|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/jean_rouch|broader|http://www.semanlink.net/tag/ethnologie +http://www.semanlink.net/tag/jean_rouch|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/jean_rouch|broader|http://www.semanlink.net/tag/realisateur +http://www.semanlink.net/tag/jean_rouch|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jean_rouch|describedBy|https://fr.wikipedia.org/wiki/Jean_Rouch +http://www.semanlink.net/tag/jean_rouch|uri|http://www.semanlink.net/tag/jean_rouch +http://www.semanlink.net/tag/jean_rouch|broader_prefLabel|Niger +http://www.semanlink.net/tag/jean_rouch|broader_prefLabel|Ethnologie 
+http://www.semanlink.net/tag/jean_rouch|broader_prefLabel|Afrique +http://www.semanlink.net/tag/jean_rouch|broader_prefLabel|Réalisateur +http://www.semanlink.net/tag/jean_rouch|broader_altLabel|Africa +http://www.semanlink.net/tag/jean_rouch|broader_altLabel|Cinéaste +http://www.semanlink.net/tag/jean_rouch|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/jean_rouch|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/jean_rouch|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/map_territory_relation|creationTime|2010-04-28T23:35:51Z +http://www.semanlink.net/tag/map_territory_relation|prefLabel|Map–territory relation +http://www.semanlink.net/tag/map_territory_relation|broader|http://www.semanlink.net/tag/general_semantics +http://www.semanlink.net/tag/map_territory_relation|creationDate|2010-04-28 +http://www.semanlink.net/tag/map_territory_relation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/map_territory_relation|describedBy|https://en.wikipedia.org/wiki/Map%E2%80%93territory_relation +http://www.semanlink.net/tag/map_territory_relation|uri|http://www.semanlink.net/tag/map_territory_relation +http://www.semanlink.net/tag/map_territory_relation|broader_prefLabel|General semantics +http://www.semanlink.net/tag/property_graphs|creationTime|2018-04-08T12:46:24Z +http://www.semanlink.net/tag/property_graphs|prefLabel|Property Graphs +http://www.semanlink.net/tag/property_graphs|broader|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/property_graphs|broader|http://www.semanlink.net/tag/graph_database +http://www.semanlink.net/tag/property_graphs|creationDate|2018-04-08 +http://www.semanlink.net/tag/property_graphs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/property_graphs|altLabel|Property Graph Model +http://www.semanlink.net/tag/property_graphs|uri|http://www.semanlink.net/tag/property_graphs +http://www.semanlink.net/tag/property_graphs|broader_prefLabel|Graph +http://www.semanlink.net/tag/property_graphs|broader_prefLabel|Graph database +http://www.semanlink.net/tag/predictions|creationTime|2015-10-24T22:26:25Z +http://www.semanlink.net/tag/predictions|prefLabel|Predictions +http://www.semanlink.net/tag/predictions|creationDate|2015-10-24 +http://www.semanlink.net/tag/predictions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/predictions|uri|http://www.semanlink.net/tag/predictions +http://www.semanlink.net/tag/gradient_descent|creationTime|2016-01-21T14:05:38Z +http://www.semanlink.net/tag/gradient_descent|prefLabel|Gradient descent +http://www.semanlink.net/tag/gradient_descent|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/gradient_descent|broader|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/gradient_descent|related|http://www.semanlink.net/tag/backpropagation +http://www.semanlink.net/tag/gradient_descent|creationDate|2016-01-21 +http://www.semanlink.net/tag/gradient_descent|comment|"**gradient descent optimization method**: to find a local minimum of a function, take steps proportional to the negative of the gradient (or of the approximate gradient) of the function at the current point +" +http://www.semanlink.net/tag/gradient_descent|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/gradient_descent|describedBy|https://en.wikipedia.org/wiki/Gradient_descent +http://www.semanlink.net/tag/gradient_descent|uri|http://www.semanlink.net/tag/gradient_descent +http://www.semanlink.net/tag/gradient_descent|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/gradient_descent|broader_prefLabel|Algorithmes +http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions|creationTime|2020-06-06T19:35:16Z +http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions|prefLabel|France : dysfonctionnement des institutions +http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions|creationDate|2020-06-06 +http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions|uri|http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions +http://www.semanlink.net/tag/hypermedia|prefLabel|Hypermedia +http://www.semanlink.net/tag/hypermedia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hypermedia|uri|http://www.semanlink.net/tag/hypermedia +http://www.semanlink.net/tag/semantic_web_conferences|creationTime|2007-04-20T00:37:35Z +http://www.semanlink.net/tag/semantic_web_conferences|prefLabel|Semantic Web conferences +http://www.semanlink.net/tag/semantic_web_conferences|broader|http://www.semanlink.net/tag/conferences +http://www.semanlink.net/tag/semantic_web_conferences|creationDate|2007-04-20 +http://www.semanlink.net/tag/semantic_web_conferences|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_conferences|uri|http://www.semanlink.net/tag/semantic_web_conferences +http://www.semanlink.net/tag/semantic_web_conferences|broader_prefLabel|Conférences +http://www.semanlink.net/tag/semantic_web_conferences|broader_related|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/monde_moderne|prefLabel|Monde moderne +http://www.semanlink.net/tag/monde_moderne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/monde_moderne|uri|http://www.semanlink.net/tag/monde_moderne +http://www.semanlink.net/tag/social_web|creationTime|2011-01-18T09:17:58Z +http://www.semanlink.net/tag/social_web|prefLabel|Social Web +http://www.semanlink.net/tag/social_web|broader|http://www.semanlink.net/tag/social_semantic_web +http://www.semanlink.net/tag/social_web|creationDate|2011-01-18 +http://www.semanlink.net/tag/social_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/social_web|uri|http://www.semanlink.net/tag/social_web +http://www.semanlink.net/tag/social_web|broader_prefLabel|Social Semantic Web +http://www.semanlink.net/tag/information_visualization|creationTime|2008-02-15T22:39:43Z +http://www.semanlink.net/tag/information_visualization|prefLabel|Information visualization +http://www.semanlink.net/tag/information_visualization|broader|http://www.semanlink.net/tag/gui +http://www.semanlink.net/tag/information_visualization|broader|http://www.semanlink.net/tag/information +http://www.semanlink.net/tag/information_visualization|creationDate|2008-02-15 +http://www.semanlink.net/tag/information_visualization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/information_visualization|uri|http://www.semanlink.net/tag/information_visualization 
+http://www.semanlink.net/tag/information_visualization|broader_prefLabel|GUI +http://www.semanlink.net/tag/information_visualization|broader_prefLabel|Information +http://www.semanlink.net/tag/semanlink_todo|prefLabel|Semanlink todo +http://www.semanlink.net/tag/semanlink_todo|broader|http://www.semanlink.net/tag/semanlink_dev +http://www.semanlink.net/tag/semanlink_todo|broader|http://www.semanlink.net/tag/to_do +http://www.semanlink.net/tag/semanlink_todo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semanlink_todo|altLabel|SL todo +http://www.semanlink.net/tag/semanlink_todo|uri|http://www.semanlink.net/tag/semanlink_todo +http://www.semanlink.net/tag/semanlink_todo|broader_prefLabel|Semanlink dev +http://www.semanlink.net/tag/semanlink_todo|broader_prefLabel|To do +http://www.semanlink.net/tag/semanlink_todo|broader_altLabel|Todo +http://www.semanlink.net/tag/luis_von_ahn|creationTime|2013-05-15T14:44:19Z +http://www.semanlink.net/tag/luis_von_ahn|prefLabel|Luis von Ahn +http://www.semanlink.net/tag/luis_von_ahn|related|http://www.semanlink.net/tag/crowd_sourcing +http://www.semanlink.net/tag/luis_von_ahn|related|http://www.semanlink.net/tag/www_2013 +http://www.semanlink.net/tag/luis_von_ahn|creationDate|2013-05-15 +http://www.semanlink.net/tag/luis_von_ahn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/luis_von_ahn|describedBy|https://en.wikipedia.org/wiki/Luis_von_Ahn +http://www.semanlink.net/tag/luis_von_ahn|uri|http://www.semanlink.net/tag/luis_von_ahn +http://www.semanlink.net/tag/sculpture|prefLabel|Sculpture +http://www.semanlink.net/tag/sculpture|broader|http://www.semanlink.net/tag/art +http://www.semanlink.net/tag/sculpture|creationDate|2006-11-24 +http://www.semanlink.net/tag/sculpture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sculpture|altLabel|Statuaire +http://www.semanlink.net/tag/sculpture|uri|http://www.semanlink.net/tag/sculpture +http://www.semanlink.net/tag/sculpture|broader_prefLabel|Art +http://www.semanlink.net/tag/web_pollution|creationTime|2018-04-27T19:13:55Z +http://www.semanlink.net/tag/web_pollution|prefLabel|Web Pollution +http://www.semanlink.net/tag/web_pollution|broader|http://www.semanlink.net/tag/web +http://www.semanlink.net/tag/web_pollution|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/web_pollution|creationDate|2018-04-27 +http://www.semanlink.net/tag/web_pollution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_pollution|uri|http://www.semanlink.net/tag/web_pollution +http://www.semanlink.net/tag/web_pollution|broader_prefLabel|Web +http://www.semanlink.net/tag/web_pollution|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/lodr|creationTime|2008-10-07T13:40:32Z +http://www.semanlink.net/tag/lodr|prefLabel|LODr +http://www.semanlink.net/tag/lodr|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/lodr|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/lodr|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/lodr|broader|http://www.semanlink.net/tag/alexandre_passant +http://www.semanlink.net/tag/lodr|creationDate|2008-10-07 +http://www.semanlink.net/tag/lodr|comment|LODr is a RDF-based (re-)tagging service, that allows people to weave their Web 2.0 tagged data into the Linked Data Web and provides a dedicated browsing interface. 
+http://www.semanlink.net/tag/lodr|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lodr|homepage|http://lodr.info/ +http://www.semanlink.net/tag/lodr|uri|http://www.semanlink.net/tag/lodr +http://www.semanlink.net/tag/lodr|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/lodr|broader_prefLabel|Tagging +http://www.semanlink.net/tag/lodr|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/lodr|broader_prefLabel|Alexandre Passant +http://www.semanlink.net/tag/lodr|broader_altLabel|LD +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/lodr|broader_related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/knowledge_based_ai|creationTime|2014-07-24T23:21:36Z +http://www.semanlink.net/tag/knowledge_based_ai|prefLabel|Knowledge-based AI +http://www.semanlink.net/tag/knowledge_based_ai|broader|http://www.semanlink.net/tag/ai_knowledge +http://www.semanlink.net/tag/knowledge_based_ai|related|http://www.semanlink.net/tag/semantic_web_and_ai +http://www.semanlink.net/tag/knowledge_based_ai|related|http://www.semanlink.net/tag/cognitive_computing +http://www.semanlink.net/tag/knowledge_based_ai|creationDate|2014-07-24 +http://www.semanlink.net/tag/knowledge_based_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_based_ai|uri|http://www.semanlink.net/tag/knowledge_based_ai +http://www.semanlink.net/tag/knowledge_based_ai|broader_prefLabel|AI + Knowledge +http://www.semanlink.net/tag/knowledge_based_ai|broader_altLabel|Domain Knowledge in AI +http://www.semanlink.net/tag/knowledge_based_ai|broader_related|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/knowledge_based_ai|broader_related|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|creationTime|2019-01-27T15:08:53Z +http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|prefLabel|Thèse IRIT-Renault: biblio initiale +http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|broader|http://www.semanlink.net/tag/these_irit_renault_biblio +http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|creationDate|2019-01-27 
+http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|comment|"- [Deghani2017] Mostafa Dehghani, Hamed Zamani, Aliaksei Severyn, Jaap +Kamps, W. Bruce Croft. Neural Ranking Models with Weak +Supervision. SIGIR 2017: 65-74 +- [Faruqui2014] Faruqui M., Dodge J., Jauhar S. K., Dyer C., Hovy E., +Smith N. A., « Retrofitting Word Vectors to Semantic Lexicons », NAACL, 2014 +- [Moreno2017] Moreno, J. G., Besançon, R., Beaumont, R., D’hondt, E., +Ligozat, A. L., Rosset, S., Grau, B. (2017, Combining word and entity +embeddings for entity linking. In Extended Semantic Web Conference +(ESWC) pp. 337-352, 2017 +- [Nickel2017] Nickel, M., & Kiela, D. Poincaré embeddings for learning +hierarchical representations. In Advances in Neural Information +Processing Systems (pp. 6341-6350), 2017. +- [Nguyen2017] Nguyen, G. H., Tamine, L., Soulier, L., & Souf, N. (2017, +June). Learning Concept-Driven Document Embeddings for Medical +Information Search. In Conference on Artificial Intelligence in Medicine +in Europe (pp. 160-170). Springer, Cham +- [Nguyen2018] Gia Nguyen, Lynda Tamine, Laure Soulier, Nathalie Souf, A +Tri-Partite Neural Document Language Model for Semantic Information +Retrieval. In Extended Semantic Web Conference (ESWC), 2018 +- [Yu2014] Yu M., Dredze M., « Improving Lexical Embeddings with +Semantic Knowledge », ACL, p. 545- 550, 2014 +- [Wang2014] Wang Z., Zhang J., Feng J., Chen Z., « Knowledge Graph and +Text Jointly Embedding », EMNLP, p. 1591- 1601, 2014 +- [Yamada2016] Yamada, I., Shindo, H., Takeda, H., Takefuji, Y., « Joint +Learning of the Embedding of Words and Entities for Named Entity +Disambiguation », CoNLL, p. 250-259, 2016" +http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|uri|http://www.semanlink.net/tag/these_irit_renault_biblio_initiale +http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|broader_prefLabel|Thèse IRIT-Renault: biblio +http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|broader_related|http://www.semanlink.net/tag/kd_mkb_biblio +http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|broader_related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/these_irit_renault_biblio_initiale|broader_related|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/virtualbox|creationTime|2017-11-20T09:15:54Z +http://www.semanlink.net/tag/virtualbox|prefLabel|VirtualBox +http://www.semanlink.net/tag/virtualbox|broader|http://www.semanlink.net/tag/oracle +http://www.semanlink.net/tag/virtualbox|creationDate|2017-11-20 +http://www.semanlink.net/tag/virtualbox|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/virtualbox|homepage|https://www.virtualbox.org/ +http://www.semanlink.net/tag/virtualbox|describedBy|https://en.wikipedia.org/wiki/VirtualBox +http://www.semanlink.net/tag/virtualbox|uri|http://www.semanlink.net/tag/virtualbox +http://www.semanlink.net/tag/virtualbox|broader_prefLabel|Oracle +http://www.semanlink.net/tag/entreprise|prefLabel|Entreprise +http://www.semanlink.net/tag/entreprise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entreprise|uri|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/ecole_montessori|creationTime|2011-10-05T21:28:18Z +http://www.semanlink.net/tag/ecole_montessori|prefLabel|Ecole Montessori 
+http://www.semanlink.net/tag/ecole_montessori|broader|http://www.semanlink.net/tag/ecole +http://www.semanlink.net/tag/ecole_montessori|creationDate|2011-10-05 +http://www.semanlink.net/tag/ecole_montessori|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ecole_montessori|uri|http://www.semanlink.net/tag/ecole_montessori +http://www.semanlink.net/tag/ecole_montessori|broader_prefLabel|Ecole +http://www.semanlink.net/tag/python_nlp|creationTime|2017-05-23T10:27:13Z +http://www.semanlink.net/tag/python_nlp|prefLabel|Python-NLP +http://www.semanlink.net/tag/python_nlp|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/python_nlp|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/python_nlp|related|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/python_nlp|related|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/tag/python_nlp|creationDate|2017-05-23 +http://www.semanlink.net/tag/python_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/python_nlp|uri|http://www.semanlink.net/tag/python_nlp +http://www.semanlink.net/tag/python_nlp|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/python_nlp|broader_prefLabel|Python +http://www.semanlink.net/tag/concise_bounded_description|creationTime|2008-11-25T12:21:30Z +http://www.semanlink.net/tag/concise_bounded_description|prefLabel|Concise Bounded Description +http://www.semanlink.net/tag/concise_bounded_description|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/concise_bounded_description|broader|http://www.semanlink.net/tag/rdf_graphs +http://www.semanlink.net/tag/concise_bounded_description|creationDate|2008-11-25 +http://www.semanlink.net/tag/concise_bounded_description|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/concise_bounded_description|uri|http://www.semanlink.net/tag/concise_bounded_description +http://www.semanlink.net/tag/concise_bounded_description|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/concise_bounded_description|broader_prefLabel|RDF graphs +http://www.semanlink.net/tag/mixture_distribution|creationTime|2018-03-16T16:44:47Z +http://www.semanlink.net/tag/mixture_distribution|prefLabel|Mixture distribution +http://www.semanlink.net/tag/mixture_distribution|broader|http://www.semanlink.net/tag/statistics +http://www.semanlink.net/tag/mixture_distribution|creationDate|2018-03-16 +http://www.semanlink.net/tag/mixture_distribution|comment|probability distribution of a random variable that is derived from a collection of other random variables: first, a random variable is selected by chance from the collection according to given probabilities of selection, and then the value of the selected random variable is realized +http://www.semanlink.net/tag/mixture_distribution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mixture_distribution|describedBy|https://en.wikipedia.org/wiki/Mixture_distribution +http://www.semanlink.net/tag/mixture_distribution|uri|http://www.semanlink.net/tag/mixture_distribution +http://www.semanlink.net/tag/mixture_distribution|broader_prefLabel|Statistics +http://www.semanlink.net/tag/mixture_distribution|broader_altLabel|stats +http://www.semanlink.net/tag/mixture_distribution|broader_altLabel|Statistiques +http://www.semanlink.net/tag/candidate_sampling|creationTime|2018-07-07T15:05:01Z 
+http://www.semanlink.net/tag/candidate_sampling|prefLabel|Candidate Sampling +http://www.semanlink.net/tag/candidate_sampling|related|http://www.semanlink.net/tag/candidate_sampling +http://www.semanlink.net/tag/candidate_sampling|creationDate|2018-07-07 +http://www.semanlink.net/tag/candidate_sampling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/candidate_sampling|uri|http://www.semanlink.net/tag/candidate_sampling +http://www.semanlink.net/tag/trou_noir|prefLabel|Trou noir +http://www.semanlink.net/tag/trou_noir|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/trou_noir|broader|http://www.semanlink.net/tag/gravitation +http://www.semanlink.net/tag/trou_noir|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/trou_noir|altLabel|Black hole +http://www.semanlink.net/tag/trou_noir|uri|http://www.semanlink.net/tag/trou_noir +http://www.semanlink.net/tag/trou_noir|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/trou_noir|broader_prefLabel|Gravitation +http://www.semanlink.net/tag/trou_noir|broader_altLabel|Gravity +http://www.semanlink.net/tag/google_deepmind|creationTime|2016-01-09T00:49:02Z +http://www.semanlink.net/tag/google_deepmind|prefLabel|DeepMind +http://www.semanlink.net/tag/google_deepmind|broader|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/tag/google_deepmind|related|http://www.semanlink.net/tag/reinforcement_learning +http://www.semanlink.net/tag/google_deepmind|creationDate|2016-01-09 +http://www.semanlink.net/tag/google_deepmind|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_deepmind|describedBy|https://en.wikipedia.org/wiki/Google_DeepMind +http://www.semanlink.net/tag/google_deepmind|uri|http://www.semanlink.net/tag/google_deepmind +http://www.semanlink.net/tag/google_deepmind|broader_prefLabel|AI@Google +http://www.semanlink.net/tag/grece_mycenienne|prefLabel|Grèce mycénienne +http://www.semanlink.net/tag/grece_mycenienne|broader|http://www.semanlink.net/tag/age_du_bronze +http://www.semanlink.net/tag/grece_mycenienne|broader|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/tag/grece_mycenienne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grece_mycenienne|uri|http://www.semanlink.net/tag/grece_mycenienne +http://www.semanlink.net/tag/grece_mycenienne|broader_prefLabel|Âge du bronze +http://www.semanlink.net/tag/grece_mycenienne|broader_prefLabel|Grèce antique +http://www.semanlink.net/tag/nosql_and_eventual_consistency|creationTime|2013-04-29T00:28:16Z +http://www.semanlink.net/tag/nosql_and_eventual_consistency|prefLabel|NoSQL and eventual consistency +http://www.semanlink.net/tag/nosql_and_eventual_consistency|broader|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/nosql_and_eventual_consistency|broader|http://www.semanlink.net/tag/distributed_computing +http://www.semanlink.net/tag/nosql_and_eventual_consistency|related|http://www.semanlink.net/tag/mongodb +http://www.semanlink.net/tag/nosql_and_eventual_consistency|related|http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data +http://www.semanlink.net/tag/nosql_and_eventual_consistency|creationDate|2013-04-29 +http://www.semanlink.net/tag/nosql_and_eventual_consistency|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/nosql_and_eventual_consistency|uri|http://www.semanlink.net/tag/nosql_and_eventual_consistency +http://www.semanlink.net/tag/nosql_and_eventual_consistency|broader_prefLabel|NOSQL +http://www.semanlink.net/tag/nosql_and_eventual_consistency|broader_prefLabel|Distributed computing +http://www.semanlink.net/tag/nosql_and_eventual_consistency|broader_related|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/musee_archeologique_de_bagdad|prefLabel|Musée archéologique de Bagdad +http://www.semanlink.net/tag/musee_archeologique_de_bagdad|broader|http://www.semanlink.net/tag/mesopotamie +http://www.semanlink.net/tag/musee_archeologique_de_bagdad|broader|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/musee_archeologique_de_bagdad|broader|http://www.semanlink.net/tag/irak +http://www.semanlink.net/tag/musee_archeologique_de_bagdad|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/musee_archeologique_de_bagdad|uri|http://www.semanlink.net/tag/musee_archeologique_de_bagdad +http://www.semanlink.net/tag/musee_archeologique_de_bagdad|broader_prefLabel|Mésopotamie +http://www.semanlink.net/tag/musee_archeologique_de_bagdad|broader_prefLabel|Musée +http://www.semanlink.net/tag/musee_archeologique_de_bagdad|broader_prefLabel|Irak +http://www.semanlink.net/tag/musee_archeologique_de_bagdad|broader_altLabel|Iraq +http://www.semanlink.net/tag/chine_ecologie|creationTime|2014-04-23T21:53:32Z +http://www.semanlink.net/tag/chine_ecologie|prefLabel|Chine : écologie +http://www.semanlink.net/tag/chine_ecologie|broader|http://www.semanlink.net/tag/crise_ecologique +http://www.semanlink.net/tag/chine_ecologie|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/chine_ecologie|creationDate|2014-04-23 +http://www.semanlink.net/tag/chine_ecologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chine_ecologie|uri|http://www.semanlink.net/tag/chine_ecologie +http://www.semanlink.net/tag/chine_ecologie|broader_prefLabel|Crise écologique +http://www.semanlink.net/tag/chine_ecologie|broader_prefLabel|Chine +http://www.semanlink.net/tag/chine_ecologie|broader_altLabel|China +http://www.semanlink.net/tag/cornell|prefLabel|Cornell +http://www.semanlink.net/tag/cornell|broader|http://www.semanlink.net/tag/universites_americaines +http://www.semanlink.net/tag/cornell|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cornell|uri|http://www.semanlink.net/tag/cornell +http://www.semanlink.net/tag/cornell|broader_prefLabel|Universités américaines +http://www.semanlink.net/tag/manuscrits_de_tombouctou|prefLabel|Manuscrits de Tombouctou +http://www.semanlink.net/tag/manuscrits_de_tombouctou|broader|http://www.semanlink.net/tag/tombouctou +http://www.semanlink.net/tag/manuscrits_de_tombouctou|broader|http://www.semanlink.net/tag/manuscrits +http://www.semanlink.net/tag/manuscrits_de_tombouctou|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/manuscrits_de_tombouctou|uri|http://www.semanlink.net/tag/manuscrits_de_tombouctou +http://www.semanlink.net/tag/manuscrits_de_tombouctou|broader_prefLabel|Tombouctou +http://www.semanlink.net/tag/manuscrits_de_tombouctou|broader_prefLabel|Manuscrits +http://www.semanlink.net/tag/manuscrits_de_tombouctou|broader_altLabel|Timbuktu +http://www.semanlink.net/tag/twitter|creationTime|2010-06-23T00:20:16Z +http://www.semanlink.net/tag/twitter|prefLabel|Twitter 
+http://www.semanlink.net/tag/twitter|broader|http://www.semanlink.net/tag/microblogs +http://www.semanlink.net/tag/twitter|creationDate|2010-06-23 +http://www.semanlink.net/tag/twitter|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/twitter|uri|http://www.semanlink.net/tag/twitter +http://www.semanlink.net/tag/twitter|broader_prefLabel|Microblogs +http://www.semanlink.net/tag/twitter|broader_altLabel|Microblogging +http://www.semanlink.net/tag/pedra_furada|creationTime|2019-02-02T02:20:20Z +http://www.semanlink.net/tag/pedra_furada|prefLabel|Pedra Furada +http://www.semanlink.net/tag/pedra_furada|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/pedra_furada|broader|http://www.semanlink.net/tag/first_americans +http://www.semanlink.net/tag/pedra_furada|creationDate|2019-02-02 +http://www.semanlink.net/tag/pedra_furada|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pedra_furada|describedBy|https://en.wikipedia.org/wiki/Pedra_Furada +http://www.semanlink.net/tag/pedra_furada|uri|http://www.semanlink.net/tag/pedra_furada +http://www.semanlink.net/tag/pedra_furada|broader_prefLabel|Brésil +http://www.semanlink.net/tag/pedra_furada|broader_prefLabel|First Americans +http://www.semanlink.net/tag/pedra_furada|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/nearest_neighbor_search|creationTime|2017-07-26T13:37:39Z +http://www.semanlink.net/tag/nearest_neighbor_search|prefLabel|Nearest neighbor search +http://www.semanlink.net/tag/nearest_neighbor_search|broader|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/nearest_neighbor_search|broader|http://www.semanlink.net/tag/search +http://www.semanlink.net/tag/nearest_neighbor_search|related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/nearest_neighbor_search|creationDate|2017-07-26 +http://www.semanlink.net/tag/nearest_neighbor_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nearest_neighbor_search|describedBy|https://en.wikipedia.org/wiki/Nearest_neighbor_search +http://www.semanlink.net/tag/nearest_neighbor_search|altLabel|Similarity search +http://www.semanlink.net/tag/nearest_neighbor_search|uri|http://www.semanlink.net/tag/nearest_neighbor_search +http://www.semanlink.net/tag/nearest_neighbor_search|broader_prefLabel|Algorithmes +http://www.semanlink.net/tag/nearest_neighbor_search|broader_prefLabel|Search +http://www.semanlink.net/tag/annotation_tools|creationTime|2019-12-17T15:26:59Z +http://www.semanlink.net/tag/annotation_tools|prefLabel|Annotation tools +http://www.semanlink.net/tag/annotation_tools|broader|http://www.semanlink.net/tag/labeling_data +http://www.semanlink.net/tag/annotation_tools|creationDate|2019-12-17 +http://www.semanlink.net/tag/annotation_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/annotation_tools|uri|http://www.semanlink.net/tag/annotation_tools +http://www.semanlink.net/tag/annotation_tools|broader_prefLabel|Labeling data +http://www.semanlink.net/tag/annotation_tools|broader_altLabel|Labelling data +http://www.semanlink.net/tag/annotation_tools|broader_altLabel|Annotating data +http://www.semanlink.net/tag/turtle_in_html|creationTime|2013-09-06T18:25:10Z +http://www.semanlink.net/tag/turtle_in_html|prefLabel|Turtle in HTML +http://www.semanlink.net/tag/turtle_in_html|broader|http://www.semanlink.net/tag/turtle 
+http://www.semanlink.net/tag/turtle_in_html|creationDate|2013-09-06 +http://www.semanlink.net/tag/turtle_in_html|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/turtle_in_html|uri|http://www.semanlink.net/tag/turtle_in_html +http://www.semanlink.net/tag/turtle_in_html|broader_prefLabel|Turtle +http://www.semanlink.net/tag/marklogic|creationTime|2016-05-26T15:37:04Z +http://www.semanlink.net/tag/marklogic|prefLabel|Marklogic +http://www.semanlink.net/tag/marklogic|broader|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/marklogic|creationDate|2016-05-26 +http://www.semanlink.net/tag/marklogic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/marklogic|homepage|http://www.marklogic.com +http://www.semanlink.net/tag/marklogic|uri|http://www.semanlink.net/tag/marklogic +http://www.semanlink.net/tag/marklogic|broader_prefLabel|NOSQL +http://www.semanlink.net/tag/marklogic|broader_related|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/learned_index_structures|creationTime|2019-01-01T13:22:40Z +http://www.semanlink.net/tag/learned_index_structures|prefLabel|Learned Index Structures +http://www.semanlink.net/tag/learned_index_structures|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/learned_index_structures|related|http://www.semanlink.net/tag/semantic_hashing +http://www.semanlink.net/tag/learned_index_structures|creationDate|2019-01-01 +http://www.semanlink.net/tag/learned_index_structures|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/learned_index_structures|uri|http://www.semanlink.net/tag/learned_index_structures +http://www.semanlink.net/tag/learned_index_structures|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/chrome_extension|creationTime|2018-03-29T16:45:54Z +http://www.semanlink.net/tag/chrome_extension|prefLabel|Chrome extension +http://www.semanlink.net/tag/chrome_extension|broader|http://www.semanlink.net/tag/chrome +http://www.semanlink.net/tag/chrome_extension|creationDate|2018-03-29 +http://www.semanlink.net/tag/chrome_extension|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chrome_extension|uri|http://www.semanlink.net/tag/chrome_extension +http://www.semanlink.net/tag/chrome_extension|broader_prefLabel|Chrome +http://www.semanlink.net/tag/reformer|creationTime|2020-06-29T19:08:10Z +http://www.semanlink.net/tag/reformer|prefLabel|Reformer +http://www.semanlink.net/tag/reformer|broader|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/tag/reformer|broader|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/tag/reformer|related|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.semanlink.net/tag/reformer|creationDate|2020-06-29 +http://www.semanlink.net/tag/reformer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reformer|uri|http://www.semanlink.net/tag/reformer +http://www.semanlink.net/tag/reformer|broader_prefLabel|AI@Google +http://www.semanlink.net/tag/reformer|broader_prefLabel|Transformers +http://www.semanlink.net/tag/reformer|broader_altLabel|Transformer +http://www.semanlink.net/tag/reformer|broader_altLabel|Transformers +http://www.semanlink.net/tag/reformer|broader_altLabel|Attention is All You Need +http://www.semanlink.net/tag/reformer|broader_related|http://www.semanlink.net/tag/nlp_google 
+http://www.semanlink.net/tag/norilsk|prefLabel|Norilsk +http://www.semanlink.net/tag/norilsk|broader|http://www.semanlink.net/tag/polluted_places +http://www.semanlink.net/tag/norilsk|broader|http://www.semanlink.net/tag/pollution +http://www.semanlink.net/tag/norilsk|broader|http://www.semanlink.net/tag/arctique +http://www.semanlink.net/tag/norilsk|broader|http://www.semanlink.net/tag/russie +http://www.semanlink.net/tag/norilsk|broader|http://www.semanlink.net/tag/industrie_miniere +http://www.semanlink.net/tag/norilsk|creationDate|2006-10-28 +http://www.semanlink.net/tag/norilsk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/norilsk|uri|http://www.semanlink.net/tag/norilsk +http://www.semanlink.net/tag/norilsk|broader_prefLabel|Polluted places +http://www.semanlink.net/tag/norilsk|broader_prefLabel|Pollution +http://www.semanlink.net/tag/norilsk|broader_prefLabel|Arctique +http://www.semanlink.net/tag/norilsk|broader_prefLabel|Russie +http://www.semanlink.net/tag/norilsk|broader_prefLabel|Industrie minière +http://www.semanlink.net/tag/insecticide|creationTime|2007-11-21T23:22:03Z +http://www.semanlink.net/tag/insecticide|prefLabel|Insecticide +http://www.semanlink.net/tag/insecticide|broader|http://www.semanlink.net/tag/insecte +http://www.semanlink.net/tag/insecticide|creationDate|2007-11-21 +http://www.semanlink.net/tag/insecticide|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/insecticide|uri|http://www.semanlink.net/tag/insecticide +http://www.semanlink.net/tag/insecticide|broader_prefLabel|Insecte +http://www.semanlink.net/tag/ministere_de_la_culture|creationTime|2014-07-25T15:57:37Z +http://www.semanlink.net/tag/ministere_de_la_culture|prefLabel|Ministère de la culture +http://www.semanlink.net/tag/ministere_de_la_culture|creationDate|2014-07-25 +http://www.semanlink.net/tag/ministere_de_la_culture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ministere_de_la_culture|uri|http://www.semanlink.net/tag/ministere_de_la_culture +http://www.semanlink.net/tag/new_york_times|prefLabel|New York Times +http://www.semanlink.net/tag/new_york_times|broader|http://www.semanlink.net/tag/journal +http://www.semanlink.net/tag/new_york_times|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/new_york_times|uri|http://www.semanlink.net/tag/new_york_times +http://www.semanlink.net/tag/new_york_times|broader_prefLabel|Presse +http://www.semanlink.net/tag/new_york_times|broader_altLabel|Journal +http://www.semanlink.net/tag/sota|creationTime|2018-06-23T01:05:10Z +http://www.semanlink.net/tag/sota|prefLabel|SOTA +http://www.semanlink.net/tag/sota|creationDate|2018-06-23 +http://www.semanlink.net/tag/sota|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sota|uri|http://www.semanlink.net/tag/sota +http://www.semanlink.net/tag/fps|prefLabel|fps +http://www.semanlink.net/tag/fps|comment|"Hi, I am http://www.semanlink.net/tag/fps +" +http://www.semanlink.net/tag/fps|type|http://xmlns.com/foaf/0.1/Person +http://www.semanlink.net/tag/fps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps|sameAs|http://community.linkeddata.org/dataspace/person/fps#this +http://www.semanlink.net/tag/fps|sameAs|http://data.semanticweb.org/person/francois-paul-servant +http://www.semanlink.net/tag/fps|sameAs|http://dblp.l3s.de/d2r/resource/authors/Fran%C3%A7ois-Paul_Servant 
+http://www.semanlink.net/tag/fps|uri|http://www.semanlink.net/tag/fps +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|creationTime|2018-02-26T10:03:08Z +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|prefLabel|Convolutional Knowledge Graph Embeddings +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader|http://www.semanlink.net/tag/convolutional_neural_network +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|creationDate|2018-02-26 +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|uri|http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_prefLabel|Convolutional neural network +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_prefLabel|Knowledge Graph Embeddings +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_altLabel|Convnets +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_altLabel|CNN +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_altLabel|Convolutional neural networks +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_altLabel|Convnet +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_altLabel|KGE +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_altLabel|Knowledge graph embedding +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_altLabel|KG embedding +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/yann_lecun +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/spark_java_web_framework|creationTime|2017-05-15T19:06:50Z +http://www.semanlink.net/tag/spark_java_web_framework|prefLabel|Spark (Java web framework) +http://www.semanlink.net/tag/spark_java_web_framework|broader|http://www.semanlink.net/tag/java_microframeworks +http://www.semanlink.net/tag/spark_java_web_framework|broader|http://www.semanlink.net/tag/restful_web_services +http://www.semanlink.net/tag/spark_java_web_framework|broader|http://www.semanlink.net/tag/java_8_lambdas +http://www.semanlink.net/tag/spark_java_web_framework|broader|http://www.semanlink.net/tag/microservices +http://www.semanlink.net/tag/spark_java_web_framework|broader|http://www.semanlink.net/tag/web_dev_framework +http://www.semanlink.net/tag/spark_java_web_framework|creationDate|2017-05-15 +http://www.semanlink.net/tag/spark_java_web_framework|comment|A micro framework for creating web applications in Java 8 
+http://www.semanlink.net/tag/spark_java_web_framework|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/spark_java_web_framework|homepage|http://sparkjava.com/ +http://www.semanlink.net/tag/spark_java_web_framework|uri|http://www.semanlink.net/tag/spark_java_web_framework +http://www.semanlink.net/tag/spark_java_web_framework|broader_prefLabel|Java microframeworks +http://www.semanlink.net/tag/spark_java_web_framework|broader_prefLabel|RESTful Web Services +http://www.semanlink.net/tag/spark_java_web_framework|broader_prefLabel|Java 8 lambdas +http://www.semanlink.net/tag/spark_java_web_framework|broader_prefLabel|Microservices +http://www.semanlink.net/tag/spark_java_web_framework|broader_prefLabel|Web dev framework +http://www.semanlink.net/tag/ai_black_box|creationTime|2017-12-31T10:52:43Z +http://www.semanlink.net/tag/ai_black_box|prefLabel|AI black box +http://www.semanlink.net/tag/ai_black_box|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_black_box|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/ai_black_box|broader|http://www.semanlink.net/tag/explainable_ai +http://www.semanlink.net/tag/ai_black_box|creationDate|2017-12-31 +http://www.semanlink.net/tag/ai_black_box|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_black_box|uri|http://www.semanlink.net/tag/ai_black_box +http://www.semanlink.net/tag/ai_black_box|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_black_box|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/ai_black_box|broader_prefLabel|Explainable AI +http://www.semanlink.net/tag/ai_black_box|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_black_box|broader_altLabel|AI +http://www.semanlink.net/tag/ai_black_box|broader_altLabel|IA +http://www.semanlink.net/tag/ai_black_box|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/ai_black_box|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/ai_black_box|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/owl_full|prefLabel|OWL-Full +http://www.semanlink.net/tag/owl_full|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owl_full|creationDate|2006-12-01 +http://www.semanlink.net/tag/owl_full|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owl_full|uri|http://www.semanlink.net/tag/owl_full +http://www.semanlink.net/tag/owl_full|broader_prefLabel|OWL +http://www.semanlink.net/tag/owl_full|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/neelie_kroes|creationTime|2013-01-29T18:34:10Z +http://www.semanlink.net/tag/neelie_kroes|prefLabel|Neelie Kroes +http://www.semanlink.net/tag/neelie_kroes|related|http://www.semanlink.net/tag/commission_europeenne +http://www.semanlink.net/tag/neelie_kroes|creationDate|2013-01-29 +http://www.semanlink.net/tag/neelie_kroes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neelie_kroes|uri|http://www.semanlink.net/tag/neelie_kroes +http://www.semanlink.net/tag/pollution_de_l_eau|creationTime|2021-08-12T11:56:10Z +http://www.semanlink.net/tag/pollution_de_l_eau|prefLabel|Pollution de l'eau +http://www.semanlink.net/tag/pollution_de_l_eau|broader|http://www.semanlink.net/tag/pollution 
+http://www.semanlink.net/tag/pollution_de_l_eau|creationDate|2021-08-12 +http://www.semanlink.net/tag/pollution_de_l_eau|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pollution_de_l_eau|uri|http://www.semanlink.net/tag/pollution_de_l_eau +http://www.semanlink.net/tag/pollution_de_l_eau|broader_prefLabel|Pollution +http://www.semanlink.net/tag/meta_reinforcement_learning|creationTime|2019-12-07T11:27:02Z +http://www.semanlink.net/tag/meta_reinforcement_learning|prefLabel|Meta Reinforcement Learning +http://www.semanlink.net/tag/meta_reinforcement_learning|broader|http://www.semanlink.net/tag/reinforcement_learning +http://www.semanlink.net/tag/meta_reinforcement_learning|creationDate|2019-12-07 +http://www.semanlink.net/tag/meta_reinforcement_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/meta_reinforcement_learning|uri|http://www.semanlink.net/tag/meta_reinforcement_learning +http://www.semanlink.net/tag/meta_reinforcement_learning|broader_prefLabel|Reinforcement learning +http://www.semanlink.net/tag/meta_reinforcement_learning|broader_altLabel|RL +http://www.semanlink.net/tag/graph_based_text_representations|creationTime|2018-05-10T13:52:34Z +http://www.semanlink.net/tag/graph_based_text_representations|prefLabel|Graph-based Text Representations +http://www.semanlink.net/tag/graph_based_text_representations|broader|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/graph_based_text_representations|broader|http://www.semanlink.net/tag/nlp_text_representation +http://www.semanlink.net/tag/graph_based_text_representations|creationDate|2018-05-10 +http://www.semanlink.net/tag/graph_based_text_representations|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_based_text_representations|uri|http://www.semanlink.net/tag/graph_based_text_representations +http://www.semanlink.net/tag/graph_based_text_representations|broader_prefLabel|Graph +http://www.semanlink.net/tag/graph_based_text_representations|broader_prefLabel|NLP: Text Representation +http://www.semanlink.net/tag/graph_based_text_representations|broader_altLabel|Text Representation +http://www.semanlink.net/tag/colombie|creationTime|2007-07-12T22:51:38Z +http://www.semanlink.net/tag/colombie|prefLabel|Colombie +http://www.semanlink.net/tag/colombie|creationDate|2007-07-12 +http://www.semanlink.net/tag/colombie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/colombie|uri|http://www.semanlink.net/tag/colombie +http://www.semanlink.net/tag/negociations_climat|creationTime|2012-12-08T13:33:19Z +http://www.semanlink.net/tag/negociations_climat|prefLabel|Négociations climat +http://www.semanlink.net/tag/negociations_climat|broader|http://www.semanlink.net/tag/changement_climatique +http://www.semanlink.net/tag/negociations_climat|broader|http://www.semanlink.net/tag/diplomatie +http://www.semanlink.net/tag/negociations_climat|creationDate|2012-12-08 +http://www.semanlink.net/tag/negociations_climat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/negociations_climat|uri|http://www.semanlink.net/tag/negociations_climat +http://www.semanlink.net/tag/negociations_climat|broader_prefLabel|Changement climatique +http://www.semanlink.net/tag/negociations_climat|broader_prefLabel|Diplomatie +http://www.semanlink.net/tag/extinction_de_masse|prefLabel|Extinction de masse 
+http://www.semanlink.net/tag/extinction_de_masse|broader|http://www.semanlink.net/tag/extinction_d_especes +http://www.semanlink.net/tag/extinction_de_masse|broader|http://www.semanlink.net/tag/catastrophe +http://www.semanlink.net/tag/extinction_de_masse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/extinction_de_masse|uri|http://www.semanlink.net/tag/extinction_de_masse +http://www.semanlink.net/tag/extinction_de_masse|broader_prefLabel|Extinction d'espèces +http://www.semanlink.net/tag/extinction_de_masse|broader_prefLabel|Catastrophe +http://www.semanlink.net/tag/rest_security|creationTime|2015-11-16T12:00:22Z +http://www.semanlink.net/tag/rest_security|prefLabel|REST Security +http://www.semanlink.net/tag/rest_security|broader|http://www.semanlink.net/tag/rest +http://www.semanlink.net/tag/rest_security|related|http://www.semanlink.net/tag/oauth +http://www.semanlink.net/tag/rest_security|creationDate|2015-11-16 +http://www.semanlink.net/tag/rest_security|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rest_security|uri|http://www.semanlink.net/tag/rest_security +http://www.semanlink.net/tag/rest_security|broader_prefLabel|REST +http://www.semanlink.net/tag/nlp_long_documents|creationTime|2020-01-23T10:24:23Z +http://www.semanlink.net/tag/nlp_long_documents|prefLabel|Long documents +http://www.semanlink.net/tag/nlp_long_documents|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/nlp_long_documents|broader|http://www.semanlink.net/tag/nlp_text_representation +http://www.semanlink.net/tag/nlp_long_documents|related|http://www.semanlink.net/tag/scientific_information_extraction +http://www.semanlink.net/tag/nlp_long_documents|related|http://www.semanlink.net/tag/job_matching +http://www.semanlink.net/tag/nlp_long_documents|related|http://www.semanlink.net/tag/patent_landscaping +http://www.semanlink.net/tag/nlp_long_documents|related|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/nlp_long_documents|creationDate|2020-01-23 +http://www.semanlink.net/tag/nlp_long_documents|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_long_documents|uri|http://www.semanlink.net/tag/nlp_long_documents +http://www.semanlink.net/tag/nlp_long_documents|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/nlp_long_documents|broader_prefLabel|NLP: Text Representation +http://www.semanlink.net/tag/nlp_long_documents|broader_altLabel|Text Representation +http://www.semanlink.net/tag/macron_et_l_ecologie|creationTime|2020-08-06T22:32:41Z +http://www.semanlink.net/tag/macron_et_l_ecologie|prefLabel|Macron et l'écologie +http://www.semanlink.net/tag/macron_et_l_ecologie|broader|http://www.semanlink.net/tag/politique_et_environnement +http://www.semanlink.net/tag/macron_et_l_ecologie|broader|http://www.semanlink.net/tag/macron +http://www.semanlink.net/tag/macron_et_l_ecologie|creationDate|2020-08-06 +http://www.semanlink.net/tag/macron_et_l_ecologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/macron_et_l_ecologie|uri|http://www.semanlink.net/tag/macron_et_l_ecologie +http://www.semanlink.net/tag/macron_et_l_ecologie|broader_prefLabel|Politique et environnement +http://www.semanlink.net/tag/macron_et_l_ecologie|broader_prefLabel|Macron +http://www.semanlink.net/tag/macron_et_l_ecologie|broader_related|http://www.semanlink.net/tag/verts 
+http://www.semanlink.net/tag/ihm|prefLabel|IHM +http://www.semanlink.net/tag/ihm|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/ihm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ihm|uri|http://www.semanlink.net/tag/ihm +http://www.semanlink.net/tag/ihm|broader_prefLabel|Informatique +http://www.semanlink.net/tag/eau_extraterrestre|creationTime|2012-06-30T01:04:07Z +http://www.semanlink.net/tag/eau_extraterrestre|prefLabel|Eau extraterrestre +http://www.semanlink.net/tag/eau_extraterrestre|broader|http://www.semanlink.net/tag/eau +http://www.semanlink.net/tag/eau_extraterrestre|related|http://www.semanlink.net/tag/vie_extraterrestre +http://www.semanlink.net/tag/eau_extraterrestre|creationDate|2012-06-30 +http://www.semanlink.net/tag/eau_extraterrestre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eau_extraterrestre|uri|http://www.semanlink.net/tag/eau_extraterrestre +http://www.semanlink.net/tag/eau_extraterrestre|broader_prefLabel|Eau +http://www.semanlink.net/tag/lego|prefLabel|Lego +http://www.semanlink.net/tag/lego|broader|http://www.semanlink.net/tag/jeux +http://www.semanlink.net/tag/lego|creationDate|2006-09-12 +http://www.semanlink.net/tag/lego|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lego|uri|http://www.semanlink.net/tag/lego +http://www.semanlink.net/tag/lego|broader_prefLabel|Jeux +http://www.semanlink.net/tag/semantic_gap|creationTime|2018-04-01T14:53:23Z +http://www.semanlink.net/tag/semantic_gap|prefLabel|Semantic gap +http://www.semanlink.net/tag/semantic_gap|creationDate|2018-04-01 +http://www.semanlink.net/tag/semantic_gap|comment|gap between low-level document features and high-level meaning +http://www.semanlink.net/tag/semantic_gap|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_gap|describedBy|https://en.wikipedia.org/wiki/Semantic_gap +http://www.semanlink.net/tag/semantic_gap|uri|http://www.semanlink.net/tag/semantic_gap +http://www.semanlink.net/tag/api_design|creationTime|2017-04-12T13:42:07Z +http://www.semanlink.net/tag/api_design|prefLabel|API design +http://www.semanlink.net/tag/api_design|creationDate|2017-04-12 +http://www.semanlink.net/tag/api_design|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/api_design|uri|http://www.semanlink.net/tag/api_design +http://www.semanlink.net/tag/riches|creationTime|2010-09-28T17:17:11Z +http://www.semanlink.net/tag/riches|prefLabel|Riches +http://www.semanlink.net/tag/riches|creationDate|2010-09-28 +http://www.semanlink.net/tag/riches|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/riches|uri|http://www.semanlink.net/tag/riches +http://www.semanlink.net/tag/javascript_closures|creationTime|2012-08-14T14:51:52Z +http://www.semanlink.net/tag/javascript_closures|prefLabel|Javascript closures +http://www.semanlink.net/tag/javascript_closures|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/javascript_closures|broader|http://www.semanlink.net/tag/closure +http://www.semanlink.net/tag/javascript_closures|broader|http://www.semanlink.net/tag/function_closures +http://www.semanlink.net/tag/javascript_closures|creationDate|2012-08-14 +http://www.semanlink.net/tag/javascript_closures|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/javascript_closures|uri|http://www.semanlink.net/tag/javascript_closures
+http://www.semanlink.net/tag/javascript_closures|broader_prefLabel|JavaScript
+http://www.semanlink.net/tag/javascript_closures|broader_prefLabel|Closure
+http://www.semanlink.net/tag/javascript_closures|broader_prefLabel|Function closures
+http://www.semanlink.net/tag/javascript_closures|broader_altLabel|js
+http://www.semanlink.net/tag/dimensionality_reduction|creationTime|2015-10-16T11:32:35Z
+http://www.semanlink.net/tag/dimensionality_reduction|prefLabel|Dimensionality reduction
+http://www.semanlink.net/tag/dimensionality_reduction|broader|http://www.semanlink.net/tag/machine_learning_techniques
+http://www.semanlink.net/tag/dimensionality_reduction|creationDate|2015-10-16
+http://www.semanlink.net/tag/dimensionality_reduction|comment|process of reducing the number of random variables under consideration. Can be divided into feature selection and feature extraction.
+http://www.semanlink.net/tag/dimensionality_reduction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/dimensionality_reduction|describedBy|https://en.wikipedia.org/wiki/Dimensionality_reduction
+http://www.semanlink.net/tag/dimensionality_reduction|uri|http://www.semanlink.net/tag/dimensionality_reduction
+http://www.semanlink.net/tag/dimensionality_reduction|broader_prefLabel|Machine learning: techniques
+http://www.semanlink.net/tag/liberation|creationTime|2011-06-03T18:08:21Z
+http://www.semanlink.net/tag/liberation|prefLabel|Libération
+http://www.semanlink.net/tag/liberation|broader|http://www.semanlink.net/tag/liberte
+http://www.semanlink.net/tag/liberation|creationDate|2011-06-03
+http://www.semanlink.net/tag/liberation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/liberation|uri|http://www.semanlink.net/tag/liberation
+http://www.semanlink.net/tag/liberation|broader_prefLabel|Liberté
+http://www.semanlink.net/tag/liberation|broader_related|http://www.semanlink.net/tag/etat_policier
+http://www.semanlink.net/tag/social_networkd_are_bad|creationTime|2021-01-10T18:11:06Z
+http://www.semanlink.net/tag/social_networkd_are_bad|prefLabel|Social Networks are bad
+http://www.semanlink.net/tag/social_networkd_are_bad|broader|http://www.semanlink.net/tag/social_networks
+http://www.semanlink.net/tag/social_networkd_are_bad|creationDate|2021-01-10
+http://www.semanlink.net/tag/social_networkd_are_bad|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/social_networkd_are_bad|uri|http://www.semanlink.net/tag/social_networkd_are_bad
+http://www.semanlink.net/tag/social_networkd_are_bad|broader_prefLabel|Social Networks
+http://www.semanlink.net/tag/api_management|creationTime|2017-05-15T11:44:31Z
+http://www.semanlink.net/tag/api_management|prefLabel|API management
+http://www.semanlink.net/tag/api_management|broader|http://www.semanlink.net/tag/api
+http://www.semanlink.net/tag/api_management|creationDate|2017-05-15
+http://www.semanlink.net/tag/api_management|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/api_management|uri|http://www.semanlink.net/tag/api_management
+http://www.semanlink.net/tag/api_management|broader_prefLabel|API
+http://www.semanlink.net/tag/poincare|prefLabel|Poincaré
+http://www.semanlink.net/tag/poincare|broader|http://www.semanlink.net/tag/mathematicien
+http://www.semanlink.net/tag/poincare|broader|http://www.semanlink.net/tag/scientifique +http://www.semanlink.net/tag/poincare|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/poincare|uri|http://www.semanlink.net/tag/poincare +http://www.semanlink.net/tag/poincare|broader_prefLabel|Mathématicien +http://www.semanlink.net/tag/poincare|broader_prefLabel|Scientifique +http://www.semanlink.net/tag/poincare|broader_altLabel|Savant +http://www.semanlink.net/tag/noos|prefLabel|Noos +http://www.semanlink.net/tag/noos|broader|http://www.semanlink.net/tag/fournisseurs_d_acces_a_internet +http://www.semanlink.net/tag/noos|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/noos|uri|http://www.semanlink.net/tag/noos +http://www.semanlink.net/tag/noos|broader_prefLabel|Fournisseurs d'accès à internet +http://www.semanlink.net/tag/islam|prefLabel|Islam +http://www.semanlink.net/tag/islam|broader|http://www.semanlink.net/tag/religion +http://www.semanlink.net/tag/islam|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/islam|uri|http://www.semanlink.net/tag/islam +http://www.semanlink.net/tag/islam|broader_prefLabel|Religion +http://www.semanlink.net/tag/arn|prefLabel|RNA +http://www.semanlink.net/tag/arn|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/arn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arn|altLabel|ARN +http://www.semanlink.net/tag/arn|uri|http://www.semanlink.net/tag/arn +http://www.semanlink.net/tag/arn|broader_prefLabel|Genetics +http://www.semanlink.net/tag/arn|broader_prefLabel|Génétique +http://www.semanlink.net/tag/sparql_tips|creationTime|2008-04-08T14:54:25Z +http://www.semanlink.net/tag/sparql_tips|prefLabel|SPARQL Tips +http://www.semanlink.net/tag/sparql_tips|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_tips|broader|http://www.semanlink.net/tag/tips +http://www.semanlink.net/tag/sparql_tips|creationDate|2008-04-08 +http://www.semanlink.net/tag/sparql_tips|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_tips|uri|http://www.semanlink.net/tag/sparql_tips +http://www.semanlink.net/tag/sparql_tips|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/sparql_tips|broader_prefLabel|Tips +http://www.semanlink.net/tag/cuba|creationTime|2013-01-12T23:56:45Z +http://www.semanlink.net/tag/cuba|prefLabel|Cuba +http://www.semanlink.net/tag/cuba|creationDate|2013-01-12 +http://www.semanlink.net/tag/cuba|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cuba|uri|http://www.semanlink.net/tag/cuba +http://www.semanlink.net/tag/common_tag|creationTime|2009-06-12T10:23:58Z +http://www.semanlink.net/tag/common_tag|prefLabel|Common Tag +http://www.semanlink.net/tag/common_tag|broader|http://www.semanlink.net/tag/semantic_tagging +http://www.semanlink.net/tag/common_tag|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/common_tag|broader|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/common_tag|creationDate|2009-06-12 +http://www.semanlink.net/tag/common_tag|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/common_tag|describedBy|http://commontag.org/Home +http://www.semanlink.net/tag/common_tag|altLabel|CommonTag +http://www.semanlink.net/tag/common_tag|uri|http://www.semanlink.net/tag/common_tag 
+http://www.semanlink.net/tag/common_tag|broader_prefLabel|Semantic tagging +http://www.semanlink.net/tag/common_tag|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/common_tag|broader_prefLabel|RDFa +http://www.semanlink.net/tag/common_tag|broader_altLabel|RDF/A +http://www.semanlink.net/tag/common_tag|broader_related|http://www.semanlink.net/tag/elias_torres +http://www.semanlink.net/tag/common_tag|broader_related|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanlink.net/tag/common_tag|broader_related|http://www.semanlink.net/tag/microformats +http://www.semanlink.net/tag/administration_francaise|creationTime|2007-09-25T22:00:20Z +http://www.semanlink.net/tag/administration_francaise|prefLabel|Administration française +http://www.semanlink.net/tag/administration_francaise|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/administration_francaise|broader|http://www.semanlink.net/tag/administration +http://www.semanlink.net/tag/administration_francaise|creationDate|2007-09-25 +http://www.semanlink.net/tag/administration_francaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/administration_francaise|uri|http://www.semanlink.net/tag/administration_francaise +http://www.semanlink.net/tag/administration_francaise|broader_prefLabel|France +http://www.semanlink.net/tag/administration_francaise|broader_prefLabel|Administration +http://www.semanlink.net/tag/automatic_tagging|creationTime|2012-03-23T22:34:01Z +http://www.semanlink.net/tag/automatic_tagging|prefLabel|Automatic tagging +http://www.semanlink.net/tag/automatic_tagging|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/automatic_tagging|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/automatic_tagging|creationDate|2012-03-23 +http://www.semanlink.net/tag/automatic_tagging|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/automatic_tagging|uri|http://www.semanlink.net/tag/automatic_tagging +http://www.semanlink.net/tag/automatic_tagging|broader_prefLabel|Tagging +http://www.semanlink.net/tag/automatic_tagging|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/wiktionnaire|creationTime|2021-04-12T17:16:20Z +http://www.semanlink.net/tag/wiktionnaire|prefLabel|Wiktionnaire +http://www.semanlink.net/tag/wiktionnaire|broader|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/wiktionnaire|related|http://www.semanlink.net/tag/wikidata +http://www.semanlink.net/tag/wiktionnaire|creationDate|2021-04-12 +http://www.semanlink.net/tag/wiktionnaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wiktionnaire|uri|http://www.semanlink.net/tag/wiktionnaire +http://www.semanlink.net/tag/wiktionnaire|broader_prefLabel|Wikipedia +http://www.semanlink.net/tag/wiktionnaire|broader_related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/pretrained_models|creationTime|2018-11-23T08:54:33Z +http://www.semanlink.net/tag/pretrained_models|prefLabel|Pretrained models +http://www.semanlink.net/tag/pretrained_models|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/pretrained_models|creationDate|2018-11-23 +http://www.semanlink.net/tag/pretrained_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pretrained_models|uri|http://www.semanlink.net/tag/pretrained_models 
+http://www.semanlink.net/tag/pretrained_models|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/scientific_information_extraction|creationTime|2018-11-04T18:10:51Z +http://www.semanlink.net/tag/scientific_information_extraction|prefLabel|Scientific information extraction +http://www.semanlink.net/tag/scientific_information_extraction|broader|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/scientific_information_extraction|creationDate|2018-11-04 +http://www.semanlink.net/tag/scientific_information_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/scientific_information_extraction|uri|http://www.semanlink.net/tag/scientific_information_extraction +http://www.semanlink.net/tag/scientific_information_extraction|broader_prefLabel|Information extraction +http://www.semanlink.net/tag/scientific_information_extraction|broader_related|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/tag/statistical_data|creationTime|2010-07-16T14:05:00Z +http://www.semanlink.net/tag/statistical_data|prefLabel|Statistical data +http://www.semanlink.net/tag/statistical_data|creationDate|2010-07-16 +http://www.semanlink.net/tag/statistical_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/statistical_data|uri|http://www.semanlink.net/tag/statistical_data +http://www.semanlink.net/tag/maladie|prefLabel|Maladie +http://www.semanlink.net/tag/maladie|broader|http://www.semanlink.net/tag/sante +http://www.semanlink.net/tag/maladie|broader|http://www.semanlink.net/tag/medecine +http://www.semanlink.net/tag/maladie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maladie|uri|http://www.semanlink.net/tag/maladie +http://www.semanlink.net/tag/maladie|broader_prefLabel|Santé +http://www.semanlink.net/tag/maladie|broader_prefLabel|Médecine +http://www.semanlink.net/tag/flash|creationTime|2013-08-23T15:00:30Z +http://www.semanlink.net/tag/flash|prefLabel|Flash +http://www.semanlink.net/tag/flash|creationDate|2013-08-23 +http://www.semanlink.net/tag/flash|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/flash|uri|http://www.semanlink.net/tag/flash +http://www.semanlink.net/tag/national_geographic|creationTime|2007-07-08T02:46:46Z +http://www.semanlink.net/tag/national_geographic|prefLabel|National Geographic +http://www.semanlink.net/tag/national_geographic|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/national_geographic|creationDate|2007-07-08 +http://www.semanlink.net/tag/national_geographic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/national_geographic|uri|http://www.semanlink.net/tag/national_geographic +http://www.semanlink.net/tag/national_geographic|broader_prefLabel|Géographie +http://www.semanlink.net/tag/python_install|creationTime|2017-05-28T18:59:04Z +http://www.semanlink.net/tag/python_install|prefLabel|Python install +http://www.semanlink.net/tag/python_install|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/python_install|broader|http://www.semanlink.net/tag/notes_d_install +http://www.semanlink.net/tag/python_install|creationDate|2017-05-28 +http://www.semanlink.net/tag/python_install|comment|"- download from python.org, install +- open ~/.bash_profile -- the new version is here, but also the older one. 
Remove it +- cf : + +``` +unlink /usr/local/bin/python +ln -s /usr/local/bin/python3.9 /usr/local/bin/python +``` +" +http://www.semanlink.net/tag/python_install|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/python_install|uri|http://www.semanlink.net/tag/python_install +http://www.semanlink.net/tag/python_install|broader_prefLabel|Python +http://www.semanlink.net/tag/python_install|broader_prefLabel|Notes d'install +http://www.semanlink.net/tag/richesses_sous_marines|prefLabel|Richesses sous-marines +http://www.semanlink.net/tag/richesses_sous_marines|broader|http://www.semanlink.net/tag/ocean +http://www.semanlink.net/tag/richesses_sous_marines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/richesses_sous_marines|uri|http://www.semanlink.net/tag/richesses_sous_marines +http://www.semanlink.net/tag/richesses_sous_marines|broader_prefLabel|Océan +http://www.semanlink.net/tag/quora_question_pairs|creationTime|2019-07-02T01:11:48Z +http://www.semanlink.net/tag/quora_question_pairs|prefLabel|Quora Question Pairs +http://www.semanlink.net/tag/quora_question_pairs|broader|http://www.semanlink.net/tag/nlp_datasets +http://www.semanlink.net/tag/quora_question_pairs|broader|http://www.semanlink.net/tag/quora +http://www.semanlink.net/tag/quora_question_pairs|broader|http://www.semanlink.net/tag/kaggle +http://www.semanlink.net/tag/quora_question_pairs|broader|http://www.semanlink.net/tag/duplicate_detection +http://www.semanlink.net/tag/quora_question_pairs|creationDate|2019-07-02 +http://www.semanlink.net/tag/quora_question_pairs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/quora_question_pairs|uri|http://www.semanlink.net/tag/quora_question_pairs +http://www.semanlink.net/tag/quora_question_pairs|broader_prefLabel|NLP datasets +http://www.semanlink.net/tag/quora_question_pairs|broader_prefLabel|Quora +http://www.semanlink.net/tag/quora_question_pairs|broader_prefLabel|Kaggle +http://www.semanlink.net/tag/quora_question_pairs|broader_prefLabel|Duplicate Detection +http://www.semanlink.net/tag/quora_question_pairs|broader_altLabel|Paraphrase identification +http://www.semanlink.net/tag/quora_question_pairs|broader_altLabel|Duplicate search +http://www.semanlink.net/tag/photo|creationTime|2017-10-28T10:29:28Z +http://www.semanlink.net/tag/photo|prefLabel|Photo +http://www.semanlink.net/tag/photo|broader|http://www.semanlink.net/tag/divers +http://www.semanlink.net/tag/photo|creationDate|2017-10-28 +http://www.semanlink.net/tag/photo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/photo|altLabel|Images +http://www.semanlink.net/tag/photo|uri|http://www.semanlink.net/tag/photo +http://www.semanlink.net/tag/photo|broader_prefLabel|Divers +http://www.semanlink.net/tag/obelisque|prefLabel|Obélisque +http://www.semanlink.net/tag/obelisque|broader|http://www.semanlink.net/tag/sculpture +http://www.semanlink.net/tag/obelisque|broader|http://www.semanlink.net/tag/architecture +http://www.semanlink.net/tag/obelisque|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/obelisque|uri|http://www.semanlink.net/tag/obelisque +http://www.semanlink.net/tag/obelisque|broader_prefLabel|Sculpture +http://www.semanlink.net/tag/obelisque|broader_prefLabel|Architecture +http://www.semanlink.net/tag/obelisque|broader_altLabel|Statuaire +http://www.semanlink.net/tag/citation|prefLabel|Quote 
+http://www.semanlink.net/tag/citation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/citation|altLabel|Citation +http://www.semanlink.net/tag/citation|uri|http://www.semanlink.net/tag/citation +http://www.semanlink.net/tag/emmanuel_ledinot|creationTime|2017-11-25T19:00:20Z +http://www.semanlink.net/tag/emmanuel_ledinot|prefLabel|Emmanuel Ledinot +http://www.semanlink.net/tag/emmanuel_ledinot|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/emmanuel_ledinot|related|http://www.semanlink.net/tag/lycee_alain +http://www.semanlink.net/tag/emmanuel_ledinot|creationDate|2017-11-25 +http://www.semanlink.net/tag/emmanuel_ledinot|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/emmanuel_ledinot|uri|http://www.semanlink.net/tag/emmanuel_ledinot +http://www.semanlink.net/tag/emmanuel_ledinot|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/emmanuel_ledinot|broader_altLabel|Technical guys +http://www.semanlink.net/tag/spotlight_osx|creationTime|2015-10-14T13:32:47Z +http://www.semanlink.net/tag/spotlight_osx|prefLabel|Spotlight (OSX) +http://www.semanlink.net/tag/spotlight_osx|broader|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/tag/spotlight_osx|creationDate|2015-10-14 +http://www.semanlink.net/tag/spotlight_osx|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/spotlight_osx|uri|http://www.semanlink.net/tag/spotlight_osx +http://www.semanlink.net/tag/spotlight_osx|broader_prefLabel|Mac OS X +http://www.semanlink.net/tag/spotlight_osx|broader_altLabel|OS X +http://www.semanlink.net/tag/spotlight_osx|broader_altLabel|OSX +http://www.semanlink.net/tag/rockart|prefLabel|Rockart +http://www.semanlink.net/tag/rockart|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/rockart|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rockart|uri|http://www.semanlink.net/tag/rockart +http://www.semanlink.net/tag/rockart|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/rockart|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/mythologie|creationTime|2007-09-05T00:39:53Z +http://www.semanlink.net/tag/mythologie|prefLabel|Mythologie +http://www.semanlink.net/tag/mythologie|creationDate|2007-09-05 +http://www.semanlink.net/tag/mythologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mythologie|uri|http://www.semanlink.net/tag/mythologie +http://www.semanlink.net/tag/arxiv_doc|creationTime|2020-04-22T01:39:53Z +http://www.semanlink.net/tag/arxiv_doc|prefLabel|Arxiv Doc +http://www.semanlink.net/tag/arxiv_doc|broader|http://www.semanlink.net/tag/favoris +http://www.semanlink.net/tag/arxiv_doc|related|http://www.semanlink.net/tag/arxiv +http://www.semanlink.net/tag/arxiv_doc|creationDate|2020-04-22 +http://www.semanlink.net/tag/arxiv_doc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arxiv_doc|uri|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/tag/arxiv_doc|broader_prefLabel|Favoris +http://www.semanlink.net/tag/arxiv_doc|broader_altLabel|favorites +http://www.semanlink.net/tag/photons_correles|prefLabel|Photons corrélés +http://www.semanlink.net/tag/photons_correles|broader|http://www.semanlink.net/tag/mecanique_quantique 
+http://www.semanlink.net/tag/photons_correles|broader|http://www.semanlink.net/tag/photon
+http://www.semanlink.net/tag/photons_correles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/photons_correles|uri|http://www.semanlink.net/tag/photons_correles
+http://www.semanlink.net/tag/photons_correles|broader_prefLabel|Mécanique quantique
+http://www.semanlink.net/tag/photons_correles|broader_prefLabel|Photon
+http://www.semanlink.net/tag/locality_sensitive_hashing|creationTime|2013-04-01T23:33:06Z
+http://www.semanlink.net/tag/locality_sensitive_hashing|prefLabel|Locality Sensitive Hashing
+http://www.semanlink.net/tag/locality_sensitive_hashing|broader|http://www.semanlink.net/tag/similarity_queries
+http://www.semanlink.net/tag/locality_sensitive_hashing|broader|http://www.semanlink.net/tag/big_data
+http://www.semanlink.net/tag/locality_sensitive_hashing|related|http://www.semanlink.net/tag/sparse_distributed_memory
+http://www.semanlink.net/tag/locality_sensitive_hashing|related|http://www.semanlink.net/tag/cluster_analysis
+http://www.semanlink.net/tag/locality_sensitive_hashing|related|http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data
+http://www.semanlink.net/tag/locality_sensitive_hashing|related|http://www.semanlink.net/tag/nearest_neighbor_search
+http://www.semanlink.net/tag/locality_sensitive_hashing|related|http://www.semanlink.net/tag/text_similarity
+http://www.semanlink.net/tag/locality_sensitive_hashing|related|http://www.semanlink.net/tag/dimensionality_reduction
+http://www.semanlink.net/tag/locality_sensitive_hashing|creationDate|2013-04-01
+http://www.semanlink.net/tag/locality_sensitive_hashing|comment|"Locality-sensitive hashing (LSH) reduces the dimensionality of high-dimensional data. LSH hashes input items so that similar items map to the same “buckets” with high probability (the number of buckets being much smaller than the number of possible input items). LSH has much in common with data clustering and [#nearest neighbor search](/tag/nearest_neighbor_search).
+
+LSH employs random linear projections (followed by random thresholding) to map data points close in a Euclidean space to similar codes.
+
+See Also [#sparse distributed memory](/tag/sparse_distributed_memory), associative memory
+"
+http://www.semanlink.net/tag/locality_sensitive_hashing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/locality_sensitive_hashing|describedBy|https://en.wikipedia.org/wiki/Locality-sensitive_hashing
+http://www.semanlink.net/tag/locality_sensitive_hashing|altLabel|LSH
+http://www.semanlink.net/tag/locality_sensitive_hashing|uri|http://www.semanlink.net/tag/locality_sensitive_hashing
+http://www.semanlink.net/tag/locality_sensitive_hashing|broader_prefLabel|Similarity queries
+http://www.semanlink.net/tag/locality_sensitive_hashing|broader_prefLabel|Big Data
+http://www.semanlink.net/tag/locality_sensitive_hashing|broader_altLabel|Vector similarity search
+http://www.semanlink.net/tag/locality_sensitive_hashing|broader_related|http://www.semanlink.net/tag/text_similarity
+http://www.semanlink.net/tag/locality_sensitive_hashing|broader_related|http://www.semanlink.net/tag/nosql
+http://www.semanlink.net/tag/diffa|creationTime|2008-06-20T23:54:36Z
+http://www.semanlink.net/tag/diffa|prefLabel|Diffa
+http://www.semanlink.net/tag/diffa|broader|http://www.semanlink.net/tag/niger
+http://www.semanlink.net/tag/diffa|creationDate|2008-06-20
+http://www.semanlink.net/tag/diffa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/diffa|uri|http://www.semanlink.net/tag/diffa
+http://www.semanlink.net/tag/diffa|broader_prefLabel|Niger
+http://www.semanlink.net/tag/diffa|broader_related|http://www.semanlink.net/tag/sahel
+http://www.semanlink.net/tag/diffa|broader_related|http://www.semanlink.net/tag/mali
+http://www.semanlink.net/tag/diffa|broader_related|http://www.semanlink.net/tag/burkina_faso
+http://www.semanlink.net/tag/rdf123|creationTime|2007-08-30T02:43:35Z
+http://www.semanlink.net/tag/rdf123|prefLabel|RDF123
+http://www.semanlink.net/tag/rdf123|broader|http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf
+http://www.semanlink.net/tag/rdf123|broader|http://www.semanlink.net/tag/excel_and_sw
+http://www.semanlink.net/tag/rdf123|creationDate|2007-08-30
+http://www.semanlink.net/tag/rdf123|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rdf123|homepage|http://ebiquity.umbc.edu/project/html/id/82/RDF123
+http://www.semanlink.net/tag/rdf123|uri|http://www.semanlink.net/tag/rdf123
+http://www.semanlink.net/tag/rdf123|broader_prefLabel|Mapping data from spreadsheets to RDF
+http://www.semanlink.net/tag/rdf123|broader_prefLabel|Excel and SW
+http://www.semanlink.net/tag/rdf123|broader_related|http://www.semanlink.net/tag/anzo
+http://www.semanlink.net/tag/genocide_rwandais|prefLabel|Génocide rwandais
+http://www.semanlink.net/tag/genocide_rwandais|broader|http://www.semanlink.net/tag/rwanda
+http://www.semanlink.net/tag/genocide_rwandais|broader|http://www.semanlink.net/tag/la_communaute_internationale_est_une_garce
+http://www.semanlink.net/tag/genocide_rwandais|broader|http://www.semanlink.net/tag/?and=genocide_rwandais&and=onu
+http://www.semanlink.net/tag/genocide_rwandais|broader|http://www.semanlink.net/tag/genocide
+http://www.semanlink.net/tag/genocide_rwandais|creationDate|2006-12-30
+http://www.semanlink.net/tag/genocide_rwandais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/genocide_rwandais|uri|http://www.semanlink.net/tag/genocide_rwandais
+http://www.semanlink.net/tag/genocide_rwandais|broader_prefLabel|Rwanda +http://www.semanlink.net/tag/genocide_rwandais|broader_prefLabel|La communauté internationale est une garce +http://www.semanlink.net/tag/genocide_rwandais|broader_prefLabel|Génocide +http://www.semanlink.net/tag/menace|creationTime|2010-05-28T00:54:33Z +http://www.semanlink.net/tag/menace|prefLabel|Menace +http://www.semanlink.net/tag/menace|creationDate|2010-05-28 +http://www.semanlink.net/tag/menace|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/menace|uri|http://www.semanlink.net/tag/menace +http://www.semanlink.net/tag/or|prefLabel|Or +http://www.semanlink.net/tag/or|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/or|uri|http://www.semanlink.net/tag/or +http://www.semanlink.net/tag/lucene|creationTime|2011-11-13T14:27:11Z +http://www.semanlink.net/tag/lucene|prefLabel|Lucene +http://www.semanlink.net/tag/lucene|broader|http://www.semanlink.net/tag/text_search +http://www.semanlink.net/tag/lucene|related|http://www.semanlink.net/tag/solr +http://www.semanlink.net/tag/lucene|creationDate|2011-11-13 +http://www.semanlink.net/tag/lucene|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lucene|uri|http://www.semanlink.net/tag/lucene +http://www.semanlink.net/tag/lucene|broader_prefLabel|Text Search +http://www.semanlink.net/tag/lucene|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/3d|prefLabel|3D +http://www.semanlink.net/tag/3d|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/3d|uri|http://www.semanlink.net/tag/3d +http://www.semanlink.net/tag/ghana|prefLabel|Ghana +http://www.semanlink.net/tag/ghana|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/ghana|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ghana|uri|http://www.semanlink.net/tag/ghana +http://www.semanlink.net/tag/ghana|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/maroc|prefLabel|Maroc +http://www.semanlink.net/tag/maroc|broader|http://www.semanlink.net/tag/afrique_du_nord +http://www.semanlink.net/tag/maroc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maroc|uri|http://www.semanlink.net/tag/maroc +http://www.semanlink.net/tag/maroc|broader_prefLabel|Afrique du Nord +http://www.semanlink.net/tag/richard_stallman|creationTime|2019-09-17T20:49:54Z +http://www.semanlink.net/tag/richard_stallman|prefLabel|Richard Stallman +http://www.semanlink.net/tag/richard_stallman|broader|http://www.semanlink.net/tag/logiciel_libre +http://www.semanlink.net/tag/richard_stallman|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/richard_stallman|related|http://www.semanlink.net/tag/mit +http://www.semanlink.net/tag/richard_stallman|creationDate|2019-09-17 +http://www.semanlink.net/tag/richard_stallman|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/richard_stallman|uri|http://www.semanlink.net/tag/richard_stallman +http://www.semanlink.net/tag/richard_stallman|broader_prefLabel|Logiciel libre +http://www.semanlink.net/tag/richard_stallman|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/richard_stallman|broader_altLabel|Technical guys +http://www.semanlink.net/tag/solar_storm|creationTime|2013-07-14T17:06:33Z 
+http://www.semanlink.net/tag/solar_storm|prefLabel|Solar storm +http://www.semanlink.net/tag/solar_storm|broader|http://www.semanlink.net/tag/soleil +http://www.semanlink.net/tag/solar_storm|creationDate|2013-07-14 +http://www.semanlink.net/tag/solar_storm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/solar_storm|uri|http://www.semanlink.net/tag/solar_storm +http://www.semanlink.net/tag/solar_storm|broader_prefLabel|Soleil +http://www.semanlink.net/tag/ai_girls_and_guys|creationTime|2017-06-13T00:49:47Z +http://www.semanlink.net/tag/ai_girls_and_guys|prefLabel|AI girls and guys +http://www.semanlink.net/tag/ai_girls_and_guys|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/ai_girls_and_guys|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_girls_and_guys|creationDate|2017-06-13 +http://www.semanlink.net/tag/ai_girls_and_guys|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_girls_and_guys|uri|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/ai_girls_and_guys|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/ai_girls_and_guys|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_girls_and_guys|broader_altLabel|Technical guys +http://www.semanlink.net/tag/ai_girls_and_guys|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_girls_and_guys|broader_altLabel|AI +http://www.semanlink.net/tag/ai_girls_and_guys|broader_altLabel|IA +http://www.semanlink.net/tag/ai_girls_and_guys|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/craig_venter_institute|creationTime|2007-09-13T22:11:35Z +http://www.semanlink.net/tag/craig_venter_institute|prefLabel|Craig Venter Institute +http://www.semanlink.net/tag/craig_venter_institute|broader|http://www.semanlink.net/tag/celera_ou_craig_venter +http://www.semanlink.net/tag/craig_venter_institute|creationDate|2007-09-13 +http://www.semanlink.net/tag/craig_venter_institute|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/craig_venter_institute|uri|http://www.semanlink.net/tag/craig_venter_institute +http://www.semanlink.net/tag/craig_venter_institute|broader_prefLabel|Celera ou Craig Venter +http://www.semanlink.net/tag/google_uber_alles|creationTime|2014-04-26T11:44:49Z +http://www.semanlink.net/tag/google_uber_alles|prefLabel|Google über alles +http://www.semanlink.net/tag/google_uber_alles|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_uber_alles|creationDate|2014-04-26 +http://www.semanlink.net/tag/google_uber_alles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_uber_alles|uri|http://www.semanlink.net/tag/google_uber_alles +http://www.semanlink.net/tag/google_uber_alles|broader_prefLabel|Google +http://www.semanlink.net/tag/google_uber_alles|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/social_software|prefLabel|Social software +http://www.semanlink.net/tag/social_software|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/social_software|uri|http://www.semanlink.net/tag/social_software +http://www.semanlink.net/tag/email_classification|creationTime|2015-10-20T11:31:52Z +http://www.semanlink.net/tag/email_classification|prefLabel|email classification 
+http://www.semanlink.net/tag/email_classification|broader|http://www.semanlink.net/tag/email +http://www.semanlink.net/tag/email_classification|broader|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/tag/email_classification|creationDate|2015-10-20 +http://www.semanlink.net/tag/email_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/email_classification|uri|http://www.semanlink.net/tag/email_classification +http://www.semanlink.net/tag/email_classification|broader_prefLabel|email +http://www.semanlink.net/tag/email_classification|broader_prefLabel|Text Classification +http://www.semanlink.net/tag/politique_et_environnement|creationTime|2021-09-19T10:21:06Z +http://www.semanlink.net/tag/politique_et_environnement|prefLabel|Politique et environnement +http://www.semanlink.net/tag/politique_et_environnement|broader|http://www.semanlink.net/tag/environnement +http://www.semanlink.net/tag/politique_et_environnement|related|http://www.semanlink.net/tag/verts +http://www.semanlink.net/tag/politique_et_environnement|creationDate|2021-09-19 +http://www.semanlink.net/tag/politique_et_environnement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/politique_et_environnement|uri|http://www.semanlink.net/tag/politique_et_environnement +http://www.semanlink.net/tag/politique_et_environnement|broader_prefLabel|Environnement +http://www.semanlink.net/tag/absurde|creationTime|2014-12-12T21:08:10Z +http://www.semanlink.net/tag/absurde|prefLabel|Absurde +http://www.semanlink.net/tag/absurde|creationDate|2014-12-12 +http://www.semanlink.net/tag/absurde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/absurde|uri|http://www.semanlink.net/tag/absurde +http://www.semanlink.net/tag/dietrich_schulten|creationTime|2015-02-19T14:50:42Z +http://www.semanlink.net/tag/dietrich_schulten|prefLabel|Dietrich Schulten +http://www.semanlink.net/tag/dietrich_schulten|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/dietrich_schulten|related|http://www.semanlink.net/tag/public_hydra_w3_org +http://www.semanlink.net/tag/dietrich_schulten|creationDate|2015-02-19 +http://www.semanlink.net/tag/dietrich_schulten|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dietrich_schulten|uri|http://www.semanlink.net/tag/dietrich_schulten +http://www.semanlink.net/tag/dietrich_schulten|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/dietrich_schulten|broader_altLabel|Technical guys +http://www.semanlink.net/tag/nlp_automotive|creationTime|2019-12-07T16:48:09Z +http://www.semanlink.net/tag/nlp_automotive|prefLabel|NLP+Automotive +http://www.semanlink.net/tag/nlp_automotive|broader|http://www.semanlink.net/tag/nlp_in_enterprise +http://www.semanlink.net/tag/nlp_automotive|broader|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/tag/nlp_automotive|creationDate|2019-12-07 +http://www.semanlink.net/tag/nlp_automotive|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_automotive|uri|http://www.semanlink.net/tag/nlp_automotive +http://www.semanlink.net/tag/nlp_automotive|broader_prefLabel|NLP in enterprise +http://www.semanlink.net/tag/nlp_automotive|broader_prefLabel|NLP: use cases +http://www.semanlink.net/tag/nlp_automotive|broader_altLabel|NLP: applications 
+http://www.semanlink.net/tag/nlp_automotive|broader_related|http://www.semanlink.net/tag/enterprise_knowledge_graph +http://www.semanlink.net/tag/nicolas_hulot|prefLabel|Nicolas Hulot +http://www.semanlink.net/tag/nicolas_hulot|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/nicolas_hulot|broader|http://www.semanlink.net/tag/television +http://www.semanlink.net/tag/nicolas_hulot|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nicolas_hulot|uri|http://www.semanlink.net/tag/nicolas_hulot +http://www.semanlink.net/tag/nicolas_hulot|broader_prefLabel|Écologie +http://www.semanlink.net/tag/nicolas_hulot|broader_prefLabel|Télévision +http://www.semanlink.net/tag/nicolas_hulot|broader_altLabel|TV +http://www.semanlink.net/tag/patrimoine|creationTime|2011-09-01T11:40:16Z +http://www.semanlink.net/tag/patrimoine|prefLabel|Patrimoine +http://www.semanlink.net/tag/patrimoine|creationDate|2011-09-01 +http://www.semanlink.net/tag/patrimoine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/patrimoine|uri|http://www.semanlink.net/tag/patrimoine +http://www.semanlink.net/tag/probabilistic_graphical_models|creationTime|2013-05-23T08:41:46Z +http://www.semanlink.net/tag/probabilistic_graphical_models|prefLabel|Probabilistic Graphical Models +http://www.semanlink.net/tag/probabilistic_graphical_models|broader|http://www.semanlink.net/tag/uncertainty_reasoning +http://www.semanlink.net/tag/probabilistic_graphical_models|creationDate|2013-05-23 +http://www.semanlink.net/tag/probabilistic_graphical_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/probabilistic_graphical_models|uri|http://www.semanlink.net/tag/probabilistic_graphical_models +http://www.semanlink.net/tag/probabilistic_graphical_models|broader_prefLabel|Uncertainty Reasoning +http://www.semanlink.net/tag/www_2013|creationTime|2013-03-02T16:45:19Z +http://www.semanlink.net/tag/www_2013|prefLabel|WWW 2013 +http://www.semanlink.net/tag/www_2013|broader|http://www.semanlink.net/tag/rio_de_janeiro +http://www.semanlink.net/tag/www_2013|broader|http://www.semanlink.net/tag/j_y_etais +http://www.semanlink.net/tag/www_2013|broader|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/www_2013|creationDate|2013-03-02 +http://www.semanlink.net/tag/www_2013|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/www_2013|homepage|http://www2013.org +http://www.semanlink.net/tag/www_2013|uri|http://www.semanlink.net/tag/www_2013 +http://www.semanlink.net/tag/www_2013|broader_prefLabel|Rio de Janeiro +http://www.semanlink.net/tag/www_2013|broader_prefLabel|J'y étais +http://www.semanlink.net/tag/www_2013|broader_prefLabel|TheWebConf +http://www.semanlink.net/tag/www_2013|broader_altLabel|WWW Conference +http://www.semanlink.net/tag/biologie|prefLabel|Biology +http://www.semanlink.net/tag/biologie|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/biologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/biologie|altLabel|Biologie +http://www.semanlink.net/tag/biologie|uri|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/biologie|broader_prefLabel|Science +http://www.semanlink.net/tag/biologie|broader_altLabel|sciences +http://www.semanlink.net/tag/mit|prefLabel|MIT +http://www.semanlink.net/tag/mit|broader|http://www.semanlink.net/tag/universites_americaines 
+http://www.semanlink.net/tag/mit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mit|uri|http://www.semanlink.net/tag/mit +http://www.semanlink.net/tag/mit|broader_prefLabel|Universités américaines +http://www.semanlink.net/tag/knowledge_graph_ml|creationTime|2021-06-10T16:02:42Z +http://www.semanlink.net/tag/knowledge_graph_ml|prefLabel|Knowledge Graph + ML +http://www.semanlink.net/tag/knowledge_graph_ml|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/knowledge_graph_ml|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/knowledge_graph_ml|creationDate|2021-06-10 +http://www.semanlink.net/tag/knowledge_graph_ml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_graph_ml|uri|http://www.semanlink.net/tag/knowledge_graph_ml +http://www.semanlink.net/tag/knowledge_graph_ml|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/knowledge_graph_ml|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/knowledge_graph_ml|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/knowledge_graph_ml|broader_altLabel|KG +http://www.semanlink.net/tag/knowledge_graph_ml|broader_altLabel|ML +http://www.semanlink.net/tag/knowledge_graph_ml|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/knowledge_graph_ml|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/makolab|creationTime|2011-02-16T13:22:40Z +http://www.semanlink.net/tag/makolab|prefLabel|Makolab +http://www.semanlink.net/tag/makolab|broader|http://www.semanlink.net/tag/pologne +http://www.semanlink.net/tag/makolab|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/makolab|related|http://www.semanlink.net/tag/c2g +http://www.semanlink.net/tag/makolab|creationDate|2011-02-16 +http://www.semanlink.net/tag/makolab|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/makolab|describedBy|http://www.makolab.com/ +http://www.semanlink.net/tag/makolab|uri|http://www.semanlink.net/tag/makolab +http://www.semanlink.net/tag/makolab|broader_prefLabel|Pologne +http://www.semanlink.net/tag/makolab|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/cross_origin_resource_sharing|creationTime|2011-06-28T16:13:04Z +http://www.semanlink.net/tag/cross_origin_resource_sharing|prefLabel|Cross-Origin Resource Sharing +http://www.semanlink.net/tag/cross_origin_resource_sharing|broader|http://www.semanlink.net/tag/cross_domain_data_fetching +http://www.semanlink.net/tag/cross_origin_resource_sharing|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/cross_origin_resource_sharing|related|http://www.semanlink.net/tag/jsonp +http://www.semanlink.net/tag/cross_origin_resource_sharing|creationDate|2011-06-28 +http://www.semanlink.net/tag/cross_origin_resource_sharing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cross_origin_resource_sharing|describedBy|https://en.wikipedia.org/wiki/Cross-Origin_Resource_Sharing +http://www.semanlink.net/tag/cross_origin_resource_sharing|altLabel|CORS +http://www.semanlink.net/tag/cross_origin_resource_sharing|uri|http://www.semanlink.net/tag/cross_origin_resource_sharing +http://www.semanlink.net/tag/cross_origin_resource_sharing|broader_prefLabel|cross-domain data fetching +http://www.semanlink.net/tag/cross_origin_resource_sharing|broader_prefLabel|JavaScript 
+http://www.semanlink.net/tag/cross_origin_resource_sharing|broader_altLabel|js +http://www.semanlink.net/tag/dimitris|creationTime|2007-10-14T23:31:00Z +http://www.semanlink.net/tag/dimitris|prefLabel|Dimitris +http://www.semanlink.net/tag/dimitris|related|http://www.semanlink.net/tag/grece +http://www.semanlink.net/tag/dimitris|related|http://www.semanlink.net/tag/sylvain +http://www.semanlink.net/tag/dimitris|creationDate|2007-10-14 +http://www.semanlink.net/tag/dimitris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dimitris|uri|http://www.semanlink.net/tag/dimitris +http://www.semanlink.net/tag/public_hydra_w3_org|creationTime|2015-02-14T09:55:34Z +http://www.semanlink.net/tag/public_hydra_w3_org|prefLabel|public-hydra@w3.org +http://www.semanlink.net/tag/public_hydra_w3_org|broader|http://www.semanlink.net/tag/mailing_list +http://www.semanlink.net/tag/public_hydra_w3_org|broader|http://www.semanlink.net/tag/hydra +http://www.semanlink.net/tag/public_hydra_w3_org|creationDate|2015-02-14 +http://www.semanlink.net/tag/public_hydra_w3_org|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/public_hydra_w3_org|homepage|http://lists.w3.org/Archives/Public/public-hydra/ +http://www.semanlink.net/tag/public_hydra_w3_org|uri|http://www.semanlink.net/tag/public_hydra_w3_org +http://www.semanlink.net/tag/public_hydra_w3_org|broader_prefLabel|Mailing list +http://www.semanlink.net/tag/public_hydra_w3_org|broader_prefLabel|Hydra +http://www.semanlink.net/tag/public_hydra_w3_org|broader_related|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/osiris_rex|creationTime|2020-10-26T23:39:10Z +http://www.semanlink.net/tag/osiris_rex|prefLabel|OSIRIS-REx +http://www.semanlink.net/tag/osiris_rex|broader|http://www.semanlink.net/tag/asteroide +http://www.semanlink.net/tag/osiris_rex|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/osiris_rex|broader|http://www.semanlink.net/tag/nasa +http://www.semanlink.net/tag/osiris_rex|creationDate|2020-10-26 +http://www.semanlink.net/tag/osiris_rex|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/osiris_rex|uri|http://www.semanlink.net/tag/osiris_rex +http://www.semanlink.net/tag/osiris_rex|broader_prefLabel|Astéroïde +http://www.semanlink.net/tag/osiris_rex|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/osiris_rex|broader_prefLabel|NASA +http://www.semanlink.net/tag/bureaucratie|creationTime|2020-06-06T13:56:04Z +http://www.semanlink.net/tag/bureaucratie|prefLabel|Bureaucratie +http://www.semanlink.net/tag/bureaucratie|creationDate|2020-06-06 +http://www.semanlink.net/tag/bureaucratie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bureaucratie|uri|http://www.semanlink.net/tag/bureaucratie +http://www.semanlink.net/tag/iphone_app|creationTime|2011-12-28T13:14:34Z +http://www.semanlink.net/tag/iphone_app|prefLabel|iphone app +http://www.semanlink.net/tag/iphone_app|broader|http://www.semanlink.net/tag/iphone +http://www.semanlink.net/tag/iphone_app|creationDate|2011-12-28 +http://www.semanlink.net/tag/iphone_app|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/iphone_app|uri|http://www.semanlink.net/tag/iphone_app +http://www.semanlink.net/tag/iphone_app|broader_prefLabel|iphone +http://www.semanlink.net/tag/spritz|creationTime|2014-03-01T15:08:13Z 
+http://www.semanlink.net/tag/spritz|prefLabel|Spritz +http://www.semanlink.net/tag/spritz|broader|http://www.semanlink.net/tag/gui +http://www.semanlink.net/tag/spritz|broader|http://www.semanlink.net/tag/reading +http://www.semanlink.net/tag/spritz|creationDate|2014-03-01 +http://www.semanlink.net/tag/spritz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/spritz|uri|http://www.semanlink.net/tag/spritz +http://www.semanlink.net/tag/spritz|broader_prefLabel|GUI +http://www.semanlink.net/tag/spritz|broader_prefLabel|Reading +http://www.semanlink.net/tag/burkina_faso|prefLabel|Burkina Faso +http://www.semanlink.net/tag/burkina_faso|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/burkina_faso|broader|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/burkina_faso|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/burkina_faso|uri|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/burkina_faso|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/burkina_faso|broader_prefLabel|Sahel +http://www.semanlink.net/tag/eccenca|creationTime|2021-05-19T14:14:18Z +http://www.semanlink.net/tag/eccenca|prefLabel|eccenca +http://www.semanlink.net/tag/eccenca|broader|http://www.semanlink.net/tag/enterprise_knowledge_graph +http://www.semanlink.net/tag/eccenca|broader|http://www.semanlink.net/tag/enterprise_knowledge_graph_platform +http://www.semanlink.net/tag/eccenca|related|http://www.semanlink.net/tag/marcel_frohlich +http://www.semanlink.net/tag/eccenca|creationDate|2021-05-19 +http://www.semanlink.net/tag/eccenca|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eccenca|homepage|https://eccenca.com/ +http://www.semanlink.net/tag/eccenca|uri|http://www.semanlink.net/tag/eccenca +http://www.semanlink.net/tag/eccenca|broader_prefLabel|Enterprise Knowledge Graph +http://www.semanlink.net/tag/eccenca|broader_prefLabel|Enterprise Knowledge Graph Platform +http://www.semanlink.net/tag/eccenca|broader_related|http://www.semanlink.net/tag/cognitive_search +http://www.semanlink.net/tag/les_petites_cases|prefLabel|Les petites cases +http://www.semanlink.net/tag/les_petites_cases|broader|http://www.semanlink.net/tag/gautier_poupeau +http://www.semanlink.net/tag/les_petites_cases|broader|http://www.semanlink.net/tag/semantic_web_blog +http://www.semanlink.net/tag/les_petites_cases|type|http://rdfs.org/sioc/types#Weblog +http://www.semanlink.net/tag/les_petites_cases|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/les_petites_cases|uri|http://www.semanlink.net/tag/les_petites_cases +http://www.semanlink.net/tag/les_petites_cases|broader_prefLabel|Gautier Poupeau +http://www.semanlink.net/tag/les_petites_cases|broader_prefLabel|Semantic Web blog +http://www.semanlink.net/tag/les_petites_cases|broader_related|http://www.semanlink.net/tag/atos_origin +http://www.semanlink.net/tag/digital_audio|creationTime|2012-12-30T00:32:52Z +http://www.semanlink.net/tag/digital_audio|prefLabel|Digital Audio +http://www.semanlink.net/tag/digital_audio|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/digital_audio|creationDate|2012-12-30 +http://www.semanlink.net/tag/digital_audio|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/digital_audio|uri|http://www.semanlink.net/tag/digital_audio 
+http://www.semanlink.net/tag/digital_audio|broader_prefLabel|Musique +http://www.semanlink.net/tag/digital_audio|broader_altLabel|Music +http://www.semanlink.net/tag/moussa_poussi|prefLabel|Moussa Poussi +http://www.semanlink.net/tag/moussa_poussi|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/moussa_poussi|broader|http://www.semanlink.net/tag/musique_du_niger +http://www.semanlink.net/tag/moussa_poussi|type|http://purl.org/ontology/mo/MusicArtist +http://www.semanlink.net/tag/moussa_poussi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/moussa_poussi|homepage|http://moussapoussy.planeteafrique.com +http://www.semanlink.net/tag/moussa_poussi|altLabel|Moussa Poussy +http://www.semanlink.net/tag/moussa_poussi|linkToMusicBrainz|http://musicbrainz.org/artist/19cab779-ccec-46fc-b1ac-1b4e3cbcb848.html +http://www.semanlink.net/tag/moussa_poussi|uri|http://www.semanlink.net/tag/moussa_poussi +http://www.semanlink.net/tag/moussa_poussi|broader_prefLabel|Musicien +http://www.semanlink.net/tag/moussa_poussi|broader_prefLabel|Musique du Niger +http://www.semanlink.net/tag/imbalanced_data|creationTime|2018-10-27T23:52:58Z +http://www.semanlink.net/tag/imbalanced_data|prefLabel|Imbalanced Data +http://www.semanlink.net/tag/imbalanced_data|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/imbalanced_data|creationDate|2018-10-27 +http://www.semanlink.net/tag/imbalanced_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/imbalanced_data|uri|http://www.semanlink.net/tag/imbalanced_data +http://www.semanlink.net/tag/imbalanced_data|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/londres|prefLabel|Londres +http://www.semanlink.net/tag/londres|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/londres|broader|http://www.semanlink.net/tag/royaume_uni +http://www.semanlink.net/tag/londres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/londres|uri|http://www.semanlink.net/tag/londres +http://www.semanlink.net/tag/londres|broader_prefLabel|Ville +http://www.semanlink.net/tag/londres|broader_prefLabel|Royaume Uni +http://www.semanlink.net/tag/londres|broader_altLabel|UK +http://www.semanlink.net/tag/disparition_des_abeilles|creationTime|2019-01-22T23:36:49Z +http://www.semanlink.net/tag/disparition_des_abeilles|prefLabel|Disparition des abeilles +http://www.semanlink.net/tag/disparition_des_abeilles|broader|http://www.semanlink.net/tag/abeille +http://www.semanlink.net/tag/disparition_des_abeilles|broader|http://www.semanlink.net/tag/insect_collapse +http://www.semanlink.net/tag/disparition_des_abeilles|broader|http://www.semanlink.net/tag/catastrophe_ecologique +http://www.semanlink.net/tag/disparition_des_abeilles|creationDate|2019-01-22 +http://www.semanlink.net/tag/disparition_des_abeilles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/disparition_des_abeilles|uri|http://www.semanlink.net/tag/disparition_des_abeilles +http://www.semanlink.net/tag/disparition_des_abeilles|broader_prefLabel|Abeille +http://www.semanlink.net/tag/disparition_des_abeilles|broader_prefLabel|Insect collapse +http://www.semanlink.net/tag/disparition_des_abeilles|broader_prefLabel|Catastrophe écologique +http://www.semanlink.net/tag/disparition_des_abeilles|broader_altLabel|Désastre écologique +http://www.semanlink.net/tag/drm|prefLabel|DRM 
+http://www.semanlink.net/tag/drm|broader|http://www.semanlink.net/tag/content_industries +http://www.semanlink.net/tag/drm|broader|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.semanlink.net/tag/drm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/drm|uri|http://www.semanlink.net/tag/drm +http://www.semanlink.net/tag/drm|broader_prefLabel|Content industries +http://www.semanlink.net/tag/drm|broader_prefLabel|Propriété intellectuelle +http://www.semanlink.net/tag/carte|prefLabel|Carte +http://www.semanlink.net/tag/carte|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/carte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/carte|uri|http://www.semanlink.net/tag/carte +http://www.semanlink.net/tag/carte|broader_prefLabel|Géographie +http://www.semanlink.net/tag/lod_cloud|creationTime|2011-09-23T09:58:07Z +http://www.semanlink.net/tag/lod_cloud|prefLabel|LOD cloud +http://www.semanlink.net/tag/lod_cloud|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/lod_cloud|related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/lod_cloud|creationDate|2011-09-23 +http://www.semanlink.net/tag/lod_cloud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lod_cloud|homepage|http://richard.cyganiak.de/2007/10/lod/ +http://www.semanlink.net/tag/lod_cloud|uri|http://www.semanlink.net/tag/lod_cloud +http://www.semanlink.net/tag/lod_cloud|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/lod_cloud|broader_altLabel|LOD +http://www.semanlink.net/tag/lod_cloud|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/lod_cloud|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/lod_cloud|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/lod_cloud|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/grant_ingersoll|creationTime|2014-03-15T13:46:41Z +http://www.semanlink.net/tag/grant_ingersoll|prefLabel|Grant Ingersoll +http://www.semanlink.net/tag/grant_ingersoll|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/grant_ingersoll|related|http://www.semanlink.net/tag/apache_mahout +http://www.semanlink.net/tag/grant_ingersoll|creationDate|2014-03-15 +http://www.semanlink.net/tag/grant_ingersoll|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grant_ingersoll|uri|http://www.semanlink.net/tag/grant_ingersoll +http://www.semanlink.net/tag/grant_ingersoll|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/grant_ingersoll|broader_altLabel|Technical guys +http://www.semanlink.net/tag/nlp_amazon|creationTime|2021-01-10T19:29:36Z +http://www.semanlink.net/tag/nlp_amazon|prefLabel|NLP@Amazon +http://www.semanlink.net/tag/nlp_amazon|broader|http://www.semanlink.net/tag/ai_amazon +http://www.semanlink.net/tag/nlp_amazon|creationDate|2021-01-10 +http://www.semanlink.net/tag/nlp_amazon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_amazon|uri|http://www.semanlink.net/tag/nlp_amazon +http://www.semanlink.net/tag/nlp_amazon|broader_prefLabel|AI@Amazon +http://www.semanlink.net/tag/xbrl|creationTime|2010-07-01T17:09:35Z +http://www.semanlink.net/tag/xbrl|prefLabel|XBRL 
+http://www.semanlink.net/tag/xbrl|broader|http://www.semanlink.net/tag/financial_data +http://www.semanlink.net/tag/xbrl|creationDate|2010-07-01 +http://www.semanlink.net/tag/xbrl|comment|eXtensible Business Reporting Language +http://www.semanlink.net/tag/xbrl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xbrl|describedBy|https://en.wikipedia.org/wiki/XBRL +http://www.semanlink.net/tag/xbrl|uri|http://www.semanlink.net/tag/xbrl +http://www.semanlink.net/tag/xbrl|broader_prefLabel|Financial Data +http://www.semanlink.net/tag/octo|creationTime|2017-03-16T10:25:08Z +http://www.semanlink.net/tag/octo|prefLabel|OCTO +http://www.semanlink.net/tag/octo|related|http://www.semanlink.net/tag/christian_faure +http://www.semanlink.net/tag/octo|creationDate|2017-03-16 +http://www.semanlink.net/tag/octo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/octo|uri|http://www.semanlink.net/tag/octo +http://www.semanlink.net/tag/about_semanlink|prefLabel|About Semanlink +http://www.semanlink.net/tag/about_semanlink|broader|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/about_semanlink|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/about_semanlink|uri|http://www.semanlink.net/tag/about_semanlink +http://www.semanlink.net/tag/about_semanlink|broader_prefLabel|Semanlink +http://www.semanlink.net/tag/about_semanlink|broader_altLabel|SL +http://www.semanlink.net/tag/about_semanlink|broader_related|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/vie_extraterrestre|prefLabel|Vie extraterrestre +http://www.semanlink.net/tag/vie_extraterrestre|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/vie_extraterrestre|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/vie_extraterrestre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vie_extraterrestre|uri|http://www.semanlink.net/tag/vie_extraterrestre +http://www.semanlink.net/tag/vie_extraterrestre|broader_prefLabel|Biology +http://www.semanlink.net/tag/vie_extraterrestre|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/vie_extraterrestre|broader_altLabel|Biologie +http://www.semanlink.net/tag/brad_pitt|creationTime|2020-04-17T19:14:59Z +http://www.semanlink.net/tag/brad_pitt|prefLabel|Brad Pitt +http://www.semanlink.net/tag/brad_pitt|broader|http://www.semanlink.net/tag/acteur +http://www.semanlink.net/tag/brad_pitt|creationDate|2020-04-17 +http://www.semanlink.net/tag/brad_pitt|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/brad_pitt|describedBy|https://en.wikipedia.org/wiki/Brad_Pitt +http://www.semanlink.net/tag/brad_pitt|uri|http://www.semanlink.net/tag/brad_pitt +http://www.semanlink.net/tag/brad_pitt|broader_prefLabel|Acteur +http://www.semanlink.net/tag/nick_clegg|creationTime|2010-05-25T13:25:05Z +http://www.semanlink.net/tag/nick_clegg|prefLabel|Nick Clegg +http://www.semanlink.net/tag/nick_clegg|broader|http://www.semanlink.net/tag/grande_bretagne +http://www.semanlink.net/tag/nick_clegg|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/nick_clegg|creationDate|2010-05-25 +http://www.semanlink.net/tag/nick_clegg|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nick_clegg|describedBy|https://en.wikipedia.org/wiki/Nick_Clegg 
+http://www.semanlink.net/tag/nick_clegg|uri|http://www.semanlink.net/tag/nick_clegg +http://www.semanlink.net/tag/nick_clegg|broader_prefLabel|Grande-Bretagne +http://www.semanlink.net/tag/nick_clegg|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/voile_mer|creationTime|2021-01-26T00:10:47Z +http://www.semanlink.net/tag/voile_mer|prefLabel|Voile (bateau) +http://www.semanlink.net/tag/voile_mer|broader|http://www.semanlink.net/tag/mer +http://www.semanlink.net/tag/voile_mer|creationDate|2021-01-26 +http://www.semanlink.net/tag/voile_mer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/voile_mer|uri|http://www.semanlink.net/tag/voile_mer +http://www.semanlink.net/tag/voile_mer|broader_prefLabel|Mer +http://www.semanlink.net/tag/death_of_hyperlink|creationTime|2015-11-11T20:02:17Z +http://www.semanlink.net/tag/death_of_hyperlink|prefLabel|Death of Hyperlink +http://www.semanlink.net/tag/death_of_hyperlink|broader|http://www.semanlink.net/tag/hyperlinks +http://www.semanlink.net/tag/death_of_hyperlink|creationDate|2015-11-11 +http://www.semanlink.net/tag/death_of_hyperlink|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/death_of_hyperlink|uri|http://www.semanlink.net/tag/death_of_hyperlink +http://www.semanlink.net/tag/death_of_hyperlink|broader_prefLabel|Hyperlinks +http://www.semanlink.net/tag/court_metrage|creationTime|2017-06-03T17:33:50Z +http://www.semanlink.net/tag/court_metrage|prefLabel|Court métrage +http://www.semanlink.net/tag/court_metrage|broader|http://www.semanlink.net/tag/cinema +http://www.semanlink.net/tag/court_metrage|creationDate|2017-06-03 +http://www.semanlink.net/tag/court_metrage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/court_metrage|uri|http://www.semanlink.net/tag/court_metrage +http://www.semanlink.net/tag/court_metrage|broader_prefLabel|Cinéma +http://www.semanlink.net/tag/sourceforge|prefLabel|SourceForge +http://www.semanlink.net/tag/sourceforge|broader|http://www.semanlink.net/tag/software +http://www.semanlink.net/tag/sourceforge|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/sourceforge|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sourceforge|uri|http://www.semanlink.net/tag/sourceforge +http://www.semanlink.net/tag/sourceforge|broader_prefLabel|Software +http://www.semanlink.net/tag/sourceforge|broader_prefLabel|Open Source +http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp|creationTime|2017-12-06T16:58:58Z +http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp|prefLabel|Combining text and structured data (ML-NLP) +http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp|broader|http://www.semanlink.net/tag/features_machine_learning +http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp|creationDate|2017-12-06 +http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp|uri|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp +http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp|broader_prefLabel|Features (Machine Learning) 
+http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/blogmarks|prefLabel|blogmarks +http://www.semanlink.net/tag/blogmarks|broader|http://www.semanlink.net/tag/social_bookmarking +http://www.semanlink.net/tag/blogmarks|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/blogmarks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blogmarks|uri|http://www.semanlink.net/tag/blogmarks +http://www.semanlink.net/tag/blogmarks|broader_prefLabel|Social bookmarking +http://www.semanlink.net/tag/blogmarks|broader_prefLabel|Tagging +http://www.semanlink.net/tag/indifference|prefLabel|Indifférence +http://www.semanlink.net/tag/indifference|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/indifference|uri|http://www.semanlink.net/tag/indifference +http://www.semanlink.net/tag/kg_tasks|creationTime|2020-08-30T19:18:48Z +http://www.semanlink.net/tag/kg_tasks|prefLabel|KG: tasks +http://www.semanlink.net/tag/kg_tasks|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/kg_tasks|creationDate|2020-08-30 +http://www.semanlink.net/tag/kg_tasks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kg_tasks|altLabel|Knowledge graphs: tasks +http://www.semanlink.net/tag/kg_tasks|uri|http://www.semanlink.net/tag/kg_tasks +http://www.semanlink.net/tag/kg_tasks|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/kg_tasks|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/kg_tasks|broader_altLabel|KG +http://www.semanlink.net/tag/rembrandt|creationTime|2011-01-11T01:07:50Z +http://www.semanlink.net/tag/rembrandt|prefLabel|Rembrandt +http://www.semanlink.net/tag/rembrandt|broader|http://www.semanlink.net/tag/peintre +http://www.semanlink.net/tag/rembrandt|creationDate|2011-01-11 +http://www.semanlink.net/tag/rembrandt|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rembrandt|describedBy|https://en.wikipedia.org/wiki/Rembrandt_Harmenszoon_van_Rijn +http://www.semanlink.net/tag/rembrandt|uri|http://www.semanlink.net/tag/rembrandt +http://www.semanlink.net/tag/rembrandt|broader_prefLabel|Peintre +http://www.semanlink.net/tag/facebook_cambridge_analytica|creationTime|2018-03-21T09:32:13Z +http://www.semanlink.net/tag/facebook_cambridge_analytica|prefLabel|Cambridge Analytica +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader|http://www.semanlink.net/tag/personal_data +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader|http://www.semanlink.net/tag/dark_side_of_tech +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader|http://www.semanlink.net/tag/deletefb +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader|http://www.semanlink.net/tag/social_manipulation +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader|http://www.semanlink.net/tag/privacy_and_internet +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/facebook_cambridge_analytica|creationDate|2018-03-21 +http://www.semanlink.net/tag/facebook_cambridge_analytica|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/facebook_cambridge_analytica|uri|http://www.semanlink.net/tag/facebook_cambridge_analytica 
+http://www.semanlink.net/tag/facebook_cambridge_analytica|broader_prefLabel|Personal data +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader_prefLabel|Dark side of Tech +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader_prefLabel|DeleteFB +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader_prefLabel|Social manipulation +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader_prefLabel|Privacy and internet +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader_prefLabel|Facebook +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader_altLabel|FB +http://www.semanlink.net/tag/facebook_cambridge_analytica|broader_related|http://www.semanlink.net/tag/cybersurveillance +http://www.semanlink.net/tag/coding|creationTime|2017-08-18T12:35:10Z +http://www.semanlink.net/tag/coding|prefLabel|Coding +http://www.semanlink.net/tag/coding|broader|http://www.semanlink.net/tag/software +http://www.semanlink.net/tag/coding|creationDate|2017-08-18 +http://www.semanlink.net/tag/coding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coding|uri|http://www.semanlink.net/tag/coding +http://www.semanlink.net/tag/coding|broader_prefLabel|Software +http://www.semanlink.net/tag/francis_pisani|creationTime|2013-08-25T12:48:24Z +http://www.semanlink.net/tag/francis_pisani|prefLabel|Francis Pisani +http://www.semanlink.net/tag/francis_pisani|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/francis_pisani|creationDate|2013-08-25 +http://www.semanlink.net/tag/francis_pisani|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/francis_pisani|uri|http://www.semanlink.net/tag/francis_pisani +http://www.semanlink.net/tag/francis_pisani|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/francis_pisani|broader_altLabel|Technical guys +http://www.semanlink.net/tag/visually_rich_documents|creationTime|2020-06-16T02:10:28Z +http://www.semanlink.net/tag/visually_rich_documents|prefLabel|Visually rich documents +http://www.semanlink.net/tag/visually_rich_documents|broader|http://www.semanlink.net/tag/2d_nlp +http://www.semanlink.net/tag/visually_rich_documents|creationDate|2020-06-16 +http://www.semanlink.net/tag/visually_rich_documents|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/visually_rich_documents|uri|http://www.semanlink.net/tag/visually_rich_documents +http://www.semanlink.net/tag/visually_rich_documents|broader_prefLabel|2D-NLP +http://www.semanlink.net/tag/concept_hierarchies|creationTime|2021-09-20T18:23:55Z +http://www.semanlink.net/tag/concept_hierarchies|prefLabel|Concept hierarchies +http://www.semanlink.net/tag/concept_hierarchies|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/concept_hierarchies|related|http://www.semanlink.net/tag/skos +http://www.semanlink.net/tag/concept_hierarchies|creationDate|2021-09-20 +http://www.semanlink.net/tag/concept_hierarchies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/concept_hierarchies|uri|http://www.semanlink.net/tag/concept_hierarchies +http://www.semanlink.net/tag/concept_hierarchies|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/semantic_web_company|creationTime|2008-03-04T23:04:28Z +http://www.semanlink.net/tag/semantic_web_company|prefLabel|Semantic web company 
+http://www.semanlink.net/tag/semantic_web_company|broader|http://www.semanlink.net/tag/tech_company +http://www.semanlink.net/tag/semantic_web_company|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_company|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/semantic_web_company|creationDate|2008-03-04 +http://www.semanlink.net/tag/semantic_web_company|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_company|altLabel|Semantic Web : entreprise +http://www.semanlink.net/tag/semantic_web_company|altLabel|Semantic Web: enterprise +http://www.semanlink.net/tag/semantic_web_company|uri|http://www.semanlink.net/tag/semantic_web_company +http://www.semanlink.net/tag/semantic_web_company|broader_prefLabel|Tech company +http://www.semanlink.net/tag/semantic_web_company|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_company|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/semantic_web_company|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_company|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/drones|prefLabel|Drones +http://www.semanlink.net/tag/drones|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/drones|uri|http://www.semanlink.net/tag/drones +http://www.semanlink.net/tag/pierres_precieuses|creationTime|2017-12-16T18:05:11Z +http://www.semanlink.net/tag/pierres_precieuses|prefLabel|Pierres précieuses +http://www.semanlink.net/tag/pierres_precieuses|creationDate|2017-12-16 +http://www.semanlink.net/tag/pierres_precieuses|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pierres_precieuses|uri|http://www.semanlink.net/tag/pierres_precieuses +http://www.semanlink.net/tag/excel_and_sw|creationTime|2010-09-30T23:07:46Z +http://www.semanlink.net/tag/excel_and_sw|prefLabel|Excel and SW +http://www.semanlink.net/tag/excel_and_sw|broader|http://www.semanlink.net/tag/excel +http://www.semanlink.net/tag/excel_and_sw|related|http://www.semanlink.net/tag/anzo +http://www.semanlink.net/tag/excel_and_sw|creationDate|2010-09-30 +http://www.semanlink.net/tag/excel_and_sw|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/excel_and_sw|uri|http://www.semanlink.net/tag/excel_and_sw +http://www.semanlink.net/tag/excel_and_sw|broader_prefLabel|Excel +http://www.semanlink.net/tag/semantic_wiki|prefLabel|Semantic Wiki +http://www.semanlink.net/tag/semantic_wiki|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/semantic_wiki|broader|http://www.semanlink.net/tag/wiki +http://www.semanlink.net/tag/semantic_wiki|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_wiki|uri|http://www.semanlink.net/tag/semantic_wiki +http://www.semanlink.net/tag/semantic_wiki|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/semantic_wiki|broader_prefLabel|Wiki +http://www.semanlink.net/tag/net|creationTime|2008-05-06T22:15:51Z +http://www.semanlink.net/tag/net|prefLabel|.NET +http://www.semanlink.net/tag/net|creationDate|2008-05-06 +http://www.semanlink.net/tag/net|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/net|uri|http://www.semanlink.net/tag/net +http://www.semanlink.net/tag/dita|creationTime|2011-02-15T11:52:32Z +http://www.semanlink.net/tag/dita|prefLabel|DITA 
+http://www.semanlink.net/tag/dita|broader|http://www.semanlink.net/tag/technical_documentation +http://www.semanlink.net/tag/dita|creationDate|2011-02-15 +http://www.semanlink.net/tag/dita|comment|Darwin Information Typing Architecture +http://www.semanlink.net/tag/dita|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dita|homepage|http://dita.xml.org/ +http://www.semanlink.net/tag/dita|describedBy|https://en.wikipedia.org/wiki/Darwin_Information_Typing_Architecture +http://www.semanlink.net/tag/dita|uri|http://www.semanlink.net/tag/dita +http://www.semanlink.net/tag/dita|broader_prefLabel|Technical documentation +http://www.semanlink.net/tag/semweb_pro_2012|creationTime|2012-02-13T19:43:18Z +http://www.semanlink.net/tag/semweb_pro_2012|prefLabel|SemWeb Pro 2012 +http://www.semanlink.net/tag/semweb_pro_2012|broader|http://www.semanlink.net/tag/semweb_pro +http://www.semanlink.net/tag/semweb_pro_2012|creationDate|2012-02-13 +http://www.semanlink.net/tag/semweb_pro_2012|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semweb_pro_2012|uri|http://www.semanlink.net/tag/semweb_pro_2012 +http://www.semanlink.net/tag/semweb_pro_2012|broader_prefLabel|SemWeb Pro +http://www.semanlink.net/tag/urbanisation|prefLabel|Urbanisation +http://www.semanlink.net/tag/urbanisation|broader|http://www.semanlink.net/tag/monde_moderne +http://www.semanlink.net/tag/urbanisation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/urbanisation|uri|http://www.semanlink.net/tag/urbanisation +http://www.semanlink.net/tag/urbanisation|broader_prefLabel|Monde moderne +http://www.semanlink.net/tag/megalith|creationTime|2016-10-03T01:24:44Z +http://www.semanlink.net/tag/megalith|prefLabel|Megalith +http://www.semanlink.net/tag/megalith|creationDate|2016-10-03 +http://www.semanlink.net/tag/megalith|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/megalith|describedBy|https://en.wikipedia.org/wiki/Megalith +http://www.semanlink.net/tag/megalith|uri|http://www.semanlink.net/tag/megalith +http://www.semanlink.net/tag/language_model_fine_tuning|creationTime|2021-02-24T12:17:36Z +http://www.semanlink.net/tag/language_model_fine_tuning|prefLabel|Language Model Fine-tuning +http://www.semanlink.net/tag/language_model_fine_tuning|broader|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/language_model_fine_tuning|broader|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/tag/language_model_fine_tuning|related|http://www.semanlink.net/tag/domain_specific_nlp +http://www.semanlink.net/tag/language_model_fine_tuning|related|http://www.semanlink.net/tag/domain_adaptation +http://www.semanlink.net/tag/language_model_fine_tuning|creationDate|2021-02-24 +http://www.semanlink.net/tag/language_model_fine_tuning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/language_model_fine_tuning|uri|http://www.semanlink.net/tag/language_model_fine_tuning +http://www.semanlink.net/tag/language_model_fine_tuning|broader_prefLabel|Pre-Trained Language Models +http://www.semanlink.net/tag/language_model_fine_tuning|broader_prefLabel|Language model +http://www.semanlink.net/tag/language_model_fine_tuning|broader_altLabel|PreTrained Language Models +http://www.semanlink.net/tag/language_model_fine_tuning|broader_altLabel|Deep pre-training in NLP 
+http://www.semanlink.net/tag/language_model_fine_tuning|broader_altLabel|Language Modeling +http://www.semanlink.net/tag/language_model_fine_tuning|broader_altLabel|LM +http://www.semanlink.net/tag/language_model_fine_tuning|broader_altLabel|Statistical Language Model +http://www.semanlink.net/tag/language_model_fine_tuning|broader_related|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/language_model_fine_tuning|broader_related|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/language_model_fine_tuning|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/erich_maria_remarque|creationTime|2012-08-23T00:59:21Z +http://www.semanlink.net/tag/erich_maria_remarque|prefLabel|Erich Maria Remarque +http://www.semanlink.net/tag/erich_maria_remarque|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/erich_maria_remarque|creationDate|2012-08-23 +http://www.semanlink.net/tag/erich_maria_remarque|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/erich_maria_remarque|describedBy|https://en.wikipedia.org/wiki/Erich_Maria_Remarque +http://www.semanlink.net/tag/erich_maria_remarque|uri|http://www.semanlink.net/tag/erich_maria_remarque +http://www.semanlink.net/tag/erich_maria_remarque|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/whisky|creationTime|2021-03-17T22:32:53Z +http://www.semanlink.net/tag/whisky|prefLabel|Whisky +http://www.semanlink.net/tag/whisky|creationDate|2021-03-17 +http://www.semanlink.net/tag/whisky|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/whisky|uri|http://www.semanlink.net/tag/whisky +http://www.semanlink.net/tag/product_description|creationTime|2012-04-25T13:05:26Z +http://www.semanlink.net/tag/product_description|prefLabel|Product description +http://www.semanlink.net/tag/product_description|broader|http://www.semanlink.net/tag/e_commerce_data +http://www.semanlink.net/tag/product_description|related|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/product_description|creationDate|2012-04-25 +http://www.semanlink.net/tag/product_description|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/product_description|uri|http://www.semanlink.net/tag/product_description +http://www.semanlink.net/tag/product_description|broader_prefLabel|e-commerce data +http://www.semanlink.net/tag/training_data|creationTime|2019-06-06T08:45:23Z +http://www.semanlink.net/tag/training_data|prefLabel|Training data +http://www.semanlink.net/tag/training_data|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/training_data|creationDate|2019-06-06 +http://www.semanlink.net/tag/training_data|comment|the bottleneck of getting labeled training data +http://www.semanlink.net/tag/training_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/training_data|uri|http://www.semanlink.net/tag/training_data +http://www.semanlink.net/tag/training_data|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/training_data|broader_altLabel|ML +http://www.semanlink.net/tag/training_data|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/training_data|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/histoire_de_l_inde|creationTime|2021-04-03T12:11:44Z 
+http://www.semanlink.net/tag/histoire_de_l_inde|prefLabel|Histoire de l'Inde +http://www.semanlink.net/tag/histoire_de_l_inde|broader|http://www.semanlink.net/tag/inde +http://www.semanlink.net/tag/histoire_de_l_inde|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/histoire_de_l_inde|creationDate|2021-04-03 +http://www.semanlink.net/tag/histoire_de_l_inde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_de_l_inde|uri|http://www.semanlink.net/tag/histoire_de_l_inde +http://www.semanlink.net/tag/histoire_de_l_inde|broader_prefLabel|Inde +http://www.semanlink.net/tag/histoire_de_l_inde|broader_prefLabel|Histoire +http://www.semanlink.net/tag/canada|prefLabel|Canada +http://www.semanlink.net/tag/canada|broader|http://www.semanlink.net/tag/amerique +http://www.semanlink.net/tag/canada|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/canada|uri|http://www.semanlink.net/tag/canada +http://www.semanlink.net/tag/canada|broader_prefLabel|Amérique +http://www.semanlink.net/tag/honda|prefLabel|Honda +http://www.semanlink.net/tag/honda|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/honda|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/honda|broader|http://www.semanlink.net/tag/japon +http://www.semanlink.net/tag/honda|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/honda|uri|http://www.semanlink.net/tag/honda +http://www.semanlink.net/tag/honda|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/honda|broader_prefLabel|Automobile +http://www.semanlink.net/tag/honda|broader_prefLabel|Japon +http://www.semanlink.net/tag/honda|broader_altLabel|Automotive +http://www.semanlink.net/tag/honda|broader_altLabel|Japan +http://www.semanlink.net/tag/backplanejs|creationTime|2012-03-19T22:50:54Z +http://www.semanlink.net/tag/backplanejs|prefLabel|backplanejs +http://www.semanlink.net/tag/backplanejs|broader|http://www.semanlink.net/tag/javascript_librairies +http://www.semanlink.net/tag/backplanejs|broader|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/backplanejs|broader|http://www.semanlink.net/tag/unobtrusive_javascript +http://www.semanlink.net/tag/backplanejs|related|http://www.semanlink.net/tag/mark_birbeck +http://www.semanlink.net/tag/backplanejs|creationDate|2012-03-19 +http://www.semanlink.net/tag/backplanejs|comment|A JavaScript library that provides cross-browser XForms, RDFa, and SMIL support. 
+http://www.semanlink.net/tag/backplanejs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/backplanejs|describedBy|http://code.google.com/p/backplanejs +http://www.semanlink.net/tag/backplanejs|uri|http://www.semanlink.net/tag/backplanejs +http://www.semanlink.net/tag/backplanejs|broader_prefLabel|JavaScript librairies +http://www.semanlink.net/tag/backplanejs|broader_prefLabel|RDFa +http://www.semanlink.net/tag/backplanejs|broader_prefLabel|Unobtrusive JavaScript +http://www.semanlink.net/tag/backplanejs|broader_altLabel|JavaScript framework +http://www.semanlink.net/tag/backplanejs|broader_altLabel|RDF/A +http://www.semanlink.net/tag/backplanejs|broader_related|http://www.semanlink.net/tag/elias_torres +http://www.semanlink.net/tag/backplanejs|broader_related|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanlink.net/tag/backplanejs|broader_related|http://www.semanlink.net/tag/microformats +http://www.semanlink.net/tag/backplanejs|broader_related|http://www.semanlink.net/tag/jquery +http://www.semanlink.net/tag/tagged|creationTime|2020-07-18T13:31:45Z +http://www.semanlink.net/tag/tagged|prefLabel|Tagged +http://www.semanlink.net/tag/tagged|creationDate|2020-07-18 +http://www.semanlink.net/tag/tagged|comment|To be used when I still don't know how to tag a doc +http://www.semanlink.net/tag/tagged|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tagged|uri|http://www.semanlink.net/tag/tagged +http://www.semanlink.net/tag/formal_knowledge_representation_language|creationTime|2013-06-05T00:34:45Z +http://www.semanlink.net/tag/formal_knowledge_representation_language|prefLabel|Formal knowledge representation language +http://www.semanlink.net/tag/formal_knowledge_representation_language|broader|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/tag/formal_knowledge_representation_language|creationDate|2013-06-05 +http://www.semanlink.net/tag/formal_knowledge_representation_language|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/formal_knowledge_representation_language|uri|http://www.semanlink.net/tag/formal_knowledge_representation_language +http://www.semanlink.net/tag/formal_knowledge_representation_language|broader_prefLabel|Knowledge Representation +http://www.semanlink.net/tag/formal_knowledge_representation_language|broader_altLabel|KR +http://www.semanlink.net/tag/semantic_data_wiki|creationTime|2013-09-10T01:25:24Z +http://www.semanlink.net/tag/semantic_data_wiki|prefLabel|Semantic data wiki +http://www.semanlink.net/tag/semantic_data_wiki|creationDate|2013-09-10 +http://www.semanlink.net/tag/semantic_data_wiki|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_data_wiki|uri|http://www.semanlink.net/tag/semantic_data_wiki +http://www.semanlink.net/tag/web_serving|prefLabel|Web Serving +http://www.semanlink.net/tag/web_serving|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_serving|uri|http://www.semanlink.net/tag/web_serving +http://www.semanlink.net/tag/machine_learning_semantic_web|creationTime|2014-10-06T00:23:27Z +http://www.semanlink.net/tag/machine_learning_semantic_web|prefLabel|Machine Learning + Semantic Web +http://www.semanlink.net/tag/machine_learning_semantic_web|broader|http://www.semanlink.net/tag/knowledge_graph 
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader|http://www.semanlink.net/tag/machine_learning
+http://www.semanlink.net/tag/machine_learning_semantic_web|creationDate|2014-10-06
+http://www.semanlink.net/tag/machine_learning_semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/machine_learning_semantic_web|uri|http://www.semanlink.net/tag/machine_learning_semantic_web
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader_prefLabel|Knowledge Graphs
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader_prefLabel|Machine learning
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader_altLabel|Knowledge Graph
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader_altLabel|KG
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader_altLabel|sw
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader_altLabel|ML
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader_related|http://www.semanlink.net/tag/ng
+http://www.semanlink.net/tag/machine_learning_semantic_web|broader_related|http://www.semanlink.net/tag/data_mining
+http://www.semanlink.net/tag/sequence_to_sequence_learning|creationTime|2017-09-30T10:55:33Z
+http://www.semanlink.net/tag/sequence_to_sequence_learning|prefLabel|Sequence-to-sequence learning
+http://www.semanlink.net/tag/sequence_to_sequence_learning|broader|http://www.semanlink.net/tag/machine_learning_problems
+http://www.semanlink.net/tag/sequence_to_sequence_learning|related|http://www.semanlink.net/tag/conditional_random_field
+http://www.semanlink.net/tag/sequence_to_sequence_learning|related|http://www.semanlink.net/tag/deep_learning_attention
+http://www.semanlink.net/tag/sequence_to_sequence_learning|related|http://www.semanlink.net/tag/machine_translation
+http://www.semanlink.net/tag/sequence_to_sequence_learning|related|http://www.semanlink.net/tag/finite_state_transducer
+http://www.semanlink.net/tag/sequence_to_sequence_learning|creationDate|2017-09-30
+http://www.semanlink.net/tag/sequence_to_sequence_learning|comment|"Training models to convert sequences from one domain (e.g. sentences in English) to sequences in another domain (e.g. the same sentences translated to French).
+
+Example of transformation: translation from one language to another one (text or audio), QA answering, parsing sentences into grammar tree.
+
+The seq2seq model generally uses an encoder-decoder architecture, where both encoder and decoder are RNN:
+
+- the encoder encodes the input as a fixed length vector (the ""context vector"")
+- the decoder is initialized with the context vector to emit the output
+
+Problems:
+
+- fixed-length context vector is unable to remember long sentences. [#Attention mechanism](/tag/deep_learning_attention) allows to solve this problem
+- since RNN-based seq2seq model are sequential models, they cannot be parallelized. [#The Transformer](/tag/attention_is_all_you_need) solves this
+
+
+"
+http://www.semanlink.net/tag/sequence_to_sequence_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sequence_to_sequence_learning|altLabel|Sequence Modeling
+http://www.semanlink.net/tag/sequence_to_sequence_learning|altLabel|Seq2Seq
+http://www.semanlink.net/tag/sequence_to_sequence_learning|uri|http://www.semanlink.net/tag/sequence_to_sequence_learning
+http://www.semanlink.net/tag/sequence_to_sequence_learning|broader_prefLabel|Machine learning: problems
+http://www.semanlink.net/tag/linux_hosting|prefLabel|Linux hosting
+http://www.semanlink.net/tag/linux_hosting|broader|http://www.semanlink.net/tag/isp
+http://www.semanlink.net/tag/linux_hosting|broader|http://www.semanlink.net/tag/linux
+http://www.semanlink.net/tag/linux_hosting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/linux_hosting|uri|http://www.semanlink.net/tag/linux_hosting
+http://www.semanlink.net/tag/linux_hosting|broader_prefLabel|ISP
+http://www.semanlink.net/tag/linux_hosting|broader_prefLabel|Linux
+http://www.semanlink.net/tag/number_of_neurons|creationTime|2008-06-22T02:01:51Z
+http://www.semanlink.net/tag/number_of_neurons|prefLabel|Number of neurons
+http://www.semanlink.net/tag/number_of_neurons|broader|http://www.semanlink.net/tag/cerveau
+http://www.semanlink.net/tag/number_of_neurons|broader|http://www.semanlink.net/tag/neurones
+http://www.semanlink.net/tag/number_of_neurons|creationDate|2008-06-22
+http://www.semanlink.net/tag/number_of_neurons|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/number_of_neurons|uri|http://www.semanlink.net/tag/number_of_neurons
+http://www.semanlink.net/tag/number_of_neurons|broader_prefLabel|Brain
+http://www.semanlink.net/tag/number_of_neurons|broader_prefLabel|Neurones
+http://www.semanlink.net/tag/number_of_neurons|broader_altLabel|Cerveau
+http://www.semanlink.net/tag/number_of_neurons|broader_related|http://www.semanlink.net/tag/computational_neuroscience
+http://www.semanlink.net/tag/strange|creationTime|2012-02-01T01:23:48Z
+http://www.semanlink.net/tag/strange|prefLabel|Strange
+http://www.semanlink.net/tag/strange|creationDate|2012-02-01
+http://www.semanlink.net/tag/strange|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/strange|uri|http://www.semanlink.net/tag/strange
+http://www.semanlink.net/tag/liberte|creationTime|2007-07-15T11:22:54Z
+http://www.semanlink.net/tag/liberte|prefLabel|Liberté
+http://www.semanlink.net/tag/liberte|related|http://www.semanlink.net/tag/etat_policier
+http://www.semanlink.net/tag/liberte|creationDate|2007-07-15
+http://www.semanlink.net/tag/liberte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/liberte|uri|http://www.semanlink.net/tag/liberte
+http://www.semanlink.net/tag/universites_americaines|prefLabel|Universités américaines
+http://www.semanlink.net/tag/universites_americaines|broader|http://www.semanlink.net/tag/usa
+http://www.semanlink.net/tag/universites_americaines|broader|http://www.semanlink.net/tag/universite
+http://www.semanlink.net/tag/universites_americaines|broader|http://www.semanlink.net/tag/enseignement_superieur
+http://www.semanlink.net/tag/universites_americaines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/universites_americaines|uri|http://www.semanlink.net/tag/universites_americaines
+http://www.semanlink.net/tag/universites_americaines|broader_prefLabel|USA
+http://www.semanlink.net/tag/universites_americaines|broader_prefLabel|Université
+http://www.semanlink.net/tag/universites_americaines|broader_prefLabel|Enseignement supérieur
+http://www.semanlink.net/tag/universites_americaines|broader_altLabel|ÉTATS-UNIS
+http://www.semanlink.net/tag/universites_americaines|broader_altLabel|United States
+http://www.semanlink.net/tag/business_case_semantic_web|creationTime|2010-07-30T14:56:39Z
+http://www.semanlink.net/tag/business_case_semantic_web|prefLabel|Business case: semantic web
+http://www.semanlink.net/tag/business_case_semantic_web|broader|http://www.semanlink.net/tag/semantic_web_business
+http://www.semanlink.net/tag/business_case_semantic_web|broader|http://www.semanlink.net/tag/business_case
+http://www.semanlink.net/tag/business_case_semantic_web|creationDate|2010-07-30
+http://www.semanlink.net/tag/business_case_semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/business_case_semantic_web|uri|http://www.semanlink.net/tag/business_case_semantic_web
+http://www.semanlink.net/tag/business_case_semantic_web|broader_prefLabel|Semantic Web : Business
+http://www.semanlink.net/tag/business_case_semantic_web|broader_prefLabel|Business case
+http://www.semanlink.net/tag/ranking|creationTime|2021-03-25T17:26:17Z
+http://www.semanlink.net/tag/ranking|prefLabel|Ranking
+http://www.semanlink.net/tag/ranking|creationDate|2021-03-25
+http://www.semanlink.net/tag/ranking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ranking|uri|http://www.semanlink.net/tag/ranking
+http://www.semanlink.net/tag/bob_dylan|prefLabel|Bob Dylan
+http://www.semanlink.net/tag/bob_dylan|broader|http://www.semanlink.net/tag/musicien
+http://www.semanlink.net/tag/bob_dylan|creationDate|2006-10-10
+http://www.semanlink.net/tag/bob_dylan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/bob_dylan|uri|http://www.semanlink.net/tag/bob_dylan
+http://www.semanlink.net/tag/bob_dylan|broader_prefLabel|Musicien
+http://www.semanlink.net/tag/talis_rdf_json|creationTime|2012-03-20T17:47:12Z
+http://www.semanlink.net/tag/talis_rdf_json|prefLabel|Talis RDF/JSON
+http://www.semanlink.net/tag/talis_rdf_json|broader|http://www.semanlink.net/tag/talis
+http://www.semanlink.net/tag/talis_rdf_json|broader|http://www.semanlink.net/tag/rdf_in_json
+http://www.semanlink.net/tag/talis_rdf_json|creationDate|2012-03-20
+http://www.semanlink.net/tag/talis_rdf_json|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/talis_rdf_json|uri|http://www.semanlink.net/tag/talis_rdf_json
+http://www.semanlink.net/tag/talis_rdf_json|broader_prefLabel|Talis
+http://www.semanlink.net/tag/talis_rdf_json|broader_prefLabel|RDF-in-JSON
+http://www.semanlink.net/tag/talis_rdf_json|broader_related|http://www.semanlink.net/tag/leigh_dodds
+http://www.semanlink.net/tag/talis_rdf_json|broader_related|http://www.semanlink.net/tag/danny_ayers
+http://www.semanlink.net/tag/talis_rdf_json|broader_related|http://www.semanlink.net/tag/paul_miller
+http://www.semanlink.net/tag/talis_rdf_json|broader_related|http://www.semanlink.net/tag/tom_heath
+http://www.semanlink.net/tag/lynn_margulis|creationTime|2013-08-22T10:17:40Z
+http://www.semanlink.net/tag/lynn_margulis|prefLabel|Lynn Margulis
+http://www.semanlink.net/tag/lynn_margulis|broader|http://www.semanlink.net/tag/scientifique
+http://www.semanlink.net/tag/lynn_margulis|broader|http://www.semanlink.net/tag/femme_celebre
+http://www.semanlink.net/tag/lynn_margulis|broader|http://www.semanlink.net/tag/biologie
+http://www.semanlink.net/tag/lynn_margulis|related|http://www.semanlink.net/tag/genetique_et_evolution
+http://www.semanlink.net/tag/lynn_margulis|creationDate|2013-08-22
+http://www.semanlink.net/tag/lynn_margulis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/lynn_margulis|describedBy|https://fr.wikipedia.org/wiki/Lynn_Margulis
+http://www.semanlink.net/tag/lynn_margulis|uri|http://www.semanlink.net/tag/lynn_margulis
+http://www.semanlink.net/tag/lynn_margulis|broader_prefLabel|Scientifique
+http://www.semanlink.net/tag/lynn_margulis|broader_prefLabel|Femme célèbre (où qui mérite de l'être)
+http://www.semanlink.net/tag/lynn_margulis|broader_prefLabel|Biology
+http://www.semanlink.net/tag/lynn_margulis|broader_altLabel|Savant
+http://www.semanlink.net/tag/lynn_margulis|broader_altLabel|Biologie
+http://www.semanlink.net/tag/co_training|creationTime|2020-09-06T16:35:22Z
+http://www.semanlink.net/tag/co_training|prefLabel|Co-training
+http://www.semanlink.net/tag/co_training|broader|http://www.semanlink.net/tag/semi_supervised_learning
+http://www.semanlink.net/tag/co_training|broader|http://www.semanlink.net/tag/machine_learning_techniques
+http://www.semanlink.net/tag/co_training|related|http://www.semanlink.net/tag/mutual_learning
+http://www.semanlink.net/tag/co_training|creationDate|2020-09-06
+http://www.semanlink.net/tag/co_training|comment|> Co-training (Blum and Mitchell, 1998) is a classic multi-view learning method for semi-supervised learning. In co-training, classifiers over different feature spaces are encouraged to agree in their predictions on a large pool of unlabeled examples. [src](doc:2020/09/leveraging_just_a_few_keywords_)
+http://www.semanlink.net/tag/co_training|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/co_training|uri|http://www.semanlink.net/tag/co_training
+http://www.semanlink.net/tag/co_training|broader_prefLabel|Semi-supervised learning
+http://www.semanlink.net/tag/co_training|broader_prefLabel|Machine learning: techniques
+http://www.semanlink.net/tag/co_training|broader_related|http://www.semanlink.net/tag/weak_supervision
+http://www.semanlink.net/tag/leo_sauermann|prefLabel|Leo Sauermann
+http://www.semanlink.net/tag/leo_sauermann|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/leo_sauermann|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/leo_sauermann|broader|http://www.semanlink.net/tag/semantic_desktop
+http://www.semanlink.net/tag/leo_sauermann|creationDate|2006-12-23
+http://www.semanlink.net/tag/leo_sauermann|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/leo_sauermann|uri|http://www.semanlink.net/tag/leo_sauermann
+http://www.semanlink.net/tag/leo_sauermann|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/leo_sauermann|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/leo_sauermann|broader_prefLabel|Semantic Desktop
+http://www.semanlink.net/tag/leo_sauermann|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/leo_sauermann|broader_related|http://www.semanlink.net/tag/semanlink
+http://www.semanlink.net/tag/learning_by_imitation|creationTime|2018-10-27T14:47:12Z
+http://www.semanlink.net/tag/learning_by_imitation|prefLabel|Learning by imitation
+http://www.semanlink.net/tag/learning_by_imitation|creationDate|2018-10-27
+http://www.semanlink.net/tag/learning_by_imitation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/learning_by_imitation|uri|http://www.semanlink.net/tag/learning_by_imitation
+http://www.semanlink.net/tag/ranking_svm|creationTime|2017-06-27T12:49:04Z
+http://www.semanlink.net/tag/ranking_svm|prefLabel|Ranking SVM
+http://www.semanlink.net/tag/ranking_svm|broader|http://www.semanlink.net/tag/machine_learned_ranking
+http://www.semanlink.net/tag/ranking_svm|broader|http://www.semanlink.net/tag/support_vector_machine
+http://www.semanlink.net/tag/ranking_svm|creationDate|2017-06-27
+http://www.semanlink.net/tag/ranking_svm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ranking_svm|describedBy|https://en.wikipedia.org/wiki/Ranking_SVM
+http://www.semanlink.net/tag/ranking_svm|uri|http://www.semanlink.net/tag/ranking_svm
+http://www.semanlink.net/tag/ranking_svm|broader_prefLabel|Learning to rank
+http://www.semanlink.net/tag/ranking_svm|broader_prefLabel|Support vector machine
+http://www.semanlink.net/tag/ranking_svm|broader_altLabel|Machine learned ranking
+http://www.semanlink.net/tag/ranking_svm|broader_altLabel|SVM
+http://www.semanlink.net/tag/ranking_svm|broader_related|http://www.semanlink.net/tag/nlp_topic_extraction
+http://www.semanlink.net/tag/ranking_svm|broader_related|http://www.semanlink.net/tag/okapi_bm25
+http://www.semanlink.net/tag/ranking_svm|broader_related|http://www.semanlink.net/tag/pagerank
+http://www.semanlink.net/tag/open_source|prefLabel|Open Source
+http://www.semanlink.net/tag/open_source|broader|http://www.semanlink.net/tag/software
+http://www.semanlink.net/tag/open_source|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/open_source|uri|http://www.semanlink.net/tag/open_source
+http://www.semanlink.net/tag/open_source|broader_prefLabel|Software
+http://www.semanlink.net/tag/javascript|prefLabel|JavaScript
+http://www.semanlink.net/tag/javascript|broader|http://www.semanlink.net/tag/web_dev
+http://www.semanlink.net/tag/javascript|broader|http://www.semanlink.net/tag/programming_language
+http://www.semanlink.net/tag/javascript|broader|http://www.semanlink.net/tag/dev
+http://www.semanlink.net/tag/javascript|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/javascript|altLabel|js
+http://www.semanlink.net/tag/javascript|uri|http://www.semanlink.net/tag/javascript
+http://www.semanlink.net/tag/javascript|broader_prefLabel|Web dev
+http://www.semanlink.net/tag/javascript|broader_prefLabel|Programming language
+http://www.semanlink.net/tag/javascript|broader_prefLabel|Dev
+http://www.semanlink.net/tag/javascript|broader_altLabel|Web app dev
+http://www.semanlink.net/tag/javascript|broader_altLabel|Langage de programmation
+http://www.semanlink.net/tag/text_editor|creationTime|2014-03-21T12:05:33Z
+http://www.semanlink.net/tag/text_editor|prefLabel|Text Editor
+http://www.semanlink.net/tag/text_editor|broader|http://www.semanlink.net/tag/text_tools
+http://www.semanlink.net/tag/text_editor|broader|http://www.semanlink.net/tag/tools
+http://www.semanlink.net/tag/text_editor|creationDate|2014-03-21
+http://www.semanlink.net/tag/text_editor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/text_editor|uri|http://www.semanlink.net/tag/text_editor
+http://www.semanlink.net/tag/text_editor|broader_prefLabel|Text tools
+http://www.semanlink.net/tag/text_editor|broader_prefLabel|Tools
+http://www.semanlink.net/tag/aspect_target_sentiment_classification|creationTime|2021-10-21T15:39:42Z
+http://www.semanlink.net/tag/aspect_target_sentiment_classification|prefLabel|Aspect-Target Sentiment Classification
+http://www.semanlink.net/tag/aspect_target_sentiment_classification|broader|http://www.semanlink.net/tag/aspect_based_sentiment_analysis
+http://www.semanlink.net/tag/aspect_target_sentiment_classification|creationDate|2021-10-21
+http://www.semanlink.net/tag/aspect_target_sentiment_classification|comment|classifying sentiment polarity into positive, negative, neutral with respect to an aspect-target.
+http://www.semanlink.net/tag/aspect_target_sentiment_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/aspect_target_sentiment_classification|uri|http://www.semanlink.net/tag/aspect_target_sentiment_classification
+http://www.semanlink.net/tag/aspect_target_sentiment_classification|broader_prefLabel|Aspect-Based Sentiment Analysis
+http://www.semanlink.net/tag/job_openings|creationTime|2012-05-15T09:51:16Z
+http://www.semanlink.net/tag/job_openings|prefLabel|Job openings
+http://www.semanlink.net/tag/job_openings|creationDate|2012-05-15
+http://www.semanlink.net/tag/job_openings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/job_openings|uri|http://www.semanlink.net/tag/job_openings
+http://www.semanlink.net/tag/ws_vs_pox_http|prefLabel|WS-* vs. POX/HTTP
+http://www.semanlink.net/tag/ws_vs_pox_http|broader|http://www.semanlink.net/tag/web_services
+http://www.semanlink.net/tag/ws_vs_pox_http|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ws_vs_pox_http|uri|http://www.semanlink.net/tag/ws_vs_pox_http
+http://www.semanlink.net/tag/ws_vs_pox_http|broader_prefLabel|Web Services
+http://www.semanlink.net/tag/ws_vs_pox_http|broader_altLabel|WS
+http://www.semanlink.net/tag/femme_celebre|prefLabel|Femme célèbre (où qui mérite de l'être)
+http://www.semanlink.net/tag/femme_celebre|broader|http://www.semanlink.net/tag/homme_celebre
+http://www.semanlink.net/tag/femme_celebre|broader|http://www.semanlink.net/tag/femme
+http://www.semanlink.net/tag/femme_celebre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/femme_celebre|uri|http://www.semanlink.net/tag/femme_celebre
+http://www.semanlink.net/tag/femme_celebre|broader_prefLabel|Homme célèbre
+http://www.semanlink.net/tag/femme_celebre|broader_prefLabel|Femme
+http://www.semanlink.net/tag/tony_blair|prefLabel|Tony Blair
+http://www.semanlink.net/tag/tony_blair|broader|http://www.semanlink.net/tag/royaume_uni
+http://www.semanlink.net/tag/tony_blair|broader|http://www.semanlink.net/tag/homme_politique
+http://www.semanlink.net/tag/tony_blair|broader|http://www.semanlink.net/tag/chef_d_etat
+http://www.semanlink.net/tag/tony_blair|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/tony_blair|altLabel|Blair
+http://www.semanlink.net/tag/tony_blair|uri|http://www.semanlink.net/tag/tony_blair
+http://www.semanlink.net/tag/tony_blair|broader_prefLabel|Royaume Uni
+http://www.semanlink.net/tag/tony_blair|broader_prefLabel|Homme politique
+http://www.semanlink.net/tag/tony_blair|broader_prefLabel|Chef d'état
+http://www.semanlink.net/tag/tony_blair|broader_altLabel|UK
+http://www.semanlink.net/tag/coursera_deep_learning|creationTime|2017-08-16T12:51:04Z
+http://www.semanlink.net/tag/coursera_deep_learning|prefLabel|Coursera: Deep Learning
+http://www.semanlink.net/tag/coursera_deep_learning|broader|http://www.semanlink.net/tag/machine_learning_course
+http://www.semanlink.net/tag/coursera_deep_learning|broader|http://www.semanlink.net/tag/coursera
+http://www.semanlink.net/tag/coursera_deep_learning|broader|http://www.semanlink.net/tag/deep_learning
+http://www.semanlink.net/tag/coursera_deep_learning|broader|http://www.semanlink.net/tag/ng
+http://www.semanlink.net/tag/coursera_deep_learning|creationDate|2017-08-16
+http://www.semanlink.net/tag/coursera_deep_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/coursera_deep_learning|uri|http://www.semanlink.net/tag/coursera_deep_learning
+http://www.semanlink.net/tag/coursera_deep_learning|broader_prefLabel|Machine Learning Course
+http://www.semanlink.net/tag/coursera_deep_learning|broader_prefLabel|Coursera
+http://www.semanlink.net/tag/coursera_deep_learning|broader_prefLabel|Deep Learning
+http://www.semanlink.net/tag/coursera_deep_learning|broader_prefLabel|Andrew Ng
+http://www.semanlink.net/tag/coursera_deep_learning|broader_altLabel|Ng
+http://www.semanlink.net/tag/coursera_deep_learning|broader_related|http://www.semanlink.net/tag/ng
+http://www.semanlink.net/tag/coursera_deep_learning|broader_related|http://www.semanlink.net/tag/feature_extraction
+http://www.semanlink.net/tag/coursera_deep_learning|broader_related|http://www.semanlink.net/tag/feature_learning
+http://www.semanlink.net/tag/coursera_deep_learning|broader_related|http://www.semanlink.net/tag/coursera_machine_learning +http://www.semanlink.net/tag/coursera_deep_learning|broader_related|http://www.semanlink.net/tag/baidu +http://www.semanlink.net/tag/coursera_deep_learning|broader_related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/memory_networks|creationTime|2018-11-14T01:29:42Z +http://www.semanlink.net/tag/memory_networks|prefLabel|Memory networks +http://www.semanlink.net/tag/memory_networks|broader|http://www.semanlink.net/tag/memory_in_deep_learning +http://www.semanlink.net/tag/memory_networks|related|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/tag/memory_networks|related|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/tag/memory_networks|creationDate|2018-11-14 +http://www.semanlink.net/tag/memory_networks|comment|"Neural networks with an explicit memory component that can be both read and written to by the network. + +Memory networks reason with inference components combined with a long-term memory component; they learn how to use these jointly. + +First presented in a [paper by Weston](https://arxiv.org/abs/1410.3916) (Bordes among the authors) in 2015" +http://www.semanlink.net/tag/memory_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/memory_networks|uri|http://www.semanlink.net/tag/memory_networks +http://www.semanlink.net/tag/memory_networks|broader_prefLabel|Memory in deep learning +http://www.semanlink.net/tag/memory_networks|broader_related|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/tag/cnes|creationTime|2019-07-17T11:21:26Z +http://www.semanlink.net/tag/cnes|prefLabel|CNES +http://www.semanlink.net/tag/cnes|creationDate|2019-07-17 +http://www.semanlink.net/tag/cnes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cnes|uri|http://www.semanlink.net/tag/cnes +http://www.semanlink.net/tag/restful_semantic_web_services|creationTime|2007-07-04T23:12:20Z +http://www.semanlink.net/tag/restful_semantic_web_services|prefLabel|Restful semantic web services +http://www.semanlink.net/tag/restful_semantic_web_services|broader|http://www.semanlink.net/tag/semantic_web_services +http://www.semanlink.net/tag/restful_semantic_web_services|broader|http://www.semanlink.net/tag/restful_web_services +http://www.semanlink.net/tag/restful_semantic_web_services|broader|http://www.semanlink.net/tag/rest +http://www.semanlink.net/tag/restful_semantic_web_services|creationDate|2007-07-04 +http://www.semanlink.net/tag/restful_semantic_web_services|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/restful_semantic_web_services|uri|http://www.semanlink.net/tag/restful_semantic_web_services +http://www.semanlink.net/tag/restful_semantic_web_services|broader_prefLabel|Semantic Web Services +http://www.semanlink.net/tag/restful_semantic_web_services|broader_prefLabel|RESTful Web Services +http://www.semanlink.net/tag/restful_semantic_web_services|broader_prefLabel|REST +http://www.semanlink.net/tag/javascript_frameork|creationTime|2015-09-04T22:38:35Z +http://www.semanlink.net/tag/javascript_frameork|prefLabel|Javascript framework +http://www.semanlink.net/tag/javascript_frameork|broader|http://www.semanlink.net/tag/frameworks +http://www.semanlink.net/tag/javascript_frameork|creationDate|2015-09-04 
+http://www.semanlink.net/tag/javascript_frameork|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/javascript_frameork|uri|http://www.semanlink.net/tag/javascript_frameork +http://www.semanlink.net/tag/javascript_frameork|broader_prefLabel|Frameworks +http://www.semanlink.net/tag/altavista|creationTime|2007-08-23T23:50:35Z +http://www.semanlink.net/tag/altavista|prefLabel|AltaVista +http://www.semanlink.net/tag/altavista|creationDate|2007-08-23 +http://www.semanlink.net/tag/altavista|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/altavista|uri|http://www.semanlink.net/tag/altavista +http://www.semanlink.net/tag/fps_dev|prefLabel|fps dev +http://www.semanlink.net/tag/fps_dev|broader|http://www.semanlink.net/tag/fps +http://www.semanlink.net/tag/fps_dev|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/fps_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps_dev|uri|http://www.semanlink.net/tag/fps_dev +http://www.semanlink.net/tag/fps_dev|broader_prefLabel|fps +http://www.semanlink.net/tag/fps_dev|broader_prefLabel|Dev +http://www.semanlink.net/tag/dictionnaire|prefLabel|Dictionnaire +http://www.semanlink.net/tag/dictionnaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dictionnaire|altLabel|Dictionnary +http://www.semanlink.net/tag/dictionnaire|uri|http://www.semanlink.net/tag/dictionnaire +http://www.semanlink.net/tag/ml_google|creationTime|2018-08-21T17:29:10Z +http://www.semanlink.net/tag/ml_google|prefLabel|AI@Google +http://www.semanlink.net/tag/ml_google|broader|http://www.semanlink.net/tag/ai_teams +http://www.semanlink.net/tag/ml_google|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ml_google|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/ml_google|creationDate|2018-08-21 +http://www.semanlink.net/tag/ml_google|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ml_google|uri|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/tag/ml_google|broader_prefLabel|AI teams +http://www.semanlink.net/tag/ml_google|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ml_google|broader_prefLabel|Google +http://www.semanlink.net/tag/ml_google|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ml_google|broader_altLabel|AI +http://www.semanlink.net/tag/ml_google|broader_altLabel|IA +http://www.semanlink.net/tag/ml_google|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/ml_google|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/deforestation|prefLabel|Déforestation +http://www.semanlink.net/tag/deforestation|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/deforestation|broader|http://www.semanlink.net/tag/foret +http://www.semanlink.net/tag/deforestation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deforestation|uri|http://www.semanlink.net/tag/deforestation +http://www.semanlink.net/tag/deforestation|broader_prefLabel|Écologie +http://www.semanlink.net/tag/deforestation|broader_prefLabel|Forêt +http://www.semanlink.net/tag/reparation_automobile|prefLabel|Réparation automobile +http://www.semanlink.net/tag/reparation_automobile|broader|http://www.semanlink.net/tag/automobile 
+http://www.semanlink.net/tag/reparation_automobile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reparation_automobile|uri|http://www.semanlink.net/tag/reparation_automobile +http://www.semanlink.net/tag/reparation_automobile|broader_prefLabel|Automobile +http://www.semanlink.net/tag/reparation_automobile|broader_altLabel|Automotive +http://www.semanlink.net/tag/cookie|creationTime|2008-01-15T23:24:13Z +http://www.semanlink.net/tag/cookie|prefLabel|Cookies +http://www.semanlink.net/tag/cookie|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/cookie|creationDate|2008-01-15 +http://www.semanlink.net/tag/cookie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cookie|altLabel|Cookie +http://www.semanlink.net/tag/cookie|uri|http://www.semanlink.net/tag/cookie +http://www.semanlink.net/tag/cookie|broader_prefLabel|Web dev +http://www.semanlink.net/tag/cookie|broader_altLabel|Web app dev +http://www.semanlink.net/tag/martin_hepp|creationTime|2011-06-30T22:39:58Z +http://www.semanlink.net/tag/martin_hepp|prefLabel|Martin Hepp +http://www.semanlink.net/tag/martin_hepp|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/martin_hepp|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/martin_hepp|creationDate|2011-06-30 +http://www.semanlink.net/tag/martin_hepp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/martin_hepp|uri|http://www.semanlink.net/tag/martin_hepp +http://www.semanlink.net/tag/martin_hepp|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/martin_hepp|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/martin_hepp|broader_altLabel|Technical guys +http://www.semanlink.net/tag/the_limits_to_growth|creationTime|2013-01-04T13:48:00Z +http://www.semanlink.net/tag/the_limits_to_growth|prefLabel|The Limits to Growth +http://www.semanlink.net/tag/the_limits_to_growth|broader|http://www.semanlink.net/tag/crise_ecologique +http://www.semanlink.net/tag/the_limits_to_growth|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/the_limits_to_growth|broader|http://www.semanlink.net/tag/croissance +http://www.semanlink.net/tag/the_limits_to_growth|creationDate|2013-01-04 +http://www.semanlink.net/tag/the_limits_to_growth|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/the_limits_to_growth|describedBy|https://en.wikipedia.org/wiki/The_Limits_to_Growth +http://www.semanlink.net/tag/the_limits_to_growth|altLabel|Rapport Meadows +http://www.semanlink.net/tag/the_limits_to_growth|uri|http://www.semanlink.net/tag/the_limits_to_growth +http://www.semanlink.net/tag/the_limits_to_growth|broader_prefLabel|Crise écologique +http://www.semanlink.net/tag/the_limits_to_growth|broader_prefLabel|Economie +http://www.semanlink.net/tag/the_limits_to_growth|broader_prefLabel|Croissance +http://www.semanlink.net/tag/the_limits_to_growth|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/topbraid|creationTime|2010-08-27T18:27:16Z +http://www.semanlink.net/tag/topbraid|prefLabel|TopBraid +http://www.semanlink.net/tag/topbraid|broader|http://www.semanlink.net/tag/topquadrant +http://www.semanlink.net/tag/topbraid|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/topbraid|creationDate|2010-08-27 
+http://www.semanlink.net/tag/topbraid|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/topbraid|uri|http://www.semanlink.net/tag/topbraid +http://www.semanlink.net/tag/topbraid|broader_prefLabel|TopQuadrant +http://www.semanlink.net/tag/topbraid|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/topbraid|broader_altLabel|www.topquadrant.com +http://www.semanlink.net/tag/one_laptop_per_child|creationTime|2007-09-10T19:53:38Z +http://www.semanlink.net/tag/one_laptop_per_child|prefLabel|One Laptop Per Child +http://www.semanlink.net/tag/one_laptop_per_child|creationDate|2007-09-10 +http://www.semanlink.net/tag/one_laptop_per_child|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/one_laptop_per_child|describedBy|https://en.wikipedia.org/wiki/One_Laptop_per_Child#Criticism +http://www.semanlink.net/tag/one_laptop_per_child|uri|http://www.semanlink.net/tag/one_laptop_per_child +http://www.semanlink.net/tag/javascript_rdf|creationTime|2011-08-28T22:55:04Z +http://www.semanlink.net/tag/javascript_rdf|prefLabel|Javascript RDF +http://www.semanlink.net/tag/javascript_rdf|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/javascript_rdf|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/javascript_rdf|related|http://www.semanlink.net/tag/rdf_in_json +http://www.semanlink.net/tag/javascript_rdf|creationDate|2011-08-28 +http://www.semanlink.net/tag/javascript_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/javascript_rdf|uri|http://www.semanlink.net/tag/javascript_rdf +http://www.semanlink.net/tag/javascript_rdf|broader_prefLabel|RDF +http://www.semanlink.net/tag/javascript_rdf|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/javascript_rdf|broader_altLabel|js +http://www.semanlink.net/tag/javascript_rdf|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/javascript_rdf|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/javascript_rdf|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/javascript_rdf|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/javascript_rdf|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/erudition|creationTime|2013-12-16T15:35:09Z +http://www.semanlink.net/tag/erudition|prefLabel|Erudition +http://www.semanlink.net/tag/erudition|creationDate|2013-12-16 +http://www.semanlink.net/tag/erudition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/erudition|uri|http://www.semanlink.net/tag/erudition +http://www.semanlink.net/tag/guillaume_lample|creationTime|2018-09-27T11:29:39Z +http://www.semanlink.net/tag/guillaume_lample|prefLabel|Guillaume Lample +http://www.semanlink.net/tag/guillaume_lample|broader|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/guillaume_lample|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/guillaume_lample|creationDate|2018-09-27 +http://www.semanlink.net/tag/guillaume_lample|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/guillaume_lample|uri|http://www.semanlink.net/tag/guillaume_lample +http://www.semanlink.net/tag/guillaume_lample|broader_prefLabel|NLP@Facebook +http://www.semanlink.net/tag/guillaume_lample|broader_prefLabel|NLP girls and guys 
+http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|creationTime|2021-01-23T13:28:46Z +http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|prefLabel|Not Encoding Factual Knowledge in Language Model +http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|broader|http://www.semanlink.net/tag/language_models_knowledge +http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|related|http://www.semanlink.net/tag/language_models_as_knowledge_bases +http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|related|http://www.semanlink.net/tag/how_much_information_in_a_language +http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|creationDate|2021-01-23 +http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|comment|Louis XIV died in 1715. This doesn't need to be encoded in the parameters of the Language Model. +http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|uri|http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model +http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|broader_prefLabel|Language Models + Knowledge +http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model|broader_related|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/norman_walsh|prefLabel|Norman Walsh +http://www.semanlink.net/tag/norman_walsh|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/norman_walsh|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/norman_walsh|uri|http://www.semanlink.net/tag/norman_walsh +http://www.semanlink.net/tag/norman_walsh|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/norman_walsh|broader_altLabel|Technical guys +http://www.semanlink.net/tag/dataportability|creationTime|2008-05-08T03:05:58Z +http://www.semanlink.net/tag/dataportability|prefLabel|DataPortability +http://www.semanlink.net/tag/dataportability|creationDate|2008-05-08 +http://www.semanlink.net/tag/dataportability|comment|DataPortability.org - Share and remix data using open standards +http://www.semanlink.net/tag/dataportability|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dataportability|homepage|http://dataportability.org/ +http://www.semanlink.net/tag/dataportability|uri|http://www.semanlink.net/tag/dataportability +http://www.semanlink.net/tag/graphdb|creationTime|2019-02-02T12:04:21Z +http://www.semanlink.net/tag/graphdb|prefLabel|GraphDB +http://www.semanlink.net/tag/graphdb|broader|http://www.semanlink.net/tag/graph_database +http://www.semanlink.net/tag/graphdb|creationDate|2019-02-02 +http://www.semanlink.net/tag/graphdb|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graphdb|uri|http://www.semanlink.net/tag/graphdb +http://www.semanlink.net/tag/graphdb|broader_prefLabel|Graph database +http://www.semanlink.net/tag/thought_alone_controlled_device|prefLabel|Thought alone controlled device +http://www.semanlink.net/tag/thought_alone_controlled_device|broader|http://www.semanlink.net/tag/robotique +http://www.semanlink.net/tag/thought_alone_controlled_device|broader|http://www.semanlink.net/tag/pensee 
+http://www.semanlink.net/tag/thought_alone_controlled_device|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thought_alone_controlled_device|uri|http://www.semanlink.net/tag/thought_alone_controlled_device +http://www.semanlink.net/tag/thought_alone_controlled_device|broader_prefLabel|Robotique +http://www.semanlink.net/tag/thought_alone_controlled_device|broader_prefLabel|Pensée +http://www.semanlink.net/tag/thought_alone_controlled_device|broader_altLabel|Robotics +http://www.semanlink.net/tag/thought_alone_controlled_device|broader_altLabel|Robot +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|creationTime|2021-05-28T14:34:46Z +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|prefLabel|Bactria–Margiana Archaeological Complex +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|broader|http://www.semanlink.net/tag/asie_centrale +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|broader|http://www.semanlink.net/tag/age_du_bronze +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|related|http://www.semanlink.net/tag/civilisation_de_l_indus +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|creationDate|2021-05-28 +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|describedBy|https://en.wikipedia.org/wiki/Bactria%E2%80%93Margiana_Archaeological_Complex +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|uri|http://www.semanlink.net/tag/bactria_margiana_archaeological_complex +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|broader_prefLabel|Asie centrale +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|broader_prefLabel|Âge du bronze +http://www.semanlink.net/tag/bactria_margiana_archaeological_complex|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/james_hendler|creationTime|2013-07-31T09:54:33Z +http://www.semanlink.net/tag/james_hendler|prefLabel|James Hendler +http://www.semanlink.net/tag/james_hendler|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/james_hendler|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/james_hendler|creationDate|2013-07-31 +http://www.semanlink.net/tag/james_hendler|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/james_hendler|uri|http://www.semanlink.net/tag/james_hendler +http://www.semanlink.net/tag/james_hendler|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/james_hendler|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/james_hendler|broader_altLabel|Technical guys +http://www.semanlink.net/tag/robert|prefLabel|Robert +http://www.semanlink.net/tag/robert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/robert|uri|http://www.semanlink.net/tag/robert +http://www.semanlink.net/tag/stochastic_parrots|creationTime|2021-01-23T16:49:13Z +http://www.semanlink.net/tag/stochastic_parrots|prefLabel|Stochastic Parrots 
+http://www.semanlink.net/tag/stochastic_parrots|creationDate|2021-01-23 +http://www.semanlink.net/tag/stochastic_parrots|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stochastic_parrots|uri|http://www.semanlink.net/tag/stochastic_parrots +http://www.semanlink.net/tag/film_britannique|creationTime|2021-03-17T22:32:24Z +http://www.semanlink.net/tag/film_britannique|prefLabel|Film britannique +http://www.semanlink.net/tag/film_britannique|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_britannique|broader|http://www.semanlink.net/tag/royaume_uni +http://www.semanlink.net/tag/film_britannique|creationDate|2021-03-17 +http://www.semanlink.net/tag/film_britannique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_britannique|uri|http://www.semanlink.net/tag/film_britannique +http://www.semanlink.net/tag/film_britannique|broader_prefLabel|Film +http://www.semanlink.net/tag/film_britannique|broader_prefLabel|Royaume Uni +http://www.semanlink.net/tag/film_britannique|broader_altLabel|UK +http://www.semanlink.net/tag/sw_is_flawed|creationTime|2015-04-29T00:32:49Z +http://www.semanlink.net/tag/sw_is_flawed|prefLabel|SW is flawed +http://www.semanlink.net/tag/sw_is_flawed|creationDate|2015-04-29 +http://www.semanlink.net/tag/sw_is_flawed|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sw_is_flawed|uri|http://www.semanlink.net/tag/sw_is_flawed +http://www.semanlink.net/tag/google_knowledge_graph|creationTime|2012-05-19T22:33:10Z +http://www.semanlink.net/tag/google_knowledge_graph|prefLabel|Google Knowledge Graph +http://www.semanlink.net/tag/google_knowledge_graph|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/google_knowledge_graph|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/google_knowledge_graph|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_knowledge_graph|creationDate|2012-05-19 +http://www.semanlink.net/tag/google_knowledge_graph|comment|"""Things, not strings"" +" +http://www.semanlink.net/tag/google_knowledge_graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_knowledge_graph|uri|http://www.semanlink.net/tag/google_knowledge_graph +http://www.semanlink.net/tag/google_knowledge_graph|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/google_knowledge_graph|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/google_knowledge_graph|broader_prefLabel|Google +http://www.semanlink.net/tag/google_knowledge_graph|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/google_knowledge_graph|broader_altLabel|KG +http://www.semanlink.net/tag/google_knowledge_graph|broader_altLabel|sw +http://www.semanlink.net/tag/google_knowledge_graph|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/google_knowledge_graph|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/library_code|creationTime|2020-07-10T09:36:59Z +http://www.semanlink.net/tag/library_code|prefLabel|Library (code) +http://www.semanlink.net/tag/library_code|broader|http://www.semanlink.net/tag/programming +http://www.semanlink.net/tag/library_code|related|http://www.semanlink.net/tag/frameworks +http://www.semanlink.net/tag/library_code|creationDate|2020-07-10 +http://www.semanlink.net/tag/library_code|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/library_code|uri|http://www.semanlink.net/tag/library_code +http://www.semanlink.net/tag/library_code|broader_prefLabel|Programming +http://www.semanlink.net/tag/liberte_egalite_fraternite|creationTime|2007-07-14T01:11:23Z +http://www.semanlink.net/tag/liberte_egalite_fraternite|prefLabel|Liberté, égalité, fraternité +http://www.semanlink.net/tag/liberte_egalite_fraternite|broader|http://www.semanlink.net/tag/liberte +http://www.semanlink.net/tag/liberte_egalite_fraternite|broader|http://www.semanlink.net/tag/revolution_francaise +http://www.semanlink.net/tag/liberte_egalite_fraternite|creationDate|2007-07-14 +http://www.semanlink.net/tag/liberte_egalite_fraternite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/liberte_egalite_fraternite|uri|http://www.semanlink.net/tag/liberte_egalite_fraternite +http://www.semanlink.net/tag/liberte_egalite_fraternite|broader_prefLabel|Liberté +http://www.semanlink.net/tag/liberte_egalite_fraternite|broader_prefLabel|Révolution française +http://www.semanlink.net/tag/liberte_egalite_fraternite|broader_related|http://www.semanlink.net/tag/etat_policier +http://www.semanlink.net/tag/mur_de_berlin|prefLabel|Mur de Berlin +http://www.semanlink.net/tag/mur_de_berlin|broader|http://www.semanlink.net/tag/communisme +http://www.semanlink.net/tag/mur_de_berlin|broader|http://www.semanlink.net/tag/rda +http://www.semanlink.net/tag/mur_de_berlin|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/mur_de_berlin|broader|http://www.semanlink.net/tag/allemagne +http://www.semanlink.net/tag/mur_de_berlin|broader|http://www.semanlink.net/tag/berlin +http://www.semanlink.net/tag/mur_de_berlin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mur_de_berlin|uri|http://www.semanlink.net/tag/mur_de_berlin +http://www.semanlink.net/tag/mur_de_berlin|broader_prefLabel|Communisme +http://www.semanlink.net/tag/mur_de_berlin|broader_prefLabel|RDA +http://www.semanlink.net/tag/mur_de_berlin|broader_prefLabel|Histoire +http://www.semanlink.net/tag/mur_de_berlin|broader_prefLabel|Allemagne +http://www.semanlink.net/tag/mur_de_berlin|broader_prefLabel|Berlin +http://www.semanlink.net/tag/mur_de_berlin|broader_altLabel|Germany +http://www.semanlink.net/tag/mur_de_berlin|broader_altLabel|Deutschland +http://www.semanlink.net/tag/christopher_olah|creationTime|2015-10-16T16:36:38Z +http://www.semanlink.net/tag/christopher_olah|prefLabel|Chris Olah +http://www.semanlink.net/tag/christopher_olah|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/christopher_olah|creationDate|2015-10-16 +http://www.semanlink.net/tag/christopher_olah|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/christopher_olah|altLabel|Christopher Olah +http://www.semanlink.net/tag/christopher_olah|uri|http://www.semanlink.net/tag/christopher_olah +http://www.semanlink.net/tag/christopher_olah|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/robotisation|creationTime|2015-11-08T11:51:32Z +http://www.semanlink.net/tag/robotisation|prefLabel|Robotisation +http://www.semanlink.net/tag/robotisation|broader|http://www.semanlink.net/tag/robotique +http://www.semanlink.net/tag/robotisation|creationDate|2015-11-08 +http://www.semanlink.net/tag/robotisation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/robotisation|uri|http://www.semanlink.net/tag/robotisation 
+http://www.semanlink.net/tag/robotisation|broader_prefLabel|Robotique +http://www.semanlink.net/tag/robotisation|broader_altLabel|Robotics +http://www.semanlink.net/tag/robotisation|broader_altLabel|Robot +http://www.semanlink.net/tag/lutte_anti_terroriste|prefLabel|Lutte anti-terroriste +http://www.semanlink.net/tag/lutte_anti_terroriste|broader|http://www.semanlink.net/tag/terrorisme +http://www.semanlink.net/tag/lutte_anti_terroriste|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lutte_anti_terroriste|uri|http://www.semanlink.net/tag/lutte_anti_terroriste +http://www.semanlink.net/tag/lutte_anti_terroriste|broader_prefLabel|Terrorisme +http://www.semanlink.net/tag/regex|creationTime|2010-01-27T00:17:04Z +http://www.semanlink.net/tag/regex|prefLabel|Regex +http://www.semanlink.net/tag/regex|broader|http://www.semanlink.net/tag/dev_tools +http://www.semanlink.net/tag/regex|creationDate|2010-01-27 +http://www.semanlink.net/tag/regex|comment|Test regex [here](https://www.regexpal.com/) +http://www.semanlink.net/tag/regex|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/regex|altLabel|Regular Expression +http://www.semanlink.net/tag/regex|uri|http://www.semanlink.net/tag/regex +http://www.semanlink.net/tag/regex|broader_prefLabel|Dev tools +http://www.semanlink.net/tag/jean_claude_juncker|creationTime|2007-09-18T22:27:19Z +http://www.semanlink.net/tag/jean_claude_juncker|prefLabel|Jean-Claude Juncker +http://www.semanlink.net/tag/jean_claude_juncker|broader|http://www.semanlink.net/tag/eurogroupe +http://www.semanlink.net/tag/jean_claude_juncker|creationDate|2007-09-18 +http://www.semanlink.net/tag/jean_claude_juncker|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jean_claude_juncker|uri|http://www.semanlink.net/tag/jean_claude_juncker +http://www.semanlink.net/tag/jean_claude_juncker|broader_prefLabel|Eurogroupe +http://www.semanlink.net/tag/java_microframeworks|creationTime|2017-05-16T02:07:12Z +http://www.semanlink.net/tag/java_microframeworks|prefLabel|Java microframeworks +http://www.semanlink.net/tag/java_microframeworks|broader|http://www.semanlink.net/tag/java_web_dev +http://www.semanlink.net/tag/java_microframeworks|creationDate|2017-05-16 +http://www.semanlink.net/tag/java_microframeworks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_microframeworks|uri|http://www.semanlink.net/tag/java_microframeworks +http://www.semanlink.net/tag/java_microframeworks|broader_prefLabel|Java web dev +http://www.semanlink.net/tag/musees_africains|creationTime|2010-05-17T12:31:54Z +http://www.semanlink.net/tag/musees_africains|prefLabel|Musées africains +http://www.semanlink.net/tag/musees_africains|broader|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/musees_africains|creationDate|2010-05-17 +http://www.semanlink.net/tag/musees_africains|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/musees_africains|uri|http://www.semanlink.net/tag/musees_africains +http://www.semanlink.net/tag/musees_africains|broader_prefLabel|Musée +http://www.semanlink.net/tag/chanson|creationTime|2007-02-22T23:20:47Z +http://www.semanlink.net/tag/chanson|prefLabel|Chanson +http://www.semanlink.net/tag/chanson|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/chanson|creationDate|2007-02-22 
+http://www.semanlink.net/tag/chanson|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chanson|uri|http://www.semanlink.net/tag/chanson +http://www.semanlink.net/tag/chanson|broader_prefLabel|Musique +http://www.semanlink.net/tag/chanson|broader_altLabel|Music +http://www.semanlink.net/tag/tasmanian_devil|creationTime|2008-04-13T13:05:49Z +http://www.semanlink.net/tag/tasmanian_devil|prefLabel|Tasmanian devil +http://www.semanlink.net/tag/tasmanian_devil|broader|http://www.semanlink.net/tag/cancer +http://www.semanlink.net/tag/tasmanian_devil|broader|http://www.semanlink.net/tag/tasmanie +http://www.semanlink.net/tag/tasmanian_devil|broader|http://www.semanlink.net/tag/especes_menacees +http://www.semanlink.net/tag/tasmanian_devil|creationDate|2008-04-13 +http://www.semanlink.net/tag/tasmanian_devil|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tasmanian_devil|altLabel|Diable de Tasmanie +http://www.semanlink.net/tag/tasmanian_devil|uri|http://www.semanlink.net/tag/tasmanian_devil +http://www.semanlink.net/tag/tasmanian_devil|broader_prefLabel|Cancer +http://www.semanlink.net/tag/tasmanian_devil|broader_prefLabel|Tasmanie +http://www.semanlink.net/tag/tasmanian_devil|broader_prefLabel|Espèces menacées +http://www.semanlink.net/tag/tasmanian_devil|broader_altLabel|Endangered Species +http://www.semanlink.net/tag/tasmanian_devil|broader_related|http://www.semanlink.net/tag/disparition_d_especes +http://www.semanlink.net/tag/brexit|creationTime|2016-06-26T01:11:07Z +http://www.semanlink.net/tag/brexit|prefLabel|Brexit +http://www.semanlink.net/tag/brexit|broader|http://www.semanlink.net/tag/europe_and_uk +http://www.semanlink.net/tag/brexit|broader|http://www.semanlink.net/tag/royaume_uni +http://www.semanlink.net/tag/brexit|creationDate|2016-06-26 +http://www.semanlink.net/tag/brexit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/brexit|uri|http://www.semanlink.net/tag/brexit +http://www.semanlink.net/tag/brexit|broader_prefLabel|Europe and UK +http://www.semanlink.net/tag/brexit|broader_prefLabel|Royaume Uni +http://www.semanlink.net/tag/brexit|broader_altLabel|UK +http://www.semanlink.net/tag/dark_side_of_tech|creationTime|2018-03-29T19:38:45Z +http://www.semanlink.net/tag/dark_side_of_tech|prefLabel|Dark side of Tech +http://www.semanlink.net/tag/dark_side_of_tech|creationDate|2018-03-29 +http://www.semanlink.net/tag/dark_side_of_tech|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dark_side_of_tech|uri|http://www.semanlink.net/tag/dark_side_of_tech +http://www.semanlink.net/tag/voaf|creationTime|2011-01-25T14:53:37Z +http://www.semanlink.net/tag/voaf|prefLabel|VOAF +http://www.semanlink.net/tag/voaf|broader|http://www.semanlink.net/tag/bernard_vatant +http://www.semanlink.net/tag/voaf|broader|http://www.semanlink.net/tag/rdf_vocabularies +http://www.semanlink.net/tag/voaf|creationDate|2011-01-25 +http://www.semanlink.net/tag/voaf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/voaf|uri|http://www.semanlink.net/tag/voaf +http://www.semanlink.net/tag/voaf|broader_prefLabel|Bernard Vatant +http://www.semanlink.net/tag/voaf|broader_prefLabel|RDF Vocabularies +http://www.semanlink.net/tag/voaf|broader_related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/content_negotiation|prefLabel|Content negotiation 
+http://www.semanlink.net/tag/content_negotiation|broader|http://www.semanlink.net/tag/http +http://www.semanlink.net/tag/content_negotiation|related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/content_negotiation|creationDate|2007-01-02 +http://www.semanlink.net/tag/content_negotiation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/content_negotiation|uri|http://www.semanlink.net/tag/content_negotiation +http://www.semanlink.net/tag/content_negotiation|broader_prefLabel|HTTP +http://www.semanlink.net/tag/java_7|creationTime|2010-04-28T14:13:54Z +http://www.semanlink.net/tag/java_7|prefLabel|Java 7 +http://www.semanlink.net/tag/java_7|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java_7|creationDate|2010-04-28 +http://www.semanlink.net/tag/java_7|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_7|uri|http://www.semanlink.net/tag/java_7 +http://www.semanlink.net/tag/java_7|broader_prefLabel|Java +http://www.semanlink.net/tag/neural_models_for_information_retrieval|creationTime|2019-08-18T23:00:46Z +http://www.semanlink.net/tag/neural_models_for_information_retrieval|prefLabel|Neural Search +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader|http://www.semanlink.net/tag/nlp_based_ir +http://www.semanlink.net/tag/neural_models_for_information_retrieval|creationDate|2019-08-18 +http://www.semanlink.net/tag/neural_models_for_information_retrieval|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neural_models_for_information_retrieval|altLabel|Neural IR models +http://www.semanlink.net/tag/neural_models_for_information_retrieval|altLabel|Neural retrieval +http://www.semanlink.net/tag/neural_models_for_information_retrieval|altLabel|Neural Models for Information Retrieval +http://www.semanlink.net/tag/neural_models_for_information_retrieval|uri|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader_prefLabel|Information retrieval +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader_prefLabel|NLP based IR +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader_altLabel|IR +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader_altLabel|ANN +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader_altLabel|NN +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader_altLabel|AI & IR +http://www.semanlink.net/tag/neural_models_for_information_retrieval|broader_altLabel|NLP based Information Retrieval +http://www.semanlink.net/tag/conceptual_modeling|prefLabel|Conceptual modeling +http://www.semanlink.net/tag/conceptual_modeling|broader|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/tag/conceptual_modeling|creationDate|2006-12-01 
+http://www.semanlink.net/tag/conceptual_modeling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conceptual_modeling|uri|http://www.semanlink.net/tag/conceptual_modeling +http://www.semanlink.net/tag/conceptual_modeling|broader_prefLabel|Knowledge Representation +http://www.semanlink.net/tag/conceptual_modeling|broader_altLabel|KR +http://www.semanlink.net/tag/mutualisme|creationTime|2013-11-21T02:17:34Z +http://www.semanlink.net/tag/mutualisme|prefLabel|Mutualisme +http://www.semanlink.net/tag/mutualisme|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/mutualisme|creationDate|2013-11-21 +http://www.semanlink.net/tag/mutualisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mutualisme|uri|http://www.semanlink.net/tag/mutualisme +http://www.semanlink.net/tag/mutualisme|broader_prefLabel|Biology +http://www.semanlink.net/tag/mutualisme|broader_altLabel|Biologie +http://www.semanlink.net/tag/denny_vrandecic|creationTime|2013-07-11T13:20:30Z +http://www.semanlink.net/tag/denny_vrandecic|prefLabel|Denny Vrandečić +http://www.semanlink.net/tag/denny_vrandecic|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/denny_vrandecic|related|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/denny_vrandecic|related|http://www.semanlink.net/tag/wikidata +http://www.semanlink.net/tag/denny_vrandecic|related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/denny_vrandecic|creationDate|2013-07-11 +http://www.semanlink.net/tag/denny_vrandecic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/denny_vrandecic|uri|http://www.semanlink.net/tag/denny_vrandecic +http://www.semanlink.net/tag/denny_vrandecic|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/apprendre_a_apprendre|creationTime|2014-02-27T00:25:44Z +http://www.semanlink.net/tag/apprendre_a_apprendre|prefLabel|Apprendre à apprendre +http://www.semanlink.net/tag/apprendre_a_apprendre|creationDate|2014-02-27 +http://www.semanlink.net/tag/apprendre_a_apprendre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apprendre_a_apprendre|uri|http://www.semanlink.net/tag/apprendre_a_apprendre +http://www.semanlink.net/tag/rechauffement_climatique|prefLabel|Climate crisis +http://www.semanlink.net/tag/rechauffement_climatique|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/rechauffement_climatique|broader|http://www.semanlink.net/tag/changement_climatique +http://www.semanlink.net/tag/rechauffement_climatique|related|http://www.semanlink.net/tag/anthropocene +http://www.semanlink.net/tag/rechauffement_climatique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rechauffement_climatique|altLabel|Réchauffement climatique +http://www.semanlink.net/tag/rechauffement_climatique|altLabel|Global warming +http://www.semanlink.net/tag/rechauffement_climatique|uri|http://www.semanlink.net/tag/rechauffement_climatique +http://www.semanlink.net/tag/rechauffement_climatique|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/rechauffement_climatique|broader_prefLabel|Changement climatique +http://www.semanlink.net/tag/katie_portwin|creationTime|2007-05-17T23:36:17Z +http://www.semanlink.net/tag/katie_portwin|prefLabel|Katie Portwin 
+http://www.semanlink.net/tag/katie_portwin|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/katie_portwin|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/katie_portwin|related|http://www.semanlink.net/tag/jena_user_conference +http://www.semanlink.net/tag/katie_portwin|creationDate|2007-05-17 +http://www.semanlink.net/tag/katie_portwin|comment|I saw her twice (Jena User Conference 2006 and XTech 2007), and twice she gave very good talks. +http://www.semanlink.net/tag/katie_portwin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/katie_portwin|uri|http://www.semanlink.net/tag/katie_portwin +http://www.semanlink.net/tag/katie_portwin|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/katie_portwin|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/katie_portwin|broader_altLabel|Technical guys +http://www.semanlink.net/tag/sent2vec|creationTime|2019-03-25T15:35:19Z +http://www.semanlink.net/tag/sent2vec|prefLabel|Sent2Vec +http://www.semanlink.net/tag/sent2vec|broader|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/tag/sent2vec|creationDate|2019-03-25 +http://www.semanlink.net/tag/sent2vec|comment|"Unsupervised method to learn sentence representations. + +> Conceptually, +the model can be interpreted as a natural +extension of the word-contexts from C-BOW +to a larger sentence context, +with the sentence words being specifically +optimized towards additive combination over the +sentence, by means of the unsupervised objective +function +" +http://www.semanlink.net/tag/sent2vec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sent2vec|uri|http://www.semanlink.net/tag/sent2vec +http://www.semanlink.net/tag/sent2vec|broader_prefLabel|Sentence Embeddings +http://www.semanlink.net/tag/unit_test|creationTime|2011-06-29T18:08:53Z +http://www.semanlink.net/tag/unit_test|prefLabel|Unit test +http://www.semanlink.net/tag/unit_test|broader|http://www.semanlink.net/tag/tests +http://www.semanlink.net/tag/unit_test|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/unit_test|creationDate|2011-06-29 +http://www.semanlink.net/tag/unit_test|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/unit_test|uri|http://www.semanlink.net/tag/unit_test +http://www.semanlink.net/tag/unit_test|broader_prefLabel|Tests +http://www.semanlink.net/tag/unit_test|broader_prefLabel|Dev +http://www.semanlink.net/tag/unit_test|broader_altLabel|Test +http://www.semanlink.net/tag/thewebconf_2020|creationTime|2020-04-25T10:12:13Z +http://www.semanlink.net/tag/thewebconf_2020|prefLabel|TheWebConf 2020 +http://www.semanlink.net/tag/thewebconf_2020|broader|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/thewebconf_2020|creationDate|2020-04-25 +http://www.semanlink.net/tag/thewebconf_2020|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thewebconf_2020|uri|http://www.semanlink.net/tag/thewebconf_2020 +http://www.semanlink.net/tag/thewebconf_2020|broader_prefLabel|TheWebConf +http://www.semanlink.net/tag/thewebconf_2020|broader_altLabel|WWW Conference +http://www.semanlink.net/tag/tomcat_in_eclipse|creationTime|2007-10-22T21:12:09Z +http://www.semanlink.net/tag/tomcat_in_eclipse|prefLabel|Tomcat in Eclipse +http://www.semanlink.net/tag/tomcat_in_eclipse|creationDate|2007-10-22 
+http://www.semanlink.net/tag/tomcat_in_eclipse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tomcat_in_eclipse|uri|http://www.semanlink.net/tag/tomcat_in_eclipse +http://www.semanlink.net/tag/astronomie|prefLabel|Astronomie +http://www.semanlink.net/tag/astronomie|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/astronomie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/astronomie|uri|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/astronomie|broader_prefLabel|Science +http://www.semanlink.net/tag/astronomie|broader_altLabel|sciences +http://www.semanlink.net/tag/hal|creationTime|2017-05-15T10:56:21Z +http://www.semanlink.net/tag/hal|prefLabel|HAL +http://www.semanlink.net/tag/hal|broader|http://www.semanlink.net/tag/restful_web_services +http://www.semanlink.net/tag/hal|creationDate|2017-05-15 +http://www.semanlink.net/tag/hal|comment| Hypertext Application Language +http://www.semanlink.net/tag/hal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hal|describedBy|https://en.wikipedia.org/wiki/Hypertext_Application_Language +http://www.semanlink.net/tag/hal|uri|http://www.semanlink.net/tag/hal +http://www.semanlink.net/tag/hal|broader_prefLabel|RESTful Web Services +http://www.semanlink.net/tag/accelerated_mobile_pages|creationTime|2018-08-05T15:54:51Z +http://www.semanlink.net/tag/accelerated_mobile_pages|prefLabel|Accelerated Mobile Pages (AMP) +http://www.semanlink.net/tag/accelerated_mobile_pages|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/accelerated_mobile_pages|creationDate|2018-08-05 +http://www.semanlink.net/tag/accelerated_mobile_pages|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/accelerated_mobile_pages|describedBy|https://en.wikipedia.org/wiki/Accelerated_Mobile_Pages +http://www.semanlink.net/tag/accelerated_mobile_pages|uri|http://www.semanlink.net/tag/accelerated_mobile_pages +http://www.semanlink.net/tag/accelerated_mobile_pages|broader_prefLabel|Google +http://www.semanlink.net/tag/accelerated_mobile_pages|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/espagne|prefLabel|Espagne +http://www.semanlink.net/tag/espagne|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/espagne|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/espagne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/espagne|uri|http://www.semanlink.net/tag/espagne +http://www.semanlink.net/tag/espagne|broader_prefLabel|Europe +http://www.semanlink.net/tag/espagne|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/reboisement|creationTime|2020-07-02T15:19:49Z +http://www.semanlink.net/tag/reboisement|prefLabel|Reboisement +http://www.semanlink.net/tag/reboisement|broader|http://www.semanlink.net/tag/plantation_d_arbres +http://www.semanlink.net/tag/reboisement|creationDate|2020-07-02 +http://www.semanlink.net/tag/reboisement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reboisement|uri|http://www.semanlink.net/tag/reboisement +http://www.semanlink.net/tag/reboisement|broader_prefLabel|Plantation d'arbres +http://www.semanlink.net/tag/gravitation|prefLabel|Gravitation +http://www.semanlink.net/tag/gravitation|broader|http://www.semanlink.net/tag/physique 
+http://www.semanlink.net/tag/gravitation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gravitation|altLabel|Gravity +http://www.semanlink.net/tag/gravitation|uri|http://www.semanlink.net/tag/gravitation +http://www.semanlink.net/tag/gravitation|broader_prefLabel|Physique +http://www.semanlink.net/tag/gravitation|broader_altLabel|Physics +http://www.semanlink.net/tag/ours|prefLabel|Ours +http://www.semanlink.net/tag/ours|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/ours|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ours|uri|http://www.semanlink.net/tag/ours +http://www.semanlink.net/tag/ours|broader_prefLabel|Animal +http://www.semanlink.net/tag/dosso|prefLabel|Dosso +http://www.semanlink.net/tag/dosso|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/dosso|broader|http://www.semanlink.net/tag/jerma +http://www.semanlink.net/tag/dosso|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dosso|sameAs|http://dbpedia.org/resource/Dosso%2C_Niger +http://www.semanlink.net/tag/dosso|uri|http://www.semanlink.net/tag/dosso +http://www.semanlink.net/tag/dosso|broader_prefLabel|Niger +http://www.semanlink.net/tag/dosso|broader_prefLabel|Jerma +http://www.semanlink.net/tag/dosso|broader_altLabel|Djerma +http://www.semanlink.net/tag/dosso|broader_altLabel|Zarma +http://www.semanlink.net/tag/dosso|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/dosso|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/dosso|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/parlement_europeen|prefLabel|Parlement européen +http://www.semanlink.net/tag/parlement_europeen|broader|http://www.semanlink.net/tag/union_europeenne +http://www.semanlink.net/tag/parlement_europeen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/parlement_europeen|uri|http://www.semanlink.net/tag/parlement_europeen +http://www.semanlink.net/tag/parlement_europeen|broader_prefLabel|Union européenne +http://www.semanlink.net/tag/parlement_europeen|broader_altLabel|UE +http://www.semanlink.net/tag/product_types_ontology|creationTime|2011-04-05T13:43:26Z +http://www.semanlink.net/tag/product_types_ontology|prefLabel|Product Types Ontology +http://www.semanlink.net/tag/product_types_ontology|broader|http://www.semanlink.net/tag/goodrelations +http://www.semanlink.net/tag/product_types_ontology|creationDate|2011-04-05 +http://www.semanlink.net/tag/product_types_ontology|comment|"Good identifiers for product types based on Wikipedia
+GoodRelations-compatible OWL DL class definitions for ca. 300,000 types of product or services that have an entry in the English Wikipedia +" +http://www.semanlink.net/tag/product_types_ontology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/product_types_ontology|homepage|http://www.productontology.org/ +http://www.semanlink.net/tag/product_types_ontology|uri|http://www.semanlink.net/tag/product_types_ontology +http://www.semanlink.net/tag/product_types_ontology|broader_prefLabel|GoodRelations +http://www.semanlink.net/tag/product_types_ontology|broader_related|http://www.semanlink.net/tag/martin_hepp +http://www.semanlink.net/tag/charbon|creationTime|2016-11-17T00:11:58Z +http://www.semanlink.net/tag/charbon|prefLabel|Charbon +http://www.semanlink.net/tag/charbon|creationDate|2016-11-17 +http://www.semanlink.net/tag/charbon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/charbon|uri|http://www.semanlink.net/tag/charbon +http://www.semanlink.net/tag/etat_du_monde|prefLabel|Etat du monde +http://www.semanlink.net/tag/etat_du_monde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/etat_du_monde|uri|http://www.semanlink.net/tag/etat_du_monde +http://www.semanlink.net/tag/liberte_liberte_cherie|creationTime|2014-06-06T22:57:49Z +http://www.semanlink.net/tag/liberte_liberte_cherie|prefLabel|Liberté, liberté chérie +http://www.semanlink.net/tag/liberte_liberte_cherie|broader|http://www.semanlink.net/tag/liberte +http://www.semanlink.net/tag/liberte_liberte_cherie|related|http://www.semanlink.net/tag/marseillaise +http://www.semanlink.net/tag/liberte_liberte_cherie|creationDate|2014-06-06 +http://www.semanlink.net/tag/liberte_liberte_cherie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/liberte_liberte_cherie|uri|http://www.semanlink.net/tag/liberte_liberte_cherie +http://www.semanlink.net/tag/liberte_liberte_cherie|broader_prefLabel|Liberté +http://www.semanlink.net/tag/liberte_liberte_cherie|broader_related|http://www.semanlink.net/tag/etat_policier +http://www.semanlink.net/tag/reto_bachmann_gmur|creationTime|2011-09-09T21:54:10Z +http://www.semanlink.net/tag/reto_bachmann_gmur|prefLabel|Reto Bachmann-Gmür +http://www.semanlink.net/tag/reto_bachmann_gmur|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/reto_bachmann_gmur|related|http://www.semanlink.net/tag/clerezza +http://www.semanlink.net/tag/reto_bachmann_gmur|related|http://www.semanlink.net/tag/jena_user_conference +http://www.semanlink.net/tag/reto_bachmann_gmur|creationDate|2011-09-09 +http://www.semanlink.net/tag/reto_bachmann_gmur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reto_bachmann_gmur|uri|http://www.semanlink.net/tag/reto_bachmann_gmur +http://www.semanlink.net/tag/reto_bachmann_gmur|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/squeak|creationTime|2007-09-10T19:48:54Z +http://www.semanlink.net/tag/squeak|prefLabel|Squeak +http://www.semanlink.net/tag/squeak|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/squeak|broader|http://www.semanlink.net/tag/smalltalk +http://www.semanlink.net/tag/squeak|related|http://www.semanlink.net/tag/one_laptop_per_child +http://www.semanlink.net/tag/squeak|related|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/squeak|related|http://www.semanlink.net/tag/alan_kay 
+http://www.semanlink.net/tag/squeak|creationDate|2007-09-10 +http://www.semanlink.net/tag/squeak|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/squeak|homepage|http://www.squeak.org +http://www.semanlink.net/tag/squeak|uri|http://www.semanlink.net/tag/squeak +http://www.semanlink.net/tag/squeak|broader_prefLabel|Open Source +http://www.semanlink.net/tag/squeak|broader_prefLabel|Smalltalk +http://www.semanlink.net/tag/favoris|prefLabel|Favoris +http://www.semanlink.net/tag/favoris|broader|http://www.semanlink.net/tag/i_like +http://www.semanlink.net/tag/favoris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/favoris|altLabel|favorites +http://www.semanlink.net/tag/favoris|uri|http://www.semanlink.net/tag/favoris +http://www.semanlink.net/tag/favoris|broader_prefLabel|I like +http://www.semanlink.net/tag/favoris|broader_prefLabel|I like +http://www.semanlink.net/tag/separation_of_man_and_ape|prefLabel|Separation of man and ape +http://www.semanlink.net/tag/separation_of_man_and_ape|broader|http://www.semanlink.net/tag/grands_singes +http://www.semanlink.net/tag/separation_of_man_and_ape|broader|http://www.semanlink.net/tag/origines_de_l_homme +http://www.semanlink.net/tag/separation_of_man_and_ape|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/separation_of_man_and_ape|uri|http://www.semanlink.net/tag/separation_of_man_and_ape +http://www.semanlink.net/tag/separation_of_man_and_ape|broader_prefLabel|Grands Singes +http://www.semanlink.net/tag/separation_of_man_and_ape|broader_prefLabel|Origines de l'homme +http://www.semanlink.net/tag/separation_of_man_and_ape|broader_altLabel|Apes +http://www.semanlink.net/tag/entity_discovery_and_linking|creationTime|2020-01-09T14:56:36Z +http://www.semanlink.net/tag/entity_discovery_and_linking|prefLabel|Entity discovery and linking +http://www.semanlink.net/tag/entity_discovery_and_linking|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/entity_discovery_and_linking|broader|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/entity_discovery_and_linking|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/entity_discovery_and_linking|related|http://www.semanlink.net/tag/relation_extraction +http://www.semanlink.net/tag/entity_discovery_and_linking|creationDate|2020-01-09 +http://www.semanlink.net/tag/entity_discovery_and_linking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_discovery_and_linking|altLabel|Entity Retrieval +http://www.semanlink.net/tag/entity_discovery_and_linking|altLabel|Entity Analysis +http://www.semanlink.net/tag/entity_discovery_and_linking|uri|http://www.semanlink.net/tag/entity_discovery_and_linking +http://www.semanlink.net/tag/entity_discovery_and_linking|broader_prefLabel|Entities +http://www.semanlink.net/tag/entity_discovery_and_linking|broader_prefLabel|Information extraction +http://www.semanlink.net/tag/entity_discovery_and_linking|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/entity_discovery_and_linking|broader_related|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/tag/a_history_of_the_world_since_1300|creationTime|2013-10-09T21:41:48Z +http://www.semanlink.net/tag/a_history_of_the_world_since_1300|prefLabel|Coursera: A History of the World since 1300 
+http://www.semanlink.net/tag/a_history_of_the_world_since_1300|broader|http://www.semanlink.net/tag/histoire_du_monde +http://www.semanlink.net/tag/a_history_of_the_world_since_1300|broader|http://www.semanlink.net/tag/coursera +http://www.semanlink.net/tag/a_history_of_the_world_since_1300|creationDate|2013-10-09 +http://www.semanlink.net/tag/a_history_of_the_world_since_1300|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/a_history_of_the_world_since_1300|homepage|https://class.coursera.org/wh1300-002/class +http://www.semanlink.net/tag/a_history_of_the_world_since_1300|uri|http://www.semanlink.net/tag/a_history_of_the_world_since_1300 +http://www.semanlink.net/tag/a_history_of_the_world_since_1300|broader_prefLabel|Histoire du monde +http://www.semanlink.net/tag/a_history_of_the_world_since_1300|broader_prefLabel|Coursera +http://www.semanlink.net/tag/a_history_of_the_world_since_1300|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/nelson_mandela|prefLabel|Nelson Mandela +http://www.semanlink.net/tag/nelson_mandela|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/nelson_mandela|broader|http://www.semanlink.net/tag/afrique_du_sud +http://www.semanlink.net/tag/nelson_mandela|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nelson_mandela|uri|http://www.semanlink.net/tag/nelson_mandela +http://www.semanlink.net/tag/nelson_mandela|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/nelson_mandela|broader_prefLabel|Afrique du Sud +http://www.semanlink.net/tag/robotic_imitation|creationTime|2018-10-27T14:42:49Z +http://www.semanlink.net/tag/robotic_imitation|prefLabel|Robotic imitation +http://www.semanlink.net/tag/robotic_imitation|broader|http://www.semanlink.net/tag/robotique +http://www.semanlink.net/tag/robotic_imitation|related|http://www.semanlink.net/tag/learning_by_imitation +http://www.semanlink.net/tag/robotic_imitation|creationDate|2018-10-27 +http://www.semanlink.net/tag/robotic_imitation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/robotic_imitation|uri|http://www.semanlink.net/tag/robotic_imitation +http://www.semanlink.net/tag/robotic_imitation|broader_prefLabel|Robotique +http://www.semanlink.net/tag/robotic_imitation|broader_altLabel|Robotics +http://www.semanlink.net/tag/robotic_imitation|broader_altLabel|Robot +http://www.semanlink.net/tag/nlp_stanford|creationTime|2017-05-23T15:09:19Z +http://www.semanlink.net/tag/nlp_stanford|prefLabel|NLP@Stanford +http://www.semanlink.net/tag/nlp_stanford|broader|http://www.semanlink.net/tag/ai_stanford +http://www.semanlink.net/tag/nlp_stanford|broader|http://www.semanlink.net/tag/stanford +http://www.semanlink.net/tag/nlp_stanford|broader|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/tag/nlp_stanford|related|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/tag/nlp_stanford|related|http://www.semanlink.net/tag/dan_jurafsky +http://www.semanlink.net/tag/nlp_stanford|creationDate|2017-05-23 +http://www.semanlink.net/tag/nlp_stanford|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_stanford|uri|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/tag/nlp_stanford|broader_prefLabel|AI@Stanford +http://www.semanlink.net/tag/nlp_stanford|broader_prefLabel|Stanford +http://www.semanlink.net/tag/nlp_stanford|broader_prefLabel|NLP Teams 
+http://www.semanlink.net/tag/nlp_stanford|broader_altLabel|NLP Groups +http://www.semanlink.net/tag/jeopardy|creationTime|2014-02-18T01:03:37Z +http://www.semanlink.net/tag/jeopardy|prefLabel|Jeopardy +http://www.semanlink.net/tag/jeopardy|broader|http://www.semanlink.net/tag/jeux +http://www.semanlink.net/tag/jeopardy|related|http://www.semanlink.net/tag/i_b_m_s_watson +http://www.semanlink.net/tag/jeopardy|creationDate|2014-02-18 +http://www.semanlink.net/tag/jeopardy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeopardy|uri|http://www.semanlink.net/tag/jeopardy +http://www.semanlink.net/tag/jeopardy|broader_prefLabel|Jeux +http://www.semanlink.net/tag/missing_labels_ml|creationTime|2018-03-03T14:33:23Z +http://www.semanlink.net/tag/missing_labels_ml|prefLabel|Missing Labels (ML) +http://www.semanlink.net/tag/missing_labels_ml|broader|http://www.semanlink.net/tag/supervised_machine_learning +http://www.semanlink.net/tag/missing_labels_ml|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/missing_labels_ml|creationDate|2018-03-03 +http://www.semanlink.net/tag/missing_labels_ml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/missing_labels_ml|uri|http://www.semanlink.net/tag/missing_labels_ml +http://www.semanlink.net/tag/missing_labels_ml|broader_prefLabel|Supervised machine learning +http://www.semanlink.net/tag/missing_labels_ml|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/goodrelations_renault|creationTime|2011-06-07T16:56:17Z +http://www.semanlink.net/tag/goodrelations_renault|prefLabel|GoodRelations/Renault +http://www.semanlink.net/tag/goodrelations_renault|broader|http://www.semanlink.net/tag/goodrelations +http://www.semanlink.net/tag/goodrelations_renault|creationDate|2011-06-07 +http://www.semanlink.net/tag/goodrelations_renault|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/goodrelations_renault|uri|http://www.semanlink.net/tag/goodrelations_renault +http://www.semanlink.net/tag/goodrelations_renault|broader_prefLabel|GoodRelations +http://www.semanlink.net/tag/goodrelations_renault|broader_related|http://www.semanlink.net/tag/martin_hepp +http://www.semanlink.net/tag/clustering_of_text_documents|creationTime|2017-05-22T12:07:14Z +http://www.semanlink.net/tag/clustering_of_text_documents|prefLabel|Clustering of text documents +http://www.semanlink.net/tag/clustering_of_text_documents|broader|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/tag/clustering_of_text_documents|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/clustering_of_text_documents|related|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/clustering_of_text_documents|creationDate|2017-05-22 +http://www.semanlink.net/tag/clustering_of_text_documents|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/clustering_of_text_documents|altLabel|Text Clustering +http://www.semanlink.net/tag/clustering_of_text_documents|uri|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/clustering_of_text_documents|broader_prefLabel|Clustering +http://www.semanlink.net/tag/clustering_of_text_documents|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/clustering_of_text_documents|broader_altLabel|Data clustering 
+http://www.semanlink.net/tag/clustering_of_text_documents|broader_altLabel|Cluster analysis +http://www.semanlink.net/tag/clustering_of_text_documents|broader_related|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/aldous_huxley|prefLabel|Aldous Huxley +http://www.semanlink.net/tag/aldous_huxley|broader|http://www.semanlink.net/tag/anticipation +http://www.semanlink.net/tag/aldous_huxley|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/aldous_huxley|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aldous_huxley|uri|http://www.semanlink.net/tag/aldous_huxley +http://www.semanlink.net/tag/aldous_huxley|broader_prefLabel|Anticipation +http://www.semanlink.net/tag/aldous_huxley|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/uncontacted_peoples|creationTime|2017-05-26T00:42:17Z +http://www.semanlink.net/tag/uncontacted_peoples|prefLabel|Uncontacted peoples +http://www.semanlink.net/tag/uncontacted_peoples|broader|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/uncontacted_peoples|creationDate|2017-05-26 +http://www.semanlink.net/tag/uncontacted_peoples|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uncontacted_peoples|describedBy|https://en.wikipedia.org/wiki/Uncontacted_peoples +http://www.semanlink.net/tag/uncontacted_peoples|uri|http://www.semanlink.net/tag/uncontacted_peoples +http://www.semanlink.net/tag/uncontacted_peoples|broader_prefLabel|Peuples +http://www.semanlink.net/tag/go_game|creationTime|2014-12-27T14:50:07Z +http://www.semanlink.net/tag/go_game|prefLabel|Go (Game) +http://www.semanlink.net/tag/go_game|broader|http://www.semanlink.net/tag/jeu +http://www.semanlink.net/tag/go_game|creationDate|2014-12-27 +http://www.semanlink.net/tag/go_game|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/go_game|uri|http://www.semanlink.net/tag/go_game +http://www.semanlink.net/tag/go_game|broader_prefLabel|Jeu +http://www.semanlink.net/tag/explosion_cambrienne|creationTime|2011-10-20T00:47:30Z +http://www.semanlink.net/tag/explosion_cambrienne|prefLabel|Explosion cambrienne +http://www.semanlink.net/tag/explosion_cambrienne|broader|http://www.semanlink.net/tag/histoire_de_la_vie +http://www.semanlink.net/tag/explosion_cambrienne|creationDate|2011-10-20 +http://www.semanlink.net/tag/explosion_cambrienne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/explosion_cambrienne|uri|http://www.semanlink.net/tag/explosion_cambrienne +http://www.semanlink.net/tag/explosion_cambrienne|broader_prefLabel|Histoire de la vie +http://www.semanlink.net/tag/personal_assistant|creationTime|2020-01-16T01:26:53Z +http://www.semanlink.net/tag/personal_assistant|prefLabel|Personal assistant +http://www.semanlink.net/tag/personal_assistant|broader|http://www.semanlink.net/tag/human_ai_collaboration +http://www.semanlink.net/tag/personal_assistant|related|http://www.semanlink.net/tag/human_ai_collaboration +http://www.semanlink.net/tag/personal_assistant|creationDate|2020-01-16 +http://www.semanlink.net/tag/personal_assistant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/personal_assistant|uri|http://www.semanlink.net/tag/personal_assistant +http://www.semanlink.net/tag/personal_assistant|broader_prefLabel|Human-AI collaboration +http://www.semanlink.net/tag/economie_ecologique|creationTime|2008-10-25T00:37:21Z 
+http://www.semanlink.net/tag/economie_ecologique|prefLabel|Économie écologique +http://www.semanlink.net/tag/economie_ecologique|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/economie_ecologique|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/economie_ecologique|creationDate|2008-10-25 +http://www.semanlink.net/tag/economie_ecologique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/economie_ecologique|uri|http://www.semanlink.net/tag/economie_ecologique +http://www.semanlink.net/tag/economie_ecologique|broader_prefLabel|Écologie +http://www.semanlink.net/tag/economie_ecologique|broader_prefLabel|Economie +http://www.semanlink.net/tag/economie_ecologique|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/ldow2008|creationTime|2008-02-26T18:13:03Z +http://www.semanlink.net/tag/ldow2008|prefLabel|LDOW2008 +http://www.semanlink.net/tag/ldow2008|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/ldow2008|broader|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/ldow2008|broader|http://www.semanlink.net/tag/www08 +http://www.semanlink.net/tag/ldow2008|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/ldow2008|broader|http://www.semanlink.net/tag/ldow +http://www.semanlink.net/tag/ldow2008|creationDate|2008-02-26 +http://www.semanlink.net/tag/ldow2008|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ldow2008|homepage|http://events.linkeddata.org/ldow2008/ +http://www.semanlink.net/tag/ldow2008|uri|http://www.semanlink.net/tag/ldow2008 +http://www.semanlink.net/tag/ldow2008|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/ldow2008|broader_prefLabel|Workshop +http://www.semanlink.net/tag/ldow2008|broader_prefLabel|WWW 2008 +http://www.semanlink.net/tag/ldow2008|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/ldow2008|broader_prefLabel|LDOW +http://www.semanlink.net/tag/ldow2008|broader_altLabel|LD +http://www.semanlink.net/tag/ldow2008|broader_altLabel|LOD +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/ldow2008|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life 
+http://www.semanlink.net/tag/nlp_and_search|creationTime|2020-01-12T17:21:04Z
+http://www.semanlink.net/tag/nlp_and_search|prefLabel|NLP and Search
+http://www.semanlink.net/tag/nlp_and_search|broader|http://www.semanlink.net/tag/search
+http://www.semanlink.net/tag/nlp_and_search|broader|http://www.semanlink.net/tag/nlp_use_cases
+http://www.semanlink.net/tag/nlp_and_search|creationDate|2020-01-12
+http://www.semanlink.net/tag/nlp_and_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nlp_and_search|uri|http://www.semanlink.net/tag/nlp_and_search
+http://www.semanlink.net/tag/nlp_and_search|broader_prefLabel|Search
+http://www.semanlink.net/tag/nlp_and_search|broader_prefLabel|NLP: use cases
+http://www.semanlink.net/tag/nlp_and_search|broader_altLabel|NLP: applications
+http://www.semanlink.net/tag/automotive_and_web_technologies|creationTime|2012-11-12T14:38:10Z
+http://www.semanlink.net/tag/automotive_and_web_technologies|prefLabel|Automotive and web technologies
+http://www.semanlink.net/tag/automotive_and_web_technologies|broader|http://www.semanlink.net/tag/automobile
+http://www.semanlink.net/tag/automotive_and_web_technologies|broader|http://www.semanlink.net/tag/automobile_2_0
+http://www.semanlink.net/tag/automotive_and_web_technologies|creationDate|2012-11-12
+http://www.semanlink.net/tag/automotive_and_web_technologies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/automotive_and_web_technologies|uri|http://www.semanlink.net/tag/automotive_and_web_technologies
+http://www.semanlink.net/tag/automotive_and_web_technologies|broader_prefLabel|Automobile
+http://www.semanlink.net/tag/automotive_and_web_technologies|broader_prefLabel|Automobile 2.0
+http://www.semanlink.net/tag/automotive_and_web_technologies|broader_altLabel|Automotive
+http://www.semanlink.net/tag/afrique_du_nord|prefLabel|Afrique du Nord
+http://www.semanlink.net/tag/afrique_du_nord|broader|http://www.semanlink.net/tag/afrique
+http://www.semanlink.net/tag/afrique_du_nord|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/afrique_du_nord|uri|http://www.semanlink.net/tag/afrique_du_nord
+http://www.semanlink.net/tag/afrique_du_nord|broader_prefLabel|Afrique
+http://www.semanlink.net/tag/afrique_du_nord|broader_altLabel|Africa
+http://www.semanlink.net/tag/uml|creationTime|2010-09-24T16:51:40Z
+http://www.semanlink.net/tag/uml|prefLabel|UML
+http://www.semanlink.net/tag/uml|creationDate|2010-09-24
+http://www.semanlink.net/tag/uml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/uml|uri|http://www.semanlink.net/tag/uml
+http://www.semanlink.net/tag/wordpress|creationTime|2007-07-07T14:48:26Z
+http://www.semanlink.net/tag/wordpress|prefLabel|WordPress
+http://www.semanlink.net/tag/wordpress|broader|http://www.semanlink.net/tag/blog_software
+http://www.semanlink.net/tag/wordpress|creationDate|2007-07-07
+http://www.semanlink.net/tag/wordpress|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/wordpress|uri|http://www.semanlink.net/tag/wordpress +http://www.semanlink.net/tag/wordpress|broader_prefLabel|Blog software +http://www.semanlink.net/tag/greve_du_sexe|creationTime|2016-07-25T16:55:29Z +http://www.semanlink.net/tag/greve_du_sexe|prefLabel|Grève du sexe +http://www.semanlink.net/tag/greve_du_sexe|broader|http://www.semanlink.net/tag/sexe +http://www.semanlink.net/tag/greve_du_sexe|broader|http://www.semanlink.net/tag/greve +http://www.semanlink.net/tag/greve_du_sexe|creationDate|2016-07-25 +http://www.semanlink.net/tag/greve_du_sexe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/greve_du_sexe|uri|http://www.semanlink.net/tag/greve_du_sexe +http://www.semanlink.net/tag/greve_du_sexe|broader_prefLabel|Sexe +http://www.semanlink.net/tag/greve_du_sexe|broader_prefLabel|Grève +http://www.semanlink.net/tag/semantic_desktop|prefLabel|Semantic Desktop +http://www.semanlink.net/tag/semantic_desktop|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_desktop|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/semantic_desktop|related|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/semantic_desktop|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_desktop|uri|http://www.semanlink.net/tag/semantic_desktop +http://www.semanlink.net/tag/semantic_desktop|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_desktop|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/semantic_desktop|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_desktop|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/google_play|creationTime|2013-08-18T13:03:43Z +http://www.semanlink.net/tag/google_play|prefLabel|Google Play +http://www.semanlink.net/tag/google_play|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_play|creationDate|2013-08-18 +http://www.semanlink.net/tag/google_play|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_play|uri|http://www.semanlink.net/tag/google_play +http://www.semanlink.net/tag/google_play|broader_prefLabel|Google +http://www.semanlink.net/tag/google_play|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/graph_parsing|creationTime|2021-08-02T08:22:41Z +http://www.semanlink.net/tag/graph_parsing|prefLabel|Graph Parsing +http://www.semanlink.net/tag/graph_parsing|broader|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/graph_parsing|creationDate|2021-08-02 +http://www.semanlink.net/tag/graph_parsing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_parsing|uri|http://www.semanlink.net/tag/graph_parsing +http://www.semanlink.net/tag/graph_parsing|broader_prefLabel|Graph +http://www.semanlink.net/tag/niger_agriculture|prefLabel|Niger : agriculture +http://www.semanlink.net/tag/niger_agriculture|broader|http://www.semanlink.net/tag/agriculture_africaine +http://www.semanlink.net/tag/niger_agriculture|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/niger_agriculture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/niger_agriculture|uri|http://www.semanlink.net/tag/niger_agriculture +http://www.semanlink.net/tag/niger_agriculture|broader_prefLabel|Agriculture africaine 
+http://www.semanlink.net/tag/niger_agriculture|broader_prefLabel|Niger +http://www.semanlink.net/tag/niger_agriculture|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/niger_agriculture|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/niger_agriculture|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/dbtune|creationTime|2008-11-20T21:58:58Z +http://www.semanlink.net/tag/dbtune|prefLabel|DBTune +http://www.semanlink.net/tag/dbtune|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/dbtune|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/dbtune|broader|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/dbtune|related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/dbtune|creationDate|2008-11-20 +http://www.semanlink.net/tag/dbtune|comment|DBTune - Serving music-related RDF since 2007 +http://www.semanlink.net/tag/dbtune|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dbtune|describedBy|http://dbtune.org/ +http://www.semanlink.net/tag/dbtune|uri|http://www.semanlink.net/tag/dbtune +http://www.semanlink.net/tag/dbtune|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/dbtune|broader_prefLabel|Musique +http://www.semanlink.net/tag/dbtune|broader_prefLabel|Yves Raymond +http://www.semanlink.net/tag/dbtune|broader_altLabel|LOD +http://www.semanlink.net/tag/dbtune|broader_altLabel|Music +http://www.semanlink.net/tag/dbtune|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/dbtune|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/dbtune|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/dbtune|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/sustainable_materials_lifecycle|creationTime|2011-09-21T23:14:50Z +http://www.semanlink.net/tag/sustainable_materials_lifecycle|prefLabel|Sustainable materials lifecycle +http://www.semanlink.net/tag/sustainable_materials_lifecycle|broader|http://www.semanlink.net/tag/economie_ecologique +http://www.semanlink.net/tag/sustainable_materials_lifecycle|creationDate|2011-09-21 +http://www.semanlink.net/tag/sustainable_materials_lifecycle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sustainable_materials_lifecycle|uri|http://www.semanlink.net/tag/sustainable_materials_lifecycle +http://www.semanlink.net/tag/sustainable_materials_lifecycle|broader_prefLabel|Économie écologique +http://www.semanlink.net/tag/keep_new|creationTime|2010-05-20T01:00:43Z +http://www.semanlink.net/tag/keep_new|prefLabel|Keep new +http://www.semanlink.net/tag/keep_new|broader|http://www.semanlink.net/tag/to_do +http://www.semanlink.net/tag/keep_new|creationDate|2010-05-20 +http://www.semanlink.net/tag/keep_new|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/keep_new|uri|http://www.semanlink.net/tag/keep_new +http://www.semanlink.net/tag/keep_new|broader_prefLabel|To do +http://www.semanlink.net/tag/keep_new|broader_altLabel|Todo +http://www.semanlink.net/tag/oiseau|prefLabel|Oiseau +http://www.semanlink.net/tag/oiseau|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/oiseau|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/oiseau|uri|http://www.semanlink.net/tag/oiseau +http://www.semanlink.net/tag/oiseau|broader_prefLabel|Animal +http://www.semanlink.net/tag/telescope|prefLabel|Télescope +http://www.semanlink.net/tag/telescope|broader|http://www.semanlink.net/tag/exploration_spatiale +http://www.semanlink.net/tag/telescope|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/telescope|uri|http://www.semanlink.net/tag/telescope +http://www.semanlink.net/tag/telescope|broader_prefLabel|Exploration spatiale +http://www.semanlink.net/tag/human_ai_collaboration|creationTime|2019-08-12T11:25:44Z +http://www.semanlink.net/tag/human_ai_collaboration|prefLabel|Human-AI collaboration +http://www.semanlink.net/tag/human_ai_collaboration|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/human_ai_collaboration|creationDate|2019-08-12 +http://www.semanlink.net/tag/human_ai_collaboration|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/human_ai_collaboration|uri|http://www.semanlink.net/tag/human_ai_collaboration +http://www.semanlink.net/tag/human_ai_collaboration|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/human_ai_collaboration|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/human_ai_collaboration|broader_altLabel|AI +http://www.semanlink.net/tag/human_ai_collaboration|broader_altLabel|IA +http://www.semanlink.net/tag/human_ai_collaboration|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/grippe_aviaire|prefLabel|Grippe aviaire +http://www.semanlink.net/tag/grippe_aviaire|broader|http://www.semanlink.net/tag/maladie +http://www.semanlink.net/tag/grippe_aviaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grippe_aviaire|uri|http://www.semanlink.net/tag/grippe_aviaire +http://www.semanlink.net/tag/grippe_aviaire|broader_prefLabel|Maladie +http://www.semanlink.net/tag/java_concurrency|creationTime|2014-09-17T17:35:13Z +http://www.semanlink.net/tag/java_concurrency|prefLabel|Java concurrency +http://www.semanlink.net/tag/java_concurrency|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java_concurrency|creationDate|2014-09-17 +http://www.semanlink.net/tag/java_concurrency|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_concurrency|altLabel|Java synchronization +http://www.semanlink.net/tag/java_concurrency|uri|http://www.semanlink.net/tag/java_concurrency +http://www.semanlink.net/tag/java_concurrency|broader_prefLabel|Java +http://www.semanlink.net/tag/max_halford|creationTime|2020-10-05T00:11:07Z +http://www.semanlink.net/tag/max_halford|prefLabel|Max Halford +http://www.semanlink.net/tag/max_halford|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/max_halford|related|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/tag/max_halford|creationDate|2020-10-05 +http://www.semanlink.net/tag/max_halford|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/max_halford|uri|http://www.semanlink.net/tag/max_halford +http://www.semanlink.net/tag/max_halford|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/nissan|prefLabel|Nissan +http://www.semanlink.net/tag/nissan|broader|http://www.semanlink.net/tag/japon 
+http://www.semanlink.net/tag/nissan|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/nissan|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/nissan|creationDate|2006-09-19 +http://www.semanlink.net/tag/nissan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nissan|uri|http://www.semanlink.net/tag/nissan +http://www.semanlink.net/tag/nissan|broader_prefLabel|Japon +http://www.semanlink.net/tag/nissan|broader_prefLabel|Automobile +http://www.semanlink.net/tag/nissan|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/nissan|broader_altLabel|Japan +http://www.semanlink.net/tag/nissan|broader_altLabel|Automotive +http://www.semanlink.net/tag/wikidata_rdf|creationTime|2013-09-12T00:07:47Z +http://www.semanlink.net/tag/wikidata_rdf|prefLabel|Wikidata/RDF +http://www.semanlink.net/tag/wikidata_rdf|broader|http://www.semanlink.net/tag/wikidata +http://www.semanlink.net/tag/wikidata_rdf|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/wikidata_rdf|creationDate|2013-09-12 +http://www.semanlink.net/tag/wikidata_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wikidata_rdf|uri|http://www.semanlink.net/tag/wikidata_rdf +http://www.semanlink.net/tag/wikidata_rdf|broader_prefLabel|Wikidata +http://www.semanlink.net/tag/wikidata_rdf|broader_prefLabel|RDF +http://www.semanlink.net/tag/wikidata_rdf|broader_related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/wikidata_rdf|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/wikidata_rdf|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/wikidata_rdf|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/wikidata_rdf|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/wikidata_rdf|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/docker|creationTime|2015-11-22T18:02:41Z +http://www.semanlink.net/tag/docker|prefLabel|Docker +http://www.semanlink.net/tag/docker|creationDate|2015-11-22 +http://www.semanlink.net/tag/docker|comment|"Open-source project that automates the deployment of applications inside software containers, by providing an additional layer of abstraction and automation of operating-system-level virtualization on Linux. 
+" +http://www.semanlink.net/tag/docker|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/docker|describedBy|https://en.wikipedia.org/wiki/Docker_(software) +http://www.semanlink.net/tag/docker|uri|http://www.semanlink.net/tag/docker +http://www.semanlink.net/tag/decouverte_d_especes_inconnues|prefLabel|Découverte d'espèces inconnues +http://www.semanlink.net/tag/decouverte_d_especes_inconnues|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/decouverte_d_especes_inconnues|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/decouverte_d_especes_inconnues|uri|http://www.semanlink.net/tag/decouverte_d_especes_inconnues +http://www.semanlink.net/tag/decouverte_d_especes_inconnues|broader_prefLabel|Biology +http://www.semanlink.net/tag/decouverte_d_especes_inconnues|broader_altLabel|Biologie +http://www.semanlink.net/tag/jupyter|creationTime|2018-05-29T13:34:46Z +http://www.semanlink.net/tag/jupyter|prefLabel|Jupyter +http://www.semanlink.net/tag/jupyter|broader|http://www.semanlink.net/tag/python_tools +http://www.semanlink.net/tag/jupyter|creationDate|2018-05-29 +http://www.semanlink.net/tag/jupyter|comment|"- shift-tab (de 1 à 3 fois) +- ?xxx eg. ?learn.predict -> doc +- ??xxx eg. `??learn.predict -> source code +- H -> liste des raccourcis + +" +http://www.semanlink.net/tag/jupyter|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jupyter|uri|http://www.semanlink.net/tag/jupyter +http://www.semanlink.net/tag/jupyter|broader_prefLabel|Python tools +http://www.semanlink.net/tag/junk_dna|prefLabel|Junk DNA +http://www.semanlink.net/tag/junk_dna|broader|http://www.semanlink.net/tag/adn +http://www.semanlink.net/tag/junk_dna|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/junk_dna|uri|http://www.semanlink.net/tag/junk_dna +http://www.semanlink.net/tag/junk_dna|broader_prefLabel|ADN +http://www.semanlink.net/tag/junk_dna|broader_altLabel|DNA +http://www.semanlink.net/tag/pfia_2018|creationTime|2018-07-07T12:42:09Z +http://www.semanlink.net/tag/pfia_2018|prefLabel|PFIA 2018 +http://www.semanlink.net/tag/pfia_2018|broader|http://www.semanlink.net/tag/ai_conference +http://www.semanlink.net/tag/pfia_2018|creationDate|2018-07-07 +http://www.semanlink.net/tag/pfia_2018|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pfia_2018|homepage|http://pfia2018.loria.fr +http://www.semanlink.net/tag/pfia_2018|uri|http://www.semanlink.net/tag/pfia_2018 +http://www.semanlink.net/tag/pfia_2018|broader_prefLabel|AI Conference +http://www.semanlink.net/tag/renato_matos|creationTime|2007-01-30T21:39:48Z +http://www.semanlink.net/tag/renato_matos|prefLabel|Renato Matos +http://www.semanlink.net/tag/renato_matos|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/renato_matos|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/renato_matos|creationDate|2007-01-30 +http://www.semanlink.net/tag/renato_matos|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/renato_matos|describedBy|https://pt.wikipedia.org/wiki/Renato_Matos +http://www.semanlink.net/tag/renato_matos|uri|http://www.semanlink.net/tag/renato_matos +http://www.semanlink.net/tag/renato_matos|broader_prefLabel|Musicien +http://www.semanlink.net/tag/renato_matos|broader_prefLabel|Brésil 
+http://www.semanlink.net/tag/renato_matos|broader_related|http://www.semanlink.net/tag/saudade
+http://www.semanlink.net/tag/histoire_des_jermas|creationTime|2015-08-27T13:59:27Z
+http://www.semanlink.net/tag/histoire_des_jermas|prefLabel|Histoire des Jermas
+http://www.semanlink.net/tag/histoire_des_jermas|broader|http://www.semanlink.net/tag/histoire_de_l_afrique
+http://www.semanlink.net/tag/histoire_des_jermas|broader|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest
+http://www.semanlink.net/tag/histoire_des_jermas|broader|http://www.semanlink.net/tag/jerma
+http://www.semanlink.net/tag/histoire_des_jermas|broader|http://www.semanlink.net/tag/histoire_du_niger
+http://www.semanlink.net/tag/histoire_des_jermas|creationDate|2015-08-27
+http://www.semanlink.net/tag/histoire_des_jermas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/histoire_des_jermas|uri|http://www.semanlink.net/tag/histoire_des_jermas
+http://www.semanlink.net/tag/histoire_des_jermas|broader_prefLabel|Histoire de l'Afrique
+http://www.semanlink.net/tag/histoire_des_jermas|broader_prefLabel|Afrique de l'ouest : histoire
+http://www.semanlink.net/tag/histoire_des_jermas|broader_prefLabel|Jerma
+http://www.semanlink.net/tag/histoire_des_jermas|broader_prefLabel|Histoire du Niger
+http://www.semanlink.net/tag/histoire_des_jermas|broader_altLabel|Djerma
+http://www.semanlink.net/tag/histoire_des_jermas|broader_altLabel|Zarma
+http://www.semanlink.net/tag/franceconnect|creationTime|2016-07-13T12:45:38Z
+http://www.semanlink.net/tag/franceconnect|prefLabel|FranceConnect
+http://www.semanlink.net/tag/franceconnect|broader|http://www.semanlink.net/tag/france
+http://www.semanlink.net/tag/franceconnect|related|http://www.semanlink.net/tag/government_data
+http://www.semanlink.net/tag/franceconnect|creationDate|2016-07-13
+http://www.semanlink.net/tag/franceconnect|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/franceconnect|uri|http://www.semanlink.net/tag/franceconnect
+http://www.semanlink.net/tag/franceconnect|broader_prefLabel|France
+http://www.semanlink.net/tag/e_commerce|creationTime|2020-12-14T11:11:50Z
+http://www.semanlink.net/tag/e_commerce|prefLabel|e-commerce
+http://www.semanlink.net/tag/e_commerce|creationDate|2020-12-14
+http://www.semanlink.net/tag/e_commerce|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/e_commerce|uri|http://www.semanlink.net/tag/e_commerce
+http://www.semanlink.net/tag/tours|creationTime|2010-11-04T01:15:49Z
+http://www.semanlink.net/tag/tours|prefLabel|Tours
+http://www.semanlink.net/tag/tours|broader|http://www.semanlink.net/tag/ville
+http://www.semanlink.net/tag/tours|creationDate|2010-11-04
+http://www.semanlink.net/tag/tours|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/tours|uri|http://www.semanlink.net/tag/tours
+http://www.semanlink.net/tag/tours|broader_prefLabel|Ville
+http://www.semanlink.net/tag/lingo|creationTime|2017-11-11T16:24:26Z
+http://www.semanlink.net/tag/lingo|prefLabel|Lingo
+http://www.semanlink.net/tag/lingo|broader|http://www.semanlink.net/tag/clustering_of_text_documents
+http://www.semanlink.net/tag/lingo|broader|http://www.semanlink.net/tag/algorithmes
+http://www.semanlink.net/tag/lingo|broader|http://www.semanlink.net/tag/carrot2
+http://www.semanlink.net/tag/lingo|creationDate|2017-11-11
+http://www.semanlink.net/tag/lingo|comment|> “a good cluster—or document grouping—is one, which possesses a good, readable description”.
+http://www.semanlink.net/tag/lingo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/lingo|uri|http://www.semanlink.net/tag/lingo
+http://www.semanlink.net/tag/lingo|broader_prefLabel|Clustering of text documents
+http://www.semanlink.net/tag/lingo|broader_prefLabel|Algorithmes
+http://www.semanlink.net/tag/lingo|broader_prefLabel|Carrot2
+http://www.semanlink.net/tag/lingo|broader_altLabel|Text Clustering
+http://www.semanlink.net/tag/lingo|broader_related|http://www.semanlink.net/tag/text_similarity
+http://www.semanlink.net/tag/lingo|broader_related|http://www.semanlink.net/tag/conceptual_clustering
+http://www.semanlink.net/tag/lingo|broader_related|http://www.semanlink.net/tag/data_visualization_tools
+http://www.semanlink.net/tag/sparql_and_jena|creationTime|2008-01-04T00:45:26Z
+http://www.semanlink.net/tag/sparql_and_jena|prefLabel|SPARQL AND Jena
+http://www.semanlink.net/tag/sparql_and_jena|broader|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/sparql_and_jena|broader|http://www.semanlink.net/tag/jena
+http://www.semanlink.net/tag/sparql_and_jena|related|http://www.semanlink.net/tag/andy_seaborne
+http://www.semanlink.net/tag/sparql_and_jena|creationDate|2008-01-04
+http://www.semanlink.net/tag/sparql_and_jena|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sparql_and_jena|uri|http://www.semanlink.net/tag/sparql_and_jena
+http://www.semanlink.net/tag/sparql_and_jena|broader_prefLabel|SPARQL
+http://www.semanlink.net/tag/sparql_and_jena|broader_prefLabel|Jena
+http://www.semanlink.net/tag/sparql_and_jena|broader_related|http://www.semanlink.net/tag/hp
+http://www.semanlink.net/tag/big_data|creationTime|2012-04-25T14:06:12Z
+http://www.semanlink.net/tag/big_data|prefLabel|Big Data
+http://www.semanlink.net/tag/big_data|related|http://www.semanlink.net/tag/nosql
+http://www.semanlink.net/tag/big_data|creationDate|2012-04-25
+http://www.semanlink.net/tag/big_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/big_data|uri|http://www.semanlink.net/tag/big_data
+http://www.semanlink.net/tag/julian_assange|creationTime|2013-04-28T00:09:44Z
+http://www.semanlink.net/tag/julian_assange|prefLabel|Julian Assange
+http://www.semanlink.net/tag/julian_assange|broader|http://www.semanlink.net/tag/wikileaks
+http://www.semanlink.net/tag/julian_assange|creationDate|2013-04-28
+http://www.semanlink.net/tag/julian_assange|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/julian_assange|uri|http://www.semanlink.net/tag/julian_assange
+http://www.semanlink.net/tag/julian_assange|broader_prefLabel|Wikileaks
+http://www.semanlink.net/tag/julian_assange|broader_related|http://www.semanlink.net/tag/chelsea_manning
+http://www.semanlink.net/tag/julie_grollier|creationTime|2018-10-21T16:37:29Z
+http://www.semanlink.net/tag/julie_grollier|prefLabel|Julie Grollier
+http://www.semanlink.net/tag/julie_grollier|broader|http://www.semanlink.net/tag/ai_girls_and_guys
+http://www.semanlink.net/tag/julie_grollier|related|http://www.semanlink.net/tag/brains_in_silicon
+http://www.semanlink.net/tag/julie_grollier|related|http://www.semanlink.net/tag/france_is_ai_2018
+http://www.semanlink.net/tag/julie_grollier|creationDate|2018-10-21
+http://www.semanlink.net/tag/julie_grollier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/julie_grollier|homepage|http://www.cnrs-thales.fr/spip.php?article59 +http://www.semanlink.net/tag/julie_grollier|uri|http://www.semanlink.net/tag/julie_grollier +http://www.semanlink.net/tag/julie_grollier|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/probing_ml|creationTime|2020-08-02T11:19:30Z +http://www.semanlink.net/tag/probing_ml|prefLabel|Probing (ML) +http://www.semanlink.net/tag/probing_ml|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/probing_ml|broader|http://www.semanlink.net/tag/neural_network_interpretability +http://www.semanlink.net/tag/probing_ml|creationDate|2020-08-02 +http://www.semanlink.net/tag/probing_ml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/probing_ml|altLabel|Probing +http://www.semanlink.net/tag/probing_ml|uri|http://www.semanlink.net/tag/probing_ml +http://www.semanlink.net/tag/probing_ml|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/probing_ml|broader_prefLabel|Neural network interpretability +http://www.semanlink.net/tag/mlm|creationTime|2021-10-01T10:09:30Z +http://www.semanlink.net/tag/mlm|prefLabel|MLM +http://www.semanlink.net/tag/mlm|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/mlm|broader|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/tag/mlm|related|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/mlm|creationDate|2021-10-01 +http://www.semanlink.net/tag/mlm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mlm|altLabel|Masked Language Model +http://www.semanlink.net/tag/mlm|uri|http://www.semanlink.net/tag/mlm +http://www.semanlink.net/tag/mlm|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/mlm|broader_prefLabel|Language model +http://www.semanlink.net/tag/mlm|broader_altLabel|Language Modeling +http://www.semanlink.net/tag/mlm|broader_altLabel|LM +http://www.semanlink.net/tag/mlm|broader_altLabel|Statistical Language Model +http://www.semanlink.net/tag/mlm|broader_related|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/mlm|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/yuval_noah_harari|creationTime|2021-01-11T22:40:53Z +http://www.semanlink.net/tag/yuval_noah_harari|prefLabel|Yuval Noah Harari +http://www.semanlink.net/tag/yuval_noah_harari|creationDate|2021-01-11 +http://www.semanlink.net/tag/yuval_noah_harari|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yuval_noah_harari|describedBy|https://en.wikipedia.org/wiki/Yuval_Noah_Harari +http://www.semanlink.net/tag/yuval_noah_harari|uri|http://www.semanlink.net/tag/yuval_noah_harari +http://www.semanlink.net/tag/amerique_latine|prefLabel|Amérique latine +http://www.semanlink.net/tag/amerique_latine|broader|http://www.semanlink.net/tag/amerique +http://www.semanlink.net/tag/amerique_latine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amerique_latine|uri|http://www.semanlink.net/tag/amerique_latine +http://www.semanlink.net/tag/amerique_latine|broader_prefLabel|Amérique +http://www.semanlink.net/tag/film_allemand|creationTime|2007-04-11T22:46:40Z +http://www.semanlink.net/tag/film_allemand|prefLabel|Film allemand +http://www.semanlink.net/tag/film_allemand|broader|http://www.semanlink.net/tag/allemagne 
+http://www.semanlink.net/tag/film_allemand|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_allemand|creationDate|2007-04-11 +http://www.semanlink.net/tag/film_allemand|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_allemand|uri|http://www.semanlink.net/tag/film_allemand +http://www.semanlink.net/tag/film_allemand|broader_prefLabel|Allemagne +http://www.semanlink.net/tag/film_allemand|broader_prefLabel|Film +http://www.semanlink.net/tag/film_allemand|broader_altLabel|Germany +http://www.semanlink.net/tag/film_allemand|broader_altLabel|Deutschland +http://www.semanlink.net/tag/glaciologie|prefLabel|Glaciologie +http://www.semanlink.net/tag/glaciologie|broader|http://www.semanlink.net/tag/glacier +http://www.semanlink.net/tag/glaciologie|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/glaciologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/glaciologie|uri|http://www.semanlink.net/tag/glaciologie +http://www.semanlink.net/tag/glaciologie|broader_prefLabel|Glacier +http://www.semanlink.net/tag/glaciologie|broader_prefLabel|Science +http://www.semanlink.net/tag/glaciologie|broader_altLabel|sciences +http://www.semanlink.net/tag/risks|creationTime|2017-09-18T14:34:37Z +http://www.semanlink.net/tag/risks|prefLabel|Risks +http://www.semanlink.net/tag/risks|creationDate|2017-09-18 +http://www.semanlink.net/tag/risks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/risks|uri|http://www.semanlink.net/tag/risks +http://www.semanlink.net/tag/mami_wata|creationTime|2007-09-05T00:39:28Z +http://www.semanlink.net/tag/mami_wata|prefLabel|Mami Wata +http://www.semanlink.net/tag/mami_wata|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/mami_wata|broader|http://www.semanlink.net/tag/mythologie +http://www.semanlink.net/tag/mami_wata|related|http://www.semanlink.net/tag/moussa_poussi +http://www.semanlink.net/tag/mami_wata|creationDate|2007-09-05 +http://www.semanlink.net/tag/mami_wata|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mami_wata|uri|http://www.semanlink.net/tag/mami_wata +http://www.semanlink.net/tag/mami_wata|broader_prefLabel|Afrique +http://www.semanlink.net/tag/mami_wata|broader_prefLabel|Mythologie +http://www.semanlink.net/tag/mami_wata|broader_altLabel|Africa +http://www.semanlink.net/tag/fournisseurs_d_acces_a_internet|creationTime|2008-04-25T09:14:04Z +http://www.semanlink.net/tag/fournisseurs_d_acces_a_internet|prefLabel|Fournisseurs d'accès à internet +http://www.semanlink.net/tag/fournisseurs_d_acces_a_internet|creationDate|2008-04-25 +http://www.semanlink.net/tag/fournisseurs_d_acces_a_internet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fournisseurs_d_acces_a_internet|uri|http://www.semanlink.net/tag/fournisseurs_d_acces_a_internet +http://www.semanlink.net/tag/samuel_goto|creationTime|2014-10-29T01:48:04Z +http://www.semanlink.net/tag/samuel_goto|prefLabel|Samuel Goto +http://www.semanlink.net/tag/samuel_goto|related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/samuel_goto|related|http://www.semanlink.net/tag/schema_org_actions +http://www.semanlink.net/tag/samuel_goto|related|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/samuel_goto|creationDate|2014-10-29 
+http://www.semanlink.net/tag/samuel_goto|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/samuel_goto|homepage|http://blog.sgo.to +http://www.semanlink.net/tag/samuel_goto|altLabel|Sam Goto +http://www.semanlink.net/tag/samuel_goto|weblog|http://blog.sgo.to +http://www.semanlink.net/tag/samuel_goto|uri|http://www.semanlink.net/tag/samuel_goto +http://www.semanlink.net/tag/winch5|creationTime|2013-08-25T12:47:57Z +http://www.semanlink.net/tag/winch5|prefLabel|Winch 5 +http://www.semanlink.net/tag/winch5|broader|http://www.semanlink.net/tag/francis_pisani +http://www.semanlink.net/tag/winch5|broader|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/winch5|creationDate|2013-08-25 +http://www.semanlink.net/tag/winch5|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/winch5|homepage|http://winch5.blog.lemonde.fr/ +http://www.semanlink.net/tag/winch5|uri|http://www.semanlink.net/tag/winch5 +http://www.semanlink.net/tag/winch5|broader_prefLabel|Francis Pisani +http://www.semanlink.net/tag/winch5|broader_prefLabel|Livre +http://www.semanlink.net/tag/winch5|broader_altLabel|Livres +http://www.semanlink.net/tag/deprecated|prefLabel|Deprecated +http://www.semanlink.net/tag/deprecated|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deprecated|uri|http://www.semanlink.net/tag/deprecated +http://www.semanlink.net/tag/conscience_artificielle|creationTime|2008-11-21T23:45:13Z +http://www.semanlink.net/tag/conscience_artificielle|prefLabel|Conscience artificielle +http://www.semanlink.net/tag/conscience_artificielle|broader|http://www.semanlink.net/tag/technological_singularity +http://www.semanlink.net/tag/conscience_artificielle|broader|http://www.semanlink.net/tag/anticipation +http://www.semanlink.net/tag/conscience_artificielle|broader|http://www.semanlink.net/tag/conscience +http://www.semanlink.net/tag/conscience_artificielle|creationDate|2008-11-21 +http://www.semanlink.net/tag/conscience_artificielle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conscience_artificielle|altLabel|Machine consciousness +http://www.semanlink.net/tag/conscience_artificielle|uri|http://www.semanlink.net/tag/conscience_artificielle +http://www.semanlink.net/tag/conscience_artificielle|broader_prefLabel|Technological singularity +http://www.semanlink.net/tag/conscience_artificielle|broader_prefLabel|Anticipation +http://www.semanlink.net/tag/conscience_artificielle|broader_prefLabel|Consciousness +http://www.semanlink.net/tag/conscience_artificielle|broader_altLabel|Conscience +http://www.semanlink.net/tag/conscience_artificielle|broader_related|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/lune|prefLabel|Lune +http://www.semanlink.net/tag/lune|broader|http://www.semanlink.net/tag/systeme_solaire +http://www.semanlink.net/tag/lune|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lune|uri|http://www.semanlink.net/tag/lune +http://www.semanlink.net/tag/lune|broader_prefLabel|Système solaire +http://www.semanlink.net/tag/memory_embeddings|creationTime|2017-10-24T14:47:58Z +http://www.semanlink.net/tag/memory_embeddings|prefLabel|Memory Embeddings +http://www.semanlink.net/tag/memory_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/memory_embeddings|broader|http://www.semanlink.net/tag/memory_in_deep_learning 
+http://www.semanlink.net/tag/memory_embeddings|creationDate|2017-10-24 +http://www.semanlink.net/tag/memory_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/memory_embeddings|uri|http://www.semanlink.net/tag/memory_embeddings +http://www.semanlink.net/tag/memory_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/memory_embeddings|broader_prefLabel|Memory in deep learning +http://www.semanlink.net/tag/memory_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/memory_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/memory_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/memory_embeddings|broader_related|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/tag/boulgakov|creationTime|2013-12-16T20:46:45Z +http://www.semanlink.net/tag/boulgakov|prefLabel|Boulgakov +http://www.semanlink.net/tag/boulgakov|broader|http://www.semanlink.net/tag/litterature_russe +http://www.semanlink.net/tag/boulgakov|broader|http://www.semanlink.net/tag/ukraine +http://www.semanlink.net/tag/boulgakov|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/boulgakov|creationDate|2013-12-16 +http://www.semanlink.net/tag/boulgakov|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/boulgakov|uri|http://www.semanlink.net/tag/boulgakov +http://www.semanlink.net/tag/boulgakov|broader_prefLabel|Littérature russe +http://www.semanlink.net/tag/boulgakov|broader_prefLabel|Ukraine +http://www.semanlink.net/tag/boulgakov|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/agriculture_africaine|prefLabel|Agriculture africaine +http://www.semanlink.net/tag/agriculture_africaine|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/agriculture_africaine|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/agriculture_africaine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/agriculture_africaine|uri|http://www.semanlink.net/tag/agriculture_africaine +http://www.semanlink.net/tag/agriculture_africaine|broader_prefLabel|Afrique +http://www.semanlink.net/tag/agriculture_africaine|broader_prefLabel|Agriculture +http://www.semanlink.net/tag/agriculture_africaine|broader_altLabel|Africa +http://www.semanlink.net/tag/france_inter|creationTime|2011-02-06T19:36:11Z +http://www.semanlink.net/tag/france_inter|prefLabel|France Inter +http://www.semanlink.net/tag/france_inter|broader|http://www.semanlink.net/tag/radio +http://www.semanlink.net/tag/france_inter|creationDate|2011-02-06 +http://www.semanlink.net/tag/france_inter|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/france_inter|uri|http://www.semanlink.net/tag/france_inter +http://www.semanlink.net/tag/france_inter|broader_prefLabel|Radio +http://www.semanlink.net/tag/mobile_search|creationTime|2010-07-01T16:17:54Z +http://www.semanlink.net/tag/mobile_search|prefLabel|Mobile search +http://www.semanlink.net/tag/mobile_search|broader|http://www.semanlink.net/tag/search +http://www.semanlink.net/tag/mobile_search|creationDate|2010-07-01 +http://www.semanlink.net/tag/mobile_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mobile_search|uri|http://www.semanlink.net/tag/mobile_search 
+http://www.semanlink.net/tag/mobile_search|broader_prefLabel|Search
+http://www.semanlink.net/tag/encyclopedie_collaborative|creationTime|2007-05-11T00:23:02Z
+http://www.semanlink.net/tag/encyclopedie_collaborative|prefLabel|Encyclopédie collaborative
+http://www.semanlink.net/tag/encyclopedie_collaborative|broader|http://www.semanlink.net/tag/encyclopedie
+http://www.semanlink.net/tag/encyclopedie_collaborative|related|http://www.semanlink.net/tag/web_2_0
+http://www.semanlink.net/tag/encyclopedie_collaborative|creationDate|2007-05-11
+http://www.semanlink.net/tag/encyclopedie_collaborative|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/encyclopedie_collaborative|uri|http://www.semanlink.net/tag/encyclopedie_collaborative
+http://www.semanlink.net/tag/encyclopedie_collaborative|broader_prefLabel|Encyclopédie
+http://www.semanlink.net/tag/owl_rl|creationTime|2010-08-27T13:15:32Z
+http://www.semanlink.net/tag/owl_rl|prefLabel|OWL RL
+http://www.semanlink.net/tag/owl_rl|broader|http://www.semanlink.net/tag/owl
+http://www.semanlink.net/tag/owl_rl|broader|http://www.semanlink.net/tag/rules
+http://www.semanlink.net/tag/owl_rl|creationDate|2010-08-27
+http://www.semanlink.net/tag/owl_rl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/owl_rl|uri|http://www.semanlink.net/tag/owl_rl
+http://www.semanlink.net/tag/owl_rl|broader_prefLabel|OWL
+http://www.semanlink.net/tag/owl_rl|broader_prefLabel|Rules
+http://www.semanlink.net/tag/owl_rl|broader_related|http://www.semanlink.net/tag/bijan_parsia
+http://www.semanlink.net/tag/quicktime|prefLabel|QuickTime
+http://www.semanlink.net/tag/quicktime|broader|http://www.semanlink.net/tag/media_player
+http://www.semanlink.net/tag/quicktime|broader|http://www.semanlink.net/tag/apple_software
+http://www.semanlink.net/tag/quicktime|broader|http://www.semanlink.net/tag/apple
+http://www.semanlink.net/tag/quicktime|broader|http://www.semanlink.net/tag/multimedia
+http://www.semanlink.net/tag/quicktime|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/quicktime|uri|http://www.semanlink.net/tag/quicktime
+http://www.semanlink.net/tag/quicktime|broader_prefLabel|Media Player
+http://www.semanlink.net/tag/quicktime|broader_prefLabel|Apple Software
+http://www.semanlink.net/tag/quicktime|broader_prefLabel|Apple
+http://www.semanlink.net/tag/quicktime|broader_prefLabel|Multimedia
+http://www.semanlink.net/tag/corruption|prefLabel|Corruption
+http://www.semanlink.net/tag/corruption|broader|http://www.semanlink.net/tag/grands_problemes
+http://www.semanlink.net/tag/corruption|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/corruption|uri|http://www.semanlink.net/tag/corruption
+http://www.semanlink.net/tag/corruption|broader_prefLabel|Grands problèmes
+http://www.semanlink.net/tag/bayesian_deep_learning|creationTime|2018-11-05T09:39:36Z
+http://www.semanlink.net/tag/bayesian_deep_learning|prefLabel|Bayesian Deep Learning
+http://www.semanlink.net/tag/bayesian_deep_learning|broader|http://www.semanlink.net/tag/bayesian_reasoning
+http://www.semanlink.net/tag/bayesian_deep_learning|broader|http://www.semanlink.net/tag/uncertainty_in_deep_learning
+http://www.semanlink.net/tag/bayesian_deep_learning|creationDate|2018-11-05
+http://www.semanlink.net/tag/bayesian_deep_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/bayesian_deep_learning|altLabel|Bayesian Neural Networks
+http://www.semanlink.net/tag/bayesian_deep_learning|uri|http://www.semanlink.net/tag/bayesian_deep_learning
+http://www.semanlink.net/tag/bayesian_deep_learning|broader_prefLabel|Bayesian Reasoning
+http://www.semanlink.net/tag/bayesian_deep_learning|broader_prefLabel|Uncertainty in Deep Learning
+http://www.semanlink.net/tag/bayesian_deep_learning|broader_related|http://www.semanlink.net/tag/accountable_ai
+http://www.semanlink.net/tag/nlp_text_representation|creationTime|2017-06-28T01:03:40Z
+http://www.semanlink.net/tag/nlp_text_representation|prefLabel|NLP: Text Representation
+http://www.semanlink.net/tag/nlp_text_representation|broader|http://www.semanlink.net/tag/nlp_techniques
+http://www.semanlink.net/tag/nlp_text_representation|creationDate|2017-06-28
+http://www.semanlink.net/tag/nlp_text_representation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nlp_text_representation|altLabel|Text Representation
+http://www.semanlink.net/tag/nlp_text_representation|uri|http://www.semanlink.net/tag/nlp_text_representation
+http://www.semanlink.net/tag/nlp_text_representation|broader_prefLabel|NLP techniques
+http://www.semanlink.net/tag/eglise_catholique|creationTime|2008-06-01T18:02:00Z
+http://www.semanlink.net/tag/eglise_catholique|prefLabel|Eglise catholique
+http://www.semanlink.net/tag/eglise_catholique|broader|http://www.semanlink.net/tag/catholicisme
+http://www.semanlink.net/tag/eglise_catholique|creationDate|2008-06-01
+http://www.semanlink.net/tag/eglise_catholique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/eglise_catholique|uri|http://www.semanlink.net/tag/eglise_catholique
+http://www.semanlink.net/tag/eglise_catholique|broader_prefLabel|Catholicisme
+http://www.semanlink.net/tag/eglise_catholique|broader_altLabel|Catholique
+http://www.semanlink.net/tag/jardinage|prefLabel|Jardinage
+http://www.semanlink.net/tag/jardinage|broader|http://www.semanlink.net/tag/jardin
+http://www.semanlink.net/tag/jardinage|broader|http://www.semanlink.net/tag/divers
+http://www.semanlink.net/tag/jardinage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/jardinage|uri|http://www.semanlink.net/tag/jardinage
+http://www.semanlink.net/tag/jardinage|broader_prefLabel|Jardin
+http://www.semanlink.net/tag/jardinage|broader_prefLabel|Divers
+http://www.semanlink.net/tag/travailler_moins|creationTime|2009-01-31T20:36:50Z
+http://www.semanlink.net/tag/travailler_moins|prefLabel|Travailler moins
+http://www.semanlink.net/tag/travailler_moins|broader|http://www.semanlink.net/tag/travail
+http://www.semanlink.net/tag/travailler_moins|creationDate|2009-01-31
+http://www.semanlink.net/tag/travailler_moins|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/travailler_moins|uri|http://www.semanlink.net/tag/travailler_moins
+http://www.semanlink.net/tag/travailler_moins|broader_prefLabel|Travail
+http://www.semanlink.net/tag/fbi_v_apple|creationTime|2016-02-19T13:51:49Z
+http://www.semanlink.net/tag/fbi_v_apple|prefLabel|FBI v. Apple
+http://www.semanlink.net/tag/fbi_v_apple|broader|http://www.semanlink.net/tag/iphone
+http://www.semanlink.net/tag/fbi_v_apple|broader|http://www.semanlink.net/tag/apple
+http://www.semanlink.net/tag/fbi_v_apple|broader|http://www.semanlink.net/tag/vie_privee
+http://www.semanlink.net/tag/fbi_v_apple|broader|http://www.semanlink.net/tag/big_brother
+http://www.semanlink.net/tag/fbi_v_apple|broader|http://www.semanlink.net/tag/fbi
+http://www.semanlink.net/tag/fbi_v_apple|broader|http://www.semanlink.net/tag/backdoor
+http://www.semanlink.net/tag/fbi_v_apple|broader|http://www.semanlink.net/tag/tim_cook
+http://www.semanlink.net/tag/fbi_v_apple|broader|http://www.semanlink.net/tag/terrorisme
+http://www.semanlink.net/tag/fbi_v_apple|creationDate|2016-02-19
+http://www.semanlink.net/tag/fbi_v_apple|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fbi_v_apple|uri|http://www.semanlink.net/tag/fbi_v_apple
+http://www.semanlink.net/tag/fbi_v_apple|broader_prefLabel|iphone
+http://www.semanlink.net/tag/fbi_v_apple|broader_prefLabel|Apple
+http://www.semanlink.net/tag/fbi_v_apple|broader_prefLabel|Privacy
+http://www.semanlink.net/tag/fbi_v_apple|broader_prefLabel|Big Brother
+http://www.semanlink.net/tag/fbi_v_apple|broader_prefLabel|FBI
+http://www.semanlink.net/tag/fbi_v_apple|broader_prefLabel|Backdoor
+http://www.semanlink.net/tag/fbi_v_apple|broader_prefLabel|Tim Cook
+http://www.semanlink.net/tag/fbi_v_apple|broader_prefLabel|Terrorisme
+http://www.semanlink.net/tag/fbi_v_apple|broader_altLabel|Vie privée
+http://www.semanlink.net/tag/chimpanze|prefLabel|Chimpanzé
+http://www.semanlink.net/tag/chimpanze|broader|http://www.semanlink.net/tag/grands_singes
+http://www.semanlink.net/tag/chimpanze|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/chimpanze|uri|http://www.semanlink.net/tag/chimpanze
+http://www.semanlink.net/tag/chimpanze|broader_prefLabel|Grands Singes
+http://www.semanlink.net/tag/chimpanze|broader_altLabel|Apes
+http://www.semanlink.net/tag/artiste|prefLabel|Artiste
+http://www.semanlink.net/tag/artiste|broader|http://www.semanlink.net/tag/art
+http://www.semanlink.net/tag/artiste|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/artiste|uri|http://www.semanlink.net/tag/artiste
+http://www.semanlink.net/tag/artiste|broader_prefLabel|Art
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|creationTime|2017-06-20T13:37:19Z
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|prefLabel|Text Corpora and Lexical Resources
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|broader|http://www.semanlink.net/tag/nlp
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|related|http://www.semanlink.net/tag/part_of_speech_tagging
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|creationDate|2017-06-20
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|altLabel|Text corpora
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|altLabel|Lexical Resource
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|altLabel|Text corpus
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|uri|http://www.semanlink.net/tag/text_corpora_and_lexical_resources
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|broader_prefLabel|NLP
+http://www.semanlink.net/tag/text_corpora_and_lexical_resources|broader_altLabel|TALN +http://www.semanlink.net/tag/text_corpora_and_lexical_resources|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/text_corpora_and_lexical_resources|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/pollution|prefLabel|Pollution +http://www.semanlink.net/tag/pollution|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/pollution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pollution|uri|http://www.semanlink.net/tag/pollution +http://www.semanlink.net/tag/pollution|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/brain_computer_interface|creationTime|2019-09-12T21:51:24Z +http://www.semanlink.net/tag/brain_computer_interface|prefLabel|Brain-computer interface +http://www.semanlink.net/tag/brain_computer_interface|broader|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.semanlink.net/tag/brain_computer_interface|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/brain_computer_interface|creationDate|2019-09-12 +http://www.semanlink.net/tag/brain_computer_interface|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/brain_computer_interface|uri|http://www.semanlink.net/tag/brain_computer_interface +http://www.semanlink.net/tag/brain_computer_interface|broader_prefLabel|Nous vivons une époque moderne +http://www.semanlink.net/tag/brain_computer_interface|broader_prefLabel|Brain +http://www.semanlink.net/tag/brain_computer_interface|broader_altLabel|C'est déjà demain +http://www.semanlink.net/tag/brain_computer_interface|broader_altLabel|Cerveau +http://www.semanlink.net/tag/brain_computer_interface|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/converter|creationTime|2012-08-27T12:35:14Z +http://www.semanlink.net/tag/converter|prefLabel|Converter +http://www.semanlink.net/tag/converter|creationDate|2012-08-27 +http://www.semanlink.net/tag/converter|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/converter|uri|http://www.semanlink.net/tag/converter +http://www.semanlink.net/tag/comet_wild_2|prefLabel|Comet Wild 2 +http://www.semanlink.net/tag/comet_wild_2|broader|http://www.semanlink.net/tag/comete +http://www.semanlink.net/tag/comet_wild_2|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/comet_wild_2|uri|http://www.semanlink.net/tag/comet_wild_2 +http://www.semanlink.net/tag/comet_wild_2|broader_prefLabel|Comète +http://www.semanlink.net/tag/model_driven_development|creationTime|2010-09-24T12:58:29Z +http://www.semanlink.net/tag/model_driven_development|prefLabel|Model Driven Development +http://www.semanlink.net/tag/model_driven_development|creationDate|2010-09-24 +http://www.semanlink.net/tag/model_driven_development|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/model_driven_development|uri|http://www.semanlink.net/tag/model_driven_development +http://www.semanlink.net/tag/nodalities|creationTime|2009-12-02T00:49:56Z +http://www.semanlink.net/tag/nodalities|prefLabel|Nodalities +http://www.semanlink.net/tag/nodalities|broader|http://www.semanlink.net/tag/talis +http://www.semanlink.net/tag/nodalities|creationDate|2009-12-02 
+http://www.semanlink.net/tag/nodalities|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nodalities|uri|http://www.semanlink.net/tag/nodalities +http://www.semanlink.net/tag/nodalities|broader_prefLabel|Talis +http://www.semanlink.net/tag/nodalities|broader_related|http://www.semanlink.net/tag/leigh_dodds +http://www.semanlink.net/tag/nodalities|broader_related|http://www.semanlink.net/tag/danny_ayers +http://www.semanlink.net/tag/nodalities|broader_related|http://www.semanlink.net/tag/paul_miller +http://www.semanlink.net/tag/nodalities|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/knn_in_mlc|creationTime|2020-09-08T23:54:33Z +http://www.semanlink.net/tag/knn_in_mlc|prefLabel|KNN in MLC +http://www.semanlink.net/tag/knn_in_mlc|broader|http://www.semanlink.net/tag/multi_label_classification +http://www.semanlink.net/tag/knn_in_mlc|broader|http://www.semanlink.net/tag/nearest_neighbor_search +http://www.semanlink.net/tag/knn_in_mlc|creationDate|2020-09-08 +http://www.semanlink.net/tag/knn_in_mlc|comment|Use of nearest neighbors in Multi-label classification +http://www.semanlink.net/tag/knn_in_mlc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knn_in_mlc|uri|http://www.semanlink.net/tag/knn_in_mlc +http://www.semanlink.net/tag/knn_in_mlc|broader_prefLabel|Multi-label classification +http://www.semanlink.net/tag/knn_in_mlc|broader_prefLabel|Nearest neighbor search +http://www.semanlink.net/tag/knn_in_mlc|broader_altLabel|Multilabel classification +http://www.semanlink.net/tag/knn_in_mlc|broader_altLabel|Similarity search +http://www.semanlink.net/tag/knn_in_mlc|broader_related|http://www.semanlink.net/tag/nlp_4_semanlink +http://www.semanlink.net/tag/knn_in_mlc|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/question_raciale|creationTime|2015-06-20T18:26:28Z +http://www.semanlink.net/tag/question_raciale|prefLabel|Question raciale +http://www.semanlink.net/tag/question_raciale|creationDate|2015-06-20 +http://www.semanlink.net/tag/question_raciale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/question_raciale|uri|http://www.semanlink.net/tag/question_raciale +http://www.semanlink.net/tag/temis|creationTime|2013-11-23T00:00:45Z +http://www.semanlink.net/tag/temis|prefLabel|TEMIS +http://www.semanlink.net/tag/temis|broader|http://www.semanlink.net/tag/french_semantic_web_company +http://www.semanlink.net/tag/temis|creationDate|2013-11-23 +http://www.semanlink.net/tag/temis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/temis|uri|http://www.semanlink.net/tag/temis +http://www.semanlink.net/tag/temis|broader_prefLabel|French Semantic web company +http://www.semanlink.net/tag/punk|prefLabel|Punk +http://www.semanlink.net/tag/punk|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/punk|creationDate|2006-09-01 +http://www.semanlink.net/tag/punk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/punk|altLabel|Punk Rock +http://www.semanlink.net/tag/punk|uri|http://www.semanlink.net/tag/punk +http://www.semanlink.net/tag/punk|broader_prefLabel|Musique +http://www.semanlink.net/tag/punk|broader_altLabel|Music +http://www.semanlink.net/tag/actrice|prefLabel|Actrice +http://www.semanlink.net/tag/actrice|broader|http://www.semanlink.net/tag/cinema 
+http://www.semanlink.net/tag/actrice|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/actrice|uri|http://www.semanlink.net/tag/actrice +http://www.semanlink.net/tag/actrice|broader_prefLabel|Cinéma +http://www.semanlink.net/tag/rdf_context|creationTime|2010-08-20T12:25:29Z +http://www.semanlink.net/tag/rdf_context|prefLabel|RDF: context +http://www.semanlink.net/tag/rdf_context|creationDate|2010-08-20 +http://www.semanlink.net/tag/rdf_context|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_context|uri|http://www.semanlink.net/tag/rdf_context +http://www.semanlink.net/tag/nlp_microsoft|creationTime|2018-05-18T15:49:50Z +http://www.semanlink.net/tag/nlp_microsoft|prefLabel|NLP@Microsoft +http://www.semanlink.net/tag/nlp_microsoft|broader|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/tag/nlp_microsoft|broader|http://www.semanlink.net/tag/microsoft +http://www.semanlink.net/tag/nlp_microsoft|creationDate|2018-05-18 +http://www.semanlink.net/tag/nlp_microsoft|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_microsoft|uri|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/tag/nlp_microsoft|broader_prefLabel|NLP Teams +http://www.semanlink.net/tag/nlp_microsoft|broader_prefLabel|Microsoft +http://www.semanlink.net/tag/nlp_microsoft|broader_altLabel|NLP Groups +http://www.semanlink.net/tag/nlp_microsoft|broader_related|http://www.semanlink.net/tag/bill_gates +http://www.semanlink.net/tag/gilberto_gil|creationTime|2018-10-25T11:21:12Z +http://www.semanlink.net/tag/gilberto_gil|prefLabel|Gilberto Gil +http://www.semanlink.net/tag/gilberto_gil|broader|http://www.semanlink.net/tag/musique_bresilienne +http://www.semanlink.net/tag/gilberto_gil|creationDate|2018-10-25 +http://www.semanlink.net/tag/gilberto_gil|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gilberto_gil|uri|http://www.semanlink.net/tag/gilberto_gil +http://www.semanlink.net/tag/gilberto_gil|broader_prefLabel|Musique brésilienne +http://www.semanlink.net/tag/bibliotheque|prefLabel|Bibliothèque +http://www.semanlink.net/tag/bibliotheque|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bibliotheque|uri|http://www.semanlink.net/tag/bibliotheque +http://www.semanlink.net/tag/soa|prefLabel|SOA +http://www.semanlink.net/tag/soa|broader|http://www.semanlink.net/tag/web_services +http://www.semanlink.net/tag/soa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/soa|uri|http://www.semanlink.net/tag/soa +http://www.semanlink.net/tag/soa|broader_prefLabel|Web Services +http://www.semanlink.net/tag/soa|broader_altLabel|WS +http://www.semanlink.net/tag/pointwise_mutual_information|creationTime|2017-12-07T16:16:12Z +http://www.semanlink.net/tag/pointwise_mutual_information|prefLabel|Pointwise mutual information +http://www.semanlink.net/tag/pointwise_mutual_information|creationDate|2017-12-07 +http://www.semanlink.net/tag/pointwise_mutual_information|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pointwise_mutual_information|describedBy|https://en.wikipedia.org/wiki/Pointwise_mutual_information +http://www.semanlink.net/tag/pointwise_mutual_information|uri|http://www.semanlink.net/tag/pointwise_mutual_information +http://www.semanlink.net/tag/bug|creationTime|2007-11-10T03:40:24Z +http://www.semanlink.net/tag/bug|prefLabel|bug 
+http://www.semanlink.net/tag/bug|creationDate|2007-11-10
+http://www.semanlink.net/tag/bug|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/bug|uri|http://www.semanlink.net/tag/bug
+http://www.semanlink.net/tag/missions_spatiales|prefLabel|Missions spatiales
+http://www.semanlink.net/tag/missions_spatiales|broader|http://www.semanlink.net/tag/exploration_spatiale
+http://www.semanlink.net/tag/missions_spatiales|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/missions_spatiales|uri|http://www.semanlink.net/tag/missions_spatiales
+http://www.semanlink.net/tag/missions_spatiales|broader_prefLabel|Exploration spatiale
+http://www.semanlink.net/tag/nlp_based_ir|creationTime|2019-12-01T23:30:40Z
+http://www.semanlink.net/tag/nlp_based_ir|prefLabel|NLP based IR
+http://www.semanlink.net/tag/nlp_based_ir|broader|http://www.semanlink.net/tag/information_retrieval
+http://www.semanlink.net/tag/nlp_based_ir|broader|http://www.semanlink.net/tag/nlp
+http://www.semanlink.net/tag/nlp_based_ir|broader|http://www.semanlink.net/tag/nlp_and_search
+http://www.semanlink.net/tag/nlp_based_ir|creationDate|2019-12-01
+http://www.semanlink.net/tag/nlp_based_ir|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nlp_based_ir|altLabel|AI & IR
+http://www.semanlink.net/tag/nlp_based_ir|altLabel|NLP based Information Retrieval
+http://www.semanlink.net/tag/nlp_based_ir|uri|http://www.semanlink.net/tag/nlp_based_ir
+http://www.semanlink.net/tag/nlp_based_ir|broader_prefLabel|Information retrieval
+http://www.semanlink.net/tag/nlp_based_ir|broader_prefLabel|NLP
+http://www.semanlink.net/tag/nlp_based_ir|broader_prefLabel|NLP and Search
+http://www.semanlink.net/tag/nlp_based_ir|broader_altLabel|IR
+http://www.semanlink.net/tag/nlp_based_ir|broader_altLabel|TALN
+http://www.semanlink.net/tag/nlp_based_ir|broader_altLabel|Traitement Automatique du Langage Naturel
+http://www.semanlink.net/tag/nlp_based_ir|broader_altLabel|Natural Language Processing
+http://www.semanlink.net/tag/glyphosate|creationTime|2005-03-14T21:32:37Z
+http://www.semanlink.net/tag/glyphosate|prefLabel|Glyphosate
+http://www.semanlink.net/tag/glyphosate|broader|http://www.semanlink.net/tag/monsanto
+http://www.semanlink.net/tag/glyphosate|creationDate|2005-03-14
+http://www.semanlink.net/tag/glyphosate|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/glyphosate|altLabel|Roundup
+http://www.semanlink.net/tag/glyphosate|uri|http://www.semanlink.net/tag/glyphosate
+http://www.semanlink.net/tag/glyphosate|broader_prefLabel|Monsanto
+http://www.semanlink.net/tag/glyphosate|broader_related|http://www.semanlink.net/tag/ogm
+http://www.semanlink.net/tag/danse|creationTime|2008-02-09T12:15:24Z
+http://www.semanlink.net/tag/danse|prefLabel|Danse
+http://www.semanlink.net/tag/danse|creationDate|2008-02-09
+http://www.semanlink.net/tag/danse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/danse|uri|http://www.semanlink.net/tag/danse
+http://www.semanlink.net/tag/alan_kay|prefLabel|Alan Kay
+http://www.semanlink.net/tag/alan_kay|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/alan_kay|comment|"""The best way to predict the future is to invent it."""
+http://www.semanlink.net/tag/alan_kay|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/alan_kay|uri|http://www.semanlink.net/tag/alan_kay +http://www.semanlink.net/tag/alan_kay|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/alan_kay|broader_altLabel|Technical guys +http://www.semanlink.net/tag/t_sne|creationTime|2015-10-16T11:35:36Z +http://www.semanlink.net/tag/t_sne|prefLabel|t-SNE +http://www.semanlink.net/tag/t_sne|broader|http://www.semanlink.net/tag/dimensionality_reduction +http://www.semanlink.net/tag/t_sne|broader|http://www.semanlink.net/tag/data_visualisation +http://www.semanlink.net/tag/t_sne|creationDate|2015-10-16 +http://www.semanlink.net/tag/t_sne|comment|nonlinear dimensionality reduction technique that is particularly well suited for embedding high-dimensional data into a space of two or three dimensions, which can then be visualized in a scatter plot. Specifically, it models each high-dimensional object by a two- or three-dimensional point in such a way that similar objects are modeled by nearby points and dissimilar objects are modeled by distant points. +http://www.semanlink.net/tag/t_sne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/t_sne|describedBy|https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding +http://www.semanlink.net/tag/t_sne|altLabel|t-distributed stochastic neighbor embedding +http://www.semanlink.net/tag/t_sne|uri|http://www.semanlink.net/tag/t_sne +http://www.semanlink.net/tag/t_sne|broader_prefLabel|Dimensionality reduction +http://www.semanlink.net/tag/t_sne|broader_prefLabel|Data visualisation +http://www.semanlink.net/tag/sesame|prefLabel|Sesame +http://www.semanlink.net/tag/sesame|broader|http://www.semanlink.net/tag/rdf_framework +http://www.semanlink.net/tag/sesame|broader|http://www.semanlink.net/tag/rdf4j +http://www.semanlink.net/tag/sesame|comment|Replaced by [RDF4J](tag:rdf4j) +http://www.semanlink.net/tag/sesame|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sesame|uri|http://www.semanlink.net/tag/sesame +http://www.semanlink.net/tag/sesame|broader_prefLabel|RDF Framework +http://www.semanlink.net/tag/sesame|broader_prefLabel|RDF4J +http://www.semanlink.net/tag/afrique_francophone|creationTime|2007-06-09T11:26:40Z +http://www.semanlink.net/tag/afrique_francophone|prefLabel|Afrique francophone +http://www.semanlink.net/tag/afrique_francophone|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/afrique_francophone|broader|http://www.semanlink.net/tag/francophonie +http://www.semanlink.net/tag/afrique_francophone|creationDate|2007-06-09 +http://www.semanlink.net/tag/afrique_francophone|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afrique_francophone|uri|http://www.semanlink.net/tag/afrique_francophone +http://www.semanlink.net/tag/afrique_francophone|broader_prefLabel|Afrique +http://www.semanlink.net/tag/afrique_francophone|broader_prefLabel|Francophonie +http://www.semanlink.net/tag/afrique_francophone|broader_altLabel|Africa +http://www.semanlink.net/tag/amerindien|creationTime|2007-03-28T23:46:23Z +http://www.semanlink.net/tag/amerindien|prefLabel|Amérindien +http://www.semanlink.net/tag/amerindien|broader|http://www.semanlink.net/tag/amerique +http://www.semanlink.net/tag/amerindien|creationDate|2007-03-28 +http://www.semanlink.net/tag/amerindien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amerindien|altLabel|Native americans 
+http://www.semanlink.net/tag/amerindien|uri|http://www.semanlink.net/tag/amerindien +http://www.semanlink.net/tag/amerindien|broader_prefLabel|Amérique +http://www.semanlink.net/tag/quora|creationTime|2019-07-02T01:08:01Z +http://www.semanlink.net/tag/quora|prefLabel|Quora +http://www.semanlink.net/tag/quora|creationDate|2019-07-02 +http://www.semanlink.net/tag/quora|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/quora|uri|http://www.semanlink.net/tag/quora +http://www.semanlink.net/tag/poete|prefLabel|Poète +http://www.semanlink.net/tag/poete|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/poete|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/poete|uri|http://www.semanlink.net/tag/poete +http://www.semanlink.net/tag/poete|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/sparse_matrix|creationTime|2019-05-05T10:32:55Z +http://www.semanlink.net/tag/sparse_matrix|prefLabel|Sparse matrix +http://www.semanlink.net/tag/sparse_matrix|creationDate|2019-05-05 +http://www.semanlink.net/tag/sparse_matrix|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparse_matrix|uri|http://www.semanlink.net/tag/sparse_matrix +http://www.semanlink.net/tag/zinder|creationTime|2007-08-21T23:49:38Z +http://www.semanlink.net/tag/zinder|prefLabel|Zinder +http://www.semanlink.net/tag/zinder|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/zinder|creationDate|2007-08-21 +http://www.semanlink.net/tag/zinder|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zinder|altLabel|Damarangam +http://www.semanlink.net/tag/zinder|uri|http://www.semanlink.net/tag/zinder +http://www.semanlink.net/tag/zinder|broader_prefLabel|Niger +http://www.semanlink.net/tag/zinder|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/zinder|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/zinder|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/xml|prefLabel|XML +http://www.semanlink.net/tag/xml|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/xml|broader|http://www.semanlink.net/tag/data_interchange_format +http://www.semanlink.net/tag/xml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xml|uri|http://www.semanlink.net/tag/xml +http://www.semanlink.net/tag/xml|broader_prefLabel|Dev +http://www.semanlink.net/tag/xml|broader_prefLabel|Data Interchange Format +http://www.semanlink.net/tag/privacy_and_internet|creationTime|2008-03-29T18:51:33Z +http://www.semanlink.net/tag/privacy_and_internet|prefLabel|Privacy and internet +http://www.semanlink.net/tag/privacy_and_internet|broader|http://www.semanlink.net/tag/vie_privee +http://www.semanlink.net/tag/privacy_and_internet|broader|http://www.semanlink.net/tag/securite_informatique +http://www.semanlink.net/tag/privacy_and_internet|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/privacy_and_internet|related|http://www.semanlink.net/tag/cybersurveillance +http://www.semanlink.net/tag/privacy_and_internet|creationDate|2008-03-29 +http://www.semanlink.net/tag/privacy_and_internet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/privacy_and_internet|uri|http://www.semanlink.net/tag/privacy_and_internet 
+http://www.semanlink.net/tag/privacy_and_internet|broader_prefLabel|Privacy +http://www.semanlink.net/tag/privacy_and_internet|broader_prefLabel|Cybersecurity +http://www.semanlink.net/tag/privacy_and_internet|broader_prefLabel|Sécurité informatique +http://www.semanlink.net/tag/privacy_and_internet|broader_prefLabel|Internet +http://www.semanlink.net/tag/privacy_and_internet|broader_altLabel|Vie privée +http://www.semanlink.net/tag/personnage_historique|prefLabel|Personnage historique +http://www.semanlink.net/tag/personnage_historique|broader|http://www.semanlink.net/tag/homme_celebre +http://www.semanlink.net/tag/personnage_historique|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/personnage_historique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/personnage_historique|uri|http://www.semanlink.net/tag/personnage_historique +http://www.semanlink.net/tag/personnage_historique|broader_prefLabel|Homme célèbre +http://www.semanlink.net/tag/personnage_historique|broader_prefLabel|Histoire +http://www.semanlink.net/tag/late_bronze_age_collapse|creationTime|2020-11-14T15:28:31Z +http://www.semanlink.net/tag/late_bronze_age_collapse|prefLabel|Late Bronze Age collapse +http://www.semanlink.net/tag/late_bronze_age_collapse|broader|http://www.semanlink.net/tag/age_du_bronze +http://www.semanlink.net/tag/late_bronze_age_collapse|creationDate|2020-11-14 +http://www.semanlink.net/tag/late_bronze_age_collapse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/late_bronze_age_collapse|describedBy|https://en.wikipedia.org/wiki/Late_Bronze_Age_collapse +http://www.semanlink.net/tag/late_bronze_age_collapse|uri|http://www.semanlink.net/tag/late_bronze_age_collapse +http://www.semanlink.net/tag/late_bronze_age_collapse|broader_prefLabel|Âge du bronze +http://www.semanlink.net/tag/bag_of_words|creationTime|2015-10-20T16:21:36Z +http://www.semanlink.net/tag/bag_of_words|prefLabel|Bag-of-words +http://www.semanlink.net/tag/bag_of_words|broader|http://www.semanlink.net/tag/nlp_text_representation +http://www.semanlink.net/tag/bag_of_words|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/bag_of_words|creationDate|2015-10-20 +http://www.semanlink.net/tag/bag_of_words|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bag_of_words|describedBy|https://en.wikipedia.org/wiki/Bag-of-words_model +http://www.semanlink.net/tag/bag_of_words|uri|http://www.semanlink.net/tag/bag_of_words +http://www.semanlink.net/tag/bag_of_words|broader_prefLabel|NLP: Text Representation +http://www.semanlink.net/tag/bag_of_words|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/bag_of_words|broader_altLabel|Text Representation +http://www.semanlink.net/tag/sparql_endpoint|creationTime|2007-06-23T17:46:35Z +http://www.semanlink.net/tag/sparql_endpoint|prefLabel|SPARQL endpoint +http://www.semanlink.net/tag/sparql_endpoint|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_endpoint|creationDate|2007-06-23 +http://www.semanlink.net/tag/sparql_endpoint|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_endpoint|uri|http://www.semanlink.net/tag/sparql_endpoint +http://www.semanlink.net/tag/sparql_endpoint|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/fulani|prefLabel|Fulani +http://www.semanlink.net/tag/fulani|broader|http://www.semanlink.net/tag/afrique_de_l_ouest 
+http://www.semanlink.net/tag/fulani|broader|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/fulani|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fulani|altLabel|Peul +http://www.semanlink.net/tag/fulani|uri|http://www.semanlink.net/tag/fulani +http://www.semanlink.net/tag/fulani|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/fulani|broader_prefLabel|Peuples +http://www.semanlink.net/tag/eve_africaine|prefLabel|Eve africaine +http://www.semanlink.net/tag/eve_africaine|broader|http://www.semanlink.net/tag/origines_de_l_homme +http://www.semanlink.net/tag/eve_africaine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eve_africaine|uri|http://www.semanlink.net/tag/eve_africaine +http://www.semanlink.net/tag/eve_africaine|broader_prefLabel|Origines de l'homme +http://www.semanlink.net/tag/aws|creationTime|2014-07-02T09:10:42Z +http://www.semanlink.net/tag/aws|prefLabel|AWS +http://www.semanlink.net/tag/aws|broader|http://www.semanlink.net/tag/cloud +http://www.semanlink.net/tag/aws|broader|http://www.semanlink.net/tag/amazon +http://www.semanlink.net/tag/aws|creationDate|2014-07-02 +http://www.semanlink.net/tag/aws|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aws|uri|http://www.semanlink.net/tag/aws +http://www.semanlink.net/tag/aws|broader_prefLabel|Cloud +http://www.semanlink.net/tag/aws|broader_prefLabel|Amazon +http://www.semanlink.net/tag/aws|broader_altLabel|Cloud computing +http://www.semanlink.net/tag/jarriel_perlman|creationTime|2015-05-12T01:51:07Z +http://www.semanlink.net/tag/jarriel_perlman|prefLabel|Jarriel Perlman +http://www.semanlink.net/tag/jarriel_perlman|creationDate|2015-05-12 +http://www.semanlink.net/tag/jarriel_perlman|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jarriel_perlman|uri|http://www.semanlink.net/tag/jarriel_perlman +http://www.semanlink.net/tag/museum_d_histoire_naturelle|prefLabel|Museum d'Histoire Naturelle +http://www.semanlink.net/tag/museum_d_histoire_naturelle|broader|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/museum_d_histoire_naturelle|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/museum_d_histoire_naturelle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/museum_d_histoire_naturelle|uri|http://www.semanlink.net/tag/museum_d_histoire_naturelle +http://www.semanlink.net/tag/museum_d_histoire_naturelle|broader_prefLabel|Musée +http://www.semanlink.net/tag/museum_d_histoire_naturelle|broader_prefLabel|Biology +http://www.semanlink.net/tag/museum_d_histoire_naturelle|broader_altLabel|Biologie +http://www.semanlink.net/tag/maltraitance_animale|creationTime|2017-10-06T21:53:59Z +http://www.semanlink.net/tag/maltraitance_animale|prefLabel|Maltraitance animale +http://www.semanlink.net/tag/maltraitance_animale|creationDate|2017-10-06 +http://www.semanlink.net/tag/maltraitance_animale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maltraitance_animale|uri|http://www.semanlink.net/tag/maltraitance_animale +http://www.semanlink.net/tag/jquery|creationTime|2011-02-03T23:14:49Z +http://www.semanlink.net/tag/jquery|prefLabel|jQuery +http://www.semanlink.net/tag/jquery|broader|http://www.semanlink.net/tag/javascript_librairies +http://www.semanlink.net/tag/jquery|creationDate|2011-02-03 
+http://www.semanlink.net/tag/jquery|comment|The Write Less, Do More, JavaScript Library +http://www.semanlink.net/tag/jquery|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jquery|describedBy|http://jquery.com/ +http://www.semanlink.net/tag/jquery|uri|http://www.semanlink.net/tag/jquery +http://www.semanlink.net/tag/jquery|broader_prefLabel|JavaScript librairies +http://www.semanlink.net/tag/jquery|broader_altLabel|JavaScript framework +http://www.semanlink.net/tag/pdf_extract|creationTime|2020-04-02T15:26:43Z +http://www.semanlink.net/tag/pdf_extract|prefLabel|pdf extract +http://www.semanlink.net/tag/pdf_extract|related|http://www.semanlink.net/tag/pdf_format +http://www.semanlink.net/tag/pdf_extract|creationDate|2020-04-02 +http://www.semanlink.net/tag/pdf_extract|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pdf_extract|uri|http://www.semanlink.net/tag/pdf_extract +http://www.semanlink.net/tag/antiquite_africaine|prefLabel|Archéologie africaine +http://www.semanlink.net/tag/antiquite_africaine|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/antiquite_africaine|broader|http://www.semanlink.net/tag/histoire_de_l_afrique +http://www.semanlink.net/tag/antiquite_africaine|related|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/antiquite_africaine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antiquite_africaine|uri|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/tag/antiquite_africaine|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/antiquite_africaine|broader_prefLabel|Histoire de l'Afrique +http://www.semanlink.net/tag/antiquite_africaine|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/publishing_rdf_vocabularies|prefLabel|Publishing RDF Vocabularies +http://www.semanlink.net/tag/publishing_rdf_vocabularies|broader|http://www.semanlink.net/tag/rdf_vocabularies +http://www.semanlink.net/tag/publishing_rdf_vocabularies|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/publishing_rdf_vocabularies|creationDate|2007-01-02 +http://www.semanlink.net/tag/publishing_rdf_vocabularies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/publishing_rdf_vocabularies|uri|http://www.semanlink.net/tag/publishing_rdf_vocabularies +http://www.semanlink.net/tag/publishing_rdf_vocabularies|broader_prefLabel|RDF Vocabularies +http://www.semanlink.net/tag/publishing_rdf_vocabularies|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/fact_checking|creationTime|2015-10-31T10:17:19Z +http://www.semanlink.net/tag/fact_checking|prefLabel|Fact-checking +http://www.semanlink.net/tag/fact_checking|broader|http://www.semanlink.net/tag/verite +http://www.semanlink.net/tag/fact_checking|broader|http://www.semanlink.net/tag/medias +http://www.semanlink.net/tag/fact_checking|creationDate|2015-10-31 +http://www.semanlink.net/tag/fact_checking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fact_checking|uri|http://www.semanlink.net/tag/fact_checking +http://www.semanlink.net/tag/fact_checking|broader_prefLabel|Vérité +http://www.semanlink.net/tag/fact_checking|broader_prefLabel|Médias +http://www.semanlink.net/tag/responsabilite|creationTime|2020-12-14T16:37:41Z +http://www.semanlink.net/tag/responsabilite|prefLabel|Responsabilité 
+http://www.semanlink.net/tag/responsabilite|creationDate|2020-12-14 +http://www.semanlink.net/tag/responsabilite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/responsabilite|uri|http://www.semanlink.net/tag/responsabilite +http://www.semanlink.net/tag/antiwork|creationTime|2015-02-03T18:35:34Z +http://www.semanlink.net/tag/antiwork|prefLabel|Antiwork +http://www.semanlink.net/tag/antiwork|broader|http://www.semanlink.net/tag/travail +http://www.semanlink.net/tag/antiwork|broader|http://www.semanlink.net/tag/travailler_moins +http://www.semanlink.net/tag/antiwork|creationDate|2015-02-03 +http://www.semanlink.net/tag/antiwork|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antiwork|uri|http://www.semanlink.net/tag/antiwork +http://www.semanlink.net/tag/antiwork|broader_prefLabel|Travail +http://www.semanlink.net/tag/antiwork|broader_prefLabel|Travailler moins +http://www.semanlink.net/tag/us_vs_europe|creationTime|2010-01-12T14:51:02Z +http://www.semanlink.net/tag/us_vs_europe|prefLabel|US vs Europe +http://www.semanlink.net/tag/us_vs_europe|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/us_vs_europe|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/us_vs_europe|creationDate|2010-01-12 +http://www.semanlink.net/tag/us_vs_europe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/us_vs_europe|uri|http://www.semanlink.net/tag/us_vs_europe +http://www.semanlink.net/tag/us_vs_europe|broader_prefLabel|USA +http://www.semanlink.net/tag/us_vs_europe|broader_prefLabel|Europe +http://www.semanlink.net/tag/us_vs_europe|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/us_vs_europe|broader_altLabel|United States +http://www.semanlink.net/tag/juif|prefLabel|Juifs +http://www.semanlink.net/tag/juif|broader|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/juif|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/juif|uri|http://www.semanlink.net/tag/juif +http://www.semanlink.net/tag/juif|broader_prefLabel|Peuples +http://www.semanlink.net/tag/clandestins|prefLabel|Clandestins +http://www.semanlink.net/tag/clandestins|broader|http://www.semanlink.net/tag/immigration +http://www.semanlink.net/tag/clandestins|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/clandestins|uri|http://www.semanlink.net/tag/clandestins +http://www.semanlink.net/tag/clandestins|broader_prefLabel|Immigration +http://www.semanlink.net/tag/quizz|creationTime|2009-02-23T22:51:38Z +http://www.semanlink.net/tag/quizz|prefLabel|Quizz +http://www.semanlink.net/tag/quizz|creationDate|2009-02-23 +http://www.semanlink.net/tag/quizz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/quizz|uri|http://www.semanlink.net/tag/quizz +http://www.semanlink.net/tag/areva|prefLabel|Areva +http://www.semanlink.net/tag/areva|broader|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/tag/areva|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/areva|altLabel|Cogema +http://www.semanlink.net/tag/areva|uri|http://www.semanlink.net/tag/areva +http://www.semanlink.net/tag/areva|broader_prefLabel|Industrie nucléaire +http://www.semanlink.net/tag/areva|broader_altLabel|Nucléaire +http://www.semanlink.net/tag/histoire|prefLabel|Histoire 
+http://www.semanlink.net/tag/histoire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire|uri|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/linked_data_gui|creationTime|2012-04-16T15:14:33Z +http://www.semanlink.net/tag/linked_data_gui|prefLabel|Linked Data GUI +http://www.semanlink.net/tag/linked_data_gui|creationDate|2012-04-16 +http://www.semanlink.net/tag/linked_data_gui|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_gui|uri|http://www.semanlink.net/tag/linked_data_gui +http://www.semanlink.net/tag/commission_europeenne|prefLabel|Commission européenne +http://www.semanlink.net/tag/commission_europeenne|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/commission_europeenne|broader|http://www.semanlink.net/tag/institutions_europeennes +http://www.semanlink.net/tag/commission_europeenne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/commission_europeenne|altLabel|European Commission +http://www.semanlink.net/tag/commission_europeenne|uri|http://www.semanlink.net/tag/commission_europeenne +http://www.semanlink.net/tag/commission_europeenne|broader_prefLabel|Europe +http://www.semanlink.net/tag/commission_europeenne|broader_prefLabel|Institutions européennes +http://www.semanlink.net/tag/jobbotization|creationTime|2015-01-02T17:24:31Z +http://www.semanlink.net/tag/jobbotization|prefLabel|AI, robots and jobs +http://www.semanlink.net/tag/jobbotization|broader|http://www.semanlink.net/tag/robotisation +http://www.semanlink.net/tag/jobbotization|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/jobbotization|broader|http://www.semanlink.net/tag/travail +http://www.semanlink.net/tag/jobbotization|creationDate|2015-01-02 +http://www.semanlink.net/tag/jobbotization|comment|"we will soon be looking at hordes of citizens of zero economic value. Figuring out how to deal with the impacts of this development +will be the greatest challenge facing free market economies in this century." 
+http://www.semanlink.net/tag/jobbotization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jobbotization|altLabel|Robots and society +http://www.semanlink.net/tag/jobbotization|altLabel|Jobbotization +http://www.semanlink.net/tag/jobbotization|uri|http://www.semanlink.net/tag/jobbotization +http://www.semanlink.net/tag/jobbotization|broader_prefLabel|Robotisation +http://www.semanlink.net/tag/jobbotization|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/jobbotization|broader_prefLabel|Travail +http://www.semanlink.net/tag/jobbotization|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/jobbotization|broader_altLabel|AI +http://www.semanlink.net/tag/jobbotization|broader_altLabel|IA +http://www.semanlink.net/tag/jobbotization|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/unsupervised_machine_learning|creationTime|2013-06-06T14:58:01Z +http://www.semanlink.net/tag/unsupervised_machine_learning|prefLabel|Unsupervised machine learning +http://www.semanlink.net/tag/unsupervised_machine_learning|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/unsupervised_machine_learning|creationDate|2013-06-06 +http://www.semanlink.net/tag/unsupervised_machine_learning|comment|In machine learning, unsupervised learning refers to the problem of trying to find hidden structure in unlabeled data +http://www.semanlink.net/tag/unsupervised_machine_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/unsupervised_machine_learning|describedBy|https://en.wikipedia.org/wiki/Unsupervised_learning +http://www.semanlink.net/tag/unsupervised_machine_learning|uri|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/unsupervised_machine_learning|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/unsupervised_machine_learning|broader_altLabel|ML +http://www.semanlink.net/tag/unsupervised_machine_learning|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/unsupervised_machine_learning|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/recurrent_neural_network|creationTime|2015-11-08T18:38:00Z +http://www.semanlink.net/tag/recurrent_neural_network|prefLabel|Recurrent neural network +http://www.semanlink.net/tag/recurrent_neural_network|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/recurrent_neural_network|related|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/tag/recurrent_neural_network|related|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/tag/recurrent_neural_network|creationDate|2015-11-08 +http://www.semanlink.net/tag/recurrent_neural_network|comment|"the natural architecture of NN to deal with sequences. + +NN where **connections between units form a directed cycle**. This creates an **internal state of the network** which allows it to exhibit **dynamic temporal behavior**. Unlike feedforward neural networks, RNNs can use their internal memory to process arbitrary sequences of inputs. This makes them applicable to tasks such as unsegmented connected handwriting recognition or speech recognition. 
+
+2 broad classes: finite impulse and infinite impulse (a finite impulse RNN can be unrolled and replaced with a strictly feedforward neural network)
+
+Problems with RNNs:
+
+- they suffer from the **vanishing gradient problem** that prevents
+them from learning long-range dependencies. [#LSTMs](/tag/lstm_networks) improve upon this
+by using a gating mechanism that allows for explicit memory deletes and
+updates.
+- inherently sequential computation which prevents parallelization across elements of the input sequence
+
+RNN in NLP:
+
+- Goal: representing a sequence of words as dense vectors
+- input: seq of words (or chars)
+- output: a seq of hidden states, each a representation of the seq from the beginning to a specific position
+- advantages: encoding sequential relationships and dependency among words
+
+
+"
+http://www.semanlink.net/tag/recurrent_neural_network|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/recurrent_neural_network|describedBy|https://en.wikipedia.org/wiki/Recurrent_neural_network
+http://www.semanlink.net/tag/recurrent_neural_network|altLabel|RNN
+http://www.semanlink.net/tag/recurrent_neural_network|uri|http://www.semanlink.net/tag/recurrent_neural_network
+http://www.semanlink.net/tag/recurrent_neural_network|broader_prefLabel|Neural networks
+http://www.semanlink.net/tag/recurrent_neural_network|broader_altLabel|Artificial neural network
+http://www.semanlink.net/tag/recurrent_neural_network|broader_altLabel|ANN
+http://www.semanlink.net/tag/recurrent_neural_network|broader_altLabel|NN
+http://www.semanlink.net/tag/sproutcore|creationTime|2008-06-17T23:08:31Z
+http://www.semanlink.net/tag/sproutcore|prefLabel|SproutCore
+http://www.semanlink.net/tag/sproutcore|broader|http://www.semanlink.net/tag/javascript_librairies
+http://www.semanlink.net/tag/sproutcore|related|http://www.semanlink.net/tag/iphone
+http://www.semanlink.net/tag/sproutcore|related|http://www.semanlink.net/tag/apple
+http://www.semanlink.net/tag/sproutcore|creationDate|2008-06-17
+http://www.semanlink.net/tag/sproutcore|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sproutcore|describedBy|http://www.sproutcore.com/
+http://www.semanlink.net/tag/sproutcore|uri|http://www.semanlink.net/tag/sproutcore
+http://www.semanlink.net/tag/sproutcore|broader_prefLabel|JavaScript librairies
+http://www.semanlink.net/tag/sproutcore|broader_altLabel|JavaScript framework
+http://www.semanlink.net/tag/internet_of_things|creationTime|2012-12-01T13:44:00Z
+http://www.semanlink.net/tag/internet_of_things|prefLabel|Internet of Things
+http://www.semanlink.net/tag/internet_of_things|broader|http://www.semanlink.net/tag/internet
+http://www.semanlink.net/tag/internet_of_things|creationDate|2012-12-01
+http://www.semanlink.net/tag/internet_of_things|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/internet_of_things|uri|http://www.semanlink.net/tag/internet_of_things
+http://www.semanlink.net/tag/internet_of_things|broader_prefLabel|Internet
+http://www.semanlink.net/tag/excel|creationTime|2007-07-06T22:40:18Z
+http://www.semanlink.net/tag/excel|prefLabel|Excel
+http://www.semanlink.net/tag/excel|broader|http://www.semanlink.net/tag/spreadsheets
+http://www.semanlink.net/tag/excel|creationDate|2007-07-06
+http://www.semanlink.net/tag/excel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/excel|uri|http://www.semanlink.net/tag/excel
+http://www.semanlink.net/tag/excel|broader_prefLabel|Spreadsheets +http://www.semanlink.net/tag/excel|broader_altLabel|Spreadsheet +http://www.semanlink.net/tag/microservices|creationTime|2017-05-15T18:23:36Z +http://www.semanlink.net/tag/microservices|prefLabel|Microservices +http://www.semanlink.net/tag/microservices|creationDate|2017-05-15 +http://www.semanlink.net/tag/microservices|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/microservices|uri|http://www.semanlink.net/tag/microservices +http://www.semanlink.net/tag/boston_dynamics|creationTime|2020-12-30T13:12:37Z +http://www.semanlink.net/tag/boston_dynamics|prefLabel|Boston Dynamics +http://www.semanlink.net/tag/boston_dynamics|broader|http://www.semanlink.net/tag/robotique +http://www.semanlink.net/tag/boston_dynamics|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/boston_dynamics|creationDate|2020-12-30 +http://www.semanlink.net/tag/boston_dynamics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/boston_dynamics|describedBy|https://en.wikipedia.org/wiki/Boston_Dynamics +http://www.semanlink.net/tag/boston_dynamics|uri|http://www.semanlink.net/tag/boston_dynamics +http://www.semanlink.net/tag/boston_dynamics|broader_prefLabel|Robotique +http://www.semanlink.net/tag/boston_dynamics|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/boston_dynamics|broader_altLabel|Robotics +http://www.semanlink.net/tag/boston_dynamics|broader_altLabel|Robot +http://www.semanlink.net/tag/log4j|creationTime|2009-04-24T23:42:55Z +http://www.semanlink.net/tag/log4j|prefLabel|log4j +http://www.semanlink.net/tag/log4j|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/log4j|broader|http://www.semanlink.net/tag/java_dev +http://www.semanlink.net/tag/log4j|creationDate|2009-04-24 +http://www.semanlink.net/tag/log4j|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/log4j|uri|http://www.semanlink.net/tag/log4j +http://www.semanlink.net/tag/log4j|broader_prefLabel|apache.org +http://www.semanlink.net/tag/log4j|broader_prefLabel|Java dev +http://www.semanlink.net/tag/closure|creationTime|2010-04-28T14:16:13Z +http://www.semanlink.net/tag/closure|prefLabel|Closure +http://www.semanlink.net/tag/closure|creationDate|2010-04-28 +http://www.semanlink.net/tag/closure|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/closure|describedBy|https://en.wikipedia.org/wiki/Closure_(computer_science) +http://www.semanlink.net/tag/closure|uri|http://www.semanlink.net/tag/closure +http://www.semanlink.net/tag/tweet|creationTime|2019-07-06T16:46:46Z +http://www.semanlink.net/tag/tweet|prefLabel|Tweet +http://www.semanlink.net/tag/tweet|creationDate|2019-07-06 +http://www.semanlink.net/tag/tweet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tweet|uri|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/tag/dette_covid|creationTime|2020-10-10T14:37:12Z +http://www.semanlink.net/tag/dette_covid|prefLabel|Dette Covid +http://www.semanlink.net/tag/dette_covid|broader|http://www.semanlink.net/tag/dette +http://www.semanlink.net/tag/dette_covid|broader|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/tag/dette_covid|creationDate|2020-10-10 +http://www.semanlink.net/tag/dette_covid|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/dette_covid|uri|http://www.semanlink.net/tag/dette_covid +http://www.semanlink.net/tag/dette_covid|broader_prefLabel|Debt +http://www.semanlink.net/tag/dette_covid|broader_prefLabel|Covid19 +http://www.semanlink.net/tag/dette_covid|broader_altLabel|Dette +http://www.semanlink.net/tag/dette_covid|broader_altLabel|covid-19 +http://www.semanlink.net/tag/dette_covid|broader_altLabel|Covid +http://www.semanlink.net/tag/dette_covid|broader_altLabel|Coronavirus +http://www.semanlink.net/tag/lagos|creationTime|2007-08-08T17:04:11Z +http://www.semanlink.net/tag/lagos|prefLabel|Lagos +http://www.semanlink.net/tag/lagos|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/lagos|broader|http://www.semanlink.net/tag/nigeria +http://www.semanlink.net/tag/lagos|creationDate|2007-08-08 +http://www.semanlink.net/tag/lagos|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lagos|uri|http://www.semanlink.net/tag/lagos +http://www.semanlink.net/tag/lagos|broader_prefLabel|Ville +http://www.semanlink.net/tag/lagos|broader_prefLabel|Nigeria +http://www.semanlink.net/tag/sacha_guitry|prefLabel|Sacha Guitry +http://www.semanlink.net/tag/sacha_guitry|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sacha_guitry|uri|http://www.semanlink.net/tag/sacha_guitry +http://www.semanlink.net/tag/medecine|prefLabel|Médecine +http://www.semanlink.net/tag/medecine|broader|http://www.semanlink.net/tag/sante +http://www.semanlink.net/tag/medecine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/medecine|uri|http://www.semanlink.net/tag/medecine +http://www.semanlink.net/tag/medecine|broader_prefLabel|Santé +http://www.semanlink.net/tag/rebellion_touaregue|creationTime|2007-12-05T23:20:18Z +http://www.semanlink.net/tag/rebellion_touaregue|prefLabel|Rébellion touarègue +http://www.semanlink.net/tag/rebellion_touaregue|broader|http://www.semanlink.net/tag/touareg +http://www.semanlink.net/tag/rebellion_touaregue|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/rebellion_touaregue|creationDate|2007-12-05 +http://www.semanlink.net/tag/rebellion_touaregue|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rebellion_touaregue|uri|http://www.semanlink.net/tag/rebellion_touaregue +http://www.semanlink.net/tag/rebellion_touaregue|broader_prefLabel|Touareg +http://www.semanlink.net/tag/rebellion_touaregue|broader_prefLabel|Niger +http://www.semanlink.net/tag/rebellion_touaregue|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/rebellion_touaregue|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/rebellion_touaregue|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/voip|prefLabel|VoIP +http://www.semanlink.net/tag/voip|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/voip|broader|http://www.semanlink.net/tag/telephone +http://www.semanlink.net/tag/voip|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/voip|uri|http://www.semanlink.net/tag/voip +http://www.semanlink.net/tag/voip|broader_prefLabel|Internet +http://www.semanlink.net/tag/voip|broader_prefLabel|Téléphone +http://www.semanlink.net/tag/voip|broader_altLabel|Téléphonie +http://www.semanlink.net/tag/neural_network_interpretability|creationTime|2018-03-07T14:34:40Z 
+http://www.semanlink.net/tag/neural_network_interpretability|prefLabel|Neural network interpretability +http://www.semanlink.net/tag/neural_network_interpretability|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/neural_network_interpretability|creationDate|2018-03-07 +http://www.semanlink.net/tag/neural_network_interpretability|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neural_network_interpretability|uri|http://www.semanlink.net/tag/neural_network_interpretability +http://www.semanlink.net/tag/neural_network_interpretability|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/neural_network_interpretability|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/neural_network_interpretability|broader_altLabel|ANN +http://www.semanlink.net/tag/neural_network_interpretability|broader_altLabel|NN +http://www.semanlink.net/tag/soja|prefLabel|Soja +http://www.semanlink.net/tag/soja|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/soja|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/soja|uri|http://www.semanlink.net/tag/soja +http://www.semanlink.net/tag/soja|broader_prefLabel|Agriculture +http://www.semanlink.net/tag/thomas_more|creationTime|2017-01-29T18:48:45Z +http://www.semanlink.net/tag/thomas_more|prefLabel|Thomas More +http://www.semanlink.net/tag/thomas_more|creationDate|2017-01-29 +http://www.semanlink.net/tag/thomas_more|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thomas_more|describedBy|https://en.wikipedia.org/wiki/Thomas_More +http://www.semanlink.net/tag/thomas_more|uri|http://www.semanlink.net/tag/thomas_more +http://www.semanlink.net/tag/geometrie|prefLabel|Géométrie +http://www.semanlink.net/tag/geometrie|broader|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/geometrie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/geometrie|uri|http://www.semanlink.net/tag/geometrie +http://www.semanlink.net/tag/geometrie|broader_prefLabel|Mathématiques +http://www.semanlink.net/tag/geometrie|broader_altLabel|Math +http://www.semanlink.net/tag/flask|creationTime|2018-03-26T08:37:54Z +http://www.semanlink.net/tag/flask|prefLabel|Flask +http://www.semanlink.net/tag/flask|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/flask|broader|http://www.semanlink.net/tag/python_tools +http://www.semanlink.net/tag/flask|creationDate|2018-03-26 +http://www.semanlink.net/tag/flask|comment|"- [how to serve static files](https://stackoverflow.com/questions/20646822/how-to-serve-static-files-in-flask) +- [config](http://flask.pocoo.org/docs/config/)" +http://www.semanlink.net/tag/flask|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/flask|homepage|http://flask.pocoo.org/ +http://www.semanlink.net/tag/flask|uri|http://www.semanlink.net/tag/flask +http://www.semanlink.net/tag/flask|broader_prefLabel|Web dev +http://www.semanlink.net/tag/flask|broader_prefLabel|Python tools +http://www.semanlink.net/tag/flask|broader_altLabel|Web app dev +http://www.semanlink.net/tag/responsabilite_de_la_france|creationTime|2009-01-19T00:36:07Z +http://www.semanlink.net/tag/responsabilite_de_la_france|prefLabel|Responsabilité de la France +http://www.semanlink.net/tag/responsabilite_de_la_france|creationDate|2009-01-19 
+http://www.semanlink.net/tag/responsabilite_de_la_france|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/responsabilite_de_la_france|uri|http://www.semanlink.net/tag/responsabilite_de_la_france +http://www.semanlink.net/tag/apprendre_une_langue|creationTime|2016-05-07T00:15:59Z +http://www.semanlink.net/tag/apprendre_une_langue|prefLabel|Apprendre une langue +http://www.semanlink.net/tag/apprendre_une_langue|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/apprendre_une_langue|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/apprendre_une_langue|creationDate|2016-05-07 +http://www.semanlink.net/tag/apprendre_une_langue|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apprendre_une_langue|uri|http://www.semanlink.net/tag/apprendre_une_langue +http://www.semanlink.net/tag/apprendre_une_langue|broader_prefLabel|Langues +http://www.semanlink.net/tag/apprendre_une_langue|broader_prefLabel|Education +http://www.semanlink.net/tag/apprendre_une_langue|broader_altLabel|Enseignement +http://www.semanlink.net/tag/information_sur_internet|creationTime|2016-09-19T11:29:30Z +http://www.semanlink.net/tag/information_sur_internet|prefLabel|Information sur internet +http://www.semanlink.net/tag/information_sur_internet|broader|http://www.semanlink.net/tag/journalisme +http://www.semanlink.net/tag/information_sur_internet|related|http://www.semanlink.net/tag/post_verite +http://www.semanlink.net/tag/information_sur_internet|creationDate|2016-09-19 +http://www.semanlink.net/tag/information_sur_internet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/information_sur_internet|uri|http://www.semanlink.net/tag/information_sur_internet +http://www.semanlink.net/tag/information_sur_internet|broader_prefLabel|Journalisme +http://www.semanlink.net/tag/chine_vs_occident|creationTime|2021-07-10T13:39:10Z +http://www.semanlink.net/tag/chine_vs_occident|prefLabel|Chine vs Occident +http://www.semanlink.net/tag/chine_vs_occident|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/chine_vs_occident|creationDate|2021-07-10 +http://www.semanlink.net/tag/chine_vs_occident|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chine_vs_occident|uri|http://www.semanlink.net/tag/chine_vs_occident +http://www.semanlink.net/tag/chine_vs_occident|broader_prefLabel|Chine +http://www.semanlink.net/tag/chine_vs_occident|broader_altLabel|China +http://www.semanlink.net/tag/daphne_koller|creationTime|2012-08-09T23:17:37Z +http://www.semanlink.net/tag/daphne_koller|prefLabel|Daphne Koller +http://www.semanlink.net/tag/daphne_koller|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/daphne_koller|related|http://www.semanlink.net/tag/coursera +http://www.semanlink.net/tag/daphne_koller|creationDate|2012-08-09 +http://www.semanlink.net/tag/daphne_koller|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/daphne_koller|uri|http://www.semanlink.net/tag/daphne_koller +http://www.semanlink.net/tag/daphne_koller|broader_prefLabel|Education +http://www.semanlink.net/tag/daphne_koller|broader_altLabel|Enseignement +http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con|prefLabel|Le gouvernement Chirac est trop con +http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con|broader|http://www.semanlink.net/tag/con_de_chirac 
+http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con|broader|http://www.semanlink.net/tag/gouvernement_chirac +http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con|uri|http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con +http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con|broader_prefLabel|Con de Chirac +http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con|broader_prefLabel|Gouvernement Chirac +http://www.semanlink.net/tag/le_gouvernement_chirac_est_trop_con|broader_altLabel|Chirac est nul +http://www.semanlink.net/tag/heredite|creationTime|2011-03-27T12:25:33Z +http://www.semanlink.net/tag/heredite|prefLabel|Hérédité +http://www.semanlink.net/tag/heredite|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/heredite|creationDate|2011-03-27 +http://www.semanlink.net/tag/heredite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/heredite|uri|http://www.semanlink.net/tag/heredite +http://www.semanlink.net/tag/heredite|broader_prefLabel|Genetics +http://www.semanlink.net/tag/heredite|broader_prefLabel|Génétique +http://www.semanlink.net/tag/catastrophic_forgetting|creationTime|2019-06-08T14:06:33Z +http://www.semanlink.net/tag/catastrophic_forgetting|prefLabel|Catastrophic forgetting +http://www.semanlink.net/tag/catastrophic_forgetting|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/catastrophic_forgetting|creationDate|2019-06-08 +http://www.semanlink.net/tag/catastrophic_forgetting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/catastrophic_forgetting|uri|http://www.semanlink.net/tag/catastrophic_forgetting +http://www.semanlink.net/tag/catastrophic_forgetting|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/catastrophic_forgetting|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/catastrophic_forgetting|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/ruben_verborgh|creationTime|2015-01-30T15:34:14Z +http://www.semanlink.net/tag/ruben_verborgh|prefLabel|Ruben Verborgh +http://www.semanlink.net/tag/ruben_verborgh|creationDate|2015-01-30 +http://www.semanlink.net/tag/ruben_verborgh|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ruben_verborgh|homepage|http://ruben.verborgh.org/ +http://www.semanlink.net/tag/ruben_verborgh|weblog|http://ruben.verborgh.org/blog/ +http://www.semanlink.net/tag/ruben_verborgh|uri|http://www.semanlink.net/tag/ruben_verborgh +http://www.semanlink.net/tag/c2gweb_on_the_web|creationTime|2014-04-09T10:16:00Z +http://www.semanlink.net/tag/c2gweb_on_the_web|prefLabel|C2GWeb on the web +http://www.semanlink.net/tag/c2gweb_on_the_web|broader|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/c2gweb_on_the_web|creationDate|2014-04-09 +http://www.semanlink.net/tag/c2gweb_on_the_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/c2gweb_on_the_web|homepage|http://doc.rplug.renault.com/car-configurator/overview.html +http://www.semanlink.net/tag/c2gweb_on_the_web|uri|http://www.semanlink.net/tag/c2gweb_on_the_web +http://www.semanlink.net/tag/c2gweb_on_the_web|broader_prefLabel|C2GWeb 
+http://www.semanlink.net/tag/guerres_de_religion|creationTime|2007-08-06T17:33:07Z +http://www.semanlink.net/tag/guerres_de_religion|prefLabel|Guerres de religion +http://www.semanlink.net/tag/guerres_de_religion|broader|http://www.semanlink.net/tag/religion +http://www.semanlink.net/tag/guerres_de_religion|broader|http://www.semanlink.net/tag/guerre +http://www.semanlink.net/tag/guerres_de_religion|creationDate|2007-08-06 +http://www.semanlink.net/tag/guerres_de_religion|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/guerres_de_religion|uri|http://www.semanlink.net/tag/guerres_de_religion +http://www.semanlink.net/tag/guerres_de_religion|broader_prefLabel|Religion +http://www.semanlink.net/tag/guerres_de_religion|broader_prefLabel|War +http://www.semanlink.net/tag/guerres_de_religion|broader_altLabel|Guerre +http://www.semanlink.net/tag/wikidata_browser|creationTime|2021-03-08T08:22:13Z +http://www.semanlink.net/tag/wikidata_browser|prefLabel|Wikidata browser +http://www.semanlink.net/tag/wikidata_browser|broader|http://www.semanlink.net/tag/wikidata +http://www.semanlink.net/tag/wikidata_browser|creationDate|2021-03-08 +http://www.semanlink.net/tag/wikidata_browser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wikidata_browser|uri|http://www.semanlink.net/tag/wikidata_browser +http://www.semanlink.net/tag/wikidata_browser|broader_prefLabel|Wikidata +http://www.semanlink.net/tag/wikidata_browser|broader_related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/neonicotinoides|creationTime|2020-09-13T09:49:31Z +http://www.semanlink.net/tag/neonicotinoides|prefLabel|Néonicotinoïdes +http://www.semanlink.net/tag/neonicotinoides|broader|http://www.semanlink.net/tag/pesticide +http://www.semanlink.net/tag/neonicotinoides|related|http://www.semanlink.net/tag/disparition_des_abeilles +http://www.semanlink.net/tag/neonicotinoides|creationDate|2020-09-13 +http://www.semanlink.net/tag/neonicotinoides|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neonicotinoides|uri|http://www.semanlink.net/tag/neonicotinoides +http://www.semanlink.net/tag/neonicotinoides|broader_prefLabel|Pesticides +http://www.semanlink.net/tag/epimorphics_json_rdf|creationTime|2012-03-20T17:48:30Z +http://www.semanlink.net/tag/epimorphics_json_rdf|prefLabel|Epimorphics json-rdf +http://www.semanlink.net/tag/epimorphics_json_rdf|broader|http://www.semanlink.net/tag/epimorphics +http://www.semanlink.net/tag/epimorphics_json_rdf|broader|http://www.semanlink.net/tag/rdf_in_json +http://www.semanlink.net/tag/epimorphics_json_rdf|creationDate|2012-03-20 +http://www.semanlink.net/tag/epimorphics_json_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/epimorphics_json_rdf|uri|http://www.semanlink.net/tag/epimorphics_json_rdf +http://www.semanlink.net/tag/epimorphics_json_rdf|broader_prefLabel|Epimorphics +http://www.semanlink.net/tag/epimorphics_json_rdf|broader_prefLabel|RDF-in-JSON +http://www.semanlink.net/tag/epimorphics_json_rdf|broader_related|http://www.semanlink.net/tag/andy_seaborne +http://www.semanlink.net/tag/epimorphics_json_rdf|broader_related|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/apache_stanbol|creationTime|2012-05-02T15:56:45Z +http://www.semanlink.net/tag/apache_stanbol|prefLabel|Stanbol +http://www.semanlink.net/tag/apache_stanbol|broader|http://www.semanlink.net/tag/interactive_knowledge_stack 
+http://www.semanlink.net/tag/apache_stanbol|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/apache_stanbol|related|http://www.semanlink.net/tag/rupert_westenthaler +http://www.semanlink.net/tag/apache_stanbol|creationDate|2012-05-02 +http://www.semanlink.net/tag/apache_stanbol|comment|components for semantic content management +http://www.semanlink.net/tag/apache_stanbol|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apache_stanbol|describedBy|http://incubator.apache.org/stanbol/ +http://www.semanlink.net/tag/apache_stanbol|uri|http://www.semanlink.net/tag/apache_stanbol +http://www.semanlink.net/tag/apache_stanbol|broader_prefLabel|Interactive Knowledge Stack +http://www.semanlink.net/tag/apache_stanbol|broader_prefLabel|apache.org +http://www.semanlink.net/tag/apache_stanbol|broader_altLabel|IKS +http://www.semanlink.net/tag/langue_electronique|creationTime|2014-11-25T15:35:41Z +http://www.semanlink.net/tag/langue_electronique|prefLabel|Langue électronique +http://www.semanlink.net/tag/langue_electronique|creationDate|2014-11-25 +http://www.semanlink.net/tag/langue_electronique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/langue_electronique|uri|http://www.semanlink.net/tag/langue_electronique +http://www.semanlink.net/tag/owl_introduction|creationTime|2008-04-07T17:01:39Z +http://www.semanlink.net/tag/owl_introduction|prefLabel|OWL: Introduction +http://www.semanlink.net/tag/owl_introduction|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owl_introduction|broader|http://www.semanlink.net/tag/introduction +http://www.semanlink.net/tag/owl_introduction|creationDate|2008-04-07 +http://www.semanlink.net/tag/owl_introduction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owl_introduction|uri|http://www.semanlink.net/tag/owl_introduction +http://www.semanlink.net/tag/owl_introduction|broader_prefLabel|OWL +http://www.semanlink.net/tag/owl_introduction|broader_prefLabel|Introduction +http://www.semanlink.net/tag/owl_introduction|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/medaille_fields|prefLabel|Médaille Fields +http://www.semanlink.net/tag/medaille_fields|broader|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/medaille_fields|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/medaille_fields|uri|http://www.semanlink.net/tag/medaille_fields +http://www.semanlink.net/tag/medaille_fields|broader_prefLabel|Mathématiques +http://www.semanlink.net/tag/medaille_fields|broader_altLabel|Math +http://www.semanlink.net/tag/trust|creationTime|2014-07-26T00:10:57Z +http://www.semanlink.net/tag/trust|prefLabel|Trust +http://www.semanlink.net/tag/trust|creationDate|2014-07-26 +http://www.semanlink.net/tag/trust|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/trust|uri|http://www.semanlink.net/tag/trust +http://www.semanlink.net/tag/philae|creationTime|2014-11-11T14:51:03Z +http://www.semanlink.net/tag/philae|prefLabel|Philae +http://www.semanlink.net/tag/philae|broader|http://www.semanlink.net/tag/rosetta +http://www.semanlink.net/tag/philae|creationDate|2014-11-11 +http://www.semanlink.net/tag/philae|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/philae|describedBy|https://en.wikipedia.org/wiki/Philae_(spacecraft) 
+http://www.semanlink.net/tag/philae|uri|http://www.semanlink.net/tag/philae
+http://www.semanlink.net/tag/philae|broader_prefLabel|Rosetta
+http://www.semanlink.net/tag/textrank|creationTime|2017-06-14T00:24:38Z
+http://www.semanlink.net/tag/textrank|prefLabel|TextRank
+http://www.semanlink.net/tag/textrank|broader|http://www.semanlink.net/tag/machine_learned_ranking
+http://www.semanlink.net/tag/textrank|broader|http://www.semanlink.net/tag/nlp_topic_extraction
+http://www.semanlink.net/tag/textrank|creationDate|2017-06-14
+http://www.semanlink.net/tag/textrank|comment|"Unsupervised keyword/keyphrase extraction algorithm. Creates a graph of the words and relationships between them from a document (using a sliding window), then identifies the most important vertices of the graph (words) based on importance scores calculated recursively from the entire graph."
+http://www.semanlink.net/tag/textrank|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/textrank|uri|http://www.semanlink.net/tag/textrank
+http://www.semanlink.net/tag/textrank|broader_prefLabel|Learning to rank
+http://www.semanlink.net/tag/textrank|broader_prefLabel|Keyword/keyphrase extraction
+http://www.semanlink.net/tag/textrank|broader_altLabel|Machine learned ranking
+http://www.semanlink.net/tag/textrank|broader_altLabel|Topic extraction
+http://www.semanlink.net/tag/textrank|broader_altLabel|Keyword extraction
+http://www.semanlink.net/tag/textrank|broader_altLabel|Keyphrase extraction
+http://www.semanlink.net/tag/textrank|broader_related|http://www.semanlink.net/tag/nlp_topic_extraction
+http://www.semanlink.net/tag/textrank|broader_related|http://www.semanlink.net/tag/okapi_bm25
+http://www.semanlink.net/tag/textrank|broader_related|http://www.semanlink.net/tag/pagerank
+http://www.semanlink.net/tag/textrank|broader_related|http://www.semanlink.net/tag/phrase_mining
+http://www.semanlink.net/tag/textrank|broader_related|http://www.semanlink.net/tag/phrase_embeddings
+http://www.semanlink.net/tag/textrank|broader_related|http://www.semanlink.net/tag/automatic_summarization
+http://www.semanlink.net/tag/textrank|broader_related|http://www.semanlink.net/tag/topic_modeling
+http://www.semanlink.net/tag/koskas|prefLabel|Koskas
+http://www.semanlink.net/tag/koskas|broader|http://www.semanlink.net/tag/kode
+http://www.semanlink.net/tag/koskas|creationDate|2006-10-18
+http://www.semanlink.net/tag/koskas|comment|Le mathématicien de Normale Sup présenté par Philippe Reynaud.
+http://www.semanlink.net/tag/koskas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/koskas|uri|http://www.semanlink.net/tag/koskas +http://www.semanlink.net/tag/koskas|broader_prefLabel|KODE +http://www.semanlink.net/tag/bibliotheque_numerique|prefLabel|Bibliothèque numérique +http://www.semanlink.net/tag/bibliotheque_numerique|broader|http://www.semanlink.net/tag/bibliotheque +http://www.semanlink.net/tag/bibliotheque_numerique|broader|http://www.semanlink.net/tag/digital_media +http://www.semanlink.net/tag/bibliotheque_numerique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bibliotheque_numerique|uri|http://www.semanlink.net/tag/bibliotheque_numerique +http://www.semanlink.net/tag/bibliotheque_numerique|broader_prefLabel|Bibliothèque +http://www.semanlink.net/tag/bibliotheque_numerique|broader_prefLabel|Digital Media +http://www.semanlink.net/tag/romeo_dallaire|prefLabel|Roméo Dallaire +http://www.semanlink.net/tag/romeo_dallaire|broader|http://www.semanlink.net/tag/onu +http://www.semanlink.net/tag/romeo_dallaire|broader|http://www.semanlink.net/tag/genocide_rwandais +http://www.semanlink.net/tag/romeo_dallaire|creationDate|2006-12-30 +http://www.semanlink.net/tag/romeo_dallaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/romeo_dallaire|uri|http://www.semanlink.net/tag/romeo_dallaire +http://www.semanlink.net/tag/romeo_dallaire|broader_prefLabel|ONU +http://www.semanlink.net/tag/romeo_dallaire|broader_prefLabel|Génocide rwandais +http://www.semanlink.net/tag/romeo_dallaire|broader_altLabel|Nations unies +http://www.semanlink.net/tag/gmail|creationTime|2013-05-17T13:10:14Z +http://www.semanlink.net/tag/gmail|prefLabel|Gmail +http://www.semanlink.net/tag/gmail|creationDate|2013-05-17 +http://www.semanlink.net/tag/gmail|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gmail|uri|http://www.semanlink.net/tag/gmail +http://www.semanlink.net/tag/aws_machine_learning|creationTime|2018-11-10T11:53:26Z +http://www.semanlink.net/tag/aws_machine_learning|prefLabel|AWS Machine Learning +http://www.semanlink.net/tag/aws_machine_learning|broader|http://www.semanlink.net/tag/machine_learning_tool +http://www.semanlink.net/tag/aws_machine_learning|broader|http://www.semanlink.net/tag/aws +http://www.semanlink.net/tag/aws_machine_learning|creationDate|2018-11-10 +http://www.semanlink.net/tag/aws_machine_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aws_machine_learning|uri|http://www.semanlink.net/tag/aws_machine_learning +http://www.semanlink.net/tag/aws_machine_learning|broader_prefLabel|Machine Learning tool +http://www.semanlink.net/tag/aws_machine_learning|broader_prefLabel|AWS +http://www.semanlink.net/tag/ai_chip|creationTime|2018-02-22T00:43:40Z +http://www.semanlink.net/tag/ai_chip|prefLabel|AI Chip +http://www.semanlink.net/tag/ai_chip|broader|http://www.semanlink.net/tag/chip +http://www.semanlink.net/tag/ai_chip|creationDate|2018-02-22 +http://www.semanlink.net/tag/ai_chip|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_chip|uri|http://www.semanlink.net/tag/ai_chip +http://www.semanlink.net/tag/ai_chip|broader_prefLabel|Chip +http://www.semanlink.net/tag/genetique_humaine|creationTime|2009-05-06T14:34:19Z +http://www.semanlink.net/tag/genetique_humaine|prefLabel|Génétique humaine 
+http://www.semanlink.net/tag/genetique_humaine|broader|http://www.semanlink.net/tag/genome +http://www.semanlink.net/tag/genetique_humaine|creationDate|2009-05-06 +http://www.semanlink.net/tag/genetique_humaine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/genetique_humaine|altLabel|Génome humain +http://www.semanlink.net/tag/genetique_humaine|uri|http://www.semanlink.net/tag/genetique_humaine +http://www.semanlink.net/tag/genetique_humaine|broader_prefLabel|Génome +http://www.semanlink.net/tag/sentence_embeddings|creationTime|2018-05-10T17:13:58Z +http://www.semanlink.net/tag/sentence_embeddings|prefLabel|Sentence Embeddings +http://www.semanlink.net/tag/sentence_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/sentence_embeddings|broader|http://www.semanlink.net/tag/embeddings_in_nlp +http://www.semanlink.net/tag/sentence_embeddings|broader|http://www.semanlink.net/tag/document_embeddings +http://www.semanlink.net/tag/sentence_embeddings|creationDate|2018-05-10 +http://www.semanlink.net/tag/sentence_embeddings|comment|"Sentence embedding techniques encode sentences +into a fixed-sized, dense vector space such that semantically +similar sentences are close. + +In practice, many NLP applications rely on a simple sentence embedding: the average of the +embeddings of the words in it. We can do better. + +Ex of use (besides trivial ones such as classification and similarity): use sentence embeddings to cluster +sentences in documents, which aids in the automatic extraction +of key information from large bodies of text. +" +http://www.semanlink.net/tag/sentence_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sentence_embeddings|uri|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/tag/sentence_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/sentence_embeddings|broader_prefLabel|Embeddings in NLP +http://www.semanlink.net/tag/sentence_embeddings|broader_prefLabel|Document embeddings +http://www.semanlink.net/tag/sentence_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/sentence_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/sentence_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/sentence_embeddings|broader_related|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/tag/digital_collections|creationTime|2011-09-12T21:39:34Z +http://www.semanlink.net/tag/digital_collections|prefLabel|Digital Collections +http://www.semanlink.net/tag/digital_collections|related|http://www.semanlink.net/tag/cms +http://www.semanlink.net/tag/digital_collections|creationDate|2011-09-12 +http://www.semanlink.net/tag/digital_collections|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/digital_collections|uri|http://www.semanlink.net/tag/digital_collections +http://www.semanlink.net/tag/images_stereoscopiques|creationTime|2019-03-31T16:01:52Z +http://www.semanlink.net/tag/images_stereoscopiques|prefLabel|Images stéréoscopiques +http://www.semanlink.net/tag/images_stereoscopiques|creationDate|2019-03-31 +http://www.semanlink.net/tag/images_stereoscopiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/images_stereoscopiques|uri|http://www.semanlink.net/tag/images_stereoscopiques 
+http://www.semanlink.net/tag/imovie|creationTime|2007-09-15T19:06:22Z
+http://www.semanlink.net/tag/imovie|prefLabel|iMovie
+http://www.semanlink.net/tag/imovie|broader|http://www.semanlink.net/tag/digital_video
+http://www.semanlink.net/tag/imovie|broader|http://www.semanlink.net/tag/mac_software
+http://www.semanlink.net/tag/imovie|creationDate|2007-09-15
+http://www.semanlink.net/tag/imovie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/imovie|uri|http://www.semanlink.net/tag/imovie
+http://www.semanlink.net/tag/imovie|broader_prefLabel|Digital Video
+http://www.semanlink.net/tag/imovie|broader_prefLabel|Mac software
+http://www.semanlink.net/tag/nkos|creationTime|2008-05-06T21:32:11Z
+http://www.semanlink.net/tag/nkos|prefLabel|NKOS
+http://www.semanlink.net/tag/nkos|related|http://www.semanlink.net/tag/skos
+http://www.semanlink.net/tag/nkos|creationDate|2008-05-06
+http://www.semanlink.net/tag/nkos|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nkos|uri|http://www.semanlink.net/tag/nkos
+http://www.semanlink.net/tag/fake_news|creationTime|2017-01-08T13:46:12Z
+http://www.semanlink.net/tag/fake_news|prefLabel|Fake news
+http://www.semanlink.net/tag/fake_news|creationDate|2017-01-08
+http://www.semanlink.net/tag/fake_news|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fake_news|uri|http://www.semanlink.net/tag/fake_news
+http://www.semanlink.net/tag/agro_industrie|creationTime|2021-04-11T16:18:27Z
+http://www.semanlink.net/tag/agro_industrie|prefLabel|Agro-industrie
+http://www.semanlink.net/tag/agro_industrie|creationDate|2021-04-11
+http://www.semanlink.net/tag/agro_industrie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/agro_industrie|uri|http://www.semanlink.net/tag/agro_industrie
+http://www.semanlink.net/tag/parrot|creationTime|2013-08-28T00:49:40Z
+http://www.semanlink.net/tag/parrot|prefLabel|Parrot
+http://www.semanlink.net/tag/parrot|broader|http://www.semanlink.net/tag/owl_tool
+http://www.semanlink.net/tag/parrot|broader|http://www.semanlink.net/tag/rif
+http://www.semanlink.net/tag/parrot|broader|http://www.semanlink.net/tag/rdf_owl_documentation_tool
+http://www.semanlink.net/tag/parrot|creationDate|2013-08-28
+http://www.semanlink.net/tag/parrot|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/parrot|homepage|http://ontorule-project.eu/parrot/parrot
+http://www.semanlink.net/tag/parrot|uri|http://www.semanlink.net/tag/parrot
+http://www.semanlink.net/tag/parrot|broader_prefLabel|OWL tool
+http://www.semanlink.net/tag/parrot|broader_prefLabel|RIF
+http://www.semanlink.net/tag/parrot|broader_prefLabel|RDF-OWL documentation tool
+http://www.semanlink.net/tag/using_word_embedding|creationTime|2017-07-21T01:38:08Z
+http://www.semanlink.net/tag/using_word_embedding|prefLabel|Using word embeddings
+http://www.semanlink.net/tag/using_word_embedding|broader|http://www.semanlink.net/tag/word_embedding
+http://www.semanlink.net/tag/using_word_embedding|creationDate|2017-07-21
+http://www.semanlink.net/tag/using_word_embedding|comment|"Aggregating word embeddings through a mean, max,
+min... function is still one of the most easy and widely used techniques to derive sentence embeddings, often in combination with an MLP or convolutional network (Weston et al. (2014); dos Santos and Gatti (2014); Yin and Schütze (2015); Collobert et al. (2011)). On one hand, the word order is lost, which can be important in e.g. paraphrase identification. On the other hand, the methods are simple, out-of-the-box and do not require a fixed length input."
+http://www.semanlink.net/tag/using_word_embedding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/using_word_embedding|uri|http://www.semanlink.net/tag/using_word_embedding
+http://www.semanlink.net/tag/using_word_embedding|broader_prefLabel|Word embeddings
+http://www.semanlink.net/tag/using_word_embedding|broader_altLabel|Word Embedding
+http://www.semanlink.net/tag/using_word_embedding|broader_altLabel|Plongement lexical
+http://www.semanlink.net/tag/using_word_embedding|broader_related|http://www.semanlink.net/tag/tomas_mikolov
+http://www.semanlink.net/tag/using_word_embedding|broader_related|http://www.semanlink.net/tag/yoshua_bengio
+http://www.semanlink.net/tag/using_word_embedding|broader_related|http://www.semanlink.net/tag/sense_embeddings
+http://www.semanlink.net/tag/twine|creationTime|2007-11-08T16:25:37Z
+http://www.semanlink.net/tag/twine|prefLabel|Twine
+http://www.semanlink.net/tag/twine|broader|http://www.semanlink.net/tag/nova_spivak
+http://www.semanlink.net/tag/twine|broader|http://www.semanlink.net/tag/semantic_web_application
+http://www.semanlink.net/tag/twine|broader|http://www.semanlink.net/tag/social_software
+http://www.semanlink.net/tag/twine|creationDate|2007-11-08
+http://www.semanlink.net/tag/twine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/twine|homepage|http://www.twine.com/
+http://www.semanlink.net/tag/twine|uri|http://www.semanlink.net/tag/twine
+http://www.semanlink.net/tag/twine|broader_prefLabel|Nova Spivak
+http://www.semanlink.net/tag/twine|broader_prefLabel|Semantic Web : Application
+http://www.semanlink.net/tag/twine|broader_prefLabel|Social software
+http://www.semanlink.net/tag/sommet_de_copenhague|creationTime|2009-11-21T17:36:00Z
+http://www.semanlink.net/tag/sommet_de_copenhague|prefLabel|Sommet de Copenhague
+http://www.semanlink.net/tag/sommet_de_copenhague|broader|http://www.semanlink.net/tag/rechauffement_climatique
+http://www.semanlink.net/tag/sommet_de_copenhague|broader|http://www.semanlink.net/tag/negociations_climat
+http://www.semanlink.net/tag/sommet_de_copenhague|creationDate|2009-11-21
+http://www.semanlink.net/tag/sommet_de_copenhague|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sommet_de_copenhague|uri|http://www.semanlink.net/tag/sommet_de_copenhague
+http://www.semanlink.net/tag/sommet_de_copenhague|broader_prefLabel|Climate crisis
+http://www.semanlink.net/tag/sommet_de_copenhague|broader_prefLabel|Négociations climat
+http://www.semanlink.net/tag/sommet_de_copenhague|broader_altLabel|Réchauffement climatique
+http://www.semanlink.net/tag/sommet_de_copenhague|broader_altLabel|Global warming
+http://www.semanlink.net/tag/sommet_de_copenhague|broader_related|http://www.semanlink.net/tag/anthropocene
+http://www.semanlink.net/tag/banco|creationTime|2013-03-05T00:18:42Z
+http://www.semanlink.net/tag/banco|prefLabel|Banco
+http://www.semanlink.net/tag/banco|broader|http://www.semanlink.net/tag/architecture_en_terre
+http://www.semanlink.net/tag/banco|creationDate|2013-03-05
+http://www.semanlink.net/tag/banco|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/banco|uri|http://www.semanlink.net/tag/banco
+http://www.semanlink.net/tag/banco|broader_prefLabel|Architecture en terre +http://www.semanlink.net/tag/banco|broader_related|http://www.semanlink.net/tag/banco +http://www.semanlink.net/tag/booking_com|creationTime|2020-06-16T08:22:16Z +http://www.semanlink.net/tag/booking_com|prefLabel|booking.com +http://www.semanlink.net/tag/booking_com|broader|http://www.semanlink.net/tag/uberisation +http://www.semanlink.net/tag/booking_com|broader|http://www.semanlink.net/tag/hotel +http://www.semanlink.net/tag/booking_com|creationDate|2020-06-16 +http://www.semanlink.net/tag/booking_com|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/booking_com|uri|http://www.semanlink.net/tag/booking_com +http://www.semanlink.net/tag/booking_com|broader_prefLabel|Uberisation +http://www.semanlink.net/tag/booking_com|broader_prefLabel|Hôtel +http://www.semanlink.net/tag/rwanda|prefLabel|Rwanda +http://www.semanlink.net/tag/rwanda|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/rwanda|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rwanda|uri|http://www.semanlink.net/tag/rwanda +http://www.semanlink.net/tag/rwanda|broader_prefLabel|Afrique +http://www.semanlink.net/tag/rwanda|broader_altLabel|Africa +http://www.semanlink.net/tag/fun|creationTime|2008-04-01T15:31:48Z +http://www.semanlink.net/tag/fun|prefLabel|Fun +http://www.semanlink.net/tag/fun|creationDate|2008-04-01 +http://www.semanlink.net/tag/fun|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fun|uri|http://www.semanlink.net/tag/fun +http://www.semanlink.net/tag/bayesian_classification|creationTime|2014-05-18T11:34:39Z +http://www.semanlink.net/tag/bayesian_classification|prefLabel|Bayesian classification +http://www.semanlink.net/tag/bayesian_classification|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/bayesian_classification|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/bayesian_classification|related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/bayesian_classification|creationDate|2014-05-18 +http://www.semanlink.net/tag/bayesian_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bayesian_classification|uri|http://www.semanlink.net/tag/bayesian_classification +http://www.semanlink.net/tag/bayesian_classification|broader_prefLabel|Classification +http://www.semanlink.net/tag/bayesian_classification|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/himalaya|prefLabel|Himalaya +http://www.semanlink.net/tag/himalaya|broader|http://www.semanlink.net/tag/montagne +http://www.semanlink.net/tag/himalaya|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/himalaya|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/himalaya|uri|http://www.semanlink.net/tag/himalaya +http://www.semanlink.net/tag/himalaya|broader_prefLabel|Montagne +http://www.semanlink.net/tag/himalaya|broader_prefLabel|Asie +http://www.semanlink.net/tag/uri_identity|creationTime|2007-11-20T21:37:34Z +http://www.semanlink.net/tag/uri_identity|prefLabel|URI Identity +http://www.semanlink.net/tag/uri_identity|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/uri_identity|related|http://www.semanlink.net/tag/uri_synonymity 
+http://www.semanlink.net/tag/uri_identity|creationDate|2007-11-20 +http://www.semanlink.net/tag/uri_identity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uri_identity|uri|http://www.semanlink.net/tag/uri_identity +http://www.semanlink.net/tag/uri_identity|broader_prefLabel|URI +http://www.semanlink.net/tag/rdf_data_visualization|creationTime|2010-08-27T17:45:13Z +http://www.semanlink.net/tag/rdf_data_visualization|prefLabel|RDF data visualization +http://www.semanlink.net/tag/rdf_data_visualization|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_data_visualization|broader|http://www.semanlink.net/tag/data_visualisation +http://www.semanlink.net/tag/rdf_data_visualization|broader|http://www.semanlink.net/tag/rdf_tools +http://www.semanlink.net/tag/rdf_data_visualization|broader|http://www.semanlink.net/tag/data_visualization_tools +http://www.semanlink.net/tag/rdf_data_visualization|creationDate|2010-08-27 +http://www.semanlink.net/tag/rdf_data_visualization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_data_visualization|uri|http://www.semanlink.net/tag/rdf_data_visualization +http://www.semanlink.net/tag/rdf_data_visualization|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_data_visualization|broader_prefLabel|Data visualisation +http://www.semanlink.net/tag/rdf_data_visualization|broader_prefLabel|RDF Tools +http://www.semanlink.net/tag/rdf_data_visualization|broader_prefLabel|Visualization Tools +http://www.semanlink.net/tag/rdf_data_visualization|broader_altLabel|Data Visualization Tools +http://www.semanlink.net/tag/rdf_data_visualization|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_data_visualization|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_data_visualization|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_data_visualization|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_data_visualization|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/rdf_data_visualization|broader_related|http://www.semanlink.net/tag/information_visualization +http://www.semanlink.net/tag/president_des_usa|prefLabel|Président des USA +http://www.semanlink.net/tag/president_des_usa|broader|http://www.semanlink.net/tag/chef_d_etat +http://www.semanlink.net/tag/president_des_usa|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/president_des_usa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/president_des_usa|uri|http://www.semanlink.net/tag/president_des_usa +http://www.semanlink.net/tag/president_des_usa|broader_prefLabel|Chef d'état +http://www.semanlink.net/tag/president_des_usa|broader_prefLabel|USA +http://www.semanlink.net/tag/president_des_usa|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/president_des_usa|broader_altLabel|United States +http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest|creationTime|2021-06-06T09:51:56Z +http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest|prefLabel|Afrique de l'ouest : histoire +http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest|broader|http://www.semanlink.net/tag/histoire_de_l_afrique 
+http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest|creationDate|2021-06-06 +http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest|uri|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest|broader_prefLabel|Histoire de l'Afrique +http://www.semanlink.net/tag/insolite|prefLabel|Insolite +http://www.semanlink.net/tag/insolite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/insolite|uri|http://www.semanlink.net/tag/insolite +http://www.semanlink.net/tag/personal_ontology|prefLabel|Personal ontology +http://www.semanlink.net/tag/personal_ontology|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/personal_ontology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/personal_ontology|uri|http://www.semanlink.net/tag/personal_ontology +http://www.semanlink.net/tag/personal_ontology|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/personal_ontology|broader_altLabel|Ontology +http://www.semanlink.net/tag/rdf_and_social_networks|creationTime|2008-04-07T16:55:19Z +http://www.semanlink.net/tag/rdf_and_social_networks|prefLabel|RDF and social networks +http://www.semanlink.net/tag/rdf_and_social_networks|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_and_social_networks|broader|http://www.semanlink.net/tag/rdf_application +http://www.semanlink.net/tag/rdf_and_social_networks|broader|http://www.semanlink.net/tag/social_networks +http://www.semanlink.net/tag/rdf_and_social_networks|related|http://www.semanlink.net/tag/foaf +http://www.semanlink.net/tag/rdf_and_social_networks|related|http://www.semanlink.net/tag/henry_story +http://www.semanlink.net/tag/rdf_and_social_networks|creationDate|2008-04-07 +http://www.semanlink.net/tag/rdf_and_social_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_and_social_networks|uri|http://www.semanlink.net/tag/rdf_and_social_networks +http://www.semanlink.net/tag/rdf_and_social_networks|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_and_social_networks|broader_prefLabel|RDF Application +http://www.semanlink.net/tag/rdf_and_social_networks|broader_prefLabel|Social Networks +http://www.semanlink.net/tag/rdf_and_social_networks|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_and_social_networks|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_and_social_networks|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_and_social_networks|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_and_social_networks|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/bing|creationTime|2011-06-07T14:02:20Z +http://www.semanlink.net/tag/bing|prefLabel|Bing +http://www.semanlink.net/tag/bing|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/bing|creationDate|2011-06-07 +http://www.semanlink.net/tag/bing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/bing|uri|http://www.semanlink.net/tag/bing +http://www.semanlink.net/tag/bing|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/bing|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/bert_and_sentence_embeddings|creationTime|2020-07-12T15:21:35Z +http://www.semanlink.net/tag/bert_and_sentence_embeddings|prefLabel|BERT + Sentence Embeddings +http://www.semanlink.net/tag/bert_and_sentence_embeddings|broader|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/tag/bert_and_sentence_embeddings|broader|http://www.semanlink.net/tag/sentence_similarity +http://www.semanlink.net/tag/bert_and_sentence_embeddings|broader|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/bert_and_sentence_embeddings|creationDate|2020-07-12 +http://www.semanlink.net/tag/bert_and_sentence_embeddings|comment|Looks like everything (up to 2020-07-14) refers to this [github project](doc:2020/07/ukplab_sentence_transformers_s), [paper (Sentence-BERT)](doc:2019/08/_1908_10084_sentence_bert_sen) +http://www.semanlink.net/tag/bert_and_sentence_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bert_and_sentence_embeddings|uri|http://www.semanlink.net/tag/bert_and_sentence_embeddings +http://www.semanlink.net/tag/bert_and_sentence_embeddings|broader_prefLabel|Sentence Embeddings +http://www.semanlink.net/tag/bert_and_sentence_embeddings|broader_prefLabel|Sentence Similarity +http://www.semanlink.net/tag/bert_and_sentence_embeddings|broader_prefLabel|BERT +http://www.semanlink.net/tag/bert_and_sentence_embeddings|broader_related|http://www.semanlink.net/tag/doc2vec +http://www.semanlink.net/tag/bert_and_sentence_embeddings|broader_related|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/tag/prism_surveillance_program|creationTime|2013-06-11T10:26:51Z +http://www.semanlink.net/tag/prism_surveillance_program|prefLabel|PRISM +http://www.semanlink.net/tag/prism_surveillance_program|broader|http://www.semanlink.net/tag/big_brother +http://www.semanlink.net/tag/prism_surveillance_program|broader|http://www.semanlink.net/tag/nsa +http://www.semanlink.net/tag/prism_surveillance_program|creationDate|2013-06-11 +http://www.semanlink.net/tag/prism_surveillance_program|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/prism_surveillance_program|describedBy|https://en.wikipedia.org/wiki/PRISM_(surveillance_program) +http://www.semanlink.net/tag/prism_surveillance_program|uri|http://www.semanlink.net/tag/prism_surveillance_program +http://www.semanlink.net/tag/prism_surveillance_program|broader_prefLabel|Big Brother +http://www.semanlink.net/tag/prism_surveillance_program|broader_prefLabel|NSA +http://www.semanlink.net/tag/ai_business_perspectives|creationTime|2018-06-07T23:59:11Z +http://www.semanlink.net/tag/ai_business_perspectives|prefLabel|AI: business perspectives +http://www.semanlink.net/tag/ai_business_perspectives|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_business_perspectives|creationDate|2018-06-07 +http://www.semanlink.net/tag/ai_business_perspectives|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_business_perspectives|uri|http://www.semanlink.net/tag/ai_business_perspectives +http://www.semanlink.net/tag/ai_business_perspectives|broader_prefLabel|Artificial Intelligence 
+http://www.semanlink.net/tag/ai_business_perspectives|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_business_perspectives|broader_altLabel|AI +http://www.semanlink.net/tag/ai_business_perspectives|broader_altLabel|IA +http://www.semanlink.net/tag/ai_business_perspectives|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/rdf_working_group|creationTime|2011-04-18T20:32:11Z +http://www.semanlink.net/tag/rdf_working_group|prefLabel|RDF Working Group +http://www.semanlink.net/tag/rdf_working_group|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_working_group|creationDate|2011-04-18 +http://www.semanlink.net/tag/rdf_working_group|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_working_group|uri|http://www.semanlink.net/tag/rdf_working_group +http://www.semanlink.net/tag/rdf_working_group|broader_prefLabel|W3C +http://www.semanlink.net/tag/rdf_working_group|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/rdf_working_group|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/john_calvin|creationTime|2019-08-25T19:40:17Z +http://www.semanlink.net/tag/john_calvin|prefLabel|Calvin +http://www.semanlink.net/tag/john_calvin|broader|http://www.semanlink.net/tag/religion +http://www.semanlink.net/tag/john_calvin|broader|http://www.semanlink.net/tag/chretiente +http://www.semanlink.net/tag/john_calvin|broader|http://www.semanlink.net/tag/fanatisme +http://www.semanlink.net/tag/john_calvin|creationDate|2019-08-25 +http://www.semanlink.net/tag/john_calvin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/john_calvin|describedBy|https://en.wikipedia.org/wiki/John_Calvin +http://www.semanlink.net/tag/john_calvin|uri|http://www.semanlink.net/tag/john_calvin +http://www.semanlink.net/tag/john_calvin|broader_prefLabel|Religion +http://www.semanlink.net/tag/john_calvin|broader_prefLabel|Chrétienté +http://www.semanlink.net/tag/john_calvin|broader_prefLabel|Fanatisme +http://www.semanlink.net/tag/patent_finding|creationTime|2019-02-20T11:59:31Z +http://www.semanlink.net/tag/patent_finding|prefLabel|Patent finding +http://www.semanlink.net/tag/patent_finding|broader|http://www.semanlink.net/tag/ip_ir_ml_ia +http://www.semanlink.net/tag/patent_finding|broader|http://www.semanlink.net/tag/patent +http://www.semanlink.net/tag/patent_finding|creationDate|2019-02-20 +http://www.semanlink.net/tag/patent_finding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/patent_finding|uri|http://www.semanlink.net/tag/patent_finding +http://www.semanlink.net/tag/patent_finding|broader_prefLabel|AI 4 IP +http://www.semanlink.net/tag/patent_finding|broader_prefLabel|Patent +http://www.semanlink.net/tag/patent_finding|broader_altLabel|Brevet +http://www.semanlink.net/tag/ld_patch|creationTime|2014-09-25T12:01:41Z +http://www.semanlink.net/tag/ld_patch|prefLabel|LD-PATCH +http://www.semanlink.net/tag/ld_patch|broader|http://www.semanlink.net/tag/ldp_updates +http://www.semanlink.net/tag/ld_patch|broader|http://www.semanlink.net/tag/http_patch +http://www.semanlink.net/tag/ld_patch|creationDate|2014-09-25 +http://www.semanlink.net/tag/ld_patch|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ld_patch|uri|http://www.semanlink.net/tag/ld_patch +http://www.semanlink.net/tag/ld_patch|broader_prefLabel|LDP: updates 
+http://www.semanlink.net/tag/ld_patch|broader_prefLabel|HTTP PATCH +http://www.semanlink.net/tag/cluster_analysis|creationTime|2015-10-10T14:45:34Z +http://www.semanlink.net/tag/cluster_analysis|prefLabel|Clustering +http://www.semanlink.net/tag/cluster_analysis|broader|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/cluster_analysis|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/cluster_analysis|related|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/cluster_analysis|creationDate|2015-10-10 +http://www.semanlink.net/tag/cluster_analysis|comment|"the task of grouping a set of objects in such a way that objects in the same group (cluster) are more similar (in some sense or another) to each other than to those in other groups. +" +http://www.semanlink.net/tag/cluster_analysis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cluster_analysis|describedBy|https://en.wikipedia.org/wiki/Cluster_analysis +http://www.semanlink.net/tag/cluster_analysis|altLabel|Data clustering +http://www.semanlink.net/tag/cluster_analysis|altLabel|Cluster analysis +http://www.semanlink.net/tag/cluster_analysis|uri|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/tag/cluster_analysis|broader_prefLabel|Unsupervised machine learning +http://www.semanlink.net/tag/cluster_analysis|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/embeddings_in_ir|creationTime|2018-01-28T17:20:14Z +http://www.semanlink.net/tag/embeddings_in_ir|prefLabel|Embeddings in Information Retrieval +http://www.semanlink.net/tag/embeddings_in_ir|broader|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/embeddings_in_ir|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/embeddings_in_ir|broader|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/tag/embeddings_in_ir|creationDate|2018-01-28 +http://www.semanlink.net/tag/embeddings_in_ir|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/embeddings_in_ir|altLabel|Embeddings in IR +http://www.semanlink.net/tag/embeddings_in_ir|uri|http://www.semanlink.net/tag/embeddings_in_ir +http://www.semanlink.net/tag/embeddings_in_ir|broader_prefLabel|Information retrieval +http://www.semanlink.net/tag/embeddings_in_ir|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/embeddings_in_ir|broader_prefLabel|Neural Search +http://www.semanlink.net/tag/embeddings_in_ir|broader_altLabel|IR +http://www.semanlink.net/tag/embeddings_in_ir|broader_altLabel|embedding +http://www.semanlink.net/tag/embeddings_in_ir|broader_altLabel|Neural IR models +http://www.semanlink.net/tag/embeddings_in_ir|broader_altLabel|Neural retrieval +http://www.semanlink.net/tag/embeddings_in_ir|broader_altLabel|Neural Models for Information Retrieval +http://www.semanlink.net/tag/embeddings_in_ir|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/embeddings_in_ir|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/nasca|prefLabel|Nasca +http://www.semanlink.net/tag/nasca|broader|http://www.semanlink.net/tag/perou +http://www.semanlink.net/tag/nasca|broader|http://www.semanlink.net/tag/civilisations_precolombiennes +http://www.semanlink.net/tag/nasca|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/nasca|uri|http://www.semanlink.net/tag/nasca +http://www.semanlink.net/tag/nasca|broader_prefLabel|Pérou +http://www.semanlink.net/tag/nasca|broader_prefLabel|Civilisations précolombiennes +http://www.semanlink.net/tag/combining_knowledge_graphs|creationTime|2020-03-19T22:10:45Z +http://www.semanlink.net/tag/combining_knowledge_graphs|prefLabel|Combining knowledge graphs +http://www.semanlink.net/tag/combining_knowledge_graphs|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/combining_knowledge_graphs|broader|http://www.semanlink.net/tag/multiple_knowledge_bases +http://www.semanlink.net/tag/combining_knowledge_graphs|broader|http://www.semanlink.net/tag/kg_tasks +http://www.semanlink.net/tag/combining_knowledge_graphs|creationDate|2020-03-19 +http://www.semanlink.net/tag/combining_knowledge_graphs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/combining_knowledge_graphs|uri|http://www.semanlink.net/tag/combining_knowledge_graphs +http://www.semanlink.net/tag/combining_knowledge_graphs|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/combining_knowledge_graphs|broader_prefLabel|Multiple Knowledge Bases +http://www.semanlink.net/tag/combining_knowledge_graphs|broader_prefLabel|KG: tasks +http://www.semanlink.net/tag/combining_knowledge_graphs|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/combining_knowledge_graphs|broader_altLabel|KG +http://www.semanlink.net/tag/combining_knowledge_graphs|broader_altLabel|Multiple KB +http://www.semanlink.net/tag/combining_knowledge_graphs|broader_altLabel|Knowledge graphs: tasks +http://www.semanlink.net/tag/restful_web_services|creationTime|2008-05-08T14:08:34Z +http://www.semanlink.net/tag/restful_web_services|prefLabel|RESTful Web Services +http://www.semanlink.net/tag/restful_web_services|broader|http://www.semanlink.net/tag/web_services +http://www.semanlink.net/tag/restful_web_services|broader|http://www.semanlink.net/tag/rest +http://www.semanlink.net/tag/restful_web_services|creationDate|2008-05-08 +http://www.semanlink.net/tag/restful_web_services|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/restful_web_services|uri|http://www.semanlink.net/tag/restful_web_services +http://www.semanlink.net/tag/restful_web_services|broader_prefLabel|Web Services +http://www.semanlink.net/tag/restful_web_services|broader_prefLabel|REST +http://www.semanlink.net/tag/restful_web_services|broader_altLabel|WS +http://www.semanlink.net/tag/yves_roth|creationTime|2014-11-10T12:43:58Z +http://www.semanlink.net/tag/yves_roth|prefLabel|Yves Roth +http://www.semanlink.net/tag/yves_roth|broader|http://www.semanlink.net/tag/bon_prof +http://www.semanlink.net/tag/yves_roth|broader|http://www.semanlink.net/tag/souvenirs +http://www.semanlink.net/tag/yves_roth|broader|http://www.semanlink.net/tag/lycee_alain +http://www.semanlink.net/tag/yves_roth|creationDate|2014-11-10 +http://www.semanlink.net/tag/yves_roth|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yves_roth|uri|http://www.semanlink.net/tag/yves_roth +http://www.semanlink.net/tag/yves_roth|broader_prefLabel|Bon prof +http://www.semanlink.net/tag/yves_roth|broader_prefLabel|Souvenirs +http://www.semanlink.net/tag/yves_roth|broader_prefLabel|Lycée Alain +http://www.semanlink.net/tag/yves_roth|broader_altLabel|Souvenir +http://www.semanlink.net/tag/kd_mkb|creationTime|2020-04-22T17:49:30Z 
+http://www.semanlink.net/tag/kd_mkb|prefLabel|KD-MKB +http://www.semanlink.net/tag/kd_mkb|broader|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/kd_mkb|related|http://www.semanlink.net/tag/multiple_knowledge_bases +http://www.semanlink.net/tag/kd_mkb|related|http://www.semanlink.net/tag/coling2020 +http://www.semanlink.net/tag/kd_mkb|creationDate|2020-04-22 +http://www.semanlink.net/tag/kd_mkb|comment|"Cooperative Knowledge Distillation for Representation Learning Across Multiple Knowledge Bases + +- [GitHub](doc:2020/07/raphaelsty_kdmkb) +- [Paper](doc:2020/12/knowledge_base_embedding_by_coo)" +http://www.semanlink.net/tag/kd_mkb|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kd_mkb|altLabel|KDMKB +http://www.semanlink.net/tag/kd_mkb|altLabel|KD-MKR +http://www.semanlink.net/tag/kd_mkb|uri|http://www.semanlink.net/tag/kd_mkb +http://www.semanlink.net/tag/kd_mkb|broader_prefLabel|Thèse IRIT-Renault NLP-KB +http://www.semanlink.net/tag/kd_mkb|broader_altLabel|Thèse IRIT-Renault +http://www.semanlink.net/tag/kd_mkb|broader_related|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/kd_mkb|broader_related|http://www.semanlink.net/tag/nlp_using_knowledge +http://www.semanlink.net/tag/kd_mkb|broader_related|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/kd_mkb|broader_related|http://www.semanlink.net/tag/jose_moreno +http://www.semanlink.net/tag/kd_mkb|broader_related|http://www.semanlink.net/tag/irit +http://www.semanlink.net/tag/kd_mkb|broader_related|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/rdf_data_source|prefLabel|RDF Data source +http://www.semanlink.net/tag/rdf_data_source|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_data_source|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_data_source|uri|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/rdf_data_source|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_data_source|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_data_source|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_data_source|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_data_source|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_data_source|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/apple_developer_connection|prefLabel|Apple Developer Connection +http://www.semanlink.net/tag/apple_developer_connection|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/apple_developer_connection|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/apple_developer_connection|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apple_developer_connection|uri|http://www.semanlink.net/tag/apple_developer_connection +http://www.semanlink.net/tag/apple_developer_connection|broader_prefLabel|Apple +http://www.semanlink.net/tag/apple_developer_connection|broader_prefLabel|Dev +http://www.semanlink.net/tag/seo|creationTime|2011-06-10T12:04:31Z +http://www.semanlink.net/tag/seo|prefLabel|SEO +http://www.semanlink.net/tag/seo|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/seo|creationDate|2011-06-10 
+http://www.semanlink.net/tag/seo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/seo|uri|http://www.semanlink.net/tag/seo +http://www.semanlink.net/tag/seo|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/seo|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/lime|creationTime|2018-09-09T15:23:11Z +http://www.semanlink.net/tag/lime|prefLabel|LIME +http://www.semanlink.net/tag/lime|broader|http://www.semanlink.net/tag/ai_black_box +http://www.semanlink.net/tag/lime|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/lime|creationDate|2018-09-09 +http://www.semanlink.net/tag/lime|comment|explains the predictions of a classifier by learning an interpretable model locally around the prediction +http://www.semanlink.net/tag/lime|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lime|uri|http://www.semanlink.net/tag/lime +http://www.semanlink.net/tag/lime|broader_prefLabel|AI black box +http://www.semanlink.net/tag/lime|broader_prefLabel|Classification +http://www.semanlink.net/tag/mongodb|creationTime|2013-03-12T16:10:18Z +http://www.semanlink.net/tag/mongodb|prefLabel|MongoDB +http://www.semanlink.net/tag/mongodb|broader|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/mongodb|creationDate|2013-03-12 +http://www.semanlink.net/tag/mongodb|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mongodb|uri|http://www.semanlink.net/tag/mongodb +http://www.semanlink.net/tag/mongodb|broader_prefLabel|NOSQL +http://www.semanlink.net/tag/mongodb|broader_related|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/internet|prefLabel|Internet +http://www.semanlink.net/tag/internet|broader|http://www.semanlink.net/tag/ntic +http://www.semanlink.net/tag/internet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/internet|uri|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/internet|broader_prefLabel|NTIC +http://www.semanlink.net/tag/amy_winehouse|creationTime|2007-10-29T11:27:13Z +http://www.semanlink.net/tag/amy_winehouse|prefLabel|Amy Winehouse +http://www.semanlink.net/tag/amy_winehouse|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/amy_winehouse|creationDate|2007-10-29 +http://www.semanlink.net/tag/amy_winehouse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amy_winehouse|uri|http://www.semanlink.net/tag/amy_winehouse +http://www.semanlink.net/tag/amy_winehouse|broader_prefLabel|Musicien +http://www.semanlink.net/tag/manuscrits|prefLabel|Manuscrits +http://www.semanlink.net/tag/manuscrits|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/manuscrits|uri|http://www.semanlink.net/tag/manuscrits +http://www.semanlink.net/tag/ai_stanford|creationTime|2019-03-12T13:33:59Z +http://www.semanlink.net/tag/ai_stanford|prefLabel|AI@Stanford +http://www.semanlink.net/tag/ai_stanford|broader|http://www.semanlink.net/tag/ai_teams +http://www.semanlink.net/tag/ai_stanford|broader|http://www.semanlink.net/tag/stanford +http://www.semanlink.net/tag/ai_stanford|creationDate|2019-03-12 +http://www.semanlink.net/tag/ai_stanford|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_stanford|uri|http://www.semanlink.net/tag/ai_stanford +http://www.semanlink.net/tag/ai_stanford|broader_prefLabel|AI teams 
+http://www.semanlink.net/tag/ai_stanford|broader_prefLabel|Stanford +http://www.semanlink.net/tag/subventions_agricoles|prefLabel|Subventions agricoles +http://www.semanlink.net/tag/subventions_agricoles|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/subventions_agricoles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/subventions_agricoles|uri|http://www.semanlink.net/tag/subventions_agricoles +http://www.semanlink.net/tag/subventions_agricoles|broader_prefLabel|Agriculture +http://www.semanlink.net/tag/accident_climatique|creationTime|2008-10-22T11:38:54Z +http://www.semanlink.net/tag/accident_climatique|prefLabel|Accident climatique +http://www.semanlink.net/tag/accident_climatique|broader|http://www.semanlink.net/tag/climat +http://www.semanlink.net/tag/accident_climatique|broader|http://www.semanlink.net/tag/catastrophe_naturelle +http://www.semanlink.net/tag/accident_climatique|creationDate|2008-10-22 +http://www.semanlink.net/tag/accident_climatique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/accident_climatique|uri|http://www.semanlink.net/tag/accident_climatique +http://www.semanlink.net/tag/accident_climatique|broader_prefLabel|Climat +http://www.semanlink.net/tag/accident_climatique|broader_prefLabel|Catastrophe naturelle +http://www.semanlink.net/tag/garbage_collector|creationTime|2013-07-11T00:53:29Z +http://www.semanlink.net/tag/garbage_collector|prefLabel|Garbage Collector +http://www.semanlink.net/tag/garbage_collector|broader|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/garbage_collector|creationDate|2013-07-11 +http://www.semanlink.net/tag/garbage_collector|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/garbage_collector|uri|http://www.semanlink.net/tag/garbage_collector +http://www.semanlink.net/tag/garbage_collector|broader_prefLabel|Programming language +http://www.semanlink.net/tag/garbage_collector|broader_altLabel|Langage de programmation +http://www.semanlink.net/tag/synonym_uris|creationTime|2007-06-13T23:36:18Z +http://www.semanlink.net/tag/synonym_uris|prefLabel|Synonym URIs +http://www.semanlink.net/tag/synonym_uris|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/synonym_uris|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/synonym_uris|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/synonym_uris|creationDate|2007-06-13 +http://www.semanlink.net/tag/synonym_uris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/synonym_uris|uri|http://www.semanlink.net/tag/synonym_uris +http://www.semanlink.net/tag/synonym_uris|broader_prefLabel|Linking Open Data +http://www.semanlink.net/tag/synonym_uris|broader_prefLabel|URI +http://www.semanlink.net/tag/synonym_uris|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/synonym_uris|broader_altLabel|LOD +http://www.semanlink.net/tag/synonym_uris|broader_altLabel|LD +http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/tim_berners_lee 
+http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/yves_raymond
+http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/frederick_giasson
+http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/jamendo
+http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/httprange_14
+http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/musicbrainz
+http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/kingsley_idehen
+http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/giovanni_tummarello
+http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/tom_heath
+http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/rdf_data_source
+http://www.semanlink.net/tag/synonym_uris|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.semanlink.net/tag/general_motors|prefLabel|General Motors
+http://www.semanlink.net/tag/general_motors|broader|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/general_motors|broader|http://www.semanlink.net/tag/usa
+http://www.semanlink.net/tag/general_motors|broader|http://www.semanlink.net/tag/automobile
+http://www.semanlink.net/tag/general_motors|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/general_motors|uri|http://www.semanlink.net/tag/general_motors
+http://www.semanlink.net/tag/general_motors|broader_prefLabel|Entreprise
+http://www.semanlink.net/tag/general_motors|broader_prefLabel|USA
+http://www.semanlink.net/tag/general_motors|broader_prefLabel|Automobile
+http://www.semanlink.net/tag/general_motors|broader_altLabel|ÉTATS-UNIS
+http://www.semanlink.net/tag/general_motors|broader_altLabel|United States
+http://www.semanlink.net/tag/general_motors|broader_altLabel|Automotive
+http://www.semanlink.net/tag/modeling_car_diversity|creationTime|2008-10-24T18:10:29Z
+http://www.semanlink.net/tag/modeling_car_diversity|prefLabel|Modeling car diversity
+http://www.semanlink.net/tag/modeling_car_diversity|broader|http://www.semanlink.net/tag/car_diversity
+http://www.semanlink.net/tag/modeling_car_diversity|related|http://www.semanlink.net/tag/owl
+http://www.semanlink.net/tag/modeling_car_diversity|creationDate|2008-10-24
+http://www.semanlink.net/tag/modeling_car_diversity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/modeling_car_diversity|uri|http://www.semanlink.net/tag/modeling_car_diversity
+http://www.semanlink.net/tag/modeling_car_diversity|broader_prefLabel|Car diversity
+http://www.semanlink.net/tag/college|prefLabel|Collège
+http://www.semanlink.net/tag/college|broader|http://www.semanlink.net/tag/ecole
+http://www.semanlink.net/tag/college|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/college|uri|http://www.semanlink.net/tag/college
+http://www.semanlink.net/tag/college|broader_prefLabel|Ecole
+http://www.semanlink.net/tag/constraint_satisfaction_problem|creationTime|2010-09-04T14:19:49Z +http://www.semanlink.net/tag/constraint_satisfaction_problem|prefLabel|Constraint Satisfaction Problem +http://www.semanlink.net/tag/constraint_satisfaction_problem|broader|http://www.semanlink.net/tag/constraint_programming +http://www.semanlink.net/tag/constraint_satisfaction_problem|related|http://www.semanlink.net/tag/configuration +http://www.semanlink.net/tag/constraint_satisfaction_problem|creationDate|2010-09-04 +http://www.semanlink.net/tag/constraint_satisfaction_problem|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/constraint_satisfaction_problem|altLabel|CSP +http://www.semanlink.net/tag/constraint_satisfaction_problem|uri|http://www.semanlink.net/tag/constraint_satisfaction_problem +http://www.semanlink.net/tag/constraint_satisfaction_problem|broader_prefLabel|Constraint Programming +http://www.semanlink.net/tag/technologie|prefLabel|Technologie +http://www.semanlink.net/tag/technologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/technologie|uri|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/bonne_nouvelle|prefLabel|Bonne nouvelle +http://www.semanlink.net/tag/bonne_nouvelle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bonne_nouvelle|uri|http://www.semanlink.net/tag/bonne_nouvelle +http://www.semanlink.net/tag/critique_du_capitalisme|prefLabel|Critique du capitalisme +http://www.semanlink.net/tag/critique_du_capitalisme|broader|http://www.semanlink.net/tag/capitalisme +http://www.semanlink.net/tag/critique_du_capitalisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/critique_du_capitalisme|uri|http://www.semanlink.net/tag/critique_du_capitalisme +http://www.semanlink.net/tag/critique_du_capitalisme|broader_prefLabel|Capitalisme +http://www.semanlink.net/tag/tv_advertising|prefLabel|TV advertising +http://www.semanlink.net/tag/tv_advertising|broader|http://www.semanlink.net/tag/television +http://www.semanlink.net/tag/tv_advertising|broader|http://www.semanlink.net/tag/publicite +http://www.semanlink.net/tag/tv_advertising|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tv_advertising|uri|http://www.semanlink.net/tag/tv_advertising +http://www.semanlink.net/tag/tv_advertising|broader_prefLabel|Télévision +http://www.semanlink.net/tag/tv_advertising|broader_prefLabel|Publicité +http://www.semanlink.net/tag/tv_advertising|broader_altLabel|TV +http://www.semanlink.net/tag/tv_advertising|broader_altLabel|Advertising +http://www.semanlink.net/tag/tv_advertising|broader_altLabel|Pub +http://www.semanlink.net/tag/low_resource_languages|creationTime|2021-07-06T12:52:08Z +http://www.semanlink.net/tag/low_resource_languages|prefLabel|Low-Resource Languages +http://www.semanlink.net/tag/low_resource_languages|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/low_resource_languages|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/low_resource_languages|broader|http://www.semanlink.net/tag/nlp_low_resource_scenarios +http://www.semanlink.net/tag/low_resource_languages|related|http://www.semanlink.net/tag/african_languages +http://www.semanlink.net/tag/low_resource_languages|related|http://www.semanlink.net/tag/multilingual_nlp 
+http://www.semanlink.net/tag/low_resource_languages|related|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/tag/low_resource_languages|creationDate|2021-07-06 +http://www.semanlink.net/tag/low_resource_languages|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/low_resource_languages|uri|http://www.semanlink.net/tag/low_resource_languages +http://www.semanlink.net/tag/low_resource_languages|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/low_resource_languages|broader_prefLabel|Langues +http://www.semanlink.net/tag/low_resource_languages|broader_prefLabel|Low-Resource NLP +http://www.semanlink.net/tag/nlp_current_state|creationTime|2018-11-15T23:13:53Z +http://www.semanlink.net/tag/nlp_current_state|prefLabel|NLP: current state +http://www.semanlink.net/tag/nlp_current_state|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_current_state|creationDate|2018-11-15 +http://www.semanlink.net/tag/nlp_current_state|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_current_state|uri|http://www.semanlink.net/tag/nlp_current_state +http://www.semanlink.net/tag/nlp_current_state|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_current_state|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_current_state|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_current_state|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/mission_voulet_chanoine|prefLabel|"Mission ""Voulet-Chanoine""" +http://www.semanlink.net/tag/mission_voulet_chanoine|broader|http://www.semanlink.net/tag/histoire_du_niger +http://www.semanlink.net/tag/mission_voulet_chanoine|broader|http://www.semanlink.net/tag/horreur +http://www.semanlink.net/tag/mission_voulet_chanoine|broader|http://www.semanlink.net/tag/tchad +http://www.semanlink.net/tag/mission_voulet_chanoine|broader|http://www.semanlink.net/tag/empire_colonial_francais +http://www.semanlink.net/tag/mission_voulet_chanoine|related|http://www.semanlink.net/tag/sarraounia_mangou +http://www.semanlink.net/tag/mission_voulet_chanoine|comment|En 1899, deux officiers français partis avec leurs hommes à la conquête du Tchad massacrèrent tout sur leur passage. 
+http://www.semanlink.net/tag/mission_voulet_chanoine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mission_voulet_chanoine|uri|http://www.semanlink.net/tag/mission_voulet_chanoine +http://www.semanlink.net/tag/mission_voulet_chanoine|broader_prefLabel|Histoire du Niger +http://www.semanlink.net/tag/mission_voulet_chanoine|broader_prefLabel|Horreur +http://www.semanlink.net/tag/mission_voulet_chanoine|broader_prefLabel|Tchad +http://www.semanlink.net/tag/mission_voulet_chanoine|broader_prefLabel|Empire colonial français +http://www.semanlink.net/tag/hugging_face|creationTime|2019-08-28T22:47:26Z +http://www.semanlink.net/tag/hugging_face|prefLabel|Hugging Face +http://www.semanlink.net/tag/hugging_face|broader|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/tag/hugging_face|creationDate|2019-08-28 +http://www.semanlink.net/tag/hugging_face|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hugging_face|homepage|https://huggingface.co/ +http://www.semanlink.net/tag/hugging_face|altLabel|HuggingFace +http://www.semanlink.net/tag/hugging_face|uri|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/tag/hugging_face|broader_prefLabel|NLP Teams +http://www.semanlink.net/tag/hugging_face|broader_altLabel|NLP Groups +http://www.semanlink.net/tag/e_learning|creationTime|2007-11-20T21:51:36Z +http://www.semanlink.net/tag/e_learning|prefLabel|Online Learning +http://www.semanlink.net/tag/e_learning|broader|http://www.semanlink.net/tag/ntic +http://www.semanlink.net/tag/e_learning|broader|http://www.semanlink.net/tag/technology_enhanced_learning +http://www.semanlink.net/tag/e_learning|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/e_learning|creationDate|2007-11-20 +http://www.semanlink.net/tag/e_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/e_learning|altLabel|Pédagogie numérique +http://www.semanlink.net/tag/e_learning|uri|http://www.semanlink.net/tag/e_learning +http://www.semanlink.net/tag/e_learning|broader_prefLabel|NTIC +http://www.semanlink.net/tag/e_learning|broader_prefLabel|Technology Enhanced Learning +http://www.semanlink.net/tag/e_learning|broader_prefLabel|Education +http://www.semanlink.net/tag/e_learning|broader_altLabel|Enseignement +http://www.semanlink.net/tag/corse|creationTime|2007-09-02T00:35:50Z +http://www.semanlink.net/tag/corse|prefLabel|Corse +http://www.semanlink.net/tag/corse|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/corse|creationDate|2007-09-02 +http://www.semanlink.net/tag/corse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/corse|uri|http://www.semanlink.net/tag/corse +http://www.semanlink.net/tag/corse|broader_prefLabel|France +http://www.semanlink.net/tag/blog_software|prefLabel|Blog software +http://www.semanlink.net/tag/blog_software|broader|http://www.semanlink.net/tag/blog +http://www.semanlink.net/tag/blog_software|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blog_software|uri|http://www.semanlink.net/tag/blog_software +http://www.semanlink.net/tag/blog_software|broader_prefLabel|Blog +http://www.semanlink.net/tag/juliana_rotich|creationTime|2013-08-24T18:03:52Z +http://www.semanlink.net/tag/juliana_rotich|prefLabel|Juliana Rotich +http://www.semanlink.net/tag/juliana_rotich|broader|http://www.semanlink.net/tag/afrique 
+http://www.semanlink.net/tag/juliana_rotich|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/juliana_rotich|broader|http://www.semanlink.net/tag/new_africa +http://www.semanlink.net/tag/juliana_rotich|related|http://www.semanlink.net/tag/internet_en_afrique +http://www.semanlink.net/tag/juliana_rotich|creationDate|2013-08-24 +http://www.semanlink.net/tag/juliana_rotich|comment|"""I firmly believe we are in the New African Century. Tech is transformative and can help African countries leapfrog to a better future.""" +http://www.semanlink.net/tag/juliana_rotich|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/juliana_rotich|uri|http://www.semanlink.net/tag/juliana_rotich +http://www.semanlink.net/tag/juliana_rotich|broader_prefLabel|Afrique +http://www.semanlink.net/tag/juliana_rotich|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/juliana_rotich|broader_prefLabel|New Africa +http://www.semanlink.net/tag/juliana_rotich|broader_altLabel|Africa +http://www.semanlink.net/tag/juliana_rotich|broader_altLabel|Technical guys +http://www.semanlink.net/tag/introduction|creationTime|2007-08-07T10:08:50Z +http://www.semanlink.net/tag/introduction|prefLabel|Introduction +http://www.semanlink.net/tag/introduction|creationDate|2007-08-07 +http://www.semanlink.net/tag/introduction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/introduction|uri|http://www.semanlink.net/tag/introduction +http://www.semanlink.net/tag/match_de_legende|creationTime|2020-06-24T15:26:48Z +http://www.semanlink.net/tag/match_de_legende|prefLabel|Match de légende +http://www.semanlink.net/tag/match_de_legende|broader|http://www.semanlink.net/tag/football +http://www.semanlink.net/tag/match_de_legende|creationDate|2020-06-24 +http://www.semanlink.net/tag/match_de_legende|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/match_de_legende|uri|http://www.semanlink.net/tag/match_de_legende +http://www.semanlink.net/tag/match_de_legende|broader_prefLabel|Football +http://www.semanlink.net/tag/street_art|creationTime|2014-03-03T23:01:32Z +http://www.semanlink.net/tag/street_art|prefLabel|Street art +http://www.semanlink.net/tag/street_art|broader|http://www.semanlink.net/tag/art +http://www.semanlink.net/tag/street_art|creationDate|2014-03-03 +http://www.semanlink.net/tag/street_art|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/street_art|uri|http://www.semanlink.net/tag/street_art +http://www.semanlink.net/tag/street_art|broader_prefLabel|Art +http://www.semanlink.net/tag/pierre_fresnay|creationTime|2010-10-12T01:22:29Z +http://www.semanlink.net/tag/pierre_fresnay|prefLabel|Pierre Fresnay +http://www.semanlink.net/tag/pierre_fresnay|broader|http://www.semanlink.net/tag/acteur +http://www.semanlink.net/tag/pierre_fresnay|creationDate|2010-10-12 +http://www.semanlink.net/tag/pierre_fresnay|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pierre_fresnay|uri|http://www.semanlink.net/tag/pierre_fresnay +http://www.semanlink.net/tag/pierre_fresnay|broader_prefLabel|Acteur +http://www.semanlink.net/tag/linked_learning_2012|creationTime|2012-04-17T09:25:08Z +http://www.semanlink.net/tag/linked_learning_2012|prefLabel|Linked Learning 2012 +http://www.semanlink.net/tag/linked_learning_2012|broader|http://www.semanlink.net/tag/workshop 
+http://www.semanlink.net/tag/linked_learning_2012|broader|http://www.semanlink.net/tag/www_2012 +http://www.semanlink.net/tag/linked_learning_2012|broader|http://www.semanlink.net/tag/linked_learning +http://www.semanlink.net/tag/linked_learning_2012|related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_learning_2012|creationDate|2012-04-17 +http://www.semanlink.net/tag/linked_learning_2012|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_learning_2012|homepage|http://lile2012.linkededucation.org/ +http://www.semanlink.net/tag/linked_learning_2012|uri|http://www.semanlink.net/tag/linked_learning_2012 +http://www.semanlink.net/tag/linked_learning_2012|broader_prefLabel|Workshop +http://www.semanlink.net/tag/linked_learning_2012|broader_prefLabel|WWW 2012 +http://www.semanlink.net/tag/linked_learning_2012|broader_prefLabel|Linked Learning +http://www.semanlink.net/tag/eswc_2007|creationTime|2007-06-13T23:39:10Z +http://www.semanlink.net/tag/eswc_2007|prefLabel|ESWC 2007 +http://www.semanlink.net/tag/eswc_2007|broader|http://www.semanlink.net/tag/eswc +http://www.semanlink.net/tag/eswc_2007|creationDate|2007-06-13 +http://www.semanlink.net/tag/eswc_2007|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eswc_2007|uri|http://www.semanlink.net/tag/eswc_2007 +http://www.semanlink.net/tag/eswc_2007|broader_prefLabel|ESWC +http://www.semanlink.net/tag/rss|prefLabel|RSS +http://www.semanlink.net/tag/rss|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rss|uri|http://www.semanlink.net/tag/rss +http://www.semanlink.net/tag/fao|prefLabel|FAO +http://www.semanlink.net/tag/fao|broader|http://www.semanlink.net/tag/institutions_internationales +http://www.semanlink.net/tag/fao|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fao|uri|http://www.semanlink.net/tag/fao +http://www.semanlink.net/tag/fao|broader_prefLabel|Institutions internationales +http://www.semanlink.net/tag/enfants_soldats|creationTime|2015-12-10T02:30:14Z +http://www.semanlink.net/tag/enfants_soldats|prefLabel|Enfants soldats +http://www.semanlink.net/tag/enfants_soldats|creationDate|2015-12-10 +http://www.semanlink.net/tag/enfants_soldats|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enfants_soldats|uri|http://www.semanlink.net/tag/enfants_soldats +http://www.semanlink.net/tag/technique_de_l_insecte_sterile|creationTime|2007-03-08T00:09:36Z +http://www.semanlink.net/tag/technique_de_l_insecte_sterile|prefLabel|Technique de l'insecte stérile +http://www.semanlink.net/tag/technique_de_l_insecte_sterile|broader|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.semanlink.net/tag/technique_de_l_insecte_sterile|broader|http://www.semanlink.net/tag/insecte +http://www.semanlink.net/tag/technique_de_l_insecte_sterile|creationDate|2007-03-08 +http://www.semanlink.net/tag/technique_de_l_insecte_sterile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/technique_de_l_insecte_sterile|uri|http://www.semanlink.net/tag/technique_de_l_insecte_sterile +http://www.semanlink.net/tag/technique_de_l_insecte_sterile|broader_prefLabel|Nous vivons une époque moderne +http://www.semanlink.net/tag/technique_de_l_insecte_sterile|broader_prefLabel|Insecte +http://www.semanlink.net/tag/technique_de_l_insecte_sterile|broader_altLabel|C'est déjà demain 
+http://www.semanlink.net/tag/fibo|creationTime|2013-04-17T10:17:33Z +http://www.semanlink.net/tag/fibo|prefLabel|FIBO +http://www.semanlink.net/tag/fibo|broader|http://www.semanlink.net/tag/financial_data +http://www.semanlink.net/tag/fibo|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/fibo|related|http://www.semanlink.net/tag/mirek_sopek +http://www.semanlink.net/tag/fibo|creationDate|2013-04-17 +http://www.semanlink.net/tag/fibo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fibo|uri|http://www.semanlink.net/tag/fibo +http://www.semanlink.net/tag/fibo|broader_prefLabel|Financial Data +http://www.semanlink.net/tag/fibo|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/fibo|broader_altLabel|Ontology +http://www.semanlink.net/tag/plantation_d_arbres|creationTime|2007-09-20T22:10:28Z +http://www.semanlink.net/tag/plantation_d_arbres|prefLabel|Plantation d'arbres +http://www.semanlink.net/tag/plantation_d_arbres|broader|http://www.semanlink.net/tag/arbres +http://www.semanlink.net/tag/plantation_d_arbres|creationDate|2007-09-20 +http://www.semanlink.net/tag/plantation_d_arbres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/plantation_d_arbres|uri|http://www.semanlink.net/tag/plantation_d_arbres +http://www.semanlink.net/tag/plantation_d_arbres|broader_prefLabel|Arbres +http://www.semanlink.net/tag/chris_welty|creationTime|2012-07-31T00:02:05Z +http://www.semanlink.net/tag/chris_welty|prefLabel|Chris Welty +http://www.semanlink.net/tag/chris_welty|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/chris_welty|related|http://www.semanlink.net/tag/www_2012 +http://www.semanlink.net/tag/chris_welty|creationDate|2012-07-31 +http://www.semanlink.net/tag/chris_welty|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chris_welty|uri|http://www.semanlink.net/tag/chris_welty +http://www.semanlink.net/tag/chris_welty|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/chris_welty|broader_altLabel|Technical guys +http://www.semanlink.net/tag/charlottesville|creationTime|2017-08-17T13:18:53Z +http://www.semanlink.net/tag/charlottesville|prefLabel|Charlottesville +http://www.semanlink.net/tag/charlottesville|creationDate|2017-08-17 +http://www.semanlink.net/tag/charlottesville|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/charlottesville|uri|http://www.semanlink.net/tag/charlottesville +http://www.semanlink.net/tag/firefox_extension|prefLabel|Firefox extension +http://www.semanlink.net/tag/firefox_extension|broader|http://www.semanlink.net/tag/firefox +http://www.semanlink.net/tag/firefox_extension|creationDate|2006-07-21 +http://www.semanlink.net/tag/firefox_extension|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/firefox_extension|uri|http://www.semanlink.net/tag/firefox_extension +http://www.semanlink.net/tag/firefox_extension|broader_prefLabel|Firefox +http://www.semanlink.net/tag/semantic_web_introduction|prefLabel|Semantic Web : introduction +http://www.semanlink.net/tag/semantic_web_introduction|broader|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.semanlink.net/tag/semantic_web_introduction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_introduction|uri|http://www.semanlink.net/tag/semantic_web_introduction 
+http://www.semanlink.net/tag/semantic_web_introduction|broader_prefLabel|Semantic web: evangelization +http://www.semanlink.net/tag/con_de_chirac|prefLabel|Con de Chirac +http://www.semanlink.net/tag/con_de_chirac|broader|http://www.semanlink.net/tag/chirac +http://www.semanlink.net/tag/con_de_chirac|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/con_de_chirac|altLabel|Chirac est nul +http://www.semanlink.net/tag/con_de_chirac|uri|http://www.semanlink.net/tag/con_de_chirac +http://www.semanlink.net/tag/con_de_chirac|broader_prefLabel|Chirac +http://www.semanlink.net/tag/redland|prefLabel|Redland +http://www.semanlink.net/tag/redland|broader|http://www.semanlink.net/tag/rdf_tools +http://www.semanlink.net/tag/redland|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/redland|uri|http://www.semanlink.net/tag/redland +http://www.semanlink.net/tag/redland|broader_prefLabel|RDF Tools +http://www.semanlink.net/tag/fire|creationTime|2015-12-30T20:28:53Z +http://www.semanlink.net/tag/fire|prefLabel|Fire +http://www.semanlink.net/tag/fire|creationDate|2015-12-30 +http://www.semanlink.net/tag/fire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fire|uri|http://www.semanlink.net/tag/fire +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|creationTime|2017-11-07T10:08:09Z +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|prefLabel|CNN 4 NLP +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|broader|http://www.semanlink.net/tag/nn_4_nlp +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|broader|http://www.semanlink.net/tag/convolutional_neural_network +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|creationDate|2017-11-07 +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|uri|http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|broader_prefLabel|NN 4 NLP +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|broader_prefLabel|Convolutional neural network +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|broader_altLabel|Convnets +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|broader_altLabel|CNN +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|broader_altLabel|Convolutional neural networks +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|broader_altLabel|Convnet +http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp|broader_related|http://www.semanlink.net/tag/yann_lecun +http://www.semanlink.net/tag/text_classification_using_label_names_only|creationTime|2021-10-16T13:51:53Z +http://www.semanlink.net/tag/text_classification_using_label_names_only|prefLabel|Text Classification Using Label Names Only +http://www.semanlink.net/tag/text_classification_using_label_names_only|broader|http://www.semanlink.net/tag/unsupervised_text_classification +http://www.semanlink.net/tag/text_classification_using_label_names_only|related|http://www.semanlink.net/tag/zero_shot_text_classifier +http://www.semanlink.net/tag/text_classification_using_label_names_only|creationDate|2021-10-16 
+http://www.semanlink.net/tag/text_classification_using_label_names_only|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/text_classification_using_label_names_only|uri|http://www.semanlink.net/tag/text_classification_using_label_names_only
+http://www.semanlink.net/tag/text_classification_using_label_names_only|broader_prefLabel|Unsupervised Text Classification
+http://www.semanlink.net/tag/text_classification_using_label_names_only|broader_altLabel|Dataless Text Classification
+http://www.semanlink.net/tag/amazon|prefLabel|Amazon
+http://www.semanlink.net/tag/amazon|broader|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/amazon|broader|http://www.semanlink.net/tag/internet
+http://www.semanlink.net/tag/amazon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/amazon|uri|http://www.semanlink.net/tag/amazon
+http://www.semanlink.net/tag/amazon|broader_prefLabel|Entreprise
+http://www.semanlink.net/tag/amazon|broader_prefLabel|Internet
+http://www.semanlink.net/tag/punition_des_mechants|creationTime|2020-12-14T16:37:59Z
+http://www.semanlink.net/tag/punition_des_mechants|prefLabel|Punition des méchants
+http://www.semanlink.net/tag/punition_des_mechants|creationDate|2020-12-14
+http://www.semanlink.net/tag/punition_des_mechants|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/punition_des_mechants|uri|http://www.semanlink.net/tag/punition_des_mechants
+http://www.semanlink.net/tag/what_is_life|prefLabel|What is life ?
+http://www.semanlink.net/tag/what_is_life|broader|http://www.semanlink.net/tag/origine_de_la_vie
+http://www.semanlink.net/tag/what_is_life|broader|http://www.semanlink.net/tag/biologie
+http://www.semanlink.net/tag/what_is_life|related|http://www.semanlink.net/tag/synthetic_life
+http://www.semanlink.net/tag/what_is_life|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/what_is_life|altLabel|Essence of life
+http://www.semanlink.net/tag/what_is_life|uri|http://www.semanlink.net/tag/what_is_life
+http://www.semanlink.net/tag/what_is_life|broader_prefLabel|Origine de la vie
+http://www.semanlink.net/tag/what_is_life|broader_prefLabel|Biology
+http://www.semanlink.net/tag/what_is_life|broader_altLabel|Biologie
+http://www.semanlink.net/tag/multiple_knowledge_bases|creationTime|2020-05-11T22:37:16Z
+http://www.semanlink.net/tag/multiple_knowledge_bases|prefLabel|Multiple Knowledge Bases
+http://www.semanlink.net/tag/multiple_knowledge_bases|broader|http://www.semanlink.net/tag/knowledge_base
+http://www.semanlink.net/tag/multiple_knowledge_bases|creationDate|2020-05-11
+http://www.semanlink.net/tag/multiple_knowledge_bases|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/multiple_knowledge_bases|altLabel|Multiple KB
+http://www.semanlink.net/tag/multiple_knowledge_bases|uri|http://www.semanlink.net/tag/multiple_knowledge_bases
+http://www.semanlink.net/tag/multiple_knowledge_bases|broader_prefLabel|Knowledge bases
+http://www.semanlink.net/tag/multiple_knowledge_bases|broader_altLabel|Knowledge Base
+http://www.semanlink.net/tag/sap|creationTime|2010-09-24T11:54:27Z
+http://www.semanlink.net/tag/sap|prefLabel|SAP
+http://www.semanlink.net/tag/sap|creationDate|2010-09-24
+http://www.semanlink.net/tag/sap|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sap|uri|http://www.semanlink.net/tag/sap
+http://www.semanlink.net/tag/paleoclimatologie|creationTime|2021-06-06T09:48:18Z +http://www.semanlink.net/tag/paleoclimatologie|prefLabel|Paleoclimatologie +http://www.semanlink.net/tag/paleoclimatologie|creationDate|2021-06-06 +http://www.semanlink.net/tag/paleoclimatologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paleoclimatologie|altLabel|paleo-climatologie +http://www.semanlink.net/tag/paleoclimatologie|uri|http://www.semanlink.net/tag/paleoclimatologie +http://www.semanlink.net/tag/live_clipboard|prefLabel|Live Clipboard +http://www.semanlink.net/tag/live_clipboard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/live_clipboard|uri|http://www.semanlink.net/tag/live_clipboard +http://www.semanlink.net/tag/michel_serres|creationTime|2011-03-07T08:59:29Z +http://www.semanlink.net/tag/michel_serres|prefLabel|Michel Serres +http://www.semanlink.net/tag/michel_serres|broader|http://www.semanlink.net/tag/intellectuel +http://www.semanlink.net/tag/michel_serres|creationDate|2011-03-07 +http://www.semanlink.net/tag/michel_serres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/michel_serres|uri|http://www.semanlink.net/tag/michel_serres +http://www.semanlink.net/tag/michel_serres|broader_prefLabel|Intellectuel +http://www.semanlink.net/tag/demographie|prefLabel|Démographie +http://www.semanlink.net/tag/demographie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/demographie|uri|http://www.semanlink.net/tag/demographie +http://www.semanlink.net/tag/chinois|creationTime|2018-03-04T11:05:24Z +http://www.semanlink.net/tag/chinois|prefLabel|Chinois +http://www.semanlink.net/tag/chinois|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/chinois|creationDate|2018-03-04 +http://www.semanlink.net/tag/chinois|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chinois|altLabel|Chinese +http://www.semanlink.net/tag/chinois|uri|http://www.semanlink.net/tag/chinois +http://www.semanlink.net/tag/chinois|broader_prefLabel|Chine +http://www.semanlink.net/tag/chinois|broader_altLabel|China +http://www.semanlink.net/tag/prison|creationTime|2008-11-20T21:55:43Z +http://www.semanlink.net/tag/prison|prefLabel|Prison +http://www.semanlink.net/tag/prison|creationDate|2008-11-20 +http://www.semanlink.net/tag/prison|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/prison|uri|http://www.semanlink.net/tag/prison +http://www.semanlink.net/tag/solr_not_english_only|creationTime|2012-05-03T16:29:01Z +http://www.semanlink.net/tag/solr_not_english_only|prefLabel|Solr (not english only) +http://www.semanlink.net/tag/solr_not_english_only|broader|http://www.semanlink.net/tag/solr +http://www.semanlink.net/tag/solr_not_english_only|creationDate|2012-05-03 +http://www.semanlink.net/tag/solr_not_english_only|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/solr_not_english_only|uri|http://www.semanlink.net/tag/solr_not_english_only +http://www.semanlink.net/tag/solr_not_english_only|broader_prefLabel|Solr +http://www.semanlink.net/tag/jeni_tennison|creationTime|2012-04-11T17:12:09Z +http://www.semanlink.net/tag/jeni_tennison|prefLabel|Jeni Tennison +http://www.semanlink.net/tag/jeni_tennison|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/jeni_tennison|creationDate|2012-04-11 
+http://www.semanlink.net/tag/jeni_tennison|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeni_tennison|uri|http://www.semanlink.net/tag/jeni_tennison +http://www.semanlink.net/tag/jeni_tennison|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/animal_rights|creationTime|2009-01-16T01:08:54Z +http://www.semanlink.net/tag/animal_rights|prefLabel|Animal rights +http://www.semanlink.net/tag/animal_rights|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/animal_rights|creationDate|2009-01-16 +http://www.semanlink.net/tag/animal_rights|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/animal_rights|uri|http://www.semanlink.net/tag/animal_rights +http://www.semanlink.net/tag/animal_rights|broader_prefLabel|Animal +http://www.semanlink.net/tag/glue|creationTime|2008-10-31T11:27:49Z +http://www.semanlink.net/tag/glue|prefLabel|Glue +http://www.semanlink.net/tag/glue|broader|http://www.semanlink.net/tag/firefox_extension +http://www.semanlink.net/tag/glue|broader|http://www.semanlink.net/tag/social_networks +http://www.semanlink.net/tag/glue|creationDate|2008-10-31 +http://www.semanlink.net/tag/glue|comment|"""Connect with friends around things you visit!""" +http://www.semanlink.net/tag/glue|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/glue|describedBy|http://www.getglue.com/ +http://www.semanlink.net/tag/glue|uri|http://www.semanlink.net/tag/glue +http://www.semanlink.net/tag/glue|broader_prefLabel|Firefox extension +http://www.semanlink.net/tag/glue|broader_prefLabel|Social Networks +http://www.semanlink.net/tag/genetique|prefLabel|Genetics +http://www.semanlink.net/tag/genetique|prefLabel|Génétique +http://www.semanlink.net/tag/genetique|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/genetique|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/genetique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/genetique|uri|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/genetique|broader_prefLabel|Biology +http://www.semanlink.net/tag/genetique|broader_prefLabel|Science +http://www.semanlink.net/tag/genetique|broader_altLabel|Biologie +http://www.semanlink.net/tag/genetique|broader_altLabel|sciences +http://www.semanlink.net/tag/energies_fossiles_non_conventionnelles|creationTime|2013-03-30T10:18:35Z +http://www.semanlink.net/tag/energies_fossiles_non_conventionnelles|prefLabel|"Energies fossiles ""non conventionnelles""" +http://www.semanlink.net/tag/energies_fossiles_non_conventionnelles|creationDate|2013-03-30 +http://www.semanlink.net/tag/energies_fossiles_non_conventionnelles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/energies_fossiles_non_conventionnelles|uri|http://www.semanlink.net/tag/energies_fossiles_non_conventionnelles +http://www.semanlink.net/tag/semantic_web_assisted_learning|creationTime|2011-04-04T15:43:30Z +http://www.semanlink.net/tag/semantic_web_assisted_learning|prefLabel|Semantic Web Assisted Learning +http://www.semanlink.net/tag/semantic_web_assisted_learning|creationDate|2011-04-04 +http://www.semanlink.net/tag/semantic_web_assisted_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_assisted_learning|uri|http://www.semanlink.net/tag/semantic_web_assisted_learning 
+http://www.semanlink.net/tag/online_tool|creationTime|2012-10-09T14:44:05Z +http://www.semanlink.net/tag/online_tool|prefLabel|Online tool +http://www.semanlink.net/tag/online_tool|creationDate|2012-10-09 +http://www.semanlink.net/tag/online_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/online_tool|uri|http://www.semanlink.net/tag/online_tool +http://www.semanlink.net/tag/gbif|creationTime|2007-11-14T13:49:49Z +http://www.semanlink.net/tag/gbif|prefLabel|GBIF +http://www.semanlink.net/tag/gbif|broader|http://www.semanlink.net/tag/biodiversity_data +http://www.semanlink.net/tag/gbif|broader|http://www.semanlink.net/tag/biodiversite +http://www.semanlink.net/tag/gbif|related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/gbif|creationDate|2007-11-14 +http://www.semanlink.net/tag/gbif|comment|Global Biodiversity Information Facility +http://www.semanlink.net/tag/gbif|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gbif|homepage|http://www.gbif.org/ +http://www.semanlink.net/tag/gbif|uri|http://www.semanlink.net/tag/gbif +http://www.semanlink.net/tag/gbif|broader_prefLabel|Biodiversity data +http://www.semanlink.net/tag/gbif|broader_prefLabel|Biodiversité +http://www.semanlink.net/tag/gbif|broader_altLabel|Biodiversity +http://www.semanlink.net/tag/eric_baetens|creationTime|2016-05-03T02:09:30Z +http://www.semanlink.net/tag/eric_baetens|prefLabel|Eric Baetens +http://www.semanlink.net/tag/eric_baetens|broader|http://www.semanlink.net/tag/ecole_des_mines +http://www.semanlink.net/tag/eric_baetens|creationDate|2016-05-03 +http://www.semanlink.net/tag/eric_baetens|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eric_baetens|uri|http://www.semanlink.net/tag/eric_baetens +http://www.semanlink.net/tag/eric_baetens|broader_prefLabel|Ecole des Mines +http://www.semanlink.net/tag/socrate|creationTime|2010-09-15T13:31:40Z +http://www.semanlink.net/tag/socrate|prefLabel|Socrate +http://www.semanlink.net/tag/socrate|broader|http://www.semanlink.net/tag/philosophe +http://www.semanlink.net/tag/socrate|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/socrate|broader|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/tag/socrate|creationDate|2010-09-15 +http://www.semanlink.net/tag/socrate|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/socrate|uri|http://www.semanlink.net/tag/socrate +http://www.semanlink.net/tag/socrate|broader_prefLabel|Philosophe +http://www.semanlink.net/tag/socrate|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/socrate|broader_prefLabel|Grèce antique +http://www.semanlink.net/tag/fake_blogs|prefLabel|Fake Blogs +http://www.semanlink.net/tag/fake_blogs|broader|http://www.semanlink.net/tag/blog +http://www.semanlink.net/tag/fake_blogs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fake_blogs|uri|http://www.semanlink.net/tag/fake_blogs +http://www.semanlink.net/tag/fake_blogs|broader_prefLabel|Blog +http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien|prefLabel|Extinction de masse de la fin du permien +http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien|broader|http://www.semanlink.net/tag/extinction_de_masse +http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien|broader|http://www.semanlink.net/tag/paleontologie 
+http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien|uri|http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien +http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien|broader_prefLabel|Extinction de masse +http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien|broader_prefLabel|Paléontologie +http://www.semanlink.net/tag/sdmx_rdf|creationTime|2010-07-16T14:13:47Z +http://www.semanlink.net/tag/sdmx_rdf|prefLabel|SDMX-RDF +http://www.semanlink.net/tag/sdmx_rdf|broader|http://www.semanlink.net/tag/semantic_statistics +http://www.semanlink.net/tag/sdmx_rdf|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/sdmx_rdf|broader|http://www.semanlink.net/tag/sdmx +http://www.semanlink.net/tag/sdmx_rdf|related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/sdmx_rdf|creationDate|2010-07-16 +http://www.semanlink.net/tag/sdmx_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sdmx_rdf|uri|http://www.semanlink.net/tag/sdmx_rdf +http://www.semanlink.net/tag/sdmx_rdf|broader_prefLabel|Semantic Statistics +http://www.semanlink.net/tag/sdmx_rdf|broader_prefLabel|RDF +http://www.semanlink.net/tag/sdmx_rdf|broader_prefLabel|SDMX +http://www.semanlink.net/tag/sdmx_rdf|broader_altLabel|RDF and statistics +http://www.semanlink.net/tag/sdmx_rdf|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/sdmx_rdf|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/sdmx_rdf|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sdmx_rdf|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/sdmx_rdf|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/owl_1_1|creationTime|2007-06-27T21:44:49Z +http://www.semanlink.net/tag/owl_1_1|prefLabel|OWL 1.1 +http://www.semanlink.net/tag/owl_1_1|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owl_1_1|creationDate|2007-06-27 +http://www.semanlink.net/tag/owl_1_1|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owl_1_1|uri|http://www.semanlink.net/tag/owl_1_1 +http://www.semanlink.net/tag/owl_1_1|broader_prefLabel|OWL +http://www.semanlink.net/tag/owl_1_1|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/google_refine|creationTime|2011-01-18T15:39:23Z +http://www.semanlink.net/tag/google_refine|prefLabel|Google Refine +http://www.semanlink.net/tag/google_refine|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_refine|broader|http://www.semanlink.net/tag/openrefine +http://www.semanlink.net/tag/google_refine|related|http://www.semanlink.net/tag/csv +http://www.semanlink.net/tag/google_refine|related|http://www.semanlink.net/tag/freebase +http://www.semanlink.net/tag/google_refine|creationDate|2011-01-18 +http://www.semanlink.net/tag/google_refine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_refine|uri|http://www.semanlink.net/tag/google_refine +http://www.semanlink.net/tag/google_refine|broader_prefLabel|Google +http://www.semanlink.net/tag/google_refine|broader_prefLabel|OpenRefine +http://www.semanlink.net/tag/google_refine|broader_related|http://www.semanlink.net/tag/search_engines 
+http://www.semanlink.net/tag/urbanisme|creationTime|2020-11-16T12:09:12Z +http://www.semanlink.net/tag/urbanisme|prefLabel|Urbanisme +http://www.semanlink.net/tag/urbanisme|creationDate|2020-11-16 +http://www.semanlink.net/tag/urbanisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/urbanisme|uri|http://www.semanlink.net/tag/urbanisme +http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning|creationTime|2021-09-02T00:58:56Z +http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning|prefLabel|Unsupervised Sentence Embedding Learning +http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning|broader|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning|broader|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning|creationDate|2021-09-02 +http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning|uri|http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning +http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning|broader_prefLabel|Sentence Embeddings +http://www.semanlink.net/tag/unsupervised_sentence_embedding_learning|broader_prefLabel|Unsupervised machine learning +http://www.semanlink.net/tag/boris_johnson|creationTime|2020-04-19T12:21:14Z +http://www.semanlink.net/tag/boris_johnson|prefLabel|Boris Johnson +http://www.semanlink.net/tag/boris_johnson|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/boris_johnson|broader|http://www.semanlink.net/tag/royaume_uni +http://www.semanlink.net/tag/boris_johnson|creationDate|2020-04-19 +http://www.semanlink.net/tag/boris_johnson|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/boris_johnson|uri|http://www.semanlink.net/tag/boris_johnson +http://www.semanlink.net/tag/boris_johnson|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/boris_johnson|broader_prefLabel|Royaume Uni +http://www.semanlink.net/tag/boris_johnson|broader_altLabel|UK +http://www.semanlink.net/tag/axel_polleres|creationTime|2010-12-15T13:32:44Z +http://www.semanlink.net/tag/axel_polleres|prefLabel|Axel Polleres +http://www.semanlink.net/tag/axel_polleres|creationDate|2010-12-15 +http://www.semanlink.net/tag/axel_polleres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/axel_polleres|uri|http://www.semanlink.net/tag/axel_polleres +http://www.semanlink.net/tag/rats|creationTime|2019-09-16T01:51:54Z +http://www.semanlink.net/tag/rats|prefLabel|Rats +http://www.semanlink.net/tag/rats|creationDate|2019-09-16 +http://www.semanlink.net/tag/rats|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rats|uri|http://www.semanlink.net/tag/rats +http://www.semanlink.net/tag/ec_web_14|creationTime|2014-04-18T23:36:22Z +http://www.semanlink.net/tag/ec_web_14|prefLabel|EC-Web'14 +http://www.semanlink.net/tag/ec_web_14|broader|http://www.semanlink.net/tag/munich +http://www.semanlink.net/tag/ec_web_14|broader|http://www.semanlink.net/tag/ec_web +http://www.semanlink.net/tag/ec_web_14|creationDate|2014-04-18 +http://www.semanlink.net/tag/ec_web_14|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/ec_web_14|homepage|http://www.ebusiness-unibw.org/events/ecweb2014/ +http://www.semanlink.net/tag/ec_web_14|uri|http://www.semanlink.net/tag/ec_web_14 +http://www.semanlink.net/tag/ec_web_14|broader_prefLabel|Munich +http://www.semanlink.net/tag/ec_web_14|broader_prefLabel|EC-Web +http://www.semanlink.net/tag/exil|creationTime|2020-11-13T16:46:16Z +http://www.semanlink.net/tag/exil|prefLabel|Exil +http://www.semanlink.net/tag/exil|creationDate|2020-11-13 +http://www.semanlink.net/tag/exil|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exil|uri|http://www.semanlink.net/tag/exil +http://www.semanlink.net/tag/rare_events|creationTime|2019-05-22T13:03:36Z +http://www.semanlink.net/tag/rare_events|prefLabel|Rare events +http://www.semanlink.net/tag/rare_events|broader|http://www.semanlink.net/tag/imbalanced_data +http://www.semanlink.net/tag/rare_events|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/rare_events|creationDate|2019-05-22 +http://www.semanlink.net/tag/rare_events|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rare_events|uri|http://www.semanlink.net/tag/rare_events +http://www.semanlink.net/tag/rare_events|broader_prefLabel|Imbalanced Data +http://www.semanlink.net/tag/rare_events|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/wikipedia2vec|creationTime|2020-09-02T16:44:12Z +http://www.semanlink.net/tag/wikipedia2vec|prefLabel|Wikipedia2Vec +http://www.semanlink.net/tag/wikipedia2vec|broader|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://www.semanlink.net/tag/wikipedia2vec|broader|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/wikipedia2vec|related|http://www.semanlink.net/tag/ikuya_yamada +http://www.semanlink.net/tag/wikipedia2vec|creationDate|2020-09-02 +http://www.semanlink.net/tag/wikipedia2vec|comment|Embeds words and entities (Wikipedia URLs) into a common space. 
+http://www.semanlink.net/tag/wikipedia2vec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wikipedia2vec|uri|http://www.semanlink.net/tag/wikipedia2vec +http://www.semanlink.net/tag/wikipedia2vec|broader_prefLabel|Word + Entity embeddings +http://www.semanlink.net/tag/wikipedia2vec|broader_prefLabel|Wikipedia +http://www.semanlink.net/tag/wikipedia2vec|broader_related|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/tag/wikipedia2vec|broader_related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/job_matching|creationTime|2020-01-23T22:56:33Z +http://www.semanlink.net/tag/job_matching|prefLabel|Job matching +http://www.semanlink.net/tag/job_matching|broader|http://www.semanlink.net/tag/nlp_human_resources +http://www.semanlink.net/tag/job_matching|creationDate|2020-01-23 +http://www.semanlink.net/tag/job_matching|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/job_matching|uri|http://www.semanlink.net/tag/job_matching +http://www.semanlink.net/tag/job_matching|broader_prefLabel|NLP + Human Resources +http://www.semanlink.net/tag/universal_decimal_classification|creationTime|2010-07-16T13:54:34Z +http://www.semanlink.net/tag/universal_decimal_classification|prefLabel|Universal Decimal Classification +http://www.semanlink.net/tag/universal_decimal_classification|creationDate|2010-07-16 +http://www.semanlink.net/tag/universal_decimal_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/universal_decimal_classification|describedBy|https://en.wikipedia.org/wiki/Universal_Decimal_Classification +http://www.semanlink.net/tag/universal_decimal_classification|uri|http://www.semanlink.net/tag/universal_decimal_classification +http://www.semanlink.net/tag/self_attention|creationTime|2018-11-07T00:27:07Z +http://www.semanlink.net/tag/self_attention|prefLabel|Self-Attention +http://www.semanlink.net/tag/self_attention|broader|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/tag/self_attention|creationDate|2018-11-07 +http://www.semanlink.net/tag/self_attention|comment|"Attention mechanism relating different positions of a sequence in order to compute a representation of the same sequence. 
+ +Useful in machine reading, abstractive summarization, or image description generation +" +http://www.semanlink.net/tag/self_attention|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/self_attention|uri|http://www.semanlink.net/tag/self_attention +http://www.semanlink.net/tag/self_attention|broader_prefLabel|Attention mechanism +http://www.semanlink.net/tag/eswc_2019|creationTime|2019-05-30T11:39:15Z +http://www.semanlink.net/tag/eswc_2019|prefLabel|ESWC 2019 +http://www.semanlink.net/tag/eswc_2019|broader|http://www.semanlink.net/tag/eswc +http://www.semanlink.net/tag/eswc_2019|creationDate|2019-05-30 +http://www.semanlink.net/tag/eswc_2019|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eswc_2019|uri|http://www.semanlink.net/tag/eswc_2019 +http://www.semanlink.net/tag/eswc_2019|broader_prefLabel|ESWC +http://www.semanlink.net/tag/transposon|creationTime|2011-03-27T12:30:11Z +http://www.semanlink.net/tag/transposon|prefLabel|Transposon +http://www.semanlink.net/tag/transposon|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/transposon|creationDate|2011-03-27 +http://www.semanlink.net/tag/transposon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/transposon|describedBy|https://en.wikipedia.org/wiki/Transposable_elements +http://www.semanlink.net/tag/transposon|uri|http://www.semanlink.net/tag/transposon +http://www.semanlink.net/tag/transposon|broader_prefLabel|Genetics +http://www.semanlink.net/tag/transposon|broader_prefLabel|Génétique +http://www.semanlink.net/tag/jcs_java_caching_system|creationTime|2008-12-11T18:28:53Z +http://www.semanlink.net/tag/jcs_java_caching_system|prefLabel|JCS - Java Caching System +http://www.semanlink.net/tag/jcs_java_caching_system|broader|http://www.semanlink.net/tag/java_dev +http://www.semanlink.net/tag/jcs_java_caching_system|broader|http://www.semanlink.net/tag/cache +http://www.semanlink.net/tag/jcs_java_caching_system|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/jcs_java_caching_system|creationDate|2008-12-11 +http://www.semanlink.net/tag/jcs_java_caching_system|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jcs_java_caching_system|describedBy|http://jakarta.apache.org/jcs/ +http://www.semanlink.net/tag/jcs_java_caching_system|uri|http://www.semanlink.net/tag/jcs_java_caching_system +http://www.semanlink.net/tag/jcs_java_caching_system|broader_prefLabel|Java dev +http://www.semanlink.net/tag/jcs_java_caching_system|broader_prefLabel|Cache +http://www.semanlink.net/tag/jcs_java_caching_system|broader_prefLabel|apache.org +http://www.semanlink.net/tag/epigenetics|creationTime|2020-11-05T00:26:25Z +http://www.semanlink.net/tag/epigenetics|prefLabel|Epigenetics +http://www.semanlink.net/tag/epigenetics|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/epigenetics|creationDate|2020-11-05 +http://www.semanlink.net/tag/epigenetics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/epigenetics|describedBy|https://en.wikipedia.org/wiki/Epigenetics +http://www.semanlink.net/tag/epigenetics|uri|http://www.semanlink.net/tag/epigenetics +http://www.semanlink.net/tag/epigenetics|broader_prefLabel|Genetics +http://www.semanlink.net/tag/epigenetics|broader_prefLabel|Génétique +http://www.semanlink.net/tag/semantic_mashups|creationTime|2008-07-17T10:39:32Z 
+http://www.semanlink.net/tag/semantic_mashups|prefLabel|Semantic mashups +http://www.semanlink.net/tag/semantic_mashups|broader|http://www.semanlink.net/tag/mashups +http://www.semanlink.net/tag/semantic_mashups|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/semantic_mashups|creationDate|2008-07-17 +http://www.semanlink.net/tag/semantic_mashups|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_mashups|altLabel|LD Mashup +http://www.semanlink.net/tag/semantic_mashups|altLabel|Linked Data mashup +http://www.semanlink.net/tag/semantic_mashups|uri|http://www.semanlink.net/tag/semantic_mashups +http://www.semanlink.net/tag/semantic_mashups|broader_prefLabel|Mashups +http://www.semanlink.net/tag/semantic_mashups|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/sahara|prefLabel|Sahara +http://www.semanlink.net/tag/sahara|broader|http://www.semanlink.net/tag/desert +http://www.semanlink.net/tag/sahara|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/sahara|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sahara|uri|http://www.semanlink.net/tag/sahara +http://www.semanlink.net/tag/sahara|broader_prefLabel|Désert +http://www.semanlink.net/tag/sahara|broader_prefLabel|Afrique +http://www.semanlink.net/tag/sahara|broader_altLabel|Africa +http://www.semanlink.net/tag/computer_game|prefLabel|Computer game +http://www.semanlink.net/tag/computer_game|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/computer_game|uri|http://www.semanlink.net/tag/computer_game +http://www.semanlink.net/tag/neuroscience_and_ai|creationTime|2019-10-31T13:49:51Z +http://www.semanlink.net/tag/neuroscience_and_ai|prefLabel|Neuroscience AND AI +http://www.semanlink.net/tag/neuroscience_and_ai|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/neuroscience_and_ai|broader|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/neuroscience_and_ai|related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/neuroscience_and_ai|creationDate|2019-10-31 +http://www.semanlink.net/tag/neuroscience_and_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neuroscience_and_ai|altLabel|Neuroscience AND Machine learning +http://www.semanlink.net/tag/neuroscience_and_ai|uri|http://www.semanlink.net/tag/neuroscience_and_ai +http://www.semanlink.net/tag/neuroscience_and_ai|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/neuroscience_and_ai|broader_prefLabel|Neuroscience +http://www.semanlink.net/tag/neuroscience_and_ai|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/neuroscience_and_ai|broader_altLabel|AI +http://www.semanlink.net/tag/neuroscience_and_ai|broader_altLabel|IA +http://www.semanlink.net/tag/neuroscience_and_ai|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/pont_couvert|prefLabel|Pont couvert +http://www.semanlink.net/tag/pont_couvert|broader|http://www.semanlink.net/tag/pont +http://www.semanlink.net/tag/pont_couvert|creationDate|2007-01-14 +http://www.semanlink.net/tag/pont_couvert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pont_couvert|uri|http://www.semanlink.net/tag/pont_couvert +http://www.semanlink.net/tag/pont_couvert|broader_prefLabel|Pont 
+http://www.semanlink.net/tag/sparql_clipboard|prefLabel|SPARQL Clipboard +http://www.semanlink.net/tag/sparql_clipboard|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_clipboard|broader|http://www.semanlink.net/tag/live_clipboard +http://www.semanlink.net/tag/sparql_clipboard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_clipboard|altLabel|RDF Clipboard +http://www.semanlink.net/tag/sparql_clipboard|uri|http://www.semanlink.net/tag/sparql_clipboard +http://www.semanlink.net/tag/sparql_clipboard|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/sparql_clipboard|broader_prefLabel|Live Clipboard +http://www.semanlink.net/tag/triple_pattern_fragment|creationTime|2015-01-30T23:24:19Z +http://www.semanlink.net/tag/triple_pattern_fragment|prefLabel|Triple Pattern Fragment +http://www.semanlink.net/tag/triple_pattern_fragment|broader|http://www.semanlink.net/tag/linked_data_fragments +http://www.semanlink.net/tag/triple_pattern_fragment|creationDate|2015-01-30 +http://www.semanlink.net/tag/triple_pattern_fragment|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/triple_pattern_fragment|uri|http://www.semanlink.net/tag/triple_pattern_fragment +http://www.semanlink.net/tag/triple_pattern_fragment|broader_prefLabel|Linked Data Fragments +http://www.semanlink.net/tag/triple_pattern_fragment|broader_related|http://www.semanlink.net/tag/ruben_verborgh +http://www.semanlink.net/tag/france_delabrement|creationTime|2020-12-17T12:47:30Z +http://www.semanlink.net/tag/france_delabrement|prefLabel|France : délabrement +http://www.semanlink.net/tag/france_delabrement|broader|http://www.semanlink.net/tag/etat_de_la_france +http://www.semanlink.net/tag/france_delabrement|creationDate|2020-12-17 +http://www.semanlink.net/tag/france_delabrement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/france_delabrement|altLabel|La France ne marche pas +http://www.semanlink.net/tag/france_delabrement|uri|http://www.semanlink.net/tag/france_delabrement +http://www.semanlink.net/tag/france_delabrement|broader_prefLabel|Etat de la France +http://www.semanlink.net/tag/livesearch|prefLabel|Livesearch +http://www.semanlink.net/tag/livesearch|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/livesearch|broader|http://www.semanlink.net/tag/ihm_web +http://www.semanlink.net/tag/livesearch|broader|http://www.semanlink.net/tag/ajax +http://www.semanlink.net/tag/livesearch|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/livesearch|uri|http://www.semanlink.net/tag/livesearch +http://www.semanlink.net/tag/livesearch|broader_prefLabel|Web dev +http://www.semanlink.net/tag/livesearch|broader_prefLabel|IHM web +http://www.semanlink.net/tag/livesearch|broader_prefLabel|Ajax +http://www.semanlink.net/tag/livesearch|broader_altLabel|Web app dev +http://www.semanlink.net/tag/livesearch|broader_altLabel|XMLHttpRequest +http://www.semanlink.net/tag/psychanalyse|prefLabel|Psychanalyse +http://www.semanlink.net/tag/psychanalyse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/psychanalyse|uri|http://www.semanlink.net/tag/psychanalyse +http://www.semanlink.net/tag/relations_europe_usa|prefLabel|Relations Europe-USA +http://www.semanlink.net/tag/relations_europe_usa|broader|http://www.semanlink.net/tag/usa 
+http://www.semanlink.net/tag/relations_europe_usa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/relations_europe_usa|uri|http://www.semanlink.net/tag/relations_europe_usa +http://www.semanlink.net/tag/relations_europe_usa|broader_prefLabel|USA +http://www.semanlink.net/tag/relations_europe_usa|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/relations_europe_usa|broader_altLabel|United States +http://www.semanlink.net/tag/right_to_be_forgotten|creationTime|2014-05-21T21:39:24Z +http://www.semanlink.net/tag/right_to_be_forgotten|prefLabel|Right to Be Forgotten +http://www.semanlink.net/tag/right_to_be_forgotten|creationDate|2014-05-21 +http://www.semanlink.net/tag/right_to_be_forgotten|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/right_to_be_forgotten|uri|http://www.semanlink.net/tag/right_to_be_forgotten +http://www.semanlink.net/tag/sylvain_gugger|creationTime|2019-01-03T17:46:23Z +http://www.semanlink.net/tag/sylvain_gugger|prefLabel|Sylvain Gugger +http://www.semanlink.net/tag/sylvain_gugger|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/sylvain_gugger|related|http://www.semanlink.net/tag/normale_sup +http://www.semanlink.net/tag/sylvain_gugger|related|http://www.semanlink.net/tag/nlp_ens +http://www.semanlink.net/tag/sylvain_gugger|related|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/tag/sylvain_gugger|related|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/tag/sylvain_gugger|creationDate|2019-01-03 +http://www.semanlink.net/tag/sylvain_gugger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sylvain_gugger|uri|http://www.semanlink.net/tag/sylvain_gugger +http://www.semanlink.net/tag/sylvain_gugger|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/jean_jacques_annaud|creationTime|2015-09-22T23:07:14Z +http://www.semanlink.net/tag/jean_jacques_annaud|prefLabel|Jean-Jacques Annaud +http://www.semanlink.net/tag/jean_jacques_annaud|broader|http://www.semanlink.net/tag/realisateur +http://www.semanlink.net/tag/jean_jacques_annaud|creationDate|2015-09-22 +http://www.semanlink.net/tag/jean_jacques_annaud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jean_jacques_annaud|describedBy|https://fr.wikipedia.org/wiki/Jean-Jacques_Annaud +http://www.semanlink.net/tag/jean_jacques_annaud|uri|http://www.semanlink.net/tag/jean_jacques_annaud +http://www.semanlink.net/tag/jean_jacques_annaud|broader_prefLabel|Réalisateur +http://www.semanlink.net/tag/jean_jacques_annaud|broader_altLabel|Cinéaste +http://www.semanlink.net/tag/semences_paysanes|creationTime|2019-06-18T23:13:00Z +http://www.semanlink.net/tag/semences_paysanes|prefLabel|Semences paysanes +http://www.semanlink.net/tag/semences_paysanes|broader|http://www.semanlink.net/tag/semencier +http://www.semanlink.net/tag/semences_paysanes|creationDate|2019-06-18 +http://www.semanlink.net/tag/semences_paysanes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semences_paysanes|uri|http://www.semanlink.net/tag/semences_paysanes +http://www.semanlink.net/tag/semences_paysanes|broader_prefLabel|Semencier +http://www.semanlink.net/tag/ammonite|prefLabel|Ammonite +http://www.semanlink.net/tag/ammonite|broader|http://www.semanlink.net/tag/fossile +http://www.semanlink.net/tag/ammonite|creationDate|2007-01-19 
+http://www.semanlink.net/tag/ammonite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ammonite|uri|http://www.semanlink.net/tag/ammonite +http://www.semanlink.net/tag/ammonite|broader_prefLabel|Fossile +http://www.semanlink.net/tag/hollywood|prefLabel|Hollywood +http://www.semanlink.net/tag/hollywood|broader|http://www.semanlink.net/tag/content_industries +http://www.semanlink.net/tag/hollywood|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hollywood|uri|http://www.semanlink.net/tag/hollywood +http://www.semanlink.net/tag/hollywood|broader_prefLabel|Content industries +http://www.semanlink.net/tag/semantic_web_presentation|creationTime|2007-03-20T22:33:01Z +http://www.semanlink.net/tag/semantic_web_presentation|prefLabel|Semantic web : présentation +http://www.semanlink.net/tag/semantic_web_presentation|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_presentation|creationDate|2007-03-20 +http://www.semanlink.net/tag/semantic_web_presentation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_presentation|uri|http://www.semanlink.net/tag/semantic_web_presentation +http://www.semanlink.net/tag/semantic_web_presentation|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_presentation|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_presentation|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/vito|prefLabel|Vito +http://www.semanlink.net/tag/vito|broader|http://www.semanlink.net/tag/ami +http://www.semanlink.net/tag/vito|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vito|uri|http://www.semanlink.net/tag/vito +http://www.semanlink.net/tag/vito|broader_prefLabel|Ami +http://www.semanlink.net/tag/selfie|creationTime|2014-07-14T13:11:56Z +http://www.semanlink.net/tag/selfie|prefLabel|Selfie +http://www.semanlink.net/tag/selfie|creationDate|2014-07-14 +http://www.semanlink.net/tag/selfie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/selfie|uri|http://www.semanlink.net/tag/selfie +http://www.semanlink.net/tag/nlp_ens|creationTime|2021-04-30T23:07:57Z +http://www.semanlink.net/tag/nlp_ens|prefLabel|NLP@ENS +http://www.semanlink.net/tag/nlp_ens|broader|http://www.semanlink.net/tag/normale_sup +http://www.semanlink.net/tag/nlp_ens|broader|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/tag/nlp_ens|creationDate|2021-04-30 +http://www.semanlink.net/tag/nlp_ens|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_ens|uri|http://www.semanlink.net/tag/nlp_ens +http://www.semanlink.net/tag/nlp_ens|broader_prefLabel|Normale Sup +http://www.semanlink.net/tag/nlp_ens|broader_prefLabel|NLP Teams +http://www.semanlink.net/tag/nlp_ens|broader_altLabel|ENS +http://www.semanlink.net/tag/nlp_ens|broader_altLabel|NLP Groups +http://www.semanlink.net/tag/open_world_assumption|creationTime|2010-09-06T21:16:45Z +http://www.semanlink.net/tag/open_world_assumption|prefLabel|Open World Assumption +http://www.semanlink.net/tag/open_world_assumption|creationDate|2010-09-06 +http://www.semanlink.net/tag/open_world_assumption|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/open_world_assumption|uri|http://www.semanlink.net/tag/open_world_assumption +http://www.semanlink.net/tag/fleuve_niger|prefLabel|Fleuve Niger 
+http://www.semanlink.net/tag/fleuve_niger|broader|http://www.semanlink.net/tag/fleuve +http://www.semanlink.net/tag/fleuve_niger|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/fleuve_niger|related|http://www.semanlink.net/tag/nigeria +http://www.semanlink.net/tag/fleuve_niger|related|http://www.semanlink.net/tag/sahara +http://www.semanlink.net/tag/fleuve_niger|related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/fleuve_niger|related|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/fleuve_niger|related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/fleuve_niger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fleuve_niger|uri|http://www.semanlink.net/tag/fleuve_niger +http://www.semanlink.net/tag/fleuve_niger|broader_prefLabel|Fleuve +http://www.semanlink.net/tag/fleuve_niger|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/unix_howto|creationTime|2007-09-04T15:40:38Z +http://www.semanlink.net/tag/unix_howto|prefLabel|UNIX Tips +http://www.semanlink.net/tag/unix_howto|broader|http://www.semanlink.net/tag/unix +http://www.semanlink.net/tag/unix_howto|broader|http://www.semanlink.net/tag/howto +http://www.semanlink.net/tag/unix_howto|creationDate|2007-09-04 +http://www.semanlink.net/tag/unix_howto|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/unix_howto|altLabel|UNIX howto +http://www.semanlink.net/tag/unix_howto|uri|http://www.semanlink.net/tag/unix_howto +http://www.semanlink.net/tag/unix_howto|broader_prefLabel|Unix +http://www.semanlink.net/tag/unix_howto|broader_prefLabel|Howto +http://www.semanlink.net/tag/mallet|creationTime|2012-09-20T10:42:05Z +http://www.semanlink.net/tag/mallet|prefLabel|Mallet +http://www.semanlink.net/tag/mallet|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/mallet|broader|http://www.semanlink.net/tag/java_tool +http://www.semanlink.net/tag/mallet|broader|http://www.semanlink.net/tag/machine_learning_library +http://www.semanlink.net/tag/mallet|related|http://www.semanlink.net/tag/andrew_mccallum +http://www.semanlink.net/tag/mallet|creationDate|2012-09-20 +http://www.semanlink.net/tag/mallet|comment|A Java-based package for statistical natural language processing, document classification, clustering, topic modeling, information extraction, and other machine learning applications to text. 
+http://www.semanlink.net/tag/mallet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mallet|homepage|http://mallet.cs.umass.edu/ +http://www.semanlink.net/tag/mallet|uri|http://www.semanlink.net/tag/mallet +http://www.semanlink.net/tag/mallet|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/mallet|broader_prefLabel|Java tool +http://www.semanlink.net/tag/mallet|broader_prefLabel|Machine Learning library +http://www.semanlink.net/tag/apache_marmotta|creationTime|2013-04-26T13:22:28Z +http://www.semanlink.net/tag/apache_marmotta|prefLabel|Apache Marmotta +http://www.semanlink.net/tag/apache_marmotta|broader|http://www.semanlink.net/tag/ldp_implementations +http://www.semanlink.net/tag/apache_marmotta|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/apache_marmotta|related|http://www.semanlink.net/tag/linked_media_framework +http://www.semanlink.net/tag/apache_marmotta|creationDate|2013-04-26 +http://www.semanlink.net/tag/apache_marmotta|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apache_marmotta|homepage|http://marmotta.incubator.apache.org/ +http://www.semanlink.net/tag/apache_marmotta|uri|http://www.semanlink.net/tag/apache_marmotta +http://www.semanlink.net/tag/apache_marmotta|broader_prefLabel|LDP: implementations +http://www.semanlink.net/tag/apache_marmotta|broader_prefLabel|apache.org +http://www.semanlink.net/tag/girafe|prefLabel|Girafe +http://www.semanlink.net/tag/girafe|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/girafe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/girafe|uri|http://www.semanlink.net/tag/girafe +http://www.semanlink.net/tag/girafe|broader_prefLabel|Animal +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|creationTime|2010-11-07T12:58:39Z +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|prefLabel|Relational Databases and the Semantic Web +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader|http://www.semanlink.net/tag/database +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|creationDate|2010-11-07 +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|altLabel|RDF and database +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|uri|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader_prefLabel|RDF +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader_prefLabel|Database +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader_altLabel|sw +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader_altLabel|Web sémantique 
+http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/relational_databases_and_the_semantic_web|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/noise_contrastive_estimation|creationTime|2018-07-07T15:04:02Z +http://www.semanlink.net/tag/noise_contrastive_estimation|prefLabel|Noise contrastive estimation +http://www.semanlink.net/tag/noise_contrastive_estimation|related|http://www.semanlink.net/tag/negative_sampling +http://www.semanlink.net/tag/noise_contrastive_estimation|related|http://www.semanlink.net/tag/word2vec +http://www.semanlink.net/tag/noise_contrastive_estimation|creationDate|2018-07-07 +http://www.semanlink.net/tag/noise_contrastive_estimation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/noise_contrastive_estimation|altLabel|NCE +http://www.semanlink.net/tag/noise_contrastive_estimation|uri|http://www.semanlink.net/tag/noise_contrastive_estimation +http://www.semanlink.net/tag/open_education|creationTime|2013-12-18T16:30:43Z +http://www.semanlink.net/tag/open_education|prefLabel|Open Education +http://www.semanlink.net/tag/open_education|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/open_education|creationDate|2013-12-18 +http://www.semanlink.net/tag/open_education|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/open_education|uri|http://www.semanlink.net/tag/open_education +http://www.semanlink.net/tag/open_education|broader_prefLabel|Education +http://www.semanlink.net/tag/open_education|broader_altLabel|Enseignement +http://www.semanlink.net/tag/rdf_graph_versioning|creationTime|2010-06-16T08:43:58Z +http://www.semanlink.net/tag/rdf_graph_versioning|prefLabel|RDF graph versioning +http://www.semanlink.net/tag/rdf_graph_versioning|broader|http://www.semanlink.net/tag/rdf_dev +http://www.semanlink.net/tag/rdf_graph_versioning|creationDate|2010-06-16 +http://www.semanlink.net/tag/rdf_graph_versioning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_graph_versioning|uri|http://www.semanlink.net/tag/rdf_graph_versioning +http://www.semanlink.net/tag/rdf_graph_versioning|broader_prefLabel|RDF dev +http://www.semanlink.net/tag/domain_specific_bert|creationTime|2020-12-01T15:09:10Z +http://www.semanlink.net/tag/domain_specific_bert|prefLabel|Domain-Specific BERT +http://www.semanlink.net/tag/domain_specific_bert|broader|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/domain_specific_bert|broader|http://www.semanlink.net/tag/domain_specific_nlp +http://www.semanlink.net/tag/domain_specific_bert|creationDate|2020-12-01 +http://www.semanlink.net/tag/domain_specific_bert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/domain_specific_bert|uri|http://www.semanlink.net/tag/domain_specific_bert +http://www.semanlink.net/tag/domain_specific_bert|broader_prefLabel|BERT +http://www.semanlink.net/tag/domain_specific_bert|broader_prefLabel|Domain-Specific NLP 
+http://www.semanlink.net/tag/domain_specific_bert|broader_related|http://www.semanlink.net/tag/domain_knowledge_deep_learning +http://www.semanlink.net/tag/catastrophe_humanitaire|prefLabel|Catastrophe humanitaire +http://www.semanlink.net/tag/catastrophe_humanitaire|broader|http://www.semanlink.net/tag/horreur +http://www.semanlink.net/tag/catastrophe_humanitaire|broader|http://www.semanlink.net/tag/catastrophe +http://www.semanlink.net/tag/catastrophe_humanitaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/catastrophe_humanitaire|uri|http://www.semanlink.net/tag/catastrophe_humanitaire +http://www.semanlink.net/tag/catastrophe_humanitaire|broader_prefLabel|Horreur +http://www.semanlink.net/tag/catastrophe_humanitaire|broader_prefLabel|Catastrophe +http://www.semanlink.net/tag/amerique_profonde|creationTime|2019-06-17T22:58:22Z +http://www.semanlink.net/tag/amerique_profonde|prefLabel|Amérique profonde +http://www.semanlink.net/tag/amerique_profonde|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/amerique_profonde|creationDate|2019-06-17 +http://www.semanlink.net/tag/amerique_profonde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amerique_profonde|uri|http://www.semanlink.net/tag/amerique_profonde +http://www.semanlink.net/tag/amerique_profonde|broader_prefLabel|USA +http://www.semanlink.net/tag/amerique_profonde|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/amerique_profonde|broader_altLabel|United States +http://www.semanlink.net/tag/catastrophe|prefLabel|Catastrophe +http://www.semanlink.net/tag/catastrophe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/catastrophe|uri|http://www.semanlink.net/tag/catastrophe +http://www.semanlink.net/tag/machine_learning_basics|creationTime|2015-12-26T01:13:39Z +http://www.semanlink.net/tag/machine_learning_basics|prefLabel|Machine Learning Basics +http://www.semanlink.net/tag/machine_learning_basics|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/machine_learning_basics|broader|http://www.semanlink.net/tag/pour_les_nuls +http://www.semanlink.net/tag/machine_learning_basics|creationDate|2015-12-26 +http://www.semanlink.net/tag/machine_learning_basics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/machine_learning_basics|uri|http://www.semanlink.net/tag/machine_learning_basics +http://www.semanlink.net/tag/machine_learning_basics|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/machine_learning_basics|broader_prefLabel|Pour les nuls +http://www.semanlink.net/tag/machine_learning_basics|broader_altLabel|ML +http://www.semanlink.net/tag/machine_learning_basics|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/machine_learning_basics|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/france_bureaucratie|creationTime|2020-06-06T13:58:37Z +http://www.semanlink.net/tag/france_bureaucratie|prefLabel|France : bureaucratie +http://www.semanlink.net/tag/france_bureaucratie|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/france_bureaucratie|broader|http://www.semanlink.net/tag/bureaucratie +http://www.semanlink.net/tag/france_bureaucratie|related|http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions +http://www.semanlink.net/tag/france_bureaucratie|creationDate|2020-06-06 
+http://www.semanlink.net/tag/france_bureaucratie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/france_bureaucratie|uri|http://www.semanlink.net/tag/france_bureaucratie +http://www.semanlink.net/tag/france_bureaucratie|broader_prefLabel|France +http://www.semanlink.net/tag/france_bureaucratie|broader_prefLabel|Bureaucratie +http://www.semanlink.net/tag/www_2015|creationTime|2015-05-22T00:20:24Z +http://www.semanlink.net/tag/www_2015|prefLabel|WWW 2015 +http://www.semanlink.net/tag/www_2015|broader|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/www_2015|creationDate|2015-05-22 +http://www.semanlink.net/tag/www_2015|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/www_2015|uri|http://www.semanlink.net/tag/www_2015 +http://www.semanlink.net/tag/www_2015|broader_prefLabel|TheWebConf +http://www.semanlink.net/tag/www_2015|broader_altLabel|WWW Conference +http://www.semanlink.net/tag/dengue|creationTime|2008-01-25T21:33:19Z +http://www.semanlink.net/tag/dengue|prefLabel|Dengue +http://www.semanlink.net/tag/dengue|broader|http://www.semanlink.net/tag/maladie +http://www.semanlink.net/tag/dengue|creationDate|2008-01-25 +http://www.semanlink.net/tag/dengue|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dengue|uri|http://www.semanlink.net/tag/dengue +http://www.semanlink.net/tag/dengue|broader_prefLabel|Maladie +http://www.semanlink.net/tag/ethereum|creationTime|2016-03-28T12:49:56Z +http://www.semanlink.net/tag/ethereum|prefLabel|Ethereum +http://www.semanlink.net/tag/ethereum|broader|http://www.semanlink.net/tag/blockchain +http://www.semanlink.net/tag/ethereum|broader|http://www.semanlink.net/tag/virtual_currency +http://www.semanlink.net/tag/ethereum|related|http://www.semanlink.net/tag/bitcoin +http://www.semanlink.net/tag/ethereum|creationDate|2016-03-28 +http://www.semanlink.net/tag/ethereum|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ethereum|describedBy|https://en.wikipedia.org/wiki/Ethereum +http://www.semanlink.net/tag/ethereum|uri|http://www.semanlink.net/tag/ethereum +http://www.semanlink.net/tag/ethereum|broader_prefLabel|Blockchain +http://www.semanlink.net/tag/ethereum|broader_prefLabel|Digital currency +http://www.semanlink.net/tag/ethereum|broader_altLabel|Virtual currency +http://www.semanlink.net/tag/ethereum|broader_altLabel|Monnaie virtuelle +http://www.semanlink.net/tag/entity_attribute_value_model|creationTime|2010-04-26T11:13:28Z +http://www.semanlink.net/tag/entity_attribute_value_model|prefLabel|Entity-attribute-value model +http://www.semanlink.net/tag/entity_attribute_value_model|related|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/entity_attribute_value_model|creationDate|2010-04-26 +http://www.semanlink.net/tag/entity_attribute_value_model|comment|a data model to encode entities where the number of attributes that can be used to describe them is potentially vast, but the number that will actually apply to a given entity is relatively small. cf. sparse matrix. 
+http://www.semanlink.net/tag/entity_attribute_value_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_attribute_value_model|describedBy|https://en.wikipedia.org/wiki/Entity-attribute-value_model +http://www.semanlink.net/tag/entity_attribute_value_model|altLabel|EAV +http://www.semanlink.net/tag/entity_attribute_value_model|uri|http://www.semanlink.net/tag/entity_attribute_value_model +http://www.semanlink.net/tag/flair|creationTime|2019-10-28T11:46:29Z +http://www.semanlink.net/tag/flair|prefLabel|Flair +http://www.semanlink.net/tag/flair|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/flair|creationDate|2019-10-28 +http://www.semanlink.net/tag/flair|comment|Open-source NLP framework by Zalando Research. +http://www.semanlink.net/tag/flair|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/flair|uri|http://www.semanlink.net/tag/flair +http://www.semanlink.net/tag/flair|broader_prefLabel|NLP tools +http://www.semanlink.net/tag/mai_68|prefLabel|Mai 68 +http://www.semanlink.net/tag/mai_68|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/mai_68|broader|http://www.semanlink.net/tag/contestation +http://www.semanlink.net/tag/mai_68|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mai_68|uri|http://www.semanlink.net/tag/mai_68 +http://www.semanlink.net/tag/mai_68|broader_prefLabel|France +http://www.semanlink.net/tag/mai_68|broader_prefLabel|Contestation +http://www.semanlink.net/tag/louis_jouvet|creationTime|2007-12-27T22:18:42Z +http://www.semanlink.net/tag/louis_jouvet|prefLabel|Louis Jouvet +http://www.semanlink.net/tag/louis_jouvet|broader|http://www.semanlink.net/tag/acteur +http://www.semanlink.net/tag/louis_jouvet|broader|http://www.semanlink.net/tag/cinema_francais +http://www.semanlink.net/tag/louis_jouvet|creationDate|2007-12-27 +http://www.semanlink.net/tag/louis_jouvet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/louis_jouvet|uri|http://www.semanlink.net/tag/louis_jouvet +http://www.semanlink.net/tag/louis_jouvet|broader_prefLabel|Acteur +http://www.semanlink.net/tag/louis_jouvet|broader_prefLabel|Cinéma français +http://www.semanlink.net/tag/security_and_rest|creationTime|2014-09-26T00:29:13Z +http://www.semanlink.net/tag/security_and_rest|prefLabel|Security and REST +http://www.semanlink.net/tag/security_and_rest|broader|http://www.semanlink.net/tag/rest +http://www.semanlink.net/tag/security_and_rest|broader|http://www.semanlink.net/tag/securite_informatique +http://www.semanlink.net/tag/security_and_rest|creationDate|2014-09-26 +http://www.semanlink.net/tag/security_and_rest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/security_and_rest|uri|http://www.semanlink.net/tag/security_and_rest +http://www.semanlink.net/tag/security_and_rest|broader_prefLabel|REST +http://www.semanlink.net/tag/security_and_rest|broader_prefLabel|Cybersecurity +http://www.semanlink.net/tag/security_and_rest|broader_prefLabel|Sécurité informatique +http://www.semanlink.net/tag/songhai|prefLabel|Songhaï +http://www.semanlink.net/tag/songhai|broader|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/songhai|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/songhai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/songhai|uri|http://www.semanlink.net/tag/songhai +http://www.semanlink.net/tag/songhai|broader_prefLabel|Peuples +http://www.semanlink.net/tag/songhai|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/no_more_drm|prefLabel|No more DRM +http://www.semanlink.net/tag/no_more_drm|broader|http://www.semanlink.net/tag/drm +http://www.semanlink.net/tag/no_more_drm|creationDate|2006-07-20 +http://www.semanlink.net/tag/no_more_drm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/no_more_drm|uri|http://www.semanlink.net/tag/no_more_drm +http://www.semanlink.net/tag/no_more_drm|broader_prefLabel|DRM +http://www.semanlink.net/tag/codec|creationTime|2010-05-03T09:32:17Z +http://www.semanlink.net/tag/codec|prefLabel|Codec +http://www.semanlink.net/tag/codec|creationDate|2010-05-03 +http://www.semanlink.net/tag/codec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/codec|uri|http://www.semanlink.net/tag/codec +http://www.semanlink.net/tag/justice|prefLabel|Justice +http://www.semanlink.net/tag/justice|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/justice|uri|http://www.semanlink.net/tag/justice +http://www.semanlink.net/tag/memoire_humaine|creationTime|2017-05-28T12:27:03Z +http://www.semanlink.net/tag/memoire_humaine|prefLabel|Mémoire humaine +http://www.semanlink.net/tag/memoire_humaine|broader|http://www.semanlink.net/tag/memoire +http://www.semanlink.net/tag/memoire_humaine|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/memoire_humaine|creationDate|2017-05-28 +http://www.semanlink.net/tag/memoire_humaine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/memoire_humaine|uri|http://www.semanlink.net/tag/memoire_humaine +http://www.semanlink.net/tag/memoire_humaine|broader_prefLabel|Mémoire +http://www.semanlink.net/tag/memoire_humaine|broader_prefLabel|Brain +http://www.semanlink.net/tag/memoire_humaine|broader_altLabel|Cerveau +http://www.semanlink.net/tag/memoire_humaine|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/knowledge_graph|creationTime|2013-06-18T00:58:28Z +http://www.semanlink.net/tag/knowledge_graph|prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/knowledge_graph|broader|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/tag/knowledge_graph|broader|http://www.semanlink.net/tag/knowledge +http://www.semanlink.net/tag/knowledge_graph|broader|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/knowledge_graph|broader|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/knowledge_graph|creationDate|2013-06-18 +http://www.semanlink.net/tag/knowledge_graph|comment|[Surveys](/tag/?and=knowledge_graph&and=survey) (see also [surveys about graphs](/tag/?and=graph&and=survey)) +http://www.semanlink.net/tag/knowledge_graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_graph|altLabel|Knowledge Graph +http://www.semanlink.net/tag/knowledge_graph|altLabel|KG +http://www.semanlink.net/tag/knowledge_graph|uri|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/knowledge_graph|broader_prefLabel|Knowledge Representation +http://www.semanlink.net/tag/knowledge_graph|broader_prefLabel|Knowledge +http://www.semanlink.net/tag/knowledge_graph|broader_prefLabel|Graph 
+http://www.semanlink.net/tag/knowledge_graph|broader_prefLabel|Knowledge bases +http://www.semanlink.net/tag/knowledge_graph|broader_altLabel|KR +http://www.semanlink.net/tag/knowledge_graph|broader_altLabel|Knowledge Base +http://www.semanlink.net/tag/terrorisme|prefLabel|Terrorisme +http://www.semanlink.net/tag/terrorisme|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/terrorisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/terrorisme|uri|http://www.semanlink.net/tag/terrorisme +http://www.semanlink.net/tag/terrorisme|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/lituanie|creationTime|2021-08-24T15:07:37Z +http://www.semanlink.net/tag/lituanie|prefLabel|Lituanie +http://www.semanlink.net/tag/lituanie|creationDate|2021-08-24 +http://www.semanlink.net/tag/lituanie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lituanie|describedBy|https://fr.wikipedia.org/wiki/Lituanie +http://www.semanlink.net/tag/lituanie|altLabel|Lithuania +http://www.semanlink.net/tag/lituanie|uri|http://www.semanlink.net/tag/lituanie +http://www.semanlink.net/tag/kode|creationTime|2009-11-26T09:44:34Z +http://www.semanlink.net/tag/kode|prefLabel|KODE +http://www.semanlink.net/tag/kode|broader|http://www.semanlink.net/tag/database +http://www.semanlink.net/tag/kode|broader|http://www.semanlink.net/tag/radix_trees +http://www.semanlink.net/tag/kode|creationDate|2009-11-26 +http://www.semanlink.net/tag/kode|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kode|homepage|http://www.kode.fr/ +http://www.semanlink.net/tag/kode|uri|http://www.semanlink.net/tag/kode +http://www.semanlink.net/tag/kode|broader_prefLabel|Database +http://www.semanlink.net/tag/kode|broader_prefLabel|Radix trees +http://www.semanlink.net/tag/herve_kempf|creationTime|2009-10-04T11:18:17Z +http://www.semanlink.net/tag/herve_kempf|prefLabel|Hervé Kempf +http://www.semanlink.net/tag/herve_kempf|broader|http://www.semanlink.net/tag/journal_le_monde +http://www.semanlink.net/tag/herve_kempf|related|http://www.semanlink.net/tag/critique_du_liberalisme +http://www.semanlink.net/tag/herve_kempf|related|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/herve_kempf|creationDate|2009-10-04 +http://www.semanlink.net/tag/herve_kempf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/herve_kempf|uri|http://www.semanlink.net/tag/herve_kempf +http://www.semanlink.net/tag/herve_kempf|broader_prefLabel|Journal Le Monde +http://www.semanlink.net/tag/mondialisation|prefLabel|Mondialisation +http://www.semanlink.net/tag/mondialisation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mondialisation|uri|http://www.semanlink.net/tag/mondialisation +http://www.semanlink.net/tag/rda|creationTime|2007-04-11T22:44:58Z +http://www.semanlink.net/tag/rda|prefLabel|RDA +http://www.semanlink.net/tag/rda|broader|http://www.semanlink.net/tag/allemagne +http://www.semanlink.net/tag/rda|broader|http://www.semanlink.net/tag/communisme +http://www.semanlink.net/tag/rda|creationDate|2007-04-11 +http://www.semanlink.net/tag/rda|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rda|uri|http://www.semanlink.net/tag/rda +http://www.semanlink.net/tag/rda|broader_prefLabel|Allemagne +http://www.semanlink.net/tag/rda|broader_prefLabel|Communisme 
+http://www.semanlink.net/tag/rda|broader_altLabel|Germany +http://www.semanlink.net/tag/rda|broader_altLabel|Deutschland +http://www.semanlink.net/tag/facebook|creationTime|2007-07-06T21:00:10Z +http://www.semanlink.net/tag/facebook|prefLabel|Facebook +http://www.semanlink.net/tag/facebook|broader|http://www.semanlink.net/tag/social_networks +http://www.semanlink.net/tag/facebook|creationDate|2007-07-06 +http://www.semanlink.net/tag/facebook|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/facebook|altLabel|FB +http://www.semanlink.net/tag/facebook|uri|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/facebook|broader_prefLabel|Social Networks +http://www.semanlink.net/tag/cifre|creationTime|2019-05-09T14:32:11Z +http://www.semanlink.net/tag/cifre|prefLabel|CIFRE +http://www.semanlink.net/tag/cifre|related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/cifre|creationDate|2019-05-09 +http://www.semanlink.net/tag/cifre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cifre|uri|http://www.semanlink.net/tag/cifre +http://www.semanlink.net/tag/manaal_faruqui|creationTime|2018-05-10T12:36:13Z +http://www.semanlink.net/tag/manaal_faruqui|prefLabel|Manaal Faruqui +http://www.semanlink.net/tag/manaal_faruqui|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/manaal_faruqui|creationDate|2018-05-10 +http://www.semanlink.net/tag/manaal_faruqui|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/manaal_faruqui|describedBy|http://www.manaalfaruqui.com/ +http://www.semanlink.net/tag/manaal_faruqui|uri|http://www.semanlink.net/tag/manaal_faruqui +http://www.semanlink.net/tag/manaal_faruqui|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/httpunit|creationTime|2008-11-18T14:41:03Z +http://www.semanlink.net/tag/httpunit|prefLabel|HttpUnit +http://www.semanlink.net/tag/httpunit|broader|http://www.semanlink.net/tag/junit +http://www.semanlink.net/tag/httpunit|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/httpunit|creationDate|2008-11-18 +http://www.semanlink.net/tag/httpunit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/httpunit|describedBy|http://httpunit.sourceforge.net/ +http://www.semanlink.net/tag/httpunit|uri|http://www.semanlink.net/tag/httpunit +http://www.semanlink.net/tag/httpunit|broader_prefLabel|JUnit +http://www.semanlink.net/tag/httpunit|broader_prefLabel|Web dev +http://www.semanlink.net/tag/httpunit|broader_altLabel|Web app dev +http://www.semanlink.net/tag/presidentielles_2012|creationTime|2012-05-06T09:32:37Z +http://www.semanlink.net/tag/presidentielles_2012|prefLabel|Présidentielles 2012 +http://www.semanlink.net/tag/presidentielles_2012|broader|http://www.semanlink.net/tag/politique_francaise +http://www.semanlink.net/tag/presidentielles_2012|creationDate|2012-05-06 +http://www.semanlink.net/tag/presidentielles_2012|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/presidentielles_2012|uri|http://www.semanlink.net/tag/presidentielles_2012 +http://www.semanlink.net/tag/presidentielles_2012|broader_prefLabel|Politique française +http://www.semanlink.net/tag/extreme_classification|creationTime|2019-06-22T17:18:33Z +http://www.semanlink.net/tag/extreme_classification|prefLabel|Extreme classification 
+http://www.semanlink.net/tag/extreme_classification|broader|http://www.semanlink.net/tag/statistical_classification +http://www.semanlink.net/tag/extreme_classification|creationDate|2019-06-22 +http://www.semanlink.net/tag/extreme_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/extreme_classification|uri|http://www.semanlink.net/tag/extreme_classification +http://www.semanlink.net/tag/extreme_classification|broader_prefLabel|Classification +http://www.semanlink.net/tag/kd_mkb_paper|creationTime|2020-07-03T10:12:31Z +http://www.semanlink.net/tag/kd_mkb_paper|prefLabel|KD-MKB paper +http://www.semanlink.net/tag/kd_mkb_paper|broader|http://www.semanlink.net/tag/coling2020 +http://www.semanlink.net/tag/kd_mkb_paper|broader|http://www.semanlink.net/tag/kd_mkb +http://www.semanlink.net/tag/kd_mkb_paper|broader|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/tag/kd_mkb_paper|creationDate|2020-07-03 +http://www.semanlink.net/tag/kd_mkb_paper|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kd_mkb_paper|uri|http://www.semanlink.net/tag/kd_mkb_paper +http://www.semanlink.net/tag/kd_mkb_paper|broader_prefLabel|COLING2020 +http://www.semanlink.net/tag/kd_mkb_paper|broader_prefLabel|KD-MKB +http://www.semanlink.net/tag/kd_mkb_paper|broader_prefLabel|Raphaël Sourty +http://www.semanlink.net/tag/kd_mkb_paper|broader_altLabel|KDMKB +http://www.semanlink.net/tag/kd_mkb_paper|broader_altLabel|KD-MKR +http://www.semanlink.net/tag/kd_mkb_paper|broader_altLabel|raphaelsty +http://www.semanlink.net/tag/kd_mkb_paper|broader_related|http://www.semanlink.net/tag/multiple_knowledge_bases +http://www.semanlink.net/tag/kd_mkb_paper|broader_related|http://www.semanlink.net/tag/coling2020 +http://www.semanlink.net/tag/kd_mkb_paper|broader_related|http://www.semanlink.net/tag/kd_mkb +http://www.semanlink.net/tag/kd_mkb_paper|broader_related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/cancel_culture|creationTime|2020-12-26T13:01:40Z +http://www.semanlink.net/tag/cancel_culture|prefLabel|Cancel culture +http://www.semanlink.net/tag/cancel_culture|creationDate|2020-12-26 +http://www.semanlink.net/tag/cancel_culture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cancel_culture|describedBy|https://en.wikipedia.org/wiki/Cancel_culture +http://www.semanlink.net/tag/cancel_culture|altLabel|Call-out culture +http://www.semanlink.net/tag/cancel_culture|uri|http://www.semanlink.net/tag/cancel_culture +http://www.semanlink.net/tag/histoire_naturelle|creationTime|2007-11-04T11:12:30Z +http://www.semanlink.net/tag/histoire_naturelle|prefLabel|Histoire naturelle +http://www.semanlink.net/tag/histoire_naturelle|creationDate|2007-11-04 +http://www.semanlink.net/tag/histoire_naturelle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_naturelle|uri|http://www.semanlink.net/tag/histoire_naturelle +http://www.semanlink.net/tag/folksonomies_ontologies|creationTime|2007-06-11T21:33:33Z +http://www.semanlink.net/tag/folksonomies_ontologies|prefLabel|folksonomies ontologies +http://www.semanlink.net/tag/folksonomies_ontologies|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/folksonomies_ontologies|creationDate|2007-06-11 +http://www.semanlink.net/tag/folksonomies_ontologies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/folksonomies_ontologies|uri|http://www.semanlink.net/tag/folksonomies_ontologies +http://www.semanlink.net/tag/folksonomies_ontologies|broader_prefLabel|Tagging +http://www.semanlink.net/tag/google_app_engine|creationTime|2008-04-14T14:19:21Z +http://www.semanlink.net/tag/google_app_engine|prefLabel|Google App Engine +http://www.semanlink.net/tag/google_app_engine|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_app_engine|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/google_app_engine|creationDate|2008-04-14 +http://www.semanlink.net/tag/google_app_engine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_app_engine|uri|http://www.semanlink.net/tag/google_app_engine +http://www.semanlink.net/tag/google_app_engine|broader_prefLabel|Google +http://www.semanlink.net/tag/google_app_engine|broader_prefLabel|Web dev +http://www.semanlink.net/tag/google_app_engine|broader_altLabel|Web app dev +http://www.semanlink.net/tag/google_app_engine|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/knowledge_driven_embeddings|creationTime|2019-06-06T08:35:15Z +http://www.semanlink.net/tag/knowledge_driven_embeddings|prefLabel|Knowledge-driven embeddings +http://www.semanlink.net/tag/knowledge_driven_embeddings|broader|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/tag/knowledge_driven_embeddings|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/knowledge_driven_embeddings|related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/knowledge_driven_embeddings|related|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/tag/knowledge_driven_embeddings|creationDate|2019-06-06 +http://www.semanlink.net/tag/knowledge_driven_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_driven_embeddings|uri|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/knowledge_driven_embeddings|broader_prefLabel|Knowledge bases +http://www.semanlink.net/tag/knowledge_driven_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/knowledge_driven_embeddings|broader_altLabel|Knowledge Base +http://www.semanlink.net/tag/knowledge_driven_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/knowledge_driven_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/knowledge_driven_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/hp|prefLabel|HP +http://www.semanlink.net/tag/hp|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/hp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hp|uri|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/hp|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/on_device_nlp|creationTime|2018-11-02T23:51:27Z +http://www.semanlink.net/tag/on_device_nlp|prefLabel|On device NLP +http://www.semanlink.net/tag/on_device_nlp|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/on_device_nlp|creationDate|2018-11-02 +http://www.semanlink.net/tag/on_device_nlp|comment|" +" +http://www.semanlink.net/tag/on_device_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/on_device_nlp|uri|http://www.semanlink.net/tag/on_device_nlp +http://www.semanlink.net/tag/on_device_nlp|broader_prefLabel|NLP +http://www.semanlink.net/tag/on_device_nlp|broader_altLabel|TALN +http://www.semanlink.net/tag/on_device_nlp|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/on_device_nlp|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/fractales|prefLabel|Fractales +http://www.semanlink.net/tag/fractales|broader|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/fractales|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fractales|uri|http://www.semanlink.net/tag/fractales +http://www.semanlink.net/tag/fractales|broader_prefLabel|Mathématiques +http://www.semanlink.net/tag/fractales|broader_altLabel|Math +http://www.semanlink.net/tag/robot_humanoide|prefLabel|Robot humanoïde +http://www.semanlink.net/tag/robot_humanoide|broader|http://www.semanlink.net/tag/robotique +http://www.semanlink.net/tag/robot_humanoide|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/robot_humanoide|uri|http://www.semanlink.net/tag/robot_humanoide +http://www.semanlink.net/tag/robot_humanoide|broader_prefLabel|Robotique +http://www.semanlink.net/tag/robot_humanoide|broader_altLabel|Robotics +http://www.semanlink.net/tag/robot_humanoide|broader_altLabel|Robot +http://www.semanlink.net/tag/planeteafrique|prefLabel|PlaneteAfrique +http://www.semanlink.net/tag/planeteafrique|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/planeteafrique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/planeteafrique|uri|http://www.semanlink.net/tag/planeteafrique +http://www.semanlink.net/tag/planeteafrique|broader_prefLabel|Afrique +http://www.semanlink.net/tag/planeteafrique|broader_altLabel|Africa +http://www.semanlink.net/tag/jerma|prefLabel|Jerma +http://www.semanlink.net/tag/jerma|broader|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/jerma|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/jerma|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jerma|describedBy|https://fr.wikipedia.org/wiki/Zarmas +http://www.semanlink.net/tag/jerma|altLabel|Djerma +http://www.semanlink.net/tag/jerma|altLabel|Zarma +http://www.semanlink.net/tag/jerma|uri|http://www.semanlink.net/tag/jerma +http://www.semanlink.net/tag/jerma|broader_prefLabel|Peuples +http://www.semanlink.net/tag/jerma|broader_prefLabel|Niger +http://www.semanlink.net/tag/jerma|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/jerma|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/jerma|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/pour_les_nuls|creationTime|2013-03-12T16:13:54Z +http://www.semanlink.net/tag/pour_les_nuls|prefLabel|Pour les nuls +http://www.semanlink.net/tag/pour_les_nuls|broader|http://www.semanlink.net/tag/howto_tutorial_faq +http://www.semanlink.net/tag/pour_les_nuls|creationDate|2013-03-12 +http://www.semanlink.net/tag/pour_les_nuls|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pour_les_nuls|uri|http://www.semanlink.net/tag/pour_les_nuls +http://www.semanlink.net/tag/pour_les_nuls|broader_prefLabel|Howto, tutorial, FAQ 
+http://www.semanlink.net/tag/web_site_design|prefLabel|Web site design +http://www.semanlink.net/tag/web_site_design|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/web_site_design|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_site_design|uri|http://www.semanlink.net/tag/web_site_design +http://www.semanlink.net/tag/web_site_design|broader_prefLabel|Internet +http://www.semanlink.net/tag/sorbonne|prefLabel|Sorbonne +http://www.semanlink.net/tag/sorbonne|broader|http://www.semanlink.net/tag/universites_francaises +http://www.semanlink.net/tag/sorbonne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sorbonne|uri|http://www.semanlink.net/tag/sorbonne +http://www.semanlink.net/tag/sorbonne|broader_prefLabel|Universités françaises +http://www.semanlink.net/tag/sport_de_combat|prefLabel|Sport de combat +http://www.semanlink.net/tag/sport_de_combat|broader|http://www.semanlink.net/tag/sport +http://www.semanlink.net/tag/sport_de_combat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sport_de_combat|uri|http://www.semanlink.net/tag/sport_de_combat +http://www.semanlink.net/tag/sport_de_combat|broader_prefLabel|Sport +http://www.semanlink.net/tag/anisotropy_in_lm_space|creationTime|2021-10-26T15:58:14Z +http://www.semanlink.net/tag/anisotropy_in_lm_space|prefLabel|Anisotropy in LM space +http://www.semanlink.net/tag/anisotropy_in_lm_space|broader|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/tag/anisotropy_in_lm_space|related|http://www.semanlink.net/tag/sif_embeddings +http://www.semanlink.net/tag/anisotropy_in_lm_space|creationDate|2021-10-26 +http://www.semanlink.net/tag/anisotropy_in_lm_space|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/anisotropy_in_lm_space|uri|http://www.semanlink.net/tag/anisotropy_in_lm_space +http://www.semanlink.net/tag/anisotropy_in_lm_space|broader_prefLabel|Language model +http://www.semanlink.net/tag/anisotropy_in_lm_space|broader_altLabel|Language Modeling +http://www.semanlink.net/tag/anisotropy_in_lm_space|broader_altLabel|LM +http://www.semanlink.net/tag/anisotropy_in_lm_space|broader_altLabel|Statistical Language Model +http://www.semanlink.net/tag/anisotropy_in_lm_space|broader_related|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/tag/anisotropy_in_lm_space|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/combinatorial_generalization|creationTime|2019-08-25T13:06:57Z +http://www.semanlink.net/tag/combinatorial_generalization|prefLabel|Combinatorial generalization +http://www.semanlink.net/tag/combinatorial_generalization|related|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/tag/combinatorial_generalization|creationDate|2019-08-25 +http://www.semanlink.net/tag/combinatorial_generalization|comment|"the ability to understand and produce novel combinations of already familiar elements +" +http://www.semanlink.net/tag/combinatorial_generalization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/combinatorial_generalization|uri|http://www.semanlink.net/tag/combinatorial_generalization +http://www.semanlink.net/tag/library_of_alexandria|creationTime|2015-07-14T23:51:14Z +http://www.semanlink.net/tag/library_of_alexandria|prefLabel|Library of Alexandria 
+http://www.semanlink.net/tag/library_of_alexandria|broader|http://www.semanlink.net/tag/egypte_antique +http://www.semanlink.net/tag/library_of_alexandria|broader|http://www.semanlink.net/tag/alexandria +http://www.semanlink.net/tag/library_of_alexandria|creationDate|2015-07-14 +http://www.semanlink.net/tag/library_of_alexandria|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/library_of_alexandria|describedBy|https://en.wikipedia.org/wiki/Library_of_Alexandria +http://www.semanlink.net/tag/library_of_alexandria|uri|http://www.semanlink.net/tag/library_of_alexandria +http://www.semanlink.net/tag/library_of_alexandria|broader_prefLabel|Egypte antique +http://www.semanlink.net/tag/library_of_alexandria|broader_prefLabel|Alexandria +http://www.semanlink.net/tag/html_dev|prefLabel|HTML Dev +http://www.semanlink.net/tag/html_dev|broader|http://www.semanlink.net/tag/html +http://www.semanlink.net/tag/html_dev|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/html_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/html_dev|uri|http://www.semanlink.net/tag/html_dev +http://www.semanlink.net/tag/html_dev|broader_prefLabel|HTML +http://www.semanlink.net/tag/html_dev|broader_prefLabel|Dev +http://www.semanlink.net/tag/mirek_sopek|creationTime|2013-01-24T01:20:20Z +http://www.semanlink.net/tag/mirek_sopek|prefLabel|Mirek Sopek +http://www.semanlink.net/tag/mirek_sopek|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/mirek_sopek|related|http://www.semanlink.net/tag/makolab +http://www.semanlink.net/tag/mirek_sopek|creationDate|2013-01-24 +http://www.semanlink.net/tag/mirek_sopek|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mirek_sopek|uri|http://www.semanlink.net/tag/mirek_sopek +http://www.semanlink.net/tag/mirek_sopek|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/democratie|prefLabel|Démocratie +http://www.semanlink.net/tag/democratie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/democratie|uri|http://www.semanlink.net/tag/democratie +http://www.semanlink.net/tag/aratta|prefLabel|Aratta +http://www.semanlink.net/tag/aratta|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aratta|uri|http://www.semanlink.net/tag/aratta +http://www.semanlink.net/tag/semantic_web_and_oop|prefLabel|Semantic Web and OOP +http://www.semanlink.net/tag/semantic_web_and_oop|broader|http://www.semanlink.net/tag/object_oriented_programming +http://www.semanlink.net/tag/semantic_web_and_oop|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_and_oop|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_and_oop|uri|http://www.semanlink.net/tag/semantic_web_and_oop +http://www.semanlink.net/tag/semantic_web_and_oop|broader_prefLabel|Object Oriented Programming +http://www.semanlink.net/tag/semantic_web_and_oop|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_and_oop|broader_altLabel|OOP +http://www.semanlink.net/tag/semantic_web_and_oop|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_and_oop|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/sportif|prefLabel|Sportif +http://www.semanlink.net/tag/sportif|broader|http://www.semanlink.net/tag/sport 
+http://www.semanlink.net/tag/sportif|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sportif|uri|http://www.semanlink.net/tag/sportif +http://www.semanlink.net/tag/sportif|broader_prefLabel|Sport +http://www.semanlink.net/tag/exploration_test|creationTime|2013-03-12T16:12:50Z +http://www.semanlink.net/tag/exploration_test|prefLabel|Exploration test +http://www.semanlink.net/tag/exploration_test|creationDate|2013-03-12 +http://www.semanlink.net/tag/exploration_test|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exploration_test|uri|http://www.semanlink.net/tag/exploration_test +http://www.semanlink.net/tag/neo_nazis|creationTime|2017-08-17T13:19:33Z +http://www.semanlink.net/tag/neo_nazis|prefLabel|Neo-Nazis +http://www.semanlink.net/tag/neo_nazis|creationDate|2017-08-17 +http://www.semanlink.net/tag/neo_nazis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neo_nazis|uri|http://www.semanlink.net/tag/neo_nazis +http://www.semanlink.net/tag/apple_sucks|creationTime|2010-10-26T00:31:42Z +http://www.semanlink.net/tag/apple_sucks|prefLabel|Apple sucks +http://www.semanlink.net/tag/apple_sucks|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/apple_sucks|creationDate|2010-10-26 +http://www.semanlink.net/tag/apple_sucks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apple_sucks|uri|http://www.semanlink.net/tag/apple_sucks +http://www.semanlink.net/tag/apple_sucks|broader_prefLabel|Apple +http://www.semanlink.net/tag/pierre_yves_vandenbussche|creationTime|2018-04-10T17:49:23Z +http://www.semanlink.net/tag/pierre_yves_vandenbussche|prefLabel| Pierre-Yves Vandenbussche +http://www.semanlink.net/tag/pierre_yves_vandenbussche|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/pierre_yves_vandenbussche|related|http://www.semanlink.net/tag/lov_linked_open_vocabularies +http://www.semanlink.net/tag/pierre_yves_vandenbussche|creationDate|2018-04-10 +http://www.semanlink.net/tag/pierre_yves_vandenbussche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pierre_yves_vandenbussche|uri|http://www.semanlink.net/tag/pierre_yves_vandenbussche +http://www.semanlink.net/tag/pierre_yves_vandenbussche|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/chico_buarque|prefLabel|Chico Buarque +http://www.semanlink.net/tag/chico_buarque|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/chico_buarque|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/chico_buarque|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chico_buarque|uri|http://www.semanlink.net/tag/chico_buarque +http://www.semanlink.net/tag/chico_buarque|broader_prefLabel|Brésil +http://www.semanlink.net/tag/chico_buarque|broader_prefLabel|Musicien +http://www.semanlink.net/tag/chico_buarque|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/serbie|prefLabel|Serbie +http://www.semanlink.net/tag/serbie|broader|http://www.semanlink.net/tag/yougoslavie +http://www.semanlink.net/tag/serbie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/serbie|uri|http://www.semanlink.net/tag/serbie +http://www.semanlink.net/tag/serbie|broader_prefLabel|Yougoslavie +http://www.semanlink.net/tag/serbie|broader_prefLabel|Ex Yougoslavie 
+http://www.semanlink.net/tag/bolsonaro|creationTime|2018-10-26T00:14:49Z +http://www.semanlink.net/tag/bolsonaro|prefLabel|Bolsonaro +http://www.semanlink.net/tag/bolsonaro|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/bolsonaro|broader|http://www.semanlink.net/tag/neo_fascites +http://www.semanlink.net/tag/bolsonaro|creationDate|2018-10-26 +http://www.semanlink.net/tag/bolsonaro|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bolsonaro|uri|http://www.semanlink.net/tag/bolsonaro +http://www.semanlink.net/tag/bolsonaro|broader_prefLabel|Brésil +http://www.semanlink.net/tag/bolsonaro|broader_prefLabel|Neo-fascites +http://www.semanlink.net/tag/bolsonaro|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/dispersion_des_graines|creationTime|2020-10-15T02:04:35Z +http://www.semanlink.net/tag/dispersion_des_graines|prefLabel|Dispersion des graines +http://www.semanlink.net/tag/dispersion_des_graines|broader|http://www.semanlink.net/tag/plante +http://www.semanlink.net/tag/dispersion_des_graines|creationDate|2020-10-15 +http://www.semanlink.net/tag/dispersion_des_graines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dispersion_des_graines|uri|http://www.semanlink.net/tag/dispersion_des_graines +http://www.semanlink.net/tag/dispersion_des_graines|broader_prefLabel|Plante +http://www.semanlink.net/tag/rachel_thomas|creationTime|2019-03-14T21:02:52Z +http://www.semanlink.net/tag/rachel_thomas|prefLabel|Rachel Thomas +http://www.semanlink.net/tag/rachel_thomas|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/rachel_thomas|related|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/tag/rachel_thomas|creationDate|2019-03-14 +http://www.semanlink.net/tag/rachel_thomas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rachel_thomas|uri|http://www.semanlink.net/tag/rachel_thomas +http://www.semanlink.net/tag/rachel_thomas|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/inria|prefLabel|INRIA +http://www.semanlink.net/tag/inria|broader|http://www.semanlink.net/tag/recherche +http://www.semanlink.net/tag/inria|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/inria|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/inria|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/inria|uri|http://www.semanlink.net/tag/inria +http://www.semanlink.net/tag/inria|broader_prefLabel|Recherche +http://www.semanlink.net/tag/inria|broader_prefLabel|Informatique +http://www.semanlink.net/tag/inria|broader_prefLabel|France +http://www.semanlink.net/tag/inria|broader_altLabel|Research +http://www.semanlink.net/tag/archeologie_chinoise|prefLabel|Archéologie chinoise +http://www.semanlink.net/tag/archeologie_chinoise|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/archeologie_chinoise|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/archeologie_chinoise|creationDate|2006-08-29 +http://www.semanlink.net/tag/archeologie_chinoise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/archeologie_chinoise|uri|http://www.semanlink.net/tag/archeologie_chinoise +http://www.semanlink.net/tag/archeologie_chinoise|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/archeologie_chinoise|broader_prefLabel|Chine 
+http://www.semanlink.net/tag/archeologie_chinoise|broader_altLabel|China +http://www.semanlink.net/tag/archeologie_chinoise|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/amsterdam|prefLabel|Amsterdam +http://www.semanlink.net/tag/amsterdam|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/amsterdam|broader|http://www.semanlink.net/tag/pays_bas +http://www.semanlink.net/tag/amsterdam|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amsterdam|uri|http://www.semanlink.net/tag/amsterdam +http://www.semanlink.net/tag/amsterdam|broader_prefLabel|Ville +http://www.semanlink.net/tag/amsterdam|broader_prefLabel|Pays-Bas +http://www.semanlink.net/tag/son_3d|creationTime|2010-11-12T15:36:03Z +http://www.semanlink.net/tag/son_3d|prefLabel|Son 3D +http://www.semanlink.net/tag/son_3d|creationDate|2010-11-12 +http://www.semanlink.net/tag/son_3d|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/son_3d|uri|http://www.semanlink.net/tag/son_3d +http://www.semanlink.net/tag/mooc|creationTime|2012-11-30T13:51:14Z +http://www.semanlink.net/tag/mooc|prefLabel|MOOC +http://www.semanlink.net/tag/mooc|broader|http://www.semanlink.net/tag/e_learning +http://www.semanlink.net/tag/mooc|creationDate|2012-11-30 +http://www.semanlink.net/tag/mooc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mooc|uri|http://www.semanlink.net/tag/mooc +http://www.semanlink.net/tag/mooc|broader_prefLabel|Online Learning +http://www.semanlink.net/tag/mooc|broader_altLabel|Pédagogie numérique +http://www.semanlink.net/tag/blink|creationTime|2020-05-02T11:35:43Z +http://www.semanlink.net/tag/blink|prefLabel|BLINK +http://www.semanlink.net/tag/blink|broader|http://www.semanlink.net/tag/zero_shot_entity_linking +http://www.semanlink.net/tag/blink|broader|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/blink|broader|http://www.semanlink.net/tag/wikification +http://www.semanlink.net/tag/blink|related|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/blink|creationDate|2020-05-02 +http://www.semanlink.net/tag/blink|comment|"""Better entity LINKing"", @facebookai open-source entity linker. 
[GitHub](https://github.com/facebookresearch/BLINK)" +http://www.semanlink.net/tag/blink|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blink|uri|http://www.semanlink.net/tag/blink +http://www.semanlink.net/tag/blink|broader_prefLabel|Zero-shot Entity Linking +http://www.semanlink.net/tag/blink|broader_prefLabel|NLP@Facebook +http://www.semanlink.net/tag/blink|broader_prefLabel|Wikification +http://www.semanlink.net/tag/aqmi|creationTime|2011-01-08T12:31:15Z +http://www.semanlink.net/tag/aqmi|prefLabel|AQMI +http://www.semanlink.net/tag/aqmi|broader|http://www.semanlink.net/tag/al_qaida +http://www.semanlink.net/tag/aqmi|broader|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/aqmi|related|http://www.semanlink.net/tag/ben_laden +http://www.semanlink.net/tag/aqmi|creationDate|2011-01-08 +http://www.semanlink.net/tag/aqmi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/aqmi|uri|http://www.semanlink.net/tag/aqmi +http://www.semanlink.net/tag/aqmi|broader_prefLabel|Al-Qaida +http://www.semanlink.net/tag/aqmi|broader_prefLabel|Sahel +http://www.semanlink.net/tag/zouk|creationTime|2016-05-15T03:00:08Z +http://www.semanlink.net/tag/zouk|prefLabel|Zouk +http://www.semanlink.net/tag/zouk|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/zouk|broader|http://www.semanlink.net/tag/antilles +http://www.semanlink.net/tag/zouk|creationDate|2016-05-15 +http://www.semanlink.net/tag/zouk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zouk|describedBy|https://fr.wikipedia.org/wiki/Zouk +http://www.semanlink.net/tag/zouk|uri|http://www.semanlink.net/tag/zouk +http://www.semanlink.net/tag/zouk|broader_prefLabel|Musique +http://www.semanlink.net/tag/zouk|broader_prefLabel|Antilles +http://www.semanlink.net/tag/zouk|broader_altLabel|Music +http://www.semanlink.net/tag/police|creationTime|2007-09-18T22:37:15Z +http://www.semanlink.net/tag/police|prefLabel|Police +http://www.semanlink.net/tag/police|creationDate|2007-09-18 +http://www.semanlink.net/tag/police|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/police|uri|http://www.semanlink.net/tag/police +http://www.semanlink.net/tag/keyword_spotting|creationTime|2017-12-15T09:04:58Z +http://www.semanlink.net/tag/keyword_spotting|prefLabel|Keyword Spotting +http://www.semanlink.net/tag/keyword_spotting|broader|http://www.semanlink.net/tag/keywords +http://www.semanlink.net/tag/keyword_spotting|creationDate|2017-12-15 +http://www.semanlink.net/tag/keyword_spotting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/keyword_spotting|uri|http://www.semanlink.net/tag/keyword_spotting +http://www.semanlink.net/tag/keyword_spotting|broader_prefLabel|Keywords +http://www.semanlink.net/tag/keyword_spotting|broader_related|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/in_memory_computing|creationTime|2015-10-14T12:47:23Z +http://www.semanlink.net/tag/in_memory_computing|prefLabel|In-memory computing +http://www.semanlink.net/tag/in_memory_computing|related|http://www.semanlink.net/tag/bitmap_index +http://www.semanlink.net/tag/in_memory_computing|creationDate|2015-10-14 +http://www.semanlink.net/tag/in_memory_computing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/in_memory_computing|uri|http://www.semanlink.net/tag/in_memory_computing 
+http://www.semanlink.net/tag/l_humanite_merite_de_disparaitre|creationTime|2019-12-26T13:02:43Z +http://www.semanlink.net/tag/l_humanite_merite_de_disparaitre|prefLabel|L'humanité mérite de disparaître +http://www.semanlink.net/tag/l_humanite_merite_de_disparaitre|creationDate|2019-12-26 +http://www.semanlink.net/tag/l_humanite_merite_de_disparaitre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/l_humanite_merite_de_disparaitre|uri|http://www.semanlink.net/tag/l_humanite_merite_de_disparaitre +http://www.semanlink.net/tag/eau|prefLabel|Eau +http://www.semanlink.net/tag/eau|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/eau|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eau|uri|http://www.semanlink.net/tag/eau +http://www.semanlink.net/tag/eau|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/decisions_en_entreprise|creationTime|2012-12-06T23:46:27Z +http://www.semanlink.net/tag/decisions_en_entreprise|prefLabel|Décisions en entreprise +http://www.semanlink.net/tag/decisions_en_entreprise|broader|http://www.semanlink.net/tag/management +http://www.semanlink.net/tag/decisions_en_entreprise|creationDate|2012-12-06 +http://www.semanlink.net/tag/decisions_en_entreprise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/decisions_en_entreprise|uri|http://www.semanlink.net/tag/decisions_en_entreprise +http://www.semanlink.net/tag/decisions_en_entreprise|broader_prefLabel|Management +http://www.semanlink.net/tag/decentralized_social_network|creationTime|2014-09-17T00:00:26Z +http://www.semanlink.net/tag/decentralized_social_network|prefLabel|Decentralized social network +http://www.semanlink.net/tag/decentralized_social_network|broader|http://www.semanlink.net/tag/social_networks +http://www.semanlink.net/tag/decentralized_social_network|creationDate|2014-09-17 +http://www.semanlink.net/tag/decentralized_social_network|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/decentralized_social_network|uri|http://www.semanlink.net/tag/decentralized_social_network +http://www.semanlink.net/tag/decentralized_social_network|broader_prefLabel|Social Networks +http://www.semanlink.net/tag/ai_ibm|creationTime|2020-09-19T10:01:54Z +http://www.semanlink.net/tag/ai_ibm|prefLabel|AI@IBM +http://www.semanlink.net/tag/ai_ibm|broader|http://www.semanlink.net/tag/ibm +http://www.semanlink.net/tag/ai_ibm|creationDate|2020-09-19 +http://www.semanlink.net/tag/ai_ibm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_ibm|uri|http://www.semanlink.net/tag/ai_ibm +http://www.semanlink.net/tag/ai_ibm|broader_prefLabel|IBM +http://www.semanlink.net/tag/brain_to_brain_interface|creationTime|2013-02-28T22:50:26Z +http://www.semanlink.net/tag/brain_to_brain_interface|prefLabel|Brain-to-Brain Interface +http://www.semanlink.net/tag/brain_to_brain_interface|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/brain_to_brain_interface|creationDate|2013-02-28 +http://www.semanlink.net/tag/brain_to_brain_interface|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/brain_to_brain_interface|uri|http://www.semanlink.net/tag/brain_to_brain_interface +http://www.semanlink.net/tag/brain_to_brain_interface|broader_prefLabel|Brain +http://www.semanlink.net/tag/brain_to_brain_interface|broader_altLabel|Cerveau 
+http://www.semanlink.net/tag/brain_to_brain_interface|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/problemes_sanitaires|creationTime|2016-08-28T15:58:51Z +http://www.semanlink.net/tag/problemes_sanitaires|prefLabel|Problèmes sanitaires +http://www.semanlink.net/tag/problemes_sanitaires|creationDate|2016-08-28 +http://www.semanlink.net/tag/problemes_sanitaires|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/problemes_sanitaires|uri|http://www.semanlink.net/tag/problemes_sanitaires +http://www.semanlink.net/tag/epr|creationTime|2012-12-10T20:03:57Z +http://www.semanlink.net/tag/epr|prefLabel|EPR +http://www.semanlink.net/tag/epr|broader|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/tag/epr|broader|http://www.semanlink.net/tag/projet_pharaonique +http://www.semanlink.net/tag/epr|creationDate|2012-12-10 +http://www.semanlink.net/tag/epr|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/epr|uri|http://www.semanlink.net/tag/epr +http://www.semanlink.net/tag/epr|broader_prefLabel|Industrie nucléaire +http://www.semanlink.net/tag/epr|broader_prefLabel|Projet pharaonique +http://www.semanlink.net/tag/epr|broader_altLabel|Nucléaire +http://www.semanlink.net/tag/sitemaps|creationTime|2011-07-25T17:29:40Z +http://www.semanlink.net/tag/sitemaps|prefLabel|Sitemaps +http://www.semanlink.net/tag/sitemaps|creationDate|2011-07-25 +http://www.semanlink.net/tag/sitemaps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sitemaps|describedBy|https://en.wikipedia.org/wiki/Sitemaps +http://www.semanlink.net/tag/sitemaps|uri|http://www.semanlink.net/tag/sitemaps +http://www.semanlink.net/tag/south_by_southwest|creationTime|2014-03-08T10:41:22Z +http://www.semanlink.net/tag/south_by_southwest|prefLabel|South by Southwest +http://www.semanlink.net/tag/south_by_southwest|creationDate|2014-03-08 +http://www.semanlink.net/tag/south_by_southwest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/south_by_southwest|altLabel|SXSW +http://www.semanlink.net/tag/south_by_southwest|uri|http://www.semanlink.net/tag/south_by_southwest +http://www.semanlink.net/tag/semantic_cms|creationTime|2011-09-09T21:50:26Z +http://www.semanlink.net/tag/semantic_cms|prefLabel|Semantic CMS +http://www.semanlink.net/tag/semantic_cms|broader|http://www.semanlink.net/tag/semantic_technology +http://www.semanlink.net/tag/semantic_cms|broader|http://www.semanlink.net/tag/cms +http://www.semanlink.net/tag/semantic_cms|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/semantic_cms|creationDate|2011-09-09 +http://www.semanlink.net/tag/semantic_cms|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_cms|uri|http://www.semanlink.net/tag/semantic_cms +http://www.semanlink.net/tag/semantic_cms|broader_prefLabel|Semantic technology +http://www.semanlink.net/tag/semantic_cms|broader_prefLabel|CMS +http://www.semanlink.net/tag/semantic_cms|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/norvege|prefLabel|Norvège +http://www.semanlink.net/tag/norvege|broader|http://www.semanlink.net/tag/scandinavie +http://www.semanlink.net/tag/norvege|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/norvege|uri|http://www.semanlink.net/tag/norvege 
+http://www.semanlink.net/tag/norvege|broader_prefLabel|Scandinavie +http://www.semanlink.net/tag/photo_numerique|prefLabel|Photo numérique +http://www.semanlink.net/tag/photo_numerique|broader|http://www.semanlink.net/tag/photo +http://www.semanlink.net/tag/photo_numerique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/photo_numerique|uri|http://www.semanlink.net/tag/photo_numerique +http://www.semanlink.net/tag/photo_numerique|broader_prefLabel|Photo +http://www.semanlink.net/tag/photo_numerique|broader_altLabel|Images +http://www.semanlink.net/tag/olivier_grisel|creationTime|2012-07-26T16:00:15Z +http://www.semanlink.net/tag/olivier_grisel|prefLabel|Olivier Grisel +http://www.semanlink.net/tag/olivier_grisel|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/olivier_grisel|related|http://www.semanlink.net/tag/apache_stanbol +http://www.semanlink.net/tag/olivier_grisel|related|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://www.semanlink.net/tag/olivier_grisel|related|http://www.semanlink.net/tag/semweb_pro_2012 +http://www.semanlink.net/tag/olivier_grisel|creationDate|2012-07-26 +http://www.semanlink.net/tag/olivier_grisel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/olivier_grisel|uri|http://www.semanlink.net/tag/olivier_grisel +http://www.semanlink.net/tag/olivier_grisel|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/maxent_for_nlp|creationTime|2017-11-03T12:28:24Z +http://www.semanlink.net/tag/maxent_for_nlp|prefLabel|MaxEnt for NLP +http://www.semanlink.net/tag/maxent_for_nlp|broader|http://www.semanlink.net/tag/maxent_models +http://www.semanlink.net/tag/maxent_for_nlp|creationDate|2017-11-03 +http://www.semanlink.net/tag/maxent_for_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maxent_for_nlp|uri|http://www.semanlink.net/tag/maxent_for_nlp +http://www.semanlink.net/tag/maxent_for_nlp|broader_prefLabel|Maxent models +http://www.semanlink.net/tag/singe|prefLabel|Singe +http://www.semanlink.net/tag/singe|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/singe|broader|http://www.semanlink.net/tag/primate +http://www.semanlink.net/tag/singe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/singe|uri|http://www.semanlink.net/tag/singe +http://www.semanlink.net/tag/singe|broader_prefLabel|Animal +http://www.semanlink.net/tag/singe|broader_prefLabel|Primate +http://www.semanlink.net/tag/factory_farming|creationTime|2013-09-29T13:25:38Z +http://www.semanlink.net/tag/factory_farming|prefLabel|Factory farming +http://www.semanlink.net/tag/factory_farming|broader|http://www.semanlink.net/tag/agriculture_industrielle +http://www.semanlink.net/tag/factory_farming|creationDate|2013-09-29 +http://www.semanlink.net/tag/factory_farming|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/factory_farming|describedBy|https://en.wikipedia.org/wiki/Factory_farming +http://www.semanlink.net/tag/factory_farming|uri|http://www.semanlink.net/tag/factory_farming +http://www.semanlink.net/tag/factory_farming|broader_prefLabel|Agriculture industrielle +http://www.semanlink.net/tag/lombok|creationTime|2021-07-28T09:25:58Z +http://www.semanlink.net/tag/lombok|prefLabel|Lombok +http://www.semanlink.net/tag/lombok|creationDate|2021-07-28 
+http://www.semanlink.net/tag/lombok|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/lombok|uri|http://www.semanlink.net/tag/lombok
+http://www.semanlink.net/tag/ville_sans_voiture|creationTime|2020-11-16T12:09:37Z
+http://www.semanlink.net/tag/ville_sans_voiture|prefLabel|Ville sans voiture
+http://www.semanlink.net/tag/ville_sans_voiture|broader|http://www.semanlink.net/tag/urbanisme
+http://www.semanlink.net/tag/ville_sans_voiture|broader|http://www.semanlink.net/tag/automobile
+http://www.semanlink.net/tag/ville_sans_voiture|creationDate|2020-11-16
+http://www.semanlink.net/tag/ville_sans_voiture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ville_sans_voiture|uri|http://www.semanlink.net/tag/ville_sans_voiture
+http://www.semanlink.net/tag/ville_sans_voiture|broader_prefLabel|Urbanisme
+http://www.semanlink.net/tag/ville_sans_voiture|broader_prefLabel|Automobile
+http://www.semanlink.net/tag/ville_sans_voiture|broader_altLabel|Automotive
+http://www.semanlink.net/tag/c2gweb_seo|creationTime|2017-10-18T13:53:32Z
+http://www.semanlink.net/tag/c2gweb_seo|prefLabel|C2GWeb: SEO
+http://www.semanlink.net/tag/c2gweb_seo|broader|http://www.semanlink.net/tag/c2gweb
+http://www.semanlink.net/tag/c2gweb_seo|creationDate|2017-10-18
+http://www.semanlink.net/tag/c2gweb_seo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/c2gweb_seo|uri|http://www.semanlink.net/tag/c2gweb_seo
+http://www.semanlink.net/tag/c2gweb_seo|broader_prefLabel|C2GWeb
+http://www.semanlink.net/tag/good|prefLabel|Good
+http://www.semanlink.net/tag/good|broader|http://www.semanlink.net/tag/i_like
+http://www.semanlink.net/tag/good|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/good|uri|http://www.semanlink.net/tag/good
+http://www.semanlink.net/tag/good|broader_prefLabel|I like
+http://www.semanlink.net/tag/acoustique|creationTime|2007-11-28T01:32:41Z
+http://www.semanlink.net/tag/acoustique|prefLabel|Acoustique
+http://www.semanlink.net/tag/acoustique|broader|http://www.semanlink.net/tag/physique
+http://www.semanlink.net/tag/acoustique|creationDate|2007-11-28
+http://www.semanlink.net/tag/acoustique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/acoustique|uri|http://www.semanlink.net/tag/acoustique
+http://www.semanlink.net/tag/acoustique|broader_prefLabel|Physique
+http://www.semanlink.net/tag/acoustique|broader_altLabel|Physics
+http://www.semanlink.net/tag/income_inequality|creationTime|2014-07-29T19:23:02Z
+http://www.semanlink.net/tag/income_inequality|prefLabel|Income inequality
+http://www.semanlink.net/tag/income_inequality|broader|http://www.semanlink.net/tag/inegalites
+http://www.semanlink.net/tag/income_inequality|related|http://www.semanlink.net/tag/thomas_piketty
+http://www.semanlink.net/tag/income_inequality|creationDate|2014-07-29
+http://www.semanlink.net/tag/income_inequality|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/income_inequality|uri|http://www.semanlink.net/tag/income_inequality
+http://www.semanlink.net/tag/income_inequality|broader_prefLabel|Inégalités
+http://www.semanlink.net/tag/prix_nobel_d_economie|prefLabel|Prix Nobel d'économie
+http://www.semanlink.net/tag/prix_nobel_d_economie|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/prix_nobel_d_economie|broader|http://www.semanlink.net/tag/prix_nobel +http://www.semanlink.net/tag/prix_nobel_d_economie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/prix_nobel_d_economie|uri|http://www.semanlink.net/tag/prix_nobel_d_economie +http://www.semanlink.net/tag/prix_nobel_d_economie|broader_prefLabel|Economie +http://www.semanlink.net/tag/prix_nobel_d_economie|broader_prefLabel|Prix Nobel +http://www.semanlink.net/tag/prix_nobel_d_economie|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/pentagon|prefLabel|Pentagon +http://www.semanlink.net/tag/pentagon|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/pentagon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pentagon|uri|http://www.semanlink.net/tag/pentagon +http://www.semanlink.net/tag/pentagon|broader_prefLabel|USA +http://www.semanlink.net/tag/pentagon|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/pentagon|broader_altLabel|United States +http://www.semanlink.net/tag/internet_libre|creationTime|2012-01-08T10:56:17Z +http://www.semanlink.net/tag/internet_libre|prefLabel|Internet libre +http://www.semanlink.net/tag/internet_libre|creationDate|2012-01-08 +http://www.semanlink.net/tag/internet_libre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/internet_libre|uri|http://www.semanlink.net/tag/internet_libre +http://www.semanlink.net/tag/fonds_speculatifs|prefLabel|Fonds spéculatifs +http://www.semanlink.net/tag/fonds_speculatifs|broader|http://www.semanlink.net/tag/speculation +http://www.semanlink.net/tag/fonds_speculatifs|broader|http://www.semanlink.net/tag/marches_financiers +http://www.semanlink.net/tag/fonds_speculatifs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fonds_speculatifs|altLabel|Hedge funds +http://www.semanlink.net/tag/fonds_speculatifs|uri|http://www.semanlink.net/tag/fonds_speculatifs +http://www.semanlink.net/tag/fonds_speculatifs|broader_prefLabel|Spéculation +http://www.semanlink.net/tag/fonds_speculatifs|broader_prefLabel|Marchés financiers +http://www.semanlink.net/tag/ebola|creationTime|2014-09-15T11:55:44Z +http://www.semanlink.net/tag/ebola|prefLabel|Ebola +http://www.semanlink.net/tag/ebola|broader|http://www.semanlink.net/tag/epidemie +http://www.semanlink.net/tag/ebola|creationDate|2014-09-15 +http://www.semanlink.net/tag/ebola|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ebola|uri|http://www.semanlink.net/tag/ebola +http://www.semanlink.net/tag/ebola|broader_prefLabel|Épidémie +http://www.semanlink.net/tag/sig_ma|creationTime|2009-08-27T14:27:50Z +http://www.semanlink.net/tag/sig_ma|prefLabel|sig.ma +http://www.semanlink.net/tag/sig_ma|broader|http://www.semanlink.net/tag/mashups +http://www.semanlink.net/tag/sig_ma|broader|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/sig_ma|broader|http://www.semanlink.net/tag/deri +http://www.semanlink.net/tag/sig_ma|broader|http://www.semanlink.net/tag/linked_data_browser +http://www.semanlink.net/tag/sig_ma|broader|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/sig_ma|creationDate|2009-08-27 +http://www.semanlink.net/tag/sig_ma|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sig_ma|uri|http://www.semanlink.net/tag/sig_ma 
+http://www.semanlink.net/tag/sig_ma|broader_prefLabel|Mashups +http://www.semanlink.net/tag/sig_ma|broader_prefLabel|Richard Cyganiak +http://www.semanlink.net/tag/sig_ma|broader_prefLabel|DERI +http://www.semanlink.net/tag/sig_ma|broader_prefLabel|Linked Data Browser +http://www.semanlink.net/tag/sig_ma|broader_prefLabel|Giovanni Tummarello +http://www.semanlink.net/tag/sig_ma|broader_altLabel|dowhatimean.net +http://www.semanlink.net/tag/sig_ma|broader_related|http://www.semanlink.net/tag/edward_curry +http://www.semanlink.net/tag/sig_ma|broader_related|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/sig_ma|broader_related|http://www.semanlink.net/tag/axel_polleres +http://www.semanlink.net/tag/sig_ma|broader_related|http://www.semanlink.net/tag/fadi_badra +http://www.semanlink.net/tag/sig_ma|broader_related|http://www.semanlink.net/tag/alexandre_passant +http://www.semanlink.net/tag/sig_ma|broader_related|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/emnlp_2020|creationTime|2020-11-24T09:46:27Z +http://www.semanlink.net/tag/emnlp_2020|prefLabel|EMNLP 2020 +http://www.semanlink.net/tag/emnlp_2020|broader|http://www.semanlink.net/tag/emnlp +http://www.semanlink.net/tag/emnlp_2020|creationDate|2020-11-24 +http://www.semanlink.net/tag/emnlp_2020|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/emnlp_2020|uri|http://www.semanlink.net/tag/emnlp_2020 +http://www.semanlink.net/tag/emnlp_2020|broader_prefLabel|EMNLP +http://www.semanlink.net/tag/enseignement_superieur|prefLabel|Enseignement supérieur +http://www.semanlink.net/tag/enseignement_superieur|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/enseignement_superieur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enseignement_superieur|uri|http://www.semanlink.net/tag/enseignement_superieur +http://www.semanlink.net/tag/enseignement_superieur|broader_prefLabel|Education +http://www.semanlink.net/tag/enseignement_superieur|broader_altLabel|Enseignement +http://www.semanlink.net/tag/nn_tips|creationTime|2018-07-01T11:03:46Z +http://www.semanlink.net/tag/nn_tips|prefLabel|NN tips +http://www.semanlink.net/tag/nn_tips|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/nn_tips|creationDate|2018-07-01 +http://www.semanlink.net/tag/nn_tips|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nn_tips|uri|http://www.semanlink.net/tag/nn_tips +http://www.semanlink.net/tag/nn_tips|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/nn_tips|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/nn_tips|broader_altLabel|ANN +http://www.semanlink.net/tag/nn_tips|broader_altLabel|NN +http://www.semanlink.net/tag/read_write_secure_data_web|creationTime|2014-11-08T15:09:11Z +http://www.semanlink.net/tag/read_write_secure_data_web|prefLabel|Read-Write Secure Data Web +http://www.semanlink.net/tag/read_write_secure_data_web|broader|http://www.semanlink.net/tag/security_and_rest +http://www.semanlink.net/tag/read_write_secure_data_web|broader|http://www.semanlink.net/tag/read_write_linked_data +http://www.semanlink.net/tag/read_write_secure_data_web|creationDate|2014-11-08 +http://www.semanlink.net/tag/read_write_secure_data_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/read_write_secure_data_web|uri|http://www.semanlink.net/tag/read_write_secure_data_web +http://www.semanlink.net/tag/read_write_secure_data_web|broader_prefLabel|Security and REST +http://www.semanlink.net/tag/read_write_secure_data_web|broader_prefLabel|Read-Write Linked Data +http://www.semanlink.net/tag/read_write_secure_data_web|broader_altLabel|RW Linked Data +http://www.semanlink.net/tag/evernote|creationTime|2013-10-19T00:16:43Z +http://www.semanlink.net/tag/evernote|prefLabel|Evernote +http://www.semanlink.net/tag/evernote|creationDate|2013-10-19 +http://www.semanlink.net/tag/evernote|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/evernote|homepage|http://evernote.com +http://www.semanlink.net/tag/evernote|uri|http://www.semanlink.net/tag/evernote +http://www.semanlink.net/tag/graph_database_and_nlp|creationTime|2019-12-07T18:59:07Z +http://www.semanlink.net/tag/graph_database_and_nlp|prefLabel|Graph database and NLP +http://www.semanlink.net/tag/graph_database_and_nlp|broader|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/tag/graph_database_and_nlp|broader|http://www.semanlink.net/tag/graph_database +http://www.semanlink.net/tag/graph_database_and_nlp|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/graph_database_and_nlp|related|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/tag/graph_database_and_nlp|creationDate|2019-12-07 +http://www.semanlink.net/tag/graph_database_and_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_database_and_nlp|uri|http://www.semanlink.net/tag/graph_database_and_nlp +http://www.semanlink.net/tag/graph_database_and_nlp|broader_prefLabel|Knowledge Representation +http://www.semanlink.net/tag/graph_database_and_nlp|broader_prefLabel|Graph database +http://www.semanlink.net/tag/graph_database_and_nlp|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/graph_database_and_nlp|broader_altLabel|KR +http://www.semanlink.net/tag/war_on_drugs|creationTime|2016-07-11T19:21:03Z +http://www.semanlink.net/tag/war_on_drugs|prefLabel|war on drugs +http://www.semanlink.net/tag/war_on_drugs|broader|http://www.semanlink.net/tag/prohibition_des_narcotiques +http://www.semanlink.net/tag/war_on_drugs|creationDate|2016-07-11 +http://www.semanlink.net/tag/war_on_drugs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/war_on_drugs|uri|http://www.semanlink.net/tag/war_on_drugs +http://www.semanlink.net/tag/war_on_drugs|broader_prefLabel|Prohibition des narcotiques +http://www.semanlink.net/tag/flippant|creationTime|2017-09-25T19:52:26Z +http://www.semanlink.net/tag/flippant|prefLabel|Flippant +http://www.semanlink.net/tag/flippant|broader|http://www.semanlink.net/tag/ca_craint +http://www.semanlink.net/tag/flippant|creationDate|2017-09-25 +http://www.semanlink.net/tag/flippant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/flippant|uri|http://www.semanlink.net/tag/flippant +http://www.semanlink.net/tag/flippant|broader_prefLabel|Ca craint +http://www.semanlink.net/tag/https|creationTime|2014-08-30T12:42:49Z +http://www.semanlink.net/tag/https|prefLabel|HTTPS +http://www.semanlink.net/tag/https|broader|http://www.semanlink.net/tag/securite_informatique +http://www.semanlink.net/tag/https|creationDate|2014-08-30 
+http://www.semanlink.net/tag/https|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/https|uri|http://www.semanlink.net/tag/https +http://www.semanlink.net/tag/https|broader_prefLabel|Cybersecurity +http://www.semanlink.net/tag/https|broader_prefLabel|Sécurité informatique +http://www.semanlink.net/tag/al_qaida|prefLabel|Al-Qaida +http://www.semanlink.net/tag/al_qaida|broader|http://www.semanlink.net/tag/terrorisme_islamiste +http://www.semanlink.net/tag/al_qaida|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/al_qaida|uri|http://www.semanlink.net/tag/al_qaida +http://www.semanlink.net/tag/al_qaida|broader_prefLabel|Terrorisme islamiste +http://www.semanlink.net/tag/ecosse|creationTime|2012-07-17T18:07:19Z +http://www.semanlink.net/tag/ecosse|prefLabel|Ecosse +http://www.semanlink.net/tag/ecosse|broader|http://www.semanlink.net/tag/royaume_uni +http://www.semanlink.net/tag/ecosse|creationDate|2012-07-17 +http://www.semanlink.net/tag/ecosse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ecosse|uri|http://www.semanlink.net/tag/ecosse +http://www.semanlink.net/tag/ecosse|broader_prefLabel|Royaume Uni +http://www.semanlink.net/tag/ecosse|broader_altLabel|UK +http://www.semanlink.net/tag/erdf|creationTime|2007-05-31T01:21:05Z +http://www.semanlink.net/tag/erdf|prefLabel|eRDF +http://www.semanlink.net/tag/erdf|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/erdf|related|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/erdf|creationDate|2007-05-31 +http://www.semanlink.net/tag/erdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/erdf|uri|http://www.semanlink.net/tag/erdf +http://www.semanlink.net/tag/erdf|broader_prefLabel|RDF +http://www.semanlink.net/tag/erdf|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/erdf|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/erdf|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/erdf|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/erdf|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/ridley_scott|creationTime|2010-02-12T23:57:37Z +http://www.semanlink.net/tag/ridley_scott|prefLabel|Ridley Scott +http://www.semanlink.net/tag/ridley_scott|creationDate|2010-02-12 +http://www.semanlink.net/tag/ridley_scott|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ridley_scott|describedBy|https://en.wikipedia.org/wiki/Ridley_Scott +http://www.semanlink.net/tag/ridley_scott|uri|http://www.semanlink.net/tag/ridley_scott +http://www.semanlink.net/tag/google_seo|creationTime|2017-10-19T14:07:53Z +http://www.semanlink.net/tag/google_seo|prefLabel|Google: SEO +http://www.semanlink.net/tag/google_seo|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_seo|broader|http://www.semanlink.net/tag/seo +http://www.semanlink.net/tag/google_seo|related|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/google_seo|creationDate|2017-10-19 +http://www.semanlink.net/tag/google_seo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_seo|uri|http://www.semanlink.net/tag/google_seo +http://www.semanlink.net/tag/google_seo|broader_prefLabel|Google 
+http://www.semanlink.net/tag/google_seo|broader_prefLabel|SEO +http://www.semanlink.net/tag/google_seo|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/afrique_australe|prefLabel|Afrique australe +http://www.semanlink.net/tag/afrique_australe|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/afrique_australe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afrique_australe|uri|http://www.semanlink.net/tag/afrique_australe +http://www.semanlink.net/tag/afrique_australe|broader_prefLabel|Afrique +http://www.semanlink.net/tag/afrique_australe|broader_altLabel|Africa +http://www.semanlink.net/tag/semantic_statistics|creationTime|2010-07-16T14:23:03Z +http://www.semanlink.net/tag/semantic_statistics|prefLabel|Semantic Statistics +http://www.semanlink.net/tag/semantic_statistics|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_statistics|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/semantic_statistics|broader|http://www.semanlink.net/tag/statistical_data +http://www.semanlink.net/tag/semantic_statistics|creationDate|2010-07-16 +http://www.semanlink.net/tag/semantic_statistics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_statistics|altLabel|RDF and statistics +http://www.semanlink.net/tag/semantic_statistics|uri|http://www.semanlink.net/tag/semantic_statistics +http://www.semanlink.net/tag/semantic_statistics|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_statistics|broader_prefLabel|RDF +http://www.semanlink.net/tag/semantic_statistics|broader_prefLabel|Statistical data +http://www.semanlink.net/tag/semantic_statistics|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_statistics|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/semantic_statistics|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/semantic_statistics|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/semantic_statistics|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/semantic_statistics|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/semantic_statistics|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/grands_problemes_mathematiques|creationTime|2012-05-20T23:06:34Z +http://www.semanlink.net/tag/grands_problemes_mathematiques|prefLabel|Grands problèmes mathématiques +http://www.semanlink.net/tag/grands_problemes_mathematiques|broader|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/grands_problemes_mathematiques|creationDate|2012-05-20 +http://www.semanlink.net/tag/grands_problemes_mathematiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grands_problemes_mathematiques|uri|http://www.semanlink.net/tag/grands_problemes_mathematiques +http://www.semanlink.net/tag/grands_problemes_mathematiques|broader_prefLabel|Mathématiques +http://www.semanlink.net/tag/grands_problemes_mathematiques|broader_altLabel|Math +http://www.semanlink.net/tag/australie|prefLabel|Australie +http://www.semanlink.net/tag/australie|broader|http://www.semanlink.net/tag/oceanie +http://www.semanlink.net/tag/australie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/australie|uri|http://www.semanlink.net/tag/australie 
+http://www.semanlink.net/tag/australie|broader_prefLabel|Océanie +http://www.semanlink.net/tag/bias|creationTime|2018-04-14T11:38:04Z +http://www.semanlink.net/tag/bias|prefLabel|Bias +http://www.semanlink.net/tag/bias|creationDate|2018-04-14 +http://www.semanlink.net/tag/bias|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bias|uri|http://www.semanlink.net/tag/bias +http://www.semanlink.net/tag/text_similarity|creationTime|2017-05-18T01:34:15Z +http://www.semanlink.net/tag/text_similarity|prefLabel|Text Similarity +http://www.semanlink.net/tag/text_similarity|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/text_similarity|related|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/tag/text_similarity|related|http://www.semanlink.net/tag/nlp_and_search +http://www.semanlink.net/tag/text_similarity|related|http://www.semanlink.net/tag/okapi_bm25 +http://www.semanlink.net/tag/text_similarity|creationDate|2017-05-18 +http://www.semanlink.net/tag/text_similarity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_similarity|uri|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/text_similarity|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/hypios|creationTime|2010-05-14T09:05:32Z +http://www.semanlink.net/tag/hypios|prefLabel|Hypios +http://www.semanlink.net/tag/hypios|creationDate|2010-05-14 +http://www.semanlink.net/tag/hypios|comment|The First Social Marketplace for Solutions +http://www.semanlink.net/tag/hypios|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hypios|describedBy|https://www.hypios.com/ +http://www.semanlink.net/tag/hypios|uri|http://www.semanlink.net/tag/hypios +http://www.semanlink.net/tag/lucilie_bouchere|prefLabel|Lucilie bouchère +http://www.semanlink.net/tag/lucilie_bouchere|broader|http://www.semanlink.net/tag/insecte +http://www.semanlink.net/tag/lucilie_bouchere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lucilie_bouchere|uri|http://www.semanlink.net/tag/lucilie_bouchere +http://www.semanlink.net/tag/lucilie_bouchere|broader_prefLabel|Insecte +http://www.semanlink.net/tag/epidemie|prefLabel|Épidémie +http://www.semanlink.net/tag/epidemie|broader|http://www.semanlink.net/tag/maladie +http://www.semanlink.net/tag/epidemie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/epidemie|uri|http://www.semanlink.net/tag/epidemie +http://www.semanlink.net/tag/epidemie|broader_prefLabel|Maladie +http://www.semanlink.net/tag/blog|prefLabel|Blog +http://www.semanlink.net/tag/blog|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/blog|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blog|uri|http://www.semanlink.net/tag/blog +http://www.semanlink.net/tag/blog|broader_prefLabel|Internet +http://www.semanlink.net/tag/equivalence_mining|creationTime|2007-06-13T23:25:08Z +http://www.semanlink.net/tag/equivalence_mining|prefLabel|Equivalence mining +http://www.semanlink.net/tag/equivalence_mining|broader|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/equivalence_mining|broader|http://www.semanlink.net/tag/synonym_uris +http://www.semanlink.net/tag/equivalence_mining|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/equivalence_mining|creationDate|2007-06-13 
+http://www.semanlink.net/tag/equivalence_mining|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/equivalence_mining|uri|http://www.semanlink.net/tag/equivalence_mining
+http://www.semanlink.net/tag/equivalence_mining|broader_prefLabel|Linking Open Data
+http://www.semanlink.net/tag/equivalence_mining|broader_prefLabel|Synonym URIs
+http://www.semanlink.net/tag/equivalence_mining|broader_prefLabel|Linked Data
+http://www.semanlink.net/tag/equivalence_mining|broader_altLabel|LOD
+http://www.semanlink.net/tag/equivalence_mining|broader_altLabel|LD
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/chris_bizer
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/richard_cyganiak
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/tim_berners_lee
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/yves_raymond
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/frederick_giasson
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/jamendo
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/httprange_14
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/musicbrainz
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/kingsley_idehen
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/giovanni_tummarello
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/tom_heath
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/rdf_data_source
+http://www.semanlink.net/tag/equivalence_mining|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.semanlink.net/tag/cinema|prefLabel|Cinéma
+http://www.semanlink.net/tag/cinema|broader|http://www.semanlink.net/tag/art
+http://www.semanlink.net/tag/cinema|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cinema|uri|http://www.semanlink.net/tag/cinema
+http://www.semanlink.net/tag/cinema|broader_prefLabel|Art
+http://www.semanlink.net/tag/kz|creationTime|2012-08-23T01:00:24Z
+http://www.semanlink.net/tag/kz|prefLabel|KZ
+http://www.semanlink.net/tag/kz|broader|http://www.semanlink.net/tag/nazisme
+http://www.semanlink.net/tag/kz|creationDate|2012-08-23
+http://www.semanlink.net/tag/kz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/kz|uri|http://www.semanlink.net/tag/kz
+http://www.semanlink.net/tag/kz|broader_prefLabel|Nazisme
+http://www.semanlink.net/tag/kz|broader_altLabel|Nazi
+http://www.semanlink.net/tag/kz|broader_related|http://www.semanlink.net/tag/2eme_guerre_mondiale
+http://www.semanlink.net/tag/kz|broader_related|http://www.semanlink.net/tag/antisemitisme +http://www.semanlink.net/tag/toumai|creationTime|2018-01-23T13:45:44Z +http://www.semanlink.net/tag/toumai|prefLabel|Toumaï +http://www.semanlink.net/tag/toumai|broader|http://www.semanlink.net/tag/separation_of_man_and_ape +http://www.semanlink.net/tag/toumai|broader|http://www.semanlink.net/tag/origines_de_l_homme +http://www.semanlink.net/tag/toumai|creationDate|2018-01-23 +http://www.semanlink.net/tag/toumai|comment|Seize ans après la découverte du crâne du plus ancien hominidé, pourquoi son fémur n’a-t-il jamais été publié ? +http://www.semanlink.net/tag/toumai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/toumai|describedBy|https://fr.wikipedia.org/wiki/Touma%C3%AF +http://www.semanlink.net/tag/toumai|uri|http://www.semanlink.net/tag/toumai +http://www.semanlink.net/tag/toumai|broader_prefLabel|Separation of man and ape +http://www.semanlink.net/tag/toumai|broader_prefLabel|Origines de l'homme +http://www.semanlink.net/tag/todo_list|prefLabel|Todo list +http://www.semanlink.net/tag/todo_list|broader|http://www.semanlink.net/tag/to_do +http://www.semanlink.net/tag/todo_list|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/todo_list|uri|http://www.semanlink.net/tag/todo_list +http://www.semanlink.net/tag/todo_list|broader_prefLabel|To do +http://www.semanlink.net/tag/todo_list|broader_altLabel|Todo +http://www.semanlink.net/tag/origine_de_la_vie|prefLabel|Origine de la vie +http://www.semanlink.net/tag/origine_de_la_vie|broader|http://www.semanlink.net/tag/evolution +http://www.semanlink.net/tag/origine_de_la_vie|broader|http://www.semanlink.net/tag/histoire_de_la_vie +http://www.semanlink.net/tag/origine_de_la_vie|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/origine_de_la_vie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/origine_de_la_vie|uri|http://www.semanlink.net/tag/origine_de_la_vie +http://www.semanlink.net/tag/origine_de_la_vie|broader_prefLabel|Evolution +http://www.semanlink.net/tag/origine_de_la_vie|broader_prefLabel|Histoire de la vie +http://www.semanlink.net/tag/origine_de_la_vie|broader_prefLabel|Science +http://www.semanlink.net/tag/origine_de_la_vie|broader_altLabel|sciences +http://www.semanlink.net/tag/shacl|creationTime|2017-08-15T15:51:36Z +http://www.semanlink.net/tag/shacl|prefLabel|SHACL +http://www.semanlink.net/tag/shacl|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/shacl|broader|http://www.semanlink.net/tag/w3c_recommendation +http://www.semanlink.net/tag/shacl|related|http://www.semanlink.net/tag/topbraid_spin +http://www.semanlink.net/tag/shacl|creationDate|2017-08-15 +http://www.semanlink.net/tag/shacl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/shacl|uri|http://www.semanlink.net/tag/shacl +http://www.semanlink.net/tag/shacl|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/shacl|broader_prefLabel|W3C Recommendation +http://www.semanlink.net/tag/shacl|broader_altLabel|sw +http://www.semanlink.net/tag/shacl|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/one_learning_algorithm_hypothesis|creationTime|2016-01-04T14:03:36Z +http://www.semanlink.net/tag/one_learning_algorithm_hypothesis|prefLabel|“one learning algorithm” hypothesis 
+http://www.semanlink.net/tag/one_learning_algorithm_hypothesis|broader|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/one_learning_algorithm_hypothesis|related|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/one_learning_algorithm_hypothesis|related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/one_learning_algorithm_hypothesis|creationDate|2016-01-04 +http://www.semanlink.net/tag/one_learning_algorithm_hypothesis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/one_learning_algorithm_hypothesis|uri|http://www.semanlink.net/tag/one_learning_algorithm_hypothesis +http://www.semanlink.net/tag/one_learning_algorithm_hypothesis|broader_prefLabel|Computational Neuroscience +http://www.semanlink.net/tag/one_learning_algorithm_hypothesis|broader_related|http://www.semanlink.net/tag/brain_vs_deep_learning +http://www.semanlink.net/tag/hubble|prefLabel|Hubble +http://www.semanlink.net/tag/hubble|broader|http://www.semanlink.net/tag/nasa +http://www.semanlink.net/tag/hubble|broader|http://www.semanlink.net/tag/telescope +http://www.semanlink.net/tag/hubble|broader|http://www.semanlink.net/tag/exploration_spatiale +http://www.semanlink.net/tag/hubble|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hubble|uri|http://www.semanlink.net/tag/hubble +http://www.semanlink.net/tag/hubble|broader_prefLabel|NASA +http://www.semanlink.net/tag/hubble|broader_prefLabel|Télescope +http://www.semanlink.net/tag/hubble|broader_prefLabel|Exploration spatiale +http://www.semanlink.net/tag/feed_aggregator|prefLabel|Feed aggregator +http://www.semanlink.net/tag/feed_aggregator|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/feed_aggregator|uri|http://www.semanlink.net/tag/feed_aggregator +http://www.semanlink.net/tag/activity_streams|creationTime|2014-10-23T23:54:26Z +http://www.semanlink.net/tag/activity_streams|prefLabel|Activity Streams +http://www.semanlink.net/tag/activity_streams|creationDate|2014-10-23 +http://www.semanlink.net/tag/activity_streams|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/activity_streams|uri|http://www.semanlink.net/tag/activity_streams +http://www.semanlink.net/tag/la_france_vue_de_l_etranger|prefLabel|La France vue de l'étranger +http://www.semanlink.net/tag/la_france_vue_de_l_etranger|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/la_france_vue_de_l_etranger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/la_france_vue_de_l_etranger|uri|http://www.semanlink.net/tag/la_france_vue_de_l_etranger +http://www.semanlink.net/tag/la_france_vue_de_l_etranger|broader_prefLabel|France +http://www.semanlink.net/tag/multilingual_language_models|creationTime|2021-07-13T13:36:57Z +http://www.semanlink.net/tag/multilingual_language_models|prefLabel|Multilingual Language Models +http://www.semanlink.net/tag/multilingual_language_models|broader|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/multilingual_language_models|broader|http://www.semanlink.net/tag/multilingual_embeddings +http://www.semanlink.net/tag/multilingual_language_models|broader|http://www.semanlink.net/tag/multilingual_nlp +http://www.semanlink.net/tag/multilingual_language_models|creationDate|2021-07-13 
+http://www.semanlink.net/tag/multilingual_language_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multilingual_language_models|uri|http://www.semanlink.net/tag/multilingual_language_models +http://www.semanlink.net/tag/multilingual_language_models|broader_prefLabel|Pre-Trained Language Models +http://www.semanlink.net/tag/multilingual_language_models|broader_prefLabel|Multilingual embeddings +http://www.semanlink.net/tag/multilingual_language_models|broader_prefLabel|Multilingual NLP +http://www.semanlink.net/tag/multilingual_language_models|broader_altLabel|PreTrained Language Models +http://www.semanlink.net/tag/multilingual_language_models|broader_altLabel|Deep pre-training in NLP +http://www.semanlink.net/tag/multilingual_language_models|broader_related|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/archeologie_du_niger|prefLabel|Archéologie du Niger +http://www.semanlink.net/tag/archeologie_du_niger|broader|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/tag/archeologie_du_niger|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/archeologie_du_niger|broader|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.semanlink.net/tag/archeologie_du_niger|broader|http://www.semanlink.net/tag/histoire_du_niger +http://www.semanlink.net/tag/archeologie_du_niger|related|http://www.semanlink.net/tag/anne_haour +http://www.semanlink.net/tag/archeologie_du_niger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/archeologie_du_niger|uri|http://www.semanlink.net/tag/archeologie_du_niger +http://www.semanlink.net/tag/archeologie_du_niger|broader_prefLabel|Archéologie africaine +http://www.semanlink.net/tag/archeologie_du_niger|broader_prefLabel|Niger +http://www.semanlink.net/tag/archeologie_du_niger|broader_prefLabel|Afrique de l'ouest : histoire +http://www.semanlink.net/tag/archeologie_du_niger|broader_prefLabel|Histoire du Niger +http://www.semanlink.net/tag/archeologie_du_niger|broader_related|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/archeologie_du_niger|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/archeologie_du_niger|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/archeologie_du_niger|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/service_descriptors|creationTime|2017-02-04T11:37:09Z +http://www.semanlink.net/tag/service_descriptors|prefLabel|Service Descriptors +http://www.semanlink.net/tag/service_descriptors|creationDate|2017-02-04 +http://www.semanlink.net/tag/service_descriptors|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/service_descriptors|uri|http://www.semanlink.net/tag/service_descriptors +http://www.semanlink.net/tag/sharing_economy|creationTime|2015-09-23T22:17:16Z +http://www.semanlink.net/tag/sharing_economy|prefLabel|Sharing economy +http://www.semanlink.net/tag/sharing_economy|creationDate|2015-09-23 +http://www.semanlink.net/tag/sharing_economy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sharing_economy|uri|http://www.semanlink.net/tag/sharing_economy +http://www.semanlink.net/tag/probabilistic_relevance_model|creationTime|2017-07-17T17:59:03Z +http://www.semanlink.net/tag/probabilistic_relevance_model|prefLabel|Probabilistic relevance model 
+http://www.semanlink.net/tag/probabilistic_relevance_model|broader|http://www.semanlink.net/tag/ranking_information_retrieval +http://www.semanlink.net/tag/probabilistic_relevance_model|broader|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/probabilistic_relevance_model|broader|http://www.semanlink.net/tag/information_retrieval_techniques +http://www.semanlink.net/tag/probabilistic_relevance_model|creationDate|2017-07-17 +http://www.semanlink.net/tag/probabilistic_relevance_model|comment|formalism of information retrieval useful to derive functions that rank matching documents according to their relevance to a given search query. +http://www.semanlink.net/tag/probabilistic_relevance_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/probabilistic_relevance_model|describedBy|https://fr.wikipedia.org/wiki/Mod%C3%A8le_probabiliste_de_pertinence +http://www.semanlink.net/tag/probabilistic_relevance_model|uri|http://www.semanlink.net/tag/probabilistic_relevance_model +http://www.semanlink.net/tag/probabilistic_relevance_model|broader_prefLabel|Ranking (information retrieval) +http://www.semanlink.net/tag/probabilistic_relevance_model|broader_prefLabel|Similarity queries +http://www.semanlink.net/tag/probabilistic_relevance_model|broader_prefLabel|Information retrieval: techniques +http://www.semanlink.net/tag/probabilistic_relevance_model|broader_altLabel|Vector similarity search +http://www.semanlink.net/tag/probabilistic_relevance_model|broader_related|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/metric_learning|creationTime|2020-05-10T11:00:48Z +http://www.semanlink.net/tag/metric_learning|prefLabel|Metric Learning +http://www.semanlink.net/tag/metric_learning|broader|http://www.semanlink.net/tag/similarity_learning +http://www.semanlink.net/tag/metric_learning|creationDate|2020-05-10 +http://www.semanlink.net/tag/metric_learning|comment|"Distance metric learning: the task of learning a distance function over objects consistent with a notion of similarity. + +> Suppose you want to implement a ML-based search engine that, given a query, ranks a variable number of documents by relevance. Metric learning is essentially learning a function that, given two inputs, tells you how ""relevant"" they are. 
[src](https://twitter.com/ericjang11/status/1259207970916667392?s=20)" +http://www.semanlink.net/tag/metric_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/metric_learning|uri|http://www.semanlink.net/tag/metric_learning +http://www.semanlink.net/tag/metric_learning|broader_prefLabel|Similarity learning +http://www.semanlink.net/tag/metric_learning|broader_related|http://www.semanlink.net/tag/machine_learned_ranking +http://www.semanlink.net/tag/metric_learning|broader_related|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.semanlink.net/tag/conferences|creationTime|2007-05-24T13:14:22Z +http://www.semanlink.net/tag/conferences|prefLabel|Conférences +http://www.semanlink.net/tag/conferences|broader|http://www.semanlink.net/tag/event +http://www.semanlink.net/tag/conferences|related|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/conferences|creationDate|2007-05-24 +http://www.semanlink.net/tag/conferences|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conferences|uri|http://www.semanlink.net/tag/conferences +http://www.semanlink.net/tag/conferences|broader_prefLabel|Event +http://www.semanlink.net/tag/nothing_to_hide_argument|creationTime|2020-04-24T12:41:04Z +http://www.semanlink.net/tag/nothing_to_hide_argument|prefLabel|Nothing to hide argument +http://www.semanlink.net/tag/nothing_to_hide_argument|creationDate|2020-04-24 +http://www.semanlink.net/tag/nothing_to_hide_argument|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nothing_to_hide_argument|describedBy|https://en.wikipedia.org/wiki/Nothing_to_hide_argument +http://www.semanlink.net/tag/nothing_to_hide_argument|uri|http://www.semanlink.net/tag/nothing_to_hide_argument +http://www.semanlink.net/tag/windows_media_player|prefLabel|Windows Media Player +http://www.semanlink.net/tag/windows_media_player|broader|http://www.semanlink.net/tag/media_player +http://www.semanlink.net/tag/windows_media_player|broader|http://www.semanlink.net/tag/windows +http://www.semanlink.net/tag/windows_media_player|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/windows_media_player|uri|http://www.semanlink.net/tag/windows_media_player +http://www.semanlink.net/tag/windows_media_player|broader_prefLabel|Media Player +http://www.semanlink.net/tag/windows_media_player|broader_prefLabel|Windows +http://www.semanlink.net/tag/abstract_meaning_representation|creationTime|2020-01-07T12:28:13Z +http://www.semanlink.net/tag/abstract_meaning_representation|prefLabel|Abstract Meaning Representation +http://www.semanlink.net/tag/abstract_meaning_representation|creationDate|2020-01-07 +http://www.semanlink.net/tag/abstract_meaning_representation|comment|a semantic representation language +http://www.semanlink.net/tag/abstract_meaning_representation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/abstract_meaning_representation|describedBy|https://en.wikipedia.org/wiki/Abstract_Meaning_Representation +http://www.semanlink.net/tag/abstract_meaning_representation|uri|http://www.semanlink.net/tag/abstract_meaning_representation +http://www.semanlink.net/tag/j_y_etais|creationTime|2015-10-02T21:08:23Z +http://www.semanlink.net/tag/j_y_etais|prefLabel|J'y étais +http://www.semanlink.net/tag/j_y_etais|broader|http://www.semanlink.net/tag/souvenirs +http://www.semanlink.net/tag/j_y_etais|creationDate|2015-10-02 
+http://www.semanlink.net/tag/j_y_etais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/j_y_etais|uri|http://www.semanlink.net/tag/j_y_etais +http://www.semanlink.net/tag/j_y_etais|broader_prefLabel|Souvenirs +http://www.semanlink.net/tag/j_y_etais|broader_altLabel|Souvenir +http://www.semanlink.net/tag/hannibal|prefLabel|Hannibal +http://www.semanlink.net/tag/hannibal|broader|http://www.semanlink.net/tag/guerres_puniques +http://www.semanlink.net/tag/hannibal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hannibal|uri|http://www.semanlink.net/tag/hannibal +http://www.semanlink.net/tag/hannibal|broader_prefLabel|Guerres puniques +http://www.semanlink.net/tag/vie_vienna_iks_editables|creationTime|2012-06-15T14:18:00Z +http://www.semanlink.net/tag/vie_vienna_iks_editables|prefLabel|VIE Vienna IKS Editables +http://www.semanlink.net/tag/vie_vienna_iks_editables|broader|http://www.semanlink.net/tag/interactive_knowledge_stack +http://www.semanlink.net/tag/vie_vienna_iks_editables|broader|http://www.semanlink.net/tag/javascript_librairies +http://www.semanlink.net/tag/vie_vienna_iks_editables|related|http://www.semanlink.net/tag/henri_bergius +http://www.semanlink.net/tag/vie_vienna_iks_editables|creationDate|2012-06-15 +http://www.semanlink.net/tag/vie_vienna_iks_editables|comment|VIE is a JavaScript library for implementing decoupled Content Management Systems and semantic interaction in web applications. +http://www.semanlink.net/tag/vie_vienna_iks_editables|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vie_vienna_iks_editables|describedBy|http://viejs.org/ +http://www.semanlink.net/tag/vie_vienna_iks_editables|uri|http://www.semanlink.net/tag/vie_vienna_iks_editables +http://www.semanlink.net/tag/vie_vienna_iks_editables|broader_prefLabel|Interactive Knowledge Stack +http://www.semanlink.net/tag/vie_vienna_iks_editables|broader_prefLabel|JavaScript librairies +http://www.semanlink.net/tag/vie_vienna_iks_editables|broader_altLabel|IKS +http://www.semanlink.net/tag/vie_vienna_iks_editables|broader_altLabel|JavaScript framework +http://www.semanlink.net/tag/paris_nlp_meetup|creationTime|2018-07-26T00:27:00Z +http://www.semanlink.net/tag/paris_nlp_meetup|prefLabel|Paris NLP meetup +http://www.semanlink.net/tag/paris_nlp_meetup|broader|http://www.semanlink.net/tag/nlp_event +http://www.semanlink.net/tag/paris_nlp_meetup|broader|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/paris_nlp_meetup|broader|http://www.semanlink.net/tag/meetup +http://www.semanlink.net/tag/paris_nlp_meetup|creationDate|2018-07-26 +http://www.semanlink.net/tag/paris_nlp_meetup|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paris_nlp_meetup|homepage|https://www.meetup.com/fr-FR/Paris-NLP/ +http://www.semanlink.net/tag/paris_nlp_meetup|weblog|https://nlpparis.wordpress.com/ +http://www.semanlink.net/tag/paris_nlp_meetup|uri|http://www.semanlink.net/tag/paris_nlp_meetup +http://www.semanlink.net/tag/paris_nlp_meetup|broader_prefLabel|NLP event +http://www.semanlink.net/tag/paris_nlp_meetup|broader_prefLabel|Paris +http://www.semanlink.net/tag/paris_nlp_meetup|broader_prefLabel|Meetup +http://www.semanlink.net/tag/computer_vision|creationTime|2013-04-23T17:03:32Z +http://www.semanlink.net/tag/computer_vision|prefLabel|Computer vision +http://www.semanlink.net/tag/computer_vision|broader|http://www.semanlink.net/tag/artificial_intelligence 
+http://www.semanlink.net/tag/computer_vision|creationDate|2013-04-23 +http://www.semanlink.net/tag/computer_vision|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/computer_vision|altLabel|Visual Recognition +http://www.semanlink.net/tag/computer_vision|uri|http://www.semanlink.net/tag/computer_vision +http://www.semanlink.net/tag/computer_vision|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/computer_vision|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/computer_vision|broader_altLabel|AI +http://www.semanlink.net/tag/computer_vision|broader_altLabel|IA +http://www.semanlink.net/tag/computer_vision|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/kindle|creationTime|2013-09-09T14:14:10Z +http://www.semanlink.net/tag/kindle|prefLabel|Kindle +http://www.semanlink.net/tag/kindle|broader|http://www.semanlink.net/tag/amazon +http://www.semanlink.net/tag/kindle|creationDate|2013-09-09 +http://www.semanlink.net/tag/kindle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kindle|uri|http://www.semanlink.net/tag/kindle +http://www.semanlink.net/tag/kindle|broader_prefLabel|Amazon +http://www.semanlink.net/tag/rfid|prefLabel|RFID +http://www.semanlink.net/tag/rfid|broader|http://www.semanlink.net/tag/ntic +http://www.semanlink.net/tag/rfid|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rfid|uri|http://www.semanlink.net/tag/rfid +http://www.semanlink.net/tag/rfid|broader_prefLabel|NTIC +http://www.semanlink.net/tag/ipv6|prefLabel|IPv6 +http://www.semanlink.net/tag/ipv6|broader|http://www.semanlink.net/tag/ip_address +http://www.semanlink.net/tag/ipv6|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/ipv6|creationDate|2006-11-06 +http://www.semanlink.net/tag/ipv6|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ipv6|uri|http://www.semanlink.net/tag/ipv6 +http://www.semanlink.net/tag/ipv6|broader_prefLabel|IP address +http://www.semanlink.net/tag/ipv6|broader_prefLabel|Internet +http://www.semanlink.net/tag/acronyms_nlp|creationTime|2019-04-03T09:37:00Z +http://www.semanlink.net/tag/acronyms_nlp|prefLabel|Acronyms (NLP) +http://www.semanlink.net/tag/acronyms_nlp|broader|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/acronyms_nlp|related|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/acronyms_nlp|creationDate|2019-04-03 +http://www.semanlink.net/tag/acronyms_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/acronyms_nlp|uri|http://www.semanlink.net/tag/acronyms_nlp +http://www.semanlink.net/tag/acronyms_nlp|broader_prefLabel|Named Entity Recognition +http://www.semanlink.net/tag/acronyms_nlp|broader_altLabel|NER +http://www.semanlink.net/tag/thermodynamique|creationTime|2014-01-26T20:32:44Z +http://www.semanlink.net/tag/thermodynamique|prefLabel|Thermodynamique +http://www.semanlink.net/tag/thermodynamique|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/thermodynamique|creationDate|2014-01-26 +http://www.semanlink.net/tag/thermodynamique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thermodynamique|uri|http://www.semanlink.net/tag/thermodynamique +http://www.semanlink.net/tag/thermodynamique|broader_prefLabel|Physique 
+http://www.semanlink.net/tag/thermodynamique|broader_altLabel|Physics +http://www.semanlink.net/tag/bernard_stiegler|creationTime|2013-10-04T13:41:41Z +http://www.semanlink.net/tag/bernard_stiegler|prefLabel|Bernard Stiegler +http://www.semanlink.net/tag/bernard_stiegler|broader|http://www.semanlink.net/tag/philosophe +http://www.semanlink.net/tag/bernard_stiegler|creationDate|2013-10-04 +http://www.semanlink.net/tag/bernard_stiegler|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bernard_stiegler|describedBy|https://fr.wikipedia.org/wiki/Bernard_Stiegler +http://www.semanlink.net/tag/bernard_stiegler|altLabel|Stiegler +http://www.semanlink.net/tag/bernard_stiegler|uri|http://www.semanlink.net/tag/bernard_stiegler +http://www.semanlink.net/tag/bernard_stiegler|broader_prefLabel|Philosophe +http://www.semanlink.net/tag/accueil_etranger|prefLabel|Accueil étranger +http://www.semanlink.net/tag/accueil_etranger|broader|http://www.semanlink.net/tag/cons_de_francais +http://www.semanlink.net/tag/accueil_etranger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/accueil_etranger|uri|http://www.semanlink.net/tag/accueil_etranger +http://www.semanlink.net/tag/accueil_etranger|broader_prefLabel|Cons de Français +http://www.semanlink.net/tag/industrie_de_l_armement|prefLabel|Industrie de l'armement +http://www.semanlink.net/tag/industrie_de_l_armement|broader|http://www.semanlink.net/tag/armement +http://www.semanlink.net/tag/industrie_de_l_armement|broader|http://www.semanlink.net/tag/industrie +http://www.semanlink.net/tag/industrie_de_l_armement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/industrie_de_l_armement|uri|http://www.semanlink.net/tag/industrie_de_l_armement +http://www.semanlink.net/tag/industrie_de_l_armement|broader_prefLabel|Armement +http://www.semanlink.net/tag/industrie_de_l_armement|broader_prefLabel|industrie +http://www.semanlink.net/tag/reputation_system|creationTime|2018-01-03T23:37:07Z +http://www.semanlink.net/tag/reputation_system|prefLabel|Reputation system +http://www.semanlink.net/tag/reputation_system|creationDate|2018-01-03 +http://www.semanlink.net/tag/reputation_system|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reputation_system|describedBy|https://en.wikipedia.org/wiki/Reputation_system +http://www.semanlink.net/tag/reputation_system|uri|http://www.semanlink.net/tag/reputation_system +http://www.semanlink.net/tag/lula|prefLabel|Lula +http://www.semanlink.net/tag/lula|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/lula|broader|http://www.semanlink.net/tag/chef_d_etat +http://www.semanlink.net/tag/lula|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lula|uri|http://www.semanlink.net/tag/lula +http://www.semanlink.net/tag/lula|broader_prefLabel|Brésil +http://www.semanlink.net/tag/lula|broader_prefLabel|Chef d'état +http://www.semanlink.net/tag/lula|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/synthetic_life|creationTime|2007-07-03T21:09:42Z +http://www.semanlink.net/tag/synthetic_life|prefLabel|Synthetic life +http://www.semanlink.net/tag/synthetic_life|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/synthetic_life|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/synthetic_life|broader|http://www.semanlink.net/tag/synthetic_biology 
+http://www.semanlink.net/tag/synthetic_life|creationDate|2007-07-03
+http://www.semanlink.net/tag/synthetic_life|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/synthetic_life|uri|http://www.semanlink.net/tag/synthetic_life
+http://www.semanlink.net/tag/synthetic_life|broader_prefLabel|Biology
+http://www.semanlink.net/tag/synthetic_life|broader_prefLabel|Genetics
+http://www.semanlink.net/tag/synthetic_life|broader_prefLabel|Génétique
+http://www.semanlink.net/tag/synthetic_life|broader_prefLabel|Synthetic biology
+http://www.semanlink.net/tag/synthetic_life|broader_altLabel|Biologie
+http://www.semanlink.net/tag/antiscience|creationTime|2021-05-24T14:49:37Z
+http://www.semanlink.net/tag/antiscience|prefLabel|Antiscience
+http://www.semanlink.net/tag/antiscience|broader|http://www.semanlink.net/tag/science
+http://www.semanlink.net/tag/antiscience|creationDate|2021-05-24
+http://www.semanlink.net/tag/antiscience|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/antiscience|uri|http://www.semanlink.net/tag/antiscience
+http://www.semanlink.net/tag/antiscience|broader_prefLabel|Science
+http://www.semanlink.net/tag/antiscience|broader_altLabel|sciences
+http://www.semanlink.net/tag/genomique|prefLabel|Génomique
+http://www.semanlink.net/tag/genomique|broader|http://www.semanlink.net/tag/biotechnologies
+http://www.semanlink.net/tag/genomique|broader|http://www.semanlink.net/tag/genetique
+http://www.semanlink.net/tag/genomique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/genomique|uri|http://www.semanlink.net/tag/genomique
+http://www.semanlink.net/tag/genomique|broader_prefLabel|Biotechnologies
+http://www.semanlink.net/tag/genomique|broader_prefLabel|Genetics
+http://www.semanlink.net/tag/genomique|broader_prefLabel|Génétique
+http://www.semanlink.net/tag/d3js|creationTime|2017-06-28T15:02:28Z
+http://www.semanlink.net/tag/d3js|prefLabel|D3js
+http://www.semanlink.net/tag/d3js|broader|http://www.semanlink.net/tag/javascript_librairies
+http://www.semanlink.net/tag/d3js|broader|http://www.semanlink.net/tag/data_visualization_tools
+http://www.semanlink.net/tag/d3js|related|http://www.semanlink.net/tag/graph_visualization
+http://www.semanlink.net/tag/d3js|related|http://www.semanlink.net/tag/jquery
+http://www.semanlink.net/tag/d3js|creationDate|2017-06-28
+http://www.semanlink.net/tag/d3js|comment|"JavaScript library for manipulating documents based on data and for visualizing data using web standards (SVG, Canvas and HTML).
Combines visualization and interaction techniques with a data-driven approach to DOM manipulation +" +http://www.semanlink.net/tag/d3js|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/d3js|homepage|https://d3js.org/ +http://www.semanlink.net/tag/d3js|uri|http://www.semanlink.net/tag/d3js +http://www.semanlink.net/tag/d3js|broader_prefLabel|JavaScript librairies +http://www.semanlink.net/tag/d3js|broader_prefLabel|Visualization Tools +http://www.semanlink.net/tag/d3js|broader_altLabel|JavaScript framework +http://www.semanlink.net/tag/d3js|broader_altLabel|Data Visualization Tools +http://www.semanlink.net/tag/d3js|broader_related|http://www.semanlink.net/tag/information_visualization +http://www.semanlink.net/tag/scraping|creationTime|2020-01-23T18:15:24Z +http://www.semanlink.net/tag/scraping|prefLabel|Scraping +http://www.semanlink.net/tag/scraping|creationDate|2020-01-23 +http://www.semanlink.net/tag/scraping|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/scraping|uri|http://www.semanlink.net/tag/scraping +http://www.semanlink.net/tag/minoen|prefLabel|Minoen +http://www.semanlink.net/tag/minoen|broader|http://www.semanlink.net/tag/crete_antique +http://www.semanlink.net/tag/minoen|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/minoen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/minoen|uri|http://www.semanlink.net/tag/minoen +http://www.semanlink.net/tag/minoen|broader_prefLabel|Crète antique +http://www.semanlink.net/tag/minoen|broader_prefLabel|Langues +http://www.semanlink.net/tag/named_entity_recognition|creationTime|2011-03-07T09:58:00Z +http://www.semanlink.net/tag/named_entity_recognition|prefLabel|Named Entity Recognition +http://www.semanlink.net/tag/named_entity_recognition|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/named_entity_recognition|broader|http://www.semanlink.net/tag/sequence_labeling +http://www.semanlink.net/tag/named_entity_recognition|broader|http://www.semanlink.net/tag/entity_discovery_and_linking +http://www.semanlink.net/tag/named_entity_recognition|creationDate|2011-03-07 +http://www.semanlink.net/tag/named_entity_recognition|comment|"sequence labelling tasks where the goal is to identify +the names of entities in a sentence. Named entities can +be proper nouns (locations, people, organizations...), or can be much more +domain-specific, such as diseases or genes in +biomedical NLP." 
+http://www.semanlink.net/tag/named_entity_recognition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/named_entity_recognition|describedBy|https://en.wikipedia.org/wiki/Named-entity_recognition +http://www.semanlink.net/tag/named_entity_recognition|altLabel|NER +http://www.semanlink.net/tag/named_entity_recognition|uri|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/named_entity_recognition|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/named_entity_recognition|broader_prefLabel|Sequence labeling +http://www.semanlink.net/tag/named_entity_recognition|broader_prefLabel|Entity discovery and linking +http://www.semanlink.net/tag/named_entity_recognition|broader_altLabel|Sequence Tagging +http://www.semanlink.net/tag/named_entity_recognition|broader_altLabel|Entity Retrieval +http://www.semanlink.net/tag/named_entity_recognition|broader_altLabel|Entity Analysis +http://www.semanlink.net/tag/named_entity_recognition|broader_related|http://www.semanlink.net/tag/ml_sequential_data +http://www.semanlink.net/tag/named_entity_recognition|broader_related|http://www.semanlink.net/tag/relation_extraction +http://www.semanlink.net/tag/modeles_economiques|creationTime|2016-05-15T13:53:08Z +http://www.semanlink.net/tag/modeles_economiques|prefLabel|Modèles économiques +http://www.semanlink.net/tag/modeles_economiques|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/modeles_economiques|creationDate|2016-05-15 +http://www.semanlink.net/tag/modeles_economiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/modeles_economiques|uri|http://www.semanlink.net/tag/modeles_economiques +http://www.semanlink.net/tag/modeles_economiques|broader_prefLabel|Economie +http://www.semanlink.net/tag/modeles_economiques|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/enigme|creationTime|2011-09-21T23:21:55Z +http://www.semanlink.net/tag/enigme|prefLabel|Enigme +http://www.semanlink.net/tag/enigme|creationDate|2011-09-21 +http://www.semanlink.net/tag/enigme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enigme|uri|http://www.semanlink.net/tag/enigme +http://www.semanlink.net/tag/dynamic_topic_model|creationTime|2013-08-21T18:34:44Z +http://www.semanlink.net/tag/dynamic_topic_model|prefLabel|Dynamic topic model +http://www.semanlink.net/tag/dynamic_topic_model|broader|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/dynamic_topic_model|related|http://www.semanlink.net/tag/latent_dirichlet_allocation +http://www.semanlink.net/tag/dynamic_topic_model|related|http://www.semanlink.net/tag/david_blei +http://www.semanlink.net/tag/dynamic_topic_model|creationDate|2013-08-21 +http://www.semanlink.net/tag/dynamic_topic_model|comment|"Generative models that can be used to analyze the evolution of (unobserved) topics of a collection of documents over time.
+extension to Latent Dirichlet Allocation (LDA) that can handle sequential documents" +http://www.semanlink.net/tag/dynamic_topic_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dynamic_topic_model|describedBy|https://en.wikipedia.org/wiki/Dynamic_topic_model +http://www.semanlink.net/tag/dynamic_topic_model|uri|http://www.semanlink.net/tag/dynamic_topic_model +http://www.semanlink.net/tag/dynamic_topic_model|broader_prefLabel|Topic Modeling +http://www.semanlink.net/tag/dynamic_topic_model|broader_altLabel|Topic model +http://www.semanlink.net/tag/dynamic_topic_model|broader_related|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/dynamic_topic_model|broader_related|http://www.semanlink.net/tag/andrew_mccallum +http://www.semanlink.net/tag/dynamic_topic_model|broader_related|http://www.semanlink.net/tag/nlp_and_humanities +http://www.semanlink.net/tag/dynamic_topic_model|broader_related|http://www.semanlink.net/tag/mallet +http://www.semanlink.net/tag/onu|prefLabel|ONU +http://www.semanlink.net/tag/onu|broader|http://www.semanlink.net/tag/etat_du_monde +http://www.semanlink.net/tag/onu|broader|http://www.semanlink.net/tag/institutions_internationales +http://www.semanlink.net/tag/onu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/onu|altLabel|Nations unies +http://www.semanlink.net/tag/onu|uri|http://www.semanlink.net/tag/onu +http://www.semanlink.net/tag/onu|broader_prefLabel|Etat du monde +http://www.semanlink.net/tag/onu|broader_prefLabel|Institutions internationales +http://www.semanlink.net/tag/2020|creationTime|2020-12-28T19:11:40Z +http://www.semanlink.net/tag/2020|prefLabel|2020 +http://www.semanlink.net/tag/2020|creationDate|2020-12-28 +http://www.semanlink.net/tag/2020|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/2020|uri|http://www.semanlink.net/tag/2020 +http://www.semanlink.net/tag/startups|creationTime|2017-06-17T23:47:56Z +http://www.semanlink.net/tag/startups|prefLabel|Startups +http://www.semanlink.net/tag/startups|creationDate|2017-06-17 +http://www.semanlink.net/tag/startups|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/startups|altLabel|Startup +http://www.semanlink.net/tag/startups|uri|http://www.semanlink.net/tag/startups +http://www.semanlink.net/tag/ondes_gravitationnelles|creationTime|2017-10-16T18:01:17Z +http://www.semanlink.net/tag/ondes_gravitationnelles|prefLabel|Ondes gravitationnelles +http://www.semanlink.net/tag/ondes_gravitationnelles|broader|http://www.semanlink.net/tag/gravitation +http://www.semanlink.net/tag/ondes_gravitationnelles|creationDate|2017-10-16 +http://www.semanlink.net/tag/ondes_gravitationnelles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ondes_gravitationnelles|uri|http://www.semanlink.net/tag/ondes_gravitationnelles +http://www.semanlink.net/tag/ondes_gravitationnelles|broader_prefLabel|Gravitation +http://www.semanlink.net/tag/ondes_gravitationnelles|broader_altLabel|Gravity +http://www.semanlink.net/tag/loi_sur_le_voile|prefLabel|Loi sur le voile +http://www.semanlink.net/tag/loi_sur_le_voile|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/loi_sur_le_voile|broader|http://www.semanlink.net/tag/societe +http://www.semanlink.net/tag/loi_sur_le_voile|broader|http://www.semanlink.net/tag/con_de_chirac 
+http://www.semanlink.net/tag/loi_sur_le_voile|broader|http://www.semanlink.net/tag/islam +http://www.semanlink.net/tag/loi_sur_le_voile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/loi_sur_le_voile|uri|http://www.semanlink.net/tag/loi_sur_le_voile +http://www.semanlink.net/tag/loi_sur_le_voile|broader_prefLabel|France +http://www.semanlink.net/tag/loi_sur_le_voile|broader_prefLabel|Société +http://www.semanlink.net/tag/loi_sur_le_voile|broader_prefLabel|Con de Chirac +http://www.semanlink.net/tag/loi_sur_le_voile|broader_prefLabel|Islam +http://www.semanlink.net/tag/loi_sur_le_voile|broader_altLabel|Chirac est nul +http://www.semanlink.net/tag/benjamin_franklin|creationTime|2007-07-15T11:23:53Z +http://www.semanlink.net/tag/benjamin_franklin|prefLabel|Benjamin Franklin +http://www.semanlink.net/tag/benjamin_franklin|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/benjamin_franklin|creationDate|2007-07-15 +http://www.semanlink.net/tag/benjamin_franklin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/benjamin_franklin|uri|http://www.semanlink.net/tag/benjamin_franklin +http://www.semanlink.net/tag/benjamin_franklin|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|creationTime|2020-12-22T21:01:24Z +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|prefLabel|USA: Foreign policy +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|related|http://www.semanlink.net/tag/imperialisme_americain +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|creationDate|2020-12-22 +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|describedBy|https://en.wikipedia.org/wiki/Foreign_policy_of_the_United_States +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|altLabel|Politique extérieure américaine +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|uri|http://www.semanlink.net/tag/foreign_policy_of_the_united_states +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|broader_prefLabel|USA +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/foreign_policy_of_the_united_states|broader_altLabel|United States +http://www.semanlink.net/tag/text|creationTime|2015-03-27T23:03:19Z +http://www.semanlink.net/tag/text|prefLabel|Text +http://www.semanlink.net/tag/text|creationDate|2015-03-27 +http://www.semanlink.net/tag/text|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text|uri|http://www.semanlink.net/tag/text +http://www.semanlink.net/tag/perse|prefLabel|Perse +http://www.semanlink.net/tag/perse|broader|http://www.semanlink.net/tag/antiquite_iranienne +http://www.semanlink.net/tag/perse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/perse|uri|http://www.semanlink.net/tag/perse +http://www.semanlink.net/tag/perse|broader_prefLabel|Antiquité iranienne +http://www.semanlink.net/tag/maxwell_s_demon|creationTime|2021-01-13T11:50:18Z +http://www.semanlink.net/tag/maxwell_s_demon|prefLabel|Maxwell's demon 
+http://www.semanlink.net/tag/maxwell_s_demon|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/maxwell_s_demon|creationDate|2021-01-13 +http://www.semanlink.net/tag/maxwell_s_demon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maxwell_s_demon|describedBy|https://en.wikipedia.org/wiki/Maxwell%27s_demon +http://www.semanlink.net/tag/maxwell_s_demon|altLabel|Démon de Maxwell +http://www.semanlink.net/tag/maxwell_s_demon|uri|http://www.semanlink.net/tag/maxwell_s_demon +http://www.semanlink.net/tag/maxwell_s_demon|broader_prefLabel|Physique +http://www.semanlink.net/tag/maxwell_s_demon|broader_altLabel|Physics +http://www.semanlink.net/tag/ils_commencent_a_me_gonfler|creationTime|2020-04-25T21:36:00Z +http://www.semanlink.net/tag/ils_commencent_a_me_gonfler|prefLabel|Ils commencent à me gonfler +http://www.semanlink.net/tag/ils_commencent_a_me_gonfler|creationDate|2020-04-25 +http://www.semanlink.net/tag/ils_commencent_a_me_gonfler|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ils_commencent_a_me_gonfler|uri|http://www.semanlink.net/tag/ils_commencent_a_me_gonfler +http://www.semanlink.net/tag/orwell|creationTime|2021-06-05T15:49:26Z +http://www.semanlink.net/tag/orwell|prefLabel|Orwell +http://www.semanlink.net/tag/orwell|creationDate|2021-06-05 +http://www.semanlink.net/tag/orwell|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/orwell|describedBy|https://fr.wikipedia.org/wiki/George_Orwell +http://www.semanlink.net/tag/orwell|uri|http://www.semanlink.net/tag/orwell +http://www.semanlink.net/tag/henri_verdier|creationTime|2014-03-27T16:21:18Z +http://www.semanlink.net/tag/henri_verdier|prefLabel|Henri Verdier +http://www.semanlink.net/tag/henri_verdier|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/henri_verdier|related|http://www.semanlink.net/tag/data_gouv_fr +http://www.semanlink.net/tag/henri_verdier|creationDate|2014-03-27 +http://www.semanlink.net/tag/henri_verdier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/henri_verdier|describedBy|https://fr.wikipedia.org/wiki/Henri_Verdier +http://www.semanlink.net/tag/henri_verdier|uri|http://www.semanlink.net/tag/henri_verdier +http://www.semanlink.net/tag/henri_verdier|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/henri_verdier|broader_altLabel|Technical guys +http://www.semanlink.net/tag/microformats|prefLabel|Microformats +http://www.semanlink.net/tag/microformats|broader|http://www.semanlink.net/tag/html_data +http://www.semanlink.net/tag/microformats|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/microformats|uri|http://www.semanlink.net/tag/microformats +http://www.semanlink.net/tag/microformats|broader_prefLabel|HTML Data +http://www.semanlink.net/tag/differentiable_reasoning_over_text|creationTime|2020-05-16T12:18:14Z +http://www.semanlink.net/tag/differentiable_reasoning_over_text|prefLabel|Differentiable Reasoning over Text +http://www.semanlink.net/tag/differentiable_reasoning_over_text|creationDate|2020-05-16 +http://www.semanlink.net/tag/differentiable_reasoning_over_text|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/differentiable_reasoning_over_text|uri|http://www.semanlink.net/tag/differentiable_reasoning_over_text 
+http://www.semanlink.net/tag/handwriting|creationTime|2014-12-17T23:42:16Z +http://www.semanlink.net/tag/handwriting|prefLabel|Handwriting +http://www.semanlink.net/tag/handwriting|creationDate|2014-12-17 +http://www.semanlink.net/tag/handwriting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/handwriting|uri|http://www.semanlink.net/tag/handwriting +http://www.semanlink.net/tag/uri_opacity|creationTime|2010-04-27T10:24:02Z +http://www.semanlink.net/tag/uri_opacity|prefLabel|URI opacity +http://www.semanlink.net/tag/uri_opacity|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/uri_opacity|creationDate|2010-04-27 +http://www.semanlink.net/tag/uri_opacity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uri_opacity|uri|http://www.semanlink.net/tag/uri_opacity +http://www.semanlink.net/tag/uri_opacity|broader_prefLabel|URI +http://www.semanlink.net/tag/raphaelsty|creationTime|2019-04-18T09:56:04Z +http://www.semanlink.net/tag/raphaelsty|prefLabel|Raphaël Sourty +http://www.semanlink.net/tag/raphaelsty|related|http://www.semanlink.net/tag/kd_mkb +http://www.semanlink.net/tag/raphaelsty|related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/raphaelsty|creationDate|2019-04-18 +http://www.semanlink.net/tag/raphaelsty|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/raphaelsty|altLabel|raphaelsty +http://www.semanlink.net/tag/raphaelsty|weblog|https://raphaelsty.github.io/ +http://www.semanlink.net/tag/raphaelsty|uri|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/tag/nanotechnologies|prefLabel|Nanotechnologies +http://www.semanlink.net/tag/nanotechnologies|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/nanotechnologies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nanotechnologies|uri|http://www.semanlink.net/tag/nanotechnologies +http://www.semanlink.net/tag/nanotechnologies|broader_prefLabel|Technologie +http://www.semanlink.net/tag/keywords|creationTime|2019-02-09T01:45:27Z +http://www.semanlink.net/tag/keywords|prefLabel|Keywords +http://www.semanlink.net/tag/keywords|related|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/keywords|creationDate|2019-02-09 +http://www.semanlink.net/tag/keywords|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/keywords|uri|http://www.semanlink.net/tag/keywords +http://www.semanlink.net/tag/perrier|creationTime|2009-06-25T08:33:07Z +http://www.semanlink.net/tag/perrier|prefLabel|Perrier +http://www.semanlink.net/tag/perrier|creationDate|2009-06-25 +http://www.semanlink.net/tag/perrier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/perrier|uri|http://www.semanlink.net/tag/perrier +http://www.semanlink.net/tag/universal_income|creationTime|2016-06-09T23:01:48Z +http://www.semanlink.net/tag/universal_income|prefLabel|Universal income +http://www.semanlink.net/tag/universal_income|creationDate|2016-06-09 +http://www.semanlink.net/tag/universal_income|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/universal_income|uri|http://www.semanlink.net/tag/universal_income +http://www.semanlink.net/tag/ajax|prefLabel|Ajax +http://www.semanlink.net/tag/ajax|broader|http://www.semanlink.net/tag/xml 
+http://www.semanlink.net/tag/ajax|broader|http://www.semanlink.net/tag/asynchronous +http://www.semanlink.net/tag/ajax|broader|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/tag/ajax|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/ajax|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/ajax|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ajax|altLabel|XMLHttpRequest +http://www.semanlink.net/tag/ajax|uri|http://www.semanlink.net/tag/ajax +http://www.semanlink.net/tag/ajax|broader_prefLabel|XML +http://www.semanlink.net/tag/ajax|broader_prefLabel|Asynchronous +http://www.semanlink.net/tag/ajax|broader_prefLabel|Web dev +http://www.semanlink.net/tag/ajax|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/ajax|broader_prefLabel|Dev +http://www.semanlink.net/tag/ajax|broader_altLabel|Web app dev +http://www.semanlink.net/tag/ajax|broader_altLabel|js +http://www.semanlink.net/tag/clonage|prefLabel|Clonage +http://www.semanlink.net/tag/clonage|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/clonage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/clonage|uri|http://www.semanlink.net/tag/clonage +http://www.semanlink.net/tag/clonage|broader_prefLabel|Genetics +http://www.semanlink.net/tag/clonage|broader_prefLabel|Génétique +http://www.semanlink.net/tag/petrole_et_corruption|creationTime|2015-06-10T19:35:25Z +http://www.semanlink.net/tag/petrole_et_corruption|prefLabel|Pétrole et corruption +http://www.semanlink.net/tag/petrole_et_corruption|broader|http://www.semanlink.net/tag/petrole +http://www.semanlink.net/tag/petrole_et_corruption|broader|http://www.semanlink.net/tag/corruption +http://www.semanlink.net/tag/petrole_et_corruption|creationDate|2015-06-10 +http://www.semanlink.net/tag/petrole_et_corruption|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/petrole_et_corruption|uri|http://www.semanlink.net/tag/petrole_et_corruption +http://www.semanlink.net/tag/petrole_et_corruption|broader_prefLabel|Pétrole +http://www.semanlink.net/tag/petrole_et_corruption|broader_prefLabel|Corruption +http://www.semanlink.net/tag/euro_crisis|creationTime|2010-05-20T00:36:34Z +http://www.semanlink.net/tag/euro_crisis|prefLabel|Euro Crisis +http://www.semanlink.net/tag/euro_crisis|broader|http://www.semanlink.net/tag/crise_financiere +http://www.semanlink.net/tag/euro_crisis|broader|http://www.semanlink.net/tag/euro +http://www.semanlink.net/tag/euro_crisis|creationDate|2010-05-20 +http://www.semanlink.net/tag/euro_crisis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/euro_crisis|uri|http://www.semanlink.net/tag/euro_crisis +http://www.semanlink.net/tag/euro_crisis|broader_prefLabel|Crise financière +http://www.semanlink.net/tag/euro_crisis|broader_prefLabel|Euro +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|creationTime|2017-07-18T16:15:54Z +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|prefLabel|Hierarchical clustering of text documents +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|broader|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|broader|http://www.semanlink.net/tag/hierarchical_clustering 
+http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|creationDate|2017-07-18 +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|comment|"First introduced by vivisimo (bought by IBM in 2012 -> ""Now, Vivisimo Velocity Platform is IBM Watson Explorer"")" +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|uri|http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|broader_prefLabel|Clustering of text documents +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|broader_prefLabel|Hierarchical Clustering +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|broader_altLabel|Text Clustering +http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents|broader_related|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/tag/alzheimer|creationTime|2012-12-03T10:48:23Z +http://www.semanlink.net/tag/alzheimer|prefLabel|Alzheimer +http://www.semanlink.net/tag/alzheimer|creationDate|2012-12-03 +http://www.semanlink.net/tag/alzheimer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alzheimer|uri|http://www.semanlink.net/tag/alzheimer +http://www.semanlink.net/tag/verite|creationTime|2016-09-18T11:21:28Z +http://www.semanlink.net/tag/verite|prefLabel|Vérité +http://www.semanlink.net/tag/verite|creationDate|2016-09-18 +http://www.semanlink.net/tag/verite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/verite|uri|http://www.semanlink.net/tag/verite +http://www.semanlink.net/tag/youtube_video|creationTime|2008-11-10T10:30:17Z +http://www.semanlink.net/tag/youtube_video|prefLabel|YouTube video +http://www.semanlink.net/tag/youtube_video|broader|http://www.semanlink.net/tag/youtube +http://www.semanlink.net/tag/youtube_video|creationDate|2008-11-10 +http://www.semanlink.net/tag/youtube_video|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/youtube_video|uri|http://www.semanlink.net/tag/youtube_video +http://www.semanlink.net/tag/youtube_video|broader_prefLabel|YouTube +http://www.semanlink.net/tag/faille_de_securite|creationTime|2014-10-03T00:13:13Z +http://www.semanlink.net/tag/faille_de_securite|prefLabel|Faille de sécurité +http://www.semanlink.net/tag/faille_de_securite|broader|http://www.semanlink.net/tag/securite_informatique +http://www.semanlink.net/tag/faille_de_securite|creationDate|2014-10-03 +http://www.semanlink.net/tag/faille_de_securite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/faille_de_securite|uri|http://www.semanlink.net/tag/faille_de_securite +http://www.semanlink.net/tag/faille_de_securite|broader_prefLabel|Cybersecurity +http://www.semanlink.net/tag/faille_de_securite|broader_prefLabel|Sécurité informatique +http://www.semanlink.net/tag/covid19_vaccin|creationTime|2020-11-28T16:22:35Z +http://www.semanlink.net/tag/covid19_vaccin|prefLabel|Covid19 : vaccin +http://www.semanlink.net/tag/covid19_vaccin|broader|http://www.semanlink.net/tag/vaccin +http://www.semanlink.net/tag/covid19_vaccin|broader|http://www.semanlink.net/tag/coronavirus 
+http://www.semanlink.net/tag/covid19_vaccin|creationDate|2020-11-28 +http://www.semanlink.net/tag/covid19_vaccin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/covid19_vaccin|uri|http://www.semanlink.net/tag/covid19_vaccin +http://www.semanlink.net/tag/covid19_vaccin|broader_prefLabel|Vaccin +http://www.semanlink.net/tag/covid19_vaccin|broader_prefLabel|Covid19 +http://www.semanlink.net/tag/covid19_vaccin|broader_altLabel|covid-19 +http://www.semanlink.net/tag/covid19_vaccin|broader_altLabel|Covid +http://www.semanlink.net/tag/covid19_vaccin|broader_altLabel|Coronavirus +http://www.semanlink.net/tag/stardust|prefLabel|Stardust +http://www.semanlink.net/tag/stardust|broader|http://www.semanlink.net/tag/comet_wild_2 +http://www.semanlink.net/tag/stardust|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/stardust|comment|On January 15, 2006, after more than 7 years and billions of miles of travel through space, the Stardust spacecraft will finally return to Earth with some precious cargo -- pristine samples of comet and interstellar dust. +http://www.semanlink.net/tag/stardust|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/stardust|uri|http://www.semanlink.net/tag/stardust +http://www.semanlink.net/tag/stardust|broader_prefLabel|Comet Wild 2 +http://www.semanlink.net/tag/stardust|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/sgnn|creationTime|2018-11-02T23:50:37Z +http://www.semanlink.net/tag/sgnn|prefLabel|SGNN +http://www.semanlink.net/tag/sgnn|broader|http://www.semanlink.net/tag/on_device_nlp +http://www.semanlink.net/tag/sgnn|broader|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/tag/sgnn|related|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.semanlink.net/tag/sgnn|related|http://www.semanlink.net/tag/nlp_short_texts +http://www.semanlink.net/tag/sgnn|creationDate|2018-11-02 +http://www.semanlink.net/tag/sgnn|comment|"""Self-Governing Neural Networks""" +http://www.semanlink.net/tag/sgnn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sgnn|uri|http://www.semanlink.net/tag/sgnn +http://www.semanlink.net/tag/sgnn|broader_prefLabel|On device NLP +http://www.semanlink.net/tag/sgnn|broader_prefLabel|NLP@Google +http://www.semanlink.net/tag/photon|creationTime|2007-03-15T22:37:59Z +http://www.semanlink.net/tag/photon|prefLabel|Photon +http://www.semanlink.net/tag/photon|broader|http://www.semanlink.net/tag/physique_des_particules +http://www.semanlink.net/tag/photon|creationDate|2007-03-15 +http://www.semanlink.net/tag/photon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/photon|uri|http://www.semanlink.net/tag/photon +http://www.semanlink.net/tag/photon|broader_prefLabel|Physique des particules +http://www.semanlink.net/tag/text_mining|creationTime|2013-09-05T11:40:59Z +http://www.semanlink.net/tag/text_mining|prefLabel|Text mining +http://www.semanlink.net/tag/text_mining|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/text_mining|broader|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/text_mining|creationDate|2013-09-05 +http://www.semanlink.net/tag/text_mining|comment|Automatic extraction of information from written resources +http://www.semanlink.net/tag/text_mining|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/text_mining|describedBy|https://en.wikipedia.org/wiki/Text_mining +http://www.semanlink.net/tag/text_mining|uri|http://www.semanlink.net/tag/text_mining +http://www.semanlink.net/tag/text_mining|broader_prefLabel|NLP +http://www.semanlink.net/tag/text_mining|broader_prefLabel|Data mining +http://www.semanlink.net/tag/text_mining|broader_altLabel|TALN +http://www.semanlink.net/tag/text_mining|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/text_mining|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/ieml|creationTime|2007-06-27T21:33:56Z +http://www.semanlink.net/tag/ieml|prefLabel|IEML +http://www.semanlink.net/tag/ieml|related|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/ieml|creationDate|2007-06-27 +http://www.semanlink.net/tag/ieml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ieml|uri|http://www.semanlink.net/tag/ieml +http://www.semanlink.net/tag/propriete_intellectuelle|prefLabel|Propriété intellectuelle +http://www.semanlink.net/tag/propriete_intellectuelle|broader|http://www.semanlink.net/tag/juridique +http://www.semanlink.net/tag/propriete_intellectuelle|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/propriete_intellectuelle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/propriete_intellectuelle|uri|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.semanlink.net/tag/propriete_intellectuelle|broader_prefLabel|Juridique +http://www.semanlink.net/tag/propriete_intellectuelle|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/ginco_culture|creationTime|2014-07-26T02:21:27Z +http://www.semanlink.net/tag/ginco_culture|prefLabel|GINCO (Culture) +http://www.semanlink.net/tag/ginco_culture|broader|http://www.semanlink.net/tag/thesaurus +http://www.semanlink.net/tag/ginco_culture|broader|http://www.semanlink.net/tag/culture_et_sem_web +http://www.semanlink.net/tag/ginco_culture|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/ginco_culture|broader|http://www.semanlink.net/tag/ministere_de_la_culture +http://www.semanlink.net/tag/ginco_culture|related|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/ginco_culture|creationDate|2014-07-26 +http://www.semanlink.net/tag/ginco_culture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ginco_culture|uri|http://www.semanlink.net/tag/ginco_culture +http://www.semanlink.net/tag/ginco_culture|broader_prefLabel|Thesaurus +http://www.semanlink.net/tag/ginco_culture|broader_prefLabel|Culture et sem web +http://www.semanlink.net/tag/ginco_culture|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/ginco_culture|broader_prefLabel|Ministère de la culture +http://www.semanlink.net/tag/ginco_culture|broader_related|http://www.semanlink.net/tag/taxonomies +http://www.semanlink.net/tag/ginco_culture|broader_related|http://www.semanlink.net/tag/bertrand_sajus +http://www.semanlink.net/tag/epimorphics|creationTime|2011-01-24T19:02:22Z +http://www.semanlink.net/tag/epimorphics|prefLabel|Epimorphics +http://www.semanlink.net/tag/epimorphics|related|http://www.semanlink.net/tag/andy_seaborne +http://www.semanlink.net/tag/epimorphics|related|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/epimorphics|creationDate|2011-01-24 
+http://www.semanlink.net/tag/epimorphics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/epimorphics|uri|http://www.semanlink.net/tag/epimorphics +http://www.semanlink.net/tag/altruisme|creationTime|2012-11-26T12:09:29Z +http://www.semanlink.net/tag/altruisme|prefLabel|Altruisme +http://www.semanlink.net/tag/altruisme|creationDate|2012-11-26 +http://www.semanlink.net/tag/altruisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/altruisme|uri|http://www.semanlink.net/tag/altruisme +http://www.semanlink.net/tag/react_js|creationTime|2016-03-28T18:16:25Z +http://www.semanlink.net/tag/react_js|prefLabel|React.js +http://www.semanlink.net/tag/react_js|broader|http://www.semanlink.net/tag/javascript_librairies +http://www.semanlink.net/tag/react_js|broader|http://www.semanlink.net/tag/javascript_frameork +http://www.semanlink.net/tag/react_js|related|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/react_js|creationDate|2016-03-28 +http://www.semanlink.net/tag/react_js|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/react_js|describedBy|https://en.wikipedia.org/wiki/React_(JavaScript_library) +http://www.semanlink.net/tag/react_js|altLabel|ReactJS +http://www.semanlink.net/tag/react_js|uri|http://www.semanlink.net/tag/react_js +http://www.semanlink.net/tag/react_js|broader_prefLabel|JavaScript librairies +http://www.semanlink.net/tag/react_js|broader_prefLabel|Javascript framework +http://www.semanlink.net/tag/react_js|broader_altLabel|JavaScript framework +http://www.semanlink.net/tag/python|prefLabel|Python +http://www.semanlink.net/tag/python|broader|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/python|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/python|uri|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/python|broader_prefLabel|Programming language +http://www.semanlink.net/tag/python|broader_altLabel|Langage de programmation +http://www.semanlink.net/tag/musique|prefLabel|Musique +http://www.semanlink.net/tag/musique|broader|http://www.semanlink.net/tag/art +http://www.semanlink.net/tag/musique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/musique|altLabel|Music +http://www.semanlink.net/tag/musique|uri|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/musique|broader_prefLabel|Art +http://www.semanlink.net/tag/bernhard_haslhofer|creationTime|2013-05-30T09:26:57Z +http://www.semanlink.net/tag/bernhard_haslhofer|prefLabel|Bernhard Haslhofer +http://www.semanlink.net/tag/bernhard_haslhofer|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/bernhard_haslhofer|related|http://www.semanlink.net/tag/www_2013 +http://www.semanlink.net/tag/bernhard_haslhofer|creationDate|2013-05-30 +http://www.semanlink.net/tag/bernhard_haslhofer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bernhard_haslhofer|uri|http://www.semanlink.net/tag/bernhard_haslhofer +http://www.semanlink.net/tag/bernhard_haslhofer|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/authentication|creationTime|2008-04-07T22:44:10Z +http://www.semanlink.net/tag/authentication|prefLabel|Authentication +http://www.semanlink.net/tag/authentication|creationDate|2008-04-07 
+http://www.semanlink.net/tag/authentication|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/authentication|uri|http://www.semanlink.net/tag/authentication +http://www.semanlink.net/tag/payment|creationTime|2011-10-12T23:52:57Z +http://www.semanlink.net/tag/payment|prefLabel|Payment +http://www.semanlink.net/tag/payment|related|http://www.semanlink.net/tag/money +http://www.semanlink.net/tag/payment|creationDate|2011-10-12 +http://www.semanlink.net/tag/payment|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/payment|uri|http://www.semanlink.net/tag/payment +http://www.semanlink.net/tag/knowledge_management|prefLabel|Knowledge management +http://www.semanlink.net/tag/knowledge_management|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_management|uri|http://www.semanlink.net/tag/knowledge_management +http://www.semanlink.net/tag/semantic_web_application|prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/semantic_web_application|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_application|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_application|uri|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/semantic_web_application|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_application|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_application|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/crise_de_la_dette|creationTime|2011-10-02T16:06:45Z +http://www.semanlink.net/tag/crise_de_la_dette|prefLabel|Crise de la dette +http://www.semanlink.net/tag/crise_de_la_dette|broader|http://www.semanlink.net/tag/crise_financiere +http://www.semanlink.net/tag/crise_de_la_dette|creationDate|2011-10-02 +http://www.semanlink.net/tag/crise_de_la_dette|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/crise_de_la_dette|uri|http://www.semanlink.net/tag/crise_de_la_dette +http://www.semanlink.net/tag/crise_de_la_dette|broader_prefLabel|Crise financière +http://www.semanlink.net/tag/docker_volumes|creationTime|2016-04-13T17:57:29Z +http://www.semanlink.net/tag/docker_volumes|prefLabel|Docker-Volumes +http://www.semanlink.net/tag/docker_volumes|broader|http://www.semanlink.net/tag/docker +http://www.semanlink.net/tag/docker_volumes|creationDate|2016-04-13 +http://www.semanlink.net/tag/docker_volumes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/docker_volumes|uri|http://www.semanlink.net/tag/docker_volumes +http://www.semanlink.net/tag/docker_volumes|broader_prefLabel|Docker +http://www.semanlink.net/tag/franco_allemand|creationTime|2016-07-31T18:41:17Z +http://www.semanlink.net/tag/franco_allemand|prefLabel|Franco-Allemand +http://www.semanlink.net/tag/franco_allemand|broader|http://www.semanlink.net/tag/allemagne +http://www.semanlink.net/tag/franco_allemand|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/franco_allemand|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/franco_allemand|creationDate|2016-07-31 +http://www.semanlink.net/tag/franco_allemand|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/franco_allemand|uri|http://www.semanlink.net/tag/franco_allemand 
+http://www.semanlink.net/tag/franco_allemand|broader_prefLabel|Allemagne +http://www.semanlink.net/tag/franco_allemand|broader_prefLabel|France +http://www.semanlink.net/tag/franco_allemand|broader_prefLabel|Europe +http://www.semanlink.net/tag/franco_allemand|broader_altLabel|Germany +http://www.semanlink.net/tag/franco_allemand|broader_altLabel|Deutschland +http://www.semanlink.net/tag/shanghai|prefLabel|Shanghaï +http://www.semanlink.net/tag/shanghai|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/shanghai|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/shanghai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/shanghai|uri|http://www.semanlink.net/tag/shanghai +http://www.semanlink.net/tag/shanghai|broader_prefLabel|Ville +http://www.semanlink.net/tag/shanghai|broader_prefLabel|Chine +http://www.semanlink.net/tag/shanghai|broader_altLabel|China +http://www.semanlink.net/tag/education|prefLabel|Education +http://www.semanlink.net/tag/education|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/education|altLabel|Enseignement +http://www.semanlink.net/tag/education|uri|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/self_training|creationTime|2021-03-12T06:19:03Z +http://www.semanlink.net/tag/self_training|prefLabel|Self-training +http://www.semanlink.net/tag/self_training|creationDate|2021-03-12 +http://www.semanlink.net/tag/self_training|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/self_training|uri|http://www.semanlink.net/tag/self_training +http://www.semanlink.net/tag/optique|creationTime|2009-11-22T15:12:36Z +http://www.semanlink.net/tag/optique|prefLabel|Optique +http://www.semanlink.net/tag/optique|creationDate|2009-11-22 +http://www.semanlink.net/tag/optique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/optique|uri|http://www.semanlink.net/tag/optique +http://www.semanlink.net/tag/obscurantisme|creationTime|2012-06-30T16:09:31Z +http://www.semanlink.net/tag/obscurantisme|prefLabel|Obscurantisme +http://www.semanlink.net/tag/obscurantisme|creationDate|2012-06-30 +http://www.semanlink.net/tag/obscurantisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/obscurantisme|uri|http://www.semanlink.net/tag/obscurantisme +http://www.semanlink.net/tag/barcamp|creationTime|2008-05-31T14:21:05Z +http://www.semanlink.net/tag/barcamp|prefLabel|Barcamp +http://www.semanlink.net/tag/barcamp|broader|http://www.semanlink.net/tag/event +http://www.semanlink.net/tag/barcamp|creationDate|2008-05-31 +http://www.semanlink.net/tag/barcamp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/barcamp|uri|http://www.semanlink.net/tag/barcamp +http://www.semanlink.net/tag/barcamp|broader_prefLabel|Event +http://www.semanlink.net/tag/nlp_teams|creationTime|2020-01-12T10:52:37Z +http://www.semanlink.net/tag/nlp_teams|prefLabel|NLP Teams +http://www.semanlink.net/tag/nlp_teams|broader|http://www.semanlink.net/tag/ai_teams +http://www.semanlink.net/tag/nlp_teams|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_teams|creationDate|2020-01-12 +http://www.semanlink.net/tag/nlp_teams|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_teams|altLabel|NLP Groups +http://www.semanlink.net/tag/nlp_teams|uri|http://www.semanlink.net/tag/nlp_teams 
+http://www.semanlink.net/tag/nlp_teams|broader_prefLabel|AI teams +http://www.semanlink.net/tag/nlp_teams|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_teams|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_teams|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_teams|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/sparql_tutorial|creationTime|2007-07-07T13:46:00Z +http://www.semanlink.net/tag/sparql_tutorial|prefLabel|SPARQL Tutorial +http://www.semanlink.net/tag/sparql_tutorial|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_tutorial|broader|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/tag/sparql_tutorial|creationDate|2007-07-07 +http://www.semanlink.net/tag/sparql_tutorial|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_tutorial|uri|http://www.semanlink.net/tag/sparql_tutorial +http://www.semanlink.net/tag/sparql_tutorial|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/sparql_tutorial|broader_prefLabel|Tutorial +http://www.semanlink.net/tag/musicien|prefLabel|Musicien +http://www.semanlink.net/tag/musicien|broader|http://www.semanlink.net/tag/artiste +http://www.semanlink.net/tag/musicien|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/musicien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/musicien|uri|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/musicien|broader_prefLabel|Artiste +http://www.semanlink.net/tag/musicien|broader_prefLabel|Musique +http://www.semanlink.net/tag/musicien|broader_altLabel|Music +http://www.semanlink.net/tag/simile_exhibit|prefLabel|SIMILE Exhibit +http://www.semanlink.net/tag/simile_exhibit|broader|http://www.semanlink.net/tag/json +http://www.semanlink.net/tag/simile_exhibit|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/simile_exhibit|broader|http://www.semanlink.net/tag/simile +http://www.semanlink.net/tag/simile_exhibit|creationDate|2007-01-24 +http://www.semanlink.net/tag/simile_exhibit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/simile_exhibit|uri|http://www.semanlink.net/tag/simile_exhibit +http://www.semanlink.net/tag/simile_exhibit|broader_prefLabel|JSON +http://www.semanlink.net/tag/simile_exhibit|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/simile_exhibit|broader_prefLabel|SIMILE +http://www.semanlink.net/tag/simile_exhibit|broader_altLabel|js +http://www.semanlink.net/tag/antitrust|creationTime|2015-11-02T01:41:19Z +http://www.semanlink.net/tag/antitrust|prefLabel|Antitrust +http://www.semanlink.net/tag/antitrust|broader|http://www.semanlink.net/tag/capitalisme +http://www.semanlink.net/tag/antitrust|creationDate|2015-11-02 +http://www.semanlink.net/tag/antitrust|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antitrust|uri|http://www.semanlink.net/tag/antitrust +http://www.semanlink.net/tag/antitrust|broader_prefLabel|Capitalisme +http://www.semanlink.net/tag/retrieval_based_nlp|creationTime|2021-10-07T02:09:13Z +http://www.semanlink.net/tag/retrieval_based_nlp|prefLabel|Retrieval-based NLP +http://www.semanlink.net/tag/retrieval_based_nlp|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/retrieval_based_nlp|related|http://www.semanlink.net/tag/dense_passage_retrieval 
+http://www.semanlink.net/tag/retrieval_based_nlp|creationDate|2021-10-07
+http://www.semanlink.net/tag/retrieval_based_nlp|comment|Mentioned in [Building Scalable, Explainable, and Adaptive NLP Models with Retrieval SAIL Blog](doc:2021/10/building_scalable_explainable_)
+http://www.semanlink.net/tag/retrieval_based_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/retrieval_based_nlp|uri|http://www.semanlink.net/tag/retrieval_based_nlp
+http://www.semanlink.net/tag/retrieval_based_nlp|broader_prefLabel|NLP techniques
+http://www.semanlink.net/tag/internet_explorer|prefLabel|Internet Explorer
+http://www.semanlink.net/tag/internet_explorer|broader|http://www.semanlink.net/tag/microsoft
+http://www.semanlink.net/tag/internet_explorer|broader|http://www.semanlink.net/tag/brouteur
+http://www.semanlink.net/tag/internet_explorer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/internet_explorer|uri|http://www.semanlink.net/tag/internet_explorer
+http://www.semanlink.net/tag/internet_explorer|broader_prefLabel|Microsoft
+http://www.semanlink.net/tag/internet_explorer|broader_prefLabel|Brouteur
+http://www.semanlink.net/tag/internet_explorer|broader_altLabel|Browser
+http://www.semanlink.net/tag/internet_explorer|broader_related|http://www.semanlink.net/tag/bill_gates
+http://www.semanlink.net/tag/topquadrant|creationTime|2010-08-24T22:42:03Z
+http://www.semanlink.net/tag/topquadrant|prefLabel|TopQuadrant
+http://www.semanlink.net/tag/topquadrant|broader|http://www.semanlink.net/tag/semantic_web_company
+http://www.semanlink.net/tag/topquadrant|creationDate|2010-08-24
+http://www.semanlink.net/tag/topquadrant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/topquadrant|altLabel|www.topquadrant.com
+http://www.semanlink.net/tag/topquadrant|uri|http://www.semanlink.net/tag/topquadrant
+http://www.semanlink.net/tag/topquadrant|broader_prefLabel|Semantic web company
+http://www.semanlink.net/tag/topquadrant|broader_altLabel|Semantic Web : entreprise
+http://www.semanlink.net/tag/topquadrant|broader_altLabel|Semantic Web: enterprise
+http://www.semanlink.net/tag/extinction_des_dinosaures|prefLabel|Extinction des dinosaures
+http://www.semanlink.net/tag/extinction_des_dinosaures|broader|http://www.semanlink.net/tag/meteorite
+http://www.semanlink.net/tag/extinction_des_dinosaures|broader|http://www.semanlink.net/tag/extinction_de_masse
+http://www.semanlink.net/tag/extinction_des_dinosaures|broader|http://www.semanlink.net/tag/dinosaures
+http://www.semanlink.net/tag/extinction_des_dinosaures|broader|http://www.semanlink.net/tag/catastrophe_naturelle
+http://www.semanlink.net/tag/extinction_des_dinosaures|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/extinction_des_dinosaures|uri|http://www.semanlink.net/tag/extinction_des_dinosaures
+http://www.semanlink.net/tag/extinction_des_dinosaures|broader_prefLabel|Météorite
+http://www.semanlink.net/tag/extinction_des_dinosaures|broader_prefLabel|Extinction de masse
+http://www.semanlink.net/tag/extinction_des_dinosaures|broader_prefLabel|Dinosaures
+http://www.semanlink.net/tag/extinction_des_dinosaures|broader_prefLabel|Catastrophe naturelle
+http://www.semanlink.net/tag/venus_divinite|creationTime|2007-03-22T22:18:33Z
+http://www.semanlink.net/tag/venus_divinite|prefLabel|Vénus (divinité)
+http://www.semanlink.net/tag/venus_divinite|creationDate|2007-03-22
+http://www.semanlink.net/tag/venus_divinite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/venus_divinite|uri|http://www.semanlink.net/tag/venus_divinite +http://www.semanlink.net/tag/webid|creationTime|2011-01-18T09:17:42Z +http://www.semanlink.net/tag/webid|prefLabel|WebID +http://www.semanlink.net/tag/webid|related|http://www.semanlink.net/tag/henry_story +http://www.semanlink.net/tag/webid|creationDate|2011-01-18 +http://www.semanlink.net/tag/webid|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/webid|uri|http://www.semanlink.net/tag/webid +http://www.semanlink.net/tag/future_of_the_web|creationTime|2017-11-16T22:58:57Z +http://www.semanlink.net/tag/future_of_the_web|prefLabel|Future of the web +http://www.semanlink.net/tag/future_of_the_web|broader|http://www.semanlink.net/tag/web +http://www.semanlink.net/tag/future_of_the_web|creationDate|2017-11-16 +http://www.semanlink.net/tag/future_of_the_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/future_of_the_web|uri|http://www.semanlink.net/tag/future_of_the_web +http://www.semanlink.net/tag/future_of_the_web|broader_prefLabel|Web +http://www.semanlink.net/tag/kurdes|prefLabel|Kurdes +http://www.semanlink.net/tag/kurdes|broader|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/kurdes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kurdes|uri|http://www.semanlink.net/tag/kurdes +http://www.semanlink.net/tag/kurdes|broader_prefLabel|Peuples +http://www.semanlink.net/tag/memory_in_deep_learning|creationTime|2018-12-02T10:00:08Z +http://www.semanlink.net/tag/memory_in_deep_learning|prefLabel|Memory in deep learning +http://www.semanlink.net/tag/memory_in_deep_learning|broader|http://www.semanlink.net/tag/memoire_informatique +http://www.semanlink.net/tag/memory_in_deep_learning|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/memory_in_deep_learning|related|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/tag/memory_in_deep_learning|creationDate|2018-12-02 +http://www.semanlink.net/tag/memory_in_deep_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/memory_in_deep_learning|uri|http://www.semanlink.net/tag/memory_in_deep_learning +http://www.semanlink.net/tag/memory_in_deep_learning|broader_prefLabel|Mémoire (informatique) +http://www.semanlink.net/tag/memory_in_deep_learning|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/memory_in_deep_learning|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/memory_in_deep_learning|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/agriculture|prefLabel|Agriculture +http://www.semanlink.net/tag/agriculture|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/agriculture|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/agriculture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/agriculture|uri|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/agriculture|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/agriculture|broader_prefLabel|Economie +http://www.semanlink.net/tag/agriculture|broader_related|http://www.semanlink.net/tag/entreprise 
+http://www.semanlink.net/tag/reddit|creationTime|2021-01-26T21:03:34Z +http://www.semanlink.net/tag/reddit|prefLabel|Reddit +http://www.semanlink.net/tag/reddit|creationDate|2021-01-26 +http://www.semanlink.net/tag/reddit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reddit|uri|http://www.semanlink.net/tag/reddit +http://www.semanlink.net/tag/bien_envoye|creationTime|2009-05-12T23:03:14Z +http://www.semanlink.net/tag/bien_envoye|prefLabel|Bien envoyé +http://www.semanlink.net/tag/bien_envoye|creationDate|2009-05-12 +http://www.semanlink.net/tag/bien_envoye|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bien_envoye|uri|http://www.semanlink.net/tag/bien_envoye +http://www.semanlink.net/tag/luxembourg|creationTime|2021-02-08T21:06:33Z +http://www.semanlink.net/tag/luxembourg|prefLabel|Luxembourg +http://www.semanlink.net/tag/luxembourg|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/luxembourg|creationDate|2021-02-08 +http://www.semanlink.net/tag/luxembourg|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/luxembourg|describedBy|https://fr.wikipedia.org/wiki/Luxembourg +http://www.semanlink.net/tag/luxembourg|uri|http://www.semanlink.net/tag/luxembourg +http://www.semanlink.net/tag/luxembourg|broader_prefLabel|Europe +http://www.semanlink.net/tag/baobab|prefLabel|Baobab +http://www.semanlink.net/tag/baobab|broader|http://www.semanlink.net/tag/arbres +http://www.semanlink.net/tag/baobab|comment|La vie est douce près d'un baobab +http://www.semanlink.net/tag/baobab|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/baobab|uri|http://www.semanlink.net/tag/baobab +http://www.semanlink.net/tag/baobab|broader_prefLabel|Arbres +http://www.semanlink.net/tag/soren_auer|creationTime|2011-05-12T22:04:12Z +http://www.semanlink.net/tag/soren_auer|prefLabel|Sören Auer +http://www.semanlink.net/tag/soren_auer|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/soren_auer|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/soren_auer|related|http://www.semanlink.net/tag/eccenca +http://www.semanlink.net/tag/soren_auer|creationDate|2011-05-12 +http://www.semanlink.net/tag/soren_auer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/soren_auer|uri|http://www.semanlink.net/tag/soren_auer +http://www.semanlink.net/tag/soren_auer|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/soren_auer|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/soren_auer|broader_altLabel|Technical guys +http://www.semanlink.net/tag/ai_book|creationTime|2017-12-16T14:26:46Z +http://www.semanlink.net/tag/ai_book|prefLabel|AI: books & journals +http://www.semanlink.net/tag/ai_book|broader|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/ai_book|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_book|creationDate|2017-12-16 +http://www.semanlink.net/tag/ai_book|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_book|uri|http://www.semanlink.net/tag/ai_book +http://www.semanlink.net/tag/ai_book|broader_prefLabel|Livre +http://www.semanlink.net/tag/ai_book|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_book|broader_altLabel|Livres 
+http://www.semanlink.net/tag/ai_book|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_book|broader_altLabel|AI +http://www.semanlink.net/tag/ai_book|broader_altLabel|IA +http://www.semanlink.net/tag/ai_book|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/data_warehouse|creationTime|2011-01-08T00:01:02Z +http://www.semanlink.net/tag/data_warehouse|prefLabel|Data Warehouse +http://www.semanlink.net/tag/data_warehouse|broader|http://www.semanlink.net/tag/enterprise_data +http://www.semanlink.net/tag/data_warehouse|related|http://www.semanlink.net/tag/etl +http://www.semanlink.net/tag/data_warehouse|creationDate|2011-01-08 +http://www.semanlink.net/tag/data_warehouse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_warehouse|uri|http://www.semanlink.net/tag/data_warehouse +http://www.semanlink.net/tag/data_warehouse|broader_prefLabel|Enterprise Data +http://www.semanlink.net/tag/cosmic_microwave_background|creationTime|2013-03-21T13:59:33Z +http://www.semanlink.net/tag/cosmic_microwave_background|prefLabel|Cosmic microwave background +http://www.semanlink.net/tag/cosmic_microwave_background|broader|http://www.semanlink.net/tag/big_bang +http://www.semanlink.net/tag/cosmic_microwave_background|creationDate|2013-03-21 +http://www.semanlink.net/tag/cosmic_microwave_background|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cosmic_microwave_background|uri|http://www.semanlink.net/tag/cosmic_microwave_background +http://www.semanlink.net/tag/cosmic_microwave_background|broader_prefLabel|Big bang +http://www.semanlink.net/tag/nlp_introduction|creationTime|2018-07-23T12:58:05Z +http://www.semanlink.net/tag/nlp_introduction|prefLabel|NLP: introduction +http://www.semanlink.net/tag/nlp_introduction|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_introduction|creationDate|2018-07-23 +http://www.semanlink.net/tag/nlp_introduction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_introduction|uri|http://www.semanlink.net/tag/nlp_introduction +http://www.semanlink.net/tag/nlp_introduction|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_introduction|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_introduction|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_introduction|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/xgboost|creationTime|2018-11-13T18:31:36Z +http://www.semanlink.net/tag/xgboost|prefLabel|xgboost +http://www.semanlink.net/tag/xgboost|broader|http://www.semanlink.net/tag/gradient_boosting +http://www.semanlink.net/tag/xgboost|creationDate|2018-11-13 +http://www.semanlink.net/tag/xgboost|comment|open-source framework for gradient boosting (java, python, etc) +http://www.semanlink.net/tag/xgboost|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xgboost|describedBy|https://en.wikipedia.org/wiki/XGBoost +http://www.semanlink.net/tag/xgboost|uri|http://www.semanlink.net/tag/xgboost +http://www.semanlink.net/tag/xgboost|broader_prefLabel|Gradient boosting +http://www.semanlink.net/tag/lac_de_lave|creationTime|2008-11-21T23:25:54Z +http://www.semanlink.net/tag/lac_de_lave|prefLabel|Lac de lave +http://www.semanlink.net/tag/lac_de_lave|broader|http://www.semanlink.net/tag/volcan +http://www.semanlink.net/tag/lac_de_lave|creationDate|2008-11-21 
+http://www.semanlink.net/tag/lac_de_lave|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lac_de_lave|uri|http://www.semanlink.net/tag/lac_de_lave +http://www.semanlink.net/tag/lac_de_lave|broader_prefLabel|Volcan +http://www.semanlink.net/tag/missoula_floods|creationTime|2008-08-25T14:08:19Z +http://www.semanlink.net/tag/missoula_floods|prefLabel|Missoula Floods +http://www.semanlink.net/tag/missoula_floods|broader|http://www.semanlink.net/tag/periodes_glacieres +http://www.semanlink.net/tag/missoula_floods|broader|http://www.semanlink.net/tag/catastrophe_naturelle +http://www.semanlink.net/tag/missoula_floods|creationDate|2008-08-25 +http://www.semanlink.net/tag/missoula_floods|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/missoula_floods|describedBy|https://en.wikipedia.org/wiki/Missoula_Floods +http://www.semanlink.net/tag/missoula_floods|uri|http://www.semanlink.net/tag/missoula_floods +http://www.semanlink.net/tag/missoula_floods|broader_prefLabel|Périodes glacières +http://www.semanlink.net/tag/missoula_floods|broader_prefLabel|Catastrophe naturelle +http://www.semanlink.net/tag/missoula_floods|broader_related|http://www.semanlink.net/tag/glacier +http://www.semanlink.net/tag/python_sample_code|creationTime|2017-06-14T01:22:30Z +http://www.semanlink.net/tag/python_sample_code|prefLabel|Python sample code +http://www.semanlink.net/tag/python_sample_code|broader|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/tag/python_sample_code|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/python_sample_code|creationDate|2017-06-14 +http://www.semanlink.net/tag/python_sample_code|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/python_sample_code|uri|http://www.semanlink.net/tag/python_sample_code +http://www.semanlink.net/tag/python_sample_code|broader_prefLabel|Sample code +http://www.semanlink.net/tag/python_sample_code|broader_prefLabel|Python +http://www.semanlink.net/tag/catalhoyuk|creationTime|2017-05-13T18:40:52Z +http://www.semanlink.net/tag/catalhoyuk|prefLabel|Çatalhöyük +http://www.semanlink.net/tag/catalhoyuk|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/catalhoyuk|broader|http://www.semanlink.net/tag/asie_mineure +http://www.semanlink.net/tag/catalhoyuk|broader|http://www.semanlink.net/tag/neolithique +http://www.semanlink.net/tag/catalhoyuk|creationDate|2017-05-13 +http://www.semanlink.net/tag/catalhoyuk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/catalhoyuk|describedBy|https://en.wikipedia.org/wiki/%C3%87atalh%C3%B6y%C3%BCk +http://www.semanlink.net/tag/catalhoyuk|uri|http://www.semanlink.net/tag/catalhoyuk +http://www.semanlink.net/tag/catalhoyuk|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/catalhoyuk|broader_prefLabel|Asie mineure +http://www.semanlink.net/tag/catalhoyuk|broader_prefLabel|Néolithique +http://www.semanlink.net/tag/catalhoyuk|broader_altLabel|Anatolie +http://www.semanlink.net/tag/catalhoyuk|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/description_logic|prefLabel|Description Logic +http://www.semanlink.net/tag/description_logic|broader|http://www.semanlink.net/tag/formal_knowledge_representation_language +http://www.semanlink.net/tag/description_logic|broader|http://www.semanlink.net/tag/logic 
+http://www.semanlink.net/tag/description_logic|related|http://www.semanlink.net/tag/first_order_logic
+http://www.semanlink.net/tag/description_logic|related|http://www.semanlink.net/tag/owl_dl
+http://www.semanlink.net/tag/description_logic|creationDate|2006-12-16
+http://www.semanlink.net/tag/description_logic|comment|"A family of formal knowledge representation languages. Models concepts, roles and individuals, and their relationships. More expressive than propositional logic but has more efficient decision problems than first-order predicate logic.
+"
+http://www.semanlink.net/tag/description_logic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/description_logic|describedBy|https://en.wikipedia.org/wiki/Description_logic
+http://www.semanlink.net/tag/description_logic|uri|http://www.semanlink.net/tag/description_logic
+http://www.semanlink.net/tag/description_logic|broader_prefLabel|Formal knowledge representation language
+http://www.semanlink.net/tag/description_logic|broader_prefLabel|Logic
+http://www.semanlink.net/tag/goodrelations|creationTime|2010-05-03T11:37:42Z
+http://www.semanlink.net/tag/goodrelations|prefLabel|GoodRelations
+http://www.semanlink.net/tag/goodrelations|broader|http://www.semanlink.net/tag/ontologies
+http://www.semanlink.net/tag/goodrelations|broader|http://www.semanlink.net/tag/semantic_seo
+http://www.semanlink.net/tag/goodrelations|related|http://www.semanlink.net/tag/martin_hepp
+http://www.semanlink.net/tag/goodrelations|creationDate|2010-05-03
+http://www.semanlink.net/tag/goodrelations|comment|An ontology for linking product descriptions and business entities on the Web
+http://www.semanlink.net/tag/goodrelations|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/goodrelations|describedBy|http://www.heppnetz.de/projects/goodrelations/
+http://www.semanlink.net/tag/goodrelations|uri|http://www.semanlink.net/tag/goodrelations
+http://www.semanlink.net/tag/goodrelations|broader_prefLabel|Ontologies
+http://www.semanlink.net/tag/goodrelations|broader_prefLabel|Semantic SEO
+http://www.semanlink.net/tag/goodrelations|broader_altLabel|Ontology
+http://www.semanlink.net/tag/i_like|prefLabel|I like
+http://www.semanlink.net/tag/i_like|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/i_like|uri|http://www.semanlink.net/tag/i_like
+http://www.semanlink.net/tag/ressources_halieutiques|prefLabel|Ressources halieutiques
+http://www.semanlink.net/tag/ressources_halieutiques|broader|http://www.semanlink.net/tag/peche
+http://www.semanlink.net/tag/ressources_halieutiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ressources_halieutiques|uri|http://www.semanlink.net/tag/ressources_halieutiques
+http://www.semanlink.net/tag/ressources_halieutiques|broader_prefLabel|Pêche
+http://www.semanlink.net/tag/future_combat_systems|prefLabel|Future Combat Systems
+http://www.semanlink.net/tag/future_combat_systems|broader|http://www.semanlink.net/tag/armement
+http://www.semanlink.net/tag/future_combat_systems|creationDate|2006-11-06
+http://www.semanlink.net/tag/future_combat_systems|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/future_combat_systems|uri|http://www.semanlink.net/tag/future_combat_systems
+http://www.semanlink.net/tag/future_combat_systems|broader_prefLabel|Armement
+http://www.semanlink.net/tag/finlande|prefLabel|Finlande +http://www.semanlink.net/tag/finlande|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/finlande|broader|http://www.semanlink.net/tag/scandinavie +http://www.semanlink.net/tag/finlande|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/finlande|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/finlande|uri|http://www.semanlink.net/tag/finlande +http://www.semanlink.net/tag/finlande|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/finlande|broader_prefLabel|Scandinavie +http://www.semanlink.net/tag/finlande|broader_prefLabel|Europe +http://www.semanlink.net/tag/jermakoye|prefLabel|Djermakoye +http://www.semanlink.net/tag/jermakoye|broader|http://www.semanlink.net/tag/dosso +http://www.semanlink.net/tag/jermakoye|broader|http://www.semanlink.net/tag/jerma +http://www.semanlink.net/tag/jermakoye|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jermakoye|altLabel|Zarmakoy +http://www.semanlink.net/tag/jermakoye|altLabel|Jermakoy +http://www.semanlink.net/tag/jermakoye|altLabel|Jermakoye +http://www.semanlink.net/tag/jermakoye|uri|http://www.semanlink.net/tag/jermakoye +http://www.semanlink.net/tag/jermakoye|broader_prefLabel|Dosso +http://www.semanlink.net/tag/jermakoye|broader_prefLabel|Jerma +http://www.semanlink.net/tag/jermakoye|broader_altLabel|Djerma +http://www.semanlink.net/tag/jermakoye|broader_altLabel|Zarma +http://www.semanlink.net/tag/jupiter|prefLabel|Jupiter +http://www.semanlink.net/tag/jupiter|broader|http://www.semanlink.net/tag/systeme_solaire +http://www.semanlink.net/tag/jupiter|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jupiter|uri|http://www.semanlink.net/tag/jupiter +http://www.semanlink.net/tag/jupiter|broader_prefLabel|Système solaire +http://www.semanlink.net/tag/bas_salaires|prefLabel|Bas salaires +http://www.semanlink.net/tag/bas_salaires|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/bas_salaires|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bas_salaires|uri|http://www.semanlink.net/tag/bas_salaires +http://www.semanlink.net/tag/bas_salaires|broader_prefLabel|Economie +http://www.semanlink.net/tag/bas_salaires|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/gpl|prefLabel|GPL +http://www.semanlink.net/tag/gpl|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/gpl|creationDate|2006-08-19 +http://www.semanlink.net/tag/gpl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gpl|uri|http://www.semanlink.net/tag/gpl +http://www.semanlink.net/tag/gpl|broader_prefLabel|Open Source +http://www.semanlink.net/tag/asteroide|prefLabel|Astéroïde +http://www.semanlink.net/tag/asteroide|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/asteroide|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/asteroide|uri|http://www.semanlink.net/tag/asteroide +http://www.semanlink.net/tag/asteroide|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/machine_translation|creationTime|2016-01-03T14:28:38Z +http://www.semanlink.net/tag/machine_translation|prefLabel|Machine translation +http://www.semanlink.net/tag/machine_translation|broader|http://www.semanlink.net/tag/nlp 
+http://www.semanlink.net/tag/machine_translation|broader|http://www.semanlink.net/tag/cross_lingual_nlp +http://www.semanlink.net/tag/machine_translation|creationDate|2016-01-03 +http://www.semanlink.net/tag/machine_translation|comment|sub-field of computational linguistics that investigates the use of software to translate text or speech from one language to another +http://www.semanlink.net/tag/machine_translation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/machine_translation|describedBy|https://en.wikipedia.org/wiki/Machine_translation +http://www.semanlink.net/tag/machine_translation|altLabel|Traduction automatique +http://www.semanlink.net/tag/machine_translation|uri|http://www.semanlink.net/tag/machine_translation +http://www.semanlink.net/tag/machine_translation|broader_prefLabel|NLP +http://www.semanlink.net/tag/machine_translation|broader_prefLabel|Cross-lingual NLP +http://www.semanlink.net/tag/machine_translation|broader_altLabel|TALN +http://www.semanlink.net/tag/machine_translation|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/machine_translation|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/machine_translation|broader_related|http://www.semanlink.net/tag/nlp_french +http://www.semanlink.net/tag/semencier|creationTime|2012-12-09T11:57:32Z +http://www.semanlink.net/tag/semencier|prefLabel|Semencier +http://www.semanlink.net/tag/semencier|broader|http://www.semanlink.net/tag/agriculture_industrielle +http://www.semanlink.net/tag/semencier|creationDate|2012-12-09 +http://www.semanlink.net/tag/semencier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semencier|uri|http://www.semanlink.net/tag/semencier +http://www.semanlink.net/tag/semencier|broader_prefLabel|Agriculture industrielle +http://www.semanlink.net/tag/republique_tcheque|prefLabel|République Tchèque +http://www.semanlink.net/tag/republique_tcheque|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/republique_tcheque|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/republique_tcheque|creationDate|2006-09-11 +http://www.semanlink.net/tag/republique_tcheque|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/republique_tcheque|uri|http://www.semanlink.net/tag/republique_tcheque +http://www.semanlink.net/tag/republique_tcheque|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/republique_tcheque|broader_prefLabel|Europe +http://www.semanlink.net/tag/fukushima|creationTime|2011-04-19T09:27:35Z +http://www.semanlink.net/tag/fukushima|prefLabel|Fukushima +http://www.semanlink.net/tag/fukushima|broader|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/tag/fukushima|broader|http://www.semanlink.net/tag/japon +http://www.semanlink.net/tag/fukushima|broader|http://www.semanlink.net/tag/catastrophe_ecologique +http://www.semanlink.net/tag/fukushima|broader|http://www.semanlink.net/tag/catastrophe_industrielle +http://www.semanlink.net/tag/fukushima|creationDate|2011-04-19 +http://www.semanlink.net/tag/fukushima|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fukushima|uri|http://www.semanlink.net/tag/fukushima +http://www.semanlink.net/tag/fukushima|broader_prefLabel|Industrie nucléaire +http://www.semanlink.net/tag/fukushima|broader_prefLabel|Japon 
+http://www.semanlink.net/tag/fukushima|broader_prefLabel|Catastrophe écologique +http://www.semanlink.net/tag/fukushima|broader_prefLabel|Catastrophe industrielle +http://www.semanlink.net/tag/fukushima|broader_altLabel|Nucléaire +http://www.semanlink.net/tag/fukushima|broader_altLabel|Japan +http://www.semanlink.net/tag/fukushima|broader_altLabel|Désastre écologique +http://www.semanlink.net/tag/billionaires|creationTime|2014-07-29T19:20:16Z +http://www.semanlink.net/tag/billionaires|prefLabel|Billionaires +http://www.semanlink.net/tag/billionaires|creationDate|2014-07-29 +http://www.semanlink.net/tag/billionaires|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/billionaires|uri|http://www.semanlink.net/tag/billionaires +http://www.semanlink.net/tag/apple_carplay|creationTime|2014-03-05T00:05:25Z +http://www.semanlink.net/tag/apple_carplay|prefLabel|Apple CarPlay +http://www.semanlink.net/tag/apple_carplay|broader|http://www.semanlink.net/tag/automobile_2_0 +http://www.semanlink.net/tag/apple_carplay|broader|http://www.semanlink.net/tag/iphone +http://www.semanlink.net/tag/apple_carplay|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/apple_carplay|creationDate|2014-03-05 +http://www.semanlink.net/tag/apple_carplay|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apple_carplay|uri|http://www.semanlink.net/tag/apple_carplay +http://www.semanlink.net/tag/apple_carplay|broader_prefLabel|Automobile 2.0 +http://www.semanlink.net/tag/apple_carplay|broader_prefLabel|iphone +http://www.semanlink.net/tag/apple_carplay|broader_prefLabel|Automobile +http://www.semanlink.net/tag/apple_carplay|broader_altLabel|Automotive +http://www.semanlink.net/tag/andrej_karpathy|creationTime|2017-08-27T01:23:04Z +http://www.semanlink.net/tag/andrej_karpathy|prefLabel|Andrej Karpathy +http://www.semanlink.net/tag/andrej_karpathy|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/andrej_karpathy|related|http://www.semanlink.net/tag/stanford +http://www.semanlink.net/tag/andrej_karpathy|creationDate|2017-08-27 +http://www.semanlink.net/tag/andrej_karpathy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/andrej_karpathy|homepage|http://cs.stanford.edu/people/karpathy/ +http://www.semanlink.net/tag/andrej_karpathy|uri|http://www.semanlink.net/tag/andrej_karpathy +http://www.semanlink.net/tag/andrej_karpathy|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/compatibilite_javascript|creationTime|2007-11-30T16:37:52Z +http://www.semanlink.net/tag/compatibilite_javascript|prefLabel|Compatibilité Javascript +http://www.semanlink.net/tag/compatibilite_javascript|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/compatibilite_javascript|creationDate|2007-11-30 +http://www.semanlink.net/tag/compatibilite_javascript|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/compatibilite_javascript|uri|http://www.semanlink.net/tag/compatibilite_javascript +http://www.semanlink.net/tag/compatibilite_javascript|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/compatibilite_javascript|broader_altLabel|js +http://www.semanlink.net/tag/triple_classification|creationTime|2020-08-30T19:11:23Z +http://www.semanlink.net/tag/triple_classification|prefLabel|Triple Classification +http://www.semanlink.net/tag/triple_classification|broader|http://www.semanlink.net/tag/kg_tasks 
+http://www.semanlink.net/tag/triple_classification|related|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/triple_classification|related|http://www.semanlink.net/tag/link_prediction +http://www.semanlink.net/tag/triple_classification|creationDate|2020-08-30 +http://www.semanlink.net/tag/triple_classification|comment|Determine the truth value of an unknown Triple +http://www.semanlink.net/tag/triple_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/triple_classification|uri|http://www.semanlink.net/tag/triple_classification +http://www.semanlink.net/tag/triple_classification|broader_prefLabel|KG: tasks +http://www.semanlink.net/tag/triple_classification|broader_altLabel|Knowledge graphs: tasks +http://www.semanlink.net/tag/film|prefLabel|Film +http://www.semanlink.net/tag/film|broader|http://www.semanlink.net/tag/cinema +http://www.semanlink.net/tag/film|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film|uri|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film|broader_prefLabel|Cinéma +http://www.semanlink.net/tag/enseignement_scientifique|prefLabel|Enseignement scientifique +http://www.semanlink.net/tag/enseignement_scientifique|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/enseignement_scientifique|broader|http://www.semanlink.net/tag/science +http://www.semanlink.net/tag/enseignement_scientifique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enseignement_scientifique|altLabel|Etudes scientifiques +http://www.semanlink.net/tag/enseignement_scientifique|uri|http://www.semanlink.net/tag/enseignement_scientifique +http://www.semanlink.net/tag/enseignement_scientifique|broader_prefLabel|Education +http://www.semanlink.net/tag/enseignement_scientifique|broader_prefLabel|Science +http://www.semanlink.net/tag/enseignement_scientifique|broader_altLabel|Enseignement +http://www.semanlink.net/tag/enseignement_scientifique|broader_altLabel|sciences +http://www.semanlink.net/tag/tomcat|prefLabel|Tomcat +http://www.semanlink.net/tag/tomcat|broader|http://www.semanlink.net/tag/servlet +http://www.semanlink.net/tag/tomcat|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/tomcat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tomcat|uri|http://www.semanlink.net/tag/tomcat +http://www.semanlink.net/tag/tomcat|broader_prefLabel|Servlet +http://www.semanlink.net/tag/tomcat|broader_prefLabel|apache.org +http://www.semanlink.net/tag/nombres_premiers|prefLabel|Nombres premiers +http://www.semanlink.net/tag/nombres_premiers|broader|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/nombres_premiers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nombres_premiers|uri|http://www.semanlink.net/tag/nombres_premiers +http://www.semanlink.net/tag/nombres_premiers|broader_prefLabel|Mathématiques +http://www.semanlink.net/tag/nombres_premiers|broader_altLabel|Math +http://www.semanlink.net/tag/eswc_2012|creationTime|2011-12-16T23:48:53Z +http://www.semanlink.net/tag/eswc_2012|prefLabel|ESWC 2012 +http://www.semanlink.net/tag/eswc_2012|broader|http://www.semanlink.net/tag/eswc +http://www.semanlink.net/tag/eswc_2012|creationDate|2011-12-16 +http://www.semanlink.net/tag/eswc_2012|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/eswc_2012|homepage|http://2012.eswc-conferences.org +http://www.semanlink.net/tag/eswc_2012|uri|http://www.semanlink.net/tag/eswc_2012 +http://www.semanlink.net/tag/eswc_2012|broader_prefLabel|ESWC +http://www.semanlink.net/tag/enterprise_knowledge_graph|creationTime|2016-10-14T12:41:54Z +http://www.semanlink.net/tag/enterprise_knowledge_graph|prefLabel|Enterprise Knowledge Graph +http://www.semanlink.net/tag/enterprise_knowledge_graph|broader|http://www.semanlink.net/tag/semantic_enterprise_architecture +http://www.semanlink.net/tag/enterprise_knowledge_graph|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/enterprise_knowledge_graph|creationDate|2016-10-14 +http://www.semanlink.net/tag/enterprise_knowledge_graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enterprise_knowledge_graph|uri|http://www.semanlink.net/tag/enterprise_knowledge_graph +http://www.semanlink.net/tag/enterprise_knowledge_graph|broader_prefLabel|Semantic Enterprise Architecture +http://www.semanlink.net/tag/enterprise_knowledge_graph|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/enterprise_knowledge_graph|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/enterprise_knowledge_graph|broader_altLabel|KG +http://www.semanlink.net/tag/internet_related_technologies|prefLabel|Internet Related Technologies +http://www.semanlink.net/tag/internet_related_technologies|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/internet_related_technologies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/internet_related_technologies|uri|http://www.semanlink.net/tag/internet_related_technologies +http://www.semanlink.net/tag/internet_related_technologies|broader_prefLabel|Internet +http://www.semanlink.net/tag/openrefine|creationTime|2014-02-13T22:47:16Z +http://www.semanlink.net/tag/openrefine|prefLabel|OpenRefine +http://www.semanlink.net/tag/openrefine|creationDate|2014-02-13 +http://www.semanlink.net/tag/openrefine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/openrefine|homepage|http://openrefine.org +http://www.semanlink.net/tag/openrefine|uri|http://www.semanlink.net/tag/openrefine +http://www.semanlink.net/tag/montagne|prefLabel|Montagne +http://www.semanlink.net/tag/montagne|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/montagne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/montagne|uri|http://www.semanlink.net/tag/montagne +http://www.semanlink.net/tag/montagne|broader_prefLabel|Géographie +http://www.semanlink.net/tag/marlon_brando|creationTime|2017-02-05T00:58:07Z +http://www.semanlink.net/tag/marlon_brando|prefLabel|Marlon Brando +http://www.semanlink.net/tag/marlon_brando|broader|http://www.semanlink.net/tag/acteur +http://www.semanlink.net/tag/marlon_brando|creationDate|2017-02-05 +http://www.semanlink.net/tag/marlon_brando|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/marlon_brando|describedBy|https://en.wikipedia.org/wiki/Marlon_Brando +http://www.semanlink.net/tag/marlon_brando|uri|http://www.semanlink.net/tag/marlon_brando +http://www.semanlink.net/tag/marlon_brando|broader_prefLabel|Acteur +http://www.semanlink.net/tag/public_linked_json_w3_org|creationTime|2015-02-19T01:32:37Z 
+http://www.semanlink.net/tag/public_linked_json_w3_org|prefLabel|public-linked-json@w3.org +http://www.semanlink.net/tag/public_linked_json_w3_org|broader|http://www.semanlink.net/tag/mailing_list +http://www.semanlink.net/tag/public_linked_json_w3_org|creationDate|2015-02-19 +http://www.semanlink.net/tag/public_linked_json_w3_org|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/public_linked_json_w3_org|uri|http://www.semanlink.net/tag/public_linked_json_w3_org +http://www.semanlink.net/tag/public_linked_json_w3_org|broader_prefLabel|Mailing list +http://www.semanlink.net/tag/quoc_le|creationTime|2020-03-17T21:58:32Z +http://www.semanlink.net/tag/quoc_le|prefLabel|Quoc V. Le +http://www.semanlink.net/tag/quoc_le|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/quoc_le|related|http://www.semanlink.net/tag/google_brain +http://www.semanlink.net/tag/quoc_le|creationDate|2020-03-17 +http://www.semanlink.net/tag/quoc_le|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/quoc_le|altLabel|Quoc Le +http://www.semanlink.net/tag/quoc_le|uri|http://www.semanlink.net/tag/quoc_le +http://www.semanlink.net/tag/quoc_le|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/iapetus|creationTime|2007-09-13T22:31:30Z +http://www.semanlink.net/tag/iapetus|prefLabel|Iapetus +http://www.semanlink.net/tag/iapetus|broader|http://www.semanlink.net/tag/saturne +http://www.semanlink.net/tag/iapetus|creationDate|2007-09-13 +http://www.semanlink.net/tag/iapetus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/iapetus|uri|http://www.semanlink.net/tag/iapetus +http://www.semanlink.net/tag/iapetus|broader_prefLabel|Saturne +http://www.semanlink.net/tag/iapetus|broader_altLabel|Saturn +http://www.semanlink.net/tag/pont|prefLabel|Pont +http://www.semanlink.net/tag/pont|creationDate|2007-01-14 +http://www.semanlink.net/tag/pont|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pont|uri|http://www.semanlink.net/tag/pont +http://www.semanlink.net/tag/searchmonkey|creationTime|2008-05-17T23:34:06Z +http://www.semanlink.net/tag/searchmonkey|prefLabel|SearchMonkey +http://www.semanlink.net/tag/searchmonkey|broader|http://www.semanlink.net/tag/yahoo +http://www.semanlink.net/tag/searchmonkey|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/searchmonkey|creationDate|2008-05-17 +http://www.semanlink.net/tag/searchmonkey|comment|Using SearchMonkey, developers and site owners can use structured data to make Yahoo! Search results more useful and visually appealing, and drive more relevant traffic to their sites. +http://www.semanlink.net/tag/searchmonkey|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/searchmonkey|describedBy|http://developer.yahoo.com/searchmonkey/ +http://www.semanlink.net/tag/searchmonkey|uri|http://www.semanlink.net/tag/searchmonkey +http://www.semanlink.net/tag/searchmonkey|broader_prefLabel|Yahoo! 
+http://www.semanlink.net/tag/searchmonkey|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/webs_alternatifs|creationTime|2017-03-10T14:13:01Z +http://www.semanlink.net/tag/webs_alternatifs|prefLabel|webs alternatifs +http://www.semanlink.net/tag/webs_alternatifs|creationDate|2017-03-10 +http://www.semanlink.net/tag/webs_alternatifs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/webs_alternatifs|uri|http://www.semanlink.net/tag/webs_alternatifs +http://www.semanlink.net/tag/probability_distribution|creationTime|2021-01-13T11:47:24Z +http://www.semanlink.net/tag/probability_distribution|prefLabel|Probability distribution +http://www.semanlink.net/tag/probability_distribution|creationDate|2021-01-13 +http://www.semanlink.net/tag/probability_distribution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/probability_distribution|describedBy|https://en.wikipedia.org/wiki/Probability_distribution +http://www.semanlink.net/tag/probability_distribution|uri|http://www.semanlink.net/tag/probability_distribution +http://www.semanlink.net/tag/archive|creationTime|2007-11-13T22:51:17Z +http://www.semanlink.net/tag/archive|prefLabel|Archive +http://www.semanlink.net/tag/archive|creationDate|2007-11-13 +http://www.semanlink.net/tag/archive|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/archive|uri|http://www.semanlink.net/tag/archive +http://www.semanlink.net/tag/fadi_badra|creationTime|2011-01-12T23:24:30Z +http://www.semanlink.net/tag/fadi_badra|prefLabel|Fadi Badra +http://www.semanlink.net/tag/fadi_badra|creationDate|2011-01-12 +http://www.semanlink.net/tag/fadi_badra|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fadi_badra|homepage|http://www.fadi.lautre.net/ +http://www.semanlink.net/tag/fadi_badra|uri|http://www.semanlink.net/tag/fadi_badra +http://www.semanlink.net/tag/subword_embeddings|creationTime|2019-01-31T23:54:51Z +http://www.semanlink.net/tag/subword_embeddings|prefLabel|Subword embeddings +http://www.semanlink.net/tag/subword_embeddings|broader|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/subword_embeddings|creationDate|2019-01-31 +http://www.semanlink.net/tag/subword_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/subword_embeddings|uri|http://www.semanlink.net/tag/subword_embeddings +http://www.semanlink.net/tag/subword_embeddings|broader_prefLabel|Word embeddings +http://www.semanlink.net/tag/subword_embeddings|broader_altLabel|Word Embedding +http://www.semanlink.net/tag/subword_embeddings|broader_altLabel|Plongement lexical +http://www.semanlink.net/tag/subword_embeddings|broader_related|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/subword_embeddings|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/subword_embeddings|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/pyramide|prefLabel|Pyramide +http://www.semanlink.net/tag/pyramide|broader|http://www.semanlink.net/tag/architecture +http://www.semanlink.net/tag/pyramide|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pyramide|uri|http://www.semanlink.net/tag/pyramide +http://www.semanlink.net/tag/pyramide|broader_prefLabel|Architecture +http://www.semanlink.net/tag/function_closures|creationTime|2015-01-29T00:47:32Z 
+http://www.semanlink.net/tag/function_closures|prefLabel|Function closures +http://www.semanlink.net/tag/function_closures|creationDate|2015-01-29 +http://www.semanlink.net/tag/function_closures|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/function_closures|uri|http://www.semanlink.net/tag/function_closures +http://www.semanlink.net/tag/france|prefLabel|France +http://www.semanlink.net/tag/france|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/france|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/france|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/france|uri|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/france|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/france|broader_prefLabel|Europe +http://www.semanlink.net/tag/photo_journalisme|prefLabel|Photo journalisme +http://www.semanlink.net/tag/photo_journalisme|broader|http://www.semanlink.net/tag/photo +http://www.semanlink.net/tag/photo_journalisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/photo_journalisme|uri|http://www.semanlink.net/tag/photo_journalisme +http://www.semanlink.net/tag/photo_journalisme|broader_prefLabel|Photo +http://www.semanlink.net/tag/photo_journalisme|broader_altLabel|Images +http://www.semanlink.net/tag/bill_joy|prefLabel|Bill Joy +http://www.semanlink.net/tag/bill_joy|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/bill_joy|related|http://www.semanlink.net/tag/sun_microsystems +http://www.semanlink.net/tag/bill_joy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bill_joy|uri|http://www.semanlink.net/tag/bill_joy +http://www.semanlink.net/tag/bill_joy|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/bill_joy|broader_altLabel|Technical guys +http://www.semanlink.net/tag/web_tools|prefLabel|Web tools +http://www.semanlink.net/tag/web_tools|broader|http://www.semanlink.net/tag/web +http://www.semanlink.net/tag/web_tools|broader|http://www.semanlink.net/tag/tools +http://www.semanlink.net/tag/web_tools|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/web_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_tools|uri|http://www.semanlink.net/tag/web_tools +http://www.semanlink.net/tag/web_tools|broader_prefLabel|Web +http://www.semanlink.net/tag/web_tools|broader_prefLabel|Tools +http://www.semanlink.net/tag/web_tools|broader_prefLabel|Dev +http://www.semanlink.net/tag/taxi|creationTime|2014-04-16T01:09:46Z +http://www.semanlink.net/tag/taxi|prefLabel|Taxi +http://www.semanlink.net/tag/taxi|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/taxi|creationDate|2014-04-16 +http://www.semanlink.net/tag/taxi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/taxi|uri|http://www.semanlink.net/tag/taxi +http://www.semanlink.net/tag/taxi|broader_prefLabel|Automobile +http://www.semanlink.net/tag/taxi|broader_altLabel|Automotive +http://www.semanlink.net/tag/critical_evaluation|creationTime|2020-07-28T11:30:27Z +http://www.semanlink.net/tag/critical_evaluation|prefLabel|Critical evaluation +http://www.semanlink.net/tag/critical_evaluation|related|http://www.semanlink.net/tag/ml_evaluation +http://www.semanlink.net/tag/critical_evaluation|creationDate|2020-07-28 
+http://www.semanlink.net/tag/critical_evaluation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/critical_evaluation|uri|http://www.semanlink.net/tag/critical_evaluation +http://www.semanlink.net/tag/ontology_mapping|prefLabel|Ontology Mapping +http://www.semanlink.net/tag/ontology_mapping|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/ontology_mapping|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ontology_mapping|uri|http://www.semanlink.net/tag/ontology_mapping +http://www.semanlink.net/tag/ontology_mapping|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/ontology_mapping|broader_altLabel|Ontology +http://www.semanlink.net/tag/ng|creationTime|2012-08-09T22:37:11Z +http://www.semanlink.net/tag/ng|prefLabel|Andrew Ng +http://www.semanlink.net/tag/ng|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/ng|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/ng|related|http://www.semanlink.net/tag/coursera_machine_learning +http://www.semanlink.net/tag/ng|related|http://www.semanlink.net/tag/baidu +http://www.semanlink.net/tag/ng|related|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/ng|creationDate|2012-08-09 +http://www.semanlink.net/tag/ng|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ng|altLabel|Ng +http://www.semanlink.net/tag/ng|uri|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/ng|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/ng|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/ng|broader_altLabel|Technical guys +http://www.semanlink.net/tag/svg|creationTime|2007-05-21T23:02:00Z +http://www.semanlink.net/tag/svg|prefLabel|SVG +http://www.semanlink.net/tag/svg|creationDate|2007-05-21 +http://www.semanlink.net/tag/svg|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/svg|uri|http://www.semanlink.net/tag/svg +http://www.semanlink.net/tag/new_africa|creationTime|2013-08-24T19:42:24Z +http://www.semanlink.net/tag/new_africa|prefLabel|New Africa +http://www.semanlink.net/tag/new_africa|broader|http://www.semanlink.net/tag/ntic_et_developpement +http://www.semanlink.net/tag/new_africa|broader|http://www.semanlink.net/tag/favoris +http://www.semanlink.net/tag/new_africa|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/new_africa|creationDate|2013-08-24 +http://www.semanlink.net/tag/new_africa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/new_africa|uri|http://www.semanlink.net/tag/new_africa +http://www.semanlink.net/tag/new_africa|broader_prefLabel|NTIC et développement +http://www.semanlink.net/tag/new_africa|broader_prefLabel|Favoris +http://www.semanlink.net/tag/new_africa|broader_prefLabel|Afrique +http://www.semanlink.net/tag/new_africa|broader_altLabel|Tech / developing world +http://www.semanlink.net/tag/new_africa|broader_altLabel|favorites +http://www.semanlink.net/tag/new_africa|broader_altLabel|Africa +http://www.semanlink.net/tag/cache_buster|creationTime|2012-02-01T00:39:00Z +http://www.semanlink.net/tag/cache_buster|prefLabel|Cache buster +http://www.semanlink.net/tag/cache_buster|broader|http://www.semanlink.net/tag/cache +http://www.semanlink.net/tag/cache_buster|creationDate|2012-02-01 
+http://www.semanlink.net/tag/cache_buster|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cache_buster|uri|http://www.semanlink.net/tag/cache_buster +http://www.semanlink.net/tag/cache_buster|broader_prefLabel|Cache +http://www.semanlink.net/tag/entity_type_representation|creationTime|2021-05-17T16:48:04Z +http://www.semanlink.net/tag/entity_type_representation|prefLabel|Entity type representation +http://www.semanlink.net/tag/entity_type_representation|broader|http://www.semanlink.net/tag/entity_type +http://www.semanlink.net/tag/entity_type_representation|broader|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/entity_type_representation|related|http://www.semanlink.net/tag/semantic_hierarchies +http://www.semanlink.net/tag/entity_type_representation|creationDate|2021-05-17 +http://www.semanlink.net/tag/entity_type_representation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_type_representation|uri|http://www.semanlink.net/tag/entity_type_representation +http://www.semanlink.net/tag/entity_type_representation|broader_prefLabel|Entity type +http://www.semanlink.net/tag/entity_type_representation|broader_prefLabel|Knowledge Graph Embeddings +http://www.semanlink.net/tag/entity_type_representation|broader_altLabel|KGE +http://www.semanlink.net/tag/entity_type_representation|broader_altLabel|Knowledge graph embedding +http://www.semanlink.net/tag/entity_type_representation|broader_altLabel|KG embedding +http://www.semanlink.net/tag/entity_type_representation|broader_related|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/entity_type_representation|broader_related|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/entity_type_representation|broader_related|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/entity_type_representation|broader_related|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/google_rich_snippets|creationTime|2009-05-13T22:20:26Z +http://www.semanlink.net/tag/google_rich_snippets|prefLabel|Google Rich Snippets +http://www.semanlink.net/tag/google_rich_snippets|broader|http://www.semanlink.net/tag/seo +http://www.semanlink.net/tag/google_rich_snippets|broader|http://www.semanlink.net/tag/google_seo +http://www.semanlink.net/tag/google_rich_snippets|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_rich_snippets|related|http://www.semanlink.net/tag/microformats +http://www.semanlink.net/tag/google_rich_snippets|related|http://www.semanlink.net/tag/searchmonkey +http://www.semanlink.net/tag/google_rich_snippets|creationDate|2009-05-13 +http://www.semanlink.net/tag/google_rich_snippets|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_rich_snippets|homepage|http://www.google.com/support/webmasters/bin/topic.py?topic=21997 +http://www.semanlink.net/tag/google_rich_snippets|uri|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanlink.net/tag/google_rich_snippets|broader_prefLabel|SEO +http://www.semanlink.net/tag/google_rich_snippets|broader_prefLabel|Google: SEO +http://www.semanlink.net/tag/google_rich_snippets|broader_prefLabel|Google +http://www.semanlink.net/tag/google_rich_snippets|broader_related|http://www.semanlink.net/tag/schema_org 
+http://www.semanlink.net/tag/google_rich_snippets|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/etl|creationTime|2009-04-02T14:13:31Z +http://www.semanlink.net/tag/etl|prefLabel|ETL +http://www.semanlink.net/tag/etl|creationDate|2009-04-02 +http://www.semanlink.net/tag/etl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/etl|uri|http://www.semanlink.net/tag/etl +http://www.semanlink.net/tag/owl_tool|creationTime|2007-07-28T14:35:50Z +http://www.semanlink.net/tag/owl_tool|prefLabel|OWL tool +http://www.semanlink.net/tag/owl_tool|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/owl_tool|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owl_tool|creationDate|2007-07-28 +http://www.semanlink.net/tag/owl_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owl_tool|uri|http://www.semanlink.net/tag/owl_tool +http://www.semanlink.net/tag/owl_tool|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/owl_tool|broader_prefLabel|OWL +http://www.semanlink.net/tag/owl_tool|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/carnet_de_voyage|creationTime|2008-11-21T23:22:00Z +http://www.semanlink.net/tag/carnet_de_voyage|prefLabel|Carnet de voyage +http://www.semanlink.net/tag/carnet_de_voyage|creationDate|2008-11-21 +http://www.semanlink.net/tag/carnet_de_voyage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/carnet_de_voyage|uri|http://www.semanlink.net/tag/carnet_de_voyage +http://www.semanlink.net/tag/representation_learning|creationTime|2015-10-24T00:52:56Z +http://www.semanlink.net/tag/representation_learning|prefLabel|Representation learning +http://www.semanlink.net/tag/representation_learning|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/representation_learning|related|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/representation_learning|creationDate|2015-10-24 +http://www.semanlink.net/tag/representation_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/representation_learning|uri|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/tag/representation_learning|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/orange_data_mining|creationTime|2013-05-30T02:17:49Z +http://www.semanlink.net/tag/orange_data_mining|prefLabel|Orange (data mining) +http://www.semanlink.net/tag/orange_data_mining|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/orange_data_mining|broader|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/tag/orange_data_mining|broader|http://www.semanlink.net/tag/data_mining_tools +http://www.semanlink.net/tag/orange_data_mining|related|http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data +http://www.semanlink.net/tag/orange_data_mining|creationDate|2013-05-30 +http://www.semanlink.net/tag/orange_data_mining|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/orange_data_mining|uri|http://www.semanlink.net/tag/orange_data_mining +http://www.semanlink.net/tag/orange_data_mining|broader_prefLabel|Python +http://www.semanlink.net/tag/orange_data_mining|broader_prefLabel|Python 4 Data science 
+http://www.semanlink.net/tag/orange_data_mining|broader_prefLabel|Data mining tools
+http://www.semanlink.net/tag/unsupervised_text_classification|creationTime|2020-10-05T00:44:12Z
+http://www.semanlink.net/tag/unsupervised_text_classification|prefLabel|Unsupervised Text Classification
+http://www.semanlink.net/tag/unsupervised_text_classification|broader|http://www.semanlink.net/tag/nlp_text_classification
+http://www.semanlink.net/tag/unsupervised_text_classification|broader|http://www.semanlink.net/tag/unsupervised_machine_learning
+http://www.semanlink.net/tag/unsupervised_text_classification|creationDate|2020-10-05
+http://www.semanlink.net/tag/unsupervised_text_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/unsupervised_text_classification|altLabel|Dataless Text Classification
+http://www.semanlink.net/tag/unsupervised_text_classification|uri|http://www.semanlink.net/tag/unsupervised_text_classification
+http://www.semanlink.net/tag/unsupervised_text_classification|broader_prefLabel|Text Classification
+http://www.semanlink.net/tag/unsupervised_text_classification|broader_prefLabel|Unsupervised machine learning
+http://www.semanlink.net/tag/allegrograph|creationTime|2010-08-25T00:00:05Z
+http://www.semanlink.net/tag/allegrograph|prefLabel|AllegroGraph
+http://www.semanlink.net/tag/allegrograph|broader|http://www.semanlink.net/tag/triplestore
+http://www.semanlink.net/tag/allegrograph|creationDate|2010-08-25
+http://www.semanlink.net/tag/allegrograph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/allegrograph|describedBy|http://www.franz.com/agraph/allegrograph/
+http://www.semanlink.net/tag/allegrograph|uri|http://www.semanlink.net/tag/allegrograph
+http://www.semanlink.net/tag/allegrograph|broader_prefLabel|TripleStore
+http://www.semanlink.net/tag/allegrograph|broader_altLabel|RDF database
+http://www.semanlink.net/tag/xslt|prefLabel|XSLT
+http://www.semanlink.net/tag/xslt|broader|http://www.semanlink.net/tag/xsl
+http://www.semanlink.net/tag/xslt|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/xslt|uri|http://www.semanlink.net/tag/xslt
+http://www.semanlink.net/tag/xslt|broader_prefLabel|XSL
+http://www.semanlink.net/tag/janis_joplin|prefLabel|Janis Joplin
+http://www.semanlink.net/tag/janis_joplin|broader|http://www.semanlink.net/tag/musicien
+http://www.semanlink.net/tag/janis_joplin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/janis_joplin|uri|http://www.semanlink.net/tag/janis_joplin
+http://www.semanlink.net/tag/janis_joplin|broader_prefLabel|Musicien
+http://www.semanlink.net/tag/music_store|prefLabel|Music store
+http://www.semanlink.net/tag/music_store|broader|http://www.semanlink.net/tag/musique_en_ligne
+http://www.semanlink.net/tag/music_store|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/music_store|uri|http://www.semanlink.net/tag/music_store
+http://www.semanlink.net/tag/music_store|broader_prefLabel|Musique en ligne
+http://www.semanlink.net/tag/chine_leadership|creationTime|2020-04-04T10:39:07Z
+http://www.semanlink.net/tag/chine_leadership|prefLabel|Chine: leadership
+http://www.semanlink.net/tag/chine_leadership|creationDate|2020-04-04
+http://www.semanlink.net/tag/chine_leadership|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/chine_leadership|uri|http://www.semanlink.net/tag/chine_leadership
+http://www.semanlink.net/tag/nlp_techniques|creationTime|2015-10-16T11:18:20Z +http://www.semanlink.net/tag/nlp_techniques|prefLabel|NLP techniques +http://www.semanlink.net/tag/nlp_techniques|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_techniques|creationDate|2015-10-16 +http://www.semanlink.net/tag/nlp_techniques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_techniques|uri|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/nlp_techniques|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_techniques|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_techniques|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_techniques|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/bosnie|creationTime|2007-09-10T19:38:26Z +http://www.semanlink.net/tag/bosnie|prefLabel|Bosnie +http://www.semanlink.net/tag/bosnie|broader|http://www.semanlink.net/tag/yougoslavie +http://www.semanlink.net/tag/bosnie|creationDate|2007-09-10 +http://www.semanlink.net/tag/bosnie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bosnie|uri|http://www.semanlink.net/tag/bosnie +http://www.semanlink.net/tag/bosnie|broader_prefLabel|Yougoslavie +http://www.semanlink.net/tag/bosnie|broader_prefLabel|Ex Yougoslavie +http://www.semanlink.net/tag/vie_sur_mars|prefLabel|Vie sur Mars +http://www.semanlink.net/tag/vie_sur_mars|broader|http://www.semanlink.net/tag/vie_extraterrestre +http://www.semanlink.net/tag/vie_sur_mars|broader|http://www.semanlink.net/tag/mars +http://www.semanlink.net/tag/vie_sur_mars|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vie_sur_mars|uri|http://www.semanlink.net/tag/vie_sur_mars +http://www.semanlink.net/tag/vie_sur_mars|broader_prefLabel|Vie extraterrestre +http://www.semanlink.net/tag/vie_sur_mars|broader_prefLabel|Mars +http://www.semanlink.net/tag/simile|prefLabel|SIMILE +http://www.semanlink.net/tag/simile|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/simile|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/simile|broader|http://www.semanlink.net/tag/mit +http://www.semanlink.net/tag/simile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/simile|uri|http://www.semanlink.net/tag/simile +http://www.semanlink.net/tag/simile|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/simile|broader_prefLabel|Open Source +http://www.semanlink.net/tag/simile|broader_prefLabel|MIT +http://www.semanlink.net/tag/beijing_genomics_institute|creationTime|2014-12-28T10:40:20Z +http://www.semanlink.net/tag/beijing_genomics_institute|prefLabel|Beijing Genomics Institute +http://www.semanlink.net/tag/beijing_genomics_institute|broader|http://www.semanlink.net/tag/genomique +http://www.semanlink.net/tag/beijing_genomics_institute|broader|http://www.semanlink.net/tag/chine_technologie +http://www.semanlink.net/tag/beijing_genomics_institute|broader|http://www.semanlink.net/tag/clonage +http://www.semanlink.net/tag/beijing_genomics_institute|broader|http://www.semanlink.net/tag/shenzhen +http://www.semanlink.net/tag/beijing_genomics_institute|broader|http://www.semanlink.net/tag/sequencage_du_genome +http://www.semanlink.net/tag/beijing_genomics_institute|creationDate|2014-12-28 
+http://www.semanlink.net/tag/beijing_genomics_institute|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/beijing_genomics_institute|uri|http://www.semanlink.net/tag/beijing_genomics_institute +http://www.semanlink.net/tag/beijing_genomics_institute|broader_prefLabel|Génomique +http://www.semanlink.net/tag/beijing_genomics_institute|broader_prefLabel|Chine : technologie +http://www.semanlink.net/tag/beijing_genomics_institute|broader_prefLabel|Clonage +http://www.semanlink.net/tag/beijing_genomics_institute|broader_prefLabel|Shenzhen +http://www.semanlink.net/tag/beijing_genomics_institute|broader_prefLabel|Séquençage du génome +http://www.semanlink.net/tag/ca_craint|prefLabel|Ca craint +http://www.semanlink.net/tag/ca_craint|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ca_craint|uri|http://www.semanlink.net/tag/ca_craint +http://www.semanlink.net/tag/semantic_web_search_engine|creationTime|2007-11-09T10:01:35Z +http://www.semanlink.net/tag/semantic_web_search_engine|prefLabel|Semantic Web search engine +http://www.semanlink.net/tag/semantic_web_search_engine|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/semantic_web_search_engine|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/semantic_web_search_engine|creationDate|2007-11-09 +http://www.semanlink.net/tag/semantic_web_search_engine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_search_engine|altLabel|RDF search engine +http://www.semanlink.net/tag/semantic_web_search_engine|uri|http://www.semanlink.net/tag/semantic_web_search_engine +http://www.semanlink.net/tag/semantic_web_search_engine|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/semantic_web_search_engine|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/semantic_web_search_engine|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/documentaire_tv|creationTime|2008-11-27T01:22:34Z +http://www.semanlink.net/tag/documentaire_tv|prefLabel|Documentaire TV +http://www.semanlink.net/tag/documentaire_tv|broader|http://www.semanlink.net/tag/documentaire +http://www.semanlink.net/tag/documentaire_tv|broader|http://www.semanlink.net/tag/television +http://www.semanlink.net/tag/documentaire_tv|creationDate|2008-11-27 +http://www.semanlink.net/tag/documentaire_tv|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/documentaire_tv|altLabel|Documentaire télé +http://www.semanlink.net/tag/documentaire_tv|uri|http://www.semanlink.net/tag/documentaire_tv +http://www.semanlink.net/tag/documentaire_tv|broader_prefLabel|Documentaire +http://www.semanlink.net/tag/documentaire_tv|broader_prefLabel|Télévision +http://www.semanlink.net/tag/documentaire_tv|broader_altLabel|TV +http://www.semanlink.net/tag/laurent_lafforgue|prefLabel|Laurent Lafforgue +http://www.semanlink.net/tag/laurent_lafforgue|broader|http://www.semanlink.net/tag/medaille_fields +http://www.semanlink.net/tag/laurent_lafforgue|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/laurent_lafforgue|uri|http://www.semanlink.net/tag/laurent_lafforgue +http://www.semanlink.net/tag/laurent_lafforgue|broader_prefLabel|Médaille Fields +http://www.semanlink.net/tag/ia_limites|creationTime|2017-10-07T21:51:21Z +http://www.semanlink.net/tag/ia_limites|prefLabel|AI: limits 
+http://www.semanlink.net/tag/ia_limites|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ia_limites|creationDate|2017-10-07 +http://www.semanlink.net/tag/ia_limites|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ia_limites|altLabel|IA: limites +http://www.semanlink.net/tag/ia_limites|uri|http://www.semanlink.net/tag/ia_limites +http://www.semanlink.net/tag/ia_limites|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ia_limites|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ia_limites|broader_altLabel|AI +http://www.semanlink.net/tag/ia_limites|broader_altLabel|IA +http://www.semanlink.net/tag/ia_limites|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/dev|prefLabel|Dev +http://www.semanlink.net/tag/dev|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dev|uri|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/dev|broader_prefLabel|Technologie +http://www.semanlink.net/tag/rake|creationTime|2017-06-14T00:24:24Z +http://www.semanlink.net/tag/rake|prefLabel|RAKE +http://www.semanlink.net/tag/rake|broader|http://www.semanlink.net/tag/python_nlp +http://www.semanlink.net/tag/rake|broader|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/tag/rake|creationDate|2017-06-14 +http://www.semanlink.net/tag/rake|comment|"""Rapid Automatic Keyword Extraction""" +http://www.semanlink.net/tag/rake|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rake|uri|http://www.semanlink.net/tag/rake +http://www.semanlink.net/tag/rake|broader_prefLabel|Python-NLP +http://www.semanlink.net/tag/rake|broader_prefLabel|Keyword/keyphrase extraction +http://www.semanlink.net/tag/rake|broader_altLabel|Topic extraction +http://www.semanlink.net/tag/rake|broader_altLabel|Keyword extraction +http://www.semanlink.net/tag/rake|broader_altLabel|Keyphrase extraction +http://www.semanlink.net/tag/rake|broader_related|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/rake|broader_related|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/tag/rake|broader_related|http://www.semanlink.net/tag/phrase_mining +http://www.semanlink.net/tag/rake|broader_related|http://www.semanlink.net/tag/phrase_embeddings +http://www.semanlink.net/tag/rake|broader_related|http://www.semanlink.net/tag/automatic_summarization +http://www.semanlink.net/tag/rake|broader_related|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/maven|creationTime|2012-06-15T09:53:51Z +http://www.semanlink.net/tag/maven|prefLabel|Maven +http://www.semanlink.net/tag/maven|broader|http://www.semanlink.net/tag/dev_tools +http://www.semanlink.net/tag/maven|creationDate|2012-06-15 +http://www.semanlink.net/tag/maven|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maven|homepage|http://maven.apache.org/ +http://www.semanlink.net/tag/maven|uri|http://www.semanlink.net/tag/maven +http://www.semanlink.net/tag/maven|broader_prefLabel|Dev tools +http://www.semanlink.net/tag/arima|creationTime|2017-06-15T13:35:47Z +http://www.semanlink.net/tag/arima|prefLabel|ARIMA +http://www.semanlink.net/tag/arima|broader|http://www.semanlink.net/tag/time_series +http://www.semanlink.net/tag/arima|creationDate|2017-06-15 
+http://www.semanlink.net/tag/arima|comment|"""Autoregressive integrated moving average"" + +- ""AR"": the evolving variable of interest is regressed on its own lagged (i.e., prior) values. +- ""MA"": the regression error is a linear combination of error terms whose values occurred contemporaneously and at various times in the past. +- ""I"" (""integrated""): the data values have been replaced with the difference between their values and the previous values +" +http://www.semanlink.net/tag/arima|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arima|describedBy|https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average +http://www.semanlink.net/tag/arima|uri|http://www.semanlink.net/tag/arima +http://www.semanlink.net/tag/arima|broader_prefLabel|Time Series +http://www.semanlink.net/tag/securite_sociale|creationTime|2008-01-14T13:23:17Z +http://www.semanlink.net/tag/securite_sociale|prefLabel|Sécurité sociale +http://www.semanlink.net/tag/securite_sociale|creationDate|2008-01-14 +http://www.semanlink.net/tag/securite_sociale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/securite_sociale|uri|http://www.semanlink.net/tag/securite_sociale +http://www.semanlink.net/tag/extreme_droite|prefLabel|Extrème droite +http://www.semanlink.net/tag/extreme_droite|broader|http://www.semanlink.net/tag/politique +http://www.semanlink.net/tag/extreme_droite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/extreme_droite|uri|http://www.semanlink.net/tag/extreme_droite +http://www.semanlink.net/tag/extreme_droite|broader_prefLabel|Politique +http://www.semanlink.net/tag/chelsea_manning|creationTime|2017-05-13T18:30:24Z +http://www.semanlink.net/tag/chelsea_manning|prefLabel|Chelsea Manning +http://www.semanlink.net/tag/chelsea_manning|broader|http://www.semanlink.net/tag/whistleblower +http://www.semanlink.net/tag/chelsea_manning|creationDate|2017-05-13 +http://www.semanlink.net/tag/chelsea_manning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chelsea_manning|uri|http://www.semanlink.net/tag/chelsea_manning +http://www.semanlink.net/tag/chelsea_manning|broader_prefLabel|Whistleblower +http://www.semanlink.net/tag/tf1|creationTime|2009-05-11T23:02:43Z +http://www.semanlink.net/tag/tf1|prefLabel|TF1 +http://www.semanlink.net/tag/tf1|broader|http://www.semanlink.net/tag/television +http://www.semanlink.net/tag/tf1|creationDate|2009-05-11 +http://www.semanlink.net/tag/tf1|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tf1|uri|http://www.semanlink.net/tag/tf1 +http://www.semanlink.net/tag/tf1|broader_prefLabel|Télévision +http://www.semanlink.net/tag/tf1|broader_altLabel|TV +http://www.semanlink.net/tag/yahoo|prefLabel|Yahoo! 
+http://www.semanlink.net/tag/yahoo|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/yahoo|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/yahoo|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/yahoo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yahoo|uri|http://www.semanlink.net/tag/yahoo +http://www.semanlink.net/tag/yahoo|broader_prefLabel|Internet +http://www.semanlink.net/tag/yahoo|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/yahoo|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/yahoo|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/ina|creationTime|2009-06-25T08:23:19Z +http://www.semanlink.net/tag/ina|prefLabel|INA +http://www.semanlink.net/tag/ina|creationDate|2009-06-25 +http://www.semanlink.net/tag/ina|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ina|uri|http://www.semanlink.net/tag/ina +http://www.semanlink.net/tag/sif_embeddings|creationTime|2018-08-28T11:57:05Z +http://www.semanlink.net/tag/sif_embeddings|prefLabel|SIF embeddings +http://www.semanlink.net/tag/sif_embeddings|broader|http://www.semanlink.net/tag/sanjeev_arora +http://www.semanlink.net/tag/sif_embeddings|broader|http://www.semanlink.net/tag/word_embedding_compositionality +http://www.semanlink.net/tag/sif_embeddings|broader|http://www.semanlink.net/tag/sentence_embeddings +http://www.semanlink.net/tag/sif_embeddings|creationDate|2018-08-28 +http://www.semanlink.net/tag/sif_embeddings|comment|"""Smoothed Inverse Frequency"": a linear representation of a sentence which is better than the simple average of the embeddings of its words + +2 ideas: + +- assign to each word a weighting that depends on the frequency of the word it the corpus (reminiscent of TF-IDF) +- some denoising (removing the component from the top singular direction) + + + +Todo (?): check implementation as a [sklearn Vectorizer](https://github.com/ChristophAlt/embedding_vectorizer) +" +http://www.semanlink.net/tag/sif_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sif_embeddings|uri|http://www.semanlink.net/tag/sif_embeddings +http://www.semanlink.net/tag/sif_embeddings|broader_prefLabel|Sanjeev Arora +http://www.semanlink.net/tag/sif_embeddings|broader_prefLabel|Word Embedding Compositionality +http://www.semanlink.net/tag/sif_embeddings|broader_prefLabel|Sentence Embeddings +http://www.semanlink.net/tag/graphviz|creationTime|2011-06-08T16:47:10Z +http://www.semanlink.net/tag/graphviz|prefLabel|Graphviz +http://www.semanlink.net/tag/graphviz|broader|http://www.semanlink.net/tag/graph_visualization +http://www.semanlink.net/tag/graphviz|creationDate|2011-06-08 +http://www.semanlink.net/tag/graphviz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graphviz|uri|http://www.semanlink.net/tag/graphviz +http://www.semanlink.net/tag/graphviz|broader_prefLabel|Graph visualization +http://www.semanlink.net/tag/developer_documentation|prefLabel|Developer documentation +http://www.semanlink.net/tag/developer_documentation|broader|http://www.semanlink.net/tag/documentation +http://www.semanlink.net/tag/developer_documentation|creationDate|2006-09-15 +http://www.semanlink.net/tag/developer_documentation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/developer_documentation|altLabel|Dev doc 
+http://www.semanlink.net/tag/developer_documentation|uri|http://www.semanlink.net/tag/developer_documentation +http://www.semanlink.net/tag/developer_documentation|broader_prefLabel|Documentation +http://www.semanlink.net/tag/elevage_porcin|prefLabel|Elevage porcin +http://www.semanlink.net/tag/elevage_porcin|broader|http://www.semanlink.net/tag/porc +http://www.semanlink.net/tag/elevage_porcin|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/elevage_porcin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elevage_porcin|uri|http://www.semanlink.net/tag/elevage_porcin +http://www.semanlink.net/tag/elevage_porcin|broader_prefLabel|Porc +http://www.semanlink.net/tag/elevage_porcin|broader_prefLabel|Agriculture +http://www.semanlink.net/tag/irlande|prefLabel|Irlande +http://www.semanlink.net/tag/irlande|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/irlande|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/irlande|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/irlande|altLabel|Ireland +http://www.semanlink.net/tag/irlande|uri|http://www.semanlink.net/tag/irlande +http://www.semanlink.net/tag/irlande|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/irlande|broader_prefLabel|Europe +http://www.semanlink.net/tag/lumiere|prefLabel|Lumière +http://www.semanlink.net/tag/lumiere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lumiere|uri|http://www.semanlink.net/tag/lumiere +http://www.semanlink.net/tag/jamendo|creationTime|2007-06-13T23:26:32Z +http://www.semanlink.net/tag/jamendo|prefLabel|Jamendo +http://www.semanlink.net/tag/jamendo|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/jamendo|related|http://www.semanlink.net/tag/linking_open_data +http://www.semanlink.net/tag/jamendo|creationDate|2007-06-13 +http://www.semanlink.net/tag/jamendo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jamendo|uri|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/jamendo|broader_prefLabel|Musique +http://www.semanlink.net/tag/jamendo|broader_altLabel|Music +http://www.semanlink.net/tag/paleoanthropology_genetics|creationTime|2020-08-21T17:10:09Z +http://www.semanlink.net/tag/paleoanthropology_genetics|prefLabel|Paleoanthropology + Genetics +http://www.semanlink.net/tag/paleoanthropology_genetics|broader|http://www.semanlink.net/tag/paleoanthropology +http://www.semanlink.net/tag/paleoanthropology_genetics|related|http://www.semanlink.net/tag/genetique_histoire +http://www.semanlink.net/tag/paleoanthropology_genetics|creationDate|2020-08-21 +http://www.semanlink.net/tag/paleoanthropology_genetics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paleoanthropology_genetics|altLabel|Paleoanthropologie + Génétique +http://www.semanlink.net/tag/paleoanthropology_genetics|uri|http://www.semanlink.net/tag/paleoanthropology_genetics +http://www.semanlink.net/tag/paleoanthropology_genetics|broader_prefLabel|Paleoanthropology +http://www.semanlink.net/tag/paleoanthropology_genetics|broader_altLabel|Paléontologie humaine +http://www.semanlink.net/tag/francois_yvon|creationTime|2018-11-28T23:04:16Z +http://www.semanlink.net/tag/francois_yvon|prefLabel|François Yvon +http://www.semanlink.net/tag/francois_yvon|broader|http://www.semanlink.net/tag/nlp_girls_and_guys 
+http://www.semanlink.net/tag/francois_yvon|creationDate|2018-11-28 +http://www.semanlink.net/tag/francois_yvon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/francois_yvon|describedBy|https://cv.archives-ouvertes.fr/francois-yvon +http://www.semanlink.net/tag/francois_yvon|uri|http://www.semanlink.net/tag/francois_yvon +http://www.semanlink.net/tag/francois_yvon|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/gaz_de_schiste|creationTime|2013-03-30T10:18:07Z +http://www.semanlink.net/tag/gaz_de_schiste|prefLabel|Gaz de schiste +http://www.semanlink.net/tag/gaz_de_schiste|broader|http://www.semanlink.net/tag/energies_fossiles_non_conventionnelles +http://www.semanlink.net/tag/gaz_de_schiste|creationDate|2013-03-30 +http://www.semanlink.net/tag/gaz_de_schiste|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gaz_de_schiste|uri|http://www.semanlink.net/tag/gaz_de_schiste +http://www.semanlink.net/tag/gaz_de_schiste|broader_prefLabel|"Energies fossiles ""non conventionnelles""" +http://www.semanlink.net/tag/diplomatie_americaine|creationTime|2013-05-17T20:06:21Z +http://www.semanlink.net/tag/diplomatie_americaine|prefLabel|Diplomatie américaine +http://www.semanlink.net/tag/diplomatie_americaine|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/diplomatie_americaine|broader|http://www.semanlink.net/tag/diplomatie +http://www.semanlink.net/tag/diplomatie_americaine|creationDate|2013-05-17 +http://www.semanlink.net/tag/diplomatie_americaine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/diplomatie_americaine|uri|http://www.semanlink.net/tag/diplomatie_americaine +http://www.semanlink.net/tag/diplomatie_americaine|broader_prefLabel|USA +http://www.semanlink.net/tag/diplomatie_americaine|broader_prefLabel|Diplomatie +http://www.semanlink.net/tag/diplomatie_americaine|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/diplomatie_americaine|broader_altLabel|United States +http://www.semanlink.net/tag/blogger|prefLabel|Blogger +http://www.semanlink.net/tag/blogger|broader|http://www.semanlink.net/tag/blog +http://www.semanlink.net/tag/blogger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blogger|uri|http://www.semanlink.net/tag/blogger +http://www.semanlink.net/tag/blogger|broader_prefLabel|Blog +http://www.semanlink.net/tag/oliviers|creationTime|2017-07-09T10:33:43Z +http://www.semanlink.net/tag/oliviers|prefLabel|Oliviers +http://www.semanlink.net/tag/oliviers|creationDate|2017-07-09 +http://www.semanlink.net/tag/oliviers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/oliviers|uri|http://www.semanlink.net/tag/oliviers +http://www.semanlink.net/tag/syndicalisme|creationTime|2007-07-12T22:52:36Z +http://www.semanlink.net/tag/syndicalisme|prefLabel|Syndicalisme +http://www.semanlink.net/tag/syndicalisme|creationDate|2007-07-12 +http://www.semanlink.net/tag/syndicalisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/syndicalisme|uri|http://www.semanlink.net/tag/syndicalisme +http://www.semanlink.net/tag/decision_tree_learning|creationTime|2017-06-19T11:11:20Z +http://www.semanlink.net/tag/decision_tree_learning|prefLabel|Decision tree learning +http://www.semanlink.net/tag/decision_tree_learning|broader|http://www.semanlink.net/tag/machine_learning_techniques 
+http://www.semanlink.net/tag/decision_tree_learning|broader|http://www.semanlink.net/tag/supervised_machine_learning +http://www.semanlink.net/tag/decision_tree_learning|creationDate|2017-06-19 +http://www.semanlink.net/tag/decision_tree_learning|comment|"construction of a decision tree from class-labeled training tuples + +frequent problem: overfitting (=high variance) + +" +http://www.semanlink.net/tag/decision_tree_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/decision_tree_learning|describedBy|https://en.wikipedia.org/wiki/Decision_tree_learning +http://www.semanlink.net/tag/decision_tree_learning|uri|http://www.semanlink.net/tag/decision_tree_learning +http://www.semanlink.net/tag/decision_tree_learning|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/decision_tree_learning|broader_prefLabel|Supervised machine learning +http://www.semanlink.net/tag/ian_goodfellow|creationTime|2017-12-16T14:28:31Z +http://www.semanlink.net/tag/ian_goodfellow|prefLabel|Ian Goodfellow +http://www.semanlink.net/tag/ian_goodfellow|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/ian_goodfellow|creationDate|2017-12-16 +http://www.semanlink.net/tag/ian_goodfellow|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ian_goodfellow|uri|http://www.semanlink.net/tag/ian_goodfellow +http://www.semanlink.net/tag/ian_goodfellow|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/data_model|creationTime|2011-11-22T13:15:08Z +http://www.semanlink.net/tag/data_model|prefLabel|Data model +http://www.semanlink.net/tag/data_model|creationDate|2011-11-22 +http://www.semanlink.net/tag/data_model|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_model|uri|http://www.semanlink.net/tag/data_model +http://www.semanlink.net/tag/firewall|creationTime|2008-01-03T23:18:35Z +http://www.semanlink.net/tag/firewall|prefLabel|Firewall +http://www.semanlink.net/tag/firewall|creationDate|2008-01-03 +http://www.semanlink.net/tag/firewall|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/firewall|uri|http://www.semanlink.net/tag/firewall +http://www.semanlink.net/tag/eswc|creationTime|2007-05-03T00:08:21Z +http://www.semanlink.net/tag/eswc|prefLabel|ESWC +http://www.semanlink.net/tag/eswc|broader|http://www.semanlink.net/tag/semantic_web_conferences +http://www.semanlink.net/tag/eswc|creationDate|2007-05-03 +http://www.semanlink.net/tag/eswc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eswc|uri|http://www.semanlink.net/tag/eswc +http://www.semanlink.net/tag/eswc|broader_prefLabel|Semantic Web conferences +http://www.semanlink.net/tag/maidsafe|creationTime|2014-05-12T00:00:46Z +http://www.semanlink.net/tag/maidsafe|prefLabel|Maidsafe +http://www.semanlink.net/tag/maidsafe|broader|http://www.semanlink.net/tag/peer_to_peer +http://www.semanlink.net/tag/maidsafe|related|http://www.semanlink.net/tag/bitcoin +http://www.semanlink.net/tag/maidsafe|creationDate|2014-05-12 +http://www.semanlink.net/tag/maidsafe|comment|"""The New Decentralized Internet""" +http://www.semanlink.net/tag/maidsafe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maidsafe|homepage|http://maidsafe.net +http://www.semanlink.net/tag/maidsafe|uri|http://www.semanlink.net/tag/maidsafe +http://www.semanlink.net/tag/maidsafe|broader_prefLabel|Peer 
to peer +http://www.semanlink.net/tag/maidsafe|broader_altLabel|P2P +http://www.semanlink.net/tag/documentation|creationTime|2013-09-13T11:35:24Z +http://www.semanlink.net/tag/documentation|prefLabel|Documentation +http://www.semanlink.net/tag/documentation|creationDate|2013-09-13 +http://www.semanlink.net/tag/documentation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/documentation|uri|http://www.semanlink.net/tag/documentation +http://www.semanlink.net/tag/extinction_d_especes|prefLabel|Extinction d'espèces +http://www.semanlink.net/tag/extinction_d_especes|broader|http://www.semanlink.net/tag/biodiversite +http://www.semanlink.net/tag/extinction_d_especes|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/extinction_d_especes|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/extinction_d_especes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/extinction_d_especes|uri|http://www.semanlink.net/tag/extinction_d_especes +http://www.semanlink.net/tag/extinction_d_especes|broader_prefLabel|Biodiversité +http://www.semanlink.net/tag/extinction_d_especes|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/extinction_d_especes|broader_prefLabel|Écologie +http://www.semanlink.net/tag/extinction_d_especes|broader_altLabel|Biodiversity +http://www.semanlink.net/tag/college_de_france|creationTime|2016-02-20T14:38:44Z +http://www.semanlink.net/tag/college_de_france|prefLabel|Collège de France +http://www.semanlink.net/tag/college_de_france|creationDate|2016-02-20 +http://www.semanlink.net/tag/college_de_france|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/college_de_france|uri|http://www.semanlink.net/tag/college_de_france +http://www.semanlink.net/tag/archeologie_amazonienne|prefLabel|Archéologie amazonienne +http://www.semanlink.net/tag/archeologie_amazonienne|broader|http://www.semanlink.net/tag/indiens_du_bresil +http://www.semanlink.net/tag/archeologie_amazonienne|broader|http://www.semanlink.net/tag/amazonie +http://www.semanlink.net/tag/archeologie_amazonienne|broader|http://www.semanlink.net/tag/civilisations_precolombiennes +http://www.semanlink.net/tag/archeologie_amazonienne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/archeologie_amazonienne|uri|http://www.semanlink.net/tag/archeologie_amazonienne +http://www.semanlink.net/tag/archeologie_amazonienne|broader_prefLabel|Indiens du Brésil +http://www.semanlink.net/tag/archeologie_amazonienne|broader_prefLabel|Amazonie +http://www.semanlink.net/tag/archeologie_amazonienne|broader_prefLabel|Civilisations précolombiennes +http://www.semanlink.net/tag/plutonium|creationTime|2016-02-09T23:36:45Z +http://www.semanlink.net/tag/plutonium|prefLabel|Plutonium +http://www.semanlink.net/tag/plutonium|creationDate|2016-02-09 +http://www.semanlink.net/tag/plutonium|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/plutonium|uri|http://www.semanlink.net/tag/plutonium +http://www.semanlink.net/tag/text_aware_kg_embedding|creationTime|2020-04-29T19:07:18Z +http://www.semanlink.net/tag/text_aware_kg_embedding|prefLabel|Text-Aware KG embedding +http://www.semanlink.net/tag/text_aware_kg_embedding|broader|http://www.semanlink.net/tag/text_kg_and_embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|broader|http://www.semanlink.net/tag/knowledge_graph_embeddings 
+http://www.semanlink.net/tag/text_aware_kg_embedding|broader|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|related|http://www.semanlink.net/tag/rdf_embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|creationDate|2020-04-29 +http://www.semanlink.net/tag/text_aware_kg_embedding|comment|"Many early knowledge graph embeddings do not use literal attributes, only structure of the graph... + +Text-Aware KG embedding systems leverage both kinds of information. For instance [#DKRL](tag:dkrl) learns a structure-based representation (as TransE does) and a description-based representation that can be used in an integrated scoring function, thus combining the relative +information coming from both text and facts." +http://www.semanlink.net/tag/text_aware_kg_embedding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_aware_kg_embedding|altLabel|Text in Knowledge Graph embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|altLabel|Text in KG embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|uri|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_prefLabel|Text, KG and embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_prefLabel|Knowledge Graph Embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_prefLabel|Entity embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_altLabel|Embeddings of Text + Knowledge Graphs +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_altLabel|Embeddings of Text + Knowledge Bases +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_altLabel|Embeddings of text + KB +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_altLabel|KGE +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_altLabel|Knowledge graph embedding +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_altLabel|KG embedding +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_related|http://www.semanlink.net/tag/these_renault_embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_related|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_related|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_related|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/text_aware_kg_embedding|broader_related|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/configuration|creationTime|2011-02-03T22:16:51Z +http://www.semanlink.net/tag/configuration|prefLabel|Configuration +http://www.semanlink.net/tag/configuration|creationDate|2011-02-03 +http://www.semanlink.net/tag/configuration|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/configuration|uri|http://www.semanlink.net/tag/configuration +http://www.semanlink.net/tag/nubie|prefLabel|Nubie +http://www.semanlink.net/tag/nubie|broader|http://www.semanlink.net/tag/soudan +http://www.semanlink.net/tag/nubie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nubie|uri|http://www.semanlink.net/tag/nubie +http://www.semanlink.net/tag/nubie|broader_prefLabel|Soudan +http://www.semanlink.net/tag/templatic_documents|creationTime|2020-06-15T22:54:14Z 
+http://www.semanlink.net/tag/templatic_documents|prefLabel|Templatic documents +http://www.semanlink.net/tag/templatic_documents|broader|http://www.semanlink.net/tag/2d_nlp +http://www.semanlink.net/tag/templatic_documents|related|http://www.semanlink.net/tag/visually_rich_documents +http://www.semanlink.net/tag/templatic_documents|creationDate|2020-06-15 +http://www.semanlink.net/tag/templatic_documents|comment|"Templatic documents, such as receipts, bills, are extremely common and critical in a diverse range of business +workflows. + +> The challenge in this information extraction problem arises because it +straddles the NLP and computer vision +worlds... +> Such documents do not contain “natural language” but instead resemble forms, with data often presented in tables ([src](doc:2020/06/google_ai_blog_extracting_stru)) +" +http://www.semanlink.net/tag/templatic_documents|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/templatic_documents|altLabel|Form-like Documents +http://www.semanlink.net/tag/templatic_documents|uri|http://www.semanlink.net/tag/templatic_documents +http://www.semanlink.net/tag/templatic_documents|broader_prefLabel|2D-NLP +http://www.semanlink.net/tag/fibonacci|creationTime|2011-08-20T23:09:55Z +http://www.semanlink.net/tag/fibonacci|prefLabel|Fibonacci +http://www.semanlink.net/tag/fibonacci|creationDate|2011-08-20 +http://www.semanlink.net/tag/fibonacci|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fibonacci|uri|http://www.semanlink.net/tag/fibonacci +http://www.semanlink.net/tag/cross_modal_retrieval|creationTime|2018-07-11T13:06:23Z +http://www.semanlink.net/tag/cross_modal_retrieval|prefLabel|Cross-Modal Retrieval +http://www.semanlink.net/tag/cross_modal_retrieval|broader|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/cross_modal_retrieval|creationDate|2018-07-11 +http://www.semanlink.net/tag/cross_modal_retrieval|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cross_modal_retrieval|uri|http://www.semanlink.net/tag/cross_modal_retrieval +http://www.semanlink.net/tag/cross_modal_retrieval|broader_prefLabel|Information retrieval +http://www.semanlink.net/tag/cross_modal_retrieval|broader_altLabel|IR +http://www.semanlink.net/tag/semantically_searchable_distributed_repository|prefLabel|Semantically searchable distributed repository +http://www.semanlink.net/tag/semantically_searchable_distributed_repository|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/semantically_searchable_distributed_repository|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantically_searchable_distributed_repository|uri|http://www.semanlink.net/tag/semantically_searchable_distributed_repository +http://www.semanlink.net/tag/semantically_searchable_distributed_repository|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/high_frequency_trading|creationTime|2010-09-18T01:47:27Z +http://www.semanlink.net/tag/high_frequency_trading|prefLabel|High-frequency trading +http://www.semanlink.net/tag/high_frequency_trading|broader|http://www.semanlink.net/tag/marches_financiers +http://www.semanlink.net/tag/high_frequency_trading|creationDate|2010-09-18 +http://www.semanlink.net/tag/high_frequency_trading|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/high_frequency_trading|describedBy|https://en.wikipedia.org/wiki/Algo_trading +http://www.semanlink.net/tag/high_frequency_trading|altLabel|Algorithmic trading +http://www.semanlink.net/tag/high_frequency_trading|altLabel|Algo-trading +http://www.semanlink.net/tag/high_frequency_trading|uri|http://www.semanlink.net/tag/high_frequency_trading +http://www.semanlink.net/tag/high_frequency_trading|broader_prefLabel|Marchés financiers +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|creationTime|2020-08-15T11:54:54Z +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|prefLabel|Hierarchical multi-label text classification +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|broader|http://www.semanlink.net/tag/hierarchical_multi_label_classification +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|broader|http://www.semanlink.net/tag/nlp_hierarchical_text_classification +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|broader|http://www.semanlink.net/tag/text_multi_label_classification +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|creationDate|2020-08-15 +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|uri|http://www.semanlink.net/tag/hierarchical_multi_label_text_classification +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|broader_prefLabel|Hierarchical multi-label classification +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|broader_prefLabel|Hierarchical text classification +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|broader_prefLabel|Multi-label Text classification +http://www.semanlink.net/tag/hierarchical_multi_label_text_classification|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/journal|creationTime|2008-07-08T21:22:42Z +http://www.semanlink.net/tag/journal|prefLabel|Presse +http://www.semanlink.net/tag/journal|broader|http://www.semanlink.net/tag/journalisme +http://www.semanlink.net/tag/journal|creationDate|2008-07-08 +http://www.semanlink.net/tag/journal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/journal|altLabel|Journal +http://www.semanlink.net/tag/journal|uri|http://www.semanlink.net/tag/journal +http://www.semanlink.net/tag/journal|broader_prefLabel|Journalisme +http://www.semanlink.net/tag/sw_guys|creationTime|2011-08-11T19:11:46Z +http://www.semanlink.net/tag/sw_guys|prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/sw_guys|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/sw_guys|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/sw_guys|creationDate|2011-08-11 +http://www.semanlink.net/tag/sw_guys|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sw_guys|uri|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/sw_guys|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/sw_guys|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/sw_guys|broader_altLabel|Technical guys 
+http://www.semanlink.net/tag/sw_guys|broader_altLabel|sw +http://www.semanlink.net/tag/sw_guys|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/bitmap_index|creationTime|2014-04-23T21:50:05Z +http://www.semanlink.net/tag/bitmap_index|prefLabel|Bitmap index +http://www.semanlink.net/tag/bitmap_index|broader|http://www.semanlink.net/tag/database +http://www.semanlink.net/tag/bitmap_index|creationDate|2014-04-23 +http://www.semanlink.net/tag/bitmap_index|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bitmap_index|describedBy|https://en.wikipedia.org/wiki/Bitmap_index +http://www.semanlink.net/tag/bitmap_index|uri|http://www.semanlink.net/tag/bitmap_index +http://www.semanlink.net/tag/bitmap_index|broader_prefLabel|Database +http://www.semanlink.net/tag/oauth|creationTime|2014-09-26T00:27:46Z +http://www.semanlink.net/tag/oauth|prefLabel|OAuth +http://www.semanlink.net/tag/oauth|creationDate|2014-09-26 +http://www.semanlink.net/tag/oauth|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/oauth|describedBy|https://en.wikipedia.org/wiki/OAuth +http://www.semanlink.net/tag/oauth|uri|http://www.semanlink.net/tag/oauth +http://www.semanlink.net/tag/arctique|prefLabel|Arctique +http://www.semanlink.net/tag/arctique|broader|http://www.semanlink.net/tag/regions_polaires +http://www.semanlink.net/tag/arctique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/arctique|uri|http://www.semanlink.net/tag/arctique +http://www.semanlink.net/tag/arctique|broader_prefLabel|Régions polaires +http://www.semanlink.net/tag/backtranslation|creationTime|2019-03-26T00:43:33Z +http://www.semanlink.net/tag/backtranslation|prefLabel|Backtranslation +http://www.semanlink.net/tag/backtranslation|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/backtranslation|broader|http://www.semanlink.net/tag/data_augmentation +http://www.semanlink.net/tag/backtranslation|creationDate|2019-03-26 +http://www.semanlink.net/tag/backtranslation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/backtranslation|uri|http://www.semanlink.net/tag/backtranslation +http://www.semanlink.net/tag/backtranslation|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/backtranslation|broader_prefLabel|Data Augmentation +http://www.semanlink.net/tag/apple_java|prefLabel|Apple Java +http://www.semanlink.net/tag/apple_java|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/apple_java|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/apple_java|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apple_java|uri|http://www.semanlink.net/tag/apple_java +http://www.semanlink.net/tag/apple_java|broader_prefLabel|Java +http://www.semanlink.net/tag/apple_java|broader_prefLabel|Apple +http://www.semanlink.net/tag/neural_symbolic_computing|creationTime|2020-03-15T10:40:59Z +http://www.semanlink.net/tag/neural_symbolic_computing|prefLabel|Neural-Symbolic Computing +http://www.semanlink.net/tag/neural_symbolic_computing|broader|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/tag/neural_symbolic_computing|creationDate|2020-03-15 +http://www.semanlink.net/tag/neural_symbolic_computing|comment|> Neural-symbolic computing aims at integrating two most fundamental cognitive abilities: the ability to learn from experience, and the ability to reason 
from what has been learned ([src](/doc/2020/03/_1905_06088_neural_symbolic_co)) +http://www.semanlink.net/tag/neural_symbolic_computing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neural_symbolic_computing|uri|http://www.semanlink.net/tag/neural_symbolic_computing +http://www.semanlink.net/tag/neural_symbolic_computing|broader_prefLabel|NN / Symbolic AI hybridation +http://www.semanlink.net/tag/neural_symbolic_computing|broader_related|http://www.semanlink.net/tag/connectionist_vs_symbolic_debate +http://www.semanlink.net/tag/cliqz|creationTime|2020-01-20T19:13:44Z +http://www.semanlink.net/tag/cliqz|prefLabel|Cliqz +http://www.semanlink.net/tag/cliqz|broader|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/cliqz|broader|http://www.semanlink.net/tag/brouteur +http://www.semanlink.net/tag/cliqz|related|http://www.semanlink.net/tag/firefox +http://www.semanlink.net/tag/cliqz|creationDate|2020-01-20 +http://www.semanlink.net/tag/cliqz|comment|privacy-oriented web browser and search engine +http://www.semanlink.net/tag/cliqz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cliqz|describedBy|https://en.wikipedia.org/wiki/Cliqz +http://www.semanlink.net/tag/cliqz|uri|http://www.semanlink.net/tag/cliqz +http://www.semanlink.net/tag/cliqz|broader_prefLabel|Search Engines +http://www.semanlink.net/tag/cliqz|broader_prefLabel|Brouteur +http://www.semanlink.net/tag/cliqz|broader_altLabel|Moteur de recherche +http://www.semanlink.net/tag/cliqz|broader_altLabel|Browser +http://www.semanlink.net/tag/bi_lstm|creationTime|2018-03-05T18:58:20Z +http://www.semanlink.net/tag/bi_lstm|prefLabel|bi-LSTM +http://www.semanlink.net/tag/bi_lstm|broader|http://www.semanlink.net/tag/lstm_networks +http://www.semanlink.net/tag/bi_lstm|creationDate|2018-03-05 +http://www.semanlink.net/tag/bi_lstm|comment|instead of just processing the words in a sentence from left to right, also go from right to left, allowing later words to help disambiguate the meaning of earlier words and phrases +http://www.semanlink.net/tag/bi_lstm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bi_lstm|altLabel|BILSTM +http://www.semanlink.net/tag/bi_lstm|uri|http://www.semanlink.net/tag/bi_lstm +http://www.semanlink.net/tag/bi_lstm|broader_prefLabel|LSTM +http://www.semanlink.net/tag/bi_lstm|broader_related|http://www.semanlink.net/tag/time_series +http://www.semanlink.net/tag/bi_lstm|broader_related|http://www.semanlink.net/tag/vanishing_gradient +http://www.semanlink.net/tag/web_server|prefLabel|Web server +http://www.semanlink.net/tag/web_server|broader|http://www.semanlink.net/tag/web_serving +http://www.semanlink.net/tag/web_server|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_server|uri|http://www.semanlink.net/tag/web_server +http://www.semanlink.net/tag/web_server|broader_prefLabel|Web Serving +http://www.semanlink.net/tag/rural_india|prefLabel|Rural India +http://www.semanlink.net/tag/rural_india|broader|http://www.semanlink.net/tag/inde +http://www.semanlink.net/tag/rural_india|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rural_india|uri|http://www.semanlink.net/tag/rural_india +http://www.semanlink.net/tag/rural_india|broader_prefLabel|Inde +http://www.semanlink.net/tag/universite|creationTime|2008-12-26T15:18:20Z +http://www.semanlink.net/tag/universite|prefLabel|Université 
+http://www.semanlink.net/tag/universite|creationDate|2008-12-26 +http://www.semanlink.net/tag/universite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/universite|uri|http://www.semanlink.net/tag/universite +http://www.semanlink.net/tag/protection_de_la_nature|creationTime|2020-05-13T15:22:46Z +http://www.semanlink.net/tag/protection_de_la_nature|prefLabel|Protection de la nature +http://www.semanlink.net/tag/protection_de_la_nature|broader|http://www.semanlink.net/tag/nature +http://www.semanlink.net/tag/protection_de_la_nature|creationDate|2020-05-13 +http://www.semanlink.net/tag/protection_de_la_nature|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/protection_de_la_nature|uri|http://www.semanlink.net/tag/protection_de_la_nature +http://www.semanlink.net/tag/protection_de_la_nature|broader_prefLabel|Nature +http://www.semanlink.net/tag/angularjs|creationTime|2015-06-12T00:43:47Z +http://www.semanlink.net/tag/angularjs|prefLabel|AngularJS +http://www.semanlink.net/tag/angularjs|broader|http://www.semanlink.net/tag/javascript_frameork +http://www.semanlink.net/tag/angularjs|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/angularjs|creationDate|2015-06-12 +http://www.semanlink.net/tag/angularjs|comment|"AngularJS — Superheroic JavaScript MVW Framework
+HTML enhanced for web apps!" +http://www.semanlink.net/tag/angularjs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/angularjs|describedBy|https://angularjs.org/ +http://www.semanlink.net/tag/angularjs|uri|http://www.semanlink.net/tag/angularjs +http://www.semanlink.net/tag/angularjs|broader_prefLabel|Javascript framework +http://www.semanlink.net/tag/angularjs|broader_prefLabel|Google +http://www.semanlink.net/tag/angularjs|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/saas|creationTime|2012-04-10T23:29:19Z +http://www.semanlink.net/tag/saas|prefLabel|SaaS +http://www.semanlink.net/tag/saas|creationDate|2012-04-10 +http://www.semanlink.net/tag/saas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/saas|uri|http://www.semanlink.net/tag/saas +http://www.semanlink.net/tag/oasis_specs|creationTime|2008-12-12T00:01:41Z +http://www.semanlink.net/tag/oasis_specs|prefLabel|OASIS: specs +http://www.semanlink.net/tag/oasis_specs|creationDate|2008-12-12 +http://www.semanlink.net/tag/oasis_specs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/oasis_specs|uri|http://www.semanlink.net/tag/oasis_specs +http://www.semanlink.net/tag/face_recognition|creationTime|2017-06-05T10:13:18Z +http://www.semanlink.net/tag/face_recognition|prefLabel|Face recognition +http://www.semanlink.net/tag/face_recognition|related|http://www.semanlink.net/tag/similarity_learning +http://www.semanlink.net/tag/face_recognition|creationDate|2017-06-05 +http://www.semanlink.net/tag/face_recognition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/face_recognition|uri|http://www.semanlink.net/tag/face_recognition +http://www.semanlink.net/tag/societe_de_consommation|prefLabel|Société de consommation +http://www.semanlink.net/tag/societe_de_consommation|broader|http://www.semanlink.net/tag/societe +http://www.semanlink.net/tag/societe_de_consommation|broader|http://www.semanlink.net/tag/capitalisme +http://www.semanlink.net/tag/societe_de_consommation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/societe_de_consommation|uri|http://www.semanlink.net/tag/societe_de_consommation +http://www.semanlink.net/tag/societe_de_consommation|broader_prefLabel|Société +http://www.semanlink.net/tag/societe_de_consommation|broader_prefLabel|Capitalisme +http://www.semanlink.net/tag/chili|prefLabel|Chili +http://www.semanlink.net/tag/chili|broader|http://www.semanlink.net/tag/amerique_latine +http://www.semanlink.net/tag/chili|broader|http://www.semanlink.net/tag/amerique_du_sud +http://www.semanlink.net/tag/chili|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chili|uri|http://www.semanlink.net/tag/chili +http://www.semanlink.net/tag/chili|broader_prefLabel|Amérique latine +http://www.semanlink.net/tag/chili|broader_prefLabel|Amérique du sud +http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|creationTime|2007-04-21T14:47:53Z +http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|prefLabel|J'ai un petit problème avec mon ordinateur +http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|broader|http://www.semanlink.net/tag/computers +http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|broader|http://www.semanlink.net/tag/j_ai_un_petit_probleme 
+http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|broader|http://www.semanlink.net/tag/ca_craint +http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|creationDate|2007-04-21 +http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|uri|http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur +http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|broader_prefLabel|Computers +http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|broader_prefLabel|J'ai un petit problème +http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur|broader_prefLabel|Ca craint +http://www.semanlink.net/tag/industrie_du_disque|prefLabel|Industrie du disque +http://www.semanlink.net/tag/industrie_du_disque|broader|http://www.semanlink.net/tag/content_industries +http://www.semanlink.net/tag/industrie_du_disque|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/industrie_du_disque|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/industrie_du_disque|altLabel|Record industry +http://www.semanlink.net/tag/industrie_du_disque|altLabel|Industrie musicale +http://www.semanlink.net/tag/industrie_du_disque|uri|http://www.semanlink.net/tag/industrie_du_disque +http://www.semanlink.net/tag/industrie_du_disque|broader_prefLabel|Content industries +http://www.semanlink.net/tag/industrie_du_disque|broader_prefLabel|Musique +http://www.semanlink.net/tag/industrie_du_disque|broader_altLabel|Music +http://www.semanlink.net/tag/sebastian_germesin|creationTime|2012-08-31T00:52:57Z +http://www.semanlink.net/tag/sebastian_germesin|prefLabel|Sebastian Germesin +http://www.semanlink.net/tag/sebastian_germesin|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/sebastian_germesin|related|http://www.semanlink.net/tag/vie_vienna_iks_editables +http://www.semanlink.net/tag/sebastian_germesin|related|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://www.semanlink.net/tag/sebastian_germesin|creationDate|2012-08-31 +http://www.semanlink.net/tag/sebastian_germesin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sebastian_germesin|uri|http://www.semanlink.net/tag/sebastian_germesin +http://www.semanlink.net/tag/sebastian_germesin|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/information|creationTime|2008-02-15T22:40:01Z +http://www.semanlink.net/tag/information|prefLabel|Information +http://www.semanlink.net/tag/information|creationDate|2008-02-15 +http://www.semanlink.net/tag/information|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/information|uri|http://www.semanlink.net/tag/information +http://www.semanlink.net/tag/pellet|creationTime|2010-12-16T15:44:30Z +http://www.semanlink.net/tag/pellet|prefLabel|Pellet +http://www.semanlink.net/tag/pellet|broader|http://www.semanlink.net/tag/clark_and_parsia +http://www.semanlink.net/tag/pellet|creationDate|2010-12-16 +http://www.semanlink.net/tag/pellet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pellet|uri|http://www.semanlink.net/tag/pellet +http://www.semanlink.net/tag/pellet|broader_prefLabel|Clark and Parsia +http://www.semanlink.net/tag/pellet|broader_related|http://www.semanlink.net/tag/owl 
+http://www.semanlink.net/tag/edward_snowden|creationTime|2013-06-11T10:27:49Z +http://www.semanlink.net/tag/edward_snowden|prefLabel|Edward Snowden +http://www.semanlink.net/tag/edward_snowden|related|http://www.semanlink.net/tag/nsa +http://www.semanlink.net/tag/edward_snowden|creationDate|2013-06-11 +http://www.semanlink.net/tag/edward_snowden|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/edward_snowden|describedBy|https://en.wikipedia.org/wiki/Edward_Snowden +http://www.semanlink.net/tag/edward_snowden|uri|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/tag/entity_mining|creationTime|2012-04-16T16:24:51Z +http://www.semanlink.net/tag/entity_mining|prefLabel|Entity mining +http://www.semanlink.net/tag/entity_mining|broader|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/entity_mining|creationDate|2012-04-16 +http://www.semanlink.net/tag/entity_mining|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_mining|uri|http://www.semanlink.net/tag/entity_mining +http://www.semanlink.net/tag/entity_mining|broader_prefLabel|Named Entity Recognition +http://www.semanlink.net/tag/entity_mining|broader_altLabel|NER +http://www.semanlink.net/tag/grand_voyageur|prefLabel|Grand voyageur +http://www.semanlink.net/tag/grand_voyageur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grand_voyageur|uri|http://www.semanlink.net/tag/grand_voyageur +http://www.semanlink.net/tag/enterprise_system|creationTime|2010-08-24T10:53:09Z +http://www.semanlink.net/tag/enterprise_system|prefLabel|Enterprise System +http://www.semanlink.net/tag/enterprise_system|broader|http://www.semanlink.net/tag/information_system +http://www.semanlink.net/tag/enterprise_system|creationDate|2010-08-24 +http://www.semanlink.net/tag/enterprise_system|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enterprise_system|uri|http://www.semanlink.net/tag/enterprise_system +http://www.semanlink.net/tag/enterprise_system|broader_prefLabel|Information System +http://www.semanlink.net/tag/histoire_de_l_afrique|prefLabel|Histoire de l'Afrique +http://www.semanlink.net/tag/histoire_de_l_afrique|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/histoire_de_l_afrique|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/histoire_de_l_afrique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_de_l_afrique|uri|http://www.semanlink.net/tag/histoire_de_l_afrique +http://www.semanlink.net/tag/histoire_de_l_afrique|broader_prefLabel|Histoire +http://www.semanlink.net/tag/histoire_de_l_afrique|broader_prefLabel|Afrique +http://www.semanlink.net/tag/histoire_de_l_afrique|broader_altLabel|Africa +http://www.semanlink.net/tag/dallol|creationTime|2008-11-21T23:00:58Z +http://www.semanlink.net/tag/dallol|prefLabel|Dallol +http://www.semanlink.net/tag/dallol|broader|http://www.semanlink.net/tag/volcan +http://www.semanlink.net/tag/dallol|broader|http://www.semanlink.net/tag/ethiopie +http://www.semanlink.net/tag/dallol|creationDate|2008-11-21 +http://www.semanlink.net/tag/dallol|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dallol|describedBy|https://en.wikipedia.org/wiki/Dallol_(volcano) +http://www.semanlink.net/tag/dallol|uri|http://www.semanlink.net/tag/dallol 
+http://www.semanlink.net/tag/dallol|broader_prefLabel|Volcan +http://www.semanlink.net/tag/dallol|broader_prefLabel|Ethiopie +http://www.semanlink.net/tag/weapon_of_mass_distraction|creationTime|2020-01-20T00:05:35Z +http://www.semanlink.net/tag/weapon_of_mass_distraction|prefLabel|Weapon of mass distraction +http://www.semanlink.net/tag/weapon_of_mass_distraction|broader|http://www.semanlink.net/tag/the_web_sucks +http://www.semanlink.net/tag/weapon_of_mass_distraction|creationDate|2020-01-20 +http://www.semanlink.net/tag/weapon_of_mass_distraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/weapon_of_mass_distraction|uri|http://www.semanlink.net/tag/weapon_of_mass_distraction +http://www.semanlink.net/tag/weapon_of_mass_distraction|broader_prefLabel|The web sucks +http://www.semanlink.net/tag/synthetic_genome|creationTime|2019-05-15T23:12:07Z +http://www.semanlink.net/tag/synthetic_genome|prefLabel|Synthetic Genome +http://www.semanlink.net/tag/synthetic_genome|broader|http://www.semanlink.net/tag/artificial_life +http://www.semanlink.net/tag/synthetic_genome|creationDate|2019-05-15 +http://www.semanlink.net/tag/synthetic_genome|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/synthetic_genome|uri|http://www.semanlink.net/tag/synthetic_genome +http://www.semanlink.net/tag/synthetic_genome|broader_prefLabel|Artificial life +http://www.semanlink.net/tag/website_creation|creationTime|2015-10-13T10:18:42Z +http://www.semanlink.net/tag/website_creation|prefLabel|Website: creation +http://www.semanlink.net/tag/website_creation|creationDate|2015-10-13 +http://www.semanlink.net/tag/website_creation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/website_creation|uri|http://www.semanlink.net/tag/website_creation +http://www.semanlink.net/tag/hypersolutions|prefLabel|hyperSOLutions +http://www.semanlink.net/tag/hypersolutions|broader|http://www.semanlink.net/tag/my_old_things +http://www.semanlink.net/tag/hypersolutions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hypersolutions|uri|http://www.semanlink.net/tag/hypersolutions +http://www.semanlink.net/tag/hypersolutions|broader_prefLabel|My old things +http://www.semanlink.net/tag/technical_documentation|prefLabel|Technical documentation +http://www.semanlink.net/tag/technical_documentation|creationDate|2006-10-09 +http://www.semanlink.net/tag/technical_documentation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/technical_documentation|uri|http://www.semanlink.net/tag/technical_documentation +http://www.semanlink.net/tag/prehistoire|prefLabel|Préhistoire +http://www.semanlink.net/tag/prehistoire|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/prehistoire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/prehistoire|uri|http://www.semanlink.net/tag/prehistoire +http://www.semanlink.net/tag/prehistoire|broader_prefLabel|Histoire +http://www.semanlink.net/tag/religion|prefLabel|Religion +http://www.semanlink.net/tag/religion|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/religion|uri|http://www.semanlink.net/tag/religion +http://www.semanlink.net/tag/mailing_list|creationTime|2007-06-23T15:13:38Z +http://www.semanlink.net/tag/mailing_list|prefLabel|Mailing list +http://www.semanlink.net/tag/mailing_list|creationDate|2007-06-23 
+http://www.semanlink.net/tag/mailing_list|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mailing_list|uri|http://www.semanlink.net/tag/mailing_list +http://www.semanlink.net/tag/path|creationTime|2015-02-27T14:14:04Z +http://www.semanlink.net/tag/path|prefLabel|$PATH +http://www.semanlink.net/tag/path|creationDate|2015-02-27 +http://www.semanlink.net/tag/path|comment|"mac: ``open ~/.bash_profile`` (file which is read whenever you start a new terminal) + + +" +http://www.semanlink.net/tag/path|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/path|uri|http://www.semanlink.net/tag/path +http://www.semanlink.net/tag/linking_enterprise_data|creationTime|2008-06-25T19:54:13Z +http://www.semanlink.net/tag/linking_enterprise_data|prefLabel|Linking Enterprise Data +http://www.semanlink.net/tag/linking_enterprise_data|broader|http://www.semanlink.net/tag/semantic_enterprise +http://www.semanlink.net/tag/linking_enterprise_data|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linking_enterprise_data|broader|http://www.semanlink.net/tag/enterprise_data +http://www.semanlink.net/tag/linking_enterprise_data|creationDate|2008-06-25 +http://www.semanlink.net/tag/linking_enterprise_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linking_enterprise_data|altLabel|Linked Data in enterprise +http://www.semanlink.net/tag/linking_enterprise_data|uri|http://www.semanlink.net/tag/linking_enterprise_data +http://www.semanlink.net/tag/linking_enterprise_data|broader_prefLabel|Semantic Enterprise +http://www.semanlink.net/tag/linking_enterprise_data|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linking_enterprise_data|broader_prefLabel|Enterprise Data +http://www.semanlink.net/tag/linking_enterprise_data|broader_altLabel|Enterprise Semantic Web +http://www.semanlink.net/tag/linking_enterprise_data|broader_altLabel|Corporate Semantic Web +http://www.semanlink.net/tag/linking_enterprise_data|broader_altLabel|Semantic Web in the enterprise +http://www.semanlink.net/tag/linking_enterprise_data|broader_altLabel|LD +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/rdf_data_source 
+http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linking_enterprise_data|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/bbc_programmes|creationTime|2011-01-22T12:04:23Z +http://www.semanlink.net/tag/bbc_programmes|prefLabel|BBC - Programmes +http://www.semanlink.net/tag/bbc_programmes|broader|http://www.semanlink.net/tag/bbc_semantic_publishing +http://www.semanlink.net/tag/bbc_programmes|creationDate|2011-01-22 +http://www.semanlink.net/tag/bbc_programmes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bbc_programmes|describedBy|http://www.bbc.co.uk/programmes +http://www.semanlink.net/tag/bbc_programmes|uri|http://www.semanlink.net/tag/bbc_programmes +http://www.semanlink.net/tag/bbc_programmes|broader_prefLabel|BBC semantic publishing +http://www.semanlink.net/tag/bbc_programmes|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/canal|creationTime|2016-05-14T11:57:30Z +http://www.semanlink.net/tag/canal|prefLabel|Canal+ +http://www.semanlink.net/tag/canal|broader|http://www.semanlink.net/tag/television +http://www.semanlink.net/tag/canal|creationDate|2016-05-14 +http://www.semanlink.net/tag/canal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/canal|uri|http://www.semanlink.net/tag/canal +http://www.semanlink.net/tag/canal|broader_prefLabel|Télévision +http://www.semanlink.net/tag/canal|broader_altLabel|TV +http://www.semanlink.net/tag/gartner|creationTime|2007-06-04T21:48:18Z +http://www.semanlink.net/tag/gartner|prefLabel|Gartner +http://www.semanlink.net/tag/gartner|creationDate|2007-06-04 +http://www.semanlink.net/tag/gartner|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gartner|describedBy|https://en.wikipedia.org/wiki/Gartner +http://www.semanlink.net/tag/gartner|uri|http://www.semanlink.net/tag/gartner +http://www.semanlink.net/tag/abrutis|creationTime|2012-06-30T14:46:14Z +http://www.semanlink.net/tag/abrutis|prefLabel|Abrutis +http://www.semanlink.net/tag/abrutis|creationDate|2012-06-30 +http://www.semanlink.net/tag/abrutis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/abrutis|uri|http://www.semanlink.net/tag/abrutis +http://www.semanlink.net/tag/manipulations_politiques|creationTime|2021-04-24T11:33:05Z +http://www.semanlink.net/tag/manipulations_politiques|prefLabel|Manipulations politiques +http://www.semanlink.net/tag/manipulations_politiques|broader|http://www.semanlink.net/tag/manipulation +http://www.semanlink.net/tag/manipulations_politiques|related|http://www.semanlink.net/tag/election +http://www.semanlink.net/tag/manipulations_politiques|creationDate|2021-04-24 +http://www.semanlink.net/tag/manipulations_politiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/manipulations_politiques|uri|http://www.semanlink.net/tag/manipulations_politiques +http://www.semanlink.net/tag/manipulations_politiques|broader_prefLabel|Manipulation +http://www.semanlink.net/tag/solid|creationTime|2018-03-27T09:36:43Z +http://www.semanlink.net/tag/solid|prefLabel|Solid +http://www.semanlink.net/tag/solid|broader|http://www.semanlink.net/tag/data_ownership +http://www.semanlink.net/tag/solid|broader|http://www.semanlink.net/tag/privacy_and_internet 
+http://www.semanlink.net/tag/solid|broader|http://www.semanlink.net/tag/linked_data_application +http://www.semanlink.net/tag/solid|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/solid|broader|http://www.semanlink.net/tag/decentralized_social_network +http://www.semanlink.net/tag/solid|related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/solid|creationDate|2018-03-27 +http://www.semanlink.net/tag/solid|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/solid|homepage|https://solid.mit.edu/ +http://www.semanlink.net/tag/solid|uri|http://www.semanlink.net/tag/solid +http://www.semanlink.net/tag/solid|broader_prefLabel|Data ownership +http://www.semanlink.net/tag/solid|broader_prefLabel|Privacy and internet +http://www.semanlink.net/tag/solid|broader_prefLabel|Linked Data: application +http://www.semanlink.net/tag/solid|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/solid|broader_prefLabel|Decentralized social network +http://www.semanlink.net/tag/solid|broader_altLabel|LD +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/cybersurveillance +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/solid|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/patent_infringement|prefLabel|Patent Infringement +http://www.semanlink.net/tag/patent_infringement|broader|http://www.semanlink.net/tag/patent +http://www.semanlink.net/tag/patent_infringement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/patent_infringement|uri|http://www.semanlink.net/tag/patent_infringement +http://www.semanlink.net/tag/patent_infringement|broader_prefLabel|Patent +http://www.semanlink.net/tag/patent_infringement|broader_altLabel|Brevet +http://www.semanlink.net/tag/marlene_dietrich|creationTime|2008-05-15T22:32:57Z +http://www.semanlink.net/tag/marlene_dietrich|prefLabel|Marlene Dietrich +http://www.semanlink.net/tag/marlene_dietrich|creationDate|2008-05-15 +http://www.semanlink.net/tag/marlene_dietrich|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/marlene_dietrich|describedBy|https://en.wikipedia.org/wiki/Marlene_Dietrich 
+http://www.semanlink.net/tag/marlene_dietrich|uri|http://www.semanlink.net/tag/marlene_dietrich +http://www.semanlink.net/tag/institutions_internationales|prefLabel|Institutions internationales +http://www.semanlink.net/tag/institutions_internationales|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/institutions_internationales|uri|http://www.semanlink.net/tag/institutions_internationales +http://www.semanlink.net/tag/boosting|creationTime|2018-10-28T00:39:35Z +http://www.semanlink.net/tag/boosting|prefLabel|Boosting +http://www.semanlink.net/tag/boosting|broader|http://www.semanlink.net/tag/ensemble_learning +http://www.semanlink.net/tag/boosting|related|http://www.semanlink.net/tag/bootstrap_aggregating_bagging +http://www.semanlink.net/tag/boosting|creationDate|2018-10-28 +http://www.semanlink.net/tag/boosting|comment|"ML ensemble meta-algorithm for primarily reducing bias, and also variance in supervised learning, and a family of machine learning algorithms that convert weak learners to strong ones. + +" +http://www.semanlink.net/tag/boosting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/boosting|describedBy|https://en.wikipedia.org/wiki/Boosting_(machine_learning) +http://www.semanlink.net/tag/boosting|uri|http://www.semanlink.net/tag/boosting +http://www.semanlink.net/tag/boosting|broader_prefLabel|Ensemble learning +http://www.semanlink.net/tag/fiction|creationTime|2017-08-02T12:52:33Z +http://www.semanlink.net/tag/fiction|prefLabel|Fiction +http://www.semanlink.net/tag/fiction|creationDate|2017-08-02 +http://www.semanlink.net/tag/fiction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fiction|uri|http://www.semanlink.net/tag/fiction +http://www.semanlink.net/tag/bp|creationTime|2010-06-14T16:09:24Z +http://www.semanlink.net/tag/bp|prefLabel|BP +http://www.semanlink.net/tag/bp|broader|http://www.semanlink.net/tag/compagnies_petrolieres +http://www.semanlink.net/tag/bp|creationDate|2010-06-14 +http://www.semanlink.net/tag/bp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bp|uri|http://www.semanlink.net/tag/bp +http://www.semanlink.net/tag/bp|broader_prefLabel|Compagnies pétrolières +http://www.semanlink.net/tag/alexandria_ocasio_cortez|creationTime|2019-03-24T19:26:10Z +http://www.semanlink.net/tag/alexandria_ocasio_cortez|prefLabel|Alexandria Ocasio-Cortez +http://www.semanlink.net/tag/alexandria_ocasio_cortez|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/alexandria_ocasio_cortez|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/alexandria_ocasio_cortez|creationDate|2019-03-24 +http://www.semanlink.net/tag/alexandria_ocasio_cortez|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alexandria_ocasio_cortez|uri|http://www.semanlink.net/tag/alexandria_ocasio_cortez +http://www.semanlink.net/tag/alexandria_ocasio_cortez|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/alexandria_ocasio_cortez|broader_prefLabel|USA +http://www.semanlink.net/tag/alexandria_ocasio_cortez|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/alexandria_ocasio_cortez|broader_altLabel|United States +http://www.semanlink.net/tag/machine_learning_tool|creationTime|2015-02-12T16:44:31Z +http://www.semanlink.net/tag/machine_learning_tool|prefLabel|Machine Learning tool 
+http://www.semanlink.net/tag/machine_learning_tool|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/machine_learning_tool|creationDate|2015-02-12 +http://www.semanlink.net/tag/machine_learning_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/machine_learning_tool|uri|http://www.semanlink.net/tag/machine_learning_tool +http://www.semanlink.net/tag/machine_learning_tool|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/machine_learning_tool|broader_altLabel|ML +http://www.semanlink.net/tag/machine_learning_tool|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/machine_learning_tool|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/origine_de_l_agriculture|prefLabel|Origine de l'agriculture +http://www.semanlink.net/tag/origine_de_l_agriculture|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/origine_de_l_agriculture|broader|http://www.semanlink.net/tag/neolithique +http://www.semanlink.net/tag/origine_de_l_agriculture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/origine_de_l_agriculture|uri|http://www.semanlink.net/tag/origine_de_l_agriculture +http://www.semanlink.net/tag/origine_de_l_agriculture|broader_prefLabel|Agriculture +http://www.semanlink.net/tag/origine_de_l_agriculture|broader_prefLabel|Néolithique +http://www.semanlink.net/tag/general_semantics|creationTime|2010-04-28T23:36:17Z +http://www.semanlink.net/tag/general_semantics|prefLabel|General semantics +http://www.semanlink.net/tag/general_semantics|creationDate|2010-04-28 +http://www.semanlink.net/tag/general_semantics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/general_semantics|describedBy|https://en.wikipedia.org/wiki/General_semantics +http://www.semanlink.net/tag/general_semantics|uri|http://www.semanlink.net/tag/general_semantics +http://www.semanlink.net/tag/tabous|prefLabel|Tabous +http://www.semanlink.net/tag/tabous|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tabous|uri|http://www.semanlink.net/tag/tabous +http://www.semanlink.net/tag/bash|creationTime|2013-08-28T16:30:07Z +http://www.semanlink.net/tag/bash|prefLabel|bash +http://www.semanlink.net/tag/bash|broader|http://www.semanlink.net/tag/unix +http://www.semanlink.net/tag/bash|creationDate|2013-08-28 +http://www.semanlink.net/tag/bash|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bash|uri|http://www.semanlink.net/tag/bash +http://www.semanlink.net/tag/bash|broader_prefLabel|Unix +http://www.semanlink.net/tag/vanishing_gradient|creationTime|2019-06-11T12:46:00Z +http://www.semanlink.net/tag/vanishing_gradient|prefLabel|Vanishing gradient +http://www.semanlink.net/tag/vanishing_gradient|broader|http://www.semanlink.net/tag/gradient_descent +http://www.semanlink.net/tag/vanishing_gradient|creationDate|2019-06-11 +http://www.semanlink.net/tag/vanishing_gradient|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vanishing_gradient|uri|http://www.semanlink.net/tag/vanishing_gradient +http://www.semanlink.net/tag/vanishing_gradient|broader_prefLabel|Gradient descent +http://www.semanlink.net/tag/vanishing_gradient|broader_related|http://www.semanlink.net/tag/backpropagation +http://www.semanlink.net/tag/hotel|creationTime|2012-05-20T02:59:39Z 
+http://www.semanlink.net/tag/hotel|prefLabel|Hôtel +http://www.semanlink.net/tag/hotel|creationDate|2012-05-20 +http://www.semanlink.net/tag/hotel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hotel|uri|http://www.semanlink.net/tag/hotel +http://www.semanlink.net/tag/targeted_ads|creationTime|2019-12-03T00:47:28Z +http://www.semanlink.net/tag/targeted_ads|prefLabel|Targeted ads +http://www.semanlink.net/tag/targeted_ads|broader|http://www.semanlink.net/tag/publicite +http://www.semanlink.net/tag/targeted_ads|creationDate|2019-12-03 +http://www.semanlink.net/tag/targeted_ads|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/targeted_ads|uri|http://www.semanlink.net/tag/targeted_ads +http://www.semanlink.net/tag/targeted_ads|broader_prefLabel|Publicité +http://www.semanlink.net/tag/targeted_ads|broader_altLabel|Advertising +http://www.semanlink.net/tag/targeted_ads|broader_altLabel|Pub +http://www.semanlink.net/tag/train|prefLabel|Train +http://www.semanlink.net/tag/train|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/train|uri|http://www.semanlink.net/tag/train +http://www.semanlink.net/tag/gnowsis|prefLabel|gnowsis +http://www.semanlink.net/tag/gnowsis|broader|http://www.semanlink.net/tag/leo_sauermann +http://www.semanlink.net/tag/gnowsis|broader|http://www.semanlink.net/tag/semantic_web_project +http://www.semanlink.net/tag/gnowsis|broader|http://www.semanlink.net/tag/semantic_desktop +http://www.semanlink.net/tag/gnowsis|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/gnowsis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gnowsis|uri|http://www.semanlink.net/tag/gnowsis +http://www.semanlink.net/tag/gnowsis|broader_prefLabel|Leo Sauermann +http://www.semanlink.net/tag/gnowsis|broader_prefLabel|Semantic Web project +http://www.semanlink.net/tag/gnowsis|broader_prefLabel|Semantic Desktop +http://www.semanlink.net/tag/gnowsis|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/gnowsis|broader_related|http://www.semanlink.net/tag/semanlink +http://www.semanlink.net/tag/moral_machines|creationTime|2012-11-30T22:26:38Z +http://www.semanlink.net/tag/moral_machines|prefLabel|Moral machines +http://www.semanlink.net/tag/moral_machines|broader|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.semanlink.net/tag/moral_machines|creationDate|2012-11-30 +http://www.semanlink.net/tag/moral_machines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/moral_machines|uri|http://www.semanlink.net/tag/moral_machines +http://www.semanlink.net/tag/moral_machines|broader_prefLabel|Nous vivons une époque moderne +http://www.semanlink.net/tag/moral_machines|broader_altLabel|C'est déjà demain +http://www.semanlink.net/tag/2eme_guerre_mondiale|prefLabel|2eme guerre mondiale +http://www.semanlink.net/tag/2eme_guerre_mondiale|broader|http://www.semanlink.net/tag/guerre +http://www.semanlink.net/tag/2eme_guerre_mondiale|broader|http://www.semanlink.net/tag/histoire_du_xxe_siecle +http://www.semanlink.net/tag/2eme_guerre_mondiale|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/2eme_guerre_mondiale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/2eme_guerre_mondiale|uri|http://www.semanlink.net/tag/2eme_guerre_mondiale 
+http://www.semanlink.net/tag/2eme_guerre_mondiale|broader_prefLabel|War +http://www.semanlink.net/tag/2eme_guerre_mondiale|broader_prefLabel|Histoire du XXe siècle +http://www.semanlink.net/tag/2eme_guerre_mondiale|broader_prefLabel|Histoire +http://www.semanlink.net/tag/2eme_guerre_mondiale|broader_altLabel|Guerre +http://www.semanlink.net/tag/la_ronde_de_nuit|creationTime|2011-01-11T01:06:29Z +http://www.semanlink.net/tag/la_ronde_de_nuit|prefLabel|La Ronde de Nuit +http://www.semanlink.net/tag/la_ronde_de_nuit|broader|http://www.semanlink.net/tag/rembrandt +http://www.semanlink.net/tag/la_ronde_de_nuit|creationDate|2011-01-11 +http://www.semanlink.net/tag/la_ronde_de_nuit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/la_ronde_de_nuit|describedBy|https://en.wikipedia.org/wiki/Night_Watch_(painting) +http://www.semanlink.net/tag/la_ronde_de_nuit|uri|http://www.semanlink.net/tag/la_ronde_de_nuit +http://www.semanlink.net/tag/la_ronde_de_nuit|broader_prefLabel|Rembrandt +http://www.semanlink.net/tag/neuromorphic_system|creationTime|2014-04-30T14:02:08Z +http://www.semanlink.net/tag/neuromorphic_system|prefLabel|Neuromorphic system +http://www.semanlink.net/tag/neuromorphic_system|creationDate|2014-04-30 +http://www.semanlink.net/tag/neuromorphic_system|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neuromorphic_system|uri|http://www.semanlink.net/tag/neuromorphic_system +http://www.semanlink.net/tag/sw_has_failed|creationTime|2014-02-03T22:54:05Z +http://www.semanlink.net/tag/sw_has_failed|prefLabel|SW has failed +http://www.semanlink.net/tag/sw_has_failed|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/sw_has_failed|creationDate|2014-02-03 +http://www.semanlink.net/tag/sw_has_failed|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sw_has_failed|uri|http://www.semanlink.net/tag/sw_has_failed +http://www.semanlink.net/tag/sw_has_failed|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/sw_has_failed|broader_altLabel|sw +http://www.semanlink.net/tag/sw_has_failed|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/algorithmes|prefLabel|Algorithmes +http://www.semanlink.net/tag/algorithmes|creationDate|2006-10-17 +http://www.semanlink.net/tag/algorithmes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/algorithmes|uri|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/private_wiki|creationTime|2015-03-15T23:30:55Z +http://www.semanlink.net/tag/private_wiki|prefLabel|Private wiki +http://www.semanlink.net/tag/private_wiki|broader|http://www.semanlink.net/tag/wiki +http://www.semanlink.net/tag/private_wiki|creationDate|2015-03-15 +http://www.semanlink.net/tag/private_wiki|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/private_wiki|uri|http://www.semanlink.net/tag/private_wiki +http://www.semanlink.net/tag/private_wiki|broader_prefLabel|Wiki +http://www.semanlink.net/tag/certificat_de_nationalite|creationTime|2007-09-25T22:01:07Z +http://www.semanlink.net/tag/certificat_de_nationalite|prefLabel|Certificat de nationalité +http://www.semanlink.net/tag/certificat_de_nationalite|broader|http://www.semanlink.net/tag/administration_francaise +http://www.semanlink.net/tag/certificat_de_nationalite|creationDate|2007-09-25 
+http://www.semanlink.net/tag/certificat_de_nationalite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/certificat_de_nationalite|uri|http://www.semanlink.net/tag/certificat_de_nationalite +http://www.semanlink.net/tag/certificat_de_nationalite|broader_prefLabel|Administration française +http://www.semanlink.net/tag/web_3_0|prefLabel|Web 3.0 +http://www.semanlink.net/tag/web_3_0|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/web_3_0|creationDate|2006-11-14 +http://www.semanlink.net/tag/web_3_0|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_3_0|uri|http://www.semanlink.net/tag/web_3_0 +http://www.semanlink.net/tag/web_3_0|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/web_3_0|broader_altLabel|sw +http://www.semanlink.net/tag/web_3_0|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/target_sense_verification|creationTime|2021-05-13T00:27:40Z +http://www.semanlink.net/tag/target_sense_verification|prefLabel|Target Sense Verification +http://www.semanlink.net/tag/target_sense_verification|broader|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/tag/target_sense_verification|related|http://www.semanlink.net/tag/word_sense_disambiguation +http://www.semanlink.net/tag/target_sense_verification|creationDate|2021-05-13 +http://www.semanlink.net/tag/target_sense_verification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/target_sense_verification|uri|http://www.semanlink.net/tag/target_sense_verification +http://www.semanlink.net/tag/target_sense_verification|broader_prefLabel|Word sense / Lexical ambiguity +http://www.semanlink.net/tag/target_sense_verification|broader_altLabel|Polysemy +http://www.semanlink.net/tag/target_sense_verification|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/solrcloud|creationTime|2013-03-20T00:18:34Z +http://www.semanlink.net/tag/solrcloud|prefLabel|SolrCloud +http://www.semanlink.net/tag/solrcloud|broader|http://www.semanlink.net/tag/solr +http://www.semanlink.net/tag/solrcloud|creationDate|2013-03-20 +http://www.semanlink.net/tag/solrcloud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/solrcloud|uri|http://www.semanlink.net/tag/solrcloud +http://www.semanlink.net/tag/solrcloud|broader_prefLabel|Solr +http://www.semanlink.net/tag/einstein|prefLabel|Einstein +http://www.semanlink.net/tag/einstein|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/einstein|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/einstein|broader|http://www.semanlink.net/tag/scientifique +http://www.semanlink.net/tag/einstein|broader|http://www.semanlink.net/tag/physicien +http://www.semanlink.net/tag/einstein|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/einstein|uri|http://www.semanlink.net/tag/einstein +http://www.semanlink.net/tag/einstein|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/einstein|broader_prefLabel|Physique +http://www.semanlink.net/tag/einstein|broader_prefLabel|Scientifique +http://www.semanlink.net/tag/einstein|broader_prefLabel|Physicien +http://www.semanlink.net/tag/einstein|broader_altLabel|Physics +http://www.semanlink.net/tag/einstein|broader_altLabel|Savant +http://www.semanlink.net/tag/federated_database_system|creationTime|2009-02-06T22:44:39Z 
+http://www.semanlink.net/tag/federated_database_system|prefLabel|Federated database system +http://www.semanlink.net/tag/federated_database_system|creationDate|2009-02-06 +http://www.semanlink.net/tag/federated_database_system|comment|A federated database system is a type of meta-database management system (DBMS) which transparently integrates multiple autonomous database systems into a single federated database +http://www.semanlink.net/tag/federated_database_system|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/federated_database_system|describedBy|https://en.wikipedia.org/wiki/Federated_database +http://www.semanlink.net/tag/federated_database_system|uri|http://www.semanlink.net/tag/federated_database_system +http://www.semanlink.net/tag/c2gweb_rdf|creationTime|2012-10-01T14:47:06Z +http://www.semanlink.net/tag/c2gweb_rdf|prefLabel|C2GWeb RDF +http://www.semanlink.net/tag/c2gweb_rdf|broader|http://www.semanlink.net/tag/c2gweb +http://www.semanlink.net/tag/c2gweb_rdf|creationDate|2012-10-01 +http://www.semanlink.net/tag/c2gweb_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/c2gweb_rdf|uri|http://www.semanlink.net/tag/c2gweb_rdf +http://www.semanlink.net/tag/c2gweb_rdf|broader_prefLabel|C2GWeb +http://www.semanlink.net/tag/rijksmuseum|prefLabel|Rijksmuseum +http://www.semanlink.net/tag/rijksmuseum|broader|http://www.semanlink.net/tag/amsterdam +http://www.semanlink.net/tag/rijksmuseum|broader|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/rijksmuseum|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rijksmuseum|uri|http://www.semanlink.net/tag/rijksmuseum +http://www.semanlink.net/tag/rijksmuseum|broader_prefLabel|Amsterdam +http://www.semanlink.net/tag/rijksmuseum|broader_prefLabel|Musée +http://www.semanlink.net/tag/bug_brother|creationTime|2009-04-22T18:01:22Z +http://www.semanlink.net/tag/bug_brother|prefLabel|Bug Brother +http://www.semanlink.net/tag/bug_brother|broader|http://www.semanlink.net/tag/big_brother +http://www.semanlink.net/tag/bug_brother|creationDate|2009-04-22 +http://www.semanlink.net/tag/bug_brother|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bug_brother|uri|http://www.semanlink.net/tag/bug_brother +http://www.semanlink.net/tag/bug_brother|broader_prefLabel|Big Brother +http://www.semanlink.net/tag/2d_nlp|creationTime|2020-06-16T09:29:21Z +http://www.semanlink.net/tag/2d_nlp|prefLabel|2D-NLP +http://www.semanlink.net/tag/2d_nlp|broader|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/2d_nlp|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/2d_nlp|creationDate|2020-06-16 +http://www.semanlink.net/tag/2d_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/2d_nlp|uri|http://www.semanlink.net/tag/2d_nlp +http://www.semanlink.net/tag/2d_nlp|broader_prefLabel|Information extraction +http://www.semanlink.net/tag/2d_nlp|broader_prefLabel|NLP +http://www.semanlink.net/tag/2d_nlp|broader_altLabel|TALN +http://www.semanlink.net/tag/2d_nlp|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/2d_nlp|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/2d_nlp|broader_related|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/tag/bookmark_managers|prefLabel|Bookmark Managers 
+http://www.semanlink.net/tag/bookmark_managers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bookmark_managers|uri|http://www.semanlink.net/tag/bookmark_managers +http://www.semanlink.net/tag/qwant|creationTime|2019-12-14T01:05:53Z +http://www.semanlink.net/tag/qwant|prefLabel|Qwant +http://www.semanlink.net/tag/qwant|creationDate|2019-12-14 +http://www.semanlink.net/tag/qwant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/qwant|uri|http://www.semanlink.net/tag/qwant +http://www.semanlink.net/tag/droits_de_l_homme|creationTime|2007-04-29T23:51:53Z +http://www.semanlink.net/tag/droits_de_l_homme|prefLabel|Droits de l'Homme +http://www.semanlink.net/tag/droits_de_l_homme|creationDate|2007-04-29 +http://www.semanlink.net/tag/droits_de_l_homme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/droits_de_l_homme|uri|http://www.semanlink.net/tag/droits_de_l_homme +http://www.semanlink.net/tag/charlie_hebdo|creationTime|2009-06-11T23:28:06Z +http://www.semanlink.net/tag/charlie_hebdo|prefLabel|Charlie Hebdo +http://www.semanlink.net/tag/charlie_hebdo|broader|http://www.semanlink.net/tag/journal +http://www.semanlink.net/tag/charlie_hebdo|broader|http://www.semanlink.net/tag/rigolo +http://www.semanlink.net/tag/charlie_hebdo|creationDate|2009-06-11 +http://www.semanlink.net/tag/charlie_hebdo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/charlie_hebdo|uri|http://www.semanlink.net/tag/charlie_hebdo +http://www.semanlink.net/tag/charlie_hebdo|broader_prefLabel|Presse +http://www.semanlink.net/tag/charlie_hebdo|broader_prefLabel|Rigolo +http://www.semanlink.net/tag/charlie_hebdo|broader_altLabel|Journal +http://www.semanlink.net/tag/oum_kalsoum|prefLabel|Oum Kalsoum +http://www.semanlink.net/tag/oum_kalsoum|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/oum_kalsoum|broader|http://www.semanlink.net/tag/egypte +http://www.semanlink.net/tag/oum_kalsoum|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/oum_kalsoum|uri|http://www.semanlink.net/tag/oum_kalsoum +http://www.semanlink.net/tag/oum_kalsoum|broader_prefLabel|Musicien +http://www.semanlink.net/tag/oum_kalsoum|broader_prefLabel|Egypte +http://www.semanlink.net/tag/alexandre_le_grand|creationTime|2014-09-11T23:43:43Z +http://www.semanlink.net/tag/alexandre_le_grand|prefLabel|Alexandre le Grand +http://www.semanlink.net/tag/alexandre_le_grand|broader|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/tag/alexandre_le_grand|broader|http://www.semanlink.net/tag/personnage_historique +http://www.semanlink.net/tag/alexandre_le_grand|creationDate|2014-09-11 +http://www.semanlink.net/tag/alexandre_le_grand|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alexandre_le_grand|uri|http://www.semanlink.net/tag/alexandre_le_grand +http://www.semanlink.net/tag/alexandre_le_grand|broader_prefLabel|Grèce antique +http://www.semanlink.net/tag/alexandre_le_grand|broader_prefLabel|Personnage historique +http://www.semanlink.net/tag/copyright|prefLabel|Copyright +http://www.semanlink.net/tag/copyright|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/copyright|uri|http://www.semanlink.net/tag/copyright +http://www.semanlink.net/tag/deep_generative_modeling|creationTime|2021-01-12T17:14:14Z 
+http://www.semanlink.net/tag/deep_generative_modeling|prefLabel|Deep Generative Modeling +http://www.semanlink.net/tag/deep_generative_modeling|broader|http://www.semanlink.net/tag/generative_model +http://www.semanlink.net/tag/deep_generative_modeling|creationDate|2021-01-12 +http://www.semanlink.net/tag/deep_generative_modeling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deep_generative_modeling|uri|http://www.semanlink.net/tag/deep_generative_modeling +http://www.semanlink.net/tag/deep_generative_modeling|broader_prefLabel|Generative model +http://www.semanlink.net/tag/deep_generative_modeling|broader_altLabel|Generative modeling +http://www.semanlink.net/tag/macintosh|prefLabel|Macintosh +http://www.semanlink.net/tag/macintosh|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/macintosh|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/macintosh|uri|http://www.semanlink.net/tag/macintosh +http://www.semanlink.net/tag/macintosh|broader_prefLabel|Apple +http://www.semanlink.net/tag/intent_classification_and_slot_filling|creationTime|2020-01-09T01:00:43Z +http://www.semanlink.net/tag/intent_classification_and_slot_filling|prefLabel|Intent classification and slot filling +http://www.semanlink.net/tag/intent_classification_and_slot_filling|broader|http://www.semanlink.net/tag/slot_tagging +http://www.semanlink.net/tag/intent_classification_and_slot_filling|broader|http://www.semanlink.net/tag/intent_detection +http://www.semanlink.net/tag/intent_classification_and_slot_filling|creationDate|2020-01-09 +http://www.semanlink.net/tag/intent_classification_and_slot_filling|comment|"- Intent classification: predicting the intent of a query +- slot filling extracts semantic concepts in the query (a sequence labeling task that tags the input word sequence). + +For example the user query could be “Find me an +action movie by Steven Spielberg”. The intent here is “find_movie” while +the slots are “genre” with value “action” and “directed_by” with value +“Steven Spielberg”." 
+http://www.semanlink.net/tag/intent_classification_and_slot_filling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/intent_classification_and_slot_filling|uri|http://www.semanlink.net/tag/intent_classification_and_slot_filling +http://www.semanlink.net/tag/intent_classification_and_slot_filling|broader_prefLabel|Slot tagging +http://www.semanlink.net/tag/intent_classification_and_slot_filling|broader_prefLabel|Intent detection +http://www.semanlink.net/tag/intent_classification_and_slot_filling|broader_altLabel|Slot filling +http://www.semanlink.net/tag/intent_classification_and_slot_filling|broader_altLabel|intent detection +http://www.semanlink.net/tag/intent_classification_and_slot_filling|broader_altLabel|intent classification +http://www.semanlink.net/tag/intent_classification_and_slot_filling|broader_related|http://www.semanlink.net/tag/sequence_labeling +http://www.semanlink.net/tag/intent_classification_and_slot_filling|broader_related|http://www.semanlink.net/tag/entity_discovery_and_linking +http://www.semanlink.net/tag/intent_classification_and_slot_filling|broader_related|http://www.semanlink.net/tag/intent_detection +http://www.semanlink.net/tag/sparql_perfs|creationTime|2016-01-25T18:12:41Z +http://www.semanlink.net/tag/sparql_perfs|prefLabel|SPARQL perfs +http://www.semanlink.net/tag/sparql_perfs|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_perfs|creationDate|2016-01-25 +http://www.semanlink.net/tag/sparql_perfs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_perfs|uri|http://www.semanlink.net/tag/sparql_perfs +http://www.semanlink.net/tag/sparql_perfs|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/newton|prefLabel|Newton +http://www.semanlink.net/tag/newton|broader|http://www.semanlink.net/tag/scientifique +http://www.semanlink.net/tag/newton|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/newton|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/newton|broader|http://www.semanlink.net/tag/physicien +http://www.semanlink.net/tag/newton|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/newton|uri|http://www.semanlink.net/tag/newton +http://www.semanlink.net/tag/newton|broader_prefLabel|Scientifique +http://www.semanlink.net/tag/newton|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/newton|broader_prefLabel|Physique +http://www.semanlink.net/tag/newton|broader_prefLabel|Physicien +http://www.semanlink.net/tag/newton|broader_altLabel|Savant +http://www.semanlink.net/tag/newton|broader_altLabel|Physics +http://www.semanlink.net/tag/python_4_data_science|creationTime|2015-11-22T18:03:23Z +http://www.semanlink.net/tag/python_4_data_science|prefLabel|Python 4 Data science +http://www.semanlink.net/tag/python_4_data_science|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/python_4_data_science|broader|http://www.semanlink.net/tag/data_science +http://www.semanlink.net/tag/python_4_data_science|creationDate|2015-11-22 +http://www.semanlink.net/tag/python_4_data_science|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/python_4_data_science|uri|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/tag/python_4_data_science|broader_prefLabel|Python +http://www.semanlink.net/tag/python_4_data_science|broader_prefLabel|Data science 
+http://www.semanlink.net/tag/python_4_data_science|broader_altLabel|Data analysis
+http://www.semanlink.net/tag/mona_lisa|creationTime|2008-12-10T14:57:23Z
+http://www.semanlink.net/tag/mona_lisa|prefLabel|Mona Lisa
+http://www.semanlink.net/tag/mona_lisa|broader|http://www.semanlink.net/tag/leonardo_da_vinci
+http://www.semanlink.net/tag/mona_lisa|broader|http://www.semanlink.net/tag/peinture
+http://www.semanlink.net/tag/mona_lisa|creationDate|2008-12-10
+http://www.semanlink.net/tag/mona_lisa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mona_lisa|altLabel|Joconde
+http://www.semanlink.net/tag/mona_lisa|uri|http://www.semanlink.net/tag/mona_lisa
+http://www.semanlink.net/tag/mona_lisa|broader_prefLabel|Leonardo da Vinci
+http://www.semanlink.net/tag/mona_lisa|broader_prefLabel|Painting
+http://www.semanlink.net/tag/mona_lisa|broader_altLabel|Peinture
+http://www.semanlink.net/tag/arnaque|prefLabel|Arnaque
+http://www.semanlink.net/tag/arnaque|creationDate|2006-12-22
+http://www.semanlink.net/tag/arnaque|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/arnaque|uri|http://www.semanlink.net/tag/arnaque
+http://www.semanlink.net/tag/recit_de_voyage|creationTime|2008-01-23T23:07:14Z
+http://www.semanlink.net/tag/recit_de_voyage|prefLabel|Récit de voyage
+http://www.semanlink.net/tag/recit_de_voyage|creationDate|2008-01-23
+http://www.semanlink.net/tag/recit_de_voyage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/recit_de_voyage|uri|http://www.semanlink.net/tag/recit_de_voyage
+http://www.semanlink.net/tag/changement_climatique|prefLabel|Changement climatique
+http://www.semanlink.net/tag/changement_climatique|broader|http://www.semanlink.net/tag/climat
+http://www.semanlink.net/tag/changement_climatique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/changement_climatique|uri|http://www.semanlink.net/tag/changement_climatique
+http://www.semanlink.net/tag/changement_climatique|broader_prefLabel|Climat
+http://www.semanlink.net/tag/phrase_embeddings|creationTime|2018-05-12T16:05:04Z
+http://www.semanlink.net/tag/phrase_embeddings|prefLabel|Phrase embeddings
+http://www.semanlink.net/tag/phrase_embeddings|broader|http://www.semanlink.net/tag/embeddings_in_nlp
+http://www.semanlink.net/tag/phrase_embeddings|broader|http://www.semanlink.net/tag/embeddings
+http://www.semanlink.net/tag/phrase_embeddings|broader|http://www.semanlink.net/tag/phrases_nlp
+http://www.semanlink.net/tag/phrase_embeddings|creationDate|2018-05-12
+http://www.semanlink.net/tag/phrase_embeddings|comment|"several representations are proposed to extend word representation for phrases ([Yin and Schütze, 2014](/doc/?uri=http%3A%2F%2Faclweb.org%2Fanthology%2FP14-3006); Yu and Dredze, 2015; Passos et al., 2014). However, they don’t use structured knowledge to derive phrase representations (as said [here](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1607.07956))
+
+[Sebastian Ruder](/tag/sebastian_ruder) in 2017 says the [following](http://ruder.io/word-embeddings-2017/index.html#phrasesandmultiwordexpressions):
+
+> explicitly modelling phrases has so far not shown significant improvements on downstream tasks that would justify the additional complexity
+
+(but hum: what's about NER - in particular if using external knowledge such as lexicons?)
+ + + +" +http://www.semanlink.net/tag/phrase_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/phrase_embeddings|uri|http://www.semanlink.net/tag/phrase_embeddings +http://www.semanlink.net/tag/phrase_embeddings|broader_prefLabel|Embeddings in NLP +http://www.semanlink.net/tag/phrase_embeddings|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/phrase_embeddings|broader_prefLabel|Phrases (NLP) +http://www.semanlink.net/tag/phrase_embeddings|broader_altLabel|embedding +http://www.semanlink.net/tag/phrase_embeddings|broader_related|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/tag/phrase_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/phrase_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/phrase_embeddings|broader_related|http://www.semanlink.net/tag/n_gram +http://www.semanlink.net/tag/owl_ontology_browser|creationTime|2008-04-11T15:54:38Z +http://www.semanlink.net/tag/owl_ontology_browser|prefLabel|OWL ontology browser +http://www.semanlink.net/tag/owl_ontology_browser|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owl_ontology_browser|creationDate|2008-04-11 +http://www.semanlink.net/tag/owl_ontology_browser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owl_ontology_browser|uri|http://www.semanlink.net/tag/owl_ontology_browser +http://www.semanlink.net/tag/owl_ontology_browser|broader_prefLabel|OWL +http://www.semanlink.net/tag/owl_ontology_browser|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/online_security|creationTime|2012-08-08T10:43:32Z +http://www.semanlink.net/tag/online_security|prefLabel|Online Security +http://www.semanlink.net/tag/online_security|broader|http://www.semanlink.net/tag/securite_informatique +http://www.semanlink.net/tag/online_security|creationDate|2012-08-08 +http://www.semanlink.net/tag/online_security|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/online_security|uri|http://www.semanlink.net/tag/online_security +http://www.semanlink.net/tag/online_security|broader_prefLabel|Cybersecurity +http://www.semanlink.net/tag/online_security|broader_prefLabel|Sécurité informatique +http://www.semanlink.net/tag/poincare_embeddings|creationTime|2018-05-20T09:01:43Z +http://www.semanlink.net/tag/poincare_embeddings|prefLabel|Poincaré Embeddings +http://www.semanlink.net/tag/poincare_embeddings|broader|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/poincare_embeddings|broader|http://www.semanlink.net/tag/poincare +http://www.semanlink.net/tag/poincare_embeddings|broader|http://www.semanlink.net/tag/hierarchy_aware_knowledge_graph_embeddings +http://www.semanlink.net/tag/poincare_embeddings|broader|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/poincare_embeddings|creationDate|2018-05-20 +http://www.semanlink.net/tag/poincare_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/poincare_embeddings|uri|http://www.semanlink.net/tag/poincare_embeddings +http://www.semanlink.net/tag/poincare_embeddings|broader_prefLabel|NLP@Facebook +http://www.semanlink.net/tag/poincare_embeddings|broader_prefLabel|Poincaré +http://www.semanlink.net/tag/poincare_embeddings|broader_prefLabel|Hierarchy-Aware KG Embeddings 
+http://www.semanlink.net/tag/poincare_embeddings|broader_prefLabel|Entity embeddings +http://www.semanlink.net/tag/poincare_embeddings|broader_altLabel|Hierarchy-Aware Knowledge Graph Embeddings +http://www.semanlink.net/tag/poincare_embeddings|broader_related|http://www.semanlink.net/tag/hierarchical_tags +http://www.semanlink.net/tag/poincare_embeddings|broader_related|http://www.semanlink.net/tag/hierarchical_classification +http://www.semanlink.net/tag/brain_initiative|creationTime|2018-01-03T00:58:31Z +http://www.semanlink.net/tag/brain_initiative|prefLabel|BRAIN Initiative +http://www.semanlink.net/tag/brain_initiative|broader|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/brain_initiative|creationDate|2018-01-03 +http://www.semanlink.net/tag/brain_initiative|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/brain_initiative|describedBy|https://www.braininitiative.nih.gov/ +http://www.semanlink.net/tag/brain_initiative|uri|http://www.semanlink.net/tag/brain_initiative +http://www.semanlink.net/tag/brain_initiative|broader_prefLabel|Neuroscience +http://www.semanlink.net/tag/france_fiasco_administratif|creationTime|2020-05-07T13:18:07Z +http://www.semanlink.net/tag/france_fiasco_administratif|prefLabel|France : dysfonctionnement administratif +http://www.semanlink.net/tag/france_fiasco_administratif|broader|http://www.semanlink.net/tag/administration_francaise +http://www.semanlink.net/tag/france_fiasco_administratif|broader|http://www.semanlink.net/tag/france_bureaucratie +http://www.semanlink.net/tag/france_fiasco_administratif|broader|http://www.semanlink.net/tag/nullite_francaise +http://www.semanlink.net/tag/france_fiasco_administratif|broader|http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions +http://www.semanlink.net/tag/france_fiasco_administratif|creationDate|2020-05-07 +http://www.semanlink.net/tag/france_fiasco_administratif|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/france_fiasco_administratif|uri|http://www.semanlink.net/tag/france_fiasco_administratif +http://www.semanlink.net/tag/france_fiasco_administratif|broader_prefLabel|Administration française +http://www.semanlink.net/tag/france_fiasco_administratif|broader_prefLabel|France : bureaucratie +http://www.semanlink.net/tag/france_fiasco_administratif|broader_prefLabel|Nullité française +http://www.semanlink.net/tag/france_fiasco_administratif|broader_prefLabel|France : dysfonctionnement des institutions +http://www.semanlink.net/tag/france_fiasco_administratif|broader_related|http://www.semanlink.net/tag/france_dysfonctionnement_des_institutions +http://www.semanlink.net/tag/nigeria|prefLabel|Nigeria +http://www.semanlink.net/tag/nigeria|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/nigeria|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nigeria|uri|http://www.semanlink.net/tag/nigeria +http://www.semanlink.net/tag/nigeria|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/information_system|creationTime|2010-08-24T10:53:42Z +http://www.semanlink.net/tag/information_system|prefLabel|Information System +http://www.semanlink.net/tag/information_system|creationDate|2010-08-24 +http://www.semanlink.net/tag/information_system|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/information_system|uri|http://www.semanlink.net/tag/information_system 
+http://www.semanlink.net/tag/web|prefLabel|Web
+http://www.semanlink.net/tag/web|broader|http://www.semanlink.net/tag/internet
+http://www.semanlink.net/tag/web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/web|uri|http://www.semanlink.net/tag/web
+http://www.semanlink.net/tag/web|broader_prefLabel|Internet
+http://www.semanlink.net/tag/mp3|prefLabel|MP3
+http://www.semanlink.net/tag/mp3|broader|http://www.semanlink.net/tag/musique
+http://www.semanlink.net/tag/mp3|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mp3|uri|http://www.semanlink.net/tag/mp3
+http://www.semanlink.net/tag/mp3|broader_prefLabel|Musique
+http://www.semanlink.net/tag/mp3|broader_altLabel|Music
+http://www.semanlink.net/tag/unknown_tag|creationTime|2015-02-16T23:08:27Z
+http://www.semanlink.net/tag/unknown_tag|prefLabel|Unknown Tag
+http://www.semanlink.net/tag/unknown_tag|creationDate|2015-02-16
+http://www.semanlink.net/tag/unknown_tag|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/unknown_tag|uri|http://www.semanlink.net/tag/unknown_tag
+http://www.semanlink.net/tag/nuclear_war|creationTime|2018-01-14T19:18:47Z
+http://www.semanlink.net/tag/nuclear_war|prefLabel|Nuclear war
+http://www.semanlink.net/tag/nuclear_war|broader|http://www.semanlink.net/tag/guerre
+http://www.semanlink.net/tag/nuclear_war|creationDate|2018-01-14
+http://www.semanlink.net/tag/nuclear_war|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nuclear_war|uri|http://www.semanlink.net/tag/nuclear_war
+http://www.semanlink.net/tag/nuclear_war|broader_prefLabel|War
+http://www.semanlink.net/tag/nuclear_war|broader_altLabel|Guerre
+http://www.semanlink.net/tag/tree_embeddings|creationTime|2019-06-09T23:32:09Z
+http://www.semanlink.net/tag/tree_embeddings|prefLabel|Tree embeddings
+http://www.semanlink.net/tag/tree_embeddings|broader|http://www.semanlink.net/tag/embeddings
+http://www.semanlink.net/tag/tree_embeddings|creationDate|2019-06-09
+http://www.semanlink.net/tag/tree_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/tree_embeddings|uri|http://www.semanlink.net/tag/tree_embeddings
+http://www.semanlink.net/tag/tree_embeddings|broader_prefLabel|Embeddings
+http://www.semanlink.net/tag/tree_embeddings|broader_altLabel|embedding
+http://www.semanlink.net/tag/tree_embeddings|broader_related|http://www.semanlink.net/tag/similarity_queries
+http://www.semanlink.net/tag/tree_embeddings|broader_related|http://www.semanlink.net/tag/nlp_techniques
+http://www.semanlink.net/tag/out_of_distribution_detection|creationTime|2018-08-27T00:13:34Z
+http://www.semanlink.net/tag/out_of_distribution_detection|prefLabel|Outlier Detection
+http://www.semanlink.net/tag/out_of_distribution_detection|broader|http://www.semanlink.net/tag/machine_learning_problems
+http://www.semanlink.net/tag/out_of_distribution_detection|creationDate|2018-08-27
+http://www.semanlink.net/tag/out_of_distribution_detection|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/out_of_distribution_detection|altLabel|Out-of-Distribution Detection
+http://www.semanlink.net/tag/out_of_distribution_detection|uri|http://www.semanlink.net/tag/out_of_distribution_detection
+http://www.semanlink.net/tag/out_of_distribution_detection|broader_prefLabel|Machine learning: problems
+http://www.semanlink.net/tag/displaying_xml_with_css|prefLabel|Displaying XML with css
+http://www.semanlink.net/tag/displaying_xml_with_css|broader|http://www.semanlink.net/tag/css
+http://www.semanlink.net/tag/displaying_xml_with_css|broader|http://www.semanlink.net/tag/xml
+http://www.semanlink.net/tag/displaying_xml_with_css|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/displaying_xml_with_css|uri|http://www.semanlink.net/tag/displaying_xml_with_css
+http://www.semanlink.net/tag/displaying_xml_with_css|broader_prefLabel|css
+http://www.semanlink.net/tag/displaying_xml_with_css|broader_prefLabel|XML
+http://www.semanlink.net/tag/sand|creationTime|2014-11-08T08:02:34Z
+http://www.semanlink.net/tag/sand|prefLabel|Sand
+http://www.semanlink.net/tag/sand|creationDate|2014-11-08
+http://www.semanlink.net/tag/sand|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sand|uri|http://www.semanlink.net/tag/sand
+http://www.semanlink.net/tag/polynomial|creationTime|2016-05-28T09:16:55Z
+http://www.semanlink.net/tag/polynomial|prefLabel|Polynomial
+http://www.semanlink.net/tag/polynomial|creationDate|2016-05-28
+http://www.semanlink.net/tag/polynomial|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/polynomial|describedBy|https://en.wikipedia.org/wiki/Polynomial
+http://www.semanlink.net/tag/polynomial|uri|http://www.semanlink.net/tag/polynomial
+http://www.semanlink.net/tag/banque_mondiale|prefLabel|Banque mondiale
+http://www.semanlink.net/tag/banque_mondiale|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/banque_mondiale|broader|http://www.semanlink.net/tag/institutions_internationales
+http://www.semanlink.net/tag/banque_mondiale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/banque_mondiale|uri|http://www.semanlink.net/tag/banque_mondiale
+http://www.semanlink.net/tag/banque_mondiale|broader_prefLabel|Economie
+http://www.semanlink.net/tag/banque_mondiale|broader_prefLabel|Institutions internationales
+http://www.semanlink.net/tag/banque_mondiale|broader_related|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/personal_data|creationTime|2012-04-18T10:02:43Z
+http://www.semanlink.net/tag/personal_data|prefLabel|Personal data
+http://www.semanlink.net/tag/personal_data|creationDate|2012-04-18
+http://www.semanlink.net/tag/personal_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/personal_data|uri|http://www.semanlink.net/tag/personal_data
+http://www.semanlink.net/tag/adsense|prefLabel|AdSense
+http://www.semanlink.net/tag/adsense|broader|http://www.semanlink.net/tag/google_advertising
+http://www.semanlink.net/tag/adsense|comment|régie publicitaire de Google
+http://www.semanlink.net/tag/adsense|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/adsense|describedBy|https://fr.wikipedia.org/wiki/Google_AdSense
+http://www.semanlink.net/tag/adsense|uri|http://www.semanlink.net/tag/adsense
+http://www.semanlink.net/tag/adsense|broader_prefLabel|Google + Advertising
+http://www.semanlink.net/tag/aborigenes|creationTime|2021-07-31T18:35:31Z
+http://www.semanlink.net/tag/aborigenes|prefLabel|Aborigènes
+http://www.semanlink.net/tag/aborigenes|broader|http://www.semanlink.net/tag/australie
+http://www.semanlink.net/tag/aborigenes|creationDate|2021-07-31
+http://www.semanlink.net/tag/aborigenes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/aborigenes|uri|http://www.semanlink.net/tag/aborigenes +http://www.semanlink.net/tag/aborigenes|broader_prefLabel|Australie +http://www.semanlink.net/tag/nature_journal|creationTime|2013-06-03T14:49:52Z +http://www.semanlink.net/tag/nature_journal|prefLabel|Nature (journal) +http://www.semanlink.net/tag/nature_journal|broader|http://www.semanlink.net/tag/publication_scientifique +http://www.semanlink.net/tag/nature_journal|creationDate|2013-06-03 +http://www.semanlink.net/tag/nature_journal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nature_journal|uri|http://www.semanlink.net/tag/nature_journal +http://www.semanlink.net/tag/nature_journal|broader_prefLabel|Publication scientifique +http://www.semanlink.net/tag/meetup_web_semantique|creationTime|2011-01-09T21:38:38Z +http://www.semanlink.net/tag/meetup_web_semantique|prefLabel|Meetup Web Sémantique +http://www.semanlink.net/tag/meetup_web_semantique|broader|http://www.semanlink.net/tag/meetup +http://www.semanlink.net/tag/meetup_web_semantique|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/meetup_web_semantique|broader|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/meetup_web_semantique|creationDate|2011-01-09 +http://www.semanlink.net/tag/meetup_web_semantique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/meetup_web_semantique|uri|http://www.semanlink.net/tag/meetup_web_semantique +http://www.semanlink.net/tag/meetup_web_semantique|broader_prefLabel|Meetup +http://www.semanlink.net/tag/meetup_web_semantique|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/meetup_web_semantique|broader_prefLabel|Paris +http://www.semanlink.net/tag/meetup_web_semantique|broader_altLabel|sw +http://www.semanlink.net/tag/meetup_web_semantique|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/semantic_markup_in_html|creationTime|2007-05-18T21:48:49Z +http://www.semanlink.net/tag/semantic_markup_in_html|prefLabel|Semantic markup in HTML +http://www.semanlink.net/tag/semantic_markup_in_html|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_markup_in_html|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/semantic_markup_in_html|broader|http://www.semanlink.net/tag/html +http://www.semanlink.net/tag/semantic_markup_in_html|creationDate|2007-05-18 +http://www.semanlink.net/tag/semantic_markup_in_html|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_markup_in_html|altLabel|RDF in HTML +http://www.semanlink.net/tag/semantic_markup_in_html|uri|http://www.semanlink.net/tag/semantic_markup_in_html +http://www.semanlink.net/tag/semantic_markup_in_html|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_markup_in_html|broader_prefLabel|RDF +http://www.semanlink.net/tag/semantic_markup_in_html|broader_prefLabel|HTML +http://www.semanlink.net/tag/semantic_markup_in_html|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_markup_in_html|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/semantic_markup_in_html|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/semantic_markup_in_html|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/semantic_markup_in_html|broader_related|http://www.semanlink.net/tag/sparql 
+http://www.semanlink.net/tag/semantic_markup_in_html|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/semantic_markup_in_html|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/dropout|creationTime|2019-05-13T09:10:39Z +http://www.semanlink.net/tag/dropout|prefLabel|Dropout +http://www.semanlink.net/tag/dropout|creationDate|2019-05-13 +http://www.semanlink.net/tag/dropout|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dropout|uri|http://www.semanlink.net/tag/dropout +http://www.semanlink.net/tag/manager_snippet|prefLabel|Snippet Manager +http://www.semanlink.net/tag/manager_snippet|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/manager_snippet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/manager_snippet|uri|http://www.semanlink.net/tag/manager_snippet +http://www.semanlink.net/tag/manager_snippet|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/apis_and_linked_data|creationTime|2016-02-25T03:10:53Z +http://www.semanlink.net/tag/apis_and_linked_data|prefLabel|APIs and Linked Data +http://www.semanlink.net/tag/apis_and_linked_data|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/apis_and_linked_data|broader|http://www.semanlink.net/tag/api +http://www.semanlink.net/tag/apis_and_linked_data|creationDate|2016-02-25 +http://www.semanlink.net/tag/apis_and_linked_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apis_and_linked_data|uri|http://www.semanlink.net/tag/apis_and_linked_data +http://www.semanlink.net/tag/apis_and_linked_data|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/apis_and_linked_data|broader_prefLabel|API +http://www.semanlink.net/tag/apis_and_linked_data|broader_altLabel|LD +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/apis_and_linked_data|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life 
+http://www.semanlink.net/tag/coursera_computational_neuroscience|creationTime|2014-01-11T09:56:19Z +http://www.semanlink.net/tag/coursera_computational_neuroscience|prefLabel|Coursera: Computational Neuroscience +http://www.semanlink.net/tag/coursera_computational_neuroscience|broader|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/coursera_computational_neuroscience|broader|http://www.semanlink.net/tag/coursera +http://www.semanlink.net/tag/coursera_computational_neuroscience|related|http://www.semanlink.net/tag/hebbian_theory +http://www.semanlink.net/tag/coursera_computational_neuroscience|related|http://www.semanlink.net/tag/sparse_dictionary_learning +http://www.semanlink.net/tag/coursera_computational_neuroscience|creationDate|2014-01-11 +http://www.semanlink.net/tag/coursera_computational_neuroscience|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coursera_computational_neuroscience|homepage|https://class.coursera.org/compneuro-002 +http://www.semanlink.net/tag/coursera_computational_neuroscience|uri|http://www.semanlink.net/tag/coursera_computational_neuroscience +http://www.semanlink.net/tag/coursera_computational_neuroscience|broader_prefLabel|Computational Neuroscience +http://www.semanlink.net/tag/coursera_computational_neuroscience|broader_prefLabel|Coursera +http://www.semanlink.net/tag/coursera_computational_neuroscience|broader_related|http://www.semanlink.net/tag/brain_vs_deep_learning +http://www.semanlink.net/tag/coursera_computational_neuroscience|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/ian_davis|creationTime|2011-06-23T16:28:18Z +http://www.semanlink.net/tag/ian_davis|prefLabel|Ian Davis +http://www.semanlink.net/tag/ian_davis|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/ian_davis|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/ian_davis|related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/ian_davis|related|http://www.semanlink.net/tag/talis +http://www.semanlink.net/tag/ian_davis|creationDate|2011-06-23 +http://www.semanlink.net/tag/ian_davis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ian_davis|uri|http://www.semanlink.net/tag/ian_davis +http://www.semanlink.net/tag/ian_davis|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/ian_davis|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/ian_davis|broader_altLabel|Technical guys +http://www.semanlink.net/tag/genetique_et_evolution|creationTime|2011-04-04T15:16:49Z +http://www.semanlink.net/tag/genetique_et_evolution|prefLabel|Génétique et Évolution +http://www.semanlink.net/tag/genetique_et_evolution|broader|http://www.semanlink.net/tag/evolution +http://www.semanlink.net/tag/genetique_et_evolution|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/genetique_et_evolution|creationDate|2011-04-04 +http://www.semanlink.net/tag/genetique_et_evolution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/genetique_et_evolution|uri|http://www.semanlink.net/tag/genetique_et_evolution +http://www.semanlink.net/tag/genetique_et_evolution|broader_prefLabel|Evolution +http://www.semanlink.net/tag/genetique_et_evolution|broader_prefLabel|Genetics +http://www.semanlink.net/tag/genetique_et_evolution|broader_prefLabel|Génétique 
+http://www.semanlink.net/tag/europe_and_uk|creationTime|2014-06-27T00:45:08Z +http://www.semanlink.net/tag/europe_and_uk|prefLabel|Europe and UK +http://www.semanlink.net/tag/europe_and_uk|broader|http://www.semanlink.net/tag/royaume_uni +http://www.semanlink.net/tag/europe_and_uk|broader|http://www.semanlink.net/tag/union_europeenne +http://www.semanlink.net/tag/europe_and_uk|creationDate|2014-06-27 +http://www.semanlink.net/tag/europe_and_uk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/europe_and_uk|uri|http://www.semanlink.net/tag/europe_and_uk +http://www.semanlink.net/tag/europe_and_uk|broader_prefLabel|Royaume Uni +http://www.semanlink.net/tag/europe_and_uk|broader_prefLabel|Union européenne +http://www.semanlink.net/tag/europe_and_uk|broader_altLabel|UK +http://www.semanlink.net/tag/europe_and_uk|broader_altLabel|UE +http://www.semanlink.net/tag/business_case|creationTime|2010-07-24T01:40:39Z +http://www.semanlink.net/tag/business_case|prefLabel|Business case +http://www.semanlink.net/tag/business_case|creationDate|2010-07-24 +http://www.semanlink.net/tag/business_case|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/business_case|uri|http://www.semanlink.net/tag/business_case +http://www.semanlink.net/tag/data_integration|creationTime|2008-07-06T04:40:52Z +http://www.semanlink.net/tag/data_integration|prefLabel|Data integration +http://www.semanlink.net/tag/data_integration|related|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/data_integration|creationDate|2008-07-06 +http://www.semanlink.net/tag/data_integration|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_integration|uri|http://www.semanlink.net/tag/data_integration +http://www.semanlink.net/tag/path_queries|creationTime|2015-10-31T00:16:49Z +http://www.semanlink.net/tag/path_queries|prefLabel|Path queries +http://www.semanlink.net/tag/path_queries|creationDate|2015-10-31 +http://www.semanlink.net/tag/path_queries|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/path_queries|uri|http://www.semanlink.net/tag/path_queries +http://www.semanlink.net/tag/rigolo|prefLabel|Rigolo +http://www.semanlink.net/tag/rigolo|broader|http://www.semanlink.net/tag/fun +http://www.semanlink.net/tag/rigolo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rigolo|uri|http://www.semanlink.net/tag/rigolo +http://www.semanlink.net/tag/rigolo|broader_prefLabel|Fun +http://www.semanlink.net/tag/techlash|creationTime|2020-12-19T13:54:16Z +http://www.semanlink.net/tag/techlash|prefLabel|Techlash +http://www.semanlink.net/tag/techlash|broader|http://www.semanlink.net/tag/dark_side_of_tech +http://www.semanlink.net/tag/techlash|creationDate|2020-12-19 +http://www.semanlink.net/tag/techlash|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/techlash|uri|http://www.semanlink.net/tag/techlash +http://www.semanlink.net/tag/techlash|broader_prefLabel|Dark side of Tech +http://www.semanlink.net/tag/java_jni|creationTime|2008-12-02T00:18:57Z +http://www.semanlink.net/tag/java_jni|prefLabel|Java: JNI +http://www.semanlink.net/tag/java_jni|broader|http://www.semanlink.net/tag/java_dev +http://www.semanlink.net/tag/java_jni|creationDate|2008-12-02 +http://www.semanlink.net/tag/java_jni|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/java_jni|uri|http://www.semanlink.net/tag/java_jni +http://www.semanlink.net/tag/java_jni|broader_prefLabel|Java dev +http://www.semanlink.net/tag/antidot|creationTime|2011-12-26T18:08:50Z +http://www.semanlink.net/tag/antidot|prefLabel|Antidot +http://www.semanlink.net/tag/antidot|broader|http://www.semanlink.net/tag/french_semantic_web_company +http://www.semanlink.net/tag/antidot|related|http://www.semanlink.net/tag/gautier_poupeau +http://www.semanlink.net/tag/antidot|creationDate|2011-12-26 +http://www.semanlink.net/tag/antidot|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antidot|uri|http://www.semanlink.net/tag/antidot +http://www.semanlink.net/tag/antidot|broader_prefLabel|French Semantic web company +http://www.semanlink.net/tag/windows_vista|creationTime|2007-02-13T00:24:34Z +http://www.semanlink.net/tag/windows_vista|prefLabel|Windows Vista +http://www.semanlink.net/tag/windows_vista|broader|http://www.semanlink.net/tag/windows +http://www.semanlink.net/tag/windows_vista|creationDate|2007-02-13 +http://www.semanlink.net/tag/windows_vista|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/windows_vista|uri|http://www.semanlink.net/tag/windows_vista +http://www.semanlink.net/tag/windows_vista|broader_prefLabel|Windows +http://www.semanlink.net/tag/wikilinks_corpus|creationTime|2013-03-12T14:54:01Z +http://www.semanlink.net/tag/wikilinks_corpus|prefLabel|Wikilinks Corpus +http://www.semanlink.net/tag/wikilinks_corpus|broader|http://www.semanlink.net/tag/google_research +http://www.semanlink.net/tag/wikilinks_corpus|broader|http://www.semanlink.net/tag/big_data +http://www.semanlink.net/tag/wikilinks_corpus|broader|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/tag/wikilinks_corpus|related|http://www.semanlink.net/tag/wikipedia +http://www.semanlink.net/tag/wikilinks_corpus|creationDate|2013-03-12 +http://www.semanlink.net/tag/wikilinks_corpus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wikilinks_corpus|uri|http://www.semanlink.net/tag/wikilinks_corpus +http://www.semanlink.net/tag/wikilinks_corpus|broader_prefLabel|Google Research +http://www.semanlink.net/tag/wikilinks_corpus|broader_prefLabel|Big Data +http://www.semanlink.net/tag/wikilinks_corpus|broader_prefLabel|Named Entity Recognition +http://www.semanlink.net/tag/wikilinks_corpus|broader_altLabel|NER +http://www.semanlink.net/tag/wikilinks_corpus|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/schema_org_actions|creationTime|2014-09-10T23:16:20Z +http://www.semanlink.net/tag/schema_org_actions|prefLabel|Schema.org Actions +http://www.semanlink.net/tag/schema_org_actions|broader|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/tag/schema_org_actions|related|http://www.semanlink.net/tag/hydra +http://www.semanlink.net/tag/schema_org_actions|creationDate|2014-09-10 +http://www.semanlink.net/tag/schema_org_actions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/schema_org_actions|uri|http://www.semanlink.net/tag/schema_org_actions +http://www.semanlink.net/tag/schema_org_actions|broader_prefLabel|schema.org +http://www.semanlink.net/tag/schema_org_actions|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/schema_org_actions|broader_related|http://www.semanlink.net/tag/yahoo 
+http://www.semanlink.net/tag/schema_org_actions|broader_related|http://www.semanlink.net/tag/google
+http://www.semanlink.net/tag/schema_org_actions|broader_related|http://www.semanlink.net/tag/yandex
+http://www.semanlink.net/tag/schema_org_actions|broader_related|http://www.semanlink.net/tag/microdata
+http://www.semanlink.net/tag/schema_org_actions|broader_related|http://www.semanlink.net/tag/dan_brickley
+http://www.semanlink.net/tag/schema_org_actions|broader_related|http://www.semanlink.net/tag/bing
+http://www.semanlink.net/tag/proletarisation|creationTime|2016-01-05T18:08:15Z
+http://www.semanlink.net/tag/proletarisation|prefLabel|Prolétarisation
+http://www.semanlink.net/tag/proletarisation|broader|http://www.semanlink.net/tag/travail
+http://www.semanlink.net/tag/proletarisation|related|http://www.semanlink.net/tag/vive_le_capitalisme
+http://www.semanlink.net/tag/proletarisation|creationDate|2016-01-05
+http://www.semanlink.net/tag/proletarisation|comment|"“La prolétarisation est ce qui consiste à priver un sujet (producteur, consommateur, concepteur) de ses savoirs (savoir-faire, savoir-vivre, savoir concevoir, savoir décider)"" (Christian Fauré)"
+http://www.semanlink.net/tag/proletarisation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/proletarisation|uri|http://www.semanlink.net/tag/proletarisation
+http://www.semanlink.net/tag/proletarisation|broader_prefLabel|Travail
+http://www.semanlink.net/tag/turkmenistan|prefLabel|Turkmenistan
+http://www.semanlink.net/tag/turkmenistan|broader|http://www.semanlink.net/tag/urss
+http://www.semanlink.net/tag/turkmenistan|broader|http://www.semanlink.net/tag/asie_centrale
+http://www.semanlink.net/tag/turkmenistan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/turkmenistan|describedBy|https://en.wikipedia.org/wiki/Turkmenistan
+http://www.semanlink.net/tag/turkmenistan|altLabel|Turkménistan
+http://www.semanlink.net/tag/turkmenistan|uri|http://www.semanlink.net/tag/turkmenistan
+http://www.semanlink.net/tag/turkmenistan|broader_prefLabel|Ex URSS
+http://www.semanlink.net/tag/turkmenistan|broader_prefLabel|URSS
+http://www.semanlink.net/tag/turkmenistan|broader_prefLabel|Asie centrale
+http://www.semanlink.net/tag/zoroastre|creationTime|2016-10-02T12:10:07Z
+http://www.semanlink.net/tag/zoroastre|prefLabel|Zoroastre
+http://www.semanlink.net/tag/zoroastre|broader|http://www.semanlink.net/tag/philosophe
+http://www.semanlink.net/tag/zoroastre|creationDate|2016-10-02
+http://www.semanlink.net/tag/zoroastre|comment|celui qui s'adresse à son dieu comme un ami à un ami
+http://www.semanlink.net/tag/zoroastre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/zoroastre|describedBy|https://fr.wikipedia.org/wiki/Zoroastre
+http://www.semanlink.net/tag/zoroastre|uri|http://www.semanlink.net/tag/zoroastre
+http://www.semanlink.net/tag/zoroastre|broader_prefLabel|Philosophe
+http://www.semanlink.net/tag/kadhafi|creationTime|2013-01-22T22:26:25Z
+http://www.semanlink.net/tag/kadhafi|prefLabel|Kadhafi
+http://www.semanlink.net/tag/kadhafi|broader|http://www.semanlink.net/tag/lybie
+http://www.semanlink.net/tag/kadhafi|broader|http://www.semanlink.net/tag/dictateur
+http://www.semanlink.net/tag/kadhafi|creationDate|2013-01-22
+http://www.semanlink.net/tag/kadhafi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/kadhafi|uri|http://www.semanlink.net/tag/kadhafi
+http://www.semanlink.net/tag/kadhafi|broader_prefLabel|Lybie +http://www.semanlink.net/tag/kadhafi|broader_prefLabel|Dictateur +http://www.semanlink.net/tag/integrating_tomcat_with_apache|prefLabel|Integrating Tomcat with Apache +http://www.semanlink.net/tag/integrating_tomcat_with_apache|broader|http://www.semanlink.net/tag/apache +http://www.semanlink.net/tag/integrating_tomcat_with_apache|broader|http://www.semanlink.net/tag/developer_documentation +http://www.semanlink.net/tag/integrating_tomcat_with_apache|broader|http://www.semanlink.net/tag/tomcat +http://www.semanlink.net/tag/integrating_tomcat_with_apache|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/integrating_tomcat_with_apache|uri|http://www.semanlink.net/tag/integrating_tomcat_with_apache +http://www.semanlink.net/tag/integrating_tomcat_with_apache|broader_prefLabel|Apache web server +http://www.semanlink.net/tag/integrating_tomcat_with_apache|broader_prefLabel|Developer documentation +http://www.semanlink.net/tag/integrating_tomcat_with_apache|broader_prefLabel|Tomcat +http://www.semanlink.net/tag/integrating_tomcat_with_apache|broader_altLabel|Dev doc +http://www.semanlink.net/tag/zinder_alimentation_en_eau|creationTime|2007-10-29T16:30:00Z +http://www.semanlink.net/tag/zinder_alimentation_en_eau|prefLabel|Zinder : alimentation en eau +http://www.semanlink.net/tag/zinder_alimentation_en_eau|broader|http://www.semanlink.net/tag/zinder +http://www.semanlink.net/tag/zinder_alimentation_en_eau|broader|http://www.semanlink.net/tag/chine_afrique +http://www.semanlink.net/tag/zinder_alimentation_en_eau|creationDate|2007-10-29 +http://www.semanlink.net/tag/zinder_alimentation_en_eau|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zinder_alimentation_en_eau|uri|http://www.semanlink.net/tag/zinder_alimentation_en_eau +http://www.semanlink.net/tag/zinder_alimentation_en_eau|broader_prefLabel|Zinder +http://www.semanlink.net/tag/zinder_alimentation_en_eau|broader_prefLabel|Chine / Afrique +http://www.semanlink.net/tag/zinder_alimentation_en_eau|broader_altLabel|Damarangam +http://www.semanlink.net/tag/zinder_alimentation_en_eau|broader_altLabel|Chinafrique +http://www.semanlink.net/tag/placements_ethiques|creationTime|2010-10-14T18:45:25Z +http://www.semanlink.net/tag/placements_ethiques|prefLabel|Placements éthiques +http://www.semanlink.net/tag/placements_ethiques|creationDate|2010-10-14 +http://www.semanlink.net/tag/placements_ethiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/placements_ethiques|uri|http://www.semanlink.net/tag/placements_ethiques +http://www.semanlink.net/tag/herodote|creationTime|2007-08-10T13:26:38Z +http://www.semanlink.net/tag/herodote|prefLabel|Hérodote +http://www.semanlink.net/tag/herodote|broader|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/tag/herodote|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/herodote|creationDate|2007-08-10 +http://www.semanlink.net/tag/herodote|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/herodote|uri|http://www.semanlink.net/tag/herodote +http://www.semanlink.net/tag/herodote|broader_prefLabel|Grèce antique +http://www.semanlink.net/tag/herodote|broader_prefLabel|Géographie +http://www.semanlink.net/tag/croisade_des_albigeois|prefLabel|Croisade des Albigeois 
+http://www.semanlink.net/tag/croisade_des_albigeois|broader|http://www.semanlink.net/tag/croisades +http://www.semanlink.net/tag/croisade_des_albigeois|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/croisade_des_albigeois|uri|http://www.semanlink.net/tag/croisade_des_albigeois +http://www.semanlink.net/tag/croisade_des_albigeois|broader_prefLabel|Croisades +http://www.semanlink.net/tag/offres_d_emploi|creationTime|2008-08-18T23:38:07Z +http://www.semanlink.net/tag/offres_d_emploi|prefLabel|Offres d'emploi +http://www.semanlink.net/tag/offres_d_emploi|creationDate|2008-08-18 +http://www.semanlink.net/tag/offres_d_emploi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/offres_d_emploi|uri|http://www.semanlink.net/tag/offres_d_emploi +http://www.semanlink.net/tag/infotechnocratie|creationTime|2013-09-24T10:23:56Z +http://www.semanlink.net/tag/infotechnocratie|prefLabel|Infotechnocratie +http://www.semanlink.net/tag/infotechnocratie|creationDate|2013-09-24 +http://www.semanlink.net/tag/infotechnocratie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/infotechnocratie|uri|http://www.semanlink.net/tag/infotechnocratie +http://www.semanlink.net/tag/guide_d_achat|prefLabel|Guide d'achat +http://www.semanlink.net/tag/guide_d_achat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/guide_d_achat|uri|http://www.semanlink.net/tag/guide_d_achat +http://www.semanlink.net/tag/jeu_d_echecs|prefLabel|Jeu d'échecs +http://www.semanlink.net/tag/jeu_d_echecs|broader|http://www.semanlink.net/tag/divers +http://www.semanlink.net/tag/jeu_d_echecs|broader|http://www.semanlink.net/tag/jeux +http://www.semanlink.net/tag/jeu_d_echecs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeu_d_echecs|uri|http://www.semanlink.net/tag/jeu_d_echecs +http://www.semanlink.net/tag/jeu_d_echecs|broader_prefLabel|Divers +http://www.semanlink.net/tag/jeu_d_echecs|broader_prefLabel|Jeux +http://www.semanlink.net/tag/natural_language_supervision|creationTime|2021-01-06T15:54:20Z +http://www.semanlink.net/tag/natural_language_supervision|prefLabel|Natural Language Supervision +http://www.semanlink.net/tag/natural_language_supervision|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/natural_language_supervision|related|http://www.semanlink.net/tag/self_supervised_learning +http://www.semanlink.net/tag/natural_language_supervision|creationDate|2021-01-06 +http://www.semanlink.net/tag/natural_language_supervision|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/natural_language_supervision|uri|http://www.semanlink.net/tag/natural_language_supervision +http://www.semanlink.net/tag/natural_language_supervision|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/extractive_summarization|creationTime|2020-02-09T23:36:59Z +http://www.semanlink.net/tag/extractive_summarization|prefLabel|Extractive Text Summarization +http://www.semanlink.net/tag/extractive_summarization|broader|http://www.semanlink.net/tag/automatic_summarization +http://www.semanlink.net/tag/extractive_summarization|creationDate|2020-02-09 +http://www.semanlink.net/tag/extractive_summarization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/extractive_summarization|uri|http://www.semanlink.net/tag/extractive_summarization 
+http://www.semanlink.net/tag/extractive_summarization|broader_prefLabel|Text Summarization +http://www.semanlink.net/tag/extractive_summarization|broader_altLabel|Automatic summarization +http://www.semanlink.net/tag/extractive_summarization|broader_related|http://www.semanlink.net/tag/nlp_long_documents +http://www.semanlink.net/tag/open_standards|creationTime|2008-06-12T23:48:03Z +http://www.semanlink.net/tag/open_standards|prefLabel|Open standards +http://www.semanlink.net/tag/open_standards|creationDate|2008-06-12 +http://www.semanlink.net/tag/open_standards|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/open_standards|uri|http://www.semanlink.net/tag/open_standards +http://www.semanlink.net/tag/dave_reynolds|creationTime|2011-07-17T18:58:19Z +http://www.semanlink.net/tag/dave_reynolds|prefLabel|Dave Reynolds +http://www.semanlink.net/tag/dave_reynolds|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/dave_reynolds|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/dave_reynolds|related|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/dave_reynolds|creationDate|2011-07-17 +http://www.semanlink.net/tag/dave_reynolds|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dave_reynolds|uri|http://www.semanlink.net/tag/dave_reynolds +http://www.semanlink.net/tag/dave_reynolds|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/dave_reynolds|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/dave_reynolds|broader_altLabel|Technical guys +http://www.semanlink.net/tag/deep_learning_optimization_methods|creationTime|2017-10-02T13:23:28Z +http://www.semanlink.net/tag/deep_learning_optimization_methods|prefLabel|Deep Learning: Optimization methods +http://www.semanlink.net/tag/deep_learning_optimization_methods|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/deep_learning_optimization_methods|creationDate|2017-10-02 +http://www.semanlink.net/tag/deep_learning_optimization_methods|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deep_learning_optimization_methods|altLabel|Deep Learning: Optimization +http://www.semanlink.net/tag/deep_learning_optimization_methods|uri|http://www.semanlink.net/tag/deep_learning_optimization_methods +http://www.semanlink.net/tag/deep_learning_optimization_methods|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/deep_learning_optimization_methods|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/deep_learning_optimization_methods|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/teaching_kids_to_code|creationTime|2010-08-30T14:50:07Z +http://www.semanlink.net/tag/teaching_kids_to_code|prefLabel|Teaching Kids to Code +http://www.semanlink.net/tag/teaching_kids_to_code|creationDate|2010-08-30 +http://www.semanlink.net/tag/teaching_kids_to_code|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/teaching_kids_to_code|uri|http://www.semanlink.net/tag/teaching_kids_to_code +http://www.semanlink.net/tag/fps_and_www_2008|creationTime|2008-05-17T23:13:39Z +http://www.semanlink.net/tag/fps_and_www_2008|prefLabel|fps and WWW 2008 +http://www.semanlink.net/tag/fps_and_www_2008|broader|http://www.semanlink.net/tag/fps 
+http://www.semanlink.net/tag/fps_and_www_2008|broader|http://www.semanlink.net/tag/www08 +http://www.semanlink.net/tag/fps_and_www_2008|creationDate|2008-05-17 +http://www.semanlink.net/tag/fps_and_www_2008|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps_and_www_2008|uri|http://www.semanlink.net/tag/fps_and_www_2008 +http://www.semanlink.net/tag/fps_and_www_2008|broader_prefLabel|fps +http://www.semanlink.net/tag/fps_and_www_2008|broader_prefLabel|WWW 2008 +http://www.semanlink.net/tag/classical_mechanics|creationTime|2021-01-13T11:44:34Z +http://www.semanlink.net/tag/classical_mechanics|prefLabel|Classical mechanics +http://www.semanlink.net/tag/classical_mechanics|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/classical_mechanics|related|http://www.semanlink.net/tag/newton +http://www.semanlink.net/tag/classical_mechanics|creationDate|2021-01-13 +http://www.semanlink.net/tag/classical_mechanics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/classical_mechanics|describedBy|https://en.wikipedia.org/wiki/Classical_mechanics +http://www.semanlink.net/tag/classical_mechanics|altLabel|Mécanique classique +http://www.semanlink.net/tag/classical_mechanics|uri|http://www.semanlink.net/tag/classical_mechanics +http://www.semanlink.net/tag/classical_mechanics|broader_prefLabel|Physique +http://www.semanlink.net/tag/classical_mechanics|broader_altLabel|Physics +http://www.semanlink.net/tag/vive_le_capitalisme|creationTime|2009-03-10T23:14:44Z +http://www.semanlink.net/tag/vive_le_capitalisme|prefLabel|Vive le capitalisme ! +http://www.semanlink.net/tag/vive_le_capitalisme|broader|http://www.semanlink.net/tag/capitalisme +http://www.semanlink.net/tag/vive_le_capitalisme|creationDate|2009-03-10 +http://www.semanlink.net/tag/vive_le_capitalisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vive_le_capitalisme|altLabel|Capitalisme de merde +http://www.semanlink.net/tag/vive_le_capitalisme|uri|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.semanlink.net/tag/vive_le_capitalisme|broader_prefLabel|Capitalisme +http://www.semanlink.net/tag/cinema_bresilien|creationTime|2007-11-24T12:07:47Z +http://www.semanlink.net/tag/cinema_bresilien|prefLabel|Cinéma brésilien +http://www.semanlink.net/tag/cinema_bresilien|broader|http://www.semanlink.net/tag/cinema +http://www.semanlink.net/tag/cinema_bresilien|creationDate|2007-11-24 +http://www.semanlink.net/tag/cinema_bresilien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cinema_bresilien|uri|http://www.semanlink.net/tag/cinema_bresilien +http://www.semanlink.net/tag/cinema_bresilien|broader_prefLabel|Cinéma +http://www.semanlink.net/tag/fait_divers|prefLabel|Fait divers +http://www.semanlink.net/tag/fait_divers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fait_divers|uri|http://www.semanlink.net/tag/fait_divers +http://www.semanlink.net/tag/skos_w3c_document|creationTime|2008-05-08T16:49:40Z +http://www.semanlink.net/tag/skos_w3c_document|prefLabel|SKOS W3C document +http://www.semanlink.net/tag/skos_w3c_document|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/skos_w3c_document|broader|http://www.semanlink.net/tag/skos +http://www.semanlink.net/tag/skos_w3c_document|creationDate|2008-05-08 
+http://www.semanlink.net/tag/skos_w3c_document|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/skos_w3c_document|uri|http://www.semanlink.net/tag/skos_w3c_document +http://www.semanlink.net/tag/skos_w3c_document|broader_prefLabel|W3C +http://www.semanlink.net/tag/skos_w3c_document|broader_prefLabel|SKOS +http://www.semanlink.net/tag/skos_w3c_document|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/skos_w3c_document|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/skos_w3c_document|broader_related|http://www.semanlink.net/tag/thesaurus +http://www.semanlink.net/tag/manipulations_genetiques|prefLabel|Manipulations génétiques +http://www.semanlink.net/tag/manipulations_genetiques|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/manipulations_genetiques|broader|http://www.semanlink.net/tag/biotechnologies +http://www.semanlink.net/tag/manipulations_genetiques|creationDate|2006-11-23 +http://www.semanlink.net/tag/manipulations_genetiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/manipulations_genetiques|uri|http://www.semanlink.net/tag/manipulations_genetiques +http://www.semanlink.net/tag/manipulations_genetiques|broader_prefLabel|Genetics +http://www.semanlink.net/tag/manipulations_genetiques|broader_prefLabel|Génétique +http://www.semanlink.net/tag/manipulations_genetiques|broader_prefLabel|Biotechnologies +http://www.semanlink.net/tag/manipulations_genetiques|broader_prefLabel|Biotechnologies +http://www.semanlink.net/tag/polynesians|creationTime|2013-08-27T13:51:16Z +http://www.semanlink.net/tag/polynesians|prefLabel|Polynesians +http://www.semanlink.net/tag/polynesians|broader|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/polynesians|creationDate|2013-08-27 +http://www.semanlink.net/tag/polynesians|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/polynesians|describedBy|https://en.wikipedia.org/wiki/Polynesians +http://www.semanlink.net/tag/polynesians|uri|http://www.semanlink.net/tag/polynesians +http://www.semanlink.net/tag/polynesians|broader_prefLabel|Peuples +http://www.semanlink.net/tag/semtechbiz|creationTime|2011-11-02T23:22:10Z +http://www.semanlink.net/tag/semtechbiz|prefLabel|SemTechBiz +http://www.semanlink.net/tag/semtechbiz|broader|http://www.semanlink.net/tag/semantic_web_conferences +http://www.semanlink.net/tag/semtechbiz|creationDate|2011-11-02 +http://www.semanlink.net/tag/semtechbiz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semtechbiz|uri|http://www.semanlink.net/tag/semtechbiz +http://www.semanlink.net/tag/semtechbiz|broader_prefLabel|Semantic Web conferences +http://www.semanlink.net/tag/guillaume_genthial|creationTime|2018-05-21T12:03:31Z +http://www.semanlink.net/tag/guillaume_genthial|prefLabel|Guillaume Genthial +http://www.semanlink.net/tag/guillaume_genthial|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/guillaume_genthial|related|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/tag/guillaume_genthial|creationDate|2018-05-21 +http://www.semanlink.net/tag/guillaume_genthial|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/guillaume_genthial|uri|http://www.semanlink.net/tag/guillaume_genthial +http://www.semanlink.net/tag/guillaume_genthial|broader_prefLabel|NLP girls and guys 
+http://www.semanlink.net/tag/apprentissage|prefLabel|Apprentissage +http://www.semanlink.net/tag/apprentissage|broader|http://www.semanlink.net/tag/divers +http://www.semanlink.net/tag/apprentissage|related|http://www.semanlink.net/tag/language_learning +http://www.semanlink.net/tag/apprentissage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apprentissage|uri|http://www.semanlink.net/tag/apprentissage +http://www.semanlink.net/tag/apprentissage|broader_prefLabel|Divers +http://www.semanlink.net/tag/google_cloud|creationTime|2018-05-31T16:24:27Z +http://www.semanlink.net/tag/google_cloud|prefLabel|Google Cloud +http://www.semanlink.net/tag/google_cloud|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_cloud|broader|http://www.semanlink.net/tag/cloud +http://www.semanlink.net/tag/google_cloud|creationDate|2018-05-31 +http://www.semanlink.net/tag/google_cloud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_cloud|uri|http://www.semanlink.net/tag/google_cloud +http://www.semanlink.net/tag/google_cloud|broader_prefLabel|Google +http://www.semanlink.net/tag/google_cloud|broader_prefLabel|Cloud +http://www.semanlink.net/tag/google_cloud|broader_altLabel|Cloud computing +http://www.semanlink.net/tag/google_cloud|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/protection_de_l_environnement|creationTime|2008-08-15T11:51:06Z +http://www.semanlink.net/tag/protection_de_l_environnement|prefLabel|Protection de l'environnement +http://www.semanlink.net/tag/protection_de_l_environnement|broader|http://www.semanlink.net/tag/environnement +http://www.semanlink.net/tag/protection_de_l_environnement|creationDate|2008-08-15 +http://www.semanlink.net/tag/protection_de_l_environnement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/protection_de_l_environnement|uri|http://www.semanlink.net/tag/protection_de_l_environnement +http://www.semanlink.net/tag/protection_de_l_environnement|broader_prefLabel|Environnement +http://www.semanlink.net/tag/sarkozy|prefLabel|Sarkozy +http://www.semanlink.net/tag/sarkozy|broader|http://www.semanlink.net/tag/homme_politique +http://www.semanlink.net/tag/sarkozy|broader|http://www.semanlink.net/tag/politique_francaise +http://www.semanlink.net/tag/sarkozy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sarkozy|uri|http://www.semanlink.net/tag/sarkozy +http://www.semanlink.net/tag/sarkozy|broader_prefLabel|Homme politique +http://www.semanlink.net/tag/sarkozy|broader_prefLabel|Politique française +http://www.semanlink.net/tag/rdf_and_property_graphs|creationTime|2019-03-01T00:16:56Z +http://www.semanlink.net/tag/rdf_and_property_graphs|prefLabel|RDF and Property Graphs +http://www.semanlink.net/tag/rdf_and_property_graphs|broader|http://www.semanlink.net/tag/property_graphs +http://www.semanlink.net/tag/rdf_and_property_graphs|broader|http://www.semanlink.net/tag/rdf_graphs +http://www.semanlink.net/tag/rdf_and_property_graphs|creationDate|2019-03-01 +http://www.semanlink.net/tag/rdf_and_property_graphs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_and_property_graphs|uri|http://www.semanlink.net/tag/rdf_and_property_graphs +http://www.semanlink.net/tag/rdf_and_property_graphs|broader_prefLabel|Property Graphs +http://www.semanlink.net/tag/rdf_and_property_graphs|broader_prefLabel|RDF graphs 
+http://www.semanlink.net/tag/rdf_and_property_graphs|broader_altLabel|Property Graph Model +http://www.semanlink.net/tag/blojsom|prefLabel|blojsom +http://www.semanlink.net/tag/blojsom|broader|http://www.semanlink.net/tag/blog_software +http://www.semanlink.net/tag/blojsom|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/blojsom|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blojsom|uri|http://www.semanlink.net/tag/blojsom +http://www.semanlink.net/tag/blojsom|broader_prefLabel|Blog software +http://www.semanlink.net/tag/blojsom|broader_prefLabel|Java +http://www.semanlink.net/tag/covid19_impreparation|creationTime|2020-03-28T16:55:46Z +http://www.semanlink.net/tag/covid19_impreparation|prefLabel|Covid19 : impréparation +http://www.semanlink.net/tag/covid19_impreparation|broader|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/tag/covid19_impreparation|creationDate|2020-03-28 +http://www.semanlink.net/tag/covid19_impreparation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/covid19_impreparation|uri|http://www.semanlink.net/tag/covid19_impreparation +http://www.semanlink.net/tag/covid19_impreparation|broader_prefLabel|Covid19 +http://www.semanlink.net/tag/covid19_impreparation|broader_altLabel|covid-19 +http://www.semanlink.net/tag/covid19_impreparation|broader_altLabel|Covid +http://www.semanlink.net/tag/covid19_impreparation|broader_altLabel|Coronavirus +http://www.semanlink.net/tag/entity_salience|creationTime|2020-10-10T03:39:05Z +http://www.semanlink.net/tag/entity_salience|prefLabel|Entity salience +http://www.semanlink.net/tag/entity_salience|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/entity_salience|creationDate|2020-10-10 +http://www.semanlink.net/tag/entity_salience|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/entity_salience|uri|http://www.semanlink.net/tag/entity_salience +http://www.semanlink.net/tag/entity_salience|broader_prefLabel|Entities +http://www.semanlink.net/tag/multi_hop_reasonning|creationTime|2018-11-08T00:57:14Z +http://www.semanlink.net/tag/multi_hop_reasonning|prefLabel|Multi-hop reasonning +http://www.semanlink.net/tag/multi_hop_reasonning|broader|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/tag/multi_hop_reasonning|creationDate|2018-11-08 +http://www.semanlink.net/tag/multi_hop_reasonning|comment|Question Answering for complex questions is often modeled as a graph construction or traversal task, where a solver must build or traverse a graph of facts that answer and explain a given question. 
+http://www.semanlink.net/tag/multi_hop_reasonning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/multi_hop_reasonning|altLabel|Multi-hop inference
+http://www.semanlink.net/tag/multi_hop_reasonning|uri|http://www.semanlink.net/tag/multi_hop_reasonning
+http://www.semanlink.net/tag/multi_hop_reasonning|broader_prefLabel|Question Answering
+http://www.semanlink.net/tag/multi_hop_reasonning|broader_altLabel|QA
+http://www.semanlink.net/tag/multi_hop_reasonning|broader_related|http://www.semanlink.net/tag/chatbot
+http://www.semanlink.net/tag/audi|creationTime|2008-06-17T21:44:04Z
+http://www.semanlink.net/tag/audi|prefLabel|Audi
+http://www.semanlink.net/tag/audi|broader|http://www.semanlink.net/tag/automobile
+http://www.semanlink.net/tag/audi|creationDate|2008-06-17
+http://www.semanlink.net/tag/audi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/audi|uri|http://www.semanlink.net/tag/audi
+http://www.semanlink.net/tag/audi|broader_prefLabel|Automobile
+http://www.semanlink.net/tag/audi|broader_altLabel|Automotive
+http://www.semanlink.net/tag/ong|prefLabel|ONG
+http://www.semanlink.net/tag/ong|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ong|uri|http://www.semanlink.net/tag/ong
+http://www.semanlink.net/tag/la_terre_vue_du_ciel|prefLabel|La Terre vue du ciel
+http://www.semanlink.net/tag/la_terre_vue_du_ciel|creationDate|2006-09-23
+http://www.semanlink.net/tag/la_terre_vue_du_ciel|comment|"
+"
+http://www.semanlink.net/tag/la_terre_vue_du_ciel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/la_terre_vue_du_ciel|uri|http://www.semanlink.net/tag/la_terre_vue_du_ciel
+http://www.semanlink.net/tag/web_services_document_vs_rpc_style|prefLabel|Web services: document vs RPC Style
+http://www.semanlink.net/tag/web_services_document_vs_rpc_style|broader|http://www.semanlink.net/tag/web_services
+http://www.semanlink.net/tag/web_services_document_vs_rpc_style|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/web_services_document_vs_rpc_style|uri|http://www.semanlink.net/tag/web_services_document_vs_rpc_style
+http://www.semanlink.net/tag/web_services_document_vs_rpc_style|broader_prefLabel|Web Services
+http://www.semanlink.net/tag/web_services_document_vs_rpc_style|broader_altLabel|WS
+http://www.semanlink.net/tag/seevl|creationTime|2011-05-24T22:02:15Z
+http://www.semanlink.net/tag/seevl|prefLabel|Seevl
+http://www.semanlink.net/tag/seevl|broader|http://www.semanlink.net/tag/linked_data_application
+http://www.semanlink.net/tag/seevl|broader|http://www.semanlink.net/tag/alexandre_passant
+http://www.semanlink.net/tag/seevl|broader|http://www.semanlink.net/tag/musique
+http://www.semanlink.net/tag/seevl|creationDate|2011-05-24
+http://www.semanlink.net/tag/seevl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/seevl|uri|http://www.semanlink.net/tag/seevl
+http://www.semanlink.net/tag/seevl|broader_prefLabel|Linked Data: application
+http://www.semanlink.net/tag/seevl|broader_prefLabel|Alexandre Passant
+http://www.semanlink.net/tag/seevl|broader_prefLabel|Musique
+http://www.semanlink.net/tag/seevl|broader_altLabel|Music
+http://www.semanlink.net/tag/seevl|broader_related|http://www.semanlink.net/tag/linked_data
+http://www.semanlink.net/tag/drm_in_html_5|creationTime|2013-04-27T19:57:21Z
+http://www.semanlink.net/tag/drm_in_html_5|prefLabel|DRM in HTML 5
+http://www.semanlink.net/tag/drm_in_html_5|broader|http://www.semanlink.net/tag/html5
+http://www.semanlink.net/tag/drm_in_html_5|broader|http://www.semanlink.net/tag/drm
+http://www.semanlink.net/tag/drm_in_html_5|creationDate|2013-04-27
+http://www.semanlink.net/tag/drm_in_html_5|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/drm_in_html_5|uri|http://www.semanlink.net/tag/drm_in_html_5
+http://www.semanlink.net/tag/drm_in_html_5|broader_prefLabel|HTML5
+http://www.semanlink.net/tag/drm_in_html_5|broader_prefLabel|DRM
+http://www.semanlink.net/tag/carbon_sequestration|prefLabel|Carbon sequestration
+http://www.semanlink.net/tag/carbon_sequestration|broader|http://www.semanlink.net/tag/rechauffement_climatique
+http://www.semanlink.net/tag/carbon_sequestration|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/carbon_sequestration|uri|http://www.semanlink.net/tag/carbon_sequestration
+http://www.semanlink.net/tag/carbon_sequestration|broader_prefLabel|Climate crisis
+http://www.semanlink.net/tag/carbon_sequestration|broader_altLabel|Réchauffement climatique
+http://www.semanlink.net/tag/carbon_sequestration|broader_altLabel|Global warming
+http://www.semanlink.net/tag/carbon_sequestration|broader_related|http://www.semanlink.net/tag/anthropocene
+http://www.semanlink.net/tag/cloud_based_lod_platform|creationTime|2013-04-04T13:15:50Z
+http://www.semanlink.net/tag/cloud_based_lod_platform|prefLabel|Cloud based LOD platform
+http://www.semanlink.net/tag/cloud_based_lod_platform|broader|http://www.semanlink.net/tag/cloud_and_linked_data
+http://www.semanlink.net/tag/cloud_based_lod_platform|broader|http://www.semanlink.net/tag/linked_data_platform
+http://www.semanlink.net/tag/cloud_based_lod_platform|creationDate|2013-04-04
+http://www.semanlink.net/tag/cloud_based_lod_platform|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cloud_based_lod_platform|uri|http://www.semanlink.net/tag/cloud_based_lod_platform
+http://www.semanlink.net/tag/cloud_based_lod_platform|broader_prefLabel|Cloud and Linked Data
+http://www.semanlink.net/tag/cloud_based_lod_platform|broader_prefLabel|Linked Data Platform
+http://www.semanlink.net/tag/cloud_based_lod_platform|broader_altLabel|LDP
+http://www.semanlink.net/tag/yahoo_my_web_2_0|prefLabel|Yahoo - My Web 2.0
+http://www.semanlink.net/tag/yahoo_my_web_2_0|broader|http://www.semanlink.net/tag/web_2_0
+http://www.semanlink.net/tag/yahoo_my_web_2_0|broader|http://www.semanlink.net/tag/social_networks
+http://www.semanlink.net/tag/yahoo_my_web_2_0|broader|http://www.semanlink.net/tag/yahoo
+http://www.semanlink.net/tag/yahoo_my_web_2_0|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/yahoo_my_web_2_0|uri|http://www.semanlink.net/tag/yahoo_my_web_2_0
+http://www.semanlink.net/tag/yahoo_my_web_2_0|broader_prefLabel|Web 2.0
+http://www.semanlink.net/tag/yahoo_my_web_2_0|broader_prefLabel|Social Networks
+http://www.semanlink.net/tag/yahoo_my_web_2_0|broader_prefLabel|Yahoo!
+http://www.semanlink.net/tag/thought_vector|creationTime|2018-01-06T16:39:32Z +http://www.semanlink.net/tag/thought_vector|prefLabel|Thought Vector +http://www.semanlink.net/tag/thought_vector|creationDate|2018-01-06 +http://www.semanlink.net/tag/thought_vector|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thought_vector|uri|http://www.semanlink.net/tag/thought_vector +http://www.semanlink.net/tag/provocative_idea|creationTime|2011-12-17T15:39:07Z +http://www.semanlink.net/tag/provocative_idea|prefLabel|Provocative idea +http://www.semanlink.net/tag/provocative_idea|creationDate|2011-12-17 +http://www.semanlink.net/tag/provocative_idea|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/provocative_idea|uri|http://www.semanlink.net/tag/provocative_idea +http://www.semanlink.net/tag/javadoc|prefLabel|Javadoc +http://www.semanlink.net/tag/javadoc|broader|http://www.semanlink.net/tag/documentation_tool +http://www.semanlink.net/tag/javadoc|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/javadoc|broader|http://www.semanlink.net/tag/developer_documentation +http://www.semanlink.net/tag/javadoc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/javadoc|uri|http://www.semanlink.net/tag/javadoc +http://www.semanlink.net/tag/javadoc|broader_prefLabel|Documentation tool +http://www.semanlink.net/tag/javadoc|broader_prefLabel|Java +http://www.semanlink.net/tag/javadoc|broader_prefLabel|Developer documentation +http://www.semanlink.net/tag/javadoc|broader_altLabel|Dev doc +http://www.semanlink.net/tag/bush|prefLabel|Bush +http://www.semanlink.net/tag/bush|broader|http://www.semanlink.net/tag/president_des_usa +http://www.semanlink.net/tag/bush|broader|http://www.semanlink.net/tag/chef_d_etat +http://www.semanlink.net/tag/bush|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/bush|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bush|uri|http://www.semanlink.net/tag/bush +http://www.semanlink.net/tag/bush|broader_prefLabel|Président des USA +http://www.semanlink.net/tag/bush|broader_prefLabel|Chef d'état +http://www.semanlink.net/tag/bush|broader_prefLabel|USA +http://www.semanlink.net/tag/bush|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/bush|broader_altLabel|United States +http://www.semanlink.net/tag/wifi|prefLabel|WIFI +http://www.semanlink.net/tag/wifi|creationDate|2006-11-22 +http://www.semanlink.net/tag/wifi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wifi|uri|http://www.semanlink.net/tag/wifi +http://www.semanlink.net/tag/damian_steer|creationTime|2013-07-07T00:01:52Z +http://www.semanlink.net/tag/damian_steer|prefLabel|Damian Steer +http://www.semanlink.net/tag/damian_steer|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/damian_steer|creationDate|2013-07-07 +http://www.semanlink.net/tag/damian_steer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/damian_steer|uri|http://www.semanlink.net/tag/damian_steer +http://www.semanlink.net/tag/damian_steer|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/sante|prefLabel|Santé +http://www.semanlink.net/tag/sante|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sante|uri|http://www.semanlink.net/tag/sante +http://www.semanlink.net/tag/mafia|prefLabel|Mafia 
+http://www.semanlink.net/tag/mafia|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/mafia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mafia|uri|http://www.semanlink.net/tag/mafia +http://www.semanlink.net/tag/mafia|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/linux|prefLabel|Linux +http://www.semanlink.net/tag/linux|broader|http://www.semanlink.net/tag/unix +http://www.semanlink.net/tag/linux|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/linux|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linux|uri|http://www.semanlink.net/tag/linux +http://www.semanlink.net/tag/linux|broader_prefLabel|Unix +http://www.semanlink.net/tag/linux|broader_prefLabel|Open Source +http://www.semanlink.net/tag/domain_specific_nlp|creationTime|2021-10-21T14:50:25Z +http://www.semanlink.net/tag/domain_specific_nlp|prefLabel|Domain-Specific NLP +http://www.semanlink.net/tag/domain_specific_nlp|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/domain_specific_nlp|related|http://www.semanlink.net/tag/domain_knowledge_deep_learning +http://www.semanlink.net/tag/domain_specific_nlp|creationDate|2021-10-21 +http://www.semanlink.net/tag/domain_specific_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/domain_specific_nlp|uri|http://www.semanlink.net/tag/domain_specific_nlp +http://www.semanlink.net/tag/domain_specific_nlp|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/heroisme|prefLabel|Héroïsme +http://www.semanlink.net/tag/heroisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/heroisme|uri|http://www.semanlink.net/tag/heroisme +http://www.semanlink.net/tag/regroupement_familial|creationTime|2007-09-18T21:33:33Z +http://www.semanlink.net/tag/regroupement_familial|prefLabel|Regroupement familial +http://www.semanlink.net/tag/regroupement_familial|broader|http://www.semanlink.net/tag/immigration_familiale +http://www.semanlink.net/tag/regroupement_familial|creationDate|2007-09-18 +http://www.semanlink.net/tag/regroupement_familial|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/regroupement_familial|uri|http://www.semanlink.net/tag/regroupement_familial +http://www.semanlink.net/tag/regroupement_familial|broader_prefLabel|Immigration familiale +http://www.semanlink.net/tag/peuples|prefLabel|Peuples +http://www.semanlink.net/tag/peuples|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/peuples|uri|http://www.semanlink.net/tag/peuples +http://www.semanlink.net/tag/truffe|creationTime|2007-03-08T00:31:24Z +http://www.semanlink.net/tag/truffe|prefLabel|Truffe +http://www.semanlink.net/tag/truffe|broader|http://www.semanlink.net/tag/curiosite_naturelle +http://www.semanlink.net/tag/truffe|broader|http://www.semanlink.net/tag/gastronomie +http://www.semanlink.net/tag/truffe|broader|http://www.semanlink.net/tag/champignon +http://www.semanlink.net/tag/truffe|creationDate|2007-03-08 +http://www.semanlink.net/tag/truffe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/truffe|uri|http://www.semanlink.net/tag/truffe +http://www.semanlink.net/tag/truffe|broader_prefLabel|Curiosités naturelles +http://www.semanlink.net/tag/truffe|broader_prefLabel|Gastronomie 
+http://www.semanlink.net/tag/truffe|broader_prefLabel|Champignons +http://www.semanlink.net/tag/truffe|broader_altLabel|Cuisine +http://www.semanlink.net/tag/gilles_lepin|prefLabel|Gilles Lepin +http://www.semanlink.net/tag/gilles_lepin|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/gilles_lepin|broader|http://www.semanlink.net/tag/ami +http://www.semanlink.net/tag/gilles_lepin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gilles_lepin|uri|http://www.semanlink.net/tag/gilles_lepin +http://www.semanlink.net/tag/gilles_lepin|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/gilles_lepin|broader_prefLabel|Ami +http://www.semanlink.net/tag/gilles_lepin|broader_altLabel|Technical guys +http://www.semanlink.net/tag/silk_road|creationTime|2014-11-11T20:57:23Z +http://www.semanlink.net/tag/silk_road|prefLabel|Silk Road +http://www.semanlink.net/tag/silk_road|broader|http://www.semanlink.net/tag/dark_web +http://www.semanlink.net/tag/silk_road|creationDate|2014-11-11 +http://www.semanlink.net/tag/silk_road|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/silk_road|uri|http://www.semanlink.net/tag/silk_road +http://www.semanlink.net/tag/silk_road|broader_prefLabel|Dark Web +http://www.semanlink.net/tag/semantic_web_w3_org|prefLabel|semantic-web@w3.org +http://www.semanlink.net/tag/semantic_web_w3_org|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_w3_org|creationDate|2007-01-09 +http://www.semanlink.net/tag/semantic_web_w3_org|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_w3_org|uri|http://www.semanlink.net/tag/semantic_web_w3_org +http://www.semanlink.net/tag/semantic_web_w3_org|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_w3_org|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_w3_org|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/morale|creationTime|2011-08-22T18:02:22Z +http://www.semanlink.net/tag/morale|prefLabel|Morale +http://www.semanlink.net/tag/morale|creationDate|2011-08-22 +http://www.semanlink.net/tag/morale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/morale|uri|http://www.semanlink.net/tag/morale +http://www.semanlink.net/tag/nlp_juridique|creationTime|2019-12-17T14:47:42Z +http://www.semanlink.net/tag/nlp_juridique|prefLabel|NLP + juridique +http://www.semanlink.net/tag/nlp_juridique|broader|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/tag/nlp_juridique|creationDate|2019-12-17 +http://www.semanlink.net/tag/nlp_juridique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_juridique|uri|http://www.semanlink.net/tag/nlp_juridique +http://www.semanlink.net/tag/nlp_juridique|broader_prefLabel|NLP: use cases +http://www.semanlink.net/tag/nlp_juridique|broader_altLabel|NLP: applications +http://www.semanlink.net/tag/blosxom|prefLabel|Blosxom +http://www.semanlink.net/tag/blosxom|broader|http://www.semanlink.net/tag/blog_software +http://www.semanlink.net/tag/blosxom|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blosxom|uri|http://www.semanlink.net/tag/blosxom +http://www.semanlink.net/tag/blosxom|broader_prefLabel|Blog software +http://www.semanlink.net/tag/government_data|creationTime|2010-07-30T14:08:35Z 
+http://www.semanlink.net/tag/government_data|prefLabel|Government data +http://www.semanlink.net/tag/government_data|broader|http://www.semanlink.net/tag/site_web_gouvernemental +http://www.semanlink.net/tag/government_data|broader|http://www.semanlink.net/tag/public_data +http://www.semanlink.net/tag/government_data|creationDate|2010-07-30 +http://www.semanlink.net/tag/government_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/government_data|uri|http://www.semanlink.net/tag/government_data +http://www.semanlink.net/tag/government_data|broader_prefLabel|Site web gouvernemental +http://www.semanlink.net/tag/government_data|broader_prefLabel|Public data +http://www.semanlink.net/tag/lesk_algorithm|creationTime|2019-04-12T11:19:20Z +http://www.semanlink.net/tag/lesk_algorithm|prefLabel|Lesk algorithm +http://www.semanlink.net/tag/lesk_algorithm|broader|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/tag/lesk_algorithm|broader|http://www.semanlink.net/tag/distributional_semantics +http://www.semanlink.net/tag/lesk_algorithm|broader|http://www.semanlink.net/tag/word_sense_disambiguation +http://www.semanlink.net/tag/lesk_algorithm|creationDate|2019-04-12 +http://www.semanlink.net/tag/lesk_algorithm|comment|"[#Word sense disambiguation](/tag/word_sense_disambiguation) algorithm based on the assumption that words in a given ""neighborhood"" (section of text) tend to share a common topic +" +http://www.semanlink.net/tag/lesk_algorithm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lesk_algorithm|describedBy|https://en.wikipedia.org/wiki/Lesk_algorithm +http://www.semanlink.net/tag/lesk_algorithm|uri|http://www.semanlink.net/tag/lesk_algorithm +http://www.semanlink.net/tag/lesk_algorithm|broader_prefLabel|Algorithmes +http://www.semanlink.net/tag/lesk_algorithm|broader_prefLabel|Distributional semantics +http://www.semanlink.net/tag/lesk_algorithm|broader_prefLabel|Word-sense disambiguation +http://www.semanlink.net/tag/lesk_algorithm|broader_related|http://www.semanlink.net/tag/clustering_of_text_documents +http://www.semanlink.net/tag/lesk_algorithm|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/lesk_algorithm|broader_related|http://www.semanlink.net/tag/entity_linking +http://www.semanlink.net/tag/afrique_du_sud|prefLabel|Afrique du Sud +http://www.semanlink.net/tag/afrique_du_sud|broader|http://www.semanlink.net/tag/afrique_australe +http://www.semanlink.net/tag/afrique_du_sud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afrique_du_sud|uri|http://www.semanlink.net/tag/afrique_du_sud +http://www.semanlink.net/tag/afrique_du_sud|broader_prefLabel|Afrique australe +http://www.semanlink.net/tag/pbs|prefLabel|PBS +http://www.semanlink.net/tag/pbs|broader|http://www.semanlink.net/tag/television +http://www.semanlink.net/tag/pbs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pbs|uri|http://www.semanlink.net/tag/pbs +http://www.semanlink.net/tag/pbs|broader_prefLabel|Télévision +http://www.semanlink.net/tag/pbs|broader_altLabel|TV +http://www.semanlink.net/tag/emmanuelle_bernes|creationTime|2013-03-23T10:06:46Z +http://www.semanlink.net/tag/emmanuelle_bernes|prefLabel|Emmanuelle Bernes +http://www.semanlink.net/tag/emmanuelle_bernes|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/emmanuelle_bernes|creationDate|2013-03-23 
+http://www.semanlink.net/tag/emmanuelle_bernes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/emmanuelle_bernes|uri|http://www.semanlink.net/tag/emmanuelle_bernes +http://www.semanlink.net/tag/emmanuelle_bernes|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/genome|prefLabel|Génome +http://www.semanlink.net/tag/genome|broader|http://www.semanlink.net/tag/genetique +http://www.semanlink.net/tag/genome|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/genome|uri|http://www.semanlink.net/tag/genome +http://www.semanlink.net/tag/genome|broader_prefLabel|Genetics +http://www.semanlink.net/tag/genome|broader_prefLabel|Génétique +http://www.semanlink.net/tag/shelley_powers|prefLabel|Shelley Powers +http://www.semanlink.net/tag/shelley_powers|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/shelley_powers|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/shelley_powers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/shelley_powers|altLabel|Mad Techie Woman +http://www.semanlink.net/tag/shelley_powers|altLabel|Burningbird +http://www.semanlink.net/tag/shelley_powers|uri|http://www.semanlink.net/tag/shelley_powers +http://www.semanlink.net/tag/shelley_powers|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/shelley_powers|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/shelley_powers|broader_altLabel|Technical guys +http://www.semanlink.net/tag/documentaire|creationTime|2008-06-11T00:44:11Z +http://www.semanlink.net/tag/documentaire|prefLabel|Documentaire +http://www.semanlink.net/tag/documentaire|creationDate|2008-06-11 +http://www.semanlink.net/tag/documentaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/documentaire|uri|http://www.semanlink.net/tag/documentaire +http://www.semanlink.net/tag/electric_car|creationTime|2008-11-20T22:04:48Z +http://www.semanlink.net/tag/electric_car|prefLabel|Electric car +http://www.semanlink.net/tag/electric_car|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/electric_car|creationDate|2008-11-20 +http://www.semanlink.net/tag/electric_car|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/electric_car|altLabel|Voiture électrique +http://www.semanlink.net/tag/electric_car|uri|http://www.semanlink.net/tag/electric_car +http://www.semanlink.net/tag/electric_car|broader_prefLabel|Automobile +http://www.semanlink.net/tag/electric_car|broader_altLabel|Automotive +http://www.semanlink.net/tag/lobby_agroalimentaire|prefLabel|Lobby agroalimentaire +http://www.semanlink.net/tag/lobby_agroalimentaire|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/lobby_agroalimentaire|broader|http://www.semanlink.net/tag/agro_industrie +http://www.semanlink.net/tag/lobby_agroalimentaire|broader|http://www.semanlink.net/tag/lobbies_economiques +http://www.semanlink.net/tag/lobby_agroalimentaire|broader|http://www.semanlink.net/tag/lobby +http://www.semanlink.net/tag/lobby_agroalimentaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lobby_agroalimentaire|uri|http://www.semanlink.net/tag/lobby_agroalimentaire +http://www.semanlink.net/tag/lobby_agroalimentaire|broader_prefLabel|Agriculture 
+http://www.semanlink.net/tag/lobby_agroalimentaire|broader_prefLabel|Agro-industrie +http://www.semanlink.net/tag/lobby_agroalimentaire|broader_prefLabel|Lobbies économiques +http://www.semanlink.net/tag/lobby_agroalimentaire|broader_prefLabel|Lobby +http://www.semanlink.net/tag/transport|prefLabel|Transport +http://www.semanlink.net/tag/transport|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/transport|uri|http://www.semanlink.net/tag/transport +http://www.semanlink.net/tag/piratage_des_oeuvres|prefLabel|Piratage des œuvres +http://www.semanlink.net/tag/piratage_des_oeuvres|broader|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.semanlink.net/tag/piratage_des_oeuvres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/piratage_des_oeuvres|uri|http://www.semanlink.net/tag/piratage_des_oeuvres +http://www.semanlink.net/tag/piratage_des_oeuvres|broader_prefLabel|Propriété intellectuelle +http://www.semanlink.net/tag/empire_colonial_francais|prefLabel|Empire colonial français +http://www.semanlink.net/tag/empire_colonial_francais|broader|http://www.semanlink.net/tag/colonisation +http://www.semanlink.net/tag/empire_colonial_francais|broader|http://www.semanlink.net/tag/histoire_de_france +http://www.semanlink.net/tag/empire_colonial_francais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/empire_colonial_francais|uri|http://www.semanlink.net/tag/empire_colonial_francais +http://www.semanlink.net/tag/empire_colonial_francais|broader_prefLabel|Colonisation +http://www.semanlink.net/tag/empire_colonial_francais|broader_prefLabel|Histoire de France +http://www.semanlink.net/tag/empire_colonial_francais|broader_altLabel|Colonialisme +http://www.semanlink.net/tag/virtuoso_doc|creationTime|2008-12-28T12:45:30Z +http://www.semanlink.net/tag/virtuoso_doc|prefLabel|Virtuoso:doc +http://www.semanlink.net/tag/virtuoso_doc|broader|http://www.semanlink.net/tag/virtuoso +http://www.semanlink.net/tag/virtuoso_doc|creationDate|2008-12-28 +http://www.semanlink.net/tag/virtuoso_doc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/virtuoso_doc|uri|http://www.semanlink.net/tag/virtuoso_doc +http://www.semanlink.net/tag/virtuoso_doc|broader_prefLabel|Virtuoso +http://www.semanlink.net/tag/mobile_computing|creationTime|2017-09-17T16:43:05Z +http://www.semanlink.net/tag/mobile_computing|prefLabel|Mobile computing +http://www.semanlink.net/tag/mobile_computing|creationDate|2017-09-17 +http://www.semanlink.net/tag/mobile_computing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mobile_computing|uri|http://www.semanlink.net/tag/mobile_computing +http://www.semanlink.net/tag/zero_shot|creationTime|2020-08-12T17:03:26Z +http://www.semanlink.net/tag/zero_shot|prefLabel|Zero shot +http://www.semanlink.net/tag/zero_shot|creationDate|2020-08-12 +http://www.semanlink.net/tag/zero_shot|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zero_shot|uri|http://www.semanlink.net/tag/zero_shot +http://www.semanlink.net/tag/jean_rohmer|creationTime|2009-01-28T17:21:23Z +http://www.semanlink.net/tag/jean_rohmer|prefLabel|Jean Rohmer +http://www.semanlink.net/tag/jean_rohmer|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/jean_rohmer|creationDate|2009-01-28 
+http://www.semanlink.net/tag/jean_rohmer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jean_rohmer|type|http://xmlns.com/foaf/0.1/Person +http://www.semanlink.net/tag/jean_rohmer|uri|http://www.semanlink.net/tag/jean_rohmer +http://www.semanlink.net/tag/jean_rohmer|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/jean_rohmer|broader_altLabel|Technical guys +http://www.semanlink.net/tag/uris_within_uris|creationTime|2014-08-30T12:49:04Z +http://www.semanlink.net/tag/uris_within_uris|prefLabel|URIs within URIs +http://www.semanlink.net/tag/uris_within_uris|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/uris_within_uris|creationDate|2014-08-30 +http://www.semanlink.net/tag/uris_within_uris|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uris_within_uris|uri|http://www.semanlink.net/tag/uris_within_uris +http://www.semanlink.net/tag/uris_within_uris|broader_prefLabel|URI +http://www.semanlink.net/tag/foundation_models|creationTime|2021-09-12T23:23:21Z +http://www.semanlink.net/tag/foundation_models|prefLabel|Foundation Models +http://www.semanlink.net/tag/foundation_models|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/foundation_models|related|http://www.semanlink.net/tag/openai_gpt +http://www.semanlink.net/tag/foundation_models|related|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/tag/foundation_models|related|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/foundation_models|creationDate|2021-09-12 +http://www.semanlink.net/tag/foundation_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/foundation_models|uri|http://www.semanlink.net/tag/foundation_models +http://www.semanlink.net/tag/foundation_models|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/foundation_models|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/foundation_models|broader_altLabel|AI +http://www.semanlink.net/tag/foundation_models|broader_altLabel|IA +http://www.semanlink.net/tag/foundation_models|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/nlp_baidu|creationTime|2021-10-26T16:03:09Z +http://www.semanlink.net/tag/nlp_baidu|prefLabel|NLP@Baidu +http://www.semanlink.net/tag/nlp_baidu|broader|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/tag/nlp_baidu|broader|http://www.semanlink.net/tag/baidu +http://www.semanlink.net/tag/nlp_baidu|creationDate|2021-10-26 +http://www.semanlink.net/tag/nlp_baidu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_baidu|uri|http://www.semanlink.net/tag/nlp_baidu +http://www.semanlink.net/tag/nlp_baidu|broader_prefLabel|NLP Teams +http://www.semanlink.net/tag/nlp_baidu|broader_prefLabel|Baidu +http://www.semanlink.net/tag/nlp_baidu|broader_altLabel|NLP Groups +http://www.semanlink.net/tag/rdf4j|creationTime|2021-01-05T14:25:39Z +http://www.semanlink.net/tag/rdf4j|prefLabel|RDF4J +http://www.semanlink.net/tag/rdf4j|broader|http://www.semanlink.net/tag/rdf_framework +http://www.semanlink.net/tag/rdf4j|creationDate|2021-01-05 +http://www.semanlink.net/tag/rdf4j|comment|Replaces [Sesame](tag:sesame) +http://www.semanlink.net/tag/rdf4j|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf4j|describedBy|https://rdf4j.org/ 
+http://www.semanlink.net/tag/rdf4j|uri|http://www.semanlink.net/tag/rdf4j +http://www.semanlink.net/tag/rdf4j|broader_prefLabel|RDF Framework +http://www.semanlink.net/tag/data_publica|creationTime|2012-10-22T23:12:22Z +http://www.semanlink.net/tag/data_publica|prefLabel|Data Publica +http://www.semanlink.net/tag/data_publica|broader|http://www.semanlink.net/tag/data_portal +http://www.semanlink.net/tag/data_publica|creationDate|2012-10-22 +http://www.semanlink.net/tag/data_publica|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_publica|homepage|http://www.data-publica.com +http://www.semanlink.net/tag/data_publica|uri|http://www.semanlink.net/tag/data_publica +http://www.semanlink.net/tag/data_publica|broader_prefLabel|Data portal +http://www.semanlink.net/tag/google_ai_blog|creationTime|2020-12-09T12:08:04Z +http://www.semanlink.net/tag/google_ai_blog|prefLabel|Google AI Blog +http://www.semanlink.net/tag/google_ai_blog|broader|http://www.semanlink.net/tag/blog +http://www.semanlink.net/tag/google_ai_blog|broader|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/tag/google_ai_blog|creationDate|2020-12-09 +http://www.semanlink.net/tag/google_ai_blog|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_ai_blog|uri|http://www.semanlink.net/tag/google_ai_blog +http://www.semanlink.net/tag/google_ai_blog|broader_prefLabel|Blog +http://www.semanlink.net/tag/google_ai_blog|broader_prefLabel|AI@Google +http://www.semanlink.net/tag/chant|creationTime|2013-08-05T14:42:41Z +http://www.semanlink.net/tag/chant|prefLabel|Chant +http://www.semanlink.net/tag/chant|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/chant|creationDate|2013-08-05 +http://www.semanlink.net/tag/chant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chant|uri|http://www.semanlink.net/tag/chant +http://www.semanlink.net/tag/chant|broader_prefLabel|Musique +http://www.semanlink.net/tag/chant|broader_altLabel|Music +http://www.semanlink.net/tag/ecriture|prefLabel|Ecriture +http://www.semanlink.net/tag/ecriture|broader|http://www.semanlink.net/tag/divers +http://www.semanlink.net/tag/ecriture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ecriture|uri|http://www.semanlink.net/tag/ecriture +http://www.semanlink.net/tag/ecriture|broader_prefLabel|Divers +http://www.semanlink.net/tag/argentine|prefLabel|Argentine +http://www.semanlink.net/tag/argentine|broader|http://www.semanlink.net/tag/amerique_du_sud +http://www.semanlink.net/tag/argentine|broader|http://www.semanlink.net/tag/amerique_latine +http://www.semanlink.net/tag/argentine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/argentine|uri|http://www.semanlink.net/tag/argentine +http://www.semanlink.net/tag/argentine|broader_prefLabel|Amérique du sud +http://www.semanlink.net/tag/argentine|broader_prefLabel|Amérique latine +http://www.semanlink.net/tag/ocr|creationTime|2019-11-05T18:38:36Z +http://www.semanlink.net/tag/ocr|prefLabel|OCR +http://www.semanlink.net/tag/ocr|creationDate|2019-11-05 +http://www.semanlink.net/tag/ocr|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ocr|uri|http://www.semanlink.net/tag/ocr +http://www.semanlink.net/tag/venus_prehistoriques|creationTime|2007-11-21T15:48:20Z +http://www.semanlink.net/tag/venus_prehistoriques|prefLabel|Vénus préhistoriques 
+http://www.semanlink.net/tag/venus_prehistoriques|broader|http://www.semanlink.net/tag/sculpture
+http://www.semanlink.net/tag/venus_prehistoriques|broader|http://www.semanlink.net/tag/venus_divinite
+http://www.semanlink.net/tag/venus_prehistoriques|creationDate|2007-11-21
+http://www.semanlink.net/tag/venus_prehistoriques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/venus_prehistoriques|uri|http://www.semanlink.net/tag/venus_prehistoriques
+http://www.semanlink.net/tag/venus_prehistoriques|broader_prefLabel|Sculpture
+http://www.semanlink.net/tag/venus_prehistoriques|broader_prefLabel|Vénus (divinité)
+http://www.semanlink.net/tag/venus_prehistoriques|broader_altLabel|Statuaire
+http://www.semanlink.net/tag/2018|creationTime|2018-12-27T12:02:06Z
+http://www.semanlink.net/tag/2018|prefLabel|2018
+http://www.semanlink.net/tag/2018|creationDate|2018-12-27
+http://www.semanlink.net/tag/2018|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/2018|uri|http://www.semanlink.net/tag/2018
+http://www.semanlink.net/tag/ldp_w3c|creationTime|2014-10-12T22:52:39Z
+http://www.semanlink.net/tag/ldp_w3c|prefLabel|LDP @ W3C
+http://www.semanlink.net/tag/ldp_w3c|broader|http://www.semanlink.net/tag/linked_data_platform
+http://www.semanlink.net/tag/ldp_w3c|broader|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/ldp_w3c|creationDate|2014-10-12
+http://www.semanlink.net/tag/ldp_w3c|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ldp_w3c|uri|http://www.semanlink.net/tag/ldp_w3c
+http://www.semanlink.net/tag/ldp_w3c|broader_prefLabel|Linked Data Platform
+http://www.semanlink.net/tag/ldp_w3c|broader_prefLabel|W3C
+http://www.semanlink.net/tag/ldp_w3c|broader_altLabel|LDP
+http://www.semanlink.net/tag/ldp_w3c|broader_related|http://www.semanlink.net/tag/owl
+http://www.semanlink.net/tag/ldp_w3c|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/histoire_des_sciences|prefLabel|Histoire des sciences
+http://www.semanlink.net/tag/histoire_des_sciences|broader|http://www.semanlink.net/tag/science
+http://www.semanlink.net/tag/histoire_des_sciences|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/histoire_des_sciences|uri|http://www.semanlink.net/tag/histoire_des_sciences
+http://www.semanlink.net/tag/histoire_des_sciences|broader_prefLabel|Science
+http://www.semanlink.net/tag/histoire_des_sciences|broader_altLabel|sciences
+http://www.semanlink.net/tag/armement|prefLabel|Armement
+http://www.semanlink.net/tag/armement|broader|http://www.semanlink.net/tag/militaire
+http://www.semanlink.net/tag/armement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/armement|uri|http://www.semanlink.net/tag/armement
+http://www.semanlink.net/tag/armement|broader_prefLabel|Militaire
+http://www.semanlink.net/tag/aggregators|prefLabel|Aggregators
+http://www.semanlink.net/tag/aggregators|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/aggregators|uri|http://www.semanlink.net/tag/aggregators
+http://www.semanlink.net/tag/lehman_brothers|creationTime|2010-12-22T17:00:08Z
+http://www.semanlink.net/tag/lehman_brothers|prefLabel|Lehman Brothers
+http://www.semanlink.net/tag/lehman_brothers|creationDate|2010-12-22
+http://www.semanlink.net/tag/lehman_brothers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/lehman_brothers|uri|http://www.semanlink.net/tag/lehman_brothers
+http://www.semanlink.net/tag/q_a|creationTime|2010-09-30T23:42:56Z
+http://www.semanlink.net/tag/q_a|prefLabel|Q&A
+http://www.semanlink.net/tag/q_a|creationDate|2010-09-30
+http://www.semanlink.net/tag/q_a|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/q_a|uri|http://www.semanlink.net/tag/q_a
+http://www.semanlink.net/tag/hierarchies_in_ml|creationTime|2020-08-30T01:25:34Z
+http://www.semanlink.net/tag/hierarchies_in_ml|prefLabel|Hierarchies in ML
+http://www.semanlink.net/tag/hierarchies_in_ml|broader|http://www.semanlink.net/tag/machine_learning_problems
+http://www.semanlink.net/tag/hierarchies_in_ml|creationDate|2020-08-30
+http://www.semanlink.net/tag/hierarchies_in_ml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/hierarchies_in_ml|uri|http://www.semanlink.net/tag/hierarchies_in_ml
+http://www.semanlink.net/tag/hierarchies_in_ml|broader_prefLabel|Machine learning: problems
+http://www.semanlink.net/tag/crm|creationTime|2009-06-07T00:24:06Z
+http://www.semanlink.net/tag/crm|prefLabel|CRM
+http://www.semanlink.net/tag/crm|creationDate|2009-06-07
+http://www.semanlink.net/tag/crm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/crm|uri|http://www.semanlink.net/tag/crm
+http://www.semanlink.net/tag/whistleblower|creationTime|2015-11-08T18:30:25Z
+http://www.semanlink.net/tag/whistleblower|prefLabel|Whistleblower
+http://www.semanlink.net/tag/whistleblower|creationDate|2015-11-08
+http://www.semanlink.net/tag/whistleblower|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/whistleblower|describedBy|https://en.wikipedia.org/wiki/Whistleblower
+http://www.semanlink.net/tag/whistleblower|uri|http://www.semanlink.net/tag/whistleblower
+http://www.semanlink.net/tag/ruby|prefLabel|Ruby
+http://www.semanlink.net/tag/ruby|broader|http://www.semanlink.net/tag/programming_language
+http://www.semanlink.net/tag/ruby|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ruby|uri|http://www.semanlink.net/tag/ruby
+http://www.semanlink.net/tag/ruby|broader_prefLabel|Programming language
+http://www.semanlink.net/tag/ruby|broader_altLabel|Langage de programmation
+http://www.semanlink.net/tag/cafard|prefLabel|Cafard
+http://www.semanlink.net/tag/cafard|broader|http://www.semanlink.net/tag/insecte
+http://www.semanlink.net/tag/cafard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cafard|uri|http://www.semanlink.net/tag/cafard
+http://www.semanlink.net/tag/cafard|broader_prefLabel|Insecte
+http://www.semanlink.net/tag/leaks|creationTime|2016-04-04T12:42:58Z
+http://www.semanlink.net/tag/leaks|prefLabel|Leaks
+http://www.semanlink.net/tag/leaks|related|http://www.semanlink.net/tag/edward_snowden
+http://www.semanlink.net/tag/leaks|creationDate|2016-04-04
+http://www.semanlink.net/tag/leaks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/leaks|uri|http://www.semanlink.net/tag/leaks
+http://www.semanlink.net/tag/city_states|creationTime|2021-05-29T00:50:48Z
+http://www.semanlink.net/tag/city_states|prefLabel|City-States
+http://www.semanlink.net/tag/city_states|creationDate|2021-05-29
+http://www.semanlink.net/tag/city_states|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/city_states|uri|http://www.semanlink.net/tag/city_states
+http://www.semanlink.net/tag/monotremes|prefLabel|Monotrèmes
+http://www.semanlink.net/tag/monotremes|broader|http://www.semanlink.net/tag/animal
+http://www.semanlink.net/tag/monotremes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/monotremes|uri|http://www.semanlink.net/tag/monotremes
+http://www.semanlink.net/tag/monotremes|broader_prefLabel|Animal
+http://www.semanlink.net/tag/e_commerce_data|creationTime|2020-12-14T23:32:27Z
+http://www.semanlink.net/tag/e_commerce_data|prefLabel|e-commerce data
+http://www.semanlink.net/tag/e_commerce_data|broader|http://www.semanlink.net/tag/e_commerce
+http://www.semanlink.net/tag/e_commerce_data|creationDate|2020-12-14
+http://www.semanlink.net/tag/e_commerce_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/e_commerce_data|uri|http://www.semanlink.net/tag/e_commerce_data
+http://www.semanlink.net/tag/e_commerce_data|broader_prefLabel|e-commerce
+http://www.semanlink.net/tag/opera_do_malandro|creationTime|2008-11-14T22:37:44Z
+http://www.semanlink.net/tag/opera_do_malandro|prefLabel|Ópera do Malandro
+http://www.semanlink.net/tag/opera_do_malandro|broader|http://www.semanlink.net/tag/film_bresilien
+http://www.semanlink.net/tag/opera_do_malandro|broader|http://www.semanlink.net/tag/chico_buarque
+http://www.semanlink.net/tag/opera_do_malandro|creationDate|2008-11-14
+http://www.semanlink.net/tag/opera_do_malandro|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/opera_do_malandro|uri|http://www.semanlink.net/tag/opera_do_malandro
+http://www.semanlink.net/tag/opera_do_malandro|broader_prefLabel|Filme brasileiro
+http://www.semanlink.net/tag/opera_do_malandro|broader_prefLabel|Chico Buarque
+http://www.semanlink.net/tag/opera_do_malandro|broader_altLabel|Film brésilien
+http://www.semanlink.net/tag/caroline_fourest|creationTime|2012-01-15T13:02:24Z
+http://www.semanlink.net/tag/caroline_fourest|prefLabel|Caroline Fourest
+http://www.semanlink.net/tag/caroline_fourest|broader|http://www.semanlink.net/tag/journaliste
+http://www.semanlink.net/tag/caroline_fourest|creationDate|2012-01-15
+http://www.semanlink.net/tag/caroline_fourest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/caroline_fourest|uri|http://www.semanlink.net/tag/caroline_fourest
+http://www.semanlink.net/tag/caroline_fourest|broader_prefLabel|Journaliste
+http://www.semanlink.net/tag/thucydide|prefLabel|Thucydide
+http://www.semanlink.net/tag/thucydide|broader|http://www.semanlink.net/tag/historien
+http://www.semanlink.net/tag/thucydide|broader|http://www.semanlink.net/tag/grece_antique
+http://www.semanlink.net/tag/thucydide|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/thucydide|uri|http://www.semanlink.net/tag/thucydide
+http://www.semanlink.net/tag/thucydide|broader_prefLabel|Historien
+http://www.semanlink.net/tag/thucydide|broader_prefLabel|Grèce antique
+http://www.semanlink.net/tag/web_marchand|prefLabel|Web marchand
+http://www.semanlink.net/tag/web_marchand|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/web_marchand|uri|http://www.semanlink.net/tag/web_marchand
+http://www.semanlink.net/tag/mongol|prefLabel|Mongol
+http://www.semanlink.net/tag/mongol|broader|http://www.semanlink.net/tag/asie
+http://www.semanlink.net/tag/mongol|broader|http://www.semanlink.net/tag/peuples
+http://www.semanlink.net/tag/mongol|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mongol|uri|http://www.semanlink.net/tag/mongol
+http://www.semanlink.net/tag/mongol|broader_prefLabel|Asie
+http://www.semanlink.net/tag/mongol|broader_prefLabel|Peuples
+http://www.semanlink.net/tag/entities|creationTime|2012-05-22T12:06:12Z
+http://www.semanlink.net/tag/entities|prefLabel|Entities
+http://www.semanlink.net/tag/entities|creationDate|2012-05-22
+http://www.semanlink.net/tag/entities|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/entities|uri|http://www.semanlink.net/tag/entities
+http://www.semanlink.net/tag/crustace|prefLabel|Crustacé
+http://www.semanlink.net/tag/crustace|broader|http://www.semanlink.net/tag/animal
+http://www.semanlink.net/tag/crustace|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/crustace|uri|http://www.semanlink.net/tag/crustace
+http://www.semanlink.net/tag/crustace|broader_prefLabel|Animal
+http://www.semanlink.net/tag/illusion_d_optique|prefLabel|Illusion d'optique
+http://www.semanlink.net/tag/illusion_d_optique|broader|http://www.semanlink.net/tag/divers
+http://www.semanlink.net/tag/illusion_d_optique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/illusion_d_optique|uri|http://www.semanlink.net/tag/illusion_d_optique
+http://www.semanlink.net/tag/illusion_d_optique|broader_prefLabel|Divers
+http://www.semanlink.net/tag/anticipation|prefLabel|Anticipation
+http://www.semanlink.net/tag/anticipation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/anticipation|uri|http://www.semanlink.net/tag/anticipation
+http://www.semanlink.net/tag/danny_ayers|prefLabel|Danny Ayers
+http://www.semanlink.net/tag/danny_ayers|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/danny_ayers|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/danny_ayers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/danny_ayers|uri|http://www.semanlink.net/tag/danny_ayers
+http://www.semanlink.net/tag/danny_ayers|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/danny_ayers|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/danny_ayers|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/eclipse_juno|creationTime|2012-09-06T14:53:56Z
+http://www.semanlink.net/tag/eclipse_juno|prefLabel|Eclipse Juno
+http://www.semanlink.net/tag/eclipse_juno|broader|http://www.semanlink.net/tag/eclipse
+http://www.semanlink.net/tag/eclipse_juno|creationDate|2012-09-06
+http://www.semanlink.net/tag/eclipse_juno|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/eclipse_juno|uri|http://www.semanlink.net/tag/eclipse_juno
+http://www.semanlink.net/tag/eclipse_juno|broader_prefLabel|Eclipse
+http://www.semanlink.net/tag/hierarchical_categories|creationTime|2018-05-12T16:54:24Z
+http://www.semanlink.net/tag/hierarchical_categories|prefLabel|Hierarchical Categories
+http://www.semanlink.net/tag/hierarchical_categories|broader|http://www.semanlink.net/tag/concept_hierarchies
+http://www.semanlink.net/tag/hierarchical_categories|broader|http://www.semanlink.net/tag/semanlink_related
+http://www.semanlink.net/tag/hierarchical_categories|related|http://www.semanlink.net/tag/hierarchical_tags
+http://www.semanlink.net/tag/hierarchical_categories|creationDate|2018-05-12
+http://www.semanlink.net/tag/hierarchical_categories|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/hierarchical_categories|uri|http://www.semanlink.net/tag/hierarchical_categories
+http://www.semanlink.net/tag/hierarchical_categories|broader_prefLabel|Concept hierarchies
+http://www.semanlink.net/tag/hierarchical_categories|broader_prefLabel|Semanlink related
+http://www.semanlink.net/tag/hierarchical_categories|broader_related|http://www.semanlink.net/tag/skos
+http://www.semanlink.net/tag/linear_algebra|creationTime|2017-07-19T16:12:26Z
+http://www.semanlink.net/tag/linear_algebra|prefLabel|Linear algebra
+http://www.semanlink.net/tag/linear_algebra|broader|http://www.semanlink.net/tag/algebre
+http://www.semanlink.net/tag/linear_algebra|creationDate|2017-07-19
+http://www.semanlink.net/tag/linear_algebra|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/linear_algebra|describedBy|https://en.wikipedia.org/wiki/Linear_algebra
+http://www.semanlink.net/tag/linear_algebra|altLabel|Algèbre linéaire
+http://www.semanlink.net/tag/linear_algebra|uri|http://www.semanlink.net/tag/linear_algebra
+http://www.semanlink.net/tag/linear_algebra|broader_prefLabel|Algèbre
+http://www.semanlink.net/tag/numpy|creationTime|2015-10-19T12:19:02Z
+http://www.semanlink.net/tag/numpy|prefLabel|NumPy
+http://www.semanlink.net/tag/numpy|broader|http://www.semanlink.net/tag/python
+http://www.semanlink.net/tag/numpy|creationDate|2015-10-19
+http://www.semanlink.net/tag/numpy|comment|Python library that provides a multidimensional array object, the ndarray object. This encapsulates n-dimensional arrays of homogeneous data types
+http://www.semanlink.net/tag/numpy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/numpy|uri|http://www.semanlink.net/tag/numpy
+http://www.semanlink.net/tag/numpy|broader_prefLabel|Python
+http://www.semanlink.net/tag/urss|prefLabel|Ex URSS
+http://www.semanlink.net/tag/urss|prefLabel|URSS
+http://www.semanlink.net/tag/urss|broader|http://www.semanlink.net/tag/communisme
+http://www.semanlink.net/tag/urss|broader|http://www.semanlink.net/tag/asie
+http://www.semanlink.net/tag/urss|broader|http://www.semanlink.net/tag/europe
+http://www.semanlink.net/tag/urss|broader|http://www.semanlink.net/tag/urss
+http://www.semanlink.net/tag/urss|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/urss|uri|http://www.semanlink.net/tag/urss
+http://www.semanlink.net/tag/urss|broader_prefLabel|Communisme
+http://www.semanlink.net/tag/urss|broader_prefLabel|Asie
+http://www.semanlink.net/tag/urss|broader_prefLabel|Europe
+http://www.semanlink.net/tag/urss|broader_prefLabel|Ex URSS
+http://www.semanlink.net/tag/urss|broader_prefLabel|URSS
+http://www.semanlink.net/tag/w3c_tag|creationTime|2007-06-28T00:07:42Z
+http://www.semanlink.net/tag/w3c_tag|prefLabel|W3C TAG
+http://www.semanlink.net/tag/w3c_tag|broader|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/w3c_tag|creationDate|2007-06-28
+http://www.semanlink.net/tag/w3c_tag|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/w3c_tag|uri|http://www.semanlink.net/tag/w3c_tag
+http://www.semanlink.net/tag/w3c_tag|broader_prefLabel|W3C
+http://www.semanlink.net/tag/w3c_tag|broader_related|http://www.semanlink.net/tag/owl
+http://www.semanlink.net/tag/w3c_tag|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/dan_jurafsky|creationTime|2012-04-15T16:10:23Z
+http://www.semanlink.net/tag/dan_jurafsky|prefLabel|Dan Jurafsky
+http://www.semanlink.net/tag/dan_jurafsky|broader|http://www.semanlink.net/tag/nlp_girls_and_guys
+http://www.semanlink.net/tag/dan_jurafsky|creationDate|2012-04-15
+http://www.semanlink.net/tag/dan_jurafsky|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/dan_jurafsky|uri|http://www.semanlink.net/tag/dan_jurafsky
+http://www.semanlink.net/tag/dan_jurafsky|broader_prefLabel|NLP girls and guys
+http://www.semanlink.net/tag/rdfquery|creationTime|2012-02-21T09:41:05Z
+http://www.semanlink.net/tag/rdfquery|prefLabel|rdfQuery
+http://www.semanlink.net/tag/rdfquery|broader|http://www.semanlink.net/tag/rdfa_tool
+http://www.semanlink.net/tag/rdfquery|broader|http://www.semanlink.net/tag/javascript_rdf
+http://www.semanlink.net/tag/rdfquery|broader|http://www.semanlink.net/tag/jquery
+http://www.semanlink.net/tag/rdfquery|related|http://www.semanlink.net/tag/jeni_tennison
+http://www.semanlink.net/tag/rdfquery|related|http://www.semanlink.net/tag/vie_vienna_iks_editables
+http://www.semanlink.net/tag/rdfquery|creationDate|2012-02-21
+http://www.semanlink.net/tag/rdfquery|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rdfquery|describedBy|http://code.google.com/p/rdfquery/
+http://www.semanlink.net/tag/rdfquery|uri|http://www.semanlink.net/tag/rdfquery
+http://www.semanlink.net/tag/rdfquery|broader_prefLabel|RDFa tool
+http://www.semanlink.net/tag/rdfquery|broader_prefLabel|Javascript RDF
+http://www.semanlink.net/tag/rdfquery|broader_prefLabel|jQuery
+http://www.semanlink.net/tag/rdfquery|broader_related|http://www.semanlink.net/tag/rdf_in_json
+http://www.semanlink.net/tag/jeu|prefLabel|Jeu
+http://www.semanlink.net/tag/jeu|broader|http://www.semanlink.net/tag/jeux
+http://www.semanlink.net/tag/jeu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/jeu|uri|http://www.semanlink.net/tag/jeu
+http://www.semanlink.net/tag/jeu|broader_prefLabel|Jeux
+http://www.semanlink.net/tag/agriculture_francaise|prefLabel|Agriculture française
+http://www.semanlink.net/tag/agriculture_francaise|broader|http://www.semanlink.net/tag/agriculture
+http://www.semanlink.net/tag/agriculture_francaise|broader|http://www.semanlink.net/tag/economie_francaise
+http://www.semanlink.net/tag/agriculture_francaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/agriculture_francaise|uri|http://www.semanlink.net/tag/agriculture_francaise
+http://www.semanlink.net/tag/agriculture_francaise|broader_prefLabel|Agriculture
+http://www.semanlink.net/tag/agriculture_francaise|broader_prefLabel|Economie française
+http://www.semanlink.net/tag/agriculture_francaise|broader_altLabel|Economie France
+http://www.semanlink.net/tag/amazonie|prefLabel|Amazonie
+http://www.semanlink.net/tag/amazonie|broader|http://www.semanlink.net/tag/bresil
+http://www.semanlink.net/tag/amazonie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/amazonie|uri|http://www.semanlink.net/tag/amazonie
+http://www.semanlink.net/tag/amazonie|broader_prefLabel|Brésil
+http://www.semanlink.net/tag/amazonie|broader_related|http://www.semanlink.net/tag/saudade
+http://www.semanlink.net/tag/github|creationTime|2012-08-14T00:59:51Z
+http://www.semanlink.net/tag/github|prefLabel|GitHub
+http://www.semanlink.net/tag/github|creationDate|2012-08-14
+http://www.semanlink.net/tag/github|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/github|homepage|https://github.com
+http://www.semanlink.net/tag/github|uri|http://www.semanlink.net/tag/github
+http://www.semanlink.net/tag/semantic_interoperability|creationTime|2008-01-19T17:24:41Z
+http://www.semanlink.net/tag/semantic_interoperability|prefLabel|Semantic Interoperability
+http://www.semanlink.net/tag/semantic_interoperability|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/semantic_interoperability|creationDate|2008-01-19
+http://www.semanlink.net/tag/semantic_interoperability|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_interoperability|uri|http://www.semanlink.net/tag/semantic_interoperability
+http://www.semanlink.net/tag/semantic_interoperability|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semantic_interoperability|broader_altLabel|sw
+http://www.semanlink.net/tag/semantic_interoperability|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/asynchronous|creationTime|2018-04-09T10:47:29Z
+http://www.semanlink.net/tag/asynchronous|prefLabel|Asynchronous
+http://www.semanlink.net/tag/asynchronous|creationDate|2018-04-09
+http://www.semanlink.net/tag/asynchronous|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/asynchronous|uri|http://www.semanlink.net/tag/asynchronous
+http://www.semanlink.net/tag/personal_knowledge_graph|creationTime|2019-05-28T17:05:59Z
+http://www.semanlink.net/tag/personal_knowledge_graph|prefLabel|Personal Knowledge Graph
+http://www.semanlink.net/tag/personal_knowledge_graph|broader|http://www.semanlink.net/tag/knowledge_graph
+http://www.semanlink.net/tag/personal_knowledge_graph|creationDate|2019-05-28
+http://www.semanlink.net/tag/personal_knowledge_graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/personal_knowledge_graph|uri|http://www.semanlink.net/tag/personal_knowledge_graph
+http://www.semanlink.net/tag/personal_knowledge_graph|broader_prefLabel|Knowledge Graphs
+http://www.semanlink.net/tag/personal_knowledge_graph|broader_altLabel|Knowledge Graph
+http://www.semanlink.net/tag/personal_knowledge_graph|broader_altLabel|KG
+http://www.semanlink.net/tag/google_research|creationTime|2013-03-12T14:50:52Z
+http://www.semanlink.net/tag/google_research|prefLabel|Google Research
+http://www.semanlink.net/tag/google_research|broader|http://www.semanlink.net/tag/google
+http://www.semanlink.net/tag/google_research|creationDate|2013-03-12
+http://www.semanlink.net/tag/google_research|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/google_research|uri|http://www.semanlink.net/tag/google_research
+http://www.semanlink.net/tag/google_research|broader_prefLabel|Google
+http://www.semanlink.net/tag/google_research|broader_related|http://www.semanlink.net/tag/search_engines
+http://www.semanlink.net/tag/scotland|creationTime|2020-09-30T01:38:23Z
+http://www.semanlink.net/tag/scotland|prefLabel|Scotland
+http://www.semanlink.net/tag/scotland|broader|http://www.semanlink.net/tag/royaume_uni
+http://www.semanlink.net/tag/scotland|creationDate|2020-09-30
+http://www.semanlink.net/tag/scotland|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/scotland|describedBy|https://en.wikipedia.org/wiki/Scotland
+http://www.semanlink.net/tag/scotland|uri|http://www.semanlink.net/tag/scotland
+http://www.semanlink.net/tag/scotland|broader_prefLabel|Royaume Uni
+http://www.semanlink.net/tag/scotland|broader_altLabel|UK
+http://www.semanlink.net/tag/conditional_random_field|creationTime|2014-04-24T01:19:12Z
+http://www.semanlink.net/tag/conditional_random_field|prefLabel|Conditional random fields
+http://www.semanlink.net/tag/conditional_random_field|broader|http://www.semanlink.net/tag/machine_learning_techniques
+http://www.semanlink.net/tag/conditional_random_field|broader|http://www.semanlink.net/tag/probabilistic_graphical_models
+http://www.semanlink.net/tag/conditional_random_field|related|http://www.semanlink.net/tag/andrew_mccallum
+http://www.semanlink.net/tag/conditional_random_field|related|http://www.semanlink.net/tag/part_of_speech_tagging
+http://www.semanlink.net/tag/conditional_random_field|related|http://www.semanlink.net/tag/hidden_markov_model
+http://www.semanlink.net/tag/conditional_random_field|related|http://www.semanlink.net/tag/sequence_labeling
+http://www.semanlink.net/tag/conditional_random_field|creationDate|2014-04-24
+http://www.semanlink.net/tag/conditional_random_field|comment|"Class of statistical modelling method for structured prediction (prediction of structured objects, rather than scalar) that can take context into account; e.g., in NLP, the linear chain CRF predicts sequences of labels for sequences of input samples (take
+as input a set of features for each token in a
+sentence, and learn to predict an optimal sequence of
+labels for the full sentence)
+
+Applications in POS Tagging, shallow parsing, named entity recognition, gene finding
+
+
+"
+http://www.semanlink.net/tag/conditional_random_field|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/conditional_random_field|describedBy|https://en.wikipedia.org/wiki/Conditional_random_field
+http://www.semanlink.net/tag/conditional_random_field|altLabel|CRF
+http://www.semanlink.net/tag/conditional_random_field|uri|http://www.semanlink.net/tag/conditional_random_field
+http://www.semanlink.net/tag/conditional_random_field|broader_prefLabel|Machine learning: techniques
+http://www.semanlink.net/tag/conditional_random_field|broader_prefLabel|Probabilistic Graphical Models
+http://www.semanlink.net/tag/experience_scientifique|prefLabel|Expérience scientifique
+http://www.semanlink.net/tag/experience_scientifique|broader|http://www.semanlink.net/tag/science
+http://www.semanlink.net/tag/experience_scientifique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/experience_scientifique|uri|http://www.semanlink.net/tag/experience_scientifique
+http://www.semanlink.net/tag/experience_scientifique|broader_prefLabel|Science
+http://www.semanlink.net/tag/experience_scientifique|broader_altLabel|sciences
+http://www.semanlink.net/tag/justice_americaine|creationTime|2007-07-12T22:52:09Z
+http://www.semanlink.net/tag/justice_americaine|prefLabel|Justice américaine
+http://www.semanlink.net/tag/justice_americaine|broader|http://www.semanlink.net/tag/usa
+http://www.semanlink.net/tag/justice_americaine|broader|http://www.semanlink.net/tag/justice
+http://www.semanlink.net/tag/justice_americaine|creationDate|2007-07-12
+http://www.semanlink.net/tag/justice_americaine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/justice_americaine|uri|http://www.semanlink.net/tag/justice_americaine
+http://www.semanlink.net/tag/justice_americaine|broader_prefLabel|USA
+http://www.semanlink.net/tag/justice_americaine|broader_prefLabel|Justice
+http://www.semanlink.net/tag/justice_americaine|broader_altLabel|ÉTATS-UNIS
+http://www.semanlink.net/tag/justice_americaine|broader_altLabel|United States
+http://www.semanlink.net/tag/fete_nationale|creationTime|2007-07-14T00:55:17Z
+http://www.semanlink.net/tag/fete_nationale|prefLabel|Fête nationale
+http://www.semanlink.net/tag/fete_nationale|creationDate|2007-07-14
+http://www.semanlink.net/tag/fete_nationale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fete_nationale|uri|http://www.semanlink.net/tag/fete_nationale
+http://www.semanlink.net/tag/collaborative_ontologie_creation|creationTime|2013-04-16T15:12:01Z
+http://www.semanlink.net/tag/collaborative_ontologie_creation|prefLabel|Collaborative ontologie creation
+http://www.semanlink.net/tag/collaborative_ontologie_creation|broader|http://www.semanlink.net/tag/linked_data_collaborative_editing
+http://www.semanlink.net/tag/collaborative_ontologie_creation|broader|http://www.semanlink.net/tag/ontologies
+http://www.semanlink.net/tag/collaborative_ontologie_creation|creationDate|2013-04-16
+http://www.semanlink.net/tag/collaborative_ontologie_creation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/collaborative_ontologie_creation|uri|http://www.semanlink.net/tag/collaborative_ontologie_creation
+http://www.semanlink.net/tag/collaborative_ontologie_creation|broader_prefLabel|Linked Data / collaborative editing
+http://www.semanlink.net/tag/collaborative_ontologie_creation|broader_prefLabel|Ontologies
+http://www.semanlink.net/tag/collaborative_ontologie_creation|broader_altLabel|Ontology
+http://www.semanlink.net/tag/personal_information_management|prefLabel|Personal-information management
+http://www.semanlink.net/tag/personal_information_management|broader|http://www.semanlink.net/tag/informatique
+http://www.semanlink.net/tag/personal_information_management|related|http://www.semanlink.net/tag/personal_knowledge_management
+http://www.semanlink.net/tag/personal_information_management|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/personal_information_management|altLabel|PIM
+http://www.semanlink.net/tag/personal_information_management|uri|http://www.semanlink.net/tag/personal_information_management
+http://www.semanlink.net/tag/personal_information_management|broader_prefLabel|Informatique
+http://www.semanlink.net/tag/afripedia|creationTime|2012-11-19T16:53:08Z
+http://www.semanlink.net/tag/afripedia|prefLabel|Afripedia
+http://www.semanlink.net/tag/afripedia|broader|http://www.semanlink.net/tag/francophonie
+http://www.semanlink.net/tag/afripedia|broader|http://www.semanlink.net/tag/wikipedia
+http://www.semanlink.net/tag/afripedia|broader|http://www.semanlink.net/tag/wikimedia
+http://www.semanlink.net/tag/afripedia|creationDate|2012-11-19
+http://www.semanlink.net/tag/afripedia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/afripedia|uri|http://www.semanlink.net/tag/afripedia
+http://www.semanlink.net/tag/afripedia|broader_prefLabel|Francophonie
+http://www.semanlink.net/tag/afripedia|broader_prefLabel|Wikipedia
+http://www.semanlink.net/tag/afripedia|broader_prefLabel|Wikimedia
+http://www.semanlink.net/tag/afripedia|broader_related|http://www.semanlink.net/tag/dbpedia
+http://www.semanlink.net/tag/afripedia|broader_related|http://www.semanlink.net/tag/wikipedia
+http://www.semanlink.net/tag/google_maps|prefLabel|Google Maps
+http://www.semanlink.net/tag/google_maps|broader|http://www.semanlink.net/tag/carte
+http://www.semanlink.net/tag/google_maps|broader|http://www.semanlink.net/tag/earth_map
+http://www.semanlink.net/tag/google_maps|broader|http://www.semanlink.net/tag/google
+http://www.semanlink.net/tag/google_maps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/google_maps|uri|http://www.semanlink.net/tag/google_maps
+http://www.semanlink.net/tag/google_maps|broader_prefLabel|Carte
+http://www.semanlink.net/tag/google_maps|broader_prefLabel|Earth map
+http://www.semanlink.net/tag/google_maps|broader_prefLabel|Google
+http://www.semanlink.net/tag/google_maps|broader_related|http://www.semanlink.net/tag/search_engines
+http://www.semanlink.net/tag/nlp_datasets|creationTime|2018-06-23T01:06:00Z
+http://www.semanlink.net/tag/nlp_datasets|prefLabel|NLP datasets
+http://www.semanlink.net/tag/nlp_datasets|broader|http://www.semanlink.net/tag/nlp
+http://www.semanlink.net/tag/nlp_datasets|broader|http://www.semanlink.net/tag/nlp_tools
+http://www.semanlink.net/tag/nlp_datasets|creationDate|2018-06-23
+http://www.semanlink.net/tag/nlp_datasets|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nlp_datasets|uri|http://www.semanlink.net/tag/nlp_datasets
+http://www.semanlink.net/tag/nlp_datasets|broader_prefLabel|NLP
+http://www.semanlink.net/tag/nlp_datasets|broader_prefLabel|NLP tools
+http://www.semanlink.net/tag/nlp_datasets|broader_altLabel|TALN
+http://www.semanlink.net/tag/nlp_datasets|broader_altLabel|Traitement Automatique du Langage Naturel
+http://www.semanlink.net/tag/nlp_datasets|broader_altLabel|Natural Language Processing
+http://www.semanlink.net/tag/transductive_svm|creationTime|2018-03-03T14:31:57Z
+http://www.semanlink.net/tag/transductive_svm|prefLabel|Transductive SVM
+http://www.semanlink.net/tag/transductive_svm|broader|http://www.semanlink.net/tag/transductive_learning
+http://www.semanlink.net/tag/transductive_svm|creationDate|2018-03-03
+http://www.semanlink.net/tag/transductive_svm|comment|"Extend SVMs with the aim of max-margin classification while ensuring that there are as few unlabelled observations near the margin as possible
+"
+http://www.semanlink.net/tag/transductive_svm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/transductive_svm|uri|http://www.semanlink.net/tag/transductive_svm
+http://www.semanlink.net/tag/transductive_svm|broader_prefLabel|Transductive Learning
+http://www.semanlink.net/tag/transductive_svm|broader_related|http://www.semanlink.net/tag/semi_supervised_learning
+http://www.semanlink.net/tag/social_networks|prefLabel|Social Networks
+http://www.semanlink.net/tag/social_networks|broader|http://www.semanlink.net/tag/internet
+http://www.semanlink.net/tag/social_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/social_networks|uri|http://www.semanlink.net/tag/social_networks
+http://www.semanlink.net/tag/social_networks|broader_prefLabel|Internet
+http://www.semanlink.net/tag/mobilite|creationTime|2017-11-03T20:16:04Z
+http://www.semanlink.net/tag/mobilite|prefLabel|Mobilité
+http://www.semanlink.net/tag/mobilite|creationDate|2017-11-03
+http://www.semanlink.net/tag/mobilite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mobilite|uri|http://www.semanlink.net/tag/mobilite
+http://www.semanlink.net/tag/rdf_application|prefLabel|RDF Application
+http://www.semanlink.net/tag/rdf_application|broader|http://www.semanlink.net/tag/semantic_web_application
+http://www.semanlink.net/tag/rdf_application|broader|http://www.semanlink.net/tag/rdf
+http://www.semanlink.net/tag/rdf_application|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rdf_application|uri|http://www.semanlink.net/tag/rdf_application
+http://www.semanlink.net/tag/rdf_application|broader_prefLabel|Semantic Web : Application
+http://www.semanlink.net/tag/rdf_application|broader_prefLabel|RDF
+http://www.semanlink.net/tag/rdf_application|broader_related|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/rdf_application|broader_related|http://www.semanlink.net/tag/ora_lassila
+http://www.semanlink.net/tag/rdf_application|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/rdf_application|broader_related|http://www.semanlink.net/tag/guha
+http://www.semanlink.net/tag/rdf_application|broader_related|http://www.semanlink.net/tag/grddl
+http://www.semanlink.net/tag/elevage_industriel|creationTime|2013-02-14T08:44:40Z
+http://www.semanlink.net/tag/elevage_industriel|prefLabel|Elevage industriel
+http://www.semanlink.net/tag/elevage_industriel|broader|http://www.semanlink.net/tag/elevage
+http://www.semanlink.net/tag/elevage_industriel|related|http://www.semanlink.net/tag/elevage_porcin
+http://www.semanlink.net/tag/elevage_industriel|creationDate|2013-02-14
+http://www.semanlink.net/tag/elevage_industriel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/elevage_industriel|altLabel|Industrial farming
+http://www.semanlink.net/tag/elevage_industriel|uri|http://www.semanlink.net/tag/elevage_industriel
+http://www.semanlink.net/tag/elevage_industriel|broader_prefLabel|Elevage
+http://www.semanlink.net/tag/business_intelligence|creationTime|2010-07-31T13:45:58Z
+http://www.semanlink.net/tag/business_intelligence|prefLabel|Business intelligence
+http://www.semanlink.net/tag/business_intelligence|related|http://www.semanlink.net/tag/xbrl
+http://www.semanlink.net/tag/business_intelligence|creationDate|2010-07-31
+http://www.semanlink.net/tag/business_intelligence|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/business_intelligence|describedBy|https://en.wikipedia.org/wiki/Business_intelligence
+http://www.semanlink.net/tag/business_intelligence|altLabel|BI
+http://www.semanlink.net/tag/business_intelligence|uri|http://www.semanlink.net/tag/business_intelligence
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|creationTime|2007-07-14T01:01:52Z
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|prefLabel|L'Afrique à la Bastille - 13 juillet 2007
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader|http://www.semanlink.net/tag/liberte_egalite_fraternite
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader|http://www.semanlink.net/tag/rfi
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader|http://www.semanlink.net/tag/14_juillet
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader|http://www.semanlink.net/tag/music_of_africa
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|creationDate|2007-07-14
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|uri|http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader_prefLabel|Liberté, égalité, fraternité
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader_prefLabel|RFI
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader_prefLabel|14 juillet
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader_prefLabel|Music of Africa
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader_altLabel|Radio France Internationale
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader_altLabel|Musique africaine
+http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007|broader_altLabel|African music
+http://www.semanlink.net/tag/retard_technologique_europeen|prefLabel|Retard technologique européen
+http://www.semanlink.net/tag/retard_technologique_europeen|broader|http://www.semanlink.net/tag/technologie
+http://www.semanlink.net/tag/retard_technologique_europeen|broader|http://www.semanlink.net/tag/europe
+http://www.semanlink.net/tag/retard_technologique_europeen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/retard_technologique_europeen|uri|http://www.semanlink.net/tag/retard_technologique_europeen
+http://www.semanlink.net/tag/retard_technologique_europeen|broader_prefLabel|Technologie
+http://www.semanlink.net/tag/retard_technologique_europeen|broader_prefLabel|Europe
+http://www.semanlink.net/tag/markown_javascript|creationTime|2015-10-10T20:58:25Z
+http://www.semanlink.net/tag/markown_javascript|prefLabel|Markown / Javascript
+http://www.semanlink.net/tag/markown_javascript|broader|http://www.semanlink.net/tag/markdown
+http://www.semanlink.net/tag/markown_javascript|creationDate|2015-10-10
+http://www.semanlink.net/tag/markown_javascript|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/markown_javascript|uri|http://www.semanlink.net/tag/markown_javascript
+http://www.semanlink.net/tag/markown_javascript|broader_prefLabel|Markdown
+http://www.semanlink.net/tag/nasa|prefLabel|NASA
+http://www.semanlink.net/tag/nasa|broader|http://www.semanlink.net/tag/usa
+http://www.semanlink.net/tag/nasa|broader|http://www.semanlink.net/tag/exploration_spatiale
+http://www.semanlink.net/tag/nasa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nasa|homepage|http://www.nasa.gov/
+http://www.semanlink.net/tag/nasa|uri|http://www.semanlink.net/tag/nasa
+http://www.semanlink.net/tag/nasa|broader_prefLabel|USA
+http://www.semanlink.net/tag/nasa|broader_prefLabel|Exploration spatiale
+http://www.semanlink.net/tag/nasa|broader_altLabel|ÉTATS-UNIS
+http://www.semanlink.net/tag/nasa|broader_altLabel|United States
+http://www.semanlink.net/tag/harry_halpin|creationTime|2014-11-08T07:31:00Z
+http://www.semanlink.net/tag/harry_halpin|prefLabel|Harry Halpin
+http://www.semanlink.net/tag/harry_halpin|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/harry_halpin|creationDate|2014-11-08
+http://www.semanlink.net/tag/harry_halpin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/harry_halpin|uri|http://www.semanlink.net/tag/harry_halpin
+http://www.semanlink.net/tag/harry_halpin|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/harry_halpin|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/map_reduce|creationTime|2013-04-23T02:21:46Z
+http://www.semanlink.net/tag/map_reduce|prefLabel|Map-reduce
+http://www.semanlink.net/tag/map_reduce|broader|http://www.semanlink.net/tag/big_data
+http://www.semanlink.net/tag/map_reduce|creationDate|2013-04-23
+http://www.semanlink.net/tag/map_reduce|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/map_reduce|uri|http://www.semanlink.net/tag/map_reduce
+http://www.semanlink.net/tag/map_reduce|broader_prefLabel|Big Data
+http://www.semanlink.net/tag/map_reduce|broader_related|http://www.semanlink.net/tag/nosql
+http://www.semanlink.net/tag/the_web_is_dying|creationTime|2017-11-01T10:32:44Z
+http://www.semanlink.net/tag/the_web_is_dying|prefLabel|The Web is dying
+http://www.semanlink.net/tag/the_web_is_dying|broader|http://www.semanlink.net/tag/web
+http://www.semanlink.net/tag/the_web_is_dying|related|http://www.semanlink.net/tag/the_web_sucks
+http://www.semanlink.net/tag/the_web_is_dying|creationDate|2017-11-01
+http://www.semanlink.net/tag/the_web_is_dying|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/the_web_is_dying|uri|http://www.semanlink.net/tag/the_web_is_dying
+http://www.semanlink.net/tag/the_web_is_dying|broader_prefLabel|Web
+http://www.semanlink.net/tag/database|creationTime|2007-04-19T22:47:11Z
+http://www.semanlink.net/tag/database|prefLabel|Database
+http://www.semanlink.net/tag/database|creationDate|2007-04-19
+http://www.semanlink.net/tag/database|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/database|uri|http://www.semanlink.net/tag/database
+http://www.semanlink.net/tag/personal_knowledge_management|creationTime|2010-07-01T16:22:42Z
+http://www.semanlink.net/tag/personal_knowledge_management|prefLabel|Personal Knowledge Management
+http://www.semanlink.net/tag/personal_knowledge_management|creationDate|2010-07-01
+http://www.semanlink.net/tag/personal_knowledge_management|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/personal_knowledge_management|uri|http://www.semanlink.net/tag/personal_knowledge_management
+http://www.semanlink.net/tag/capitalisme_financier|creationTime|2011-01-16T12:10:40Z
+http://www.semanlink.net/tag/capitalisme_financier|prefLabel|Capitalisme financier
+http://www.semanlink.net/tag/capitalisme_financier|broader|http://www.semanlink.net/tag/finance
+http://www.semanlink.net/tag/capitalisme_financier|broader|http://www.semanlink.net/tag/capitalisme
+http://www.semanlink.net/tag/capitalisme_financier|creationDate|2011-01-16
+http://www.semanlink.net/tag/capitalisme_financier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/capitalisme_financier|uri|http://www.semanlink.net/tag/capitalisme_financier
+http://www.semanlink.net/tag/capitalisme_financier|broader_prefLabel|Finance
+http://www.semanlink.net/tag/capitalisme_financier|broader_prefLabel|Capitalisme
+http://www.semanlink.net/tag/semanlink_archives|creationTime|2007-11-13T22:52:38Z
+http://www.semanlink.net/tag/semanlink_archives|prefLabel|Semanlink: archives
+http://www.semanlink.net/tag/semanlink_archives|broader|http://www.semanlink.net/tag/semanlink
+http://www.semanlink.net/tag/semanlink_archives|creationDate|2007-11-13
+http://www.semanlink.net/tag/semanlink_archives|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semanlink_archives|uri|http://www.semanlink.net/tag/semanlink_archives
+http://www.semanlink.net/tag/semanlink_archives|broader_prefLabel|Semanlink
+http://www.semanlink.net/tag/semanlink_archives|broader_altLabel|SL
+http://www.semanlink.net/tag/semanlink_archives|broader_related|http://www.semanlink.net/tag/semanlink_related
+http://www.semanlink.net/tag/over_engineering|creationTime|2017-08-18T12:35:37Z
+http://www.semanlink.net/tag/over_engineering|prefLabel|Over-Engineering
+http://www.semanlink.net/tag/over_engineering|creationDate|2017-08-18
+http://www.semanlink.net/tag/over_engineering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/over_engineering|uri|http://www.semanlink.net/tag/over_engineering
+http://www.semanlink.net/tag/csv|creationTime|2010-08-12T16:05:51Z
+http://www.semanlink.net/tag/csv|prefLabel|CSV
+http://www.semanlink.net/tag/csv|broader|http://www.semanlink.net/tag/tables
+http://www.semanlink.net/tag/csv|creationDate|2010-08-12
+http://www.semanlink.net/tag/csv|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/csv|uri|http://www.semanlink.net/tag/csv
+http://www.semanlink.net/tag/csv|broader_prefLabel|Tables
+http://www.semanlink.net/tag/peste|prefLabel|Peste
+http://www.semanlink.net/tag/peste|broader|http://www.semanlink.net/tag/pandemie
+http://www.semanlink.net/tag/peste|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/peste|uri|http://www.semanlink.net/tag/peste
+http://www.semanlink.net/tag/peste|broader_prefLabel|Pandémie
+http://www.semanlink.net/tag/duck_typing|creationTime|2007-07-07T13:54:14Z
+http://www.semanlink.net/tag/duck_typing|prefLabel|Duck Typing
+http://www.semanlink.net/tag/duck_typing|broader|http://www.semanlink.net/tag/programming
+http://www.semanlink.net/tag/duck_typing|broader|http://www.semanlink.net/tag/type_system
+http://www.semanlink.net/tag/duck_typing|creationDate|2007-07-07
+http://www.semanlink.net/tag/duck_typing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/duck_typing|describedBy|https://en.wikipedia.org/wiki/Duck_typing
+http://www.semanlink.net/tag/duck_typing|uri|http://www.semanlink.net/tag/duck_typing
+http://www.semanlink.net/tag/duck_typing|broader_prefLabel|Programming
+http://www.semanlink.net/tag/duck_typing|broader_prefLabel|Type system
+http://www.semanlink.net/tag/devices|prefLabel|Devices
+http://www.semanlink.net/tag/devices|broader|http://www.semanlink.net/tag/informatique
+http://www.semanlink.net/tag/devices|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/devices|uri|http://www.semanlink.net/tag/devices
+http://www.semanlink.net/tag/devices|broader_prefLabel|Informatique
+http://www.semanlink.net/tag/progres_technique|creationTime|2010-05-05T00:15:05Z
+http://www.semanlink.net/tag/progres_technique|prefLabel|Progrès technique
+http://www.semanlink.net/tag/progres_technique|creationDate|2010-05-05
+http://www.semanlink.net/tag/progres_technique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/progres_technique|uri|http://www.semanlink.net/tag/progres_technique
+http://www.semanlink.net/tag/apple_software|prefLabel|Apple Software
+http://www.semanlink.net/tag/apple_software|broader|http://www.semanlink.net/tag/apple
+http://www.semanlink.net/tag/apple_software|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/apple_software|uri|http://www.semanlink.net/tag/apple_software
+http://www.semanlink.net/tag/apple_software|broader_prefLabel|Apple
+http://www.semanlink.net/tag/jena_user_conference|prefLabel|Jena User Conference
+http://www.semanlink.net/tag/jena_user_conference|broader|http://www.semanlink.net/tag/jena
+http://www.semanlink.net/tag/jena_user_conference|broader|http://www.semanlink.net/tag/semantic_web_conferences
+http://www.semanlink.net/tag/jena_user_conference|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/jena_user_conference|altLabel|JUC
+http://www.semanlink.net/tag/jena_user_conference|uri|http://www.semanlink.net/tag/jena_user_conference
+http://www.semanlink.net/tag/jena_user_conference|broader_prefLabel|Jena
+http://www.semanlink.net/tag/jena_user_conference|broader_prefLabel|Semantic Web conferences
+http://www.semanlink.net/tag/jena_user_conference|broader_related|http://www.semanlink.net/tag/hp
+http://www.semanlink.net/tag/combining_numerical_and_text_features|creationTime|2019-05-20T13:06:31Z
+http://www.semanlink.net/tag/combining_numerical_and_text_features|prefLabel|Combining numerical and text features
+http://www.semanlink.net/tag/combining_numerical_and_text_features|broader|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp
+http://www.semanlink.net/tag/combining_numerical_and_text_features|creationDate|2019-05-20
+http://www.semanlink.net/tag/combining_numerical_and_text_features|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/combining_numerical_and_text_features|uri|http://www.semanlink.net/tag/combining_numerical_and_text_features
+http://www.semanlink.net/tag/combining_numerical_and_text_features|broader_prefLabel|Combining text and structured data (ML-NLP)
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|creationTime|2010-07-31T13:46:46Z
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|prefLabel|Business Intelligence and Semantic Web
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|broader|http://www.semanlink.net/tag/business_intelligence
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|creationDate|2010-07-31
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|uri|http://www.semanlink.net/tag/business_intelligence_and_semantic_web
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|broader_prefLabel|Business intelligence
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|broader_altLabel|BI
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|broader_altLabel|sw
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/business_intelligence_and_semantic_web|broader_related|http://www.semanlink.net/tag/xbrl
+http://www.semanlink.net/tag/information_resources|creationTime|2007-10-13T00:30:50Z
+http://www.semanlink.net/tag/information_resources|prefLabel|Information resources
+http://www.semanlink.net/tag/information_resources|broader|http://www.semanlink.net/tag/uri
+http://www.semanlink.net/tag/information_resources|broader|http://www.semanlink.net/tag/linked_data
+http://www.semanlink.net/tag/information_resources|creationDate|2007-10-13
+http://www.semanlink.net/tag/information_resources|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/information_resources|altLabel|Concept's URI
+http://www.semanlink.net/tag/information_resources|uri|http://www.semanlink.net/tag/information_resources
+http://www.semanlink.net/tag/information_resources|broader_prefLabel|URI
+http://www.semanlink.net/tag/information_resources|broader_prefLabel|Linked Data
+http://www.semanlink.net/tag/information_resources|broader_altLabel|LD
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/yves_raymond
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/frederick_giasson
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/chris_bizer
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/jamendo
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/richard_cyganiak
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/httprange_14
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/musicbrainz
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/kingsley_idehen
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/giovanni_tummarello
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/tom_heath
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/rdf_data_source
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/tim_berners_lee
+http://www.semanlink.net/tag/information_resources|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.semanlink.net/tag/piraterie|creationTime|2019-10-26T17:10:29Z
+http://www.semanlink.net/tag/piraterie|prefLabel|Piraterie
+http://www.semanlink.net/tag/piraterie|creationDate|2019-10-26
+http://www.semanlink.net/tag/piraterie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/piraterie|uri|http://www.semanlink.net/tag/piraterie
+http://www.semanlink.net/tag/carnaval|creationTime|2009-06-14T21:28:39Z
+http://www.semanlink.net/tag/carnaval|prefLabel|Carnaval
+http://www.semanlink.net/tag/carnaval|creationDate|2009-06-14
+http://www.semanlink.net/tag/carnaval|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/carnaval|uri|http://www.semanlink.net/tag/carnaval
+http://www.semanlink.net/tag/installing_wordpress|creationTime|2007-07-07T15:26:14Z
+http://www.semanlink.net/tag/installing_wordpress|prefLabel|Installing WordPress
+http://www.semanlink.net/tag/installing_wordpress|broader|http://www.semanlink.net/tag/wordpress
+http://www.semanlink.net/tag/installing_wordpress|broader|http://www.semanlink.net/tag/installing_apps
+http://www.semanlink.net/tag/installing_wordpress|creationDate|2007-07-07
+http://www.semanlink.net/tag/installing_wordpress|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/installing_wordpress|uri|http://www.semanlink.net/tag/installing_wordpress
+http://www.semanlink.net/tag/installing_wordpress|broader_prefLabel|WordPress
+http://www.semanlink.net/tag/installing_wordpress|broader_prefLabel|Installing apps
+http://www.semanlink.net/tag/nlp_as_a_service|creationTime|2019-04-24T13:49:17Z
+http://www.semanlink.net/tag/nlp_as_a_service|prefLabel|NLP as a service
+http://www.semanlink.net/tag/nlp_as_a_service|broader|http://www.semanlink.net/tag/nlp_tools
+http://www.semanlink.net/tag/nlp_as_a_service|creationDate|2019-04-24
+http://www.semanlink.net/tag/nlp_as_a_service|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nlp_as_a_service|uri|http://www.semanlink.net/tag/nlp_as_a_service
+http://www.semanlink.net/tag/nlp_as_a_service|broader_prefLabel|NLP tools
+http://www.semanlink.net/tag/logiciel_libre|prefLabel|Logiciel libre
+http://www.semanlink.net/tag/logiciel_libre|broader|http://www.semanlink.net/tag/open_source
+http://www.semanlink.net/tag/logiciel_libre|broader|http://www.semanlink.net/tag/informatique
+http://www.semanlink.net/tag/logiciel_libre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/logiciel_libre|uri|http://www.semanlink.net/tag/logiciel_libre
+http://www.semanlink.net/tag/logiciel_libre|broader_prefLabel|Open Source
+http://www.semanlink.net/tag/logiciel_libre|broader_prefLabel|Informatique
+http://www.semanlink.net/tag/mineralogie|creationTime|2007-04-05T21:46:13Z
+http://www.semanlink.net/tag/mineralogie|prefLabel|Minéralogie
+http://www.semanlink.net/tag/mineralogie|broader|http://www.semanlink.net/tag/geologie
+http://www.semanlink.net/tag/mineralogie|creationDate|2007-04-05
+http://www.semanlink.net/tag/mineralogie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mineralogie|uri|http://www.semanlink.net/tag/mineralogie
+http://www.semanlink.net/tag/mineralogie|broader_prefLabel|Géologie
+http://www.semanlink.net/tag/peter_breunig|creationTime|2021-08-18T13:34:47Z
+http://www.semanlink.net/tag/peter_breunig|prefLabel|Peter Breunig
+http://www.semanlink.net/tag/peter_breunig|broader|http://www.semanlink.net/tag/antiquite_africaine
+http://www.semanlink.net/tag/peter_breunig|broader|http://www.semanlink.net/tag/archeologue
+http://www.semanlink.net/tag/peter_breunig|related|http://www.semanlink.net/tag/nok
+http://www.semanlink.net/tag/peter_breunig|creationDate|2021-08-18
+http://www.semanlink.net/tag/peter_breunig|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/peter_breunig|uri|http://www.semanlink.net/tag/peter_breunig
+http://www.semanlink.net/tag/peter_breunig|broader_prefLabel|Archéologie africaine
+http://www.semanlink.net/tag/peter_breunig|broader_prefLabel|Archéologue
+http://www.semanlink.net/tag/peter_breunig|broader_related|http://www.semanlink.net/tag/art_d_afrique
+http://www.semanlink.net/tag/html|prefLabel|HTML
+http://www.semanlink.net/tag/html|broader|http://www.semanlink.net/tag/internet
+http://www.semanlink.net/tag/html|broader|http://www.semanlink.net/tag/dev
+http://www.semanlink.net/tag/html|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/html|uri|http://www.semanlink.net/tag/html
+http://www.semanlink.net/tag/html|broader_prefLabel|Internet
+http://www.semanlink.net/tag/html|broader_prefLabel|Dev
+http://www.semanlink.net/tag/electronic_frontier_foundation|creationTime|2017-09-22T01:01:55Z
+http://www.semanlink.net/tag/electronic_frontier_foundation|prefLabel|Electronic Frontier Foundation
+http://www.semanlink.net/tag/electronic_frontier_foundation|creationDate|2017-09-22
+http://www.semanlink.net/tag/electronic_frontier_foundation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/electronic_frontier_foundation|altLabel|EFF
+http://www.semanlink.net/tag/electronic_frontier_foundation|uri|http://www.semanlink.net/tag/electronic_frontier_foundation
+http://www.semanlink.net/tag/semweb_china|creationTime|2011-11-13T14:20:29Z
+http://www.semanlink.net/tag/semweb_china|prefLabel|SemWeb China
+http://www.semanlink.net/tag/semweb_china|broader|http://www.semanlink.net/tag/chine_technologie
+http://www.semanlink.net/tag/semweb_china|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/semweb_china|creationDate|2011-11-13
+http://www.semanlink.net/tag/semweb_china|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semweb_china|uri|http://www.semanlink.net/tag/semweb_china
+http://www.semanlink.net/tag/semweb_china|broader_prefLabel|Chine : technologie
+http://www.semanlink.net/tag/semweb_china|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semweb_china|broader_altLabel|sw
+http://www.semanlink.net/tag/semweb_china|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/apache_on_my_mac|creationTime|2008-12-01T00:47:50Z
+http://www.semanlink.net/tag/apache_on_my_mac|prefLabel|Apache on my mac
+http://www.semanlink.net/tag/apache_on_my_mac|broader|http://www.semanlink.net/tag/notes_d_install
+http://www.semanlink.net/tag/apache_on_my_mac|broader|http://www.semanlink.net/tag/apache
+http://www.semanlink.net/tag/apache_on_my_mac|creationDate|2008-12-01
+http://www.semanlink.net/tag/apache_on_my_mac|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/apache_on_my_mac|uri|http://www.semanlink.net/tag/apache_on_my_mac
+http://www.semanlink.net/tag/apache_on_my_mac|broader_prefLabel|Notes d'install
+http://www.semanlink.net/tag/apache_on_my_mac|broader_prefLabel|Apache web server
+http://www.semanlink.net/tag/politique_monetaire|creationTime|2007-09-18T22:24:22Z
+http://www.semanlink.net/tag/politique_monetaire|prefLabel|Politique monétaire
+http://www.semanlink.net/tag/politique_monetaire|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/politique_monetaire|creationDate|2007-09-18
+http://www.semanlink.net/tag/politique_monetaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/politique_monetaire|uri|http://www.semanlink.net/tag/politique_monetaire
+http://www.semanlink.net/tag/politique_monetaire|broader_prefLabel|Economie
+http://www.semanlink.net/tag/politique_monetaire|broader_related|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/iphoto|prefLabel|iphoto
+http://www.semanlink.net/tag/iphoto|broader|http://www.semanlink.net/tag/os_x_app
+http://www.semanlink.net/tag/iphoto|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/iphoto|uri|http://www.semanlink.net/tag/iphoto
+http://www.semanlink.net/tag/iphoto|broader_prefLabel|OS X app
+http://www.semanlink.net/tag/green_new_deal|creationTime|2019-04-26T23:02:34Z
+http://www.semanlink.net/tag/green_new_deal|prefLabel|Green New Deal
+http://www.semanlink.net/tag/green_new_deal|creationDate|2019-04-26
+http://www.semanlink.net/tag/green_new_deal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/green_new_deal|uri|http://www.semanlink.net/tag/green_new_deal
+http://www.semanlink.net/tag/abeille|creationTime|2007-02-21T00:02:31Z
+http://www.semanlink.net/tag/abeille|prefLabel|Abeille
+http://www.semanlink.net/tag/abeille|broader|http://www.semanlink.net/tag/insecte
+http://www.semanlink.net/tag/abeille|creationDate|2007-02-21
+http://www.semanlink.net/tag/abeille|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/abeille|uri|http://www.semanlink.net/tag/abeille
+http://www.semanlink.net/tag/abeille|broader_prefLabel|Insecte
+http://www.semanlink.net/tag/incremental_clustering|creationTime|2021-10-17T10:17:37Z
+http://www.semanlink.net/tag/incremental_clustering|prefLabel|Incremental Clustering
+http://www.semanlink.net/tag/incremental_clustering|broader|http://www.semanlink.net/tag/cluster_analysis
+http://www.semanlink.net/tag/incremental_clustering|creationDate|2021-10-17
+http://www.semanlink.net/tag/incremental_clustering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/incremental_clustering|uri|http://www.semanlink.net/tag/incremental_clustering
+http://www.semanlink.net/tag/incremental_clustering|broader_prefLabel|Clustering
+http://www.semanlink.net/tag/incremental_clustering|broader_altLabel|Data clustering
+http://www.semanlink.net/tag/incremental_clustering|broader_altLabel|Cluster analysis
+http://www.semanlink.net/tag/incremental_clustering|broader_related|http://www.semanlink.net/tag/statistical_classification
+http://www.semanlink.net/tag/applescript|prefLabel|AppleScript
+http://www.semanlink.net/tag/applescript|broader|http://www.semanlink.net/tag/mac_dev
+http://www.semanlink.net/tag/applescript|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/applescript|uri|http://www.semanlink.net/tag/applescript
+http://www.semanlink.net/tag/applescript|broader_prefLabel|Mac dev
+http://www.semanlink.net/tag/semantic_negotiation|creationTime|2008-01-19T17:10:19Z
+http://www.semanlink.net/tag/semantic_negotiation|prefLabel|Semantic Negotiation
+http://www.semanlink.net/tag/semantic_negotiation|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/semantic_negotiation|related|http://www.semanlink.net/tag/tap
+http://www.semanlink.net/tag/semantic_negotiation|creationDate|2008-01-19
+http://www.semanlink.net/tag/semantic_negotiation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_negotiation|uri|http://www.semanlink.net/tag/semantic_negotiation
+http://www.semanlink.net/tag/semantic_negotiation|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semantic_negotiation|broader_altLabel|sw
+http://www.semanlink.net/tag/semantic_negotiation|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/svn|creationTime|2012-06-27T16:47:07Z
+http://www.semanlink.net/tag/svn|prefLabel|svn
+http://www.semanlink.net/tag/svn|creationDate|2012-06-27
+http://www.semanlink.net/tag/svn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/svn|uri|http://www.semanlink.net/tag/svn
+http://www.semanlink.net/tag/histoire_de_l_art|prefLabel|Histoire de l'art
+http://www.semanlink.net/tag/histoire_de_l_art|broader|http://www.semanlink.net/tag/art
+http://www.semanlink.net/tag/histoire_de_l_art|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/histoire_de_l_art|uri|http://www.semanlink.net/tag/histoire_de_l_art
+http://www.semanlink.net/tag/histoire_de_l_art|broader_prefLabel|Art
+http://www.semanlink.net/tag/thesaurus|prefLabel|Thesaurus
+http://www.semanlink.net/tag/thesaurus|broader|http://www.semanlink.net/tag/thesaurus_taxonomies
+http://www.semanlink.net/tag/thesaurus|related|http://www.semanlink.net/tag/taxonomies
+http://www.semanlink.net/tag/thesaurus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/thesaurus|uri|http://www.semanlink.net/tag/thesaurus
+http://www.semanlink.net/tag/thesaurus|broader_prefLabel|Thesaurus & Taxonomies
+http://www.semanlink.net/tag/thesaurus|broader_related|http://www.semanlink.net/tag/ontologies
+http://www.semanlink.net/tag/rocard|prefLabel|Rocard
+http://www.semanlink.net/tag/rocard|broader|http://www.semanlink.net/tag/homme_politique
+http://www.semanlink.net/tag/rocard|broader|http://www.semanlink.net/tag/politique_francaise
+http://www.semanlink.net/tag/rocard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rocard|uri|http://www.semanlink.net/tag/rocard
+http://www.semanlink.net/tag/rocard|broader_prefLabel|Homme politique
+http://www.semanlink.net/tag/rocard|broader_prefLabel|Politique française
+http://www.semanlink.net/tag/signal|creationTime|2021-01-12T02:33:54Z
+http://www.semanlink.net/tag/signal|prefLabel|Signal
+http://www.semanlink.net/tag/signal|creationDate|2021-01-12
+http://www.semanlink.net/tag/signal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/signal|uri|http://www.semanlink.net/tag/signal
+http://www.semanlink.net/tag/web_app|creationTime|2021-06-30T16:42:11Z
+http://www.semanlink.net/tag/web_app|prefLabel|Web app
+http://www.semanlink.net/tag/web_app|broader|http://www.semanlink.net/tag/web_dev
+http://www.semanlink.net/tag/web_app|creationDate|2021-06-30
+http://www.semanlink.net/tag/web_app|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/web_app|uri|http://www.semanlink.net/tag/web_app
+http://www.semanlink.net/tag/web_app|broader_prefLabel|Web dev
+http://www.semanlink.net/tag/web_app|broader_altLabel|Web app dev
+http://www.semanlink.net/tag/information_theory|creationTime|2013-05-24T22:44:24Z
+http://www.semanlink.net/tag/information_theory|prefLabel|Information theory
+http://www.semanlink.net/tag/information_theory|broader|http://www.semanlink.net/tag/information
+http://www.semanlink.net/tag/information_theory|creationDate|2013-05-24
+http://www.semanlink.net/tag/information_theory|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/information_theory|uri|http://www.semanlink.net/tag/information_theory
+http://www.semanlink.net/tag/information_theory|broader_prefLabel|Information
+http://www.semanlink.net/tag/dictateur|prefLabel|Dictateur
+http://www.semanlink.net/tag/dictateur|broader|http://www.semanlink.net/tag/dictature
+http://www.semanlink.net/tag/dictateur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/dictateur|uri|http://www.semanlink.net/tag/dictateur
+http://www.semanlink.net/tag/dictateur|broader_prefLabel|Dictature
+http://www.semanlink.net/tag/leningrad|prefLabel|Leningrad
+http://www.semanlink.net/tag/leningrad|broader|http://www.semanlink.net/tag/urss
+http://www.semanlink.net/tag/leningrad|broader|http://www.semanlink.net/tag/ville
+http://www.semanlink.net/tag/leningrad|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/leningrad|uri|http://www.semanlink.net/tag/leningrad
+http://www.semanlink.net/tag/leningrad|broader_prefLabel|Ex URSS
+http://www.semanlink.net/tag/leningrad|broader_prefLabel|URSS
+http://www.semanlink.net/tag/leningrad|broader_prefLabel|Ville
+http://www.semanlink.net/tag/fast_ai|creationTime|2018-01-20T10:50:40Z
+http://www.semanlink.net/tag/fast_ai|prefLabel|fast.ai
+http://www.semanlink.net/tag/fast_ai|broader|http://www.semanlink.net/tag/machine_learning_course
+http://www.semanlink.net/tag/fast_ai|broader|http://www.semanlink.net/tag/online_course_materials
+http://www.semanlink.net/tag/fast_ai|broader|http://www.semanlink.net/tag/deep_learning
+http://www.semanlink.net/tag/fast_ai|creationDate|2018-01-20
+http://www.semanlink.net/tag/fast_ai|comment|"- [Home Page](https://www.fast.ai/)
+- [MOOC](https://course.fast.ai/)
+- [Github](https://github.com/fastai/fastai)
+- [Forum](https://forums.fast.ai/)
+- [docs.fast.ai](https://docs.fast.ai/)
+
+
+"
+http://www.semanlink.net/tag/fast_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fast_ai|homepage|https://www.fast.ai/
+http://www.semanlink.net/tag/fast_ai|altLabel|fastai
+http://www.semanlink.net/tag/fast_ai|uri|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/tag/fast_ai|broader_prefLabel|Machine Learning Course +http://www.semanlink.net/tag/fast_ai|broader_prefLabel|Online Course Materials +http://www.semanlink.net/tag/fast_ai|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/fast_ai|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/fast_ai|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/bourbaki|creationTime|2019-03-14T22:47:43Z +http://www.semanlink.net/tag/bourbaki|prefLabel|Bourbaki +http://www.semanlink.net/tag/bourbaki|broader|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/bourbaki|creationDate|2019-03-14 +http://www.semanlink.net/tag/bourbaki|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bourbaki|uri|http://www.semanlink.net/tag/bourbaki +http://www.semanlink.net/tag/bourbaki|broader_prefLabel|Mathématiques +http://www.semanlink.net/tag/bourbaki|broader_altLabel|Math +http://www.semanlink.net/tag/physique_des_particules|prefLabel|Physique des particules +http://www.semanlink.net/tag/physique_des_particules|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/physique_des_particules|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/physique_des_particules|uri|http://www.semanlink.net/tag/physique_des_particules +http://www.semanlink.net/tag/physique_des_particules|broader_prefLabel|Physique +http://www.semanlink.net/tag/physique_des_particules|broader_altLabel|Physics +http://www.semanlink.net/tag/greffe_de_tete|creationTime|2017-05-04T20:08:01Z +http://www.semanlink.net/tag/greffe_de_tete|prefLabel|Greffe de tête +http://www.semanlink.net/tag/greffe_de_tete|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/greffe_de_tete|broader|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.semanlink.net/tag/greffe_de_tete|creationDate|2017-05-04 +http://www.semanlink.net/tag/greffe_de_tete|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/greffe_de_tete|uri|http://www.semanlink.net/tag/greffe_de_tete +http://www.semanlink.net/tag/greffe_de_tete|broader_prefLabel|Brain +http://www.semanlink.net/tag/greffe_de_tete|broader_prefLabel|Nous vivons une époque moderne +http://www.semanlink.net/tag/greffe_de_tete|broader_altLabel|Cerveau +http://www.semanlink.net/tag/greffe_de_tete|broader_altLabel|C'est déjà demain +http://www.semanlink.net/tag/greffe_de_tete|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/colza_transgenique|creationTime|2007-10-23T00:25:47Z +http://www.semanlink.net/tag/colza_transgenique|prefLabel|Colza transgénique +http://www.semanlink.net/tag/colza_transgenique|broader|http://www.semanlink.net/tag/ogm +http://www.semanlink.net/tag/colza_transgenique|broader|http://www.semanlink.net/tag/colza +http://www.semanlink.net/tag/colza_transgenique|creationDate|2007-10-23 +http://www.semanlink.net/tag/colza_transgenique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/colza_transgenique|uri|http://www.semanlink.net/tag/colza_transgenique +http://www.semanlink.net/tag/colza_transgenique|broader_prefLabel|GMO +http://www.semanlink.net/tag/colza_transgenique|broader_prefLabel|Colza 
+http://www.semanlink.net/tag/colza_transgenique|broader_altLabel|OGM +http://www.semanlink.net/tag/airport|prefLabel|Airport +http://www.semanlink.net/tag/airport|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/airport|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/airport|uri|http://www.semanlink.net/tag/airport +http://www.semanlink.net/tag/airport|broader_prefLabel|Apple +http://www.semanlink.net/tag/destruction_de_vestiges_antiques|creationTime|2021-07-31T18:09:09Z +http://www.semanlink.net/tag/destruction_de_vestiges_antiques|prefLabel|Destruction de vestiges antiques +http://www.semanlink.net/tag/destruction_de_vestiges_antiques|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/destruction_de_vestiges_antiques|creationDate|2021-07-31 +http://www.semanlink.net/tag/destruction_de_vestiges_antiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/destruction_de_vestiges_antiques|uri|http://www.semanlink.net/tag/destruction_de_vestiges_antiques +http://www.semanlink.net/tag/destruction_de_vestiges_antiques|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/destruction_de_vestiges_antiques|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/gig_economy|creationTime|2020-10-14T19:27:19Z +http://www.semanlink.net/tag/gig_economy|prefLabel|Gig economy +http://www.semanlink.net/tag/gig_economy|creationDate|2020-10-14 +http://www.semanlink.net/tag/gig_economy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gig_economy|uri|http://www.semanlink.net/tag/gig_economy +http://www.semanlink.net/tag/online_dictionary|creationTime|2007-02-24T02:25:37Z +http://www.semanlink.net/tag/online_dictionary|prefLabel|Online dictionary +http://www.semanlink.net/tag/online_dictionary|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/online_dictionary|creationDate|2007-02-24 +http://www.semanlink.net/tag/online_dictionary|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/online_dictionary|uri|http://www.semanlink.net/tag/online_dictionary +http://www.semanlink.net/tag/online_dictionary|broader_prefLabel|Langues +http://www.semanlink.net/tag/os_x_10_6_snow_leopard|creationTime|2011-07-18T18:53:24Z +http://www.semanlink.net/tag/os_x_10_6_snow_leopard|prefLabel|OS X 10.6 - Snow leopard +http://www.semanlink.net/tag/os_x_10_6_snow_leopard|broader|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/tag/os_x_10_6_snow_leopard|creationDate|2011-07-18 +http://www.semanlink.net/tag/os_x_10_6_snow_leopard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/os_x_10_6_snow_leopard|uri|http://www.semanlink.net/tag/os_x_10_6_snow_leopard +http://www.semanlink.net/tag/os_x_10_6_snow_leopard|broader_prefLabel|Mac OS X +http://www.semanlink.net/tag/os_x_10_6_snow_leopard|broader_altLabel|OS X +http://www.semanlink.net/tag/os_x_10_6_snow_leopard|broader_altLabel|OSX +http://www.semanlink.net/tag/sem_web_demo|creationTime|2011-12-26T18:10:15Z +http://www.semanlink.net/tag/sem_web_demo|prefLabel|Sem web demo +http://www.semanlink.net/tag/sem_web_demo|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/sem_web_demo|creationDate|2011-12-26 +http://www.semanlink.net/tag/sem_web_demo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/sem_web_demo|uri|http://www.semanlink.net/tag/sem_web_demo +http://www.semanlink.net/tag/sem_web_demo|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/sem_web_demo|broader_altLabel|sw +http://www.semanlink.net/tag/sem_web_demo|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/platonov|creationTime|2014-01-02T11:44:31Z +http://www.semanlink.net/tag/platonov|prefLabel|Platonov +http://www.semanlink.net/tag/platonov|broader|http://www.semanlink.net/tag/litterature_russe +http://www.semanlink.net/tag/platonov|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/platonov|creationDate|2014-01-02 +http://www.semanlink.net/tag/platonov|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/platonov|describedBy|https://fr.wikipedia.org/wiki/Andre%C3%AF_Platonov +http://www.semanlink.net/tag/platonov|uri|http://www.semanlink.net/tag/platonov +http://www.semanlink.net/tag/platonov|broader_prefLabel|Littérature russe +http://www.semanlink.net/tag/platonov|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/data_sniffer|creationTime|2015-12-17T14:14:24Z +http://www.semanlink.net/tag/data_sniffer|prefLabel|Data Sniffer +http://www.semanlink.net/tag/data_sniffer|creationDate|2015-12-17 +http://www.semanlink.net/tag/data_sniffer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_sniffer|uri|http://www.semanlink.net/tag/data_sniffer +http://www.semanlink.net/tag/quasicrystals|creationTime|2011-12-26T11:42:50Z +http://www.semanlink.net/tag/quasicrystals|prefLabel|Quasicrystals +http://www.semanlink.net/tag/quasicrystals|creationDate|2011-12-26 +http://www.semanlink.net/tag/quasicrystals|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/quasicrystals|uri|http://www.semanlink.net/tag/quasicrystals +http://www.semanlink.net/tag/tiers_monde|prefLabel|Tiers-Monde +http://www.semanlink.net/tag/tiers_monde|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/tiers_monde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tiers_monde|uri|http://www.semanlink.net/tag/tiers_monde +http://www.semanlink.net/tag/tiers_monde|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/climat|prefLabel|Climat +http://www.semanlink.net/tag/climat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/climat|uri|http://www.semanlink.net/tag/climat +http://www.semanlink.net/tag/gnizr|creationTime|2007-11-20T21:44:33Z +http://www.semanlink.net/tag/gnizr|prefLabel|gnizr +http://www.semanlink.net/tag/gnizr|broader|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/gnizr|broader|http://www.semanlink.net/tag/social_bookmarking +http://www.semanlink.net/tag/gnizr|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/gnizr|related|http://www.semanlink.net/tag/del_icio_us +http://www.semanlink.net/tag/gnizr|creationDate|2007-11-20 +http://www.semanlink.net/tag/gnizr|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gnizr|uri|http://www.semanlink.net/tag/gnizr +http://www.semanlink.net/tag/gnizr|broader_prefLabel|Semanlink related +http://www.semanlink.net/tag/gnizr|broader_prefLabel|Social bookmarking +http://www.semanlink.net/tag/gnizr|broader_prefLabel|Tagging +http://www.semanlink.net/tag/nlp_sem_web|creationTime|2014-06-18T09:35:50Z 
+http://www.semanlink.net/tag/nlp_sem_web|prefLabel|NLP + Sem web +http://www.semanlink.net/tag/nlp_sem_web|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_sem_web|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/nlp_sem_web|creationDate|2014-06-18 +http://www.semanlink.net/tag/nlp_sem_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_sem_web|uri|http://www.semanlink.net/tag/nlp_sem_web +http://www.semanlink.net/tag/nlp_sem_web|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_sem_web|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/nlp_sem_web|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_sem_web|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_sem_web|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/nlp_sem_web|broader_altLabel|sw +http://www.semanlink.net/tag/nlp_sem_web|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/links|prefLabel|Links +http://www.semanlink.net/tag/links|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/links|uri|http://www.semanlink.net/tag/links +http://www.semanlink.net/tag/text_to_sql|creationTime|2020-03-18T12:12:50Z +http://www.semanlink.net/tag/text_to_sql|prefLabel|Text to SQL +http://www.semanlink.net/tag/text_to_sql|broader|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/text_to_sql|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/text_to_sql|creationDate|2020-03-18 +http://www.semanlink.net/tag/text_to_sql|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_to_sql|uri|http://www.semanlink.net/tag/text_to_sql +http://www.semanlink.net/tag/text_to_sql|broader_prefLabel|SQL +http://www.semanlink.net/tag/text_to_sql|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/seq2seq_with_attention|creationTime|2018-11-13T17:26:20Z +http://www.semanlink.net/tag/seq2seq_with_attention|prefLabel|Seq2Seq with Attention +http://www.semanlink.net/tag/seq2seq_with_attention|broader|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/tag/seq2seq_with_attention|broader|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/tag/seq2seq_with_attention|creationDate|2018-11-13 +http://www.semanlink.net/tag/seq2seq_with_attention|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/seq2seq_with_attention|uri|http://www.semanlink.net/tag/seq2seq_with_attention +http://www.semanlink.net/tag/seq2seq_with_attention|broader_prefLabel|Attention mechanism +http://www.semanlink.net/tag/seq2seq_with_attention|broader_prefLabel|Sequence-to-sequence learning +http://www.semanlink.net/tag/seq2seq_with_attention|broader_altLabel|Sequence Modeling +http://www.semanlink.net/tag/seq2seq_with_attention|broader_altLabel|Seq2Seq +http://www.semanlink.net/tag/seq2seq_with_attention|broader_related|http://www.semanlink.net/tag/conditional_random_field +http://www.semanlink.net/tag/seq2seq_with_attention|broader_related|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/tag/seq2seq_with_attention|broader_related|http://www.semanlink.net/tag/machine_translation +http://www.semanlink.net/tag/seq2seq_with_attention|broader_related|http://www.semanlink.net/tag/finite_state_transducer 
+http://www.semanlink.net/tag/baselines|creationTime|2019-09-03T23:33:37Z +http://www.semanlink.net/tag/baselines|prefLabel|Baselines +http://www.semanlink.net/tag/baselines|creationDate|2019-09-03 +http://www.semanlink.net/tag/baselines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/baselines|uri|http://www.semanlink.net/tag/baselines +http://www.semanlink.net/tag/html5|creationTime|2010-09-03T22:14:56Z +http://www.semanlink.net/tag/html5|prefLabel|HTML5 +http://www.semanlink.net/tag/html5|broader|http://www.semanlink.net/tag/html +http://www.semanlink.net/tag/html5|creationDate|2010-09-03 +http://www.semanlink.net/tag/html5|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/html5|uri|http://www.semanlink.net/tag/html5 +http://www.semanlink.net/tag/html5|broader_prefLabel|HTML +http://www.semanlink.net/tag/patrick_boucheron|creationTime|2020-11-22T15:01:45Z +http://www.semanlink.net/tag/patrick_boucheron|prefLabel|Patrick Boucheron +http://www.semanlink.net/tag/patrick_boucheron|broader|http://www.semanlink.net/tag/historien +http://www.semanlink.net/tag/patrick_boucheron|related|http://www.semanlink.net/tag/arte +http://www.semanlink.net/tag/patrick_boucheron|creationDate|2020-11-22 +http://www.semanlink.net/tag/patrick_boucheron|comment|L'histoire fait dates, sur arte.tv +http://www.semanlink.net/tag/patrick_boucheron|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/patrick_boucheron|describedBy|https://fr.wikipedia.org/wiki/Patrick_Boucheron +http://www.semanlink.net/tag/patrick_boucheron|uri|http://www.semanlink.net/tag/patrick_boucheron +http://www.semanlink.net/tag/patrick_boucheron|broader_prefLabel|Historien +http://www.semanlink.net/tag/knowledge_resources|creationTime|2019-06-28T00:36:51Z +http://www.semanlink.net/tag/knowledge_resources|prefLabel|Knowledge resources +http://www.semanlink.net/tag/knowledge_resources|creationDate|2019-06-28 +http://www.semanlink.net/tag/knowledge_resources|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_resources|uri|http://www.semanlink.net/tag/knowledge_resources +http://www.semanlink.net/tag/python_library|creationTime|2021-01-30T14:32:21Z +http://www.semanlink.net/tag/python_library|prefLabel|Python library +http://www.semanlink.net/tag/python_library|broader|http://www.semanlink.net/tag/python_tools +http://www.semanlink.net/tag/python_library|creationDate|2021-01-30 +http://www.semanlink.net/tag/python_library|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/python_library|uri|http://www.semanlink.net/tag/python_library +http://www.semanlink.net/tag/python_library|broader_prefLabel|Python tools +http://www.semanlink.net/tag/logic_and_semantic_web|creationTime|2010-07-19T12:12:23Z +http://www.semanlink.net/tag/logic_and_semantic_web|prefLabel|Logic and semantic web +http://www.semanlink.net/tag/logic_and_semantic_web|broader|http://www.semanlink.net/tag/logic +http://www.semanlink.net/tag/logic_and_semantic_web|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/logic_and_semantic_web|creationDate|2010-07-19 +http://www.semanlink.net/tag/logic_and_semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/logic_and_semantic_web|uri|http://www.semanlink.net/tag/logic_and_semantic_web +http://www.semanlink.net/tag/logic_and_semantic_web|broader_prefLabel|Logic 
+http://www.semanlink.net/tag/logic_and_semantic_web|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/logic_and_semantic_web|broader_altLabel|sw +http://www.semanlink.net/tag/logic_and_semantic_web|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/antiquite_iranienne|prefLabel|Antiquité iranienne +http://www.semanlink.net/tag/antiquite_iranienne|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/antiquite_iranienne|broader|http://www.semanlink.net/tag/iran +http://www.semanlink.net/tag/antiquite_iranienne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antiquite_iranienne|uri|http://www.semanlink.net/tag/antiquite_iranienne +http://www.semanlink.net/tag/antiquite_iranienne|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/antiquite_iranienne|broader_prefLabel|Iran +http://www.semanlink.net/tag/http_cache|creationTime|2008-01-07T00:35:59Z +http://www.semanlink.net/tag/http_cache|prefLabel|HTTP Cache +http://www.semanlink.net/tag/http_cache|broader|http://www.semanlink.net/tag/cache +http://www.semanlink.net/tag/http_cache|broader|http://www.semanlink.net/tag/http +http://www.semanlink.net/tag/http_cache|creationDate|2008-01-07 +http://www.semanlink.net/tag/http_cache|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/http_cache|uri|http://www.semanlink.net/tag/http_cache +http://www.semanlink.net/tag/http_cache|broader_prefLabel|Cache +http://www.semanlink.net/tag/http_cache|broader_prefLabel|HTTP +http://www.semanlink.net/tag/pandemie|creationTime|2020-05-12T23:54:29Z +http://www.semanlink.net/tag/pandemie|prefLabel|Pandémie +http://www.semanlink.net/tag/pandemie|broader|http://www.semanlink.net/tag/maladie_contagieuse +http://www.semanlink.net/tag/pandemie|creationDate|2020-05-12 +http://www.semanlink.net/tag/pandemie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pandemie|uri|http://www.semanlink.net/tag/pandemie +http://www.semanlink.net/tag/pandemie|broader_prefLabel|Maladie contagieuse +http://www.semanlink.net/tag/edvige|creationTime|2008-09-05T21:18:36Z +http://www.semanlink.net/tag/edvige|prefLabel|Edvige +http://www.semanlink.net/tag/edvige|broader|http://www.semanlink.net/tag/sarkozy +http://www.semanlink.net/tag/edvige|broader|http://www.semanlink.net/tag/fichage +http://www.semanlink.net/tag/edvige|broader|http://www.semanlink.net/tag/etat_policier +http://www.semanlink.net/tag/edvige|creationDate|2008-09-05 +http://www.semanlink.net/tag/edvige|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/edvige|uri|http://www.semanlink.net/tag/edvige +http://www.semanlink.net/tag/edvige|broader_prefLabel|Sarkozy +http://www.semanlink.net/tag/edvige|broader_prefLabel|Fichage +http://www.semanlink.net/tag/edvige|broader_prefLabel|Etat policier +http://www.semanlink.net/tag/edvige|broader_related|http://www.semanlink.net/tag/securite +http://www.semanlink.net/tag/grandes_invasions|creationTime|2020-05-11T21:43:11Z +http://www.semanlink.net/tag/grandes_invasions|prefLabel|Grandes invasions +http://www.semanlink.net/tag/grandes_invasions|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/grandes_invasions|broader|http://www.semanlink.net/tag/migrations_humaines +http://www.semanlink.net/tag/grandes_invasions|related|http://www.semanlink.net/tag/moyen_age +http://www.semanlink.net/tag/grandes_invasions|creationDate|2020-05-11 
+http://www.semanlink.net/tag/grandes_invasions|comment|Völkerwanderung en Allemand +http://www.semanlink.net/tag/grandes_invasions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grandes_invasions|describedBy|https://fr.wikipedia.org/wiki/Invasions_barbares +http://www.semanlink.net/tag/grandes_invasions|uri|http://www.semanlink.net/tag/grandes_invasions +http://www.semanlink.net/tag/grandes_invasions|broader_prefLabel|Histoire +http://www.semanlink.net/tag/grandes_invasions|broader_prefLabel|Migrations humaines +http://www.semanlink.net/tag/extremisme_islamique|prefLabel|Extrémisme islamique +http://www.semanlink.net/tag/extremisme_islamique|broader|http://www.semanlink.net/tag/fondamentalisme_islamique +http://www.semanlink.net/tag/extremisme_islamique|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/extremisme_islamique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/extremisme_islamique|uri|http://www.semanlink.net/tag/extremisme_islamique +http://www.semanlink.net/tag/extremisme_islamique|broader_prefLabel|Fondamentalisme islamique +http://www.semanlink.net/tag/extremisme_islamique|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/greve|creationTime|2016-07-25T16:56:00Z +http://www.semanlink.net/tag/greve|prefLabel|Grève +http://www.semanlink.net/tag/greve|creationDate|2016-07-25 +http://www.semanlink.net/tag/greve|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/greve|uri|http://www.semanlink.net/tag/greve +http://www.semanlink.net/tag/centrales_nucleaires|prefLabel|Centrales nucléaires +http://www.semanlink.net/tag/centrales_nucleaires|broader|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/tag/centrales_nucleaires|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/centrales_nucleaires|uri|http://www.semanlink.net/tag/centrales_nucleaires +http://www.semanlink.net/tag/centrales_nucleaires|broader_prefLabel|Industrie nucléaire +http://www.semanlink.net/tag/centrales_nucleaires|broader_altLabel|Nucléaire +http://www.semanlink.net/tag/temps|prefLabel|Temps +http://www.semanlink.net/tag/temps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/temps|uri|http://www.semanlink.net/tag/temps +http://www.semanlink.net/tag/markdown|creationTime|2015-05-14T17:44:03Z +http://www.semanlink.net/tag/markdown|prefLabel|Markdown +http://www.semanlink.net/tag/markdown|creationDate|2015-05-14 +http://www.semanlink.net/tag/markdown|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/markdown|describedBy|https://en.wikipedia.org/wiki/Markdown +http://www.semanlink.net/tag/markdown|uri|http://www.semanlink.net/tag/markdown +http://www.semanlink.net/tag/maven_tips|creationTime|2013-09-20T09:59:10Z +http://www.semanlink.net/tag/maven_tips|prefLabel|Maven tips +http://www.semanlink.net/tag/maven_tips|broader|http://www.semanlink.net/tag/tips +http://www.semanlink.net/tag/maven_tips|broader|http://www.semanlink.net/tag/maven +http://www.semanlink.net/tag/maven_tips|creationDate|2013-09-20 +http://www.semanlink.net/tag/maven_tips|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maven_tips|uri|http://www.semanlink.net/tag/maven_tips +http://www.semanlink.net/tag/maven_tips|broader_prefLabel|Tips +http://www.semanlink.net/tag/maven_tips|broader_prefLabel|Maven 
+http://www.semanlink.net/tag/niklas_lindstrom|creationTime|2015-08-06T17:41:07Z +http://www.semanlink.net/tag/niklas_lindstrom|prefLabel|Niklas Lindström +http://www.semanlink.net/tag/niklas_lindstrom|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/niklas_lindstrom|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/niklas_lindstrom|creationDate|2015-08-06 +http://www.semanlink.net/tag/niklas_lindstrom|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/niklas_lindstrom|uri|http://www.semanlink.net/tag/niklas_lindstrom +http://www.semanlink.net/tag/niklas_lindstrom|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/niklas_lindstrom|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/niklas_lindstrom|broader_altLabel|Technical guys +http://www.semanlink.net/tag/union_europeenne|prefLabel|Union européenne +http://www.semanlink.net/tag/union_europeenne|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/union_europeenne|broader|http://www.semanlink.net/tag/institutions_europeennes +http://www.semanlink.net/tag/union_europeenne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/union_europeenne|altLabel|UE +http://www.semanlink.net/tag/union_europeenne|uri|http://www.semanlink.net/tag/union_europeenne +http://www.semanlink.net/tag/union_europeenne|broader_prefLabel|Europe +http://www.semanlink.net/tag/union_europeenne|broader_prefLabel|Institutions européennes +http://www.semanlink.net/tag/data_ownership|creationTime|2018-04-29T13:19:17Z +http://www.semanlink.net/tag/data_ownership|prefLabel|Data ownership +http://www.semanlink.net/tag/data_ownership|creationDate|2018-04-29 +http://www.semanlink.net/tag/data_ownership|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_ownership|uri|http://www.semanlink.net/tag/data_ownership +http://www.semanlink.net/tag/peinture|prefLabel|Painting +http://www.semanlink.net/tag/peinture|broader|http://www.semanlink.net/tag/art +http://www.semanlink.net/tag/peinture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/peinture|altLabel|Peinture +http://www.semanlink.net/tag/peinture|uri|http://www.semanlink.net/tag/peinture +http://www.semanlink.net/tag/peinture|broader_prefLabel|Art +http://www.semanlink.net/tag/nlp_in_enterprise|creationTime|2020-01-07T12:06:26Z +http://www.semanlink.net/tag/nlp_in_enterprise|prefLabel|NLP in enterprise +http://www.semanlink.net/tag/nlp_in_enterprise|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_in_enterprise|related|http://www.semanlink.net/tag/enterprise_knowledge_graph +http://www.semanlink.net/tag/nlp_in_enterprise|creationDate|2020-01-07 +http://www.semanlink.net/tag/nlp_in_enterprise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_in_enterprise|uri|http://www.semanlink.net/tag/nlp_in_enterprise +http://www.semanlink.net/tag/nlp_in_enterprise|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_in_enterprise|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_in_enterprise|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_in_enterprise|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/conscience|creationTime|2008-08-17T11:54:32Z +http://www.semanlink.net/tag/conscience|prefLabel|Consciousness 
+http://www.semanlink.net/tag/conscience|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/conscience|related|http://www.semanlink.net/tag/neuroscience +http://www.semanlink.net/tag/conscience|creationDate|2008-08-17 +http://www.semanlink.net/tag/conscience|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conscience|altLabel|Conscience +http://www.semanlink.net/tag/conscience|uri|http://www.semanlink.net/tag/conscience +http://www.semanlink.net/tag/conscience|broader_prefLabel|Brain +http://www.semanlink.net/tag/conscience|broader_altLabel|Cerveau +http://www.semanlink.net/tag/conscience|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/financial_data|creationTime|2010-07-01T18:35:16Z +http://www.semanlink.net/tag/financial_data|prefLabel|Financial Data +http://www.semanlink.net/tag/financial_data|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/financial_data|creationDate|2010-07-01 +http://www.semanlink.net/tag/financial_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/financial_data|uri|http://www.semanlink.net/tag/financial_data +http://www.semanlink.net/tag/financial_data|broader_prefLabel|Finance +http://www.semanlink.net/tag/neo4j|creationTime|2013-03-12T15:00:14Z +http://www.semanlink.net/tag/neo4j|prefLabel|Neo4j +http://www.semanlink.net/tag/neo4j|broader|http://www.semanlink.net/tag/graph_database +http://www.semanlink.net/tag/neo4j|broader|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/neo4j|creationDate|2013-03-12 +http://www.semanlink.net/tag/neo4j|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/neo4j|uri|http://www.semanlink.net/tag/neo4j +http://www.semanlink.net/tag/neo4j|broader_prefLabel|Graph database +http://www.semanlink.net/tag/neo4j|broader_prefLabel|NOSQL +http://www.semanlink.net/tag/neo4j|broader_related|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/genome_editing|creationTime|2015-03-15T18:52:57Z +http://www.semanlink.net/tag/genome_editing|prefLabel|Genome editing +http://www.semanlink.net/tag/genome_editing|broader|http://www.semanlink.net/tag/gene_editing +http://www.semanlink.net/tag/genome_editing|broader|http://www.semanlink.net/tag/manipulations_genetiques +http://www.semanlink.net/tag/genome_editing|creationDate|2015-03-15 +http://www.semanlink.net/tag/genome_editing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/genome_editing|describedBy|https://en.wikipedia.org/wiki/Genome_editing +http://www.semanlink.net/tag/genome_editing|altLabel|Edition du génome +http://www.semanlink.net/tag/genome_editing|uri|http://www.semanlink.net/tag/genome_editing +http://www.semanlink.net/tag/genome_editing|broader_prefLabel|Gene editing +http://www.semanlink.net/tag/genome_editing|broader_prefLabel|Manipulations génétiques +http://www.semanlink.net/tag/orson_welles|creationTime|2008-05-15T22:34:14Z +http://www.semanlink.net/tag/orson_welles|prefLabel|Orson Welles +http://www.semanlink.net/tag/orson_welles|broader|http://www.semanlink.net/tag/realisateur +http://www.semanlink.net/tag/orson_welles|creationDate|2008-05-15 +http://www.semanlink.net/tag/orson_welles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/orson_welles|describedBy|https://en.wikipedia.org/wiki/Orson_Welles 
+http://www.semanlink.net/tag/orson_welles|uri|http://www.semanlink.net/tag/orson_welles +http://www.semanlink.net/tag/orson_welles|broader_prefLabel|Réalisateur +http://www.semanlink.net/tag/orson_welles|broader_altLabel|Cinéaste +http://www.semanlink.net/tag/recommended_reading|creationTime|2018-08-19T22:42:20Z +http://www.semanlink.net/tag/recommended_reading|prefLabel|Recommended reading +http://www.semanlink.net/tag/recommended_reading|broader|http://www.semanlink.net/tag/livre_a_lire +http://www.semanlink.net/tag/recommended_reading|creationDate|2018-08-19 +http://www.semanlink.net/tag/recommended_reading|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/recommended_reading|uri|http://www.semanlink.net/tag/recommended_reading +http://www.semanlink.net/tag/recommended_reading|broader_prefLabel|Livre à lire +http://www.semanlink.net/tag/cerveau|prefLabel|Brain +http://www.semanlink.net/tag/cerveau|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/cerveau|related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/cerveau|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cerveau|altLabel|Cerveau +http://www.semanlink.net/tag/cerveau|uri|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/cerveau|broader_prefLabel|Biology +http://www.semanlink.net/tag/cerveau|broader_altLabel|Biologie +http://www.semanlink.net/tag/rdf_vs_xml|prefLabel|RDF vs XML +http://www.semanlink.net/tag/rdf_vs_xml|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_vs_xml|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_vs_xml|uri|http://www.semanlink.net/tag/rdf_vs_xml +http://www.semanlink.net/tag/rdf_vs_xml|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_vs_xml|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_vs_xml|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_vs_xml|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_vs_xml|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_vs_xml|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/momie|prefLabel|Momie +http://www.semanlink.net/tag/momie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/momie|uri|http://www.semanlink.net/tag/momie +http://www.semanlink.net/tag/uri|prefLabel|URI +http://www.semanlink.net/tag/uri|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/uri|broader|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.semanlink.net/tag/uri|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uri|uri|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/uri|broader_prefLabel|Internet +http://www.semanlink.net/tag/uri|broader_prefLabel|Web architecture +http://www.semanlink.net/tag/trust_in_the_web_of_data|creationTime|2013-07-12T11:12:18Z +http://www.semanlink.net/tag/trust_in_the_web_of_data|prefLabel|Trust in the Web of Data +http://www.semanlink.net/tag/trust_in_the_web_of_data|broader|http://www.semanlink.net/tag/trust +http://www.semanlink.net/tag/trust_in_the_web_of_data|broader|http://www.semanlink.net/tag/data_web +http://www.semanlink.net/tag/trust_in_the_web_of_data|creationDate|2013-07-12 
+http://www.semanlink.net/tag/trust_in_the_web_of_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/trust_in_the_web_of_data|uri|http://www.semanlink.net/tag/trust_in_the_web_of_data +http://www.semanlink.net/tag/trust_in_the_web_of_data|broader_prefLabel|Trust +http://www.semanlink.net/tag/trust_in_the_web_of_data|broader_prefLabel|Web of data +http://www.semanlink.net/tag/rdf2h_browser|creationTime|2018-02-20T22:32:50Z +http://www.semanlink.net/tag/rdf2h_browser|prefLabel|RDF2h +http://www.semanlink.net/tag/rdf2h_browser|broader|http://www.semanlink.net/tag/linked_data_browser +http://www.semanlink.net/tag/rdf2h_browser|creationDate|2018-02-20 +http://www.semanlink.net/tag/rdf2h_browser|comment|A configurable Client-Side Linked Data Browser in less than 150 lines of code +http://www.semanlink.net/tag/rdf2h_browser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf2h_browser|uri|http://www.semanlink.net/tag/rdf2h_browser +http://www.semanlink.net/tag/rdf2h_browser|broader_prefLabel|Linked Data Browser +http://www.semanlink.net/tag/comedie_policiere|creationTime|2020-09-30T21:25:08Z +http://www.semanlink.net/tag/comedie_policiere|prefLabel|Comédie policière +http://www.semanlink.net/tag/comedie_policiere|creationDate|2020-09-30 +http://www.semanlink.net/tag/comedie_policiere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/comedie_policiere|uri|http://www.semanlink.net/tag/comedie_policiere +http://www.semanlink.net/tag/n_importe_quoi|prefLabel|N'importe quoi +http://www.semanlink.net/tag/n_importe_quoi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/n_importe_quoi|uri|http://www.semanlink.net/tag/n_importe_quoi +http://www.semanlink.net/tag/internet_tool|prefLabel|Internet tool +http://www.semanlink.net/tag/internet_tool|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/internet_tool|broader|http://www.semanlink.net/tag/tools +http://www.semanlink.net/tag/internet_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/internet_tool|uri|http://www.semanlink.net/tag/internet_tool +http://www.semanlink.net/tag/internet_tool|broader_prefLabel|Internet +http://www.semanlink.net/tag/internet_tool|broader_prefLabel|Tools +http://www.semanlink.net/tag/huile_de_palme|creationTime|2013-07-30T14:18:40Z +http://www.semanlink.net/tag/huile_de_palme|prefLabel|Huile de palme +http://www.semanlink.net/tag/huile_de_palme|creationDate|2013-07-30 +http://www.semanlink.net/tag/huile_de_palme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/huile_de_palme|uri|http://www.semanlink.net/tag/huile_de_palme +http://www.semanlink.net/tag/zombie|creationTime|2014-03-01T21:51:49Z +http://www.semanlink.net/tag/zombie|prefLabel|Zombie +http://www.semanlink.net/tag/zombie|creationDate|2014-03-01 +http://www.semanlink.net/tag/zombie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zombie|uri|http://www.semanlink.net/tag/zombie +http://www.semanlink.net/tag/leon_levy_bencheton|creationTime|2013-09-24T10:24:23Z +http://www.semanlink.net/tag/leon_levy_bencheton|prefLabel|Léon Lévy-Bencheton +http://www.semanlink.net/tag/leon_levy_bencheton|creationDate|2013-09-24 +http://www.semanlink.net/tag/leon_levy_bencheton|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/leon_levy_bencheton|uri|http://www.semanlink.net/tag/leon_levy_bencheton +http://www.semanlink.net/tag/computational_complexity|creationTime|2007-02-06T23:29:14Z +http://www.semanlink.net/tag/computational_complexity|prefLabel|Computational complexity +http://www.semanlink.net/tag/computational_complexity|creationDate|2007-02-06 +http://www.semanlink.net/tag/computational_complexity|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/computational_complexity|uri|http://www.semanlink.net/tag/computational_complexity +http://www.semanlink.net/tag/dans_deep_averaging_neural_networks|creationTime|2018-05-30T15:40:58Z +http://www.semanlink.net/tag/dans_deep_averaging_neural_networks|prefLabel|DANs (Deep Averaging Neural Networks) +http://www.semanlink.net/tag/dans_deep_averaging_neural_networks|broader|http://www.semanlink.net/tag/neural_bag_of_words +http://www.semanlink.net/tag/dans_deep_averaging_neural_networks|creationDate|2018-05-30 +http://www.semanlink.net/tag/dans_deep_averaging_neural_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dans_deep_averaging_neural_networks|uri|http://www.semanlink.net/tag/dans_deep_averaging_neural_networks +http://www.semanlink.net/tag/dans_deep_averaging_neural_networks|broader_prefLabel|Neural Bag of Words +http://www.semanlink.net/tag/api|creationTime|2014-07-26T02:01:39Z +http://www.semanlink.net/tag/api|prefLabel|API +http://www.semanlink.net/tag/api|creationDate|2014-07-26 +http://www.semanlink.net/tag/api|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/api|uri|http://www.semanlink.net/tag/api +http://www.semanlink.net/tag/interactive_knowledge_stack|creationTime|2011-06-08T19:01:11Z +http://www.semanlink.net/tag/interactive_knowledge_stack|prefLabel|Interactive Knowledge Stack +http://www.semanlink.net/tag/interactive_knowledge_stack|broader|http://www.semanlink.net/tag/semantic_cms +http://www.semanlink.net/tag/interactive_knowledge_stack|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/interactive_knowledge_stack|creationDate|2011-06-08 +http://www.semanlink.net/tag/interactive_knowledge_stack|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/interactive_knowledge_stack|homepage|http://www.iks-project.eu/ +http://www.semanlink.net/tag/interactive_knowledge_stack|altLabel|IKS +http://www.semanlink.net/tag/interactive_knowledge_stack|uri|http://www.semanlink.net/tag/interactive_knowledge_stack +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_prefLabel|Semantic CMS +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_altLabel|LD +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/jamendo 
+http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/interactive_knowledge_stack|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003|creationTime|2009-09-01T10:54:46Z +http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003|prefLabel|Championnats du monde à Paris-Saint Denis, 2003 +http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003|broader|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003|broader|http://www.semanlink.net/tag/championnat_du_monde_d_athletisme +http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003|creationDate|2009-09-01 +http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003|uri|http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003 +http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003|broader_prefLabel|Paris +http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003|broader_prefLabel|Championnat du monde d'athlétisme +http://www.semanlink.net/tag/micropayments_on_the_web|creationTime|2010-12-03T12:01:26Z +http://www.semanlink.net/tag/micropayments_on_the_web|prefLabel|Micropayments on the web +http://www.semanlink.net/tag/micropayments_on_the_web|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/micropayments_on_the_web|broader|http://www.semanlink.net/tag/web +http://www.semanlink.net/tag/micropayments_on_the_web|broader|http://www.semanlink.net/tag/payment +http://www.semanlink.net/tag/micropayments_on_the_web|broader|http://www.semanlink.net/tag/money +http://www.semanlink.net/tag/micropayments_on_the_web|creationDate|2010-12-03 +http://www.semanlink.net/tag/micropayments_on_the_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/micropayments_on_the_web|uri|http://www.semanlink.net/tag/micropayments_on_the_web +http://www.semanlink.net/tag/micropayments_on_the_web|broader_prefLabel|Finance +http://www.semanlink.net/tag/micropayments_on_the_web|broader_prefLabel|Web +http://www.semanlink.net/tag/micropayments_on_the_web|broader_prefLabel|Payment +http://www.semanlink.net/tag/micropayments_on_the_web|broader_prefLabel|Money +http://www.semanlink.net/tag/micropayments_on_the_web|broader_altLabel|Monnaie 
+http://www.semanlink.net/tag/micropayments_on_the_web|broader_related|http://www.semanlink.net/tag/money +http://www.semanlink.net/tag/musique_bresilienne|creationTime|2014-07-02T09:09:14Z +http://www.semanlink.net/tag/musique_bresilienne|prefLabel|Musique brésilienne +http://www.semanlink.net/tag/musique_bresilienne|broader|http://www.semanlink.net/tag/musique +http://www.semanlink.net/tag/musique_bresilienne|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/musique_bresilienne|creationDate|2014-07-02 +http://www.semanlink.net/tag/musique_bresilienne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/musique_bresilienne|uri|http://www.semanlink.net/tag/musique_bresilienne +http://www.semanlink.net/tag/musique_bresilienne|broader_prefLabel|Musique +http://www.semanlink.net/tag/musique_bresilienne|broader_prefLabel|Brésil +http://www.semanlink.net/tag/musique_bresilienne|broader_altLabel|Music +http://www.semanlink.net/tag/musique_bresilienne|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/imprimantes|prefLabel|Imprimantes +http://www.semanlink.net/tag/imprimantes|broader|http://www.semanlink.net/tag/devices +http://www.semanlink.net/tag/imprimantes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/imprimantes|uri|http://www.semanlink.net/tag/imprimantes +http://www.semanlink.net/tag/imprimantes|broader_prefLabel|Devices +http://www.semanlink.net/tag/amerique|prefLabel|Amérique +http://www.semanlink.net/tag/amerique|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/amerique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amerique|uri|http://www.semanlink.net/tag/amerique +http://www.semanlink.net/tag/amerique|broader_prefLabel|Géographie +http://www.semanlink.net/tag/semanlink_related|prefLabel|Semanlink related +http://www.semanlink.net/tag/semanlink_related|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semanlink_related|uri|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/tag/mars|prefLabel|Mars +http://www.semanlink.net/tag/mars|broader|http://www.semanlink.net/tag/systeme_solaire +http://www.semanlink.net/tag/mars|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mars|uri|http://www.semanlink.net/tag/mars +http://www.semanlink.net/tag/mars|broader_prefLabel|Système solaire +http://www.semanlink.net/tag/hydrogen|creationTime|2018-02-10T00:25:25Z +http://www.semanlink.net/tag/hydrogen|prefLabel|Hydrogen +http://www.semanlink.net/tag/hydrogen|related|http://www.semanlink.net/tag/transition_energetique +http://www.semanlink.net/tag/hydrogen|creationDate|2018-02-10 +http://www.semanlink.net/tag/hydrogen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hydrogen|uri|http://www.semanlink.net/tag/hydrogen +http://www.semanlink.net/tag/langue_francaise|prefLabel|Langue française +http://www.semanlink.net/tag/langue_francaise|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/langue_francaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/langue_francaise|uri|http://www.semanlink.net/tag/langue_francaise +http://www.semanlink.net/tag/langue_francaise|broader_prefLabel|Langues +http://www.semanlink.net/tag/toyota|prefLabel|Toyota 
+http://www.semanlink.net/tag/toyota|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/toyota|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/toyota|broader|http://www.semanlink.net/tag/japon +http://www.semanlink.net/tag/toyota|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/toyota|uri|http://www.semanlink.net/tag/toyota +http://www.semanlink.net/tag/toyota|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/toyota|broader_prefLabel|Automobile +http://www.semanlink.net/tag/toyota|broader_prefLabel|Japon +http://www.semanlink.net/tag/toyota|broader_altLabel|Automotive +http://www.semanlink.net/tag/toyota|broader_altLabel|Japan +http://www.semanlink.net/tag/semantic_web_business|prefLabel|Semantic Web : Business +http://www.semanlink.net/tag/semantic_web_business|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_business|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_business|uri|http://www.semanlink.net/tag/semantic_web_business +http://www.semanlink.net/tag/semantic_web_business|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_business|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_business|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/raphael_troncy|creationTime|2013-05-17T11:09:53Z +http://www.semanlink.net/tag/raphael_troncy|prefLabel|Raphaël Troncy +http://www.semanlink.net/tag/raphael_troncy|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/raphael_troncy|related|http://www.semanlink.net/tag/eswc_2012 +http://www.semanlink.net/tag/raphael_troncy|creationDate|2013-05-17 +http://www.semanlink.net/tag/raphael_troncy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/raphael_troncy|uri|http://www.semanlink.net/tag/raphael_troncy +http://www.semanlink.net/tag/raphael_troncy|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/jean_dujardin|creationTime|2012-05-04T01:24:03Z +http://www.semanlink.net/tag/jean_dujardin|prefLabel|Jean Dujardin +http://www.semanlink.net/tag/jean_dujardin|broader|http://www.semanlink.net/tag/acteur +http://www.semanlink.net/tag/jean_dujardin|creationDate|2012-05-04 +http://www.semanlink.net/tag/jean_dujardin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jean_dujardin|describedBy|https://fr.wikipedia.org/wiki/Jean_Dujardin +http://www.semanlink.net/tag/jean_dujardin|uri|http://www.semanlink.net/tag/jean_dujardin +http://www.semanlink.net/tag/jean_dujardin|broader_prefLabel|Acteur +http://www.semanlink.net/tag/semantic_integration_hub|prefLabel|Semantic Integration Hub +http://www.semanlink.net/tag/semantic_integration_hub|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_integration_hub|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_integration_hub|uri|http://www.semanlink.net/tag/semantic_integration_hub +http://www.semanlink.net/tag/semantic_integration_hub|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_integration_hub|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_integration_hub|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/oov|creationTime|2019-01-29T12:50:46Z +http://www.semanlink.net/tag/oov|prefLabel|OOV 
+http://www.semanlink.net/tag/oov|creationDate|2019-01-29 +http://www.semanlink.net/tag/oov|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/oov|uri|http://www.semanlink.net/tag/oov +http://www.semanlink.net/tag/owl_2|creationTime|2008-12-08T11:50:24Z +http://www.semanlink.net/tag/owl_2|prefLabel|OWL 2 +http://www.semanlink.net/tag/owl_2|broader|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/owl_2|creationDate|2008-12-08 +http://www.semanlink.net/tag/owl_2|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/owl_2|uri|http://www.semanlink.net/tag/owl_2 +http://www.semanlink.net/tag/owl_2|broader_prefLabel|OWL +http://www.semanlink.net/tag/owl_2|broader_related|http://www.semanlink.net/tag/bijan_parsia +http://www.semanlink.net/tag/crispr_cas9|creationTime|2016-01-05T23:22:17Z +http://www.semanlink.net/tag/crispr_cas9|prefLabel|CRISPR +http://www.semanlink.net/tag/crispr_cas9|broader|http://www.semanlink.net/tag/gene_editing +http://www.semanlink.net/tag/crispr_cas9|creationDate|2016-01-05 +http://www.semanlink.net/tag/crispr_cas9|comment|"Les bactéries sont dotées de nombreux systèmes de défense pour contrer les attaques constantes de virus, aussi appelés bactériophages. En l’occurrence, le système immunitaire CRISPR-Cas9 permet de constituer une mémoire des infections passées en intégrant dans le chromosome bactérien des fragments d’ADN viral, qui seront utilisés lors d’attaques ultérieures pour diriger Cas9 (dont la fonction est de dégrader l’ADN) spécifiquement contre le génome viral à détruire. Le système CRISPR-Cas9 a été détourné de sa fonction originelle pour mettre au point un puissant et prometteur outil biotechnologique d’édition de gènes. ([source](http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/a-goulet.html)) +" +http://www.semanlink.net/tag/crispr_cas9|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/crispr_cas9|uri|http://www.semanlink.net/tag/crispr_cas9 +http://www.semanlink.net/tag/crispr_cas9|broader_prefLabel|Gene editing +http://www.semanlink.net/tag/ethnologie|prefLabel|Ethnologie +http://www.semanlink.net/tag/ethnologie|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/ethnologie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ethnologie|uri|http://www.semanlink.net/tag/ethnologie +http://www.semanlink.net/tag/ethnologie|broader_prefLabel|Géographie +http://www.semanlink.net/tag/event_camera|creationTime|2021-04-15T17:13:55Z +http://www.semanlink.net/tag/event_camera|prefLabel|Event camera +http://www.semanlink.net/tag/event_camera|creationDate|2021-04-15 +http://www.semanlink.net/tag/event_camera|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/event_camera|uri|http://www.semanlink.net/tag/event_camera +http://www.semanlink.net/tag/pangolin|creationTime|2013-03-01T01:49:58Z +http://www.semanlink.net/tag/pangolin|prefLabel|Pangolin +http://www.semanlink.net/tag/pangolin|broader|http://www.semanlink.net/tag/especes_menacees +http://www.semanlink.net/tag/pangolin|creationDate|2013-03-01 +http://www.semanlink.net/tag/pangolin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pangolin|uri|http://www.semanlink.net/tag/pangolin +http://www.semanlink.net/tag/pangolin|broader_prefLabel|Espèces menacées +http://www.semanlink.net/tag/pangolin|broader_altLabel|Endangered Species 
+http://www.semanlink.net/tag/pangolin|broader_related|http://www.semanlink.net/tag/disparition_d_especes +http://www.semanlink.net/tag/search_engines|prefLabel|Search Engines +http://www.semanlink.net/tag/search_engines|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/search_engines|broader|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/search_engines|broader|http://www.semanlink.net/tag/search +http://www.semanlink.net/tag/search_engines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/search_engines|altLabel|Moteur de recherche +http://www.semanlink.net/tag/search_engines|uri|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/search_engines|broader_prefLabel|Internet +http://www.semanlink.net/tag/search_engines|broader_prefLabel|Information retrieval +http://www.semanlink.net/tag/search_engines|broader_prefLabel|Search +http://www.semanlink.net/tag/search_engines|broader_altLabel|IR +http://www.semanlink.net/tag/data_mining|creationTime|2013-03-29T01:20:21Z +http://www.semanlink.net/tag/data_mining|prefLabel|Data mining +http://www.semanlink.net/tag/data_mining|broader|http://www.semanlink.net/tag/data_science +http://www.semanlink.net/tag/data_mining|creationDate|2013-03-29 +http://www.semanlink.net/tag/data_mining|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_mining|uri|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/data_mining|broader_prefLabel|Data science +http://www.semanlink.net/tag/data_mining|broader_altLabel|Data analysis +http://www.semanlink.net/tag/mesure_du_temps|creationTime|2007-07-28T13:45:52Z +http://www.semanlink.net/tag/mesure_du_temps|prefLabel|Mesure du temps +http://www.semanlink.net/tag/mesure_du_temps|creationDate|2007-07-28 +http://www.semanlink.net/tag/mesure_du_temps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mesure_du_temps|uri|http://www.semanlink.net/tag/mesure_du_temps +http://www.semanlink.net/tag/electronics|creationTime|2013-04-19T14:18:50Z +http://www.semanlink.net/tag/electronics|prefLabel|Electronics +http://www.semanlink.net/tag/electronics|creationDate|2013-04-19 +http://www.semanlink.net/tag/electronics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/electronics|uri|http://www.semanlink.net/tag/electronics +http://www.semanlink.net/tag/elda|creationTime|2011-01-24T19:01:45Z +http://www.semanlink.net/tag/elda|prefLabel|Elda +http://www.semanlink.net/tag/elda|broader|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/elda|broader|http://www.semanlink.net/tag/epimorphics +http://www.semanlink.net/tag/elda|broader|http://www.semanlink.net/tag/linked_data_api +http://www.semanlink.net/tag/elda|creationDate|2011-01-24 +http://www.semanlink.net/tag/elda|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/elda|uri|http://www.semanlink.net/tag/elda +http://www.semanlink.net/tag/elda|broader_prefLabel|Jena +http://www.semanlink.net/tag/elda|broader_prefLabel|Epimorphics +http://www.semanlink.net/tag/elda|broader_prefLabel|Linked Data API +http://www.semanlink.net/tag/elda|broader_related|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/elda|broader_related|http://www.semanlink.net/tag/andy_seaborne +http://www.semanlink.net/tag/elda|broader_related|http://www.semanlink.net/tag/jena 
+http://www.semanlink.net/tag/three_parent_embryo|prefLabel|Three-parent embryo +http://www.semanlink.net/tag/three_parent_embryo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/three_parent_embryo|uri|http://www.semanlink.net/tag/three_parent_embryo +http://www.semanlink.net/tag/data_mining_tools|creationTime|2013-08-21T16:37:39Z +http://www.semanlink.net/tag/data_mining_tools|prefLabel|Data mining tools +http://www.semanlink.net/tag/data_mining_tools|broader|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/data_mining_tools|creationDate|2013-08-21 +http://www.semanlink.net/tag/data_mining_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_mining_tools|uri|http://www.semanlink.net/tag/data_mining_tools +http://www.semanlink.net/tag/data_mining_tools|broader_prefLabel|Data mining +http://www.semanlink.net/tag/ranking_information_retrieval|creationTime|2019-04-18T00:20:09Z +http://www.semanlink.net/tag/ranking_information_retrieval|prefLabel|Ranking (information retrieval) +http://www.semanlink.net/tag/ranking_information_retrieval|broader|http://www.semanlink.net/tag/information_retrieval_techniques +http://www.semanlink.net/tag/ranking_information_retrieval|broader|http://www.semanlink.net/tag/ranking +http://www.semanlink.net/tag/ranking_information_retrieval|broader|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/tag/ranking_information_retrieval|creationDate|2019-04-18 +http://www.semanlink.net/tag/ranking_information_retrieval|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ranking_information_retrieval|describedBy|https://en.wikipedia.org/wiki/Ranking_(information_retrieval) +http://www.semanlink.net/tag/ranking_information_retrieval|uri|http://www.semanlink.net/tag/ranking_information_retrieval +http://www.semanlink.net/tag/ranking_information_retrieval|broader_prefLabel|Information retrieval: techniques +http://www.semanlink.net/tag/ranking_information_retrieval|broader_prefLabel|Ranking +http://www.semanlink.net/tag/ranking_information_retrieval|broader_prefLabel|Information retrieval +http://www.semanlink.net/tag/ranking_information_retrieval|broader_altLabel|IR +http://www.semanlink.net/tag/mime_type|creationTime|2012-09-02T15:37:07Z +http://www.semanlink.net/tag/mime_type|prefLabel|MIME type +http://www.semanlink.net/tag/mime_type|creationDate|2012-09-02 +http://www.semanlink.net/tag/mime_type|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mime_type|uri|http://www.semanlink.net/tag/mime_type +http://www.semanlink.net/tag/memristor|creationTime|2009-07-15T22:35:04Z +http://www.semanlink.net/tag/memristor|prefLabel|Memristor +http://www.semanlink.net/tag/memristor|broader|http://www.semanlink.net/tag/brains_in_silicon +http://www.semanlink.net/tag/memristor|creationDate|2009-07-15 +http://www.semanlink.net/tag/memristor|comment|Artificial synapse. A two-terminal device that can store information in its resistance state even when the power is turned off. 
+http://www.semanlink.net/tag/memristor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/memristor|describedBy|https://en.wikipedia.org/wiki/Memristor +http://www.semanlink.net/tag/memristor|uri|http://www.semanlink.net/tag/memristor +http://www.semanlink.net/tag/memristor|broader_prefLabel|Bio inspired computing devices +http://www.semanlink.net/tag/memristor|broader_altLabel|Neuromorphic engineering +http://www.semanlink.net/tag/memristor|broader_altLabel|Brains in silicon +http://www.semanlink.net/tag/memristor|broader_altLabel|Neuromorphique +http://www.semanlink.net/tag/immigration_familiale|prefLabel|Immigration familiale +http://www.semanlink.net/tag/immigration_familiale|broader|http://www.semanlink.net/tag/immigration +http://www.semanlink.net/tag/immigration_familiale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/immigration_familiale|uri|http://www.semanlink.net/tag/immigration_familiale +http://www.semanlink.net/tag/immigration_familiale|broader_prefLabel|Immigration +http://www.semanlink.net/tag/histoire_coloniale|creationTime|2013-04-29T01:20:58Z +http://www.semanlink.net/tag/histoire_coloniale|prefLabel|Histoire coloniale +http://www.semanlink.net/tag/histoire_coloniale|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/histoire_coloniale|broader|http://www.semanlink.net/tag/colonisation +http://www.semanlink.net/tag/histoire_coloniale|creationDate|2013-04-29 +http://www.semanlink.net/tag/histoire_coloniale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_coloniale|uri|http://www.semanlink.net/tag/histoire_coloniale +http://www.semanlink.net/tag/histoire_coloniale|broader_prefLabel|Histoire +http://www.semanlink.net/tag/histoire_coloniale|broader_prefLabel|Colonisation +http://www.semanlink.net/tag/histoire_coloniale|broader_altLabel|Colonialisme +http://www.semanlink.net/tag/bbc|prefLabel|BBC +http://www.semanlink.net/tag/bbc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bbc|uri|http://www.semanlink.net/tag/bbc +http://www.semanlink.net/tag/structured_data_embedding|creationTime|2019-04-30T13:29:36Z +http://www.semanlink.net/tag/structured_data_embedding|prefLabel|Structured Data Embedding +http://www.semanlink.net/tag/structured_data_embedding|broader|http://www.semanlink.net/tag/embeddings +http://www.semanlink.net/tag/structured_data_embedding|creationDate|2019-04-30 +http://www.semanlink.net/tag/structured_data_embedding|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/structured_data_embedding|uri|http://www.semanlink.net/tag/structured_data_embedding +http://www.semanlink.net/tag/structured_data_embedding|broader_prefLabel|Embeddings +http://www.semanlink.net/tag/structured_data_embedding|broader_altLabel|embedding +http://www.semanlink.net/tag/structured_data_embedding|broader_related|http://www.semanlink.net/tag/similarity_queries +http://www.semanlink.net/tag/structured_data_embedding|broader_related|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/folksonomies_vs_ontologies|prefLabel|Folksonomies vs ontologies +http://www.semanlink.net/tag/folksonomies_vs_ontologies|broader|http://www.semanlink.net/tag/folksonomy +http://www.semanlink.net/tag/folksonomies_vs_ontologies|broader|http://www.semanlink.net/tag/ontologies 
+http://www.semanlink.net/tag/folksonomies_vs_ontologies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/folksonomies_vs_ontologies|uri|http://www.semanlink.net/tag/folksonomies_vs_ontologies +http://www.semanlink.net/tag/folksonomies_vs_ontologies|broader_prefLabel|Folksonomy +http://www.semanlink.net/tag/folksonomies_vs_ontologies|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/folksonomies_vs_ontologies|broader_altLabel|Folksonomies +http://www.semanlink.net/tag/folksonomies_vs_ontologies|broader_altLabel|Ontology +http://www.semanlink.net/tag/lab_grown_organs|creationTime|2018-08-04T14:22:03Z +http://www.semanlink.net/tag/lab_grown_organs|prefLabel|Lab-grown organs +http://www.semanlink.net/tag/lab_grown_organs|broader|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.semanlink.net/tag/lab_grown_organs|broader|http://www.semanlink.net/tag/bio_engineering +http://www.semanlink.net/tag/lab_grown_organs|creationDate|2018-08-04 +http://www.semanlink.net/tag/lab_grown_organs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lab_grown_organs|uri|http://www.semanlink.net/tag/lab_grown_organs +http://www.semanlink.net/tag/lab_grown_organs|broader_prefLabel|Nous vivons une époque moderne +http://www.semanlink.net/tag/lab_grown_organs|broader_prefLabel|Bio-Engineering +http://www.semanlink.net/tag/lab_grown_organs|broader_altLabel|C'est déjà demain +http://www.semanlink.net/tag/antifascisme|creationTime|2017-12-16T15:07:14Z +http://www.semanlink.net/tag/antifascisme|prefLabel|Antifascisme +http://www.semanlink.net/tag/antifascisme|broader|http://www.semanlink.net/tag/fascisme +http://www.semanlink.net/tag/antifascisme|creationDate|2017-12-16 +http://www.semanlink.net/tag/antifascisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antifascisme|uri|http://www.semanlink.net/tag/antifascisme +http://www.semanlink.net/tag/antifascisme|broader_prefLabel|Fascisme +http://www.semanlink.net/tag/ble|creationTime|2010-05-28T00:53:25Z +http://www.semanlink.net/tag/ble|prefLabel|Blé +http://www.semanlink.net/tag/ble|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/ble|creationDate|2010-05-28 +http://www.semanlink.net/tag/ble|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ble|uri|http://www.semanlink.net/tag/ble +http://www.semanlink.net/tag/ble|broader_prefLabel|Agriculture +http://www.semanlink.net/tag/pays_bas|prefLabel|Pays-Bas +http://www.semanlink.net/tag/pays_bas|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/pays_bas|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/pays_bas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pays_bas|uri|http://www.semanlink.net/tag/pays_bas +http://www.semanlink.net/tag/pays_bas|broader_prefLabel|Europe +http://www.semanlink.net/tag/pays_bas|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/features_machine_learning|creationTime|2017-12-06T16:55:31Z +http://www.semanlink.net/tag/features_machine_learning|prefLabel|Features (Machine Learning) +http://www.semanlink.net/tag/features_machine_learning|creationDate|2017-12-06 +http://www.semanlink.net/tag/features_machine_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/features_machine_learning|uri|http://www.semanlink.net/tag/features_machine_learning +http://www.semanlink.net/tag/los_angeles|creationTime|2009-10-18T22:45:28Z +http://www.semanlink.net/tag/los_angeles|prefLabel|Los Angeles +http://www.semanlink.net/tag/los_angeles|creationDate|2009-10-18 +http://www.semanlink.net/tag/los_angeles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/los_angeles|uri|http://www.semanlink.net/tag/los_angeles +http://www.semanlink.net/tag/pdf_format|creationTime|2020-01-23T18:15:43Z +http://www.semanlink.net/tag/pdf_format|prefLabel|pdf format +http://www.semanlink.net/tag/pdf_format|creationDate|2020-01-23 +http://www.semanlink.net/tag/pdf_format|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pdf_format|uri|http://www.semanlink.net/tag/pdf_format +http://www.semanlink.net/tag/automl|creationTime|2020-03-17T22:02:12Z +http://www.semanlink.net/tag/automl|prefLabel|AutoML +http://www.semanlink.net/tag/automl|creationDate|2020-03-17 +http://www.semanlink.net/tag/automl|comment|the process of automating the process of applying machine learning to real-world problems. +http://www.semanlink.net/tag/automl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/automl|describedBy|https://en.wikipedia.org/wiki/Automated_machine_learning +http://www.semanlink.net/tag/automl|uri|http://www.semanlink.net/tag/automl +http://www.semanlink.net/tag/jersey_rdf|creationTime|2011-08-19T11:24:35Z +http://www.semanlink.net/tag/jersey_rdf|prefLabel|jersey/RDF +http://www.semanlink.net/tag/jersey_rdf|broader|http://www.semanlink.net/tag/jersey +http://www.semanlink.net/tag/jersey_rdf|broader|http://www.semanlink.net/tag/semantic_web_services +http://www.semanlink.net/tag/jersey_rdf|creationDate|2011-08-19 +http://www.semanlink.net/tag/jersey_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jersey_rdf|uri|http://www.semanlink.net/tag/jersey_rdf +http://www.semanlink.net/tag/jersey_rdf|broader_prefLabel|jersey +http://www.semanlink.net/tag/jersey_rdf|broader_prefLabel|Semantic Web Services +http://www.semanlink.net/tag/numericable|creationTime|2013-11-19T00:57:43Z +http://www.semanlink.net/tag/numericable|prefLabel|Numéricable +http://www.semanlink.net/tag/numericable|creationDate|2013-11-19 +http://www.semanlink.net/tag/numericable|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/numericable|uri|http://www.semanlink.net/tag/numericable +http://www.semanlink.net/tag/control_theory|creationTime|2015-12-30T20:13:20Z +http://www.semanlink.net/tag/control_theory|prefLabel|Control theory +http://www.semanlink.net/tag/control_theory|creationDate|2015-12-30 +http://www.semanlink.net/tag/control_theory|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/control_theory|describedBy|https://en.wikipedia.org/wiki/Control_theory +http://www.semanlink.net/tag/control_theory|uri|http://www.semanlink.net/tag/control_theory +http://www.semanlink.net/tag/edward_curry|creationTime|2010-03-06T13:50:37Z +http://www.semanlink.net/tag/edward_curry|prefLabel|Edward Curry +http://www.semanlink.net/tag/edward_curry|creationDate|2010-03-06 +http://www.semanlink.net/tag/edward_curry|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/edward_curry|uri|http://www.semanlink.net/tag/edward_curry 
+http://www.semanlink.net/tag/timeline|creationTime|2012-11-28T00:02:04Z +http://www.semanlink.net/tag/timeline|prefLabel|Timeline +http://www.semanlink.net/tag/timeline|creationDate|2012-11-28 +http://www.semanlink.net/tag/timeline|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/timeline|uri|http://www.semanlink.net/tag/timeline +http://www.semanlink.net/tag/huge_rdf_data_source|prefLabel|Huge RDF data source +http://www.semanlink.net/tag/huge_rdf_data_source|broader|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/huge_rdf_data_source|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/huge_rdf_data_source|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/huge_rdf_data_source|uri|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/huge_rdf_data_source|broader_prefLabel|RDF Data source +http://www.semanlink.net/tag/huge_rdf_data_source|broader_prefLabel|RDF +http://www.semanlink.net/tag/huge_rdf_data_source|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/huge_rdf_data_source|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/huge_rdf_data_source|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/huge_rdf_data_source|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/huge_rdf_data_source|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/obelisque_d_axoum|prefLabel|Obélisque d'Axoum +http://www.semanlink.net/tag/obelisque_d_axoum|broader|http://www.semanlink.net/tag/obelisque +http://www.semanlink.net/tag/obelisque_d_axoum|broader|http://www.semanlink.net/tag/pillage_de_vestiges_antiques +http://www.semanlink.net/tag/obelisque_d_axoum|broader|http://www.semanlink.net/tag/italie +http://www.semanlink.net/tag/obelisque_d_axoum|broader|http://www.semanlink.net/tag/axoum +http://www.semanlink.net/tag/obelisque_d_axoum|broader|http://www.semanlink.net/tag/mussolini +http://www.semanlink.net/tag/obelisque_d_axoum|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/obelisque_d_axoum|uri|http://www.semanlink.net/tag/obelisque_d_axoum +http://www.semanlink.net/tag/obelisque_d_axoum|broader_prefLabel|Obélisque +http://www.semanlink.net/tag/obelisque_d_axoum|broader_prefLabel|Pillage de vestiges antiques +http://www.semanlink.net/tag/obelisque_d_axoum|broader_prefLabel|Italie +http://www.semanlink.net/tag/obelisque_d_axoum|broader_prefLabel|Axoum +http://www.semanlink.net/tag/obelisque_d_axoum|broader_prefLabel|Mussolini +http://www.semanlink.net/tag/obelisque_d_axoum|broader_altLabel|Aksoum +http://www.semanlink.net/tag/obelisque_d_axoum|broader_altLabel|Royaume d'Aksoum +http://www.semanlink.net/tag/rules|creationTime|2007-02-08T23:35:05Z +http://www.semanlink.net/tag/rules|prefLabel|Rules +http://www.semanlink.net/tag/rules|creationDate|2007-02-08 +http://www.semanlink.net/tag/rules|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rules|uri|http://www.semanlink.net/tag/rules +http://www.semanlink.net/tag/terre_cuite|prefLabel|Terre cuite +http://www.semanlink.net/tag/terre_cuite|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/terre_cuite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/terre_cuite|uri|http://www.semanlink.net/tag/terre_cuite 
+http://www.semanlink.net/tag/terre_cuite|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/terre_cuite|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/sql|creationTime|2007-06-14T21:54:08Z +http://www.semanlink.net/tag/sql|prefLabel|SQL +http://www.semanlink.net/tag/sql|broader|http://www.semanlink.net/tag/database +http://www.semanlink.net/tag/sql|creationDate|2007-06-14 +http://www.semanlink.net/tag/sql|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sql|uri|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/sql|broader_prefLabel|Database +http://www.semanlink.net/tag/data_visualisation|creationTime|2015-10-16T11:37:03Z +http://www.semanlink.net/tag/data_visualisation|prefLabel|Data visualisation +http://www.semanlink.net/tag/data_visualisation|creationDate|2015-10-16 +http://www.semanlink.net/tag/data_visualisation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_visualisation|uri|http://www.semanlink.net/tag/data_visualisation +http://www.semanlink.net/tag/thrace|prefLabel|Thrace +http://www.semanlink.net/tag/thrace|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/thrace|broader|http://www.semanlink.net/tag/bulgarie +http://www.semanlink.net/tag/thrace|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thrace|uri|http://www.semanlink.net/tag/thrace +http://www.semanlink.net/tag/thrace|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/thrace|broader_prefLabel|Bulgarie +http://www.semanlink.net/tag/henry_story|creationTime|2008-04-07T14:37:28Z +http://www.semanlink.net/tag/henry_story|prefLabel|Henry Story +http://www.semanlink.net/tag/henry_story|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/henry_story|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/henry_story|related|http://www.semanlink.net/tag/sun_microsystems +http://www.semanlink.net/tag/henry_story|related|http://www.semanlink.net/tag/semantic_camp_paris +http://www.semanlink.net/tag/henry_story|creationDate|2008-04-07 +http://www.semanlink.net/tag/henry_story|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/henry_story|homepage|http://bblfish.net/ +http://www.semanlink.net/tag/henry_story|uri|http://www.semanlink.net/tag/henry_story +http://www.semanlink.net/tag/henry_story|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/henry_story|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/henry_story|broader_altLabel|Technical guys +http://www.semanlink.net/tag/folksonomy|prefLabel|Folksonomy +http://www.semanlink.net/tag/folksonomy|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/folksonomy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/folksonomy|altLabel|Folksonomies +http://www.semanlink.net/tag/folksonomy|uri|http://www.semanlink.net/tag/folksonomy +http://www.semanlink.net/tag/folksonomy|broader_prefLabel|Tagging +http://www.semanlink.net/tag/piggy_bank|prefLabel|Piggy Bank +http://www.semanlink.net/tag/piggy_bank|broader|http://www.semanlink.net/tag/simile +http://www.semanlink.net/tag/piggy_bank|broader|http://www.semanlink.net/tag/ajax +http://www.semanlink.net/tag/piggy_bank|comment|"Piggy Bank is a Firefox extension that turns your browser into a mashup platform, by allowing you to 
+extract data from different web sites and mix them together.
+Piggy Bank also allows you store this extracted information locally for you to search later and to exchange at need the collected information with others." +http://www.semanlink.net/tag/piggy_bank|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/piggy_bank|uri|http://www.semanlink.net/tag/piggy_bank +http://www.semanlink.net/tag/piggy_bank|broader_prefLabel|SIMILE +http://www.semanlink.net/tag/piggy_bank|broader_prefLabel|Ajax +http://www.semanlink.net/tag/piggy_bank|broader_altLabel|XMLHttpRequest +http://www.semanlink.net/tag/tsunami|prefLabel|Tsunami +http://www.semanlink.net/tag/tsunami|broader|http://www.semanlink.net/tag/catastrophe_naturelle +http://www.semanlink.net/tag/tsunami|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tsunami|uri|http://www.semanlink.net/tag/tsunami +http://www.semanlink.net/tag/tsunami|broader_prefLabel|Catastrophe naturelle +http://www.semanlink.net/tag/xquery|creationTime|2009-06-24T21:59:36Z +http://www.semanlink.net/tag/xquery|prefLabel|XQuery +http://www.semanlink.net/tag/xquery|broader|http://www.semanlink.net/tag/xml +http://www.semanlink.net/tag/xquery|creationDate|2009-06-24 +http://www.semanlink.net/tag/xquery|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xquery|uri|http://www.semanlink.net/tag/xquery +http://www.semanlink.net/tag/xquery|broader_prefLabel|XML +http://www.semanlink.net/tag/deletefb|creationTime|2019-12-03T00:48:20Z +http://www.semanlink.net/tag/deletefb|prefLabel|DeleteFB +http://www.semanlink.net/tag/deletefb|broader|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/tag/deletefb|broader|http://www.semanlink.net/tag/social_networkd_are_bad +http://www.semanlink.net/tag/deletefb|broader|http://www.semanlink.net/tag/the_web_sucks +http://www.semanlink.net/tag/deletefb|broader|http://www.semanlink.net/tag/dark_side_of_tech +http://www.semanlink.net/tag/deletefb|creationDate|2019-12-03 +http://www.semanlink.net/tag/deletefb|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deletefb|uri|http://www.semanlink.net/tag/deletefb +http://www.semanlink.net/tag/deletefb|broader_prefLabel|Facebook +http://www.semanlink.net/tag/deletefb|broader_prefLabel|Social Networkd are bad +http://www.semanlink.net/tag/deletefb|broader_prefLabel|The web sucks +http://www.semanlink.net/tag/deletefb|broader_prefLabel|Dark side of Tech +http://www.semanlink.net/tag/deletefb|broader_altLabel|FB +http://www.semanlink.net/tag/universites_francaises|prefLabel|Universités françaises +http://www.semanlink.net/tag/universites_francaises|broader|http://www.semanlink.net/tag/universite +http://www.semanlink.net/tag/universites_francaises|broader|http://www.semanlink.net/tag/enseignement_superieur +http://www.semanlink.net/tag/universites_francaises|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/universites_francaises|uri|http://www.semanlink.net/tag/universites_francaises +http://www.semanlink.net/tag/universites_francaises|broader_prefLabel|Université +http://www.semanlink.net/tag/universites_francaises|broader_prefLabel|Enseignement supérieur +http://www.semanlink.net/tag/rdf_blank_nodes|creationTime|2011-01-15T02:53:04Z +http://www.semanlink.net/tag/rdf_blank_nodes|prefLabel|RDF blank nodes +http://www.semanlink.net/tag/rdf_blank_nodes|broader|http://www.semanlink.net/tag/rdf_dev +http://www.semanlink.net/tag/rdf_blank_nodes|creationDate|2011-01-15 
+http://www.semanlink.net/tag/rdf_blank_nodes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_blank_nodes|uri|http://www.semanlink.net/tag/rdf_blank_nodes +http://www.semanlink.net/tag/rdf_blank_nodes|broader_prefLabel|RDF dev +http://www.semanlink.net/tag/artificial_artificial_intelligence|prefLabel|Artificial, Artificial Intelligence +http://www.semanlink.net/tag/artificial_artificial_intelligence|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/artificial_artificial_intelligence|uri|http://www.semanlink.net/tag/artificial_artificial_intelligence +http://www.semanlink.net/tag/optical_computing|creationTime|2018-08-28T09:40:22Z +http://www.semanlink.net/tag/optical_computing|prefLabel|Optical computing +http://www.semanlink.net/tag/optical_computing|creationDate|2018-08-28 +http://www.semanlink.net/tag/optical_computing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/optical_computing|uri|http://www.semanlink.net/tag/optical_computing +http://www.semanlink.net/tag/cross_lingual_word_embeddings|creationTime|2018-05-20T12:02:34Z +http://www.semanlink.net/tag/cross_lingual_word_embeddings|prefLabel|Cross-lingual Word Embeddings +http://www.semanlink.net/tag/cross_lingual_word_embeddings|broader|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/cross_lingual_word_embeddings|related|http://www.semanlink.net/tag/machine_translation +http://www.semanlink.net/tag/cross_lingual_word_embeddings|creationDate|2018-05-20 +http://www.semanlink.net/tag/cross_lingual_word_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cross_lingual_word_embeddings|uri|http://www.semanlink.net/tag/cross_lingual_word_embeddings +http://www.semanlink.net/tag/cross_lingual_word_embeddings|broader_prefLabel|Word embeddings +http://www.semanlink.net/tag/cross_lingual_word_embeddings|broader_altLabel|Word Embedding +http://www.semanlink.net/tag/cross_lingual_word_embeddings|broader_altLabel|Plongement lexical +http://www.semanlink.net/tag/cross_lingual_word_embeddings|broader_related|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/cross_lingual_word_embeddings|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/cross_lingual_word_embeddings|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/realisateur|prefLabel|Réalisateur +http://www.semanlink.net/tag/realisateur|broader|http://www.semanlink.net/tag/cinema +http://www.semanlink.net/tag/realisateur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/realisateur|altLabel|Cinéaste +http://www.semanlink.net/tag/realisateur|uri|http://www.semanlink.net/tag/realisateur +http://www.semanlink.net/tag/realisateur|broader_prefLabel|Cinéma +http://www.semanlink.net/tag/data_management_platform|creationTime|2013-09-02T11:06:16Z +http://www.semanlink.net/tag/data_management_platform|prefLabel|Data management platform +http://www.semanlink.net/tag/data_management_platform|creationDate|2013-09-02 +http://www.semanlink.net/tag/data_management_platform|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_management_platform|uri|http://www.semanlink.net/tag/data_management_platform +http://www.semanlink.net/tag/bricolage_mac|prefLabel|Bricolage Mac 
+http://www.semanlink.net/tag/bricolage_mac|broader|http://www.semanlink.net/tag/macintosh +http://www.semanlink.net/tag/bricolage_mac|broader|http://www.semanlink.net/tag/bricolage +http://www.semanlink.net/tag/bricolage_mac|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bricolage_mac|uri|http://www.semanlink.net/tag/bricolage_mac +http://www.semanlink.net/tag/bricolage_mac|broader_prefLabel|Macintosh +http://www.semanlink.net/tag/bricolage_mac|broader_prefLabel|Bricolage +http://www.semanlink.net/tag/curiosite_naturelle|prefLabel|Curiosités naturelles +http://www.semanlink.net/tag/curiosite_naturelle|broader|http://www.semanlink.net/tag/nature +http://www.semanlink.net/tag/curiosite_naturelle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/curiosite_naturelle|uri|http://www.semanlink.net/tag/curiosite_naturelle +http://www.semanlink.net/tag/curiosite_naturelle|broader_prefLabel|Nature +http://www.semanlink.net/tag/digital_entertainment|prefLabel|Digital entertainment +http://www.semanlink.net/tag/digital_entertainment|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/digital_entertainment|uri|http://www.semanlink.net/tag/digital_entertainment +http://www.semanlink.net/tag/node2vec|creationTime|2018-05-10T14:33:38Z +http://www.semanlink.net/tag/node2vec|prefLabel|Node2Vec +http://www.semanlink.net/tag/node2vec|broader|http://www.semanlink.net/tag/node_embeddings +http://www.semanlink.net/tag/node2vec|related|http://www.semanlink.net/tag/random_walk +http://www.semanlink.net/tag/node2vec|creationDate|2018-05-10 +http://www.semanlink.net/tag/node2vec|comment|Framework for learning embeddings for nodes in networks by maximizing the likelihood of preserving network neighborhoods of nodes. Uses a flexible notion of a node's neighborhood and a biased random walk procedure, which efficiently explores diverse neighborhoods. 
+http://www.semanlink.net/tag/node2vec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/node2vec|uri|http://www.semanlink.net/tag/node2vec +http://www.semanlink.net/tag/node2vec|broader_prefLabel|Node Embeddings +http://www.semanlink.net/tag/bricolage|creationTime|2011-10-13T23:44:57Z +http://www.semanlink.net/tag/bricolage|prefLabel|Bricolage +http://www.semanlink.net/tag/bricolage|creationDate|2011-10-13 +http://www.semanlink.net/tag/bricolage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bricolage|uri|http://www.semanlink.net/tag/bricolage +http://www.semanlink.net/tag/antimatiere|prefLabel|Antimatière +http://www.semanlink.net/tag/antimatiere|broader|http://www.semanlink.net/tag/physique_des_particules +http://www.semanlink.net/tag/antimatiere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antimatiere|uri|http://www.semanlink.net/tag/antimatiere +http://www.semanlink.net/tag/antimatiere|broader_prefLabel|Physique des particules +http://www.semanlink.net/tag/java_tip|creationTime|2017-04-07T01:19:34Z +http://www.semanlink.net/tag/java_tip|prefLabel|Java tip +http://www.semanlink.net/tag/java_tip|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/java_tip|broader|http://www.semanlink.net/tag/dev_tips +http://www.semanlink.net/tag/java_tip|creationDate|2017-04-07 +http://www.semanlink.net/tag/java_tip|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/java_tip|uri|http://www.semanlink.net/tag/java_tip +http://www.semanlink.net/tag/java_tip|broader_prefLabel|Java +http://www.semanlink.net/tag/java_tip|broader_prefLabel|Dev tips +http://www.semanlink.net/tag/java_tip|broader_altLabel|Dev tip +http://www.semanlink.net/tag/censorship|creationTime|2013-09-29T13:28:31Z +http://www.semanlink.net/tag/censorship|prefLabel|Censorship +http://www.semanlink.net/tag/censorship|creationDate|2013-09-29 +http://www.semanlink.net/tag/censorship|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/censorship|describedBy|https://en.wikipedia.org/wiki/Censorship +http://www.semanlink.net/tag/censorship|altLabel|Censure +http://www.semanlink.net/tag/censorship|uri|http://www.semanlink.net/tag/censorship +http://www.semanlink.net/tag/deep_links|creationTime|2014-12-20T10:28:19Z +http://www.semanlink.net/tag/deep_links|prefLabel|Deep Links +http://www.semanlink.net/tag/deep_links|creationDate|2014-12-20 +http://www.semanlink.net/tag/deep_links|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deep_links|uri|http://www.semanlink.net/tag/deep_links +http://www.semanlink.net/tag/renaissance|prefLabel|Renaissance +http://www.semanlink.net/tag/renaissance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/renaissance|uri|http://www.semanlink.net/tag/renaissance +http://www.semanlink.net/tag/gamestop|creationTime|2021-01-29T10:15:59Z +http://www.semanlink.net/tag/gamestop|prefLabel|Gamestop +http://www.semanlink.net/tag/gamestop|broader|http://www.semanlink.net/tag/speculation +http://www.semanlink.net/tag/gamestop|broader|http://www.semanlink.net/tag/short_sales +http://www.semanlink.net/tag/gamestop|broader|http://www.semanlink.net/tag/fonds_speculatifs +http://www.semanlink.net/tag/gamestop|broader|http://www.semanlink.net/tag/vive_le_capitalisme 
+http://www.semanlink.net/tag/gamestop|broader|http://www.semanlink.net/tag/reddit
+http://www.semanlink.net/tag/gamestop|creationDate|2021-01-29
+http://www.semanlink.net/tag/gamestop|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/gamestop|uri|http://www.semanlink.net/tag/gamestop
+http://www.semanlink.net/tag/gamestop|broader_prefLabel|Spéculation
+http://www.semanlink.net/tag/gamestop|broader_prefLabel|Short selling
+http://www.semanlink.net/tag/gamestop|broader_prefLabel|Fonds spéculatifs
+http://www.semanlink.net/tag/gamestop|broader_prefLabel|Vive le capitalisme !
+http://www.semanlink.net/tag/gamestop|broader_prefLabel|Reddit
+http://www.semanlink.net/tag/gamestop|broader_altLabel|Ventes à découvert
+http://www.semanlink.net/tag/gamestop|broader_altLabel|Short sales
+http://www.semanlink.net/tag/gamestop|broader_altLabel|Hedge funds
+http://www.semanlink.net/tag/gamestop|broader_altLabel|Capitalisme de merde
+http://www.semanlink.net/tag/rock|prefLabel|Rock
+http://www.semanlink.net/tag/rock|broader|http://www.semanlink.net/tag/musique
+http://www.semanlink.net/tag/rock|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rock|uri|http://www.semanlink.net/tag/rock
+http://www.semanlink.net/tag/rock|broader_prefLabel|Musique
+http://www.semanlink.net/tag/rock|broader_altLabel|Music
+http://www.semanlink.net/tag/islamisme|prefLabel|Islamisme
+http://www.semanlink.net/tag/islamisme|broader|http://www.semanlink.net/tag/islam
+http://www.semanlink.net/tag/islamisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/islamisme|uri|http://www.semanlink.net/tag/islamisme
+http://www.semanlink.net/tag/islamisme|broader_prefLabel|Islam
+http://www.semanlink.net/tag/neutrino|prefLabel|Neutrino
+http://www.semanlink.net/tag/neutrino|broader|http://www.semanlink.net/tag/physique_des_particules
+http://www.semanlink.net/tag/neutrino|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/neutrino|altLabel|Neutrinos
+http://www.semanlink.net/tag/neutrino|uri|http://www.semanlink.net/tag/neutrino
+http://www.semanlink.net/tag/neutrino|broader_prefLabel|Physique des particules
+http://www.semanlink.net/tag/enfance|prefLabel|Enfance
+http://www.semanlink.net/tag/enfance|broader|http://www.semanlink.net/tag/jeunesse
+http://www.semanlink.net/tag/enfance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/enfance|uri|http://www.semanlink.net/tag/enfance
+http://www.semanlink.net/tag/enfance|broader_prefLabel|Jeunesse
+http://www.semanlink.net/tag/commandant_cousteau|creationTime|2021-01-20T22:29:22Z
+http://www.semanlink.net/tag/commandant_cousteau|prefLabel|Commandant Cousteau
+http://www.semanlink.net/tag/commandant_cousteau|related|http://www.semanlink.net/tag/mer
+http://www.semanlink.net/tag/commandant_cousteau|creationDate|2021-01-20
+http://www.semanlink.net/tag/commandant_cousteau|comment|Le Commandant Cousteau : tout commença dans l'eau ([Anagramme](tag:anagramme))
+http://www.semanlink.net/tag/commandant_cousteau|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/commandant_cousteau|describedBy|https://fr.wikipedia.org/wiki/Jacques-Yves_Cousteau
+http://www.semanlink.net/tag/commandant_cousteau|uri|http://www.semanlink.net/tag/commandant_cousteau
+http://www.semanlink.net/tag/eswc_2021|creationTime|2021-06-10T16:00:07Z
+http://www.semanlink.net/tag/eswc_2021|prefLabel|ESWC 2021
+http://www.semanlink.net/tag/eswc_2021|broader|http://www.semanlink.net/tag/eswc
+http://www.semanlink.net/tag/eswc_2021|creationDate|2021-06-10
+http://www.semanlink.net/tag/eswc_2021|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/eswc_2021|uri|http://www.semanlink.net/tag/eswc_2021
+http://www.semanlink.net/tag/eswc_2021|broader_prefLabel|ESWC
+http://www.semanlink.net/tag/fleur|prefLabel|Fleur
+http://www.semanlink.net/tag/fleur|broader|http://www.semanlink.net/tag/plante
+http://www.semanlink.net/tag/fleur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/fleur|uri|http://www.semanlink.net/tag/fleur
+http://www.semanlink.net/tag/fleur|broader_prefLabel|Plante
+http://www.semanlink.net/tag/politique_de_l_enfant_unique|prefLabel|Politique de l'enfant unique
+http://www.semanlink.net/tag/politique_de_l_enfant_unique|broader|http://www.semanlink.net/tag/chine
+http://www.semanlink.net/tag/politique_de_l_enfant_unique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/politique_de_l_enfant_unique|uri|http://www.semanlink.net/tag/politique_de_l_enfant_unique
+http://www.semanlink.net/tag/politique_de_l_enfant_unique|broader_prefLabel|Chine
+http://www.semanlink.net/tag/politique_de_l_enfant_unique|broader_altLabel|China
+http://www.semanlink.net/tag/colonisation|prefLabel|Colonisation
+http://www.semanlink.net/tag/colonisation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/colonisation|altLabel|Colonialisme
+http://www.semanlink.net/tag/colonisation|uri|http://www.semanlink.net/tag/colonisation
+http://www.semanlink.net/tag/wiki_markup|creationTime|2014-09-29T23:03:49Z
+http://www.semanlink.net/tag/wiki_markup|prefLabel|Wiki markup
+http://www.semanlink.net/tag/wiki_markup|broader|http://www.semanlink.net/tag/text_tools
+http://www.semanlink.net/tag/wiki_markup|creationDate|2014-09-29
+http://www.semanlink.net/tag/wiki_markup|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/wiki_markup|uri|http://www.semanlink.net/tag/wiki_markup
+http://www.semanlink.net/tag/wiki_markup|broader_prefLabel|Text tools
+http://www.semanlink.net/tag/delocalisation_des_services|prefLabel|Délocalisation des services
+http://www.semanlink.net/tag/delocalisation_des_services|broader|http://www.semanlink.net/tag/delocalisations
+http://www.semanlink.net/tag/delocalisation_des_services|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/delocalisation_des_services|uri|http://www.semanlink.net/tag/delocalisation_des_services
+http://www.semanlink.net/tag/delocalisation_des_services|broader_prefLabel|Délocalisations
+http://www.semanlink.net/tag/web_2_0_application|prefLabel|Web 2.0 application
+http://www.semanlink.net/tag/web_2_0_application|broader|http://www.semanlink.net/tag/web_2_0
+http://www.semanlink.net/tag/web_2_0_application|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/web_2_0_application|uri|http://www.semanlink.net/tag/web_2_0_application
+http://www.semanlink.net/tag/web_2_0_application|broader_prefLabel|Web 2.0
+http://www.semanlink.net/tag/nullite_francaise|creationTime|2020-10-08T15:32:44Z
+http://www.semanlink.net/tag/nullite_francaise|prefLabel|Nullité française
+http://www.semanlink.net/tag/nullite_francaise|broader|http://www.semanlink.net/tag/etat_de_la_france
+http://www.semanlink.net/tag/nullite_francaise|creationDate|2020-10-08 +http://www.semanlink.net/tag/nullite_francaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nullite_francaise|uri|http://www.semanlink.net/tag/nullite_francaise +http://www.semanlink.net/tag/nullite_francaise|broader_prefLabel|Etat de la France +http://www.semanlink.net/tag/data_portal|creationTime|2012-10-23T00:54:24Z +http://www.semanlink.net/tag/data_portal|prefLabel|Data portal +http://www.semanlink.net/tag/data_portal|creationDate|2012-10-23 +http://www.semanlink.net/tag/data_portal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_portal|uri|http://www.semanlink.net/tag/data_portal +http://www.semanlink.net/tag/animal|prefLabel|Animal +http://www.semanlink.net/tag/animal|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/animal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/animal|uri|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/animal|broader_prefLabel|Biology +http://www.semanlink.net/tag/animal|broader_altLabel|Biologie +http://www.semanlink.net/tag/xri|prefLabel|XRI +http://www.semanlink.net/tag/xri|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xri|uri|http://www.semanlink.net/tag/xri +http://www.semanlink.net/tag/reseaux_bayesiens|prefLabel|Bayesian analysis +http://www.semanlink.net/tag/reseaux_bayesiens|broader|http://www.semanlink.net/tag/uncertainty_reasoning +http://www.semanlink.net/tag/reseaux_bayesiens|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/reseaux_bayesiens|broader|http://www.semanlink.net/tag/probabilistic_graphical_models +http://www.semanlink.net/tag/reseaux_bayesiens|related|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/reseaux_bayesiens|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/reseaux_bayesiens|altLabel|Réseaux bayésiens +http://www.semanlink.net/tag/reseaux_bayesiens|altLabel|Bayesian networks +http://www.semanlink.net/tag/reseaux_bayesiens|altLabel|bayesian +http://www.semanlink.net/tag/reseaux_bayesiens|uri|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/reseaux_bayesiens|broader_prefLabel|Uncertainty Reasoning +http://www.semanlink.net/tag/reseaux_bayesiens|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/reseaux_bayesiens|broader_prefLabel|Probabilistic Graphical Models +http://www.semanlink.net/tag/reseaux_bayesiens|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/reseaux_bayesiens|broader_altLabel|AI +http://www.semanlink.net/tag/reseaux_bayesiens|broader_altLabel|IA +http://www.semanlink.net/tag/reseaux_bayesiens|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/sexe|prefLabel|Sexe +http://www.semanlink.net/tag/sexe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sexe|uri|http://www.semanlink.net/tag/sexe +http://www.semanlink.net/tag/biodiversite_declin|creationTime|2018-03-27T13:30:15Z +http://www.semanlink.net/tag/biodiversite_declin|prefLabel|Biodiversité : effondrement +http://www.semanlink.net/tag/biodiversite_declin|broader|http://www.semanlink.net/tag/biodiversite +http://www.semanlink.net/tag/biodiversite_declin|broader|http://www.semanlink.net/tag/grands_problemes 
+http://www.semanlink.net/tag/biodiversite_declin|broader|http://www.semanlink.net/tag/crise_ecologique +http://www.semanlink.net/tag/biodiversite_declin|creationDate|2018-03-27 +http://www.semanlink.net/tag/biodiversite_declin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/biodiversite_declin|uri|http://www.semanlink.net/tag/biodiversite_declin +http://www.semanlink.net/tag/biodiversite_declin|broader_prefLabel|Biodiversité +http://www.semanlink.net/tag/biodiversite_declin|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/biodiversite_declin|broader_prefLabel|Crise écologique +http://www.semanlink.net/tag/biodiversite_declin|broader_altLabel|Biodiversity +http://www.semanlink.net/tag/journal_le_monde|prefLabel|Journal Le Monde +http://www.semanlink.net/tag/journal_le_monde|broader|http://www.semanlink.net/tag/journal +http://www.semanlink.net/tag/journal_le_monde|creationDate|2006-11-20 +http://www.semanlink.net/tag/journal_le_monde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/journal_le_monde|uri|http://www.semanlink.net/tag/journal_le_monde +http://www.semanlink.net/tag/journal_le_monde|broader_prefLabel|Presse +http://www.semanlink.net/tag/journal_le_monde|broader_altLabel|Journal +http://www.semanlink.net/tag/jersey|creationTime|2011-02-03T23:13:23Z +http://www.semanlink.net/tag/jersey|prefLabel|jersey +http://www.semanlink.net/tag/jersey|broader|http://www.semanlink.net/tag/jax_rs +http://www.semanlink.net/tag/jersey|broader|http://www.semanlink.net/tag/restful_web_services +http://www.semanlink.net/tag/jersey|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/jersey|broader|http://www.semanlink.net/tag/java_dev +http://www.semanlink.net/tag/jersey|creationDate|2011-02-03 +http://www.semanlink.net/tag/jersey|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jersey|describedBy|http://jersey.java.net/ +http://www.semanlink.net/tag/jersey|uri|http://www.semanlink.net/tag/jersey +http://www.semanlink.net/tag/jersey|broader_prefLabel|JAX-RS +http://www.semanlink.net/tag/jersey|broader_prefLabel|RESTful Web Services +http://www.semanlink.net/tag/jersey|broader_prefLabel|Open Source +http://www.semanlink.net/tag/jersey|broader_prefLabel|Java dev +http://www.semanlink.net/tag/rdf_schema_inferencing|prefLabel|RDF Schema inferencing +http://www.semanlink.net/tag/rdf_schema_inferencing|broader|http://www.semanlink.net/tag/rdf_schema +http://www.semanlink.net/tag/rdf_schema_inferencing|broader|http://www.semanlink.net/tag/inference +http://www.semanlink.net/tag/rdf_schema_inferencing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_schema_inferencing|uri|http://www.semanlink.net/tag/rdf_schema_inferencing +http://www.semanlink.net/tag/rdf_schema_inferencing|broader_prefLabel|RDF Schema +http://www.semanlink.net/tag/rdf_schema_inferencing|broader_prefLabel|Inference +http://www.semanlink.net/tag/rdf_schema_inferencing|broader_altLabel|RDF-S +http://www.semanlink.net/tag/rdf_schema_inferencing|broader_altLabel|RDFS +http://www.semanlink.net/tag/rdf_thrift|creationTime|2014-09-08T18:08:44Z +http://www.semanlink.net/tag/rdf_thrift|prefLabel|RDF Thrift +http://www.semanlink.net/tag/rdf_thrift|broader|http://www.semanlink.net/tag/rdf_binary +http://www.semanlink.net/tag/rdf_thrift|creationDate|2014-09-08 
+http://www.semanlink.net/tag/rdf_thrift|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_thrift|uri|http://www.semanlink.net/tag/rdf_thrift +http://www.semanlink.net/tag/rdf_thrift|broader_prefLabel|RDF/binary +http://www.semanlink.net/tag/rdf_access_to_relational_databases|creationTime|2009-02-10T22:38:58Z +http://www.semanlink.net/tag/rdf_access_to_relational_databases|prefLabel|RDF Access to Relational Databases +http://www.semanlink.net/tag/rdf_access_to_relational_databases|broader|http://www.semanlink.net/tag/rdf_and_database +http://www.semanlink.net/tag/rdf_access_to_relational_databases|broader|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.semanlink.net/tag/rdf_access_to_relational_databases|creationDate|2009-02-10 +http://www.semanlink.net/tag/rdf_access_to_relational_databases|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_access_to_relational_databases|uri|http://www.semanlink.net/tag/rdf_access_to_relational_databases +http://www.semanlink.net/tag/rdf_access_to_relational_databases|broader_prefLabel|RDF and database +http://www.semanlink.net/tag/rdf_access_to_relational_databases|broader_prefLabel|Relational Databases and the Semantic Web +http://www.semanlink.net/tag/rdf_access_to_relational_databases|broader_altLabel|RDF and database +http://www.semanlink.net/tag/rdf_access_to_relational_databases|broader_related|http://www.semanlink.net/tag/nosql +http://www.semanlink.net/tag/xml_schema|prefLabel|XML Schema +http://www.semanlink.net/tag/xml_schema|broader|http://www.semanlink.net/tag/xml +http://www.semanlink.net/tag/xml_schema|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xml_schema|uri|http://www.semanlink.net/tag/xml_schema +http://www.semanlink.net/tag/xml_schema|broader_prefLabel|XML +http://www.semanlink.net/tag/rio_de_janeiro|creationTime|2009-06-14T21:29:03Z +http://www.semanlink.net/tag/rio_de_janeiro|prefLabel|Rio de Janeiro +http://www.semanlink.net/tag/rio_de_janeiro|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/rio_de_janeiro|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/rio_de_janeiro|creationDate|2009-06-14 +http://www.semanlink.net/tag/rio_de_janeiro|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rio_de_janeiro|uri|http://www.semanlink.net/tag/rio_de_janeiro +http://www.semanlink.net/tag/rio_de_janeiro|broader_prefLabel|Ville +http://www.semanlink.net/tag/rio_de_janeiro|broader_prefLabel|Brésil +http://www.semanlink.net/tag/rio_de_janeiro|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/dna_nanotechnology|creationTime|2016-07-19T11:31:26Z +http://www.semanlink.net/tag/dna_nanotechnology|prefLabel|DNA nanotechnology +http://www.semanlink.net/tag/dna_nanotechnology|broader|http://www.semanlink.net/tag/nanotechnologies +http://www.semanlink.net/tag/dna_nanotechnology|broader|http://www.semanlink.net/tag/adn +http://www.semanlink.net/tag/dna_nanotechnology|creationDate|2016-07-19 +http://www.semanlink.net/tag/dna_nanotechnology|comment|design and manufacture of artificial nucleic acid structures for technological uses +http://www.semanlink.net/tag/dna_nanotechnology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dna_nanotechnology|describedBy|https://en.wikipedia.org/wiki/DNA_nanotechnology 
+http://www.semanlink.net/tag/dna_nanotechnology|uri|http://www.semanlink.net/tag/dna_nanotechnology +http://www.semanlink.net/tag/dna_nanotechnology|broader_prefLabel|Nanotechnologies +http://www.semanlink.net/tag/dna_nanotechnology|broader_prefLabel|ADN +http://www.semanlink.net/tag/dna_nanotechnology|broader_altLabel|DNA +http://www.semanlink.net/tag/tika|creationTime|2019-10-20T10:18:07Z +http://www.semanlink.net/tag/tika|prefLabel|Tika +http://www.semanlink.net/tag/tika|broader|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/tag/tika|creationDate|2019-10-20 +http://www.semanlink.net/tag/tika|comment|"[wiki](https://cwiki.apache.org/confluence/display/TIKA/Home) + +[Apache POI :- Get Headings from DOC file](https://stackoverflow.com/questions/19676895/apache-poi-get-headings-from-doc-file)" +http://www.semanlink.net/tag/tika|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tika|homepage|https://tika.apache.org +http://www.semanlink.net/tag/tika|uri|http://www.semanlink.net/tag/tika +http://www.semanlink.net/tag/tika|broader_prefLabel|apache.org +http://www.semanlink.net/tag/jdd_apple|prefLabel|JDD Apple +http://www.semanlink.net/tag/jdd_apple|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/jdd_apple|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jdd_apple|uri|http://www.semanlink.net/tag/jdd_apple +http://www.semanlink.net/tag/jdd_apple|broader_prefLabel|Apple +http://www.semanlink.net/tag/ecocide|creationTime|2015-01-24T12:06:19Z +http://www.semanlink.net/tag/ecocide|prefLabel|Ecocide +http://www.semanlink.net/tag/ecocide|broader|http://www.semanlink.net/tag/crime +http://www.semanlink.net/tag/ecocide|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/ecocide|creationDate|2015-01-24 +http://www.semanlink.net/tag/ecocide|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ecocide|uri|http://www.semanlink.net/tag/ecocide +http://www.semanlink.net/tag/ecocide|broader_prefLabel|Crime +http://www.semanlink.net/tag/ecocide|broader_prefLabel|Écologie +http://www.semanlink.net/tag/rdf_graphs|prefLabel|RDF graphs +http://www.semanlink.net/tag/rdf_graphs|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_graphs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_graphs|uri|http://www.semanlink.net/tag/rdf_graphs +http://www.semanlink.net/tag/rdf_graphs|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_graphs|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_graphs|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_graphs|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_graphs|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_graphs|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/harvard|creationTime|2021-04-11T16:14:01Z +http://www.semanlink.net/tag/harvard|prefLabel|Harvard +http://www.semanlink.net/tag/harvard|broader|http://www.semanlink.net/tag/universites_americaines +http://www.semanlink.net/tag/harvard|creationDate|2021-04-11 +http://www.semanlink.net/tag/harvard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/harvard|uri|http://www.semanlink.net/tag/harvard 
+http://www.semanlink.net/tag/harvard|broader_prefLabel|Universités américaines +http://www.semanlink.net/tag/fondamentalisme_islamique|creationTime|2007-08-07T09:57:45Z +http://www.semanlink.net/tag/fondamentalisme_islamique|prefLabel|Fondamentalisme islamique +http://www.semanlink.net/tag/fondamentalisme_islamique|broader|http://www.semanlink.net/tag/islamisme +http://www.semanlink.net/tag/fondamentalisme_islamique|creationDate|2007-08-07 +http://www.semanlink.net/tag/fondamentalisme_islamique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fondamentalisme_islamique|uri|http://www.semanlink.net/tag/fondamentalisme_islamique +http://www.semanlink.net/tag/fondamentalisme_islamique|broader_prefLabel|Islamisme +http://www.semanlink.net/tag/spiking_neural_network|creationTime|2019-01-29T01:17:30Z +http://www.semanlink.net/tag/spiking_neural_network|prefLabel|Spiking Neural Network +http://www.semanlink.net/tag/spiking_neural_network|broader|http://www.semanlink.net/tag/artificial_neural_network +http://www.semanlink.net/tag/spiking_neural_network|creationDate|2019-01-29 +http://www.semanlink.net/tag/spiking_neural_network|comment|"ANN models that more closely mimic natural neural networks. In addition to neuronal and synaptic state, SNNs also incorporate the concept of time. Neurons in the SNN do not fire at each propagation cycle (as it happens with typical multi-layer perceptron networks), but rather fire only when a membrane potential – an intrinsic quality of the neuron related to its membrane electrical charge – reaches a specific value. When a neuron fires, it generates a signal which travels to other neurons +" +http://www.semanlink.net/tag/spiking_neural_network|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/spiking_neural_network|describedBy|https://en.wikipedia.org/wiki/Spiking_neural_network +http://www.semanlink.net/tag/spiking_neural_network|altLabel|Spiking Neural Networks +http://www.semanlink.net/tag/spiking_neural_network|altLabel|SNN +http://www.semanlink.net/tag/spiking_neural_network|uri|http://www.semanlink.net/tag/spiking_neural_network +http://www.semanlink.net/tag/spiking_neural_network|broader_prefLabel|Neural networks +http://www.semanlink.net/tag/spiking_neural_network|broader_altLabel|Artificial neural network +http://www.semanlink.net/tag/spiking_neural_network|broader_altLabel|ANN +http://www.semanlink.net/tag/spiking_neural_network|broader_altLabel|NN +http://www.semanlink.net/tag/asie_centrale|prefLabel|Asie centrale +http://www.semanlink.net/tag/asie_centrale|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/asie_centrale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/asie_centrale|uri|http://www.semanlink.net/tag/asie_centrale +http://www.semanlink.net/tag/asie_centrale|broader_prefLabel|Asie +http://www.semanlink.net/tag/china_s_social_credit_system|creationTime|2018-01-03T23:37:53Z +http://www.semanlink.net/tag/china_s_social_credit_system|prefLabel|China's Social Credit System +http://www.semanlink.net/tag/china_s_social_credit_system|broader|http://www.semanlink.net/tag/reputation_system +http://www.semanlink.net/tag/china_s_social_credit_system|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/china_s_social_credit_system|broader|http://www.semanlink.net/tag/big_brother +http://www.semanlink.net/tag/china_s_social_credit_system|creationDate|2018-01-03 
+http://www.semanlink.net/tag/china_s_social_credit_system|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/china_s_social_credit_system|describedBy|https://en.wikipedia.org/wiki/Social_Credit_System +http://www.semanlink.net/tag/china_s_social_credit_system|uri|http://www.semanlink.net/tag/china_s_social_credit_system +http://www.semanlink.net/tag/china_s_social_credit_system|broader_prefLabel|Reputation system +http://www.semanlink.net/tag/china_s_social_credit_system|broader_prefLabel|Chine +http://www.semanlink.net/tag/china_s_social_credit_system|broader_prefLabel|Big Brother +http://www.semanlink.net/tag/china_s_social_credit_system|broader_altLabel|China +http://www.semanlink.net/tag/ims_vdex|creationTime|2010-07-26T18:40:49Z +http://www.semanlink.net/tag/ims_vdex|prefLabel|IMS VDEX +http://www.semanlink.net/tag/ims_vdex|broader|http://www.semanlink.net/tag/thesaurus +http://www.semanlink.net/tag/ims_vdex|creationDate|2010-07-26 +http://www.semanlink.net/tag/ims_vdex|comment|The IMS Vocabulary Definition Exchange (VDEX) specification is a grammar for controlled vocabularies. +http://www.semanlink.net/tag/ims_vdex|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ims_vdex|describedBy|https://en.wikipedia.org/wiki/IMS_VDEX +http://www.semanlink.net/tag/ims_vdex|altLabel|VDEX +http://www.semanlink.net/tag/ims_vdex|uri|http://www.semanlink.net/tag/ims_vdex +http://www.semanlink.net/tag/ims_vdex|broader_prefLabel|Thesaurus +http://www.semanlink.net/tag/ims_vdex|broader_related|http://www.semanlink.net/tag/taxonomies +http://www.semanlink.net/tag/knowledge_representation|creationTime|2008-01-02T15:47:21Z +http://www.semanlink.net/tag/knowledge_representation|prefLabel|Knowledge Representation +http://www.semanlink.net/tag/knowledge_representation|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/knowledge_representation|creationDate|2008-01-02 +http://www.semanlink.net/tag/knowledge_representation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_representation|altLabel|KR +http://www.semanlink.net/tag/knowledge_representation|uri|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/tag/knowledge_representation|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/knowledge_representation|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/knowledge_representation|broader_altLabel|AI +http://www.semanlink.net/tag/knowledge_representation|broader_altLabel|IA +http://www.semanlink.net/tag/knowledge_representation|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/enseignement_francais|creationTime|2007-12-16T02:24:51Z +http://www.semanlink.net/tag/enseignement_francais|prefLabel|Enseignement français +http://www.semanlink.net/tag/enseignement_francais|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/enseignement_francais|creationDate|2007-12-16 +http://www.semanlink.net/tag/enseignement_francais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enseignement_francais|uri|http://www.semanlink.net/tag/enseignement_francais +http://www.semanlink.net/tag/enseignement_francais|broader_prefLabel|Education +http://www.semanlink.net/tag/enseignement_francais|broader_altLabel|Enseignement +http://www.semanlink.net/tag/ocean|prefLabel|Océan 
+http://www.semanlink.net/tag/ocean|broader|http://www.semanlink.net/tag/mer +http://www.semanlink.net/tag/ocean|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ocean|uri|http://www.semanlink.net/tag/ocean +http://www.semanlink.net/tag/ocean|broader_prefLabel|Mer +http://www.semanlink.net/tag/jiroft|prefLabel|Jiroft +http://www.semanlink.net/tag/jiroft|broader|http://www.semanlink.net/tag/age_du_bronze +http://www.semanlink.net/tag/jiroft|broader|http://www.semanlink.net/tag/antiquite_iranienne +http://www.semanlink.net/tag/jiroft|broader|http://www.semanlink.net/tag/decouverte_archeologique +http://www.semanlink.net/tag/jiroft|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jiroft|uri|http://www.semanlink.net/tag/jiroft +http://www.semanlink.net/tag/jiroft|broader_prefLabel|Âge du bronze +http://www.semanlink.net/tag/jiroft|broader_prefLabel|Antiquité iranienne +http://www.semanlink.net/tag/jiroft|broader_prefLabel|Découverte archéologique +http://www.semanlink.net/tag/enceintes_connectees|creationTime|2019-04-12T23:17:49Z +http://www.semanlink.net/tag/enceintes_connectees|prefLabel|Enceintes connectées +http://www.semanlink.net/tag/enceintes_connectees|creationDate|2019-04-12 +http://www.semanlink.net/tag/enceintes_connectees|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enceintes_connectees|uri|http://www.semanlink.net/tag/enceintes_connectees +http://www.semanlink.net/tag/vietnam|prefLabel|Vietnam +http://www.semanlink.net/tag/vietnam|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/vietnam|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vietnam|uri|http://www.semanlink.net/tag/vietnam +http://www.semanlink.net/tag/vietnam|broader_prefLabel|Asie +http://www.semanlink.net/tag/usa_histoire|creationTime|2008-11-13T21:25:52Z +http://www.semanlink.net/tag/usa_histoire|prefLabel|USA : histoire +http://www.semanlink.net/tag/usa_histoire|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/usa_histoire|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/usa_histoire|creationDate|2008-11-13 +http://www.semanlink.net/tag/usa_histoire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/usa_histoire|uri|http://www.semanlink.net/tag/usa_histoire +http://www.semanlink.net/tag/usa_histoire|broader_prefLabel|USA +http://www.semanlink.net/tag/usa_histoire|broader_prefLabel|Histoire +http://www.semanlink.net/tag/usa_histoire|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/usa_histoire|broader_altLabel|United States +http://www.semanlink.net/tag/spellchecker|creationTime|2017-10-25T23:42:09Z +http://www.semanlink.net/tag/spellchecker|prefLabel|Spellchecker +http://www.semanlink.net/tag/spellchecker|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/spellchecker|creationDate|2017-10-25 +http://www.semanlink.net/tag/spellchecker|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/spellchecker|altLabel|Spell checker +http://www.semanlink.net/tag/spellchecker|altLabel|Spelling correction +http://www.semanlink.net/tag/spellchecker|uri|http://www.semanlink.net/tag/spellchecker +http://www.semanlink.net/tag/spellchecker|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/medical_data|creationTime|2018-04-14T11:12:05Z 
+http://www.semanlink.net/tag/medical_data|prefLabel|Medical Data +http://www.semanlink.net/tag/medical_data|broader|http://www.semanlink.net/tag/sante +http://www.semanlink.net/tag/medical_data|creationDate|2018-04-14 +http://www.semanlink.net/tag/medical_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/medical_data|uri|http://www.semanlink.net/tag/medical_data +http://www.semanlink.net/tag/medical_data|broader_prefLabel|Santé +http://www.semanlink.net/tag/californie|prefLabel|Californie +http://www.semanlink.net/tag/californie|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/californie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/californie|uri|http://www.semanlink.net/tag/californie +http://www.semanlink.net/tag/californie|broader_prefLabel|USA +http://www.semanlink.net/tag/californie|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/californie|broader_altLabel|United States +http://www.semanlink.net/tag/enterprise_data|creationTime|2011-01-07T23:59:56Z +http://www.semanlink.net/tag/enterprise_data|prefLabel|Enterprise Data +http://www.semanlink.net/tag/enterprise_data|creationDate|2011-01-07 +http://www.semanlink.net/tag/enterprise_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enterprise_data|uri|http://www.semanlink.net/tag/enterprise_data +http://www.semanlink.net/tag/jsp|prefLabel|JSP +http://www.semanlink.net/tag/jsp|broader|http://www.semanlink.net/tag/java +http://www.semanlink.net/tag/jsp|broader|http://www.semanlink.net/tag/internet_related_technologies +http://www.semanlink.net/tag/jsp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jsp|uri|http://www.semanlink.net/tag/jsp +http://www.semanlink.net/tag/jsp|broader_prefLabel|Java +http://www.semanlink.net/tag/jsp|broader_prefLabel|Internet Related Technologies +http://www.semanlink.net/tag/venezuela|prefLabel|Venezuela +http://www.semanlink.net/tag/venezuela|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/venezuela|uri|http://www.semanlink.net/tag/venezuela +http://www.semanlink.net/tag/lycee_alain|creationTime|2012-11-18T03:10:40Z +http://www.semanlink.net/tag/lycee_alain|prefLabel|Lycée Alain +http://www.semanlink.net/tag/lycee_alain|creationDate|2012-11-18 +http://www.semanlink.net/tag/lycee_alain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lycee_alain|uri|http://www.semanlink.net/tag/lycee_alain +http://www.semanlink.net/tag/david_blei|creationTime|2013-08-20T17:34:38Z +http://www.semanlink.net/tag/david_blei|prefLabel|David Blei +http://www.semanlink.net/tag/david_blei|related|http://www.semanlink.net/tag/topic_modeling +http://www.semanlink.net/tag/david_blei|creationDate|2013-08-20 +http://www.semanlink.net/tag/david_blei|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/david_blei|uri|http://www.semanlink.net/tag/david_blei +http://www.semanlink.net/tag/michael_hausenblas|creationTime|2008-09-02T13:58:19Z +http://www.semanlink.net/tag/michael_hausenblas|prefLabel|Michael Hausenblas +http://www.semanlink.net/tag/michael_hausenblas|related|http://www.semanlink.net/tag/eswc_2007 +http://www.semanlink.net/tag/michael_hausenblas|creationDate|2008-09-02 +http://www.semanlink.net/tag/michael_hausenblas|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/michael_hausenblas|describedBy|http://mhausenblas.blogr.com/ +http://www.semanlink.net/tag/michael_hausenblas|weblog|http://mhausenblas.blogr.com/ +http://www.semanlink.net/tag/michael_hausenblas|uri|http://www.semanlink.net/tag/michael_hausenblas +http://www.semanlink.net/tag/atos_origin|creationTime|2008-05-19T19:05:50Z +http://www.semanlink.net/tag/atos_origin|prefLabel|Atos Origin +http://www.semanlink.net/tag/atos_origin|creationDate|2008-05-19 +http://www.semanlink.net/tag/atos_origin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/atos_origin|uri|http://www.semanlink.net/tag/atos_origin +http://www.semanlink.net/tag/ensemble_learning|creationTime|2018-10-28T00:36:43Z +http://www.semanlink.net/tag/ensemble_learning|prefLabel|Ensemble learning +http://www.semanlink.net/tag/ensemble_learning|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/ensemble_learning|creationDate|2018-10-28 +http://www.semanlink.net/tag/ensemble_learning|comment|Methods that use multiple learning algorithms to obtain better predictive performance +http://www.semanlink.net/tag/ensemble_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ensemble_learning|describedBy|https://en.wikipedia.org/wiki/Ensemble_learning +http://www.semanlink.net/tag/ensemble_learning|uri|http://www.semanlink.net/tag/ensemble_learning +http://www.semanlink.net/tag/ensemble_learning|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/bollore|creationTime|2015-08-14T15:15:26Z +http://www.semanlink.net/tag/bollore|prefLabel|Bolloré +http://www.semanlink.net/tag/bollore|creationDate|2015-08-14 +http://www.semanlink.net/tag/bollore|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bollore|uri|http://www.semanlink.net/tag/bollore +http://www.semanlink.net/tag/afrique_medievale|creationTime|2019-11-12T21:09:02Z +http://www.semanlink.net/tag/afrique_medievale|prefLabel|Afrique médiévale +http://www.semanlink.net/tag/afrique_medievale|broader|http://www.semanlink.net/tag/histoire_de_l_afrique +http://www.semanlink.net/tag/afrique_medievale|creationDate|2019-11-12 +http://www.semanlink.net/tag/afrique_medievale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afrique_medievale|uri|http://www.semanlink.net/tag/afrique_medievale +http://www.semanlink.net/tag/afrique_medievale|broader_prefLabel|Histoire de l'Afrique +http://www.semanlink.net/tag/goths|creationTime|2021-08-18T11:01:55Z +http://www.semanlink.net/tag/goths|prefLabel|Goths +http://www.semanlink.net/tag/goths|broader|http://www.semanlink.net/tag/grandes_invasions +http://www.semanlink.net/tag/goths|creationDate|2021-08-18 +http://www.semanlink.net/tag/goths|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/goths|uri|http://www.semanlink.net/tag/goths +http://www.semanlink.net/tag/goths|broader_prefLabel|Grandes invasions +http://www.semanlink.net/tag/goths|broader_related|http://www.semanlink.net/tag/moyen_age +http://www.semanlink.net/tag/jouet|creationTime|2013-07-12T13:20:34Z +http://www.semanlink.net/tag/jouet|prefLabel|Jouet +http://www.semanlink.net/tag/jouet|creationDate|2013-07-12 +http://www.semanlink.net/tag/jouet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jouet|uri|http://www.semanlink.net/tag/jouet 
+http://www.semanlink.net/tag/wolfram|creationTime|2014-09-16T10:08:01Z +http://www.semanlink.net/tag/wolfram|prefLabel|Stephen Wolfram +http://www.semanlink.net/tag/wolfram|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/wolfram|creationDate|2014-09-16 +http://www.semanlink.net/tag/wolfram|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wolfram|describedBy|https://en.wikipedia.org/wiki/Stephen_Wolfram#Applications_of_A_New_Kind_of_Science +http://www.semanlink.net/tag/wolfram|altLabel|Wolfram +http://www.semanlink.net/tag/wolfram|uri|http://www.semanlink.net/tag/wolfram +http://www.semanlink.net/tag/wolfram|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/wolfram|broader_altLabel|Technical guys +http://www.semanlink.net/tag/kassav|prefLabel|Kassav' +http://www.semanlink.net/tag/kassav|broader|http://www.semanlink.net/tag/zouk +http://www.semanlink.net/tag/kassav|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kassav|uri|http://www.semanlink.net/tag/kassav +http://www.semanlink.net/tag/kassav|broader_prefLabel|Zouk +http://www.semanlink.net/tag/moustique|creationTime|2008-01-25T21:35:28Z +http://www.semanlink.net/tag/moustique|prefLabel|Moustique +http://www.semanlink.net/tag/moustique|broader|http://www.semanlink.net/tag/insecte +http://www.semanlink.net/tag/moustique|related|http://www.semanlink.net/tag/dengue +http://www.semanlink.net/tag/moustique|related|http://www.semanlink.net/tag/paludisme +http://www.semanlink.net/tag/moustique|creationDate|2008-01-25 +http://www.semanlink.net/tag/moustique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/moustique|uri|http://www.semanlink.net/tag/moustique +http://www.semanlink.net/tag/moustique|broader_prefLabel|Insecte +http://www.semanlink.net/tag/kbpedia|creationTime|2020-05-03T01:21:00Z +http://www.semanlink.net/tag/kbpedia|prefLabel|KBPedia +http://www.semanlink.net/tag/kbpedia|broader|http://www.semanlink.net/tag/mike_bergman +http://www.semanlink.net/tag/kbpedia|broader|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/tag/kbpedia|broader|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/kbpedia|broader|http://www.semanlink.net/tag/knowledge_based_ai +http://www.semanlink.net/tag/kbpedia|creationDate|2020-05-03 +http://www.semanlink.net/tag/kbpedia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kbpedia|uri|http://www.semanlink.net/tag/kbpedia +http://www.semanlink.net/tag/kbpedia|broader_prefLabel|Mike Bergman +http://www.semanlink.net/tag/kbpedia|broader_prefLabel|Knowledge Graphs +http://www.semanlink.net/tag/kbpedia|broader_prefLabel|Frederick Giasson +http://www.semanlink.net/tag/kbpedia|broader_prefLabel|Knowledge-based AI +http://www.semanlink.net/tag/kbpedia|broader_altLabel|AI3:::Adaptive Information +http://www.semanlink.net/tag/kbpedia|broader_altLabel|Knowledge Graph +http://www.semanlink.net/tag/kbpedia|broader_altLabel|KG +http://www.semanlink.net/tag/kbpedia|broader_related|http://www.semanlink.net/tag/umbel +http://www.semanlink.net/tag/kbpedia|broader_related|http://www.semanlink.net/tag/ldow2008 +http://www.semanlink.net/tag/kbpedia|broader_related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/kbpedia|broader_related|http://www.semanlink.net/tag/umbel 
+http://www.semanlink.net/tag/kbpedia|broader_related|http://www.semanlink.net/tag/semantic_web_and_ai +http://www.semanlink.net/tag/kbpedia|broader_related|http://www.semanlink.net/tag/cognitive_computing +http://www.semanlink.net/tag/flashtext_algorithm|creationTime|2020-12-16T00:02:27Z +http://www.semanlink.net/tag/flashtext_algorithm|prefLabel|FlashText algorithm +http://www.semanlink.net/tag/flashtext_algorithm|broader|http://www.semanlink.net/tag/string_searching_algorithm +http://www.semanlink.net/tag/flashtext_algorithm|related|http://www.semanlink.net/tag/aho_corasick_algorithm +http://www.semanlink.net/tag/flashtext_algorithm|creationDate|2020-12-16 +http://www.semanlink.net/tag/flashtext_algorithm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/flashtext_algorithm|uri|http://www.semanlink.net/tag/flashtext_algorithm +http://www.semanlink.net/tag/flashtext_algorithm|broader_prefLabel|String-searching algorithm +http://www.semanlink.net/tag/extremophiles|prefLabel|Extrémophiles +http://www.semanlink.net/tag/extremophiles|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/extremophiles|broader|http://www.semanlink.net/tag/curiosite_naturelle +http://www.semanlink.net/tag/extremophiles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/extremophiles|uri|http://www.semanlink.net/tag/extremophiles +http://www.semanlink.net/tag/extremophiles|broader_prefLabel|Biology +http://www.semanlink.net/tag/extremophiles|broader_prefLabel|Curiosités naturelles +http://www.semanlink.net/tag/extremophiles|broader_altLabel|Biologie +http://www.semanlink.net/tag/research_papers|creationTime|2018-07-24T23:36:10Z +http://www.semanlink.net/tag/research_papers|prefLabel|Research papers +http://www.semanlink.net/tag/research_papers|broader|http://www.semanlink.net/tag/recherche +http://www.semanlink.net/tag/research_papers|creationDate|2018-07-24 +http://www.semanlink.net/tag/research_papers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/research_papers|uri|http://www.semanlink.net/tag/research_papers +http://www.semanlink.net/tag/research_papers|broader_prefLabel|Recherche +http://www.semanlink.net/tag/research_papers|broader_altLabel|Research +http://www.semanlink.net/tag/emmanuelle_charpentier|creationTime|2016-02-11T17:39:37Z +http://www.semanlink.net/tag/emmanuelle_charpentier|prefLabel|Emmanuelle Charpentier +http://www.semanlink.net/tag/emmanuelle_charpentier|broader|http://www.semanlink.net/tag/scientifique +http://www.semanlink.net/tag/emmanuelle_charpentier|broader|http://www.semanlink.net/tag/femme_celebre +http://www.semanlink.net/tag/emmanuelle_charpentier|related|http://www.semanlink.net/tag/crispr_cas9 +http://www.semanlink.net/tag/emmanuelle_charpentier|creationDate|2016-02-11 +http://www.semanlink.net/tag/emmanuelle_charpentier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/emmanuelle_charpentier|describedBy|https://fr.wikipedia.org/wiki/Emmanuelle_Charpentier +http://www.semanlink.net/tag/emmanuelle_charpentier|uri|http://www.semanlink.net/tag/emmanuelle_charpentier +http://www.semanlink.net/tag/emmanuelle_charpentier|broader_prefLabel|Scientifique +http://www.semanlink.net/tag/emmanuelle_charpentier|broader_prefLabel|Femme célèbre (où qui mérite de l'être) +http://www.semanlink.net/tag/emmanuelle_charpentier|broader_altLabel|Savant 
+http://www.semanlink.net/tag/tree_of_life|creationTime|2012-05-22T18:50:57Z +http://www.semanlink.net/tag/tree_of_life|prefLabel|Tree of life +http://www.semanlink.net/tag/tree_of_life|broader|http://www.semanlink.net/tag/evolution +http://www.semanlink.net/tag/tree_of_life|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/tree_of_life|creationDate|2012-05-22 +http://www.semanlink.net/tag/tree_of_life|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tree_of_life|altLabel|Classification du vivant +http://www.semanlink.net/tag/tree_of_life|uri|http://www.semanlink.net/tag/tree_of_life +http://www.semanlink.net/tag/tree_of_life|broader_prefLabel|Evolution +http://www.semanlink.net/tag/tree_of_life|broader_prefLabel|Biology +http://www.semanlink.net/tag/tree_of_life|broader_altLabel|Biologie +http://www.semanlink.net/tag/recherche|prefLabel|Recherche +http://www.semanlink.net/tag/recherche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/recherche|altLabel|Research +http://www.semanlink.net/tag/recherche|uri|http://www.semanlink.net/tag/recherche +http://www.semanlink.net/tag/machine_learning_problems|creationTime|2015-10-16T10:54:00Z +http://www.semanlink.net/tag/machine_learning_problems|prefLabel|Machine learning: problems +http://www.semanlink.net/tag/machine_learning_problems|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/machine_learning_problems|creationDate|2015-10-16 +http://www.semanlink.net/tag/machine_learning_problems|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/machine_learning_problems|uri|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/machine_learning_problems|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/machine_learning_problems|broader_altLabel|ML +http://www.semanlink.net/tag/machine_learning_problems|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/machine_learning_problems|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/lybie|prefLabel|Lybie +http://www.semanlink.net/tag/lybie|broader|http://www.semanlink.net/tag/afrique_du_nord +http://www.semanlink.net/tag/lybie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lybie|uri|http://www.semanlink.net/tag/lybie +http://www.semanlink.net/tag/lybie|broader_prefLabel|Afrique du Nord +http://www.semanlink.net/tag/clint_eastwood|prefLabel|Clint Eastwood +http://www.semanlink.net/tag/clint_eastwood|broader|http://www.semanlink.net/tag/realisateur +http://www.semanlink.net/tag/clint_eastwood|broader|http://www.semanlink.net/tag/acteur +http://www.semanlink.net/tag/clint_eastwood|creationDate|2007-01-14 +http://www.semanlink.net/tag/clint_eastwood|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/clint_eastwood|uri|http://www.semanlink.net/tag/clint_eastwood +http://www.semanlink.net/tag/clint_eastwood|broader_prefLabel|Réalisateur +http://www.semanlink.net/tag/clint_eastwood|broader_prefLabel|Acteur +http://www.semanlink.net/tag/clint_eastwood|broader_altLabel|Cinéaste +http://www.semanlink.net/tag/rdf_validator|prefLabel|RDF Validator +http://www.semanlink.net/tag/rdf_validator|broader|http://www.semanlink.net/tag/validation +http://www.semanlink.net/tag/rdf_validator|broader|http://www.semanlink.net/tag/rdf_tools 
+http://www.semanlink.net/tag/rdf_validator|creationDate|2007-01-19 +http://www.semanlink.net/tag/rdf_validator|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_validator|uri|http://www.semanlink.net/tag/rdf_validator +http://www.semanlink.net/tag/rdf_validator|broader_prefLabel|Validator +http://www.semanlink.net/tag/rdf_validator|broader_prefLabel|RDF Tools +http://www.semanlink.net/tag/plastic|creationTime|2012-05-03T22:25:50Z +http://www.semanlink.net/tag/plastic|prefLabel|Plastic +http://www.semanlink.net/tag/plastic|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/plastic|creationDate|2012-05-03 +http://www.semanlink.net/tag/plastic|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/plastic|altLabel|Plastique +http://www.semanlink.net/tag/plastic|uri|http://www.semanlink.net/tag/plastic +http://www.semanlink.net/tag/plastic|broader_prefLabel|Grands problèmes +http://www.semanlink.net/tag/cross_entropy|creationTime|2019-01-14T15:59:38Z +http://www.semanlink.net/tag/cross_entropy|prefLabel|Cross-Entropy +http://www.semanlink.net/tag/cross_entropy|broader|http://www.semanlink.net/tag/information_theory +http://www.semanlink.net/tag/cross_entropy|creationDate|2019-01-14 +http://www.semanlink.net/tag/cross_entropy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cross_entropy|describedBy|https://en.wikipedia.org/wiki/Cross_entropy +http://www.semanlink.net/tag/cross_entropy|uri|http://www.semanlink.net/tag/cross_entropy +http://www.semanlink.net/tag/cross_entropy|broader_prefLabel|Information theory +http://www.semanlink.net/tag/gore_vidal|prefLabel|Gore Vidal +http://www.semanlink.net/tag/gore_vidal|broader|http://www.semanlink.net/tag/intellectuel +http://www.semanlink.net/tag/gore_vidal|broader|http://www.semanlink.net/tag/romancier +http://www.semanlink.net/tag/gore_vidal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gore_vidal|uri|http://www.semanlink.net/tag/gore_vidal +http://www.semanlink.net/tag/gore_vidal|broader_prefLabel|Intellectuel +http://www.semanlink.net/tag/gore_vidal|broader_prefLabel|Romancier +http://www.semanlink.net/tag/accaparement_des_terres_agricoles|creationTime|2014-07-22T18:31:20Z +http://www.semanlink.net/tag/accaparement_des_terres_agricoles|prefLabel|Accaparement des terres agricoles +http://www.semanlink.net/tag/accaparement_des_terres_agricoles|broader|http://www.semanlink.net/tag/terres_agricoles +http://www.semanlink.net/tag/accaparement_des_terres_agricoles|creationDate|2014-07-22 +http://www.semanlink.net/tag/accaparement_des_terres_agricoles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/accaparement_des_terres_agricoles|uri|http://www.semanlink.net/tag/accaparement_des_terres_agricoles +http://www.semanlink.net/tag/accaparement_des_terres_agricoles|broader_prefLabel|Terres agricoles +http://www.semanlink.net/tag/semweb_pro_2011|creationTime|2012-02-13T19:41:26Z +http://www.semanlink.net/tag/semweb_pro_2011|prefLabel|SemWeb Pro 2011 +http://www.semanlink.net/tag/semweb_pro_2011|broader|http://www.semanlink.net/tag/semweb_pro +http://www.semanlink.net/tag/semweb_pro_2011|creationDate|2012-02-13 +http://www.semanlink.net/tag/semweb_pro_2011|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semweb_pro_2011|uri|http://www.semanlink.net/tag/semweb_pro_2011 
+http://www.semanlink.net/tag/semweb_pro_2011|broader_prefLabel|SemWeb Pro +http://www.semanlink.net/tag/node_js|creationTime|2011-09-13T14:15:23Z +http://www.semanlink.net/tag/node_js|prefLabel|node.js +http://www.semanlink.net/tag/node_js|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/node_js|creationDate|2011-09-13 +http://www.semanlink.net/tag/node_js|comment|Evented I/O for V8 JavaScript +http://www.semanlink.net/tag/node_js|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/node_js|homepage|http://nodejs.org/ +http://www.semanlink.net/tag/node_js|uri|http://www.semanlink.net/tag/node_js +http://www.semanlink.net/tag/node_js|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/node_js|broader_altLabel|js +http://www.semanlink.net/tag/art_d_afrique|prefLabel|Art d'Afrique +http://www.semanlink.net/tag/art_d_afrique|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/art_d_afrique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/art_d_afrique|altLabel|African art +http://www.semanlink.net/tag/art_d_afrique|uri|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/art_d_afrique|broader_prefLabel|Afrique +http://www.semanlink.net/tag/art_d_afrique|broader_altLabel|Africa +http://www.semanlink.net/tag/crime_contre_l_humanite|prefLabel|Crime contre l'Humanité +http://www.semanlink.net/tag/crime_contre_l_humanite|broader|http://www.semanlink.net/tag/crime +http://www.semanlink.net/tag/crime_contre_l_humanite|broader|http://www.semanlink.net/tag/horreur +http://www.semanlink.net/tag/crime_contre_l_humanite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/crime_contre_l_humanite|uri|http://www.semanlink.net/tag/crime_contre_l_humanite +http://www.semanlink.net/tag/crime_contre_l_humanite|broader_prefLabel|Crime +http://www.semanlink.net/tag/crime_contre_l_humanite|broader_prefLabel|Horreur +http://www.semanlink.net/tag/euphrasie|prefLabel|Euphrasie +http://www.semanlink.net/tag/euphrasie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/euphrasie|uri|http://www.semanlink.net/tag/euphrasie +http://www.semanlink.net/tag/nlp_papers|creationTime|2021-03-26T01:59:50Z +http://www.semanlink.net/tag/nlp_papers|prefLabel|NLP papers +http://www.semanlink.net/tag/nlp_papers|broader|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/tag/nlp_papers|creationDate|2021-03-26 +http://www.semanlink.net/tag/nlp_papers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_papers|uri|http://www.semanlink.net/tag/nlp_papers +http://www.semanlink.net/tag/nlp_papers|broader_prefLabel|NLP +http://www.semanlink.net/tag/nlp_papers|broader_altLabel|TALN +http://www.semanlink.net/tag/nlp_papers|broader_altLabel|Traitement Automatique du Langage Naturel +http://www.semanlink.net/tag/nlp_papers|broader_altLabel|Natural Language Processing +http://www.semanlink.net/tag/rdf2rdfa|creationTime|2013-08-27T16:50:40Z +http://www.semanlink.net/tag/rdf2rdfa|prefLabel|RDF2RDFa +http://www.semanlink.net/tag/rdf2rdfa|broader|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/rdf2rdfa|creationDate|2013-08-27 +http://www.semanlink.net/tag/rdf2rdfa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf2rdfa|uri|http://www.semanlink.net/tag/rdf2rdfa +http://www.semanlink.net/tag/rdf2rdfa|broader_prefLabel|RDFa 
+http://www.semanlink.net/tag/rdf2rdfa|broader_altLabel|RDF/A +http://www.semanlink.net/tag/rdf2rdfa|broader_related|http://www.semanlink.net/tag/elias_torres +http://www.semanlink.net/tag/rdf2rdfa|broader_related|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanlink.net/tag/rdf2rdfa|broader_related|http://www.semanlink.net/tag/microformats +http://www.semanlink.net/tag/mensonge_d_etat|prefLabel|Mensonge d'état +http://www.semanlink.net/tag/mensonge_d_etat|creationDate|2006-08-19 +http://www.semanlink.net/tag/mensonge_d_etat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mensonge_d_etat|uri|http://www.semanlink.net/tag/mensonge_d_etat +http://www.semanlink.net/tag/rdfa_lite|creationTime|2013-01-26T15:07:50Z +http://www.semanlink.net/tag/rdfa_lite|prefLabel|RDFa Lite +http://www.semanlink.net/tag/rdfa_lite|broader|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/rdfa_lite|creationDate|2013-01-26 +http://www.semanlink.net/tag/rdfa_lite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdfa_lite|uri|http://www.semanlink.net/tag/rdfa_lite +http://www.semanlink.net/tag/rdfa_lite|broader_prefLabel|RDFa +http://www.semanlink.net/tag/rdfa_lite|broader_altLabel|RDF/A +http://www.semanlink.net/tag/rdfa_lite|broader_related|http://www.semanlink.net/tag/elias_torres +http://www.semanlink.net/tag/rdfa_lite|broader_related|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanlink.net/tag/rdfa_lite|broader_related|http://www.semanlink.net/tag/microformats +http://www.semanlink.net/tag/geoffrey_hinton|creationTime|2017-08-16T10:29:12Z +http://www.semanlink.net/tag/geoffrey_hinton|prefLabel|Geoffrey Hinton +http://www.semanlink.net/tag/geoffrey_hinton|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/geoffrey_hinton|creationDate|2017-08-16 +http://www.semanlink.net/tag/geoffrey_hinton|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/geoffrey_hinton|describedBy|https://en.wikipedia.org/wiki/Geoffrey_Hinton +http://www.semanlink.net/tag/geoffrey_hinton|uri|http://www.semanlink.net/tag/geoffrey_hinton +http://www.semanlink.net/tag/geoffrey_hinton|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/c2g|creationTime|2011-02-16T13:29:00Z +http://www.semanlink.net/tag/c2g|prefLabel|C2G +http://www.semanlink.net/tag/c2g|broader|http://www.semanlink.net/tag/configuration +http://www.semanlink.net/tag/c2g|creationDate|2011-02-16 +http://www.semanlink.net/tag/c2g|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/c2g|uri|http://www.semanlink.net/tag/c2g +http://www.semanlink.net/tag/c2g|broader_prefLabel|Configuration +http://www.semanlink.net/tag/software|prefLabel|Software +http://www.semanlink.net/tag/software|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/software|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/software|uri|http://www.semanlink.net/tag/software +http://www.semanlink.net/tag/software|broader_prefLabel|Technologie +http://www.semanlink.net/tag/zhang_qian|creationTime|2007-04-29T00:57:05Z +http://www.semanlink.net/tag/zhang_qian|prefLabel|Zhang Qian +http://www.semanlink.net/tag/zhang_qian|broader|http://www.semanlink.net/tag/histoire_de_la_chine +http://www.semanlink.net/tag/zhang_qian|broader|http://www.semanlink.net/tag/explorateur 
+http://www.semanlink.net/tag/zhang_qian|creationDate|2007-04-29 +http://www.semanlink.net/tag/zhang_qian|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zhang_qian|uri|http://www.semanlink.net/tag/zhang_qian +http://www.semanlink.net/tag/zhang_qian|broader_prefLabel|Histoire de la Chine +http://www.semanlink.net/tag/zhang_qian|broader_prefLabel|Explorateur +http://www.semanlink.net/tag/uri_template|creationTime|2014-09-08T13:45:43Z +http://www.semanlink.net/tag/uri_template|prefLabel|URI Template +http://www.semanlink.net/tag/uri_template|broader|http://www.semanlink.net/tag/uri +http://www.semanlink.net/tag/uri_template|creationDate|2014-09-08 +http://www.semanlink.net/tag/uri_template|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uri_template|uri|http://www.semanlink.net/tag/uri_template +http://www.semanlink.net/tag/uri_template|broader_prefLabel|URI +http://www.semanlink.net/tag/alpinisme|prefLabel|Alpinisme +http://www.semanlink.net/tag/alpinisme|broader|http://www.semanlink.net/tag/sport +http://www.semanlink.net/tag/alpinisme|broader|http://www.semanlink.net/tag/montagne +http://www.semanlink.net/tag/alpinisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alpinisme|uri|http://www.semanlink.net/tag/alpinisme +http://www.semanlink.net/tag/alpinisme|broader_prefLabel|Sport +http://www.semanlink.net/tag/alpinisme|broader_prefLabel|Montagne +http://www.semanlink.net/tag/encyclopedie|prefLabel|Encyclopédie +http://www.semanlink.net/tag/encyclopedie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/encyclopedie|uri|http://www.semanlink.net/tag/encyclopedie +http://www.semanlink.net/tag/1789|creationTime|2011-02-02T01:38:09Z +http://www.semanlink.net/tag/1789|prefLabel|1789 +http://www.semanlink.net/tag/1789|broader|http://www.semanlink.net/tag/revolution_francaise +http://www.semanlink.net/tag/1789|creationDate|2011-02-02 +http://www.semanlink.net/tag/1789|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/1789|uri|http://www.semanlink.net/tag/1789 +http://www.semanlink.net/tag/1789|broader_prefLabel|Révolution française +http://www.semanlink.net/tag/systemes_distribues|creationTime|2016-02-08T13:35:21Z +http://www.semanlink.net/tag/systemes_distribues|prefLabel|Systèmes distribués +http://www.semanlink.net/tag/systemes_distribues|creationDate|2016-02-08 +http://www.semanlink.net/tag/systemes_distribues|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/systemes_distribues|uri|http://www.semanlink.net/tag/systemes_distribues +http://www.semanlink.net/tag/laser|prefLabel|Laser +http://www.semanlink.net/tag/laser|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/laser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/laser|uri|http://www.semanlink.net/tag/laser +http://www.semanlink.net/tag/laser|broader_prefLabel|Physique +http://www.semanlink.net/tag/laser|broader_altLabel|Physics +http://www.semanlink.net/tag/amphibiens|prefLabel|Amphibiens +http://www.semanlink.net/tag/amphibiens|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/amphibiens|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amphibiens|uri|http://www.semanlink.net/tag/amphibiens +http://www.semanlink.net/tag/amphibiens|broader_prefLabel|Animal 
+http://www.semanlink.net/tag/anglais|prefLabel|Anglais +http://www.semanlink.net/tag/anglais|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/anglais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/anglais|altLabel|English +http://www.semanlink.net/tag/anglais|uri|http://www.semanlink.net/tag/anglais +http://www.semanlink.net/tag/anglais|broader_prefLabel|Langues +http://www.semanlink.net/tag/multitask_learning_in_nlp|creationTime|2018-07-25T13:12:53Z +http://www.semanlink.net/tag/multitask_learning_in_nlp|prefLabel|Multitask Learning in NLP +http://www.semanlink.net/tag/multitask_learning_in_nlp|broader|http://www.semanlink.net/tag/multi_task_learning +http://www.semanlink.net/tag/multitask_learning_in_nlp|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/multitask_learning_in_nlp|creationDate|2018-07-25 +http://www.semanlink.net/tag/multitask_learning_in_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/multitask_learning_in_nlp|altLabel|Joint Learning in NLP +http://www.semanlink.net/tag/multitask_learning_in_nlp|altLabel|Joint models in NLP +http://www.semanlink.net/tag/multitask_learning_in_nlp|uri|http://www.semanlink.net/tag/multitask_learning_in_nlp +http://www.semanlink.net/tag/multitask_learning_in_nlp|broader_prefLabel|Multi-task learning +http://www.semanlink.net/tag/multitask_learning_in_nlp|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/multitask_learning_in_nlp|broader_altLabel|Multitask Learning +http://www.semanlink.net/tag/multitask_learning_in_nlp|broader_altLabel|Joint learning +http://www.semanlink.net/tag/multitask_learning_in_nlp|broader_altLabel|Joint models +http://www.semanlink.net/tag/multitask_learning_in_nlp|broader_altLabel|MTL +http://www.semanlink.net/tag/multitask_learning_in_nlp|broader_related|http://www.semanlink.net/tag/transfer_learning +http://www.semanlink.net/tag/classe_moyenne|prefLabel|Classe moyenne +http://www.semanlink.net/tag/classe_moyenne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/classe_moyenne|uri|http://www.semanlink.net/tag/classe_moyenne +http://www.semanlink.net/tag/semantic_indexing|prefLabel|Semantic indexing +http://www.semanlink.net/tag/semantic_indexing|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_indexing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_indexing|uri|http://www.semanlink.net/tag/semantic_indexing +http://www.semanlink.net/tag/semantic_indexing|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_indexing|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_indexing|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/bigquery|creationTime|2019-02-19T19:22:18Z +http://www.semanlink.net/tag/bigquery|prefLabel|BigQuery +http://www.semanlink.net/tag/bigquery|broader|http://www.semanlink.net/tag/google_cloud +http://www.semanlink.net/tag/bigquery|related|http://www.semanlink.net/tag/sql +http://www.semanlink.net/tag/bigquery|creationDate|2019-02-19 +http://www.semanlink.net/tag/bigquery|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bigquery|uri|http://www.semanlink.net/tag/bigquery +http://www.semanlink.net/tag/bigquery|broader_prefLabel|Google Cloud +http://www.semanlink.net/tag/datalakes|creationTime|2018-06-17T12:28:20Z 
+http://www.semanlink.net/tag/datalakes|prefLabel|Datalakes +http://www.semanlink.net/tag/datalakes|creationDate|2018-06-17 +http://www.semanlink.net/tag/datalakes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/datalakes|uri|http://www.semanlink.net/tag/datalakes +http://www.semanlink.net/tag/monuments_historiques|creationTime|2011-09-26T14:41:22Z +http://www.semanlink.net/tag/monuments_historiques|prefLabel|Monuments historiques +http://www.semanlink.net/tag/monuments_historiques|creationDate|2011-09-26 +http://www.semanlink.net/tag/monuments_historiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/monuments_historiques|uri|http://www.semanlink.net/tag/monuments_historiques +http://www.semanlink.net/tag/nemrud|prefLabel|Nemrud +http://www.semanlink.net/tag/nemrud|broader|http://www.semanlink.net/tag/asie_mineure +http://www.semanlink.net/tag/nemrud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nemrud|uri|http://www.semanlink.net/tag/nemrud +http://www.semanlink.net/tag/nemrud|broader_prefLabel|Asie mineure +http://www.semanlink.net/tag/nemrud|broader_altLabel|Anatolie +http://www.semanlink.net/tag/desktop_applications|prefLabel|Desktop applications +http://www.semanlink.net/tag/desktop_applications|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/desktop_applications|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/desktop_applications|uri|http://www.semanlink.net/tag/desktop_applications +http://www.semanlink.net/tag/desktop_applications|broader_prefLabel|Informatique +http://www.semanlink.net/tag/wsdl|prefLabel|WSDL +http://www.semanlink.net/tag/wsdl|broader|http://www.semanlink.net/tag/web_services +http://www.semanlink.net/tag/wsdl|broader|http://www.semanlink.net/tag/service_description +http://www.semanlink.net/tag/wsdl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wsdl|uri|http://www.semanlink.net/tag/wsdl +http://www.semanlink.net/tag/wsdl|broader_prefLabel|Web Services +http://www.semanlink.net/tag/wsdl|broader_prefLabel|Service description +http://www.semanlink.net/tag/wsdl|broader_altLabel|WS +http://www.semanlink.net/tag/coursera|creationTime|2012-08-09T22:36:36Z +http://www.semanlink.net/tag/coursera|prefLabel|Coursera +http://www.semanlink.net/tag/coursera|broader|http://www.semanlink.net/tag/online_course_materials +http://www.semanlink.net/tag/coursera|broader|http://www.semanlink.net/tag/mooc +http://www.semanlink.net/tag/coursera|related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/coursera|creationDate|2012-08-09 +http://www.semanlink.net/tag/coursera|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coursera|homepage|https://www.coursera.org +http://www.semanlink.net/tag/coursera|uri|http://www.semanlink.net/tag/coursera +http://www.semanlink.net/tag/coursera|broader_prefLabel|Online Course Materials +http://www.semanlink.net/tag/coursera|broader_prefLabel|MOOC +http://www.semanlink.net/tag/mozilla|prefLabel|Mozilla +http://www.semanlink.net/tag/mozilla|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/mozilla|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mozilla|uri|http://www.semanlink.net/tag/mozilla +http://www.semanlink.net/tag/mozilla|broader_prefLabel|Dev 
+http://www.semanlink.net/tag/bert_fine_tuning|creationTime|2021-10-30T09:14:27Z +http://www.semanlink.net/tag/bert_fine_tuning|prefLabel|BERT fine-tuning +http://www.semanlink.net/tag/bert_fine_tuning|broader|http://www.semanlink.net/tag/bert +http://www.semanlink.net/tag/bert_fine_tuning|creationDate|2021-10-30 +http://www.semanlink.net/tag/bert_fine_tuning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bert_fine_tuning|uri|http://www.semanlink.net/tag/bert_fine_tuning +http://www.semanlink.net/tag/bert_fine_tuning|broader_prefLabel|BERT +http://www.semanlink.net/tag/riz|prefLabel|Riz +http://www.semanlink.net/tag/riz|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/riz|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/riz|uri|http://www.semanlink.net/tag/riz +http://www.semanlink.net/tag/riz|broader_prefLabel|Agriculture +http://www.semanlink.net/tag/kevin_kostner|creationTime|2021-10-08T23:23:52Z +http://www.semanlink.net/tag/kevin_kostner|prefLabel|Kevin Kostner +http://www.semanlink.net/tag/kevin_kostner|broader|http://www.semanlink.net/tag/acteur +http://www.semanlink.net/tag/kevin_kostner|creationDate|2021-10-08 +http://www.semanlink.net/tag/kevin_kostner|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kevin_kostner|uri|http://www.semanlink.net/tag/kevin_kostner +http://www.semanlink.net/tag/kevin_kostner|broader_prefLabel|Acteur +http://www.semanlink.net/tag/hymne_a_la_joie|creationTime|2016-12-05T16:16:53Z +http://www.semanlink.net/tag/hymne_a_la_joie|prefLabel|Hymne à la joie +http://www.semanlink.net/tag/hymne_a_la_joie|broader|http://www.semanlink.net/tag/beethoven +http://www.semanlink.net/tag/hymne_a_la_joie|creationDate|2016-12-05 +http://www.semanlink.net/tag/hymne_a_la_joie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hymne_a_la_joie|uri|http://www.semanlink.net/tag/hymne_a_la_joie +http://www.semanlink.net/tag/hymne_a_la_joie|broader_prefLabel|Beethoven +http://www.semanlink.net/tag/pluton|prefLabel|Pluton +http://www.semanlink.net/tag/pluton|broader|http://www.semanlink.net/tag/systeme_solaire +http://www.semanlink.net/tag/pluton|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pluton|uri|http://www.semanlink.net/tag/pluton +http://www.semanlink.net/tag/pluton|broader_prefLabel|Système solaire +http://www.semanlink.net/tag/seattle|creationTime|2014-04-16T01:09:24Z +http://www.semanlink.net/tag/seattle|prefLabel|Seattle +http://www.semanlink.net/tag/seattle|creationDate|2014-04-16 +http://www.semanlink.net/tag/seattle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/seattle|uri|http://www.semanlink.net/tag/seattle +http://www.semanlink.net/tag/koure|creationTime|2020-06-06T21:27:00Z +http://www.semanlink.net/tag/koure|prefLabel|Kouré +http://www.semanlink.net/tag/koure|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/koure|creationDate|2020-06-06 +http://www.semanlink.net/tag/koure|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/koure|uri|http://www.semanlink.net/tag/koure +http://www.semanlink.net/tag/koure|broader_prefLabel|Niger +http://www.semanlink.net/tag/koure|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/koure|broader_related|http://www.semanlink.net/tag/mali 
+http://www.semanlink.net/tag/koure|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/karen_blixen|prefLabel|Karen Blixen +http://www.semanlink.net/tag/karen_blixen|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/karen_blixen|broader|http://www.semanlink.net/tag/danemark +http://www.semanlink.net/tag/karen_blixen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/karen_blixen|uri|http://www.semanlink.net/tag/karen_blixen +http://www.semanlink.net/tag/karen_blixen|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/karen_blixen|broader_prefLabel|Danemark +http://www.semanlink.net/tag/mobile_phone|creationTime|2013-08-25T14:54:22Z +http://www.semanlink.net/tag/mobile_phone|prefLabel|Mobile phone +http://www.semanlink.net/tag/mobile_phone|broader|http://www.semanlink.net/tag/mobile_device +http://www.semanlink.net/tag/mobile_phone|broader|http://www.semanlink.net/tag/telephone +http://www.semanlink.net/tag/mobile_phone|creationDate|2013-08-25 +http://www.semanlink.net/tag/mobile_phone|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mobile_phone|uri|http://www.semanlink.net/tag/mobile_phone +http://www.semanlink.net/tag/mobile_phone|broader_prefLabel|Mobile device +http://www.semanlink.net/tag/mobile_phone|broader_prefLabel|Téléphone +http://www.semanlink.net/tag/mobile_phone|broader_altLabel|Téléphonie +http://www.semanlink.net/tag/couple_mixte|creationTime|2008-08-31T02:30:38Z +http://www.semanlink.net/tag/couple_mixte|prefLabel|Couple mixte +http://www.semanlink.net/tag/couple_mixte|creationDate|2008-08-31 +http://www.semanlink.net/tag/couple_mixte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/couple_mixte|uri|http://www.semanlink.net/tag/couple_mixte +http://www.semanlink.net/tag/tim_cook|creationTime|2016-02-18T00:05:45Z +http://www.semanlink.net/tag/tim_cook|prefLabel|Tim Cook +http://www.semanlink.net/tag/tim_cook|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/tim_cook|creationDate|2016-02-18 +http://www.semanlink.net/tag/tim_cook|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tim_cook|uri|http://www.semanlink.net/tag/tim_cook +http://www.semanlink.net/tag/tim_cook|broader_prefLabel|Apple +http://www.semanlink.net/tag/medical_information_search|creationTime|2018-04-01T14:50:42Z +http://www.semanlink.net/tag/medical_information_search|prefLabel|Medical Information Search +http://www.semanlink.net/tag/medical_information_search|broader|http://www.semanlink.net/tag/medical_ir_ml_ia +http://www.semanlink.net/tag/medical_information_search|broader|http://www.semanlink.net/tag/medical_data +http://www.semanlink.net/tag/medical_information_search|creationDate|2018-04-01 +http://www.semanlink.net/tag/medical_information_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/medical_information_search|uri|http://www.semanlink.net/tag/medical_information_search +http://www.semanlink.net/tag/medical_information_search|broader_prefLabel|Medical IR, ML, IA +http://www.semanlink.net/tag/medical_information_search|broader_prefLabel|Medical Data +http://www.semanlink.net/tag/cjnn|creationTime|2012-11-18T01:38:51Z +http://www.semanlink.net/tag/cjnn|prefLabel|CJNN +http://www.semanlink.net/tag/cjnn|broader|http://www.semanlink.net/tag/souvenirs +http://www.semanlink.net/tag/cjnn|creationDate|2012-11-18 
+http://www.semanlink.net/tag/cjnn|comment|Club des jeunes naturalistes normands +http://www.semanlink.net/tag/cjnn|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cjnn|uri|http://www.semanlink.net/tag/cjnn +http://www.semanlink.net/tag/cjnn|broader_prefLabel|Souvenirs +http://www.semanlink.net/tag/cjnn|broader_altLabel|Souvenir +http://www.semanlink.net/tag/gautier_poupeau|creationTime|2008-05-19T19:04:34Z +http://www.semanlink.net/tag/gautier_poupeau|prefLabel|Gautier Poupeau +http://www.semanlink.net/tag/gautier_poupeau|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/gautier_poupeau|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/gautier_poupeau|related|http://www.semanlink.net/tag/atos_origin +http://www.semanlink.net/tag/gautier_poupeau|creationDate|2008-05-19 +http://www.semanlink.net/tag/gautier_poupeau|type|http://xmlns.com/foaf/0.1/Person +http://www.semanlink.net/tag/gautier_poupeau|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gautier_poupeau|describedBy|http://www.lespetitescases.net/ +http://www.semanlink.net/tag/gautier_poupeau|uri|http://www.semanlink.net/tag/gautier_poupeau +http://www.semanlink.net/tag/gautier_poupeau|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/gautier_poupeau|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/gautier_poupeau|broader_altLabel|Technical guys +http://www.semanlink.net/tag/lutte_traditionnelle|prefLabel|Lutte traditionnelle +http://www.semanlink.net/tag/lutte_traditionnelle|broader|http://www.semanlink.net/tag/sport_de_combat +http://www.semanlink.net/tag/lutte_traditionnelle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lutte_traditionnelle|uri|http://www.semanlink.net/tag/lutte_traditionnelle +http://www.semanlink.net/tag/lutte_traditionnelle|broader_prefLabel|Sport de combat +http://www.semanlink.net/tag/rosee|prefLabel|Rosée +http://www.semanlink.net/tag/rosee|broader|http://www.semanlink.net/tag/eau +http://www.semanlink.net/tag/rosee|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rosee|uri|http://www.semanlink.net/tag/rosee +http://www.semanlink.net/tag/rosee|broader_prefLabel|Eau +http://www.semanlink.net/tag/json_ld_frame|creationTime|2016-05-05T16:35:10Z +http://www.semanlink.net/tag/json_ld_frame|prefLabel|JSON-LD frame +http://www.semanlink.net/tag/json_ld_frame|broader|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/json_ld_frame|creationDate|2016-05-05 +http://www.semanlink.net/tag/json_ld_frame|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/json_ld_frame|uri|http://www.semanlink.net/tag/json_ld_frame +http://www.semanlink.net/tag/json_ld_frame|broader_prefLabel|JSON-LD +http://www.semanlink.net/tag/json_ld_frame|broader_altLabel|JSONLD +http://www.semanlink.net/tag/esclavage|prefLabel|Esclavage +http://www.semanlink.net/tag/esclavage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/esclavage|altLabel|Slavery +http://www.semanlink.net/tag/esclavage|uri|http://www.semanlink.net/tag/esclavage +http://www.semanlink.net/tag/overview|creationTime|2018-11-17T15:18:12Z +http://www.semanlink.net/tag/overview|prefLabel|Overview +http://www.semanlink.net/tag/overview|creationDate|2018-11-17 
+http://www.semanlink.net/tag/overview|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/overview|uri|http://www.semanlink.net/tag/overview
+http://www.semanlink.net/tag/sequencage_du_genome|prefLabel|Séquençage du génome
+http://www.semanlink.net/tag/sequencage_du_genome|broader|http://www.semanlink.net/tag/genome
+http://www.semanlink.net/tag/sequencage_du_genome|broader|http://www.semanlink.net/tag/biotechnologies
+http://www.semanlink.net/tag/sequencage_du_genome|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sequencage_du_genome|uri|http://www.semanlink.net/tag/sequencage_du_genome
+http://www.semanlink.net/tag/sequencage_du_genome|broader_prefLabel|Génome
+http://www.semanlink.net/tag/sequencage_du_genome|broader_prefLabel|Biotechnologies
+http://www.semanlink.net/tag/global_semantic_context|creationTime|2018-09-27T22:12:38Z
+http://www.semanlink.net/tag/global_semantic_context|prefLabel|Global Semantic Context
+http://www.semanlink.net/tag/global_semantic_context|creationDate|2018-09-27
+http://www.semanlink.net/tag/global_semantic_context|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/global_semantic_context|uri|http://www.semanlink.net/tag/global_semantic_context
+http://www.semanlink.net/tag/journalisme|creationTime|2008-08-08T18:25:05Z
+http://www.semanlink.net/tag/journalisme|prefLabel|Journalisme
+http://www.semanlink.net/tag/journalisme|creationDate|2008-08-08
+http://www.semanlink.net/tag/journalisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/journalisme|uri|http://www.semanlink.net/tag/journalisme
+http://www.semanlink.net/tag/europe|prefLabel|Europe
+http://www.semanlink.net/tag/europe|broader|http://www.semanlink.net/tag/geographie
+http://www.semanlink.net/tag/europe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/europe|uri|http://www.semanlink.net/tag/europe
+http://www.semanlink.net/tag/europe|broader_prefLabel|Géographie
+http://www.semanlink.net/tag/evolution|prefLabel|Evolution
+http://www.semanlink.net/tag/evolution|broader|http://www.semanlink.net/tag/histoire_de_la_vie
+http://www.semanlink.net/tag/evolution|broader|http://www.semanlink.net/tag/biologie
+http://www.semanlink.net/tag/evolution|broader|http://www.semanlink.net/tag/science
+http://www.semanlink.net/tag/evolution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/evolution|uri|http://www.semanlink.net/tag/evolution
+http://www.semanlink.net/tag/evolution|broader_prefLabel|Histoire de la vie
+http://www.semanlink.net/tag/evolution|broader_prefLabel|Biology
+http://www.semanlink.net/tag/evolution|broader_prefLabel|Science
+http://www.semanlink.net/tag/evolution|broader_altLabel|Biologie
+http://www.semanlink.net/tag/evolution|broader_altLabel|sciences
+http://www.semanlink.net/tag/delicious_api|prefLabel|delicious api
+http://www.semanlink.net/tag/delicious_api|broader|http://www.semanlink.net/tag/del_icio_us
+http://www.semanlink.net/tag/delicious_api|broader|http://www.semanlink.net/tag/tagging
+http://www.semanlink.net/tag/delicious_api|broader|http://www.semanlink.net/tag/api
+http://www.semanlink.net/tag/delicious_api|creationDate|2006-09-25
+http://www.semanlink.net/tag/delicious_api|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/delicious_api|uri|http://www.semanlink.net/tag/delicious_api +http://www.semanlink.net/tag/delicious_api|broader_prefLabel|del.icio.us +http://www.semanlink.net/tag/delicious_api|broader_prefLabel|Tagging +http://www.semanlink.net/tag/delicious_api|broader_prefLabel|API +http://www.semanlink.net/tag/delicious_api|broader_altLabel|delicious +http://www.semanlink.net/tag/domain_adaptation|creationTime|2018-09-22T11:43:54Z +http://www.semanlink.net/tag/domain_adaptation|prefLabel|Domain adaptation +http://www.semanlink.net/tag/domain_adaptation|broader|http://www.semanlink.net/tag/domain_specific_nlp +http://www.semanlink.net/tag/domain_adaptation|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/domain_adaptation|broader|http://www.semanlink.net/tag/transfer_learning +http://www.semanlink.net/tag/domain_adaptation|creationDate|2018-09-22 +http://www.semanlink.net/tag/domain_adaptation|comment|transfer of knowledge from a source domain to a related target domain +http://www.semanlink.net/tag/domain_adaptation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/domain_adaptation|describedBy|https://en.wikipedia.org/wiki/Domain_adaptation +http://www.semanlink.net/tag/domain_adaptation|uri|http://www.semanlink.net/tag/domain_adaptation +http://www.semanlink.net/tag/domain_adaptation|broader_prefLabel|Domain-Specific NLP +http://www.semanlink.net/tag/domain_adaptation|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/domain_adaptation|broader_prefLabel|Transfer learning +http://www.semanlink.net/tag/domain_adaptation|broader_related|http://www.semanlink.net/tag/domain_knowledge_deep_learning +http://www.semanlink.net/tag/domain_adaptation|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/domain_adaptation|broader_related|http://www.semanlink.net/tag/multi_task_learning +http://www.semanlink.net/tag/google_groups|creationTime|2012-07-10T23:53:34Z +http://www.semanlink.net/tag/google_groups|prefLabel|Google Groups +http://www.semanlink.net/tag/google_groups|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/google_groups|creationDate|2012-07-10 +http://www.semanlink.net/tag/google_groups|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_groups|uri|http://www.semanlink.net/tag/google_groups +http://www.semanlink.net/tag/google_groups|broader_prefLabel|Google +http://www.semanlink.net/tag/google_groups|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/modification_du_genome_humain|creationTime|2018-11-27T00:13:45Z +http://www.semanlink.net/tag/modification_du_genome_humain|prefLabel|Modification du génome humain +http://www.semanlink.net/tag/modification_du_genome_humain|broader|http://www.semanlink.net/tag/manipulations_genetiques +http://www.semanlink.net/tag/modification_du_genome_humain|creationDate|2018-11-27 +http://www.semanlink.net/tag/modification_du_genome_humain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/modification_du_genome_humain|uri|http://www.semanlink.net/tag/modification_du_genome_humain +http://www.semanlink.net/tag/modification_du_genome_humain|broader_prefLabel|Manipulations génétiques +http://www.semanlink.net/tag/vision|creationTime|2018-12-06T08:42:30Z +http://www.semanlink.net/tag/vision|prefLabel|Vision 
+http://www.semanlink.net/tag/vision|related|http://www.semanlink.net/tag/computer_vision +http://www.semanlink.net/tag/vision|creationDate|2018-12-06 +http://www.semanlink.net/tag/vision|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vision|uri|http://www.semanlink.net/tag/vision +http://www.semanlink.net/tag/relation_extraction|creationTime|2018-05-10T14:43:54Z +http://www.semanlink.net/tag/relation_extraction|prefLabel|Relation Extraction +http://www.semanlink.net/tag/relation_extraction|broader|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/relation_extraction|broader|http://www.semanlink.net/tag/entities +http://www.semanlink.net/tag/relation_extraction|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/relation_extraction|broader|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/tag/relation_extraction|creationDate|2018-05-10 +http://www.semanlink.net/tag/relation_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/relation_extraction|uri|http://www.semanlink.net/tag/relation_extraction +http://www.semanlink.net/tag/relation_extraction|broader_prefLabel|Knowledge Graph Completion +http://www.semanlink.net/tag/relation_extraction|broader_prefLabel|Entities +http://www.semanlink.net/tag/relation_extraction|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/relation_extraction|broader_prefLabel|Knowledge Extraction +http://www.semanlink.net/tag/relation_extraction|broader_related|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/bio_engineering|creationTime|2018-08-04T14:23:00Z +http://www.semanlink.net/tag/bio_engineering|prefLabel|Bio-Engineering +http://www.semanlink.net/tag/bio_engineering|creationDate|2018-08-04 +http://www.semanlink.net/tag/bio_engineering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bio_engineering|uri|http://www.semanlink.net/tag/bio_engineering +http://www.semanlink.net/tag/nlp_4_requirements_engineering|creationTime|2019-07-04T23:54:26Z +http://www.semanlink.net/tag/nlp_4_requirements_engineering|prefLabel|NLP 4 Requirements Engineering +http://www.semanlink.net/tag/nlp_4_requirements_engineering|broader|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/tag/nlp_4_requirements_engineering|broader|http://www.semanlink.net/tag/requirements_engineering +http://www.semanlink.net/tag/nlp_4_requirements_engineering|creationDate|2019-07-04 +http://www.semanlink.net/tag/nlp_4_requirements_engineering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_4_requirements_engineering|uri|http://www.semanlink.net/tag/nlp_4_requirements_engineering +http://www.semanlink.net/tag/nlp_4_requirements_engineering|broader_prefLabel|NLP: use cases +http://www.semanlink.net/tag/nlp_4_requirements_engineering|broader_prefLabel|Requirements Engineering +http://www.semanlink.net/tag/nlp_4_requirements_engineering|broader_altLabel|NLP: applications +http://www.semanlink.net/tag/nlp_4_requirements_engineering|broader_altLabel|Ingénierie des exigences +http://www.semanlink.net/tag/software_design|prefLabel|Software design +http://www.semanlink.net/tag/software_design|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/software_design|uri|http://www.semanlink.net/tag/software_design 
+http://www.semanlink.net/tag/physique_des_particules_modele_standard|prefLabel|Physique des particules : modèle standard +http://www.semanlink.net/tag/physique_des_particules_modele_standard|broader|http://www.semanlink.net/tag/physique_des_particules +http://www.semanlink.net/tag/physique_des_particules_modele_standard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/physique_des_particules_modele_standard|uri|http://www.semanlink.net/tag/physique_des_particules_modele_standard +http://www.semanlink.net/tag/physique_des_particules_modele_standard|broader_prefLabel|Physique des particules +http://www.semanlink.net/tag/random_walk|creationTime|2020-08-11T00:48:26Z +http://www.semanlink.net/tag/random_walk|prefLabel|Random walk +http://www.semanlink.net/tag/random_walk|related|http://www.semanlink.net/tag/rdf2vec +http://www.semanlink.net/tag/random_walk|creationDate|2020-08-11 +http://www.semanlink.net/tag/random_walk|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/random_walk|describedBy|https://en.wikipedia.org/wiki/Random_walk#On_graphs +http://www.semanlink.net/tag/random_walk|uri|http://www.semanlink.net/tag/random_walk +http://www.semanlink.net/tag/synchrotron|creationTime|2008-04-10T10:35:50Z +http://www.semanlink.net/tag/synchrotron|prefLabel|Synchrotron +http://www.semanlink.net/tag/synchrotron|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/synchrotron|creationDate|2008-04-10 +http://www.semanlink.net/tag/synchrotron|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/synchrotron|uri|http://www.semanlink.net/tag/synchrotron +http://www.semanlink.net/tag/synchrotron|broader_prefLabel|Physique +http://www.semanlink.net/tag/synchrotron|broader_altLabel|Physics +http://www.semanlink.net/tag/configuration_as_linked_data|creationTime|2011-12-16T23:50:35Z +http://www.semanlink.net/tag/configuration_as_linked_data|prefLabel|Configuration as Linked Data +http://www.semanlink.net/tag/configuration_as_linked_data|broader|http://www.semanlink.net/tag/configuration +http://www.semanlink.net/tag/configuration_as_linked_data|broader|http://www.semanlink.net/tag/configuration_and_sw +http://www.semanlink.net/tag/configuration_as_linked_data|broader|http://www.semanlink.net/tag/linked_data_application +http://www.semanlink.net/tag/configuration_as_linked_data|creationDate|2011-12-16 +http://www.semanlink.net/tag/configuration_as_linked_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/configuration_as_linked_data|uri|http://www.semanlink.net/tag/configuration_as_linked_data +http://www.semanlink.net/tag/configuration_as_linked_data|broader_prefLabel|Configuration +http://www.semanlink.net/tag/configuration_as_linked_data|broader_prefLabel|Configuration and SW +http://www.semanlink.net/tag/configuration_as_linked_data|broader_prefLabel|Linked Data: application +http://www.semanlink.net/tag/night|creationTime|2012-12-06T21:58:51Z +http://www.semanlink.net/tag/night|prefLabel|Night +http://www.semanlink.net/tag/night|creationDate|2012-12-06 +http://www.semanlink.net/tag/night|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/night|uri|http://www.semanlink.net/tag/night +http://www.semanlink.net/tag/everest|prefLabel|Everest +http://www.semanlink.net/tag/everest|broader|http://www.semanlink.net/tag/himalaya 
+http://www.semanlink.net/tag/everest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/everest|uri|http://www.semanlink.net/tag/everest +http://www.semanlink.net/tag/everest|broader_prefLabel|Himalaya +http://www.semanlink.net/tag/semantic_enterprise_architecture|creationTime|2010-07-02T01:17:09Z +http://www.semanlink.net/tag/semantic_enterprise_architecture|prefLabel|Semantic Enterprise Architecture +http://www.semanlink.net/tag/semantic_enterprise_architecture|broader|http://www.semanlink.net/tag/semantic_enterprise +http://www.semanlink.net/tag/semantic_enterprise_architecture|creationDate|2010-07-02 +http://www.semanlink.net/tag/semantic_enterprise_architecture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_enterprise_architecture|uri|http://www.semanlink.net/tag/semantic_enterprise_architecture +http://www.semanlink.net/tag/semantic_enterprise_architecture|broader_prefLabel|Semantic Enterprise +http://www.semanlink.net/tag/semantic_enterprise_architecture|broader_altLabel|Enterprise Semantic Web +http://www.semanlink.net/tag/semantic_enterprise_architecture|broader_altLabel|Corporate Semantic Web +http://www.semanlink.net/tag/semantic_enterprise_architecture|broader_altLabel|Semantic Web in the enterprise +http://www.semanlink.net/tag/photos_online|prefLabel|photos online +http://www.semanlink.net/tag/photos_online|broader|http://www.semanlink.net/tag/photo +http://www.semanlink.net/tag/photos_online|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/photos_online|uri|http://www.semanlink.net/tag/photos_online +http://www.semanlink.net/tag/photos_online|broader_prefLabel|Photo +http://www.semanlink.net/tag/photos_online|broader_altLabel|Images +http://www.semanlink.net/tag/jeunesse|creationTime|2008-09-10T20:33:43Z +http://www.semanlink.net/tag/jeunesse|prefLabel|Jeunesse +http://www.semanlink.net/tag/jeunesse|creationDate|2008-09-10 +http://www.semanlink.net/tag/jeunesse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeunesse|uri|http://www.semanlink.net/tag/jeunesse +http://www.semanlink.net/tag/revolte|creationTime|2008-06-10T21:05:06Z +http://www.semanlink.net/tag/revolte|prefLabel|Révolte +http://www.semanlink.net/tag/revolte|creationDate|2008-06-10 +http://www.semanlink.net/tag/revolte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/revolte|uri|http://www.semanlink.net/tag/revolte +http://www.semanlink.net/tag/crime|creationTime|2015-01-24T12:06:45Z +http://www.semanlink.net/tag/crime|prefLabel|Crime +http://www.semanlink.net/tag/crime|creationDate|2015-01-24 +http://www.semanlink.net/tag/crime|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/crime|uri|http://www.semanlink.net/tag/crime +http://www.semanlink.net/tag/word_embedding_compositionality|creationTime|2018-05-10T17:05:29Z +http://www.semanlink.net/tag/word_embedding_compositionality|prefLabel|Word Embedding Compositionality +http://www.semanlink.net/tag/word_embedding_compositionality|broader|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/word_embedding_compositionality|creationDate|2018-05-10 +http://www.semanlink.net/tag/word_embedding_compositionality|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/word_embedding_compositionality|uri|http://www.semanlink.net/tag/word_embedding_compositionality 
+http://www.semanlink.net/tag/word_embedding_compositionality|broader_prefLabel|Word embeddings +http://www.semanlink.net/tag/word_embedding_compositionality|broader_altLabel|Word Embedding +http://www.semanlink.net/tag/word_embedding_compositionality|broader_altLabel|Plongement lexical +http://www.semanlink.net/tag/word_embedding_compositionality|broader_related|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/word_embedding_compositionality|broader_related|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/tag/word_embedding_compositionality|broader_related|http://www.semanlink.net/tag/sense_embeddings +http://www.semanlink.net/tag/sparql_1_1|creationTime|2010-08-20T12:38:45Z +http://www.semanlink.net/tag/sparql_1_1|prefLabel|SPARQL 1.1 +http://www.semanlink.net/tag/sparql_1_1|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_1_1|creationDate|2010-08-20 +http://www.semanlink.net/tag/sparql_1_1|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_1_1|uri|http://www.semanlink.net/tag/sparql_1_1 +http://www.semanlink.net/tag/sparql_1_1|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/requin|creationTime|2017-07-21T02:04:07Z +http://www.semanlink.net/tag/requin|prefLabel|Requin +http://www.semanlink.net/tag/requin|broader|http://www.semanlink.net/tag/poisson +http://www.semanlink.net/tag/requin|creationDate|2017-07-21 +http://www.semanlink.net/tag/requin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/requin|uri|http://www.semanlink.net/tag/requin +http://www.semanlink.net/tag/requin|broader_prefLabel|Poisson +http://www.semanlink.net/tag/histoire_de_l_europe|prefLabel|Histoire de l'Europe +http://www.semanlink.net/tag/histoire_de_l_europe|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/histoire_de_l_europe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_de_l_europe|uri|http://www.semanlink.net/tag/histoire_de_l_europe +http://www.semanlink.net/tag/histoire_de_l_europe|broader_prefLabel|Europe +http://www.semanlink.net/tag/meteorite|prefLabel|Météorite +http://www.semanlink.net/tag/meteorite|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/meteorite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/meteorite|uri|http://www.semanlink.net/tag/meteorite +http://www.semanlink.net/tag/meteorite|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/environnement|prefLabel|Environnement +http://www.semanlink.net/tag/environnement|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/environnement|uri|http://www.semanlink.net/tag/environnement +http://www.semanlink.net/tag/desertification|prefLabel|Désertification +http://www.semanlink.net/tag/desertification|broader|http://www.semanlink.net/tag/desert +http://www.semanlink.net/tag/desertification|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/desertification|uri|http://www.semanlink.net/tag/desertification +http://www.semanlink.net/tag/desertification|broader_prefLabel|Désert +http://www.semanlink.net/tag/ayrault|creationTime|2013-07-30T14:18:50Z +http://www.semanlink.net/tag/ayrault|prefLabel|Ayrault +http://www.semanlink.net/tag/ayrault|creationDate|2013-07-30 +http://www.semanlink.net/tag/ayrault|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/ayrault|uri|http://www.semanlink.net/tag/ayrault +http://www.semanlink.net/tag/livre|prefLabel|Livre +http://www.semanlink.net/tag/livre|broader|http://www.semanlink.net/tag/reading +http://www.semanlink.net/tag/livre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/livre|altLabel|Livres +http://www.semanlink.net/tag/livre|uri|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/livre|broader_prefLabel|Reading +http://www.semanlink.net/tag/brain_implants|creationTime|2018-01-03T00:56:06Z +http://www.semanlink.net/tag/brain_implants|prefLabel|Brain implants +http://www.semanlink.net/tag/brain_implants|broader|http://www.semanlink.net/tag/cerveau +http://www.semanlink.net/tag/brain_implants|creationDate|2018-01-03 +http://www.semanlink.net/tag/brain_implants|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/brain_implants|describedBy|https://en.wikipedia.org/wiki/Brain_implant +http://www.semanlink.net/tag/brain_implants|uri|http://www.semanlink.net/tag/brain_implants +http://www.semanlink.net/tag/brain_implants|broader_prefLabel|Brain +http://www.semanlink.net/tag/brain_implants|broader_altLabel|Cerveau +http://www.semanlink.net/tag/brain_implants|broader_related|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/tag/undecidability|prefLabel|Undecidability +http://www.semanlink.net/tag/undecidability|broader|http://www.semanlink.net/tag/inference +http://www.semanlink.net/tag/undecidability|creationDate|2006-12-01 +http://www.semanlink.net/tag/undecidability|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/undecidability|uri|http://www.semanlink.net/tag/undecidability +http://www.semanlink.net/tag/undecidability|broader_prefLabel|Inference +http://www.semanlink.net/tag/afrique_equatoriale|prefLabel|Afrique équatoriale +http://www.semanlink.net/tag/afrique_equatoriale|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/afrique_equatoriale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afrique_equatoriale|uri|http://www.semanlink.net/tag/afrique_equatoriale +http://www.semanlink.net/tag/afrique_equatoriale|broader_prefLabel|Afrique +http://www.semanlink.net/tag/afrique_equatoriale|broader_altLabel|Africa +http://www.semanlink.net/tag/wuhan|creationTime|2020-04-11T14:47:38Z +http://www.semanlink.net/tag/wuhan|prefLabel|Wuhan +http://www.semanlink.net/tag/wuhan|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/wuhan|creationDate|2020-04-11 +http://www.semanlink.net/tag/wuhan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wuhan|uri|http://www.semanlink.net/tag/wuhan +http://www.semanlink.net/tag/wuhan|broader_prefLabel|Chine +http://www.semanlink.net/tag/wuhan|broader_altLabel|China +http://www.semanlink.net/tag/shoah|prefLabel|Shoah +http://www.semanlink.net/tag/shoah|broader|http://www.semanlink.net/tag/nazisme +http://www.semanlink.net/tag/shoah|broader|http://www.semanlink.net/tag/juif +http://www.semanlink.net/tag/shoah|broader|http://www.semanlink.net/tag/genocide +http://www.semanlink.net/tag/shoah|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/shoah|uri|http://www.semanlink.net/tag/shoah +http://www.semanlink.net/tag/shoah|broader_prefLabel|Nazisme +http://www.semanlink.net/tag/shoah|broader_prefLabel|Juifs 
+http://www.semanlink.net/tag/shoah|broader_prefLabel|Génocide +http://www.semanlink.net/tag/shoah|broader_altLabel|Nazi +http://www.semanlink.net/tag/shoah|broader_related|http://www.semanlink.net/tag/2eme_guerre_mondiale +http://www.semanlink.net/tag/shoah|broader_related|http://www.semanlink.net/tag/antisemitisme +http://www.semanlink.net/tag/jsonld_jena|creationTime|2016-04-09T11:47:12Z +http://www.semanlink.net/tag/jsonld_jena|prefLabel|Jsonld/Jena +http://www.semanlink.net/tag/jsonld_jena|broader|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/jsonld_jena|broader|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/jsonld_jena|related|http://www.semanlink.net/tag/jsonld_java +http://www.semanlink.net/tag/jsonld_jena|creationDate|2016-04-09 +http://www.semanlink.net/tag/jsonld_jena|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jsonld_jena|uri|http://www.semanlink.net/tag/jsonld_jena +http://www.semanlink.net/tag/jsonld_jena|broader_prefLabel|JSON-LD +http://www.semanlink.net/tag/jsonld_jena|broader_prefLabel|Jena +http://www.semanlink.net/tag/jsonld_jena|broader_altLabel|JSONLD +http://www.semanlink.net/tag/jsonld_jena|broader_related|http://www.semanlink.net/tag/hp +http://www.semanlink.net/tag/vie_artificielle|prefLabel|Vie artificielle +http://www.semanlink.net/tag/vie_artificielle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/vie_artificielle|uri|http://www.semanlink.net/tag/vie_artificielle +http://www.semanlink.net/tag/nuclear_power_no_thanks|creationTime|2020-01-04T01:01:43Z +http://www.semanlink.net/tag/nuclear_power_no_thanks|prefLabel|Nuclear Power? No thanks +http://www.semanlink.net/tag/nuclear_power_no_thanks|broader|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/tag/nuclear_power_no_thanks|creationDate|2020-01-04 +http://www.semanlink.net/tag/nuclear_power_no_thanks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nuclear_power_no_thanks|uri|http://www.semanlink.net/tag/nuclear_power_no_thanks +http://www.semanlink.net/tag/nuclear_power_no_thanks|broader_prefLabel|Industrie nucléaire +http://www.semanlink.net/tag/nuclear_power_no_thanks|broader_altLabel|Nucléaire +http://www.semanlink.net/tag/forward_chaining|creationTime|2009-01-03T01:16:03Z +http://www.semanlink.net/tag/forward_chaining|prefLabel|Forward chaining +http://www.semanlink.net/tag/forward_chaining|broader|http://www.semanlink.net/tag/entailment +http://www.semanlink.net/tag/forward_chaining|creationDate|2009-01-03 +http://www.semanlink.net/tag/forward_chaining|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/forward_chaining|uri|http://www.semanlink.net/tag/forward_chaining +http://www.semanlink.net/tag/forward_chaining|broader_prefLabel|Entailment +http://www.semanlink.net/tag/solr_and_nlp|creationTime|2014-03-15T13:50:06Z +http://www.semanlink.net/tag/solr_and_nlp|prefLabel|Solr and NLP +http://www.semanlink.net/tag/solr_and_nlp|broader|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/tag/solr_and_nlp|broader|http://www.semanlink.net/tag/solr +http://www.semanlink.net/tag/solr_and_nlp|creationDate|2014-03-15 +http://www.semanlink.net/tag/solr_and_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/solr_and_nlp|uri|http://www.semanlink.net/tag/solr_and_nlp +http://www.semanlink.net/tag/solr_and_nlp|broader_prefLabel|NLP tools 
+http://www.semanlink.net/tag/solr_and_nlp|broader_prefLabel|Solr +http://www.semanlink.net/tag/web_services_for_javascript|prefLabel|Web Services for JavaScript +http://www.semanlink.net/tag/web_services_for_javascript|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/web_services_for_javascript|broader|http://www.semanlink.net/tag/web_services +http://www.semanlink.net/tag/web_services_for_javascript|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_services_for_javascript|uri|http://www.semanlink.net/tag/web_services_for_javascript +http://www.semanlink.net/tag/web_services_for_javascript|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/web_services_for_javascript|broader_prefLabel|Web Services +http://www.semanlink.net/tag/web_services_for_javascript|broader_altLabel|js +http://www.semanlink.net/tag/web_services_for_javascript|broader_altLabel|WS +http://www.semanlink.net/tag/alimentation|creationTime|2009-12-13T14:53:54Z +http://www.semanlink.net/tag/alimentation|prefLabel|Alimentation +http://www.semanlink.net/tag/alimentation|creationDate|2009-12-13 +http://www.semanlink.net/tag/alimentation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alimentation|altLabel|Nourriture +http://www.semanlink.net/tag/alimentation|uri|http://www.semanlink.net/tag/alimentation +http://www.semanlink.net/tag/magie|creationTime|2013-08-27T13:52:01Z +http://www.semanlink.net/tag/magie|prefLabel|Magie +http://www.semanlink.net/tag/magie|creationDate|2013-08-27 +http://www.semanlink.net/tag/magie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/magie|uri|http://www.semanlink.net/tag/magie +http://www.semanlink.net/tag/sparql_update|creationTime|2008-10-21T14:27:17Z +http://www.semanlink.net/tag/sparql_update|prefLabel|SPARQL Update +http://www.semanlink.net/tag/sparql_update|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_update|broader|http://www.semanlink.net/tag/read_write_linked_data +http://www.semanlink.net/tag/sparql_update|creationDate|2008-10-21 +http://www.semanlink.net/tag/sparql_update|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_update|uri|http://www.semanlink.net/tag/sparql_update +http://www.semanlink.net/tag/sparql_update|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/sparql_update|broader_prefLabel|Read-Write Linked Data +http://www.semanlink.net/tag/sparql_update|broader_altLabel|RW Linked Data +http://www.semanlink.net/tag/imac|prefLabel|iMac +http://www.semanlink.net/tag/imac|broader|http://www.semanlink.net/tag/macintosh +http://www.semanlink.net/tag/imac|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/imac|uri|http://www.semanlink.net/tag/imac +http://www.semanlink.net/tag/imac|broader_prefLabel|Macintosh +http://www.semanlink.net/tag/arq_property_functions|creationTime|2008-12-12T10:37:00Z +http://www.semanlink.net/tag/arq_property_functions|prefLabel|ARQ property functions +http://www.semanlink.net/tag/arq_property_functions|broader|http://www.semanlink.net/tag/arq +http://www.semanlink.net/tag/arq_property_functions|creationDate|2008-12-12 +http://www.semanlink.net/tag/arq_property_functions|comment|Voir com.hp.hpl.jena.sparql.engine.QueryExecutionBase +http://www.semanlink.net/tag/arq_property_functions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/arq_property_functions|uri|http://www.semanlink.net/tag/arq_property_functions +http://www.semanlink.net/tag/arq_property_functions|broader_prefLabel|ARQ +http://www.semanlink.net/tag/services_secrets|creationTime|2014-09-03T22:39:40Z +http://www.semanlink.net/tag/services_secrets|prefLabel|Services secrets +http://www.semanlink.net/tag/services_secrets|creationDate|2014-09-03 +http://www.semanlink.net/tag/services_secrets|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/services_secrets|uri|http://www.semanlink.net/tag/services_secrets +http://www.semanlink.net/tag/ibm|prefLabel|IBM +http://www.semanlink.net/tag/ibm|broader|http://www.semanlink.net/tag/ntic +http://www.semanlink.net/tag/ibm|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/ibm|broader|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/ibm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ibm|uri|http://www.semanlink.net/tag/ibm +http://www.semanlink.net/tag/ibm|broader_prefLabel|NTIC +http://www.semanlink.net/tag/ibm|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/ibm|broader_prefLabel|Informatique +http://www.semanlink.net/tag/javascript_rdf_parser|prefLabel|Javascript RDF Parser +http://www.semanlink.net/tag/javascript_rdf_parser|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/javascript_rdf_parser|broader|http://www.semanlink.net/tag/javascript_rdf +http://www.semanlink.net/tag/javascript_rdf_parser|broader|http://www.semanlink.net/tag/rdf_parser +http://www.semanlink.net/tag/javascript_rdf_parser|related|http://www.semanlink.net/tag/ajar +http://www.semanlink.net/tag/javascript_rdf_parser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/javascript_rdf_parser|uri|http://www.semanlink.net/tag/javascript_rdf_parser +http://www.semanlink.net/tag/javascript_rdf_parser|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/javascript_rdf_parser|broader_prefLabel|Javascript RDF +http://www.semanlink.net/tag/javascript_rdf_parser|broader_prefLabel|RDF Parser +http://www.semanlink.net/tag/javascript_rdf_parser|broader_altLabel|js +http://www.semanlink.net/tag/javascript_rdf_parser|broader_related|http://www.semanlink.net/tag/rdf_in_json +http://www.semanlink.net/tag/troubleshooting|prefLabel|Troubleshooting +http://www.semanlink.net/tag/troubleshooting|broader|http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur +http://www.semanlink.net/tag/troubleshooting|creationDate|2006-11-23 +http://www.semanlink.net/tag/troubleshooting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/troubleshooting|uri|http://www.semanlink.net/tag/troubleshooting +http://www.semanlink.net/tag/troubleshooting|broader_prefLabel|J'ai un petit problème avec mon ordinateur +http://www.semanlink.net/tag/rfi|prefLabel|RFI +http://www.semanlink.net/tag/rfi|broader|http://www.semanlink.net/tag/radio +http://www.semanlink.net/tag/rfi|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rfi|altLabel|Radio France Internationale +http://www.semanlink.net/tag/rfi|uri|http://www.semanlink.net/tag/rfi +http://www.semanlink.net/tag/rfi|broader_prefLabel|Radio +http://www.semanlink.net/tag/afia|creationTime|2019-12-03T10:52:45Z +http://www.semanlink.net/tag/afia|prefLabel|AFIA +http://www.semanlink.net/tag/afia|creationDate|2019-12-03 
+http://www.semanlink.net/tag/afia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afia|uri|http://www.semanlink.net/tag/afia +http://www.semanlink.net/tag/pimo|prefLabel|PIMO +http://www.semanlink.net/tag/pimo|broader|http://www.semanlink.net/tag/personal_ontology +http://www.semanlink.net/tag/pimo|broader|http://www.semanlink.net/tag/gnowsis +http://www.semanlink.net/tag/pimo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pimo|uri|http://www.semanlink.net/tag/pimo +http://www.semanlink.net/tag/pimo|broader_prefLabel|Personal ontology +http://www.semanlink.net/tag/pimo|broader_prefLabel|gnowsis +http://www.semanlink.net/tag/jena_assembler|creationTime|2008-11-17T00:39:45Z +http://www.semanlink.net/tag/jena_assembler|prefLabel|Jena: assembler +http://www.semanlink.net/tag/jena_assembler|broader|http://www.semanlink.net/tag/jena_dev +http://www.semanlink.net/tag/jena_assembler|creationDate|2008-11-17 +http://www.semanlink.net/tag/jena_assembler|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jena_assembler|uri|http://www.semanlink.net/tag/jena_assembler +http://www.semanlink.net/tag/jena_assembler|broader_prefLabel|Jena dev +http://www.semanlink.net/tag/economiste|prefLabel|Economiste +http://www.semanlink.net/tag/economiste|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/economiste|uri|http://www.semanlink.net/tag/economiste +http://www.semanlink.net/tag/smart_energy_grids|creationTime|2009-05-12T00:14:56Z +http://www.semanlink.net/tag/smart_energy_grids|prefLabel|Smart energy grids +http://www.semanlink.net/tag/smart_energy_grids|broader|http://www.semanlink.net/tag/economies_d_energie +http://www.semanlink.net/tag/smart_energy_grids|broader|http://www.semanlink.net/tag/energie +http://www.semanlink.net/tag/smart_energy_grids|creationDate|2009-05-12 +http://www.semanlink.net/tag/smart_energy_grids|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/smart_energy_grids|uri|http://www.semanlink.net/tag/smart_energy_grids +http://www.semanlink.net/tag/smart_energy_grids|broader_prefLabel|Economies d'énergie +http://www.semanlink.net/tag/smart_energy_grids|broader_prefLabel|Energie +http://www.semanlink.net/tag/http_patch|creationTime|2014-09-22T09:54:39Z +http://www.semanlink.net/tag/http_patch|prefLabel|HTTP PATCH +http://www.semanlink.net/tag/http_patch|broader|http://www.semanlink.net/tag/http +http://www.semanlink.net/tag/http_patch|creationDate|2014-09-22 +http://www.semanlink.net/tag/http_patch|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/http_patch|uri|http://www.semanlink.net/tag/http_patch +http://www.semanlink.net/tag/http_patch|broader_prefLabel|HTTP +http://www.semanlink.net/tag/mines_d_or|creationTime|2007-07-26T13:02:49Z +http://www.semanlink.net/tag/mines_d_or|prefLabel|Mines d'or +http://www.semanlink.net/tag/mines_d_or|broader|http://www.semanlink.net/tag/industrie_miniere +http://www.semanlink.net/tag/mines_d_or|broader|http://www.semanlink.net/tag/or +http://www.semanlink.net/tag/mines_d_or|creationDate|2007-07-26 +http://www.semanlink.net/tag/mines_d_or|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mines_d_or|uri|http://www.semanlink.net/tag/mines_d_or +http://www.semanlink.net/tag/mines_d_or|broader_prefLabel|Industrie minière +http://www.semanlink.net/tag/mines_d_or|broader_prefLabel|Or 
+http://www.semanlink.net/tag/thewebconf_2019|creationTime|2019-04-30T13:28:54Z +http://www.semanlink.net/tag/thewebconf_2019|prefLabel|TheWebConf 2019 +http://www.semanlink.net/tag/thewebconf_2019|broader|http://www.semanlink.net/tag/www_conference +http://www.semanlink.net/tag/thewebconf_2019|creationDate|2019-04-30 +http://www.semanlink.net/tag/thewebconf_2019|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/thewebconf_2019|uri|http://www.semanlink.net/tag/thewebconf_2019 +http://www.semanlink.net/tag/thewebconf_2019|broader_prefLabel|TheWebConf +http://www.semanlink.net/tag/thewebconf_2019|broader_altLabel|WWW Conference +http://www.semanlink.net/tag/calais_jungle|creationTime|2015-12-13T12:33:50Z +http://www.semanlink.net/tag/calais_jungle|prefLabel|Calais (jungle) +http://www.semanlink.net/tag/calais_jungle|broader|http://www.semanlink.net/tag/honteux +http://www.semanlink.net/tag/calais_jungle|broader|http://www.semanlink.net/tag/immigration +http://www.semanlink.net/tag/calais_jungle|creationDate|2015-12-13 +http://www.semanlink.net/tag/calais_jungle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/calais_jungle|uri|http://www.semanlink.net/tag/calais_jungle +http://www.semanlink.net/tag/calais_jungle|broader_prefLabel|Honteux +http://www.semanlink.net/tag/calais_jungle|broader_prefLabel|Immigration +http://www.semanlink.net/tag/coursera_introduction_to_data_science|creationTime|2013-04-29T16:29:47Z +http://www.semanlink.net/tag/coursera_introduction_to_data_science|prefLabel|Coursera: Introduction to Data Science +http://www.semanlink.net/tag/coursera_introduction_to_data_science|broader|http://www.semanlink.net/tag/coursera +http://www.semanlink.net/tag/coursera_introduction_to_data_science|broader|http://www.semanlink.net/tag/data_science +http://www.semanlink.net/tag/coursera_introduction_to_data_science|creationDate|2013-04-29 +http://www.semanlink.net/tag/coursera_introduction_to_data_science|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coursera_introduction_to_data_science|uri|http://www.semanlink.net/tag/coursera_introduction_to_data_science +http://www.semanlink.net/tag/coursera_introduction_to_data_science|broader_prefLabel|Coursera +http://www.semanlink.net/tag/coursera_introduction_to_data_science|broader_prefLabel|Data science +http://www.semanlink.net/tag/coursera_introduction_to_data_science|broader_altLabel|Data analysis +http://www.semanlink.net/tag/coursera_introduction_to_data_science|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/exposition_universelle|prefLabel|Exposition universelle +http://www.semanlink.net/tag/exposition_universelle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exposition_universelle|uri|http://www.semanlink.net/tag/exposition_universelle +http://www.semanlink.net/tag/linked_data_service|creationTime|2010-03-10T21:18:38Z +http://www.semanlink.net/tag/linked_data_service|prefLabel|Linked Data Service +http://www.semanlink.net/tag/linked_data_service|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_service|creationDate|2010-03-10 +http://www.semanlink.net/tag/linked_data_service|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_service|uri|http://www.semanlink.net/tag/linked_data_service 
+http://www.semanlink.net/tag/linked_data_service|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_service|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data_service|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/heredia|prefLabel|Heredia +http://www.semanlink.net/tag/heredia|broader|http://www.semanlink.net/tag/poete +http://www.semanlink.net/tag/heredia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/heredia|uri|http://www.semanlink.net/tag/heredia +http://www.semanlink.net/tag/heredia|broader_prefLabel|Poète +http://www.semanlink.net/tag/automotive_ontologies|creationTime|2012-12-13T11:39:10Z +http://www.semanlink.net/tag/automotive_ontologies|prefLabel|Automotive ontologies +http://www.semanlink.net/tag/automotive_ontologies|broader|http://www.semanlink.net/tag/automotive_and_web_technologies +http://www.semanlink.net/tag/automotive_ontologies|creationDate|2012-12-13 +http://www.semanlink.net/tag/automotive_ontologies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/automotive_ontologies|altLabel|Car ontology +http://www.semanlink.net/tag/automotive_ontologies|uri|http://www.semanlink.net/tag/automotive_ontologies +http://www.semanlink.net/tag/automotive_ontologies|broader_prefLabel|Automotive and web technologies +http://www.semanlink.net/tag/grddl|prefLabel|GRDDL +http://www.semanlink.net/tag/grddl|broader|http://www.semanlink.net/tag/xhtml +http://www.semanlink.net/tag/grddl|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/grddl|broader|http://www.semanlink.net/tag/xslt +http://www.semanlink.net/tag/grddl|related|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/tag/grddl|comment|GRDDL is a method for embedding machine-readable data into an XHTML document. GRDDL does not define any specific way of embedding this data, instead it uses XSLT for extracting the information from the XHTML document and producing RDF. 
Using this method, the machine-readable data within the document can use almost any representation, because GRDDL applications will always be able to transform this representation into RDF by transforming the document with the XSLT associated with the document. +http://www.semanlink.net/tag/grddl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grddl|uri|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/grddl|broader_prefLabel|XHTML +http://www.semanlink.net/tag/grddl|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/grddl|broader_prefLabel|XSLT +http://www.semanlink.net/tag/grddl|broader_altLabel|sw +http://www.semanlink.net/tag/grddl|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/fichage|creationTime|2008-07-08T21:18:30Z +http://www.semanlink.net/tag/fichage|prefLabel|Fichage +http://www.semanlink.net/tag/fichage|creationDate|2008-07-08 +http://www.semanlink.net/tag/fichage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fichage|uri|http://www.semanlink.net/tag/fichage +http://www.semanlink.net/tag/congo_belge|creationTime|2021-04-28T17:04:12Z +http://www.semanlink.net/tag/congo_belge|prefLabel|Congo belge +http://www.semanlink.net/tag/congo_belge|broader|http://www.semanlink.net/tag/histoire_coloniale +http://www.semanlink.net/tag/congo_belge|broader|http://www.semanlink.net/tag/belgique +http://www.semanlink.net/tag/congo_belge|broader|http://www.semanlink.net/tag/afrique_centrale +http://www.semanlink.net/tag/congo_belge|creationDate|2021-04-28 +http://www.semanlink.net/tag/congo_belge|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/congo_belge|uri|http://www.semanlink.net/tag/congo_belge +http://www.semanlink.net/tag/congo_belge|broader_prefLabel|Histoire coloniale +http://www.semanlink.net/tag/congo_belge|broader_prefLabel|Belgique +http://www.semanlink.net/tag/congo_belge|broader_prefLabel|Afrique Centrale +http://www.semanlink.net/tag/openai|creationTime|2018-11-15T23:03:54Z +http://www.semanlink.net/tag/openai|prefLabel|OpenAI +http://www.semanlink.net/tag/openai|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/openai|creationDate|2018-11-15 +http://www.semanlink.net/tag/openai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/openai|homepage|https://openai.com/ +http://www.semanlink.net/tag/openai|uri|http://www.semanlink.net/tag/openai +http://www.semanlink.net/tag/openai|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/openai|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/openai|broader_altLabel|AI +http://www.semanlink.net/tag/openai|broader_altLabel|IA +http://www.semanlink.net/tag/openai|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/decouverte_archeologique|prefLabel|Découverte archéologique +http://www.semanlink.net/tag/decouverte_archeologique|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/decouverte_archeologique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/decouverte_archeologique|uri|http://www.semanlink.net/tag/decouverte_archeologique +http://www.semanlink.net/tag/decouverte_archeologique|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/decouverte_archeologique|broader_related|http://www.semanlink.net/tag/antiquite 
+http://www.semanlink.net/tag/industrie_miniere|prefLabel|Industrie minière
+http://www.semanlink.net/tag/industrie_miniere|broader|http://www.semanlink.net/tag/industrie
+http://www.semanlink.net/tag/industrie_miniere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/industrie_miniere|uri|http://www.semanlink.net/tag/industrie_miniere
+http://www.semanlink.net/tag/industrie_miniere|broader_prefLabel|industrie
+http://www.semanlink.net/tag/touareg|prefLabel|Touareg
+http://www.semanlink.net/tag/touareg|broader|http://www.semanlink.net/tag/peuples
+http://www.semanlink.net/tag/touareg|broader|http://www.semanlink.net/tag/sahara
+http://www.semanlink.net/tag/touareg|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/touareg|uri|http://www.semanlink.net/tag/touareg
+http://www.semanlink.net/tag/touareg|broader_prefLabel|Peuples
+http://www.semanlink.net/tag/touareg|broader_prefLabel|Sahara
+http://www.semanlink.net/tag/genetically_engineered_micro_and_nanodevices|prefLabel|Genetically Engineered Micro and Nanodevices
+http://www.semanlink.net/tag/genetically_engineered_micro_and_nanodevices|broader|http://www.semanlink.net/tag/dna_nanotechnology
+http://www.semanlink.net/tag/genetically_engineered_micro_and_nanodevices|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/genetically_engineered_micro_and_nanodevices|uri|http://www.semanlink.net/tag/genetically_engineered_micro_and_nanodevices
+http://www.semanlink.net/tag/genetically_engineered_micro_and_nanodevices|broader_prefLabel|DNA nanotechnology
+http://www.semanlink.net/tag/semanlink_dev|prefLabel|Semanlink dev
+http://www.semanlink.net/tag/semanlink_dev|broader|http://www.semanlink.net/tag/semanlink
+http://www.semanlink.net/tag/semanlink_dev|broader|http://www.semanlink.net/tag/dev
+http://www.semanlink.net/tag/semanlink_dev|comment|[Notes](/doc/2020/01/Semanlink%20dev%20notes.md)
+http://www.semanlink.net/tag/semanlink_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semanlink_dev|uri|http://www.semanlink.net/tag/semanlink_dev
+http://www.semanlink.net/tag/semanlink_dev|broader_prefLabel|Semanlink
+http://www.semanlink.net/tag/semanlink_dev|broader_prefLabel|Dev
+http://www.semanlink.net/tag/semanlink_dev|broader_altLabel|SL
+http://www.semanlink.net/tag/semanlink_dev|broader_related|http://www.semanlink.net/tag/semanlink_related
+http://www.semanlink.net/tag/docker_mac|creationTime|2016-04-06T15:28:35Z
+http://www.semanlink.net/tag/docker_mac|prefLabel|Docker-Mac
+http://www.semanlink.net/tag/docker_mac|broader|http://www.semanlink.net/tag/docker
+http://www.semanlink.net/tag/docker_mac|creationDate|2016-04-06
+http://www.semanlink.net/tag/docker_mac|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/docker_mac|uri|http://www.semanlink.net/tag/docker_mac
+http://www.semanlink.net/tag/docker_mac|broader_prefLabel|Docker
+http://www.semanlink.net/tag/brinxmat|creationTime|2016-04-09T00:54:13Z
+http://www.semanlink.net/tag/brinxmat|prefLabel|Brinxmat
+http://www.semanlink.net/tag/brinxmat|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/brinxmat|creationDate|2016-04-09
+http://www.semanlink.net/tag/brinxmat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/brinxmat|uri|http://www.semanlink.net/tag/brinxmat
+http://www.semanlink.net/tag/brinxmat|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/mathematicien|prefLabel|Mathématicien
+http://www.semanlink.net/tag/mathematicien|broader|http://www.semanlink.net/tag/mathematiques
+http://www.semanlink.net/tag/mathematicien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mathematicien|uri|http://www.semanlink.net/tag/mathematicien
+http://www.semanlink.net/tag/mathematicien|broader_prefLabel|Mathématiques
+http://www.semanlink.net/tag/mathematicien|broader_altLabel|Math
+http://www.semanlink.net/tag/wikimedia|creationTime|2011-10-18T09:26:22Z
+http://www.semanlink.net/tag/wikimedia|prefLabel|Wikimedia
+http://www.semanlink.net/tag/wikimedia|related|http://www.semanlink.net/tag/wikipedia
+http://www.semanlink.net/tag/wikimedia|creationDate|2011-10-18
+http://www.semanlink.net/tag/wikimedia|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/wikimedia|uri|http://www.semanlink.net/tag/wikimedia
+http://www.semanlink.net/tag/stanford|prefLabel|Stanford
+http://www.semanlink.net/tag/stanford|broader|http://www.semanlink.net/tag/universites_americaines
+http://www.semanlink.net/tag/stanford|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/stanford|uri|http://www.semanlink.net/tag/stanford
+http://www.semanlink.net/tag/stanford|broader_prefLabel|Universités américaines
+http://www.semanlink.net/tag/cache|creationTime|2008-11-18T18:47:50Z
+http://www.semanlink.net/tag/cache|prefLabel|Cache
+http://www.semanlink.net/tag/cache|creationDate|2008-11-18
+http://www.semanlink.net/tag/cache|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cache|uri|http://www.semanlink.net/tag/cache
+http://www.semanlink.net/tag/n_ary_relations_on_the_semantic_web|prefLabel|N-ary Relations on the Semantic Web
+http://www.semanlink.net/tag/n_ary_relations_on_the_semantic_web|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/n_ary_relations_on_the_semantic_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/n_ary_relations_on_the_semantic_web|uri|http://www.semanlink.net/tag/n_ary_relations_on_the_semantic_web
+http://www.semanlink.net/tag/n_ary_relations_on_the_semantic_web|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/n_ary_relations_on_the_semantic_web|broader_altLabel|sw
+http://www.semanlink.net/tag/n_ary_relations_on_the_semantic_web|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/rdfizers|prefLabel|RDFizers
+http://www.semanlink.net/tag/rdfizers|broader|http://www.semanlink.net/tag/rdf
+http://www.semanlink.net/tag/rdfizers|comment|RDFizers are tools that allow to transform existing data into an RDF representation.
+http://www.semanlink.net/tag/rdfizers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rdfizers|uri|http://www.semanlink.net/tag/rdfizers
+http://www.semanlink.net/tag/rdfizers|broader_prefLabel|RDF
+http://www.semanlink.net/tag/rdfizers|broader_related|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/rdfizers|broader_related|http://www.semanlink.net/tag/ora_lassila
+http://www.semanlink.net/tag/rdfizers|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/rdfizers|broader_related|http://www.semanlink.net/tag/guha
+http://www.semanlink.net/tag/rdfizers|broader_related|http://www.semanlink.net/tag/grddl
+http://www.semanlink.net/tag/coupe_du_monde_de_football|prefLabel|Coupe du monde de football
+http://www.semanlink.net/tag/coupe_du_monde_de_football|broader|http://www.semanlink.net/tag/football
+http://www.semanlink.net/tag/coupe_du_monde_de_football|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/coupe_du_monde_de_football|uri|http://www.semanlink.net/tag/coupe_du_monde_de_football
+http://www.semanlink.net/tag/coupe_du_monde_de_football|broader_prefLabel|Football
+http://www.semanlink.net/tag/cold_start_problem|creationTime|2020-11-10T14:29:29Z
+http://www.semanlink.net/tag/cold_start_problem|prefLabel|Cold start
+http://www.semanlink.net/tag/cold_start_problem|creationDate|2020-11-10
+http://www.semanlink.net/tag/cold_start_problem|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cold_start_problem|uri|http://www.semanlink.net/tag/cold_start_problem
+http://www.semanlink.net/tag/faim|prefLabel|Faim
+http://www.semanlink.net/tag/faim|broader|http://www.semanlink.net/tag/alimentation
+http://www.semanlink.net/tag/faim|broader|http://www.semanlink.net/tag/grands_problemes
+http://www.semanlink.net/tag/faim|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/faim|uri|http://www.semanlink.net/tag/faim
+http://www.semanlink.net/tag/faim|broader_prefLabel|Alimentation
+http://www.semanlink.net/tag/faim|broader_prefLabel|Grands problèmes
+http://www.semanlink.net/tag/faim|broader_altLabel|Nourriture
+http://www.semanlink.net/tag/owled_2007_and_fps|creationTime|2008-09-23T15:01:11Z
+http://www.semanlink.net/tag/owled_2007_and_fps|prefLabel|OWLED 2007 AND fps
+http://www.semanlink.net/tag/owled_2007_and_fps|broader|http://www.semanlink.net/tag/fps
+http://www.semanlink.net/tag/owled_2007_and_fps|broader|http://www.semanlink.net/tag/owled_2007
+http://www.semanlink.net/tag/owled_2007_and_fps|broader|http://www.semanlink.net/tag/sw_in_technical_automotive_documentation
+http://www.semanlink.net/tag/owled_2007_and_fps|creationDate|2008-09-23
+http://www.semanlink.net/tag/owled_2007_and_fps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/owled_2007_and_fps|uri|http://www.semanlink.net/tag/owled_2007_and_fps
+http://www.semanlink.net/tag/owled_2007_and_fps|broader_prefLabel|fps
+http://www.semanlink.net/tag/owled_2007_and_fps|broader_prefLabel|OWLED 2007
+http://www.semanlink.net/tag/owled_2007_and_fps|broader_prefLabel|SW in Technical Automotive Documentation
+http://www.semanlink.net/tag/athenes|prefLabel|Athènes
+http://www.semanlink.net/tag/athenes|broader|http://www.semanlink.net/tag/grece
+http://www.semanlink.net/tag/athenes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/athenes|uri|http://www.semanlink.net/tag/athenes
+http://www.semanlink.net/tag/athenes|broader_prefLabel|Grèce
+http://www.semanlink.net/tag/droit|creationTime|2008-03-29T15:09:19Z
+http://www.semanlink.net/tag/droit|prefLabel|Droit
+http://www.semanlink.net/tag/droit|related|http://www.semanlink.net/tag/justice
+http://www.semanlink.net/tag/droit|creationDate|2008-03-29
+http://www.semanlink.net/tag/droit|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/droit|uri|http://www.semanlink.net/tag/droit
+http://www.semanlink.net/tag/travailler_le_dimanche|creationTime|2008-11-18T22:30:18Z
+http://www.semanlink.net/tag/travailler_le_dimanche|prefLabel|Travailler le dimanche
+http://www.semanlink.net/tag/travailler_le_dimanche|creationDate|2008-11-18
+http://www.semanlink.net/tag/travailler_le_dimanche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/travailler_le_dimanche|uri|http://www.semanlink.net/tag/travailler_le_dimanche
+http://www.semanlink.net/tag/phd|creationTime|2017-08-27T02:21:20Z
+http://www.semanlink.net/tag/phd|prefLabel|PhD
+http://www.semanlink.net/tag/phd|creationDate|2017-08-27
+http://www.semanlink.net/tag/phd|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/phd|uri|http://www.semanlink.net/tag/phd
+http://www.semanlink.net/tag/cognition|prefLabel|Cognition
+http://www.semanlink.net/tag/cognition|broader|http://www.semanlink.net/tag/divers
+http://www.semanlink.net/tag/cognition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cognition|uri|http://www.semanlink.net/tag/cognition
+http://www.semanlink.net/tag/cognition|broader_prefLabel|Divers
+http://www.semanlink.net/tag/angularjs_module|creationTime|2015-09-04T22:35:36Z
+http://www.semanlink.net/tag/angularjs_module|prefLabel|AngularJS module
+http://www.semanlink.net/tag/angularjs_module|creationDate|2015-09-04
+http://www.semanlink.net/tag/angularjs_module|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/angularjs_module|uri|http://www.semanlink.net/tag/angularjs_module
+http://www.semanlink.net/tag/emergence|creationTime|2020-09-17T23:49:53Z
+http://www.semanlink.net/tag/emergence|prefLabel|Emergence
+http://www.semanlink.net/tag/emergence|creationDate|2020-09-17
+http://www.semanlink.net/tag/emergence|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/emergence|uri|http://www.semanlink.net/tag/emergence
+http://www.semanlink.net/tag/kiwi_project|creationTime|2008-04-07T14:46:02Z
+http://www.semanlink.net/tag/kiwi_project|prefLabel|KIWI project
+http://www.semanlink.net/tag/kiwi_project|broader|http://www.semanlink.net/tag/commission_europeenne
+http://www.semanlink.net/tag/kiwi_project|broader|http://www.semanlink.net/tag/semantic_wiki
+http://www.semanlink.net/tag/kiwi_project|related|http://www.semanlink.net/tag/sun_microsystems
+http://www.semanlink.net/tag/kiwi_project|creationDate|2008-04-07
+http://www.semanlink.net/tag/kiwi_project|comment|"""Knowledge in a Wiki""
+The project KIWI is concerned with knowledge management in Semantic Wikis and funded by the European Commission
+"
+http://www.semanlink.net/tag/kiwi_project|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/kiwi_project|homepage|http://www.kiwi-project.eu/
+http://www.semanlink.net/tag/kiwi_project|describedBy|http://www.kiwi-project.eu/
+http://www.semanlink.net/tag/kiwi_project|uri|http://www.semanlink.net/tag/kiwi_project
+http://www.semanlink.net/tag/kiwi_project|broader_prefLabel|Commission européenne
+http://www.semanlink.net/tag/kiwi_project|broader_prefLabel|Semantic Wiki
+http://www.semanlink.net/tag/kiwi_project|broader_altLabel|European Commission
+http://www.semanlink.net/tag/rdf_editor|prefLabel|RDF editor
+http://www.semanlink.net/tag/rdf_editor|broader|http://www.semanlink.net/tag/rdf
+http://www.semanlink.net/tag/rdf_editor|broader|http://www.semanlink.net/tag/rdf_tools
+http://www.semanlink.net/tag/rdf_editor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rdf_editor|uri|http://www.semanlink.net/tag/rdf_editor
+http://www.semanlink.net/tag/rdf_editor|broader_prefLabel|RDF
+http://www.semanlink.net/tag/rdf_editor|broader_prefLabel|RDF Tools
+http://www.semanlink.net/tag/rdf_editor|broader_related|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/rdf_editor|broader_related|http://www.semanlink.net/tag/ora_lassila
+http://www.semanlink.net/tag/rdf_editor|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/rdf_editor|broader_related|http://www.semanlink.net/tag/guha
+http://www.semanlink.net/tag/rdf_editor|broader_related|http://www.semanlink.net/tag/grddl
+http://www.semanlink.net/tag/matieres_premieres|prefLabel|Matières premières
+http://www.semanlink.net/tag/matieres_premieres|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/matieres_premieres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/matieres_premieres|uri|http://www.semanlink.net/tag/matieres_premieres
+http://www.semanlink.net/tag/matieres_premieres|broader_prefLabel|Economie
+http://www.semanlink.net/tag/matieres_premieres|broader_related|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/honteux|prefLabel|Honteux
+http://www.semanlink.net/tag/honteux|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/honteux|uri|http://www.semanlink.net/tag/honteux
+http://www.semanlink.net/tag/spacy|creationTime|2017-06-29T18:32:57Z
+http://www.semanlink.net/tag/spacy|prefLabel|spaCy
+http://www.semanlink.net/tag/spacy|broader|http://www.semanlink.net/tag/python_nlp
+http://www.semanlink.net/tag/spacy|broader|http://www.semanlink.net/tag/nlp_tools
+http://www.semanlink.net/tag/spacy|creationDate|2017-06-29
+http://www.semanlink.net/tag/spacy|comment|"""Industrial-Strength Natural Language Processing in Python""
+
+> Spacy is opinionated, in that it typically offers one highly optimized way to do something (whereas nltk offers a huge variety of ways, although they are usually not as optimized). -- Jeremy Howard"
+http://www.semanlink.net/tag/spacy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/spacy|homepage|https://spacy.io/
+http://www.semanlink.net/tag/spacy|uri|http://www.semanlink.net/tag/spacy
+http://www.semanlink.net/tag/spacy|broader_prefLabel|Python-NLP
+http://www.semanlink.net/tag/spacy|broader_prefLabel|NLP tools
+http://www.semanlink.net/tag/spacy|broader_related|http://www.semanlink.net/tag/python
+http://www.semanlink.net/tag/spacy|broader_related|http://www.semanlink.net/tag/python_4_data_science
+http://www.semanlink.net/tag/mental_typewriter|prefLabel|Mental typewriter
+http://www.semanlink.net/tag/mental_typewriter|broader|http://www.semanlink.net/tag/thought_alone_controlled_device
+http://www.semanlink.net/tag/mental_typewriter|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mental_typewriter|uri|http://www.semanlink.net/tag/mental_typewriter
+http://www.semanlink.net/tag/mental_typewriter|broader_prefLabel|Thought alone controlled device
+http://www.semanlink.net/tag/distributed_computing|creationTime|2013-02-18T16:13:10Z
+http://www.semanlink.net/tag/distributed_computing|prefLabel|Distributed computing
+http://www.semanlink.net/tag/distributed_computing|creationDate|2013-02-18
+http://www.semanlink.net/tag/distributed_computing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/distributed_computing|uri|http://www.semanlink.net/tag/distributed_computing
+http://www.semanlink.net/tag/capitalisme|prefLabel|Capitalisme
+http://www.semanlink.net/tag/capitalisme|broader|http://www.semanlink.net/tag/economie
+http://www.semanlink.net/tag/capitalisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/capitalisme|uri|http://www.semanlink.net/tag/capitalisme
+http://www.semanlink.net/tag/capitalisme|broader_prefLabel|Economie
+http://www.semanlink.net/tag/capitalisme|broader_related|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/polluted_places|prefLabel|Polluted places
+http://www.semanlink.net/tag/polluted_places|broader|http://www.semanlink.net/tag/pollution
+http://www.semanlink.net/tag/polluted_places|creationDate|2006-10-28
+http://www.semanlink.net/tag/polluted_places|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/polluted_places|uri|http://www.semanlink.net/tag/polluted_places
+http://www.semanlink.net/tag/polluted_places|broader_prefLabel|Pollution
+http://www.semanlink.net/tag/rdf_dev|prefLabel|RDF dev
+http://www.semanlink.net/tag/rdf_dev|broader|http://www.semanlink.net/tag/rdf
+http://www.semanlink.net/tag/rdf_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rdf_dev|uri|http://www.semanlink.net/tag/rdf_dev
+http://www.semanlink.net/tag/rdf_dev|broader_prefLabel|RDF
+http://www.semanlink.net/tag/rdf_dev|broader_related|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/rdf_dev|broader_related|http://www.semanlink.net/tag/ora_lassila
+http://www.semanlink.net/tag/rdf_dev|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/rdf_dev|broader_related|http://www.semanlink.net/tag/guha
+http://www.semanlink.net/tag/rdf_dev|broader_related|http://www.semanlink.net/tag/grddl
+http://www.semanlink.net/tag/thumbnails|prefLabel|Thumbnails
+http://www.semanlink.net/tag/thumbnails|creationDate|2006-10-11
+http://www.semanlink.net/tag/thumbnails|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/thumbnails|uri|http://www.semanlink.net/tag/thumbnails
+http://www.semanlink.net/tag/divers|prefLabel|Divers
+http://www.semanlink.net/tag/divers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/divers|uri|http://www.semanlink.net/tag/divers
+http://www.semanlink.net/tag/cannibalisme|creationTime|2007-10-08T21:34:37Z
+http://www.semanlink.net/tag/cannibalisme|prefLabel|Cannibalisme
+http://www.semanlink.net/tag/cannibalisme|creationDate|2007-10-08
+http://www.semanlink.net/tag/cannibalisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/cannibalisme|uri|http://www.semanlink.net/tag/cannibalisme
+http://www.semanlink.net/tag/computers|creationTime|2009-03-01T00:59:47Z
+http://www.semanlink.net/tag/computers|prefLabel|Computers
+http://www.semanlink.net/tag/computers|creationDate|2009-03-01
+http://www.semanlink.net/tag/computers|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/computers|uri|http://www.semanlink.net/tag/computers
+http://www.semanlink.net/tag/knowledge_maps|creationTime|2017-10-19T00:10:14Z
+http://www.semanlink.net/tag/knowledge_maps|prefLabel|Knowledge Maps
+http://www.semanlink.net/tag/knowledge_maps|creationDate|2017-10-19
+http://www.semanlink.net/tag/knowledge_maps|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/knowledge_maps|uri|http://www.semanlink.net/tag/knowledge_maps
+http://www.semanlink.net/tag/deep_unsupervised_learning|creationTime|2020-03-08T11:46:45Z
+http://www.semanlink.net/tag/deep_unsupervised_learning|prefLabel|Deep Unsupervised Learning
+http://www.semanlink.net/tag/deep_unsupervised_learning|broader|http://www.semanlink.net/tag/unsupervised_machine_learning
+http://www.semanlink.net/tag/deep_unsupervised_learning|broader|http://www.semanlink.net/tag/deep_learning
+http://www.semanlink.net/tag/deep_unsupervised_learning|creationDate|2020-03-08
+http://www.semanlink.net/tag/deep_unsupervised_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/deep_unsupervised_learning|uri|http://www.semanlink.net/tag/deep_unsupervised_learning
+http://www.semanlink.net/tag/deep_unsupervised_learning|broader_prefLabel|Unsupervised machine learning
+http://www.semanlink.net/tag/deep_unsupervised_learning|broader_prefLabel|Deep Learning
+http://www.semanlink.net/tag/deep_unsupervised_learning|broader_related|http://www.semanlink.net/tag/feature_extraction
+http://www.semanlink.net/tag/deep_unsupervised_learning|broader_related|http://www.semanlink.net/tag/feature_learning
+http://www.semanlink.net/tag/sbert|creationTime|2021-03-25T17:37:29Z
+http://www.semanlink.net/tag/sbert|prefLabel|Sentence-BERT
+http://www.semanlink.net/tag/sbert|broader|http://www.semanlink.net/tag/bert_and_sentence_embeddings
+http://www.semanlink.net/tag/sbert|creationDate|2021-03-25
+http://www.semanlink.net/tag/sbert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sbert|homepage|https://www.sbert.net
+http://www.semanlink.net/tag/sbert|altLabel|SBERT
+http://www.semanlink.net/tag/sbert|altLabel|SentenceTransformers
+http://www.semanlink.net/tag/sbert|uri|http://www.semanlink.net/tag/sbert
+http://www.semanlink.net/tag/sbert|broader_prefLabel|BERT + Sentence Embeddings
+http://www.semanlink.net/tag/mars_express|prefLabel|Mars Express
+http://www.semanlink.net/tag/mars_express|broader|http://www.semanlink.net/tag/esa
+http://www.semanlink.net/tag/mars_express|broader|http://www.semanlink.net/tag/mars_2004
+http://www.semanlink.net/tag/mars_express|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mars_express|uri|http://www.semanlink.net/tag/mars_express
+http://www.semanlink.net/tag/mars_express|broader_prefLabel|esa
+http://www.semanlink.net/tag/mars_express|broader_prefLabel|Mars 2004
+http://www.semanlink.net/tag/ocean_indien|creationTime|2013-06-01T21:40:04Z
+http://www.semanlink.net/tag/ocean_indien|prefLabel|Océan indien
+http://www.semanlink.net/tag/ocean_indien|broader|http://www.semanlink.net/tag/ocean
+http://www.semanlink.net/tag/ocean_indien|creationDate|2013-06-01
+http://www.semanlink.net/tag/ocean_indien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ocean_indien|uri|http://www.semanlink.net/tag/ocean_indien
+http://www.semanlink.net/tag/ocean_indien|broader_prefLabel|Océan
+http://www.semanlink.net/tag/nlp_tools|creationTime|2014-03-14T01:19:03Z
+http://www.semanlink.net/tag/nlp_tools|prefLabel|NLP tools
+http://www.semanlink.net/tag/nlp_tools|broader|http://www.semanlink.net/tag/tools
+http://www.semanlink.net/tag/nlp_tools|broader|http://www.semanlink.net/tag/nlp
+http://www.semanlink.net/tag/nlp_tools|creationDate|2014-03-14
+http://www.semanlink.net/tag/nlp_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nlp_tools|uri|http://www.semanlink.net/tag/nlp_tools
+http://www.semanlink.net/tag/nlp_tools|broader_prefLabel|Tools
+http://www.semanlink.net/tag/nlp_tools|broader_prefLabel|NLP
+http://www.semanlink.net/tag/nlp_tools|broader_altLabel|TALN
+http://www.semanlink.net/tag/nlp_tools|broader_altLabel|Traitement Automatique du Langage Naturel
+http://www.semanlink.net/tag/nlp_tools|broader_altLabel|Natural Language Processing
+http://www.semanlink.net/tag/juan_sequeda|creationTime|2012-05-10T08:51:11Z
+http://www.semanlink.net/tag/juan_sequeda|prefLabel|Juan Sequeda
+http://www.semanlink.net/tag/juan_sequeda|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/juan_sequeda|creationDate|2012-05-10
+http://www.semanlink.net/tag/juan_sequeda|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/juan_sequeda|uri|http://www.semanlink.net/tag/juan_sequeda
+http://www.semanlink.net/tag/juan_sequeda|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/php|prefLabel|PHP
+http://www.semanlink.net/tag/php|broader|http://www.semanlink.net/tag/programming_language
+http://www.semanlink.net/tag/php|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/php|uri|http://www.semanlink.net/tag/php
+http://www.semanlink.net/tag/php|broader_prefLabel|Programming language
+http://www.semanlink.net/tag/php|broader_altLabel|Langage de programmation
+http://www.semanlink.net/tag/irlande_du_nord|creationTime|2016-06-12T09:23:38Z
+http://www.semanlink.net/tag/irlande_du_nord|prefLabel|Irlande du Nord
+http://www.semanlink.net/tag/irlande_du_nord|creationDate|2016-06-12
+http://www.semanlink.net/tag/irlande_du_nord|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/irlande_du_nord|uri|http://www.semanlink.net/tag/irlande_du_nord
+http://www.semanlink.net/tag/comedie|prefLabel|Comédie
+http://www.semanlink.net/tag/comedie|creationDate|2006-10-21
+http://www.semanlink.net/tag/comedie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/comedie|uri|http://www.semanlink.net/tag/comedie
+http://www.semanlink.net/tag/uncertainty_reasoning|prefLabel|Uncertainty Reasoning
+http://www.semanlink.net/tag/uncertainty_reasoning|broader|http://www.semanlink.net/tag/artificial_intelligence
+http://www.semanlink.net/tag/uncertainty_reasoning|creationDate|2006-11-07
+http://www.semanlink.net/tag/uncertainty_reasoning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/uncertainty_reasoning|uri|http://www.semanlink.net/tag/uncertainty_reasoning
+http://www.semanlink.net/tag/uncertainty_reasoning|broader_prefLabel|Artificial Intelligence
+http://www.semanlink.net/tag/uncertainty_reasoning|broader_altLabel|Intelligence Artificielle
+http://www.semanlink.net/tag/uncertainty_reasoning|broader_altLabel|AI
+http://www.semanlink.net/tag/uncertainty_reasoning|broader_altLabel|IA
+http://www.semanlink.net/tag/uncertainty_reasoning|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens
+http://www.semanlink.net/tag/musique_en_ligne|prefLabel|Musique en ligne
+http://www.semanlink.net/tag/musique_en_ligne|broader|http://www.semanlink.net/tag/digital_media
+http://www.semanlink.net/tag/musique_en_ligne|broader|http://www.semanlink.net/tag/ntic
+http://www.semanlink.net/tag/musique_en_ligne|broader|http://www.semanlink.net/tag/musique
+http://www.semanlink.net/tag/musique_en_ligne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/musique_en_ligne|uri|http://www.semanlink.net/tag/musique_en_ligne
+http://www.semanlink.net/tag/musique_en_ligne|broader_prefLabel|Digital Media
+http://www.semanlink.net/tag/musique_en_ligne|broader_prefLabel|NTIC
+http://www.semanlink.net/tag/musique_en_ligne|broader_prefLabel|Musique
+http://www.semanlink.net/tag/musique_en_ligne|broader_altLabel|Music
+http://www.semanlink.net/tag/variational_bayesian_methods|creationTime|2018-08-07T10:39:11Z
+http://www.semanlink.net/tag/variational_bayesian_methods|prefLabel|Variational Bayesian methods
+http://www.semanlink.net/tag/variational_bayesian_methods|broader|http://www.semanlink.net/tag/machine_learning_techniques
+http://www.semanlink.net/tag/variational_bayesian_methods|broader|http://www.semanlink.net/tag/bayesian_reasoning
+http://www.semanlink.net/tag/variational_bayesian_methods|creationDate|2018-08-07
+http://www.semanlink.net/tag/variational_bayesian_methods|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/variational_bayesian_methods|describedBy|https://en.wikipedia.org/wiki/Variational_Bayesian_methods
+http://www.semanlink.net/tag/variational_bayesian_methods|altLabel|Variational Inference
+http://www.semanlink.net/tag/variational_bayesian_methods|uri|http://www.semanlink.net/tag/variational_bayesian_methods
+http://www.semanlink.net/tag/variational_bayesian_methods|broader_prefLabel|Machine learning: techniques
+http://www.semanlink.net/tag/variational_bayesian_methods|broader_prefLabel|Bayesian Reasoning
+http://www.semanlink.net/tag/privatisation_du_vivant|creationTime|2013-05-31T20:15:52Z
+http://www.semanlink.net/tag/privatisation_du_vivant|prefLabel|Privatisation du vivant
+http://www.semanlink.net/tag/privatisation_du_vivant|creationDate|2013-05-31
+http://www.semanlink.net/tag/privatisation_du_vivant|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/privatisation_du_vivant|uri|http://www.semanlink.net/tag/privatisation_du_vivant
+http://www.semanlink.net/tag/acl_2021|creationTime|2021-07-07T23:49:53Z
+http://www.semanlink.net/tag/acl_2021|prefLabel|ACL 2021
+http://www.semanlink.net/tag/acl_2021|broader|http://www.semanlink.net/tag/acl
+http://www.semanlink.net/tag/acl_2021|creationDate|2021-07-07
+http://www.semanlink.net/tag/acl_2021|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/acl_2021|uri|http://www.semanlink.net/tag/acl_2021
+http://www.semanlink.net/tag/acl_2021|broader_prefLabel|ACL
+http://www.semanlink.net/tag/bill_gates|prefLabel|Bill Gates
+http://www.semanlink.net/tag/bill_gates|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/bill_gates|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/bill_gates|altLabel|Gates
+http://www.semanlink.net/tag/bill_gates|uri|http://www.semanlink.net/tag/bill_gates
+http://www.semanlink.net/tag/bill_gates|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/bill_gates|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/guaranteed_basic_income|creationTime|2015-11-09T13:28:59Z
+http://www.semanlink.net/tag/guaranteed_basic_income|prefLabel|Universal basic income
+http://www.semanlink.net/tag/guaranteed_basic_income|broader|http://www.semanlink.net/tag/travail
+http://www.semanlink.net/tag/guaranteed_basic_income|creationDate|2015-11-09
+http://www.semanlink.net/tag/guaranteed_basic_income|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/guaranteed_basic_income|altLabel|Revenu Universel
+http://www.semanlink.net/tag/guaranteed_basic_income|altLabel|UBI
+http://www.semanlink.net/tag/guaranteed_basic_income|uri|http://www.semanlink.net/tag/guaranteed_basic_income
+http://www.semanlink.net/tag/guaranteed_basic_income|broader_prefLabel|Travail
+http://www.semanlink.net/tag/stanford_classifier|creationTime|2014-04-01T10:37:58Z
+http://www.semanlink.net/tag/stanford_classifier|prefLabel|Stanford classifier
+http://www.semanlink.net/tag/stanford_classifier|broader|http://www.semanlink.net/tag/nlp_text_classification
+http://www.semanlink.net/tag/stanford_classifier|broader|http://www.semanlink.net/tag/nlp_tools
+http://www.semanlink.net/tag/stanford_classifier|broader|http://www.semanlink.net/tag/nlp_stanford
+http://www.semanlink.net/tag/stanford_classifier|related|http://www.semanlink.net/tag/chris_manning
+http://www.semanlink.net/tag/stanford_classifier|creationDate|2014-04-01
+http://www.semanlink.net/tag/stanford_classifier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/stanford_classifier|uri|http://www.semanlink.net/tag/stanford_classifier
+http://www.semanlink.net/tag/stanford_classifier|broader_prefLabel|Text Classification
+http://www.semanlink.net/tag/stanford_classifier|broader_prefLabel|NLP tools
+http://www.semanlink.net/tag/stanford_classifier|broader_prefLabel|NLP@Stanford
+http://www.semanlink.net/tag/stanford_classifier|broader_related|http://www.semanlink.net/tag/chris_manning
+http://www.semanlink.net/tag/stanford_classifier|broader_related|http://www.semanlink.net/tag/dan_jurafsky
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|creationTime|2015-05-12T02:03:53Z
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|prefLabel|Scandale des écoutes en Allemagne
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader|http://www.semanlink.net/tag/allemagne
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader|http://www.semanlink.net/tag/espionnage
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader|http://www.semanlink.net/tag/nsa_spying_scandal
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader|http://www.semanlink.net/tag/angela_merkel
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|creationDate|2015-05-12
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|uri|http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader_prefLabel|Allemagne
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader_prefLabel|Espionnage
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader_prefLabel|NSA spying scandal
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader_prefLabel|Angela Merkel
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader_altLabel|Germany
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader_altLabel|Deutschland
+http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne|broader_related|http://www.semanlink.net/tag/the_guardian
+http://www.semanlink.net/tag/elias_torres|creationTime|2007-05-18T21:47:30Z
+http://www.semanlink.net/tag/elias_torres|prefLabel|Elias Torres
+http://www.semanlink.net/tag/elias_torres|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/elias_torres|related|http://www.semanlink.net/tag/ibm
+http://www.semanlink.net/tag/elias_torres|creationDate|2007-05-18
+http://www.semanlink.net/tag/elias_torres|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/elias_torres|homepage|http://torrez.us/
+http://www.semanlink.net/tag/elias_torres|uri|http://www.semanlink.net/tag/elias_torres
+http://www.semanlink.net/tag/elias_torres|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/elias_torres|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/disruptive_change|creationTime|2010-08-30T16:16:19Z
+http://www.semanlink.net/tag/disruptive_change|prefLabel|Disruptive change
+http://www.semanlink.net/tag/disruptive_change|creationDate|2010-08-30
+http://www.semanlink.net/tag/disruptive_change|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/disruptive_change|uri|http://www.semanlink.net/tag/disruptive_change
+http://www.semanlink.net/tag/energie|prefLabel|Energie
+http://www.semanlink.net/tag/energie|broader|http://www.semanlink.net/tag/grands_problemes
+http://www.semanlink.net/tag/energie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/energie|uri|http://www.semanlink.net/tag/energie
+http://www.semanlink.net/tag/energie|broader_prefLabel|Grands problèmes
+http://www.semanlink.net/tag/horreur_economique|prefLabel|Horreur économique
+http://www.semanlink.net/tag/horreur_economique|broader|http://www.semanlink.net/tag/critique_du_liberalisme
+http://www.semanlink.net/tag/horreur_economique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/horreur_economique|uri|http://www.semanlink.net/tag/horreur_economique
+http://www.semanlink.net/tag/horreur_economique|broader_prefLabel|Critique du libéralisme
+http://www.semanlink.net/tag/historic_images|creationTime|2014-08-29T19:04:17Z
+http://www.semanlink.net/tag/historic_images|prefLabel|Historic images
+http://www.semanlink.net/tag/historic_images|broader|http://www.semanlink.net/tag/photo
+http://www.semanlink.net/tag/historic_images|creationDate|2014-08-29
+http://www.semanlink.net/tag/historic_images|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/historic_images|uri|http://www.semanlink.net/tag/historic_images
+http://www.semanlink.net/tag/historic_images|broader_prefLabel|Photo
+http://www.semanlink.net/tag/historic_images|broader_altLabel|Images
+http://www.semanlink.net/tag/collective_punishment|creationTime|2020-12-22T21:05:49Z
+http://www.semanlink.net/tag/collective_punishment|prefLabel|Collective punishment
+http://www.semanlink.net/tag/collective_punishment|creationDate|2020-12-22
+http://www.semanlink.net/tag/collective_punishment|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/collective_punishment|describedBy|https://en.wikipedia.org/wiki/Collective_punishment
+http://www.semanlink.net/tag/collective_punishment|uri|http://www.semanlink.net/tag/collective_punishment
+http://www.semanlink.net/tag/maximum_entropy|creationTime|2012-04-10T02:05:47Z
+http://www.semanlink.net/tag/maximum_entropy|prefLabel|Maximum Entropy
+http://www.semanlink.net/tag/maximum_entropy|creationDate|2012-04-10
+http://www.semanlink.net/tag/maximum_entropy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/maximum_entropy|uri|http://www.semanlink.net/tag/maximum_entropy
+http://www.semanlink.net/tag/irak|prefLabel|Irak
+http://www.semanlink.net/tag/irak|broader|http://www.semanlink.net/tag/asie
+http://www.semanlink.net/tag/irak|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/irak|altLabel|Iraq
+http://www.semanlink.net/tag/irak|uri|http://www.semanlink.net/tag/irak
+http://www.semanlink.net/tag/irak|broader_prefLabel|Asie
+http://www.semanlink.net/tag/film_espagnol|creationTime|2016-09-25T22:58:53Z
+http://www.semanlink.net/tag/film_espagnol|prefLabel|Film espagnol
+http://www.semanlink.net/tag/film_espagnol|broader|http://www.semanlink.net/tag/espagne
+http://www.semanlink.net/tag/film_espagnol|broader|http://www.semanlink.net/tag/film
+http://www.semanlink.net/tag/film_espagnol|creationDate|2016-09-25
+http://www.semanlink.net/tag/film_espagnol|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/film_espagnol|uri|http://www.semanlink.net/tag/film_espagnol
+http://www.semanlink.net/tag/film_espagnol|broader_prefLabel|Espagne
+http://www.semanlink.net/tag/film_espagnol|broader_prefLabel|Film
+http://www.semanlink.net/tag/nlp_negation|creationTime|2019-10-31T08:29:38Z
+http://www.semanlink.net/tag/nlp_negation|prefLabel|NLP: negation
+http://www.semanlink.net/tag/nlp_negation|broader|http://www.semanlink.net/tag/nlp_problem
+http://www.semanlink.net/tag/nlp_negation|creationDate|2019-10-31
+http://www.semanlink.net/tag/nlp_negation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/nlp_negation|uri|http://www.semanlink.net/tag/nlp_negation
+http://www.semanlink.net/tag/nlp_negation|broader_prefLabel|NLP tasks / problems
+http://www.semanlink.net/tag/exploitation_petroliere|creationTime|2008-06-01T13:06:41Z
+http://www.semanlink.net/tag/exploitation_petroliere|prefLabel|Exploitation pétrolière
+http://www.semanlink.net/tag/exploitation_petroliere|creationDate|2008-06-01
+http://www.semanlink.net/tag/exploitation_petroliere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/exploitation_petroliere|uri|http://www.semanlink.net/tag/exploitation_petroliere
+http://www.semanlink.net/tag/lda2vec|creationTime|2017-12-11T13:47:15Z
+http://www.semanlink.net/tag/lda2vec|prefLabel|LDA2vec
+http://www.semanlink.net/tag/lda2vec|broader|http://www.semanlink.net/tag/latent_dirichlet_allocation
+http://www.semanlink.net/tag/lda2vec|broader|http://www.semanlink.net/tag/word_embedding
+http://www.semanlink.net/tag/lda2vec|creationDate|2017-12-11
+http://www.semanlink.net/tag/lda2vec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/lda2vec|uri|http://www.semanlink.net/tag/lda2vec
+http://www.semanlink.net/tag/lda2vec|broader_prefLabel|Latent Dirichlet allocation
+http://www.semanlink.net/tag/lda2vec|broader_prefLabel|Word embeddings
+http://www.semanlink.net/tag/lda2vec|broader_altLabel|LDA
+http://www.semanlink.net/tag/lda2vec|broader_altLabel|Word Embedding
+http://www.semanlink.net/tag/lda2vec|broader_altLabel|Plongement lexical
+http://www.semanlink.net/tag/lda2vec|broader_related|http://www.semanlink.net/tag/david_blei
+http://www.semanlink.net/tag/lda2vec|broader_related|http://www.semanlink.net/tag/ng
+http://www.semanlink.net/tag/lda2vec|broader_related|http://www.semanlink.net/tag/latent_semantic_analysis
+http://www.semanlink.net/tag/lda2vec|broader_related|http://www.semanlink.net/tag/tomas_mikolov
+http://www.semanlink.net/tag/lda2vec|broader_related|http://www.semanlink.net/tag/yoshua_bengio
+http://www.semanlink.net/tag/lda2vec|broader_related|http://www.semanlink.net/tag/sense_embeddings
+http://www.semanlink.net/tag/egypte|prefLabel|Egypte
+http://www.semanlink.net/tag/egypte|broader|http://www.semanlink.net/tag/afrique_du_nord
+http://www.semanlink.net/tag/egypte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/egypte|uri|http://www.semanlink.net/tag/egypte
+http://www.semanlink.net/tag/egypte|broader_prefLabel|Afrique du Nord
+http://www.semanlink.net/tag/sun_microsystems|prefLabel|Sun Microsystems
+http://www.semanlink.net/tag/sun_microsystems|broader|http://www.semanlink.net/tag/entreprise
+http://www.semanlink.net/tag/sun_microsystems|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sun_microsystems|altLabel|sun
+http://www.semanlink.net/tag/sun_microsystems|uri|http://www.semanlink.net/tag/sun_microsystems
+http://www.semanlink.net/tag/sun_microsystems|broader_prefLabel|Entreprise
+http://www.semanlink.net/tag/metaverse|prefLabel|Metaverse
+http://www.semanlink.net/tag/metaverse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/metaverse|uri|http://www.semanlink.net/tag/metaverse
+http://www.semanlink.net/tag/histoire_du_monde|creationTime|2013-10-09T21:43:17Z
+http://www.semanlink.net/tag/histoire_du_monde|prefLabel|Histoire du monde
+http://www.semanlink.net/tag/histoire_du_monde|broader|http://www.semanlink.net/tag/histoire
+http://www.semanlink.net/tag/histoire_du_monde|creationDate|2013-10-09
+http://www.semanlink.net/tag/histoire_du_monde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/histoire_du_monde|uri|http://www.semanlink.net/tag/histoire_du_monde
+http://www.semanlink.net/tag/histoire_du_monde|broader_prefLabel|Histoire
+http://www.semanlink.net/tag/ted|creationTime|2013-06-12T23:59:28Z
+http://www.semanlink.net/tag/ted|prefLabel|TED
+http://www.semanlink.net/tag/ted|creationDate|2013-06-12
+http://www.semanlink.net/tag/ted|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ted|uri|http://www.semanlink.net/tag/ted
+http://www.semanlink.net/tag/phd_thesis|creationTime|2020-05-05T15:48:15Z
+http://www.semanlink.net/tag/phd_thesis|prefLabel|PhD Thesis
+http://www.semanlink.net/tag/phd_thesis|broader|http://www.semanlink.net/tag/phd
+http://www.semanlink.net/tag/phd_thesis|creationDate|2020-05-05
+http://www.semanlink.net/tag/phd_thesis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/phd_thesis|uri|http://www.semanlink.net/tag/phd_thesis
+http://www.semanlink.net/tag/phd_thesis|broader_prefLabel|PhD
+http://www.semanlink.net/tag/european_project|creationTime|2009-02-18T01:17:46Z
+http://www.semanlink.net/tag/european_project|prefLabel|European project
+http://www.semanlink.net/tag/european_project|creationDate|2009-02-18
+http://www.semanlink.net/tag/european_project|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/european_project|uri|http://www.semanlink.net/tag/european_project
+http://www.semanlink.net/tag/event|creationTime|2008-03-08T00:02:39Z
+http://www.semanlink.net/tag/event|prefLabel|Event
+http://www.semanlink.net/tag/event|creationDate|2008-03-08
+http://www.semanlink.net/tag/event|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/event|uri|http://www.semanlink.net/tag/event
+http://www.semanlink.net/tag/markov|creationTime|2020-07-27T14:04:09Z
+http://www.semanlink.net/tag/markov|prefLabel|Markov
+http://www.semanlink.net/tag/markov|creationDate|2020-07-27
+http://www.semanlink.net/tag/markov|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/markov|uri|http://www.semanlink.net/tag/markov
+http://www.semanlink.net/tag/athletisme|prefLabel|Athlétisme
+http://www.semanlink.net/tag/athletisme|broader|http://www.semanlink.net/tag/sport
+http://www.semanlink.net/tag/athletisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/athletisme|uri|http://www.semanlink.net/tag/athletisme
+http://www.semanlink.net/tag/athletisme|broader_prefLabel|Sport
+http://www.semanlink.net/tag/statistical_physics|creationTime|2018-10-18T13:43:53Z
+http://www.semanlink.net/tag/statistical_physics|prefLabel|Statistical physics
+http://www.semanlink.net/tag/statistical_physics|broader|http://www.semanlink.net/tag/physique
+http://www.semanlink.net/tag/statistical_physics|creationDate|2018-10-18
+http://www.semanlink.net/tag/statistical_physics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/statistical_physics|uri|http://www.semanlink.net/tag/statistical_physics
+http://www.semanlink.net/tag/statistical_physics|broader_prefLabel|Physique
+http://www.semanlink.net/tag/statistical_physics|broader_altLabel|Physics
+http://www.semanlink.net/tag/information_theory_and_deep_learning|creationTime|2019-10-11T02:04:32Z
+http://www.semanlink.net/tag/information_theory_and_deep_learning|prefLabel|Information theory AND Deep Learning
+http://www.semanlink.net/tag/information_theory_and_deep_learning|broader|http://www.semanlink.net/tag/deep_learning
+http://www.semanlink.net/tag/information_theory_and_deep_learning|broader|http://www.semanlink.net/tag/information_theory
+http://www.semanlink.net/tag/information_theory_and_deep_learning|related|http://www.semanlink.net/tag/information_bottleneck_method
+http://www.semanlink.net/tag/information_theory_and_deep_learning|creationDate|2019-10-11
+http://www.semanlink.net/tag/information_theory_and_deep_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/information_theory_and_deep_learning|uri|http://www.semanlink.net/tag/information_theory_and_deep_learning
+http://www.semanlink.net/tag/information_theory_and_deep_learning|broader_prefLabel|Deep Learning
+http://www.semanlink.net/tag/information_theory_and_deep_learning|broader_prefLabel|Information theory
+http://www.semanlink.net/tag/information_theory_and_deep_learning|broader_related|http://www.semanlink.net/tag/feature_extraction
+http://www.semanlink.net/tag/information_theory_and_deep_learning|broader_related|http://www.semanlink.net/tag/feature_learning
+http://www.semanlink.net/tag/virtuoso_review|creationTime|2009-02-06T22:34:06Z
+http://www.semanlink.net/tag/virtuoso_review|prefLabel|Virtuoso: review
+http://www.semanlink.net/tag/virtuoso_review|broader|http://www.semanlink.net/tag/virtuoso
+http://www.semanlink.net/tag/virtuoso_review|creationDate|2009-02-06
+http://www.semanlink.net/tag/virtuoso_review|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/virtuoso_review|uri|http://www.semanlink.net/tag/virtuoso_review
+http://www.semanlink.net/tag/virtuoso_review|broader_prefLabel|Virtuoso
+http://www.semanlink.net/tag/lost_city|prefLabel|Lost City
+http://www.semanlink.net/tag/lost_city|broader|http://www.semanlink.net/tag/ocean
+http://www.semanlink.net/tag/lost_city|broader|http://www.semanlink.net/tag/extremophiles
+http://www.semanlink.net/tag/lost_city|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/lost_city|uri|http://www.semanlink.net/tag/lost_city
+http://www.semanlink.net/tag/lost_city|broader_prefLabel|Océan
+http://www.semanlink.net/tag/lost_city|broader_prefLabel|Extrémophiles
+http://www.semanlink.net/tag/analytics|creationTime|2014-12-18T11:41:07Z
+http://www.semanlink.net/tag/analytics|prefLabel|Analytics
+http://www.semanlink.net/tag/analytics|creationDate|2014-12-18
+http://www.semanlink.net/tag/analytics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/analytics|uri|http://www.semanlink.net/tag/analytics
+http://www.semanlink.net/tag/ipython|creationTime|2015-04-28T14:22:40Z
+http://www.semanlink.net/tag/ipython|prefLabel|IPython
+http://www.semanlink.net/tag/ipython|broader|http://www.semanlink.net/tag/python
+http://www.semanlink.net/tag/ipython|broader|http://www.semanlink.net/tag/jupyter
+http://www.semanlink.net/tag/ipython|creationDate|2015-04-28
+http://www.semanlink.net/tag/ipython|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/ipython|describedBy|https://en.wikipedia.org/wiki/IPython
+http://www.semanlink.net/tag/ipython|uri|http://www.semanlink.net/tag/ipython
+http://www.semanlink.net/tag/ipython|broader_prefLabel|Python
+http://www.semanlink.net/tag/ipython|broader_prefLabel|Jupyter
+http://www.semanlink.net/tag/pauvrete|prefLabel|Pauvreté
+http://www.semanlink.net/tag/pauvrete|broader|http://www.semanlink.net/tag/grands_problemes
+http://www.semanlink.net/tag/pauvrete|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/pauvrete|uri|http://www.semanlink.net/tag/pauvrete
+http://www.semanlink.net/tag/pauvrete|broader_prefLabel|Grands problèmes
+http://www.semanlink.net/tag/address_book|creationTime|2008-05-08T03:33:24Z
+http://www.semanlink.net/tag/address_book|prefLabel|Address Book
+http://www.semanlink.net/tag/address_book|creationDate|2008-05-08
+http://www.semanlink.net/tag/address_book|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/address_book|uri|http://www.semanlink.net/tag/address_book
+http://www.semanlink.net/tag/john_pereira|creationTime|2013-11-03T10:48:00Z
+http://www.semanlink.net/tag/john_pereira|prefLabel|John Pereira
+http://www.semanlink.net/tag/john_pereira|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/john_pereira|related|http://www.semanlink.net/tag/interactive_knowledge_stack
+http://www.semanlink.net/tag/john_pereira|related|http://www.semanlink.net/tag/iks_workshop_salzburg_2012
+http://www.semanlink.net/tag/john_pereira|creationDate|2013-11-03
+http://www.semanlink.net/tag/john_pereira|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/john_pereira|uri|http://www.semanlink.net/tag/john_pereira
+http://www.semanlink.net/tag/john_pereira|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/pakistan|prefLabel|Pakistan
+http://www.semanlink.net/tag/pakistan|broader|http://www.semanlink.net/tag/asie
+http://www.semanlink.net/tag/pakistan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/pakistan|uri|http://www.semanlink.net/tag/pakistan
+http://www.semanlink.net/tag/pakistan|broader_prefLabel|Asie
+http://www.semanlink.net/tag/haystack|creationTime|2021-10-02T22:08:57Z
+http://www.semanlink.net/tag/haystack|prefLabel|Haystack
+http://www.semanlink.net/tag/haystack|broader|http://www.semanlink.net/tag/neural_models_for_information_retrieval
+http://www.semanlink.net/tag/haystack|broader|http://www.semanlink.net/tag/nlp_tools
+http://www.semanlink.net/tag/haystack|broader|http://www.semanlink.net/tag/question_answering
+http://www.semanlink.net/tag/haystack|creationDate|2021-10-02
+http://www.semanlink.net/tag/haystack|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/haystack|uri|http://www.semanlink.net/tag/haystack
+http://www.semanlink.net/tag/haystack|broader_prefLabel|Neural Search
+http://www.semanlink.net/tag/haystack|broader_prefLabel|NLP tools
+http://www.semanlink.net/tag/haystack|broader_prefLabel|Question Answering
+http://www.semanlink.net/tag/haystack|broader_altLabel|Neural IR models
+http://www.semanlink.net/tag/haystack|broader_altLabel|Neural retrieval
+http://www.semanlink.net/tag/haystack|broader_altLabel|Neural Models for Information Retrieval
+http://www.semanlink.net/tag/haystack|broader_altLabel|QA
+http://www.semanlink.net/tag/haystack|broader_related|http://www.semanlink.net/tag/chatbot
+http://www.semanlink.net/tag/httprange_14_solution|prefLabel|httpRange-14 (solution)
+http://www.semanlink.net/tag/httprange_14_solution|broader|http://www.semanlink.net/tag/httprange_14
+http://www.semanlink.net/tag/httprange_14_solution|creationDate|2007-01-04
+http://www.semanlink.net/tag/httprange_14_solution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/httprange_14_solution|uri|http://www.semanlink.net/tag/httprange_14_solution
+http://www.semanlink.net/tag/httprange_14_solution|broader_prefLabel|httpRange-14
+http://www.semanlink.net/tag/httprange_14_solution|broader_altLabel|303-redirect
+http://www.semanlink.net/tag/httprange_14_solution|broader_related|http://www.semanlink.net/tag/linked_data
+http://www.semanlink.net/tag/httprange_14_solution|broader_related|http://www.semanlink.net/tag/httprange_14
+http://www.semanlink.net/tag/sites_du_patrimoine_mondial_de_l_unesco|prefLabel|Sites du Patrimoine mondial de l'Unesco
+http://www.semanlink.net/tag/sites_du_patrimoine_mondial_de_l_unesco|broader|http://www.semanlink.net/tag/unesco
+http://www.semanlink.net/tag/sites_du_patrimoine_mondial_de_l_unesco|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sites_du_patrimoine_mondial_de_l_unesco|uri|http://www.semanlink.net/tag/sites_du_patrimoine_mondial_de_l_unesco
+http://www.semanlink.net/tag/sites_du_patrimoine_mondial_de_l_unesco|broader_prefLabel|UNESCO
+http://www.semanlink.net/tag/eme|creationTime|2017-04-02T12:06:57Z
+http://www.semanlink.net/tag/eme|prefLabel|EME
+http://www.semanlink.net/tag/eme|broader|http://www.semanlink.net/tag/drm
+http://www.semanlink.net/tag/eme|creationDate|2017-04-02
+http://www.semanlink.net/tag/eme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/eme|altLabel|Encrypted Media Extensions
+http://www.semanlink.net/tag/eme|uri|http://www.semanlink.net/tag/eme
+http://www.semanlink.net/tag/eme|broader_prefLabel|DRM
+http://www.semanlink.net/tag/facial_recognition|creationTime|2017-09-07T01:41:48Z
+http://www.semanlink.net/tag/facial_recognition|prefLabel|Facial Recognition
+http://www.semanlink.net/tag/facial_recognition|broader|http://www.semanlink.net/tag/image_recognition
+http://www.semanlink.net/tag/facial_recognition|creationDate|2017-09-07
+http://www.semanlink.net/tag/facial_recognition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/facial_recognition|uri|http://www.semanlink.net/tag/facial_recognition
+http://www.semanlink.net/tag/facial_recognition|broader_prefLabel|Image recognition
+http://www.semanlink.net/tag/goldfire|creationTime|2021-05-12T11:12:32Z
+http://www.semanlink.net/tag/goldfire|prefLabel|Goldfire
+http://www.semanlink.net/tag/goldfire|broader|http://www.semanlink.net/tag/cognitive_search
+http://www.semanlink.net/tag/goldfire|creationDate|2021-05-12
+http://www.semanlink.net/tag/goldfire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/goldfire|uri|http://www.semanlink.net/tag/goldfire
+http://www.semanlink.net/tag/goldfire|broader_prefLabel|Cognitive Search
+http://www.semanlink.net/tag/goldfire|broader_related|http://www.semanlink.net/tag/stardog
+http://www.semanlink.net/tag/goldfire|broader_related|http://www.semanlink.net/tag/neural_models_for_information_retrieval
+http://www.semanlink.net/tag/goldfire|broader_related|http://www.semanlink.net/tag/knowledge_graph
+http://www.semanlink.net/tag/humour|prefLabel|Humour
+http://www.semanlink.net/tag/humour|broader|http://www.semanlink.net/tag/rigolo
+http://www.semanlink.net/tag/humour|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/humour|uri|http://www.semanlink.net/tag/humour
+http://www.semanlink.net/tag/humour|broader_prefLabel|Rigolo
+http://www.semanlink.net/tag/categorical_variables|creationTime|2018-03-03T17:14:23Z
+http://www.semanlink.net/tag/categorical_variables|prefLabel|Categorical Variables
+http://www.semanlink.net/tag/categorical_variables|creationDate|2018-03-03
+http://www.semanlink.net/tag/categorical_variables|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/categorical_variables|uri|http://www.semanlink.net/tag/categorical_variables
+http://www.semanlink.net/tag/mesopotamie|prefLabel|Mésopotamie
+http://www.semanlink.net/tag/mesopotamie|broader|http://www.semanlink.net/tag/antiquite
+http://www.semanlink.net/tag/mesopotamie|broader|http://www.semanlink.net/tag/irak
+http://www.semanlink.net/tag/mesopotamie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/mesopotamie|uri|http://www.semanlink.net/tag/mesopotamie
+http://www.semanlink.net/tag/mesopotamie|broader_prefLabel|Antiquité
+http://www.semanlink.net/tag/mesopotamie|broader_prefLabel|Irak
+http://www.semanlink.net/tag/mesopotamie|broader_altLabel|Iraq
+http://www.semanlink.net/tag/societe|prefLabel|Société
+http://www.semanlink.net/tag/societe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/societe|uri|http://www.semanlink.net/tag/societe
+http://www.semanlink.net/tag/meroe|prefLabel|Méroé
+http://www.semanlink.net/tag/meroe|broader|http://www.semanlink.net/tag/antiquite_africaine
+http://www.semanlink.net/tag/meroe|broader|http://www.semanlink.net/tag/nubie
+http://www.semanlink.net/tag/meroe|related|http://www.semanlink.net/tag/egypte_antique
+http://www.semanlink.net/tag/meroe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/meroe|altLabel|Koush
+http://www.semanlink.net/tag/meroe|uri|http://www.semanlink.net/tag/meroe
+http://www.semanlink.net/tag/meroe|broader_prefLabel|Archéologie africaine
+http://www.semanlink.net/tag/meroe|broader_prefLabel|Nubie
+http://www.semanlink.net/tag/meroe|broader_related|http://www.semanlink.net/tag/art_d_afrique
+http://www.semanlink.net/tag/semantic_web_use_cases|prefLabel|Semantic web : Use cases
+http://www.semanlink.net/tag/semantic_web_use_cases|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/semantic_web_use_cases|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_web_use_cases|uri|http://www.semanlink.net/tag/semantic_web_use_cases
+http://www.semanlink.net/tag/semantic_web_use_cases|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semantic_web_use_cases|broader_altLabel|sw
+http://www.semanlink.net/tag/semantic_web_use_cases|broader_altLabel|Web sémantique
+http://www.semanlink.net/tag/disruption|creationTime|2015-11-08T11:52:05Z
+http://www.semanlink.net/tag/disruption|prefLabel|Disruption
+http://www.semanlink.net/tag/disruption|creationDate|2015-11-08
+http://www.semanlink.net/tag/disruption|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/disruption|uri|http://www.semanlink.net/tag/disruption
+http://www.semanlink.net/tag/guepe|prefLabel|Guêpe
+http://www.semanlink.net/tag/guepe|broader|http://www.semanlink.net/tag/insecte
+http://www.semanlink.net/tag/guepe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/guepe|uri|http://www.semanlink.net/tag/guepe
+http://www.semanlink.net/tag/guepe|broader_prefLabel|Insecte
+http://www.semanlink.net/tag/machine_learning_library|creationTime|2020-07-10T09:41:07Z
+http://www.semanlink.net/tag/machine_learning_library|prefLabel|Machine Learning library
+http://www.semanlink.net/tag/machine_learning_library|broader|http://www.semanlink.net/tag/machine_learning_tool
+http://www.semanlink.net/tag/machine_learning_library|broader|http://www.semanlink.net/tag/library_code
+http://www.semanlink.net/tag/machine_learning_library|creationDate|2020-07-10
+http://www.semanlink.net/tag/machine_learning_library|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/machine_learning_library|uri|http://www.semanlink.net/tag/machine_learning_library
+http://www.semanlink.net/tag/machine_learning_library|broader_prefLabel|Machine Learning tool
+http://www.semanlink.net/tag/machine_learning_library|broader_prefLabel|Library (code)
+http://www.semanlink.net/tag/machine_learning_library|broader_related|http://www.semanlink.net/tag/frameworks
+http://www.semanlink.net/tag/enterprise_knowledge_graph_platform|creationTime|2021-05-19T14:21:02Z
+http://www.semanlink.net/tag/enterprise_knowledge_graph_platform|prefLabel|Enterprise Knowledge Graph Platform
+http://www.semanlink.net/tag/enterprise_knowledge_graph_platform|broader|http://www.semanlink.net/tag/enterprise_knowledge_graph
+http://www.semanlink.net/tag/enterprise_knowledge_graph_platform|related|http://www.semanlink.net/tag/cognitive_search
+http://www.semanlink.net/tag/enterprise_knowledge_graph_platform|creationDate|2021-05-19
+http://www.semanlink.net/tag/enterprise_knowledge_graph_platform|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/enterprise_knowledge_graph_platform|uri|http://www.semanlink.net/tag/enterprise_knowledge_graph_platform
+http://www.semanlink.net/tag/enterprise_knowledge_graph_platform|broader_prefLabel|Enterprise Knowledge Graph
+http://www.semanlink.net/tag/voltaire|creationTime|2014-09-15T15:16:45Z
+http://www.semanlink.net/tag/voltaire|prefLabel|Voltaire
+http://www.semanlink.net/tag/voltaire|broader|http://www.semanlink.net/tag/philosophe
+http://www.semanlink.net/tag/voltaire|broader|http://www.semanlink.net/tag/grand_homme
+http://www.semanlink.net/tag/voltaire|creationDate|2014-09-15
+http://www.semanlink.net/tag/voltaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/voltaire|uri|http://www.semanlink.net/tag/voltaire
+http://www.semanlink.net/tag/voltaire|broader_prefLabel|Philosophe
+http://www.semanlink.net/tag/voltaire|broader_prefLabel|Grand Homme
+http://www.semanlink.net/tag/biological_data|creationTime|2017-09-04T21:11:07Z
+http://www.semanlink.net/tag/biological_data|prefLabel|Biological data
+http://www.semanlink.net/tag/biological_data|broader|http://www.semanlink.net/tag/biologie
+http://www.semanlink.net/tag/biological_data|related|http://www.semanlink.net/tag/biomedical_data
+http://www.semanlink.net/tag/biological_data|creationDate|2017-09-04
+http://www.semanlink.net/tag/biological_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/biological_data|uri|http://www.semanlink.net/tag/biological_data
+http://www.semanlink.net/tag/biological_data|broader_prefLabel|Biology
+http://www.semanlink.net/tag/biological_data|broader_altLabel|Biologie
+http://www.semanlink.net/tag/aspect_nlp|creationTime|2021-10-21T15:38:42Z
+http://www.semanlink.net/tag/aspect_nlp|prefLabel|Aspect (NLP)
+http://www.semanlink.net/tag/aspect_nlp|broader|http://www.semanlink.net/tag/nlp_problem
+http://www.semanlink.net/tag/aspect_nlp|creationDate|2021-10-21
+http://www.semanlink.net/tag/aspect_nlp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/aspect_nlp|uri|http://www.semanlink.net/tag/aspect_nlp
+http://www.semanlink.net/tag/aspect_nlp|broader_prefLabel|NLP tasks / problems
+http://www.semanlink.net/tag/sparql_extension_functions|creationTime|2011-01-10T09:32:36Z
+http://www.semanlink.net/tag/sparql_extension_functions|prefLabel|SPARQL Extension Functions
+http://www.semanlink.net/tag/sparql_extension_functions|broader|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/sparql_extension_functions|creationDate|2011-01-10
+http://www.semanlink.net/tag/sparql_extension_functions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sparql_extension_functions|uri|http://www.semanlink.net/tag/sparql_extension_functions
+http://www.semanlink.net/tag/sparql_extension_functions|broader_prefLabel|SPARQL
+http://www.semanlink.net/tag/quads|creationTime|2009-07-08T16:41:46Z
+http://www.semanlink.net/tag/quads|prefLabel|Quads
+http://www.semanlink.net/tag/quads|creationDate|2009-07-08
+http://www.semanlink.net/tag/quads|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/quads|uri|http://www.semanlink.net/tag/quads
+http://www.semanlink.net/tag/dieu|creationTime|2015-08-07T00:32:58Z
+http://www.semanlink.net/tag/dieu|prefLabel|dieu
+http://www.semanlink.net/tag/dieu|creationDate|2015-08-07
+http://www.semanlink.net/tag/dieu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/dieu|uri|http://www.semanlink.net/tag/dieu
+http://www.semanlink.net/tag/energies_renouvelables|prefLabel|Energies renouvelables
+http://www.semanlink.net/tag/energies_renouvelables|broader|http://www.semanlink.net/tag/energie
+http://www.semanlink.net/tag/energies_renouvelables|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/energies_renouvelables|uri|http://www.semanlink.net/tag/energies_renouvelables
+http://www.semanlink.net/tag/energies_renouvelables|broader_prefLabel|Energie
+http://www.semanlink.net/tag/leigh_dodds|prefLabel|Leigh Dodds
+http://www.semanlink.net/tag/leigh_dodds|broader|http://www.semanlink.net/tag/technical_girls_and_guys
+http://www.semanlink.net/tag/leigh_dodds|broader|http://www.semanlink.net/tag/sw_guys
+http://www.semanlink.net/tag/leigh_dodds|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/leigh_dodds|altLabel|Lost Boy
+http://www.semanlink.net/tag/leigh_dodds|uri|http://www.semanlink.net/tag/leigh_dodds
+http://www.semanlink.net/tag/leigh_dodds|broader_prefLabel|Technical girls and guys
+http://www.semanlink.net/tag/leigh_dodds|broader_prefLabel|SW guys (and girls)
+http://www.semanlink.net/tag/leigh_dodds|broader_altLabel|Technical guys
+http://www.semanlink.net/tag/malbouffe|prefLabel|Malbouffe
+http://www.semanlink.net/tag/malbouffe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/malbouffe|uri|http://www.semanlink.net/tag/malbouffe
+http://www.semanlink.net/tag/belo_horizonte|creationTime|2014-05-03T17:20:42Z
+http://www.semanlink.net/tag/belo_horizonte|prefLabel|Belo Horizonte
+http://www.semanlink.net/tag/belo_horizonte|broader|http://www.semanlink.net/tag/bresil
+http://www.semanlink.net/tag/belo_horizonte|broader|http://www.semanlink.net/tag/ville
+http://www.semanlink.net/tag/belo_horizonte|creationDate|2014-05-03
+http://www.semanlink.net/tag/belo_horizonte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/belo_horizonte|uri|http://www.semanlink.net/tag/belo_horizonte
+http://www.semanlink.net/tag/belo_horizonte|broader_prefLabel|Brésil
+http://www.semanlink.net/tag/belo_horizonte|broader_prefLabel|Ville
+http://www.semanlink.net/tag/belo_horizonte|broader_related|http://www.semanlink.net/tag/saudade
+http://www.semanlink.net/tag/multiagent_ai|creationTime|2016-01-09T00:51:41Z
+http://www.semanlink.net/tag/multiagent_ai|prefLabel|Multiagent AI
+http://www.semanlink.net/tag/multiagent_ai|broader|http://www.semanlink.net/tag/artificial_intelligence
+http://www.semanlink.net/tag/multiagent_ai|creationDate|2016-01-09
+http://www.semanlink.net/tag/multiagent_ai|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/multiagent_ai|uri|http://www.semanlink.net/tag/multiagent_ai
+http://www.semanlink.net/tag/multiagent_ai|broader_prefLabel|Artificial Intelligence
+http://www.semanlink.net/tag/multiagent_ai|broader_altLabel|Intelligence Artificielle
+http://www.semanlink.net/tag/multiagent_ai|broader_altLabel|AI
+http://www.semanlink.net/tag/multiagent_ai|broader_altLabel|IA
+http://www.semanlink.net/tag/multiagent_ai|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens
+http://www.semanlink.net/tag/beer|creationTime|2013-03-18T22:19:30Z
+http://www.semanlink.net/tag/beer|prefLabel|Beer
+http://www.semanlink.net/tag/beer|creationDate|2013-03-18
+http://www.semanlink.net/tag/beer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/beer|uri|http://www.semanlink.net/tag/beer
+http://www.semanlink.net/tag/multi_document_summarization|creationTime|2018-11-06T23:12:11Z
+http://www.semanlink.net/tag/multi_document_summarization|prefLabel|Multi-Document Summarization
+http://www.semanlink.net/tag/multi_document_summarization|broader|http://www.semanlink.net/tag/automatic_summarization
+http://www.semanlink.net/tag/multi_document_summarization|creationDate|2018-11-06
+http://www.semanlink.net/tag/multi_document_summarization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/multi_document_summarization|uri|http://www.semanlink.net/tag/multi_document_summarization
+http://www.semanlink.net/tag/multi_document_summarization|broader_prefLabel|Text Summarization
+http://www.semanlink.net/tag/multi_document_summarization|broader_altLabel|Automatic summarization
+http://www.semanlink.net/tag/multi_document_summarization|broader_related|http://www.semanlink.net/tag/nlp_long_documents
+http://www.semanlink.net/tag/peer_to_peer|prefLabel|Peer to peer
+http://www.semanlink.net/tag/peer_to_peer|broader|http://www.semanlink.net/tag/internet_related_technologies
+http://www.semanlink.net/tag/peer_to_peer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/peer_to_peer|altLabel|P2P
+http://www.semanlink.net/tag/peer_to_peer|uri|http://www.semanlink.net/tag/peer_to_peer
+http://www.semanlink.net/tag/peer_to_peer|broader_prefLabel|Internet Related Technologies
+http://www.semanlink.net/tag/rdf_parser|prefLabel|RDF Parser
+http://www.semanlink.net/tag/rdf_parser|broader|http://www.semanlink.net/tag/rdf
+http://www.semanlink.net/tag/rdf_parser|broader|http://www.semanlink.net/tag/rdf_tools
+http://www.semanlink.net/tag/rdf_parser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_parser|uri|http://www.semanlink.net/tag/rdf_parser +http://www.semanlink.net/tag/rdf_parser|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_parser|broader_prefLabel|RDF Tools +http://www.semanlink.net/tag/rdf_parser|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_parser|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_parser|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_parser|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_parser|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/nok|prefLabel|Nok +http://www.semanlink.net/tag/nok|broader|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/tag/nok|broader|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/nok|broader|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.semanlink.net/tag/nok|broader|http://www.semanlink.net/tag/nigeria +http://www.semanlink.net/tag/nok|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nok|uri|http://www.semanlink.net/tag/nok +http://www.semanlink.net/tag/nok|broader_prefLabel|Archéologie africaine +http://www.semanlink.net/tag/nok|broader_prefLabel|Art d'Afrique +http://www.semanlink.net/tag/nok|broader_prefLabel|Afrique de l'ouest : histoire +http://www.semanlink.net/tag/nok|broader_prefLabel|Nigeria +http://www.semanlink.net/tag/nok|broader_altLabel|African art +http://www.semanlink.net/tag/nok|broader_related|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/social_bookmarking|prefLabel|Social bookmarking +http://www.semanlink.net/tag/social_bookmarking|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/social_bookmarking|broader|http://www.semanlink.net/tag/social_content_services +http://www.semanlink.net/tag/social_bookmarking|broader|http://www.semanlink.net/tag/social_networks +http://www.semanlink.net/tag/social_bookmarking|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/social_bookmarking|uri|http://www.semanlink.net/tag/social_bookmarking +http://www.semanlink.net/tag/social_bookmarking|broader_prefLabel|Tagging +http://www.semanlink.net/tag/social_bookmarking|broader_prefLabel|Social Content Services +http://www.semanlink.net/tag/social_bookmarking|broader_prefLabel|Social Networks +http://www.semanlink.net/tag/saturne|prefLabel|Saturne +http://www.semanlink.net/tag/saturne|broader|http://www.semanlink.net/tag/systeme_solaire +http://www.semanlink.net/tag/saturne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/saturne|altLabel|Saturn +http://www.semanlink.net/tag/saturne|uri|http://www.semanlink.net/tag/saturne +http://www.semanlink.net/tag/saturne|broader_prefLabel|Système solaire +http://www.semanlink.net/tag/cuivre|creationTime|2008-06-25T21:40:30Z +http://www.semanlink.net/tag/cuivre|prefLabel|Cuivre +http://www.semanlink.net/tag/cuivre|creationDate|2008-06-25 +http://www.semanlink.net/tag/cuivre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cuivre|uri|http://www.semanlink.net/tag/cuivre +http://www.semanlink.net/tag/sani_aboussa|prefLabel|Sani Aboussa 
+http://www.semanlink.net/tag/sani_aboussa|broader|http://www.semanlink.net/tag/musique_du_niger +http://www.semanlink.net/tag/sani_aboussa|broader|http://www.semanlink.net/tag/musicien +http://www.semanlink.net/tag/sani_aboussa|related|http://www.semanlink.net/tag/zinder +http://www.semanlink.net/tag/sani_aboussa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sani_aboussa|uri|http://www.semanlink.net/tag/sani_aboussa +http://www.semanlink.net/tag/sani_aboussa|broader_prefLabel|Musique du Niger +http://www.semanlink.net/tag/sani_aboussa|broader_prefLabel|Musicien +http://www.semanlink.net/tag/terrorisme_islamiste|prefLabel|Terrorisme islamiste +http://www.semanlink.net/tag/terrorisme_islamiste|broader|http://www.semanlink.net/tag/terrorisme +http://www.semanlink.net/tag/terrorisme_islamiste|broader|http://www.semanlink.net/tag/extremisme_islamique +http://www.semanlink.net/tag/terrorisme_islamiste|broader|http://www.semanlink.net/tag/guerres_de_religion +http://www.semanlink.net/tag/terrorisme_islamiste|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/terrorisme_islamiste|uri|http://www.semanlink.net/tag/terrorisme_islamiste +http://www.semanlink.net/tag/terrorisme_islamiste|broader_prefLabel|Terrorisme +http://www.semanlink.net/tag/terrorisme_islamiste|broader_prefLabel|Extrémisme islamique +http://www.semanlink.net/tag/terrorisme_islamiste|broader_prefLabel|Guerres de religion +http://www.semanlink.net/tag/economie|prefLabel|Economie +http://www.semanlink.net/tag/economie|related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/economie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/economie|uri|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/isp_servlet_hosting|prefLabel|ISP / Servlet Hosting +http://www.semanlink.net/tag/isp_servlet_hosting|broader|http://www.semanlink.net/tag/servlet +http://www.semanlink.net/tag/isp_servlet_hosting|broader|http://www.semanlink.net/tag/isp +http://www.semanlink.net/tag/isp_servlet_hosting|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/isp_servlet_hosting|uri|http://www.semanlink.net/tag/isp_servlet_hosting +http://www.semanlink.net/tag/isp_servlet_hosting|broader_prefLabel|Servlet +http://www.semanlink.net/tag/isp_servlet_hosting|broader_prefLabel|ISP +http://www.semanlink.net/tag/origines_du_sida|prefLabel|Origines du sida +http://www.semanlink.net/tag/origines_du_sida|broader|http://www.semanlink.net/tag/sida +http://www.semanlink.net/tag/origines_du_sida|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/origines_du_sida|uri|http://www.semanlink.net/tag/origines_du_sida +http://www.semanlink.net/tag/origines_du_sida|broader_prefLabel|Sida +http://www.semanlink.net/tag/origines_du_sida|broader_altLabel|VIH +http://www.semanlink.net/tag/origines_du_sida|broader_altLabel|AIDS +http://www.semanlink.net/tag/origines_du_sida|broader_altLabel|HIV +http://www.semanlink.net/tag/musique_du_niger|prefLabel|Musique du Niger +http://www.semanlink.net/tag/musique_du_niger|broader|http://www.semanlink.net/tag/music_of_africa +http://www.semanlink.net/tag/musique_du_niger|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/musique_du_niger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/musique_du_niger|uri|http://www.semanlink.net/tag/musique_du_niger 
+http://www.semanlink.net/tag/musique_du_niger|broader_prefLabel|Music of Africa +http://www.semanlink.net/tag/musique_du_niger|broader_prefLabel|Niger +http://www.semanlink.net/tag/musique_du_niger|broader_altLabel|Musique africaine +http://www.semanlink.net/tag/musique_du_niger|broader_altLabel|African music +http://www.semanlink.net/tag/musique_du_niger|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/musique_du_niger|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/musique_du_niger|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/apple_intel|prefLabel|Apple-Intel +http://www.semanlink.net/tag/apple_intel|broader|http://www.semanlink.net/tag/intel +http://www.semanlink.net/tag/apple_intel|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/apple_intel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apple_intel|uri|http://www.semanlink.net/tag/apple_intel +http://www.semanlink.net/tag/apple_intel|broader_prefLabel|Intel +http://www.semanlink.net/tag/apple_intel|broader_prefLabel|Apple +http://www.semanlink.net/tag/homme_de_flores|prefLabel|Homme de Florès +http://www.semanlink.net/tag/homme_de_flores|broader|http://www.semanlink.net/tag/paleoanthropology +http://www.semanlink.net/tag/homme_de_flores|broader|http://www.semanlink.net/tag/indonesie +http://www.semanlink.net/tag/homme_de_flores|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/homme_de_flores|uri|http://www.semanlink.net/tag/homme_de_flores +http://www.semanlink.net/tag/homme_de_flores|broader_prefLabel|Paleoanthropology +http://www.semanlink.net/tag/homme_de_flores|broader_prefLabel|Indonésie +http://www.semanlink.net/tag/homme_de_flores|broader_altLabel|Paléontologie humaine +http://www.semanlink.net/tag/cooperation|prefLabel|Coopération +http://www.semanlink.net/tag/cooperation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cooperation|uri|http://www.semanlink.net/tag/cooperation +http://www.semanlink.net/tag/sample_code|prefLabel|Sample code +http://www.semanlink.net/tag/sample_code|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/sample_code|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sample_code|uri|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/tag/sample_code|broader_prefLabel|Dev +http://www.semanlink.net/tag/louvre|prefLabel|Louvre +http://www.semanlink.net/tag/louvre|broader|http://www.semanlink.net/tag/musee +http://www.semanlink.net/tag/louvre|broader|http://www.semanlink.net/tag/paris +http://www.semanlink.net/tag/louvre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/louvre|uri|http://www.semanlink.net/tag/louvre +http://www.semanlink.net/tag/louvre|broader_prefLabel|Musée +http://www.semanlink.net/tag/louvre|broader_prefLabel|Paris +http://www.semanlink.net/tag/multi_language_support|creationTime|2012-07-03T00:53:18Z +http://www.semanlink.net/tag/multi_language_support|prefLabel|Multi-language support +http://www.semanlink.net/tag/multi_language_support|broader|http://www.semanlink.net/tag/langage +http://www.semanlink.net/tag/multi_language_support|creationDate|2012-07-03 +http://www.semanlink.net/tag/multi_language_support|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/multi_language_support|uri|http://www.semanlink.net/tag/multi_language_support +http://www.semanlink.net/tag/multi_language_support|broader_prefLabel|Language +http://www.semanlink.net/tag/multi_language_support|broader_altLabel|Langage +http://www.semanlink.net/tag/jedi_blue|creationTime|2021-10-25T12:00:01Z +http://www.semanlink.net/tag/jedi_blue|prefLabel|Jedi Blue +http://www.semanlink.net/tag/jedi_blue|broader|http://www.semanlink.net/tag/deletefb +http://www.semanlink.net/tag/jedi_blue|broader|http://www.semanlink.net/tag/google_advertising +http://www.semanlink.net/tag/jedi_blue|broader|http://www.semanlink.net/tag/google +http://www.semanlink.net/tag/jedi_blue|creationDate|2021-10-25 +http://www.semanlink.net/tag/jedi_blue|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jedi_blue|uri|http://www.semanlink.net/tag/jedi_blue +http://www.semanlink.net/tag/jedi_blue|broader_prefLabel|DeleteFB +http://www.semanlink.net/tag/jedi_blue|broader_prefLabel|Google + Advertising +http://www.semanlink.net/tag/jedi_blue|broader_prefLabel|Google +http://www.semanlink.net/tag/jedi_blue|broader_related|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/tag/alexandre_bertails|creationTime|2014-09-25T13:19:29Z +http://www.semanlink.net/tag/alexandre_bertails|prefLabel|Alexandre Bertails +http://www.semanlink.net/tag/alexandre_bertails|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/alexandre_bertails|related|http://www.semanlink.net/tag/linked_data_platform +http://www.semanlink.net/tag/alexandre_bertails|related|http://www.semanlink.net/tag/ld_patch +http://www.semanlink.net/tag/alexandre_bertails|creationDate|2014-09-25 +http://www.semanlink.net/tag/alexandre_bertails|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/alexandre_bertails|uri|http://www.semanlink.net/tag/alexandre_bertails +http://www.semanlink.net/tag/alexandre_bertails|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/geometry_of_language_embeddings|creationTime|2019-06-10T00:03:48Z +http://www.semanlink.net/tag/geometry_of_language_embeddings|prefLabel|Geometry of language embeddings +http://www.semanlink.net/tag/geometry_of_language_embeddings|broader|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/geometry_of_language_embeddings|creationDate|2019-06-10 +http://www.semanlink.net/tag/geometry_of_language_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/geometry_of_language_embeddings|uri|http://www.semanlink.net/tag/geometry_of_language_embeddings +http://www.semanlink.net/tag/geometry_of_language_embeddings|broader_prefLabel|Text Embeddings +http://www.semanlink.net/tag/geometry_of_language_embeddings|broader_related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/militaire|prefLabel|Militaire +http://www.semanlink.net/tag/militaire|creationDate|2006-08-19 +http://www.semanlink.net/tag/militaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/militaire|uri|http://www.semanlink.net/tag/militaire +http://www.semanlink.net/tag/perou|prefLabel|Pérou +http://www.semanlink.net/tag/perou|broader|http://www.semanlink.net/tag/amerique +http://www.semanlink.net/tag/perou|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/perou|uri|http://www.semanlink.net/tag/perou 
+http://www.semanlink.net/tag/perou|broader_prefLabel|Amérique +http://www.semanlink.net/tag/airbus|prefLabel|Airbus +http://www.semanlink.net/tag/airbus|creationDate|2006-10-09 +http://www.semanlink.net/tag/airbus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/airbus|uri|http://www.semanlink.net/tag/airbus +http://www.semanlink.net/tag/titan|prefLabel|Titan +http://www.semanlink.net/tag/titan|broader|http://www.semanlink.net/tag/saturne +http://www.semanlink.net/tag/titan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/titan|uri|http://www.semanlink.net/tag/titan +http://www.semanlink.net/tag/titan|broader_prefLabel|Saturne +http://www.semanlink.net/tag/titan|broader_altLabel|Saturn +http://www.semanlink.net/tag/threat_models|creationTime|2019-03-27T08:37:35Z +http://www.semanlink.net/tag/threat_models|prefLabel|Threat models +http://www.semanlink.net/tag/threat_models|creationDate|2019-03-27 +http://www.semanlink.net/tag/threat_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/threat_models|uri|http://www.semanlink.net/tag/threat_models +http://www.semanlink.net/tag/voute_nubienne|creationTime|2009-01-23T20:38:27Z +http://www.semanlink.net/tag/voute_nubienne|prefLabel|Voûte nubienne +http://www.semanlink.net/tag/voute_nubienne|broader|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/voute_nubienne|broader|http://www.semanlink.net/tag/architecture_en_terre +http://www.semanlink.net/tag/voute_nubienne|creationDate|2009-01-23 +http://www.semanlink.net/tag/voute_nubienne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/voute_nubienne|uri|http://www.semanlink.net/tag/voute_nubienne +http://www.semanlink.net/tag/voute_nubienne|broader_prefLabel|Sahel +http://www.semanlink.net/tag/voute_nubienne|broader_prefLabel|Architecture en terre +http://www.semanlink.net/tag/voute_nubienne|broader_related|http://www.semanlink.net/tag/banco +http://www.semanlink.net/tag/rdf_owl_documentation_tool|creationTime|2013-03-13T09:49:57Z +http://www.semanlink.net/tag/rdf_owl_documentation_tool|prefLabel|RDF-OWL documentation tool +http://www.semanlink.net/tag/rdf_owl_documentation_tool|broader|http://www.semanlink.net/tag/documentation_tool +http://www.semanlink.net/tag/rdf_owl_documentation_tool|broader|http://www.semanlink.net/tag/owl_tool +http://www.semanlink.net/tag/rdf_owl_documentation_tool|creationDate|2013-03-13 +http://www.semanlink.net/tag/rdf_owl_documentation_tool|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_owl_documentation_tool|uri|http://www.semanlink.net/tag/rdf_owl_documentation_tool +http://www.semanlink.net/tag/rdf_owl_documentation_tool|broader_prefLabel|Documentation tool +http://www.semanlink.net/tag/rdf_owl_documentation_tool|broader_prefLabel|OWL tool +http://www.semanlink.net/tag/named_graphs|creationTime|2007-07-13T19:00:31Z +http://www.semanlink.net/tag/named_graphs|prefLabel|Named Graphs +http://www.semanlink.net/tag/named_graphs|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/named_graphs|creationDate|2007-07-13 +http://www.semanlink.net/tag/named_graphs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/named_graphs|uri|http://www.semanlink.net/tag/named_graphs +http://www.semanlink.net/tag/named_graphs|broader_prefLabel|RDF 
+http://www.semanlink.net/tag/named_graphs|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/named_graphs|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/named_graphs|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/named_graphs|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/named_graphs|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/siberie|prefLabel|Sibérie +http://www.semanlink.net/tag/siberie|broader|http://www.semanlink.net/tag/russie +http://www.semanlink.net/tag/siberie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/siberie|uri|http://www.semanlink.net/tag/siberie +http://www.semanlink.net/tag/siberie|broader_prefLabel|Russie +http://www.semanlink.net/tag/the_lancet|prefLabel|The Lancet +http://www.semanlink.net/tag/the_lancet|broader|http://www.semanlink.net/tag/medecine +http://www.semanlink.net/tag/the_lancet|broader|http://www.semanlink.net/tag/publication_scientifique +http://www.semanlink.net/tag/the_lancet|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/the_lancet|uri|http://www.semanlink.net/tag/the_lancet +http://www.semanlink.net/tag/the_lancet|broader_prefLabel|Médecine +http://www.semanlink.net/tag/the_lancet|broader_prefLabel|Publication scientifique +http://www.semanlink.net/tag/rules_language|creationTime|2009-11-12T13:47:23Z +http://www.semanlink.net/tag/rules_language|prefLabel|Rules language +http://www.semanlink.net/tag/rules_language|broader|http://www.semanlink.net/tag/rules +http://www.semanlink.net/tag/rules_language|creationDate|2009-11-12 +http://www.semanlink.net/tag/rules_language|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rules_language|uri|http://www.semanlink.net/tag/rules_language +http://www.semanlink.net/tag/rules_language|broader_prefLabel|Rules +http://www.semanlink.net/tag/poesie|prefLabel|Poésie +http://www.semanlink.net/tag/poesie|broader|http://www.semanlink.net/tag/litterature +http://www.semanlink.net/tag/poesie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/poesie|uri|http://www.semanlink.net/tag/poesie +http://www.semanlink.net/tag/poesie|broader_prefLabel|Littérature +http://www.semanlink.net/tag/poesie|broader_related|http://www.semanlink.net/tag/livre +http://www.semanlink.net/tag/semantic_web_products|creationTime|2010-07-30T14:34:01Z +http://www.semanlink.net/tag/semantic_web_products|prefLabel|Semantic Web Products +http://www.semanlink.net/tag/semantic_web_products|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_products|creationDate|2010-07-30 +http://www.semanlink.net/tag/semantic_web_products|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_products|uri|http://www.semanlink.net/tag/semantic_web_products +http://www.semanlink.net/tag/semantic_web_products|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_products|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_products|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/brown_corpus|creationTime|2017-06-20T13:42:47Z +http://www.semanlink.net/tag/brown_corpus|prefLabel|Brown Corpus +http://www.semanlink.net/tag/brown_corpus|broader|http://www.semanlink.net/tag/text_corpora_and_lexical_resources 
+http://www.semanlink.net/tag/brown_corpus|related|http://www.semanlink.net/tag/part_of_speech_tagging
+http://www.semanlink.net/tag/brown_corpus|creationDate|2017-06-20
+http://www.semanlink.net/tag/brown_corpus|comment|collection of text where each element is already grammatically tagged. It contains about one million words and is often used to train statistical PoS taggers.
+http://www.semanlink.net/tag/brown_corpus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/brown_corpus|describedBy|https://en.wikipedia.org/wiki/Brown_Corpus
+http://www.semanlink.net/tag/brown_corpus|uri|http://www.semanlink.net/tag/brown_corpus
+http://www.semanlink.net/tag/brown_corpus|broader_prefLabel|Text Corpora and Lexical Resources
+http://www.semanlink.net/tag/brown_corpus|broader_altLabel|Text corpora
+http://www.semanlink.net/tag/brown_corpus|broader_altLabel|Lexical Resource
+http://www.semanlink.net/tag/brown_corpus|broader_altLabel|Text corpus
+http://www.semanlink.net/tag/brown_corpus|broader_related|http://www.semanlink.net/tag/part_of_speech_tagging
+http://www.semanlink.net/tag/json_visualization|creationTime|2014-12-09T15:30:33Z
+http://www.semanlink.net/tag/json_visualization|prefLabel|JSON Visualization
+http://www.semanlink.net/tag/json_visualization|broader|http://www.semanlink.net/tag/json
+http://www.semanlink.net/tag/json_visualization|creationDate|2014-12-09
+http://www.semanlink.net/tag/json_visualization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/json_visualization|uri|http://www.semanlink.net/tag/json_visualization
+http://www.semanlink.net/tag/json_visualization|broader_prefLabel|JSON
+http://www.semanlink.net/tag/patricia_highsmith|creationTime|2020-05-06T23:23:20Z
+http://www.semanlink.net/tag/patricia_highsmith|prefLabel|Patricia Highsmith
+http://www.semanlink.net/tag/patricia_highsmith|broader|http://www.semanlink.net/tag/thriller
+http://www.semanlink.net/tag/patricia_highsmith|broader|http://www.semanlink.net/tag/ecrivain
+http://www.semanlink.net/tag/patricia_highsmith|creationDate|2020-05-06
+http://www.semanlink.net/tag/patricia_highsmith|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/patricia_highsmith|uri|http://www.semanlink.net/tag/patricia_highsmith
+http://www.semanlink.net/tag/patricia_highsmith|broader_prefLabel|Thriller
+http://www.semanlink.net/tag/patricia_highsmith|broader_prefLabel|Ecrivain
+http://www.semanlink.net/tag/code_on_demand|creationTime|2015-03-05T10:23:51Z
+http://www.semanlink.net/tag/code_on_demand|prefLabel|Code on demand
+http://www.semanlink.net/tag/code_on_demand|creationDate|2015-03-05
+http://www.semanlink.net/tag/code_on_demand|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/code_on_demand|uri|http://www.semanlink.net/tag/code_on_demand
+http://www.semanlink.net/tag/afrique_de_l_est|prefLabel|Afrique de l'Est
+http://www.semanlink.net/tag/afrique_de_l_est|broader|http://www.semanlink.net/tag/afrique
+http://www.semanlink.net/tag/afrique_de_l_est|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/afrique_de_l_est|uri|http://www.semanlink.net/tag/afrique_de_l_est
+http://www.semanlink.net/tag/afrique_de_l_est|broader_prefLabel|Afrique
+http://www.semanlink.net/tag/afrique_de_l_est|broader_altLabel|Africa
+http://www.semanlink.net/tag/time_in_rdf|creationTime|2009-08-27T13:52:38Z
+http://www.semanlink.net/tag/time_in_rdf|prefLabel|Time in RDF
+http://www.semanlink.net/tag/time_in_rdf|broader|http://www.semanlink.net/tag/temps
+http://www.semanlink.net/tag/time_in_rdf|broader|http://www.semanlink.net/tag/rdf
+http://www.semanlink.net/tag/time_in_rdf|creationDate|2009-08-27
+http://www.semanlink.net/tag/time_in_rdf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/time_in_rdf|uri|http://www.semanlink.net/tag/time_in_rdf
+http://www.semanlink.net/tag/time_in_rdf|broader_prefLabel|Temps
+http://www.semanlink.net/tag/time_in_rdf|broader_prefLabel|RDF
+http://www.semanlink.net/tag/time_in_rdf|broader_related|http://www.semanlink.net/tag/w3c
+http://www.semanlink.net/tag/time_in_rdf|broader_related|http://www.semanlink.net/tag/ora_lassila
+http://www.semanlink.net/tag/time_in_rdf|broader_related|http://www.semanlink.net/tag/sparql
+http://www.semanlink.net/tag/time_in_rdf|broader_related|http://www.semanlink.net/tag/guha
+http://www.semanlink.net/tag/time_in_rdf|broader_related|http://www.semanlink.net/tag/grddl
+http://www.semanlink.net/tag/culture|prefLabel|Culture
+http://www.semanlink.net/tag/culture|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/culture|uri|http://www.semanlink.net/tag/culture
+http://www.semanlink.net/tag/competitivite|prefLabel|Compétitivité
+http://www.semanlink.net/tag/competitivite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/competitivite|uri|http://www.semanlink.net/tag/competitivite
+http://www.semanlink.net/tag/abel_prize|prefLabel|Abel Prize
+http://www.semanlink.net/tag/abel_prize|broader|http://www.semanlink.net/tag/mathematiques
+http://www.semanlink.net/tag/abel_prize|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/abel_prize|uri|http://www.semanlink.net/tag/abel_prize
+http://www.semanlink.net/tag/abel_prize|broader_prefLabel|Mathématiques
+http://www.semanlink.net/tag/abel_prize|broader_altLabel|Math
+http://www.semanlink.net/tag/blockchain|creationTime|2015-09-13T12:20:38Z
+http://www.semanlink.net/tag/blockchain|prefLabel|Blockchain
+http://www.semanlink.net/tag/blockchain|creationDate|2015-09-13
+http://www.semanlink.net/tag/blockchain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/blockchain|describedBy|https://en.wikipedia.org/wiki/Block_chain_(database)
+http://www.semanlink.net/tag/blockchain|uri|http://www.semanlink.net/tag/blockchain
+http://www.semanlink.net/tag/softmax|creationTime|2020-04-22T21:43:48Z
+http://www.semanlink.net/tag/softmax|prefLabel|Softmax
+http://www.semanlink.net/tag/softmax|creationDate|2020-04-22
+http://www.semanlink.net/tag/softmax|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/softmax|describedBy|https://en.wikipedia.org/wiki/Softmax_function
+http://www.semanlink.net/tag/softmax|uri|http://www.semanlink.net/tag/softmax
+http://www.semanlink.net/tag/semantic_web_tools|prefLabel|Semantic Web : Tools
+http://www.semanlink.net/tag/semantic_web_tools|broader|http://www.semanlink.net/tag/semantic_web
+http://www.semanlink.net/tag/semantic_web_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_web_tools|uri|http://www.semanlink.net/tag/semantic_web_tools
+http://www.semanlink.net/tag/semantic_web_tools|broader_prefLabel|Semantic Web
+http://www.semanlink.net/tag/semantic_web_tools|broader_altLabel|sw
+http://www.semanlink.net/tag/semantic_web_tools|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/frederick_giasson|creationTime|2007-04-25T15:40:10Z +http://www.semanlink.net/tag/frederick_giasson|prefLabel|Frederick Giasson +http://www.semanlink.net/tag/frederick_giasson|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/frederick_giasson|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/frederick_giasson|related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/frederick_giasson|related|http://www.semanlink.net/tag/umbel +http://www.semanlink.net/tag/frederick_giasson|creationDate|2007-04-25 +http://www.semanlink.net/tag/frederick_giasson|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/frederick_giasson|uri|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/frederick_giasson|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/frederick_giasson|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/frederick_giasson|broader_altLabel|Technical guys +http://www.semanlink.net/tag/fps_blog|prefLabel|fps blog +http://www.semanlink.net/tag/fps_blog|broader|http://www.semanlink.net/tag/blogger +http://www.semanlink.net/tag/fps_blog|broader|http://www.semanlink.net/tag/fps +http://www.semanlink.net/tag/fps_blog|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fps_blog|uri|http://www.semanlink.net/tag/fps_blog +http://www.semanlink.net/tag/fps_blog|broader_prefLabel|Blogger +http://www.semanlink.net/tag/fps_blog|broader_prefLabel|fps +http://www.semanlink.net/tag/sahel|prefLabel|Sahel +http://www.semanlink.net/tag/sahel|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/sahel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sahel|uri|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/sahel|broader_prefLabel|Afrique +http://www.semanlink.net/tag/sahel|broader_altLabel|Africa +http://www.semanlink.net/tag/mac_software|prefLabel|Mac software +http://www.semanlink.net/tag/mac_software|broader|http://www.semanlink.net/tag/software +http://www.semanlink.net/tag/mac_software|broader|http://www.semanlink.net/tag/macintosh +http://www.semanlink.net/tag/mac_software|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mac_software|uri|http://www.semanlink.net/tag/mac_software +http://www.semanlink.net/tag/mac_software|broader_prefLabel|Software +http://www.semanlink.net/tag/mac_software|broader_prefLabel|Macintosh +http://www.semanlink.net/tag/semantic_web_dev|prefLabel|Semantic Web Dev +http://www.semanlink.net/tag/semantic_web_dev|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/semantic_web_dev|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_dev|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_dev|uri|http://www.semanlink.net/tag/semantic_web_dev +http://www.semanlink.net/tag/semantic_web_dev|broader_prefLabel|Dev +http://www.semanlink.net/tag/semantic_web_dev|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_dev|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_dev|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/transe|creationTime|2018-01-30T10:48:55Z 
+http://www.semanlink.net/tag/transe|prefLabel|TransE +http://www.semanlink.net/tag/transe|broader|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/tag/transe|broader|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/transe|broader|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/transe|creationDate|2018-01-30 +http://www.semanlink.net/tag/transe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/transe|uri|http://www.semanlink.net/tag/transe +http://www.semanlink.net/tag/transe|broader_prefLabel|Knowledge Graph Embeddings +http://www.semanlink.net/tag/transe|broader_prefLabel|Knowledge Graph Completion +http://www.semanlink.net/tag/transe|broader_prefLabel|Entity embeddings +http://www.semanlink.net/tag/transe|broader_altLabel|KGE +http://www.semanlink.net/tag/transe|broader_altLabel|Knowledge graph embedding +http://www.semanlink.net/tag/transe|broader_altLabel|KG embedding +http://www.semanlink.net/tag/transe|broader_related|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/tag/transe|broader_related|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/tag/transe|broader_related|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/tag/transe|broader_related|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/tag/transe|broader_related|http://www.semanlink.net/tag/information_extraction +http://www.semanlink.net/tag/cfpm|prefLabel|CFPM +http://www.semanlink.net/tag/cfpm|broader|http://www.semanlink.net/tag/musique_du_niger +http://www.semanlink.net/tag/cfpm|broader|http://www.semanlink.net/tag/niamey +http://www.semanlink.net/tag/cfpm|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cfpm|uri|http://www.semanlink.net/tag/cfpm +http://www.semanlink.net/tag/cfpm|broader_prefLabel|Musique du Niger +http://www.semanlink.net/tag/cfpm|broader_prefLabel|Niamey +http://www.semanlink.net/tag/tortures_americaines|prefLabel|Tortures américaines +http://www.semanlink.net/tag/tortures_americaines|broader|http://www.semanlink.net/tag/torture +http://www.semanlink.net/tag/tortures_americaines|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/tortures_americaines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tortures_americaines|uri|http://www.semanlink.net/tag/tortures_americaines +http://www.semanlink.net/tag/tortures_americaines|broader_prefLabel|Torture +http://www.semanlink.net/tag/tortures_americaines|broader_prefLabel|USA +http://www.semanlink.net/tag/tortures_americaines|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/tortures_americaines|broader_altLabel|United States +http://www.semanlink.net/tag/paludisme|prefLabel|Paludisme +http://www.semanlink.net/tag/paludisme|broader|http://www.semanlink.net/tag/maladie +http://www.semanlink.net/tag/paludisme|broader|http://www.semanlink.net/tag/grands_problemes +http://www.semanlink.net/tag/paludisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paludisme|altLabel|Malaria +http://www.semanlink.net/tag/paludisme|uri|http://www.semanlink.net/tag/paludisme +http://www.semanlink.net/tag/paludisme|broader_prefLabel|Maladie +http://www.semanlink.net/tag/paludisme|broader_prefLabel|Grands problèmes 
+http://www.semanlink.net/tag/survey_analysis|creationTime|2017-06-08T00:47:15Z +http://www.semanlink.net/tag/survey_analysis|prefLabel|Survey analysis +http://www.semanlink.net/tag/survey_analysis|broader|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/tag/survey_analysis|related|http://www.semanlink.net/tag/topic_modeling_over_short_texts +http://www.semanlink.net/tag/survey_analysis|creationDate|2017-06-08 +http://www.semanlink.net/tag/survey_analysis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/survey_analysis|uri|http://www.semanlink.net/tag/survey_analysis +http://www.semanlink.net/tag/survey_analysis|broader_prefLabel|NLP: use cases +http://www.semanlink.net/tag/survey_analysis|broader_altLabel|NLP: applications +http://www.semanlink.net/tag/cameroun|prefLabel|Cameroun +http://www.semanlink.net/tag/cameroun|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/cameroun|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cameroun|uri|http://www.semanlink.net/tag/cameroun +http://www.semanlink.net/tag/cameroun|broader_prefLabel|Afrique +http://www.semanlink.net/tag/cameroun|broader_altLabel|Africa +http://www.semanlink.net/tag/uber|creationTime|2014-06-09T11:04:44Z +http://www.semanlink.net/tag/uber|prefLabel|Uber +http://www.semanlink.net/tag/uber|broader|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/tag/uber|creationDate|2014-06-09 +http://www.semanlink.net/tag/uber|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/uber|uri|http://www.semanlink.net/tag/uber +http://www.semanlink.net/tag/uber|broader_prefLabel|Automobile +http://www.semanlink.net/tag/uber|broader_altLabel|Automotive +http://www.semanlink.net/tag/architecture_en_terre|creationTime|2013-03-05T00:20:53Z +http://www.semanlink.net/tag/architecture_en_terre|prefLabel|Architecture en terre +http://www.semanlink.net/tag/architecture_en_terre|broader|http://www.semanlink.net/tag/architecture +http://www.semanlink.net/tag/architecture_en_terre|related|http://www.semanlink.net/tag/banco +http://www.semanlink.net/tag/architecture_en_terre|creationDate|2013-03-05 +http://www.semanlink.net/tag/architecture_en_terre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/architecture_en_terre|uri|http://www.semanlink.net/tag/architecture_en_terre +http://www.semanlink.net/tag/architecture_en_terre|broader_prefLabel|Architecture +http://www.semanlink.net/tag/inde_moderne|creationTime|2014-09-25T00:41:21Z +http://www.semanlink.net/tag/inde_moderne|prefLabel|Inde moderne +http://www.semanlink.net/tag/inde_moderne|broader|http://www.semanlink.net/tag/inde +http://www.semanlink.net/tag/inde_moderne|creationDate|2014-09-25 +http://www.semanlink.net/tag/inde_moderne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/inde_moderne|uri|http://www.semanlink.net/tag/inde_moderne +http://www.semanlink.net/tag/inde_moderne|broader_prefLabel|Inde +http://www.semanlink.net/tag/gao|creationTime|2013-06-03T13:29:28Z +http://www.semanlink.net/tag/gao|prefLabel|GAO +http://www.semanlink.net/tag/gao|broader|http://www.semanlink.net/tag/fps_ontologies +http://www.semanlink.net/tag/gao|broader|http://www.semanlink.net/tag/c2gweb_product_description_and_makolab +http://www.semanlink.net/tag/gao|broader|http://www.semanlink.net/tag/automotive_ontologies 
+http://www.semanlink.net/tag/gao|related|http://www.semanlink.net/tag/makolab +http://www.semanlink.net/tag/gao|creationDate|2013-06-03 +http://www.semanlink.net/tag/gao|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gao|uri|http://www.semanlink.net/tag/gao +http://www.semanlink.net/tag/gao|broader_prefLabel|fps ontologies +http://www.semanlink.net/tag/gao|broader_prefLabel|C2GWeb, Product description and Makolab +http://www.semanlink.net/tag/gao|broader_prefLabel|Automotive ontologies +http://www.semanlink.net/tag/gao|broader_altLabel|Car ontology +http://www.semanlink.net/tag/mspace|prefLabel|mSpace +http://www.semanlink.net/tag/mspace|broader|http://www.semanlink.net/tag/semantic_web_application +http://www.semanlink.net/tag/mspace|comment|mSpace is an interaction model and software framework to help people access and explore information. +http://www.semanlink.net/tag/mspace|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mspace|uri|http://www.semanlink.net/tag/mspace +http://www.semanlink.net/tag/mspace|broader_prefLabel|Semantic Web : Application +http://www.semanlink.net/tag/tor_anonymity_network|creationTime|2012-08-17T22:24:17Z +http://www.semanlink.net/tag/tor_anonymity_network|prefLabel|TOR +http://www.semanlink.net/tag/tor_anonymity_network|creationDate|2012-08-17 +http://www.semanlink.net/tag/tor_anonymity_network|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tor_anonymity_network|homepage|https://www.torproject.org +http://www.semanlink.net/tag/tor_anonymity_network|describedBy|https://en.wikipedia.org/wiki/Tor_(anonymity_network) +http://www.semanlink.net/tag/tor_anonymity_network|uri|http://www.semanlink.net/tag/tor_anonymity_network +http://www.semanlink.net/tag/memoire|creationTime|2008-08-29T18:44:59Z +http://www.semanlink.net/tag/memoire|prefLabel|Mémoire +http://www.semanlink.net/tag/memoire|creationDate|2008-08-29 +http://www.semanlink.net/tag/memoire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/memoire|uri|http://www.semanlink.net/tag/memoire +http://www.semanlink.net/tag/javascript_dom|prefLabel|JavaScript DOM +http://www.semanlink.net/tag/javascript_dom|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/javascript_dom|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/javascript_dom|uri|http://www.semanlink.net/tag/javascript_dom +http://www.semanlink.net/tag/javascript_dom|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/javascript_dom|broader_altLabel|js +http://www.semanlink.net/tag/voyager|prefLabel|Voyager +http://www.semanlink.net/tag/voyager|broader|http://www.semanlink.net/tag/missions_spatiales +http://www.semanlink.net/tag/voyager|broader|http://www.semanlink.net/tag/exploit +http://www.semanlink.net/tag/voyager|broader|http://www.semanlink.net/tag/nasa +http://www.semanlink.net/tag/voyager|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/voyager|uri|http://www.semanlink.net/tag/voyager +http://www.semanlink.net/tag/voyager|broader_prefLabel|Missions spatiales +http://www.semanlink.net/tag/voyager|broader_prefLabel|Exploit +http://www.semanlink.net/tag/voyager|broader_prefLabel|NASA +http://www.semanlink.net/tag/afrique_de_l_ouest|prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/afrique_de_l_ouest|broader|http://www.semanlink.net/tag/afrique 
+http://www.semanlink.net/tag/afrique_de_l_ouest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afrique_de_l_ouest|uri|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/afrique_de_l_ouest|broader_prefLabel|Afrique +http://www.semanlink.net/tag/afrique_de_l_ouest|broader_altLabel|Africa +http://www.semanlink.net/tag/paleoanthropology|creationTime|2020-08-21T17:08:34Z +http://www.semanlink.net/tag/paleoanthropology|prefLabel|Paleoanthropology +http://www.semanlink.net/tag/paleoanthropology|broader|http://www.semanlink.net/tag/paleontologie +http://www.semanlink.net/tag/paleoanthropology|creationDate|2020-08-21 +http://www.semanlink.net/tag/paleoanthropology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paleoanthropology|describedBy|https://en.wikipedia.org/wiki/Paleoanthropology +http://www.semanlink.net/tag/paleoanthropology|altLabel|Paléontologie humaine +http://www.semanlink.net/tag/paleoanthropology|uri|http://www.semanlink.net/tag/paleoanthropology +http://www.semanlink.net/tag/paleoanthropology|broader_prefLabel|Paléontologie +http://www.semanlink.net/tag/event_extraction|creationTime|2020-12-17T14:17:20Z +http://www.semanlink.net/tag/event_extraction|prefLabel|Event extraction +http://www.semanlink.net/tag/event_extraction|broader|http://www.semanlink.net/tag/nlp_problem +http://www.semanlink.net/tag/event_extraction|related|http://www.semanlink.net/tag/slot_tagging +http://www.semanlink.net/tag/event_extraction|related|http://www.semanlink.net/tag/intent_detection +http://www.semanlink.net/tag/event_extraction|creationDate|2020-12-17 +http://www.semanlink.net/tag/event_extraction|comment|Extract structured information —“what is happening” and the persons/objects that are involved — from unstructured text ([src](doc:2020/12/event_extraction_by_answering_)) +http://www.semanlink.net/tag/event_extraction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/event_extraction|uri|http://www.semanlink.net/tag/event_extraction +http://www.semanlink.net/tag/event_extraction|broader_prefLabel|NLP tasks / problems +http://www.semanlink.net/tag/data_science|creationTime|2013-03-29T01:22:37Z +http://www.semanlink.net/tag/data_science|prefLabel|Data science +http://www.semanlink.net/tag/data_science|creationDate|2013-03-29 +http://www.semanlink.net/tag/data_science|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_science|altLabel|Data analysis +http://www.semanlink.net/tag/data_science|uri|http://www.semanlink.net/tag/data_science +http://www.semanlink.net/tag/w3c_recommendation|prefLabel|W3C Recommendation +http://www.semanlink.net/tag/w3c_recommendation|broader|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/w3c_recommendation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/w3c_recommendation|uri|http://www.semanlink.net/tag/w3c_recommendation +http://www.semanlink.net/tag/w3c_recommendation|broader_prefLabel|W3C +http://www.semanlink.net/tag/w3c_recommendation|broader_related|http://www.semanlink.net/tag/owl +http://www.semanlink.net/tag/w3c_recommendation|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/informatique|prefLabel|Informatique +http://www.semanlink.net/tag/informatique|broader|http://www.semanlink.net/tag/technologie 
+http://www.semanlink.net/tag/informatique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/informatique|uri|http://www.semanlink.net/tag/informatique +http://www.semanlink.net/tag/informatique|broader_prefLabel|Technologie +http://www.semanlink.net/tag/catholicisme|prefLabel|Catholicisme +http://www.semanlink.net/tag/catholicisme|broader|http://www.semanlink.net/tag/religion +http://www.semanlink.net/tag/catholicisme|broader|http://www.semanlink.net/tag/chretiente +http://www.semanlink.net/tag/catholicisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/catholicisme|altLabel|Catholique +http://www.semanlink.net/tag/catholicisme|uri|http://www.semanlink.net/tag/catholicisme +http://www.semanlink.net/tag/catholicisme|broader_prefLabel|Religion +http://www.semanlink.net/tag/catholicisme|broader_prefLabel|Chrétienté +http://www.semanlink.net/tag/croissance|prefLabel|Croissance +http://www.semanlink.net/tag/croissance|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/croissance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/croissance|uri|http://www.semanlink.net/tag/croissance +http://www.semanlink.net/tag/croissance|broader_prefLabel|Economie +http://www.semanlink.net/tag/croissance|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/hugo|prefLabel|Victor Hugo +http://www.semanlink.net/tag/hugo|broader|http://www.semanlink.net/tag/grand_homme +http://www.semanlink.net/tag/hugo|broader|http://www.semanlink.net/tag/poete +http://www.semanlink.net/tag/hugo|broader|http://www.semanlink.net/tag/ecrivain +http://www.semanlink.net/tag/hugo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hugo|uri|http://www.semanlink.net/tag/hugo +http://www.semanlink.net/tag/hugo|broader_prefLabel|Grand Homme +http://www.semanlink.net/tag/hugo|broader_prefLabel|Poète +http://www.semanlink.net/tag/hugo|broader_prefLabel|Ecrivain +http://www.semanlink.net/tag/biodiversite|prefLabel|Biodiversité +http://www.semanlink.net/tag/biodiversite|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/biodiversite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/biodiversite|altLabel|Biodiversity +http://www.semanlink.net/tag/biodiversite|uri|http://www.semanlink.net/tag/biodiversite +http://www.semanlink.net/tag/biodiversite|broader_prefLabel|Biology +http://www.semanlink.net/tag/biodiversite|broader_altLabel|Biologie +http://www.semanlink.net/tag/pic_de_hubbert|prefLabel|Pic de Hubbert +http://www.semanlink.net/tag/pic_de_hubbert|broader|http://www.semanlink.net/tag/petrole +http://www.semanlink.net/tag/pic_de_hubbert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pic_de_hubbert|uri|http://www.semanlink.net/tag/pic_de_hubbert +http://www.semanlink.net/tag/pic_de_hubbert|broader_prefLabel|Pétrole +http://www.semanlink.net/tag/dinosaures|prefLabel|Dinosaures +http://www.semanlink.net/tag/dinosaures|broader|http://www.semanlink.net/tag/paleontologie +http://www.semanlink.net/tag/dinosaures|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dinosaures|uri|http://www.semanlink.net/tag/dinosaures +http://www.semanlink.net/tag/dinosaures|broader_prefLabel|Paléontologie +http://www.semanlink.net/tag/ipython_notebook|creationTime|2015-11-10T18:09:19Z 
+http://www.semanlink.net/tag/ipython_notebook|prefLabel|IPython notebook +http://www.semanlink.net/tag/ipython_notebook|broader|http://www.semanlink.net/tag/ipython +http://www.semanlink.net/tag/ipython_notebook|creationDate|2015-11-10 +http://www.semanlink.net/tag/ipython_notebook|comment|"``` +cd [notebook dir] +ipython notebook +``` + +faire ce qui est écrit dans le terminal : + +``` +Copy/paste this URL into your browser when you connect for the first time, + to login with a token: + http://localhost:8888/?token=XXXXXX +``` + +ensuite seulement [localhost:8888](http://localhost:8888) + + +" +http://www.semanlink.net/tag/ipython_notebook|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ipython_notebook|uri|http://www.semanlink.net/tag/ipython_notebook +http://www.semanlink.net/tag/ipython_notebook|broader_prefLabel|IPython +http://www.semanlink.net/tag/country_ontologies|prefLabel|Country ontologies +http://www.semanlink.net/tag/country_ontologies|broader|http://www.semanlink.net/tag/ontologies +http://www.semanlink.net/tag/country_ontologies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/country_ontologies|uri|http://www.semanlink.net/tag/country_ontologies +http://www.semanlink.net/tag/country_ontologies|broader_prefLabel|Ontologies +http://www.semanlink.net/tag/country_ontologies|broader_altLabel|Ontology +http://www.semanlink.net/tag/rdf_vocabularies|prefLabel|RDF Vocabularies +http://www.semanlink.net/tag/rdf_vocabularies|broader|http://www.semanlink.net/tag/rdf +http://www.semanlink.net/tag/rdf_vocabularies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdf_vocabularies|uri|http://www.semanlink.net/tag/rdf_vocabularies +http://www.semanlink.net/tag/rdf_vocabularies|broader_prefLabel|RDF +http://www.semanlink.net/tag/rdf_vocabularies|broader_related|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/tag/rdf_vocabularies|broader_related|http://www.semanlink.net/tag/ora_lassila +http://www.semanlink.net/tag/rdf_vocabularies|broader_related|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/rdf_vocabularies|broader_related|http://www.semanlink.net/tag/guha +http://www.semanlink.net/tag/rdf_vocabularies|broader_related|http://www.semanlink.net/tag/grddl +http://www.semanlink.net/tag/mauritanie|prefLabel|Mauritanie +http://www.semanlink.net/tag/mauritanie|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/mauritanie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mauritanie|uri|http://www.semanlink.net/tag/mauritanie +http://www.semanlink.net/tag/mauritanie|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/coupe_du_monde_2010|creationTime|2010-07-03T11:33:12Z +http://www.semanlink.net/tag/coupe_du_monde_2010|prefLabel|Coupe du monde 2010 +http://www.semanlink.net/tag/coupe_du_monde_2010|broader|http://www.semanlink.net/tag/coupe_du_monde_de_football +http://www.semanlink.net/tag/coupe_du_monde_2010|creationDate|2010-07-03 +http://www.semanlink.net/tag/coupe_du_monde_2010|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coupe_du_monde_2010|uri|http://www.semanlink.net/tag/coupe_du_monde_2010 +http://www.semanlink.net/tag/coupe_du_monde_2010|broader_prefLabel|Coupe du monde de football +http://www.semanlink.net/tag/catastrophe_industrielle|prefLabel|Catastrophe industrielle 
+http://www.semanlink.net/tag/catastrophe_industrielle|broader|http://www.semanlink.net/tag/catastrophe +http://www.semanlink.net/tag/catastrophe_industrielle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/catastrophe_industrielle|uri|http://www.semanlink.net/tag/catastrophe_industrielle +http://www.semanlink.net/tag/catastrophe_industrielle|broader_prefLabel|Catastrophe +http://www.semanlink.net/tag/matiere_premiere|prefLabel|Matière première +http://www.semanlink.net/tag/matiere_premiere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/matiere_premiere|uri|http://www.semanlink.net/tag/matiere_premiere +http://www.semanlink.net/tag/nationalisme|prefLabel|Nationalisme +http://www.semanlink.net/tag/nationalisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nationalisme|uri|http://www.semanlink.net/tag/nationalisme +http://www.semanlink.net/tag/blaspheme|creationTime|2020-09-09T21:27:34Z +http://www.semanlink.net/tag/blaspheme|prefLabel|Blasphème +http://www.semanlink.net/tag/blaspheme|broader|http://www.semanlink.net/tag/dieu +http://www.semanlink.net/tag/blaspheme|creationDate|2020-09-09 +http://www.semanlink.net/tag/blaspheme|comment|"""Blasphème : insulte à Dieu"". Dieu n'existant pas, le blasphème non plus" +http://www.semanlink.net/tag/blaspheme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blaspheme|uri|http://www.semanlink.net/tag/blaspheme +http://www.semanlink.net/tag/blaspheme|broader_prefLabel|dieu +http://www.semanlink.net/tag/centrafrique|creationTime|2020-05-13T15:23:18Z +http://www.semanlink.net/tag/centrafrique|prefLabel|Centrafrique +http://www.semanlink.net/tag/centrafrique|creationDate|2020-05-13 +http://www.semanlink.net/tag/centrafrique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/centrafrique|uri|http://www.semanlink.net/tag/centrafrique +http://www.semanlink.net/tag/jackson|creationTime|2015-03-09T19:14:05Z +http://www.semanlink.net/tag/jackson|prefLabel|Jackson +http://www.semanlink.net/tag/jackson|creationDate|2015-03-09 +http://www.semanlink.net/tag/jackson|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jackson|uri|http://www.semanlink.net/tag/jackson +http://www.semanlink.net/tag/antiquite_romaine|prefLabel|Antiquité romaine +http://www.semanlink.net/tag/antiquite_romaine|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/antiquite_romaine|broader|http://www.semanlink.net/tag/rome +http://www.semanlink.net/tag/antiquite_romaine|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antiquite_romaine|uri|http://www.semanlink.net/tag/antiquite_romaine +http://www.semanlink.net/tag/antiquite_romaine|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/antiquite_romaine|broader_prefLabel|Rome +http://www.semanlink.net/tag/drag_and_drop|creationTime|2019-04-19T17:29:35Z +http://www.semanlink.net/tag/drag_and_drop|prefLabel|Drag-and-Drop +http://www.semanlink.net/tag/drag_and_drop|creationDate|2019-04-19 +http://www.semanlink.net/tag/drag_and_drop|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/drag_and_drop|uri|http://www.semanlink.net/tag/drag_and_drop +http://www.semanlink.net/tag/lee_sedol|creationTime|2016-03-13T18:58:49Z +http://www.semanlink.net/tag/lee_sedol|prefLabel|Lee Sedol 
+http://www.semanlink.net/tag/lee_sedol|broader|http://www.semanlink.net/tag/go_game +http://www.semanlink.net/tag/lee_sedol|creationDate|2016-03-13 +http://www.semanlink.net/tag/lee_sedol|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lee_sedol|uri|http://www.semanlink.net/tag/lee_sedol +http://www.semanlink.net/tag/lee_sedol|broader_prefLabel|Go (Game) +http://www.semanlink.net/tag/slideshare|creationTime|2014-04-23T21:49:24Z +http://www.semanlink.net/tag/slideshare|prefLabel|SlideShare +http://www.semanlink.net/tag/slideshare|creationDate|2014-04-23 +http://www.semanlink.net/tag/slideshare|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/slideshare|uri|http://www.semanlink.net/tag/slideshare +http://www.semanlink.net/tag/mecanique_quantique|prefLabel|Mécanique quantique +http://www.semanlink.net/tag/mecanique_quantique|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/mecanique_quantique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mecanique_quantique|uri|http://www.semanlink.net/tag/mecanique_quantique +http://www.semanlink.net/tag/mecanique_quantique|broader_prefLabel|Physique +http://www.semanlink.net/tag/mecanique_quantique|broader_altLabel|Physics +http://www.semanlink.net/tag/suisse|prefLabel|Suisse +http://www.semanlink.net/tag/suisse|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/suisse|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/suisse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/suisse|uri|http://www.semanlink.net/tag/suisse +http://www.semanlink.net/tag/suisse|broader_prefLabel|Europe +http://www.semanlink.net/tag/suisse|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/antisemitisme|prefLabel|Antisémitisme +http://www.semanlink.net/tag/antisemitisme|broader|http://www.semanlink.net/tag/racisme +http://www.semanlink.net/tag/antisemitisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antisemitisme|uri|http://www.semanlink.net/tag/antisemitisme +http://www.semanlink.net/tag/antisemitisme|broader_prefLabel|Racisme +http://www.semanlink.net/tag/gabon|prefLabel|Gabon +http://www.semanlink.net/tag/gabon|broader|http://www.semanlink.net/tag/afrique_equatoriale +http://www.semanlink.net/tag/gabon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gabon|uri|http://www.semanlink.net/tag/gabon +http://www.semanlink.net/tag/gabon|broader_prefLabel|Afrique équatoriale +http://www.semanlink.net/tag/ip_address|creationTime|2013-09-28T11:13:18Z +http://www.semanlink.net/tag/ip_address|prefLabel|IP address +http://www.semanlink.net/tag/ip_address|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/ip_address|creationDate|2013-09-28 +http://www.semanlink.net/tag/ip_address|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ip_address|uri|http://www.semanlink.net/tag/ip_address +http://www.semanlink.net/tag/ip_address|broader_prefLabel|Internet +http://www.semanlink.net/tag/unsupervised_machine_translation|creationTime|2018-09-27T11:49:25Z +http://www.semanlink.net/tag/unsupervised_machine_translation|prefLabel|Unsupervised machine translation +http://www.semanlink.net/tag/unsupervised_machine_translation|broader|http://www.semanlink.net/tag/unsupervised_machine_learning 
+http://www.semanlink.net/tag/unsupervised_machine_translation|broader|http://www.semanlink.net/tag/machine_translation +http://www.semanlink.net/tag/unsupervised_machine_translation|creationDate|2018-09-27 +http://www.semanlink.net/tag/unsupervised_machine_translation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/unsupervised_machine_translation|uri|http://www.semanlink.net/tag/unsupervised_machine_translation +http://www.semanlink.net/tag/unsupervised_machine_translation|broader_prefLabel|Unsupervised machine learning +http://www.semanlink.net/tag/unsupervised_machine_translation|broader_prefLabel|Machine translation +http://www.semanlink.net/tag/unsupervised_machine_translation|broader_altLabel|Traduction automatique +http://www.semanlink.net/tag/pharaon|prefLabel|Pharaon +http://www.semanlink.net/tag/pharaon|broader|http://www.semanlink.net/tag/egypte_antique +http://www.semanlink.net/tag/pharaon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pharaon|uri|http://www.semanlink.net/tag/pharaon +http://www.semanlink.net/tag/pharaon|broader_prefLabel|Egypte antique +http://www.semanlink.net/tag/google_guy|creationTime|2015-11-20T17:29:17Z +http://www.semanlink.net/tag/google_guy|prefLabel|Google guy +http://www.semanlink.net/tag/google_guy|creationDate|2015-11-20 +http://www.semanlink.net/tag/google_guy|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/google_guy|uri|http://www.semanlink.net/tag/google_guy +http://www.semanlink.net/tag/maintenance|creationTime|2019-12-07T18:53:29Z +http://www.semanlink.net/tag/maintenance|prefLabel|Maintenance +http://www.semanlink.net/tag/maintenance|creationDate|2019-12-07 +http://www.semanlink.net/tag/maintenance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maintenance|uri|http://www.semanlink.net/tag/maintenance +http://www.semanlink.net/tag/ldow|creationTime|2009-03-18T00:21:20Z +http://www.semanlink.net/tag/ldow|prefLabel|LDOW +http://www.semanlink.net/tag/ldow|creationDate|2009-03-18 +http://www.semanlink.net/tag/ldow|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ldow|uri|http://www.semanlink.net/tag/ldow +http://www.semanlink.net/tag/grece_antique|prefLabel|Grèce antique +http://www.semanlink.net/tag/grece_antique|broader|http://www.semanlink.net/tag/grece +http://www.semanlink.net/tag/grece_antique|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/grece_antique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/grece_antique|uri|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/tag/grece_antique|broader_prefLabel|Grèce +http://www.semanlink.net/tag/grece_antique|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/dependency_injection|creationTime|2018-08-04T22:44:51Z +http://www.semanlink.net/tag/dependency_injection|prefLabel|Dependency Injection +http://www.semanlink.net/tag/dependency_injection|broader|http://www.semanlink.net/tag/inversion_of_control +http://www.semanlink.net/tag/dependency_injection|creationDate|2018-08-04 +http://www.semanlink.net/tag/dependency_injection|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dependency_injection|describedBy|https://en.wikipedia.org/wiki/Dependency_injection +http://www.semanlink.net/tag/dependency_injection|uri|http://www.semanlink.net/tag/dependency_injection 
+http://www.semanlink.net/tag/dependency_injection|broader_prefLabel|Inversion of Control +http://www.semanlink.net/tag/ben_laden|prefLabel|Ben Laden +http://www.semanlink.net/tag/ben_laden|broader|http://www.semanlink.net/tag/al_qaida +http://www.semanlink.net/tag/ben_laden|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ben_laden|uri|http://www.semanlink.net/tag/ben_laden +http://www.semanlink.net/tag/ben_laden|broader_prefLabel|Al-Qaida +http://www.semanlink.net/tag/homme_celebre|prefLabel|Homme célèbre +http://www.semanlink.net/tag/homme_celebre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/homme_celebre|uri|http://www.semanlink.net/tag/homme_celebre +http://www.semanlink.net/tag/bon_chef_d_etat|creationTime|2015-04-02T22:21:54Z +http://www.semanlink.net/tag/bon_chef_d_etat|prefLabel|Bon chef d'état +http://www.semanlink.net/tag/bon_chef_d_etat|broader|http://www.semanlink.net/tag/chef_d_etat +http://www.semanlink.net/tag/bon_chef_d_etat|creationDate|2015-04-02 +http://www.semanlink.net/tag/bon_chef_d_etat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bon_chef_d_etat|uri|http://www.semanlink.net/tag/bon_chef_d_etat +http://www.semanlink.net/tag/bon_chef_d_etat|broader_prefLabel|Chef d'état +http://www.semanlink.net/tag/antiquite_du_pakistan|prefLabel|Antiquité du Pakistan +http://www.semanlink.net/tag/antiquite_du_pakistan|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/antiquite_du_pakistan|broader|http://www.semanlink.net/tag/pakistan +http://www.semanlink.net/tag/antiquite_du_pakistan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antiquite_du_pakistan|uri|http://www.semanlink.net/tag/antiquite_du_pakistan +http://www.semanlink.net/tag/antiquite_du_pakistan|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/antiquite_du_pakistan|broader_prefLabel|Pakistan +http://www.semanlink.net/tag/surveillance_capitalism|creationTime|2020-12-19T13:52:55Z +http://www.semanlink.net/tag/surveillance_capitalism|prefLabel|Surveillance Capitalism +http://www.semanlink.net/tag/surveillance_capitalism|creationDate|2020-12-19 +http://www.semanlink.net/tag/surveillance_capitalism|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/surveillance_capitalism|uri|http://www.semanlink.net/tag/surveillance_capitalism +http://www.semanlink.net/tag/pixelwise_dense_prediction|creationTime|2020-04-16T15:08:54Z +http://www.semanlink.net/tag/pixelwise_dense_prediction|prefLabel|Pixelwise dense prediction +http://www.semanlink.net/tag/pixelwise_dense_prediction|broader|http://www.semanlink.net/tag/computer_vision +http://www.semanlink.net/tag/pixelwise_dense_prediction|creationDate|2020-04-16 +http://www.semanlink.net/tag/pixelwise_dense_prediction|comment|the task of predicting a label for each pixel of an image +http://www.semanlink.net/tag/pixelwise_dense_prediction|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pixelwise_dense_prediction|uri|http://www.semanlink.net/tag/pixelwise_dense_prediction +http://www.semanlink.net/tag/pixelwise_dense_prediction|broader_prefLabel|Computer vision +http://www.semanlink.net/tag/pixelwise_dense_prediction|broader_altLabel|Visual Recognition +http://www.semanlink.net/tag/exode_rural|creationTime|2007-07-26T18:56:49Z +http://www.semanlink.net/tag/exode_rural|prefLabel|Exode rural 
+http://www.semanlink.net/tag/exode_rural|creationDate|2007-07-26 +http://www.semanlink.net/tag/exode_rural|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exode_rural|uri|http://www.semanlink.net/tag/exode_rural +http://www.semanlink.net/tag/voyage|creationTime|2008-05-20T21:10:03Z +http://www.semanlink.net/tag/voyage|prefLabel|Voyage +http://www.semanlink.net/tag/voyage|creationDate|2008-05-20 +http://www.semanlink.net/tag/voyage|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/voyage|uri|http://www.semanlink.net/tag/voyage +http://www.semanlink.net/tag/hugh_glaser|creationTime|2008-05-15T22:09:50Z +http://www.semanlink.net/tag/hugh_glaser|prefLabel|Hugh Glaser +http://www.semanlink.net/tag/hugh_glaser|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/hugh_glaser|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/hugh_glaser|related|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/hugh_glaser|creationDate|2008-05-15 +http://www.semanlink.net/tag/hugh_glaser|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hugh_glaser|uri|http://www.semanlink.net/tag/hugh_glaser +http://www.semanlink.net/tag/hugh_glaser|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/hugh_glaser|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/hugh_glaser|broader_altLabel|Technical guys +http://www.semanlink.net/tag/zombie_pcs|prefLabel|Zombie PCs +http://www.semanlink.net/tag/zombie_pcs|broader|http://www.semanlink.net/tag/securite_informatique +http://www.semanlink.net/tag/zombie_pcs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zombie_pcs|uri|http://www.semanlink.net/tag/zombie_pcs +http://www.semanlink.net/tag/zombie_pcs|broader_prefLabel|Cybersecurity +http://www.semanlink.net/tag/zombie_pcs|broader_prefLabel|Sécurité informatique +http://www.semanlink.net/tag/enterprise_search|creationTime|2012-07-04T22:46:14Z +http://www.semanlink.net/tag/enterprise_search|prefLabel|Enterprise Search +http://www.semanlink.net/tag/enterprise_search|creationDate|2012-07-04 +http://www.semanlink.net/tag/enterprise_search|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enterprise_search|uri|http://www.semanlink.net/tag/enterprise_search +http://www.semanlink.net/tag/fossile|prefLabel|Fossile +http://www.semanlink.net/tag/fossile|broader|http://www.semanlink.net/tag/paleontologie +http://www.semanlink.net/tag/fossile|broader|http://www.semanlink.net/tag/histoire_de_la_vie +http://www.semanlink.net/tag/fossile|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fossile|uri|http://www.semanlink.net/tag/fossile +http://www.semanlink.net/tag/fossile|broader_prefLabel|Paléontologie +http://www.semanlink.net/tag/fossile|broader_prefLabel|Histoire de la vie +http://www.semanlink.net/tag/maxent_models|creationTime|2012-04-15T14:34:19Z +http://www.semanlink.net/tag/maxent_models|prefLabel|Maxent models +http://www.semanlink.net/tag/maxent_models|broader|http://www.semanlink.net/tag/machine_learning_techniques +http://www.semanlink.net/tag/maxent_models|broader|http://www.semanlink.net/tag/nlp_techniques +http://www.semanlink.net/tag/maxent_models|broader|http://www.semanlink.net/tag/maximum_entropy +http://www.semanlink.net/tag/maxent_models|creationDate|2012-04-15 
+http://www.semanlink.net/tag/maxent_models|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maxent_models|uri|http://www.semanlink.net/tag/maxent_models +http://www.semanlink.net/tag/maxent_models|broader_prefLabel|Machine learning: techniques +http://www.semanlink.net/tag/maxent_models|broader_prefLabel|NLP techniques +http://www.semanlink.net/tag/maxent_models|broader_prefLabel|Maximum Entropy +http://www.semanlink.net/tag/massively_multiplayer_online_games|prefLabel|Massively multiplayer online games +http://www.semanlink.net/tag/massively_multiplayer_online_games|broader|http://www.semanlink.net/tag/jeux_en_ligne +http://www.semanlink.net/tag/massively_multiplayer_online_games|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/massively_multiplayer_online_games|altLabel|MMOG +http://www.semanlink.net/tag/massively_multiplayer_online_games|uri|http://www.semanlink.net/tag/massively_multiplayer_online_games +http://www.semanlink.net/tag/massively_multiplayer_online_games|broader_prefLabel|Jeux en ligne +http://www.semanlink.net/tag/developpement_durable|prefLabel|Développement durable +http://www.semanlink.net/tag/developpement_durable|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/developpement_durable|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/developpement_durable|uri|http://www.semanlink.net/tag/developpement_durable +http://www.semanlink.net/tag/developpement_durable|broader_prefLabel|Écologie +http://www.semanlink.net/tag/linkto_semanlink|prefLabel|LinkTo Semanlink +http://www.semanlink.net/tag/linkto_semanlink|broader|http://www.semanlink.net/tag/about_semanlink +http://www.semanlink.net/tag/linkto_semanlink|broader|http://www.semanlink.net/tag/link_to_me +http://www.semanlink.net/tag/linkto_semanlink|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linkto_semanlink|uri|http://www.semanlink.net/tag/linkto_semanlink +http://www.semanlink.net/tag/linkto_semanlink|broader_prefLabel|About Semanlink +http://www.semanlink.net/tag/linkto_semanlink|broader_prefLabel|Link to me +http://www.semanlink.net/tag/racisme|prefLabel|Racisme +http://www.semanlink.net/tag/racisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/racisme|uri|http://www.semanlink.net/tag/racisme +http://www.semanlink.net/tag/banque_centrale_europeenne|creationTime|2007-09-18T22:23:40Z +http://www.semanlink.net/tag/banque_centrale_europeenne|prefLabel|Banque Centrale Européenne +http://www.semanlink.net/tag/banque_centrale_europeenne|broader|http://www.semanlink.net/tag/politique_monetaire +http://www.semanlink.net/tag/banque_centrale_europeenne|broader|http://www.semanlink.net/tag/institutions_europeennes +http://www.semanlink.net/tag/banque_centrale_europeenne|creationDate|2007-09-18 +http://www.semanlink.net/tag/banque_centrale_europeenne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/banque_centrale_europeenne|altLabel|BCE +http://www.semanlink.net/tag/banque_centrale_europeenne|uri|http://www.semanlink.net/tag/banque_centrale_europeenne +http://www.semanlink.net/tag/banque_centrale_europeenne|broader_prefLabel|Politique monétaire +http://www.semanlink.net/tag/banque_centrale_europeenne|broader_prefLabel|Institutions européennes +http://www.semanlink.net/tag/jeremy_carroll|creationTime|2007-05-31T01:04:00Z 
+http://www.semanlink.net/tag/jeremy_carroll|prefLabel|Jeremy Carroll +http://www.semanlink.net/tag/jeremy_carroll|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/jeremy_carroll|related|http://www.semanlink.net/tag/jena +http://www.semanlink.net/tag/jeremy_carroll|creationDate|2007-05-31 +http://www.semanlink.net/tag/jeremy_carroll|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeremy_carroll|uri|http://www.semanlink.net/tag/jeremy_carroll +http://www.semanlink.net/tag/jeremy_carroll|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/mindmap|prefLabel|MindMap +http://www.semanlink.net/tag/mindmap|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mindmap|uri|http://www.semanlink.net/tag/mindmap +http://www.semanlink.net/tag/media_fragments|creationTime|2017-02-26T10:47:05Z +http://www.semanlink.net/tag/media_fragments|prefLabel|Media fragments +http://www.semanlink.net/tag/media_fragments|related|http://www.semanlink.net/tag/raphael_troncy +http://www.semanlink.net/tag/media_fragments|creationDate|2017-02-26 +http://www.semanlink.net/tag/media_fragments|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/media_fragments|uri|http://www.semanlink.net/tag/media_fragments +http://www.semanlink.net/tag/geographie|prefLabel|Géographie +http://www.semanlink.net/tag/geographie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/geographie|uri|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/education_and_linked_data|creationTime|2013-06-14T16:32:31Z +http://www.semanlink.net/tag/education_and_linked_data|prefLabel|Education and Linked Data +http://www.semanlink.net/tag/education_and_linked_data|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/education_and_linked_data|broader|http://www.semanlink.net/tag/technology_enhanced_learning +http://www.semanlink.net/tag/education_and_linked_data|creationDate|2013-06-14 +http://www.semanlink.net/tag/education_and_linked_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/education_and_linked_data|uri|http://www.semanlink.net/tag/education_and_linked_data +http://www.semanlink.net/tag/education_and_linked_data|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/education_and_linked_data|broader_prefLabel|Technology Enhanced Learning +http://www.semanlink.net/tag/education_and_linked_data|broader_altLabel|LD +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/musicbrainz 
+http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/education_and_linked_data|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/valls|creationTime|2014-07-03T21:53:17Z +http://www.semanlink.net/tag/valls|prefLabel|Valls +http://www.semanlink.net/tag/valls|creationDate|2014-07-03 +http://www.semanlink.net/tag/valls|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/valls|uri|http://www.semanlink.net/tag/valls +http://www.semanlink.net/tag/biodiversity_data|creationTime|2007-11-14T13:57:08Z +http://www.semanlink.net/tag/biodiversity_data|prefLabel|Biodiversity data +http://www.semanlink.net/tag/biodiversity_data|creationDate|2007-11-14 +http://www.semanlink.net/tag/biodiversity_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/biodiversity_data|uri|http://www.semanlink.net/tag/biodiversity_data +http://www.semanlink.net/tag/nathan_rixham|creationTime|2011-08-11T19:10:19Z +http://www.semanlink.net/tag/nathan_rixham|prefLabel|Nathan Rixham +http://www.semanlink.net/tag/nathan_rixham|broader|http://www.semanlink.net/tag/sw_guys +http://www.semanlink.net/tag/nathan_rixham|creationDate|2011-08-11 +http://www.semanlink.net/tag/nathan_rixham|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nathan_rixham|homepage|http://webr3.org/blog/ +http://www.semanlink.net/tag/nathan_rixham|weblog|http://webr3.org/blog/ +http://www.semanlink.net/tag/nathan_rixham|uri|http://www.semanlink.net/tag/nathan_rixham +http://www.semanlink.net/tag/nathan_rixham|broader_prefLabel|SW guys (and girls) +http://www.semanlink.net/tag/mac_os_x_tip|creationTime|2013-05-10T00:46:49Z +http://www.semanlink.net/tag/mac_os_x_tip|prefLabel|Mac OS X Tip +http://www.semanlink.net/tag/mac_os_x_tip|broader|http://www.semanlink.net/tag/tips +http://www.semanlink.net/tag/mac_os_x_tip|broader|http://www.semanlink.net/tag/mac_os_x +http://www.semanlink.net/tag/mac_os_x_tip|creationDate|2013-05-10 +http://www.semanlink.net/tag/mac_os_x_tip|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mac_os_x_tip|uri|http://www.semanlink.net/tag/mac_os_x_tip +http://www.semanlink.net/tag/mac_os_x_tip|broader_prefLabel|Tips +http://www.semanlink.net/tag/mac_os_x_tip|broader_prefLabel|Mac OS X +http://www.semanlink.net/tag/mac_os_x_tip|broader_altLabel|OS X +http://www.semanlink.net/tag/mac_os_x_tip|broader_altLabel|OSX +http://www.semanlink.net/tag/ml_evaluation|creationTime|2020-05-10T17:17:13Z +http://www.semanlink.net/tag/ml_evaluation|prefLabel|ML: evaluation +http://www.semanlink.net/tag/ml_evaluation|broader|http://www.semanlink.net/tag/machine_learning_problems +http://www.semanlink.net/tag/ml_evaluation|creationDate|2020-05-10 +http://www.semanlink.net/tag/ml_evaluation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/ml_evaluation|uri|http://www.semanlink.net/tag/ml_evaluation +http://www.semanlink.net/tag/ml_evaluation|broader_prefLabel|Machine learning: problems +http://www.semanlink.net/tag/ecole_des_mines|creationTime|2011-10-02T22:24:33Z +http://www.semanlink.net/tag/ecole_des_mines|prefLabel|Ecole des Mines +http://www.semanlink.net/tag/ecole_des_mines|creationDate|2011-10-02 +http://www.semanlink.net/tag/ecole_des_mines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ecole_des_mines|uri|http://www.semanlink.net/tag/ecole_des_mines +http://www.semanlink.net/tag/fair_use|creationTime|2007-09-13T22:23:42Z +http://www.semanlink.net/tag/fair_use|prefLabel|Fair use +http://www.semanlink.net/tag/fair_use|creationDate|2007-09-13 +http://www.semanlink.net/tag/fair_use|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fair_use|uri|http://www.semanlink.net/tag/fair_use +http://www.semanlink.net/tag/db2connect|creationTime|2009-04-08T15:18:32Z +http://www.semanlink.net/tag/db2connect|prefLabel|DB2Connect +http://www.semanlink.net/tag/db2connect|related|http://www.semanlink.net/tag/as400 +http://www.semanlink.net/tag/db2connect|creationDate|2009-04-08 +http://www.semanlink.net/tag/db2connect|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/db2connect|uri|http://www.semanlink.net/tag/db2connect +http://www.semanlink.net/tag/tomas_mikolov|creationTime|2017-09-09T14:42:32Z +http://www.semanlink.net/tag/tomas_mikolov|prefLabel|Tomas Mikolov +http://www.semanlink.net/tag/tomas_mikolov|broader|http://www.semanlink.net/tag/ai_girls_and_guys +http://www.semanlink.net/tag/tomas_mikolov|related|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/tag/tomas_mikolov|related|http://www.semanlink.net/tag/word2vec +http://www.semanlink.net/tag/tomas_mikolov|creationDate|2017-09-09 +http://www.semanlink.net/tag/tomas_mikolov|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tomas_mikolov|uri|http://www.semanlink.net/tag/tomas_mikolov +http://www.semanlink.net/tag/tomas_mikolov|broader_prefLabel|AI girls and guys +http://www.semanlink.net/tag/jean_claude_ameisen|creationTime|2011-05-13T00:28:18Z +http://www.semanlink.net/tag/jean_claude_ameisen|prefLabel|Jean-Claude Ameisen +http://www.semanlink.net/tag/jean_claude_ameisen|broader|http://www.semanlink.net/tag/heredite +http://www.semanlink.net/tag/jean_claude_ameisen|broader|http://www.semanlink.net/tag/darwin +http://www.semanlink.net/tag/jean_claude_ameisen|broader|http://www.semanlink.net/tag/france_inter +http://www.semanlink.net/tag/jean_claude_ameisen|creationDate|2011-05-13 +http://www.semanlink.net/tag/jean_claude_ameisen|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jean_claude_ameisen|uri|http://www.semanlink.net/tag/jean_claude_ameisen +http://www.semanlink.net/tag/jean_claude_ameisen|broader_prefLabel|Hérédité +http://www.semanlink.net/tag/jean_claude_ameisen|broader_prefLabel|Darwin +http://www.semanlink.net/tag/jean_claude_ameisen|broader_prefLabel|France Inter +http://www.semanlink.net/tag/rada_mihalcea|creationTime|2019-04-17T23:53:27Z +http://www.semanlink.net/tag/rada_mihalcea|prefLabel|Rada Mihalcea +http://www.semanlink.net/tag/rada_mihalcea|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/rada_mihalcea|creationDate|2019-04-17 
+http://www.semanlink.net/tag/rada_mihalcea|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rada_mihalcea|describedBy|https://en.wikipedia.org/wiki/Rada_Mihalcea +http://www.semanlink.net/tag/rada_mihalcea|uri|http://www.semanlink.net/tag/rada_mihalcea +http://www.semanlink.net/tag/rada_mihalcea|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/termite|creationTime|2007-12-28T02:05:33Z +http://www.semanlink.net/tag/termite|prefLabel|Termite +http://www.semanlink.net/tag/termite|broader|http://www.semanlink.net/tag/insecte +http://www.semanlink.net/tag/termite|creationDate|2007-12-28 +http://www.semanlink.net/tag/termite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/termite|uri|http://www.semanlink.net/tag/termite +http://www.semanlink.net/tag/termite|broader_prefLabel|Insecte +http://www.semanlink.net/tag/edgar_morin|prefLabel|Edgar Morin +http://www.semanlink.net/tag/edgar_morin|broader|http://www.semanlink.net/tag/intellectuel +http://www.semanlink.net/tag/edgar_morin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/edgar_morin|uri|http://www.semanlink.net/tag/edgar_morin +http://www.semanlink.net/tag/edgar_morin|broader_prefLabel|Intellectuel +http://www.semanlink.net/tag/pierre_larrouturou|creationTime|2012-05-01T12:09:20Z +http://www.semanlink.net/tag/pierre_larrouturou|prefLabel|Pierre Larrouturou +http://www.semanlink.net/tag/pierre_larrouturou|creationDate|2012-05-01 +http://www.semanlink.net/tag/pierre_larrouturou|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pierre_larrouturou|describedBy|https://fr.wikipedia.org/wiki/Pierre_Larrouturou +http://www.semanlink.net/tag/pierre_larrouturou|uri|http://www.semanlink.net/tag/pierre_larrouturou +http://www.semanlink.net/tag/ultralingua|creationTime|2007-02-24T02:25:00Z +http://www.semanlink.net/tag/ultralingua|prefLabel|Ultralingua +http://www.semanlink.net/tag/ultralingua|broader|http://www.semanlink.net/tag/online_dictionary +http://www.semanlink.net/tag/ultralingua|creationDate|2007-02-24 +http://www.semanlink.net/tag/ultralingua|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ultralingua|uri|http://www.semanlink.net/tag/ultralingua +http://www.semanlink.net/tag/ultralingua|broader_prefLabel|Online dictionary +http://www.semanlink.net/tag/ml_as_a_service|creationTime|2015-08-25T10:59:21Z +http://www.semanlink.net/tag/ml_as_a_service|prefLabel|ML as a service +http://www.semanlink.net/tag/ml_as_a_service|broader|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/tag/ml_as_a_service|creationDate|2015-08-25 +http://www.semanlink.net/tag/ml_as_a_service|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ml_as_a_service|uri|http://www.semanlink.net/tag/ml_as_a_service +http://www.semanlink.net/tag/ml_as_a_service|broader_prefLabel|Machine learning +http://www.semanlink.net/tag/ml_as_a_service|broader_altLabel|ML +http://www.semanlink.net/tag/ml_as_a_service|broader_related|http://www.semanlink.net/tag/ng +http://www.semanlink.net/tag/ml_as_a_service|broader_related|http://www.semanlink.net/tag/data_mining +http://www.semanlink.net/tag/pompe_a_eau|prefLabel|Pompe à eau +http://www.semanlink.net/tag/pompe_a_eau|broader|http://www.semanlink.net/tag/irrigation +http://www.semanlink.net/tag/pompe_a_eau|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/pompe_a_eau|uri|http://www.semanlink.net/tag/pompe_a_eau +http://www.semanlink.net/tag/pompe_a_eau|broader_prefLabel|Irrigation +http://www.semanlink.net/tag/probabilites|prefLabel|Probabilités +http://www.semanlink.net/tag/probabilites|broader|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/tag/probabilites|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/probabilites|uri|http://www.semanlink.net/tag/probabilites +http://www.semanlink.net/tag/probabilites|broader_prefLabel|Mathématiques +http://www.semanlink.net/tag/probabilites|broader_altLabel|Math +http://www.semanlink.net/tag/cnrs|prefLabel|CNRS +http://www.semanlink.net/tag/cnrs|broader|http://www.semanlink.net/tag/recherche_francaise +http://www.semanlink.net/tag/cnrs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cnrs|uri|http://www.semanlink.net/tag/cnrs +http://www.semanlink.net/tag/cnrs|broader_prefLabel|France : recherche +http://www.semanlink.net/tag/cnrs|broader_altLabel|Recherche française +http://www.semanlink.net/tag/supply_chain|creationTime|2016-01-31T13:23:10Z +http://www.semanlink.net/tag/supply_chain|prefLabel|Supply chain +http://www.semanlink.net/tag/supply_chain|creationDate|2016-01-31 +http://www.semanlink.net/tag/supply_chain|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/supply_chain|uri|http://www.semanlink.net/tag/supply_chain +http://www.semanlink.net/tag/droit_a_l_information|creationTime|2013-04-11T21:41:15Z +http://www.semanlink.net/tag/droit_a_l_information|prefLabel|Droit à l'information +http://www.semanlink.net/tag/droit_a_l_information|broader|http://www.semanlink.net/tag/justice +http://www.semanlink.net/tag/droit_a_l_information|creationDate|2013-04-11 +http://www.semanlink.net/tag/droit_a_l_information|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/droit_a_l_information|uri|http://www.semanlink.net/tag/droit_a_l_information +http://www.semanlink.net/tag/droit_a_l_information|broader_prefLabel|Justice +http://www.semanlink.net/tag/oracle|prefLabel|Oracle +http://www.semanlink.net/tag/oracle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/oracle|uri|http://www.semanlink.net/tag/oracle +http://www.semanlink.net/tag/classification_systems|prefLabel|Classification systems +http://www.semanlink.net/tag/classification_systems|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/classification_systems|uri|http://www.semanlink.net/tag/classification_systems +http://www.semanlink.net/tag/cheval|prefLabel|Cheval +http://www.semanlink.net/tag/cheval|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/cheval|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cheval|uri|http://www.semanlink.net/tag/cheval +http://www.semanlink.net/tag/cheval|broader_prefLabel|Animal +http://www.semanlink.net/tag/yoav_goldberg|creationTime|2017-11-06T15:32:31Z +http://www.semanlink.net/tag/yoav_goldberg|prefLabel|Yoav Goldberg +http://www.semanlink.net/tag/yoav_goldberg|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/yoav_goldberg|related|http://www.semanlink.net/tag/critical_evaluation +http://www.semanlink.net/tag/yoav_goldberg|related|http://www.semanlink.net/tag/allen_institute_for_ai_a2i 
+http://www.semanlink.net/tag/yoav_goldberg|creationDate|2017-11-06 +http://www.semanlink.net/tag/yoav_goldberg|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/yoav_goldberg|uri|http://www.semanlink.net/tag/yoav_goldberg +http://www.semanlink.net/tag/yoav_goldberg|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/xtech|creationTime|2007-05-02T21:57:30Z +http://www.semanlink.net/tag/xtech|prefLabel|XTech +http://www.semanlink.net/tag/xtech|broader|http://www.semanlink.net/tag/conferences +http://www.semanlink.net/tag/xtech|broader|http://www.semanlink.net/tag/ntic +http://www.semanlink.net/tag/xtech|creationDate|2007-05-02 +http://www.semanlink.net/tag/xtech|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/xtech|uri|http://www.semanlink.net/tag/xtech +http://www.semanlink.net/tag/xtech|broader_prefLabel|Conférences +http://www.semanlink.net/tag/xtech|broader_prefLabel|NTIC +http://www.semanlink.net/tag/xtech|broader_related|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/tag/atom|prefLabel|ATOM (format) +http://www.semanlink.net/tag/atom|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/atom|uri|http://www.semanlink.net/tag/atom +http://www.semanlink.net/tag/catastrophe_ecologique|prefLabel|Catastrophe écologique +http://www.semanlink.net/tag/catastrophe_ecologique|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/catastrophe_ecologique|broader|http://www.semanlink.net/tag/catastrophe +http://www.semanlink.net/tag/catastrophe_ecologique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/catastrophe_ecologique|altLabel|Désastre écologique +http://www.semanlink.net/tag/catastrophe_ecologique|uri|http://www.semanlink.net/tag/catastrophe_ecologique +http://www.semanlink.net/tag/catastrophe_ecologique|broader_prefLabel|Écologie +http://www.semanlink.net/tag/catastrophe_ecologique|broader_prefLabel|Catastrophe +http://www.semanlink.net/tag/wikidata_query_service|creationTime|2015-09-08T23:44:25Z +http://www.semanlink.net/tag/wikidata_query_service|prefLabel|Wikidata query service +http://www.semanlink.net/tag/wikidata_query_service|broader|http://www.semanlink.net/tag/wikidata +http://www.semanlink.net/tag/wikidata_query_service|creationDate|2015-09-08 +http://www.semanlink.net/tag/wikidata_query_service|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/wikidata_query_service|homepage|https://query.wikidata.org/ +http://www.semanlink.net/tag/wikidata_query_service|uri|http://www.semanlink.net/tag/wikidata_query_service +http://www.semanlink.net/tag/wikidata_query_service|broader_prefLabel|Wikidata +http://www.semanlink.net/tag/wikidata_query_service|broader_related|http://www.semanlink.net/tag/dbpedia +http://www.semanlink.net/tag/avatar|creationTime|2018-11-05T18:27:08Z +http://www.semanlink.net/tag/avatar|prefLabel|Avatar +http://www.semanlink.net/tag/avatar|creationDate|2018-11-05 +http://www.semanlink.net/tag/avatar|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/avatar|uri|http://www.semanlink.net/tag/avatar +http://www.semanlink.net/tag/riemann|prefLabel|Riemann +http://www.semanlink.net/tag/riemann|broader|http://www.semanlink.net/tag/mathematicien +http://www.semanlink.net/tag/riemann|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/riemann|uri|http://www.semanlink.net/tag/riemann +http://www.semanlink.net/tag/riemann|broader_prefLabel|Mathématicien +http://www.semanlink.net/tag/solr_autocomplete|creationTime|2015-06-20T09:50:07Z +http://www.semanlink.net/tag/solr_autocomplete|prefLabel|Solr - autocomplete +http://www.semanlink.net/tag/solr_autocomplete|broader|http://www.semanlink.net/tag/solr +http://www.semanlink.net/tag/solr_autocomplete|creationDate|2015-06-20 +http://www.semanlink.net/tag/solr_autocomplete|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/solr_autocomplete|uri|http://www.semanlink.net/tag/solr_autocomplete +http://www.semanlink.net/tag/solr_autocomplete|broader_prefLabel|Solr +http://www.semanlink.net/tag/parasitisme|prefLabel|Parasitisme +http://www.semanlink.net/tag/parasitisme|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/parasitisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/parasitisme|uri|http://www.semanlink.net/tag/parasitisme +http://www.semanlink.net/tag/parasitisme|broader_prefLabel|Biology +http://www.semanlink.net/tag/parasitisme|broader_altLabel|Biologie +http://www.semanlink.net/tag/ged|creationTime|2011-01-23T23:21:45Z +http://www.semanlink.net/tag/ged|prefLabel|GED +http://www.semanlink.net/tag/ged|creationDate|2011-01-23 +http://www.semanlink.net/tag/ged|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ged|uri|http://www.semanlink.net/tag/ged +http://www.semanlink.net/tag/minos_neutrino_experiment|prefLabel|MINOS Neutrino Experiment +http://www.semanlink.net/tag/minos_neutrino_experiment|broader|http://www.semanlink.net/tag/neutrino +http://www.semanlink.net/tag/minos_neutrino_experiment|broader|http://www.semanlink.net/tag/experience_scientifique +http://www.semanlink.net/tag/minos_neutrino_experiment|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/minos_neutrino_experiment|uri|http://www.semanlink.net/tag/minos_neutrino_experiment +http://www.semanlink.net/tag/minos_neutrino_experiment|broader_prefLabel|Neutrino +http://www.semanlink.net/tag/minos_neutrino_experiment|broader_prefLabel|Expérience scientifique +http://www.semanlink.net/tag/minos_neutrino_experiment|broader_altLabel|Neutrinos +http://www.semanlink.net/tag/antibiotiques|creationTime|2013-07-15T10:03:41Z +http://www.semanlink.net/tag/antibiotiques|prefLabel|Antibiotiques +http://www.semanlink.net/tag/antibiotiques|broader|http://www.semanlink.net/tag/medecine +http://www.semanlink.net/tag/antibiotiques|creationDate|2013-07-15 +http://www.semanlink.net/tag/antibiotiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antibiotiques|uri|http://www.semanlink.net/tag/antibiotiques +http://www.semanlink.net/tag/antibiotiques|broader_prefLabel|Médecine +http://www.semanlink.net/tag/sparql_en_javascript|creationTime|2007-10-13T00:10:14Z +http://www.semanlink.net/tag/sparql_en_javascript|prefLabel|SPARQL en javascript +http://www.semanlink.net/tag/sparql_en_javascript|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_en_javascript|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/sparql_en_javascript|broader|http://www.semanlink.net/tag/javascript_rdf +http://www.semanlink.net/tag/sparql_en_javascript|creationDate|2007-10-13 
+http://www.semanlink.net/tag/sparql_en_javascript|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/sparql_en_javascript|uri|http://www.semanlink.net/tag/sparql_en_javascript
+http://www.semanlink.net/tag/sparql_en_javascript|broader_prefLabel|SPARQL
+http://www.semanlink.net/tag/sparql_en_javascript|broader_prefLabel|JavaScript
+http://www.semanlink.net/tag/sparql_en_javascript|broader_prefLabel|Javascript RDF
+http://www.semanlink.net/tag/sparql_en_javascript|broader_altLabel|js
+http://www.semanlink.net/tag/sparql_en_javascript|broader_related|http://www.semanlink.net/tag/rdf_in_json
+http://www.semanlink.net/tag/github_pages|creationTime|2013-11-28T23:38:55Z
+http://www.semanlink.net/tag/github_pages|prefLabel|GitHub Pages
+http://www.semanlink.net/tag/github_pages|broader|http://www.semanlink.net/tag/github
+http://www.semanlink.net/tag/github_pages|creationDate|2013-11-28
+http://www.semanlink.net/tag/github_pages|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/github_pages|uri|http://www.semanlink.net/tag/github_pages
+http://www.semanlink.net/tag/github_pages|broader_prefLabel|GitHub
+http://www.semanlink.net/tag/dur_a_trouver|creationTime|2007-04-28T00:56:39Z
+http://www.semanlink.net/tag/dur_a_trouver|prefLabel|Dur à trouver
+http://www.semanlink.net/tag/dur_a_trouver|creationDate|2007-04-28
+http://www.semanlink.net/tag/dur_a_trouver|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/dur_a_trouver|uri|http://www.semanlink.net/tag/dur_a_trouver
+http://www.semanlink.net/tag/volcan|prefLabel|Volcan
+http://www.semanlink.net/tag/volcan|broader|http://www.semanlink.net/tag/geologie
+http://www.semanlink.net/tag/volcan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/volcan|uri|http://www.semanlink.net/tag/volcan
+http://www.semanlink.net/tag/volcan|broader_prefLabel|Géologie
+http://www.semanlink.net/tag/synthetic_biology|creationTime|2010-12-06T19:03:36Z
+http://www.semanlink.net/tag/synthetic_biology|prefLabel|Synthetic biology
+http://www.semanlink.net/tag/synthetic_biology|creationDate|2010-12-06
+http://www.semanlink.net/tag/synthetic_biology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/synthetic_biology|describedBy|https://en.wikipedia.org/wiki/Synthetic_biology
+http://www.semanlink.net/tag/synthetic_biology|uri|http://www.semanlink.net/tag/synthetic_biology
+http://www.semanlink.net/tag/dark_web|creationTime|2014-09-06T10:25:35Z
+http://www.semanlink.net/tag/dark_web|prefLabel|Dark Web
+http://www.semanlink.net/tag/dark_web|creationDate|2014-09-06
+http://www.semanlink.net/tag/dark_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/dark_web|uri|http://www.semanlink.net/tag/dark_web
+http://www.semanlink.net/tag/eswc_2008|creationTime|2008-06-20T19:02:36Z
+http://www.semanlink.net/tag/eswc_2008|prefLabel|ESWC 2008
+http://www.semanlink.net/tag/eswc_2008|broader|http://www.semanlink.net/tag/eswc
+http://www.semanlink.net/tag/eswc_2008|creationDate|2008-06-20
+http://www.semanlink.net/tag/eswc_2008|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/eswc_2008|uri|http://www.semanlink.net/tag/eswc_2008
+http://www.semanlink.net/tag/eswc_2008|broader_prefLabel|ESWC
+http://www.semanlink.net/tag/social_graph|creationTime|2012-05-10T02:00:12Z
+http://www.semanlink.net/tag/social_graph|prefLabel|Social Graph
+http://www.semanlink.net/tag/social_graph|creationDate|2012-05-10
+http://www.semanlink.net/tag/social_graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/social_graph|uri|http://www.semanlink.net/tag/social_graph
+http://www.semanlink.net/tag/porsche|creationTime|2021-04-11T09:31:45Z
+http://www.semanlink.net/tag/porsche|prefLabel|Porsche
+http://www.semanlink.net/tag/porsche|broader|http://www.semanlink.net/tag/volkswagen
+http://www.semanlink.net/tag/porsche|creationDate|2021-04-11
+http://www.semanlink.net/tag/porsche|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/porsche|describedBy|https://en.wikipedia.org/wiki/Porsche
+http://www.semanlink.net/tag/porsche|uri|http://www.semanlink.net/tag/porsche
+http://www.semanlink.net/tag/porsche|broader_prefLabel|Volkswagen
+http://www.semanlink.net/tag/porsche|broader_altLabel|VW
+http://www.semanlink.net/tag/semantic_technology|creationTime|2012-07-31T00:01:06Z
+http://www.semanlink.net/tag/semantic_technology|prefLabel|Semantic technology
+http://www.semanlink.net/tag/semantic_technology|creationDate|2012-07-31
+http://www.semanlink.net/tag/semantic_technology|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/semantic_technology|uri|http://www.semanlink.net/tag/semantic_technology
+http://www.semanlink.net/tag/learning_to_hash|creationTime|2021-06-03T11:20:47Z
+http://www.semanlink.net/tag/learning_to_hash|prefLabel|Learning to hash
+http://www.semanlink.net/tag/learning_to_hash|broader|http://www.semanlink.net/tag/learned_index_structures
+http://www.semanlink.net/tag/learning_to_hash|creationDate|2021-06-03
+http://www.semanlink.net/tag/learning_to_hash|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/learning_to_hash|uri|http://www.semanlink.net/tag/learning_to_hash
+http://www.semanlink.net/tag/learning_to_hash|broader_prefLabel|Learned Index Structures
+http://www.semanlink.net/tag/learning_to_hash|broader_related|http://www.semanlink.net/tag/semantic_hashing
+http://www.semanlink.net/tag/foret|prefLabel|Forêt
+http://www.semanlink.net/tag/foret|broader|http://www.semanlink.net/tag/arbres
+http://www.semanlink.net/tag/foret|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/foret|uri|http://www.semanlink.net/tag/foret
+http://www.semanlink.net/tag/foret|broader_prefLabel|Arbres
+http://www.semanlink.net/tag/dl_why_does_it_work|creationTime|2017-12-30T11:09:08Z
+http://www.semanlink.net/tag/dl_why_does_it_work|prefLabel|DL: why does it work?
+http://www.semanlink.net/tag/dl_why_does_it_work|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/dl_why_does_it_work|creationDate|2017-12-30 +http://www.semanlink.net/tag/dl_why_does_it_work|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dl_why_does_it_work|uri|http://www.semanlink.net/tag/dl_why_does_it_work +http://www.semanlink.net/tag/dl_why_does_it_work|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/dl_why_does_it_work|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/dl_why_does_it_work|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/gregg_kellogg|creationTime|2014-10-20T02:44:37Z +http://www.semanlink.net/tag/gregg_kellogg|prefLabel|Gregg Kellogg +http://www.semanlink.net/tag/gregg_kellogg|creationDate|2014-10-20 +http://www.semanlink.net/tag/gregg_kellogg|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gregg_kellogg|uri|http://www.semanlink.net/tag/gregg_kellogg +http://www.semanlink.net/tag/inde|prefLabel|Inde +http://www.semanlink.net/tag/inde|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/inde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/inde|uri|http://www.semanlink.net/tag/inde +http://www.semanlink.net/tag/inde|broader_prefLabel|Asie +http://www.semanlink.net/tag/assemblee_nationale|prefLabel|Assemblée nationale +http://www.semanlink.net/tag/assemblee_nationale|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/assemblee_nationale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/assemblee_nationale|uri|http://www.semanlink.net/tag/assemblee_nationale +http://www.semanlink.net/tag/assemblee_nationale|broader_prefLabel|France +http://www.semanlink.net/tag/energie_du_vide|prefLabel|Energie du vide +http://www.semanlink.net/tag/energie_du_vide|broader|http://www.semanlink.net/tag/enigmes_de_la_physique +http://www.semanlink.net/tag/energie_du_vide|creationDate|2006-07-20 +http://www.semanlink.net/tag/energie_du_vide|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/energie_du_vide|uri|http://www.semanlink.net/tag/energie_du_vide +http://www.semanlink.net/tag/energie_du_vide|broader_prefLabel|Enigmes de la physique +http://www.semanlink.net/tag/scandinavie|prefLabel|Scandinavie +http://www.semanlink.net/tag/scandinavie|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/scandinavie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/scandinavie|uri|http://www.semanlink.net/tag/scandinavie +http://www.semanlink.net/tag/scandinavie|broader_prefLabel|Europe +http://www.semanlink.net/tag/usa|prefLabel|USA +http://www.semanlink.net/tag/usa|broader|http://www.semanlink.net/tag/amerique +http://www.semanlink.net/tag/usa|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/usa|altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/usa|altLabel|United States +http://www.semanlink.net/tag/usa|uri|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/usa|broader_prefLabel|Amérique +http://www.semanlink.net/tag/mind_mapping|prefLabel|Mind mapping +http://www.semanlink.net/tag/mind_mapping|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/mind_mapping|uri|http://www.semanlink.net/tag/mind_mapping +http://www.semanlink.net/tag/media_player|creationTime|2008-10-02T01:09:48Z +http://www.semanlink.net/tag/media_player|prefLabel|Media Player +http://www.semanlink.net/tag/media_player|creationDate|2008-10-02 +http://www.semanlink.net/tag/media_player|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/media_player|uri|http://www.semanlink.net/tag/media_player +http://www.semanlink.net/tag/seamless_journey|creationTime|2013-05-23T14:18:22Z +http://www.semanlink.net/tag/seamless_journey|prefLabel|Seamless journey +http://www.semanlink.net/tag/seamless_journey|creationDate|2013-05-23 +http://www.semanlink.net/tag/seamless_journey|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/seamless_journey|uri|http://www.semanlink.net/tag/seamless_journey +http://www.semanlink.net/tag/ai_teams|creationTime|2020-03-19T21:38:01Z +http://www.semanlink.net/tag/ai_teams|prefLabel|AI teams +http://www.semanlink.net/tag/ai_teams|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_teams|creationDate|2020-03-19 +http://www.semanlink.net/tag/ai_teams|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_teams|uri|http://www.semanlink.net/tag/ai_teams +http://www.semanlink.net/tag/ai_teams|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_teams|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_teams|broader_altLabel|AI +http://www.semanlink.net/tag/ai_teams|broader_altLabel|IA +http://www.semanlink.net/tag/ai_teams|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/sparql_shortcomings|creationTime|2015-10-07T22:43:49Z +http://www.semanlink.net/tag/sparql_shortcomings|prefLabel|SPARQL: shortcomings +http://www.semanlink.net/tag/sparql_shortcomings|broader|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql_shortcomings|creationDate|2015-10-07 +http://www.semanlink.net/tag/sparql_shortcomings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql_shortcomings|uri|http://www.semanlink.net/tag/sparql_shortcomings +http://www.semanlink.net/tag/sparql_shortcomings|broader_prefLabel|SPARQL +http://www.semanlink.net/tag/knowledge_mining|creationTime|2020-12-19T11:28:01Z +http://www.semanlink.net/tag/knowledge_mining|prefLabel|Knowledge mining +http://www.semanlink.net/tag/knowledge_mining|broader|http://www.semanlink.net/tag/ai_knowledge_bases +http://www.semanlink.net/tag/knowledge_mining|creationDate|2020-12-19 +http://www.semanlink.net/tag/knowledge_mining|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/knowledge_mining|uri|http://www.semanlink.net/tag/knowledge_mining +http://www.semanlink.net/tag/knowledge_mining|broader_prefLabel|AI + Knowledge Bases +http://www.semanlink.net/tag/knowledge_mining|broader_related|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/tag/gnu|prefLabel|GNU +http://www.semanlink.net/tag/gnu|broader|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/tag/gnu|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gnu|uri|http://www.semanlink.net/tag/gnu +http://www.semanlink.net/tag/gnu|broader_prefLabel|Open Source +http://www.semanlink.net/tag/eau_de_mars|prefLabel|Eau de Mars 
+http://www.semanlink.net/tag/eau_de_mars|broader|http://www.semanlink.net/tag/eau_extraterrestre
+http://www.semanlink.net/tag/eau_de_mars|broader|http://www.semanlink.net/tag/mars
+http://www.semanlink.net/tag/eau_de_mars|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/eau_de_mars|uri|http://www.semanlink.net/tag/eau_de_mars
+http://www.semanlink.net/tag/eau_de_mars|broader_prefLabel|Eau extraterrestre
+http://www.semanlink.net/tag/eau_de_mars|broader_prefLabel|Mars
+http://www.semanlink.net/tag/eau_de_mars|broader_related|http://www.semanlink.net/tag/vie_extraterrestre
+http://www.semanlink.net/tag/open_domain_question_answering|creationTime|2019-05-17T10:35:26Z
+http://www.semanlink.net/tag/open_domain_question_answering|prefLabel|Open Domain Question Answering
+http://www.semanlink.net/tag/open_domain_question_answering|broader|http://www.semanlink.net/tag/question_answering
+http://www.semanlink.net/tag/open_domain_question_answering|creationDate|2019-05-17
+http://www.semanlink.net/tag/open_domain_question_answering|comment|"answering arbitrary
+context-independent questions (e.g. well-known
+facts or historical details). Typically assumed
+that the model can access an external collection
+of knowledge (e.g. a
+structured knowledge base or unstructured text
+corpus) (~open-book exam)"
+http://www.semanlink.net/tag/open_domain_question_answering|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/open_domain_question_answering|uri|http://www.semanlink.net/tag/open_domain_question_answering
+http://www.semanlink.net/tag/open_domain_question_answering|broader_prefLabel|Question Answering
+http://www.semanlink.net/tag/open_domain_question_answering|broader_altLabel|QA
+http://www.semanlink.net/tag/open_domain_question_answering|broader_related|http://www.semanlink.net/tag/chatbot
+http://www.semanlink.net/tag/javascript_librairies|creationTime|2008-06-17T23:08:59Z
+http://www.semanlink.net/tag/javascript_librairies|prefLabel|JavaScript librairies
+http://www.semanlink.net/tag/javascript_librairies|broader|http://www.semanlink.net/tag/javascript
+http://www.semanlink.net/tag/javascript_librairies|broader|http://www.semanlink.net/tag/web_dev_framework
+http://www.semanlink.net/tag/javascript_librairies|broader|http://www.semanlink.net/tag/library_code
+http://www.semanlink.net/tag/javascript_librairies|creationDate|2008-06-17
+http://www.semanlink.net/tag/javascript_librairies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/javascript_librairies|altLabel|JavaScript framework
+http://www.semanlink.net/tag/javascript_librairies|uri|http://www.semanlink.net/tag/javascript_librairies
+http://www.semanlink.net/tag/javascript_librairies|broader_prefLabel|JavaScript
+http://www.semanlink.net/tag/javascript_librairies|broader_prefLabel|Web dev framework
+http://www.semanlink.net/tag/javascript_librairies|broader_prefLabel|Library (code)
+http://www.semanlink.net/tag/javascript_librairies|broader_altLabel|js
+http://www.semanlink.net/tag/javascript_librairies|broader_related|http://www.semanlink.net/tag/frameworks
+http://www.semanlink.net/tag/touchgraph|prefLabel|TouchGraph
+http://www.semanlink.net/tag/touchgraph|broader|http://www.semanlink.net/tag/graph_visualization
+http://www.semanlink.net/tag/touchgraph|broader|http://www.semanlink.net/tag/applet
+http://www.semanlink.net/tag/touchgraph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/touchgraph|uri|http://www.semanlink.net/tag/touchgraph
+http://www.semanlink.net/tag/touchgraph|broader_prefLabel|Graph visualization
+http://www.semanlink.net/tag/touchgraph|broader_prefLabel|Applet
+http://www.semanlink.net/tag/rfid_passports|prefLabel|RFID passports
+http://www.semanlink.net/tag/rfid_passports|broader|http://www.semanlink.net/tag/etat_policier
+http://www.semanlink.net/tag/rfid_passports|broader|http://www.semanlink.net/tag/rfid
+http://www.semanlink.net/tag/rfid_passports|broader|http://www.semanlink.net/tag/big_brother
+http://www.semanlink.net/tag/rfid_passports|creationDate|2006-07-20
+http://www.semanlink.net/tag/rfid_passports|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/rfid_passports|uri|http://www.semanlink.net/tag/rfid_passports
+http://www.semanlink.net/tag/rfid_passports|broader_prefLabel|Etat policier
+http://www.semanlink.net/tag/rfid_passports|broader_prefLabel|RFID
+http://www.semanlink.net/tag/rfid_passports|broader_prefLabel|Big Brother
+http://www.semanlink.net/tag/rfid_passports|broader_related|http://www.semanlink.net/tag/securite
+http://www.semanlink.net/tag/historien|prefLabel|Historien
+http://www.semanlink.net/tag/historien|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/historien|uri|http://www.semanlink.net/tag/historien
+http://www.semanlink.net/tag/celera_ou_craig_venter|prefLabel|Celera ou Craig Venter
+http://www.semanlink.net/tag/celera_ou_craig_venter|broader|http://www.semanlink.net/tag/biotechnologies
+http://www.semanlink.net/tag/celera_ou_craig_venter|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/celera_ou_craig_venter|uri|http://www.semanlink.net/tag/celera_ou_craig_venter
+http://www.semanlink.net/tag/celera_ou_craig_venter|broader_prefLabel|Biotechnologies
+http://www.semanlink.net/tag/social_manipulation|creationTime|2018-03-21T09:29:37Z
+http://www.semanlink.net/tag/social_manipulation|prefLabel|Social manipulation
+http://www.semanlink.net/tag/social_manipulation|broader|http://www.semanlink.net/tag/manipulation
+http://www.semanlink.net/tag/social_manipulation|creationDate|2018-03-21
+http://www.semanlink.net/tag/social_manipulation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/social_manipulation|uri|http://www.semanlink.net/tag/social_manipulation
+http://www.semanlink.net/tag/social_manipulation|broader_prefLabel|Manipulation
+http://www.semanlink.net/tag/lyrics|prefLabel|Lyrics
+http://www.semanlink.net/tag/lyrics|broader|http://www.semanlink.net/tag/musique
+http://www.semanlink.net/tag/lyrics|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/lyrics|uri|http://www.semanlink.net/tag/lyrics
+http://www.semanlink.net/tag/lyrics|broader_prefLabel|Musique
+http://www.semanlink.net/tag/lyrics|broader_altLabel|Music
+http://www.semanlink.net/tag/energie_solaire|prefLabel|Energie solaire
+http://www.semanlink.net/tag/energie_solaire|broader|http://www.semanlink.net/tag/energies_renouvelables
+http://www.semanlink.net/tag/energie_solaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/energie_solaire|uri|http://www.semanlink.net/tag/energie_solaire
+http://www.semanlink.net/tag/energie_solaire|broader_prefLabel|Energies renouvelables
+http://www.semanlink.net/tag/smushing|prefLabel|Smushing +http://www.semanlink.net/tag/smushing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/smushing|uri|http://www.semanlink.net/tag/smushing +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|creationTime|2019-11-16T12:16:03Z +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|prefLabel|Connectionist vs symbolic debate +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|creationDate|2019-11-16 +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|uri|http://www.semanlink.net/tag/connectionist_vs_symbolic_debate +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|broader_altLabel|AI +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|broader_altLabel|IA +http://www.semanlink.net/tag/connectionist_vs_symbolic_debate|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/dictature|prefLabel|Dictature +http://www.semanlink.net/tag/dictature|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dictature|uri|http://www.semanlink.net/tag/dictature +http://www.semanlink.net/tag/afrique_centrale|prefLabel|Afrique Centrale +http://www.semanlink.net/tag/afrique_centrale|broader|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/tag/afrique_centrale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/afrique_centrale|uri|http://www.semanlink.net/tag/afrique_centrale +http://www.semanlink.net/tag/afrique_centrale|broader_prefLabel|Afrique +http://www.semanlink.net/tag/afrique_centrale|broader_altLabel|Africa +http://www.semanlink.net/tag/anthrax|creationTime|2016-07-29T21:33:37Z +http://www.semanlink.net/tag/anthrax|prefLabel|Anthrax +http://www.semanlink.net/tag/anthrax|creationDate|2016-07-29 +http://www.semanlink.net/tag/anthrax|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/anthrax|uri|http://www.semanlink.net/tag/anthrax +http://www.semanlink.net/tag/diplomatie|creationTime|2010-12-01T01:18:20Z +http://www.semanlink.net/tag/diplomatie|prefLabel|Diplomatie +http://www.semanlink.net/tag/diplomatie|creationDate|2010-12-01 +http://www.semanlink.net/tag/diplomatie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/diplomatie|uri|http://www.semanlink.net/tag/diplomatie +http://www.semanlink.net/tag/sw_demo|creationTime|2010-09-30T16:58:07Z +http://www.semanlink.net/tag/sw_demo|prefLabel|SW demo +http://www.semanlink.net/tag/sw_demo|broader|http://www.semanlink.net/tag/demo +http://www.semanlink.net/tag/sw_demo|creationDate|2010-09-30 +http://www.semanlink.net/tag/sw_demo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sw_demo|uri|http://www.semanlink.net/tag/sw_demo +http://www.semanlink.net/tag/sw_demo|broader_prefLabel|Demo +http://www.semanlink.net/tag/rdfjs|creationTime|2017-08-09T00:07:47Z +http://www.semanlink.net/tag/rdfjs|prefLabel|RDFJS 
+http://www.semanlink.net/tag/rdfjs|broader|http://www.semanlink.net/tag/javascript_rdf +http://www.semanlink.net/tag/rdfjs|creationDate|2017-08-09 +http://www.semanlink.net/tag/rdfjs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rdfjs|uri|http://www.semanlink.net/tag/rdfjs +http://www.semanlink.net/tag/rdfjs|broader_prefLabel|Javascript RDF +http://www.semanlink.net/tag/rdfjs|broader_related|http://www.semanlink.net/tag/rdf_in_json +http://www.semanlink.net/tag/slides|prefLabel|Slides +http://www.semanlink.net/tag/slides|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/slides|uri|http://www.semanlink.net/tag/slides +http://www.semanlink.net/tag/css_html_templates|prefLabel|css/html templates +http://www.semanlink.net/tag/css_html_templates|broader|http://www.semanlink.net/tag/css +http://www.semanlink.net/tag/css_html_templates|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/css_html_templates|uri|http://www.semanlink.net/tag/css_html_templates +http://www.semanlink.net/tag/css_html_templates|broader_prefLabel|css +http://www.semanlink.net/tag/jerusalem|prefLabel|Jérusalem +http://www.semanlink.net/tag/jerusalem|broader|http://www.semanlink.net/tag/israel +http://www.semanlink.net/tag/jerusalem|creationDate|2006-07-27 +http://www.semanlink.net/tag/jerusalem|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jerusalem|uri|http://www.semanlink.net/tag/jerusalem +http://www.semanlink.net/tag/jerusalem|broader_prefLabel|Israël +http://www.semanlink.net/tag/chine_europe|prefLabel|Chine-Europe +http://www.semanlink.net/tag/chine_europe|broader|http://www.semanlink.net/tag/chine +http://www.semanlink.net/tag/chine_europe|broader|http://www.semanlink.net/tag/chine_vs_occident +http://www.semanlink.net/tag/chine_europe|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/chine_europe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/chine_europe|uri|http://www.semanlink.net/tag/chine_europe +http://www.semanlink.net/tag/chine_europe|broader_prefLabel|Chine +http://www.semanlink.net/tag/chine_europe|broader_prefLabel|Chine vs Occident +http://www.semanlink.net/tag/chine_europe|broader_prefLabel|Europe +http://www.semanlink.net/tag/chine_europe|broader_altLabel|China +http://www.semanlink.net/tag/imperialisme|prefLabel|Impérialisme +http://www.semanlink.net/tag/imperialisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/imperialisme|uri|http://www.semanlink.net/tag/imperialisme +http://www.semanlink.net/tag/colza|creationTime|2007-10-23T00:26:01Z +http://www.semanlink.net/tag/colza|prefLabel|Colza +http://www.semanlink.net/tag/colza|creationDate|2007-10-23 +http://www.semanlink.net/tag/colza|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/colza|uri|http://www.semanlink.net/tag/colza +http://www.semanlink.net/tag/egypte_antique|prefLabel|Egypte antique +http://www.semanlink.net/tag/egypte_antique|broader|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/egypte_antique|broader|http://www.semanlink.net/tag/egypte +http://www.semanlink.net/tag/egypte_antique|broader|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/tag/egypte_antique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/egypte_antique|uri|http://www.semanlink.net/tag/egypte_antique +http://www.semanlink.net/tag/egypte_antique|broader_prefLabel|Antiquité +http://www.semanlink.net/tag/egypte_antique|broader_prefLabel|Egypte +http://www.semanlink.net/tag/egypte_antique|broader_prefLabel|Archéologie africaine +http://www.semanlink.net/tag/egypte_antique|broader_related|http://www.semanlink.net/tag/art_d_afrique +http://www.semanlink.net/tag/apv_evolution|creationTime|2010-07-05T10:04:21Z +http://www.semanlink.net/tag/apv_evolution|prefLabel|APV evolution +http://www.semanlink.net/tag/apv_evolution|creationDate|2010-07-05 +http://www.semanlink.net/tag/apv_evolution|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/apv_evolution|uri|http://www.semanlink.net/tag/apv_evolution +http://www.semanlink.net/tag/france_is_ai_2018|creationTime|2018-10-15T18:07:46Z +http://www.semanlink.net/tag/france_is_ai_2018|prefLabel|France is AI 2018 +http://www.semanlink.net/tag/france_is_ai_2018|broader|http://www.semanlink.net/tag/ai_conference +http://www.semanlink.net/tag/france_is_ai_2018|creationDate|2018-10-15 +http://www.semanlink.net/tag/france_is_ai_2018|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/france_is_ai_2018|altLabel|FranceIsAI 2018 +http://www.semanlink.net/tag/france_is_ai_2018|uri|http://www.semanlink.net/tag/france_is_ai_2018 +http://www.semanlink.net/tag/france_is_ai_2018|broader_prefLabel|AI Conference +http://www.semanlink.net/tag/product_modelling|creationTime|2009-11-12T14:01:06Z +http://www.semanlink.net/tag/product_modelling|prefLabel|Product Modelling +http://www.semanlink.net/tag/product_modelling|creationDate|2009-11-12 +http://www.semanlink.net/tag/product_modelling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/product_modelling|uri|http://www.semanlink.net/tag/product_modelling +http://www.semanlink.net/tag/bombay|prefLabel|Bombay +http://www.semanlink.net/tag/bombay|broader|http://www.semanlink.net/tag/inde +http://www.semanlink.net/tag/bombay|broader|http://www.semanlink.net/tag/ville +http://www.semanlink.net/tag/bombay|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bombay|uri|http://www.semanlink.net/tag/bombay +http://www.semanlink.net/tag/bombay|broader_prefLabel|Inde +http://www.semanlink.net/tag/bombay|broader_prefLabel|Ville +http://www.semanlink.net/tag/graph_editor|creationTime|2014-12-22T13:49:28Z +http://www.semanlink.net/tag/graph_editor|prefLabel|Graph Editor +http://www.semanlink.net/tag/graph_editor|broader|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/graph_editor|creationDate|2014-12-22 +http://www.semanlink.net/tag/graph_editor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_editor|uri|http://www.semanlink.net/tag/graph_editor +http://www.semanlink.net/tag/graph_editor|broader_prefLabel|Graph +http://www.semanlink.net/tag/cascade|creationTime|2013-07-31T10:57:01Z +http://www.semanlink.net/tag/cascade|prefLabel|Cascade +http://www.semanlink.net/tag/cascade|broader|http://www.semanlink.net/tag/eau +http://www.semanlink.net/tag/cascade|creationDate|2013-07-31 +http://www.semanlink.net/tag/cascade|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cascade|uri|http://www.semanlink.net/tag/cascade +http://www.semanlink.net/tag/cascade|broader_prefLabel|Eau 
+http://www.semanlink.net/tag/fecondation|prefLabel|Fécondation +http://www.semanlink.net/tag/fecondation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/fecondation|uri|http://www.semanlink.net/tag/fecondation +http://www.semanlink.net/tag/peak_everything|creationTime|2015-10-18T19:34:09Z +http://www.semanlink.net/tag/peak_everything|prefLabel|Peak Everything +http://www.semanlink.net/tag/peak_everything|creationDate|2015-10-18 +http://www.semanlink.net/tag/peak_everything|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/peak_everything|uri|http://www.semanlink.net/tag/peak_everything +http://www.semanlink.net/tag/film_de_guerre|creationTime|2013-10-14T01:38:59Z +http://www.semanlink.net/tag/film_de_guerre|prefLabel|Film de guerre +http://www.semanlink.net/tag/film_de_guerre|broader|http://www.semanlink.net/tag/film +http://www.semanlink.net/tag/film_de_guerre|creationDate|2013-10-14 +http://www.semanlink.net/tag/film_de_guerre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/film_de_guerre|uri|http://www.semanlink.net/tag/film_de_guerre +http://www.semanlink.net/tag/film_de_guerre|broader_prefLabel|Film +http://www.semanlink.net/tag/tensor2tensor|creationTime|2019-01-21T10:58:30Z +http://www.semanlink.net/tag/tensor2tensor|prefLabel|Tensor2Tensor +http://www.semanlink.net/tag/tensor2tensor|broader|http://www.semanlink.net/tag/tensorflow +http://www.semanlink.net/tag/tensor2tensor|creationDate|2019-01-21 +http://www.semanlink.net/tag/tensor2tensor|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tensor2tensor|uri|http://www.semanlink.net/tag/tensor2tensor +http://www.semanlink.net/tag/tensor2tensor|broader_prefLabel|TensorFlow +http://www.semanlink.net/tag/tensor2tensor|broader_related|http://www.semanlink.net/tag/christopher_olah +http://www.semanlink.net/tag/hypercard|prefLabel|Hypercard +http://www.semanlink.net/tag/hypercard|broader|http://www.semanlink.net/tag/software +http://www.semanlink.net/tag/hypercard|broader|http://www.semanlink.net/tag/apple_software +http://www.semanlink.net/tag/hypercard|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/hypercard|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hypercard|uri|http://www.semanlink.net/tag/hypercard +http://www.semanlink.net/tag/hypercard|broader_prefLabel|Software +http://www.semanlink.net/tag/hypercard|broader_prefLabel|Apple Software +http://www.semanlink.net/tag/hypercard|broader_prefLabel|Apple +http://www.semanlink.net/tag/global_human_sensor_net|creationTime|2009-05-25T17:58:25Z +http://www.semanlink.net/tag/global_human_sensor_net|prefLabel|Global human sensor net +http://www.semanlink.net/tag/global_human_sensor_net|creationDate|2009-05-25 +http://www.semanlink.net/tag/global_human_sensor_net|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/global_human_sensor_net|uri|http://www.semanlink.net/tag/global_human_sensor_net +http://www.semanlink.net/tag/news|prefLabel|News +http://www.semanlink.net/tag/news|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/news|uri|http://www.semanlink.net/tag/news +http://www.semanlink.net/tag/djibouti|creationTime|2008-11-21T23:21:36Z +http://www.semanlink.net/tag/djibouti|prefLabel|Djibouti +http://www.semanlink.net/tag/djibouti|broader|http://www.semanlink.net/tag/afrique_de_l_est 
+http://www.semanlink.net/tag/djibouti|creationDate|2008-11-21 +http://www.semanlink.net/tag/djibouti|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/djibouti|uri|http://www.semanlink.net/tag/djibouti +http://www.semanlink.net/tag/djibouti|broader_prefLabel|Afrique de l'Est +http://www.semanlink.net/tag/bacteries|prefLabel|Bacteria +http://www.semanlink.net/tag/bacteries|broader|http://www.semanlink.net/tag/biologie +http://www.semanlink.net/tag/bacteries|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bacteries|altLabel|Bactéries +http://www.semanlink.net/tag/bacteries|uri|http://www.semanlink.net/tag/bacteries +http://www.semanlink.net/tag/bacteries|broader_prefLabel|Biology +http://www.semanlink.net/tag/bacteries|broader_altLabel|Biologie +http://www.semanlink.net/tag/philosophie|prefLabel|Philosophie +http://www.semanlink.net/tag/philosophie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/philosophie|altLabel|Philosophy +http://www.semanlink.net/tag/philosophie|uri|http://www.semanlink.net/tag/philosophie +http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest|prefLabel|Empires d'Afrique de l'Ouest +http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest|broader|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest|broader|http://www.semanlink.net/tag/histoire_de_l_afrique +http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest|broader|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest|uri|http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest +http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest|broader_prefLabel|Afrique de l'Ouest +http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest|broader_prefLabel|Histoire de l'Afrique +http://www.semanlink.net/tag/empires_d_afrique_de_l_ouest|broader_prefLabel|Afrique de l'ouest : histoire +http://www.semanlink.net/tag/kleenex|prefLabel|Kleenex +http://www.semanlink.net/tag/kleenex|broader|http://www.semanlink.net/tag/societe_de_consommation +http://www.semanlink.net/tag/kleenex|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kleenex|uri|http://www.semanlink.net/tag/kleenex +http://www.semanlink.net/tag/kleenex|broader_prefLabel|Société de consommation +http://www.semanlink.net/tag/graph_visualization|prefLabel|Graph visualization +http://www.semanlink.net/tag/graph_visualization|broader|http://www.semanlink.net/tag/data_visualization_tools +http://www.semanlink.net/tag/graph_visualization|broader|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/graph_visualization|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph_visualization|uri|http://www.semanlink.net/tag/graph_visualization +http://www.semanlink.net/tag/graph_visualization|broader_prefLabel|Visualization Tools +http://www.semanlink.net/tag/graph_visualization|broader_prefLabel|Graph +http://www.semanlink.net/tag/graph_visualization|broader_altLabel|Data Visualization Tools +http://www.semanlink.net/tag/graph_visualization|broader_related|http://www.semanlink.net/tag/information_visualization +http://www.semanlink.net/tag/archeologie_europeenne|prefLabel|Archéologie européenne 
+http://www.semanlink.net/tag/archeologie_europeenne|broader|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/tag/archeologie_europeenne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/archeologie_europeenne|uri|http://www.semanlink.net/tag/archeologie_europeenne +http://www.semanlink.net/tag/archeologie_europeenne|broader_prefLabel|Archéologie +http://www.semanlink.net/tag/archeologie_europeenne|broader_related|http://www.semanlink.net/tag/antiquite +http://www.semanlink.net/tag/maladie_contagieuse|creationTime|2007-12-06T21:40:20Z +http://www.semanlink.net/tag/maladie_contagieuse|prefLabel|Maladie contagieuse +http://www.semanlink.net/tag/maladie_contagieuse|broader|http://www.semanlink.net/tag/maladie +http://www.semanlink.net/tag/maladie_contagieuse|creationDate|2007-12-06 +http://www.semanlink.net/tag/maladie_contagieuse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maladie_contagieuse|uri|http://www.semanlink.net/tag/maladie_contagieuse +http://www.semanlink.net/tag/maladie_contagieuse|broader_prefLabel|Maladie +http://www.semanlink.net/tag/francafrique|prefLabel|Françafrique +http://www.semanlink.net/tag/francafrique|broader|http://www.semanlink.net/tag/france_afrique +http://www.semanlink.net/tag/francafrique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/francafrique|uri|http://www.semanlink.net/tag/francafrique +http://www.semanlink.net/tag/francafrique|broader_prefLabel|France / Afrique +http://www.semanlink.net/tag/bonus|creationTime|2009-08-14T08:58:54Z +http://www.semanlink.net/tag/bonus|prefLabel|Bonus +http://www.semanlink.net/tag/bonus|creationDate|2009-08-14 +http://www.semanlink.net/tag/bonus|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bonus|uri|http://www.semanlink.net/tag/bonus +http://www.semanlink.net/tag/transnets|prefLabel|Transnets +http://www.semanlink.net/tag/transnets|broader|http://www.semanlink.net/tag/journal_le_monde +http://www.semanlink.net/tag/transnets|broader|http://www.semanlink.net/tag/ntic +http://www.semanlink.net/tag/transnets|broader|http://www.semanlink.net/tag/blog +http://www.semanlink.net/tag/transnets|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/transnets|uri|http://www.semanlink.net/tag/transnets +http://www.semanlink.net/tag/transnets|broader_prefLabel|Journal Le Monde +http://www.semanlink.net/tag/transnets|broader_prefLabel|NTIC +http://www.semanlink.net/tag/transnets|broader_prefLabel|Blog +http://www.semanlink.net/tag/matplotlib|creationTime|2017-06-28T14:57:34Z +http://www.semanlink.net/tag/matplotlib|prefLabel|matplotlib +http://www.semanlink.net/tag/matplotlib|broader|http://www.semanlink.net/tag/data_visualization_tools +http://www.semanlink.net/tag/matplotlib|broader|http://www.semanlink.net/tag/python +http://www.semanlink.net/tag/matplotlib|creationDate|2017-06-28 +http://www.semanlink.net/tag/matplotlib|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/matplotlib|describedBy|https://en.wikipedia.org/wiki/Matplotlib +http://www.semanlink.net/tag/matplotlib|uri|http://www.semanlink.net/tag/matplotlib +http://www.semanlink.net/tag/matplotlib|broader_prefLabel|Visualization Tools +http://www.semanlink.net/tag/matplotlib|broader_prefLabel|Python +http://www.semanlink.net/tag/matplotlib|broader_altLabel|Data Visualization Tools 
+http://www.semanlink.net/tag/matplotlib|broader_related|http://www.semanlink.net/tag/information_visualization +http://www.semanlink.net/tag/horreur|prefLabel|Horreur +http://www.semanlink.net/tag/horreur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/horreur|uri|http://www.semanlink.net/tag/horreur +http://www.semanlink.net/tag/media_conversationnel|creationTime|2019-12-15T00:38:53Z +http://www.semanlink.net/tag/media_conversationnel|prefLabel|Média conversationnel +http://www.semanlink.net/tag/media_conversationnel|broader|http://www.semanlink.net/tag/medias +http://www.semanlink.net/tag/media_conversationnel|creationDate|2019-12-15 +http://www.semanlink.net/tag/media_conversationnel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/media_conversationnel|uri|http://www.semanlink.net/tag/media_conversationnel +http://www.semanlink.net/tag/media_conversationnel|broader_prefLabel|Médias +http://www.semanlink.net/tag/howto_tutorial_faq|creationTime|2007-07-07T13:48:03Z +http://www.semanlink.net/tag/howto_tutorial_faq|prefLabel|Howto, tutorial, FAQ +http://www.semanlink.net/tag/howto_tutorial_faq|creationDate|2007-07-07 +http://www.semanlink.net/tag/howto_tutorial_faq|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/howto_tutorial_faq|uri|http://www.semanlink.net/tag/howto_tutorial_faq +http://www.semanlink.net/tag/steve_jobs|prefLabel|Steve Jobs +http://www.semanlink.net/tag/steve_jobs|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/steve_jobs|broader|http://www.semanlink.net/tag/apple +http://www.semanlink.net/tag/steve_jobs|broader|http://www.semanlink.net/tag/homme_celebre +http://www.semanlink.net/tag/steve_jobs|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/steve_jobs|uri|http://www.semanlink.net/tag/steve_jobs +http://www.semanlink.net/tag/steve_jobs|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/steve_jobs|broader_prefLabel|Apple +http://www.semanlink.net/tag/steve_jobs|broader_prefLabel|Homme célèbre +http://www.semanlink.net/tag/steve_jobs|broader_altLabel|Technical guys +http://www.semanlink.net/tag/bnf|prefLabel|BNF +http://www.semanlink.net/tag/bnf|broader|http://www.semanlink.net/tag/bibliotheque +http://www.semanlink.net/tag/bnf|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/bnf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bnf|uri|http://www.semanlink.net/tag/bnf +http://www.semanlink.net/tag/bnf|broader_prefLabel|Bibliothèque +http://www.semanlink.net/tag/bnf|broader_prefLabel|France +http://www.semanlink.net/tag/depense_publique|creationTime|2017-03-11T14:04:10Z +http://www.semanlink.net/tag/depense_publique|prefLabel|Dépense publique +http://www.semanlink.net/tag/depense_publique|creationDate|2017-03-11 +http://www.semanlink.net/tag/depense_publique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/depense_publique|uri|http://www.semanlink.net/tag/depense_publique +http://www.semanlink.net/tag/pillage_de_vestiges_antiques|prefLabel|Pillage de vestiges antiques +http://www.semanlink.net/tag/pillage_de_vestiges_antiques|broader|http://www.semanlink.net/tag/destruction_de_vestiges_antiques +http://www.semanlink.net/tag/pillage_de_vestiges_antiques|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/pillage_de_vestiges_antiques|uri|http://www.semanlink.net/tag/pillage_de_vestiges_antiques +http://www.semanlink.net/tag/pillage_de_vestiges_antiques|broader_prefLabel|Destruction de vestiges antiques +http://www.semanlink.net/tag/semantic_data|creationTime|2013-05-17T13:09:59Z +http://www.semanlink.net/tag/semantic_data|prefLabel|Semantic data +http://www.semanlink.net/tag/semantic_data|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_data|creationDate|2013-05-17 +http://www.semanlink.net/tag/semantic_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_data|uri|http://www.semanlink.net/tag/semantic_data +http://www.semanlink.net/tag/semantic_data|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_data|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_data|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/ign|prefLabel|IGN +http://www.semanlink.net/tag/ign|broader|http://www.semanlink.net/tag/geographie +http://www.semanlink.net/tag/ign|creationDate|2006-09-23 +http://www.semanlink.net/tag/ign|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ign|uri|http://www.semanlink.net/tag/ign +http://www.semanlink.net/tag/ign|broader_prefLabel|Géographie +http://www.semanlink.net/tag/spurl|prefLabel|spurl +http://www.semanlink.net/tag/spurl|broader|http://www.semanlink.net/tag/social_bookmarking +http://www.semanlink.net/tag/spurl|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/spurl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/spurl|uri|http://www.semanlink.net/tag/spurl +http://www.semanlink.net/tag/spurl|broader_prefLabel|Social bookmarking +http://www.semanlink.net/tag/spurl|broader_prefLabel|Tagging +http://www.semanlink.net/tag/ai_amazon|creationTime|2020-03-19T21:34:00Z +http://www.semanlink.net/tag/ai_amazon|prefLabel|AI@Amazon +http://www.semanlink.net/tag/ai_amazon|broader|http://www.semanlink.net/tag/ai_teams +http://www.semanlink.net/tag/ai_amazon|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_amazon|creationDate|2020-03-19 +http://www.semanlink.net/tag/ai_amazon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_amazon|uri|http://www.semanlink.net/tag/ai_amazon +http://www.semanlink.net/tag/ai_amazon|broader_prefLabel|AI teams +http://www.semanlink.net/tag/ai_amazon|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_amazon|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_amazon|broader_altLabel|AI +http://www.semanlink.net/tag/ai_amazon|broader_altLabel|IA +http://www.semanlink.net/tag/ai_amazon|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/magie_noire|creationTime|2014-10-28T23:01:53Z +http://www.semanlink.net/tag/magie_noire|prefLabel|Magie noire +http://www.semanlink.net/tag/magie_noire|broader|http://www.semanlink.net/tag/magie +http://www.semanlink.net/tag/magie_noire|creationDate|2014-10-28 +http://www.semanlink.net/tag/magie_noire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/magie_noire|uri|http://www.semanlink.net/tag/magie_noire +http://www.semanlink.net/tag/magie_noire|broader_prefLabel|Magie +http://www.semanlink.net/tag/enfants|prefLabel|Enfants 
+http://www.semanlink.net/tag/enfants|creationDate|2006-12-30 +http://www.semanlink.net/tag/enfants|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/enfants|uri|http://www.semanlink.net/tag/enfants +http://www.semanlink.net/tag/mutualart_com|creationTime|2008-03-29T18:55:51Z +http://www.semanlink.net/tag/mutualart_com|prefLabel|MutualArt.com +http://www.semanlink.net/tag/mutualart_com|broader|http://www.semanlink.net/tag/semantic_web_business +http://www.semanlink.net/tag/mutualart_com|creationDate|2008-03-29 +http://www.semanlink.net/tag/mutualart_com|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mutualart_com|uri|http://www.semanlink.net/tag/mutualart_com +http://www.semanlink.net/tag/mutualart_com|broader_prefLabel|Semantic Web : Business +http://www.semanlink.net/tag/recherche_francaise|prefLabel|France : recherche +http://www.semanlink.net/tag/recherche_francaise|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/recherche_francaise|broader|http://www.semanlink.net/tag/recherche +http://www.semanlink.net/tag/recherche_francaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/recherche_francaise|altLabel|Recherche française +http://www.semanlink.net/tag/recherche_francaise|uri|http://www.semanlink.net/tag/recherche_francaise +http://www.semanlink.net/tag/recherche_francaise|broader_prefLabel|France +http://www.semanlink.net/tag/recherche_francaise|broader_prefLabel|Recherche +http://www.semanlink.net/tag/recherche_francaise|broader_altLabel|Research +http://www.semanlink.net/tag/web_notebook|prefLabel|Web notebook +http://www.semanlink.net/tag/web_notebook|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_notebook|uri|http://www.semanlink.net/tag/web_notebook +http://www.semanlink.net/tag/debug_deep_learning|creationTime|2020-10-07T08:35:18Z +http://www.semanlink.net/tag/debug_deep_learning|prefLabel|Debug Deep Learning +http://www.semanlink.net/tag/debug_deep_learning|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/debug_deep_learning|creationDate|2020-10-07 +http://www.semanlink.net/tag/debug_deep_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/debug_deep_learning|uri|http://www.semanlink.net/tag/debug_deep_learning +http://www.semanlink.net/tag/debug_deep_learning|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/debug_deep_learning|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/debug_deep_learning|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/heliosphere|creationTime|2008-05-17T14:02:28Z +http://www.semanlink.net/tag/heliosphere|prefLabel|Héliosphère +http://www.semanlink.net/tag/heliosphere|broader|http://www.semanlink.net/tag/systeme_solaire +http://www.semanlink.net/tag/heliosphere|creationDate|2008-05-17 +http://www.semanlink.net/tag/heliosphere|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/heliosphere|uri|http://www.semanlink.net/tag/heliosphere +http://www.semanlink.net/tag/heliosphere|broader_prefLabel|Système solaire +http://www.semanlink.net/tag/1984|prefLabel|1984 +http://www.semanlink.net/tag/1984|broader|http://www.semanlink.net/tag/orwell +http://www.semanlink.net/tag/1984|broader|http://www.semanlink.net/tag/big_brother 
+http://www.semanlink.net/tag/1984|creationDate|2006-10-18 +http://www.semanlink.net/tag/1984|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/1984|uri|http://www.semanlink.net/tag/1984 +http://www.semanlink.net/tag/1984|broader_prefLabel|Orwell +http://www.semanlink.net/tag/1984|broader_prefLabel|Big Brother +http://www.semanlink.net/tag/eswc_2014|creationTime|2014-10-06T00:24:18Z +http://www.semanlink.net/tag/eswc_2014|prefLabel|ESWC 2014 +http://www.semanlink.net/tag/eswc_2014|broader|http://www.semanlink.net/tag/eswc +http://www.semanlink.net/tag/eswc_2014|creationDate|2014-10-06 +http://www.semanlink.net/tag/eswc_2014|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eswc_2014|uri|http://www.semanlink.net/tag/eswc_2014 +http://www.semanlink.net/tag/eswc_2014|broader_prefLabel|ESWC +http://www.semanlink.net/tag/words|creationTime|2017-11-24T01:43:52Z +http://www.semanlink.net/tag/words|prefLabel|Words +http://www.semanlink.net/tag/words|broader|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/words|creationDate|2017-11-24 +http://www.semanlink.net/tag/words|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/words|uri|http://www.semanlink.net/tag/words +http://www.semanlink.net/tag/words|broader_prefLabel|Langues +http://www.semanlink.net/tag/energie_sombre|prefLabel|Energie sombre +http://www.semanlink.net/tag/energie_sombre|broader|http://www.semanlink.net/tag/physique +http://www.semanlink.net/tag/energie_sombre|broader|http://www.semanlink.net/tag/masse_manquante +http://www.semanlink.net/tag/energie_sombre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/energie_sombre|uri|http://www.semanlink.net/tag/energie_sombre +http://www.semanlink.net/tag/energie_sombre|broader_prefLabel|Physique +http://www.semanlink.net/tag/energie_sombre|broader_prefLabel|Masse manquante +http://www.semanlink.net/tag/energie_sombre|broader_altLabel|Physics +http://www.semanlink.net/tag/energie_sombre|broader_altLabel|Missing Matter +http://www.semanlink.net/tag/histoire_de_france|prefLabel|Histoire de France +http://www.semanlink.net/tag/histoire_de_france|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/histoire_de_france|broader|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/tag/histoire_de_france|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_de_france|uri|http://www.semanlink.net/tag/histoire_de_france +http://www.semanlink.net/tag/histoire_de_france|broader_prefLabel|France +http://www.semanlink.net/tag/histoire_de_france|broader_prefLabel|Histoire +http://www.semanlink.net/tag/trafic_de_drogue|creationTime|2013-01-22T22:28:46Z +http://www.semanlink.net/tag/trafic_de_drogue|prefLabel|Trafic de drogue +http://www.semanlink.net/tag/trafic_de_drogue|creationDate|2013-01-22 +http://www.semanlink.net/tag/trafic_de_drogue|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/trafic_de_drogue|uri|http://www.semanlink.net/tag/trafic_de_drogue +http://www.semanlink.net/tag/self_organizing_systems|creationTime|2011-06-22T15:14:34Z +http://www.semanlink.net/tag/self_organizing_systems|prefLabel|Self-organizing systems +http://www.semanlink.net/tag/self_organizing_systems|creationDate|2011-06-22 +http://www.semanlink.net/tag/self_organizing_systems|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/self_organizing_systems|uri|http://www.semanlink.net/tag/self_organizing_systems +http://www.semanlink.net/tag/tepuys|prefLabel|Tepuys +http://www.semanlink.net/tag/tepuys|broader|http://www.semanlink.net/tag/venezuela +http://www.semanlink.net/tag/tepuys|comment|Le mont Roraima, situé au sud-est du Venezuela, recèle une flore et une faune étranges. Les Tepuys, nom donné par les Indiens à d'immenses blocs de grès, recèlent de nombreuses espèces rares, à l'image de la grenouille noire. Surréalistes, ces rochers donnent également à voir des formes d'animaux ou de visages humains, sculptés par l'érosion. +http://www.semanlink.net/tag/tepuys|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tepuys|describedBy|https://en.wikipedia.org/wiki/Tepui +http://www.semanlink.net/tag/tepuys|uri|http://www.semanlink.net/tag/tepuys +http://www.semanlink.net/tag/tepuys|broader_prefLabel|Venezuela +http://www.semanlink.net/tag/hortefeux|creationTime|2008-04-07T21:29:56Z +http://www.semanlink.net/tag/hortefeux|prefLabel|Hortefeux +http://www.semanlink.net/tag/hortefeux|broader|http://www.semanlink.net/tag/gouvernement_sarkozy +http://www.semanlink.net/tag/hortefeux|creationDate|2008-04-07 +http://www.semanlink.net/tag/hortefeux|comment|"""Ministre de l'immigration et de l'identité nationale""" +http://www.semanlink.net/tag/hortefeux|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/hortefeux|uri|http://www.semanlink.net/tag/hortefeux +http://www.semanlink.net/tag/hortefeux|broader_prefLabel|Gouvernement Sarkozy +http://www.semanlink.net/tag/graph|prefLabel|Graph +http://www.semanlink.net/tag/graph|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/graph|uri|http://www.semanlink.net/tag/graph +http://www.semanlink.net/tag/pologne|prefLabel|Pologne +http://www.semanlink.net/tag/pologne|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/pologne|broader|http://www.semanlink.net/tag/pays_d_europe +http://www.semanlink.net/tag/pologne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pologne|uri|http://www.semanlink.net/tag/pologne +http://www.semanlink.net/tag/pologne|broader_prefLabel|Europe +http://www.semanlink.net/tag/pologne|broader_prefLabel|Pays d'Europe +http://www.semanlink.net/tag/migrations_humaines|prefLabel|Migrations humaines +http://www.semanlink.net/tag/migrations_humaines|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/migrations_humaines|uri|http://www.semanlink.net/tag/migrations_humaines +http://www.semanlink.net/tag/robotique|prefLabel|Robotique +http://www.semanlink.net/tag/robotique|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/robotique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/robotique|altLabel|Robotics +http://www.semanlink.net/tag/robotique|altLabel|Robot +http://www.semanlink.net/tag/robotique|uri|http://www.semanlink.net/tag/robotique +http://www.semanlink.net/tag/robotique|broader_prefLabel|Technologie +http://www.semanlink.net/tag/crete|prefLabel|Crète +http://www.semanlink.net/tag/crete|broader|http://www.semanlink.net/tag/grece +http://www.semanlink.net/tag/crete|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/crete|uri|http://www.semanlink.net/tag/crete +http://www.semanlink.net/tag/crete|broader_prefLabel|Grèce 
+http://www.semanlink.net/tag/smart_contracts|creationTime|2016-06-20T14:26:39Z +http://www.semanlink.net/tag/smart_contracts|prefLabel|Smart contracts +http://www.semanlink.net/tag/smart_contracts|broader|http://www.semanlink.net/tag/blockchain +http://www.semanlink.net/tag/smart_contracts|creationDate|2016-06-20 +http://www.semanlink.net/tag/smart_contracts|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/smart_contracts|uri|http://www.semanlink.net/tag/smart_contracts +http://www.semanlink.net/tag/smart_contracts|broader_prefLabel|Blockchain +http://www.semanlink.net/tag/english_grammar|creationTime|2013-02-12T16:19:27Z +http://www.semanlink.net/tag/english_grammar|prefLabel|English-grammar +http://www.semanlink.net/tag/english_grammar|broader|http://www.semanlink.net/tag/anglais +http://www.semanlink.net/tag/english_grammar|creationDate|2013-02-12 +http://www.semanlink.net/tag/english_grammar|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/english_grammar|uri|http://www.semanlink.net/tag/english_grammar +http://www.semanlink.net/tag/english_grammar|broader_prefLabel|Anglais +http://www.semanlink.net/tag/english_grammar|broader_altLabel|English +http://www.semanlink.net/tag/paul_graham|prefLabel|Paul Graham +http://www.semanlink.net/tag/paul_graham|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/paul_graham|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paul_graham|uri|http://www.semanlink.net/tag/paul_graham +http://www.semanlink.net/tag/paul_graham|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/paul_graham|broader_altLabel|Technical guys +http://www.semanlink.net/tag/unesco|prefLabel|UNESCO +http://www.semanlink.net/tag/unesco|broader|http://www.semanlink.net/tag/institutions_internationales +http://www.semanlink.net/tag/unesco|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/unesco|uri|http://www.semanlink.net/tag/unesco +http://www.semanlink.net/tag/unesco|broader_prefLabel|Institutions internationales +http://www.semanlink.net/tag/access_control|creationTime|2010-12-06T16:25:28Z +http://www.semanlink.net/tag/access_control|prefLabel|Access Control +http://www.semanlink.net/tag/access_control|creationDate|2010-12-06 +http://www.semanlink.net/tag/access_control|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/access_control|uri|http://www.semanlink.net/tag/access_control +http://www.semanlink.net/tag/glacier|prefLabel|Glacier +http://www.semanlink.net/tag/glacier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/glacier|uri|http://www.semanlink.net/tag/glacier +http://www.semanlink.net/tag/coree_du_sud|prefLabel|Corée du Sud +http://www.semanlink.net/tag/coree_du_sud|broader|http://www.semanlink.net/tag/coree +http://www.semanlink.net/tag/coree_du_sud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coree_du_sud|altLabel|South Korea +http://www.semanlink.net/tag/coree_du_sud|uri|http://www.semanlink.net/tag/coree_du_sud +http://www.semanlink.net/tag/coree_du_sud|broader_prefLabel|Corée +http://www.semanlink.net/tag/digital_humanities|creationTime|2013-08-20T16:33:35Z +http://www.semanlink.net/tag/digital_humanities|prefLabel|Digital Humanities +http://www.semanlink.net/tag/digital_humanities|creationDate|2013-08-20 
+http://www.semanlink.net/tag/digital_humanities|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/digital_humanities|uri|http://www.semanlink.net/tag/digital_humanities
+http://www.semanlink.net/tag/medias|prefLabel|Médias
+http://www.semanlink.net/tag/medias|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/medias|uri|http://www.semanlink.net/tag/medias
+http://www.semanlink.net/tag/bresil|prefLabel|Brésil
+http://www.semanlink.net/tag/bresil|broader|http://www.semanlink.net/tag/amerique_du_sud
+http://www.semanlink.net/tag/bresil|broader|http://www.semanlink.net/tag/amerique
+http://www.semanlink.net/tag/bresil|related|http://www.semanlink.net/tag/saudade
+http://www.semanlink.net/tag/bresil|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/bresil|uri|http://www.semanlink.net/tag/bresil
+http://www.semanlink.net/tag/bresil|broader_prefLabel|Amérique du sud
+http://www.semanlink.net/tag/bresil|broader_prefLabel|Amérique
+http://www.semanlink.net/tag/pnud|prefLabel|PNUD
+http://www.semanlink.net/tag/pnud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/pnud|uri|http://www.semanlink.net/tag/pnud
+http://www.semanlink.net/tag/catastrophe_naturelle|prefLabel|Catastrophe naturelle
+http://www.semanlink.net/tag/catastrophe_naturelle|broader|http://www.semanlink.net/tag/catastrophe
+http://www.semanlink.net/tag/catastrophe_naturelle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/catastrophe_naturelle|uri|http://www.semanlink.net/tag/catastrophe_naturelle
+http://www.semanlink.net/tag/catastrophe_naturelle|broader_prefLabel|Catastrophe
+http://www.semanlink.net/tag/crochemelier|creationTime|2021-08-09T22:38:05Z
+http://www.semanlink.net/tag/crochemelier|prefLabel|Crochemelier
+http://www.semanlink.net/tag/crochemelier|broader|http://www.semanlink.net/tag/archeologie_percheronne
+http://www.semanlink.net/tag/crochemelier|broader|http://www.semanlink.net/tag/cjnn
+http://www.semanlink.net/tag/crochemelier|creationDate|2021-08-09
+http://www.semanlink.net/tag/crochemelier|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/crochemelier|uri|http://www.semanlink.net/tag/crochemelier
+http://www.semanlink.net/tag/crochemelier|broader_prefLabel|Archeologie percheronne
+http://www.semanlink.net/tag/crochemelier|broader_prefLabel|CJNN
+http://www.semanlink.net/tag/desert|prefLabel|Désert
+http://www.semanlink.net/tag/desert|broader|http://www.semanlink.net/tag/geographie
+http://www.semanlink.net/tag/desert|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/desert|uri|http://www.semanlink.net/tag/desert
+http://www.semanlink.net/tag/desert|broader_prefLabel|Géographie
+http://www.semanlink.net/tag/biotechnologies|prefLabel|Biotechnologies
+http://www.semanlink.net/tag/biotechnologies|broader|http://www.semanlink.net/tag/technologie
+http://www.semanlink.net/tag/biotechnologies|broader|http://www.semanlink.net/tag/biologie
+http://www.semanlink.net/tag/biotechnologies|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag
+http://www.semanlink.net/tag/biotechnologies|uri|http://www.semanlink.net/tag/biotechnologies
+http://www.semanlink.net/tag/biotechnologies|broader_prefLabel|Technologie
+http://www.semanlink.net/tag/biotechnologies|broader_prefLabel|Biology
+http://www.semanlink.net/tag/biotechnologies|broader_altLabel|Biologie +http://www.semanlink.net/tag/marketing|prefLabel|Marketing +http://www.semanlink.net/tag/marketing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/marketing|uri|http://www.semanlink.net/tag/marketing +http://www.semanlink.net/tag/text_embeddings|creationTime|2018-04-14T11:37:02Z +http://www.semanlink.net/tag/text_embeddings|prefLabel|Text Embeddings +http://www.semanlink.net/tag/text_embeddings|broader|http://www.semanlink.net/tag/nlp_text_representation +http://www.semanlink.net/tag/text_embeddings|broader|http://www.semanlink.net/tag/embeddings_in_nlp +http://www.semanlink.net/tag/text_embeddings|related|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/tag/text_embeddings|creationDate|2018-04-14 +http://www.semanlink.net/tag/text_embeddings|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/text_embeddings|uri|http://www.semanlink.net/tag/text_embeddings +http://www.semanlink.net/tag/text_embeddings|broader_prefLabel|NLP: Text Representation +http://www.semanlink.net/tag/text_embeddings|broader_prefLabel|Embeddings in NLP +http://www.semanlink.net/tag/text_embeddings|broader_altLabel|Text Representation +http://www.semanlink.net/tag/text_embeddings|broader_related|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/tag/indiens_du_bresil|prefLabel|Indiens du Brésil +http://www.semanlink.net/tag/indiens_du_bresil|broader|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/tag/indiens_du_bresil|broader|http://www.semanlink.net/tag/amerindien +http://www.semanlink.net/tag/indiens_du_bresil|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/indiens_du_bresil|uri|http://www.semanlink.net/tag/indiens_du_bresil +http://www.semanlink.net/tag/indiens_du_bresil|broader_prefLabel|Brésil +http://www.semanlink.net/tag/indiens_du_bresil|broader_prefLabel|Amérindien +http://www.semanlink.net/tag/indiens_du_bresil|broader_altLabel|Native americans +http://www.semanlink.net/tag/indiens_du_bresil|broader_related|http://www.semanlink.net/tag/saudade +http://www.semanlink.net/tag/ai_startups|creationTime|2019-05-07T11:30:31Z +http://www.semanlink.net/tag/ai_startups|prefLabel|AI: startups +http://www.semanlink.net/tag/ai_startups|broader|http://www.semanlink.net/tag/startups +http://www.semanlink.net/tag/ai_startups|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/ai_startups|creationDate|2019-05-07 +http://www.semanlink.net/tag/ai_startups|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ai_startups|uri|http://www.semanlink.net/tag/ai_startups +http://www.semanlink.net/tag/ai_startups|broader_prefLabel|Startups +http://www.semanlink.net/tag/ai_startups|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/ai_startups|broader_altLabel|Startup +http://www.semanlink.net/tag/ai_startups|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/ai_startups|broader_altLabel|AI +http://www.semanlink.net/tag/ai_startups|broader_altLabel|IA +http://www.semanlink.net/tag/ai_startups|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/parfum|creationTime|2010-12-04T15:20:06Z +http://www.semanlink.net/tag/parfum|prefLabel|Parfum +http://www.semanlink.net/tag/parfum|creationDate|2010-12-04 
+http://www.semanlink.net/tag/parfum|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/parfum|uri|http://www.semanlink.net/tag/parfum +http://www.semanlink.net/tag/tag_cloud|prefLabel|Tag cloud +http://www.semanlink.net/tag/tag_cloud|broader|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/tag/tag_cloud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tag_cloud|uri|http://www.semanlink.net/tag/tag_cloud +http://www.semanlink.net/tag/tag_cloud|broader_prefLabel|Tagging +http://www.semanlink.net/tag/groenland|prefLabel|Groenland +http://www.semanlink.net/tag/groenland|broader|http://www.semanlink.net/tag/arctique +http://www.semanlink.net/tag/groenland|broader|http://www.semanlink.net/tag/danemark +http://www.semanlink.net/tag/groenland|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/groenland|uri|http://www.semanlink.net/tag/groenland +http://www.semanlink.net/tag/groenland|broader_prefLabel|Arctique +http://www.semanlink.net/tag/groenland|broader_prefLabel|Danemark +http://www.semanlink.net/tag/israel|prefLabel|Israël +http://www.semanlink.net/tag/israel|broader|http://www.semanlink.net/tag/juif +http://www.semanlink.net/tag/israel|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/israel|uri|http://www.semanlink.net/tag/israel +http://www.semanlink.net/tag/israel|broader_prefLabel|Juifs +http://www.semanlink.net/tag/speculation|creationTime|2010-05-21T21:23:13Z +http://www.semanlink.net/tag/speculation|prefLabel|Spéculation +http://www.semanlink.net/tag/speculation|broader|http://www.semanlink.net/tag/finance +http://www.semanlink.net/tag/speculation|creationDate|2010-05-21 +http://www.semanlink.net/tag/speculation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/speculation|uri|http://www.semanlink.net/tag/speculation +http://www.semanlink.net/tag/speculation|broader_prefLabel|Finance +http://www.semanlink.net/tag/mais|prefLabel|Maïs +http://www.semanlink.net/tag/mais|broader|http://www.semanlink.net/tag/agriculture +http://www.semanlink.net/tag/mais|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mais|uri|http://www.semanlink.net/tag/mais +http://www.semanlink.net/tag/mais|broader_prefLabel|Agriculture +http://www.semanlink.net/tag/capitalistes|prefLabel|Capitalistes +http://www.semanlink.net/tag/capitalistes|broader|http://www.semanlink.net/tag/capitalisme +http://www.semanlink.net/tag/capitalistes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/capitalistes|uri|http://www.semanlink.net/tag/capitalistes +http://www.semanlink.net/tag/capitalistes|broader_prefLabel|Capitalisme +http://www.semanlink.net/tag/ecole|prefLabel|Ecole +http://www.semanlink.net/tag/ecole|broader|http://www.semanlink.net/tag/education +http://www.semanlink.net/tag/ecole|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ecole|uri|http://www.semanlink.net/tag/ecole +http://www.semanlink.net/tag/ecole|broader_prefLabel|Education +http://www.semanlink.net/tag/ecole|broader_altLabel|Enseignement +http://www.semanlink.net/tag/histoire_de_l_asie|creationTime|2016-10-05T21:58:18Z +http://www.semanlink.net/tag/histoire_de_l_asie|prefLabel|Histoire de l'Asie +http://www.semanlink.net/tag/histoire_de_l_asie|broader|http://www.semanlink.net/tag/histoire 
+http://www.semanlink.net/tag/histoire_de_l_asie|creationDate|2016-10-05 +http://www.semanlink.net/tag/histoire_de_l_asie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_de_l_asie|uri|http://www.semanlink.net/tag/histoire_de_l_asie +http://www.semanlink.net/tag/histoire_de_l_asie|broader_prefLabel|Histoire +http://www.semanlink.net/tag/tutorial|prefLabel|Tutorial +http://www.semanlink.net/tag/tutorial|broader|http://www.semanlink.net/tag/howto_tutorial_faq +http://www.semanlink.net/tag/tutorial|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/tutorial|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tutorial|uri|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/tag/tutorial|broader_prefLabel|Howto, tutorial, FAQ +http://www.semanlink.net/tag/tutorial|broader_prefLabel|Dev +http://www.semanlink.net/tag/public_data|creationTime|2011-04-05T10:15:08Z +http://www.semanlink.net/tag/public_data|prefLabel|Public data +http://www.semanlink.net/tag/public_data|creationDate|2011-04-05 +http://www.semanlink.net/tag/public_data|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/public_data|uri|http://www.semanlink.net/tag/public_data +http://www.semanlink.net/tag/sparql|prefLabel|SPARQL +http://www.semanlink.net/tag/sparql|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/sparql|comment|Backed by the flexibility of the RDF data model, and consisting of both a query language and data access protocol SPARQL has the potential to become a key component in Web 2.0 applications. SPARQL could provide a common query language for all Web 2.0 applications. +http://www.semanlink.net/tag/sparql|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sparql|uri|http://www.semanlink.net/tag/sparql +http://www.semanlink.net/tag/sparql|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/sparql|broader_altLabel|sw +http://www.semanlink.net/tag/sparql|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/deep_learning_implementing|creationTime|2018-04-10T13:34:26Z +http://www.semanlink.net/tag/deep_learning_implementing|prefLabel|Deep learning: implementing +http://www.semanlink.net/tag/deep_learning_implementing|broader|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/tag/deep_learning_implementing|creationDate|2018-04-10 +http://www.semanlink.net/tag/deep_learning_implementing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deep_learning_implementing|uri|http://www.semanlink.net/tag/deep_learning_implementing +http://www.semanlink.net/tag/deep_learning_implementing|broader_prefLabel|Deep Learning +http://www.semanlink.net/tag/deep_learning_implementing|broader_related|http://www.semanlink.net/tag/feature_extraction +http://www.semanlink.net/tag/deep_learning_implementing|broader_related|http://www.semanlink.net/tag/feature_learning +http://www.semanlink.net/tag/encrypted_media_extensions|creationTime|2017-02-28T14:01:29Z +http://www.semanlink.net/tag/encrypted_media_extensions|prefLabel|Encrypted Media Extensions +http://www.semanlink.net/tag/encrypted_media_extensions|broader|http://www.semanlink.net/tag/drm_in_html_5 +http://www.semanlink.net/tag/encrypted_media_extensions|creationDate|2017-02-28 +http://www.semanlink.net/tag/encrypted_media_extensions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag 
+http://www.semanlink.net/tag/encrypted_media_extensions|uri|http://www.semanlink.net/tag/encrypted_media_extensions +http://www.semanlink.net/tag/encrypted_media_extensions|broader_prefLabel|DRM in HTML 5 +http://www.semanlink.net/tag/craig_venter|prefLabel|Craig Venter +http://www.semanlink.net/tag/craig_venter|broader|http://www.semanlink.net/tag/celera_ou_craig_venter +http://www.semanlink.net/tag/craig_venter|broader|http://www.semanlink.net/tag/scientifique +http://www.semanlink.net/tag/craig_venter|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/craig_venter|uri|http://www.semanlink.net/tag/craig_venter +http://www.semanlink.net/tag/craig_venter|broader_prefLabel|Celera ou Craig Venter +http://www.semanlink.net/tag/craig_venter|broader_prefLabel|Scientifique +http://www.semanlink.net/tag/craig_venter|broader_altLabel|Savant +http://www.semanlink.net/tag/data_web|prefLabel|Web of data +http://www.semanlink.net/tag/data_web|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/data_web|uri|http://www.semanlink.net/tag/data_web +http://www.semanlink.net/tag/cyc|prefLabel|Cyc +http://www.semanlink.net/tag/cyc|broader|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/tag/cyc|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/cyc|uri|http://www.semanlink.net/tag/cyc +http://www.semanlink.net/tag/cyc|broader_prefLabel|Artificial Intelligence +http://www.semanlink.net/tag/cyc|broader_altLabel|Intelligence Artificielle +http://www.semanlink.net/tag/cyc|broader_altLabel|AI +http://www.semanlink.net/tag/cyc|broader_altLabel|IA +http://www.semanlink.net/tag/cyc|broader_related|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/tag/turing|prefLabel|Turing +http://www.semanlink.net/tag/turing|broader|http://www.semanlink.net/tag/mathematicien +http://www.semanlink.net/tag/turing|broader|http://www.semanlink.net/tag/scientifique +http://www.semanlink.net/tag/turing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/turing|uri|http://www.semanlink.net/tag/turing +http://www.semanlink.net/tag/turing|broader_prefLabel|Mathématicien +http://www.semanlink.net/tag/turing|broader_prefLabel|Scientifique +http://www.semanlink.net/tag/turing|broader_altLabel|Savant +http://www.semanlink.net/tag/megalithes|creationTime|2019-08-29T20:59:02Z +http://www.semanlink.net/tag/megalithes|prefLabel|Mégalithes +http://www.semanlink.net/tag/megalithes|creationDate|2019-08-29 +http://www.semanlink.net/tag/megalithes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/megalithes|uri|http://www.semanlink.net/tag/megalithes +http://www.semanlink.net/tag/petition|prefLabel|Pétition +http://www.semanlink.net/tag/petition|creationDate|2006-12-29 +http://www.semanlink.net/tag/petition|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/petition|uri|http://www.semanlink.net/tag/petition +http://www.semanlink.net/tag/oregon|creationTime|2007-09-11T21:38:59Z +http://www.semanlink.net/tag/oregon|prefLabel|Oregon +http://www.semanlink.net/tag/oregon|broader|http://www.semanlink.net/tag/usa +http://www.semanlink.net/tag/oregon|creationDate|2007-09-11 +http://www.semanlink.net/tag/oregon|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/oregon|uri|http://www.semanlink.net/tag/oregon 
+http://www.semanlink.net/tag/oregon|broader_prefLabel|USA +http://www.semanlink.net/tag/oregon|broader_altLabel|ÉTATS-UNIS +http://www.semanlink.net/tag/oregon|broader_altLabel|United States +http://www.semanlink.net/tag/frog|creationTime|2013-03-18T22:05:51Z +http://www.semanlink.net/tag/frog|prefLabel|Frog +http://www.semanlink.net/tag/frog|broader|http://www.semanlink.net/tag/animal +http://www.semanlink.net/tag/frog|creationDate|2013-03-18 +http://www.semanlink.net/tag/frog|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/frog|uri|http://www.semanlink.net/tag/frog +http://www.semanlink.net/tag/frog|broader_prefLabel|Animal +http://www.semanlink.net/tag/tools|prefLabel|Tools +http://www.semanlink.net/tag/tools|broader|http://www.semanlink.net/tag/dev +http://www.semanlink.net/tag/tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/tools|uri|http://www.semanlink.net/tag/tools +http://www.semanlink.net/tag/tools|broader_prefLabel|Dev +http://www.semanlink.net/tag/digital_video|prefLabel|Digital Video +http://www.semanlink.net/tag/digital_video|broader|http://www.semanlink.net/tag/technologie +http://www.semanlink.net/tag/digital_video|broader|http://www.semanlink.net/tag/digital_media +http://www.semanlink.net/tag/digital_video|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/digital_video|uri|http://www.semanlink.net/tag/digital_video +http://www.semanlink.net/tag/digital_video|broader_prefLabel|Technologie +http://www.semanlink.net/tag/digital_video|broader_prefLabel|Digital Media +http://www.semanlink.net/tag/javascript_and_tutorial|creationTime|2007-11-27T14:23:30Z +http://www.semanlink.net/tag/javascript_and_tutorial|prefLabel|JavaScript Tutorial +http://www.semanlink.net/tag/javascript_and_tutorial|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/javascript_and_tutorial|broader|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/tag/javascript_and_tutorial|creationDate|2007-11-27 +http://www.semanlink.net/tag/javascript_and_tutorial|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/javascript_and_tutorial|uri|http://www.semanlink.net/tag/javascript_and_tutorial +http://www.semanlink.net/tag/javascript_and_tutorial|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/javascript_and_tutorial|broader_prefLabel|Tutorial +http://www.semanlink.net/tag/javascript_and_tutorial|broader_altLabel|js +http://www.semanlink.net/tag/likelihood|creationTime|2018-12-07T08:40:25Z +http://www.semanlink.net/tag/likelihood|prefLabel|Likelihood +http://www.semanlink.net/tag/likelihood|creationDate|2018-12-07 +http://www.semanlink.net/tag/likelihood|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/likelihood|uri|http://www.semanlink.net/tag/likelihood +http://www.semanlink.net/tag/rss_extensions|prefLabel|RSS extensions +http://www.semanlink.net/tag/rss_extensions|broader|http://www.semanlink.net/tag/rss +http://www.semanlink.net/tag/rss_extensions|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/rss_extensions|uri|http://www.semanlink.net/tag/rss_extensions +http://www.semanlink.net/tag/rss_extensions|broader_prefLabel|RSS +http://www.semanlink.net/tag/conquete_spatiale|prefLabel|Conquête spatiale +http://www.semanlink.net/tag/conquete_spatiale|broader|http://www.semanlink.net/tag/espace 
+http://www.semanlink.net/tag/conquete_spatiale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/conquete_spatiale|uri|http://www.semanlink.net/tag/conquete_spatiale +http://www.semanlink.net/tag/conquete_spatiale|broader_prefLabel|Espace +http://www.semanlink.net/tag/conquete_spatiale|broader_altLabel|Space +http://www.semanlink.net/tag/learning_english|creationTime|2012-08-05T15:56:55Z +http://www.semanlink.net/tag/learning_english|prefLabel|Learning english +http://www.semanlink.net/tag/learning_english|broader|http://www.semanlink.net/tag/anglais +http://www.semanlink.net/tag/learning_english|creationDate|2012-08-05 +http://www.semanlink.net/tag/learning_english|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/learning_english|uri|http://www.semanlink.net/tag/learning_english +http://www.semanlink.net/tag/learning_english|broader_prefLabel|Anglais +http://www.semanlink.net/tag/learning_english|broader_altLabel|English +http://www.semanlink.net/tag/constitution_europeenne|prefLabel|Constitution européenne +http://www.semanlink.net/tag/constitution_europeenne|broader|http://www.semanlink.net/tag/institutions_europeennes +http://www.semanlink.net/tag/constitution_europeenne|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/constitution_europeenne|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/constitution_europeenne|uri|http://www.semanlink.net/tag/constitution_europeenne +http://www.semanlink.net/tag/constitution_europeenne|broader_prefLabel|Institutions européennes +http://www.semanlink.net/tag/constitution_europeenne|broader_prefLabel|Europe +http://www.semanlink.net/tag/outsourcing|prefLabel|Outsourcing +http://www.semanlink.net/tag/outsourcing|broader|http://www.semanlink.net/tag/delocalisations +http://www.semanlink.net/tag/outsourcing|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/outsourcing|uri|http://www.semanlink.net/tag/outsourcing +http://www.semanlink.net/tag/outsourcing|broader_prefLabel|Délocalisations +http://www.semanlink.net/tag/language_learning|creationTime|2020-09-17T23:48:34Z +http://www.semanlink.net/tag/language_learning|prefLabel|Language learning +http://www.semanlink.net/tag/language_learning|broader|http://www.semanlink.net/tag/langage +http://www.semanlink.net/tag/language_learning|creationDate|2020-09-17 +http://www.semanlink.net/tag/language_learning|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/language_learning|uri|http://www.semanlink.net/tag/language_learning +http://www.semanlink.net/tag/language_learning|broader_prefLabel|Language +http://www.semanlink.net/tag/language_learning|broader_altLabel|Langage +http://www.semanlink.net/tag/center_media_microsoft|prefLabel|Microsoft Media Center +http://www.semanlink.net/tag/center_media_microsoft|broader|http://www.semanlink.net/tag/microsoft +http://www.semanlink.net/tag/center_media_microsoft|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/center_media_microsoft|uri|http://www.semanlink.net/tag/center_media_microsoft +http://www.semanlink.net/tag/center_media_microsoft|broader_prefLabel|Microsoft +http://www.semanlink.net/tag/center_media_microsoft|broader_related|http://www.semanlink.net/tag/bill_gates +http://www.semanlink.net/tag/acl_2020|creationTime|2020-06-15T23:10:11Z +http://www.semanlink.net/tag/acl_2020|prefLabel|ACL 2020 
+http://www.semanlink.net/tag/acl_2020|broader|http://www.semanlink.net/tag/acl +http://www.semanlink.net/tag/acl_2020|creationDate|2020-06-15 +http://www.semanlink.net/tag/acl_2020|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/acl_2020|uri|http://www.semanlink.net/tag/acl_2020 +http://www.semanlink.net/tag/acl_2020|broader_prefLabel|ACL +http://www.semanlink.net/tag/linked_data_demo|creationTime|2008-09-12T14:27:42Z +http://www.semanlink.net/tag/linked_data_demo|prefLabel|Linked Data demo +http://www.semanlink.net/tag/linked_data_demo|broader|http://www.semanlink.net/tag/sw_demo +http://www.semanlink.net/tag/linked_data_demo|broader|http://www.semanlink.net/tag/linked_data +http://www.semanlink.net/tag/linked_data_demo|creationDate|2008-09-12 +http://www.semanlink.net/tag/linked_data_demo|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/linked_data_demo|uri|http://www.semanlink.net/tag/linked_data_demo +http://www.semanlink.net/tag/linked_data_demo|broader_prefLabel|SW demo +http://www.semanlink.net/tag/linked_data_demo|broader_prefLabel|Linked Data +http://www.semanlink.net/tag/linked_data_demo|broader_altLabel|LD +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/yves_raymond +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/frederick_giasson +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/huge_rdf_data_source +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/chris_bizer +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/jamendo +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/richard_cyganiak +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/httprange_14 +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/musicbrainz +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/kingsley_idehen +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/giovanni_tummarello +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/tom_heath +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/rdf_data_source +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/tim_berners_lee +http://www.semanlink.net/tag/linked_data_demo|broader_related|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.semanlink.net/tag/mac_mini|prefLabel|Mac Mini +http://www.semanlink.net/tag/mac_mini|broader|http://www.semanlink.net/tag/macintosh +http://www.semanlink.net/tag/mac_mini|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mac_mini|uri|http://www.semanlink.net/tag/mac_mini +http://www.semanlink.net/tag/mac_mini|broader_prefLabel|Macintosh +http://www.semanlink.net/tag/sumer|prefLabel|Sumer +http://www.semanlink.net/tag/sumer|broader|http://www.semanlink.net/tag/mesopotamie +http://www.semanlink.net/tag/sumer|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sumer|uri|http://www.semanlink.net/tag/sumer +http://www.semanlink.net/tag/sumer|broader_prefLabel|Mésopotamie 
+http://www.semanlink.net/tag/sw_online_tools|creationTime|2009-04-01T01:45:18Z +http://www.semanlink.net/tag/sw_online_tools|prefLabel|SW online tools +http://www.semanlink.net/tag/sw_online_tools|broader|http://www.semanlink.net/tag/semantic_web_tools +http://www.semanlink.net/tag/sw_online_tools|creationDate|2009-04-01 +http://www.semanlink.net/tag/sw_online_tools|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/sw_online_tools|uri|http://www.semanlink.net/tag/sw_online_tools +http://www.semanlink.net/tag/sw_online_tools|broader_prefLabel|Semantic Web : Tools +http://www.semanlink.net/tag/relativite_generale|prefLabel|Relativité générale +http://www.semanlink.net/tag/relativite_generale|broader|http://www.semanlink.net/tag/relativite +http://www.semanlink.net/tag/relativite_generale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/relativite_generale|uri|http://www.semanlink.net/tag/relativite_generale +http://www.semanlink.net/tag/relativite_generale|broader_prefLabel|Relativité +http://www.semanlink.net/tag/societe_francaise|prefLabel|Société française +http://www.semanlink.net/tag/societe_francaise|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/societe_francaise|broader|http://www.semanlink.net/tag/societe +http://www.semanlink.net/tag/societe_francaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/societe_francaise|uri|http://www.semanlink.net/tag/societe_francaise +http://www.semanlink.net/tag/societe_francaise|broader_prefLabel|France +http://www.semanlink.net/tag/societe_francaise|broader_prefLabel|Société +http://www.semanlink.net/tag/nike|creationTime|2013-08-27T13:48:57Z +http://www.semanlink.net/tag/nike|prefLabel|Nike +http://www.semanlink.net/tag/nike|broader|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/nike|creationDate|2013-08-27 +http://www.semanlink.net/tag/nike|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nike|describedBy|https://en.wikipedia.org/wiki/Nike,_Inc.
+http://www.semanlink.net/tag/nike|uri|http://www.semanlink.net/tag/nike +http://www.semanlink.net/tag/nike|broader_prefLabel|Entreprise +http://www.semanlink.net/tag/blogs_le_monde|prefLabel|Blogs Le Monde +http://www.semanlink.net/tag/blogs_le_monde|broader|http://www.semanlink.net/tag/journal_le_monde +http://www.semanlink.net/tag/blogs_le_monde|broader|http://www.semanlink.net/tag/blog +http://www.semanlink.net/tag/blogs_le_monde|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/blogs_le_monde|uri|http://www.semanlink.net/tag/blogs_le_monde +http://www.semanlink.net/tag/blogs_le_monde|broader_prefLabel|Journal Le Monde +http://www.semanlink.net/tag/blogs_le_monde|broader_prefLabel|Blog +http://www.semanlink.net/tag/ssl|creationTime|2010-05-04T08:57:10Z +http://www.semanlink.net/tag/ssl|prefLabel|SSL +http://www.semanlink.net/tag/ssl|broader|http://www.semanlink.net/tag/access_control +http://www.semanlink.net/tag/ssl|creationDate|2010-05-04 +http://www.semanlink.net/tag/ssl|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ssl|uri|http://www.semanlink.net/tag/ssl +http://www.semanlink.net/tag/ssl|broader_prefLabel|Access Control +http://www.semanlink.net/tag/jardin|prefLabel|Jardin +http://www.semanlink.net/tag/jardin|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jardin|uri|http://www.semanlink.net/tag/jardin +http://www.semanlink.net/tag/population_mondiale|prefLabel|Population mondiale +http://www.semanlink.net/tag/population_mondiale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/population_mondiale|uri|http://www.semanlink.net/tag/population_mondiale +http://www.semanlink.net/tag/eminem|prefLabel|Eminem +http://www.semanlink.net/tag/eminem|broader|http://www.semanlink.net/tag/rap +http://www.semanlink.net/tag/eminem|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eminem|uri|http://www.semanlink.net/tag/eminem +http://www.semanlink.net/tag/eminem|broader_prefLabel|Rap +http://www.semanlink.net/tag/literary_criticism|creationTime|2013-08-20T16:34:07Z +http://www.semanlink.net/tag/literary_criticism|prefLabel|Literary criticism +http://www.semanlink.net/tag/literary_criticism|creationDate|2013-08-20 +http://www.semanlink.net/tag/literary_criticism|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/literary_criticism|uri|http://www.semanlink.net/tag/literary_criticism +http://www.semanlink.net/tag/paradoxe|creationTime|2012-09-21T21:15:38Z +http://www.semanlink.net/tag/paradoxe|prefLabel|Paradoxe +http://www.semanlink.net/tag/paradoxe|creationDate|2012-09-21 +http://www.semanlink.net/tag/paradoxe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/paradoxe|uri|http://www.semanlink.net/tag/paradoxe +http://www.semanlink.net/tag/delinquance|prefLabel|Délinquance +http://www.semanlink.net/tag/delinquance|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/delinquance|uri|http://www.semanlink.net/tag/delinquance +http://www.semanlink.net/tag/zones_intertropicales|prefLabel|Zones intertropicales +http://www.semanlink.net/tag/zones_intertropicales|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/zones_intertropicales|uri|http://www.semanlink.net/tag/zones_intertropicales 
+http://www.semanlink.net/tag/json_ld_apis|creationTime|2016-06-26T13:53:39Z +http://www.semanlink.net/tag/json_ld_apis|prefLabel|JSON-LD APIs +http://www.semanlink.net/tag/json_ld_apis|broader|http://www.semanlink.net/tag/json_ld +http://www.semanlink.net/tag/json_ld_apis|creationDate|2016-06-26 +http://www.semanlink.net/tag/json_ld_apis|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/json_ld_apis|uri|http://www.semanlink.net/tag/json_ld_apis +http://www.semanlink.net/tag/json_ld_apis|broader_prefLabel|JSON-LD +http://www.semanlink.net/tag/json_ld_apis|broader_altLabel|JSONLD +http://www.semanlink.net/tag/benjamin_heinzerling|creationTime|2020-01-10T17:16:12Z +http://www.semanlink.net/tag/benjamin_heinzerling|prefLabel|Benjamin Heinzerling +http://www.semanlink.net/tag/benjamin_heinzerling|broader|http://www.semanlink.net/tag/nlp_girls_and_guys +http://www.semanlink.net/tag/benjamin_heinzerling|creationDate|2020-01-10 +http://www.semanlink.net/tag/benjamin_heinzerling|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/benjamin_heinzerling|uri|http://www.semanlink.net/tag/benjamin_heinzerling +http://www.semanlink.net/tag/benjamin_heinzerling|broader_prefLabel|NLP girls and guys +http://www.semanlink.net/tag/mexique|prefLabel|Mexique +http://www.semanlink.net/tag/mexique|broader|http://www.semanlink.net/tag/amerique +http://www.semanlink.net/tag/mexique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mexique|uri|http://www.semanlink.net/tag/mexique +http://www.semanlink.net/tag/mexique|broader_prefLabel|Amérique +http://www.semanlink.net/tag/kenya|prefLabel|Kenya +http://www.semanlink.net/tag/kenya|broader|http://www.semanlink.net/tag/afrique_de_l_est +http://www.semanlink.net/tag/kenya|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/kenya|uri|http://www.semanlink.net/tag/kenya +http://www.semanlink.net/tag/kenya|broader_prefLabel|Afrique de l'Est +http://www.semanlink.net/tag/art|prefLabel|Art +http://www.semanlink.net/tag/art|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/art|uri|http://www.semanlink.net/tag/art +http://www.semanlink.net/tag/manipulation|prefLabel|Manipulation +http://www.semanlink.net/tag/manipulation|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/manipulation|uri|http://www.semanlink.net/tag/manipulation +http://www.semanlink.net/tag/coupe_du_monde_2006|prefLabel|Coupe du monde 2006 +http://www.semanlink.net/tag/coupe_du_monde_2006|broader|http://www.semanlink.net/tag/coupe_du_monde_de_football +http://www.semanlink.net/tag/coupe_du_monde_2006|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coupe_du_monde_2006|uri|http://www.semanlink.net/tag/coupe_du_monde_2006 +http://www.semanlink.net/tag/coupe_du_monde_2006|broader_prefLabel|Coupe du monde de football +http://www.semanlink.net/tag/parc_du_w|prefLabel|Parc du W +http://www.semanlink.net/tag/parc_du_w|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/parc_du_w|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/parc_du_w|uri|http://www.semanlink.net/tag/parc_du_w +http://www.semanlink.net/tag/parc_du_w|broader_prefLabel|Niger +http://www.semanlink.net/tag/parc_du_w|broader_related|http://www.semanlink.net/tag/sahel 
+http://www.semanlink.net/tag/parc_du_w|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/parc_du_w|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/complexite|creationTime|2017-07-05T18:36:24Z +http://www.semanlink.net/tag/complexite|prefLabel|Complexité +http://www.semanlink.net/tag/complexite|creationDate|2017-07-05 +http://www.semanlink.net/tag/complexite|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/complexite|uri|http://www.semanlink.net/tag/complexite +http://www.semanlink.net/tag/histoire_du_xxe_siecle|creationTime|2009-06-17T01:00:45Z +http://www.semanlink.net/tag/histoire_du_xxe_siecle|prefLabel|Histoire du XXe siècle +http://www.semanlink.net/tag/histoire_du_xxe_siecle|creationDate|2009-06-17 +http://www.semanlink.net/tag/histoire_du_xxe_siecle|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/histoire_du_xxe_siecle|uri|http://www.semanlink.net/tag/histoire_du_xxe_siecle +http://www.semanlink.net/tag/economie_francaise|prefLabel|Economie française +http://www.semanlink.net/tag/economie_francaise|broader|http://www.semanlink.net/tag/france +http://www.semanlink.net/tag/economie_francaise|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/economie_francaise|altLabel|Economie France +http://www.semanlink.net/tag/economie_francaise|uri|http://www.semanlink.net/tag/economie_francaise +http://www.semanlink.net/tag/economie_francaise|broader_prefLabel|France +http://www.semanlink.net/tag/caf|creationTime|2014-06-16T09:11:50Z +http://www.semanlink.net/tag/caf|prefLabel|CAF +http://www.semanlink.net/tag/caf|creationDate|2014-06-16 +http://www.semanlink.net/tag/caf|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/caf|uri|http://www.semanlink.net/tag/caf +http://www.semanlink.net/tag/nature|prefLabel|Nature +http://www.semanlink.net/tag/nature|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nature|uri|http://www.semanlink.net/tag/nature +http://www.semanlink.net/tag/red_hat|prefLabel|Red Hat +http://www.semanlink.net/tag/red_hat|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/red_hat|uri|http://www.semanlink.net/tag/red_hat +http://www.semanlink.net/tag/semantic_web_web_2_0|creationTime|2008-06-12T08:33:13Z +http://www.semanlink.net/tag/semantic_web_web_2_0|prefLabel|Semantic Web / Web 2.0 +http://www.semanlink.net/tag/semantic_web_web_2_0|broader|http://www.semanlink.net/tag/web_2_0 +http://www.semanlink.net/tag/semantic_web_web_2_0|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_web_web_2_0|creationDate|2008-06-12 +http://www.semanlink.net/tag/semantic_web_web_2_0|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_web_web_2_0|uri|http://www.semanlink.net/tag/semantic_web_web_2_0 +http://www.semanlink.net/tag/semantic_web_web_2_0|broader_prefLabel|Web 2.0 +http://www.semanlink.net/tag/semantic_web_web_2_0|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_web_web_2_0|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_web_web_2_0|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/technorati|prefLabel|Technorati +http://www.semanlink.net/tag/technorati|broader|http://www.semanlink.net/tag/tagging 
+http://www.semanlink.net/tag/technorati|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/technorati|uri|http://www.semanlink.net/tag/technorati +http://www.semanlink.net/tag/technorati|broader_prefLabel|Tagging +http://www.semanlink.net/tag/ours_polaire|prefLabel|Ours polaire +http://www.semanlink.net/tag/ours_polaire|broader|http://www.semanlink.net/tag/ours +http://www.semanlink.net/tag/ours_polaire|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ours_polaire|uri|http://www.semanlink.net/tag/ours_polaire +http://www.semanlink.net/tag/ours_polaire|broader_prefLabel|Ours +http://www.semanlink.net/tag/boudhisme|creationTime|2007-08-06T17:23:46Z +http://www.semanlink.net/tag/boudhisme|prefLabel|Boudhisme +http://www.semanlink.net/tag/boudhisme|creationDate|2007-08-06 +http://www.semanlink.net/tag/boudhisme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/boudhisme|uri|http://www.semanlink.net/tag/boudhisme +http://www.semanlink.net/tag/semantic_components|creationTime|2010-07-08T15:11:41Z +http://www.semanlink.net/tag/semantic_components|prefLabel|Semantic Components +http://www.semanlink.net/tag/semantic_components|creationDate|2010-07-08 +http://www.semanlink.net/tag/semantic_components|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_components|uri|http://www.semanlink.net/tag/semantic_components +http://www.semanlink.net/tag/killer_app|creationTime|2014-09-16T19:01:47Z +http://www.semanlink.net/tag/killer_app|prefLabel|Killer App +http://www.semanlink.net/tag/killer_app|creationDate|2014-09-16 +http://www.semanlink.net/tag/killer_app|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/killer_app|uri|http://www.semanlink.net/tag/killer_app +http://www.semanlink.net/tag/serpent|creationTime|2020-05-12T12:51:24Z +http://www.semanlink.net/tag/serpent|prefLabel|Serpent +http://www.semanlink.net/tag/serpent|broader|http://www.semanlink.net/tag/reptile +http://www.semanlink.net/tag/serpent|creationDate|2020-05-12 +http://www.semanlink.net/tag/serpent|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/serpent|uri|http://www.semanlink.net/tag/serpent +http://www.semanlink.net/tag/serpent|broader_prefLabel|Reptile +http://www.semanlink.net/tag/human_in_the_loop|creationTime|2020-01-07T12:31:25Z +http://www.semanlink.net/tag/human_in_the_loop|prefLabel|Human in the loop +http://www.semanlink.net/tag/human_in_the_loop|creationDate|2020-01-07 +http://www.semanlink.net/tag/human_in_the_loop|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/human_in_the_loop|uri|http://www.semanlink.net/tag/human_in_the_loop +http://www.semanlink.net/tag/dvd|prefLabel|DVD +http://www.semanlink.net/tag/dvd|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/dvd|uri|http://www.semanlink.net/tag/dvd +http://www.semanlink.net/tag/planet_under_pressure|prefLabel|Planet under pressure +http://www.semanlink.net/tag/planet_under_pressure|broader|http://www.semanlink.net/tag/ecologie +http://www.semanlink.net/tag/planet_under_pressure|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/planet_under_pressure|uri|http://www.semanlink.net/tag/planet_under_pressure +http://www.semanlink.net/tag/planet_under_pressure|broader_prefLabel|Écologie 
+http://www.semanlink.net/tag/mind_control|creationTime|2016-03-30T02:11:58Z +http://www.semanlink.net/tag/mind_control|prefLabel|Mind control +http://www.semanlink.net/tag/mind_control|creationDate|2016-03-30 +http://www.semanlink.net/tag/mind_control|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mind_control|uri|http://www.semanlink.net/tag/mind_control +http://www.semanlink.net/tag/methodes_agiles|creationTime|2012-12-22T14:46:30Z +http://www.semanlink.net/tag/methodes_agiles|prefLabel|Méthodes agiles +http://www.semanlink.net/tag/methodes_agiles|broader|http://www.semanlink.net/tag/management +http://www.semanlink.net/tag/methodes_agiles|creationDate|2012-12-22 +http://www.semanlink.net/tag/methodes_agiles|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/methodes_agiles|uri|http://www.semanlink.net/tag/methodes_agiles +http://www.semanlink.net/tag/methodes_agiles|broader_prefLabel|Management +http://www.semanlink.net/tag/ted_nelson|prefLabel|Ted Nelson +http://www.semanlink.net/tag/ted_nelson|broader|http://www.semanlink.net/tag/technical_girls_and_guys +http://www.semanlink.net/tag/ted_nelson|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ted_nelson|uri|http://www.semanlink.net/tag/ted_nelson +http://www.semanlink.net/tag/ted_nelson|broader_prefLabel|Technical girls and guys +http://www.semanlink.net/tag/ted_nelson|broader_altLabel|Technical guys +http://www.semanlink.net/tag/couleur|creationTime|2014-11-27T13:45:09Z +http://www.semanlink.net/tag/couleur|prefLabel|Couleur +http://www.semanlink.net/tag/couleur|creationDate|2014-11-27 +http://www.semanlink.net/tag/couleur|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/couleur|uri|http://www.semanlink.net/tag/couleur +http://www.semanlink.net/tag/maria|prefLabel|Maria +http://www.semanlink.net/tag/maria|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/maria|uri|http://www.semanlink.net/tag/maria +http://www.semanlink.net/tag/jeux|prefLabel|Jeux +http://www.semanlink.net/tag/jeux|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/jeux|uri|http://www.semanlink.net/tag/jeux +http://www.semanlink.net/tag/orange|prefLabel|Orange (telecom) +http://www.semanlink.net/tag/orange|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/orange|uri|http://www.semanlink.net/tag/orange +http://www.semanlink.net/tag/nlp_book|creationTime|2018-03-05T18:09:27Z +http://www.semanlink.net/tag/nlp_book|prefLabel|NLP: book +http://www.semanlink.net/tag/nlp_book|broader|http://www.semanlink.net/tag/ai_book +http://www.semanlink.net/tag/nlp_book|creationDate|2018-03-05 +http://www.semanlink.net/tag/nlp_book|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/nlp_book|uri|http://www.semanlink.net/tag/nlp_book +http://www.semanlink.net/tag/nlp_book|broader_prefLabel|AI: books & journals +http://www.semanlink.net/tag/ethiopie|prefLabel|Ethiopie +http://www.semanlink.net/tag/ethiopie|broader|http://www.semanlink.net/tag/afrique_de_l_est +http://www.semanlink.net/tag/ethiopie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/ethiopie|uri|http://www.semanlink.net/tag/ethiopie +http://www.semanlink.net/tag/ethiopie|broader_prefLabel|Afrique de l'Est +http://www.semanlink.net/tag/web_services_critique|prefLabel|Web services : critique
+http://www.semanlink.net/tag/web_services_critique|broader|http://www.semanlink.net/tag/web_services +http://www.semanlink.net/tag/web_services_critique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/web_services_critique|uri|http://www.semanlink.net/tag/web_services_critique +http://www.semanlink.net/tag/web_services_critique|broader_prefLabel|Web Services +http://www.semanlink.net/tag/web_services_critique|broader_altLabel|WS +http://www.semanlink.net/tag/iguane|creationTime|2020-05-12T12:51:31Z +http://www.semanlink.net/tag/iguane|prefLabel|Iguane +http://www.semanlink.net/tag/iguane|broader|http://www.semanlink.net/tag/reptile +http://www.semanlink.net/tag/iguane|creationDate|2020-05-12 +http://www.semanlink.net/tag/iguane|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/iguane|uri|http://www.semanlink.net/tag/iguane +http://www.semanlink.net/tag/iguane|broader_prefLabel|Reptile +http://www.semanlink.net/tag/eruption_volcanique|prefLabel|Eruption volcanique +http://www.semanlink.net/tag/eruption_volcanique|broader|http://www.semanlink.net/tag/volcan +http://www.semanlink.net/tag/eruption_volcanique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/eruption_volcanique|uri|http://www.semanlink.net/tag/eruption_volcanique +http://www.semanlink.net/tag/eruption_volcanique|broader_prefLabel|Volcan +http://www.semanlink.net/tag/industrie|prefLabel|industrie +http://www.semanlink.net/tag/industrie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/industrie|uri|http://www.semanlink.net/tag/industrie +http://www.semanlink.net/tag/semantic_networks|prefLabel|Semantic Networks +http://www.semanlink.net/tag/semantic_networks|broader|http://www.semanlink.net/tag/semantic_web +http://www.semanlink.net/tag/semantic_networks|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/semantic_networks|uri|http://www.semanlink.net/tag/semantic_networks +http://www.semanlink.net/tag/semantic_networks|broader_prefLabel|Semantic Web +http://www.semanlink.net/tag/semantic_networks|broader_altLabel|sw +http://www.semanlink.net/tag/semantic_networks|broader_altLabel|Web sémantique +http://www.semanlink.net/tag/batteries|prefLabel|Batteries +http://www.semanlink.net/tag/batteries|creationDate|2006-09-26 +http://www.semanlink.net/tag/batteries|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/batteries|uri|http://www.semanlink.net/tag/batteries +http://www.semanlink.net/tag/net_neutrality|creationTime|2009-05-29T00:14:57Z +http://www.semanlink.net/tag/net_neutrality|prefLabel|Net Neutrality +http://www.semanlink.net/tag/net_neutrality|broader|http://www.semanlink.net/tag/internet +http://www.semanlink.net/tag/net_neutrality|creationDate|2009-05-29 +http://www.semanlink.net/tag/net_neutrality|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/net_neutrality|uri|http://www.semanlink.net/tag/net_neutrality +http://www.semanlink.net/tag/net_neutrality|broader_prefLabel|Internet +http://www.semanlink.net/tag/soudan|prefLabel|Soudan +http://www.semanlink.net/tag/soudan|broader|http://www.semanlink.net/tag/afrique_de_l_est +http://www.semanlink.net/tag/soudan|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/soudan|uri|http://www.semanlink.net/tag/soudan
+http://www.semanlink.net/tag/soudan|broader_prefLabel|Afrique de l'Est +http://www.semanlink.net/tag/pulsar|prefLabel|Pulsar +http://www.semanlink.net/tag/pulsar|broader|http://www.semanlink.net/tag/astrophysique +http://www.semanlink.net/tag/pulsar|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pulsar|uri|http://www.semanlink.net/tag/pulsar +http://www.semanlink.net/tag/pulsar|broader_prefLabel|Astrophysique +http://www.semanlink.net/tag/download_execute_javascript|creationTime|2012-07-13T02:06:04Z +http://www.semanlink.net/tag/download_execute_javascript|prefLabel|Download & Execute Javascript +http://www.semanlink.net/tag/download_execute_javascript|broader|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/tag/download_execute_javascript|creationDate|2012-07-13 +http://www.semanlink.net/tag/download_execute_javascript|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/download_execute_javascript|uri|http://www.semanlink.net/tag/download_execute_javascript +http://www.semanlink.net/tag/download_execute_javascript|broader_prefLabel|JavaScript +http://www.semanlink.net/tag/download_execute_javascript|broader_altLabel|js +http://www.semanlink.net/tag/langues|prefLabel|Langues +http://www.semanlink.net/tag/langues|broader|http://www.semanlink.net/tag/langage +http://www.semanlink.net/tag/langues|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/langues|uri|http://www.semanlink.net/tag/langues +http://www.semanlink.net/tag/langues|broader_prefLabel|Language +http://www.semanlink.net/tag/langues|broader_altLabel|Langage +http://www.semanlink.net/tag/a_suivre|prefLabel|A suivre +http://www.semanlink.net/tag/a_suivre|broader|http://www.semanlink.net/tag/todo_list +http://www.semanlink.net/tag/a_suivre|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/a_suivre|uri|http://www.semanlink.net/tag/a_suivre +http://www.semanlink.net/tag/a_suivre|broader_prefLabel|Todo list +http://www.semanlink.net/tag/anthropocene|creationTime|2011-01-17T21:47:49Z +http://www.semanlink.net/tag/anthropocene|prefLabel|Anthropocène +http://www.semanlink.net/tag/anthropocene|creationDate|2011-01-17 +http://www.semanlink.net/tag/anthropocene|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/anthropocene|uri|http://www.semanlink.net/tag/anthropocene +http://www.semanlink.net/tag/marie_jo_perec|prefLabel|Marie-Jo Pérec +http://www.semanlink.net/tag/marie_jo_perec|broader|http://www.semanlink.net/tag/sportif +http://www.semanlink.net/tag/marie_jo_perec|broader|http://www.semanlink.net/tag/athletisme +http://www.semanlink.net/tag/marie_jo_perec|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/marie_jo_perec|uri|http://www.semanlink.net/tag/marie_jo_perec +http://www.semanlink.net/tag/marie_jo_perec|broader_prefLabel|Sportif +http://www.semanlink.net/tag/marie_jo_perec|broader_prefLabel|Athlétisme +http://www.semanlink.net/tag/lisp|prefLabel|Lisp +http://www.semanlink.net/tag/lisp|broader|http://www.semanlink.net/tag/programming_language +http://www.semanlink.net/tag/lisp|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/lisp|uri|http://www.semanlink.net/tag/lisp +http://www.semanlink.net/tag/lisp|broader_prefLabel|Programming language +http://www.semanlink.net/tag/lisp|broader_altLabel|Langage de programmation 
+http://www.semanlink.net/tag/inegalites|prefLabel|Inégalités +http://www.semanlink.net/tag/inegalites|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/inegalites|uri|http://www.semanlink.net/tag/inegalites +http://www.semanlink.net/tag/exoplanetes|prefLabel|Exoplanètes +http://www.semanlink.net/tag/exoplanetes|broader|http://www.semanlink.net/tag/astronomie +http://www.semanlink.net/tag/exoplanetes|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/exoplanetes|uri|http://www.semanlink.net/tag/exoplanetes +http://www.semanlink.net/tag/exoplanetes|broader_prefLabel|Astronomie +http://www.semanlink.net/tag/mashups|prefLabel|Mashups +http://www.semanlink.net/tag/mashups|broader|http://www.semanlink.net/tag/web_2_0 +http://www.semanlink.net/tag/mashups|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mashups|uri|http://www.semanlink.net/tag/mashups +http://www.semanlink.net/tag/mashups|broader_prefLabel|Web 2.0 +http://www.semanlink.net/tag/pbs_program|prefLabel|PBS program +http://www.semanlink.net/tag/pbs_program|broader|http://www.semanlink.net/tag/pbs +http://www.semanlink.net/tag/pbs_program|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pbs_program|uri|http://www.semanlink.net/tag/pbs_program +http://www.semanlink.net/tag/pbs_program|broader_prefLabel|PBS +http://www.semanlink.net/tag/critique_de_la_societe_occidentale|prefLabel|Critique de la société occidentale +http://www.semanlink.net/tag/critique_de_la_societe_occidentale|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/critique_de_la_societe_occidentale|uri|http://www.semanlink.net/tag/critique_de_la_societe_occidentale +http://www.semanlink.net/tag/declin_de_l_europe|prefLabel|Déclin de l'Europe +http://www.semanlink.net/tag/declin_de_l_europe|broader|http://www.semanlink.net/tag/europe +http://www.semanlink.net/tag/declin_de_l_europe|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/declin_de_l_europe|uri|http://www.semanlink.net/tag/declin_de_l_europe +http://www.semanlink.net/tag/declin_de_l_europe|broader_prefLabel|Europe +http://www.semanlink.net/tag/passwords|prefLabel|Passwords +http://www.semanlink.net/tag/passwords|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/passwords|uri|http://www.semanlink.net/tag/passwords +http://www.semanlink.net/tag/deloitte|creationTime|2018-06-07T23:58:40Z +http://www.semanlink.net/tag/deloitte|prefLabel|Deloitte +http://www.semanlink.net/tag/deloitte|creationDate|2018-06-07 +http://www.semanlink.net/tag/deloitte|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/deloitte|uri|http://www.semanlink.net/tag/deloitte +http://www.semanlink.net/tag/unix|prefLabel|Unix +http://www.semanlink.net/tag/unix|broader|http://www.semanlink.net/tag/os +http://www.semanlink.net/tag/unix|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/unix|uri|http://www.semanlink.net/tag/unix +http://www.semanlink.net/tag/unix|broader_prefLabel|OS +http://www.semanlink.net/tag/marchands_d_arme|creationTime|2013-09-15T14:30:23Z +http://www.semanlink.net/tag/marchands_d_arme|prefLabel|Marchands d'arme +http://www.semanlink.net/tag/marchands_d_arme|broader|http://www.semanlink.net/tag/industrie_de_l_armement 
+http://www.semanlink.net/tag/marchands_d_arme|creationDate|2013-09-15 +http://www.semanlink.net/tag/marchands_d_arme|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/marchands_d_arme|uri|http://www.semanlink.net/tag/marchands_d_arme +http://www.semanlink.net/tag/marchands_d_arme|broader_prefLabel|Industrie de l'armement +http://www.semanlink.net/tag/soleil|prefLabel|Soleil +http://www.semanlink.net/tag/soleil|broader|http://www.semanlink.net/tag/systeme_solaire +http://www.semanlink.net/tag/soleil|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/soleil|uri|http://www.semanlink.net/tag/soleil +http://www.semanlink.net/tag/soleil|broader_prefLabel|Système solaire +http://www.semanlink.net/tag/photos_du_niger|prefLabel|Photos du Niger +http://www.semanlink.net/tag/photos_du_niger|broader|http://www.semanlink.net/tag/niger +http://www.semanlink.net/tag/photos_du_niger|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/photos_du_niger|uri|http://www.semanlink.net/tag/photos_du_niger +http://www.semanlink.net/tag/photos_du_niger|broader_prefLabel|Niger +http://www.semanlink.net/tag/photos_du_niger|broader_related|http://www.semanlink.net/tag/sahel +http://www.semanlink.net/tag/photos_du_niger|broader_related|http://www.semanlink.net/tag/mali +http://www.semanlink.net/tag/photos_du_niger|broader_related|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/tag/pib|prefLabel|PIB +http://www.semanlink.net/tag/pib|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/pib|uri|http://www.semanlink.net/tag/pib +http://www.semanlink.net/tag/bourse|prefLabel|Bourse +http://www.semanlink.net/tag/bourse|broader|http://www.semanlink.net/tag/economie +http://www.semanlink.net/tag/bourse|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/bourse|uri|http://www.semanlink.net/tag/bourse +http://www.semanlink.net/tag/bourse|broader_prefLabel|Economie +http://www.semanlink.net/tag/bourse|broader_related|http://www.semanlink.net/tag/entreprise +http://www.semanlink.net/tag/g8|prefLabel|G8 +http://www.semanlink.net/tag/g8|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/g8|uri|http://www.semanlink.net/tag/g8 +http://www.semanlink.net/tag/indonesie|prefLabel|Indonésie +http://www.semanlink.net/tag/indonesie|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/indonesie|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/indonesie|uri|http://www.semanlink.net/tag/indonesie +http://www.semanlink.net/tag/indonesie|broader_prefLabel|Asie +http://www.semanlink.net/tag/coree|prefLabel|Corée +http://www.semanlink.net/tag/coree|broader|http://www.semanlink.net/tag/asie +http://www.semanlink.net/tag/coree|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/coree|uri|http://www.semanlink.net/tag/coree +http://www.semanlink.net/tag/coree|broader_prefLabel|Asie +http://www.semanlink.net/tag/boube_gado|prefLabel|Boube Gado +http://www.semanlink.net/tag/boube_gado|broader|http://www.semanlink.net/tag/archeologie_du_niger +http://www.semanlink.net/tag/boube_gado|broader|http://www.semanlink.net/tag/archeologue +http://www.semanlink.net/tag/boube_gado|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/boube_gado|uri|http://www.semanlink.net/tag/boube_gado 
+http://www.semanlink.net/tag/boube_gado|broader_prefLabel|Archéologie du Niger +http://www.semanlink.net/tag/boube_gado|broader_prefLabel|Archéologue +http://www.semanlink.net/tag/boube_gado|broader_related|http://www.semanlink.net/tag/anne_haour +http://www.semanlink.net/tag/amerique_du_sud|creationTime|2009-03-10T23:06:37Z +http://www.semanlink.net/tag/amerique_du_sud|prefLabel|Amérique du sud +http://www.semanlink.net/tag/amerique_du_sud|broader|http://www.semanlink.net/tag/amerique_latine +http://www.semanlink.net/tag/amerique_du_sud|creationDate|2009-03-10 +http://www.semanlink.net/tag/amerique_du_sud|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/amerique_du_sud|uri|http://www.semanlink.net/tag/amerique_du_sud +http://www.semanlink.net/tag/amerique_du_sud|broader_prefLabel|Amérique latine +http://www.semanlink.net/tag/antarctique|prefLabel|Antarctique +http://www.semanlink.net/tag/antarctique|broader|http://www.semanlink.net/tag/regions_polaires +http://www.semanlink.net/tag/antarctique|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/antarctique|uri|http://www.semanlink.net/tag/antarctique +http://www.semanlink.net/tag/antarctique|broader_prefLabel|Régions polaires +http://www.semanlink.net/tag/mysql|prefLabel|MySQL +http://www.semanlink.net/tag/mysql|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/mysql|uri|http://www.semanlink.net/tag/mysql +http://www.semanlink.net/tag/orstom|prefLabel|ORSTOM +http://www.semanlink.net/tag/orstom|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/orstom|uri|http://www.semanlink.net/tag/orstom +http://www.semanlink.net/tag/gado|prefLabel|Gado +http://www.semanlink.net/tag/gado|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/gado|uri|http://www.semanlink.net/tag/gado +http://www.semanlink.net/tag/senegal|prefLabel|Sénégal +http://www.semanlink.net/tag/senegal|type|http://www.semanlink.net/2001/00/semanlink-schema#Tag +http://www.semanlink.net/tag/senegal|uri|http://www.semanlink.net/tag/senegal +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-401/iswc2008pd_submission_14.pdf|creationDate|2009-04-15 +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-401/iswc2008pd_submission_14.pdf|tag|http://www.semanlink.net/tag/bnf +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-401/iswc2008pd_submission_14.pdf|tag|http://www.semanlink.net/tag/semantic_web_application +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-401/iswc2008pd_submission_14.pdf|tag|http://www.semanlink.net/tag/virtuoso +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-401/iswc2008pd_submission_14.pdf|title|Semantic Web technologies for digital preservation : the SPAR project +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-401/iswc2008pd_submission_14.pdf|creationTime|2009-04-15T22:30:58Z +https://fasttext.cc/docs/en/cheatsheet.html|creationDate|2019-01-29 +https://fasttext.cc/docs/en/cheatsheet.html|tag|http://www.semanlink.net/tag/fasttext +https://fasttext.cc/docs/en/cheatsheet.html|tag|http://www.semanlink.net/tag/cheat_sheet +https://fasttext.cc/docs/en/cheatsheet.html|title|Cheatsheet · fastText +https://fasttext.cc/docs/en/cheatsheet.html|creationTime|2019-01-29T15:06:30Z +https://arxiv.org/abs/1404.5367|creationDate|2018-05-22 +https://arxiv.org/abs/1404.5367|tag|http://www.semanlink.net/tag/arxiv_doc 
+https://arxiv.org/abs/1404.5367|tag|http://www.semanlink.net/tag/andrew_mccallum +https://arxiv.org/abs/1404.5367|tag|http://www.semanlink.net/tag/word_embeddings_with_lexical_resources +https://arxiv.org/abs/1404.5367|tag|http://www.semanlink.net/tag/named_entity_recognition +https://arxiv.org/abs/1404.5367|tag|http://www.semanlink.net/tag/phrase_embeddings +https://arxiv.org/abs/1404.5367|arxiv_author|Andrew McCallum +https://arxiv.org/abs/1404.5367|arxiv_author|Vineet Kumar +https://arxiv.org/abs/1404.5367|arxiv_author|Alexandre Passos +https://arxiv.org/abs/1404.5367|comment|"Employs lexicons as part of the word embedding training: + +> The skip-gram model can be trained to +predict not only neighboring words but also lexicon +membership of the central word (or phrase). + +Quickly demonstrates how we can plug phrase embeddings +into an existing log-linear CRF System. + +" +https://arxiv.org/abs/1404.5367|title|[1404.5367] Lexicon Infused Phrase Embeddings for Named Entity Resolution +https://arxiv.org/abs/1404.5367|creationTime|2018-05-22T16:22:37Z +https://arxiv.org/abs/1404.5367|arxiv_summary|"Most state-of-the-art approaches for named-entity recognition (NER) use semi +supervised information in the form of word clusters and lexicons. Recently +neural network-based language models have been explored, as they as a byproduct +generate highly informative vector representations for words, known as word +embeddings. In this paper we present two contributions: a new form of learning +word embeddings that can leverage information from relevant lexicons to improve +the representations, and the first system to use neural word embeddings to +achieve state-of-the-art results on named-entity recognition in both CoNLL and +Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for +CoNLL 2003---significantly better than any previous system trained on public +data, and matching a system employing massive private industrial query-log +data." 
+https://arxiv.org/abs/1404.5367|arxiv_firstAuthor|Alexandre Passos +https://arxiv.org/abs/1404.5367|arxiv_updated|2014-04-22T02:12:06Z +https://arxiv.org/abs/1404.5367|arxiv_title|Lexicon Infused Phrase Embeddings for Named Entity Resolution +https://arxiv.org/abs/1404.5367|arxiv_published|2014-04-22T02:12:06Z +https://arxiv.org/abs/1404.5367|arxiv_num|1404.5367 +http://clarkparsia.com/weblog/2008/04/10/grddl-in-owlsight/|creationDate|2008-04-11 +http://clarkparsia.com/weblog/2008/04/10/grddl-in-owlsight/|tag|http://www.semanlink.net/tag/owlsight +http://clarkparsia.com/weblog/2008/04/10/grddl-in-owlsight/|tag|http://www.semanlink.net/tag/grddl +http://clarkparsia.com/weblog/2008/04/10/grddl-in-owlsight/|title|GRDDL in OwlSight Thinking Clearly +http://clarkparsia.com/weblog/2008/04/10/grddl-in-owlsight/|creationTime|2008-04-11T15:52:54Z +http://www.mkbergman.com/?p=355.|creationDate|2008-08-28 +http://www.mkbergman.com/?p=355.|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/?p=355.|tag|http://www.semanlink.net/tag/virtuoso_review +http://www.mkbergman.com/?p=355.|comment|Virtuoso and Related Tools String Together a Surprisingly Complete Score +http://www.mkbergman.com/?p=355.|title|OpenLink Plugs the Gaps in the Structured Web » AI3:::Adaptive Information +http://www.mkbergman.com/?p=355.|creationTime|2008-08-28T16:44:16Z +http://mostafadehghani.com/2017/04/23/beating-the-teacher-neural-ranking-models-with-weak-supervision/|creationDate|2019-01-27 +http://mostafadehghani.com/2017/04/23/beating-the-teacher-neural-ranking-models-with-weak-supervision/|comment|"Main Idea: To leverage large amounts of unsupervised data to infer “weak” labels and use that signal for learning supervised models as if we had the ground truth labels. + +Blog post about [this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.08803) + + +" +http://mostafadehghani.com/2017/04/23/beating-the-teacher-neural-ranking-models-with-weak-supervision/|relatedDoc|https://arxiv.org/abs/1704.08803 +http://mostafadehghani.com/2017/04/23/beating-the-teacher-neural-ranking-models-with-weak-supervision/|title|Beating the Teacher: Neural Ranking Models with Weak Supervision – Mostafa Dehghani +http://mostafadehghani.com/2017/04/23/beating-the-teacher-neural-ranking-models-with-weak-supervision/|creationTime|2019-01-27T17:29:21Z +http://www.irt.org/|creationDate|2006-07-27 +http://www.irt.org/|tag|http://www.semanlink.net/tag/tutorial +http://www.irt.org/|tag|http://www.semanlink.net/tag/howto +http://www.irt.org/|tag|http://www.semanlink.net/tag/internet_related_technologies +http://www.irt.org/|title|"irt.org Home Page (""Internet Related Technologies"")" +https://arxiv.org/abs/1808.07699|creationDate|2019-04-23 +https://arxiv.org/abs/1808.07699|tag|http://www.semanlink.net/tag/entity_linking +https://arxiv.org/abs/1808.07699|tag|http://www.semanlink.net/tag/end_to_end_entity_linking +https://arxiv.org/abs/1808.07699|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1808.07699|arxiv_author|Thomas Hofmann +https://arxiv.org/abs/1808.07699|arxiv_author|Nikolaos Kolitsas +https://arxiv.org/abs/1808.07699|arxiv_author|Octavian-Eugen Ganea +https://arxiv.org/abs/1808.07699|comment|"> We presented the first **neural end-to-end entity linking** +model and show the benefit of jointly optimizing +entity recognition and linking. Leveraging key +components, namely word, entity and mention embeddings, +we prove that engineered features can +be almost completely replaced by modern neural +networks." 
+https://arxiv.org/abs/1808.07699|title|[1808.07699] End-to-End Neural Entity Linking +https://arxiv.org/abs/1808.07699|creationTime|2019-04-23T19:12:16Z +https://arxiv.org/abs/1808.07699|arxiv_summary|"Entity Linking (EL) is an essential task for semantic text understanding and +information extraction. Popular methods separately address the Mention +Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging +their mutual dependency. We here propose the first neural end-to-end EL system +that jointly discovers and links entities in a text document. The main idea is +to consider all possible spans as potential mentions and learn contextual +similarity scores over their entity candidates that are useful for both MD and +ED decisions. Key components are context-aware mention embeddings, entity +embeddings and a probabilistic mention - entity map, without demanding other +engineered features. Empirically, we show that our end-to-end method +significantly outperforms popular systems on the Gerbil platform when enough +training data is available. Conversely, if testing datasets follow different +annotation conventions compared to the training set (e.g. queries/ tweets vs +news documents), our ED model coupled with a traditional NER system offers the +best or second best EL accuracy." +https://arxiv.org/abs/1808.07699|arxiv_firstAuthor|Nikolaos Kolitsas +https://arxiv.org/abs/1808.07699|arxiv_updated|2018-08-29T17:44:38Z +https://arxiv.org/abs/1808.07699|arxiv_title|End-to-End Neural Entity Linking +https://arxiv.org/abs/1808.07699|arxiv_published|2018-08-23T11:16:57Z +https://arxiv.org/abs/1808.07699|arxiv_num|1808.07699 +http://lists.w3.org/Archives/Public/public-vocabs/2011Nov/0022.html|creationDate|2013-07-06 +http://lists.w3.org/Archives/Public/public-vocabs/2011Nov/0022.html|tag|http://www.semanlink.net/tag/schema_org +http://lists.w3.org/Archives/Public/public-vocabs/2011Nov/0022.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2011Nov/0022.html|tag|http://www.semanlink.net/tag/jeni_tennison +http://lists.w3.org/Archives/Public/public-vocabs/2011Nov/0022.html|title|@itemid and URL properties in schema.org from Jeni Tennison on 2011-11-04 (public-vocabs@w3.org from November 2011) +http://lists.w3.org/Archives/Public/public-vocabs/2011Nov/0022.html|creationTime|2013-07-06T15:46:10Z +http://i.stanford.edu/pub/cstr/reports/csl/tr/89/400/CSL-TR-89-400.pdf|creationDate|2017-07-25 +http://i.stanford.edu/pub/cstr/reports/csl/tr/89/400/CSL-TR-89-400.pdf|tag|http://www.semanlink.net/tag/sparse_distributed_memory +http://i.stanford.edu/pub/cstr/reports/csl/tr/89/400/CSL-TR-89-400.pdf|title|Sparse Distributed Memory: Principles and Operation +http://i.stanford.edu/pub/cstr/reports/csl/tr/89/400/CSL-TR-89-400.pdf|creationTime|2017-07-25T16:00:35Z +https://arxiv.org/abs/1512.00765|creationDate|2017-06-09 +https://arxiv.org/abs/1512.00765|tag|http://www.semanlink.net/tag/word_embedding +https://arxiv.org/abs/1512.00765|tag|http://www.semanlink.net/tag/tf_idf +https://arxiv.org/abs/1512.00765|tag|http://www.semanlink.net/tag/nlp_short_texts +https://arxiv.org/abs/1512.00765|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1512.00765|arxiv_author|Thomas Demeester +https://arxiv.org/abs/1512.00765|arxiv_author|Steven Bohez +https://arxiv.org/abs/1512.00765|arxiv_author|Bart Dhoedt +https://arxiv.org/abs/1512.00765|arxiv_author|Steven Van Canneyt +https://arxiv.org/abs/1512.00765|arxiv_author|Cedric De Boom 
+https://arxiv.org/abs/1512.00765|comment|"In order to pair short text +fragments—as a concatenation of separate words—an adequate +distributed sentence representation is needed. Main contribution: a first step towards a hybrid method that +combines the strength of dense distributed representations— +as opposed to sparse term matching—with the strength of +tf-idf based methods. The combination of word embeddings and tf-idf +information might lead to a better model for semantic content +within very short text fragments." +https://arxiv.org/abs/1512.00765|title|[1512.00765] Learning Semantic Similarity for Very Short Texts +https://arxiv.org/abs/1512.00765|creationTime|2017-06-09T14:51:21Z +https://arxiv.org/abs/1512.00765|arxiv_summary|"Levering data on social media, such as Twitter and Facebook, requires +information retrieval algorithms to become able to relate very short text +fragments to each other. Traditional text similarity methods such as tf-idf +cosine-similarity, based on word overlap, mostly fail to produce good results +in this case, since word overlap is little or non-existent. Recently, +distributed word representations, or word embeddings, have been shown to +successfully allow words to match on the semantic level. In order to pair short +text fragments - as a concatenation of separate words - an adequate distributed +sentence representation is needed, in existing literature often obtained by +naively combining the individual word representations. We therefore +investigated several text representations as a combination of word embeddings +in the context of semantic pair matching. This paper investigates the +effectiveness of several such naive techniques, as well as traditional tf-idf +similarity, for fragments of different lengths. Our main contribution is a +first step towards a hybrid method that combines the strength of dense +distributed representations - as opposed to sparse term matching - with the +strength of tf-idf based methods to automatically reduce the impact of less +informative terms. Our new approach outperforms the existing techniques in a +toy experimental set-up, leading to the conclusion that the combination of word +embeddings and tf-idf information might lead to a better model for semantic +content within very short text fragments." 
+https://arxiv.org/abs/1512.00765|arxiv_firstAuthor|Cedric De Boom +https://arxiv.org/abs/1512.00765|arxiv_updated|2015-12-02T16:31:20Z +https://arxiv.org/abs/1512.00765|arxiv_title|Learning Semantic Similarity for Very Short Texts +https://arxiv.org/abs/1512.00765|arxiv_published|2015-12-02T16:31:20Z +https://arxiv.org/abs/1512.00765|arxiv_num|1512.00765 +http://www.laquadrature.net/en/dominant-telcos-try-to-end-net-neutrality-through-itu|creationDate|2012-09-20 +http://www.laquadrature.net/en/dominant-telcos-try-to-end-net-neutrality-through-itu|tag|http://www.semanlink.net/tag/net_neutrality +http://www.laquadrature.net/en/dominant-telcos-try-to-end-net-neutrality-through-itu|title|Dominant Telcos Try to End Net Neutrality Through ITU La Quadrature du Net +http://www.laquadrature.net/en/dominant-telcos-try-to-end-net-neutrality-through-itu|creationTime|2012-09-20T15:41:03Z +http://www.fortune.com/fortune/print/0,15935,1101810,00.html|creationDate|2005-09-15 +http://www.fortune.com/fortune/print/0,15935,1101810,00.html|tag|http://www.semanlink.net/tag/biotech_industry +http://www.fortune.com/fortune/print/0,15935,1101810,00.html|tag|http://www.semanlink.net/tag/patent +http://www.fortune.com/fortune/print/0,15935,1101810,00.html|comment|Twenty-five years ago a law known as Bayh-Dole spawned the biotech industry. It made lots of university scientists fabulously rich. It was also supposed to usher in a new era of innovation. So why are medical miracles in such short supply? +http://www.fortune.com/fortune/print/0,15935,1101810,00.html|title|Fortune.com: The Law of Unintended Consequences +https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/fashion_mnist.ipynb|creationDate|2018-10-05 +https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/fashion_mnist.ipynb|tag|http://www.semanlink.net/tag/tensorflow +https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/fashion_mnist.ipynb|tag|http://www.semanlink.net/tag/keras +https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/fashion_mnist.ipynb|tag|http://www.semanlink.net/tag/sample_code +https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/fashion_mnist.ipynb|tag|http://www.semanlink.net/tag/tpu +https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/fashion_mnist.ipynb|title|Training on TPU +https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/fashion_mnist.ipynb|creationTime|2018-10-05T08:19:26Z +http://www.semanlink.net/doc/2019/05/snorkel_by_hazyresearch|creationDate|2019-05-22 +http://www.semanlink.net/doc/2019/05/snorkel_by_hazyresearch|tag|http://www.semanlink.net/tag/snorkel +http://www.semanlink.net/doc/2019/05/snorkel_by_hazyresearch|comment|"A system for rapidly creating training sets with weak supervision + +> in Snorkel, users write programmatic operations to label, transform, and structure training datasets for machine learning, without needing to hand label any training data; Snorkel then uses modern, theoretically-grounded modeling techniques to clean and integrate the resulting training data." 
+http://www.semanlink.net/doc/2019/05/snorkel_by_hazyresearch|title|Snorkel by HazyResearch +http://www.semanlink.net/doc/2019/05/snorkel_by_hazyresearch|bookmarkOf|https://hazyresearch.github.io/snorkel/ +http://www.semanlink.net/doc/2019/05/snorkel_by_hazyresearch|creationTime|2019-05-22T00:35:44Z +http://www.lemonde.fr/planete/article/2009/07/27/ecologistes-et-agriculteurs-s-affrontent-durement-sur-la-deforestation-de-l-amazonie_1223095_3244.html#ens_id=1223174|creationDate|2009-07-28 +http://www.lemonde.fr/planete/article/2009/07/27/ecologistes-et-agriculteurs-s-affrontent-durement-sur-la-deforestation-de-l-amazonie_1223095_3244.html#ens_id=1223174|tag|http://www.semanlink.net/tag/amazonie +http://www.lemonde.fr/planete/article/2009/07/27/ecologistes-et-agriculteurs-s-affrontent-durement-sur-la-deforestation-de-l-amazonie_1223095_3244.html#ens_id=1223174|tag|http://www.semanlink.net/tag/deforestation +http://www.lemonde.fr/planete/article/2009/07/27/ecologistes-et-agriculteurs-s-affrontent-durement-sur-la-deforestation-de-l-amazonie_1223095_3244.html#ens_id=1223174|comment|"""La rémunération du service environnemental est la solution à tous nos problèmes""" +http://www.lemonde.fr/planete/article/2009/07/27/ecologistes-et-agriculteurs-s-affrontent-durement-sur-la-deforestation-de-l-amazonie_1223095_3244.html#ens_id=1223174|title|Ecologistes et agriculteurs s'affrontent durement sur la déforestation de l'Amazonie +http://www.lemonde.fr/planete/article/2009/07/27/ecologistes-et-agriculteurs-s-affrontent-durement-sur-la-deforestation-de-l-amazonie_1223095_3244.html#ens_id=1223174|creationTime|2009-07-28T11:42:39Z +http://www.lemonde.fr/planete/article/2009/07/27/ecologistes-et-agriculteurs-s-affrontent-durement-sur-la-deforestation-de-l-amazonie_1223095_3244.html#ens_id=1223174|source|Le Monde +http://www.lemonde.fr/planete/article/2009/07/27/ecologistes-et-agriculteurs-s-affrontent-durement-sur-la-deforestation-de-l-amazonie_1223095_3244.html#ens_id=1223174|date|2009-07-28 +https://dl.acm.org/citation.cfm?doid=3184558.3186906|creationDate|2018-05-10 +https://dl.acm.org/citation.cfm?doid=3184558.3186906|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://dl.acm.org/citation.cfm?doid=3184558.3186906|tag|http://www.semanlink.net/tag/word_embeddings_with_lexical_resources +https://dl.acm.org/citation.cfm?doid=3184558.3186906|tag|http://www.semanlink.net/tag/sense_embeddings +https://dl.acm.org/citation.cfm?doid=3184558.3186906|comment|"post-processing method for generating low-dimensional sense embedding. Emploies the ontological and contextual information simultaneously. + +(Poster at the Web Conf) [Github](https://github.com/y95847frank/Joint-Retrofitting) + +Calcule des ""sense embeddings"", en partant de word embeddings pré-calculés (par ex avec word2vec), et de données de type lexicographiques (ex wordnet), en contraignant, pour un sens, la distance entre sense et word embedding. + +Abstract: + +> While recent word embedding models demonstrate their abilities to capture syntactic and semantic information, the demand for sense level embedding is getting higher. In this study, we propose a novel joint sense embedding learning model that retrofits the word representation into sense representation from contextual and ontological information. The experiments show the effectiveness and robustness of our model that outperforms previous approaches in four public available benchmark datasets. 
+ +> Given a trained word embedding and a lexical ontology that contains sense level relationships (e.g., synonym, hypernym, etc.), our model generates new sense vectors via constraining the distance between the sense vector and its word form vector, its sense neighbors and its contextual neighbors + + +[Influenced by](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1411.4166) (which post-processes and modifies word vectors to incorporate knowledge from semantic lexicons, while this creates new sense vectors)" +https://dl.acm.org/citation.cfm?doid=3184558.3186906|relatedDoc|https://arxiv.org/abs/1411.4166 +https://dl.acm.org/citation.cfm?doid=3184558.3186906|title|That Makes Sense: Joint Sense Retrofitting from Contextual and Ontological Information +https://dl.acm.org/citation.cfm?doid=3184558.3186906|creationTime|2018-05-10T14:57:18Z +http://www.pbs.org/wgbh/nova/elegant/|creationDate|2005-03-07 +http://www.pbs.org/wgbh/nova/elegant/|tag|http://www.semanlink.net/tag/pbs +http://www.pbs.org/wgbh/nova/elegant/|tag|http://www.semanlink.net/tag/theorie_des_cordes +http://www.pbs.org/wgbh/nova/elegant/|comment|Eleven dimensions, parallel universes, and a world made out of strings. It's not science fiction, it's string theory. +http://www.pbs.org/wgbh/nova/elegant/|title|String theory: The elegant universe +https://www.eff.org/deeplinks/2017/09/open-letter-w3c-director-ceo-team-and-membership|creationDate|2017-09-22 +https://www.eff.org/deeplinks/2017/09/open-letter-w3c-director-ceo-team-and-membership|tag|http://www.semanlink.net/tag/electronic_frontier_foundation +https://www.eff.org/deeplinks/2017/09/open-letter-w3c-director-ceo-team-and-membership|tag|http://www.semanlink.net/tag/w3c +https://www.eff.org/deeplinks/2017/09/open-letter-w3c-director-ceo-team-and-membership|tag|http://www.semanlink.net/tag/eme +https://www.eff.org/deeplinks/2017/09/open-letter-w3c-director-ceo-team-and-membership|title|An open letter to the W3C Director, CEO, team and membership Electronic Frontier Foundation +https://www.eff.org/deeplinks/2017/09/open-letter-w3c-director-ceo-team-and-membership|creationTime|2017-09-22T01:01:40Z +http://robohub.org/machine-consciousness-fact-or-fiction/|creationDate|2014-03-12 +http://robohub.org/machine-consciousness-fact-or-fiction/|tag|http://www.semanlink.net/tag/conscience_artificielle +http://robohub.org/machine-consciousness-fact-or-fiction/|tag|http://www.semanlink.net/tag/global_workspace_theory +http://robohub.org/machine-consciousness-fact-or-fiction/|title|Machine consciousness: Fact or fiction? Robohub +http://robohub.org/machine-consciousness-fact-or-fiction/|creationTime|2014-03-12T09:57:58Z +http://www.lespetitescases.net/open-graph-protocol-facebook-se-met-au-web-semantique|creationDate|2010-05-14 +http://www.lespetitescases.net/open-graph-protocol-facebook-se-met-au-web-semantique|tag|http://www.semanlink.net/tag/facebook +http://www.lespetitescases.net/open-graph-protocol-facebook-se-met-au-web-semantique|tag|http://www.semanlink.net/tag/semantic_web +http://www.lespetitescases.net/open-graph-protocol-facebook-se-met-au-web-semantique|tag|http://www.semanlink.net/tag/graph +http://www.lespetitescases.net/open-graph-protocol-facebook-se-met-au-web-semantique|tag|http://www.semanlink.net/tag/les_petites_cases +http://www.lespetitescases.net/open-graph-protocol-facebook-se-met-au-web-semantique|title|Open Graph Protocol : Facebook se met au Web sémantique ? 
Les petites cases +http://www.lespetitescases.net/open-graph-protocol-facebook-se-met-au-web-semantique|creationTime|2010-05-14T21:18:22Z +http://topquadrant.com/docs/marcom/TopQuadrant_Whitepaper_online.pdf|creationDate|2010-10-27 +http://topquadrant.com/docs/marcom/TopQuadrant_Whitepaper_online.pdf|tag|http://www.semanlink.net/tag/semantic_enterprise +http://topquadrant.com/docs/marcom/TopQuadrant_Whitepaper_online.pdf|tag|http://www.semanlink.net/tag/topquadrant +http://topquadrant.com/docs/marcom/TopQuadrant_Whitepaper_online.pdf|title|Semantic Web Solutions at Work in the enterprise - TopQuadrant white paper +http://topquadrant.com/docs/marcom/TopQuadrant_Whitepaper_online.pdf|creationTime|2010-10-27T00:25:53Z +https://lejournal.cnrs.fr/articles/la-croissance-une-affaire-denergie|creationDate|2016-05-15 +https://lejournal.cnrs.fr/articles/la-croissance-une-affaire-denergie|tag|http://www.semanlink.net/tag/energie +https://lejournal.cnrs.fr/articles/la-croissance-une-affaire-denergie|tag|http://www.semanlink.net/tag/croissance +https://lejournal.cnrs.fr/articles/la-croissance-une-affaire-denergie|title|La croissance, une affaire d'énergie CNRS Le journal +https://lejournal.cnrs.fr/articles/la-croissance-une-affaire-denergie|creationTime|2016-05-15T13:53:31Z +http://www.laquadrature.net/|creationDate|2009-02-18 +http://www.laquadrature.net/|tag|http://www.semanlink.net/tag/big_brother +http://www.laquadrature.net/|tag|http://www.semanlink.net/tag/loi_sur_le_telechargement +http://www.laquadrature.net/|tag|http://www.semanlink.net/tag/droit_et_internet +http://www.laquadrature.net/|comment|La Quadrature du Net est un collectif de citoyens qui informe sur des projets législatifs menaçant les libertés individuelles, les droits fondamentaux et le développement économique et social à l'ère du numérique. 
+http://www.laquadrature.net/|title|La Quadrature du Net +http://www.laquadrature.net/|creationTime|2009-02-18T01:03:03Z +http://www.msf.fr/site/actu.nsf/actus/petitionnovartisfaq201206?OpenDocument&loc=au|creationDate|2006-12-29 +http://www.msf.fr/site/actu.nsf/actus/petitionnovartisfaq201206?OpenDocument&loc=au|tag|http://www.semanlink.net/tag/medecins_sans_frontieres +http://www.msf.fr/site/actu.nsf/actus/petitionnovartisfaq201206?OpenDocument&loc=au|tag|http://www.semanlink.net/tag/patent +http://www.msf.fr/site/actu.nsf/actus/petitionnovartisfaq201206?OpenDocument&loc=au|tag|http://www.semanlink.net/tag/medicaments_generiques +http://www.msf.fr/site/actu.nsf/actus/petitionnovartisfaq201206?OpenDocument&loc=au|tag|http://www.semanlink.net/tag/novartis +http://www.msf.fr/site/actu.nsf/actus/petitionnovartisfaq201206?OpenDocument&loc=au|tag|http://www.semanlink.net/tag/petition +http://www.msf.fr/site/actu.nsf/actus/petitionnovartisfaq201206?OpenDocument&loc=au|title|www.msf.fr : PÉTITION NOVARTIS - Un mauvais procès, une menace pour les malades des pays pauvres +http://www.milk.com/wall-o-shame/heavy_boots.html|creationDate|2008-03-29 +http://www.milk.com/wall-o-shame/heavy_boots.html|tag|http://www.semanlink.net/tag/science +http://www.milk.com/wall-o-shame/heavy_boots.html|tag|http://www.semanlink.net/tag/nasa +http://www.milk.com/wall-o-shame/heavy_boots.html|tag|http://www.semanlink.net/tag/education +http://www.milk.com/wall-o-shame/heavy_boots.html|tag|http://www.semanlink.net/tag/gravitation +http://www.milk.com/wall-o-shame/heavy_boots.html|tag|http://www.semanlink.net/tag/insolite +http://www.milk.com/wall-o-shame/heavy_boots.html|title|Heavy Boots +http://www.milk.com/wall-o-shame/heavy_boots.html|creationTime|2008-03-29T19:30:29Z +http://www.msnbc.msn.com/id/47225834#.T7vDLI7UN9p|creationDate|2012-05-22 +http://www.msnbc.msn.com/id/47225834#.T7vDLI7UN9p|tag|http://www.semanlink.net/tag/tree_of_life +http://www.msnbc.msn.com/id/47225834#.T7vDLI7UN9p|title|Strange organism has unique roots in tree of life - Technology & science - Science - LiveScience - msnbc.com +http://www.msnbc.msn.com/id/47225834#.T7vDLI7UN9p|creationTime|2012-05-22T18:50:09Z +https://fr.m.wikipedia.org/wiki/Ampoules_de_Lorenzini|creationDate|2017-07-21 +https://fr.m.wikipedia.org/wiki/Ampoules_de_Lorenzini|tag|http://www.semanlink.net/tag/julien_cardinal +https://fr.m.wikipedia.org/wiki/Ampoules_de_Lorenzini|tag|http://www.semanlink.net/tag/requin +https://fr.m.wikipedia.org/wiki/Ampoules_de_Lorenzini|title|Ampoules de Lorenzini — Wikipédia +https://fr.m.wikipedia.org/wiki/Ampoules_de_Lorenzini|creationTime|2017-07-21T02:03:54Z +http://cs231n.github.io/|creationDate|2017-08-23 +http://cs231n.github.io/|tag|http://www.semanlink.net/tag/computer_vision +http://cs231n.github.io/|tag|http://www.semanlink.net/tag/convolutional_neural_network +http://cs231n.github.io/|tag|http://www.semanlink.net/tag/stanford +http://cs231n.github.io/|tag|http://www.semanlink.net/tag/andrej_karpathy +http://cs231n.github.io/|title|CS231n Convolutional Neural Networks for Visual Recognition +http://cs231n.github.io/|creationTime|2017-08-23T18:42:51Z +http://jsfiddle.net/germesin/7EYxP/|creationDate|2012-08-08 +http://jsfiddle.net/germesin/7EYxP/|tag|http://www.semanlink.net/tag/sample_code +http://jsfiddle.net/germesin/7EYxP/|tag|http://www.semanlink.net/tag/vie_vienna_iks_editables +http://jsfiddle.net/germesin/7EYxP/|tag|http://www.semanlink.net/tag/jsfiddle +http://jsfiddle.net/germesin/7EYxP/|title|VIE.js - Skeleton - 
jsFiddle +http://jsfiddle.net/germesin/7EYxP/|creationTime|2012-08-08T15:57:42Z +http://passeurdesciences.blog.lemonde.fr/2015/06/02/quel-gout-a-la-chair-dhomme/|creationDate|2015-06-03 +http://passeurdesciences.blog.lemonde.fr/2015/06/02/quel-gout-a-la-chair-dhomme/|tag|http://www.semanlink.net/tag/gastronomie +http://passeurdesciences.blog.lemonde.fr/2015/06/02/quel-gout-a-la-chair-dhomme/|tag|http://www.semanlink.net/tag/cannibalisme +http://passeurdesciences.blog.lemonde.fr/2015/06/02/quel-gout-a-la-chair-dhomme/|title|Quel goût a la chair d’homme ? Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2015/06/02/quel-gout-a-la-chair-dhomme/|creationTime|2015-06-03T00:27:42Z +http://www.snee.com/bobdc.blog/2009/09/appreciating-sparql-construct.html|creationDate|2009-09-23 +http://www.snee.com/bobdc.blog/2009/09/appreciating-sparql-construct.html|tag|http://www.semanlink.net/tag/sparql_construct +http://www.snee.com/bobdc.blog/2009/09/appreciating-sparql-construct.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2009/09/appreciating-sparql-construct.html|title|Appreciating SPARQL CONSTRUCT more - bobdc.blog +http://www.snee.com/bobdc.blog/2009/09/appreciating-sparql-construct.html|creationTime|2009-09-23T23:42:56Z +http://refine.deri.ie/|creationDate|2014-02-13 +http://refine.deri.ie/|tag|http://www.semanlink.net/tag/google_refine +http://refine.deri.ie/|tag|http://www.semanlink.net/tag/rdf +http://refine.deri.ie/|tag|http://www.semanlink.net/tag/deri +http://refine.deri.ie/|title|GRefine RDF Extension +http://refine.deri.ie/|creationTime|2014-02-13T22:50:26Z +http://www.programcreek.com/java-api-examples/index.php?source_dir=jena-master/jena-arq/src/main/java/org/apache/jena/riot/out/JsonLDWriter.java|creationDate|2016-04-09 +http://www.programcreek.com/java-api-examples/index.php?source_dir=jena-master/jena-arq/src/main/java/org/apache/jena/riot/out/JsonLDWriter.java|tag|http://www.semanlink.net/tag/jsonld_jena +http://www.programcreek.com/java-api-examples/index.php?source_dir=jena-master/jena-arq/src/main/java/org/apache/jena/riot/out/JsonLDWriter.java|title|JsonLdOptions +http://www.programcreek.com/java-api-examples/index.php?source_dir=jena-master/jena-arq/src/main/java/org/apache/jena/riot/out/JsonLDWriter.java|creationTime|2016-04-09T12:23:25Z +https://www.theguardian.com/politics/2017/feb/26/us-billionaire-mercer-helped-back-brexit|creationDate|2017-02-26 +https://www.theguardian.com/politics/2017/feb/26/us-billionaire-mercer-helped-back-brexit|tag|http://www.semanlink.net/tag/trump +https://www.theguardian.com/politics/2017/feb/26/us-billionaire-mercer-helped-back-brexit|tag|http://www.semanlink.net/tag/social_manipulation +https://www.theguardian.com/politics/2017/feb/26/us-billionaire-mercer-helped-back-brexit|tag|http://www.semanlink.net/tag/brexit +https://www.theguardian.com/politics/2017/feb/26/us-billionaire-mercer-helped-back-brexit|tag|http://www.semanlink.net/tag/social_networks +https://www.theguardian.com/politics/2017/feb/26/us-billionaire-mercer-helped-back-brexit|title|Revealed: how US billionaire helped to back Brexit Politics The Guardian +https://www.theguardian.com/politics/2017/feb/26/us-billionaire-mercer-helped-back-brexit|creationTime|2017-02-26T16:22:54Z +http://www.lemonde.fr/planete/article/2008/12/27/la-technique-de-l-insecte-sterile-va-etre-testee-pour-lutter-contre-le-paludisme-et-le-chikungunya_1135695_3244.html#ens_id=1135776|creationDate|2008-12-27 
+http://www.lemonde.fr/planete/article/2008/12/27/la-technique-de-l-insecte-sterile-va-etre-testee-pour-lutter-contre-le-paludisme-et-le-chikungunya_1135695_3244.html#ens_id=1135776|tag|http://www.semanlink.net/tag/technique_de_l_insecte_sterile +http://www.lemonde.fr/planete/article/2008/12/27/la-technique-de-l-insecte-sterile-va-etre-testee-pour-lutter-contre-le-paludisme-et-le-chikungunya_1135695_3244.html#ens_id=1135776|tag|http://www.semanlink.net/tag/paludisme +http://www.lemonde.fr/planete/article/2008/12/27/la-technique-de-l-insecte-sterile-va-etre-testee-pour-lutter-contre-le-paludisme-et-le-chikungunya_1135695_3244.html#ens_id=1135776|title|La technique de l'insecte stérile va être testée pour lutter contre le paludisme et le chikungunya +http://www.lemonde.fr/planete/article/2008/12/27/la-technique-de-l-insecte-sterile-va-etre-testee-pour-lutter-contre-le-paludisme-et-le-chikungunya_1135695_3244.html#ens_id=1135776|creationTime|2008-12-27T20:44:01Z +http://www.lemonde.fr/planete/article/2008/12/27/la-technique-de-l-insecte-sterile-va-etre-testee-pour-lutter-contre-le-paludisme-et-le-chikungunya_1135695_3244.html#ens_id=1135776|source|Le Monde +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-784810,0.html|creationDate|2006-06-19 +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-784810,0.html|tag|http://www.semanlink.net/tag/cafard +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-784810,0.html|tag|http://www.semanlink.net/tag/robotique +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-784810,0.html|title|Une colonie de cafards domptée par un mini-robot +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-784810,0.html|source|Le Monde +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-784810,0.html|date|2006-06-18 +http://www.semanlink.net/doc/2019/05/a_hybrid_graph_model_for_distan|creationDate|2019-05-30 +http://www.semanlink.net/doc/2019/05/a_hybrid_graph_model_for_distan|tag|http://www.semanlink.net/tag/attention_in_graphs +http://www.semanlink.net/doc/2019/05/a_hybrid_graph_model_for_distan|tag|http://www.semanlink.net/tag/relation_extraction +http://www.semanlink.net/doc/2019/05/a_hybrid_graph_model_for_distan|tag|http://www.semanlink.net/tag/distant_supervision +http://www.semanlink.net/doc/2019/05/a_hybrid_graph_model_for_distan|tag|http://www.semanlink.net/tag/eswc_2019 +http://www.semanlink.net/doc/2019/05/a_hybrid_graph_model_for_distan|comment|"> Distant supervision has advantages of generating training data automatically for relation extraction by aligning triples in Knowledge Graphs with large-scale corpora. +>... we propose a novel hybrid graph model, which can incorporate heterogeneous background information in a unified framework, such as entity types and human-constructed triples. These various kinds of knowledge can be integrated efficiently even with several missing cases. In addition, we further employ an attention mechanism to identify the most confident information which can alleviate the side effect of noise." 
+http://www.semanlink.net/doc/2019/05/a_hybrid_graph_model_for_distan|title|A Hybrid Graph Model for Distant Supervision Relation Extraction Springer for Research & Development (ESWC 2019) +http://www.semanlink.net/doc/2019/05/a_hybrid_graph_model_for_distan|bookmarkOf|https://rd.springer.com/chapter/10.1007/978-3-030-21348-0_3 +http://www.semanlink.net/doc/2019/05/a_hybrid_graph_model_for_distan|creationTime|2019-05-30T12:40:54Z +http://www.the153club.org/citroen7.jpg|creationDate|2007-05-02 +http://www.the153club.org/citroen7.jpg|tag|http://www.semanlink.net/tag/jerma +http://www.the153club.org/citroen7.jpg|comment|Photographic record of the 2nd Haardt Audouin-Dubreuil Central African Mission, 1924 +http://www.the153club.org/citroen7.jpg|title|Cavaliers Jermas +http://www.the153club.org/citroen7.jpg|creationTime|2007-05-02T21:51:48Z +http://www.technorati.com|creationDate|2005-04-30 +http://www.technorati.com|tag|http://www.semanlink.net/tag/tagging +http://www.forbes.com/sites/jamesmarshallcrotty/2011/12/21/m-i-t-game-changer-free-online-education-for-all/|creationDate|2012-01-03 +http://www.forbes.com/sites/jamesmarshallcrotty/2011/12/21/m-i-t-game-changer-free-online-education-for-all/|tag|http://www.semanlink.net/tag/mit +http://www.forbes.com/sites/jamesmarshallcrotty/2011/12/21/m-i-t-game-changer-free-online-education-for-all/|tag|http://www.semanlink.net/tag/e_learning +http://www.forbes.com/sites/jamesmarshallcrotty/2011/12/21/m-i-t-game-changer-free-online-education-for-all/|tag|http://www.semanlink.net/tag/education +http://www.forbes.com/sites/jamesmarshallcrotty/2011/12/21/m-i-t-game-changer-free-online-education-for-all/|title|M.I.T. Game-Changer: Free Online Education For All - Forbes +http://www.forbes.com/sites/jamesmarshallcrotty/2011/12/21/m-i-t-game-changer-free-online-education-for-all/|creationTime|2012-01-03T07:47:21Z +http://www.bytemark.co.uk/index.html|creationDate|2006-01-02 +http://www.bytemark.co.uk/index.html|tag|http://www.semanlink.net/tag/linux_hosting +http://www.bytemark.co.uk/index.html|title|Bytemark: Welcome +http://www.semanlink.net/doc/2019/05/hazyresearch_metal_snorkel_met|creationDate|2019-05-22 +http://www.semanlink.net/doc/2019/05/hazyresearch_metal_snorkel_met|tag|http://www.semanlink.net/tag/snorkel +http://www.semanlink.net/doc/2019/05/hazyresearch_metal_snorkel_met|title|HazyResearch/metal: Snorkel MeTaL: A framework for training models with multi-task weak supervision +http://www.semanlink.net/doc/2019/05/hazyresearch_metal_snorkel_met|bookmarkOf|https://github.com/HazyResearch/metal +http://www.semanlink.net/doc/2019/05/hazyresearch_metal_snorkel_met|creationTime|2019-05-22T00:29:42Z +http://www.ysearchblog.com/archives/000654.html|creationDate|2008-11-19 +http://www.ysearchblog.com/archives/000654.html|tag|http://www.semanlink.net/tag/yahoo +http://www.ysearchblog.com/archives/000654.html|title|Yahoo! Search Blog: Yahoo! 
Search BOSS Releases Key Terms +http://www.ysearchblog.com/archives/000654.html|creationTime|2008-11-19T13:46:12Z +http://www.w3.org/wiki/Html-data-tf|creationDate|2013-07-06 +http://www.w3.org/wiki/Html-data-tf|tag|http://www.semanlink.net/tag/w3c_working_group +http://www.w3.org/wiki/Html-data-tf|tag|http://www.semanlink.net/tag/html_data +http://www.w3.org/wiki/Html-data-tf|title|Html-data-tf - W3C Wiki +http://www.w3.org/wiki/Html-data-tf|creationTime|2013-07-06T15:32:53Z +http://semanticweb.com/down-with-the-data-warehouse-long-live-the-semantic-data-warehouse_b23245|creationDate|2011-10-01 +http://semanticweb.com/down-with-the-data-warehouse-long-live-the-semantic-data-warehouse_b23245|tag|http://www.semanlink.net/tag/data_warehouse +http://semanticweb.com/down-with-the-data-warehouse-long-live-the-semantic-data-warehouse_b23245|title|Down with the Data Warehouse! Long Live the Semantic Data Warehouse! - semanticweb.com +http://semanticweb.com/down-with-the-data-warehouse-long-live-the-semantic-data-warehouse_b23245|creationTime|2011-10-01T13:36:06Z +http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4478|creationDate|2011-12-17 +http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4478|tag|http://www.semanlink.net/tag/automobile +http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4478|tag|http://www.semanlink.net/tag/martin_hepp +http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4478|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4478|title|KEYNOTE Panel: Semantics in the Automotive Industry +http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4478|creationTime|2011-12-17T12:08:22Z +https://arxiv.org/abs/1709.02840|creationDate|2017-09-26 +https://arxiv.org/abs/1709.02840|tag|http://www.semanlink.net/tag/introduction +https://arxiv.org/abs/1709.02840|tag|http://www.semanlink.net/tag/machine_learning +https://arxiv.org/abs/1709.02840|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1709.02840|arxiv_author|Osvaldo Simeone +https://arxiv.org/abs/1709.02840|title|[1709.02840] A Brief Introduction to Machine Learning for Engineers +https://arxiv.org/abs/1709.02840|creationTime|2017-09-26T14:08:05Z +https://arxiv.org/abs/1709.02840|arxiv_summary|"This monograph aims at providing an introduction to key concepts, algorithms, +and theoretical results in machine learning. The treatment concentrates on +probabilistic models for supervised and unsupervised learning problems. It +introduces fundamental concepts and algorithms by building on first principles, +while also exposing the reader to more advanced topics with extensive pointers +to the literature, within a unified notation and mathematical framework. The +material is organized according to clearly defined categories, such as +discriminative and generative models, frequentist and Bayesian approaches, +exact and approximate inference, as well as directed and undirected models. +This monograph is meant as an entry point for researchers with a background in +probability and linear algebra." 
+https://arxiv.org/abs/1709.02840|arxiv_firstAuthor|Osvaldo Simeone +https://arxiv.org/abs/1709.02840|arxiv_updated|2018-05-17T18:28:52Z +https://arxiv.org/abs/1709.02840|arxiv_title|A Brief Introduction to Machine Learning for Engineers +https://arxiv.org/abs/1709.02840|arxiv_published|2017-09-08T19:21:26Z +https://arxiv.org/abs/1709.02840|arxiv_num|1709.02840 +http://groups.drupal.org/semantic-web|creationDate|2011-09-16 +http://groups.drupal.org/semantic-web|tag|http://www.semanlink.net/tag/drupal_rdf +http://groups.drupal.org/semantic-web|title|Semantic Web groups.drupal.org +http://groups.drupal.org/semantic-web|creationTime|2011-09-16T13:53:46Z +http://www.lesahel.org/index.php/culture/item/2999-r%C3%A9tro-festival-de-la-jeunesse-au-ccog--les-succ%C3%A8s-dhier-expos%C3%A9s-%C3%A0-la-jeune-g%C3%A9n%C3%A9ration|creationDate|2016-03-26 +http://www.lesahel.org/index.php/culture/item/2999-r%C3%A9tro-festival-de-la-jeunesse-au-ccog--les-succ%C3%A8s-dhier-expos%C3%A9s-%C3%A0-la-jeune-g%C3%A9n%C3%A9ration|tag|http://www.semanlink.net/tag/niger_festival_de_la_jeunesse +http://www.lesahel.org/index.php/culture/item/2999-r%C3%A9tro-festival-de-la-jeunesse-au-ccog--les-succ%C3%A8s-dhier-expos%C3%A9s-%C3%A0-la-jeune-g%C3%A9n%C3%A9ration|title|Rétro-festival de la jeunesse au CCOG : Les succès d'hier exposés à la jeune génération +http://www.lesahel.org/index.php/culture/item/2999-r%C3%A9tro-festival-de-la-jeunesse-au-ccog--les-succ%C3%A8s-dhier-expos%C3%A9s-%C3%A0-la-jeune-g%C3%A9n%C3%A9ration|creationTime|2016-03-26T12:02:38Z +http://www.histropedia.com/|creationDate|2017-11-01 +http://www.histropedia.com/|tag|http://www.semanlink.net/tag/histropedia +http://www.histropedia.com/|comment|> Transforming Wikipedia and Wikidata into the world's first timeline of everything +http://www.histropedia.com/|title|Histropedia - The Timeline of Everything +http://www.histropedia.com/|creationTime|2017-11-01T13:54:40Z +http://www.snee.com/bobdc.blog/2007/08/some_great_w3c_explanations_of.html|creationDate|2007-08-06 +http://www.snee.com/bobdc.blog/2007/08/some_great_w3c_explanations_of.html|tag|http://www.semanlink.net/tag/owl_introduction +http://www.snee.com/bobdc.blog/2007/08/some_great_w3c_explanations_of.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2007/08/some_great_w3c_explanations_of.html|tag|http://www.semanlink.net/tag/ontologies +http://www.snee.com/bobdc.blog/2007/08/some_great_w3c_explanations_of.html|title|bobdc.blog: Some great W3C explanations of basic ontology concepts +http://www.snee.com/bobdc.blog/2007/08/some_great_w3c_explanations_of.html|creationTime|2007-08-06T18:37:01Z +http://lists.w3.org/Archives/Public/public-vocabs/2012Oct/0033.html|creationDate|2013-07-06 +http://lists.w3.org/Archives/Public/public-vocabs/2012Oct/0033.html|tag|http://www.semanlink.net/tag/dan_brickley +http://lists.w3.org/Archives/Public/public-vocabs/2012Oct/0033.html|tag|http://www.semanlink.net/tag/httprange_14 +http://lists.w3.org/Archives/Public/public-vocabs/2012Oct/0033.html|tag|http://www.semanlink.net/tag/schema_org +http://lists.w3.org/Archives/Public/public-vocabs/2012Oct/0033.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2012Oct/0033.html|comment|"I prefer to think that url being a property of Thing was an intentional move, because the author chose to sidestep the httpRange-14 issue, and let URLs identify any type of resource, as is the case in Roy Fielding's description of resource (Ed Summers)
+So I'd not read too much into 'url'. It's somewhere you can put a Web +identifier for the thing being described. As conventions for this in +the Web standards community mature, we should be able to be more +precise on this. (DanBri)" +http://lists.w3.org/Archives/Public/public-vocabs/2012Oct/0033.html|title|"Re: Meaning of property ""url"" from Ed Summers on 2012-10-23 (public-vocabs@w3.org from October 2012)" +http://lists.w3.org/Archives/Public/public-vocabs/2012Oct/0033.html|creationTime|2013-07-06T17:55:34Z +https://arxiv.org/abs/1803.05651|creationDate|2018-03-20 +https://arxiv.org/abs/1803.05651|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1803.05651|tag|http://www.semanlink.net/tag/word2vec +https://arxiv.org/abs/1803.05651|tag|http://www.semanlink.net/tag/word_embedding +https://arxiv.org/abs/1803.05651|arxiv_author|Maximilian Lam +https://arxiv.org/abs/1803.05651|comment|We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer +https://arxiv.org/abs/1803.05651|title|[1803.05651] Word2Bits - Quantized Word Vectors +https://arxiv.org/abs/1803.05651|creationTime|2018-03-20T17:36:21Z +https://arxiv.org/abs/1803.05651|arxiv_summary|"Word vectors require significant amounts of memory and storage, posing issues +to resource limited devices like mobile phones and GPUs. We show that high +quality quantized word vectors using 1-2 bits per parameter can be learned by +introducing a quantization function into Word2Vec. We furthermore show that +training with the quantization function acts as a regularizer. We train word +vectors on English Wikipedia (2017) and evaluate them on standard word +similarity and analogy tasks and on question answering (SQuAD). Our quantized +word vectors not only take 8-16x less space than full precision (32 bit) word +vectors but also outperform them on word similarity tasks and question +answering." +https://arxiv.org/abs/1803.05651|arxiv_firstAuthor|Maximilian Lam +https://arxiv.org/abs/1803.05651|arxiv_updated|2018-03-31T08:45:59Z +https://arxiv.org/abs/1803.05651|arxiv_title|Word2Bits - Quantized Word Vectors +https://arxiv.org/abs/1803.05651|arxiv_published|2018-03-15T09:21:34Z +https://arxiv.org/abs/1803.05651|arxiv_num|1803.05651 +http://edition.cnn.com/2012/01/31/tech/gaming-gadgets/apple-boycott-commentary/|creationDate|2012-05-04 +http://edition.cnn.com/2012/01/31/tech/gaming-gadgets/apple-boycott-commentary/|tag|http://www.semanlink.net/tag/foxconn +http://edition.cnn.com/2012/01/31/tech/gaming-gadgets/apple-boycott-commentary/|tag|http://www.semanlink.net/tag/iphone +http://edition.cnn.com/2012/01/31/tech/gaming-gadgets/apple-boycott-commentary/|tag|http://www.semanlink.net/tag/apple +http://edition.cnn.com/2012/01/31/tech/gaming-gadgets/apple-boycott-commentary/|comment|In other words, consumers don't want to pay more for iPhones and iPads than they already do just to ensure factory workers get better working conditions. It's all about money +http://edition.cnn.com/2012/01/31/tech/gaming-gadgets/apple-boycott-commentary/|title|Should you feel guilty for buying your iPhone? 
+http://edition.cnn.com/2012/01/31/tech/gaming-gadgets/apple-boycott-commentary/|creationTime|2012-05-04T01:13:44Z +https://towardsdatascience.com/what-a-disentangled-net-we-weave-representation-learning-in-vaes-pt-1-9e5dbc205bd1|creationDate|2018-05-29 +https://towardsdatascience.com/what-a-disentangled-net-we-weave-representation-learning-in-vaes-pt-1-9e5dbc205bd1|tag|http://www.semanlink.net/tag/generative_adversarial_network +https://towardsdatascience.com/what-a-disentangled-net-we-weave-representation-learning-in-vaes-pt-1-9e5dbc205bd1|tag|http://www.semanlink.net/tag/representation_learning +https://towardsdatascience.com/what-a-disentangled-net-we-weave-representation-learning-in-vaes-pt-1-9e5dbc205bd1|tag|http://www.semanlink.net/tag/variational_autoencoder_vae +https://towardsdatascience.com/what-a-disentangled-net-we-weave-representation-learning-in-vaes-pt-1-9e5dbc205bd1|title|What a Disentangled Net We Weave: Representation Learning in VAEs (Pt. 1) +https://towardsdatascience.com/what-a-disentangled-net-we-weave-representation-learning-in-vaes-pt-1-9e5dbc205bd1|creationTime|2018-05-29T15:01:26Z +https://fr.wikipedia.org/wiki/La_Fable_des_abeilles|creationDate|2017-09-19 +https://fr.wikipedia.org/wiki/La_Fable_des_abeilles|tag|http://www.semanlink.net/tag/economie +https://fr.wikipedia.org/wiki/La_Fable_des_abeilles|tag|http://www.semanlink.net/tag/liberalisme +https://fr.wikipedia.org/wiki/La_Fable_des_abeilles|tag|http://www.semanlink.net/tag/morale +https://fr.wikipedia.org/wiki/La_Fable_des_abeilles|comment|Mandeville soutient qu'une société ne peut avoir en même temps morale et prospérité et que le vice, entendu en tant que recherche de son intérêt propre, est la condition de la prospérité +https://fr.wikipedia.org/wiki/La_Fable_des_abeilles|title|Mandeville : la Fable des Abeilles +https://fr.wikipedia.org/wiki/La_Fable_des_abeilles|creationTime|2017-09-19T14:22:17Z +http://fr.wikipedia.org/wiki/Giovanni_Battista_Belzoni|creationDate|2008-01-04 +http://fr.wikipedia.org/wiki/Giovanni_Battista_Belzoni|tag|http://www.semanlink.net/tag/belzoni +http://fr.wikipedia.org/wiki/Giovanni_Battista_Belzoni|title|Giovanni Battista Belzoni - Wikipédia +http://fr.wikipedia.org/wiki/Giovanni_Battista_Belzoni|creationTime|2008-01-04T01:53:44Z +https://theclevermachine.wordpress.com/2014/09/06/derivation-error-backpropagation-gradient-descent-for-neural-networks/|creationDate|2016-01-14 +https://theclevermachine.wordpress.com/2014/09/06/derivation-error-backpropagation-gradient-descent-for-neural-networks/|tag|http://www.semanlink.net/tag/backpropagation +https://theclevermachine.wordpress.com/2014/09/06/derivation-error-backpropagation-gradient-descent-for-neural-networks/|title|Derivation: Error Backpropagation & Gradient Descent for Neural Networks The Clever Machine +https://theclevermachine.wordpress.com/2014/09/06/derivation-error-backpropagation-gradient-descent-for-neural-networks/|creationTime|2016-01-14T13:45:25Z +http://code.google.com/intl/fr-FR/web/ajaxcrawling/docs/getting-started.html|creationDate|2011-08-11 +http://code.google.com/intl/fr-FR/web/ajaxcrawling/docs/getting-started.html|tag|http://www.semanlink.net/tag/google +http://code.google.com/intl/fr-FR/web/ajaxcrawling/docs/getting-started.html|tag|http://www.semanlink.net/tag/hash_bang_uris +http://code.google.com/intl/fr-FR/web/ajaxcrawling/docs/getting-started.html|tag|http://www.semanlink.net/tag/ajax_applications +http://code.google.com/intl/fr-FR/web/ajaxcrawling/docs/getting-started.html|title|Making AJAX 
Applications Crawlable - Google Code +http://code.google.com/intl/fr-FR/web/ajaxcrawling/docs/getting-started.html|creationTime|2011-08-11T11:27:45Z +https://www.technologyreview.com/the-download/609857/while-us-workers-fear-automation-swedish-employees-welcome-it/|creationDate|2018-01-04 +https://www.technologyreview.com/the-download/609857/while-us-workers-fear-automation-swedish-employees-welcome-it/|tag|http://www.semanlink.net/tag/jobbotization +https://www.technologyreview.com/the-download/609857/while-us-workers-fear-automation-swedish-employees-welcome-it/|tag|http://www.semanlink.net/tag/travail +https://www.technologyreview.com/the-download/609857/while-us-workers-fear-automation-swedish-employees-welcome-it/|tag|http://www.semanlink.net/tag/suede +https://www.technologyreview.com/the-download/609857/while-us-workers-fear-automation-swedish-employees-welcome-it/|tag|http://www.semanlink.net/tag/robotisation +https://www.technologyreview.com/the-download/609857/while-us-workers-fear-automation-swedish-employees-welcome-it/|comment|> We won’t protect jobs. But we will protect workers +https://www.technologyreview.com/the-download/609857/while-us-workers-fear-automation-swedish-employees-welcome-it/|title|While U.S. Workers Fear Automation, Swedish Employees Welcome It - MIT Technology Review +https://www.technologyreview.com/the-download/609857/while-us-workers-fear-automation-swedish-employees-welcome-it/|creationTime|2018-01-04T01:35:20Z +http://flask.pocoo.org/|creationDate|2018-03-09 +http://flask.pocoo.org/|tag|http://www.semanlink.net/tag/flask +http://flask.pocoo.org/|title|Flask (A Python Microframework) +http://flask.pocoo.org/|creationTime|2018-03-09T13:54:21Z +http://www.institut-de-france.fr/education/serres.html|creationDate|2011-03-07 +http://www.institut-de-france.fr/education/serres.html|tag|http://www.semanlink.net/tag/michel_serres +http://www.institut-de-france.fr/education/serres.html|tag|http://www.semanlink.net/tag/education +http://www.institut-de-france.fr/education/serres.html|comment|Nos institutions luisent d’un éclat qui ressemble, aujourd’hui, à celui des constellations dont l’astrophysique nous apprit jadis qu’elles étaient mortes déjà depuis longtemps. +http://www.institut-de-france.fr/education/serres.html|title|Les nouveaux défis de l'éducation - Petite Poucette par M. Michel Serres +http://www.institut-de-france.fr/education/serres.html|creationTime|2011-03-07T08:59:09Z +https://arxiv.org/abs/1709.03856|creationDate|2018-05-13 +https://arxiv.org/abs/1709.03856|tag|http://www.semanlink.net/tag/nlp_facebook +https://arxiv.org/abs/1709.03856|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1709.03856|tag|http://www.semanlink.net/tag/starspace +https://arxiv.org/abs/1709.03856|arxiv_author|Adam Fisch +https://arxiv.org/abs/1709.03856|arxiv_author|Sumit Chopra +https://arxiv.org/abs/1709.03856|arxiv_author|Keith Adams +https://arxiv.org/abs/1709.03856|arxiv_author|Jason Weston +https://arxiv.org/abs/1709.03856|arxiv_author|Antoine Bordes +https://arxiv.org/abs/1709.03856|arxiv_author|Ledell Wu +https://arxiv.org/abs/1709.03856|title|[1709.03856] StarSpace: Embed All The Things! 
+https://arxiv.org/abs/1709.03856|creationTime|2018-05-13T17:51:48Z +https://arxiv.org/abs/1709.03856|arxiv_summary|"We present StarSpace, a general-purpose neural embedding model that can solve +a wide variety of problems: labeling tasks such as text classification, ranking +tasks such as information retrieval/web search, collaborative filtering-based +or content-based recommendation, embedding of multi-relational graphs, and +learning word, sentence or document level embeddings. In each case the model +works by embedding those entities comprised of discrete features and comparing +them against each other -- learning similarities dependent on the task. +Empirical results on a number of tasks show that StarSpace is highly +competitive with existing methods, whilst also being generally applicable to +new cases where those methods are not." +https://arxiv.org/abs/1709.03856|arxiv_firstAuthor|Ledell Wu +https://arxiv.org/abs/1709.03856|arxiv_updated|2017-11-21T02:59:57Z +https://arxiv.org/abs/1709.03856|arxiv_title|StarSpace: Embed All The Things! +https://arxiv.org/abs/1709.03856|arxiv_published|2017-09-12T14:16:56Z +https://arxiv.org/abs/1709.03856|arxiv_num|1709.03856 +https://youtu.be/KAzhAXjUG28|creationDate|2017-06-03 +https://youtu.be/KAzhAXjUG28|tag|http://www.semanlink.net/tag/horreur_economique +https://youtu.be/KAzhAXjUG28|tag|http://www.semanlink.net/tag/cinema_bresilien +https://youtu.be/KAzhAXjUG28|tag|http://www.semanlink.net/tag/court_metrage +https://youtu.be/KAzhAXjUG28|title|ilha das flores (filme curta metragem) +https://youtu.be/KAzhAXjUG28|creationTime|2017-06-03T17:33:36Z +http://designshack.co.uk/tutorials/10-css-form-examples|creationDate|2008-06-02 +http://designshack.co.uk/tutorials/10-css-form-examples|tag|http://www.semanlink.net/tag/css +http://designshack.co.uk/tutorials/10-css-form-examples|tag|http://www.semanlink.net/tag/forms +http://designshack.co.uk/tutorials/10-css-form-examples|tag|http://www.semanlink.net/tag/sample_code +http://designshack.co.uk/tutorials/10-css-form-examples|title|10 CSS Form Examples Design Shack +http://designshack.co.uk/tutorials/10-css-form-examples|creationTime|2008-06-02T22:14:43Z +http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html?showComment=1242257640000#c1729077015498462034|creationDate|2009-05-14 +http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html?showComment=1242257640000#c1729077015498462034|tag|http://www.semanlink.net/tag/fps_post +http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html?showComment=1242257640000#c1729077015498462034|tag|http://www.semanlink.net/tag/google_rich_snippets +http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html?showComment=1242257640000#c1729077015498462034|comment|"It looks like ""Welcome Google, really"" is (was) not in Google's index." +http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html?showComment=1242257640000#c1729077015498462034|title|Official Google Webmaster Central Blog: Introducing Rich Snippets - Welcome Google, really. 
+http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html?showComment=1242257640000#c1729077015498462034|creationTime|2009-05-14T01:56:16Z +http://www.offconvex.org/2016/02/14/word-embeddings-2/|creationDate|2019-03-20 +http://www.offconvex.org/2016/02/14/word-embeddings-2/|tag|http://www.semanlink.net/tag/sanjeev_arora +http://www.offconvex.org/2016/02/14/word-embeddings-2/|tag|http://www.semanlink.net/tag/word_embedding +http://www.offconvex.org/2016/02/14/word-embeddings-2/|comment|"second part for [this post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2015%2F12%2F12%2Fword-embeddings-1%2F) + +>- What properties of natural languages cause these low-dimensional embeddings to exist? +>- Why do low-dimensional embeddings work better at analogy solving than high dimensional embeddings? +>- Why do Semantic Relations correspond to Directions? + +" +http://www.offconvex.org/2016/02/14/word-embeddings-2/|relatedDoc|http://www.offconvex.org/2015/12/12/word-embeddings-1/ +http://www.offconvex.org/2016/02/14/word-embeddings-2/|title|Word Embeddings: Explaining their properties – Off the convex path (2016) +http://www.offconvex.org/2016/02/14/word-embeddings-2/|creationTime|2019-03-20T16:11:54Z +http://mobisocial.stanford.edu/musubi/public/|creationDate|2012-05-30 +http://mobisocial.stanford.edu/musubi/public/|tag|http://www.semanlink.net/tag/musubi +http://mobisocial.stanford.edu/musubi/public/|comment|"Manifesto
+Our principles are totally different from today's social networks whose business is to monetize users' data. We aim to create the best ecosystem for users, app developers, device manufacturers, and carriers. We believe openness and collaboration will lead to the best and biggest social network ever!
+The Concept
+Musubi is built on top of ESP - an Egocentric Social Platform whose social graphs are decentralized across phones and address books and supports encrypted messaging based on existing identities." +http://mobisocial.stanford.edu/musubi/public/|title|Musubi - A Mobile Social Network and App Platform +http://mobisocial.stanford.edu/musubi/public/|creationTime|2012-05-30T15:04:01Z +http://www.eclipsezone.com/eclipse/forums/t88527.html|creationDate|2007-10-22 +http://www.eclipsezone.com/eclipse/forums/t88527.html|tag|http://www.semanlink.net/tag/tomcat_in_eclipse +http://www.eclipsezone.com/eclipse/forums/t88527.html|tag|http://www.semanlink.net/tag/tomcat +http://www.eclipsezone.com/eclipse/forums/t88527.html|title|EclipseZone - Problems publishing webapp to Tomcat ... +http://www.eclipsezone.com/eclipse/forums/t88527.html|creationTime|2007-10-22T21:10:21Z +http://blog.restcase.com/restful-api-versioning-insights/|creationDate|2017-04-11 +http://blog.restcase.com/restful-api-versioning-insights/|tag|http://www.semanlink.net/tag/api +http://blog.restcase.com/restful-api-versioning-insights/|tag|http://www.semanlink.net/tag/rest +http://blog.restcase.com/restful-api-versioning-insights/|title|RESTFul API Versioning Insights +http://blog.restcase.com/restful-api-versioning-insights/|creationTime|2017-04-11T10:06:27Z +http://www.networkworld.com/community/node/33361|creationDate|2008-10-01 +http://www.networkworld.com/community/node/33361|tag|http://www.semanlink.net/tag/darpa +http://www.networkworld.com/community/node/33361|tag|http://www.semanlink.net/tag/mathematiques +http://www.networkworld.com/community/node/33361|title|DARPA Mathematical Challenges +http://www.networkworld.com/community/node/33361|creationTime|2008-10-01T23:09:10Z +http://lists.w3.org/Archives/Public/public-vocabs/2013Jan/0082.html|creationDate|2013-07-06 +http://lists.w3.org/Archives/Public/public-vocabs/2013Jan/0082.html|tag|http://www.semanlink.net/tag/mike_bergman +http://lists.w3.org/Archives/Public/public-vocabs/2013Jan/0082.html|tag|http://www.semanlink.net/tag/skos +http://lists.w3.org/Archives/Public/public-vocabs/2013Jan/0082.html|tag|http://www.semanlink.net/tag/schema_org +http://lists.w3.org/Archives/Public/public-vocabs/2013Jan/0082.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2013Jan/0082.html|comment|"As for SKOS, I think it is generally orthogonal to a schema.org +specification. Like any vocabulary on the Web, map to the concepts that +make sense for you. If ""about"" with namespace recognition were added to +schema.org, there would be no further questions about what to do with +SKOS or any other Web vocabulary. +
+schema.org != semantic web +" +http://lists.w3.org/Archives/Public/public-vocabs/2013Jan/0082.html|title|Re: Should we adopt SKOS? from Mike Bergman on 2013-01-11 (public-vocabs@w3.org from January 2013) +http://lists.w3.org/Archives/Public/public-vocabs/2013Jan/0082.html|creationTime|2013-07-06T18:46:50Z +http://colah.github.io/posts/2015-08-Backprop/|creationDate|2017-08-20 +http://colah.github.io/posts/2015-08-Backprop/|tag|http://www.semanlink.net/tag/christopher_olah +http://colah.github.io/posts/2015-08-Backprop/|tag|http://www.semanlink.net/tag/backpropagation +http://colah.github.io/posts/2015-08-Backprop/|title|Calculus on Computational Graphs: Backpropagation -- colah's blog +http://colah.github.io/posts/2015-08-Backprop/|creationTime|2017-08-20T20:10:27Z +http://arxiv.org/pdf/1608.04062v1.pdf|creationDate|2016-09-03 +http://arxiv.org/pdf/1608.04062v1.pdf|tag|http://www.semanlink.net/tag/backpropagation +http://arxiv.org/pdf/1608.04062v1.pdf|tag|http://www.semanlink.net/tag/deep_learning +http://arxiv.org/pdf/1608.04062v1.pdf|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_author|Zhangyang Wang +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_author|Shiyu Chang +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_author|Qing Ling +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_author|Thomas S. Huang +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_author|Shuai Huang +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_author|Xia Hu +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_author|Honghui Shi +http://arxiv.org/pdf/1608.04062v1.pdf|comment|This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. +http://arxiv.org/pdf/1608.04062v1.pdf|title|[1608.04062] Stacked Approximated Regression Machine: A Simple Deep Learning Approach +http://arxiv.org/pdf/1608.04062v1.pdf|creationTime|2016-09-03T12:32:25Z +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_summary|"With the agreement of my coauthors, I Zhangyang Wang would like to withdraw +the manuscript ""Stacked Approximated Regression Machine: A Simple Deep Learning +Approach"". Some experimental procedures were not included in the manuscript, +which makes a part of important claims not meaningful. In the relevant +research, I was solely responsible for carrying out the experiments; the other +coauthors joined in the discussions leading to the main algorithm. +Please see the updated text for more details." 
+http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_firstAuthor|Zhangyang Wang +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_updated|2016-09-08T17:46:13Z +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_title|Stacked Approximated Regression Machine: A Simple Deep Learning Approach +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_published|2016-08-14T05:35:11Z +http://arxiv.org/pdf/1608.04062v1.pdf|arxiv_num|1608.04062 +http://www.semweb.pro/view?rql=Any+T+GROUPBY+T+ORDERBY+ST+WHERE+T+location+L%2C+T+start_time+ST%2C+U+leads+T%2C+T+in_conf+C%2C+T+in_track+TR%2C+T+in_state+S%2C+T+title+TT%2C+S+name+%22accepted%22%2C+C+url_id+%22semwebpro2012%22&vid=primary|creationDate|2012-05-03 +http://www.semweb.pro/view?rql=Any+T+GROUPBY+T+ORDERBY+ST+WHERE+T+location+L%2C+T+start_time+ST%2C+U+leads+T%2C+T+in_conf+C%2C+T+in_track+TR%2C+T+in_state+S%2C+T+title+TT%2C+S+name+%22accepted%22%2C+C+url_id+%22semwebpro2012%22&vid=primary|title|SemWeb.Pro 2012 : présentations +http://www.semweb.pro/view?rql=Any+T+GROUPBY+T+ORDERBY+ST+WHERE+T+location+L%2C+T+start_time+ST%2C+U+leads+T%2C+T+in_conf+C%2C+T+in_track+TR%2C+T+in_state+S%2C+T+title+TT%2C+S+name+%22accepted%22%2C+C+url_id+%22semwebpro2012%22&vid=primary|creationTime|2012-05-03T00:07:30Z +https://gist.github.com/jonschlinkert/5854601|creationDate|2016-01-14 +https://gist.github.com/jonschlinkert/5854601|tag|http://www.semanlink.net/tag/cheat_sheet +https://gist.github.com/jonschlinkert/5854601|tag|http://www.semanlink.net/tag/markdown +https://gist.github.com/jonschlinkert/5854601|title|A better markdown cheatsheet +https://gist.github.com/jonschlinkert/5854601|creationTime|2016-01-14T02:36:01Z +http://www.guardian.co.uk/law/2010/sep/30/slavery-trawlers-europe|creationDate|2010-09-30 +http://www.guardian.co.uk/law/2010/sep/30/slavery-trawlers-europe|tag|http://www.semanlink.net/tag/esclavage +http://www.guardian.co.uk/law/2010/sep/30/slavery-trawlers-europe|tag|http://www.semanlink.net/tag/ressources_halieutiques +http://www.guardian.co.uk/law/2010/sep/30/slavery-trawlers-europe|tag|http://www.semanlink.net/tag/afrique_de_l_ouest +http://www.guardian.co.uk/law/2010/sep/30/slavery-trawlers-europe|tag|http://www.semanlink.net/tag/peche +http://www.guardian.co.uk/law/2010/sep/30/slavery-trawlers-europe|title|'Slavery' uncovered on trawlers fishing for Europe The Guardian +http://www.guardian.co.uk/law/2010/sep/30/slavery-trawlers-europe|creationTime|2010-09-30T13:44:20Z +http://www.pbs.org/kcet/shapeoflife/index.html|creationDate|2005-03-07 +http://www.pbs.org/kcet/shapeoflife/index.html|tag|http://www.semanlink.net/tag/pbs +http://www.pbs.org/kcet/shapeoflife/index.html|tag|http://www.semanlink.net/tag/evolution +http://www.pbs.org/kcet/shapeoflife/index.html|tag|http://www.semanlink.net/tag/animal +http://www.pbs.org/kcet/shapeoflife/index.html|comment|A revolutionary eight-part television series that reveals the dramatic rise of the animal kingdom through the breakthroughs of scientific discovery. +http://www.pbs.org/kcet/shapeoflife/index.html|title|The shape of life +http://www.im.ethz.ch/education/HS08/davenport_hbr_98.pdf|creationDate|2010-08-24 +http://www.im.ethz.ch/education/HS08/davenport_hbr_98.pdf|tag|http://www.semanlink.net/tag/enterprise_system +http://www.im.ethz.ch/education/HS08/davenport_hbr_98.pdf|comment|"If a company's systems are fragmented, its business is fragmented.
+Enter the enterprise system. A good ES is a technological tour de force. At its core is a single com- prehensive database.
+But the technical challenges, however great, are not the main reason enterprise systems fail. The biggest problems are business problems. Companies fail to reconcile the technological imperatives of the enterprise system with the business needs of the enterprise itself... An enterprise system imposes its own logic on company's strategy, culture, and organization.
+How similar can our information flows and our processes be to those of our competitors before we begin to undermine our own sources of differentiation in the market?
+But they never labeled the ES project as simply a technology initiative. Rather, they viewed it as an opportunity to take a fresh look at the company's strategy and organization. + + + + + + +" +http://www.im.ethz.ch/education/HS08/davenport_hbr_98.pdf|title|Putting the Enterprise into the Enterprise System by Thomas H. Davenport +http://www.im.ethz.ch/education/HS08/davenport_hbr_98.pdf|creationTime|2010-08-24T10:48:01Z +http://events.linkeddata.org/ldow2011/papers/ldow2011-paper12-ashraf.pdf|creationDate|2011-04-05 +http://events.linkeddata.org/ldow2011/papers/ldow2011-paper12-ashraf.pdf|tag|http://www.semanlink.net/tag/ldow2011 +http://events.linkeddata.org/ldow2011/papers/ldow2011-paper12-ashraf.pdf|tag|http://www.semanlink.net/tag/goodrelations +http://events.linkeddata.org/ldow2011/papers/ldow2011-paper12-ashraf.pdf|tag|http://www.semanlink.net/tag/richard_cyganiak +http://events.linkeddata.org/ldow2011/papers/ldow2011-paper12-ashraf.pdf|comment|"Experimental results +highlight that early publishers of structured eCommerce data +benefit more due to structured data being more readily search +engine indexable, but the lack of available product ontologies and +product master datasheets is impeding the creation of a +semantically interlinked eCommerce Web." +http://events.linkeddata.org/ldow2011/papers/ldow2011-paper12-ashraf.pdf|title|Open eBusiness Ontology Usage: Investigating Community Implementation of GoodRelations +http://events.linkeddata.org/ldow2011/papers/ldow2011-paper12-ashraf.pdf|creationTime|2011-04-05T10:31:16Z +http://arxiv.org/abs/1601.01272|creationDate|2016-01-09 +http://arxiv.org/abs/1601.01272|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/1601.01272|tag|http://www.semanlink.net/tag/lstm_networks +http://arxiv.org/abs/1601.01272|tag|http://www.semanlink.net/tag/recurrent_neural_network +http://arxiv.org/abs/1601.01272|tag|http://www.semanlink.net/tag/rnn_based_language_model +http://arxiv.org/abs/1601.01272|arxiv_author|Arianna Bisazza +http://arxiv.org/abs/1601.01272|arxiv_author|Ke Tran +http://arxiv.org/abs/1601.01272|arxiv_author|Christof Monz +http://arxiv.org/abs/1601.01272|comment|"> Recurrent Neural Networks (RNN) have obtained excellent result in many natural language processing (NLP) tasks. However, understanding and interpreting the source of this success remains a challenge. +> +> In this paper, we propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only amplifies the power of RNN but also facilitates our understanding of its internal functioning and allows us to discover underlying patterns in data. +> +> We demonstrate the power of RMN on language modeling and sentence completion tasks. +> +> On language modeling, RMN outperforms Long Short-Term Memory (LSTM) network on three large German, Italian, and English dataset. Additionally we perform in-depth analysis of various linguistic dimensions that RMN captures. On Sentence Completion Challenge, for which it is essential to capture sentence coherence, our RMN obtains 69.2% accuracy, surpassing the previous state-of-the-art by a large margin. +" +http://arxiv.org/abs/1601.01272|title|[1601.01272] Recurrent Memory Networks for Language Modeling +http://arxiv.org/abs/1601.01272|creationTime|2016-01-09T00:35:09Z +http://arxiv.org/abs/1601.01272|arxiv_summary|"Recurrent Neural Networks (RNN) have obtained excellent result in many +natural language processing (NLP) tasks. However, understanding and +interpreting the source of this success remains a challenge. 
+In this paper, we
+propose Recurrent Memory Network (RMN), a novel RNN architecture, that not only
+amplifies the power of RNN but also facilitates our understanding of its
+internal functioning and allows us to discover underlying patterns in data. We
+demonstrate the power of RMN on language modeling and sentence completion
+tasks. On language modeling, RMN outperforms Long Short-Term Memory (LSTM)
+network on three large German, Italian, and English dataset. Additionally we
+perform in-depth analysis of various linguistic dimensions that RMN captures.
+On Sentence Completion Challenge, for which it is essential to capture sentence
+coherence, our RMN obtains 69.2% accuracy, surpassing the previous
+state-of-the-art by a large margin."
+http://arxiv.org/abs/1601.01272|arxiv_firstAuthor|Ke Tran
+http://arxiv.org/abs/1601.01272|arxiv_updated|2016-04-22T11:13:11Z
+http://arxiv.org/abs/1601.01272|arxiv_title|Recurrent Memory Networks for Language Modeling
+http://arxiv.org/abs/1601.01272|arxiv_published|2016-01-06T18:44:07Z
+http://arxiv.org/abs/1601.01272|arxiv_num|1601.01272
+http://alias-i.com/lingpipe/|creationDate|2017-05-23
+http://alias-i.com/lingpipe/|tag|http://www.semanlink.net/tag/nlp_tools
+http://alias-i.com/lingpipe/|comment|java, not free
+http://alias-i.com/lingpipe/|title|LingPipe
+http://alias-i.com/lingpipe/|creationTime|2017-05-23T11:48:43Z
+http://www.futurecrimesbook.com/|creationDate|2019-05-06
+http://www.futurecrimesbook.com/|tag|http://www.semanlink.net/tag/livre_a_lire
+http://www.futurecrimesbook.com/|title|Future Crimes
+http://www.futurecrimesbook.com/|creationTime|2019-05-06T07:50:50Z
+http://www.apple.com/html5/|creationDate|2010-09-03
+http://www.apple.com/html5/|tag|http://www.semanlink.net/tag/demo
+http://www.apple.com/html5/|tag|http://www.semanlink.net/tag/html5
+http://www.apple.com/html5/|tag|http://www.semanlink.net/tag/apple
+http://www.apple.com/html5/|title|Apple - HTML5
+http://www.apple.com/html5/|creationTime|2010-09-03T22:14:26Z
+http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/b-cottereau.html|creationDate|2018-12-06
+http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/b-cottereau.html|tag|http://www.semanlink.net/tag/computational_neuroscience
+http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/b-cottereau.html|tag|http://www.semanlink.net/tag/images_stereoscopiques
+http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/b-cottereau.html|tag|http://www.semanlink.net/tag/vision
+http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/b-cottereau.html|comment|[paper](/doc/?uri=http%3A%2F%2Fwww.jneurosci.org%2Fcontent%2F38%2F44%2F9563)
+http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/b-cottereau.html|relatedDoc|http://www.jneurosci.org/content/38/44/9563
+http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/b-cottereau.html|title|Des modèles informatiques pour expliquer le développement de la vision (Biologie au CNRS)
+http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/b-cottereau.html|creationTime|2018-12-06T08:43:26Z
+http://www.theguardian.com/commentisfree/2015/jan/04/internet-freedom-china-russia-us-google-microsoft-digital-sovereignty|creationDate|2015-01-04
+http://www.theguardian.com/commentisfree/2015/jan/04/internet-freedom-china-russia-us-google-microsoft-digital-sovereignty|tag|http://www.semanlink.net/tag/internet
+http://www.theguardian.com/commentisfree/2015/jan/04/internet-freedom-china-russia-us-google-microsoft-digital-sovereignty|tag|http://www.semanlink.net/tag/chine +http://www.theguardian.com/commentisfree/2015/jan/04/internet-freedom-china-russia-us-google-microsoft-digital-sovereignty|tag|http://www.semanlink.net/tag/evgeny_morozov +http://www.theguardian.com/commentisfree/2015/jan/04/internet-freedom-china-russia-us-google-microsoft-digital-sovereignty|title|Who’s the true enemy of internet freedom - China, Russia, or the US? Evgeny Morozov +http://www.theguardian.com/commentisfree/2015/jan/04/internet-freedom-china-russia-us-google-microsoft-digital-sovereignty|creationTime|2015-01-04T11:08:49Z +http://swse.deri.org/faq.html|creationDate|2007-07-20 +http://swse.deri.org/faq.html|tag|http://www.semanlink.net/tag/faq +http://swse.deri.org/faq.html|tag|http://www.semanlink.net/tag/swse +http://swse.deri.org/faq.html|title|SWSE FAQ +http://swse.deri.org/faq.html|creationTime|2007-07-20T00:48:56Z +http://www.pbs.org/wnet/secrets/case_plague/clues.html|creationDate|2005-11-01 +http://www.pbs.org/wnet/secrets/case_plague/clues.html|tag|http://www.semanlink.net/tag/peste +http://www.pbs.org/wnet/secrets/case_plague/clues.html|tag|http://www.semanlink.net/tag/sida +http://www.pbs.org/wnet/secrets/case_plague/clues.html|title|Secrets of the Dead . Mystery of the Black Death-2 PBS +http://www.dbooth.org/2007/rdf-and-soa/rdf-and-soa-paper.htm|creationDate|2007-03-12 +http://www.dbooth.org/2007/rdf-and-soa/rdf-and-soa-paper.htm|tag|http://www.semanlink.net/tag/rdf_and_soa +http://www.dbooth.org/2007/rdf-and-soa/rdf-and-soa-paper.htm|tag|http://www.semanlink.net/tag/good +http://www.dbooth.org/2007/rdf-and-soa/rdf-and-soa-paper.htm|tag|http://www.semanlink.net/tag/semantic_web_services_vs_soap +http://www.dbooth.org/2007/rdf-and-soa/rdf-and-soa-paper.htm|comment|"The purpose of this paper is not to propose a particular standardization effort for refining the existing XML-based, WS* approach to Web services, but to suggest another way of thinking about Service Oriented Architecture (SOA) in terms of RDF message exchange, even when custom XML formats are used for message serialization.
+Workshop on Web of Services for Enterprise Computing, W3C" +http://www.dbooth.org/2007/rdf-and-soa/rdf-and-soa-paper.htm|title|RDF and SOA +http://www.dbooth.org/2007/rdf-and-soa/rdf-and-soa-paper.htm|creationTime|2007-03-12T23:30:13Z +http://www.aclweb.org/anthology/Q16-1028|creationDate|2018-08-28 +http://www.aclweb.org/anthology/Q16-1028|tag|http://www.semanlink.net/tag/sanjeev_arora +http://www.aclweb.org/anthology/Q16-1028|tag|http://www.semanlink.net/tag/word_embedding +http://www.aclweb.org/anthology/Q16-1028|comment|"[Related YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DKR46z_V0BVw) + +Based on a generative model (random walk on words involving a latent discourse vector), +a rigorous justification for models such +as word2vec and GloVe, including the hyperparameter +choices for the latter, and a mathematical explanation for why these word embeddings +allow analogies to be solved using linear +algebra. +" +http://www.aclweb.org/anthology/Q16-1028|relatedDoc|https://www.youtube.com/watch?v=KR46z_V0BVw +http://www.aclweb.org/anthology/Q16-1028|title|A Latent Variable Model Approach to PMI-based Word Embeddings (2016) +http://www.aclweb.org/anthology/Q16-1028|creationTime|2018-08-28T11:25:11Z +https://arxiv.org/abs/1807.06036|creationDate|2019-04-23 +https://arxiv.org/abs/1807.06036|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1807.06036|tag|http://www.semanlink.net/tag/loosely_formatted_text +https://arxiv.org/abs/1807.06036|tag|http://www.semanlink.net/tag/entity_linking +https://arxiv.org/abs/1807.06036|arxiv_author|Sam Shah +https://arxiv.org/abs/1807.06036|arxiv_author|Pete Skomoroch +https://arxiv.org/abs/1807.06036|arxiv_author|Michael Conover +https://arxiv.org/abs/1807.06036|arxiv_author|Matthew Hayes +https://arxiv.org/abs/1807.06036|arxiv_author|Scott Blackburn +https://arxiv.org/abs/1807.06036|comment|"a production system for entity disambiguation on messy tex, based +on probabilistic tokenization and context-dependent document embeddings + +""Probabilistic tokenization"": uses the method described [here](/doc/2019/07/mining_quality_phrases_from_mas)" +https://arxiv.org/abs/1807.06036|relatedDoc|http://www.semanlink.net/doc/2019/07/mining_quality_phrases_from_mas +https://arxiv.org/abs/1807.06036|title|[1807.06036] Pangloss: Fast Entity Linking in Noisy Text Environments +https://arxiv.org/abs/1807.06036|creationTime|2019-04-23T23:58:40Z +https://arxiv.org/abs/1807.06036|arxiv_summary|"Entity linking is the task of mapping potentially ambiguous terms in text to +their constituent entities in a knowledge base like Wikipedia. This is useful +for organizing content, extracting structured data from textual documents, and +in machine learning relevance applications like semantic search, knowledge +graph construction, and question answering. Traditionally, this work has +focused on text that has been well-formed, like news articles, but in common +real world datasets such as messaging, resumes, or short-form social media, +non-grammatical, loosely-structured text adds a new dimension to this problem. +This paper presents Pangloss, a production system for entity disambiguation +on noisy text. Pangloss combines a probabilistic linear-time key phrase +identification algorithm with a semantic similarity engine based on +context-dependent document embeddings to achieve better than state-of-the-art +results (>5% in F1) compared to other research or commercially available +systems. 
+In addition, Pangloss leverages a local embedded database with a
+tiered architecture to house its statistics and metadata, which allows rapid
+disambiguation in streaming contexts and on-device disambiguation in low-memory
+environments such as mobile phones."
+https://arxiv.org/abs/1807.06036|arxiv_firstAuthor|Michael Conover
+https://arxiv.org/abs/1807.06036|arxiv_updated|2018-07-16T18:04:08Z
+https://arxiv.org/abs/1807.06036|arxiv_title|Pangloss: Fast Entity Linking in Noisy Text Environments
+https://arxiv.org/abs/1807.06036|arxiv_published|2018-07-16T18:04:08Z
+https://arxiv.org/abs/1807.06036|arxiv_num|1807.06036
+http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html|creationDate|2009-05-13
+http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html|tag|http://www.semanlink.net/tag/guha
+http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html|tag|http://www.semanlink.net/tag/google_rich_snippets
+http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html|title|Official Google Webmaster Central Blog: Introducing Rich Snippets
+http://googlewebmastercentral.blogspot.com/2009/05/introducing-rich-snippets.html|creationTime|2009-05-13T22:19:46Z
+http://www.w3.org/2007/08/pyRdfa/|creationDate|2011-01-18
+http://www.w3.org/2007/08/pyRdfa/|tag|http://www.semanlink.net/tag/rdfa
+http://www.w3.org/2007/08/pyRdfa/|tag|http://www.semanlink.net/tag/rdfa_tool
+http://www.w3.org/2007/08/pyRdfa/|tag|http://www.semanlink.net/tag/dev_tools
+http://www.w3.org/2007/08/pyRdfa/|tag|http://www.semanlink.net/tag/ivan_herman
+http://www.w3.org/2007/08/pyRdfa/|title|RDFa Distiller and Parser
+http://www.w3.org/2007/08/pyRdfa/|creationTime|2011-01-18T12:08:04Z
+http://developer.apple.com/library/mac/#releasenotes/Java/JavaSnowLeopardUpdate3LeopardUpdate8RN/NewandNoteworthy/NewandNoteworthy.html|creationDate|2010-10-26
+http://developer.apple.com/library/mac/#releasenotes/Java/JavaSnowLeopardUpdate3LeopardUpdate8RN/NewandNoteworthy/NewandNoteworthy.html|tag|http://www.semanlink.net/tag/apple_sucks
+http://developer.apple.com/library/mac/#releasenotes/Java/JavaSnowLeopardUpdate3LeopardUpdate8RN/NewandNoteworthy/NewandNoteworthy.html|tag|http://www.semanlink.net/tag/mac_os_x
+http://developer.apple.com/library/mac/#releasenotes/Java/JavaSnowLeopardUpdate3LeopardUpdate8RN/NewandNoteworthy/NewandNoteworthy.html|tag|http://www.semanlink.net/tag/java
+http://developer.apple.com/library/mac/#releasenotes/Java/JavaSnowLeopardUpdate3LeopardUpdate8RN/NewandNoteworthy/NewandNoteworthy.html|comment|As of the release of Java for Mac OS X 10.6 Update 3, the Java runtime ported by Apple and that ships with Mac OS X is deprecated. Developers should not rely on the Apple-supplied Java runtime being present in future versions of Mac OS X.
+http://developer.apple.com/library/mac/#releasenotes/Java/JavaSnowLeopardUpdate3LeopardUpdate8RN/NewandNoteworthy/NewandNoteworthy.html|title|Java for Mac OS X 10.6 Update 3 and 10.5 Update 8 Release Notes: New and Noteworthy +http://developer.apple.com/library/mac/#releasenotes/Java/JavaSnowLeopardUpdate3LeopardUpdate8RN/NewandNoteworthy/NewandNoteworthy.html|creationTime|2010-10-26T00:31:32Z +http://www.nytimes.com/2012/06/01/opinion/krugman-the-austerity-agenda.html|creationDate|2012-06-03 +http://www.nytimes.com/2012/06/01/opinion/krugman-the-austerity-agenda.html|tag|http://www.semanlink.net/tag/paul_krugman +http://www.nytimes.com/2012/06/01/opinion/krugman-the-austerity-agenda.html|tag|http://www.semanlink.net/tag/crise_de_la_dette +http://www.nytimes.com/2012/06/01/opinion/krugman-the-austerity-agenda.html|comment|In America and in Britain, the push for austerity is not really about debt and deficits. It’s about using deficit panic as an excuse to dismantle social programs. +http://www.nytimes.com/2012/06/01/opinion/krugman-the-austerity-agenda.html|title|The Austerity Agenda - NYTimes.com - Paul Krugman +http://www.nytimes.com/2012/06/01/opinion/krugman-the-austerity-agenda.html|creationTime|2012-06-03T11:51:37Z +https://towardsdatascience.com/learning-note-starspace-for-multi-label-text-classification-81de0e8fca53|creationDate|2018-09-11 +https://towardsdatascience.com/learning-note-starspace-for-multi-label-text-classification-81de0e8fca53|tag|http://www.semanlink.net/tag/nlp_facebook +https://towardsdatascience.com/learning-note-starspace-for-multi-label-text-classification-81de0e8fca53|tag|http://www.semanlink.net/tag/starspace +https://towardsdatascience.com/learning-note-starspace-for-multi-label-text-classification-81de0e8fca53|tag|http://www.semanlink.net/tag/text_multi_label_classification +https://towardsdatascience.com/learning-note-starspace-for-multi-label-text-classification-81de0e8fca53|title|[Learning Note] StarSpace For Multi-label Text Classification +https://towardsdatascience.com/learning-note-starspace-for-multi-label-text-classification-81de0e8fca53|creationTime|2018-09-11T00:58:07Z +http://manning.com/ingersoll/|creationDate|2014-03-15 +http://manning.com/ingersoll/|tag|http://www.semanlink.net/tag/grant_ingersoll +http://manning.com/ingersoll/|tag|http://www.semanlink.net/tag/livre +http://manning.com/ingersoll/|tag|http://www.semanlink.net/tag/nlp +http://manning.com/ingersoll/|comment|Taming Text is a hands-on, example-driven guide to working with unstructured text in the context of real-world applications. 
+http://manning.com/ingersoll/|title|Manning: Taming Text +http://manning.com/ingersoll/|creationTime|2014-03-15T13:46:11Z +https://bpcatalog.dev.java.net/nonav/ajax/progress-bar/design.html|creationDate|2006-10-06 +https://bpcatalog.dev.java.net/nonav/ajax/progress-bar/design.html|tag|http://www.semanlink.net/tag/ajax +https://bpcatalog.dev.java.net/nonav/ajax/progress-bar/design.html|tag|http://www.semanlink.net/tag/dev_tips +https://bpcatalog.dev.java.net/nonav/ajax/progress-bar/design.html|title|Progress Bar Using AJAX +http://pfia2018.loria.fr/journee-tal/|creationDate|2018-07-11 +http://pfia2018.loria.fr/journee-tal/|tag|http://www.semanlink.net/tag/pfia_2018 +http://pfia2018.loria.fr/journee-tal/|tag|http://www.semanlink.net/tag/nlp_conference +http://pfia2018.loria.fr/journee-tal/|title|Journee:TAL PFIA 2018 +http://pfia2018.loria.fr/journee-tal/|creationTime|2018-07-11T13:39:42Z +https://medium.com/@mcriddy/semantic-web-design-92ef35f66c9f|creationDate|2014-09-16 +https://medium.com/@mcriddy/semantic-web-design-92ef35f66c9f|tag|http://www.semanlink.net/tag/ui +https://medium.com/@mcriddy/semantic-web-design-92ef35f66c9f|tag|http://www.semanlink.net/tag/semantic_web +https://medium.com/@mcriddy/semantic-web-design-92ef35f66c9f|tag|http://www.semanlink.net/tag/responsive_design +https://medium.com/@mcriddy/semantic-web-design-92ef35f66c9f|tag|http://www.semanlink.net/tag/devices +https://medium.com/@mcriddy/semantic-web-design-92ef35f66c9f|tag|http://www.semanlink.net/tag/killer_app +https://medium.com/@mcriddy/semantic-web-design-92ef35f66c9f|comment|The killer app for the semantic web is allowing emerging devices to access the web in ways best suited for their UI +https://medium.com/@mcriddy/semantic-web-design-92ef35f66c9f|title|Semantic Web design The limits of responsive design and the API-driven web +https://medium.com/@mcriddy/semantic-web-design-92ef35f66c9f|creationTime|2014-09-16T19:01:07Z +https://www.dataquest.io/blog/data-science-quickstart-with-docker/|creationDate|2015-11-22 +https://www.dataquest.io/blog/data-science-quickstart-with-docker/|tag|http://www.semanlink.net/tag/python_4_data_science +https://www.dataquest.io/blog/data-science-quickstart-with-docker/|tag|http://www.semanlink.net/tag/docker +https://www.dataquest.io/blog/data-science-quickstart-with-docker/|title|How to setup a data science environment in minutes using Docker and Jupyter +https://www.dataquest.io/blog/data-science-quickstart-with-docker/|creationTime|2015-11-22T18:01:52Z +http://www.pbs.org/wgbh/nova/nature/slime-mold-smarts.html|creationDate|2013-04-06 +http://www.pbs.org/wgbh/nova/nature/slime-mold-smarts.html|tag|http://www.semanlink.net/tag/slime_mold +http://www.pbs.org/wgbh/nova/nature/slime-mold-smarts.html|tag|http://www.semanlink.net/tag/pbs_program +http://www.pbs.org/wgbh/nova/nature/slime-mold-smarts.html|comment|The slime mold Physarum polycephalum is a single cell without a brain, yet it can make surprisingly complicated decisions. 
+http://www.pbs.org/wgbh/nova/nature/slime-mold-smarts.html|title|NOVA Slime Mold Smarts
+http://www.pbs.org/wgbh/nova/nature/slime-mold-smarts.html|creationTime|2013-04-06T21:36:26Z
+https://www.topbots.com/most-important-ai-research-papers-2018/|creationDate|2018-12-05
+https://www.topbots.com/most-important-ai-research-papers-2018/|tag|http://www.semanlink.net/tag/artificial_intelligence
+https://www.topbots.com/most-important-ai-research-papers-2018/|tag|http://www.semanlink.net/tag/survey
+https://www.topbots.com/most-important-ai-research-papers-2018/|title|Easy-To-Read Summary of Important AI Research Papers of 2018
+https://www.topbots.com/most-important-ai-research-papers-2018/|creationTime|2018-12-05T12:56:02Z
+https://research.googleblog.com/2018/04/introducing-semantic-experiences-with.html|creationDate|2018-04-14
+https://research.googleblog.com/2018/04/introducing-semantic-experiences-with.html|tag|http://www.semanlink.net/tag/google_research
+https://research.googleblog.com/2018/04/introducing-semantic-experiences-with.html|tag|http://www.semanlink.net/tag/information_visualization
+https://research.googleblog.com/2018/04/introducing-semantic-experiences-with.html|tag|http://www.semanlink.net/tag/embeddings
+https://research.googleblog.com/2018/04/introducing-semantic-experiences-with.html|tag|http://www.semanlink.net/tag/nlp
+https://research.googleblog.com/2018/04/introducing-semantic-experiences-with.html|tag|http://www.semanlink.net/tag/ray_kurzweil
+https://research.googleblog.com/2018/04/introducing-semantic-experiences-with.html|title|Research Blog: Introducing Semantic Experiences with Talk to Books and Semantris
+https://research.googleblog.com/2018/04/introducing-semantic-experiences-with.html|creationTime|2018-04-14T11:41:25Z
+https://stats385.github.io/|creationDate|2017-11-09
+https://stats385.github.io/|tag|http://www.semanlink.net/tag/stanford
+https://stats385.github.io/|tag|http://www.semanlink.net/tag/deep_learning
+https://stats385.github.io/|title|Theories of Deep Learning (STATS 385)
+https://stats385.github.io/|creationTime|2017-11-09T10:20:08Z
+https://jalammar.github.io/illustrated-bert/|creationDate|2018-12-03
+https://jalammar.github.io/illustrated-bert/|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp
+https://jalammar.github.io/illustrated-bert/|tag|http://www.semanlink.net/tag/ulmfit
+https://jalammar.github.io/illustrated-bert/|tag|http://www.semanlink.net/tag/contextualised_word_representations
+https://jalammar.github.io/illustrated-bert/|tag|http://www.semanlink.net/tag/bert
+https://jalammar.github.io/illustrated-bert/|tag|http://www.semanlink.net/tag/elmo
+https://jalammar.github.io/illustrated-bert/|title|The Illustrated BERT, ELMo, and co. (How NLP Cracked Transfer Learning) – Jay Alammar
+https://jalammar.github.io/illustrated-bert/|creationTime|2018-12-03T15:08:17Z
+https://sutheeblog.wordpress.com/2017/03/20/a-biterm-topic-model-for-short-texts/|creationDate|2017-06-07
+https://sutheeblog.wordpress.com/2017/03/20/a-biterm-topic-model-for-short-texts/|tag|http://www.semanlink.net/tag/biterm_topic_model
+https://sutheeblog.wordpress.com/2017/03/20/a-biterm-topic-model-for-short-texts/|title|A Biterm Topic Model for Short Texts
+https://sutheeblog.wordpress.com/2017/03/20/a-biterm-topic-model-for-short-texts/|creationTime|2017-06-07T18:34:34Z
+http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf|creationDate|2014-04-23
+http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf|tag|http://www.semanlink.net/tag/algorithmes
+http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf|tag|http://www.semanlink.net/tag/latent_dirichlet_allocation
+http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf|tag|http://www.semanlink.net/tag/machine_learning
+http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf|tag|http://www.semanlink.net/tag/topic_modeling
+http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf|tag|http://www.semanlink.net/tag/sanjeev_arora
+http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf|tag|http://www.semanlink.net/tag/sparse_dictionary_learning
+http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf|comment|"> Modern machine learning algorithms can extract useful information from text, images and videos. All these applications involve solving NP-hard problems in average case using heuristics. What properties of the input allow it to be solved effciently? Theoretically analyzing the heuristics is very challenging. Few results were known.
+>
+> This thesis takes a different approach: we identify natural properties of the input, then design new algorithms that provably works assuming the input has these properties. We are able to give new, provable and sometimes practical algorithms for learning tasks related to text corpus, images and social networks.
+>
+>...In theory, the assumptions in this thesis help us understand why intractable problems in machine learning can often be solved; in practice, the results suggest inherently new approaches for machine learning.
+
+Advisor: [Sanjeev Arora](tag:sanjeev_arora)"
+http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf|title|Provable Algorithms for Machine Learning Problems by Rong Ge.
+http://dataspace.princeton.edu/jspui/bitstream/88435/dsp019k41zd62n/1/Ge_princeton_0181D_10819.pdf|creationTime|2014-04-23T22:21:47Z +http://www2009.eprints.org/92/1/p911.pdf|creationDate|2016-01-04 +http://www2009.eprints.org/92/1/p911.pdf|tag|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www2009.eprints.org/92/1/p911.pdf|tag|http://www.semanlink.net/tag/coupling +http://www2009.eprints.org/92/1/p911.pdf|tag|http://www.semanlink.net/tag/web_services +http://www2009.eprints.org/92/1/p911.pdf|comment|This paper presents a systematic study of the degree of coupling found in service-oriented systems +http://www2009.eprints.org/92/1/p911.pdf|title|Why is the Web Loosely Coupled? A Multi-Faceted Metric for Service Design +http://www2009.eprints.org/92/1/p911.pdf|creationTime|2016-01-04T11:51:02Z +http://rdfa.info/wiki/Tools|creationDate|2011-01-18 +http://rdfa.info/wiki/Tools|tag|http://www.semanlink.net/tag/tools +http://rdfa.info/wiki/Tools|tag|http://www.semanlink.net/tag/rdfa +http://rdfa.info/wiki/Tools|title|Tools - RDFaWiki +http://rdfa.info/wiki/Tools|creationTime|2011-01-18T12:26:11Z +http://www.newscientist.com/article.ns?id=dn7470&print=true|creationDate|2005-06-06 +http://www.newscientist.com/article.ns?id=dn7470&print=true|tag|http://www.semanlink.net/tag/ibm +http://www.newscientist.com/article.ns?id=dn7470&print=true|tag|http://www.semanlink.net/tag/cerveau +http://www.newscientist.com/article.ns?id=dn7470&print=true|title|Mission to build a simulated brain begins +http://www.snee.com/bobdc.blog/2009/02/getting-started-with-sesame.html|creationDate|2009-02-13 +http://www.snee.com/bobdc.blog/2009/02/getting-started-with-sesame.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2009/02/getting-started-with-sesame.html|tag|http://www.semanlink.net/tag/sesame +http://www.snee.com/bobdc.blog/2009/02/getting-started-with-sesame.html|title|Getting started with Sesame - bobdc.blog +http://www.snee.com/bobdc.blog/2009/02/getting-started-with-sesame.html|creationTime|2009-02-13T18:18:30Z +http://solr-vs-elasticsearch.com/|creationDate|2015-03-11 +http://solr-vs-elasticsearch.com/|tag|http://www.semanlink.net/tag/elasticsearch +http://solr-vs-elasticsearch.com/|tag|http://www.semanlink.net/tag/solr +http://solr-vs-elasticsearch.com/|title|Apache Solr vs ElasticSearch - the Feature Smackdown! 
+http://solr-vs-elasticsearch.com/|creationTime|2015-03-11T18:20:29Z +http://www.elearnmag.org/subpage.cfm?section=articles&article=77-1|creationDate|2011-04-04 +http://www.elearnmag.org/subpage.cfm?section=articles&article=77-1|tag|http://www.semanlink.net/tag/e_learning +http://www.elearnmag.org/subpage.cfm?section=articles&article=77-1|tag|http://www.semanlink.net/tag/semantic_web_assisted_learning +http://www.elearnmag.org/subpage.cfm?section=articles&article=77-1|title|eLearn Magazine: The Semantic Web and E-learning +http://www.elearnmag.org/subpage.cfm?section=articles&article=77-1|creationTime|2011-04-04T15:56:08Z +http://www.semergence.com/2008/03/23/why-flickr-doesnt-do-foaf/|creationDate|2008-03-29 +http://www.semergence.com/2008/03/23/why-flickr-doesnt-do-foaf/|tag|http://www.semanlink.net/tag/flickr +http://www.semergence.com/2008/03/23/why-flickr-doesnt-do-foaf/|tag|http://www.semanlink.net/tag/foaf +http://www.semergence.com/2008/03/23/why-flickr-doesnt-do-foaf/|tag|http://www.semanlink.net/tag/privacy_and_internet +http://www.semergence.com/2008/03/23/why-flickr-doesnt-do-foaf/|title|Semergence » Blog Archive » Why Flickr Doesn’t Do FOAF +http://www.semergence.com/2008/03/23/why-flickr-doesnt-do-foaf/|creationTime|2008-03-29T18:51:04Z +http://www.google.com/support/webmasters/bin/answer.py?answer=40349&ctx=related|creationDate|2007-12-21 +http://www.google.com/support/webmasters/bin/answer.py?answer=40349&ctx=related|tag|http://www.semanlink.net/tag/google +http://www.google.com/support/webmasters/bin/answer.py?answer=40349&ctx=related|title|How can I create a Google-friendly site? +http://www.google.com/support/webmasters/bin/answer.py?answer=40349&ctx=related|creationTime|2007-12-21T11:48:44Z +https://arxiv.org/abs/1806.04411|creationDate|2019-04-11 +https://arxiv.org/abs/1806.04411|tag|http://www.semanlink.net/tag/conditional_random_field +https://arxiv.org/abs/1806.04411|tag|http://www.semanlink.net/tag/named_entity_recognition +https://arxiv.org/abs/1806.04411|tag|http://www.semanlink.net/tag/information_retrieval +https://arxiv.org/abs/1806.04411|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1806.04411|arxiv_author|John Foley +https://arxiv.org/abs/1806.04411|arxiv_author|Sheikh Muhammad Sarwar +https://arxiv.org/abs/1806.04411|arxiv_author|James Allan +https://arxiv.org/abs/1806.04411|comment|"**""Named Entity Search (NES)""** + +> We propose exploring **named entity recognition as a search task**, where the named entity class of interest is a query, and entities of that class are the relevant ""documents"". What should that query look like? Can we even perform NER-style labeling with tens of labels? This study presents an exploration of CRF-based NER models with handcrafted features and of how we might transform them into search queries. + +> We do not propose this as a replacement +for NER, but as something to be used for an ephemeral or contextual +class of entity, when it does not make sense to label hundreds or +thousands of instances to learn a classifier" +https://arxiv.org/abs/1806.04411|title|[1806.04411] Named Entity Recognition with Extremely Limited Data +https://arxiv.org/abs/1806.04411|creationTime|2019-04-11T23:40:34Z +https://arxiv.org/abs/1806.04411|arxiv_summary|"Traditional information retrieval treats named entity recognition as a +pre-indexing corpus annotation task, allowing entity tags to be indexed and +used during search. 
+Named entity taggers themselves are typically trained on
+thousands or tens of thousands of examples labeled by humans.
+However, there is a long tail of named entities classes, and for these cases,
+labeled data may be impossible to find or justify financially. We propose
+exploring named entity recognition as a search task, where the named entity
+class of interest is a query, and entities of that class are the relevant
+""documents"". What should that query look like? Can we even perform NER-style
+labeling with tens of labels? This study presents an exploration of CRF-based
+NER models with handcrafted features and of how we might transform them into
+search queries."
+https://arxiv.org/abs/1806.04411|arxiv_firstAuthor|John Foley
+https://arxiv.org/abs/1806.04411|arxiv_updated|2018-06-13T17:12:27Z
+https://arxiv.org/abs/1806.04411|arxiv_title|Named Entity Recognition with Extremely Limited Data
+https://arxiv.org/abs/1806.04411|arxiv_published|2018-06-12T09:33:23Z
+https://arxiv.org/abs/1806.04411|arxiv_num|1806.04411
+http://searchengineland.com/bing-britannica-partnership-123930|creationDate|2013-01-24
+http://searchengineland.com/bing-britannica-partnership-123930|tag|http://www.semanlink.net/tag/bing
+http://searchengineland.com/bing-britannica-partnership-123930|tag|http://www.semanlink.net/tag/google_knowledge_graph
+http://searchengineland.com/bing-britannica-partnership-123930|title|Bing Gets Its Own Knowledge Graph Via Britannica Partnership
+http://searchengineland.com/bing-britannica-partnership-123930|creationTime|2013-01-24T17:41:41Z
+http://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|creationDate|2015-02-25
+http://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|tag|http://www.semanlink.net/tag/json
+http://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|tag|http://www.semanlink.net/tag/stack_overflow
+http://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|tag|http://www.semanlink.net/tag/javascript
+http://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|title|How can I pretty-print JSON using JavaScript? - Stack Overflow
+http://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|creationTime|2015-02-25T15:40:13Z
+http://www.technorati.com/|creationDate|2005-04-29
+http://www.technorati.com/|tag|http://www.semanlink.net/tag/tagging
+http://www.technorati.com/|title|http://www.technorati.com
+http://www.technorati.com/|creationTime|2005-04-29T22:00:00Z
+http://www.wired.com/wired/archive/14.04/collide.html|creationDate|2006-03-24
+http://www.wired.com/wired/archive/14.04/collide.html|tag|http://www.semanlink.net/tag/metaverse
+http://www.wired.com/wired/archive/14.04/collide.html|tag|http://www.semanlink.net/tag/massively_multiplayer_online_games
+http://www.wired.com/wired/archive/14.04/collide.html|comment|"But if you view your avatar as an extension of yourself, moving from EverQuest to World of Warcraft is like volunteering for a lobotomy. You have to surrender the skills you've cultivated, along with all your (other)worldly possessions.<br/>
+Within a decade, then, the notion of separate game worlds will probably seem like a quaint artifact of the frontier days of virtual reality. You'll still be able to engage in radically different experiences - from slaying orcs to cybersex - but they'll occur within a common architecture. The question is whether the underpinnings of this unified metaverse will be a proprietary product, like Windows, or an inclusive, open standard, like email and the Web. (The Open Source Metaverse Project is currently working on such a nonproprietary platform.)"
+http://www.wired.com/wired/archive/14.04/collide.html|title|Wired 14.04: When Virtual Worlds Collide
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17937|creationDate|2007-06-13
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17937|tag|http://www.semanlink.net/tag/synonym_uris
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17937|tag|http://www.semanlink.net/tag/chris_bizer
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17937|title|[Linking-open-data] synonym URIs
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17937|creationTime|2007-06-13T23:35:47Z
+http://www.conceptnet.io/|creationDate|2017-09-18
+http://www.conceptnet.io/|tag|http://www.semanlink.net/tag/word_embedding
+http://www.conceptnet.io/|tag|http://www.semanlink.net/tag/knowledge_graph
+http://www.conceptnet.io/|comment|An open, multilingual knowledge graph
+http://www.conceptnet.io/|title|ConceptNet
+http://www.conceptnet.io/|creationTime|2017-09-18T16:53:50Z
+http://www.lemonde.fr/technologies/article/2012/01/18/de-la-libellule-au-microdrone-comment-les-insectes-nous-apprennent-a-voler_1625200_651865.html|creationDate|2012-01-18
+http://www.lemonde.fr/technologies/article/2012/01/18/de-la-libellule-au-microdrone-comment-les-insectes-nous-apprennent-a-voler_1625200_651865.html|tag|http://www.semanlink.net/tag/insecte
+http://www.lemonde.fr/technologies/article/2012/01/18/de-la-libellule-au-microdrone-comment-les-insectes-nous-apprennent-a-voler_1625200_651865.html|title|De la libellule au microdrone : comment les insectes nous apprennent à voler
+http://www.lemonde.fr/technologies/article/2012/01/18/de-la-libellule-au-microdrone-comment-les-insectes-nous-apprennent-a-voler_1625200_651865.html|creationTime|2012-01-18T23:20:08Z
+http://www.lemonde.fr/technologies/article/2012/01/18/de-la-libellule-au-microdrone-comment-les-insectes-nous-apprennent-a-voler_1625200_651865.html|source|Le Monde
+http://www.lemonde.fr/technologies/article/2012/01/18/de-la-libellule-au-microdrone-comment-les-insectes-nous-apprennent-a-voler_1625200_651865.html|date|2012-01-18
+https://www.nytimes.com/2018/05/14/science/biohackers-gene-editing-virus.html|creationDate|2018-05-19
+https://www.nytimes.com/2018/05/14/science/biohackers-gene-editing-virus.html|tag|http://www.semanlink.net/tag/diy
+https://www.nytimes.com/2018/05/14/science/biohackers-gene-editing-virus.html|tag|http://www.semanlink.net/tag/gene_editing
+https://www.nytimes.com/2018/05/14/science/biohackers-gene-editing-virus.html|tag|http://www.semanlink.net/tag/biohackers
+https://www.nytimes.com/2018/05/14/science/biohackers-gene-editing-virus.html|title|As D.I.Y. Gene Editing Gains Popularity, ‘Someone Is Going to Get Hurt’ - The New York Times
+https://www.nytimes.com/2018/05/14/science/biohackers-gene-editing-virus.html|creationTime|2018-05-19T15:46:02Z
+http://www.lemonde.fr/pixels/article/2015/11/27/le-terrorisme-ne-se-nourrit-pas-de-la-technologie-mais-de-la-colere-et-de-l-ignorance_4818981_4408996.html|creationDate|2015-11-28
+http://www.lemonde.fr/pixels/article/2015/11/27/le-terrorisme-ne-se-nourrit-pas-de-la-technologie-mais-de-la-colere-et-de-l-ignorance_4818981_4408996.html|tag|http://www.semanlink.net/tag/terrorisme
+http://www.lemonde.fr/pixels/article/2015/11/27/le-terrorisme-ne-se-nourrit-pas-de-la-technologie-mais-de-la-colere-et-de-l-ignorance_4818981_4408996.html|tag|http://www.semanlink.net/tag/cryptography
+http://www.lemonde.fr/pixels/article/2015/11/27/le-terrorisme-ne-se-nourrit-pas-de-la-technologie-mais-de-la-colere-et-de-l-ignorance_4818981_4408996.html|title|« Le terrorisme ne se nourrit pas de la technologie, mais de la colère et de l’ignorance »
+http://www.lemonde.fr/pixels/article/2015/11/27/le-terrorisme-ne-se-nourrit-pas-de-la-technologie-mais-de-la-colere-et-de-l-ignorance_4818981_4408996.html|creationTime|2015-11-28T04:15:40Z
+http://flowplayer.org/tools/index.html|creationDate|2011-04-06
+http://flowplayer.org/tools/index.html|tag|http://www.semanlink.net/tag/web_tools
+http://flowplayer.org/tools/index.html|tag|http://www.semanlink.net/tag/jquery
+http://flowplayer.org/tools/index.html|title|jQuery TOOLS - The missing UI library for the Web
+http://flowplayer.org/tools/index.html|creationTime|2011-04-06T10:46:44Z
+http://fgiasson.com/blog/index.php/2016/09/28/using-cognonto-to-generate-domain-specific-word2vec-models/|creationDate|2016-09-29
+http://fgiasson.com/blog/index.php/2016/09/28/using-cognonto-to-generate-domain-specific-word2vec-models/|tag|http://www.semanlink.net/tag/word2vec
+http://fgiasson.com/blog/index.php/2016/09/28/using-cognonto-to-generate-domain-specific-word2vec-models/|tag|http://www.semanlink.net/tag/frederick_giasson
+http://fgiasson.com/blog/index.php/2016/09/28/using-cognonto-to-generate-domain-specific-word2vec-models/|comment|creating domain-specific training corpuses to use with word2vec can have a dramatic impact on the results and how results can be much more meaningful within the scope of that domain. Another advantage of the domain-specific training corpuses is that they create much smaller models.
+http://fgiasson.com/blog/index.php/2016/09/28/using-cognonto-to-generate-domain-specific-word2vec-models/|title|Using Cognonto to Generate Domain Specific word2vec Models Frederick Giasson +http://fgiasson.com/blog/index.php/2016/09/28/using-cognonto-to-generate-domain-specific-word2vec-models/|creationTime|2016-09-29T08:43:15Z +http://www.w3.org/TR/#tr_Linked_Data|creationDate|2014-09-14 +http://www.w3.org/TR/#tr_Linked_Data|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/TR/#tr_Linked_Data|tag|http://www.semanlink.net/tag/linked_data +http://www.w3.org/TR/#tr_Linked_Data|title|W3C - Linked Data: Standards and Drafts +http://www.w3.org/TR/#tr_Linked_Data|creationTime|2014-09-14T23:32:42Z +http://www.cliffsnotes.com/|creationDate|2011-12-28 +http://www.cliffsnotes.com/|tag|http://www.semanlink.net/tag/e_learning +http://www.cliffsnotes.com/|title|Get Homework Help with CliffsNotes Study Guides +http://www.cliffsnotes.com/|creationTime|2011-12-28T13:39:05Z +http://www.google.com/support/webmasters/bin/answer.py?answer=146750|creationDate|2011-02-09 +http://www.google.com/support/webmasters/bin/answer.py?answer=146750|tag|http://www.semanlink.net/tag/google_rich_snippets +http://www.google.com/support/webmasters/bin/answer.py?answer=146750|title|Marking up products for rich snippets +http://www.google.com/support/webmasters/bin/answer.py?answer=146750|creationTime|2011-02-09T01:06:33Z +http://www.proxml.be/users/paul/|creationDate|2010-12-16 +http://www.proxml.be/users/paul/|tag|http://www.semanlink.net/tag/semantic_web_blog +http://www.proxml.be/users/paul/|title|"""Living in the XML and RDF world""" +http://www.proxml.be/users/paul/|creationTime|2010-12-16T15:39:55Z +https://arxiv.org/abs/1803.01271|creationDate|2018-08-05 +https://arxiv.org/abs/1803.01271|tag|http://www.semanlink.net/tag/sequence_modeling_cnn_vs_rnn +https://arxiv.org/abs/1803.01271|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1803.01271|arxiv_author|Vladlen Koltun +https://arxiv.org/abs/1803.01271|arxiv_author|J. Zico Kolter +https://arxiv.org/abs/1803.01271|arxiv_author|Shaojie Bai +https://arxiv.org/abs/1803.01271|comment|We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks +https://arxiv.org/abs/1803.01271|title|[1803.01271] An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling +https://arxiv.org/abs/1803.01271|creationTime|2018-08-05T10:43:56Z +https://arxiv.org/abs/1803.01271|arxiv_summary|"For most deep learning practitioners, sequence modeling is synonymous with +recurrent networks. Yet recent results indicate that convolutional +architectures can outperform recurrent networks on tasks such as audio +synthesis and machine translation. Given a new sequence modeling task or +dataset, which architecture should one use? We conduct a systematic evaluation +of generic convolutional and recurrent architectures for sequence modeling. The +models are evaluated across a broad range of standard tasks that are commonly +used to benchmark recurrent networks. Our results indicate that a simple +convolutional architecture outperforms canonical recurrent networks such as +LSTMs across a diverse range of tasks and datasets, while demonstrating longer +effective memory. 
+We conclude that the common association between sequence
+modeling and recurrent networks should be reconsidered, and convolutional
+networks should be regarded as a natural starting point for sequence modeling
+tasks. To assist related work, we have made code available at
+http://github.com/locuslab/TCN ."
+https://arxiv.org/abs/1803.01271|arxiv_firstAuthor|Shaojie Bai
+https://arxiv.org/abs/1803.01271|arxiv_updated|2018-04-19T14:32:38Z
+https://arxiv.org/abs/1803.01271|arxiv_title|An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling
+https://arxiv.org/abs/1803.01271|arxiv_published|2018-03-04T00:20:29Z
+https://arxiv.org/abs/1803.01271|arxiv_num|1803.01271
+http://snap.stanford.edu/proj/embeddings-www/index.html#materials|creationDate|2018-05-05
+http://snap.stanford.edu/proj/embeddings-www/index.html#materials|tag|http://www.semanlink.net/tag/jure_leskovec
+http://snap.stanford.edu/proj/embeddings-www/index.html#materials|tag|http://www.semanlink.net/tag/graph_embeddings
+http://snap.stanford.edu/proj/embeddings-www/index.html#materials|tag|http://www.semanlink.net/tag/thewebconf_2018
+http://snap.stanford.edu/proj/embeddings-www/index.html#materials|comment|"Network representation learning (NRL): Approaches that automatically learn to encode network structure into low-dimensional embeddings, using techniques based on deep learning and nonlinear dimensionality reduction
+
+**Goal of representation learning for networks: efficient task-independant feature learning for ML in networks.** But it's hard. DL toolbox are designed for single sequences or grids (for instance CNN for images, RNN or word2vec are fixed size), but networks are far more complex!
+
+from the abstract:
+
+> In this tutorial, we will cover key advancements in NRL over the last decade, with an emphasis on fundamental advancements made in the last two years. We will discuss classic matrix factorization-based methods (e.g., Laplacian eigenmaps), random-walk based algorithms (e.g., DeepWalk and node2vec), as well as very recent advancements in graph convolutional networks (GCNs). We will cover methods to embed individual nodes (see [node embeddings](/tag/node_embeddings)) as well as approaches to embed entire (sub)graphs, and in doing so, we will present a unified framework for NRL.
+
+
+"
+http://snap.stanford.edu/proj/embeddings-www/index.html#materials|title|TUTORIAL: Representation Learning on Networks - TheWebConf 2018
+http://snap.stanford.edu/proj/embeddings-www/index.html#materials|creationTime|2018-05-05T13:31:59Z
+http://www.newscientist.com/article/dn25734-why-electric-car-maker-tesla-has-torn-up-its-patents.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.U59UbMZGuww|creationDate|2014-06-16
+http://www.newscientist.com/article/dn25734-why-electric-car-maker-tesla-has-torn-up-its-patents.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.U59UbMZGuww|tag|http://www.semanlink.net/tag/electric_car
+http://www.newscientist.com/article/dn25734-why-electric-car-maker-tesla-has-torn-up-its-patents.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.U59UbMZGuww|tag|http://www.semanlink.net/tag/patent
+http://www.newscientist.com/article/dn25734-why-electric-car-maker-tesla-has-torn-up-its-patents.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.U59UbMZGuww|tag|http://www.semanlink.net/tag/tesla_inc
+http://www.newscientist.com/article/dn25734-why-electric-car-maker-tesla-has-torn-up-its-patents.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.U59UbMZGuww|comment|Rival electric car makers are not the enemy – petrol stations are
+http://www.newscientist.com/article/dn25734-why-electric-car-maker-tesla-has-torn-up-its-patents.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.U59UbMZGuww|title|Why electric car maker Tesla has torn up its patents - opinion - 16 June 2014 - New Scientist
+http://www.newscientist.com/article/dn25734-why-electric-car-maker-tesla-has-torn-up-its-patents.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.U59UbMZGuww|creationTime|2014-06-16T22:39:00Z
+http://stackoverflow.com/questions/14013644/hosting-a-maven-repository-on-github|creationDate|2013-09-13
+http://stackoverflow.com/questions/14013644/hosting-a-maven-repository-on-github|tag|http://www.semanlink.net/tag/semanlink_dev
+http://stackoverflow.com/questions/14013644/hosting-a-maven-repository-on-github|tag|http://www.semanlink.net/tag/maven
+http://stackoverflow.com/questions/14013644/hosting-a-maven-repository-on-github|tag|http://www.semanlink.net/tag/stack_overflow
+http://stackoverflow.com/questions/14013644/hosting-a-maven-repository-on-github|tag|http://www.semanlink.net/tag/github
+http://stackoverflow.com/questions/14013644/hosting-a-maven-repository-on-github|title|Hosting a Maven repository on github - Stack Overflow
+http://stackoverflow.com/questions/14013644/hosting-a-maven-repository-on-github|creationTime|2013-09-13T15:15:51Z
+http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=3101|creationDate|2006-11-18
+http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=3101|tag|http://www.semanlink.net/tag/chiffres
+http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=3101|tag|http://www.semanlink.net/tag/pnud
+http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=3101|tag|http://www.semanlink.net/tag/niger
+http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=3101|tag|http://www.semanlink.net/tag/developpement_humain
+http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=3101|comment|Le rapport est effrayant : selon l’ONU, le revenu combiné des 500 personnes les plus riches du monde est supérieur à celui des 416 millions les plus pauvres. Le Programme des Nations unies pour le développement (PNUD) a rendu public, jeudi dernier, son rapport annuel sur l’Indice mondial du développement humain 2006. La Norvège arrive en tête, tandis que l’Afrique reste à la traîne. Elle stagne par rapport au reste du monde dont le développement humain « s’accélère ». Le Niger dans tout ça ? Dernier de la classe ! Le Niger s’accroche à sa place favorite, 177ème sur 177, roi des derniers. Selon le Pnud, « le fossé se creuse ». C’est peu dire !
+http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=3101|title|Indice du développement humain 2006. Espérance de vie, niveau d’éducation et le revenu par habitant: le Niger bon dernier
+http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=3101|source|http://www.planeteafrique.com/Republicain-Niger
+http://www.theguardian.com/commentisfree/2016/jan/31/cheap-cab-ride-uber-true-cost-google-wealth-taxation?CMP=twt_gu|creationDate|2016-01-31
+http://www.theguardian.com/commentisfree/2016/jan/31/cheap-cab-ride-uber-true-cost-google-wealth-taxation?CMP=twt_gu|tag|http://www.semanlink.net/tag/uber
+http://www.theguardian.com/commentisfree/2016/jan/31/cheap-cab-ride-uber-true-cost-google-wealth-taxation?CMP=twt_gu|tag|http://www.semanlink.net/tag/evgeny_morozov
+http://www.theguardian.com/commentisfree/2016/jan/31/cheap-cab-ride-uber-true-cost-google-wealth-taxation?CMP=twt_gu|comment|"Uber has so much money that, in at least some North American locations, it has been offering rides at rates so low that they didn’t even cover the combined cost of fuel and vehicle depreciation.<br/>
+The reason why Uber has so much cash is because, well, governments no longer do." +http://www.theguardian.com/commentisfree/2016/jan/31/cheap-cab-ride-uber-true-cost-google-wealth-taxation?CMP=twt_gu|title|Cheap cab ride? You must have missed Uber’s true cost Evgeny Morozov Opinion The Guardian +http://www.theguardian.com/commentisfree/2016/jan/31/cheap-cab-ride-uber-true-cost-google-wealth-taxation?CMP=twt_gu|creationTime|2016-01-31T13:09:47Z +http://www.ldodds.com/blog/2003/09/rdf-forms/|creationDate|2012-03-01 +http://www.ldodds.com/blog/2003/09/rdf-forms/|tag|http://www.semanlink.net/tag/rdf_tools +http://www.ldodds.com/blog/2003/09/rdf-forms/|comment|RForms, short for RDF Forms, is a javascript library that provides a way to edit and present RDF in form like interfaces. +http://www.ldodds.com/blog/2003/09/rdf-forms/|title|http://code.google.com/p/rforms/ +http://www.ldodds.com/blog/2003/09/rdf-forms/|creationTime|2012-03-01T12:48:11Z +http://www.programmableweb.com/|creationDate|2006-05-22 +http://www.programmableweb.com/|tag|http://www.semanlink.net/tag/mashups +http://www.programmableweb.com/|tag|http://www.semanlink.net/tag/web_2_0 +http://www.programmableweb.com/|comment|Keeping you up to date with the latest on mashups and the new Web 2.0 APIs +http://www.programmableweb.com/|title|ProgrammableWeb: Mashups and the Web as Platform +http://www.w3.org/community/rdfjs/wiki/Comparison_of_RDFJS_libraries|creationDate|2015-02-25 +http://www.w3.org/community/rdfjs/wiki/Comparison_of_RDFJS_libraries|tag|http://www.semanlink.net/tag/javascript_rdf +http://www.w3.org/community/rdfjs/wiki/Comparison_of_RDFJS_libraries|title|Comparison of RDFJS libraries - RDF JavaScript Libraries Community Group +http://www.w3.org/community/rdfjs/wiki/Comparison_of_RDFJS_libraries|creationTime|2015-02-25T10:49:31Z +https://arxiv.org/abs/1902.05309v1|creationDate|2019-02-18 +https://arxiv.org/abs/1902.05309v1|tag|http://www.semanlink.net/tag/sequence_labeling +https://arxiv.org/abs/1902.05309v1|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1902.05309v1|tag|http://www.semanlink.net/tag/transfer_learning +https://arxiv.org/abs/1902.05309v1|arxiv_author|Alessandro Moschitti +https://arxiv.org/abs/1902.05309v1|arxiv_author|Lingzhen Chen +https://arxiv.org/abs/1902.05309v1|comment|use-case ex: NER when the target data contains new categories +https://arxiv.org/abs/1902.05309v1|title|[1902.05309] Transfer Learning for Sequence Labeling Using Source Model and Target Data +https://arxiv.org/abs/1902.05309v1|creationTime|2019-02-18T08:30:22Z +https://arxiv.org/abs/1902.05309v1|arxiv_summary|"In this paper, we propose an approach for transferring the knowledge of a +neural model for sequence labeling, learned from the source domain, to a new +model trained on a target domain, where new label categories appear. Our +transfer learning (TL) techniques enable to adapt the source model using the +target data and new categories, without accessing to the source data. Our +solution consists in adding new neurons in the output layer of the target model +and transferring parameters from the source model, which are then fine-tuned +with the target data. Additionally, we propose a neural adapter to learn the +difference between the source and the target label distribution, which provides +additional important information to the target model. 
Our experiments on Named +Entity Recognition show that (i) the learned knowledge in the source model can +be effectively transferred when the target data contains new categories and +(ii) our neural adapter further improves such transfer." +https://arxiv.org/abs/1902.05309v1|arxiv_firstAuthor|Lingzhen Chen +https://arxiv.org/abs/1902.05309v1|arxiv_updated|2019-02-14T11:40:58Z +https://arxiv.org/abs/1902.05309v1|arxiv_title|Transfer Learning for Sequence Labeling Using Source Model and Target Data +https://arxiv.org/abs/1902.05309v1|arxiv_published|2019-02-14T11:40:58Z +https://arxiv.org/abs/1902.05309v1|arxiv_num|1902.05309 +http://blogs.zdnet.com/semantic-web/?p=131|creationDate|2008-06-22 +http://blogs.zdnet.com/semantic-web/?p=131|tag|http://www.semanlink.net/tag/www08 +http://blogs.zdnet.com/semantic-web/?p=131|tag|http://www.semanlink.net/tag/paul_miller +http://blogs.zdnet.com/semantic-web/?p=131|tag|http://www.semanlink.net/tag/tim_berners_lee +http://blogs.zdnet.com/semantic-web/?p=131|title|Sir Tim Berners-Lee addresses WWW2008 in Beijing +http://blogs.zdnet.com/semantic-web/?p=131|creationTime|2008-06-22T02:17:02Z +https://arxiv.org/abs/1903.05823|creationDate|2019-03-18 +https://arxiv.org/abs/1903.05823|tag|http://www.semanlink.net/tag/patent_landscaping +https://arxiv.org/abs/1903.05823|tag|http://www.semanlink.net/tag/graph_convolutional_networks +https://arxiv.org/abs/1903.05823|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1903.05823|tag|http://www.semanlink.net/tag/attention_is_all_you_need +https://arxiv.org/abs/1903.05823|arxiv_author|Sungchul Choi +https://arxiv.org/abs/1903.05823|arxiv_author|Eunjeong Lucy Park +https://arxiv.org/abs/1903.05823|arxiv_author|Seokkyu Choi +https://arxiv.org/abs/1903.05823|arxiv_author|Hyeonju Lee +https://arxiv.org/abs/1903.05823|comment|"a **transformer encoder** +for analyzing textual data present in patent documents +and a **graph convolutional network** for analyzing +patent metadata. + +A benchmarking dataset for patent landscaping +based on patent trends reports published by the +Korean Patent Office. Data acquisition using Google's BigQuery public datasets. + +10% improvement comparing to Google’s proposed Automated Patent Landscaping. + +Empirical analysis of the importance of features (text vs metadata, citations vs classification) + +" +https://arxiv.org/abs/1903.05823|title|[1903.05823] Deep Patent Landscaping Model Using Transformer and Graph Embedding +https://arxiv.org/abs/1903.05823|creationTime|2019-03-18T10:20:46Z +https://arxiv.org/abs/1903.05823|arxiv_summary|"Patent landscaping is a method used for searching related patents during a +research and development (R&D) project. To avoid the risk of patent +infringement and to follow current trends in technology, patent landscaping is +a crucial task required during the early stages of an R&D project. As the +process of patent landscaping requires advanced resources and can be tedious, +the demand for automated patent landscaping has been gradually increasing. +However, a shortage of well-defined benchmark datasets and comparable models +makes it difficult to find related research studies. In this paper, we propose +an automated patent landscaping model based on deep learning. To analyze the +text of patents, the proposed model uses a modified transformer structure. To +analyze the metadata of patents, we propose a graph embedding method that uses +a diffusion graph called Diff2Vec. 
Furthermore, we introduce four benchmark +datasets for comparing related research studies in patent landscaping. The +datasets are produced by querying Google BigQuery, based on a search formula +from a Korean patent attorney. The obtained results indicate that the proposed +model and datasets can attain state-of-the-art performance, as compared with +current patent landscaping models." +https://arxiv.org/abs/1903.05823|arxiv_firstAuthor|Seokkyu Choi +https://arxiv.org/abs/1903.05823|arxiv_updated|2019-11-22T00:54:27Z +https://arxiv.org/abs/1903.05823|arxiv_title|Deep Patent Landscaping Model Using Transformer and Graph Embedding +https://arxiv.org/abs/1903.05823|arxiv_published|2019-03-14T05:53:22Z +https://arxiv.org/abs/1903.05823|arxiv_num|1903.05823 +http://stackoverflow.com/questions/20242780/which-cdn-solutions-support-caching-with-content-negotiation|creationDate|2016-03-30 +http://stackoverflow.com/questions/20242780/which-cdn-solutions-support-caching-with-content-negotiation|tag|http://www.semanlink.net/tag/ruben_verborgh +http://stackoverflow.com/questions/20242780/which-cdn-solutions-support-caching-with-content-negotiation|tag|http://www.semanlink.net/tag/http_cache +http://stackoverflow.com/questions/20242780/which-cdn-solutions-support-caching-with-content-negotiation|tag|http://www.semanlink.net/tag/content_negotiation +http://stackoverflow.com/questions/20242780/which-cdn-solutions-support-caching-with-content-negotiation|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/20242780/which-cdn-solutions-support-caching-with-content-negotiation|title|http - Which CDN solutions support caching with content negotiation? - Stack Overflow +http://stackoverflow.com/questions/20242780/which-cdn-solutions-support-caching-with-content-negotiation|creationTime|2016-03-30T02:12:48Z +https://dandelion.eu/|creationDate|2019-04-24 +https://dandelion.eu/|tag|http://www.semanlink.net/tag/semantic_web_services +https://dandelion.eu/|tag|http://www.semanlink.net/tag/entity_linking +https://dandelion.eu/|tag|http://www.semanlink.net/tag/good +https://dandelion.eu/|tag|http://www.semanlink.net/tag/kg_and_nlp +https://dandelion.eu/|tag|http://www.semanlink.net/tag/nlp_as_a_service +https://dandelion.eu/|comment|> Dandelion API leverages its underlying Knowledge Graph, without relying on traditional NLP pipelines. This makes it faster, more scalable, easier to customize and natively language independent. +https://dandelion.eu/|title|Dandelion API - Semantic Text Analytics as a service +https://dandelion.eu/|creationTime|2019-04-24T13:46:53Z +https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf|creationDate|2018-02-13 +https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf|tag|http://www.semanlink.net/tag/word_embedding_compositionality +https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf|tag|http://www.semanlink.net/tag/embedding_evaluation +https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf|tag|http://www.semanlink.net/tag/word_embedding +https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf|comment|"(MS thesis, a [paper at TheWebConf 2018](/doc/?uri=https%3A%2F%2Fdoi.org%2F10.1145%2F3178876.3186007)) + +> This thesis explores a method to find better encodings of meaning a computer can work with. 
We specifically want to combine encodings of word meanings in such a way that a good encoding of their joint meaning is created. The act of combining multiple representations of meaning into a new representation of meaning is called semantic composition. + +Analysis of four word embeddings (Word2Vec, GloVe, fastText and Paragram) in terms of their semantic compositionality. A method to tune these embeddings towards better compositionality, using a simple neural network architecture with definitions and lemmas from WordNet. + +> Since dictionary definitions are semantically similar to their associated lemmas, they are the ideal candidate for our tuning method, as well as evaluating for compositionality. Our architecture allows for the embeddings to be composed using simple arithmetic operations, which makes these embeddings specifically suitable for production applications such as web search and data mining. We also explore more elaborate and involved compositional models, such as recurrent composition and convolutional composition. + + + +" +https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf|relatedDoc|https://doi.org/10.1145/3178876.3186007 +https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf|title|Improving the Compositionality of Word Embeddings (2017) +https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf|creationTime|2018-02-13T11:39:04Z +http://stackoverflow.com/questions/1043339/javascript-for-detecting-browser-language-preference|creationDate|2013-07-03 +http://stackoverflow.com/questions/1043339/javascript-for-detecting-browser-language-preference|tag|http://www.semanlink.net/tag/javascript +http://stackoverflow.com/questions/1043339/javascript-for-detecting-browser-language-preference|tag|http://www.semanlink.net/tag/localization +http://stackoverflow.com/questions/1043339/javascript-for-detecting-browser-language-preference|title|localization - JavaScript for detecting browser language preference - Stack Overflow +http://stackoverflow.com/questions/1043339/javascript-for-detecting-browser-language-preference|creationTime|2013-07-03T13:43:13Z +http://infomesh.net/2002/rdfinhtml/|creationDate|2006-12-31 +http://infomesh.net/2002/rdfinhtml/|tag|http://www.semanlink.net/tag/semantic_markup_in_html +http://infomesh.net/2002/rdfinhtml/|title|RDF in HTML: Approaches +http://lists.w3.org/Archives/Public/public-esw-thes/2012Nov/0003.html|creationDate|2012-11-12 +http://lists.w3.org/Archives/Public/public-esw-thes/2012Nov/0003.html|tag|http://www.semanlink.net/tag/fps_post +http://lists.w3.org/Archives/Public/public-esw-thes/2012Nov/0003.html|tag|http://www.semanlink.net/tag/skos +http://lists.w3.org/Archives/Public/public-esw-thes/2012Nov/0003.html|title|Re: Using DBpedia resources as skos:Concepts? 
from François-Paul Servant on 2012-11-12 (public-esw-thes@w3.org from November 2012) +http://lists.w3.org/Archives/Public/public-esw-thes/2012Nov/0003.html|creationTime|2012-11-12T15:31:52Z +https://github.com/cmader/qSKOS/wiki/Quality-Criteria-for-SKOS-Vocabularies|creationDate|2011-04-12 +https://github.com/cmader/qSKOS/wiki/Quality-Criteria-for-SKOS-Vocabularies|tag|http://www.semanlink.net/tag/skos +https://github.com/cmader/qSKOS/wiki/Quality-Criteria-for-SKOS-Vocabularies|title|Quality Criteria for SKOS Vocabularies - GitHub +https://github.com/cmader/qSKOS/wiki/Quality-Criteria-for-SKOS-Vocabularies|creationTime|2011-04-12T09:44:43Z +http://news.bbc.co.uk/1/hi/health/4222460.stm|creationDate|2005-09-12 +http://news.bbc.co.uk/1/hi/health/4222460.stm|tag|http://www.semanlink.net/tag/cerveau +http://news.bbc.co.uk/1/hi/health/4222460.stm|tag|http://www.semanlink.net/tag/evolution +http://news.bbc.co.uk/1/hi/health/4222460.stm|title|BBC NEWS - Health - 'Proof' our brains are evolving +http://news.bbc.co.uk/1/hi/health/4222460.stm|source|BBC +https://forums.fast.ai/t/fast-ai-with-google-colab/18882|creationDate|2018-12-02 +https://forums.fast.ai/t/fast-ai-with-google-colab/18882|tag|http://www.semanlink.net/tag/fast_ai +https://forums.fast.ai/t/fast-ai-with-google-colab/18882|tag|http://www.semanlink.net/tag/google_colab +https://forums.fast.ai/t/fast-ai-with-google-colab/18882|title|Fast.ai with Google Colab +https://forums.fast.ai/t/fast-ai-with-google-colab/18882|creationTime|2018-12-02T00:23:16Z +https://aclweb.org/anthology/papers/D/D15/D15-1077/|creationDate|2019-04-24 +https://aclweb.org/anthology/papers/D/D15/D15-1077/|tag|http://www.semanlink.net/tag/nlp_automotive +https://aclweb.org/anthology/papers/D/D15/D15-1077/|tag|http://www.semanlink.net/tag/nlp_short_texts +https://aclweb.org/anthology/papers/D/D15/D15-1077/|tag|http://www.semanlink.net/tag/target_entity_disambiguation +https://aclweb.org/anthology/papers/D/D15/D15-1077/|tag|http://www.semanlink.net/tag/list_only_entity_linking +https://aclweb.org/anthology/papers/D/D15/D15-1077/|comment|"disambiguation of a list of homogenous entities in short texts using names only. + +> Suppose that GM wants to collect tweets that talk about its cars... + +A graph based model, using inverted index" +https://aclweb.org/anthology/papers/D/D15/D15-1077/|title|Name List Only? 
Target Entity Disambiguation in Short Texts - ACL Anthology (2015) +https://aclweb.org/anthology/papers/D/D15/D15-1077/|creationTime|2019-04-24T16:15:50Z +http://passeurdesciences.blog.lemonde.fr/2013/03/10/homme-qui-ne-descendait-pas-d-adam/|creationDate|2013-03-10 +http://passeurdesciences.blog.lemonde.fr/2013/03/10/homme-qui-ne-descendait-pas-d-adam/|tag|http://www.semanlink.net/tag/paleoanthropology_genetics +http://passeurdesciences.blog.lemonde.fr/2013/03/10/homme-qui-ne-descendait-pas-d-adam/|tag|http://www.semanlink.net/tag/genetique_humaine +http://passeurdesciences.blog.lemonde.fr/2013/03/10/homme-qui-ne-descendait-pas-d-adam/|tag|http://www.semanlink.net/tag/variabilite_du_genome_humain +http://passeurdesciences.blog.lemonde.fr/2013/03/10/homme-qui-ne-descendait-pas-d-adam/|tag|http://www.semanlink.net/tag/cameroun +http://passeurdesciences.blog.lemonde.fr/2013/03/10/homme-qui-ne-descendait-pas-d-adam/|tag|http://www.semanlink.net/tag/origines_de_l_homme +http://passeurdesciences.blog.lemonde.fr/2013/03/10/homme-qui-ne-descendait-pas-d-adam/|title|L’homme qui ne descendait pas d’Adam Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2013/03/10/homme-qui-ne-descendait-pas-d-adam/|creationTime|2013-03-10T20:16:34Z +http://trueg.wordpress.com/2011/06/08/nepomuk-2-0-and-the-data-management-service/|creationDate|2011-06-08 +http://trueg.wordpress.com/2011/06/08/nepomuk-2-0-and-the-data-management-service/|tag|http://www.semanlink.net/tag/nepomuk +http://trueg.wordpress.com/2011/06/08/nepomuk-2-0-and-the-data-management-service/|title|Nepomuk 2.0 and the Data Management Service « Trueg's Blog +http://trueg.wordpress.com/2011/06/08/nepomuk-2-0-and-the-data-management-service/|creationTime|2011-06-08T23:11:20Z +http://fr.slideshare.net/tonyh/semweb-meetupmarch2013|creationDate|2013-06-03 +http://fr.slideshare.net/tonyh/semweb-meetupmarch2013|tag|http://www.semanlink.net/tag/nature_journal +http://fr.slideshare.net/tonyh/semweb-meetupmarch2013|tag|http://www.semanlink.net/tag/linked_data_publishing +http://fr.slideshare.net/tonyh/semweb-meetupmarch2013|title|Techniques used in RDF Data Publishing at Nature Publishing Group +http://fr.slideshare.net/tonyh/semweb-meetupmarch2013|creationTime|2013-06-03T14:49:23Z +http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1|creationDate|2015-06-20 +http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1|tag|http://www.semanlink.net/tag/question_raciale +http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1|tag|http://www.semanlink.net/tag/arte +http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1|tag|http://www.semanlink.net/tag/drogues +http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1|tag|http://www.semanlink.net/tag/usa +http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1|tag|http://www.semanlink.net/tag/war_on_drugs +http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1|comment|"Des corrélations observables entre ""guerre contre la drogue"" et question raciale : depuis le XIXe siècle, tout se passe comme si les lois anti-drogue servaient avant tout d’arguments électoraux et d’armes contre des populations noires ou immigrées considérées comme une menace économique. 
+" +http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1|title|Les États-Unis et la drogue ARTE +http://www.arte.tv/guide/fr/041187-000/les-etats-unis-et-la-drogue?autoplay=1|creationTime|2015-06-20T18:25:54Z +http://fr.wikipedia.org/wiki/Le_Monde_selon_Monsanto|creationDate|2010-01-06 +http://fr.wikipedia.org/wiki/Le_Monde_selon_Monsanto|tag|http://www.semanlink.net/tag/monsanto +http://fr.wikipedia.org/wiki/Le_Monde_selon_Monsanto|tag|http://www.semanlink.net/tag/documentaire +http://fr.wikipedia.org/wiki/Le_Monde_selon_Monsanto|tag|http://www.semanlink.net/tag/arte +http://fr.wikipedia.org/wiki/Le_Monde_selon_Monsanto|tag|http://www.semanlink.net/tag/ogm +http://fr.wikipedia.org/wiki/Le_Monde_selon_Monsanto|comment|De la dioxine aux OGM, une multinationale qui vous veut du bien +http://fr.wikipedia.org/wiki/Le_Monde_selon_Monsanto|title|Le Monde selon Monsanto +http://fr.wikipedia.org/wiki/Le_Monde_selon_Monsanto|creationTime|2010-01-06T00:11:45Z +http://bl.ocks.org/danja/ef309b9d3f392145c9c3|creationDate|2015-11-30 +http://bl.ocks.org/danja/ef309b9d3f392145c9c3|tag|http://www.semanlink.net/tag/javascript +http://bl.ocks.org/danja/ef309b9d3f392145c9c3|tag|http://www.semanlink.net/tag/conway_s_game_of_life +http://bl.ocks.org/danja/ef309b9d3f392145c9c3|tag|http://www.semanlink.net/tag/danny_ayers +http://bl.ocks.org/danja/ef309b9d3f392145c9c3|title|Life on the Ocean Wave +http://bl.ocks.org/danja/ef309b9d3f392145c9c3|creationTime|2015-11-30T12:33:41Z +http://lists.w3.org/Archives/Public/public-lod/2012Sep/0102.html|creationDate|2012-10-01 +http://lists.w3.org/Archives/Public/public-lod/2012Sep/0102.html|tag|http://www.semanlink.net/tag/fps_post +http://lists.w3.org/Archives/Public/public-lod/2012Sep/0102.html|tag|http://www.semanlink.net/tag/lod_mailing_list +http://lists.w3.org/Archives/Public/public-lod/2012Sep/0102.html|comment|"s :expensiveProp o.
+[a v:Link ; v:property :expensiveProp ; v:subject s ; v:object o.]
+my proposal" +http://lists.w3.org/Archives/Public/public-lod/2012Sep/0102.html|title|Expensive links in Linked Data from SERVANT Francois-Paul on 2012-09-28 (public-lod@w3.org from September 2012) +http://lists.w3.org/Archives/Public/public-lod/2012Sep/0102.html|creationTime|2012-10-01T14:45:02Z +http://java-source.net/open-source/html-parsers|creationDate|2005-03-05 +http://java-source.net/open-source/html-parsers|tag|http://www.semanlink.net/tag/java +http://java-source.net/open-source/html-parsers|tag|http://www.semanlink.net/tag/html_parsing +http://alex.nederlof.com/blog/2013/07/28/caching-using-annotations-with-jersey/|creationDate|2015-02-11 +http://alex.nederlof.com/blog/2013/07/28/caching-using-annotations-with-jersey/|tag|http://www.semanlink.net/tag/jersey_cache_control +http://alex.nederlof.com/blog/2013/07/28/caching-using-annotations-with-jersey/|title|Cache-Control using annotations with Jersey - LexTech +http://alex.nederlof.com/blog/2013/07/28/caching-using-annotations-with-jersey/|creationTime|2015-02-11T16:30:22Z +http://www.jeuneafrique.com/Article/ARTJAWEB20150109093737/mali-terrorisme-aqmi-jihad-terrorisme-mali-deux-ans-apr-s-serval-aqmi-reprend-ses-quartiers-au-nord-de-tombouctou.html|creationDate|2015-01-09 +http://www.jeuneafrique.com/Article/ARTJAWEB20150109093737/mali-terrorisme-aqmi-jihad-terrorisme-mali-deux-ans-apr-s-serval-aqmi-reprend-ses-quartiers-au-nord-de-tombouctou.html|tag|http://www.semanlink.net/tag/aqmi +http://www.jeuneafrique.com/Article/ARTJAWEB20150109093737/mali-terrorisme-aqmi-jihad-terrorisme-mali-deux-ans-apr-s-serval-aqmi-reprend-ses-quartiers-au-nord-de-tombouctou.html|tag|http://www.semanlink.net/tag/mali +http://www.jeuneafrique.com/Article/ARTJAWEB20150109093737/mali-terrorisme-aqmi-jihad-terrorisme-mali-deux-ans-apr-s-serval-aqmi-reprend-ses-quartiers-au-nord-de-tombouctou.html|title|Terrorisme Mali : deux ans après Serval, Aqmi reprend ses quartiers au Nord de Tombouctou Jeuneafrique.com - le premier site d'information et d'actualité sur l'Afrique +http://www.jeuneafrique.com/Article/ARTJAWEB20150109093737/mali-terrorisme-aqmi-jihad-terrorisme-mali-deux-ans-apr-s-serval-aqmi-reprend-ses-quartiers-au-nord-de-tombouctou.html|creationTime|2015-01-09T19:28:29Z +http://www.jspwiki.org|creationDate|2005-05-04 +http://www.jspwiki.org|tag|http://www.semanlink.net/tag/jsp +http://www.jspwiki.org|tag|http://www.semanlink.net/tag/wiki +https://plus.google.com/photos/109693896432057207496/albums/5887912756987863953/5887912753135956050?pid=5887912753135956050&oid=109693896432057207496|creationDate|2013-07-13 +https://plus.google.com/photos/109693896432057207496/albums/5887912756987863953/5887912753135956050?pid=5887912753135956050&oid=109693896432057207496|tag|http://www.semanlink.net/tag/nsa_spying_scandal +https://plus.google.com/photos/109693896432057207496/albums/5887912756987863953/5887912753135956050?pid=5887912753135956050&oid=109693896432057207496|tag|http://www.semanlink.net/tag/humour_noir +https://plus.google.com/photos/109693896432057207496/albums/5887912756987863953/5887912753135956050?pid=5887912753135956050&oid=109693896432057207496|tag|http://www.semanlink.net/tag/1984 +https://plus.google.com/photos/109693896432057207496/albums/5887912756987863953/5887912753135956050?pid=5887912753135956050&oid=109693896432057207496|tag|http://www.semanlink.net/tag/obama +https://plus.google.com/photos/109693896432057207496/albums/5887912756987863953/5887912753135956050?pid=5887912753135956050&oid=109693896432057207496|title|Yes we scan 
+https://plus.google.com/photos/109693896432057207496/albums/5887912756987863953/5887912753135956050?pid=5887912753135956050&oid=109693896432057207496|creationTime|2013-07-13T02:05:00Z +https://blog.risingstack.com/the-react-way-getting-started-tutorial/|creationDate|2016-07-09 +https://blog.risingstack.com/the-react-way-getting-started-tutorial/|tag|http://www.semanlink.net/tag/react_js +https://blog.risingstack.com/the-react-way-getting-started-tutorial/|tag|http://www.semanlink.net/tag/tutorial +https://blog.risingstack.com/the-react-way-getting-started-tutorial/|title|The React.js Way: Getting Started Tutorial RisingStack +https://blog.risingstack.com/the-react-way-getting-started-tutorial/|creationTime|2016-07-09T17:44:48Z +http://www.arte.tv/de/film/kino-auf-ARTE/die-filmtipps-der-woche----/Kino-auf-Arte/3839610,CmC=3839626.html|creationDate|2011-04-28 +http://www.arte.tv/de/film/kino-auf-ARTE/die-filmtipps-der-woche----/Kino-auf-Arte/3839610,CmC=3839626.html|tag|http://www.semanlink.net/tag/comedie +http://www.arte.tv/de/film/kino-auf-ARTE/die-filmtipps-der-woche----/Kino-auf-Arte/3839610,CmC=3839626.html|tag|http://www.semanlink.net/tag/film_allemand +http://www.arte.tv/de/film/kino-auf-ARTE/die-filmtipps-der-woche----/Kino-auf-Arte/3839610,CmC=3839626.html|tag|http://www.semanlink.net/tag/turquie +http://www.arte.tv/de/film/kino-auf-ARTE/die-filmtipps-der-woche----/Kino-auf-Arte/3839610,CmC=3839626.html|tag|http://www.semanlink.net/tag/immigration +http://www.arte.tv/de/film/kino-auf-ARTE/die-filmtipps-der-woche----/Kino-auf-Arte/3839610,CmC=3839626.html|comment|"Spielfilm, Deutschland/Turkei 2008
+Turcs, allemands, athées, homosexuels ou sans-papiers, les protagonistes de cette délicieuse comédie, qui se moque allègrement des préjugés, vont découvrir que prononcer la phrase rituelle ""Je le veux"" est plus compliqué qu'il n'y paraît. + +" +http://www.arte.tv/de/film/kino-auf-ARTE/die-filmtipps-der-woche----/Kino-auf-Arte/3839610,CmC=3839626.html|title|Evet - Ich will! +http://www.arte.tv/de/film/kino-auf-ARTE/die-filmtipps-der-woche----/Kino-auf-Arte/3839610,CmC=3839626.html|creationTime|2011-04-28T00:24:52Z +http://www.cominvent.com/2012/01/25/super-flexible-autocomplete-with-solr/|creationDate|2015-06-20 +http://www.cominvent.com/2012/01/25/super-flexible-autocomplete-with-solr/|tag|http://www.semanlink.net/tag/solr_autocomplete +http://www.cominvent.com/2012/01/25/super-flexible-autocomplete-with-solr/|title|Super flexible AutoComplete with Solr Cominvent AS - Enterprise search consultants +http://www.cominvent.com/2012/01/25/super-flexible-autocomplete-with-solr/|creationTime|2015-06-20T09:36:36Z +http://hyperscope.org/|creationDate|2006-09-12 +http://hyperscope.org/|tag|http://www.semanlink.net/tag/web +http://hyperscope.org/|tag|http://www.semanlink.net/tag/engelbart +http://hyperscope.org/|comment|A Web app based on tech legend Douglas Engelbart's 1968 NLS/Augment (oNLine System) +http://hyperscope.org/|title|HyperScope +http://www.w3.org/TR/2005/WD-swbp-skos-core-guide-20050510/|creationDate|2005-05-15 +http://www.w3.org/TR/2005/WD-swbp-skos-core-guide-20050510/|tag|http://www.semanlink.net/tag/skos +http://www.w3.org/TR/2005/WD-swbp-skos-core-guide-20050510/|title|SKOS Core Guide +http://www.w3.org/TR/2005/WD-swbp-skos-core-guide-20050510/|creationTime|2005-05-15T22:00:00Z +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-07.pdf|creationDate|2012-04-16 +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-07.pdf|tag|http://www.semanlink.net/tag/sparql +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-07.pdf|tag|http://www.semanlink.net/tag/sem_web_context +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-07.pdf|title|SPARQL Query Mediation over RDF Data Sources with Disparate Contexts +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-07.pdf|creationTime|2012-04-16T11:51:02Z +http://www.semanticweb.com/rdf/semantic_web_meets_bi_in_new_project_whose_partners_include_sap_sheffield_hallam_university_ontotext_171855.asp|creationDate|2010-09-24 +http://www.semanticweb.com/rdf/semantic_web_meets_bi_in_new_project_whose_partners_include_sap_sheffield_hallam_university_ontotext_171855.asp|tag|http://www.semanlink.net/tag/sap +http://www.semanticweb.com/rdf/semantic_web_meets_bi_in_new_project_whose_partners_include_sap_sheffield_hallam_university_ontotext_171855.asp|tag|http://www.semanlink.net/tag/business_intelligence_and_semantic_web +http://www.semanticweb.com/rdf/semantic_web_meets_bi_in_new_project_whose_partners_include_sap_sheffield_hallam_university_ontotext_171855.asp|title|Semantic Web Meets BI In New Project Whose Partners Include SAP, Sheffield Hallam University, Ontotext +http://www.semanticweb.com/rdf/semantic_web_meets_bi_in_new_project_whose_partners_include_sap_sheffield_hallam_university_ontotext_171855.asp|creationTime|2010-09-24T15:36:50Z +http://dannyayers.com/2006/07/11/hey-lazyweb--live|creationDate|2006-07-12 +http://dannyayers.com/2006/07/11/hey-lazyweb--live|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2006/07/11/hey-lazyweb--live|tag|http://www.semanlink.net/tag/simile_timeline 
+http://dannyayers.com/2006/07/11/hey-lazyweb--live|tag|http://www.semanlink.net/tag/live_clipboard +http://dannyayers.com/2006/07/11/hey-lazyweb--live|title|Hey LazyWeb! : Live Clipboard and Timeline +http://dannyayers.com/2006/07/11/hey-lazyweb--live|seeAlso|http://dannyayers.com/2006/07/09/sparql-timeline-ps +http://blogs.zdnet.com/semantic-web/?p=128|creationDate|2008-04-24 +http://blogs.zdnet.com/semantic-web/?p=128|tag|http://www.semanlink.net/tag/paul_miller +http://blogs.zdnet.com/semantic-web/?p=128|tag|http://www.semanlink.net/tag/fps_and_ldow2008 +http://blogs.zdnet.com/semantic-web/?p=128|tag|http://www.semanlink.net/tag/link_to_me +http://blogs.zdnet.com/semantic-web/?p=128|title|Linked Data on the Web, WWW2008 The Semantic Web ZDNet.com +http://blogs.zdnet.com/semantic-web/?p=128|creationTime|2008-04-24T09:48:13Z +http://shawfactor.com/2012/03/29/lh-tools-a-sparql-endpoint-for-wordpress/|creationDate|2012-04-25 +http://shawfactor.com/2012/03/29/lh-tools-a-sparql-endpoint-for-wordpress/|tag|http://www.semanlink.net/tag/wordpress +http://shawfactor.com/2012/03/29/lh-tools-a-sparql-endpoint-for-wordpress/|tag|http://www.semanlink.net/tag/sparql_endpoint +http://shawfactor.com/2012/03/29/lh-tools-a-sparql-endpoint-for-wordpress/|tag|http://www.semanlink.net/tag/benjamin_nowack +http://shawfactor.com/2012/03/29/lh-tools-a-sparql-endpoint-for-wordpress/|title|LH tools; a sparql endpoint for WordPress Shawfactor +http://shawfactor.com/2012/03/29/lh-tools-a-sparql-endpoint-for-wordpress/|creationTime|2012-04-25T14:12:27Z +http://snurf.bdash.net.nz/|creationDate|2006-02-06 +http://snurf.bdash.net.nz/|tag|http://www.semanlink.net/tag/python +http://snurf.bdash.net.nz/|tag|http://www.semanlink.net/tag/blog_software +http://snurf.bdash.net.nz/|comment|File system based +http://snurf.bdash.net.nz/|title|Snurf: a Python-based Blogging System +http://danbri.org/words/2007/11/04/223|creationDate|2007-11-07 +http://danbri.org/words/2007/11/04/223|tag|http://www.semanlink.net/tag/dan_brickley +http://danbri.org/words/2007/11/04/223|tag|http://www.semanlink.net/tag/foaf +http://danbri.org/words/2007/11/04/223|comment|Diagram showing FOAF classes and properties (and some DOAP, GEO, OWL, SKOS, and SIOC too) +http://danbri.org/words/2007/11/04/223|title|danbri’s foaf stories » FOAF diagram +http://danbri.org/words/2007/11/04/223|creationTime|2007-11-07T16:44:45Z +http://en.gravatar.com/hyperfp|creationDate|2010-10-01 +http://en.gravatar.com/hyperfp|tag|http://www.semanlink.net/tag/fps +http://en.gravatar.com/hyperfp|title|fps - Gravatar Profile +http://en.gravatar.com/hyperfp|creationTime|2010-10-01T00:18:03Z +https://github.com/nltk/nltk/wiki/Installing-Third-Party-Software|creationDate|2017-07-11 +https://github.com/nltk/nltk/wiki/Installing-Third-Party-Software|tag|http://www.semanlink.net/tag/stanford_pos_tagger +https://github.com/nltk/nltk/wiki/Installing-Third-Party-Software|tag|http://www.semanlink.net/tag/nltk +https://github.com/nltk/nltk/wiki/Installing-Third-Party-Software|title|NLTK: Installing Third Party Software · nltk Wiki +https://github.com/nltk/nltk/wiki/Installing-Third-Party-Software|creationTime|2017-07-11T18:14:58Z +http://www.lemonde.fr/emploi/article/2015/11/19/bernard-stiegler-ce-n-est-qu-en-projetant-un-veritable-avenir-qu-on-pourra-combattre-daech_4813660_1698637.html|creationDate|2015-11-19 
+http://www.lemonde.fr/emploi/article/2015/11/19/bernard-stiegler-ce-n-est-qu-en-projetant-un-veritable-avenir-qu-on-pourra-combattre-daech_4813660_1698637.html|tag|http://www.semanlink.net/tag/ei +http://www.lemonde.fr/emploi/article/2015/11/19/bernard-stiegler-ce-n-est-qu-en-projetant-un-veritable-avenir-qu-on-pourra-combattre-daech_4813660_1698637.html|tag|http://www.semanlink.net/tag/disruption +http://www.lemonde.fr/emploi/article/2015/11/19/bernard-stiegler-ce-n-est-qu-en-projetant-un-veritable-avenir-qu-on-pourra-combattre-daech_4813660_1698637.html|tag|http://www.semanlink.net/tag/attentats_13_11_2015 +http://www.lemonde.fr/emploi/article/2015/11/19/bernard-stiegler-ce-n-est-qu-en-projetant-un-veritable-avenir-qu-on-pourra-combattre-daech_4813660_1698637.html|tag|http://www.semanlink.net/tag/bernard_stiegler +http://www.lemonde.fr/emploi/article/2015/11/19/bernard-stiegler-ce-n-est-qu-en-projetant-un-veritable-avenir-qu-on-pourra-combattre-daech_4813660_1698637.html|title|Bernard Stiegler : « Ce n’est qu’en projetant un véritable avenir qu’on pourra combattre Daech » +http://www.lemonde.fr/emploi/article/2015/11/19/bernard-stiegler-ce-n-est-qu-en-projetant-un-veritable-avenir-qu-on-pourra-combattre-daech_4813660_1698637.html|creationTime|2015-11-19T22:34:50Z +http://www.scopus.com/record/display.url?eid=2-s2.0-57349152312&origin=inward&txGid=7A2D7638D1A90FC842E0E0E1C688AFC1.kqQeWtawXauCyC8ghhRGJg|creationDate|2014-04-23 +http://www.scopus.com/record/display.url?eid=2-s2.0-57349152312&origin=inward&txGid=7A2D7638D1A90FC842E0E0E1C688AFC1.kqQeWtawXauCyC8ghhRGJg|tag|http://www.semanlink.net/tag/topic_modeling +http://www.scopus.com/record/display.url?eid=2-s2.0-57349152312&origin=inward&txGid=7A2D7638D1A90FC842E0E0E1C688AFC1.kqQeWtawXauCyC8ghhRGJg|tag|http://www.semanlink.net/tag/www08 +http://www.scopus.com/record/display.url?eid=2-s2.0-57349152312&origin=inward&txGid=7A2D7638D1A90FC842E0E0E1C688AFC1.kqQeWtawXauCyC8ghhRGJg|comment|In this paper, we formally define the problem of topic modeling with network structure (TMN). We propose a novel solution to this problem, which regularizes a statistical topic model with a harmonic regularizer based on a graph structure in the data. The proposed method combines topic modeling and social network analysis, and leverages the power of both statistical topic models and discrete regularization. The output of this model can summarize well topics in text, map a topic onto the network, and discover topical communities. +http://www.scopus.com/record/display.url?eid=2-s2.0-57349152312&origin=inward&txGid=7A2D7638D1A90FC842E0E0E1C688AFC1.kqQeWtawXauCyC8ghhRGJg|title|Topic modeling with network regularization +http://www.scopus.com/record/display.url?eid=2-s2.0-57349152312&origin=inward&txGid=7A2D7638D1A90FC842E0E0E1C688AFC1.kqQeWtawXauCyC8ghhRGJg|creationTime|2014-04-23T10:54:41Z +http://turtlescript.sourceforge.net/|creationDate|2013-09-06 +http://turtlescript.sourceforge.net/|tag|http://www.semanlink.net/tag/javascript_rdf +http://turtlescript.sourceforge.net/|tag|http://www.semanlink.net/tag/rdfa +http://turtlescript.sourceforge.net/|tag|http://www.semanlink.net/tag/turtle_in_html +http://turtlescript.sourceforge.net/|comment|TurtleScript is a project to demonstrate the utility of including the Turtle RDF serialization in HTML files as a form of metadata. It is intended as proposed alternative or supplement to microformats and RDFa. 
+http://turtlescript.sourceforge.net/|title|TurtleScript +http://turtlescript.sourceforge.net/|creationTime|2013-09-06T18:20:12Z +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|creationDate|2011-08-20 +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/osema_2011 +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/deri +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/fps_paper +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/semantic_web +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/osema_deri_renault_paper +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/constraint_satisfaction_problem +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/fadi_badra +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/automobile +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|title|A Semantic Web Representation of a Product Range Specification based on Constraint Satisfaction Problem in the Automotive Industry +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-748/paper4.pdf|creationTime|2011-08-20T13:31:15Z +http://blogs.warwick.ac.uk/kieranshaw/entry/utf-8_internationalisation_with|creationDate|2008-10-11 +http://blogs.warwick.ac.uk/kieranshaw/entry/utf-8_internationalisation_with|tag|http://www.semanlink.net/tag/diacritics_in_uri +http://blogs.warwick.ac.uk/kieranshaw/entry/utf-8_internationalisation_with|tag|http://www.semanlink.net/tag/tomcat +http://blogs.warwick.ac.uk/kieranshaw/entry/utf-8_internationalisation_with|title|Java UTF–8 international character support with Tomcat and Oracle, 26/03/07, Kieran's blog +http://blogs.warwick.ac.uk/kieranshaw/entry/utf-8_internationalisation_with|creationTime|2008-10-11T18:54:05Z +https://twitter.com/akulith/status/1080460889839595522|creationDate|2019-01-03 +https://twitter.com/akulith/status/1080460889839595522|tag|http://www.semanlink.net/tag/brexit +https://twitter.com/akulith/status/1080460889839595522|title|clarification about how the process under Article 50 TEU works +https://twitter.com/akulith/status/1080460889839595522|creationTime|2019-01-03T14:32:01Z +http://lists.w3.org/Archives/Public/uri/2003Jan/0005|creationDate|2007-11-17 +http://lists.w3.org/Archives/Public/uri/2003Jan/0005|tag|http://www.semanlink.net/tag/uri_reference +http://lists.w3.org/Archives/Public/uri/2003Jan/0005|title|Rationalizing the term URI +http://lists.w3.org/Archives/Public/uri/2003Jan/0005|creationTime|2007-11-17T16:01:13Z +http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf|creationDate|2017-07-26 +http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf|tag|http://www.semanlink.net/tag/nearest_neighbor_search +http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf|tag|http://www.semanlink.net/tag/text_similarity +http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf|tag|http://www.semanlink.net/tag/similarity_queries +http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf|tag|http://www.semanlink.net/tag/locality_sensitive_hashing 
+http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf|tag|http://www.semanlink.net/tag/shingles +http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf|comment|"**Jaccard similarity**: similarity of sets, based on the relative size of their intersection -> **finding textually similar documents in a large corpus, near duplicates**. [Collaborative Filtering](/tag/collaborative_filtering) as a Similar-Sets Problem (cf. online purchases, movie ratings) + +**Shingling** turns the problem of textual similarity of documents into a pb of similarity of sets + +k-shingle: substring of length k found within a document. k: 5 for emails. Hashing shingles. Shingles built from words (stop word + 2 following words) + +Similarity-Preserving Summaries of Sets: shingles sets are large -> compress large sets into small representations (“signatures”) that preserve similarity: **[Minhashing](/tag/minhash)** - related to Jaccard similarity (good explanation in [wikipedia](https://en.wikipedia.org/wiki/MinHash)) + +It still may be impossible to find the pairs of docs with greatest similarity efficiently -> **[Locality-Sensitive Hashing](/tag/locality_sensitive_hashing)** for Documents + +Distance measures + +Theory of Locality-Sensitive Functions + +LSH famiies for other distance measures + +Applications of Locality-Sensitive Hashing: + +- entity resolution +- matching fingerprints +- matching newpapers articles + +Methods for High Degrees of Similarity: LSH-based methods most effective when the degree of similarity we +accept is relatively low. When we want to find sets that are almost identical, other methods can be faster. + + + + + + + +" +http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf|title|Finding Similar Items +http://infolab.stanford.edu/~ullman/mmds/ch3a.pdf|creationTime|2017-07-26T13:41:20Z +http://www.lemonde.fr/afrique/article/2012/06/30/ancar-eddine-annonce-la-destruction-des-tous-les-mausolees-de-tombouctou_1727451_3212.html|creationDate|2012-06-30 +http://www.lemonde.fr/afrique/article/2012/06/30/ancar-eddine-annonce-la-destruction-des-tous-les-mausolees-de-tombouctou_1727451_3212.html|tag|http://www.semanlink.net/tag/aqmi +http://www.lemonde.fr/afrique/article/2012/06/30/ancar-eddine-annonce-la-destruction-des-tous-les-mausolees-de-tombouctou_1727451_3212.html|tag|http://www.semanlink.net/tag/abrutis +http://www.lemonde.fr/afrique/article/2012/06/30/ancar-eddine-annonce-la-destruction-des-tous-les-mausolees-de-tombouctou_1727451_3212.html|tag|http://www.semanlink.net/tag/destruction_de_vestiges_antiques +http://www.lemonde.fr/afrique/article/2012/06/30/ancar-eddine-annonce-la-destruction-des-tous-les-mausolees-de-tombouctou_1727451_3212.html|tag|http://www.semanlink.net/tag/tombouctou +http://www.lemonde.fr/afrique/article/2012/06/30/ancar-eddine-annonce-la-destruction-des-tous-les-mausolees-de-tombouctou_1727451_3212.html|title|Ançar Eddine va détruire tous les mausolées de Tombouctou +http://www.lemonde.fr/afrique/article/2012/06/30/ancar-eddine-annonce-la-destruction-des-tous-les-mausolees-de-tombouctou_1727451_3212.html|creationTime|2012-06-30T14:45:48Z +https://arxiv.org/abs/1810.04805|creationDate|2018-10-12 +https://arxiv.org/abs/1810.04805|tag|http://www.semanlink.net/tag/sequence_to_sequence_learning +https://arxiv.org/abs/1810.04805|tag|http://www.semanlink.net/tag/attention_is_all_you_need +https://arxiv.org/abs/1810.04805|tag|http://www.semanlink.net/tag/bert +https://arxiv.org/abs/1810.04805|tag|http://www.semanlink.net/tag/neural_machine_translation 
+https://arxiv.org/abs/1810.04805|tag|http://www.semanlink.net/tag/language_model +https://arxiv.org/abs/1810.04805|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1810.04805|arxiv_author|Ming-Wei Chang +https://arxiv.org/abs/1810.04805|arxiv_author|Kristina Toutanova +https://arxiv.org/abs/1810.04805|arxiv_author|Kenton Lee +https://arxiv.org/abs/1810.04805|arxiv_author|Jacob Devlin +https://arxiv.org/abs/1810.04805|title|[1810.04805] BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding +https://arxiv.org/abs/1810.04805|creationTime|2018-10-12T14:36:01Z +https://arxiv.org/abs/1810.04805|arxiv_summary|"We introduce a new language representation model called BERT, which stands +for Bidirectional Encoder Representations from Transformers. Unlike recent +language representation models, BERT is designed to pre-train deep +bidirectional representations from unlabeled text by jointly conditioning on +both left and right context in all layers. As a result, the pre-trained BERT +model can be fine-tuned with just one additional output layer to create +state-of-the-art models for a wide range of tasks, such as question answering +and language inference, without substantial task-specific architecture +modifications. +BERT is conceptually simple and empirically powerful. It obtains new +state-of-the-art results on eleven natural language processing tasks, including +pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI +accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering +Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 +(5.1 point absolute improvement)." +https://arxiv.org/abs/1810.04805|arxiv_firstAuthor|Jacob Devlin +https://arxiv.org/abs/1810.04805|arxiv_updated|2019-05-24T20:37:26Z +https://arxiv.org/abs/1810.04805|arxiv_title|BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding +https://arxiv.org/abs/1810.04805|arxiv_published|2018-10-11T00:50:01Z +https://arxiv.org/abs/1810.04805|arxiv_num|1810.04805 +https://arxiv.org/abs/1604.06737|creationDate|2018-03-03 +https://arxiv.org/abs/1604.06737|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1604.06737|tag|http://www.semanlink.net/tag/categorical_variables +https://arxiv.org/abs/1604.06737|tag|http://www.semanlink.net/tag/statistical_classification +https://arxiv.org/abs/1604.06737|tag|http://www.semanlink.net/tag/entity_embeddings +https://arxiv.org/abs/1604.06737|arxiv_author|Cheng Guo +https://arxiv.org/abs/1604.06737|arxiv_author|Felix Berkhahn +https://arxiv.org/abs/1604.06737|comment|> We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables +https://arxiv.org/abs/1604.06737|title|[1604.06737] Entity Embeddings of Categorical Variables +https://arxiv.org/abs/1604.06737|creationTime|2018-03-03T17:13:44Z +https://arxiv.org/abs/1604.06737|arxiv_summary|"We map categorical variables in a function approximation problem into +Euclidean spaces, which are the entity embeddings of the categorical variables. 
+The mapping is learned by a neural network during the standard supervised +training process. Entity embedding not only reduces memory usage and speeds up +neural networks compared with one-hot encoding, but more importantly by mapping +similar values close to each other in the embedding space it reveals the +intrinsic properties of the categorical variables. We applied it successfully +in a recent Kaggle competition and were able to reach the third position with +relative simple features. We further demonstrate in this paper that entity +embedding helps the neural network to generalize better when the data is sparse +and statistics is unknown. Thus it is especially useful for datasets with lots +of high cardinality features, where other methods tend to overfit. We also +demonstrate that the embeddings obtained from the trained neural network boost +the performance of all tested machine learning methods considerably when used +as the input features instead. As entity embedding defines a distance measure +for categorical variables it can be used for visualizing categorical data and +for data clustering." +https://arxiv.org/abs/1604.06737|arxiv_firstAuthor|Cheng Guo +https://arxiv.org/abs/1604.06737|arxiv_updated|2016-04-22T16:34:30Z +https://arxiv.org/abs/1604.06737|arxiv_title|Entity Embeddings of Categorical Variables +https://arxiv.org/abs/1604.06737|arxiv_published|2016-04-22T16:34:30Z +https://arxiv.org/abs/1604.06737|arxiv_num|1604.06737 +http://www.sparqlets.org/clipboard/home|creationDate|2006-06-08 +http://www.sparqlets.org/clipboard/home|tag|http://www.semanlink.net/tag/sparql_clipboard +http://www.sparqlets.org/clipboard/home|tag|http://www.semanlink.net/tag/benjamin_nowack +http://www.sparqlets.org/clipboard/home|title|SPARQL Clipboard Demo +http://www.volkswagen.co.uk/vocabularies/vvo/ns|creationDate|2011-09-23 +http://www.volkswagen.co.uk/vocabularies/vvo/ns|tag|http://www.semanlink.net/tag/automotive_and_web_technologies +http://www.volkswagen.co.uk/vocabularies/vvo/ns|tag|http://www.semanlink.net/tag/volkswagen +http://www.volkswagen.co.uk/vocabularies/vvo/ns|tag|http://www.semanlink.net/tag/car_options_ontology +http://www.volkswagen.co.uk/vocabularies/vvo/ns|tag|http://www.semanlink.net/tag/goodrelations +http://www.volkswagen.co.uk/vocabularies/vvo/ns|title|The Volkswagen Vehicles Ontology (VVO) +http://www.volkswagen.co.uk/vocabularies/vvo/ns|creationTime|2011-09-23T01:01:47Z +https://arxiv.org/abs/1603.08861|creationDate|2018-02-13 +https://arxiv.org/abs/1603.08861|tag|http://www.semanlink.net/tag/semi_supervised_learning +https://arxiv.org/abs/1603.08861|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +https://arxiv.org/abs/1603.08861|tag|http://www.semanlink.net/tag/graph_embeddings +https://arxiv.org/abs/1603.08861|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1603.08861|arxiv_author|William W. Cohen +https://arxiv.org/abs/1603.08861|arxiv_author|Zhilin Yang +https://arxiv.org/abs/1603.08861|arxiv_author|Ruslan Salakhutdinov +https://arxiv.org/abs/1603.08861|title|[1603.08861] Revisiting Semi-Supervised Learning with Graph Embeddings +https://arxiv.org/abs/1603.08861|creationTime|2018-02-13T15:38:38Z +https://arxiv.org/abs/1603.08861|arxiv_summary|"We present a semi-supervised learning framework based on graph embeddings. +Given a graph between instances, we train an embedding for each instance to +jointly predict the class label and the neighborhood context in the graph. We +develop both transductive and inductive variants of our method. 
In the +transductive variant of our method, the class labels are determined by both the +learned embeddings and input feature vectors, while in the inductive variant, +the embeddings are defined as a parametric function of the feature vectors, so +predictions can be made on instances not seen during training. On a large and +diverse set of benchmark tasks, including text classification, distantly +supervised entity extraction, and entity classification, we show improved +performance over many of the existing models." +https://arxiv.org/abs/1603.08861|arxiv_firstAuthor|Zhilin Yang +https://arxiv.org/abs/1603.08861|arxiv_updated|2016-05-26T23:57:09Z +https://arxiv.org/abs/1603.08861|arxiv_title|Revisiting Semi-Supervised Learning with Graph Embeddings +https://arxiv.org/abs/1603.08861|arxiv_published|2016-03-29T17:46:16Z +https://arxiv.org/abs/1603.08861|arxiv_num|1603.08861 +http://googleresearch.blogspot.fr/2014/04/a-billion-words-because-todays-language.html|creationDate|2014-05-02 +http://googleresearch.blogspot.fr/2014/04/a-billion-words-because-todays-language.html|tag|http://www.semanlink.net/tag/language_model +http://googleresearch.blogspot.fr/2014/04/a-billion-words-because-todays-language.html|tag|http://www.semanlink.net/tag/google_research +http://googleresearch.blogspot.fr/2014/04/a-billion-words-because-todays-language.html|comment|Google Research releases data and code to build a 1B word training and test setup for statistical language modeling +http://googleresearch.blogspot.fr/2014/04/a-billion-words-because-todays-language.html|title|A Billion Words: Because today's language modeling standard should be higher +http://googleresearch.blogspot.fr/2014/04/a-billion-words-because-todays-language.html|creationTime|2014-05-02T15:34:59Z +http://www.bbc.com/culture/story/20171122-twenty-six-words-we-dont-want-to-lose|creationDate|2017-11-24 +http://www.bbc.com/culture/story/20171122-twenty-six-words-we-dont-want-to-lose|tag|http://www.semanlink.net/tag/anglais +http://www.bbc.com/culture/story/20171122-twenty-six-words-we-dont-want-to-lose|tag|http://www.semanlink.net/tag/words +http://www.bbc.com/culture/story/20171122-twenty-six-words-we-dont-want-to-lose|title|BBC - Culture - Twenty-six words we don’t want to lose +http://www.bbc.com/culture/story/20171122-twenty-six-words-we-dont-want-to-lose|creationTime|2017-11-24T01:42:38Z +https://en.wikipedia.org/wiki/Seven_Years_in_Tibet_(1997_film)|creationDate|2015-09-22 +https://en.wikipedia.org/wiki/Seven_Years_in_Tibet_(1997_film)|tag|http://www.semanlink.net/tag/jean_jacques_annaud +https://en.wikipedia.org/wiki/Seven_Years_in_Tibet_(1997_film)|tag|http://www.semanlink.net/tag/film +https://en.wikipedia.org/wiki/Seven_Years_in_Tibet_(1997_film)|tag|http://www.semanlink.net/tag/dalai_lama +https://en.wikipedia.org/wiki/Seven_Years_in_Tibet_(1997_film)|tag|http://www.semanlink.net/tag/tibet +https://en.wikipedia.org/wiki/Seven_Years_in_Tibet_(1997_film)|title|Seven Years in Tibet (Jean-Jacques Annaud) +https://en.wikipedia.org/wiki/Seven_Years_in_Tibet_(1997_film)|creationTime|2015-09-22T22:35:03Z +https://franceisai.com/conferences/conference-2018|creationDate|2018-10-15 +https://franceisai.com/conferences/conference-2018|tag|http://www.semanlink.net/tag/france_is_ai_2018 +https://franceisai.com/conferences/conference-2018|comment|"[some tweets](https://twitter.com/enahpets/status/1052537794764128257) + +" +https://franceisai.com/conferences/conference-2018|relatedDoc|https://twitter.com/enahpets/status/1052537794764128257 
+https://franceisai.com/conferences/conference-2018|title|Conference - France is AI +https://franceisai.com/conferences/conference-2018|creationTime|2018-10-15T18:06:56Z +http://www.suryasuravarapu.com/2009/02/rest-jersey-configuration-on-tomcat.html|creationDate|2011-02-04 +http://www.suryasuravarapu.com/2009/02/rest-jersey-configuration-on-tomcat.html|tag|http://www.semanlink.net/tag/jersey +http://www.suryasuravarapu.com/2009/02/rest-jersey-configuration-on-tomcat.html|tag|http://www.semanlink.net/tag/tomcat +http://www.suryasuravarapu.com/2009/02/rest-jersey-configuration-on-tomcat.html|title|REST: Jersey configuration on Tomcat - Surya Suravarapu's Blog +http://www.suryasuravarapu.com/2009/02/rest-jersey-configuration-on-tomcat.html|creationTime|2011-02-04T00:45:07Z +http://greententacle.techfak.uni-bielefeld.de/~cunger/qald/index.php?x=home&q=1|creationDate|2012-01-05 +http://greententacle.techfak.uni-bielefeld.de/~cunger/qald/index.php?x=home&q=1|tag|http://www.semanlink.net/tag/workshop +http://greententacle.techfak.uni-bielefeld.de/~cunger/qald/index.php?x=home&q=1|tag|http://www.semanlink.net/tag/nlp +http://greententacle.techfak.uni-bielefeld.de/~cunger/qald/index.php?x=home&q=1|tag|http://www.semanlink.net/tag/eswc +http://greententacle.techfak.uni-bielefeld.de/~cunger/qald/index.php?x=home&q=1|tag|http://www.semanlink.net/tag/linked_data +http://greententacle.techfak.uni-bielefeld.de/~cunger/qald/index.php?x=home&q=1|title|Question answering over Linked Data - Interacting with Linked Data +http://greententacle.techfak.uni-bielefeld.de/~cunger/qald/index.php?x=home&q=1|creationTime|2012-01-05T12:56:06Z +https://github.com/robert-bor/aho-corasick|creationDate|2019-04-24 +https://github.com/robert-bor/aho-corasick|tag|http://www.semanlink.net/tag/aho_corasick_algorithm +https://github.com/robert-bor/aho-corasick|tag|http://www.semanlink.net/tag/github_project +https://github.com/robert-bor/aho-corasick|comment|"Nowadays most free-text searching is based on Lucene-like approaches, where the search text is parsed into its various components. For every keyword a lookup is done to see where it occurs. When looking for a couple of keywords this approach is great. But what about it if you are not looking for just a couple of keywords, but a 100,000 of them? Like, for example, checking against a dictionary? + +This is where the Aho-Corasick algorithm shines. +" +https://github.com/robert-bor/aho-corasick|title|Aho-Corasick (java implementation) +https://github.com/robert-bor/aho-corasick|creationTime|2019-04-24T11:45:40Z +http://www.ird.fr/bani/|creationDate|2006-03-06 +http://www.ird.fr/bani/|tag|http://www.semanlink.net/tag/orstom +http://www.ird.fr/bani/|tag|http://www.semanlink.net/tag/archeologie_du_niger +http://www.ird.fr/bani/|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.ird.fr/bani/|comment|"Présentation de la collection de squelettes et de restes osseux de l'I.R.S.H., Institut de Recherche en Sciences Humaines de l'Université de Niamey. Développée dans le cadre du projet ""Conservatoire Archéologique"" financé par le Fond d'Aide et de Coopération français et mis en oeuvre par l'ORSTOM et l'I.R.S.H. Description de la plupart des sépultures préislamiques fouillées officiellement entre 1978 et 1992 dans le nord du Niger. 
+ +" +http://www.ird.fr/bani/|title|BANI (Base d'Anthropologie physique du Niger) Plan de BANI +http://blog.keras.io/how-convolutional-neural-networks-see-the-world.html|creationDate|2016-02-01 +http://blog.keras.io/how-convolutional-neural-networks-see-the-world.html|tag|http://www.semanlink.net/tag/francois_chollet +http://blog.keras.io/how-convolutional-neural-networks-see-the-world.html|tag|http://www.semanlink.net/tag/convolutional_neural_network +http://blog.keras.io/how-convolutional-neural-networks-see-the-world.html|title|How convolutional neural networks see the world +http://blog.keras.io/how-convolutional-neural-networks-see-the-world.html|creationTime|2016-02-01T14:52:12Z +http://www.w3.org/Submission/SWRL/|creationDate|2007-08-24 +http://www.w3.org/Submission/SWRL/|tag|http://www.semanlink.net/tag/swrl +http://www.w3.org/Submission/SWRL/|tag|http://www.semanlink.net/tag/w3c_submission +http://www.w3.org/Submission/SWRL/|tag|http://www.semanlink.net/tag/ian_horrocks +http://www.w3.org/Submission/SWRL/|comment|W3C Member Submission 21 May 2004 +http://www.w3.org/Submission/SWRL/|title|SWRL: A Semantic Web Rule Language Combining OWL and RuleML +http://www.w3.org/Submission/SWRL/|creationTime|2007-08-24T00:03:32Z +https://www.theguardian.com/environment/2019/mar/05/the-last-great-tree-a-majestic-relic-of-canadas-vanishing-boreal-forest|creationDate|2019-03-07 +https://www.theguardian.com/environment/2019/mar/05/the-last-great-tree-a-majestic-relic-of-canadas-vanishing-boreal-forest|tag|http://www.semanlink.net/tag/arbres_remarquables +https://www.theguardian.com/environment/2019/mar/05/the-last-great-tree-a-majestic-relic-of-canadas-vanishing-boreal-forest|tag|http://www.semanlink.net/tag/deforestation +https://www.theguardian.com/environment/2019/mar/05/the-last-great-tree-a-majestic-relic-of-canadas-vanishing-boreal-forest|tag|http://www.semanlink.net/tag/canada +https://www.theguardian.com/environment/2019/mar/05/the-last-great-tree-a-majestic-relic-of-canadas-vanishing-boreal-forest|title|The last great tree: a majestic relic of Canada's vanishing rainforest Environment The Guardian +https://www.theguardian.com/environment/2019/mar/05/the-last-great-tree-a-majestic-relic-of-canadas-vanishing-boreal-forest|creationTime|2019-03-07T08:57:49Z +http://www-128.ibm.com/developerworks/library/x-mashups.html?ca=dgr-lnxw16MashupChallenges|creationDate|2006-08-19 +http://www-128.ibm.com/developerworks/library/x-mashups.html?ca=dgr-lnxw16MashupChallenges|tag|http://www.semanlink.net/tag/mashups +http://www-128.ibm.com/developerworks/library/x-mashups.html?ca=dgr-lnxw16MashupChallenges|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www-128.ibm.com/developerworks/library/x-mashups.html?ca=dgr-lnxw16MashupChallenges|comment|An introduction to mashups +http://www-128.ibm.com/developerworks/library/x-mashups.html?ca=dgr-lnxw16MashupChallenges|title|Mashups: The new breed of Web app +http://i.stanford.edu/~ullman/mmds.html|creationDate|2013-03-29 +http://i.stanford.edu/~ullman/mmds.html|tag|http://www.semanlink.net/tag/big_data +http://i.stanford.edu/~ullman/mmds.html|tag|http://www.semanlink.net/tag/data_mining +http://i.stanford.edu/~ullman/mmds.html|title|Mining of Massive Datasets +http://i.stanford.edu/~ullman/mmds.html|creationTime|2013-03-29T01:20:02Z +https://hal.archives-ouvertes.fr/hal-01626196/document|creationDate|2017-12-30 +https://hal.archives-ouvertes.fr/hal-01626196/document|tag|http://www.semanlink.net/tag/entity_linking 
+https://hal.archives-ouvertes.fr/hal-01626196/document|tag|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +https://hal.archives-ouvertes.fr/hal-01626196/document|tag|http://www.semanlink.net/tag/cea_list +https://hal.archives-ouvertes.fr/hal-01626196/document|tag|http://www.semanlink.net/tag/these_irit_renault_biblio_initiale +https://hal.archives-ouvertes.fr/hal-01626196/document|tag|http://www.semanlink.net/tag/irit +https://hal.archives-ouvertes.fr/hal-01626196/document|tag|http://www.semanlink.net/tag/embeddings +https://hal.archives-ouvertes.fr/hal-01626196/document|tag|http://www.semanlink.net/tag/eswc +https://hal.archives-ouvertes.fr/hal-01626196/document|tag|http://www.semanlink.net/tag/jose_moreno +https://hal.archives-ouvertes.fr/hal-01626196/document|tag|http://www.semanlink.net/tag/entity_embeddings +https://hal.archives-ouvertes.fr/hal-01626196/document|comment|"The general approach for the entity linking task is to generate, for a given mention, a set of candidate entities from the base and, in a second step, determine which is the best +one. This paper proposes a novel method for the second step which is +based on the **joint learning of embeddings for the words in the text and +the entities in the knowledge base**. + +" +https://hal.archives-ouvertes.fr/hal-01626196/document|title|Combining word and entity embeddings for entity linking (ESWC 2017) +https://hal.archives-ouvertes.fr/hal-01626196/document|creationTime|2017-12-30T01:14:53Z +https://fr.wikipedia.org/wiki/Seigneurie_de_Bell%C3%AAme|creationDate|2017-12-10 +https://fr.wikipedia.org/wiki/Seigneurie_de_Bell%C3%AAme|tag|http://www.semanlink.net/tag/belleme +https://fr.wikipedia.org/wiki/Seigneurie_de_Bell%C3%AAme|tag|http://www.semanlink.net/tag/moyen_age +https://fr.wikipedia.org/wiki/Seigneurie_de_Bell%C3%AAme|title|Seigneurie de Bellême +https://fr.wikipedia.org/wiki/Seigneurie_de_Bell%C3%AAme|creationTime|2017-12-10T14:58:36Z +http://www.semanticpedia.org|creationDate|2012-12-10 +http://www.semanticpedia.org|tag|http://www.semanlink.net/tag/semanticpedia +http://www.semanticpedia.org|title|Sémanticpédia +http://www.semanticpedia.org|creationTime|2012-12-10T11:40:32Z +https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|creationDate|2017-10-23 +https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|tag|http://www.semanlink.net/tag/francois_chollet +https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|tag|http://www.semanlink.net/tag/using_word_embedding +https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|tag|http://www.semanlink.net/tag/nlp_text_classification +https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|tag|http://www.semanlink.net/tag/keras_embedding_layer +https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|tag|http://www.semanlink.net/tag/tutorial +https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|tag|http://www.semanlink.net/tag/nn_4_nlp +https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|tag|http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp +https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|comment|"Text classification using pre-trained GloVe embeddings (loaded into a frozen Keras Embedding layer) and a [convolutional neural network](/tag/convolutional_neural_network) +" 
+https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|title|Using pre-trained word embeddings in a Keras model +https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html|creationTime|2017-10-23T01:07:38Z +http://www.creativesynthesis.net/blog/projects/graph-gear/|creationDate|2008-02-16 +http://www.creativesynthesis.net/blog/projects/graph-gear/|tag|http://www.semanlink.net/tag/graph_visualization +http://www.creativesynthesis.net/blog/projects/graph-gear/|comment|Graph Gear allows you to quickly render a customizable interaction graph that is described by a graph xml file. All you need to do is embed it in your page and pass the xml over javascript. It provides nice interactive capabilities and a force directed layout. +http://www.creativesynthesis.net/blog/projects/graph-gear/|title|Graph Gear :: Creative Synthesis +http://www.creativesynthesis.net/blog/projects/graph-gear/|creationTime|2008-02-16T16:47:40Z +http://www.detnews.com/apps/pbcs.dll/article?AID=/20081119/AUTO01/811190449/1148/AUTO01|creationDate|2008-11-20 +http://www.detnews.com/apps/pbcs.dll/article?AID=/20081119/AUTO01/811190449/1148/AUTO01|tag|http://www.semanlink.net/tag/oregon +http://www.detnews.com/apps/pbcs.dll/article?AID=/20081119/AUTO01/811190449/1148/AUTO01|tag|http://www.semanlink.net/tag/nissan +http://www.detnews.com/apps/pbcs.dll/article?AID=/20081119/AUTO01/811190449/1148/AUTO01|tag|http://www.semanlink.net/tag/electric_car +http://www.detnews.com/apps/pbcs.dll/article?AID=/20081119/AUTO01/811190449/1148/AUTO01|title|Nissan announces electric car deal with Oregon The Detroit News +http://www.detnews.com/apps/pbcs.dll/article?AID=/20081119/AUTO01/811190449/1148/AUTO01|creationTime|2008-11-20T22:04:00Z +http://www.mkbergman.com/859/seven-pillars-of-the-open-semantic-enterprise/|creationDate|2012-07-26 +http://www.mkbergman.com/859/seven-pillars-of-the-open-semantic-enterprise/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/859/seven-pillars-of-the-open-semantic-enterprise/|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.mkbergman.com/859/seven-pillars-of-the-open-semantic-enterprise/|title|Seven Pillars of the Open Semantic Enterprise » AI3:::Adaptive Information +http://www.mkbergman.com/859/seven-pillars-of-the-open-semantic-enterprise/|creationTime|2012-07-26T20:48:52Z +http://hallojs.org/markdown.html|creationDate|2012-06-06 +http://hallojs.org/markdown.html|tag|http://www.semanlink.net/tag/henri_bergius +http://hallojs.org/markdown.html|tag|http://www.semanlink.net/tag/javascript +http://hallojs.org/markdown.html|tag|http://www.semanlink.net/tag/interactive_knowledge_stack +http://hallojs.org/markdown.html|title|Hallo.js - Editing Markdown in WYSIWYG +http://hallojs.org/markdown.html|creationTime|2012-06-06T22:28:23Z +http://howtodoinjava.com/2013/06/05/jax-rs-resteasy-cache-control-with-etag-example/|creationDate|2015-02-11 +http://howtodoinjava.com/2013/06/05/jax-rs-resteasy-cache-control-with-etag-example/|tag|http://www.semanlink.net/tag/jax_rs +http://howtodoinjava.com/2013/06/05/jax-rs-resteasy-cache-control-with-etag-example/|tag|http://www.semanlink.net/tag/jersey_cache_control +http://howtodoinjava.com/2013/06/05/jax-rs-resteasy-cache-control-with-etag-example/|tag|http://www.semanlink.net/tag/cache +http://howtodoinjava.com/2013/06/05/jax-rs-resteasy-cache-control-with-etag-example/|title|JAX-RS RESTEasy Cache control with ETag example - How To Do In Java 
+http://howtodoinjava.com/2013/06/05/jax-rs-resteasy-cache-control-with-etag-example/|creationTime|2015-02-11T16:23:29Z +http://rogeralsing.com/2008/12/07/genetic-programming-evolution-of-mona-lisa/|creationDate|2008-12-10 +http://rogeralsing.com/2008/12/07/genetic-programming-evolution-of-mona-lisa/|tag|http://www.semanlink.net/tag/mona_lisa +http://rogeralsing.com/2008/12/07/genetic-programming-evolution-of-mona-lisa/|tag|http://www.semanlink.net/tag/genetic_programming +http://rogeralsing.com/2008/12/07/genetic-programming-evolution-of-mona-lisa/|title|Genetic Programming: Evolution of Mona Lisa « Roger Alsing Weblog +http://rogeralsing.com/2008/12/07/genetic-programming-evolution-of-mona-lisa/|creationTime|2008-12-10T14:56:07Z +http://www.lab41.org/anything2vec/|creationDate|2016-03-05 +http://www.lab41.org/anything2vec/|tag|http://www.semanlink.net/tag/word_embedding +http://www.lab41.org/anything2vec/|tag|http://www.semanlink.net/tag/word2vec +http://www.lab41.org/anything2vec/|title|2Vec or Not 2Vec? +http://www.lab41.org/anything2vec/|creationTime|2016-03-05T14:37:01Z +http://lists.w3.org/Archives/Public/public-esw-thes/2012Oct/0003.html|creationDate|2012-10-25 +http://lists.w3.org/Archives/Public/public-esw-thes/2012Oct/0003.html|tag|http://www.semanlink.net/tag/tools +http://lists.w3.org/Archives/Public/public-esw-thes/2012Oct/0003.html|tag|http://www.semanlink.net/tag/skos +http://lists.w3.org/Archives/Public/public-esw-thes/2012Oct/0003.html|title|Tools for exporting SKOS to human readable form (HTML) from Peter Krantz on 2012-10-24 (public-esw-thes@w3.org from October 2012) +http://lists.w3.org/Archives/Public/public-esw-thes/2012Oct/0003.html|creationTime|2012-10-25T11:56:36Z +http://www.assemblee-nationale.fr/12/rap-info/i3061.asp|creationDate|2006-05-26 +http://www.assemblee-nationale.fr/12/rap-info/i3061.asp|tag|http://www.semanlink.net/tag/enseignement_scientifique +http://www.assemblee-nationale.fr/12/rap-info/i3061.asp|tag|http://www.semanlink.net/tag/assemblee_nationale +http://www.assemblee-nationale.fr/12/rap-info/i3061.asp|tag|http://www.semanlink.net/tag/ecole +http://www.assemblee-nationale.fr/12/rap-info/i3061.asp|comment|La mission a observé, tant en France qu'à l'étranger, un tel enthousiasme à enseigner, une telle curiosité et une telle soif d'apprendre, dès lors que l'on sort des modes traditionnels de transmission des savoirs qu'elle considère que le levier du changement réside dans ces laboratoires pédagogiques qui tendent à se multiplier bien plus que dans une énième révision des programmes ou des horaires. +http://www.assemblee-nationale.fr/12/rap-info/i3061.asp|title|Rapport d'information de M. 
Jean-Marie Rolland sur l'enseignement des disciplines scientifiques dans le primaire et le secondaire +https://karpathy.github.io/2019/04/25/recipe/|creationDate|2019-04-25 +https://karpathy.github.io/2019/04/25/recipe/|tag|http://www.semanlink.net/tag/andrej_karpathy +https://karpathy.github.io/2019/04/25/recipe/|tag|http://www.semanlink.net/tag/artificial_neural_network +https://karpathy.github.io/2019/04/25/recipe/|title|A Recipe for Training Neural Networks (Andrej Karpathy blog) +https://karpathy.github.io/2019/04/25/recipe/|creationTime|2019-04-25T23:01:13Z +https://aclweb.org/anthology/papers/C/C18/C18-1139/|creationDate|2019-04-24 +https://aclweb.org/anthology/papers/C/C18/C18-1139/|tag|http://www.semanlink.net/tag/contextualised_word_representations +https://aclweb.org/anthology/papers/C/C18/C18-1139/|comment|"**""contextual string embeddings""** based on the internal states of a character language model + +- trained without any explicit notion of words +- words are contextualized by their surrounding text + +[Github](https://github.com/zalandoresearch/flair) +" +https://aclweb.org/anthology/papers/C/C18/C18-1139/|relatedDoc|https://github.com/zalandoresearch/flair +https://aclweb.org/anthology/papers/C/C18/C18-1139/|title|Contextual String Embeddings for Sequence Labeling - ACL Anthology (2018) +https://aclweb.org/anthology/papers/C/C18/C18-1139/|creationTime|2019-04-24T14:27:13Z +http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/a-goulet.html|creationDate|2018-08-02 +http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/a-goulet.html|tag|http://www.semanlink.net/tag/crispr_cas9 +http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/a-goulet.html|title|La revanche des bactériophages sur CRISPR-Cas9 - CNRS +http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/a-goulet.html|creationTime|2018-08-02T21:51:17Z +http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/|creationDate|2015-11-08 +http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/|tag|http://www.semanlink.net/tag/recurrent_neural_network +http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/|tag|http://www.semanlink.net/tag/tutorial +http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/|tag|http://www.semanlink.net/tag/denny_britz +http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/|comment|"The idea behind RNNs is to make use of sequential information. In a traditional neural network we assume that all inputs (and outputs) are independent of each other. But for many tasks that’s a very bad idea. If you want to predict the next word in a sentence you better know which words came before it. **RNNs are called recurrent because they perform the same task for every element of a sequence**, with the output being depended on the previous computations. Another way to think about RNNs is that they have **a “memory” which captures information about what has been calculated so far**. 
In theory RNNs can make use of information in arbitrarily long sequences, but in practice they are limited to looking back only a few steps +" +http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/|title|Recurrent Neural Networks Tutorial, Part 1 – Introduction to RNNs WildML +http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/|creationTime|2015-11-08T18:38:46Z +https://www.youtube.com/watch?v=029RscqsNLk|creationDate|2014-11-11 +https://www.youtube.com/watch?v=029RscqsNLk|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=029RscqsNLk|tag|http://www.semanlink.net/tag/reinhard_mey +https://www.youtube.com/watch?v=029RscqsNLk|title|Reinhard Mey - Annabelle 1972 - YouTube +https://www.youtube.com/watch?v=029RscqsNLk|creationTime|2014-11-11T23:32:01Z +http://blogs.sun.com/bblfish/entry/kiwi_knowledge_in_a_wiki|creationDate|2008-04-17 +http://blogs.sun.com/bblfish/entry/kiwi_knowledge_in_a_wiki|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/bblfish/entry/kiwi_knowledge_in_a_wiki|tag|http://www.semanlink.net/tag/kiwi_project +http://blogs.sun.com/bblfish/entry/kiwi_knowledge_in_a_wiki|title|KiWi: Knowledge in a Wiki +http://blogs.sun.com/bblfish/entry/kiwi_knowledge_in_a_wiki|creationTime|2008-04-17T13:14:31Z +http://www.irinnews.org/fr/ReportFrench.aspx?ReportId=75840|creationDate|2008-01-10 +http://www.irinnews.org/fr/ReportFrench.aspx?ReportId=75840|tag|http://www.semanlink.net/tag/rebellion_touaregue +http://www.irinnews.org/fr/ReportFrench.aspx?ReportId=75840|tag|http://www.semanlink.net/tag/niger +http://www.irinnews.org/fr/ReportFrench.aspx?ReportId=75840|title|IRIN - NIGER: Des rebelles accusés de mener une campagne de terreur urbaine inspirée de l’Irak +http://www.irinnews.org/fr/ReportFrench.aspx?ReportId=75840|creationTime|2008-01-10T00:54:03Z +https://arxiv.org/abs/1503.00759|creationDate|2017-10-24 +https://arxiv.org/abs/1503.00759|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1503.00759|tag|http://www.semanlink.net/tag/statistical_relational_learning +https://arxiv.org/abs/1503.00759|arxiv_author|Kevin Murphy +https://arxiv.org/abs/1503.00759|arxiv_author|Volker Tresp +https://arxiv.org/abs/1503.00759|arxiv_author|Maximilian Nickel +https://arxiv.org/abs/1503.00759|arxiv_author|Evgeniy Gabrilovich +https://arxiv.org/abs/1503.00759|title|[1503.00759] A Review of Relational Machine Learning for Knowledge Graphs +https://arxiv.org/abs/1503.00759|creationTime|2017-10-24T14:44:20Z +https://arxiv.org/abs/1503.00759|arxiv_summary|"Relational machine learning studies methods for the statistical analysis of +relational, or graph-structured, data. In this paper, we provide a review of +how such statistical models can be ""trained"" on large knowledge graphs, and +then used to predict new facts about the world (which is equivalent to +predicting new edges in the graph). In particular, we discuss two fundamentally +different kinds of statistical relational models, both of which can scale to +massive datasets. The first is based on latent feature models such as tensor +factorization and multiway neural networks. The second is based on mining +observable patterns in the graph. We also show how to combine these latent and +observable models to get improved modeling power at decreased computational +cost. 
Finally, we discuss how such statistical models of graphs can be combined +with text-based information extraction methods for automatically constructing +knowledge graphs from the Web. To this end, we also discuss Google's Knowledge +Vault project as an example of such combination." +https://arxiv.org/abs/1503.00759|arxiv_firstAuthor|Maximilian Nickel +https://arxiv.org/abs/1503.00759|arxiv_updated|2015-09-28T17:40:35Z +https://arxiv.org/abs/1503.00759|arxiv_title|A Review of Relational Machine Learning for Knowledge Graphs +https://arxiv.org/abs/1503.00759|arxiv_published|2015-03-02T21:35:41Z +https://arxiv.org/abs/1503.00759|arxiv_num|1503.00759 +https://www.youtube.com/watch?v=gUilOCTqPC4|creationDate|2017-08-23 +https://www.youtube.com/watch?v=gUilOCTqPC4|tag|http://www.semanlink.net/tag/jean_rouch +https://www.youtube.com/watch?v=gUilOCTqPC4|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=gUilOCTqPC4|comment|They call on the new gods, the gods of the city, the gods of the technology, the gods of power: the Haouka +https://www.youtube.com/watch?v=gUilOCTqPC4|title|Jean Rouch – Les Maîtres Fous [1955] [1/2] - YouTube +https://www.youtube.com/watch?v=gUilOCTqPC4|creationTime|2017-08-23T14:26:51Z +http://www.semanlink.net/doc/2019/05/virtual_knowledge_graphs_an_ov|creationDate|2019-05-28 +http://www.semanlink.net/doc/2019/05/virtual_knowledge_graphs_an_ov|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/05/virtual_knowledge_graphs_an_ov|tag|http://www.semanlink.net/tag/virtual_knowledge_graph +http://www.semanlink.net/doc/2019/05/virtual_knowledge_graphs_an_ov|tag|http://www.semanlink.net/tag/ontology_based_data_access +http://www.semanlink.net/doc/2019/05/virtual_knowledge_graphs_an_ov|tag|http://www.semanlink.net/tag/data_integration +http://www.semanlink.net/doc/2019/05/virtual_knowledge_graphs_an_ov|comment|"> **Virtual knowledge graph (VKG) paradigm for data integration and access**, also known in the literature as **Ontology-based Data Access**. Instead of structuring the integration layer as a collection of relational tables, the VKG paradigm replaces the rigid structure of tables with the flexibility of graphs that are kept virtual and embed domain knowledge +" +http://www.semanlink.net/doc/2019/05/virtual_knowledge_graphs_an_ov|title|Virtual Knowledge Graphs: An Overview of Systems and Use Cases MIT Press Journals (2019) +http://www.semanlink.net/doc/2019/05/virtual_knowledge_graphs_an_ov|bookmarkOf|https://www.mitpressjournals.org/doi/full/10.1162/dint_a_00011 +http://www.semanlink.net/doc/2019/05/virtual_knowledge_graphs_an_ov|creationTime|2019-05-28T17:01:59Z +http://stackoverflow.com/questions/2954814/solr-and-natural-language-parsing-can-i-use-it|creationDate|2014-03-15 +http://stackoverflow.com/questions/2954814/solr-and-natural-language-parsing-can-i-use-it|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/2954814/solr-and-natural-language-parsing-can-i-use-it|tag|http://www.semanlink.net/tag/solr_and_nlp +http://stackoverflow.com/questions/2954814/solr-and-natural-language-parsing-can-i-use-it|title|lucene - SOLR and Natural Language Parsing - Can I use it? 
- Stack Overflow +http://stackoverflow.com/questions/2954814/solr-and-natural-language-parsing-can-i-use-it|creationTime|2014-03-15T13:49:46Z +http://www.lemonde.fr/planete/chat/2011/06/02/posez-vos-questions-a-pierre-rabhi_1531227_3244.html|creationDate|2011-06-03 +http://www.lemonde.fr/planete/chat/2011/06/02/posez-vos-questions-a-pierre-rabhi_1531227_3244.html|tag|http://www.semanlink.net/tag/decroissance +http://www.lemonde.fr/planete/chat/2011/06/02/posez-vos-questions-a-pierre-rabhi_1531227_3244.html|tag|http://www.semanlink.net/tag/liberation +http://www.lemonde.fr/planete/chat/2011/06/02/posez-vos-questions-a-pierre-rabhi_1531227_3244.html|tag|http://www.semanlink.net/tag/ecologie +http://www.lemonde.fr/planete/chat/2011/06/02/posez-vos-questions-a-pierre-rabhi_1531227_3244.html|title|"Pierre Rabhi : ""Vivre sobrement, c'est une forme de libération"" - LeMonde.fr" +http://www.lemonde.fr/planete/chat/2011/06/02/posez-vos-questions-a-pierre-rabhi_1531227_3244.html|creationTime|2011-06-03T18:07:43Z +http://www.lemonde.fr/planete/chat/2011/06/02/posez-vos-questions-a-pierre-rabhi_1531227_3244.html|source|Le Monde +https://en.wikipedia.org/wiki/Chiribiquete_National_Park#Gallery|creationDate|2018-10-16 +https://en.wikipedia.org/wiki/Chiribiquete_National_Park#Gallery|tag|http://www.semanlink.net/tag/tepuys +https://en.wikipedia.org/wiki/Chiribiquete_National_Park#Gallery|tag|http://www.semanlink.net/tag/peinture_rupestre +https://en.wikipedia.org/wiki/Chiribiquete_National_Park#Gallery|tag|http://www.semanlink.net/tag/colombie +https://en.wikipedia.org/wiki/Chiribiquete_National_Park#Gallery|title|Chiribiquete National Park - Wikipedia +https://en.wikipedia.org/wiki/Chiribiquete_National_Park#Gallery|creationTime|2018-10-16T00:24:46Z +https://github.com/src-d/wmd-relax|creationDate|2017-11-12 +https://github.com/src-d/wmd-relax|tag|http://www.semanlink.net/tag/github_project +https://github.com/src-d/wmd-relax|tag|http://www.semanlink.net/tag/word_mover_s_distance +https://github.com/src-d/wmd-relax|title|Calculates Word Mover's Distance Insanely Fast +https://github.com/src-d/wmd-relax|creationTime|2017-11-12T02:54:34Z +http://www.mkbergman.com/?p=447|creationDate|2008-06-24 +http://www.mkbergman.com/?p=447|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://www.mkbergman.com/?p=447|tag|http://www.semanlink.net/tag/link_to_me +http://www.mkbergman.com/?p=447|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/?p=447|tag|http://www.semanlink.net/tag/fps_and_ldow2008 +http://www.mkbergman.com/?p=447|comment|"> The Linking Open Data (LOD) group formed about 18 months ago to showcase Linked Data techniques began with open data. As a parallel concept to sever the idea that it only applies to open data, François-Paul Servant has specifically identified Linking Enterprise Data. + +Zitgist Offers a Definition and Some Answers to Enterprise Questions" +http://www.mkbergman.com/?p=447|title|What is Linked Data? 
» AI3:::Adaptive Information +http://www.mkbergman.com/?p=447|creationTime|2008-06-24T20:26:42Z +https://www.microsoft.com/en-us/research/publication/automatic-tag-recommendation-algorithms-for-social-recommender-systems/|creationDate|2018-08-07 +https://www.microsoft.com/en-us/research/publication/automatic-tag-recommendation-algorithms-for-social-recommender-systems/|tag|http://www.semanlink.net/tag/automatic_tagging +https://www.microsoft.com/en-us/research/publication/automatic-tag-recommendation-algorithms-for-social-recommender-systems/|tag|http://www.semanlink.net/tag/microsoft_research +https://www.microsoft.com/en-us/research/publication/automatic-tag-recommendation-algorithms-for-social-recommender-systems/|title|Automatic Tag Recommendation Algorithms for Social Recommender Systems - Microsoft Research (2009) +https://www.microsoft.com/en-us/research/publication/automatic-tag-recommendation-algorithms-for-social-recommender-systems/|creationTime|2018-08-07T16:31:30Z +http://www.csee.umbc.edu/~hillol/NGDM07/abstracts/talks/MKirschenbaum.pdf|creationDate|2013-08-20 +http://www.csee.umbc.edu/~hillol/NGDM07/abstracts/talks/MKirschenbaum.pdf|tag|http://www.semanlink.net/tag/digital_humanities +http://www.csee.umbc.edu/~hillol/NGDM07/abstracts/talks/MKirschenbaum.pdf|tag|http://www.semanlink.net/tag/distant_reading +http://www.csee.umbc.edu/~hillol/NGDM07/abstracts/talks/MKirschenbaum.pdf|tag|http://www.semanlink.net/tag/literary_criticism +http://www.csee.umbc.edu/~hillol/NGDM07/abstracts/talks/MKirschenbaum.pdf|tag|http://www.semanlink.net/tag/topic_modeling +http://www.csee.umbc.edu/~hillol/NGDM07/abstracts/talks/MKirschenbaum.pdf|tag|http://www.semanlink.net/tag/data_mining +http://www.csee.umbc.edu/~hillol/NGDM07/abstracts/talks/MKirschenbaum.pdf|title|The Remaking of Reading: Data Mining and the Digital Humanities +http://www.csee.umbc.edu/~hillol/NGDM07/abstracts/talks/MKirschenbaum.pdf|creationTime|2013-08-20T16:32:06Z +https://github.com/wikimedia/wikidata-query-gui/blob/master/examples/app/nearby.html#L79|creationDate|2017-11-21 +https://github.com/wikimedia/wikidata-query-gui/blob/master/examples/app/nearby.html#L79|tag|http://www.semanlink.net/tag/wikidata_query_service +https://github.com/wikimedia/wikidata-query-gui/blob/master/examples/app/nearby.html#L79|tag|http://www.semanlink.net/tag/sample_code +https://github.com/wikimedia/wikidata-query-gui/blob/master/examples/app/nearby.html#L79|comment|"> reuse the code for #Wikidata #query #ui to build your own #SPARQL driven application +> Tourist guide with just a few lines of JS code +" +https://github.com/wikimedia/wikidata-query-gui/blob/master/examples/app/nearby.html#L79|title|wikidata-query-gui/nearby.html at master · wikimedia/wikidata-query-gui +https://github.com/wikimedia/wikidata-query-gui/blob/master/examples/app/nearby.html#L79|creationTime|2017-11-21T11:12:08Z +http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf|creationDate|2017-06-14 +http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf|tag|http://www.semanlink.net/tag/rada_mihalcea +http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf|tag|http://www.semanlink.net/tag/textrank +http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf|comment|paper, description of the TextRank algorithm +http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf|title|TextRank: Bringing Order into Texts (2004) +http://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf|creationTime|2017-06-14T01:16:22Z 
+http://www.ibm.com/developerworks/rational/library/basic-profile-linked-data/index.html|creationDate|2011-12-27 +http://www.ibm.com/developerworks/rational/library/basic-profile-linked-data/index.html|tag|http://www.semanlink.net/tag/linked_data_dev +http://www.ibm.com/developerworks/rational/library/basic-profile-linked-data/index.html|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.ibm.com/developerworks/rational/library/basic-profile-linked-data/index.html|comment|A collection of best practices and a simple approach for a Linked Data architecture +http://www.ibm.com/developerworks/rational/library/basic-profile-linked-data/index.html|title|Toward a Basic Profile for Linked Data +http://www.ibm.com/developerworks/rational/library/basic-profile-linked-data/index.html|creationTime|2011-12-27T19:17:06Z +http://en.wikipedia.org/wiki/Zhang_Qian|creationDate|2007-04-29 +http://en.wikipedia.org/wiki/Zhang_Qian|tag|http://www.semanlink.net/tag/zhang_qian +http://en.wikipedia.org/wiki/Zhang_Qian|comment|"Zhang Qian was an imperial envoy in the 2nd century BCE, during the time of the Han Dynasty. He was the first official diplomat to bring back reliable information about Central Asia to the Chinese imperial court
+En route he was captured by the Xiongnu and detained for ten years. There he married a Xiongnu wife and gained the trust of the Xiongnu leader.
+"
+http://en.wikipedia.org/wiki/Zhang_Qian|title|Zhang Qian - Wikipedia, the free encyclopedia
+http://en.wikipedia.org/wiki/Zhang_Qian|creationTime|2007-04-29T00:55:19Z
+http://blog.erratasec.com/2016/06/etheriumdao-hack-similfied.html#.V3UTeo56_-k|creationDate|2016-06-30
+http://blog.erratasec.com/2016/06/etheriumdao-hack-similfied.html#.V3UTeo56_-k|tag|http://www.semanlink.net/tag/dao_attack
+http://blog.erratasec.com/2016/06/etheriumdao-hack-similfied.html#.V3UTeo56_-k|title|Errata Security: Ethereum/TheDAO hack simplified
+http://blog.erratasec.com/2016/06/etheriumdao-hack-similfied.html#.V3UTeo56_-k|creationTime|2016-06-30T15:08:36Z
+https://www.tensorflow.org/extras/candidate_sampling.pdf|creationDate|2018-07-07
+https://www.tensorflow.org/extras/candidate_sampling.pdf|tag|http://www.semanlink.net/tag/candidate_sampling
+https://www.tensorflow.org/extras/candidate_sampling.pdf|tag|http://www.semanlink.net/tag/tensorflow
+https://www.tensorflow.org/extras/candidate_sampling.pdf|title|What is Candidate Sampling
+https://www.tensorflow.org/extras/candidate_sampling.pdf|creationTime|2018-07-07T15:04:54Z
+http://esw.w3.org/Foaf+ssl|creationDate|2011-01-18
+http://esw.w3.org/Foaf+ssl|tag|http://www.semanlink.net/tag/foaf_ssl
+http://esw.w3.org/Foaf+ssl|tag|http://www.semanlink.net/tag/henry_story
+http://esw.w3.org/Foaf+ssl|title|Foaf+ssl - ESW Wiki
+http://esw.w3.org/Foaf+ssl|creationTime|2011-01-18T10:36:18Z
+http://paulgraham.com/95.html|creationDate|2014-12-27
+http://paulgraham.com/95.html|tag|http://www.semanlink.net/tag/programmers
+http://paulgraham.com/95.html|tag|http://www.semanlink.net/tag/immigration
+http://paulgraham.com/95.html|tag|http://www.semanlink.net/tag/paul_graham
+http://paulgraham.com/95.html|title|Let the Other 95% of Great Programmers In
+http://paulgraham.com/95.html|creationTime|2014-12-27T11:34:09Z
+https://www.nytimes.com/2019/01/25/technology/automation-davos-world-economic-forum.html|creationDate|2019-01-27
+https://www.nytimes.com/2019/01/25/technology/automation-davos-world-economic-forum.html|tag|http://www.semanlink.net/tag/jobbotization
+https://www.nytimes.com/2019/01/25/technology/automation-davos-world-economic-forum.html|tag|http://www.semanlink.net/tag/digital_economy
+https://www.nytimes.com/2019/01/25/technology/automation-davos-world-economic-forum.html|tag|http://www.semanlink.net/tag/davos
+https://www.nytimes.com/2019/01/25/technology/automation-davos-world-economic-forum.html|tag|http://www.semanlink.net/tag/ai_dangers
+https://www.nytimes.com/2019/01/25/technology/automation-davos-world-economic-forum.html|comment|“The choice isn’t between automation and non-automation,” said Erik Brynjolfsson, the director of M.I.T.’s Initiative on the Digital Economy. 
“It’s between whether you use the technology in a way that creates shared prosperity, or more concentration of wealth.” +https://www.nytimes.com/2019/01/25/technology/automation-davos-world-economic-forum.html|title|The Hidden Automation Agenda of the Davos Elite - The New York Times +https://www.nytimes.com/2019/01/25/technology/automation-davos-world-economic-forum.html|creationTime|2019-01-27T01:06:32Z +https://arxiv.org/abs/1806.05662|creationDate|2018-06-23 +https://arxiv.org/abs/1806.05662|tag|http://www.semanlink.net/tag/embeddings +https://arxiv.org/abs/1806.05662|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +https://arxiv.org/abs/1806.05662|tag|http://www.semanlink.net/tag/graph_embeddings +https://arxiv.org/abs/1806.05662|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1806.05662|tag|http://www.semanlink.net/tag/yann_lecun +https://arxiv.org/abs/1806.05662|tag|http://www.semanlink.net/tag/transfer_learning +https://arxiv.org/abs/1806.05662|arxiv_author|Jake Zhao +https://arxiv.org/abs/1806.05662|arxiv_author|Kaiming He +https://arxiv.org/abs/1806.05662|arxiv_author|Yann LeCun +https://arxiv.org/abs/1806.05662|arxiv_author|Ruslan Salakhutdinov +https://arxiv.org/abs/1806.05662|arxiv_author|Bhuwan Dhingra +https://arxiv.org/abs/1806.05662|arxiv_author|William W. Cohen +https://arxiv.org/abs/1806.05662|arxiv_author|Zhilin Yang +https://arxiv.org/abs/1806.05662|comment|Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. +https://arxiv.org/abs/1806.05662|title|[1806.05662] GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations +https://arxiv.org/abs/1806.05662|creationTime|2018-06-23T00:58:21Z +https://arxiv.org/abs/1806.05662|arxiv_summary|"Modern deep transfer learning approaches have mainly focused on learning +generic feature vectors from one task that are transferable to other tasks, +such as word embeddings in language and pretrained convolutional features in +vision. However, these approaches usually transfer unary features and largely +ignore more structured graphical representations. This work explores the +possibility of learning generic latent relational graphs that capture +dependencies between pairs of data units (e.g., words or pixels) from +large-scale unlabeled data and transferring the graphs to downstream tasks. Our +proposed transfer learning framework improves performance on various tasks +including question answering, natural language inference, sentiment analysis, +and image classification. We also show that the learned graphs are generic +enough to be transferred to different embeddings on which the graphs have not +been trained (including GloVe embeddings, ELMo embeddings, and task-specific +RNN hidden unit), or embedding-free units such as image pixels." 
+https://arxiv.org/abs/1806.05662|arxiv_firstAuthor|Zhilin Yang +https://arxiv.org/abs/1806.05662|arxiv_updated|2018-07-02T20:24:33Z +https://arxiv.org/abs/1806.05662|arxiv_title|GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations +https://arxiv.org/abs/1806.05662|arxiv_published|2018-06-14T17:41:19Z +https://arxiv.org/abs/1806.05662|arxiv_num|1806.05662 +https://github.com/d3/d3|creationDate|2017-06-28 +https://github.com/d3/d3|tag|http://www.semanlink.net/tag/d3js +https://github.com/d3/d3|tag|http://www.semanlink.net/tag/github_project +https://github.com/d3/d3|comment|JavaScript library for visualizing data using web standards (SVG, Canvas and HTML). D3 combines visualization and interaction techniques with a data-driven approach to DOM manipulation +https://github.com/d3/d3|title|D3: Data-Driven Documents +https://github.com/d3/d3|creationTime|2017-06-28T15:00:55Z +http://www.lemonde.fr/m/article/2012/02/03/la-hongrie-rongee-par-la-haine_1637775_1575563.html#ens_id=1276800|creationDate|2012-02-03 +http://www.lemonde.fr/m/article/2012/02/03/la-hongrie-rongee-par-la-haine_1637775_1575563.html#ens_id=1276800|tag|http://www.semanlink.net/tag/hongrie +http://www.lemonde.fr/m/article/2012/02/03/la-hongrie-rongee-par-la-haine_1637775_1575563.html#ens_id=1276800|tag|http://www.semanlink.net/tag/extreme_droite +http://www.lemonde.fr/m/article/2012/02/03/la-hongrie-rongee-par-la-haine_1637775_1575563.html#ens_id=1276800|title|La Hongrie rongée par la haine +http://www.lemonde.fr/m/article/2012/02/03/la-hongrie-rongee-par-la-haine_1637775_1575563.html#ens_id=1276800|creationTime|2012-02-03T22:24:35Z +http://www.lemonde.fr/m/article/2012/02/03/la-hongrie-rongee-par-la-haine_1637775_1575563.html#ens_id=1276800|source|Le Monde +https://arxiv.org/abs/1601.03764|creationDate|2018-08-28 +https://arxiv.org/abs/1601.03764|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1601.03764|tag|http://www.semanlink.net/tag/emnlp_2018 +https://arxiv.org/abs/1601.03764|tag|http://www.semanlink.net/tag/sanjeev_arora +https://arxiv.org/abs/1601.03764|tag|http://www.semanlink.net/tag/word_embedding +https://arxiv.org/abs/1601.03764|tag|http://www.semanlink.net/tag/sparse_dictionary_learning +https://arxiv.org/abs/1601.03764|tag|http://www.semanlink.net/tag/lexical_ambiguity +https://arxiv.org/abs/1601.03764|arxiv_author|Sanjeev Arora +https://arxiv.org/abs/1601.03764|arxiv_author|Tengyu Ma +https://arxiv.org/abs/1601.03764|arxiv_author|Yuanzhi Li +https://arxiv.org/abs/1601.03764|arxiv_author|Andrej Risteski +https://arxiv.org/abs/1601.03764|arxiv_author|Yingyu Liang +https://arxiv.org/abs/1601.03764|comment|"> Here it is shown that multiple word senses reside +in linear superposition within the word +embedding and simple sparse coding can recover +vectors that approximately capture the +senses + +> Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. + +> The success of the approach is mathematically explained using a variant of +the random walk on discourses model + +(""random walk"": a generative model for language). Under the assumptions of this model, there +exists a linear relationship between the vector of a +word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. 
It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) + +[Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) +" +https://arxiv.org/abs/1601.03764|relatedDoc|https://www.offconvex.org/2016/07/10/embeddingspolysemy/ +https://arxiv.org/abs/1601.03764|title|[1601.03764] Linear Algebraic Structure of Word Senses, with Applications to Polysemy +https://arxiv.org/abs/1601.03764|creationTime|2018-08-28T11:00:08Z +https://arxiv.org/abs/1601.03764|arxiv_summary|"Word embeddings are ubiquitous in NLP and information retrieval, but it is +unclear what they represent when the word is polysemous. Here it is shown that +multiple word senses reside in linear superposition within the word embedding +and simple sparse coding can recover vectors that approximately capture the +senses. The success of our approach, which applies to several embedding +methods, is mathematically explained using a variant of the random walk on +discourses model (Arora et al., 2016). A novel aspect of our technique is that +each extracted word sense is accompanied by one of about 2000 ""discourse atoms"" +that gives a succinct description of which other words co-occur with that word +sense. Discourse atoms can be of independent interest, and make the method +potentially more useful. Empirical tests are used to verify and support the +theory." +https://arxiv.org/abs/1601.03764|arxiv_firstAuthor|Sanjeev Arora +https://arxiv.org/abs/1601.03764|arxiv_updated|2018-12-07T17:30:03Z +https://arxiv.org/abs/1601.03764|arxiv_title|Linear Algebraic Structure of Word Senses, with Applications to Polysemy +https://arxiv.org/abs/1601.03764|arxiv_published|2016-01-14T22:02:18Z +https://arxiv.org/abs/1601.03764|arxiv_num|1601.03764 +http://tempsreel.nouvelobs.com/rue89/rue89-chez-les-economistes-atterres/20140507.RUE0109/calculee-comme-la-depense-publique-la-depense-privee-depasserait-200-du-pib.html|creationDate|2017-03-11 +http://tempsreel.nouvelobs.com/rue89/rue89-chez-les-economistes-atterres/20140507.RUE0109/calculee-comme-la-depense-publique-la-depense-privee-depasserait-200-du-pib.html|tag|http://www.semanlink.net/tag/pib +http://tempsreel.nouvelobs.com/rue89/rue89-chez-les-economistes-atterres/20140507.RUE0109/calculee-comme-la-depense-publique-la-depense-privee-depasserait-200-du-pib.html|tag|http://www.semanlink.net/tag/depense_publique +http://tempsreel.nouvelobs.com/rue89/rue89-chez-les-economistes-atterres/20140507.RUE0109/calculee-comme-la-depense-publique-la-depense-privee-depasserait-200-du-pib.html|comment|"Ben oui, dire ""la dépense publique = 56% du PIB"" ne signifie pas que l'état consomme 56% du PIB ! 
+" +http://tempsreel.nouvelobs.com/rue89/rue89-chez-les-economistes-atterres/20140507.RUE0109/calculee-comme-la-depense-publique-la-depense-privee-depasserait-200-du-pib.html|title|Calculée comme la dépense publique, la dépense privée dépasserait 200% du PIB - 8 mai 2014 - L'Obs +http://tempsreel.nouvelobs.com/rue89/rue89-chez-les-economistes-atterres/20140507.RUE0109/calculee-comme-la-depense-publique-la-depense-privee-depasserait-200-du-pib.html|creationTime|2017-03-11T14:02:54Z +http://claudehartmann1.wixsite.com/clhartmann|creationDate|2018-02-18 +http://claudehartmann1.wixsite.com/clhartmann|tag|http://www.semanlink.net/tag/claude_hartmann +http://claudehartmann1.wixsite.com/clhartmann|title|Claude Hartmann +http://claudehartmann1.wixsite.com/clhartmann|creationTime|2018-02-18T09:21:30Z +http://www.apollon.uio.no/vis/art/2007_4/Artikler/The_Tree_of_Life|creationDate|2008-01-25 +http://www.apollon.uio.no/vis/art/2007_4/Artikler/The_Tree_of_Life|tag|http://www.semanlink.net/tag/biologie +http://www.apollon.uio.no/vis/art/2007_4/Artikler/The_Tree_of_Life|title|Apollon: The Tree of Life Has Lost a Branch +http://www.apollon.uio.no/vis/art/2007_4/Artikler/The_Tree_of_Life|creationTime|2008-01-25T08:43:48Z +https://webmasters.googleblog.com/2016/05/introducing-rich-cards.html|creationDate|2016-05-18 +https://webmasters.googleblog.com/2016/05/introducing-rich-cards.html|tag|http://www.semanlink.net/tag/google_rich_cards +https://webmasters.googleblog.com/2016/05/introducing-rich-cards.html|title|Official Google Webmaster Central Blog: Introducing rich cards +https://webmasters.googleblog.com/2016/05/introducing-rich-cards.html|creationTime|2016-05-18T23:25:14Z +http://eprints.ecs.soton.ac.uk/15614/1/camera-ready.pdf|creationDate|2008-05-15 +http://eprints.ecs.soton.ac.uk/15614/1/camera-ready.pdf|tag|http://www.semanlink.net/tag/synonym_uris +http://eprints.ecs.soton.ac.uk/15614/1/camera-ready.pdf|tag|http://www.semanlink.net/tag/uri_synonymity +http://eprints.ecs.soton.ac.uk/15614/1/camera-ready.pdf|tag|http://www.semanlink.net/tag/hugh_glaser +http://eprints.ecs.soton.ac.uk/15614/1/camera-ready.pdf|tag|http://www.semanlink.net/tag/data_web +http://eprints.ecs.soton.ac.uk/15614/1/camera-ready.pdf|comment|"This paper sets out an architecture for managing URI equivalences on the Web of Data by using Consistent Reference Services.
+The underlying philosophy of the CRS is to treat URIs as first-class entities and separate the +equivalences of a URI into a separate knowledge base that will be aware of both intra- +repository and inter-repository synonymity. Equivalent URIs are grouped into +bundles which are themselves given their own URI. When an application wishes to +find an equivalent URI, the CRS can be queried to retrieve the corresponding bundle. +" +http://eprints.ecs.soton.ac.uk/15614/1/camera-ready.pdf|title|Managing URI Synonymity to Enable Consistent Reference on the Semantic Web +http://eprints.ecs.soton.ac.uk/15614/1/camera-ready.pdf|creationTime|2008-05-15T21:55:45Z +http://www.nytimes.com/2015/12/11/science/an-advance-in-artificial-intelligence-rivals-human-vision-abilities.html|creationDate|2015-12-11 +http://www.nytimes.com/2015/12/11/science/an-advance-in-artificial-intelligence-rivals-human-vision-abilities.html|tag|http://www.semanlink.net/tag/machine_learning +http://www.nytimes.com/2015/12/11/science/an-advance-in-artificial-intelligence-rivals-human-vision-abilities.html|tag|http://www.semanlink.net/tag/bayesian_classification +http://www.nytimes.com/2015/12/11/science/an-advance-in-artificial-intelligence-rivals-human-vision-abilities.html|tag|http://www.semanlink.net/tag/artificial_neural_network +http://www.nytimes.com/2015/12/11/science/an-advance-in-artificial-intelligence-rivals-human-vision-abilities.html|title|A Learning Advance in Artificial Intelligence Rivals Human Abilities - The New York Times +http://www.nytimes.com/2015/12/11/science/an-advance-in-artificial-intelligence-rivals-human-vision-abilities.html|creationTime|2015-12-11T15:52:17Z +https://mailman.stanford.edu/pipermail/java-nlp-user/2009-November/000300.html|creationDate|2014-04-08 +https://mailman.stanford.edu/pipermail/java-nlp-user/2009-November/000300.html|tag|http://www.semanlink.net/tag/stanford_classifier +https://mailman.stanford.edu/pipermail/java-nlp-user/2009-November/000300.html|title|[java-nlp-user] Stanford NER: confidence scores +https://mailman.stanford.edu/pipermail/java-nlp-user/2009-November/000300.html|creationTime|2014-04-08T16:24:34Z +http://grc.com/dos/grcdos.htm|creationDate|2006-01-01 +http://grc.com/dos/grcdos.htm|tag|http://www.semanlink.net/tag/zombie_pcs +http://grc.com/dos/grcdos.htm|tag|http://www.semanlink.net/tag/isp +http://grc.com/dos/grcdos.htm|tag|http://www.semanlink.net/tag/hackers +http://grc.com/dos/grcdos.htm|title|The Attacks on GRC.COM +http://www.lemonde.fr/planete/article/2014/04/23/dans-les-vergers-du-sichuan-les-hommes-font-le-travail-des-abeilles_4405686_3244.html|creationDate|2014-04-23 +http://www.lemonde.fr/planete/article/2014/04/23/dans-les-vergers-du-sichuan-les-hommes-font-le-travail-des-abeilles_4405686_3244.html|tag|http://www.semanlink.net/tag/chine_ecologie +http://www.lemonde.fr/planete/article/2014/04/23/dans-les-vergers-du-sichuan-les-hommes-font-le-travail-des-abeilles_4405686_3244.html|tag|http://www.semanlink.net/tag/abeille +http://www.lemonde.fr/planete/article/2014/04/23/dans-les-vergers-du-sichuan-les-hommes-font-le-travail-des-abeilles_4405686_3244.html|tag|http://www.semanlink.net/tag/travail +http://www.lemonde.fr/planete/article/2014/04/23/dans-les-vergers-du-sichuan-les-hommes-font-le-travail-des-abeilles_4405686_3244.html|tag|http://www.semanlink.net/tag/disparition_des_abeilles 
+http://www.lemonde.fr/planete/article/2014/04/23/dans-les-vergers-du-sichuan-les-hommes-font-le-travail-des-abeilles_4405686_3244.html|tag|http://www.semanlink.net/tag/fruit +http://www.lemonde.fr/planete/article/2014/04/23/dans-les-vergers-du-sichuan-les-hommes-font-le-travail-des-abeilles_4405686_3244.html|title|Dans les vergers du Sichuan, les hommes font le travail des abeilles +http://www.lemonde.fr/planete/article/2014/04/23/dans-les-vergers-du-sichuan-les-hommes-font-le-travail-des-abeilles_4405686_3244.html|creationTime|2014-04-23T21:52:03Z +http://wiki.apache.org/incubator/LindaProposal#Abstract|creationDate|2012-11-19 +http://wiki.apache.org/incubator/LindaProposal#Abstract|tag|http://www.semanlink.net/tag/apache_marmotta +http://wiki.apache.org/incubator/LindaProposal#Abstract|tag|http://www.semanlink.net/tag/sebastian_schaffert +http://wiki.apache.org/incubator/LindaProposal#Abstract|comment|The goal of Apache Linda is to provide an open implementation of a Linked Data Platform that can be used, extended, and deployed easily by organizations who want to publish Linked Data or build custom applications on Linked Data. +http://wiki.apache.org/incubator/LindaProposal#Abstract|title|LindaProposal - Incubator Wiki +http://wiki.apache.org/incubator/LindaProposal#Abstract|creationTime|2012-11-19T14:12:00Z +http://www.scottaaronson.com/blog/?p=2756|creationDate|2016-06-03 +http://www.scottaaronson.com/blog/?p=2756|tag|http://www.semanlink.net/tag/conscience_artificielle +http://www.scottaaronson.com/blog/?p=2756|tag|http://www.semanlink.net/tag/roger_penrose +http://www.scottaaronson.com/blog/?p=2756|title|“Can computers become conscious?”: My reply to Roger Penrose +http://www.scottaaronson.com/blog/?p=2756|creationTime|2016-06-03T22:08:34Z +http://www.xs4all.nl/~kspaink/fishman/home.html|creationDate|2005-09-01 +http://www.xs4all.nl/~kspaink/fishman/home.html|tag|http://www.semanlink.net/tag/internet +http://www.xs4all.nl/~kspaink/fishman/home.html|tag|http://www.semanlink.net/tag/scientologie +http://www.xs4all.nl/~kspaink/fishman/home.html|tag|http://www.semanlink.net/tag/infringing_material +http://www.xs4all.nl/~kspaink/fishman/home.html|tag|http://www.semanlink.net/tag/hyperlinks +http://www.xs4all.nl/~kspaink/fishman/home.html|tag|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.xs4all.nl/~kspaink/fishman/home.html|comment|"Although I won, there's one thing that seriously bugs me, and other people. The court ruled that hyperlinks and url's refering to pages that contain infringing material must in themselves be considered to be infringing. That cuts at the heart of the net. To name one example: it makes search engines illegal: they often refer to pages that contain infringing material. + + + +This is the famous Fishman Affidavit. To explain why is has become so famous, I'll give you some more information. And please take a look at the Scientology Litigation Kit, where I list the materials used for my defense and the lawsuit materials (lawsuits, plea, defense, rulings) and at postings and news about the Dutch Protest." 
+http://www.xs4all.nl/~kspaink/fishman/home.html|title|Karin Spaink - The Fishman Affidavit: contents +https://www.wintellect.com/creating-machine-learning-web-api-flask/|creationDate|2018-09-25 +https://www.wintellect.com/creating-machine-learning-web-api-flask/|tag|http://www.semanlink.net/tag/flask +https://www.wintellect.com/creating-machine-learning-web-api-flask/|title|Creating a Machine Learning Web API with Flask - Wintellect +https://www.wintellect.com/creating-machine-learning-web-api-flask/|creationTime|2018-09-25T09:51:21Z +http://blog.wired.com/wiredscience/2008/09/biologists-on-t.html|creationDate|2008-09-10 +http://blog.wired.com/wiredscience/2008/09/biologists-on-t.html|tag|http://www.semanlink.net/tag/what_is_life +http://blog.wired.com/wiredscience/2008/09/biologists-on-t.html|tag|http://www.semanlink.net/tag/biotechnologies +http://blog.wired.com/wiredscience/2008/09/biologists-on-t.html|tag|http://www.semanlink.net/tag/synthetic_life +http://blog.wired.com/wiredscience/2008/09/biologists-on-t.html|comment|Szostak's protocells are built from fatty molecules that can trap bits of nucleic acids that contain the source code for replication. Combined with a process that harnesses external energy from the sun or chemical reactions, they could form a self-replicating, evolving system that satisfies the conditions of life, but isn't anything like life on earth now, but might represent life as it began or could exist elsewhere in the universe. +http://blog.wired.com/wiredscience/2008/09/biologists-on-t.html|title|Biologists on the Verge of Creating New Form of Life Wired Science from Wired.com +http://blog.wired.com/wiredscience/2008/09/biologists-on-t.html|creationTime|2008-09-10T15:45:47Z +https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials|creationDate|2017-06-23 +https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials|tag|http://www.semanlink.net/tag/gensim +https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials|tag|http://www.semanlink.net/tag/nlp_sample_code +https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials|tag|http://www.semanlink.net/tag/tutorial +https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials|title|Gensim tutorials +https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials|creationTime|2017-06-23T17:18:52Z +http://media.tumblr.com/tumblr_l66nt4w0xq1qbh7fc.jpg|creationDate|2011-01-14 +http://media.tumblr.com/tumblr_l66nt4w0xq1qbh7fc.jpg|tag|http://www.semanlink.net/tag/dosso +http://media.tumblr.com/tumblr_l66nt4w0xq1qbh7fc.jpg|tag|http://www.semanlink.net/tag/jermakoye +http://media.tumblr.com/tumblr_l66nt4w0xq1qbh7fc.jpg|title|Dosso +http://media.tumblr.com/tumblr_l66nt4w0xq1qbh7fc.jpg|creationTime|2011-01-14T00:07:37Z +http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate|creationDate|2019-05-30 +http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate|tag|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate|tag|http://www.semanlink.net/tag/unsupervised_text_classification +http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate|tag|http://www.semanlink.net/tag/category_embedding +http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate|tag|http://www.semanlink.net/tag/nlp_short_texts 
+http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate|tag|http://www.semanlink.net/tag/eswc_2019 +http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate|comment|> we propose a novel probabilistic model for Knowledge-Based Short Text Categorization (KBSTC), **which does not require any labeled training data to classify a short text**. This is achieved by leveraging **entities and categories from large knowledge bases**, which are further embedded into a common vector space, for which we propose a new entity and category embedding model. **Given a short text, its category (e.g. Business, Sports, etc.) can then be derived based on the entities mentioned in the text by exploiting semantic similarity between entities and categories** +http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate|title|Knowledge-Based Short Text Categorization Using Entity and Category Embedding Springer for Research & Development (2019) +http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate|bookmarkOf|https://rd.springer.com/chapter/10.1007/978-3-030-21348-0_23 +http://www.semanlink.net/doc/2019/05/knowledge_based_short_text_cate|creationTime|2019-05-30T11:38:19Z +http://css.maxdesign.com.au/selectutorial/index.htm|creationDate|2005-05-25 +http://css.maxdesign.com.au/selectutorial/index.htm|tag|http://www.semanlink.net/tag/howto +http://css.maxdesign.com.au/selectutorial/index.htm|tag|http://www.semanlink.net/tag/css +http://css.maxdesign.com.au/selectutorial/index.htm|title|Selectutorial: CSS selectors +http://www.semweb.pro/talk/2474|creationDate|2012-02-13 +http://www.semweb.pro/talk/2474|tag|http://www.semanlink.net/tag/semweb_pro_2012 +http://www.semweb.pro/talk/2474|tag|http://www.semanlink.net/tag/configuration_as_linked_data +http://www.semweb.pro/talk/2474|title|Linked Data et description du produit automobile (SemWeb.Pro) +http://www.semweb.pro/talk/2474|creationTime|2012-02-13T19:16:22Z +http://www.nytimes.com/2013/11/18/science/developing-a-fax-machine-to-copy-life-on-mars.html|creationDate|2013-11-19 +http://www.nytimes.com/2013/11/18/science/developing-a-fax-machine-to-copy-life-on-mars.html|tag|http://www.semanlink.net/tag/mars +http://www.nytimes.com/2013/11/18/science/developing-a-fax-machine-to-copy-life-on-mars.html|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.nytimes.com/2013/11/18/science/developing-a-fax-machine-to-copy-life-on-mars.html|tag|http://www.semanlink.net/tag/craig_venter +http://www.nytimes.com/2013/11/18/science/developing-a-fax-machine-to-copy-life-on-mars.html|title|Developing a Fax Machine to Copy Life on Mars - NYTimes.com +http://www.nytimes.com/2013/11/18/science/developing-a-fax-machine-to-copy-life-on-mars.html|creationTime|2013-11-19T00:22:53Z +http://u.cs.biu.ac.il/~yogo/blackbox2018.pdf|creationDate|2018-11-11 +http://u.cs.biu.ac.il/~yogo/blackbox2018.pdf|tag|http://www.semanlink.net/tag/blackboxnlp_workshop_2018 +http://u.cs.biu.ac.il/~yogo/blackbox2018.pdf|tag|http://www.semanlink.net/tag/slides +http://u.cs.biu.ac.il/~yogo/blackbox2018.pdf|tag|http://www.semanlink.net/tag/yoav_goldberg +http://u.cs.biu.ac.il/~yogo/blackbox2018.pdf|title|Trying to Understand Recurrent Neural Networks for Language Processing (slides) +http://u.cs.biu.ac.il/~yogo/blackbox2018.pdf|creationTime|2018-11-11T23:29:46Z +http://km.aifb.uni-karlsruhe.de/ws/esoe2007|creationDate|2007-06-23 +http://km.aifb.uni-karlsruhe.de/ws/esoe2007|tag|http://www.semanlink.net/tag/semantic_tagging 
+http://km.aifb.uni-karlsruhe.de/ws/esoe2007|comment|"The Semantic Web and collaborative tagging are two complementary approaches...Bundles, classification, relations or tagging of tags are some promising ways to enforce some kinds of structure for tags in order to enable scalability and findability...There is a growing interest in marrying the two paradigms in order to create large-scale semantic and intelligent content.
+Abstract submissions: July 23, submissions due: July 30"
+http://km.aifb.uni-karlsruhe.de/ws/esoe2007|title|International Workshop on Emergent Semantics and Ontology Evolution
+http://km.aifb.uni-karlsruhe.de/ws/esoe2007|creationTime|2007-06-23T15:03:58Z
+http://www.semanlink.net/doc/2019/05/combining_knowledge_with_deep_c|creationDate|2019-05-20
+http://www.semanlink.net/doc/2019/05/combining_knowledge_with_deep_c|tag|http://www.semanlink.net/tag/nlp_short_texts
+http://www.semanlink.net/doc/2019/05/combining_knowledge_with_deep_c|tag|http://www.semanlink.net/tag/nlp_text_classification
+http://www.semanlink.net/doc/2019/05/combining_knowledge_with_deep_c|tag|http://www.semanlink.net/tag/nlp_facebook
+http://www.semanlink.net/doc/2019/05/combining_knowledge_with_deep_c|tag|http://www.semanlink.net/tag/nlp_microsoft
+http://www.semanlink.net/doc/2019/05/combining_knowledge_with_deep_c|title|Combining Knowledge with Deep Convolutional Neural Networks for Short Text Classification (2017)
+http://www.semanlink.net/doc/2019/05/combining_knowledge_with_deep_c|bookmarkOf|https://www.ijcai.org/proceedings/2017/0406.pdf
+http://www.semanlink.net/doc/2019/05/combining_knowledge_with_deep_c|creationTime|2019-05-20T19:13:43Z
+https://arxiv.org/abs/1602.04938|creationDate|2018-09-09
+https://arxiv.org/abs/1602.04938|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1602.04938|tag|http://www.semanlink.net/tag/lime
+https://arxiv.org/abs/1602.04938|arxiv_author|Marco Tulio Ribeiro
+https://arxiv.org/abs/1602.04938|arxiv_author|Carlos Guestrin
+https://arxiv.org/abs/1602.04938|arxiv_author|Sameer Singh
+https://arxiv.org/abs/1602.04938|comment|technique that explains the predictions of any classifier by learning an interpretable model locally around the prediction
+https://arxiv.org/abs/1602.04938|title|"[1602.04938] ""Why Should I Trust You?"": Explaining the Predictions of Any Classifier"
+https://arxiv.org/abs/1602.04938|creationTime|2018-09-09T15:22:41Z
+https://arxiv.org/abs/1602.04938|arxiv_summary|"Despite widespread adoption, machine learning models remain mostly black
+boxes. Understanding the reasons behind predictions is, however, quite
+important in assessing trust, which is fundamental if one plans to take action
+based on a prediction, or when choosing whether to deploy a new model. Such
+understanding also provides insights into the model, which can be used to
+transform an untrustworthy model or prediction into a trustworthy one. In this
+work, we propose LIME, a novel explanation technique that explains the
+predictions of any classifier in an interpretable and faithful manner, by
+learning an interpretable model locally around the prediction. We also propose
+a method to explain models by presenting representative individual predictions
+and their explanations in a non-redundant way, framing the task as a submodular
+optimization problem. We demonstrate the flexibility of these methods by
+explaining different models for text (e.g. random forests) and image
+classification (e.g. neural networks). We show the utility of explanations via
+novel experiments, both simulated and with human subjects, on various scenarios
+that require trust: deciding if one should trust a prediction, choosing between
+models, improving an untrustworthy classifier, and identifying why a classifier
+should not be trusted."
+https://arxiv.org/abs/1602.04938|arxiv_firstAuthor|Marco Tulio Ribeiro +https://arxiv.org/abs/1602.04938|arxiv_updated|2016-08-09T17:54:52Z +https://arxiv.org/abs/1602.04938|arxiv_title|"""Why Should I Trust You?"": Explaining the Predictions of Any Classifier" +https://arxiv.org/abs/1602.04938|arxiv_published|2016-02-16T08:20:14Z +https://arxiv.org/abs/1602.04938|arxiv_num|1602.04938 +http://events.linkeddata.org/ldow2008/|creationDate|2007-12-17 +http://events.linkeddata.org/ldow2008/|tag|http://www.semanlink.net/tag/ldow2008 +http://events.linkeddata.org/ldow2008/|title|Linked Data on the Web (LDOW2008) - Workshop at WWW2008, Beijing, China +http://events.linkeddata.org/ldow2008/|creationTime|2007-12-17T12:12:34Z +https://blogs.oracle.com/bblfish/entry/temporal_relations|creationDate|2012-11-14 +https://blogs.oracle.com/bblfish/entry/temporal_relations|tag|http://www.semanlink.net/tag/time_in_rdf +https://blogs.oracle.com/bblfish/entry/temporal_relations|tag|http://www.semanlink.net/tag/henry_story +https://blogs.oracle.com/bblfish/entry/temporal_relations|title|Temporal Relations (The Sun BabelFish Blog) +https://blogs.oracle.com/bblfish/entry/temporal_relations|creationTime|2012-11-14T20:03:30Z +http://en.wikipedia.org/wiki/General_Architecture_for_Text_Engineering|creationDate|2012-04-10 +http://en.wikipedia.org/wiki/General_Architecture_for_Text_Engineering|tag|http://www.semanlink.net/tag/nlp +http://en.wikipedia.org/wiki/General_Architecture_for_Text_Engineering|tag|http://www.semanlink.net/tag/nlp_tools +http://en.wikipedia.org/wiki/General_Architecture_for_Text_Engineering|tag|http://www.semanlink.net/tag/java_tool +http://en.wikipedia.org/wiki/General_Architecture_for_Text_Engineering|comment|a Java suite of tools originally developed at the University of Sheffield beginning in 1995 and now used worldwide by a wide community of scientists, companies, teachers and students for all sorts of natural language processing tasks, including information extraction in many languages. 
+http://en.wikipedia.org/wiki/General_Architecture_for_Text_Engineering|title|General Architecture for Text Engineering (GATE) - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/General_Architecture_for_Text_Engineering|creationTime|2012-04-10T02:07:27Z +http://pchere.blogspot.com/2005/02/absolutely-delicious-complete-tool.html|creationDate|2005-05-23 +http://pchere.blogspot.com/2005/02/absolutely-delicious-complete-tool.html|tag|http://www.semanlink.net/tag/del_icio_us +http://pchere.blogspot.com/2005/02/absolutely-delicious-complete-tool.html|title|Quick Online Tips: Absolutely Del.icio.us - Complete Tool Collection +https://medium.com/s/powertrip/universal-basic-income-is-silicon-valleys-latest-scam-fd3e130b69a0|creationDate|2018-10-10 +https://medium.com/s/powertrip/universal-basic-income-is-silicon-valleys-latest-scam-fd3e130b69a0|tag|http://www.semanlink.net/tag/guaranteed_basic_income +https://medium.com/s/powertrip/universal-basic-income-is-silicon-valleys-latest-scam-fd3e130b69a0|tag|http://www.semanlink.net/tag/douglas_rushkoff +https://medium.com/s/powertrip/universal-basic-income-is-silicon-valleys-latest-scam-fd3e130b69a0|tag|http://www.semanlink.net/tag/uberisation +https://medium.com/s/powertrip/universal-basic-income-is-silicon-valleys-latest-scam-fd3e130b69a0|tag|http://www.semanlink.net/tag/silicon_valley +https://medium.com/s/powertrip/universal-basic-income-is-silicon-valleys-latest-scam-fd3e130b69a0|comment|The plan is no gift to the masses, but a tool for our further enslavement +https://medium.com/s/powertrip/universal-basic-income-is-silicon-valleys-latest-scam-fd3e130b69a0|title|Universal Basic Income Is Silicon Valley’s Latest Scam +https://medium.com/s/powertrip/universal-basic-income-is-silicon-valleys-latest-scam-fd3e130b69a0|creationTime|2018-10-10T23:55:25Z +https://medium.com/the-daily-show/an-open-letter-to-my-parents-1174b1c14dcc#.k4yad3k7u|creationDate|2015-12-04 +https://medium.com/the-daily-show/an-open-letter-to-my-parents-1174b1c14dcc#.k4yad3k7u|tag|http://www.semanlink.net/tag/rigolo +https://medium.com/the-daily-show/an-open-letter-to-my-parents-1174b1c14dcc#.k4yad3k7u|tag|http://www.semanlink.net/tag/mark_zuckerberg +https://medium.com/the-daily-show/an-open-letter-to-my-parents-1174b1c14dcc#.k4yad3k7u|title|AN OPEN LETTER TO MY PARENTS — The Daily Show — Medium +https://medium.com/the-daily-show/an-open-letter-to-my-parents-1174b1c14dcc#.k4yad3k7u|creationTime|2015-12-04T00:10:40Z +https://www.elastic.co/fr/blog/elastic-enterprise-search-beta1-released|creationDate|2019-05-09 +https://www.elastic.co/fr/blog/elastic-enterprise-search-beta1-released|tag|http://www.semanlink.net/tag/elasticsearch +https://www.elastic.co/fr/blog/elastic-enterprise-search-beta1-released|title|Introducing the Elastic Enterprise Search Beta: Search Everything, Anywhere Elastic Blog +https://www.elastic.co/fr/blog/elastic-enterprise-search-beta1-released|creationTime|2019-05-09T08:30:33Z +http://www.wired.com/2016/02/ibm-and-microsoft-will-let-you-roll-your-own-blockchain/|creationDate|2016-02-17 +http://www.wired.com/2016/02/ibm-and-microsoft-will-let-you-roll-your-own-blockchain/|tag|http://www.semanlink.net/tag/ibm +http://www.wired.com/2016/02/ibm-and-microsoft-will-let-you-roll-your-own-blockchain/|tag|http://www.semanlink.net/tag/microsoft +http://www.wired.com/2016/02/ibm-and-microsoft-will-let-you-roll-your-own-blockchain/|tag|http://www.semanlink.net/tag/blockchain 
+http://www.wired.com/2016/02/ibm-and-microsoft-will-let-you-roll-your-own-blockchain/|title|IBM and Microsoft Will Let You Roll Your Own Blockchain WIRED +http://www.wired.com/2016/02/ibm-and-microsoft-will-let-you-roll-your-own-blockchain/|creationTime|2016-02-17T23:50:01Z +http://www.lemonde.fr/idees/article/2013/03/25/les-moocs-a-l-assaut-du-mammouth_1853831_3232.html|creationDate|2013-04-03 +http://www.lemonde.fr/idees/article/2013/03/25/les-moocs-a-l-assaut-du-mammouth_1853831_3232.html|tag|http://www.semanlink.net/tag/mooc +http://www.lemonde.fr/idees/article/2013/03/25/les-moocs-a-l-assaut-du-mammouth_1853831_3232.html|title|Les MOOCs à l'assaut du mammouth +http://www.lemonde.fr/idees/article/2013/03/25/les-moocs-a-l-assaut-du-mammouth_1853831_3232.html|creationTime|2013-04-03T23:30:50Z +http://www.w3.org/TR/skos-reference/|creationDate|2008-05-08 +http://www.w3.org/TR/skos-reference/|tag|http://www.semanlink.net/tag/w3c_working_draft +http://www.w3.org/TR/skos-reference/|tag|http://www.semanlink.net/tag/skos_w3c_document +http://www.w3.org/TR/skos-reference/|title|SKOS Reference +http://www.w3.org/TR/skos-reference/|creationTime|2008-05-08T16:46:46Z +http://webr3.org/blog/semantic-web/conflicting-requirements-when-creating-linked-data-applications/|creationDate|2012-10-22 +http://webr3.org/blog/semantic-web/conflicting-requirements-when-creating-linked-data-applications/|tag|http://www.semanlink.net/tag/linked_data +http://webr3.org/blog/semantic-web/conflicting-requirements-when-creating-linked-data-applications/|tag|http://www.semanlink.net/tag/nathan_rixham +http://webr3.org/blog/semantic-web/conflicting-requirements-when-creating-linked-data-applications/|title|on the issue of conflicting requirements when creating linked data applications. 
– webr3.org
+http://webr3.org/blog/semantic-web/conflicting-requirements-when-creating-linked-data-applications/|creationTime|2012-10-22T10:35:59Z
+https://www.npmjs.com/package/markdown-it-relativelink|creationDate|2017-04-01
+https://www.npmjs.com/package/markdown-it-relativelink|tag|http://www.semanlink.net/tag/markdown_ittt
+https://www.npmjs.com/package/markdown-it-relativelink|title|markdown-it-relativelink
+https://www.npmjs.com/package/markdown-it-relativelink|creationTime|2017-04-01T03:14:42Z
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fen.wikipedia.org%2Fwiki%2FWell-Tempered_Clavier%3E+%7D%0D%0A%7D|creationDate|2008-02-01
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fen.wikipedia.org%2Fwiki%2FWell-Tempered_Clavier%3E+%7D%0D%0A%7D|tag|http://www.semanlink.net/tag/sparql
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fen.wikipedia.org%2Fwiki%2FWell-Tempered_Clavier%3E+%7D%0D%0A%7D|tag|http://www.semanlink.net/tag/sparql_sample_code
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fen.wikipedia.org%2Fwiki%2FWell-Tempered_Clavier%3E+%7D%0D%0A%7D|tag|http://www.semanlink.net/tag/wikipedia_page_to_concept
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fen.wikipedia.org%2Fwiki%2FWell-Tempered_Clavier%3E+%7D%0D%0A%7D|comment|"SELECT ?subject ?p
+WHERE {
+  { ?subject ?p <http://en.wikipedia.org/wiki/Well-Tempered_Clavier> }
+}"
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fen.wikipedia.org%2Fwiki%2FWell-Tempered_Clavier%3E+%7D%0D%0A%7D|title|wiki to dbpedia with sparql
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fen.wikipedia.org%2Fwiki%2FWell-Tempered_Clavier%3E+%7D%0D%0A%7D|creationTime|2008-02-01T22:31:16Z
+http://www.sciencedirect.com/science/article/pii/S0925231215014502|creationDate|2017-07-21
+http://www.sciencedirect.com/science/article/pii/S0925231215014502|tag|http://www.semanlink.net/tag/nlp_short_texts
+http://www.sciencedirect.com/science/article/pii/S0925231215014502|tag|http://www.semanlink.net/tag/using_word_embedding
+http://www.sciencedirect.com/science/article/pii/S0925231215014502|tag|http://www.semanlink.net/tag/convolutional_neural_network
+http://www.sciencedirect.com/science/article/pii/S0925231215014502|title|Semantic expansion using word embedding clustering and convolutional neural network for improving short text classification - ScienceDirect
+http://www.sciencedirect.com/science/article/pii/S0925231215014502|creationTime|2017-07-21T01:36:21Z
+http://www.johnbreslin.com/blog/2008/03/25/the-semantic-web-enables-us-to-use-portals-in-a-more-intelligent-fashion-so-we-can-do-business-more-efficiently/|creationDate|2008-03-29
+http://www.johnbreslin.com/blog/2008/03/25/the-semantic-web-enables-us-to-use-portals-in-a-more-intelligent-fashion-so-we-can-do-business-more-efficiently/|tag|http://www.semanlink.net/tag/semantic_web_business
+http://www.johnbreslin.com/blog/2008/03/25/the-semantic-web-enables-us-to-use-portals-in-a-more-intelligent-fashion-so-we-can-do-business-more-efficiently/|tag|http://www.semanlink.net/tag/art
+http://www.johnbreslin.com/blog/2008/03/25/the-semantic-web-enables-us-to-use-portals-in-a-more-intelligent-fashion-so-we-can-do-business-more-efficiently/|tag|http://www.semanlink.net/tag/mutualart_com +http://www.johnbreslin.com/blog/2008/03/25/the-semantic-web-enables-us-to-use-portals-in-a-more-intelligent-fashion-so-we-can-do-business-more-efficiently/|comment|MutualArt.com is a global initiative to link art collectors with artists, museums, galleries and information sources including the leading art publications, auction house information and prices. It is the first major application of the semantic web to a consumer service. +http://www.johnbreslin.com/blog/2008/03/25/the-semantic-web-enables-us-to-use-portals-in-a-more-intelligent-fashion-so-we-can-do-business-more-efficiently/|title|“The semantic web enables us to use portals in a more intelligent fashion, so we can do business more efficiently” at Cloudlands +http://www.johnbreslin.com/blog/2008/03/25/the-semantic-web-enables-us-to-use-portals-in-a-more-intelligent-fashion-so-we-can-do-business-more-efficiently/|creationTime|2008-03-29T18:54:44Z +http://www.rioscenarium.com.br/|creationDate|2013-05-15 +http://www.rioscenarium.com.br/|tag|http://www.semanlink.net/tag/samba +http://www.rioscenarium.com.br/|tag|http://www.semanlink.net/tag/ldow2013 +http://www.rioscenarium.com.br/|tag|http://www.semanlink.net/tag/rio_de_janeiro +http://www.rioscenarium.com.br/|title|Rio Scenarium +http://www.rioscenarium.com.br/|creationTime|2013-05-15T16:26:37Z +http://validator.w3.org/|creationDate|2007-08-08 +http://validator.w3.org/|tag|http://www.semanlink.net/tag/w3c +http://validator.w3.org/|tag|http://www.semanlink.net/tag/validation +http://validator.w3.org/|title|The W3C Markup Validation Service +http://validator.w3.org/|creationTime|2007-08-08T17:05:17Z +http://stackoverflow.com/questions/978061/http-get-with-request-body|creationDate|2017-04-27 +http://stackoverflow.com/questions/978061/http-get-with-request-body|tag|http://www.semanlink.net/tag/http +http://stackoverflow.com/questions/978061/http-get-with-request-body|tag|http://www.semanlink.net/tag/rest +http://stackoverflow.com/questions/978061/http-get-with-request-body|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/978061/http-get-with-request-body|title|rest - HTTP GET with request body - Stack Overflow +http://stackoverflow.com/questions/978061/http-get-with-request-body|creationTime|2017-04-27T09:58:55Z +http://www.paulgraham.com/gh.html|creationDate|2005-12-14 +http://www.paulgraham.com/gh.html|tag|http://www.semanlink.net/tag/hackers +http://www.paulgraham.com/gh.html|tag|http://www.semanlink.net/tag/travail +http://www.paulgraham.com/gh.html|tag|http://www.semanlink.net/tag/java +http://www.paulgraham.com/gh.html|tag|http://www.semanlink.net/tag/good +http://www.paulgraham.com/gh.html|tag|http://www.semanlink.net/tag/paul_graham +http://www.paulgraham.com/gh.html|tag|http://www.semanlink.net/tag/productivite +http://www.paulgraham.com/gh.html|tag|http://www.semanlink.net/tag/python +http://www.paulgraham.com/gh.html|title|Great Hackers +http://blog.ted.com/2014/03/15/sugata-mitra-opens-first-independent-school-in-the-cloud-in-india/|creationDate|2014-03-17 +http://blog.ted.com/2014/03/15/sugata-mitra-opens-first-independent-school-in-the-cloud-in-india/|tag|http://www.semanlink.net/tag/e_learning +http://blog.ted.com/2014/03/15/sugata-mitra-opens-first-independent-school-in-the-cloud-in-india/|tag|http://www.semanlink.net/tag/ted 
+http://blog.ted.com/2014/03/15/sugata-mitra-opens-first-independent-school-in-the-cloud-in-india/|tag|http://www.semanlink.net/tag/inde +http://blog.ted.com/2014/03/15/sugata-mitra-opens-first-independent-school-in-the-cloud-in-india/|comment|A School in the Cloud among the mangrove trees +http://blog.ted.com/2014/03/15/sugata-mitra-opens-first-independent-school-in-the-cloud-in-india/|title|Sugata Mitra opens his first independent learning lab in India TED Blog +http://blog.ted.com/2014/03/15/sugata-mitra-opens-first-independent-school-in-the-cloud-in-india/|creationTime|2014-03-17T08:32:22Z +https://tomaugspurger.github.io/sklearn-dask-tabular.html|creationDate|2018-09-17 +https://tomaugspurger.github.io/sklearn-dask-tabular.html|tag|http://www.semanlink.net/tag/pandas +https://tomaugspurger.github.io/sklearn-dask-tabular.html|tag|http://www.semanlink.net/tag/scikit_learn +https://tomaugspurger.github.io/sklearn-dask-tabular.html|title|datas-frame – Tabular Data in Scikit-Learn and Dask-ML +https://tomaugspurger.github.io/sklearn-dask-tabular.html|creationTime|2018-09-17T18:06:59Z +http://jimbojw.com/wiki/index.php?title=Understanding_Hbase_and_BigTable|creationDate|2013-02-28 +http://jimbojw.com/wiki/index.php?title=Understanding_Hbase_and_BigTable|tag|http://www.semanlink.net/tag/hbase +http://jimbojw.com/wiki/index.php?title=Understanding_Hbase_and_BigTable|tag|http://www.semanlink.net/tag/tutorial +http://jimbojw.com/wiki/index.php?title=Understanding_Hbase_and_BigTable|title|Understanding HBase and BigTable - Jimbojw.com +http://jimbojw.com/wiki/index.php?title=Understanding_Hbase_and_BigTable|creationTime|2013-02-28T16:06:36Z +http://www.infoworld.com/d/big-data/cognitive-computing-can-take-the-semantic-web-the-next-level-235308|creationDate|2014-02-03 +http://www.infoworld.com/d/big-data/cognitive-computing-can-take-the-semantic-web-the-next-level-235308|tag|http://www.semanlink.net/tag/cognitive_computing +http://www.infoworld.com/d/big-data/cognitive-computing-can-take-the-semantic-web-the-next-level-235308|tag|http://www.semanlink.net/tag/big_data_semantic_web +http://www.infoworld.com/d/big-data/cognitive-computing-can-take-the-semantic-web-the-next-level-235308|title|Cognitive computing can take the semantic Web to the next level Big Data - InfoWorld +http://www.infoworld.com/d/big-data/cognitive-computing-can-take-the-semantic-web-the-next-level-235308|creationTime|2014-02-03T23:21:32Z +https://static1.squarespace.com/static/54bf3241e4b0f0d81bf7ff36/t/55e9494fe4b011aed10e48e5/1441352015658/probability_cheatsheet.pdf|creationDate|2016-01-05 +https://static1.squarespace.com/static/54bf3241e4b0f0d81bf7ff36/t/55e9494fe4b011aed10e48e5/1441352015658/probability_cheatsheet.pdf|tag|http://www.semanlink.net/tag/cheat_sheet +https://static1.squarespace.com/static/54bf3241e4b0f0d81bf7ff36/t/55e9494fe4b011aed10e48e5/1441352015658/probability_cheatsheet.pdf|tag|http://www.semanlink.net/tag/probabilites +https://static1.squarespace.com/static/54bf3241e4b0f0d81bf7ff36/t/55e9494fe4b011aed10e48e5/1441352015658/probability_cheatsheet.pdf|title|Probability Cheatsheet +https://static1.squarespace.com/static/54bf3241e4b0f0d81bf7ff36/t/55e9494fe4b011aed10e48e5/1441352015658/probability_cheatsheet.pdf|creationTime|2016-01-05T15:16:51Z +http://en.wikipedia.org/wiki/American_Gangster_(film)|creationDate|2010-02-12 +http://en.wikipedia.org/wiki/American_Gangster_(film)|tag|http://www.semanlink.net/tag/film_americain 
+http://en.wikipedia.org/wiki/American_Gangster_(film)|tag|http://www.semanlink.net/tag/film_policier +http://en.wikipedia.org/wiki/American_Gangster_(film)|tag|http://www.semanlink.net/tag/ridley_scott +http://en.wikipedia.org/wiki/American_Gangster_(film)|title|American Gangster +http://en.wikipedia.org/wiki/American_Gangster_(film)|creationTime|2010-02-12T23:57:11Z +https://medium.com/@acrosson/summarize-documents-using-tf-idf-bdee8f60b71|creationDate|2017-05-24 +https://medium.com/@acrosson/summarize-documents-using-tf-idf-bdee8f60b71|tag|http://www.semanlink.net/tag/nlp_sample_code +https://medium.com/@acrosson/summarize-documents-using-tf-idf-bdee8f60b71|tag|http://www.semanlink.net/tag/tf_idf +https://medium.com/@acrosson/summarize-documents-using-tf-idf-bdee8f60b71|tag|http://www.semanlink.net/tag/automatic_summarization +https://medium.com/@acrosson/summarize-documents-using-tf-idf-bdee8f60b71|title|Summarize Documents using Tf-Idf – Alexander Crosson – Medium +https://medium.com/@acrosson/summarize-documents-using-tf-idf-bdee8f60b71|creationTime|2017-05-24T17:10:17Z +http://semanticweb.com/sindicetech-helps-enterprises-build-private-linked-data-clouds_b30454|creationDate|2014-03-07 +http://semanticweb.com/sindicetech-helps-enterprises-build-private-linked-data-clouds_b30454|tag|http://www.semanlink.net/tag/cloud_and_linked_data +http://semanticweb.com/sindicetech-helps-enterprises-build-private-linked-data-clouds_b30454|tag|http://www.semanlink.net/tag/sindice +http://semanticweb.com/sindicetech-helps-enterprises-build-private-linked-data-clouds_b30454|title|SindiceTech Helps Enterprises Build Private Linked Data Clouds - Semanticweb.com +http://semanticweb.com/sindicetech-helps-enterprises-build-private-linked-data-clouds_b30454|creationTime|2014-03-07T11:42:20Z +http://fr.wikipedia.org/wiki/Le_Festin_de_Babette|creationDate|2006-03-18 +http://fr.wikipedia.org/wiki/Le_Festin_de_Babette|tag|http://www.semanlink.net/tag/film_danois +http://fr.wikipedia.org/wiki/Le_Festin_de_Babette|tag|http://www.semanlink.net/tag/karen_blixen +http://fr.wikipedia.org/wiki/Le_Festin_de_Babette|comment|"Babettes Gaestebud. Film danois (1987) de Gabriel Axel avec Stéphane Audran.
+Exilée au Danemark suite à la répression de la Commune de Paris en 1871, une française (Stéphane Audran) est engagée par les austères filles d'un pasteur. Lorsque, 14 ans plus tard, elle gagne à la loterie, elle utilise l'argent pour leur cuisiner un festin..."
+http://fr.wikipedia.org/wiki/Le_Festin_de_Babette|title|Le Festin de Babette
+http://ablvienna.wordpress.com/2011/12/01/using-dbpedia-to-generate-skos-thesauri/|creationDate|2011-12-18
+http://ablvienna.wordpress.com/2011/12/01/using-dbpedia-to-generate-skos-thesauri/|tag|http://www.semanlink.net/tag/dbpedia
+http://ablvienna.wordpress.com/2011/12/01/using-dbpedia-to-generate-skos-thesauri/|tag|http://www.semanlink.net/tag/skos
+http://ablvienna.wordpress.com/2011/12/01/using-dbpedia-to-generate-skos-thesauri/|title|Using DBpedia to generate SKOS thesauri « About the social semantic web
+http://ablvienna.wordpress.com/2011/12/01/using-dbpedia-to-generate-skos-thesauri/|creationTime|2011-12-18T14:50:51Z
+http://www.descarga.com/cgi-bin/db/archives/Profile88?BEsrYwYo;;181|creationDate|2008-12-29
+http://www.descarga.com/cgi-bin/db/archives/Profile88?BEsrYwYo;;181|tag|http://www.semanlink.net/tag/senegal
+http://www.descarga.com/cgi-bin/db/archives/Profile88?BEsrYwYo;;181|tag|http://www.semanlink.net/tag/salsa
+http://www.descarga.com/cgi-bin/db/archives/Profile88?BEsrYwYo;;181|tag|http://www.semanlink.net/tag/disque_a_retrouver
+http://www.descarga.com/cgi-bin/db/archives/Profile88?BEsrYwYo;;181|comment|Laba journeyed to Paris in the early '80s to record Maestro Laba Sosseh con l'Orquesta Aragón: Akoguin Theresa.
+http://www.descarga.com/cgi-bin/db/archives/Profile88?BEsrYwYo;;181|title|Laba Sosseh
+http://www.descarga.com/cgi-bin/db/archives/Profile88?BEsrYwYo;;181|creationTime|2008-12-29T20:17:50Z
+http://wit.tuwien.ac.at/people/michlmayr/addatag/|creationDate|2007-04-03
+http://wit.tuwien.ac.at/people/michlmayr/addatag/|tag|http://www.semanlink.net/tag/steve_cayzer
+http://wit.tuwien.ac.at/people/michlmayr/addatag/|tag|http://www.semanlink.net/tag/tagging
+http://wit.tuwien.ac.at/people/michlmayr/addatag/|comment|If a user's tagging data is treated as a continuous stream of information about a user's interests, it can be used to create a rich user profile. The profile should represent the most important parts of a user's behaviour. Both persistent long-term interests and transient short-term interests should co-exist in the profile.
+http://wit.tuwien.ac.at/people/michlmayr/addatag/|title|WIT - The Add-A-Tag algorithm +http://wit.tuwien.ac.at/people/michlmayr/addatag/|creationTime|2007-04-03T23:17:48Z +http://python-guide-pt-br.readthedocs.io/en/latest/writing/style/|creationDate|2017-06-04 +http://python-guide-pt-br.readthedocs.io/en/latest/writing/style/|tag|http://www.semanlink.net/tag/python +http://python-guide-pt-br.readthedocs.io/en/latest/writing/style/|title|Code Style — The Hitchhiker's Guide to Python +http://python-guide-pt-br.readthedocs.io/en/latest/writing/style/|creationTime|2017-06-04T23:29:37Z +http://bugbrother.blog.lemonde.fr/2011/02/04/les-services-de-renseignement-ne-disent-pas-merci-a-lhadopi/|creationDate|2011-02-04 +http://bugbrother.blog.lemonde.fr/2011/02/04/les-services-de-renseignement-ne-disent-pas-merci-a-lhadopi/|tag|http://www.semanlink.net/tag/hadopi_riposte_graduee +http://bugbrother.blog.lemonde.fr/2011/02/04/les-services-de-renseignement-ne-disent-pas-merci-a-lhadopi/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2011/02/04/les-services-de-renseignement-ne-disent-pas-merci-a-lhadopi/|tag|http://www.semanlink.net/tag/nsa +http://bugbrother.blog.lemonde.fr/2011/02/04/les-services-de-renseignement-ne-disent-pas-merci-a-lhadopi/|title|La NSA, la DGSE et la DCRI ne disent pas merci à l’Hadopi - BUG BROTHER - Blog LeMonde.fr +http://bugbrother.blog.lemonde.fr/2011/02/04/les-services-de-renseignement-ne-disent-pas-merci-a-lhadopi/|creationTime|2011-02-04T17:08:26Z +http://download.tensorflow.org/paper/whitepaper2015.pdf|creationDate|2015-11-09 +http://download.tensorflow.org/paper/whitepaper2015.pdf|tag|http://www.semanlink.net/tag/tensorflow +http://download.tensorflow.org/paper/whitepaper2015.pdf|title|TensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems +http://download.tensorflow.org/paper/whitepaper2015.pdf|creationTime|2015-11-09T18:49:56Z +http://autourduciel.blog.lemonde.fr/2014/11/11/suivez-en-direct-latterrissage-de-philae-sur-la-comete-67p-le-12-novembre/|creationDate|2014-11-11 +http://autourduciel.blog.lemonde.fr/2014/11/11/suivez-en-direct-latterrissage-de-philae-sur-la-comete-67p-le-12-novembre/|tag|http://www.semanlink.net/tag/philae +http://autourduciel.blog.lemonde.fr/2014/11/11/suivez-en-direct-latterrissage-de-philae-sur-la-comete-67p-le-12-novembre/|title|Suivez en direct l’atterrissage de Philae sur la comète 67P le 12 novembre Autour du Ciel +http://autourduciel.blog.lemonde.fr/2014/11/11/suivez-en-direct-latterrissage-de-philae-sur-la-comete-67p-le-12-novembre/|creationTime|2014-11-11T14:50:03Z +https://www6.software.ibm.com/developerworks/education/x-xmlcss/|creationDate|2006-06-25 +https://www6.software.ibm.com/developerworks/education/x-xmlcss/|tag|http://www.semanlink.net/tag/displaying_xml_with_css +https://www6.software.ibm.com/developerworks/education/x-xmlcss/|tag|http://www.semanlink.net/tag/ibm_developerworks +https://www6.software.ibm.com/developerworks/education/x-xmlcss/|title|Display XML with Cascading Stylesheets, Part 1: Use Cascading Stylesheets to display XML +http://aeon.co/magazine/science/pregnancy-is-a-battleground-between-mother-father-and-baby/|creationDate|2014-08-28 +http://aeon.co/magazine/science/pregnancy-is-a-battleground-between-mother-father-and-baby/|tag|http://www.semanlink.net/tag/evolution +http://aeon.co/magazine/science/pregnancy-is-a-battleground-between-mother-father-and-baby/|tag|http://www.semanlink.net/tag/pregnancy 
+http://aeon.co/magazine/science/pregnancy-is-a-battleground-between-mother-father-and-baby/|tag|http://www.semanlink.net/tag/genetique +http://aeon.co/magazine/science/pregnancy-is-a-battleground-between-mother-father-and-baby/|tag|http://www.semanlink.net/tag/cerveau +http://aeon.co/magazine/science/pregnancy-is-a-battleground-between-mother-father-and-baby/|tag|http://www.semanlink.net/tag/primate +http://aeon.co/magazine/science/pregnancy-is-a-battleground-between-mother-father-and-baby/|title|Pregnancy is a war between mother and child – Suzanne Sadedin – Aeon +http://aeon.co/magazine/science/pregnancy-is-a-battleground-between-mother-father-and-baby/|creationTime|2014-08-28T15:20:14Z +http://www.lemonde.fr/afrique/article/2017/08/01/le-jour-de-l-umuganda-tout-le-monde-travaille-au-rwanda_5167306_3212.html|creationDate|2017-08-02 +http://www.lemonde.fr/afrique/article/2017/08/01/le-jour-de-l-umuganda-tout-le-monde-travaille-au-rwanda_5167306_3212.html|tag|http://www.semanlink.net/tag/rwanda +http://www.lemonde.fr/afrique/article/2017/08/01/le-jour-de-l-umuganda-tout-le-monde-travaille-au-rwanda_5167306_3212.html|title|Le jour de l’umuganda, tout le monde travaille au Rwanda +http://www.lemonde.fr/afrique/article/2017/08/01/le-jour-de-l-umuganda-tout-le-monde-travaille-au-rwanda_5167306_3212.html|creationTime|2017-08-02T10:44:38Z +http://bergie.iki.fi/blog/using_rdfa_to_make_a_web_page_editable/|creationDate|2012-09-01 +http://bergie.iki.fi/blog/using_rdfa_to_make_a_web_page_editable/|tag|http://www.semanlink.net/tag/henri_bergius +http://bergie.iki.fi/blog/using_rdfa_to_make_a_web_page_editable/|tag|http://www.semanlink.net/tag/vie_vienna_iks_editables +http://bergie.iki.fi/blog/using_rdfa_to_make_a_web_page_editable/|tag|http://www.semanlink.net/tag/rdfa +http://bergie.iki.fi/blog/using_rdfa_to_make_a_web_page_editable/|title|Using RDFa to make a web page editable - Henri Bergius +http://bergie.iki.fi/blog/using_rdfa_to_make_a_web_page_editable/|creationTime|2012-09-01T15:41:51Z +http://www.paulgraham.com/marginal.html|creationDate|2006-08-29 +http://www.paulgraham.com/marginal.html|tag|http://www.semanlink.net/tag/paul_graham +http://www.paulgraham.com/marginal.html|comment|"People at big companies don't realize the extent to which +they live in an environment that is one large, ongoing test for the +wrong qualities." 
+http://www.paulgraham.com/marginal.html|title|The Power of the Marginal +http://www.jenitennison.com/blog/node/94|creationDate|2012-08-09 +http://www.jenitennison.com/blog/node/94|tag|http://www.semanlink.net/tag/rdfquery +http://www.jenitennison.com/blog/node/94|tag|http://www.semanlink.net/tag/rdfa +http://www.jenitennison.com/blog/node/94|tag|http://www.semanlink.net/tag/jeni_tennison +http://www.jenitennison.com/blog/node/94|title|rdfQuery: Progressive Enhancement with RDFa Jeni's Musings +http://www.jenitennison.com/blog/node/94|creationTime|2012-08-09T15:10:45Z +https://github.com/lanthaler/JsonLD|creationDate|2014-10-29 +https://github.com/lanthaler/JsonLD|tag|http://www.semanlink.net/tag/markus_lanthaler +https://github.com/lanthaler/JsonLD|tag|http://www.semanlink.net/tag/json_ld +https://github.com/lanthaler/JsonLD|title|lanthaler/JsonLD processor written in PHP, Github +https://github.com/lanthaler/JsonLD|creationTime|2014-10-29T01:06:39Z +http://newsletter.ruder.io/|creationDate|2018-05-29 +http://newsletter.ruder.io/|tag|http://www.semanlink.net/tag/sebastian_ruder +http://newsletter.ruder.io/|tag|http://www.semanlink.net/tag/nlp +http://newsletter.ruder.io/|tag|http://www.semanlink.net/tag/news +http://newsletter.ruder.io/|tag|http://www.semanlink.net/tag/ml_nlp_blog +http://newsletter.ruder.io/|title|NLP News Revue +http://newsletter.ruder.io/|creationTime|2018-05-29T15:16:21Z +https://www.cnrs.fr/occitanie-ouest/actualites/article/alerte-presse-les-plantes-graminees-peuvent-acquerir-les-genes-de-leurs|creationDate|2019-02-22 +https://www.cnrs.fr/occitanie-ouest/actualites/article/alerte-presse-les-plantes-graminees-peuvent-acquerir-les-genes-de-leurs|tag|http://www.semanlink.net/tag/horizontal_gene_transfer +https://www.cnrs.fr/occitanie-ouest/actualites/article/alerte-presse-les-plantes-graminees-peuvent-acquerir-les-genes-de-leurs|title|CNRS - Les plantes graminées peuvent acquérir les gènes de leurs voisines +https://www.cnrs.fr/occitanie-ouest/actualites/article/alerte-presse-les-plantes-graminees-peuvent-acquerir-les-genes-de-leurs|creationTime|2019-02-22T13:23:39Z +http://comments.gmane.org/gmane.org.w3c.semantic-web/18585|creationDate|2013-01-24 +http://comments.gmane.org/gmane.org.w3c.semantic-web/18585|tag|http://www.semanlink.net/tag/google_knowledge_graph +http://comments.gmane.org/gmane.org.w3c.semantic-web/18585|tag|http://www.semanlink.net/tag/freebase +http://comments.gmane.org/gmane.org.w3c.semantic-web/18585|title|Knowledge Graph links to Freebase - W3C Semantic Web discussion list +http://comments.gmane.org/gmane.org.w3c.semantic-web/18585|creationTime|2013-01-24T17:45:07Z +http://www.lemonde.fr/societe/article/2008/11/26/les-mauvais-comptes-des-mineurs-delinquants_1123316_3224.html|creationDate|2008-11-26 +http://www.lemonde.fr/societe/article/2008/11/26/les-mauvais-comptes-des-mineurs-delinquants_1123316_3224.html|tag|http://www.semanlink.net/tag/sarkozy +http://www.lemonde.fr/societe/article/2008/11/26/les-mauvais-comptes-des-mineurs-delinquants_1123316_3224.html|tag|http://www.semanlink.net/tag/delinquance +http://www.lemonde.fr/societe/article/2008/11/26/les-mauvais-comptes-des-mineurs-delinquants_1123316_3224.html|title|Les mauvais comptes des mineurs délinquants +http://www.lemonde.fr/societe/article/2008/11/26/les-mauvais-comptes-des-mineurs-delinquants_1123316_3224.html|creationTime|2008-11-26T20:48:27Z +http://www.lemonde.fr/societe/article/2008/11/26/les-mauvais-comptes-des-mineurs-delinquants_1123316_3224.html|source|Le Monde 
+http://www.lemonde.fr/societe/article/2010/06/05/washington-a-la-conquete-du-9-3_1368266_3224.html|creationDate|2010-06-07 +http://www.lemonde.fr/societe/article/2010/06/05/washington-a-la-conquete-du-9-3_1368266_3224.html|tag|http://www.semanlink.net/tag/relations_franco_americaines +http://www.lemonde.fr/societe/article/2010/06/05/washington-a-la-conquete-du-9-3_1368266_3224.html|tag|http://www.semanlink.net/tag/9_3 +http://www.lemonde.fr/societe/article/2010/06/05/washington-a-la-conquete-du-9-3_1368266_3224.html|title|"Washington à la conquête du ""9-3""" +http://www.lemonde.fr/societe/article/2010/06/05/washington-a-la-conquete-du-9-3_1368266_3224.html|creationTime|2010-06-07T13:34:26Z +http://www.lemonde.fr/societe/article/2010/06/05/washington-a-la-conquete-du-9-3_1368266_3224.html|source|Le Monde +http://www.lemonde.fr/societe/article/2010/06/05/washington-a-la-conquete-du-9-3_1368266_3224.html|date|2010-06-06 +https://nordicapis.com/5-best-speech-to-text-apis/|creationDate|2019-03-30 +https://nordicapis.com/5-best-speech-to-text-apis/|tag|http://www.semanlink.net/tag/speech_recognition +https://nordicapis.com/5-best-speech-to-text-apis/|title|5 Best Speech-to-Text APIs Nordic APIs +https://nordicapis.com/5-best-speech-to-text-apis/|creationTime|2019-03-30T15:47:47Z +https://www.youtube.com/watch?v=KR46z_V0BVw|creationDate|2018-06-10 +https://www.youtube.com/watch?v=KR46z_V0BVw|tag|http://www.semanlink.net/tag/sanjeev_arora +https://www.youtube.com/watch?v=KR46z_V0BVw|tag|http://www.semanlink.net/tag/word_embedding +https://www.youtube.com/watch?v=KR46z_V0BVw|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=KR46z_V0BVw|comment|"Why do low-dimensional word vectors exist? + +> a text corpus is imagined as being generated by a random walk in a latent variable space, and the word production is via a loglinear distribution. 
This model is shown to imply several empirically discovered past methods for word embedding like word2vec, GloVe, PMI etc + +[Related paper](/doc/?uri=http%3A%2F%2Fwww.aclweb.org%2Fanthology%2FQ16-1028)" +https://www.youtube.com/watch?v=KR46z_V0BVw|relatedDoc|http://www.aclweb.org/anthology/Q16-1028 +https://www.youtube.com/watch?v=KR46z_V0BVw|title|"Sanjeev Arora on ""A theoretical approach to semantic representations"" - YouTube (2016)" +https://www.youtube.com/watch?v=KR46z_V0BVw|creationTime|2018-06-10T15:07:37Z +http://news.bbc.co.uk/2/hi/americas/7460364.stm|creationDate|2008-06-25 +http://news.bbc.co.uk/2/hi/americas/7460364.stm|tag|http://www.semanlink.net/tag/cuivre +http://news.bbc.co.uk/2/hi/americas/7460364.stm|tag|http://www.semanlink.net/tag/chine +http://news.bbc.co.uk/2/hi/americas/7460364.stm|tag|http://www.semanlink.net/tag/industrie_miniere +http://news.bbc.co.uk/2/hi/americas/7460364.stm|tag|http://www.semanlink.net/tag/perou +http://news.bbc.co.uk/2/hi/americas/7460364.stm|title|BBC NEWS Peru's 'copper mountain' in Chinese hands +http://news.bbc.co.uk/2/hi/americas/7460364.stm|creationTime|2008-06-25T21:39:52Z +http://news.bbc.co.uk/2/hi/americas/7460364.stm|source|BBC +http://blog.iks-project.eu/apache-stanbol-now-with-multi-language-support/|creationDate|2012-07-03 +http://blog.iks-project.eu/apache-stanbol-now-with-multi-language-support/|tag|http://www.semanlink.net/tag/apache_stanbol +http://blog.iks-project.eu/apache-stanbol-now-with-multi-language-support/|tag|http://www.semanlink.net/tag/multi_language_support +http://blog.iks-project.eu/apache-stanbol-now-with-multi-language-support/|title|Apache Stanbol now with multi-language support IKS Blog – The Semantic CMS Community +http://blog.iks-project.eu/apache-stanbol-now-with-multi-language-support/|creationTime|2012-07-03T00:52:42Z +http://www.kyb.mpg.de/fileadmin/user_upload/files/publications/attachments/Luxburg07_tutorial_4488%5b0%5d.pdf|creationDate|2018-05-05 +http://www.kyb.mpg.de/fileadmin/user_upload/files/publications/attachments/Luxburg07_tutorial_4488%5b0%5d.pdf|tag|http://www.semanlink.net/tag/spectral_clustering +http://www.kyb.mpg.de/fileadmin/user_upload/files/publications/attachments/Luxburg07_tutorial_4488%5b0%5d.pdf|title|A Tutorial on Spectral Clustering +http://www.kyb.mpg.de/fileadmin/user_upload/files/publications/attachments/Luxburg07_tutorial_4488%5b0%5d.pdf|creationTime|2018-05-05T12:25:48Z +https://arxiv.org/abs/1511.07972|creationDate|2017-10-24 +https://arxiv.org/abs/1511.07972|tag|http://www.semanlink.net/tag/memory_embeddings +https://arxiv.org/abs/1511.07972|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1511.07972|arxiv_author|Cristóbal Esteban +https://arxiv.org/abs/1511.07972|arxiv_author|Yinchong Yang +https://arxiv.org/abs/1511.07972|arxiv_author|Denis Krompaß +https://arxiv.org/abs/1511.07972|arxiv_author|Volker Tresp +https://arxiv.org/abs/1511.07972|arxiv_author|Stephan Baier +https://arxiv.org/abs/1511.07972|title|[1511.07972] Learning with Memory Embeddings +https://arxiv.org/abs/1511.07972|creationTime|2017-10-24T14:47:21Z +https://arxiv.org/abs/1511.07972|arxiv_summary|"Embedding learning, a.k.a. representation learning, has been shown to be able +to model large-scale semantic knowledge graphs. A key concept is a mapping of +the knowledge graph to a tensor representation whose entries are predicted by +models using latent representations of generalized entities. 
Latent variable +models are well suited to deal with the high dimensionality and sparsity of +typical knowledge graphs. In recent publications the embedding models were +extended to also consider time evolutions, time patterns and subsymbolic +representations. In this paper we map embedding models, which were developed +purely as solutions to technical problems for modelling temporal knowledge +graphs, to various cognitive memory functions, in particular to semantic and +concept memory, episodic memory, sensory memory, short-term memory, and working +memory. We discuss learning, query answering, the path from sensory input to +semantic decoding, and the relationship between episodic memory and semantic +memory. We introduce a number of hypotheses on human memory that can be derived +from the developed mathematical models." +https://arxiv.org/abs/1511.07972|arxiv_firstAuthor|Volker Tresp +https://arxiv.org/abs/1511.07972|arxiv_updated|2016-05-07T09:06:15Z +https://arxiv.org/abs/1511.07972|arxiv_title|Learning with Memory Embeddings +https://arxiv.org/abs/1511.07972|arxiv_published|2015-11-25T07:06:09Z +https://arxiv.org/abs/1511.07972|arxiv_num|1511.07972 +http://www.blosxom.com/|creationDate|2006-02-04 +http://www.blosxom.com/|tag|http://www.semanlink.net/tag/blosxom +http://www.blosxom.com/|title|blosxom :: the zen of blogging :: documentation/users/configure/static.txt blosxom +http://lists.w3.org/Archives/Public/public-lod/2014Aug/0086.html|creationDate|2014-08-30 +http://lists.w3.org/Archives/Public/public-lod/2014Aug/0086.html|tag|http://www.semanlink.net/tag/uris_within_uris +http://lists.w3.org/Archives/Public/public-lod/2014Aug/0086.html|tag|http://www.semanlink.net/tag/rdf_forms +http://lists.w3.org/Archives/Public/public-lod/2014Aug/0086.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-lod/2014Aug/0086.html|title|URIs within URIs from Luca Matteis on 2014-08-22 (public-lod@w3.org from August 2014) +http://lists.w3.org/Archives/Public/public-lod/2014Aug/0086.html|creationTime|2014-08-30T12:48:21Z +http://www.lespetitescases.net/dbpedia-en-action-la-suite|creationDate|2008-05-19 +http://www.lespetitescases.net/dbpedia-en-action-la-suite|tag|http://www.semanlink.net/tag/gautier_poupeau +http://www.lespetitescases.net/dbpedia-en-action-la-suite|tag|http://www.semanlink.net/tag/sparql_sample_code +http://www.lespetitescases.net/dbpedia-en-action-la-suite|tag|http://www.semanlink.net/tag/google_maps +http://www.lespetitescases.net/dbpedia-en-action-la-suite|tag|http://www.semanlink.net/tag/dbpedia +http://www.lespetitescases.net/dbpedia-en-action-la-suite|tag|http://www.semanlink.net/tag/sparql_demo +http://www.lespetitescases.net/dbpedia-en-action-la-suite|title|Dbpedia en action la suite Les petites cases +http://www.lespetitescases.net/dbpedia-en-action-la-suite|creationTime|2008-05-19T18:51:54Z +https://hal.archives-ouvertes.fr/hal-01841594|creationDate|2018-06-08 +https://hal.archives-ouvertes.fr/hal-01841594|tag|http://www.semanlink.net/tag/laure_soulier +https://hal.archives-ouvertes.fr/hal-01841594|tag|http://www.semanlink.net/tag/lexical_ambiguity +https://hal.archives-ouvertes.fr/hal-01841594|tag|http://www.semanlink.net/tag/embeddings_in_ir +https://hal.archives-ouvertes.fr/hal-01841594|tag|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +https://hal.archives-ouvertes.fr/hal-01841594|tag|http://www.semanlink.net/tag/these_irit_renault_biblio_initiale 
+https://hal.archives-ouvertes.fr/hal-01841594|tag|http://www.semanlink.net/tag/lynda_tamine +https://hal.archives-ouvertes.fr/hal-01841594|tag|http://www.semanlink.net/tag/eswc +https://hal.archives-ouvertes.fr/hal-01841594|comment|from the abstract: Previous work in information retrieval have shown that using evidence, such as concepts and relations, from external knowledge sources could enhance the retrieval performance... This paper presents a new tri-partite neural document language framework that leverages explicit knowledge to jointly constrain word, concept, and document learning representations to tackle a number of issues including polysemy and granularity mismatch. +https://hal.archives-ouvertes.fr/hal-01841594|title| A Tri-Partite Neural Document Language Model for Semantic Information Retrieval (2018 - ESWC conference) +https://hal.archives-ouvertes.fr/hal-01841594|creationTime|2018-06-08T15:23:26Z +https://www.genuitec.com/jersey-resteasy-comparison/|creationDate|2018-08-05 +https://www.genuitec.com/jersey-resteasy-comparison/|tag|http://www.semanlink.net/tag/jersey +https://www.genuitec.com/jersey-resteasy-comparison/|tag|http://www.semanlink.net/tag/resteasy +https://www.genuitec.com/jersey-resteasy-comparison/|tag|http://www.semanlink.net/tag/jax_rs +https://www.genuitec.com/jersey-resteasy-comparison/|tag|http://www.semanlink.net/tag/rest +https://www.genuitec.com/jersey-resteasy-comparison/|title|Jersey vs. RESTEasy: A JAX-RS Implementation Comparison - Genuitec +https://www.genuitec.com/jersey-resteasy-comparison/|creationTime|2018-08-05T18:48:14Z +https://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html|creationDate|2019-04-12 +https://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html|tag|http://www.semanlink.net/tag/boura +https://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +https://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html|title|Les sites archéologiques de Bura Asinda-Sikka au Niger +https://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html|creationTime|2019-04-12T23:15:17Z +http://www.newrepublic.com/article/117844/googles-right-know-vs-europes-right-be-forgotten|creationDate|2014-05-21 +http://www.newrepublic.com/article/117844/googles-right-know-vs-europes-right-be-forgotten|tag|http://www.semanlink.net/tag/google +http://www.newrepublic.com/article/117844/googles-right-know-vs-europes-right-be-forgotten|tag|http://www.semanlink.net/tag/right_to_be_forgotten +http://www.newrepublic.com/article/117844/googles-right-know-vs-europes-right-be-forgotten|tag|http://www.semanlink.net/tag/europe +http://www.newrepublic.com/article/117844/googles-right-know-vs-europes-right-be-forgotten|tag|http://www.semanlink.net/tag/evgeny_morozov +http://www.newrepublic.com/article/117844/googles-right-know-vs-europes-right-be-forgotten|title|On Google's Right to Know vs. 
Europe's Right to Be Forgotten New Republic +http://www.newrepublic.com/article/117844/googles-right-know-vs-europes-right-be-forgotten|creationTime|2014-05-21T21:38:43Z +http://googleresearch.blogspot.fr/2015/11/tensorflow-googles-latest-machine_9.html?m=1|creationDate|2015-11-09 +http://googleresearch.blogspot.fr/2015/11/tensorflow-googles-latest-machine_9.html?m=1|tag|http://www.semanlink.net/tag/tensorflow +http://googleresearch.blogspot.fr/2015/11/tensorflow-googles-latest-machine_9.html?m=1|title|Research Blog: TensorFlow - Google’s latest machine learning system, open sourced for everyone +http://googleresearch.blogspot.fr/2015/11/tensorflow-googles-latest-machine_9.html?m=1|creationTime|2015-11-09T18:52:15Z +http://gabrielecirulli.github.io/2048/|creationDate|2014-03-11 +http://gabrielecirulli.github.io/2048/|tag|http://www.semanlink.net/tag/jeux_en_ligne +http://gabrielecirulli.github.io/2048/|title|2048 +http://gabrielecirulli.github.io/2048/|creationTime|2014-03-11T01:42:56Z +http://leobard.twoday.net/stories/2817481/|creationDate|2006-12-23 +http://leobard.twoday.net/stories/2817481/|tag|http://www.semanlink.net/tag/leo_sauermann +http://leobard.twoday.net/stories/2817481/|title|semantic weltbild 2.0: PhD step1: integrating data into the semantic desktop +http://developers.any23.org/|creationDate|2010-03-11 +http://developers.any23.org/|tag|http://www.semanlink.net/tag/rdf_tools +http://developers.any23.org/|tag|http://www.semanlink.net/tag/converting_data_into_rdf +http://developers.any23.org/|title|Anything to Triples +http://developers.any23.org/|creationTime|2010-03-11T14:42:00Z +http://www.uea.ac.uk/~mga07vju/JWP.pdf|creationDate|2011-11-14 +http://www.uea.ac.uk/~mga07vju/JWP.pdf|tag|http://www.semanlink.net/tag/anne_haour +http://www.uea.ac.uk/~mga07vju/JWP.pdf|tag|http://www.semanlink.net/tag/haoussa +http://www.uea.ac.uk/~mga07vju/JWP.pdf|tag|http://www.semanlink.net/tag/archeologie_du_niger +http://www.uea.ac.uk/~mga07vju/JWP.pdf|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.uea.ac.uk/~mga07vju/JWP.pdf|tag|http://www.semanlink.net/tag/histoire_des_jermas +http://www.uea.ac.uk/~mga07vju/JWP.pdf|tag|http://www.semanlink.net/tag/boura +http://www.uea.ac.uk/~mga07vju/JWP.pdf|tag|http://www.semanlink.net/tag/boube_gado +http://www.uea.ac.uk/~mga07vju/JWP.pdf|comment|"Author Ann Haour + +> Gado (1993) outlines several spheres of cultural influence in the Niger +valley of the past 2000 years based on the regional distribution of key surface +material culture items backed by radiocarbon dates. The first phase, +typified for example by Bura, Kareygusu, Yaasan, and the statuette facies at Kareygoru, occupied the first millennium A.D. and relates to the Songhai sphere. The second, which includes Rosi, Tondikwarey, and the Kareygoru mounds, **involved the arrival of modern local groups such Zarma and Hausa +and the reuse of sites such as Bura, although the people of Kareygusu, for +one, seem to have remained in situ** (Gado, 1993)." 
+http://www.uea.ac.uk/~mga07vju/JWP.pdf|title|One Hundred Years of Archaeology in Niger +http://www.uea.ac.uk/~mga07vju/JWP.pdf|creationTime|2011-11-14T14:14:20Z +http://www.w3.org/DesignIssues/ReadWriteLinkedData.html|creationDate|2011-03-08 +http://www.w3.org/DesignIssues/ReadWriteLinkedData.html|tag|http://www.semanlink.net/tag/read_write_linked_data +http://www.w3.org/DesignIssues/ReadWriteLinkedData.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/DesignIssues/ReadWriteLinkedData.html|tag|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.w3.org/DesignIssues/ReadWriteLinkedData.html|title|Read-Write linked data - Design Issues +http://www.w3.org/DesignIssues/ReadWriteLinkedData.html|creationTime|2011-03-08T09:49:01Z +http://wiki.ontoworld.org/|creationDate|2006-03-03 +http://wiki.ontoworld.org/|tag|http://www.semanlink.net/tag/semantic_wiki +http://wiki.ontoworld.org/|comment|This site runs Semantic MediaWiki, an extension to the MediaWiki-Software (which powers Wikipedia). +http://wiki.ontoworld.org/|title|Wiki@OntoWorld +https://arxiv.org/abs/1503.08895|creationDate|2018-10-23 +https://arxiv.org/abs/1503.08895|tag|http://www.semanlink.net/tag/deep_learning_attention +https://arxiv.org/abs/1503.08895|tag|http://www.semanlink.net/tag/france_is_ai_2018 +https://arxiv.org/abs/1503.08895|tag|http://www.semanlink.net/tag/memory_networks +https://arxiv.org/abs/1503.08895|tag|http://www.semanlink.net/tag/nlp_facebook +https://arxiv.org/abs/1503.08895|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1503.08895|arxiv_author|Rob Fergus +https://arxiv.org/abs/1503.08895|arxiv_author|Jason Weston +https://arxiv.org/abs/1503.08895|arxiv_author|Sainbayar Sukhbaatar +https://arxiv.org/abs/1503.08895|arxiv_author|Arthur Szlam +https://arxiv.org/abs/1503.08895|comment|"Neural network with a recurrent attention model over a possibly large external memory. + +cité par [#A. Bordes](/tag/antoine_bordes) à [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html)" +https://arxiv.org/abs/1503.08895|title|[1503.08895] End-To-End Memory Networks +https://arxiv.org/abs/1503.08895|creationTime|2018-10-23T20:17:35Z +https://arxiv.org/abs/1503.08895|arxiv_summary|"We introduce a neural network with a recurrent attention model over a +possibly large external memory. The architecture is a form of Memory Network +(Weston et al., 2015) but unlike the model in that work, it is trained +end-to-end, and hence requires significantly less supervision during training, +making it more generally applicable in realistic settings. It can also be seen +as an extension of RNNsearch to the case where multiple computational steps +(hops) are performed per output symbol. The flexibility of the model allows us +to apply it to tasks as diverse as (synthetic) question answering and to +language modeling. For the former our approach is competitive with Memory +Networks, but with less supervision. For the latter, on the Penn TreeBank and +Text8 datasets our approach demonstrates comparable performance to RNNs and +LSTMs. In both cases we show that the key concept of multiple computational +hops yields improved results." 
+https://arxiv.org/abs/1503.08895|arxiv_firstAuthor|Sainbayar Sukhbaatar +https://arxiv.org/abs/1503.08895|arxiv_updated|2015-11-24T19:41:57Z +https://arxiv.org/abs/1503.08895|arxiv_title|End-To-End Memory Networks +https://arxiv.org/abs/1503.08895|arxiv_published|2015-03-31T03:05:37Z +https://arxiv.org/abs/1503.08895|arxiv_num|1503.08895 +http://stackoverflow.com/questions/18038153/methods-to-output-confidence-score-from-stanford-classifier|creationDate|2014-04-08 +http://stackoverflow.com/questions/18038153/methods-to-output-confidence-score-from-stanford-classifier|tag|http://www.semanlink.net/tag/stanford_classifier +http://stackoverflow.com/questions/18038153/methods-to-output-confidence-score-from-stanford-classifier|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/18038153/methods-to-output-confidence-score-from-stanford-classifier|title|java - Method(s) to output confidence score from Stanford Classifier? - Stack Overflow +http://stackoverflow.com/questions/18038153/methods-to-output-confidence-score-from-stanford-classifier|creationTime|2014-04-08T16:53:07Z +http://rogerdudler.github.io/git-guide/index.fr.html|creationDate|2019-03-21 +http://rogerdudler.github.io/git-guide/index.fr.html|tag|http://www.semanlink.net/tag/git +http://rogerdudler.github.io/git-guide/index.fr.html|tag|http://www.semanlink.net/tag/tutorial +http://rogerdudler.github.io/git-guide/index.fr.html|title|git - petit guide - no deep shit! +http://rogerdudler.github.io/git-guide/index.fr.html|creationTime|2019-03-21T16:16:25Z +https://arxiv.org/abs/1904.08398|creationDate|2019-04-18 +https://arxiv.org/abs/1904.08398|tag|http://www.semanlink.net/tag/nlp_text_classification +https://arxiv.org/abs/1904.08398|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1904.08398|tag|http://www.semanlink.net/tag/bert +https://arxiv.org/abs/1904.08398|arxiv_author|Ashutosh Adhikari +https://arxiv.org/abs/1904.08398|arxiv_author|Raphael Tang +https://arxiv.org/abs/1904.08398|arxiv_author|Jimmy Lin +https://arxiv.org/abs/1904.08398|arxiv_author|Achyudh Ram +https://arxiv.org/abs/1904.08398|title|[1904.08398] DocBERT: BERT for Document Classification +https://arxiv.org/abs/1904.08398|creationTime|2019-04-18T17:26:35Z +https://arxiv.org/abs/1904.08398|arxiv_summary|"We present, to our knowledge, the first application of BERT to document +classification. A few characteristics of the task might lead one to think that +BERT is not the most appropriate model: syntactic structures matter less for +content categories, documents can often be longer than typical BERT input, and +documents often have multiple labels. Nevertheless, we show that a +straightforward classification model using BERT is able to achieve the state of +the art across four popular datasets. To address the computational expense +associated with BERT inference, we distill knowledge from BERT-large to small +bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x +fewer parameters. The primary contribution of our paper is improved baselines +that can provide the foundation for future work." 
+https://arxiv.org/abs/1904.08398|arxiv_firstAuthor|Ashutosh Adhikari +https://arxiv.org/abs/1904.08398|arxiv_updated|2019-08-22T05:09:47Z +https://arxiv.org/abs/1904.08398|arxiv_title|DocBERT: BERT for Document Classification +https://arxiv.org/abs/1904.08398|arxiv_published|2019-04-17T17:55:18Z +https://arxiv.org/abs/1904.08398|arxiv_num|1904.08398 +http://www.bbc.com/news/science-environment-30055383|creationDate|2014-11-14 +http://www.bbc.com/news/science-environment-30055383|tag|http://www.semanlink.net/tag/philae +http://www.bbc.com/news/science-environment-30055383|title|BBC News - Comet lander: Future of Philae probe 'uncertain' +http://www.bbc.com/news/science-environment-30055383|creationTime|2014-11-14T21:48:03Z +http://landonf.bikemonkey.org/static/soylatte/|creationDate|2009-06-19 +http://landonf.bikemonkey.org/static/soylatte/|tag|http://www.semanlink.net/tag/mac_os_x +http://landonf.bikemonkey.org/static/soylatte/|tag|http://www.semanlink.net/tag/java +http://landonf.bikemonkey.org/static/soylatte/|title|SoyLatte: Java 6 Port for Mac OS X 10.4 and 10.5 (Intel) +http://landonf.bikemonkey.org/static/soylatte/|creationTime|2009-06-19T13:11:01Z +http://semanticweb.com/the-semantic-web-for-the-sustainable-materials-lifecycle_b23335|creationDate|2011-09-21 +http://semanticweb.com/the-semantic-web-for-the-sustainable-materials-lifecycle_b23335|tag|http://www.semanlink.net/tag/semantic_web +http://semanticweb.com/the-semantic-web-for-the-sustainable-materials-lifecycle_b23335|tag|http://www.semanlink.net/tag/sustainable_materials_lifecycle +http://semanticweb.com/the-semantic-web-for-the-sustainable-materials-lifecycle_b23335|title|The Semantic Web For the Sustainable Materials Lifecycle - semanticweb.com +http://semanticweb.com/the-semantic-web-for-the-sustainable-materials-lifecycle_b23335|creationTime|2011-09-21T23:14:19Z +http://www.sitepoint.com/blogs/2005/12/22/mvc-and-web-apps-oil-and-water/|creationDate|2005-12-31 +http://www.sitepoint.com/blogs/2005/12/22/mvc-and-web-apps-oil-and-water/|tag|http://www.semanlink.net/tag/mvc +http://www.sitepoint.com/blogs/2005/12/22/mvc-and-web-apps-oil-and-water/|tag|http://www.semanlink.net/tag/ajax +http://www.sitepoint.com/blogs/2005/12/22/mvc-and-web-apps-oil-and-water/|title|SitePoint Blogs » MVC and web apps: oil and water +https://ai.googleblog.com/2018/07/machine-learning-in-google-bigquery.html|creationDate|2019-02-19 +https://ai.googleblog.com/2018/07/machine-learning-in-google-bigquery.html|tag|http://www.semanlink.net/tag/bigquery +https://ai.googleblog.com/2018/07/machine-learning-in-google-bigquery.html|comment|"> **BigQuery ML**, a capability inside BigQuery that allows to build and deploy machine learning models on massive structured or semi-structured datasets + +> because the core components of gradient descent can be implemented using common SQL operation, we were able to repurpose the existing BigQuery SQL processing engine for BigQuery ML. 
+
+- Paper (2017): [SQML: large-scale in-database machine learning with pure SQL](https://dl.acm.org/citation.cfm?doid=3127479.3132746)
+- [User guide](https://cloud.google.com/bigquery/docs/bigqueryml-intro)
+"
+https://ai.googleblog.com/2018/07/machine-learning-in-google-bigquery.html|title|Google AI Blog: Machine Learning in Google BigQuery (2018)
+https://ai.googleblog.com/2018/07/machine-learning-in-google-bigquery.html|creationTime|2019-02-19T20:54:08Z
+https://www.kaggle.com/zackakil/nlp-using-word-vectors-with-spacy-cldspn/code|creationDate|2018-05-11
+https://www.kaggle.com/zackakil/nlp-using-word-vectors-with-spacy-cldspn/code|tag|http://www.semanlink.net/tag/nlp_sample_code
+https://www.kaggle.com/zackakil/nlp-using-word-vectors-with-spacy-cldspn/code|tag|http://www.semanlink.net/tag/spacy
+https://www.kaggle.com/zackakil/nlp-using-word-vectors-with-spacy-cldspn/code|tag|http://www.semanlink.net/tag/word_embedding
+https://www.kaggle.com/zackakil/nlp-using-word-vectors-with-spacy-cldspn/code|comment|"including an implementation of a ""what to read next?"" recommender system"
+https://www.kaggle.com/zackakil/nlp-using-word-vectors-with-spacy-cldspn/code|title|NLP using Word Vectors with Spacy - CLDSPN Kaggle
+https://www.kaggle.com/zackakil/nlp-using-word-vectors-with-spacy-cldspn/code|creationTime|2018-05-11T08:42:59Z
+http://www.visteon.com/media/newsroom/2013/130517_story1.html|creationDate|2013-06-08
+http://www.visteon.com/media/newsroom/2013/130517_story1.html|tag|http://www.semanlink.net/tag/destination_prediction
+http://www.visteon.com/media/newsroom/2013/130517_story1.html|title|Visteon Cockpit Concept Learns the Driver's HABIT
+http://www.visteon.com/media/newsroom/2013/130517_story1.html|creationTime|2013-06-08T09:28:08Z
+https://groups.google.com/forum/?fromgroups#!forum/lmf-users|creationDate|2012-07-10
+https://groups.google.com/forum/?fromgroups#!forum/lmf-users|tag|http://www.semanlink.net/tag/linked_media_framework
+https://groups.google.com/forum/?fromgroups#!forum/lmf-users|tag|http://www.semanlink.net/tag/google_groups
+https://groups.google.com/forum/?fromgroups#!forum/lmf-users|title|LMF Users - Google Groups
+https://groups.google.com/forum/?fromgroups#!forum/lmf-users|creationTime|2012-07-10T23:53:20Z
+http://www.la-grange.net/2011/07/14/api-version|creationDate|2011-07-20
+http://www.la-grange.net/2011/07/14/api-version|tag|http://www.semanlink.net/tag/rest
+http://www.la-grange.net/2011/07/14/api-version|tag|http://www.semanlink.net/tag/dev_tips
+http://www.la-grange.net/2011/07/14/api-version|title|API et versions
+http://www.la-grange.net/2011/07/14/api-version|creationTime|2011-07-20T22:59:49Z
+http://discoveryhub.co/|creationDate|2013-09-07
+http://discoveryhub.co/|tag|http://www.semanlink.net/tag/linked_data_exploration
+http://discoveryhub.co/|tag|http://www.semanlink.net/tag/to_see
+http://discoveryhub.co/|tag|http://www.semanlink.net/tag/web_search
+http://discoveryhub.co/|comment|Exploratory search is a new way to search the web: not to find what you are searching for, but to find what you are not searching for, and might be interesting for you!
+http://discoveryhub.co/|title|Discovery Hub +http://discoveryhub.co/|creationTime|2013-09-07T09:56:32Z +http://jt400.sourceforge.net/|creationDate|2009-04-08 +http://jt400.sourceforge.net/|tag|http://www.semanlink.net/tag/db2connect +http://jt400.sourceforge.net/|tag|http://www.semanlink.net/tag/as400 +http://jt400.sourceforge.net/|title|JTOpen Overview +http://jt400.sourceforge.net/|creationTime|2009-04-08T15:18:03Z +http://bugbrother.blog.lemonde.fr/2011/07/07/un-fichier-de-45m-de-gens-honnetes/|creationDate|2011-07-07 +http://bugbrother.blog.lemonde.fr/2011/07/07/un-fichier-de-45m-de-gens-honnetes/|tag|http://www.semanlink.net/tag/fichage +http://bugbrother.blog.lemonde.fr/2011/07/07/un-fichier-de-45m-de-gens-honnetes/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2011/07/07/un-fichier-de-45m-de-gens-honnetes/|title|Un fichier de 45M de « gens honnêtes » BUG BROTHER +http://bugbrother.blog.lemonde.fr/2011/07/07/un-fichier-de-45m-de-gens-honnetes/|creationTime|2011-07-07T22:55:00Z +https://simons.berkeley.edu/talks/sanjeev-arora-2016-11-15|creationDate|2018-05-26 +https://simons.berkeley.edu/talks/sanjeev-arora-2016-11-15|tag|http://www.semanlink.net/tag/good +https://simons.berkeley.edu/talks/sanjeev-arora-2016-11-15|tag|http://www.semanlink.net/tag/sanjeev_arora +https://simons.berkeley.edu/talks/sanjeev-arora-2016-11-15|tag|http://www.semanlink.net/tag/word_embedding +https://simons.berkeley.edu/talks/sanjeev-arora-2016-11-15|tag|http://www.semanlink.net/tag/semantic_hashing +https://simons.berkeley.edu/talks/sanjeev-arora-2016-11-15|title|A Theoretical Approach to Semantic Coding and Hashing Simons Institute for the Theory of Computing (2016) +https://simons.berkeley.edu/talks/sanjeev-arora-2016-11-15|creationTime|2018-05-26T17:22:33Z +http://www.cinemovies.fr/fiche_cast.php?IDfilm=4020|creationDate|2006-12-19 +http://www.cinemovies.fr/fiche_cast.php?IDfilm=4020|tag|http://www.semanlink.net/tag/gina_lollobrigida +http://www.cinemovies.fr/fiche_cast.php?IDfilm=4020|tag|http://www.semanlink.net/tag/film_italien +http://www.cinemovies.fr/fiche_cast.php?IDfilm=4020|comment|Le aventure di Pinocchio, film italien de Luigi Comencini avec Gina Lollobrigida +http://www.cinemovies.fr/fiche_cast.php?IDfilm=4020|title|Les Aventures de Pinocchio +http://www.marieclaire.com/politics/a21997/donald-trump-katy-tur/|creationDate|2017-01-21 +http://www.marieclaire.com/politics/a21997/donald-trump-katy-tur/|tag|http://www.semanlink.net/tag/trump +http://www.marieclaire.com/politics/a21997/donald-trump-katy-tur/|comment|My Crazy Year with Trump +http://www.marieclaire.com/politics/a21997/donald-trump-katy-tur/|title|Katy Tur Talks Covering Donald Trump's Candidacy for NBC +http://www.marieclaire.com/politics/a21997/donald-trump-katy-tur/|creationTime|2017-01-21T11:39:48Z +http://www.cringely.com/2013/09/30/doubts-bitcoin/?utm_source=rss&utm_medium=rss&utm_campaign=doubts-bitcoin|creationDate|2013-10-01 +http://www.cringely.com/2013/09/30/doubts-bitcoin/?utm_source=rss&utm_medium=rss&utm_campaign=doubts-bitcoin|tag|http://www.semanlink.net/tag/bitcoin +http://www.cringely.com/2013/09/30/doubts-bitcoin/?utm_source=rss&utm_medium=rss&utm_campaign=doubts-bitcoin|tag|http://www.semanlink.net/tag/cringely +http://www.cringely.com/2013/09/30/doubts-bitcoin/?utm_source=rss&utm_medium=rss&utm_campaign=doubts-bitcoin|title|I, Cringely I have my doubts about Bitcoin 
+http://www.cringely.com/2013/09/30/doubts-bitcoin/?utm_source=rss&utm_medium=rss&utm_campaign=doubts-bitcoin|creationTime|2013-10-01T18:58:47Z +http://www.polymer-project.org/|creationDate|2014-01-02 +http://www.polymer-project.org/|tag|http://www.semanlink.net/tag/javascript +http://www.polymer-project.org/|tag|http://www.semanlink.net/tag/web_dev +http://www.polymer-project.org/|comment|Polymer is a new type of library for the web, built on top of Web Components, and designed to leverage the evolving web platform on modern browsers. +http://www.polymer-project.org/|title|Polymer +http://www.polymer-project.org/|creationTime|2014-01-02T10:18:57Z +http://n2.talis.com/wiki/Platform_API|creationDate|2008-07-20 +http://n2.talis.com/wiki/Platform_API|tag|http://www.semanlink.net/tag/talis_platform +http://n2.talis.com/wiki/Platform_API|title|Platform API - n² wiki +http://n2.talis.com/wiki/Platform_API|creationTime|2008-07-20T02:11:42Z +http://morph.talis.com/|creationDate|2009-04-01 +http://morph.talis.com/|tag|http://www.semanlink.net/tag/talis +http://morph.talis.com/|tag|http://www.semanlink.net/tag/sw_online_tools +http://morph.talis.com/|title|Talis Semantic Web Formats Converter +http://morph.talis.com/|creationTime|2009-04-01T01:44:56Z +http://demo.openlinksw.com/about/html/http://demo.openlinksw.com/Northwind/Customer/ALFKI|creationDate|2008-09-12 +http://demo.openlinksw.com/about/html/http://demo.openlinksw.com/Northwind/Customer/ALFKI|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://demo.openlinksw.com/about/html/http://demo.openlinksw.com/Northwind/Customer/ALFKI|tag|http://www.semanlink.net/tag/linked_data_demo +http://demo.openlinksw.com/about/html/http://demo.openlinksw.com/Northwind/Customer/ALFKI|tag|http://www.semanlink.net/tag/kingsley_idehen +http://demo.openlinksw.com/about/html/http://demo.openlinksw.com/Northwind/Customer/ALFKI|tag|http://www.semanlink.net/tag/openlink +http://demo.openlinksw.com/about/html/http://demo.openlinksw.com/Northwind/Customer/ALFKI|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://demo.openlinksw.com/about/html/http://demo.openlinksw.com/Northwind/Customer/ALFKI|title|Description of: http://demo.openlinksw.com/Northwind/Customer/ALFKI +http://demo.openlinksw.com/about/html/http://demo.openlinksw.com/Northwind/Customer/ALFKI|creationTime|2008-09-12T14:26:37Z +http://dbpedia.org/search/|creationDate|2007-05-13 +http://dbpedia.org/search/|tag|http://www.semanlink.net/tag/dbpedia +http://dbpedia.org/search/|title|Search DBpedia.org +http://dbpedia.org/search/|creationTime|2007-05-13T19:47:46Z +http://www.lemonde.fr/opinions/article/2010/01/13/crise-financiere-comment-eviter-l-explosion-par-pierre-larrouturou_1291024_3232.html|creationDate|2010-01-13 +http://www.lemonde.fr/opinions/article/2010/01/13/crise-financiere-comment-eviter-l-explosion-par-pierre-larrouturou_1291024_3232.html|tag|http://www.semanlink.net/tag/crise_financiere +http://www.lemonde.fr/opinions/article/2010/01/13/crise-financiere-comment-eviter-l-explosion-par-pierre-larrouturou_1291024_3232.html|comment|"""Jusqu'en 1981, le ratio dette/PIB [aux USA] était parfaitement stable. L'économie n'avait pas besoin de dette pour croître régulièrement. Des règles collectives assuraient une progression régulière des salaires et un partage équitable de la productivité entre salariés et actionnaires. Ce ""compromis fordiste"" a permis aux Etats-Unis de connaître 30 ans de stabilité. 
Sans dette.""""" +http://www.lemonde.fr/opinions/article/2010/01/13/crise-financiere-comment-eviter-l-explosion-par-pierre-larrouturou_1291024_3232.html|title|Crise financière : comment éviter l'explosion ?, par Pierre Larrouturou - LeMonde.fr +http://www.lemonde.fr/opinions/article/2010/01/13/crise-financiere-comment-eviter-l-explosion-par-pierre-larrouturou_1291024_3232.html|creationTime|2010-01-13T21:23:07Z +http://www.lemonde.fr/opinions/article/2010/01/13/crise-financiere-comment-eviter-l-explosion-par-pierre-larrouturou_1291024_3232.html|source|Le Monde +http://www.lemonde.fr/opinions/article/2010/01/13/crise-financiere-comment-eviter-l-explosion-par-pierre-larrouturou_1291024_3232.html|date|2010-01-14 +http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/|creationDate|2018-12-12 +http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/|tag|http://www.semanlink.net/tag/keras +http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/|tag|http://www.semanlink.net/tag/sample_code +http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/|tag|http://www.semanlink.net/tag/combining_numerical_and_text_features +http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/|title|Combining numerical and text features in (deep) neural networks - Digital Thinking +http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/|creationTime|2018-12-12T11:38:27Z +http://www.semanlink.net/doc/2019/05/state_of_the_art_neural_corefer|creationDate|2019-05-28 +http://www.semanlink.net/doc/2019/05/state_of_the_art_neural_corefer|tag|http://www.semanlink.net/tag/coreference_resolution +http://www.semanlink.net/doc/2019/05/state_of_the_art_neural_corefer|tag|http://www.semanlink.net/tag/chatbot +http://www.semanlink.net/doc/2019/05/state_of_the_art_neural_corefer|tag|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/doc/2019/05/state_of_the_art_neural_corefer|comment|[github](https://github.com/huggingface/neuralcoref) +http://www.semanlink.net/doc/2019/05/state_of_the_art_neural_corefer|title|State-of-the-art neural coreference resolution for chatbots +http://www.semanlink.net/doc/2019/05/state_of_the_art_neural_corefer|bookmarkOf|https://medium.com/huggingface/state-of-the-art-neural-coreference-resolution-for-chatbots-3302365dcf30 +http://www.semanlink.net/doc/2019/05/state_of_the_art_neural_corefer|creationTime|2019-05-28T16:11:01Z +https://www.youtube.com/watch?v=DQKQKCe1xl0|creationDate|2016-03-27 +https://www.youtube.com/watch?v=DQKQKCe1xl0|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=DQKQKCe1xl0|tag|http://www.semanlink.net/tag/good +https://www.youtube.com/watch?v=DQKQKCe1xl0|tag|http://www.semanlink.net/tag/douglas_rushkoff +https://www.youtube.com/watch?v=DQKQKCe1xl0|tag|http://www.semanlink.net/tag/uberisation +https://www.youtube.com/watch?v=DQKQKCe1xl0|tag|http://www.semanlink.net/tag/critique_du_capitalisme +https://www.youtube.com/watch?v=DQKQKCe1xl0|tag|http://www.semanlink.net/tag/south_by_southwest +https://www.youtube.com/watch?v=DQKQKCe1xl0|tag|http://www.semanlink.net/tag/digital_economy +https://www.youtube.com/watch?v=DQKQKCe1xl0|title|Douglas Rushkoff Distributed: A New OS for the Digital Economy SXSW Interactive 2016 - YouTube 
+https://www.youtube.com/watch?v=DQKQKCe1xl0|creationTime|2016-03-27T12:03:01Z +http://maple.cs.umbc.edu/|creationDate|2006-05-04 +http://maple.cs.umbc.edu/|tag|http://www.semanlink.net/tag/artificial_intelligence +http://maple.cs.umbc.edu/|comment|"""exploring artificial intelligence...""
+The Multi-Agent Planning and Learning Lab at UMBC is devoted to exploring artificial intelligence and developing A.I. solutions to real-world problems. Our interests and current projects span both the theoretical and practical aspects of artificial intelligence.
+
+Our research reaches outside of multiagent systems, planning, and machine learning to include cognitive science, computational sociology, bioinformatics, and human-computer interaction.
+"
+http://maple.cs.umbc.edu/|title|MAPLE Lab @ UMBC Multi-Agent Planning and Learning
+http://www.english-for-techies.net/|creationDate|2007-03-09
+http://www.english-for-techies.net/|tag|http://www.semanlink.net/tag/anglais
+http://www.english-for-techies.net/|title|ENGLISH  FOR TECHIES (AND NON-TECHIES, TOO)
+http://www.english-for-techies.net/|creationTime|2007-03-09T23:27:59Z
+http://www.youtube.com/watch?v=BGS7SpI7obY|creationDate|2013-12-06
+http://www.youtube.com/watch?v=BGS7SpI7obY|tag|http://www.semanlink.net/tag/youtube_video
+http://www.youtube.com/watch?v=BGS7SpI7obY|tag|http://www.semanlink.net/tag/music_of_africa
+http://www.youtube.com/watch?v=BGS7SpI7obY|tag|http://www.semanlink.net/tag/nelson_mandela
+http://www.youtube.com/watch?v=BGS7SpI7obY|comment|Johnny Clegg (With Nelson Mandela) - Asimbonanga - 1999
+http://www.youtube.com/watch?v=BGS7SpI7obY|title|Asimbonanga
+http://www.youtube.com/watch?v=BGS7SpI7obY|creationTime|2013-12-06T01:06:57Z
+https://openreview.net/forum?id=rJedbn0ctQ|creationDate|2018-10-20
+https://openreview.net/forum?id=rJedbn0ctQ|comment|"[About the GEM paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1810.00438). GEM compared with [#SIF](/tag/sif_embeddings), ""[Sentences as subspaces](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358)"", [""USIF""](doc:?uri=http%3A%2F%2Fwww.aclweb.org%2Fanthology%2FW18-3012%2F)"
+https://openreview.net/forum?id=rJedbn0ctQ|relatedDoc|http://www.aclweb.org/anthology/W18-3012/
+https://openreview.net/forum?id=rJedbn0ctQ|relatedDoc|https://arxiv.org/abs/1810.00438
+https://openreview.net/forum?id=rJedbn0ctQ|relatedDoc|https://arxiv.org/abs/1704.05358
+https://openreview.net/forum?id=rJedbn0ctQ|title|Zero-training Sentence Embedding via Orthogonal Basis OpenReview
+https://openreview.net/forum?id=rJedbn0ctQ|creationTime|2018-10-20T14:41:37Z
+http://kill3.wordpress.com/|creationDate|2012-08-15
+http://kill3.wordpress.com/|tag|http://www.semanlink.net/tag/maven
+http://kill3.wordpress.com/|tag|http://www.semanlink.net/tag/javascript
+http://kill3.wordpress.com/|title|Managing Javascript Libraries as Maven WAR Overlays - A Needle in a Stack Trace
+http://kill3.wordpress.com/|creationTime|2012-08-15T14:38:59Z
+http://lists.w3.org/Archives/Public/public-hydra/2014May/0003.html|creationDate|2015-02-18
+http://lists.w3.org/Archives/Public/public-hydra/2014May/0003.html|tag|http://www.semanlink.net/tag/public_hydra_w3_org
+http://lists.w3.org/Archives/Public/public-hydra/2014May/0003.html|title|RE: Newbie questions about Issue tracker demo API
+http://lists.w3.org/Archives/Public/public-hydra/2014May/0003.html|creationTime|2015-02-18T01:09:43Z
+http://en.wikipedia.org/wiki/HTTP_ETag|creationDate|2015-05-14
+http://en.wikipedia.org/wiki/HTTP_ETag|tag|http://www.semanlink.net/tag/http_cache
+http://en.wikipedia.org/wiki/HTTP_ETag|comment|ETags provide a mechanism for a client cache to validate whether its cached content is still up-to-date
+http://en.wikipedia.org/wiki/HTTP_ETag|title|HTTP ETag - Wikipedia, the free encyclopedia
+http://en.wikipedia.org/wiki/HTTP_ETag|creationTime|2015-05-14T14:51:00Z +http://lists.w3.org/Archives/Public/public-hydra/2013Jul/0010.html|creationDate|2015-02-18 +http://lists.w3.org/Archives/Public/public-hydra/2013Jul/0010.html|tag|http://www.semanlink.net/tag/public_hydra_w3_org +http://lists.w3.org/Archives/Public/public-hydra/2013Jul/0010.html|tag|http://www.semanlink.net/tag/hydra_templated_links +http://lists.w3.org/Archives/Public/public-hydra/2013Jul/0010.html|title|TemplatedLink +http://lists.w3.org/Archives/Public/public-hydra/2013Jul/0010.html|creationTime|2015-02-18T01:19:34Z +http://www.nature.com/news/game-playing-software-holds-lessons-for-neuroscience-1.16979|creationDate|2016-01-12 +http://www.nature.com/news/game-playing-software-holds-lessons-for-neuroscience-1.16979|tag|http://www.semanlink.net/tag/google_deepmind +http://www.nature.com/news/game-playing-software-holds-lessons-for-neuroscience-1.16979|title|Game-playing software holds lessons for neuroscience : Nature News & Comment +http://www.nature.com/news/game-playing-software-holds-lessons-for-neuroscience-1.16979|creationTime|2016-01-12T18:33:23Z +http://www.w3.org/TR/skos-primer/|creationDate|2008-03-01 +http://www.w3.org/TR/skos-primer/|tag|http://www.semanlink.net/tag/skos_w3c_document +http://www.w3.org/TR/skos-primer/|tag|http://www.semanlink.net/tag/w3c_working_draft +http://www.w3.org/TR/skos-primer/|title|SKOS Simple Knowledge Organization System Primer +http://www.w3.org/TR/skos-primer/|creationTime|2008-03-01T19:41:53Z +https://www.theguardian.com/technology/blog/2013/mar/12/tim-berners-lee-drm-cory-doctorow|creationDate|2016-09-17 +https://www.theguardian.com/technology/blog/2013/mar/12/tim-berners-lee-drm-cory-doctorow|tag|http://www.semanlink.net/tag/drm_in_html_5 +https://www.theguardian.com/technology/blog/2013/mar/12/tim-berners-lee-drm-cory-doctorow|tag|http://www.semanlink.net/tag/dvd +https://www.theguardian.com/technology/blog/2013/mar/12/tim-berners-lee-drm-cory-doctorow|tag|http://www.semanlink.net/tag/drm +https://www.theguardian.com/technology/blog/2013/mar/12/tim-berners-lee-drm-cory-doctorow|tag|http://www.semanlink.net/tag/tim_berners_lee +https://www.theguardian.com/technology/blog/2013/mar/12/tim-berners-lee-drm-cory-doctorow|title|What I wish Tim Berners-Lee understood about DRM Technology The Guardian +https://www.theguardian.com/technology/blog/2013/mar/12/tim-berners-lee-drm-cory-doctorow|creationTime|2016-09-17T15:07:59Z +http://www.census.gov/ipc/www/worldhis.html|creationDate|2007-04-03 +http://www.census.gov/ipc/www/worldhis.html|tag|http://www.semanlink.net/tag/demographie +http://www.census.gov/ipc/www/worldhis.html|tag|http://www.semanlink.net/tag/population_mondiale +http://www.census.gov/ipc/www/worldhis.html|title|Historical Estimates of World Population +http://www.census.gov/ipc/www/worldhis.html|creationTime|2007-04-03T23:42:37Z +http://www.thefigtrees.net/lee/blog/2011/09/saving_months_not_milliseconds.html|creationDate|2011-09-27 +http://www.thefigtrees.net/lee/blog/2011/09/saving_months_not_milliseconds.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2011/09/saving_months_not_milliseconds.html|tag|http://www.semanlink.net/tag/semantic_web_propaganda +http://www.thefigtrees.net/lee/blog/2011/09/saving_months_not_milliseconds.html|title|Saving Months, Not Milliseconds: Do More Faster with the Semantic Web - TechnicaLee Speaking 
+http://www.thefigtrees.net/lee/blog/2011/09/saving_months_not_milliseconds.html|creationTime|2011-09-27T11:09:32Z +http://www.rfi.fr/francais/actu/articles/091/article_53988.asp|creationDate|2007-07-26 +http://www.rfi.fr/francais/actu/articles/091/article_53988.asp|tag|http://www.semanlink.net/tag/niger +http://www.rfi.fr/francais/actu/articles/091/article_53988.asp|tag|http://www.semanlink.net/tag/uranium +http://www.rfi.fr/francais/actu/articles/091/article_53988.asp|title|RFI - Les promesses de l’uranium du Niger à l’épreuve +http://www.rfi.fr/francais/actu/articles/091/article_53988.asp|creationTime|2007-07-26T12:58:27Z +http://www.rfi.fr/francais/actu/articles/091/article_53988.asp|source|RFI +http://delicious-java.sourceforge.net/|creationDate|2006-09-25 +http://delicious-java.sourceforge.net/|tag|http://www.semanlink.net/tag/javadoc +http://delicious-java.sourceforge.net/|tag|http://www.semanlink.net/tag/delicious_api +http://delicious-java.sourceforge.net/|tag|http://www.semanlink.net/tag/delicious_java +http://delicious-java.sourceforge.net/|title|delicious java API +http://www.newscientist.com/channel/life/dn14094-bacteria-make-major-evolutionary-shift-in-the-lab.html|creationDate|2008-06-12 +http://www.newscientist.com/channel/life/dn14094-bacteria-make-major-evolutionary-shift-in-the-lab.html|tag|http://www.semanlink.net/tag/evolution +http://www.newscientist.com/channel/life/dn14094-bacteria-make-major-evolutionary-shift-in-the-lab.html|title|Bacteria make major evolutionary shift in the lab -New Scientist +http://www.newscientist.com/channel/life/dn14094-bacteria-make-major-evolutionary-shift-in-the-lab.html|creationTime|2008-06-12T23:32:15Z +http://blog.semantic-web.at/2014/07/15/from-taxonomies-over-ontologies-to-knowledge-graphs/|creationDate|2014-07-28 +http://blog.semantic-web.at/2014/07/15/from-taxonomies-over-ontologies-to-knowledge-graphs/|tag|http://www.semanlink.net/tag/poolparty +http://blog.semantic-web.at/2014/07/15/from-taxonomies-over-ontologies-to-knowledge-graphs/|title|From Taxonomies over Ontologies to Knowledge Graphs The Semantic Puzzle +http://blog.semantic-web.at/2014/07/15/from-taxonomies-over-ontologies-to-knowledge-graphs/|creationTime|2014-07-28T01:48:42Z +https://brinxmat.wordpress.com/2016/02/28/so-i-created-workable-json-ld/|creationDate|2016-04-09 +https://brinxmat.wordpress.com/2016/02/28/so-i-created-workable-json-ld/|tag|http://www.semanlink.net/tag/json_ld +https://brinxmat.wordpress.com/2016/02/28/so-i-created-workable-json-ld/|tag|http://www.semanlink.net/tag/brinxmat +https://brinxmat.wordpress.com/2016/02/28/so-i-created-workable-json-ld/|title|So I created workable JSON-LD Brinxmat's blog +https://brinxmat.wordpress.com/2016/02/28/so-i-created-workable-json-ld/|creationTime|2016-04-09T01:08:16Z +http://www.cinema-francais.fr/les_films/films_d/films_daniel_norman_jacques/le_briseur_de_chaines.htm|creationDate|2010-10-12 +http://www.cinema-francais.fr/les_films/films_d/films_daniel_norman_jacques/le_briseur_de_chaines.htm|tag|http://www.semanlink.net/tag/film_francais +http://www.cinema-francais.fr/les_films/films_d/films_daniel_norman_jacques/le_briseur_de_chaines.htm|tag|http://www.semanlink.net/tag/pierre_fresnay +http://www.cinema-francais.fr/les_films/films_d/films_daniel_norman_jacques/le_briseur_de_chaines.htm|title|LE BRISEUR DE CHAINES +http://www.cinema-francais.fr/les_films/films_d/films_daniel_norman_jacques/le_briseur_de_chaines.htm|creationTime|2010-10-12T01:21:55Z 
+https://en.wikipedia.org/wiki/Match_Point|creationDate|2017-02-13 +https://en.wikipedia.org/wiki/Match_Point|tag|http://www.semanlink.net/tag/woody_allen +https://en.wikipedia.org/wiki/Match_Point|tag|http://www.semanlink.net/tag/film_americain +https://en.wikipedia.org/wiki/Match_Point|title|Match Point +https://en.wikipedia.org/wiki/Match_Point|creationTime|2017-02-13T00:09:27Z +https://www.researchgate.net/publication/325251122_Patent_Document_Clustering_with_Deep_Embeddings|creationDate|2019-02-23 +https://www.researchgate.net/publication/325251122_Patent_Document_Clustering_with_Deep_Embeddings|tag|http://www.semanlink.net/tag/document_embeddings +https://www.researchgate.net/publication/325251122_Patent_Document_Clustering_with_Deep_Embeddings|tag|http://www.semanlink.net/tag/cluster_analysis +https://www.researchgate.net/publication/325251122_Patent_Document_Clustering_with_Deep_Embeddings|tag|http://www.semanlink.net/tag/ip_ir_ml_ia +https://www.researchgate.net/publication/325251122_Patent_Document_Clustering_with_Deep_Embeddings|tag|http://www.semanlink.net/tag/patent +https://www.researchgate.net/publication/325251122_Patent_Document_Clustering_with_Deep_Embeddings|comment|uses [this method](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1511.06335) +https://www.researchgate.net/publication/325251122_Patent_Document_Clustering_with_Deep_Embeddings|relatedDoc|https://arxiv.org/abs/1511.06335 +https://www.researchgate.net/publication/325251122_Patent_Document_Clustering_with_Deep_Embeddings|title|[Jaeyoung2018] Patent Document Clustering with Deep Embeddings +https://www.researchgate.net/publication/325251122_Patent_Document_Clustering_with_Deep_Embeddings|creationTime|2019-02-23T17:43:33Z +https://link.springer.com/chapter/10.1007%2F978-3-319-60438-1_43|creationDate|2017-11-07 +https://link.springer.com/chapter/10.1007%2F978-3-319-60438-1_43|tag|http://www.semanlink.net/tag/short_text_clustering +https://link.springer.com/chapter/10.1007%2F978-3-319-60438-1_43|comment|the issue of clustering short texts, which are free answers gathered during brain storming seminars. Those answers are short, often incomplete, and highly biased toward the question, so establishing a notion of proximity between texts is a challenging task. In addition, the number of answers is counted up to hundred instances, which causes sparsity. We present three text clustering methods in order to choose the best one for this specific task, then we show how the method can be improved by a semantic enrichment, including neural-based distributional models and external knowledge resources. +https://link.springer.com/chapter/10.1007%2F978-3-319-60438-1_43|title|Semantic Enriched Short Text Clustering SpringerLink +https://link.springer.com/chapter/10.1007%2F978-3-319-60438-1_43|creationTime|2017-11-07T23:04:40Z +http://www.lemonde.fr/planete/article/2014/02/11/les-ogm-peu-cultives-en-europe-mais-massivement-importes_4363693_3244.html|creationDate|2014-02-11 +http://www.lemonde.fr/planete/article/2014/02/11/les-ogm-peu-cultives-en-europe-mais-massivement-importes_4363693_3244.html|tag|http://www.semanlink.net/tag/ogm +http://www.lemonde.fr/planete/article/2014/02/11/les-ogm-peu-cultives-en-europe-mais-massivement-importes_4363693_3244.html|tag|http://www.semanlink.net/tag/europe +http://www.lemonde.fr/planete/article/2014/02/11/les-ogm-peu-cultives-en-europe-mais-massivement-importes_4363693_3244.html|title|OGM : qui en produit, qui en importe en Europe ? 
+http://www.lemonde.fr/planete/article/2014/02/11/les-ogm-peu-cultives-en-europe-mais-massivement-importes_4363693_3244.html|creationTime|2014-02-11T21:24:17Z +http://research.microsoft.com/apps/pubs/?id=175447|creationDate|2013-03-22 +http://research.microsoft.com/apps/pubs/?id=175447|tag|http://www.semanlink.net/tag/destination_prediction +http://research.microsoft.com/apps/pubs/?id=175447|tag|http://www.semanlink.net/tag/microsoft +http://research.microsoft.com/apps/pubs/?id=175447|title|Destination Prediction by Sub-Trajectory Synthesis and Privacy Protection Against Such Prediction - Microsoft Research +http://research.microsoft.com/apps/pubs/?id=175447|creationTime|2013-03-22T01:00:20Z +https://rare-technologies.com/implementing-poincare-embeddings/|creationDate|2018-05-20 +https://rare-technologies.com/implementing-poincare-embeddings/|tag|http://www.semanlink.net/tag/poincare_embeddings +https://rare-technologies.com/implementing-poincare-embeddings/|tag|http://www.semanlink.net/tag/gensim +https://rare-technologies.com/implementing-poincare-embeddings/|title|Implementing Poincaré Embeddings RARE Technologies +https://rare-technologies.com/implementing-poincare-embeddings/|creationTime|2018-05-20T09:01:07Z +http://www.prestashop.com/|creationDate|2011-04-07 +http://www.prestashop.com/|tag|http://www.semanlink.net/tag/web_2_0 +http://www.prestashop.com/|tag|http://www.semanlink.net/tag/software +http://www.prestashop.com/|tag|http://www.semanlink.net/tag/open_source +http://www.prestashop.com/|comment|Déjà plus de 50 000 boutiques utilisent PrestaShop à travers le monde ! +http://www.prestashop.com/|title|PrestaShop Free Open-Source e-Commerce Software for Web 2.0 +http://www.prestashop.com/|creationTime|2011-04-07T09:39:28Z +https://arxiv.org/abs/1710.04087|creationDate|2017-10-14 +https://arxiv.org/abs/1710.04087|tag|http://www.semanlink.net/tag/nlp_facebook +https://arxiv.org/abs/1710.04087|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1710.04087|tag|http://www.semanlink.net/tag/ludovic_denoyer +https://arxiv.org/abs/1710.04087|tag|http://www.semanlink.net/tag/word_embedding +https://arxiv.org/abs/1710.04087|tag|http://www.semanlink.net/tag/guillaume_lample +https://arxiv.org/abs/1710.04087|tag|http://www.semanlink.net/tag/machine_translation +https://arxiv.org/abs/1710.04087|tag|http://www.semanlink.net/tag/unsupervised_machine_translation +https://arxiv.org/abs/1710.04087|arxiv_author|Ludovic Denoyer +https://arxiv.org/abs/1710.04087|arxiv_author|Guillaume Lample +https://arxiv.org/abs/1710.04087|arxiv_author|Marc'Aurelio Ranzato +https://arxiv.org/abs/1710.04087|arxiv_author|Hervé Jégou +https://arxiv.org/abs/1710.04087|arxiv_author|Alexis Conneau +https://arxiv.org/abs/1710.04087|comment|> we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way +https://arxiv.org/abs/1710.04087|title|[1710.04087] Word Translation Without Parallel Data +https://arxiv.org/abs/1710.04087|creationTime|2017-10-14T13:56:33Z +https://arxiv.org/abs/1710.04087|arxiv_summary|"State-of-the-art methods for learning cross-lingual word embeddings have +relied on bilingual dictionaries or parallel corpora. Recent studies showed +that the need for parallel data supervision can be alleviated with +character-level information. 
While these methods showed encouraging results, +they are not on par with their supervised counterparts and are limited to pairs +of languages sharing a common alphabet. In this work, we show that we can build +a bilingual dictionary between two languages without using any parallel +corpora, by aligning monolingual word embedding spaces in an unsupervised way. +Without using any character information, our model even outperforms existing +supervised methods on cross-lingual tasks for some language pairs. Our +experiments demonstrate that our method works very well also for distant +language pairs, like English-Russian or English-Chinese. We finally describe +experiments on the English-Esperanto low-resource language pair, on which there +only exists a limited amount of parallel data, to show the potential impact of +our method in fully unsupervised machine translation. Our code, embeddings and +dictionaries are publicly available." +https://arxiv.org/abs/1710.04087|arxiv_firstAuthor|Alexis Conneau +https://arxiv.org/abs/1710.04087|arxiv_updated|2018-01-30T14:41:51Z +https://arxiv.org/abs/1710.04087|arxiv_title|Word Translation Without Parallel Data +https://arxiv.org/abs/1710.04087|arxiv_published|2017-10-11T14:24:28Z +https://arxiv.org/abs/1710.04087|arxiv_num|1710.04087 +http://en.wikipedia.org/wiki/List_of_animals_by_number_of_neurons|creationDate|2008-06-22 +http://en.wikipedia.org/wiki/List_of_animals_by_number_of_neurons|tag|http://www.semanlink.net/tag/number_of_neurons +http://en.wikipedia.org/wiki/List_of_animals_by_number_of_neurons|title|List of animals by number of neurons - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/List_of_animals_by_number_of_neurons|creationTime|2008-06-22T02:01:31Z +http://lists.w3.org/Archives/Public/public-rdfa-wg/2012May/0122.html|creationDate|2013-06-16 +http://lists.w3.org/Archives/Public/public-rdfa-wg/2012May/0122.html|tag|http://www.semanlink.net/tag/rdfa +http://lists.w3.org/Archives/Public/public-rdfa-wg/2012May/0122.html|tag|http://www.semanlink.net/tag/css +http://lists.w3.org/Archives/Public/public-rdfa-wg/2012May/0122.html|title|Highlight RDFa with CSS from Niklas Lindström on 2012-05-20 (public-rdfa-wg@w3.org from May 2012) +http://lists.w3.org/Archives/Public/public-rdfa-wg/2012May/0122.html|creationTime|2013-06-16T11:09:25Z +http://deeplearning4j.org/|creationDate|2016-01-03 +http://deeplearning4j.org/|tag|http://www.semanlink.net/tag/deep_learning +http://deeplearning4j.org/|tag|http://www.semanlink.net/tag/open_source +http://deeplearning4j.org/|tag|http://www.semanlink.net/tag/java_library +http://deeplearning4j.org/|title|Deeplearning4j - Open-source, distributed deep learning for the JVM +http://deeplearning4j.org/|creationTime|2016-01-03T23:01:25Z +https://www.microsoft.com/en-us/research/publication/a-ranking-approach-to-keyphrase-extraction/|creationDate|2017-06-27 +https://www.microsoft.com/en-us/research/publication/a-ranking-approach-to-keyphrase-extraction/|tag|http://www.semanlink.net/tag/nlp_topic_extraction +https://www.microsoft.com/en-us/research/publication/a-ranking-approach-to-keyphrase-extraction/|tag|http://www.semanlink.net/tag/nlp_microsoft +https://www.microsoft.com/en-us/research/publication/a-ranking-approach-to-keyphrase-extraction/|tag|http://www.semanlink.net/tag/machine_learned_ranking +https://www.microsoft.com/en-us/research/publication/a-ranking-approach-to-keyphrase-extraction/|tag|http://www.semanlink.net/tag/ranking_svm 
+https://www.microsoft.com/en-us/research/publication/a-ranking-approach-to-keyphrase-extraction/|tag|http://www.semanlink.net/tag/microsoft_research
+https://www.microsoft.com/en-us/research/publication/a-ranking-approach-to-keyphrase-extraction/|comment|"Previously, automatic keyphrase extraction was formalized as classification and learning methods for classification were utilized. This paper points out that it is more essential to **cast the keyphrase extraction problem as ranking** and employ a **learning to rank** method to perform the task. As an example, it employs Ranking SVM, a state-of-the-art method of learning to rank, in keyphrase extraction
+"
+https://www.microsoft.com/en-us/research/publication/a-ranking-approach-to-keyphrase-extraction/|title|A Ranking Approach to Keyphrase Extraction - Microsoft Research (2009)
+https://www.microsoft.com/en-us/research/publication/a-ranking-approach-to-keyphrase-extraction/|creationTime|2017-06-27T12:47:09Z
+http://www.gigamonkeys.com/book/|creationDate|2006-02-02
+http://www.gigamonkeys.com/book/|tag|http://www.semanlink.net/tag/lisp
+http://www.gigamonkeys.com/book/|title|Practical Common Lisp
+https://arxiv.org/abs/1506.08422|creationDate|2017-12-03
+https://arxiv.org/abs/1506.08422|tag|http://www.semanlink.net/tag/latent_dirichlet_allocation
+https://arxiv.org/abs/1506.08422|tag|http://www.semanlink.net/tag/topic_embeddings
+https://arxiv.org/abs/1506.08422|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1506.08422|arxiv_author|Xin-Yu Dai
+https://arxiv.org/abs/1506.08422|arxiv_author|Li-Qiang Niu
+https://arxiv.org/abs/1506.08422|comment|Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that used in Word2Vec
+https://arxiv.org/abs/1506.08422|title|[1506.08422] Topic2Vec: Learning Distributed Representations of Topics
+https://arxiv.org/abs/1506.08422|creationTime|2017-12-03T17:36:27Z
+https://arxiv.org/abs/1506.08422|arxiv_summary|"Latent Dirichlet Allocation (LDA) mining thematic structure of documents
+plays an important role in nature language processing and machine learning
+areas. However, the probability distribution from LDA only describes the
+statistical relationship of occurrences in the corpus and usually in practice,
+probability is not the best choice for feature representations. Recently,
+embedding methods have been proposed to represent words and documents by
+learning essential concepts and representations, such as Word2Vec and Doc2Vec.
+The embedded representations have shown more effectiveness than LDA-style
+representations in many tasks. In this paper, we propose the Topic2Vec approach
+which can learn topic representations in the same semantic vector space with
+words, as an alternative to probability. The experimental results show that
+Topic2Vec achieves interesting and meaningful results."
+https://arxiv.org/abs/1506.08422|arxiv_firstAuthor|Li-Qiang Niu +https://arxiv.org/abs/1506.08422|arxiv_updated|2015-06-28T16:17:40Z +https://arxiv.org/abs/1506.08422|arxiv_title|Topic2Vec: Learning Distributed Representations of Topics +https://arxiv.org/abs/1506.08422|arxiv_published|2015-06-28T16:17:40Z +https://arxiv.org/abs/1506.08422|arxiv_num|1506.08422 +http://nlp.stanford.edu/software/index.shtml|creationDate|2014-03-15 +http://nlp.stanford.edu/software/index.shtml|tag|http://www.semanlink.net/tag/stanford +http://nlp.stanford.edu/software/index.shtml|tag|http://www.semanlink.net/tag/nlp +http://nlp.stanford.edu/software/index.shtml|tag|http://www.semanlink.net/tag/nlp_tools +http://nlp.stanford.edu/software/index.shtml|title|The Stanford NLP (Natural Language Processing) Group / software +http://nlp.stanford.edu/software/index.shtml|creationTime|2014-03-15T11:02:57Z +https://blog.slock.it/a-dao-counter-attack-613548408dd7#.ovxo55nv1|creationDate|2016-06-20 +https://blog.slock.it/a-dao-counter-attack-613548408dd7#.ovxo55nv1|tag|http://www.semanlink.net/tag/dao_attack +https://blog.slock.it/a-dao-counter-attack-613548408dd7#.ovxo55nv1|title|A DAO Counter-Attack — Slock.it Blog +https://blog.slock.it/a-dao-counter-attack-613548408dd7#.ovxo55nv1|creationTime|2016-06-20T10:31:44Z +http://fr.wikipedia.org/wiki/Zarmas|creationDate|2012-09-14 +http://fr.wikipedia.org/wiki/Zarmas|tag|http://www.semanlink.net/tag/jerma +http://fr.wikipedia.org/wiki/Zarmas|title|Zarmas - Wikipédia +http://fr.wikipedia.org/wiki/Zarmas|creationTime|2012-09-14T23:54:04Z +http://www.meta-environment.org/doc/api/aterm-java/index.html?overview-summary.html|creationDate|2015-04-16 +http://www.meta-environment.org/doc/api/aterm-java/index.html?overview-summary.html|tag|http://www.semanlink.net/tag/javadoc +http://www.meta-environment.org/doc/api/aterm-java/index.html?overview-summary.html|tag|http://www.semanlink.net/tag/aterm +http://www.meta-environment.org/doc/api/aterm-java/index.html?overview-summary.html|title|aterm javadoc +http://www.meta-environment.org/doc/api/aterm-java/index.html?overview-summary.html|creationTime|2015-04-16T11:29:11Z +http://www.bbc.co.uk/blogs/thereporters/maggieshiels/2010/05/the_antifacebook.html|creationDate|2010-05-17 +http://www.bbc.co.uk/blogs/thereporters/maggieshiels/2010/05/the_antifacebook.html|tag|http://www.semanlink.net/tag/facebook +http://www.bbc.co.uk/blogs/thereporters/maggieshiels/2010/05/the_antifacebook.html|title|dot.Maggie: The anti-Facebook +http://www.bbc.co.uk/blogs/thereporters/maggieshiels/2010/05/the_antifacebook.html|creationTime|2010-05-17T14:31:09Z +http://www.openlinksw.com/blog/~kidehen/?id=1384|creationDate|2008-06-12 +http://www.openlinksw.com/blog/~kidehen/?id=1384|tag|http://www.semanlink.net/tag/library_of_congress +http://www.openlinksw.com/blog/~kidehen/?id=1384|tag|http://www.semanlink.net/tag/kingsley_idehen +http://www.openlinksw.com/blog/~kidehen/?id=1384|tag|http://www.semanlink.net/tag/linked_data_application +http://www.openlinksw.com/blog/~kidehen/?id=1384|title|Linked Data in Action: Library of Congress +http://www.openlinksw.com/blog/~kidehen/?id=1384|creationTime|2008-06-12T08:21:12Z +http://ipython.org/|creationDate|2015-01-04 +http://ipython.org/|tag|http://www.semanlink.net/tag/python +http://ipython.org/|title|IPython +http://ipython.org/|creationTime|2015-01-04T15:49:08Z +https://arxiv.org/abs/1812.04616|creationDate|2018-12-14 +https://arxiv.org/abs/1812.04616|tag|http://www.semanlink.net/tag/neural_machine_translation 
+https://arxiv.org/abs/1812.04616|tag|http://www.semanlink.net/tag/language_model +https://arxiv.org/abs/1812.04616|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1812.04616|tag|http://www.semanlink.net/tag/sequence_to_sequence_learning +https://arxiv.org/abs/1812.04616|arxiv_author|Yulia Tsvetkov +https://arxiv.org/abs/1812.04616|arxiv_author|Sachin Kumar +https://arxiv.org/abs/1812.04616|comment|"predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) + +[@honnibal](https://twitter.com/honnibal/status/1073513114468081664) + +" +https://arxiv.org/abs/1812.04616|title|[1812.04616] Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs +https://arxiv.org/abs/1812.04616|creationTime|2018-12-14T14:50:03Z +https://arxiv.org/abs/1812.04616|arxiv_summary|"The Softmax function is used in the final layer of nearly all existing +sequence-to-sequence models for language generation. However, it is usually the +slowest layer to compute which limits the vocabulary size to a subset of most +frequent types; and it has a large memory footprint. We propose a general +technique for replacing the softmax layer with a continuous embedding layer. +Our primary innovations are a novel probabilistic loss, and a training and +inference procedure in which we generate a probability distribution over +pre-trained word embeddings, instead of a multinomial distribution over the +vocabulary obtained via softmax. We evaluate this new class of +sequence-to-sequence models with continuous outputs on the task of neural +machine translation. We show that our models obtain upto 2.5x speed-up in +training time while performing on par with the state-of-the-art models in terms +of translation quality. These models are capable of handling very large +vocabularies without compromising on translation quality. They also produce +more meaningful errors than in the softmax-based models, as these errors +typically lie in a subspace of the vector space of the reference translations." 
+https://arxiv.org/abs/1812.04616|arxiv_firstAuthor|Sachin Kumar +https://arxiv.org/abs/1812.04616|arxiv_updated|2019-03-22T03:08:01Z +https://arxiv.org/abs/1812.04616|arxiv_title|Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs +https://arxiv.org/abs/1812.04616|arxiv_published|2018-12-10T20:00:36Z +https://arxiv.org/abs/1812.04616|arxiv_num|1812.04616 +http://jotterbach.github.io/2016/03/24/Principal_Component_Analysis/|creationDate|2018-03-05 +http://jotterbach.github.io/2016/03/24/Principal_Component_Analysis/|tag|http://www.semanlink.net/tag/feature_selection +http://jotterbach.github.io/2016/03/24/Principal_Component_Analysis/|tag|http://www.semanlink.net/tag/principal_component_analysis +http://jotterbach.github.io/2016/03/24/Principal_Component_Analysis/|title|Principal Component Analysis (PCA) for Feature Selection and some of its Pitfalls · Johannes Otterbach +http://jotterbach.github.io/2016/03/24/Principal_Component_Analysis/|creationTime|2018-03-05T11:29:06Z +http://www.qwant.com/|creationDate|2013-09-02 +http://www.qwant.com/|tag|http://www.semanlink.net/tag/search_engines +http://www.qwant.com/|creationTime|2013-09-02T13:51:11Z +http://rapid-i.com/wiki/index.php?title=Integrating_RapidMiner_into_your_application|creationDate|2013-09-11 +http://rapid-i.com/wiki/index.php?title=Integrating_RapidMiner_into_your_application|tag|http://www.semanlink.net/tag/rapidminer_java +http://rapid-i.com/wiki/index.php?title=Integrating_RapidMiner_into_your_application|title|Integrating RapidMiner into your application - Rapid-I-Wiki +http://rapid-i.com/wiki/index.php?title=Integrating_RapidMiner_into_your_application|creationTime|2013-09-11T00:54:09Z +https://www.depends-on-the-definition.com/|creationDate|2018-09-09 +https://www.depends-on-the-definition.com/|tag|http://www.semanlink.net/tag/nlp_sample_code +https://www.depends-on-the-definition.com/|tag|http://www.semanlink.net/tag/ml_nlp_blog +https://www.depends-on-the-definition.com/|title|Depends on the definition - it's about machine learning, data science and more +https://www.depends-on-the-definition.com/|creationTime|2018-09-09T15:32:10Z +http://ceur-ws.org/Vol-840/03-paper-26.pdf|creationDate|2012-04-17 +http://ceur-ws.org/Vol-840/03-paper-26.pdf|tag|http://www.semanlink.net/tag/linked_learning_2012 +http://ceur-ws.org/Vol-840/03-paper-26.pdf|tag|http://www.semanlink.net/tag/linked_learning +http://ceur-ws.org/Vol-840/03-paper-26.pdf|tag|http://www.semanlink.net/tag/semantic_cms +http://ceur-ws.org/Vol-840/03-paper-26.pdf|title|Semantic CMS and Wikis as Platforms for Linked Learning +http://ceur-ws.org/Vol-840/03-paper-26.pdf|creationTime|2012-04-17T11:57:37Z +http://www.awesometapes.com/|creationDate|2016-09-22 +http://www.awesometapes.com/|tag|http://www.semanlink.net/tag/music_of_africa +http://www.awesometapes.com/|title|Awesome Tapes From Africa +http://www.awesometapes.com/|creationTime|2016-09-22T18:56:11Z +http://www.w3.org/blog/news/archives/3943|creationDate|2014-07-10 +http://www.w3.org/blog/news/archives/3943|tag|http://www.semanlink.net/tag/csv +http://www.w3.org/blog/news/archives/3943|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/blog/news/archives/3943|tag|http://www.semanlink.net/tag/data_web +http://www.w3.org/blog/news/archives/3943|title|Draft Model for Tabular Data and Metadata on the Web, and a Draft Metadata Vocabulary for Tabular Data Published W3C News +http://www.w3.org/blog/news/archives/3943|creationTime|2014-07-10T11:24:29Z 
+http://ruiyan.me/pubs/tutorial-emnlp18.pdf|creationDate|2018-11-06
+http://ruiyan.me/pubs/tutorial-emnlp18.pdf|tag|http://www.semanlink.net/tag/tutorial
+http://ruiyan.me/pubs/tutorial-emnlp18.pdf|tag|http://www.semanlink.net/tag/slides
+http://ruiyan.me/pubs/tutorial-emnlp18.pdf|tag|http://www.semanlink.net/tag/chatbot
+http://ruiyan.me/pubs/tutorial-emnlp18.pdf|tag|http://www.semanlink.net/tag/emnlp_2018
+http://ruiyan.me/pubs/tutorial-emnlp18.pdf|comment|"by Dr Wei Wu (Microsoft XiaoIce - chatbot with 200 million users in China) and Dr Rui Yan (Peking Univ)
+
+- Chit-chat (casual, non goal oriented) open-domain. Must be relevant to the context and diverse (informative) to be engaging.
+- why creating a chat? to prove an AI can speak like a human, commercial reasons, link to services.
+
+Task oriented vs non task oriented: this tutorial is about the second one.
+
+Retrieval based vs generation based.
+
+Basic knowledge of DL for chatbots:
+
+- word embeddings
+- sentence embeddings (CNN, RNN)
+- dialogue modeling: seq-to-seq with attention
+
+Response selection for retrieval based chatbots:
+
+- single turn response selection (slides 37-57)
+ - framework 1: matching with seq embeddings
+ - framework 2: matching with message-response interaction (46)
+ - extension of 1: KnowledgeMatching with External Knowledge (53)
+ - extension of 2: RepresentationsMatching with Multiple Levels of Representations (54)
+ - insights from comparison between 1 and 2 (57)
+- multi turn response selection (62)
+ - context is now: mess + history
+ - again, 2 frameworks
+
+Emerging directions (79):
+
+- matching with better representations
+ - Self-Attention (82)
+ - fusing multiple types of repr. But how to fuse matters (83)
+ - pre-training
+
+
+Learning a matching model for response selection (84)
+
+Generation based models for chatbots:
+
+- single turn generation (89)
+ - Basic generation model
+ - seq2seq
+ - Attention
+ - Bi-directional modeling
+- multi turn generation
+ - Contexts are important
+ - Context sensitive models
+ - Hierarchical context modeling
+ - Latent variable modeling
+ - Hierarchical memory networks
+
+Diversity in conversations (99)
+
+Content introducing (106)
+
+Additional elements (113)
+
+- Topics in conversation
+- Emotions
+
+Persona in chat:
+
+- Persona
+- ...
+- Knowledge
+- Common sense
+
+RL and Adversarial learning in conversations (125)
+
+Evaluation (132)
+
+Future trends:
+
+- Reasoning in dialogues
+- X-grounded dialogues
+"
+http://ruiyan.me/pubs/tutorial-emnlp18.pdf|title|Deep Chit-Chat: deep learning for chatbots (EMNLP 2018 Tutorial)
+http://ruiyan.me/pubs/tutorial-emnlp18.pdf|creationTime|2018-11-06T14:37:53Z
+http://www.infoworld.com/d/adventures-in-it/13-best-practices-it-outsourcing-034|creationDate|2010-07-31
+http://www.infoworld.com/d/adventures-in-it/13-best-practices-it-outsourcing-034|tag|http://www.semanlink.net/tag/outsourcing
+http://www.infoworld.com/d/adventures-in-it/13-best-practices-it-outsourcing-034|title|13 best practices for IT outsourcing Adventures in IT - InfoWorld
+http://www.infoworld.com/d/adventures-in-it/13-best-practices-it-outsourcing-034|creationTime|2010-07-31T17:18:51Z
+http://news.bbc.co.uk/1/hi/sci/tech/4167040.stm|creationDate|2005-08-22
+http://news.bbc.co.uk/1/hi/sci/tech/4167040.stm|tag|http://www.semanlink.net/tag/ogm
+http://news.bbc.co.uk/1/hi/sci/tech/4167040.stm|comment|"Most GM plants contain a gene for antibiotic resistance, but there are fears this could transfer to bacteria, making them immune to common drugs. 
+ +Researchers from Tennessee say their new method carries no such risk." +http://news.bbc.co.uk/1/hi/sci/tech/4167040.stm|title|BBC NEWS Science/Nature Scientists hope to ease GM fears +http://news.bbc.co.uk/1/hi/sci/tech/4167040.stm|source|BBC +http://www.sauverledarfour.org/appel.php|creationDate|2008-11-27 +http://www.sauverledarfour.org/appel.php|tag|http://www.semanlink.net/tag/petition +http://www.sauverledarfour.org/appel.php|tag|http://www.semanlink.net/tag/darfour +http://www.sauverledarfour.org/appel.php|tag|http://www.semanlink.net/tag/indifference +http://www.sauverledarfour.org/appel.php|comment|De l'indifférence des peuples naît l'inaction des gouvernants. +http://www.sauverledarfour.org/appel.php|title|Sauver Le Darfour - Appel +http://www.sauverledarfour.org/appel.php|creationTime|2008-11-27T01:34:20Z +http://www.lemonde.fr/festival/article/2015/08/05/demande-a-la-poussiere_4712519_4415198.html|creationDate|2015-08-07 +http://www.lemonde.fr/festival/article/2015/08/05/demande-a-la-poussiere_4712519_4415198.html|tag|http://www.semanlink.net/tag/dosso +http://www.lemonde.fr/festival/article/2015/08/05/demande-a-la-poussiere_4712519_4415198.html|tag|http://www.semanlink.net/tag/new_africa +http://www.lemonde.fr/festival/article/2015/08/05/demande-a-la-poussiere_4712519_4415198.html|title|La conquête de l’ouest (de l’Afrique) : Demande à la poussière +http://www.lemonde.fr/festival/article/2015/08/05/demande-a-la-poussiere_4712519_4415198.html|creationTime|2015-08-07T23:10:22Z +https://nlp.stanford.edu/pubs/wang-manning-ijcnlp13-nonlinear.pdf|creationDate|2018-10-22 +https://nlp.stanford.edu/pubs/wang-manning-ijcnlp13-nonlinear.pdf|tag|http://www.semanlink.net/tag/chris_manning +https://nlp.stanford.edu/pubs/wang-manning-ijcnlp13-nonlinear.pdf|tag|http://www.semanlink.net/tag/sequence_labeling +https://nlp.stanford.edu/pubs/wang-manning-ijcnlp13-nonlinear.pdf|tag|http://www.semanlink.net/tag/named_entity_recognition +https://nlp.stanford.edu/pubs/wang-manning-ijcnlp13-nonlinear.pdf|tag|http://www.semanlink.net/tag/conditional_random_field +https://nlp.stanford.edu/pubs/wang-manning-ijcnlp13-nonlinear.pdf|comment|"> we show the close connection between CRF and “sequence model” neural nets, and present an empirical investigation to compare their performance on two sequence labeling tasks – Named Entity Recognition and Syntactic Chunking. Our results suggest that **non-linear models are highly effective in low-dimensional distributional spaces. Somewhat surprisingly, we find that a non-linear architecture offers no benefits in a high-dimensional discrete feature space**. 
+ +" +https://nlp.stanford.edu/pubs/wang-manning-ijcnlp13-nonlinear.pdf|title|Effect of Non-linear Deep Architecture in Sequence Labeling +https://nlp.stanford.edu/pubs/wang-manning-ijcnlp13-nonlinear.pdf|creationTime|2018-10-22T08:28:19Z +http://opensourceconnections.com/blog/2013/06/07/search-as-you-type-with-solr/|creationDate|2017-07-11 +http://opensourceconnections.com/blog/2013/06/07/search-as-you-type-with-solr/|tag|http://www.semanlink.net/tag/solr +http://opensourceconnections.com/blog/2013/06/07/search-as-you-type-with-solr/|title|Search-As-You-Type with Solr +http://opensourceconnections.com/blog/2013/06/07/search-as-you-type-with-solr/|creationTime|2017-07-11T03:39:08Z +https://metadatacenter.org/|creationDate|2017-02-10 +https://metadatacenter.org/|tag|http://www.semanlink.net/tag/rdf_editor +https://metadatacenter.org/|comment|un éditeur rdf basé sur des templates +https://metadatacenter.org/|title|Home CEDAR - Center for Expanded Data Annotation and Retrieval +https://metadatacenter.org/|creationTime|2017-02-10T13:49:01Z +http://news.bbc.co.uk/2/hi/science/nature/4467676.stm|creationDate|2005-11-27 +http://news.bbc.co.uk/2/hi/science/nature/4467676.stm|tag|http://www.semanlink.net/tag/hayabusa +http://news.bbc.co.uk/2/hi/science/nature/4467676.stm|comment|Scientists believe it collected the debris, but will only be sure when Hayabusa returns to Earth in 2007. +http://news.bbc.co.uk/2/hi/science/nature/4467676.stm|title|BBC NEWS - Probe 'gathers asteroid material' +http://news.bbc.co.uk/2/hi/science/nature/4467676.stm|source|BBC +https://livebook.manning.com/#!/book/deep-learning-with-python|creationDate|2017-12-07 +https://livebook.manning.com/#!/book/deep-learning-with-python|tag|http://www.semanlink.net/tag/deep_learning_book +https://livebook.manning.com/#!/book/deep-learning-with-python|tag|http://www.semanlink.net/tag/francois_chollet +https://livebook.manning.com/#!/book/deep-learning-with-python|tag|http://www.semanlink.net/tag/livre +https://livebook.manning.com/#!/book/deep-learning-with-python|title|liveBook - Deep Learning with Python +https://livebook.manning.com/#!/book/deep-learning-with-python|creationTime|2017-12-07T23:35:09Z +http://www.pandia.com/sew/481-gartner.html|creationDate|2008-06-22 +http://www.pandia.com/sew/481-gartner.html|tag|http://www.semanlink.net/tag/google +http://www.pandia.com/sew/481-gartner.html|comment|Posted on Monday 2 July 2007 +http://www.pandia.com/sew/481-gartner.html|title|» Google: one million servers and counting +http://www.pandia.com/sew/481-gartner.html|creationTime|2008-06-22T03:00:31Z +https://aclanthology.coli.uni-saarland.de/papers/D18-1482/d18-1482|creationDate|2018-11-10 +https://aclanthology.coli.uni-saarland.de/papers/D18-1482/d18-1482|tag|http://www.semanlink.net/tag/sentence_embeddings +https://aclanthology.coli.uni-saarland.de/papers/D18-1482/d18-1482|tag|http://www.semanlink.net/tag/word_embedding +https://aclanthology.coli.uni-saarland.de/papers/D18-1482/d18-1482|tag|http://www.semanlink.net/tag/word_mover_s_distance +https://aclanthology.coli.uni-saarland.de/papers/D18-1482/d18-1482|tag|http://www.semanlink.net/tag/nlp_ibm +https://aclanthology.coli.uni-saarland.de/papers/D18-1482/d18-1482|tag|http://www.semanlink.net/tag/emnlp_2018 +https://aclanthology.coli.uni-saarland.de/papers/D18-1482/d18-1482|comment|"unsupervised embeddings of sentences of variable length from pre-trained word embeddings (better on short length text). 
+ +(Builds on the word mover's distance, but using ideas borrowed from kernel methods approximation, gets a representation of sentences, instead of just a distance between them) + + + + + +" +https://aclanthology.coli.uni-saarland.de/papers/D18-1482/d18-1482|title|Word Mover's Embedding: From Word2Vec to Document Embedding (2018) +https://aclanthology.coli.uni-saarland.de/papers/D18-1482/d18-1482|creationTime|2018-11-10T15:38:38Z +https://jakevdp.github.io/PythonDataScienceHandbook/02.02-the-basics-of-numpy-arrays.html|creationDate|2019-01-07 +https://jakevdp.github.io/PythonDataScienceHandbook/02.02-the-basics-of-numpy-arrays.html|tag|http://www.semanlink.net/tag/numpy +https://jakevdp.github.io/PythonDataScienceHandbook/02.02-the-basics-of-numpy-arrays.html|title|The Basics of NumPy Arrays Python Data Science Handbook +https://jakevdp.github.io/PythonDataScienceHandbook/02.02-the-basics-of-numpy-arrays.html|creationTime|2019-01-07T09:19:09Z +https://stackoverflow.com/questions/17071871/select-rows-from-a-dataframe-based-on-values-in-a-column-in-pandas|creationDate|2017-06-20 +https://stackoverflow.com/questions/17071871/select-rows-from-a-dataframe-based-on-values-in-a-column-in-pandas|tag|http://www.semanlink.net/tag/stack_overflow +https://stackoverflow.com/questions/17071871/select-rows-from-a-dataframe-based-on-values-in-a-column-in-pandas|tag|http://www.semanlink.net/tag/pandas +https://stackoverflow.com/questions/17071871/select-rows-from-a-dataframe-based-on-values-in-a-column-in-pandas|title|python - Select rows from a DataFrame based on values in a column in pandas - Stack Overflow +https://stackoverflow.com/questions/17071871/select-rows-from-a-dataframe-based-on-values-in-a-column-in-pandas|creationTime|2017-06-20T09:40:20Z +http://mobile.reuters.com/article/idUSBRE9BJ1C220131220?irpc=932|creationDate|2013-12-21 +http://mobile.reuters.com/article/idUSBRE9BJ1C220131220?irpc=932|tag|http://www.semanlink.net/tag/nsa_spying_scandal +http://mobile.reuters.com/article/idUSBRE9BJ1C220131220?irpc=932|tag|http://www.semanlink.net/tag/encryption +http://mobile.reuters.com/article/idUSBRE9BJ1C220131220?irpc=932|tag|http://www.semanlink.net/tag/nsa +http://mobile.reuters.com/article/idUSBRE9BJ1C220131220?irpc=932|title|Reuters - Secret contract tied NSA and security industry pioneer +http://mobile.reuters.com/article/idUSBRE9BJ1C220131220?irpc=932|creationTime|2013-12-21T15:16:04Z +http://developer.yahoo.net/yui/|creationDate|2006-02-17 +http://developer.yahoo.net/yui/|tag|http://www.semanlink.net/tag/ajax +http://developer.yahoo.net/yui/|tag|http://www.semanlink.net/tag/yahoo +http://developer.yahoo.net/yui/|title|Yahoo! UI Library +http://www.veotag.com/|creationDate|2007-05-19 +http://www.veotag.com/|tag|http://www.semanlink.net/tag/video +http://www.veotag.com/|tag|http://www.semanlink.net/tag/tagging +http://www.veotag.com/|comment|"veotag is a new service that lets you display clickable text, called ""veotags,"" within an audio or video file.
+Your audience can see the veotags whenever they play your file on the web. Clicking on a veotag lets your audience jump right to that part of the file." +http://www.veotag.com/|title|veotag +http://www.veotag.com/|creationTime|2007-05-19T14:35:03Z +http://lists.wikimedia.org/pipermail/wikidata-l/2012-August/000908.html|creationDate|2012-08-06 +http://lists.wikimedia.org/pipermail/wikidata-l/2012-August/000908.html|tag|http://www.semanlink.net/tag/wikidata +http://lists.wikimedia.org/pipermail/wikidata-l/2012-August/000908.html|comment|first draft of how Wikidata phase II will be published in RDF +http://lists.wikimedia.org/pipermail/wikidata-l/2012-August/000908.html|title|Export of Wikidata in RDF +http://lists.wikimedia.org/pipermail/wikidata-l/2012-August/000908.html|creationTime|2012-08-06T15:39:30Z +http://wordle.net/|creationDate|2008-08-26 +http://wordle.net/|tag|http://www.semanlink.net/tag/tag_cloud +http://wordle.net/|title|Wordle - Beautiful Word Clouds +http://wordle.net/|creationTime|2008-08-26T11:45:25Z +http://books.google.com/books?id=Ru5LT3RIeoUC&pg=PA165&dq=%22jean-paul+cardinal%22&lr=&ei=DC_pSM2aIYmUzAS1gvTlDg&hl=fr&sig=ACfU3U3Y3-UViYESab_eG0X6o4h704SGGg#PPA165,M1|creationDate|2008-10-05 +http://books.google.com/books?id=Ru5LT3RIeoUC&pg=PA165&dq=%22jean-paul+cardinal%22&lr=&ei=DC_pSM2aIYmUzAS1gvTlDg&hl=fr&sig=ACfU3U3Y3-UViYESab_eG0X6o4h704SGGg#PPA165,M1|tag|http://www.semanlink.net/tag/mathematiques +http://books.google.com/books?id=Ru5LT3RIeoUC&pg=PA165&dq=%22jean-paul+cardinal%22&lr=&ei=DC_pSM2aIYmUzAS1gvTlDg&hl=fr&sig=ACfU3U3Y3-UViYESab_eG0X6o4h704SGGg#PPA165,M1|tag|http://www.semanlink.net/tag/jean_paul +http://books.google.com/books?id=Ru5LT3RIeoUC&pg=PA165&dq=%22jean-paul+cardinal%22&lr=&ei=DC_pSM2aIYmUzAS1gvTlDg&hl=fr&sig=ACfU3U3Y3-UViYESab_eG0X6o4h704SGGg#PPA165,M1|title|On two iterative methods for approximating the roots of a polynomial +http://books.google.com/books?id=Ru5LT3RIeoUC&pg=PA165&dq=%22jean-paul+cardinal%22&lr=&ei=DC_pSM2aIYmUzAS1gvTlDg&hl=fr&sig=ACfU3U3Y3-UViYESab_eG0X6o4h704SGGg#PPA165,M1|creationTime|2008-10-05T23:21:05Z +http://bibliontology.com/|creationDate|2008-06-04 +http://bibliontology.com/|tag|http://www.semanlink.net/tag/ontologies +http://bibliontology.com/|tag|http://www.semanlink.net/tag/frederick_giasson +http://bibliontology.com/|title|Bibliographic Ontology Specification +http://bibliontology.com/|creationTime|2008-06-04T23:49:53Z +https://www.w3.org/blog/2017/02/on-eme-in-html5/|creationDate|2017-04-02 +https://www.w3.org/blog/2017/02/on-eme-in-html5/|tag|http://www.semanlink.net/tag/drm_in_html_5 +https://www.w3.org/blog/2017/02/on-eme-in-html5/|tag|http://www.semanlink.net/tag/eme +https://www.w3.org/blog/2017/02/on-eme-in-html5/|tag|http://www.semanlink.net/tag/tim_berners_lee +https://www.w3.org/blog/2017/02/on-eme-in-html5/|title|On EME in HTML5 W3C Blog +https://www.w3.org/blog/2017/02/on-eme-in-html5/|creationTime|2017-04-02T12:06:31Z +http://www.amundsen.com/blog/archives/1072|creationDate|2012-02-13 +http://www.amundsen.com/blog/archives/1072|tag|http://www.semanlink.net/tag/roy_t_fielding +http://www.amundsen.com/blog/archives/1072|tag|http://www.semanlink.net/tag/rest +http://www.amundsen.com/blog/archives/1072|tag|http://www.semanlink.net/tag/hypermedia +http://www.amundsen.com/blog/archives/1072|comment|the details on how users can change the current state of the application is contained within the message itself (not in the application code running in the client). 
+http://www.amundsen.com/blog/archives/1072|title|mca blog [Hypermedia: data as a first-class element] +http://www.amundsen.com/blog/archives/1072|creationTime|2012-02-13T22:43:04Z +http://passeurdesciences.blog.lemonde.fr/2014/05/11/qui-mangeait-qui-il-y-a-500-millions-dannees/|creationDate|2014-05-11 +http://passeurdesciences.blog.lemonde.fr/2014/05/11/qui-mangeait-qui-il-y-a-500-millions-dannees/|tag|http://www.semanlink.net/tag/explosion_cambrienne +http://passeurdesciences.blog.lemonde.fr/2014/05/11/qui-mangeait-qui-il-y-a-500-millions-dannees/|title|Qui mangeait qui il y a 500 millions d’années Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2014/05/11/qui-mangeait-qui-il-y-a-500-millions-dannees/|creationTime|2014-05-11T18:52:03Z +https://arxiv.org/abs/1811.06031|creationDate|2018-11-17 +https://arxiv.org/abs/1811.06031|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1811.06031|tag|http://www.semanlink.net/tag/sebastian_ruder +https://arxiv.org/abs/1811.06031|tag|http://www.semanlink.net/tag/embeddings +https://arxiv.org/abs/1811.06031|tag|http://www.semanlink.net/tag/multi_task_learning +https://arxiv.org/abs/1811.06031|arxiv_author|Victor Sanh +https://arxiv.org/abs/1811.06031|arxiv_author|Sebastian Ruder +https://arxiv.org/abs/1811.06031|arxiv_author|Thomas Wolf +https://arxiv.org/abs/1811.06031|comment|"[Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) +" +https://arxiv.org/abs/1811.06031|relatedDoc|https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601 +https://arxiv.org/abs/1811.06031|title|[1811.06031] A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks +https://arxiv.org/abs/1811.06031|creationTime|2018-11-17T10:24:49Z +https://arxiv.org/abs/1811.06031|arxiv_summary|"Much effort has been devoted to evaluate whether multi-task learning can be +leveraged to learn rich representations that can be used in various Natural +Language Processing (NLP) down-stream applications. However, there is still a +lack of understanding of the settings in which multi-task learning has a +significant effect. In this work, we introduce a hierarchical model trained in +a multi-task learning setup on a set of carefully selected semantic tasks. The +model is trained in a hierarchical fashion to introduce an inductive bias by +supervising a set of low level tasks at the bottom layers of the model and more +complex tasks at the top layers of the model. This model achieves +state-of-the-art results on a number of tasks, namely Named Entity Recognition, +Entity Mention Detection and Relation Extraction without hand-engineered +features or external NLP tools like syntactic parsers. The hierarchical +training supervision induces a set of shared semantic representations at lower +layers of the model. We show that as we move from the bottom to the top layers +of the model, the hidden states of the layers tend to represent more complex +semantic information." 
+https://arxiv.org/abs/1811.06031|arxiv_firstAuthor|Victor Sanh +https://arxiv.org/abs/1811.06031|arxiv_updated|2018-11-26T06:15:05Z +https://arxiv.org/abs/1811.06031|arxiv_title|A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks +https://arxiv.org/abs/1811.06031|arxiv_published|2018-11-14T19:42:03Z +https://arxiv.org/abs/1811.06031|arxiv_num|1811.06031 +http://www.studytrails.com/java/java8/Java8_Lambdas_FunctionalProgramming/|creationDate|2017-01-13 +http://www.studytrails.com/java/java8/Java8_Lambdas_FunctionalProgramming/|tag|http://www.semanlink.net/tag/tutorial +http://www.studytrails.com/java/java8/Java8_Lambdas_FunctionalProgramming/|tag|http://www.semanlink.net/tag/java_8_lambdas +http://www.studytrails.com/java/java8/Java8_Lambdas_FunctionalProgramming/|tag|http://www.semanlink.net/tag/java_8 +http://www.studytrails.com/java/java8/Java8_Lambdas_FunctionalProgramming/|title|Java 8 - tutorial +http://www.studytrails.com/java/java8/Java8_Lambdas_FunctionalProgramming/|creationTime|2017-01-13T13:11:24Z +http://code.google.com/p/jsdoc-toolkit/|creationDate|2011-01-10 +http://code.google.com/p/jsdoc-toolkit/|tag|http://www.semanlink.net/tag/javascript_tool +http://code.google.com/p/jsdoc-toolkit/|title|jsdoc-toolkit +http://code.google.com/p/jsdoc-toolkit/|creationTime|2011-01-10T01:31:37Z +http://eductice.inrp.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|creationDate|2011-12-22 +http://eductice.inrp.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|tag|http://www.semanlink.net/tag/technology_enhanced_learning +http://eductice.inrp.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|tag|http://www.semanlink.net/tag/education +http://eductice.inrp.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|tag|http://www.semanlink.net/tag/workshop +http://eductice.inrp.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|tag|http://www.semanlink.net/tag/www_2012 +http://eductice.inrp.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|title|Emerging Web Technologies, Facing the Future of Education — EducTice +http://eductice.inrp.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|creationTime|2011-12-22T22:54:36Z +http://leobard.twoday.net/stories/2879108/|creationDate|2006-12-23 +http://leobard.twoday.net/stories/2879108/|tag|http://www.semanlink.net/tag/gnowsis +http://leobard.twoday.net/stories/2879108/|tag|http://www.semanlink.net/tag/personal_information_management +http://leobard.twoday.net/stories/2879108/|tag|http://www.semanlink.net/tag/leo_sauermann +http://leobard.twoday.net/stories/2879108/|comment|If Personal Information Management is the main use of Personal Computers, why is then not part of the Operating System of the computers? Why does it only handle files and folders, and not Persons, Projects and Topics? +http://leobard.twoday.net/stories/2879108/|title|semantic weltbild 2.0: PhD step2: the research question and how can I answer it (is it possible to write a PhD on gnowsis?) 
+http://idealliance.org/proceedings/xtech05/papers/02-07-04/|creationDate|2005-09-29 +http://idealliance.org/proceedings/xtech05/papers/02-07-04/|tag|http://www.semanlink.net/tag/rdf +http://idealliance.org/proceedings/xtech05/papers/02-07-04/|tag|http://www.semanlink.net/tag/leigh_dodds +http://idealliance.org/proceedings/xtech05/papers/02-07-04/|tag|http://www.semanlink.net/tag/rest +http://idealliance.org/proceedings/xtech05/papers/02-07-04/|tag|http://www.semanlink.net/tag/social_content_services +http://idealliance.org/proceedings/xtech05/papers/02-07-04/|tag|http://www.semanlink.net/tag/foaf +http://idealliance.org/proceedings/xtech05/papers/02-07-04/|title|Connecting Social Content Services using FOAF, RDF and REST +http://www.lemonde.fr/afrique/article/2017/03/28/pourquoi-les-chinois-veulent-la-peau-des-anes-africains_5101859_3212.html|creationDate|2017-06-12 +http://www.lemonde.fr/afrique/article/2017/03/28/pourquoi-les-chinois-veulent-la-peau-des-anes-africains_5101859_3212.html|tag|http://www.semanlink.net/tag/chine_afrique +http://www.lemonde.fr/afrique/article/2017/03/28/pourquoi-les-chinois-veulent-la-peau-des-anes-africains_5101859_3212.html|tag|http://www.semanlink.net/tag/ane +http://www.lemonde.fr/afrique/article/2017/03/28/pourquoi-les-chinois-veulent-la-peau-des-anes-africains_5101859_3212.html|title|Pourquoi les Chinois veulent la peau des ânes africains +http://www.lemonde.fr/afrique/article/2017/03/28/pourquoi-les-chinois-veulent-la-peau-des-anes-africains_5101859_3212.html|creationTime|2017-06-12T00:53:44Z +https://dl.acm.org/citation.cfm?doid=3178876.3186024|creationDate|2018-05-10 +https://dl.acm.org/citation.cfm?doid=3178876.3186024|tag|http://www.semanlink.net/tag/semi_supervised_learning +https://dl.acm.org/citation.cfm?doid=3178876.3186024|tag|http://www.semanlink.net/tag/embeddings +https://dl.acm.org/citation.cfm?doid=3178876.3186024|tag|http://www.semanlink.net/tag/relation_extraction +https://dl.acm.org/citation.cfm?doid=3178876.3186024|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://dl.acm.org/citation.cfm?doid=3178876.3186024|comment|"Extraction de relations de corpus de textes de façon semi-supervisée, dans un contexte où on a peu de données labellisées décrivant les relations. + +Par exemple, des données labellisées indiquent que le texte ""Beijing, capital of China"" correspond à la relation entre entités : (""Beijing"", ""Capital Of"", ""China), et on voudrait pouvoir extraire les entités et relations pertinentes à partir de texte tel que ""Paris, France's capital,..."" + +Le papier décrit une méthode qui combine deux modules, l'un basé sur l'extraction automatique de patterns (par ex ""[Head], Capital Of [Tail]"") et l'autre sur la ""sémantique distributionnelle"" (du type ""word embeddings""). 
Ces deux modules collaborent, le premier permettant de créer des instances de relations augmentant la base de connaissance sur lequel entrainer le second, et le second aidant le premier à déterminer des patterns informatifs (""co-entrainement"") + + +" +https://dl.acm.org/citation.cfm?doid=3178876.3186024|title|Weakly-supervised Relation Extraction by Pattern-enhanced Embedding Learning +https://dl.acm.org/citation.cfm?doid=3178876.3186024|creationTime|2018-05-10T14:42:58Z +https://medium.com/@aneesha/visualising-top-features-in-linear-svm-with-scikit-learn-and-matplotlib-3454ab18a14d|creationDate|2018-09-15 +https://medium.com/@aneesha/visualising-top-features-in-linear-svm-with-scikit-learn-and-matplotlib-3454ab18a14d|tag|http://www.semanlink.net/tag/support_vector_machine +https://medium.com/@aneesha/visualising-top-features-in-linear-svm-with-scikit-learn-and-matplotlib-3454ab18a14d|comment|> The weights obtained from svm.coef_ represent the vector coordinates which are orthogonal to the hyperplane and their direction indicates the predicted class. The absolute size of the coefficients in relation to each other can then be used to determine feature importance for the data separation task +https://medium.com/@aneesha/visualising-top-features-in-linear-svm-with-scikit-learn-and-matplotlib-3454ab18a14d|title|Visualising Top Features in Linear SVM with Scikit Learn and Matplotlib +https://medium.com/@aneesha/visualising-top-features-in-linear-svm-with-scikit-learn-and-matplotlib-3454ab18a14d|creationTime|2018-09-15T18:22:29Z +http://arxiv.org/abs/1312.6184v5|creationDate|2014-10-06 +http://arxiv.org/abs/1312.6184v5|tag|http://www.semanlink.net/tag/deep_learning +http://arxiv.org/abs/1312.6184v5|tag|http://www.semanlink.net/tag/machine_learning +http://arxiv.org/abs/1312.6184v5|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/1312.6184v5|arxiv_author|Rich Caruana +http://arxiv.org/abs/1312.6184v5|arxiv_author|Lei Jimmy Ba +http://arxiv.org/abs/1312.6184v5|title|[1312.6184] Do Deep Nets Really Need to be Deep? +http://arxiv.org/abs/1312.6184v5|creationTime|2014-10-06T00:29:41Z +http://arxiv.org/abs/1312.6184v5|arxiv_summary|"Currently, deep neural networks are the state of the art on problems such as +speech recognition and computer vision. In this extended abstract, we show that +shallow feed-forward networks can learn the complex functions previously +learned by deep nets and achieve accuracies previously only achievable with +deep models. Moreover, in some cases the shallow neural nets can learn these +deep functions using a total number of parameters similar to the original deep +model. We evaluate our method on the TIMIT phoneme recognition task and are +able to train shallow fully-connected nets that perform similarly to complex, +well-engineered, deep convolutional architectures. Our success in training +shallow neural nets to mimic deeper models suggests that there probably exist +better algorithms for training shallow feed-forward nets than those currently +available." +http://arxiv.org/abs/1312.6184v5|arxiv_firstAuthor|Lei Jimmy Ba +http://arxiv.org/abs/1312.6184v5|arxiv_updated|2014-10-11T00:19:10Z +http://arxiv.org/abs/1312.6184v5|arxiv_title|Do Deep Nets Really Need to be Deep? 
+http://arxiv.org/abs/1312.6184v5|arxiv_published|2013-12-21T00:47:43Z +http://arxiv.org/abs/1312.6184v5|arxiv_num|1312.6184 +https://www.w3.org/community/gao/2017/09/12/toyota-motor-europe-use-of-schema-org-and-auto-schema-org-vocabularies/|creationDate|2017-09-25 +https://www.w3.org/community/gao/2017/09/12/toyota-motor-europe-use-of-schema-org-and-auto-schema-org-vocabularies/|tag|http://www.semanlink.net/tag/toyota +https://www.w3.org/community/gao/2017/09/12/toyota-motor-europe-use-of-schema-org-and-auto-schema-org-vocabularies/|tag|http://www.semanlink.net/tag/mirek_sopek +https://www.w3.org/community/gao/2017/09/12/toyota-motor-europe-use-of-schema-org-and-auto-schema-org-vocabularies/|tag|http://www.semanlink.net/tag/schema_org +https://www.w3.org/community/gao/2017/09/12/toyota-motor-europe-use-of-schema-org-and-auto-schema-org-vocabularies/|tag|http://www.semanlink.net/tag/automotive_ontology_community_group +https://www.w3.org/community/gao/2017/09/12/toyota-motor-europe-use-of-schema-org-and-auto-schema-org-vocabularies/|title|Toyota Motor Europe use of schema.org and auto.schema.org vocabularies Automotive Ontology Community Group +https://www.w3.org/community/gao/2017/09/12/toyota-motor-europe-use-of-schema-org-and-auto-schema-org-vocabularies/|creationTime|2017-09-25T15:07:24Z +http://www.hotel-la-desirade.com/|creationDate|2006-01-23 +http://www.hotel-la-desirade.com/|tag|http://www.semanlink.net/tag/pierre_rebour +http://www.hotel-la-desirade.com/|title|HOTEL LA DESIRADE Belle Ile en mer : chambres et suites dans un hôtel-village +http://www.bbc.com/news/technology-30691393|creationDate|2015-01-06 +http://www.bbc.com/news/technology-30691393|tag|http://www.semanlink.net/tag/toyota +http://www.bbc.com/news/technology-30691393|tag|http://www.semanlink.net/tag/hydrogen_economy +http://www.bbc.com/news/technology-30691393|tag|http://www.semanlink.net/tag/hydrogen_cars +http://www.bbc.com/news/technology-30691393|title|BBC News - CES 2015: Toyota opens up hydrogen patents +http://www.bbc.com/news/technology-30691393|creationTime|2015-01-06T14:01:58Z +http://www.liberation.fr/culture/01012357658-petite-poucette-la-generation-mutante|creationDate|2011-09-05 +http://www.liberation.fr/culture/01012357658-petite-poucette-la-generation-mutante|tag|http://www.semanlink.net/tag/michel_serres +http://www.liberation.fr/culture/01012357658-petite-poucette-la-generation-mutante|title|Petite Poucette, la génération mutante +http://www.liberation.fr/culture/01012357658-petite-poucette-la-generation-mutante|creationTime|2011-09-05T14:41:01Z +https://www.researchgate.net/publication/221630712_Using_Machine_Learning_to_Support_Continuous_Ontology_Development|creationDate|2018-08-07 +https://www.researchgate.net/publication/221630712_Using_Machine_Learning_to_Support_Continuous_Ontology_Development|tag|http://www.semanlink.net/tag/ontologies +https://www.researchgate.net/publication/221630712_Using_Machine_Learning_to_Support_Continuous_Ontology_Development|tag|http://www.semanlink.net/tag/semanlink2_related +https://www.researchgate.net/publication/221630712_Using_Machine_Learning_to_Support_Continuous_Ontology_Development|tag|http://www.semanlink.net/tag/automatic_tagging +https://www.researchgate.net/publication/221630712_Using_Machine_Learning_to_Support_Continuous_Ontology_Development|tag|http://www.semanlink.net/tag/machine_learning +https://www.researchgate.net/publication/221630712_Using_Machine_Learning_to_Support_Continuous_Ontology_Development|title|Using Machine Learning to Support 
Continuous Ontology Development (2010) +https://www.researchgate.net/publication/221630712_Using_Machine_Learning_to_Support_Continuous_Ontology_Development|creationTime|2018-08-07T16:00:18Z +http://blog.semantic-web.at/2011/05/19/seevl-explore-the-cultural-universe-based-on-semantic-web-technologies/|creationDate|2011-05-24 +http://blog.semantic-web.at/2011/05/19/seevl-explore-the-cultural-universe-based-on-semantic-web-technologies/|tag|http://www.semanlink.net/tag/seevl +http://blog.semantic-web.at/2011/05/19/seevl-explore-the-cultural-universe-based-on-semantic-web-technologies/|tag|http://www.semanlink.net/tag/json +http://blog.semantic-web.at/2011/05/19/seevl-explore-the-cultural-universe-based-on-semantic-web-technologies/|title|The Semantic Puzzle Seevl: Explore the cultural universe based on semantic web technologies +http://blog.semantic-web.at/2011/05/19/seevl-explore-the-cultural-universe-based-on-semantic-web-technologies/|creationTime|2011-05-24T22:02:04Z +http://www.speech.sri.com/projects/srilm/|creationDate|2012-03-24 +http://www.speech.sri.com/projects/srilm/|tag|http://www.semanlink.net/tag/language_model +http://www.speech.sri.com/projects/srilm/|tag|http://www.semanlink.net/tag/nlp_class +http://www.speech.sri.com/projects/srilm/|tag|http://www.semanlink.net/tag/nlp_tools +http://www.speech.sri.com/projects/srilm/|comment|SRILM is a toolkit for building and applying statistical language models (LMs), primarily for use in speech recognition, statistical tagging and segmentation, and machine translation. +http://www.speech.sri.com/projects/srilm/|title|SRILM - The SRI Language Modeling Toolkit +http://www.speech.sri.com/projects/srilm/|creationTime|2012-03-24T09:00:01Z +http://www.lemonde.fr/pixels/article/2017/12/12/d-anciens-cadres-de-facebook-expriment-leur-culpabilite-d-avoir-contribue-a-son-succes_5228538_4408996.html|creationDate|2017-12-12 +http://www.lemonde.fr/pixels/article/2017/12/12/d-anciens-cadres-de-facebook-expriment-leur-culpabilite-d-avoir-contribue-a-son-succes_5228538_4408996.html|tag|http://www.semanlink.net/tag/facebook +http://www.lemonde.fr/pixels/article/2017/12/12/d-anciens-cadres-de-facebook-expriment-leur-culpabilite-d-avoir-contribue-a-son-succes_5228538_4408996.html|title|D’anciens cadres de Facebook expriment leur « culpabilité » d’avoir contribué à son succès +http://www.lemonde.fr/pixels/article/2017/12/12/d-anciens-cadres-de-facebook-expriment-leur-culpabilite-d-avoir-contribue-a-son-succes_5228538_4408996.html|creationTime|2017-12-12T17:54:57Z +http://bdewilde.github.io/blog/2014/09/23/intro-to-automatic-keyphrase-extraction/|creationDate|2017-06-14 +http://bdewilde.github.io/blog/2014/09/23/intro-to-automatic-keyphrase-extraction/|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://bdewilde.github.io/blog/2014/09/23/intro-to-automatic-keyphrase-extraction/|tag|http://www.semanlink.net/tag/python_sample_code +http://bdewilde.github.io/blog/2014/09/23/intro-to-automatic-keyphrase-extraction/|tag|http://www.semanlink.net/tag/nlp_sample_code +http://bdewilde.github.io/blog/2014/09/23/intro-to-automatic-keyphrase-extraction/|tag|http://www.semanlink.net/tag/good +http://bdewilde.github.io/blog/2014/09/23/intro-to-automatic-keyphrase-extraction/|comment|"Candidate identification + +- remove stop words and punctuation, filtering for words with certain part of speech / POS patterns, using external knowledge bases like wordnet or wikipedia as references of good/bad keyphrases + +Keyphrase selection + +- frequency stats (TF-IDT, BM25). 
Not very good (the best keyphrases aren’t necessarily the most frequent within a document) + +- unsupervised + - graph based ranking: + - the importance of a candidate is determined by its relatedness to other candidates + - frequency of co-occurence + - semantic relatedness + - a doc is represented as a graph (nodes = candidates) + - algos: + - TextRank + - DivRank + - topic-based clustering +- supervised + - previously seen as a classification problem, + - KEA + - now seen as a ranking problem + - ranking SVM + +finally, some sample code in python + +" +http://bdewilde.github.io/blog/2014/09/23/intro-to-automatic-keyphrase-extraction/|title|Intro to Automatic Keyphrase Extraction +http://bdewilde.github.io/blog/2014/09/23/intro-to-automatic-keyphrase-extraction/|creationTime|2017-06-14T00:08:15Z +http://maps.google.com/maps?f=q&hl=fr&q=niamey&ie=UTF8&z=14&ll=13.50582,2.107658&spn=0.062676,0.103168&t=h&om=1|creationDate|2006-09-25 +http://maps.google.com/maps?f=q&hl=fr&q=niamey&ie=UTF8&z=14&ll=13.50582,2.107658&spn=0.062676,0.103168&t=h&om=1|tag|http://www.semanlink.net/tag/niamey +http://maps.google.com/maps?f=q&hl=fr&q=niamey&ie=UTF8&z=14&ll=13.50582,2.107658&spn=0.062676,0.103168&t=h&om=1|tag|http://www.semanlink.net/tag/google_maps +http://maps.google.com/maps?f=q&hl=fr&q=niamey&ie=UTF8&z=14&ll=13.50582,2.107658&spn=0.062676,0.103168&t=h&om=1|tag|http://www.semanlink.net/tag/niger +http://maps.google.com/maps?f=q&hl=fr&q=niamey&ie=UTF8&z=14&ll=13.50582,2.107658&spn=0.062676,0.103168&t=h&om=1|comment|"2006-09-25 : le Niger a un bras mort au pied du Grand Hôtel +" +http://maps.google.com/maps?f=q&hl=fr&q=niamey&ie=UTF8&z=14&ll=13.50582,2.107658&spn=0.062676,0.103168&t=h&om=1|title|GoogleMap Niamey +https://www.wired.com/story/itunes-downloads-https-encryption/|creationDate|2018-12-09 +https://www.wired.com/story/itunes-downloads-https-encryption/|tag|http://www.semanlink.net/tag/itunes +https://www.wired.com/story/itunes-downloads-https-encryption/|tag|http://www.semanlink.net/tag/https +https://www.wired.com/story/itunes-downloads-https-encryption/|title|iTunes Doesn't Encrypt Downloads—on Purpose WIRED +https://www.wired.com/story/itunes-downloads-https-encryption/|creationTime|2018-12-09T15:47:00Z +https://arxiv.org/abs/1809.00782|creationDate|2018-09-06 +https://arxiv.org/abs/1809.00782|tag|http://www.semanlink.net/tag/open_domain_question_answering +https://arxiv.org/abs/1809.00782|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +https://arxiv.org/abs/1809.00782|tag|http://www.semanlink.net/tag/graph_embeddings +https://arxiv.org/abs/1809.00782|tag|http://www.semanlink.net/tag/these_irit_renault_biblio +https://arxiv.org/abs/1809.00782|tag|http://www.semanlink.net/tag/kg_and_nlp +https://arxiv.org/abs/1809.00782|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1809.00782|tag|http://www.semanlink.net/tag/emnlp_2018 +https://arxiv.org/abs/1809.00782|arxiv_author|Ruslan Salakhutdinov +https://arxiv.org/abs/1809.00782|arxiv_author|Manzil Zaheer +https://arxiv.org/abs/1809.00782|arxiv_author|Haitian Sun +https://arxiv.org/abs/1809.00782|arxiv_author|Kathryn Mazaitis +https://arxiv.org/abs/1809.00782|arxiv_author|Bhuwan Dhingra +https://arxiv.org/abs/1809.00782|arxiv_author|William W. Cohen +https://arxiv.org/abs/1809.00782|comment|"QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. 
+ +> In practice, some questions are best answered +using text, while others are best answered using +KBs. A natural question, then, is how to effectively +combine both types of information. Surprisingly +little prior work has looked at this problem." +https://arxiv.org/abs/1809.00782|title|[1809.00782] Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text +https://arxiv.org/abs/1809.00782|creationTime|2018-09-06T01:38:28Z +https://arxiv.org/abs/1809.00782|arxiv_summary|"Open Domain Question Answering (QA) is evolving from complex pipelined +systems to end-to-end deep neural networks. Specialized neural models have been +developed for extracting answers from either text alone or Knowledge Bases +(KBs) alone. In this paper we look at a more practical setting, namely QA over +the combination of a KB and entity-linked text, which is appropriate when an +incomplete KB is available with a large text corpus. Building on recent +advances in graph representation learning we propose a novel model, GRAFT-Net, +for extracting answers from a question-specific subgraph containing text and KB +entities and relations. We construct a suite of benchmark tasks for this +problem, varying the difficulty of questions, the amount of training data, and +KB completeness. We show that GRAFT-Net is competitive with the +state-of-the-art when tested using either KBs or text alone, and vastly +outperforms existing methods in the combined setting. Source code is available +at https://github.com/OceanskySun/GraftNet ." +https://arxiv.org/abs/1809.00782|arxiv_firstAuthor|Haitian Sun +https://arxiv.org/abs/1809.00782|arxiv_updated|2018-09-04T03:15:56Z +https://arxiv.org/abs/1809.00782|arxiv_title|Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text +https://arxiv.org/abs/1809.00782|arxiv_published|2018-09-04T03:15:56Z +https://arxiv.org/abs/1809.00782|arxiv_num|1809.00782 +https://www.youtube.com/watch?time_continue=3456&v=5qf_MZX0YCw|creationDate|2018-04-18 +https://www.youtube.com/watch?time_continue=3456&v=5qf_MZX0YCw|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +https://www.youtube.com/watch?time_continue=3456&v=5qf_MZX0YCw|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?time_continue=3456&v=5qf_MZX0YCw|tag|http://www.semanlink.net/tag/chris_manning +https://www.youtube.com/watch?time_continue=3456&v=5qf_MZX0YCw|comment|"Goal: to enhance DL systems with reasoning capabilities from the ground-up + +[Abstract](https://cs.unc.edu/tcsdls/tcsdls-bios-abstracts-2017-2018/#Manning) + +- allowing them to perform transparent multi-step reasoning processes +- while retaining end-to-end differentiability and scalability to real-world problems + +> I get the feeling that if we're going to make further progress in AI, we actually have to get back to some of these problems of knowledge representation reasoning + +- From ML to machine reasoning +- the CLEVR task +- Memory-Attention-Composition Networks + +What is reasoning? 
(Bottou 2011) + +- manipulating previously acquired knowledge in order to answer a question +- not necessarily achieved by making logical inference (eg: algebraic manipulations of matrices) +- composition rules -> combination of operations to address new tasks +" +https://www.youtube.com/watch?time_continue=3456&v=5qf_MZX0YCw|title|"Christopher Manning - ""Building Neural Network Models That Can Reason"" (TCSDLS 2017-2018) - YouTube" +https://www.youtube.com/watch?time_continue=3456&v=5qf_MZX0YCw|creationTime|2018-04-18T00:14:39Z +http://semanticweb.com/rdfa-1-1-lite_b24088#more-24088|creationDate|2011-10-23 +http://semanticweb.com/rdfa-1-1-lite_b24088#more-24088|tag|http://www.semanlink.net/tag/rdfa_1_1_lite +http://semanticweb.com/rdfa-1-1-lite_b24088#more-24088|title|Introduction to: RDFa 1.1 Lite - semanticweb.com +http://semanticweb.com/rdfa-1-1-lite_b24088#more-24088|creationTime|2011-10-23T17:52:20Z +http://code.google.com/p/rdfquery/source/browse/trunk/jquery.rdf.turtle.js?r=182|creationDate|2012-10-03 +http://code.google.com/p/rdfquery/source/browse/trunk/jquery.rdf.turtle.js?r=182|tag|http://www.semanlink.net/tag/rdfquery +http://code.google.com/p/rdfquery/source/browse/trunk/jquery.rdf.turtle.js?r=182|tag|http://www.semanlink.net/tag/turtle +http://code.google.com/p/rdfquery/source/browse/trunk/jquery.rdf.turtle.js?r=182|title|jquery.rdf.turtle.js - rdfquery - RDF processing in your browser - Google Project Hosting +http://code.google.com/p/rdfquery/source/browse/trunk/jquery.rdf.turtle.js?r=182|creationTime|2012-10-03T14:18:00Z +https://arxiv.org/abs/1806.01261|creationDate|2018-06-13 +https://arxiv.org/abs/1806.01261|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +https://arxiv.org/abs/1806.01261|tag|http://www.semanlink.net/tag/artificial_general_intelligence +https://arxiv.org/abs/1806.01261|tag|http://www.semanlink.net/tag/combinatorial_generalization +https://arxiv.org/abs/1806.01261|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1806.01261|tag|http://www.semanlink.net/tag/google_deepmind +https://arxiv.org/abs/1806.01261|tag|http://www.semanlink.net/tag/todo_read +https://arxiv.org/abs/1806.01261|tag|http://www.semanlink.net/tag/frequently_cited_paper +https://arxiv.org/abs/1806.01261|tag|http://www.semanlink.net/tag/ml_google +https://arxiv.org/abs/1806.01261|tag|http://www.semanlink.net/tag/relational_inductive_biases +https://arxiv.org/abs/1806.01261|tag|http://www.semanlink.net/tag/graph_neural_networks +https://arxiv.org/abs/1806.01261|arxiv_author|Justin Gilmer +https://arxiv.org/abs/1806.01261|arxiv_author|Ashish Vaswani +https://arxiv.org/abs/1806.01261|arxiv_author|Mateusz Malinowski +https://arxiv.org/abs/1806.01261|arxiv_author|Caglar Gulcehre +https://arxiv.org/abs/1806.01261|arxiv_author|Jessica B. 
Hamrick +https://arxiv.org/abs/1806.01261|arxiv_author|Yujia Li +https://arxiv.org/abs/1806.01261|arxiv_author|Pushmeet Kohli +https://arxiv.org/abs/1806.01261|arxiv_author|Daan Wierstra +https://arxiv.org/abs/1806.01261|arxiv_author|David Raposo +https://arxiv.org/abs/1806.01261|arxiv_author|Victoria Langston +https://arxiv.org/abs/1806.01261|arxiv_author|Andrew Ballard +https://arxiv.org/abs/1806.01261|arxiv_author|Oriol Vinyals +https://arxiv.org/abs/1806.01261|arxiv_author|Adam Santoro +https://arxiv.org/abs/1806.01261|arxiv_author|Kelsey Allen +https://arxiv.org/abs/1806.01261|arxiv_author|Chris Dyer +https://arxiv.org/abs/1806.01261|arxiv_author|Nicolas Heess +https://arxiv.org/abs/1806.01261|arxiv_author|Francis Song +https://arxiv.org/abs/1806.01261|arxiv_author|Victor Bapst +https://arxiv.org/abs/1806.01261|arxiv_author|Ryan Faulkner +https://arxiv.org/abs/1806.01261|arxiv_author|Alvaro Sanchez-Gonzalez +https://arxiv.org/abs/1806.01261|arxiv_author|Peter W. Battaglia +https://arxiv.org/abs/1806.01261|arxiv_author|Razvan Pascanu +https://arxiv.org/abs/1806.01261|arxiv_author|Matt Botvinick +https://arxiv.org/abs/1806.01261|arxiv_author|Andrea Tacchetti +https://arxiv.org/abs/1806.01261|arxiv_author|Charles Nash +https://arxiv.org/abs/1806.01261|arxiv_author|Vinicius Zambaldi +https://arxiv.org/abs/1806.01261|arxiv_author|George Dahl +https://arxiv.org/abs/1806.01261|comment|"> generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI + +> A key signature of human intelligence is the ability to make infine use of finite means"" (Humboldt, +1836; Chomsky, 1965) (ex: words / sentences + +> Here we explore how to improve modern AI's capacity for **combinatorial generalization** by +biasing learning towards structured representations and computations, and in particular, systems +that operate on graphs. + +(papier recommandé par [Peter Bloem](tag:peter_bloem))" +https://arxiv.org/abs/1806.01261|title|[1806.01261] Relational inductive biases, deep learning, and graph networks +https://arxiv.org/abs/1806.01261|creationTime|2018-06-13T13:34:03Z +https://arxiv.org/abs/1806.01261|arxiv_summary|"Artificial intelligence (AI) has undergone a renaissance recently, making +major progress in key domains such as vision, language, control, and +decision-making. This has been due, in part, to cheap data and cheap compute +resources, which have fit the natural strengths of deep learning. However, many +defining characteristics of human intelligence, which developed under much +different pressures, remain out of reach for current approaches. In particular, +generalizing beyond one's experiences--a hallmark of human intelligence from +infancy--remains a formidable challenge for modern AI. +The following is part position paper, part review, and part unification. We +argue that combinatorial generalization must be a top priority for AI to +achieve human-like abilities, and that structured representations and +computations are key to realizing this objective. Just as biology uses nature +and nurture cooperatively, we reject the false choice between +""hand-engineering"" and ""end-to-end"" learning, and instead advocate for an +approach which benefits from their complementary strengths. We explore how +using relational inductive biases within deep learning architectures can +facilitate learning about entities, relations, and rules for composing them. 
We +present a new building block for the AI toolkit with a strong relational +inductive bias--the graph network--which generalizes and extends various +approaches for neural networks that operate on graphs, and provides a +straightforward interface for manipulating structured knowledge and producing +structured behaviors. We discuss how graph networks can support relational +reasoning and combinatorial generalization, laying the foundation for more +sophisticated, interpretable, and flexible patterns of reasoning. As a +companion to this paper, we have released an open-source software library for +building graph networks, with demonstrations of how to use them in practice." +https://arxiv.org/abs/1806.01261|arxiv_firstAuthor|Peter W. Battaglia +https://arxiv.org/abs/1806.01261|arxiv_updated|2018-10-17T17:51:36Z +https://arxiv.org/abs/1806.01261|arxiv_title|Relational inductive biases, deep learning, and graph networks +https://arxiv.org/abs/1806.01261|arxiv_published|2018-06-04T17:58:18Z +https://arxiv.org/abs/1806.01261|arxiv_num|1806.01261 +https://www.nitrd.gov/nitrdgroups/images/9/96/OKN_Moore_Guha.pdf|creationDate|2017-10-21 +https://www.nitrd.gov/nitrdgroups/images/9/96/OKN_Moore_Guha.pdf|tag|http://www.semanlink.net/tag/guha +https://www.nitrd.gov/nitrdgroups/images/9/96/OKN_Moore_Guha.pdf|tag|http://www.semanlink.net/tag/open_knowledge_network +https://www.nitrd.gov/nitrdgroups/images/9/96/OKN_Moore_Guha.pdf|title|Open Knowledge Network +https://www.nitrd.gov/nitrdgroups/images/9/96/OKN_Moore_Guha.pdf|creationTime|2017-10-21T11:44:40Z +https://www.talentbuddy.co/blog/building-with-node-js-at-netflix/|creationDate|2015-06-28 +https://www.talentbuddy.co/blog/building-with-node-js-at-netflix/|tag|http://www.semanlink.net/tag/node_js +https://www.talentbuddy.co/blog/building-with-node-js-at-netflix/|tag|http://www.semanlink.net/tag/netflix +https://www.talentbuddy.co/blog/building-with-node-js-at-netflix/|title|Building With Node.js At Netflix +https://www.talentbuddy.co/blog/building-with-node-js-at-netflix/|creationTime|2015-06-28T09:30:53Z +http://www.lemonde.fr/technologies/article/2014/04/10/big-brother-ce-vendeur_4399335_651865.html|creationDate|2014-04-13 +http://www.lemonde.fr/technologies/article/2014/04/10/big-brother-ce-vendeur_4399335_651865.html|tag|http://www.semanlink.net/tag/manipulation +http://www.lemonde.fr/technologies/article/2014/04/10/big-brother-ce-vendeur_4399335_651865.html|tag|http://www.semanlink.net/tag/cookie +http://www.lemonde.fr/technologies/article/2014/04/10/big-brother-ce-vendeur_4399335_651865.html|tag|http://www.semanlink.net/tag/big_brother +http://www.lemonde.fr/technologies/article/2014/04/10/big-brother-ce-vendeur_4399335_651865.html|title|Comment les sites de commerce nous manipulent +http://www.lemonde.fr/technologies/article/2014/04/10/big-brother-ce-vendeur_4399335_651865.html|creationTime|2014-04-13T19:33:43Z +http://aclweb.org/anthology/C18-1139|creationDate|2018-08-24 +http://aclweb.org/anthology/C18-1139|tag|http://www.semanlink.net/tag/word_embedding +http://aclweb.org/anthology/C18-1139|comment|"> we propose to leverage the internal states of a trained character language model to produce a novel type of word embedding which we refer to as contextual string embeddings. 
Our proposed embeddings have the distinct properties that they (a) are trained without any explicit notion of words and thus fundamentally model words as sequences of characters, and (b) are contextualized by their surrounding text, meaning that the same word will have different embeddings depending on its contextual use. + +[Github](https://github.com/zalandoresearch/flair) +" +http://aclweb.org/anthology/C18-1139|relatedDoc|https://github.com/zalandoresearch/flair +http://aclweb.org/anthology/C18-1139|title|Contextual String Embeddings for Sequence Labeling (2018) +http://aclweb.org/anthology/C18-1139|creationTime|2018-08-24T10:08:38Z +http://inverseprobability.com/talks/notes/gaussian-processes.html|creationDate|2019-02-11 +http://inverseprobability.com/talks/notes/gaussian-processes.html|tag|http://www.semanlink.net/tag/gaussian_process +http://inverseprobability.com/talks/notes/gaussian-processes.html|title|Gaussian Processes +http://inverseprobability.com/talks/notes/gaussian-processes.html|creationTime|2019-02-11T11:45:29Z +https://opendata.stackexchange.com/questions/3378/french-equivalent-of-the-brown-corpus|creationDate|2017-06-20 +https://opendata.stackexchange.com/questions/3378/french-equivalent-of-the-brown-corpus|tag|http://www.semanlink.net/tag/nlp_french +https://opendata.stackexchange.com/questions/3378/french-equivalent-of-the-brown-corpus|tag|http://www.semanlink.net/tag/brown_corpus +https://opendata.stackexchange.com/questions/3378/french-equivalent-of-the-brown-corpus|title|French equivalent of the brown corpus - Open Data Stack Exchange +https://opendata.stackexchange.com/questions/3378/french-equivalent-of-the-brown-corpus|creationTime|2017-06-20T13:45:35Z +http://www.semanlink.net/doc/2019/05/exposition_royaumes_oublies_|creationDate|2019-05-19 +http://www.semanlink.net/doc/2019/05/exposition_royaumes_oublies_|tag|http://www.semanlink.net/tag/louvre +http://www.semanlink.net/doc/2019/05/exposition_royaumes_oublies_|tag|http://www.semanlink.net/tag/arameen +http://www.semanlink.net/doc/2019/05/exposition_royaumes_oublies_|tag|http://www.semanlink.net/tag/hittite +http://www.semanlink.net/doc/2019/05/exposition_royaumes_oublies_|tag|http://www.semanlink.net/tag/decouverte_archeologique +http://www.semanlink.net/doc/2019/05/exposition_royaumes_oublies_|title|Exposition - Royaumes oubliés - De l'empire hittite aux Araméens Musée du Louvre Paris +http://www.semanlink.net/doc/2019/05/exposition_royaumes_oublies_|bookmarkOf|https://www.louvre.fr/expositions/royaumes-oubliesde-l-empire-hittite-aux-arameens +http://www.semanlink.net/doc/2019/05/exposition_royaumes_oublies_|creationTime|2019-05-19T13:48:44Z +http://www.dw-world.de/dw/0,,2617,00.html|creationDate|2010-06-30 +http://www.dw-world.de/dw/0,,2617,00.html|tag|http://www.semanlink.net/tag/deutsch +http://www.dw-world.de/dw/0,,2617,00.html|title|Apprendre l’allemand Deutsche Welle +http://www.dw-world.de/dw/0,,2617,00.html|creationTime|2010-06-30T00:42:38Z +http://www.opposingviews.com/i/technology/eu-vp-aaron-swartz-if-our-laws-hold-back-benefits-openness-we-should-change-those-laws|creationDate|2013-01-29 +http://www.opposingviews.com/i/technology/eu-vp-aaron-swartz-if-our-laws-hold-back-benefits-openness-we-should-change-those-laws|tag|http://www.semanlink.net/tag/neelie_kroes +http://www.opposingviews.com/i/technology/eu-vp-aaron-swartz-if-our-laws-hold-back-benefits-openness-we-should-change-those-laws|tag|http://www.semanlink.net/tag/aaron_swartz 
+http://www.opposingviews.com/i/technology/eu-vp-aaron-swartz-if-our-laws-hold-back-benefits-openness-we-should-change-those-laws|comment|"""if our laws, frameworks and practices stand in the way of us getting all those benefits [of openness], then maybe they need to be changed""" +http://www.opposingviews.com/i/technology/eu-vp-aaron-swartz-if-our-laws-hold-back-benefits-openness-we-should-change-those-laws|title|European Commission VP Neelie Kroes Weighs in on Aaron Swartz +http://www.opposingviews.com/i/technology/eu-vp-aaron-swartz-if-our-laws-hold-back-benefits-openness-we-should-change-those-laws|creationTime|2013-01-29T18:33:53Z +https://github.com/docker-solr/docker-solr/blob/master/Docker-FAQ.md|creationDate|2016-05-10 +https://github.com/docker-solr/docker-solr/blob/master/Docker-FAQ.md|tag|http://www.semanlink.net/tag/solr +https://github.com/docker-solr/docker-solr/blob/master/Docker-FAQ.md|tag|http://www.semanlink.net/tag/docker_volumes +https://github.com/docker-solr/docker-solr/blob/master/Docker-FAQ.md|tag|http://www.semanlink.net/tag/docker +https://github.com/docker-solr/docker-solr/blob/master/Docker-FAQ.md|comment|see How can I mount a host directory as a data volume? +https://github.com/docker-solr/docker-solr/blob/master/Docker-FAQ.md|title|docker-solr/Docker-FAQ.md at master · docker-solr/docker-solr +https://github.com/docker-solr/docker-solr/blob/master/Docker-FAQ.md|creationTime|2016-05-10T01:34:13Z +https://arxiv.org/abs/1607.01759|creationDate|2017-09-10 +https://arxiv.org/abs/1607.01759|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1607.01759|tag|http://www.semanlink.net/tag/sentence_embeddings +https://arxiv.org/abs/1607.01759|tag|http://www.semanlink.net/tag/fasttext +https://arxiv.org/abs/1607.01759|tag|http://www.semanlink.net/tag/tomas_mikolov +https://arxiv.org/abs/1607.01759|tag|http://www.semanlink.net/tag/nlp_text_classification +https://arxiv.org/abs/1607.01759|tag|http://www.semanlink.net/tag/nlp_facebook +https://arxiv.org/abs/1607.01759|arxiv_author|Piotr Bojanowski +https://arxiv.org/abs/1607.01759|arxiv_author|Edouard Grave +https://arxiv.org/abs/1607.01759|arxiv_author|Tomas Mikolov +https://arxiv.org/abs/1607.01759|arxiv_author|Armand Joulin +https://arxiv.org/abs/1607.01759|comment|"A simple and efficient baseline for text classification. + +**Our word features can +be averaged** together to form good sentence representations. + +Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. +" +https://arxiv.org/abs/1607.01759|title|[1607.01759] Bag of Tricks for Efficient Text Classification +https://arxiv.org/abs/1607.01759|creationTime|2017-09-10T12:07:48Z +https://arxiv.org/abs/1607.01759|arxiv_summary|"This paper explores a simple and efficient baseline for text classification. +Our experiments show that our fast text classifier fastText is often on par +with deep learning classifiers in terms of accuracy, and many orders of +magnitude faster for training and evaluation. We can train fastText on more +than one billion words in less than ten minutes using a standard multicore~CPU, +and classify half a million sentences among~312K classes in less than a minute." 
+https://arxiv.org/abs/1607.01759|arxiv_firstAuthor|Armand Joulin +https://arxiv.org/abs/1607.01759|arxiv_updated|2016-08-09T17:38:43Z +https://arxiv.org/abs/1607.01759|arxiv_title|Bag of Tricks for Efficient Text Classification +https://arxiv.org/abs/1607.01759|arxiv_published|2016-07-06T19:40:15Z +https://arxiv.org/abs/1607.01759|arxiv_num|1607.01759 +http://www.hydra-cg.com/|creationDate|2014-10-29 +http://www.hydra-cg.com/|tag|http://www.semanlink.net/tag/hydra +http://www.hydra-cg.com/|tag|http://www.semanlink.net/tag/linked_data_fragments +http://www.hydra-cg.com/|tag|http://www.semanlink.net/tag/w3c_community_group +http://www.hydra-cg.com/|comment|Hydra simplifies the development of interoperable, hypermedia-driven Web APIs +http://www.hydra-cg.com/|title|Hydra W3C Community Group +http://www.hydra-cg.com/|creationTime|2014-10-29T01:28:52Z +https://github.com/thunlp/OpenKE|creationDate|2019-04-23 +https://github.com/thunlp/OpenKE|tag|http://www.semanlink.net/tag/emnlp_2018 +https://github.com/thunlp/OpenKE|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +https://github.com/thunlp/OpenKE|tag|http://www.semanlink.net/tag/github_project +https://github.com/thunlp/OpenKE|comment|[paper at EMNLP 2018](https://www.aclweb.org/anthology/papers/D/D18/D18-2024/) +https://github.com/thunlp/OpenKE|title|thunlp/OpenKE: An Open-Source Package for Knowledge Embedding (KE) +https://github.com/thunlp/OpenKE|creationTime|2019-04-23T20:10:11Z +https://aclanthology.coli.uni-saarland.de/papers/D18-1011/d18-1011|creationDate|2018-11-15 +https://aclanthology.coli.uni-saarland.de/papers/D18-1011/d18-1011|tag|http://www.semanlink.net/tag/multimodal_models +https://aclanthology.coli.uni-saarland.de/papers/D18-1011/d18-1011|tag|http://www.semanlink.net/tag/autoencoder +https://aclanthology.coli.uni-saarland.de/papers/D18-1011/d18-1011|tag|http://www.semanlink.net/tag/emnlp_2018 +https://aclanthology.coli.uni-saarland.de/papers/D18-1011/d18-1011|tag|http://www.semanlink.net/tag/grounded_language_learning +https://aclanthology.coli.uni-saarland.de/papers/D18-1011/d18-1011|comment|"learning multimodal word representations by integrating textual, visual and auditory inputs. 
+ + +" +https://aclanthology.coli.uni-saarland.de/papers/D18-1011/d18-1011|title|Associative Multichannel Autoencoder for Multimodal Word Representation (2018) +https://aclanthology.coli.uni-saarland.de/papers/D18-1011/d18-1011|creationTime|2018-11-15T01:27:25Z +http://online.wsj.com/article/SB10001424053111903480904576512250915629460.html|creationDate|2011-08-20 +http://online.wsj.com/article/SB10001424053111903480904576512250915629460.html|tag|http://www.semanlink.net/tag/software +http://online.wsj.com/article/SB10001424053111903480904576512250915629460.html|tag|http://www.semanlink.net/tag/economie +http://online.wsj.com/article/SB10001424053111903480904576512250915629460.html|title|Marc Andreessen on Why Software Is Eating the World - WSJ.com +http://online.wsj.com/article/SB10001424053111903480904576512250915629460.html|creationTime|2011-08-20T23:14:43Z +http://www.bbc.co.uk/blogs/radiolabs/2009/04/brands_series_categories_and_t.shtml|creationDate|2011-01-18 +http://www.bbc.co.uk/blogs/radiolabs/2009/04/brands_series_categories_and_t.shtml|tag|http://www.semanlink.net/tag/linked_data +http://www.bbc.co.uk/blogs/radiolabs/2009/04/brands_series_categories_and_t.shtml|tag|http://www.semanlink.net/tag/bbc_programmes +http://www.bbc.co.uk/blogs/radiolabs/2009/04/brands_series_categories_and_t.shtml|tag|http://www.semanlink.net/tag/yves_raymond +http://www.bbc.co.uk/blogs/radiolabs/2009/04/brands_series_categories_and_t.shtml|comment|The general philosophy behind BBC Programmes is that our web site is our API +http://www.bbc.co.uk/blogs/radiolabs/2009/04/brands_series_categories_and_t.shtml|title|BBC - Radio Labs: Brands, series, categories and tracklists on the new BBC Programmes +http://www.bbc.co.uk/blogs/radiolabs/2009/04/brands_series_categories_and_t.shtml|creationTime|2011-01-18T22:54:18Z +http://esw.w3.org/topic/SweoIG/TaskForces/Use_Cases|creationDate|2007-04-28 +http://esw.w3.org/topic/SweoIG/TaskForces/Use_Cases|tag|http://www.semanlink.net/tag/ivan_herman +http://esw.w3.org/topic/SweoIG/TaskForces/Use_Cases|tag|http://www.semanlink.net/tag/semantic_web_use_cases +http://esw.w3.org/topic/SweoIG/TaskForces/Use_Cases|tag|http://www.semanlink.net/tag/sweo_interest_group +http://esw.w3.org/topic/SweoIG/TaskForces/Use_Cases|title|Practical Semantic Web Use Cases on ESW Wiki +http://esw.w3.org/topic/SweoIG/TaskForces/Use_Cases|creationTime|2007-04-28T16:47:30Z +http://deeplearning.net/software_links/|creationDate|2016-01-12 +http://deeplearning.net/software_links/|tag|http://www.semanlink.net/tag/machine_learning_tool +http://deeplearning.net/software_links/|tag|http://www.semanlink.net/tag/deep_learning +http://deeplearning.net/software_links/|title|Software links « Deep Learning +http://deeplearning.net/software_links/|creationTime|2016-01-12T18:36:13Z +http://i.imgur.com/dAtcCfH.gif|creationDate|2013-04-11 +http://i.imgur.com/dAtcCfH.gif|tag|http://www.semanlink.net/tag/souvenirs +http://i.imgur.com/dAtcCfH.gif|tag|http://www.semanlink.net/tag/jeux +http://i.imgur.com/dAtcCfH.gif|title|snake byte +http://i.imgur.com/dAtcCfH.gif|creationTime|2013-04-11T22:03:52Z +http://lists.w3.org/Archives/Public/semantic-web/2008May/0078.html|creationDate|2008-05-15 +http://lists.w3.org/Archives/Public/semantic-web/2008May/0078.html|tag|http://www.semanlink.net/tag/sw_coreferences +http://lists.w3.org/Archives/Public/semantic-web/2008May/0078.html|tag|http://www.semanlink.net/tag/uri_synonymity +http://lists.w3.org/Archives/Public/semantic-web/2008May/0078.html|title|Managing Co-reference (Was: A 
Semantic Elephant?) +http://lists.w3.org/Archives/Public/semantic-web/2008May/0078.html|creationTime|2008-05-15T22:23:32Z +https://www.lemonde.fr/societe/article/2018/12/15/sur-les-ronds-points-les-gilets-jaunes-a-la-croisee-des-chemins_5397928_3224.html|creationDate|2018-12-15 +https://www.lemonde.fr/societe/article/2018/12/15/sur-les-ronds-points-les-gilets-jaunes-a-la-croisee-des-chemins_5397928_3224.html|tag|http://www.semanlink.net/tag/gilets_jaunes +https://www.lemonde.fr/societe/article/2018/12/15/sur-les-ronds-points-les-gilets-jaunes-a-la-croisee-des-chemins_5397928_3224.html|tag|http://www.semanlink.net/tag/florence_aubenas +https://www.lemonde.fr/societe/article/2018/12/15/sur-les-ronds-points-les-gilets-jaunes-a-la-croisee-des-chemins_5397928_3224.html|title|« Gilets jaunes » : « La révolte des ronds-points », par Florence Aubenas +https://www.lemonde.fr/societe/article/2018/12/15/sur-les-ronds-points-les-gilets-jaunes-a-la-croisee-des-chemins_5397928_3224.html|creationTime|2018-12-15T10:31:06Z +http://techoop.insite.coop/content/configuration-avancee-du-moteur-solr-0|creationDate|2015-03-11 +http://techoop.insite.coop/content/configuration-avancee-du-moteur-solr-0|tag|http://www.semanlink.net/tag/solr_not_english_only +http://techoop.insite.coop/content/configuration-avancee-du-moteur-solr-0|comment|Cette fiche explique principalement comment paramétrer l’analyse textuelle et comment améliorer le fichier de schéma pour l’accorder à ses besoins, notamment par une meilleure gestion du français. +http://techoop.insite.coop/content/configuration-avancee-du-moteur-solr-0|title|Configuration avancée du moteur Solr techoop +http://techoop.insite.coop/content/configuration-avancee-du-moteur-solr-0|creationTime|2015-03-11T00:15:21Z +https://www.quantamagazine.org/quantas-year-in-biology-2018-20181221/|creationDate|2018-12-23 +https://www.quantamagazine.org/quantas-year-in-biology-2018-20181221/|tag|http://www.semanlink.net/tag/2018 +https://www.quantamagazine.org/quantas-year-in-biology-2018-20181221/|tag|http://www.semanlink.net/tag/biologie +https://www.quantamagazine.org/quantas-year-in-biology-2018-20181221/|title|Quanta’s Year in Biology (2018) Quanta Magazine +https://www.quantamagazine.org/quantas-year-in-biology-2018-20181221/|creationTime|2018-12-23T23:43:00Z +http://www.pbs.org/cringely/pulpit/2006/pulpit_20061102_001174.html|creationDate|2006-11-06 +http://www.pbs.org/cringely/pulpit/2006/pulpit_20061102_001174.html|tag|http://www.semanlink.net/tag/chine +http://www.pbs.org/cringely/pulpit/2006/pulpit_20061102_001174.html|tag|http://www.semanlink.net/tag/cringely +http://www.pbs.org/cringely/pulpit/2006/pulpit_20061102_001174.html|tag|http://www.semanlink.net/tag/ipv6 +http://www.pbs.org/cringely/pulpit/2006/pulpit_20061102_001174.html|tag|http://www.semanlink.net/tag/armee_americaine +http://www.pbs.org/cringely/pulpit/2006/pulpit_20061102_001174.html|tag|http://www.semanlink.net/tag/future_combat_systems +http://www.pbs.org/cringely/pulpit/2006/pulpit_20061102_001174.html|comment|"Implementing IPv6 will incur an infrastructure cost of around $200 billion, and that's just for the U.S....
In the current addressing scheme, China received a very small number of IP addresses, and this was causing them a lot of difficulty...So they made a national decision to implement IPv6 and put in a good network design...Of course, the rest of the world is still on the old system and to communicate with China an address translation is needed. This is becoming a pain. Countries who want to do lots of business with China or who want to do lots of business through the Internet (India) are now seriously looking at their own IPv6 plans... +
+China has done something very impressive and now others are taking notice. We (the U.S.) think we control the Internet, but China is proving otherwise. +
+And what is happening in the USA? Well we have Net Neutrality. We have a telco rebuilding a national monopoly. We have Cisco and Microsoft working together on Network Admission Control (NAC). I can see a time in the near future when they'll try to charge me for every PC in my house. While China is building a national resource, our government is letting companies turn the public Internet into an expensive private toll road. +
+But we'll move to IPv6, that's for sure, if only to make sure Halliburton has plenty of business. + +" +http://www.pbs.org/cringely/pulpit/2006/pulpit_20061102_001174.html|title|I, Cringely . The Pulpit . The $200 Billion Lunch PBS +http://passeurdesciences.blog.lemonde.fr/2012/05/28/les-humains-sont-apparentes-aux-virus/|creationDate|2012-05-29 +http://passeurdesciences.blog.lemonde.fr/2012/05/28/les-humains-sont-apparentes-aux-virus/|tag|http://www.semanlink.net/tag/virus +http://passeurdesciences.blog.lemonde.fr/2012/05/28/les-humains-sont-apparentes-aux-virus/|comment|8% du génome humain d'origine virale +http://passeurdesciences.blog.lemonde.fr/2012/05/28/les-humains-sont-apparentes-aux-virus/|title|« Les humains sont apparentés aux virus » Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2012/05/28/les-humains-sont-apparentes-aux-virus/|creationTime|2012-05-29T08:35:47Z +http://stackoverflow.com/questions/4869815/importing-a-maven-project-into-eclipse-from-git|creationDate|2012-09-06 +http://stackoverflow.com/questions/4869815/importing-a-maven-project-into-eclipse-from-git|tag|http://www.semanlink.net/tag/git +http://stackoverflow.com/questions/4869815/importing-a-maven-project-into-eclipse-from-git|tag|http://www.semanlink.net/tag/egit +http://stackoverflow.com/questions/4869815/importing-a-maven-project-into-eclipse-from-git|tag|http://www.semanlink.net/tag/eclipse +http://stackoverflow.com/questions/4869815/importing-a-maven-project-into-eclipse-from-git|tag|http://www.semanlink.net/tag/m2eclipse +http://stackoverflow.com/questions/4869815/importing-a-maven-project-into-eclipse-from-git|tag|http://www.semanlink.net/tag/maven +http://stackoverflow.com/questions/4869815/importing-a-maven-project-into-eclipse-from-git|comment|"j'ai fait ce qui est dit ds meilleure réponse, sélectionné ds ""more SCM connectors in the m2e Marketplace"" egit. Demande un mdp tigris - cancel. Hélas: cannot complete the install, cannot satisfy depedency, missing requirement: maven scm handler for egit. 
solution + + + +" +http://stackoverflow.com/questions/4869815/importing-a-maven-project-into-eclipse-from-git|title|importing a maven project into eclipse from git - Stack Overflow +http://stackoverflow.com/questions/4869815/importing-a-maven-project-into-eclipse-from-git|creationTime|2012-09-06T13:27:38Z +https://github.com/RaRe-Technologies/gensim/blob/cc74b668ccbbfd558d5a54050c4489e6e06fed3d/docs/notebooks/gensim_news_classification.ipynb|creationDate|2017-06-07 +https://github.com/RaRe-Technologies/gensim/blob/cc74b668ccbbfd558d5a54050c4489e6e06fed3d/docs/notebooks/gensim_news_classification.ipynb|tag|http://www.semanlink.net/tag/gensim +https://github.com/RaRe-Technologies/gensim/blob/cc74b668ccbbfd558d5a54050c4489e6e06fed3d/docs/notebooks/gensim_news_classification.ipynb|tag|http://www.semanlink.net/tag/topic_modeling +https://github.com/RaRe-Technologies/gensim/blob/cc74b668ccbbfd558d5a54050c4489e6e06fed3d/docs/notebooks/gensim_news_classification.ipynb|tag|http://www.semanlink.net/tag/nlp_sample_code +https://github.com/RaRe-Technologies/gensim/blob/cc74b668ccbbfd558d5a54050c4489e6e06fed3d/docs/notebooks/gensim_news_classification.ipynb|tag|http://www.semanlink.net/tag/ipython_notebook +https://github.com/RaRe-Technologies/gensim/blob/cc74b668ccbbfd558d5a54050c4489e6e06fed3d/docs/notebooks/gensim_news_classification.ipynb|title|News classification with topic models in gensim +https://github.com/RaRe-Technologies/gensim/blob/cc74b668ccbbfd558d5a54050c4489e6e06fed3d/docs/notebooks/gensim_news_classification.ipynb|creationTime|2017-06-07T13:16:18Z +http://www.astrosurf.com/luxorion/bio-role-virus-evolution2.htm|creationDate|2011-03-27 +http://www.astrosurf.com/luxorion/bio-role-virus-evolution2.htm|tag|http://www.semanlink.net/tag/evolution +http://www.astrosurf.com/luxorion/bio-role-virus-evolution2.htm|tag|http://www.semanlink.net/tag/adn +http://www.astrosurf.com/luxorion/bio-role-virus-evolution2.htm|tag|http://www.semanlink.net/tag/virus +http://www.astrosurf.com/luxorion/bio-role-virus-evolution2.htm|tag|http://www.semanlink.net/tag/genomique +http://www.astrosurf.com/luxorion/bio-role-virus-evolution2.htm|comment|La comparaison des gènes d'espèces appartenant aux trois principaux domaines indique que 324 gènes seulement furent conservés entre les trois embranchements, dérivant probablement d'un ancêtre commun. On peut donc supposer qu'il exista un cellule progénitrice de toutes les formes de vie, LUCA, le dernier ancêtre commun universel. Toutefois les gènes contrôlant la réplication de l'ADN, pourtant si cruciaux pour l'identité génomique et sa conservation ne font pas partie de cet ensemble ! Aussi, en terme de nombre de gènes, un virus à ADN pourrait facilement fournir cette quantité de gènes. On peut imaginer que les premiers virus à ADN étaient des cellules dégénérées correspondant à des lignées très anciennes aujourd'hui disparues, ayant ou non précédé LUCA. 
+http://www.astrosurf.com/luxorion/bio-role-virus-evolution2.htm|title|Le rôle des virus dans l'évolution +http://www.astrosurf.com/luxorion/bio-role-virus-evolution2.htm|creationTime|2011-03-27T14:30:25Z +http://www.afdb.org/en/news-and-events/article/africas-information-highway-the-afdb-launches-open-data-platforms-for-20-african-countries-11604/|creationDate|2013-03-18 +http://www.afdb.org/en/news-and-events/article/africas-information-highway-the-afdb-launches-open-data-platforms-for-20-african-countries-11604/|tag|http://www.semanlink.net/tag/open_data +http://www.afdb.org/en/news-and-events/article/africas-information-highway-the-afdb-launches-open-data-platforms-for-20-african-countries-11604/|tag|http://www.semanlink.net/tag/afrique +http://www.afdb.org/en/news-and-events/article/africas-information-highway-the-afdb-launches-open-data-platforms-for-20-african-countries-11604/|title|Africa’s Information Highway - The AfDB Launches Open Data Platforms for 20 African Countries - African Development Bank +http://www.afdb.org/en/news-and-events/article/africas-information-highway-the-afdb-launches-open-data-platforms-for-20-african-countries-11604/|creationTime|2013-03-18T10:56:51Z +http://twill.apache.org/|creationDate|2017-04-13 +http://twill.apache.org/|tag|http://www.semanlink.net/tag/apache_org +http://twill.apache.org/|tag|http://www.semanlink.net/tag/distributed_computing +http://twill.apache.org/|comment|abstraction over Apache Hadoop® YARN that reduces the complexity of developing distributed applications. Allows you to use YARN’s distributed capabilities with a programming model that is similar to running threads. +http://twill.apache.org/|title|Apache Twill – Home +http://twill.apache.org/|creationTime|2017-04-13T17:00:54Z +https://alexgkendall.com/computer_vision/bayesian_deep_learning_for_safe_ai/|creationDate|2018-11-05 +https://alexgkendall.com/computer_vision/bayesian_deep_learning_for_safe_ai/|tag|http://www.semanlink.net/tag/bayesian_deep_learning +https://alexgkendall.com/computer_vision/bayesian_deep_learning_for_safe_ai/|title|Deep Learning Is Not Good Enough, We Need Bayesian Deep Learning for Safe AI - Home +https://alexgkendall.com/computer_vision/bayesian_deep_learning_for_safe_ai/|creationTime|2018-11-05T09:50:17Z +http://keshif.me/demo/VisTools|creationDate|2015-09-02 +http://keshif.me/demo/VisTools|tag|http://www.semanlink.net/tag/data_visualization_tools +http://keshif.me/demo/VisTools|title|Visualization Tools +http://keshif.me/demo/VisTools|creationTime|2015-09-02T13:13:22Z +http://www.bbc.co.uk/news/technology-16306742|creationDate|2011-12-28 +http://www.bbc.co.uk/news/technology-16306742|tag|http://www.semanlink.net/tag/summly +http://www.bbc.co.uk/news/technology-16306742|title|BBC News - British teenage designer of Summly app hits jackpot +http://www.bbc.co.uk/news/technology-16306742|creationTime|2011-12-28T12:53:01Z +http://dig.csail.mit.edu/breadcrumbs/node/149|creationDate|2006-07-17 +http://dig.csail.mit.edu/breadcrumbs/node/149|tag|http://www.semanlink.net/tag/javascript_rdf_parser +http://dig.csail.mit.edu/breadcrumbs/node/149|comment|To the best of my knowledge, RDFParser is fully compliant with the RDF/XML specification. 
The parser passes all of the positive parser test cases from the W3 +http://dig.csail.mit.edu/breadcrumbs/node/149|title|JavaScript RDF/XML Parser +http://n2.talis.com/wiki/RDF_JSON_Specification|creationDate|2010-12-06 +http://n2.talis.com/wiki/RDF_JSON_Specification|tag|http://www.semanlink.net/tag/rdf_in_json +http://n2.talis.com/wiki/RDF_JSON_Specification|title|RDF JSON Specification - n² wiki +http://n2.talis.com/wiki/RDF_JSON_Specification|creationTime|2010-12-06T18:10:14Z +http://www.youtube.com/watch?v=Hjc5H1Blw6g|creationDate|2009-06-14 +http://www.youtube.com/watch?v=Hjc5H1Blw6g|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=Hjc5H1Blw6g|tag|http://www.semanlink.net/tag/carnaval +http://www.youtube.com/watch?v=Hjc5H1Blw6g|tag|http://www.semanlink.net/tag/samba +http://www.youtube.com/watch?v=Hjc5H1Blw6g|tag|http://www.semanlink.net/tag/rio_de_janeiro +http://www.youtube.com/watch?v=Hjc5H1Blw6g|title|"""O carnaval É a maior caricatura Na folia O povo esquece a amargura"" (Salgueiro - Samba-enredo 1983)" +http://www.youtube.com/watch?v=Hjc5H1Blw6g|creationTime|2009-06-14T21:28:21Z +http://www.cs.umd.edu/hcil/piccolo/index.shtml|creationDate|2005-09-26 +http://www.cs.umd.edu/hcil/piccolo/index.shtml|tag|http://www.semanlink.net/tag/graph_visualization +http://www.cs.umd.edu/hcil/piccolo/index.shtml|title|Piccolo Home Page +http://lists.w3.org/Archives/Public/www-tag/2005Jun/0039.html|creationDate|2007-01-02 +http://lists.w3.org/Archives/Public/www-tag/2005Jun/0039.html|tag|http://www.semanlink.net/tag/roy_t_fielding +http://lists.w3.org/Archives/Public/www-tag/2005Jun/0039.html|tag|http://www.semanlink.net/tag/httprange_14_solution +http://lists.w3.org/Archives/Public/www-tag/2005Jun/0039.html|comment|"we provide advice to the community that they may mint ""http"" URIs for any resource provided that they follow this simple rule for the sake of removing ambiguity: a) If an ""http"" resource responds to a GET request with a 2xx response, then the resource identified by that URI is an information resource; b) If an ""http"" resource responds to a GET request with a 303 (See Other) response, then the resource identified by that URI could be any resource; c) If an ""http"" resource responds to a GET request with a 4xx (error) response, then the nature of the resource is unknown. +" +http://lists.w3.org/Archives/Public/www-tag/2005Jun/0039.html|title|[httpRange-14] Resolved from Roy T. Fielding on 2005-06-19 (www-tag@w3.org from June 2005) +http://www.w3.org/2004/02/skos/mapping.rdf|creationDate|2007-07-18 +http://www.w3.org/2004/02/skos/mapping.rdf|tag|http://www.semanlink.net/tag/skos +http://www.w3.org/2004/02/skos/mapping.rdf|comment|An RDF vocabulary for describing mappings between concept schemes. 
+http://www.w3.org/2004/02/skos/mapping.rdf|title|SKOS Mapping [RDF/OWL Description] +http://www.w3.org/2004/02/skos/mapping.rdf|creationTime|2007-07-18T23:26:23Z +http://www.kylescholz.com/blog/2006/06/force_directed_graphs_in_javas.html|creationDate|2006-06-25 +http://www.kylescholz.com/blog/2006/06/force_directed_graphs_in_javas.html|tag|http://www.semanlink.net/tag/graph_visualization +http://www.kylescholz.com/blog/2006/06/force_directed_graphs_in_javas.html|tag|http://www.semanlink.net/tag/semanlink_todo +http://www.kylescholz.com/blog/2006/06/force_directed_graphs_in_javas.html|tag|http://www.semanlink.net/tag/javascript +http://www.kylescholz.com/blog/2006/06/force_directed_graphs_in_javas.html|title|kylescholz.com :: blog: Force Directed Graphs in Javascript? +http://passeurdesciences.blog.lemonde.fr/2012/04/29/la-vengeance-de-poseidon-etait-bien-un-tsunami/|creationDate|2012-05-01 +http://passeurdesciences.blog.lemonde.fr/2012/04/29/la-vengeance-de-poseidon-etait-bien-un-tsunami/|tag|http://www.semanlink.net/tag/thucydide +http://passeurdesciences.blog.lemonde.fr/2012/04/29/la-vengeance-de-poseidon-etait-bien-un-tsunami/|tag|http://www.semanlink.net/tag/grece_antique +http://passeurdesciences.blog.lemonde.fr/2012/04/29/la-vengeance-de-poseidon-etait-bien-un-tsunami/|tag|http://www.semanlink.net/tag/tsunami +http://passeurdesciences.blog.lemonde.fr/2012/04/29/la-vengeance-de-poseidon-etait-bien-un-tsunami/|tag|http://www.semanlink.net/tag/herodote +http://passeurdesciences.blog.lemonde.fr/2012/04/29/la-vengeance-de-poseidon-etait-bien-un-tsunami/|title|La vengeance de Poséidon était bien un tsunami Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2012/04/29/la-vengeance-de-poseidon-etait-bien-un-tsunami/|creationTime|2012-05-01T11:45:15Z +http://www.gillesbalmisse.com/annuaire/|creationDate|2005-06-15 +http://www.gillesbalmisse.com/annuaire/|tag|http://www.semanlink.net/tag/semanlink_related +http://www.gillesbalmisse.com/annuaire/|tag|http://www.semanlink.net/tag/knowledge_management +http://www.gillesbalmisse.com/annuaire/|title|Gilles Balmisse - Annuaire des outils de knowledge management, de travail collaboratif et de veille +https://aclanthology.coli.uni-saarland.de/papers/D18-1360/d18-1360|creationDate|2018-11-04 +https://aclanthology.coli.uni-saarland.de/papers/D18-1360/d18-1360|tag|http://www.semanlink.net/tag/emnlp_2018 +https://aclanthology.coli.uni-saarland.de/papers/D18-1360/d18-1360|tag|http://www.semanlink.net/tag/scientific_information_extraction +https://aclanthology.coli.uni-saarland.de/papers/D18-1360/d18-1360|tag|http://www.semanlink.net/tag/knowledge_graph +https://aclanthology.coli.uni-saarland.de/papers/D18-1360/d18-1360|tag|http://www.semanlink.net/tag/knowledge_graph_construction +https://aclanthology.coli.uni-saarland.de/papers/D18-1360/d18-1360|tag|http://www.semanlink.net/tag/good +https://aclanthology.coli.uni-saarland.de/papers/D18-1360/d18-1360|comment|"> A multi-task setup of identifying +and classifying entities, relations, and coreference +clusters in scientific articles. 
+> The framework supports **construction of a scientific +knowledge graph** + +[http://nlp.cs.washington.edu/sciIE/](http://nlp.cs.washington.edu/sciIE/) + +" +https://aclanthology.coli.uni-saarland.de/papers/D18-1360/d18-1360|title|Multi-Task Identification of Entities, Relations, and Coreference for Scientific Knowledge Graph Construction +https://aclanthology.coli.uni-saarland.de/papers/D18-1360/d18-1360|creationTime|2018-11-04T09:31:50Z +http://www2013.org/2013/04/03/we-need-people-to-translate-the-whole-web-luis-von-ahn-a-visionary-of-human-computation/|creationDate|2013-05-15 +http://www2013.org/2013/04/03/we-need-people-to-translate-the-whole-web-luis-von-ahn-a-visionary-of-human-computation/|tag|http://www.semanlink.net/tag/luis_von_ahn +http://www2013.org/2013/04/03/we-need-people-to-translate-the-whole-web-luis-von-ahn-a-visionary-of-human-computation/|tag|http://www.semanlink.net/tag/www_2013 +http://www2013.org/2013/04/03/we-need-people-to-translate-the-whole-web-luis-von-ahn-a-visionary-of-human-computation/|title|“We need people to translate the whole Web”: Luis Von Ahn WWW 2013 – Rio de Janeiro, Brazil +http://www2013.org/2013/04/03/we-need-people-to-translate-the-whole-web-luis-von-ahn-a-visionary-of-human-computation/|creationTime|2013-05-15T15:43:20Z +http://kkovacs.eu/cassandra-vs-mongodb-vs-couchdb-vs-redis|creationDate|2013-03-13 +http://kkovacs.eu/cassandra-vs-mongodb-vs-couchdb-vs-redis|tag|http://www.semanlink.net/tag/nosql +http://kkovacs.eu/cassandra-vs-mongodb-vs-couchdb-vs-redis|comment|Comparison of noSQL databases +http://kkovacs.eu/cassandra-vs-mongodb-vs-couchdb-vs-redis|title|Cassandra vs MongoDB vs CouchDB vs Redis vs Riak vs HBase vs Couchbase vs Hypertable vs ElasticSearch vs Accumulo vs VoltDB vs Scalaris comparison :: Software architect Kristof Kovacs +http://kkovacs.eu/cassandra-vs-mongodb-vs-couchdb-vs-redis|creationTime|2013-03-13T12:13:28Z +http://nlp.town/blog/ner-and-the-road-to-deep-learning/|creationDate|2018-05-20 +http://nlp.town/blog/ner-and-the-road-to-deep-learning/|tag|http://www.semanlink.net/tag/nn_4_nlp +http://nlp.town/blog/ner-and-the-road-to-deep-learning/|tag|http://www.semanlink.net/tag/sequence_labeling +http://nlp.town/blog/ner-and-the-road-to-deep-learning/|tag|http://www.semanlink.net/tag/named_entity_recognition +http://nlp.town/blog/ner-and-the-road-to-deep-learning/|tag|http://www.semanlink.net/tag/good +http://nlp.town/blog/ner-and-the-road-to-deep-learning/|tag|http://www.semanlink.net/tag/yves_peirsman +http://nlp.town/blog/ner-and-the-road-to-deep-learning/|tag|http://www.semanlink.net/tag/conditional_random_field +http://nlp.town/blog/ner-and-the-road-to-deep-learning/|comment|"> the old +and the new-style NLP are not diametrically +opposed: just as it is possible (and useful!) to +incorporate neural-network features into a CRF, +CRFs have influenced some of the best deep +learning models for sequence labelling + +This blog post go through the ways of doing NER, starting with CRF: + +- When you develop a CRF, +a lot of time goes into finding feature functions (Does a word start with a capital? Is it uppercase? Is it a digit?...) +- Pb: semantic similarity between words. +- Gazetteers: lists with names of people, locations and organizations that are known in advance. +- feed Word Embeddings to a CRF: one way is to cluster a set +of word embeddings by distributional similarity, and +provide the CRF with the cluster IDs of a token and its +context words. 
+- Use word and character based embeddings +- LSTM not good enough -> biLSTM +- **biLSTM predict all labels independently of each other -> add a CRF layer** which outputs a matrix of transition scores between two states: dynamic programming can help find the optimal tag sequence for the sentence + + + +" +http://nlp.town/blog/ner-and-the-road-to-deep-learning/|title|Named Entity Recognition and the Road to Deep Learning (2017) +http://nlp.town/blog/ner-and-the-road-to-deep-learning/|creationTime|2018-05-20T22:54:22Z +http://dannyayers.com/2012/02/26/Everyone-has-a-Graph-Store|creationDate|2012-03-01 +http://dannyayers.com/2012/02/26/Everyone-has-a-Graph-Store|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2012/02/26/Everyone-has-a-Graph-Store|title|Danny on : Everyone has a Graph Store +http://dannyayers.com/2012/02/26/Everyone-has-a-Graph-Store|creationTime|2012-03-01T21:46:20Z +http://brandonrose.org/clustering|creationDate|2017-06-28 +http://brandonrose.org/clustering|tag|http://www.semanlink.net/tag/clustering_of_text_documents +http://brandonrose.org/clustering|tag|http://www.semanlink.net/tag/good +http://brandonrose.org/clustering|tag|http://www.semanlink.net/tag/python_sample_code +http://brandonrose.org/clustering|tag|http://www.semanlink.net/tag/data_visualisation +http://brandonrose.org/clustering|tag|http://www.semanlink.net/tag/nlp_sample_code +http://brandonrose.org/clustering|tag|http://www.semanlink.net/tag/python_nlp +http://brandonrose.org/clustering|title|Document Clustering with Python +http://brandonrose.org/clustering|creationTime|2017-06-28T14:55:01Z +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080321_004574.html|creationDate|2008-03-26 +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080321_004574.html|tag|http://www.semanlink.net/tag/online_course_materials +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080321_004574.html|tag|http://www.semanlink.net/tag/education +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080321_004574.html|tag|http://www.semanlink.net/tag/cringely +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080321_004574.html|comment|"Younger, technical kids and their Generation Y parents are going to +demand radical changes to our educational systems." +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080321_004574.html|title|I, Cringely . The Pulpit . War of the Worlds PBS +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080321_004574.html|creationTime|2008-03-26T21:05:39Z +http://www.epimorphics.com/web/projects/linked-data-api|creationDate|2012-03-20 +http://www.epimorphics.com/web/projects/linked-data-api|tag|http://www.semanlink.net/tag/epimorphics +http://www.epimorphics.com/web/projects/linked-data-api|tag|http://www.semanlink.net/tag/linked_data_api +http://www.epimorphics.com/web/projects/linked-data-api|title|Linked data API Epimorphics +http://www.epimorphics.com/web/projects/linked-data-api|creationTime|2012-03-20T17:51:47Z +http://www.thefigtrees.net/lee/blog/2006/06/exploring_the_sparql_clipboard.html|creationDate|2006-06-08 +http://www.thefigtrees.net/lee/blog/2006/06/exploring_the_sparql_clipboard.html|tag|http://www.semanlink.net/tag/sparql_clipboard +http://www.thefigtrees.net/lee/blog/2006/06/exploring_the_sparql_clipboard.html|comment|This demo is a fantastic example of what we can accomplish with data that is represented in a lingua franca and that is accessible via a query language. 
If you add in the ability for this data to be distributed across the web, you end up with an almost ridiculously flexible infrastructure that empowers web authors and developers to integrate data in exciting and unforeseen ways with a very low barrier to entry. +http://www.thefigtrees.net/lee/blog/2006/06/exploring_the_sparql_clipboard.html|title|TechnicaLee Speaking: Exploring the SPARQL Clipboard Demo +http://lists.w3.org/Archives/Public/semantic-web/2010Aug/0115.html|creationDate|2011-03-24 +http://lists.w3.org/Archives/Public/semantic-web/2010Aug/0115.html|tag|http://www.semanlink.net/tag/goodrelations +http://lists.w3.org/Archives/Public/semantic-web/2010Aug/0115.html|tag|http://www.semanlink.net/tag/vso +http://lists.w3.org/Archives/Public/semantic-web/2010Aug/0115.html|comment|Traditional Search Engine Optimization (SEO) tries to put you on top of all search results, but quite clearly, that can work only for one company. GoodRelations puts you on top of Web visibility for people who are looking for exactly your products or services. +http://lists.w3.org/Archives/Public/semantic-web/2010Aug/0115.html|title|VSO / Goodrelations +http://lists.w3.org/Archives/Public/semantic-web/2010Aug/0115.html|creationTime|2011-03-24T16:31:25Z +https://blog.jcoglan.com/2011/03/05/translation-from-haskell-to-javascript-of-selected-portions-of-the-best-introduction-to-monads-ive-ever-read/|creationDate|2015-01-06 +https://blog.jcoglan.com/2011/03/05/translation-from-haskell-to-javascript-of-selected-portions-of-the-best-introduction-to-monads-ive-ever-read/|tag|http://www.semanlink.net/tag/functional_programming +https://blog.jcoglan.com/2011/03/05/translation-from-haskell-to-javascript-of-selected-portions-of-the-best-introduction-to-monads-ive-ever-read/|tag|http://www.semanlink.net/tag/haskell +https://blog.jcoglan.com/2011/03/05/translation-from-haskell-to-javascript-of-selected-portions-of-the-best-introduction-to-monads-ive-ever-read/|tag|http://www.semanlink.net/tag/javascript +https://blog.jcoglan.com/2011/03/05/translation-from-haskell-to-javascript-of-selected-portions-of-the-best-introduction-to-monads-ive-ever-read/|title|Translation from Haskell to JavaScript of selected portions of the best introduction to monads I've ever read – The If Works +https://blog.jcoglan.com/2011/03/05/translation-from-haskell-to-javascript-of-selected-portions-of-the-best-introduction-to-monads-ive-ever-read/|creationTime|2015-01-06T12:24:54Z +http://www.europarl.europa.eu/RegData/etudes/STUD/2017/595374/IPOL_STU(2017)595374_EN.pdf|creationDate|2017-11-03 +http://www.europarl.europa.eu/RegData/etudes/STUD/2017/595374/IPOL_STU(2017)595374_EN.pdf|tag|http://www.semanlink.net/tag/brexit +http://www.europarl.europa.eu/RegData/etudes/STUD/2017/595374/IPOL_STU(2017)595374_EN.pdf|title|Economic Impact of Brexit on the EU27 +http://www.europarl.europa.eu/RegData/etudes/STUD/2017/595374/IPOL_STU(2017)595374_EN.pdf|creationTime|2017-11-03T00:08:30Z +https://arxiv.org/abs/1709.08568|creationDate|2017-09-29 +https://arxiv.org/abs/1709.08568|tag|http://www.semanlink.net/tag/human_level_ai +https://arxiv.org/abs/1709.08568|tag|http://www.semanlink.net/tag/deep_learning_attention +https://arxiv.org/abs/1709.08568|tag|http://www.semanlink.net/tag/thought_vector +https://arxiv.org/abs/1709.08568|tag|http://www.semanlink.net/tag/representation_learning +https://arxiv.org/abs/1709.08568|tag|http://www.semanlink.net/tag/yoshua_bengio +https://arxiv.org/abs/1709.08568|tag|http://www.semanlink.net/tag/consciousness_prior 
+https://arxiv.org/abs/1709.08568|tag|http://www.semanlink.net/tag/conscience_artificielle +https://arxiv.org/abs/1709.08568|tag|http://www.semanlink.net/tag/good +https://arxiv.org/abs/1709.08568|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1709.08568|arxiv_author|Yoshua Bengio +https://arxiv.org/abs/1709.08568|comment|"""consciousness seen as the formation of a low-dimensional combination of a few concepts constituting a conscious thought, i.e., **consciousness as awareness at a particular time instant**"": the projection of a big vector (all the things conscious and unconscious in brain). Attention: additional mechanism describing what mind chooses to focus on. + +[YouTube video](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DYr1mOzC93xs)" +https://arxiv.org/abs/1709.08568|relatedDoc|https://www.youtube.com/watch?v=Yr1mOzC93xs +https://arxiv.org/abs/1709.08568|title|[1709.08568] The Consciousness Prior +https://arxiv.org/abs/1709.08568|creationTime|2017-09-29T14:44:19Z +https://arxiv.org/abs/1709.08568|arxiv_summary|"A new prior is proposed for learning representations of high-level concepts +of the kind we manipulate with language. This prior can be combined with other +priors in order to help disentangling abstract factors from each other. It is +inspired by cognitive neuroscience theories of consciousness, seen as a +bottleneck through which just a few elements, after having been selected by +attention from a broader pool, are then broadcast and condition further +processing, both in perception and decision-making. The set of recently +selected elements one becomes aware of is seen as forming a low-dimensional +conscious state. This conscious state is combining the few concepts +constituting a conscious thought, i.e., what one is immediately conscious of at +a particular moment. We claim that this architectural and +information-processing constraint corresponds to assumptions about the joint +distribution between high-level concepts. To the extent that these assumptions +are generally true (and the form of natural language seems consistent with +them), they can form a useful prior for representation learning. A +low-dimensional thought or conscious state is analogous to a sentence: it +involves only a few variables and yet can make a statement with very high +probability of being true. This is consistent with a joint distribution (over +high-level concepts) which has the form of a sparse factor graph, i.e., where +the dependencies captured by each factor of the factor graph involve only very +few variables while creating a strong dip in the overall energy function. The +consciousness prior also makes it natural to map conscious states to natural +language utterances or to express classical AI knowledge in a form similar to +facts and rules, albeit capturing uncertainty as well as efficient search +mechanisms implemented by attention mechanisms." 
+https://arxiv.org/abs/1709.08568|arxiv_firstAuthor|Yoshua Bengio +https://arxiv.org/abs/1709.08568|arxiv_updated|2019-12-02T22:53:39Z +https://arxiv.org/abs/1709.08568|arxiv_title|The Consciousness Prior +https://arxiv.org/abs/1709.08568|arxiv_published|2017-09-25T15:59:11Z +https://arxiv.org/abs/1709.08568|arxiv_num|1709.08568 +http://www.xml.com/pub/a/2007/02/14/introducing-rdfa.html|creationDate|2007-04-10 +http://www.xml.com/pub/a/2007/02/14/introducing-rdfa.html|tag|http://www.semanlink.net/tag/rdfa +http://www.xml.com/pub/a/2007/02/14/introducing-rdfa.html|title|XML.com: Introducing RDFa +http://www.xml.com/pub/a/2007/02/14/introducing-rdfa.html|creationTime|2007-04-10T23:15:02Z +http://www.besthistorysites.net/AncientBiblical_Africa.shtml|creationDate|2006-09-28 +http://www.besthistorysites.net/AncientBiblical_Africa.shtml|tag|http://www.semanlink.net/tag/antiquite_africaine +http://www.besthistorysites.net/AncientBiblical_Africa.shtml|title|www.besthistorysites.net: Ancient/Biblical - Africa +http://blogs.talis.com/nodalities/2007/07/grddl_specifications_and_quick.php|creationDate|2007-07-17 +http://blogs.talis.com/nodalities/2007/07/grddl_specifications_and_quick.php|tag|http://www.semanlink.net/tag/grddl +http://blogs.talis.com/nodalities/2007/07/grddl_specifications_and_quick.php|tag|http://www.semanlink.net/tag/danny_ayers +http://blogs.talis.com/nodalities/2007/07/grddl_specifications_and_quick.php|title|Nodalities: GRDDL Specifications (and Quick Reference) +http://blogs.talis.com/nodalities/2007/07/grddl_specifications_and_quick.php|creationTime|2007-07-17T22:59:22Z +https://www.atlantis-press.com/journals/ijcis/25868611|creationDate|2019-02-14 +https://www.atlantis-press.com/journals/ijcis/25868611|tag|http://www.semanlink.net/tag/ip_ir_ml_ia +https://www.atlantis-press.com/journals/ijcis/25868611|title|A Supervised Requirement-oriented Patent Classification Scheme Based on the Combination of Metadata and Citation Information (2015) +https://www.atlantis-press.com/journals/ijcis/25868611|creationTime|2019-02-14T11:43:23Z +http://courses.blockgeeks.com/|creationDate|2017-02-08 +http://courses.blockgeeks.com/|tag|http://www.semanlink.net/tag/blockchain +http://courses.blockgeeks.com/|tag|http://www.semanlink.net/tag/online_course_materials +http://courses.blockgeeks.com/|title|Blockgeeks +http://courses.blockgeeks.com/|creationTime|2017-02-08T23:55:12Z +http://ecologie.blog.lemonde.fr/2012/01/18/a-t-on-atteint-un-pic-des-objets/|creationDate|2012-01-18 +http://ecologie.blog.lemonde.fr/2012/01/18/a-t-on-atteint-un-pic-des-objets/|tag|http://www.semanlink.net/tag/croissance +http://ecologie.blog.lemonde.fr/2012/01/18/a-t-on-atteint-un-pic-des-objets/|tag|http://www.semanlink.net/tag/ecologie +http://ecologie.blog.lemonde.fr/2012/01/18/a-t-on-atteint-un-pic-des-objets/|tag|http://www.semanlink.net/tag/decroissance +http://ecologie.blog.lemonde.fr/2012/01/18/a-t-on-atteint-un-pic-des-objets/|title|A-t-on atteint un « pic des objets » ? 
Eco(lo) +http://ecologie.blog.lemonde.fr/2012/01/18/a-t-on-atteint-un-pic-des-objets/|creationTime|2012-01-18T08:29:35Z +https://en.wikipedia.org/wiki/Django_Unchained|creationDate|2017-01-16 +https://en.wikipedia.org/wiki/Django_Unchained|tag|http://www.semanlink.net/tag/quentin_tarantino +https://en.wikipedia.org/wiki/Django_Unchained|tag|http://www.semanlink.net/tag/film_americain +https://en.wikipedia.org/wiki/Django_Unchained|title|Django Unchained +https://en.wikipedia.org/wiki/Django_Unchained|creationTime|2017-01-16T00:26:51Z +http://animaux.blog.lemonde.fr/2013/03/12/peut-on-guerir-le-diable-du-cancer/|creationDate|2013-03-12 +http://animaux.blog.lemonde.fr/2013/03/12/peut-on-guerir-le-diable-du-cancer/|tag|http://www.semanlink.net/tag/immune_system +http://animaux.blog.lemonde.fr/2013/03/12/peut-on-guerir-le-diable-du-cancer/|tag|http://www.semanlink.net/tag/tasmanian_devil +http://animaux.blog.lemonde.fr/2013/03/12/peut-on-guerir-le-diable-du-cancer/|title|Peut-on guérir le diable du cancer? Un éléphant dans mon salon +http://animaux.blog.lemonde.fr/2013/03/12/peut-on-guerir-le-diable-du-cancer/|creationTime|2013-03-12T08:25:57Z +http://www.newscientist.com/article.ns?id=mg18625054.000|creationDate|2005-06-27 +http://www.newscientist.com/article.ns?id=mg18625054.000|tag|http://www.semanlink.net/tag/quantum_computing +http://www.newscientist.com/article.ns?id=mg18625054.000|title|Quantum computer springs a leak - Technology Print New Scientist +http://ruder.io/semi-supervised/|creationDate|2018-04-26 +http://ruder.io/semi-supervised/|tag|http://www.semanlink.net/tag/sebastian_ruder +http://ruder.io/semi-supervised/|tag|http://www.semanlink.net/tag/semi_supervised_learning +http://ruder.io/semi-supervised/|title|An overview of proxy-label approaches for semi-supervised learning +http://ruder.io/semi-supervised/|creationTime|2018-04-26T14:15:55Z +http://bnode.org/blog/2008/07/16/sparqlscript-semantic-mashups-made-easy|creationDate|2008-07-17 +http://bnode.org/blog/2008/07/16/sparqlscript-semantic-mashups-made-easy|tag|http://www.semanlink.net/tag/benjamin_nowack +http://bnode.org/blog/2008/07/16/sparqlscript-semantic-mashups-made-easy|tag|http://www.semanlink.net/tag/sparql +http://bnode.org/blog/2008/07/16/sparqlscript-semantic-mashups-made-easy|tag|http://www.semanlink.net/tag/semantic_mashups +http://bnode.org/blog/2008/07/16/sparqlscript-semantic-mashups-made-easy|title|SPARQLScript - Semantic Mashups made easy - benjamin nowack's blog +http://bnode.org/blog/2008/07/16/sparqlscript-semantic-mashups-made-easy|creationTime|2008-07-17T10:38:00Z +http://www.iesl.cs.umass.edu/data/wiki-links|creationDate|2013-03-12 +http://www.iesl.cs.umass.edu/data/wiki-links|tag|http://www.semanlink.net/tag/wikilinks_corpus +http://www.iesl.cs.umass.edu/data/wiki-links|title|Wikilinks - Information Extraction and Synthesis Laboratory +http://www.iesl.cs.umass.edu/data/wiki-links|creationTime|2013-03-12T14:54:37Z +http://www.mkbergman.com/?page_id=325|creationDate|2007-01-24 +http://www.mkbergman.com/?page_id=325|tag|http://www.semanlink.net/tag/semantic_web_tools +http://www.mkbergman.com/?page_id=325|tag|http://www.semanlink.net/tag/simile_exhibit +http://www.mkbergman.com/?page_id=325|comment|as a SIMILE exhibit +http://www.mkbergman.com/?page_id=325|title|AI3’s Comprehensive Listing of Semantic Web and Related Tools +http://internetactu.blog.lemonde.fr/2013/10/18/la-nouvelle-ecologie-du-temps/|creationDate|2013-10-18 
+http://internetactu.blog.lemonde.fr/2013/10/18/la-nouvelle-ecologie-du-temps/|tag|http://www.semanlink.net/tag/high_frequency_trading +http://internetactu.blog.lemonde.fr/2013/10/18/la-nouvelle-ecologie-du-temps/|title|La nouvelle écologie du temps InternetActu +http://internetactu.blog.lemonde.fr/2013/10/18/la-nouvelle-ecologie-du-temps/|creationTime|2013-10-18T20:20:04Z +http://www.clickz.com/clickz/column/2303735/googles-hummingbird-update-friend-or-foe-of-the-marketing-executive|creationDate|2014-02-25 +http://www.clickz.com/clickz/column/2303735/googles-hummingbird-update-friend-or-foe-of-the-marketing-executive|tag|http://www.semanlink.net/tag/google_hummingbird +http://www.clickz.com/clickz/column/2303735/googles-hummingbird-update-friend-or-foe-of-the-marketing-executive|tag|http://www.semanlink.net/tag/seo +http://www.clickz.com/clickz/column/2303735/googles-hummingbird-update-friend-or-foe-of-the-marketing-executive|title|Google's Hummingbird Update: Friend or Foe of the Marketing Executive? ClickZ +http://www.clickz.com/clickz/column/2303735/googles-hummingbird-update-friend-or-foe-of-the-marketing-executive|creationTime|2014-02-25T14:50:54Z +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0339.html|creationDate|2012-04-12 +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0339.html|tag|http://www.semanlink.net/tag/facebook +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0339.html|tag|http://www.semanlink.net/tag/httprange_14 +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0339.html|tag|http://www.semanlink.net/tag/hugh_glaser +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0339.html|title|Re: See Other from Hugh Glaser +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0339.html|creationTime|2012-04-12T00:15:22Z +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0339.html|seeAlso|http://lists.w3.org/Archives/Public/public-lod/2012Mar/0382.html +http://www.lespetitescases.net/de-quoi-le-Big-Data-est-il-le-nom|creationDate|2015-03-01 +http://www.lespetitescases.net/de-quoi-le-Big-Data-est-il-le-nom|tag|http://www.semanlink.net/tag/les_petites_cases +http://www.lespetitescases.net/de-quoi-le-Big-Data-est-il-le-nom|tag|http://www.semanlink.net/tag/big_data +http://www.lespetitescases.net/de-quoi-le-Big-Data-est-il-le-nom|title|De quoi le Big Data est-il le nom ? 
Les petites cases +http://www.lespetitescases.net/de-quoi-le-Big-Data-est-il-le-nom|creationTime|2015-03-01T11:04:17Z +http://www.bloombergview.com/articles/2014-12-29/welcome-to-the-era-of-chinese-outsourcing|creationDate|2015-01-01 +http://www.bloombergview.com/articles/2014-12-29/welcome-to-the-era-of-chinese-outsourcing|tag|http://www.semanlink.net/tag/chine_afrique +http://www.bloombergview.com/articles/2014-12-29/welcome-to-the-era-of-chinese-outsourcing|comment|Pollute Africa instead of China +http://www.bloombergview.com/articles/2014-12-29/welcome-to-the-era-of-chinese-outsourcing|title|Say Goodbye to 'Made in China' - Bloomberg View +http://www.bloombergview.com/articles/2014-12-29/welcome-to-the-era-of-chinese-outsourcing|creationTime|2015-01-01T15:38:40Z +http://www.co-ode.org/|creationDate|2007-07-26 +http://www.co-ode.org/|tag|http://www.semanlink.net/tag/howto_tutorial_faq +http://www.co-ode.org/|tag|http://www.semanlink.net/tag/ontologies +http://www.co-ode.org/|tag|http://www.semanlink.net/tag/protege +http://www.co-ode.org/|tag|http://www.semanlink.net/tag/owl_dl +http://www.co-ode.org/|comment|The CO-ODE project aims to build authoring tools and infrastructure that make ontology engineering easier. We specifically support the development and use of OWL-DL ontologies, by being heavily involved in the creation of infrastructure and plugins for the Protégé platform and more recently, OWL1.1 support for the OWL API. +http://www.co-ode.org/|title|CO-ODE +http://www.co-ode.org/|creationTime|2007-07-26T12:36:40Z +http://fr.slideshare.net/fpservant/ldow2013|creationDate|2013-05-28 +http://fr.slideshare.net/fpservant/ldow2013|tag|http://www.semanlink.net/tag/fpservant_slideshare +http://fr.slideshare.net/fpservant/ldow2013|tag|http://www.semanlink.net/tag/fps_ldow_2013 +http://fr.slideshare.net/fpservant/ldow2013|title|Describing Customizable Products on the Web of Data (LDOW 2013) +http://fr.slideshare.net/fpservant/ldow2013|creationTime|2013-05-28T16:19:18Z +http://torrez.us/archives/2007/05/17/531/|creationDate|2007-05-19 +http://torrez.us/archives/2007/05/17/531/|tag|http://www.semanlink.net/tag/rdfa +http://torrez.us/archives/2007/05/17/531/|tag|http://www.semanlink.net/tag/xtech_2007 +http://torrez.us/archives/2007/05/17/531/|tag|http://www.semanlink.net/tag/elias_torres +http://torrez.us/archives/2007/05/17/531/|comment|"Next release of Firefox plugin Operator will support RDFa.
+This post includes examples from the presentation at XTech. +" +http://torrez.us/archives/2007/05/17/531/|title|Elias Torres » Blog Archive » Operator Overload +http://torrez.us/archives/2007/05/17/531/|creationTime|2007-05-19T14:07:04Z +http://www.securityweek.com/hackers-used-sophisticated-smb-worm-tool-attack-sony|creationDate|2014-12-20 +http://www.securityweek.com/hackers-used-sophisticated-smb-worm-tool-attack-sony|tag|http://www.semanlink.net/tag/sony_hack +http://www.securityweek.com/hackers-used-sophisticated-smb-worm-tool-attack-sony|title|Hackers Used Sophisticated SMB Worm Tool to Attack Sony SecurityWeek.Com +http://www.securityweek.com/hackers-used-sophisticated-smb-worm-tool-attack-sony|creationTime|2014-12-20T10:23:44Z +http://binaire.blog.lemonde.fr/2015/03/27/gerard-huet-la-poesie-du-logiciel/|creationDate|2015-04-01 +http://binaire.blog.lemonde.fr/2015/03/27/gerard-huet-la-poesie-du-logiciel/|tag|http://www.semanlink.net/tag/unknown_tag +http://binaire.blog.lemonde.fr/2015/03/27/gerard-huet-la-poesie-du-logiciel/|tag|http://www.semanlink.net/tag/informatique +http://binaire.blog.lemonde.fr/2015/03/27/gerard-huet-la-poesie-du-logiciel/|tag|http://www.semanlink.net/tag/functional_programming +http://binaire.blog.lemonde.fr/2015/03/27/gerard-huet-la-poesie-du-logiciel/|title|Poésie et esthétisme du logiciel : interview de Gérard Huet binaire +http://binaire.blog.lemonde.fr/2015/03/27/gerard-huet-la-poesie-du-logiciel/|creationTime|2015-04-01T21:14:26Z +http://www.zones-sensibles.org/index.php?mod=auteurs&a=06|creationDate|2013-03-08 +http://www.zones-sensibles.org/index.php?mod=auteurs&a=06|tag|http://www.semanlink.net/tag/high_frequency_trading +http://www.zones-sensibles.org/index.php?mod=auteurs&a=06|tag|http://www.semanlink.net/tag/livre_a_lire +http://www.zones-sensibles.org/index.php?mod=auteurs&a=06|title|Zones sensibles +http://www.zones-sensibles.org/index.php?mod=auteurs&a=06|creationTime|2013-03-08T15:40:34Z +http://learningsys.org/nips17/assets/slides/dean-nips17.pdf|creationDate|2017-12-12 +http://learningsys.org/nips17/assets/slides/dean-nips17.pdf|tag|http://www.semanlink.net/tag/machine_learning +http://learningsys.org/nips17/assets/slides/dean-nips17.pdf|tag|http://www.semanlink.net/tag/google_brain +http://learningsys.org/nips17/assets/slides/dean-nips17.pdf|tag|http://www.semanlink.net/tag/nips_2017 +http://learningsys.org/nips17/assets/slides/dean-nips17.pdf|title|Machine Learning for Systems and Systems for Machine Learning (NIPS 2017) +http://learningsys.org/nips17/assets/slides/dean-nips17.pdf|creationTime|2017-12-12T10:57:13Z +http://www.wired.com/wiredscience/2013/07/mrsa-st398-2/|creationDate|2013-07-15 +http://www.wired.com/wiredscience/2013/07/mrsa-st398-2/|tag|http://www.semanlink.net/tag/elevage_porcin +http://www.wired.com/wiredscience/2013/07/mrsa-st398-2/|tag|http://www.semanlink.net/tag/agriculture_industrielle +http://www.wired.com/wiredscience/2013/07/mrsa-st398-2/|tag|http://www.semanlink.net/tag/antibiotic_resistance +http://www.wired.com/wiredscience/2013/07/mrsa-st398-2/|title|More on MRSA on Farms and in Farm Workers, and the Arguments for and Against - Wired Science +http://www.wired.com/wiredscience/2013/07/mrsa-st398-2/|creationTime|2013-07-15T10:03:12Z +http://searchcio.techtarget.com/news/950602/Top-10-risks-of-offshore-outsourcing|creationDate|2010-08-25 +http://searchcio.techtarget.com/news/950602/Top-10-risks-of-offshore-outsourcing|tag|http://www.semanlink.net/tag/outsourcing 
+http://searchcio.techtarget.com/news/950602/Top-10-risks-of-offshore-outsourcing|title|Top 10 risks of offshore outsourcing +http://searchcio.techtarget.com/news/950602/Top-10-risks-of-offshore-outsourcing|creationTime|2010-08-25T14:43:20Z +http://www.france-universite-numerique.fr/|creationDate|2013-10-10 +http://www.france-universite-numerique.fr/|tag|http://www.semanlink.net/tag/mooc +http://www.france-universite-numerique.fr/|tag|http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche +http://www.france-universite-numerique.fr/|tag|http://www.semanlink.net/tag/site_web_gouvernemental +http://www.france-universite-numerique.fr/|title|France Université Numérique - Découvrir, apprendre et réussir +http://www.france-universite-numerique.fr/|creationTime|2013-10-10T01:34:59Z +http://www.npr.org/2012/01/01/144550920/physicists-seek-to-lose-the-lecture-as-teaching-tool?sc=fb&cc=fp|creationDate|2012-01-06 +http://www.npr.org/2012/01/01/144550920/physicists-seek-to-lose-the-lecture-as-teaching-tool?sc=fb&cc=fp|tag|http://www.semanlink.net/tag/jean_paul +http://www.npr.org/2012/01/01/144550920/physicists-seek-to-lose-the-lecture-as-teaching-tool?sc=fb&cc=fp|tag|http://www.semanlink.net/tag/education +http://www.npr.org/2012/01/01/144550920/physicists-seek-to-lose-the-lecture-as-teaching-tool?sc=fb&cc=fp|comment|Lecturing has never been an effective teaching technique and now that information is everywhere, some say it's a waste of time. Indeed, physicists have the data to prove it. +http://www.npr.org/2012/01/01/144550920/physicists-seek-to-lose-the-lecture-as-teaching-tool?sc=fb&cc=fp|title|Physicists Seek To Lose The Lecture As Teaching Tool : NPR +http://www.npr.org/2012/01/01/144550920/physicists-seek-to-lose-the-lecture-as-teaching-tool?sc=fb&cc=fp|creationTime|2012-01-06T21:35:25Z +http://ec.europa.eu/culture/library/publications/2014-heritage-communication_en.pdf|creationDate|2014-07-27 +http://ec.europa.eu/culture/library/publications/2014-heritage-communication_en.pdf|tag|http://www.semanlink.net/tag/cultural_heritage +http://ec.europa.eu/culture/library/publications/2014-heritage-communication_en.pdf|tag|http://www.semanlink.net/tag/europe +http://ec.europa.eu/culture/library/publications/2014-heritage-communication_en.pdf|title|Towards an integrated approach to cultural heritage for Europe +http://ec.europa.eu/culture/library/publications/2014-heritage-communication_en.pdf|creationTime|2014-07-27T00:54:57Z +https://www.w3.org/Data/events/data-ws-2019/papers.html|creationDate|2019-02-28 +https://www.w3.org/Data/events/data-ws-2019/papers.html|tag|http://www.semanlink.net/tag/w3c +https://www.w3.org/Data/events/data-ws-2019/papers.html|tag|http://www.semanlink.net/tag/graph_database +https://www.w3.org/Data/events/data-ws-2019/papers.html|tag|http://www.semanlink.net/tag/workshop +https://www.w3.org/Data/events/data-ws-2019/papers.html|tag|http://www.semanlink.net/tag/rdf +https://www.w3.org/Data/events/data-ws-2019/papers.html|tag|http://www.semanlink.net/tag/rdf_and_property_graphs +https://www.w3.org/Data/events/data-ws-2019/papers.html|title|W3C Workshop on Web Standardization for Graph Data +https://www.w3.org/Data/events/data-ws-2019/papers.html|creationTime|2019-02-28T17:48:31Z +http://confluence.atlassian.com/display/DOC/Configuring+Tomcat's+URI+encoding|creationDate|2006-09-12 +http://confluence.atlassian.com/display/DOC/Configuring+Tomcat's+URI+encoding|tag|http://www.semanlink.net/tag/uri_encoding 
+http://confluence.atlassian.com/display/DOC/Configuring+Tomcat's+URI+encoding|tag|http://www.semanlink.net/tag/developer_documentation +http://confluence.atlassian.com/display/DOC/Configuring+Tomcat's+URI+encoding|tag|http://www.semanlink.net/tag/tomcat +http://confluence.atlassian.com/display/DOC/Configuring+Tomcat's+URI+encoding|comment|By default, Tomcat uses ISO-8859-1 character encoding when decoding URLs received from a browser. +http://confluence.atlassian.com/display/DOC/Configuring+Tomcat's+URI+encoding|title|Configuring Tomcat's URI encoding +http://www.pnetliteratura.pt/noticia.asp?id=1555|creationDate|2008-11-28 +http://www.pnetliteratura.pt/noticia.asp?id=1555|tag|http://www.semanlink.net/tag/gastronomie +http://www.pnetliteratura.pt/noticia.asp?id=1555|tag|http://www.semanlink.net/tag/portugal +http://www.pnetliteratura.pt/noticia.asp?id=1555|tag|http://www.semanlink.net/tag/ofir +http://www.pnetliteratura.pt/noticia.asp?id=1555|tag|http://www.semanlink.net/tag/amazonie +http://www.pnetliteratura.pt/noticia.asp?id=1555|title|Chefes Rui Paula e Ofir Oliveira unem sabores selvagens da Amazónia aos tradicionais do Douro +http://www.pnetliteratura.pt/noticia.asp?id=1555|creationTime|2008-11-28T23:00:35Z +https://arxiv.org/abs/1703.03129|creationDate|2018-10-23 +https://arxiv.org/abs/1703.03129|tag|http://www.semanlink.net/tag/google_brain +https://arxiv.org/abs/1703.03129|tag|http://www.semanlink.net/tag/memory_in_deep_learning +https://arxiv.org/abs/1703.03129|tag|http://www.semanlink.net/tag/rare_events +https://arxiv.org/abs/1703.03129|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1703.03129|tag|http://www.semanlink.net/tag/few_shot_learning +https://arxiv.org/abs/1703.03129|tag|http://www.semanlink.net/tag/k_nearest_neighbors_algorithm +https://arxiv.org/abs/1703.03129|arxiv_author|Aurko Roy +https://arxiv.org/abs/1703.03129|arxiv_author|Łukasz Kaiser +https://arxiv.org/abs/1703.03129|arxiv_author|Samy Bengio +https://arxiv.org/abs/1703.03129|arxiv_author|Ofir Nachum +https://arxiv.org/abs/1703.03129|comment|"> a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. +> Our memory module can be easily added to any part of a supervised neural network" +https://arxiv.org/abs/1703.03129|title|[1703.03129] Learning to Remember Rare Events +https://arxiv.org/abs/1703.03129|creationTime|2018-10-23T12:36:58Z +https://arxiv.org/abs/1703.03129|arxiv_summary|"Despite recent advances, memory-augmented deep neural networks are still +limited when it comes to life-long and one-shot learning, especially in +remembering rare events. We present a large-scale life-long memory module for +use in deep learning. The module exploits fast nearest-neighbor algorithms for +efficiency and thus scales to large memory sizes. Except for the +nearest-neighbor query, the module is fully differentiable and trained +end-to-end with no extra supervision. It operates in a life-long manner, i.e., +without the need to reset it during training. +Our memory module can be easily added to any part of a supervised neural +network. 
To show its versatility we add it to a number of networks, from simple +convolutional ones tested on image classification to deep sequence-to-sequence +and recurrent-convolutional models. In all cases, the enhanced network gains +the ability to remember and do life-long one-shot learning. Our module +remembers training examples shown many thousands of steps in the past and it +can successfully generalize from them. We set new state-of-the-art for one-shot +learning on the Omniglot dataset and demonstrate, for the first time, life-long +one-shot learning in recurrent neural networks on a large-scale machine +translation task." +https://arxiv.org/abs/1703.03129|arxiv_firstAuthor|Łukasz Kaiser +https://arxiv.org/abs/1703.03129|arxiv_updated|2017-03-09T04:36:15Z +https://arxiv.org/abs/1703.03129|arxiv_title|Learning to Remember Rare Events +https://arxiv.org/abs/1703.03129|arxiv_published|2017-03-09T04:36:15Z +https://arxiv.org/abs/1703.03129|arxiv_num|1703.03129 +http://www.liberation.fr/actualite/societe/271050.FR.php|creationDate|2007-09-25 +http://www.liberation.fr/actualite/societe/271050.FR.php|tag|http://www.semanlink.net/tag/carte_d_identite +http://www.liberation.fr/actualite/societe/271050.FR.php|tag|http://www.semanlink.net/tag/administration_francaise +http://www.liberation.fr/actualite/societe/271050.FR.php|tag|http://www.semanlink.net/tag/certificat_de_nationalite +http://www.liberation.fr/actualite/societe/271050.FR.php|tag|http://www.semanlink.net/tag/ca_craint +http://www.liberation.fr/actualite/societe/271050.FR.php|comment|"Pour une carte d’identité, un document religieux est exigé d’une pied-noir en Seine-Saint-Denis.
+Tous les Français nés à l’étranger ou de parents étrangers sont victimes de ce zèle administratif. En 1993, lorsque la carte d’identité papier a été remplacée par sa version informatisée, l’administration en a profité pour donner un tour de vis. +" +http://www.liberation.fr/actualite/societe/271050.FR.php|title|La «consonance israélite» réveille le zèle administratif +http://www.liberation.fr/actualite/societe/271050.FR.php|creationTime|2007-09-25T21:58:47Z +http://docs.jquery.com/Main_Page|creationDate|2009-05-21 +http://docs.jquery.com/Main_Page|tag|http://www.semanlink.net/tag/javascript_librairies +http://docs.jquery.com/Main_Page|title|jQuery JavaScript Library +http://docs.jquery.com/Main_Page|creationTime|2009-05-21T00:07:22Z +http://www.eclipsezone.com/eclipse/forums/t76213.html|creationDate|2007-11-12 +http://www.eclipsezone.com/eclipse/forums/t76213.html|tag|http://www.semanlink.net/tag/bug +http://www.eclipsezone.com/eclipse/forums/t76213.html|tag|http://www.semanlink.net/tag/wtp +http://www.eclipsezone.com/eclipse/forums/t76213.html|title|EclipseZone - J2EE Module Dependencies not persisting ... +http://www.eclipsezone.com/eclipse/forums/t76213.html|creationTime|2007-11-12T01:05:07Z +https://arxiv.org/abs/1103.0398|creationDate|2018-01-17 +https://arxiv.org/abs/1103.0398|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1103.0398|tag|http://www.semanlink.net/tag/multitask_learning_in_nlp +https://arxiv.org/abs/1103.0398|tag|http://www.semanlink.net/tag/multi_task_learning +https://arxiv.org/abs/1103.0398|tag|http://www.semanlink.net/tag/nlp +https://arxiv.org/abs/1103.0398|tag|http://www.semanlink.net/tag/frequently_cited_paper +https://arxiv.org/abs/1103.0398|tag|http://www.semanlink.net/tag/ronan_collobert +https://arxiv.org/abs/1103.0398|tag|http://www.semanlink.net/tag/deep_nlp +https://arxiv.org/abs/1103.0398|arxiv_author|Ronan Collobert +https://arxiv.org/abs/1103.0398|arxiv_author|Michael Karlen +https://arxiv.org/abs/1103.0398|arxiv_author|Koray Kavukcuoglu +https://arxiv.org/abs/1103.0398|arxiv_author|Leon Bottou +https://arxiv.org/abs/1103.0398|arxiv_author|Pavel Kuksa +https://arxiv.org/abs/1103.0398|arxiv_author|Jason Weston +https://arxiv.org/abs/1103.0398|comment|"seminal work + +Abstract: + +> a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements +" +https://arxiv.org/abs/1103.0398|title|[1103.0398] Natural Language Processing (almost) from Scratch +https://arxiv.org/abs/1103.0398|creationTime|2018-01-17T18:40:10Z +https://arxiv.org/abs/1103.0398|arxiv_summary|"We propose a unified neural network architecture and learning algorithm that +can be applied to various natural language processing tasks including: +part-of-speech tagging, chunking, named entity recognition, and semantic role +labeling. This versatility is achieved by trying to avoid task-specific +engineering and therefore disregarding a lot of prior knowledge. 
Instead of +exploiting man-made input features carefully optimized for each task, our +system learns internal representations on the basis of vast amounts of mostly +unlabeled training data. This work is then used as a basis for building a +freely available tagging system with good performance and minimal computational +requirements." +https://arxiv.org/abs/1103.0398|arxiv_firstAuthor|Ronan Collobert +https://arxiv.org/abs/1103.0398|arxiv_updated|2011-03-02T11:34:50Z +https://arxiv.org/abs/1103.0398|arxiv_title|Natural Language Processing (almost) from Scratch +https://arxiv.org/abs/1103.0398|arxiv_published|2011-03-02T11:34:50Z +https://arxiv.org/abs/1103.0398|arxiv_num|1103.0398 +http://sites.wiwiss.fu-berlin.de/suhl/bizer/d2rq/|creationDate|2007-06-23 +http://sites.wiwiss.fu-berlin.de/suhl/bizer/d2rq/|tag|http://www.semanlink.net/tag/d2rq +http://sites.wiwiss.fu-berlin.de/suhl/bizer/d2rq/|comment|As Semantic Web technologies are getting mature, there is a growing need for RDF applications to access the content of non-RDF, legacy databases without having to replicate the whole database into RDF. D2RQ is a declarative language to describe mappings between relational database schemata and OWL/RDFS ontologies. The mappings allow RDF applications to access the content of huge, non-RDF databases using Semantic Web query languages like SPARQL. +http://sites.wiwiss.fu-berlin.de/suhl/bizer/d2rq/|title|D2RQ - Treating Non-RDF Databases as Virtual RDF Graphs - Chris Bizer +http://sites.wiwiss.fu-berlin.de/suhl/bizer/d2rq/|creationTime|2007-06-23T13:42:58Z +http://www.mkbergman.com/944/skos-now-interoperates-with-owl-2/|creationDate|2013-02-07 +http://www.mkbergman.com/944/skos-now-interoperates-with-owl-2/|tag|http://www.semanlink.net/tag/owl_dl +http://www.mkbergman.com/944/skos-now-interoperates-with-owl-2/|tag|http://www.semanlink.net/tag/skos_owl +http://www.mkbergman.com/944/skos-now-interoperates-with-owl-2/|tag|http://www.semanlink.net/tag/owl_2 +http://www.mkbergman.com/944/skos-now-interoperates-with-owl-2/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/944/skos-now-interoperates-with-owl-2/|title|SKOS Now Interoperates with OWL 2 AI3:::Adaptive Information +http://www.mkbergman.com/944/skos-now-interoperates-with-owl-2/|creationTime|2013-02-07T08:51:14Z +https://www.theguardian.com/politics/2017/dec/18/uk-cannot-have-a-special-deal-for-the-city-says-eu-brexit-negotiator-barnier|creationDate|2017-12-19 +https://www.theguardian.com/politics/2017/dec/18/uk-cannot-have-a-special-deal-for-the-city-says-eu-brexit-negotiator-barnier|tag|http://www.semanlink.net/tag/brexit +https://www.theguardian.com/politics/2017/dec/18/uk-cannot-have-a-special-deal-for-the-city-says-eu-brexit-negotiator-barnier|comment|In a blow to remain campaigners, Barnier contends that the UK would be unable to revoke article 50 unilaterally +https://www.theguardian.com/politics/2017/dec/18/uk-cannot-have-a-special-deal-for-the-city-says-eu-brexit-negotiator-barnier|title|UK cannot have a special deal for the City, says EU's Brexit negotiator Politics The Guardian +https://www.theguardian.com/politics/2017/dec/18/uk-cannot-have-a-special-deal-for-the-city-says-eu-brexit-negotiator-barnier|creationTime|2017-12-19T14:06:45Z +http://www.cnrs.fr/insis/recherche/actualites/2018/04/puces-miniatures-intelligentes.htm|creationDate|2018-04-29 +http://www.cnrs.fr/insis/recherche/actualites/2018/04/puces-miniatures-intelligentes.htm|tag|http://www.semanlink.net/tag/brains_in_silicon 
+http://www.cnrs.fr/insis/recherche/actualites/2018/04/puces-miniatures-intelligentes.htm|tag|http://www.semanlink.net/tag/artificial_neurons +http://www.cnrs.fr/insis/recherche/actualites/2018/04/puces-miniatures-intelligentes.htm|title|CNRS INSIS - Un pas vers les puces miniatures intelligentes +http://www.cnrs.fr/insis/recherche/actualites/2018/04/puces-miniatures-intelligentes.htm|creationTime|2018-04-29T23:07:54Z +http://fgiasson.com/blog/index.php/2011/09/21/volkswagens-use-of-structwsf-in-their-semantic-web-platform/|creationDate|2011-09-23 +http://fgiasson.com/blog/index.php/2011/09/21/volkswagens-use-of-structwsf-in-their-semantic-web-platform/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2011/09/21/volkswagens-use-of-structwsf-in-their-semantic-web-platform/|tag|http://www.semanlink.net/tag/semantic_web_platform +http://fgiasson.com/blog/index.php/2011/09/21/volkswagens-use-of-structwsf-in-their-semantic-web-platform/|tag|http://www.semanlink.net/tag/volkswagen +http://fgiasson.com/blog/index.php/2011/09/21/volkswagens-use-of-structwsf-in-their-semantic-web-platform/|title|Volkswagen’s Use of structWSF in their Semantic Web Platform at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2011/09/21/volkswagens-use-of-structwsf-in-their-semantic-web-platform/|creationTime|2011-09-23T00:54:53Z +http://ivan-herman.name/2010/11/02/my-first-mapping-from-rdb-to-rdf-using-r2rml/|creationDate|2010-11-07 +http://ivan-herman.name/2010/11/02/my-first-mapping-from-rdb-to-rdf-using-r2rml/|tag|http://www.semanlink.net/tag/r2rml +http://ivan-herman.name/2010/11/02/my-first-mapping-from-rdb-to-rdf-using-r2rml/|tag|http://www.semanlink.net/tag/ivan_herman +http://ivan-herman.name/2010/11/02/my-first-mapping-from-rdb-to-rdf-using-r2rml/|title|My first mapping from RDB to RDF using R2RML « Ivan’s private site +http://ivan-herman.name/2010/11/02/my-first-mapping-from-rdb-to-rdf-using-r2rml/|creationTime|2010-11-07T13:06:23Z +http://blog.dbtune.org/post/2010/01/13/Live-SPARQL-end-point-for-BBC-Programmes|creationDate|2010-05-14 +http://blog.dbtune.org/post/2010/01/13/Live-SPARQL-end-point-for-BBC-Programmes|tag|http://www.semanlink.net/tag/dbtune +http://blog.dbtune.org/post/2010/01/13/Live-SPARQL-end-point-for-BBC-Programmes|tag|http://www.semanlink.net/tag/sparql_endpoint +http://blog.dbtune.org/post/2010/01/13/Live-SPARQL-end-point-for-BBC-Programmes|tag|http://www.semanlink.net/tag/bbc_programmes +http://blog.dbtune.org/post/2010/01/13/Live-SPARQL-end-point-for-BBC-Programmes|title|Live SPARQL end-point for BBC Programmes - DBTune blog +http://blog.dbtune.org/post/2010/01/13/Live-SPARQL-end-point-for-BBC-Programmes|creationTime|2010-05-14T10:28:23Z +http://thisisafrica.me/land-grabbing-africa-new-colonialism/|creationDate|2014-07-22 +http://thisisafrica.me/land-grabbing-africa-new-colonialism/|tag|http://www.semanlink.net/tag/african_land_grab +http://thisisafrica.me/land-grabbing-africa-new-colonialism/|title|Land Grabbing in Africa, the new colonialism This Is Africa +http://thisisafrica.me/land-grabbing-africa-new-colonialism/|creationTime|2014-07-22T18:23:59Z +http://www.lemonde.fr/idees/article/2010/08/20/langues-anciennes-cibles-emouvantes_1400980_3232.html|creationDate|2010-08-21 +http://www.lemonde.fr/idees/article/2010/08/20/langues-anciennes-cibles-emouvantes_1400980_3232.html|tag|http://www.semanlink.net/tag/enseignement_francais 
+http://www.lemonde.fr/idees/article/2010/08/20/langues-anciennes-cibles-emouvantes_1400980_3232.html|tag|http://www.semanlink.net/tag/langues_anciennes +http://www.lemonde.fr/idees/article/2010/08/20/langues-anciennes-cibles-emouvantes_1400980_3232.html|comment|Effectivement, mieux vaut que les élèves n'entendent pas trop parler de l'Athènes antique, où les hauts fonctionnaires étaient astreints à rendre compte de leur gestion au sortir de leur charge… +http://www.lemonde.fr/idees/article/2010/08/20/langues-anciennes-cibles-emouvantes_1400980_3232.html|title|Langues anciennes, cibles émouvantes +http://www.lemonde.fr/idees/article/2010/08/20/langues-anciennes-cibles-emouvantes_1400980_3232.html|creationTime|2010-08-21T12:31:12Z +http://www.lemonde.fr/idees/article/2010/08/20/langues-anciennes-cibles-emouvantes_1400980_3232.html|source|Le Monde +http://www.lemonde.fr/idees/article/2010/08/20/langues-anciennes-cibles-emouvantes_1400980_3232.html|date|2010-08-21 +http://ged2018.sci-web.net/index.html|creationDate|2018-09-10 +http://ged2018.sci-web.net/index.html|tag|http://www.semanlink.net/tag/graph_embeddings +http://ged2018.sci-web.net/index.html|title|Graph embedding Day - Lyon +http://ged2018.sci-web.net/index.html|creationTime|2018-09-10T22:56:23Z +http://www.w3.org/2013/04/odw/odw13_submission_53.pdf|creationDate|2013-08-20 +http://www.w3.org/2013/04/odw/odw13_submission_53.pdf|tag|http://www.semanlink.net/tag/workshop +http://www.w3.org/2013/04/odw/odw13_submission_53.pdf|tag|http://www.semanlink.net/tag/guha +http://www.w3.org/2013/04/odw/odw13_submission_53.pdf|tag|http://www.semanlink.net/tag/dan_brickley +http://www.w3.org/2013/04/odw/odw13_submission_53.pdf|tag|http://www.semanlink.net/tag/schema_org +http://www.w3.org/2013/04/odw/odw13_submission_53.pdf|title|schema.org's position paper for the Open Data on the Web workshop +http://www.w3.org/2013/04/odw/odw13_submission_53.pdf|creationTime|2013-08-20T15:18:03Z +http://knowledgeweb.semanticweb.org/o2i/index99b9.html?page=auto.php|creationDate|2010-07-30 +http://knowledgeweb.semanticweb.org/o2i/index99b9.html?page=auto.php|tag|http://www.semanlink.net/tag/business_case_semantic_web +http://knowledgeweb.semanticweb.org/o2i/index99b9.html?page=auto.php|tag|http://www.semanlink.net/tag/daimler +http://knowledgeweb.semanticweb.org/o2i/index99b9.html?page=auto.php|tag|http://www.semanlink.net/tag/semantic_web_portal +http://knowledgeweb.semanticweb.org/o2i/index99b9.html?page=auto.php|title|KnowledgeWeb - DaimlerChrysler Semantic Web Portal business case +http://knowledgeweb.semanticweb.org/o2i/index99b9.html?page=auto.php|creationTime|2010-07-30T15:30:24Z +http://www.quickonlinetips.com/archives/2005/02/absolutely-delicious-complete-tools-collection/|creationDate|2006-05-13 +http://www.quickonlinetips.com/archives/2005/02/absolutely-delicious-complete-tools-collection/|tag|http://www.semanlink.net/tag/del_icio_us +http://www.quickonlinetips.com/archives/2005/02/absolutely-delicious-complete-tools-collection/|title|Quick Online Tips » Absolutely Del.icio.us - Complete Tools Collection +https://dl.acm.org/citation.cfm?doid=3184558.3186979|creationDate|2018-04-28 +https://dl.acm.org/citation.cfm?doid=3184558.3186979|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://dl.acm.org/citation.cfm?doid=3184558.3186979|tag|http://www.semanlink.net/tag/medical_information_search +https://dl.acm.org/citation.cfm?doid=3184558.3186979|title|Smart-MD: Neural Paragraph Retrieval of Medical Topics 
+https://dl.acm.org/citation.cfm?doid=3184558.3186979|creationTime|2018-04-28T17:45:44Z +http://www.guardian.co.uk/technology/2009/may/19/we-must-ensure-google-garage|creationDate|2009-05-29 +http://www.guardian.co.uk/technology/2009/may/19/we-must-ensure-google-garage|tag|http://www.semanlink.net/tag/cory_doctorow +http://www.guardian.co.uk/technology/2009/may/19/we-must-ensure-google-garage|tag|http://www.semanlink.net/tag/net_neutrality +http://www.guardian.co.uk/technology/2009/may/19/we-must-ensure-google-garage|title|Cory Doctorow Draws the Line On Net Neutrality +http://www.guardian.co.uk/technology/2009/may/19/we-must-ensure-google-garage|creationTime|2009-05-29T00:14:36Z +http://www.xml.com/pub/a/2002/05/08/deviant.html|creationDate|2006-02-23 +http://www.xml.com/pub/a/2002/05/08/deviant.html|tag|http://www.semanlink.net/tag/soap_vs_rest +http://www.xml.com/pub/a/2002/05/08/deviant.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.xml.com/pub/a/2002/05/08/deviant.html|title|XML.com: REST Roundup +https://www.npmjs.com/package/jsonld|creationDate|2015-02-25 +https://www.npmjs.com/package/jsonld|tag|http://www.semanlink.net/tag/manu_sporny +https://www.npmjs.com/package/jsonld|tag|http://www.semanlink.net/tag/javascript_rdf +https://www.npmjs.com/package/jsonld|tag|http://www.semanlink.net/tag/json_ld +https://www.npmjs.com/package/jsonld|comment|"on github +" +https://www.npmjs.com/package/jsonld|title|jsonld.js: A JSON-LD Processor and API implementation in JavaScript. +https://www.npmjs.com/package/jsonld|creationTime|2015-02-25T10:50:59Z +https://arxiv.org/abs/1707.00418|creationDate|2018-03-16 +https://arxiv.org/abs/1707.00418|tag|http://www.semanlink.net/tag/multi_label_classification +https://arxiv.org/abs/1707.00418|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1707.00418|arxiv_author|Chih-Kuan Yeh +https://arxiv.org/abs/1707.00418|arxiv_author|Wei-Chieh Wu +https://arxiv.org/abs/1707.00418|arxiv_author|Wei-Jen Ko +https://arxiv.org/abs/1707.00418|arxiv_author|Yu-Chiang Frank Wang +https://arxiv.org/abs/1707.00418|comment|"Uses [Deep Canonical Correlation Analysis](/tag/deep_canonical_correlation_analysis) and autoencoder structures to **learn a latent subspace from both feature and label domains** for multi-label classification. + +(several implementations on github) + + +" +https://arxiv.org/abs/1707.00418|title|[1707.00418] Learning Deep Latent Spaces for Multi-Label Classification +https://arxiv.org/abs/1707.00418|creationTime|2018-03-16T23:37:58Z +https://arxiv.org/abs/1707.00418|arxiv_summary|"Multi-label classification is a practical yet challenging task in machine +learning related fields, since it requires the prediction of more than one +label category for each input instance. We propose a novel deep neural networks +(DNN) based model, Canonical Correlated AutoEncoder (C2AE), for solving this +task. Aiming at better relating feature and label domain data for improved +classification, we uniquely perform joint feature and label embedding by +deriving a deep latent space, followed by the introduction of label-correlation +sensitive loss function for recovering the predicted label outputs. Our C2AE is +achieved by integrating the DNN architectures of canonical correlation analysis +and autoencoder, which allows end-to-end learning and prediction with the +ability to exploit label dependency. Moreover, our C2AE can be easily extended +to address the learning problem with missing labels. 
Our experiments on +multiple datasets with different scales confirm the effectiveness and +robustness of our proposed method, which is shown to perform favorably against +state-of-the-art methods for multi-label classification." +https://arxiv.org/abs/1707.00418|arxiv_firstAuthor|Chih-Kuan Yeh +https://arxiv.org/abs/1707.00418|arxiv_updated|2017-07-03T06:37:01Z +https://arxiv.org/abs/1707.00418|arxiv_title|Learning Deep Latent Spaces for Multi-Label Classification +https://arxiv.org/abs/1707.00418|arxiv_published|2017-07-03T06:37:01Z +https://arxiv.org/abs/1707.00418|arxiv_num|1707.00418 +https://lejournal.cnrs.fr/articles/six-scenarios-dun-monde-sans-travail|creationDate|2018-07-27 +https://lejournal.cnrs.fr/articles/six-scenarios-dun-monde-sans-travail|tag|http://www.semanlink.net/tag/travail +https://lejournal.cnrs.fr/articles/six-scenarios-dun-monde-sans-travail|tag|http://www.semanlink.net/tag/anticipation +https://lejournal.cnrs.fr/articles/six-scenarios-dun-monde-sans-travail|tag|http://www.semanlink.net/tag/jobbotization +https://lejournal.cnrs.fr/articles/six-scenarios-dun-monde-sans-travail|title|Six scénarios d'un monde sans travail CNRS Le journal +https://lejournal.cnrs.fr/articles/six-scenarios-dun-monde-sans-travail|creationTime|2018-07-27T22:55:59Z +http://www.paristechreview.com/2014/06/30/souverainete-numerique/#.U7UZoKe8i2k.twitter|creationDate|2014-07-03 +http://www.paristechreview.com/2014/06/30/souverainete-numerique/#.U7UZoKe8i2k.twitter|tag|http://www.semanlink.net/tag/politique_economique_francaise +http://www.paristechreview.com/2014/06/30/souverainete-numerique/#.U7UZoKe8i2k.twitter|tag|http://www.semanlink.net/tag/souverainete_numerique +http://www.paristechreview.com/2014/06/30/souverainete-numerique/#.U7UZoKe8i2k.twitter|tag|http://www.semanlink.net/tag/henri_verdier +http://www.paristechreview.com/2014/06/30/souverainete-numerique/#.U7UZoKe8i2k.twitter|tag|http://www.semanlink.net/tag/digital_economy +http://www.paristechreview.com/2014/06/30/souverainete-numerique/#.U7UZoKe8i2k.twitter|title|Souveraineté numérique: la piste industrielle +http://www.paristechreview.com/2014/06/30/souverainete-numerique/#.U7UZoKe8i2k.twitter|creationTime|2014-07-03T11:48:40Z +http://blogs.lexpress.fr/attali/2009/03/une-loi-scandaleuse-et-ridicul.php|creationDate|2009-05-14 +http://blogs.lexpress.fr/attali/2009/03/une-loi-scandaleuse-et-ridicul.php|tag|http://www.semanlink.net/tag/hadopi_riposte_graduee +http://blogs.lexpress.fr/attali/2009/03/une-loi-scandaleuse-et-ridicul.php|tag|http://www.semanlink.net/tag/bien_envoye +http://blogs.lexpress.fr/attali/2009/03/une-loi-scandaleuse-et-ridicul.php|tag|http://www.semanlink.net/tag/attali +http://blogs.lexpress.fr/attali/2009/03/une-loi-scandaleuse-et-ridicul.php|title|Une loi scandaleuse et ridicule - Conversation avec Jacques Attali - Lexpress +http://blogs.lexpress.fr/attali/2009/03/une-loi-scandaleuse-et-ridicul.php|creationTime|2009-05-14T18:20:12Z +http://www.diigo.com/|creationDate|2010-05-10 +http://www.diigo.com/|tag|http://www.semanlink.net/tag/social_networks +http://www.diigo.com/|tag|http://www.semanlink.net/tag/tagging +http://www.diigo.com/|comment|"""knowledge sharing community""" +http://www.diigo.com/|title|Diigo - Web Highlighter and Sticky Notes, Online Bookmarking and Annotation, Personal Learning Network. 
+http://www.diigo.com/|creationTime|2010-05-10T09:13:12Z +http://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html|creationDate|2018-03-04 +http://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html|tag|http://www.semanlink.net/tag/transfer_learning +http://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html|tag|http://www.semanlink.net/tag/tensorflow +http://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html|tag|http://www.semanlink.net/tag/image_classification +http://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html|tag|http://www.semanlink.net/tag/multi_label_classification +http://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html|tag|http://www.semanlink.net/tag/keras +http://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html|title|Codes of Interest: Using Bottleneck Features for Multi-Class Classification in Keras and TensorFlow +http://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html|creationTime|2018-03-04T16:49:06Z +http://bennolan.com/behaviour/|creationDate|2005-11-06 +http://bennolan.com/behaviour/|tag|http://www.semanlink.net/tag/css +http://bennolan.com/behaviour/|tag|http://www.semanlink.net/tag/ajax +http://bennolan.com/behaviour/|tag|http://www.semanlink.net/tag/javascript +http://bennolan.com/behaviour/|title|Behaviour : Using CSS selectors to apply Javascript behaviours +http://martinfowler.com/nosql.html|creationDate|2013-03-14 +http://martinfowler.com/nosql.html|tag|http://www.semanlink.net/tag/nosql +http://martinfowler.com/nosql.html|title|NoSQL Guide +http://martinfowler.com/nosql.html|creationTime|2013-03-14T01:03:48Z +http://videolectures.net/eswc2012_chevalier_servant_product_customization/|creationDate|2012-07-04 +http://videolectures.net/eswc2012_chevalier_servant_product_customization/|tag|http://www.semanlink.net/tag/eswc_2012 +http://videolectures.net/eswc2012_chevalier_servant_product_customization/|tag|http://www.semanlink.net/tag/configuration_as_linked_data +http://videolectures.net/eswc2012_chevalier_servant_product_customization/|title|Product customization as Linked Data - videolectures.net +http://videolectures.net/eswc2012_chevalier_servant_product_customization/|creationTime|2012-07-04T13:24:48Z +http://www2012.org/proceedings/proceedings/p449.pdf|creationDate|2012-04-19 +http://www2012.org/proceedings/proceedings/p449.pdf|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www2012.org/proceedings/proceedings/p449.pdf|tag|http://www.semanlink.net/tag/yago +http://www2012.org/proceedings/proceedings/p449.pdf|tag|http://www.semanlink.net/tag/www_2012 +http://www2012.org/proceedings/proceedings/p449.pdf|comment|Wei Shen, Jianyong Wang, Ping Luo, Min Wang +http://www2012.org/proceedings/proceedings/p449.pdf|title|LINDEN: Linking Named Entities with Knowledge Base via Semantic Knowledge +http://www2012.org/proceedings/proceedings/p449.pdf|creationTime|2012-04-19T14:27:44Z +http://www.lemonde.fr/technologies/article/2013/10/16/derriere-le-mooc-a-la-francaise-google_3496887_651865.html|creationDate|2013-10-16 +http://www.lemonde.fr/technologies/article/2013/10/16/derriere-le-mooc-a-la-francaise-google_3496887_651865.html|tag|http://www.semanlink.net/tag/ministere_de_l_enseignement_superieur_et_de_la_recherche 
+http://www.lemonde.fr/technologies/article/2013/10/16/derriere-le-mooc-a-la-francaise-google_3496887_651865.html|tag|http://www.semanlink.net/tag/mooc +http://www.lemonde.fr/technologies/article/2013/10/16/derriere-le-mooc-a-la-francaise-google_3496887_651865.html|tag|http://www.semanlink.net/tag/google +http://www.lemonde.fr/technologies/article/2013/10/16/derriere-le-mooc-a-la-francaise-google_3496887_651865.html|title|Derrière le MOOC à la française : Google +http://www.lemonde.fr/technologies/article/2013/10/16/derriere-le-mooc-a-la-francaise-google_3496887_651865.html|creationTime|2013-10-16T21:40:29Z +http://passeurdesciences.blog.lemonde.fr/2012/04/18/sommes-nous-prets-a-affronter-un-tsunami-solaire/|creationDate|2016-12-11 +http://passeurdesciences.blog.lemonde.fr/2012/04/18/sommes-nous-prets-a-affronter-un-tsunami-solaire/|tag|http://www.semanlink.net/tag/solar_storm +http://passeurdesciences.blog.lemonde.fr/2012/04/18/sommes-nous-prets-a-affronter-un-tsunami-solaire/|tag|http://www.semanlink.net/tag/catastrophe_naturelle +http://passeurdesciences.blog.lemonde.fr/2012/04/18/sommes-nous-prets-a-affronter-un-tsunami-solaire/|title|Sommes-nous prêts à affronter un tsunami solaire ? Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2012/04/18/sommes-nous-prets-a-affronter-un-tsunami-solaire/|creationTime|2016-12-11T18:02:22Z +https://www.youtube.com/watch?v=-eyhCTvrEtE&list=PLfsVAYSMwsksjfpy8P2t_I52mugGeA5gR&index=1|creationDate|2017-08-16 +https://www.youtube.com/watch?v=-eyhCTvrEtE&list=PLfsVAYSMwsksjfpy8P2t_I52mugGeA5gR&index=1|tag|http://www.semanlink.net/tag/geoffrey_hinton +https://www.youtube.com/watch?v=-eyhCTvrEtE&list=PLfsVAYSMwsksjfpy8P2t_I52mugGeA5gR&index=1|tag|http://www.semanlink.net/tag/deep_learning +https://www.youtube.com/watch?v=-eyhCTvrEtE&list=PLfsVAYSMwsksjfpy8P2t_I52mugGeA5gR&index=1|tag|http://www.semanlink.net/tag/ng +https://www.youtube.com/watch?v=-eyhCTvrEtE&list=PLfsVAYSMwsksjfpy8P2t_I52mugGeA5gR&index=1|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=-eyhCTvrEtE&list=PLfsVAYSMwsksjfpy8P2t_I52mugGeA5gR&index=1|comment|"""A thought is just a big vector of neural activity"" - not a symbolic expression +" +https://www.youtube.com/watch?v=-eyhCTvrEtE&list=PLfsVAYSMwsksjfpy8P2t_I52mugGeA5gR&index=1|title|Heroes of Deep Learning: Andrew Ng interviews Geoffrey Hinton - YouTube +https://www.youtube.com/watch?v=-eyhCTvrEtE&list=PLfsVAYSMwsksjfpy8P2t_I52mugGeA5gR&index=1|creationTime|2017-08-16T10:32:18Z +https://medium.com/@francois.chollet/what-worries-me-about-ai-ed9df072b704|creationDate|2018-03-29 +https://medium.com/@francois.chollet/what-worries-me-about-ai-ed9df072b704|tag|http://www.semanlink.net/tag/francois_chollet +https://medium.com/@francois.chollet/what-worries-me-about-ai-ed9df072b704|tag|http://www.semanlink.net/tag/manipulation +https://medium.com/@francois.chollet/what-worries-me-about-ai-ed9df072b704|tag|http://www.semanlink.net/tag/artificial_intelligence +https://medium.com/@francois.chollet/what-worries-me-about-ai-ed9df072b704|tag|http://www.semanlink.net/tag/dark_side_of_tech +https://medium.com/@francois.chollet/what-worries-me-about-ai-ed9df072b704|comment|what really worries me when it comes to AI: the highly effective, highly scalable manipulation of human behavior that AI enables, and its malicious use by corporations and governments +https://medium.com/@francois.chollet/what-worries-me-about-ai-ed9df072b704|title|What worries me about AI – François Chollet – Medium 
+https://medium.com/@francois.chollet/what-worries-me-about-ai-ed9df072b704|creationTime|2018-03-29T19:38:10Z +https://arxiv.org/abs/1805.03793|creationDate|2018-05-22 +https://arxiv.org/abs/1805.03793|tag|http://www.semanlink.net/tag/embeddings +https://arxiv.org/abs/1805.03793|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1805.03793|arxiv_author|Haisong Zhang +https://arxiv.org/abs/1805.03793|arxiv_author|Shuming Shi +https://arxiv.org/abs/1805.03793|arxiv_author|Yan Song +https://arxiv.org/abs/1805.03793|arxiv_author|Wayne Xin Zhao +https://arxiv.org/abs/1805.03793|arxiv_author|Jialong Han +https://arxiv.org/abs/1805.03793|title|[1805.03793] hyperdoc2vec: Distributed Representations of Hypertext Documents +https://arxiv.org/abs/1805.03793|creationTime|2018-05-22T11:22:24Z +https://arxiv.org/abs/1805.03793|arxiv_summary|"Hypertext documents, such as web pages and academic papers, are of great +importance in delivering information in our daily life. Although being +effective on plain documents, conventional text embedding methods suffer from +information loss if directly adapted to hyper-documents. In this paper, we +propose a general embedding approach for hyper-documents, namely, hyperdoc2vec, +along with four criteria characterizing necessary information that +hyper-document embedding models should preserve. Systematic comparisons are +conducted between hyperdoc2vec and several competitors on two tasks, i.e., +paper classification and citation recommendation, in the academic paper domain. +Analyses and experiments both validate the superiority of hyperdoc2vec to other +models w.r.t. the four criteria." +https://arxiv.org/abs/1805.03793|arxiv_firstAuthor|Jialong Han +https://arxiv.org/abs/1805.03793|arxiv_updated|2018-05-10T02:42:03Z +https://arxiv.org/abs/1805.03793|arxiv_title|hyperdoc2vec: Distributed Representations of Hypertext Documents +https://arxiv.org/abs/1805.03793|arxiv_published|2018-05-10T02:42:03Z +https://arxiv.org/abs/1805.03793|arxiv_num|1805.03793 +http://scikit-learn.org|creationDate|2015-10-16 +http://scikit-learn.org|tag|http://www.semanlink.net/tag/scikit_learn +http://scikit-learn.org|title|scikit-learn: machine learning in Python +http://scikit-learn.org|creationTime|2015-10-16T00:17:32Z +https://www.youtube.com/watch?v=YJnddoa8sHk|creationDate|2017-12-12 +https://www.youtube.com/watch?v=YJnddoa8sHk|tag|http://www.semanlink.net/tag/deep_learning +https://www.youtube.com/watch?v=YJnddoa8sHk|tag|http://www.semanlink.net/tag/youtube_tutorial +https://www.youtube.com/watch?v=YJnddoa8sHk|tag|http://www.semanlink.net/tag/nips_2017 +https://www.youtube.com/watch?v=YJnddoa8sHk|title|Deep Learning: Practice and Trends (NIPS 2017 Tutorial, parts I & II) - YouTube +https://www.youtube.com/watch?v=YJnddoa8sHk|creationTime|2017-12-12T11:00:44Z +http://www.newyorker.com/online/blogs/newsdesk/2012/11/google-driverless-car-morality.html|creationDate|2012-11-30 +http://www.newyorker.com/online/blogs/newsdesk/2012/11/google-driverless-car-morality.html|tag|http://www.semanlink.net/tag/new_yorker +http://www.newyorker.com/online/blogs/newsdesk/2012/11/google-driverless-car-morality.html|tag|http://www.semanlink.net/tag/asimov +http://www.newyorker.com/online/blogs/newsdesk/2012/11/google-driverless-car-morality.html|tag|http://www.semanlink.net/tag/google_car +http://www.newyorker.com/online/blogs/newsdesk/2012/11/google-driverless-car-morality.html|tag|http://www.semanlink.net/tag/driverless_car 
+http://www.newyorker.com/online/blogs/newsdesk/2012/11/google-driverless-car-morality.html|tag|http://www.semanlink.net/tag/moral_machines +http://www.newyorker.com/online/blogs/newsdesk/2012/11/google-driverless-car-morality.html|comment|“Ethical subroutines” may sound like science fiction, but once upon a time, so did self-driving cars. +http://www.newyorker.com/online/blogs/newsdesk/2012/11/google-driverless-car-morality.html|title|Google’s Driver-less Car and Morality : The New Yorker +http://www.newyorker.com/online/blogs/newsdesk/2012/11/google-driverless-car-morality.html|creationTime|2012-11-30T22:26:18Z +https://arxiv.org/abs/1412.6623|creationDate|2018-01-28 +https://arxiv.org/abs/1412.6623|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1412.6623|tag|http://www.semanlink.net/tag/word_embedding +https://arxiv.org/abs/1412.6623|tag|http://www.semanlink.net/tag/gaussian_embedding +https://arxiv.org/abs/1412.6623|tag|http://www.semanlink.net/tag/andrew_mccallum +https://arxiv.org/abs/1412.6623|arxiv_author|Andrew McCallum +https://arxiv.org/abs/1412.6623|arxiv_author|Luke Vilnis +https://arxiv.org/abs/1412.6623|comment|"> Current work in lexical distributed representations maps each word to a point vector in low-dimensional space. Mapping instead to a density provides many interesting advantages + +> Novel word embedding algorithms that embed words directly as Gaussian distributional potential functions in an infinite dimensional function space. This allows us to map word types not only to vectors but to soft regions in space, modeling uncertainty, inclusion, and entailment, as well as providing a rich geometry of the latent space." +https://arxiv.org/abs/1412.6623|title|[1412.6623] Word Representations via Gaussian Embedding +https://arxiv.org/abs/1412.6623|creationTime|2018-01-28T17:27:24Z +https://arxiv.org/abs/1412.6623|arxiv_summary|"Current work in lexical distributed representations maps each word to a point +vector in low-dimensional space. Mapping instead to a density provides many +interesting advantages, including better capturing uncertainty about a +representation and its relationships, expressing asymmetries more naturally +than dot product or cosine similarity, and enabling more expressive +parameterization of decision boundaries. This paper advocates for density-based +distributed embeddings and presents a method for learning representations in +the space of Gaussian distributions. We compare performance on various word +embedding benchmarks, investigate the ability of these embeddings to model +entailment and other asymmetric relationships, and explore novel properties of +the representation." 
+https://arxiv.org/abs/1412.6623|arxiv_firstAuthor|Luke Vilnis +https://arxiv.org/abs/1412.6623|arxiv_updated|2015-05-01T10:14:58Z +https://arxiv.org/abs/1412.6623|arxiv_title|Word Representations via Gaussian Embedding +https://arxiv.org/abs/1412.6623|arxiv_published|2014-12-20T07:42:40Z +https://arxiv.org/abs/1412.6623|arxiv_num|1412.6623 +http://www.consortiuminfo.org/bulletins/pdf/jun05/feature.pdf|creationDate|2005-07-04 +http://www.consortiuminfo.org/bulletins/pdf/jun05/feature.pdf|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.consortiuminfo.org/bulletins/pdf/jun05/feature.pdf|tag|http://www.semanlink.net/tag/semantic_web +http://www.consortiuminfo.org/bulletins/pdf/jun05/feature.pdf|title|Consortium Standards Bulletin - THE SEMANTIC WEB: AN INTERVIEW WITH TIM BERNERS-LEE +http://www.lemonde.fr/technologies/article/2011/08/30/commotion-le-projet-d-un-internet-hors-de-tout-controle_1565282_651865.html#ens_id=1280415|creationDate|2012-01-08 +http://www.lemonde.fr/technologies/article/2011/08/30/commotion-le-projet-d-un-internet-hors-de-tout-controle_1565282_651865.html#ens_id=1280415|tag|http://www.semanlink.net/tag/freedom_box +http://www.lemonde.fr/technologies/article/2011/08/30/commotion-le-projet-d-un-internet-hors-de-tout-controle_1565282_651865.html#ens_id=1280415|tag|http://www.semanlink.net/tag/internet_libre +http://www.lemonde.fr/technologies/article/2011/08/30/commotion-le-projet-d-un-internet-hors-de-tout-controle_1565282_651865.html#ens_id=1280415|title|Commotion, le projet d'un Internet hors de tout contrôle +http://www.lemonde.fr/technologies/article/2011/08/30/commotion-le-projet-d-un-internet-hors-de-tout-controle_1565282_651865.html#ens_id=1280415|creationTime|2012-01-08T10:44:51Z +http://www.lemonde.fr/technologies/article/2011/08/30/commotion-le-projet-d-un-internet-hors-de-tout-controle_1565282_651865.html#ens_id=1280415|source|Le Monde +http://www.lemonde.fr/technologies/article/2011/08/30/commotion-le-projet-d-un-internet-hors-de-tout-controle_1565282_651865.html#ens_id=1280415|date|2011-08-31 +https://blog.floydhub.com/language-translator/|creationDate|2018-10-11 +https://blog.floydhub.com/language-translator/|tag|http://www.semanlink.net/tag/neural_machine_translation +https://blog.floydhub.com/language-translator/|title|Found in translation: Building a language translator from scratch with deep learning - FloydHub +https://blog.floydhub.com/language-translator/|creationTime|2018-10-11T08:37:16Z +https://arxiv.org/abs/1812.09449|creationDate|2019-04-24 +https://arxiv.org/abs/1812.09449|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1812.09449|tag|http://www.semanlink.net/tag/named_entity_recognition +https://arxiv.org/abs/1812.09449|tag|http://www.semanlink.net/tag/survey +https://arxiv.org/abs/1812.09449|arxiv_author|Chenliang Li +https://arxiv.org/abs/1812.09449|arxiv_author|Jing Li +https://arxiv.org/abs/1812.09449|arxiv_author|Aixin Sun +https://arxiv.org/abs/1812.09449|arxiv_author|Jianglei Han +https://arxiv.org/abs/1812.09449|comment|mainly focus on generic NEs in English language +https://arxiv.org/abs/1812.09449|title|[1812.09449] A Survey on Deep Learning for Named Entity Recognition +https://arxiv.org/abs/1812.09449|creationTime|2019-04-24T00:28:42Z +https://arxiv.org/abs/1812.09449|arxiv_summary|"Named entity recognition (NER) is the task to identify mentions of rigid +designators from text belonging to predefined semantic types such as person, +location, organization etc. 
NER always serves as the foundation for many
+natural language applications such as question answering, text summarization,
+and machine translation. Early NER systems got a huge success in achieving good
+performance with the cost of human engineering in designing domain-specific
+features and rules. In recent years, deep learning, empowered by continuous
+real-valued vector representations and semantic composition through nonlinear
+processing, has been employed in NER systems, yielding state-of-the-art
+performance. In this paper, we provide a comprehensive review on existing deep
+learning techniques for NER. We first introduce NER resources, including tagged
+NER corpora and off-the-shelf NER tools. Then, we systematically categorize
+existing works based on a taxonomy along three axes: distributed
+representations for input, context encoder, and tag decoder. Next, we survey
+the most representative methods for recent applied techniques of deep learning
+in new NER problem settings and applications. Finally, we present readers with
+the challenges faced by NER systems and outline future directions in this area."
+https://arxiv.org/abs/1812.09449|arxiv_firstAuthor|Jing Li
+https://arxiv.org/abs/1812.09449|arxiv_updated|2020-03-18T15:57:10Z
+https://arxiv.org/abs/1812.09449|arxiv_title|A Survey on Deep Learning for Named Entity Recognition
+https://arxiv.org/abs/1812.09449|arxiv_published|2018-12-22T04:54:13Z
+https://arxiv.org/abs/1812.09449|arxiv_num|1812.09449
+https://developers.google.com/machine-learning/rules-of-ml/|creationDate|2018-05-21
+https://developers.google.com/machine-learning/rules-of-ml/|tag|http://www.semanlink.net/tag/google
+https://developers.google.com/machine-learning/rules-of-ml/|tag|http://www.semanlink.net/tag/machine_learning
+https://developers.google.com/machine-learning/rules-of-ml/|title|Règles du machine learning : Google Developers
+https://developers.google.com/machine-learning/rules-of-ml/|creationTime|2018-05-21T10:43:52Z
+http://www.seco.tkk.fi/publications/2006/holi-hyvonen-modeling-uncertainty-in-2006.pdf|creationDate|2006-12-06
+http://www.seco.tkk.fi/publications/2006/holi-hyvonen-modeling-uncertainty-in-2006.pdf|tag|http://www.semanlink.net/tag/reseaux_bayesiens
+http://www.seco.tkk.fi/publications/2006/holi-hyvonen-modeling-uncertainty-in-2006.pdf|tag|http://www.semanlink.net/tag/taxonomies
+http://www.seco.tkk.fi/publications/2006/holi-hyvonen-modeling-uncertainty-in-2006.pdf|tag|http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web
+http://www.seco.tkk.fi/publications/2006/holi-hyvonen-modeling-uncertainty-in-2006.pdf|tag|http://www.semanlink.net/tag/information_retrieval
+http://www.seco.tkk.fi/publications/2006/holi-hyvonen-modeling-uncertainty-in-2006.pdf|comment|Information retrieval systems have to deal with uncertain knowledge and query results should reflect this uncertainty in some manner. We present a new probabilistic method to approach the problem. In our method, degrees of subsumption, i.e., overlap between concepts can be modeled and computed efficiently using Bayesian networks based on RDF(S) ontologies.
+http://www.seco.tkk.fi/publications/2006/holi-hyvonen-modeling-uncertainty-in-2006.pdf|title|Modeling Uncertainty in Semantic Web Taxonomies
+http://www.w3.org/2012/11/web-and-automotive/submissions/webautomotive1_submission_24.pdf|creationDate|2012-11-12
+http://www.w3.org/2012/11/web-and-automotive/submissions/webautomotive1_submission_24.pdf|tag|http://www.semanlink.net/tag/automotive_and_web_technologies
+http://www.w3.org/2012/11/web-and-automotive/submissions/webautomotive1_submission_24.pdf|tag|http://www.semanlink.net/tag/automobile
+http://www.w3.org/2012/11/web-and-automotive/submissions/webautomotive1_submission_24.pdf|tag|http://www.semanlink.net/tag/link_to_me
+http://www.w3.org/2012/11/web-and-automotive/submissions/webautomotive1_submission_24.pdf|title|The Architecture of Future Automotive Applications based on Web Technologies
+http://www.w3.org/2012/11/web-and-automotive/submissions/webautomotive1_submission_24.pdf|creationTime|2012-11-12T14:37:36Z
+http://community.ofset.org/wiki/Main_Page|creationDate|2005-10-11
+http://community.ofset.org/wiki/Main_Page|tag|http://www.semanlink.net/tag/education
+http://community.ofset.org/wiki/Main_Page|tag|http://www.semanlink.net/tag/wiki
+http://community.ofset.org/wiki/Main_Page|tag|http://www.semanlink.net/tag/jean_paul
+http://community.ofset.org/wiki/Main_Page|comment|OFSET is a community of volunteers willing to develop and to enhance free software for education.
+http://community.ofset.org/wiki/Main_Page|title|OFSET Wiki
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-02.pdf|creationDate|2012-04-16
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-02.pdf|tag|http://www.semanlink.net/tag/nlp
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-02.pdf|tag|http://www.semanlink.net/tag/ldow2012
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-02.pdf|tag|http://www.semanlink.net/tag/linked_data
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-02.pdf|comment|"NERD, an API and a front-end user interface powered by an ontology to unify various named entity extractors
+NIF: AN NLP INTERCHANGE FORMAT + +" +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-02.pdf|title|NERD meets NIF: Lifting NLP Extraction Results to the Linked Data Cloud +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-02.pdf|creationTime|2012-04-16T09:35:13Z +https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601|creationDate|2018-11-17 +https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601|comment|"blog post about [this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1811.06031) +" +https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601|relatedDoc|https://arxiv.org/abs/1811.06031 +https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601|title|HMTL: Multi-task learning for state of the art NLP – dair.ai – Medium +https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601|creationTime|2018-11-17T13:22:12Z +https://www.quora.com/Usually-RNNs-are-used-for-NLP-when-do-CNNs-in-NLP-make-sense|creationDate|2017-11-06 +https://www.quora.com/Usually-RNNs-are-used-for-NLP-when-do-CNNs-in-NLP-make-sense|tag|http://www.semanlink.net/tag/nn_4_nlp +https://www.quora.com/Usually-RNNs-are-used-for-NLP-when-do-CNNs-in-NLP-make-sense|tag|http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp +https://www.quora.com/Usually-RNNs-are-used-for-NLP-when-do-CNNs-in-NLP-make-sense|comment|> In fact the emerging consensus is that even for NLP, CNNs beat RNNs! +https://www.quora.com/Usually-RNNs-are-used-for-NLP-when-do-CNNs-in-NLP-make-sense|title|Usually RNNs are used for NLP, when do CNNs in NLP make sense? - Quora +https://www.quora.com/Usually-RNNs-are-used-for-NLP-when-do-CNNs-in-NLP-make-sense|creationTime|2017-11-06T19:04:57Z +http://en.wikipedia.org/wiki/Percy_Schmeiser|creationDate|2007-10-23 +http://en.wikipedia.org/wiki/Percy_Schmeiser|tag|http://www.semanlink.net/tag/colza_transgenique +http://en.wikipedia.org/wiki/Percy_Schmeiser|tag|http://www.semanlink.net/tag/justice +http://en.wikipedia.org/wiki/Percy_Schmeiser|tag|http://www.semanlink.net/tag/monsanto +http://en.wikipedia.org/wiki/Percy_Schmeiser|title|Percy Schmeiser - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Percy_Schmeiser|creationTime|2007-10-23T00:24:33Z +http://lists.w3.org/Archives/Public/public-esw-thes/2009Nov/0000.html|creationDate|2010-12-27 +http://lists.w3.org/Archives/Public/public-esw-thes/2009Nov/0000.html|tag|http://www.semanlink.net/tag/skos +http://lists.w3.org/Archives/Public/public-esw-thes/2009Nov/0000.html|comment|"Now I'm asking, what's the harm in identifying both the foaf:Person +and the implicit skos:Concept with the same URI, e.g., http://dbpedia.org/resource/Michelle_Obama? +" +http://lists.w3.org/Archives/Public/public-esw-thes/2009Nov/0000.html|title|Using DBpedia resources as skos:Concepts? 
+http://lists.w3.org/Archives/Public/public-esw-thes/2009Nov/0000.html|creationTime|2010-12-27T18:40:01Z +http://www.ontotext.com/owlim|creationDate|2012-06-13 +http://www.ontotext.com/owlim|tag|http://www.semanlink.net/tag/jena_and_database +http://www.ontotext.com/owlim|tag|http://www.semanlink.net/tag/triplestore +http://www.ontotext.com/owlim|comment|OWLIM is a family of semantic repositories, or RDF database management systems, with the following characteristics: native RDF engines, implemented in Java delivering full performance through both Sesame and Jena robust support for the semantics of RDFS, OWL 2 RL and OWL 2 QL best scalability, loading and query evaluation performance +http://www.ontotext.com/owlim|title|OWLIM Ontotext +http://www.ontotext.com/owlim|creationTime|2012-06-13T14:09:07Z +http://maisouvaleweb.fr/la-blockchain-signera-t-elle-la-fin-du-capitalisme/?utm_content=bufferbb3f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-09-16 +http://maisouvaleweb.fr/la-blockchain-signera-t-elle-la-fin-du-capitalisme/?utm_content=bufferbb3f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/capitalisme +http://maisouvaleweb.fr/la-blockchain-signera-t-elle-la-fin-du-capitalisme/?utm_content=bufferbb3f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/blockchain +http://maisouvaleweb.fr/la-blockchain-signera-t-elle-la-fin-du-capitalisme/?utm_content=bufferbb3f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|La Blockchain signera-t-elle la fin du capitalisme ? - +http://maisouvaleweb.fr/la-blockchain-signera-t-elle-la-fin-du-capitalisme/?utm_content=bufferbb3f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-09-16T00:35:40Z +https://www.coursera.org/course/compneuro|creationDate|2013-11-30 +https://www.coursera.org/course/compneuro|tag|http://www.semanlink.net/tag/coursera_computational_neuroscience +https://www.coursera.org/course/compneuro|title|Computational Neuroscience Coursera +https://www.coursera.org/course/compneuro|creationTime|2013-11-30T21:51:42Z +http://blogs.sun.com/bblfish/entry/how_applying_xml_to_data|creationDate|2008-05-08 +http://blogs.sun.com/bblfish/entry/how_applying_xml_to_data|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/bblfish/entry/how_applying_xml_to_data|tag|http://www.semanlink.net/tag/rdf_vs_xml +http://blogs.sun.com/bblfish/entry/how_applying_xml_to_data|title|All XML roads lead to RDF +http://blogs.sun.com/bblfish/entry/how_applying_xml_to_data|creationTime|2008-05-08T14:03:26Z +http://www.magyaradam.com/|creationDate|2014-06-24 +http://www.magyaradam.com/|tag|http://www.semanlink.net/tag/photo +http://www.magyaradam.com/|tag|http://www.semanlink.net/tag/temps +http://www.magyaradam.com/|tag|http://www.semanlink.net/tag/art +http://www.magyaradam.com/|title|Adam Magyar +http://www.magyaradam.com/|creationTime|2014-06-24T22:49:09Z +http://maven.apache.org/plugins/maven-war-plugin/|creationDate|2012-08-14 +http://maven.apache.org/plugins/maven-war-plugin/|tag|http://www.semanlink.net/tag/maven +http://maven.apache.org/plugins/maven-war-plugin/|title|Maven WAR Plugin +http://maven.apache.org/plugins/maven-war-plugin/|creationTime|2012-08-14T19:15:45Z +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104428|creationDate|2011-05-13 
+http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104428|tag|http://www.semanlink.net/tag/heredite +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104428|tag|http://www.semanlink.net/tag/evolution +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104428|tag|http://www.semanlink.net/tag/jean_claude_ameisen +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104428|comment|La vérité étant plus étrange que la fiction, la biologie a découvert la réalité de l’existence d’êtres vivants formés de combinaisons d’autres êtres vivants. Lynn Margulis et Dorian Sagan. Microcosmos : Quatre milliards d’années d’évolution à partir de nos ancêtres microbiens. +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104428|title|France Inter > Sur les épaules de Darwin > A la recherche des mystères de l'hérédité (3) +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104428|creationTime|2011-05-13T00:32:18Z +https://cwiki.apache.org/confluence/display/solr/Result+Clustering#ResultClustering-PerformanceConsiderations|creationDate|2017-05-23 +https://cwiki.apache.org/confluence/display/solr/Result+Clustering#ResultClustering-PerformanceConsiderations|tag|http://www.semanlink.net/tag/clustering_of_text_documents +https://cwiki.apache.org/confluence/display/solr/Result+Clustering#ResultClustering-PerformanceConsiderations|tag|http://www.semanlink.net/tag/solr_and_nlp +https://cwiki.apache.org/confluence/display/solr/Result+Clustering#ResultClustering-PerformanceConsiderations|tag|http://www.semanlink.net/tag/carrot2 +https://cwiki.apache.org/confluence/display/solr/Result+Clustering#ResultClustering-PerformanceConsiderations|title|Result Clustering - Apache Solr Reference Guide - Apache Software Foundation +https://cwiki.apache.org/confluence/display/solr/Result+Clustering#ResultClustering-PerformanceConsiderations|creationTime|2017-05-23T11:57:01Z +http://www.nytimes.com/2008/08/12/business/worldbusiness/12indiawall.html|creationDate|2008-09-11 +http://www.nytimes.com/2008/08/12/business/worldbusiness/12indiawall.html|tag|http://www.semanlink.net/tag/outsourcing +http://www.nytimes.com/2008/08/12/business/worldbusiness/12indiawall.html|tag|http://www.semanlink.net/tag/banque +http://www.nytimes.com/2008/08/12/business/worldbusiness/12indiawall.html|tag|http://www.semanlink.net/tag/inde +http://www.nytimes.com/2008/08/12/business/worldbusiness/12indiawall.html|title|Cost-Cutting in New York, but a Boom in India +http://www.nytimes.com/2008/08/12/business/worldbusiness/12indiawall.html|creationTime|2008-09-11T17:05:13Z +https://www.analyticsvidhya.com/blog/2016/04/complete-tutorial-tree-based-modeling-scratch-in-python/|creationDate|2017-06-19 +https://www.analyticsvidhya.com/blog/2016/04/complete-tutorial-tree-based-modeling-scratch-in-python/|tag|http://www.semanlink.net/tag/python_4_data_science +https://www.analyticsvidhya.com/blog/2016/04/complete-tutorial-tree-based-modeling-scratch-in-python/|tag|http://www.semanlink.net/tag/decision_tree_learning +https://www.analyticsvidhya.com/blog/2016/04/complete-tutorial-tree-based-modeling-scratch-in-python/|tag|http://www.semanlink.net/tag/r +https://www.analyticsvidhya.com/blog/2016/04/complete-tutorial-tree-based-modeling-scratch-in-python/|tag|http://www.semanlink.net/tag/tutorial 
+https://www.analyticsvidhya.com/blog/2016/04/complete-tutorial-tree-based-modeling-scratch-in-python/|title|A Complete Tutorial on Tree Based Modeling from Scratch (in R & Python) +https://www.analyticsvidhya.com/blog/2016/04/complete-tutorial-tree-based-modeling-scratch-in-python/|creationTime|2017-06-19T11:14:51Z +https://sites.google.com/site/knowxtext/root/sigir-2018-tutorial|creationDate|2018-07-09 +https://sites.google.com/site/knowxtext/root/sigir-2018-tutorial|tag|http://www.semanlink.net/tag/good +https://sites.google.com/site/knowxtext/root/sigir-2018-tutorial|tag|http://www.semanlink.net/tag/knowledge_extraction +https://sites.google.com/site/knowxtext/root/sigir-2018-tutorial|tag|http://www.semanlink.net/tag/tutorial +https://sites.google.com/site/knowxtext/root/sigir-2018-tutorial|tag|http://www.semanlink.net/tag/information_retrieval +https://sites.google.com/site/knowxtext/root/sigir-2018-tutorial|tag|http://www.semanlink.net/tag/embeddings_in_ir +https://sites.google.com/site/knowxtext/root/sigir-2018-tutorial|title|SIGIR 2018 Tutorial - Knowledge Extraction and Inference from Text: Shallow, Deep, and Everything in Between +https://sites.google.com/site/knowxtext/root/sigir-2018-tutorial|creationTime|2018-07-09T18:29:04Z +http://www.jneurosci.org/content/38/44/9563|creationDate|2018-12-06 +http://www.jneurosci.org/content/38/44/9563|tag|http://www.semanlink.net/tag/vision +http://www.jneurosci.org/content/38/44/9563|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.jneurosci.org/content/38/44/9563|tag|http://www.semanlink.net/tag/images_stereoscopiques +http://www.jneurosci.org/content/38/44/9563|tag|http://www.semanlink.net/tag/hebbian_theory +http://www.jneurosci.org/content/38/44/9563|comment|"> Our approach allows us to view early neural selectivity, not as an optimization problem, but as an emergent property driven by biological rules of plasticity. 
+ +[Blog post (Biologie au CNRS)](/doc/?uri=http%3A%2F%2Fwww4.cnrs-dir.fr%2Finsb%2Frecherche%2Fparutions%2Farticles2018%2Fb-cottereau.html)" +http://www.jneurosci.org/content/38/44/9563|relatedDoc|http://www4.cnrs-dir.fr/insb/recherche/parutions/articles2018/b-cottereau.html +http://www.jneurosci.org/content/38/44/9563|title|Emergence of Binocular Disparity Selectivity through Hebbian Learning Journal of Neuroscience +http://www.jneurosci.org/content/38/44/9563|creationTime|2018-12-06T08:45:35Z +https://www.greenpeace.fr/carte-fermes-usines/|creationDate|2018-11-28 +https://www.greenpeace.fr/carte-fermes-usines/|tag|http://www.semanlink.net/tag/elevage_industriel +https://www.greenpeace.fr/carte-fermes-usines/|tag|http://www.semanlink.net/tag/greenpeace +https://www.greenpeace.fr/carte-fermes-usines/|title|La carte des fermes-usines - Greenpeace France +https://www.greenpeace.fr/carte-fermes-usines/|creationTime|2018-11-28T13:34:59Z +http://cran.r-project.org/doc/manuals/R-intro.html|creationDate|2013-12-03 +http://cran.r-project.org/doc/manuals/R-intro.html|tag|http://www.semanlink.net/tag/tutorial +http://cran.r-project.org/doc/manuals/R-intro.html|tag|http://www.semanlink.net/tag/r +http://cran.r-project.org/doc/manuals/R-intro.html|title|An Introduction to R +http://cran.r-project.org/doc/manuals/R-intro.html|creationTime|2013-12-03T11:13:10Z +https://www.opensemanticsearch.org/|creationDate|2018-04-18 +https://www.opensemanticsearch.org/|tag|http://www.semanlink.net/tag/elasticsearch +https://www.opensemanticsearch.org/|tag|http://www.semanlink.net/tag/semantic_web_search_engine +https://www.opensemanticsearch.org/|tag|http://www.semanlink.net/tag/semantic_web_tools +https://www.opensemanticsearch.org/|title|Open Semantic Search: Your own search engine for documents, images, tables, files, intranet & news +https://www.opensemanticsearch.org/|creationTime|2018-04-18T10:00:40Z +http://www.bbc.com/news/technology-30998361|creationDate|2015-02-16 +http://www.bbc.com/news/technology-30998361|tag|http://www.semanlink.net/tag/driverless_car +http://www.bbc.com/news/technology-30998361|tag|http://www.semanlink.net/tag/unknown_tag +http://www.bbc.com/news/technology-30998361|title|BBC News - Could driverless cars own themselves? +http://www.bbc.com/news/technology-30998361|creationTime|2015-02-16T22:50:18Z +https://arxiv.org/pdf/1004.5370.pdf|creationDate|2017-11-07 +https://arxiv.org/pdf/1004.5370.pdf|tag|http://www.semanlink.net/tag/semantic_hashing +https://arxiv.org/pdf/1004.5370.pdf|tag|http://www.semanlink.net/tag/nearest_neighbor_search +https://arxiv.org/pdf/1004.5370.pdf|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/pdf/1004.5370.pdf|tag|http://www.semanlink.net/tag/learning_to_hash +https://arxiv.org/pdf/1004.5370.pdf|arxiv_author|Dell Zhang +https://arxiv.org/pdf/1004.5370.pdf|arxiv_author|Jinsong Lu +https://arxiv.org/pdf/1004.5370.pdf|arxiv_author|Jun Wang +https://arxiv.org/pdf/1004.5370.pdf|arxiv_author|Deng Cai +https://arxiv.org/pdf/1004.5370.pdf|comment|"Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: +first find the optimal l-bit binary codes for all documents in +the given corpus via unsupervised learning, then train +l classifiers via supervised learning to predict the l-bit code +for any query document unseen before. 
+ +(méthode résumée [ici](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) + + +" +https://arxiv.org/pdf/1004.5370.pdf|relatedDoc|https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4 +https://arxiv.org/pdf/1004.5370.pdf|title|[1004.5370] Self-Taught Hashing for Fast Similarity Search +https://arxiv.org/pdf/1004.5370.pdf|creationTime|2017-11-07T11:48:17Z +https://arxiv.org/pdf/1004.5370.pdf|arxiv_summary|"The ability of fast similarity search at large scale is of great importance +to many Information Retrieval (IR) applications. A promising way to accelerate +similarity search is semantic hashing which designs compact binary codes for a +large number of documents so that semantically similar documents are mapped to +similar codes (within a short Hamming distance). Although some recently +proposed techniques are able to generate high-quality codes for documents known +in advance, obtaining the codes for previously unseen documents remains to be a +very challenging problem. In this paper, we emphasise this issue and propose a +novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the +optimal $l$-bit binary codes for all documents in the given corpus via +unsupervised learning, and then train $l$ classifiers via supervised learning +to predict the $l$-bit code for any query document unseen before. Our +experiments on three real-world text datasets show that the proposed approach +using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine +(SVM) outperforms state-of-the-art techniques significantly." +https://arxiv.org/pdf/1004.5370.pdf|arxiv_firstAuthor|Dell Zhang +https://arxiv.org/pdf/1004.5370.pdf|arxiv_updated|2010-04-29T19:25:17Z +https://arxiv.org/pdf/1004.5370.pdf|arxiv_title|Self-Taught Hashing for Fast Similarity Search +https://arxiv.org/pdf/1004.5370.pdf|arxiv_published|2010-04-29T19:25:17Z +https://arxiv.org/pdf/1004.5370.pdf|arxiv_num|1004.5370 +http://michaelmoore.com/trumpwillwin/|creationDate|2016-07-24 +http://michaelmoore.com/trumpwillwin/|tag|http://www.semanlink.net/tag/michael_moore +http://michaelmoore.com/trumpwillwin/|tag|http://www.semanlink.net/tag/trump +http://michaelmoore.com/trumpwillwin/|title|5 Reasons Why Trump Will Win MICHAEL MOORE +http://michaelmoore.com/trumpwillwin/|creationTime|2016-07-24T14:08:16Z +https://arxiv.org/abs/1810.00438|creationDate|2018-10-06 +https://arxiv.org/abs/1810.00438|tag|http://www.semanlink.net/tag/nlp_microsoft +https://arxiv.org/abs/1810.00438|tag|http://www.semanlink.net/tag/good +https://arxiv.org/abs/1810.00438|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1810.00438|tag|http://www.semanlink.net/tag/sentence_embeddings +https://arxiv.org/abs/1810.00438|arxiv_author|Chenguang Zhu +https://arxiv.org/abs/1810.00438|arxiv_author|Ziyi Yang +https://arxiv.org/abs/1810.00438|arxiv_author|Weizhu Chen +https://arxiv.org/abs/1810.00438|comment|"**training-free approach for building sentence representations**, ""Geometric Embedding"" (GEM), based on the **geometric structure** of word embedding space. + +> we build an orthogonal basis of the subspace spanned by a word and its surrounding context in a sentence. **We model the semantic meaning of a word in a sentence** based on two aspects. One is its relatedness to the word vector subspace already spanned by its contextual words. 
The other is the word’s novel semantic meaning which shall be introduced as a new basis vector perpendicular to this existing subspace
+
+[on www.groundai.com](https://www.groundai.com/project/zero-training-sentence-embedding-via-orthogonal-basis/)
+
+[Open Review](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DrJedbn0ctQ) ; [Related to this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1704.05358)
+
+
+"
+https://arxiv.org/abs/1810.00438|relatedDoc|https://openreview.net/forum?id=rJedbn0ctQ
+https://arxiv.org/abs/1810.00438|relatedDoc|https://arxiv.org/abs/1704.05358
+https://arxiv.org/abs/1810.00438|title|[1810.00438] Parameter-free Sentence Embedding via Orthogonal Basis
+https://arxiv.org/abs/1810.00438|creationTime|2018-10-06T18:01:18Z
+https://arxiv.org/abs/1810.00438|arxiv_summary|"We propose a simple and robust non-parameterized approach for building
+sentence representations. Inspired by the Gram-Schmidt Process in geometric
+theory, we build an orthogonal basis of the subspace spanned by a word and its
+surrounding context in a sentence. We model the semantic meaning of a word in a
+sentence based on two aspects. One is its relatedness to the word vector
+subspace already spanned by its contextual words. The other is the word's novel
+semantic meaning which shall be introduced as a new basis vector perpendicular
+to this existing subspace. Following this motivation, we develop an innovative
+method based on orthogonal basis to combine pre-trained word embeddings into
+sentence representations. This approach requires zero parameters, along with
+efficient inference performance. We evaluate our approach on 11 downstream NLP
+tasks. Our model shows superior performance compared with non-parameterized
+alternatives and it is competitive to other approaches relying on either large
+amounts of labelled data or prolonged training time."
+https://arxiv.org/abs/1810.00438|arxiv_firstAuthor|Ziyi Yang +https://arxiv.org/abs/1810.00438|arxiv_updated|2019-12-06T05:01:36Z +https://arxiv.org/abs/1810.00438|arxiv_title|Parameter-free Sentence Embedding via Orthogonal Basis +https://arxiv.org/abs/1810.00438|arxiv_published|2018-09-30T18:26:30Z +https://arxiv.org/abs/1810.00438|arxiv_num|1810.00438 +http://www.vogella.com/articles/Python/article.html|creationDate|2013-04-22 +http://www.vogella.com/articles/Python/article.html|tag|http://www.semanlink.net/tag/eclipse +http://www.vogella.com/articles/Python/article.html|tag|http://www.semanlink.net/tag/pydev +http://www.vogella.com/articles/Python/article.html|tag|http://www.semanlink.net/tag/python +http://www.vogella.com/articles/Python/article.html|tag|http://www.semanlink.net/tag/tutorial +http://www.vogella.com/articles/Python/article.html|title|Python Development with PyDev and Eclipse - Tutorial +http://www.vogella.com/articles/Python/article.html|creationTime|2013-04-22T10:46:41Z +http://techcrunch.com/2014/07/16/why-the-first-yc-backed-biotech-company-may-just-be-the-future-of-pharma/|creationDate|2014-07-17 +http://techcrunch.com/2014/07/16/why-the-first-yc-backed-biotech-company-may-just-be-the-future-of-pharma/|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://techcrunch.com/2014/07/16/why-the-first-yc-backed-biotech-company-may-just-be-the-future-of-pharma/|tag|http://www.semanlink.net/tag/industrie_pharmaceutique +http://techcrunch.com/2014/07/16/why-the-first-yc-backed-biotech-company-may-just-be-the-future-of-pharma/|tag|http://www.semanlink.net/tag/biotech_industry +http://techcrunch.com/2014/07/16/why-the-first-yc-backed-biotech-company-may-just-be-the-future-of-pharma/|title|Why The First YC-Backed Biotech Company May Just Be The Future Of Pharma TechCrunch +http://techcrunch.com/2014/07/16/why-the-first-yc-backed-biotech-company-may-just-be-the-future-of-pharma/|creationTime|2014-07-17T14:54:01Z +http://virtuoso.openlinksw.com/Whitepapers/html/rdf_views/virtuoso_rdf_views_example.html|creationDate|2009-02-06 +http://virtuoso.openlinksw.com/Whitepapers/html/rdf_views/virtuoso_rdf_views_example.html|tag|http://www.semanlink.net/tag/virtuoso_doc +http://virtuoso.openlinksw.com/Whitepapers/html/rdf_views/virtuoso_rdf_views_example.html|tag|http://www.semanlink.net/tag/database_to_rdf_mapping +http://virtuoso.openlinksw.com/Whitepapers/html/rdf_views/virtuoso_rdf_views_example.html|title|Mapping Relation Data to RDF with Virtuoso's RDF Views +http://virtuoso.openlinksw.com/Whitepapers/html/rdf_views/virtuoso_rdf_views_example.html|creationTime|2009-02-06T22:05:36Z +http://proceedings.mlr.press/v37/kusnerb15.pdf|creationDate|2017-06-09 +http://proceedings.mlr.press/v37/kusnerb15.pdf|tag|http://www.semanlink.net/tag/word_mover_s_distance +http://proceedings.mlr.press/v37/kusnerb15.pdf|title|From Word Embeddings To Document Distances (Kusner 2015) +http://proceedings.mlr.press/v37/kusnerb15.pdf|creationTime|2017-06-09T14:27:25Z +http://www.w3.org/TR/2006/NOTE-sw-oosd-primer-20060309/|creationDate|2006-03-11 +http://www.w3.org/TR/2006/NOTE-sw-oosd-primer-20060309/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/TR/2006/NOTE-sw-oosd-primer-20060309/|tag|http://www.semanlink.net/tag/semantic_web_and_oop +http://www.w3.org/TR/2006/NOTE-sw-oosd-primer-20060309/|comment|"This note introduces Semantic Web languages such as RDF +Schema and OWL, and shows how they can be used in tandem with mainstream +object-oriented languages. 
We show that the Semantic Web can serve as a platform +on which domain models can be created, shared and reused." +http://www.w3.org/TR/2006/NOTE-sw-oosd-primer-20060309/|title|A Semantic Web Primer for Object-Oriented Software Developers +http://datalift.org/fr/index.html|creationDate|2010-07-09 +http://datalift.org/fr/index.html|tag|http://www.semanlink.net/tag/linked_data +http://datalift.org/fr/index.html|comment|Datalift est un projet de recherche expérimentale financé par l'agence nationale de la recherche. Le but du projet est de développer une plateforme pour publier et interconnecter des jeux de données sur le web de données. Datalift à la fois publie des jeux de données provenant d'un réseau de partenaires et propose un ensemble d'outils facilitant le processus de publication de jeux de données. +http://datalift.org/fr/index.html|title|DataLift - un ascenseur pour vos données +http://datalift.org/fr/index.html|creationTime|2010-07-09T11:34:05Z +http://moalquraishi.wordpress.com/2014/05/25/what-does-a-neural-network-actually-do/|creationDate|2014-05-30 +http://moalquraishi.wordpress.com/2014/05/25/what-does-a-neural-network-actually-do/|tag|http://www.semanlink.net/tag/artificial_neural_network +http://moalquraishi.wordpress.com/2014/05/25/what-does-a-neural-network-actually-do/|title|What Does a Neural Network Actually Do? « Some Thoughts on a Mysterious Universe +http://moalquraishi.wordpress.com/2014/05/25/what-does-a-neural-network-actually-do/|creationTime|2014-05-30T15:21:37Z +http://www2.cnrs.fr/en/2903.htm|creationDate|2017-04-03 +http://www2.cnrs.fr/en/2903.htm|tag|http://www.semanlink.net/tag/cnrs +http://www2.cnrs.fr/en/2903.htm|tag|http://www.semanlink.net/tag/memristor +http://www2.cnrs.fr/en/2903.htm|tag|http://www.semanlink.net/tag/julie_grollier +http://www2.cnrs.fr/en/2903.htm|title|Electronic synapses that can learn : towards an artificial brain ? - CNRS Web site +http://www2.cnrs.fr/en/2903.htm|creationTime|2017-04-03T17:55:50Z +http://mathwithbaddrawings.com/2013/12/02/headlines-from-a-mathematically-literate-world/|creationDate|2013-12-03 +http://mathwithbaddrawings.com/2013/12/02/headlines-from-a-mathematically-literate-world/|tag|http://www.semanlink.net/tag/mathematiques +http://mathwithbaddrawings.com/2013/12/02/headlines-from-a-mathematically-literate-world/|tag|http://www.semanlink.net/tag/rigolo +http://mathwithbaddrawings.com/2013/12/02/headlines-from-a-mathematically-literate-world/|title|Headlines from a Mathematically Literate World Math with Bad Drawings +http://mathwithbaddrawings.com/2013/12/02/headlines-from-a-mathematically-literate-world/|creationTime|2013-12-03T13:43:44Z +http://topquadrant.com/solutions/ent_vocab_net.html|creationDate|2010-10-26 +http://topquadrant.com/solutions/ent_vocab_net.html|tag|http://www.semanlink.net/tag/topbraid +http://topquadrant.com/solutions/ent_vocab_net.html|comment|TopBraid Enterprise Vocabulary Net (TopBraid EVN) is a web-based solution for simplified development and management of interconnected controlled vocabularies. 
+http://topquadrant.com/solutions/ent_vocab_net.html|title|TopQuadrant Solutions Enterprise Vocabulary Net +http://topquadrant.com/solutions/ent_vocab_net.html|creationTime|2010-10-26T22:50:26Z +http://www.lemonde.fr/ameriques/article/2012/03/20/derriere-la-video-kony-2012-le-marketing-de-l-emotion_1672757_3222.html|creationDate|2012-03-20 +http://www.lemonde.fr/ameriques/article/2012/03/20/derriere-la-video-kony-2012-le-marketing-de-l-emotion_1672757_3222.html|tag|http://www.semanlink.net/tag/lord_s_resistance_army +http://www.lemonde.fr/ameriques/article/2012/03/20/derriere-la-video-kony-2012-le-marketing-de-l-emotion_1672757_3222.html|tag|http://www.semanlink.net/tag/marketing +http://www.lemonde.fr/ameriques/article/2012/03/20/derriere-la-video-kony-2012-le-marketing-de-l-emotion_1672757_3222.html|tag|http://www.semanlink.net/tag/social_networks +http://www.lemonde.fr/ameriques/article/2012/03/20/derriere-la-video-kony-2012-le-marketing-de-l-emotion_1672757_3222.html|title|"Derrière la vidéo ""Kony 2012"", le marketing de l'émotion" +http://www.lemonde.fr/ameriques/article/2012/03/20/derriere-la-video-kony-2012-le-marketing-de-l-emotion_1672757_3222.html|creationTime|2012-03-20T18:05:32Z +http://www.wired.com/2016/03/mystery-minimal-cell-craig-venters-new-synthetic-life-form/|creationDate|2016-03-27 +http://www.wired.com/2016/03/mystery-minimal-cell-craig-venters-new-synthetic-life-form/|tag|http://www.semanlink.net/tag/synthetic_life +http://www.wired.com/2016/03/mystery-minimal-cell-craig-venters-new-synthetic-life-form/|tag|http://www.semanlink.net/tag/craig_venter +http://www.wired.com/2016/03/mystery-minimal-cell-craig-venters-new-synthetic-life-form/|title|The Mystery of the Minimal Cell, Craig Venter’s New Synthetic Life Form WIRED +http://www.wired.com/2016/03/mystery-minimal-cell-craig-venters-new-synthetic-life-form/|creationTime|2016-03-27T01:13:20Z +http://fgiasson.com/blog/index.php/2014/07/21/new-umbel-concept-noun-tagger-web-service-other-improvements/|creationDate|2014-07-28 +http://fgiasson.com/blog/index.php/2014/07/21/new-umbel-concept-noun-tagger-web-service-other-improvements/|tag|http://www.semanlink.net/tag/umbel +http://fgiasson.com/blog/index.php/2014/07/21/new-umbel-concept-noun-tagger-web-service-other-improvements/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2014/07/21/new-umbel-concept-noun-tagger-web-service-other-improvements/|title|New UMBEL Concept Noun Tagger Web Service & Other Improvements at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2014/07/21/new-umbel-concept-noun-tagger-web-service-other-improvements/|creationTime|2014-07-28T01:47:17Z +http://www.college-de-france.fr/site/stanislas-dehaene/course-2012-02-14-09h30.htm|creationDate|2013-12-22 +http://www.college-de-france.fr/site/stanislas-dehaene/course-2012-02-14-09h30.htm|tag|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.college-de-france.fr/site/stanislas-dehaene/course-2012-02-14-09h30.htm|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.college-de-france.fr/site/stanislas-dehaene/course-2012-02-14-09h30.htm|title|L'implémentation neuronale des mécanismes Bayésiens - Psychologie cognitive expérimentale - Stanislas Dehaene - Collège de France +http://www.college-de-france.fr/site/stanislas-dehaene/course-2012-02-14-09h30.htm|creationTime|2013-12-22T15:41:11Z +http://labs.systemone.at/wikipedia3|creationDate|2006-04-24 +http://labs.systemone.at/wikipedia3|tag|http://www.semanlink.net/tag/wikipedia 
+http://labs.systemone.at/wikipedia3|tag|http://www.semanlink.net/tag/huge_rdf_data_source +http://labs.systemone.at/wikipedia3|tag|http://www.semanlink.net/tag/rdf +http://labs.systemone.at/wikipedia3|comment|Wikipedia³ is a conversion of the English Wikipedia into RDF. It's a monthly updated dataset containing around 47 million triples. +http://labs.systemone.at/wikipedia3|title|System One - Wikipedia3 +http://sourceforge.net/scm/?type=cvs&group_id=40417|creationDate|2009-05-28 +http://sourceforge.net/scm/?type=cvs&group_id=40417|tag|http://www.semanlink.net/tag/cvs +http://sourceforge.net/scm/?type=cvs&group_id=40417|tag|http://www.semanlink.net/tag/jena_dev +http://sourceforge.net/scm/?type=cvs&group_id=40417|title|SourceForge.net: Jena: SCM +http://sourceforge.net/scm/?type=cvs&group_id=40417|creationTime|2009-05-28T18:29:49Z +http://fr.wikipedia.org/wiki/Thomas_l%27imposteur_(film,_1965)|creationDate|2013-10-14 +http://fr.wikipedia.org/wiki/Thomas_l%27imposteur_(film,_1965)|tag|http://www.semanlink.net/tag/film_de_guerre +http://fr.wikipedia.org/wiki/Thomas_l%27imposteur_(film,_1965)|tag|http://www.semanlink.net/tag/film_francais +http://fr.wikipedia.org/wiki/Thomas_l%27imposteur_(film,_1965)|tag|http://www.semanlink.net/tag/1ere_guerre_mondiale +http://fr.wikipedia.org/wiki/Thomas_l%27imposteur_(film,_1965)|tag|http://www.semanlink.net/tag/cocteau +http://fr.wikipedia.org/wiki/Thomas_l%27imposteur_(film,_1965)|title|Thomas l'imposteur (film, 1965) +http://fr.wikipedia.org/wiki/Thomas_l%27imposteur_(film,_1965)|creationTime|2013-10-14T01:38:39Z +http://2010.lucene-eurocon.org/slides/Integration-of-Natural-Language-Processing-tools-with-Solr_Joan-Codina-Filba.pdf|creationDate|2014-03-15 +http://2010.lucene-eurocon.org/slides/Integration-of-Natural-Language-Processing-tools-with-Solr_Joan-Codina-Filba.pdf|tag|http://www.semanlink.net/tag/solr_and_nlp +http://2010.lucene-eurocon.org/slides/Integration-of-Natural-Language-Processing-tools-with-Solr_Joan-Codina-Filba.pdf|title|Integration of Natural Language Processing tools with Solr +http://2010.lucene-eurocon.org/slides/Integration-of-Natural-Language-Processing-tools-with-Solr_Joan-Codina-Filba.pdf|creationTime|2014-03-15T14:11:20Z +https://becominghuman.ai/cheat-sheets-for-ai-neural-networks-machine-learning-deep-learning-big-data-678c51b4b463|creationDate|2019-01-25 +https://becominghuman.ai/cheat-sheets-for-ai-neural-networks-machine-learning-deep-learning-big-data-678c51b4b463|tag|http://www.semanlink.net/tag/cheat_sheet +https://becominghuman.ai/cheat-sheets-for-ai-neural-networks-machine-learning-deep-learning-big-data-678c51b4b463|tag|http://www.semanlink.net/tag/artificial_intelligence +https://becominghuman.ai/cheat-sheets-for-ai-neural-networks-machine-learning-deep-learning-big-data-678c51b4b463|comment|The Most Complete List of Best AI Cheat Sheets +https://becominghuman.ai/cheat-sheets-for-ai-neural-networks-machine-learning-deep-learning-big-data-678c51b4b463|title|Cheat Sheets for AI, Neural Networks, Machine Learning, Deep Learning & Big Data +https://becominghuman.ai/cheat-sheets-for-ai-neural-networks-machine-learning-deep-learning-big-data-678c51b4b463|creationTime|2019-01-25T18:33:11Z +http://www.textuality.com/tag/uri-comp-4|creationDate|2007-11-17 +http://www.textuality.com/tag/uri-comp-4|tag|http://www.semanlink.net/tag/tim_bray +http://www.textuality.com/tag/uri-comp-4|tag|http://www.semanlink.net/tag/good_practice_when_generating_uris +http://www.textuality.com/tag/uri-comp-4|comment|"include ""Good Practice 
When Generating URIs""" +http://www.textuality.com/tag/uri-comp-4|title|How to Compare Uniform Resource Identifiers +http://www.textuality.com/tag/uri-comp-4|creationTime|2007-11-17T16:11:00Z +https://blogs.oracle.com/bblfish/entry/web_finger_proposals_overview|creationDate|2012-03-01 +https://blogs.oracle.com/bblfish/entry/web_finger_proposals_overview|tag|http://www.semanlink.net/tag/henry_story +https://blogs.oracle.com/bblfish/entry/web_finger_proposals_overview|tag|http://www.semanlink.net/tag/rdf_forms +https://blogs.oracle.com/bblfish/entry/web_finger_proposals_overview|title|Web Finger proposals overview (The Sun BabelFish Blog) +https://blogs.oracle.com/bblfish/entry/web_finger_proposals_overview|creationTime|2012-03-01T11:47:05Z +http://www.w3.org/2006/07/SWD/wiki/SkosDesign/ConceptSemantics|creationDate|2008-05-12 +http://www.w3.org/2006/07/SWD/wiki/SkosDesign/ConceptSemantics|tag|http://www.semanlink.net/tag/semantics_of_skos_concept +http://www.w3.org/2006/07/SWD/wiki/SkosDesign/ConceptSemantics|comment|So, for a resource of type skos:Concept, any properties of that resource (such as creator, date of modification, source etc.) should be interpreted as properties of a concept, and not as properties of some 'real world thing' that that resource may be a conceptualisation of +http://www.w3.org/2006/07/SWD/wiki/SkosDesign/ConceptSemantics|title|SkosDesign/ConceptSemantics - W3C Semantic Web Deployment Wiki +http://www.w3.org/2006/07/SWD/wiki/SkosDesign/ConceptSemantics|creationTime|2008-05-12T19:27:25Z +http://www.cse.ucsd.edu/users/dasgupta/mcgrawhill/|creationDate|2006-12-11 +http://www.cse.ucsd.edu/users/dasgupta/mcgrawhill/|tag|http://www.semanlink.net/tag/algorithmes +http://www.cse.ucsd.edu/users/dasgupta/mcgrawhill/|comment|by Sanjoy Dasgupta, Christos Papadimitriou and Umesh Vazirani. Book available as a free PDF download. +http://www.cse.ucsd.edu/users/dasgupta/mcgrawhill/|title|"""Algorithms"" (Book)" +http://blog.takipi.com/how-to-get-started-with-java-machine-learning/|creationDate|2016-07-08 +http://blog.takipi.com/how-to-get-started-with-java-machine-learning/|tag|http://www.semanlink.net/tag/machine_learning_tool +http://blog.takipi.com/how-to-get-started-with-java-machine-learning/|title|How to Get Started with Java Machine Learning Takipi Blog +http://blog.takipi.com/how-to-get-started-with-java-machine-learning/|creationTime|2016-07-08T11:45:56Z +http://www.coactus.com/blog/2006/12/validation-considered-harmful|creationDate|2006-12-16 +http://www.coactus.com/blog/2006/12/validation-considered-harmful|tag|http://www.semanlink.net/tag/dev +http://www.coactus.com/blog/2006/12/validation-considered-harmful|tag|http://www.semanlink.net/tag/validation +http://www.coactus.com/blog/2006/12/validation-considered-harmful|tag|http://www.semanlink.net/tag/validation_xml_vs_rdf +http://www.coactus.com/blog/2006/12/validation-considered-harmful|comment|On the Web, you need to be able to process messages from the future. 
http://www.coactus.com/blog/2006/12/validation-considered-harmful|title|Validation considered harmful
http://www.rfi.fr/francais/actu/articles/087/article_50669.asp|creationDate|2007-07-26
http://www.rfi.fr/francais/actu/articles/087/article_50669.asp|tag|http://www.semanlink.net/tag/niamey
http://www.rfi.fr/francais/actu/articles/087/article_50669.asp|tag|http://www.semanlink.net/tag/niger_agriculture
http://www.rfi.fr/francais/actu/articles/087/article_50669.asp|tag|http://www.semanlink.net/tag/exode_rural
http://www.rfi.fr/francais/actu/articles/087/article_50669.asp|comment|«Avec la récupération de terres pour le travail agricole, des plateaux dénudés autour de Niamey ont changé de visage en peu de temps. » Au Niger, une grande campagne de remise en culture de terres dégradées par l'érosion contribue à freiner l'exode rural dans la région de Niamey.
http://www.rfi.fr/francais/actu/articles/087/article_50669.asp|title|RFI - Niger : retour à la terre - Reportage Afrique
http://www.rfi.fr/francais/actu/articles/087/article_50669.asp|creationTime|2007-07-26T13:06:14Z
http://www.rfi.fr/francais/actu/articles/087/article_50669.asp|source|RFI
https://blog.usievents.com/interview-keren-elazari-hackers-potentiel-disruptif-dont-nous-avons-besoin/|creationDate|2017-04-30
https://blog.usievents.com/interview-keren-elazari-hackers-potentiel-disruptif-dont-nous-avons-besoin/|tag|http://www.semanlink.net/tag/hackers
https://blog.usievents.com/interview-keren-elazari-hackers-potentiel-disruptif-dont-nous-avons-besoin/|title|"Interview Keren Elazari : "" Les hackers ont le potentiel disruptif dont nous avons besoin"""
https://blog.usievents.com/interview-keren-elazari-hackers-potentiel-disruptif-dont-nous-avons-besoin/|creationTime|2017-04-30T14:20:39Z
http://events.linkeddata.org/ldow2008/papers/01-bojars-passant-weaving-sioc.pdf|creationDate|2008-03-30
http://events.linkeddata.org/ldow2008/papers/01-bojars-passant-weaving-sioc.pdf|tag|http://www.semanlink.net/tag/richard_cyganiak
http://events.linkeddata.org/ldow2008/papers/01-bojars-passant-weaving-sioc.pdf|tag|http://www.semanlink.net/tag/alexandre_passant
http://events.linkeddata.org/ldow2008/papers/01-bojars-passant-weaving-sioc.pdf|tag|http://www.semanlink.net/tag/sioc
http://events.linkeddata.org/ldow2008/papers/01-bojars-passant-weaving-sioc.pdf|tag|http://www.semanlink.net/tag/ldow2008
http://events.linkeddata.org/ldow2008/papers/01-bojars-passant-weaving-sioc.pdf|title|Weaving SIOC into the Web of Linked Data
http://events.linkeddata.org/ldow2008/papers/01-bojars-passant-weaving-sioc.pdf|creationTime|2008-03-30T20:23:15Z
http://news.bbc.co.uk/1/hi/sci/tech/4862112.stm|creationDate|2006-04-09
http://news.bbc.co.uk/1/hi/sci/tech/4862112.stm|tag|http://www.semanlink.net/tag/masse_manquante
http://news.bbc.co.uk/1/hi/sci/tech/4862112.stm|tag|http://www.semanlink.net/tag/physique_des_particules_modele_standard
http://news.bbc.co.uk/1/hi/sci/tech/4862112.stm|tag|http://www.semanlink.net/tag/minos_neutrino_experiment
http://news.bbc.co.uk/1/hi/sci/tech/4862112.stm|comment|Physicists have confirmed that neutrinos, which are thought to have played a key role during the creation of the Universe, have mass.
+http://news.bbc.co.uk/1/hi/sci/tech/4862112.stm|title|BBC NEWS - Light shed on mysterious particle +http://news.bbc.co.uk/1/hi/sci/tech/4862112.stm|source|BBC +http://esw.w3.org/topic/SemanticWebDOAPBulletinBoard|creationDate|2006-01-30 +http://esw.w3.org/topic/SemanticWebDOAPBulletinBoard|tag|http://www.semanlink.net/tag/sw_wiki +http://esw.w3.org/topic/SemanticWebDOAPBulletinBoard|tag|http://www.semanlink.net/tag/dur_a_trouver +http://esw.w3.org/topic/SemanticWebDOAPBulletinBoard|comment|This is a place to link DOAP Descriptions similar to FOAFBulletinBoard but specifically and only for applications and demos that themselves concern the Semantic Web. +http://esw.w3.org/topic/SemanticWebDOAPBulletinBoard|title|SemanticWebDOAPBulletinBoard - ESW Wiki +http://structureddynamics.com/linked_data.html|creationDate|2009-04-14 +http://structureddynamics.com/linked_data.html|tag|http://www.semanlink.net/tag/faq +http://structureddynamics.com/linked_data.html|tag|http://www.semanlink.net/tag/link_to_me +http://structureddynamics.com/linked_data.html|tag|http://www.semanlink.net/tag/linked_data +http://structureddynamics.com/linked_data.html|title|Linked Data FAQ +http://structureddynamics.com/linked_data.html|creationTime|2009-04-14T01:06:34Z +https://neo4j.com/developer/cypher-query-language/|creationDate|2016-11-21 +https://neo4j.com/developer/cypher-query-language/|tag|http://www.semanlink.net/tag/neo4j +https://neo4j.com/developer/cypher-query-language/|title|Neo4j's Graph Query Language: An Introduction to Cypher +https://neo4j.com/developer/cypher-query-language/|creationTime|2016-11-21T14:37:46Z +http://www.w3.org/2004/02/skos/extensions.rdf|creationDate|2007-07-18 +http://www.w3.org/2004/02/skos/extensions.rdf|tag|http://www.semanlink.net/tag/owl +http://www.w3.org/2004/02/skos/extensions.rdf|tag|http://www.semanlink.net/tag/rdf +http://www.w3.org/2004/02/skos/extensions.rdf|tag|http://www.semanlink.net/tag/skos +http://www.w3.org/2004/02/skos/extensions.rdf|comment|An RDF vocabulary containing extensions to SKOS Core useful for specialised applications. 
+http://www.w3.org/2004/02/skos/extensions.rdf|title|SKOS Extensions [RDF/OWL Description] +http://www.w3.org/2004/02/skos/extensions.rdf|creationTime|2007-07-18T23:28:18Z +http://redlink.co/redlinks-90seconds-of-fame-at-the-pioneers-festival/|creationDate|2013-11-03 +http://redlink.co/redlinks-90seconds-of-fame-at-the-pioneers-festival/|tag|http://www.semanlink.net/tag/john_pereira +http://redlink.co/redlinks-90seconds-of-fame-at-the-pioneers-festival/|title|Redlink’s 90seconds of fame at the Pioneers Festival redlink +http://redlink.co/redlinks-90seconds-of-fame-at-the-pioneers-festival/|creationTime|2013-11-03T10:47:10Z +http://www.reuters.com/article/mergersNews/idUSN1931556620070720?sp=true|creationDate|2007-07-28 +http://www.reuters.com/article/mergersNews/idUSN1931556620070720?sp=true|tag|http://www.semanlink.net/tag/facebook +http://www.reuters.com/article/mergersNews/idUSN1931556620070720?sp=true|title|Facebook buys start-up Parakey for undisclosed sum Reuters +http://www.reuters.com/article/mergersNews/idUSN1931556620070720?sp=true|creationTime|2007-07-28T18:49:39Z +http://julie.grollier.free.fr/index.htm|creationDate|2018-10-27 +http://julie.grollier.free.fr/index.htm|tag|http://www.semanlink.net/tag/julie_grollier +http://julie.grollier.free.fr/index.htm|tag|http://www.semanlink.net/tag/brains_in_silicon +http://julie.grollier.free.fr/index.htm|title|Julie Grollier - personal website: Nanodevices - Bio-inspired computing - Spin Transfer Torque - Memristors +http://julie.grollier.free.fr/index.htm|creationTime|2018-10-27T17:38:20Z +https://github.com/nlptown/nlp-notebooks|creationDate|2019-02-07 +https://github.com/nlptown/nlp-notebooks|tag|http://www.semanlink.net/tag/github_project +https://github.com/nlptown/nlp-notebooks|tag|http://www.semanlink.net/tag/yves_peirsman +https://github.com/nlptown/nlp-notebooks|tag|http://www.semanlink.net/tag/nlp_sample_code +https://github.com/nlptown/nlp-notebooks|title|A collection of notebooks for Natural Language Processing from NLP Town +https://github.com/nlptown/nlp-notebooks|creationTime|2019-02-07T00:48:41Z +http://josh-tobin.com/assets/pdf/troubleshooting-deep-neural-networks-01-19.pdf|creationDate|2019-01-27 +http://josh-tobin.com/assets/pdf/troubleshooting-deep-neural-networks-01-19.pdf|tag|http://www.semanlink.net/tag/nn_dev +http://josh-tobin.com/assets/pdf/troubleshooting-deep-neural-networks-01-19.pdf|tag|http://www.semanlink.net/tag/nn_tips +http://josh-tobin.com/assets/pdf/troubleshooting-deep-neural-networks-01-19.pdf|tag|http://www.semanlink.net/tag/artificial_neural_network +http://josh-tobin.com/assets/pdf/troubleshooting-deep-neural-networks-01-19.pdf|tag|http://www.semanlink.net/tag/troubleshooting +http://josh-tobin.com/assets/pdf/troubleshooting-deep-neural-networks-01-19.pdf|tag|http://www.semanlink.net/tag/howto +http://josh-tobin.com/assets/pdf/troubleshooting-deep-neural-networks-01-19.pdf|title|Troubleshooting Deep Neural Networks +http://josh-tobin.com/assets/pdf/troubleshooting-deep-neural-networks-01-19.pdf|creationTime|2019-01-27T11:50:54Z +http://isegserv.itd.rl.ac.uk/blogs/alistair/archives/84|creationDate|2007-11-20 +http://isegserv.itd.rl.ac.uk/blogs/alistair/archives/84|tag|http://www.semanlink.net/tag/rdfa +http://isegserv.itd.rl.ac.uk/blogs/alistair/archives/84|tag|http://www.semanlink.net/tag/alistair_miles +http://isegserv.itd.rl.ac.uk/blogs/alistair/archives/84|tag|http://www.semanlink.net/tag/e_learning +http://isegserv.itd.rl.ac.uk/blogs/alistair/archives/84|tag|http://www.semanlink.net/tag/skos 
+http://isegserv.itd.rl.ac.uk/blogs/alistair/archives/84|comment|Both SKOS and RDFa have interesting consequences for e-learning technology, and especially for leveraging the Web as a platform for delivering learning. +http://isegserv.itd.rl.ac.uk/blogs/alistair/archives/84|title|Alistair Miles » SKOS and RDFa in e-Learning +http://isegserv.itd.rl.ac.uk/blogs/alistair/archives/84|creationTime|2007-11-20T21:51:20Z +https://antrix.net/static/pages/python-for-java/online/#the_zen_of_python|creationDate|2017-06-06 +https://antrix.net/static/pages/python-for-java/online/#the_zen_of_python|tag|http://www.semanlink.net/tag/python +https://antrix.net/static/pages/python-for-java/online/#the_zen_of_python|title|Python for the busy Java Developer +https://antrix.net/static/pages/python-for-java/online/#the_zen_of_python|creationTime|2017-06-06T07:22:37Z +http://thinknook.com/10-ways-to-improve-your-classification-algorithm-performance-2013-01-21/|creationDate|2014-04-07 +http://thinknook.com/10-ways-to-improve-your-classification-algorithm-performance-2013-01-21/|tag|http://www.semanlink.net/tag/nlp_text_classification +http://thinknook.com/10-ways-to-improve-your-classification-algorithm-performance-2013-01-21/|tag|http://www.semanlink.net/tag/tips +http://thinknook.com/10-ways-to-improve-your-classification-algorithm-performance-2013-01-21/|comment|"Learn to Say “I Dont Know""" +http://thinknook.com/10-ways-to-improve-your-classification-algorithm-performance-2013-01-21/|title|10 Tips to Improve your Text Classification Algorithm Accuracy and Performance Thinknook +http://thinknook.com/10-ways-to-improve-your-classification-algorithm-performance-2013-01-21/|creationTime|2014-04-07T10:13:59Z +http://www.w3.org/2011/11/WebAutomotive/|creationDate|2012-02-06 +http://www.w3.org/2011/11/WebAutomotive/|tag|http://www.semanlink.net/tag/workshop +http://www.w3.org/2011/11/WebAutomotive/|tag|http://www.semanlink.net/tag/automobile_and_w3c +http://www.w3.org/2011/11/WebAutomotive/|comment|"""Web Applications In Your Car""" +http://www.w3.org/2011/11/WebAutomotive/|title|W3C Web and Automotive Workshop +http://www.w3.org/2011/11/WebAutomotive/|creationTime|2012-02-06T10:37:58Z +http://media.ford.com/article_display.cfm?article_id=34591|creationDate|2013-04-06 +http://media.ford.com/article_display.cfm?article_id=34591|tag|http://www.semanlink.net/tag/destination_prediction +http://media.ford.com/article_display.cfm?article_id=34591|tag|http://www.semanlink.net/tag/ford +http://media.ford.com/article_display.cfm?article_id=34591|tag|http://www.semanlink.net/tag/google +http://media.ford.com/article_display.cfm?article_id=34591|title|Ford Developers Look to Use Google Prediction API to Optimize Energy Efficiency; Research Presented at Google I/O Ford Motor Company Newsroom +http://media.ford.com/article_display.cfm?article_id=34591|creationTime|2013-04-06T13:14:34Z +http://scikit-learn.org/stable/modules/scaling_strategies.html|creationDate|2018-09-15 +http://scikit-learn.org/stable/modules/scaling_strategies.html|tag|http://www.semanlink.net/tag/scikit_learn +http://scikit-learn.org/stable/modules/scaling_strategies.html|tag|http://www.semanlink.net/tag/external_memory_algorithm +http://scikit-learn.org/stable/modules/scaling_strategies.html|comment|using out-of-core learning +http://scikit-learn.org/stable/modules/scaling_strategies.html|title|Strategies to scale computationally: bigger data — scikit-learn documentation 
+http://scikit-learn.org/stable/modules/scaling_strategies.html|creationTime|2018-09-15T18:42:54Z +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3230,50-776470,0.html|creationDate|2006-05-27 +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3230,50-776470,0.html|tag|http://www.semanlink.net/tag/union_europeenne +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3230,50-776470,0.html|title|Le rêve européen confisqué +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3230,50-776470,0.html|source|Le Monde +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3230,50-776470,0.html|date|2006-05-27 +http://www.w3.org/2004/03/trix/|creationDate|2010-05-14 +http://www.w3.org/2004/03/trix/|tag|http://www.semanlink.net/tag/named_graphs +http://www.w3.org/2004/03/trix/|title|Named Graphs / Semantic Web Interest Group +http://www.w3.org/2004/03/trix/|creationTime|2010-05-14T11:23:02Z +http://arxiv.org/abs/1602.05314|creationDate|2016-02-26 +http://arxiv.org/abs/1602.05314|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/1602.05314|tag|http://www.semanlink.net/tag/convolutional_neural_network +http://arxiv.org/abs/1602.05314|arxiv_author|Tobias Weyand +http://arxiv.org/abs/1602.05314|arxiv_author|James Philbin +http://arxiv.org/abs/1602.05314|arxiv_author|Ilya Kostrikov +http://arxiv.org/abs/1602.05314|title|[1602.05314] PlaNet - Photo Geolocation with Convolutional Neural Networks +http://arxiv.org/abs/1602.05314|creationTime|2016-02-26T13:00:13Z +http://arxiv.org/abs/1602.05314|arxiv_summary|"Is it possible to build a system to determine the location where a photo was +taken using just its pixels? In general, the problem seems exceptionally +difficult: it is trivial to construct situations where no location can be +inferred. Yet images often contain informative cues such as landmarks, weather +patterns, vegetation, road markings, and architectural details, which in +combination may allow one to determine an approximate location and occasionally +an exact location. Websites such as GeoGuessr and View from your Window suggest +that humans are relatively good at integrating these cues to geolocate images, +especially en-masse. In computer vision, the photo geolocation problem is +usually approached using image retrieval methods. In contrast, we pose the +problem as one of classification by subdividing the surface of the earth into +thousands of multi-scale geographic cells, and train a deep network using +millions of geotagged images. While previous approaches only recognize +landmarks or perform approximate matching using global image descriptors, our +model is able to use and integrate multiple visible cues. We show that the +resulting model, called PlaNet, outperforms previous approaches and even +attains superhuman levels of accuracy in some cases. Moreover, we extend our +model to photo albums by combining it with a long short-term memory (LSTM) +architecture. By learning to exploit temporal coherence to geolocate uncertain +photos, we demonstrate that this model achieves a 50% performance improvement +over the single-image model." 
+http://arxiv.org/abs/1602.05314|arxiv_firstAuthor|Tobias Weyand +http://arxiv.org/abs/1602.05314|arxiv_updated|2016-02-17T06:27:55Z +http://arxiv.org/abs/1602.05314|arxiv_title|PlaNet - Photo Geolocation with Convolutional Neural Networks +http://arxiv.org/abs/1602.05314|arxiv_published|2016-02-17T06:27:55Z +http://arxiv.org/abs/1602.05314|arxiv_num|1602.05314 +https://en.wikipedia.org/wiki/Body_Heat|creationDate|2016-03-27 +https://en.wikipedia.org/wiki/Body_Heat|tag|http://www.semanlink.net/tag/film_noir +https://en.wikipedia.org/wiki/Body_Heat|tag|http://www.semanlink.net/tag/film_americain +https://en.wikipedia.org/wiki/Body_Heat|comment|"avec Kathleen Turner (""a career on adventurousness and frank sexuality born of robust physicality"") +" +https://en.wikipedia.org/wiki/Body_Heat|title|Body Heat +https://en.wikipedia.org/wiki/Body_Heat|creationTime|2016-03-27T22:41:40Z +http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/35179.pdf|creationDate|2016-09-25 +http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/35179.pdf|tag|http://www.semanlink.net/tag/semantic_technology +http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/35179.pdf|title|The Unreasonable Effectiveness of Data +http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/35179.pdf|creationTime|2016-09-25T12:19:46Z +http://www.springerlink.com/content/978-1-4419-7664-2/contents/|creationDate|2011-02-02 +http://www.springerlink.com/content/978-1-4419-7664-2/contents/|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://www.springerlink.com/content/978-1-4419-7664-2/contents/|tag|http://www.semanlink.net/tag/enterprise_data +http://www.springerlink.com/content/978-1-4419-7664-2/contents/|title|Linking Enterprise Data : the book +http://www.springerlink.com/content/978-1-4419-7664-2/contents/|creationTime|2011-02-02T22:26:20Z +http://chine.blog.lemonde.fr/2008/10/17/a-l%E2%80%99ouest-5-momies/|creationDate|2008-10-20 +http://chine.blog.lemonde.fr/2008/10/17/a-l%E2%80%99ouest-5-momies/|tag|http://www.semanlink.net/tag/indo_europeen +http://chine.blog.lemonde.fr/2008/10/17/a-l%E2%80%99ouest-5-momies/|tag|http://www.semanlink.net/tag/momie +http://chine.blog.lemonde.fr/2008/10/17/a-l%E2%80%99ouest-5-momies/|tag|http://www.semanlink.net/tag/archeologie +http://chine.blog.lemonde.fr/2008/10/17/a-l%E2%80%99ouest-5-momies/|tag|http://www.semanlink.net/tag/ouigour +http://chine.blog.lemonde.fr/2008/10/17/a-l%E2%80%99ouest-5-momies/|tag|http://www.semanlink.net/tag/histoire_de_la_chine +http://chine.blog.lemonde.fr/2008/10/17/a-l%E2%80%99ouest-5-momies/|title|Momies du Taklamakan +http://chine.blog.lemonde.fr/2008/10/17/a-l%E2%80%99ouest-5-momies/|creationTime|2008-10-20T00:36:40Z +https://www.youtube.com/watch?v=sqDHBH9IjRU&t=8m25s|creationDate|2018-03-06 +https://www.youtube.com/watch?v=sqDHBH9IjRU&t=8m25s|tag|http://www.semanlink.net/tag/named_entity_recognition +https://www.youtube.com/watch?v=sqDHBH9IjRU&t=8m25s|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=sqDHBH9IjRU&t=8m25s|tag|http://www.semanlink.net/tag/matthew_honnibal +https://www.youtube.com/watch?v=sqDHBH9IjRU&t=8m25s|tag|http://www.semanlink.net/tag/spacy +https://www.youtube.com/watch?v=sqDHBH9IjRU&t=8m25s|title|SPACY'S ENTITY RECOGNITION MODEL: incremental parsing with Bloom embeddings & residual CNNs - YouTube +https://www.youtube.com/watch?v=sqDHBH9IjRU&t=8m25s|creationTime|2018-03-06T11:59:39Z 
+https://arxiv.org/abs/1608.05426|creationDate|2018-07-23 +https://arxiv.org/abs/1608.05426|tag|http://www.semanlink.net/tag/yoav_goldberg +https://arxiv.org/abs/1608.05426|tag|http://www.semanlink.net/tag/cross_lingual_word_embeddings +https://arxiv.org/abs/1608.05426|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1608.05426|arxiv_author|Yoav Goldberg +https://arxiv.org/abs/1608.05426|arxiv_author|Omer Levy +https://arxiv.org/abs/1608.05426|arxiv_author|Anders Søgaard +https://arxiv.org/abs/1608.05426|title|[1608.05426] A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments +https://arxiv.org/abs/1608.05426|creationTime|2018-07-23T12:54:24Z +https://arxiv.org/abs/1608.05426|arxiv_summary|"While cross-lingual word embeddings have been studied extensively in recent +years, the qualitative differences between the different algorithms remain +vague. We observe that whether or not an algorithm uses a particular feature +set (sentence IDs) accounts for a significant performance gap among these +algorithms. This feature set is also used by traditional alignment algorithms, +such as IBM Model-1, which demonstrate similar performance to state-of-the-art +embedding algorithms on a variety of benchmarks. Overall, we observe that +different algorithmic approaches for utilizing the sentence ID feature space +result in similar performance. This paper draws both empirical and theoretical +parallels between the embedding and alignment literature, and suggests that +adding additional sources of information, which go beyond the traditional +signal of bilingual sentence-aligned corpora, may substantially improve +cross-lingual word embeddings, and that future baselines should at least take +such features into account." 
+https://arxiv.org/abs/1608.05426|arxiv_firstAuthor|Omer Levy +https://arxiv.org/abs/1608.05426|arxiv_updated|2017-01-09T20:49:18Z +https://arxiv.org/abs/1608.05426|arxiv_title|A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments +https://arxiv.org/abs/1608.05426|arxiv_published|2016-08-18T20:27:46Z +https://arxiv.org/abs/1608.05426|arxiv_num|1608.05426 +https://www.theguardian.com/politics/2017/jul/25/brussels-attacks-liam-foxs-ignorant-remarks-chlorinated-chicken-eu-trade-deal-us|creationDate|2017-07-30 +https://www.theguardian.com/politics/2017/jul/25/brussels-attacks-liam-foxs-ignorant-remarks-chlorinated-chicken-eu-trade-deal-us|tag|http://www.semanlink.net/tag/brexit +https://www.theguardian.com/politics/2017/jul/25/brussels-attacks-liam-foxs-ignorant-remarks-chlorinated-chicken-eu-trade-deal-us|tag|http://www.semanlink.net/tag/chlorinated_chicken +https://www.theguardian.com/politics/2017/jul/25/brussels-attacks-liam-foxs-ignorant-remarks-chlorinated-chicken-eu-trade-deal-us|title|Brussels attacks Liam Fox's 'ignorant' remarks on chlorinated chicken Politics The Guardian +https://www.theguardian.com/politics/2017/jul/25/brussels-attacks-liam-foxs-ignorant-remarks-chlorinated-chicken-eu-trade-deal-us|creationTime|2017-07-30T01:22:47Z +http://jsonformatter.curiousconcept.com/|creationDate|2012-05-26 +http://jsonformatter.curiousconcept.com/|tag|http://www.semanlink.net/tag/json +http://jsonformatter.curiousconcept.com/|tag|http://www.semanlink.net/tag/validation +http://jsonformatter.curiousconcept.com/|title|JSON Formatter & Validator +http://jsonformatter.curiousconcept.com/|creationTime|2012-05-26T23:16:06Z +https://arxiv.org/abs/1805.04032|creationDate|2018-05-30 +https://arxiv.org/abs/1805.04032|tag|http://www.semanlink.net/tag/survey +https://arxiv.org/abs/1805.04032|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1805.04032|tag|http://www.semanlink.net/tag/sense_embeddings +https://arxiv.org/abs/1805.04032|arxiv_author|Mohammad Taher Pilehvar +https://arxiv.org/abs/1805.04032|arxiv_author|Jose Camacho-Collados +https://arxiv.org/abs/1805.04032|comment|"Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). + +Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. + +two main branches of sense representation : + +- unsupervised +- knowledge-based" +https://arxiv.org/abs/1805.04032|title|[1805.04032] From Word to Sense Embeddings: A Survey on Vector Representations of Meaning +https://arxiv.org/abs/1805.04032|creationTime|2018-05-30T23:44:56Z +https://arxiv.org/abs/1805.04032|arxiv_summary|"Over the past years, distributed semantic representations have proved to be +effective and flexible keepers of prior knowledge to be integrated into +downstream applications. This survey focuses on the representation of meaning. +We start from the theoretical background behind word vector space models and +highlight one of their major limitations: the meaning conflation deficiency, +which arises from representing a word with all its possible meanings as a +single vector. Then, we explain how this deficiency can be addressed through a +transition from the word level to the more fine-grained level of word senses +(in its broader acceptation) as a method for modelling unambiguous lexical +meaning. 
We present a comprehensive overview of the wide range of techniques in
the two main branches of sense representation, i.e., unsupervised and
knowledge-based. Finally, this survey covers the main evaluation procedures and
applications for this type of representation, and provides an analysis of four
of its important aspects: interpretability, sense granularity, adaptability to
different domains and compositionality."
https://arxiv.org/abs/1805.04032|arxiv_firstAuthor|Jose Camacho-Collados
https://arxiv.org/abs/1805.04032|arxiv_updated|2018-10-26T09:34:36Z
https://arxiv.org/abs/1805.04032|arxiv_title|From Word to Sense Embeddings: A Survey on Vector Representations of Meaning
https://arxiv.org/abs/1805.04032|arxiv_published|2018-05-10T15:56:48Z
https://arxiv.org/abs/1805.04032|arxiv_num|1805.04032
http://www.mkbergman.com/?p=370|creationDate|2007-11-14
http://www.mkbergman.com/?p=370|tag|http://www.semanlink.net/tag/encyclopedia_of_life
http://www.mkbergman.com/?p=370|tag|http://www.semanlink.net/tag/linked_data
http://www.mkbergman.com/?p=370|title|The Encyclopedia of Life and Linking Open Data » AI3:::Adaptive Information
http://www.mkbergman.com/?p=370|creationTime|2007-11-14T14:09:25Z
http://www.ldodds.com/blog/archives/000330.html|creationDate|2008-04-14
http://www.ldodds.com/blog/archives/000330.html|tag|http://www.semanlink.net/tag/google_app_engine
http://www.ldodds.com/blog/archives/000330.html|tag|http://www.semanlink.net/tag/leigh_dodds
http://www.ldodds.com/blog/archives/000330.html|title|Lost Boy: Google AppEngine for Personal Web Presence?
http://www.ldodds.com/blog/archives/000330.html|creationTime|2008-04-14T14:22:30Z
http://stackoverflow.com/questions/11138215/entitytag-value-caching-comparison-how-to-in-jersey|creationDate|2017-04-11
http://stackoverflow.com/questions/11138215/entitytag-value-caching-comparison-how-to-in-jersey|tag|http://www.semanlink.net/tag/stack_overflow
http://stackoverflow.com/questions/11138215/entitytag-value-caching-comparison-how-to-in-jersey|tag|http://www.semanlink.net/tag/jersey_cache_control
http://stackoverflow.com/questions/11138215/entitytag-value-caching-comparison-how-to-in-jersey|title|http - EntityTag - Value, Caching, Comparison - how to in Jersey - Stack Overflow
http://stackoverflow.com/questions/11138215/entitytag-value-caching-comparison-how-to-in-jersey|creationTime|2017-04-11T17:16:55Z
https://pytorch.org/tutorials/beginner/nn_tutorial.html|creationDate|2019-01-16
https://pytorch.org/tutorials/beginner/nn_tutorial.html|tag|http://www.semanlink.net/tag/tutorial
https://pytorch.org/tutorials/beginner/nn_tutorial.html|tag|http://www.semanlink.net/tag/pytorch
https://pytorch.org/tutorials/beginner/nn_tutorial.html|tag|http://www.semanlink.net/tag/jeremy_howard
https://pytorch.org/tutorials/beginner/nn_tutorial.html|title|What is torch.nn really? — PyTorch Tutorials 1.0.0
https://pytorch.org/tutorials/beginner/nn_tutorial.html|creationTime|2019-01-16T22:21:35Z
http://www.snee.com/bobdc.blog/2009/10/a-rules-language-for-rdf.html|creationDate|2009-11-12
http://www.snee.com/bobdc.blog/2009/10/a-rules-language-for-rdf.html|tag|http://www.semanlink.net/tag/bob_ducharme
http://www.snee.com/bobdc.blog/2009/10/a-rules-language-for-rdf.html|tag|http://www.semanlink.net/tag/rules_language
http://www.snee.com/bobdc.blog/2009/10/a-rules-language-for-rdf.html|tag|http://www.semanlink.net/tag/sparql_construct
http://www.snee.com/bobdc.blog/2009/10/a-rules-language-for-rdf.html|title|A rules language for RDF - bobdc.blog
http://www.snee.com/bobdc.blog/2009/10/a-rules-language-for-rdf.html|creationTime|2009-11-12T13:47:03Z
http://www.semanlink.net/doc/2019/05/codes_of_interest_easy_speech_|creationDate|2019-05-28
http://www.semanlink.net/doc/2019/05/codes_of_interest_easy_speech_|tag|http://www.semanlink.net/tag/pocketsphinx
http://www.semanlink.net/doc/2019/05/codes_of_interest_easy_speech_|title|Codes of Interest: Easy Speech Recognition in Python with PyAudio and Pocketsphinx
http://www.semanlink.net/doc/2019/05/codes_of_interest_easy_speech_|bookmarkOf|https://www.codesofinterest.com/2017/03/python-speech-recognition-pocketsphinx.html
http://www.semanlink.net/doc/2019/05/codes_of_interest_easy_speech_|creationTime|2019-05-28T22:57:38Z
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.167.4245|creationDate|2019-04-19
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.167.4245|tag|http://www.semanlink.net/tag/active_learning
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.167.4245|tag|http://www.semanlink.net/tag/survey
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.167.4245|comment|"The key idea behind active learning is that a machine learning algorithm can achieve greater accuracy with fewer labeled training instances if it is allowed to choose the data from which it learns
"
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.167.4245|title|Active learning literature survey (2010)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.167.4245|creationTime|2019-04-19T16:57:41Z
http://www.lemonde.fr/ameriques/article/2011/11/05/au-mexique-anonymous-fait-flechir-un-cartel-de-la-drogue_1599427_3222.html|creationDate|2011-11-06
http://www.lemonde.fr/ameriques/article/2011/11/05/au-mexique-anonymous-fait-flechir-un-cartel-de-la-drogue_1599427_3222.html|tag|http://www.semanlink.net/tag/drogues
http://www.lemonde.fr/ameriques/article/2011/11/05/au-mexique-anonymous-fait-flechir-un-cartel-de-la-drogue_1599427_3222.html|tag|http://www.semanlink.net/tag/anonymous
http://www.lemonde.fr/ameriques/article/2011/11/05/au-mexique-anonymous-fait-flechir-un-cartel-de-la-drogue_1599427_3222.html|tag|http://www.semanlink.net/tag/mexique
http://www.lemonde.fr/ameriques/article/2011/11/05/au-mexique-anonymous-fait-flechir-un-cartel-de-la-drogue_1599427_3222.html|title|Au Mexique, Anonymous fait fléchir un cartel de la drogue
http://www.lemonde.fr/ameriques/article/2011/11/05/au-mexique-anonymous-fait-flechir-un-cartel-de-la-drogue_1599427_3222.html|creationTime|2011-11-06T16:28:44Z
http://www.lemonde.fr/ameriques/article/2011/11/05/au-mexique-anonymous-fait-flechir-un-cartel-de-la-drogue_1599427_3222.html|source|Le Monde
http://www.lemonde.fr/ameriques/article/2011/11/05/au-mexique-anonymous-fait-flechir-un-cartel-de-la-drogue_1599427_3222.html|date|2011-11-06
+https://aclanthology.info/papers/D18-1360/d18-1360|creationDate|2019-02-09 +https://aclanthology.info/papers/D18-1360/d18-1360|tag|http://www.semanlink.net/tag/multitask_learning_in_nlp +https://aclanthology.info/papers/D18-1360/d18-1360|tag|http://www.semanlink.net/tag/ip_ir_ml_ia +https://aclanthology.info/papers/D18-1360/d18-1360|tag|http://www.semanlink.net/tag/knowledge_graph_construction +https://aclanthology.info/papers/D18-1360/d18-1360|tag|http://www.semanlink.net/tag/emnlp_2018 +https://aclanthology.info/papers/D18-1360/d18-1360|tag|http://www.semanlink.net/tag/knowledge_graph_deep_learning +https://aclanthology.info/papers/D18-1360/d18-1360|comment|"Attempting to answer questions such as: ""What is the task described in this paper?"", ""what method was used in solving the task?"", ""what dataset did the paper use?"". The multi-task setup reduces cascading errors between tasks and leverages cross-sentence relations through coreference links." +https://aclanthology.info/papers/D18-1360/d18-1360|title|Multi-Task Identification of Entities, Relations, and Coreference for Scientific Knowledge Graph Construction - ACL Anthology +https://aclanthology.info/papers/D18-1360/d18-1360|creationTime|2019-02-09T11:28:06Z +https://www.lemonde.fr/idees/article/2018/09/15/dix-ans-apres-lehman-brothers-en-attendant-la-prochaine-crise_5355537_3232.html|creationDate|2018-09-15 +https://www.lemonde.fr/idees/article/2018/09/15/dix-ans-apres-lehman-brothers-en-attendant-la-prochaine-crise_5355537_3232.html|tag|http://www.semanlink.net/tag/lehman_brothers +https://www.lemonde.fr/idees/article/2018/09/15/dix-ans-apres-lehman-brothers-en-attendant-la-prochaine-crise_5355537_3232.html|tag|http://www.semanlink.net/tag/crise_des_subprimes +https://www.lemonde.fr/idees/article/2018/09/15/dix-ans-apres-lehman-brothers-en-attendant-la-prochaine-crise_5355537_3232.html|tag|http://www.semanlink.net/tag/crise_financiere +https://www.lemonde.fr/idees/article/2018/09/15/dix-ans-apres-lehman-brothers-en-attendant-la-prochaine-crise_5355537_3232.html|title|Dix ans après Lehman Brothers : en attendant la prochaine crise +https://www.lemonde.fr/idees/article/2018/09/15/dix-ans-apres-lehman-brothers-en-attendant-la-prochaine-crise_5355537_3232.html|creationTime|2018-09-15T16:41:51Z +http://www.talis.com/platform/demos/|creationDate|2010-08-25 +http://www.talis.com/platform/demos/|tag|http://www.semanlink.net/tag/talis_platform +http://www.talis.com/platform/demos/|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.talis.com/platform/demos/|tag|http://www.semanlink.net/tag/linked_data_demo +http://www.talis.com/platform/demos/|tag|http://www.semanlink.net/tag/bbc +http://www.talis.com/platform/demos/|title|Talis Platform - Demos +http://www.talis.com/platform/demos/|creationTime|2010-08-25T11:57:15Z +http://www.economist.com/news/obituary/21569674-aaron-swartz-computer-programmer-and-activist-committed-suicide-january-11th-aged-26-aaron?frsc=dg%7Cb|creationDate|2013-01-18 +http://www.economist.com/news/obituary/21569674-aaron-swartz-computer-programmer-and-activist-committed-suicide-january-11th-aged-26-aaron?frsc=dg%7Cb|tag|http://www.semanlink.net/tag/aaron_swartz +http://www.economist.com/news/obituary/21569674-aaron-swartz-computer-programmer-and-activist-committed-suicide-january-11th-aged-26-aaron?frsc=dg%7Cb|title|Aaron Swartz The Economist 
+http://www.economist.com/news/obituary/21569674-aaron-swartz-computer-programmer-and-activist-committed-suicide-january-11th-aged-26-aaron?frsc=dg%7Cb|creationTime|2013-01-18T00:55:19Z +http://protegewiki.stanford.edu/wiki/OWLViz|creationDate|2011-06-08 +http://protegewiki.stanford.edu/wiki/OWLViz|tag|http://www.semanlink.net/tag/graphviz +http://protegewiki.stanford.edu/wiki/OWLViz|tag|http://www.semanlink.net/tag/protege +http://protegewiki.stanford.edu/wiki/OWLViz|tag|http://www.semanlink.net/tag/notes_d_install +http://protegewiki.stanford.edu/wiki/OWLViz|comment|"installé dans: /usr/local/graphviz-2.14/ !!! +" +http://protegewiki.stanford.edu/wiki/OWLViz|title|Installing Graphviz for protegé +http://protegewiki.stanford.edu/wiki/OWLViz|creationTime|2011-06-08T16:45:58Z +http://pt.wikipedia.org/wiki/Parque_Nacional_Serra_da_Capivara|creationDate|2008-10-26 +http://pt.wikipedia.org/wiki/Parque_Nacional_Serra_da_Capivara|tag|http://www.semanlink.net/tag/first_americans +http://pt.wikipedia.org/wiki/Parque_Nacional_Serra_da_Capivara|title|Parque Nacional Serra da Capivara - Wikipédia, a enciclopédia livre +http://pt.wikipedia.org/wiki/Parque_Nacional_Serra_da_Capivara|creationTime|2008-10-26T23:38:45Z +http://www.dajobe.org/2005/04-sparql/|creationDate|2008-04-08 +http://www.dajobe.org/2005/04-sparql/|tag|http://www.semanlink.net/tag/david_beckett +http://www.dajobe.org/2005/04-sparql/|tag|http://www.semanlink.net/tag/sparql +http://www.dajobe.org/2005/04-sparql/|title|SPARQL Reference Card +http://www.dajobe.org/2005/04-sparql/|creationTime|2008-04-08T11:59:41Z +http://owlgred.lumii.lv/online_visualization/5dq2|creationDate|2015-01-30 +http://owlgred.lumii.lv/online_visualization/5dq2|tag|http://www.semanlink.net/tag/ontologie_visualization +http://owlgred.lumii.lv/online_visualization/5dq2|tag|http://www.semanlink.net/tag/configuration_ontology +http://owlgred.lumii.lv/online_visualization/5dq2|title|Online Ontology Visualization · OWLGrEd: ConfigurationOntology +http://owlgred.lumii.lv/online_visualization/5dq2|creationTime|2015-01-30T07:27:48Z +http://www.w3.org/2001/sw/SW-FAQ|creationDate|2007-04-25 +http://www.w3.org/2001/sw/SW-FAQ|tag|http://www.semanlink.net/tag/sweo_interest_group +http://www.w3.org/2001/sw/SW-FAQ|tag|http://www.semanlink.net/tag/faq +http://www.w3.org/2001/sw/SW-FAQ|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.w3.org/2001/sw/SW-FAQ|title|W3C Semantic Web FAQ +http://www.w3.org/2001/sw/SW-FAQ|creationTime|2007-04-25T15:24:41Z +http://www.macports.org/|creationDate|2010-05-26 +http://www.macports.org/|tag|http://www.semanlink.net/tag/mac_software +http://www.macports.org/|tag|http://www.semanlink.net/tag/open_source +http://www.macports.org/|tag|http://www.semanlink.net/tag/mac_os_x +http://www.macports.org/|comment|"open-source community initiative to design an easy-to-use system for compiling, installing, + and upgrading either command-line, X11 or Aqua based open-source software on the + Mac OS X operating system" +http://www.macports.org/|title|macports +http://www.macports.org/|creationTime|2010-05-26T15:15:51Z +http://semanticweb.deit.univpm.it/tiki-index.php?page=ProjectProposalPage|creationDate|2005-06-15 +http://semanticweb.deit.univpm.it/tiki-index.php?page=ProjectProposalPage|tag|http://www.semanlink.net/tag/multimedia +http://semanticweb.deit.univpm.it/tiki-index.php?page=ProjectProposalPage|tag|http://www.semanlink.net/tag/semantic_web 
http://semanticweb.deit.univpm.it/tiki-index.php?page=ProjectProposalPage|tag|http://www.semanlink.net/tag/google
http://semanticweb.deit.univpm.it/tiki-index.php?page=ProjectProposalPage|comment|The following projects are proposed as Open Source in the Google's Summer of Code initiative.
http://semanticweb.deit.univpm.it/tiki-index.php?page=ProjectProposalPage|title|SEMEDIA Semantic Web and Multimedia Group
http://www.marco.org/2013/07/03/lockdown|creationDate|2013-07-04
http://www.marco.org/2013/07/03/lockdown|tag|http://www.semanlink.net/tag/google
http://www.marco.org/2013/07/03/lockdown|tag|http://www.semanlink.net/tag/googleplus
http://www.marco.org/2013/07/03/lockdown|tag|http://www.semanlink.net/tag/facebook
http://www.marco.org/2013/07/03/lockdown|tag|http://www.semanlink.net/tag/rss
http://www.marco.org/2013/07/03/lockdown|comment|Google Reader is just the latest casualty of the war that Facebook started, seemingly accidentally: the battle to own everything.
http://www.marco.org/2013/07/03/lockdown|title|Lockdown – Marco.org
http://www.marco.org/2013/07/03/lockdown|creationTime|2013-07-04T14:03:05Z
https://arxiv.org/abs/1804.01486|creationDate|2018-04-14
https://arxiv.org/abs/1804.01486|tag|http://www.semanlink.net/tag/arxiv_doc
https://arxiv.org/abs/1804.01486|tag|http://www.semanlink.net/tag/embeddings
https://arxiv.org/abs/1804.01486|tag|http://www.semanlink.net/tag/medical_data
https://arxiv.org/abs/1804.01486|tag|http://www.semanlink.net/tag/combining_word_and_entity_embeddings
https://arxiv.org/abs/1804.01486|arxiv_author|Benjamin Kompa
https://arxiv.org/abs/1804.01486|arxiv_author|Griffin Weber
https://arxiv.org/abs/1804.01486|arxiv_author|Xu Shi
https://arxiv.org/abs/1804.01486|arxiv_author|Nathan P. Palmer
https://arxiv.org/abs/1804.01486|arxiv_author|Tianxi Cai
https://arxiv.org/abs/1804.01486|arxiv_author|Inbar Fried
https://arxiv.org/abs/1804.01486|arxiv_author|Isaac S. Kohane
https://arxiv.org/abs/1804.01486|arxiv_author|Allen Schmaltz
https://arxiv.org/abs/1804.01486|arxiv_author|Andrew L. Beam
https://arxiv.org/abs/1804.01486|title|[1804.01486] Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data
https://arxiv.org/abs/1804.01486|creationTime|2018-04-14T11:10:40Z
https://arxiv.org/abs/1804.01486|arxiv_summary|"Word embeddings are a popular approach to unsupervised learning of word
relationships that are widely used in natural language processing. In this
article, we present a new set of embeddings for medical concepts learned using
an extremely large collection of multimodal medical data. Leaning on recent
theoretical insights, we demonstrate how an insurance claims database of 60
million members, a collection of 20 million clinical notes, and 1.7 million
full text biomedical journal articles can be combined to embed concepts into a
common space, resulting in the largest ever set of embeddings for 108,477
medical concepts. To evaluate our approach, we present a new benchmark
methodology based on statistical power specifically designed to test embeddings
of medical concepts. Our approach, called cui2vec, attains state-of-the-art
performance relative to previous methods in most instances. Finally, we provide
a downloadable set of pre-trained embeddings for other researchers to use, as
well as an online tool for interactive exploration of the cui2vec embeddings"
https://arxiv.org/abs/1804.01486|arxiv_firstAuthor|Andrew L. Beam
https://arxiv.org/abs/1804.01486|arxiv_updated|2019-08-20T00:32:33Z
https://arxiv.org/abs/1804.01486|arxiv_title|Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data
https://arxiv.org/abs/1804.01486|arxiv_published|2018-04-04T16:02:54Z
https://arxiv.org/abs/1804.01486|arxiv_num|1804.01486
https://github.com/openlink/rdf-editor|creationDate|2016-01-15
https://github.com/openlink/rdf-editor|tag|http://www.semanlink.net/tag/rdf_editor
https://github.com/openlink/rdf-editor|tag|http://www.semanlink.net/tag/openlink
https://github.com/openlink/rdf-editor|comment|The OpenLink RDF Editor enables editing of RDF documents (in TURTLE notation) stored in a variety of HTTP accessible documents. Actual document access requires the target document is served from a system that supports at least one of the following open standards: Linked Data Platform (LDP), WebDAV, SPARQL 1.1 Update, or the SPARQL Graph Protocol.
https://github.com/openlink/rdf-editor|title|openlink/rdf-editor: The OpenLink RDF Edito... - GitHub
https://github.com/openlink/rdf-editor|creationTime|2016-01-15T10:46:13Z
http://www.cross-browser.com/x/lib/view.php|creationDate|2005-10-19
http://www.cross-browser.com/x/lib/view.php|tag|http://www.semanlink.net/tag/sample_code
http://www.cross-browser.com/x/lib/view.php|tag|http://www.semanlink.net/tag/ajax
http://www.cross-browser.com/x/lib/view.php|tag|http://www.semanlink.net/tag/javascript
http://www.cross-browser.com/x/lib/view.php|title|Cross-Browser.com - XV: X Library Viewer
http://www.jasons-toolbox.com/SlightlyThickerBox/|creationDate|2006-07-07
http://www.jasons-toolbox.com/SlightlyThickerBox/|tag|http://www.semanlink.net/tag/ajax
http://www.jasons-toolbox.com/SlightlyThickerBox/|comment|"Image galleries in Ajax
"
http://www.jasons-toolbox.com/SlightlyThickerBox/|title|Slightly ThickerBox
https://www.lemonde.fr/afrique/article/2018/06/01/au-sahara-voyager-devient-un-crime_5308325_3212.html|creationDate|2018-06-03
https://www.lemonde.fr/afrique/article/2018/06/01/au-sahara-voyager-devient-un-crime_5308325_3212.html|tag|http://www.semanlink.net/tag/immigration
https://www.lemonde.fr/afrique/article/2018/06/01/au-sahara-voyager-devient-un-crime_5308325_3212.html|tag|http://www.semanlink.net/tag/voyager
https://www.lemonde.fr/afrique/article/2018/06/01/au-sahara-voyager-devient-un-crime_5308325_3212.html|tag|http://www.semanlink.net/tag/etat_policier
https://www.lemonde.fr/afrique/article/2018/06/01/au-sahara-voyager-devient-un-crime_5308325_3212.html|tag|http://www.semanlink.net/tag/ca_craint
https://www.lemonde.fr/afrique/article/2018/06/01/au-sahara-voyager-devient-un-crime_5308325_3212.html|tag|http://www.semanlink.net/tag/sahara
https://www.lemonde.fr/afrique/article/2018/06/01/au-sahara-voyager-devient-un-crime_5308325_3212.html|title|Au Sahara, voyager devient un crime
https://www.lemonde.fr/afrique/article/2018/06/01/au-sahara-voyager-devient-un-crime_5308325_3212.html|creationTime|2018-06-03T15:11:20Z
http://www.persee.fr/web/revues/home/prescript/article/jafr_0399-0346_1980_num_50_1_2000_t1_0152_0000_2|creationDate|2009-06-14
http://www.persee.fr/web/revues/home/prescript/article/jafr_0399-0346_1980_num_50_1_2000_t1_0152_0000_2|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest
http://www.persee.fr/web/revues/home/prescript/article/jafr_0399-0346_1980_num_50_1_2000_t1_0152_0000_2|tag|http://www.semanlink.net/tag/boube_gado
+http://www.persee.fr/web/revues/home/prescript/article/jafr_0399-0346_1980_num_50_1_2000_t1_0152_0000_2|tag|http://www.semanlink.net/tag/jerma +http://www.persee.fr/web/revues/home/prescript/article/jafr_0399-0346_1980_num_50_1_2000_t1_0152_0000_2|tag|http://www.semanlink.net/tag/histoire_du_niger +http://www.persee.fr/web/revues/home/prescript/article/jafr_0399-0346_1980_num_50_1_2000_t1_0152_0000_2|title|Boubé Gado, Le Zarmatarey, Contribution à l'histoire des populations d'entre Niger et Dallol Mawri +http://www.persee.fr/web/revues/home/prescript/article/jafr_0399-0346_1980_num_50_1_2000_t1_0152_0000_2|creationTime|2009-06-14T18:55:42Z +http://search.datao.net/|creationDate|2014-11-08 +http://search.datao.net/|tag|http://www.semanlink.net/tag/olivier_rossel +http://search.datao.net/|tag|http://www.semanlink.net/tag/datao +http://search.datao.net/|tag|http://www.semanlink.net/tag/data_web +http://search.datao.net/|comment|We index alive endpoints of the LinkedData +http://search.datao.net/|title|search.datao.net +http://search.datao.net/|creationTime|2014-11-08T07:37:28Z +http://www.courrierinternational.com/article/2014/04/10/le-nouveau-rwanda|creationDate|2014-05-02 +http://www.courrierinternational.com/article/2014/04/10/le-nouveau-rwanda|tag|http://www.semanlink.net/tag/rwanda +http://www.courrierinternational.com/article/2014/04/10/le-nouveau-rwanda|title|"""Le nouveau Rwanda"" Courrier international" +http://www.courrierinternational.com/article/2014/04/10/le-nouveau-rwanda|creationTime|2014-05-02T19:15:52Z +http://www.dglflf.culture.gouv.fr/Actualites/Programme_Semanticpedia.pdf|creationDate|2012-11-19 +http://www.dglflf.culture.gouv.fr/Actualites/Programme_Semanticpedia.pdf|tag|http://www.semanlink.net/tag/semanticpedia +http://www.dglflf.culture.gouv.fr/Actualites/Programme_Semanticpedia.pdf|title|Présentation SémanticPedia +http://www.dglflf.culture.gouv.fr/Actualites/Programme_Semanticpedia.pdf|creationTime|2012-11-19T14:16:17Z +http://code.google.com/p/lmf/wiki/PrinciplesLinkedMedia|creationDate|2012-07-04 +http://code.google.com/p/lmf/wiki/PrinciplesLinkedMedia|tag|http://www.semanlink.net/tag/linked_media_framework +http://code.google.com/p/lmf/wiki/PrinciplesLinkedMedia|title|PrinciplesLinkedMedia - lmf - The concepts behind Linked Media and how it extends Linked Data +http://code.google.com/p/lmf/wiki/PrinciplesLinkedMedia|creationTime|2012-07-04T23:59:52Z +https://arxiv.org/abs/1901.02860|creationDate|2019-01-11 +https://arxiv.org/abs/1901.02860|tag|http://www.semanlink.net/tag/acl_2019 +https://arxiv.org/abs/1901.02860|tag|http://www.semanlink.net/tag/attention_is_all_you_need +https://arxiv.org/abs/1901.02860|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +https://arxiv.org/abs/1901.02860|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1901.02860|arxiv_author|Zhilin Yang +https://arxiv.org/abs/1901.02860|arxiv_author|Jaime Carbonell +https://arxiv.org/abs/1901.02860|arxiv_author|Zihang Dai +https://arxiv.org/abs/1901.02860|arxiv_author|Ruslan Salakhutdinov +https://arxiv.org/abs/1901.02860|arxiv_author|Quoc V. 
Le +https://arxiv.org/abs/1901.02860|arxiv_author|Yiming Yang +https://arxiv.org/abs/1901.02860|title|[1901.02860] Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context +https://arxiv.org/abs/1901.02860|creationTime|2019-01-11T17:32:14Z +https://arxiv.org/abs/1901.02860|arxiv_summary|"Transformers have a potential of learning longer-term dependency, but are +limited by a fixed-length context in the setting of language modeling. We +propose a novel neural architecture Transformer-XL that enables learning +dependency beyond a fixed length without disrupting temporal coherence. It +consists of a segment-level recurrence mechanism and a novel positional +encoding scheme. Our method not only enables capturing longer-term dependency, +but also resolves the context fragmentation problem. As a result, +Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer +than vanilla Transformers, achieves better performance on both short and long +sequences, and is up to 1,800+ times faster than vanilla Transformers during +evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity +to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion +Word, and 54.5 on Penn Treebank (without finetuning). When trained only on +WikiText-103, Transformer-XL manages to generate reasonably coherent, novel +text articles with thousands of tokens. Our code, pretrained models, and +hyperparameters are available in both Tensorflow and PyTorch." +https://arxiv.org/abs/1901.02860|arxiv_firstAuthor|Zihang Dai +https://arxiv.org/abs/1901.02860|arxiv_updated|2019-06-02T21:21:48Z +https://arxiv.org/abs/1901.02860|arxiv_title|Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context +https://arxiv.org/abs/1901.02860|arxiv_published|2019-01-09T18:28:19Z +https://arxiv.org/abs/1901.02860|arxiv_num|1901.02860 +http://www.tela-botanica.org/|creationDate|2008-06-30 +http://www.tela-botanica.org/|tag|http://www.semanlink.net/tag/botanique +http://www.tela-botanica.org/|title|Tela Botanica - Le réseau de la botanique francophone +http://www.tela-botanica.org/|creationTime|2008-06-30T21:47:09Z +https://sagascience.com/jeanrouch/|creationDate|2017-08-23 +https://sagascience.com/jeanrouch/|tag|http://www.semanlink.net/tag/jean_rouch +https://sagascience.com/jeanrouch/|title|Sagascience - Jean Rouch L’ethnologue-cinéaste +https://sagascience.com/jeanrouch/|creationTime|2017-08-23T12:51:32Z +https://sagascience.com/jeanrouch/|source|https://sagascience.com/jeanrouch +https://medium.com/@acrosson/extract-subject-matter-of-documents-using-nlp-e284c1c61824|creationDate|2017-05-24 +https://medium.com/@acrosson/extract-subject-matter-of-documents-using-nlp-e284c1c61824|tag|http://www.semanlink.net/tag/python_sample_code +https://medium.com/@acrosson/extract-subject-matter-of-documents-using-nlp-e284c1c61824|tag|http://www.semanlink.net/tag/nlp_sample_code +https://medium.com/@acrosson/extract-subject-matter-of-documents-using-nlp-e284c1c61824|tag|http://www.semanlink.net/tag/python_nlp +https://medium.com/@acrosson/extract-subject-matter-of-documents-using-nlp-e284c1c61824|tag|http://www.semanlink.net/tag/part_of_speech_tagging +https://medium.com/@acrosson/extract-subject-matter-of-documents-using-nlp-e284c1c61824|tag|http://www.semanlink.net/tag/shallow_parsing_chunking +https://medium.com/@acrosson/extract-subject-matter-of-documents-using-nlp-e284c1c61824|tag|http://www.semanlink.net/tag/nltk 
+https://medium.com/@acrosson/extract-subject-matter-of-documents-using-nlp-e284c1c61824|title|Extract Subject Matter of Documents Using NLP – Alexander Crosson – Medium +https://medium.com/@acrosson/extract-subject-matter-of-documents-using-nlp-e284c1c61824|creationTime|2017-05-24T17:32:42Z +http://nonchalantrepreneur.com/post/70130104170/bitcoin-and-the-byzantine-generals-problem|creationDate|2013-12-16 +http://nonchalantrepreneur.com/post/70130104170/bitcoin-and-the-byzantine-generals-problem|tag|http://www.semanlink.net/tag/bitcoin +http://nonchalantrepreneur.com/post/70130104170/bitcoin-and-the-byzantine-generals-problem|tag|http://www.semanlink.net/tag/byzantine_fault_tolerance +http://nonchalantrepreneur.com/post/70130104170/bitcoin-and-the-byzantine-generals-problem|title|cdixon tumblr, Bitcoin and the Byzantine Generals Problem +http://nonchalantrepreneur.com/post/70130104170/bitcoin-and-the-byzantine-generals-problem|creationTime|2013-12-16T11:15:52Z +http://developer.apple.com/internet/webcontent/xmlhttpreq.html|creationDate|2005-05-14 +http://developer.apple.com/internet/webcontent/xmlhttpreq.html|tag|http://www.semanlink.net/tag/ajax +http://developer.apple.com/internet/webcontent/xmlhttpreq.html|tag|http://www.semanlink.net/tag/apple_developer_connection +http://developer.apple.com/internet/webcontent/xmlhttpreq.html|title|Dynamic HTML and XML: The XMLHttpRequest Object +http://tech.groups.yahoo.com/group/jena-dev/message/35751|creationDate|2008-07-14 +http://tech.groups.yahoo.com/group/jena-dev/message/35751|tag|http://www.semanlink.net/tag/dev_tips +http://tech.groups.yahoo.com/group/jena-dev/message/35751|tag|http://www.semanlink.net/tag/arq_property_functions +http://tech.groups.yahoo.com/group/jena-dev/message/35751|tag|http://www.semanlink.net/tag/andy_seaborne +http://tech.groups.yahoo.com/group/jena-dev/message/35751|tag|http://www.semanlink.net/tag/fps_post +http://tech.groups.yahoo.com/group/jena-dev/message/35751|comment|"when handling a describe query with ARQ, is it possible to add statements to the returned RDF, based on a ""magic property""? 
(Yes, it is)" +http://tech.groups.yahoo.com/group/jena-dev/message/35751|title|ARQ: a question about property functions +http://tech.groups.yahoo.com/group/jena-dev/message/35751|creationTime|2008-07-14T14:12:03Z +https://blog.semanticscholar.org/finding-experts-in-grapal-b97ef280e585|creationDate|2019-03-06 +https://blog.semanticscholar.org/finding-experts-in-grapal-b97ef280e585|tag|http://www.semanlink.net/tag/neo4j +https://blog.semanticscholar.org/finding-experts-in-grapal-b97ef280e585|tag|http://www.semanlink.net/tag/publication_scientifique +https://blog.semanticscholar.org/finding-experts-in-grapal-b97ef280e585|title|Finding experts in GrapAL – Semantic Scholar +https://blog.semanticscholar.org/finding-experts-in-grapal-b97ef280e585|creationTime|2019-03-06T20:24:44Z +http://videolectures.net/|creationDate|2010-01-08 +http://videolectures.net/|tag|http://www.semanlink.net/tag/video +http://videolectures.net/|title|VideoLectures - exchange ideas & share knowledge +http://videolectures.net/|creationTime|2010-01-08T03:38:43Z +http://www.ivan-herman.net/WebLog/WorkRelated/SemanticWeb/ursw06.html|creationDate|2006-11-07 +http://www.ivan-herman.net/WebLog/WorkRelated/SemanticWeb/ursw06.html|tag|http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web +http://www.ivan-herman.net/WebLog/WorkRelated/SemanticWeb/ursw06.html|title|Ivan's Blog : Workshop on Uncertainty Reasoning on the SW +http://maragoyet.blog.lemonde.fr/2013/12/13/erudition-piege-a-cons/|creationDate|2013-12-16 +http://maragoyet.blog.lemonde.fr/2013/12/13/erudition-piege-a-cons/|tag|http://www.semanlink.net/tag/college +http://maragoyet.blog.lemonde.fr/2013/12/13/erudition-piege-a-cons/|tag|http://www.semanlink.net/tag/erudition +http://maragoyet.blog.lemonde.fr/2013/12/13/erudition-piege-a-cons/|tag|http://www.semanlink.net/tag/grece_antique +http://maragoyet.blog.lemonde.fr/2013/12/13/erudition-piege-a-cons/|title|Erudition, piège à cons ? Alchimie du collège +http://maragoyet.blog.lemonde.fr/2013/12/13/erudition-piege-a-cons/|creationTime|2013-12-16T15:34:56Z +http://www.lemonde.fr/opinions/article/2010/03/06/mal-a-l-europe-par-herve-kempf_1315314_3232.html|creationDate|2010-03-07 +http://www.lemonde.fr/opinions/article/2010/03/06/mal-a-l-europe-par-herve-kempf_1315314_3232.html|tag|http://www.semanlink.net/tag/union_europeenne +http://www.lemonde.fr/opinions/article/2010/03/06/mal-a-l-europe-par-herve-kempf_1315314_3232.html|tag|http://www.semanlink.net/tag/ogm +http://www.lemonde.fr/opinions/article/2010/03/06/mal-a-l-europe-par-herve-kempf_1315314_3232.html|tag|http://www.semanlink.net/tag/herve_kempf +http://www.lemonde.fr/opinions/article/2010/03/06/mal-a-l-europe-par-herve-kempf_1315314_3232.html|comment|Déni de démocratie, volonté de se plier aux desiderata des industriels, mépris implicite d'une agriculture qui ne serait pas productiviste : voilà ce qu'est cette décision. Mais il y a plus grave encore, qui touche à l'idéal européen lui-même. Pour continuer à imposer les OGM, la Commission veut que chaque Etat puisse choisir s'il les autorise ou pas chez lui. Cela revient à diviser l'Union, à la laisser aller à hue et à dia sur une question éminemment importante, qui n'est pas de simple convenance nationale, mais implique une politique agricole commune. 
+http://www.lemonde.fr/opinions/article/2010/03/06/mal-a-l-europe-par-herve-kempf_1315314_3232.html|title|Mal à l'Europe - Chronique Ecologie, Hervé Kempf +http://www.lemonde.fr/opinions/article/2010/03/06/mal-a-l-europe-par-herve-kempf_1315314_3232.html|creationTime|2010-03-07T22:53:47Z +http://www.lemonde.fr/opinions/article/2010/03/06/mal-a-l-europe-par-herve-kempf_1315314_3232.html|source|Le Monde +http://www.lemonde.fr/opinions/article/2010/03/06/mal-a-l-europe-par-herve-kempf_1315314_3232.html|date|2010-03-07 +http://www.africatime.com/niger/nouvelle.asp?no_nouvelle=200085&no_categorie=2|creationDate|2007-08-21 +http://www.africatime.com/niger/nouvelle.asp?no_nouvelle=200085&no_categorie=2|tag|http://www.semanlink.net/tag/zinder_alimentation_en_eau +http://www.africatime.com/niger/nouvelle.asp?no_nouvelle=200085&no_categorie=2|title|Fin du problème d'eau de la ville de Zinder (juin 2005) Souvenirs d'un enfant du Damagaram +http://www.africatime.com/niger/nouvelle.asp?no_nouvelle=200085&no_categorie=2|creationTime|2007-08-21T23:49:24Z +http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html|creationDate|2011-12-17 +http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html|tag|http://www.semanlink.net/tag/data_web +http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html|tag|http://www.semanlink.net/tag/provocative_idea +http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html|tag|http://www.semanlink.net/tag/rant +http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html|tag|http://www.semanlink.net/tag/richard_cyganiak +http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html|tag|http://www.semanlink.net/tag/httprange_14 +http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html|comment|"The web of data will succeed *because* it conflates a thing and a web page about the thing. 
+ +[TimBL's answer](https://lists.w3.org/Archives/Public/public-lod/2011Jun/0262.html)" +http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html|title|Re: Squaring the HTTP-range-14 circle +http://lists.w3.org/Archives/Public/public-lod/2011Jun/0186.html|creationTime|2011-12-17T15:38:05Z +http://lists.w3.org/Archives/Public/public-esw-thes/2012Jul/0007.html|creationDate|2012-08-06 +http://lists.w3.org/Archives/Public/public-esw-thes/2012Jul/0007.html|tag|http://www.semanlink.net/tag/skos +http://lists.w3.org/Archives/Public/public-esw-thes/2012Jul/0007.html|tag|http://www.semanlink.net/tag/graph_visualization +http://lists.w3.org/Archives/Public/public-esw-thes/2012Jul/0007.html|title|visualization of skos thesauri (public-esw-thes@w3.org from July 2012) +http://lists.w3.org/Archives/Public/public-esw-thes/2012Jul/0007.html|creationTime|2012-08-06T00:12:36Z +http://wiki.goodrelations-vocabulary.org/FFAQ#Why_is_Google_not_showing_rich_snippets_for_my_pages.3F|creationDate|2012-10-15 +http://wiki.goodrelations-vocabulary.org/FFAQ#Why_is_Google_not_showing_rich_snippets_for_my_pages.3F|tag|http://www.semanlink.net/tag/google_rich_snippets +http://wiki.goodrelations-vocabulary.org/FFAQ#Why_is_Google_not_showing_rich_snippets_for_my_pages.3F|tag|http://www.semanlink.net/tag/wiki +http://wiki.goodrelations-vocabulary.org/FFAQ#Why_is_Google_not_showing_rich_snippets_for_my_pages.3F|tag|http://www.semanlink.net/tag/goodrelations +http://wiki.goodrelations-vocabulary.org/FFAQ#Why_is_Google_not_showing_rich_snippets_for_my_pages.3F|title|Why is Google not showing rich snippets for my pages? - GoodRelations Wiki +http://wiki.goodrelations-vocabulary.org/FFAQ#Why_is_Google_not_showing_rich_snippets_for_my_pages.3F|creationTime|2012-10-15T13:48:21Z +http://blogs.sun.com/roller/page/searchguy/20050610|creationDate|2005-06-15 +http://blogs.sun.com/roller/page/searchguy/20050610|tag|http://www.semanlink.net/tag/taxonomies +http://blogs.sun.com/roller/page/searchguy/20050610|title|Automatically building semantic taxonomies The Search Guy Weblog +https://arxiv.org/abs/1810.09164|creationDate|2019-04-26 +https://arxiv.org/abs/1810.09164|tag|http://www.semanlink.net/tag/nlp_short_texts +https://arxiv.org/abs/1810.09164|tag|http://www.semanlink.net/tag/entity_linking +https://arxiv.org/abs/1810.09164|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1810.09164|tag|http://www.semanlink.net/tag/wikidata +https://arxiv.org/abs/1810.09164|arxiv_author|Mohammad Akbari +https://arxiv.org/abs/1810.09164|arxiv_author|Andrew D. O'Harney +https://arxiv.org/abs/1810.09164|arxiv_author|Marc Sloan +https://arxiv.org/abs/1810.09164|arxiv_author|Stefano Bragaglia +https://arxiv.org/abs/1810.09164|arxiv_author|Alberto Cetoli +https://arxiv.org/abs/1810.09164|comment|"Evaluation of different deep learning **techniques to create +a context vector from graphs, aimed at high-accuracy NED**. (neural +approach for entity disambiguation using graphs as background +knowledge) + +> We tackle Named Entity Disambiguation (NED) by comparing entities +in short sentences with Wikidata graphs. Creating a context vector +from graphs through deep learning is a challenging problem that has +never been applied to NED. Our main contribution is to present an +experimental study of recent neural techniques, as well as a discussion +about which graph features are most important for the disambiguation +task... 
+ +[published paper](https://rd.springer.com/chapter/10.1007/978-3-030-15719-7_10) + + +In NED, the system +must be able to generate a context for an entity in a text and an entity +in a knowledge base, then correctly link the two. + +Explore whether representing graphs +as triplets is more useful than using the full topological information of the graph +" +https://arxiv.org/abs/1810.09164|title|[1810.09164] Named Entity Disambiguation using Deep Learning on Graphs +https://arxiv.org/abs/1810.09164|creationTime|2019-04-26T17:37:17Z +https://arxiv.org/abs/1810.09164|arxiv_summary|"We tackle \ac{NED} by comparing entities in short sentences with \wikidata{} +graphs. Creating a context vector from graphs through deep learning is a +challenging problem that has never been applied to \ac{NED}. Our main +contribution is to present an experimental study of recent neural techniques, +as well as a discussion about which graph features are most important for the +disambiguation task. In addition, a new dataset (\wikidatadisamb{}) is created +to allow a clean and scalable evaluation of \ac{NED} with \wikidata{} entries, +and to be used as a reference in future research. In the end our results show +that a \ac{Bi-LSTM} encoding of the graph triplets performs best, improving +upon the baseline models and scoring an \rm{F1} value of $91.6\%$ on the +\wikidatadisamb{} test set" +https://arxiv.org/abs/1810.09164|arxiv_firstAuthor|Alberto Cetoli +https://arxiv.org/abs/1810.09164|arxiv_updated|2018-10-22T10:16:07Z +https://arxiv.org/abs/1810.09164|arxiv_title|Named Entity Disambiguation using Deep Learning on Graphs +https://arxiv.org/abs/1810.09164|arxiv_published|2018-10-22T10:16:07Z +https://arxiv.org/abs/1810.09164|arxiv_num|1810.09164 +https://www2018.thewebconf.org/program/web-content-analysis/|creationDate|2018-01-27 +https://www2018.thewebconf.org/program/web-content-analysis/|tag|http://www.semanlink.net/tag/nlp_hierarchical_text_classification +https://www2018.thewebconf.org/program/web-content-analysis/|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://www2018.thewebconf.org/program/web-content-analysis/|tag|http://www.semanlink.net/tag/word_embedding +https://www2018.thewebconf.org/program/web-content-analysis/|tag|http://www.semanlink.net/tag/topic_modeling_over_short_texts +https://www2018.thewebconf.org/program/web-content-analysis/|comment|"[CFP](https://www2018.thewebconf.org/call-for-papers/research-tracks-cfp/web-content-analysis/) + +> In previous years, ‘content analysis’ and ‘semantic and knowledge’ were in separate track. This year, we combined these tracks to emphasize the close relationship between these topics; **the use of content to curate knowledge and the use of knowledge to guide content analysis and intelligent usage**. + +Some of the accepted papers: +### [Large-Scale Hierarchical Text Classification with Recursively Regularized Deep Graph-CNN](https://doi.org/10.1145/3178876.3186005) + +[Hierarchical Text Classification](/tag/nlp_hierarchical_text_classification): Text classification to a hierarchical taxonomy of topics, using graph representation of text, and CNN over this graph + +Refers back to what was covered in the ""Graph-based Text Representations"" tutorial + +from the abstract: + +> a graph-CNN based deep learning model to first convert texts to graph-of-words, and then use graph convolution operations to convolve the word graph. Graph-of-words representation of texts has the advantage of capturing non-consecutive and long-distance semantics. 
CNN models have the advantage of learning different level of semantics. To further leverage the hierarchy of labels, we regularize the deep architecture with the dependency among labels + +Conversion of text to graph: potentially given a single document + +### [Weakly-supervised Relation Extraction by Pattern-enhanced Embedding Learning](https://doi.org/10.1145/3178876.3186024) + +Semi-supervised relation extraction from text corpora, in a setting where little labeled data describing the relations is available. + +For example, labeled data indicates that the text ""Beijing, capital of China"" corresponds to the relation between entities (""Beijing"", ""Capital Of"", ""China""), and one would like to be able to extract the relevant entities and relations from text such as ""Paris, France's capital,..."" + +The paper describes a method combining two modules, one based on automatic pattern extraction (e.g. ""[Head], Capital Of [Tail]"") and the other on ""distributional semantics"" (of the ""word embeddings"" kind). The two modules collaborate: the first creates relation instances that augment the knowledge base on which the second is trained, and the second helps the first identify informative patterns (""co-training"") + +### [Scalable Instance Reconstruction in Knowledge Bases via Relatedness Affiliated Embedding](https://doi.org/10.1145/3178876.3186017) + +Knowledge base completion problem: usually, it is formulated as a link prediction problem, but not here. A novel knowledge embedding model (""Joint Modelling and Learning of Relatedness and Embedding"") + +### [Improving Word Embedding Compositionality using Lexicographic Definitions](https://doi.org/10.1145/3178876.3186007) + +How to obtain the best text representations from word representations (word embeddings)? The author uses lexicographic resources (wordnet) for the tests: is the embedding obtained for a word's definition close to that of the word itself? + +The paper builds on a [thesis by the same author](https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf), which is clear and well written. 
+ +### [CESI: Canonicalizing Open Knowledge Bases using Embeddings and Side Information](https://doi.org/10.1145/3178876.3186030) + +Improving the extraction of triples (noun phrase, property, noun phrase) from text by computing embeddings for the ""noun phrases"" (~entities) + +### [Short-Text Topic Modeling via Non-negative Matrix Factorization Enriched with Local Word-Context Correlations](https://doi.org/10.1145/3178876.3186009) + +Topic modeling for short texts, leveraging the word-context semantic correlations in the training + +### [Towards Annotating Relational Data on the Web with Language Models](https://doi.org/10.1145/3178876.3186029) + +### A paper by [David Blei](/tag/david_blei): (Dynamic Embeddings for Language Evolution) + + +" +https://www2018.thewebconf.org/program/web-content-analysis/|relatedDoc|https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf +https://www2018.thewebconf.org/program/web-content-analysis/|relatedDoc|https://doi.org/10.1145/3178876.3186007 +https://www2018.thewebconf.org/program/web-content-analysis/|title|RESEARCH TRACK: Web Content Analysis, Semantics and Knowledge +https://www2018.thewebconf.org/program/web-content-analysis/|creationTime|2018-01-27T15:36:02Z +https://blog.twitter.com/2015/autograd-for-torch|creationDate|2015-11-07 +https://blog.twitter.com/2015/autograd-for-torch|tag|http://www.semanlink.net/tag/deep_learning +https://blog.twitter.com/2015/autograd-for-torch|tag|http://www.semanlink.net/tag/machine_learning_tool +https://blog.twitter.com/2015/autograd-for-torch|comment|new framework for simplifying deep learning research: autograd for Torch +https://blog.twitter.com/2015/autograd-for-torch|title|Autograd for Torch +https://blog.twitter.com/2015/autograd-for-torch|creationTime|2015-11-07T10:48:06Z +https://calculatedcontent.com/2014/09/23/machine-learning-with-missing-labels-transductive-svms/|creationDate|2018-03-03 +https://calculatedcontent.com/2014/09/23/machine-learning-with-missing-labels-transductive-svms/|tag|http://www.semanlink.net/tag/transductive_learning +https://calculatedcontent.com/2014/09/23/machine-learning-with-missing-labels-transductive-svms/|tag|http://www.semanlink.net/tag/missing_labels_ml +https://calculatedcontent.com/2014/09/23/machine-learning-with-missing-labels-transductive-svms/|tag|http://www.semanlink.net/tag/transductive_svm +https://calculatedcontent.com/2014/09/23/machine-learning-with-missing-labels-transductive-svms/|title|Machine Learning with Missing Labels: Transductive SVMs +https://calculatedcontent.com/2014/09/23/machine-learning-with-missing-labels-transductive-svms/|creationTime|2018-03-03T14:27:07Z +https://stackoverflow.com/questions/8778409/speech-recognition-reduce-possible-search-results/8779478#8779478|creationDate|2019-04-17 +https://stackoverflow.com/questions/8778409/speech-recognition-reduce-possible-search-results/8779478#8779478|tag|http://www.semanlink.net/tag/stack_overflow +https://stackoverflow.com/questions/8778409/speech-recognition-reduce-possible-search-results/8779478#8779478|tag|http://www.semanlink.net/tag/speech_recognition +https://stackoverflow.com/questions/8778409/speech-recognition-reduce-possible-search-results/8779478#8779478|comment|"> You cannot change what google returns. You can only process the results. Fortunately, you can process the results to increase the chance of a match. 
For example, you could use a phonetic matching algorithm like Soundex + +[Related question](https://stackoverflow.com/questions/6103467/android-speech-recognition-append-dictionary/6105264#6105264)" +https://stackoverflow.com/questions/8778409/speech-recognition-reduce-possible-search-results/8779478#8779478|title|android - speech recognition reduce possible search results - Stack Overflow +https://stackoverflow.com/questions/8778409/speech-recognition-reduce-possible-search-results/8779478#8779478|creationTime|2019-04-17T00:04:35Z +http://www.atelier-francais.org/event/open-data-lexception-culturelle|creationDate|2011-09-10 +http://www.atelier-francais.org/event/open-data-lexception-culturelle|tag|http://www.semanlink.net/tag/open_data +http://www.atelier-francais.org/event/open-data-lexception-culturelle|tag|http://www.semanlink.net/tag/meetup_web_semantique +http://www.atelier-francais.org/event/open-data-lexception-culturelle|tag|http://www.semanlink.net/tag/linked_data +http://www.atelier-francais.org/event/open-data-lexception-culturelle|tag|http://www.semanlink.net/tag/patrimoine +http://www.atelier-francais.org/event/open-data-lexception-culturelle|title|Open data : l'exception culturelle? Atelier Français +http://www.atelier-francais.org/event/open-data-lexception-culturelle|creationTime|2011-09-10T22:48:30Z +http://lov.okfn.org/dataset/lov/details/vocabulary_cold.html|creationDate|2012-09-10 +http://lov.okfn.org/dataset/lov/details/vocabulary_cold.html|tag|http://www.semanlink.net/tag/configuration_ontology +http://lov.okfn.org/dataset/lov/details/vocabulary_cold.html|tag|http://www.semanlink.net/tag/lov_linked_open_vocabularies +http://lov.okfn.org/dataset/lov/details/vocabulary_cold.html|title|COLD - Configuration ontology / LOV +http://lov.okfn.org/dataset/lov/details/vocabulary_cold.html|creationTime|2012-09-10T16:32:28Z +https://medium.com/mit-technology-review/a-radical-new-neural-network-design-could-overcome-big-challenges-in-ai-56b6af3fe9a5|creationDate|2018-12-22 +https://medium.com/mit-technology-review/a-radical-new-neural-network-design-could-overcome-big-challenges-in-ai-56b6af3fe9a5|tag|http://www.semanlink.net/tag/deep_learning +https://medium.com/mit-technology-review/a-radical-new-neural-network-design-could-overcome-big-challenges-in-ai-56b6af3fe9a5|tag|http://www.semanlink.net/tag/time_series +https://medium.com/mit-technology-review/a-radical-new-neural-network-design-could-overcome-big-challenges-in-ai-56b6af3fe9a5|title|A Radical New Neural Network Design Could Overcome Big Challenges in AI +https://medium.com/mit-technology-review/a-radical-new-neural-network-design-could-overcome-big-challenges-in-ai-56b6af3fe9a5|creationTime|2018-12-22T10:48:32Z +https://twitter.com/TensorFlow/status/1055538593941409792|creationDate|2018-10-26 +https://twitter.com/TensorFlow/status/1055538593941409792|tag|http://www.semanlink.net/tag/tensorflow +https://twitter.com/TensorFlow/status/1055538593941409792|tag|http://www.semanlink.net/tag/dev_tips +https://twitter.com/TensorFlow/status/1055538593941409792|title|TensorFlow: how to load and save models at every epoch so you never lose time or data. 
+https://twitter.com/TensorFlow/status/1055538593941409792|creationTime|2018-10-26T16:31:02Z +http://www.ldp4j.org/#/|creationDate|2014-07-21 +http://www.ldp4j.org/#/|tag|http://www.semanlink.net/tag/java_library +http://www.ldp4j.org/#/|tag|http://www.semanlink.net/tag/ldp_implementations +http://www.ldp4j.org/#/|comment|"Open-source Java-based framework for the development of read-write Linked Data applications based on the LDP 1.0 specification.
+Provides an extensible LDP Server and an extensible LDP Client
+How to + +" +http://www.ldp4j.org/#/|title|LDP4j +http://www.ldp4j.org/#/|creationTime|2014-07-21T02:40:30Z +http://images.math.cnrs.fr/|creationDate|2010-07-20 +http://images.math.cnrs.fr/|tag|http://www.semanlink.net/tag/mathematiques +http://images.math.cnrs.fr/|title|Images des mathématiques +http://images.math.cnrs.fr/|creationTime|2010-07-20T15:39:06Z +http://reference.sitepoint.com/javascript|creationDate|2010-05-27 +http://reference.sitepoint.com/javascript|tag|http://www.semanlink.net/tag/brouteur +http://reference.sitepoint.com/javascript|tag|http://www.semanlink.net/tag/javascript +http://reference.sitepoint.com/javascript|comment|Includes browser compatibility +http://reference.sitepoint.com/javascript|title|JavaScript Reference +http://reference.sitepoint.com/javascript|creationTime|2010-05-27T13:19:36Z +http://www.lemonde.fr/planete/article/2011/12/23/mecanique-du-vivant-les-physiciens-recrivent-la-biologie_1621701_3244.html#ens_id=1622560|creationDate|2011-12-26 +http://www.lemonde.fr/planete/article/2011/12/23/mecanique-du-vivant-les-physiciens-recrivent-la-biologie_1621701_3244.html#ens_id=1622560|tag|http://www.semanlink.net/tag/biologie +http://www.lemonde.fr/planete/article/2011/12/23/mecanique-du-vivant-les-physiciens-recrivent-la-biologie_1621701_3244.html#ens_id=1622560|tag|http://www.semanlink.net/tag/physique +http://www.lemonde.fr/planete/article/2011/12/23/mecanique-du-vivant-les-physiciens-recrivent-la-biologie_1621701_3244.html#ens_id=1622560|title|Mécanique du vivant : les physiciens récrivent la biologie +http://www.lemonde.fr/planete/article/2011/12/23/mecanique-du-vivant-les-physiciens-recrivent-la-biologie_1621701_3244.html#ens_id=1622560|creationTime|2011-12-26T13:49:21Z +http://www.lemonde.fr/planete/article/2011/12/23/mecanique-du-vivant-les-physiciens-recrivent-la-biologie_1621701_3244.html#ens_id=1622560|source|Le Monde +http://fr.wikipedia.org/wiki/Acoustique_musicale#Param.C3.A8tres_du_sonore_et_attributs_du_musical|creationDate|2007-11-28 +http://fr.wikipedia.org/wiki/Acoustique_musicale#Param.C3.A8tres_du_sonore_et_attributs_du_musical|tag|http://www.semanlink.net/tag/acoustique_musicale +http://fr.wikipedia.org/wiki/Acoustique_musicale#Param.C3.A8tres_du_sonore_et_attributs_du_musical|title|Acoustique musicale - Wikipédia +http://fr.wikipedia.org/wiki/Acoustique_musicale#Param.C3.A8tres_du_sonore_et_attributs_du_musical|creationTime|2007-11-28T01:31:33Z +http://nadbordrozd.github.io/blog/2015/11/29/ds-toolbox-topic-models/|creationDate|2017-11-21 +http://nadbordrozd.github.io/blog/2015/11/29/ds-toolbox-topic-models/|tag|http://www.semanlink.net/tag/topic_modeling +http://nadbordrozd.github.io/blog/2015/11/29/ds-toolbox-topic-models/|tag|http://www.semanlink.net/tag/dimensionality_reduction +http://nadbordrozd.github.io/blog/2015/11/29/ds-toolbox-topic-models/|tag|http://www.semanlink.net/tag/word_embedding +http://nadbordrozd.github.io/blog/2015/11/29/ds-toolbox-topic-models/|comment|Usefulness of topic models and word embeddings for non-NLP tasks +http://nadbordrozd.github.io/blog/2015/11/29/ds-toolbox-topic-models/|title|DS Toolbox - Topic Models - DS lore +http://nadbordrozd.github.io/blog/2015/11/29/ds-toolbox-topic-models/|creationTime|2017-11-21T18:42:20Z +http://searchengineland.com/google-becomes-answer-engine-with-semantic-technology-great-news-for-retailers-116860|creationDate|2012-04-14 
+http://searchengineland.com/google-becomes-answer-engine-with-semantic-technology-great-news-for-retailers-116860|tag|http://www.semanlink.net/tag/goodrelations +http://searchengineland.com/google-becomes-answer-engine-with-semantic-technology-great-news-for-retailers-116860|tag|http://www.semanlink.net/tag/google +http://searchengineland.com/google-becomes-answer-engine-with-semantic-technology-great-news-for-retailers-116860|title|Google Becomes Answer Engine With Semantic Technology − Great News For Retailers +http://searchengineland.com/google-becomes-answer-engine-with-semantic-technology-great-news-for-retailers-116860|creationTime|2012-04-14T12:00:36Z +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/|creationDate|2007-07-13 +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/|tag|http://www.semanlink.net/tag/owled_2007 +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/|title|CEUR-WS.org/Vol-258 - OWL: Experiences and Directions 2007 +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/|creationTime|2007-07-13T18:44:04Z +https://bugs.eclipse.org/bugs/show_bug.cgi?id=139241|creationDate|2007-11-10 +https://bugs.eclipse.org/bugs/show_bug.cgi?id=139241|tag|http://www.semanlink.net/tag/eclipse +https://bugs.eclipse.org/bugs/show_bug.cgi?id=139241|tag|http://www.semanlink.net/tag/bug +https://bugs.eclipse.org/bugs/show_bug.cgi?id=139241|tag|http://www.semanlink.net/tag/wtp +https://bugs.eclipse.org/bugs/show_bug.cgi?id=139241|comment|"""duplicate classpath entry"" supposed to be resolved +" +https://bugs.eclipse.org/bugs/show_bug.cgi?id=139241|creationTime|2007-11-10T03:40:09Z +http://www.notube.tv/|creationDate|2010-05-14 +http://www.notube.tv/|tag|http://www.semanlink.net/tag/television +http://www.notube.tv/|tag|http://www.semanlink.net/tag/hypiosvocampparismay2010 +http://www.notube.tv/|title|NoTube semantic television project - making television more personal +http://www.notube.tv/|creationTime|2010-05-14T10:01:38Z +http://simia.net/wiki/How_much_information_is_in_a_language%3F|creationDate|2019-04-16 +http://simia.net/wiki/How_much_information_is_in_a_language%3F|tag|http://www.semanlink.net/tag/how_much_information_in_a_language +http://simia.net/wiki/How_much_information_is_in_a_language%3F|tag|http://www.semanlink.net/tag/denny_vrandecic +http://simia.net/wiki/How_much_information_is_in_a_language%3F|title|How much information is in a language? - Simia +http://simia.net/wiki/How_much_information_is_in_a_language%3F|creationTime|2019-04-16T18:21:32Z +http://2007.xtech.org/public/schedule/paper/49|creationDate|2007-05-18 +http://2007.xtech.org/public/schedule/paper/49|tag|http://www.semanlink.net/tag/david_beckett +http://2007.xtech.org/public/schedule/paper/49|tag|http://www.semanlink.net/tag/rdf +http://2007.xtech.org/public/schedule/paper/49|tag|http://www.semanlink.net/tag/json +http://2007.xtech.org/public/schedule/paper/49|tag|http://www.semanlink.net/tag/turtle +http://2007.xtech.org/public/schedule/paper/49|tag|http://www.semanlink.net/tag/rdf_in_json +http://2007.xtech.org/public/schedule/paper/49|tag|http://www.semanlink.net/tag/xtech_2007 +http://2007.xtech.org/public/schedule/paper/49|comment|This paper discusses textual formats as a trend using a case study of the Turtle RDF Syntax developed by the author and using developing a new JSON textual format for RDF as an example of the tradeoffs that need to be considered. 
+http://2007.xtech.org/public/schedule/paper/49|title|XTech 2007: without the X - the return of {{Textual}} markup +http://2007.xtech.org/public/schedule/paper/49|creationTime|2007-05-18T22:02:35Z +http://www.w3.org/TR/powder-use-cases/#tagsrus|creationDate|2008-05-14 +http://www.w3.org/TR/powder-use-cases/#tagsrus|tag|http://www.semanlink.net/tag/semantic_tagging +http://www.w3.org/TR/powder-use-cases/#tagsrus|tag|http://www.semanlink.net/tag/powder +http://www.w3.org/TR/powder-use-cases/#tagsrus|title|POWDER: Use Cases and Requirements +http://www.w3.org/TR/powder-use-cases/#tagsrus|creationTime|2008-05-14T21:28:46Z +http://whc.unesco.org/fr/list/1225/|creationDate|2016-09-17 +http://whc.unesco.org/fr/list/1225/|tag|http://www.semanlink.net/tag/loropeni +http://whc.unesco.org/fr/list/1225/|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://whc.unesco.org/fr/list/1225/|title|Ruines de Loropéni - UNESCO World Heritage Centre +http://whc.unesco.org/fr/list/1225/|creationTime|2016-09-17T14:06:55Z +https://explosion.ai/blog/deep-learning-formula-nlp|creationDate|2017-07-20 +https://explosion.ai/blog/deep-learning-formula-nlp|tag|http://www.semanlink.net/tag/spacy +https://explosion.ai/blog/deep-learning-formula-nlp|tag|http://www.semanlink.net/tag/word_embedding +https://explosion.ai/blog/deep-learning-formula-nlp|tag|http://www.semanlink.net/tag/deep_nlp +https://explosion.ai/blog/deep-learning-formula-nlp|tag|http://www.semanlink.net/tag/matthew_honnibal +https://explosion.ai/blog/deep-learning-formula-nlp|tag|http://www.semanlink.net/tag/deep_learning_attention +https://explosion.ai/blog/deep-learning-formula-nlp|tag|http://www.semanlink.net/tag/recurrent_neural_network +https://explosion.ai/blog/deep-learning-formula-nlp|comment|"> A four-step strategy for deep learning with text + +> Word embeddings let you treat individual words as related units of meaning, rather than entirely distinct IDs. However, most NLP problems require understanding of longer spans of text, not just individual words. There's now a simple and flexible solution that is achieving excellent performance on a wide range of problems. After embedding the text into a sequence of vectors, bidirectional RNNs are used to encode the vectors into a sentence matrix. The rows of this matrix can be understood as token vectors — they are sensitive to the sentential context of the token. The final piece of the puzzle is called an attention mechanism. This lets you reduce the sentence matrix down to a sentence vector, ready for prediction." 
+https://explosion.ai/blog/deep-learning-formula-nlp|title|Embed, encode, attend, predict: The new deep learning formula for state-of-the-art NLP models Blog Explosion AI +https://explosion.ai/blog/deep-learning-formula-nlp|creationTime|2017-07-20T00:12:06Z +https://explosion.ai/blog/deep-learning-formula-nlp|date|2016-11 +http://apassant.net/blog/2008/04/10/semwebcampparis-numero-2/|creationDate|2008-04-11 +http://apassant.net/blog/2008/04/10/semwebcampparis-numero-2/|tag|http://www.semanlink.net/tag/foaf +http://apassant.net/blog/2008/04/10/semwebcampparis-numero-2/|tag|http://www.semanlink.net/tag/rdf_and_social_networks +http://apassant.net/blog/2008/04/10/semwebcampparis-numero-2/|tag|http://www.semanlink.net/tag/semantic_camp_paris +http://apassant.net/blog/2008/04/10/semwebcampparis-numero-2/|tag|http://www.semanlink.net/tag/alexandre_passant +http://apassant.net/blog/2008/04/10/semwebcampparis-numero-2/|tag|http://www.semanlink.net/tag/sioc +http://apassant.net/blog/2008/04/10/semwebcampparis-numero-2/|title|SemwebCampParis numéro 2 : Alexandre Passant +http://apassant.net/blog/2008/04/10/semwebcampparis-numero-2/|creationTime|2008-04-11T16:00:07Z +http://dl.acm.org/citation.cfm?id=359563|creationDate|2013-04-29 +http://dl.acm.org/citation.cfm?id=359563|tag|http://www.semanlink.net/tag/nosql_and_eventual_consistency +http://dl.acm.org/citation.cfm?id=359563|tag|http://www.semanlink.net/tag/distributed_computing +http://dl.acm.org/citation.cfm?id=359563|tag|http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data +http://dl.acm.org/citation.cfm?id=359563|comment|The concept of one event happening before another in a distributed system is examined, and is shown to define a partial ordering of the events. A distributed algorithm is given for synchronizing a system of logical clocks which can be used to totally order the events. The use of the total ordering is illustrated with a method for solving synchronization problems. The algorithm is then specialized for synchronizing physical clocks, and a bound is derived on how far out of synchrony the clocks can become. 
+http://dl.acm.org/citation.cfm?id=359563|title|Time, clocks, and the ordering of events in a distributed system +http://dl.acm.org/citation.cfm?id=359563|creationTime|2013-04-29T00:27:10Z +https://darrenjw.wordpress.com/2011/11/23/lexical-scope-and-function-closures-in-r/|creationDate|2015-01-29 +https://darrenjw.wordpress.com/2011/11/23/lexical-scope-and-function-closures-in-r/|tag|http://www.semanlink.net/tag/function_closures +https://darrenjw.wordpress.com/2011/11/23/lexical-scope-and-function-closures-in-r/|tag|http://www.semanlink.net/tag/r +https://darrenjw.wordpress.com/2011/11/23/lexical-scope-and-function-closures-in-r/|title|Lexical scope and function closures in R Darren Wilkinson's research blog +https://darrenjw.wordpress.com/2011/11/23/lexical-scope-and-function-closures-in-r/|creationTime|2015-01-29T00:46:52Z +http://trialox.org/|creationDate|2011-09-09 +http://trialox.org/|tag|http://www.semanlink.net/tag/semantic_web_company +http://trialox.org/|tag|http://www.semanlink.net/tag/semantic_cms +http://trialox.org/|tag|http://www.semanlink.net/tag/clerezza +http://trialox.org/|title|trialox ag +http://trialox.org/|creationTime|2011-09-09T21:49:40Z +http://graphite.ecs.soton.ac.uk/browser/|creationDate|2018-07-10 +http://graphite.ecs.soton.ac.uk/browser/|tag|http://www.semanlink.net/tag/rdf_browser +http://graphite.ecs.soton.ac.uk/browser/|comment|[example showing a semanlink tag](http://graphite.ecs.soton.ac.uk/browser/?uri=http%3A%2F%2Fwww.semanlink.net%2Ftag%2Fnlp) +http://graphite.ecs.soton.ac.uk/browser/|title|Q&D RDF Browser +http://graphite.ecs.soton.ac.uk/browser/|creationTime|2018-07-10T18:48:22Z +https://groups.drupal.org/node/23216|creationDate|2015-03-06 +https://groups.drupal.org/node/23216|tag|http://www.semanlink.net/tag/drupal_rdf +https://groups.drupal.org/node/23216|tag|http://www.semanlink.net/tag/frederick_giasson +https://groups.drupal.org/node/23216|tag|http://www.semanlink.net/tag/solr_rdf +https://groups.drupal.org/node/23216|title|RDF for Solr: Possible implementation strategies +https://groups.drupal.org/node/23216|creationTime|2015-03-06T15:22:27Z +http://www.alta.asn.au/events/altss2004/course_notes/ALTSS-Curran-Maxent.pdf|creationDate|2012-04-10 +http://www.alta.asn.au/events/altss2004/course_notes/ALTSS-Curran-Maxent.pdf|tag|http://www.semanlink.net/tag/maxent_for_nlp +http://www.alta.asn.au/events/altss2004/course_notes/ALTSS-Curran-Maxent.pdf|title|Maximum Entropy Models for NLP +http://www.alta.asn.au/events/altss2004/course_notes/ALTSS-Curran-Maxent.pdf|creationTime|2012-04-10T02:05:19Z +http://scikit-learn.org/stable/_static/ml_map.png|creationDate|2015-10-19 +http://scikit-learn.org/stable/_static/ml_map.png|tag|http://www.semanlink.net/tag/scikit_learn +http://scikit-learn.org/stable/_static/ml_map.png|tag|http://www.semanlink.net/tag/good +http://scikit-learn.org/stable/_static/ml_map.png|tag|http://www.semanlink.net/tag/cheat_sheet +http://scikit-learn.org/stable/_static/ml_map.png|title|scikit learn: machine learning map +http://scikit-learn.org/stable/_static/ml_map.png|creationTime|2015-10-19T10:51:16Z +https://sites.google.com/view/federated-kbs-akbc19|creationDate|2019-04-29 +https://sites.google.com/view/federated-kbs-akbc19|tag|http://www.semanlink.net/tag/knowledge_graph +https://sites.google.com/view/federated-kbs-akbc19|tag|http://www.semanlink.net/tag/guha +https://sites.google.com/view/federated-kbs-akbc19|tag|http://www.semanlink.net/tag/francois_scharffe +https://sites.google.com/view/federated-kbs-akbc19|title|Federated KBs 
at AKBC2019 +https://sites.google.com/view/federated-kbs-akbc19|creationTime|2019-04-29T09:37:47Z +http://www.lemonde.fr/economie/article/2013/04/06/offshore-leaks-silence-radio-a-londres_3155281_3234.html|creationDate|2013-04-06 +http://www.lemonde.fr/economie/article/2013/04/06/offshore-leaks-silence-radio-a-londres_3155281_3234.html|tag|http://www.semanlink.net/tag/the_guardian +http://www.lemonde.fr/economie/article/2013/04/06/offshore-leaks-silence-radio-a-londres_3155281_3234.html|tag|http://www.semanlink.net/tag/royaume_uni +http://www.lemonde.fr/economie/article/2013/04/06/offshore-leaks-silence-radio-a-londres_3155281_3234.html|tag|http://www.semanlink.net/tag/offshore_leaks +http://www.lemonde.fr/economie/article/2013/04/06/offshore-leaks-silence-radio-a-londres_3155281_3234.html|title|Offshore Leaks : silence radio à Londres +http://www.lemonde.fr/economie/article/2013/04/06/offshore-leaks-silence-radio-a-londres_3155281_3234.html|creationTime|2013-04-06T14:55:33Z +https://dl.acm.org/citation.cfm?id=1321475|creationDate|2019-04-16 +https://dl.acm.org/citation.cfm?id=1321475|tag|http://www.semanlink.net/tag/wikipedia +https://dl.acm.org/citation.cfm?id=1321475|tag|http://www.semanlink.net/tag/word_sense_disambiguation +https://dl.acm.org/citation.cfm?id=1321475|tag|http://www.semanlink.net/tag/lesk_algorithm +https://dl.acm.org/citation.cfm?id=1321475|tag|http://www.semanlink.net/tag/nlp_topic_extraction +https://dl.acm.org/citation.cfm?id=1321475|tag|http://www.semanlink.net/tag/frequently_cited_paper +https://dl.acm.org/citation.cfm?id=1321475|tag|http://www.semanlink.net/tag/rada_mihalcea +https://dl.acm.org/citation.cfm?id=1321475|comment|use of Wikipedia as a resource for automatic keyword extraction and word sense disambiguation +https://dl.acm.org/citation.cfm?id=1321475|title|Wikify!: linking documents to encyclopedic knowledge (2007) +https://dl.acm.org/citation.cfm?id=1321475|creationTime|2019-04-16T22:51:58Z +https://stackoverflow.com/questions/15388831/what-are-all-possible-pos-tags-of-nltk|creationDate|2017-07-11 +https://stackoverflow.com/questions/15388831/what-are-all-possible-pos-tags-of-nltk|tag|http://www.semanlink.net/tag/stack_overflow +https://stackoverflow.com/questions/15388831/what-are-all-possible-pos-tags-of-nltk|tag|http://www.semanlink.net/tag/part_of_speech_tagging +https://stackoverflow.com/questions/15388831/what-are-all-possible-pos-tags-of-nltk|tag|http://www.semanlink.net/tag/nltk +https://stackoverflow.com/questions/15388831/what-are-all-possible-pos-tags-of-nltk|title|What are all possible pos tags of NLTK? 
- Stack Overflow +https://stackoverflow.com/questions/15388831/what-are-all-possible-pos-tags-of-nltk|creationTime|2017-07-11T14:50:14Z +http://www.liberation.fr/sports/2016/06/10/robert-mcliam-wilson-si-vous-n-aimez-pas-l-euro-supportez-l-irlande-du-nord_1458755|creationDate|2016-06-12 +http://www.liberation.fr/sports/2016/06/10/robert-mcliam-wilson-si-vous-n-aimez-pas-l-euro-supportez-l-irlande-du-nord_1458755|tag|http://www.semanlink.net/tag/irlande_du_nord +http://www.liberation.fr/sports/2016/06/10/robert-mcliam-wilson-si-vous-n-aimez-pas-l-euro-supportez-l-irlande-du-nord_1458755|tag|http://www.semanlink.net/tag/rigolo +http://www.liberation.fr/sports/2016/06/10/robert-mcliam-wilson-si-vous-n-aimez-pas-l-euro-supportez-l-irlande-du-nord_1458755|tag|http://www.semanlink.net/tag/euro_2016 +http://www.liberation.fr/sports/2016/06/10/robert-mcliam-wilson-si-vous-n-aimez-pas-l-euro-supportez-l-irlande-du-nord_1458755|tag|http://www.semanlink.net/tag/robert_mcliam_wilson +http://www.liberation.fr/sports/2016/06/10/robert-mcliam-wilson-si-vous-n-aimez-pas-l-euro-supportez-l-irlande-du-nord_1458755|comment|Encouragez les vrais sans-espoirs. N’oubliez pas, la vie est nulle et à la fin on meurt. Seul. Entraînez-vous pour l’horreur et l’abnégation de votre courte vie avec un zest de détresse footballistique. +http://www.liberation.fr/sports/2016/06/10/robert-mcliam-wilson-si-vous-n-aimez-pas-l-euro-supportez-l-irlande-du-nord_1458755|title|Robert McLiam Wilson : «Si vous n’aimez pas l’Euro, supportez l’Irlande du Nord» - Libération +http://www.liberation.fr/sports/2016/06/10/robert-mcliam-wilson-si-vous-n-aimez-pas-l-euro-supportez-l-irlande-du-nord_1458755|creationTime|2016-06-12T09:22:08Z +http://resistanceinventerre.wordpress.com/2011/12/21/luniversite-est-universelle-notre-matiere-grise-est-de-toutes-les-couleurs-signez-lappel-contre-la-chasse-aux-etudiants-etrangers/|creationDate|2012-01-15 +http://resistanceinventerre.wordpress.com/2011/12/21/luniversite-est-universelle-notre-matiere-grise-est-de-toutes-les-couleurs-signez-lappel-contre-la-chasse-aux-etudiants-etrangers/|tag|http://www.semanlink.net/tag/sarkozy_immigration +http://resistanceinventerre.wordpress.com/2011/12/21/luniversite-est-universelle-notre-matiere-grise-est-de-toutes-les-couleurs-signez-lappel-contre-la-chasse-aux-etudiants-etrangers/|tag|http://www.semanlink.net/tag/enseignement_francais +http://resistanceinventerre.wordpress.com/2011/12/21/luniversite-est-universelle-notre-matiere-grise-est-de-toutes-les-couleurs-signez-lappel-contre-la-chasse-aux-etudiants-etrangers/|tag|http://www.semanlink.net/tag/gueant +http://resistanceinventerre.wordpress.com/2011/12/21/luniversite-est-universelle-notre-matiere-grise-est-de-toutes-les-couleurs-signez-lappel-contre-la-chasse-aux-etudiants-etrangers/|tag|http://www.semanlink.net/tag/universite +http://resistanceinventerre.wordpress.com/2011/12/21/luniversite-est-universelle-notre-matiere-grise-est-de-toutes-les-couleurs-signez-lappel-contre-la-chasse-aux-etudiants-etrangers/|title|L’université est universelle : notre matière grise est de toutes les couleurs ! 
Signez l’appel contre la chasse aux étudiants étrangers +http://resistanceinventerre.wordpress.com/2011/12/21/luniversite-est-universelle-notre-matiere-grise-est-de-toutes-les-couleurs-signez-lappel-contre-la-chasse-aux-etudiants-etrangers/|creationTime|2012-01-15T13:04:38Z +https://code.fb.com/open-source/pytorch-biggraph/|creationDate|2019-05-12 +https://code.fb.com/open-source/pytorch-biggraph/|tag|http://www.semanlink.net/tag/github_project +https://code.fb.com/open-source/pytorch-biggraph/|tag|http://www.semanlink.net/tag/graph_embeddings +https://code.fb.com/open-source/pytorch-biggraph/|tag|http://www.semanlink.net/tag/facebook_fair +https://code.fb.com/open-source/pytorch-biggraph/|tag|http://www.semanlink.net/tag/pytorch +https://code.fb.com/open-source/pytorch-biggraph/|comment|"> A new tool from Facebook AI Research that enables training of multi-relation graph embeddings for very large graphs. PyTorch-BigGraph (PBG) handles graphs with billions of nodes and trillions of edges. Since PBG is written in PyTorch, researchers and engineers can easily swap in their own loss functions, models, and other components. + +[Github](https://github.com/facebookresearch/PyTorch-BigGraph), [Blog post](https://ai.facebook.com/blog/open-sourcing-pytorch-biggraph-for-faster-embeddings-of-extremely-large-graphs)" +https://code.fb.com/open-source/pytorch-biggraph/|title|PyTorch-BigGraph: Faster embeddings of large graphs - Facebook Code +https://code.fb.com/open-source/pytorch-biggraph/|creationTime|2019-05-12T12:23:13Z +https://github.com/fozziethebeat/S-Space|creationDate|2016-01-18 +https://github.com/fozziethebeat/S-Space|tag|http://www.semanlink.net/tag/github_project +https://github.com/fozziethebeat/S-Space|tag|http://www.semanlink.net/tag/nlp_tools +https://github.com/fozziethebeat/S-Space|comment|a collection of algorithms for building Semantic Spaces. Semantics space algorithms capture the statistical regularities of words in a text corpora and map each word to a high-dimensional vector that represents the semantics. +https://github.com/fozziethebeat/S-Space|title|fozziethebeat/S-Space - Java - GitHub +https://github.com/fozziethebeat/S-Space|creationTime|2016-01-18T01:22:07Z +https://arxiv.org/abs/1806.06259|creationDate|2018-06-19 +https://arxiv.org/abs/1806.06259|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1806.06259|tag|http://www.semanlink.net/tag/sentence_embeddings +https://arxiv.org/abs/1806.06259|tag|http://www.semanlink.net/tag/embedding_evaluation +https://arxiv.org/abs/1806.06259|tag|http://www.semanlink.net/tag/elmo +https://arxiv.org/abs/1806.06259|arxiv_author|Roberto Silveira +https://arxiv.org/abs/1806.06259|arxiv_author|Christian S. Perone +https://arxiv.org/abs/1806.06259|arxiv_author|Thomas S. Paula +https://arxiv.org/abs/1806.06259|comment|"a simple approach using bag-of-words with a recently introduced language model for deep context-dependent word embeddings proved to yield better results in many tasks when compared to sentence encoders trained on entailment datasets + +> We also show, however, that we are still far away from a universal encoder that can perform consistently across several downstream tasks. 
+ + + + +" +https://arxiv.org/abs/1806.06259|title|[1806.06259] Evaluation of sentence embeddings in downstream and linguistic probing tasks +https://arxiv.org/abs/1806.06259|creationTime|2018-06-19T10:15:34Z +https://arxiv.org/abs/1806.06259|arxiv_summary|"Despite the fast developmental pace of new sentence embedding methods, it is +still challenging to find comprehensive evaluations of these different +techniques. In the past years, we saw significant improvements in the field of +sentence embeddings and especially towards the development of universal +sentence encoders that could provide inductive transfer to a wide variety of +downstream tasks. In this work, we perform a comprehensive evaluation of recent +methods using a wide variety of downstream and linguistic feature probing +tasks. We show that a simple approach using bag-of-words with a recently +introduced language model for deep context-dependent word embeddings proved to +yield better results in many tasks when compared to sentence encoders trained +on entailment datasets. We also show, however, that we are still far away from +a universal encoder that can perform consistently across several downstream +tasks." +https://arxiv.org/abs/1806.06259|arxiv_firstAuthor|Christian S. Perone +https://arxiv.org/abs/1806.06259|arxiv_updated|2018-06-16T16:07:49Z +https://arxiv.org/abs/1806.06259|arxiv_title|Evaluation of sentence embeddings in downstream and linguistic probing tasks +https://arxiv.org/abs/1806.06259|arxiv_published|2018-06-16T16:07:49Z +https://arxiv.org/abs/1806.06259|arxiv_num|1806.06259 +http://news.bbc.co.uk/2/hi/asia-pacific/7323794.stm|creationDate|2008-04-13 +http://news.bbc.co.uk/2/hi/asia-pacific/7323794.stm|tag|http://www.semanlink.net/tag/tasmanian_devil +http://news.bbc.co.uk/2/hi/asia-pacific/7323794.stm|title|BBC NEWS Hope over Tasmanian Devil cancer +http://news.bbc.co.uk/2/hi/asia-pacific/7323794.stm|creationTime|2008-04-13T13:04:50Z +http://news.bbc.co.uk/2/hi/asia-pacific/7323794.stm|source|BBC +http://semanticweb.com/its-time-to-take-on-temporal-data-management-for-semantic-data_b39347#more-39347|creationDate|2013-09-10 +http://semanticweb.com/its-time-to-take-on-temporal-data-management-for-semantic-data_b39347#more-39347|tag|http://www.semanlink.net/tag/time_in_rdf +http://semanticweb.com/its-time-to-take-on-temporal-data-management-for-semantic-data_b39347#more-39347|title|It’s Time To Take On Temporal Data Management For Semantic Data - semanticweb.com +http://semanticweb.com/its-time-to-take-on-temporal-data-management-for-semantic-data_b39347#more-39347|creationTime|2013-09-10T01:45:26Z +http://web.mit.edu/press/2010/genomic-fossil.html|creationDate|2010-12-21 +http://web.mit.edu/press/2010/genomic-fossil.html|tag|http://www.semanlink.net/tag/paleontologie +http://web.mit.edu/press/2010/genomic-fossil.html|tag|http://www.semanlink.net/tag/origine_de_la_vie +http://web.mit.edu/press/2010/genomic-fossil.html|tag|http://www.semanlink.net/tag/genomique +http://web.mit.edu/press/2010/genomic-fossil.html|comment|"Analysis of modern-day genomes finds evidence for ancient environmental change
+""Many of the new genes appearing in the Archean Expansion are oxygen related, and could be the first biological evidence of the Great Oxidation Event, the period in Earth's history when oxygen became so plentiful that many anaerobic life forms may have become extinct.""" +http://web.mit.edu/press/2010/genomic-fossil.html|title|Scientists decipher 3 billion-year-old genomic fossils +http://web.mit.edu/press/2010/genomic-fossil.html|creationTime|2010-12-21T15:13:36Z +http://www.readwriteweb.com/archives/online_training_and_learning.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|creationDate|2012-01-02 +http://www.readwriteweb.com/archives/online_training_and_learning.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|tag|http://www.semanlink.net/tag/e_learning +http://www.readwriteweb.com/archives/online_training_and_learning.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|title|Making You More Awesome: The Red-Hot World of Online Learning Services +http://www.readwriteweb.com/archives/online_training_and_learning.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|creationTime|2012-01-02T12:46:20Z +http://see.stanford.edu/see/courses.aspx|creationDate|2014-03-17 +http://see.stanford.edu/see/courses.aspx|tag|http://www.semanlink.net/tag/stanford +http://see.stanford.edu/see/courses.aspx|tag|http://www.semanlink.net/tag/online_course_materials +http://see.stanford.edu/see/courses.aspx|title|Stanford School of Engineering - Stanford Engineering Everywhere +http://see.stanford.edu/see/courses.aspx|creationTime|2014-03-17T14:47:15Z +https://arxiv.org/abs/1609.08496|creationDate|2017-06-07 +https://arxiv.org/abs/1609.08496|tag|http://www.semanlink.net/tag/topic_modeling_over_short_texts +https://arxiv.org/abs/1609.08496|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1609.08496|tag|http://www.semanlink.net/tag/topic_models_word_embedding +https://arxiv.org/abs/1609.08496|arxiv_author|Jipeng Qiang +https://arxiv.org/abs/1609.08496|arxiv_author|Ping Chen +https://arxiv.org/abs/1609.08496|arxiv_author|Tong Wang +https://arxiv.org/abs/1609.08496|arxiv_author|Xindong Wu +https://arxiv.org/abs/1609.08496|comment|New method, Embedding-based Topic Model (ETM), to learn latent topics from short texts. ETM not only solves the problem of very limited word co-occurrence information by aggregating short texts into long pseudo-texts, but also utilizes a Markov Random Field regularized model that gives correlated words a better chance to be put into the same topic +https://arxiv.org/abs/1609.08496|title|[1609.08496] Topic Modeling over Short Texts by Incorporating Word Embeddings +https://arxiv.org/abs/1609.08496|creationTime|2017-06-07T18:13:32Z +https://arxiv.org/abs/1609.08496|arxiv_summary|"Inferring topics from the overwhelming amount of short texts becomes a +critical but challenging task for many content analysis tasks, such as content +charactering, user interest profiling, and emerging topic detecting. Existing +methods such as probabilistic latent semantic analysis (PLSA) and latent +Dirichlet allocation (LDA) cannot solve this prob- lem very well since only +very limited word co-occurrence information is available in short texts. This +paper studies how to incorporate the external word correlation knowledge into +short texts to improve the coherence of topic modeling. 
Based on recent results
+in word embeddings that learn semantically representations for words from a
+large corpus, we introduce a novel method, Embedding-based Topic Model (ETM),
+to learn latent topics from short texts. ETM not only solves the problem of
+very limited word co-occurrence information by aggregating short texts into
+long pseudo-texts, but also utilizes a Markov Random Field regularized model
+that gives correlated words a better chance to be put into the same topic. The
+experiments on real-world datasets validate the effectiveness of our model
+comparing with the state-of-the-art models."
+https://arxiv.org/abs/1609.08496|arxiv_firstAuthor|Jipeng Qiang
+https://arxiv.org/abs/1609.08496|arxiv_updated|2016-09-27T15:26:07Z
+https://arxiv.org/abs/1609.08496|arxiv_title|Topic Modeling over Short Texts by Incorporating Word Embeddings
+https://arxiv.org/abs/1609.08496|arxiv_published|2016-09-27T15:26:07Z
+https://arxiv.org/abs/1609.08496|arxiv_num|1609.08496
+https://arxiv.org/abs/1809.01797|creationDate|2018-09-07
+https://arxiv.org/abs/1809.01797|tag|http://www.semanlink.net/tag/knowledge_base
+https://arxiv.org/abs/1809.01797|tag|http://www.semanlink.net/tag/natural_language_generation
+https://arxiv.org/abs/1809.01797|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1809.01797|arxiv_author|Heng Ji
+https://arxiv.org/abs/1809.01797|arxiv_author|Lifu Huang
+https://arxiv.org/abs/1809.01797|arxiv_author|Qingyun Wang
+https://arxiv.org/abs/1809.01797|arxiv_author|Kevin Knight
+https://arxiv.org/abs/1809.01797|arxiv_author|Xiaoman Pan
+https://arxiv.org/abs/1809.01797|arxiv_author|Zhiying Jiang
+https://arxiv.org/abs/1809.01797|arxiv_author|Boliang Zhang
+https://arxiv.org/abs/1809.01797|title|[1809.01797] Describing a Knowledge Base
+https://arxiv.org/abs/1809.01797|creationTime|2018-09-07T12:57:23Z
+https://arxiv.org/abs/1809.01797|arxiv_summary|"We aim to automatically generate natural language descriptions about an input
+structured knowledge base (KB). We build our generation framework based on a
+pointer network which can copy facts from the input KB, and add two attention
+mechanisms: (i) slot-aware attention to capture the association between a slot
+type and its corresponding slot value; and (ii) a new \emph{table position
+self-attention} to capture the inter-dependencies among related slots. For
+evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we
+propose a KB reconstruction based metric by extracting a KB from the generation
+output and comparing it with the input KB. We also create a new data set which
+includes 106,216 pairs of structured KBs and their corresponding natural
+language descriptions for two distinct entity types. Experiments show that our
+approach significantly outperforms state-of-the-art methods. The reconstructed
+KB achieves 68.8% - 72.6% F-score."
+https://arxiv.org/abs/1809.01797|arxiv_firstAuthor|Qingyun Wang +https://arxiv.org/abs/1809.01797|arxiv_updated|2018-09-30T04:36:18Z +https://arxiv.org/abs/1809.01797|arxiv_title|Describing a Knowledge Base +https://arxiv.org/abs/1809.01797|arxiv_published|2018-09-06T02:56:58Z +https://arxiv.org/abs/1809.01797|arxiv_num|1809.01797 +http://youtube.com/watch?v=mAuYfQCgSQU|creationDate|2007-09-18 +http://youtube.com/watch?v=mAuYfQCgSQU|tag|http://www.semanlink.net/tag/youtube +http://youtube.com/watch?v=mAuYfQCgSQU|tag|http://www.semanlink.net/tag/mami_wata +http://youtube.com/watch?v=mAuYfQCgSQU|tag|http://www.semanlink.net/tag/moussa_poussi +http://youtube.com/watch?v=mAuYfQCgSQU|comment|"This song is dedicated to Mami Wata, mother of the river and water.
+Cette chanson est dédiée à Mami Wata, mère du Fleuve et de l'eau.
+Recorded live in Niamey, August 19, 2007.
+"
+http://youtube.com/watch?v=mAuYfQCgSQU|title|YouTube - Samba Diko - Moussa Poussi
+http://youtube.com/watch?v=mAuYfQCgSQU|creationTime|2007-09-18T01:09:08Z
+http://news.bbc.co.uk/2/hi/science/nature/8022612.stm|creationDate|2009-05-06
+http://news.bbc.co.uk/2/hi/science/nature/8022612.stm|tag|http://www.semanlink.net/tag/herschel_telescope
+http://news.bbc.co.uk/2/hi/science/nature/8022612.stm|title|BBC NEWS Telescopes given 'go' for launch
+http://news.bbc.co.uk/2/hi/science/nature/8022612.stm|creationTime|2009-05-06T23:42:56Z
+http://news.bbc.co.uk/2/hi/science/nature/8022612.stm|source|BBC
+https://twitter.com/RichardSocher/status/1021917140801052672|creationDate|2018-07-25
+https://twitter.com/RichardSocher/status/1021917140801052672|tag|http://www.semanlink.net/tag/richard_socher
+https://twitter.com/RichardSocher/status/1021917140801052672|tag|http://www.semanlink.net/tag/multitask_learning_in_nlp
+https://twitter.com/RichardSocher/status/1021917140801052672|tag|http://www.semanlink.net/tag/multi_task_learning
+https://twitter.com/RichardSocher/status/1021917140801052672|title|Slides motivating true multitask learning in AI and NLP
+https://twitter.com/RichardSocher/status/1021917140801052672|creationTime|2018-07-25T13:10:51Z
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=21256|creationDate|2007-09-25
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=21256|tag|http://www.semanlink.net/tag/lod_mailing_list
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=21256|tag|http://www.semanlink.net/tag/fps_post
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=21256|title|[Linking-open-data] How to get the uri of a non-information resource from the corresponding HTML page? 
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=21256|creationTime|2007-09-25T22:21:08Z +http://crypto.stanford.edu/~blynn/c/object.html|creationDate|2019-04-07 +http://crypto.stanford.edu/~blynn/c/object.html|tag|http://www.semanlink.net/tag/object_oriented_programming +http://crypto.stanford.edu/~blynn/c/object.html|title|Object-oriented oblivion +http://crypto.stanford.edu/~blynn/c/object.html|creationTime|2019-04-07T11:00:25Z +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|creationDate|2019-05-22 +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|tag|http://www.semanlink.net/tag/ulmfit +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|tag|http://www.semanlink.net/tag/weak_supervision +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|tag|http://www.semanlink.net/tag/snorkel +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|tag|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|comment|> A Step-by-Step Guide for Building an Anti-Semitic Tweet Classifier +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|title|A Technique for Building NLP Classifiers Efficiently with Transfer Learning and Weak Supervision +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|bookmarkOf|https://towardsdatascience.com/a-technique-for-building-nlp-classifiers-efficiently-with-transfer-learning-and-weak-supervision-a8e2f21ca9c8 +http://www.semanlink.net/doc/2019/05/a_technique_for_building_nlp_cl|creationTime|2019-05-22T00:18:41Z +https://arxiv.org/abs/1801.04016|creationDate|2018-02-21 +https://arxiv.org/abs/1801.04016|tag|http://www.semanlink.net/tag/judea_pearl +https://arxiv.org/abs/1801.04016|tag|http://www.semanlink.net/tag/artificial_general_intelligence +https://arxiv.org/abs/1801.04016|tag|http://www.semanlink.net/tag/machine_learning +https://arxiv.org/abs/1801.04016|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1801.04016|tag|http://www.semanlink.net/tag/human_level_ai +https://arxiv.org/abs/1801.04016|arxiv_author|Judea Pearl +https://arxiv.org/abs/1801.04016|comment|To achieve human level intelligence, learning machines need the guidance of a model of reality, similar to the ones used in causal inference tasks +https://arxiv.org/abs/1801.04016|title|[1801.04016] Theoretical Impediments to Machine Learning With Seven Sparks from the Causal Revolution +https://arxiv.org/abs/1801.04016|creationTime|2018-02-21T23:48:03Z +https://arxiv.org/abs/1801.04016|arxiv_summary|"Current machine learning systems operate, almost exclusively, in a +statistical, or model-free mode, which entails severe theoretical limits on +their power and performance. Such systems cannot reason about interventions and +retrospection and, therefore, cannot serve as the basis for strong AI. To +achieve human level intelligence, learning machines need the guidance of a +model of reality, similar to the ones used in causal inference tasks. 
To +demonstrate the essential role of such models, I will present a summary of +seven tasks which are beyond reach of current machine learning systems and +which have been accomplished using the tools of causal modeling." +https://arxiv.org/abs/1801.04016|arxiv_firstAuthor|Judea Pearl +https://arxiv.org/abs/1801.04016|arxiv_updated|2018-01-11T23:37:48Z +https://arxiv.org/abs/1801.04016|arxiv_title|Theoretical Impediments to Machine Learning With Seven Sparks from the Causal Revolution +https://arxiv.org/abs/1801.04016|arxiv_published|2018-01-11T23:37:48Z +https://arxiv.org/abs/1801.04016|arxiv_num|1801.04016 +https://einstein.ai/research/learned-in-translation-contextualized-word-vectors|creationDate|2017-09-18 +https://einstein.ai/research/learned-in-translation-contextualized-word-vectors|tag|http://www.semanlink.net/tag/salesforce +https://einstein.ai/research/learned-in-translation-contextualized-word-vectors|tag|http://www.semanlink.net/tag/richard_socher +https://einstein.ai/research/learned-in-translation-contextualized-word-vectors|tag|http://www.semanlink.net/tag/contextualised_word_representations +https://einstein.ai/research/learned-in-translation-contextualized-word-vectors|tag|http://www.semanlink.net/tag/word_embedding +https://einstein.ai/research/learned-in-translation-contextualized-word-vectors|tag|http://www.semanlink.net/tag/nlp +https://einstein.ai/research/learned-in-translation-contextualized-word-vectors|comment|"Models that use pretrained word vectors must learn how to use them. Our work picks up where word vectors left off by looking to improve over randomly initialized methods for contextualizing word vectors through training on an intermediate task -> We teach a neural network how to understand words in context by first teaching it how to translate English to German + +" +https://einstein.ai/research/learned-in-translation-contextualized-word-vectors|title|Learned in translation: contextualized word vectors (Salesforce Research) +https://einstein.ai/research/learned-in-translation-contextualized-word-vectors|creationTime|2017-09-18T15:12:24Z +http://ceur-ws.org/Vol-748/paper4.pdf|creationDate|2011-10-25 +http://ceur-ws.org/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/fps_paper +http://ceur-ws.org/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/alexandre_passant +http://ceur-ws.org/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/osema_deri_renault_paper +http://ceur-ws.org/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/constraint_satisfaction_problem +http://ceur-ws.org/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/automobile +http://ceur-ws.org/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/fadi_badra +http://ceur-ws.org/Vol-748/paper4.pdf|tag|http://www.semanlink.net/tag/osema_2011 +http://ceur-ws.org/Vol-748/paper4.pdf|title|A Semantic Web Representation of a Product Range Specification based on Constraint Satisfaction Problem in the Automotive Industry +http://ceur-ws.org/Vol-748/paper4.pdf|creationTime|2011-10-25T15:00:27Z +https://plus.google.com/106943062990152739506/posts/jLEUpys7fUW|creationDate|2015-12-17 +https://plus.google.com/106943062990152739506/posts/jLEUpys7fUW|tag|http://www.semanlink.net/tag/kingsley_idehen +https://plus.google.com/106943062990152739506/posts/jLEUpys7fUW|tag|http://www.semanlink.net/tag/openlink +https://plus.google.com/106943062990152739506/posts/jLEUpys7fUW|tag|http://www.semanlink.net/tag/data_sniffer +https://plus.google.com/106943062990152739506/posts/jLEUpys7fUW|title|OpenLink Structured 
Data Sniffer now available in the Chrome Web Store As an... +https://plus.google.com/106943062990152739506/posts/jLEUpys7fUW|creationTime|2015-12-17T14:13:56Z +http://www.readwriteweb.com/archives/semtech_making_the_web_searchable_searchmonkey.php|creationDate|2008-06-04 +http://www.readwriteweb.com/archives/semtech_making_the_web_searchable_searchmonkey.php|tag|http://www.semanlink.net/tag/searchmonkey +http://www.readwriteweb.com/archives/semtech_making_the_web_searchable_searchmonkey.php|comment|Looking at the facts, Mika and the Yahoo! search team realized that they could not count on enhancing search by leveraging metadata on today's web - it simply does not exist to the extent needed. At the same time, it was clear that enhancing search results and cross linking them to other pieces of information on the web is compelling and potentially disruptive. Yahoo! realized that in order to make this work, they need to incentivize and enable publishers to control search result presentation. And thus, SearchMonkey was born. +http://www.readwriteweb.com/archives/semtech_making_the_web_searchable_searchmonkey.php|title|Making the Web Searchable: The Story of SearchMonkey +http://www.readwriteweb.com/archives/semtech_making_the_web_searchable_searchmonkey.php|creationTime|2008-06-04T23:42:55Z +http://www.w3.org/2014/Talks/1105_phila_semwebpro/#(1)|creationDate|2014-11-05 +http://www.w3.org/2014/Talks/1105_phila_semwebpro/#(1)|tag|http://www.semanlink.net/tag/phil_archer +http://www.w3.org/2014/Talks/1105_phila_semwebpro/#(1)|tag|http://www.semanlink.net/tag/semweb_pro +http://www.w3.org/2014/Talks/1105_phila_semwebpro/#(1)|title|The Next 10 Years of Success +http://www.w3.org/2014/Talks/1105_phila_semwebpro/#(1)|creationTime|2014-11-05T20:27:12Z +http://afs.github.io/rdf-patch/|creationDate|2013-08-21 +http://afs.github.io/rdf-patch/|tag|http://www.semanlink.net/tag/rdf +http://afs.github.io/rdf-patch/|tag|http://www.semanlink.net/tag/http_patch +http://afs.github.io/rdf-patch/|tag|http://www.semanlink.net/tag/andy_seaborne +http://afs.github.io/rdf-patch/|tag|http://www.semanlink.net/tag/ldp_updates +http://afs.github.io/rdf-patch/|comment|RDF Patch is a file format for recording changes made to an RDF dataset. It can be used for replicating changes between multiple copies of the same dataset or as an incremental backups format. The design emphasizes deployment concerns such as scalability and efficient processing. +http://afs.github.io/rdf-patch/|title|RDF Patch – Describing Changes to an RDF Dataset +http://afs.github.io/rdf-patch/|creationTime|2013-08-21T00:42:16Z +http://news.bbc.co.uk/2/hi/programmes/from_our_own_correspondent/6995061.stm|creationDate|2009-05-05 +http://news.bbc.co.uk/2/hi/programmes/from_our_own_correspondent/6995061.stm|tag|http://www.semanlink.net/tag/big_brother +http://news.bbc.co.uk/2/hi/programmes/from_our_own_correspondent/6995061.stm|title|BBC NEWS Big Brother is watching us all +http://news.bbc.co.uk/2/hi/programmes/from_our_own_correspondent/6995061.stm|creationTime|2009-05-05T16:31:20Z +http://news.bbc.co.uk/2/hi/programmes/from_our_own_correspondent/6995061.stm|source|BBC +https://github.com/agazzarini/SolRDF|creationDate|2014-12-23 +https://github.com/agazzarini/SolRDF|tag|http://www.semanlink.net/tag/solr +https://github.com/agazzarini/SolRDF|tag|http://www.semanlink.net/tag/github_project +https://github.com/agazzarini/SolRDF|comment|SolRDF (i.e. Solr + RDF) is a set of Solr extensions for managing (index and search) RDF data. 
+https://github.com/agazzarini/SolRDF|title|SolRDF - GitHub +https://github.com/agazzarini/SolRDF|creationTime|2014-12-23T00:41:40Z +http://www.inaglobal.fr/presse/article/quand-l-oligopole-de-l-internet-courtise-les-editeurs-de-presse-8641|creationDate|2016-09-19 +http://www.inaglobal.fr/presse/article/quand-l-oligopole-de-l-internet-courtise-les-editeurs-de-presse-8641|tag|http://www.semanlink.net/tag/journal +http://www.inaglobal.fr/presse/article/quand-l-oligopole-de-l-internet-courtise-les-editeurs-de-presse-8641|tag|http://www.semanlink.net/tag/information_sur_internet +http://www.inaglobal.fr/presse/article/quand-l-oligopole-de-l-internet-courtise-les-editeurs-de-presse-8641|title|Quand l’oligopole de l’internet courtise les éditeurs de presse InaGlobal +http://www.inaglobal.fr/presse/article/quand-l-oligopole-de-l-internet-courtise-les-editeurs-de-presse-8641|creationTime|2016-09-19T11:26:47Z +http://web.stanford.edu/class/cs224n/reports/6838634.pdf|creationDate|2018-04-05 +http://web.stanford.edu/class/cs224n/reports/6838634.pdf|tag|http://www.semanlink.net/tag/nlp_stanford +http://web.stanford.edu/class/cs224n/reports/6838634.pdf|tag|http://www.semanlink.net/tag/sentence_embeddings +http://web.stanford.edu/class/cs224n/reports/6838634.pdf|comment|a new take on sentence embeddings +http://web.stanford.edu/class/cs224n/reports/6838634.pdf|title|Context is Everything: Finding Meaning Statistically in Semantic Spaces (CS224n 2018) +http://web.stanford.edu/class/cs224n/reports/6838634.pdf|creationTime|2018-04-05T02:09:47Z +http://www.senat.fr/rap/r12-720/r12-7201.pdf|creationDate|2013-07-07 +http://www.senat.fr/rap/r12-720/r12-7201.pdf|tag|http://www.semanlink.net/tag/intervention_francaise_au_mali +http://www.senat.fr/rap/r12-720/r12-7201.pdf|tag|http://www.semanlink.net/tag/sahel +http://www.senat.fr/rap/r12-720/r12-7201.pdf|tag|http://www.semanlink.net/tag/chevenement +http://www.senat.fr/rap/r12-720/r12-7201.pdf|tag|http://www.semanlink.net/tag/aqmi +http://www.senat.fr/rap/r12-720/r12-7201.pdf|title|Rapport d'information sur la situation au Sahel (Jean-Pierre Chevènement et Gérard Larcher) +http://www.senat.fr/rap/r12-720/r12-7201.pdf|creationTime|2013-07-07T00:50:37Z +http://www.smbc-comics.com/comic/path-of-a-hero|creationDate|2016-11-01 +http://www.smbc-comics.com/comic/path-of-a-hero|tag|http://www.semanlink.net/tag/statistics +http://www.smbc-comics.com/comic/path-of-a-hero|tag|http://www.semanlink.net/tag/rigolo +http://www.smbc-comics.com/comic/path-of-a-hero|comment|"Why you should ""understand basic statistical concepts""" +http://www.smbc-comics.com/comic/path-of-a-hero|title|Path of a Hero +http://www.smbc-comics.com/comic/path-of-a-hero|creationTime|2016-11-01T19:56:48Z +http://nlp.stanford.edu/IR-book/|creationDate|2012-04-01 +http://nlp.stanford.edu/IR-book/|tag|http://www.semanlink.net/tag/good +http://nlp.stanford.edu/IR-book/|tag|http://www.semanlink.net/tag/ai_book +http://nlp.stanford.edu/IR-book/|tag|http://www.semanlink.net/tag/nlp_class +http://nlp.stanford.edu/IR-book/|tag|http://www.semanlink.net/tag/information_retrieval_techniques +http://nlp.stanford.edu/IR-book/|tag|http://www.semanlink.net/tag/chris_manning +http://nlp.stanford.edu/IR-book/|tag|http://www.semanlink.net/tag/nlp_stanford +http://nlp.stanford.edu/IR-book/|tag|http://www.semanlink.net/tag/information_retrieval +http://nlp.stanford.edu/IR-book/|title|Introduction to Information Retrieval, Cambridge University Press (2008) Manning, Raghavan, and Schütze 
+http://nlp.stanford.edu/IR-book/|creationTime|2012-04-01T21:47:22Z +https://en.wikipedia.org/wiki/Ishi|creationDate|2017-05-26 +https://en.wikipedia.org/wiki/Ishi|tag|http://www.semanlink.net/tag/amerindien +https://en.wikipedia.org/wiki/Ishi|comment|"Ishi (c. 1861 – March 25, 1916) was the last known member of the Yahi, a group of the Yana of the U.S. state of California. Widely acclaimed in his time as the ""last wild Indian"" in America, Ishi lived most of his life completely outside modern culture. At 50 years of age, in 1911, he emerged near the present-day foothills of Lassen Peak, also known as Wa ganu p'a." +https://en.wikipedia.org/wiki/Ishi|title|Ishi - Wikipedia +https://en.wikipedia.org/wiki/Ishi|creationTime|2017-05-26T00:37:54Z +http://sites.wiwiss.fu-berlin.de/suhl/forschung/websys/opensource/index.html|creationDate|2007-04-20 +http://sites.wiwiss.fu-berlin.de/suhl/forschung/websys/opensource/index.html|tag|http://www.semanlink.net/tag/open_source +http://sites.wiwiss.fu-berlin.de/suhl/forschung/websys/opensource/index.html|tag|http://www.semanlink.net/tag/linked_data +http://sites.wiwiss.fu-berlin.de/suhl/forschung/websys/opensource/index.html|tag|http://www.semanlink.net/tag/freie_universitat_berlin +http://sites.wiwiss.fu-berlin.de/suhl/forschung/websys/opensource/index.html|tag|http://www.semanlink.net/tag/semantic_web_application +http://sites.wiwiss.fu-berlin.de/suhl/forschung/websys/opensource/index.html|title|Open Source Projects @ Freie Universität Berlin +http://sites.wiwiss.fu-berlin.de/suhl/forschung/websys/opensource/index.html|creationTime|2007-04-20T21:01:21Z +https://williambert.online/2012/05/relatively-quick-and-easy-gensim-example-code/|creationDate|2017-06-02 +https://williambert.online/2012/05/relatively-quick-and-easy-gensim-example-code/|tag|http://www.semanlink.net/tag/nlp_sample_code +https://williambert.online/2012/05/relatively-quick-and-easy-gensim-example-code/|tag|http://www.semanlink.net/tag/gensim +https://williambert.online/2012/05/relatively-quick-and-easy-gensim-example-code/|comment|basic steps necessary to use gensim to create a corpus, train models (log entropy and latent semantic analysis), and perform semantic similarity comparisons and queries. (Note: to train the models, you need to provide your own background corpus) +https://williambert.online/2012/05/relatively-quick-and-easy-gensim-example-code/|title|(Relatively) quick and easy Gensim example code William Bert +https://williambert.online/2012/05/relatively-quick-and-easy-gensim-example-code/|creationTime|2017-06-02T01:22:28Z +http://redfoot.net/|creationDate|2006-11-14 +http://redfoot.net/|tag|http://www.semanlink.net/tag/rdf_application +http://redfoot.net/|comment|Redfoot is a hypercoding system which is being used to create a webized operating system and is also being used to create applications. It is built around the notion of an RDF Graph for persistence rather than a File Tree . It provides standard web mechanisms to transport information across different machines and programmatically specifies the tasks for bundling and installing new software features across networked machines. The functions of Redfoot are analogous to that of an operating system kernel that manages resources across the web. 
+http://redfoot.net/|title|Redfoot: Hypercoding System +https://www.bloomberg.com/news/features/2018-06-25/the-biggest-digital-heist-in-history-isn-t-over-yet|creationDate|2018-07-01 +https://www.bloomberg.com/news/features/2018-06-25/the-biggest-digital-heist-in-history-isn-t-over-yet|tag|http://www.semanlink.net/tag/securite_informatique +https://www.bloomberg.com/news/features/2018-06-25/the-biggest-digital-heist-in-history-isn-t-over-yet|tag|http://www.semanlink.net/tag/hack +https://www.bloomberg.com/news/features/2018-06-25/the-biggest-digital-heist-in-history-isn-t-over-yet|title|The Biggest Digital Heist in History Isn’t Over Yet - Bloomberg +https://www.bloomberg.com/news/features/2018-06-25/the-biggest-digital-heist-in-history-isn-t-over-yet|creationTime|2018-07-01T23:52:37Z +https://jyx.jyu.fi/dspace/handle/123456789/56299|creationDate|2018-01-03 +https://jyx.jyu.fi/dspace/handle/123456789/56299|tag|http://www.semanlink.net/tag/rdf_embeddings +https://jyx.jyu.fi/dspace/handle/123456789/56299|title|Global RDF Vector Space Embeddings +https://jyx.jyu.fi/dspace/handle/123456789/56299|creationTime|2018-01-03T17:06:57Z +http://wiki.apache.org/solr/Solrj|creationDate|2012-05-04 +http://wiki.apache.org/solr/Solrj|tag|http://www.semanlink.net/tag/solr +http://wiki.apache.org/solr/Solrj|comment|Solrj is a java client to access solr +http://wiki.apache.org/solr/Solrj|title|Solrj - Solr Wiki +http://wiki.apache.org/solr/Solrj|creationTime|2012-05-04T00:53:48Z +http://www.lesinrocks.com/2014/12/20/actualite/amazon-killer-lempire-des-libraires-contre-attaque-11542327/|creationDate|2014-12-28 +http://www.lesinrocks.com/2014/12/20/actualite/amazon-killer-lempire-des-libraires-contre-attaque-11542327/|tag|http://www.semanlink.net/tag/amazon +http://www.lesinrocks.com/2014/12/20/actualite/amazon-killer-lempire-des-libraires-contre-attaque-11542327/|title|Les Inrocks - Amazon-Killer : l'empire des libraires contre-attaque +http://www.lesinrocks.com/2014/12/20/actualite/amazon-killer-lempire-des-libraires-contre-attaque-11542327/|creationTime|2014-12-28T10:48:55Z +http://fr.slideshare.net/jasontucker/how-to-stream-a-meetup-or-live-event|creationDate|2014-11-17 +http://fr.slideshare.net/jasontucker/how-to-stream-a-meetup-or-live-event|title|How to stream a meetup or live event +http://fr.slideshare.net/jasontucker/how-to-stream-a-meetup-or-live-event|creationTime|2014-11-17T21:21:23Z +http://www.data-publica.com/content/lexique-de-lopen-data/|creationDate|2012-10-23 +http://www.data-publica.com/content/lexique-de-lopen-data/|tag|http://www.semanlink.net/tag/open_data +http://www.data-publica.com/content/lexique-de-lopen-data/|tag|http://www.semanlink.net/tag/data_publica +http://www.data-publica.com/content/lexique-de-lopen-data/|title|Petit Lexique de l’Open Data Data Publica +http://www.data-publica.com/content/lexique-de-lopen-data/|creationTime|2012-10-23T00:39:27Z +http://www.betaversion.org/~stefano/linotype/news/85/|creationDate|2007-01-09 +http://www.betaversion.org/~stefano/linotype/news/85/|tag|http://www.semanlink.net/tag/stefano_mazzocchi +http://www.betaversion.org/~stefano/linotype/news/85/|tag|http://www.semanlink.net/tag/folksonomy +http://www.betaversion.org/~stefano/linotype/news/85/|tag|http://www.semanlink.net/tag/tag_ontology +http://www.betaversion.org/~stefano/linotype/news/85/|title|Stefano's Linotype ~ Folksologies: de-idealizing ontologies +http://www.snee.com/bobdc.blog/2008/08/skos_and_swoop_how.html|creationDate|2008-08-12 
+http://www.snee.com/bobdc.blog/2008/08/skos_and_swoop_how.html|tag|http://www.semanlink.net/tag/skos +http://www.snee.com/bobdc.blog/2008/08/skos_and_swoop_how.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2008/08/skos_and_swoop_how.html|title|SKOS and SWOOP: how - bobdc.blog +http://www.snee.com/bobdc.blog/2008/08/skos_and_swoop_how.html|creationTime|2008-08-12T15:15:19Z +https://www.slideshare.net/sopekmir/graphchain|creationDate|2018-04-25 +https://www.slideshare.net/sopekmir/graphchain|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://www.slideshare.net/sopekmir/graphchain|tag|http://www.semanlink.net/tag/blockchain +https://www.slideshare.net/sopekmir/graphchain|tag|http://www.semanlink.net/tag/mirek_sopek +https://www.slideshare.net/sopekmir/graphchain|tag|http://www.semanlink.net/tag/rdf +https://www.slideshare.net/sopekmir/graphchain|title|GraphChain +https://www.slideshare.net/sopekmir/graphchain|creationTime|2018-04-25T23:02:27Z +http://en.wikipedia.org/wiki/Two_envelopes_problem|creationDate|2012-09-21 +http://en.wikipedia.org/wiki/Two_envelopes_problem|tag|http://www.semanlink.net/tag/paradoxe +http://en.wikipedia.org/wiki/Two_envelopes_problem|tag|http://www.semanlink.net/tag/probabilites +http://en.wikipedia.org/wiki/Two_envelopes_problem|title|Two envelopes problem +http://en.wikipedia.org/wiki/Two_envelopes_problem|creationTime|2012-09-21T21:15:09Z +http://meslectures-karine.blogspot.com/2007/06/qui-la-faute-de-victor-hugo.html|creationDate|2007-11-30 +http://meslectures-karine.blogspot.com/2007/06/qui-la-faute-de-victor-hugo.html|tag|http://www.semanlink.net/tag/hugo +http://meslectures-karine.blogspot.com/2007/06/qui-la-faute-de-victor-hugo.html|tag|http://www.semanlink.net/tag/actualite +http://meslectures-karine.blogspot.com/2007/06/qui-la-faute-de-victor-hugo.html|tag|http://www.semanlink.net/tag/crise_des_banlieues +http://meslectures-karine.blogspot.com/2007/06/qui-la-faute-de-victor-hugo.html|comment|"Ecrit suite à l'incendie de la bibliothèque des Tuileries durant la Commune en 1871. +" +http://meslectures-karine.blogspot.com/2007/06/qui-la-faute-de-victor-hugo.html|title|A qui la faute? 
de Victor Hugo +http://meslectures-karine.blogspot.com/2007/06/qui-la-faute-de-victor-hugo.html|creationTime|2007-11-30T14:14:39Z +http://www.w3.org/2003/g/talk62/slides|creationDate|2006-03-08 +http://www.w3.org/2003/g/talk62/slides|tag|http://www.semanlink.net/tag/dan_connolly +http://www.w3.org/2003/g/talk62/slides|tag|http://www.semanlink.net/tag/grddl +http://www.w3.org/2003/g/talk62/slides|tag|http://www.semanlink.net/tag/semantic_web +http://www.w3.org/2003/g/talk62/slides|tag|http://www.semanlink.net/tag/microformats +http://www.w3.org/2003/g/talk62/slides|title|Practical Semantic Web Deployment with Microformats and GRDDL (1) +http://ebiquity.umbc.edu/paper/html/id/235/|creationDate|2005-06-15 +http://ebiquity.umbc.edu/paper/html/id/235/|tag|http://www.semanlink.net/tag/owl +http://ebiquity.umbc.edu/paper/html/id/235/|tag|http://www.semanlink.net/tag/reseaux_bayesiens +http://ebiquity.umbc.edu/paper/html/id/235/|tag|http://www.semanlink.net/tag/ontology_mapping +http://ebiquity.umbc.edu/paper/html/id/235/|title|UMBC eBiquity Publication: A Bayesian Methodology towards Automatic Ontology Mapping +http://www.netcrucible.com/blog/PermaLink.aspx?guid=d9508178-c4e4-4175-bd7f-0e261e1a4739|creationDate|2005-10-13 +http://www.netcrucible.com/blog/PermaLink.aspx?guid=d9508178-c4e4-4175-bd7f-0e261e1a4739|tag|http://www.semanlink.net/tag/ws_vs_pox_http +http://www.netcrucible.com/blog/PermaLink.aspx?guid=d9508178-c4e4-4175-bd7f-0e261e1a4739|title|The War is Over (WS-* vs. POX/HTTP) +https://www.nature.com/articles/s41598-018-30619-y|creationDate|2018-08-28 +https://www.nature.com/articles/s41598-018-30619-y|tag|http://www.semanlink.net/tag/convolutional_neural_network +https://www.nature.com/articles/s41598-018-30619-y|tag|http://www.semanlink.net/tag/optical_computing +https://www.nature.com/articles/s41598-018-30619-y|tag|http://www.semanlink.net/tag/image_classification +https://www.nature.com/articles/s41598-018-30619-y|comment|> a layer of optical computing prior to electronic computing, improving performance on image classification tasks while adding minimal electronic computational cost or processing time +https://www.nature.com/articles/s41598-018-30619-y|title|Hybrid optical-electronic convolutional neural networks with optimized diffractive optics for image classification Scientific Reports +https://www.nature.com/articles/s41598-018-30619-y|creationTime|2018-08-28T09:39:54Z +http://deeplearning.net/tutorial/rnnslu.html|creationDate|2017-06-16 +http://deeplearning.net/tutorial/rnnslu.html|tag|http://www.semanlink.net/tag/tutorial +http://deeplearning.net/tutorial/rnnslu.html|tag|http://www.semanlink.net/tag/word_embedding +http://deeplearning.net/tutorial/rnnslu.html|tag|http://www.semanlink.net/tag/recurrent_neural_network +http://deeplearning.net/tutorial/rnnslu.html|title|Recurrent Neural Networks with Word Embeddings — DeepLearning 0.1 documentation +http://deeplearning.net/tutorial/rnnslu.html|creationTime|2017-06-16T02:00:02Z +http://www.la-grange.net/2011/03/02/google-ads|creationDate|2011-03-26 +http://www.la-grange.net/2011/03/02/google-ads|tag|http://www.semanlink.net/tag/publicite_internet +http://www.la-grange.net/2011/03/02/google-ads|tag|http://www.semanlink.net/tag/google_advertising +http://www.la-grange.net/2011/03/02/google-ads|tag|http://www.semanlink.net/tag/google +http://www.la-grange.net/2011/03/02/google-ads|title|Comprendre comment Google vous traque +http://www.la-grange.net/2011/03/02/google-ads|creationTime|2011-03-26T13:54:57Z 
+http://lespetitescases.net/amusons-nous-avec-rdfa|creationDate|2007-05-30 +http://lespetitescases.net/amusons-nous-avec-rdfa|tag|http://www.semanlink.net/tag/rdfa +http://lespetitescases.net/amusons-nous-avec-rdfa|tag|http://www.semanlink.net/tag/gautier_poupeau +http://lespetitescases.net/amusons-nous-avec-rdfa|comment|Exemple d'utilisation de RDFa (lecture de liens vers dbPedia inclus dans la page) +http://lespetitescases.net/amusons-nous-avec-rdfa|title|Amusons-nous avec RDFa Les petites cases +http://lespetitescases.net/amusons-nous-avec-rdfa|creationTime|2007-05-30T21:56:17Z +http://www.bbc.co.uk/news/science-environment-21866464|creationDate|2013-03-21 +http://www.bbc.co.uk/news/science-environment-21866464|tag|http://www.semanlink.net/tag/big_bang +http://www.bbc.co.uk/news/science-environment-21866464|tag|http://www.semanlink.net/tag/cosmic_microwave_background +http://www.bbc.co.uk/news/science-environment-21866464|title|BBC News - Planck satellite: Maps detail Universe's ancient light +http://www.bbc.co.uk/news/science-environment-21866464|creationTime|2013-03-21T13:58:21Z +http://web.it.kth.se/~rassul/exjobb/rapporter/sima-emil.pdf|creationDate|2005-10-28 +http://web.it.kth.se/~rassul/exjobb/rapporter/sima-emil.pdf|tag|http://www.semanlink.net/tag/semantically_searchable_distributed_repository +http://web.it.kth.se/~rassul/exjobb/rapporter/sima-emil.pdf|title|Carbonara - a semantically searchable distributed repository +https://colab.research.google.com/drive/1jUpGwTaY9vJsUVw1tgwwXqKz6UOsvV1a|creationDate|2019-01-29 +https://colab.research.google.com/drive/1jUpGwTaY9vJsUVw1tgwwXqKz6UOsvV1a|tag|http://www.semanlink.net/tag/feedly +https://colab.research.google.com/drive/1jUpGwTaY9vJsUVw1tgwwXqKz6UOsvV1a|tag|http://www.semanlink.net/tag/google_colab +https://colab.research.google.com/drive/1jUpGwTaY9vJsUVw1tgwwXqKz6UOsvV1a|tag|http://www.semanlink.net/tag/machine_learning +https://colab.research.google.com/drive/1jUpGwTaY9vJsUVw1tgwwXqKz6UOsvV1a|tag|http://www.semanlink.net/tag/nlp +https://colab.research.google.com/drive/1jUpGwTaY9vJsUVw1tgwwXqKz6UOsvV1a|comment|Colaboratory notebook that shows how you can apply ML and NLP to the content of your own @feedly feeds. 
+https://colab.research.google.com/drive/1jUpGwTaY9vJsUVw1tgwwXqKz6UOsvV1a|title|Run NLP Experiments using the Feedly API.ipynb - Colaboratory +https://colab.research.google.com/drive/1jUpGwTaY9vJsUVw1tgwwXqKz6UOsvV1a|creationTime|2019-01-29T00:47:23Z +https://news.cnrs.fr/opinions/facial-recognition-cracking-the-brains-code?utm_content=buffer1e5d5&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-09-07 +https://news.cnrs.fr/opinions/facial-recognition-cracking-the-brains-code?utm_content=buffer1e5d5&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/neuroscience_and_ai +https://news.cnrs.fr/opinions/facial-recognition-cracking-the-brains-code?utm_content=buffer1e5d5&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/computational_neuroscience +https://news.cnrs.fr/opinions/facial-recognition-cracking-the-brains-code?utm_content=buffer1e5d5&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/facial_recognition +https://news.cnrs.fr/opinions/facial-recognition-cracking-the-brains-code?utm_content=buffer1e5d5&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Facial Recognition: Cracking the Brain’s Code CNRS News +https://news.cnrs.fr/opinions/facial-recognition-cracking-the-brains-code?utm_content=buffer1e5d5&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-09-07T01:41:14Z +https://towardsdatascience.com/named-entity-recognition-and-classification-with-scikit-learn-f05372f07ba2|creationDate|2018-09-16 +https://towardsdatascience.com/named-entity-recognition-and-classification-with-scikit-learn-f05372f07ba2|tag|http://www.semanlink.net/tag/nlp_sample_code +https://towardsdatascience.com/named-entity-recognition-and-classification-with-scikit-learn-f05372f07ba2|tag|http://www.semanlink.net/tag/named_entity_recognition +https://towardsdatascience.com/named-entity-recognition-and-classification-with-scikit-learn-f05372f07ba2|tag|http://www.semanlink.net/tag/scikit_learn +https://towardsdatascience.com/named-entity-recognition-and-classification-with-scikit-learn-f05372f07ba2|tag|http://www.semanlink.net/tag/external_memory_algorithm +https://towardsdatascience.com/named-entity-recognition-and-classification-with-scikit-learn-f05372f07ba2|tag|http://www.semanlink.net/tag/conditional_random_field +https://towardsdatascience.com/named-entity-recognition-and-classification-with-scikit-learn-f05372f07ba2|title|Named Entity Recognition and Classification with Scikit-Learn +https://towardsdatascience.com/named-entity-recognition-and-classification-with-scikit-learn-f05372f07ba2|creationTime|2018-09-16T10:15:39Z +http://mccormickml.com/2017/02/22/concept-search-on-wikipedia/|creationDate|2017-09-10 +http://mccormickml.com/2017/02/22/concept-search-on-wikipedia/|tag|http://www.semanlink.net/tag/latent_semantic_analysis +http://mccormickml.com/2017/02/22/concept-search-on-wikipedia/|tag|http://www.semanlink.net/tag/gensim +http://mccormickml.com/2017/02/22/concept-search-on-wikipedia/|tag|http://www.semanlink.net/tag/concept_search +http://mccormickml.com/2017/02/22/concept-search-on-wikipedia/|comment|using gensim to perform concept searches on English Wikipedia. 
+http://mccormickml.com/2017/02/22/concept-search-on-wikipedia/|title|Concept Search on Wikipedia · Chris McCormick +http://mccormickml.com/2017/02/22/concept-search-on-wikipedia/|creationTime|2017-09-10T17:25:47Z +http://www.prescod.net/rest/mistakes/|creationDate|2005-10-13 +http://www.prescod.net/rest/mistakes/|tag|http://www.semanlink.net/tag/rest +http://www.prescod.net/rest/mistakes/|title|Common REST Mistakes +https://lejournal.cnrs.fr/articles/les-defis-de-la-voiture-a-hydrogene?utm_content=buffer8a0d7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-03-02 +https://lejournal.cnrs.fr/articles/les-defis-de-la-voiture-a-hydrogene?utm_content=buffer8a0d7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/hydrogen_cars +https://lejournal.cnrs.fr/articles/les-defis-de-la-voiture-a-hydrogene?utm_content=buffer8a0d7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Les défis de la voiture à hydrogène CNRS Le journal +https://lejournal.cnrs.fr/articles/les-defis-de-la-voiture-a-hydrogene?utm_content=buffer8a0d7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-03-02T12:27:16Z +https://fr.wikipedia.org/wiki/Dans_ses_yeux|creationDate|2018-08-22 +https://fr.wikipedia.org/wiki/Dans_ses_yeux|tag|http://www.semanlink.net/tag/film_argentin +https://fr.wikipedia.org/wiki/Dans_ses_yeux|title|El secreto de sus ojos +https://fr.wikipedia.org/wiki/Dans_ses_yeux|creationTime|2018-08-22T23:00:27Z +http://code.google.com/p/lucene-skos/|creationDate|2012-05-04 +http://code.google.com/p/lucene-skos/|tag|http://www.semanlink.net/tag/skos +http://code.google.com/p/lucene-skos/|tag|http://www.semanlink.net/tag/lucene +http://code.google.com/p/lucene-skos/|title|lucene-skos - A SKOS analyzer module for Apache Lucene and Solr - Google Project Hosting +http://code.google.com/p/lucene-skos/|creationTime|2012-05-04T00:38:20Z +https://www.digitalocean.com/community/tutorials/apache-vs-nginx-practical-considerations|creationDate|2016-05-22 +https://www.digitalocean.com/community/tutorials/apache-vs-nginx-practical-considerations|tag|http://www.semanlink.net/tag/apache +https://www.digitalocean.com/community/tutorials/apache-vs-nginx-practical-considerations|tag|http://www.semanlink.net/tag/nginx +https://www.digitalocean.com/community/tutorials/apache-vs-nginx-practical-considerations|title|Apache vs Nginx: Practical Considerations DigitalOcean +https://www.digitalocean.com/community/tutorials/apache-vs-nginx-practical-considerations|creationTime|2016-05-22T15:10:20Z +http://www.elasticsearch.org/|creationDate|2013-04-06 +http://www.elasticsearch.org/|tag|http://www.semanlink.net/tag/lucene +http://www.elasticsearch.org/|tag|http://www.semanlink.net/tag/open_source +http://www.elasticsearch.org/|tag|http://www.semanlink.net/tag/cloud +http://www.elasticsearch.org/|comment|open source, distributed RESTful search and analytics for the cloud +http://www.elasticsearch.org/|title|elasticsearch +http://www.elasticsearch.org/|creationTime|2013-04-06T17:46:56Z +http://www.nytimes.com/2007/11/08/technology/personaltech/08basics.html|creationDate|2007-11-09 +http://www.nytimes.com/2007/11/08/technology/personaltech/08basics.html|tag|http://www.semanlink.net/tag/ipod +http://www.nytimes.com/2007/11/08/technology/personaltech/08basics.html|tag|http://www.semanlink.net/tag/recyclage +http://www.nytimes.com/2007/11/08/technology/personaltech/08basics.html|tag|http://www.semanlink.net/tag/fix_it 
+http://www.nytimes.com/2007/11/08/technology/personaltech/08basics.html|title|Don’t Throw Out Your Broken iPod; Fix It via the Web
+http://www.nytimes.com/2007/11/08/technology/personaltech/08basics.html|creationTime|2007-11-09T13:20:12Z
+http://chinafrica.info/|creationDate|2016-03-28
+http://chinafrica.info/|tag|http://www.semanlink.net/tag/chine_afrique
+http://chinafrica.info/|title|Chinafrica Le magazine du nouveau monde
+http://chinafrica.info/|creationTime|2016-03-28T17:01:29Z
+http://www.semanticuniverse.com/|creationDate|2010-05-20
+http://www.semanticuniverse.com/|tag|http://www.semanlink.net/tag/semantic_web
+http://www.semanticuniverse.com/|tag|http://www.semanlink.net/tag/semantic_web_blog
+http://www.semanticuniverse.com/|comment|Educating the world about semantic web technologies and applications
+http://www.semanticuniverse.com/|title|Semantic Universe
+http://www.semanticuniverse.com/|creationTime|2010-05-20T00:47:05Z
+http://es.wikipedia.org/wiki/Todo_sobre_mi_madre|creationDate|2009-01-31
+http://es.wikipedia.org/wiki/Todo_sobre_mi_madre|tag|http://www.semanlink.net/tag/film
+http://es.wikipedia.org/wiki/Todo_sobre_mi_madre|tag|http://www.semanlink.net/tag/pedro_almodovar
+http://es.wikipedia.org/wiki/Todo_sobre_mi_madre|title|Todo sobre mi madre
+http://es.wikipedia.org/wiki/Todo_sobre_mi_madre|creationTime|2009-01-31T23:48:19Z
+http://isicil.inria.fr/docs/Livrables/ISICIL-ANR-EA01-FolksonomiesOntologies-0906.pdf|creationDate|2009-11-03
+http://isicil.inria.fr/docs/Livrables/ISICIL-ANR-EA01-FolksonomiesOntologies-0906.pdf|tag|http://www.semanlink.net/tag/linkto_semanlink
+http://isicil.inria.fr/docs/Livrables/ISICIL-ANR-EA01-FolksonomiesOntologies-0906.pdf|tag|http://www.semanlink.net/tag/folksonomies_ontologies
+http://isicil.inria.fr/docs/Livrables/ISICIL-ANR-EA01-FolksonomiesOntologies-0906.pdf|tag|http://www.semanlink.net/tag/semantic_tagging
+http://isicil.inria.fr/docs/Livrables/ISICIL-ANR-EA01-FolksonomiesOntologies-0906.pdf|comment|"Social tagging systems have recently become very popular as a means to classify large sets of resources shared among on-line communities over the social Web. However, the folksonomies resulting from the use of these systems revealed limitations: tags are ambiguous and their spelling may vary, and folksonomies are difficult to exploit in order to retrieve or exchange information. This report compares the recent attempts to overcome these limitations and to support the use of folksonomies with formal languages and ontologies from the Semantic Web.&#xD;
+Projet ISICIL : Intégration Sémantique de l'Information par des Communautés d'Intelligence en Ligne" +http://isicil.inria.fr/docs/Livrables/ISICIL-ANR-EA01-FolksonomiesOntologies-0906.pdf|title|Linking Folksonomies and Ontologies for Supporting Knowledge Sharing: a State of the Art +http://isicil.inria.fr/docs/Livrables/ISICIL-ANR-EA01-FolksonomiesOntologies-0906.pdf|creationTime|2009-11-03T21:46:35Z +http://www.pbs.org/wnet/secrets/case_plague/index.html|creationDate|2005-11-01 +http://www.pbs.org/wnet/secrets/case_plague/index.html|tag|http://www.semanlink.net/tag/peste +http://www.pbs.org/wnet/secrets/case_plague/index.html|tag|http://www.semanlink.net/tag/sida +http://www.pbs.org/wnet/secrets/case_plague/index.html|title|Secrets of the Dead . Mystery of the Black Death PBS +http://itunes.parisdescartes.fr/|creationDate|2009-03-14 +http://itunes.parisdescartes.fr/|tag|http://www.semanlink.net/tag/online_course_materials +http://itunes.parisdescartes.fr/|tag|http://www.semanlink.net/tag/universite +http://itunes.parisdescartes.fr/|tag|http://www.semanlink.net/tag/itunes +http://itunes.parisdescartes.fr/|title|iTunes U. Paris Descartes +http://itunes.parisdescartes.fr/|creationTime|2009-03-14T15:44:38Z +http://www.hymn-project.org/jhymndoc/|creationDate|2005-12-02 +http://www.hymn-project.org/jhymndoc/|tag|http://www.semanlink.net/tag/drm +http://www.hymn-project.org/jhymndoc/|tag|http://www.semanlink.net/tag/itunes +http://www.hymn-project.org/jhymndoc/|title|JHymn Info and Help +https://blog.cloudflare.com/why-we-terminated-daily-stormer/|creationDate|2017-08-17 +https://blog.cloudflare.com/why-we-terminated-daily-stormer/|tag|http://www.semanlink.net/tag/liberte_d_expression +https://blog.cloudflare.com/why-we-terminated-daily-stormer/|tag|http://www.semanlink.net/tag/censorship +https://blog.cloudflare.com/why-we-terminated-daily-stormer/|tag|http://www.semanlink.net/tag/neo_nazis +https://blog.cloudflare.com/why-we-terminated-daily-stormer/|tag|http://www.semanlink.net/tag/internet_regulation +https://blog.cloudflare.com/why-we-terminated-daily-stormer/|tag|http://www.semanlink.net/tag/charlottesville +https://blog.cloudflare.com/why-we-terminated-daily-stormer/|title|Why We Terminated Daily Stormer +https://blog.cloudflare.com/why-we-terminated-daily-stormer/|creationTime|2017-08-17T13:18:41Z +http://www.math.umaine.edu/~hiebeler/comp/matlabR.pdf|creationDate|2015-01-28 +http://www.math.umaine.edu/~hiebeler/comp/matlabR.pdf|tag|http://www.semanlink.net/tag/matlab +http://www.math.umaine.edu/~hiebeler/comp/matlabR.pdf|tag|http://www.semanlink.net/tag/r +http://www.math.umaine.edu/~hiebeler/comp/matlabR.pdf|title|MATLAB / R Reference +http://www.math.umaine.edu/~hiebeler/comp/matlabR.pdf|creationTime|2015-01-28T01:30:26Z +http://www.xbrl.org/|creationDate|2010-07-01 +http://www.xbrl.org/|tag|http://www.semanlink.net/tag/xbrl +http://www.xbrl.org/|title|XBRL: eXtensible Business Reporting Language +http://www.xbrl.org/|creationTime|2010-07-01T17:05:06Z +https://github.com/evolvingweb/ajax-solr/wiki|creationDate|2014-03-15 +https://github.com/evolvingweb/ajax-solr/wiki|tag|http://www.semanlink.net/tag/javascript_librairies +https://github.com/evolvingweb/ajax-solr/wiki|tag|http://www.semanlink.net/tag/ajax +https://github.com/evolvingweb/ajax-solr/wiki|tag|http://www.semanlink.net/tag/solr +https://github.com/evolvingweb/ajax-solr/wiki|comment|a JavaScript library for creating user interfaces to Apache Solr +https://github.com/evolvingweb/ajax-solr/wiki|title|Ajax solr 
+https://github.com/evolvingweb/ajax-solr/wiki|creationTime|2014-03-15T14:04:45Z +http://blogs.sun.com/bblfish/entry/aperture_to_the_semantic_web|creationDate|2006-10-15 +http://blogs.sun.com/bblfish/entry/aperture_to_the_semantic_web|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/bblfish/entry/aperture_to_the_semantic_web|tag|http://www.semanlink.net/tag/semantic_desktop +http://blogs.sun.com/bblfish/entry/aperture_to_the_semantic_web|tag|http://www.semanlink.net/tag/semantic_indexing +http://blogs.sun.com/bblfish/entry/aperture_to_the_semantic_web|comment|"Aperture is an open source library and framework for crawling and indexing information sources such as file systems, websites and mail boxes. Aperture supports a number of common source types and document formats out-of-the-box and provides easy ways to extend it with custom implementations. +
+Aperture does pretty much of what Apple's Spotlight does currently, except that it only works on the local file system. Just as with Apple's Spotlight, the crawled information can then be queried. But unlike Spotlight, it could be done using the much more powerful W3C standard SPARQL query language." +http://blogs.sun.com/bblfish/entry/aperture_to_the_semantic_web|title|The Sun BabelFish Blog: Aperture to the semantic desktop +https://github.com/google/patents-public-data/blob/master/models/landscaping/AutomatedPatentLandscaping.pdf|creationDate|2019-02-20 +https://github.com/google/patents-public-data/blob/master/models/landscaping/AutomatedPatentLandscaping.pdf|tag|http://www.semanlink.net/tag/ip_ir_ml_ia +https://github.com/google/patents-public-data/blob/master/models/landscaping/AutomatedPatentLandscaping.pdf|tag|http://www.semanlink.net/tag/google_patents +https://github.com/google/patents-public-data/blob/master/models/landscaping/AutomatedPatentLandscaping.pdf|comment|[github](https://github.com/google/patents-public-data/tree/master/models/landscaping) +https://github.com/google/patents-public-data/blob/master/models/landscaping/AutomatedPatentLandscaping.pdf|title|Automated patent landscaping (google/patents-public-data) +https://github.com/google/patents-public-data/blob/master/models/landscaping/AutomatedPatentLandscaping.pdf|creationTime|2019-02-20T08:14:36Z +http://www.semantic-web-days.net/proceedings/ontoprise_SemanticWebDays2005.pdf|creationDate|2006-06-28 +http://www.semantic-web-days.net/proceedings/ontoprise_SemanticWebDays2005.pdf|tag|http://www.semanlink.net/tag/automobile +http://www.semantic-web-days.net/proceedings/ontoprise_SemanticWebDays2005.pdf|tag|http://www.semanlink.net/tag/ontologies +http://www.semantic-web-days.net/proceedings/ontoprise_SemanticWebDays2005.pdf|title|Ontologies@ Work -Experience from Automotive and Engineering Industry +http://www.wired.com/insights/2013/03/why-big-data-is-choking-business-managers/|creationDate|2013-03-27 +http://www.wired.com/insights/2013/03/why-big-data-is-choking-business-managers/|tag|http://www.semanlink.net/tag/business_intelligence +http://www.wired.com/insights/2013/03/why-big-data-is-choking-business-managers/|tag|http://www.semanlink.net/tag/big_data +http://www.wired.com/insights/2013/03/why-big-data-is-choking-business-managers/|title|Why Big Data is Choking Business Managers Innovation Insights Wired.com +http://www.wired.com/insights/2013/03/why-big-data-is-choking-business-managers/|creationTime|2013-03-27T23:08:46Z +http://nlp.seas.harvard.edu/2018/04/03/attention.html|creationDate|2018-10-12 +http://nlp.seas.harvard.edu/2018/04/03/attention.html|tag|http://www.semanlink.net/tag/nlp_sample_code +http://nlp.seas.harvard.edu/2018/04/03/attention.html|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://nlp.seas.harvard.edu/2018/04/03/attention.html|comment|"an “annotated” version of the ""Attention is All You Need"" paper in the form of a line-by-line implementation" +http://nlp.seas.harvard.edu/2018/04/03/attention.html|title|The Annotated Transformer +http://nlp.seas.harvard.edu/2018/04/03/attention.html|creationTime|2018-10-12T19:10:45Z +http://sites.wiwiss.fu-berlin.de/suhl/bizer/bookmashup/|creationDate|2007-04-14 +http://sites.wiwiss.fu-berlin.de/suhl/bizer/bookmashup/|tag|http://www.semanlink.net/tag/linked_data +http://sites.wiwiss.fu-berlin.de/suhl/bizer/bookmashup/|tag|http://www.semanlink.net/tag/chris_bizer 
+http://sites.wiwiss.fu-berlin.de/suhl/bizer/bookmashup/|tag|http://www.semanlink.net/tag/livre +http://sites.wiwiss.fu-berlin.de/suhl/bizer/bookmashup/|comment|"Serving RDF descriptions of your books. The RDF book mashup demonstrates how Web 2.0 data sources like Amazon, Google or Yahoo can be integrated into the Semantic Web. +" +http://sites.wiwiss.fu-berlin.de/suhl/bizer/bookmashup/|title|RDF Book Mashup +http://sites.wiwiss.fu-berlin.de/suhl/bizer/bookmashup/|creationTime|2007-04-14T01:16:26Z +http://www.mkbergman.com/1015/the-rationale-for-semantic-technologies/|creationDate|2012-07-31 +http://www.mkbergman.com/1015/the-rationale-for-semantic-technologies/|tag|http://www.semanlink.net/tag/semantic_technology +http://www.mkbergman.com/1015/the-rationale-for-semantic-technologies/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/1015/the-rationale-for-semantic-technologies/|tag|http://www.semanlink.net/tag/knowledge_representation +http://www.mkbergman.com/1015/the-rationale-for-semantic-technologies/|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.mkbergman.com/1015/the-rationale-for-semantic-technologies/|title|The Rationale for Semantic Technologies » AI3:::Adaptive Information +http://www.mkbergman.com/1015/the-rationale-for-semantic-technologies/|creationTime|2012-07-31T09:55:52Z +http://www.bbc.com/news/technology-35639549|creationDate|2016-03-03 +http://www.bbc.com/news/technology-35639549|tag|http://www.semanlink.net/tag/privacy_and_internet +http://www.bbc.com/news/technology-35639549|tag|http://www.semanlink.net/tag/smartphone +http://www.bbc.com/news/technology-35639549|title|Is your smartphone listening to you? - BBC News +http://www.bbc.com/news/technology-35639549|creationTime|2016-03-03T14:21:43Z +https://atom.io|creationDate|2014-09-09 +https://atom.io|tag|http://www.semanlink.net/tag/atom_github +https://atom.io|title|ATOM, a hackable text editor for the 21st Century +https://atom.io|creationTime|2014-09-09T14:57:45Z +http://dannyayers.com/2007/03/28/using-those-profiles|creationDate|2007-04-03 +http://dannyayers.com/2007/03/28/using-those-profiles|tag|http://www.semanlink.net/tag/semanlink_related +http://dannyayers.com/2007/03/28/using-those-profiles|tag|http://www.semanlink.net/tag/personal_information_management +http://dannyayers.com/2007/03/28/using-those-profiles|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2007/03/28/using-those-profiles|comment|As an individual, if I could capture the information that's flowing through my del.icio.us plus Flickr, Bloglines, Google Calendar, Amazon Wishlist even... plus LinkedIn etc - together with my own blog, personal notetaking/wiki/code repository, todo lists, plans etc, and integrate all this in a triplestore, and front it with some kind of Planet/Longwell hybrid (with a little bit of access control), it would be a hugely useful personal knowledge management system. 
+http://dannyayers.com/2007/03/28/using-those-profiles|title|Using those profiles +http://dannyayers.com/2007/03/28/using-those-profiles|creationTime|2007-04-03T23:15:01Z +https://www.lemonde.fr/paleontologie/article/2018/09/20/le-dickinsonia-le-plus-ancien-animal-sur-terre-etait-ovale-et-plat_5358003_1650762.html|creationDate|2018-09-21 +https://www.lemonde.fr/paleontologie/article/2018/09/20/le-dickinsonia-le-plus-ancien-animal-sur-terre-etait-ovale-et-plat_5358003_1650762.html|tag|http://www.semanlink.net/tag/histoire_de_la_vie +https://www.lemonde.fr/paleontologie/article/2018/09/20/le-dickinsonia-le-plus-ancien-animal-sur-terre-etait-ovale-et-plat_5358003_1650762.html|tag|http://www.semanlink.net/tag/fossile +https://www.lemonde.fr/paleontologie/article/2018/09/20/le-dickinsonia-le-plus-ancien-animal-sur-terre-etait-ovale-et-plat_5358003_1650762.html|tag|http://www.semanlink.net/tag/paleontologie +https://www.lemonde.fr/paleontologie/article/2018/09/20/le-dickinsonia-le-plus-ancien-animal-sur-terre-etait-ovale-et-plat_5358003_1650762.html|title|Le Dickinsonia, le plus ancien animal sur Terre, était ovale et plat +https://www.lemonde.fr/paleontologie/article/2018/09/20/le-dickinsonia-le-plus-ancien-animal-sur-terre-etait-ovale-et-plat_5358003_1650762.html|creationTime|2018-09-21T08:19:59Z +http://dannyayers.com/2007/07/17/can-opml-2|creationDate|2007-07-17 +http://dannyayers.com/2007/07/17/can-opml-2|tag|http://www.semanlink.net/tag/grddl +http://dannyayers.com/2007/07/17/can-opml-2|tag|http://www.semanlink.net/tag/opml +http://dannyayers.com/2007/07/17/can-opml-2|title|Can OPML 2.0 be part of the Semantic Web? +http://dannyayers.com/2007/07/17/can-opml-2|creationTime|2007-07-17T23:13:08Z +http://blogs.talis.com/nodalities/2008/04/google_app_engine_and_the_joy.php|creationDate|2008-04-14 +http://blogs.talis.com/nodalities/2008/04/google_app_engine_and_the_joy.php|tag|http://www.semanlink.net/tag/google_app_engine +http://blogs.talis.com/nodalities/2008/04/google_app_engine_and_the_joy.php|tag|http://www.semanlink.net/tag/danny_ayers +http://blogs.talis.com/nodalities/2008/04/google_app_engine_and_the_joy.php|tag|http://www.semanlink.net/tag/talis_platform +http://blogs.talis.com/nodalities/2008/04/google_app_engine_and_the_joy.php|title|Nodalities: Google App Engine and the Joy of WebArch +http://blogs.talis.com/nodalities/2008/04/google_app_engine_and_the_joy.php|creationTime|2008-04-14T14:19:08Z +http://topics.cs.princeton.edu/Science/|creationDate|2013-08-29 +http://topics.cs.princeton.edu/Science/|tag|http://www.semanlink.net/tag/topic_modeling +http://topics.cs.princeton.edu/Science/|tag|http://www.semanlink.net/tag/david_blei +http://topics.cs.princeton.edu/Science/|tag|http://www.semanlink.net/tag/science +http://topics.cs.princeton.edu/Science/|title|Modeling the Evolution of Science +http://topics.cs.princeton.edu/Science/|creationTime|2013-08-29T15:08:14Z +http://web.engr.oregonstate.edu/~tgd/publications/mlsd-ssspr.pdf|creationDate|2014-04-28 +http://web.engr.oregonstate.edu/~tgd/publications/mlsd-ssspr.pdf|tag|http://www.semanlink.net/tag/ml_sequential_data +http://web.engr.oregonstate.edu/~tgd/publications/mlsd-ssspr.pdf|title|Machine Learning for Sequential Data: A Review +http://web.engr.oregonstate.edu/~tgd/publications/mlsd-ssspr.pdf|creationTime|2014-04-28T15:56:10Z +http://youtube.com/watch?v=JqWoJkvryvM|creationDate|2007-09-18 +http://youtube.com/watch?v=JqWoJkvryvM|tag|http://www.semanlink.net/tag/moussa_poussi 
+http://youtube.com/watch?v=JqWoJkvryvM|tag|http://www.semanlink.net/tag/youtube +http://youtube.com/watch?v=JqWoJkvryvM|comment|"That ""sibo"" is not the fish of the river. Sibo. Even when it lies, you'll say it tells the truth. It walks on my head, it walks in my flesh until my eyes cry. Sibo, it caresses me till the morning. Recorded live in Niamey, August 19, 2007.
+" +http://youtube.com/watch?v=JqWoJkvryvM|title|YouTube - Sibo - Moussa Poussi +http://youtube.com/watch?v=JqWoJkvryvM|creationTime|2007-09-18T01:15:52Z +http://tech.groups.yahoo.com/group/jena-dev/message/46281|creationDate|2010-12-22 +http://tech.groups.yahoo.com/group/jena-dev/message/46281|tag|http://www.semanlink.net/tag/jena_tdb +http://tech.groups.yahoo.com/group/jena-dev/message/46281|tag|http://www.semanlink.net/tag/jena_dev +http://tech.groups.yahoo.com/group/jena-dev/message/46281|title|jena-dev : Message: Re: [jena-dev] Could TDB sort triples by objects? +http://tech.groups.yahoo.com/group/jena-dev/message/46281|creationTime|2010-12-22T23:41:16Z +https://github.com/cygri/tarql|creationDate|2013-02-08 +https://github.com/cygri/tarql|tag|http://www.semanlink.net/tag/csv +https://github.com/cygri/tarql|tag|http://www.semanlink.net/tag/sparql_1_1 +https://github.com/cygri/tarql|tag|http://www.semanlink.net/tag/arq +https://github.com/cygri/tarql|tag|http://www.semanlink.net/tag/richard_cyganiak +https://github.com/cygri/tarql|comment|SPARQL for Tables Tarql is a command-line tool for converting CSV files to RDF using SPARQL 1.1 syntax. It's written in Java and based on Apache ARQ. +https://github.com/cygri/tarql|title|Tarql: SPARQL for Tables - cygri/tarql · GitHub +https://github.com/cygri/tarql|creationTime|2013-02-08T13:17:51Z +http://www.wildml.com/|creationDate|2017-09-26 +http://www.wildml.com/|tag|http://www.semanlink.net/tag/deep_nlp +http://www.wildml.com/|tag|http://www.semanlink.net/tag/deep_learning +http://www.wildml.com/|tag|http://www.semanlink.net/tag/denny_britz +http://www.wildml.com/|title|WildML – Artificial Intelligence, Deep Learning, and NLP +http://www.wildml.com/|creationTime|2017-09-26T14:10:17Z +https://arxiv.org/abs/1803.02893|creationDate|2019-03-20 +https://arxiv.org/abs/1803.02893|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1803.02893|tag|http://www.semanlink.net/tag/sentence_embeddings +https://arxiv.org/abs/1803.02893|tag|http://www.semanlink.net/tag/nlp_google +https://arxiv.org/abs/1803.02893|arxiv_author|Honglak Lee +https://arxiv.org/abs/1803.02893|arxiv_author|Lajanugen Logeswaran +https://arxiv.org/abs/1803.02893|comment|"""**Quick Thoughts**"". Framework for learning sentence representations from unlabelled data. + +> we reformulate the problem of predicting the context in which a sentence appears as a classification problem. +" +https://arxiv.org/abs/1803.02893|title|[1803.02893] An efficient framework for learning sentence representations +https://arxiv.org/abs/1803.02893|creationTime|2019-03-20T17:47:59Z +https://arxiv.org/abs/1803.02893|arxiv_summary|"In this work we propose a simple and efficient framework for learning +sentence representations from unlabelled data. Drawing inspiration from the +distributional hypothesis and recent work on learning sentence representations, +we reformulate the problem of predicting the context in which a sentence +appears as a classification problem. Given a sentence and its context, a +classifier distinguishes context sentences from other contrastive sentences +based on their vector representations. This allows us to efficiently learn +different types of encoding functions, and we show that the model learns +high-quality sentence representations. 
We demonstrate that our sentence +representations outperform state-of-the-art unsupervised and supervised +representation learning methods on several downstream NLP tasks that involve +understanding sentence semantics while achieving an order of magnitude speedup +in training time." +https://arxiv.org/abs/1803.02893|arxiv_firstAuthor|Lajanugen Logeswaran +https://arxiv.org/abs/1803.02893|arxiv_updated|2018-03-07T22:02:10Z +https://arxiv.org/abs/1803.02893|arxiv_title|An efficient framework for learning sentence representations +https://arxiv.org/abs/1803.02893|arxiv_published|2018-03-07T22:02:10Z +https://arxiv.org/abs/1803.02893|arxiv_num|1803.02893 +http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/|creationDate|2017-04-28 +http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/|tag|http://www.semanlink.net/tag/neuroscience_and_ai +http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/|tag|http://www.semanlink.net/tag/hierarchical_temporal_memory +http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/|tag|http://www.semanlink.net/tag/nlp +http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/|tag|http://www.semanlink.net/tag/cortical_io +http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/|tag|http://www.semanlink.net/tag/computational_neuroscience +http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/|tag|http://www.semanlink.net/tag/jeff_hawkins +http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/|tag|http://www.semanlink.net/tag/sparse_distributed_memory +http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/|title|How Mimicking Brain Function is Revolutionising NLP - Dataconomy +http://dataconomy.com/2014/09/how-an-austrian-startup-is-mimicking-brain-function-to-revolutionise-nlp/|creationTime|2017-04-28T22:47:59Z +http://www.bnode.org/archives2/59|creationDate|2006-06-20 +http://www.bnode.org/archives2/59|tag|http://www.semanlink.net/tag/sparql_clipboard +http://www.bnode.org/archives2/59|title|"Web Clipboard: Adding liveliness to ""Live Clipboard"" with eRDF, JSON, and SPARQL." 
+http://www.wired.com/2016/03/doug-lenat-artificial-intelligence-common-sense-engine/|creationDate|2016-03-27 +http://www.wired.com/2016/03/doug-lenat-artificial-intelligence-common-sense-engine/|tag|http://www.semanlink.net/tag/deep_learning +http://www.wired.com/2016/03/doug-lenat-artificial-intelligence-common-sense-engine/|tag|http://www.semanlink.net/tag/guha +http://www.wired.com/2016/03/doug-lenat-artificial-intelligence-common-sense-engine/|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.wired.com/2016/03/doug-lenat-artificial-intelligence-common-sense-engine/|tag|http://www.semanlink.net/tag/cyc +http://www.wired.com/2016/03/doug-lenat-artificial-intelligence-common-sense-engine/|tag|http://www.semanlink.net/tag/knowledge_based_ai +http://www.wired.com/2016/03/doug-lenat-artificial-intelligence-common-sense-engine/|title|One Genius’ Lonely Crusade to Teach a Computer Common Sense WIRED +http://www.wired.com/2016/03/doug-lenat-artificial-intelligence-common-sense-engine/|creationTime|2016-03-27T17:52:52Z +https://ai.stanford.edu/blog/weak-supervision/|creationDate|2019-03-12 +https://ai.stanford.edu/blog/weak-supervision/|tag|http://www.semanlink.net/tag/ai_stanford +https://ai.stanford.edu/blog/weak-supervision/|tag|http://www.semanlink.net/tag/active_learning +https://ai.stanford.edu/blog/weak-supervision/|tag|http://www.semanlink.net/tag/weak_supervision +https://ai.stanford.edu/blog/weak-supervision/|tag|http://www.semanlink.net/tag/snorkel +https://ai.stanford.edu/blog/weak-supervision/|tag|http://www.semanlink.net/tag/ai_knowledge +https://ai.stanford.edu/blog/weak-supervision/|comment|"[Newer version](http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr) of more or less the same thing + + +" +https://ai.stanford.edu/blog/weak-supervision/|relatedDoc|http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr +https://ai.stanford.edu/blog/weak-supervision/|title|Weak Supervision: A New Programming Paradigm for Machine Learning SAIL Blog +https://ai.stanford.edu/blog/weak-supervision/|creationTime|2019-03-12T13:33:27Z +http://dowhatimean.net/2007/03/less-code-erdf-templates-for-rdf-driven-web-sites|creationDate|2007-05-31 +http://dowhatimean.net/2007/03/less-code-erdf-templates-for-rdf-driven-web-sites|tag|http://www.semanlink.net/tag/rdf_driven_web_sites +http://dowhatimean.net/2007/03/less-code-erdf-templates-for-rdf-driven-web-sites|tag|http://www.semanlink.net/tag/richard_cyganiak +http://dowhatimean.net/2007/03/less-code-erdf-templates-for-rdf-driven-web-sites|tag|http://www.semanlink.net/tag/erdf +http://dowhatimean.net/2007/03/less-code-erdf-templates-for-rdf-driven-web-sites|tag|http://www.semanlink.net/tag/rdfa +http://dowhatimean.net/2007/03/less-code-erdf-templates-for-rdf-driven-web-sites|comment|Let’s assume you have a web application driven by data from an RDF triple store. You generate HTML pages by querying the triple store and inserting the bits and pieces into an HTML template. Now if you add eRDF or RDFa annotations to the HTML template, in a way that reflects the original RDF data, then by definition the annotations completely specify what data you need to populate the page. And the template itself therefore must be sufficient to extract all the required triples from the store. No coding needed! 
+http://dowhatimean.net/2007/03/less-code-erdf-templates-for-rdf-driven-web-sites|title|dowhatimean.net » Less code: eRDF templates for RDF-driven web sites +http://dowhatimean.net/2007/03/less-code-erdf-templates-for-rdf-driven-web-sites|creationTime|2007-05-31T01:20:37Z +http://www.overmundo.com.br/guia/sabor-selvagem|creationDate|2007-09-11 +http://www.overmundo.com.br/guia/sabor-selvagem|tag|http://www.semanlink.net/tag/ofir +http://www.overmundo.com.br/guia/sabor-selvagem|tag|http://www.semanlink.net/tag/belem +http://www.overmundo.com.br/guia/sabor-selvagem|comment|Não se trata exatamente de um restaurante, e, sim, de uma enorme sala de jantar no quintal de Ofir Cavalcante, um dos chefs mais criativos do Pará. Como não existe cardápio, o cliente paga uma quantia fixa com direito a entrada, prato principal e sobremesa +http://www.overmundo.com.br/guia/sabor-selvagem|title|Sabor Selvagem +http://www.overmundo.com.br/guia/sabor-selvagem|creationTime|2007-09-11T21:31:00Z +https://www.youtube.com/watch?v=CMS6Ds3qryY|creationDate|2018-03-04 +https://www.youtube.com/watch?v=CMS6Ds3qryY|tag|http://www.semanlink.net/tag/palestine +https://www.youtube.com/watch?v=CMS6Ds3qryY|tag|http://www.semanlink.net/tag/banksy +https://www.youtube.com/watch?v=CMS6Ds3qryY|title|Banksy documentary: Welcome to the Banksy art hotel in Bethlehem - YouTube +https://www.youtube.com/watch?v=CMS6Ds3qryY|creationTime|2018-03-04T10:09:04Z +http://code.google.com/p/jquery-jsonp/|creationDate|2012-01-31 +http://code.google.com/p/jquery-jsonp/|tag|http://www.semanlink.net/tag/jquery +http://code.google.com/p/jquery-jsonp/|tag|http://www.semanlink.net/tag/jsonp +http://code.google.com/p/jquery-jsonp/|comment|Alternative solution to jQuery's implementation of JSONP. +http://code.google.com/p/jquery-jsonp/|title|jQuery-JSONP +http://code.google.com/p/jquery-jsonp/|creationTime|2012-01-31T18:37:25Z +http://www.iht.com/articles/2007/06/07/africa/letter.1-70823.php|creationDate|2007-06-09 +http://www.iht.com/articles/2007/06/07/africa/letter.1-70823.php|tag|http://www.semanlink.net/tag/chirac_ami_des_africains +http://www.iht.com/articles/2007/06/07/africa/letter.1-70823.php|tag|http://www.semanlink.net/tag/afrique_francophone +http://www.iht.com/articles/2007/06/07/africa/letter.1-70823.php|tag|http://www.semanlink.net/tag/chine_afrique +http://www.iht.com/articles/2007/06/07/africa/letter.1-70823.php|tag|http://www.semanlink.net/tag/francafrique +http://www.iht.com/articles/2007/06/07/africa/letter.1-70823.php|comment|Virtually wherever one looks in French-speaking Africa today, one finds evidence of a postcolonial policy in tatters, and more startling still, given the tenacity of French claims over the decades, an open sense of failure, of exhaustion and of frank resignation. 
+http://www.iht.com/articles/2007/06/07/africa/letter.1-70823.php|title|Tattered French African empire looks toward China - International Herald Tribune +http://www.iht.com/articles/2007/06/07/africa/letter.1-70823.php|creationTime|2007-06-09T11:25:37Z +https://www.theguardian.com/environment/2019/jan/15/insect-collapse-we-are-destroying-our-life-support-systems?CMP=share_btn_tw|creationDate|2019-01-15 +https://www.theguardian.com/environment/2019/jan/15/insect-collapse-we-are-destroying-our-life-support-systems?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/porto_rico +https://www.theguardian.com/environment/2019/jan/15/insect-collapse-we-are-destroying-our-life-support-systems?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/rechauffement_climatique +https://www.theguardian.com/environment/2019/jan/15/insect-collapse-we-are-destroying-our-life-support-systems?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/insect_collapse +https://www.theguardian.com/environment/2019/jan/15/insect-collapse-we-are-destroying-our-life-support-systems?CMP=share_btn_tw|comment|Scientist Brad Lister returned to Puerto Rican rainforest after 35 years to find 98% of ground insects had vanished +https://www.theguardian.com/environment/2019/jan/15/insect-collapse-we-are-destroying-our-life-support-systems?CMP=share_btn_tw|title|Insect collapse: ‘We are destroying our life support systems’ Environment The Guardian +https://www.theguardian.com/environment/2019/jan/15/insect-collapse-we-are-destroying-our-life-support-systems?CMP=share_btn_tw|creationTime|2019-01-15T13:25:00Z +http://dbpedia.neofonie.de/browse/|creationDate|2010-06-22 +http://dbpedia.neofonie.de/browse/|tag|http://www.semanlink.net/tag/sw_demo +http://dbpedia.neofonie.de/browse/|tag|http://www.semanlink.net/tag/wikipedia +http://dbpedia.neofonie.de/browse/|tag|http://www.semanlink.net/tag/dbpedia +http://dbpedia.neofonie.de/browse/|tag|http://www.semanlink.net/tag/faceted_search +http://dbpedia.neofonie.de/browse/|title|Faceted Wikipedia Search +http://dbpedia.neofonie.de/browse/|creationTime|2010-06-22T18:34:56Z +https://lists.w3.org/Archives/Public/public-schemaorg/2016Apr/0022.html|creationDate|2017-09-26 +https://lists.w3.org/Archives/Public/public-schemaorg/2016Apr/0022.html|tag|http://www.semanlink.net/tag/google_structured_data_testing_tool +https://lists.w3.org/Archives/Public/public-schemaorg/2016Apr/0022.html|tag|http://www.semanlink.net/tag/json_ld +https://lists.w3.org/Archives/Public/public-schemaorg/2016Apr/0022.html|tag|http://www.semanlink.net/tag/dan_brickley +https://lists.w3.org/Archives/Public/public-schemaorg/2016Apr/0022.html|tag|http://www.semanlink.net/tag/martynas_jusevicius +https://lists.w3.org/Archives/Public/public-schemaorg/2016Apr/0022.html|comment|"> The Google Structured Data Testing Tool in its current incarnation is +largely oriented towards vocabulary that Google understands, even +though it is perfectly harmless to include other information in your +structured data" +https://lists.w3.org/Archives/Public/public-schemaorg/2016Apr/0022.html|title|Re: Google Structured Data Testing Tool fails on valid JSON-LD from Dan Brickley on 2016-04-07 (public-schemaorg@w3.org from April 2016) +https://lists.w3.org/Archives/Public/public-schemaorg/2016Apr/0022.html|creationTime|2017-09-26T15:47:52Z +http://forum.quechoisir.org/divulgation-donnees-personnelles-par-numericable-t11122-10.html|creationDate|2013-10-12 +http://forum.quechoisir.org/divulgation-donnees-personnelles-par-numericable-t11122-10.html|title|Arnaque et 
Divulgation données personnelles par Numéricable : Internet - Page 2 - Forum Que Choisir +http://forum.quechoisir.org/divulgation-donnees-personnelles-par-numericable-t11122-10.html|creationTime|2013-10-12T12:41:54Z +http://www.foretpriveefrancaise.com/comment-estimer-le-prix-du-bois-sur-pied-150189.html|creationDate|2011-11-14 +http://www.foretpriveefrancaise.com/comment-estimer-le-prix-du-bois-sur-pied-150189.html|tag|http://www.semanlink.net/tag/plantation_d_arbres +http://www.foretpriveefrancaise.com/comment-estimer-le-prix-du-bois-sur-pied-150189.html|title|Comment estimer le prix du bois sur pied ? +http://www.foretpriveefrancaise.com/comment-estimer-le-prix-du-bois-sur-pied-150189.html|creationTime|2011-11-14T21:14:07Z +https://www.offconvex.org/2016/07/10/embeddingspolysemy/|creationDate|2018-09-20 +https://www.offconvex.org/2016/07/10/embeddingspolysemy/|tag|http://www.semanlink.net/tag/lexical_ambiguity +https://www.offconvex.org/2016/07/10/embeddingspolysemy/|tag|http://www.semanlink.net/tag/sanjeev_arora +https://www.offconvex.org/2016/07/10/embeddingspolysemy/|tag|http://www.semanlink.net/tag/word_embedding +https://www.offconvex.org/2016/07/10/embeddingspolysemy/|tag|http://www.semanlink.net/tag/sparse_dictionary_learning +https://www.offconvex.org/2016/07/10/embeddingspolysemy/|comment|"Extracting word senses from embeddings. [About this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1601.03764) + +" +https://www.offconvex.org/2016/07/10/embeddingspolysemy/|relatedDoc|https://arxiv.org/abs/1601.03764 +https://www.offconvex.org/2016/07/10/embeddingspolysemy/|title|Linear algebraic structure of word meanings – Off the convex path +https://www.offconvex.org/2016/07/10/embeddingspolysemy/|creationTime|2018-09-20T23:47:32Z +http://www.xml.com/pub/au/225|creationDate|2005-06-15 +http://www.xml.com/pub/au/225|tag|http://www.semanlink.net/tag/web +http://www.xml.com/pub/au/225|title|Joe Gregorio: Restful Web columns +http://voiretagir.org/spip.php?article50|creationDate|2010-05-10 +http://voiretagir.org/spip.php?article50|tag|http://www.semanlink.net/tag/histoire_du_xxe_siecle +http://voiretagir.org/spip.php?article50|tag|http://www.semanlink.net/tag/livre +http://voiretagir.org/spip.php?article50|tag|http://www.semanlink.net/tag/physique +http://voiretagir.org/spip.php?article50|tag|http://www.semanlink.net/tag/documentaire_tv +http://voiretagir.org/spip.php?article50|tag|http://www.semanlink.net/tag/progres_technique +http://voiretagir.org/spip.php?article50|title|Un siècle de progrès sans merci +http://voiretagir.org/spip.php?article50|creationTime|2010-05-10T17:27:41Z +http://www.bbc.co.uk/dna/h2g2/A2207297|creationDate|2005-06-26 +http://www.bbc.co.uk/dna/h2g2/A2207297|tag|http://www.semanlink.net/tag/nebra_sky_disc +http://www.bbc.co.uk/dna/h2g2/A2207297|title|BBC - The Nebra Sky Disc +http://www.bbc.co.uk/dna/h2g2/A2207297|seeAlso|http://www.semanlink.net/doc/2005/06/Nebra%20Sky%20Disk.jpg +http://www.lefigaro.fr/reportage/20070814.FIG000000020_pillages_chinois_en_foret_tropicale.html|creationDate|2007-08-21 +http://www.lefigaro.fr/reportage/20070814.FIG000000020_pillages_chinois_en_foret_tropicale.html|tag|http://www.semanlink.net/tag/deforestation +http://www.lefigaro.fr/reportage/20070814.FIG000000020_pillages_chinois_en_foret_tropicale.html|tag|http://www.semanlink.net/tag/chine_afrique +http://www.lefigaro.fr/reportage/20070814.FIG000000020_pillages_chinois_en_foret_tropicale.html|title|Pillages chinois en forêt tropicale 
+http://www.lefigaro.fr/reportage/20070814.FIG000000020_pillages_chinois_en_foret_tropicale.html|creationTime|2007-08-21T22:25:11Z +https://www.theguardian.com/society/2016/aug/27/millions-at-risk-as-deadly-fungal-infections-acquire-drug-resistance|creationDate|2016-08-28 +https://www.theguardian.com/society/2016/aug/27/millions-at-risk-as-deadly-fungal-infections-acquire-drug-resistance|tag|http://www.semanlink.net/tag/fungal_infections +https://www.theguardian.com/society/2016/aug/27/millions-at-risk-as-deadly-fungal-infections-acquire-drug-resistance|title|Millions at risk as deadly fungal infections acquire drug resistance Society The Guardian +https://www.theguardian.com/society/2016/aug/27/millions-at-risk-as-deadly-fungal-infections-acquire-drug-resistance|creationTime|2016-08-28T15:56:39Z +https://arxiv.org/pdf/1711.07128.pdf|creationDate|2017-12-15 +https://arxiv.org/pdf/1711.07128.pdf|tag|http://www.semanlink.net/tag/deep_learning +https://arxiv.org/pdf/1711.07128.pdf|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/pdf/1711.07128.pdf|tag|http://www.semanlink.net/tag/keyword_spotting +https://arxiv.org/pdf/1711.07128.pdf|arxiv_author|Yundong Zhang +https://arxiv.org/pdf/1711.07128.pdf|arxiv_author|Naveen Suda +https://arxiv.org/pdf/1711.07128.pdf|arxiv_author|Vikas Chandra +https://arxiv.org/pdf/1711.07128.pdf|arxiv_author|Liangzhen Lai +https://arxiv.org/pdf/1711.07128.pdf|title|[1711.07128] Hello Edge: Keyword Spotting on Microcontrollers +https://arxiv.org/pdf/1711.07128.pdf|creationTime|2017-12-15T09:04:47Z +https://arxiv.org/pdf/1711.07128.pdf|arxiv_summary|"Keyword spotting (KWS) is a critical component for enabling speech based user +interactions on smart devices. It requires real-time response and high accuracy +for good user experience. Recently, neural networks have become an attractive +choice for KWS architecture because of their superior accuracy compared to +traditional speech processing algorithms. Due to its always-on nature, KWS +application has highly constrained power budget and typically runs on tiny +microcontrollers with limited memory and compute capability. The design of +neural network architecture for KWS must consider these constraints. In this +work, we perform neural network architecture evaluation and exploration for +running KWS on resource-constrained microcontrollers. We train various neural +network architectures for keyword spotting published in literature to compare +their accuracy and memory/compute requirements. We show that it is possible to +optimize these neural network architectures to fit within the memory and +compute constraints of microcontrollers without sacrificing accuracy. We +further explore the depthwise separable convolutional neural network (DS-CNN) +and compare it against other neural network architectures. DS-CNN achieves an +accuracy of 95.4%, which is ~10% higher than the DNN model with similar number +of parameters." 
+https://arxiv.org/pdf/1711.07128.pdf|arxiv_firstAuthor|Yundong Zhang +https://arxiv.org/pdf/1711.07128.pdf|arxiv_updated|2018-02-14T19:24:55Z +https://arxiv.org/pdf/1711.07128.pdf|arxiv_title|Hello Edge: Keyword Spotting on Microcontrollers +https://arxiv.org/pdf/1711.07128.pdf|arxiv_published|2017-11-20T03:19:03Z +https://arxiv.org/pdf/1711.07128.pdf|arxiv_num|1711.07128 +http://www.silicon.com/silicon/management/itpro/0,39024675,39345605,00.htm|creationDate|2008-11-21 +http://www.silicon.com/silicon/management/itpro/0,39024675,39345605,00.htm|tag|http://www.semanlink.net/tag/conscience_artificielle +http://www.silicon.com/silicon/management/itpro/0,39024675,39345605,00.htm|tag|http://www.semanlink.net/tag/technological_singularity +http://www.silicon.com/silicon/management/itpro/0,39024675,39345605,00.htm|tag|http://www.semanlink.net/tag/anticipation +http://www.silicon.com/silicon/management/itpro/0,39024675,39345605,00.htm|tag|http://www.semanlink.net/tag/turing_test +http://www.silicon.com/silicon/management/itpro/0,39024675,39345605,00.htm|tag|http://www.semanlink.net/tag/ray_kurzweil +http://www.silicon.com/silicon/management/itpro/0,39024675,39345605,00.htm|comment|Ray Kurzweil, inventor and futurist on the Turing Test, human vs machine intelligence, why being funny is clever, and the dangers of advanced technologies... +http://www.silicon.com/silicon/management/itpro/0,39024675,39345605,00.htm|title|"Kurzweil: ""Technology is a double-edged sword""" +http://www.silicon.com/silicon/management/itpro/0,39024675,39345605,00.htm|creationTime|2008-11-21T23:43:55Z +http://www.nybooks.com/articles/archives/2014/may/08/thomas-piketty-new-gilded-age/|creationDate|2015-03-28 +http://www.nybooks.com/articles/archives/2014/may/08/thomas-piketty-new-gilded-age/|tag|http://www.semanlink.net/tag/paul_krugman +http://www.nybooks.com/articles/archives/2014/may/08/thomas-piketty-new-gilded-age/|tag|http://www.semanlink.net/tag/thomas_piketty +http://www.nybooks.com/articles/archives/2014/may/08/thomas-piketty-new-gilded-age/|title|Why We’re in a New Gilded Age by Paul Krugman The New York Review of Books +http://www.nybooks.com/articles/archives/2014/may/08/thomas-piketty-new-gilded-age/|creationTime|2015-03-28T00:37:37Z +https://www.elastic.co/blog/uncoiling-data-in-dna-elasticsearch-as-a-bioinformatics-research-tool|creationDate|2017-04-27 +https://www.elastic.co/blog/uncoiling-data-in-dna-elasticsearch-as-a-bioinformatics-research-tool|tag|http://www.semanlink.net/tag/elasticsearch +https://www.elastic.co/blog/uncoiling-data-in-dna-elasticsearch-as-a-bioinformatics-research-tool|tag|http://www.semanlink.net/tag/bioinformatics +https://www.elastic.co/blog/uncoiling-data-in-dna-elasticsearch-as-a-bioinformatics-research-tool|tag|http://www.semanlink.net/tag/adn +https://www.elastic.co/blog/uncoiling-data-in-dna-elasticsearch-as-a-bioinformatics-research-tool|title|Uncoiling the Data in DNA: Elasticsearch as a BioInformatics Research Tool Elastic +https://www.elastic.co/blog/uncoiling-data-in-dna-elasticsearch-as-a-bioinformatics-research-tool|creationTime|2017-04-27T17:05:54Z +http://gigaom.com/2013/12/07/why-cognition-as-a-service-is-the-next-operating-system-battlefield/|creationDate|2014-02-03 +http://gigaom.com/2013/12/07/why-cognition-as-a-service-is-the-next-operating-system-battlefield/|tag|http://www.semanlink.net/tag/cognition_as_a_service +http://gigaom.com/2013/12/07/why-cognition-as-a-service-is-the-next-operating-system-battlefield/|title|Why Cognition-as-a-Service is the next 
operating system battlefield — Tech News and Analysis +http://gigaom.com/2013/12/07/why-cognition-as-a-service-is-the-next-operating-system-battlefield/|creationTime|2014-02-03T22:56:51Z +https://www.jair.org/media/4992/live-4992-9623-jair.pdf|creationDate|2017-11-06 +https://www.jair.org/media/4992/live-4992-9623-jair.pdf|tag|http://www.semanlink.net/tag/yoav_goldberg +https://www.jair.org/media/4992/live-4992-9623-jair.pdf|tag|http://www.semanlink.net/tag/nn_4_nlp +https://www.jair.org/media/4992/live-4992-9623-jair.pdf|tag|http://www.semanlink.net/tag/good +https://www.jair.org/media/4992/live-4992-9623-jair.pdf|comment|"[my notes](/sl/doc/2017/11/A%20Primer%20on%20Neural%20Network%20Models%20for%20NLP-Notes.md) + + +" +https://www.jair.org/media/4992/live-4992-9623-jair.pdf|title|Goldberg, Y. (2016). A Primer on Neural Network Models for Natural Language Processing. Journal of Artificial Intelligence Research +https://www.jair.org/media/4992/live-4992-9623-jair.pdf|creationTime|2017-11-06T11:23:40Z +http://www.cs.cmu.edu/~brettb/papers/06itsc-driver-intent.pdf|creationDate|2013-03-21 +http://www.cs.cmu.edu/~brettb/papers/06itsc-driver-intent.pdf|tag|http://www.semanlink.net/tag/destination_prediction +http://www.cs.cmu.edu/~brettb/papers/06itsc-driver-intent.pdf|title|Learning to Predict Driver Route and Destination Intent +http://www.cs.cmu.edu/~brettb/papers/06itsc-driver-intent.pdf|creationTime|2013-03-21T17:43:06Z +http://wiki.blojsom.com/wiki/display/blojsom/About+blojsom|creationDate|2005-08-29 +http://wiki.blojsom.com/wiki/display/blojsom/About+blojsom|tag|http://www.semanlink.net/tag/blojsom +http://wiki.blojsom.com/wiki/display/blojsom/About+blojsom|comment|"A Java-based, full-featured, multi-blog, multi-user software package that was inspired by blosxom. +The software behind Apple's Mac OS X Tiger Server Weblog Server." +http://wiki.blojsom.com/wiki/display/blojsom/About+blojsom|title|About blojsom - Confluence +http://www.latimes.com/opinion/op-ed/la-oe-rushkoff-universal-basic-income-silicon-valley-20170721-story.html|creationDate|2017-07-22 +http://www.latimes.com/opinion/op-ed/la-oe-rushkoff-universal-basic-income-silicon-valley-20170721-story.html|tag|http://www.semanlink.net/tag/guaranteed_basic_income +http://www.latimes.com/opinion/op-ed/la-oe-rushkoff-universal-basic-income-silicon-valley-20170721-story.html|tag|http://www.semanlink.net/tag/douglas_rushkoff +http://www.latimes.com/opinion/op-ed/la-oe-rushkoff-universal-basic-income-silicon-valley-20170721-story.html|tag|http://www.semanlink.net/tag/silicon_valley +http://www.latimes.com/opinion/op-ed/la-oe-rushkoff-universal-basic-income-silicon-valley-20170721-story.html|title|Silicon Valley's push for universal basic income is — surprise! 
— totally self-serving - LA Times +http://www.latimes.com/opinion/op-ed/la-oe-rushkoff-universal-basic-income-silicon-valley-20170721-story.html|creationTime|2017-07-22T02:36:00Z +https://alarmingdevelopment.org/?p=1173|creationDate|2018-08-05 +https://alarmingdevelopment.org/?p=1173|tag|http://www.semanlink.net/tag/hypercard +https://alarmingdevelopment.org/?p=1173|tag|http://www.semanlink.net/tag/programming +https://alarmingdevelopment.org/?p=1173|comment|Programming today is exactly what you’d expect to get by paying an isolated subculture of nerdy young men to entertain themselves for fifty years +https://alarmingdevelopment.org/?p=1173|title|The problem with programming and how to fix it – Alarming Development +https://alarmingdevelopment.org/?p=1173|creationTime|2018-08-05T11:04:44Z +http://www.bbc.co.uk/news/science-environment-23244768|creationDate|2013-07-11 +http://www.bbc.co.uk/news/science-environment-23244768|tag|http://www.semanlink.net/tag/artificial_life +http://www.bbc.co.uk/news/science-environment-23244768|title|BBC News - Scientists building the world's first synthetic yeast +http://www.bbc.co.uk/news/science-environment-23244768|creationTime|2013-07-11T19:25:49Z +https://github.com/afs/rdf-thrift/blob/master/rdf-binary.md|creationDate|2014-06-29 +https://github.com/afs/rdf-thrift/blob/master/rdf-binary.md|tag|http://www.semanlink.net/tag/rdf +https://github.com/afs/rdf-thrift/blob/master/rdf-binary.md|tag|http://www.semanlink.net/tag/andy_seaborne +https://github.com/afs/rdf-thrift/blob/master/rdf-binary.md|title|RDF Binary encoding using Thrift. +https://github.com/afs/rdf-thrift/blob/master/rdf-binary.md|creationTime|2014-06-29T09:28:35Z +https://t.co/abYFX5zXXq|creationDate|2019-02-27 +https://t.co/abYFX5zXXq|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +https://t.co/abYFX5zXXq|tag|http://www.semanlink.net/tag/sebastian_ruder +https://t.co/abYFX5zXXq|tag|http://www.semanlink.net/tag/slides +https://t.co/abYFX5zXXq|title|Neural Transfer Learning for Natural Language Processing - Seb Ruder PhD Thesis +https://t.co/abYFX5zXXq|creationTime|2019-02-27T13:54:03Z +https://commons.apache.org/proper/commons-csv/|creationDate|2015-10-13 +https://commons.apache.org/proper/commons-csv/|tag|http://www.semanlink.net/tag/apache_org +https://commons.apache.org/proper/commons-csv/|tag|http://www.semanlink.net/tag/csv +https://commons.apache.org/proper/commons-csv/|title|Commons CSV +https://commons.apache.org/proper/commons-csv/|creationTime|2015-10-13T17:05:51Z +http://blog.aksw.org/2010/navigational-knowledge-engineering-nke-and-hanne/|creationDate|2010-12-14 +http://blog.aksw.org/2010/navigational-knowledge-engineering-nke-and-hanne/|tag|http://www.semanlink.net/tag/ontologies +http://blog.aksw.org/2010/navigational-knowledge-engineering-nke-and-hanne/|comment|The vision of NKE is to produce ontologies as a result of users navigating through a system. 
+http://blog.aksw.org/2010/navigational-knowledge-engineering-nke-and-hanne/|title|Navigational Knowledge Engineering (NKE) and HANNE +http://blog.aksw.org/2010/navigational-knowledge-engineering-nke-and-hanne/|creationTime|2010-12-14T23:16:45Z +http://hyperdata.org/taglia/|creationDate|2009-04-11 +http://hyperdata.org/taglia/|tag|http://www.semanlink.net/tag/semanlink_related +http://hyperdata.org/taglia/|tag|http://www.semanlink.net/tag/talis_platform +http://hyperdata.org/taglia/|tag|http://www.semanlink.net/tag/del_icio_us +http://hyperdata.org/taglia/|tag|http://www.semanlink.net/tag/danny_ayers +http://hyperdata.org/taglia/|tag|http://www.semanlink.net/tag/tag_ontology +http://hyperdata.org/taglia/|comment|Takes all your bookmarks from del.icio.us, converts to RDF (using Tag Ontology), pushes into a Talis Platform store +http://hyperdata.org/taglia/|title|tagliatelle delicious RDFizer +http://hyperdata.org/taglia/|creationTime|2009-04-11T00:12:14Z +https://communication.revues.org/6650|creationDate|2016-09-05 +https://communication.revues.org/6650|tag|http://www.semanlink.net/tag/linking_open_data +https://communication.revues.org/6650|tag|http://www.semanlink.net/tag/lod_museum +https://communication.revues.org/6650|title|La construction d’un espace patrimonial partagé dans le Web de données ouvert +https://communication.revues.org/6650|creationTime|2016-09-05T11:43:19Z +http://michelvolle.blogspot.fr/2014/05/la-france-cette-mal-aimee.html|creationDate|2014-05-20 +http://michelvolle.blogspot.fr/2014/05/la-france-cette-mal-aimee.html|tag|http://www.semanlink.net/tag/2eme_guerre_mondiale +http://michelvolle.blogspot.fr/2014/05/la-france-cette-mal-aimee.html|tag|http://www.semanlink.net/tag/allemagne +http://michelvolle.blogspot.fr/2014/05/la-france-cette-mal-aimee.html|tag|http://www.semanlink.net/tag/france +http://michelvolle.blogspot.fr/2014/05/la-france-cette-mal-aimee.html|tag|http://www.semanlink.net/tag/nazisme +http://michelvolle.blogspot.fr/2014/05/la-france-cette-mal-aimee.html|title|volle.com: La France, cette mal aimée +http://michelvolle.blogspot.fr/2014/05/la-france-cette-mal-aimee.html|creationTime|2014-05-20T23:04:48Z +http://swordfish.rdfweb.org/photos/2006/05/10/index.html|creationDate|2006-05-13 +http://swordfish.rdfweb.org/photos/2006/05/10/index.html|tag|http://www.semanlink.net/tag/jena_user_conference +http://swordfish.rdfweb.org/photos/2006/05/10/index.html|title|JUC day 1: photos +http://www.naturenw.org/|creationDate|2008-05-18 +http://www.naturenw.org/|tag|http://www.semanlink.net/tag/oregon +http://www.naturenw.org/|title|Nature of the Northwest +http://www.naturenw.org/|creationTime|2008-05-18T23:58:26Z +http://www.resgeol04.org/dalle.html|creationDate|2011-10-05 +http://www.resgeol04.org/dalle.html|tag|http://www.semanlink.net/tag/ammonite +http://www.resgeol04.org/dalle.html|title|La dalle à ammonites, Réserve géologique de Haute Provence +http://www.resgeol04.org/dalle.html|creationTime|2011-10-05T23:49:45Z +http://julie.grollier.free.fr/STT.htm|creationDate|2018-10-27 +http://julie.grollier.free.fr/STT.htm|tag|http://www.semanlink.net/tag/brains_in_silicon +http://julie.grollier.free.fr/STT.htm|tag|http://www.semanlink.net/tag/julie_grollier +http://julie.grollier.free.fr/STT.htm|comment|"The classical way to perform data processing is to reduce all sources of noise to the maximum. An interesting alternative strategy is, on the contrary, to exploit noise for computing. 
In this trend, stochastic computing has a great potential for the implementation of **low power information processing systems**. Indeed noise is often seen as a key element of neural computation, beneficial for a number of operations as near-threshold signaling and decision making. And spin torque devices, just like neurons, can exhibit noise induced sensitivity improvement, for example via stochastic resonance. We are working on the development of **probabilistic bio-inspired hardware** exploiting the controlled stochasticity provided by spin torque +" +http://julie.grollier.free.fr/STT.htm|title|Julie Grollier - Spin Transfer Torque +http://julie.grollier.free.fr/STT.htm|creationTime|2018-10-27T17:28:16Z +http://semanticweb.com/goodrelations-fully-integrated-with-schema-org_b33306|creationDate|2012-11-14 +http://semanticweb.com/goodrelations-fully-integrated-with-schema-org_b33306|tag|http://www.semanlink.net/tag/goodrelations +http://semanticweb.com/goodrelations-fully-integrated-with-schema-org_b33306|tag|http://www.semanlink.net/tag/schema_org +http://semanticweb.com/goodrelations-fully-integrated-with-schema-org_b33306|title|GoodRelations Fully Integrated with Schema.org - semanticweb.com +http://semanticweb.com/goodrelations-fully-integrated-with-schema-org_b33306|creationTime|2012-11-14T11:31:30Z +https://github.com/xiaohuiyan/BTM|creationDate|2017-06-07 +https://github.com/xiaohuiyan/BTM|tag|http://www.semanlink.net/tag/github_project +https://github.com/xiaohuiyan/BTM|tag|http://www.semanlink.net/tag/biterm_topic_model +https://github.com/xiaohuiyan/BTM|title|Biterm Topic Model (github) +https://github.com/xiaohuiyan/BTM|creationTime|2017-06-07T18:40:48Z +https://medium.com/@yoav.goldberg/an-adversarial-review-of-adversarial-generation-of-natural-language-409ac3378bd7|creationDate|2018-01-01 +https://medium.com/@yoav.goldberg/an-adversarial-review-of-adversarial-generation-of-natural-language-409ac3378bd7|tag|http://www.semanlink.net/tag/generative_adversarial_network +https://medium.com/@yoav.goldberg/an-adversarial-review-of-adversarial-generation-of-natural-language-409ac3378bd7|tag|http://www.semanlink.net/tag/natural_language_generation +https://medium.com/@yoav.goldberg/an-adversarial-review-of-adversarial-generation-of-natural-language-409ac3378bd7|tag|http://www.semanlink.net/tag/rant +https://medium.com/@yoav.goldberg/an-adversarial-review-of-adversarial-generation-of-natural-language-409ac3378bd7|tag|http://www.semanlink.net/tag/yoav_goldberg +https://medium.com/@yoav.goldberg/an-adversarial-review-of-adversarial-generation-of-natural-language-409ac3378bd7|title|An Adversarial Review of “Adversarial Generation of Natural Language” +https://medium.com/@yoav.goldberg/an-adversarial-review-of-adversarial-generation-of-natural-language-409ac3378bd7|creationTime|2018-01-01T12:39:30Z +https://arxiv.org/abs/1901.11504|creationDate|2019-02-17 +https://arxiv.org/abs/1901.11504|tag|http://www.semanlink.net/tag/kd_mkb_biblio +https://arxiv.org/abs/1901.11504|tag|http://www.semanlink.net/tag/nlu +https://arxiv.org/abs/1901.11504|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1901.11504|tag|http://www.semanlink.net/tag/nlp_microsoft +https://arxiv.org/abs/1901.11504|tag|http://www.semanlink.net/tag/pre_trained_language_models +https://arxiv.org/abs/1901.11504|tag|http://www.semanlink.net/tag/multi_task_learning +https://arxiv.org/abs/1901.11504|tag|http://www.semanlink.net/tag/bert +https://arxiv.org/abs/1901.11504|arxiv_author|Jianfeng Gao 
+https://arxiv.org/abs/1901.11504|arxiv_author|Pengcheng He +https://arxiv.org/abs/1901.11504|arxiv_author|Xiaodong Liu +https://arxiv.org/abs/1901.11504|arxiv_author|Weizhu Chen +https://arxiv.org/abs/1901.11504|comment|outperforms BERT in nine of eleven benchmark NLP tasks +https://arxiv.org/abs/1901.11504|title|[1901.11504] Multi-Task Deep Neural Networks for Natural Language Understanding +https://arxiv.org/abs/1901.11504|creationTime|2019-02-17T12:30:18Z +https://arxiv.org/abs/1901.11504|arxiv_summary|"In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for +learning representations across multiple natural language understanding (NLU) +tasks. MT-DNN not only leverages large amounts of cross-task data, but also +benefits from a regularization effect that leads to more general +representations in order to adapt to new tasks and domains. MT-DNN extends the +model proposed in Liu et al. (2015) by incorporating a pre-trained +bidirectional transformer language model, known as BERT (Devlin et al., 2018). +MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, +SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% +(2.2% absolute improvement). We also demonstrate using the SNLI and SciTail +datasets that the representations learned by MT-DNN allow domain adaptation +with substantially fewer in-domain labels than the pre-trained BERT +representations. The code and pre-trained models are publicly available at +https://github.com/namisan/mt-dnn." +https://arxiv.org/abs/1901.11504|arxiv_firstAuthor|Xiaodong Liu +https://arxiv.org/abs/1901.11504|arxiv_updated|2019-05-30T00:01:20Z +https://arxiv.org/abs/1901.11504|arxiv_title|Multi-Task Deep Neural Networks for Natural Language Understanding +https://arxiv.org/abs/1901.11504|arxiv_published|2019-01-31T18:07:25Z +https://arxiv.org/abs/1901.11504|arxiv_num|1901.11504 +http://pisani.blog.lemonde.fr/pisani/2005/06/cest_grce_aux_h_1.html|creationDate|2005-06-25 +http://pisani.blog.lemonde.fr/pisani/2005/06/cest_grce_aux_h_1.html|tag|http://www.semanlink.net/tag/ted_nelson +http://pisani.blog.lemonde.fr/pisani/2005/06/cest_grce_aux_h_1.html|tag|http://www.semanlink.net/tag/bill_gates +http://pisani.blog.lemonde.fr/pisani/2005/06/cest_grce_aux_h_1.html|tag|http://www.semanlink.net/tag/transnets +http://pisani.blog.lemonde.fr/pisani/2005/06/cest_grce_aux_h_1.html|tag|http://www.semanlink.net/tag/alan_kay +http://pisani.blog.lemonde.fr/pisani/2005/06/cest_grce_aux_h_1.html|tag|http://www.semanlink.net/tag/engelbart +http://pisani.blog.lemonde.fr/pisani/2005/06/cest_grce_aux_h_1.html|tag|http://www.semanlink.net/tag/hippies +http://pisani.blog.lemonde.fr/pisani/2005/06/cest_grce_aux_h_1.html|title|Transnets, des gadgets aux réseaux: C’est grâce aux hippies (2) +http://thefigtrees.net/lee/sw/sciam/semantic-web-in-action|creationDate|2008-03-29 +http://thefigtrees.net/lee/sw/sciam/semantic-web-in-action|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://thefigtrees.net/lee/sw/sciam/semantic-web-in-action|tag|http://www.semanlink.net/tag/ivan_herman +http://thefigtrees.net/lee/sw/sciam/semantic-web-in-action|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://thefigtrees.net/lee/sw/sciam/semantic-web-in-action|title|The Semantic Web in Action - Scientific American - December 2007 +http://thefigtrees.net/lee/sw/sciam/semantic-web-in-action|creationTime|2008-03-29T18:27:19Z +http://en.wikipedia.org/wiki/Timeline_of_evolution|creationDate|2011-10-20 
+http://en.wikipedia.org/wiki/Timeline_of_evolution|tag|http://www.semanlink.net/tag/evolution +http://en.wikipedia.org/wiki/Timeline_of_evolution|tag|http://www.semanlink.net/tag/histoire_de_la_vie +http://en.wikipedia.org/wiki/Timeline_of_evolution|title|Timeline of evolution - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Timeline_of_evolution|creationTime|2011-10-20T00:43:59Z +http://dallemang.typepad.com/my_weblog/2011/06/stop-press-microdata-in-topbraid.html|creationDate|2011-06-07 +http://dallemang.typepad.com/my_weblog/2011/06/stop-press-microdata-in-topbraid.html|tag|http://www.semanlink.net/tag/dean_allemang +http://dallemang.typepad.com/my_weblog/2011/06/stop-press-microdata-in-topbraid.html|tag|http://www.semanlink.net/tag/topbraid +http://dallemang.typepad.com/my_weblog/2011/06/stop-press-microdata-in-topbraid.html|tag|http://www.semanlink.net/tag/microdata +http://dallemang.typepad.com/my_weblog/2011/06/stop-press-microdata-in-topbraid.html|title|Stop Press: Microdata in TopBraid +http://dallemang.typepad.com/my_weblog/2011/06/stop-press-microdata-in-topbraid.html|creationTime|2011-06-07T14:06:53Z +https://towardsdatascience.com/|creationDate|2018-05-29 +https://towardsdatascience.com/|tag|http://www.semanlink.net/tag/data_science +https://towardsdatascience.com/|title|Towards Data Science +https://towardsdatascience.com/|creationTime|2018-05-29T15:02:02Z +http://www.quakr.co.uk/|creationDate|2007-05-17 +http://www.quakr.co.uk/|tag|http://www.semanlink.net/tag/katie_portwin +http://www.quakr.co.uk/|tag|http://www.semanlink.net/tag/xtech_2007 +http://www.quakr.co.uk/|tag|http://www.semanlink.net/tag/3d +http://www.quakr.co.uk/|comment|"Is it possible to build a virtual world from user-contributed photographic metadata?
Quakr is a project to build the world one photo at a time. +" +http://www.quakr.co.uk/|title|quakr +http://www.quakr.co.uk/|creationTime|2007-05-17T23:29:54Z +http://ajaxmatters.com|creationDate|2005-06-15 +http://ajaxmatters.com|tag|http://www.semanlink.net/tag/ajax +http://ajaxmatters.com|title|AJAX Matters - Asynchronous JavaScript and XML and XMLHTTP development information +http://rdfweb.org/topic/Smushing|creationDate|2005-07-04 +http://rdfweb.org/topic/Smushing|tag|http://www.semanlink.net/tag/smushing +http://rdfweb.org/topic/Smushing|comment|Smushing is an informal term for 'merging data based on knowledge of uniquely identifying properties'. +http://rdfweb.org/topic/Smushing|title|Smushing - FOAF Wiki +http://semantic-technology-companies.sti2.at/index.html|creationDate|2011-03-01 +http://semantic-technology-companies.sti2.at/index.html|tag|http://www.semanlink.net/tag/semantic_web_company +http://semantic-technology-companies.sti2.at/index.html|title|Semantic Technology Companies +http://semantic-technology-companies.sti2.at/index.html|creationTime|2011-03-01T13:38:32Z +https://github.com/RubenVerborgh/Hydra-Architecture-Diagram/|creationDate|2017-03-24 +https://github.com/RubenVerborgh/Hydra-Architecture-Diagram/|tag|http://www.semanlink.net/tag/hydra +https://github.com/RubenVerborgh/Hydra-Architecture-Diagram/|title|Hydra Architecture Diagram +https://github.com/RubenVerborgh/Hydra-Architecture-Diagram/|creationTime|2017-03-24T14:41:09Z +http://www.ebusiness-unibw.org/ontologies/opdm/|creationDate|2014-04-19 +http://www.ebusiness-unibw.org/ontologies/opdm/|tag|http://www.semanlink.net/tag/goodrelations +http://www.ebusiness-unibw.org/ontologies/opdm/|tag|http://www.semanlink.net/tag/ontologies +http://www.ebusiness-unibw.org/ontologies/opdm/|tag|http://www.semanlink.net/tag/martin_hepp +http://www.ebusiness-unibw.org/ontologies/opdm/|tag|http://www.semanlink.net/tag/product_description +http://www.ebusiness-unibw.org/ontologies/opdm/|title|OPDM - Ontologies +http://www.ebusiness-unibw.org/ontologies/opdm/|creationTime|2014-04-19T10:49:21Z +http://www.w3.org/2010/Talks/0622-SemTech-IH/|creationDate|2010-06-30 +http://www.w3.org/2010/Talks/0622-SemTech-IH/|tag|http://www.semanlink.net/tag/ivan_herman +http://www.w3.org/2010/Talks/0622-SemTech-IH/|tag|http://www.semanlink.net/tag/semantic_web_introduction +http://www.w3.org/2010/Talks/0622-SemTech-IH/|tag|http://www.semanlink.net/tag/slides +http://www.w3.org/2010/Talks/0622-SemTech-IH/|title|Introduction to Semantic Web Technologies, slides Ivan Herman at semtech 2010 +http://www.w3.org/2010/Talks/0622-SemTech-IH/|creationTime|2010-06-30T13:21:29Z +http://www.semanticuniverse.com/articles-empire-where-rdf-sparql-meet-java-persistence-api.html|creationDate|2010-07-06 +http://www.semanticuniverse.com/articles-empire-where-rdf-sparql-meet-java-persistence-api.html|tag|http://www.semanlink.net/tag/sparql +http://www.semanticuniverse.com/articles-empire-where-rdf-sparql-meet-java-persistence-api.html|tag|http://www.semanlink.net/tag/jpa +http://www.semanticuniverse.com/articles-empire-where-rdf-sparql-meet-java-persistence-api.html|tag|http://www.semanlink.net/tag/rdf +http://www.semanticuniverse.com/articles-empire-where-rdf-sparql-meet-java-persistence-api.html|title|Empire: RDF & SPARQL Meet JPA Semantic Universe +http://www.semanticuniverse.com/articles-empire-where-rdf-sparql-meet-java-persistence-api.html|creationTime|2010-07-06T14:00:10Z 
+http://lefeuilleton.blog.lemonde.fr/2008/10/24/episode-7-la-pause-obama/|creationDate|2008-10-28 +http://lefeuilleton.blog.lemonde.fr/2008/10/24/episode-7-la-pause-obama/|tag|http://www.semanlink.net/tag/obama +http://lefeuilleton.blog.lemonde.fr/2008/10/24/episode-7-la-pause-obama/|tag|http://www.semanlink.net/tag/rigolo +http://lefeuilleton.blog.lemonde.fr/2008/10/24/episode-7-la-pause-obama/|title|Episode 7 : la pause Obama - Le Feuilleton - Blog LeMonde.fr +http://lefeuilleton.blog.lemonde.fr/2008/10/24/episode-7-la-pause-obama/|creationTime|2008-10-28T22:39:30Z +http://scikit-learn.org/stable/modules/cross_validation.html|creationDate|2016-01-11 +http://scikit-learn.org/stable/modules/cross_validation.html|tag|http://www.semanlink.net/tag/scikit_learn +http://scikit-learn.org/stable/modules/cross_validation.html|tag|http://www.semanlink.net/tag/cross_validation +http://scikit-learn.org/stable/modules/cross_validation.html|title|Cross-validation: evaluating estimator performance — scikit-learn documentation +http://scikit-learn.org/stable/modules/cross_validation.html|creationTime|2016-01-11T17:52:23Z +https://www.quantamagazine.org/bacteria-use-brainlike-bursts-of-electricity-to-communicate-20170905/?utm_content=buffer5a5cf&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-09-06 +https://www.quantamagazine.org/bacteria-use-brainlike-bursts-of-electricity-to-communicate-20170905/?utm_content=buffer5a5cf&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/cerveau +https://www.quantamagazine.org/bacteria-use-brainlike-bursts-of-electricity-to-communicate-20170905/?utm_content=buffer5a5cf&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/bacteries +https://www.quantamagazine.org/bacteria-use-brainlike-bursts-of-electricity-to-communicate-20170905/?utm_content=buffer5a5cf&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Bacteria Use Brainlike Bursts of Electricity to Communicate Quanta Magazine +https://www.quantamagazine.org/bacteria-use-brainlike-bursts-of-electricity-to-communicate-20170905/?utm_content=buffer5a5cf&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-09-06T10:18:24Z +http://www.newyorker.com/online/blogs/newsdesk/2013/01/the-libraries-of-timbuktu.html|creationDate|2013-01-29 +http://www.newyorker.com/online/blogs/newsdesk/2013/01/the-libraries-of-timbuktu.html|tag|http://www.semanlink.net/tag/manuscrits_de_tombouctou +http://www.newyorker.com/online/blogs/newsdesk/2013/01/the-libraries-of-timbuktu.html|title|Has the Great Library of Timbuktu Been Lost? 
: The New Yorker +http://www.newyorker.com/online/blogs/newsdesk/2013/01/the-libraries-of-timbuktu.html|creationTime|2013-01-29T18:25:15Z +http://www.eswc2007.org/preliminaryprogram.cfm|creationDate|2007-06-04 +http://www.eswc2007.org/preliminaryprogram.cfm|tag|http://www.semanlink.net/tag/eswc_2007 +http://www.eswc2007.org/preliminaryprogram.cfm|title|4th European Semantic Web Conference 2007 +http://www.eswc2007.org/preliminaryprogram.cfm|creationTime|2007-06-04T07:52:19Z +http://www.cinemovies.fr/fiche_film.php?IDfilm=2854|creationDate|2009-02-04 +http://www.cinemovies.fr/fiche_film.php?IDfilm=2854|tag|http://www.semanlink.net/tag/clandestins +http://www.cinemovies.fr/fiche_film.php?IDfilm=2854|tag|http://www.semanlink.net/tag/israel +http://www.cinemovies.fr/fiche_film.php?IDfilm=2854|tag|http://www.semanlink.net/tag/film +http://www.cinemovies.fr/fiche_film.php?IDfilm=2854|tag|http://www.semanlink.net/tag/jerusalem +http://www.cinemovies.fr/fiche_film.php?IDfilm=2854|comment|"Dans un village d'Afrique du Sud, le jeune James est choisi pour accomplir une mission : un pélerinage dans la ville sainte de Jérusalem. Mais à son arrivée en Israel, il doit travailler comme un clandestin. + +" +http://www.cinemovies.fr/fiche_film.php?IDfilm=2854|title|Le Voyage de James à Jérusalem - Un film de Ra'anan Alexandrowicz avec Siyabonga Melongisi Shibe, Arieh Elias, Salim Dau, ... +http://www.cinemovies.fr/fiche_film.php?IDfilm=2854|creationTime|2009-02-04T23:37:50Z +http://www.bbc.co.uk/news/technology-26065991|creationDate|2014-02-17 +http://www.bbc.co.uk/news/technology-26065991|tag|http://www.semanlink.net/tag/i_b_m_s_watson +http://www.bbc.co.uk/news/technology-26065991|tag|http://www.semanlink.net/tag/new_africa +http://www.bbc.co.uk/news/technology-26065991|title|BBC News - IBM's Watson in Africa to help solve problems +http://www.bbc.co.uk/news/technology-26065991|creationTime|2014-02-17T23:56:52Z +http://www.freepatentsonline.com/EP1700233.html|creationDate|2009-11-26 +http://www.freepatentsonline.com/EP1700233.html|tag|http://www.semanlink.net/tag/database +http://www.freepatentsonline.com/EP1700233.html|tag|http://www.semanlink.net/tag/koskas +http://www.freepatentsonline.com/EP1700233.html|tag|http://www.semanlink.net/tag/patent +http://www.freepatentsonline.com/EP1700233.html|tag|http://www.semanlink.net/tag/radix_trees +http://www.freepatentsonline.com/EP1700233.html|title|METHOD FOR ORGANIZING A DATABASE - Patent EP1700233 +http://www.freepatentsonline.com/EP1700233.html|creationTime|2009-11-26T09:43:44Z +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17924|creationDate|2007-06-13 +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17924|tag|http://www.semanlink.net/tag/equivalence_mining +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17924|tag|http://www.semanlink.net/tag/yves_raymond +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17924|tag|http://www.semanlink.net/tag/jamendo +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17924|tag|http://www.semanlink.net/tag/musicbrainz +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17924|comment|"In order to map the Jamendo dataset to the Musicbrainz dataset, I developed a small equivalence miner.
The main problem was that it was impossible to use literal lookup techniques for these datasets - many small bands have the same name, some are just represented in the jamendo dataset but there are some matching names in the musicbrainz dataset, etc.
This equivalence miner does two things: it disambiguates and propagates equivalence relationships. Moreover, it works in a linked data style (the only input data it ""needs"" is a start URI)."
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17924|title|[Linking-open-data] Equivalence miner
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17924|creationTime|2007-06-13T23:20:48Z
+http://www.javaworld.com/jw-02-2001/jw-0209-double.html|creationDate|2012-02-14
+http://www.javaworld.com/jw-02-2001/jw-0209-double.html|tag|http://www.semanlink.net/tag/java_dev
+http://www.javaworld.com/jw-02-2001/jw-0209-double.html|title|Double-checked locking: Clever, but broken - JavaWorld
+http://www.javaworld.com/jw-02-2001/jw-0209-double.html|creationTime|2012-02-14T14:29:11Z
+http://www.eyaloren.org/pubs/sfsw2006.pdf|creationDate|2006-04-04
+http://www.eyaloren.org/pubs/sfsw2006.pdf|tag|http://www.semanlink.net/tag/semantic_web_and_oop
+http://www.eyaloren.org/pubs/sfsw2006.pdf|tag|http://www.semanlink.net/tag/rdf
+http://www.eyaloren.org/pubs/sfsw2006.pdf|tag|http://www.semanlink.net/tag/ruby
+http://www.eyaloren.org/pubs/sfsw2006.pdf|comment|"Abstract: Although most developers are object-oriented, programming RDF is triple-oriented. Bridging this gap, by developing a truly object-oriented API that uses domain terminology, is not straightforward, because of the dynamic and semi-structured nature of RDF and the open-world semantics of RDF Schema.<br/>
+We present ActiveRDF, our object-oriented library for accessing RDF
+data. ActiveRDF is completely dynamic, offers full manipulation and
+querying of RDF data, does not rely on a schema and can be used against
+different data-stores. In addition, the integration with the popular Rails
+framework enables very easy development of Semantic Web applications.
+"
+http://www.eyaloren.org/pubs/sfsw2006.pdf|title|ActiveRDF: object-oriented RDF in Ruby
+https://stackoverflow.com/questions/4864883/safaris-reader-mode-open-source-solution|creationDate|2019-03-23
+https://stackoverflow.com/questions/4864883/safaris-reader-mode-open-source-solution|tag|http://www.semanlink.net/tag/reader_mode_browsers
+https://stackoverflow.com/questions/4864883/safaris-reader-mode-open-source-solution|title|"web - Safari's ""Reader Mode"" - Open source solution? - Stack Overflow"
+https://stackoverflow.com/questions/4864883/safaris-reader-mode-open-source-solution|creationTime|2019-03-23T14:19:27Z
+https://motherboard.vice.com/en_us/article/xykkkd/why-american-farmers-are-hacking-their-tractors-with-ukrainian-firmware|creationDate|2018-02-02
+https://motherboard.vice.com/en_us/article/xykkkd/why-american-farmers-are-hacking-their-tractors-with-ukrainian-firmware|tag|http://www.semanlink.net/tag/hack
+https://motherboard.vice.com/en_us/article/xykkkd/why-american-farmers-are-hacking-their-tractors-with-ukrainian-firmware|tag|http://www.semanlink.net/tag/agriculture
+https://motherboard.vice.com/en_us/article/xykkkd/why-american-farmers-are-hacking-their-tractors-with-ukrainian-firmware|title|Why American Farmers Are Hacking Their Tractors With Ukrainian Firmware - Motherboard
+https://motherboard.vice.com/en_us/article/xykkkd/why-american-farmers-are-hacking-their-tractors-with-ukrainian-firmware|creationTime|2018-02-02T17:41:30Z
+https://arxiv.org/abs/1706.04902|creationDate|2018-05-20
+https://arxiv.org/abs/1706.04902|tag|http://www.semanlink.net/tag/cross_lingual_word_embeddings
+https://arxiv.org/abs/1706.04902|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1706.04902|tag|http://www.semanlink.net/tag/sebastian_ruder
+https://arxiv.org/abs/1706.04902|tag|http://www.semanlink.net/tag/survey
+https://arxiv.org/abs/1706.04902|arxiv_author|Sebastian Ruder
+https://arxiv.org/abs/1706.04902|arxiv_author|Anders Søgaard
+https://arxiv.org/abs/1706.04902|arxiv_author|Ivan Vulić
+https://arxiv.org/abs/1706.04902|title|[1706.04902] A Survey Of Cross-lingual Word Embedding Models
+https://arxiv.org/abs/1706.04902|creationTime|2018-05-20T12:01:50Z
+https://arxiv.org/abs/1706.04902|arxiv_summary|"Cross-lingual representations of words enable us to reason about word meaning
+in multilingual contexts and are a key facilitator of cross-lingual transfer
+when developing natural language processing models for low-resource languages.
+In this survey, we provide a comprehensive typology of cross-lingual word
+embedding models. We compare their data requirements and objective functions.
+The recurring theme of the survey is that many of the models presented in the
+literature optimize for the same objectives, and that seemingly different
+models are often equivalent modulo optimization strategies, hyper-parameters,
+and such. We also discuss the different ways cross-lingual word embeddings are
+evaluated, as well as future challenges and research horizons."
+https://arxiv.org/abs/1706.04902|arxiv_firstAuthor|Sebastian Ruder +https://arxiv.org/abs/1706.04902|arxiv_updated|2019-10-06T10:01:48Z +https://arxiv.org/abs/1706.04902|arxiv_title|A Survey Of Cross-lingual Word Embedding Models +https://arxiv.org/abs/1706.04902|arxiv_published|2017-06-15T14:46:56Z +https://arxiv.org/abs/1706.04902|arxiv_num|1706.04902 +http://apassant.net/2015/07/14/visually-index-instagram-pictures-find-real-time/|creationDate|2015-07-14 +http://apassant.net/2015/07/14/visually-index-instagram-pictures-find-real-time/|tag|http://www.semanlink.net/tag/alexandre_passant +http://apassant.net/2015/07/14/visually-index-instagram-pictures-find-real-time/|tag|http://www.semanlink.net/tag/visual_search +http://apassant.net/2015/07/14/visually-index-instagram-pictures-find-real-time/|title|Visually index Instagram pictures and find them in real-time +http://apassant.net/2015/07/14/visually-index-instagram-pictures-find-real-time/|creationTime|2015-07-14T23:43:10Z +http://www.ldodds.com/projects/slug/|creationDate|2006-03-29 +http://www.ldodds.com/projects/slug/|tag|http://www.semanlink.net/tag/semantic_web_crawler +http://www.ldodds.com/projects/slug/|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/projects/slug/|comment|Slug is a web crawler (or Scutter) designed for harvesting semantic web content. Implemented in Java using the Jena API, Slug provides a configurable, modular framework that allows a great degree of flexibility in configuring the retrieval, processing and storage of harvested content. +http://www.ldodds.com/projects/slug/|title|Slug: A Semantic Web Crawler +http://ieet.org/index.php/IEET/more/rinesi20150925|creationDate|2015-10-03 +http://ieet.org/index.php/IEET/more/rinesi20150925|tag|http://www.semanlink.net/tag/volkswagate +http://ieet.org/index.php/IEET/more/rinesi20150925|tag|http://www.semanlink.net/tag/internet_of_things +http://ieet.org/index.php/IEET/more/rinesi20150925|comment|cars, street lights, and even buildings will behave in the same vaguely suspicious way +http://ieet.org/index.php/IEET/more/rinesi20150925|title|The price of the Internet of Things will be a vague dread of a malicious world +http://ieet.org/index.php/IEET/more/rinesi20150925|creationTime|2015-10-03T10:16:35Z +https://www.bbc.co.uk/news/science-environment-45172671|creationDate|2018-08-13 +https://www.bbc.co.uk/news/science-environment-45172671|tag|http://www.semanlink.net/tag/crise_ecologique +https://www.bbc.co.uk/news/science-environment-45172671|tag|http://www.semanlink.net/tag/paradis_fiscaux +https://www.bbc.co.uk/news/science-environment-45172671|title|Tax haven link to rainforest destruction and illegal fishing - BBC News +https://www.bbc.co.uk/news/science-environment-45172671|creationTime|2018-08-13T17:50:11Z +http://googleresearch.blogspot.fr/2013/07/11-billion-clues-in-800-million.html|creationDate|2013-07-18 +http://googleresearch.blogspot.fr/2013/07/11-billion-clues-in-800-million.html|tag|http://www.semanlink.net/tag/google_research +http://googleresearch.blogspot.fr/2013/07/11-billion-clues-in-800-million.html|tag|http://www.semanlink.net/tag/freebase +http://googleresearch.blogspot.fr/2013/07/11-billion-clues-in-800-million.html|tag|http://www.semanlink.net/tag/google_knowledge_graph +http://googleresearch.blogspot.fr/2013/07/11-billion-clues-in-800-million.html|title|11 Billion Clues in 800 Million Documents: A Web Research Corpus Annotated with Freebase Concepts 
+http://googleresearch.blogspot.fr/2013/07/11-billion-clues-in-800-million.html|creationTime|2013-07-18T16:29:32Z
+http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3214,50-692419,0.html|creationDate|2005-09-23
+http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3214,50-692419,0.html|tag|http://www.semanlink.net/tag/royaume_uni
+http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3214,50-692419,0.html|tag|http://www.semanlink.net/tag/lutte_anti_terroriste
+http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3214,50-692419,0.html|tag|http://www.semanlink.net/tag/ca_craint
+http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3214,50-692419,0.html|title|Un Britannique condamné à quinze ans de prison pour détention de documents prohibés
+http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3214,50-692419,0.html|source|Le Monde
+http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3214,50-692419,0.html|date|2005-09-23
+http://ocw.mit.edu/OcwWeb/web/home/home/index.htm|creationDate|2007-10-05
+http://ocw.mit.edu/OcwWeb/web/home/home/index.htm|tag|http://www.semanlink.net/tag/mit
+http://ocw.mit.edu/OcwWeb/web/home/home/index.htm|tag|http://www.semanlink.net/tag/online_course_materials
+http://ocw.mit.edu/OcwWeb/web/home/home/index.htm|comment|MIT is committed to advancing education and discovery through knowledge open to everyone. OCW shares free lecture notes, exams, and other resources from more than 1700 courses spanning MIT's entire curriculum.
+http://ocw.mit.edu/OcwWeb/web/home/home/index.htm|title|Free Online Course Materials MIT OpenCourseWare
+http://ocw.mit.edu/OcwWeb/web/home/home/index.htm|creationTime|2007-10-05T21:11:48Z
+http://archive.org/web/|creationDate|2017-01-25
+http://archive.org/web/|tag|http://www.semanlink.net/tag/semanlink_related
+http://archive.org/web/|tag|http://www.semanlink.net/tag/archive
+http://archive.org/web/|tag|http://www.semanlink.net/tag/bookmarks
+http://archive.org/web/|title|Internet Archive: Wayback Machine
+http://archive.org/web/|creationTime|2017-01-25T23:43:42Z
+http://mappings.dbpedia.org/index.php/Main_Page|creationDate|2012-11-19
+http://mappings.dbpedia.org/index.php/Main_Page|tag|http://www.semanlink.net/tag/dbpedia
+http://mappings.dbpedia.org/index.php/Main_Page|comment|help to enhance the information in DBpedia.
+http://mappings.dbpedia.org/index.php/Main_Page|title|DBpedia Mappings Wiki
+http://mappings.dbpedia.org/index.php/Main_Page|creationTime|2012-11-19T15:26:54Z
+http://mednews.stanford.edu/stanmed/2005winter/rna.html|creationDate|2005-09-12
+http://mednews.stanford.edu/stanmed/2005winter/rna.html|tag|http://www.semanlink.net/tag/arn
+http://mednews.stanford.edu/stanmed/2005winter/rna.html|title|Secret life of RNA- Stanford Medicine Magazine - Stanford University School of Medicine
+http://clarkparsia.com/weblog/2007/08/12/understanding-swrl-part-1/|creationDate|2007-08-23
+http://clarkparsia.com/weblog/2007/08/12/understanding-swrl-part-1/|tag|http://www.semanlink.net/tag/swrl
+http://clarkparsia.com/weblog/2007/08/12/understanding-swrl-part-1/|tag|http://www.semanlink.net/tag/bijan_parsia
+http://clarkparsia.com/weblog/2007/08/12/understanding-swrl-part-1/|comment|SWRL is something of a de facto standard for extending OWL with rules with DL Safe SWRL rules (as I shall canonically call them) being the most commonly implemented variant. <br/>
+http://clarkparsia.com/weblog/2007/08/12/understanding-swrl-part-1/|title|Thinking Clearly» Understanding SWRL (Part 1) +http://clarkparsia.com/weblog/2007/08/12/understanding-swrl-part-1/|creationTime|2007-08-23T23:58:29Z +http://www.w3.org/2001/tag/group/track/issues/57|creationDate|2012-04-11 +http://www.w3.org/2001/tag/group/track/issues/57|tag|http://www.semanlink.net/tag/httprange_14 +http://www.w3.org/2001/tag/group/track/issues/57|title|ISSUE-57: Mechanisms for obtaining information about the meaning of a given URI - Technical Architecture Group Tracker +http://www.w3.org/2001/tag/group/track/issues/57|creationTime|2012-04-11T13:40:40Z +http://latta.blog.lemonde.fr/2011/12/04/socrates-le-romantique-et-le-revolutionnaire/|creationDate|2011-12-04 +http://latta.blog.lemonde.fr/2011/12/04/socrates-le-romantique-et-le-revolutionnaire/|tag|http://www.semanlink.net/tag/football +http://latta.blog.lemonde.fr/2011/12/04/socrates-le-romantique-et-le-revolutionnaire/|tag|http://www.semanlink.net/tag/bresil +http://latta.blog.lemonde.fr/2011/12/04/socrates-le-romantique-et-le-revolutionnaire/|title|Socrates : le romantique et le révolutionnaire +http://latta.blog.lemonde.fr/2011/12/04/socrates-le-romantique-et-le-revolutionnaire/|creationTime|2011-12-04T21:32:24Z +https://github.com/iliaschalkidis/ELMo-keras|creationDate|2018-11-14 +https://github.com/iliaschalkidis/ELMo-keras|tag|http://www.semanlink.net/tag/elmo +https://github.com/iliaschalkidis/ELMo-keras|tag|http://www.semanlink.net/tag/keras +https://github.com/iliaschalkidis/ELMo-keras|tag|http://www.semanlink.net/tag/github_project +https://github.com/iliaschalkidis/ELMo-keras|comment|"based on the tensorflow implementation presented by Allen NLP +" +https://github.com/iliaschalkidis/ELMo-keras|title|iliaschalkidis/ELMo-keras: Re-implementation of ELMo on Keras +https://github.com/iliaschalkidis/ELMo-keras|creationTime|2018-11-14T21:32:37Z +https://fr.wikipedia.org/wiki/Timbuktu_(film)|creationDate|2019-05-14 +https://fr.wikipedia.org/wiki/Timbuktu_(film)|tag|http://www.semanlink.net/tag/fondamentalisme_islamique +https://fr.wikipedia.org/wiki/Timbuktu_(film)|tag|http://www.semanlink.net/tag/film +https://fr.wikipedia.org/wiki/Timbuktu_(film)|tag|http://www.semanlink.net/tag/tombouctou +https://fr.wikipedia.org/wiki/Timbuktu_(film)|comment|film franco-mauritanien réalisé par Abderrahmane Sissako +https://fr.wikipedia.org/wiki/Timbuktu_(film)|title|Timbuktu (film) +https://fr.wikipedia.org/wiki/Timbuktu_(film)|creationTime|2019-05-14T21:21:56Z +http://www.phildawes.net/blog/2007/04/30/some-ideas-for-static-triple-indexing/|creationDate|2007-05-20 +http://www.phildawes.net/blog/2007/04/30/some-ideas-for-static-triple-indexing/|tag|http://www.semanlink.net/tag/triplestore +http://www.phildawes.net/blog/2007/04/30/some-ideas-for-static-triple-indexing/|title|Phil Dawes’ Stuff » Some ideas for static triple indexing +http://www.phildawes.net/blog/2007/04/30/some-ideas-for-static-triple-indexing/|creationTime|2007-05-20T12:35:56Z +https://arxiv.org/abs/1601.01343|creationDate|2019-01-27 +https://arxiv.org/abs/1601.01343|tag|http://www.semanlink.net/tag/ikuya_yamada +https://arxiv.org/abs/1601.01343|tag|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +https://arxiv.org/abs/1601.01343|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1601.01343|tag|http://www.semanlink.net/tag/these_irit_renault_biblio_initiale +https://arxiv.org/abs/1601.01343|tag|http://www.semanlink.net/tag/wikipedia2vec 
+https://arxiv.org/abs/1601.01343|tag|http://www.semanlink.net/tag/entity_linking +https://arxiv.org/abs/1601.01343|tag|http://www.semanlink.net/tag/named_entity_recognition +https://arxiv.org/abs/1601.01343|arxiv_author|Ikuya Yamada +https://arxiv.org/abs/1601.01343|arxiv_author|Yoshiyasu Takefuji +https://arxiv.org/abs/1601.01343|arxiv_author|Hiroyuki Shindo +https://arxiv.org/abs/1601.01343|arxiv_author|Hideaki Takeda +https://arxiv.org/abs/1601.01343|comment|"> An embedding method specifically **designed for NED** that jointly **maps words and entities into the same continuous vector space**. +> We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words + +Technique later used in [Wikipedia2Vec](doc:?uri=https%3A%2F%2Fwikipedia2vec.github.io%2Fwikipedia2vec%2F), by the same team. [Neural Attentive Bag-of-Entities Model for Text Classification](https://arxiv.org/abs/1909.01259) uses Wikipedia2Vec model." +https://arxiv.org/abs/1601.01343|relatedDoc|https://wikipedia2vec.github.io/wikipedia2vec/ +https://arxiv.org/abs/1601.01343|relatedDoc|http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b +https://arxiv.org/abs/1601.01343|title|[1601.01343] Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation +https://arxiv.org/abs/1601.01343|creationTime|2019-01-27T15:29:16Z +https://arxiv.org/abs/1601.01343|arxiv_summary|"Named Entity Disambiguation (NED) refers to the task of resolving multiple +named entity mentions in a document to their correct references in a knowledge +base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method +specifically designed for NED. The proposed method jointly maps words and +entities into the same continuous vector space. We extend the skip-gram model +by using two models. The KB graph model learns the relatedness of entities +using the link structure of the KB, whereas the anchor context model aims to +align vectors such that similar words and entities occur close to one another +in the vector space by leveraging KB anchors and their context words. By +combining contexts based on the proposed embedding with standard NED features, +we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset +and 85.2% on the TAC 2010 dataset." 
+https://arxiv.org/abs/1601.01343|arxiv_firstAuthor|Ikuya Yamada +https://arxiv.org/abs/1601.01343|arxiv_updated|2016-06-10T01:51:26Z +https://arxiv.org/abs/1601.01343|arxiv_title|Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation +https://arxiv.org/abs/1601.01343|arxiv_published|2016-01-06T22:19:20Z +https://arxiv.org/abs/1601.01343|arxiv_num|1601.01343 +https://www.researchgate.net/publication/321841361_Text_feature_extraction_based_on_deep_learning_a_review|creationDate|2018-08-13 +https://www.researchgate.net/publication/321841361_Text_feature_extraction_based_on_deep_learning_a_review|tag|http://www.semanlink.net/tag/survey +https://www.researchgate.net/publication/321841361_Text_feature_extraction_based_on_deep_learning_a_review|tag|http://www.semanlink.net/tag/feature_learning +https://www.researchgate.net/publication/321841361_Text_feature_extraction_based_on_deep_learning_a_review|tag|http://www.semanlink.net/tag/text_feature_extraction +https://www.researchgate.net/publication/321841361_Text_feature_extraction_based_on_deep_learning_a_review|comment|"outlines the common methods used in +text feature extraction first, and then expands frequently used deep learning methods in text feature extraction and +its applications, and forecasts the application of deep learning in feature extraction" +https://www.researchgate.net/publication/321841361_Text_feature_extraction_based_on_deep_learning_a_review|title|Text feature extraction based on deep learning: a review (2017) +https://www.researchgate.net/publication/321841361_Text_feature_extraction_based_on_deep_learning_a_review|creationTime|2018-08-13T14:21:24Z +http://iml.univ-mrs.fr/~arauzy/aralia/benchmark.html|creationDate|2010-11-12 +http://iml.univ-mrs.fr/~arauzy/aralia/benchmark.html|tag|http://www.semanlink.net/tag/boolean +http://iml.univ-mrs.fr/~arauzy/aralia/benchmark.html|tag|http://www.semanlink.net/tag/benchmark +http://iml.univ-mrs.fr/~arauzy/aralia/benchmark.html|comment|description of ARALIA format +http://iml.univ-mrs.fr/~arauzy/aralia/benchmark.html|title|A Benchmark of Boolean Formulae +http://iml.univ-mrs.fr/~arauzy/aralia/benchmark.html|creationTime|2010-11-12T15:27:08Z +http://tw.rpi.edu/weblog/2009/06/19/i-will-pay-delicious-100-for-hierarchical-tagging/|creationDate|2009-06-22 +http://tw.rpi.edu/weblog/2009/06/19/i-will-pay-delicious-100-for-hierarchical-tagging/|tag|http://www.semanlink.net/tag/semanlink_related +http://tw.rpi.edu/weblog/2009/06/19/i-will-pay-delicious-100-for-hierarchical-tagging/|tag|http://www.semanlink.net/tag/hierarchical_tags +http://tw.rpi.edu/weblog/2009/06/19/i-will-pay-delicious-100-for-hierarchical-tagging/|tag|http://www.semanlink.net/tag/tagging +http://tw.rpi.edu/weblog/2009/06/19/i-will-pay-delicious-100-for-hierarchical-tagging/|tag|http://www.semanlink.net/tag/del_icio_us +http://tw.rpi.edu/weblog/2009/06/19/i-will-pay-delicious-100-for-hierarchical-tagging/|title|The Tetherless World Weblog » I will pay delicious $100 for hierarchical tagging +http://tw.rpi.edu/weblog/2009/06/19/i-will-pay-delicious-100-for-hierarchical-tagging/|creationTime|2009-06-22T13:03:26Z +https://medium.com/jatana/unsupervised-text-summarization-using-sentence-embeddings-adb15ce83db1|creationDate|2018-10-09 +https://medium.com/jatana/unsupervised-text-summarization-using-sentence-embeddings-adb15ce83db1|tag|http://www.semanlink.net/tag/sentence_embeddings 
+https://medium.com/jatana/unsupervised-text-summarization-using-sentence-embeddings-adb15ce83db1|tag|http://www.semanlink.net/tag/nlp_sample_code +https://medium.com/jatana/unsupervised-text-summarization-using-sentence-embeddings-adb15ce83db1|tag|http://www.semanlink.net/tag/automatic_summarization +https://medium.com/jatana/unsupervised-text-summarization-using-sentence-embeddings-adb15ce83db1|title|Unsupervised Text Summarization using Sentence Embeddings +https://medium.com/jatana/unsupervised-text-summarization-using-sentence-embeddings-adb15ce83db1|creationTime|2018-10-09T10:15:02Z +http://www.zdnet.com/with-a-mix-of-arduino-raspberry-pi-and-fun-maker-box-hopes-to-bolster-africas-future-tech-skills-7000023770/|creationDate|2013-12-14 +http://www.zdnet.com/with-a-mix-of-arduino-raspberry-pi-and-fun-maker-box-hopes-to-bolster-africas-future-tech-skills-7000023770/|tag|http://www.semanlink.net/tag/afrique +http://www.zdnet.com/with-a-mix-of-arduino-raspberry-pi-and-fun-maker-box-hopes-to-bolster-africas-future-tech-skills-7000023770/|tag|http://www.semanlink.net/tag/arduino +http://www.zdnet.com/with-a-mix-of-arduino-raspberry-pi-and-fun-maker-box-hopes-to-bolster-africas-future-tech-skills-7000023770/|title|With a mix of Arduino, Raspberry Pi and fun, Maker Box hopes to bolster Africa's future tech skills ZDNet +http://www.zdnet.com/with-a-mix-of-arduino-raspberry-pi-and-fun-maker-box-hopes-to-bolster-africas-future-tech-skills-7000023770/|creationTime|2013-12-14T19:32:02Z +https://www.ipbes.net/|creationDate|2018-03-26 +https://www.ipbes.net/|tag|http://www.semanlink.net/tag/crise_ecologique +https://www.ipbes.net/|title|IPBES Science and policy for people and nature +https://www.ipbes.net/|creationTime|2018-03-26T23:24:38Z +http://ruben.verborgh.org/blog/2013/04/30/lightning-fast-rdf-in-javascript/|creationDate|2015-01-31 +http://ruben.verborgh.org/blog/2013/04/30/lightning-fast-rdf-in-javascript/|tag|http://www.semanlink.net/tag/ruben_verborgh +http://ruben.verborgh.org/blog/2013/04/30/lightning-fast-rdf-in-javascript/|tag|http://www.semanlink.net/tag/javascript_rdf_parser +http://ruben.verborgh.org/blog/2013/04/30/lightning-fast-rdf-in-javascript/|tag|http://www.semanlink.net/tag/javascript_rdf +http://ruben.verborgh.org/blog/2013/04/30/lightning-fast-rdf-in-javascript/|tag|http://www.semanlink.net/tag/turtle +http://ruben.verborgh.org/blog/2013/04/30/lightning-fast-rdf-in-javascript/|comment|Writing a spec-compatible Turtle parser has been an interesting journey. 
+http://ruben.verborgh.org/blog/2013/04/30/lightning-fast-rdf-in-javascript/|title|Lightning-fast RDF in JavaScript Ruben Verborgh +http://ruben.verborgh.org/blog/2013/04/30/lightning-fast-rdf-in-javascript/|creationTime|2015-01-31T00:27:41Z +http://www.youtube.com/watch?v=bj7AbJ0ZYCk|creationDate|2009-08-25 +http://www.youtube.com/watch?v=bj7AbJ0ZYCk|tag|http://www.semanlink.net/tag/virtuoso +http://www.youtube.com/watch?v=bj7AbJ0ZYCk|tag|http://www.semanlink.net/tag/kingsley_idehen +http://www.youtube.com/watch?v=bj7AbJ0ZYCk|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=bj7AbJ0ZYCk|title|Virtuoso: generating RDF Views +http://www.youtube.com/watch?v=bj7AbJ0ZYCk|creationTime|2009-08-25T13:33:29Z +http://web.stanford.edu/class/cs224n/|creationDate|2017-09-10 +http://web.stanford.edu/class/cs224n/|tag|http://www.semanlink.net/tag/chris_manning +http://web.stanford.edu/class/cs224n/|tag|http://www.semanlink.net/tag/deep_nlp +http://web.stanford.edu/class/cs224n/|tag|http://www.semanlink.net/tag/nlp_stanford +http://web.stanford.edu/class/cs224n/|tag|http://www.semanlink.net/tag/richard_socher +http://web.stanford.edu/class/cs224n/|comment|"[Notes winter17](https://github.com/stanfordnlp/cs224n-winter17-notes) + +" +http://web.stanford.edu/class/cs224n/|title|CS224n: Natural Language Processing with Deep Learning +http://web.stanford.edu/class/cs224n/|creationTime|2017-09-10T12:32:37Z +http://machinelearningmastery.com/java-machine-learning/|creationDate|2015-12-30 +http://machinelearningmastery.com/java-machine-learning/|tag|http://www.semanlink.net/tag/java +http://machinelearningmastery.com/java-machine-learning/|tag|http://www.semanlink.net/tag/machine_learning_tool +http://machinelearningmastery.com/java-machine-learning/|title|Java Machine Learning +http://machinelearningmastery.com/java-machine-learning/|creationTime|2015-12-30T00:25:58Z +https://arxiv.org/abs/1803.11175|creationDate|2018-05-29 +https://arxiv.org/abs/1803.11175|tag|http://www.semanlink.net/tag/ray_kurzweil +https://arxiv.org/abs/1803.11175|tag|http://www.semanlink.net/tag/google_research +https://arxiv.org/abs/1803.11175|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1803.11175|tag|http://www.semanlink.net/tag/sentence_embeddings +https://arxiv.org/abs/1803.11175|tag|http://www.semanlink.net/tag/dans_deep_averaging_neural_networks +https://arxiv.org/abs/1803.11175|tag|http://www.semanlink.net/tag/transfer_learning +https://arxiv.org/abs/1803.11175|tag|http://www.semanlink.net/tag/embeddings +https://arxiv.org/abs/1803.11175|arxiv_author|Yun-Hsuan Sung +https://arxiv.org/abs/1803.11175|arxiv_author|Brian Strope +https://arxiv.org/abs/1803.11175|arxiv_author|Steve Yuan +https://arxiv.org/abs/1803.11175|arxiv_author|Ray Kurzweil +https://arxiv.org/abs/1803.11175|arxiv_author|Mario Guajardo-Cespedes +https://arxiv.org/abs/1803.11175|arxiv_author|Noah Constant +https://arxiv.org/abs/1803.11175|arxiv_author|Yinfei Yang +https://arxiv.org/abs/1803.11175|arxiv_author|Daniel Cer +https://arxiv.org/abs/1803.11175|arxiv_author|Nan Hua +https://arxiv.org/abs/1803.11175|arxiv_author|Rhomni St. John +https://arxiv.org/abs/1803.11175|arxiv_author|Chris Tar +https://arxiv.org/abs/1803.11175|arxiv_author|Sheng-yi Kong +https://arxiv.org/abs/1803.11175|arxiv_author|Nicole Limtiaco +https://arxiv.org/abs/1803.11175|comment|"models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. 
+ +> With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task + +mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture" +https://arxiv.org/abs/1803.11175|title|[1803.11175] Universal Sentence Encoder +https://arxiv.org/abs/1803.11175|creationTime|2018-05-29T16:50:18Z +https://arxiv.org/abs/1803.11175|arxiv_summary|"We present models for encoding sentences into embedding vectors that +specifically target transfer learning to other NLP tasks. The models are +efficient and result in accurate performance on diverse transfer tasks. Two +variants of the encoding models allow for trade-offs between accuracy and +compute resources. For both variants, we investigate and report the +relationship between model complexity, resource consumption, the availability +of transfer task training data, and task performance. Comparisons are made with +baselines that use word level transfer learning via pretrained word embeddings +as well as baselines do not use any transfer learning. We find that transfer +learning using sentence embeddings tends to outperform word level transfer. +With transfer learning via sentence embeddings, we observe surprisingly good +performance with minimal amounts of supervised training data for a transfer +task. We obtain encouraging results on Word Embedding Association Tests (WEAT) +targeted at detecting model bias. Our pre-trained sentence encoding models are +made freely available for download and on TF Hub." +https://arxiv.org/abs/1803.11175|arxiv_firstAuthor|Daniel Cer +https://arxiv.org/abs/1803.11175|arxiv_updated|2018-04-12T17:03:44Z +https://arxiv.org/abs/1803.11175|arxiv_title|Universal Sentence Encoder +https://arxiv.org/abs/1803.11175|arxiv_published|2018-03-29T17:43:03Z +https://arxiv.org/abs/1803.11175|arxiv_num|1803.11175 +http://wiki.fasterxml.com/JacksonInFiveMinutes|creationDate|2015-03-09 +http://wiki.fasterxml.com/JacksonInFiveMinutes|tag|http://www.semanlink.net/tag/jackson +http://wiki.fasterxml.com/JacksonInFiveMinutes|tag|http://www.semanlink.net/tag/tutorial +http://wiki.fasterxml.com/JacksonInFiveMinutes|title|JacksonInFiveMinutes - FasterXML Wiki +http://wiki.fasterxml.com/JacksonInFiveMinutes|creationTime|2015-03-09T19:13:41Z +http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|creationDate|2019-05-23 +http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|tag|http://www.semanlink.net/tag/links +http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|tag|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|tag|http://www.semanlink.net/tag/research_papers +http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|tag|http://www.semanlink.net/tag/nlp_current_state +http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|comment|we can learn hierarchical contextualized representations on web-scale datasets leveraging unsupervised (or self-supervised) signals such as language modeling and transfer this pre-training to downstream tasks 
+http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|title|🌻 The Best and Most Current of Modern Natural Language Processing +http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|bookmarkOf|https://medium.com/huggingface/the-best-and-most-current-of-modern-natural-language-processing-5055f409a1d1 +http://www.semanlink.net/doc/2019/05/%F0%9F%8C%BB_the_best_and_most_current_of|creationTime|2019-05-23T10:48:49Z +http://www.snee.com/bobdc.blog/2019/01/querying-machine-learning-dist.html|creationDate|2019-01-29 +http://www.snee.com/bobdc.blog/2019/01/querying-machine-learning-dist.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2019/01/querying-machine-learning-dist.html|tag|http://www.semanlink.net/tag/sparql +http://www.snee.com/bobdc.blog/2019/01/querying-machine-learning-dist.html|tag|http://www.semanlink.net/tag/distributional_semantics +http://www.snee.com/bobdc.blog/2019/01/querying-machine-learning-dist.html|title|Querying machine learning distributional semantics with SPARQL - bobdc.blog +http://www.snee.com/bobdc.blog/2019/01/querying-machine-learning-dist.html|creationTime|2019-01-29T00:59:35Z +https://gist.github.com/mommi84/07f7c044fa18aaaa7b5133230207d8d4|creationDate|2018-01-03 +https://gist.github.com/mommi84/07f7c044fa18aaaa7b5133230207d8d4|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +https://gist.github.com/mommi84/07f7c044fa18aaaa7b5133230207d8d4|comment|lists libraries and approaches for knowledge graph embeddings +https://gist.github.com/mommi84/07f7c044fa18aaaa7b5133230207d8d4|title|Awesome Knowledge Graph Embedding Approaches +https://gist.github.com/mommi84/07f7c044fa18aaaa7b5133230207d8d4|creationTime|2018-01-03T16:41:48Z +http://pezziardi.net/2014/05/02/5-minutes-de-franche-rigolade-open-data-devant-5-ministres-a-la-conference-de-paris/|creationDate|2014-07-14 +http://pezziardi.net/2014/05/02/5-minutes-de-franche-rigolade-open-data-devant-5-ministres-a-la-conference-de-paris/|tag|http://www.semanlink.net/tag/open_data +http://pezziardi.net/2014/05/02/5-minutes-de-franche-rigolade-open-data-devant-5-ministres-a-la-conference-de-paris/|tag|http://www.semanlink.net/tag/henri_verdier +http://pezziardi.net/2014/05/02/5-minutes-de-franche-rigolade-open-data-devant-5-ministres-a-la-conference-de-paris/|tag|http://www.semanlink.net/tag/data_gouv_fr +http://pezziardi.net/2014/05/02/5-minutes-de-franche-rigolade-open-data-devant-5-ministres-a-la-conference-de-paris/|title|5 minutes de franche (rigolade) Open Data devant 5 ministres à la Conférence de Paris L'informatique Conviviale +http://pezziardi.net/2014/05/02/5-minutes-de-franche-rigolade-open-data-devant-5-ministres-a-la-conference-de-paris/|creationTime|2014-07-14T02:59:23Z +http://mediterraneanceramics.blogspot.com/2008/12/rdfa-at-ilion.html|creationDate|2008-12-08 +http://mediterraneanceramics.blogspot.com/2008/12/rdfa-at-ilion.html|tag|http://www.semanlink.net/tag/linked_data +http://mediterraneanceramics.blogspot.com/2008/12/rdfa-at-ilion.html|tag|http://www.semanlink.net/tag/grece_antique +http://mediterraneanceramics.blogspot.com/2008/12/rdfa-at-ilion.html|tag|http://www.semanlink.net/tag/asie_mineure +http://mediterraneanceramics.blogspot.com/2008/12/rdfa-at-ilion.html|tag|http://www.semanlink.net/tag/rdfa +http://mediterraneanceramics.blogspot.com/2008/12/rdfa-at-ilion.html|comment|The Greek, Roman and Byzantine Pottery at Ilion (GRPBIlion) database exports its content as Linked Data using RDFa 
+http://mediterraneanceramics.blogspot.com/2008/12/rdfa-at-ilion.html|title|Mediterranean Ceramics: RDFa at Ilion +http://mediterraneanceramics.blogspot.com/2008/12/rdfa-at-ilion.html|creationTime|2008-12-08T11:33:21Z +http://binalytics.wordpress.com/2013/04/24/r-python-rapidminer-and-weka-testing-my-faith/|creationDate|2013-08-21 +http://binalytics.wordpress.com/2013/04/24/r-python-rapidminer-and-weka-testing-my-faith/|tag|http://www.semanlink.net/tag/python +http://binalytics.wordpress.com/2013/04/24/r-python-rapidminer-and-weka-testing-my-faith/|tag|http://www.semanlink.net/tag/data_mining_tools +http://binalytics.wordpress.com/2013/04/24/r-python-rapidminer-and-weka-testing-my-faith/|tag|http://www.semanlink.net/tag/r +http://binalytics.wordpress.com/2013/04/24/r-python-rapidminer-and-weka-testing-my-faith/|title|R, Python, RapidMiner and Weka. Testing my faith? BInalytics +http://binalytics.wordpress.com/2013/04/24/r-python-rapidminer-and-weka-testing-my-faith/|creationTime|2013-08-21T16:36:36Z +https://arxiv.org/abs/1802.04865|creationDate|2018-08-27 +https://arxiv.org/abs/1802.04865|tag|http://www.semanlink.net/tag/out_of_distribution_detection +https://arxiv.org/abs/1802.04865|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1802.04865|arxiv_author|Terrance DeVries +https://arxiv.org/abs/1802.04865|arxiv_author|Graham W. Taylor +https://arxiv.org/abs/1802.04865|title|[1802.04865] Learning Confidence for Out-of-Distribution Detection in Neural Networks +https://arxiv.org/abs/1802.04865|creationTime|2018-08-27T00:13:24Z +https://arxiv.org/abs/1802.04865|arxiv_summary|"Modern neural networks are very powerful predictive models, but they are +often incapable of recognizing when their predictions may be wrong. Closely +related to this is the task of out-of-distribution detection, where a network +must determine whether or not an input is outside of the set on which it is +expected to safely perform. To jointly address these issues, we propose a +method of learning confidence estimates for neural networks that is simple to +implement and produces intuitively interpretable outputs. We demonstrate that +on the task of out-of-distribution detection, our technique surpasses recently +proposed techniques which construct confidence based on the network's output +distribution, without requiring any additional labels or access to +out-of-distribution examples. Additionally, we address the problem of +calibrating out-of-distribution detectors, where we demonstrate that +misclassified in-distribution examples can be used as a proxy for +out-of-distribution examples." +https://arxiv.org/abs/1802.04865|arxiv_firstAuthor|Terrance DeVries +https://arxiv.org/abs/1802.04865|arxiv_updated|2018-02-13T21:31:36Z +https://arxiv.org/abs/1802.04865|arxiv_title|Learning Confidence for Out-of-Distribution Detection in Neural Networks +https://arxiv.org/abs/1802.04865|arxiv_published|2018-02-13T21:31:36Z +https://arxiv.org/abs/1802.04865|arxiv_num|1802.04865 +http://www.w3.org/TR/rdf-interfaces/|creationDate|2011-05-12 +http://www.w3.org/TR/rdf-interfaces/|tag|http://www.semanlink.net/tag/w3c_working_draft +http://www.w3.org/TR/rdf-interfaces/|tag|http://www.semanlink.net/tag/ivan_herman +http://www.w3.org/TR/rdf-interfaces/|comment|The RDF Interfaces Specification defines a set of standardized interfaces for working with RDF data in a programming environment. 
+http://www.w3.org/TR/rdf-interfaces/|title|RDF Interfaces 1.0
+http://www.w3.org/TR/rdf-interfaces/|creationTime|2011-05-12T22:25:08Z
+https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/|creationDate|2017-06-09
+https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/|tag|http://www.semanlink.net/tag/using_word_embedding
+https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/|tag|http://www.semanlink.net/tag/gensim
+https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/|tag|http://www.semanlink.net/tag/word2vec_howto
+https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/|tag|http://www.semanlink.net/tag/word_embedding
+https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/|tag|http://www.semanlink.net/tag/nlp_sample_code
+https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/|comment|"Types of word embeddings:
+
+- Frequency based Embedding
+ - Count Vector
+ - TF-IDF Vector
+ - Co-Occurrence Vector
+ - Co-occurrence matrix (with a fixed context window), size V*V or V * N (Vocab size * subset of V size) matrix.
+ - PCA or SVD: keeping the k most important eigenvalues
+- Prediction based Embedding
+ - CBOW (Continuous Bag Of Words). 1 hidden layer, one output layer. Predict the probability of a word given a context
+ - Skip-gram. Predict the probability of the context given a word
+
+Sample code using gensim"
+https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/|title|An Intuitive Understanding of Word Embeddings: From Count Vectors to Word2Vec
+https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/|creationTime|2017-06-09T17:48:39Z
+http://www.infosdelaplanete.org/4199/tendance-ces-milliardaires-qui-croient-sauver-la-planete.html|creationDate|2008-08-15
+http://www.infosdelaplanete.org/4199/tendance-ces-milliardaires-qui-croient-sauver-la-planete.html|tag|http://www.semanlink.net/tag/propriete_privee
+http://www.infosdelaplanete.org/4199/tendance-ces-milliardaires-qui-croient-sauver-la-planete.html|tag|http://www.semanlink.net/tag/protection_de_l_environnement
+http://www.infosdelaplanete.org/4199/tendance-ces-milliardaires-qui-croient-sauver-la-planete.html|tag|http://www.semanlink.net/tag/deforestation
+http://www.infosdelaplanete.org/4199/tendance-ces-milliardaires-qui-croient-sauver-la-planete.html|comment|La propriété privée est désormais le moyen retenu pour empêcher les promoteurs, l’industrie ou la négligence de ravager la nature. <br/>
+http://www.infosdelaplanete.org/4199/tendance-ces-milliardaires-qui-croient-sauver-la-planete.html|title|Infos de la Planète - Tendance : Ces milliardaires qui croient sauver la planète… - The Guardian (Royaume-Uni) - 2008-02-13 +http://www.infosdelaplanete.org/4199/tendance-ces-milliardaires-qui-croient-sauver-la-planete.html|creationTime|2008-08-15T11:49:41Z +http://blog.semantic-web.at/2011/04/20/the-hype-the-hope-and-the-lod2-soren-auer-engaded-in-the-next-generation-lod/|creationDate|2011-05-12 +http://blog.semantic-web.at/2011/04/20/the-hype-the-hope-and-the-lod2-soren-auer-engaded-in-the-next-generation-lod/|tag|http://www.semanlink.net/tag/soren_auer +http://blog.semantic-web.at/2011/04/20/the-hype-the-hope-and-the-lod2-soren-auer-engaded-in-the-next-generation-lod/|tag|http://www.semanlink.net/tag/lod2 +http://blog.semantic-web.at/2011/04/20/the-hype-the-hope-and-the-lod2-soren-auer-engaded-in-the-next-generation-lod/|title|The Semantic Puzzle The hype, the hope and the LOD2: Sören Auer engaged in the next generation LOD +http://blog.semantic-web.at/2011/04/20/the-hype-the-hope-and-the-lod2-soren-auer-engaded-in-the-next-generation-lod/|creationTime|2011-05-12T22:03:55Z +http://www.lemonde.fr/pixels/article/2017/10/18/intelligence-artificielle-toujours-plus-puissant-alphago-apprend-desormais-sans-donnees-humaines_5202931_4408996.html|creationDate|2017-10-18 +http://www.lemonde.fr/pixels/article/2017/10/18/intelligence-artificielle-toujours-plus-puissant-alphago-apprend-desormais-sans-donnees-humaines_5202931_4408996.html|tag|http://www.semanlink.net/tag/alphago +http://www.lemonde.fr/pixels/article/2017/10/18/intelligence-artificielle-toujours-plus-puissant-alphago-apprend-desormais-sans-donnees-humaines_5202931_4408996.html|title|Intelligence artificielle : toujours plus puissant, AlphaGo apprend désormais sans données humaines +http://www.lemonde.fr/pixels/article/2017/10/18/intelligence-artificielle-toujours-plus-puissant-alphago-apprend-desormais-sans-donnees-humaines_5202931_4408996.html|creationTime|2017-10-18T22:38:12Z +http://arxiv.org/abs/1506.01094|creationDate|2015-10-31 +http://arxiv.org/abs/1506.01094|tag|http://www.semanlink.net/tag/path_queries +http://arxiv.org/abs/1506.01094|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/1506.01094|tag|http://www.semanlink.net/tag/embeddings +http://arxiv.org/abs/1506.01094|tag|http://www.semanlink.net/tag/knowledge_graph +http://arxiv.org/abs/1506.01094|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://arxiv.org/abs/1506.01094|tag|http://www.semanlink.net/tag/knowledge_graph_completion +http://arxiv.org/abs/1506.01094|arxiv_author|Percy Liang +http://arxiv.org/abs/1506.01094|arxiv_author|Kelvin Guu +http://arxiv.org/abs/1506.01094|arxiv_author|John Miller +http://arxiv.org/abs/1506.01094|comment|"Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new ""compositional"" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy." 
+http://arxiv.org/abs/1506.01094|title|[1506.01094] Traversing Knowledge Graphs in Vector Space +http://arxiv.org/abs/1506.01094|creationTime|2015-10-31T00:11:12Z +http://arxiv.org/abs/1506.01094|arxiv_summary|"Path queries on a knowledge graph can be used to answer compositional +questions such as ""What languages are spoken by people living in Lisbon?"". +However, knowledge graphs often have missing facts (edges) which disrupts path +queries. Recent models for knowledge base completion impute missing facts by +embedding knowledge graphs in vector spaces. We show that these models can be +recursively applied to answer path queries, but that they suffer from cascading +errors. This motivates a new ""compositional"" training objective, which +dramatically improves all models' ability to answer path queries, in some cases +more than doubling accuracy. On a standard knowledge base completion task, we +also demonstrate that compositional training acts as a novel form of structural +regularization, reliably improving performance across all base models (reducing +errors by up to 43%) and achieving new state-of-the-art results." +http://arxiv.org/abs/1506.01094|arxiv_firstAuthor|Kelvin Guu +http://arxiv.org/abs/1506.01094|arxiv_updated|2015-08-19T05:16:24Z +http://arxiv.org/abs/1506.01094|arxiv_title|Traversing Knowledge Graphs in Vector Space +http://arxiv.org/abs/1506.01094|arxiv_published|2015-06-03T00:38:25Z +http://arxiv.org/abs/1506.01094|arxiv_num|1506.01094 +http://www.snipsnap.org/|creationDate|2006-02-06 +http://www.snipsnap.org/|tag|http://www.semanlink.net/tag/java +http://www.snipsnap.org/|tag|http://www.semanlink.net/tag/blog_software +http://www.snipsnap.org/|tag|http://www.semanlink.net/tag/wiki_software +http://www.snipsnap.org/|title|SnipSnap :: start SnipSnap +http://simile.mit.edu/piggy-bank/|creationDate|2005-06-01 +http://simile.mit.edu/piggy-bank/|tag|http://www.semanlink.net/tag/simile +http://simile.mit.edu/piggy-bank/|tag|http://www.semanlink.net/tag/firefox +http://simile.mit.edu/piggy-bank/|comment|extension to the Firefox web browser that turns it into a “Semantic Web browser”, letting you make use of existing information on the Web in more useful and flexible ways. 
+http://simile.mit.edu/piggy-bank/|title|SIMILE Piggy Bank +http://www.w3.org/2001/sw/wiki/images/8/83/20130909_rdfvalidation.pdf|creationDate|2015-02-18 +http://www.w3.org/2001/sw/wiki/images/8/83/20130909_rdfvalidation.pdf|tag|http://www.semanlink.net/tag/rdf_forms +http://www.w3.org/2001/sw/wiki/images/8/83/20130909_rdfvalidation.pdf|title|Forms to direct interaction with Linked Data Platform APIs +http://www.w3.org/2001/sw/wiki/images/8/83/20130909_rdfvalidation.pdf|creationTime|2015-02-18T15:15:03Z +https://www.theguardian.com/cities/2016/mar/18/story-of-cities-5-benin-city-edo-nigeria-mighty-medieval-capital-lost-without-trace?CMP=share_btn_tw|creationDate|2017-09-04 +https://www.theguardian.com/cities/2016/mar/18/story-of-cities-5-benin-city-edo-nigeria-mighty-medieval-capital-lost-without-trace?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/lost_city +https://www.theguardian.com/cities/2016/mar/18/story-of-cities-5-benin-city-edo-nigeria-mighty-medieval-capital-lost-without-trace?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/nigeria +https://www.theguardian.com/cities/2016/mar/18/story-of-cities-5-benin-city-edo-nigeria-mighty-medieval-capital-lost-without-trace?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/benin +https://www.theguardian.com/cities/2016/mar/18/story-of-cities-5-benin-city-edo-nigeria-mighty-medieval-capital-lost-without-trace?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +https://www.theguardian.com/cities/2016/mar/18/story-of-cities-5-benin-city-edo-nigeria-mighty-medieval-capital-lost-without-trace?CMP=share_btn_tw|title|Benin City, the mighty medieval capital now lost without trace Cities The Guardian +https://www.theguardian.com/cities/2016/mar/18/story-of-cities-5-benin-city-edo-nigeria-mighty-medieval-capital-lost-without-trace?CMP=share_btn_tw|creationTime|2017-09-04T21:04:00Z +https://lilianweng.github.io/lil-log/2019/01/31/generalized-language-models.html|creationDate|2019-02-10 +https://lilianweng.github.io/lil-log/2019/01/31/generalized-language-models.html|tag|http://www.semanlink.net/tag/contextualised_word_representations +https://lilianweng.github.io/lil-log/2019/01/31/generalized-language-models.html|tag|http://www.semanlink.net/tag/pre_trained_language_models +https://lilianweng.github.io/lil-log/2019/01/31/generalized-language-models.html|tag|http://www.semanlink.net/tag/lilian_weng +https://lilianweng.github.io/lil-log/2019/01/31/generalized-language-models.html|tag|http://www.semanlink.net/tag/language_model +https://lilianweng.github.io/lil-log/2019/01/31/generalized-language-models.html|tag|http://www.semanlink.net/tag/survey +https://lilianweng.github.io/lil-log/2019/01/31/generalized-language-models.html|title|Generalized Language Models +https://lilianweng.github.io/lil-log/2019/01/31/generalized-language-models.html|creationTime|2019-02-10T19:15:29Z +https://towardsdatascience.com/neural-network-embeddings-explained-4d028e6f0526|creationDate|2018-10-09 +https://towardsdatascience.com/neural-network-embeddings-explained-4d028e6f0526|tag|http://www.semanlink.net/tag/entity_embeddings +https://towardsdatascience.com/neural-network-embeddings-explained-4d028e6f0526|tag|http://www.semanlink.net/tag/nlp_sample_code +https://towardsdatascience.com/neural-network-embeddings-explained-4d028e6f0526|tag|http://www.semanlink.net/tag/embeddings +https://towardsdatascience.com/neural-network-embeddings-explained-4d028e6f0526|comment|"> How deep learning can represent War and Peace as a vector + 
+set-up is based on the assumption that books whose Wikipedia page links to similar Wikipedia pages are similar to one another"
+https://towardsdatascience.com/neural-network-embeddings-explained-4d028e6f0526|title|Neural Network Embeddings Explained – Towards Data Science
+https://towardsdatascience.com/neural-network-embeddings-explained-4d028e6f0526|creationTime|2018-10-09T10:02:39Z
+http://www.france5.fr/programmes/articles/actu-societe/692-la-republique-des-clandestins.php|creationDate|2010-01-26
+http://www.france5.fr/programmes/articles/actu-societe/692-la-republique-des-clandestins.php|tag|http://www.semanlink.net/tag/immigration
+http://www.france5.fr/programmes/articles/actu-societe/692-la-republique-des-clandestins.php|comment|Ces types sont incroyablement courageux. Comme dirait Gado, ça fait pitié.
+http://www.france5.fr/programmes/articles/actu-societe/692-la-republique-des-clandestins.php|title|France 5 - La République des clandestins
+http://www.france5.fr/programmes/articles/actu-societe/692-la-republique-des-clandestins.php|creationTime|2010-01-26T01:19:26Z
+http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle|creationDate|2013-08-14
+http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle|tag|http://www.semanlink.net/tag/amazonie
+http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle|tag|http://www.semanlink.net/tag/arte
+http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle|tag|http://www.semanlink.net/tag/elevage
+http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle|tag|http://www.semanlink.net/tag/developpement_durable
+http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle|tag|http://www.semanlink.net/tag/documentaire_tv
+http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle|tag|http://www.semanlink.net/tag/bresil
+http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle|tag|http://www.semanlink.net/tag/foret
+http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle|title|Amazonie éternelle ARTE
+http://www.arte.tv/guide/fr/048210-000/amazonie-eternelle|creationTime|2013-08-14T00:17:38Z
+http://en.wikipedia.org/wiki/Czech_Dream|creationDate|2007-05-09
+http://en.wikipedia.org/wiki/Czech_Dream|tag|http://www.semanlink.net/tag/republique_tcheque
+http://en.wikipedia.org/wiki/Czech_Dream|tag|http://www.semanlink.net/tag/hoax
+http://en.wikipedia.org/wiki/Czech_Dream|tag|http://www.semanlink.net/tag/rigolo
+http://en.wikipedia.org/wiki/Czech_Dream|tag|http://www.semanlink.net/tag/film
+http://en.wikipedia.org/wiki/Czech_Dream|tag|http://www.semanlink.net/tag/documentaire
+http://en.wikipedia.org/wiki/Czech_Dream|tag|http://www.semanlink.net/tag/publicite
+http://en.wikipedia.org/wiki/Czech_Dream|comment|"Documentary film about a large-scale hoax on the advertising industries and the Czech public, culminating in the ""opening event"" of a fake ""hypermarket"". The advertising campaign slogans were ""don't come"" and ""don't spend"", etc. <br/>
+" +http://en.wikipedia.org/wiki/Czech_Dream|title|Czech Dream +http://en.wikipedia.org/wiki/Czech_Dream|creationTime|2007-05-09T00:01:23Z +http://www.lemonde.fr/opinions/article/2010/03/27/taxe-carbone-et-inegalites-par-herve-kempt_1325039_3232.html|creationDate|2010-03-29 +http://www.lemonde.fr/opinions/article/2010/03/27/taxe-carbone-et-inegalites-par-herve-kempt_1325039_3232.html|tag|http://www.semanlink.net/tag/taxe_carbone +http://www.lemonde.fr/opinions/article/2010/03/27/taxe-carbone-et-inegalites-par-herve-kempt_1325039_3232.html|tag|http://www.semanlink.net/tag/herve_kempf +http://www.lemonde.fr/opinions/article/2010/03/27/taxe-carbone-et-inegalites-par-herve-kempt_1325039_3232.html|tag|http://www.semanlink.net/tag/inegalites +http://www.lemonde.fr/opinions/article/2010/03/27/taxe-carbone-et-inegalites-par-herve-kempt_1325039_3232.html|title|Taxe carbone et inégalités +http://www.lemonde.fr/opinions/article/2010/03/27/taxe-carbone-et-inegalites-par-herve-kempt_1325039_3232.html|creationTime|2010-03-29T08:49:48Z +http://www.lemonde.fr/opinions/article/2010/03/27/taxe-carbone-et-inegalites-par-herve-kempt_1325039_3232.html|source|Le Monde +http://www.lemonde.fr/opinions/article/2010/03/27/taxe-carbone-et-inegalites-par-herve-kempt_1325039_3232.html|date|2010-03-28 +http://autourduciel.blog.lemonde.fr/2014/09/21/maven-et-mom-2-nouveaux-satellites-pour-mars/|creationDate|2014-09-25 +http://autourduciel.blog.lemonde.fr/2014/09/21/maven-et-mom-2-nouveaux-satellites-pour-mars/|tag|http://www.semanlink.net/tag/exploration_marsienne +http://autourduciel.blog.lemonde.fr/2014/09/21/maven-et-mom-2-nouveaux-satellites-pour-mars/|tag|http://www.semanlink.net/tag/inde_moderne +http://autourduciel.blog.lemonde.fr/2014/09/21/maven-et-mom-2-nouveaux-satellites-pour-mars/|title|MAVEN et MOM, 2 nouveaux satellites pour Mars Autour du Ciel +http://autourduciel.blog.lemonde.fr/2014/09/21/maven-et-mom-2-nouveaux-satellites-pour-mars/|creationTime|2014-09-25T00:40:25Z +https://www.cbc.ca/news/technology/hemimastigotes-supra-kingdom-1.4715823|creationDate|2018-11-18 +https://www.cbc.ca/news/technology/hemimastigotes-supra-kingdom-1.4715823|tag|http://www.semanlink.net/tag/tree_of_life +https://www.cbc.ca/news/technology/hemimastigotes-supra-kingdom-1.4715823|comment|Hemimastigotes are more different from all other living things than animals are from fungi +https://www.cbc.ca/news/technology/hemimastigotes-supra-kingdom-1.4715823|title|Rare microbes lead scientists to discover new branch on the tree of life CBC News +https://www.cbc.ca/news/technology/hemimastigotes-supra-kingdom-1.4715823|creationTime|2018-11-18T10:31:17Z +http://datahub.io/|creationDate|2013-09-02 +http://datahub.io/|tag|http://www.semanlink.net/tag/okfn_datahub +http://datahub.io/|comment|"the free, powerful data management platform from the Open Knowledge Foundation
+""Give your data a home""" +http://datahub.io/|title|The Datahub +http://datahub.io/|creationTime|2013-09-02T11:04:56Z +http://www.businessinsider.com/spritz-speed-reading-gifs-2014-2|creationDate|2014-03-01 +http://www.businessinsider.com/spritz-speed-reading-gifs-2014-2|tag|http://www.semanlink.net/tag/spritz +http://www.businessinsider.com/spritz-speed-reading-gifs-2014-2|title|How To Read A 223-Page Novel In Just 77 Minutes +http://www.businessinsider.com/spritz-speed-reading-gifs-2014-2|creationTime|2014-03-01T13:13:28Z +http://www.abigailsee.com/2017/04/16/taming-rnns-for-better-summarization.html?utm_content=buffer2fba4&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-12-06 +http://www.abigailsee.com/2017/04/16/taming-rnns-for-better-summarization.html?utm_content=buffer2fba4&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/recurrent_neural_network +http://www.abigailsee.com/2017/04/16/taming-rnns-for-better-summarization.html?utm_content=buffer2fba4&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/automatic_summarization +http://www.abigailsee.com/2017/04/16/taming-rnns-for-better-summarization.html?utm_content=buffer2fba4&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Taming Recurrent Neural Networks for Better Summarization Abigail See +http://www.abigailsee.com/2017/04/16/taming-rnns-for-better-summarization.html?utm_content=buffer2fba4&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-12-06T23:32:43Z +https://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string|creationDate|2017-06-06 +https://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string|tag|http://www.semanlink.net/tag/stack_overflow +https://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string|tag|http://www.semanlink.net/tag/python_tips +https://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string|tag|http://www.semanlink.net/tag/diacritics +https://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string|title|What is the best way to remove accents in a Python unicode string? 
- Stack Overflow +https://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string|creationTime|2017-06-06T11:28:37Z +http://textminingonline.com/dive-into-nltk-part-v-using-stanford-text-analysis-tools-in-python|creationDate|2017-07-11 +http://textminingonline.com/dive-into-nltk-part-v-using-stanford-text-analysis-tools-in-python|tag|http://www.semanlink.net/tag/stanford_pos_tagger +http://textminingonline.com/dive-into-nltk-part-v-using-stanford-text-analysis-tools-in-python|tag|http://www.semanlink.net/tag/java_in_python +http://textminingonline.com/dive-into-nltk-part-v-using-stanford-text-analysis-tools-in-python|tag|http://www.semanlink.net/tag/nltk +http://textminingonline.com/dive-into-nltk-part-v-using-stanford-text-analysis-tools-in-python|comment|"[en cas de pb](https://gist.github.com/alvations/e1df0ba227e542955a8a) + +**including how to use Java NLP Tools in python** + +``` +export CLASSPATH=/Users/fps/_fps/DeveloperTools/stanford-postagger-full/stanford-postagger.jar # ATTENTION, stanford-postagger.jar, pas stanford-postagger-3.8.0.jar +export STANFORD_MODELS=/Users/fps/_fps/DeveloperTools/stanford-postagger-full/models +python +``` + +``` +from nltk.tag import StanfordPOSTagger + +st = StanfordPOSTagger('english-bidirectional-distsim.tagger') +st.tag('What is the airspeed of an unladen swallow ?'.split()) + +st = StanfordPOSTagger('french.tagger') +st.tag('Les plats servis sont toujours les mêmes et la qualité des plats est en nette baisse'.split()) + +``` + +[('Les', 'DET'), ('plats', 'NOUN'), ('servis', 'ADJ'), ('sont', 'VERB'), ('toujours', 'ADV'), ('les', 'DET'), ('mêmes', 'ADJ'), ('et', 'CONJ'), ('la', 'DET'), ('qualité', 'NOUN'), ('des', 'DET'), ('plats', 'NOUN'), ('est', 'VERB'), ('en', 'ADP'), ('nette', 'ADJ'), ('baisse', 'NOUN')] + + +" +http://textminingonline.com/dive-into-nltk-part-v-using-stanford-text-analysis-tools-in-python|title|Dive Into NLTK, Part V: Using Stanford Text Analysis Tools in Python – Text Mining Online +http://textminingonline.com/dive-into-nltk-part-v-using-stanford-text-analysis-tools-in-python|creationTime|2017-07-11T18:16:16Z +http://www.siteduzero.com/tuto-3-10663-1-partage-internet.html|creationDate|2007-03-02 +http://www.siteduzero.com/tuto-3-10663-1-partage-internet.html|tag|http://www.semanlink.net/tag/howto +http://www.siteduzero.com/tuto-3-10663-1-partage-internet.html|tag|http://www.semanlink.net/tag/mac_os_x +http://www.siteduzero.com/tuto-3-10663-1-partage-internet.html|title|Partage Internet sur mac osx +http://www.siteduzero.com/tuto-3-10663-1-partage-internet.html|creationTime|2007-03-02T22:44:11Z +http://www.space.com/13883-nasa-jupiter-moon-europa-lander-mission.html|creationDate|2011-12-26 +http://www.space.com/13883-nasa-jupiter-moon-europa-lander-mission.html|tag|http://www.semanlink.net/tag/jupiter_europe +http://www.space.com/13883-nasa-jupiter-moon-europa-lander-mission.html|tag|http://www.semanlink.net/tag/nasa +http://www.space.com/13883-nasa-jupiter-moon-europa-lander-mission.html|title|Jupiter's Moon Europa is Target for Possible NASA Lander Jupiter & Moons Europa & Search for Alien Life Space.com +http://www.space.com/13883-nasa-jupiter-moon-europa-lander-mission.html|creationTime|2011-12-26T21:38:04Z +https://www.udacity.com/self-driving-car|creationDate|2016-10-08 +https://www.udacity.com/self-driving-car|tag|http://www.semanlink.net/tag/driverless_car +https://www.udacity.com/self-driving-car|tag|http://www.semanlink.net/tag/open_source 
+https://www.udacity.com/self-driving-car|title|Open Source Self-Driving Car Udacity +https://www.udacity.com/self-driving-car|creationTime|2016-10-08T15:01:54Z +https://medium.com/@chengweizhang2012/how-to-do-unsupervised-clustering-with-keras-9e1284448437|creationDate|2018-06-09 +https://medium.com/@chengweizhang2012/how-to-do-unsupervised-clustering-with-keras-9e1284448437|tag|http://www.semanlink.net/tag/autoencoder +https://medium.com/@chengweizhang2012/how-to-do-unsupervised-clustering-with-keras-9e1284448437|tag|http://www.semanlink.net/tag/keras +https://medium.com/@chengweizhang2012/how-to-do-unsupervised-clustering-with-keras-9e1284448437|tag|http://www.semanlink.net/tag/cluster_analysis +https://medium.com/@chengweizhang2012/how-to-do-unsupervised-clustering-with-keras-9e1284448437|title|How to do Unsupervised Clustering with Keras – Chengwei Zhang – Medium +https://medium.com/@chengweizhang2012/how-to-do-unsupervised-clustering-with-keras-9e1284448437|creationTime|2018-06-09T09:23:35Z +http://www.semantic-web-days.net/|creationDate|2007-04-20 +http://www.semantic-web-days.net/|tag|http://www.semanlink.net/tag/semantic_web_conferences +http://www.semantic-web-days.net/|tag|http://www.semanlink.net/tag/rules +http://www.semantic-web-days.net/|comment|The Semantic Web Days portal wants to inform about Semantic Web conferences, news from the community, and the latest research results on Rules and Reasoning on the Web. +http://www.semantic-web-days.net/|title|Semantic Web Days +http://www.semantic-web-days.net/|creationTime|2007-04-20T00:36:58Z +https://en.wikipedia.org/wiki/Monstrous_moonshine|creationDate|2018-10-19 +https://en.wikipedia.org/wiki/Monstrous_moonshine|tag|http://www.semanlink.net/tag/mathematiques +https://en.wikipedia.org/wiki/Monstrous_moonshine|title|Monstrous moonshine - Wikipedia +https://en.wikipedia.org/wiki/Monstrous_moonshine|creationTime|2018-10-19T14:48:15Z +http://www.lycee-chateaubriand.fr/cru-atala/publications/logeat_antiquite.htm|creationDate|2006-05-26 +http://www.lycee-chateaubriand.fr/cru-atala/publications/logeat_antiquite.htm|tag|http://www.semanlink.net/tag/mission_voulet_chanoine +http://www.lycee-chateaubriand.fr/cru-atala/publications/logeat_antiquite.htm|tag|http://www.semanlink.net/tag/sarraounia_mangou +http://www.lycee-chateaubriand.fr/cru-atala/publications/logeat_antiquite.htm|tag|http://www.semanlink.net/tag/thucydide +http://www.lycee-chateaubriand.fr/cru-atala/publications/logeat_antiquite.htm|comment|"""Ce roman..."", (qui met en scène Voulet au moment de l'attaque du village de Sarranounia Mangou)... a fait émerger un vieux souvenir de Collège : une version grecque... où le général Brasidas, exhortait ses troupes poursuivies par les Barbares et inférieures en nombre, à se battre tout en faisant retraite. 
+" +http://www.lycee-chateaubriand.fr/cru-atala/publications/logeat_antiquite.htm|title|Version grecque +http://www.lemonde.fr/opinions/article/2010/04/10/la-banlieue-s-ennuie-par-tahar-ben-jelloun_1331700_3232.html|creationDate|2010-04-10 +http://www.lemonde.fr/opinions/article/2010/04/10/la-banlieue-s-ennuie-par-tahar-ben-jelloun_1331700_3232.html|tag|http://www.semanlink.net/tag/banlieue +http://www.lemonde.fr/opinions/article/2010/04/10/la-banlieue-s-ennuie-par-tahar-ben-jelloun_1331700_3232.html|tag|http://www.semanlink.net/tag/tahar_ben_jelloun +http://www.lemonde.fr/opinions/article/2010/04/10/la-banlieue-s-ennuie-par-tahar-ben-jelloun_1331700_3232.html|title|La banlieue s'ennuie, par Tahar Ben Jelloun +http://www.lemonde.fr/opinions/article/2010/04/10/la-banlieue-s-ennuie-par-tahar-ben-jelloun_1331700_3232.html|creationTime|2010-04-10T15:28:02Z +http://www.lemonde.fr/opinions/article/2010/04/10/la-banlieue-s-ennuie-par-tahar-ben-jelloun_1331700_3232.html|source|Le Monde +http://www.lemonde.fr/opinions/article/2010/04/10/la-banlieue-s-ennuie-par-tahar-ben-jelloun_1331700_3232.html|date|2010-04-11 +http://ebiquity.umbc.edu/blogger/2010/07/16/google-acquires-metaweb-and-freebase/|creationDate|2010-07-18 +http://ebiquity.umbc.edu/blogger/2010/07/16/google-acquires-metaweb-and-freebase/|tag|http://www.semanlink.net/tag/freebase +http://ebiquity.umbc.edu/blogger/2010/07/16/google-acquires-metaweb-and-freebase/|tag|http://www.semanlink.net/tag/metaweb +http://ebiquity.umbc.edu/blogger/2010/07/16/google-acquires-metaweb-and-freebase/|tag|http://www.semanlink.net/tag/google +http://ebiquity.umbc.edu/blogger/2010/07/16/google-acquires-metaweb-and-freebase/|title|Google acquires Metaweb and Freebase +http://ebiquity.umbc.edu/blogger/2010/07/16/google-acquires-metaweb-and-freebase/|creationTime|2010-07-18T19:28:55Z +http://techland.time.com/2013/04/02/an-interview-with-computing-pioneer-alan-kay/|creationDate|2013-04-04 +http://techland.time.com/2013/04/02/an-interview-with-computing-pioneer-alan-kay/|tag|http://www.semanlink.net/tag/alan_kay +http://techland.time.com/2013/04/02/an-interview-with-computing-pioneer-alan-kay/|title|An Interview with Computing Pioneer Alan Kay TIME.com +http://techland.time.com/2013/04/02/an-interview-with-computing-pioneer-alan-kay/|creationTime|2013-04-04T14:09:09Z +http://pyvandenbussche.info/2017/translating-embeddings-transe/|creationDate|2017-09-09 +http://pyvandenbussche.info/2017/translating-embeddings-transe/|tag|http://www.semanlink.net/tag/transe +http://pyvandenbussche.info/2017/translating-embeddings-transe/|tag|http://www.semanlink.net/tag/pierre_yves_vandenbussche +http://pyvandenbussche.info/2017/translating-embeddings-transe/|comment|Translating Embeddings (TransE), a method for the prediction of missing relationships in knowledge graphs ([paper](http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela)) +http://pyvandenbussche.info/2017/translating-embeddings-transe/|relatedDoc|http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela +http://pyvandenbussche.info/2017/translating-embeddings-transe/|title|Translating Embeddings (TransE) – Pierre-Yves Vandenbussche +http://pyvandenbussche.info/2017/translating-embeddings-transe/|creationTime|2017-09-09T14:02:59Z +http://www.nzdl.org/Kea/index.html|creationDate|2017-06-26 +http://www.nzdl.org/Kea/index.html|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.nzdl.org/Kea/index.html|comment|"can be used for free indexing or for indexing with 
a controlled vocabulary. Java implementation
+"
http://www.nzdl.org/Kea/index.html|title|Kea (Keyphrase Extraction Algorithm)
http://www.nzdl.org/Kea/index.html|creationTime|2017-06-26T14:52:19Z
http://hdl.handle.net/2142/97430|creationDate|2018-03-05
http://hdl.handle.net/2142/97430|tag|http://www.semanlink.net/tag/ip_ir_ml_ia
http://hdl.handle.net/2142/97430|tag|http://www.semanlink.net/tag/multi_label_classification
http://hdl.handle.net/2142/97430|tag|http://www.semanlink.net/tag/text_multi_label_classification
http://hdl.handle.net/2142/97430|comment|MA Thesis
http://hdl.handle.net/2142/97430|title|Examination of machine learning methods for multi-label classification of intellectual property documents (2017)
http://hdl.handle.net/2142/97430|creationTime|2018-03-05T11:41:06Z
http://data.semanticweb.org/pdfs/iswc/2009/in-use/paper143.pdf|creationDate|2010-05-31
http://data.semanticweb.org/pdfs/iswc/2009/in-use/paper143.pdf|tag|http://www.semanlink.net/tag/linking_enterprise_data
http://data.semanticweb.org/pdfs/iswc/2009/in-use/paper143.pdf|tag|http://www.semanlink.net/tag/linking_open_data
http://data.semanticweb.org/pdfs/iswc/2009/in-use/paper143.pdf|tag|http://www.semanlink.net/tag/ibm
http://data.semanticweb.org/pdfs/iswc/2009/in-use/paper143.pdf|comment|"A common vocabulary is vital to smooth business operation, yet codifying and maintaining an enterprise vocabulary is an arduous, manual task. We describe a process to automatically extract a domain specific vocabulary (terms and types) from unstructured data in the en- terprise guided by term definitions in Linked Open Data (LOD). We validate our techniques by applying them to the IT (Information Tech- nology) domain, taking 58 Gartner analyst reports and using two specific LOD sources – DBpedia and Freebase. We show initial findings that ad- dress the generalizability of these techniques for vocabulary extraction in new domains, such as the energy industry.
+
IBM Watson Research Center" +http://data.semanticweb.org/pdfs/iswc/2009/in-use/paper143.pdf|title|Extracting Enterprise Vocabularies Using Linked Open Data +http://data.semanticweb.org/pdfs/iswc/2009/in-use/paper143.pdf|creationTime|2010-05-31T12:06:48Z +http://infoheap.com/url-encode-decode-online-tool/|creationDate|2017-09-07 +http://infoheap.com/url-encode-decode-online-tool/|tag|http://www.semanlink.net/tag/uri_encoding +http://infoheap.com/url-encode-decode-online-tool/|tag|http://www.semanlink.net/tag/tools +http://infoheap.com/url-encode-decode-online-tool/|title|Url encode decode online tool - InfoHeap +http://infoheap.com/url-encode-decode-online-tool/|creationTime|2017-09-07T13:23:34Z +http://www.r-project.org/|creationDate|2013-12-03 +http://www.r-project.org/|tag|http://www.semanlink.net/tag/r +http://www.r-project.org/|title|The R Project for Statistical Computing +http://www.r-project.org/|creationTime|2013-12-03T13:50:03Z +https://www.debuggex.com/|creationDate|2015-05-22 +https://www.debuggex.com/|tag|http://www.semanlink.net/tag/regex +https://www.debuggex.com/|title|Debuggex: Online visual regex tester. JavaScript, Python, and PCRE. +https://www.debuggex.com/|creationTime|2015-05-22T00:15:11Z +http://coenraets.org/blog/2011/12/restful-services-with-jquery-and-java-using-jax-rs-and-jersey/|creationDate|2012-03-21 +http://coenraets.org/blog/2011/12/restful-services-with-jquery-and-java-using-jax-rs-and-jersey/|tag|http://www.semanlink.net/tag/jersey +http://coenraets.org/blog/2011/12/restful-services-with-jquery-and-java-using-jax-rs-and-jersey/|tag|http://www.semanlink.net/tag/howto +http://coenraets.org/blog/2011/12/restful-services-with-jquery-and-java-using-jax-rs-and-jersey/|tag|http://www.semanlink.net/tag/jquery +http://coenraets.org/blog/2011/12/restful-services-with-jquery-and-java-using-jax-rs-and-jersey/|title|RESTful services with jQuery and Java using JAX-RS and Jersey +http://coenraets.org/blog/2011/12/restful-services-with-jquery-and-java-using-jax-rs-and-jersey/|creationTime|2012-03-21T23:29:02Z +https://twitter.com/RubenVerborgh/status/815212205935394817|creationDate|2016-12-31 +https://twitter.com/RubenVerborgh/status/815212205935394817|tag|http://www.semanlink.net/tag/linked_data_fragments +https://twitter.com/RubenVerborgh/status/815212205935394817|tag|http://www.semanlink.net/tag/ruben_verborgh +https://twitter.com/RubenVerborgh/status/815212205935394817|title|"Ruben Verborgh sur Twitter : ""Just published 25,000+ #RDF triples of my own data as #OpenData: https://t.co/1jqy3ZgrjJ Query them live: https://t.co/B96KBPnG9C #dogfood""" +https://twitter.com/RubenVerborgh/status/815212205935394817|creationTime|2016-12-31T18:23:28Z +http://24ways.org/|creationDate|2006-01-10 +http://24ways.org/|tag|http://www.semanlink.net/tag/css +http://24ways.org/|tag|http://www.semanlink.net/tag/ajax +http://24ways.org/|tag|http://www.semanlink.net/tag/howto +http://24ways.org/|title|24 ways +http://www.xml.com/pub/a/2005/06/22/skos.html|creationDate|2005-06-24 +http://www.xml.com/pub/a/2005/06/22/skos.html|tag|http://www.semanlink.net/tag/skos +http://www.xml.com/pub/a/2005/06/22/skos.html|title|XML.com: Introducing SKOS +https://hal.archives-ouvertes.fr/hal-01517094|creationDate|2018-01-03 +https://hal.archives-ouvertes.fr/hal-01517094|tag|http://www.semanlink.net/tag/semantic_gap +https://hal.archives-ouvertes.fr/hal-01517094|tag|http://www.semanlink.net/tag/medical_information_search 
+https://hal.archives-ouvertes.fr/hal-01517094|tag|http://www.semanlink.net/tag/medical_data +https://hal.archives-ouvertes.fr/hal-01517094|tag|http://www.semanlink.net/tag/laure_soulier +https://hal.archives-ouvertes.fr/hal-01517094|tag|http://www.semanlink.net/tag/document_embeddings +https://hal.archives-ouvertes.fr/hal-01517094|tag|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +https://hal.archives-ouvertes.fr/hal-01517094|tag|http://www.semanlink.net/tag/these_irit_renault_biblio_initiale +https://hal.archives-ouvertes.fr/hal-01517094|tag|http://www.semanlink.net/tag/lynda_tamine +https://hal.archives-ouvertes.fr/hal-01517094|tag|http://www.semanlink.net/tag/irit +https://hal.archives-ouvertes.fr/hal-01517094|comment|"In this paper, we study how to optimize the document representation by leveraging neural-based approaches to capture latent representations built upon both validated medical concepts specified in an external resource as well as the used words. + +**Document vectors are learned so they allow predicting concepts in their context** + + +" +https://hal.archives-ouvertes.fr/hal-01517094|title|Learning Concept-Driven Document Embeddings for Medical Information Search (2017) +https://hal.archives-ouvertes.fr/hal-01517094|creationTime|2018-01-03T15:44:56Z +https://www.microsoft.com/en-us/research/publication/enriching-word-embeddings-using-knowledge-graph-for-semantic-tagging-in-conversational-dialog-systems/|creationDate|2017-10-02 +https://www.microsoft.com/en-us/research/publication/enriching-word-embeddings-using-knowledge-graph-for-semantic-tagging-in-conversational-dialog-systems/|tag|http://www.semanlink.net/tag/semantic_tagging +https://www.microsoft.com/en-us/research/publication/enriching-word-embeddings-using-knowledge-graph-for-semantic-tagging-in-conversational-dialog-systems/|tag|http://www.semanlink.net/tag/microsoft_research +https://www.microsoft.com/en-us/research/publication/enriching-word-embeddings-using-knowledge-graph-for-semantic-tagging-in-conversational-dialog-systems/|tag|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +https://www.microsoft.com/en-us/research/publication/enriching-word-embeddings-using-knowledge-graph-for-semantic-tagging-in-conversational-dialog-systems/|tag|http://www.semanlink.net/tag/nlp_microsoft +https://www.microsoft.com/en-us/research/publication/enriching-word-embeddings-using-knowledge-graph-for-semantic-tagging-in-conversational-dialog-systems/|comment|"> new simple, yet effective approaches to +learn domain specific word embeddings. + +## Intro + +> Adapting word embeddings, such as jointly capturing +syntactic and semantic information, can further enrich semantic +word representations for several tasks, e.g., sentiment +analysis (Tang et al. 2014), named entity recognition +(Lebret, Legrand, and Collobert 2013), entity-relation extraction +(Weston et al. 2013), etc. (Yu and Dredze 2014) +has introduced a lightly supervised word embedding learning +extending word2vec. They incorporate prior information to the objective +function as a regularization term considering synonymy relations +between words from Wordnet (Fellbaum 1999). + +> In this work, we go one step further and investigate if +enriching the word2vec word embeddings trained on unstructured/ +unlabeled text with domain specific semantic relations +obtained from knowledge sources (e.g., knowledge +graphs, search query logs, etc.) can help to discover relation +aware word embeddings. 
Unlike earlier work, **we encode the +information about the relations between phrases, thereby, +entities and relation mentions are all embedded into a low dimensional +vector space**. + +## Related work (Learning Word Embeddings with Priors) + +- word2vec +- Relational Constrained Model (RTM) (Yu and Dredze 2014) +While CBOW learns lexical word embeddings from provided text, the RTM learns embeddings of words based on their similarity to other words provided by a knowledge resource (eg. wordnet) +- Joint model (Yu and Dredze 2014) +combines CBOW and RTM through linear combination +" +https://www.microsoft.com/en-us/research/publication/enriching-word-embeddings-using-knowledge-graph-for-semantic-tagging-in-conversational-dialog-systems/|title|Enriching Word Embeddings Using Knowledge Graph for Semantic Tagging in Conversational Dialog Systems - Microsoft Research (2015) +https://www.microsoft.com/en-us/research/publication/enriching-word-embeddings-using-knowledge-graph-for-semantic-tagging-in-conversational-dialog-systems/|creationTime|2017-10-02T00:09:19Z +http://www.examiner.com/anonymous-in-national/anonymous-hacks-monsanto-operation-green-rights-begins|creationDate|2011-07-18 +http://www.examiner.com/anonymous-in-national/anonymous-hacks-monsanto-operation-green-rights-begins|tag|http://www.semanlink.net/tag/ecologie +http://www.examiner.com/anonymous-in-national/anonymous-hacks-monsanto-operation-green-rights-begins|tag|http://www.semanlink.net/tag/monsanto +http://www.examiner.com/anonymous-in-national/anonymous-hacks-monsanto-operation-green-rights-begins|tag|http://www.semanlink.net/tag/anonymous +http://www.examiner.com/anonymous-in-national/anonymous-hacks-monsanto-operation-green-rights-begins|title|Anonymous hacks Monsanto: Operation Green Rights begins - National Anonymous Examiner.com +http://www.examiner.com/anonymous-in-national/anonymous-hacks-monsanto-operation-green-rights-begins|creationTime|2011-07-18T13:06:48Z +http://laurentdupin.wordpress.com/2013/12/09/le-web-the-next-10-years-en-10-courts-metrages-sf/|creationDate|2013-12-16 +http://laurentdupin.wordpress.com/2013/12/09/le-web-the-next-10-years-en-10-courts-metrages-sf/|tag|http://www.semanlink.net/tag/science_fiction +http://laurentdupin.wordpress.com/2013/12/09/le-web-the-next-10-years-en-10-courts-metrages-sf/|tag|http://www.semanlink.net/tag/video +http://laurentdupin.wordpress.com/2013/12/09/le-web-the-next-10-years-en-10-courts-metrages-sf/|title|"Le web : ""the next 10 years""… en 10 courts métrages SF Le Mixer" +http://laurentdupin.wordpress.com/2013/12/09/le-web-the-next-10-years-en-10-courts-metrages-sf/|creationTime|2013-12-16T11:33:36Z +https://blog.algolia.com/how-to-build-a-helpful-search-for-technical-documentation-the-laravel-example/?utm_content=buffer45ce6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2015-09-17 +https://blog.algolia.com/how-to-build-a-helpful-search-for-technical-documentation-the-laravel-example/?utm_content=buffer45ce6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/howto +https://blog.algolia.com/how-to-build-a-helpful-search-for-technical-documentation-the-laravel-example/?utm_content=buffer45ce6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/technical_documentation 
+https://blog.algolia.com/how-to-build-a-helpful-search-for-technical-documentation-the-laravel-example/?utm_content=buffer45ce6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|How to Build a Helpful Search for Technical Documentation : The Laravel Example Milliseconds Matter +https://blog.algolia.com/how-to-build-a-helpful-search-for-technical-documentation-the-laravel-example/?utm_content=buffer45ce6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2015-09-17T23:11:45Z +http://stitch.cs.vu.nl/demo.html|creationDate|2006-04-07 +http://stitch.cs.vu.nl/demo.html|tag|http://www.semanlink.net/tag/rdf_browser +http://stitch.cs.vu.nl/demo.html|tag|http://www.semanlink.net/tag/skos +http://stitch.cs.vu.nl/demo.html|tag|http://www.semanlink.net/tag/rijksmuseum +http://stitch.cs.vu.nl/demo.html|comment|"The STITCH DEMO webserver demonstrates the added value of semantical integration of multiple collections. +
+The collections that are linked by the prototype RDF Multi-Faceted-Search browser are the Rijksmuseum Masterpieces Collection (ARIA) and the Medieval Illuminated Manuscripts at the National Library of the Netherlands
+SKOS is used to enable mixing/merging of separate vocabularies: +Cultural-heritage collections are typically indexed with metadata derived from a range of different vocabularies, such as AAT, Iconclass and in-house standards. This presents a problem when one wants to use multiple collections in an interoperable way. In general, it is unrealistic to assume unification of vocabularies. Vocabularies have been developed in many sub-domains, each with their own emphasis and scope. Still, there is significant overlap between the vocabularies used for indexing. The prime research objective of this subproject is to develop theory, methods and tools for allowing metadata interoperability through semantic links between the vocabularies. + +" +http://stitch.cs.vu.nl/demo.html|title|"DEMO of the STITCH project (""Semantic Interoperability To access Cultural Heritage"")" +https://stats.stackexchange.com/questions/352036/what-should-i-do-when-my-neural-network-doesnt-learn|creationDate|2019-01-27 +https://stats.stackexchange.com/questions/352036/what-should-i-do-when-my-neural-network-doesnt-learn|tag|http://www.semanlink.net/tag/nn_dev +https://stats.stackexchange.com/questions/352036/what-should-i-do-when-my-neural-network-doesnt-learn|title|What should I do when my neural network doesn't learn? +https://stats.stackexchange.com/questions/352036/what-should-i-do-when-my-neural-network-doesnt-learn|creationTime|2019-01-27T12:41:57Z +http://www.spritzinc.com/|creationDate|2014-03-01 +http://www.spritzinc.com/|tag|http://www.semanlink.net/tag/spritz +http://www.spritzinc.com/|title|Spritz +http://www.spritzinc.com/|creationTime|2014-03-01T15:07:45Z +https://cmusphinx.github.io/wiki/tutorialconcepts/|creationDate|2019-04-17 +https://cmusphinx.github.io/wiki/tutorialconcepts/|tag|http://www.semanlink.net/tag/speech_recognition +https://cmusphinx.github.io/wiki/tutorialconcepts/|title|Basic concepts of speech recognition – CMUSphinx Open Source Speech Recognition +https://cmusphinx.github.io/wiki/tutorialconcepts/|creationTime|2019-04-17T00:17:24Z +https://twitter.com/cecilejanssens/status/1104134423673479169|creationDate|2019-03-10 +https://twitter.com/cecilejanssens/status/1104134423673479169|tag|http://www.semanlink.net/tag/roc_curve +https://twitter.com/cecilejanssens/status/1104134423673479169|title|"Cecile Janssens sur Twitter : ""The area under the ROC curve (AUC) is so frequently criticized...""" +https://twitter.com/cecilejanssens/status/1104134423673479169|creationTime|2019-03-10T09:54:21Z +http://code.google.com/p/linked-data-api/wiki/Specification|creationDate|2010-02-25 +http://code.google.com/p/linked-data-api/wiki/Specification|tag|http://www.semanlink.net/tag/leigh_dodds +http://code.google.com/p/linked-data-api/wiki/Specification|tag|http://www.semanlink.net/tag/linked_data_api +http://code.google.com/p/linked-data-api/wiki/Specification|comment|"The API is intended to be a middle-ware layer that can be deployed +in-front of a SPARQL endpoint, providing the ability to create a +RESTful data access layer for accessing the RDF data contained in the +triple store. The middle-ware is configurable, and is intended to +support a range of different access patterns and output formats. ""Out +of the box"" the system provides delivery of the standard range of RDF +serialisations, as well as simple JSON and XML serializations for +descriptions of lists of resources. 
The API essentially maps +parameterized URLs to underlying SPARQL queries, mediating the content +negotiation of the results into a suitable format for the client." +http://code.google.com/p/linked-data-api/wiki/Specification|title|Specification - linked-data-api - Linked Data API Specification - Project Hosting on Google Code +http://code.google.com/p/linked-data-api/wiki/Specification|creationTime|2010-02-25T13:17:42Z +https://arxiv.org/abs/1807.03748|creationDate|2018-07-21 +https://arxiv.org/abs/1807.03748|tag|http://www.semanlink.net/tag/unsupervised_machine_learning +https://arxiv.org/abs/1807.03748|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1807.03748|tag|http://www.semanlink.net/tag/google_deepmind +https://arxiv.org/abs/1807.03748|tag|http://www.semanlink.net/tag/representation_learning +https://arxiv.org/abs/1807.03748|tag|http://www.semanlink.net/tag/contrastive_self_supervised_learning +https://arxiv.org/abs/1807.03748|arxiv_author|Yazhe Li +https://arxiv.org/abs/1807.03748|arxiv_author|Aaron van den Oord +https://arxiv.org/abs/1807.03748|arxiv_author|Oriol Vinyals +https://arxiv.org/abs/1807.03748|comment|"> a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful [autoregressive models](/tag/autoregressive_model). We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using [negative sampling](/tag/negative_sampling). + +a contrastive method that can be applied to any form of data that can be expressed in an ordered sequence: text, speech, video..." +https://arxiv.org/abs/1807.03748|title|[1807.03748] Representation Learning with Contrastive Predictive Coding +https://arxiv.org/abs/1807.03748|creationTime|2018-07-21T10:05:02Z +https://arxiv.org/abs/1807.03748|arxiv_summary|"While supervised learning has enabled great progress in many applications, +unsupervised learning has not seen such widespread adoption, and remains an +important and challenging endeavor for artificial intelligence. In this work, +we propose a universal unsupervised learning approach to extract useful +representations from high-dimensional data, which we call Contrastive +Predictive Coding. The key insight of our model is to learn such +representations by predicting the future in latent space by using powerful +autoregressive models. We use a probabilistic contrastive loss which induces +the latent space to capture information that is maximally useful to predict +future samples. It also makes the model tractable by using negative sampling. +While most prior work has focused on evaluating representations for a +particular modality, we demonstrate that our approach is able to learn useful +representations achieving strong performance on four distinct domains: speech, +images, text and reinforcement learning in 3D environments." 
+https://arxiv.org/abs/1807.03748|arxiv_firstAuthor|Aaron van den Oord
https://arxiv.org/abs/1807.03748|arxiv_updated|2019-01-22T18:47:12Z
https://arxiv.org/abs/1807.03748|arxiv_title|Representation Learning with Contrastive Predictive Coding
https://arxiv.org/abs/1807.03748|arxiv_published|2018-07-10T16:52:11Z
https://arxiv.org/abs/1807.03748|arxiv_num|1807.03748
http://www.snee.com/bobdc.blog/2014/12/hadoop.html|creationDate|2014-12-18
http://www.snee.com/bobdc.blog/2014/12/hadoop.html|tag|http://www.semanlink.net/tag/big_data_semantic_web
http://www.snee.com/bobdc.blog/2014/12/hadoop.html|tag|http://www.semanlink.net/tag/hadoop
http://www.snee.com/bobdc.blog/2014/12/hadoop.html|tag|http://www.semanlink.net/tag/big_data_tools
http://www.snee.com/bobdc.blog/2014/12/hadoop.html|tag|http://www.semanlink.net/tag/bob_ducharme
http://www.snee.com/bobdc.blog/2014/12/hadoop.html|title|Hadoop:What it is and how people use it: my own summary. bobdc.blog
http://www.snee.com/bobdc.blog/2014/12/hadoop.html|creationTime|2014-12-18T13:09:16Z
http://wafflewithmeaning.com/2008/01/10/installing-php-mysql-and-phpmyadmin-on-os-x-105-leopard/|creationDate|2008-10-20
http://wafflewithmeaning.com/2008/01/10/installing-php-mysql-and-phpmyadmin-on-os-x-105-leopard/|tag|http://www.semanlink.net/tag/php
http://wafflewithmeaning.com/2008/01/10/installing-php-mysql-and-phpmyadmin-on-os-x-105-leopard/|tag|http://www.semanlink.net/tag/installing_apps
http://wafflewithmeaning.com/2008/01/10/installing-php-mysql-and-phpmyadmin-on-os-x-105-leopard/|tag|http://www.semanlink.net/tag/phpmyadmin
http://wafflewithmeaning.com/2008/01/10/installing-php-mysql-and-phpmyadmin-on-os-x-105-leopard/|tag|http://www.semanlink.net/tag/leopard
http://wafflewithmeaning.com/2008/01/10/installing-php-mysql-and-phpmyadmin-on-os-x-105-leopard/|tag|http://www.semanlink.net/tag/mysql
http://wafflewithmeaning.com/2008/01/10/installing-php-mysql-and-phpmyadmin-on-os-x-105-leopard/|title|Waffle With Meaning » Installing PHP, MySQL and phpMyAdmin on OS X 10.5 (Leopard)
http://wafflewithmeaning.com/2008/01/10/installing-php-mysql-and-phpmyadmin-on-os-x-105-leopard/|creationTime|2008-10-20T14:37:46Z
http://tenbyten.org/10x10.html|creationDate|2008-02-15
http://tenbyten.org/10x10.html|tag|http://www.semanlink.net/tag/information_visualization
http://tenbyten.org/10x10.html|title|10x10 / 100 Words and Pictures that Define the Time / by Jonathan J. Harris
http://tenbyten.org/10x10.html|creationTime|2008-02-15T22:38:52Z
http://www.youtube.com/watch?v=ure2RdTZm8c|creationDate|2008-11-10
http://www.youtube.com/watch?v=ure2RdTZm8c|tag|http://www.semanlink.net/tag/miriam_makeba
http://www.youtube.com/watch?v=ure2RdTZm8c|tag|http://www.semanlink.net/tag/youtube_video
http://www.youtube.com/watch?v=ure2RdTZm8c|title|Miriam Makeba (Mama Africa) - Khawuleza 1966
http://www.youtube.com/watch?v=ure2RdTZm8c|creationTime|2008-11-10T10:29:43Z
https://twitter.com/dpkingma/status/1070856305831624704|creationDate|2018-12-07
https://twitter.com/dpkingma/status/1070856305831624704|tag|http://www.semanlink.net/tag/bayesian_deep_learning
https://twitter.com/dpkingma/status/1070856305831624704|tag|http://www.semanlink.net/tag/google_brain
https://twitter.com/dpkingma/status/1070856305831624704|tag|http://www.semanlink.net/tag/likelihood
https://twitter.com/dpkingma/status/1070856305831624704|tag|http://www.semanlink.net/tag/machine_learning
https://twitter.com/dpkingma/status/1070856305831624704|comment|"Durk Kingma sur Twitter
+
> ""It is my personal belief is that sufficiently powerful likelihood-based generative models will usher in a new era of machine learning, allowing us to tackle important limitations of current machine learning, such as lacking data efficiency and generalization. [7/8]"""
https://twitter.com/dpkingma/status/1070856305831624704|title|Durk Kingma sur Twitter : about likelihood-based generative models
https://twitter.com/dpkingma/status/1070856305831624704|creationTime|2018-12-07T08:38:44Z
http://www.r-bloggers.com/sparql-with-r-in-less-than-5-minutes/|creationDate|2014-06-26
http://www.r-bloggers.com/sparql-with-r-in-less-than-5-minutes/|tag|http://www.semanlink.net/tag/sparql
http://www.r-bloggers.com/sparql-with-r-in-less-than-5-minutes/|tag|http://www.semanlink.net/tag/r
http://www.r-bloggers.com/sparql-with-r-in-less-than-5-minutes/|title|SPARQL with R in less than 5 minutes (R news & tutorials)
http://www.r-bloggers.com/sparql-with-r-in-less-than-5-minutes/|creationTime|2014-06-26T20:08:35Z
http://filiu.blog.lemonde.fr/2016/07/09/comment-les-etats-unis-viennent-de-sauver-daech-dune-defaite-annoncee-en-syrie/|creationDate|2016-07-31
http://filiu.blog.lemonde.fr/2016/07/09/comment-les-etats-unis-viennent-de-sauver-daech-dune-defaite-annoncee-en-syrie/|tag|http://www.semanlink.net/tag/syrie
http://filiu.blog.lemonde.fr/2016/07/09/comment-les-etats-unis-viennent-de-sauver-daech-dune-defaite-annoncee-en-syrie/|tag|http://www.semanlink.net/tag/usa
http://filiu.blog.lemonde.fr/2016/07/09/comment-les-etats-unis-viennent-de-sauver-daech-dune-defaite-annoncee-en-syrie/|tag|http://www.semanlink.net/tag/ei
http://filiu.blog.lemonde.fr/2016/07/09/comment-les-etats-unis-viennent-de-sauver-daech-dune-defaite-annoncee-en-syrie/|title|Comment les Etats-Unis viennent de sauver Daech d’une défaite annoncée en Syrie Un si Proche Orient
http://filiu.blog.lemonde.fr/2016/07/09/comment-les-etats-unis-viennent-de-sauver-daech-dune-defaite-annoncee-en-syrie/|creationTime|2016-07-31T00:03:51Z
http://www.bbc.co.uk/news/technology-25667292|creationDate|2014-01-11
http://www.bbc.co.uk/news/technology-25667292|tag|http://www.semanlink.net/tag/jeux_en_ligne
http://www.bbc.co.uk/news/technology-25667292|tag|http://www.semanlink.net/tag/net
http://www.bbc.co.uk/news/technology-25667292|tag|http://www.semanlink.net/tag/news
http://www.bbc.co.uk/news/technology-25667292|tag|http://www.semanlink.net/tag/bbc
+http://www.bbc.co.uk/news/technology-25667292|title|BBC News - Cicada 3301: The dark net treasure trail reopens
http://www.bbc.co.uk/news/technology-25667292|creationTime|2014-01-11T23:17:16Z
http://queue.acm.org/detail.cfm?id=1961297|creationDate|2011-04-19
http://queue.acm.org/detail.cfm?id=1961297|tag|http://www.semanlink.net/tag/nosql
http://queue.acm.org/detail.cfm?id=1961297|tag|http://www.semanlink.net/tag/sql
http://queue.acm.org/detail.cfm?id=1961297|comment|CONTRARY TO POPULAR BELIEF, SQL AND NOSQL ARE REALLY JUST TWO SIDES OF THE SAME COIN
http://queue.acm.org/detail.cfm?id=1961297|title|A co-Relational Model of Data for Large Shared Data Banks - ACM Queue
http://queue.acm.org/detail.cfm?id=1961297|creationTime|2011-04-19T01:03:00Z
http://www.comp.glam.ac.uk/pages/research/hypermedia/nkos/nkos2008/|creationDate|2008-05-06
http://www.comp.glam.ac.uk/pages/research/hypermedia/nkos/nkos2008/|tag|http://www.semanlink.net/tag/nkos
http://www.comp.glam.ac.uk/pages/research/hypermedia/nkos/nkos2008/|tag|http://www.semanlink.net/tag/skos
http://www.comp.glam.ac.uk/pages/research/hypermedia/nkos/nkos2008/|tag|http://www.semanlink.net/tag/workshop
http://www.comp.glam.ac.uk/pages/research/hypermedia/nkos/nkos2008/|comment|Please email proposals (approx. 500 words including aims, methods, main findings) by June 20th to Marianne Lykke Nielsen (mln@db.dk). Advance indication that you intend to submit a presentation would be helpful. Proposals will be peer-reviewed by the program committee and notification of acceptance will be given by July 4th. The early registration deadline for the conference and the workshop is July 31st.
http://www.comp.glam.ac.uk/pages/research/hypermedia/nkos/nkos2008/|title|Networked Knowledge Organization Systems and Services The 7th European Networked Knowledge Organization Systems (NKOS) Workshop
http://www.comp.glam.ac.uk/pages/research/hypermedia/nkos/nkos2008/|creationTime|2008-05-06T21:31:17Z
http://www.nytimes.com/2012/07/27/science/cousins-of-neanderthals-left-dna-in-africa-scientists-report.html|creationDate|2012-07-29
http://www.nytimes.com/2012/07/27/science/cousins-of-neanderthals-left-dna-in-africa-scientists-report.html|tag|http://www.semanlink.net/tag/paleoanthropology_genetics
http://www.nytimes.com/2012/07/27/science/cousins-of-neanderthals-left-dna-in-africa-scientists-report.html|tag|http://www.semanlink.net/tag/genetique_humaine
http://www.nytimes.com/2012/07/27/science/cousins-of-neanderthals-left-dna-in-africa-scientists-report.html|title|Cousins of Neanderthals Left DNA in Africa, Scientists Report - NYTimes.com
http://www.nytimes.com/2012/07/27/science/cousins-of-neanderthals-left-dna-in-africa-scientists-report.html|creationTime|2012-07-29T01:11:04Z
http://es.wikipedia.org/wiki/Yag%C3%A1n|creationDate|2007-03-28
http://es.wikipedia.org/wiki/Yag%C3%A1n|tag|http://www.semanlink.net/tag/yagan
http://es.wikipedia.org/wiki/Yag%C3%A1n|comment|Yagán (ou Yamana), peuple de la Terre De Feu. Quasi disparu, victime de sa rencontre avec le monde occidental (les phoques, qui constituaient l'essentiel de leur alimentation, ont été massacrés pour leur huile, qui servait à l'éclairage des villes européennes)
http://es.wikipedia.org/wiki/Yag%C3%A1n|title|Yagán - Wikipedia, la enciclopedia libre
http://es.wikipedia.org/wiki/Yag%C3%A1n|creationTime|2007-03-28T23:44:45Z
http://answers.semanticweb.com/questions/3858/rdf-store-for-use-with-nodejs|creationDate|2011-09-13
http://answers.semanticweb.com/questions/3858/rdf-store-for-use-with-nodejs|tag|http://www.semanlink.net/tag/node_js
http://answers.semanticweb.com/questions/3858/rdf-store-for-use-with-nodejs|tag|http://www.semanlink.net/tag/triplestore
http://answers.semanticweb.com/questions/3858/rdf-store-for-use-with-nodejs|tag|http://www.semanlink.net/tag/danny_ayers
http://answers.semanticweb.com/questions/3858/rdf-store-for-use-with-nodejs|title|RDF store for use with node.js?
http://answers.semanticweb.com/questions/3858/rdf-store-for-use-with-nodejs|creationTime|2011-09-13T14:14:49Z
http://en.wikipedia.org/wiki/Touch_of_Evil|creationDate|2008-05-15
http://en.wikipedia.org/wiki/Touch_of_Evil|tag|http://www.semanlink.net/tag/film_noir
http://en.wikipedia.org/wiki/Touch_of_Evil|tag|http://www.semanlink.net/tag/charlton_heston
http://en.wikipedia.org/wiki/Touch_of_Evil|tag|http://www.semanlink.net/tag/orson_welles
http://en.wikipedia.org/wiki/Touch_of_Evil|tag|http://www.semanlink.net/tag/film_americain
http://en.wikipedia.org/wiki/Touch_of_Evil|tag|http://www.semanlink.net/tag/marlene_dietrich
http://en.wikipedia.org/wiki/Touch_of_Evil|title|Touch of Evil - Wikipedia, the free encyclopedia
http://en.wikipedia.org/wiki/Touch_of_Evil|creationTime|2008-05-15T22:29:43Z
http://blog.dbtune.org:80/post/2007/06/11/Linking-open-data:-interlinking-the-Jamendo-and-the-Musicbrainz-datasets|creationDate|2007-06-13
http://blog.dbtune.org:80/post/2007/06/11/Linking-open-data:-interlinking-the-Jamendo-and-the-Musicbrainz-datasets|tag|http://www.semanlink.net/tag/jamendo
http://blog.dbtune.org:80/post/2007/06/11/Linking-open-data:-interlinking-the-Jamendo-and-the-Musicbrainz-datasets|tag|http://www.semanlink.net/tag/musicbrainz
http://blog.dbtune.org:80/post/2007/06/11/Linking-open-data:-interlinking-the-Jamendo-and-the-Musicbrainz-datasets|tag|http://www.semanlink.net/tag/linked_data
http://blog.dbtune.org:80/post/2007/06/11/Linking-open-data:-interlinking-the-Jamendo-and-the-Musicbrainz-datasets|title|Linking open data: interlinking the Jamendo and the Musicbrainz datasets - DBTune blog
http://blog.dbtune.org:80/post/2007/06/11/Linking-open-data:-interlinking-the-Jamendo-and-the-Musicbrainz-datasets|creationTime|2007-06-13T23:26:17Z
http://www.humanite.fr/bernard-stiegler-nous-devons-rendre-aux-gens-le-temps-gagne-par-lautomatisation-609824|creationDate|2016-07-24
http://www.humanite.fr/bernard-stiegler-nous-devons-rendre-aux-gens-le-temps-gagne-par-lautomatisation-609824|tag|http://www.semanlink.net/tag/bernard_stiegler
http://www.humanite.fr/bernard-stiegler-nous-devons-rendre-aux-gens-le-temps-gagne-par-lautomatisation-609824|title|Bernard Stiegler « Nous devons rendre aux gens le temps gagné par l’automatisation » L'Humanité
http://www.humanite.fr/bernard-stiegler-nous-devons-rendre-aux-gens-le-temps-gagne-par-lautomatisation-609824|creationTime|2016-07-24T19:50:34Z
http://jena.sourceforge.net/ARQ/Tutorial/data.html|creationDate|2007-07-07
+http://jena.sourceforge.net/ARQ/Tutorial/data.html|tag|http://www.semanlink.net/tag/sparql_tutorial +http://jena.sourceforge.net/ARQ/Tutorial/data.html|tag|http://www.semanlink.net/tag/sparql_and_jena +http://jena.sourceforge.net/ARQ/Tutorial/data.html|title|ARQ - SPARQL Tutorial +http://jena.sourceforge.net/ARQ/Tutorial/data.html|creationTime|2007-07-07T13:51:51Z +http://www.geospatialsemanticweb.com/wp-content/uploads/2006/07/01637364.pdf|creationDate|2006-07-14 +http://www.geospatialsemanticweb.com/wp-content/uploads/2006/07/01637364.pdf|tag|http://www.semanlink.net/tag/semantic_web_introduction +http://www.geospatialsemanticweb.com/wp-content/uploads/2006/07/01637364.pdf|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.geospatialsemanticweb.com/wp-content/uploads/2006/07/01637364.pdf|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.geospatialsemanticweb.com/wp-content/uploads/2006/07/01637364.pdf|comment|"An update to the 2001 Scientific American article. +" +http://www.geospatialsemanticweb.com/wp-content/uploads/2006/07/01637364.pdf|title|The Semantic Web Revisited +http://news.bbc.co.uk/1/hi/sci/tech/4199138.stm|creationDate|2005-09-05 +http://news.bbc.co.uk/1/hi/sci/tech/4199138.stm|tag|http://www.semanlink.net/tag/millennium_goal +http://news.bbc.co.uk/1/hi/sci/tech/4199138.stm|tag|http://www.semanlink.net/tag/pauvrete +http://news.bbc.co.uk/1/hi/sci/tech/4199138.stm|tag|http://www.semanlink.net/tag/environnement +http://news.bbc.co.uk/1/hi/sci/tech/4199138.stm|title|BBC NEWS - Environment key to helping poor +http://news.bbc.co.uk/1/hi/sci/tech/4199138.stm|source|BBC +http://www.cell.com/cell/pdf/S0092-8674(17)30538-X.pdf|creationDate|2017-06-05 +http://www.cell.com/cell/pdf/S0092-8674(17)30538-X.pdf|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.cell.com/cell/pdf/S0092-8674(17)30538-X.pdf|comment|Facial identity is encoded via a remarkably simple neural code that relies on the ability of neurons to distinguish facial features along specific axes in face space +http://www.cell.com/cell/pdf/S0092-8674(17)30538-X.pdf|title|The Code for Facial Identity in the Primate Brain +http://www.cell.com/cell/pdf/S0092-8674(17)30538-X.pdf|creationTime|2017-06-05T10:16:05Z +http://en.wikipedia.org/wiki/It's_a_Wonderful_Life|creationDate|2011-12-31 +http://en.wikipedia.org/wiki/It's_a_Wonderful_Life|tag|http://www.semanlink.net/tag/film_americain +http://en.wikipedia.org/wiki/It's_a_Wonderful_Life|tag|http://www.semanlink.net/tag/james_stewart +http://en.wikipedia.org/wiki/It's_a_Wonderful_Life|comment|an American Christmas drama film produced and directed by Frank Capra (1946) +http://en.wikipedia.org/wiki/It's_a_Wonderful_Life|title|It's a Wonderful Life +http://en.wikipedia.org/wiki/It's_a_Wonderful_Life|creationTime|2011-12-31T02:08:39Z +https://www.bbc.com/news/science-environment-47659640|creationDate|2019-03-30 +https://www.bbc.com/news/science-environment-47659640|tag|http://www.semanlink.net/tag/tasmanian_devil +https://www.bbc.com/news/science-environment-47659640|title|Tasmanian devils 'adapting to coexist with cancer' - BBC News +https://www.bbc.com/news/science-environment-47659640|creationTime|2019-03-30T13:17:37Z +https://towardsdatascience.com/dealing-with-imbalanced-classes-in-machine-learning-d43d6fa19d2|creationDate|2018-02-03 +https://towardsdatascience.com/dealing-with-imbalanced-classes-in-machine-learning-d43d6fa19d2|tag|http://www.semanlink.net/tag/imbalanced_data 
+https://towardsdatascience.com/dealing-with-imbalanced-classes-in-machine-learning-d43d6fa19d2|tag|http://www.semanlink.net/tag/statistical_classification +https://towardsdatascience.com/dealing-with-imbalanced-classes-in-machine-learning-d43d6fa19d2|title|Dealing with Imbalanced Classes in Machine Learning +https://towardsdatascience.com/dealing-with-imbalanced-classes-in-machine-learning-d43d6fa19d2|creationTime|2018-02-03T16:55:27Z +https://hal.archives-ouvertes.fr/hal-01910235|creationDate|2018-11-28 +https://hal.archives-ouvertes.fr/hal-01910235|tag|http://www.semanlink.net/tag/francois_yvon +https://hal.archives-ouvertes.fr/hal-01910235|tag|http://www.semanlink.net/tag/neural_machine_translation +https://hal.archives-ouvertes.fr/hal-01910235|title|Using Monolingual Data in Neural Machine Translation: a Systematic Study +https://hal.archives-ouvertes.fr/hal-01910235|creationTime|2018-11-28T23:02:34Z +http://colin-verdier.com/les-fossoyeurs-de-l-innovation/|creationDate|2014-04-11 +http://colin-verdier.com/les-fossoyeurs-de-l-innovation/|tag|http://www.semanlink.net/tag/innovation +http://colin-verdier.com/les-fossoyeurs-de-l-innovation/|tag|http://www.semanlink.net/tag/politique_economique_francaise +http://colin-verdier.com/les-fossoyeurs-de-l-innovation/|tag|http://www.semanlink.net/tag/politique_de_l_innovation +http://colin-verdier.com/les-fossoyeurs-de-l-innovation/|tag|http://www.semanlink.net/tag/henri_verdier +http://colin-verdier.com/les-fossoyeurs-de-l-innovation/|title|Les fossoyeurs de l’innovation L'Âge de la multitude +http://colin-verdier.com/les-fossoyeurs-de-l-innovation/|creationTime|2014-04-11T23:47:28Z +http://blog.wolfram.com/2014/02/24/starting-to-demo-the-wolfram-language/|creationDate|2014-03-03 +http://blog.wolfram.com/2014/02/24/starting-to-demo-the-wolfram-language/|tag|http://www.semanlink.net/tag/wolfram_language +http://blog.wolfram.com/2014/02/24/starting-to-demo-the-wolfram-language/|title|Starting to Demo the Wolfram Language—Wolfram Blog +http://blog.wolfram.com/2014/02/24/starting-to-demo-the-wolfram-language/|creationTime|2014-03-03T00:04:47Z +http://www.w3.org/2001/sw/wiki/Main_Page|creationDate|2010-01-20 +http://www.w3.org/2001/sw/wiki/Main_Page|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2001/sw/wiki/Main_Page|tag|http://www.semanlink.net/tag/semantic_web +http://www.w3.org/2001/sw/wiki/Main_Page|title|Semantic Web Standards - wiki at w3c +http://www.w3.org/2001/sw/wiki/Main_Page|creationTime|2010-01-20T18:22:15Z +http://roy.gbiv.com/untangled/2008/rest-apis-must-be-hypertext-driven|creationDate|2017-04-12 +http://roy.gbiv.com/untangled/2008/rest-apis-must-be-hypertext-driven|tag|http://www.semanlink.net/tag/rest +http://roy.gbiv.com/untangled/2008/rest-apis-must-be-hypertext-driven|tag|http://www.semanlink.net/tag/citation +http://roy.gbiv.com/untangled/2008/rest-apis-must-be-hypertext-driven|tag|http://www.semanlink.net/tag/hateoas +http://roy.gbiv.com/untangled/2008/rest-apis-must-be-hypertext-driven|tag|http://www.semanlink.net/tag/roy_t_fielding +http://roy.gbiv.com/untangled/2008/rest-apis-must-be-hypertext-driven|comment|if the engine of application state (and hence the API) is not being driven by hypertext, then it cannot be RESTful and cannot be a REST API. Period. Is there some broken manual somewhere that needs to be fixed? 
+http://roy.gbiv.com/untangled/2008/rest-apis-must-be-hypertext-driven|title|REST APIs must be hypertext-driven » Untangled +http://roy.gbiv.com/untangled/2008/rest-apis-must-be-hypertext-driven|creationTime|2017-04-12T13:36:29Z +http://www.spurl.net/discover/user/fps/|creationDate|2005-04-28 +http://www.spurl.net/discover/user/fps/|tag|http://www.semanlink.net/tag/fps +http://www.spurl.net/discover/user/fps/|tag|http://www.semanlink.net/tag/spurl +https://spinningup.openai.com/en/latest/spinningup/keypapers.html|creationDate|2018-11-09 +https://spinningup.openai.com/en/latest/spinningup/keypapers.html|tag|http://www.semanlink.net/tag/reinforcement_learning +https://spinningup.openai.com/en/latest/spinningup/keypapers.html|title|Key Papers in Deep RL — OpenAI - Spinning Up documentation +https://spinningup.openai.com/en/latest/spinningup/keypapers.html|creationTime|2018-11-09T13:56:17Z +http://cifre.anrt.asso.fr/|creationDate|2019-05-09 +http://cifre.anrt.asso.fr/|tag|http://www.semanlink.net/tag/cifre +http://cifre.anrt.asso.fr/|title|Conventions CIFRE +http://cifre.anrt.asso.fr/|creationTime|2019-05-09T14:32:00Z +http://www.bbc.com/news/technology-28976849|creationDate|2014-08-29 +http://www.bbc.com/news/technology-28976849|tag|http://www.semanlink.net/tag/flickr +http://www.bbc.com/news/technology-28976849|tag|http://www.semanlink.net/tag/historic_images +http://www.bbc.com/news/technology-28976849|comment|An American academic is creating a searchable database of 12 million historic copyright-free images. +http://www.bbc.com/news/technology-28976849|title|BBC News - Millions of historic images posted to Flickr +http://www.bbc.com/news/technology-28976849|creationTime|2014-08-29T19:03:56Z +http://www.universcience.tv/video-la-vitesse-de-la-lumiere-3139.html|creationDate|2015-01-06 +http://www.universcience.tv/video-la-vitesse-de-la-lumiere-3139.html|tag|http://www.semanlink.net/tag/lumiere +http://www.universcience.tv/video-la-vitesse-de-la-lumiere-3139.html|title|Mesurer le vitesse de la lumière dans sa cuisine +http://www.universcience.tv/video-la-vitesse-de-la-lumiere-3139.html|creationTime|2015-01-06T19:51:36Z +http://internetactu.blog.lemonde.fr/2018/03/04/aux-origines-de-crispr/|creationDate|2018-03-04 +http://internetactu.blog.lemonde.fr/2018/03/04/aux-origines-de-crispr/|tag|http://www.semanlink.net/tag/livre +http://internetactu.blog.lemonde.fr/2018/03/04/aux-origines-de-crispr/|tag|http://www.semanlink.net/tag/crispr_cas9 +http://internetactu.blog.lemonde.fr/2018/03/04/aux-origines-de-crispr/|title|Aux origines de CRISPR +http://internetactu.blog.lemonde.fr/2018/03/04/aux-origines-de-crispr/|creationTime|2018-03-04T12:05:41Z +http://www.lemonde.fr/imprimer/article/2012/02/20/1645991.html|creationDate|2012-02-20 +http://www.lemonde.fr/imprimer/article/2012/02/20/1645991.html|tag|http://www.semanlink.net/tag/droit_d_auteur +http://www.lemonde.fr/imprimer/article/2012/02/20/1645991.html|tag|http://www.semanlink.net/tag/numerisation_des_oeuvres_indisponibles +http://www.lemonde.fr/imprimer/article/2012/02/20/1645991.html|tag|http://www.semanlink.net/tag/j_hallucine +http://www.lemonde.fr/imprimer/article/2012/02/20/1645991.html|comment|"""Des droits et... des devoirs"" , comme on dit toujours: détenir un droit d'auteur doit impliquer l'obligation de mettre l'oeuvre à disposition à un prix raisonnable. Sinon, il est alors non seulement légitime, mais de salubrité publique, que l'on pallie le défaut du détenteur du ""droit"" en question. 
+" +http://www.lemonde.fr/imprimer/article/2012/02/20/1645991.html|title|Des auteurs et des ayants droit contre la numérisation des œuvres indisponibles +http://www.lemonde.fr/imprimer/article/2012/02/20/1645991.html|creationTime|2012-02-20T18:58:42Z +http://www.lemonde.fr/imprimer/article/2012/02/20/1645991.html|source|Le Monde +http://tos-dr.info/|creationDate|2012-08-15 +http://tos-dr.info/|tag|http://www.semanlink.net/tag/droit_et_internet +http://tos-dr.info/|title|Terms of Service; Didn't Read +http://tos-dr.info/|creationTime|2012-08-15T11:51:35Z +https://www.quora.com/Why-dont-pure-functional-programming-languages-provide-a-loop-construct|creationDate|2017-06-05 +https://www.quora.com/Why-dont-pure-functional-programming-languages-provide-a-loop-construct|tag|http://www.semanlink.net/tag/functional_programming +https://www.quora.com/Why-dont-pure-functional-programming-languages-provide-a-loop-construct|title|Why don't pure functional programming languages provide a loop construct? - Quora +https://www.quora.com/Why-dont-pure-functional-programming-languages-provide-a-loop-construct|creationTime|2017-06-05T11:52:50Z +http://www.mspace.fm/projects/richtags/|creationDate|2007-01-04 +http://www.mspace.fm/projects/richtags/|tag|http://www.semanlink.net/tag/mspace +http://www.mspace.fm/projects/richtags/|tag|http://www.semanlink.net/tag/semanlink_related +http://www.mspace.fm/projects/richtags/|tag|http://www.semanlink.net/tag/tagging +http://www.mspace.fm/projects/richtags/|comment|Rich Tags: Supporting Better Exploration of Digital Repositories with Semantic Social Tagging +http://www.mspace.fm/projects/richtags/|title|mSpace - Projects - Rich Tags +http://stackoverflow.com/questions/tagged/schema.org|creationDate|2013-07-07 +http://stackoverflow.com/questions/tagged/schema.org|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/tagged/schema.org|tag|http://www.semanlink.net/tag/schema_org +http://stackoverflow.com/questions/tagged/schema.org|title|Newest 'schema.org' Questions - Stack Overflow +http://stackoverflow.com/questions/tagged/schema.org|creationTime|2013-07-07T17:13:13Z +http://www.ebusiness-unibw.org/events/ecweb2014/|creationDate|2014-04-18 +http://www.ebusiness-unibw.org/events/ecweb2014/|tag|http://www.semanlink.net/tag/ec_web_14 +http://www.ebusiness-unibw.org/events/ecweb2014/|title|EC-Web'14: The 15th International Conference on Electronic Commerce and Web Technologies +http://www.ebusiness-unibw.org/events/ecweb2014/|creationTime|2014-04-18T23:35:51Z +http://cidrdb.org/cidr2019/papers/p117-kraska-cidr19.pdf|creationDate|2019-01-01 +http://cidrdb.org/cidr2019/papers/p117-kraska-cidr19.pdf|tag|http://www.semanlink.net/tag/learned_index_structures +http://cidrdb.org/cidr2019/papers/p117-kraska-cidr19.pdf|tag|http://www.semanlink.net/tag/database +http://cidrdb.org/cidr2019/papers/p117-kraska-cidr19.pdf|comment|cf. 
[The case for learned index structures](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1712.01208v1) +http://cidrdb.org/cidr2019/papers/p117-kraska-cidr19.pdf|relatedDoc|https://arxiv.org/abs/1712.01208v1 +http://cidrdb.org/cidr2019/papers/p117-kraska-cidr19.pdf|title|SageDB: A Learned Database System +http://cidrdb.org/cidr2019/papers/p117-kraska-cidr19.pdf|creationTime|2019-01-01T13:23:57Z +http://rue89.nouvelobs.com/2016/06/20/crypto-monnaie-ether-connait-crise-a-rendre-jaloux-lehman-brothers-264401|creationDate|2016-06-21 +http://rue89.nouvelobs.com/2016/06/20/crypto-monnaie-ether-connait-crise-a-rendre-jaloux-lehman-brothers-264401|tag|http://www.semanlink.net/tag/dao_attack +http://rue89.nouvelobs.com/2016/06/20/crypto-monnaie-ether-connait-crise-a-rendre-jaloux-lehman-brothers-264401|title|La crypto-monnaie Ether connaît une crise à rendre jaloux Lehman Brothers - Rue89 - L'Obs +http://rue89.nouvelobs.com/2016/06/20/crypto-monnaie-ether-connait-crise-a-rendre-jaloux-lehman-brothers-264401|creationTime|2016-06-21T10:18:33Z +http://ode.openlinksw.com/|creationDate|2010-12-28 +http://ode.openlinksw.com/|tag|http://www.semanlink.net/tag/openlink +http://ode.openlinksw.com/|tag|http://www.semanlink.net/tag/rdf_browser +http://ode.openlinksw.com/|title|OpenLink Data Explorer Extension +http://ode.openlinksw.com/|creationTime|2010-12-28T16:56:49Z +http://www.newscientist.com/article/dn23020-did-learning-to-fly-give-bats-superimmunity.html|creationDate|2012-12-30 +http://www.newscientist.com/article/dn23020-did-learning-to-fly-give-bats-superimmunity.html|tag|http://www.semanlink.net/tag/immune_system +http://www.newscientist.com/article/dn23020-did-learning-to-fly-give-bats-superimmunity.html|tag|http://www.semanlink.net/tag/bat +http://www.newscientist.com/article/dn23020-did-learning-to-fly-give-bats-superimmunity.html|tag|http://www.semanlink.net/tag/virus +http://www.newscientist.com/article/dn23020-did-learning-to-fly-give-bats-superimmunity.html|title|Did learning to fly give bats super-immunity? 
- New Scientist +http://www.newscientist.com/article/dn23020-did-learning-to-fly-give-bats-superimmunity.html|creationTime|2012-12-30T13:22:33Z +https://news.cnrs.fr/opinions/fermis-paradox-and-missing-aliens|creationDate|2017-06-04 +https://news.cnrs.fr/opinions/fermis-paradox-and-missing-aliens|tag|http://www.semanlink.net/tag/fermi_paradox +https://news.cnrs.fr/opinions/fermis-paradox-and-missing-aliens|title|Fermi’s Paradox and the Missing Aliens CNRS News +https://news.cnrs.fr/opinions/fermis-paradox-and-missing-aliens|creationTime|2017-06-04T11:17:22Z +http://web.mit.edu/remy/|creationDate|2013-07-22 +http://web.mit.edu/remy/|tag|http://www.semanlink.net/tag/tcp +http://web.mit.edu/remy/|tag|http://www.semanlink.net/tag/machine_learning +http://web.mit.edu/remy/|title|TCP ex Machina +http://web.mit.edu/remy/|creationTime|2013-07-22T09:52:42Z +http://www.mycarevent.com/Deliverables/DL3.4_Terminology_Method_GIIRM_FL_v01.00.pdf|creationDate|2008-01-20 +http://www.mycarevent.com/Deliverables/DL3.4_Terminology_Method_GIIRM_FL_v01.00.pdf|tag|http://www.semanlink.net/tag/mycarevent +http://www.mycarevent.com/Deliverables/DL3.4_Terminology_Method_GIIRM_FL_v01.00.pdf|title|MyCarEvent - Terminology and Methodology for populating the generic information model +http://www.mycarevent.com/Deliverables/DL3.4_Terminology_Method_GIIRM_FL_v01.00.pdf|creationTime|2008-01-20T11:15:32Z +http://www.w3.org/2007/03/VLDB/|creationDate|2007-04-19 +http://www.w3.org/2007/03/VLDB/|tag|http://www.semanlink.net/tag/database_to_rdf_mapping +http://www.w3.org/2007/03/VLDB/|tag|http://www.semanlink.net/tag/sparql +http://www.w3.org/2007/03/VLDB/|comment|Interesting write-up about relational database to RDF mapping on the W3C website +http://www.w3.org/2007/03/VLDB/|title|RDF and SPARQL: Using Semantic Web Technology to Integrate the World's Data +http://www.w3.org/2007/03/VLDB/|creationTime|2007-04-19T22:45:28Z +https://www.researchgate.net/post/How_to_find_semantic_similarity_between_two_documents|creationDate|2017-05-18 +https://www.researchgate.net/post/How_to_find_semantic_similarity_between_two_documents|tag|http://www.semanlink.net/tag/text_similarity +https://www.researchgate.net/post/How_to_find_semantic_similarity_between_two_documents|title|How to find semantic similarity between two documents? 
(researchgate) +https://www.researchgate.net/post/How_to_find_semantic_similarity_between_two_documents|creationTime|2017-05-18T09:46:08Z +http://blog.codeship.com/json-ld-building-meaningful-data-apis/|creationDate|2016-03-17 +http://blog.codeship.com/json-ld-building-meaningful-data-apis/|tag|http://www.semanlink.net/tag/tutorial +http://blog.codeship.com/json-ld-building-meaningful-data-apis/|tag|http://www.semanlink.net/tag/json_ld +http://blog.codeship.com/json-ld-building-meaningful-data-apis/|title|JSON-LD: Building Meaningful Data APIs - via @codeship via @codeship +http://blog.codeship.com/json-ld-building-meaningful-data-apis/|creationTime|2016-03-17T23:55:43Z +http://www.physics.usyd.edu.au/~gekko/pinwheel.html|creationDate|2008-03-06 +http://www.physics.usyd.edu.au/~gekko/pinwheel.html|tag|http://www.semanlink.net/tag/catastrophe_naturelle +http://www.physics.usyd.edu.au/~gekko/pinwheel.html|tag|http://www.semanlink.net/tag/sursauts_gamma +http://www.physics.usyd.edu.au/~gekko/pinwheel.html|tag|http://www.semanlink.net/tag/supernova +http://www.physics.usyd.edu.au/~gekko/pinwheel.html|comment|A spectacular, rotating binary star system is a ticking time bomb, ready to throw out a searing beam of high-energy gamma rays – and Earth may be right in the line of fire. +http://www.physics.usyd.edu.au/~gekko/pinwheel.html|title|WR 104: The prototype Pinwheel Nebula +http://www.physics.usyd.edu.au/~gekko/pinwheel.html|creationTime|2008-03-06T21:45:06Z +http://www.semanlink.net/doc/2019/05/getting_my_personal_data_out_of|creationDate|2019-05-20 +http://www.semanlink.net/doc/2019/05/getting_my_personal_data_out_of|tag|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/doc/2019/05/getting_my_personal_data_out_of|tag|http://www.semanlink.net/tag/ruben_verborgh +http://www.semanlink.net/doc/2019/05/getting_my_personal_data_out_of|tag|http://www.semanlink.net/tag/personal_data +http://www.semanlink.net/doc/2019/05/getting_my_personal_data_out_of|title|Getting my personal data out of Facebook Ruben Verborgh +http://www.semanlink.net/doc/2019/05/getting_my_personal_data_out_of|bookmarkOf|https://ruben.verborgh.org/facebook/ +http://www.semanlink.net/doc/2019/05/getting_my_personal_data_out_of|creationTime|2019-05-20T14:17:30Z +http://www.w3.org/TR/wordnet-rdf/|creationDate|2006-06-19 +http://www.w3.org/TR/wordnet-rdf/|tag|http://www.semanlink.net/tag/wordnet +http://www.w3.org/TR/wordnet-rdf/|comment|"This document presents a standard conversion of Princeton WordNet +to RDF/OWL. It describes how it was converted and gives examples +of how it may be queried for use in Semantic Web applications." 
+http://www.w3.org/TR/wordnet-rdf/|title|RDF/OWL Representation of WordNet +http://stackoverflow.com/questions/11707971/cant-install-maven-scm-handler-for-egit-for-juno|creationDate|2012-09-06 +http://stackoverflow.com/questions/11707971/cant-install-maven-scm-handler-for-egit-for-juno|tag|http://www.semanlink.net/tag/maven +http://stackoverflow.com/questions/11707971/cant-install-maven-scm-handler-for-egit-for-juno|tag|http://www.semanlink.net/tag/egit +http://stackoverflow.com/questions/11707971/cant-install-maven-scm-handler-for-egit-for-juno|tag|http://www.semanlink.net/tag/eclipse_juno +http://stackoverflow.com/questions/11707971/cant-install-maven-scm-handler-for-egit-for-juno|tag|http://www.semanlink.net/tag/m2eclipse +http://stackoverflow.com/questions/11707971/cant-install-maven-scm-handler-for-egit-for-juno|title|eclipse - Can't install Maven SCM Handler for EGit for Juno - Stack Overflow +http://stackoverflow.com/questions/11707971/cant-install-maven-scm-handler-for-egit-for-juno|creationTime|2012-09-06T14:53:44Z +http://www.paths-project.eu/|creationDate|2012-06-19 +http://www.paths-project.eu/|tag|http://www.semanlink.net/tag/cultural_heritage +http://www.paths-project.eu/|tag|http://www.semanlink.net/tag/digital_collections +http://www.paths-project.eu/|tag|http://www.semanlink.net/tag/personal_information_management +http://www.paths-project.eu/|comment|Our vision is to enable: personalised paths through digital library collections, and offer suggestions about items to look at and assist in their interpretation, supporting user’s knowledge discovery and exploration. We aim to make it easy for users to explore cultural heritage material by taking them along a trial, or pathway, created by experts, by themselves or by other users. +http://www.paths-project.eu/|title|PATHS EU project - PATHS +http://www.paths-project.eu/|creationTime|2012-06-19T12:36:07Z +http://econoclaste.org.free.fr/dotclear/index.php/?2010/10/16/1748-benoit-mandelbrot-et-l-histoire-de-la-finance|creationDate|2012-11-01 +http://econoclaste.org.free.fr/dotclear/index.php/?2010/10/16/1748-benoit-mandelbrot-et-l-histoire-de-la-finance|tag|http://www.semanlink.net/tag/finance +http://econoclaste.org.free.fr/dotclear/index.php/?2010/10/16/1748-benoit-mandelbrot-et-l-histoire-de-la-finance|tag|http://www.semanlink.net/tag/mandelbrot +http://econoclaste.org.free.fr/dotclear/index.php/?2010/10/16/1748-benoit-mandelbrot-et-l-histoire-de-la-finance|title|Benoît Mandelbrot et l'histoire de la finance +http://econoclaste.org.free.fr/dotclear/index.php/?2010/10/16/1748-benoit-mandelbrot-et-l-histoire-de-la-finance|creationTime|2012-11-01T16:24:02Z +https://arxiv.org/abs/1903.05872v1|creationDate|2019-03-17 +https://arxiv.org/abs/1903.05872v1|tag|http://www.semanlink.net/tag/semanlink2_related +https://arxiv.org/abs/1903.05872v1|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1903.05872v1|tag|http://www.semanlink.net/tag/cold_start_problem +https://arxiv.org/abs/1903.05872v1|tag|http://www.semanlink.net/tag/personal_knowledge_graph +https://arxiv.org/abs/1903.05872v1|tag|http://www.semanlink.net/tag/personal_information_management +https://arxiv.org/abs/1903.05872v1|arxiv_author|Christian Jilek +https://arxiv.org/abs/1903.05872v1|arxiv_author|Markus Schröder +https://arxiv.org/abs/1903.05872v1|arxiv_author|Andreas Dengel +https://arxiv.org/abs/1903.05872v1|comment|"Cold start problem in personal semantic services. An interactive concept mining +approach proposing concept candidates." 
+https://arxiv.org/abs/1903.05872v1|title|[1903.05872] Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services +https://arxiv.org/abs/1903.05872v1|creationTime|2019-03-17T23:33:13Z +https://arxiv.org/abs/1903.05872v1|arxiv_summary|"Semantic services (e.g. Semantic Desktops) are still afflicted by a cold +start problem: in the beginning, the user's personal information sphere, i.e. +files, mails, bookmarks, etc., is not represented by the system. Information +extraction tools used to kick-start the system typically create 1:1 +representations of the different information items. Higher level concepts, for +example found in file names, mail subjects or in the content body of these +items, are not extracted. Leaving these concepts out may lead to +underperformance, having to many of them (e.g. by making every found term a +concept) will clutter the arising knowledge graph with non-helpful relations. +In this paper, we present an interactive concept mining approach proposing +concept candidates gathered by exploiting given schemata of usual personal +information management applications and analysing the personal information +sphere using various metrics. To heed the subjective view of the user, a +graphical user interface allows to easily rank and give feedback on proposed +concept candidates, thus keeping only those actually considered relevant. A +prototypical implementation demonstrates major steps of our approach." +https://arxiv.org/abs/1903.05872v1|arxiv_firstAuthor|Markus Schröder +https://arxiv.org/abs/1903.05872v1|arxiv_updated|2019-03-14T09:37:53Z +https://arxiv.org/abs/1903.05872v1|arxiv_title|Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services +https://arxiv.org/abs/1903.05872v1|arxiv_published|2019-03-14T09:37:53Z +https://arxiv.org/abs/1903.05872v1|arxiv_num|1903.05872 +http://www.lemonde.fr/technologies/article/2010/07/16/pouvons-nous-devenir-plus-intelligents-individuellement-comme-collectivement_1389003_651865.html|creationDate|2010-07-17 +http://www.lemonde.fr/technologies/article/2010/07/16/pouvons-nous-devenir-plus-intelligents-individuellement-comme-collectivement_1389003_651865.html|tag|http://www.semanlink.net/tag/jeux +http://www.lemonde.fr/technologies/article/2010/07/16/pouvons-nous-devenir-plus-intelligents-individuellement-comme-collectivement_1389003_651865.html|tag|http://www.semanlink.net/tag/intelligence +http://www.lemonde.fr/technologies/article/2010/07/16/pouvons-nous-devenir-plus-intelligents-individuellement-comme-collectivement_1389003_651865.html|tag|http://www.semanlink.net/tag/jeu_d_echecs +http://www.lemonde.fr/technologies/article/2010/07/16/pouvons-nous-devenir-plus-intelligents-individuellement-comme-collectivement_1389003_651865.html|tag|http://www.semanlink.net/tag/education +http://www.lemonde.fr/technologies/article/2010/07/16/pouvons-nous-devenir-plus-intelligents-individuellement-comme-collectivement_1389003_651865.html|tag|http://www.semanlink.net/tag/intelligence_collective +http://www.lemonde.fr/technologies/article/2010/07/16/pouvons-nous-devenir-plus-intelligents-individuellement-comme-collectivement_1389003_651865.html|title|Pouvons-nous devenir plus intelligents, individuellement comme collectivement ? 
+http://www.lemonde.fr/technologies/article/2010/07/16/pouvons-nous-devenir-plus-intelligents-individuellement-comme-collectivement_1389003_651865.html|creationTime|2010-07-17T15:31:22Z +http://www.lemonde.fr/technologies/article/2010/07/16/pouvons-nous-devenir-plus-intelligents-individuellement-comme-collectivement_1389003_651865.html|source|Le Monde +https://newleftreview.org/II/91/evgeny-morozov-socialize-the-data-centres|creationDate|2015-01-28 +https://newleftreview.org/II/91/evgeny-morozov-socialize-the-data-centres|tag|http://www.semanlink.net/tag/evgeny_morozov +https://newleftreview.org/II/91/evgeny-morozov-socialize-the-data-centres|comment|It’s primarily from data and not their algorithms that powerful companies currently derive their advantages, and the only way to curb that power is to take the data completely out of the market realm, so that no company can own them. +https://newleftreview.org/II/91/evgeny-morozov-socialize-the-data-centres|title|Socialize the data centers +https://newleftreview.org/II/91/evgeny-morozov-socialize-the-data-centres|creationTime|2015-01-28T17:33:06Z +https://jena.apache.org/documentation/query/text-query.html|creationDate|2015-08-31 +https://jena.apache.org/documentation/query/text-query.html|tag|http://www.semanlink.net/tag/solr_rdf +https://jena.apache.org/documentation/query/text-query.html|tag|http://www.semanlink.net/tag/lucene +https://jena.apache.org/documentation/query/text-query.html|tag|http://www.semanlink.net/tag/jena +https://jena.apache.org/documentation/query/text-query.html|tag|http://www.semanlink.net/tag/text_search +https://jena.apache.org/documentation/query/text-query.html|title|Jena Text +https://jena.apache.org/documentation/query/text-query.html|creationTime|2015-08-31T13:05:32Z +http://serverfault.com/questions/179372/mac-os-snow-leopard-why-does-my-mysql-default-socket-value-not-change-in-my-phpi|creationDate|2011-07-18 +http://serverfault.com/questions/179372/mac-os-snow-leopard-why-does-my-mysql-default-socket-value-not-change-in-my-phpi|tag|http://www.semanlink.net/tag/php +http://serverfault.com/questions/179372/mac-os-snow-leopard-why-does-my-mysql-default-socket-value-not-change-in-my-phpi|tag|http://www.semanlink.net/tag/tips +http://serverfault.com/questions/179372/mac-os-snow-leopard-why-does-my-mysql-default-socket-value-not-change-in-my-phpi|tag|http://www.semanlink.net/tag/mysql +http://serverfault.com/questions/179372/mac-os-snow-leopard-why-does-my-mysql-default-socket-value-not-change-in-my-phpi|comment|" + + +" +http://serverfault.com/questions/179372/mac-os-snow-leopard-why-does-my-mysql-default-socket-value-not-change-in-my-phpi|title|php - Mac OS Snow Leopard: Why does my mysql.default_socket value not change in my phpinfo() page? 
- Server Fault +http://serverfault.com/questions/179372/mac-os-snow-leopard-why-does-my-mysql-default-socket-value-not-change-in-my-phpi|creationTime|2011-07-18T18:48:08Z +http://aclweb.org/anthology/P18-2020|creationDate|2018-07-12 +http://aclweb.org/anthology/P18-2020|tag|http://www.semanlink.net/tag/named_entity_recognition +http://aclweb.org/anthology/P18-2020|comment|"BiLSTM outperforms the CRF when large datasets are available and performs inferior for the smallest dataset +" +http://aclweb.org/anthology/P18-2020|title|A Named Entity Recognition Shootout for German (2018) +http://aclweb.org/anthology/P18-2020|creationTime|2018-07-12T08:43:49Z +https://yashuseth.blog/2018/06/17/understanding-universal-language-model-fine-tuning-ulmfit/|creationDate|2018-06-19 +https://yashuseth.blog/2018/06/17/understanding-universal-language-model-fine-tuning-ulmfit/|tag|http://www.semanlink.net/tag/ulmfit +https://yashuseth.blog/2018/06/17/understanding-universal-language-model-fine-tuning-ulmfit/|title|Understanding the Working of Universal Language Model Fine Tuning (ULMFiT) – Let the Machines Learn +https://yashuseth.blog/2018/06/17/understanding-universal-language-model-fine-tuning-ulmfit/|creationTime|2018-06-19T10:06:38Z +http://support.google.com/webmasters/bin/answer.py?hl=en&answer=146750|creationDate|2012-04-25 +http://support.google.com/webmasters/bin/answer.py?hl=en&answer=146750|tag|http://www.semanlink.net/tag/google_rich_snippets +http://support.google.com/webmasters/bin/answer.py?hl=en&answer=146750|tag|http://www.semanlink.net/tag/product_description +http://support.google.com/webmasters/bin/answer.py?hl=en&answer=146750|title|Rich snippets - Products +http://support.google.com/webmasters/bin/answer.py?hl=en&answer=146750|creationTime|2012-04-25T13:04:51Z +http://internetactu.blog.lemonde.fr/2012/11/30/linnovation-educative-une-question-economique/|creationDate|2012-11-30 +http://internetactu.blog.lemonde.fr/2012/11/30/linnovation-educative-une-question-economique/|tag|http://www.semanlink.net/tag/mooc +http://internetactu.blog.lemonde.fr/2012/11/30/linnovation-educative-une-question-economique/|title|L’innovation éducative : une question économique ? 
InternetActu +http://internetactu.blog.lemonde.fr/2012/11/30/linnovation-educative-une-question-economique/|creationTime|2012-11-30T13:50:41Z +http://passeurdesciences.blog.lemonde.fr/2014/10/05/les-poles-magnetiques-terrestres-peuvent-sinverser-brutalement/|creationDate|2014-10-05 +http://passeurdesciences.blog.lemonde.fr/2014/10/05/les-poles-magnetiques-terrestres-peuvent-sinverser-brutalement/|tag|http://www.semanlink.net/tag/magnetisme_terrestre +http://passeurdesciences.blog.lemonde.fr/2014/10/05/les-poles-magnetiques-terrestres-peuvent-sinverser-brutalement/|title|Les pôles magnétiques terrestres peuvent s’inverser brutalement Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2014/10/05/les-poles-magnetiques-terrestres-peuvent-sinverser-brutalement/|creationTime|2014-10-05T23:54:57Z +https://lejournal.cnrs.fr/articles/comment-nos-cellules-ont-elles-appris-a-respirer?utm_content=bufferda8e9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-04-01 +https://lejournal.cnrs.fr/articles/comment-nos-cellules-ont-elles-appris-a-respirer?utm_content=bufferda8e9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/lynn_margulis +https://lejournal.cnrs.fr/articles/comment-nos-cellules-ont-elles-appris-a-respirer?utm_content=bufferda8e9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/genetique_et_evolution +https://lejournal.cnrs.fr/articles/comment-nos-cellules-ont-elles-appris-a-respirer?utm_content=bufferda8e9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/symbiose +https://lejournal.cnrs.fr/articles/comment-nos-cellules-ont-elles-appris-a-respirer?utm_content=bufferda8e9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/mitochondries +https://lejournal.cnrs.fr/articles/comment-nos-cellules-ont-elles-appris-a-respirer?utm_content=bufferda8e9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Comment nos cellules ont-elles appris à respirer? CNRS Le journal +https://lejournal.cnrs.fr/articles/comment-nos-cellules-ont-elles-appris-a-respirer?utm_content=bufferda8e9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-04-01T00:24:48Z +http://www.pbs.org/wgbh/nova/newton/einstein.html|creationDate|2005-11-03 +http://www.pbs.org/wgbh/nova/newton/einstein.html|tag|http://www.semanlink.net/tag/newton +http://www.pbs.org/wgbh/nova/newton/einstein.html|tag|http://www.semanlink.net/tag/einstein +http://www.pbs.org/wgbh/nova/newton/einstein.html|title|NOVA Einstein on Newton PBS +http://www.no-burn.org/wp-content/uploads/Report-April-22.pdf|creationDate|2019-04-23 +http://www.no-burn.org/wp-content/uploads/Report-April-22.pdf|tag|http://www.semanlink.net/tag/plastic +http://www.no-burn.org/wp-content/uploads/Report-April-22.pdf|tag|http://www.semanlink.net/tag/pollution +http://www.no-burn.org/wp-content/uploads/Report-April-22.pdf|tag|http://www.semanlink.net/tag/plastic_waste_trade +http://www.no-burn.org/wp-content/uploads/Report-April-22.pdf|tag|http://www.semanlink.net/tag/greenpeace +http://www.no-burn.org/wp-content/uploads/Report-April-22.pdf|comment|When China took action to protect its borders from foreign plastic pollution by effectively shutting its doors to plastic waste imports in the beginning of 2018, it threw the global plastic recycling industry into chaos. 
+http://www.no-burn.org/wp-content/uploads/Report-April-22.pdf|title|Greenpeace: Stories from the frontlines of the plastic waste trade +http://www.no-burn.org/wp-content/uploads/Report-April-22.pdf|creationTime|2019-04-23T11:50:52Z +http://www.ldodds.com/blog/2009/10/surveying-and-classifying-sparql-extensions/|creationDate|2009-11-12 +http://www.ldodds.com/blog/2009/10/surveying-and-classifying-sparql-extensions/|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/2009/10/surveying-and-classifying-sparql-extensions/|tag|http://www.semanlink.net/tag/sparql_extensions +http://www.ldodds.com/blog/2009/10/surveying-and-classifying-sparql-extensions/|title|Surveying and Classifying SPARQL Extensions « Lost Boy +http://www.ldodds.com/blog/2009/10/surveying-and-classifying-sparql-extensions/|creationTime|2009-11-12T14:06:25Z +https://conferences.oreilly.com/strata/strata-ny/public/schedule/detail/77597|creationDate|2019-05-16 +https://conferences.oreilly.com/strata/strata-ny/public/schedule/detail/77597|tag|http://www.semanlink.net/tag/elasticsearch +https://conferences.oreilly.com/strata/strata-ny/public/schedule/detail/77597|tag|http://www.semanlink.net/tag/knowledge_graph +https://conferences.oreilly.com/strata/strata-ny/public/schedule/detail/77597|tag|http://www.semanlink.net/tag/giovanni_tummarello +https://conferences.oreilly.com/strata/strata-ny/public/schedule/detail/77597|title|Supercharging Elasticsearch for extended Knowledge Graph use cases: Data science + business analytics training: Strata Data Conference +https://conferences.oreilly.com/strata/strata-ny/public/schedule/detail/77597|creationTime|2019-05-16T01:03:08Z +http://martinfowler.com/articles/nosql-intro.pdf|creationDate|2013-03-13 +http://martinfowler.com/articles/nosql-intro.pdf|tag|http://www.semanlink.net/tag/nosql_pour_les_nuls +http://martinfowler.com/articles/nosql-intro.pdf|title|Polyglot persistence +http://martinfowler.com/articles/nosql-intro.pdf|creationTime|2013-03-13T11:51:51Z +http://www.bbc.com/news/science-environment-27935479|creationDate|2014-06-21 +http://www.bbc.com/news/science-environment-27935479|tag|http://www.semanlink.net/tag/big_bang +http://www.bbc.com/news/science-environment-27935479|tag|http://www.semanlink.net/tag/cosmic_inflation +http://www.bbc.com/news/science-environment-27935479|title|BBC News - Cosmic inflation: Confidence lowered for Big Bang signal +http://www.bbc.com/news/science-environment-27935479|creationTime|2014-06-21T18:18:06Z +http://googlewebmastercentral.blogspot.fr/|creationDate|2013-08-23 +http://googlewebmastercentral.blogspot.fr/|tag|http://www.semanlink.net/tag/webmasters_google +http://googlewebmastercentral.blogspot.fr/|title|Official Google Webmaster Central Blog +http://googlewebmastercentral.blogspot.fr/|creationTime|2013-08-23T14:14:00Z +http://www.topquadrant.com/w3c/RDFa/|creationDate|2011-01-18 +http://www.topquadrant.com/w3c/RDFa/|tag|http://www.semanlink.net/tag/topquadrant +http://www.topquadrant.com/w3c/RDFa/|tag|http://www.semanlink.net/tag/rdfa +http://www.topquadrant.com/w3c/RDFa/|title|TopQuadrant's RDFa Implementation Report +http://www.topquadrant.com/w3c/RDFa/|creationTime|2011-01-18T12:26:36Z +http://java.sun.com/applets/jdk/1.1/demo/GraphLayout/|creationDate|2005-09-25 +http://java.sun.com/applets/jdk/1.1/demo/GraphLayout/|tag|http://www.semanlink.net/tag/graph_visualization +http://java.sun.com/applets/jdk/1.1/demo/GraphLayout/|tag|http://www.semanlink.net/tag/java 
+http://java.sun.com/applets/jdk/1.1/demo/GraphLayout/|title|Graph Layout Graph Layout : sun jdk1.0 sample code +http://www.semanticweb.com/insight/article.php/12163_3700611_1|creationDate|2008-05-20 +http://www.semanticweb.com/insight/article.php/12163_3700611_1|tag|http://www.semanlink.net/tag/oracle +http://www.semanticweb.com/insight/article.php/12163_3700611_1|tag|http://www.semanlink.net/tag/soa +http://www.semanticweb.com/insight/article.php/12163_3700611_1|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.semanticweb.com/insight/article.php/12163_3700611_1|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.semanticweb.com/insight/article.php/12163_3700611_1|comment|moved to http://www.semanticweb.com/main/oracle_sees_semantic_tech_solving_business_problems_138811.asp +http://www.semanticweb.com/insight/article.php/12163_3700611_1|title|Oracle Sees Semantic Tech Solving Business Problems +http://www.semanticweb.com/insight/article.php/12163_3700611_1|creationTime|2008-05-20T15:19:51Z +http://www.w3.org/TR/grddl/|creationDate|2007-07-17 +http://www.w3.org/TR/grddl/|tag|http://www.semanlink.net/tag/grddl +http://www.w3.org/TR/grddl/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/TR/grddl/|comment|W3C Proposed Recommendation 16 July 2007. Main specification doc +http://www.w3.org/TR/grddl/|title|Gleaning Resource Descriptions from Dialects of Languages (GRDDL) +http://www.w3.org/TR/grddl/|creationTime|2007-07-17T22:53:15Z +http://www.youtube.com/watch?v=OMFBuHsKXb0&feature=player_embedded|creationDate|2010-06-01 +http://www.youtube.com/watch?v=OMFBuHsKXb0&feature=player_embedded|tag|http://www.semanlink.net/tag/google +http://www.youtube.com/watch?v=OMFBuHsKXb0&feature=player_embedded|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=OMFBuHsKXb0&feature=player_embedded|tag|http://www.semanlink.net/tag/rigolo +http://www.youtube.com/watch?v=OMFBuHsKXb0&feature=player_embedded|title|Google Home View +http://www.youtube.com/watch?v=OMFBuHsKXb0&feature=player_embedded|creationTime|2010-06-01T11:12:03Z +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/disp-bizer.pdf|creationDate|2007-04-19 +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/disp-bizer.pdf|tag|http://www.semanlink.net/tag/slides +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/disp-bizer.pdf|tag|http://www.semanlink.net/tag/linked_data +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/disp-bizer.pdf|tag|http://www.semanlink.net/tag/chris_bizer +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/disp-bizer.pdf|title|Turning the Web into a Database +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/disp-bizer.pdf|creationTime|2007-04-19T22:34:14Z +http://openjena.org/wiki/TDB/QuadFilter|creationDate|2011-03-15 +http://openjena.org/wiki/TDB/QuadFilter|tag|http://www.semanlink.net/tag/jena_tdb +http://openjena.org/wiki/TDB/QuadFilter|comment|"This page describes how to filter quads at the lowest level of TDB. It can be used to hide certain quads (tripes in named graphs) or triples. 
+" +http://openjena.org/wiki/TDB/QuadFilter|title|TDB/QuadFilter - Jena Wiki +http://openjena.org/wiki/TDB/QuadFilter|creationTime|2011-03-15T15:50:32Z +http://www.devx.com/semantic/Article/35480/1954|creationDate|2008-09-10 +http://www.devx.com/semantic/Article/35480/1954|tag|http://www.semanlink.net/tag/jena_and_database +http://www.devx.com/semantic/Article/35480/1954|tag|http://www.semanlink.net/tag/triplestore +http://www.devx.com/semantic/Article/35480/1954|title|Create Scalable Semantic Applications with Database-Backed RDF Stores +http://www.devx.com/semantic/Article/35480/1954|creationTime|2008-09-10T18:42:45Z +http://ccl.northwestern.edu/netlogo/|creationDate|2011-06-22 +http://ccl.northwestern.edu/netlogo/|tag|http://www.semanlink.net/tag/self_organizing_systems +http://ccl.northwestern.edu/netlogo/|comment|NetLogo is a multi-agent programmable modeling environment. +http://ccl.northwestern.edu/netlogo/|title|NetLogo Home Page +http://ccl.northwestern.edu/netlogo/|creationTime|2011-06-22T15:19:38Z +http://www.htxt.co.za/2013/12/04/hacking-for-kids-project-afrimakers-reaches-crowdfunding-milestone/|creationDate|2013-12-14 +http://www.htxt.co.za/2013/12/04/hacking-for-kids-project-afrimakers-reaches-crowdfunding-milestone/|tag|http://www.semanlink.net/tag/new_africa +http://www.htxt.co.za/2013/12/04/hacking-for-kids-project-afrimakers-reaches-crowdfunding-milestone/|tag|http://www.semanlink.net/tag/arduino +http://www.htxt.co.za/2013/12/04/hacking-for-kids-project-afrimakers-reaches-crowdfunding-milestone/|title|htxt.africa Hacking for kids project Afrimakers reaches crowdfunding milestone +http://www.htxt.co.za/2013/12/04/hacking-for-kids-project-afrimakers-reaches-crowdfunding-milestone/|creationTime|2013-12-14T19:32:53Z +http://blogs.talis.com/nodalities/files/2008/09/mic_2007_01.jpg|creationDate|2008-09-02 +http://blogs.talis.com/nodalities/files/2008/09/mic_2007_01.jpg|tag|http://www.semanlink.net/tag/michael_hausenblas +http://blogs.talis.com/nodalities/files/2008/09/mic_2007_01.jpg|title|mic_2007_01.jpg (Image JPEG, 200x218 pixels) +http://blogs.talis.com/nodalities/files/2008/09/mic_2007_01.jpg|creationTime|2008-09-02T14:00:22Z +http://web.archive.org/web/19981202230410/http://www.google.com/|creationDate|2008-06-22 +http://web.archive.org/web/19981202230410/http://www.google.com/|tag|http://www.semanlink.net/tag/google +http://web.archive.org/web/19981202230410/http://www.google.com/|title|"Google page in 1998 (""Beta"")" +http://web.archive.org/web/19981202230410/http://www.google.com/|creationTime|2008-06-22T03:03:31Z +http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20markov%20distribute.pdf|creationDate|2013-02-28 +http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20markov%20distribute.pdf|tag|http://www.semanlink.net/tag/destination_prediction +http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20markov%20distribute.pdf|title|A Markov Model for Driver Turn Prediction +http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20markov%20distribute.pdf|creationTime|2013-02-28T14:40:46Z +http://www.w3.org/2001/tag/doc/httpRange-14/HttpRange-14.html|creationDate|2007-10-13 +http://www.w3.org/2001/tag/doc/httpRange-14/HttpRange-14.html|tag|http://www.semanlink.net/tag/w3c_tag +http://www.w3.org/2001/tag/doc/httpRange-14/HttpRange-14.html|tag|http://www.semanlink.net/tag/dereferencing_http_uris +http://www.w3.org/2001/tag/doc/httpRange-14/HttpRange-14.html|comment|(W3C) 
Draft Tag Finding 04 October 2007 +http://www.w3.org/2001/tag/doc/httpRange-14/HttpRange-14.html|title|Dereferencing HTTP URIs +http://www.w3.org/2001/tag/doc/httpRange-14/HttpRange-14.html|creationTime|2007-10-13T19:24:32Z +https://en.wikipedia.org/wiki/Jharia_coalfield|creationDate|2016-11-17 +https://en.wikipedia.org/wiki/Jharia_coalfield|tag|http://www.semanlink.net/tag/coal_seam_fire +https://en.wikipedia.org/wiki/Jharia_coalfield|comment|Jharia is famous for a coal field fire that has burned underground for nearly a century +https://en.wikipedia.org/wiki/Jharia_coalfield|title|Jharia coalfield +https://en.wikipedia.org/wiki/Jharia_coalfield|creationTime|2016-11-17T00:10:46Z +http://arxiv.org/abs/1603.05106v1|creationDate|2016-03-18 +http://arxiv.org/abs/1603.05106v1|tag|http://www.semanlink.net/tag/one_shot_generalization +http://arxiv.org/abs/1603.05106v1|tag|http://www.semanlink.net/tag/machine_learning +http://arxiv.org/abs/1603.05106v1|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/1603.05106v1|arxiv_author|Karol Gregor +http://arxiv.org/abs/1603.05106v1|arxiv_author|Ivo Danihelka +http://arxiv.org/abs/1603.05106v1|arxiv_author|Danilo Jimenez Rezende +http://arxiv.org/abs/1603.05106v1|arxiv_author|Daan Wierstra +http://arxiv.org/abs/1603.05106v1|arxiv_author|Shakir Mohamed +http://arxiv.org/abs/1603.05106v1|title|[1603.05106] One-Shot Generalization in Deep Generative Models +http://arxiv.org/abs/1603.05106v1|creationTime|2016-03-18T00:02:19Z +http://arxiv.org/abs/1603.05106v1|arxiv_summary|"Humans have an impressive ability to reason about new concepts and +experiences from just a single example. In particular, humans have an ability +for one-shot generalization: an ability to encounter a new concept, understand +its structure, and then be able to generate compelling alternative variations +of the concept. We develop machine learning systems with this important +capacity by developing new deep generative models, models that combine the +representational power of deep learning with the inferential power of Bayesian +reasoning. We develop a class of sequential generative models that are built on +the principles of feedback and attention. These two characteristics lead to +generative models that are among the state-of-the art in density estimation and +image generation. We demonstrate the one-shot generalization ability of our +models using three tasks: unconditional sampling, generating new exemplars of a +given concept, and generating new exemplars of a family of concepts. In all +cases our models are able to generate compelling and diverse samples---having +seen new examples just once---providing an important class of general-purpose +models for one-shot machine learning." 
+http://arxiv.org/abs/1603.05106v1|arxiv_firstAuthor|Danilo Jimenez Rezende +http://arxiv.org/abs/1603.05106v1|arxiv_updated|2016-05-25T12:57:19Z +http://arxiv.org/abs/1603.05106v1|arxiv_title|One-Shot Generalization in Deep Generative Models +http://arxiv.org/abs/1603.05106v1|arxiv_published|2016-03-16T14:10:00Z +http://arxiv.org/abs/1603.05106v1|arxiv_num|1603.05106 +http://edition.cnn.com/2013/03/16/opinion/schneier-internet-surveillance/index.html?eref=edition|creationDate|2013-03-18 +http://edition.cnn.com/2013/03/16/opinion/schneier-internet-surveillance/index.html?eref=edition|tag|http://www.semanlink.net/tag/big_brother +http://edition.cnn.com/2013/03/16/opinion/schneier-internet-surveillance/index.html?eref=edition|tag|http://www.semanlink.net/tag/cybersurveillance +http://edition.cnn.com/2013/03/16/opinion/schneier-internet-surveillance/index.html?eref=edition|tag|http://www.semanlink.net/tag/privacy_and_internet +http://edition.cnn.com/2013/03/16/opinion/schneier-internet-surveillance/index.html?eref=edition|comment|Welcome to an Internet without privacy, and we've ended up here with hardly a fight. +http://edition.cnn.com/2013/03/16/opinion/schneier-internet-surveillance/index.html?eref=edition|title|Opinion: The Internet is a surveillance state - CNN.com +http://edition.cnn.com/2013/03/16/opinion/schneier-internet-surveillance/index.html?eref=edition|creationTime|2013-03-18T18:17:50Z +http://fishbowl.pastiche.org/2003/01/13/when_is_a_constant_not_a_constant/|creationDate|2012-02-18 +http://fishbowl.pastiche.org/2003/01/13/when_is_a_constant_not_a_constant/|tag|http://www.semanlink.net/tag/java_dev +http://fishbowl.pastiche.org/2003/01/13/when_is_a_constant_not_a_constant/|title|When is a constant not a constant? - The Fishbowl +http://fishbowl.pastiche.org/2003/01/13/when_is_a_constant_not_a_constant/|creationTime|2012-02-18T17:01:06Z +http://fgiasson.com/blog/index.php/2007/05/22/browsing-musicbrainzs-dataset-via-uri-dereferencing/|creationDate|2007-05-23 +http://fgiasson.com/blog/index.php/2007/05/22/browsing-musicbrainzs-dataset-via-uri-dereferencing/|tag|http://www.semanlink.net/tag/musicbrainz +http://fgiasson.com/blog/index.php/2007/05/22/browsing-musicbrainzs-dataset-via-uri-dereferencing/|tag|http://www.semanlink.net/tag/fps_post +http://fgiasson.com/blog/index.php/2007/05/22/browsing-musicbrainzs-dataset-via-uri-dereferencing/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2007/05/22/browsing-musicbrainzs-dataset-via-uri-dereferencing/|title|Browsing Musicbrainz’s dataset via URI dereferencing at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2007/05/22/browsing-musicbrainzs-dataset-via-uri-dereferencing/|creationTime|2007-05-23T18:46:19Z +https://arxiv.org/abs/1706.03762|creationDate|2018-10-12 +https://arxiv.org/abs/1706.03762|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1706.03762|tag|http://www.semanlink.net/tag/attention_is_all_you_need +https://arxiv.org/abs/1706.03762|arxiv_author|Ashish Vaswani +https://arxiv.org/abs/1706.03762|arxiv_author|Noam Shazeer +https://arxiv.org/abs/1706.03762|arxiv_author|Niki Parmar +https://arxiv.org/abs/1706.03762|arxiv_author|Illia Polosukhin +https://arxiv.org/abs/1706.03762|arxiv_author|Llion Jones +https://arxiv.org/abs/1706.03762|arxiv_author|Jakob Uszkoreit +https://arxiv.org/abs/1706.03762|arxiv_author|Aidan N. 
Gomez +https://arxiv.org/abs/1706.03762|arxiv_author|Lukasz Kaiser +https://arxiv.org/abs/1706.03762|comment|> The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the **Transformer**, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. +https://arxiv.org/abs/1706.03762|title|[1706.03762] Attention Is All You Need +https://arxiv.org/abs/1706.03762|creationTime|2018-10-12T18:50:14Z +https://arxiv.org/abs/1706.03762|arxiv_summary|"The dominant sequence transduction models are based on complex recurrent or +convolutional neural networks in an encoder-decoder configuration. The best +performing models also connect the encoder and decoder through an attention +mechanism. We propose a new simple network architecture, the Transformer, based +solely on attention mechanisms, dispensing with recurrence and convolutions +entirely. Experiments on two machine translation tasks show these models to be +superior in quality while being more parallelizable and requiring significantly +less time to train. Our model achieves 28.4 BLEU on the WMT 2014 +English-to-German translation task, improving over the existing best results, +including ensembles by over 2 BLEU. On the WMT 2014 English-to-French +translation task, our model establishes a new single-model state-of-the-art +BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction +of the training costs of the best models from the literature. We show that the +Transformer generalizes well to other tasks by applying it successfully to +English constituency parsing both with large and limited training data." 
+https://arxiv.org/abs/1706.03762|arxiv_firstAuthor|Ashish Vaswani +https://arxiv.org/abs/1706.03762|arxiv_updated|2017-12-06T03:30:32Z +https://arxiv.org/abs/1706.03762|arxiv_title|Attention Is All You Need +https://arxiv.org/abs/1706.03762|arxiv_published|2017-06-12T17:57:34Z +https://arxiv.org/abs/1706.03762|arxiv_num|1706.03762 +http://www.sitepoint.com/blogs/2009/03/19/obama-groundbreaking-use-semantic-web/|creationDate|2009-03-31 +http://www.sitepoint.com/blogs/2009/03/19/obama-groundbreaking-use-semantic-web/|tag|http://www.semanlink.net/tag/david_peterson +http://www.sitepoint.com/blogs/2009/03/19/obama-groundbreaking-use-semantic-web/|tag|http://www.semanlink.net/tag/semantic_web +http://www.sitepoint.com/blogs/2009/03/19/obama-groundbreaking-use-semantic-web/|tag|http://www.semanlink.net/tag/obama +http://www.sitepoint.com/blogs/2009/03/19/obama-groundbreaking-use-semantic-web/|title|Obama’s Groundbreaking use of the Semantic Web +http://www.sitepoint.com/blogs/2009/03/19/obama-groundbreaking-use-semantic-web/|creationTime|2009-03-31T14:29:46Z +http://de.wikipedia.org/wiki/Die_Welle_(2008)|creationDate|2014-09-10 +http://de.wikipedia.org/wiki/Die_Welle_(2008)|tag|http://www.semanlink.net/tag/film_allemand +http://de.wikipedia.org/wiki/Die_Welle_(2008)|tag|http://www.semanlink.net/tag/fascisme +http://de.wikipedia.org/wiki/Die_Welle_(2008)|title|Die Welle (2008) – Wikipedia +http://de.wikipedia.org/wiki/Die_Welle_(2008)|creationTime|2014-09-10T22:36:40Z +https://guillaumegenthial.github.io/testing.html|creationDate|2018-05-21 +https://guillaumegenthial.github.io/testing.html|tag|http://www.semanlink.net/tag/tensorflow +https://guillaumegenthial.github.io/testing.html|tag|http://www.semanlink.net/tag/guillaume_genthial +https://guillaumegenthial.github.io/testing.html|tag|http://www.semanlink.net/tag/unit_test +https://guillaumegenthial.github.io/testing.html|title|Testing Tensorflow code +https://guillaumegenthial.github.io/testing.html|creationTime|2018-05-21T12:04:22Z +http://www.youtube.com/watch?v=AgHHX9R4Qtk|creationDate|2010-07-20 +http://www.youtube.com/watch?v=AgHHX9R4Qtk|tag|http://www.semanlink.net/tag/juif +http://www.youtube.com/watch?v=AgHHX9R4Qtk|tag|http://www.semanlink.net/tag/obama +http://www.youtube.com/watch?v=AgHHX9R4Qtk|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=AgHHX9R4Qtk|tag|http://www.semanlink.net/tag/rigolo +http://www.youtube.com/watch?v=AgHHX9R4Qtk|title|Sarah Silverman and The Great Schlep +http://www.youtube.com/watch?v=AgHHX9R4Qtk|creationTime|2010-07-20T00:40:57Z +https://en.wikipedia.org/wiki/Volver|creationDate|2016-09-25 +https://en.wikipedia.org/wiki/Volver|tag|http://www.semanlink.net/tag/pedro_almodovar +https://en.wikipedia.org/wiki/Volver|tag|http://www.semanlink.net/tag/film_espagnol +https://en.wikipedia.org/wiki/Volver|title|Volver +https://en.wikipedia.org/wiki/Volver|creationTime|2016-09-25T22:58:16Z +http://groups.google.com/group/rdf123/browse_thread/thread/ca8c79c26ec0dc40/8a8c7781dacfb0cc?hl=en#8a8c7781dacfb0cc|creationDate|2007-08-30 +http://groups.google.com/group/rdf123/browse_thread/thread/ca8c79c26ec0dc40/8a8c7781dacfb0cc?hl=en#8a8c7781dacfb0cc|tag|http://www.semanlink.net/tag/rdf123 +http://groups.google.com/group/rdf123/browse_thread/thread/ca8c79c26ec0dc40/8a8c7781dacfb0cc?hl=en#8a8c7781dacfb0cc|tag|http://www.semanlink.net/tag/uri +http://groups.google.com/group/rdf123/browse_thread/thread/ca8c79c26ec0dc40/8a8c7781dacfb0cc?hl=en#8a8c7781dacfb0cc|title|Applying a character transformation 
to the content of a cell when creating an URI - RDF123 Google Groups +http://groups.google.com/group/rdf123/browse_thread/thread/ca8c79c26ec0dc40/8a8c7781dacfb0cc?hl=en#8a8c7781dacfb0cc|creationTime|2007-08-30T02:43:20Z +http://iws.seu.edu.cn/services/falcons/objectsearch/index.jsp|creationDate|2007-11-09 +http://iws.seu.edu.cn/services/falcons/objectsearch/index.jsp|tag|http://www.semanlink.net/tag/semantic_web_search_engine +http://iws.seu.edu.cn/services/falcons/objectsearch/index.jsp|tag|http://www.semanlink.net/tag/ldow2008 +http://iws.seu.edu.cn/services/falcons/objectsearch/index.jsp|comment|Falcons is a keyword-based search engine for Semantic Web entities. It enables searching concepts guided by recommended vocabularies, searching objects guided by recommended concepts, and browsing entity summarization via concept spaces. +http://iws.seu.edu.cn/services/falcons/objectsearch/index.jsp|title|Falcons +http://iws.seu.edu.cn/services/falcons/objectsearch/index.jsp|creationTime|2007-11-09T10:00:51Z +http://www.crpf-poitou-charentes.fr/IMG/pdf/merisier.pdf|creationDate|2011-11-14 +http://www.crpf-poitou-charentes.fr/IMG/pdf/merisier.pdf|tag|http://www.semanlink.net/tag/plantation_d_arbres +http://www.crpf-poitou-charentes.fr/IMG/pdf/merisier.pdf|tag|http://www.semanlink.net/tag/merisier +http://www.crpf-poitou-charentes.fr/IMG/pdf/merisier.pdf|title|Merisier +http://www.crpf-poitou-charentes.fr/IMG/pdf/merisier.pdf|creationTime|2011-11-14T21:04:44Z +http://www.wired.com/gadgetlab/2010/07/hardware-hobbyists-arduino/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|creationDate|2013-07-11 +http://www.wired.com/gadgetlab/2010/07/hardware-hobbyists-arduino/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|tag|http://www.semanlink.net/tag/arduino +http://www.wired.com/gadgetlab/2010/07/hardware-hobbyists-arduino/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|title|Why Arduino Is a Hit With Hardware Hackers Gadget Lab Wired.com +http://www.wired.com/gadgetlab/2010/07/hardware-hobbyists-arduino/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|creationTime|2013-07-11T08:53:43Z +https://pxlnv.com/blog/bullshit-web/|creationDate|2018-08-05 +https://pxlnv.com/blog/bullshit-web/|tag|http://www.semanlink.net/tag/accelerated_mobile_pages +https://pxlnv.com/blog/bullshit-web/|tag|http://www.semanlink.net/tag/bullshit_web +https://pxlnv.com/blog/bullshit-web/|comment|the essence of bullshit is an indifference to the way things really are +https://pxlnv.com/blog/bullshit-web/|title|The Bullshit Web — Pixel Envy +https://pxlnv.com/blog/bullshit-web/|creationTime|2018-08-05T15:49:23Z +http://www.theguardian.com/politics/live/2016/jun/25/brexit-live-emergency-meetings-eu-uk-leave-vote#comment-77205935|creationDate|2016-06-26 +http://www.theguardian.com/politics/live/2016/jun/25/brexit-live-emergency-meetings-eu-uk-leave-vote#comment-77205935|tag|http://www.semanlink.net/tag/brexit +http://www.theguardian.com/politics/live/2016/jun/25/brexit-live-emergency-meetings-eu-uk-leave-vote#comment-77205935|title|If Boris Johnson looked downbeat yesterday, that is because he realises that he has lost. 
(Guardian) +http://www.theguardian.com/politics/live/2016/jun/25/brexit-live-emergency-meetings-eu-uk-leave-vote#comment-77205935|creationTime|2016-06-26T23:14:45Z +http://www.ina.fr/pub/alimentation-boisson/video/PUB777003071/perrier-ivresse.fr.html|creationDate|2009-06-25 +http://www.ina.fr/pub/alimentation-boisson/video/PUB777003071/perrier-ivresse.fr.html|tag|http://www.semanlink.net/tag/edith_piaf +http://www.ina.fr/pub/alimentation-boisson/video/PUB777003071/perrier-ivresse.fr.html|tag|http://www.semanlink.net/tag/ina +http://www.ina.fr/pub/alimentation-boisson/video/PUB777003071/perrier-ivresse.fr.html|tag|http://www.semanlink.net/tag/publicite +http://www.ina.fr/pub/alimentation-boisson/video/PUB777003071/perrier-ivresse.fr.html|tag|http://www.semanlink.net/tag/perrier +http://www.ina.fr/pub/alimentation-boisson/video/PUB777003071/perrier-ivresse.fr.html|title|Perrier : Ivresse +http://www.ina.fr/pub/alimentation-boisson/video/PUB777003071/perrier-ivresse.fr.html|creationTime|2009-06-25T08:32:43Z +http://www.datavirtuality.com|creationDate|2014-04-30 +http://www.datavirtuality.com|tag|http://www.semanlink.net/tag/data_warehouse +http://www.datavirtuality.com|tag|http://www.semanlink.net/tag/tech_company +http://www.datavirtuality.com|title|Data Virtuality +http://www.datavirtuality.com|creationTime|2014-04-30T17:17:43Z +http://www.lemonde.fr/idees/article/2011/01/08/edgar-morin-les-nuits-sont-enceintes_1462821_3232.html|creationDate|2011-01-08 +http://www.lemonde.fr/idees/article/2011/01/08/edgar-morin-les-nuits-sont-enceintes_1462821_3232.html|tag|http://www.semanlink.net/tag/edgar_morin +http://www.lemonde.fr/idees/article/2011/01/08/edgar-morin-les-nuits-sont-enceintes_1462821_3232.html|tag|http://www.semanlink.net/tag/etat_du_monde +http://www.lemonde.fr/idees/article/2011/01/08/edgar-morin-les-nuits-sont-enceintes_1462821_3232.html|comment|"La mort de la pieuvre totalitaire a été suivie par le formidable déchaînement de celle du fanatisme religieux et celle du capitalisme financier.
+Ce qui est remarquable est que les Etats ne se préoccupent nullement de maîtriser ou au moins contrôler ""le marché"", c'est-à-dire la spéculation et le capitalisme financier, mais par contre s'efforcent de juguler les forces démocratisantes et libertaires qui font la vertu d'Internet." +http://www.lemonde.fr/idees/article/2011/01/08/edgar-morin-les-nuits-sont-enceintes_1462821_3232.html|title|"Edgar Morin : ""Les nuits sont enceintes et nul ne connaît le jour qui naîtra.""" +http://www.lemonde.fr/idees/article/2011/01/08/edgar-morin-les-nuits-sont-enceintes_1462821_3232.html|creationTime|2011-01-08T17:18:03Z +http://www.lemonde.fr/idees/article/2011/01/08/edgar-morin-les-nuits-sont-enceintes_1462821_3232.html|source|Le Monde +http://www.lemonde.fr/idees/article/2011/01/08/edgar-morin-les-nuits-sont-enceintes_1462821_3232.html|date|2011-01-09 +http://ngmodules.org/modules/angular-marked|creationDate|2015-09-04 +http://ngmodules.org/modules/angular-marked|tag|http://www.semanlink.net/tag/markdown +http://ngmodules.org/modules/angular-marked|tag|http://www.semanlink.net/tag/angularjs_module +http://ngmodules.org/modules/angular-marked|title|angular-marked +http://ngmodules.org/modules/angular-marked|creationTime|2015-09-04T22:35:21Z +http://www.guardian.co.uk/world/2013/jun/10/nsa-spying-scandal-what-we-have-learned|creationDate|2013-06-11 +http://www.guardian.co.uk/world/2013/jun/10/nsa-spying-scandal-what-we-have-learned|tag|http://www.semanlink.net/tag/nsa_spying_scandal +http://www.guardian.co.uk/world/2013/jun/10/nsa-spying-scandal-what-we-have-learned|title|NSA spying scandal: what we have learned World news guardian.co.uk +http://www.guardian.co.uk/world/2013/jun/10/nsa-spying-scandal-what-we-have-learned|creationTime|2013-06-11T10:30:46Z +http://lists.whatwg.org/pipermail/whatwg-whatwg.org/2009-January/018249.html|creationDate|2010-09-03 +http://lists.whatwg.org/pipermail/whatwg-whatwg.org/2009-January/018249.html|tag|http://www.semanlink.net/tag/meta_content_framework +http://lists.whatwg.org/pipermail/whatwg-whatwg.org/2009-January/018249.html|title|connecting up to the MCF heritage +http://lists.whatwg.org/pipermail/whatwg-whatwg.org/2009-January/018249.html|creationTime|2010-09-03T23:28:11Z +https://fr.slideshare.net/FrankKelly3/hierarchical-clustering-in-python-and-beyond|creationDate|2017-07-11 +https://fr.slideshare.net/FrankKelly3/hierarchical-clustering-in-python-and-beyond|tag|http://www.semanlink.net/tag/slideshare +https://fr.slideshare.net/FrankKelly3/hierarchical-clustering-in-python-and-beyond|tag|http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents +https://fr.slideshare.net/FrankKelly3/hierarchical-clustering-in-python-and-beyond|title|Hierarchical clustering in Python and beyond +https://fr.slideshare.net/FrankKelly3/hierarchical-clustering-in-python-and-beyond|creationTime|2017-07-11T10:07:47Z +http://www.europeana.eu/portal/|creationDate|2009-04-22 +http://www.europeana.eu/portal/|tag|http://www.semanlink.net/tag/europeana +http://www.europeana.eu/portal/|title|Europeana - Homepage +http://www.europeana.eu/portal/|creationTime|2009-04-22T23:22:51Z +http://www.semanlink.net/doc/2019/05/extreme_rare_event_classificati|creationDate|2019-05-22 +http://www.semanlink.net/doc/2019/05/extreme_rare_event_classificati|tag|http://www.semanlink.net/tag/rare_events +http://www.semanlink.net/doc/2019/05/extreme_rare_event_classificati|tag|http://www.semanlink.net/tag/autoencoder 
+http://www.semanlink.net/doc/2019/05/extreme_rare_event_classificati|comment|> We can catch [decoder's] high reconstruction errors and label them as a rare-event prediction. +http://www.semanlink.net/doc/2019/05/extreme_rare_event_classificati|title|Extreme Rare Event Classification using Autoencoders in Keras (2019) +http://www.semanlink.net/doc/2019/05/extreme_rare_event_classificati|bookmarkOf|https://towardsdatascience.com/extreme-rare-event-classification-using-autoencoders-in-keras-a565b386f098 +http://www.semanlink.net/doc/2019/05/extreme_rare_event_classificati|creationTime|2019-05-22T13:00:11Z +https://mc.ai/gcp-and-fast-ai-v1-a-full-setup-thatll-work/|creationDate|2019-05-03 +https://mc.ai/gcp-and-fast-ai-v1-a-full-setup-thatll-work/|tag|http://www.semanlink.net/tag/google_cloud_platform +https://mc.ai/gcp-and-fast-ai-v1-a-full-setup-thatll-work/|tag|http://www.semanlink.net/tag/fast_ai +https://mc.ai/gcp-and-fast-ai-v1-a-full-setup-thatll-work/|title|GCP and Fast Ai v1: A full setup that’ll work – mc.ai +https://mc.ai/gcp-and-fast-ai-v1-a-full-setup-thatll-work/|creationTime|2019-05-03T14:44:08Z +http://datao.net|creationDate|2009-01-28 +http://datao.net|tag|http://www.semanlink.net/tag/data_web +http://datao.net|tag|http://www.semanlink.net/tag/rdf_browser +http://datao.net|title|DataO - Browser of the Web of Data +http://datao.net|creationTime|2009-01-28T17:14:49Z +http://www.newscientist.com/article/mg21028144.100-african-land-grab-could-lead-to-future-water-conflicts.html?DCMP=OTC-rss&nsref=online-news|creationDate|2011-06-15 +http://www.newscientist.com/article/mg21028144.100-african-land-grab-could-lead-to-future-water-conflicts.html?DCMP=OTC-rss&nsref=online-news|tag|http://www.semanlink.net/tag/chine_afrique +http://www.newscientist.com/article/mg21028144.100-african-land-grab-could-lead-to-future-water-conflicts.html?DCMP=OTC-rss&nsref=online-news|tag|http://www.semanlink.net/tag/african_land_grab +http://www.newscientist.com/article/mg21028144.100-african-land-grab-could-lead-to-future-water-conflicts.html?DCMP=OTC-rss&nsref=online-news|tag|http://www.semanlink.net/tag/eau +http://www.newscientist.com/article/mg21028144.100-african-land-grab-could-lead-to-future-water-conflicts.html?DCMP=OTC-rss&nsref=online-news|tag|http://www.semanlink.net/tag/agriculture_africaine +http://www.newscientist.com/article/mg21028144.100-african-land-grab-could-lead-to-future-water-conflicts.html?DCMP=OTC-rss&nsref=online-news|comment|"But why bother leasing land instead of simply importing food? Such imports are equivalent to importing ""virtual water"", since food production accounts for nearly 80 per cent of annual freshwater usage." 
+http://www.newscientist.com/article/mg21028144.100-african-land-grab-could-lead-to-future-water-conflicts.html?DCMP=OTC-rss&nsref=online-news|title|African land grab could lead to future water conflicts - environment - 26 May 2011 - New Scientist +http://www.newscientist.com/article/mg21028144.100-african-land-grab-could-lead-to-future-water-conflicts.html?DCMP=OTC-rss&nsref=online-news|creationTime|2011-06-15T17:38:43Z +http://www.uni-koblenz.de/confsec/tutorials/2010/MDD_MDA_MDE_OWL.html|creationDate|2010-09-06 +http://www.uni-koblenz.de/confsec/tutorials/2010/MDD_MDA_MDE_OWL.html|tag|http://www.semanlink.net/tag/open_world_assumption +http://www.uni-koblenz.de/confsec/tutorials/2010/MDD_MDA_MDE_OWL.html|tag|http://www.semanlink.net/tag/owl +http://www.uni-koblenz.de/confsec/tutorials/2010/MDD_MDA_MDE_OWL.html|tag|http://www.semanlink.net/tag/model_driven_development +http://www.uni-koblenz.de/confsec/tutorials/2010/MDD_MDA_MDE_OWL.html|title|Model Driven Development with Semantic Web Technologies +http://www.uni-koblenz.de/confsec/tutorials/2010/MDD_MDA_MDE_OWL.html|creationTime|2010-09-06T21:16:13Z +http://www.lemonde.fr/idees/chronique/2010/07/02/la-mort-et-le-pib_1381908_3232.html|creationDate|2010-07-05 +http://www.lemonde.fr/idees/chronique/2010/07/02/la-mort-et-le-pib_1381908_3232.html|tag|http://www.semanlink.net/tag/chiffres +http://www.lemonde.fr/idees/chronique/2010/07/02/la-mort-et-le-pib_1381908_3232.html|tag|http://www.semanlink.net/tag/pib +http://www.lemonde.fr/idees/chronique/2010/07/02/la-mort-et-le-pib_1381908_3232.html|tag|http://www.semanlink.net/tag/sante +http://www.lemonde.fr/idees/chronique/2010/07/02/la-mort-et-le-pib_1381908_3232.html|tag|http://www.semanlink.net/tag/mort +http://www.lemonde.fr/idees/chronique/2010/07/02/la-mort-et-le-pib_1381908_3232.html|comment|"D'après le CNPS, la santé représente 11 % du PIB. D’après les données des caisses d’assurance maladie, 80 % des dépenses de santé concernent la dernière année de vie.
+Le refus institutionnalisé de l’idée de mort coûte 8.8 % du PIB + +" +http://www.lemonde.fr/idees/chronique/2010/07/02/la-mort-et-le-pib_1381908_3232.html|title|La mort et le PIB +http://www.lemonde.fr/idees/chronique/2010/07/02/la-mort-et-le-pib_1381908_3232.html|creationTime|2010-07-05T00:12:30Z +http://www.lemonde.fr/idees/chronique/2010/07/02/la-mort-et-le-pib_1381908_3232.html|source|Le Monde +http://deeplearning4j.org/word2vec.html|creationDate|2016-02-26 +http://deeplearning4j.org/word2vec.html|tag|http://www.semanlink.net/tag/word2vec +http://deeplearning4j.org/word2vec.html|tag|http://www.semanlink.net/tag/java +http://deeplearning4j.org/word2vec.html|title|Word2vec: Neural Word Embeddings in Java - Deeplearning4j: Open-source, distributed deep learning for the JVM +http://deeplearning4j.org/word2vec.html|creationTime|2016-02-26T13:01:35Z +http://ourworld.compuserve.com/homepages/rajm/openesef.htm|creationDate|2009-01-06 +http://ourworld.compuserve.com/homepages/rajm/openesef.htm|tag|http://www.semanlink.net/tag/probabilites +http://ourworld.compuserve.com/homepages/rajm/openesef.htm|tag|http://www.semanlink.net/tag/recherche +http://ourworld.compuserve.com/homepages/rajm/openesef.htm|tag|http://www.semanlink.net/tag/statistics +http://ourworld.compuserve.com/homepages/rajm/openesef.htm|title|Facts versus Factions: the use and abuse of subjectivity in scientific research +http://ourworld.compuserve.com/homepages/rajm/openesef.htm|creationTime|2009-01-06T22:44:18Z +http://dannyayers.com/2006/09/27/javascript-sparql-editor|creationDate|2006-10-09 +http://dannyayers.com/2006/09/27/javascript-sparql-editor|tag|http://www.semanlink.net/tag/sparql +http://dannyayers.com/2006/09/27/javascript-sparql-editor|tag|http://www.semanlink.net/tag/javascript +http://dannyayers.com/2006/09/27/javascript-sparql-editor|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2006/09/27/javascript-sparql-editor|comment|sparql-editor - a simple, single-file HTML form/textarea/Javascript based SPARQL editor. Do what thou wilt (public domain). Only tested on Firefox. +http://dannyayers.com/2006/09/27/javascript-sparql-editor|title|Javascript SPARQL editor +http://groups.drupal.org/node/20589|creationDate|2012-05-10 +http://groups.drupal.org/node/20589|tag|http://www.semanlink.net/tag/drupal_rdf +http://groups.drupal.org/node/20589|tag|http://www.semanlink.net/tag/solr_rdf +http://groups.drupal.org/node/20589|title|Solr RDF Support Drupal Groups +http://groups.drupal.org/node/20589|creationTime|2012-05-10T18:48:29Z +http://www.wildml.com/2016/01/attention-and-memory-in-deep-learning-and-nlp/|creationDate|2016-01-03 +http://www.wildml.com/2016/01/attention-and-memory-in-deep-learning-and-nlp/|tag|http://www.semanlink.net/tag/deep_learning_attention +http://www.wildml.com/2016/01/attention-and-memory-in-deep-learning-and-nlp/|tag|http://www.semanlink.net/tag/denny_britz +http://www.wildml.com/2016/01/attention-and-memory-in-deep-learning-and-nlp/|tag|http://www.semanlink.net/tag/neural_machine_translation +http://www.wildml.com/2016/01/attention-and-memory-in-deep-learning-and-nlp/|tag|http://www.semanlink.net/tag/good +http://www.wildml.com/2016/01/attention-and-memory-in-deep-learning-and-nlp/|comment|"cf. visual attention + +In standard [#seq2seq](/tag/sequence_to_sequence_learning) NMT, the decoder is supposed to generate a translation solely based on the last hidden state of the encoder - which therefore must capture everything from the source sentence (it must be a sentence embedding). 
Not good. Hence the attention mechanism.
+
+> we allow the decoder to “attend” to different parts of the source sentence at each step of the output generation. Importantly, we let the model learn what to attend to based on the input sentence and what it has produced so far
+
+> each decoder output word now depends on a weighted combination of all the input states, not just the last state.
+
+Possible to interpret what the model is doing by looking at the Attention weight matrix
+
+Cost: We need to calculate an attention value for each combination of input and output word (-> attention is a bit of a misnomer: we look at everything in detail before deciding what to focus on)
+
+> attention mechanism is simply giving the network access to its internal memory, which is the hidden state of the encoder
+
+> Unlike typical memory, the memory access mechanism here is soft, which means that the network retrieves a weighted combination of all memory locations, not a value from a single discrete location
+"
+http://www.wildml.com/2016/01/attention-and-memory-in-deep-learning-and-nlp/|title|Attention and Memory in Deep Learning and NLP – WildML
+http://www.wildml.com/2016/01/attention-and-memory-in-deep-learning-and-nlp/|creationTime|2016-01-03T14:36:12Z
+http://craphound.com/littlebrother/about/|creationDate|2012-05-23
+http://craphound.com/littlebrother/about/|tag|http://www.semanlink.net/tag/cory_doctorow
+http://craphound.com/littlebrother/about/|title|Little Brother
+http://craphound.com/littlebrother/about/|creationTime|2012-05-23T13:16:53Z
+http://www.rollingstone.com/politics/news/global-warmings-terrifying-new-math-20120719|creationDate|2012-07-24
+http://www.rollingstone.com/politics/news/global-warmings-terrifying-new-math-20120719|tag|http://www.semanlink.net/tag/rechauffement_climatique
+http://www.rollingstone.com/politics/news/global-warmings-terrifying-new-math-20120719|title|Global Warming's Terrifying New Math Rolling Stone
+http://www.rollingstone.com/politics/news/global-warmings-terrifying-new-math-20120719|creationTime|2012-07-24T08:07:45Z
+http://tomcat.apache.org/tomcat-5.0-doc/index.html|creationDate|2006-09-15
+http://tomcat.apache.org/tomcat-5.0-doc/index.html|tag|http://www.semanlink.net/tag/tomcat
+http://tomcat.apache.org/tomcat-5.0-doc/index.html|tag|http://www.semanlink.net/tag/developer_documentation
+http://tomcat.apache.org/tomcat-5.0-doc/index.html|title|Tomcat 5 - Documentation Index
+http://www.computerworld.com/article/2989830/it-careers/machines-are-replacing-writers-gartner-says.html|creationDate|2015-10-24
+http://www.computerworld.com/article/2989830/it-careers/machines-are-replacing-writers-gartner-says.html|tag|http://www.semanlink.net/tag/predictions
+http://www.computerworld.com/article/2989830/it-careers/machines-are-replacing-writers-gartner-says.html|tag|http://www.semanlink.net/tag/robotique
+http://www.computerworld.com/article/2989830/it-careers/machines-are-replacing-writers-gartner-says.html|tag|http://www.semanlink.net/tag/gartner
+http://www.computerworld.com/article/2989830/it-careers/machines-are-replacing-writers-gartner-says.html|comment|Machines will replace more writers, and by 2018, 2 million workers will be required to wear health monitors
+http://www.computerworld.com/article/2989830/it-careers/machines-are-replacing-writers-gartner-says.html|title|Robotics, automation play a big role in Gartner's top 10 predictions Computerworld 
+http://www.computerworld.com/article/2989830/it-careers/machines-are-replacing-writers-gartner-says.html|creationTime|2015-10-24T22:25:41Z +http://www.w3.org/TR/rdf-sparql-query/|creationDate|2007-07-07 +http://www.w3.org/TR/rdf-sparql-query/|tag|http://www.semanlink.net/tag/andy_seaborne +http://www.w3.org/TR/rdf-sparql-query/|tag|http://www.semanlink.net/tag/w3c_recommendation +http://www.w3.org/TR/rdf-sparql-query/|title|SPARQL Query Language for RDF +http://www.w3.org/TR/rdf-sparql-query/|creationTime|2007-07-07T13:50:03Z +http://www.w3.org/TR/rif-rdf-owl/#Overview|creationDate|2007-11-07 +http://www.w3.org/TR/rif-rdf-owl/#Overview|tag|http://www.semanlink.net/tag/w3c_working_draft +http://www.w3.org/TR/rif-rdf-owl/#Overview|tag|http://www.semanlink.net/tag/rif +http://www.w3.org/TR/rif-rdf-owl/#Overview|comment|W3C Working Draft 30 October 2007 +http://www.w3.org/TR/rif-rdf-owl/#Overview|title|RIF RDF and OWL Compatibility +http://www.w3.org/TR/rif-rdf-owl/#Overview|creationTime|2007-11-07T17:06:41Z +http://www.aclweb.org/anthology/W18-3012/|creationDate|2018-10-08 +http://www.aclweb.org/anthology/W18-3012/|tag|http://www.semanlink.net/tag/sentence_embeddings +http://www.aclweb.org/anthology/W18-3012/|tag|http://www.semanlink.net/tag/sif_embeddings +http://www.aclweb.org/anthology/W18-3012/|tag|http://www.semanlink.net/tag/random_walk +http://www.aclweb.org/anthology/W18-3012/|comment|"> we show that word vector length has a confounding effect on the probability of a sentence being generated in Arora et al.’s model ([SIF embeddings](tag:sif_embeddings)). We propose a random walk model that is robust to this confound... Our approach beats Arora et al.’s by up to 44.4% on textual similarity tasks... Unlike Arora et al.’s method, ours requires no hyperparameter tuning + +[Github](https://github.com/kawine/usif) + + + +" +http://www.aclweb.org/anthology/W18-3012/|relatedDoc|https://github.com/kawine/usif +http://www.aclweb.org/anthology/W18-3012/|title|Unsupervised Random Walk Sentence Embeddings: A Strong but Simple Baseline (Ethayarajh 2018) +http://www.aclweb.org/anthology/W18-3012/|creationTime|2018-10-08T00:31:14Z +http://www.videolan.org/|creationDate|2008-10-02 +http://www.videolan.org/|tag|http://www.semanlink.net/tag/video +http://www.videolan.org/|tag|http://www.semanlink.net/tag/media_player +http://www.videolan.org/|tag|http://www.semanlink.net/tag/open_source +http://www.videolan.org/|title|VideoLAN - Free and Open Source software and video streaming solutions for every OS! 
+http://www.videolan.org/|creationTime|2008-10-02T01:08:04Z +https://www.mdpi.com/2073-8994/11/4/453|creationDate|2019-04-24 +https://www.mdpi.com/2073-8994/11/4/453|tag|http://www.semanlink.net/tag/entity_linking +https://www.mdpi.com/2073-8994/11/4/453|title|Entity Linking via Symmetrical Attention-Based Neural Network and Entity Structural Features (2019) +https://www.mdpi.com/2073-8994/11/4/453|creationTime|2019-04-24T16:19:59Z +http://etherrag.blogspot.jp/2013/07/duck-duck-go-illusion-of-privacy.html|creationDate|2013-07-14 +http://etherrag.blogspot.jp/2013/07/duck-duck-go-illusion-of-privacy.html|tag|http://www.semanlink.net/tag/privacy_and_internet +http://etherrag.blogspot.jp/2013/07/duck-duck-go-illusion-of-privacy.html|tag|http://www.semanlink.net/tag/duckduckgo +http://etherrag.blogspot.jp/2013/07/duck-duck-go-illusion-of-privacy.html|tag|http://www.semanlink.net/tag/nsa +http://etherrag.blogspot.jp/2013/07/duck-duck-go-illusion-of-privacy.html|title|Ether Rag: Duck Duck Go: Illusion of Privacy +http://etherrag.blogspot.jp/2013/07/duck-duck-go-illusion-of-privacy.html|creationTime|2013-07-14T17:02:13Z +https://twitter.com/fchollet/status/1105139360226140160|creationDate|2019-03-12 +https://twitter.com/fchollet/status/1105139360226140160|tag|http://www.semanlink.net/tag/francois_chollet +https://twitter.com/fchollet/status/1105139360226140160|tag|http://www.semanlink.net/tag/tensorflow_2_0 +https://twitter.com/fchollet/status/1105139360226140160|tag|http://www.semanlink.net/tag/keras +https://twitter.com/fchollet/status/1105139360226140160|title|François Chollet sur Twitter : a crash course on everything you need to know to use TensorFlow 2.0 + Keras +https://twitter.com/fchollet/status/1105139360226140160|creationTime|2019-03-12T22:48:43Z +http://www.snee.com/bobdc.blog/2008/04/rdf_and_social_networks.html|creationDate|2008-04-07 +http://www.snee.com/bobdc.blog/2008/04/rdf_and_social_networks.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2008/04/rdf_and_social_networks.html|tag|http://www.semanlink.net/tag/rdf_and_social_networks +http://www.snee.com/bobdc.blog/2008/04/rdf_and_social_networks.html|tag|http://www.semanlink.net/tag/rdf_vs_xml +http://www.snee.com/bobdc.blog/2008/04/rdf_and_social_networks.html|tag|http://www.semanlink.net/tag/qotd +http://www.snee.com/bobdc.blog/2008/04/rdf_and_social_networks.html|comment|Defining a vocabulary instead of a DTD is the low-hanging fruit... It doesn't reduce the work to do by simplifying it, but by reducing the scope: by forgetting about the data structures. 
+http://www.snee.com/bobdc.blog/2008/04/rdf_and_social_networks.html|title|RDF and social networks - bobdc.blog +http://www.snee.com/bobdc.blog/2008/04/rdf_and_social_networks.html|creationTime|2008-04-07T16:55:04Z +http://www.itl.nist.gov/iad/mig/tests/ace/|creationDate|2012-04-10 +http://www.itl.nist.gov/iad/mig/tests/ace/|tag|http://www.semanlink.net/tag/nlp +http://www.itl.nist.gov/iad/mig/tests/ace/|title|Automatic Content Extraction (ACE) Evaluation +http://www.itl.nist.gov/iad/mig/tests/ace/|creationTime|2012-04-10T14:26:00Z +http://esw.w3.org/topic/SweoIG/TaskForces/CommunityProjects/LinkingOpenData|creationDate|2007-03-01 +http://esw.w3.org/topic/SweoIG/TaskForces/CommunityProjects/LinkingOpenData|tag|http://www.semanlink.net/tag/linked_data +http://esw.w3.org/topic/SweoIG/TaskForces/CommunityProjects/LinkingOpenData|comment|"The goal of the Linking Open Data project is to make various open data +sources available on the Web as RDF and to set RDF links between data items +from different data sources." +http://esw.w3.org/topic/SweoIG/TaskForces/CommunityProjects/LinkingOpenData|title|Linking-open-data - Wiki +http://esw.w3.org/topic/SweoIG/TaskForces/CommunityProjects/LinkingOpenData|creationTime|2007-01-28T22:38:11Z +http://linkeddata.deri.ie/services/tutorials/rdfa|creationDate|2011-06-29 +http://linkeddata.deri.ie/services/tutorials/rdfa|tag|http://www.semanlink.net/tag/cheat_sheet +http://linkeddata.deri.ie/services/tutorials/rdfa|tag|http://www.semanlink.net/tag/rdfa +http://linkeddata.deri.ie/services/tutorials/rdfa|title|RDFa cheat sheet +http://linkeddata.deri.ie/services/tutorials/rdfa|creationTime|2011-06-29T18:05:04Z +http://www.speedtest.net/my-result/5527234144|creationDate|2016-08-04 +http://www.speedtest.net/my-result/5527234144|tag|http://www.semanlink.net/tag/fournisseurs_d_acces_a_internet +http://www.speedtest.net/my-result/5527234144|title|Speedtest.net by Ookla - My Results +http://www.speedtest.net/my-result/5527234144|creationTime|2016-08-04T09:19:54Z +https://www.quantamagazine.org/the-octonion-math-that-could-underpin-physics-20180720|creationDate|2018-07-20 +https://www.quantamagazine.org/the-octonion-math-that-could-underpin-physics-20180720|tag|http://www.semanlink.net/tag/physique +https://www.quantamagazine.org/the-octonion-math-that-could-underpin-physics-20180720|tag|http://www.semanlink.net/tag/strange +https://www.quantamagazine.org/the-octonion-math-that-could-underpin-physics-20180720|tag|http://www.semanlink.net/tag/mathematiques +https://www.quantamagazine.org/the-octonion-math-that-could-underpin-physics-20180720|comment|New findings are fueling an old suspicion that fundamental particles and forces spring from strange eight-part numbers called “octonions.” +https://www.quantamagazine.org/the-octonion-math-that-could-underpin-physics-20180720|title|The Octonion Math That Could Underpin Physics Quanta Magazine +https://www.quantamagazine.org/the-octonion-math-that-could-underpin-physics-20180720|creationTime|2018-07-20T23:25:59Z +https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings|creationDate|2017-10-25 +https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings|tag|http://www.semanlink.net/tag/lstm_networks +https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings|tag|http://www.semanlink.net/tag/keras_embedding_layer +https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings|tag|http://www.semanlink.net/tag/nlp_sample_code 
+https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings|tag|http://www.semanlink.net/tag/using_word_embedding +https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings|tag|http://www.semanlink.net/tag/word2vec +https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings|title|LSTM with word2vec embeddings Kaggle +https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings|creationTime|2017-10-25T15:50:14Z +http://lists.w3.org/Archives/Public/semantic-web/2006Oct/0133.html|creationDate|2007-01-02 +http://lists.w3.org/Archives/Public/semantic-web/2006Oct/0133.html|tag|http://www.semanlink.net/tag/content_negotiation +http://lists.w3.org/Archives/Public/semantic-web/2006Oct/0133.html|title|Testing RDF vs HTML Content Negotiation from T.Heath on 2006-10-26 (semantic-web@w3.org from October 2006) +http://stackoverflow.com/questions/15257674/scikit-learn-add-features-to-a-vectorized-set-of-documents|creationDate|2015-10-21 +http://stackoverflow.com/questions/15257674/scikit-learn-add-features-to-a-vectorized-set-of-documents|title|python - scikit-learn, add features to a vectorized set of documents - Stack Overflow +http://stackoverflow.com/questions/15257674/scikit-learn-add-features-to-a-vectorized-set-of-documents|creationTime|2015-10-21T23:14:21Z +https://www.npmjs.com/package/markdown-it-hashtag|creationDate|2017-04-01 +https://www.npmjs.com/package/markdown-it-hashtag|tag|http://www.semanlink.net/tag/markdown_ittt +https://www.npmjs.com/package/markdown-it-hashtag|title|markdown-it-hashtag +https://www.npmjs.com/package/markdown-it-hashtag|creationTime|2017-04-01T03:11:41Z +http://internetactu.blog.lemonde.fr/2012/07/13/vers-un-nouveau-monde-de-donnees/|creationDate|2012-07-13 +http://internetactu.blog.lemonde.fr/2012/07/13/vers-un-nouveau-monde-de-donnees/|tag|http://www.semanlink.net/tag/data_web +http://internetactu.blog.lemonde.fr/2012/07/13/vers-un-nouveau-monde-de-donnees/|title|Vers un Nouveau Monde de données InternetActu +http://internetactu.blog.lemonde.fr/2012/07/13/vers-un-nouveau-monde-de-donnees/|creationTime|2012-07-13T17:30:00Z +http://blogs.sun.com/bblfish/entry/restful_semantic_web_services|creationDate|2007-07-04 +http://blogs.sun.com/bblfish/entry/restful_semantic_web_services|tag|http://www.semanlink.net/tag/restful_semantic_web_services +http://blogs.sun.com/bblfish/entry/restful_semantic_web_services|tag|http://www.semanlink.net/tag/sparql +http://blogs.sun.com/bblfish/entry/restful_semantic_web_services|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/bblfish/entry/restful_semantic_web_services|tag|http://www.semanlink.net/tag/rdf_forms +http://blogs.sun.com/bblfish/entry/restful_semantic_web_services|title|Restful semantic web services +http://blogs.sun.com/bblfish/entry/restful_semantic_web_services|creationTime|2007-07-04T23:11:56Z +http://internetactu.blog.lemonde.fr/2015/10/17/faut-il-prendre-leffondrement-au-serieux/|creationDate|2015-10-18 +http://internetactu.blog.lemonde.fr/2015/10/17/faut-il-prendre-leffondrement-au-serieux/|tag|http://www.semanlink.net/tag/peak_everything +http://internetactu.blog.lemonde.fr/2015/10/17/faut-il-prendre-leffondrement-au-serieux/|tag|http://www.semanlink.net/tag/economie_ecologique +http://internetactu.blog.lemonde.fr/2015/10/17/faut-il-prendre-leffondrement-au-serieux/|title|Faut-il prendre l’effondrement au sérieux ? 
InternetActu +http://internetactu.blog.lemonde.fr/2015/10/17/faut-il-prendre-leffondrement-au-serieux/|creationTime|2015-10-18T19:32:36Z +http://www.w3.org/TR/rdfa-in-html/|creationDate|2010-09-03 +http://www.w3.org/TR/rdfa-in-html/|tag|http://www.semanlink.net/tag/rdfa_1_1 +http://www.w3.org/TR/rdfa-in-html/|tag|http://www.semanlink.net/tag/html +http://www.w3.org/TR/rdfa-in-html/|title|HTML+RDFa 1.1 +http://www.w3.org/TR/rdfa-in-html/|creationTime|2010-09-03T22:24:37Z +http://www.gqmagazine.fr/pop-culture/interview/articles/robert-mcliam-wilson-de-la-guerer-civile-a-belfast-au-terrorisme-a-paris/30310|creationDate|2016-06-12 +http://www.gqmagazine.fr/pop-culture/interview/articles/robert-mcliam-wilson-de-la-guerer-civile-a-belfast-au-terrorisme-a-paris/30310|tag|http://www.semanlink.net/tag/attentats_13_11_2015 +http://www.gqmagazine.fr/pop-culture/interview/articles/robert-mcliam-wilson-de-la-guerer-civile-a-belfast-au-terrorisme-a-paris/30310|tag|http://www.semanlink.net/tag/robert_mcliam_wilson +http://www.gqmagazine.fr/pop-culture/interview/articles/robert-mcliam-wilson-de-la-guerer-civile-a-belfast-au-terrorisme-a-paris/30310|comment|"""LES TERRORISTES PENSENT TOUS LES JOURS À LEUR BARBE QUI NE POUSSE PAS""" +http://www.gqmagazine.fr/pop-culture/interview/articles/robert-mcliam-wilson-de-la-guerer-civile-a-belfast-au-terrorisme-a-paris/30310|title|Le regard de Robert McLiam Wilson sur les attentats du 13 novembre GQ +http://www.gqmagazine.fr/pop-culture/interview/articles/robert-mcliam-wilson-de-la-guerer-civile-a-belfast-au-terrorisme-a-paris/30310|creationTime|2016-06-12T09:35:55Z +http://www.w3.org/People/Ivan/CorePresentations/DataIntegration/Slides.html|creationDate|2007-04-17 +http://www.w3.org/People/Ivan/CorePresentations/DataIntegration/Slides.html|tag|http://www.semanlink.net/tag/ivan_herman +http://www.w3.org/People/Ivan/CorePresentations/DataIntegration/Slides.html|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.w3.org/People/Ivan/CorePresentations/DataIntegration/Slides.html|comment|"Example related to book searching and selling on how the Semantic Web technologies can be used for data integration +" +http://www.w3.org/People/Ivan/CorePresentations/DataIntegration/Slides.html|title|Data Integration on Semantic Web +http://www.w3.org/People/Ivan/CorePresentations/DataIntegration/Slides.html|creationTime|2007-04-17T22:55:09Z +http://googlewebmastercentral.blogspot.fr/2009/02/specify-your-canonical.html|creationDate|2013-08-23 +http://googlewebmastercentral.blogspot.fr/2009/02/specify-your-canonical.html|tag|http://www.semanlink.net/tag/webmasters_google +http://googlewebmastercentral.blogspot.fr/2009/02/specify-your-canonical.html|title|Official Google Webmaster Central Blog: Specify your canonical +http://googlewebmastercentral.blogspot.fr/2009/02/specify-your-canonical.html|creationTime|2013-08-23T14:21:55Z +https://dl.acm.org/citation.cfm?id=3159660|creationDate|2018-03-04 +https://dl.acm.org/citation.cfm?id=3159660|tag|http://www.semanlink.net/tag/missing_labels_ml +https://dl.acm.org/citation.cfm?id=3159660|tag|http://www.semanlink.net/tag/manik_varma +https://dl.acm.org/citation.cfm?id=3159660|tag|http://www.semanlink.net/tag/automatic_tagging +https://dl.acm.org/citation.cfm?id=3159660|tag|http://www.semanlink.net/tag/extreme_multi_label_classification +https://dl.acm.org/citation.cfm?id=3159660|tag|http://www.semanlink.net/tag/text_multi_label_classification 
+https://dl.acm.org/citation.cfm?id=3159660|tag|http://www.semanlink.net/tag/nlp_microsoft +https://dl.acm.org/citation.cfm?id=3159660|comment|"This paper formulates the extreme classification problem **when predictions need to be made on training points with partially revealed labels**. + +[SwiftXML pseudo-code](/doc/?uri=https%3A%2F%2Fpdfs.semanticscholar.org%2F873e%2Fea884de581f79b1e783052f8e9fa60726fc8.pdf) + +**Learns from word2vec features extracted from the tags in addition to the article text features.**" +https://dl.acm.org/citation.cfm?id=3159660|relatedDoc|https://pdfs.semanticscholar.org/873e/ea884de581f79b1e783052f8e9fa60726fc8.pdf +https://dl.acm.org/citation.cfm?id=3159660|title|Extreme Multi-label Learning with Label Features for Warm-start Tagging, Ranking & Recommendation (2018) +https://dl.acm.org/citation.cfm?id=3159660|creationTime|2018-03-04T17:15:30Z +http://www.cs.man.ac.uk/~bmotik/publications/papers/mhs06constraints-report.pdf|creationDate|2006-12-16 +http://www.cs.man.ac.uk/~bmotik/publications/papers/mhs06constraints-report.pdf|tag|http://www.semanlink.net/tag/ian_horrocks +http://www.cs.man.ac.uk/~bmotik/publications/papers/mhs06constraints-report.pdf|tag|http://www.semanlink.net/tag/description_logic +http://www.cs.man.ac.uk/~bmotik/publications/papers/mhs06constraints-report.pdf|comment|"In this paper, we compare description logics with relational databases +with respect to their treatment of schema constraints, the languages +used to express these constraints, and the approaches to query +answering and constraint checking. Our analysis reveals a significant +overlap between the two formalisms. Inspired by the integrity constraints +of relational databases, we define a notion of integrity constraints +for description logics. We analyze different possibilities for +defining the semantics of the constraints. Finally, we present several +algorithms for checking constraint satisfaction for description logics +with varying degrees of expressivity." +http://www.cs.man.ac.uk/~bmotik/publications/papers/mhs06constraints-report.pdf|title|Integrating Description Logics and Relational Databases +http://www.bbc.co.uk/blogs/olympics/2008/08/is_phelps_really_the_greatest.html|creationDate|2008-08-14 +http://www.bbc.co.uk/blogs/olympics/2008/08/is_phelps_really_the_greatest.html|tag|http://www.semanlink.net/tag/sportif +http://www.bbc.co.uk/blogs/olympics/2008/08/is_phelps_really_the_greatest.html|tag|http://www.semanlink.net/tag/pekin_2008 +http://www.bbc.co.uk/blogs/olympics/2008/08/is_phelps_really_the_greatest.html|tag|http://www.semanlink.net/tag/carl_lewis +http://www.bbc.co.uk/blogs/olympics/2008/08/is_phelps_really_the_greatest.html|tag|http://www.semanlink.net/tag/jeux_olympiques +http://www.bbc.co.uk/blogs/olympics/2008/08/is_phelps_really_the_greatest.html|title|Is Phelps really the greatest? 
BBC SPORT Olympics 2008 blog +http://www.bbc.co.uk/blogs/olympics/2008/08/is_phelps_really_the_greatest.html|creationTime|2008-08-14T02:40:34Z +http://finance.blog.lemonde.fr/2013/01/08/le-ceo-destructeur-daig-reclame-25-milliards-a-letat-americain-qui-a-sauve-lentreprise/|creationDate|2013-01-13 +http://finance.blog.lemonde.fr/2013/01/08/le-ceo-destructeur-daig-reclame-25-milliards-a-letat-americain-qui-a-sauve-lentreprise/|tag|http://www.semanlink.net/tag/credit_default_swap +http://finance.blog.lemonde.fr/2013/01/08/le-ceo-destructeur-daig-reclame-25-milliards-a-letat-americain-qui-a-sauve-lentreprise/|tag|http://www.semanlink.net/tag/finance +http://finance.blog.lemonde.fr/2013/01/08/le-ceo-destructeur-daig-reclame-25-milliards-a-letat-americain-qui-a-sauve-lentreprise/|tag|http://www.semanlink.net/tag/crise_des_subprimes +http://finance.blog.lemonde.fr/2013/01/08/le-ceo-destructeur-daig-reclame-25-milliards-a-letat-americain-qui-a-sauve-lentreprise/|title|Le CEO destructeur d’AIG réclame 25 milliards à l’Etat américain qui a sauvé l’entreprise. Démystifier la finance +http://finance.blog.lemonde.fr/2013/01/08/le-ceo-destructeur-daig-reclame-25-milliards-a-letat-americain-qui-a-sauve-lentreprise/|creationTime|2013-01-13T00:33:41Z +http://www.semantic-web-journal.net/|creationDate|2010-07-19 +http://www.semantic-web-journal.net/|tag|http://www.semanlink.net/tag/semantic_web +http://www.semantic-web-journal.net/|title|www.semantic-web-journal.net +http://www.semantic-web-journal.net/|creationTime|2010-07-19T09:11:34Z +http://www.antipope.org/charlie/blog-static/2012/04/understanding-amazons-strategy.html|creationDate|2014-11-08 +http://www.antipope.org/charlie/blog-static/2012/04/understanding-amazons-strategy.html|tag|http://www.semanlink.net/tag/drm +http://www.antipope.org/charlie/blog-static/2012/04/understanding-amazons-strategy.html|tag|http://www.semanlink.net/tag/amazon +http://www.antipope.org/charlie/blog-static/2012/04/understanding-amazons-strategy.html|comment|By foolishly insisting on DRM, and then selling to Amazon on a wholesale basis, the publishers handed Amazon a monopoly on their customers—and thereby empowered a predatory monopsony. 
+http://www.antipope.org/charlie/blog-static/2012/04/understanding-amazons-strategy.html|title|What Amazon's ebook strategy means - Charlie's Diary +http://www.antipope.org/charlie/blog-static/2012/04/understanding-amazons-strategy.html|creationTime|2014-11-08T13:16:17Z +http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text|creationDate|2013-08-12 +http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text|tag|http://www.semanlink.net/tag/jena_fuseki +http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text|tag|http://www.semanlink.net/tag/skos +http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text|tag|http://www.semanlink.net/tag/text_search +http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text|tag|http://www.semanlink.net/tag/semanlink_related +http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text|tag|http://www.semanlink.net/tag/lucene +http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text|tag|http://www.semanlink.net/tag/jena +http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text|tag|http://www.semanlink.net/tag/thesaurus +http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text|title|Thesaurus-augmented Search with Jena Text ZBW Labs +http://zbw.eu/labs/en/blog/thesaurus-augmented-search-with-jena-text|creationTime|2013-08-12T11:50:12Z +http://www.3desite.fr/3de_brico_demont_arm.php|creationDate|2013-07-22 +http://www.3desite.fr/3de_brico_demont_arm.php|tag|http://www.semanlink.net/tag/howto +http://www.3desite.fr/3de_brico_demont_arm.php|title|"Comment démonter, une armoire ""Normande"" ?" +http://www.3desite.fr/3de_brico_demont_arm.php|creationTime|2013-07-22T19:25:53Z +http://fr.wikipedia.org/wiki/La_Reine_Margot_(film,_1994)|creationDate|2013-10-13 +http://fr.wikipedia.org/wiki/La_Reine_Margot_(film,_1994)|tag|http://www.semanlink.net/tag/massacre_de_la_saint_barthelemy +http://fr.wikipedia.org/wiki/La_Reine_Margot_(film,_1994)|tag|http://www.semanlink.net/tag/henri_iv +http://fr.wikipedia.org/wiki/La_Reine_Margot_(film,_1994)|tag|http://www.semanlink.net/tag/film_francais +http://fr.wikipedia.org/wiki/La_Reine_Margot_(film,_1994)|title|La Reine Margot (film, 1994) - Wikipédia +http://fr.wikipedia.org/wiki/La_Reine_Margot_(film,_1994)|creationTime|2013-10-13T23:23:15Z +http://www.regular-expressions.info/java.html|creationDate|2012-03-19 +http://www.regular-expressions.info/java.html|tag|http://www.semanlink.net/tag/java_dev +http://www.regular-expressions.info/java.html|tag|http://www.semanlink.net/tag/regex +http://www.regular-expressions.info/java.html|title|Using Regular Expressions in Java +http://www.regular-expressions.info/java.html|creationTime|2012-03-19T08:29:15Z +http://www.cpdomina.net/papers/mscthesis09_final.pdf|creationDate|2010-07-19 +http://www.cpdomina.net/papers/mscthesis09_final.pdf|tag|http://www.semanlink.net/tag/logic +http://www.cpdomina.net/papers/mscthesis09_final.pdf|tag|http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web +http://www.cpdomina.net/papers/mscthesis09_final.pdf|tag|http://www.semanlink.net/tag/reasoning +http://www.cpdomina.net/papers/mscthesis09_final.pdf|comment|The objective of this thesis is to study mechanisms to perform probabilistic reasoning in the Semantic Web. For this purpose, we use Markov logic, a novel representation formalism that combines first-order logic with probabilistic graphical models. 
+http://www.cpdomina.net/papers/mscthesis09_final.pdf|title|Probabilistic Reasoning in the Semantic Web using Markov Logic +http://www.cpdomina.net/papers/mscthesis09_final.pdf|creationTime|2010-07-19T11:33:57Z +http://www.imdb.com/name/nm0559249/|creationDate|2007-11-24 +http://www.imdb.com/name/nm0559249/|tag|http://www.semanlink.net/tag/renato_matos +http://www.imdb.com/name/nm0559249/|tag|http://www.semanlink.net/tag/cinema_bresilien +http://www.imdb.com/name/nm0559249/|comment|Cego que Gritava Luz, O (1997) , Louco Por Cinema (1994) , A Terceira Margem do Rio (1994) +http://www.imdb.com/name/nm0559249/|title|Renato Matos - Filmography +http://www.imdb.com/name/nm0559249/|creationTime|2007-11-24T11:48:45Z +http://www.dataversity.net/knowledge-maps-structure-versus-meaning/|creationDate|2017-10-19 +http://www.dataversity.net/knowledge-maps-structure-versus-meaning/|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.dataversity.net/knowledge-maps-structure-versus-meaning/|tag|http://www.semanlink.net/tag/knowledge_maps +http://www.dataversity.net/knowledge-maps-structure-versus-meaning/|tag|http://www.semanlink.net/tag/semanlink_related +http://www.dataversity.net/knowledge-maps-structure-versus-meaning/|comment|Our goal is to organize data in the computer the way humans organize data in their minds +http://www.dataversity.net/knowledge-maps-structure-versus-meaning/|title|Knowledge Maps: Structure Versus Meaning - DATAVERSITY +http://www.dataversity.net/knowledge-maps-structure-versus-meaning/|creationTime|2017-10-19T00:09:05Z +http://fr.wikipedia.org/wiki/1729_(nombre)#Nombre_de_Hardy-Ramanujan|creationDate|2008-10-02 +http://fr.wikipedia.org/wiki/1729_(nombre)#Nombre_de_Hardy-Ramanujan|tag|http://www.semanlink.net/tag/mathematiques +http://fr.wikipedia.org/wiki/1729_(nombre)#Nombre_de_Hardy-Ramanujan|title|1729 = 12^3 + 1^3 = 10^3 + 9^3 +http://fr.wikipedia.org/wiki/1729_(nombre)#Nombre_de_Hardy-Ramanujan|creationTime|2008-10-02T10:40:41Z +http://www.france5.fr/histoire_decouverte/articles/W00371/300/|creationDate|2006-05-08 +http://www.france5.fr/histoire_decouverte/articles/W00371/300/|tag|http://www.semanlink.net/tag/mission_voulet_chanoine +http://www.france5.fr/histoire_decouverte/articles/W00371/300/|title|France 5 : Blancs de mémoire +http://www.khanacademy.org/|creationDate|2010-09-27 +http://www.khanacademy.org/|tag|http://www.semanlink.net/tag/online_course_materials +http://www.khanacademy.org/|tag|http://www.semanlink.net/tag/education +http://www.khanacademy.org/|tag|http://www.semanlink.net/tag/open_source +http://www.khanacademy.org/|title|Khan Academy +http://www.khanacademy.org/|creationTime|2010-09-27T11:09:25Z +http://www.mkbergman.com/1009/deconstructing-the-google-knowledge-graph/|creationDate|2012-05-19 +http://www.mkbergman.com/1009/deconstructing-the-google-knowledge-graph/|tag|http://www.semanlink.net/tag/google_knowledge_graph +http://www.mkbergman.com/1009/deconstructing-the-google-knowledge-graph/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/1009/deconstructing-the-google-knowledge-graph/|title|Deconstructing the Google Knowledge Graph » AI3:::Adaptive Information +http://www.mkbergman.com/1009/deconstructing-the-google-knowledge-graph/|creationTime|2012-05-19T22:33:01Z +https://www.bergnet.org/2017/08/rdf-ext-v1-release/|creationDate|2017-08-09 +https://www.bergnet.org/2017/08/rdf-ext-v1-release/|tag|http://www.semanlink.net/tag/rdfjs +https://www.bergnet.org/2017/08/rdf-ext-v1-release/|title|RDF-Ext v1 Release bergis 
reptile zoo of software, hardware and ideas
+https://www.bergnet.org/2017/08/rdf-ext-v1-release/|creationTime|2017-08-09T00:08:14Z
+http://www.steinkern.de/|creationDate|2007-01-19
+http://www.steinkern.de/|tag|http://www.semanlink.net/tag/ammonite
+http://www.steinkern.de/|tag|http://www.semanlink.net/tag/fossile
+http://www.steinkern.de/|comment|Des photos d'ammonites en pagaille
+http://www.steinkern.de/|title|Steinkern.de - Die Fossilien-Community - Startseite
+https://www.researchgate.net/project/Theories-of-Deep-Learning|creationDate|2017-11-09
+https://www.researchgate.net/project/Theories-of-Deep-Learning|tag|http://www.semanlink.net/tag/deep_learning
+https://www.researchgate.net/project/Theories-of-Deep-Learning|tag|http://www.semanlink.net/tag/researchgate
+https://www.researchgate.net/project/Theories-of-Deep-Learning|title|Theories of Deep Learning by Hatef Monajemi Research Project on ResearchGate
+https://www.researchgate.net/project/Theories-of-Deep-Learning|creationTime|2017-11-09T10:17:45Z
+http://wiki.apache.org/solr/LanguageAnalysis|creationDate|2012-11-26
+http://wiki.apache.org/solr/LanguageAnalysis|tag|http://www.semanlink.net/tag/solr_not_english_only
+http://wiki.apache.org/solr/LanguageAnalysis|title|LanguageAnalysis - Solr Wiki
+http://wiki.apache.org/solr/LanguageAnalysis|creationTime|2012-11-26T00:07:07Z
+https://arxiv.org/abs/1607.07956|creationDate|2018-05-12
+https://arxiv.org/abs/1607.07956|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1607.07956|tag|http://www.semanlink.net/tag/nlp_hierarchical_text_classification
+https://arxiv.org/abs/1607.07956|tag|http://www.semanlink.net/tag/category_embedding
+https://arxiv.org/abs/1607.07956|tag|http://www.semanlink.net/tag/entity_embeddings
+https://arxiv.org/abs/1607.07956|arxiv_author|Yuezhang Li
+https://arxiv.org/abs/1607.07956|arxiv_author|Ronghuo Zheng
+https://arxiv.org/abs/1607.07956|arxiv_author|Rahul Iyer
+https://arxiv.org/abs/1607.07956|arxiv_author|Tian Tian
+https://arxiv.org/abs/1607.07956|arxiv_author|Zhiting Hu
+https://arxiv.org/abs/1607.07956|arxiv_author|Katia Sycara
+https://arxiv.org/abs/1607.07956|comment|"a framework that embeds entities and categories into a semantic space by integrating structured
+knowledge and taxonomy hierarchy from large knowledge bases.
+
+two methods:
+
+1. Category Embedding model: it replaces the entities in the context with their directly
+labeled categories to build categories’ context;
+2. Hierarchical Category Embedding: it
+further incorporates all ancestor categories of the context entities to utilize the hierarchical information."
+https://arxiv.org/abs/1607.07956|title|[1607.07956] Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification (COLING 2016)
+https://arxiv.org/abs/1607.07956|creationTime|2018-05-12T16:41:35Z
+https://arxiv.org/abs/1607.07956|arxiv_summary|"Due to the lack of structured knowledge applied in learning distributed
+representation of categories, existing work cannot incorporate category
+hierarchies into entity information. We propose a framework that embeds
+entities and categories into a semantic space by integrating structured
+knowledge and taxonomy hierarchy from large knowledge bases. The framework
+allows to compute meaningful semantic relatedness between entities and
+categories. Our framework can handle both single-word concepts and
+multiple-word concepts with superior performance on concept categorization and
+yield state of the art results on dataless hierarchical classification."
+https://arxiv.org/abs/1607.07956|arxiv_firstAuthor|Yuezhang Li
+https://arxiv.org/abs/1607.07956|arxiv_updated|2016-07-27T04:51:17Z
+https://arxiv.org/abs/1607.07956|arxiv_title|Joint Embedding of Hierarchical Categories and Entities for Concept Categorization and Dataless Classification
+https://arxiv.org/abs/1607.07956|arxiv_published|2016-07-27T04:51:17Z
+https://arxiv.org/abs/1607.07956|arxiv_num|1607.07956
+http://lareviewofbooks.org/review/enlightenment-google-wikileaks-reorganization-world#|creationDate|2014-11-07
+http://lareviewofbooks.org/review/enlightenment-google-wikileaks-reorganization-world#|tag|http://www.semanlink.net/tag/julian_assange
+http://lareviewofbooks.org/review/enlightenment-google-wikileaks-reorganization-world#|tag|http://www.semanlink.net/tag/harry_halpin
+http://lareviewofbooks.org/review/enlightenment-google-wikileaks-reorganization-world#|tag|http://www.semanlink.net/tag/google
+http://lareviewofbooks.org/review/enlightenment-google-wikileaks-reorganization-world#|tag|http://www.semanlink.net/tag/wikileaks
+http://lareviewofbooks.org/review/enlightenment-google-wikileaks-reorganization-world#|title|What Is Enlightenment?: Google, Wikileaks, and the Reorganization of the World The Los Angeles Review of Books
+http://lareviewofbooks.org/review/enlightenment-google-wikileaks-reorganization-world#|creationTime|2014-11-07T12:49:10Z
+http://rinuboney.github.io/2016/01/19/ladder-network.html|creationDate|2016-01-19
+http://rinuboney.github.io/2016/01/19/ladder-network.html|tag|http://www.semanlink.net/tag/semi_supervised_learning
+http://rinuboney.github.io/2016/01/19/ladder-network.html|title|Introduction to Semi-Supervised Learning with Ladder Networks Rinu Boney
+http://rinuboney.github.io/2016/01/19/ladder-network.html|creationTime|2016-01-19T16:01:29Z
+http://www.w3.org/wiki/ConverterToRdf|creationDate|2011-09-21
+http://www.w3.org/wiki/ConverterToRdf|tag|http://www.semanlink.net/tag/rdf_tools
+http://www.w3.org/wiki/ConverterToRdf|tag|http://www.semanlink.net/tag/rdf_dev
+http://www.w3.org/wiki/ConverterToRdf|comment|List of converters
+http://www.w3.org/wiki/ConverterToRdf|title|ConverterToRdf - W3C Wiki
+http://www.w3.org/wiki/ConverterToRdf|creationTime|2011-09-21T11:15:59Z
+http://www.christian-faure.net/2016/03/13/credo-sur-lombre-des-lumieres/|creationDate|2016-03-14
+http://www.christian-faure.net/2016/03/13/credo-sur-lombre-des-lumieres/|tag|http://www.semanlink.net/tag/christian_faure
+http://www.christian-faure.net/2016/03/13/credo-sur-lombre-des-lumieres/|tag|http://www.semanlink.net/tag/lumieres
+http://www.christian-faure.net/2016/03/13/credo-sur-lombre-des-lumieres/|title|Credo sur l’ombre des Lumières
+http://www.christian-faure.net/2016/03/13/credo-sur-lombre-des-lumieres/|creationTime|2016-03-14T22:03:49Z
+http://lists.w3.org/Archives/Public/public-ldp-wg/2012Aug/0034.html|creationDate|2014-09-10
+http://lists.w3.org/Archives/Public/public-ldp-wg/2012Aug/0034.html|tag|http://www.semanlink.net/tag/jena
+http://lists.w3.org/Archives/Public/public-ldp-wg/2012Aug/0034.html|tag|http://www.semanlink.net/tag/ldp_implementations
+http://lists.w3.org/Archives/Public/public-ldp-wg/2012Aug/0034.html|title|Re: LDP interfaces in Java (based on Jena and JAX-RS) from Jürgen Jakobitsch on 2012-08-06 (public-ldp-wg@w3.org from August 2012) 
+http://lists.w3.org/Archives/Public/public-ldp-wg/2012Aug/0034.html|creationTime|2014-09-10T14:12:30Z +http://www.lespetitescases.net/bilan-reflexion-sur-la-gestion-des-donnees-numeriques|creationDate|2018-07-18 +http://www.lespetitescases.net/bilan-reflexion-sur-la-gestion-des-donnees-numeriques|tag|http://www.semanlink.net/tag/les_petites_cases +http://www.lespetitescases.net/bilan-reflexion-sur-la-gestion-des-donnees-numeriques|tag|http://www.semanlink.net/tag/semantic_web_critique +http://www.lespetitescases.net/bilan-reflexion-sur-la-gestion-des-donnees-numeriques|tag|http://www.semanlink.net/tag/gautier_poupeau +http://www.lespetitescases.net/bilan-reflexion-sur-la-gestion-des-donnees-numeriques|comment|l’interopérabilité n’est finalement pas une préoccupation des organisations +http://www.lespetitescases.net/bilan-reflexion-sur-la-gestion-des-donnees-numeriques|title|Bilan de 15 ans de réflexion sur la gestion des données numériques Les petites cases +http://www.lespetitescases.net/bilan-reflexion-sur-la-gestion-des-donnees-numeriques|creationTime|2018-07-18T09:22:09Z +http://www.youtube.com/watch?v=g0gaIcfxtpc|creationDate|2009-09-01 +http://www.youtube.com/watch?v=g0gaIcfxtpc|tag|http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003 +http://www.youtube.com/watch?v=g0gaIcfxtpc|title|400m : remontée de Raquil aux championnats du monde St denis +http://www.youtube.com/watch?v=g0gaIcfxtpc|creationTime|2009-09-01T10:46:48Z +http://bigbrowser.blog.lemonde.fr/2012/08/17/perles-ils-ont-redore-le-blouson-de-lequipe-de-france/|creationDate|2012-08-17 +http://bigbrowser.blog.lemonde.fr/2012/08/17/perles-ils-ont-redore-le-blouson-de-lequipe-de-france/|tag|http://www.semanlink.net/tag/football +http://bigbrowser.blog.lemonde.fr/2012/08/17/perles-ils-ont-redore-le-blouson-de-lequipe-de-france/|tag|http://www.semanlink.net/tag/rigolo +http://bigbrowser.blog.lemonde.fr/2012/08/17/perles-ils-ont-redore-le-blouson-de-lequipe-de-france/|title|PERLES – Ils ont « redoré le blouson de l’Equipe de France » Big Browser +http://bigbrowser.blog.lemonde.fr/2012/08/17/perles-ils-ont-redore-le-blouson-de-lequipe-de-france/|creationTime|2012-08-17T22:14:14Z +https://arxiv.org/abs/1712.09405|creationDate|2017-12-29 +https://arxiv.org/abs/1712.09405|tag|http://www.semanlink.net/tag/word2vec +https://arxiv.org/abs/1712.09405|tag|http://www.semanlink.net/tag/fasttext +https://arxiv.org/abs/1712.09405|tag|http://www.semanlink.net/tag/word_embedding +https://arxiv.org/abs/1712.09405|tag|http://www.semanlink.net/tag/tomas_mikolov +https://arxiv.org/abs/1712.09405|tag|http://www.semanlink.net/tag/nlp_facebook +https://arxiv.org/abs/1712.09405|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1712.09405|arxiv_author|Tomas Mikolov +https://arxiv.org/abs/1712.09405|arxiv_author|Armand Joulin +https://arxiv.org/abs/1712.09405|arxiv_author|Piotr Bojanowski +https://arxiv.org/abs/1712.09405|arxiv_author|Christian Puhrsch +https://arxiv.org/abs/1712.09405|arxiv_author|Edouard Grave +https://arxiv.org/abs/1712.09405|comment|> we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. 
The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks +https://arxiv.org/abs/1712.09405|title|[1712.09405] Advances in Pre-Training Distributed Word Representations +https://arxiv.org/abs/1712.09405|creationTime|2017-12-29T20:52:48Z +https://arxiv.org/abs/1712.09405|arxiv_summary|"Many Natural Language Processing applications nowadays rely on pre-trained +word representations estimated from large text corpora such as news +collections, Wikipedia and Web Crawl. In this paper, we show how to train +high-quality word vector representations by using a combination of known tricks +that are however rarely used together. The main result of our work is the new +set of publicly available pre-trained models that outperform the current state +of the art by a large margin on a number of tasks." +https://arxiv.org/abs/1712.09405|arxiv_firstAuthor|Tomas Mikolov +https://arxiv.org/abs/1712.09405|arxiv_updated|2017-12-26T21:00:04Z +https://arxiv.org/abs/1712.09405|arxiv_title|Advances in Pre-Training Distributed Word Representations +https://arxiv.org/abs/1712.09405|arxiv_published|2017-12-26T21:00:04Z +https://arxiv.org/abs/1712.09405|arxiv_num|1712.09405 +http://openspring.net/sites/openspring.net/files/corl-etal-2009iswc.pdf|creationDate|2011-09-15 +http://openspring.net/sites/openspring.net/files/corl-etal-2009iswc.pdf|tag|http://www.semanlink.net/tag/drupal_rdf +http://openspring.net/sites/openspring.net/files/corl-etal-2009iswc.pdf|title|Produce and Consume Linked Data with Drupal! (full paper) +http://openspring.net/sites/openspring.net/files/corl-etal-2009iswc.pdf|creationTime|2011-09-15T23:58:50Z +http://jena.hpl.hp.com/wiki/SDB/Dataset_Description|creationDate|2008-11-17 +http://jena.hpl.hp.com/wiki/SDB/Dataset_Description|tag|http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena +http://jena.hpl.hp.com/wiki/SDB/Dataset_Description|tag|http://www.semanlink.net/tag/jena_assembler +http://jena.hpl.hp.com/wiki/SDB/Dataset_Description|title|SDB/Dataset Description - Jena wiki +http://jena.hpl.hp.com/wiki/SDB/Dataset_Description|creationTime|2008-11-17T00:50:08Z +http://aclweb.org/anthology/P17-1170|creationDate|2017-10-21 +http://aclweb.org/anthology/P17-1170|tag|http://www.semanlink.net/tag/word_embedding_evaluation +http://aclweb.org/anthology/P17-1170|tag|http://www.semanlink.net/tag/sense_embeddings +http://aclweb.org/anthology/P17-1170|tag|http://www.semanlink.net/tag/nlp +http://aclweb.org/anthology/P17-1170|tag|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://aclweb.org/anthology/P17-1170|tag|http://www.semanlink.net/tag/lexical_ambiguity +http://aclweb.org/anthology/P17-1170|comment|"By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. + + +Our results suggest that research in sense representation should put special emphasis on real-world evaluations on benchmarks for downstream applications, rather than on artificial tasks such as word similarity. 
In fact, research has previously shown that **word similarity might not constitute a reliable proxy to measure the performance of word embeddings in downstream applications** + +[github](https://github.com/pilehvar/sensecnn) +" +http://aclweb.org/anthology/P17-1170|title|Towards a Seamless Integration of Word Senses into Downstream NLP Applications (2017) +http://aclweb.org/anthology/P17-1170|creationTime|2017-10-21T16:59:09Z +https://nlpparis.files.wordpress.com/2018/11/monolingual.pdf|creationDate|2018-12-04 +https://nlpparis.files.wordpress.com/2018/11/monolingual.pdf|tag|http://www.semanlink.net/tag/paris_nlp_meetup +https://nlpparis.files.wordpress.com/2018/11/monolingual.pdf|tag|http://www.semanlink.net/tag/neural_machine_translation +https://nlpparis.files.wordpress.com/2018/11/monolingual.pdf|tag|http://www.semanlink.net/tag/slides +https://nlpparis.files.wordpress.com/2018/11/monolingual.pdf|tag|http://www.semanlink.net/tag/francois_yvon +https://nlpparis.files.wordpress.com/2018/11/monolingual.pdf|title|Monolingual data in NMT +https://nlpparis.files.wordpress.com/2018/11/monolingual.pdf|creationTime|2018-12-04T17:54:31Z +http://www.shirky.com/writings/evolve.html|creationDate|2007-09-27 +http://www.shirky.com/writings/evolve.html|tag|http://www.semanlink.net/tag/evolvable_systems +http://www.shirky.com/writings/evolve.html|tag|http://www.semanlink.net/tag/web +http://www.shirky.com/writings/evolve.html|tag|http://www.semanlink.net/tag/qotd +http://www.shirky.com/writings/evolve.html|tag|http://www.semanlink.net/tag/citation +http://www.shirky.com/writings/evolve.html|comment|"Why something as poorly designed as the Web became The Next Big Thing, and what that means for the future.
+THREE RULES FOR EVOLVABLE SYSTEMS
+""Only solutions that produce partial results when partially implemented can succeed""
+""What is, is wrong.""
+""Evolution is cleverer than you are"". + + + +" +http://www.shirky.com/writings/evolve.html|title|Shirky: In Praise of Evolvable Systems (1996) +http://www.shirky.com/writings/evolve.html|creationTime|2007-09-27T22:22:04Z +http://www.w3.org/2006/07/SWD/wiki/SKOS/Semantics|creationDate|2007-07-18 +http://www.w3.org/2006/07/SWD/wiki/SKOS/Semantics|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2006/07/SWD/wiki/SKOS/Semantics|tag|http://www.semanlink.net/tag/skos +http://www.w3.org/2006/07/SWD/wiki/SKOS/Semantics|title|SKOS/Semantics - W3C Semantic Web Deployment Wiki +http://www.w3.org/2006/07/SWD/wiki/SKOS/Semantics|creationTime|2007-07-18T23:37:56Z +http://blogs.sun.com/bblfish/entry/w3c_workshop_on_the_future|creationDate|2009-01-18 +http://blogs.sun.com/bblfish/entry/w3c_workshop_on_the_future|tag|http://www.semanlink.net/tag/social_networks +http://blogs.sun.com/bblfish/entry/w3c_workshop_on_the_future|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/bblfish/entry/w3c_workshop_on_the_future|title|The W3C Workshop on the Future of Social Networking Position Papers +http://blogs.sun.com/bblfish/entry/w3c_workshop_on_the_future|creationTime|2009-01-18T19:50:01Z +http://www.snee.com/bobdc.blog/2009/02/getting-started-using-virtuoso.html|creationDate|2009-03-03 +http://www.snee.com/bobdc.blog/2009/02/getting-started-using-virtuoso.html|tag|http://www.semanlink.net/tag/virtuoso +http://www.snee.com/bobdc.blog/2009/02/getting-started-using-virtuoso.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2009/02/getting-started-using-virtuoso.html|tag|http://www.semanlink.net/tag/triplestore +http://www.snee.com/bobdc.blog/2009/02/getting-started-using-virtuoso.html|title|Getting started using Virtuoso as a triplestore - bobdc.blog +http://www.snee.com/bobdc.blog/2009/02/getting-started-using-virtuoso.html|creationTime|2009-03-03T20:50:10Z +https://www.oezratty.net/wordpress/2018/comprendre-informatique-quantique-pourquoi/|creationDate|2018-10-28 +https://www.oezratty.net/wordpress/2018/comprendre-informatique-quantique-pourquoi/|tag|http://www.semanlink.net/tag/quantum_computing +https://www.oezratty.net/wordpress/2018/comprendre-informatique-quantique-pourquoi/|tag|http://www.semanlink.net/tag/mecanique_quantique +https://www.oezratty.net/wordpress/2018/comprendre-informatique-quantique-pourquoi/|comment|série de blog posts +https://www.oezratty.net/wordpress/2018/comprendre-informatique-quantique-pourquoi/|title|Comprendre l’informatique quantique +https://www.oezratty.net/wordpress/2018/comprendre-informatique-quantique-pourquoi/|creationTime|2018-10-28T14:35:29Z +http://mallet.cs.umass.edu/|creationDate|2012-09-20 +http://mallet.cs.umass.edu/|tag|http://www.semanlink.net/tag/mallet +http://mallet.cs.umass.edu/|title|MALLET homepage +http://mallet.cs.umass.edu/|creationTime|2012-09-20T10:41:45Z +http://www.wildml.com/2017/12/ai-and-deep-learning-in-2017-a-year-in-review/|creationDate|2018-01-01 +http://www.wildml.com/2017/12/ai-and-deep-learning-in-2017-a-year-in-review/|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.wildml.com/2017/12/ai-and-deep-learning-in-2017-a-year-in-review/|tag|http://www.semanlink.net/tag/denny_britz +http://www.wildml.com/2017/12/ai-and-deep-learning-in-2017-a-year-in-review/|tag|http://www.semanlink.net/tag/deep_learning +http://www.wildml.com/2017/12/ai-and-deep-learning-in-2017-a-year-in-review/|title|AI and Deep Learning in 2017 – A Year in Review – WildML 
+http://www.wildml.com/2017/12/ai-and-deep-learning-in-2017-a-year-in-review/|creationTime|2018-01-01T12:41:36Z
+http://biblio.telecom-paristech.fr/cgi-bin/download.cgi?id=6694|creationDate|2014-04-25
+http://biblio.telecom-paristech.fr/cgi-bin/download.cgi?id=6694|tag|http://www.semanlink.net/tag/support_vector_machine
+http://biblio.telecom-paristech.fr/cgi-bin/download.cgi?id=6694|tag|http://www.semanlink.net/tag/maxent_models
+http://biblio.telecom-paristech.fr/cgi-bin/download.cgi?id=6694|tag|http://www.semanlink.net/tag/reseaux_bayesiens
+http://biblio.telecom-paristech.fr/cgi-bin/download.cgi?id=6694|title|On Bayesian inference, maximum entropy and Support Vector Machines methods
+http://biblio.telecom-paristech.fr/cgi-bin/download.cgi?id=6694|creationTime|2014-04-25T16:21:40Z
+http://www.semantic-mediawiki.org/wiki/Semantic_MediaWiki|creationDate|2008-04-25
+http://www.semantic-mediawiki.org/wiki/Semantic_MediaWiki|tag|http://www.semanlink.net/tag/www08
+http://www.semantic-mediawiki.org/wiki/Semantic_MediaWiki|tag|http://www.semanlink.net/tag/semantic_wiki
+http://www.semantic-mediawiki.org/wiki/Semantic_MediaWiki|title|Semantic MediaWiki - Semantic-mediawiki.org
+http://www.semantic-mediawiki.org/wiki/Semantic_MediaWiki|creationTime|2008-04-25T11:05:14Z
+https://frcchang.github.io/tutorial/EMNLP2018_joint_models.pdf|creationDate|2018-11-06
+https://frcchang.github.io/tutorial/EMNLP2018_joint_models.pdf|tag|http://www.semanlink.net/tag/emnlp_2018
+https://frcchang.github.io/tutorial/EMNLP2018_joint_models.pdf|tag|http://www.semanlink.net/tag/slides
+https://frcchang.github.io/tutorial/EMNLP2018_joint_models.pdf|tag|http://www.semanlink.net/tag/multi_task_learning
+https://frcchang.github.io/tutorial/EMNLP2018_joint_models.pdf|tag|http://www.semanlink.net/tag/tutorial
+https://frcchang.github.io/tutorial/EMNLP2018_joint_models.pdf|comment|"**Joint models: solve 2 tasks at once.**
+
+Related tasks: POS tagging, NER, chunking. Pipeline tasks
+
+Motivations:
+
+- reduce error propagation
+- information exchange between tasks
+
+Challenges:
+
+- Joint learning
+- Search
+
+History: statistical models. 2 kinds:
+
+- Graph-Based Methods
+  - Traditional solution:
+    - Score each candidate, select the highest-scored output
+    - Search-space typically exponential
+- Transition-Based Methods
+  - Transition-Based systems: Automata
+  - State: partial result during decoding, Action: operations that can be applied for state transition
+  - Output constructed incrementally
+
+- Deep learning based model
+  - Neural transition based models
+  - Neural graph-based models
+  - Cross task
+    - Seminal work: Collobert, Ronan, et al. ""Natural language processing (almost) from scratch.""
+    - Not all tasks are mutually beneficial
+    - Ramachandran, et al. “Unsupervised pretraining for sequence to sequence learning.”
+    - Peters, Matthew E., et al. 
""Deep contextualized word representations."" (ELMo) + - ""BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding."" + - ULMFIT + - Correlation between multi-task learning and pretraining + - Cross lingual + - Cross domain + - Cross standard + + + +" +https://frcchang.github.io/tutorial/EMNLP2018_joint_models.pdf|title|Joint Models in NLP - Slides - Tutorial (EMNLP 2018) - Yue Zhang +https://frcchang.github.io/tutorial/EMNLP2018_joint_models.pdf|creationTime|2018-11-06T11:22:04Z +http://docs.openlinksw.com:80/virtuoso/rdfnativestorageproviders.html|creationDate|2008-08-28 +http://docs.openlinksw.com:80/virtuoso/rdfnativestorageproviders.html|tag|http://www.semanlink.net/tag/jena +http://docs.openlinksw.com:80/virtuoso/rdfnativestorageproviders.html|tag|http://www.semanlink.net/tag/virtuoso +http://docs.openlinksw.com:80/virtuoso/rdfnativestorageproviders.html|comment|The Virtuoso Jena RDF Data Provider is a fully operational Native Graph Model Storage Provider for the Jena Framework, enables Semantic Web applications written using the Jena RDF Frameworks to query the Virtuoso RDF Quad store directly. +http://docs.openlinksw.com:80/virtuoso/rdfnativestorageproviders.html|title|Virtuoso Jena Provider +http://docs.openlinksw.com:80/virtuoso/rdfnativestorageproviders.html|creationTime|2008-08-28T23:12:26Z +https://github.com/facebookresearch/fairseq-py|creationDate|2017-10-02 +https://github.com/facebookresearch/fairseq-py|tag|http://www.semanlink.net/tag/github_project +https://github.com/facebookresearch/fairseq-py|tag|http://www.semanlink.net/tag/python +https://github.com/facebookresearch/fairseq-py|tag|http://www.semanlink.net/tag/sequence_to_sequence_learning +https://github.com/facebookresearch/fairseq-py|tag|http://www.semanlink.net/tag/facebook +https://github.com/facebookresearch/fairseq-py|title|facebookresearch/fairseq-py: Facebook AI Research Sequence-to-Sequence Toolkit written in Python. +https://github.com/facebookresearch/fairseq-py|creationTime|2017-10-02T13:34:19Z +https://sermanet.github.io/imitate/|creationDate|2018-10-27 +https://sermanet.github.io/imitate/|tag|http://www.semanlink.net/tag/google_brain +https://sermanet.github.io/imitate/|tag|http://www.semanlink.net/tag/france_is_ai_2018 +https://sermanet.github.io/imitate/|tag|http://www.semanlink.net/tag/robotic_imitation +https://sermanet.github.io/imitate/|tag|http://www.semanlink.net/tag/reinforcement_learning +https://sermanet.github.io/imitate/|tag|http://www.semanlink.net/tag/self_supervised_learning +https://sermanet.github.io/imitate/|tag|http://www.semanlink.net/tag/learning_by_imitation +https://sermanet.github.io/imitate/|comment|"Self-supervised approach for learning representations and robotic behaviors entirely from unlabeled videos recorded from multiple viewpoints, and study how this representation can be used in two robotic imitation settings: imitating object interactions from videos of humans, and imitating human poses. + +> We train our representations using a metric learning loss, where multiple simultaneous viewpoints of the same observation are attracted in the embedding space, while being repelled from temporal neighbors which are often visually similar but functionally different. In other words, the model simultaneously learns to recognize what is common between different-looking images, and what is different between similar-looking images. 
+> This signal causes our model to discover attributes that do not change across viewpoint, but do change across time, while ignoring nuisance variables such as occlusions, motion blur, lighting and background. We demonstrate that this representation can be used by a robot to directly mimic human poses without an explicit correspondence, and that it can be used as a reward function within a reinforcement learning algorithm." +https://sermanet.github.io/imitate/|title|Time-Contrastive Networks: Self-Supervised Learning from Video (2017) +https://sermanet.github.io/imitate/|creationTime|2018-10-27T14:59:43Z +http://www.flickr.com/photos/danbri/3282565132/|creationDate|2010-12-22 +http://www.flickr.com/photos/danbri/3282565132/|tag|http://www.semanlink.net/tag/dan_brickley +http://www.flickr.com/photos/danbri/3282565132/|tag|http://www.semanlink.net/tag/skos +http://www.flickr.com/photos/danbri/3282565132/|title|The topic topic +http://www.flickr.com/photos/danbri/3282565132/|creationTime|2010-12-22T17:10:43Z +http://www.opur.u-bordeaux.fr/|creationDate|2006-01-07 +http://www.opur.u-bordeaux.fr/|tag|http://www.semanlink.net/tag/rosee +http://www.opur.u-bordeaux.fr/|tag|http://www.semanlink.net/tag/sylvain +http://www.opur.u-bordeaux.fr/|title|International Organization For Dew Utilization +http://slifty.com/2012/08/a-tor-of-the-dark-web/|creationDate|2012-08-17 +http://slifty.com/2012/08/a-tor-of-the-dark-web/|tag|http://www.semanlink.net/tag/tor_anonymity_network +http://slifty.com/2012/08/a-tor-of-the-dark-web/|title|A Tor of the Dark Web Sorry for the Spam +http://slifty.com/2012/08/a-tor-of-the-dark-web/|creationTime|2012-08-17T22:23:31Z +http://www.cs.stanford.edu/people/ang//papers/nips06-mapreducemulticore.pdf|creationDate|2014-05-17 +http://www.cs.stanford.edu/people/ang//papers/nips06-mapreducemulticore.pdf|tag|http://www.semanlink.net/tag/ng +http://www.cs.stanford.edu/people/ang//papers/nips06-mapreducemulticore.pdf|tag|http://www.semanlink.net/tag/machine_learning +http://www.cs.stanford.edu/people/ang//papers/nips06-mapreducemulticore.pdf|tag|http://www.semanlink.net/tag/map_reduce +http://www.cs.stanford.edu/people/ang//papers/nips06-mapreducemulticore.pdf|title|Map-Reduce for Machine Learning on multicore +http://www.cs.stanford.edu/people/ang//papers/nips06-mapreducemulticore.pdf|creationTime|2014-05-17T14:01:08Z +http://www.youtube.com/watch?v=38spQlWHhno|creationDate|2009-08-18 +http://www.youtube.com/watch?v=38spQlWHhno|tag|http://www.semanlink.net/tag/jeux_olympiques +http://www.youtube.com/watch?v=38spQlWHhno|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=38spQlWHhno|tag|http://www.semanlink.net/tag/marie_jo_perec +http://www.youtube.com/watch?v=38spQlWHhno|title|Marie-José Pérec, Atlanta 1996 +http://www.youtube.com/watch?v=38spQlWHhno|creationTime|2009-08-18T11:54:18Z +http://www.mnot.net/blog/2006/05/11/browser_caching|creationDate|2008-02-16 +http://www.mnot.net/blog/2006/05/11/browser_caching|tag|http://www.semanlink.net/tag/http_cache +http://www.mnot.net/blog/2006/05/11/browser_caching|title|mnot’s Web log: The State of Browser Caching +http://www.mnot.net/blog/2006/05/11/browser_caching|creationTime|2008-02-16T00:47:50Z +http://www.lemonde.fr/afrique/article/2017/08/06/un-dimanche-a-kigali-du-memorial-du-genocide-a-l-hotel-des-mille-combines_5169224_3212.html|creationDate|2017-08-06 
+http://www.lemonde.fr/afrique/article/2017/08/06/un-dimanche-a-kigali-du-memorial-du-genocide-a-l-hotel-des-mille-combines_5169224_3212.html|tag|http://www.semanlink.net/tag/kigali +http://www.lemonde.fr/afrique/article/2017/08/06/un-dimanche-a-kigali-du-memorial-du-genocide-a-l-hotel-des-mille-combines_5169224_3212.html|tag|http://www.semanlink.net/tag/new_africa +http://www.lemonde.fr/afrique/article/2017/08/06/un-dimanche-a-kigali-du-memorial-du-genocide-a-l-hotel-des-mille-combines_5169224_3212.html|tag|http://www.semanlink.net/tag/rwanda +http://www.lemonde.fr/afrique/article/2017/08/06/un-dimanche-a-kigali-du-memorial-du-genocide-a-l-hotel-des-mille-combines_5169224_3212.html|title|Un dimanche à Kigali, du mémorial du génocide à « l’hôtel des mille combines » +http://www.lemonde.fr/afrique/article/2017/08/06/un-dimanche-a-kigali-du-memorial-du-genocide-a-l-hotel-des-mille-combines_5169224_3212.html|creationTime|2017-08-06T10:46:42Z +https://www.nationalgeographic.com/science/2019/03/fossils-found-from-day-dinosaurs-died-chicxulub-tanis-cretaceous-extinction/|creationDate|2019-04-01 +https://www.nationalgeographic.com/science/2019/03/fossils-found-from-day-dinosaurs-died-chicxulub-tanis-cretaceous-extinction/|tag|http://www.semanlink.net/tag/tanis_kt +https://www.nationalgeographic.com/science/2019/03/fossils-found-from-day-dinosaurs-died-chicxulub-tanis-cretaceous-extinction/|tag|http://www.semanlink.net/tag/national_geographic +https://www.nationalgeographic.com/science/2019/03/fossils-found-from-day-dinosaurs-died-chicxulub-tanis-cretaceous-extinction/|title|Fossils may capture the day the dinosaurs died. Here's what you should know. (National Geographic) +https://www.nationalgeographic.com/science/2019/03/fossils-found-from-day-dinosaurs-died-chicxulub-tanis-cretaceous-extinction/|creationTime|2019-04-01T17:41:12Z +http://www.onlamp.com/pub/a/onlamp/2005/04/22/bosworth.html|creationDate|2005-10-25 +http://www.onlamp.com/pub/a/onlamp/2005/04/22/bosworth.html|tag|http://www.semanlink.net/tag/google +http://www.onlamp.com/pub/a/onlamp/2005/04/22/bosworth.html|tag|http://www.semanlink.net/tag/mysql +http://www.onlamp.com/pub/a/onlamp/2005/04/22/bosworth.html|tag|http://www.semanlink.net/tag/web_services_critique +http://www.onlamp.com/pub/a/onlamp/2005/04/22/bosworth.html|tag|http://www.semanlink.net/tag/semantic_web_critique +http://www.onlamp.com/pub/a/onlamp/2005/04/22/bosworth.html|title|ONLamp.com: Bosworth's Web of Data +http://www.onlamp.com/pub/a/onlamp/2005/04/22/bosworth.html|seeAlso|http://torrez.us/archives/2005/10/24/407 +http://stackoverflow.com/questions/111102/how-do-javascript-closures-work?noredirect=1&lq=1|creationDate|2016-09-20 +http://stackoverflow.com/questions/111102/how-do-javascript-closures-work?noredirect=1&lq=1|tag|http://www.semanlink.net/tag/javascript_closures +http://stackoverflow.com/questions/111102/how-do-javascript-closures-work?noredirect=1&lq=1|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/111102/how-do-javascript-closures-work?noredirect=1&lq=1|title|scope - How do JavaScript closures work? 
- Stack Overflow +http://stackoverflow.com/questions/111102/how-do-javascript-closures-work?noredirect=1&lq=1|creationTime|2016-09-20T11:38:21Z +http://bugbrother.blog.lemonde.fr/2010/06/02/le-tresor-de-guerre-de-wikileaks-une-gorge-profonde-chinoise/#xtor=RSS-32280322|creationDate|2010-06-02 +http://bugbrother.blog.lemonde.fr/2010/06/02/le-tresor-de-guerre-de-wikileaks-une-gorge-profonde-chinoise/#xtor=RSS-32280322|tag|http://www.semanlink.net/tag/wikileaks +http://bugbrother.blog.lemonde.fr/2010/06/02/le-tresor-de-guerre-de-wikileaks-une-gorge-profonde-chinoise/#xtor=RSS-32280322|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2010/06/02/le-tresor-de-guerre-de-wikileaks-une-gorge-profonde-chinoise/#xtor=RSS-32280322|title|Le trésor de guerre de Wikileaks ? Une gorge profonde chinoise - BUG BROTHER +http://bugbrother.blog.lemonde.fr/2010/06/02/le-tresor-de-guerre-de-wikileaks-une-gorge-profonde-chinoise/#xtor=RSS-32280322|creationTime|2010-06-02T21:44:05Z +http://semanticweb.com/defending_the_warehouse_b17223#more-17223|creationDate|2011-01-07 +http://semanticweb.com/defending_the_warehouse_b17223#more-17223|tag|http://www.semanlink.net/tag/data_integration +http://semanticweb.com/defending_the_warehouse_b17223#more-17223|tag|http://www.semanlink.net/tag/data_warehouse +http://semanticweb.com/defending_the_warehouse_b17223#more-17223|tag|http://www.semanlink.net/tag/enterprise_data +http://semanticweb.com/defending_the_warehouse_b17223#more-17223|tag|http://www.semanlink.net/tag/etl +http://semanticweb.com/defending_the_warehouse_b17223#more-17223|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://semanticweb.com/defending_the_warehouse_b17223#more-17223|comment|This is not at all to say that semantically linking datasets isn’t valuable.  On the contrary!  I believe that coating old, weather-beaten databases with a coat of semantic paint is awesomely valuable.  It makes creating ETL pipelines that bring together data from all kinds of locations a breeze as compared to traditional, relationally-oriented ETL pipelines.  It’s hardly even fair to compare the two approaches, except insofar as the maturity of the traditional technologies is concerned, and I’ll try to pick up on specific reasons for this belief in future posts.  
In fact, I see semantics as enabling on-demand datamarts in ways that traditional data integration technologies simply have failed to do +http://semanticweb.com/defending_the_warehouse_b17223#more-17223|title|I’ve got a Federated Bridge to Sell You (A Defense of the Warehouse) - semanticweb.com +http://semanticweb.com/defending_the_warehouse_b17223#more-17223|creationTime|2011-01-07T23:58:23Z +http://www.lemonde.fr/pixels/article/2014/10/02/shellshock-la-faille-de-securite-majeure-decouverte-presque-par-hasard-par-un-francais_4498904_4408996.html|creationDate|2014-10-03 +http://www.lemonde.fr/pixels/article/2014/10/02/shellshock-la-faille-de-securite-majeure-decouverte-presque-par-hasard-par-un-francais_4498904_4408996.html|tag|http://www.semanlink.net/tag/faille_de_securite +http://www.lemonde.fr/pixels/article/2014/10/02/shellshock-la-faille-de-securite-majeure-decouverte-presque-par-hasard-par-un-francais_4498904_4408996.html|tag|http://www.semanlink.net/tag/bash +http://www.lemonde.fr/pixels/article/2014/10/02/shellshock-la-faille-de-securite-majeure-decouverte-presque-par-hasard-par-un-francais_4498904_4408996.html|title|Shellshock, la faille de sécurité majeure découverte « presque par hasard » par un Français +http://www.lemonde.fr/pixels/article/2014/10/02/shellshock-la-faille-de-securite-majeure-decouverte-presque-par-hasard-par-un-francais_4498904_4408996.html|creationTime|2014-10-03T00:12:51Z +http://www.bloomberg.com/graphics/2015-paul-ford-what-is-code/|creationDate|2015-06-29 +http://www.bloomberg.com/graphics/2015-paul-ford-what-is-code/|tag|http://www.semanlink.net/tag/programming +http://www.bloomberg.com/graphics/2015-paul-ford-what-is-code/|title|Paul Ford: What is Code? Bloomberg +http://www.bloomberg.com/graphics/2015-paul-ford-what-is-code/|creationTime|2015-06-29T01:37:01Z +http://www.openculture.com/2019/02/an-animated-reconstruction-of-ancient-rome.html|creationDate|2019-02-25 +http://www.openculture.com/2019/02/an-animated-reconstruction-of-ancient-rome.html|tag|http://www.semanlink.net/tag/antiquite_romaine +http://www.openculture.com/2019/02/an-animated-reconstruction-of-ancient-rome.html|tag|http://www.semanlink.net/tag/realite_virtuelle +http://www.openculture.com/2019/02/an-animated-reconstruction-of-ancient-rome.html|tag|http://www.semanlink.net/tag/rome +http://www.openculture.com/2019/02/an-animated-reconstruction-of-ancient-rome.html|title|An Animated Reconstruction of Ancient Rome: Take A 30-Minute Stroll Through the City's Virtually-Recreated Streets Open Culture +http://www.openculture.com/2019/02/an-animated-reconstruction-of-ancient-rome.html|creationTime|2019-02-25T14:50:42Z +http://www.theguardian.com/world/2016/mar/13/go-humans-lee-sedol-scores-first-victory-against-supercomputer|creationDate|2016-03-13 +http://www.theguardian.com/world/2016/mar/13/go-humans-lee-sedol-scores-first-victory-against-supercomputer|tag|http://www.semanlink.net/tag/lee_sedol +http://www.theguardian.com/world/2016/mar/13/go-humans-lee-sedol-scores-first-victory-against-supercomputer|tag|http://www.semanlink.net/tag/alphago +http://www.theguardian.com/world/2016/mar/13/go-humans-lee-sedol-scores-first-victory-against-supercomputer|title|Go humans: Lee Sedol scores first victory against supercomputer World news The Guardian +http://www.theguardian.com/world/2016/mar/13/go-humans-lee-sedol-scores-first-victory-against-supercomputer|creationTime|2016-03-13T20:28:38Z +http://www.w3.org/2011/09/LinkedData/Report|creationDate|2012-02-05 
+http://www.w3.org/2011/09/LinkedData/Report|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2011/09/LinkedData/Report|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://www.w3.org/2011/09/LinkedData/Report|title|Workshop Report: Linked Enterprise Data Patterns +http://www.w3.org/2011/09/LinkedData/Report|creationTime|2012-02-05T19:29:33Z +http://pt.wikipedia.org/wiki/Pedra_do_Ing%C3%A1|creationDate|2011-11-26 +http://pt.wikipedia.org/wiki/Pedra_do_Ing%C3%A1|tag|http://www.semanlink.net/tag/archeologie +http://pt.wikipedia.org/wiki/Pedra_do_Ing%C3%A1|tag|http://www.semanlink.net/tag/bresil +http://pt.wikipedia.org/wiki/Pedra_do_Ing%C3%A1|title|Pedra do Ingá +http://pt.wikipedia.org/wiki/Pedra_do_Ing%C3%A1|creationTime|2011-11-26T00:10:26Z +http://bigbrowser.blog.lemonde.fr/2013/08/06/motus-aux-etats-unis-deux-enfants-interdits-de-parler-du-gaz-de-schiste/#xtor=RSS-32280322|creationDate|2013-08-07 +http://bigbrowser.blog.lemonde.fr/2013/08/06/motus-aux-etats-unis-deux-enfants-interdits-de-parler-du-gaz-de-schiste/#xtor=RSS-32280322|tag|http://www.semanlink.net/tag/justice_americaine +http://bigbrowser.blog.lemonde.fr/2013/08/06/motus-aux-etats-unis-deux-enfants-interdits-de-parler-du-gaz-de-schiste/#xtor=RSS-32280322|tag|http://www.semanlink.net/tag/liberte_d_expression +http://bigbrowser.blog.lemonde.fr/2013/08/06/motus-aux-etats-unis-deux-enfants-interdits-de-parler-du-gaz-de-schiste/#xtor=RSS-32280322|tag|http://www.semanlink.net/tag/gaz_de_schiste +http://bigbrowser.blog.lemonde.fr/2013/08/06/motus-aux-etats-unis-deux-enfants-interdits-de-parler-du-gaz-de-schiste/#xtor=RSS-32280322|title|MOTUS – Aux Etats-Unis, deux enfants interdits de parler du gaz de schiste Big Browser +http://bigbrowser.blog.lemonde.fr/2013/08/06/motus-aux-etats-unis-deux-enfants-interdits-de-parler-du-gaz-de-schiste/#xtor=RSS-32280322|creationTime|2013-08-07T00:48:35Z +http://sti.innoraise.com/profile/show/5350|creationDate|2008-10-28 +http://sti.innoraise.com/profile/show/5350|tag|http://www.semanlink.net/tag/fps +http://sti.innoraise.com/profile/show/5350|tag|http://www.semanlink.net/tag/innoraise +http://sti.innoraise.com/profile/show/5350|title|Me on INNORAISE +http://sti.innoraise.com/profile/show/5350|creationTime|2008-10-28T16:07:12Z +https://www.slideshare.net/pmika/what-happened-to-the-semantic-web|creationDate|2017-07-05 +https://www.slideshare.net/pmika/what-happened-to-the-semantic-web|tag|http://www.semanlink.net/tag/semantic_web +https://www.slideshare.net/pmika/what-happened-to-the-semantic-web|tag|http://www.semanlink.net/tag/peter_mika +https://www.slideshare.net/pmika/what-happened-to-the-semantic-web|title|What happened to the Semantic Web? 
+https://www.slideshare.net/pmika/what-happened-to-the-semantic-web|creationTime|2017-07-05T23:14:19Z +https://www.ida.liu.se/research/semanticweb/events/GraphQLTutorialAtISWC2019.shtml|creationDate|2019-02-17 +https://www.ida.liu.se/research/semanticweb/events/GraphQLTutorialAtISWC2019.shtml|tag|http://www.semanlink.net/tag/tutorial +https://www.ida.liu.se/research/semanticweb/events/GraphQLTutorialAtISWC2019.shtml|tag|http://www.semanlink.net/tag/iswc +https://www.ida.liu.se/research/semanticweb/events/GraphQLTutorialAtISWC2019.shtml|tag|http://www.semanlink.net/tag/olaf_hartig +https://www.ida.liu.se/research/semanticweb/events/GraphQLTutorialAtISWC2019.shtml|tag|http://www.semanlink.net/tag/graphql +https://www.ida.liu.se/research/semanticweb/events/GraphQLTutorialAtISWC2019.shtml|title|"ISWC 2019 Tutorial ""An Introduction To GraphQL"" - IDA" +https://www.ida.liu.se/research/semanticweb/events/GraphQLTutorialAtISWC2019.shtml|creationTime|2019-02-17T10:45:12Z +http://danbri.org/words/2010/11/30/615|creationDate|2010-12-06 +http://danbri.org/words/2010/11/30/615|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://danbri.org/words/2010/11/30/615|tag|http://www.semanlink.net/tag/synthetic_biology +http://danbri.org/words/2010/11/30/615|tag|http://www.semanlink.net/tag/dan_brickley +http://danbri.org/words/2010/11/30/615|title|How to tell you’re living in the future: bacterial computers, HTML and RDF +http://danbri.org/words/2010/11/30/615|creationTime|2010-12-06T18:58:32Z +http://ebiquity.umbc.edu/blogger/2011/06/02/microdata-rdfa-google-bing-yahoo-semantic-web/|creationDate|2011-06-07 +http://ebiquity.umbc.edu/blogger/2011/06/02/microdata-rdfa-google-bing-yahoo-semantic-web/|tag|http://www.semanlink.net/tag/schema_org +http://ebiquity.umbc.edu/blogger/2011/06/02/microdata-rdfa-google-bing-yahoo-semantic-web/|tag|http://www.semanlink.net/tag/rdfa +http://ebiquity.umbc.edu/blogger/2011/06/02/microdata-rdfa-google-bing-yahoo-semantic-web/|tag|http://www.semanlink.net/tag/microdata +http://ebiquity.umbc.edu/blogger/2011/06/02/microdata-rdfa-google-bing-yahoo-semantic-web/|title|Microdata chosen over RDFa for semantics by Google, Bing and Yahoo! +http://ebiquity.umbc.edu/blogger/2011/06/02/microdata-rdfa-google-bing-yahoo-semantic-web/|creationTime|2011-06-07T13:51:23Z +http://www.w3.org/blog/SW/2009/06/23/xsparql_published_as_a_w3c_submission|creationDate|2009-06-24 +http://www.w3.org/blog/SW/2009/06/23/xsparql_published_as_a_w3c_submission|tag|http://www.semanlink.net/tag/xsparql +http://www.w3.org/blog/SW/2009/06/23/xsparql_published_as_a_w3c_submission|tag|http://www.semanlink.net/tag/w3c_submission +http://www.w3.org/blog/SW/2009/06/23/xsparql_published_as_a_w3c_submission|title|W3C Semantic Web Activity News - XSPARQL published as a W3C Submission +http://www.w3.org/blog/SW/2009/06/23/xsparql_published_as_a_w3c_submission|creationTime|2009-06-24T21:58:38Z +http://semwebdev.keithalexander.co.uk/blog/posts/use_erdf_why|creationDate|2007-07-03 +http://semwebdev.keithalexander.co.uk/blog/posts/use_erdf_why|tag|http://www.semanlink.net/tag/rdf_driven_web_sites +http://semwebdev.keithalexander.co.uk/blog/posts/use_erdf_why|tag|http://www.semanlink.net/tag/erdf +http://semwebdev.keithalexander.co.uk/blog/posts/use_erdf_why|title|Why use eRDF? 
+http://semwebdev.keithalexander.co.uk/blog/posts/use_erdf_why|creationTime|2007-07-03T00:58:26Z +https://medium.com/@madrugado/interesting-stuff-in-emnlp-part-i-4a79b5007eb1|creationDate|2018-11-25 +https://medium.com/@madrugado/interesting-stuff-in-emnlp-part-i-4a79b5007eb1|tag|http://www.semanlink.net/tag/emnlp_2018 +https://medium.com/@madrugado/interesting-stuff-in-emnlp-part-i-4a79b5007eb1|title|Interesting Stuff in EMNLP (part I) – Valentin Malykh – Medium +https://medium.com/@madrugado/interesting-stuff-in-emnlp-part-i-4a79b5007eb1|creationTime|2018-11-25T15:53:56Z +http://www.jenitennison.com/blog/node/165|creationDate|2014-04-13 +http://www.jenitennison.com/blog/node/165|tag|http://www.semanlink.net/tag/rdfa +http://www.jenitennison.com/blog/node/165|tag|http://www.semanlink.net/tag/jeni_tennison +http://www.jenitennison.com/blog/node/165|tag|http://www.semanlink.net/tag/microdata +http://www.jenitennison.com/blog/node/165|title|Microdata and RDFa Living Together in Harmony Jeni's Musings +http://www.jenitennison.com/blog/node/165|creationTime|2014-04-13T13:40:35Z +http://www.arxiv-sanity.com/|creationDate|2018-01-22 +http://www.arxiv-sanity.com/|tag|http://www.semanlink.net/tag/tools +http://www.arxiv-sanity.com/|tag|http://www.semanlink.net/tag/andrej_karpathy +http://www.arxiv-sanity.com/|tag|http://www.semanlink.net/tag/arxiv +http://www.arxiv-sanity.com/|title|Arxiv Sanity Preserver +http://www.arxiv-sanity.com/|creationTime|2018-01-22T18:07:05Z +http://www-nlp.stanford.edu/wiki/Software/Classifier|creationDate|2014-03-16 +http://www-nlp.stanford.edu/wiki/Software/Classifier|tag|http://www.semanlink.net/tag/stanford_classifier +http://www-nlp.stanford.edu/wiki/Software/Classifier|title|The Stanford classifier +http://www-nlp.stanford.edu/wiki/Software/Classifier|creationTime|2014-03-16T17:50:20Z +http://www.ldodds.com/blog/archives/000237.html|creationDate|2005-10-04 +http://www.ldodds.com/blog/archives/000237.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/archives/000237.html|tag|http://www.semanlink.net/tag/jena +http://www.ldodds.com/blog/archives/000237.html|title|Lost Boy: Using Jena in an Application Server +http://blog.datumbox.com/machine-learning-tutorial-the-max-entropy-text-classifier/|creationDate|2014-04-08 +http://blog.datumbox.com/machine-learning-tutorial-the-max-entropy-text-classifier/|tag|http://www.semanlink.net/tag/maxent_classifier +http://blog.datumbox.com/machine-learning-tutorial-the-max-entropy-text-classifier/|tag|http://www.semanlink.net/tag/nlp_text_classification +http://blog.datumbox.com/machine-learning-tutorial-the-max-entropy-text-classifier/|tag|http://www.semanlink.net/tag/tutorial +http://blog.datumbox.com/machine-learning-tutorial-the-max-entropy-text-classifier/|title|Machine Learning Tutorial: The Max Entropy Text Classifier DatumBox +http://blog.datumbox.com/machine-learning-tutorial-the-max-entropy-text-classifier/|creationTime|2014-04-08T19:18:28Z +http://www.wired.com/wiredscience/2014/03/strangest-magma-earth-carbonatites-oldoinyo-lengai/|creationDate|2014-03-12 +http://www.wired.com/wiredscience/2014/03/strangest-magma-earth-carbonatites-oldoinyo-lengai/|tag|http://www.semanlink.net/tag/volcan +http://www.wired.com/wiredscience/2014/03/strangest-magma-earth-carbonatites-oldoinyo-lengai/|title|Strangest Magma on Earth: Carbonatites of Ol Doinyo Lengai - Wired Science +http://www.wired.com/wiredscience/2014/03/strangest-magma-earth-carbonatites-oldoinyo-lengai/|creationTime|2014-03-12T00:36:45Z 
+http://www.w3.org/International/O-URL|creationDate|2008-10-13 +http://www.w3.org/International/O-URL|tag|http://www.semanlink.net/tag/uri_encoding +http://www.w3.org/International/O-URL|title|i18n/l10n: URL +http://www.w3.org/International/O-URL|creationTime|2008-10-13T10:18:02Z +https://cloud.google.com/blog/big-data/2017/10/intro-to-text-classification-with-keras-automatically-tagging-stack-overflow-posts|creationDate|2018-03-04 +https://cloud.google.com/blog/big-data/2017/10/intro-to-text-classification-with-keras-automatically-tagging-stack-overflow-posts|tag|http://www.semanlink.net/tag/keras +https://cloud.google.com/blog/big-data/2017/10/intro-to-text-classification-with-keras-automatically-tagging-stack-overflow-posts|tag|http://www.semanlink.net/tag/nlp_text_classification +https://cloud.google.com/blog/big-data/2017/10/intro-to-text-classification-with-keras-automatically-tagging-stack-overflow-posts|tag|http://www.semanlink.net/tag/stack_overflow +https://cloud.google.com/blog/big-data/2017/10/intro-to-text-classification-with-keras-automatically-tagging-stack-overflow-posts|title|Intro to text classification with Keras: automatically tagging Stack Overflow posts Google Cloud Big Data and Machine Learning Blog +https://cloud.google.com/blog/big-data/2017/10/intro-to-text-classification-with-keras-automatically-tagging-stack-overflow-posts|creationTime|2018-03-04T16:59:49Z +http://en.wikipedia.org/wiki/Il_Divo_(film)|creationDate|2011-11-28 +http://en.wikipedia.org/wiki/Il_Divo_(film)|tag|http://www.semanlink.net/tag/mafia +http://en.wikipedia.org/wiki/Il_Divo_(film)|tag|http://www.semanlink.net/tag/film_italien +http://en.wikipedia.org/wiki/Il_Divo_(film)|comment|based on the figure of former Italian Prime Minister Giulio Andreotti +http://en.wikipedia.org/wiki/Il_Divo_(film)|title|Il Divo (film) - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Il_Divo_(film)|creationTime|2011-11-28T21:53:57Z +http://www.wired.co.uk/article/chinese-government-social-credit-score-privacy-invasion|creationDate|2018-01-03 +http://www.wired.co.uk/article/chinese-government-social-credit-score-privacy-invasion|tag|http://www.semanlink.net/tag/china_s_social_credit_system +http://www.wired.co.uk/article/chinese-government-social-credit-score-privacy-invasion|title|Big data meets Big Brother as China moves to rate its citizens WIRED UK +http://www.wired.co.uk/article/chinese-government-social-credit-score-privacy-invasion|creationTime|2018-01-03T17:42:44Z +http://trdf.sourceforge.net/|creationDate|2013-07-12 +http://trdf.sourceforge.net/|tag|http://www.semanlink.net/tag/olaf_hartig +http://trdf.sourceforge.net/|tag|http://www.semanlink.net/tag/trust_in_the_web_of_data +http://trdf.sourceforge.net/|title|tRDF - Tools for Trust in the Web of Data +http://trdf.sourceforge.net/|creationTime|2013-07-12T11:11:50Z +http://emnlp2018.org/schedule|creationDate|2018-11-04 +http://emnlp2018.org/schedule|tag|http://www.semanlink.net/tag/emnlp_2018 +http://emnlp2018.org/schedule|title|Conference Schedule - EMNLP 2018 +http://emnlp2018.org/schedule|creationTime|2018-11-04T00:49:44Z +http://www.afrixml.net/|creationDate|2013-01-06 +http://www.afrixml.net/|tag|http://www.semanlink.net/tag/semantic_web +http://www.afrixml.net/|tag|http://www.semanlink.net/tag/semantic_web_project +http://www.afrixml.net/|tag|http://www.semanlink.net/tag/afrique +http://www.afrixml.net/|comment|AfriXML is a Semantic Web project for describing and sharing the global experience of Africa and the Diaspora. 
It espouses the concepts and methods of digital anthropology research for the development of Africa-focused software and networked content. 
+http://www.afrixml.net/|title|AfriXML - Semantic Africa African Vocabularies, Taxonomies and Ontologies: XML. Web Services. RDF
+http://www.afrixml.net/|creationTime|2013-01-06T18:51:42Z
+http://www.amazingadgets.com/Gadgets/google-adsense/social-book-marking-script-clones-of-diggcom.php|creationDate|2008-02-15
+http://www.amazingadgets.com/Gadgets/google-adsense/social-book-marking-script-clones-of-diggcom.php|tag|http://www.semanlink.net/tag/linkto_semanlink
+http://www.amazingadgets.com/Gadgets/google-adsense/social-book-marking-script-clones-of-diggcom.php|title|Social Book marking Script - Clones of Digg.com
+http://www.amazingadgets.com/Gadgets/google-adsense/social-book-marking-script-clones-of-diggcom.php|creationTime|2008-02-15T23:53:40Z
+http://www.ibm.com/developerworks/library/x-disprdf/index.html|creationDate|2010-09-30
+http://www.ibm.com/developerworks/library/x-disprdf/index.html|tag|http://www.semanlink.net/tag/ibm_developerworks
+http://www.ibm.com/developerworks/library/x-disprdf/index.html|tag|http://www.semanlink.net/tag/business_intelligence
+http://www.ibm.com/developerworks/library/x-disprdf/index.html|tag|http://www.semanlink.net/tag/howto
+http://www.ibm.com/developerworks/library/x-disprdf/index.html|tag|http://www.semanlink.net/tag/excel_and_sw
+http://www.ibm.com/developerworks/library/x-disprdf/index.html|tag|http://www.semanlink.net/tag/bob_ducharme
+http://www.ibm.com/developerworks/library/x-disprdf/index.html|comment|"Different sets of RDF data are much easier to combine than different sets of data in other common formats. You can easily convert disparate non-RDF data sets to RDF and then combine them to create new content. In this article, learn how to integrate spreadsheet data, CSV data from a web service, and fielded data from a website into a single report. 
+" +http://www.ibm.com/developerworks/library/x-disprdf/index.html|title|Integrate disparate data sources with Semantic Web technology +http://www.ibm.com/developerworks/library/x-disprdf/index.html|creationTime|2010-09-30T23:06:24Z +http://media.daimler.com/dcmedia/0-921-657591-1-1588269-1-0-0-0-0-0-11701-614316-0-1-0-0-0-0-0.html|creationDate|2013-10-15 +http://media.daimler.com/dcmedia/0-921-657591-1-1588269-1-0-0-0-0-0-11701-614316-0-1-0-0-0-0-0.html|tag|http://www.semanlink.net/tag/daimler +http://media.daimler.com/dcmedia/0-921-657591-1-1588269-1-0-0-0-0-0-11701-614316-0-1-0-0-0-0-0.html|title|Daimler IT honored with the first ever „European Data Innovator Award“ Daimler Global Media Site +http://media.daimler.com/dcmedia/0-921-657591-1-1588269-1-0-0-0-0-0-11701-614316-0-1-0-0-0-0-0.html|creationTime|2013-10-15T13:45:36Z +http://www.ted.com/talks/chrystia_freeland_the_rise_of_the_new_global_super_rich.html|creationDate|2013-09-09 +http://www.ted.com/talks/chrystia_freeland_the_rise_of_the_new_global_super_rich.html|tag|http://www.semanlink.net/tag/mondialisation +http://www.ted.com/talks/chrystia_freeland_the_rise_of_the_new_global_super_rich.html|tag|http://www.semanlink.net/tag/ted +http://www.ted.com/talks/chrystia_freeland_the_rise_of_the_new_global_super_rich.html|tag|http://www.semanlink.net/tag/riches +http://www.ted.com/talks/chrystia_freeland_the_rise_of_the_new_global_super_rich.html|comment|it's important to remember that before we learned how to share the fruits of the Industrial Revolution with the broad swathes of society, we had to go through two depressions, the Great Depression of the 1930s, the Long Depression of the 1870s, two world wars, communist revolutions in Russia and in China, and an era of tremendous social and political upheaval in the West. We also, not coincidentally, went through an era of tremendous social and political inventions. We created the modern welfare state. We created public education. We created public health care. We created public pensions. We created unions.
Today, we are living through an era of economic transformation comparable in its scale and its scope to the Industrial Revolution. To be sure that this new economy benefits us all and not just the plutocrats, we need to embark on an era of comparably ambitious social and political change. +http://www.ted.com/talks/chrystia_freeland_the_rise_of_the_new_global_super_rich.html|title|The rise of the new global super-rich Video on TED.com +http://www.ted.com/talks/chrystia_freeland_the_rise_of_the_new_global_super_rich.html|creationTime|2013-09-09T14:01:03Z +http://blog.seevl.fm/2013/11/03/knowledge-graphs-for-discovery/|creationDate|2013-11-09 +http://blog.seevl.fm/2013/11/03/knowledge-graphs-for-discovery/|tag|http://www.semanlink.net/tag/alexandre_passant +http://blog.seevl.fm/2013/11/03/knowledge-graphs-for-discovery/|tag|http://www.semanlink.net/tag/knowledge_graph +http://blog.seevl.fm/2013/11/03/knowledge-graphs-for-discovery/|title|Knowledge graphs utilised for content personalisation and discovery on the Web It's all about music discovery! +http://blog.seevl.fm/2013/11/03/knowledge-graphs-for-discovery/|creationTime|2013-11-09T13:09:16Z +http://lov.okfn.org/endpoint/lov?query=PREFIX+rdf:%3Chttp://www.w3.org/1999/02/22-rdf-syntax-ns%23%3E%0D%0APREFIX+xsd:%3Chttp://www.w3.org/2001/XMLSchema%23%3E%0D%0APREFIX+dcterms:%3Chttp://purl.org/dc/terms/%3E%0D%0APREFIX+rdfs:%3Chttp://www.w3.org/2000/01/rdf-schema%23%3E%0D%0APREFIX+owl:%3Chttp://www.w3.org/2002/07/owl%23%3E%0D%0APREFIX+skos:%3Chttp://www.w3.org/2004/02/skos/core%23%3E%0D%0APREFIX+foaf:%3Chttp://xmlns.com/foaf/0.1/%3E%0D%0APREFIX+void:%3Chttp://rdfs.org/ns/void%23%3E%0D%0APREFIX+bibo:%3Chttp://purl.org/ontology/bibo/%3E%0D%0APREFIX+vann:%3Chttp://purl.org/vocab/vann/%3E%0D%0APREFIX+voaf:%3Chttp://purl.org/vocommons/voaf%23%3E%0D%0APREFIX+frbr:%3Chttp://purl.org/vocab/frbr/core%23%3E%0D%0APREFIX+lov:%3Chttp://lov.okfn.org/dataset/lov/lov%23%3E%0D%0APREFIX+rev:%3Chttp://purl.org/stuff/rev%23%3E%0D%0A%0D%0ASELECT+DISTINCT+%3Fperson+%3Fname+%3Fv%0D%0A%0D%0AWHERE%0D%0A%0D%0A%7B++++++%3Fv+a+voaf:Vocabulary.%0D%0A+++++++%7B%3Fv+dcterms:creator+%3Fperson%7D%0D%0A++++++++UNION+%0D%0A+++++++%7B%3Fv+dcterms:contributor+%3Fperson%7D.%0D%0A+++++++%3Fperson+a+foaf:Person.%0D%0A+++++++%3Fperson+foaf:name+%3Fname+%0D%0A+++++++FILTER(contains(str(%3Fperson),%22google.com%22))%0D%0A%7D%0D%0A+%0D%0AORDER+BY+%3Fname%0D%0A&format=HTML|creationDate|2013-12-10 
+http://lov.okfn.org/endpoint/lov?query=PREFIX+rdf:%3Chttp://www.w3.org/1999/02/22-rdf-syntax-ns%23%3E%0D%0APREFIX+xsd:%3Chttp://www.w3.org/2001/XMLSchema%23%3E%0D%0APREFIX+dcterms:%3Chttp://purl.org/dc/terms/%3E%0D%0APREFIX+rdfs:%3Chttp://www.w3.org/2000/01/rdf-schema%23%3E%0D%0APREFIX+owl:%3Chttp://www.w3.org/2002/07/owl%23%3E%0D%0APREFIX+skos:%3Chttp://www.w3.org/2004/02/skos/core%23%3E%0D%0APREFIX+foaf:%3Chttp://xmlns.com/foaf/0.1/%3E%0D%0APREFIX+void:%3Chttp://rdfs.org/ns/void%23%3E%0D%0APREFIX+bibo:%3Chttp://purl.org/ontology/bibo/%3E%0D%0APREFIX+vann:%3Chttp://purl.org/vocab/vann/%3E%0D%0APREFIX+voaf:%3Chttp://purl.org/vocommons/voaf%23%3E%0D%0APREFIX+frbr:%3Chttp://purl.org/vocab/frbr/core%23%3E%0D%0APREFIX+lov:%3Chttp://lov.okfn.org/dataset/lov/lov%23%3E%0D%0APREFIX+rev:%3Chttp://purl.org/stuff/rev%23%3E%0D%0A%0D%0ASELECT+DISTINCT+%3Fperson+%3Fname+%3Fv%0D%0A%0D%0AWHERE%0D%0A%0D%0A%7B++++++%3Fv+a+voaf:Vocabulary.%0D%0A+++++++%7B%3Fv+dcterms:creator+%3Fperson%7D%0D%0A++++++++UNION+%0D%0A+++++++%7B%3Fv+dcterms:contributor+%3Fperson%7D.%0D%0A+++++++%3Fperson+a+foaf:Person.%0D%0A+++++++%3Fperson+foaf:name+%3Fname+%0D%0A+++++++FILTER(contains(str(%3Fperson),%22google.com%22))%0D%0A%7D%0D%0A+%0D%0AORDER+BY+%3Fname%0D%0A&format=HTML|tag|http://www.semanlink.net/tag/sw_guys +http://lov.okfn.org/endpoint/lov?query=PREFIX+rdf:%3Chttp://www.w3.org/1999/02/22-rdf-syntax-ns%23%3E%0D%0APREFIX+xsd:%3Chttp://www.w3.org/2001/XMLSchema%23%3E%0D%0APREFIX+dcterms:%3Chttp://purl.org/dc/terms/%3E%0D%0APREFIX+rdfs:%3Chttp://www.w3.org/2000/01/rdf-schema%23%3E%0D%0APREFIX+owl:%3Chttp://www.w3.org/2002/07/owl%23%3E%0D%0APREFIX+skos:%3Chttp://www.w3.org/2004/02/skos/core%23%3E%0D%0APREFIX+foaf:%3Chttp://xmlns.com/foaf/0.1/%3E%0D%0APREFIX+void:%3Chttp://rdfs.org/ns/void%23%3E%0D%0APREFIX+bibo:%3Chttp://purl.org/ontology/bibo/%3E%0D%0APREFIX+vann:%3Chttp://purl.org/vocab/vann/%3E%0D%0APREFIX+voaf:%3Chttp://purl.org/vocommons/voaf%23%3E%0D%0APREFIX+frbr:%3Chttp://purl.org/vocab/frbr/core%23%3E%0D%0APREFIX+lov:%3Chttp://lov.okfn.org/dataset/lov/lov%23%3E%0D%0APREFIX+rev:%3Chttp://purl.org/stuff/rev%23%3E%0D%0A%0D%0ASELECT+DISTINCT+%3Fperson+%3Fname+%3Fv%0D%0A%0D%0AWHERE%0D%0A%0D%0A%7B++++++%3Fv+a+voaf:Vocabulary.%0D%0A+++++++%7B%3Fv+dcterms:creator+%3Fperson%7D%0D%0A++++++++UNION+%0D%0A+++++++%7B%3Fv+dcterms:contributor+%3Fperson%7D.%0D%0A+++++++%3Fperson+a+foaf:Person.%0D%0A+++++++%3Fperson+foaf:name+%3Fname+%0D%0A+++++++FILTER(contains(str(%3Fperson),%22google.com%22))%0D%0A%7D%0D%0A+%0D%0AORDER+BY+%3Fname%0D%0A&format=HTML|tag|http://www.semanlink.net/tag/lov_linked_open_vocabularies 
+http://lov.okfn.org/endpoint/lov?query=PREFIX+rdf:%3Chttp://www.w3.org/1999/02/22-rdf-syntax-ns%23%3E%0D%0APREFIX+xsd:%3Chttp://www.w3.org/2001/XMLSchema%23%3E%0D%0APREFIX+dcterms:%3Chttp://purl.org/dc/terms/%3E%0D%0APREFIX+rdfs:%3Chttp://www.w3.org/2000/01/rdf-schema%23%3E%0D%0APREFIX+owl:%3Chttp://www.w3.org/2002/07/owl%23%3E%0D%0APREFIX+skos:%3Chttp://www.w3.org/2004/02/skos/core%23%3E%0D%0APREFIX+foaf:%3Chttp://xmlns.com/foaf/0.1/%3E%0D%0APREFIX+void:%3Chttp://rdfs.org/ns/void%23%3E%0D%0APREFIX+bibo:%3Chttp://purl.org/ontology/bibo/%3E%0D%0APREFIX+vann:%3Chttp://purl.org/vocab/vann/%3E%0D%0APREFIX+voaf:%3Chttp://purl.org/vocommons/voaf%23%3E%0D%0APREFIX+frbr:%3Chttp://purl.org/vocab/frbr/core%23%3E%0D%0APREFIX+lov:%3Chttp://lov.okfn.org/dataset/lov/lov%23%3E%0D%0APREFIX+rev:%3Chttp://purl.org/stuff/rev%23%3E%0D%0A%0D%0ASELECT+DISTINCT+%3Fperson+%3Fname+%3Fv%0D%0A%0D%0AWHERE%0D%0A%0D%0A%7B++++++%3Fv+a+voaf:Vocabulary.%0D%0A+++++++%7B%3Fv+dcterms:creator+%3Fperson%7D%0D%0A++++++++UNION+%0D%0A+++++++%7B%3Fv+dcterms:contributor+%3Fperson%7D.%0D%0A+++++++%3Fperson+a+foaf:Person.%0D%0A+++++++%3Fperson+foaf:name+%3Fname+%0D%0A+++++++FILTER(contains(str(%3Fperson),%22google.com%22))%0D%0A%7D%0D%0A+%0D%0AORDER+BY+%3Fname%0D%0A&format=HTML|title|(LOV-E) Linked Open Vocabularies Endpoint +http://lov.okfn.org/endpoint/lov?query=PREFIX+rdf:%3Chttp://www.w3.org/1999/02/22-rdf-syntax-ns%23%3E%0D%0APREFIX+xsd:%3Chttp://www.w3.org/2001/XMLSchema%23%3E%0D%0APREFIX+dcterms:%3Chttp://purl.org/dc/terms/%3E%0D%0APREFIX+rdfs:%3Chttp://www.w3.org/2000/01/rdf-schema%23%3E%0D%0APREFIX+owl:%3Chttp://www.w3.org/2002/07/owl%23%3E%0D%0APREFIX+skos:%3Chttp://www.w3.org/2004/02/skos/core%23%3E%0D%0APREFIX+foaf:%3Chttp://xmlns.com/foaf/0.1/%3E%0D%0APREFIX+void:%3Chttp://rdfs.org/ns/void%23%3E%0D%0APREFIX+bibo:%3Chttp://purl.org/ontology/bibo/%3E%0D%0APREFIX+vann:%3Chttp://purl.org/vocab/vann/%3E%0D%0APREFIX+voaf:%3Chttp://purl.org/vocommons/voaf%23%3E%0D%0APREFIX+frbr:%3Chttp://purl.org/vocab/frbr/core%23%3E%0D%0APREFIX+lov:%3Chttp://lov.okfn.org/dataset/lov/lov%23%3E%0D%0APREFIX+rev:%3Chttp://purl.org/stuff/rev%23%3E%0D%0A%0D%0ASELECT+DISTINCT+%3Fperson+%3Fname+%3Fv%0D%0A%0D%0AWHERE%0D%0A%0D%0A%7B++++++%3Fv+a+voaf:Vocabulary.%0D%0A+++++++%7B%3Fv+dcterms:creator+%3Fperson%7D%0D%0A++++++++UNION+%0D%0A+++++++%7B%3Fv+dcterms:contributor+%3Fperson%7D.%0D%0A+++++++%3Fperson+a+foaf:Person.%0D%0A+++++++%3Fperson+foaf:name+%3Fname+%0D%0A+++++++FILTER(contains(str(%3Fperson),%22google.com%22))%0D%0A%7D%0D%0A+%0D%0AORDER+BY+%3Fname%0D%0A&format=HTML|creationTime|2013-12-10T09:30:22Z +http://www.semanlink.net/files/2007/12/js-rdf-parser-ie-modifs2tabulator.zip|creationDate|2007-12-17 +http://www.semanlink.net/files/2007/12/js-rdf-parser-ie-modifs2tabulator.zip|tag|http://www.semanlink.net/tag/javascript_rdf_parser_in_ie +http://www.semanlink.net/files/2007/12/js-rdf-parser-ie-modifs2tabulator.zip|comment|Modifications made to code of tabulator-0.8-2007-02-01T16-43Z to have the javascript RDF Parser working with internet explorer 6 and 7. This is not a complete port of Tabulator: it only concerns the RDF parser (and the way to invoke Ajax). It has not been fully tested (only with simple RDF, without lists, anonymous resources, etc.) 
+http://www.semanlink.net/files/2007/12/js-rdf-parser-ie-modifs2tabulator.zip|title|Javascript RDF Parser in ie
+http://www.semanlink.net/files/2007/12/js-rdf-parser-ie-modifs2tabulator.zip|creationTime|2007-12-17T09:04:33Z
+http://purl.org/makolab/caont/|creationDate|2012-11-13
+http://purl.org/makolab/caont/|tag|http://www.semanlink.net/tag/makolab
+http://purl.org/makolab/caont/|tag|http://www.semanlink.net/tag/automotive_and_web_technologies
+http://purl.org/makolab/caont/|title|CAO: Car Audio Ontology
+http://purl.org/makolab/caont/|creationTime|2012-11-13T17:35:20Z
+http://semarglproject.org/index.html|creationDate|2013-01-28
+http://semarglproject.org/index.html|tag|http://www.semanlink.net/tag/rdfa_tool
+http://semarglproject.org/index.html|tag|http://www.semanlink.net/tag/java_tool
+http://semarglproject.org/index.html|comment|Semargl is a modular framework for crawling linked data from structured documents. The main goal of the project is to provide a lightweight and performant tool without excess dependencies. At this moment it offers high-performance streaming parsers for RDF/XML, RDFa and N-Triples, a streaming serializer for Turtle, and integration with Jena, Sesame and Clerezza. Its small memory footprint and CPU requirements allow the framework to be used by any application. It runs seamlessly on Android and GAE.
+http://semarglproject.org/index.html|title|Semargl: better linked data processing
+http://semarglproject.org/index.html|creationTime|2013-01-28T18:01:57Z
+http://fgiasson.com/blog/index.php/2007/09/28/turbocharge-your-links-with-zlinks/|creationDate|2007-10-31
+http://fgiasson.com/blog/index.php/2007/09/28/turbocharge-your-links-with-zlinks/|tag|http://www.semanlink.net/tag/frederick_giasson
+http://fgiasson.com/blog/index.php/2007/09/28/turbocharge-your-links-with-zlinks/|title|Frederick Giasson’s Weblog » Turbocharge your Links with zLinks
+http://fgiasson.com/blog/index.php/2007/09/28/turbocharge-your-links-with-zlinks/|creationTime|2007-10-31T15:58:43Z
+http://ruben.verborgh.org/publications/verborgh_tplp_2016/|creationDate|2015-12-27
+http://ruben.verborgh.org/publications/verborgh_tplp_2016/|tag|http://www.semanlink.net/tag/ruben_verborgh
+http://ruben.verborgh.org/publications/verborgh_tplp_2016/|tag|http://www.semanlink.net/tag/hateoas
+http://ruben.verborgh.org/publications/verborgh_tplp_2016/|tag|http://www.semanlink.net/tag/api
+http://ruben.verborgh.org/publications/verborgh_tplp_2016/|title|The Pragmatic Proof: Hypermedia API Composition and Execution Ruben Verborgh
+http://ruben.verborgh.org/publications/verborgh_tplp_2016/|creationTime|2015-12-27T23:22:07Z
+http://bnode.org/blog/2010/07/30/dynamic-semantic-publishing-for-any-blog-part-1|creationDate|2010-08-12
+http://bnode.org/blog/2010/07/30/dynamic-semantic-publishing-for-any-blog-part-1|tag|http://www.semanlink.net/tag/dynamic_semantic_publishing
+http://bnode.org/blog/2010/07/30/dynamic-semantic-publishing-for-any-blog-part-1|tag|http://www.semanlink.net/tag/benjamin_nowack
+http://bnode.org/blog/2010/07/30/dynamic-semantic-publishing-for-any-blog-part-1|title|Dynamic Semantic Publishing for any Blog (Part 1) - benjamin nowack's blog
+http://bnode.org/blog/2010/07/30/dynamic-semantic-publishing-for-any-blog-part-1|creationTime|2010-08-12T16:11:00Z
+https://drive.google.com/file/d/15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP/view|creationDate|2018-11-10
+https://drive.google.com/file/d/15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP/view|tag|http://www.semanlink.net/tag/slides
+https://drive.google.com/file/d/15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP/view|tag|http://www.semanlink.net/tag/nlp +https://drive.google.com/file/d/15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP/view|tag|http://www.semanlink.net/tag/sebastian_ruder +https://drive.google.com/file/d/15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP/view|comment|"([includes S. Ruder's Review of the Recent History of NLP](/doc/?uri=http%3A%2F%2Fblog.aylien.com%2Fa-review-of-the-recent-history-of-natural-language-processing%2F)) +" +https://drive.google.com/file/d/15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP/view|relatedDoc|http://blog.aylien.com/a-review-of-the-recent-history-of-natural-language-processing/ +https://drive.google.com/file/d/15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP/view|title|Frontiers of Natural Language Processing (Deep Learning Indaba 2018, Stellenbosch, South Africa) +https://drive.google.com/file/d/15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP/view|creationTime|2018-11-10T17:23:53Z +https://www2018.thewebconf.org/program/tutorials-track/tutorial-225/|creationDate|2018-01-27 +https://www2018.thewebconf.org/program/tutorials-track/tutorial-225/|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://www2018.thewebconf.org/program/tutorials-track/tutorial-225/|tag|http://www.semanlink.net/tag/representation_learning +https://www2018.thewebconf.org/program/tutorials-track/tutorial-225/|tag|http://www.semanlink.net/tag/graph_embeddings +https://www2018.thewebconf.org/program/tutorials-track/tutorial-225/|title|TUTORIAL: Representation Learning on Networks - TheWebConf 2018 +https://www2018.thewebconf.org/program/tutorials-track/tutorial-225/|creationTime|2018-01-27T15:18:02Z +http://nlp2rdf.org/|creationDate|2012-02-28 +http://nlp2rdf.org/|tag|http://www.semanlink.net/tag/nlp +http://nlp2rdf.org/|tag|http://www.semanlink.net/tag/lod2 +http://nlp2rdf.org/|tag|http://www.semanlink.net/tag/rdf +http://nlp2rdf.org/|title|NLP2RDF Converting NLP tool output to RDF +http://nlp2rdf.org/|creationTime|2012-02-28T10:55:26Z +http://www.vogella.com/articles/EGit/article.html|creationDate|2013-09-08 +http://www.vogella.com/articles/EGit/article.html|tag|http://www.semanlink.net/tag/eclipse +http://www.vogella.com/articles/EGit/article.html|tag|http://www.semanlink.net/tag/git +http://www.vogella.com/articles/EGit/article.html|tag|http://www.semanlink.net/tag/egit +http://www.vogella.com/articles/EGit/article.html|tag|http://www.semanlink.net/tag/github +http://www.vogella.com/articles/EGit/article.html|tag|http://www.semanlink.net/tag/tutorial +http://www.vogella.com/articles/EGit/article.html|title|Git version control with Eclipse (EGit) - Tutorial +http://www.vogella.com/articles/EGit/article.html|creationTime|2013-09-08T23:37:16Z +https://support.google.com/webmasters/answer/96569?hl=en|creationDate|2013-08-23 +https://support.google.com/webmasters/answer/96569?hl=en|tag|http://www.semanlink.net/tag/webmasters_google +https://support.google.com/webmasters/answer/96569?hl=en|title|"rel=""nofollow"" - Webmaster Tools Help" +https://support.google.com/webmasters/answer/96569?hl=en|creationTime|2013-08-23T18:12:48Z +https://nbviewer.jupyter.org/github/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Poincare%20Tutorial.ipynb|creationDate|2018-05-20 +https://nbviewer.jupyter.org/github/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Poincare%20Tutorial.ipynb|tag|http://www.semanlink.net/tag/poincare_embeddings +https://nbviewer.jupyter.org/github/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Poincare%20Tutorial.ipynb|tag|http://www.semanlink.net/tag/gensim 
+https://nbviewer.jupyter.org/github/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Poincare%20Tutorial.ipynb|tag|http://www.semanlink.net/tag/tutorial +https://nbviewer.jupyter.org/github/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Poincare%20Tutorial.ipynb|title|Tutorial on Poincaré Embeddings (Jupyter Notebook ) +https://nbviewer.jupyter.org/github/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Poincare%20Tutorial.ipynb|creationTime|2018-05-20T09:06:58Z +http://www.paulgraham.com/say.html|creationDate|2005-11-24 +http://www.paulgraham.com/say.html|tag|http://www.semanlink.net/tag/tabous +http://www.paulgraham.com/say.html|tag|http://www.semanlink.net/tag/liberte_de_pensee +http://www.paulgraham.com/say.html|tag|http://www.semanlink.net/tag/paul_graham +http://www.paulgraham.com/say.html|comment|"My hypothesis is that the side that's shocked is most likely to be the mistaken one. +I suspect the only taboos that are more than taboos are the ones that are universal, or nearly so. Murder for example. But any idea that's considered harmless in a significant percentage of times and places, and yet is taboo in ours, is a good candidate for something we're mistaken about. ABQ: Always be questioning" +http://www.paulgraham.com/say.html|title|What You Can't Say +http://data.bnf.fr/|creationDate|2012-04-28 +http://data.bnf.fr/|tag|http://www.semanlink.net/tag/bnf +http://data.bnf.fr/|title|databnf +http://data.bnf.fr/|creationTime|2012-04-28T11:33:04Z +http://www.newswise.com/articles/view/521790|creationDate|2006-07-14 +http://www.newswise.com/articles/view/521790|tag|http://www.semanlink.net/tag/galileo +http://www.newswise.com/articles/view/521790|tag|http://www.semanlink.net/tag/cornell +http://www.newswise.com/articles/view/521790|title|Newswise Cracking the Secret Codes of Europe's Galileo Satellite +https://www.airpair.com/nlp/keyword-extraction-tutorial|creationDate|2017-05-24 +https://www.airpair.com/nlp/keyword-extraction-tutorial|tag|http://www.semanlink.net/tag/rake +https://www.airpair.com/nlp/keyword-extraction-tutorial|tag|http://www.semanlink.net/tag/semanlink_feature_request +https://www.airpair.com/nlp/keyword-extraction-tutorial|tag|http://www.semanlink.net/tag/nlp_topic_extraction +https://www.airpair.com/nlp/keyword-extraction-tutorial|comment|"2 tools: + +- simple keyword extraction with a Python library (RAKE) +- Java tool (Maui) that uses a machine-learning technique. + +Focus on 2 tasks: + +- Extracting the most significant words and phrases that appear in given text +- Identifying a set of topics from a predefined vocabulary that match a given text + +Typical steps: + +- Candidate selection (extract all possible words, phrases, terms or concepts that can potentially be keywords). +- Properties calculation (for each candidate, properties that indicate that it may be a keyword) +- Scoring and selecting keywords + +RAKE: finding multi-word phrases containing frequent words. +: simplicity, ease of use -: limited accuracy, parameter configuration requirement, throws away many valid phrases, doesn’t normalize candidates (no stemming). + +Maui: (""Multi-purpose automatic topic indexing""). Based on [Weka](/semanlink/tag/weka) (GPL, java, maven, github). Compared to RAKE: + +- Extract keywords not just from text, but also with a reference to a controlled vocabulary +- Improve the accuracy by training Maui on manually chosen keywords + - but requires a training model. + +Maui can use a controlled vocabulary expressed in SKOS - so I could use it in semanlink! 
+ + +" +https://www.airpair.com/nlp/keyword-extraction-tutorial|title|NLP keyword extraction tutorial with RAKE and Maui +https://www.airpair.com/nlp/keyword-extraction-tutorial|creationTime|2017-05-24T18:20:50Z +http://www.bbc.co.uk/blogs/internet/entries/afdf2190-4e60-3dfc-b15f-fc17f88c85a1|creationDate|2017-01-06 +http://www.bbc.co.uk/blogs/internet/entries/afdf2190-4e60-3dfc-b15f-fc17f88c85a1|tag|http://www.semanlink.net/tag/bbc_semantic_publishing +http://www.bbc.co.uk/blogs/internet/entries/afdf2190-4e60-3dfc-b15f-fc17f88c85a1|title|BBC Blogs - Internet Blog - Opening up the BBC's Linked Data with /things +http://www.bbc.co.uk/blogs/internet/entries/afdf2190-4e60-3dfc-b15f-fc17f88c85a1|creationTime|2017-01-06T11:35:26Z +http://tech.groups.yahoo.com/group/jena-dev/message/36263|creationDate|2008-12-12 +http://tech.groups.yahoo.com/group/jena-dev/message/36263|tag|http://www.semanlink.net/tag/andy_seaborne +http://tech.groups.yahoo.com/group/jena-dev/message/36263|tag|http://www.semanlink.net/tag/fps_post +http://tech.groups.yahoo.com/group/jena-dev/message/36263|tag|http://www.semanlink.net/tag/arq_property_functions +http://tech.groups.yahoo.com/group/jena-dev/message/36263|title|Message: RE: [jena-dev] ARQ property functions: is it possible to use a blank node as argObject? +http://tech.groups.yahoo.com/group/jena-dev/message/36263|creationTime|2008-12-12T10:36:30Z +http://www.w3.org/2012/pyRdfa/|creationDate|2013-06-13 +http://www.w3.org/2012/pyRdfa/|tag|http://www.semanlink.net/tag/validation +http://www.w3.org/2012/pyRdfa/|tag|http://www.semanlink.net/tag/rdfa_1_1 +http://www.w3.org/2012/pyRdfa/|tag|http://www.semanlink.net/tag/rdfa_tool +http://www.w3.org/2012/pyRdfa/|title|RDFa 1.1 Distiller and Parser +http://www.w3.org/2012/pyRdfa/|creationTime|2013-06-13T10:34:01Z +http://www.maa.org/devlin/LockhartsLament.pdf|creationDate|2009-06-22 +http://www.maa.org/devlin/LockhartsLament.pdf|tag|http://www.semanlink.net/tag/mathematiques +http://www.maa.org/devlin/LockhartsLament.pdf|tag|http://www.semanlink.net/tag/jean_paul +http://www.maa.org/devlin/LockhartsLament.pdf|tag|http://www.semanlink.net/tag/education +http://www.maa.org/devlin/LockhartsLament.pdf|comment|"I don’t see how it’s doing society any +good to have its members walking around with vague memories of +algebraic formulas and geometric diagrams, and clear memories of +hating them. It might do some good, though, to show them +something beautiful and give them an opportunity to enjoy being +creative, flexible, open-minded
+You don’t need to make math interesting— it’s already more +interesting than we can handle! And the glory of it is its complete irrelevance to our lives.
+Mathematics is the music of reason.
+ +We learn things because +they interest us now, not because they might be useful later.
+How many students taking literature classes will one day be writers? +That is not why we teach literature, nor why students take it. We +teach to enlighten everyone, not to train only the future professionals. +In any case, the most valuable skill for a scientist or engineer is being +able to think creatively and independently. The last thing anyone +needs is to be trained. + + +" +http://www.maa.org/devlin/LockhartsLament.pdf|title|A Mathematician’s Lament +http://www.maa.org/devlin/LockhartsLament.pdf|creationTime|2009-06-22T13:37:43Z +http://hieraki.goodlad.ca/read/chapter/6#page10|creationDate|2005-05-20 +http://hieraki.goodlad.ca/read/chapter/6#page10|tag|http://www.semanlink.net/tag/ajax +http://hieraki.goodlad.ca/read/chapter/6#page10|tag|http://www.semanlink.net/tag/javascript +http://hieraki.goodlad.ca/read/chapter/6#page10|title|Combining XMLHttpRequest and Rails to Produce More Efficient UIs Generalized JavaScript Functions +http://www.feynmanlectures.info/docroot/I_toc.html|creationDate|2013-09-13 +http://www.feynmanlectures.info/docroot/I_toc.html|tag|http://www.semanlink.net/tag/feynman +http://www.feynmanlectures.info/docroot/I_toc.html|tag|http://www.semanlink.net/tag/physique +http://www.feynmanlectures.info/docroot/I_toc.html|comment|"Volume 1: mainly mechanics, radiation, and heat
+""science is as much for intellectual enjoyment as for practical utility""" +http://www.feynmanlectures.info/docroot/I_toc.html|title|The Feynman Lectures on Physics +http://www.feynmanlectures.info/docroot/I_toc.html|creationTime|2013-09-13T23:24:00Z +http://semanticweb.com/start-your-semantic-engines-truecar-looks-to-foster-transition-of-vehicle-data-from-flat-to-structured-and-enhanced_b37899?goback=%2Egde_1630687_member_249670861|creationDate|2013-06-17 +http://semanticweb.com/start-your-semantic-engines-truecar-looks-to-foster-transition-of-vehicle-data-from-flat-to-structured-and-enhanced_b37899?goback=%2Egde_1630687_member_249670861|tag|http://www.semanlink.net/tag/automotive_and_web_technologies +http://semanticweb.com/start-your-semantic-engines-truecar-looks-to-foster-transition-of-vehicle-data-from-flat-to-structured-and-enhanced_b37899?goback=%2Egde_1630687_member_249670861|tag|http://www.semanlink.net/tag/gao +http://semanticweb.com/start-your-semantic-engines-truecar-looks-to-foster-transition-of-vehicle-data-from-flat-to-structured-and-enhanced_b37899?goback=%2Egde_1630687_member_249670861|tag|http://www.semanlink.net/tag/c2gweb_and_product_description +http://semanticweb.com/start-your-semantic-engines-truecar-looks-to-foster-transition-of-vehicle-data-from-flat-to-structured-and-enhanced_b37899?goback=%2Egde_1630687_member_249670861|tag|http://www.semanlink.net/tag/product_description +http://semanticweb.com/start-your-semantic-engines-truecar-looks-to-foster-transition-of-vehicle-data-from-flat-to-structured-and-enhanced_b37899?goback=%2Egde_1630687_member_249670861|title|Start Your Semantic Engines: TrueCar Looks To Foster Transition Of Vehicle Data From Flat To Structured And Enhanced - semanticweb.com +http://semanticweb.com/start-your-semantic-engines-truecar-looks-to-foster-transition-of-vehicle-data-from-flat-to-structured-and-enhanced_b37899?goback=%2Egde_1630687_member_249670861|creationTime|2013-06-17T01:28:08Z +http://blog.echen.me/2011/08/22/introduction-to-latent-dirichlet-allocation/|creationDate|2017-06-02 +http://blog.echen.me/2011/08/22/introduction-to-latent-dirichlet-allocation/|tag|http://www.semanlink.net/tag/latent_dirichlet_allocation +http://blog.echen.me/2011/08/22/introduction-to-latent-dirichlet-allocation/|title|Introduction to Latent Dirichlet Allocation +http://blog.echen.me/2011/08/22/introduction-to-latent-dirichlet-allocation/|creationTime|2017-06-02T00:59:38Z +http://www.mkyong.com/maven/how-to-include-library-manully-into-maven-local-repository/|creationDate|2012-08-18 +http://www.mkyong.com/maven/how-to-include-library-manully-into-maven-local-repository/|tag|http://www.semanlink.net/tag/maven +http://www.mkyong.com/maven/how-to-include-library-manully-into-maven-local-repository/|title|How to include library manually into maven local repository? 
+http://www.mkyong.com/maven/how-to-include-library-manully-into-maven-local-repository/|creationTime|2012-08-18T14:58:06Z +http://nymag.com/daily/intelligencer/2017/11/daniel-ellsberg-on-the-doomsday-machine.html|creationDate|2018-01-14 +http://nymag.com/daily/intelligencer/2017/11/daniel-ellsberg-on-the-doomsday-machine.html|tag|http://www.semanlink.net/tag/nuclear_war +http://nymag.com/daily/intelligencer/2017/11/daniel-ellsberg-on-the-doomsday-machine.html|tag|http://www.semanlink.net/tag/whistleblower +http://nymag.com/daily/intelligencer/2017/11/daniel-ellsberg-on-the-doomsday-machine.html|title|Daniel Ellsberg on ‘The Doomsday Machine’ +http://nymag.com/daily/intelligencer/2017/11/daniel-ellsberg-on-the-doomsday-machine.html|creationTime|2018-01-14T19:18:13Z +http://www.snee.com/bobdc.blog/2015/01/r-and-sparql-part-1.html|creationDate|2015-01-13 +http://www.snee.com/bobdc.blog/2015/01/r-and-sparql-part-1.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2015/01/r-and-sparql-part-1.html|tag|http://www.semanlink.net/tag/sparql +http://www.snee.com/bobdc.blog/2015/01/r-and-sparql-part-1.html|tag|http://www.semanlink.net/tag/r +http://www.snee.com/bobdc.blog/2015/01/r-and-sparql-part-1.html|comment|"part 2 +" +http://www.snee.com/bobdc.blog/2015/01/r-and-sparql-part-1.html|title|R (and SPARQL), part 1 - bobdc.blog +http://www.snee.com/bobdc.blog/2015/01/r-and-sparql-part-1.html|creationTime|2015-01-13T21:34:58Z +http://www.semanticoverflow.com/questions/2986/goodrelations-based-rich-snippet-example-in-google-query|creationDate|2011-02-09 +http://www.semanticoverflow.com/questions/2986/goodrelations-based-rich-snippet-example-in-google-query|tag|http://www.semanlink.net/tag/google_rich_snippets +http://www.semanticoverflow.com/questions/2986/goodrelations-based-rich-snippet-example-in-google-query|tag|http://www.semanlink.net/tag/semantic_overflow +http://www.semanticoverflow.com/questions/2986/goodrelations-based-rich-snippet-example-in-google-query|tag|http://www.semanlink.net/tag/goodrelations +http://www.semanticoverflow.com/questions/2986/goodrelations-based-rich-snippet-example-in-google-query|title|goodrelations-based rich snippet example in google query - Semantic Overflow +http://www.semanticoverflow.com/questions/2986/goodrelations-based-rich-snippet-example-in-google-query|creationTime|2011-02-09T00:44:51Z +http://www.kaidangaskia.com/|creationDate|2007-08-21 +http://www.kaidangaskia.com/|tag|http://www.semanlink.net/tag/musique_du_niger +http://www.kaidangaskia.com/|tag|http://www.semanlink.net/tag/rap +http://www.kaidangaskia.com/|comment|La chanteuse s'appelle Safia, nous l'avons entendue un soir au bar de la MJC avec l'orchestre de Moussa, et elle assure. 
+http://www.kaidangaskia.com/|title|Kaidan Gaskia +http://www.kaidangaskia.com/|creationTime|2007-08-21T19:38:20Z +http://lists.w3.org/Archives/Public/public-lod/2010Apr/0278.html|creationDate|2010-04-26 +http://lists.w3.org/Archives/Public/public-lod/2010Apr/0278.html|tag|http://www.semanlink.net/tag/richard_cyganiak +http://lists.w3.org/Archives/Public/public-lod/2010Apr/0278.html|tag|http://www.semanlink.net/tag/lod_mailing_list +http://lists.w3.org/Archives/Public/public-lod/2010Apr/0278.html|tag|http://www.semanlink.net/tag/kingsley_idehen +http://lists.w3.org/Archives/Public/public-lod/2010Apr/0278.html|tag|http://www.semanlink.net/tag/entity_attribute_value_model +http://lists.w3.org/Archives/Public/public-lod/2010Apr/0278.html|title|Comments on Data 3.0 manifesto +http://lists.w3.org/Archives/Public/public-lod/2010Apr/0278.html|creationTime|2010-04-26T11:57:59Z +http://www.semanlink.net/doc/2019/05/deep_locality_sensitive_hashing|creationDate|2019-05-26 +http://www.semanlink.net/doc/2019/05/deep_locality_sensitive_hashing|tag|http://www.semanlink.net/tag/autoencoder +http://www.semanlink.net/doc/2019/05/deep_locality_sensitive_hashing|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.semanlink.net/doc/2019/05/deep_locality_sensitive_hashing|title|Deep Locality Sensitive Hashing +http://www.semanlink.net/doc/2019/05/deep_locality_sensitive_hashing|bookmarkOf|https://www.slideshare.net/GabrieleAngeletti/project-deep-locality-sensitive-hashing +http://www.semanlink.net/doc/2019/05/deep_locality_sensitive_hashing|creationTime|2019-05-26T11:48:01Z +http://www.w3.org/wiki/Activity_Streams|creationDate|2014-10-23 +http://www.w3.org/wiki/Activity_Streams|tag|http://www.semanlink.net/tag/w3c_working_group +http://www.w3.org/wiki/Activity_Streams|tag|http://www.semanlink.net/tag/activity_streams +http://www.w3.org/wiki/Activity_Streams|comment|"Little ""a"" activity streams : are a UI paradigm for displaying recent activity within a context. Activities are typically displayed in reverse chronological order and consist of relatively simple statements such as ""John uploaded a new photo"" or ""12 people liked Sally's post"". Big ""A"" Activity Streams : is a data format for encoding and transferring activity/event metadata. The first version of the specification was published in 2011 by the independent Activity Streams Working Group and is based on extending Atom. The current (2.0+) version of the spec is JSON-based." 
+http://www.w3.org/wiki/Activity_Streams|title|Activity Streams - W3C Wiki +http://www.w3.org/wiki/Activity_Streams|creationTime|2014-10-23T23:53:34Z +http://www.cio-online.com/contributions/lire-le-dsi-face-a-la-complexite-653.html|creationDate|2013-09-24 +http://www.cio-online.com/contributions/lire-le-dsi-face-a-la-complexite-653.html|tag|http://www.semanlink.net/tag/infotechnocratie +http://www.cio-online.com/contributions/lire-le-dsi-face-a-la-complexite-653.html|tag|http://www.semanlink.net/tag/dsi +http://www.cio-online.com/contributions/lire-le-dsi-face-a-la-complexite-653.html|tag|http://www.semanlink.net/tag/leon_levy_bencheton +http://www.cio-online.com/contributions/lire-le-dsi-face-a-la-complexite-653.html|comment|"« L'Infotechnocratie - Le déni de complexité en informatique » +...on ignore ou on refuse la complexité, l'abstraction et l'immatérialité, la rareté des bons concepteurs, on oublie l'importance de la connaissance des systèmes et processus existants, et l'on élève au rang de panacées universelles des voies de progrès partiels telles que l'objet, le SOA, la réingénierie de processus, l'itératif ou l'Agile, l'industrialisation, le downsizing, sans oublier les appels d'offres, les contrats au forfait et l'externalisation." +http://www.cio-online.com/contributions/lire-le-dsi-face-a-la-complexite-653.html|title|Le DSI face à la complexité +http://www.cio-online.com/contributions/lire-le-dsi-face-a-la-complexite-653.html|creationTime|2013-09-24T10:22:42Z +https://dl.acm.org/citation.cfm?id=3210036|creationDate|2018-07-11 +https://dl.acm.org/citation.cfm?id=3210036|tag|http://www.semanlink.net/tag/cross_modal_retrieval +https://dl.acm.org/citation.cfm?id=3210036|tag|http://www.semanlink.net/tag/recette_de_cuisine +https://dl.acm.org/citation.cfm?id=3210036|tag|http://www.semanlink.net/tag/embeddings +https://dl.acm.org/citation.cfm?id=3210036|comment|a cross-modal retrieval model aligning visual and textual data (like pictures of dishes and their recipes) in a shared representation space +https://dl.acm.org/citation.cfm?id=3210036|title|Cross-Modal Retrieval in the Cooking Context +https://dl.acm.org/citation.cfm?id=3210036|creationTime|2018-07-11T13:05:27Z +http://www.slowfood.com/slowfish/pagine/eng/pagina.lasso?-id_pg=202|creationDate|2011-02-01 +http://www.slowfood.com/slowfish/pagine/eng/pagina.lasso?-id_pg=202|tag|http://www.semanlink.net/tag/slow_food +http://www.slowfood.com/slowfish/pagine/eng/pagina.lasso?-id_pg=202|tag|http://www.semanlink.net/tag/ofir +http://www.slowfood.com/slowfish/pagine/eng/pagina.lasso?-id_pg=202|title|Brazil - Go Fish - Which Fish? Slow Fish - Local Sustainable Fish +http://www.slowfood.com/slowfish/pagine/eng/pagina.lasso?-id_pg=202|creationTime|2011-02-01T12:06:54Z +http://www.mkbergman.com/825/fresh-perspectives-on-the-semantic-enterprise/|creationDate|2009-10-08 +http://www.mkbergman.com/825/fresh-perspectives-on-the-semantic-enterprise/|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.mkbergman.com/825/fresh-perspectives-on-the-semantic-enterprise/|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://www.mkbergman.com/825/fresh-perspectives-on-the-semantic-enterprise/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/825/fresh-perspectives-on-the-semantic-enterprise/|comment|"""We can truly call RDF a disruptive data model or framework. But, it does so without disrupting what exists in the slightest. 
And that is a most remarkable achievement.""" +http://www.mkbergman.com/825/fresh-perspectives-on-the-semantic-enterprise/|title|Fresh Perspectives on the Semantic Enterprise » AI3:::Adaptive Information +http://www.mkbergman.com/825/fresh-perspectives-on-the-semantic-enterprise/|creationTime|2009-10-08T22:03:07Z +http://www.youtube.com/watch?v=d5Pb9nykjQA|creationDate|2009-04-03 +http://www.youtube.com/watch?v=d5Pb9nykjQA|tag|http://www.semanlink.net/tag/art_d_afrique +http://www.youtube.com/watch?v=d5Pb9nykjQA|tag|http://www.semanlink.net/tag/censorship +http://www.youtube.com/watch?v=d5Pb9nykjQA|tag|http://www.semanlink.net/tag/anticolonialisme +http://www.youtube.com/watch?v=d5Pb9nykjQA|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=d5Pb9nykjQA|comment|"un film de Chris Marker et de Alain Resnais. Censure totale de 1953 à 63. Diffusé intégralement en 68. +" +http://www.youtube.com/watch?v=d5Pb9nykjQA|title|Les statues meurent aussi +http://www.youtube.com/watch?v=d5Pb9nykjQA|creationTime|2009-04-03T01:26:08Z +http://www.mkbergman.com/1037/enterprise-scale-semantic-systems/|creationDate|2013-02-02 +http://www.mkbergman.com/1037/enterprise-scale-semantic-systems/|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.mkbergman.com/1037/enterprise-scale-semantic-systems/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/1037/enterprise-scale-semantic-systems/|title|Enterprise-scale Semantic Systems AI3:::Adaptive Information +http://www.mkbergman.com/1037/enterprise-scale-semantic-systems/|creationTime|2013-02-02T17:26:25Z +http://jqapi.ru/|creationDate|2011-04-06 +http://jqapi.ru/|tag|http://www.semanlink.net/tag/jquery +http://jqapi.ru/|title|jQuery 1.5 Cheatsheet by Future Colors +http://jqapi.ru/|creationTime|2011-04-06T10:49:09Z +http://www.boston.com/business/globe/articles/2005/10/10/you_need_not_be_paranoid_to_fear_rfid?mode=PF|creationDate|2005-10-10 +http://www.boston.com/business/globe/articles/2005/10/10/you_need_not_be_paranoid_to_fear_rfid?mode=PF|tag|http://www.semanlink.net/tag/rfid +http://www.boston.com/business/globe/articles/2005/10/10/you_need_not_be_paranoid_to_fear_rfid?mode=PF|title|You need not be paranoid to fear RFID - The Boston Globe +http://www.snee.com/bobdc.blog/2010/08/trying-sparql-11-new-query-fea.html|creationDate|2010-08-20 +http://www.snee.com/bobdc.blog/2010/08/trying-sparql-11-new-query-fea.html|tag|http://www.semanlink.net/tag/sparql_1_1 +http://www.snee.com/bobdc.blog/2010/08/trying-sparql-11-new-query-fea.html|tag|http://www.semanlink.net/tag/arq +http://www.snee.com/bobdc.blog/2010/08/trying-sparql-11-new-query-fea.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2010/08/trying-sparql-11-new-query-fea.html|title|Trying SPARQL 1.1 new query features with ARQ - bobdc.blog +http://www.snee.com/bobdc.blog/2010/08/trying-sparql-11-new-query-fea.html|creationTime|2010-08-20T12:38:33Z +http://research.microsoft.com/en-us/um/people/joshuago/tr-10-98.pdf|creationDate|2012-03-25 +http://research.microsoft.com/en-us/um/people/joshuago/tr-10-98.pdf|tag|http://www.semanlink.net/tag/language_model +http://research.microsoft.com/en-us/um/people/joshuago/tr-10-98.pdf|tag|http://www.semanlink.net/tag/n_gram +http://research.microsoft.com/en-us/um/people/joshuago/tr-10-98.pdf|title|An empirical study of smoothing techniques for language modeling +http://research.microsoft.com/en-us/um/people/joshuago/tr-10-98.pdf|creationTime|2012-03-25T22:54:35Z 
+http://dannyayers.com/2011/08/26/Data-Oriented-Web-Browser|creationDate|2011-08-28 +http://dannyayers.com/2011/08/26/Data-Oriented-Web-Browser|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2011/08/26/Data-Oriented-Web-Browser|tag|http://www.semanlink.net/tag/rdf_browser +http://dannyayers.com/2011/08/26/Data-Oriented-Web-Browser|title|Data-Oriented Web Browser +http://dannyayers.com/2011/08/26/Data-Oriented-Web-Browser|creationTime|2011-08-28T22:51:50Z +http://colah.github.io/|creationDate|2015-10-16 +http://colah.github.io/|tag|http://www.semanlink.net/tag/christopher_olah +http://colah.github.io/|tag|http://www.semanlink.net/tag/artificial_neural_network +http://colah.github.io/|tag|http://www.semanlink.net/tag/ml_nlp_blog +http://colah.github.io/|tag|http://www.semanlink.net/tag/machine_learning +http://colah.github.io/|title|Colah's blog +http://colah.github.io/|creationTime|2015-10-16T16:36:29Z +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo8.html|creationDate|2007-07-08 +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo8.html|tag|http://www.semanlink.net/tag/africa_s_last_wild_places +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo8.html|tag|http://www.semanlink.net/tag/photo +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo8.html|tag|http://www.semanlink.net/tag/hippopotame +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo8.html|tag|http://www.semanlink.net/tag/secheresse +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo8.html|tag|http://www.semanlink.net/tag/national_geographic +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo8.html|comment|Hundreds of hippopotamuses crowd into what is left of the Katuma River in Tanzania's Katavi National Park. 
+http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo8.html|title|National Geographic News Photo Gallery: Megaflyover: Documenting Africa's Last Wild Places +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo8.html|creationTime|2007-07-08T02:42:33Z +http://sofakolle.planeteafrique.com/index.asp?p=65&m=167|creationDate|2012-07-27 +http://sofakolle.planeteafrique.com/index.asp?p=65&m=167|tag|http://www.semanlink.net/tag/john_sofakolle +http://sofakolle.planeteafrique.com/index.asp?p=65&m=167|title|John Sofakollé +http://sofakolle.planeteafrique.com/index.asp?p=65&m=167|creationTime|2012-07-27T22:39:48Z +http://yami2.com/films/la_mise_a_mort_du_travail_fr.php|creationDate|2012-04-26 +http://yami2.com/films/la_mise_a_mort_du_travail_fr.php|tag|http://www.semanlink.net/tag/toyota +http://yami2.com/films/la_mise_a_mort_du_travail_fr.php|tag|http://www.semanlink.net/tag/private_equity +http://yami2.com/films/la_mise_a_mort_du_travail_fr.php|tag|http://www.semanlink.net/tag/travail +http://yami2.com/films/la_mise_a_mort_du_travail_fr.php|tag|http://www.semanlink.net/tag/documentaire_tv +http://yami2.com/films/la_mise_a_mort_du_travail_fr.php|tag|http://www.semanlink.net/tag/management +http://yami2.com/films/la_mise_a_mort_du_travail_fr.php|comment|"Réalisation : Jean-Robert Viallet La-mise-à-mort-du-Travail-La-Dépossession-Episode-3 +" +http://yami2.com/films/la_mise_a_mort_du_travail_fr.php|title|La mise à mort du travail +http://yami2.com/films/la_mise_a_mort_du_travail_fr.php|creationTime|2012-04-26T00:23:14Z +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_survey.html|creationDate|2007-01-19 +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_survey.html|tag|http://www.semanlink.net/tag/semantic_markup_in_html +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_survey.html|tag|http://www.semanlink.net/tag/rdf_driven_web_sites +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_survey.html|tag|http://www.semanlink.net/tag/rdf_tools +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_survey.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_survey.html|comment|I want to be able to quickly develop data-driven Web applications that read from and write back to RDF data sources. 
+http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_survey.html|title|TechnicaLee Speaking: Using RDF on the Web: A Survey
+https://fr.wikipedia.org/wiki/Ingeburge_de_Danemark#Biographie|creationDate|2019-03-04
+https://fr.wikipedia.org/wiki/Ingeburge_de_Danemark#Biographie|tag|http://www.semanlink.net/tag/moyen_age
+https://fr.wikipedia.org/wiki/Ingeburge_de_Danemark#Biographie|tag|http://www.semanlink.net/tag/histoire_de_france
+https://fr.wikipedia.org/wiki/Ingeburge_de_Danemark#Biographie|tag|http://www.semanlink.net/tag/danemark
+https://fr.wikipedia.org/wiki/Ingeburge_de_Danemark#Biographie|tag|http://www.semanlink.net/tag/reine
+https://fr.wikipedia.org/wiki/Ingeburge_de_Danemark#Biographie|tag|http://www.semanlink.net/tag/sexe
+https://fr.wikipedia.org/wiki/Ingeburge_de_Danemark#Biographie|title|Ingeburge de Danemark
+https://fr.wikipedia.org/wiki/Ingeburge_de_Danemark#Biographie|creationTime|2019-03-04T08:41:50Z
+http://www.wired.com/2015/01/architecture-and-vision-warkawater/|creationDate|2015-01-11
+http://www.wired.com/2015/01/architecture-and-vision-warkawater/|tag|http://www.semanlink.net/tag/eau
+http://www.wired.com/2015/01/architecture-and-vision-warkawater/|tag|http://www.semanlink.net/tag/ethiopie
+http://www.wired.com/2015/01/architecture-and-vision-warkawater/|title|A Bamboo Tower That Produces Water From Air WIRED
+http://www.wired.com/2015/01/architecture-and-vision-warkawater/|creationTime|2015-01-11T01:20:29Z
+http://www.artfact-online.fr/blog/blog-post/6|creationDate|2017-05-18
+http://www.artfact-online.fr/blog/blog-post/6|tag|http://www.semanlink.net/tag/clustering_of_text_documents
+http://www.artfact-online.fr/blog/blog-post/6|tag|http://www.semanlink.net/tag/text_similarity
+http://www.artfact-online.fr/blog/blog-post/6|tag|http://www.semanlink.net/tag/good
+http://www.artfact-online.fr/blog/blog-post/6|tag|http://www.semanlink.net/tag/maali_mnasri
+http://www.artfact-online.fr/blog/blog-post/6|comment|"Author: Maali Mnasri (PhD @ CEA)
+
+First transform text units to vectors? Not always (e.g. a sentence similarity task using lexical word alignment). But vectors are efficient to process, and benefit from existing clustering algorithms such as k-means.
+
+Sentence level or document level? Sentence clustering to summarise large documents.
+
+Thematic clustering vs Semantic clustering: depends on the similarity measure.
+
+Text similarity measures:
+
+- Cosine similarity of tf-idf (suitable to produce thematic clusters)
+- Knowledge-based measures (WordNet) (quantify semantic relatedness of words)
+- Word embeddings
+
+Examples, sample code:
+
+- using WordNet with NLTK, and the formula to compute sentence similarities from word similarities.
+- computing similarities between docs using gensim/word2vec
+
+
+Which clustering algorithm?
+
+- when we have an approximation of the number of clusters, and when the similarity measure is not expensive in terms of computation time, clustering algorithms are suitable and fast. Sample code of k-means clustering using tf-idf vectors with scikit-learn
+- Hierarchical clustering algorithms
+ - no need to specify the number of clusters
+ - but time-consuming (requires computing a similarity matrix for the sentences)
+- for voluminous data, use an incremental clustering algorithm: sentences are processed one at a time; each new sentence is compared to each of the already formed clusters.
+ + + + + +" +http://www.artfact-online.fr/blog/blog-post/6|title|Quick review on Text Clustering and Text Similarity Approaches +http://www.artfact-online.fr/blog/blog-post/6|creationTime|2017-05-18T01:31:31Z +https://perso.limsi.fr/Individu/allauzen/doc/aa_deep_nlp.pdf|creationDate|2018-07-07 +https://perso.limsi.fr/Individu/allauzen/doc/aa_deep_nlp.pdf|tag|http://www.semanlink.net/tag/class_based_language_models +https://perso.limsi.fr/Individu/allauzen/doc/aa_deep_nlp.pdf|tag|http://www.semanlink.net/tag/deep_nlp +https://perso.limsi.fr/Individu/allauzen/doc/aa_deep_nlp.pdf|tag|http://www.semanlink.net/tag/language_model +https://perso.limsi.fr/Individu/allauzen/doc/aa_deep_nlp.pdf|tag|http://www.semanlink.net/tag/alex_allauzen +https://perso.limsi.fr/Individu/allauzen/doc/aa_deep_nlp.pdf|tag|http://www.semanlink.net/tag/slides +https://perso.limsi.fr/Individu/allauzen/doc/aa_deep_nlp.pdf|comment|"- Neural Nets : Basics + - Introduction to multi-layered neural network + - Optimization via back-propagation + - Regularization and Dropout + - The vanishing gradient issue +- Advanced Architectures with NLP applications + - n-gram language model + - Neural Machine Translation (Overview) + - Character based model for sequence tagging" +https://perso.limsi.fr/Individu/allauzen/doc/aa_deep_nlp.pdf|title|Deep learning : background and application to natural language processing +https://perso.limsi.fr/Individu/allauzen/doc/aa_deep_nlp.pdf|creationTime|2018-07-07T14:36:25Z +https://stackoverflow.com/questions/29786985/whats-the-disadvantage-of-lda-for-short-texts|creationDate|2017-06-07 +https://stackoverflow.com/questions/29786985/whats-the-disadvantage-of-lda-for-short-texts|tag|http://www.semanlink.net/tag/biterm_topic_model +https://stackoverflow.com/questions/29786985/whats-the-disadvantage-of-lda-for-short-texts|tag|http://www.semanlink.net/tag/topic_modeling_over_short_texts +https://stackoverflow.com/questions/29786985/whats-the-disadvantage-of-lda-for-short-texts|title|nlp - What's the disadvantage of LDA for short texts? - Stack Overflow +https://stackoverflow.com/questions/29786985/whats-the-disadvantage-of-lda-for-short-texts|creationTime|2017-06-07T18:45:34Z +http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/|creationDate|2015-01-30 +http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/|tag|http://www.semanlink.net/tag/content_negotiation +http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/|tag|http://www.semanlink.net/tag/ruben_verborgh +http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/|tag|http://www.semanlink.net/tag/http_cache +http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/|tag|http://www.semanlink.net/tag/good +http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/|tag|http://www.semanlink.net/tag/rest +http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/|tag|http://www.semanlink.net/tag/api +http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/|comment|"we have to stop building APIs
+Never ask for an API. Ask for new representations on your existing URLs. " +http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/|title|The lie of the API Ruben Verborgh +http://ruben.verborgh.org/blog/2013/11/29/the-lie-of-the-api/|creationTime|2015-01-30T23:10:42Z +https://deepmind.com/blog/learning-to-generate-images/|creationDate|2018-03-28 +https://deepmind.com/blog/learning-to-generate-images/|tag|http://www.semanlink.net/tag/reinforcement_learning +https://deepmind.com/blog/learning-to-generate-images/|tag|http://www.semanlink.net/tag/google_deepmind +https://deepmind.com/blog/learning-to-generate-images/|comment|This ability to interpret objects through the tools that created them gives us a richer understanding of the world and is an important aspect of our intelligence. +https://deepmind.com/blog/learning-to-generate-images/|title|Learning to write programs that generate images DeepMind +https://deepmind.com/blog/learning-to-generate-images/|creationTime|2018-03-28T12:11:42Z +http://nlp.stanford.edu/software/|creationDate|2013-07-12 +http://nlp.stanford.edu/software/|tag|http://www.semanlink.net/tag/stanford +http://nlp.stanford.edu/software/|tag|http://www.semanlink.net/tag/nlp +http://nlp.stanford.edu/software/|title|The Stanford NLP (Natural Language Processing) Group +http://nlp.stanford.edu/software/|creationTime|2013-07-12T10:59:05Z +http://eductice.ens-lyon.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|creationDate|2012-04-11 +http://eductice.ens-lyon.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|tag|http://www.semanlink.net/tag/workshop +http://eductice.ens-lyon.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|tag|http://www.semanlink.net/tag/www_2012 +http://eductice.ens-lyon.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|tag|http://www.semanlink.net/tag/e_learning +http://eductice.ens-lyon.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|tag|http://www.semanlink.net/tag/education +http://eductice.ens-lyon.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|title|Emerging Web Technologies, Facing the Future of Education — EducTice +http://eductice.ens-lyon.fr/EducTice/ressources/journees-scientifiques/EWFE2012/|creationTime|2012-04-11T00:52:50Z +https://www.quora.com/Computational-Neuroscience-Why-does-the-brain-adopt-mechanisms-like-sparse-coding-to-represent-the-environment|creationDate|2017-06-19 +https://www.quora.com/Computational-Neuroscience-Why-does-the-brain-adopt-mechanisms-like-sparse-coding-to-represent-the-environment|tag|http://www.semanlink.net/tag/sparse_dictionary_learning +https://www.quora.com/Computational-Neuroscience-Why-does-the-brain-adopt-mechanisms-like-sparse-coding-to-represent-the-environment|tag|http://www.semanlink.net/tag/computational_neuroscience +https://www.quora.com/Computational-Neuroscience-Why-does-the-brain-adopt-mechanisms-like-sparse-coding-to-represent-the-environment|tag|http://www.semanlink.net/tag/neuroscience_and_ai +https://www.quora.com/Computational-Neuroscience-Why-does-the-brain-adopt-mechanisms-like-sparse-coding-to-represent-the-environment|title|Computational Neuroscience: Why does the brain adopt mechanisms like sparse coding to represent the environment? 
- Quora +https://www.quora.com/Computational-Neuroscience-Why-does-the-brain-adopt-mechanisms-like-sparse-coding-to-represent-the-environment|creationTime|2017-06-19T15:09:37Z +http://www.presentations2go.eu/|creationDate|2014-11-17 +http://www.presentations2go.eu/|title|Presentations 2Go™ Lecture Capture & Webcasting +http://www.presentations2go.eu/|creationTime|2014-11-17T21:18:02Z +http://www.openculture.com/2012/08/the_character_of_physical_law_richard_feynmans_legendary_lecture_series_at_cornell_1964.html|creationDate|2018-01-08 +http://www.openculture.com/2012/08/the_character_of_physical_law_richard_feynmans_legendary_lecture_series_at_cornell_1964.html|tag|http://www.semanlink.net/tag/feynman +http://www.openculture.com/2012/08/the_character_of_physical_law_richard_feynmans_legendary_lecture_series_at_cornell_1964.html|tag|http://www.semanlink.net/tag/physique +http://www.openculture.com/2012/08/the_character_of_physical_law_richard_feynmans_legendary_lecture_series_at_cornell_1964.html|title|'The Character of Physical Law': Richard Feynman's Legendary Course Presented at Cornell, 1964 Open Culture +http://www.openculture.com/2012/08/the_character_of_physical_law_richard_feynmans_legendary_lecture_series_at_cornell_1964.html|creationTime|2018-01-08T23:16:54Z +http://www.dehora.net/journal/2004/08/metagraph_domain_knowledge_v_rdf.html|creationDate|2005-10-28 +http://www.dehora.net/journal/2004/08/metagraph_domain_knowledge_v_rdf.html|tag|http://www.semanlink.net/tag/bioinformatics +http://www.dehora.net/journal/2004/08/metagraph_domain_knowledge_v_rdf.html|tag|http://www.semanlink.net/tag/rdf +http://www.dehora.net/journal/2004/08/metagraph_domain_knowledge_v_rdf.html|tag|http://www.semanlink.net/tag/bill_de_hora +http://www.dehora.net/journal/2004/08/metagraph_domain_knowledge_v_rdf.html|title|Bill de hÓra: MetaGraph: Domain knowledge v RDF +http://www.dehora.net/journal/2004/08/metagraph_domain_knowledge_v_rdf.html|seeAlso|http://www.metagraph.org/index.html +https://arxiv.org/abs/1902.11269|creationDate|2019-03-02 +https://arxiv.org/abs/1902.11269|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1902.11269|tag|http://www.semanlink.net/tag/contextualised_word_representations +https://arxiv.org/abs/1902.11269|tag|http://www.semanlink.net/tag/elmo +https://arxiv.org/abs/1902.11269|arxiv_author|Patrick H. Chen +https://arxiv.org/abs/1902.11269|arxiv_author|Cho-Jui Hsieh +https://arxiv.org/abs/1902.11269|arxiv_author|Liunian Harold Li +https://arxiv.org/abs/1902.11269|arxiv_author|Kai-Wei Chang +https://arxiv.org/abs/1902.11269|comment|"**how to accelerate contextual representation learning**. + +> Contextual representation models are difficult to train due to the large parameter sizes and high computational complexity + +> We find that the softmax layer (the output layer) causes significant inefficiency due to the large vocabulary size. +Therefore, we redesign the learning objectiv. +> Specifically, the proposed approach bypasses the softmax layer by performing language modeling with dimension reduction, and allows the models to leverage pre-trained word embeddings. +Our framework reduces the time spent on the output layer to a negligible level, eliminates almost all the trainable parameters of the softmax layer and performs language modeling without truncating the vocabulary. +When applied to ELMo, our method achieves a 4 times speedup and eliminates 80% trainable parameters while achieving competitive performance on downstream tasks. 
+
+**decouples learning contexts and words**
+
+> Instead of using
+a softmax layer to predict the distribution of the
+missing word, we utilize and extend the SEMFIT
+layer (Kumar and Tsvetkov, 2018) to **predict the
+embedding of the missing word**."
+https://arxiv.org/abs/1902.11269|title|[1902.11269] Efficient Contextual Representation Learning Without Softmax Layer
+https://arxiv.org/abs/1902.11269|creationTime|2019-03-02T08:47:19Z
+https://arxiv.org/abs/1902.11269|arxiv_summary|"Contextual representation models have achieved great success in improving
+various downstream tasks. However, these language-model-based encoders are
+difficult to train due to the large parameter sizes and high computational
+complexity. By carefully examining the training procedure, we find that the
+softmax layer (the output layer) causes significant inefficiency due to the
+large vocabulary size. Therefore, we redesign the learning objective and
+propose an efficient framework for training contextual representation models.
+Specifically, the proposed approach bypasses the softmax layer by performing
+language modeling with dimension reduction, and allows the models to leverage
+pre-trained word embeddings. Our framework reduces the time spent on the output
+layer to a negligible level, eliminates almost all the trainable parameters of
+the softmax layer and performs language modeling without truncating the
+vocabulary. When applied to ELMo, our method achieves a 4 times speedup and
+eliminates 80% trainable parameters while achieving competitive performance on
+downstream tasks."
+https://arxiv.org/abs/1902.11269|arxiv_firstAuthor|Liunian Harold Li
+https://arxiv.org/abs/1902.11269|arxiv_updated|2019-02-28T18:19:14Z
+https://arxiv.org/abs/1902.11269|arxiv_title|Efficient Contextual Representation Learning Without Softmax Layer
+https://arxiv.org/abs/1902.11269|arxiv_published|2019-02-28T18:19:14Z
+https://arxiv.org/abs/1902.11269|arxiv_num|1902.11269
+http://news.bbc.co.uk/2/hi/science/nature/8043397.stm|creationDate|2009-05-12
+http://news.bbc.co.uk/2/hi/science/nature/8043397.stm|tag|http://www.semanlink.net/tag/smart_energy_grids
+http://news.bbc.co.uk/2/hi/science/nature/8043397.stm|title|BBC NEWS 'Distributed power' to save Earth
+http://news.bbc.co.uk/2/hi/science/nature/8043397.stm|creationTime|2009-05-12T00:14:34Z
+http://news.bbc.co.uk/2/hi/science/nature/8043397.stm|source|BBC
+http://www.paulgraham.com/mac.html|creationDate|2006-01-12
+http://www.paulgraham.com/mac.html|tag|http://www.semanlink.net/tag/paul_graham
+http://www.paulgraham.com/mac.html|tag|http://www.semanlink.net/tag/mac_os_x
+http://www.paulgraham.com/mac.html|tag|http://www.semanlink.net/tag/hackers
+http://www.paulgraham.com/mac.html|comment|"All the best hackers
+I know are gradually switching to Macs. My
+friend Robert said his whole research group at MIT recently bought
+themselves Powerbooks. These guys are not the graphic designers
+and grandmas who were buying Macs at Apple's low point in the
+mid 1990s. They're about as hardcore OS hackers as you can get. The reason, of course, is OS X. Powerbooks are beautifully designed
+and run FreeBSD. What more do you need to know?"
+http://www.paulgraham.com/mac.html|title|Return of the Mac +https://www.html5rocks.com/en/tutorials/file/dndfiles/|creationDate|2019-04-23 +https://www.html5rocks.com/en/tutorials/file/dndfiles/|tag|http://www.semanlink.net/tag/javascript +https://www.html5rocks.com/en/tutorials/file/dndfiles/|tag|http://www.semanlink.net/tag/html5 +https://www.html5rocks.com/en/tutorials/file/dndfiles/|tag|http://www.semanlink.net/tag/drag_and_drop +https://www.html5rocks.com/en/tutorials/file/dndfiles/|comment|"HTML5 provides a standard way to interact with local files, via the File +API specification" +https://www.html5rocks.com/en/tutorials/file/dndfiles/|title|Reading local files in JavaScript - HTML5 Rocks +https://www.html5rocks.com/en/tutorials/file/dndfiles/|creationTime|2019-04-23T14:58:02Z +http://events.linkeddata.org/ldow2008/slides/Servant-ldow2008-slides.pdf|creationDate|2008-05-08 +http://events.linkeddata.org/ldow2008/slides/Servant-ldow2008-slides.pdf|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://events.linkeddata.org/ldow2008/slides/Servant-ldow2008-slides.pdf|tag|http://www.semanlink.net/tag/fps_and_ldow2008 +http://events.linkeddata.org/ldow2008/slides/Servant-ldow2008-slides.pdf|tag|http://www.semanlink.net/tag/slides_fps +http://events.linkeddata.org/ldow2008/slides/Servant-ldow2008-slides.pdf|comment|Slides of my talk at LDOW2008 +http://events.linkeddata.org/ldow2008/slides/Servant-ldow2008-slides.pdf|title|Linking Enterprise Data (slides) +http://events.linkeddata.org/ldow2008/slides/Servant-ldow2008-slides.pdf|creationTime|2008-05-08T14:30:09Z +http://hplusmagazine.com/articles/ai/%E2%80%9C-neurons-brain%E2%80%9D-molecular-computer-evolves|creationDate|2010-05-14 +http://hplusmagazine.com/articles/ai/%E2%80%9C-neurons-brain%E2%80%9D-molecular-computer-evolves|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://hplusmagazine.com/articles/ai/%E2%80%9C-neurons-brain%E2%80%9D-molecular-computer-evolves|comment|Physicist Ranjit Pati of Michigan Tech: “We have mimicked how neurons behave in the brain.” +http://hplusmagazine.com/articles/ai/%E2%80%9C-neurons-brain%E2%80%9D-molecular-computer-evolves|title|“Like Neurons in the Brain”: A Molecular Computer That Evolves h+ Magazine +http://hplusmagazine.com/articles/ai/%E2%80%9C-neurons-brain%E2%80%9D-molecular-computer-evolves|creationTime|2010-05-14T21:40:53Z +http://www.cs.cornell.edu/courses/cs5740/2016sp/resources/dans.pdf|creationDate|2018-05-29 +http://www.cs.cornell.edu/courses/cs5740/2016sp/resources/dans.pdf|tag|http://www.semanlink.net/tag/dans_deep_averaging_neural_networks +http://www.cs.cornell.edu/courses/cs5740/2016sp/resources/dans.pdf|tag|http://www.semanlink.net/tag/neural_bag_of_words +http://www.cs.cornell.edu/courses/cs5740/2016sp/resources/dans.pdf|tag|http://www.semanlink.net/tag/word_embedding_compositionality +http://www.cs.cornell.edu/courses/cs5740/2016sp/resources/dans.pdf|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.cs.cornell.edu/courses/cs5740/2016sp/resources/dans.pdf|comment|"> Many existing deep learning models for natural language processing tasks focus on learning the compositionality of their inputs, which requires many expensive computations. We present a simple deep neural network that competes with and, in some cases, outperforms such models on sentiment analysis and factoid question answering tasks while taking only a fraction of the training time. 
While our model is syntactically-ignorant, we show significant improvements over previous bag-of-words models by deepening our network and applying a novel variant of dropout + + +" +http://www.cs.cornell.edu/courses/cs5740/2016sp/resources/dans.pdf|title|Deep Unordered Composition Rivals Syntactic Methods for Text Classification (2015) +http://www.cs.cornell.edu/courses/cs5740/2016sp/resources/dans.pdf|creationTime|2018-05-29T16:52:50Z +http://www.harmattan.fr/index.asp?navig=catalogue&obj=video&no=1092|creationDate|2008-12-27 +http://www.harmattan.fr/index.asp?navig=catalogue&obj=video&no=1092|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.harmattan.fr/index.asp?navig=catalogue&obj=video&no=1092|tag|http://www.semanlink.net/tag/petrole +http://www.harmattan.fr/index.asp?navig=catalogue&obj=video&no=1092|title|Vidéos/DVD L'ÉPOPÉE DE L'OR NOIR - Auteur-réalisateur Jean-Pierre Beaurenaut et Yves Billon +http://www.harmattan.fr/index.asp?navig=catalogue&obj=video&no=1092|creationTime|2008-12-27T20:17:03Z +https://spacy.io/|creationDate|2017-06-29 +https://spacy.io/|tag|http://www.semanlink.net/tag/spacy +https://spacy.io/|title|spaCy - Industrial-strength Natural Language Processing in Python +https://spacy.io/|creationTime|2017-06-29T18:32:28Z +http://docs.scipy.org/doc/numpy/user/whatisnumpy.html|creationDate|2015-10-19 +http://docs.scipy.org/doc/numpy/user/whatisnumpy.html|tag|http://www.semanlink.net/tag/numpy +http://docs.scipy.org/doc/numpy/user/whatisnumpy.html|title|What is NumPy? +http://docs.scipy.org/doc/numpy/user/whatisnumpy.html|creationTime|2015-10-19T12:18:52Z +https://hackernoon.com/how-it-feels-to-learn-javascript-in-2016-d3a717dd577f|creationDate|2018-07-22 +https://hackernoon.com/how-it-feels-to-learn-javascript-in-2016-d3a717dd577f|tag|http://www.semanlink.net/tag/javascript +https://hackernoon.com/how-it-feels-to-learn-javascript-in-2016-d3a717dd577f|comment|javascript fatigue +https://hackernoon.com/how-it-feels-to-learn-javascript-in-2016-d3a717dd577f|title|How it feels to learn JavaScript in 2016 – Hacker Noon +https://hackernoon.com/how-it-feels-to-learn-javascript-in-2016-d3a717dd577f|creationTime|2018-07-22T11:53:49Z +https://www.horsesforsources.com/gartner_fail_automation-AI_080418|creationDate|2018-04-09 +https://www.horsesforsources.com/gartner_fail_automation-AI_080418|tag|http://www.semanlink.net/tag/jobbotization +https://www.horsesforsources.com/gartner_fail_automation-AI_080418|tag|http://www.semanlink.net/tag/gartner +https://www.horsesforsources.com/gartner_fail_automation-AI_080418|title|Gartner fails spectacularly with its 180 degree flip on the impact of AI Automation on jobs - Horses for Sources +https://www.horsesforsources.com/gartner_fail_automation-AI_080418|creationTime|2018-04-09T21:59:08Z +http://www.programmableweb.com/news/cognitive-computing-makes-it-possible-to-build-truly-amazing-apps/analysis/2014/09/05?utm_content=buffer3c400&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2014-09-09 +http://www.programmableweb.com/news/cognitive-computing-makes-it-possible-to-build-truly-amazing-apps/analysis/2014/09/05?utm_content=buffer3c400&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/cognitive_computing +http://www.programmableweb.com/news/cognitive-computing-makes-it-possible-to-build-truly-amazing-apps/analysis/2014/09/05?utm_content=buffer3c400&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Cognitive Computing Makes It Possible to Build 
Truly Amazing Apps ProgrammableWeb +http://www.programmableweb.com/news/cognitive-computing-makes-it-possible-to-build-truly-amazing-apps/analysis/2014/09/05?utm_content=buffer3c400&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2014-09-09T10:41:19Z +http://www.xml.com/pub/a/2007/04/04/introducing-rdfa-part-two.html|creationDate|2007-07-02 +http://www.xml.com/pub/a/2007/04/04/introducing-rdfa-part-two.html|tag|http://www.semanlink.net/tag/rdfa +http://www.xml.com/pub/a/2007/04/04/introducing-rdfa-part-two.html|title|XML.com: Introducing RDFa, Part Two +http://www.xml.com/pub/a/2007/04/04/introducing-rdfa-part-two.html|creationTime|2007-07-02T22:39:08Z +http://autourdemesromans.com/lexecution-de-semblancay-une-erreur-judiciaire/|creationDate|2018-03-26 +http://autourdemesromans.com/lexecution-de-semblancay-une-erreur-judiciaire/|tag|http://www.semanlink.net/tag/erreur_judiciaire +http://autourdemesromans.com/lexecution-de-semblancay-une-erreur-judiciaire/|tag|http://www.semanlink.net/tag/poesie +http://autourdemesromans.com/lexecution-de-semblancay-une-erreur-judiciaire/|tag|http://www.semanlink.net/tag/souvenirs +http://autourdemesromans.com/lexecution-de-semblancay-une-erreur-judiciaire/|comment|"Lorsque Maillart, juge d’enfer, menait + +A Montfaucon Semblançay l’âme rendre, + +A votre avis, lequel des deux tenait + +Meilleur maintien ? Pour le vous faire entendre, + +Maillart sembla l’homme que mort va prendre, + +Et Semblançay fut si ferme vieillard + +Que l’on cuidait pour vray qu’il menast pendre + +A Montfaucon le lieutenant Maillart. + +(Clément Marot) + + + + + +" +http://autourdemesromans.com/lexecution-de-semblancay-une-erreur-judiciaire/|title|La condamnation de Semblançay : une erreur judiciaire ? autourdemesromans.com +http://autourdemesromans.com/lexecution-de-semblancay-une-erreur-judiciaire/|creationTime|2018-03-26T09:34:09Z +http://blog.sgo.to/2013/10/minha-entrevista-no-google.html|creationDate|2014-10-29 +http://blog.sgo.to/2013/10/minha-entrevista-no-google.html|tag|http://www.semanlink.net/tag/google +http://blog.sgo.to/2013/10/minha-entrevista-no-google.html|tag|http://www.semanlink.net/tag/samuel_goto +http://blog.sgo.to/2013/10/minha-entrevista-no-google.html|title|Hello World: Minha Entrevista no Google +http://blog.sgo.to/2013/10/minha-entrevista-no-google.html|creationTime|2014-10-29T01:47:44Z +http://www2.cnrs.fr/en/2447.htm?utm_source=twitterfeed&utm_medium=twitter|creationDate|2014-09-10 +http://www2.cnrs.fr/en/2447.htm?utm_source=twitterfeed&utm_medium=twitter|tag|http://www.semanlink.net/tag/antibiotiques +http://www2.cnrs.fr/en/2447.htm?utm_source=twitterfeed&utm_medium=twitter|tag|http://www.semanlink.net/tag/bacteries +http://www2.cnrs.fr/en/2447.htm?utm_source=twitterfeed&utm_medium=twitter|title|"""Immortal"" flatworms : a weapon against bacteria - CNRS Web site - CNRS" +http://www2.cnrs.fr/en/2447.htm?utm_source=twitterfeed&utm_medium=twitter|creationTime|2014-09-10T23:00:45Z +https://cloud.google.com/blog/big-data/2016/07/understanding-neural-networks-with-tensorflow-playground|creationDate|2016-07-27 +https://cloud.google.com/blog/big-data/2016/07/understanding-neural-networks-with-tensorflow-playground|tag|http://www.semanlink.net/tag/tensorflow +https://cloud.google.com/blog/big-data/2016/07/understanding-neural-networks-with-tensorflow-playground|tag|http://www.semanlink.net/tag/artificial_neural_network 
+https://cloud.google.com/blog/big-data/2016/07/understanding-neural-networks-with-tensorflow-playground|title|Understanding neural networks with TensorFlow Playground Google Cloud Big Data and Machine Learning Blog    Google Cloud Platform +https://cloud.google.com/blog/big-data/2016/07/understanding-neural-networks-with-tensorflow-playground|creationTime|2016-07-27T10:05:31Z +https://aclanthology.coli.uni-saarland.de/papers/D18-1092/d18-1092|creationDate|2018-11-02 +https://aclanthology.coli.uni-saarland.de/papers/D18-1092/d18-1092|tag|http://www.semanlink.net/tag/nlp_google +https://aclanthology.coli.uni-saarland.de/papers/D18-1092/d18-1092|tag|http://www.semanlink.net/tag/sgnn +https://aclanthology.coli.uni-saarland.de/papers/D18-1092/d18-1092|tag|http://www.semanlink.net/tag/emnlp_2018 +https://aclanthology.coli.uni-saarland.de/papers/D18-1092/d18-1092|comment|[same paper](https://aclweb.org/anthology/papers/D/D18/D18-1092/) +https://aclanthology.coli.uni-saarland.de/papers/D18-1092/d18-1092|title|Self-Governing Neural Networks for On-Device Short Text Classification - Sujith Ravi Zornitsa Kozareva (2018) +https://aclanthology.coli.uni-saarland.de/papers/D18-1092/d18-1092|creationTime|2018-11-02T23:20:31Z +https://en.wikipedia.org/wiki/Headhunters_(film)|creationDate|2017-10-07 +https://en.wikipedia.org/wiki/Headhunters_(film)|tag|http://www.semanlink.net/tag/thriller +https://en.wikipedia.org/wiki/Headhunters_(film)|title|Headhunters (film) +https://en.wikipedia.org/wiki/Headhunters_(film)|creationTime|2017-10-07T01:13:54Z +http://www.researchgate.net/publication/226638768_Highly_discriminative_statistical_features_for_email_classification|creationDate|2015-10-20 +http://www.researchgate.net/publication/226638768_Highly_discriminative_statistical_features_for_email_classification|tag|http://www.semanlink.net/tag/email_classification +http://www.researchgate.net/publication/226638768_Highly_discriminative_statistical_features_for_email_classification|tag|http://www.semanlink.net/tag/dimensionality_reduction +http://www.researchgate.net/publication/226638768_Highly_discriminative_statistical_features_for_email_classification|tag|http://www.semanlink.net/tag/feature_extraction +http://www.researchgate.net/publication/226638768_Highly_discriminative_statistical_features_for_email_classification|comment|"comparison of several feature selection and extraction methods in the frame of email classification +" +http://www.researchgate.net/publication/226638768_Highly_discriminative_statistical_features_for_email_classification|title|Highly discriminative statistical features for email classification +http://www.researchgate.net/publication/226638768_Highly_discriminative_statistical_features_for_email_classification|creationTime|2015-10-20T11:30:52Z +http://www.mkbergman.com/?p=354|creationDate|2007-04-04 +http://www.mkbergman.com/?p=354|tag|http://www.semanlink.net/tag/dbpedia +http://www.mkbergman.com/?p=354|title|Did You Blink? 
The Structured Web Just Arrived +http://www.mkbergman.com/?p=354|creationTime|2007-04-04T22:44:23Z +http://ccil.org/~cowan/XML/tagsoup/|creationDate|2008-05-08 +http://ccil.org/~cowan/XML/tagsoup/|tag|http://www.semanlink.net/tag/html_parsing +http://ccil.org/~cowan/XML/tagsoup/|tag|http://www.semanlink.net/tag/java_dev +http://ccil.org/~cowan/XML/tagsoup/|comment|a SAX-compliant parser written in Java that, instead of parsing well-formed or valid XML, parses HTML as it is found in the wild +http://ccil.org/~cowan/XML/tagsoup/|title|TagSoup home page +http://ccil.org/~cowan/XML/tagsoup/|creationTime|2008-05-08T02:54:45Z +http://science.sciencemag.org/content/358/6364/793?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|creationDate|2017-11-27 +http://science.sciencemag.org/content/358/6364/793?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +http://science.sciencemag.org/content/358/6364/793?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|tag|http://www.semanlink.net/tag/nearest_neighbor_search +http://science.sciencemag.org/content/358/6364/793?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|tag|http://www.semanlink.net/tag/similarity_queries +http://science.sciencemag.org/content/358/6364/793?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|title|Similarity Searches: Flies categorize odors using a variant of locality-sensitive hashing, inspire new algorithm +http://science.sciencemag.org/content/358/6364/793?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|creationTime|2017-11-27T17:22:01Z +http://arxiv.org/abs/0811.3701|creationDate|2009-01-20 +http://arxiv.org/abs/0811.3701|tag|http://www.semanlink.net/tag/hypothese_de_riemann +http://arxiv.org/abs/0811.3701|tag|http://www.semanlink.net/tag/jean_paul +http://arxiv.org/abs/0811.3701|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/0811.3701|arxiv_author|Jean-Paul Cardinal +http://arxiv.org/abs/0811.3701|comment|"> In this paper we explore a family of congruences over N* from which a sequence of symmetric matrices related to the Mertens function is built. From the results of numerical experiments we formulate a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important role in this classical and difficult problem. +" +http://arxiv.org/abs/0811.3701|title|[0811.3701] Symmetric matrices related to the Mertens function +http://arxiv.org/abs/0811.3701|creationTime|2009-01-20T21:56:47Z +http://arxiv.org/abs/0811.3701|arxiv_summary|"In this paper we explore a family of congruences over $\N^\ast$ from which +one builds a sequence of symmetric matrices related to the Mertens function. +From the results of numerical experiments, we formulate a conjecture about +the growth of the quadratic norm of these matrices, which implies the Riemann +hypothesis. This suggests that matrix analysis methods may come to play a more +important role in this classical and difficult problem." 
+http://arxiv.org/abs/0811.3701|arxiv_firstAuthor|Jean-Paul Cardinal +http://arxiv.org/abs/0811.3701|arxiv_updated|2009-03-09T11:28:48Z +http://arxiv.org/abs/0811.3701|arxiv_title|Symmetric matrices related to the Mertens function +http://arxiv.org/abs/0811.3701|arxiv_published|2008-11-22T17:22:06Z +http://arxiv.org/abs/0811.3701|arxiv_num|0811.3701 +http://www.globalsecuritymag.fr/Les-DSI-deplorent-un-manque-de,20121106,33532.html|creationDate|2012-12-06 +http://www.globalsecuritymag.fr/Les-DSI-deplorent-un-manque-de,20121106,33532.html|tag|http://www.semanlink.net/tag/decisions_en_entreprise +http://www.globalsecuritymag.fr/Les-DSI-deplorent-un-manque-de,20121106,33532.html|tag|http://www.semanlink.net/tag/dsi +http://www.globalsecuritymag.fr/Les-DSI-deplorent-un-manque-de,20121106,33532.html|title|Les DSI déplorent un manque de culture numérique des Comités de direction - Global Security Mag Online +http://www.globalsecuritymag.fr/Les-DSI-deplorent-un-manque-de,20121106,33532.html|creationTime|2012-12-06T23:44:04Z +http://fr.wikipedia.org/wiki/Plein_Soleil|creationDate|2013-09-22 +http://fr.wikipedia.org/wiki/Plein_Soleil|tag|http://www.semanlink.net/tag/film_francais +http://fr.wikipedia.org/wiki/Plein_Soleil|tag|http://www.semanlink.net/tag/patricia_highsmith +http://fr.wikipedia.org/wiki/Plein_Soleil|tag|http://www.semanlink.net/tag/delon +http://fr.wikipedia.org/wiki/Plein_Soleil|comment|"film français de René Clément avec Alain Delon. D'après ""The Talented Mr. Ripley"" de Patricia Highsmith" +http://fr.wikipedia.org/wiki/Plein_Soleil|title|Plein Soleil +http://fr.wikipedia.org/wiki/Plein_Soleil|creationTime|2013-09-22T22:38:16Z +http://hublog.hubmed.org/archives/001049.html|creationDate|2005-09-25 +http://hublog.hubmed.org/archives/001049.html|tag|http://www.semanlink.net/tag/touchgraph +http://hublog.hubmed.org/archives/001049.html|tag|http://www.semanlink.net/tag/del_icio_us +http://hublog.hubmed.org/archives/001049.html|title|HubLog: Graph del.icio.us related tags +http://wiki.eclipse.org/EGit/Git_For_Eclipse_Users|creationDate|2013-09-23 +http://wiki.eclipse.org/EGit/Git_For_Eclipse_Users|tag|http://www.semanlink.net/tag/tutorial +http://wiki.eclipse.org/EGit/Git_For_Eclipse_Users|tag|http://www.semanlink.net/tag/eclipse +http://wiki.eclipse.org/EGit/Git_For_Eclipse_Users|tag|http://www.semanlink.net/tag/git +http://wiki.eclipse.org/EGit/Git_For_Eclipse_Users|tag|http://www.semanlink.net/tag/egit +http://wiki.eclipse.org/EGit/Git_For_Eclipse_Users|title|EGit/Git For Eclipse Users - Eclipsepedia +http://wiki.eclipse.org/EGit/Git_For_Eclipse_Users|creationTime|2013-09-23T11:47:04Z +https://gist.github.com/andrewtkemp1/fa8f28e867e17559b931c3f6de9a4b9e|creationDate|2019-03-15 +https://gist.github.com/andrewtkemp1/fa8f28e867e17559b931c3f6de9a4b9e|tag|http://www.semanlink.net/tag/github +https://gist.github.com/andrewtkemp1/fa8f28e867e17559b931c3f6de9a4b9e|title|This is a very basic guide on how to start working on group projects at DevMountain with GitHub. 
· GitHub +https://gist.github.com/andrewtkemp1/fa8f28e867e17559b931c3f6de9a4b9e|creationTime|2019-03-15T00:56:52Z +https://en.wikipedia.org/wiki/Paper_Moon_(film)|creationDate|2017-09-25 +https://en.wikipedia.org/wiki/Paper_Moon_(film)|tag|http://www.semanlink.net/tag/film_americain +https://en.wikipedia.org/wiki/Paper_Moon_(film)|tag|http://www.semanlink.net/tag/comedie +https://en.wikipedia.org/wiki/Paper_Moon_(film)|comment|"Road movie durant la grande dépression ; un arnaqueur (Ryan O'Neal) vendeur de bibles et une petite fille + +" +https://en.wikipedia.org/wiki/Paper_Moon_(film)|title|Paper Moon +https://en.wikipedia.org/wiki/Paper_Moon_(film)|creationTime|2017-09-25T22:34:37Z +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4322542.stm|creationDate|2005-10-10 +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4322542.stm|tag|http://www.semanlink.net/tag/grand_challenge +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4322542.stm|tag|http://www.semanlink.net/tag/volkswagen +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4322542.stm|tag|http://www.semanlink.net/tag/stanford +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4322542.stm|comment|"A team from Stanford University's School of Engineering has made motoring history, winning a $2m (£1.14m) prize in the process.
+Its car, a Volkswagen Touareg nicknamed ""Stanley"", has become the first self-navigating vehicle to successfully complete the gruelling 131.6 mile (211km) cross-country Darpa Grand Challenge, a race for autonomous robot vehicles held in Nevada's Mojave desert.
+"
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4322542.stm|title|BBC NEWS Technology Robotic racers achieve milestone
+http://hyperfp.noosblog.fr/|creationDate|2006-01-14
+http://hyperfp.noosblog.fr/|tag|http://www.semanlink.net/tag/fps_blog
+http://hyperfp.noosblog.fr/|tag|http://www.semanlink.net/tag/noos
+http://hyperfp.noosblog.fr/|title|Mon weblog chez Noos
+http://www.w3.org/DesignIssues/NoSnooping.html|creationDate|2011-03-08
+http://www.w3.org/DesignIssues/NoSnooping.html|tag|http://www.semanlink.net/tag/publicite_internet
+http://www.w3.org/DesignIssues/NoSnooping.html|tag|http://www.semanlink.net/tag/tim_berners_lee
+http://www.w3.org/DesignIssues/NoSnooping.html|comment|The act of reading, like the act of writing, is a pure, fundamental, human act. It must be available without interference or spying.
+http://www.w3.org/DesignIssues/NoSnooping.html|title|No Snooping
+http://www.w3.org/DesignIssues/NoSnooping.html|creationTime|2011-03-08T09:41:28Z
+https://developers.google.com/gmail/schemas/|creationDate|2013-06-13
+https://developers.google.com/gmail/schemas/|tag|http://www.semanlink.net/tag/gmail
+https://developers.google.com/gmail/schemas/|tag|http://www.semanlink.net/tag/schema_org
+https://developers.google.com/gmail/schemas/|title|Schemas in Gmail — Google Developers
+https://developers.google.com/gmail/schemas/|creationTime|2013-06-13T16:20:35Z
+http://www.nytimes.com/2015/12/04/business/dealbook/how-mark-zuckerbergs-altruism-helps-himself.html|creationDate|2015-12-04
+http://www.nytimes.com/2015/12/04/business/dealbook/how-mark-zuckerbergs-altruism-helps-himself.html|tag|http://www.semanlink.net/tag/mark_zuckerberg
+http://www.nytimes.com/2015/12/04/business/dealbook/how-mark-zuckerbergs-altruism-helps-himself.html|title|How Mark Zuckerberg’s Altruism Helps Himself - The New York Times
+http://www.nytimes.com/2015/12/04/business/dealbook/how-mark-zuckerbergs-altruism-helps-himself.html|creationTime|2015-12-04T00:16:36Z
+http://www.wired.co.uk/article/darpa-arati-prabhakar-humans-machines|creationDate|2017-01-30
+http://www.wired.co.uk/article/darpa-arati-prabhakar-humans-machines|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne
+http://www.wired.co.uk/article/darpa-arati-prabhakar-humans-machines|tag|http://www.semanlink.net/tag/darpa
+http://www.wired.co.uk/article/darpa-arati-prabhakar-humans-machines|title|The merging of humans and machines is happening now WIRED UK
+http://www.wired.co.uk/article/darpa-arati-prabhakar-humans-machines|creationTime|2017-01-30T11:43:29Z
+https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html|creationDate|2018-11-06
+https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html|tag|http://www.semanlink.net/tag/self_attention
+https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html|tag|http://www.semanlink.net/tag/lilian_weng
+https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html|tag|http://www.semanlink.net/tag/deep_learning_attention
+https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html|title|Attention? Attention!
+https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html|creationTime|2018-11-06T19:56:25Z +http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/|creationDate|2015-10-16 +http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/|tag|http://www.semanlink.net/tag/good +http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/|tag|http://www.semanlink.net/tag/deep_learning +http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/|tag|http://www.semanlink.net/tag/word_embedding +http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/|tag|http://www.semanlink.net/tag/recurrent_neural_network +http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/|tag|http://www.semanlink.net/tag/t_sne +http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/|tag|http://www.semanlink.net/tag/christopher_olah +http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/|tag|http://www.semanlink.net/tag/nlp +http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/|title|Deep Learning, NLP, and Representations - colah's blog +http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/|creationTime|2015-10-16T11:40:15Z +http://www.sterpin.net/imacslotin.htm|creationDate|2006-04-08 +http://www.sterpin.net/imacslotin.htm|tag|http://www.semanlink.net/tag/imac +http://www.sterpin.net/imacslotin.htm|tag|http://www.semanlink.net/tag/bricolage_mac +http://www.sterpin.net/imacslotin.htm|tag|http://www.semanlink.net/tag/howto +http://www.sterpin.net/imacslotin.htm|comment|Comment remplacer le disque dur d'un imac slot-in ou son lecteur optique (et éventuellement la pile de sauvegarde et les barrettes mémoire) +http://www.sterpin.net/imacslotin.htm|title|Démontage iMac Remplacer le disque dur d'un imac slot-in ou son lecteur optique (et éventuellement la pile de sauvegarde et les barrettes mémoire) +https://www.tensorflow.org/tutorials/word2vec|creationDate|2017-08-28 +https://www.tensorflow.org/tutorials/word2vec|tag|http://www.semanlink.net/tag/word2vec +https://www.tensorflow.org/tutorials/word2vec|tag|http://www.semanlink.net/tag/word_embedding +https://www.tensorflow.org/tutorials/word2vec|tag|http://www.semanlink.net/tag/tutorial +https://www.tensorflow.org/tutorials/word2vec|tag|http://www.semanlink.net/tag/tensorflow +https://www.tensorflow.org/tutorials/word2vec|title|Vector Representations of Words    TensorFlow +https://www.tensorflow.org/tutorials/word2vec|creationTime|2017-08-28T15:41:07Z +http://www.nytimes.com/2015/01/02/business/energy-environment/a-gray-area-in-regulation-of-genetically-modified-crops.html?partner=rss&emc=rss&_r=0|creationDate|2015-01-02 +http://www.nytimes.com/2015/01/02/business/energy-environment/a-gray-area-in-regulation-of-genetically-modified-crops.html?partner=rss&emc=rss&_r=0|tag|http://www.semanlink.net/tag/ogm +http://www.nytimes.com/2015/01/02/business/energy-environment/a-gray-area-in-regulation-of-genetically-modified-crops.html?partner=rss&emc=rss&_r=0|title|Scientists Alter Crops With Techniques Outside Regulators’ Scope - NYTimes.com +http://www.nytimes.com/2015/01/02/business/energy-environment/a-gray-area-in-regulation-of-genetically-modified-crops.html?partner=rss&emc=rss&_r=0|creationTime|2015-01-02T12:28:46Z +http://blogs.sun.com/bblfish/entry/foaf_enabling_an_enterprise|creationDate|2008-05-08 +http://blogs.sun.com/bblfish/entry/foaf_enabling_an_enterprise|tag|http://www.semanlink.net/tag/henry_story 
+http://blogs.sun.com/bblfish/entry/foaf_enabling_an_enterprise|tag|http://www.semanlink.net/tag/semantic_enterprise
+http://blogs.sun.com/bblfish/entry/foaf_enabling_an_enterprise|tag|http://www.semanlink.net/tag/foaf
+http://blogs.sun.com/bblfish/entry/foaf_enabling_an_enterprise|title|foaf enabling an enterprise
+http://blogs.sun.com/bblfish/entry/foaf_enabling_an_enterprise|creationTime|2008-05-08T14:00:38Z
+http://www.lemonde.fr/idees/article/2011/09/02/contre-l-ideologie-de-la-competence-l-education-doit-apprendre-a-penser_1566841_3232.html|creationDate|2011-09-02
+http://www.lemonde.fr/idees/article/2011/09/02/contre-l-ideologie-de-la-competence-l-education-doit-apprendre-a-penser_1566841_3232.html|tag|http://www.semanlink.net/tag/ecole
+http://www.lemonde.fr/idees/article/2011/09/02/contre-l-ideologie-de-la-competence-l-education-doit-apprendre-a-penser_1566841_3232.html|tag|http://www.semanlink.net/tag/education
+http://www.lemonde.fr/idees/article/2011/09/02/contre-l-ideologie-de-la-competence-l-education-doit-apprendre-a-penser_1566841_3232.html|title|Contre l'idéologie de la compétence, l'éducation doit apprendre à penser
+http://www.lemonde.fr/idees/article/2011/09/02/contre-l-ideologie-de-la-competence-l-education-doit-apprendre-a-penser_1566841_3232.html|creationTime|2011-09-02T22:40:15Z
+http://www.lemonde.fr/idees/article/2011/09/02/contre-l-ideologie-de-la-competence-l-education-doit-apprendre-a-penser_1566841_3232.html|source|Le Monde
+http://www.lemonde.fr/idees/article/2011/09/02/contre-l-ideologie-de-la-competence-l-education-doit-apprendre-a-penser_1566841_3232.html|date|2011-09-03
+https://github.com/RubenVerborgh/N3.js|creationDate|2013-12-04
+https://github.com/RubenVerborgh/N3.js|tag|http://www.semanlink.net/tag/javascript_rdf
+https://github.com/RubenVerborgh/N3.js|tag|http://www.semanlink.net/tag/turtle
+https://github.com/RubenVerborgh/N3.js|tag|http://www.semanlink.net/tag/ruben_verborgh
+https://github.com/RubenVerborgh/N3.js|comment|"""Lightning fast, asynchronous, streaming Turtle for JavaScript"""
+https://github.com/RubenVerborgh/N3.js|title|N3.js
+https://github.com/RubenVerborgh/N3.js|creationTime|2013-12-04T13:58:45Z
+https://www.kaggle.com/ostegm/plotting-similar-patents|creationDate|2019-02-19
+https://www.kaggle.com/ostegm/plotting-similar-patents|tag|http://www.semanlink.net/tag/python
+https://www.kaggle.com/ostegm/plotting-similar-patents|tag|http://www.semanlink.net/tag/embeddings
+https://www.kaggle.com/ostegm/plotting-similar-patents|tag|http://www.semanlink.net/tag/google_patents
+https://www.kaggle.com/ostegm/plotting-similar-patents|comment|"Kaggle kernel to get started using the **patent embeddings** in Python
+"
+https://www.kaggle.com/ostegm/plotting-similar-patents|title|Plotting Similar Patents Kaggle
+https://www.kaggle.com/ostegm/plotting-similar-patents|creationTime|2019-02-19T17:45:21Z
+https://www.lechorepublicain.fr/nogent-le-rotrou-28400/faits-divers/le-gros-chene-pres-de-la-loupe-a-perdu-une-branche_12130843/|creationDate|2019-04-22
+https://www.lechorepublicain.fr/nogent-le-rotrou-28400/faits-divers/le-gros-chene-pres-de-la-loupe-a-perdu-une-branche_12130843/|tag|http://www.semanlink.net/tag/chene
+https://www.lechorepublicain.fr/nogent-le-rotrou-28400/faits-divers/le-gros-chene-pres-de-la-loupe-a-perdu-une-branche_12130843/|tag|http://www.semanlink.net/tag/arbres_remarquables
+https://www.lechorepublicain.fr/nogent-le-rotrou-28400/faits-divers/le-gros-chene-pres-de-la-loupe-a-perdu-une-branche_12130843/|title|Le Gros Chêne, près de La Loupe, a perdu une branche (27/10/2016)
+https://www.lechorepublicain.fr/nogent-le-rotrou-28400/faits-divers/le-gros-chene-pres-de-la-loupe-a-perdu-une-branche_12130843/|creationTime|2019-04-22T22:50:44Z
+https://blog.openai.com/better-language-models/|creationDate|2019-02-14
+https://blog.openai.com/better-language-models/|tag|http://www.semanlink.net/tag/language_model
+https://blog.openai.com/better-language-models/|tag|http://www.semanlink.net/tag/openai_gpt
+https://blog.openai.com/better-language-models/|tag|http://www.semanlink.net/tag/pre_trained_language_models
+https://blog.openai.com/better-language-models/|tag|http://www.semanlink.net/tag/openai
+https://blog.openai.com/better-language-models/|comment|"Our model, called GPT-2 (a successor to GPT), was trained simply to predict the next word in 40GB of Internet text. Due to our concerns about malicious applications of the technology, we are not releasing the trained model
+"
+https://blog.openai.com/better-language-models/|title|Better Language Models and Their Implications
+https://blog.openai.com/better-language-models/|creationTime|2019-02-14T20:50:48Z
+https://ujjwalkarn.me/2016/08/11/intuitive-explanation-convnets/|creationDate|2017-11-23
+https://ujjwalkarn.me/2016/08/11/intuitive-explanation-convnets/|tag|http://www.semanlink.net/tag/convolutional_neural_network
+https://ujjwalkarn.me/2016/08/11/intuitive-explanation-convnets/|tag|http://www.semanlink.net/tag/good
+https://ujjwalkarn.me/2016/08/11/intuitive-explanation-convnets/|title|An Intuitive Explanation of Convolutional Neural Networks – the data science blog
+https://ujjwalkarn.me/2016/08/11/intuitive-explanation-convnets/|creationTime|2017-11-23T01:44:25Z
+http://www.aaronsw.com/weblog/nummi|creationDate|2013-01-13
+http://www.aaronsw.com/weblog/nummi|tag|http://www.semanlink.net/tag/toyota
+http://www.aaronsw.com/weblog/nummi|tag|http://www.semanlink.net/tag/aaron_swartz
+http://www.aaronsw.com/weblog/nummi|tag|http://www.semanlink.net/tag/general_motors
+http://www.aaronsw.com/weblog/nummi|comment|No, you can’t force other people to change. You can, however, change just about everything else. And usually, that’s enough.
+http://www.aaronsw.com/weblog/nummi|title|Fix the machine, not the person (Aaron Swartz's Raw Thought)
+http://www.aaronsw.com/weblog/nummi|creationTime|2013-01-13T14:32:28Z
+http://www-128.ibm.com/developerworks/library/j-ajax1/?ca=dgr-lnxw01Ajax|creationDate|2005-09-27
+http://www-128.ibm.com/developerworks/library/j-ajax1/?ca=dgr-lnxw01Ajax|tag|http://www.semanlink.net/tag/howto
+http://www-128.ibm.com/developerworks/library/j-ajax1/?ca=dgr-lnxw01Ajax|tag|http://www.semanlink.net/tag/dev
+http://www-128.ibm.com/developerworks/library/j-ajax1/?ca=dgr-lnxw01Ajax|tag|http://www.semanlink.net/tag/ajax
+http://www-128.ibm.com/developerworks/library/j-ajax1/?ca=dgr-lnxw01Ajax|title|Ajax for Java developers: Build dynamic Java applications
+https://arxiv.org/abs/1704.08803|creationDate|2019-01-27
+https://arxiv.org/abs/1704.08803|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1704.08803|tag|http://www.semanlink.net/tag/these_irit_renault_biblio_initiale
+https://arxiv.org/abs/1704.08803|tag|http://www.semanlink.net/tag/weak_supervision
+https://arxiv.org/abs/1704.08803|tag|http://www.semanlink.net/tag/machine_learned_ranking
+https://arxiv.org/abs/1704.08803|tag|http://www.semanlink.net/tag/okapi_bm25
+https://arxiv.org/abs/1704.08803|arxiv_author|Jaap Kamps
+https://arxiv.org/abs/1704.08803|arxiv_author|Aliaksei Severyn
+https://arxiv.org/abs/1704.08803|arxiv_author|Hamed Zamani
+https://arxiv.org/abs/1704.08803|arxiv_author|W. Bruce Croft
+https://arxiv.org/abs/1704.08803|arxiv_author|Mostafa Dehghani
+https://arxiv.org/abs/1704.08803|comment|"Main Idea: To **leverage large amounts of unsupervised data to infer “weak” labels** and use that signal for learning supervised models as if we had the ground truth labels. See [blog post](/doc/?uri=http%3A%2F%2Fmostafadehghani.com%2F2017%2F04%2F23%2Fbeating-the-teacher-neural-ranking-models-with-weak-supervision%2F):
+
+> This is **truly awesome since we have only used BM25 as the supervisor to train a model which performs better than BM25** itself!
+"
+https://arxiv.org/abs/1704.08803|relatedDoc|http://mostafadehghani.com/2017/04/23/beating-the-teacher-neural-ranking-models-with-weak-supervision/
+https://arxiv.org/abs/1704.08803|title|[1704.08803] Neural Ranking Models with Weak Supervision
+https://arxiv.org/abs/1704.08803|creationTime|2019-01-27T17:31:01Z
+https://arxiv.org/abs/1704.08803|arxiv_summary|"Despite the impressive improvements achieved by unsupervised deep neural
+networks in computer vision and NLP tasks, such improvements have not yet been
+observed in ranking for information retrieval. The reason may be the complexity
+of the ranking problem, as it is not obvious how to learn from queries and
+documents when no supervised signal is available. Hence, in this paper, we
+propose to train a neural ranking model using weak supervision, where labels
+are obtained automatically without human annotators or any external resources
+(e.g., click data). To this aim, we use the output of an unsupervised ranking
+model, such as BM25, as a weak supervision signal. We further train a set of
+simple yet effective ranking models based on feed-forward neural networks. We
+study their effectiveness under various learning scenarios (point-wise and
+pair-wise models) and using different input representations (i.e., from
+encoding query-document pairs into dense/sparse vectors to using word embedding
+representation). We train our networks using tens of millions of training
+instances and evaluate it on two standard collections: a homogeneous news
+collection(Robust) and a heterogeneous large-scale web collection (ClueWeb).
+Our experiments indicate that employing proper objective functions and letting
+the networks to learn the input representation based on weakly supervised data
+leads to impressive performance, with over 13% and 35% MAP improvements over
+the BM25 model on the Robust and the ClueWeb collections. Our findings also
+suggest that supervised neural ranking models can greatly benefit from
+pre-training on large amounts of weakly labeled data that can be easily
+obtained from unsupervised IR models."
+https://arxiv.org/abs/1704.08803|arxiv_firstAuthor|Mostafa Dehghani
+https://arxiv.org/abs/1704.08803|arxiv_updated|2017-05-29T11:58:34Z
+https://arxiv.org/abs/1704.08803|arxiv_title|Neural Ranking Models with Weak Supervision
+https://arxiv.org/abs/1704.08803|arxiv_published|2017-04-28T04:08:47Z
+https://arxiv.org/abs/1704.08803|arxiv_num|1704.08803
+https://github.com/kritarthanand/Disambiguation-Stanbol|creationDate|2012-08-10
+https://github.com/kritarthanand/Disambiguation-Stanbol|tag|http://www.semanlink.net/tag/apache_stanbol
+https://github.com/kritarthanand/Disambiguation-Stanbol|comment|"Before Installation of Engine if you input text ""Paris is a small city in the state of Texas"". It identifies Texas as state of US and Paris as Paris, France. When the Entity disambiguation engine works on it to it correctly identifies Paris as Paris, Texas."
+https://github.com/kritarthanand/Disambiguation-Stanbol|title|kritarthanand/Disambiguation-Stanbol · GitHub
+https://github.com/kritarthanand/Disambiguation-Stanbol|creationTime|2012-08-10T09:16:22Z
+http://logicerror.com/semanticWeb-webdev|creationDate|2005-09-28
+http://logicerror.com/semanticWeb-webdev|tag|http://www.semanlink.net/tag/semantic_web_services
+http://logicerror.com/semanticWeb-webdev|tag|http://www.semanlink.net/tag/semantic_web_services_vs_soap
+http://logicerror.com/semanticWeb-webdev|title|The Semantic Web (for Web Developers)
+https://databricks.com/blog/2015/02/17/introducing-dataframes-in-spark-for-large-scale-data-science.html|creationDate|2015-02-19
+https://databricks.com/blog/2015/02/17/introducing-dataframes-in-spark-for-large-scale-data-science.html|tag|http://www.semanlink.net/tag/apache_spark
+https://databricks.com/blog/2015/02/17/introducing-dataframes-in-spark-for-large-scale-data-science.html|title|Introducing DataFrames in Spark for Large Scale Data Science Databricks
+https://databricks.com/blog/2015/02/17/introducing-dataframes-in-spark-for-large-scale-data-science.html|creationTime|2015-02-19T01:18:27Z
+http://www.cems.uwe.ac.uk/~phale/|creationDate|2006-05-13
+http://www.cems.uwe.ac.uk/~phale/|tag|http://www.semanlink.net/tag/linkto_semanlink
+http://www.cems.uwe.ac.uk/~phale/|tag|http://www.semanlink.net/tag/user_driven_modelling
+http://www.cems.uwe.ac.uk/~phale/|comment|My current research is on a technique of 'User Driven Modelling/Programming'. My intention is to enable non-programmers to create software from a user interface that allows them to model a particular problem or scenario.
+http://www.cems.uwe.ac.uk/~phale/|title|Peter Hale PhD Research - User Driven Programming
+http://schema.rdfs.org/|creationDate|2011-06-07
+http://schema.rdfs.org/|tag|http://www.semanlink.net/tag/schema_org
+http://schema.rdfs.org/|tag|http://www.semanlink.net/tag/linked_data
+http://schema.rdfs.org/|tag|http://www.semanlink.net/tag/microdata
+http://schema.rdfs.org/|comment|Effort by people from the Linked Data community to express the terms provided by the Schema.org consortium in RDF.
+http://schema.rdfs.org/|title|schema.rdfs.org
+http://schema.rdfs.org/|creationTime|2011-06-07T13:49:26Z
+http://swui.semanticweb.org/swui06/papers/Berners-Lee/Berners-Lee.pdf|creationDate|2007-01-02
+http://swui.semanticweb.org/swui06/papers/Berners-Lee/Berners-Lee.pdf|tag|http://www.semanlink.net/tag/tabulator
+http://swui.semanticweb.org/swui06/papers/Berners-Lee/Berners-Lee.pdf|title|Tabulator: Exploring and Analyzing linked data on the Semantic Web
+https://towardsdatascience.com/making-your-neural-network-say-i-dont-know-bayesian-nns-using-pyro-and-pytorch-b1c24e6ab8cd|creationDate|2018-12-04
+https://towardsdatascience.com/making-your-neural-network-say-i-dont-know-bayesian-nns-using-pyro-and-pytorch-b1c24e6ab8cd|tag|http://www.semanlink.net/tag/variational_bayesian_methods
+https://towardsdatascience.com/making-your-neural-network-say-i-dont-know-bayesian-nns-using-pyro-and-pytorch-b1c24e6ab8cd|tag|http://www.semanlink.net/tag/bayesian_deep_learning
+https://towardsdatascience.com/making-your-neural-network-say-i-dont-know-bayesian-nns-using-pyro-and-pytorch-b1c24e6ab8cd|title|Making Your Neural Network Say “I Don’t Know” — Bayesian NNs using Pyro and PyTorch
+https://towardsdatascience.com/making-your-neural-network-say-i-dont-know-bayesian-nns-using-pyro-and-pytorch-b1c24e6ab8cd|creationTime|2018-12-04T18:59:35Z
+http://www.semanlink.net/doc/2019/05/a_bird_eye_view_of_optimization|creationDate|2019-05-27
+http://www.semanlink.net/doc/2019/05/a_bird_eye_view_of_optimization|tag|http://www.semanlink.net/tag/algorithmes
+http://www.semanlink.net/doc/2019/05/a_bird_eye_view_of_optimization|tag|http://www.semanlink.net/tag/optimization
+http://www.semanlink.net/doc/2019/05/a_bird_eye_view_of_optimization|title|A bird-eye view of optimization algorithms
+http://www.semanlink.net/doc/2019/05/a_bird_eye_view_of_optimization|bookmarkOf|http://fa.bianp.net/teaching/2018/eecs227at/
+http://www.semanlink.net/doc/2019/05/a_bird_eye_view_of_optimization|creationTime|2019-05-27T09:41:57Z
+https://lists.w3.org/Archives/Public/public-linked-json/2012Aug/0029.html|creationDate|2017-01-05
+https://lists.w3.org/Archives/Public/public-linked-json/2012Aug/0029.html|tag|http://www.semanlink.net/tag/json_2_json_ld
+https://lists.w3.org/Archives/Public/public-linked-json/2012Aug/0029.html|title|RE: DuckDuckGo data to JSON-LD? from Markus Lanthaler on 2012-08-24 (public-linked-json@w3.org from August 2012)
+https://lists.w3.org/Archives/Public/public-linked-json/2012Aug/0029.html|creationTime|2017-01-05T17:08:47Z
+http://en.wikipedia.org/wiki/Boyz_n_the_Hood|creationDate|2009-10-18
+http://en.wikipedia.org/wiki/Boyz_n_the_Hood|tag|http://www.semanlink.net/tag/banlieue
+http://en.wikipedia.org/wiki/Boyz_n_the_Hood|tag|http://www.semanlink.net/tag/violence
+http://en.wikipedia.org/wiki/Boyz_n_the_Hood|tag|http://www.semanlink.net/tag/los_angeles
+http://en.wikipedia.org/wiki/Boyz_n_the_Hood|tag|http://www.semanlink.net/tag/film_americain
+http://en.wikipedia.org/wiki/Boyz_n_the_Hood|title|Boyz n the Hood
+http://en.wikipedia.org/wiki/Boyz_n_the_Hood|creationTime|2009-10-18T22:41:45Z
+http://news.bbc.co.uk/1/hi/sci/tech/4714135.stm|creationDate|2005-08-22
+http://news.bbc.co.uk/1/hi/sci/tech/4714135.stm|tag|http://www.semanlink.net/tag/aichi
+http://news.bbc.co.uk/1/hi/sci/tech/4714135.stm|tag|http://www.semanlink.net/tag/robot_humanoide
+http://news.bbc.co.uk/1/hi/sci/tech/4714135.stm|title|BBC NEWS Japanese develop 'female' android
+http://news.bbc.co.uk/1/hi/sci/tech/4714135.stm|source|BBC
+http://owled2007.iut-velizy.uvsq.fr/|creationDate|2007-03-04
+http://owled2007.iut-velizy.uvsq.fr/|tag|http://www.semanlink.net/tag/owled_2007
+http://owled2007.iut-velizy.uvsq.fr/|title|OWL: Experiences and Directions - OWLED 2007
+http://owled2007.iut-velizy.uvsq.fr/|creationTime|2007-03-04T17:07:23Z
+https://www.quora.com/Will-capsule-networks-replace-neural-networks|creationDate|2018-04-26
+https://www.quora.com/Will-capsule-networks-replace-neural-networks|tag|http://www.semanlink.net/tag/capsule_networks
+https://www.quora.com/Will-capsule-networks-replace-neural-networks|title|Will capsule networks replace neural networks? - Quora
+https://www.quora.com/Will-capsule-networks-replace-neural-networks|creationTime|2018-04-26T14:21:15Z
+https://www.topbots.com/4-different-approaches-natural-language-processing-understanding/|creationDate|2018-10-26
+https://www.topbots.com/4-different-approaches-natural-language-processing-understanding/|tag|http://www.semanlink.net/tag/meaning_in_nlp
+https://www.topbots.com/4-different-approaches-natural-language-processing-understanding/|tag|http://www.semanlink.net/tag/nlp
+https://www.topbots.com/4-different-approaches-natural-language-processing-understanding/|tag|http://www.semanlink.net/tag/grounded_language_learning
+https://www.topbots.com/4-different-approaches-natural-language-processing-understanding/|tag|http://www.semanlink.net/tag/nlu
+https://www.topbots.com/4-different-approaches-natural-language-processing-understanding/|comment|The antithesis of grounded language is inferred language. Inferred language derives meaning from words themselves rather than what they represent. When trained only on large corpuses of text, but not on real-world representations, statistical methods for NLP and NLU lack true understanding of what words mean.
+https://www.topbots.com/4-different-approaches-natural-language-processing-understanding/|title|4 Approaches To Natural Language Processing & Understanding
+https://www.topbots.com/4-different-approaches-natural-language-processing-understanding/|creationTime|2018-10-26T00:36:36Z
+http://semanticweb.com/darpa-wants-to-develop-machines-that-mimic-the-cerebral-neocortex_b39290|creationDate|2013-09-10
+http://semanticweb.com/darpa-wants-to-develop-machines-that-mimic-the-cerebral-neocortex_b39290|tag|http://www.semanlink.net/tag/neocortex
+http://semanticweb.com/darpa-wants-to-develop-machines-that-mimic-the-cerebral-neocortex_b39290|tag|http://www.semanlink.net/tag/darpa
+http://semanticweb.com/darpa-wants-to-develop-machines-that-mimic-the-cerebral-neocortex_b39290|title|DARPA Wants To Develop Machines That Mimic The Cerebral Neocortex - semanticweb.com
+http://semanticweb.com/darpa-wants-to-develop-machines-that-mimic-the-cerebral-neocortex_b39290|creationTime|2013-09-10T01:49:53Z
+http://www.autonews.com/apps/pbcs.dll/article?AID=/20110712/RETAIL01/110719963/1400|creationDate|2011-07-13
+http://www.autonews.com/apps/pbcs.dll/article?AID=/20110712/RETAIL01/110719963/1400|tag|http://www.semanlink.net/tag/automobile
+http://www.autonews.com/apps/pbcs.dll/article?AID=/20110712/RETAIL01/110719963/1400|comment|Instead of measuring e-mail leads, …, automakers should monitor page views of vehicle details
+http://www.autonews.com/apps/pbcs.dll/article?AID=/20110712/RETAIL01/110719963/1400|title|Online marketers should shift techniques, AutoTrader exec says
+http://www.autonews.com/apps/pbcs.dll/article?AID=/20110712/RETAIL01/110719963/1400|creationTime|2011-07-13T17:57:42Z
+http://hugues.blogs.com/|creationDate|2006-03-21
+http://hugues.blogs.com/|tag|http://www.semanlink.net/tag/politique
+http://hugues.blogs.com/|tag|http://www.semanlink.net/tag/blog
+http://hugues.blogs.com/|title|www.com-vat.com : Commentaires & vaticinations www.com-vat.com Commentaires & vaticinations
+http://www.jair.org/media/989/live-989-2063-jair.pdf|creationDate|2009-01-06
+http://www.jair.org/media/989/live-989-2063-jair.pdf|tag|http://www.semanlink.net/tag/knowledge_compilation
+http://www.jair.org/media/989/live-989-2063-jair.pdf|comment|Analyses different compilation approaches according to two key dimensions: the succinctness of the target compilation language, and the class of queries and transformations that the language supports in polytime.
+http://www.jair.org/media/989/live-989-2063-jair.pdf|title|A Knowledge Compilation Map - Adnan Darwiche, Pierre Marquis
+http://www.jair.org/media/989/live-989-2063-jair.pdf|creationTime|2009-01-06T22:27:28Z
+http://beta.kasabi.com/|creationDate|2011-07-07
+http://beta.kasabi.com/|tag|http://www.semanlink.net/tag/leigh_dodds
+http://beta.kasabi.com/|tag|http://www.semanlink.net/tag/talis
+http://beta.kasabi.com/|tag|http://www.semanlink.net/tag/sw_online_tools
+http://beta.kasabi.com/|title|Kasabi
+http://beta.kasabi.com/|creationTime|2011-07-07T15:38:34Z
+http://protege.cim3.net/cgi-bin/wiki.pl?ModelingTipsAndTricks|creationDate|2006-12-23
+http://protege.cim3.net/cgi-bin/wiki.pl?ModelingTipsAndTricks|tag|http://www.semanlink.net/tag/protege
+http://protege.cim3.net/cgi-bin/wiki.pl?ModelingTipsAndTricks|tag|http://www.semanlink.net/tag/dev
+http://protege.cim3.net/cgi-bin/wiki.pl?ModelingTipsAndTricks|title|ProtegeWiki: Modeling Tips And Tricks
+http://www.faviki.com/|creationDate|2010-01-20
+http://www.faviki.com/|tag|http://www.semanlink.net/tag/semanlink_related
+http://www.faviki.com/|tag|http://www.semanlink.net/tag/wikipedia
+http://www.faviki.com/|tag|http://www.semanlink.net/tag/keep_new
+http://www.faviki.com/|tag|http://www.semanlink.net/tag/social_bookmarking
+http://www.faviki.com/|tag|http://www.semanlink.net/tag/dbpedia
+http://www.faviki.com/|comment|Faviki - Social bookmarking tool using smart semantic Wikipedia (DBpedia) tags
+http://www.faviki.com/|title|Faviki
+http://www.faviki.com/|creationTime|2010-01-20T18:27:05Z
+http://dig.csail.mit.edu/2009/Papers/ISWC/rdf-access-control/paper.pdf|creationDate|2010-12-06
+http://dig.csail.mit.edu/2009/Papers/ISWC/rdf-access-control/paper.pdf|tag|http://www.semanlink.net/tag/tim_berners_lee
+http://dig.csail.mit.edu/2009/Papers/ISWC/rdf-access-control/paper.pdf|tag|http://www.semanlink.net/tag/social_semantic_web
+http://dig.csail.mit.edu/2009/Papers/ISWC/rdf-access-control/paper.pdf|tag|http://www.semanlink.net/tag/foaf_ssl
+http://dig.csail.mit.edu/2009/Papers/ISWC/rdf-access-control/paper.pdf|tag|http://www.semanlink.net/tag/access_control
+http://dig.csail.mit.edu/2009/Papers/ISWC/rdf-access-control/paper.pdf|comment|James Hollenbach, Joe Presbrey, and Tim Berners-Lee
+http://dig.csail.mit.edu/2009/Papers/ISWC/rdf-access-control/paper.pdf|title|Using RDF Metadata To Enable Access Control on the Social Semantic Web
+http://dig.csail.mit.edu/2009/Papers/ISWC/rdf-access-control/paper.pdf|creationTime|2010-12-06T16:25:03Z
+http://www.wired.com/autopia/2009/05/the-grid-our-cars-and-the-internet-one-idea-to-link-them-all/|creationDate|2009-05-11
+http://www.wired.com/autopia/2009/05/the-grid-our-cars-and-the-internet-one-idea-to-link-them-all/|tag|http://www.semanlink.net/tag/electric_car
+http://www.wired.com/autopia/2009/05/the-grid-our-cars-and-the-internet-one-idea-to-link-them-all/|tag|http://www.semanlink.net/tag/mesh_network
+http://www.wired.com/autopia/2009/05/the-grid-our-cars-and-the-internet-one-idea-to-link-them-all/|tag|http://www.semanlink.net/tag/smart_energy_grids +http://www.wired.com/autopia/2009/05/the-grid-our-cars-and-the-internet-one-idea-to-link-them-all/|tag|http://www.semanlink.net/tag/automobile +http://www.wired.com/autopia/2009/05/the-grid-our-cars-and-the-internet-one-idea-to-link-them-all/|title|The Grid, Our Cars and the Net: One Idea to Link Them All Autopia +http://www.wired.com/autopia/2009/05/the-grid-our-cars-and-the-internet-one-idea-to-link-them-all/|creationTime|2009-05-11T22:45:07Z +https://gingkoapp.com/|creationDate|2015-05-12 +https://gingkoapp.com/|tag|http://www.semanlink.net/tag/jarriel_perlman +https://gingkoapp.com/|tag|http://www.semanlink.net/tag/thinking_tools +https://gingkoapp.com/|tag|http://www.semanlink.net/tag/semanlink_related +https://gingkoapp.com/|title|Gingko App +https://gingkoapp.com/|creationTime|2015-05-12T01:49:24Z +https://openclassrooms.com/courses/des-applications-ultra-rapides-avec-node-js?status=published|creationDate|2017-04-03 +https://openclassrooms.com/courses/des-applications-ultra-rapides-avec-node-js?status=published|tag|http://www.semanlink.net/tag/node_js +https://openclassrooms.com/courses/des-applications-ultra-rapides-avec-node-js?status=published|tag|http://www.semanlink.net/tag/tutorial +https://openclassrooms.com/courses/des-applications-ultra-rapides-avec-node-js?status=published|title|Tutorial : Des applications ultra-rapides avec Node.js +https://openclassrooms.com/courses/des-applications-ultra-rapides-avec-node-js?status=published|creationTime|2017-04-03T17:28:27Z +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0044.html|creationDate|2015-02-18 +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0044.html|tag|http://www.semanlink.net/tag/markus_lanthaler +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0044.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0044.html|tag|http://www.semanlink.net/tag/schema_org_actions +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0044.html|tag|http://www.semanlink.net/tag/public_hydra_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0044.html|tag|http://www.semanlink.net/tag/hydra +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0044.html|comment|"see also hydra list" +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0044.html|title|Actions in Schema.org / Hydra from Markus Lanthaler on 2013-06-08 (public-vocabs@w3.org from June 2013) +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0044.html|creationTime|2015-02-18T01:33:00Z +http://www.lemonde.fr/pixels/article/2018/04/27/l-inventeur-du-web-exhorte-a-reguler-l-intelligence-artificielle_5291555_4408996.html|creationDate|2018-04-28 +http://www.lemonde.fr/pixels/article/2018/04/27/l-inventeur-du-web-exhorte-a-reguler-l-intelligence-artificielle_5291555_4408996.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.lemonde.fr/pixels/article/2018/04/27/l-inventeur-du-web-exhorte-a-reguler-l-intelligence-artificielle_5291555_4408996.html|tag|http://www.semanlink.net/tag/antoine_bordes +http://www.lemonde.fr/pixels/article/2018/04/27/l-inventeur-du-web-exhorte-a-reguler-l-intelligence-artificielle_5291555_4408996.html|tag|http://www.semanlink.net/tag/vint_cerf 
+http://www.lemonde.fr/pixels/article/2018/04/27/l-inventeur-du-web-exhorte-a-reguler-l-intelligence-artificielle_5291555_4408996.html|tag|http://www.semanlink.net/tag/ai_dangers +http://www.lemonde.fr/pixels/article/2018/04/27/l-inventeur-du-web-exhorte-a-reguler-l-intelligence-artificielle_5291555_4408996.html|tag|http://www.semanlink.net/tag/thewebconf_2018 +http://www.lemonde.fr/pixels/article/2018/04/27/l-inventeur-du-web-exhorte-a-reguler-l-intelligence-artificielle_5291555_4408996.html|title|L’inventeur du Web exhorte à réguler l’intelligence artificielle +http://www.lemonde.fr/pixels/article/2018/04/27/l-inventeur-du-web-exhorte-a-reguler-l-intelligence-artificielle_5291555_4408996.html|creationTime|2018-04-28T16:16:19Z +http://swui.semanticweb.org/swui06/papers/Karger/Pathetic_Fallacy.html|creationDate|2006-12-01 +http://swui.semanticweb.org/swui06/papers/Karger/Pathetic_Fallacy.html|tag|http://www.semanlink.net/tag/rdf_graphs +http://swui.semanticweb.org/swui06/papers/Karger/Pathetic_Fallacy.html|tag|http://www.semanlink.net/tag/graph_visualization +http://swui.semanticweb.org/swui06/papers/Karger/Pathetic_Fallacy.html|tag|http://www.semanlink.net/tag/semantic_web_ui +http://swui.semanticweb.org/swui06/papers/Karger/Pathetic_Fallacy.html|comment|Are graphs the right default representation for the Semantic Web?  And if not, how might we think about default presentations for the Semantic Web in order to make accessible its promised benefits for knowledge building and sharing? +http://swui.semanticweb.org/swui06/papers/Karger/Pathetic_Fallacy.html|title|The Pathetic Fallacy of RDF +https://github.com/ahmadassaf/KBE|creationDate|2014-05-29 +https://github.com/ahmadassaf/KBE|tag|http://www.semanlink.net/tag/github_project +https://github.com/ahmadassaf/KBE|tag|http://www.semanlink.net/tag/node_js +https://github.com/ahmadassaf/KBE|tag|http://www.semanlink.net/tag/google_knowledge_graph +https://github.com/ahmadassaf/KBE|comment|This is a node.js application that aims at extracting the knowledge represented in the Google infoboxes (aka Google Knowlege Graph Panel) +https://github.com/ahmadassaf/KBE|title|Knowledge-base Extractor (github) +https://github.com/ahmadassaf/KBE|creationTime|2014-05-29T19:35:07Z +http://www.ldodds.com/blog/archives/000252.html|creationDate|2008-04-01 +http://www.ldodds.com/blog/archives/000252.html|tag|http://www.semanlink.net/tag/arq_property_functions +http://www.ldodds.com/blog/archives/000252.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/archives/000252.html|title|Lost Boy: Writing an ARQ Extension Function +http://www.ldodds.com/blog/archives/000252.html|creationTime|2008-04-01T17:17:08Z +http://vocab.org/changeset/schema.html|creationDate|2014-09-15 +http://vocab.org/changeset/schema.html|tag|http://www.semanlink.net/tag/ldp_updates +http://vocab.org/changeset/schema.html|tag|http://www.semanlink.net/tag/rdf_vocabularies +http://vocab.org/changeset/schema.html|tag|http://www.semanlink.net/tag/talis +http://vocab.org/changeset/schema.html|tag|http://www.semanlink.net/tag/ian_davis +http://vocab.org/changeset/schema.html|comment|This vocabulary defines a set of terms for describing changes to resource descriptions. 
+http://vocab.org/changeset/schema.html|title|Changeset +http://vocab.org/changeset/schema.html|creationTime|2014-09-15T11:30:18Z +http://www.agu.org/news/press/pr_archives/2013/2013-11.shtml|creationDate|2013-03-20 +http://www.agu.org/news/press/pr_archives/2013/2013-11.shtml|tag|http://www.semanlink.net/tag/voyager +http://www.agu.org/news/press/pr_archives/2013/2013-11.shtml|title|Voyager 1 has entered a new region of space, sudden changes in cosmic rays indicate +http://www.agu.org/news/press/pr_archives/2013/2013-11.shtml|creationTime|2013-03-20T23:13:14Z +https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd|creationDate|2015-04-29 +https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd|tag|http://www.semanlink.net/tag/sw_is_flawed +https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd|tag|http://www.semanlink.net/tag/bernard_vatant +https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd|tag|http://www.semanlink.net/tag/uri +https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd|tag|http://www.semanlink.net/tag/fps_post +https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd|tag|http://www.semanlink.net/tag/linked_data +https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd|tag|http://www.semanlink.net/tag/deri +https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd|comment|"vocab.deri.ie is down again.
+Relying on URIs to identify things turns out to be very flimsy. URIs are not cool. As it is, the sem web / web of linked data is flawed. We need a way to identify things that do not depend on one person or one organization."
+https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd|title|What's happening to http://vocab.deri.ie/ ? http://vocab.deri.ie/void for…
+https://plus.google.com/u/0/+BernardVatant/posts/SrqvxvPGkpd|creationTime|2015-04-29T00:31:02Z
+http://pcworld.com/howto/article/0,aid,122094,pg,1,00.asp|creationDate|2005-09-12
+http://pcworld.com/howto/article/0,aid,122094,pg,1,00.asp|tag|http://www.semanlink.net/tag/ipod
+http://pcworld.com/howto/article/0,aid,122094,pg,1,00.asp|comment|"20 Things They Don't Want You to Know
+ "
+http://pcworld.com/howto/article/0,aid,122094,pg,1,00.asp|title|PCWorld.com - 20 Things They Don't Want You to Know
+http://www.businessinsider.fr/us/paul-krugman-says-bitcoin-is-a-bubble-2017-12/|creationDate|2017-12-18
+http://www.businessinsider.fr/us/paul-krugman-says-bitcoin-is-a-bubble-2017-12/|tag|http://www.semanlink.net/tag/bitcoin
+http://www.businessinsider.fr/us/paul-krugman-says-bitcoin-is-a-bubble-2017-12/|tag|http://www.semanlink.net/tag/paul_krugman
+http://www.businessinsider.fr/us/paul-krugman-says-bitcoin-is-a-bubble-2017-12/|title|PAUL KRUGMAN: Bitcoin is a more obvious bubble than housing was
+http://www.businessinsider.fr/us/paul-krugman-says-bitcoin-is-a-bubble-2017-12/|creationTime|2017-12-18T00:50:52Z
+http://neurosciencenews.com/memristor-computational-neuroscience-5232/|creationDate|2016-10-08
+http://neurosciencenews.com/memristor-computational-neuroscience-5232/|tag|http://www.semanlink.net/tag/neuroscience
+http://neurosciencenews.com/memristor-computational-neuroscience-5232/|tag|http://www.semanlink.net/tag/memristor
+http://neurosciencenews.com/memristor-computational-neuroscience-5232/|comment|Memristors are electrical components that limit or regulate the flow of electrical current in a circuit and can remember the amount of charge that was flowing through it and retain the data, even when the power is turned off.
+http://neurosciencenews.com/memristor-computational-neuroscience-5232/|title|First Demonstration of Brain Inspired Device to Power Artificial Systems – Neuroscience News +http://neurosciencenews.com/memristor-computational-neuroscience-5232/|creationTime|2016-10-08T15:15:22Z +http://jena.hpl.hp.com/~afs/SPARQL-Update.html|creationDate|2008-10-21 +http://jena.hpl.hp.com/~afs/SPARQL-Update.html|tag|http://www.semanlink.net/tag/andy_seaborne +http://jena.hpl.hp.com/~afs/SPARQL-Update.html|tag|http://www.semanlink.net/tag/sparql_update +http://jena.hpl.hp.com/~afs/SPARQL-Update.html|title|SPARQL Update +http://jena.hpl.hp.com/~afs/SPARQL-Update.html|creationTime|2008-10-21T14:26:59Z +http://www.eea.europa.eu/|creationDate|2012-01-03 +http://www.eea.europa.eu/|tag|http://www.semanlink.net/tag/environnement +http://www.eea.europa.eu/|tag|http://www.semanlink.net/tag/union_europeenne +http://www.eea.europa.eu/|title|European Environment Agency +http://www.eea.europa.eu/|creationTime|2012-01-03T12:02:08Z +http://supraconductivite.fr/fr/index.php|creationDate|2011-10-21 +http://supraconductivite.fr/fr/index.php|tag|http://www.semanlink.net/tag/supraconductivite +http://supraconductivite.fr/fr/index.php|title|La supraconductivité dans tous ses états +http://supraconductivite.fr/fr/index.php|creationTime|2011-10-21T23:26:01Z +http://www.semanlink.net/note/2007/07/une_societe_qui_est_prete_a_sacrifier_un_peu_de_sa_liberte|creationDate|2007-07-15 +http://www.semanlink.net/note/2007/07/une_societe_qui_est_prete_a_sacrifier_un_peu_de_sa_liberte|tag|http://www.semanlink.net/tag/benjamin_franklin +http://www.semanlink.net/note/2007/07/une_societe_qui_est_prete_a_sacrifier_un_peu_de_sa_liberte|tag|http://www.semanlink.net/tag/liberte +http://www.semanlink.net/note/2007/07/une_societe_qui_est_prete_a_sacrifier_un_peu_de_sa_liberte|tag|http://www.semanlink.net/tag/securite +http://www.semanlink.net/note/2007/07/une_societe_qui_est_prete_a_sacrifier_un_peu_de_sa_liberte|tag|http://www.semanlink.net/tag/citation +http://www.semanlink.net/note/2007/07/une_societe_qui_est_prete_a_sacrifier_un_peu_de_sa_liberte|title|They who can give up essential liberty to obtain a little temporary safety, deserve neither liberty nor safety. 
+http://www.semanlink.net/note/2007/07/une_societe_qui_est_prete_a_sacrifier_un_peu_de_sa_liberte|creationTime|2007-07-15T11:22:44Z
+http://www.semanlink.net/note/2007/07/une_societe_qui_est_prete_a_sacrifier_un_peu_de_sa_liberte|publish|true
+http://www.lemonde.fr/idees/article/2012/06/28/tombouctou-epicentre-du-nouvel-obscurantisme-islamiste-africain_1725995_3232.html?utm_source=dlvr.it&utm_medium=twitter#xtor=RSS-3208001|creationDate|2012-06-30
+http://www.lemonde.fr/idees/article/2012/06/28/tombouctou-epicentre-du-nouvel-obscurantisme-islamiste-africain_1725995_3232.html?utm_source=dlvr.it&utm_medium=twitter#xtor=RSS-3208001|tag|http://www.semanlink.net/tag/obscurantisme
+http://www.lemonde.fr/idees/article/2012/06/28/tombouctou-epicentre-du-nouvel-obscurantisme-islamiste-africain_1725995_3232.html?utm_source=dlvr.it&utm_medium=twitter#xtor=RSS-3208001|tag|http://www.semanlink.net/tag/manuscrits_de_tombouctou
+http://www.lemonde.fr/idees/article/2012/06/28/tombouctou-epicentre-du-nouvel-obscurantisme-islamiste-africain_1725995_3232.html?utm_source=dlvr.it&utm_medium=twitter#xtor=RSS-3208001|tag|http://www.semanlink.net/tag/aqmi
+http://www.lemonde.fr/idees/article/2012/06/28/tombouctou-epicentre-du-nouvel-obscurantisme-islamiste-africain_1725995_3232.html?utm_source=dlvr.it&utm_medium=twitter#xtor=RSS-3208001|tag|http://www.semanlink.net/tag/tombouctou
+http://www.lemonde.fr/idees/article/2012/06/28/tombouctou-epicentre-du-nouvel-obscurantisme-islamiste-africain_1725995_3232.html?utm_source=dlvr.it&utm_medium=twitter#xtor=RSS-3208001|title|Tombouctou, épicentre du nouvel obscurantisme islamiste africain
+http://www.lemonde.fr/idees/article/2012/06/28/tombouctou-epicentre-du-nouvel-obscurantisme-islamiste-africain_1725995_3232.html?utm_source=dlvr.it&utm_medium=twitter#xtor=RSS-3208001|creationTime|2012-06-30T16:09:02Z
+http://usefulinc.com/edd/blog|creationDate|2005-06-24
+http://usefulinc.com/edd/blog|tag|http://www.semanlink.net/tag/edd_dumbill
+http://usefulinc.com/edd/blog|tag|http://www.semanlink.net/tag/web_site_design
+http://usefulinc.com/edd/blog|tag|http://www.semanlink.net/tag/blog
+http://usefulinc.com/edd/blog|tag|http://www.semanlink.net/tag/css_example
+http://usefulinc.com/edd/blog|title|Edd Dumbill's Weblog: Behind the Times
+http://vivoweb.org/|creationDate|2016-11-08
+http://vivoweb.org/|tag|http://www.semanlink.net/tag/rdf_tools
+http://vivoweb.org/|title|VIVO connect - share - discover
+http://vivoweb.org/|creationTime|2016-11-08T15:44:16Z
+https://github.com/dbpedia/GSoC/issues/19|creationDate|2019-03-26
+https://github.com/dbpedia/GSoC/issues/19|tag|http://www.semanlink.net/tag/graph_neural_networks
+https://github.com/dbpedia/GSoC/issues/19|tag|http://www.semanlink.net/tag/dbpedia
+https://github.com/dbpedia/GSoC/issues/19|tag|http://www.semanlink.net/tag/question_answering
+https://github.com/dbpedia/GSoC/issues/19|title|A Neural QA Model for DBpedia (GSoC 2019)
+https://github.com/dbpedia/GSoC/issues/19|creationTime|2019-03-26T08:15:19Z
+http://www.bbc.com/news/uk-politics-eu-referendum-36641390|creationDate|2016-06-28
+http://www.bbc.com/news/uk-politics-eu-referendum-36641390|tag|http://www.semanlink.net/tag/brexit
+http://www.bbc.com/news/uk-politics-eu-referendum-36641390|title|Reality Check: Have Leave campaigners changed their minds? - BBC News
+http://www.bbc.com/news/uk-politics-eu-referendum-36641390|creationTime|2016-06-28T10:15:27Z
+https://realpython.com/python-speech-recognition/|creationDate|2019-03-30
+https://realpython.com/python-speech-recognition/|tag|http://www.semanlink.net/tag/speech_recognition
+https://realpython.com/python-speech-recognition/|tag|http://www.semanlink.net/tag/python
+https://realpython.com/python-speech-recognition/|title|The Ultimate Guide To Speech Recognition With Python – Real Python
+https://realpython.com/python-speech-recognition/|creationTime|2019-03-30T15:51:32Z
+http://psi.cecs.anu.edu.au/|creationDate|2012-04-10
+http://psi.cecs.anu.edu.au/|tag|http://www.semanlink.net/tag/machine_learning
+http://psi.cecs.anu.edu.au/|tag|http://www.semanlink.net/tag/saas
+http://psi.cecs.anu.edu.au/|title|Machine Learning as a Service
+http://psi.cecs.anu.edu.au/|creationTime|2012-04-10T23:28:57Z
+https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/|creationDate|2017-12-30
+https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/|tag|http://www.semanlink.net/tag/semantic_hashing
+https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/|tag|http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model
+https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/|comment|"we strive to develop a series of **new latent semantic models with a deep structure that project queries and documents into a common low-dimensional space** where the relevance of a document given a query is readily computed as the distance between them. The proposed deep structured semantic models are discriminatively trained by maximizing the conditional likelihood of the clicked documents given a query using the clickthrough data.
+To make our models applicable to large-scale Web search applications, we also use a technique called word hashing
+"
+https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/|title|Learning Deep Structured Semantic Models for Web Search using Clickthrough Data - Microsoft Research (2013)
+https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/|creationTime|2017-12-30T02:10:49Z
+http://www.radioansiaes.pt/noticia.aspx?id=%7Be571728b-cf88-483d-b1a7-860512a19e88%7D|creationDate|2008-11-28
+http://www.radioansiaes.pt/noticia.aspx?id=%7Be571728b-cf88-483d-b1a7-860512a19e88%7D|tag|http://www.semanlink.net/tag/gastronomie
+http://www.radioansiaes.pt/noticia.aspx?id=%7Be571728b-cf88-483d-b1a7-860512a19e88%7D|tag|http://www.semanlink.net/tag/ofir
+http://www.radioansiaes.pt/noticia.aspx?id=%7Be571728b-cf88-483d-b1a7-860512a19e88%7D|title|Cozinheiro brasileiro Ofir Oliveira veio ao Douro para encantar
+http://www.radioansiaes.pt/noticia.aspx?id=%7Be571728b-cf88-483d-b1a7-860512a19e88%7D|creationTime|2008-11-28T23:02:53Z
+https://pt.wikipedia.org/wiki/Aquarius_(filme)|creationDate|2019-02-27
+https://pt.wikipedia.org/wiki/Aquarius_(filme)|tag|http://www.semanlink.net/tag/sonia_braga
+https://pt.wikipedia.org/wiki/Aquarius_(filme)|tag|http://www.semanlink.net/tag/film_bresilien
+https://pt.wikipedia.org/wiki/Aquarius_(filme)|title|Aquarius (filme)
+https://pt.wikipedia.org/wiki/Aquarius_(filme)|creationTime|2019-02-27T23:14:32Z
+http://unweekenddansleperche.fr/le-gros-chene-de-la-loupe/|creationDate|2019-04-22
+http://unweekenddansleperche.fr/le-gros-chene-de-la-loupe/|tag|http://www.semanlink.net/tag/arbres_remarquables
+http://unweekenddansleperche.fr/le-gros-chene-de-la-loupe/|tag|http://www.semanlink.net/tag/chene
+http://unweekenddansleperche.fr/le-gros-chene-de-la-loupe/|title|Le Gros Chêne de La Loupe
+http://unweekenddansleperche.fr/le-gros-chene-de-la-loupe/|creationTime|2019-04-22T22:45:43Z
+http://www.intelligence.senate.gov/study2014/sscistudy1.pdf|creationDate|2014-12-14
+http://www.intelligence.senate.gov/study2014/sscistudy1.pdf|tag|http://www.semanlink.net/tag/cia
+http://www.intelligence.senate.gov/study2014/sscistudy1.pdf|tag|http://www.semanlink.net/tag/tortures_americaines
+http://www.intelligence.senate.gov/study2014/sscistudy1.pdf|title|Rapport sur les tortures menées par la CIA
+http://www.intelligence.senate.gov/study2014/sscistudy1.pdf|creationTime|2014-12-14T19:44:53Z
+http://www.cnrs.fr/inc/communication/direct_labos/boudalis2.htm|creationDate|2018-11-05
+http://www.cnrs.fr/inc/communication/direct_labos/boudalis2.htm|tag|http://www.semanlink.net/tag/quantum_computing
+http://www.cnrs.fr/inc/communication/direct_labos/boudalis2.htm|comment|un premier pas vers l’utilisation de nano-aimants moléculaires en tant qu’éléments de mémoire quantique électriquement contrôlés
+http://www.cnrs.fr/inc/communication/direct_labos/boudalis2.htm|title|Un stockage d’information quantique d’origine moléculaire ? CNRS
+http://www.cnrs.fr/inc/communication/direct_labos/boudalis2.htm|creationTime|2018-11-05T22:29:53Z
+https://github.com/winterbe/java8-tutorial|creationDate|2015-11-16
+https://github.com/winterbe/java8-tutorial|tag|http://www.semanlink.net/tag/tutorial
+https://github.com/winterbe/java8-tutorial|tag|http://www.semanlink.net/tag/java_8
+https://github.com/winterbe/java8-tutorial|title|winterbe/java8-tutorial
+https://github.com/winterbe/java8-tutorial|creationTime|2015-11-16T18:44:37Z
+http://code.google.com/p/skoseditor/|creationDate|2010-05-07
+http://code.google.com/p/skoseditor/|tag|http://www.semanlink.net/tag/protege
+http://code.google.com/p/skoseditor/|tag|http://www.semanlink.net/tag/skos_editor
+http://code.google.com/p/skoseditor/|title|skoseditor
+http://code.google.com/p/skoseditor/|creationTime|2010-05-07T11:52:08Z
+https://www.lemonde.fr/afrique/article/2018/08/06/un-rapport-pointe-les-failles-des-etudes-internationales-et-liberales-sur-l-afrique_5339789_3212.html|creationDate|2018-08-06
+https://www.lemonde.fr/afrique/article/2018/08/06/un-rapport-pointe-les-failles-des-etudes-internationales-et-liberales-sur-l-afrique_5339789_3212.html|tag|http://www.semanlink.net/tag/afrique
+https://www.lemonde.fr/afrique/article/2018/08/06/un-rapport-pointe-les-failles-des-etudes-internationales-et-liberales-sur-l-afrique_5339789_3212.html|title|Un rapport pointe les failles des études internationales (et libérales) sur l’Afrique
+https://www.lemonde.fr/afrique/article/2018/08/06/un-rapport-pointe-les-failles-des-etudes-internationales-et-liberales-sur-l-afrique_5339789_3212.html|creationTime|2018-08-06T19:31:59Z
+http://www.numerama.com/magazine/20928-plus-fort-qu-hadopi-un-site-devoile-ce-que-vous-avez-pirate-sur-bittorrent.html|creationDate|2011-12-17
+http://www.numerama.com/magazine/20928-plus-fort-qu-hadopi-un-site-devoile-ce-que-vous-avez-pirate-sur-bittorrent.html|tag|http://www.semanlink.net/tag/bittorrent
+http://www.numerama.com/magazine/20928-plus-fort-qu-hadopi-un-site-devoile-ce-que-vous-avez-pirate-sur-bittorrent.html|tag|http://www.semanlink.net/tag/hadopi_riposte_graduee
+http://www.numerama.com/magazine/20928-plus-fort-qu-hadopi-un-site-devoile-ce-que-vous-avez-pirate-sur-bittorrent.html|title|Plus fort qu'Hadopi ? Un site dévoile ce que vous avez piraté sur BitTorrent !
+http://www.numerama.com/magazine/20928-plus-fort-qu-hadopi-un-site-devoile-ce-que-vous-avez-pirate-sur-bittorrent.html|creationTime|2011-12-17T14:38:49Z
+http://searchhub.org/2013/02/11/a-simple-question-answering-system-using-solr-and-opennlp/|creationDate|2014-03-15
+http://searchhub.org/2013/02/11/a-simple-question-answering-system-using-solr-and-opennlp/|tag|http://www.semanlink.net/tag/grant_ingersoll
+http://searchhub.org/2013/02/11/a-simple-question-answering-system-using-solr-and-opennlp/|tag|http://www.semanlink.net/tag/solr_and_nlp
+http://searchhub.org/2013/02/11/a-simple-question-answering-system-using-solr-and-opennlp/|title|A Simple Question Answering system using Solr and OpenNLP SearchHub Lucene/Solr Open Source Search
+http://searchhub.org/2013/02/11/a-simple-question-answering-system-using-solr-and-opennlp/|creationTime|2014-03-15T13:59:28Z
+http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411/6|creationDate|2018-05-18
+http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411/6|tag|http://www.semanlink.net/tag/word_embedding
+http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411/6|tag|http://www.semanlink.net/tag/spellchecker
+http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411/6|title|NLP: Any libraries/dictionaries out there for fixing common spelling errors? - Part 2 & Alumni - Deep Learning Course Forums
+http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411/6|creationTime|2018-05-18T07:46:31Z
+https://www.quora.com/How-is-GloVe-different-from-word2vec|creationDate|2017-05-18
+https://www.quora.com/How-is-GloVe-different-from-word2vec|tag|http://www.semanlink.net/tag/word_embedding
+https://www.quora.com/How-is-GloVe-different-from-word2vec|tag|http://www.semanlink.net/tag/glove
+https://www.quora.com/How-is-GloVe-different-from-word2vec|tag|http://www.semanlink.net/tag/word2vec
+https://www.quora.com/How-is-GloVe-different-from-word2vec|comment|"Both learn geometrical encodings (vectors) of words from their co-occurrence information. Word2vec is a ""predictive"" model, whereas GloVe is a ""count-based"" model."
+https://www.quora.com/How-is-GloVe-different-from-word2vec|title|How is GloVe different from word2vec? - Quora
+https://www.quora.com/How-is-GloVe-different-from-word2vec|creationTime|2017-05-18T23:20:04Z
+https://books.google.fr/books?id=4NjZCgAAQBAJ|creationDate|2017-09-29
+https://books.google.fr/books?id=4NjZCgAAQBAJ|tag|http://www.semanlink.net/tag/afrique_de_l_ouest
+https://books.google.fr/books?id=4NjZCgAAQBAJ|tag|http://www.semanlink.net/tag/livre
+https://books.google.fr/books?id=4NjZCgAAQBAJ|tag|http://www.semanlink.net/tag/peter_chilson
+https://books.google.fr/books?id=4NjZCgAAQBAJ|title|Riding the Demon: On the Road in West Africa - Peter Chilson - Google Livres
+https://books.google.fr/books?id=4NjZCgAAQBAJ|creationTime|2017-09-29T21:01:50Z
+http://www.lemonde.fr/sport/article/2018/03/26/le-scandale-qui-secoue-le-cricket-australien_5276730_3242.html|creationDate|2018-03-27
+http://www.lemonde.fr/sport/article/2018/03/26/le-scandale-qui-secoue-le-cricket-australien_5276730_3242.html|tag|http://www.semanlink.net/tag/sport
+http://www.lemonde.fr/sport/article/2018/03/26/le-scandale-qui-secoue-le-cricket-australien_5276730_3242.html|tag|http://www.semanlink.net/tag/australie
+http://www.lemonde.fr/sport/article/2018/03/26/le-scandale-qui-secoue-le-cricket-australien_5276730_3242.html|comment|« Hey Cricket Australia, show some balls »
+http://www.lemonde.fr/sport/article/2018/03/26/le-scandale-qui-secoue-le-cricket-australien_5276730_3242.html|title|Le scandale qui secoue le cricket australien
+http://www.lemonde.fr/sport/article/2018/03/26/le-scandale-qui-secoue-le-cricket-australien_5276730_3242.html|creationTime|2018-03-27T12:05:40Z
+http://www.semanticdesktop.org/xwiki/bin/view/Wiki/IDELIANCE|creationDate|2009-01-28
+http://www.semanticdesktop.org/xwiki/bin/view/Wiki/IDELIANCE|tag|http://www.semanlink.net/tag/jean_rohmer
+http://www.semanticdesktop.org/xwiki/bin/view/Wiki/IDELIANCE|tag|http://www.semanlink.net/tag/semanlink_related
+http://www.semanticdesktop.org/xwiki/bin/view/Wiki/IDELIANCE|tag|http://www.semanlink.net/tag/semantic_networks
+http://www.semanticdesktop.org/xwiki/bin/view/Wiki/IDELIANCE|comment|Lessons for the future of Semantic Desktops learnt from 10 years of experience with the IDELIANCE Semantic Networks Manager
+http://www.semanticdesktop.org/xwiki/bin/view/Wiki/IDELIANCE|title|SemanticDesktop.org - Wiki.IDELIANCE
+http://www.semanticdesktop.org/xwiki/bin/view/Wiki/IDELIANCE|creationTime|2009-01-28T17:20:50Z
+http://danieldkim.blogspot.com/2005/07/javascript-and-rdf-almost-perfect.html|creationDate|2006-02-07
+http://danieldkim.blogspot.com/2005/07/javascript-and-rdf-almost-perfect.html|tag|http://www.semanlink.net/tag/rdf
+http://danieldkim.blogspot.com/2005/07/javascript-and-rdf-almost-perfect.html|tag|http://www.semanlink.net/tag/json
+http://danieldkim.blogspot.com/2005/07/javascript-and-rdf-almost-perfect.html|tag|http://www.semanlink.net/tag/javascript
+http://danieldkim.blogspot.com/2005/07/javascript-and-rdf-almost-perfect.html|comment|in a very real way, the RDF model of data could be described as interlinked associative arrays. this simplification and reduction to something akin to an essence of RDF was in the back of my mind months later, when I was working on an AJAX application, using JSON as a data interchange format. prior to this, i had never looked too deeply into JavaScript, but the similarities between RDF and JSON were apparent. both are a very general, minimalist means of representing data, with simplicity being a primary virtue. both can be modeled very simply as a sets of connected associative arrays...
+http://danieldkim.blogspot.com/2005/07/javascript-and-rdf-almost-perfect.html|title|Dan's Blaggity Blog: JavaScript and RDF - (almost) perfect together +https://news.cnrs.fr/videos/the-secret-sex-life-of-truffles?utm_content=buffere34f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-10-17 +https://news.cnrs.fr/videos/the-secret-sex-life-of-truffles?utm_content=buffere34f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/truffe +https://news.cnrs.fr/videos/the-secret-sex-life-of-truffles?utm_content=buffere34f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|The Secret Sex Life of Truffles CNRS News +https://news.cnrs.fr/videos/the-secret-sex-life-of-truffles?utm_content=buffere34f3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-10-17T21:53:50Z +http://bits.blogs.nytimes.com/2014/03/04/apples-new-carplay-is-a-step-in-the-right-direction/?_php=true&_type=blogs&partner=rss&emc=rss&_r=0|creationDate|2014-03-05 +http://bits.blogs.nytimes.com/2014/03/04/apples-new-carplay-is-a-step-in-the-right-direction/?_php=true&_type=blogs&partner=rss&emc=rss&_r=0|tag|http://www.semanlink.net/tag/apple_carplay +http://bits.blogs.nytimes.com/2014/03/04/apples-new-carplay-is-a-step-in-the-right-direction/?_php=true&_type=blogs&partner=rss&emc=rss&_r=0|title|Apple's New CarPlay Is Almost a Step in the Right Direction - NYTimes.com +http://bits.blogs.nytimes.com/2014/03/04/apples-new-carplay-is-a-step-in-the-right-direction/?_php=true&_type=blogs&partner=rss&emc=rss&_r=0|creationTime|2014-03-05T00:03:30Z +http://www.lemonde.fr/idees/article/2010/12/20/la-forfaiture-ou-le-mepris-repete-des-lois-les-plus-intimes-de-la-republique_1455130_3232.html|creationDate|2010-12-20 +http://www.lemonde.fr/idees/article/2010/12/20/la-forfaiture-ou-le-mepris-repete-des-lois-les-plus-intimes-de-la-republique_1455130_3232.html|tag|http://www.semanlink.net/tag/police +http://www.lemonde.fr/idees/article/2010/12/20/la-forfaiture-ou-le-mepris-repete-des-lois-les-plus-intimes-de-la-republique_1455130_3232.html|tag|http://www.semanlink.net/tag/hortefeux +http://www.lemonde.fr/idees/article/2010/12/20/la-forfaiture-ou-le-mepris-repete-des-lois-les-plus-intimes-de-la-republique_1455130_3232.html|tag|http://www.semanlink.net/tag/sarkozyland +http://www.lemonde.fr/idees/article/2010/12/20/la-forfaiture-ou-le-mepris-repete-des-lois-les-plus-intimes-de-la-republique_1455130_3232.html|comment|Une des différences entre démocratie et dictature, c'est la confiance qu'on peut avoir dans la police. 
+http://www.lemonde.fr/idees/article/2010/12/20/la-forfaiture-ou-le-mepris-repete-des-lois-les-plus-intimes-de-la-republique_1455130_3232.html|title|La forfaiture ou le mépris répété des lois les plus intimes de la République - LeMonde.fr
+http://www.lemonde.fr/idees/article/2010/12/20/la-forfaiture-ou-le-mepris-repete-des-lois-les-plus-intimes-de-la-republique_1455130_3232.html|creationTime|2010-12-20T12:07:03Z
+http://www.lemonde.fr/idees/article/2010/12/20/la-forfaiture-ou-le-mepris-repete-des-lois-les-plus-intimes-de-la-republique_1455130_3232.html|source|Le Monde
+http://www.lemonde.fr/idees/article/2010/12/20/la-forfaiture-ou-le-mepris-repete-des-lois-les-plus-intimes-de-la-republique_1455130_3232.html|date|2010-12-20
+https://hazyresearch.github.io/hyperE/|creationDate|2018-07-27
+https://hazyresearch.github.io/hyperE/|tag|http://www.semanlink.net/tag/entity_embeddings
+https://hazyresearch.github.io/hyperE/|tag|http://www.semanlink.net/tag/poincare_embeddings
+https://hazyresearch.github.io/hyperE/|comment|hyperbolic entity embeddings for 100 Wikidata relationships
+https://hazyresearch.github.io/hyperE/|title|HyperE: Hyperbolic Embeddings for Entities
+https://hazyresearch.github.io/hyperE/|creationTime|2018-07-27T12:18:28Z
+http://www.w3.org/2001/sw/Europe/reports/demo-lessons-report/|creationDate|2006-01-15
+http://www.w3.org/2001/sw/Europe/reports/demo-lessons-report/|tag|http://www.semanlink.net/tag/swad_e
+http://www.w3.org/2001/sw/Europe/reports/demo-lessons-report/|tag|http://www.semanlink.net/tag/hp
+http://www.w3.org/2001/sw/Europe/reports/demo-lessons-report/|title|12.1.8 HP SWAD-E Demonstrators - lessons learnt
+https://titanpad.com/vIQu6H2Xdl|creationDate|2015-09-08
+https://titanpad.com/vIQu6H2Xdl|tag|http://www.semanlink.net/tag/wikidata_query_service
+https://titanpad.com/vIQu6H2Xdl|title|TitanPad: SPARQL queries against the Wikidata endpoint
+https://titanpad.com/vIQu6H2Xdl|creationTime|2015-09-08T23:41:17Z
+https://solarsystem.nasa.gov/galileo/|creationDate|2017-09-22
+https://solarsystem.nasa.gov/galileo/|tag|http://www.semanlink.net/tag/galileo_spacecraft
+https://solarsystem.nasa.gov/galileo/|title|Solar System Exploration: : Galileo Legacy Site
+https://solarsystem.nasa.gov/galileo/|creationTime|2017-09-22T01:24:47Z
+https://arxiv.org/abs/1801.01586|creationDate|2018-01-09
+https://arxiv.org/abs/1801.01586|tag|http://www.semanlink.net/tag/tutorial
+https://arxiv.org/abs/1801.01586|tag|http://www.semanlink.net/tag/autoencoder
+https://arxiv.org/abs/1801.01586|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1801.01586|arxiv_author|David Charte
+https://arxiv.org/abs/1801.01586|arxiv_author|Salvador García
+https://arxiv.org/abs/1801.01586|arxiv_author|Francisco Herrera
+https://arxiv.org/abs/1801.01586|arxiv_author|María J. del Jesus
+https://arxiv.org/abs/1801.01586|arxiv_author|Francisco Charte
+https://arxiv.org/abs/1801.01586|title|[1801.01586] A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines
+https://arxiv.org/abs/1801.01586|creationTime|2018-01-09T14:05:31Z
+https://arxiv.org/abs/1801.01586|arxiv_summary|"Many of the existing machine learning algorithms, both supervised and
+unsupervised, depend on the quality of the input characteristics to generate a
+good model. The amount of these variables is also important, since performance
+tends to decline as the input dimensionality increases, hence the interest in
+using feature fusion techniques, able to produce feature sets that are more
+compact and higher level. A plethora of procedures to fuse original variables
+for producing new ones has been developed in the past decades. The most basic
+ones use linear combinations of the original variables, such as PCA (Principal
+Component Analysis) and LDA (Linear Discriminant Analysis), while others find
+manifold embeddings of lower dimensionality based on non-linear combinations,
+such as Isomap or LLE (Linear Locally Embedding) techniques.
+More recently, autoencoders (AEs) have emerged as an alternative to manifold
+learning for conducting nonlinear feature fusion. Dozens of AE models have been
+proposed lately, each with its own specific traits. Although many of them can
+be used to generate reduced feature sets through the fusion of the original
+ones, there also AEs designed with other applications in mind.
+The goal of this paper is to provide the reader with a broad view of what an
+AE is, how they are used for feature fusion, a taxonomy gathering a broad range
+of models, and how they relate to other classical techniques. In addition, a
+set of didactic guidelines on how to choose the proper AE for a given task is
+supplied, together with a discussion of the software tools available. Finally,
+two case studies illustrate the usage of AEs with datasets of handwritten
+digits and breast cancer."
+https://arxiv.org/abs/1801.01586|arxiv_firstAuthor|David Charte
+https://arxiv.org/abs/1801.01586|arxiv_updated|2018-01-04T23:51:05Z
+https://arxiv.org/abs/1801.01586|arxiv_title|A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines
+https://arxiv.org/abs/1801.01586|arxiv_published|2018-01-04T23:51:05Z
+https://arxiv.org/abs/1801.01586|arxiv_num|1801.01586
+https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf|creationDate|2017-08-23
+https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf|tag|http://www.semanlink.net/tag/machine_learning
+https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf|title|A Few Useful Things to Know about Machine Learning
+https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf|creationTime|2017-08-23T14:56:41Z
+http://www.w3.org/2006/07/SWD/RDFa/impl/js/|creationDate|2007-05-31
+http://www.w3.org/2006/07/SWD/RDFa/impl/js/|tag|http://www.semanlink.net/tag/rdfa_tool
+http://www.w3.org/2006/07/SWD/RDFa/impl/js/|tag|http://www.semanlink.net/tag/bookmarklet
+http://www.w3.org/2006/07/SWD/RDFa/impl/js/|tag|http://www.semanlink.net/tag/javascript
+http://www.w3.org/2006/07/SWD/RDFa/impl/js/|comment|"This is the home of the RDFa Javascript implementation, including the RDFa bookmarklets.
+" +http://www.w3.org/2006/07/SWD/RDFa/impl/js/|title|RDFa Bookmarklets +http://www.w3.org/2006/07/SWD/RDFa/impl/js/|creationTime|2007-05-31T01:11:09Z +https://nlp.stanford.edu/software/jenny-ner-2007.pdf|creationDate|2018-05-20 +https://nlp.stanford.edu/software/jenny-ner-2007.pdf|tag|http://www.semanlink.net/tag/slides +https://nlp.stanford.edu/software/jenny-ner-2007.pdf|tag|http://www.semanlink.net/tag/stanford_ner +https://nlp.stanford.edu/software/jenny-ner-2007.pdf|title|Named Entity Recognition and the Stanford NER Software (slides) +https://nlp.stanford.edu/software/jenny-ner-2007.pdf|creationTime|2018-05-20T22:37:58Z +http://piketty.blog.lemonde.fr/2017/01/25/pour-un-revenu-universel-credible-et-ambitieux/|creationDate|2017-01-25 +http://piketty.blog.lemonde.fr/2017/01/25/pour-un-revenu-universel-credible-et-ambitieux/|tag|http://www.semanlink.net/tag/guaranteed_basic_income +http://piketty.blog.lemonde.fr/2017/01/25/pour-un-revenu-universel-credible-et-ambitieux/|tag|http://www.semanlink.net/tag/thomas_piketty +http://piketty.blog.lemonde.fr/2017/01/25/pour-un-revenu-universel-credible-et-ambitieux/|title|Pour un revenu universel crédible et audacieux Le blog de Thomas Piketty +http://piketty.blog.lemonde.fr/2017/01/25/pour-un-revenu-universel-credible-et-ambitieux/|creationTime|2017-01-25T23:19:28Z +http://news.yale.edu/2014/03/25/yale-researchers-reconstruct-facial-images-locked-viewer-s-mind#.UzXiQztMwoc.twitter|creationDate|2014-03-29 +http://news.yale.edu/2014/03/25/yale-researchers-reconstruct-facial-images-locked-viewer-s-mind#.UzXiQztMwoc.twitter|tag|http://www.semanlink.net/tag/computational_neuroscience +http://news.yale.edu/2014/03/25/yale-researchers-reconstruct-facial-images-locked-viewer-s-mind#.UzXiQztMwoc.twitter|title|YaleNews Yale researchers reconstruct facial images locked in a viewer’s mind +http://news.yale.edu/2014/03/25/yale-researchers-reconstruct-facial-images-locked-viewer-s-mind#.UzXiQztMwoc.twitter|creationTime|2014-03-29T09:23:28Z +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-970085,0.html|creationDate|2007-10-25 +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-970085,0.html|tag|http://www.semanlink.net/tag/attali +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-970085,0.html|tag|http://www.semanlink.net/tag/croissance +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-970085,0.html|tag|http://www.semanlink.net/tag/herve_kempf +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-970085,0.html|title|Croissance : Attali contre Attali, par Hervé Kempf +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-970085,0.html|creationTime|2007-10-25T13:40:47Z +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-970085,0.html|source|Le Monde +http://www.dartmouth.edu/~news/releases/2005/09/14.html|creationDate|2005-09-15 +http://www.dartmouth.edu/~news/releases/2005/09/14.html|tag|http://www.semanlink.net/tag/robotique +http://www.dartmouth.edu/~news/releases/2005/09/14.html|comment|A machine that measures 60 micrometers by 250 micrometers. It integrates power delivery, locomotion, communication, and a controllable steering system. 
+http://www.dartmouth.edu/~news/releases/2005/09/14.html|title|Dartmouth News - Dartmouth researchers build world's smallest mobile robot - 09/14/05
+http://nbviewer.ipython.org/|creationDate|2015-04-28
+http://nbviewer.ipython.org/|tag|http://www.semanlink.net/tag/jupyter
+http://nbviewer.ipython.org/|tag|http://www.semanlink.net/tag/ipython
+http://nbviewer.ipython.org/|comment|A simple way to share Jupyter Notebooks
+http://nbviewer.ipython.org/|title|nbViewer
+http://nbviewer.ipython.org/|creationTime|2015-04-28T14:21:46Z
+http://searchwebservices.techtarget.com/ateQuestionNResponse/0,289625,sid26_cid494324_tax289201,00.html|creationDate|2005-10-14
+http://searchwebservices.techtarget.com/ateQuestionNResponse/0,289625,sid26_cid494324_tax289201,00.html|tag|http://www.semanlink.net/tag/web_services_document_vs_rpc_style
+http://searchwebservices.techtarget.com/ateQuestionNResponse/0,289625,sid26_cid494324_tax289201,00.html|title|Document vs. RPC style--why is it a big deal?
+http://blogs.lexpress.fr/attali/2008/11/plans-b.php|creationDate|2009-05-15
+http://blogs.lexpress.fr/attali/2008/11/plans-b.php|tag|http://www.semanlink.net/tag/crise_des_subprimes
+http://blogs.lexpress.fr/attali/2008/11/plans-b.php|tag|http://www.semanlink.net/tag/attali
+http://blogs.lexpress.fr/attali/2008/11/plans-b.php|title|PLANS B - Conversation avec Jacques Attali - Lexpress
+http://blogs.lexpress.fr/attali/2008/11/plans-b.php|creationTime|2009-05-15T01:02:15Z
+http://www.lesechos.fr/entreprises-secteurs/tech-medias/actu/0202374255582-les-revendeurs-de-donnees-cherchent-encore-leur-modele-economique-508886.php|creationDate|2012-11-10
+http://www.lesechos.fr/entreprises-secteurs/tech-medias/actu/0202374255582-les-revendeurs-de-donnees-cherchent-encore-leur-modele-economique-508886.php|tag|http://www.semanlink.net/tag/data_publica
+http://www.lesechos.fr/entreprises-secteurs/tech-medias/actu/0202374255582-les-revendeurs-de-donnees-cherchent-encore-leur-modele-economique-508886.php|title|Les revendeurs de données cherchent encore leur modèle économique, Actualités
+http://www.lesechos.fr/entreprises-secteurs/tech-medias/actu/0202374255582-les-revendeurs-de-donnees-cherchent-encore-leur-modele-economique-508886.php|creationTime|2012-11-10T16:01:48Z
+http://imagining-other.net/pp5thomasmoreextracts.htm|creationDate|2017-01-29
+http://imagining-other.net/pp5thomasmoreextracts.htm|tag|http://www.semanlink.net/tag/thomas_more
+http://imagining-other.net/pp5thomasmoreextracts.htm|tag|http://www.semanlink.net/tag/guaranteed_basic_income
+http://imagining-other.net/pp5thomasmoreextracts.htm|comment|…no penalty on earth will stop people from stealing, if it’s their only way of getting food… Instead of inflicting these horrible punishments, it would be far more to the point to provide everyone with some means of livelihood, so that nobody's under the frightful necessity of becoming first a thief and then a corpse. 
+http://imagining-other.net/pp5thomasmoreextracts.htm|title|Thomas More's political thought: extracts +http://imagining-other.net/pp5thomasmoreextracts.htm|creationTime|2017-01-29T18:48:02Z +http://rapid-i.com/content/view/202/206/|creationDate|2013-09-05 +http://rapid-i.com/content/view/202/206/|title|RapidMiner - Extensions +http://rapid-i.com/content/view/202/206/|creationTime|2013-09-05T13:24:09Z +http://stackoverflow.com/questions/1308263/wiping-out-maven-local-repository-on-build-machine|creationDate|2012-08-14 +http://stackoverflow.com/questions/1308263/wiping-out-maven-local-repository-on-build-machine|tag|http://www.semanlink.net/tag/maven +http://stackoverflow.com/questions/1308263/wiping-out-maven-local-repository-on-build-machine|title|continuous integration - Wiping out Maven local repository on build machine - Stack Overflow +http://stackoverflow.com/questions/1308263/wiping-out-maven-local-repository-on-build-machine|creationTime|2012-08-14T11:07:26Z +http://dvcs.w3.org/hg/rdf/raw-file/default/rdf-json/index.html#|creationDate|2012-09-02 +http://dvcs.w3.org/hg/rdf/raw-file/default/rdf-json/index.html#|tag|http://www.semanlink.net/tag/ian_davis +http://dvcs.w3.org/hg/rdf/raw-file/default/rdf-json/index.html#|tag|http://www.semanlink.net/tag/talis_rdf_json +http://dvcs.w3.org/hg/rdf/raw-file/default/rdf-json/index.html#|title|RDF 1.1 JSON Serialisation (RDF/JSON) +http://dvcs.w3.org/hg/rdf/raw-file/default/rdf-json/index.html#|creationTime|2012-09-02T16:42:05Z +http://www.universcience.tv/|creationDate|2010-06-26 +http://www.universcience.tv/|tag|http://www.semanlink.net/tag/science +http://www.universcience.tv/|tag|http://www.semanlink.net/tag/web_tv +http://www.universcience.tv/|tag|http://www.semanlink.net/tag/cite_des_sciences_et_de_l_industrie +http://www.universcience.tv/|title|universcience.tv la Web TV scientifique +http://www.universcience.tv/|creationTime|2010-06-26T11:22:33Z +http://www4.wiwiss.fu-berlin.de/pubby/|creationDate|2007-06-23 +http://www4.wiwiss.fu-berlin.de/pubby/|tag|http://www.semanlink.net/tag/pubby +http://www4.wiwiss.fu-berlin.de/pubby/|title|Pubby – A Linked Data Frontend for SPARQL Endpoints +http://www4.wiwiss.fu-berlin.de/pubby/|creationTime|2007-06-23T15:15:36Z +http://www.sciencemag.org/news/2017/11/artificial-intelligence-goes-bilingual-without-dictionary|creationDate|2017-12-01 +http://www.sciencemag.org/news/2017/11/artificial-intelligence-goes-bilingual-without-dictionary|tag|http://www.semanlink.net/tag/machine_translation +http://www.sciencemag.org/news/2017/11/artificial-intelligence-goes-bilingual-without-dictionary|title|Artificial intelligence goes bilingual—without a dictionary Science AAAS +http://www.sciencemag.org/news/2017/11/artificial-intelligence-goes-bilingual-without-dictionary|creationTime|2017-12-01T01:25:34Z +http://dfdf.inesc-id.pt/tr/web-arch|creationDate|2007-11-20 +http://dfdf.inesc-id.pt/tr/web-arch|tag|http://www.semanlink.net/tag/uri_identity +http://dfdf.inesc-id.pt/tr/web-arch|tag|http://www.semanlink.net/tag/information_resources +http://dfdf.inesc-id.pt/tr/web-arch|tag|http://www.semanlink.net/tag/uri +http://dfdf.inesc-id.pt/tr/web-arch|title|URI Identity and Web Architecture Revisited +http://dfdf.inesc-id.pt/tr/web-arch|creationTime|2007-11-20T21:36:46Z +https://towardsdatascience.com/advances-in-few-shot-learning-reproducing-results-in-pytorch-aba70dee541d|creationDate|2018-12-02 
+https://towardsdatascience.com/advances-in-few-shot-learning-reproducing-results-in-pytorch-aba70dee541d|tag|http://www.semanlink.net/tag/few_shot_learning +https://towardsdatascience.com/advances-in-few-shot-learning-reproducing-results-in-pytorch-aba70dee541d|tag|http://www.semanlink.net/tag/pytorch +https://towardsdatascience.com/advances-in-few-shot-learning-reproducing-results-in-pytorch-aba70dee541d|title|Advances in few-shot learning: reproducing results in PyTorch +https://towardsdatascience.com/advances-in-few-shot-learning-reproducing-results-in-pytorch-aba70dee541d|creationTime|2018-12-02T10:21:44Z +http://msdn.microsoft.com/en-us/magazine/ff714592.aspx|creationDate|2013-03-12 +http://msdn.microsoft.com/en-us/magazine/ff714592.aspx|tag|http://www.semanlink.net/tag/nosql +http://msdn.microsoft.com/en-us/magazine/ff714592.aspx|tag|http://www.semanlink.net/tag/mongodb +http://msdn.microsoft.com/en-us/magazine/ff714592.aspx|tag|http://www.semanlink.net/tag/exploration_test +http://msdn.microsoft.com/en-us/magazine/ff714592.aspx|title|Going NoSQL with MongoDB - MSDN Magazine: The Working Programmer +http://msdn.microsoft.com/en-us/magazine/ff714592.aspx|creationTime|2013-03-12T16:10:05Z +http://www-irem.univ-paris13.fr/spip/|creationDate|2009-06-01 +http://www-irem.univ-paris13.fr/spip/|tag|http://www.semanlink.net/tag/mathematiques +http://www-irem.univ-paris13.fr/spip/|tag|http://www.semanlink.net/tag/jean_paul +http://www-irem.univ-paris13.fr/spip/|tag|http://www.semanlink.net/tag/education +http://www-irem.univ-paris13.fr/spip/|title|IREM de Paris Nord (Institut de Recherche en Enseignement des Mathématiques) +http://www-irem.univ-paris13.fr/spip/|creationTime|2009-06-01T14:12:25Z +http://robohub.org/the-agricultural-labor-conundrum/|creationDate|2017-08-09 +http://robohub.org/the-agricultural-labor-conundrum/|tag|http://www.semanlink.net/tag/jobbotization +http://robohub.org/the-agricultural-labor-conundrum/|tag|http://www.semanlink.net/tag/travail +http://robohub.org/the-agricultural-labor-conundrum/|tag|http://www.semanlink.net/tag/agriculture +http://robohub.org/the-agricultural-labor-conundrum/|title|The agricultural labor conundrum Robohub +http://robohub.org/the-agricultural-labor-conundrum/|creationTime|2017-08-09T10:27:52Z +http://semanticweb.com/huoua-uses-semantic-search-to-%E2%80%98spark%E2%80%99-instant-circles_b24592|creationDate|2011-11-13 +http://semanticweb.com/huoua-uses-semantic-search-to-%E2%80%98spark%E2%80%99-instant-circles_b24592|tag|http://www.semanlink.net/tag/semantic_search +http://semanticweb.com/huoua-uses-semantic-search-to-%E2%80%98spark%E2%80%99-instant-circles_b24592|tag|http://www.semanlink.net/tag/semweb_china +http://semanticweb.com/huoua-uses-semantic-search-to-%E2%80%98spark%E2%80%99-instant-circles_b24592|tag|http://www.semanlink.net/tag/social_networks +http://semanticweb.com/huoua-uses-semantic-search-to-%E2%80%98spark%E2%80%99-instant-circles_b24592|title|Huoua Uses Semantic Search to ‘Spark’ Instant Circles - semanticweb.com +http://semanticweb.com/huoua-uses-semantic-search-to-%E2%80%98spark%E2%80%99-instant-circles_b24592|creationTime|2011-11-13T14:19:03Z +http://www.zdnet.com/article/graph-databases-and-rdf-its-a-family-affair/?utm_content=buffer95710&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer#this|creationDate|2017-05-23 
+http://www.zdnet.com/article/graph-databases-and-rdf-its-a-family-affair/?utm_content=buffer95710&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer#this|tag|http://www.semanlink.net/tag/graph_database +http://www.zdnet.com/article/graph-databases-and-rdf-its-a-family-affair/?utm_content=buffer95710&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer#this|tag|http://www.semanlink.net/tag/neo4j +http://www.zdnet.com/article/graph-databases-and-rdf-its-a-family-affair/?utm_content=buffer95710&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer#this|tag|http://www.semanlink.net/tag/rdf +http://www.zdnet.com/article/graph-databases-and-rdf-its-a-family-affair/?utm_content=buffer95710&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer#this|title|Graph databases and RDF: It's a family affair ZDNet +http://www.zdnet.com/article/graph-databases-and-rdf-its-a-family-affair/?utm_content=buffer95710&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer#this|creationTime|2017-05-23T10:54:18Z +https://radekosmulski.github.io/answers/html/What%20are%20pretrained%20models%3F.html|creationDate|2018-11-23 +https://radekosmulski.github.io/answers/html/What%20are%20pretrained%20models%3F.html|tag|http://www.semanlink.net/tag/pretrained_models +https://radekosmulski.github.io/answers/html/What%20are%20pretrained%20models%3F.html|title|What are pretrained models and why are they useful? +https://radekosmulski.github.io/answers/html/What%20are%20pretrained%20models%3F.html|creationTime|2018-11-23T08:54:12Z +http://blog.iks-project.eu/the-enterprise-search-market-what-should-be-on-your-radar-keynote-iks-2012-workshop/|creationDate|2012-07-04 +http://blog.iks-project.eu/the-enterprise-search-market-what-should-be-on-your-radar-keynote-iks-2012-workshop/|tag|http://www.semanlink.net/tag/semantic_search +http://blog.iks-project.eu/the-enterprise-search-market-what-should-be-on-your-radar-keynote-iks-2012-workshop/|tag|http://www.semanlink.net/tag/enterprise_search +http://blog.iks-project.eu/the-enterprise-search-market-what-should-be-on-your-radar-keynote-iks-2012-workshop/|tag|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://blog.iks-project.eu/the-enterprise-search-market-what-should-be-on-your-radar-keynote-iks-2012-workshop/|title|The Enterprise Search Market – What should be on your radar? Keynote IKS 2012 Workshop IKS Blog – The Semantic CMS Community +http://blog.iks-project.eu/the-enterprise-search-market-what-should-be-on-your-radar-keynote-iks-2012-workshop/|creationTime|2012-07-04T22:45:33Z +http://www.theguardian.com/world/2013/sep/05/nsa-gchq-encryption-codes-security|creationDate|2013-09-06 +http://www.theguardian.com/world/2013/sep/05/nsa-gchq-encryption-codes-security|tag|http://www.semanlink.net/tag/nsa_spying_scandal +http://www.theguardian.com/world/2013/sep/05/nsa-gchq-encryption-codes-security|tag|http://www.semanlink.net/tag/the_guardian +http://www.theguardian.com/world/2013/sep/05/nsa-gchq-encryption-codes-security|comment|"Those methods include covert measures to ensure NSA control over setting of international encryption standards, the use of supercomputers to break encryption with ""brute force"", and – the most closely guarded secret of all – collaboration with technology companies and internet service providers themselves. Through these covert partnerships, the agencies have inserted secret vulnerabilities – known as backdoors or trapdoors – into commercial encryption software." 
+http://www.theguardian.com/world/2013/sep/05/nsa-gchq-encryption-codes-security|title|Revealed: how US and UK spy agencies defeat internet privacy and security World news Guardian Weekly +http://www.theguardian.com/world/2013/sep/05/nsa-gchq-encryption-codes-security|creationTime|2013-09-06T22:39:37Z +http://glowingpython.blogspot.fr/2013/07/combining-scikit-learn-and-ntlk.html|creationDate|2015-10-21 +http://glowingpython.blogspot.fr/2013/07/combining-scikit-learn-and-ntlk.html|tag|http://www.semanlink.net/tag/nltk +http://glowingpython.blogspot.fr/2013/07/combining-scikit-learn-and-ntlk.html|tag|http://www.semanlink.net/tag/scikit_learn +http://glowingpython.blogspot.fr/2013/07/combining-scikit-learn-and-ntlk.html|title|The Glowing Python: Combining Scikit-Learn and NTLK +http://glowingpython.blogspot.fr/2013/07/combining-scikit-learn-and-ntlk.html|creationTime|2015-10-21T18:43:13Z +https://www.kdnuggets.com/2018/03/text-data-preprocessing-walkthrough-python.html?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|creationDate|2018-04-09 +https://www.kdnuggets.com/2018/03/text-data-preprocessing-walkthrough-python.html?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|tag|http://www.semanlink.net/tag/python_nlp +https://www.kdnuggets.com/2018/03/text-data-preprocessing-walkthrough-python.html?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|tag|http://www.semanlink.net/tag/text_preprocessing +https://www.kdnuggets.com/2018/03/text-data-preprocessing-walkthrough-python.html?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|title|Text Data Preprocessing: A Walkthrough in Python +https://www.kdnuggets.com/2018/03/text-data-preprocessing-walkthrough-python.html?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|creationTime|2018-04-09T13:26:13Z +http://clic.cimec.unitn.it/marco/publications/acl2014/baroni-etal-countpredict-acl2014.pdf|creationDate|2017-05-18 +http://clic.cimec.unitn.it/marco/publications/acl2014/baroni-etal-countpredict-acl2014.pdf|tag|http://www.semanlink.net/tag/word_embedding_evaluation +http://clic.cimec.unitn.it/marco/publications/acl2014/baroni-etal-countpredict-acl2014.pdf|tag|http://www.semanlink.net/tag/glove +http://clic.cimec.unitn.it/marco/publications/acl2014/baroni-etal-countpredict-acl2014.pdf|tag|http://www.semanlink.net/tag/word2vec +http://clic.cimec.unitn.it/marco/publications/acl2014/baroni-etal-countpredict-acl2014.pdf|tag|http://www.semanlink.net/tag/word_embedding +http://clic.cimec.unitn.it/marco/publications/acl2014/baroni-etal-countpredict-acl2014.pdf|comment|(good presentation in the intro of context-counting vs. context-predicting vectors) +http://clic.cimec.unitn.it/marco/publications/acl2014/baroni-etal-countpredict-acl2014.pdf|title|Don’t count, predict! A systematic comparison of context-counting vs. 
context-predicting semantic vectors (2014) +http://clic.cimec.unitn.it/marco/publications/acl2014/baroni-etal-countpredict-acl2014.pdf|creationTime|2017-05-18T23:30:46Z +http://searchhub.org/2014/02/07/easy-hierarchical-faceting-and-display-with-solr-and-jquery-and-a-tiny-bit-of-python/|creationDate|2014-03-15 +http://searchhub.org/2014/02/07/easy-hierarchical-faceting-and-display-with-solr-and-jquery-and-a-tiny-bit-of-python/|tag|http://www.semanlink.net/tag/jquery +http://searchhub.org/2014/02/07/easy-hierarchical-faceting-and-display-with-solr-and-jquery-and-a-tiny-bit-of-python/|tag|http://www.semanlink.net/tag/solr +http://searchhub.org/2014/02/07/easy-hierarchical-faceting-and-display-with-solr-and-jquery-and-a-tiny-bit-of-python/|tag|http://www.semanlink.net/tag/grant_ingersoll +http://searchhub.org/2014/02/07/easy-hierarchical-faceting-and-display-with-solr-and-jquery-and-a-tiny-bit-of-python/|title|Easy Hierarchical Faceting and display with Solr and jQuery (and a tiny bit of Python) SearchHub Lucene/Solr Open Source Search +http://searchhub.org/2014/02/07/easy-hierarchical-faceting-and-display-with-solr-and-jquery-and-a-tiny-bit-of-python/|creationTime|2014-03-15T13:47:31Z +http://www.cs.princeton.edu/~blei/papers/Blei2012.pdf|creationDate|2013-08-20 +http://www.cs.princeton.edu/~blei/papers/Blei2012.pdf|tag|http://www.semanlink.net/tag/topic_modeling +http://www.cs.princeton.edu/~blei/papers/Blei2012.pdf|tag|http://www.semanlink.net/tag/david_blei +http://www.cs.princeton.edu/~blei/papers/Blei2012.pdf|title|Probabilistic Topic Models +http://www.cs.princeton.edu/~blei/papers/Blei2012.pdf|creationTime|2013-08-20T10:40:52Z +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo10.html|creationDate|2007-07-08 +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo10.html|tag|http://www.semanlink.net/tag/national_geographic +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo10.html|tag|http://www.semanlink.net/tag/photo +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo10.html|tag|http://www.semanlink.net/tag/rwanda +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo10.html|tag|http://www.semanlink.net/tag/africa_s_last_wild_places +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo10.html|comment|A patchwork of crops covers nearly every inch of volcanic earth in northwestern Rwanda +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo10.html|title|Rwanda - National Geographic News Photo Gallery: Megaflyover: Documenting Africa's Last Wild Places +http://news.nationalgeographic.com/news/2005/08/photogalleries/africa/photo10.html|creationTime|2007-07-08T02:49:37Z +http://www.semanlink.info/|creationDate|2008-07-08 +http://www.semanlink.info/|tag|http://www.semanlink.net/tag/semanlink +http://www.semanlink.info/|tag|http://www.semanlink.net/tag/dimitris +http://www.semanlink.info/|comment|The aim of this site is the diffusion of usage of Semanlink in Greece and on all over the world. 
+http://www.semanlink.info/|title|Semanlink : Find your Path in the Labyrinth of Information - Dimitris' site +http://www.semanlink.info/|creationTime|2008-07-08T21:13:42Z +https://git-scm.com/book/en/v2/GitHub-Contributing-to-a-Project|creationDate|2016-06-29 +https://git-scm.com/book/en/v2/GitHub-Contributing-to-a-Project|tag|http://www.semanlink.net/tag/git +https://git-scm.com/book/en/v2/GitHub-Contributing-to-a-Project|tag|http://www.semanlink.net/tag/github +https://git-scm.com/book/en/v2/GitHub-Contributing-to-a-Project|title|Git - GitHub - Contributing to a Project +https://git-scm.com/book/en/v2/GitHub-Contributing-to-a-Project|creationTime|2016-06-29T16:10:55Z +http://wiki.osafoundation.org/bin/view/Journal/HierarchyVersusFacetsVersusTags|creationDate|2005-08-18 +http://wiki.osafoundation.org/bin/view/Journal/HierarchyVersusFacetsVersusTags|tag|http://www.semanlink.net/tag/semanlink_related +http://wiki.osafoundation.org/bin/view/Journal/HierarchyVersusFacetsVersusTags|tag|http://www.semanlink.net/tag/faceted_search +http://wiki.osafoundation.org/bin/view/Journal/HierarchyVersusFacetsVersusTags|tag|http://www.semanlink.net/tag/classification_systems +http://wiki.osafoundation.org/bin/view/Journal/HierarchyVersusFacetsVersusTags|tag|http://www.semanlink.net/tag/tagging +http://wiki.osafoundation.org/bin/view/Journal/HierarchyVersusFacetsVersusTags|comment|The problem of where to file: Is it possible to construct the perfect classification system? +http://wiki.osafoundation.org/bin/view/Journal/HierarchyVersusFacetsVersusTags|title|HierarchyVersusFacetsVersusTags +http://www.lemonde.fr/afrique/article/2015/08/12/vincent-bollore-notre-methode-c-est-plutot-du-commando-que-de-l-armee-reguliere_4721862_3212.html|creationDate|2015-08-14 +http://www.lemonde.fr/afrique/article/2015/08/12/vincent-bollore-notre-methode-c-est-plutot-du-commando-que-de-l-armee-reguliere_4721862_3212.html|tag|http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest +http://www.lemonde.fr/afrique/article/2015/08/12/vincent-bollore-notre-methode-c-est-plutot-du-commando-que-de-l-armee-reguliere_4721862_3212.html|tag|http://www.semanlink.net/tag/bollore +http://www.lemonde.fr/afrique/article/2015/08/12/vincent-bollore-notre-methode-c-est-plutot-du-commando-que-de-l-armee-reguliere_4721862_3212.html|title|Vincent Bolloré : « Notre méthode, c’est plutôt du commando que de l’armée régulière » +http://www.lemonde.fr/afrique/article/2015/08/12/vincent-bollore-notre-methode-c-est-plutot-du-commando-que-de-l-armee-reguliere_4721862_3212.html|creationTime|2015-08-14T15:15:10Z +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/05.pdf|creationDate|2006-12-23 +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/05.pdf|tag|http://www.semanlink.net/tag/social_bookmarking +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/05.pdf|tag|http://www.semanlink.net/tag/semanlink_related +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/05.pdf|tag|http://www.semanlink.net/tag/tagging +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/05.pdf|comment|"Facetag is a working prototype of a semantic +collaborative tagging tool conceived for bookmarking +information architecture resources. It aims to show how the +widespread homogeneous and flat keywords' space of tags can be +effectively mixed with a richer faceted classification scheme to +improve the “information scent” and “berrypicking” capabilities +of the system. 
The additional semantic structure is aggregated +both implicitly observing user behaviour and explicitly +introducing a compelling user experience to facilitate the +creation of relationships between tags directly by end-users. +Facetag current implementation is written in PHP / SQL and +includes an open API which allows querying and integration +from other applications." +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/05.pdf|title|FaceTag: Integrating Bottom-up and Top-down Classification in a Social Tagging System +http://www.w3.org/wiki/LDP_Implementations|creationDate|2014-07-12 +http://www.w3.org/wiki/LDP_Implementations|tag|http://www.semanlink.net/tag/ldp_w3c +http://www.w3.org/wiki/LDP_Implementations|tag|http://www.semanlink.net/tag/ldp_implementations +http://www.w3.org/wiki/LDP_Implementations|title|LDP Implementations - W3C Wiki +http://www.w3.org/wiki/LDP_Implementations|creationTime|2014-07-12T13:07:27Z +https://fr.wikipedia.org/wiki/Miharu_Takizakura|creationDate|2019-04-22 +https://fr.wikipedia.org/wiki/Miharu_Takizakura|tag|http://www.semanlink.net/tag/arbres_remarquables +https://fr.wikipedia.org/wiki/Miharu_Takizakura|tag|http://www.semanlink.net/tag/japon +https://fr.wikipedia.org/wiki/Miharu_Takizakura|title|Miharu Takizakura +https://fr.wikipedia.org/wiki/Miharu_Takizakura|creationTime|2019-04-22T23:00:18Z +https://en.wikipedia.org/wiki/In_the_Heat_of_the_Night_(film)|creationDate|2017-11-21 +https://en.wikipedia.org/wiki/In_the_Heat_of_the_Night_(film)|tag|http://www.semanlink.net/tag/sud_des_etats_unis +https://en.wikipedia.org/wiki/In_the_Heat_of_the_Night_(film)|tag|http://www.semanlink.net/tag/film_americain +https://en.wikipedia.org/wiki/In_the_Heat_of_the_Night_(film)|comment|the story of Virgil Tibbs (Sidney Poitier), a black police detective from Philadelphia, who becomes involved in a murder investigation in a small town in Mississippi. +https://en.wikipedia.org/wiki/In_the_Heat_of_the_Night_(film)|title|In the Heat of the Night (film) +https://en.wikipedia.org/wiki/In_the_Heat_of_the_Night_(film)|creationTime|2017-11-21T00:15:33Z +http://donsmaps.com/brassempouyvenus.html|creationDate|2007-11-21 +http://donsmaps.com/brassempouyvenus.html|tag|http://www.semanlink.net/tag/venus_de_brassempouy +http://donsmaps.com/brassempouyvenus.html|title|Venus de Brassempouy +http://donsmaps.com/brassempouyvenus.html|creationTime|2007-11-21T15:46:22Z +https://code.facebook.com/posts/550719898617409/under-the-hood-multilingual-embeddings/|creationDate|2018-05-11 +https://code.facebook.com/posts/550719898617409/under-the-hood-multilingual-embeddings/|tag|http://www.semanlink.net/tag/multilingual_embeddings +https://code.facebook.com/posts/550719898617409/under-the-hood-multilingual-embeddings/|tag|http://www.semanlink.net/tag/nlp_facebook +https://code.facebook.com/posts/550719898617409/under-the-hood-multilingual-embeddings/|tag|http://www.semanlink.net/tag/fasttext +https://code.facebook.com/posts/550719898617409/under-the-hood-multilingual-embeddings/|comment|"With this technique, embeddings for every language exist in the same vector space, and maintain the property that words with similar meanings (regardless of language) are close together in vector space + +> To train these multilingual word embeddings, we first trained separate embeddings for each language using fastText and a combination of data from Facebook and Wikipedia. We then used dictionaries to project each of these embedding spaces into a common space (English). 
The dictionaries are automatically induced from parallel data — meaning data sets that consist of a pair of sentences in two different languages that have the same meaning — which we use for training translation systems." +https://code.facebook.com/posts/550719898617409/under-the-hood-multilingual-embeddings/|title|Under the hood: Multilingual embeddings Engineering Blog Facebook Code +https://code.facebook.com/posts/550719898617409/under-the-hood-multilingual-embeddings/|creationTime|2018-05-11T22:52:23Z +http://www.w3.org/2001/sw/DataAccess/prot26|creationDate|2008-10-12 +http://www.w3.org/2001/sw/DataAccess/prot26|tag|http://www.semanlink.net/tag/dan_connolly +http://www.w3.org/2001/sw/DataAccess/prot26|tag|http://www.semanlink.net/tag/sparql +http://www.w3.org/2001/sw/DataAccess/prot26|title|The SPARQL Service Interface +http://www.w3.org/2001/sw/DataAccess/prot26|creationTime|2008-10-12T17:32:15Z +http://www.w3.org/2005/Incubator/w3pm/XGR-w3pm-20091008/|creationDate|2009-11-12 +http://www.w3.org/2005/Incubator/w3pm/XGR-w3pm-20091008/|tag|http://www.semanlink.net/tag/product_modelling +http://www.w3.org/2005/Incubator/w3pm/XGR-w3pm-20091008/|tag|http://www.semanlink.net/tag/a_voir +http://www.w3.org/2005/Incubator/w3pm/XGR-w3pm-20091008/|title|Product Modelling using Semantic Web Technologies +http://www.w3.org/2005/Incubator/w3pm/XGR-w3pm-20091008/|creationTime|2009-11-12T14:00:49Z +https://medium.com/@dpjanes/one-url-the-semantic-sensor-revolution-58b6d8c86c0f|creationDate|2014-09-16 +https://medium.com/@dpjanes/one-url-the-semantic-sensor-revolution-58b6d8c86c0f|tag|http://www.semanlink.net/tag/internet_of_things +https://medium.com/@dpjanes/one-url-the-semantic-sensor-revolution-58b6d8c86c0f|tag|http://www.semanlink.net/tag/json_ld +https://medium.com/@dpjanes/one-url-the-semantic-sensor-revolution-58b6d8c86c0f|title|One URL: The Semantic Sensor Revolution +https://medium.com/@dpjanes/one-url-the-semantic-sensor-revolution-58b6d8c86c0f|creationTime|2014-09-16T10:09:01Z +http://www.w3.org/2009/12/rdf-ws/papers/ws23|creationDate|2011-01-15 +http://www.w3.org/2009/12/rdf-ws/papers/ws23|tag|http://www.semanlink.net/tag/rdf_blank_nodes +http://www.w3.org/2009/12/rdf-ws/papers/ws23|tag|http://www.semanlink.net/tag/sparql +http://www.w3.org/2009/12/rdf-ws/papers/ws23|title|Revisiting Blank Nodes in RDF to Avoid the Semantic Mismatch with SPARQL +http://www.w3.org/2009/12/rdf-ws/papers/ws23|creationTime|2011-01-15T02:52:08Z +http://benalman.com/projects/jquery-hashchange-plugin/|creationDate|2012-06-21 +http://benalman.com/projects/jquery-hashchange-plugin/|tag|http://www.semanlink.net/tag/jquery +http://benalman.com/projects/jquery-hashchange-plugin/|tag|http://www.semanlink.net/tag/bookmarks +http://benalman.com/projects/jquery-hashchange-plugin/|tag|http://www.semanlink.net/tag/ajax +http://benalman.com/projects/jquery-hashchange-plugin/|title|Ben Alman » jQuery hashchange event +http://benalman.com/projects/jquery-hashchange-plugin/|creationTime|2012-06-21T14:04:04Z +http://lists.w3.org/Archives/Public/public-hydra/2013Oct/0023.html|creationDate|2015-02-18 +http://lists.w3.org/Archives/Public/public-hydra/2013Oct/0023.html|tag|http://www.semanlink.net/tag/samuel_goto +http://lists.w3.org/Archives/Public/public-hydra/2013Oct/0023.html|tag|http://www.semanlink.net/tag/schema_org_actions +http://lists.w3.org/Archives/Public/public-hydra/2013Oct/0023.html|tag|http://www.semanlink.net/tag/public_hydra_w3_org +http://lists.w3.org/Archives/Public/public-hydra/2013Oct/0023.html|title|Re: 
Schema.org Actions - an update and call for review from Sam Goto on 2013-10-17 (public-hydra@w3.org from October 2013) +http://lists.w3.org/Archives/Public/public-hydra/2013Oct/0023.html|creationTime|2015-02-18T02:11:07Z +http://code.google.com/p/gnizr/|creationDate|2007-11-20 +http://code.google.com/p/gnizr/|tag|http://www.semanlink.net/tag/gnizr +http://code.google.com/p/gnizr/|comment|gnizr™ (gə-nīzər) is an open source application for social bookmarking and web mashup. It is easy to use gnizr to create a personalized del.icio.us-like portal for a group of friends and colleagues to store, classify, and share information, and mash-it-up with information about location. +http://code.google.com/p/gnizr/|title|gnizr - Google Code +http://code.google.com/p/gnizr/|creationTime|2007-11-20T21:44:05Z +http://blog.proxteam.eu/2013/10/un-correcteur-orthographique-en-21.html|creationDate|2017-10-25 +http://blog.proxteam.eu/2013/10/un-correcteur-orthographique-en-21.html|tag|http://www.semanlink.net/tag/spellchecker +http://blog.proxteam.eu/2013/10/un-correcteur-orthographique-en-21.html|tag|http://www.semanlink.net/tag/python +http://blog.proxteam.eu/2013/10/un-correcteur-orthographique-en-21.html|title|Un correcteur orthographique en 21 lignes de Python +http://blog.proxteam.eu/2013/10/un-correcteur-orthographique-en-21.html|creationTime|2017-10-25T22:56:55Z +http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-1-la-recherche_1132439_3232.html|creationDate|2008-12-18 +http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-1-la-recherche_1132439_3232.html|tag|http://www.semanlink.net/tag/sarkozy +http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-1-la-recherche_1132439_3232.html|tag|http://www.semanlink.net/tag/universite +http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-1-la-recherche_1132439_3232.html|tag|http://www.semanlink.net/tag/recherche +http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-1-la-recherche_1132439_3232.html|comment|Au moment où la Ministre est en train de peaufiner un projet de décret destiné à punir les enseignants de l'Université qui se trouveraient être de « piètres chercheurs », il n'est peut-être pas inutile de faire connaître au public ce qu'est un travail de recherche. +http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-1-la-recherche_1132439_3232.html|title|Chercher et enseigner à l'Université. 
1- La recherche - Opinions - Le Monde.fr +http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-1-la-recherche_1132439_3232.html|creationTime|2008-12-18T22:14:59Z +http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-1-la-recherche_1132439_3232.html|source|Le Monde +http://fr.wikipedia.org/wiki/Va,_vis_et_deviens|creationDate|2012-02-27 +http://fr.wikipedia.org/wiki/Va,_vis_et_deviens|tag|http://www.semanlink.net/tag/film +http://fr.wikipedia.org/wiki/Va,_vis_et_deviens|tag|http://www.semanlink.net/tag/falashas +http://fr.wikipedia.org/wiki/Va,_vis_et_deviens|tag|http://www.semanlink.net/tag/israel +http://fr.wikipedia.org/wiki/Va,_vis_et_deviens|title|Va, vis et deviens +http://fr.wikipedia.org/wiki/Va,_vis_et_deviens|creationTime|2012-02-27T22:56:26Z +http://www.huffingtonpost.fr/christiane-taubira/nelson-mandela--rolihlahla-monde_b_4394382.html?ncid=edlinkusaolp00000003|creationDate|2013-12-06 +http://www.huffingtonpost.fr/christiane-taubira/nelson-mandela--rolihlahla-monde_b_4394382.html?ncid=edlinkusaolp00000003|tag|http://www.semanlink.net/tag/christiane_taubira +http://www.huffingtonpost.fr/christiane-taubira/nelson-mandela--rolihlahla-monde_b_4394382.html?ncid=edlinkusaolp00000003|tag|http://www.semanlink.net/tag/nelson_mandela +http://www.huffingtonpost.fr/christiane-taubira/nelson-mandela--rolihlahla-monde_b_4394382.html?ncid=edlinkusaolp00000003|title|Nelson Mandela : Rolihlahla, pour transfigurer le monde Christiane Taubira +http://www.huffingtonpost.fr/christiane-taubira/nelson-mandela--rolihlahla-monde_b_4394382.html?ncid=edlinkusaolp00000003|creationTime|2013-12-06T01:14:02Z +http://lists.w3.org/Archives/Public/public-vocabs/2014Jan/0025.html|creationDate|2014-04-30 +http://lists.w3.org/Archives/Public/public-vocabs/2014Jan/0025.html|tag|http://www.semanlink.net/tag/schema_org +http://lists.w3.org/Archives/Public/public-vocabs/2014Jan/0025.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2014Jan/0025.html|tag|http://www.semanlink.net/tag/peter_patel_schneider +http://lists.w3.org/Archives/Public/public-vocabs/2014Jan/0025.html|comment|"""in the absence of a good description of schema.org I tried to put one +together for my own purposes, as a potential producer and consumer of +schema.org information. This ended up needing to include a philosophy of just +what schema.org is, so I put in my own, namely something that could serve as a +precursor to a formal treatment of schema.org""" +http://lists.w3.org/Archives/Public/public-vocabs/2014Jan/0025.html|title|schema.org as it could be from Peter F. 
Patel-Schneider on 2014-01-06 (public-vocabs@w3.org from January 2014) +http://lists.w3.org/Archives/Public/public-vocabs/2014Jan/0025.html|creationTime|2014-04-30T13:23:01Z +http://www.lemonde.fr/planete/article/2013/02/13/la-guerilla-judiciaire-des-geants-des-semences-contre-les-fermiers-americains_1832036_3244.html|creationDate|2013-02-14 +http://www.lemonde.fr/planete/article/2013/02/13/la-guerilla-judiciaire-des-geants-des-semences-contre-les-fermiers-americains_1832036_3244.html|tag|http://www.semanlink.net/tag/monsanto +http://www.lemonde.fr/planete/article/2013/02/13/la-guerilla-judiciaire-des-geants-des-semences-contre-les-fermiers-americains_1832036_3244.html|tag|http://www.semanlink.net/tag/agriculture_industrielle +http://www.lemonde.fr/planete/article/2013/02/13/la-guerilla-judiciaire-des-geants-des-semences-contre-les-fermiers-americains_1832036_3244.html|tag|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.lemonde.fr/planete/article/2013/02/13/la-guerilla-judiciaire-des-geants-des-semences-contre-les-fermiers-americains_1832036_3244.html|tag|http://www.semanlink.net/tag/ogm +http://www.lemonde.fr/planete/article/2013/02/13/la-guerilla-judiciaire-des-geants-des-semences-contre-les-fermiers-americains_1832036_3244.html|title|La guérilla judiciaire des géants des semences contre les fermiers américains +http://www.lemonde.fr/planete/article/2013/02/13/la-guerilla-judiciaire-des-geants-des-semences-contre-les-fermiers-americains_1832036_3244.html|creationTime|2013-02-14T08:41:32Z +http://iandavis.com/blog/2007/09/mvc-obscures-the-mechanics-of-the-web|creationDate|2007-09-27 +http://iandavis.com/blog/2007/09/mvc-obscures-the-mechanics-of-the-web|tag|http://www.semanlink.net/tag/mvc +http://iandavis.com/blog/2007/09/mvc-obscures-the-mechanics-of-the-web|comment|Some links on the mismatch between MVC and the Web +http://iandavis.com/blog/2007/09/mvc-obscures-the-mechanics-of-the-web|title|Internet Alchemy » MVC Obscures the Mechanics of the Web +http://iandavis.com/blog/2007/09/mvc-obscures-the-mechanics-of-the-web|creationTime|2007-09-27T21:24:05Z +http://www.cio.co.uk/news/r-and-d/self-driving-cars-hit-swedish-public-roads/|creationDate|2013-12-04 +http://www.cio.co.uk/news/r-and-d/self-driving-cars-hit-swedish-public-roads/|tag|http://www.semanlink.net/tag/driverless_car +http://www.cio.co.uk/news/r-and-d/self-driving-cars-hit-swedish-public-roads/|title|Self-driving cars to hit Swedish public roads +http://www.cio.co.uk/news/r-and-d/self-driving-cars-hit-swedish-public-roads/|creationTime|2013-12-04T16:50:54Z +https://www.novetta.com/2018/08/evaluating-solutions-for-named-entity-recognition/|creationDate|2019-04-11 +https://www.novetta.com/2018/08/evaluating-solutions-for-named-entity-recognition/|tag|http://www.semanlink.net/tag/survey +https://www.novetta.com/2018/08/evaluating-solutions-for-named-entity-recognition/|tag|http://www.semanlink.net/tag/named_entity_recognition +https://www.novetta.com/2018/08/evaluating-solutions-for-named-entity-recognition/|title|Evaluating Solutions for Named Entity Recognition Novetta.com (2018) +https://www.novetta.com/2018/08/evaluating-solutions-for-named-entity-recognition/|creationTime|2019-04-11T01:15:43Z +http://lists.w3.org/Archives/Public/public-vocabs/2013Oct/0037.html|creationDate|2013-10-05 +http://lists.w3.org/Archives/Public/public-vocabs/2013Oct/0037.html|tag|http://www.semanlink.net/tag/guha +http://lists.w3.org/Archives/Public/public-vocabs/2013Oct/0037.html|tag|http://www.semanlink.net/tag/skos 
+http://lists.w3.org/Archives/Public/public-vocabs/2013Oct/0037.html|tag|http://www.semanlink.net/tag/schema_org +http://lists.w3.org/Archives/Public/public-vocabs/2013Oct/0037.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2013Oct/0037.html|comment|"> Schema.org encourages the use of multiple vocabularies +" +http://lists.w3.org/Archives/Public/public-vocabs/2013Oct/0037.html|title|Re: SKOS for schema.org proposal for discussion from Guha on 2013-10-03 (public-vocabs@w3.org from October 2013) +http://lists.w3.org/Archives/Public/public-vocabs/2013Oct/0037.html|creationTime|2013-10-05T12:16:38Z +http://blog.heppresearch.com/2015/07/09/google-product-rich-snippets-for-multiple-products-on-a-page/|creationDate|2015-07-09 +http://blog.heppresearch.com/2015/07/09/google-product-rich-snippets-for-multiple-products-on-a-page/|tag|http://www.semanlink.net/tag/schema_org +http://blog.heppresearch.com/2015/07/09/google-product-rich-snippets-for-multiple-products-on-a-page/|tag|http://www.semanlink.net/tag/google_rich_snippets +http://blog.heppresearch.com/2015/07/09/google-product-rich-snippets-for-multiple-products-on-a-page/|tag|http://www.semanlink.net/tag/martin_hepp +http://blog.heppresearch.com/2015/07/09/google-product-rich-snippets-for-multiple-products-on-a-page/|title|Google Product Rich Snippets for Multiple Products on a Page The Hepp Research Blog on Marketing with Data +http://blog.heppresearch.com/2015/07/09/google-product-rich-snippets-for-multiple-products-on-a-page/|creationTime|2015-07-09T13:11:40Z +https://medium.com/@karpathy/software-2-0-a64152b37c35|creationDate|2017-11-12 +https://medium.com/@karpathy/software-2-0-a64152b37c35|tag|http://www.semanlink.net/tag/software +https://medium.com/@karpathy/software-2-0-a64152b37c35|tag|http://www.semanlink.net/tag/andrej_karpathy +https://medium.com/@karpathy/software-2-0-a64152b37c35|tag|http://www.semanlink.net/tag/artificial_neural_network +https://medium.com/@karpathy/software-2-0-a64152b37c35|comment|> Neural networks are not just another classifier, they represent the beginning of a fundamental shift in how we write software. 
They are Software 2.0 +https://medium.com/@karpathy/software-2-0-a64152b37c35|title|Software 2.0 – Andrej Karpathy – Medium +https://medium.com/@karpathy/software-2-0-a64152b37c35|creationTime|2017-11-12T11:56:35Z +http://www.arbres.org/arbres_remarquables.html|creationDate|2017-10-06 +http://www.arbres.org/arbres_remarquables.html|tag|http://www.semanlink.net/tag/arbres_remarquables +http://www.arbres.org/arbres_remarquables.html|title|Les arbres remarquables +http://www.arbres.org/arbres_remarquables.html|creationTime|2017-10-06T22:20:20Z +http://www.feynmanlectures.caltech.edu/|creationDate|2014-09-10 +http://www.feynmanlectures.caltech.edu/|tag|http://www.semanlink.net/tag/physique +http://www.feynmanlectures.caltech.edu/|tag|http://www.semanlink.net/tag/feynman +http://www.feynmanlectures.caltech.edu/|title|The Feynman Lectures on Physics +http://www.feynmanlectures.caltech.edu/|creationTime|2014-09-10T00:22:26Z +http://neurosciencenews.com/machine-learning-vision-3312/|creationDate|2015-12-26 +http://neurosciencenews.com/machine-learning-vision-3312/|tag|http://www.semanlink.net/tag/neuroscience +http://neurosciencenews.com/machine-learning-vision-3312/|comment|Poggio’s group developed an invariant representation of objects that’s based on dot-products +http://neurosciencenews.com/machine-learning-vision-3312/|title|Machines That Learn Like Humans Neuroscience News +http://neurosciencenews.com/machine-learning-vision-3312/|creationTime|2015-12-26T01:28:02Z +https://cs230-stanford.github.io/pytorch-getting-started.html|creationDate|2019-04-03 +https://cs230-stanford.github.io/pytorch-getting-started.html|tag|http://www.semanlink.net/tag/pytorch +https://cs230-stanford.github.io/pytorch-getting-started.html|tag|http://www.semanlink.net/tag/nlp_stanford +https://cs230-stanford.github.io/pytorch-getting-started.html|title|Introduction to PyTorch Code Examples +https://cs230-stanford.github.io/pytorch-getting-started.html|creationTime|2019-04-03T13:57:45Z +http://lists.w3.org/Archives/Public/public-hydra/2014Nov/0088.html|creationDate|2014-11-26 +http://lists.w3.org/Archives/Public/public-hydra/2014Nov/0088.html|tag|http://www.semanlink.net/tag/linked_data_fragments +http://lists.w3.org/Archives/Public/public-hydra/2014Nov/0088.html|tag|http://www.semanlink.net/tag/rdf_forms +http://lists.w3.org/Archives/Public/public-hydra/2014Nov/0088.html|tag|http://www.semanlink.net/tag/hydra +http://lists.w3.org/Archives/Public/public-hydra/2014Nov/0088.html|comment|Triple Pattern Fragments are just the Linked Data principles, but instead of only hyperlinks, it's hyperlinks and forms, like the rest of the Web +http://lists.w3.org/Archives/Public/public-hydra/2014Nov/0088.html|title|Re: TPF and DBMSes (was Re: Hydra and Shapes) from Ruben Verborgh on 2014-11-26 (public-hydra@w3.org from November 2014) +http://lists.w3.org/Archives/Public/public-hydra/2014Nov/0088.html|creationTime|2014-11-26T13:04:38Z +https://www.quora.com/How-does-word2vec-work-Can-someone-walk-through-a-specific-example|creationDate|2017-08-28 +https://www.quora.com/How-does-word2vec-work-Can-someone-walk-through-a-specific-example|tag|http://www.semanlink.net/tag/word2vec +https://www.quora.com/How-does-word2vec-work-Can-someone-walk-through-a-specific-example|title|How does word2vec work? Can someone walk through a specific example? 
- Quora +https://www.quora.com/How-does-word2vec-work-Can-someone-walk-through-a-specific-example|creationTime|2017-08-28T16:26:41Z +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|creationDate|2019-02-09 +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|tag|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|tag|http://www.semanlink.net/tag/rake +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|tag|http://www.semanlink.net/tag/simple_idea +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|comment|"> We found that tokenizing using stopwords + non word characters was really useful for ""finding"" the keywords + +[keywords2vec](/doc/2019/02/keywords2vec)" +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|relatedDoc|http://www.semanlink.net/doc/2019/02/keywords2vec +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|title|"Jeremy Howard on Twitter: ""Such a ridiculously simple idea couldn't possibly work, could it? Or... could it? """ +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|bookmarkOf|https://twitter.com/jeremyphoward/status/1094025901371621376?lang=en +http://www.semanlink.net/doc/2019/02/jeremy_howard_on_twitter_such|creationTime|2019-02-09T01:42:55Z +https://www.nytimes.com/2017/10/04/science/ancient-viruses-dna-genome.html|creationDate|2017-10-04 +https://www.nytimes.com/2017/10/04/science/ancient-viruses-dna-genome.html|tag|http://www.semanlink.net/tag/horizontal_gene_transfer +https://www.nytimes.com/2017/10/04/science/ancient-viruses-dna-genome.html|tag|http://www.semanlink.net/tag/adn +https://www.nytimes.com/2017/10/04/science/ancient-viruses-dna-genome.html|tag|http://www.semanlink.net/tag/virus +https://www.nytimes.com/2017/10/04/science/ancient-viruses-dna-genome.html|title|Ancient Viruses Are Buried in Your DNA - The New York Times +https://www.nytimes.com/2017/10/04/science/ancient-viruses-dna-genome.html|creationTime|2017-10-04T23:52:24Z +http://heliosearch.org/advanced-filter-caching-in-solr/|creationDate|2015-03-14 +http://heliosearch.org/advanced-filter-caching-in-solr/|tag|http://www.semanlink.net/tag/solr +http://heliosearch.org/advanced-filter-caching-in-solr/|title|Solr Filter Caching - Solr Evolved +http://heliosearch.org/advanced-filter-caching-in-solr/|creationTime|2015-03-14T23:15:04Z +http://streamplayer.free.fr/|creationDate|2008-07-03 +http://streamplayer.free.fr/|tag|http://www.semanlink.net/tag/gilles_lepin +http://streamplayer.free.fr/|title|Stream Player +http://streamplayer.free.fr/|creationTime|2008-07-03T23:12:28Z +http://www.comlab.ox.ac.uk/ian.horrocks/Publications/download/2007/MoHS07a.pdf|creationDate|2010-09-06 +http://www.comlab.ox.ac.uk/ian.horrocks/Publications/download/2007/MoHS07a.pdf|tag|http://www.semanlink.net/tag/owl +http://www.comlab.ox.ac.uk/ian.horrocks/Publications/download/2007/MoHS07a.pdf|tag|http://www.semanlink.net/tag/ian_horrocks +http://www.comlab.ox.ac.uk/ian.horrocks/Publications/download/2007/MoHS07a.pdf|tag|http://www.semanlink.net/tag/ulrike_sattler +http://www.comlab.ox.ac.uk/ian.horrocks/Publications/download/2007/MoHS07a.pdf|title|Bridging the Gap Between OWL and Relational Databases 
+http://www.comlab.ox.ac.uk/ian.horrocks/Publications/download/2007/MoHS07a.pdf|creationTime|2010-09-06T22:05:50Z +http://semanticweb.com/tag/francois-paul-servant|creationDate|2012-02-08 +http://semanticweb.com/tag/francois-paul-servant|tag|http://www.semanlink.net/tag/fps +http://semanticweb.com/tag/francois-paul-servant|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://semanticweb.com/tag/francois-paul-servant|title|François-Paul Servant - semanticweb.com +http://semanticweb.com/tag/francois-paul-servant|creationTime|2012-02-08T12:52:18Z +http://philippe-boulet-gercourt.blogs.nouvelobs.com/archive/2008/03/20/l-integrale-du-discours-d-obama-en-francais.html|creationDate|2008-11-07 +http://philippe-boulet-gercourt.blogs.nouvelobs.com/archive/2008/03/20/l-integrale-du-discours-d-obama-en-francais.html|tag|http://www.semanlink.net/tag/obama +http://philippe-boulet-gercourt.blogs.nouvelobs.com/archive/2008/03/20/l-integrale-du-discours-d-obama-en-francais.html|title|L'intégrale du discours d'Obama en français : Made in USA +http://philippe-boulet-gercourt.blogs.nouvelobs.com/archive/2008/03/20/l-integrale-du-discours-d-obama-en-francais.html|creationTime|2008-11-07T01:47:17Z +https://www.fastly.com/blog/best-practices-for-using-the-vary-header|creationDate|2016-04-01 +https://www.fastly.com/blog/best-practices-for-using-the-vary-header|tag|http://www.semanlink.net/tag/vary_header +https://www.fastly.com/blog/best-practices-for-using-the-vary-header|title|Best Practices for Using the Vary Header Fastly +https://www.fastly.com/blog/best-practices-for-using-the-vary-header|creationTime|2016-04-01T09:52:50Z +http://muto.socialtagging.org/core/v1.html|creationDate|2011-11-17 +http://muto.socialtagging.org/core/v1.html|tag|http://www.semanlink.net/tag/ontologies +http://muto.socialtagging.org/core/v1.html|tag|http://www.semanlink.net/tag/tagging +http://muto.socialtagging.org/core/v1.html|title|"""Modular Unified Tagging Ontology (MUTO)""" +http://muto.socialtagging.org/core/v1.html|creationTime|2011-11-17T20:47:34Z +http://www.esa.int/Our_Activities/Space_Science/Rosetta|creationDate|2014-11-15 +http://www.esa.int/Our_Activities/Space_Science/Rosetta|tag|http://www.semanlink.net/tag/esa +http://www.esa.int/Our_Activities/Space_Science/Rosetta|tag|http://www.semanlink.net/tag/rosetta +http://www.esa.int/Our_Activities/Space_Science/Rosetta|title|Rosetta / Space Science / Our Activities / ESA +http://www.esa.int/Our_Activities/Space_Science/Rosetta|creationTime|2014-11-15T13:42:46Z +http://piketty.blog.lemonde.fr/2017/06/13/reagan-puissance-dix/|creationDate|2017-06-15 +http://piketty.blog.lemonde.fr/2017/06/13/reagan-puissance-dix/|tag|http://www.semanlink.net/tag/thomas_piketty +http://piketty.blog.lemonde.fr/2017/06/13/reagan-puissance-dix/|tag|http://www.semanlink.net/tag/trump +http://piketty.blog.lemonde.fr/2017/06/13/reagan-puissance-dix/|tag|http://www.semanlink.net/tag/reagan +http://piketty.blog.lemonde.fr/2017/06/13/reagan-puissance-dix/|title|Reagan puissance dix Le blog de Thomas Piketty +http://piketty.blog.lemonde.fr/2017/06/13/reagan-puissance-dix/|creationTime|2017-06-15T23:40:19Z +http://www.ted.com/talks/luis_von_ahn_massive_scale_online_collaboration.html|creationDate|2013-05-16 +http://www.ted.com/talks/luis_von_ahn_massive_scale_online_collaboration.html|tag|http://www.semanlink.net/tag/ted +http://www.ted.com/talks/luis_von_ahn_massive_scale_online_collaboration.html|tag|http://www.semanlink.net/tag/luis_von_ahn 
+http://www.ted.com/talks/luis_von_ahn_massive_scale_online_collaboration.html|title|Luis von Ahn : Collaboration en ligne à très grande échelle. Video on TED.com +http://www.ted.com/talks/luis_von_ahn_massive_scale_online_collaboration.html|creationTime|2013-05-16T00:13:36Z +https://www.jair.org/index.php/jair/index|creationDate|2018-07-08 +https://www.jair.org/index.php/jair/index|tag|http://www.semanlink.net/tag/ai_book +https://www.jair.org/index.php/jair/index|title|Journal of Artificial Intelligence Research +https://www.jair.org/index.php/jair/index|creationTime|2018-07-08T19:14:37Z +https://blog.jooq.org/2015/12/08/3-reasons-why-you-shouldnt-replace-your-for-loops-by-stream-foreach/|creationDate|2016-10-31 +https://blog.jooq.org/2015/12/08/3-reasons-why-you-shouldnt-replace-your-for-loops-by-stream-foreach/|tag|http://www.semanlink.net/tag/java_8 +https://blog.jooq.org/2015/12/08/3-reasons-why-you-shouldnt-replace-your-for-loops-by-stream-foreach/|title|3 Reasons why You Shouldn’t Replace Your for-loops by Stream.forEach() – Java, SQL and jOOQ. +https://blog.jooq.org/2015/12/08/3-reasons-why-you-shouldnt-replace-your-for-loops-by-stream-foreach/|creationTime|2016-10-31T09:21:43Z +http://www.newyorker.com/reporting/2007/12/03/071203fa_fact_specter|creationDate|2007-12-10 +http://www.newyorker.com/reporting/2007/12/03/071203fa_fact_specter|tag|http://www.semanlink.net/tag/virus +http://www.newyorker.com/reporting/2007/12/03/071203fa_fact_specter|tag|http://www.semanlink.net/tag/darwin +http://www.newyorker.com/reporting/2007/12/03/071203fa_fact_specter|comment|Why are evolutionary biologists bringing back extinct deadly viruses? +http://www.newyorker.com/reporting/2007/12/03/071203fa_fact_specter|title|Annals of Science: Darwin’s Surprise: Reporting & Essays: The New Yorker +http://www.newyorker.com/reporting/2007/12/03/071203fa_fact_specter|creationTime|2007-12-10T15:17:32Z +http://swig.hpclab.ceid.upatras.gr/SKOS/Skos2Owl2|creationDate|2013-02-07 +http://swig.hpclab.ceid.upatras.gr/SKOS/Skos2Owl2|tag|http://www.semanlink.net/tag/skos_owl +http://swig.hpclab.ceid.upatras.gr/SKOS/Skos2Owl2|title|SKOS in OWL 2 - SWIGroup Wiki +http://swig.hpclab.ceid.upatras.gr/SKOS/Skos2Owl2|creationTime|2013-02-07T09:02:57Z +http://blog.octo.com/designer-une-api-rest/|creationDate|2015-11-16 +http://blog.octo.com/designer-une-api-rest/|tag|http://www.semanlink.net/tag/rest +http://blog.octo.com/designer-une-api-rest/|title|Designer une API REST OCTO talks ! +http://blog.octo.com/designer-une-api-rest/|creationTime|2015-11-16T11:40:07Z +https://arxiv.org/abs/1511.08855|creationDate|2017-11-19 +https://arxiv.org/abs/1511.08855|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1511.08855|tag|http://www.semanlink.net/tag/semantic_folding +https://arxiv.org/abs/1511.08855|tag|http://www.semanlink.net/tag/semantic_fingerprints +https://arxiv.org/abs/1511.08855|arxiv_author|Francisco De Sousa Webber +https://arxiv.org/abs/1511.08855|title|[1511.08855] Semantic Folding Theory And its Application in Semantic Fingerprinting +https://arxiv.org/abs/1511.08855|creationTime|2017-11-19T15:59:15Z +https://arxiv.org/abs/1511.08855|arxiv_summary|"Human language is recognized as a very complex domain since decades. No +computer system has been able to reach human levels of performance so far. The +only known computational system capable of proper language processing is the +human brain. While we gather more and more data about the brain, its +fundamental computational processes still remain obscure. 
The lack of a sound +computational brain theory also prevents the fundamental understanding of +Natural Language Processing. As always when science lacks a theoretical +foundation, statistical modeling is applied to accommodate as many sampled +real-world data as possible. An unsolved fundamental issue is the actual +representation of language (data) within the brain, denoted as the +Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal +Memory (HTM) theory, a consistent computational theory of the human cortex, we +have developed a corresponding theory of language data representation: The +Semantic Folding Theory. The process of encoding words, by using a topographic +semantic space as distributional reference frame into a sparse binary +representational vector is called Semantic Folding and is the central topic of +this document. Semantic Folding describes a method of converting language from +its symbolic representation (text) into an explicit, semantically grounded +representation that can be generically processed by Hawkins' HTM networks. As +it turned out, this change in representation, by itself, can solve many complex +NLP problems by applying Boolean operators and a generic similarity function +like the Euclidian Distance. Many practical problems of statistical NLP +systems, like the high cost of computation, the fundamental incongruity of +precision and recall , the complex tuning procedures etc., can be elegantly +overcome by applying Semantic Folding." +https://arxiv.org/abs/1511.08855|arxiv_firstAuthor|Francisco De Sousa Webber +https://arxiv.org/abs/1511.08855|arxiv_updated|2016-03-16T22:04:51Z +https://arxiv.org/abs/1511.08855|arxiv_title|Semantic Folding Theory And its Application in Semantic Fingerprinting +https://arxiv.org/abs/1511.08855|arxiv_published|2015-11-28T00:13:09Z +https://arxiv.org/abs/1511.08855|arxiv_num|1511.08855 +http://www.w3.org/TR/swbp-skos-core-spec/|creationDate|2006-12-23 +http://www.w3.org/TR/swbp-skos-core-spec/|tag|http://www.semanlink.net/tag/skos_w3c_document +http://www.w3.org/TR/swbp-skos-core-spec/|tag|http://www.semanlink.net/tag/deprecated +http://www.w3.org/TR/swbp-skos-core-spec/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/TR/swbp-skos-core-spec/|title|SKOS Core Vocabulary Specification +https://www.technologyreview.com/s/602344/the-extraordinary-link-between-deep-neural-networks-and-the-nature-of-the-universe/|creationDate|2016-09-11 +https://www.technologyreview.com/s/602344/the-extraordinary-link-between-deep-neural-networks-and-the-nature-of-the-universe/|tag|http://www.semanlink.net/tag/deep_learning +https://www.technologyreview.com/s/602344/the-extraordinary-link-between-deep-neural-networks-and-the-nature-of-the-universe/|tag|http://www.semanlink.net/tag/dl_why_does_it_work +https://www.technologyreview.com/s/602344/the-extraordinary-link-between-deep-neural-networks-and-the-nature-of-the-universe/|comment|Nobody understands why deep neural networks are so good at solving complex problems. Now physicists say the secret is buried in the laws of physics. 
+https://www.technologyreview.com/s/602344/the-extraordinary-link-between-deep-neural-networks-and-the-nature-of-the-universe/|title|The Extraordinary Link Between Deep Neural Networks and the Nature of the Universe +https://www.technologyreview.com/s/602344/the-extraordinary-link-between-deep-neural-networks-and-the-nature-of-the-universe/|creationTime|2016-09-11T00:38:49Z +http://www.ontoprise.de/de/en/home/news/news-en/strategic-partnership.html|creationDate|2008-10-17 +http://www.ontoprise.de/de/en/home/news/news-en/strategic-partnership.html|tag|http://www.semanlink.net/tag/sharepoint +http://www.ontoprise.de/de/en/home/news/news-en/strategic-partnership.html|tag|http://www.semanlink.net/tag/ontoprise +http://www.ontoprise.de/de/en/home/news/news-en/strategic-partnership.html|title|Ontoprise brings semantic technologies to Microsoft SharePoint +http://www.ontoprise.de/de/en/home/news/news-en/strategic-partnership.html|creationTime|2008-10-17T18:26:39Z +http://www.ultralingua.com/onlinedictionary/|creationDate|2007-02-24 +http://www.ultralingua.com/onlinedictionary/|tag|http://www.semanlink.net/tag/anglais +http://www.ultralingua.com/onlinedictionary/|tag|http://www.semanlink.net/tag/ultralingua +http://www.ultralingua.com/onlinedictionary/|title|Online Dictionary for French English, Spanish English, Italian English, and more. +http://www.ultralingua.com/onlinedictionary/|creationTime|2007-02-24T02:24:37Z +http://www.newscientist.com/article/dn25044-a-history-of-the-first-americans-in-9-sites.html?full=true#.UvwMx_2Ciww|creationDate|2014-02-13 +http://www.newscientist.com/article/dn25044-a-history-of-the-first-americans-in-9-sites.html?full=true#.UvwMx_2Ciww|tag|http://www.semanlink.net/tag/first_americans +http://www.newscientist.com/article/dn25044-a-history-of-the-first-americans-in-9-sites.html?full=true#.UvwMx_2Ciww|title|A history of the first Americans in 9½ sites - life - 13 February 2014 - New Scientist +http://www.newscientist.com/article/dn25044-a-history-of-the-first-americans-in-9-sites.html?full=true#.UvwMx_2Ciww|creationTime|2014-02-13T01:08:23Z +http://neuralnetworksanddeeplearning.com/chap2.html|creationDate|2017-08-21 +http://neuralnetworksanddeeplearning.com/chap2.html|tag|http://www.semanlink.net/tag/backpropagation +http://neuralnetworksanddeeplearning.com/chap2.html|title|How the backpropagation algorithm works +http://neuralnetworksanddeeplearning.com/chap2.html|creationTime|2017-08-21T16:42:49Z +http://cloud.feedly.com/#latest|creationDate|2013-07-10 +http://cloud.feedly.com/#latest|tag|http://www.semanlink.net/tag/rss +http://cloud.feedly.com/#latest|title|Feedly +http://cloud.feedly.com/#latest|creationTime|2013-07-10T23:23:19Z +http://www.personalinfocloud.com/2005/02/explaining_and_.html|creationDate|2005-04-30 +http://www.personalinfocloud.com/2005/02/explaining_and_.html|tag|http://www.semanlink.net/tag/tagging +http://www.personalinfocloud.com/2005/02/explaining_and_.html|tag|http://www.semanlink.net/tag/folksonomy +http://www.personalinfocloud.com/2005/02/explaining_and_.html|tag|http://www.semanlink.net/tag/social_bookmarking +http://www.personalinfocloud.com/2005/02/explaining_and_.html|title|Explaining and Showing Broad and Narrow Folksonomies +http://ebusiness-unibw.org/pipermail/goodrelations/2010-May/000215.html|creationDate|2010-05-03 +http://ebusiness-unibw.org/pipermail/goodrelations/2010-May/000215.html|tag|http://www.semanlink.net/tag/goodrelations +http://ebusiness-unibw.org/pipermail/goodrelations/2010-May/000215.html|title|GoodRelations vs. 
Google RDFa vs. Open Graph vs. hProduct/hListing: Using GoodRelations in 10 Triples +http://ebusiness-unibw.org/pipermail/goodrelations/2010-May/000215.html|creationTime|2010-05-03T11:38:33Z +https://lejournal.cnrs.fr/articles/leconomie-malade-de-ses-modeles?utm_content=buffer8bbc6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-05-15 +https://lejournal.cnrs.fr/articles/leconomie-malade-de-ses-modeles?utm_content=buffer8bbc6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/modeles_economiques +https://lejournal.cnrs.fr/articles/leconomie-malade-de-ses-modeles?utm_content=buffer8bbc6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|L’économie malade de ses modèles CNRS Le journal +https://lejournal.cnrs.fr/articles/leconomie-malade-de-ses-modeles?utm_content=buffer8bbc6&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-05-15T13:52:58Z +https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html|creationDate|2017-04-28 +https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html|tag|http://www.semanlink.net/tag/elasticsearch +https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html|title|More Like This Query Elasticsearch Reference +https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html|creationTime|2017-04-28T14:38:23Z +http://junit.sourceforge.net/#Getting|creationDate|2009-04-24 +http://junit.sourceforge.net/#Getting|tag|http://www.semanlink.net/tag/junit +http://junit.sourceforge.net/#Getting|title|JUnit: Getting started (using JUnit 4) +http://junit.sourceforge.net/#Getting|creationTime|2009-04-24T23:06:31Z +http://www.avajava.com/tutorials/lessons/how-do-i-import-a-maven-project-into-eclipse.html|creationDate|2012-08-14 +http://www.avajava.com/tutorials/lessons/how-do-i-import-a-maven-project-into-eclipse.html|tag|http://www.semanlink.net/tag/maven +http://www.avajava.com/tutorials/lessons/how-do-i-import-a-maven-project-into-eclipse.html|tag|http://www.semanlink.net/tag/eclipse_project +http://www.avajava.com/tutorials/lessons/how-do-i-import-a-maven-project-into-eclipse.html|title|How do I import a Maven project into Eclipse? - Web Tutorials - avajava.com +http://www.avajava.com/tutorials/lessons/how-do-i-import-a-maven-project-into-eclipse.html|creationTime|2012-08-14T00:19:49Z +http://nerd.eurecom.fr/|creationDate|2013-07-10 +http://nerd.eurecom.fr/|tag|http://www.semanlink.net/tag/raphael_troncy +http://nerd.eurecom.fr/|tag|http://www.semanlink.net/tag/named_entity_recognition +http://nerd.eurecom.fr/|comment|NERD proposes a web framework which unifies numerous named entity extractors using the NERD ontology which provides a rich set of axioms aligning the taxonomies of these tools. 
+http://nerd.eurecom.fr/|title|NERD: Named Entity Recognition and Disambiguation +http://nerd.eurecom.fr/|creationTime|2013-07-10T22:13:47Z +http://www.tbray.org/ongoing/When/200x/2005/03/11/WSInTheSpring|creationDate|2005-10-13 +http://www.tbray.org/ongoing/When/200x/2005/03/11/WSInTheSpring|tag|http://www.semanlink.net/tag/tim_bray +http://www.tbray.org/ongoing/When/200x/2005/03/11/WSInTheSpring|tag|http://www.semanlink.net/tag/web_services +http://www.tbray.org/ongoing/When/200x/2005/03/11/WSInTheSpring|title|Web Services: Spring 2005 Roundup +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4402442.stm|creationDate|2005-11-04 +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4402442.stm|tag|http://www.semanlink.net/tag/bibliotheque_numerique +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4402442.stm|tag|http://www.semanlink.net/tag/google +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4402442.stm|tag|http://www.semanlink.net/tag/microsoft +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/technology/4402442.stm|title|BBC NEWS Microsoft scans British Library +https://lists.w3.org/Archives/Public/semantic-web/2018Oct/0052.html|creationDate|2018-10-16 +https://lists.w3.org/Archives/Public/semantic-web/2018Oct/0052.html|tag|http://www.semanlink.net/tag/semantic_web_w3_org +https://lists.w3.org/Archives/Public/semantic-web/2018Oct/0052.html|tag|http://www.semanlink.net/tag/dan_brickley +https://lists.w3.org/Archives/Public/semantic-web/2018Oct/0052.html|tag|http://www.semanlink.net/tag/rdf +https://lists.w3.org/Archives/Public/semantic-web/2018Oct/0052.html|title|RDF(-DEV), back to the future (was Re: Semantic Web Interest Group now closed) from Dan Brickley on 2018-10-16 (semantic-web@w3.org from October 2018) +https://lists.w3.org/Archives/Public/semantic-web/2018Oct/0052.html|creationTime|2018-10-16T09:36:31Z +https://futurism.com/technological-unemployment-the-real-reason-this-elephant-chart-is-terrifying/|creationDate|2017-01-23 +https://futurism.com/technological-unemployment-the-real-reason-this-elephant-chart-is-terrifying/|tag|http://www.semanlink.net/tag/jobbotization +https://futurism.com/technological-unemployment-the-real-reason-this-elephant-chart-is-terrifying/|tag|http://www.semanlink.net/tag/guaranteed_basic_income +https://futurism.com/technological-unemployment-the-real-reason-this-elephant-chart-is-terrifying/|tag|http://www.semanlink.net/tag/chomage +https://futurism.com/technological-unemployment-the-real-reason-this-elephant-chart-is-terrifying/|comment|Technology has gotten so cheap that it is now more economically viable to buy robots than it is to pay people $5 a day +https://futurism.com/technological-unemployment-the-real-reason-this-elephant-chart-is-terrifying/|title|Technological Unemployment: The Real Reason This Elephant Chart is Terrifying +https://futurism.com/technological-unemployment-the-real-reason-this-elephant-chart-is-terrifying/|creationTime|2017-01-23T10:58:09Z +http://ajaxpatterns.org/Browser-Side_XSLT|creationDate|2008-02-23 +http://ajaxpatterns.org/Browser-Side_XSLT|tag|http://www.semanlink.net/tag/client_side_xslt +http://ajaxpatterns.org/Browser-Side_XSLT|title|Browser-Side XSLT - Ajax Patterns +http://ajaxpatterns.org/Browser-Side_XSLT|creationTime|2008-02-23T09:12:18Z +https://groups.google.com/forum/#!topic/hypermedia-web/j0oGkhTetcY|creationDate|2016-01-04 
+https://groups.google.com/forum/#!topic/hypermedia-web/j0oGkhTetcY|tag|http://www.semanlink.net/tag/ruben_verborgh +https://groups.google.com/forum/#!topic/hypermedia-web/j0oGkhTetcY|tag|http://www.semanlink.net/tag/hateoas +https://groups.google.com/forum/#!topic/hypermedia-web/j0oGkhTetcY|title|What does it mean to not use hypermedia? - Google Groups +https://groups.google.com/forum/#!topic/hypermedia-web/j0oGkhTetcY|creationTime|2016-01-04T11:58:54Z +http://en.wikipedia.org/wiki/Spike-triggered_average|creationDate|2014-01-18 +http://en.wikipedia.org/wiki/Spike-triggered_average|tag|http://www.semanlink.net/tag/coursera_computational_neuroscience +http://en.wikipedia.org/wiki/Spike-triggered_average|comment|week 2 +http://en.wikipedia.org/wiki/Spike-triggered_average|title|Spike-triggered average - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Spike-triggered_average|creationTime|2014-01-18T16:56:31Z +http://www.youtube.com/user/mathematicalmonk/videos?tag_id=&sort=dd&view=1|creationDate|2013-05-24 +http://www.youtube.com/user/mathematicalmonk/videos?tag_id=&sort=dd&view=1|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/user/mathematicalmonk/videos?tag_id=&sort=dd&view=1|tag|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.youtube.com/user/mathematicalmonk/videos?tag_id=&sort=dd&view=1|tag|http://www.semanlink.net/tag/machine_learning +http://www.youtube.com/user/mathematicalmonk/videos?tag_id=&sort=dd&view=1|tag|http://www.semanlink.net/tag/information_theory +http://www.youtube.com/user/mathematicalmonk/videos?tag_id=&sort=dd&view=1|title|mathematicalmonk's channel - YouTube +http://www.youtube.com/user/mathematicalmonk/videos?tag_id=&sort=dd&view=1|creationTime|2013-05-24T22:43:37Z +https://www.reddit.com/r/Bitcoin/comments/1lfobc/i_am_a_timetraveler_from_the_future_here_to_beg/|creationDate|2018-01-21 +https://www.reddit.com/r/Bitcoin/comments/1lfobc/i_am_a_timetraveler_from_the_future_here_to_beg/|tag|http://www.semanlink.net/tag/bitcoin +https://www.reddit.com/r/Bitcoin/comments/1lfobc/i_am_a_timetraveler_from_the_future_here_to_beg/|tag|http://www.semanlink.net/tag/anticipation +https://www.reddit.com/r/Bitcoin/comments/1lfobc/i_am_a_timetraveler_from_the_future_here_to_beg/|title|I am a time-traveler from the future, here to beg you to stop what you are doing. 
: Bitcoin +https://www.reddit.com/r/Bitcoin/comments/1lfobc/i_am_a_timetraveler_from_the_future_here_to_beg/|creationTime|2018-01-21T23:37:53Z +http://blog.iks-project.eu/semantic-ui-development-with-vie/|creationDate|2012-06-15 +http://blog.iks-project.eu/semantic-ui-development-with-vie/|tag|http://www.semanlink.net/tag/sebastian_germesin +http://blog.iks-project.eu/semantic-ui-development-with-vie/|tag|http://www.semanlink.net/tag/vie_vienna_iks_editables +http://blog.iks-project.eu/semantic-ui-development-with-vie/|title|Semantic UI Development with VIE IKS Blog – The Semantic CMS Community +http://blog.iks-project.eu/semantic-ui-development-with-vie/|creationTime|2012-06-15T17:35:14Z +http://www.snee.com/bobdc.blog/2009/05/semantic-web-for-the-working-o.html|creationDate|2009-05-28 +http://www.snee.com/bobdc.blog/2009/05/semantic-web-for-the-working-o.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2009/05/semantic-web-for-the-working-o.html|tag|http://www.semanlink.net/tag/semantic_web_and_oop +http://www.snee.com/bobdc.blog/2009/05/semantic-web-for-the-working-o.html|tag|http://www.semanlink.net/tag/owl +http://www.snee.com/bobdc.blog/2009/05/semantic-web-for-the-working-o.html|title|"""Semantic Web for the Working Ontologist"" - bobdc.blog" +http://www.snee.com/bobdc.blog/2009/05/semantic-web-for-the-working-o.html|creationTime|2009-05-28T23:35:47Z +https://nlp.stanford.edu/software/tagger.shtml|creationDate|2017-07-11 +https://nlp.stanford.edu/software/tagger.shtml|tag|http://www.semanlink.net/tag/stanford_pos_tagger +https://nlp.stanford.edu/software/tagger.shtml|title|Stanford Log-linear Part-Of-Speech Tagger +https://nlp.stanford.edu/software/tagger.shtml|creationTime|2017-07-11T15:25:58Z +http://www.betaversion.org/~stefano/linotype/news/94/|creationDate|2005-10-27 +http://www.betaversion.org/~stefano/linotype/news/94/|tag|http://www.semanlink.net/tag/simile +http://www.betaversion.org/~stefano/linotype/news/94/|tag|http://www.semanlink.net/tag/stefano_mazzocchi +http://www.betaversion.org/~stefano/linotype/news/94/|tag|http://www.semanlink.net/tag/mozilla +http://www.betaversion.org/~stefano/linotype/news/94/|tag|http://www.semanlink.net/tag/piggy_bank +http://www.betaversion.org/~stefano/linotype/news/94/|tag|http://www.semanlink.net/tag/cocoon +http://www.betaversion.org/~stefano/linotype/news/94/|comment|"At the heart of Piggy Bank, there is a web server running inside your web browser. It's running a servlet, a minimal RESTful framework... +
+When people ask me what I do for a living, I say that I research what the web of the future could be. At that point, they ask me to give them an example of what that would mean for them. My usual reply is ""if we are successful, the only difference you'll perceive is that you won't feel as constantly lost as you feel today"". At that point they smile, happy to meet a technologist who thinks it's his fault, not theirs, if they can't do something with his software. + +" +http://www.betaversion.org/~stefano/linotype/news/94/|title|Stefano's Linotype ~ Piggy Bank, Cocoon and the Future of the Web +http://eculture.cs.vu.nl/europeana/session/search|creationDate|2009-05-05 +http://eculture.cs.vu.nl/europeana/session/search|tag|http://www.semanlink.net/tag/semantic_web_use_cases +http://eculture.cs.vu.nl/europeana/session/search|tag|http://www.semanlink.net/tag/semantic_search +http://eculture.cs.vu.nl/europeana/session/search|tag|http://www.semanlink.net/tag/europeana +http://eculture.cs.vu.nl/europeana/session/search|comment|This is a research prototype of Europeana's semantic search engine. +http://eculture.cs.vu.nl/europeana/session/search|title|Europeana's semantic search engine. +http://eculture.cs.vu.nl/europeana/session/search|creationTime|2009-05-05T16:19:20Z +http://stackoverflow.com/questions/2803305/javascript-how-to-download-js-asynchronously|creationDate|2012-07-13 +http://stackoverflow.com/questions/2803305/javascript-how-to-download-js-asynchronously|tag|http://www.semanlink.net/tag/download_execute_javascript +http://stackoverflow.com/questions/2803305/javascript-how-to-download-js-asynchronously|title|web development - JavaScript: How to download JS asynchronously? - Stack Overflow +http://stackoverflow.com/questions/2803305/javascript-how-to-download-js-asynchronously|creationTime|2012-07-13T02:15:05Z +http://www.econsultant.com/delicious-by-function/index.html|creationDate|2006-05-13 +http://www.econsultant.com/delicious-by-function/index.html|tag|http://www.semanlink.net/tag/del_icio_us +http://www.econsultant.com/delicious-by-function/index.html|title|del.icio.us: 150+ hacks categorized +http://www.lassila.org/blog/archive/2006/03/oink.html|creationDate|2006-03-22 +http://www.lassila.org/blog/archive/2006/03/oink.html|tag|http://www.semanlink.net/tag/ora_lassila +http://www.lassila.org/blog/archive/2006/03/oink.html|tag|http://www.semanlink.net/tag/lisp +http://www.lassila.org/blog/archive/2006/03/oink.html|tag|http://www.semanlink.net/tag/rdf_browser +http://www.lassila.org/blog/archive/2006/03/oink.html|comment|"""RDF browser"", built using Wilbur. I am tired of looking RDF data in the RDF/XML (or any other syntactic) form. It seems that an RDF graph can easily be rendered as hypertext, and browsing is a very intuitive way to navigate data. Loading multiple RDF documents into Wilbur's triple-store and viewing them all together offers a simple way to integrate data, ad hoc. I've dubbed this piece of software ""OINK"" for ""Open Integration of Networked Knowledge"" (really)." 
+http://www.lassila.org/blog/archive/2006/03/oink.html|title|Wilbur-and-O: OINK +http://swse.deri.org/|creationDate|2007-07-20 +http://swse.deri.org/|tag|http://www.semanlink.net/tag/swse +http://swse.deri.org/|tag|http://www.semanlink.net/tag/semantic_search +http://swse.deri.org/|title|SWSE - Semantic Search +http://swse.deri.org/|creationTime|2007-07-20T00:48:05Z +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=15904|creationDate|2007-03-20 +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=15904|tag|http://www.semanlink.net/tag/rdf_forms +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=15904|tag|http://www.semanlink.net/tag/richard_cyganiak +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=15904|tag|http://www.semanlink.net/tag/good +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=15904|tag|http://www.semanlink.net/tag/linked_data +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=15904|title|[Linking-open-data] Re: Forms in the web of data (Richard Cyganiak) +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=15904|creationTime|2007-03-20T21:32:27Z +http://codinginparadise.org/weblog/2005/08/ajax-creating-huge-bookmarklets.html|creationDate|2005-12-02 +http://codinginparadise.org/weblog/2005/08/ajax-creating-huge-bookmarklets.html|tag|http://www.semanlink.net/tag/bookmarklet +http://codinginparadise.org/weblog/2005/08/ajax-creating-huge-bookmarklets.html|tag|http://www.semanlink.net/tag/javascript +http://codinginparadise.org/weblog/2005/08/ajax-creating-huge-bookmarklets.html|tag|http://www.semanlink.net/tag/dev_tips +http://codinginparadise.org/weblog/2005/08/ajax-creating-huge-bookmarklets.html|title|Creating Huge Bookmarklets +http://www.semanticoverflow.com/questions/1493/what-is-the-best-java-rdfa-parser|creationDate|2011-02-09 +http://www.semanticoverflow.com/questions/1493/what-is-the-best-java-rdfa-parser|tag|http://www.semanlink.net/tag/java +http://www.semanticoverflow.com/questions/1493/what-is-the-best-java-rdfa-parser|tag|http://www.semanlink.net/tag/rdfa +http://www.semanticoverflow.com/questions/1493/what-is-the-best-java-rdfa-parser|tag|http://www.semanlink.net/tag/semantic_overflow +http://www.semanticoverflow.com/questions/1493/what-is-the-best-java-rdfa-parser|tag|http://www.semanlink.net/tag/jena +http://www.semanticoverflow.com/questions/1493/what-is-the-best-java-rdfa-parser|title|What is the best Java RDFa Parser? 
- Semantic Overflow +http://www.semanticoverflow.com/questions/1493/what-is-the-best-java-rdfa-parser|creationTime|2011-02-09T00:28:36Z +http://fgiasson.com/blog/index.php/2007/06/20/zitgists-rdf-browser-browse-the-semantic-web/|creationDate|2007-06-22 +http://fgiasson.com/blog/index.php/2007/06/20/zitgists-rdf-browser-browse-the-semantic-web/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2007/06/20/zitgists-rdf-browser-browse-the-semantic-web/|tag|http://www.semanlink.net/tag/zitgist +http://fgiasson.com/blog/index.php/2007/06/20/zitgists-rdf-browser-browse-the-semantic-web/|tag|http://www.semanlink.net/tag/rdf_templating +http://fgiasson.com/blog/index.php/2007/06/20/zitgists-rdf-browser-browse-the-semantic-web/|title|Zitgist’s RDF Browser: Browse the Semantic Web at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2007/06/20/zitgists-rdf-browser-browse-the-semantic-web/|creationTime|2007-06-22T23:12:29Z +https://api.globalchallenges.org/static/wp-content/uploads/12-Risks-with-infinite-impact.pdf|creationDate|2017-09-18 +https://api.globalchallenges.org/static/wp-content/uploads/12-Risks-with-infinite-impact.pdf|tag|http://www.semanlink.net/tag/grands_problemes +https://api.globalchallenges.org/static/wp-content/uploads/12-Risks-with-infinite-impact.pdf|tag|http://www.semanlink.net/tag/risks +https://api.globalchallenges.org/static/wp-content/uploads/12-Risks-with-infinite-impact.pdf|comment|Risks with Infinite impact +https://api.globalchallenges.org/static/wp-content/uploads/12-Risks-with-infinite-impact.pdf|title|Risks that threaten human civilisation +https://api.globalchallenges.org/static/wp-content/uploads/12-Risks-with-infinite-impact.pdf|creationTime|2017-09-18T14:34:22Z +http://www.cs.berkeley.edu/~klein/papers/maxent-tutorial-slides-6.pdf|creationDate|2012-04-15 +http://www.cs.berkeley.edu/~klein/papers/maxent-tutorial-slides-6.pdf|tag|http://www.semanlink.net/tag/chris_manning +http://www.cs.berkeley.edu/~klein/papers/maxent-tutorial-slides-6.pdf|tag|http://www.semanlink.net/tag/maxent_models +http://www.cs.berkeley.edu/~klein/papers/maxent-tutorial-slides-6.pdf|tag|http://www.semanlink.net/tag/maxent_for_nlp +http://www.cs.berkeley.edu/~klein/papers/maxent-tutorial-slides-6.pdf|title|Maxent models, Conditional estimation and Optimization +http://www.cs.berkeley.edu/~klein/papers/maxent-tutorial-slides-6.pdf|creationTime|2012-04-15T14:33:52Z +http://planb.nicecupoftea.org/archives/001302.html|creationDate|2005-09-17 +http://planb.nicecupoftea.org/archives/001302.html|tag|http://www.semanlink.net/tag/ajax +http://planb.nicecupoftea.org/archives/001302.html|tag|http://www.semanlink.net/tag/sparql +http://planb.nicecupoftea.org/archives/001302.html|title|Plan B: Ajax and Sparql +http://searchhub.org/2010/04/29/for-the-guardian-solr-is-the-new-database/|creationDate|2013-03-19 +http://searchhub.org/2010/04/29/for-the-guardian-solr-is-the-new-database/|tag|http://www.semanlink.net/tag/solr +http://searchhub.org/2010/04/29/for-the-guardian-solr-is-the-new-database/|tag|http://www.semanlink.net/tag/the_guardian +http://searchhub.org/2010/04/29/for-the-guardian-solr-is-the-new-database/|tag|http://www.semanlink.net/tag/database +http://searchhub.org/2010/04/29/for-the-guardian-solr-is-the-new-database/|title|For The Guardian, Solr is the new database +http://searchhub.org/2010/04/29/for-the-guardian-solr-is-the-new-database/|creationTime|2013-03-19T23:14:20Z 
+http://dowhatimean.net/2006/11/content-negotiation-with-hash-uris-long|creationDate|2007-04-03 +http://dowhatimean.net/2006/11/content-negotiation-with-hash-uris-long|tag|http://www.semanlink.net/tag/hash_uris +http://dowhatimean.net/2006/11/content-negotiation-with-hash-uris-long|tag|http://www.semanlink.net/tag/richard_cyganiak +http://dowhatimean.net/2006/11/content-negotiation-with-hash-uris-long|title|dowhatimean.net » Content negotiation with hash URIs (long) +http://dowhatimean.net/2006/11/content-negotiation-with-hash-uris-long|creationTime|2007-04-03T23:30:30Z +https://towardsdatascience.com/finding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d|creationDate|2018-11-25 +https://towardsdatascience.com/finding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d|tag|http://www.semanlink.net/tag/text_similarity +https://towardsdatascience.com/finding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d|tag|http://www.semanlink.net/tag/nlp_sample_code +https://towardsdatascience.com/finding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d|tag|http://www.semanlink.net/tag/xgboost +https://towardsdatascience.com/finding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d|comment|[Part 1](/doc/2019/07/finding_similar_quora_questions) +https://towardsdatascience.com/finding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d|relatedDoc|http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions +https://towardsdatascience.com/finding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d|title|Finding Similar Quora Questions with Word2Vec and Xgboost +https://towardsdatascience.com/finding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d|creationTime|2018-11-25T10:35:08Z +https://www.lemonde.fr/big-browser/article/2018/11/03/la-limule-fossile-vivant-au-sang-bleu-menace-de-disparition_5378484_4832693.html|creationDate|2018-11-08 +https://www.lemonde.fr/big-browser/article/2018/11/03/la-limule-fossile-vivant-au-sang-bleu-menace-de-disparition_5378484_4832693.html|tag|http://www.semanlink.net/tag/especes_menacees +https://www.lemonde.fr/big-browser/article/2018/11/03/la-limule-fossile-vivant-au-sang-bleu-menace-de-disparition_5378484_4832693.html|tag|http://www.semanlink.net/tag/limule +https://www.lemonde.fr/big-browser/article/2018/11/03/la-limule-fossile-vivant-au-sang-bleu-menace-de-disparition_5378484_4832693.html|title|La limule, « fossile vivant » au sang bleu menacé de disparition +https://www.lemonde.fr/big-browser/article/2018/11/03/la-limule-fossile-vivant-au-sang-bleu-menace-de-disparition_5378484_4832693.html|creationTime|2018-11-08T00:16:20Z +http://www.w3.org/2009/03/xbrl/program.html|creationDate|2010-07-01 +http://www.w3.org/2009/03/xbrl/program.html|tag|http://www.semanlink.net/tag/xbrl +http://www.w3.org/2009/03/xbrl/program.html|title|Program for the Workshop on Improving Access to Financial Data on the Web +http://www.w3.org/2009/03/xbrl/program.html|creationTime|2010-07-01T18:34:46Z +https://arxiv.org/abs/1811.05370|creationDate|2018-11-20 +https://arxiv.org/abs/1811.05370|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1811.05370|tag|http://www.semanlink.net/tag/transfer_learning +https://arxiv.org/abs/1811.05370|tag|http://www.semanlink.net/tag/ulmfit +https://arxiv.org/abs/1811.05370|tag|http://www.semanlink.net/tag/amazon_alexa +https://arxiv.org/abs/1811.05370|tag|http://www.semanlink.net/tag/elmo 
+https://arxiv.org/abs/1811.05370|arxiv_author|Aditya Siddhant +https://arxiv.org/abs/1811.05370|arxiv_author|Angeliki Metallinou +https://arxiv.org/abs/1811.05370|arxiv_author|Anuj Goyal +https://arxiv.org/abs/1811.05370|comment|> We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x +https://arxiv.org/abs/1811.05370|title|[1811.05370] Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents +https://arxiv.org/abs/1811.05370|creationTime|2018-11-20T00:14:11Z +https://arxiv.org/abs/1811.05370|arxiv_summary|"User interaction with voice-powered agents generates large amounts of +unlabeled utterances. In this paper, we explore techniques to efficiently +transfer the knowledge from these unlabeled utterances to improve model +performance on Spoken Language Understanding (SLU) tasks. We use Embeddings +from Language Model (ELMo) to take advantage of unlabeled data by learning +contextualized word representations. Additionally, we propose ELMo-Light +(ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our +findings suggest unsupervised pre-training on a large corpora of unlabeled +utterances leads to significantly better SLU performance compared to training +from scratch and it can even outperform conventional supervised transfer. +Additionally, we show that the gains from unsupervised transfer techniques can +be further improved by supervised transfer. The improvements are more +pronounced in low resource settings and when using only 1000 labeled in-domain +samples, our techniques match the performance of training from scratch on +10-15x more labeled in-domain data." +https://arxiv.org/abs/1811.05370|arxiv_firstAuthor|Aditya Siddhant +https://arxiv.org/abs/1811.05370|arxiv_updated|2018-11-13T15:44:31Z +https://arxiv.org/abs/1811.05370|arxiv_title|Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents +https://arxiv.org/abs/1811.05370|arxiv_published|2018-11-13T15:44:31Z +https://arxiv.org/abs/1811.05370|arxiv_num|1811.05370 +http://www.boutell.com/newfaq/creating/scriptpass.html|creationDate|2008-01-15 +http://www.boutell.com/newfaq/creating/scriptpass.html|tag|http://www.semanlink.net/tag/cookie +http://www.boutell.com/newfaq/creating/scriptpass.html|tag|http://www.semanlink.net/tag/javascript +http://www.boutell.com/newfaq/creating/scriptpass.html|title|WWW FAQs: How do I pass data between JavaScript pages? 
+http://www.boutell.com/newfaq/creating/scriptpass.html|creationTime|2008-01-15T23:23:29Z +https://krapooarboricole.wordpress.com/2008/05/13/chene-de-la-lambonniere-600-ans-pervencheres-orne/|creationDate|2017-10-06 +https://krapooarboricole.wordpress.com/2008/05/13/chene-de-la-lambonniere-600-ans-pervencheres-orne/|tag|http://www.semanlink.net/tag/orne +https://krapooarboricole.wordpress.com/2008/05/13/chene-de-la-lambonniere-600-ans-pervencheres-orne/|tag|http://www.semanlink.net/tag/arbres_remarquables +https://krapooarboricole.wordpress.com/2008/05/13/chene-de-la-lambonniere-600-ans-pervencheres-orne/|tag|http://www.semanlink.net/tag/chene +https://krapooarboricole.wordpress.com/2008/05/13/chene-de-la-lambonniere-600-ans-pervencheres-orne/|title|Chêne de la Lambonnière 550 ans, Pervenchères (Orne) Krapo arboricole +https://krapooarboricole.wordpress.com/2008/05/13/chene-de-la-lambonniere-600-ans-pervencheres-orne/|creationTime|2017-10-06T22:31:13Z +http://arxiv.org/abs/1602.02410|creationDate|2016-02-09 +http://arxiv.org/abs/1602.02410|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/1602.02410|tag|http://www.semanlink.net/tag/recurrent_neural_network +http://arxiv.org/abs/1602.02410|tag|http://www.semanlink.net/tag/language_model +http://arxiv.org/abs/1602.02410|arxiv_author|Noam Shazeer +http://arxiv.org/abs/1602.02410|arxiv_author|Rafal Jozefowicz +http://arxiv.org/abs/1602.02410|arxiv_author|Oriol Vinyals +http://arxiv.org/abs/1602.02410|arxiv_author|Yonghui Wu +http://arxiv.org/abs/1602.02410|arxiv_author|Mike Schuster +http://arxiv.org/abs/1602.02410|comment|recent advances in Recurrent Neural Networks for large scale Language Modeling +http://arxiv.org/abs/1602.02410|title|[1602.02410] Exploring the Limits of Language Modeling +http://arxiv.org/abs/1602.02410|creationTime|2016-02-09T19:00:54Z +http://arxiv.org/abs/1602.02410|arxiv_summary|"In this work we explore recent advances in Recurrent Neural Networks for +large scale Language Modeling, a task central to language understanding. We +extend current models to deal with two key challenges present in this task: +corpora and vocabulary sizes, and complex, long term structure of language. We +perform an exhaustive study on techniques such as character Convolutional +Neural Networks or Long-Short Term Memory, on the One Billion Word Benchmark. +Our best single model significantly improves state-of-the-art perplexity from +51.3 down to 30.0 (whilst reducing the number of parameters by a factor of 20), +while an ensemble of models sets a new record by improving perplexity from 41.0 +down to 23.7. We also release these models for the NLP and ML community to +study and improve upon." 
+http://arxiv.org/abs/1602.02410|arxiv_firstAuthor|Rafal Jozefowicz +http://arxiv.org/abs/1602.02410|arxiv_updated|2016-02-11T23:01:48Z +http://arxiv.org/abs/1602.02410|arxiv_title|Exploring the Limits of Language Modeling +http://arxiv.org/abs/1602.02410|arxiv_published|2016-02-07T19:11:17Z +http://arxiv.org/abs/1602.02410|arxiv_num|1602.02410 +https://www.courrierinternational.com/article/kenya-pour-se-rembourser-la-chine-pourrait-semparer-du-port-de-mombasa|creationDate|2019-01-02 +https://www.courrierinternational.com/article/kenya-pour-se-rembourser-la-chine-pourrait-semparer-du-port-de-mombasa|tag|http://www.semanlink.net/tag/kenya +https://www.courrierinternational.com/article/kenya-pour-se-rembourser-la-chine-pourrait-semparer-du-port-de-mombasa|tag|http://www.semanlink.net/tag/dette +https://www.courrierinternational.com/article/kenya-pour-se-rembourser-la-chine-pourrait-semparer-du-port-de-mombasa|tag|http://www.semanlink.net/tag/chine_afrique +https://www.courrierinternational.com/article/kenya-pour-se-rembourser-la-chine-pourrait-semparer-du-port-de-mombasa|title|Kenya. Pour se rembourser, la Chine pourrait s’emparer du port de Mombasa Courrier international +https://www.courrierinternational.com/article/kenya-pour-se-rembourser-la-chine-pourrait-semparer-du-port-de-mombasa|creationTime|2019-01-02T12:45:20Z +https://arxiv.org/abs/1607.00570|creationDate|2017-06-09 +https://arxiv.org/abs/1607.00570|tag|http://www.semanlink.net/tag/word_embedding +https://arxiv.org/abs/1607.00570|tag|http://www.semanlink.net/tag/tf_idf +https://arxiv.org/abs/1607.00570|tag|http://www.semanlink.net/tag/nlp_short_texts +https://arxiv.org/abs/1607.00570|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1607.00570|arxiv_author|Cedric De Boom +https://arxiv.org/abs/1607.00570|arxiv_author|Thomas Demeester +https://arxiv.org/abs/1607.00570|arxiv_author|Bart Dhoedt +https://arxiv.org/abs/1607.00570|arxiv_author|Steven Van Canneyt +https://arxiv.org/abs/1607.00570|comment|"A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. Github (hmm...) (python code) + +" +https://arxiv.org/abs/1607.00570|title|[1607.00570] Representation learning for very short texts using weighted word embedding aggregation +https://arxiv.org/abs/1607.00570|creationTime|2017-06-09T15:01:36Z +https://arxiv.org/abs/1607.00570|arxiv_summary|"Short text messages such as tweets are very noisy and sparse in their use of +vocabulary. Traditional textual representations, such as tf-idf, have +difficulty grasping the semantic meaning of such texts, which is important in +applications such as event detection, opinion mining, news recommendation, etc. +We constructed a method based on semantic word embeddings and frequency +information to arrive at low-dimensional representations for short texts +designed to capture semantic similarity. For this purpose we designed a +weight-based model and a learning procedure based on a novel median-based loss +function. This paper discusses the details of our model and the optimization +methods, together with the experimental results on both Wikipedia and Twitter +data. We find that our method outperforms the baseline approaches in the +experiments, and that it generalizes well on different word embeddings without +retraining. Our method is therefore capable of retaining most of the semantic +information in the text, and is applicable out-of-the-box." 
+https://arxiv.org/abs/1607.00570|arxiv_firstAuthor|Cedric De Boom +https://arxiv.org/abs/1607.00570|arxiv_updated|2016-07-02T23:10:09Z +https://arxiv.org/abs/1607.00570|arxiv_title|Representation learning for very short texts using weighted word embedding aggregation +https://arxiv.org/abs/1607.00570|arxiv_published|2016-07-02T23:10:09Z +https://arxiv.org/abs/1607.00570|arxiv_num|1607.00570 +https://www.semanticscholar.org/paper/RDF2Vec-RDF-Graph-Embeddings-for-Data-Mining-Ristoski-Paulheim/21bc51c43a3ed702ccb661d8137f9b5bbe0ed3c8|creationDate|2018-01-03 +https://www.semanticscholar.org/paper/RDF2Vec-RDF-Graph-Embeddings-for-Data-Mining-Ristoski-Paulheim/21bc51c43a3ed702ccb661d8137f9b5bbe0ed3c8|tag|http://www.semanlink.net/tag/rdf2vec +https://www.semanticscholar.org/paper/RDF2Vec-RDF-Graph-Embeddings-for-Data-Mining-Ristoski-Paulheim/21bc51c43a3ed702ccb661d8137f9b5bbe0ed3c8|comment|"> In our approach, we adapt neural language models for RDF graph embeddings. Such approaches take advantage of the word order in text documents, explicitly modeling the assumption that closer words in the word sequence are statistically more dependent. In the case of RDF graphs, we consider entities and relations between entities instead of word sequences. Thus, in order to apply such approaches on RDF graph data, we first have to transform the graph data into sequences of entities, which can be considered as sentences. Using those sentences, we can train the same neural language models to represent each entity in the RDF graph as a vector of numerical values in a latent feature space. +" +https://www.semanticscholar.org/paper/RDF2Vec-RDF-Graph-Embeddings-for-Data-Mining-Ristoski-Paulheim/21bc51c43a3ed702ccb661d8137f9b5bbe0ed3c8|title|RDF2Vec: RDF Graph Embeddings for Data Mining - (2016) +https://www.semanticscholar.org/paper/RDF2Vec-RDF-Graph-Embeddings-for-Data-Mining-Ristoski-Paulheim/21bc51c43a3ed702ccb661d8137f9b5bbe0ed3c8|creationTime|2018-01-03T16:54:19Z +http://news.bbc.co.uk/2/hi/science/nature/7733372.stm|creationDate|2008-11-19 +http://news.bbc.co.uk/2/hi/science/nature/7733372.stm|tag|http://www.semanlink.net/tag/neolithique +http://news.bbc.co.uk/2/hi/science/nature/7733372.stm|comment|The oldest genetically identifiable nuclear family met a violent death, according to analysis of remains from 4,600-year-old burials in Germany. +http://news.bbc.co.uk/2/hi/science/nature/7733372.stm|title|BBC NEWS Science & Environment Oldest nuclear family 'murdered' +http://news.bbc.co.uk/2/hi/science/nature/7733372.stm|creationTime|2008-11-19T21:34:12Z +http://news.bbc.co.uk/2/hi/science/nature/7733372.stm|source|BBC +http://www.ldh-toulon.net/spip.php?article877|creationDate|2007-04-30 +http://www.ldh-toulon.net/spip.php?article877|tag|http://www.semanlink.net/tag/pillage_du_palais_d_ete +http://www.ldh-toulon.net/spip.php?article877|tag|http://www.semanlink.net/tag/hugo +http://www.ldh-toulon.net/spip.php?article877|comment|"L’empereur Xianfeng est en fuite. Il a abandonné Pékin aux troupes anglo-françaises qui, le 6 octobre 1860, envahissent sa résidence d’été, d’une beauté exceptionnelle, la saccagent, la dévastent. + +> Devant l’histoire, l’un des deux bandits s’appellera la France, l’autre s’appellera l’Angleterre. Mais je proteste, et je vous remercie de m’en donner l’occasion ; les crimes de ceux qui mènent ne sont pas la faute de ceux qui sont menés ; les gouvernements sont quelquefois des bandits, les peuples jamais. 
+> +> L’empire français a empoché la moitié de cette victoire et il étale aujourd’hui avec une sorte de naïveté de propriétaire, le splendide bric-à-brac du Palais d’été. +> +> J’espère qu’un jour viendra où la France, délivrée et nettoyée, renverra ce butin à la Chine spoliée. +> +> En attendant, il y a un vol et deux voleurs, je le constate. +> Telle est, monsieur, la quantité d’approbation que je donne à l’expédition de Chine." +http://www.ldh-toulon.net/spip.php?article877|title|[LDH-Toulon] lettre de Victor Hugo au capitaine Butler +http://www.ldh-toulon.net/spip.php?article877|creationTime|2007-04-30T01:31:15Z +https://lists.w3.org/Archives/Public/semantic-web/2017Feb/0045.html|creationDate|2017-02-14 +https://lists.w3.org/Archives/Public/semantic-web/2017Feb/0045.html|tag|http://www.semanlink.net/tag/jena +https://lists.w3.org/Archives/Public/semantic-web/2017Feb/0045.html|title|[ANN] Apache Jena 3.2.0 released from A. Soroka on 2017-02-13 (semantic-web@w3.org from February 2017) +https://lists.w3.org/Archives/Public/semantic-web/2017Feb/0045.html|creationTime|2017-02-14T12:52:42Z +http://simia.net/wiki/Knowledge_Graph_Conference_2019,_Day_1|creationDate|2019-05-09 +http://simia.net/wiki/Knowledge_Graph_Conference_2019,_Day_1|tag|http://www.semanlink.net/tag/knowledge_graph_conference_2019 +http://simia.net/wiki/Knowledge_Graph_Conference_2019,_Day_1|tag|http://www.semanlink.net/tag/denny_vrandecic +http://simia.net/wiki/Knowledge_Graph_Conference_2019,_Day_1|title|Knowledge Graph Conference 2019, Day 1 - Simia +http://simia.net/wiki/Knowledge_Graph_Conference_2019,_Day_1|creationTime|2019-05-09T23:17:21Z +http://apassant.net/blog/2009/05/27/soutenance-de-th%C3%A8se-technologies-du-web-s%C3%A9mantique-pour-lentreprise-20|creationDate|2009-05-28 +http://apassant.net/blog/2009/05/27/soutenance-de-th%C3%A8se-technologies-du-web-s%C3%A9mantique-pour-lentreprise-20|tag|http://www.semanlink.net/tag/web_2_0 +http://apassant.net/blog/2009/05/27/soutenance-de-th%C3%A8se-technologies-du-web-s%C3%A9mantique-pour-lentreprise-20|tag|http://www.semanlink.net/tag/alexandre_passant +http://apassant.net/blog/2009/05/27/soutenance-de-th%C3%A8se-technologies-du-web-s%C3%A9mantique-pour-lentreprise-20|tag|http://www.semanlink.net/tag/semantic_enterprise +http://apassant.net/blog/2009/05/27/soutenance-de-th%C3%A8se-technologies-du-web-s%C3%A9mantique-pour-lentreprise-20|title|"Soutenance de thèse ""Technologies du Web Sémantique pour l'Entreprise 2.0"" Alexandre Passant" +http://apassant.net/blog/2009/05/27/soutenance-de-th%C3%A8se-technologies-du-web-s%C3%A9mantique-pour-lentreprise-20|creationTime|2009-05-28T23:33:21Z +http://stackoverflow.com/questions/8772692/semantic-search-with-nlp-and-elasticsearch|creationDate|2017-04-28 +http://stackoverflow.com/questions/8772692/semantic-search-with-nlp-and-elasticsearch|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/8772692/semantic-search-with-nlp-and-elasticsearch|tag|http://www.semanlink.net/tag/elasticsearch +http://stackoverflow.com/questions/8772692/semantic-search-with-nlp-and-elasticsearch|tag|http://www.semanlink.net/tag/nlp +http://stackoverflow.com/questions/8772692/semantic-search-with-nlp-and-elasticsearch|tag|http://www.semanlink.net/tag/semantic_search +http://stackoverflow.com/questions/8772692/semantic-search-with-nlp-and-elasticsearch|title|Semantic search with NLP and elasticsearch - Stack Overflow 
+http://stackoverflow.com/questions/8772692/semantic-search-with-nlp-and-elasticsearch|creationTime|2017-04-28T03:22:51Z +http://www.nasa.gov/vision/universe/solarsystem/voyager_agu.html|creationDate|2005-05-25 +http://www.nasa.gov/vision/universe/solarsystem/voyager_agu.html|tag|http://www.semanlink.net/tag/voyager +http://www.nasa.gov/vision/universe/solarsystem/voyager_agu.html|tag|http://www.semanlink.net/tag/nasa +http://www.nasa.gov/vision/universe/solarsystem/voyager_agu.html|title|NASA - Voyager Enters Solar System's Final Frontier +http://www.w3.org/TR/swbp-vocab-pub/|creationDate|2006-03-14 +http://www.w3.org/TR/swbp-vocab-pub/|tag|http://www.semanlink.net/tag/publishing_rdf_vocabularies +http://www.w3.org/TR/swbp-vocab-pub/|tag|http://www.semanlink.net/tag/httprange_14_solution +http://www.w3.org/TR/swbp-vocab-pub/|tag|http://www.semanlink.net/tag/w3c_working_draft +http://www.w3.org/TR/swbp-vocab-pub/|tag|http://www.semanlink.net/tag/linked_data +http://www.w3.org/TR/swbp-vocab-pub/|tag|http://www.semanlink.net/tag/howto +http://www.w3.org/TR/swbp-vocab-pub/|comment|"This document describes best practice recipes for publishing an RDFS or +OWL vocabulary or ontology on the Web. The features of each recipe are clearly +described, so that vocabulary or ontology creators may choose the recipe best +suited to the needs of their particular situations. Each recipe contains +an example configuration for use with an Apache HTTP server, although the +principles involved may be adapted to other environments. The recipes are all +designed to be consistent with the architecture of the Web as currently +specified." +http://www.w3.org/TR/swbp-vocab-pub/|title|Best Practice Recipes for Publishing RDF Vocabularies +https://arxiv.org/abs/1807.07984|creationDate|2018-11-14 +https://arxiv.org/abs/1807.07984|tag|http://www.semanlink.net/tag/attention_in_graphs +https://arxiv.org/abs/1807.07984|tag|http://www.semanlink.net/tag/survey +https://arxiv.org/abs/1807.07984|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1807.07984|arxiv_author|Eunyee Koh +https://arxiv.org/abs/1807.07984|arxiv_author|John Boaz Lee +https://arxiv.org/abs/1807.07984|arxiv_author|Ryan A. Rossi +https://arxiv.org/abs/1807.07984|arxiv_author|Sungchul Kim +https://arxiv.org/abs/1807.07984|arxiv_author|Nesreen K. Ahmed +https://arxiv.org/abs/1807.07984|comment|"> An attention mechanism aids a model by +allowing it to ""focus on the most relevant parts of the input to make decisions"" +" +https://arxiv.org/abs/1807.07984|title|[1807.07984] Attention Models in Graphs: A Survey +https://arxiv.org/abs/1807.07984|creationTime|2018-11-14T02:13:13Z +https://arxiv.org/abs/1807.07984|arxiv_summary|"Graph-structured data arise naturally in many different application domains. +By representing data as graphs, we can capture entities (i.e., nodes) as well +as their relationships (i.e., edges) with each other. Many useful insights can +be derived from graph-structured data as demonstrated by an ever-growing body +of work focused on graph mining. However, in the real-world, graphs can be both +large - with many complex patterns - and noisy which can pose a problem for +effective graph mining. An effective way to deal with this issue is to +incorporate ""attention"" into graph mining solutions. An attention mechanism +allows a method to focus on task-relevant parts of the graph, helping it to +make better decisions. 
In this work, we conduct a comprehensive and focused +survey of the literature on the emerging field of graph attention models. We +introduce three intuitive taxonomies to group existing work. These are based on +problem setting (type of input and output), the type of attention mechanism +used, and the task (e.g., graph classification, link prediction, etc.). We +motivate our taxonomies through detailed examples and use each to survey +competing approaches from a unique standpoint. Finally, we highlight several +challenges in the area and discuss promising directions for future work." +https://arxiv.org/abs/1807.07984|arxiv_firstAuthor|John Boaz Lee +https://arxiv.org/abs/1807.07984|arxiv_updated|2018-07-20T18:11:07Z +https://arxiv.org/abs/1807.07984|arxiv_title|Attention Models in Graphs: A Survey +https://arxiv.org/abs/1807.07984|arxiv_published|2018-07-20T18:11:07Z +https://arxiv.org/abs/1807.07984|arxiv_num|1807.07984 +http://www.macworld.com/2005/03/secrets/aprilmobilemac/index.php|creationDate|2007-03-02 +http://www.macworld.com/2005/03/secrets/aprilmobilemac/index.php|tag|http://www.semanlink.net/tag/mac_os_x +http://www.macworld.com/2005/03/secrets/aprilmobilemac/index.php|tag|http://www.semanlink.net/tag/wifi +http://www.macworld.com/2005/03/secrets/aprilmobilemac/index.php|title|Macworld: Secrets: Stranger in a Strange LAN +http://www.macworld.com/2005/03/secrets/aprilmobilemac/index.php|creationTime|2007-03-02T23:44:30Z +http://lists.w3.org/Archives/Public/public-vocabs/2013Jul/0075.html|creationDate|2013-07-29 +http://lists.w3.org/Archives/Public/public-vocabs/2013Jul/0075.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2013Jul/0075.html|tag|http://www.semanlink.net/tag/c2gweb_and_product_description +http://lists.w3.org/Archives/Public/public-vocabs/2013Jul/0075.html|tag|http://www.semanlink.net/tag/fps_post +http://lists.w3.org/Archives/Public/public-vocabs/2013Jul/0075.html|tag|http://www.semanlink.net/tag/gao +http://lists.w3.org/Archives/Public/public-vocabs/2013Jul/0075.html|title|Vehicles, and customizable products from Francois-Paul Servant on 2013-07-23 (public-vocabs@w3.org from July 2013) +http://lists.w3.org/Archives/Public/public-vocabs/2013Jul/0075.html|creationTime|2013-07-29T02:04:56Z +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.89.7148|creationDate|2018-03-18 +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.89.7148|tag|http://www.semanlink.net/tag/nlp_4_semanlink +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.89.7148|tag|http://www.semanlink.net/tag/multi_label_classification +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.89.7148|tag|http://www.semanlink.net/tag/k_nearest_neighbors_algorithm +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.89.7148|tag|http://www.semanlink.net/tag/frequently_cited_paper +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.89.7148|tag|http://www.semanlink.net/tag/knn_in_mlc +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.89.7148|comment|"a lazy learning approach derived from the traditional [k-nearest neighbors algorithm](tag:k_nearest_neighbors_algorithm) + +> for each unseen instance, its K nearest neighbors in the training set are firstly identified. After that, based on statistical information gained from the label sets of these neighboring instances, i.e. 
the number of neighboring instances belonging to each possible class, [Maximum a posteriori (MAP)](tag:maximum_a_posteriori_estimation) principle is utilized to determine the label set for the unseen instance. + +Implemented in [scikit-multilearn](http://scikit.ml/api/skmultilearn.adapt.mlknn.html), in [java](https://github.com/lefman/mulan-extended/blob/master/mulan/src/mulan/classifier/lazy/MLkNN.java) + +> the first lazy approach proposed specifically for multi-label classification. This is also a binary relevance approach which considers each label independently as a binary classification problem. Instead of a standard k-NN method, however, MLkNN uses the maximum a-posteriori (MAP) (Kelleher et al., 2015) approach combined with k-NN. [src](https://pdfs.semanticscholar.org/af9b/33da37d290c063cd826ab5923d96892a9767.pdf)" +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.89.7148|title|ML-knn: A lazy learning approach to multi-label learning (2007) +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.89.7148|creationTime|2018-03-18T10:54:01Z +http://internetactu.blog.lemonde.fr/2014/01/10/modifier-le-vivant-pour-sauver-la-planete-la-bioingenierie-peut-elle-proteger-la-nature/|creationDate|2014-01-12 +http://internetactu.blog.lemonde.fr/2014/01/10/modifier-le-vivant-pour-sauver-la-planete-la-bioingenierie-peut-elle-proteger-la-nature/|tag|http://www.semanlink.net/tag/especes_menacees +http://internetactu.blog.lemonde.fr/2014/01/10/modifier-le-vivant-pour-sauver-la-planete-la-bioingenierie-peut-elle-proteger-la-nature/|tag|http://www.semanlink.net/tag/synthetic_biology +http://internetactu.blog.lemonde.fr/2014/01/10/modifier-le-vivant-pour-sauver-la-planete-la-bioingenierie-peut-elle-proteger-la-nature/|title|Modifier le vivant pour sauver la planète : la bioingénierie peut-elle protéger la nature ? 
InternetActu +http://internetactu.blog.lemonde.fr/2014/01/10/modifier-le-vivant-pour-sauver-la-planete-la-bioingenierie-peut-elle-proteger-la-nature/|creationTime|2014-01-12T15:47:41Z +http://poolparty.punkt.at/demozone/|creationDate|2010-08-31 +http://poolparty.punkt.at/demozone/|tag|http://www.semanlink.net/tag/skos +http://poolparty.punkt.at/demozone/|tag|http://www.semanlink.net/tag/demo +http://poolparty.punkt.at/demozone/|title|Poolparty Demozone +http://poolparty.punkt.at/demozone/|creationTime|2010-08-31T09:50:28Z +https://medium.com/@rdsubhas/10-modern-software-engineering-mistakes-bc67fbef4fc8|creationDate|2017-08-18 +https://medium.com/@rdsubhas/10-modern-software-engineering-mistakes-bc67fbef4fc8|tag|http://www.semanlink.net/tag/coding +https://medium.com/@rdsubhas/10-modern-software-engineering-mistakes-bc67fbef4fc8|tag|http://www.semanlink.net/tag/over_engineering +https://medium.com/@rdsubhas/10-modern-software-engineering-mistakes-bc67fbef4fc8|title|Modern Software Over-Engineering Mistakes – RDX – Medium +https://medium.com/@rdsubhas/10-modern-software-engineering-mistakes-bc67fbef4fc8|creationTime|2017-08-18T12:34:53Z +http://ecologie.blog.lemonde.fr/2014/04/18/le-bresil-va-lacher-des-millions-de-moustiques-ogm-contre-la-dengue/|creationDate|2014-04-18 +http://ecologie.blog.lemonde.fr/2014/04/18/le-bresil-va-lacher-des-millions-de-moustiques-ogm-contre-la-dengue/|tag|http://www.semanlink.net/tag/moustique +http://ecologie.blog.lemonde.fr/2014/04/18/le-bresil-va-lacher-des-millions-de-moustiques-ogm-contre-la-dengue/|tag|http://www.semanlink.net/tag/bresil +http://ecologie.blog.lemonde.fr/2014/04/18/le-bresil-va-lacher-des-millions-de-moustiques-ogm-contre-la-dengue/|tag|http://www.semanlink.net/tag/dengue +http://ecologie.blog.lemonde.fr/2014/04/18/le-bresil-va-lacher-des-millions-de-moustiques-ogm-contre-la-dengue/|tag|http://www.semanlink.net/tag/ogm +http://ecologie.blog.lemonde.fr/2014/04/18/le-bresil-va-lacher-des-millions-de-moustiques-ogm-contre-la-dengue/|title|Le Brésil va lâcher des millions de moustiques OGM contre la dengue Eco(lo) +http://ecologie.blog.lemonde.fr/2014/04/18/le-bresil-va-lacher-des-millions-de-moustiques-ogm-contre-la-dengue/|creationTime|2014-04-18T12:02:39Z +http://www.essepuntato.it/lode|creationDate|2012-11-23 +http://www.essepuntato.it/lode|tag|http://www.semanlink.net/tag/owl_tool +http://www.essepuntato.it/lode|tag|http://www.semanlink.net/tag/rdf_owl_documentation_tool +http://www.essepuntato.it/lode|title|LODE - Live OWL Documentation Environment +http://www.essepuntato.it/lode|creationTime|2012-11-23T15:02:05Z +http://panopto.com/|creationDate|2014-11-17 +http://panopto.com/|title|Video Platform for Businesses and Universities Panopto Video Platform +http://panopto.com/|creationTime|2014-11-17T21:23:55Z +https://iamtrask.github.io/2015/07/12/basic-python-network/|creationDate|2015-08-24 +https://iamtrask.github.io/2015/07/12/basic-python-network/|tag|http://www.semanlink.net/tag/artificial_neural_network +https://iamtrask.github.io/2015/07/12/basic-python-network/|tag|http://www.semanlink.net/tag/sample_code +https://iamtrask.github.io/2015/07/12/basic-python-network/|tag|http://www.semanlink.net/tag/handwriting_recognition +https://iamtrask.github.io/2015/07/12/basic-python-network/|tag|http://www.semanlink.net/tag/python +https://iamtrask.github.io/2015/07/12/basic-python-network/|title|A Neural Network in 11 lines of Python (Part 1) - i am trask 
+https://iamtrask.github.io/2015/07/12/basic-python-network/|creationTime|2015-08-24T02:03:23Z +http://research.microsoft.com/en-us/projects/trinity/query.aspx|creationDate|2013-03-25 +http://research.microsoft.com/en-us/projects/trinity/query.aspx|tag|http://www.semanlink.net/tag/satori +http://research.microsoft.com/en-us/projects/trinity/query.aspx|tag|http://www.semanlink.net/tag/microsoft_research +http://research.microsoft.com/en-us/projects/trinity/query.aspx|title|Real-time query processing for billion node graphs - Microsoft Research +http://research.microsoft.com/en-us/projects/trinity/query.aspx|creationTime|2013-03-25T13:10:57Z +http://www.bbc.com/news/magazine-28986843|creationDate|2014-09-08 +http://www.bbc.com/news/magazine-28986843|tag|http://www.semanlink.net/tag/adn_mitochondrial +http://www.bbc.com/news/magazine-28986843|tag|http://www.semanlink.net/tag/pregnancy +http://www.bbc.com/news/magazine-28986843|title|BBC News - The girl with three biological parents +http://www.bbc.com/news/magazine-28986843|creationTime|2014-09-08T22:29:30Z +http://news.bbc.co.uk/1/hi/sci/tech/5245950.stm|creationDate|2006-08-20 +http://news.bbc.co.uk/1/hi/sci/tech/5245950.stm|tag|http://www.semanlink.net/tag/genetique +http://news.bbc.co.uk/1/hi/sci/tech/5245950.stm|tag|http://www.semanlink.net/tag/evolution +http://news.bbc.co.uk/1/hi/sci/tech/5245950.stm|comment|US researchers have taken a mouse back in time some 500 million years by reversing the process of evolution. +http://news.bbc.co.uk/1/hi/sci/tech/5245950.stm|title|BBC NEWS Evolution reversed in mice +http://news.bbc.co.uk/1/hi/sci/tech/5245950.stm|source|BBC +https://www.washingtonpost.com/news/innovations/wp/2016/05/11/this-professor-stunned-his-students-when-he-revealed-the-secret-identity-of-his-teaching-assistant/|creationDate|2016-05-14 +https://www.washingtonpost.com/news/innovations/wp/2016/05/11/this-professor-stunned-his-students-when-he-revealed-the-secret-identity-of-his-teaching-assistant/|tag|http://www.semanlink.net/tag/artificial_intelligence +https://www.washingtonpost.com/news/innovations/wp/2016/05/11/this-professor-stunned-his-students-when-he-revealed-the-secret-identity-of-his-teaching-assistant/|tag|http://www.semanlink.net/tag/chatbot +https://www.washingtonpost.com/news/innovations/wp/2016/05/11/this-professor-stunned-his-students-when-he-revealed-the-secret-identity-of-his-teaching-assistant/|tag|http://www.semanlink.net/tag/technology_enhanced_learning +https://www.washingtonpost.com/news/innovations/wp/2016/05/11/this-professor-stunned-his-students-when-he-revealed-the-secret-identity-of-his-teaching-assistant/|title|What happened when a professor built a chatbot to be his teaching assistant - The Washington Post +https://www.washingtonpost.com/news/innovations/wp/2016/05/11/this-professor-stunned-his-students-when-he-revealed-the-secret-identity-of-his-teaching-assistant/|creationTime|2016-05-14T19:45:41Z +http://passeurdesciences.blog.lemonde.fr/2012/11/25/quand-un-virus-sauve-une-bacterie-du-suicide/|creationDate|2012-11-26 +http://passeurdesciences.blog.lemonde.fr/2012/11/25/quand-un-virus-sauve-une-bacterie-du-suicide/|tag|http://www.semanlink.net/tag/virus +http://passeurdesciences.blog.lemonde.fr/2012/11/25/quand-un-virus-sauve-une-bacterie-du-suicide/|tag|http://www.semanlink.net/tag/evolution +http://passeurdesciences.blog.lemonde.fr/2012/11/25/quand-un-virus-sauve-une-bacterie-du-suicide/|tag|http://www.semanlink.net/tag/altruisme 
+http://passeurdesciences.blog.lemonde.fr/2012/11/25/quand-un-virus-sauve-une-bacterie-du-suicide/|tag|http://www.semanlink.net/tag/bacteries +http://passeurdesciences.blog.lemonde.fr/2012/11/25/quand-un-virus-sauve-une-bacterie-du-suicide/|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://passeurdesciences.blog.lemonde.fr/2012/11/25/quand-un-virus-sauve-une-bacterie-du-suicide/|title|Quand un virus sauve une bactérie du suicide Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2012/11/25/quand-un-virus-sauve-une-bacterie-du-suicide/|creationTime|2012-11-26T12:09:01Z +http://www.education.rec.ri.cmu.edu/fire/naclo/pages/Ling/Fact/num-languages.html|creationDate|2017-12-08 +http://www.education.rec.ri.cmu.edu/fire/naclo/pages/Ling/Fact/num-languages.html|tag|http://www.semanlink.net/tag/chiffres +http://www.education.rec.ri.cmu.edu/fire/naclo/pages/Ling/Fact/num-languages.html|tag|http://www.semanlink.net/tag/langues_vivantes +http://www.education.rec.ri.cmu.edu/fire/naclo/pages/Ling/Fact/num-languages.html|comment|6,909 living languages on Earth. 414 of those account for 94% of humanity +http://www.education.rec.ri.cmu.edu/fire/naclo/pages/Ling/Fact/num-languages.html|title|Number of languages +http://www.education.rec.ri.cmu.edu/fire/naclo/pages/Ling/Fact/num-languages.html|creationTime|2017-12-08T14:28:40Z +https://www.bbc.com/news/world-africa-46442570|creationDate|2018-12-07 +https://www.bbc.com/news/world-africa-46442570|tag|http://www.semanlink.net/tag/girafe +https://www.bbc.com/news/world-africa-46442570|tag|http://www.semanlink.net/tag/niger +https://www.bbc.com/news/world-africa-46442570|title|Saving the last West African giraffes in Niger - BBC News +https://www.bbc.com/news/world-africa-46442570|creationTime|2018-12-07T13:21:46Z +http://www.w3.org/TR/2005/WD-wsdl20-rdf-20051104/|creationDate|2005-11-04 +http://www.w3.org/TR/2005/WD-wsdl20-rdf-20051104/|tag|http://www.semanlink.net/tag/rdf +http://www.w3.org/TR/2005/WD-wsdl20-rdf-20051104/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/TR/2005/WD-wsdl20-rdf-20051104/|tag|http://www.semanlink.net/tag/wsdl +http://www.w3.org/TR/2005/WD-wsdl20-rdf-20051104/|comment|The main objective of this specification is to present a standard RDF vocabulary equivalent to WSDL 2, so that all WSDL 2 documents can be transformed into RDF and merged with other Semantic Web data. 
+http://www.w3.org/TR/2005/WD-wsdl20-rdf-20051104/|title|Web Services Description Language (WSDL) Version 2.0: RDF Mapping +http://www.w3.org/2001/sw/sweo/public/UseCases/Renault/|creationDate|2007-05-30 +http://www.w3.org/2001/sw/sweo/public/UseCases/Renault/|tag|http://www.semanlink.net/tag/automobile_and_w3c +http://www.w3.org/2001/sw/sweo/public/UseCases/Renault/|tag|http://www.semanlink.net/tag/sweo_renault_use_case +http://www.w3.org/2001/sw/sweo/public/UseCases/Renault/|comment|Semantic Web Education and Outreach Interest Group: case study +http://www.w3.org/2001/sw/sweo/public/UseCases/Renault/|title|Semantic Web Technologies in Automotive Repair and Diagnostic Documentation +http://www.w3.org/2001/sw/sweo/public/UseCases/Renault/|creationTime|2007-05-30T13:59:07Z +https://medium.com/@catherio/unsolved-research-problems-vs-real-world-threat-models-e270e256bc9e|creationDate|2019-03-27 +https://medium.com/@catherio/unsolved-research-problems-vs-real-world-threat-models-e270e256bc9e|tag|http://www.semanlink.net/tag/driverless_car +https://medium.com/@catherio/unsolved-research-problems-vs-real-world-threat-models-e270e256bc9e|tag|http://www.semanlink.net/tag/threat_models +https://medium.com/@catherio/unsolved-research-problems-vs-real-world-threat-models-e270e256bc9e|tag|http://www.semanlink.net/tag/machine_learning_problems +https://medium.com/@catherio/unsolved-research-problems-vs-real-world-threat-models-e270e256bc9e|comment|> why should we care about adversarial examples? +https://medium.com/@catherio/unsolved-research-problems-vs-real-world-threat-models-e270e256bc9e|title|Unsolved research problems vs. real-world threat models +https://medium.com/@catherio/unsolved-research-problems-vs-real-world-threat-models-e270e256bc9e|creationTime|2019-03-27T08:37:16Z +http://ruder.io/emnlp-2018-highlights/|creationDate|2018-11-08 +http://ruder.io/emnlp-2018-highlights/|tag|http://www.semanlink.net/tag/sebastian_ruder +http://ruder.io/emnlp-2018-highlights/|tag|http://www.semanlink.net/tag/emnlp_2018 +http://ruder.io/emnlp-2018-highlights/|title|EMNLP 2018 Highlights: Inductive bias, cross-lingual learning, and more +http://ruder.io/emnlp-2018-highlights/|creationTime|2018-11-08T23:49:49Z +https://www.youtube.com/watch?v=fTjNkbLBEqg|creationDate|2014-09-06 +https://www.youtube.com/watch?v=fTjNkbLBEqg|tag|http://www.semanlink.net/tag/privacy_and_internet +https://www.youtube.com/watch?v=fTjNkbLBEqg|tag|http://www.semanlink.net/tag/tor_anonymity_network +https://www.youtube.com/watch?v=fTjNkbLBEqg|tag|http://www.semanlink.net/tag/dark_web +https://www.youtube.com/watch?v=fTjNkbLBEqg|tag|http://www.semanlink.net/tag/documentaire_tv +https://www.youtube.com/watch?v=fTjNkbLBEqg|title|BBC Horizon 2014-2015 Episode 4: Inside the Dark Web - YouTube +https://www.youtube.com/watch?v=fTjNkbLBEqg|creationTime|2014-09-06T10:24:43Z +https://github.com/BAILOOL/DoYouEvenLearn|creationDate|2018-04-06 +https://github.com/BAILOOL/DoYouEvenLearn|tag|http://www.semanlink.net/tag/computer_vision +https://github.com/BAILOOL/DoYouEvenLearn|tag|http://www.semanlink.net/tag/machine_learning +https://github.com/BAILOOL/DoYouEvenLearn|tag|http://www.semanlink.net/tag/links +https://github.com/BAILOOL/DoYouEvenLearn|tag|http://www.semanlink.net/tag/artificial_intelligence +https://github.com/BAILOOL/DoYouEvenLearn|title|Essential Guide to keep up with AI/ML/CV +https://github.com/BAILOOL/DoYouEvenLearn|creationTime|2018-04-06T16:16:13Z 
+http://multithreaded.stitchfix.com/blog/2016/05/27/lda2vec/#topic=38&lambda=1&term=|creationDate|2017-11-21 +http://multithreaded.stitchfix.com/blog/2016/05/27/lda2vec/#topic=38&lambda=1&term=|tag|http://www.semanlink.net/tag/topic_embeddings +http://multithreaded.stitchfix.com/blog/2016/05/27/lda2vec/#topic=38&lambda=1&term=|tag|http://www.semanlink.net/tag/latent_dirichlet_allocation +http://multithreaded.stitchfix.com/blog/2016/05/27/lda2vec/#topic=38&lambda=1&term=|comment|> lda2vec learns the powerful word representations in word2vec while jointly constructing human-interpretable LDA document representations. +http://multithreaded.stitchfix.com/blog/2016/05/27/lda2vec/#topic=38&lambda=1&term=|title|Introducing our Hybrid lda2vec Algorithm (2017) Stitch Fix Technology – Multithreaded +http://multithreaded.stitchfix.com/blog/2016/05/27/lda2vec/#topic=38&lambda=1&term=|creationTime|2017-11-21T01:02:22Z +https://realitydrop.org/|creationDate|2013-02-28 +https://realitydrop.org/|tag|http://www.semanlink.net/tag/rechauffement_climatique +https://realitydrop.org/|tag|http://www.semanlink.net/tag/al_gore +https://realitydrop.org/|title|Reality Drop: Spread Science about Climate Change, Global Warming +https://realitydrop.org/|creationTime|2013-02-28T19:06:42Z +https://www.ijcai.org/proceedings/2018/0810.pdf|creationDate|2018-10-26 +https://www.ijcai.org/proceedings/2018/0810.pdf|tag|http://www.semanlink.net/tag/robotique +https://www.ijcai.org/proceedings/2018/0810.pdf|tag|http://www.semanlink.net/tag/grounded_language_learning +https://www.ijcai.org/proceedings/2018/0810.pdf|tag|http://www.semanlink.net/tag/ijcai +https://www.ijcai.org/proceedings/2018/0810.pdf|comment|When trained only on large corpuses of text, but not on real-world representations, statistical methods for NLP and NLU lack true understanding of what words mean +https://www.ijcai.org/proceedings/2018/0810.pdf|title|Grounded Language Learning: Where Robotics and NLP Meet (IJCAI 2018) +https://www.ijcai.org/proceedings/2018/0810.pdf|creationTime|2018-10-26T00:50:33Z +http://linkedup-project.eu/2014/12/22/wikinizer-introducing-mindgraph/|creationDate|2014-12-22 +http://linkedup-project.eu/2014/12/22/wikinizer-introducing-mindgraph/|tag|http://www.semanlink.net/tag/knowledge_graph +http://linkedup-project.eu/2014/12/22/wikinizer-introducing-mindgraph/|tag|http://www.semanlink.net/tag/semanlink2_related +http://linkedup-project.eu/2014/12/22/wikinizer-introducing-mindgraph/|tag|http://www.semanlink.net/tag/graph_visualization +http://linkedup-project.eu/2014/12/22/wikinizer-introducing-mindgraph/|tag|http://www.semanlink.net/tag/graph_editor +http://linkedup-project.eu/2014/12/22/wikinizer-introducing-mindgraph/|comment|One UI to drive them all. WikiNizer is built around a single, universally applicable User Interface centering around a Graph Editor. 
+http://linkedup-project.eu/2014/12/22/wikinizer-introducing-mindgraph/|title|Wikinizer: Introducing MindGraph LinkedUp: Linking Web Data for Education - An EU project about the potential of open data in education
+http://linkedup-project.eu/2014/12/22/wikinizer-introducing-mindgraph/|creationTime|2014-12-22T13:45:10Z
+http://adventuresinmachinelearning.com/word2vec-keras-tutorial/|creationDate|2017-10-23
+http://adventuresinmachinelearning.com/word2vec-keras-tutorial/|tag|http://www.semanlink.net/tag/keras
+http://adventuresinmachinelearning.com/word2vec-keras-tutorial/|tag|http://www.semanlink.net/tag/word2vec
+http://adventuresinmachinelearning.com/word2vec-keras-tutorial/|tag|http://www.semanlink.net/tag/tutorial
+http://adventuresinmachinelearning.com/word2vec-keras-tutorial/|title|A Word2Vec Keras tutorial
+http://adventuresinmachinelearning.com/word2vec-keras-tutorial/|creationTime|2017-10-23T01:22:35Z
+http://fr.slideshare.net/lemire/all-about-bitmap-indexes-and-sorting-them|creationDate|2014-04-23
+http://fr.slideshare.net/lemire/all-about-bitmap-indexes-and-sorting-them|tag|http://www.semanlink.net/tag/slideshare
+http://fr.slideshare.net/lemire/all-about-bitmap-indexes-and-sorting-them|tag|http://www.semanlink.net/tag/bitmap_index
+http://fr.slideshare.net/lemire/all-about-bitmap-indexes-and-sorting-them|title|All About Bitmap Indexes... And Sorting Them
+http://fr.slideshare.net/lemire/all-about-bitmap-indexes-and-sorting-them|creationTime|2014-04-23T21:48:30Z
+https://carrotsearch.com/lingo3g/|creationDate|2017-05-22
+https://carrotsearch.com/lingo3g/|tag|http://www.semanlink.net/tag/carrot2
+https://carrotsearch.com/lingo3g/|tag|http://www.semanlink.net/tag/hierarchical_clustering_and_clustering_of_text_documents
+https://carrotsearch.com/lingo3g/|tag|http://www.semanlink.net/tag/nlp_tools
+https://carrotsearch.com/lingo3g/|comment|Instant analysis of small-to-medium quantities of text. Organizes collections of text documents into clearly-labeled hierarchical folders. In real-time, fully automatically, without external knowledge bases
+https://carrotsearch.com/lingo3g/|title|Lingo3G: real-time text clustering engine Carrot Search
+https://carrotsearch.com/lingo3g/|creationTime|2017-05-22T13:59:23Z
+http://www.cortical.io/technology_semantic.html|creationDate|2017-04-28
+http://www.cortical.io/technology_semantic.html|tag|http://www.semanlink.net/tag/cortical_io
+http://www.cortical.io/technology_semantic.html|tag|http://www.semanlink.net/tag/semantic_fingerprints
+http://www.cortical.io/technology_semantic.html|comment|"Words can be represented as fingerprints
+
+> Apple – Fruit = Computer
+> Jaguar – Porsche = Tiger
+
+"
+http://www.cortical.io/technology_semantic.html|title|semantic fingerprinting - cortical.io
+http://www.cortical.io/technology_semantic.html|creationTime|2017-04-28T22:49:01Z
+https://www.quora.com/How-does-Keras-compare-to-other-Deep-Learning-frameworks-like-Tensor-Flow-Theano-or-Torch|creationDate|2017-09-09
+https://www.quora.com/How-does-Keras-compare-to-other-Deep-Learning-frameworks-like-Tensor-Flow-Theano-or-Torch|tag|http://www.semanlink.net/tag/deep_learning_frameworks
+https://www.quora.com/How-does-Keras-compare-to-other-Deep-Learning-frameworks-like-Tensor-Flow-Theano-or-Torch|title|How does Keras compare to other Deep Learning frameworks like Tensor Flow, Theano, or Torch? - Quora
+https://www.quora.com/How-does-Keras-compare-to-other-Deep-Learning-frameworks-like-Tensor-Flow-Theano-or-Torch|creationTime|2017-09-09T13:49:22Z
+http://www.rfi.fr/culture/20151204-litterature-livres-philosophie-ecologie-cop21-environnement|creationDate|2015-12-15
+http://www.rfi.fr/culture/20151204-litterature-livres-philosophie-ecologie-cop21-environnement|tag|http://www.semanlink.net/tag/dictionnaire
+http://www.rfi.fr/culture/20151204-litterature-livres-philosophie-ecologie-cop21-environnement|tag|http://www.semanlink.net/tag/ecologie
+http://www.rfi.fr/culture/20151204-litterature-livres-philosophie-ecologie-cop21-environnement|tag|http://www.semanlink.net/tag/livre_a_lire
+http://www.rfi.fr/culture/20151204-litterature-livres-philosophie-ecologie-cop21-environnement|title|Dictionnaire de la pensée écologique
+http://www.rfi.fr/culture/20151204-litterature-livres-philosophie-ecologie-cop21-environnement|creationTime|2015-12-15T09:53:12Z
+https://github.com/tensorflow/nmt|creationDate|2017-09-18
+https://github.com/tensorflow/nmt|tag|http://www.semanlink.net/tag/tutorial
+https://github.com/tensorflow/nmt|tag|http://www.semanlink.net/tag/tensorflow
+https://github.com/tensorflow/nmt|tag|http://www.semanlink.net/tag/neural_machine_translation
+https://github.com/tensorflow/nmt|title|TensorFlow Neural Machine Translation (seq2seq) Tutorial
+https://github.com/tensorflow/nmt|creationTime|2017-09-18T14:14:51Z
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/7740484.stm?ad=1|creationDate|2008-11-24
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/7740484.stm?ad=1|tag|http://www.semanlink.net/tag/cerveau
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/7740484.stm?ad=1|tag|http://www.semanlink.net/tag/ibm
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/7740484.stm?ad=1|tag|http://www.semanlink.net/tag/darpa
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/7740484.stm?ad=1|comment|IBM has announced it will lead a US government-funded collaboration to make electronic circuits that mimic brains.
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/7740484.stm?ad=1|title|IBM plans 'brain-like' computers
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/7740484.stm?ad=1|creationTime|2008-11-24T13:35:09Z
+https://www.quora.com/What-is-the-future-of-jobs-and-work-in-20-years?no_redirect=1|creationDate|2016-02-04
+https://www.quora.com/What-is-the-future-of-jobs-and-work-in-20-years?no_redirect=1|tag|http://www.semanlink.net/tag/ng
+https://www.quora.com/What-is-the-future-of-jobs-and-work-in-20-years?no_redirect=1|tag|http://www.semanlink.net/tag/guaranteed_basic_income
+https://www.quora.com/What-is-the-future-of-jobs-and-work-in-20-years?no_redirect=1|tag|http://www.semanlink.net/tag/travail
+https://www.quora.com/What-is-the-future-of-jobs-and-work-in-20-years?no_redirect=1|title|Andrew Ng: What is the future of jobs and work in 20 years? - Quora
+https://www.quora.com/What-is-the-future-of-jobs-and-work-in-20-years?no_redirect=1|creationTime|2016-02-04T19:57:37Z
+http://www.lemonde.fr/idees/article/2011/09/27/rendre-aux-francais-leur-paysage-architectural_1578046_3232.html|creationDate|2011-09-27
+http://www.lemonde.fr/idees/article/2011/09/27/rendre-aux-francais-leur-paysage-architectural_1578046_3232.html|tag|http://www.semanlink.net/tag/droit_d_auteur
+http://www.lemonde.fr/idees/article/2011/09/27/rendre-aux-francais-leur-paysage-architectural_1578046_3232.html|tag|http://www.semanlink.net/tag/architecture
+http://www.lemonde.fr/idees/article/2011/09/27/rendre-aux-francais-leur-paysage-architectural_1578046_3232.html|title|Rendre aux Français leur paysage architectural
+http://www.lemonde.fr/idees/article/2011/09/27/rendre-aux-francais-leur-paysage-architectural_1578046_3232.html|creationTime|2011-09-27T10:03:09Z
+http://www.lemonde.fr/idees/article/2011/09/27/rendre-aux-francais-leur-paysage-architectural_1578046_3232.html|source|Le Monde
+http://www.lemonde.fr/idees/article/2011/09/27/rendre-aux-francais-leur-paysage-architectural_1578046_3232.html|date|2011-09-27
+http://blogs.techrepublic.com.com/tech-manager/?p=564&tag=rbxccnbtr1|creationDate|2010-07-24
+http://blogs.techrepublic.com.com/tech-manager/?p=564&tag=rbxccnbtr1|tag|http://www.semanlink.net/tag/business_case
+http://blogs.techrepublic.com.com/tech-manager/?p=564&tag=rbxccnbtr1|title|6 essential elements for a winning business case IT Leadership TechRepublic.com
+http://blogs.techrepublic.com.com/tech-manager/?p=564&tag=rbxccnbtr1|creationTime|2010-07-24T10:49:41Z
+http://www.openlinksw.com/blog/~kidehen/index.vspx?page=&id=1148|creationDate|2007-04-03
+http://www.openlinksw.com/blog/~kidehen/index.vspx?page=&id=1148|tag|http://www.semanlink.net/tag/kingsley_idehen
+http://www.openlinksw.com/blog/~kidehen/index.vspx?page=&id=1148|tag|http://www.semanlink.net/tag/linked_data
+http://www.openlinksw.com/blog/~kidehen/index.vspx?page=&id=1148|title|Personal URIs & Data Spaces
+http://www.openlinksw.com/blog/~kidehen/index.vspx?page=&id=1148|creationTime|2007-04-03T22:58:30Z
+https://textblob.readthedocs.io/en/dev/|creationDate|2017-06-29
+https://textblob.readthedocs.io/en/dev/|tag|http://www.semanlink.net/tag/textblob
+https://textblob.readthedocs.io/en/dev/|title|TextBlob: Simplified Text Processing
+https://textblob.readthedocs.io/en/dev/|creationTime|2017-06-29T10:53:36Z
+http://opennlp.apache.org/documentation/1.5.3/manual/opennlp.html|creationDate|2014-03-27
+http://opennlp.apache.org/documentation/1.5.3/manual/opennlp.html|tag|http://www.semanlink.net/tag/apache_opennlp
+http://opennlp.apache.org/documentation/1.5.3/manual/opennlp.html|tag|http://www.semanlink.net/tag/developer_documentation
+http://opennlp.apache.org/documentation/1.5.3/manual/opennlp.html|title|Apache OpenNLP Developer Documentation
+http://opennlp.apache.org/documentation/1.5.3/manual/opennlp.html|creationTime|2014-03-27T10:40:23Z
+https://issues.apache.org/bugzilla/show_bug.cgi?id=23929|creationDate|2008-10-11
+https://issues.apache.org/bugzilla/show_bug.cgi?id=23929|tag|http://www.semanlink.net/tag/bug
+https://issues.apache.org/bugzilla/show_bug.cgi?id=23929|tag|http://www.semanlink.net/tag/uri_encoding
+https://issues.apache.org/bugzilla/show_bug.cgi?id=23929|tag|http://www.semanlink.net/tag/tomcat
+https://issues.apache.org/bugzilla/show_bug.cgi?id=23929|title|Tomcat - Bug 23929 – request.setCharacterEncoding(String) doesn't work
+https://issues.apache.org/bugzilla/show_bug.cgi?id=23929|creationTime|2008-10-11T17:09:29Z +http://books.google.com/ngrams/|creationDate|2012-03-24 +http://books.google.com/ngrams/|tag|http://www.semanlink.net/tag/n_gram +http://books.google.com/ngrams/|tag|http://www.semanlink.net/tag/google +http://books.google.com/ngrams/|title|Google Ngram Viewer +http://books.google.com/ngrams/|creationTime|2012-03-24T09:06:56Z +http://vimeo.com/34870158|creationDate|2012-01-12 +http://vimeo.com/34870158|tag|http://www.semanlink.net/tag/semtechbiz +http://vimeo.com/34870158|tag|http://www.semanlink.net/tag/martin_hepp +http://vimeo.com/34870158|tag|http://www.semanlink.net/tag/automobile +http://vimeo.com/34870158|tag|http://www.semanlink.net/tag/semantic_seo +http://vimeo.com/34870158|title|Semantic SEO for the Automotive Industry on Vimeo +http://vimeo.com/34870158|creationTime|2012-01-12T08:51:11Z +http://www.theatlantic.com/technology/archive/2012/10/bigger-better-google-ngrams-brace-yourself-for-the-power-of-grammar/263487/|creationDate|2013-04-08 +http://www.theatlantic.com/technology/archive/2012/10/bigger-better-google-ngrams-brace-yourself-for-the-power-of-grammar/263487/|tag|http://www.semanlink.net/tag/nlp +http://www.theatlantic.com/technology/archive/2012/10/bigger-better-google-ngrams-brace-yourself-for-the-power-of-grammar/263487/|tag|http://www.semanlink.net/tag/google +http://www.theatlantic.com/technology/archive/2012/10/bigger-better-google-ngrams-brace-yourself-for-the-power-of-grammar/263487/|title|Bigger, Better Google Ngrams: Brace Yourself for the Power of Grammar - Ben Zimmer - The Atlantic +http://www.theatlantic.com/technology/archive/2012/10/bigger-better-google-ngrams-brace-yourself-for-the-power-of-grammar/263487/|creationTime|2013-04-08T15:57:23Z +http://www.onlamp.com/pub/wlg/6563|creationDate|2005-10-13 +http://www.onlamp.com/pub/wlg/6563|tag|http://www.semanlink.net/tag/soap +http://www.onlamp.com/pub/wlg/6563|comment|SOAP has never been and still isn't necessary as a means of getting XML from one place to another. +http://www.onlamp.com/pub/wlg/6563|title|What if SOAP had never happened? 
+http://www.volkswagen.co.uk/vocabularies/coo/ns|creationDate|2011-03-24 +http://www.volkswagen.co.uk/vocabularies/coo/ns|tag|http://www.semanlink.net/tag/car_options_ontology +http://www.volkswagen.co.uk/vocabularies/coo/ns|title|The Car Options Ontology (COO) +http://www.volkswagen.co.uk/vocabularies/coo/ns|creationTime|2011-03-24T17:02:45Z +http://www.click2map.com/v2/sachone/Carte-association-ARBRES|creationDate|2019-04-09 +http://www.click2map.com/v2/sachone/Carte-association-ARBRES|tag|http://www.semanlink.net/tag/arbres_remarquables +http://www.click2map.com/v2/sachone/Carte-association-ARBRES|title|Carte de France des Arbres Remarquables labellisés +http://www.click2map.com/v2/sachone/Carte-association-ARBRES|creationTime|2019-04-09T19:47:44Z +http://www.westwind.com/reference/OS-X/invisibles.html|creationDate|2005-03-29 +http://www.westwind.com/reference/OS-X/invisibles.html|tag|http://www.semanlink.net/tag/mac_os_x +http://www.westwind.com/reference/OS-X/invisibles.html|title|Mac OS X Hidden Files & Directories +http://www.lemonde.fr/afrique/article/2013/12/05/nelson-mandela-est-mort_3427343_3212.html|creationDate|2013-12-07 +http://www.lemonde.fr/afrique/article/2013/12/05/nelson-mandela-est-mort_3427343_3212.html|tag|http://www.semanlink.net/tag/nelson_mandela +http://www.lemonde.fr/afrique/article/2013/12/05/nelson-mandela-est-mort_3427343_3212.html|comment|our beloved Nelson Mandela +http://www.lemonde.fr/afrique/article/2013/12/05/nelson-mandela-est-mort_3427343_3212.html|title|Mort de Nelson Mandela, l'Africain capital +http://www.lemonde.fr/afrique/article/2013/12/05/nelson-mandela-est-mort_3427343_3212.html|creationTime|2013-12-07T11:48:42Z +https://www.lemonde.fr/les-recettes-du-monde/article/2018/08/06/saumon-grille-au-beurre-rouge-facon-joel-robuchon-la-recette-de-nicolas-chatenier_5339851_5324493.html|creationDate|2018-08-06 +https://www.lemonde.fr/les-recettes-du-monde/article/2018/08/06/saumon-grille-au-beurre-rouge-facon-joel-robuchon-la-recette-de-nicolas-chatenier_5339851_5324493.html|tag|http://www.semanlink.net/tag/recette_de_cuisine +https://www.lemonde.fr/les-recettes-du-monde/article/2018/08/06/saumon-grille-au-beurre-rouge-facon-joel-robuchon-la-recette-de-nicolas-chatenier_5339851_5324493.html|title|Saumon grillé au beurre rouge façon Joël Robuchon : la recette de Nicolas Chatenier +https://www.lemonde.fr/les-recettes-du-monde/article/2018/08/06/saumon-grille-au-beurre-rouge-facon-joel-robuchon-la-recette-de-nicolas-chatenier_5339851_5324493.html|creationTime|2018-08-06T18:28:42Z +http://www.semanticdesktop.org|creationDate|2006-02-22 +http://www.semanticdesktop.org|tag|http://www.semanlink.net/tag/semantic_web +http://www.semanticdesktop.org|tag|http://www.semanlink.net/tag/semantic_desktop +http://www.semanticdesktop.org|comment|Still, we miss a wide use of Semantic Web technologies on personal computers. 
+http://www.semanticdesktop.org|title|SemanticDesktop.org
+https://github.com/RaRe-Technologies/gensim/blob/c971411c09773488dbdd899754537c0d1a9fce50/docs/notebooks/WMD_tutorial.ipynb|creationDate|2017-12-23
+https://github.com/RaRe-Technologies/gensim/blob/c971411c09773488dbdd899754537c0d1a9fce50/docs/notebooks/WMD_tutorial.ipynb|tag|http://www.semanlink.net/tag/word_mover_s_distance
+https://github.com/RaRe-Technologies/gensim/blob/c971411c09773488dbdd899754537c0d1a9fce50/docs/notebooks/WMD_tutorial.ipynb|tag|http://www.semanlink.net/tag/word2vec
+https://github.com/RaRe-Technologies/gensim/blob/c971411c09773488dbdd899754537c0d1a9fce50/docs/notebooks/WMD_tutorial.ipynb|tag|http://www.semanlink.net/tag/gensim
+https://github.com/RaRe-Technologies/gensim/blob/c971411c09773488dbdd899754537c0d1a9fce50/docs/notebooks/WMD_tutorial.ipynb|comment|Finding similar documents with Word2Vec and WMD (Word Mover’s Distance)
+https://github.com/RaRe-Technologies/gensim/blob/c971411c09773488dbdd899754537c0d1a9fce50/docs/notebooks/WMD_tutorial.ipynb|title|gensim/WMD_tutorial.ipynb
+https://github.com/RaRe-Technologies/gensim/blob/c971411c09773488dbdd899754537c0d1a9fce50/docs/notebooks/WMD_tutorial.ipynb|creationTime|2017-12-23T14:12:41Z
+https://github.com/fchollet/keras/tree/master/examples|creationDate|2017-10-25
+https://github.com/fchollet/keras/tree/master/examples|tag|http://www.semanlink.net/tag/keras
+https://github.com/fchollet/keras/tree/master/examples|tag|http://www.semanlink.net/tag/francois_chollet
+https://github.com/fchollet/keras/tree/master/examples|title|Keras examples directory
+https://github.com/fchollet/keras/tree/master/examples|creationTime|2017-10-25T14:41:57Z
+http://danbri.org/words/2008/01/03/243|creationDate|2008-01-03
+http://danbri.org/words/2008/01/03/243|tag|http://www.semanlink.net/tag/dan_brickley
+http://danbri.org/words/2008/01/03/243|tag|http://www.semanlink.net/tag/php
+http://danbri.org/words/2008/01/03/243|tag|http://www.semanlink.net/tag/rdf
+http://danbri.org/words/2008/01/03/243|title|danbri’s foaf stories » Commandline PHP for loading RDF URLs into ARC (and Twinkle for query UI)
+http://danbri.org/words/2008/01/03/243|creationTime|2008-01-03T14:03:41Z
+https://stackoverflow.com/questions/30661650/how-does-firefox-reader-view-operate?rq=1|creationDate|2017-05-27
+https://stackoverflow.com/questions/30661650/how-does-firefox-reader-view-operate?rq=1|tag|http://www.semanlink.net/tag/reader_mode_browsers
+https://stackoverflow.com/questions/30661650/how-does-firefox-reader-view-operate?rq=1|tag|http://www.semanlink.net/tag/firefox
+https://stackoverflow.com/questions/30661650/how-does-firefox-reader-view-operate?rq=1|tag|http://www.semanlink.net/tag/stack_overflow
+https://stackoverflow.com/questions/30661650/how-does-firefox-reader-view-operate?rq=1|title|javascript - How does Firefox reader view operate - Stack Overflow
+https://stackoverflow.com/questions/30661650/how-does-firefox-reader-view-operate?rq=1|creationTime|2017-05-27T13:08:52Z
+http://www.scmp.com/news/china/society/article/2120477/chinas-plan-use-solar-power-melt-permafrost-turn-tibetan|creationDate|2017-11-21
+http://www.scmp.com/news/china/society/article/2120477/chinas-plan-use-solar-power-melt-permafrost-turn-tibetan|tag|http://www.semanlink.net/tag/chine
+http://www.scmp.com/news/china/society/article/2120477/chinas-plan-use-solar-power-melt-permafrost-turn-tibetan|title|China’s plan to use solar power to melt permafrost to turn a Tibetan grassland into an artificial forest on the roof of the world South China Morning Post
+http://www.scmp.com/news/china/society/article/2120477/chinas-plan-use-solar-power-melt-permafrost-turn-tibetan|creationTime|2017-11-21T11:15:46Z
+http://www.structureddynamics.com/linked_data.html|creationDate|2009-10-05
+http://www.structureddynamics.com/linked_data.html|tag|http://www.semanlink.net/tag/linked_data
+http://www.structureddynamics.com/linked_data.html|tag|http://www.semanlink.net/tag/faq
+http://www.structureddynamics.com/linked_data.html|tag|http://www.semanlink.net/tag/link_to_me
+http://www.structureddynamics.com/linked_data.html|title|Linked Data FAQ
+http://www.structureddynamics.com/linked_data.html|creationTime|2009-10-05T18:52:51Z
+http://www.paulgraham.com/web20.html|creationDate|2005-11-23
+http://www.paulgraham.com/web20.html|tag|http://www.semanlink.net/tag/web_2_0
+http://www.paulgraham.com/web20.html|tag|http://www.semanlink.net/tag/google
+http://www.paulgraham.com/web20.html|tag|http://www.semanlink.net/tag/good
+http://www.paulgraham.com/web20.html|tag|http://www.semanlink.net/tag/paul_graham
+http://www.paulgraham.com/web20.html|title|Web 2.0
+http://apassant.net/lodr/|creationDate|2008-10-07
+http://apassant.net/lodr/|tag|http://www.semanlink.net/tag/alexandre_passant
+http://apassant.net/lodr/|tag|http://www.semanlink.net/tag/lodr
+http://apassant.net/lodr/|title|LODr (Alexandre Passant's instance)
+http://apassant.net/lodr/|creationTime|2008-10-07T13:52:09Z
+http://www.ultralingua.net/ulnet-enable.cgi?service=english2french&location=http://www.paulgraham.com/essay.html|creationDate|2005-11-24
+http://www.ultralingua.net/ulnet-enable.cgi?service=english2french&location=http://www.paulgraham.com/essay.html|tag|http://www.semanlink.net/tag/paul_graham
+http://www.ultralingua.net/ulnet-enable.cgi?service=english2french&location=http://www.paulgraham.com/essay.html|title|The Age of the Essay
+http://www.mkbergman.com/852/the-open-world-assumption-elephant-in-the-room/|creationDate|2010-09-06
+http://www.mkbergman.com/852/the-open-world-assumption-elephant-in-the-room/|tag|http://www.semanlink.net/tag/mike_bergman
+http://www.mkbergman.com/852/the-open-world-assumption-elephant-in-the-room/|tag|http://www.semanlink.net/tag/open_world_assumption
+http://www.mkbergman.com/852/the-open-world-assumption-elephant-in-the-room/|tag|http://www.semanlink.net/tag/it_failures
+http://www.mkbergman.com/852/the-open-world-assumption-elephant-in-the-room/|tag|http://www.semanlink.net/tag/business_intelligence
+http://www.mkbergman.com/852/the-open-world-assumption-elephant-in-the-room/|title|The Open World Assumption: Elephant in the Room » AI3:::Adaptive Information
+http://www.mkbergman.com/852/the-open-world-assumption-elephant-in-the-room/|creationTime|2010-09-06T21:45:24Z
+http://thewhyaxis.info/hairball/|creationDate|2015-01-26
+http://thewhyaxis.info/hairball/|tag|http://www.semanlink.net/tag/graph_visualization
+http://thewhyaxis.info/hairball/|title|The Why Axis - Sigma.js Cleans up Hairball Network Visualizations
+http://thewhyaxis.info/hairball/|creationTime|2015-01-26T14:51:17Z
+https://www.quora.com/How-can-I-preprocess-labeled-data-for-use-with-SciKit-Learn|creationDate|2015-10-21
+https://www.quora.com/How-can-I-preprocess-labeled-data-for-use-with-SciKit-Learn|tag|http://www.semanlink.net/tag/nltk
+https://www.quora.com/How-can-I-preprocess-labeled-data-for-use-with-SciKit-Learn|comment|A relatively easy way (though not the only one) is to use Natural Language Toolkit (NLTK)'s scikitlearn module in the classify Package
+https://www.quora.com/How-can-I-preprocess-labeled-data-for-use-with-SciKit-Learn|title|How to preprocess labeled data for use with SciKit-Learn - Quora
+https://www.quora.com/How-can-I-preprocess-labeled-data-for-use-with-SciKit-Learn|creationTime|2015-10-21T16:56:28Z
+http://vocab.deri.ie/rdforms|creationDate|2011-08-28
+http://vocab.deri.ie/rdforms|tag|http://www.semanlink.net/tag/rdf_forms
+http://vocab.deri.ie/rdforms|title|RDForms - representing HTML form and field semantics DERI Vocabularies
+http://vocab.deri.ie/rdforms|creationTime|2011-08-28T23:05:30Z
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5038116.stm|creationDate|2006-06-06
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5038116.stm|tag|http://www.semanlink.net/tag/origine_de_l_agriculture
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5038116.stm|comment|Ancient figs found in an archaeological site in the Jordan Valley may represent one of the earliest forms of agriculture, scientists report. The carbonised fruits date between 11,200 and 11,400 years old. After examining the figs, they determined that it was a self-pollinating, or parthenocarpic, variety, like the kind we eat today. In nature, parthenocarpic fig trees appear now and again by a chance genetic mutation. But because they do not produce seeds they cannot reproduce alone - they require a shoot to be removed and replanted.
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5038116.stm|title|BBC NEWS Science/Nature Ancient fig clue to first farming
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5038116.stm|source|BBC
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5038116.stm|date|2006-06-02
+https://www.theatlantic.com/science/archive/2017/11/how-the-zombie-fungus-takes-over-ants-bodies-to-control-their-minds/545864/|creationDate|2017-11-16
+https://www.theatlantic.com/science/archive/2017/11/how-the-zombie-fungus-takes-over-ants-bodies-to-control-their-minds/545864/|tag|http://www.semanlink.net/tag/curiosite_naturelle
+https://www.theatlantic.com/science/archive/2017/11/how-the-zombie-fungus-takes-over-ants-bodies-to-control-their-minds/545864/|tag|http://www.semanlink.net/tag/ant
+https://www.theatlantic.com/science/archive/2017/11/how-the-zombie-fungus-takes-over-ants-bodies-to-control-their-minds/545864/|tag|http://www.semanlink.net/tag/zombie
+https://www.theatlantic.com/science/archive/2017/11/how-the-zombie-fungus-takes-over-ants-bodies-to-control-their-minds/545864/|comment|"Fungus doesn't take over the ant's brain, as thought, but rather creates an internal network of fungus cells that controls the limbs. ""The ant ends its life as a prisoner in its own body. Its brain is still in the driver’s seat, but the fungus has the wheel."""
+https://www.theatlantic.com/science/archive/2017/11/how-the-zombie-fungus-takes-over-ants-bodies-to-control-their-minds/545864/|title|The Zombie Fungus Takes Over Ants’ Bodies to Control Their Minds - The Atlantic
+https://www.theatlantic.com/science/archive/2017/11/how-the-zombie-fungus-takes-over-ants-bodies-to-control-their-minds/545864/|creationTime|2017-11-16T00:57:45Z
+http://www.blazegraph.com/|creationDate|2015-03-30
+http://www.blazegraph.com/|tag|http://www.semanlink.net/tag/wikidata
+http://www.blazegraph.com/|tag|http://www.semanlink.net/tag/graph_database
+http://www.blazegraph.com/|title|www.blazegraph.com
+http://www.blazegraph.com/|creationTime|2015-03-30T22:45:42Z
+http://www.csie.ntu.edu.tw/~cjlin/libsvm/index.html|creationDate|2014-03-25
+http://www.csie.ntu.edu.tw/~cjlin/libsvm/index.html|tag|http://www.semanlink.net/tag/national_taiwan_university
+http://www.csie.ntu.edu.tw/~cjlin/libsvm/index.html|tag|http://www.semanlink.net/tag/support_vector_machine
+http://www.csie.ntu.edu.tw/~cjlin/libsvm/index.html|title|LIBSVM -- A Library for Support Vector Machines
+http://www.csie.ntu.edu.tw/~cjlin/libsvm/index.html|creationTime|2014-03-25T11:18:59Z
+http://www.scholarpedia.org/article/Models_of_consciousness|creationDate|2013-11-30
+http://www.scholarpedia.org/article/Models_of_consciousness|tag|http://www.semanlink.net/tag/models_of_consciousness
+http://www.scholarpedia.org/article/Models_of_consciousness|title|Models of consciousness - Scholarpedia
+http://www.scholarpedia.org/article/Models_of_consciousness|creationTime|2013-11-30T21:59:08Z
+http://www.iks-project.eu/sites/default/files/Topic-Classification.pdf|creationDate|2012-07-26
+http://www.iks-project.eu/sites/default/files/Topic-Classification.pdf|tag|http://www.semanlink.net/tag/apache_stanbol +http://www.iks-project.eu/sites/default/files/Topic-Classification.pdf|tag|http://www.semanlink.net/tag/olivier_grisel +http://www.iks-project.eu/sites/default/files/Topic-Classification.pdf|title|IKS-Stanbol - Topic Classification +http://www.iks-project.eu/sites/default/files/Topic-Classification.pdf|creationTime|2012-07-26T15:59:35Z +http://www.lemonde.fr/carnet/article/2010/12/19/jacqueline-de-romilly-helleniste-et-academicienne-est-morte_1455554_3382.html|creationDate|2010-12-19 +http://www.lemonde.fr/carnet/article/2010/12/19/jacqueline-de-romilly-helleniste-et-academicienne-est-morte_1455554_3382.html|tag|http://www.semanlink.net/tag/jacqueline_de_romilly +http://www.lemonde.fr/carnet/article/2010/12/19/jacqueline-de-romilly-helleniste-et-academicienne-est-morte_1455554_3382.html|title|Jacqueline de Romilly, helléniste et académicienne, est morte +http://www.lemonde.fr/carnet/article/2010/12/19/jacqueline-de-romilly-helleniste-et-academicienne-est-morte_1455554_3382.html|creationTime|2010-12-19T14:07:22Z +http://www.lemonde.fr/carnet/article/2010/12/19/jacqueline-de-romilly-helleniste-et-academicienne-est-morte_1455554_3382.html|source|Le Monde +https://github.com/swagger-api/swagger-core/wiki/Swagger-Core-Jersey-2.X-Project-Setup-1.5|creationDate|2017-04-03 +https://github.com/swagger-api/swagger-core/wiki/Swagger-Core-Jersey-2.X-Project-Setup-1.5|tag|http://www.semanlink.net/tag/jersey +https://github.com/swagger-api/swagger-core/wiki/Swagger-Core-Jersey-2.X-Project-Setup-1.5|tag|http://www.semanlink.net/tag/tutorial +https://github.com/swagger-api/swagger-core/wiki/Swagger-Core-Jersey-2.X-Project-Setup-1.5|title|Swagger Core Jersey 2.X Project Setup 1.5 · swagger-api/swagger-core Wiki +https://github.com/swagger-api/swagger-core/wiki/Swagger-Core-Jersey-2.X-Project-Setup-1.5|creationTime|2017-04-03T14:29:57Z +http://files.peacecorps.gov/multimedia/audio/languagelessons/niger/NE_Zarma_Language_Lessons.pdf|creationDate|2015-08-27 +http://files.peacecorps.gov/multimedia/audio/languagelessons/niger/NE_Zarma_Language_Lessons.pdf|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://files.peacecorps.gov/multimedia/audio/languagelessons/niger/NE_Zarma_Language_Lessons.pdf|tag|http://www.semanlink.net/tag/histoire_des_jermas +http://files.peacecorps.gov/multimedia/audio/languagelessons/niger/NE_Zarma_Language_Lessons.pdf|tag|http://www.semanlink.net/tag/peace_corps +http://files.peacecorps.gov/multimedia/audio/languagelessons/niger/NE_Zarma_Language_Lessons.pdf|tag|http://www.semanlink.net/tag/jerma +http://files.peacecorps.gov/multimedia/audio/languagelessons/niger/NE_Zarma_Language_Lessons.pdf|title|Peace Corps/Niger An Introduction to the Zarma Language +http://files.peacecorps.gov/multimedia/audio/languagelessons/niger/NE_Zarma_Language_Lessons.pdf|creationTime|2015-08-27T13:58:14Z +http://www.lemonde.fr/opinions/chronique/2010/04/27/voile-pudique_1343015_3232.html|creationDate|2010-04-29 +http://www.lemonde.fr/opinions/chronique/2010/04/27/voile-pudique_1343015_3232.html|tag|http://www.semanlink.net/tag/loi_sur_le_voile +http://www.lemonde.fr/opinions/chronique/2010/04/27/voile-pudique_1343015_3232.html|title|Voile pudique +http://www.lemonde.fr/opinions/chronique/2010/04/27/voile-pudique_1343015_3232.html|creationTime|2010-04-29T16:15:50Z +http://news.bbc.co.uk/1/hi/health/4225564.stm|creationDate|2005-09-09 
+http://news.bbc.co.uk/1/hi/health/4225564.stm|tag|http://www.semanlink.net/tag/medecine
+http://news.bbc.co.uk/1/hi/health/4225564.stm|tag|http://www.semanlink.net/tag/fecondation
+http://news.bbc.co.uk/1/hi/health/4225564.stm|title|BBC NEWS - Embryo with two mothers approved
+http://news.bbc.co.uk/1/hi/health/4225564.stm|source|BBC
+http://myfaces.apache.org|creationDate|2005-09-22
+http://myfaces.apache.org|tag|http://www.semanlink.net/tag/myfaces
+http://myfaces.apache.org|title|Apache MyFaces
+http://esw.w3.org/PushBackDataToLegacySourcesRDForms#RDForms_Vocabulary|creationDate|2010-05-12
+http://esw.w3.org/PushBackDataToLegacySourcesRDForms#RDForms_Vocabulary|tag|http://www.semanlink.net/tag/rdf_forms
+http://esw.w3.org/PushBackDataToLegacySourcesRDForms#RDForms_Vocabulary|comment|This note describes the so called RDForms, a vocabulary and a protocol based on HTML forms and key/value pairs (KVP) to enable the write-back of RDF diffs to a RDF wrapper used in pushback
+http://esw.w3.org/PushBackDataToLegacySourcesRDForms#RDForms_Vocabulary|title|PushBackDataToLegacySourcesRDForms - ESW Wiki
+http://esw.w3.org/PushBackDataToLegacySourcesRDForms#RDForms_Vocabulary|creationTime|2010-05-12T23:27:19Z
+http://titanpad.com/|creationDate|2012-10-09
+http://titanpad.com/|tag|http://www.semanlink.net/tag/online_tool
+http://titanpad.com/|comment|TitanPad lets people work on one document simultaneously
+http://titanpad.com/|title|TitanPad
+http://titanpad.com/|creationTime|2012-10-09T14:43:06Z
+http://www.wired.com/gadgetlab/2011/12/summly-app-summarization/|creationDate|2011-12-28
+http://www.wired.com/gadgetlab/2011/12/summly-app-summarization/|tag|http://www.semanlink.net/tag/summly
+http://www.wired.com/gadgetlab/2011/12/summly-app-summarization/|title|Teen’s iOS App Uses Complex Algorithms to Summarize the Web Gadget Lab Wired.com
+http://www.wired.com/gadgetlab/2011/12/summly-app-summarization/|creationTime|2011-12-28T13:20:46Z
+http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.822.3091&rep=rep1&type=pdf|creationDate|2017-11-06
+http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.822.3091&rep=rep1&type=pdf|tag|http://www.semanlink.net/tag/nlp_text_classification
+http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.822.3091&rep=rep1&type=pdf|tag|http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp
+http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.822.3091&rep=rep1&type=pdf|comment|"Comments about this paper [here](https://medium.com/paper-club/recurrent-convolutional-neural-networks-for-text-classification-107020765e52) and [there](https://medium.com/paper-club/cnns-for-text-classification-b45bde0bb254)
+"
+http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.822.3091&rep=rep1&type=pdf|title|Recurrent Convolutional Neural Networks for Text Classification (S Lai - 2015)
+http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.822.3091&rep=rep1&type=pdf|creationTime|2017-11-06T09:12:22Z
+http://www.head-fi.org/t/415361/24bit-vs-16bit-the-myth-exploded|creationDate|2012-12-30
+http://www.head-fi.org/t/415361/24bit-vs-16bit-the-myth-exploded|tag|http://www.semanlink.net/tag/robert
+http://www.head-fi.org/t/415361/24bit-vs-16bit-the-myth-exploded|tag|http://www.semanlink.net/tag/digital_audio
+http://www.head-fi.org/t/415361/24bit-vs-16bit-the-myth-exploded|title|24bit vs 16bit, the myth exploded!
+http://www.head-fi.org/t/415361/24bit-vs-16bit-the-myth-exploded|creationTime|2012-12-30T00:32:01Z +http://www.snee.com/bobdc.blog/2014/10/dropping-optional-blocks-from.html|creationDate|2016-01-25 +http://www.snee.com/bobdc.blog/2014/10/dropping-optional-blocks-from.html|tag|http://www.semanlink.net/tag/sparql_tips +http://www.snee.com/bobdc.blog/2014/10/dropping-optional-blocks-from.html|tag|http://www.semanlink.net/tag/sparql_perfs +http://www.snee.com/bobdc.blog/2014/10/dropping-optional-blocks-from.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2014/10/dropping-optional-blocks-from.html|title|Dropping OPTIONAL blocks from SPARQL CONSTRUCT queries - bobdc.blog +http://www.snee.com/bobdc.blog/2014/10/dropping-optional-blocks-from.html|creationTime|2016-01-25T17:55:36Z +http://www.lemonde.fr/afrique/article/2015/05/28/rwanda-comment-le-genocide-est-enseigne-a-l-ecole_4642789_3212.html|creationDate|2017-08-02 +http://www.lemonde.fr/afrique/article/2015/05/28/rwanda-comment-le-genocide-est-enseigne-a-l-ecole_4642789_3212.html|tag|http://www.semanlink.net/tag/rwanda +http://www.lemonde.fr/afrique/article/2015/05/28/rwanda-comment-le-genocide-est-enseigne-a-l-ecole_4642789_3212.html|tag|http://www.semanlink.net/tag/genocide_rwandais +http://www.lemonde.fr/afrique/article/2015/05/28/rwanda-comment-le-genocide-est-enseigne-a-l-ecole_4642789_3212.html|tag|http://www.semanlink.net/tag/genocide +http://www.lemonde.fr/afrique/article/2015/05/28/rwanda-comment-le-genocide-est-enseigne-a-l-ecole_4642789_3212.html|tag|http://www.semanlink.net/tag/ecole +http://www.lemonde.fr/afrique/article/2015/05/28/rwanda-comment-le-genocide-est-enseigne-a-l-ecole_4642789_3212.html|title|Rwanda : comment le génocide est enseigné à l’école +http://www.lemonde.fr/afrique/article/2015/05/28/rwanda-comment-le-genocide-est-enseigne-a-l-ecole_4642789_3212.html|creationTime|2017-08-02T10:43:40Z +https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/|creationDate|2017-10-25 +https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/|tag|http://www.semanlink.net/tag/keras_embedding_layer +https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/|comment|"Keras Embedding Layer requires that the input data be integer encoded, so that each word is represented by a unique integer. This data preparation step can be performed using the Tokenizer API also provided with Keras. + +The Embedding layer is initialized with random weights and will learn an embedding for all of the words in the training dataset. 
+ +- Example of Learning an Embedding +- Example of Using Pre-Trained GloVe Embedding + +" +https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/|title|How to Use Word Embedding Layers for Deep Learning with Keras - Machine Learning Mastery +https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/|creationTime|2017-10-25T15:40:03Z +https://developers.google.com/machine-learning/glossary/|creationDate|2017-10-02 +https://developers.google.com/machine-learning/glossary/|tag|http://www.semanlink.net/tag/machine_learning +https://developers.google.com/machine-learning/glossary/|title|Machine Learning Glossary Google Developers +https://developers.google.com/machine-learning/glossary/|creationTime|2017-10-02T13:27:52Z +http://blog.stephenwolfram.com/2014/09/launching-today-mathematica-online/|creationDate|2014-09-16 +http://blog.stephenwolfram.com/2014/09/launching-today-mathematica-online/|tag|http://www.semanlink.net/tag/mathematica +http://blog.stephenwolfram.com/2014/09/launching-today-mathematica-online/|title|Launching Today: Mathematica Online!—Stephen Wolfram Blog +http://blog.stephenwolfram.com/2014/09/launching-today-mathematica-online/|creationTime|2014-09-16T10:06:27Z +http://www.lemonde.fr/europe/article/2015/10/12/apres-l-attentat-d-ankara-la-turquie-au-bord-du-gouffre_4787525_3214.html|creationDate|2015-10-13 +http://www.lemonde.fr/europe/article/2015/10/12/apres-l-attentat-d-ankara-la-turquie-au-bord-du-gouffre_4787525_3214.html|tag|http://www.semanlink.net/tag/turquie +http://www.lemonde.fr/europe/article/2015/10/12/apres-l-attentat-d-ankara-la-turquie-au-bord-du-gouffre_4787525_3214.html|title|Après l’attentat d’Ankara, la Turquie au bord du gouffre +http://www.lemonde.fr/europe/article/2015/10/12/apres-l-attentat-d-ankara-la-turquie-au-bord-du-gouffre_4787525_3214.html|creationTime|2015-10-13T10:26:55Z +http://lile2012.linkededucation.org/|creationDate|2011-12-17 +http://lile2012.linkededucation.org/|tag|http://www.semanlink.net/tag/workshop +http://lile2012.linkededucation.org/|tag|http://www.semanlink.net/tag/www_2012 +http://lile2012.linkededucation.org/|tag|http://www.semanlink.net/tag/linked_learning_2012 +http://lile2012.linkededucation.org/|tag|http://www.semanlink.net/tag/linked_learning +http://lile2012.linkededucation.org/|title|Linked Learning 2012 +http://lile2012.linkededucation.org/|creationTime|2011-12-17T02:21:19Z +http://lile2012.linkededucation.org/|homepage|http://lile2012.linkededucation.org/ +http://www.w3.org/2001/tag/2011/01/HashInURI-20110115|creationDate|2011-08-11 +http://www.w3.org/2001/tag/2011/01/HashInURI-20110115|tag|http://www.semanlink.net/tag/hash_uris +http://www.w3.org/2001/tag/2011/01/HashInURI-20110115|tag|http://www.semanlink.net/tag/w3c_working_draft +http://www.w3.org/2001/tag/2011/01/HashInURI-20110115|title|Repurposing the Hash Sign for the New Web +http://www.w3.org/2001/tag/2011/01/HashInURI-20110115|creationTime|2011-08-11T17:36:45Z +https://nlp.stanford.edu/IR-book/html/htmledition/pseudo-relevance-feedback-1.html|creationDate|2018-02-13 +https://nlp.stanford.edu/IR-book/html/htmledition/pseudo-relevance-feedback-1.html|tag|http://www.semanlink.net/tag/pseudo_relevance_feedback +https://nlp.stanford.edu/IR-book/html/htmledition/pseudo-relevance-feedback-1.html|comment|"find an initial set of most relevant documents, assume that the top ranked documents are relevant, finally do relevance feedback under this assumption. 
+[It's said here](https://www.zbw.eu/fileadmin/pdf/forschung/2017-colloquium-galke-word-embeddings.pdf) that pseudo relevance feedback is not included in Lucene"
+https://nlp.stanford.edu/IR-book/html/htmledition/pseudo-relevance-feedback-1.html|relatedDoc|https://www.zbw.eu/fileadmin/pdf/forschung/2017-colloquium-galke-word-embeddings.pdf
+https://nlp.stanford.edu/IR-book/html/htmledition/pseudo-relevance-feedback-1.html|title|Pseudo relevance feedback
+https://nlp.stanford.edu/IR-book/html/htmledition/pseudo-relevance-feedback-1.html|creationTime|2018-02-13T10:53:40Z
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17890|creationDate|2007-06-13
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17890|tag|http://www.semanlink.net/tag/uriqr
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17890|tag|http://www.semanlink.net/tag/lod_mailing_list
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17890|tag|http://www.semanlink.net/tag/fps_post
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17890|title|fps writes in response to [Linking-open-data] A Search Engine for URIs (T.Heath)
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=17890|creationTime|2007-06-13T23:16:50Z
+https://github.com/kawine/usif|creationDate|2018-10-20
+https://github.com/kawine/usif|tag|http://www.semanlink.net/tag/github_project
+https://github.com/kawine/usif|comment|Github project associated to [USIF paper](doc:?uri=http%3A%2F%2Fwww.aclweb.org%2Fanthology%2FW18-3012%2F)
+https://github.com/kawine/usif|relatedDoc|http://www.aclweb.org/anthology/W18-3012
+https://github.com/kawine/usif|relatedDoc|https://openreview.net/forum?id=rJedbn0ctQ
+https://github.com/kawine/usif|relatedDoc|http://www.aclweb.org/anthology/W18-3012/
+https://github.com/kawine/usif|title|GitHub - kawine/usif: Implementation of unsupervised smoothed inverse frequency
+https://github.com/kawine/usif|creationTime|2018-10-20T14:59:02Z
+http://www.lespetitescases.net/semweblabs/semwebpro/rdfa/|creationDate|2011-01-18
+http://www.lespetitescases.net/semweblabs/semwebpro/rdfa/|tag|http://www.semanlink.net/tag/gautier_poupeau
+http://www.lespetitescases.net/semweblabs/semwebpro/rdfa/|tag|http://www.semanlink.net/tag/semweb_pro_2011
+http://www.lespetitescases.net/semweblabs/semwebpro/rdfa/|tag|http://www.semanlink.net/tag/rdfa
+http://www.lespetitescases.net/semweblabs/semwebpro/rdfa/|title|Index of /semweblabs/semwebpro/rdfa
+http://www.lespetitescases.net/semweblabs/semwebpro/rdfa/|creationTime|2011-01-18T11:13:33Z
+http://www.ebusiness-unibw.org/wiki/Own_GoodRelations_Vocabularies|creationDate|2011-06-28
+http://www.ebusiness-unibw.org/wiki/Own_GoodRelations_Vocabularies|tag|http://www.semanlink.net/tag/howto
+http://www.ebusiness-unibw.org/wiki/Own_GoodRelations_Vocabularies|tag|http://www.semanlink.net/tag/goodrelations
+http://www.ebusiness-unibw.org/wiki/Own_GoodRelations_Vocabularies|title|Creating Domain-specific Products and Services Ontologies for GoodRelations
+http://www.ebusiness-unibw.org/wiki/Own_GoodRelations_Vocabularies|creationTime|2011-06-28T14:25:20Z
+http://n2.talis.com/wiki/SPARQL_Recipes|creationDate|2008-04-08
+http://n2.talis.com/wiki/SPARQL_Recipes|tag|http://www.semanlink.net/tag/sparql_tips
+http://n2.talis.com/wiki/SPARQL_Recipes|tag|http://www.semanlink.net/tag/talis
+http://n2.talis.com/wiki/SPARQL_Recipes|comment|includes: FILTERing in literals with a given language OR no language at all
+http://n2.talis.com/wiki/SPARQL_Recipes|title|SPARQL Recipes - N2 wiki
+http://n2.talis.com/wiki/SPARQL_Recipes|creationTime|2008-04-08T14:54:09Z
+https://www.ibm.com/developerworks/library/j-use-elasticsearch-java-apps/index.html|creationDate|2017-05-15
+https://www.ibm.com/developerworks/library/j-use-elasticsearch-java-apps/index.html|tag|http://www.semanlink.net/tag/elasticsearch
+https://www.ibm.com/developerworks/library/j-use-elasticsearch-java-apps/index.html|tag|http://www.semanlink.net/tag/tutorial
+https://www.ibm.com/developerworks/library/j-use-elasticsearch-java-apps/index.html|tag|http://www.semanlink.net/tag/spark_java_web_framework
+https://www.ibm.com/developerworks/library/j-use-elasticsearch-java-apps/index.html|title|Use Elasticsearch in your Java applications
+https://www.ibm.com/developerworks/library/j-use-elasticsearch-java-apps/index.html|creationTime|2017-05-15T19:05:12Z
+http://gregarius.net|creationDate|2005-08-17
+http://gregarius.net|tag|http://www.semanlink.net/tag/feed_aggregator
+http://gregarius.net|title|Gregarius » A Free, Web-based Feed Aggregator
+http://uk.businessinsider.com/handwriting-helps-you-learn-2014-12?r=US|creationDate|2014-12-17
+http://uk.businessinsider.com/handwriting-helps-you-learn-2014-12?r=US|tag|http://www.semanlink.net/tag/handwriting
+http://uk.businessinsider.com/handwriting-helps-you-learn-2014-12?r=US|tag|http://www.semanlink.net/tag/apprendre
+http://uk.businessinsider.com/handwriting-helps-you-learn-2014-12?r=US|comment|"[transcribing] lectures verbatim rather than processing information and reframing it in their own words is detrimental to learning
+"
+http://uk.businessinsider.com/handwriting-helps-you-learn-2014-12?r=US|title|Handwriting Helps You Learn - Business Insider
+http://uk.businessinsider.com/handwriting-helps-you-learn-2014-12?r=US|creationTime|2014-12-17T23:41:47Z
+http://www.w3.org/TR/2008/NOTE-hcls-kb-20080604/|creationDate|2008-06-12
+http://www.w3.org/TR/2008/NOTE-hcls-kb-20080604/|tag|http://www.semanlink.net/tag/data_web
+http://www.w3.org/TR/2008/NOTE-hcls-kb-20080604/|tag|http://www.semanlink.net/tag/www2007
+http://www.w3.org/TR/2008/NOTE-hcls-kb-20080604/|tag|http://www.semanlink.net/tag/semantic_web_life_sciences
+http://www.w3.org/TR/2008/NOTE-hcls-kb-20080604/|tag|http://www.semanlink.net/tag/semantic_web_use_cases
+http://www.w3.org/TR/2008/NOTE-hcls-kb-20080604/|tag|http://www.semanlink.net/tag/w3c
+http://www.w3.org/TR/2008/NOTE-hcls-kb-20080604/|comment|The prototype we describe is a biomedical knowledge base, constructed for a demonstration at Banff WWW2007, that integrates 15 distinct data sources using currently available Semantic Web technologies such as the W3C standard Web Ontology Language [OWL] and Resource Description Framework [RDF]. This report outlines which resources were integrated, how the knowledge base was constructed using free and open source triple store technology, how it can be queried using the W3C Recommended RDF query language SPARQL [SPARQL], and what resources and inferences are involved in answering complex queries. While the utility of the knowledge base is illustrated by identifying a set of genes involved in Alzheimer's Disease, the approach described here can be applied to any use case that integrates data from multiple domains.
+http://www.w3.org/TR/2008/NOTE-hcls-kb-20080604/|title|A Prototype Knowledge Base for the Life Sciences +http://www.w3.org/TR/2008/NOTE-hcls-kb-20080604/|creationTime|2008-06-12T08:29:11Z +http://www.boingboing.net/2006/01/30/msft_our_drm_licensi.html|creationDate|2006-02-17 +http://www.boingboing.net/2006/01/30/msft_our_drm_licensi.html|tag|http://www.semanlink.net/tag/drm +http://www.boingboing.net/2006/01/30/msft_our_drm_licensi.html|tag|http://www.semanlink.net/tag/microsoft +http://www.boingboing.net/2006/01/30/msft_our_drm_licensi.html|comment|Microsoft's DRM requires that device makers pay Microsoft a license fee for each device that plays back video encoded with its system. it also requires every such vendor to submit to a standardized, non-negotiable license agreement that spells out how the player must be implemented. This contract contains numerous items that limit the sort of business you're allowed to pursue, notably that you may not implement a Microsoft player in open source software. +http://www.boingboing.net/2006/01/30/msft_our_drm_licensi.html|title|Boing Boing: MSFT: Our DRM licensing is there to eliminate hobbyists and little guys +https://github.com/salesforce/decaNLP|creationDate|2018-06-21 +https://github.com/salesforce/decaNLP|tag|http://www.semanlink.net/tag/nlp_task_as_qa_problem +https://github.com/salesforce/decaNLP|tag|http://www.semanlink.net/tag/transfer_learning +https://github.com/salesforce/decaNLP|tag|http://www.semanlink.net/tag/richard_socher +https://github.com/salesforce/decaNLP|tag|http://www.semanlink.net/tag/multitask_learning_in_nlp +https://github.com/salesforce/decaNLP|comment|"> We cast all tasks as question answering over a context. + +[arxiv](https://arxiv.org/abs/1806.08730) +[slides](doc:2021/01/the_natural_language_decathlon_)" +https://github.com/salesforce/decaNLP|relatedDoc|http://www.semanlink.net/doc/2021/01/the_natural_language_decathlon_ +https://github.com/salesforce/decaNLP|title|The Natural Language Decathlon: Multitask Learning as Question Answering (2018) Salesforce research +https://github.com/salesforce/decaNLP|creationTime|2018-06-21T12:55:41Z +http://worldmusic.nationalgeographic.com/worldmusic/view/page.basic/country/content.country/niger_204?fs=www3.nationalgeographic.com&fs=plasma.nationalgeographic.com|creationDate|2007-07-08 +http://worldmusic.nationalgeographic.com/worldmusic/view/page.basic/country/content.country/niger_204?fs=www3.nationalgeographic.com&fs=plasma.nationalgeographic.com|tag|http://www.semanlink.net/tag/musique_du_niger +http://worldmusic.nationalgeographic.com/worldmusic/view/page.basic/country/content.country/niger_204?fs=www3.nationalgeographic.com&fs=plasma.nationalgeographic.com|tag|http://www.semanlink.net/tag/moussa_poussi +http://worldmusic.nationalgeographic.com/worldmusic/view/page.basic/country/content.country/niger_204?fs=www3.nationalgeographic.com&fs=plasma.nationalgeographic.com|title|Niger: National Geographic World Music +http://worldmusic.nationalgeographic.com/worldmusic/view/page.basic/country/content.country/niger_204?fs=www3.nationalgeographic.com&fs=plasma.nationalgeographic.com|creationTime|2007-07-08T02:37:01Z +https://www.lemonde.fr/m-actu/article/2018/05/25/en-suede-un-livret-pour-se-preparer-a-la-guerre_5304591_4497186.html|creationDate|2018-05-27 +https://www.lemonde.fr/m-actu/article/2018/05/25/en-suede-un-livret-pour-se-preparer-a-la-guerre_5304591_4497186.html|tag|http://www.semanlink.net/tag/guerre 
+https://www.lemonde.fr/m-actu/article/2018/05/25/en-suede-un-livret-pour-se-preparer-a-la-guerre_5304591_4497186.html|tag|http://www.semanlink.net/tag/suede +https://www.lemonde.fr/m-actu/article/2018/05/25/en-suede-un-livret-pour-se-preparer-a-la-guerre_5304591_4497186.html|comment|Mis en garde contre les « fake news », les habitants du royaume sont déjà prévenus : « Si la Suède est attaquée par un autre pays, nous ne nous rendrons jamais. Toutes les informations selon lesquelles la résistance doit cesser sont fausses. » +https://www.lemonde.fr/m-actu/article/2018/05/25/en-suede-un-livret-pour-se-preparer-a-la-guerre_5304591_4497186.html|title|En Suède, un livret pour se préparer à la guerre +https://www.lemonde.fr/m-actu/article/2018/05/25/en-suede-un-livret-pour-se-preparer-a-la-guerre_5304591_4497186.html|creationTime|2018-05-27T10:49:54Z +http://rdfa.digitalbazaar.com/live-loop/|creationDate|2011-01-18 +http://rdfa.digitalbazaar.com/live-loop/|tag|http://www.semanlink.net/tag/dev_tools +http://rdfa.digitalbazaar.com/live-loop/|tag|http://www.semanlink.net/tag/rdfa +http://rdfa.digitalbazaar.com/live-loop/|comment|Live Loop lets you play with HTML+RDFa markup and instantly see the data that your markup produces. +http://rdfa.digitalbazaar.com/live-loop/|title|Live Loop +http://rdfa.digitalbazaar.com/live-loop/|creationTime|2011-01-18T12:09:16Z +http://xtech06.usefulinc.com/schedule/paper/147|creationDate|2006-05-22 +http://xtech06.usefulinc.com/schedule/paper/147|tag|http://www.semanlink.net/tag/mspace +http://xtech06.usefulinc.com/schedule/paper/147|tag|http://www.semanlink.net/tag/xtech_2006 +http://xtech06.usefulinc.com/schedule/paper/147|tag|http://www.semanlink.net/tag/semantic_web_application +http://xtech06.usefulinc.com/schedule/paper/147|tag|http://www.semanlink.net/tag/nasa +http://xtech06.usefulinc.com/schedule/paper/147|title|XTech 2006: Semantic Web @ NASA +http://hugues.blogs.com/commvat/2007/09/gntique-adminis.html|creationDate|2007-09-25 +http://hugues.blogs.com/commvat/2007/09/gntique-adminis.html|tag|http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation +http://hugues.blogs.com/commvat/2007/09/gntique-adminis.html|comment|L'introduction des tests ADN pour l'établissement d'actes administratifs n’est pas seulement contraire au droit ou au sens commun. C'est un vrai danger démocratique. +http://hugues.blogs.com/commvat/2007/09/gntique-adminis.html|title|www.com-vat.com: Génétique administrative : de Courteline à Orwell +http://hugues.blogs.com/commvat/2007/09/gntique-adminis.html|creationTime|2007-09-25T22:14:55Z +http://theconversation.com/cracking-big-data-with-statistical-physics-79864|creationDate|2018-10-18 +http://theconversation.com/cracking-big-data-with-statistical-physics-79864|tag|http://www.semanlink.net/tag/machine_learning +http://theconversation.com/cracking-big-data-with-statistical-physics-79864|tag|http://www.semanlink.net/tag/lenka_zdeborova +http://theconversation.com/cracking-big-data-with-statistical-physics-79864|tag|http://www.semanlink.net/tag/statistical_physics +http://theconversation.com/cracking-big-data-with-statistical-physics-79864|tag|http://www.semanlink.net/tag/phase_transition +http://theconversation.com/cracking-big-data-with-statistical-physics-79864|comment|"> Models studied in statistical physics are mathematically equivalent to some of those in high-dimensional statistics. + +> Statistical physics is often concerned with phase transitions, i.e., abrupt changes in behaviour. 
Interestingly, there is a deep correspondence between physical phases such as liquid, super-cooled liquid or glass, and solid, and regions of parameters for which a given data analysis task is algorithmically impossible, hard or easy +" +http://theconversation.com/cracking-big-data-with-statistical-physics-79864|title|Cracking big data with statistical physics +http://theconversation.com/cracking-big-data-with-statistical-physics-79864|creationTime|2018-10-18T13:27:09Z +https://github.com/anvaka/word2vec-graph|creationDate|2018-03-12 +https://github.com/anvaka/word2vec-graph|tag|http://www.semanlink.net/tag/word2vec +https://github.com/anvaka/word2vec-graph|tag|http://www.semanlink.net/tag/graph_visualization +https://github.com/anvaka/word2vec-graph|tag|http://www.semanlink.net/tag/github_project +https://github.com/anvaka/word2vec-graph|title|GitHub - anvaka/word2vec-graph: Exploring word2vec embeddings as a graph of nearest neighbors +https://github.com/anvaka/word2vec-graph|creationTime|2018-03-12T11:22:58Z +http://phonetics.ucla.edu/|creationDate|2005-12-16 +http://phonetics.ucla.edu/|tag|http://www.semanlink.net/tag/langues +http://phonetics.ucla.edu/|title|UCLA Phonetics Lab Data +http://en.wikipedia.org/wiki/El_Cantante|creationDate|2010-02-13 +http://en.wikipedia.org/wiki/El_Cantante|tag|http://www.semanlink.net/tag/hector_lavoe +http://en.wikipedia.org/wiki/El_Cantante|tag|http://www.semanlink.net/tag/film_americain +http://en.wikipedia.org/wiki/El_Cantante|title|El Cantante +http://en.wikipedia.org/wiki/El_Cantante|creationTime|2010-02-13T01:58:28Z +http://iand.posterous.com/is-303-really-necessary|creationDate|2010-11-11 +http://iand.posterous.com/is-303-really-necessary|tag|http://www.semanlink.net/tag/httprange_14 +http://iand.posterous.com/is-303-really-necessary|tag|http://www.semanlink.net/tag/internet +http://iand.posterous.com/is-303-really-necessary|title|Is 303 Really Necessary? - Internet Alchemy +http://iand.posterous.com/is-303-really-necessary|creationTime|2010-11-11T15:08:16Z +https://www.ontotext.com/free-graphdb-download/?utm_source=twitter&utm_medium=card&utm_campaign=graphdb%20free|creationDate|2019-02-02 +https://www.ontotext.com/free-graphdb-download/?utm_source=twitter&utm_medium=card&utm_campaign=graphdb%20free|tag|http://www.semanlink.net/tag/graphdb +https://www.ontotext.com/free-graphdb-download/?utm_source=twitter&utm_medium=card&utm_campaign=graphdb%20free|title|GraphDB Free Download Ontotext +https://www.ontotext.com/free-graphdb-download/?utm_source=twitter&utm_medium=card&utm_campaign=graphdb%20free|creationTime|2019-02-02T12:03:55Z +http://www.w3.org/TR/webarch/|creationDate|2006-12-31 +http://www.w3.org/TR/webarch/|tag|http://www.semanlink.net/tag/norman_walsh +http://www.w3.org/TR/webarch/|tag|http://www.semanlink.net/tag/w3c_recommendation +http://www.w3.org/TR/webarch/|tag|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.w3.org/TR/webarch/|tag|http://www.semanlink.net/tag/uri_dereferencing +http://www.w3.org/TR/webarch/|comment|This architecture document discusses the core design components of the Web. They are identification of resources, representation of resource state, and the protocols that support the interaction between agents and resources in the space. We relate core design components, constraints, and good practices to the principles and properties they support. 
+http://www.w3.org/TR/webarch/|title|Architecture of the World Wide Web, Volume One +http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0192360|creationDate|2018-08-12 +http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0192360|tag|http://www.semanlink.net/tag/medical_data +http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0192360|tag|http://www.semanlink.net/tag/concept_extraction +http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0192360|comment|"> A CNN for NLP learns which combinations of adjacent words are associated with a given concept. +" +http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0192360|title|Comparing deep learning and concept extraction based methods for patient phenotyping from clinical narratives (2018) +http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0192360|creationTime|2018-08-12T20:11:48Z +http://www.geocities.com/anpipniger/gajera.htm|creationDate|2006-04-02 +http://www.geocities.com/anpipniger/gajera.htm|tag|http://www.semanlink.net/tag/pompe_a_eau +http://www.geocities.com/anpipniger/gajera.htm|tag|http://www.semanlink.net/tag/verger_de_gado_a_niamey +http://www.geocities.com/anpipniger/gajera.htm|comment|"Attention, les valeurs données sont différentes de celles dans ""Private Irrigation Pilot Project_Niger.pdf"" http://www.ideorg.org/Files/Private%20Irrigation%20Pilot%20Project_Niger.pdf" +http://www.geocities.com/anpipniger/gajera.htm|title|GAJERA ASPIRANTE Pompe Gajera aspirante +http://www.knowledgevision.com/|creationDate|2014-11-17 +http://www.knowledgevision.com/|title|KnowledgeVision Online Presentations Video Tools +http://www.knowledgevision.com/|creationTime|2014-11-17T21:10:33Z +https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-3-more-fun-with-word-vectors|creationDate|2017-07-20 +https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-3-more-fun-with-word-vectors|tag|http://www.semanlink.net/tag/word2vec_howto +https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-3-more-fun-with-word-vectors|comment|> We found that the code above gives about the same (or slightly worse) results compared to the Bag of Words +https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-3-more-fun-with-word-vectors|title|More Fun With Word Vectors - Bag of Words Meets Bags of Popcorn Kaggle +https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-3-more-fun-with-word-vectors|creationTime|2017-07-20T14:56:22Z +http://www.mpi-inf.mpg.de/~suchanek/publications/www2007.pdf|creationDate|2007-05-23 +http://www.mpi-inf.mpg.de/~suchanek/publications/www2007.pdf|tag|http://www.semanlink.net/tag/yago +http://www.mpi-inf.mpg.de/~suchanek/publications/www2007.pdf|comment|"We present YAGO, a light-weight and extensible ontology +with high coverage and quality. YAGO builds on entities +and relations and currently contains more than 1 million +entities and 5 million facts. This includes the Is-A hierarchy +as well as non-taxonomic relations between entities (such +as hasWonPrize). The facts have been automatically extracted +from Wikipedia and unified with WordNet, using +a carefully designed combination of rule-based and heuristic +methods described in this paper. The resulting knowledge +base is a major step beyond WordNet: in quality by +adding knowledge about individuals like persons, organizations, +products, etc. with their semantic relationships – and +in quantity by increasing the number of facts by more than +an order of magnitude. 
Our empirical evaluation of fact correctness +shows an accuracy of about 95%. YAGO is based on +a logically clean model, which is decidable, extensible, and +compatible with RDFS. Finally, we show how YAGO can be +further extended by state-of-the-art information extraction +techniques. +" +http://www.mpi-inf.mpg.de/~suchanek/publications/www2007.pdf|title|YAGO: A Core of Semantic Knowledge Unifying WordNet and Wikipedia - Paper +http://www.mpi-inf.mpg.de/~suchanek/publications/www2007.pdf|creationTime|2007-05-23T21:42:00Z +http://www.w3.org/wiki/WebSchemas/SKOS|creationDate|2013-09-22 +http://www.w3.org/wiki/WebSchemas/SKOS|tag|http://www.semanlink.net/tag/schema_org +http://www.w3.org/wiki/WebSchemas/SKOS|tag|http://www.semanlink.net/tag/skos +http://www.w3.org/wiki/WebSchemas/SKOS|title|WebSchemas/SKOS - W3C Wiki +http://www.w3.org/wiki/WebSchemas/SKOS|creationTime|2013-09-22T23:37:49Z +http://thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html|creationDate|2007-04-20 +http://thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html|tag|http://www.semanlink.net/tag/sparql_en_javascript +http://thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html|title|TechnicaLee Speaking: SPARQL Calendar Demo: A SPARQL JavaScript Library +http://thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html|creationTime|2007-04-20T20:59:26Z +http://xtech06.usefulinc.com/schedule/detail/135|creationDate|2006-04-24 +http://xtech06.usefulinc.com/schedule/detail/135|tag|http://www.semanlink.net/tag/david_beckett +http://xtech06.usefulinc.com/schedule/detail/135|tag|http://www.semanlink.net/tag/rdf +http://xtech06.usefulinc.com/schedule/detail/135|tag|http://www.semanlink.net/tag/a_suivre +http://xtech06.usefulinc.com/schedule/detail/135|tag|http://www.semanlink.net/tag/xtech_2006 +http://xtech06.usefulinc.com/schedule/detail/135|tag|http://www.semanlink.net/tag/tagging +http://xtech06.usefulinc.com/schedule/detail/135|comment|"This paper will discuss how tagging can be separated from the +services that provide them, allowing use of tags as separate +entities. This separation will use a mapping of tags to and from +RDF, showing how the context of tags (who, where, when made them) can +be represented and preserved. There are multiple choices for +modelling tags in this way which will be discussed along with their +benefits and some of the unusual uses of tags will be considered as +well as the different way that existing tagging services use tagging +differently. + + + The paper will also discuss methods of how to find the meaning of a +tag – going from a tag to something a human can understand as a +description, which may be evolving – in a decentralized, web friendly +way. Without the use of ontologies!" 
+http://xtech06.usefulinc.com/schedule/detail/135|title|XTech 2006: Semantics Through the Tag +https://plus.google.com/112399767740508618350/posts/VWxowvLti4X|creationDate|2013-03-13 +https://plus.google.com/112399767740508618350/posts/VWxowvLti4X|tag|http://www.semanlink.net/tag/diy +https://plus.google.com/112399767740508618350/posts/VWxowvLti4X|tag|http://www.semanlink.net/tag/kingsley_idehen +https://plus.google.com/112399767740508618350/posts/VWxowvLti4X|tag|http://www.semanlink.net/tag/dropbox +https://plus.google.com/112399767740508618350/posts/VWxowvLti4X|tag|http://www.semanlink.net/tag/linked_data_publishing +https://plus.google.com/112399767740508618350/posts/VWxowvLti4X|title|Kingsley Idehen - Google+ - DIY Linked Data Deployment via DropBox Unbeknownst to… +https://plus.google.com/112399767740508618350/posts/VWxowvLti4X|creationTime|2013-03-13T19:29:20Z +https://github.com/io-informatics/angular-jsonld|creationDate|2015-08-29 +https://github.com/io-informatics/angular-jsonld|tag|http://www.semanlink.net/tag/github_project +https://github.com/io-informatics/angular-jsonld|tag|http://www.semanlink.net/tag/json_ld +https://github.com/io-informatics/angular-jsonld|tag|http://www.semanlink.net/tag/angularjs +https://github.com/io-informatics/angular-jsonld|comment|This angular module facilitates the integration of JSON-LD server APIs in AngularJS clients. It is implemented on top of Restagular. Its purpose is to provide an adapter layer to map client's data model to the server's API model by using the semantics embedded in JSON-LD as the contract interface. Another important functionality of angular-jsonld is to enable easy navigation of JSON-LD hyperlinks in client's code. +https://github.com/io-informatics/angular-jsonld|title|angular-jsonld +https://github.com/io-informatics/angular-jsonld|creationTime|2015-08-29T16:51:15Z +http://lists.w3.org/Archives/Public/public-hydra/2015Feb/0026.html|creationDate|2015-02-14 +http://lists.w3.org/Archives/Public/public-hydra/2015Feb/0026.html|tag|http://www.semanlink.net/tag/linked_data_fragments +http://lists.w3.org/Archives/Public/public-hydra/2015Feb/0026.html|tag|http://www.semanlink.net/tag/public_hydra_w3_org +http://lists.w3.org/Archives/Public/public-hydra/2015Feb/0026.html|title|Understanding linked data fragments +http://lists.w3.org/Archives/Public/public-hydra/2015Feb/0026.html|creationTime|2015-02-14T09:54:26Z +http://www.devx.com/semantic/Article/38595|creationDate|2008-07-19 +http://www.devx.com/semantic/Article/38595|tag|http://www.semanlink.net/tag/nlp +http://www.devx.com/semantic/Article/38595|tag|http://www.semanlink.net/tag/searchmonkey +http://www.devx.com/semantic/Article/38595|tag|http://www.semanlink.net/tag/semantic_search +http://www.devx.com/semantic/Article/38595|comment|There are two approaches toward semantic search and both have received attention in the past months. The first approach builds on the automatic analysis of text using Natural Language Processing (NLP). The second approach uses semantic web technologies, which aims to make the web more easily searchable by allowing publishers to expose their (meta)data. 
+http://www.devx.com/semantic/Article/38595|title|Semantic Search Arrives at the Web +http://www.devx.com/semantic/Article/38595|creationTime|2008-07-19T18:23:40Z +http://www.wired.com/2014/03/rootworm-resistance-bt-corn|creationDate|2015-01-01 +http://www.wired.com/2014/03/rootworm-resistance-bt-corn|tag|http://www.semanlink.net/tag/natural_selection +http://www.wired.com/2014/03/rootworm-resistance-bt-corn|tag|http://www.semanlink.net/tag/ogm +http://www.wired.com/2014/03/rootworm-resistance-bt-corn|tag|http://www.semanlink.net/tag/mais_ogm +http://www.wired.com/2014/03/rootworm-resistance-bt-corn|title|Voracious Worm Evolves to Eat Biotech Corn Engineered to Kill It WIRED +http://www.wired.com/2014/03/rootworm-resistance-bt-corn|creationTime|2015-01-01T15:58:15Z +http://lists.w3.org/Archives/Public/semantic-web/2014May/0032.html|creationDate|2014-05-17 +http://lists.w3.org/Archives/Public/semantic-web/2014May/0032.html|tag|http://www.semanlink.net/tag/rdf_template +http://lists.w3.org/Archives/Public/semantic-web/2014May/0032.html|title|RDF Template from Stéphane Campinas on 2014-05-10 (semantic-web@w3.org from May 2014) +http://lists.w3.org/Archives/Public/semantic-web/2014May/0032.html|creationTime|2014-05-17T17:44:06Z +http://www.youtube.com/user/minutephysics|creationDate|2013-05-30 +http://www.youtube.com/user/minutephysics|tag|http://www.semanlink.net/tag/physique +http://www.youtube.com/user/minutephysics|tag|http://www.semanlink.net/tag/youtube +http://www.youtube.com/user/minutephysics|title|MinutePhysics - YouTube +http://www.youtube.com/user/minutephysics|creationTime|2013-05-30T10:42:10Z +http://ebiquity.umbc.edu/blogger/2013/05/17/gmail-adds-support-for-embedding-semantic-data/|creationDate|2013-05-17 +http://ebiquity.umbc.edu/blogger/2013/05/17/gmail-adds-support-for-embedding-semantic-data/|tag|http://www.semanlink.net/tag/gmail +http://ebiquity.umbc.edu/blogger/2013/05/17/gmail-adds-support-for-embedding-semantic-data/|tag|http://www.semanlink.net/tag/semantic_data +http://ebiquity.umbc.edu/blogger/2013/05/17/gmail-adds-support-for-embedding-semantic-data/|title|Gmail adds support for embedding semantic data +http://ebiquity.umbc.edu/blogger/2013/05/17/gmail-adds-support-for-embedding-semantic-data/|creationTime|2013-05-17T13:09:23Z +http://www.lemonde.fr/idees/article/2012/01/13/etudiants-etrangers-claude-gueant-doit-s-excuser_1629378_3232.html|creationDate|2012-01-15 +http://www.lemonde.fr/idees/article/2012/01/13/etudiants-etrangers-claude-gueant-doit-s-excuser_1629378_3232.html|tag|http://www.semanlink.net/tag/enseignement_francais +http://www.lemonde.fr/idees/article/2012/01/13/etudiants-etrangers-claude-gueant-doit-s-excuser_1629378_3232.html|tag|http://www.semanlink.net/tag/gueant +http://www.lemonde.fr/idees/article/2012/01/13/etudiants-etrangers-claude-gueant-doit-s-excuser_1629378_3232.html|tag|http://www.semanlink.net/tag/caroline_fourest +http://www.lemonde.fr/idees/article/2012/01/13/etudiants-etrangers-claude-gueant-doit-s-excuser_1629378_3232.html|tag|http://www.semanlink.net/tag/sarkozy_immigration +http://www.lemonde.fr/idees/article/2012/01/13/etudiants-etrangers-claude-gueant-doit-s-excuser_1629378_3232.html|title|Etudiants étrangers : Claude Guéant doit s'excuser +http://www.lemonde.fr/idees/article/2012/01/13/etudiants-etrangers-claude-gueant-doit-s-excuser_1629378_3232.html|creationTime|2012-01-15T13:00:07Z +http://www.lemonde.fr/idees/article/2012/01/13/etudiants-etrangers-claude-gueant-doit-s-excuser_1629378_3232.html|source|Le Monde 
+http://www.lemonde.fr/idees/article/2012/01/13/etudiants-etrangers-claude-gueant-doit-s-excuser_1629378_3232.html|date|2012-01-14 +http://www.sublimetext.com/|creationDate|2014-09-10 +http://www.sublimetext.com/|tag|http://www.semanlink.net/tag/text_editor +http://www.sublimetext.com/|title|Sublime Text: The text editor you'll fall in love with +http://www.sublimetext.com/|creationTime|2014-09-10T11:34:21Z +http://www.desirsdavenir.org/commun/pdf/RapportRocard.pdf|creationDate|2007-04-09 +http://www.desirsdavenir.org/commun/pdf/RapportRocard.pdf|tag|http://www.semanlink.net/tag/presidentielles_2007 +http://www.desirsdavenir.org/commun/pdf/RapportRocard.pdf|tag|http://www.semanlink.net/tag/rocard +http://www.desirsdavenir.org/commun/pdf/RapportRocard.pdf|comment|"94 propositions, dont :
+- ""mettre en oeuvre une solution qui assure la sécurité juridique des échanges, mais préserve les intérêts des artistes autour d'une rémunération consensuelle qu'il conviendra de défendre vigoureusement au niveau européen""
+- refus de la ""brevetabilité des logiciels""
+- l'interruption du projet de carte d'identité biométrique
+- ""tarif social"" pour l'accès au haut débit de 5 euros par mois
" +http://www.desirsdavenir.org/commun/pdf/RapportRocard.pdf|title|République 2.0 beta. Vers une société de la connaissance ouverte. Michel Rocard +http://www.desirsdavenir.org/commun/pdf/RapportRocard.pdf|creationTime|2007-04-09T23:03:21Z +http://www.youtube.com/watch?v=txEekZcgl4s|creationDate|2009-11-27 +http://www.youtube.com/watch?v=txEekZcgl4s|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=txEekZcgl4s|tag|http://www.semanlink.net/tag/lhc +http://www.youtube.com/watch?v=txEekZcgl4s|title|LHC (en français) +http://www.youtube.com/watch?v=txEekZcgl4s|creationTime|2009-11-27T17:32:15Z +http://tech.groups.yahoo.com/group/jena-dev/message/28785|creationDate|2007-04-25 +http://tech.groups.yahoo.com/group/jena-dev/message/28785|tag|http://www.semanlink.net/tag/jena_dev +http://tech.groups.yahoo.com/group/jena-dev/message/28785|tag|http://www.semanlink.net/tag/uri +http://tech.groups.yahoo.com/group/jena-dev/message/28785|title|jena-dev : Message: Re: [jena-dev] Checking URI validity before adding it to a model +http://tech.groups.yahoo.com/group/jena-dev/message/28785|creationTime|2007-04-25T23:58:31Z +http://tech.groups.yahoo.com/group/jena-dev/message/28785|seeAlso|http://jena.cvs.sourceforge.net/jena/iri/src/com/hp/hpl/jena/iri/ +http://lists.w3.org/Archives/Public/public-vocabs/2014May/0085.html|creationDate|2014-05-29 +http://lists.w3.org/Archives/Public/public-vocabs/2014May/0085.html|tag|http://www.semanlink.net/tag/dan_brickley +http://lists.w3.org/Archives/Public/public-vocabs/2014May/0085.html|tag|http://www.semanlink.net/tag/schema_org_roles +http://lists.w3.org/Archives/Public/public-vocabs/2014May/0085.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2014May/0085.html|title|"Finalizing Schema.org ""Roles"" design from Dan Brickley on 2014-05-08 (public-vocabs@w3.org from May 2014)" +http://lists.w3.org/Archives/Public/public-vocabs/2014May/0085.html|creationTime|2014-05-29T16:36:08Z +http://lists.w3.org/Archives/Public/public-ldp-wg/2012Nov/0201.html|creationDate|2014-09-11 +http://lists.w3.org/Archives/Public/public-ldp-wg/2012Nov/0201.html|tag|http://www.semanlink.net/tag/rdf_forms +http://lists.w3.org/Archives/Public/public-ldp-wg/2012Nov/0201.html|tag|http://www.semanlink.net/tag/henry_story +http://lists.w3.org/Archives/Public/public-ldp-wg/2012Nov/0201.html|comment|RDF forms based on SPARQL +http://lists.w3.org/Archives/Public/public-ldp-wg/2012Nov/0201.html|title|forms, direction, query, etc … from Roger Menday on 2012-11-19 (public-ldp-wg@w3.org from November 2012) +http://lists.w3.org/Archives/Public/public-ldp-wg/2012Nov/0201.html|creationTime|2014-09-11T10:04:07Z +https://arxiv.org/abs/1901.03136|creationDate|2019-02-15 +https://arxiv.org/abs/1901.03136|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1901.03136|tag|http://www.semanlink.net/tag/ip_ir_ml_ia +https://arxiv.org/abs/1901.03136|arxiv_author|Lea Helmers +https://arxiv.org/abs/1901.03136|arxiv_author|Tim Oppermann +https://arxiv.org/abs/1901.03136|arxiv_author|Franziska Biegler +https://arxiv.org/abs/1901.03136|arxiv_author|Klaus-Robert Müller +https://arxiv.org/abs/1901.03136|arxiv_author|Franziska Horn +https://arxiv.org/abs/1901.03136|comment|"[github](https://github.com/helmersl/patent_similarity_search) + +mouais + +" +https://arxiv.org/abs/1901.03136|title|[1901.03136] Automating the search for a patent's prior art with a full text similarity search 
+https://arxiv.org/abs/1901.03136|creationTime|2019-02-15T15:57:01Z +https://arxiv.org/abs/1901.03136|arxiv_summary|"More than ever, technical inventions are the symbol of our society's advance. +Patents guarantee their creators protection against infringement. For an +invention being patentable, its novelty and inventiveness have to be assessed. +Therefore, a search for published work that describes similar inventions to a +given patent application needs to be performed. Currently, this so-called +search for prior art is executed with semi-automatically composed keyword +queries, which is not only time consuming, but also prone to errors. In +particular, errors may systematically arise by the fact that different keywords +for the same technical concepts may exist across disciplines. In this paper, a +novel approach is proposed, where the full text of a given patent application +is compared to existing patents using machine learning and natural language +processing techniques to automatically detect inventions that are similar to +the one described in the submitted document. Various state-of-the-art +approaches for feature extraction and document comparison are evaluated. In +addition to that, the quality of the current search process is assessed based +on ratings of a domain expert. The evaluation results show that our automated +approach, besides accelerating the search process, also improves the search +results for prior art with respect to their quality." +https://arxiv.org/abs/1901.03136|arxiv_firstAuthor|Lea Helmers +https://arxiv.org/abs/1901.03136|arxiv_updated|2019-03-04T19:45:29Z +https://arxiv.org/abs/1901.03136|arxiv_title|Automating the search for a patent's prior art with a full text similarity search +https://arxiv.org/abs/1901.03136|arxiv_published|2019-01-10T13:04:25Z +https://arxiv.org/abs/1901.03136|arxiv_num|1901.03136 +http://www.wired.com/2015/08/ibms-rodent-brain-chip-make-phones-hyper-smart/|creationDate|2015-08-18 +http://www.wired.com/2015/08/ibms-rodent-brain-chip-make-phones-hyper-smart/|tag|http://www.semanlink.net/tag/ibm +http://www.wired.com/2015/08/ibms-rodent-brain-chip-make-phones-hyper-smart/|tag|http://www.semanlink.net/tag/deep_learning +http://www.wired.com/2015/08/ibms-rodent-brain-chip-make-phones-hyper-smart/|tag|http://www.semanlink.net/tag/neuroscience_and_ai +http://www.wired.com/2015/08/ibms-rodent-brain-chip-make-phones-hyper-smart/|tag|http://www.semanlink.net/tag/brains_in_silicon +http://www.wired.com/2015/08/ibms-rodent-brain-chip-make-phones-hyper-smart/|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.wired.com/2015/08/ibms-rodent-brain-chip-make-phones-hyper-smart/|title|IBM's 'Rodent Brain' Chip Could Make Our Phones Hyper-Smart WIRED +http://www.wired.com/2015/08/ibms-rodent-brain-chip-make-phones-hyper-smart/|creationTime|2015-08-18T14:07:58Z +http://www.googleartproject.com/|creationDate|2012-04-18 +http://www.googleartproject.com/|tag|http://www.semanlink.net/tag/google +http://www.googleartproject.com/|tag|http://www.semanlink.net/tag/art +http://www.googleartproject.com/|comment|"40 countries 155 museums, 6000 artists 32000 HD zoomable artworks (april 2012) +" +http://www.googleartproject.com/|title|Google Art Project +http://www.googleartproject.com/|creationTime|2012-04-18T14:06:02Z +https://docs.google.com/presentation/d/17NoJY2SnC2UMbVegaRCWA7Oca7UCZ3vHnMqBV4SUayc/edit#slide=id.p|creationDate|2018-10-31 
+https://docs.google.com/presentation/d/17NoJY2SnC2UMbVegaRCWA7Oca7UCZ3vHnMqBV4SUayc/edit#slide=id.p|tag|http://www.semanlink.net/tag/emnlp_2018 +https://docs.google.com/presentation/d/17NoJY2SnC2UMbVegaRCWA7Oca7UCZ3vHnMqBV4SUayc/edit#slide=id.p|tag|http://www.semanlink.net/tag/allennlp +https://docs.google.com/presentation/d/17NoJY2SnC2UMbVegaRCWA7Oca7UCZ3vHnMqBV4SUayc/edit#slide=id.p|tag|http://www.semanlink.net/tag/tutorial +https://docs.google.com/presentation/d/17NoJY2SnC2UMbVegaRCWA7Oca7UCZ3vHnMqBV4SUayc/edit#slide=id.p|title|Writing Code for NLP Research, AllenNLP's tutorial at #emnlp2018 +https://docs.google.com/presentation/d/17NoJY2SnC2UMbVegaRCWA7Oca7UCZ3vHnMqBV4SUayc/edit#slide=id.p|creationTime|2018-10-31T18:11:21Z +http://www.thefigtrees.net/lee/blog/2010/07/could_semtech_run_on_excel_sem.html|creationDate|2010-09-02 +http://www.thefigtrees.net/lee/blog/2010/07/could_semtech_run_on_excel_sem.html|tag|http://www.semanlink.net/tag/sw_demo +http://www.thefigtrees.net/lee/blog/2010/07/could_semtech_run_on_excel_sem.html|tag|http://www.semanlink.net/tag/excel_and_sw +http://www.thefigtrees.net/lee/blog/2010/07/could_semtech_run_on_excel_sem.html|tag|http://www.semanlink.net/tag/anzo +http://www.thefigtrees.net/lee/blog/2010/07/could_semtech_run_on_excel_sem.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2010/07/could_semtech_run_on_excel_sem.html|title|Could SemTech Run On Excel? (SemTech Lightning Demo) - TechnicaLee Speaking +http://www.thefigtrees.net/lee/blog/2010/07/could_semtech_run_on_excel_sem.html|creationTime|2010-09-02T11:52:06Z +http://www.picment.com/articles/css/funwithforms/|creationDate|2008-06-02 +http://www.picment.com/articles/css/funwithforms/|tag|http://www.semanlink.net/tag/css +http://www.picment.com/articles/css/funwithforms/|tag|http://www.semanlink.net/tag/sample_code +http://www.picment.com/articles/css/funwithforms/|tag|http://www.semanlink.net/tag/forms +http://www.picment.com/articles/css/funwithforms/|title|Picment.com » Articles » CSS » Fun with forms – customized input elements +http://www.picment.com/articles/css/funwithforms/|creationTime|2008-06-02T22:22:14Z +https://carrotsearch.com/lingo3g/comparison/|creationDate|2017-06-04 +https://carrotsearch.com/lingo3g/comparison/|tag|http://www.semanlink.net/tag/carrot2 +https://carrotsearch.com/lingo3g/comparison/|title|Lingo3G or Carrot2? 
Carrot Search +https://carrotsearch.com/lingo3g/comparison/|creationTime|2017-06-04T13:14:42Z +http://www.journaldunet.com/ebusiness/marques-sites/ergonomie-configurateurs-automobiles/|creationDate|2012-08-03 +http://www.journaldunet.com/ebusiness/marques-sites/ergonomie-configurateurs-automobiles/|tag|http://www.semanlink.net/tag/configuration +http://www.journaldunet.com/ebusiness/marques-sites/ergonomie-configurateurs-automobiles/|title|Neuf configurateurs automobiles au banc d'essai - Journal du Net e-Business +http://www.journaldunet.com/ebusiness/marques-sites/ergonomie-configurateurs-automobiles/|creationTime|2012-08-03T13:47:48Z +http://www.nature.com/news/five-big-mysteries-about-crispr-s-origins-1.21294|creationDate|2017-01-15 +http://www.nature.com/news/five-big-mysteries-about-crispr-s-origins-1.21294|tag|http://www.semanlink.net/tag/crispr_cas9 +http://www.nature.com/news/five-big-mysteries-about-crispr-s-origins-1.21294|title|Five big mysteries about CRISPR’s origins : Nature News & Comment +http://www.nature.com/news/five-big-mysteries-about-crispr-s-origins-1.21294|creationTime|2017-01-15T14:40:43Z +http://www.bbc.co.uk/news/special/2014/newsspec_6954/index.html|creationDate|2014-04-07 +http://www.bbc.co.uk/news/special/2014/newsspec_6954/index.html|tag|http://www.semanlink.net/tag/genocide_rwandais +http://www.bbc.co.uk/news/special/2014/newsspec_6954/index.html|title|A good man in Rwanda +http://www.bbc.co.uk/news/special/2014/newsspec_6954/index.html|creationTime|2014-04-07T16:58:26Z +http://dig.csail.mit.edu/breadcrumbs/node/253|creationDate|2010-04-28 +http://dig.csail.mit.edu/breadcrumbs/node/253|tag|http://www.semanlink.net/tag/dan_connolly +http://dig.csail.mit.edu/breadcrumbs/node/253|tag|http://www.semanlink.net/tag/map_territory_relation +http://dig.csail.mit.edu/breadcrumbs/node/253|tag|http://www.semanlink.net/tag/scala +http://dig.csail.mit.edu/breadcrumbs/node/253|tag|http://www.semanlink.net/tag/rdf +http://dig.csail.mit.edu/breadcrumbs/node/253|title|Map and Territory in RDF APIs +http://dig.csail.mit.edu/breadcrumbs/node/253|creationTime|2010-04-28T23:34:39Z +http://www.devx.com/semantic/Article/39162|creationDate|2008-09-25 +http://www.devx.com/semantic/Article/39162|tag|http://www.semanlink.net/tag/benjamin_nowack +http://www.devx.com/semantic/Article/39162|tag|http://www.semanlink.net/tag/web_dev +http://www.devx.com/semantic/Article/39162|tag|http://www.semanlink.net/tag/semantic_web_dev +http://www.devx.com/semantic/Article/39162|comment|"""Getting Real"" is an agile approach to web application development. This article explains how it can be successfully combined with the flexibility of semantic web technologies." 
+http://www.devx.com/semantic/Article/39162|title|"""Getting Real"" with RDF and SPARQL" +http://www.devx.com/semantic/Article/39162|creationTime|2008-09-25T11:32:09Z +https://markdown-it.github.io/|creationDate|2017-04-01 +https://markdown-it.github.io/|tag|http://www.semanlink.net/tag/demo +https://markdown-it.github.io/|tag|http://www.semanlink.net/tag/markdown +https://markdown-it.github.io/|tag|http://www.semanlink.net/tag/markdown_ittt +https://markdown-it.github.io/|title|markdown-it demo +https://markdown-it.github.io/|creationTime|2017-04-01T00:39:07Z +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling%27s%20Blog/1434|creationDate|2009-04-02 +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling%27s%20Blog/1434|tag|http://www.semanlink.net/tag/sql_to_rdf_mapping +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling%27s%20Blog/1434|tag|http://www.semanlink.net/tag/etl +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling%27s%20Blog/1434|tag|http://www.semanlink.net/tag/orri_erling +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling%27s%20Blog/1434|title|Requirements for Relational-to-RDF Mapping +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling%27s%20Blog/1434|creationTime|2009-04-02T14:12:52Z +https://www.aminer.cn/dl4g-sde|creationDate|2019-04-30 +https://www.aminer.cn/dl4g-sde|tag|http://www.semanlink.net/tag/structured_data_embedding +https://www.aminer.cn/dl4g-sde|tag|http://www.semanlink.net/tag/workshop +https://www.aminer.cn/dl4g-sde|tag|http://www.semanlink.net/tag/thewebconf_2019 +https://www.aminer.cn/dl4g-sde|tag|http://www.semanlink.net/tag/philippe_cudre_mauroux +https://www.aminer.cn/dl4g-sde|tag|http://www.semanlink.net/tag/graph_embeddings +https://www.aminer.cn/dl4g-sde|title|International Workshop on Deep Learning for Graphs and Structured Data Embedding +https://www.aminer.cn/dl4g-sde|creationTime|2019-04-30T13:28:30Z +http://www.thumbshots.org/|creationDate|2006-10-11 +http://www.thumbshots.org/|tag|http://www.semanlink.net/tag/web_tools +http://www.thumbshots.org/|tag|http://www.semanlink.net/tag/thumbnails +http://www.thumbshots.org/|title|Open Thumbshots - Free Web thumbnail preview image +https://developers.google.com/machine-learning/guides/text-classification/step-2-5|creationDate|2018-07-23 +https://developers.google.com/machine-learning/guides/text-classification/step-2-5|tag|http://www.semanlink.net/tag/google +https://developers.google.com/machine-learning/guides/text-classification/step-2-5|tag|http://www.semanlink.net/tag/francois_chollet +https://developers.google.com/machine-learning/guides/text-classification/step-2-5|tag|http://www.semanlink.net/tag/nlp_text_classification +https://developers.google.com/machine-learning/guides/text-classification/step-2-5|tag|http://www.semanlink.net/tag/tutorial +https://developers.google.com/machine-learning/guides/text-classification/step-2-5|comment|"F. 
Chollet: ""An important insight is that the ratio between number of training samples and mean number of words per sample can tell you whether you should be using a n-gram model or a sequence model -- and whether you should use pre-trained word embeddings or train your own from scratch."" +" +https://developers.google.com/machine-learning/guides/text-classification/step-2-5|title|Practical guide to text classification    Google Developers +https://developers.google.com/machine-learning/guides/text-classification/step-2-5|creationTime|2018-07-23T22:01:01Z +http://www.wired.com/2015/12/the-cia-secret-to-cybersecurity-that-no-one-seems-to-get/|creationDate|2015-12-21 +http://www.wired.com/2015/12/the-cia-secret-to-cybersecurity-that-no-one-seems-to-get/|tag|http://www.semanlink.net/tag/securite_informatique +http://www.wired.com/2015/12/the-cia-secret-to-cybersecurity-that-no-one-seems-to-get/|title|The CIA Secret to Cybersecurity That No One Seems to Get WIRED +http://www.wired.com/2015/12/the-cia-secret-to-cybersecurity-that-no-one-seems-to-get/|creationTime|2015-12-21T13:16:00Z +http://events.linkeddata.org/ldow2008/papers/22-passant-laublet-meaning-of-a-tag.pdf|creationDate|2008-03-30 +http://events.linkeddata.org/ldow2008/papers/22-passant-laublet-meaning-of-a-tag.pdf|tag|http://www.semanlink.net/tag/ldow2008 +http://events.linkeddata.org/ldow2008/papers/22-passant-laublet-meaning-of-a-tag.pdf|tag|http://www.semanlink.net/tag/moat +http://events.linkeddata.org/ldow2008/papers/22-passant-laublet-meaning-of-a-tag.pdf|tag|http://www.semanlink.net/tag/alexandre_passant +http://events.linkeddata.org/ldow2008/papers/22-passant-laublet-meaning-of-a-tag.pdf|title|Meaning Of A Tag: A Collaborative Approach to Bridge the Gap Between Tagging and Linked Data +http://events.linkeddata.org/ldow2008/papers/22-passant-laublet-meaning-of-a-tag.pdf|creationTime|2008-03-30T21:32:39Z +https://atom.io/docs/latest/|creationDate|2014-10-05 +https://atom.io/docs/latest/|tag|http://www.semanlink.net/tag/atom_github +https://atom.io/docs/latest/|tag|http://www.semanlink.net/tag/documentation +https://atom.io/docs/latest/|title|ATOM documentation +https://atom.io/docs/latest/|creationTime|2014-10-05T17:42:10Z +http://www.semanlink.net/documents/2018/05/TheWebConf-CR.md|creationDate|2018-05-05 +http://www.semanlink.net/documents/2018/05/TheWebConf-CR.md|tag|http://www.semanlink.net/tag/thewebconf_2018 +http://www.semanlink.net/documents/2018/05/TheWebConf-CR.md|tag|http://www.semanlink.net/tag/fps_notes +http://www.semanlink.net/documents/2018/05/TheWebConf-CR.md|title|The Web Conf 2018 : compte-rendu +http://www.semanlink.net/documents/2018/05/TheWebConf-CR.md|creationTime|2018-05-05T12:31:30Z +http://www.semanlink.net/documents/2018/05/TheWebConf-CR.md|date|2018-05-04 +http://www.semanlink.net/documents/2018/05/TheWebConf-CR.md|publish|true +http://en.wikipedia.org/wiki/Fitch%27s_paradox_of_knowability|creationDate|2012-02-03 +http://en.wikipedia.org/wiki/Fitch%27s_paradox_of_knowability|tag|http://www.semanlink.net/tag/logic +http://en.wikipedia.org/wiki/Fitch%27s_paradox_of_knowability|title|Fitch's paradox of knowability - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Fitch%27s_paradox_of_knowability|creationTime|2012-02-03T10:23:31Z +http://jena.apache.org/download/maven.html|creationDate|2012-08-14 +http://jena.apache.org/download/maven.html|tag|http://www.semanlink.net/tag/maven +http://jena.apache.org/download/maven.html|tag|http://www.semanlink.net/tag/jena 
+http://jena.apache.org/download/maven.html|title|Apache Jena - Using Jena with Apache Maven +http://jena.apache.org/download/maven.html|creationTime|2012-08-14T16:04:46Z +http://www.flickr.com/photos/iks_project/7394169866/in/set-72157630176990928/|creationDate|2012-07-10 +http://www.flickr.com/photos/iks_project/7394169866/in/set-72157630176990928/|tag|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://www.flickr.com/photos/iks_project/7394169866/in/set-72157630176990928/|tag|http://www.semanlink.net/tag/forms +http://www.flickr.com/photos/iks_project/7394169866/in/set-72157630176990928/|tag|http://www.semanlink.net/tag/cms +http://www.flickr.com/photos/iks_project/7394169866/in/set-72157630176990928/|tag|http://www.semanlink.net/tag/henri_bergius +http://www.flickr.com/photos/iks_project/7394169866/in/set-72157630176990928/|title|Build a CMS, no forms allowed +http://www.flickr.com/photos/iks_project/7394169866/in/set-72157630176990928/|creationTime|2012-07-10T23:15:05Z +http://maps.google.com/maps?f=q&hl=fr&q=170+rue+de+Lourmel+Paris+France&ie=UTF8&om=1&z=17&ll=48.839816,2.278762&spn=0.005303,0.012896&t=h|creationDate|2006-09-23 +http://maps.google.com/maps?f=q&hl=fr&q=170+rue+de+Lourmel+Paris+France&ie=UTF8&om=1&z=17&ll=48.839816,2.278762&spn=0.005303,0.012896&t=h|tag|http://www.semanlink.net/tag/170_rue_de_lourmel +http://maps.google.com/maps?f=q&hl=fr&q=170+rue+de+Lourmel+Paris+France&ie=UTF8&om=1&z=17&ll=48.839816,2.278762&spn=0.005303,0.012896&t=h|tag|http://www.semanlink.net/tag/google_maps +http://maps.google.com/maps?f=q&hl=fr&q=170+rue+de+Lourmel+Paris+France&ie=UTF8&om=1&z=17&ll=48.839816,2.278762&spn=0.005303,0.012896&t=h|title|170, rue de Lourmel +http://www.newsforge.com/article.pl?sid=06/08/14/1438204|creationDate|2006-08-19 +http://www.newsforge.com/article.pl?sid=06/08/14/1438204|tag|http://www.semanlink.net/tag/antimilitarisme +http://www.newsforge.com/article.pl?sid=06/08/14/1438204|tag|http://www.semanlink.net/tag/open_source +http://www.newsforge.com/article.pl?sid=06/08/14/1438204|tag|http://www.semanlink.net/tag/gpl +http://www.newsforge.com/article.pl?sid=06/08/14/1438204|title|"Open source project adds ""no military use"" clause to the GPL" +http://www.blogmarks.net/|creationDate|2005-04-08 +http://www.blogmarks.net/|tag|http://www.semanlink.net/tag/blogmarks +http://www.blogmarks.net/|title|http://www.blogmarks.net +http://www.blogmarks.net/|creationTime|2005-04-08T22:00:00Z +https://github.com/oxford-cs-deepnlp-2017/lectures|creationDate|2017-02-07 +https://github.com/oxford-cs-deepnlp-2017/lectures|tag|http://www.semanlink.net/tag/slides +https://github.com/oxford-cs-deepnlp-2017/lectures|tag|http://www.semanlink.net/tag/deep_nlp +https://github.com/oxford-cs-deepnlp-2017/lectures|comment|lecture slides and course description for the Deep Natural Language Processing course offered in Hilary Term 2017 at the University of Oxford +https://github.com/oxford-cs-deepnlp-2017/lectures|title|lectures: Oxford Deep NLP 2017 course +https://github.com/oxford-cs-deepnlp-2017/lectures|creationTime|2017-02-07T13:59:28Z +http://news.bbc.co.uk/2/hi/technology/8598871.stm|creationDate|2010-05-20 +http://news.bbc.co.uk/2/hi/technology/8598871.stm|tag|http://www.semanlink.net/tag/punk +http://news.bbc.co.uk/2/hi/technology/8598871.stm|comment|Is Chatroulette a sign of internet punk? 
+http://news.bbc.co.uk/2/hi/technology/8598871.stm|title|The Day the Web Turned Day-Glo +http://news.bbc.co.uk/2/hi/technology/8598871.stm|creationTime|2010-05-20T22:50:39Z +http://news.bbc.co.uk/2/hi/technology/8598871.stm|source|BBC +http://www.phontron.com/nlptools.php|creationDate|2014-03-15 +http://www.phontron.com/nlptools.php|tag|http://www.semanlink.net/tag/nlp_tools +http://www.phontron.com/nlptools.php|title|Natural Language Processing Tools +http://www.phontron.com/nlptools.php|creationTime|2014-03-15T12:26:44Z +http://www.confoto.org/cart|creationDate|2005-11-16 +http://www.confoto.org/cart|tag|http://www.semanlink.net/tag/rdf_application +http://www.confoto.org/cart|comment|CONFOTO is an online service which facilitates browsing, annotating and re-purposing of photo, conference, and people descriptions. +http://www.confoto.org/cart|title|CONFOTO - RDF Data Cart +https://www.theguardian.com/environment/2017/apr/06/farms-could-slash-pesticide-use-without-losses-research-reveals|creationDate|2017-04-08 +https://www.theguardian.com/environment/2017/apr/06/farms-could-slash-pesticide-use-without-losses-research-reveals|tag|http://www.semanlink.net/tag/pesticide +https://www.theguardian.com/environment/2017/apr/06/farms-could-slash-pesticide-use-without-losses-research-reveals|title|Farms could slash pesticide use without losses, research reveals Environment The Guardian +https://www.theguardian.com/environment/2017/apr/06/farms-could-slash-pesticide-use-without-losses-research-reveals|creationTime|2017-04-08T13:00:44Z +http://www.independent.co.uk/life-style/gadgets-and-tech/news/prosthetic-hand-lets-man-actually-feel-what-he-touches-for-the-first-time-10499870.html|creationDate|2015-09-19 +http://www.independent.co.uk/life-style/gadgets-and-tech/news/prosthetic-hand-lets-man-actually-feel-what-he-touches-for-the-first-time-10499870.html|tag|http://www.semanlink.net/tag/brain_machine_interface +http://www.independent.co.uk/life-style/gadgets-and-tech/news/prosthetic-hand-lets-man-actually-feel-what-he-touches-for-the-first-time-10499870.html|title|Prosthetic hand lets man actually feel what he touches for the first time - News - Gadgets and Tech - The Independent +http://www.independent.co.uk/life-style/gadgets-and-tech/news/prosthetic-hand-lets-man-actually-feel-what-he-touches-for-the-first-time-10499870.html|creationTime|2015-09-19T03:22:02Z +http://www.rfi.fr/france/20190415-france-incendie-cours-cathedrale-notre-dame-paris?ref=tw|creationDate|2019-04-15 +http://www.rfi.fr/france/20190415-france-incendie-cours-cathedrale-notre-dame-paris?ref=tw|tag|http://www.semanlink.net/tag/notre_dame_de_paris +http://www.rfi.fr/france/20190415-france-incendie-cours-cathedrale-notre-dame-paris?ref=tw|comment|"La structure de Notre-Dame de Paris « est sauvée et préservée dans sa globalité », ont affirmé les pompiers lundi soir à 23h **après des heures d'angoisse**. 
+ +""Nous rebâtirons Notre-Dame, parce que c’est ce que les Français attendent, parce que c’est ce que notre histoire mérite, parce que c’est notre destin profond""" +http://www.rfi.fr/france/20190415-france-incendie-cours-cathedrale-notre-dame-paris?ref=tw|title|[En direct] La structure de Notre-Dame de Paris «est sauvée» - France - RFI +http://www.rfi.fr/france/20190415-france-incendie-cours-cathedrale-notre-dame-paris?ref=tw|creationTime|2019-04-15T23:31:54Z +https://www.deepl.com/translator|creationDate|2017-11-21 +https://www.deepl.com/translator|tag|http://www.semanlink.net/tag/machine_translation +https://www.deepl.com/translator|title|DeepL Traducteur +https://www.deepl.com/translator|creationTime|2017-11-21T11:40:54Z +https://wikipedia2vec.github.io/wikipedia2vec/|creationDate|2018-12-23 +https://wikipedia2vec.github.io/wikipedia2vec/|tag|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +https://wikipedia2vec.github.io/wikipedia2vec/|tag|http://www.semanlink.net/tag/wikipedia2vec +https://wikipedia2vec.github.io/wikipedia2vec/|comment|[GitHub](https://github.com/wikipedia2vec/wikipedia2vec) [Paper](doc:2020/09/1812_06280_wikipedia2vec_an_) +https://wikipedia2vec.github.io/wikipedia2vec/|relatedDoc|http://www.semanlink.net/doc/2020/09/1812_06280_wikipedia2vec_an_ +https://wikipedia2vec.github.io/wikipedia2vec/|title|Wikipedia2Vec +https://wikipedia2vec.github.io/wikipedia2vec/|creationTime|2018-12-23T22:19:48Z +http://graves.cl/visualRDF/|creationDate|2012-05-12 +http://graves.cl/visualRDF/|tag|http://www.semanlink.net/tag/rdf +http://graves.cl/visualRDF/|tag|http://www.semanlink.net/tag/sw_online_tools +http://graves.cl/visualRDF/|tag|http://www.semanlink.net/tag/graph_visualization +http://graves.cl/visualRDF/|title|Visual RDF +http://graves.cl/visualRDF/|creationTime|2012-05-12T18:14:08Z +http://searchengineland.com/make-yourself-findable-in-the-global-graph-of-commerce-135082|creationDate|2012-10-13 +http://searchengineland.com/make-yourself-findable-in-the-global-graph-of-commerce-135082|tag|http://www.semanlink.net/tag/tutorial +http://searchengineland.com/make-yourself-findable-in-the-global-graph-of-commerce-135082|tag|http://www.semanlink.net/tag/goodrelations +http://searchengineland.com/make-yourself-findable-in-the-global-graph-of-commerce-135082|title|An Illustrated Guide To E-Commerce Markup Using GoodRelations +http://searchengineland.com/make-yourself-findable-in-the-global-graph-of-commerce-135082|creationTime|2012-10-13T10:41:50Z +http://wodka.over-blog.com/article-2280247.html|creationDate|2007-02-23 +http://wodka.over-blog.com/article-2280247.html|tag|http://www.semanlink.net/tag/incipit +http://wodka.over-blog.com/article-2280247.html|tag|http://www.semanlink.net/tag/un_ivrogne_dans_la_brousse +http://wodka.over-blog.com/article-2280247.html|comment|"> Je me soûlais au vin de palme depuis l'âge de dix ans. Je n'avais rien eu d'autre à faire dans la vie que de boire du vin de palme. Dans ce temps-là, il n'y avait pas d'argent, on ne connaissait que les cauris, aussi la vie était bon marché et mon père était l'homme le plus riche de la ville. +> +> Mon père avait huit enfants et j'étais leur aîné, les autres travaillaient dur, moi j'étais un recordman du vin de palme. (…] Quand mon père s'est aperçu que je ne pouvais rien faire d'autre que de boire, il a engagé un excellent malafoutier qui n'avait rien d'autre à faire qu'à me préparer mon vin de palme pour la journée." 
+http://wodka.over-blog.com/article-2280247.html|title|Un ivrogne dans la brousse +http://wodka.over-blog.com/article-2280247.html|creationTime|2007-02-23T20:56:34Z +http://decentralyze.com/2010/03/09/rdf-meets-nosql/|creationDate|2010-03-10 +http://decentralyze.com/2010/03/09/rdf-meets-nosql/|tag|http://www.semanlink.net/tag/nosql +http://decentralyze.com/2010/03/09/rdf-meets-nosql/|tag|http://www.semanlink.net/tag/rdf +http://decentralyze.com/2010/03/09/rdf-meets-nosql/|title|RDF meets NoSQL « Decentralyze – Programming the Data Cloud +http://decentralyze.com/2010/03/09/rdf-meets-nosql/|creationTime|2010-03-10T21:22:24Z +http://xtech06.usefulinc.com/schedule/paper/61|creationDate|2006-05-22 +http://xtech06.usefulinc.com/schedule/paper/61|tag|http://www.semanlink.net/tag/web_services +http://xtech06.usefulinc.com/schedule/paper/61|tag|http://www.semanlink.net/tag/leigh_dodds +http://xtech06.usefulinc.com/schedule/paper/61|tag|http://www.semanlink.net/tag/good +http://xtech06.usefulinc.com/schedule/paper/61|tag|http://www.semanlink.net/tag/xtech_2006 +http://xtech06.usefulinc.com/schedule/paper/61|tag|http://www.semanlink.net/tag/sparql +http://xtech06.usefulinc.com/schedule/paper/61|comment|This paper will review the SPARQL specifications and its potential benefits to Web 2.0 applications. Focusing on the SPARQL protocol for RDF, the paper will provide implementation guidance for developers interested in adding SPARQL support to their applications. +http://xtech06.usefulinc.com/schedule/paper/61|title|XTech 2006: SPARQLing Services +http://www.lemonde.fr/planete/article/2014/01/16/une-fourmi-ninja-pillarde-et-esclavagiste-decouverte_4349559_3244.html|creationDate|2014-01-29 +http://www.lemonde.fr/planete/article/2014/01/16/une-fourmi-ninja-pillarde-et-esclavagiste-decouverte_4349559_3244.html|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://www.lemonde.fr/planete/article/2014/01/16/une-fourmi-ninja-pillarde-et-esclavagiste-decouverte_4349559_3244.html|tag|http://www.semanlink.net/tag/fourmi +http://www.lemonde.fr/planete/article/2014/01/16/une-fourmi-ninja-pillarde-et-esclavagiste-decouverte_4349559_3244.html|tag|http://www.semanlink.net/tag/esclavage +http://www.lemonde.fr/planete/article/2014/01/16/une-fourmi-ninja-pillarde-et-esclavagiste-decouverte_4349559_3244.html|title|Une fourmi « ninja pillarde » et esclavagiste +http://www.lemonde.fr/planete/article/2014/01/16/une-fourmi-ninja-pillarde-et-esclavagiste-decouverte_4349559_3244.html|creationTime|2014-01-29T22:42:52Z +http://www.itworld.com/Tech/3494/071026pivo2/index.html|creationDate|2007-11-08 +http://www.itworld.com/Tech/3494/071026pivo2/index.html|tag|http://www.semanlink.net/tag/nissan +http://www.itworld.com/Tech/3494/071026pivo2/index.html|tag|http://www.semanlink.net/tag/robotique +http://www.itworld.com/Tech/3494/071026pivo2/index.html|title|ITworld.com - Nissan adds a robot helper to its concept car +http://www.itworld.com/Tech/3494/071026pivo2/index.html|creationTime|2007-11-08T16:23:11Z +https://www.theguardian.com/technology/2018/may/24/mark-zuckerberg-set-up-fraudulent-scheme-weaponise-data-facebook-court-case-alleges|creationDate|2018-05-27 +https://www.theguardian.com/technology/2018/may/24/mark-zuckerberg-set-up-fraudulent-scheme-weaponise-data-facebook-court-case-alleges|tag|http://www.semanlink.net/tag/mark_zuckerberg +https://www.theguardian.com/technology/2018/may/24/mark-zuckerberg-set-up-fraudulent-scheme-weaponise-data-facebook-court-case-alleges|tag|http://www.semanlink.net/tag/facebook 
+https://www.theguardian.com/technology/2018/may/24/mark-zuckerberg-set-up-fraudulent-scheme-weaponise-data-facebook-court-case-alleges|tag|http://www.semanlink.net/tag/facebook_cambridge_analytica +https://www.theguardian.com/technology/2018/may/24/mark-zuckerberg-set-up-fraudulent-scheme-weaponise-data-facebook-court-case-alleges|title|Zuckerberg set up fraudulent scheme to 'weaponise' data, court case alleges Technology The Guardian +https://www.theguardian.com/technology/2018/may/24/mark-zuckerberg-set-up-fraudulent-scheme-weaponise-data-facebook-court-case-alleges|creationTime|2018-05-27T10:41:30Z +http://www.lemonde.fr/pixels/article/2014/10/03/firechat-l-application-popularisee-par-les-manifestants-hongkongais_4500216_4408996.html|creationDate|2014-10-05 +http://www.lemonde.fr/pixels/article/2014/10/03/firechat-l-application-popularisee-par-les-manifestants-hongkongais_4500216_4408996.html|tag|http://www.semanlink.net/tag/hong_kong +http://www.lemonde.fr/pixels/article/2014/10/03/firechat-l-application-popularisee-par-les-manifestants-hongkongais_4500216_4408996.html|tag|http://www.semanlink.net/tag/mobile_apps +http://www.lemonde.fr/pixels/article/2014/10/03/firechat-l-application-popularisee-par-les-manifestants-hongkongais_4500216_4408996.html|title|Firechat, l'application popularisée par les manifestants hongkongais +http://www.lemonde.fr/pixels/article/2014/10/03/firechat-l-application-popularisee-par-les-manifestants-hongkongais_4500216_4408996.html|creationTime|2014-10-05T17:16:37Z +http://bnode.org/blog/2008/07/29/semantic-web-by-example-semantic-crunchbase|creationDate|2008-08-01 +http://bnode.org/blog/2008/07/29/semantic-web-by-example-semantic-crunchbase|tag|http://www.semanlink.net/tag/benjamin_nowack +http://bnode.org/blog/2008/07/29/semantic-web-by-example-semantic-crunchbase|tag|http://www.semanlink.net/tag/sparql +http://bnode.org/blog/2008/07/29/semantic-web-by-example-semantic-crunchbase|title|Semantic Web by Example: Semantic CrunchBase - benjamin nowack's blog +http://bnode.org/blog/2008/07/29/semantic-web-by-example-semantic-crunchbase|creationTime|2008-08-01T12:36:48Z +http://www.mail-archive.com/log4j-user@jakarta.apache.org/msg08853.html|creationDate|2009-05-04 +http://www.mail-archive.com/log4j-user@jakarta.apache.org/msg08853.html|tag|http://www.semanlink.net/tag/log4j +http://www.mail-archive.com/log4j-user@jakarta.apache.org/msg08853.html|title|RE: log4j ObjectRenderer. +http://www.mail-archive.com/log4j-user@jakarta.apache.org/msg08853.html|creationTime|2009-05-04T14:49:49Z +http://dowhatimean.net/2006/05/juc-francois-paul-servant-%E2%80%93-semanlink|creationDate|2006-05-13 +http://dowhatimean.net/2006/05/juc-francois-paul-servant-%E2%80%93-semanlink|tag|http://www.semanlink.net/tag/linkto_semanlink +http://dowhatimean.net/2006/05/juc-francois-paul-servant-%E2%80%93-semanlink|tag|http://www.semanlink.net/tag/richard_cyganiak +http://dowhatimean.net/2006/05/juc-francois-paul-servant-%E2%80%93-semanlink|comment|"> Unlike most tagging systems, Semanlink lets you arrange tags into a concept hierarchy +> +> It’s a del.icio.us on steroids. The UI is not yet quite streamlined enough for my taste, but it looks usable. It’s technologically simple – Jena memory model, file-based persistence – so it should be hackable. Quite cool." 
+http://dowhatimean.net/2006/05/juc-francois-paul-servant-%E2%80%93-semanlink|title|dowhatimean.net » [juc] François-Paul Servant – Semanlink +http://fgiasson.com/blog/index.php/2008/07/16/starting-to-play-with-the-umbel-ontology/|creationDate|2008-07-17 +http://fgiasson.com/blog/index.php/2008/07/16/starting-to-play-with-the-umbel-ontology/|tag|http://www.semanlink.net/tag/umbel +http://fgiasson.com/blog/index.php/2008/07/16/starting-to-play-with-the-umbel-ontology/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2008/07/16/starting-to-play-with-the-umbel-ontology/|title|Starting to Play with the UMBEL Ontology at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2008/07/16/starting-to-play-with-the-umbel-ontology/|creationTime|2008-07-17T10:41:35Z +http://www.sciencedirect.com/science/article/pii/S0888613X08001813|creationDate|2017-11-07 +http://www.sciencedirect.com/science/article/pii/S0888613X08001813|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.sciencedirect.com/science/article/pii/S0888613X08001813|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +http://www.sciencedirect.com/science/article/pii/S0888613X08001813|tag|http://www.semanlink.net/tag/semantic_hashing +http://www.sciencedirect.com/science/article/pii/S0888613X08001813|tag|http://www.semanlink.net/tag/geoffrey_hinton +http://www.sciencedirect.com/science/article/pii/S0888613X08001813|tag|http://www.semanlink.net/tag/good +http://www.sciencedirect.com/science/article/pii/S0888613X08001813|tag|http://www.semanlink.net/tag/similarity_queries +http://www.sciencedirect.com/science/article/pii/S0888613X08001813|comment|"> We show how to learn a deep graphical model of the word-count vectors obtained from a +large set of documents. The values of the latent variables in the deepest layer are easy to +infer and give a much better representation of each document than Latent Semantic Analysis. +When the deepest layer is forced to use a small number of binary variables (e.g. 32), +the graphical model performs ‘‘semantic hashing”: Documents are mapped to memory +addresses in such a way that semantically similar documents are located at nearby +addresses. Documents similar to a query document can then be found by simply accessing +all the addresses that differ by only a few bits from the address of the query document. This +way of extending the efficiency of hash-coding to approximate matching is much faster +than locality sensitive hashing, which is the fastest current method. By using semantic +hashing to filter the documents given to TF-IDF, we achieve higher accuracy than applying +TF-IDF to the entire document set. + + +Indexing is implemented in the following manner: a document is mapped to a word-count vector and then this vector is passed through a [#Restricted Boltzmann Machine](/tag/restricted_boltzmann_machine) autoencoder and encoded to 32-bit address. 
+ + +" +http://www.sciencedirect.com/science/article/pii/S0888613X08001813|title|Semantic hashing (2008) - Ruslan Salakhutdinov, Geoffrey Hinton +http://www.sciencedirect.com/science/article/pii/S0888613X08001813|creationTime|2017-11-07T11:54:38Z +http://www.bbc.co.uk/news/science-environment-17436400|creationDate|2012-03-20 +http://www.bbc.co.uk/news/science-environment-17436400|tag|http://www.semanlink.net/tag/archeologie +http://www.bbc.co.uk/news/science-environment-17436400|tag|http://www.semanlink.net/tag/satellite_images +http://www.bbc.co.uk/news/science-environment-17436400|title|BBC News - Ancient sites spotted from space, say archaeologists +http://www.bbc.co.uk/news/science-environment-17436400|creationTime|2012-03-20T23:04:49Z +http://horicky.blogspot.fr/2008/11/hadoop-mapreduce-implementation.html|creationDate|2013-04-23 +http://horicky.blogspot.fr/2008/11/hadoop-mapreduce-implementation.html|tag|http://www.semanlink.net/tag/map_reduce +http://horicky.blogspot.fr/2008/11/hadoop-mapreduce-implementation.html|comment|Multithreading is one of the popular way of doing parallel programming, but major complexity of multi-thread programming is to co-ordinate the access of each thread to the shared data.. If we can eliminate the shared state completely, then the complexity of co-ordination will disappear. +http://horicky.blogspot.fr/2008/11/hadoop-mapreduce-implementation.html|title|Hadoop Map/Reduce Implementation (Pragmatic Programming Techniques) +http://horicky.blogspot.fr/2008/11/hadoop-mapreduce-implementation.html|creationTime|2013-04-23T17:27:25Z +https://www.nytimes.com/2017/11/21/magazine/can-ai-be-taught-to-explain-itself.html|creationDate|2017-11-23 +https://www.nytimes.com/2017/11/21/magazine/can-ai-be-taught-to-explain-itself.html|tag|http://www.semanlink.net/tag/artificial_intelligence +https://www.nytimes.com/2017/11/21/magazine/can-ai-be-taught-to-explain-itself.html|tag|http://www.semanlink.net/tag/right_to_explanation +https://www.nytimes.com/2017/11/21/magazine/can-ai-be-taught-to-explain-itself.html|tag|http://www.semanlink.net/tag/ia_limites +https://www.nytimes.com/2017/11/21/magazine/can-ai-be-taught-to-explain-itself.html|tag|http://www.semanlink.net/tag/deep_learning +https://www.nytimes.com/2017/11/21/magazine/can-ai-be-taught-to-explain-itself.html|tag|http://www.semanlink.net/tag/christopher_olah +https://www.nytimes.com/2017/11/21/magazine/can-ai-be-taught-to-explain-itself.html|title|Can A.I. Be Taught to Explain Itself? - The New York Times +https://www.nytimes.com/2017/11/21/magazine/can-ai-be-taught-to-explain-itself.html|creationTime|2017-11-23T00:00:01Z +http://sites.wiwiss.fu-berlin.de/suhl/bizer/HowtoPublishLinkedData.htm|creationDate|2007-06-27 +http://sites.wiwiss.fu-berlin.de/suhl/bizer/HowtoPublishLinkedData.htm|tag|http://www.semanlink.net/tag/richard_cyganiak +http://sites.wiwiss.fu-berlin.de/suhl/bizer/HowtoPublishLinkedData.htm|tag|http://www.semanlink.net/tag/tom_heath +http://sites.wiwiss.fu-berlin.de/suhl/bizer/HowtoPublishLinkedData.htm|tag|http://www.semanlink.net/tag/linked_data +http://sites.wiwiss.fu-berlin.de/suhl/bizer/HowtoPublishLinkedData.htm|tag|http://www.semanlink.net/tag/chris_bizer +http://sites.wiwiss.fu-berlin.de/suhl/bizer/HowtoPublishLinkedData.htm|tag|http://www.semanlink.net/tag/howto +http://sites.wiwiss.fu-berlin.de/suhl/bizer/HowtoPublishLinkedData.htm|title|How to publish Linked Data on the Web? 
+http://sites.wiwiss.fu-berlin.de/suhl/bizer/HowtoPublishLinkedData.htm|creationTime|2007-06-27T21:48:15Z +http://www.sorosoro.org/en/love|creationDate|2014-03-30 +http://www.sorosoro.org/en/love|tag|http://www.semanlink.net/tag/amour +http://www.sorosoro.org/en/love|title|Love « Sorosoro +http://www.sorosoro.org/en/love|creationTime|2014-03-30T03:24:12Z +http://purl.org/coo/ns|creationDate|2011-03-24 +http://purl.org/coo/ns|tag|http://www.semanlink.net/tag/car_options_ontology +http://purl.org/coo/ns|title|The Car Options Ontology (COO) +http://purl.org/coo/ns|creationTime|2011-03-24T16:38:08Z +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|creationDate|2019-05-18 +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|arxiv_author|Ellie Pavlick +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|arxiv_author|Ian Tenney +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|arxiv_author|Dipanjan Das +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|comment|"> We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations. +" +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|title|[1905.05950] BERT Rediscovers the Classical NLP Pipeline +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|bookmarkOf|https://arxiv.org/abs/1905.05950 +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|creationTime|2019-05-18T17:50:08Z +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|arxiv_summary|"Pre-trained text encoders have rapidly advanced the state of the art on many +NLP tasks. We focus on one such model, BERT, and aim to quantify where +linguistic information is captured within the network. We find that the model +represents the steps of the traditional NLP pipeline in an interpretable and +localizable way, and that the regions responsible for each step appear in the +expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. +Qualitative analysis reveals that the model can and often does adjust this +pipeline dynamically, revising lower-level decisions on the basis of +disambiguating information from higher-level representations." 
+http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|arxiv_firstAuthor|Ian Tenney +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|arxiv_updated|2019-08-09T15:51:47Z +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|arxiv_title|BERT Rediscovers the Classical NLP Pipeline +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|arxiv_published|2019-05-15T05:47:23Z +http://www.semanlink.net/doc/2019/05/_1905_05950_bert_rediscovers_t|arxiv_num|1905.05950 +http://www.lemonde.fr/festival/article/2015/07/22/rand-hindi-l-homme-qui-veut-faire-disparaitre-les-technologies_4693695_4415198.html|creationDate|2015-08-07 +http://www.lemonde.fr/festival/article/2015/07/22/rand-hindi-l-homme-qui-veut-faire-disparaitre-les-technologies_4693695_4415198.html|tag|http://www.semanlink.net/tag/startups +http://www.lemonde.fr/festival/article/2015/07/22/rand-hindi-l-homme-qui-veut-faire-disparaitre-les-technologies_4693695_4415198.html|title|Rand Hindi, l’homme qui veut faire disparaître les technologies +http://www.lemonde.fr/festival/article/2015/07/22/rand-hindi-l-homme-qui-veut-faire-disparaitre-les-technologies_4693695_4415198.html|creationTime|2015-08-07T15:42:12Z +http://www.industrie.com/it/semweb-pro-2012-le-web-semantique-sort-des-laboratoires.13246|creationDate|2012-05-30 +http://www.industrie.com/it/semweb-pro-2012-le-web-semantique-sort-des-laboratoires.13246|tag|http://www.semanlink.net/tag/semantic_web +http://www.industrie.com/it/semweb-pro-2012-le-web-semantique-sort-des-laboratoires.13246|tag|http://www.semanlink.net/tag/industrie +http://www.industrie.com/it/semweb-pro-2012-le-web-semantique-sort-des-laboratoires.13246|title|SemWeb.Pro 2012 : le Web sémantique sort des... - Industrie et Technologies +http://www.industrie.com/it/semweb-pro-2012-le-web-semantique-sort-des-laboratoires.13246|creationTime|2012-05-30T14:57:36Z +http://code.google.com/apis/visualization/documentation/gallery.html|creationDate|2010-08-27 +http://code.google.com/apis/visualization/documentation/gallery.html|tag|http://www.semanlink.net/tag/google_visualization_api +http://code.google.com/apis/visualization/documentation/gallery.html|title|Google Visualization API Gallery - Google Chart Tools / Interactive Charts (aka Visualization API) - Google Code +http://code.google.com/apis/visualization/documentation/gallery.html|creationTime|2010-08-27T17:41:59Z +http://www.opencroquet.org/index.html|creationDate|2005-11-15 +http://www.opencroquet.org/index.html|tag|http://www.semanlink.net/tag/alan_kay +http://www.opencroquet.org/index.html|tag|http://www.semanlink.net/tag/social_software +http://www.opencroquet.org/index.html|tag|http://www.semanlink.net/tag/peer_to_peer +http://www.opencroquet.org/index.html|comment|open source software platform for creating deeply collaborative multi-user online applications. It features a network architecture that supports communication, collaboration, resource sharing, and synchronous computation among multiple users. Using Croquet, software developers can create powerful and highly collaborative multi-user 2D and 3D applications and simulations. 
+http://www.opencroquet.org/index.html|title|Croquet Project +https://code.facebook.com/posts/181565595577955/introducing-deeptext-facebook-s-text-understanding-engine/|creationDate|2017-06-28 +https://code.facebook.com/posts/181565595577955/introducing-deeptext-facebook-s-text-understanding-engine/|tag|http://www.semanlink.net/tag/nlp_facebook +https://code.facebook.com/posts/181565595577955/introducing-deeptext-facebook-s-text-understanding-engine/|title|Introducing DeepText: Facebook's text understanding engine Engineering Blog Facebook Code +https://code.facebook.com/posts/181565595577955/introducing-deeptext-facebook-s-text-understanding-engine/|creationTime|2017-06-28T01:16:05Z +https://blog.frankel.ch/from-vaadin-to-docker-a-novices-journey|creationDate|2016-04-06 +https://blog.frankel.ch/from-vaadin-to-docker-a-novices-journey|tag|http://www.semanlink.net/tag/docker_tomcat +https://blog.frankel.ch/from-vaadin-to-docker-a-novices-journey|tag|http://www.semanlink.net/tag/docker_mac +https://blog.frankel.ch/from-vaadin-to-docker-a-novices-journey|title|A Java geekA Java geek > From Vaadin to Docker, a novice's journey +https://blog.frankel.ch/from-vaadin-to-docker-a-novices-journey|creationTime|2016-04-06T15:28:17Z +http://blog.sgo.to/2014/02/rows.html|creationDate|2014-10-21 +http://blog.sgo.to/2014/02/rows.html|tag|http://www.semanlink.net/tag/restful_semantic_web_services +http://blog.sgo.to/2014/02/rows.html|tag|http://www.semanlink.net/tag/schema_org_actions +http://blog.sgo.to/2014/02/rows.html|tag|http://www.semanlink.net/tag/linked_data +http://blog.sgo.to/2014/02/rows.html|tag|http://www.semanlink.net/tag/resources_oriented_web_services +http://blog.sgo.to/2014/02/rows.html|tag|http://www.semanlink.net/tag/samuel_goto +http://blog.sgo.to/2014/02/rows.html|comment|Resources-Oriented Web Services (ROWS) is a set of technologies that enable the programmatic discovery, description and invocation of actions on resources. 
+http://blog.sgo.to/2014/02/rows.html|title|Hello World: ROWS +http://blog.sgo.to/2014/02/rows.html|creationTime|2014-10-21T13:40:01Z +http://bblfish.net/work/presentations/2007/BOF-6747.pdf|creationDate|2008-04-08 +http://bblfish.net/work/presentations/2007/BOF-6747.pdf|tag|http://www.semanlink.net/tag/web_3_0 +http://bblfish.net/work/presentations/2007/BOF-6747.pdf|tag|http://www.semanlink.net/tag/slides +http://bblfish.net/work/presentations/2007/BOF-6747.pdf|tag|http://www.semanlink.net/tag/henry_story +http://bblfish.net/work/presentations/2007/BOF-6747.pdf|tag|http://www.semanlink.net/tag/semantic_camp_paris +http://bblfish.net/work/presentations/2007/BOF-6747.pdf|tag|http://www.semanlink.net/tag/nova_spivak +http://bblfish.net/work/presentations/2007/BOF-6747.pdf|tag|http://www.semanlink.net/tag/javaone +http://bblfish.net/work/presentations/2007/BOF-6747.pdf|title|Developing Web 3.0 - JavaOne +http://bblfish.net/work/presentations/2007/BOF-6747.pdf|creationTime|2008-04-08T20:37:31Z +https://medium.freecodecamp.org/rest-is-the-new-soap-97ff6c09896d|creationDate|2018-03-10 +https://medium.freecodecamp.org/rest-is-the-new-soap-97ff6c09896d|tag|http://www.semanlink.net/tag/soap_vs_rest +https://medium.freecodecamp.org/rest-is-the-new-soap-97ff6c09896d|tag|http://www.semanlink.net/tag/rest +https://medium.freecodecamp.org/rest-is-the-new-soap-97ff6c09896d|comment|[a response](https://philsturgeon.uk/api/2017/12/18/rest-confusion-explained/) +https://medium.freecodecamp.org/rest-is-the-new-soap-97ff6c09896d|title|REST is the new SOAP – freeCodeCamp +https://medium.freecodecamp.org/rest-is-the-new-soap-97ff6c09896d|creationTime|2018-03-10T09:23:04Z +https://www.npmjs.com/package/wikidata-taxonomy|creationDate|2017-10-29 +https://www.npmjs.com/package/wikidata-taxonomy|tag|http://www.semanlink.net/tag/wikidata +https://www.npmjs.com/package/wikidata-taxonomy|tag|http://www.semanlink.net/tag/taxonomies +https://www.npmjs.com/package/wikidata-taxonomy|comment|Command-line tool to extract taxonomies from Wikidata. +https://www.npmjs.com/package/wikidata-taxonomy|title|wikidata-taxonomy +https://www.npmjs.com/package/wikidata-taxonomy|creationTime|2017-10-29T09:09:26Z +https://scinfolex.com/2018/05/06/lanti-protection-sociale-de-facebook-et-lavenement-du-providentialisme-de-plateforme/|creationDate|2018-05-07 +https://scinfolex.com/2018/05/06/lanti-protection-sociale-de-facebook-et-lavenement-du-providentialisme-de-plateforme/|tag|http://www.semanlink.net/tag/facebook +https://scinfolex.com/2018/05/06/lanti-protection-sociale-de-facebook-et-lavenement-du-providentialisme-de-plateforme/|tag|http://www.semanlink.net/tag/china_s_social_credit_system +https://scinfolex.com/2018/05/06/lanti-protection-sociale-de-facebook-et-lavenement-du-providentialisme-de-plateforme/|comment|"> la protection sociale traditionnelle était le moyen qu’une société se donnait pour garantir les individus contre la réalisation de risques sociaux imprévisibles, là où le Social Credit System protège au contraire la société contre les « risques » liés à l’imprévisibilité des comportements humains, en cherchant à les standardiser au maximum et en prévenant les « déviances » par rapport à une norme sociale encodée algorithmiquement. 
+" +https://scinfolex.com/2018/05/06/lanti-protection-sociale-de-facebook-et-lavenement-du-providentialisme-de-plateforme/|title|L’anti-protection sociale de Facebook et l’avènement du « providentialisme de plateforme » – – S.I.Lex – +https://scinfolex.com/2018/05/06/lanti-protection-sociale-de-facebook-et-lavenement-du-providentialisme-de-plateforme/|creationTime|2018-05-07T23:34:16Z +https://arxiv.org/pdf/1412.1897v4.pdf|creationDate|2017-08-24 +https://arxiv.org/pdf/1412.1897v4.pdf|tag|http://www.semanlink.net/tag/image_recognition +https://arxiv.org/pdf/1412.1897v4.pdf|tag|http://www.semanlink.net/tag/artificial_neural_network +https://arxiv.org/pdf/1412.1897v4.pdf|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/pdf/1412.1897v4.pdf|arxiv_author|Anh Nguyen +https://arxiv.org/pdf/1412.1897v4.pdf|arxiv_author|Jason Yosinski +https://arxiv.org/pdf/1412.1897v4.pdf|arxiv_author|Jeff Clune +https://arxiv.org/pdf/1412.1897v4.pdf|title|[1412.1897] Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images +https://arxiv.org/pdf/1412.1897v4.pdf|creationTime|2017-08-24T00:47:56Z +https://arxiv.org/pdf/1412.1897v4.pdf|arxiv_summary|"Deep neural networks (DNNs) have recently been achieving state-of-the-art +performance on a variety of pattern-recognition tasks, most notably visual +classification problems. Given that DNNs are now able to classify objects in +images with near-human-level performance, questions naturally arise as to what +differences remain between computer and human vision. A recent study revealed +that changing an image (e.g. of a lion) in a way imperceptible to humans can +cause a DNN to label the image as something else entirely (e.g. mislabeling a +lion a library). Here we show a related result: it is easy to produce images +that are completely unrecognizable to humans, but that state-of-the-art DNNs +believe to be recognizable objects with 99.99% confidence (e.g. labeling with +certainty that white noise static is a lion). Specifically, we take +convolutional neural networks trained to perform well on either the ImageNet or +MNIST datasets and then find images with evolutionary algorithms or gradient +ascent that DNNs label with high confidence as belonging to each dataset class. +It is possible to produce images totally unrecognizable to human eyes that DNNs +believe with near certainty are familiar objects, which we call ""fooling +images"" (more generally, fooling examples). Our results shed light on +interesting differences between human vision and current DNNs, and raise +questions about the generality of DNN computer vision." 
+https://arxiv.org/pdf/1412.1897v4.pdf|arxiv_firstAuthor|Anh Nguyen +https://arxiv.org/pdf/1412.1897v4.pdf|arxiv_updated|2015-04-02T23:12:56Z +https://arxiv.org/pdf/1412.1897v4.pdf|arxiv_title|Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images +https://arxiv.org/pdf/1412.1897v4.pdf|arxiv_published|2014-12-05T05:29:43Z +https://arxiv.org/pdf/1412.1897v4.pdf|arxiv_num|1412.1897 +https://en.wikipedia.org/wiki/War_Witch|creationDate|2015-12-10 +https://en.wikipedia.org/wiki/War_Witch|tag|http://www.semanlink.net/tag/film +https://en.wikipedia.org/wiki/War_Witch|tag|http://www.semanlink.net/tag/enfants_soldats +https://en.wikipedia.org/wiki/War_Witch|tag|http://www.semanlink.net/tag/congo_kinshasa +https://en.wikipedia.org/wiki/War_Witch|comment|Canadian drama film by Kim Nguyen primarily filmed in the Democratic Republic of Congo +https://en.wikipedia.org/wiki/War_Witch|title|War Witch (French: Rebelle) +https://en.wikipedia.org/wiki/War_Witch|creationTime|2015-12-10T02:29:58Z +http://www.openlinksw.com/weblog/oerling/?id=1498|creationDate|2008-12-17 +http://www.openlinksw.com/weblog/oerling/?id=1498|tag|http://www.semanlink.net/tag/inverse_functional_properties +http://www.openlinksw.com/weblog/oerling/?id=1498|tag|http://www.semanlink.net/tag/virtuoso +http://www.openlinksw.com/weblog/oerling/?id=1498|tag|http://www.semanlink.net/tag/smushing +http://www.openlinksw.com/weblog/oerling/?id=1498|tag|http://www.semanlink.net/tag/orri_erling +http://www.openlinksw.com/weblog/oerling/?id=1498|tag|http://www.semanlink.net/tag/linking_open_data +http://www.openlinksw.com/weblog/oerling/?id=1498|title|"""E Pluribus Unum"", or ""Inversely Functional Identity"", or ""Smooshing Without the Stickiness"" (re-updated)" +http://www.openlinksw.com/weblog/oerling/?id=1498|creationTime|2008-12-17T16:48:50Z +https://youtu.be/CtDWzb1qd-E|creationDate|2015-05-16 +https://youtu.be/CtDWzb1qd-E|tag|http://www.semanlink.net/tag/youtube_video +https://youtu.be/CtDWzb1qd-E|tag|http://www.semanlink.net/tag/music_of_africa +https://youtu.be/CtDWzb1qd-E|tag|http://www.semanlink.net/tag/salsa +https://youtu.be/CtDWzb1qd-E|tag|http://www.semanlink.net/tag/disque_a_retrouver +https://youtu.be/CtDWzb1qd-E|tag|http://www.semanlink.net/tag/souvenirs +https://youtu.be/CtDWzb1qd-E|title|Akoguin Theresa - Maestro Laba Sosseh Con L'Orquesta Aragon +https://youtu.be/CtDWzb1qd-E|creationTime|2015-05-16T23:47:45Z +http://archive.eiffel.com/doc/manuals/technology/bmarticles/uml/page.html|creationDate|2010-09-24 +http://archive.eiffel.com/doc/manuals/technology/bmarticles/uml/page.html|tag|http://www.semanlink.net/tag/uml +http://archive.eiffel.com/doc/manuals/technology/bmarticles/uml/page.html|tag|http://www.semanlink.net/tag/rigolo +http://archive.eiffel.com/doc/manuals/technology/bmarticles/uml/page.html|comment|"""UML, as I said already, serves no software-related purpose whatsoever, and that's fine with me -- some people have better things to do with their lives than trying to improve software technology""" +http://archive.eiffel.com/doc/manuals/technology/bmarticles/uml/page.html|title|UML: The Positive Spin +http://archive.eiffel.com/doc/manuals/technology/bmarticles/uml/page.html|creationTime|2010-09-24T16:51:27Z +https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/|creationDate|2018-05-10 +https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/|tag|http://www.semanlink.net/tag/graph_based_text_representations 
+https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/|comment|"Comment dépasser les limites du modèle Bag Of Word en modélisant le texte sous forme de graphe. + +Organisé par [Michalis.Vazirgiannis](http://www.lix.polytechnique.fr/Labo/Michalis.Vazirgiannis/) (Polytechnique) et [Fragkiskos D. Malliaros](http://fragkiskos.me) (CentraleSupelec) + +[Slides](http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf)" +https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/|relatedDoc|http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf +https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/|title|TUTORIAL: Graph-based Text Representations: Boosting Text Mining, NLP and Information Retrieval with Graphs +https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/|creationTime|2018-05-10T13:51:07Z +https://arxiv.org/pdf/1706.00957.pdf|creationDate|2017-11-11 +https://arxiv.org/pdf/1706.00957.pdf|tag|http://www.semanlink.net/tag/elasticsearch +https://arxiv.org/pdf/1706.00957.pdf|tag|http://www.semanlink.net/tag/vector_space_model +https://arxiv.org/pdf/1706.00957.pdf|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/pdf/1706.00957.pdf|tag|http://www.semanlink.net/tag/semantic_search +https://arxiv.org/pdf/1706.00957.pdf|arxiv_author|Michal Růžička +https://arxiv.org/pdf/1706.00957.pdf|arxiv_author|Petr Sojka +https://arxiv.org/pdf/1706.00957.pdf|arxiv_author|Radim Řehůřek +https://arxiv.org/pdf/1706.00957.pdf|arxiv_author|Vít Novotný +https://arxiv.org/pdf/1706.00957.pdf|arxiv_author|Jan Pomikálek +https://arxiv.org/pdf/1706.00957.pdf|arxiv_author|Jan Rygl +https://arxiv.org/pdf/1706.00957.pdf|comment|"> The ubiquity of semantic vector space modeling raises the challenge of efficient searching in dense, high-dimensional vector spaces. We would naturally want to take advantage of the design and optimizations behind modern fulltext engines like Elasticsearch so as to meet the scalability and robustness demands of modern IR applications. This is the research challenge addressed in this paper. +> The paper describes novel ways of encoding dense vectors into text documents, allowing the use of traditional inverted index engines. + +[blog post](https://rare-technologies.com/semantic-search-fulltext-engine-acl-2017/) +" +https://arxiv.org/pdf/1706.00957.pdf|title|[1706.00957] Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines +https://arxiv.org/pdf/1706.00957.pdf|creationTime|2017-11-11T22:28:36Z +https://arxiv.org/pdf/1706.00957.pdf|arxiv_summary|"Vector representations and vector space modeling (VSM) play a central role in +modern machine learning. We propose a novel approach to `vector similarity +searching' over dense semantic representations of words and documents that can +be deployed on top of traditional inverted-index-based fulltext engines, taking +advantage of their robustness, stability, scalability and ubiquity. +We show that this approach allows the indexing and querying of dense vectors +in text domains. This opens up exciting avenues for major efficiency gains, +along with simpler deployment, scaling and monitoring. +The end result is a fast and scalable vector database with a tunable +trade-off between vector search performance and quality, backed by a standard +fulltext engine such as Elasticsearch. 
+We empirically demonstrate its querying performance and quality by applying +this solution to the task of semantic searching over a dense vector +representation of the entire English Wikipedia." +https://arxiv.org/pdf/1706.00957.pdf|arxiv_firstAuthor|Jan Rygl +https://arxiv.org/pdf/1706.00957.pdf|arxiv_updated|2017-06-03T14:21:22Z +https://arxiv.org/pdf/1706.00957.pdf|arxiv_title|Semantic Vector Encoding and Similarity Search Using Fulltext Search Engines +https://arxiv.org/pdf/1706.00957.pdf|arxiv_published|2017-06-03T14:21:22Z +https://arxiv.org/pdf/1706.00957.pdf|arxiv_num|1706.00957 +https://stackoverflow.com/questions/1064089/inserting-a-text-where-cursor-is-using-javascript-jquery|creationDate|2017-06-16 +https://stackoverflow.com/questions/1064089/inserting-a-text-where-cursor-is-using-javascript-jquery|tag|http://www.semanlink.net/tag/stackoverflow_q +https://stackoverflow.com/questions/1064089/inserting-a-text-where-cursor-is-using-javascript-jquery|tag|http://www.semanlink.net/tag/javascript_tips +https://stackoverflow.com/questions/1064089/inserting-a-text-where-cursor-is-using-javascript-jquery|title|Inserting a text where cursor is using Javascript/jquery - Stack Overflow +https://stackoverflow.com/questions/1064089/inserting-a-text-where-cursor-is-using-javascript-jquery|creationTime|2017-06-16T14:39:07Z +http://rdfa.digitalbazaar.com/rdfa-test-harness/|creationDate|2007-11-07 +http://rdfa.digitalbazaar.com/rdfa-test-harness/|tag|http://www.semanlink.net/tag/rdfa +http://rdfa.digitalbazaar.com/rdfa-test-harness/|comment|The RDFa Test Harness provides an interactive mechanism to check the conformance of an RDFa extractor and SPARQL query engine. +http://rdfa.digitalbazaar.com/rdfa-test-harness/|title|The RDFa Test Harness (Crazy Ivan) +http://rdfa.digitalbazaar.com/rdfa-test-harness/|creationTime|2007-11-07T17:17:54Z +http://static.googleusercontent.com/external_content/untrusted_dlcp/research.google.com/en//archive/bigtable-osdi06.pdf|creationDate|2013-02-18 +http://static.googleusercontent.com/external_content/untrusted_dlcp/research.google.com/en//archive/bigtable-osdi06.pdf|tag|http://www.semanlink.net/tag/bigtable +http://static.googleusercontent.com/external_content/untrusted_dlcp/research.google.com/en//archive/bigtable-osdi06.pdf|title|Bigtable: A Distributed Storage System for Structured Data +http://static.googleusercontent.com/external_content/untrusted_dlcp/research.google.com/en//archive/bigtable-osdi06.pdf|creationTime|2013-02-18T11:21:36Z +http://ebiquity.umbc.edu/blogger/?p=383|creationDate|2005-10-31 +http://ebiquity.umbc.edu/blogger/?p=383|tag|http://www.semanlink.net/tag/amazon_alexa +http://ebiquity.umbc.edu/blogger/?p=383|tag|http://www.semanlink.net/tag/web_services +http://ebiquity.umbc.edu/blogger/?p=383|title|EBB: ebiquity blog at UMBC » Alexa Web Information Services +http://www.osnews.com/story/23236/Why_Our_Civilization_s_Video_Art_and_Culture_is_Threatened_by_the_MPEG-LA|creationDate|2010-05-03 +http://www.osnews.com/story/23236/Why_Our_Civilization_s_Video_Art_and_Culture_is_Threatened_by_the_MPEG-LA|tag|http://www.semanlink.net/tag/patent +http://www.osnews.com/story/23236/Why_Our_Civilization_s_Video_Art_and_Culture_is_Threatened_by_the_MPEG-LA|tag|http://www.semanlink.net/tag/codec +http://www.osnews.com/story/23236/Why_Our_Civilization_s_Video_Art_and_Culture_is_Threatened_by_the_MPEG-LA|tag|http://www.semanlink.net/tag/video +http://www.osnews.com/story/23236/Why_Our_Civilization_s_Video_Art_and_Culture_is_Threatened_by_the_MPEG-LA|title|Why Our Civilization's Video Art and Culture is Threatened by the MPEG-LA
+http://www.osnews.com/story/23236/Why_Our_Civilization_s_Video_Art_and_Culture_is_Threatened_by_the_MPEG-LA|creationTime|2010-05-03T09:31:19Z +http://www.lucenetutorial.com/lucene-query-syntax.html|creationDate|2015-03-14 +http://www.lucenetutorial.com/lucene-query-syntax.html|tag|http://www.semanlink.net/tag/lucene +http://www.lucenetutorial.com/lucene-query-syntax.html|tag|http://www.semanlink.net/tag/tutorial +http://www.lucenetutorial.com/lucene-query-syntax.html|title|Lucene Query Syntax - Lucene Tutorial.com +http://www.lucenetutorial.com/lucene-query-syntax.html|creationTime|2015-03-14T22:35:31Z +http://www.w3.org/Submission/2010/04/|creationDate|2010-07-20 +http://www.w3.org/Submission/2010/04/|tag|http://www.semanlink.net/tag/owllink_protocol +http://www.w3.org/Submission/2010/04/|tag|http://www.semanlink.net/tag/w3c_submission +http://www.w3.org/Submission/2010/04/|title|Submission Request to W3C: OWLlink Protocol +http://www.w3.org/Submission/2010/04/|creationTime|2010-07-20T00:11:53Z +https://www.cs.hmc.edu/~jpadgett/nnfinal/NNPrsntnJP1.pdf|creationDate|2017-04-28 +https://www.cs.hmc.edu/~jpadgett/nnfinal/NNPrsntnJP1.pdf|tag|http://www.semanlink.net/tag/sparse_distributed_memory +https://www.cs.hmc.edu/~jpadgett/nnfinal/NNPrsntnJP1.pdf|title|Sparse Distributed Memory - A study of psychologically driven storage - Pentti Kanerva +https://www.cs.hmc.edu/~jpadgett/nnfinal/NNPrsntnJP1.pdf|creationTime|2017-04-28T22:59:33Z +https://arxiv.org/abs/1902.09229|creationDate|2019-03-20 +https://arxiv.org/abs/1902.09229|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1902.09229|tag|http://www.semanlink.net/tag/sanjeev_arora +https://arxiv.org/abs/1902.09229|tag|http://www.semanlink.net/tag/noise_contrastive_estimation +https://arxiv.org/abs/1902.09229|tag|http://www.semanlink.net/tag/embeddings +https://arxiv.org/abs/1902.09229|arxiv_author|Hrishikesh Khandeparkar +https://arxiv.org/abs/1902.09229|arxiv_author|Nikunj Saunshi +https://arxiv.org/abs/1902.09229|arxiv_author|Sanjeev Arora +https://arxiv.org/abs/1902.09229|arxiv_author|Mikhail Khodak +https://arxiv.org/abs/1902.09229|arxiv_author|Orestis Plevrakis +https://arxiv.org/abs/1902.09229|comment|[blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) +https://arxiv.org/abs/1902.09229|relatedDoc|http://www.offconvex.org/2019/03/19/CURL/ +https://arxiv.org/abs/1902.09229|title|[1902.09229] A Theoretical Analysis of Contrastive Unsupervised Representation Learning +https://arxiv.org/abs/1902.09229|creationTime|2019-03-20T16:57:53Z +https://arxiv.org/abs/1902.09229|arxiv_summary|"Recent empirical works have successfully used unlabeled data to learn feature +representations that are broadly useful in downstream classification tasks. +Several of these methods are reminiscent of the well-known word2vec embedding +algorithm: leveraging availability of pairs of semantically ""similar"" data +points and ""negative samples,"" the learner forces the inner product of +representations of similar pairs with each other to be higher on average than +with negative samples. The current paper uses the term contrastive learning for +such algorithms and presents a theoretical framework for analyzing them by +introducing latent classes and hypothesizing that semantically similar points +are sampled from the same latent class. This framework allows us to show +provable guarantees on the performance of the learned representations on the +average classification task that is comprised of a subset of the same set of +latent classes. Our generalization bound also shows that learned +representations can reduce (labeled) sample complexity on downstream tasks. We +conduct controlled experiments in both the text and image domains to support +the theory."
+https://arxiv.org/abs/1902.09229|arxiv_firstAuthor|Sanjeev Arora +https://arxiv.org/abs/1902.09229|arxiv_updated|2019-02-25T12:32:15Z +https://arxiv.org/abs/1902.09229|arxiv_title|A Theoretical Analysis of Contrastive Unsupervised Representation Learning +https://arxiv.org/abs/1902.09229|arxiv_published|2019-02-25T12:32:15Z +https://arxiv.org/abs/1902.09229|arxiv_num|1902.09229 +http://bugbrother.blog.lemonde.fr/2010/05/25/la-grande-bretagne-enterre-la-societe-de-surveillance/|creationDate|2010-05-25 +http://bugbrother.blog.lemonde.fr/2010/05/25/la-grande-bretagne-enterre-la-societe-de-surveillance/|tag|http://www.semanlink.net/tag/big_brother +http://bugbrother.blog.lemonde.fr/2010/05/25/la-grande-bretagne-enterre-la-societe-de-surveillance/|tag|http://www.semanlink.net/tag/nick_clegg +http://bugbrother.blog.lemonde.fr/2010/05/25/la-grande-bretagne-enterre-la-societe-de-surveillance/|tag|http://www.semanlink.net/tag/bonne_nouvelle +http://bugbrother.blog.lemonde.fr/2010/05/25/la-grande-bretagne-enterre-la-societe-de-surveillance/|tag|http://www.semanlink.net/tag/grande_bretagne +http://bugbrother.blog.lemonde.fr/2010/05/25/la-grande-bretagne-enterre-la-societe-de-surveillance/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2010/05/25/la-grande-bretagne-enterre-la-societe-de-surveillance/|comment|“It is outrageous that decent, law-abiding people are regularly treated as if they have something to hide. It has to stop. So there will be no ID card scheme. No national identity register, no second generation biometric passports ...
+http://bugbrother.blog.lemonde.fr/2010/05/25/la-grande-bretagne-enterre-la-societe-de-surveillance/|title|La Grande-Bretagne enterre la société de surveillance - BUG BROTHER +http://bugbrother.blog.lemonde.fr/2010/05/25/la-grande-bretagne-enterre-la-societe-de-surveillance/|creationTime|2010-05-25T13:23:46Z +http://code.google.com/p/owl1-1/|creationDate|2007-06-27 +http://code.google.com/p/owl1-1/|tag|http://www.semanlink.net/tag/owl_1_1 +http://code.google.com/p/owl1-1/|tag|http://www.semanlink.net/tag/wiki +http://code.google.com/p/owl1-1/|title|owl1-1 - Google Code +http://code.google.com/p/owl1-1/|creationTime|2007-06-27T21:44:27Z +http://www.cl.cam.ac.uk/~mgk25/iso-time.html|creationDate|2006-08-05 +http://www.cl.cam.ac.uk/~mgk25/iso-time.html|tag|http://www.semanlink.net/tag/dev +http://www.cl.cam.ac.uk/~mgk25/iso-time.html|tag|http://www.semanlink.net/tag/semanlink_dev +http://www.cl.cam.ac.uk/~mgk25/iso-time.html|comment|A summary of the international standard date and time notation +http://www.cl.cam.ac.uk/~mgk25/iso-time.html|title|International standard date and time notation +http://www.ibm.com/developerworks/java/library/j-solr1/|creationDate|2012-05-15 +http://www.ibm.com/developerworks/java/library/j-solr1/|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.ibm.com/developerworks/java/library/j-solr1/|tag|http://www.semanlink.net/tag/solr +http://www.ibm.com/developerworks/java/library/j-solr1/|title|Search smarter with Apache Solr, Part 1: Essential features and the Solr schema +http://www.ibm.com/developerworks/java/library/j-solr1/|creationTime|2012-05-15T16:55:20Z +https://code.fb.com/ai-research/pytext-open-source-nlp-framework/|creationDate|2018-12-15 +https://code.fb.com/ai-research/pytext-open-source-nlp-framework/|tag|http://www.semanlink.net/tag/nlp_tools +https://code.fb.com/ai-research/pytext-open-source-nlp-framework/|tag|http://www.semanlink.net/tag/nlp_facebook +https://code.fb.com/ai-research/pytext-open-source-nlp-framework/|title|Open-sourcing PyText for faster NLP development +https://code.fb.com/ai-research/pytext-open-source-nlp-framework/|creationTime|2018-12-15T10:08:46Z +http://www.rashmisinha.com/archives/05_09/tagging-cognitive.html|creationDate|2005-10-03 +http://www.rashmisinha.com/archives/05_09/tagging-cognitive.html|tag|http://www.semanlink.net/tag/tagging +http://www.rashmisinha.com/archives/05_09/tagging-cognitive.html|comment|The beauty of tagging is that it taps into an existing cognitive process without adding much cognitive cost.
+http://www.rashmisinha.com/archives/05_09/tagging-cognitive.html|title|A cognitive analysis of tagging +http://americanradioworks.publicradio.org/features/tomorrows-college/lectures/|creationDate|2012-01-06 +http://americanradioworks.publicradio.org/features/tomorrows-college/lectures/|tag|http://www.semanlink.net/tag/education +http://americanradioworks.publicradio.org/features/tomorrows-college/lectures/|tag|http://www.semanlink.net/tag/jean_paul +http://americanradioworks.publicradio.org/features/tomorrows-college/lectures/|title|Don't Lecture Me American RadioWorks +http://americanradioworks.publicradio.org/features/tomorrows-college/lectures/|creationTime|2012-01-06T21:37:04Z +http://news.bbc.co.uk/sport2/hi/football/world_cup_2010/matches/match_58/default.stm|creationDate|2010-07-03 +http://news.bbc.co.uk/sport2/hi/football/world_cup_2010/matches/match_58/default.stm|tag|http://www.semanlink.net/tag/ghana +http://news.bbc.co.uk/sport2/hi/football/world_cup_2010/matches/match_58/default.stm|tag|http://www.semanlink.net/tag/uruguay +http://news.bbc.co.uk/sport2/hi/football/world_cup_2010/matches/match_58/default.stm|tag|http://www.semanlink.net/tag/match_de_legende +http://news.bbc.co.uk/sport2/hi/football/world_cup_2010/matches/match_58/default.stm|tag|http://www.semanlink.net/tag/coupe_du_monde_2010 +http://news.bbc.co.uk/sport2/hi/football/world_cup_2010/matches/match_58/default.stm|comment|[youtube](https://www.youtube.com/watch?v=tDpx9GGH79I) +http://news.bbc.co.uk/sport2/hi/football/world_cup_2010/matches/match_58/default.stm|title|Uruguay 1-1 Ghana +http://news.bbc.co.uk/sport2/hi/football/world_cup_2010/matches/match_58/default.stm|creationTime|2010-07-03T11:26:53Z +http://news.bbc.co.uk/sport2/hi/football/world_cup_2010/matches/match_58/default.stm|source|BBC +http://www.informationweek.com/news/global-cio/interviews/240002892|creationDate|2012-08-01 +http://www.informationweek.com/news/global-cio/interviews/240002892|tag|http://www.semanlink.net/tag/general_motors +http://www.informationweek.com/news/global-cio/interviews/240002892|tag|http://www.semanlink.net/tag/outsourcing +http://www.informationweek.com/news/global-cio/interviews/240002892|comment|Mott doesn't think GM can be creative or fast enough with outsourced IT +http://www.informationweek.com/news/global-cio/interviews/240002892|title|General Motors Will Slash Outsourcing In IT Overhaul - Global-cio - Executive insights/interviews - Informationweek +http://www.informationweek.com/news/global-cio/interviews/240002892|creationTime|2012-08-01T11:49:46Z +http://lists.w3.org/Archives/Public/www-tag/2012Mar/0220.html|creationDate|2012-04-11 +http://lists.w3.org/Archives/Public/www-tag/2012Mar/0220.html|tag|http://www.semanlink.net/tag/httprange_14 +http://lists.w3.org/Archives/Public/www-tag/2012Mar/0220.html|comment|"httpRange-14 says that if the resource owner cares +to make a distinction, here is how they can make that distinction. It does not say the distinction is necessary. The assumption that +200 implies IR is pure speculation + +"
+http://lists.w3.org/Archives/Public/www-tag/2012Mar/0220.html|title|Re: Classification of ISSUE-57 change proposals - R Fielding +http://lists.w3.org/Archives/Public/www-tag/2012Mar/0220.html|creationTime|2012-04-11T13:44:31Z +http://www.w3.org/2001/sw/rdb2rdf/|creationDate|2010-11-07 +http://www.w3.org/2001/sw/rdb2rdf/|tag|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.w3.org/2001/sw/rdb2rdf/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2001/sw/rdb2rdf/|title|W3C RDB2RDF Working Group +http://www.w3.org/2001/sw/rdb2rdf/|creationTime|2010-11-07T12:57:14Z +https://support.google.com/webmasters/answer/35769?hl=en|creationDate|2013-08-23 +https://support.google.com/webmasters/answer/35769?hl=en|tag|http://www.semanlink.net/tag/tools +https://support.google.com/webmasters/answer/35769?hl=en|tag|http://www.semanlink.net/tag/webmasters_google +https://support.google.com/webmasters/answer/35769?hl=en|title|Webmaster Guidelines - Webmaster Tools Help +https://support.google.com/webmasters/answer/35769?hl=en|creationTime|2013-08-23T14:09:10Z +http://www.thefigtrees.net/lee/blog/2006/11/semantic_web_technologies_in_t.html|creationDate|2006-12-01 +http://www.thefigtrees.net/lee/blog/2006/11/semantic_web_technologies_in_t.html|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.thefigtrees.net/lee/blog/2006/11/semantic_web_technologies_in_t.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2006/11/semantic_web_technologies_in_t.html|tag|http://www.semanlink.net/tag/rdf_bus +http://www.thefigtrees.net/lee/blog/2006/11/semantic_web_technologies_in_t.html|comment|While we support the broad vision of a Semantic World Wide Web, we feel that there are great benefits to be derived from adapting semantic technologies for applications within an enterprise. In particular, we believe that RDF has several very appealing properties that position it as a data format of choice to provide a flexible information bus across heterogeneous applications and throughout the infrastructure layers of an application stack. I AGREE COMPLETELY
+http://www.thefigtrees.net/lee/blog/2006/11/semantic_web_technologies_in_t.html|title|TechnicaLee Speaking: Semantic Web Technologies in the Enterprise +http://maven-java-formatter-plugin.googlecode.com/svn/site/0.3.1/examples.html#Multimodule_Configuration|creationDate|2013-09-28 +http://maven-java-formatter-plugin.googlecode.com/svn/site/0.3.1/examples.html#Multimodule_Configuration|tag|http://www.semanlink.net/tag/maven_tips +http://maven-java-formatter-plugin.googlecode.com/svn/site/0.3.1/examples.html#Multimodule_Configuration|title|Multimodule Configuration / Maven2 Java Formatter Plugin - Examples +http://maven-java-formatter-plugin.googlecode.com/svn/site/0.3.1/examples.html#Multimodule_Configuration|creationTime|2013-09-28T11:09:18Z +http://www.lemonde.fr/afrique/article/2016/02/27/le-benin-veut-distribuer-quatre-millions-de-kits-solaires-en-six-mois_4872961_3212.html|creationDate|2016-02-28 +http://www.lemonde.fr/afrique/article/2016/02/27/le-benin-veut-distribuer-quatre-millions-de-kits-solaires-en-six-mois_4872961_3212.html|tag|http://www.semanlink.net/tag/benin +http://www.lemonde.fr/afrique/article/2016/02/27/le-benin-veut-distribuer-quatre-millions-de-kits-solaires-en-six-mois_4872961_3212.html|tag|http://www.semanlink.net/tag/new_africa +http://www.lemonde.fr/afrique/article/2016/02/27/le-benin-veut-distribuer-quatre-millions-de-kits-solaires-en-six-mois_4872961_3212.html|title|Le Bénin veut distribuer quatre millions de kits solaires en six mois +http://www.lemonde.fr/afrique/article/2016/02/27/le-benin-veut-distribuer-quatre-millions-de-kits-solaires-en-six-mois_4872961_3212.html|creationTime|2016-02-28T13:56:45Z +http://developer.yahoo.com/common/json.html|creationDate|2008-03-15 +http://developer.yahoo.com/common/json.html|tag|http://www.semanlink.net/tag/json +http://developer.yahoo.com/common/json.html|tag|http://www.semanlink.net/tag/web_services +http://developer.yahoo.com/common/json.html|tag|http://www.semanlink.net/tag/yahoo +http://developer.yahoo.com/common/json.html|title|Using JSON with Yahoo! Web Services +http://developer.yahoo.com/common/json.html|creationTime|2008-03-15T01:20:14Z +http://www.ibm.com/developerworks/web/library/wa-aj-tomcat/index.html|creationDate|2011-07-29 +http://www.ibm.com/developerworks/web/library/wa-aj-tomcat/index.html|tag|http://www.semanlink.net/tag/tomcat +http://www.ibm.com/developerworks/web/library/wa-aj-tomcat/index.html|tag|http://www.semanlink.net/tag/jersey +http://www.ibm.com/developerworks/web/library/wa-aj-tomcat/index.html|title|Build a RESTful Web service using Jersey and Apache Tomcat +http://www.ibm.com/developerworks/web/library/wa-aj-tomcat/index.html|creationTime|2011-07-29T00:20:22Z +http://www.elharo.com/blog/software-development/web-development/2005/12/08/post-vs-put/|creationDate|2012-07-12 +http://www.elharo.com/blog/software-development/web-development/2005/12/08/post-vs-put/|tag|http://www.semanlink.net/tag/http +http://www.elharo.com/blog/software-development/web-development/2005/12/08/post-vs-put/|tag|http://www.semanlink.net/tag/elliotte_rusty_harold +http://www.elharo.com/blog/software-development/web-development/2005/12/08/post-vs-put/|tag|http://www.semanlink.net/tag/rest +http://www.elharo.com/blog/software-development/web-development/2005/12/08/post-vs-put/|title|Mokka mit Schlag » POST vs. PUT
+http://www.elharo.com/blog/software-development/web-development/2005/12/08/post-vs-put/|creationTime|2012-07-12T12:16:20Z +http://www.insu.cnrs.fr/node/9519|creationDate|2018-09-18 +http://www.insu.cnrs.fr/node/9519|tag|http://www.semanlink.net/tag/gabon +http://www.insu.cnrs.fr/node/9519|tag|http://www.semanlink.net/tag/histoire_de_la_vie +http://www.insu.cnrs.fr/node/9519|tag|http://www.semanlink.net/tag/fossile +http://www.insu.cnrs.fr/node/9519|tag|http://www.semanlink.net/tag/bacteries +http://www.insu.cnrs.fr/node/9519|title|La plus vieille biodiversité de communauté bactérienne, datée de 2,1 milliards d’années et son implication dans la conservation du biota francevillien +http://www.insu.cnrs.fr/node/9519|creationTime|2018-09-18T20:30:27Z +http://jena.sourceforge.net/ARQ/sparql-remote.html|creationDate|2008-01-04 +http://jena.sourceforge.net/ARQ/sparql-remote.html|tag|http://www.semanlink.net/tag/querying_remote_sparql_services +http://jena.sourceforge.net/ARQ/sparql-remote.html|tag|http://www.semanlink.net/tag/arq +http://jena.sourceforge.net/ARQ/sparql-remote.html|title|ARQ - Querying Remote SPARQL Services +http://jena.sourceforge.net/ARQ/sparql-remote.html|creationTime|2008-01-04T01:10:25Z +https://lejournal.cnrs.fr/videos/la-cite-oubliee-de-loropeni?utm_content=bufferd6b13&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-09-16 +https://lejournal.cnrs.fr/videos/la-cite-oubliee-de-loropeni?utm_content=bufferd6b13&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/burkina_faso +https://lejournal.cnrs.fr/videos/la-cite-oubliee-de-loropeni?utm_content=bufferd6b13&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +https://lejournal.cnrs.fr/videos/la-cite-oubliee-de-loropeni?utm_content=bufferd6b13&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/antiquite_africaine +https://lejournal.cnrs.fr/videos/la-cite-oubliee-de-loropeni?utm_content=bufferd6b13&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/loropeni +https://lejournal.cnrs.fr/videos/la-cite-oubliee-de-loropeni?utm_content=bufferd6b13&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|La cité oubliée de Loropéni CNRS Le journal +https://lejournal.cnrs.fr/videos/la-cite-oubliee-de-loropeni?utm_content=bufferd6b13&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-09-16T16:18:01Z +http://www.ldodds.com/blog/archives/000291.html|creationDate|2006-10-09 +http://www.ldodds.com/blog/archives/000291.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/archives/000291.html|tag|http://www.semanlink.net/tag/rest +http://www.ldodds.com/blog/archives/000291.html|comment|"reducing the ""surface area"" of the application" +http://www.ldodds.com/blog/archives/000291.html|title|Lost Boy: Benefits of Refactoring to REST +http://www.patricksoftwareblog.com/tag/flask-uploads/|creationDate|2018-03-28 +http://www.patricksoftwareblog.com/tag/flask-uploads/|tag|http://www.semanlink.net/tag/flask +http://www.patricksoftwareblog.com/tag/flask-uploads/|title|flask-uploads – Patrick's Software Blog +http://www.patricksoftwareblog.com/tag/flask-uploads/|creationTime|2018-03-28T21:54:41Z +https://www.wired.com/story/ai-pioneer-explains-evolution-neural-networks/|creationDate|2019-05-13
+https://www.wired.com/story/ai-pioneer-explains-evolution-neural-networks/|tag|http://www.semanlink.net/tag/geoffrey_hinton +https://www.wired.com/story/ai-pioneer-explains-evolution-neural-networks/|title|Interview of Geoffrey Hinton WIRED +https://www.wired.com/story/ai-pioneer-explains-evolution-neural-networks/|creationTime|2019-05-13T15:16:02Z +https://stackoverflow.com/questions/41535881/how-do-i-upgrade-to-python-3-6-with-conda|creationDate|2017-05-28 +https://stackoverflow.com/questions/41535881/how-do-i-upgrade-to-python-3-6-with-conda|tag|http://www.semanlink.net/tag/python_install +https://stackoverflow.com/questions/41535881/how-do-i-upgrade-to-python-3-6-with-conda|tag|http://www.semanlink.net/tag/anaconda +https://stackoverflow.com/questions/41535881/how-do-i-upgrade-to-python-3-6-with-conda|tag|http://www.semanlink.net/tag/stack_overflow +https://stackoverflow.com/questions/41535881/how-do-i-upgrade-to-python-3-6-with-conda|title|osx - How do I upgrade to Python 3.6 with conda? - Stack Overflow +https://stackoverflow.com/questions/41535881/how-do-i-upgrade-to-python-3-6-with-conda|creationTime|2017-05-28T18:58:27Z +https://dzone.com/articles/swagger-make-developers-love|creationDate|2017-04-03 +https://dzone.com/articles/swagger-make-developers-love|tag|http://www.semanlink.net/tag/tutorial +https://dzone.com/articles/swagger-make-developers-love|title|Swagger: Make Developers Love Working With Your REST API - DZone Java +https://dzone.com/articles/swagger-make-developers-love|creationTime|2017-04-03T14:10:22Z +http://stackoverflow.com/questions/6439579/what-does-var-foo-foo-assign-a-variable-or-an-empty-object-to-that-va?noredirect=1&lq=1|creationDate|2016-09-20 +http://stackoverflow.com/questions/6439579/what-does-var-foo-foo-assign-a-variable-or-an-empty-object-to-that-va?noredirect=1&lq=1|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/6439579/what-does-var-foo-foo-assign-a-variable-or-an-empty-object-to-that-va?noredirect=1&lq=1|tag|http://www.semanlink.net/tag/javascript +http://stackoverflow.com/questions/6439579/what-does-var-foo-foo-assign-a-variable-or-an-empty-object-to-that-va?noredirect=1&lq=1|title|"What does ""var FOO = FOO {}"" (assign a variable or an empty object to that variable) mean in Javascript? - Stack Overflow" +http://stackoverflow.com/questions/6439579/what-does-var-foo-foo-assign-a-variable-or-an-empty-object-to-that-va?noredirect=1&lq=1|creationTime|2016-09-20T11:40:24Z +http://ruder.io/10-exciting-ideas-of-2018-in-nlp/|creationDate|2018-12-19 +http://ruder.io/10-exciting-ideas-of-2018-in-nlp/|tag|http://www.semanlink.net/tag/sebastian_ruder +http://ruder.io/10-exciting-ideas-of-2018-in-nlp/|tag|http://www.semanlink.net/tag/emnlp_2018 +http://ruder.io/10-exciting-ideas-of-2018-in-nlp/|tag|http://www.semanlink.net/tag/survey +http://ruder.io/10-exciting-ideas-of-2018-in-nlp/|title|10 Exciting Ideas of 2018 in NLP +http://ruder.io/10-exciting-ideas-of-2018-in-nlp/|creationTime|2018-12-19T21:48:10Z +http://www.semanlink.net/doc/2019/05/i_oversaw_the_us_nuclear_power_|creationDate|2019-05-20 +http://www.semanlink.net/doc/2019/05/i_oversaw_the_us_nuclear_power_|tag|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/doc/2019/05/i_oversaw_the_us_nuclear_power_|title|I Oversaw the US Nuclear Power Industry. Now I Think It Should Be Banned. 
+http://www.semanlink.net/doc/2019/05/i_oversaw_the_us_nuclear_power_|bookmarkOf|https://www.commondreams.org/views/2019/05/17/i-oversaw-us-nuclear-power-industry-now-i-think-it-should-be-banned +http://www.semanlink.net/doc/2019/05/i_oversaw_the_us_nuclear_power_|creationTime|2019-05-20T20:18:56Z +http://tuttlesvc.teacherhosting.com/blog/blosxom.cgi/2005/08/22#413|creationDate|2005-11-23 +http://tuttlesvc.teacherhosting.com/blog/blosxom.cgi/2005/08/22#413|tag|http://www.semanlink.net/tag/rdf +http://tuttlesvc.teacherhosting.com/blog/blosxom.cgi/2005/08/22#413|title|The Not-RDF Tax +https://www.wired.com/story/emmanuel-macron-talks-to-wired-about-frances-ai-strategy/|creationDate|2018-03-31 +https://www.wired.com/story/emmanuel-macron-talks-to-wired-about-frances-ai-strategy/|tag|http://www.semanlink.net/tag/rapport_villani_sur_l_ia +https://www.wired.com/story/emmanuel-macron-talks-to-wired-about-frances-ai-strategy/|tag|http://www.semanlink.net/tag/macron +https://www.wired.com/story/emmanuel-macron-talks-to-wired-about-frances-ai-strategy/|title|Emmanuel Macron Q&A: France's President Discusses Artificial Intelligence Strategy WIRED +https://www.wired.com/story/emmanuel-macron-talks-to-wired-about-frances-ai-strategy/|creationTime|2018-03-31T19:49:02Z +https://arxiv.org/abs/1603.01360|creationDate|2018-03-05 +https://arxiv.org/abs/1603.01360|tag|http://www.semanlink.net/tag/guillaume_lample +https://arxiv.org/abs/1603.01360|tag|http://www.semanlink.net/tag/named_entity_recognition +https://arxiv.org/abs/1603.01360|tag|http://www.semanlink.net/tag/nlp_facebook +https://arxiv.org/abs/1603.01360|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1603.01360|arxiv_author|Sandeep Subramanian +https://arxiv.org/abs/1603.01360|arxiv_author|Chris Dyer +https://arxiv.org/abs/1603.01360|arxiv_author|Miguel Ballesteros +https://arxiv.org/abs/1603.01360|arxiv_author|Kazuya Kawakami +https://arxiv.org/abs/1603.01360|arxiv_author|Guillaume Lample +https://arxiv.org/abs/1603.01360|comment|"Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. + +> Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora" +https://arxiv.org/abs/1603.01360|title|[1603.01360] Neural Architectures for Named Entity Recognition +https://arxiv.org/abs/1603.01360|creationTime|2018-03-05T18:40:55Z +https://arxiv.org/abs/1603.01360|arxiv_summary|"State-of-the-art named entity recognition systems rely heavily on +hand-crafted features and domain-specific knowledge in order to learn +effectively from the small, supervised training corpora that are available. In +this paper, we introduce two new neural architectures---one based on +bidirectional LSTMs and conditional random fields, and the other that +constructs and labels segments using a transition-based approach inspired by +shift-reduce parsers. Our models rely on two sources of information about +words: character-based word representations learned from the supervised corpus +and unsupervised word representations learned from unannotated corpora. Our +models obtain state-of-the-art performance in NER in four languages without +resorting to any language-specific knowledge or resources such as gazetteers." 
+https://arxiv.org/abs/1603.01360|arxiv_firstAuthor|Guillaume Lample +https://arxiv.org/abs/1603.01360|arxiv_updated|2016-04-07T15:09:36Z +https://arxiv.org/abs/1603.01360|arxiv_title|Neural Architectures for Named Entity Recognition +https://arxiv.org/abs/1603.01360|arxiv_published|2016-03-04T06:36:29Z +https://arxiv.org/abs/1603.01360|arxiv_num|1603.01360 +http://www.oregongeology.com/|creationDate|2008-05-18 +http://www.oregongeology.com/|tag|http://www.semanlink.net/tag/oregon +http://www.oregongeology.com/|tag|http://www.semanlink.net/tag/geologie +http://www.oregongeology.com/|title|Oregon Department of Geology and Mineral Industries (DOGAMI) Homepage +http://www.oregongeology.com/|creationTime|2008-05-18T23:56:36Z +https://twitter.com/yoavgo/status/1099273902415589376|creationDate|2019-02-24 +https://twitter.com/yoavgo/status/1099273902415589376|tag|http://www.semanlink.net/tag/attention_is_all_you_need +https://twitter.com/yoavgo/status/1099273902415589376|tag|http://www.semanlink.net/tag/yoav_goldberg +https://twitter.com/yoavgo/status/1099273902415589376|title|"(((ل()(ل() 'yoav)))) sur Twitter : ""These explanation slides by Mike Collins on the transformer ..." +https://twitter.com/yoavgo/status/1099273902415589376|creationTime|2019-02-24T14:11:09Z +http://sourceforge.net/projects/wikipedia/|creationDate|2005-10-11 +http://sourceforge.net/projects/wikipedia/|tag|http://www.semanlink.net/tag/wiki_software +http://sourceforge.net/projects/wikipedia/|tag|http://www.semanlink.net/tag/sourceforge +http://sourceforge.net/projects/wikipedia/|title|SourceForge.net: Project Info - MediaWiki +http://www.pbs.org/wgbh/nova/next/tech/farming-with-robotics-automation-and-sensors/|creationDate|2013-07-12 +http://www.pbs.org/wgbh/nova/next/tech/farming-with-robotics-automation-and-sensors/|tag|http://www.semanlink.net/tag/arduino +http://www.pbs.org/wgbh/nova/next/tech/farming-with-robotics-automation-and-sensors/|tag|http://www.semanlink.net/tag/pbs +http://www.pbs.org/wgbh/nova/next/tech/farming-with-robotics-automation-and-sensors/|tag|http://www.semanlink.net/tag/agriculture +http://www.pbs.org/wgbh/nova/next/tech/farming-with-robotics-automation-and-sensors/|tag|http://www.semanlink.net/tag/robotique +http://www.pbs.org/wgbh/nova/next/tech/farming-with-robotics-automation-and-sensors/|tag|http://www.semanlink.net/tag/drones +http://www.pbs.org/wgbh/nova/next/tech/farming-with-robotics-automation-and-sensors/|title|Farms of the Future Will Run on Robots and Drones — NOVA Next PBS +http://www.pbs.org/wgbh/nova/next/tech/farming-with-robotics-automation-and-sensors/|creationTime|2013-07-12T11:41:52Z +http://www.andornot.com/blog/post/Advanced-autocomplete-with-Solr-Ngrams-and-Twitters-typeaheadjs.aspx|creationDate|2015-06-26 +http://www.andornot.com/blog/post/Advanced-autocomplete-with-Solr-Ngrams-and-Twitters-typeaheadjs.aspx|tag|http://www.semanlink.net/tag/solr_autocomplete +http://www.andornot.com/blog/post/Advanced-autocomplete-with-Solr-Ngrams-and-Twitters-typeaheadjs.aspx|title|Advanced autocomplete with Solr Ngrams +http://www.andornot.com/blog/post/Advanced-autocomplete-with-Solr-Ngrams-and-Twitters-typeaheadjs.aspx|creationTime|2015-06-26T17:38:08Z +http://bigbrowser.blog.lemonde.fr/2014/10/01/faut-il-censurer-les-vieilles-oeuvres-considerees-comme-racistes-aujourdhui/|creationDate|2014-10-03 +http://bigbrowser.blog.lemonde.fr/2014/10/01/faut-il-censurer-les-vieilles-oeuvres-considerees-comme-racistes-aujourdhui/|tag|http://www.semanlink.net/tag/censorship 
+http://bigbrowser.blog.lemonde.fr/2014/10/01/faut-il-censurer-les-vieilles-oeuvres-considerees-comme-racistes-aujourdhui/|tag|http://www.semanlink.net/tag/antiracisme +http://bigbrowser.blog.lemonde.fr/2014/10/01/faut-il-censurer-les-vieilles-oeuvres-considerees-comme-racistes-aujourdhui/|title|Faut-il censurer les vieilles œuvres considérées comme racistes aujourd’hui ? Big Browser +http://bigbrowser.blog.lemonde.fr/2014/10/01/faut-il-censurer-les-vieilles-oeuvres-considerees-comme-racistes-aujourdhui/|creationTime|2014-10-03T00:04:13Z +http://web.ing.puc.cl/~jperez/papers/tods-a16-perez.pdf|creationDate|2010-12-15 +http://web.ing.puc.cl/~jperez/papers/tods-a16-perez.pdf|tag|http://www.semanlink.net/tag/sparql +http://web.ing.puc.cl/~jperez/papers/tods-a16-perez.pdf|comment|We identify a large class of SPARQL patterns, defined by imposing a simple and natural syntactic restriction, where the query evaluation problem can be solved more efficiently. This restriction gives rise to the class of well-designed patterns. We show that the evaluation problem is coNP-complete for well-designed patterns. Moreover, we provide several rewriting rules for well-designed patterns whose application may have a considerable impact in the cost of evaluating SPARQL queries. +http://web.ing.puc.cl/~jperez/papers/tods-a16-perez.pdf|title|Semantics and Complexity of SPARQL +http://web.ing.puc.cl/~jperez/papers/tods-a16-perez.pdf|creationTime|2010-12-15T13:28:27Z +http://web.ing.puc.cl/~jperez/papers/tods-a16-perez.pdf|seeAlso|http://web.ing.puc.cl/~jperez/talks/sccc09-slides.pdf +http://eclipsesource.com/blogs/tutorials/egit-tutorial/|creationDate|2015-05-12 +http://eclipsesource.com/blogs/tutorials/egit-tutorial/|tag|http://www.semanlink.net/tag/egit +http://eclipsesource.com/blogs/tutorials/egit-tutorial/|tag|http://www.semanlink.net/tag/tutorial +http://eclipsesource.com/blogs/tutorials/egit-tutorial/|title|EGit Tutorial « EclipseSource Blog +http://eclipsesource.com/blogs/tutorials/egit-tutorial/|creationTime|2015-05-12T00:27:36Z +http://www.zitgist.com/labs/linked_data.html|creationDate|2008-07-19 +http://www.zitgist.com/labs/linked_data.html|tag|http://www.semanlink.net/tag/zitgist +http://www.zitgist.com/labs/linked_data.html|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://www.zitgist.com/labs/linked_data.html|tag|http://www.semanlink.net/tag/link_to_me +http://www.zitgist.com/labs/linked_data.html|tag|http://www.semanlink.net/tag/linked_data +http://www.zitgist.com/labs/linked_data.html|title|Linked Data by Zitgist +http://www.zitgist.com/labs/linked_data.html|creationTime|2008-07-19T14:17:55Z +http://fgiasson.com/blog/index.php/2010/07/05/semantic-components/|creationDate|2010-07-08 +http://fgiasson.com/blog/index.php/2010/07/05/semantic-components/|tag|http://www.semanlink.net/tag/semantic_components +http://fgiasson.com/blog/index.php/2010/07/05/semantic-components/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2010/07/05/semantic-components/|comment|A semantic component is an ontology-driven component, or widget, based on Flex +http://fgiasson.com/blog/index.php/2010/07/05/semantic-components/|title|Semantic Components at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2010/07/05/semantic-components/|creationTime|2010-07-08T15:11:25Z +http://www.paulgraham.com/nerds.html|creationDate|2005-11-24 +http://www.paulgraham.com/nerds.html|tag|http://www.semanlink.net/tag/nerds 
+http://www.paulgraham.com/nerds.html|tag|http://www.semanlink.net/tag/adolescents +http://www.paulgraham.com/nerds.html|tag|http://www.semanlink.net/tag/good +http://www.paulgraham.com/nerds.html|tag|http://www.semanlink.net/tag/paul_graham +http://www.paulgraham.com/nerds.html|tag|http://www.semanlink.net/tag/education +http://www.paulgraham.com/nerds.html|comment|"> Officially the purpose of schools is to teach kids. In fact their primary purpose is to keep kids locked up in one place for a big chunk of the day so adults can get things done. +> +> I'm suspicious of this theory that thirteen-year-old kids are intrinsically messed up. If it's physiological, it should be universal. Are Mongol nomads all nihilists at thirteen? +" +http://www.paulgraham.com/nerds.html|title|Why Nerds are Unpopular +http://usc-isi-i2.github.io/DL4KGS/|creationDate|2018-02-01 +http://usc-isi-i2.github.io/DL4KGS/|tag|http://www.semanlink.net/tag/workshop +http://usc-isi-i2.github.io/DL4KGS/|tag|http://www.semanlink.net/tag/eswc +http://usc-isi-i2.github.io/DL4KGS/|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://usc-isi-i2.github.io/DL4KGS/|comment|> A challenging but paramount task for problems ranging from entity classification to entity recommendation or entity linking is that of learning features representing entities in the knowledge graph (building “knowledge graph embeddings”) that can be fed into machine learning algorithms +http://usc-isi-i2.github.io/DL4KGS/|title|Workshop on Deep Learning for Knowledge Graphs and Semantic Technologies +http://usc-isi-i2.github.io/DL4KGS/|creationTime|2018-02-01T18:09:04Z +https://www.semanticscholar.org/paper/Effective-Word-Representation-for-Named-Entity-Rec-Hsieh-Li/839268385124f03bfd19c3ce44d7935d7c6f13a0|creationDate|2018-03-06 +https://www.semanticscholar.org/paper/Effective-Word-Representation-for-Named-Entity-Rec-Hsieh-Li/839268385124f03bfd19c3ce44d7935d7c6f13a0|tag|http://www.semanlink.net/tag/nlp_stanford +https://www.semanticscholar.org/paper/Effective-Word-Representation-for-Named-Entity-Rec-Hsieh-Li/839268385124f03bfd19c3ce44d7935d7c6f13a0|tag|http://www.semanlink.net/tag/named_entity_recognition +https://www.semanticscholar.org/paper/Effective-Word-Representation-for-Named-Entity-Rec-Hsieh-Li/839268385124f03bfd19c3ce44d7935d7c6f13a0|tag|http://www.semanlink.net/tag/bi_lstm +https://www.semanticscholar.org/paper/Effective-Word-Representation-for-Named-Entity-Rec-Hsieh-Li/839268385124f03bfd19c3ce44d7935d7c6f13a0|comment|> we propose a word representation that includes both the word-level and character-level information +https://www.semanticscholar.org/paper/Effective-Word-Representation-for-Named-Entity-Rec-Hsieh-Li/839268385124f03bfd19c3ce44d7935d7c6f13a0|title|Effective Word Representation for Named Entity Recognition (2017) +https://www.semanticscholar.org/paper/Effective-Word-Representation-for-Named-Entity-Rec-Hsieh-Li/839268385124f03bfd19c3ce44d7935d7c6f13a0|creationTime|2018-03-06T11:08:23Z +http://www.lemonde.fr/festival/article/2015/06/25/et-si-on-ouvrait-les-frontieres_4661969_4415198.html|creationDate|2015-07-05 +http://www.lemonde.fr/festival/article/2015/06/25/et-si-on-ouvrait-les-frontieres_4661969_4415198.html|tag|http://www.semanlink.net/tag/immigration +http://www.lemonde.fr/festival/article/2015/06/25/et-si-on-ouvrait-les-frontieres_4661969_4415198.html|title|Migrants : et si ouvrir les frontières générait de la richesse ? 
+http://www.lemonde.fr/festival/article/2015/06/25/et-si-on-ouvrait-les-frontieres_4661969_4415198.html|creationTime|2015-07-05T12:25:48Z
+http://www.xom.nu/|creationDate|2008-03-06
+http://www.xom.nu/|tag|http://www.semanlink.net/tag/xml
+http://www.xom.nu/|tag|http://www.semanlink.net/tag/java_dev
+http://www.xom.nu/|title|XOM open source (LGPL), tree-based API for processing XML with Java
+http://www.xom.nu/|creationTime|2008-03-06T21:53:34Z
+http://www.programcreek.com/java-api-examples/index.php?api=com.github.jsonldjava.core.JsonLdOptions|creationDate|2016-04-09
+http://www.programcreek.com/java-api-examples/index.php?api=com.github.jsonldjava.core.JsonLdOptions|tag|http://www.semanlink.net/tag/sample_code
+http://www.programcreek.com/java-api-examples/index.php?api=com.github.jsonldjava.core.JsonLdOptions|tag|http://www.semanlink.net/tag/jsonld_jena
+http://www.programcreek.com/java-api-examples/index.php?api=com.github.jsonldjava.core.JsonLdOptions|tag|http://www.semanlink.net/tag/jsonld_java
+http://www.programcreek.com/java-api-examples/index.php?api=com.github.jsonldjava.core.JsonLdOptions|title|Java Code Example com.github.jsonldjava.core.JsonLdOptions
+http://www.programcreek.com/java-api-examples/index.php?api=com.github.jsonldjava.core.JsonLdOptions|creationTime|2016-04-09T18:55:23Z
+http://semanticweb.com/w3c-publishes-linked-data-platform-best-practices-guidelines_b44199|creationDate|2014-08-28
+http://semanticweb.com/w3c-publishes-linked-data-platform-best-practices-guidelines_b44199|tag|http://www.semanlink.net/tag/ldp_w3c
+http://semanticweb.com/w3c-publishes-linked-data-platform-best-practices-guidelines_b44199|title|W3C Publishes Linked Data Platform Best Practices and Guidelines - Semanticweb.com
+http://semanticweb.com/w3c-publishes-linked-data-platform-best-practices-guidelines_b44199|creationTime|2014-08-28T20:34:32Z
+http://arstechnica.com/science/news/2012/04/bugs-pick-up-pesticide-resistance-from-pesticide-eating-bacteria.ars|creationDate|2012-04-27
+http://arstechnica.com/science/news/2012/04/bugs-pick-up-pesticide-resistance-from-pesticide-eating-bacteria.ars|tag|http://www.semanlink.net/tag/pesticide
+http://arstechnica.com/science/news/2012/04/bugs-pick-up-pesticide-resistance-from-pesticide-eating-bacteria.ars|title|Bugs pick up pesticide resistance from pesticide-eating bacteria
+http://arstechnica.com/science/news/2012/04/bugs-pick-up-pesticide-resistance-from-pesticide-eating-bacteria.ars|creationTime|2012-04-27T23:36:28Z
+http://science.nasa.gov/headlines/y2008/10jun_solarprobe.htm?list1065474|creationDate|2008-06-12
+http://science.nasa.gov/headlines/y2008/10jun_solarprobe.htm?list1065474|tag|http://www.semanlink.net/tag/nasa
+http://science.nasa.gov/headlines/y2008/10jun_solarprobe.htm?list1065474|tag|http://www.semanlink.net/tag/soleil
+http://science.nasa.gov/headlines/y2008/10jun_solarprobe.htm?list1065474|tag|http://www.semanlink.net/tag/exploration_spatiale
+http://science.nasa.gov/headlines/y2008/10jun_solarprobe.htm?list1065474|comment|"At closest approach, Solar Probe+ will be 7 million km or 9 solar radii from the sun. There, the spacecraft's carbon-composite heat shield must withstand temperatures greater than 1400° C and survive blasts of radiation at levels not experienced by any previous spacecraft. Naturally, the probe is solar powered.
+The two mysteries prompting this mission are the high temperature of the sun's corona and the puzzling acceleration of the solar wind." +http://science.nasa.gov/headlines/y2008/10jun_solarprobe.htm?list1065474|title|NASA - Solar Probe Plus - Nasa plans to visit the sun +http://science.nasa.gov/headlines/y2008/10jun_solarprobe.htm?list1065474|creationTime|2008-06-12T23:54:44Z +http://nepomuk.semanticdesktop.org/|creationDate|2006-02-22 +http://nepomuk.semanticdesktop.org/|tag|http://www.semanlink.net/tag/semantic_desktop +http://nepomuk.semanticdesktop.org/|comment|"""Networked Environment for Personalized, Ontology-based Management of Unified Knowledge"" NEPOMUK brings together researchers, industrial software developers, and representative industrial users, to develop a comprehensive solution for extending the personal desktop into a collaboration environment which supports both the personal information management and the sharing and exchange across social and organizational relations." +http://nepomuk.semanticdesktop.org/|title|NEPOMUK - The Social Semantic Desktop +https://arxiv.org/pdf/1708.00214.pdf|creationDate|2017-08-04 +https://arxiv.org/pdf/1708.00214.pdf|tag|http://www.semanlink.net/tag/nn_4_nlp +https://arxiv.org/pdf/1708.00214.pdf|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/pdf/1708.00214.pdf|arxiv_author|Ji Ma +https://arxiv.org/pdf/1708.00214.pdf|arxiv_author|Ryan McDonald +https://arxiv.org/pdf/1708.00214.pdf|arxiv_author|Emily Pitler +https://arxiv.org/pdf/1708.00214.pdf|arxiv_author|Jan A. Botha +https://arxiv.org/pdf/1708.00214.pdf|arxiv_author|Slav Petrov +https://arxiv.org/pdf/1708.00214.pdf|arxiv_author|Alex Salcianu +https://arxiv.org/pdf/1708.00214.pdf|arxiv_author|David Weiss +https://arxiv.org/pdf/1708.00214.pdf|arxiv_author|Anton Bakalov +https://arxiv.org/pdf/1708.00214.pdf|comment|"google guys: + +> We show that small and shallow feed- forward neural networks can achieve near state-of-the-art results on a range of unstructured and structured language processing tasks while being considerably cheaper in memory and computational requirements than deep recurrent models. Motivated by resource-constrained environments like mobile phones, we showcase simple techniques for obtaining such small neural network models, and investigate different tradeoffs when deciding how to allocate a small memory budget. +" +https://arxiv.org/pdf/1708.00214.pdf|title|[1708.00214] Natural Language Processing with Small Feed-Forward Networks +https://arxiv.org/pdf/1708.00214.pdf|creationTime|2017-08-04T00:43:05Z +https://arxiv.org/pdf/1708.00214.pdf|arxiv_summary|"We show that small and shallow feed-forward neural networks can achieve near +state-of-the-art results on a range of unstructured and structured language +processing tasks while being considerably cheaper in memory and computational +requirements than deep recurrent models. Motivated by resource-constrained +environments like mobile phones, we showcase simple techniques for obtaining +such small neural network models, and investigate different tradeoffs when +deciding how to allocate a small memory budget." +https://arxiv.org/pdf/1708.00214.pdf|arxiv_firstAuthor|Jan A. 
Botha +https://arxiv.org/pdf/1708.00214.pdf|arxiv_updated|2017-08-01T09:13:44Z +https://arxiv.org/pdf/1708.00214.pdf|arxiv_title|Natural Language Processing with Small Feed-Forward Networks +https://arxiv.org/pdf/1708.00214.pdf|arxiv_published|2017-08-01T09:13:44Z +https://arxiv.org/pdf/1708.00214.pdf|arxiv_num|1708.00214 +http://news.bbc.co.uk/earth/hi/earth_news/newsid_9306000/9306399.stm|creationDate|2011-01-02 +http://news.bbc.co.uk/earth/hi/earth_news/newsid_9306000/9306399.stm|tag|http://www.semanlink.net/tag/niger +http://news.bbc.co.uk/earth/hi/earth_news/newsid_9306000/9306399.stm|tag|http://www.semanlink.net/tag/animal +http://news.bbc.co.uk/earth/hi/earth_news/newsid_9306000/9306399.stm|title|BBC - Earth News - 'Ghostly' Saharan cheetah filmed in Niger, Africa +http://news.bbc.co.uk/earth/hi/earth_news/newsid_9306000/9306399.stm|creationTime|2011-01-02T13:16:03Z +http://news.bbc.co.uk/earth/hi/earth_news/newsid_9306000/9306399.stm|source|BBC +https://blog.acolyer.org/2017/01/31/european-union-regulations-on-algorithmic-decision-making-and-a-right-to-explanation/|creationDate|2017-11-23 +https://blog.acolyer.org/2017/01/31/european-union-regulations-on-algorithmic-decision-making-and-a-right-to-explanation/|tag|http://www.semanlink.net/tag/right_to_explanation +https://blog.acolyer.org/2017/01/31/european-union-regulations-on-algorithmic-decision-making-and-a-right-to-explanation/|title|European Union regulations on algorithmic decision making and a “right to explanation” the morning paper +https://blog.acolyer.org/2017/01/31/european-union-regulations-on-algorithmic-decision-making-and-a-right-to-explanation/|creationTime|2017-11-23T00:05:05Z +http://www.pcworld.com/article/2106065/sxsw-starts-out-with-spotlight-on-security-as-googles-schmidt-sounds-off-on-nsa.html|creationDate|2014-03-08 +http://www.pcworld.com/article/2106065/sxsw-starts-out-with-spotlight-on-security-as-googles-schmidt-sounds-off-on-nsa.html|tag|http://www.semanlink.net/tag/google +http://www.pcworld.com/article/2106065/sxsw-starts-out-with-spotlight-on-security-as-googles-schmidt-sounds-off-on-nsa.html|tag|http://www.semanlink.net/tag/nsa +http://www.pcworld.com/article/2106065/sxsw-starts-out-with-spotlight-on-security-as-googles-schmidt-sounds-off-on-nsa.html|tag|http://www.semanlink.net/tag/south_by_southwest +http://www.pcworld.com/article/2106065/sxsw-starts-out-with-spotlight-on-security-as-googles-schmidt-sounds-off-on-nsa.html|title|SXSW starts out with spotlight on security, as Google's Schmidt sounds off on NSA PCWorld +http://www.pcworld.com/article/2106065/sxsw-starts-out-with-spotlight-on-security-as-googles-schmidt-sounds-off-on-nsa.html|creationTime|2014-03-08T10:39:14Z +http://code.google.com/p/backplanejs/wiki/Rdfj|creationDate|2012-02-21 +http://code.google.com/p/backplanejs/wiki/Rdfj|tag|http://www.semanlink.net/tag/rdf_in_json +http://code.google.com/p/backplanejs/wiki/Rdfj|tag|http://www.semanlink.net/tag/rdfj +http://code.google.com/p/backplanejs/wiki/Rdfj|tag|http://www.semanlink.net/tag/backplanejs +http://code.google.com/p/backplanejs/wiki/Rdfj|comment|"RDFj is a set of conventions for
+- constructing JSON objects in such a way that they can easily be interpreted as RDF;
+- taking RDF and arriving at canonical JSON objects. +" +http://code.google.com/p/backplanejs/wiki/Rdfj|title|Rdfj +http://code.google.com/p/backplanejs/wiki/Rdfj|creationTime|2012-02-21T10:13:48Z +https://github.com/huggingface/pytorch-pretrained-BERT|creationDate|2019-03-15 +https://github.com/huggingface/pytorch-pretrained-BERT|tag|http://www.semanlink.net/tag/pytorch +https://github.com/huggingface/pytorch-pretrained-BERT|tag|http://www.semanlink.net/tag/github_project +https://github.com/huggingface/pytorch-pretrained-BERT|tag|http://www.semanlink.net/tag/bert +https://github.com/huggingface/pytorch-pretrained-BERT|title|huggingface/pytorch-pretrained-BERT: The Big-&-Extending-Repository-of-Transformers: Pretrained PyTorch models for Google's BERT, OpenAI GPT & GPT-2, Google/CMU Transformer-XL. +https://github.com/huggingface/pytorch-pretrained-BERT|creationTime|2019-03-15T22:38:21Z +https://www.lemonde.fr/sciences/article/2018/11/23/decouverte-au-bresil-d-une-construction-de-termites-vaste-comme-la-grande-bretagne_5387634_1650684.html|creationDate|2018-11-24 +https://www.lemonde.fr/sciences/article/2018/11/23/decouverte-au-bresil-d-une-construction-de-termites-vaste-comme-la-grande-bretagne_5387634_1650684.html|tag|http://www.semanlink.net/tag/termite +https://www.lemonde.fr/sciences/article/2018/11/23/decouverte-au-bresil-d-une-construction-de-termites-vaste-comme-la-grande-bretagne_5387634_1650684.html|tag|http://www.semanlink.net/tag/curiosite_naturelle +https://www.lemonde.fr/sciences/article/2018/11/23/decouverte-au-bresil-d-une-construction-de-termites-vaste-comme-la-grande-bretagne_5387634_1650684.html|title|Des termites ont construit une « structure » aussi vaste que la Grande-Bretagne +https://www.lemonde.fr/sciences/article/2018/11/23/decouverte-au-bresil-d-une-construction-de-termites-vaste-comme-la-grande-bretagne_5387634_1650684.html|creationTime|2018-11-24T00:03:02Z +https://twitter.com/python_tip/status/1029632055284363264|creationDate|2018-08-16 +https://twitter.com/python_tip/status/1029632055284363264|tag|http://www.semanlink.net/tag/python_tips +https://twitter.com/python_tip/status/1029632055284363264|title|"Daily Python Tip sur Twitter : ""Wanna know which line of your function is eating all the time? 
Measure it with #lprun:… """ +https://twitter.com/python_tip/status/1029632055284363264|creationTime|2018-08-16T11:40:04Z +http://u2.gmu.edu:8080/dspace/bitstream/1920/454/1/URSW05_PR-OWL.pdf|creationDate|2006-09-07 +http://u2.gmu.edu:8080/dspace/bitstream/1920/454/1/URSW05_PR-OWL.pdf|tag|http://www.semanlink.net/tag/owl +http://u2.gmu.edu:8080/dspace/bitstream/1920/454/1/URSW05_PR-OWL.pdf|tag|http://www.semanlink.net/tag/reseaux_bayesiens +http://u2.gmu.edu:8080/dspace/bitstream/1920/454/1/URSW05_PR-OWL.pdf|title|PR-OWL: A Bayesian Ontology Language for the Semantic Web +http://dev.eclipse.org/newslists/news.eclipse.webtools/|creationDate|2007-11-12 +http://dev.eclipse.org/newslists/news.eclipse.webtools/|tag|http://www.semanlink.net/tag/wtp +http://dev.eclipse.org/newslists/news.eclipse.webtools/|tag|http://www.semanlink.net/tag/eclipse +http://dev.eclipse.org/newslists/news.eclipse.webtools/|tag|http://www.semanlink.net/tag/mailing_list +http://dev.eclipse.org/newslists/news.eclipse.webtools/|title|Eclipse Archives - webtools +http://dev.eclipse.org/newslists/news.eclipse.webtools/|creationTime|2007-11-12T01:16:54Z +https://www.youtube.com/watch?v=Yr1mOzC93xs|creationDate|2018-09-28 +https://www.youtube.com/watch?v=Yr1mOzC93xs|tag|http://www.semanlink.net/tag/yoshua_bengio +https://www.youtube.com/watch?v=Yr1mOzC93xs|tag|http://www.semanlink.net/tag/consciousness_prior +https://www.youtube.com/watch?v=Yr1mOzC93xs|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=Yr1mOzC93xs|tag|http://www.semanlink.net/tag/deep_learning +https://www.youtube.com/watch?v=Yr1mOzC93xs|tag|http://www.semanlink.net/tag/human_level_ai +https://www.youtube.com/watch?v=Yr1mOzC93xs|comment|"> **What's wrong with our unsupervised training objectives ? They are in pixel space rather than in abstract space** + +> Many more entropy bits in acoustics details than linguistic content. + +Related to [this paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)" +https://www.youtube.com/watch?v=Yr1mOzC93xs|relatedDoc|https://arxiv.org/abs/1709.08568 +https://www.youtube.com/watch?v=Yr1mOzC93xs|title|From Deep Learning of Disentangled Representations to Higher-level Cognition - YouTube +https://www.youtube.com/watch?v=Yr1mOzC93xs|creationTime|2018-09-28T22:21:15Z +http://rdf-translator.appspot.com/|creationDate|2012-08-27 +http://rdf-translator.appspot.com/|tag|http://www.semanlink.net/tag/rdf_tools +http://rdf-translator.appspot.com/|tag|http://www.semanlink.net/tag/converter +http://rdf-translator.appspot.com/|comment|RDF Translator is a multi-format conversion tool for structured markup. It provides translations between data formats ranging from RDF/XML to RDFa or Microdata. The service allows for conversions triggered either by URI or by direct text input. Furthermore it comes with a straightforward REST API for developers. 
+http://rdf-translator.appspot.com/|title|RDF Translator +http://rdf-translator.appspot.com/|creationTime|2012-08-27T12:33:57Z +http://static.flickr.com/22/35163080_a32ed821ae.jpg|creationDate|2006-03-30 +http://static.flickr.com/22/35163080_a32ed821ae.jpg|tag|http://www.semanlink.net/tag/maria +http://static.flickr.com/22/35163080_a32ed821ae.jpg|title|Maria sur le fleuve Niger +https://medium.com/welcome-to-thefamily/les-cinq-%C3%A9tapes-du-d%C3%A9ni-a7a06072c9fc|creationDate|2015-04-04 +https://medium.com/welcome-to-thefamily/les-cinq-%C3%A9tapes-du-d%C3%A9ni-a7a06072c9fc|tag|http://www.semanlink.net/tag/digital_economy +https://medium.com/welcome-to-thefamily/les-cinq-%C3%A9tapes-du-d%C3%A9ni-a7a06072c9fc|comment|"A chacune des cinq étapes de la transition numérique d’une filière, +il y a de bonnes raisons de se dire “jusqu’ici, tout va bien”." +https://medium.com/welcome-to-thefamily/les-cinq-%C3%A9tapes-du-d%C3%A9ni-a7a06072c9fc|title|Les cinq étapes du déni +https://medium.com/welcome-to-thefamily/les-cinq-%C3%A9tapes-du-d%C3%A9ni-a7a06072c9fc|creationTime|2015-04-04T23:12:58Z +http://developer.mozilla.org/fr/docs/Migration_d'applications_d'Internet_Explorer_vers_Mozilla#Diff.C3.A9rences_dans_l.27ex.C3.A9cution_de_JavaScript|creationDate|2007-11-30 +http://developer.mozilla.org/fr/docs/Migration_d'applications_d'Internet_Explorer_vers_Mozilla#Diff.C3.A9rences_dans_l.27ex.C3.A9cution_de_JavaScript|tag|http://www.semanlink.net/tag/internet_explorer +http://developer.mozilla.org/fr/docs/Migration_d'applications_d'Internet_Explorer_vers_Mozilla#Diff.C3.A9rences_dans_l.27ex.C3.A9cution_de_JavaScript|tag|http://www.semanlink.net/tag/compatibilite_javascript +http://developer.mozilla.org/fr/docs/Migration_d'applications_d'Internet_Explorer_vers_Mozilla#Diff.C3.A9rences_dans_l.27ex.C3.A9cution_de_JavaScript|tag|http://www.semanlink.net/tag/mozilla +http://developer.mozilla.org/fr/docs/Migration_d'applications_d'Internet_Explorer_vers_Mozilla#Diff.C3.A9rences_dans_l.27ex.C3.A9cution_de_JavaScript|title|Migration d'applications d'Internet Explorer vers Mozilla - MDC +http://developer.mozilla.org/fr/docs/Migration_d'applications_d'Internet_Explorer_vers_Mozilla#Diff.C3.A9rences_dans_l.27ex.C3.A9cution_de_JavaScript|creationTime|2007-11-30T16:37:09Z +http://fr.wikipedia.org/wiki/Jour_de_col%C3%A8re|creationDate|2006-06-26 +http://fr.wikipedia.org/wiki/Jour_de_col%C3%A8re|tag|http://www.semanlink.net/tag/film_danois +http://fr.wikipedia.org/wiki/Jour_de_col%C3%A8re|tag|http://www.semanlink.net/tag/sorcellerie +http://fr.wikipedia.org/wiki/Jour_de_col%C3%A8re|comment|"Drame danois de Carl Theodor Dreyer (1943) + +" +http://fr.wikipedia.org/wiki/Jour_de_col%C3%A8re|title|Dies iræ (Jour de colère) +https://groups.google.com/forum/#!topic/hypermedia-web/HHXFoBh95ZQ|creationDate|2015-02-19 +https://groups.google.com/forum/#!topic/hypermedia-web/HHXFoBh95ZQ|tag|http://www.semanlink.net/tag/dietrich_schulten +https://groups.google.com/forum/#!topic/hypermedia-web/HHXFoBh95ZQ|tag|http://www.semanlink.net/tag/hateoas +https://groups.google.com/forum/#!topic/hypermedia-web/HHXFoBh95ZQ|title|Follow your Nose in Hypermedia APIs? 
+https://groups.google.com/forum/#!topic/hypermedia-web/HHXFoBh95ZQ|creationTime|2015-02-19T14:50:30Z +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/africa/5341474.stm|creationDate|2006-09-15 +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/africa/5341474.stm|tag|http://www.semanlink.net/tag/lord_s_resistance_army +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/africa/5341474.stm|tag|http://www.semanlink.net/tag/ouganda +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/africa/5341474.stm|title|BBC NEWS - LRA victims seek peace with past +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/africa/5341474.stm|source|BBC +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/africa/5341474.stm|date|2006-09-13 +https://en.wikipedia.org/wiki/Psusennes_I|creationDate|2019-03-19 +https://en.wikipedia.org/wiki/Psusennes_I|tag|http://www.semanlink.net/tag/pharaon +https://en.wikipedia.org/wiki/Psusennes_I|title|Psusennes I +https://en.wikipedia.org/wiki/Psusennes_I|creationTime|2019-03-19T22:12:27Z +http://www.newscientist.com/article/mg20327151.600-memristor-minds-the-future-of-artificial-intelligence.html|creationDate|2009-07-15 +http://www.newscientist.com/article/mg20327151.600-memristor-minds-the-future-of-artificial-intelligence.html|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.newscientist.com/article/mg20327151.600-memristor-minds-the-future-of-artificial-intelligence.html|tag|http://www.semanlink.net/tag/memristor +http://www.newscientist.com/article/mg20327151.600-memristor-minds-the-future-of-artificial-intelligence.html|title|Memristor minds: The future of artificial intelligence - New Scientist +http://www.newscientist.com/article/mg20327151.600-memristor-minds-the-future-of-artificial-intelligence.html|creationTime|2009-07-15T22:34:38Z +http://norvig.com/spell-correct.html|creationDate|2017-10-25 +http://norvig.com/spell-correct.html|tag|http://www.semanlink.net/tag/spellchecker +http://norvig.com/spell-correct.html|tag|http://www.semanlink.net/tag/textblob +http://norvig.com/spell-correct.html|title|How to Write a Spelling Corrector (Peter Norvig) +http://norvig.com/spell-correct.html|creationTime|2017-10-25T23:48:46Z +https://cloud.google.com/getting-started/|creationDate|2017-07-12 +https://cloud.google.com/getting-started/|tag|http://www.semanlink.net/tag/tutorial +https://cloud.google.com/getting-started/|tag|http://www.semanlink.net/tag/google_cloud_platform +https://cloud.google.com/getting-started/|title|How to get started with GCP    Google Cloud Platform +https://cloud.google.com/getting-started/|creationTime|2017-07-12T16:52:19Z +https://www.lemonde.fr/pixels/article/2019/02/06/intelligence-artificielle-deepmind-s-interesse-au-jeu-de-cartes-francais-hanabi_5420186_4408996.html|creationDate|2019-02-07 +https://www.lemonde.fr/pixels/article/2019/02/06/intelligence-artificielle-deepmind-s-interesse-au-jeu-de-cartes-francais-hanabi_5420186_4408996.html|tag|http://www.semanlink.net/tag/jeu +https://www.lemonde.fr/pixels/article/2019/02/06/intelligence-artificielle-deepmind-s-interesse-au-jeu-de-cartes-francais-hanabi_5420186_4408996.html|tag|http://www.semanlink.net/tag/google_deepmind +https://www.lemonde.fr/pixels/article/2019/02/06/intelligence-artificielle-deepmind-s-interesse-au-jeu-de-cartes-francais-hanabi_5420186_4408996.html|title|Intelligence artificielle : DeepMind s’intéresse au jeu de cartes français Hanabi 
+https://www.lemonde.fr/pixels/article/2019/02/06/intelligence-artificielle-deepmind-s-interesse-au-jeu-de-cartes-francais-hanabi_5420186_4408996.html|creationTime|2019-02-07T01:39:52Z +http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.3382|creationDate|2011-10-31 +http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.3382|tag|http://www.semanlink.net/tag/automobile +http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.3382|tag|http://www.semanlink.net/tag/configuration +http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.3382|tag|http://www.semanlink.net/tag/daimler +http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.3382|comment|by Andreas Kaiser , Wolfgang Küchlin +http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.3382|title|CiteSeerX — Automotive Product Documentation +http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.3382|creationTime|2011-10-31T16:55:10Z +http://www.w3.org/DesignIssues/LinkedData.html|creationDate|2007-01-02 +http://www.w3.org/DesignIssues/LinkedData.html|tag|http://www.semanlink.net/tag/linked_data +http://www.w3.org/DesignIssues/LinkedData.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/DesignIssues/LinkedData.html|comment|"The Semantic Web isn't just about putting data on the web. It is about making links, so that a person or machine can explore the web of data.  With linked data, when you have some of it, you can find other, related, data.
+Like the web of hypertext, the web of data is constructed with documents on the web. However, unlike the web of hypertext, where links are relationship anchors in hypertext documents written in HTML, for data the links are between arbitrary things described by RDF. The URIs identify any kind of object or concept. But for HTML or RDF, the same expectations apply to make the web grow:
+
+1. Use URIs as names for things
+2. Use HTTP URIs so that people can look up those names.
+3. When someone looks up a URI, provide useful information.
+4. Include links to other URIs, so that they can discover more things.
+
+Simple. In fact, though, a surprising amount of data isn't linked in 2006, because of problems with one or more of the steps. This article discusses solutions to these problems, details of implementation, and factors affecting choices about how you publish your data. +" +http://www.w3.org/DesignIssues/LinkedData.html|title|Linked Data - Design Issues +http://jena.hpl.hp.com/juc2006/proceedings.html|creationDate|2006-05-17 +http://jena.hpl.hp.com/juc2006/proceedings.html|tag|http://www.semanlink.net/tag/jena_user_conference +http://jena.hpl.hp.com/juc2006/proceedings.html|title|2006 Jena User Conference - proceedings +https://github.com/wabyking/TextClassificationBenchmark|creationDate|2018-02-28 +https://github.com/wabyking/TextClassificationBenchmark|tag|http://www.semanlink.net/tag/pytorch +https://github.com/wabyking/TextClassificationBenchmark|tag|http://www.semanlink.net/tag/benchmark +https://github.com/wabyking/TextClassificationBenchmark|tag|http://www.semanlink.net/tag/github_project +https://github.com/wabyking/TextClassificationBenchmark|tag|http://www.semanlink.net/tag/nlp_text_classification +https://github.com/wabyking/TextClassificationBenchmark|tag|http://www.semanlink.net/tag/python_sample_code +https://github.com/wabyking/TextClassificationBenchmark|tag|http://www.semanlink.net/tag/nlp_sample_code +https://github.com/wabyking/TextClassificationBenchmark|title|A Benchmark of Text Classification in PyTorch +https://github.com/wabyking/TextClassificationBenchmark|creationTime|2018-02-28T23:52:55Z +https://www.quantamagazine.org/20141218-machine-intelligence-cracks-genetic-controls/|creationDate|2014-12-20 +https://www.quantamagazine.org/20141218-machine-intelligence-cracks-genetic-controls/|tag|http://www.semanlink.net/tag/genetic_data +https://www.quantamagazine.org/20141218-machine-intelligence-cracks-genetic-controls/|tag|http://www.semanlink.net/tag/adn +https://www.quantamagazine.org/20141218-machine-intelligence-cracks-genetic-controls/|tag|http://www.semanlink.net/tag/machine_learning +https://www.quantamagazine.org/20141218-machine-intelligence-cracks-genetic-controls/|title|Machine Learning Reveals Genetic Control System Quanta Magazine +https://www.quantamagazine.org/20141218-machine-intelligence-cracks-genetic-controls/|creationTime|2014-12-20T10:41:21Z +http://www.lemonde.fr/planete/article/2010/05/26/un-champignon-menace-la-production-mondiale-de-ble_1363533_3244.html|creationDate|2010-05-28 +http://www.lemonde.fr/planete/article/2010/05/26/un-champignon-menace-la-production-mondiale-de-ble_1363533_3244.html|tag|http://www.semanlink.net/tag/ble +http://www.lemonde.fr/planete/article/2010/05/26/un-champignon-menace-la-production-mondiale-de-ble_1363533_3244.html|tag|http://www.semanlink.net/tag/champignon +http://www.lemonde.fr/planete/article/2010/05/26/un-champignon-menace-la-production-mondiale-de-ble_1363533_3244.html|tag|http://www.semanlink.net/tag/menace +http://www.lemonde.fr/planete/article/2010/05/26/un-champignon-menace-la-production-mondiale-de-ble_1363533_3244.html|comment|Le blé représente 20 % de la ration alimentaire que la population mondiale consomme chaque jour +http://www.lemonde.fr/planete/article/2010/05/26/un-champignon-menace-la-production-mondiale-de-ble_1363533_3244.html|title|Un champignon menace la production mondiale de blé +http://www.lemonde.fr/planete/article/2010/05/26/un-champignon-menace-la-production-mondiale-de-ble_1363533_3244.html|creationTime|2010-05-28T00:53:15Z 
+http://www.lemonde.fr/planete/article/2010/05/26/un-champignon-menace-la-production-mondiale-de-ble_1363533_3244.html|source|Le Monde +http://web.archive.org/web/19981202015816/www.hypersolutions.fr/news.ssi|creationDate|2007-11-13 +http://web.archive.org/web/19981202015816/www.hypersolutions.fr/news.ssi|tag|http://www.semanlink.net/tag/linkto_semanlink +http://web.archive.org/web/19981202015816/www.hypersolutions.fr/news.ssi|tag|http://www.semanlink.net/tag/hypersolutions +http://web.archive.org/web/19981202015816/www.hypersolutions.fr/news.ssi|tag|http://www.semanlink.net/tag/archive +http://web.archive.org/web/19981202015816/www.hypersolutions.fr/news.ssi|comment|Archive datée de 1998 mentionnant Semanlink +http://web.archive.org/web/19981202015816/www.hypersolutions.fr/news.ssi|title|Nouveautés sur le site hyperSOLutions +http://web.archive.org/web/19981202015816/www.hypersolutions.fr/news.ssi|creationTime|2007-11-13T22:50:45Z +http://www.sersc.org/journals/IJSEIA/vol10_no2_2016/8.pdf|creationDate|2017-10-01 +http://www.sersc.org/journals/IJSEIA/vol10_no2_2016/8.pdf|tag|http://www.semanlink.net/tag/word_embedding +http://www.sersc.org/journals/IJSEIA/vol10_no2_2016/8.pdf|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.sersc.org/journals/IJSEIA/vol10_no2_2016/8.pdf|tag|http://www.semanlink.net/tag/using_word_embedding +http://www.sersc.org/journals/IJSEIA/vol10_no2_2016/8.pdf|tag|http://www.semanlink.net/tag/conditional_random_field +http://www.sersc.org/journals/IJSEIA/vol10_no2_2016/8.pdf|comment|"Uses word embeddings as features for named entity recognition (NER) training, and CRF as learning algorithm +" +http://www.sersc.org/journals/IJSEIA/vol10_no2_2016/8.pdf|title|Named Entity Recognition using Word Embedding as a Feature (2016) +http://www.sersc.org/journals/IJSEIA/vol10_no2_2016/8.pdf|creationTime|2017-10-01T19:20:07Z +http://dannyayers.com/2006/07/13/the-semantic-web|creationDate|2006-07-13 +http://dannyayers.com/2006/07/13/the-semantic-web|tag|http://www.semanlink.net/tag/tagging +http://dannyayers.com/2006/07/13/the-semantic-web|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2006/07/13/the-semantic-web|comment|highlights the overlaps between folksonomies and ontologies +http://dannyayers.com/2006/07/13/the-semantic-web|title|"The Semantic Web Revisited Danny Ayers about ""The Semantic Web Revisited""" +http://wiki.goodrelations-vocabulary.org/Documentation/Product_features|creationDate|2012-11-12 +http://wiki.goodrelations-vocabulary.org/Documentation/Product_features|tag|http://www.semanlink.net/tag/product_description +http://wiki.goodrelations-vocabulary.org/Documentation/Product_features|tag|http://www.semanlink.net/tag/goodrelations +http://wiki.goodrelations-vocabulary.org/Documentation/Product_features|tag|http://www.semanlink.net/tag/configuration_ontology +http://wiki.goodrelations-vocabulary.org/Documentation/Product_features|title|Documentation/Product features - GoodRelations Wiki +http://wiki.goodrelations-vocabulary.org/Documentation/Product_features|creationTime|2012-11-12T18:45:41Z +http://www.freesoft.org/CIE/RFC/2068/147.htm|creationDate|2012-04-24 +http://www.freesoft.org/CIE/RFC/2068/147.htm|tag|http://www.semanlink.net/tag/content_negotiation +http://www.freesoft.org/CIE/RFC/2068/147.htm|tag|http://www.semanlink.net/tag/http_cache +http://www.freesoft.org/CIE/RFC/2068/147.htm|title|13.6 Caching Negotiated Responses +http://www.freesoft.org/CIE/RFC/2068/147.htm|creationTime|2012-04-24T17:33:45Z 
+https://thecorrespondent.com/4664/why-do-the-poor-make-such-poor-decisions/179307480-39a74caf|creationDate|2017-02-12 +https://thecorrespondent.com/4664/why-do-the-poor-make-such-poor-decisions/179307480-39a74caf|tag|http://www.semanlink.net/tag/guaranteed_basic_income +https://thecorrespondent.com/4664/why-do-the-poor-make-such-poor-decisions/179307480-39a74caf|tag|http://www.semanlink.net/tag/pauvrete +https://thecorrespondent.com/4664/why-do-the-poor-make-such-poor-decisions/179307480-39a74caf|title|Why do the poor make such poor decisions? +https://thecorrespondent.com/4664/why-do-the-poor-make-such-poor-decisions/179307480-39a74caf|creationTime|2017-02-12T12:57:58Z +http://blog.pinboard.in/2011/11/the_social_graph_is_neither/|creationDate|2012-05-10 +http://blog.pinboard.in/2011/11/the_social_graph_is_neither/|tag|http://www.semanlink.net/tag/rigolo +http://blog.pinboard.in/2011/11/the_social_graph_is_neither/|tag|http://www.semanlink.net/tag/social_graph +http://blog.pinboard.in/2011/11/the_social_graph_is_neither/|tag|http://www.semanlink.net/tag/social_networks +http://blog.pinboard.in/2011/11/the_social_graph_is_neither/|title|The Social Graph is Neither (Pinboard Blog) +http://blog.pinboard.in/2011/11/the_social_graph_is_neither/|creationTime|2012-05-10T01:59:57Z +http://www.semanticweb.com/on/semantic_enterprise_what_are_the_gorillas_doing_oracle_ibm_hp_cisco_microsoft_and_sap_168973.asp|creationDate|2010-09-17 +http://www.semanticweb.com/on/semantic_enterprise_what_are_the_gorillas_doing_oracle_ibm_hp_cisco_microsoft_and_sap_168973.asp|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.semanticweb.com/on/semantic_enterprise_what_are_the_gorillas_doing_oracle_ibm_hp_cisco_microsoft_and_sap_168973.asp|tag|http://www.semanlink.net/tag/ibm +http://www.semanticweb.com/on/semantic_enterprise_what_are_the_gorillas_doing_oracle_ibm_hp_cisco_microsoft_and_sap_168973.asp|tag|http://www.semanlink.net/tag/hp +http://www.semanticweb.com/on/semantic_enterprise_what_are_the_gorillas_doing_oracle_ibm_hp_cisco_microsoft_and_sap_168973.asp|tag|http://www.semanlink.net/tag/oracle +http://www.semanticweb.com/on/semantic_enterprise_what_are_the_gorillas_doing_oracle_ibm_hp_cisco_microsoft_and_sap_168973.asp|tag|http://www.semanlink.net/tag/microsoft +http://www.semanticweb.com/on/semantic_enterprise_what_are_the_gorillas_doing_oracle_ibm_hp_cisco_microsoft_and_sap_168973.asp|title|Semantic Enterprise: What Are The Gorillas Doing? 
(Oracle, IBM, HP, Cisco, Microsoft and SAP) - Semantic Web +http://www.semanticweb.com/on/semantic_enterprise_what_are_the_gorillas_doing_oracle_ibm_hp_cisco_microsoft_and_sap_168973.asp|creationTime|2010-09-17T18:28:49Z +http://maven.apache.org/guides/introduction/introduction-to-the-standard-directory-layout.html|creationDate|2014-06-23 +http://maven.apache.org/guides/introduction/introduction-to-the-standard-directory-layout.html|tag|http://www.semanlink.net/tag/maven +http://maven.apache.org/guides/introduction/introduction-to-the-standard-directory-layout.html|title|Maven - Introduction to the Standard Directory Layout +http://maven.apache.org/guides/introduction/introduction-to-the-standard-directory-layout.html|creationTime|2014-06-23T11:41:04Z +https://www.quora.com/Data-Science-Can-machine-learning-be-used-for-time-series-analysis|creationDate|2017-06-15 +https://www.quora.com/Data-Science-Can-machine-learning-be-used-for-time-series-analysis|tag|http://www.semanlink.net/tag/time_series +https://www.quora.com/Data-Science-Can-machine-learning-be-used-for-time-series-analysis|title|Data Science: Can machine learning be used for time-series analysis? - Quora +https://www.quora.com/Data-Science-Can-machine-learning-be-used-for-time-series-analysis|creationTime|2017-06-15T13:55:18Z +http://perian.org/|creationDate|2006-10-24 +http://perian.org/|tag|http://www.semanlink.net/tag/quicktime +http://perian.org/|comment|Free plugin that enables QuickTime to play almost every popular video format (including Divx) +http://perian.org/|title|Perian - A swiss-army knife for QuickTime +http://www.facebook.com/group.php?gid=19352893701|creationDate|2008-07-14 +http://www.facebook.com/group.php?gid=19352893701|tag|http://www.semanlink.net/tag/dimitris +http://www.facebook.com/group.php?gid=19352893701|tag|http://www.semanlink.net/tag/about_semanlink +http://www.facebook.com/group.php?gid=19352893701|tag|http://www.semanlink.net/tag/facebook +http://www.facebook.com/group.php?gid=19352893701|comment|Facebook group created by Dimitris +http://www.facebook.com/group.php?gid=19352893701|title|Facebook Find your path in the Labyrinth of Information! +http://www.facebook.com/group.php?gid=19352893701|creationTime|2008-07-14T14:07:44Z +http://arstechnica.com/articles/culture/microsoft-learn-from-apple-II.ars|creationDate|2008-05-06 +http://arstechnica.com/articles/culture/microsoft-learn-from-apple-II.ars|tag|http://www.semanlink.net/tag/mac_os_x +http://arstechnica.com/articles/culture/microsoft-learn-from-apple-II.ars|tag|http://www.semanlink.net/tag/net +http://arstechnica.com/articles/culture/microsoft-learn-from-apple-II.ars|tag|http://www.semanlink.net/tag/windows +http://arstechnica.com/articles/culture/microsoft-learn-from-apple-II.ars|comment|This is the second part of a three-part series describing how one developer became disillusioned with the Windows platform and was reinvigorated by the bright lights of Mac OS X. +http://arstechnica.com/articles/culture/microsoft-learn-from-apple-II.ars|title|From Win32 to Cocoa: a Windows user's conversion to Mac OS X +http://arstechnica.com/articles/culture/microsoft-learn-from-apple-II.ars|creationTime|2008-05-06T22:15:36Z +http://rdfa.info/rdfa-in-the-wild/|creationDate|2007-08-23 +http://rdfa.info/rdfa-in-the-wild/|tag|http://www.semanlink.net/tag/rdfa +http://rdfa.info/rdfa-in-the-wild/|comment|This page tries to gather RDFa implementations, systems that use (or contemplate on using) RDFa, and RDFa-based vocabularies and the like. 
+http://rdfa.info/rdfa-in-the-wild/|title|RDFa in the wild +http://rdfa.info/rdfa-in-the-wild/|creationTime|2007-08-23T23:45:00Z +http://fr.wikipedia.org/wiki/Za%C3%AF_(agriculture)|creationDate|2014-01-01 +http://fr.wikipedia.org/wiki/Za%C3%AF_(agriculture)|tag|http://www.semanlink.net/tag/sahel +http://fr.wikipedia.org/wiki/Za%C3%AF_(agriculture)|tag|http://www.semanlink.net/tag/agriculture +http://fr.wikipedia.org/wiki/Za%C3%AF_(agriculture)|title|Zaï (agriculture) - Wikipédia +http://fr.wikipedia.org/wiki/Za%C3%AF_(agriculture)|creationTime|2014-01-01T12:54:23Z +http://www.xml.com/pub/a/2003/02/05/tr.html|creationDate|2008-02-23 +http://www.xml.com/pub/a/2003/02/05/tr.html|tag|http://www.semanlink.net/tag/client_side_xslt +http://www.xml.com/pub/a/2003/02/05/tr.html|comment|"The W3C Recommendation ""Associating Style Sheets with XML Documents"" describes a processing instruction to include at the beginning of a document to name a stylesheet to apply to that document." +http://www.xml.com/pub/a/2003/02/05/tr.html|title|XML.com: XSLT, Browsers, and JavaScript +http://www.xml.com/pub/a/2003/02/05/tr.html|creationTime|2008-02-23T09:16:03Z +http://www.forbes.com/sites/gregsatell/2013/10/27/how-ibms-watson-will-change-the-way-we-work/|creationDate|2013-12-18 +http://www.forbes.com/sites/gregsatell/2013/10/27/how-ibms-watson-will-change-the-way-we-work/|tag|http://www.semanlink.net/tag/i_b_m_s_watson +http://www.forbes.com/sites/gregsatell/2013/10/27/how-ibms-watson-will-change-the-way-we-work/|title|How IBM's Watson Will Change The Way We Work - Forbes +http://www.forbes.com/sites/gregsatell/2013/10/27/how-ibms-watson-will-change-the-way-we-work/|creationTime|2013-12-18T15:58:54Z +http://www.laconscience.com/article.php?id_article=2382|creationDate|2007-04-18 +http://www.laconscience.com/article.php?id_article=2382|tag|http://www.semanlink.net/tag/rumba +http://www.laconscience.com/article.php?id_article=2382|title|La Conscience - Sam Mangwana +http://www.laconscience.com/article.php?id_article=2382|creationTime|2007-04-18T13:22:56Z +http://java.dzone.com/articles/clerezza-apache-project|creationDate|2011-09-09 +http://java.dzone.com/articles/clerezza-apache-project|tag|http://www.semanlink.net/tag/clerezza +http://java.dzone.com/articles/clerezza-apache-project|tag|http://www.semanlink.net/tag/reto_bachmann_gmur +http://java.dzone.com/articles/clerezza-apache-project|title|Clerezza: An Apache Project for the Semantic Web Javalobby +http://java.dzone.com/articles/clerezza-apache-project|creationTime|2011-09-09T21:53:51Z +http://realitesbiomedicales.blog.lemonde.fr/2015/11/27/decapite-ce-ver-repousse-avec-la-tete-dune-autre-espece/|creationDate|2017-08-24 +http://realitesbiomedicales.blog.lemonde.fr/2015/11/27/decapite-ce-ver-repousse-avec-la-tete-dune-autre-espece/|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://realitesbiomedicales.blog.lemonde.fr/2015/11/27/decapite-ce-ver-repousse-avec-la-tete-dune-autre-espece/|title|Décapité, ce ver repousse avec la tête… d’une autre espèce Réalités Biomédicales +http://realitesbiomedicales.blog.lemonde.fr/2015/11/27/decapite-ce-ver-repousse-avec-la-tete-dune-autre-espece/|creationTime|2017-08-24T00:11:43Z +http://opiateforthemass.es/articles/mini-ai-app-using-tensorflow-and-shiny/|creationDate|2016-01-15 +http://opiateforthemass.es/articles/mini-ai-app-using-tensorflow-and-shiny/|tag|http://www.semanlink.net/tag/r +http://opiateforthemass.es/articles/mini-ai-app-using-tensorflow-and-shiny/|tag|http://www.semanlink.net/tag/tensorflow 
+http://opiateforthemass.es/articles/mini-ai-app-using-tensorflow-and-shiny/|title|Mini AI app using TensorFlow and Shiny – Opiate for the masses +http://opiateforthemass.es/articles/mini-ai-app-using-tensorflow-and-shiny/|creationTime|2016-01-15T01:15:01Z +http://richard.cyganiak.de/blog/2007/02/debugging-semantic-web-sites-with-curl/|creationDate|2016-03-31 +http://richard.cyganiak.de/blog/2007/02/debugging-semantic-web-sites-with-curl/|tag|http://www.semanlink.net/tag/richard_cyganiak +http://richard.cyganiak.de/blog/2007/02/debugging-semantic-web-sites-with-curl/|tag|http://www.semanlink.net/tag/curl +http://richard.cyganiak.de/blog/2007/02/debugging-semantic-web-sites-with-curl/|title|Debugging Semantic Web sites with cURL cygri’s notes on web data +http://richard.cyganiak.de/blog/2007/02/debugging-semantic-web-sites-with-curl/|creationTime|2016-03-31T11:22:15Z +http://www.usine-digitale.fr/article/apple-et-google-ne-vont-pas-entrer-dans-l-industrie-automobile-selon-carlos-tavares.N317081|creationDate|2015-04-04 +http://www.usine-digitale.fr/article/apple-et-google-ne-vont-pas-entrer-dans-l-industrie-automobile-selon-carlos-tavares.N317081|tag|http://www.semanlink.net/tag/google +http://www.usine-digitale.fr/article/apple-et-google-ne-vont-pas-entrer-dans-l-industrie-automobile-selon-carlos-tavares.N317081|tag|http://www.semanlink.net/tag/apple +http://www.usine-digitale.fr/article/apple-et-google-ne-vont-pas-entrer-dans-l-industrie-automobile-selon-carlos-tavares.N317081|tag|http://www.semanlink.net/tag/automobile +http://www.usine-digitale.fr/article/apple-et-google-ne-vont-pas-entrer-dans-l-industrie-automobile-selon-carlos-tavares.N317081|comment|ben voyons +http://www.usine-digitale.fr/article/apple-et-google-ne-vont-pas-entrer-dans-l-industrie-automobile-selon-carlos-tavares.N317081|title|"Apple et Google ne vont pas ""entrer dans l'industrie automobile"", selon Carlos Tavares" +http://www.usine-digitale.fr/article/apple-et-google-ne-vont-pas-entrer-dans-l-industrie-automobile-selon-carlos-tavares.N317081|creationTime|2015-04-04T23:14:17Z +https://www.quora.com/When-is-using-word-embeddings-harmful|creationDate|2017-06-05 +https://www.quora.com/When-is-using-word-embeddings-harmful|tag|http://www.semanlink.net/tag/word_embedding +https://www.quora.com/When-is-using-word-embeddings-harmful|title|When is using word embeddings harmful? - Quora +https://www.quora.com/When-is-using-word-embeddings-harmful|creationTime|2017-06-05T11:54:20Z +https://github.com/tristan/jsonld-java|creationDate|2012-08-14 +https://github.com/tristan/jsonld-java|tag|http://www.semanlink.net/tag/json_ld +https://github.com/tristan/jsonld-java|tag|http://www.semanlink.net/tag/github_project +https://github.com/tristan/jsonld-java|title|tristan/jsonld-java · GitHub +https://github.com/tristan/jsonld-java|creationTime|2012-08-14T00:59:20Z +http://www.rfi.fr/francais/actu/articles/090/article_52884.asp|creationDate|2007-07-26 +http://www.rfi.fr/francais/actu/articles/090/article_52884.asp|tag|http://www.semanlink.net/tag/niger_agriculture +http://www.rfi.fr/francais/actu/articles/090/article_52884.asp|comment|Avec une production annuelle de plus de 300 000 tonnes, le Niger est l'un des pays d'Afrique de l'Ouest qui exporte le plus d'oignons vers le Burkina, le Togo, le Ghana, et la Côte d'Ivoire. Si elle est bien structurée, la filière est prometteuse pour les paysans nigériens. Là-bas, l'oignon est d'ailleurs communément appelé «l'or violet». 
+http://www.rfi.fr/francais/actu/articles/090/article_52884.asp|title|RFI - L'oignon : une filière prometteuse pour les paysans nigériens +http://www.rfi.fr/francais/actu/articles/090/article_52884.asp|creationTime|2007-07-26T13:05:33Z +http://www.google.com/webmasters/|creationDate|2013-08-23 +http://www.google.com/webmasters/|tag|http://www.semanlink.net/tag/webmasters_google +http://www.google.com/webmasters/|title|Webmasters - Google +http://www.google.com/webmasters/|creationTime|2013-08-23T13:55:02Z +https://www.theguardian.com/technology/2017/feb/05/artificial-intelligence-ethics-poker-libratus-texas-holdem-ai-deepstack|creationDate|2018-01-28 +https://www.theguardian.com/technology/2017/feb/05/artificial-intelligence-ethics-poker-libratus-texas-holdem-ai-deepstack|tag|http://www.semanlink.net/tag/poker +https://www.theguardian.com/technology/2017/feb/05/artificial-intelligence-ethics-poker-libratus-texas-holdem-ai-deepstack|tag|http://www.semanlink.net/tag/artificial_intelligence +https://www.theguardian.com/technology/2017/feb/05/artificial-intelligence-ethics-poker-libratus-texas-holdem-ai-deepstack|comment|Imagine a smartphone that’s able to negotiate the best price on a new car for you +https://www.theguardian.com/technology/2017/feb/05/artificial-intelligence-ethics-poker-libratus-texas-holdem-ai-deepstack|title|AI can win at poker: but as computers get smarter, who keeps tabs on their ethics? Technology The Guardian +https://www.theguardian.com/technology/2017/feb/05/artificial-intelligence-ethics-poker-libratus-texas-holdem-ai-deepstack|creationTime|2018-01-28T17:02:23Z +http://www.w3.org/2001/sw/BestPractices/SE/ODSD/|creationDate|2005-11-23 +http://www.w3.org/2001/sw/BestPractices/SE/ODSD/|tag|http://www.semanlink.net/tag/object_oriented_programming +http://www.w3.org/2001/sw/BestPractices/SE/ODSD/|tag|http://www.semanlink.net/tag/semantic_web_dev +http://www.w3.org/2001/sw/BestPractices/SE/ODSD/|comment|This note is hence intended to act as an introduction to Semantic Web technologies for software developers with background in object-oriented languages like UML and Java. Our goal is to clarify the differences between RDF/OWL and object-oriented languages, and to encourage mainstream developers to add Semantic Web technology to their routine tool kit. 
+http://www.w3.org/2001/sw/BestPractices/SE/ODSD/|title|A Semantic Web Primer for Object-Oriented Software Developers +http://librdf.org/docs/ruby.html|creationDate|2006-03-27 +http://librdf.org/docs/ruby.html|tag|http://www.semanlink.net/tag/ruby +http://librdf.org/docs/ruby.html|tag|http://www.semanlink.net/tag/redland +http://librdf.org/docs/ruby.html|title|Redland RDF Application Framework - Ruby Interface +http://ouseful.wordpress.com/2008/10/14/data-scraping-wikipedia-with-google-spreadsheets/|creationDate|2008-10-18 +http://ouseful.wordpress.com/2008/10/14/data-scraping-wikipedia-with-google-spreadsheets/|tag|http://www.semanlink.net/tag/wikipedia +http://ouseful.wordpress.com/2008/10/14/data-scraping-wikipedia-with-google-spreadsheets/|tag|http://www.semanlink.net/tag/mashups +http://ouseful.wordpress.com/2008/10/14/data-scraping-wikipedia-with-google-spreadsheets/|tag|http://www.semanlink.net/tag/google_spreadsheets +http://ouseful.wordpress.com/2008/10/14/data-scraping-wikipedia-with-google-spreadsheets/|title|Data Scraping Wikipedia with Google Spreadsheets « OUseful.Info, the blog… +http://ouseful.wordpress.com/2008/10/14/data-scraping-wikipedia-with-google-spreadsheets/|creationTime|2008-10-18T10:25:37Z +http://semanticweb.com/payswarm-give-someone-0-02-for-their-two-cents-part-i_b23739|creationDate|2011-10-12 +http://semanticweb.com/payswarm-give-someone-0-02-for-their-two-cents-part-i_b23739|tag|http://www.semanlink.net/tag/payment +http://semanticweb.com/payswarm-give-someone-0-02-for-their-two-cents-part-i_b23739|tag|http://www.semanlink.net/tag/money +http://semanticweb.com/payswarm-give-someone-0-02-for-their-two-cents-part-i_b23739|tag|http://www.semanlink.net/tag/micropayments_on_the_web +http://semanticweb.com/payswarm-give-someone-0-02-for-their-two-cents-part-i_b23739|comment|a universal payment standard designed specifically for the Web. Think “an open source PayPal on steroids” – an open, patent and royalty free specification for Web Payments. The goal of PaySwarm is to make crowd-funding, world-changing ideas, buying and selling online as easy as sending an e-mail or an instant message. We want payment to be baked into the core of the Web so that exciting new companies can be launched on top of this truly open payment platform. 
+http://semanticweb.com/payswarm-give-someone-0-02-for-their-two-cents-part-i_b23739|title|PaySwarm – Give Someone $0.02 for Their Two Cents (Part I) - semanticweb.com +http://semanticweb.com/payswarm-give-someone-0-02-for-their-two-cents-part-i_b23739|creationTime|2011-10-12T23:51:27Z +http://scopeblog.stanford.edu/2014/03/10/stanford-bioengineer-develops-a-50-cent-paper-microscope/|creationDate|2014-03-11 +http://scopeblog.stanford.edu/2014/03/10/stanford-bioengineer-develops-a-50-cent-paper-microscope/|tag|http://www.semanlink.net/tag/microscope +http://scopeblog.stanford.edu/2014/03/10/stanford-bioengineer-develops-a-50-cent-paper-microscope/|tag|http://www.semanlink.net/tag/paludisme +http://scopeblog.stanford.edu/2014/03/10/stanford-bioengineer-develops-a-50-cent-paper-microscope/|tag|http://www.semanlink.net/tag/innovation +http://scopeblog.stanford.edu/2014/03/10/stanford-bioengineer-develops-a-50-cent-paper-microscope/|tag|http://www.semanlink.net/tag/diy +http://scopeblog.stanford.edu/2014/03/10/stanford-bioengineer-develops-a-50-cent-paper-microscope/|title|Stanford bioengineer develops a 50-cent paper microscope Scope Blog +http://scopeblog.stanford.edu/2014/03/10/stanford-bioengineer-develops-a-50-cent-paper-microscope/|creationTime|2014-03-11T14:27:31Z +http://www.wdl.org/en/item/53/|creationDate|2009-04-22 +http://www.wdl.org/en/item/53/|tag|http://www.semanlink.net/tag/histoire_de_l_afrique +http://www.wdl.org/en/item/53/|tag|http://www.semanlink.net/tag/carte +http://www.wdl.org/en/item/53/|title|Map of Barbary, Nigritia and Guinea - World Digital Library +http://www.wdl.org/en/item/53/|creationTime|2009-04-22T23:19:47Z +http://www.bioshare.net/|creationDate|2007-11-14 +http://www.bioshare.net/|tag|http://www.semanlink.net/tag/biodiversity_data +http://www.bioshare.net/|tag|http://www.semanlink.net/tag/biodiversite +http://www.bioshare.net/|comment|"Collaborative biodiversity portal +" +http://www.bioshare.net/|title|Bioshare: Home +http://www.bioshare.net/|creationTime|2007-11-14T13:44:47Z +http://dexter.isti.cnr.it/|creationDate|2014-10-20 +http://dexter.isti.cnr.it/|tag|http://www.semanlink.net/tag/entity_linking +http://dexter.isti.cnr.it/|tag|http://www.semanlink.net/tag/named_entity_recognition +http://dexter.isti.cnr.it/|title|Dexter, an Open Source Framework for Entity Linking +http://dexter.isti.cnr.it/|creationTime|2014-10-20T01:35:57Z +http://louvre-boite.viabloga.com/news/76.shtml|creationDate|2005-09-21 +http://louvre-boite.viabloga.com/news/76.shtml|tag|http://www.semanlink.net/tag/tag_clusters +http://louvre-boite.viabloga.com/news/76.shtml|title|L'ouvre-boîte - Next big thing : Tag Clusters +https://en.wikipedia.org/wiki/My_Life_in_the_Bush_of_Ghosts_%28novel%29|creationDate|2015-07-05 +https://en.wikipedia.org/wiki/My_Life_in_the_Bush_of_Ghosts_%28novel%29|tag|http://www.semanlink.net/tag/livre_a_lire +https://en.wikipedia.org/wiki/My_Life_in_the_Bush_of_Ghosts_%28novel%29|title|"""My Life in the Bush of Ghosts"" Amos Tutuola" +https://en.wikipedia.org/wiki/My_Life_in_the_Bush_of_Ghosts_%28novel%29|creationTime|2015-07-05T11:59:16Z +http://bugbrother.blog.lemonde.fr/2013/04/07/ne-dites-pas-a-ma-mere-que-je-suis-un-hacker-elle-me-croit-blogueur-au-monde-fr-reporter-au-vinvinteur/|creationDate|2013-04-10 +http://bugbrother.blog.lemonde.fr/2013/04/07/ne-dites-pas-a-ma-mere-que-je-suis-un-hacker-elle-me-croit-blogueur-au-monde-fr-reporter-au-vinvinteur/|tag|http://www.semanlink.net/tag/bug_brother 
+http://bugbrother.blog.lemonde.fr/2013/04/07/ne-dites-pas-a-ma-mere-que-je-suis-un-hacker-elle-me-croit-blogueur-au-monde-fr-reporter-au-vinvinteur/|tag|http://www.semanlink.net/tag/hackers +http://bugbrother.blog.lemonde.fr/2013/04/07/ne-dites-pas-a-ma-mere-que-je-suis-un-hacker-elle-me-croit-blogueur-au-monde-fr-reporter-au-vinvinteur/|title|Ne dites pas à ma mère que je suis un hacker, elle me croit blogueur au Monde.fr, & reporter au Vinvinteur BUG BROTHER +http://bugbrother.blog.lemonde.fr/2013/04/07/ne-dites-pas-a-ma-mere-que-je-suis-un-hacker-elle-me-croit-blogueur-au-monde-fr-reporter-au-vinvinteur/|creationTime|2013-04-10T16:32:19Z +http://people.cs.umass.edu/~mccallum/papers/crf-tutorial.pdf|creationDate|2014-04-24 +http://people.cs.umass.edu/~mccallum/papers/crf-tutorial.pdf|tag|http://www.semanlink.net/tag/conditional_random_field +http://people.cs.umass.edu/~mccallum/papers/crf-tutorial.pdf|tag|http://www.semanlink.net/tag/andrew_mccallum +http://people.cs.umass.edu/~mccallum/papers/crf-tutorial.pdf|title|An Introduction to Conditional Random Fields for Relational Learning (Charles Sutton and Andrew McCallum, 2006) +http://people.cs.umass.edu/~mccallum/papers/crf-tutorial.pdf|creationTime|2014-04-24T01:16:48Z +http://news.bbc.co.uk/2/hi/technology/8544935.stm|creationDate|2010-05-20 +http://news.bbc.co.uk/2/hi/technology/8544935.stm|tag|http://www.semanlink.net/tag/copyright +http://news.bbc.co.uk/2/hi/technology/8544935.stm|tag|http://www.semanlink.net/tag/news +http://news.bbc.co.uk/2/hi/technology/8544935.stm|comment|Copyright is not the only thing that matters online, says Bill Thompson +http://news.bbc.co.uk/2/hi/technology/8544935.stm|title|BBC News - Is it time to defend our rights? +http://news.bbc.co.uk/2/hi/technology/8544935.stm|creationTime|2010-05-20T22:48:14Z +http://news.bbc.co.uk/2/hi/technology/8544935.stm|source|BBC +https://www.youtube.com/watch?v=ge4kEwrperk|creationDate|2019-04-23 +https://www.youtube.com/watch?v=ge4kEwrperk|tag|http://www.semanlink.net/tag/medical_ir_ml_ia +https://www.youtube.com/watch?v=ge4kEwrperk|tag|http://www.semanlink.net/tag/france_is_ai_2018 +https://www.youtube.com/watch?v=ge4kEwrperk|title|France is AI 2018: Emmanuel Bacry - Detecting weak signals in pharmaco epidemiology - YouTube +https://www.youtube.com/watch?v=ge4kEwrperk|creationTime|2019-04-23T18:15:29Z +http://www.michaelnielsen.org/ddi/how-the-bitcoin-protocol-actually-works/|creationDate|2013-12-09 +http://www.michaelnielsen.org/ddi/how-the-bitcoin-protocol-actually-works/|tag|http://www.semanlink.net/tag/bitcoin +http://www.michaelnielsen.org/ddi/how-the-bitcoin-protocol-actually-works/|title|How the Bitcoin protocol actually works DDI +http://www.michaelnielsen.org/ddi/how-the-bitcoin-protocol-actually-works/|creationTime|2013-12-09T18:49:47Z +http://www.pr-owl.org/|creationDate|2006-11-07 +http://www.pr-owl.org/|tag|http://www.semanlink.net/tag/owl +http://www.pr-owl.org/|tag|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.pr-owl.org/|tag|http://www.semanlink.net/tag/semantic_web +http://www.pr-owl.org/|title|PR-OWL: A Bayesian Framework for the Semantic Web +http://www.theatredurondpoint.fr/saison/fiche_spectacle.cfm/59614-du-cristal-a-la-fumee.html|creationDate|2010-01-07 +http://www.theatredurondpoint.fr/saison/fiche_spectacle.cfm/59614-du-cristal-a-la-fumee.html|tag|http://www.semanlink.net/tag/theatre +http://www.theatredurondpoint.fr/saison/fiche_spectacle.cfm/59614-du-cristal-a-la-fumee.html|tag|http://www.semanlink.net/tag/shoah 
+http://www.theatredurondpoint.fr/saison/fiche_spectacle.cfm/59614-du-cristal-a-la-fumee.html|tag|http://www.semanlink.net/tag/kristallnacht +http://www.theatredurondpoint.fr/saison/fiche_spectacle.cfm/59614-du-cristal-a-la-fumee.html|tag|http://www.semanlink.net/tag/attali +http://www.theatredurondpoint.fr/saison/fiche_spectacle.cfm/59614-du-cristal-a-la-fumee.html|title|Du cristal à la fumée +http://www.theatredurondpoint.fr/saison/fiche_spectacle.cfm/59614-du-cristal-a-la-fumee.html|creationTime|2010-01-07T01:24:02Z +http://2008.xtech.org/public/schedule/detail/545|creationDate|2008-06-17 +http://2008.xtech.org/public/schedule/detail/545|tag|http://www.semanlink.net/tag/xtech +http://2008.xtech.org/public/schedule/detail/545|tag|http://www.semanlink.net/tag/web_2_0 +http://2008.xtech.org/public/schedule/detail/545|tag|http://www.semanlink.net/tag/dataportability +http://2008.xtech.org/public/schedule/detail/545|comment|Web 2.0 partitions the Web into a number of topical sub-Webs, and locks you in, thereby reducing the value of the network as a whole. +http://2008.xtech.org/public/schedule/detail/545|title|XTech 2008: Why you should have a Website — IDEAlliance +http://2008.xtech.org/public/schedule/detail/545|creationTime|2008-06-17T21:48:55Z +http://aperture.sourceforge.net/|creationDate|2007-09-25 +http://aperture.sourceforge.net/|tag|http://www.semanlink.net/tag/semantic_web_tools +http://aperture.sourceforge.net/|tag|http://www.semanlink.net/tag/java +http://aperture.sourceforge.net/|tag|http://www.semanlink.net/tag/open_source +http://aperture.sourceforge.net/|comment|a Java framework for getting data and metadata +http://aperture.sourceforge.net/|title|Aperture Framework +http://aperture.sourceforge.net/|creationTime|2007-09-25T21:51:55Z +https://github.com/Graphity|creationDate|2014-10-13 +https://github.com/Graphity|tag|http://www.semanlink.net/tag/ldp_implementations +https://github.com/Graphity|comment|"Graphity client: Generic Linked Data browser and platform for building declarative SPARQL triplestore-backed Web applications
+Graphity server: Generic Linked Data server for SPARQL triplestore backends.
+Java (Jena) + SPARQL + Jersey " +https://github.com/Graphity|title|Graphity on Github +https://github.com/Graphity|creationTime|2014-10-13T00:35:02Z +http://www.snee.com/bobdc.blog/2017/07/the-w3c-standard-constraint-la.html|creationDate|2017-08-15 +http://www.snee.com/bobdc.blog/2017/07/the-w3c-standard-constraint-la.html|tag|http://www.semanlink.net/tag/shacl +http://www.snee.com/bobdc.blog/2017/07/the-w3c-standard-constraint-la.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2017/07/the-w3c-standard-constraint-la.html|title|The W3C standard constraint language for RDF: SHACL - bobdc.blog +http://www.snee.com/bobdc.blog/2017/07/the-w3c-standard-constraint-la.html|creationTime|2017-08-15T15:51:20Z +http://www.devx.com/semantic/Article/38700|creationDate|2008-08-01 +http://www.devx.com/semantic/Article/38700|tag|http://www.semanlink.net/tag/semantic_web_databases +http://www.devx.com/semantic/Article/38700|tag|http://www.semanlink.net/tag/owl +http://www.devx.com/semantic/Article/38700|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.devx.com/semantic/Article/38700|comment|"Using the W3C OWL ontology standard lets you get more out of all kinds of data. Find out how this standard and some free software lets you query two databases as if they were one.
+ +The primary goal of this article is to put together a demonstration of how you can use OWL to integrate two relational databases, and then perform queries against the aggregate collection to answer realistic questions that you could not answer without the addition of an OWL ontology. +" +http://www.devx.com/semantic/Article/38700|title|Relational Database Integration with RDF/OWL +http://www.devx.com/semantic/Article/38700|creationTime|2008-08-01T12:33:41Z +http://esw.w3.org/topic/TaskForces/CommunityProjects/LinkingOpenData/EquivalenceMining|creationDate|2007-06-13 +http://esw.w3.org/topic/TaskForces/CommunityProjects/LinkingOpenData/EquivalenceMining|tag|http://www.semanlink.net/tag/equivalence_mining +http://esw.w3.org/topic/TaskForces/CommunityProjects/LinkingOpenData/EquivalenceMining|comment|This page collects software tools and papers about techniques that can be used to auto-generate links between data items within different datasources. +http://esw.w3.org/topic/TaskForces/CommunityProjects/LinkingOpenData/EquivalenceMining|title|TaskForces/CommunityProjects/LinkingOpenData/EquivalenceMining - ESW Wiki +http://esw.w3.org/topic/TaskForces/CommunityProjects/LinkingOpenData/EquivalenceMining|creationTime|2007-06-13T23:23:55Z +http://web.mit.edu/cre/research/1k-house-project.html|creationDate|2011-09-20 +http://web.mit.edu/cre/research/1k-house-project.html|tag|http://www.semanlink.net/tag/mit +http://web.mit.edu/cre/research/1k-house-project.html|tag|http://www.semanlink.net/tag/innovation +http://web.mit.edu/cre/research/1k-house-project.html|title|MIT's 1K House Project +http://web.mit.edu/cre/research/1k-house-project.html|creationTime|2011-09-20T08:57:14Z +http://www.spectrum.ieee.org/singularity|creationDate|2008-08-17 +http://www.spectrum.ieee.org/singularity|tag|http://www.semanlink.net/tag/conscience +http://www.spectrum.ieee.org/singularity|comment|The singularity: when machines become conscious +http://www.spectrum.ieee.org/singularity|title|IEEE Spectrum: Special Report: The Singularity +http://www.spectrum.ieee.org/singularity|creationTime|2008-08-17T11:53:51Z +http://lists.w3.org/Archives/Public/public-lod/2011Oct/0034.html|creationDate|2011-10-12 +http://lists.w3.org/Archives/Public/public-lod/2011Oct/0034.html|tag|http://www.semanlink.net/tag/goodrelations_renault +http://lists.w3.org/Archives/Public/public-lod/2011Oct/0034.html|tag|http://www.semanlink.net/tag/makolab +http://lists.w3.org/Archives/Public/public-lod/2011Oct/0034.html|tag|http://www.semanlink.net/tag/martin_hepp +http://lists.w3.org/Archives/Public/public-lod/2011Oct/0034.html|title|ANN: Renault adds GoodRelations to UK Shop +http://lists.w3.org/Archives/Public/public-lod/2011Oct/0034.html|creationTime|2011-10-12T22:30:30Z +http://bnode.org/blog/2007/12/22/driftr-linked-data-browser-and-editor-screencast|creationDate|2007-12-31 +http://bnode.org/blog/2007/12/22/driftr-linked-data-browser-and-editor-screencast|tag|http://www.semanlink.net/tag/benjamin_nowack +http://bnode.org/blog/2007/12/22/driftr-linked-data-browser-and-editor-screencast|tag|http://www.semanlink.net/tag/rdf_browser +http://bnode.org/blog/2007/12/22/driftr-linked-data-browser-and-editor-screencast|tag|http://www.semanlink.net/tag/rdf_editor +http://bnode.org/blog/2007/12/22/driftr-linked-data-browser-and-editor-screencast|title|DriftR Linked Data Browser and Editor (Screencast) - benjamin nowack's blog +http://bnode.org/blog/2007/12/22/driftr-linked-data-browser-and-editor-screencast|creationTime|2007-12-31T17:09:40Z 
+http://ruder.io/requests-for-research/|creationDate|2018-03-04 +http://ruder.io/requests-for-research/|tag|http://www.semanlink.net/tag/sebastian_ruder +http://ruder.io/requests-for-research/|tag|http://www.semanlink.net/tag/nlp +http://ruder.io/requests-for-research/|title|NLP: Requests for Research +http://ruder.io/requests-for-research/|creationTime|2018-03-04T16:38:14Z +http://www.lemonde.fr/depeches/2010/12/18/niger-greenpeace-denonce-le-deversement-de-dechets-radioactifs-d-areva_3244_108_44080202.html|creationDate|2010-12-22 +http://www.lemonde.fr/depeches/2010/12/18/niger-greenpeace-denonce-le-deversement-de-dechets-radioactifs-d-areva_3244_108_44080202.html|tag|http://www.semanlink.net/tag/niger +http://www.lemonde.fr/depeches/2010/12/18/niger-greenpeace-denonce-le-deversement-de-dechets-radioactifs-d-areva_3244_108_44080202.html|tag|http://www.semanlink.net/tag/greenpeace +http://www.lemonde.fr/depeches/2010/12/18/niger-greenpeace-denonce-le-deversement-de-dechets-radioactifs-d-areva_3244_108_44080202.html|tag|http://www.semanlink.net/tag/areva +http://www.lemonde.fr/depeches/2010/12/18/niger-greenpeace-denonce-le-deversement-de-dechets-radioactifs-d-areva_3244_108_44080202.html|title|Niger: Greenpeace dénonce le déversement de déchets radioactifs d'Areva - LeMonde.fr +http://www.lemonde.fr/depeches/2010/12/18/niger-greenpeace-denonce-le-deversement-de-dechets-radioactifs-d-areva_3244_108_44080202.html|creationTime|2010-12-22T23:31:13Z +http://www.lemonde.fr/depeches/2010/12/18/niger-greenpeace-denonce-le-deversement-de-dechets-radioactifs-d-areva_3244_108_44080202.html|source|Le Monde +http://jung.sourceforge.net/|creationDate|2005-09-26 +http://jung.sourceforge.net/|tag|http://www.semanlink.net/tag/graph_visualization +http://jung.sourceforge.net/|title|JUNG - Java Universal Network/Graph Framework +http://www.semanlink.net/2014/09/ec-web-paper.pdf|creationDate|2014-08-04 +http://www.semanlink.net/2014/09/ec-web-paper.pdf|tag|http://www.semanlink.net/tag/automobile +http://www.semanlink.net/2014/09/ec-web-paper.pdf|tag|http://www.semanlink.net/tag/ec_web +http://www.semanlink.net/2014/09/ec-web-paper.pdf|tag|http://www.semanlink.net/tag/fps_ec_web_14 +http://www.semanlink.net/2014/09/ec-web-paper.pdf|title|Automotive range as e-commerce data (EC-WEB 2014) +http://www.semanlink.net/2014/09/ec-web-paper.pdf|creationTime|2014-08-04T12:31:33Z +http://stackoverflow.com/questions/36517620/jersey-uriconnegfilter-now-declared-final-breaks-old-code-how-to-fix-it|creationDate|2016-04-09 +http://stackoverflow.com/questions/36517620/jersey-uriconnegfilter-now-declared-final-breaks-old-code-how-to-fix-it|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/36517620/jersey-uriconnegfilter-now-declared-final-breaks-old-code-how-to-fix-it|tag|http://www.semanlink.net/tag/fps_post +http://stackoverflow.com/questions/36517620/jersey-uriconnegfilter-now-declared-final-breaks-old-code-how-to-fix-it|tag|http://www.semanlink.net/tag/jersey +http://stackoverflow.com/questions/36517620/jersey-uriconnegfilter-now-declared-final-breaks-old-code-how-to-fix-it|title|Jersey: UriConnegFilter now declared final, breaks old code: how to fix it? - Stack Overflow
+http://stackoverflow.com/questions/36517620/jersey-uriconnegfilter-now-declared-final-breaks-old-code-how-to-fix-it|creationTime|2016-04-09T15:51:33Z +http://www.toprankblog.com/2009/11/seo-for-flash-tips/|creationDate|2013-08-27 +http://www.toprankblog.com/2009/11/seo-for-flash-tips/|tag|http://www.semanlink.net/tag/tips +http://www.toprankblog.com/2009/11/seo-for-flash-tips/|tag|http://www.semanlink.net/tag/flash +http://www.toprankblog.com/2009/11/seo-for-flash-tips/|tag|http://www.semanlink.net/tag/seo +http://www.toprankblog.com/2009/11/seo-for-flash-tips/|title|Flash SEO: 5 Tips and Best Practices for Optimizing Flash Websites +http://www.toprankblog.com/2009/11/seo-for-flash-tips/|creationTime|2013-08-27T14:27:19Z +http://www.der-mo.net/relationBrowser/index.html|creationDate|2006-10-09 +http://www.der-mo.net/relationBrowser/index.html|tag|http://www.semanlink.net/tag/semantic_networks +http://www.der-mo.net/relationBrowser/index.html|tag|http://www.semanlink.net/tag/graph_visualization +http://www.der-mo.net/relationBrowser/index.html|comment|A simple, yet effective radial semantic web browser. It was used to display ontologies and term occurrences in eLearning applications, but also to display social networks in Outfoxed. I will release an Open Source version soon. +http://www.der-mo.net/relationBrowser/index.html|title|der-mo.net - Moritz Stefaner - Relation browser +https://twitter.com/pnderthevstnes/status/1110260437801562112|creationDate|2019-03-26 +https://twitter.com/pnderthevstnes/status/1110260437801562112|tag|http://www.semanlink.net/tag/ulmfit +https://twitter.com/pnderthevstnes/status/1110260437801562112|tag|http://www.semanlink.net/tag/backtranslation +https://twitter.com/pnderthevstnes/status/1110260437801562112|title|"Sam Shleifer sur Twitter : ""ULMFit from @fastai + Data Augmentation with backtranslation can get 80+% validation accuracy using only 50 training examples on #NLP IMDB sentiment classification!" +https://twitter.com/pnderthevstnes/status/1110260437801562112|creationTime|2019-03-26T00:43:08Z +http://virtuoso.openlinksw.com/dataspace/dav/wiki/Main/VOSSQL2RDF|creationDate|2009-02-10 +http://virtuoso.openlinksw.com/dataspace/dav/wiki/Main/VOSSQL2RDF|tag|http://www.semanlink.net/tag/virtuoso_open_source_edition +http://virtuoso.openlinksw.com/dataspace/dav/wiki/Main/VOSSQL2RDF|tag|http://www.semanlink.net/tag/sql_to_rdf_mapping +http://virtuoso.openlinksw.com/dataspace/dav/wiki/Main/VOSSQL2RDF|title|Virtuoso Open-Source Edition: Mapping SQL Data to RDF +http://virtuoso.openlinksw.com/dataspace/dav/wiki/Main/VOSSQL2RDF|creationTime|2009-02-10T22:44:37Z +http://objecthunter.congrace.de/tinybo/blog/articles/98|creationDate|2012-03-19 +http://objecthunter.congrace.de/tinybo/blog/articles/98|tag|http://www.semanlink.net/tag/jersey_rdf +http://objecthunter.congrace.de/tinybo/blog/articles/98|tag|http://www.semanlink.net/tag/rdf_in_json +http://objecthunter.congrace.de/tinybo/blog/articles/98|tag|http://www.semanlink.net/tag/jena +http://objecthunter.congrace.de/tinybo/blog/articles/98|title|Serializing Apache Jena's RDF resources via JAXB into JSON documents in a JAX-RS context.
+http://objecthunter.congrace.de/tinybo/blog/articles/98|creationTime|2012-03-19T23:18:25Z +http://fr.wikipedia.org/wiki/Beijing_Genomics_Institute|creationDate|2014-03-23 +http://fr.wikipedia.org/wiki/Beijing_Genomics_Institute|tag|http://www.semanlink.net/tag/beijing_genomics_institute +http://fr.wikipedia.org/wiki/Beijing_Genomics_Institute|title|Beijing Genomics Institute +http://fr.wikipedia.org/wiki/Beijing_Genomics_Institute|creationTime|2014-03-23T23:11:38Z +http://news.mit.edu/2018/chip-neural-networks-battery-powered-devices-0214|creationDate|2018-02-22 +http://news.mit.edu/2018/chip-neural-networks-battery-powered-devices-0214|tag|http://www.semanlink.net/tag/mit +http://news.mit.edu/2018/chip-neural-networks-battery-powered-devices-0214|tag|http://www.semanlink.net/tag/artificial_neural_network +http://news.mit.edu/2018/chip-neural-networks-battery-powered-devices-0214|tag|http://www.semanlink.net/tag/ai_chip +http://news.mit.edu/2018/chip-neural-networks-battery-powered-devices-0214|comment|New chip reduces neural networks’ power consumption by up to 95 percent, making them practical for battery-powered devices. +http://news.mit.edu/2018/chip-neural-networks-battery-powered-devices-0214|title|Neural networks everywhere MIT News +http://news.mit.edu/2018/chip-neural-networks-battery-powered-devices-0214|creationTime|2018-02-22T00:41:29Z +https://www.reviewnb.com/|creationDate|2019-02-14 +https://www.reviewnb.com/|tag|http://www.semanlink.net/tag/github +https://www.reviewnb.com/|tag|http://www.semanlink.net/tag/jupyter +https://www.reviewnb.com/|tag|http://www.semanlink.net/tag/dev_tools +https://www.reviewnb.com/|title|ReviewNB: Jupyter Notebook Diff for GitHub +https://www.reviewnb.com/|creationTime|2019-02-14T08:38:55Z +https://class.coursera.org/datascitoolbox-010|creationDate|2015-01-06 +https://class.coursera.org/datascitoolbox-010|tag|http://www.semanlink.net/tag/coursera_the_data_scientist_s_toolbox +https://class.coursera.org/datascitoolbox-010|title|The Data Scientist’s Toolbox Coursera +https://class.coursera.org/datascitoolbox-010|creationTime|2015-01-06T14:51:51Z +https://brinxmat.wordpress.com/2014/01/28/an-unusually-sensible-post-about-rdf/|creationDate|2016-04-09 +https://brinxmat.wordpress.com/2014/01/28/an-unusually-sensible-post-about-rdf/|tag|http://www.semanlink.net/tag/brinxmat +https://brinxmat.wordpress.com/2014/01/28/an-unusually-sensible-post-about-rdf/|tag|http://www.semanlink.net/tag/about_rdf +https://brinxmat.wordpress.com/2014/01/28/an-unusually-sensible-post-about-rdf/|comment|"RDF is good for data models, it’s good for data structures and transformations; it isn’t good for working with values.
+People don’t want a data model, they want values. This is a shame.
+The RDF stack doesn’t provide a replacement for a database. +" +https://brinxmat.wordpress.com/2014/01/28/an-unusually-sensible-post-about-rdf/|title|An unusually sensible post about RDF Brinxmat's blog +https://brinxmat.wordpress.com/2014/01/28/an-unusually-sensible-post-about-rdf/|creationTime|2016-04-09T00:40:09Z +https://brinxmat.wordpress.com/2014/01/28/an-unusually-sensible-post-about-rdf/|linkTo|http://www.sarahmei.com/blog/2013/11/11/why-you-should-never-use-mongodb/ +https://www.researchgate.net/messages/671455533|creationDate|2017-10-24 +https://www.researchgate.net/messages/671455533|tag|http://www.semanlink.net/tag/fps_paper +https://www.researchgate.net/messages/671455533|tag|http://www.semanlink.net/tag/osema_deri_renault_paper +https://www.researchgate.net/messages/671455533|tag|http://www.semanlink.net/tag/configuration_as_linked_data +https://www.researchgate.net/messages/671455533|tag|http://www.semanlink.net/tag/researchgate +https://www.researchgate.net/messages/671455533|comment|"> I found your other paper ""A Semantic Web Representation of a Product Range Specification based on Constraint Satisfaction Problem in the Automotive Industry"" very interesting. Actually, it is the best paper on this topic I found so far!" +https://www.researchgate.net/messages/671455533|title|Re: Product Customization as Linked Data - ResearchGate +https://www.researchgate.net/messages/671455533|creationTime|2017-10-24T10:20:40Z +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm|creationDate|2006-05-05 +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm|tag|http://www.semanlink.net/tag/oleoduc +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm|tag|http://www.semanlink.net/tag/baikal +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm|tag|http://www.semanlink.net/tag/bonne_nouvelle +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm|tag|http://www.semanlink.net/tag/ecologie +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm|tag|http://www.semanlink.net/tag/poutine +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm|comment|Russian President Vladimir Putin has said the route of a controversial new oil pipeline should be altered to avoid the world's deepest freshwater lake. 
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm|title|BBC NEWS - Putin orders oil pipeline shifted +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm|source|BBC +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/europe/4945998.stm|date|2006-04-26 +http://moussapoussy.planeteafrique.com|creationDate|2005-10-25 +http://moussapoussy.planeteafrique.com|tag|http://www.semanlink.net/tag/moussa_poussi +http://moussapoussy.planeteafrique.com|tag|http://www.semanlink.net/tag/planeteafrique +http://fortune.com/2015/09/23/china-sharing-economy-mobile/|creationDate|2015-09-23 +http://fortune.com/2015/09/23/china-sharing-economy-mobile/|tag|http://www.semanlink.net/tag/chine +http://fortune.com/2015/09/23/china-sharing-economy-mobile/|tag|http://www.semanlink.net/tag/mobile_apps +http://fortune.com/2015/09/23/china-sharing-economy-mobile/|tag|http://www.semanlink.net/tag/ng +http://fortune.com/2015/09/23/china-sharing-economy-mobile/|tag|http://www.semanlink.net/tag/sharing_economy +http://fortune.com/2015/09/23/china-sharing-economy-mobile/|title|China's sharing economy is about mobile apps and quick delivery - Fortune +http://fortune.com/2015/09/23/china-sharing-economy-mobile/|creationTime|2015-09-23T22:16:58Z +http://codinginparadise.org/weblog/2005/08/ajax-tutorial-saving-session-across.html|creationDate|2008-06-14 +http://codinginparadise.org/weblog/2005/08/ajax-tutorial-saving-session-across.html|tag|http://www.semanlink.net/tag/ajax +http://codinginparadise.org/weblog/2005/08/ajax-tutorial-saving-session-across.html|tag|http://www.semanlink.net/tag/javascript_tips +http://codinginparadise.org/weblog/2005/08/ajax-tutorial-saving-session-across.html|title|Coding In Paradise: AJAX Tutorial: Saving Session Across Page Loads Without Cookies, On The Client Side +http://codinginparadise.org/weblog/2005/08/ajax-tutorial-saving-session-across.html|creationTime|2008-06-14T15:56:00Z +https://www.analyticsvidhya.com/blog/2018/11/tutorial-text-classification-ulmfit-fastai-library/|creationDate|2018-12-01 +https://www.analyticsvidhya.com/blog/2018/11/tutorial-text-classification-ulmfit-fastai-library/|tag|http://www.semanlink.net/tag/ulmfit +https://www.analyticsvidhya.com/blog/2018/11/tutorial-text-classification-ulmfit-fastai-library/|tag|http://www.semanlink.net/tag/tutorial +https://www.analyticsvidhya.com/blog/2018/11/tutorial-text-classification-ulmfit-fastai-library/|tag|http://www.semanlink.net/tag/google_colab +https://www.analyticsvidhya.com/blog/2018/11/tutorial-text-classification-ulmfit-fastai-library/|tag|http://www.semanlink.net/tag/nlp_text_classification +https://www.analyticsvidhya.com/blog/2018/11/tutorial-text-classification-ulmfit-fastai-library/|tag|http://www.semanlink.net/tag/fast_ai +https://www.analyticsvidhya.com/blog/2018/11/tutorial-text-classification-ulmfit-fastai-library/|title|Tutorial on Text Classification (NLP) using ULMFiT and fastai Library in Python - Analytics Vidhya +https://www.analyticsvidhya.com/blog/2018/11/tutorial-text-classification-ulmfit-fastai-library/|creationTime|2018-12-01T13:22:04Z +https://www.washingtonpost.com/news/wonk/wp/2016/11/08/a-new-theory-for-why-trump-voters-are-so-angry-that-actually-makes-sense/|creationDate|2016-11-11 +https://www.washingtonpost.com/news/wonk/wp/2016/11/08/a-new-theory-for-why-trump-voters-are-so-angry-that-actually-makes-sense/|tag|http://www.semanlink.net/tag/trump 
+https://www.washingtonpost.com/news/wonk/wp/2016/11/08/a-new-theory-for-why-trump-voters-are-so-angry-that-actually-makes-sense/|title|A new theory for why Trump voters are so angry — that actually makes sense - The Washington Post +https://www.washingtonpost.com/news/wonk/wp/2016/11/08/a-new-theory-for-why-trump-voters-are-so-angry-that-actually-makes-sense/|creationTime|2016-11-11T12:15:59Z +http://devblog.virtage.com/2013/07/how-to-get-file-resource-from-maven-srctestresources-folder-in-junit-test/|creationDate|2015-04-08 +http://devblog.virtage.com/2013/07/how-to-get-file-resource-from-maven-srctestresources-folder-in-junit-test/|tag|http://www.semanlink.net/tag/junit +http://devblog.virtage.com/2013/07/how-to-get-file-resource-from-maven-srctestresources-folder-in-junit-test/|tag|http://www.semanlink.net/tag/maven +http://devblog.virtage.com/2013/07/how-to-get-file-resource-from-maven-srctestresources-folder-in-junit-test/|title|How to get file resource from Maven src/test/resources/ folder in JUnit test? +http://devblog.virtage.com/2013/07/how-to-get-file-resource-from-maven-srctestresources-folder-in-junit-test/|creationTime|2015-04-08T17:28:10Z +http://psiexp.ss.uci.edu/research/papers/SteyversGriffithsLSABookFormatted.pdf|creationDate|2013-08-20 +http://psiexp.ss.uci.edu/research/papers/SteyversGriffithsLSABookFormatted.pdf|tag|http://www.semanlink.net/tag/topic_modeling +http://psiexp.ss.uci.edu/research/papers/SteyversGriffithsLSABookFormatted.pdf|comment|The LSA approach makes three claims: that semantic information can be derived from a word-document co-occurrence matrix; that dimensionality reduction is an essential part of this derivation; and that words and documents can be represented as points in Euclidean space. Topic models' approach is consistent with the first two of these claims, but differs in the third, describing a class of statistical models in which the semantic properties of words and documents are expressed in terms of probabilistic topics. 
+http://psiexp.ss.uci.edu/research/papers/SteyversGriffithsLSABookFormatted.pdf|title|Probabilistic Topic Models +http://psiexp.ss.uci.edu/research/papers/SteyversGriffithsLSABookFormatted.pdf|creationTime|2013-08-20T17:35:15Z +http://projects.freelibrary.info/solr-jetty-maven/|creationDate|2014-01-20 +http://projects.freelibrary.info/solr-jetty-maven/|tag|http://www.semanlink.net/tag/solr +http://projects.freelibrary.info/solr-jetty-maven/|tag|http://www.semanlink.net/tag/maven +http://projects.freelibrary.info/solr-jetty-maven/|comment|A project that runs Solr in Jetty using Maven +http://projects.freelibrary.info/solr-jetty-maven/|title|Solr-Jetty-Maven Project +http://projects.freelibrary.info/solr-jetty-maven/|creationTime|2014-01-20T22:59:12Z +http://stackoverflow.com/questions/7551/best-practices-for-securing-a-rest-api-web-service|creationDate|2014-09-26 +http://stackoverflow.com/questions/7551/best-practices-for-securing-a-rest-api-web-service|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/7551/best-practices-for-securing-a-rest-api-web-service|tag|http://www.semanlink.net/tag/security_and_rest +http://stackoverflow.com/questions/7551/best-practices-for-securing-a-rest-api-web-service|title|Best Practices for securing a REST API / web service - Stack Overflow +http://stackoverflow.com/questions/7551/best-practices-for-securing-a-rest-api-web-service|creationTime|2014-09-26T00:28:34Z +http://www.youtube.com/watch?v=o0NuuWJscqg|creationDate|2008-01-22 +http://www.youtube.com/watch?v=o0NuuWJscqg|tag|http://www.semanlink.net/tag/sani_aboussa +http://www.youtube.com/watch?v=o0NuuWJscqg|tag|http://www.semanlink.net/tag/youtube +http://www.youtube.com/watch?v=o0NuuWJscqg|title|YouTube - Sani Aboussa - Hadiza (soumata haouré) +http://www.youtube.com/watch?v=o0NuuWJscqg|creationTime|2008-01-22T23:04:26Z +http://www.w3.org/wiki/JSON+RDF|creationDate|2012-02-20 +http://www.w3.org/wiki/JSON+RDF|tag|http://www.semanlink.net/tag/rdf_in_json +http://www.w3.org/wiki/JSON+RDF|comment|Collects RDF serialisations in JSON. +http://www.w3.org/wiki/JSON+RDF|title|JSON+RDF - W3C Wiki +http://www.w3.org/wiki/JSON+RDF|creationTime|2012-02-20T22:58:52Z +http://web.archive.org/web/20000531042005/http://www.hypersolutions.fr/M3/AfricanMuseums/AfricanMuseums.ssi|creationDate|2010-05-17 +http://web.archive.org/web/20000531042005/http://www.hypersolutions.fr/M3/AfricanMuseums/AfricanMuseums.ssi|tag|http://www.semanlink.net/tag/musees_africains +http://web.archive.org/web/20000531042005/http://www.hypersolutions.fr/M3/AfricanMuseums/AfricanMuseums.ssi|tag|http://www.semanlink.net/tag/m3_multi_media_museum +http://web.archive.org/web/20000531042005/http://www.hypersolutions.fr/M3/AfricanMuseums/AfricanMuseums.ssi|title|Multi Media Museum et les musées africains +http://web.archive.org/web/20000531042005/http://www.hypersolutions.fr/M3/AfricanMuseums/AfricanMuseums.ssi|creationTime|2010-05-17T11:59:34Z +http://pisani.blog.lemonde.fr/2008/02/01/comment-faire-voir/|creationDate|2008-02-15 +http://pisani.blog.lemonde.fr/2008/02/01/comment-faire-voir/|tag|http://www.semanlink.net/tag/information_visualization +http://pisani.blog.lemonde.fr/2008/02/01/comment-faire-voir/|tag|http://www.semanlink.net/tag/transnets +http://pisani.blog.lemonde.fr/2008/02/01/comment-faire-voir/|title|Transnets » Blog Archive » Comment faire voir? 
+http://pisani.blog.lemonde.fr/2008/02/01/comment-faire-voir/|creationTime|2008-02-15T23:23:21Z +https://www.newscientist.com/article/2110522-googles-neural-networks-invent-their-own-encryption/|creationDate|2016-11-06 +https://www.newscientist.com/article/2110522-googles-neural-networks-invent-their-own-encryption/|tag|http://www.semanlink.net/tag/google_brain +https://www.newscientist.com/article/2110522-googles-neural-networks-invent-their-own-encryption/|tag|http://www.semanlink.net/tag/encryption +https://www.newscientist.com/article/2110522-googles-neural-networks-invent-their-own-encryption/|tag|http://www.semanlink.net/tag/deep_learning +https://www.newscientist.com/article/2110522-googles-neural-networks-invent-their-own-encryption/|title|Google's neural networks invent their own encryption New Scientist +https://www.newscientist.com/article/2110522-googles-neural-networks-invent-their-own-encryption/|creationTime|2016-11-06T01:56:28Z +https://drive.google.com/file/d/0Bzx8a8hdiMD2Z25LNUFKWndxazVMTkVfU3g1bDVaX3lJZ3E4/view|creationDate|2019-04-29 +https://drive.google.com/file/d/0Bzx8a8hdiMD2Z25LNUFKWndxazVMTkVfU3g1bDVaX3lJZ3E4/view|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +https://drive.google.com/file/d/0Bzx8a8hdiMD2Z25LNUFKWndxazVMTkVfU3g1bDVaX3lJZ3E4/view|tag|http://www.semanlink.net/tag/francois_scharffe +https://drive.google.com/file/d/0Bzx8a8hdiMD2Z25LNUFKWndxazVMTkVfU3g1bDVaX3lJZ3E4/view|title|open-knowledge-embeddings.pdf - (2019) +https://drive.google.com/file/d/0Bzx8a8hdiMD2Z25LNUFKWndxazVMTkVfU3g1bDVaX3lJZ3E4/view|creationTime|2019-04-29T09:38:33Z +http://www.assemblee-nationale.fr/histoire/dudh/1789.asp|creationDate|2012-09-17 +http://www.assemblee-nationale.fr/histoire/dudh/1789.asp|tag|http://www.semanlink.net/tag/1789 +http://www.assemblee-nationale.fr/histoire/dudh/1789.asp|tag|http://www.semanlink.net/tag/liberte +http://www.assemblee-nationale.fr/histoire/dudh/1789.asp|tag|http://www.semanlink.net/tag/declaration_des_droits_de_l_homme +http://www.assemblee-nationale.fr/histoire/dudh/1789.asp|comment|DÉCLARATION DES DROITS DE L’HOMME ET DU CITOYEN DE 1789 +http://www.assemblee-nationale.fr/histoire/dudh/1789.asp|title|La liberté consiste à pouvoir faire tout ce qui ne nuit pas à autrui +http://www.assemblee-nationale.fr/histoire/dudh/1789.asp|creationTime|2012-09-17T23:28:05Z +http://gawker.com/terrorism-works-1678049997|creationDate|2015-01-30 +http://gawker.com/terrorism-works-1678049997|tag|http://www.semanlink.net/tag/terrorisme +http://gawker.com/terrorism-works-1678049997|title|Terrorism Works +http://gawker.com/terrorism-works-1678049997|creationTime|2015-01-30T15:31:43Z +http://www.bbc.com/news/magazine-21702546|creationDate|2014-11-08 +http://www.bbc.com/news/magazine-21702546|tag|http://www.semanlink.net/tag/syrian_civil_war +http://www.bbc.com/news/magazine-21702546|tag|http://www.semanlink.net/tag/sites_du_patrimoine_mondial_de_l_unesco +http://www.bbc.com/news/magazine-21702546|title|Syria's priceless heritage under attack +http://www.bbc.com/news/magazine-21702546|creationTime|2014-11-08T14:08:36Z +http://www.guardian.co.uk/print/0,3858,5223112-106710,00.html|creationDate|2005-06-25 +http://www.guardian.co.uk/print/0,3858,5223112-106710,00.html|tag|http://www.semanlink.net/tag/europe +http://www.guardian.co.uk/print/0,3858,5223112-106710,00.html|tag|http://www.semanlink.net/tag/tony_blair +http://www.guardian.co.uk/print/0,3858,5223112-106710,00.html|title|Guardian Blair finds a little heaven in euro hell 
+http://www.litteratureaudio.com|creationDate|2009-08-19 +http://www.litteratureaudio.com|tag|http://www.semanlink.net/tag/livres_audio +http://www.litteratureaudio.com|tag|http://www.semanlink.net/tag/litterature +http://www.litteratureaudio.com|title|Litterature audio.com Livres audio gratuits à écouter et télécharger +http://www.litteratureaudio.com|creationTime|2009-08-19T19:05:48Z +http://www.slideshare.net/terraces/tmp-467722/|creationDate|2008-06-20 +http://www.slideshare.net/terraces/tmp-467722/|tag|http://www.semanlink.net/tag/foaf +http://www.slideshare.net/terraces/tmp-467722/|tag|http://www.semanlink.net/tag/tutorial +http://www.slideshare.net/terraces/tmp-467722/|tag|http://www.semanlink.net/tag/eswc_2008 +http://www.slideshare.net/terraces/tmp-467722/|tag|http://www.semanlink.net/tag/alexandre_passant +http://www.slideshare.net/terraces/tmp-467722/|tag|http://www.semanlink.net/tag/sioc +http://www.slideshare.net/terraces/tmp-467722/|tag|http://www.semanlink.net/tag/slides +http://www.slideshare.net/terraces/tmp-467722/|title|"FOAF & SIOC applications, slides (Alexandre Passant, ESWC 2008, ""Social Networks"" tutorial)" +http://www.slideshare.net/terraces/tmp-467722/|creationTime|2008-06-20T19:04:51Z +https://distill.pub/2016/augmented-rnns/|creationDate|2018-05-31 +https://distill.pub/2016/augmented-rnns/|tag|http://www.semanlink.net/tag/deep_learning_attention +https://distill.pub/2016/augmented-rnns/|tag|http://www.semanlink.net/tag/recurrent_neural_network +https://distill.pub/2016/augmented-rnns/|tag|http://www.semanlink.net/tag/christopher_olah +https://distill.pub/2016/augmented-rnns/|comment|"> We’d like attention to be **differentiable**, so that we can learn where to focus. To do this, we use the same trick Neural Turing Machines use: we focus everywhere, just to different extents.
+ +" +https://distill.pub/2016/augmented-rnns/|title|Attention and Augmented Recurrent Neural Networks (2016) +https://distill.pub/2016/augmented-rnns/|creationTime|2018-05-31T16:59:56Z +https://fr.slideshare.net/BhaskarMitra3/using-text-embeddings-for-information-retrieval|creationDate|2017-09-18 +https://fr.slideshare.net/BhaskarMitra3/using-text-embeddings-for-information-retrieval|tag|http://www.semanlink.net/tag/embeddings_in_ir +https://fr.slideshare.net/BhaskarMitra3/using-text-embeddings-for-information-retrieval|tag|http://www.semanlink.net/tag/bhaskar_mitra +https://fr.slideshare.net/BhaskarMitra3/using-text-embeddings-for-information-retrieval|tag|http://www.semanlink.net/tag/slideshare +https://fr.slideshare.net/BhaskarMitra3/using-text-embeddings-for-information-retrieval|tag|http://www.semanlink.net/tag/nlp_microsoft +https://fr.slideshare.net/BhaskarMitra3/using-text-embeddings-for-information-retrieval|title|Using Text Embeddings for Information Retrieval +https://fr.slideshare.net/BhaskarMitra3/using-text-embeddings-for-information-retrieval|creationTime|2017-09-18T17:02:59Z +http://europa.eu/rapid/press-release_SPEECH-13-261_en.htm|creationDate|2013-03-27 +http://europa.eu/rapid/press-release_SPEECH-13-261_en.htm|tag|http://www.semanlink.net/tag/open_data +http://europa.eu/rapid/press-release_SPEECH-13-261_en.htm|tag|http://www.semanlink.net/tag/neelie_kroes +http://europa.eu/rapid/press-release_SPEECH-13-261_en.htm|tag|http://www.semanlink.net/tag/europe +http://europa.eu/rapid/press-release_SPEECH-13-261_en.htm|tag|http://www.semanlink.net/tag/big_data +http://europa.eu/rapid/press-release_SPEECH-13-261_en.htm|comment|All in all, open data is a huge opportunity for Europe. A chance for citizens to benefit from amazing new products and services. A chance to boost many sectors of our society – from healthcare to democracy itself. 
+http://europa.eu/rapid/press-release_SPEECH-13-261_en.htm|title|EUROPA - COMMUNIQUES DE PRESSE - Communiqué de presse - Speech: The big data revolution +http://europa.eu/rapid/press-release_SPEECH-13-261_en.htm|creationTime|2013-03-27T23:00:11Z +http://www.semanticweb.com/on/semantic_enterprise_the_startups_169811.asp|creationDate|2010-08-09 +http://www.semanticweb.com/on/semantic_enterprise_the_startups_169811.asp|tag|http://www.semanlink.net/tag/semantic_web_company +http://www.semanticweb.com/on/semantic_enterprise_the_startups_169811.asp|title|Semantic Enterprise: The StartUps - Semantic Web +http://www.semanticweb.com/on/semantic_enterprise_the_startups_169811.asp|creationTime|2010-08-09T22:08:45Z +http://redlink.co/|creationDate|2015-03-09 +http://redlink.co/|tag|http://www.semanlink.net/tag/apache_marmotta +http://redlink.co/|tag|http://www.semanlink.net/tag/rupert_westenthaler +http://redlink.co/|tag|http://www.semanlink.net/tag/semantic_startup +http://redlink.co/|tag|http://www.semanlink.net/tag/apache_stanbol +http://redlink.co/|tag|http://www.semanlink.net/tag/john_pereira +http://redlink.co/|tag|http://www.semanlink.net/tag/solr +http://redlink.co/|title|redlink +http://redlink.co/|creationTime|2015-03-09T11:27:51Z +http://www.bbc.com/future/story/20120614-how-bacteria-talk|creationDate|2013-12-14 +http://www.bbc.com/future/story/20120614-how-bacteria-talk|tag|http://www.semanlink.net/tag/ted +http://www.bbc.com/future/story/20120614-how-bacteria-talk|tag|http://www.semanlink.net/tag/bacteries +http://www.bbc.com/future/story/20120614-how-bacteria-talk|tag|http://www.semanlink.net/tag/antibiotic_resistance +http://www.bbc.com/future/story/20120614-how-bacteria-talk|title|How bacteria 'talk' to each other +http://www.bbc.com/future/story/20120614-how-bacteria-talk|creationTime|2013-12-14T12:33:27Z +http://www.gnu.org/software/octave/doc/interpreter/index.html|creationDate|2013-12-28 +http://www.gnu.org/software/octave/doc/interpreter/index.html|tag|http://www.semanlink.net/tag/gnu_octave +http://www.gnu.org/software/octave/doc/interpreter/index.html|tag|http://www.semanlink.net/tag/documentation +http://www.gnu.org/software/octave/doc/interpreter/index.html|title|GNU Octave's Documentation +http://www.gnu.org/software/octave/doc/interpreter/index.html|creationTime|2013-12-28T17:00:18Z +http://news.bbc.co.uk/2/hi/programmes/world_news_america/8601207.stm|creationDate|2010-04-10 +http://news.bbc.co.uk/2/hi/programmes/world_news_america/8601207.stm|tag|http://www.semanlink.net/tag/finlande +http://news.bbc.co.uk/2/hi/programmes/world_news_america/8601207.stm|tag|http://www.semanlink.net/tag/education +http://news.bbc.co.uk/2/hi/programmes/world_news_america/8601207.stm|title|BBC News - Why do Finland's schools get the best results? 
+http://news.bbc.co.uk/2/hi/programmes/world_news_america/8601207.stm|creationTime|2010-04-10T18:15:39Z +http://news.bbc.co.uk/2/hi/programmes/world_news_america/8601207.stm|source|BBC +http://www.readwriteweb.com/archives/top_10_semantic_web_products_of_2009.php|creationDate|2010-07-30 +http://www.readwriteweb.com/archives/top_10_semantic_web_products_of_2009.php|tag|http://www.semanlink.net/tag/readwriteweb_com +http://www.readwriteweb.com/archives/top_10_semantic_web_products_of_2009.php|tag|http://www.semanlink.net/tag/semantic_web_products +http://www.readwriteweb.com/archives/top_10_semantic_web_products_of_2009.php|title|Top 10 Semantic Web Products of 2009 +http://www.readwriteweb.com/archives/top_10_semantic_web_products_of_2009.php|creationTime|2010-07-30T14:33:47Z +http://www.bbc.co.uk/things/|creationDate|2014-09-23 +http://www.bbc.co.uk/things/|tag|http://www.semanlink.net/tag/bbc_semantic_publishing +http://www.bbc.co.uk/things/|comment|BBC Things provides a single reference for all of the things that matter to the BBC and our audiences. +http://www.bbc.co.uk/things/|title|BBC - Things +http://www.bbc.co.uk/things/|creationTime|2014-09-23T13:53:02Z +https://plus.google.com/s/schema.org|creationDate|2013-07-07 +https://plus.google.com/s/schema.org|tag|http://www.semanlink.net/tag/googleplus +https://plus.google.com/s/schema.org|tag|http://www.semanlink.net/tag/schema_org +https://plus.google.com/s/schema.org|title|schema.org - Google+ +https://plus.google.com/s/schema.org|creationTime|2013-07-07T17:20:06Z +http://umiacs.umd.edu/~jbg/teaching/INFM_718_2011/lecture_6.pdf|creationDate|2012-03-24 +http://umiacs.umd.edu/~jbg/teaching/INFM_718_2011/lecture_6.pdf|tag|http://www.semanlink.net/tag/university_of_maryland +http://umiacs.umd.edu/~jbg/teaching/INFM_718_2011/lecture_6.pdf|tag|http://www.semanlink.net/tag/language_model +http://umiacs.umd.edu/~jbg/teaching/INFM_718_2011/lecture_6.pdf|title|Language models - Jordan Boyd-Graber - University of Maryland +http://umiacs.umd.edu/~jbg/teaching/INFM_718_2011/lecture_6.pdf|creationTime|2012-03-24T19:04:03Z +https://www.wired.com/story/four-successful-bel-transplants/|creationDate|2018-08-04 +https://www.wired.com/story/four-successful-bel-transplants/|tag|http://www.semanlink.net/tag/lab_grown_organs +https://www.wired.com/story/four-successful-bel-transplants/|title|Bioengineers Are Closer Than Ever To Lab-Grown Lungs WIRED +https://www.wired.com/story/four-successful-bel-transplants/|creationTime|2018-08-04T14:21:37Z +http://deliprao.com/archives/262|creationDate|2017-12-13 +http://deliprao.com/archives/262|tag|http://www.semanlink.net/tag/delip_rao +http://deliprao.com/archives/262|tag|http://www.semanlink.net/tag/learned_index_structures +http://deliprao.com/archives/262|tag|http://www.semanlink.net/tag/nips_2017 +http://deliprao.com/archives/262|comment|"Comments on the [“The Case for Learned Index Structures”](https://arxiv.org/abs/1712.01208v1) paper +" +http://deliprao.com/archives/262|relatedDoc|https://arxiv.org/abs/1712.01208v1 +http://deliprao.com/archives/262|title|Everything is a Model Delip Rao +http://deliprao.com/archives/262|creationTime|2017-12-13T11:11:04Z +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0106.html|creationDate|2013-07-06 +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0106.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0106.html|tag|http://www.semanlink.net/tag/martin_hepp 
+http://lists.w3.org/Archives/Public/public-vocabs/2013May/0106.html|comment|"""You can, in theory, collate statements scattered across an HTML document by using the about property with the same identifier in the enclosing (outer) RDFa entity, but 1. that is kind of clumsy 2. search engines will often not understand that the information belongs to the same entity, since they typically parse RDFa markup as a tree and not as a graph (i.e. nesting matters). """ +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0106.html|title|Re: Offer data in separate places in HTML from Martin Hepp on 2013-05-14 (public-vocabs@w3.org from May 2013) +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0106.html|creationTime|2013-07-06T23:56:56Z +http://allforces.com/2005/08/25/wordpress-on-mac-install/|creationDate|2007-07-07 +http://allforces.com/2005/08/25/wordpress-on-mac-install/|tag|http://www.semanlink.net/tag/installing_wordpress +http://allforces.com/2005/08/25/wordpress-on-mac-install/|title|WordPress on Mac Part 3: Installing WordPress All Forces +http://allforces.com/2005/08/25/wordpress-on-mac-install/|creationTime|2007-07-07T15:33:30Z +https://arxiv.org/abs/1611.04228|creationDate|2017-04-28 +https://arxiv.org/abs/1611.04228|tag|http://www.semanlink.net/tag/brain_vs_deep_learning +https://arxiv.org/abs/1611.04228|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1611.04228|tag|http://www.semanlink.net/tag/hebbian_theory +https://arxiv.org/abs/1611.04228|tag|http://www.semanlink.net/tag/neuroscience_and_ai +https://arxiv.org/abs/1611.04228|arxiv_author|Aseem Wadhwa +https://arxiv.org/abs/1611.04228|arxiv_author|Upamanyu Madhow +https://arxiv.org/abs/1611.04228|comment|"The ""fire together, wire together"" Hebbian model is a central principle for learning in neuroscience, but surprisingly, it has found limited applicability in modern machine learning. In this paper, we take a first step towards bridging this gap, by developing flavors of competitive Hebbian learning which produce sparse, distributed neural codes using online adaptation with minimal tuning" +https://arxiv.org/abs/1611.04228|title|[1611.04228] Learning Sparse, Distributed Representations using the Hebbian Principle +https://arxiv.org/abs/1611.04228|creationTime|2017-04-28T22:52:38Z +https://arxiv.org/abs/1611.04228|arxiv_summary|"The ""fire together, wire together"" Hebbian model is a central principle for +learning in neuroscience, but surprisingly, it has found limited applicability +in modern machine learning. In this paper, we take a first step towards +bridging this gap, by developing flavors of competitive Hebbian learning which +produce sparse, distributed neural codes using online adaptation with minimal +tuning. We propose an unsupervised algorithm, termed Adaptive Hebbian Learning +(AHL). We illustrate the distributed nature of the learned representations via +output entropy computations for synthetic data, and demonstrate superior +performance, compared to standard alternatives such as autoencoders, in +training a deep convolutional net on standard image datasets." 
+https://arxiv.org/abs/1611.04228|arxiv_firstAuthor|Aseem Wadhwa +https://arxiv.org/abs/1611.04228|arxiv_updated|2016-11-14T02:28:13Z +https://arxiv.org/abs/1611.04228|arxiv_title|Learning Sparse, Distributed Representations using the Hebbian Principle +https://arxiv.org/abs/1611.04228|arxiv_published|2016-11-14T02:28:13Z +https://arxiv.org/abs/1611.04228|arxiv_num|1611.04228 +http://axel.deri.ie/publications/ESWC2007_SPARQL_Tutorial.pdf|creationDate|2010-12-15 +http://axel.deri.ie/publications/ESWC2007_SPARQL_Tutorial.pdf|tag|http://www.semanlink.net/tag/axel_polleres +http://axel.deri.ie/publications/ESWC2007_SPARQL_Tutorial.pdf|tag|http://www.semanlink.net/tag/bijan_parsia +http://axel.deri.ie/publications/ESWC2007_SPARQL_Tutorial.pdf|tag|http://www.semanlink.net/tag/sparql +http://axel.deri.ie/publications/ESWC2007_SPARQL_Tutorial.pdf|tag|http://www.semanlink.net/tag/andy_seaborne +http://axel.deri.ie/publications/ESWC2007_SPARQL_Tutorial.pdf|title|ESWC2007_SPARQL_Tutorial.pdf +http://axel.deri.ie/publications/ESWC2007_SPARQL_Tutorial.pdf|creationTime|2010-12-15T13:32:08Z +http://www.youtube.com/watch?v=w7BKNySQ97w|creationDate|2011-06-09 +http://www.youtube.com/watch?v=w7BKNySQ97w|tag|http://www.semanlink.net/tag/police +http://www.youtube.com/watch?v=w7BKNySQ97w|tag|http://www.semanlink.net/tag/liberte_d_expression +http://www.youtube.com/watch?v=w7BKNySQ97w|tag|http://www.semanlink.net/tag/georges_brassens +http://www.youtube.com/watch?v=w7BKNySQ97w|tag|http://www.semanlink.net/tag/sarkozyland +http://www.youtube.com/watch?v=w7BKNySQ97w|tag|http://www.semanlink.net/tag/rigolo +http://www.youtube.com/watch?v=w7BKNySQ97w|comment|"Frénétique l'une d'elles attache le vieux maréchal des logis et lui fait crier
+MORT AUX VACHES, MORT AUX LOIS, VIVE L'ANARCHIE" +http://www.youtube.com/watch?v=w7BKNySQ97w|title|Hécatombe - Georges Brassens +http://www.youtube.com/watch?v=w7BKNySQ97w|creationTime|2011-06-09T23:19:07Z +http://crianca.free.fr/|creationDate|2006-03-22 +http://crianca.free.fr/|tag|http://www.semanlink.net/tag/punk +http://crianca.free.fr/|tag|http://www.semanlink.net/tag/robert +http://crianca.free.fr/|title|Criança Punk Rock - Paris +http://www.dailymotion.com/video/x6s3co_petrole-cruel-sera-le-reveil-1-5_news|creationDate|2011-09-26 +http://www.dailymotion.com/video/x6s3co_petrole-cruel-sera-le-reveil-1-5_news|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.dailymotion.com/video/x6s3co_petrole-cruel-sera-le-reveil-1-5_news|tag|http://www.semanlink.net/tag/petrole +http://www.dailymotion.com/video/x6s3co_petrole-cruel-sera-le-reveil-1-5_news|title|PETROLE Cruel sera le réveil +http://www.dailymotion.com/video/x6s3co_petrole-cruel-sera-le-reveil-1-5_news|creationTime|2011-09-26T13:31:39Z +http://winch5.blog.lemonde.fr/2013/08/22/courriels-de-trois-africains-commentant-limpact-du-mobile-sur-leur-continent/#xtor=RSS-32280322|creationDate|2013-08-25 +http://winch5.blog.lemonde.fr/2013/08/22/courriels-de-trois-africains-commentant-limpact-du-mobile-sur-leur-continent/#xtor=RSS-32280322|tag|http://www.semanlink.net/tag/winch5 +http://winch5.blog.lemonde.fr/2013/08/22/courriels-de-trois-africains-commentant-limpact-du-mobile-sur-leur-continent/#xtor=RSS-32280322|tag|http://www.semanlink.net/tag/new_africa +http://winch5.blog.lemonde.fr/2013/08/22/courriels-de-trois-africains-commentant-limpact-du-mobile-sur-leur-continent/#xtor=RSS-32280322|title|Courriels de trois Africains commentant l’impact du mobile sur leur continent Winch 5 +http://winch5.blog.lemonde.fr/2013/08/22/courriels-de-trois-africains-commentant-limpact-du-mobile-sur-leur-continent/#xtor=RSS-32280322|creationTime|2013-08-25T13:03:24Z +http://www.holygoat.co.uk/projects/tags/|creationDate|2005-04-26 +http://www.holygoat.co.uk/projects/tags/|tag|http://www.semanlink.net/tag/tag_ontology +http://www.holygoat.co.uk/projects/tags/|title|Tag ontology design +http://krook.org/jsdom/|creationDate|2005-05-27 +http://krook.org/jsdom/|tag|http://www.semanlink.net/tag/javascript_dom +http://krook.org/jsdom/|comment|Doc format javadoc +http://krook.org/jsdom/|title|JavaScript DOM +https://medium.com/product-design/d8d4f2300cf3|creationDate|2013-03-12 +https://medium.com/product-design/d8d4f2300cf3|tag|http://www.semanlink.net/tag/social_networks +https://medium.com/product-design/d8d4f2300cf3|title|Tenth Grade Tech Trends +https://medium.com/product-design/d8d4f2300cf3|creationTime|2013-03-12T10:14:49Z +http://careers.ulitzer.us/node/888956|creationDate|2009-08-24 +http://careers.ulitzer.us/node/888956|tag|http://www.semanlink.net/tag/electric_car +http://careers.ulitzer.us/node/888956|tag|http://www.semanlink.net/tag/nissan +http://careers.ulitzer.us/node/888956|title|The Renault-Nissan Alliance Forms Zero-Emission Vehicle Partnership in San Diego Careers and Employment Journal +http://careers.ulitzer.us/node/888956|creationTime|2009-08-24T17:49:42Z +http://bigbrowser.blog.lemonde.fr/2013/06/11/prism-comment-passer-entre-les-mailles-de-la-surveillance-dinternet/|creationDate|2013-06-13 +http://bigbrowser.blog.lemonde.fr/2013/06/11/prism-comment-passer-entre-les-mailles-de-la-surveillance-dinternet/|tag|http://www.semanlink.net/tag/prism_surveillance_program 
+http://bigbrowser.blog.lemonde.fr/2013/06/11/prism-comment-passer-entre-les-mailles-de-la-surveillance-dinternet/|title|PRISM – Comment passer entre les mailles de la surveillance d’Internet ? Big Browser +http://bigbrowser.blog.lemonde.fr/2013/06/11/prism-comment-passer-entre-les-mailles-de-la-surveillance-dinternet/|creationTime|2013-06-13T00:19:14Z +https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2977273|creationDate|2019-02-21 +https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2977273|tag|http://www.semanlink.net/tag/patent +https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2977273|comment|"easy-to-use measure of patent scope: the number of words in its first claim (the less words the better) (!) +" +https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2977273|title|How to Measure and Draw Causal Inferences with Patent Scope by Jeffrey M. Kuhn, Neil Thompson :: SSRN +https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2977273|creationTime|2019-02-21T00:29:59Z +http://paris.sae.edu/fr/home/|creationDate|2010-09-30 +http://paris.sae.edu/fr/home/|tag|http://www.semanlink.net/tag/robert +http://paris.sae.edu/fr/home/|title|Ecole audiovisuel Paris: formation audio - technicien son, formation mao, formation video, formation webdesign, formation 3D - SAE Paris +http://paris.sae.edu/fr/home/|creationTime|2010-09-30T22:30:25Z +http://answers.semanticweb.com/questions/1087/generating-documentation-from-rdfs-andor-owl-vocabularies|creationDate|2013-08-28 +http://answers.semanticweb.com/questions/1087/generating-documentation-from-rdfs-andor-owl-vocabularies|tag|http://www.semanlink.net/tag/rdf_owl_documentation_tool +http://answers.semanticweb.com/questions/1087/generating-documentation-from-rdfs-andor-owl-vocabularies|title|Generating documentation from RDFS and/or OWL vocabularies - ANSWERS +http://answers.semanticweb.com/questions/1087/generating-documentation-from-rdfs-andor-owl-vocabularies|creationTime|2013-08-28T13:38:13Z +https://www.wired.com/story/european-ruling-could-slow-africas-push-for-crispr-crops/|creationDate|2018-07-26 +https://www.wired.com/story/european-ruling-could-slow-africas-push-for-crispr-crops/|tag|http://www.semanlink.net/tag/afrique +https://www.wired.com/story/european-ruling-could-slow-africas-push-for-crispr-crops/|tag|http://www.semanlink.net/tag/bonne_nouvelle +https://www.wired.com/story/european-ruling-could-slow-africas-push-for-crispr-crops/|tag|http://www.semanlink.net/tag/ogm +https://www.wired.com/story/european-ruling-could-slow-africas-push-for-crispr-crops/|tag|http://www.semanlink.net/tag/crispr_cas9 +https://www.wired.com/story/european-ruling-could-slow-africas-push-for-crispr-crops/|title|European Ruling Could Slow Africa’s Push For Crispr Crops WIRED +https://www.wired.com/story/european-ruling-could-slow-africas-push-for-crispr-crops/|creationTime|2018-07-26T01:14:47Z +http://www.vox.com/first-person/2016/11/14/13513066/universal-basic-income-crowdfund|creationDate|2017-02-08 +http://www.vox.com/first-person/2016/11/14/13513066/universal-basic-income-crowdfund|tag|http://www.semanlink.net/tag/guaranteed_basic_income +http://www.vox.com/first-person/2016/11/14/13513066/universal-basic-income-crowdfund|tag|http://www.semanlink.net/tag/minimum_wage +http://www.vox.com/first-person/2016/11/14/13513066/universal-basic-income-crowdfund|comment|Perhaps the most transformative effect of basic income I’ve personally experienced is the power it gives in any negotiation. For many people, this will be experienced as the power to refuse to work for insufficiently low wages (potentially nullifying the need for minimum wage laws), or unacceptable terms of any kind
+http://www.vox.com/first-person/2016/11/14/13513066/universal-basic-income-crowdfund|title|What if you got $1,000 a month, just for being alive? I decided to find out. - Vox +http://www.vox.com/first-person/2016/11/14/13513066/universal-basic-income-crowdfund|creationTime|2017-02-08T01:03:29Z +http://jprosen.blog.lemonde.fr/2008/01/13/vers-la-detention-a-vie-des-mineurs-bonjour-l%E2%80%99espoir-235/|creationDate|2008-01-20 +http://jprosen.blog.lemonde.fr/2008/01/13/vers-la-detention-a-vie-des-mineurs-bonjour-l%E2%80%99espoir-235/|tag|http://www.semanlink.net/tag/justice +http://jprosen.blog.lemonde.fr/2008/01/13/vers-la-detention-a-vie-des-mineurs-bonjour-l%E2%80%99espoir-235/|tag|http://www.semanlink.net/tag/sarkozy +http://jprosen.blog.lemonde.fr/2008/01/13/vers-la-detention-a-vie-des-mineurs-bonjour-l%E2%80%99espoir-235/|tag|http://www.semanlink.net/tag/j_hallucine +http://jprosen.blog.lemonde.fr/2008/01/13/vers-la-detention-a-vie-des-mineurs-bonjour-l%E2%80%99espoir-235/|tag|http://www.semanlink.net/tag/sarkozyland +http://jprosen.blog.lemonde.fr/2008/01/13/vers-la-detention-a-vie-des-mineurs-bonjour-l%E2%80%99espoir-235/|comment|En tous cas, force est de constater qu’un mineur de plus de 13 ans qui serait l’auteur d’acte graves peut, non seulement encourir une peine de 20 ans d’emprisonnement criminels, encore plus facilement bien sûr s’il a plus de 16 ans au moment des faits car il peut perdre le bénéfice de l’excuse de minorité, soit du fait de la décision de la cour d’assises, soit désormais automatiquement comme double récidiviste du fait de la loi et encourir ….. la perpétuité puisque jugé comme un majeur. +http://jprosen.blog.lemonde.fr/2008/01/13/vers-la-detention-a-vie-des-mineurs-bonjour-l%E2%80%99espoir-235/|title|Droits des enfants » Vers la détention à vie des mineurs : bonjour l’espoir ! +http://jprosen.blog.lemonde.fr/2008/01/13/vers-la-detention-a-vie-des-mineurs-bonjour-l%E2%80%99espoir-235/|creationTime|2008-01-20T20:45:39Z +http://www4.wiwiss.fu-berlin.de/lodcloud/ckan/validator/index.php|creationDate|2011-09-23 +http://www4.wiwiss.fu-berlin.de/lodcloud/ckan/validator/index.php|tag|http://www.semanlink.net/tag/validation +http://www4.wiwiss.fu-berlin.de/lodcloud/ckan/validator/index.php|tag|http://www.semanlink.net/tag/lod_cloud +http://www4.wiwiss.fu-berlin.de/lodcloud/ckan/validator/index.php|tag|http://www.semanlink.net/tag/linked_data +http://www4.wiwiss.fu-berlin.de/lodcloud/ckan/validator/index.php|comment|This website gives an overview of Linked Data sources cataloged on CKAN and their completeness level for inclusion in the LOD cloud. It furthermore offers a validator for your CKAN entry with step-by-step guidance.
+http://www4.wiwiss.fu-berlin.de/lodcloud/ckan/validator/index.php|title|CKAN LOD Datasets
+http://www4.wiwiss.fu-berlin.de/lodcloud/ckan/validator/index.php|creationTime|2011-09-23T09:57:42Z
+http://sommet-ia.evenement.bercy.gouv.fr|creationDate|2018-03-28
+http://sommet-ia.evenement.bercy.gouv.fr|tag|http://www.semanlink.net/tag/rapport_villani_sur_l_ia
+http://sommet-ia.evenement.bercy.gouv.fr|title|Sommet intelligence artificielle à Paris
+http://sommet-ia.evenement.bercy.gouv.fr|creationTime|2018-03-28T22:02:15Z
+https://github.com/editorsnotes/edit-with-lov|creationDate|2017-03-18
+https://github.com/editorsnotes/edit-with-lov|tag|http://www.semanlink.net/tag/json_ld
+https://github.com/editorsnotes/edit-with-lov|tag|http://www.semanlink.net/tag/lov_linked_open_vocabularies
+https://github.com/editorsnotes/edit-with-lov|title|editorsnotes/edit-with-lov: demo of editing JSON-LD using LOV vocabularies
+https://github.com/editorsnotes/edit-with-lov|creationTime|2017-03-18T10:30:15Z
+http://messenger.jhuapl.edu/gallery/sciencePhotos/image.php?gallery_id=2&image_id=117|creationDate|2008-01-16
+http://messenger.jhuapl.edu/gallery/sciencePhotos/image.php?gallery_id=2&image_id=117|tag|http://www.semanlink.net/tag/messenger
+http://messenger.jhuapl.edu/gallery/sciencePhotos/image.php?gallery_id=2&image_id=117|title|Messenger's First Look at Mercury’s Previously Unseen Side
+http://messenger.jhuapl.edu/gallery/sciencePhotos/image.php?gallery_id=2&image_id=117|creationTime|2008-01-16T23:32:42Z
+https://www.quora.com/How-do-you-calculate-the-memory-footprint-of-a-particular-deep-learning-model|creationDate|2019-05-16
+https://www.quora.com/How-do-you-calculate-the-memory-footprint-of-a-particular-deep-learning-model|tag|http://www.semanlink.net/tag/memory_requirements_in_nn
+https://www.quora.com/How-do-you-calculate-the-memory-footprint-of-a-particular-deep-learning-model|title|How do you calculate the memory footprint of a particular deep learning model? - Quora
+https://www.quora.com/How-do-you-calculate-the-memory-footprint-of-a-particular-deep-learning-model|creationTime|2019-05-16T23:14:54Z
+http://bnode.org/blog/2010/07/28/linked-data-entity-extraction-with-zemanta-and-opencalais|creationDate|2010-08-12
+http://bnode.org/blog/2010/07/28/linked-data-entity-extraction-with-zemanta-and-opencalais|tag|http://www.semanlink.net/tag/calais
+http://bnode.org/blog/2010/07/28/linked-data-entity-extraction-with-zemanta-and-opencalais|tag|http://www.semanlink.net/tag/benjamin_nowack
+http://bnode.org/blog/2010/07/28/linked-data-entity-extraction-with-zemanta-and-opencalais|tag|http://www.semanlink.net/tag/linked_data
+http://bnode.org/blog/2010/07/28/linked-data-entity-extraction-with-zemanta-and-opencalais|tag|http://www.semanlink.net/tag/zemanta
+http://bnode.org/blog/2010/07/28/linked-data-entity-extraction-with-zemanta-and-opencalais|tag|http://www.semanlink.net/tag/entity_linking
+http://bnode.org/blog/2010/07/28/linked-data-entity-extraction-with-zemanta-and-opencalais|title|Linked Data Entity Extraction with Zemanta and OpenCalais - benjamin nowack's blog
+http://bnode.org/blog/2010/07/28/linked-data-entity-extraction-with-zemanta-and-opencalais|creationTime|2010-08-12T16:23:37Z
+https://www.nytimes.com/2018/02/05/science/mutant-crayfish-clones-europe.html|creationDate|2018-02-06
+https://www.nytimes.com/2018/02/05/science/mutant-crayfish-clones-europe.html|tag|http://www.semanlink.net/tag/invasion_d_especes_etrangeres
+https://www.nytimes.com/2018/02/05/science/mutant-crayfish-clones-europe.html|tag|http://www.semanlink.net/tag/clonage
+https://www.nytimes.com/2018/02/05/science/mutant-crayfish-clones-europe.html|tag|http://www.semanlink.net/tag/ecrevisse
+https://www.nytimes.com/2018/02/05/science/mutant-crayfish-clones-europe.html|tag|http://www.semanlink.net/tag/curiosite_naturelle
+https://www.nytimes.com/2018/02/05/science/mutant-crayfish-clones-europe.html|title|This Mutant Crayfish Clones Itself, and It’s Taking Over Europe - The New York Times
+https://www.nytimes.com/2018/02/05/science/mutant-crayfish-clones-europe.html|creationTime|2018-02-06T11:47:20Z
+http://www.consortiuminfo.org/bulletins/semanticweb.php|creationDate|2008-03-04
+http://www.consortiuminfo.org/bulletins/semanticweb.php|tag|http://www.semanlink.net/tag/tim_berners_lee
+http://www.consortiuminfo.org/bulletins/semanticweb.php|tag|http://www.semanlink.net/tag/semantic_web_evangelization
+http://www.consortiuminfo.org/bulletins/semanticweb.php|title|THE SEMANTIC WEB: AN INTERVIEW WITH TIM BERNERS-LEE - Consortiuminfo.org Consortium Standards Bulletin- June 2005
+http://www.consortiuminfo.org/bulletins/semanticweb.php|creationTime|2008-03-04T22:56:12Z
+https://en.wikipedia.org/wiki/G%C3%B6bekli_Tepe|creationDate|2015-08-09
+https://en.wikipedia.org/wiki/G%C3%B6bekli_Tepe|tag|http://www.semanlink.net/tag/megalith
+https://en.wikipedia.org/wiki/G%C3%B6bekli_Tepe|tag|http://www.semanlink.net/tag/asie_mineure
+https://en.wikipedia.org/wiki/G%C3%B6bekli_Tepe|tag|http://www.semanlink.net/tag/neolithique
+https://en.wikipedia.org/wiki/G%C3%B6bekli_Tepe|tag|http://www.semanlink.net/tag/origine_de_l_agriculture
+https://en.wikipedia.org/wiki/G%C3%B6bekli_Tepe|tag|http://www.semanlink.net/tag/prehistoire
+https://en.wikipedia.org/wiki/G%C3%B6bekli_Tepe|title|Göbekli Tepe - Wikipedia, the free encyclopedia
+https://en.wikipedia.org/wiki/G%C3%B6bekli_Tepe|creationTime|2015-08-09T11:16:23Z
+http://winch5.blog.lemonde.fr/chapitre-1-linnovation-vient-aussi-dailleurs/|creationDate|2013-08-25 +http://winch5.blog.lemonde.fr/chapitre-1-linnovation-vient-aussi-dailleurs/|tag|http://www.semanlink.net/tag/ntic_et_developpement +http://winch5.blog.lemonde.fr/chapitre-1-linnovation-vient-aussi-dailleurs/|tag|http://www.semanlink.net/tag/winch5 +http://winch5.blog.lemonde.fr/chapitre-1-linnovation-vient-aussi-dailleurs/|tag|http://www.semanlink.net/tag/innovation +http://winch5.blog.lemonde.fr/chapitre-1-linnovation-vient-aussi-dailleurs/|title|Chapitre 1 – L’innovation vient aussi d’ailleurs Winch 5 +http://winch5.blog.lemonde.fr/chapitre-1-linnovation-vient-aussi-dailleurs/|creationTime|2013-08-25T12:50:18Z +http://www.cyberciti.biz/faq/appleosx-bash-unix-change-set-path-environment-variable/|creationDate|2015-02-27 +http://www.cyberciti.biz/faq/appleosx-bash-unix-change-set-path-environment-variable/|tag|http://www.semanlink.net/tag/unix_howto +http://www.cyberciti.biz/faq/appleosx-bash-unix-change-set-path-environment-variable/|tag|http://www.semanlink.net/tag/path +http://www.cyberciti.biz/faq/appleosx-bash-unix-change-set-path-environment-variable/|tag|http://www.semanlink.net/tag/os_x_unix +http://www.cyberciti.biz/faq/appleosx-bash-unix-change-set-path-environment-variable/|tag|http://www.semanlink.net/tag/mac_os_x_tip +http://www.cyberciti.biz/faq/appleosx-bash-unix-change-set-path-environment-variable/|title|Mac OS X: Set / Change $PATH Variable +http://www.cyberciti.biz/faq/appleosx-bash-unix-change-set-path-environment-variable/|creationTime|2015-02-27T14:14:47Z +http://eventmedia.eurecom.fr/|creationDate|2012-10-09 +http://eventmedia.eurecom.fr/|tag|http://www.semanlink.net/tag/lod_use_case +http://eventmedia.eurecom.fr/|title|EventMedia +http://eventmedia.eurecom.fr/|creationTime|2012-10-09T11:37:16Z +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1479|creationDate|2009-01-03 +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1479|tag|http://www.semanlink.net/tag/entailment +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1479|tag|http://www.semanlink.net/tag/orri_erling +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1479|tag|http://www.semanlink.net/tag/forward_chaining +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1479|tag|http://www.semanlink.net/tag/rdf_schema_inferencing +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1479|comment|"Once the working set is no longer in memory, response times jump disproportionately. Also, if the data changes or is retracted or is unreliable, one can end up doing a lot of extra work with materialization. Consider the effect of one malicious sameAs statement. This can lead to a lot of effects that are hard to retract. On the other hand, if running in memory with static data..., the queries run some 20% faster if entailment subclasses and sub-properties are materialized rather than done at run time. + +" +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1479|title|ISWC 2008: Some Questions - Inference: Is it always forward chaining? 
+http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1479|creationTime|2009-01-03T01:15:13Z +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-04.pdf|creationDate|2012-04-16 +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-04.pdf|tag|http://www.semanlink.net/tag/read_write_linked_data +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-04.pdf|tag|http://www.semanlink.net/tag/ldow2012 +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-04.pdf|title|Using read/write Linked Data for Application Integration – Towards a Linked Data Basic Profile +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-04.pdf|creationTime|2012-04-16T10:26:54Z +https://arxiv.org/abs/1602.06797|creationDate|2017-11-07 +https://arxiv.org/abs/1602.06797|tag|http://www.semanlink.net/tag/semi_supervised_learning +https://arxiv.org/abs/1602.06797|tag|http://www.semanlink.net/tag/short_text_clustering +https://arxiv.org/abs/1602.06797|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1602.06797|arxiv_author|Haitao Mi +https://arxiv.org/abs/1602.06797|arxiv_author|Zhiguo Wang +https://arxiv.org/abs/1602.06797|arxiv_author|Abraham Ittycheriah +https://arxiv.org/abs/1602.06797|comment|">semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: +> +>1. assign each short text to its nearest centroid based on its representation from the current neural networks; +>2. re-estimate the cluster centroids based on cluster assignments from step (1); +>3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. +" +https://arxiv.org/abs/1602.06797|title|[1602.06797] Semi-supervised Clustering for Short Text via Deep Representation Learning +https://arxiv.org/abs/1602.06797|creationTime|2017-11-07T23:07:48Z +https://arxiv.org/abs/1602.06797|arxiv_summary|"In this work, we propose a semi-supervised method for short text clustering, +where we represent texts as distributed vectors with neural networks, and use a +small amount of labeled data to specify our intention for clustering. We design +a novel objective to combine the representation learning process and the +k-means clustering process together, and optimize the objective with both +labeled data and unlabeled data iteratively until convergence through three +steps: (1) assign each short text to its nearest centroid based on its +representation from the current neural networks; (2) re-estimate the cluster +centroids based on cluster assignments from step (1); (3) update neural +networks according to the objective by keeping centroids and cluster +assignments fixed. Experimental results on four datasets show that our method +works significantly better than several other text clustering methods." 
+https://arxiv.org/abs/1602.06797|arxiv_firstAuthor|Zhiguo Wang +https://arxiv.org/abs/1602.06797|arxiv_updated|2017-07-14T19:52:33Z +https://arxiv.org/abs/1602.06797|arxiv_title|Semi-supervised Clustering for Short Text via Deep Representation Learning +https://arxiv.org/abs/1602.06797|arxiv_published|2016-02-22T14:55:26Z +https://arxiv.org/abs/1602.06797|arxiv_num|1602.06797 +http://www.scottbot.net/HIAL/?p=221|creationDate|2014-04-23 +http://www.scottbot.net/HIAL/?p=221|tag|http://www.semanlink.net/tag/topic_modeling +http://www.scottbot.net/HIAL/?p=221|comment|Great post +http://www.scottbot.net/HIAL/?p=221|title|Topic Modeling and Network Analysis the scottbot irregular +http://www.scottbot.net/HIAL/?p=221|creationTime|2014-04-23T22:51:15Z +https://plus.google.com/communities/104510681993581444051|creationDate|2014-07-24 +https://plus.google.com/communities/104510681993581444051|tag|http://www.semanlink.net/tag/googleplus +https://plus.google.com/communities/104510681993581444051|tag|http://www.semanlink.net/tag/automotive_ontology_working_group +https://plus.google.com/communities/104510681993581444051|title|The Automotive Ontology Working Group - Community - Google+ +https://plus.google.com/communities/104510681993581444051|creationTime|2014-07-24T15:09:04Z +http://fgiasson.com/blog/index.php/2008/04/20/exploding-the-domain-umbel-web-services-by-zitgist/|creationDate|2008-04-25 +http://fgiasson.com/blog/index.php/2008/04/20/exploding-the-domain-umbel-web-services-by-zitgist/|tag|http://www.semanlink.net/tag/umbel +http://fgiasson.com/blog/index.php/2008/04/20/exploding-the-domain-umbel-web-services-by-zitgist/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2008/04/20/exploding-the-domain-umbel-web-services-by-zitgist/|tag|http://www.semanlink.net/tag/zitgist +http://fgiasson.com/blog/index.php/2008/04/20/exploding-the-domain-umbel-web-services-by-zitgist/|title|Exploding the Domain: UMBEL Web Services by Zitgist at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2008/04/20/exploding-the-domain-umbel-web-services-by-zitgist/|creationTime|2008-04-25T08:56:46Z +http://infolab.stanford.edu/~bawa/Pub/similarity.pdf|creationDate|2018-03-22 +http://infolab.stanford.edu/~bawa/Pub/similarity.pdf|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +http://infolab.stanford.edu/~bawa/Pub/similarity.pdf|tag|http://www.semanlink.net/tag/nearest_neighbor_search +http://infolab.stanford.edu/~bawa/Pub/similarity.pdf|title|LSH Forest: Self-Tuning Indexes for Similarity Search (2005) +http://infolab.stanford.edu/~bawa/Pub/similarity.pdf|creationTime|2018-03-22T00:13:07Z +http://ishtarnews.blogspot.com/2006/12/national-day-in-zinder-parade.html|creationDate|2009-02-22 +http://ishtarnews.blogspot.com/2006/12/national-day-in-zinder-parade.html|tag|http://www.semanlink.net/tag/zinder +http://ishtarnews.blogspot.com/2006/12/national-day-in-zinder-parade.html|title|Ishtar News: National Day in Zinder - the parade +http://ishtarnews.blogspot.com/2006/12/national-day-in-zinder-parade.html|creationTime|2009-02-22T16:27:36Z +http://www.technologyreview.com/featuredstory/520446/the-decline-of-wikipedia/|creationDate|2013-12-03 +http://www.technologyreview.com/featuredstory/520446/the-decline-of-wikipedia/|tag|http://www.semanlink.net/tag/wikipedia +http://www.technologyreview.com/featuredstory/520446/the-decline-of-wikipedia/|title|The Decline of Wikipedia: Even As More People Than Ever Rely on It, Fewer People Create It MIT Technology Review 
+http://www.technologyreview.com/featuredstory/520446/the-decline-of-wikipedia/|creationTime|2013-12-03T14:18:17Z +https://medium.freecodecamp.com/angular-2-versus-react-there-will-be-blood-66595faafd51#.9vkdng65m|creationDate|2016-03-28 +https://medium.freecodecamp.com/angular-2-versus-react-there-will-be-blood-66595faafd51#.9vkdng65m|tag|http://www.semanlink.net/tag/javascript +https://medium.freecodecamp.com/angular-2-versus-react-there-will-be-blood-66595faafd51#.9vkdng65m|tag|http://www.semanlink.net/tag/angularjs +https://medium.freecodecamp.com/angular-2-versus-react-there-will-be-blood-66595faafd51#.9vkdng65m|tag|http://www.semanlink.net/tag/react_js +https://medium.freecodecamp.com/angular-2-versus-react-there-will-be-blood-66595faafd51#.9vkdng65m|comment|Angular 2 continues to put “JS” into HTML. React puts “HTML” into JS. +https://medium.freecodecamp.com/angular-2-versus-react-there-will-be-blood-66595faafd51#.9vkdng65m|title|Angular 2 versus React: There Will Be Blood — Free Code Camp — Medium +https://medium.freecodecamp.com/angular-2-versus-react-there-will-be-blood-66595faafd51#.9vkdng65m|creationTime|2016-03-28T18:13:40Z +http://www2013.org/proceedings/p749.pdf|creationDate|2013-05-21 +http://www2013.org/proceedings/p749.pdf|tag|http://www.semanlink.net/tag/personal_archives +http://www2013.org/proceedings/p749.pdf|tag|http://www.semanlink.net/tag/www_2013 +http://www2013.org/proceedings/p749.pdf|tag|http://www.semanlink.net/tag/semanlink2_related +http://www2013.org/proceedings/p749.pdf|tag|http://www.semanlink.net/tag/microsoft_research +http://www2013.org/proceedings/p749.pdf|title|Rethinking the Web as a Personal Archive +http://www2013.org/proceedings/p749.pdf|creationTime|2013-05-21T07:58:56Z +https://en.wikipedia.org/wiki/The_Lunchbox|creationDate|2016-05-06 +https://en.wikipedia.org/wiki/The_Lunchbox|tag|http://www.semanlink.net/tag/film_indien +https://en.wikipedia.org/wiki/The_Lunchbox|title|The Lunchbox - Wikipedia, the free encyclopedia +https://en.wikipedia.org/wiki/The_Lunchbox|creationTime|2016-05-06T01:47:40Z +https://github.com/facebookresearch/MUSE|creationDate|2017-12-22 +https://github.com/facebookresearch/MUSE|tag|http://www.semanlink.net/tag/fasttext +https://github.com/facebookresearch/MUSE|tag|http://www.semanlink.net/tag/word_embedding +https://github.com/facebookresearch/MUSE|tag|http://www.semanlink.net/tag/cross_lingual_nlp +https://github.com/facebookresearch/MUSE|tag|http://www.semanlink.net/tag/guillaume_lample +https://github.com/facebookresearch/MUSE|tag|http://www.semanlink.net/tag/nlp_facebook +https://github.com/facebookresearch/MUSE|tag|http://www.semanlink.net/tag/multilingual_embeddings +https://github.com/facebookresearch/MUSE|title|facebookresearch/MUSE: A library for Multilingual Unsupervised or Supervised word Embeddings +https://github.com/facebookresearch/MUSE|creationTime|2017-12-22T10:09:41Z +http://revsys.com/newscloud/|creationDate|2006-03-23 +http://revsys.com/newscloud/|tag|http://www.semanlink.net/tag/tag_cloud +http://revsys.com/newscloud/|title|NewsCloud (Revolution Systems) +https://www.wired.com/tag/autonomous-vehicles/|creationDate|2016-09-16 +https://www.wired.com/tag/autonomous-vehicles/|tag|http://www.semanlink.net/tag/driverless_car +https://www.wired.com/tag/autonomous-vehicles/|title|Autonomous Vehicles WIRED +https://www.wired.com/tag/autonomous-vehicles/|creationTime|2016-09-16T16:15:34Z +http://graus.nu/research/context-based-entity-linking/|creationDate|2013-07-18 
+http://graus.nu/research/context-based-entity-linking/|tag|http://www.semanlink.net/tag/named_entity_recognition
+http://graus.nu/research/context-based-entity-linking/|title|Context-based Entity Linking Blog graus.nu
+http://graus.nu/research/context-based-entity-linking/|creationTime|2013-07-18T23:34:49Z
+http://www.cnrs.fr/inc/communication/direct_labos/cario.htm?utm_content=buffer8109c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-05-05
+http://www.cnrs.fr/inc/communication/direct_labos/cario.htm?utm_content=buffer8109c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/artificial_neurons
+http://www.cnrs.fr/inc/communication/direct_labos/cario.htm?utm_content=buffer8109c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Premier neurone artificiel monocomposant - CNRS
+http://www.cnrs.fr/inc/communication/direct_labos/cario.htm?utm_content=buffer8109c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-05-05T01:08:28Z
+https://dl.acm.org/citation.cfm?id=1007733|creationDate|2018-11-09
+https://dl.acm.org/citation.cfm?id=1007733|tag|http://www.semanlink.net/tag/imbalanced_data
+https://dl.acm.org/citation.cfm?id=1007733|title|Editorial: special issue on learning from imbalanced data sets - Chawla (2004)
+https://dl.acm.org/citation.cfm?id=1007733|creationTime|2018-11-09T00:56:18Z
+http://energy.gov/eere/articles/first-commercially-available-fuel-cell-electric-vehicles-hit-street|creationDate|2014-12-12
+http://energy.gov/eere/articles/first-commercially-available-fuel-cell-electric-vehicles-hit-street|tag|http://www.semanlink.net/tag/hydrogen_cars
+http://energy.gov/eere/articles/first-commercially-available-fuel-cell-electric-vehicles-hit-street|title|First Commercially Available Fuel Cell Electric Vehicles Hit the Street Department of Energy
+http://energy.gov/eere/articles/first-commercially-available-fuel-cell-electric-vehicles-hit-street|creationTime|2014-12-12T20:53:56Z
+http://en.wikipedia.org/wiki/The_Master_and_Margarita|creationDate|2010-10-30
+http://en.wikipedia.org/wiki/The_Master_and_Margarita|tag|http://www.semanlink.net/tag/litterature_russe
+http://en.wikipedia.org/wiki/The_Master_and_Margarita|tag|http://www.semanlink.net/tag/boulgakov
+http://en.wikipedia.org/wiki/The_Master_and_Margarita|tag|http://www.semanlink.net/tag/vito
+http://en.wikipedia.org/wiki/The_Master_and_Margarita|tag|http://www.semanlink.net/tag/roman
+http://en.wikipedia.org/wiki/The_Master_and_Margarita|title|The Master and Margarita
+http://en.wikipedia.org/wiki/The_Master_and_Margarita|creationTime|2010-10-30T21:55:42Z
+http://www.youtube.com/watch?v=6gmP4nk0EOE|creationDate|2008-12-16
+http://www.youtube.com/watch?v=6gmP4nk0EOE|tag|http://www.semanlink.net/tag/web_2_0
+http://www.youtube.com/watch?v=6gmP4nk0EOE|tag|http://www.semanlink.net/tag/web
+http://www.youtube.com/watch?v=6gmP4nk0EOE|tag|http://www.semanlink.net/tag/youtube_video
+http://www.youtube.com/watch?v=6gmP4nk0EOE|comment|"The web in five minutes
+"
+http://www.youtube.com/watch?v=6gmP4nk0EOE|title|Web 2.0 ... The Machine is Us/ing Us
+http://www.youtube.com/watch?v=6gmP4nk0EOE|creationTime|2008-12-16T10:19:28Z
+http://apassant.net/2014/07/01/google-io-2014-recap-android-knowledge-graph-and-more/|creationDate|2014-07-02
+http://apassant.net/2014/07/01/google-io-2014-recap-android-knowledge-graph-and-more/|tag|http://www.semanlink.net/tag/android
+http://apassant.net/2014/07/01/google-io-2014-recap-android-knowledge-graph-and-more/|tag|http://www.semanlink.net/tag/google_knowledge_graph
+http://apassant.net/2014/07/01/google-io-2014-recap-android-knowledge-graph-and-more/|tag|http://www.semanlink.net/tag/google
+http://apassant.net/2014/07/01/google-io-2014-recap-android-knowledge-graph-and-more/|tag|http://www.semanlink.net/tag/alexandre_passant
+http://apassant.net/2014/07/01/google-io-2014-recap-android-knowledge-graph-and-more/|title|Google I/O 2014 Recap: Android, Knowledge Graph and more Alexandre Passant
+http://apassant.net/2014/07/01/google-io-2014-recap-android-knowledge-graph-and-more/|creationTime|2014-07-02T09:20:12Z
+http://mccormickml.com/2015/06/12/minhash-tutorial-with-python-code/|creationDate|2017-09-12
+http://mccormickml.com/2015/06/12/minhash-tutorial-with-python-code/|tag|http://www.semanlink.net/tag/minhash
+http://mccormickml.com/2015/06/12/minhash-tutorial-with-python-code/|tag|http://www.semanlink.net/tag/tutorial
+http://mccormickml.com/2015/06/12/minhash-tutorial-with-python-code/|title|MinHash Tutorial with Python Code · Chris McCormick
+http://mccormickml.com/2015/06/12/minhash-tutorial-with-python-code/|creationTime|2017-09-12T15:17:28Z
+http://purl.org/configurationontology/quickstart|creationDate|2013-04-17
+http://purl.org/configurationontology/quickstart|tag|http://www.semanlink.net/tag/configurator
+http://purl.org/configurationontology/quickstart|tag|http://www.semanlink.net/tag/tutorial
+http://purl.org/configurationontology/quickstart|tag|http://www.semanlink.net/tag/configuration_ontology
+http://purl.org/configurationontology/quickstart|tag|http://www.semanlink.net/tag/rplug
+http://purl.org/configurationontology/quickstart|tag|http://www.semanlink.net/tag/javascript_rdf
+http://purl.org/configurationontology/quickstart|tag|http://www.semanlink.net/tag/c2gweb_on_the_web
+http://purl.org/configurationontology/quickstart|tag|http://www.semanlink.net/tag/c2gweb
+http://purl.org/configurationontology/quickstart|tag|http://www.semanlink.net/tag/c2gweb_rdf
+http://purl.org/configurationontology/quickstart|tag|http://www.semanlink.net/tag/configuration_as_linked_data
+http://purl.org/configurationontology/quickstart|comment|Renault publishes the description of its commercial range as Linked Data. This page explains how to connect to this data, and how to use it. As an example, a configurator over the Renault range is implemented in javascript, using these data.
+http://purl.org/configurationontology/quickstart|title|C2GWeb-js: Renault Configuration data as RDF, tutorial +http://purl.org/configurationontology/quickstart|creationTime|2013-04-17T17:56:37Z +http://purl.org/configurationontology/quickstart|publish|true +https://arxiv.org/abs/1801.06146|creationDate|2018-01-19 +https://arxiv.org/abs/1801.06146|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +https://arxiv.org/abs/1801.06146|tag|http://www.semanlink.net/tag/transfer_learning +https://arxiv.org/abs/1801.06146|tag|http://www.semanlink.net/tag/ulmfit +https://arxiv.org/abs/1801.06146|tag|http://www.semanlink.net/tag/catastrophic_forgetting +https://arxiv.org/abs/1801.06146|tag|http://www.semanlink.net/tag/jeremy_howard +https://arxiv.org/abs/1801.06146|tag|http://www.semanlink.net/tag/nlp_text_classification +https://arxiv.org/abs/1801.06146|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1801.06146|tag|http://www.semanlink.net/tag/good +https://arxiv.org/abs/1801.06146|tag|http://www.semanlink.net/tag/sebastian_ruder +https://arxiv.org/abs/1801.06146|arxiv_author|Sebastian Ruder +https://arxiv.org/abs/1801.06146|arxiv_author|Jeremy Howard +https://arxiv.org/abs/1801.06146|comment|"code is available in the fastai lib + +[blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) + +[see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) + + + + + +" +https://arxiv.org/abs/1801.06146|relatedDoc|https://yashuseth.blog/2018/06/17/understanding-universal-language-model-fine-tuning-ulmfit/ +https://arxiv.org/abs/1801.06146|title|[1801.06146] Universal Language Model Fine-tuning for Text Classification +https://arxiv.org/abs/1801.06146|creationTime|2018-01-19T11:31:32Z +https://arxiv.org/abs/1801.06146|arxiv_summary|"Inductive transfer learning has greatly impacted computer vision, but +existing approaches in NLP still require task-specific modifications and +training from scratch. We propose Universal Language Model Fine-tuning +(ULMFiT), an effective transfer learning method that can be applied to any task +in NLP, and introduce techniques that are key for fine-tuning a language model. +Our method significantly outperforms the state-of-the-art on six text +classification tasks, reducing the error by 18-24% on the majority of datasets. +Furthermore, with only 100 labeled examples, it matches the performance of +training from scratch on 100x more data. We open-source our pretrained models +and code." 
+https://arxiv.org/abs/1801.06146|arxiv_firstAuthor|Jeremy Howard
+https://arxiv.org/abs/1801.06146|arxiv_updated|2018-05-23T09:23:47Z
+https://arxiv.org/abs/1801.06146|arxiv_title|Universal Language Model Fine-tuning for Text Classification
+https://arxiv.org/abs/1801.06146|arxiv_published|2018-01-18T17:54:52Z
+https://arxiv.org/abs/1801.06146|arxiv_num|1801.06146
+http://www.oracle.com/technology/tech/semantic_technologies/index.html|creationDate|2009-01-15
+http://www.oracle.com/technology/tech/semantic_technologies/index.html|tag|http://www.semanlink.net/tag/semantic_web_databases
+http://www.oracle.com/technology/tech/semantic_technologies/index.html|tag|http://www.semanlink.net/tag/oracle
+http://www.oracle.com/technology/tech/semantic_technologies/index.html|title|Oracle - Semantic Technologies Center
+http://www.oracle.com/technology/tech/semantic_technologies/index.html|creationTime|2009-01-15T18:15:40Z
+http://www.bbc.com/news/technology-35502030|creationDate|2016-02-05
+http://www.bbc.com/news/technology-35502030|tag|http://www.semanlink.net/tag/iphone
+http://www.bbc.com/news/technology-35502030|tag|http://www.semanlink.net/tag/apple_sucks
+http://www.bbc.com/news/technology-35502030|title|iPhones 'disabled' if Apple detects third-party repairs - BBC News
+http://www.bbc.com/news/technology-35502030|creationTime|2016-02-05T22:20:02Z
+https://en.wikipedia.org/wiki/Thought_vector|creationDate|2018-10-20
+https://en.wikipedia.org/wiki/Thought_vector|tag|http://www.semanlink.net/tag/thought_vector
+https://en.wikipedia.org/wiki/Thought_vector|title|Thought vector - Wikipedia
+https://en.wikipedia.org/wiki/Thought_vector|creationTime|2018-10-20T17:56:34Z
+http://www.productontology.org/|creationDate|2011-04-05
+http://www.productontology.org/|tag|http://www.semanlink.net/tag/product_types_ontology
+http://www.productontology.org/|tag|http://www.semanlink.net/tag/goodrelations
+http://www.productontology.org/|title|The Product Types Ontology: Use Wikipedia pages for describing products or services with GoodRelations
+http://www.productontology.org/|creationTime|2011-04-05T13:41:07Z
+http://maczealots.com/tutorials/wordpress/|creationDate|2007-07-07
+http://maczealots.com/tutorials/wordpress/|tag|http://www.semanlink.net/tag/installing_wordpress
+http://maczealots.com/tutorials/wordpress/|title|Installing WordPress on Tiger
+http://maczealots.com/tutorials/wordpress/|creationTime|2007-07-07T15:21:54Z
+http://nlp.seas.harvard.edu/NamedTensor|creationDate|2019-01-04
+http://nlp.seas.harvard.edu/NamedTensor|tag|http://www.semanlink.net/tag/tensor
+http://nlp.seas.harvard.edu/NamedTensor|title|Tensor Considered Harmful
+http://nlp.seas.harvard.edu/NamedTensor|creationTime|2019-01-04T22:05:52Z
+http://hal.upmc.fr/hal-01517032|creationDate|2018-02-12
+http://hal.upmc.fr/hal-01517032|tag|http://www.semanlink.net/tag/lip6
+http://hal.upmc.fr/hal-01517032|tag|http://www.semanlink.net/tag/lynda_tamine
+http://hal.upmc.fr/hal-01517032|tag|http://www.semanlink.net/tag/irit
+http://hal.upmc.fr/hal-01517032|tag|http://www.semanlink.net/tag/laure_soulier
+http://hal.upmc.fr/hal-01517032|tag|http://www.semanlink.net/tag/embeddings_in_ir
+http://hal.upmc.fr/hal-01517032|tag|http://www.semanlink.net/tag/combining_word_and_entity_embeddings
+http://hal.upmc.fr/hal-01517032|comment|"Modèle neuronal pour la recherche d'information qui combine sémantique symbolique (représentation explicite d'entités et de leur relations, issue de ressources externes) et sémantique distributionnelle (représentation sémantique latente calculée à partir des textes)
+"
+http://hal.upmc.fr/hal-01517032|title|Modèle Neuronal de Recherche d'Information Augmenté par une Ressource Sémantique (2017)
+http://hal.upmc.fr/hal-01517032|creationTime|2018-02-12T16:59:36Z
+http://simile.mit.edu/mail/SummarizeList?listId=14|creationDate|2007-06-23
+http://simile.mit.edu/mail/SummarizeList?listId=14|tag|http://www.semanlink.net/tag/linked_data
+http://simile.mit.edu/mail/SummarizeList?listId=14|tag|http://www.semanlink.net/tag/mailing_list
+http://simile.mit.edu/mail/SummarizeList?listId=14|tag|http://www.semanlink.net/tag/simile
+http://simile.mit.edu/mail/SummarizeList?listId=14|title|Linking Open Data mailing list at simile
+http://simile.mit.edu/mail/SummarizeList?listId=14|creationTime|2007-06-23T15:13:28Z
+https://www.tensorflow.org/hub/modules/google/universal-sentence-encoder-large/1|creationDate|2018-05-23
+https://www.tensorflow.org/hub/modules/google/universal-sentence-encoder-large/1|tag|http://www.semanlink.net/tag/emnlp_2018
+https://www.tensorflow.org/hub/modules/google/universal-sentence-encoder-large/1|tag|http://www.semanlink.net/tag/sentence_embeddings
+https://www.tensorflow.org/hub/modules/google/universal-sentence-encoder-large/1|tag|http://www.semanlink.net/tag/tensorflow
+https://www.tensorflow.org/hub/modules/google/universal-sentence-encoder-large/1|tag|http://www.semanlink.net/tag/nlp_google
+https://www.tensorflow.org/hub/modules/google/universal-sentence-encoder-large/1|tag|http://www.semanlink.net/tag/google_research
+https://www.tensorflow.org/hub/modules/google/universal-sentence-encoder-large/1|comment|"[Paper presented at EMNLP 2018](https://aclanthology.coli.uni-saarland.de/papers/D18-2029/d18-2029)
+"
+https://www.tensorflow.org/hub/modules/google/universal-sentence-encoder-large/1|title|Module google/universal-sentence-encoder    TensorFlow
+https://www.tensorflow.org/hub/modules/google/universal-sentence-encoder-large/1|creationTime|2018-05-23T16:35:31Z
+https://www.ethereum-france.com/deploiement-du-projet-the-dao-mere-de-toutes-les-dao/|creationDate|2016-06-30
+https://www.ethereum-france.com/deploiement-du-projet-the-dao-mere-de-toutes-les-dao/|tag|http://www.semanlink.net/tag/the_dao
+https://www.ethereum-france.com/deploiement-du-projet-the-dao-mere-de-toutes-les-dao/|title|Déploiement de The DAO, « mère de toute les DAO » Ethereum France
+https://www.ethereum-france.com/deploiement-du-projet-the-dao-mere-de-toutes-les-dao/|creationTime|2016-06-30T14:30:12Z
+http://www.readwriteweb.com/hack/2010/12/lisp-getting-started.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|creationDate|2010-12-21
+http://www.readwriteweb.com/hack/2010/12/lisp-getting-started.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|tag|http://www.semanlink.net/tag/lisp
+http://www.readwriteweb.com/hack/2010/12/lisp-getting-started.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|title|How To Get Started With Lisp
+http://www.readwriteweb.com/hack/2010/12/lisp-getting-started.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|creationTime|2010-12-21T15:31:41Z
+http://www.macworld.com/article/61359/2007/12/tco_filesharing.html?t=107|creationDate|2008-01-03
+http://www.macworld.com/article/61359/2007/12/tco_filesharing.html?t=107|tag|http://www.semanlink.net/tag/leopard
+http://www.macworld.com/article/61359/2007/12/tco_filesharing.html?t=107|tag|http://www.semanlink.net/tag/securite_informatique +http://www.macworld.com/article/61359/2007/12/tco_filesharing.html?t=107|comment|Take Control of Sharing Files in Leopard +http://www.macworld.com/article/61359/2007/12/tco_filesharing.html?t=107|title|Macworld Avoid file-sharing risks +http://www.macworld.com/article/61359/2007/12/tco_filesharing.html?t=107|creationTime|2008-01-03T18:01:09Z +http://www.butleranalytics.com/10-free-deep-learning-tools/|creationDate|2016-01-12 +http://www.butleranalytics.com/10-free-deep-learning-tools/|tag|http://www.semanlink.net/tag/machine_learning_tool +http://www.butleranalytics.com/10-free-deep-learning-tools/|tag|http://www.semanlink.net/tag/deep_learning +http://www.butleranalytics.com/10-free-deep-learning-tools/|title|10 Free Deep Learning Tools - Butler Analytics +http://www.butleranalytics.com/10-free-deep-learning-tools/|creationTime|2016-01-12T18:35:11Z +https://github.com/Babylonpartners/fastText_multilingual|creationDate|2018-05-11 +https://github.com/Babylonpartners/fastText_multilingual|tag|http://www.semanlink.net/tag/fasttext +https://github.com/Babylonpartners/fastText_multilingual|tag|http://www.semanlink.net/tag/multilingual_embeddings +https://github.com/Babylonpartners/fastText_multilingual|comment|"Aligning the fastText vectors of 78 languages. +> In a recent paper at ICLR 2017, we showed how the SVD can be used to learn a linear transformation (a matrix), which aligns monolingual vectors from two languages in a single vector space. In this repository we provide 78 matrices, which can be used to align the majority of the fastText languages in a single space. + +[How to align two vector spaces for myself!](https://github.com/Babylonpartners/fastText_multilingual/blob/master/align_your_own.ipynb) + +" +https://github.com/Babylonpartners/fastText_multilingual|title|GitHub - Babylonpartners/fastText_multilingual: Multilingual word vectors +https://github.com/Babylonpartners/fastText_multilingual|creationTime|2018-05-11T22:39:27Z +http://www.lemonde.fr/planete/article/2012/09/23/le-chercheur-l-agrochimiste-et-les-abeilles_1764022_3244.html|creationDate|2012-09-23 +http://www.lemonde.fr/planete/article/2012/09/23/le-chercheur-l-agrochimiste-et-les-abeilles_1764022_3244.html|tag|http://www.semanlink.net/tag/publication_scientifique +http://www.lemonde.fr/planete/article/2012/09/23/le-chercheur-l-agrochimiste-et-les-abeilles_1764022_3244.html|tag|http://www.semanlink.net/tag/pesticide +http://www.lemonde.fr/planete/article/2012/09/23/le-chercheur-l-agrochimiste-et-les-abeilles_1764022_3244.html|tag|http://www.semanlink.net/tag/recherche +http://www.lemonde.fr/planete/article/2012/09/23/le-chercheur-l-agrochimiste-et-les-abeilles_1764022_3244.html|tag|http://www.semanlink.net/tag/lobby_agroalimentaire +http://www.lemonde.fr/planete/article/2012/09/23/le-chercheur-l-agrochimiste-et-les-abeilles_1764022_3244.html|tag|http://www.semanlink.net/tag/syngenta +http://www.lemonde.fr/planete/article/2012/09/23/le-chercheur-l-agrochimiste-et-les-abeilles_1764022_3244.html|title|Le chercheur, l'agrochimiste et les abeilles +http://www.lemonde.fr/planete/article/2012/09/23/le-chercheur-l-agrochimiste-et-les-abeilles_1764022_3244.html|creationTime|2012-09-23T22:16:54Z +http://www.slideshare.net/terraces/towards-an-interlinked-semantic-wiki-farm/|creationDate|2008-06-20 
+http://www.slideshare.net/terraces/towards-an-interlinked-semantic-wiki-farm/|tag|http://www.semanlink.net/tag/alexandre_passant +http://www.slideshare.net/terraces/towards-an-interlinked-semantic-wiki-farm/|tag|http://www.semanlink.net/tag/eswc_2008 +http://www.slideshare.net/terraces/towards-an-interlinked-semantic-wiki-farm/|tag|http://www.semanlink.net/tag/semantic_wiki +http://www.slideshare.net/terraces/towards-an-interlinked-semantic-wiki-farm/|tag|http://www.semanlink.net/tag/slides +http://www.slideshare.net/terraces/towards-an-interlinked-semantic-wiki-farm/|title|Towards an Interlinked Semantic Wiki Farm +http://www.slideshare.net/terraces/towards-an-interlinked-semantic-wiki-farm/|creationTime|2008-06-20T19:06:26Z +http://www.siliconvalley.com/mld/siliconvalley/11685903.htm?template=contentModules/printstory.jsp|creationDate|2005-05-20 +http://www.siliconvalley.com/mld/siliconvalley/11685903.htm?template=contentModules/printstory.jsp|tag|http://www.semanlink.net/tag/grand_challenge +http://www.siliconvalley.com/mld/siliconvalley/11685903.htm?template=contentModules/printstory.jsp|comment|ROBOTIC CARS FACE DESERT COURSE IN 2ND DARPA CHALLENGE +http://www.siliconvalley.com/mld/siliconvalley/11685903.htm?template=contentModules/printstory.jsp|title|Behind the wheel: nobody +http://simile.mit.edu/timeline/|creationDate|2006-07-05 +http://simile.mit.edu/timeline/|tag|http://www.semanlink.net/tag/good +http://simile.mit.edu/timeline/|tag|http://www.semanlink.net/tag/simile_timeline +http://simile.mit.edu/timeline/|comment|Timeline is a DHTML-based AJAXy widget for visualizing time-based events. +http://simile.mit.edu/timeline/|title|SIMILE Timeline +http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|creationDate|2017-07-20 +http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|tag|http://www.semanlink.net/tag/distributional_semantics +http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|tag|http://www.semanlink.net/tag/word_embedding_evaluation +http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|tag|http://www.semanlink.net/tag/glove +http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|tag|http://www.semanlink.net/tag/good +http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|tag|http://www.semanlink.net/tag/sebastian_ruder +http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|tag|http://www.semanlink.net/tag/word2vec +http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|tag|http://www.semanlink.net/tag/word_embedding +http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|comment|"> While on the surface DSMs and word embedding models use varying algorithms to learn word representations – the former count, the latter predict – both types of model fundamentally act on the same underlying statistics of the data, i.e. the co-occurrence counts between words... + +> These results are in contrast to the general consensus that word embeddings are superior to traditional methods. Rather, they indicate that it typically makes no difference whatsoever whether word embeddings or distributional methods are used. What really matters is that your hyperparameters are tuned and that you utilize the appropriate pre-processing and post-processing steps." 
+http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|title|An overview of word embeddings and their connection to distributional semantic models - AYLIEN (2016) +http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/|creationTime|2017-07-20T15:43:09Z +http://ben.adida.net/presentations/www2008-rdfa/|creationDate|2008-04-21 +http://ben.adida.net/presentations/www2008-rdfa/|tag|http://www.semanlink.net/tag/ben_adida +http://ben.adida.net/presentations/www2008-rdfa/|tag|http://www.semanlink.net/tag/rdfa +http://ben.adida.net/presentations/www2008-rdfa/|tag|http://www.semanlink.net/tag/slides +http://ben.adida.net/presentations/www2008-rdfa/|tag|http://www.semanlink.net/tag/elias_torres +http://ben.adida.net/presentations/www2008-rdfa/|tag|http://www.semanlink.net/tag/www08 +http://ben.adida.net/presentations/www2008-rdfa/|title|Ben Adida - RDFa, slides du workshop à WWW2008 +http://ben.adida.net/presentations/www2008-rdfa/|creationTime|2008-04-21T15:37:42Z +http://ricostacruz.com/backbone-patterns/#assumptions|creationDate|2012-08-31 +http://ricostacruz.com/backbone-patterns/#assumptions|tag|http://www.semanlink.net/tag/howto +http://ricostacruz.com/backbone-patterns/#assumptions|tag|http://www.semanlink.net/tag/backbone_js +http://ricostacruz.com/backbone-patterns/#assumptions|tag|http://www.semanlink.net/tag/design_pattern +http://ricostacruz.com/backbone-patterns/#assumptions|title|Backbone patterns +http://ricostacruz.com/backbone-patterns/#assumptions|creationTime|2012-08-31T13:23:09Z +http://getglue.com/fps|creationDate|2009-11-05 +http://getglue.com/fps|tag|http://www.semanlink.net/tag/glue +http://getglue.com/fps|tag|http://www.semanlink.net/tag/fps +http://getglue.com/fps|title|fps on Get Glue +http://getglue.com/fps|creationTime|2009-11-05T00:24:26Z +http://gerbil.aksw.org/gerbil/overview|creationDate|2014-12-16 +http://gerbil.aksw.org/gerbil/overview|tag|http://www.semanlink.net/tag/axel_ngonga +http://gerbil.aksw.org/gerbil/overview|title|GERBIL Experiment Overview +http://gerbil.aksw.org/gerbil/overview|creationTime|2014-12-16T09:25:40Z +https://github.com/solid/solid|creationDate|2018-03-27 +https://github.com/solid/solid|tag|http://www.semanlink.net/tag/github_project +https://github.com/solid/solid|tag|http://www.semanlink.net/tag/solid +https://github.com/solid/solid|title|GitHub - solid/solid: Solid - Re-decentralizing the web (project directory) +https://github.com/solid/solid|creationTime|2018-03-27T09:39:00Z +https://www.lemonde.fr/sciences/article/2019/04/10/quand-plusieurs-humanites-peuplaient-la-terre_5448527_1650684.html|creationDate|2019-04-11 +https://www.lemonde.fr/sciences/article/2019/04/10/quand-plusieurs-humanites-peuplaient-la-terre_5448527_1650684.html|tag|http://www.semanlink.net/tag/paleoanthropology +https://www.lemonde.fr/sciences/article/2019/04/10/quand-plusieurs-humanites-peuplaient-la-terre_5448527_1650684.html|title|Quand plusieurs humanités peuplaient la Terre +https://www.lemonde.fr/sciences/article/2019/04/10/quand-plusieurs-humanites-peuplaient-la-terre_5448527_1650684.html|creationTime|2019-04-11T00:39:31Z +https://medium.com/@ageitgey/natural-language-processing-is-fun-9a0bff37854e|creationDate|2018-07-23 +https://medium.com/@ageitgey/natural-language-processing-is-fun-9a0bff37854e|tag|http://www.semanlink.net/tag/spacy +https://medium.com/@ageitgey/natural-language-processing-is-fun-9a0bff37854e|tag|http://www.semanlink.net/tag/nlp 
+https://medium.com/@ageitgey/natural-language-processing-is-fun-9a0bff37854e|tag|http://www.semanlink.net/tag/nlp_introduction +https://medium.com/@ageitgey/natural-language-processing-is-fun-9a0bff37854e|title|Natural Language Processing is Fun! – Adam Geitgey – Medium +https://medium.com/@ageitgey/natural-language-processing-is-fun-9a0bff37854e|creationTime|2018-07-23T12:57:34Z +http://www.miv.t.u-tokyo.ac.jp/ishizuka/pr-class/Dumais-CIKM98.pdf|creationDate|2014-04-08 +http://www.miv.t.u-tokyo.ac.jp/ishizuka/pr-class/Dumais-CIKM98.pdf|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.miv.t.u-tokyo.ac.jp/ishizuka/pr-class/Dumais-CIKM98.pdf|title|Inductive learning algorithms and representations for text categorization +http://www.miv.t.u-tokyo.ac.jp/ishizuka/pr-class/Dumais-CIKM98.pdf|creationTime|2014-04-08T19:08:52Z +http://www.snee.com/bobdc.blog/2013/04/appreciating-sparql-property-p.html|creationDate|2013-04-27 +http://www.snee.com/bobdc.blog/2013/04/appreciating-sparql-property-p.html|tag|http://www.semanlink.net/tag/sparql_1_1 +http://www.snee.com/bobdc.blog/2013/04/appreciating-sparql-property-p.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2013/04/appreciating-sparql-property-p.html|title|Appreciating SPARQL property paths more - bobdc.blog +http://www.snee.com/bobdc.blog/2013/04/appreciating-sparql-property-p.html|creationTime|2013-04-27T19:33:47Z +http://musicologie.org/actu/2015/11/l_opera_de_mayence_ecrase_une_manifestation_d_extreme_droite.html|creationDate|2016-12-05 +http://musicologie.org/actu/2015/11/l_opera_de_mayence_ecrase_une_manifestation_d_extreme_droite.html|tag|http://www.semanlink.net/tag/hymne_a_la_joie +http://musicologie.org/actu/2015/11/l_opera_de_mayence_ecrase_une_manifestation_d_extreme_droite.html|tag|http://www.semanlink.net/tag/extreme_droite +http://musicologie.org/actu/2015/11/l_opera_de_mayence_ecrase_une_manifestation_d_extreme_droite.html|title|L'opéra de Mayence écrase une manifestation de l'extrême droite avec l'hymne à la joie ! 
+http://musicologie.org/actu/2015/11/l_opera_de_mayence_ecrase_une_manifestation_d_extreme_droite.html|creationTime|2016-12-05T16:16:30Z
+http://www.datalyse.fr/|creationDate|2013-07-10
+http://www.datalyse.fr/|tag|http://www.semanlink.net/tag/big_data
+http://www.datalyse.fr/|title|Datalyse - projet d'innovation Big Data - Datalyse
+http://www.datalyse.fr/|creationTime|2013-07-10T22:20:26Z
+https://en.wikipedia.org/wiki/The_Man_Without_a_Past|creationDate|2017-03-22
+https://en.wikipedia.org/wiki/The_Man_Without_a_Past|tag|http://www.semanlink.net/tag/finlande
+https://en.wikipedia.org/wiki/The_Man_Without_a_Past|tag|http://www.semanlink.net/tag/film
+https://en.wikipedia.org/wiki/The_Man_Without_a_Past|title|The Man Without a Past
+https://en.wikipedia.org/wiki/The_Man_Without_a_Past|creationTime|2017-03-22T22:27:37Z
+http://www.html5rocks.com/en/tutorials/cors/?redirect_from_locale=fr|creationDate|2015-10-13
+http://www.html5rocks.com/en/tutorials/cors/?redirect_from_locale=fr|tag|http://www.semanlink.net/tag/cross_origin_resource_sharing
+http://www.html5rocks.com/en/tutorials/cors/?redirect_from_locale=fr|title|Using CORS - HTML5 Rocks
+http://www.html5rocks.com/en/tutorials/cors/?redirect_from_locale=fr|creationTime|2015-10-13T11:53:30Z
+https://www.quora.com/Scikit-Learn-Can-you-create-your-own-dataset-data-files-to-use-with-the-code-they-use-in-the-tutorials|creationDate|2015-10-21
+https://www.quora.com/Scikit-Learn-Can-you-create-your-own-dataset-data-files-to-use-with-the-code-they-use-in-the-tutorials|tag|http://www.semanlink.net/tag/olivier_grisel
+https://www.quora.com/Scikit-Learn-Can-you-create-your-own-dataset-data-files-to-use-with-the-code-they-use-in-the-tutorials|title|Scikit Learn - Can you create your own dataset/data files to use with the code they use in the tutorials? - Quora
+https://www.quora.com/Scikit-Learn-Can-you-create-your-own-dataset-data-files-to-use-with-the-code-they-use-in-the-tutorials|creationTime|2015-10-21T16:48:54Z
+https://rajpurkar.github.io/SQuAD-explorer/|creationDate|2018-11-05
+https://rajpurkar.github.io/SQuAD-explorer/|tag|http://www.semanlink.net/tag/question_answering
+https://rajpurkar.github.io/SQuAD-explorer/|tag|http://www.semanlink.net/tag/nlp_stanford
+https://rajpurkar.github.io/SQuAD-explorer/|title|The Stanford Question Answering Dataset
+https://rajpurkar.github.io/SQuAD-explorer/|creationTime|2018-11-05T15:29:18Z
+http://dev.w3.org/html5/rdfa/rdfa-module.html|creationDate|2010-09-03
+http://dev.w3.org/html5/rdfa/rdfa-module.html|tag|http://www.semanlink.net/tag/rdfa
+http://dev.w3.org/html5/rdfa/rdfa-module.html|tag|http://www.semanlink.net/tag/html5
+http://dev.w3.org/html5/rdfa/rdfa-module.html|title|HTML5+RDFa A mechanism for embedding RDF in HTML
+http://dev.w3.org/html5/rdfa/rdfa-module.html|creationTime|2010-09-03T22:21:12Z
+http://docs.codehaus.org/display/MAVENUSER/MavenPropertiesGuide|creationDate|2013-09-23
+http://docs.codehaus.org/display/MAVENUSER/MavenPropertiesGuide|title|MavenPropertiesGuide
+http://docs.codehaus.org/display/MAVENUSER/MavenPropertiesGuide|creationTime|2013-09-23T00:43:19Z
+http://reseaux.blog.lemonde.fr/2013/10/03/reinventer-rapport-temps-bernard-stiegler/|creationDate|2013-10-04
+http://reseaux.blog.lemonde.fr/2013/10/03/reinventer-rapport-temps-bernard-stiegler/|tag|http://www.semanlink.net/tag/temps
+http://reseaux.blog.lemonde.fr/2013/10/03/reinventer-rapport-temps-bernard-stiegler/|tag|http://www.semanlink.net/tag/bernard_stiegler
+http://reseaux.blog.lemonde.fr/2013/10/03/reinventer-rapport-temps-bernard-stiegler/|title|Réinventer un rapport au temps, par Bernard Stiegler Lois des réseaux
+http://reseaux.blog.lemonde.fr/2013/10/03/reinventer-rapport-temps-bernard-stiegler/|creationTime|2013-10-04T13:40:14Z
+http://www.mkbergman.com/|creationDate|2008-05-16
+http://www.mkbergman.com/|tag|http://www.semanlink.net/tag/mike_bergman
+http://www.mkbergman.com/|tag|http://www.semanlink.net/tag/semantic_web
+http://www.mkbergman.com/|title|AI3:::Adaptive Information » Mike Bergman on the semantic Web and structured Web
+http://www.mkbergman.com/|creationTime|2008-05-16T00:19:31Z
+http://www.scottbot.net/HIAL/?p=19113|creationDate|2012-09-20
+http://www.scottbot.net/HIAL/?p=19113|tag|http://www.semanlink.net/tag/topic_modeling
+http://www.scottbot.net/HIAL/?p=19113|tag|http://www.semanlink.net/tag/nlp_and_humanities
+http://www.scottbot.net/HIAL/?p=19113|title|Topic Modeling for Humanists: A Guided Tour » the scottbot irregular
+http://www.scottbot.net/HIAL/?p=19113|creationTime|2012-09-20T10:53:00Z
+http://data-gov.tw.rpi.edu/wiki/How_to_use_Google_Visualization_API#Hook_up_Google_Visualization_with_RDF_data|creationDate|2010-08-27
+http://data-gov.tw.rpi.edu/wiki/How_to_use_Google_Visualization_API#Hook_up_Google_Visualization_with_RDF_data|tag|http://www.semanlink.net/tag/rdf_data_visualization
+http://data-gov.tw.rpi.edu/wiki/How_to_use_Google_Visualization_API#Hook_up_Google_Visualization_with_RDF_data|tag|http://www.semanlink.net/tag/google_visualization_api
+http://data-gov.tw.rpi.edu/wiki/How_to_use_Google_Visualization_API#Hook_up_Google_Visualization_with_RDF_data|tag|http://www.semanlink.net/tag/json
+http://data-gov.tw.rpi.edu/wiki/How_to_use_Google_Visualization_API#Hook_up_Google_Visualization_with_RDF_data|title|Hook up Google Visualization with RDF data - Data-gov Wiki
+http://data-gov.tw.rpi.edu/wiki/How_to_use_Google_Visualization_API#Hook_up_Google_Visualization_with_RDF_data|creationTime|2010-08-27T17:43:59Z +https://www.theverge.com/2018/8/28/17787610/openai-dota-2-bots-ai-lost-international-reinforcement-learning|creationDate|2018-08-28 +https://www.theverge.com/2018/8/28/17787610/openai-dota-2-bots-ai-lost-international-reinforcement-learning|tag|http://www.semanlink.net/tag/jeux +https://www.theverge.com/2018/8/28/17787610/openai-dota-2-bots-ai-lost-international-reinforcement-learning|tag|http://www.semanlink.net/tag/artificial_intelligence +https://www.theverge.com/2018/8/28/17787610/openai-dota-2-bots-ai-lost-international-reinforcement-learning|comment|a learning experience — for us and the machines +https://www.theverge.com/2018/8/28/17787610/openai-dota-2-bots-ai-lost-international-reinforcement-learning|title|OpenAI’s Dota 2 defeat is still a win for artificial intelligence  - The Verge +https://www.theverge.com/2018/8/28/17787610/openai-dota-2-bots-ai-lost-international-reinforcement-learning|creationTime|2018-08-28T19:21:40Z +http://www.amazon.com/gp/aws/sdk/103-1858432-5126204?v=2005%2d10%2d01&s=AWSMechanicalTurkRequester|creationDate|2005-11-04 +http://www.amazon.com/gp/aws/sdk/103-1858432-5126204?v=2005%2d10%2d01&s=AWSMechanicalTurkRequester|tag|http://www.semanlink.net/tag/amazon_mechanical_turk +http://www.amazon.com/gp/aws/sdk/103-1858432-5126204?v=2005%2d10%2d01&s=AWSMechanicalTurkRequester|title|Amazon Web Services: Amazon Mechanical Turk +http://www.gartner.com/technology/media-products/reprints/oracle/article121/article121.html#top|creationDate|2010-09-21 +http://www.gartner.com/technology/media-products/reprints/oracle/article121/article121.html#top|tag|http://www.semanlink.net/tag/business_intelligence +http://www.gartner.com/technology/media-products/reprints/oracle/article121/article121.html#top|tag|http://www.semanlink.net/tag/gartner +http://www.gartner.com/technology/media-products/reprints/oracle/article121/article121.html#top|title|Magic Quadrant for Business Intelligence Platforms - Gartner +http://www.gartner.com/technology/media-products/reprints/oracle/article121/article121.html#top|creationTime|2010-09-21T14:30:01Z +https://aminer.org/bignet_www2018|creationDate|2018-01-27 +https://aminer.org/bignet_www2018|tag|http://www.semanlink.net/tag/graph_embeddings +https://aminer.org/bignet_www2018|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://aminer.org/bignet_www2018|tag|http://www.semanlink.net/tag/workshop +https://aminer.org/bignet_www2018|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +https://aminer.org/bignet_www2018|title|WORKSHOP: BigNet @ WWW 2018 Workshop on Learning Representations for Big Networks +https://aminer.org/bignet_www2018|creationTime|2018-01-27T15:13:16Z +http://www.flickr.com/photos/richardwallis/sets/72157629229803283/with/6841000233/|creationDate|2012-02-08 +http://www.flickr.com/photos/richardwallis/sets/72157629229803283/with/6841000233/|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://www.flickr.com/photos/richardwallis/sets/72157629229803283/with/6841000233/|title|Semtech Berlin 2012 : un album sur Flickr +http://www.flickr.com/photos/richardwallis/sets/72157629229803283/with/6841000233/|creationTime|2012-02-08T18:04:53Z +https://groups.google.com/forum/#!forum/swagger-swaggersocket|creationDate|2017-04-21 +https://groups.google.com/forum/#!forum/swagger-swaggersocket|title|Swagger – Google Groups 
+https://groups.google.com/forum/#!forum/swagger-swaggersocket|creationTime|2017-04-21T19:37:28Z +https://arxiv.org/abs/1801.00631|creationDate|2018-01-03 +https://arxiv.org/abs/1801.00631|tag|http://www.semanlink.net/tag/deep_learning +https://arxiv.org/abs/1801.00631|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1801.00631|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +https://arxiv.org/abs/1801.00631|tag|http://www.semanlink.net/tag/ia_limites +https://arxiv.org/abs/1801.00631|arxiv_author|Gary Marcus +https://arxiv.org/abs/1801.00631|title|[1801.00631] Deep Learning: A Critical Appraisal +https://arxiv.org/abs/1801.00631|creationTime|2018-01-03T11:33:53Z +https://arxiv.org/abs/1801.00631|arxiv_summary|"Although deep learning has historical roots going back decades, neither the +term ""deep learning"" nor the approach was popular just over five years ago, +when the field was reignited by papers such as Krizhevsky, Sutskever and +Hinton's now classic (2012) deep network model of Imagenet. What has the field +discovered in the five subsequent years? Against a background of considerable +progress in areas such as speech recognition, image recognition, and game +playing, and considerable enthusiasm in the popular press, I present ten +concerns for deep learning, and suggest that deep learning must be supplemented +by other techniques if we are to reach artificial general intelligence." +https://arxiv.org/abs/1801.00631|arxiv_firstAuthor|Gary Marcus +https://arxiv.org/abs/1801.00631|arxiv_updated|2018-01-02T12:49:35Z +https://arxiv.org/abs/1801.00631|arxiv_title|Deep Learning: A Critical Appraisal +https://arxiv.org/abs/1801.00631|arxiv_published|2018-01-02T12:49:35Z +https://arxiv.org/abs/1801.00631|arxiv_num|1801.00631 +http://docs.rapid-i.com/files/rapidminer/rapidminer-5.0-manual-english_v1.0.pdf|creationDate|2013-09-11 +http://docs.rapid-i.com/files/rapidminer/rapidminer-5.0-manual-english_v1.0.pdf|tag|http://www.semanlink.net/tag/rapidminer +http://docs.rapid-i.com/files/rapidminer/rapidminer-5.0-manual-english_v1.0.pdf|comment|The RapidMiner User Manual is the main documentation of RapidMiner containing an introduction into the basic concepts together with a complete description of the program itself +http://docs.rapid-i.com/files/rapidminer/rapidminer-5.0-manual-english_v1.0.pdf|title|Rapidminer User manual +http://docs.rapid-i.com/files/rapidminer/rapidminer-5.0-manual-english_v1.0.pdf|creationTime|2013-09-11T00:22:53Z +http://www.w3.org/TR/rdfa-api/#enhanced-browser-interfaces|creationDate|2011-01-17 +http://www.w3.org/TR/rdfa-api/#enhanced-browser-interfaces|tag|http://www.semanlink.net/tag/rdfa +http://www.w3.org/TR/rdfa-api/#enhanced-browser-interfaces|title|RDFa API +http://www.w3.org/TR/rdfa-api/#enhanced-browser-interfaces|creationTime|2011-01-17T22:49:32Z +http://www.ibtimes.co.uk/articles/456073/20130411/what-bitcoin-mining-silk-road-work-feature.htm|creationDate|2013-11-29 +http://www.ibtimes.co.uk/articles/456073/20130411/what-bitcoin-mining-silk-road-work-feature.htm|tag|http://www.semanlink.net/tag/bitcoin +http://www.ibtimes.co.uk/articles/456073/20130411/what-bitcoin-mining-silk-road-work-feature.htm|title|What is Bitcoin and How Does it Work? 
- IBTimes UK +http://www.ibtimes.co.uk/articles/456073/20130411/what-bitcoin-mining-silk-road-work-feature.htm|creationTime|2013-11-29T14:37:45Z +http://apassant.net/blog/2008/10/07/say-hello-to-lodrinfo/|creationDate|2008-10-07 +http://apassant.net/blog/2008/10/07/say-hello-to-lodrinfo/|tag|http://www.semanlink.net/tag/lodr +http://apassant.net/blog/2008/10/07/say-hello-to-lodrinfo/|title|Say hello to lodr.info : Alexandre Passant +http://apassant.net/blog/2008/10/07/say-hello-to-lodrinfo/|creationTime|2008-10-07T13:46:06Z +https://allennlp.org/elmo|creationDate|2018-02-16 +https://allennlp.org/elmo|tag|http://www.semanlink.net/tag/elmo +https://allennlp.org/elmo|comment|"> models both (1) complex characteristics of word use (e.g., syntax and semantics), and (2) how these uses vary across linguistic contexts (i.e., to model polysemy). + +> These word vectors are learned functions of the internal states of a deep bidirectional language model (biLM) + +These representations are : + +- Contextual: The representation for each word depends on the entire context in which it is used. +- Deep: combine all layers of a deep pre-trained neural network. +- Character based + +[github](https://github.com/allenai/bilm-tf)" +https://allennlp.org/elmo|title|ELMo: Deep contextualized word representations (2018) +https://allennlp.org/elmo|creationTime|2018-02-16T13:13:09Z +http://winch5.blog.lemonde.fr/introduction/|creationDate|2013-08-25 +http://winch5.blog.lemonde.fr/introduction/|tag|http://www.semanlink.net/tag/winch5 +http://winch5.blog.lemonde.fr/introduction/|title|Introduction Winch 5 +http://winch5.blog.lemonde.fr/introduction/|creationTime|2013-08-25T12:49:30Z +https://www.theguardian.com/society/2017/feb/19/basic-income-finland-low-wages-fewer-jobs|creationDate|2017-02-19 +https://www.theguardian.com/society/2017/feb/19/basic-income-finland-low-wages-fewer-jobs|tag|http://www.semanlink.net/tag/guaranteed_basic_income +https://www.theguardian.com/society/2017/feb/19/basic-income-finland-low-wages-fewer-jobs|tag|http://www.semanlink.net/tag/finlande +https://www.theguardian.com/society/2017/feb/19/basic-income-finland-low-wages-fewer-jobs|comment|Both left and right are promoting the idea of a basic wage for everyone, currently on trial, as a solution to the new world of work +https://www.theguardian.com/society/2017/feb/19/basic-income-finland-low-wages-fewer-jobs|title|Is Finland’s basic universal income a solution to automation, fewer jobs and lower wages? Society The Guardian +https://www.theguardian.com/society/2017/feb/19/basic-income-finland-low-wages-fewer-jobs|creationTime|2017-02-19T18:47:36Z +http://www.entreprises.ouest-france.fr/article/agriculture-agroalimentaire-france-decroche-17-12-2015-246432#.VnUOtBtPZPU.twitter|creationDate|2015-12-20 +http://www.entreprises.ouest-france.fr/article/agriculture-agroalimentaire-france-decroche-17-12-2015-246432#.VnUOtBtPZPU.twitter|tag|http://www.semanlink.net/tag/agriculture_francaise +http://www.entreprises.ouest-france.fr/article/agriculture-agroalimentaire-france-decroche-17-12-2015-246432#.VnUOtBtPZPU.twitter|title|Agriculture et agroalimentaire. 
La France décroche Ouest France Entreprises +http://www.entreprises.ouest-france.fr/article/agriculture-agroalimentaire-france-decroche-17-12-2015-246432#.VnUOtBtPZPU.twitter|creationTime|2015-12-20T14:10:09Z +http://www.actioncontrelafaim.org/presse/communiques/communique/article/61/1-milliard-de-personnes-souffrent-de-la-faim-en-2009-combien-en-faudra-t-il-pour-que-la-faim-devie/|creationDate|2010-06-07 +http://www.actioncontrelafaim.org/presse/communiques/communique/article/61/1-milliard-de-personnes-souffrent-de-la-faim-en-2009-combien-en-faudra-t-il-pour-que-la-faim-devie/|tag|http://www.semanlink.net/tag/faim +http://www.actioncontrelafaim.org/presse/communiques/communique/article/61/1-milliard-de-personnes-souffrent-de-la-faim-en-2009-combien-en-faudra-t-il-pour-que-la-faim-devie/|tag|http://www.semanlink.net/tag/chiffres +http://www.actioncontrelafaim.org/presse/communiques/communique/article/61/1-milliard-de-personnes-souffrent-de-la-faim-en-2009-combien-en-faudra-t-il-pour-que-la-faim-devie/|title|1 milliard de personnes souffrent de la faim en 2009 : combien en faudra-t-il pour que la faim devienne enfin une priorité mondiale? - Action Contre La Faim +http://www.actioncontrelafaim.org/presse/communiques/communique/article/61/1-milliard-de-personnes-souffrent-de-la-faim-en-2009-combien-en-faudra-t-il-pour-que-la-faim-devie/|creationTime|2010-06-07T13:25:01Z +http://meta.wikimedia.org/wiki/Semantic_MediaWiki|creationDate|2005-09-05 +http://meta.wikimedia.org/wiki/Semantic_MediaWiki|tag|http://www.semanlink.net/tag/semantic_annotation +http://meta.wikimedia.org/wiki/Semantic_MediaWiki|tag|http://www.semanlink.net/tag/wiki +http://meta.wikimedia.org/wiki/Semantic_MediaWiki|comment|"The WikiProject ""Semantic MediaWiki"" provides a common platform for discussing extensions of the MediaWiki software that allow for simple, machine-based processing of Wiki-content. This usually requires some form of ""semantic annotation,"" but the special Wiki environment and the multitude of envisaged applications impose a number of additional requirements." 
+http://meta.wikimedia.org/wiki/Semantic_MediaWiki|title|Semantic MediaWiki - Meta +https://developers.facebook.com/docs/graph-api/quickstart/v2.0|creationDate|2014-07-26 +https://developers.facebook.com/docs/graph-api/quickstart/v2.0|tag|http://www.semanlink.net/tag/api +https://developers.facebook.com/docs/graph-api/quickstart/v2.0|tag|http://www.semanlink.net/tag/facebook +https://developers.facebook.com/docs/graph-api/quickstart/v2.0|tag|http://www.semanlink.net/tag/graph +https://developers.facebook.com/docs/graph-api/quickstart/v2.0|title|Facebook Graph API Quickstart +https://developers.facebook.com/docs/graph-api/quickstart/v2.0|creationTime|2014-07-26T02:01:09Z +https://towardsdatascience.com/semantic-code-search-3cd6d244a39c|creationDate|2018-06-02 +https://towardsdatascience.com/semantic-code-search-3cd6d244a39c|tag|http://www.semanlink.net/tag/natural_language_semantic_search +https://towardsdatascience.com/semantic-code-search-3cd6d244a39c|tag|http://www.semanlink.net/tag/tutorial +https://towardsdatascience.com/semantic-code-search-3cd6d244a39c|title|How To Create Natural Language Semantic Search For Arbitrary Objects With Deep Learning +https://towardsdatascience.com/semantic-code-search-3cd6d244a39c|creationTime|2018-06-02T10:23:37Z +http://dannyayers.com/archives/2005/10/28/rdfnet-challenged/|creationDate|2005-11-01 +http://dannyayers.com/archives/2005/10/28/rdfnet-challenged/|tag|http://www.semanlink.net/tag/tim_bray +http://dannyayers.com/archives/2005/10/28/rdfnet-challenged/|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/archives/2005/10/28/rdfnet-challenged/|tag|http://www.semanlink.net/tag/piggy_bank +http://dannyayers.com/archives/2005/10/28/rdfnet-challenged/|title|Danny Ayers, Raw Blog : » RDF.net : Challenged! +https://arxiv.org/abs/1703.02507|creationDate|2019-03-25 +https://arxiv.org/abs/1703.02507|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1703.02507|tag|http://www.semanlink.net/tag/sent2vec +https://arxiv.org/abs/1703.02507|tag|http://www.semanlink.net/tag/sif_embeddings +https://arxiv.org/abs/1703.02507|arxiv_author|Matteo Pagliardini +https://arxiv.org/abs/1703.02507|arxiv_author|Prakhar Gupta +https://arxiv.org/abs/1703.02507|arxiv_author|Martin Jaggi +https://arxiv.org/abs/1703.02507|title|[1703.02507] Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features +https://arxiv.org/abs/1703.02507|creationTime|2019-03-25T15:36:27Z +https://arxiv.org/abs/1703.02507|arxiv_summary|"The recent tremendous success of unsupervised word embeddings in a multitude +of applications raises the obvious question if similar methods could be derived +to improve embeddings (i.e. semantic representations) of word sequences as +well. We present a simple but efficient unsupervised objective to train +distributed representations of sentences. Our method outperforms the +state-of-the-art unsupervised models on most benchmark tasks, highlighting the +robustness of the produced general-purpose sentence embeddings." 
+https://arxiv.org/abs/1703.02507|arxiv_firstAuthor|Matteo Pagliardini +https://arxiv.org/abs/1703.02507|arxiv_updated|2018-12-28T15:12:58Z +https://arxiv.org/abs/1703.02507|arxiv_title|Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features +https://arxiv.org/abs/1703.02507|arxiv_published|2017-03-07T18:19:11Z +https://arxiv.org/abs/1703.02507|arxiv_num|1703.02507 +http://courses.cs.washington.edu/courses/cse490u/16sp/slides/DistributedSemantics.pdf|creationDate|2017-07-10 +http://courses.cs.washington.edu/courses/cse490u/16sp/slides/DistributedSemantics.pdf|tag|http://www.semanlink.net/tag/distributional_semantics +http://courses.cs.washington.edu/courses/cse490u/16sp/slides/DistributedSemantics.pdf|tag|http://www.semanlink.net/tag/slides +http://courses.cs.washington.edu/courses/cse490u/16sp/slides/DistributedSemantics.pdf|tag|http://www.semanlink.net/tag/word_embedding +http://courses.cs.washington.edu/courses/cse490u/16sp/slides/DistributedSemantics.pdf|tag|http://www.semanlink.net/tag/dan_jurafsky +http://courses.cs.washington.edu/courses/cse490u/16sp/slides/DistributedSemantics.pdf|tag|http://www.semanlink.net/tag/nlp +http://courses.cs.washington.edu/courses/cse490u/16sp/slides/DistributedSemantics.pdf|comment|Yejin Choi - University of Washington [Slides adapted from Dan Jurafsky] +http://courses.cs.washington.edu/courses/cse490u/16sp/slides/DistributedSemantics.pdf|title|Distributed Semantics & Embeddings +http://courses.cs.washington.edu/courses/cse490u/16sp/slides/DistributedSemantics.pdf|creationTime|2017-07-10T13:22:28Z +https://blog.wikimedia.org/2017/10/30/wikidata-fifth-birthday/|creationDate|2017-11-01 +https://blog.wikimedia.org/2017/10/30/wikidata-fifth-birthday/|tag|http://www.semanlink.net/tag/wikidata +https://blog.wikimedia.org/2017/10/30/wikidata-fifth-birthday/|title|Wikidata, a rapidly growing global hub, turns five – Wikimedia Blog +https://blog.wikimedia.org/2017/10/30/wikidata-fifth-birthday/|creationTime|2017-11-01T13:58:48Z +https://docs.docker.com/engine/reference/glossary/|creationDate|2016-04-05 +https://docs.docker.com/engine/reference/glossary/|tag|http://www.semanlink.net/tag/docker +https://docs.docker.com/engine/reference/glossary/|title|Docker Glossary +https://docs.docker.com/engine/reference/glossary/|creationTime|2016-04-05T11:20:09Z +https://jesusbarrasa.wordpress.com/2017/03/31/quickgraph5-learning-a-taxonomy-from-your-tagged-data/|creationDate|2017-04-01 +https://jesusbarrasa.wordpress.com/2017/03/31/quickgraph5-learning-a-taxonomy-from-your-tagged-data/|tag|http://www.semanlink.net/tag/neo4j +https://jesusbarrasa.wordpress.com/2017/03/31/quickgraph5-learning-a-taxonomy-from-your-tagged-data/|tag|http://www.semanlink.net/tag/tagging +https://jesusbarrasa.wordpress.com/2017/03/31/quickgraph5-learning-a-taxonomy-from-your-tagged-data/|title|QuickGraph#5 Learning a taxonomy from your tagged data Jesús Barrasa +https://jesusbarrasa.wordpress.com/2017/03/31/quickgraph5-learning-a-taxonomy-from-your-tagged-data/|creationTime|2017-04-01T11:55:46Z +https://schema.org/docs/automotive.html|creationDate|2018-06-19 +https://schema.org/docs/automotive.html|tag|http://www.semanlink.net/tag/gao +https://schema.org/docs/automotive.html|tag|http://www.semanlink.net/tag/schema_org +https://schema.org/docs/automotive.html|title|Markup for Autos - schema.org +https://schema.org/docs/automotive.html|creationTime|2018-06-19T10:55:59Z +http://www.txtnet.com/mathlib/home.asp|creationDate|2007-01-03 
+http://www.txtnet.com/mathlib/home.asp|tag|http://www.semanlink.net/tag/jean_paul +http://www.txtnet.com/mathlib/home.asp|tag|http://www.semanlink.net/tag/education +http://www.txtnet.com/mathlib/home.asp|tag|http://www.semanlink.net/tag/mathematiques +http://www.txtnet.com/mathlib/home.asp|tag|http://www.semanlink.net/tag/enfants +http://www.txtnet.com/mathlib/home.asp|title|La Librairie des Maths +https://techblog.cdiscount.com/part-speech-tagging-tutorial-keras-deep-learning-library/|creationDate|2018-04-13 +https://techblog.cdiscount.com/part-speech-tagging-tutorial-keras-deep-learning-library/|tag|http://www.semanlink.net/tag/keras +https://techblog.cdiscount.com/part-speech-tagging-tutorial-keras-deep-learning-library/|tag|http://www.semanlink.net/tag/tutorial +https://techblog.cdiscount.com/part-speech-tagging-tutorial-keras-deep-learning-library/|tag|http://www.semanlink.net/tag/part_of_speech_tagging +https://techblog.cdiscount.com/part-speech-tagging-tutorial-keras-deep-learning-library/|title|Part-of-Speech tagging tutorial with the Keras Deep Learning library - Cdiscount TechBlog +https://techblog.cdiscount.com/part-speech-tagging-tutorial-keras-deep-learning-library/|creationTime|2018-04-13T10:18:20Z +https://twitter.com/olafhartig/status/1096781008098205697|creationDate|2019-02-16 +https://twitter.com/olafhartig/status/1096781008098205697|tag|http://www.semanlink.net/tag/olaf_hartig +https://twitter.com/olafhartig/status/1096781008098205697|tag|http://www.semanlink.net/tag/graphql +https://twitter.com/olafhartig/status/1096781008098205697|title|"Olaf Hartig sur Twitter : ""Here are typical examples of how people do data integration in the GraphQL context. Everything is explicitly implemented in the program code. No flexibility. Reminds me of the API mash-up apps that were popular 15 years ago. https://t.co/3qJMKXoWDt https://t.co/GWqPdmeFIP""" +https://twitter.com/olafhartig/status/1096781008098205697|creationTime|2019-02-16T20:09:22Z +http://news.bbc.co.uk/go/pr/fr/-/1/hi/technology/4088702.stm|creationDate|2005-06-15 +http://news.bbc.co.uk/go/pr/fr/-/1/hi/technology/4088702.stm|tag|http://www.semanlink.net/tag/microsoft +http://news.bbc.co.uk/go/pr/fr/-/1/hi/technology/4088702.stm|tag|http://www.semanlink.net/tag/chine +http://news.bbc.co.uk/go/pr/fr/-/1/hi/technology/4088702.stm|comment|Chinese bloggers posting their thoughts via Microsoft's net service face restrictions on what they can write. 
+http://news.bbc.co.uk/go/pr/fr/-/1/hi/technology/4088702.stm|title|BBC NEWS Technology Microsoft censors Chinese blogs +http://news.bbc.co.uk/go/pr/fr/-/1/hi/technology/4088702.stm|source|BBC +http://lists.w3.org/Archives/Public/public-semweb-ui/2006May/0001.html|creationDate|2006-05-25 +http://lists.w3.org/Archives/Public/public-semweb-ui/2006May/0001.html|tag|http://www.semanlink.net/tag/jena_user_conference +http://lists.w3.org/Archives/Public/public-semweb-ui/2006May/0001.html|tag|http://www.semanlink.net/tag/linkto_semanlink +http://lists.w3.org/Archives/Public/public-semweb-ui/2006May/0001.html|title|Jena User Conference - some interesting UI related papers/presentations from Shabajee, Paul on 2006-05-17 (public-semweb-ui@w3.org from May 2006) +http://www.wired.com/2016/01/the-silk-roads-dark-web-dream-is-dead/|creationDate|2016-01-16 +http://www.wired.com/2016/01/the-silk-roads-dark-web-dream-is-dead/|tag|http://www.semanlink.net/tag/dark_web +http://www.wired.com/2016/01/the-silk-roads-dark-web-dream-is-dead/|tag|http://www.semanlink.net/tag/silk_road +http://www.wired.com/2016/01/the-silk-roads-dark-web-dream-is-dead/|title|The Silk Road's Dark-Web Dream Is Dead WIRED +http://www.wired.com/2016/01/the-silk-roads-dark-web-dream-is-dead/|creationTime|2016-01-16T12:33:19Z +https://towardsdatascience.com/uncertainty-estimation-for-neural-network-dropout-as-bayesian-approximation-7d30fc7bc1f2|creationDate|2019-05-13 +https://towardsdatascience.com/uncertainty-estimation-for-neural-network-dropout-as-bayesian-approximation-7d30fc7bc1f2|tag|http://www.semanlink.net/tag/bayesian_deep_learning +https://towardsdatascience.com/uncertainty-estimation-for-neural-network-dropout-as-bayesian-approximation-7d30fc7bc1f2|tag|http://www.semanlink.net/tag/dropout +https://towardsdatascience.com/uncertainty-estimation-for-neural-network-dropout-as-bayesian-approximation-7d30fc7bc1f2|title|Uncertainty estimation for Neural Network — Dropout as Bayesian Approximation +https://towardsdatascience.com/uncertainty-estimation-for-neural-network-dropout-as-bayesian-approximation-7d30fc7bc1f2|creationTime|2019-05-13T09:10:25Z +http://www.corante.com/copyfight/archives/039510print.html|creationDate|2005-09-05 +http://www.corante.com/copyfight/archives/039510print.html|tag|http://www.semanlink.net/tag/n_importe_quoi +http://www.corante.com/copyfight/archives/039510print.html|tag|http://www.semanlink.net/tag/imprimantes +http://www.corante.com/copyfight/archives/039510print.html|tag|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.corante.com/copyfight/archives/039510print.html|tag|http://www.semanlink.net/tag/patent_infringement +http://www.corante.com/copyfight/archives/039510print.html|title|"The Latest IP Crime: ""Box-Wrap"" Patent Infringement" +http://patterns.dataincubator.org/book/|creationDate|2011-09-10 +http://patterns.dataincubator.org/book/|tag|http://www.semanlink.net/tag/howto +http://patterns.dataincubator.org/book/|tag|http://www.semanlink.net/tag/design_pattern +http://patterns.dataincubator.org/book/|tag|http://www.semanlink.net/tag/leigh_dodds +http://patterns.dataincubator.org/book/|tag|http://www.semanlink.net/tag/ian_davis +http://patterns.dataincubator.org/book/|tag|http://www.semanlink.net/tag/linked_data +http://patterns.dataincubator.org/book/|comment|A pattern catalogue for modelling, publishing, and consuming Linked Data +http://patterns.dataincubator.org/book/|title|Linked Data Patterns +http://patterns.dataincubator.org/book/|creationTime|2011-09-10T13:55:35Z 
+https://github.com/HydraCG|creationDate|2015-02-18 +https://github.com/HydraCG|tag|http://www.semanlink.net/tag/hydra +https://github.com/HydraCG|tag|http://www.semanlink.net/tag/github_project +https://github.com/HydraCG|comment|"Issues + +" +https://github.com/HydraCG|title|Hydra Community Group - GitHub +https://github.com/HydraCG|creationTime|2015-02-18T01:26:45Z +https://vuejs.org/|creationDate|2018-07-19 +https://vuejs.org/|tag|http://www.semanlink.net/tag/vue_js +https://vuejs.org/|title|Vue.js +https://vuejs.org/|creationTime|2018-07-19T23:16:39Z +http://gettingreal.37signals.com/|creationDate|2008-09-09 +http://gettingreal.37signals.com/|tag|http://www.semanlink.net/tag/livre +http://gettingreal.37signals.com/|tag|http://www.semanlink.net/tag/web_dev +http://gettingreal.37signals.com/|title|Getting Real: The Book by 37signals +http://gettingreal.37signals.com/|creationTime|2008-09-09T15:00:52Z +https://www.w3.org/wiki/WebSchemas/PropertyValuePairs|creationDate|2014-05-03 +https://www.w3.org/wiki/WebSchemas/PropertyValuePairs|tag|http://www.semanlink.net/tag/martin_hepp +https://www.w3.org/wiki/WebSchemas/PropertyValuePairs|tag|http://www.semanlink.net/tag/product_description +https://www.w3.org/wiki/WebSchemas/PropertyValuePairs|tag|http://www.semanlink.net/tag/hepp_s_propertyvalue +https://www.w3.org/wiki/WebSchemas/PropertyValuePairs|title|WebSchemas/PropertyValuePairs - W3C Wiki +https://www.w3.org/wiki/WebSchemas/PropertyValuePairs|creationTime|2014-05-03T01:49:19Z +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_vision.html|creationDate|2007-07-04 +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_vision.html|tag|http://www.semanlink.net/tag/rdfa +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_vision.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_vision.html|tag|http://www.semanlink.net/tag/semantic_markup_in_html +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_vision.html|tag|http://www.semanlink.net/tag/rdf_templating +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_vision.html|title|TechnicaLee Speaking: Using RDF on the Web: A Vision +http://www.thefigtrees.net/lee/blog/2007/01/using_rdf_on_the_web_a_vision.html|creationTime|2007-07-04T00:54:08Z +http://www.semanlink.net/doc/2019/05/knowledge_extraction_from_unstr|creationDate|2019-05-26 +http://www.semanlink.net/doc/2019/05/knowledge_extraction_from_unstr|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/05/knowledge_extraction_from_unstr|tag|http://www.semanlink.net/tag/knowledge_extraction +http://www.semanlink.net/doc/2019/05/knowledge_extraction_from_unstr|title|Knowledge extraction from unstructured texts Tech Blog (2016) +http://www.semanlink.net/doc/2019/05/knowledge_extraction_from_unstr|bookmarkOf|https://blog.heuritech.com/2016/04/15/knowledge-extraction-from-unstructured-texts/ +http://www.semanlink.net/doc/2019/05/knowledge_extraction_from_unstr|creationTime|2019-05-26T23:52:21Z +http://www.joelonsoftware.com/items/2005/10/24.html|creationDate|2005-10-27 +http://www.joelonsoftware.com/items/2005/10/24.html|tag|http://www.semanlink.net/tag/fake_blogs +http://www.joelonsoftware.com/items/2005/10/24.html|tag|http://www.semanlink.net/tag/zombie_pcs +http://www.joelonsoftware.com/items/2005/10/24.html|tag|http://www.semanlink.net/tag/adsense 
+http://www.joelonsoftware.com/items/2005/10/24.html|tag|http://www.semanlink.net/tag/aggregators +http://www.joelonsoftware.com/items/2005/10/24.html|tag|http://www.semanlink.net/tag/google_ranking +http://www.joelonsoftware.com/items/2005/10/24.html|tag|http://www.semanlink.net/tag/publicite_internet +http://www.joelonsoftware.com/items/2005/10/24.html|title|Something Rotten in AdSense +http://www.touchgraph.com|creationDate|2005-09-24 +http://www.touchgraph.com|tag|http://www.semanlink.net/tag/touchgraph +http://www.touchgraph.com|title|TouchGraph +http://stackoverflow.com/questions/1982204/rdfa-for-videos-rendered-in-javascript|creationDate|2012-09-01 +http://stackoverflow.com/questions/1982204/rdfa-for-videos-rendered-in-javascript|tag|http://www.semanlink.net/tag/rdfa +http://stackoverflow.com/questions/1982204/rdfa-for-videos-rendered-in-javascript|tag|http://www.semanlink.net/tag/javascript +http://stackoverflow.com/questions/1982204/rdfa-for-videos-rendered-in-javascript|comment|RDFa markup generated by Javascript is invisible to Google and Yahoo, just like other markup. +http://stackoverflow.com/questions/1982204/rdfa-for-videos-rendered-in-javascript|title|RDFa for videos rendered in javascript - Stack Overflow +http://stackoverflow.com/questions/1982204/rdfa-for-videos-rendered-in-javascript|creationTime|2012-09-01T15:53:41Z +http://answers.semanticweb.com/questions/1493/what-is-the-best-java-rdfa-parser|creationDate|2013-01-28 +http://answers.semanticweb.com/questions/1493/what-is-the-best-java-rdfa-parser|tag|http://www.semanlink.net/tag/rdfa_parser +http://answers.semanticweb.com/questions/1493/what-is-the-best-java-rdfa-parser|tag|http://www.semanlink.net/tag/java +http://answers.semanticweb.com/questions/1493/what-is-the-best-java-rdfa-parser|title|What is the best Java RDFa Parser? 
- semanticweb.com ANSWERS +http://answers.semanticweb.com/questions/1493/what-is-the-best-java-rdfa-parser|creationTime|2013-01-28T17:36:30Z +https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb|creationDate|2019-02-07 +https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb|tag|http://www.semanlink.net/tag/sentence_similarity +https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb|tag|http://www.semanlink.net/tag/nlp_sample_code +https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb|tag|http://www.semanlink.net/tag/sif_embeddings +https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb|tag|http://www.semanlink.net/tag/word_mover_s_distance +https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb|tag|http://www.semanlink.net/tag/yves_peirsman +https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb|comment|"[blog post](/doc/?uri=http%3A%2F%2Fnlp.town%2Fblog%2Fsentence-similarity%2F) +" +https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb|relatedDoc|http://nlp.town/blog/sentence-similarity/ +https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb|title|nlp-notebooks/Simple Sentence Similarity.ipynb at master · nlptown/nlp-notebooks +https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb|creationTime|2019-02-07T00:59:11Z +http://leobard.twoday.net/stories/1900548/|creationDate|2006-05-06 +http://leobard.twoday.net/stories/1900548/|tag|http://www.semanlink.net/tag/gnowsis +http://leobard.twoday.net/stories/1900548/|title|Semantic World and Cyberspace: gnowsis 0.9.0 release +http://www.esa.int/Our_Activities/Space_Science/Rosetta/Highlights/Top_10_at_10_km|creationDate|2014-11-25 +http://www.esa.int/Our_Activities/Space_Science/Rosetta/Highlights/Top_10_at_10_km|tag|http://www.semanlink.net/tag/rosetta +http://www.esa.int/Our_Activities/Space_Science/Rosetta/Highlights/Top_10_at_10_km|title|Top 10 at 10 km / Highlights / Rosetta / Space Science / Our Activities / ESA +http://www.esa.int/Our_Activities/Space_Science/Rosetta/Highlights/Top_10_at_10_km|creationTime|2014-11-25T15:32:21Z +https://aclweb.org/anthology/papers/P/P17/P17-2085/|creationDate|2019-04-24 +https://aclweb.org/anthology/papers/P/P17/P17-2085/|tag|http://www.semanlink.net/tag/list_only_entity_linking +https://aclweb.org/anthology/papers/P/P17/P17-2085/|tag|http://www.semanlink.net/tag/nlp_microsoft +https://aclweb.org/anthology/papers/P/P17/P17-2085/|comment|"Proposes to tackle the problem of List-only Entity Linking through seed mentions (mentions +that we are confident to link). Other mentions are disambiguated by comparing them with the seed mentions rather than directly with the entities. 
+ +" +https://aclweb.org/anthology/papers/P/P17/P17-2085/|title|List-only Entity Linking - ACL Anthology (2017) +https://aclweb.org/anthology/papers/P/P17/P17-2085/|creationTime|2019-04-24T15:22:25Z +http://www.w3.org/TR/2013/PR-vocab-data-cube-20131217/|creationDate|2013-12-22 +http://www.w3.org/TR/2013/PR-vocab-data-cube-20131217/|tag|http://www.semanlink.net/tag/sdmx +http://www.w3.org/TR/2013/PR-vocab-data-cube-20131217/|tag|http://www.semanlink.net/tag/jeni_tennison +http://www.w3.org/TR/2013/PR-vocab-data-cube-20131217/|tag|http://www.semanlink.net/tag/statistical_data +http://www.w3.org/TR/2013/PR-vocab-data-cube-20131217/|tag|http://www.semanlink.net/tag/richard_cyganiak +http://www.w3.org/TR/2013/PR-vocab-data-cube-20131217/|tag|http://www.semanlink.net/tag/dave_reynolds +http://www.w3.org/TR/2013/PR-vocab-data-cube-20131217/|title|The RDF Data Cube Vocabulary +http://www.w3.org/TR/2013/PR-vocab-data-cube-20131217/|creationTime|2013-12-22T12:26:49Z +http://news.bbc.co.uk/2/hi/programmes/click_online/9568340.stm|creationDate|2011-08-22 +http://news.bbc.co.uk/2/hi/programmes/click_online/9568340.stm|tag|http://www.semanlink.net/tag/photo_numerique +http://news.bbc.co.uk/2/hi/programmes/click_online/9568340.stm|title|BBC News - Out-of-focus pictures eliminated by photography innovation +http://news.bbc.co.uk/2/hi/programmes/click_online/9568340.stm|creationTime|2011-08-22T19:57:56Z +http://news.bbc.co.uk/2/hi/programmes/click_online/9568340.stm|source|BBC +http://vancouverdata.blogspot.fr/2012/08/googles-self-driving-cars-are-going-to.html|creationDate|2013-09-05 +http://vancouverdata.blogspot.fr/2012/08/googles-self-driving-cars-are-going-to.html|tag|http://www.semanlink.net/tag/google_car +http://vancouverdata.blogspot.fr/2012/08/googles-self-driving-cars-are-going-to.html|title|Google’s Self-Driving Cars Are Going to Change Everything (Vancouver Data Blog by Neil McGuigan) +http://vancouverdata.blogspot.fr/2012/08/googles-self-driving-cars-are-going-to.html|creationTime|2013-09-05T11:47:43Z +http://www.tagcommons.org/|creationDate|2006-12-01 +http://www.tagcommons.org/|tag|http://www.semanlink.net/tag/tagging +http://www.tagcommons.org/|comment|TagCommons is a place and a process for us to create ways to share and interoperate over tagging data. +http://www.tagcommons.org/|title|TagCommons +https://www.facebook.com/nipsfoundation/videos/795861577420073/|creationDate|2018-12-06 +https://www.facebook.com/nipsfoundation/videos/795861577420073/|tag|http://www.semanlink.net/tag/tutorial +https://www.facebook.com/nipsfoundation/videos/795861577420073/|tag|http://www.semanlink.net/tag/minimum_description_length_principle +https://www.facebook.com/nipsfoundation/videos/795861577420073/|tag|http://www.semanlink.net/tag/unsupervised_machine_learning +https://www.facebook.com/nipsfoundation/videos/795861577420073/|tag|http://www.semanlink.net/tag/information_theory_and_deep_learning +https://www.facebook.com/nipsfoundation/videos/795861577420073/|comment|This tutorial Unsupervised Deep Learning will cover in detail, the approach to simply 'predict everything' in the data, typically with a probabilistic model, which can be seen through the lens of the Minimum Description Length principle as an effort to compress the data as compactly as possible. 
+https://www.facebook.com/nipsfoundation/videos/795861577420073/|title|"Neural Information Processing Systems - Tutorial Sessions: Unsupervised Deep Learning ""predict everything""" +https://www.facebook.com/nipsfoundation/videos/795861577420073/|creationTime|2018-12-06T09:59:34Z +http://www.semanticweb.com/main/oracle_sees_semantic_tech_solving_business_problems_138811.asp|creationDate|2010-07-01 +http://www.semanticweb.com/main/oracle_sees_semantic_tech_solving_business_problems_138811.asp|tag|http://www.semanlink.net/tag/oracle +http://www.semanticweb.com/main/oracle_sees_semantic_tech_solving_business_problems_138811.asp|tag|http://www.semanlink.net/tag/soa +http://www.semanticweb.com/main/oracle_sees_semantic_tech_solving_business_problems_138811.asp|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.semanticweb.com/main/oracle_sees_semantic_tech_solving_business_problems_138811.asp|comment|There are three sort of megatrends going on in the data center right now. One is SOA in particular for applications, that enables you to essentially break apart monolithic applications and combine them as services. But the second big trend then is to decouple the data from the application or the application services, so that in that sense what you can do is write your application or create services independent of the data sources they have to deal with, which comes full circle back to having a virtual layer between application services and data. +http://www.semanticweb.com/main/oracle_sees_semantic_tech_solving_business_problems_138811.asp|title|Oracle Sees Semantic Tech Solving Business Problems +http://www.semanticweb.com/main/oracle_sees_semantic_tech_solving_business_problems_138811.asp|creationTime|2010-07-01T12:54:45Z +http://blog.semantic-web.at/2011/02/17/transforming-spreadsheets-into-skos-with-google-refine/|creationDate|2011-02-21 +http://blog.semantic-web.at/2011/02/17/transforming-spreadsheets-into-skos-with-google-refine/|tag|http://www.semanlink.net/tag/spreadsheets +http://blog.semantic-web.at/2011/02/17/transforming-spreadsheets-into-skos-with-google-refine/|tag|http://www.semanlink.net/tag/google_refine +http://blog.semantic-web.at/2011/02/17/transforming-spreadsheets-into-skos-with-google-refine/|tag|http://www.semanlink.net/tag/skos +http://blog.semantic-web.at/2011/02/17/transforming-spreadsheets-into-skos-with-google-refine/|title|The Semantic Puzzle Transforming spreadsheets into SKOS with Google Refine +http://blog.semantic-web.at/2011/02/17/transforming-spreadsheets-into-skos-with-google-refine/|creationTime|2011-02-21T23:46:16Z +https://fr.coursera.org/learn/neural-networks/lecture/s7bmT/semantic-hashing-9-mins|creationDate|2017-11-07 +https://fr.coursera.org/learn/neural-networks/lecture/s7bmT/semantic-hashing-9-mins|tag|http://www.semanlink.net/tag/semantic_hashing +https://fr.coursera.org/learn/neural-networks/lecture/s7bmT/semantic-hashing-9-mins|tag|http://www.semanlink.net/tag/geoffrey_hinton +https://fr.coursera.org/learn/neural-networks/lecture/s7bmT/semantic-hashing-9-mins|title|Semantic Hashing [9 mins] - Université de Toronto Coursera +https://fr.coursera.org/learn/neural-networks/lecture/s7bmT/semantic-hashing-9-mins|creationTime|2017-11-07T14:40:31Z +https://www.cnbc.com/2018/04/11/goldman-asks-is-curing-patients-a-sustainable-business-model.html?__source=sharebar%7Ctwitter&par=sharebar|creationDate|2018-12-02 
+https://www.cnbc.com/2018/04/11/goldman-asks-is-curing-patients-a-sustainable-business-model.html?__source=sharebar%7Ctwitter&par=sharebar|tag|http://www.semanlink.net/tag/gene_therapy +https://www.cnbc.com/2018/04/11/goldman-asks-is-curing-patients-a-sustainable-business-model.html?__source=sharebar%7Ctwitter&par=sharebar|tag|http://www.semanlink.net/tag/vive_le_capitalisme +https://www.cnbc.com/2018/04/11/goldman-asks-is-curing-patients-a-sustainable-business-model.html?__source=sharebar%7Ctwitter&par=sharebar|tag|http://www.semanlink.net/tag/goldman_sachs +https://www.cnbc.com/2018/04/11/goldman-asks-is-curing-patients-a-sustainable-business-model.html?__source=sharebar%7Ctwitter&par=sharebar|comment|"Goldman Sachs analysts attempted to address a touchy subject for biotech companies, especially those involved in the pioneering ""gene therapy"" treatment: cures could be bad for business in the long run +" +https://www.cnbc.com/2018/04/11/goldman-asks-is-curing-patients-a-sustainable-business-model.html?__source=sharebar%7Ctwitter&par=sharebar|title|Goldman Sachs about gene therapy: 'Is curing patients a sustainable business model?' +https://www.cnbc.com/2018/04/11/goldman-asks-is-curing-patients-a-sustainable-business-model.html?__source=sharebar%7Ctwitter&par=sharebar|creationTime|2018-12-02T19:43:30Z +http://slayeroffice.com/articles/innerHTML_alternatives/#6a|creationDate|2006-10-06 +http://slayeroffice.com/articles/innerHTML_alternatives/#6a|tag|http://www.semanlink.net/tag/ajax +http://slayeroffice.com/articles/innerHTML_alternatives/#6a|tag|http://www.semanlink.net/tag/dev_tips +http://slayeroffice.com/articles/innerHTML_alternatives/#6a|tag|http://www.semanlink.net/tag/javascript_dom +http://slayeroffice.com/articles/innerHTML_alternatives/#6a|title|slayeroffice alternatives to innerHTML +http://www.ina.fr/video/I05266317/jessye-norman-video.html|creationDate|2018-07-14 +http://www.ina.fr/video/I05266317/jessye-norman-video.html|tag|http://www.semanlink.net/tag/j_y_etais +http://www.ina.fr/video/I05266317/jessye-norman-video.html|tag|http://www.semanlink.net/tag/revolution_francaise +http://www.ina.fr/video/I05266317/jessye-norman-video.html|tag|http://www.semanlink.net/tag/marseillaise +http://www.ina.fr/video/I05266317/jessye-norman-video.html|tag|http://www.semanlink.net/tag/14_juillet +http://www.ina.fr/video/I05266317/jessye-norman-video.html|title|La Marseillaise du bicentenaire de la Révolution +http://www.ina.fr/video/I05266317/jessye-norman-video.html|creationTime|2018-07-14T14:27:33Z +https://www.fast.ai/2019/05/13/blogging-advice/|creationDate|2019-05-14 +https://www.fast.ai/2019/05/13/blogging-advice/|tag|http://www.semanlink.net/tag/blog +https://www.fast.ai/2019/05/13/blogging-advice/|tag|http://www.semanlink.net/tag/rachel_thomas +https://www.fast.ai/2019/05/13/blogging-advice/|title|Advice for Better Blog Posts · fast.ai +https://www.fast.ai/2019/05/13/blogging-advice/|creationTime|2019-05-14T16:04:56Z +http://wiki.iks-project.eu/index.php/Workshops/Salzburg2012|creationDate|2012-05-12 +http://wiki.iks-project.eu/index.php/Workshops/Salzburg2012|tag|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://wiki.iks-project.eu/index.php/Workshops/Salzburg2012|title|SEMANTIC ENTERPRISE TECHNOLOGIES in ACTION - IKS Project +http://wiki.iks-project.eu/index.php/Workshops/Salzburg2012|creationTime|2012-05-12T09:44:35Z +http://apassant.net/blog/2008/04/22/attending-www2008/|creationDate|2008-04-24 
+http://apassant.net/blog/2008/04/22/attending-www2008/|tag|http://www.semanlink.net/tag/alexandre_passant +http://apassant.net/blog/2008/04/22/attending-www2008/|tag|http://www.semanlink.net/tag/ldow2008 +http://apassant.net/blog/2008/04/22/attending-www2008/|tag|http://www.semanlink.net/tag/www08 +http://apassant.net/blog/2008/04/22/attending-www2008/|title|Attending WWW2008 : Alexandre Passant +http://apassant.net/blog/2008/04/22/attending-www2008/|creationTime|2008-04-24T10:29:51Z +https://jira.mongodb.org/browse/SERVER-1723|creationDate|2016-11-29 +https://jira.mongodb.org/browse/SERVER-1723|tag|http://www.semanlink.net/tag/mongodb +https://jira.mongodb.org/browse/SERVER-1723|tag|http://www.semanlink.net/tag/bitmap_index +https://jira.mongodb.org/browse/SERVER-1723|title|[SERVER-1723] Add Bitmap indexes - MongoDB +https://jira.mongodb.org/browse/SERVER-1723|creationTime|2016-11-29T09:31:47Z +http://www.lemonde.fr/planete/article/2012/11/12/les-villages-de-siberie-cimetieres-des-dechets-toxiques-de-l-ex-urss_1789089_3244.html|creationDate|2012-11-13 +http://www.lemonde.fr/planete/article/2012/11/12/les-villages-de-siberie-cimetieres-des-dechets-toxiques-de-l-ex-urss_1789089_3244.html|tag|http://www.semanlink.net/tag/polluted_places +http://www.lemonde.fr/planete/article/2012/11/12/les-villages-de-siberie-cimetieres-des-dechets-toxiques-de-l-ex-urss_1789089_3244.html|tag|http://www.semanlink.net/tag/pesticide +http://www.lemonde.fr/planete/article/2012/11/12/les-villages-de-siberie-cimetieres-des-dechets-toxiques-de-l-ex-urss_1789089_3244.html|tag|http://www.semanlink.net/tag/siberie +http://www.lemonde.fr/planete/article/2012/11/12/les-villages-de-siberie-cimetieres-des-dechets-toxiques-de-l-ex-urss_1789089_3244.html|tag|http://www.semanlink.net/tag/urss +http://www.lemonde.fr/planete/article/2012/11/12/les-villages-de-siberie-cimetieres-des-dechets-toxiques-de-l-ex-urss_1789089_3244.html|title|Les villages de Sibérie, cimetières des déchets toxiques de l'ex-URSS +http://www.lemonde.fr/planete/article/2012/11/12/les-villages-de-siberie-cimetieres-des-dechets-toxiques-de-l-ex-urss_1789089_3244.html|creationTime|2012-11-13T22:44:15Z +http://docs.openlinksw.com/virtuoso/|creationDate|2009-05-18 +http://docs.openlinksw.com/virtuoso/|tag|http://www.semanlink.net/tag/virtuoso_universal_server +http://docs.openlinksw.com/virtuoso/|tag|http://www.semanlink.net/tag/virtuoso_doc +http://docs.openlinksw.com/virtuoso/|title|OpenLink Virtuoso Universal Server: Documentation +http://docs.openlinksw.com/virtuoso/|creationTime|2009-05-18T09:19:30Z +https://www.youtube.com/watch?v=jfwqRMdTmLo|creationDate|2019-02-24 +https://www.youtube.com/watch?v=jfwqRMdTmLo|tag|http://www.semanlink.net/tag/deep_nlp +https://www.youtube.com/watch?v=jfwqRMdTmLo|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=jfwqRMdTmLo|comment|">3 problems, 3 architectures: +>- Speech recognition (feed-forward networks) +>- NL parsing (word embeddings and feed-forward networks) +>- QA (transformers)" +https://www.youtube.com/watch?v=jfwqRMdTmLo|title|Successes and Challenges in Neural Models for Speech and Language - Michael Collins - YouTube +https://www.youtube.com/watch?v=jfwqRMdTmLo|creationTime|2019-02-24T14:12:27Z +https://innovation.ie.fujitsu.com/kedi/|creationDate|2018-04-10 +https://innovation.ie.fujitsu.com/kedi/|tag|http://www.semanlink.net/tag/knowledge_engineering +https://innovation.ie.fujitsu.com/kedi/|tag|http://www.semanlink.net/tag/pierre_yves_vandenbussche 
+https://innovation.ie.fujitsu.com/kedi/|tag|http://www.semanlink.net/tag/fujitsu +https://innovation.ie.fujitsu.com/kedi/|title|Fujitsu Ireland Research and Innovation Knowledge Engineering and DIscovery (KEDI) +https://innovation.ie.fujitsu.com/kedi/|creationTime|2018-04-10T17:45:54Z +http://fr.wikipedia.org/wiki/Les_caprices_d'un_fleuve|creationDate|2008-08-31 +http://fr.wikipedia.org/wiki/Les_caprices_d'un_fleuve|tag|http://www.semanlink.net/tag/film_francais +http://fr.wikipedia.org/wiki/Les_caprices_d'un_fleuve|tag|http://www.semanlink.net/tag/revolution_francaise +http://fr.wikipedia.org/wiki/Les_caprices_d'un_fleuve|tag|http://www.semanlink.net/tag/esclavage +http://fr.wikipedia.org/wiki/Les_caprices_d'un_fleuve|tag|http://www.semanlink.net/tag/senegal +http://fr.wikipedia.org/wiki/Les_caprices_d'un_fleuve|tag|http://www.semanlink.net/tag/couple_mixte +http://fr.wikipedia.org/wiki/Les_caprices_d'un_fleuve|title|Les Caprices d'un fleuve +http://fr.wikipedia.org/wiki/Les_caprices_d'un_fleuve|creationTime|2008-08-31T02:29:07Z +http://stackoverflow.com/questions/500431/what-is-the-scope-of-variables-in-javascript?rq=1|creationDate|2014-10-13 +http://stackoverflow.com/questions/500431/what-is-the-scope-of-variables-in-javascript?rq=1|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/500431/what-is-the-scope-of-variables-in-javascript?rq=1|tag|http://www.semanlink.net/tag/javascript +http://stackoverflow.com/questions/500431/what-is-the-scope-of-variables-in-javascript?rq=1|title|What is the scope of variables in JavaScript? - Stack Overflow +http://stackoverflow.com/questions/500431/what-is-the-scope-of-variables-in-javascript?rq=1|creationTime|2014-10-13T21:46:31Z +https://github.com/maxlath/wikidata-sdk#you-may-also-like|creationDate|2017-10-28 +https://github.com/maxlath/wikidata-sdk#you-may-also-like|tag|http://www.semanlink.net/tag/javascript_tool +https://github.com/maxlath/wikidata-sdk#you-may-also-like|tag|http://www.semanlink.net/tag/wikidata_query_service +https://github.com/maxlath/wikidata-sdk#you-may-also-like|title|maxlath/wikidata-sdk: A javascript tool-suite to query Wikidata and simplify its results +https://github.com/maxlath/wikidata-sdk#you-may-also-like|creationTime|2017-10-28T11:01:29Z +http://news.bbc.co.uk/2/hi/science/nature/7324564.stm|creationDate|2008-04-10 +http://news.bbc.co.uk/2/hi/science/nature/7324564.stm|tag|http://www.semanlink.net/tag/synchrotron +http://news.bbc.co.uk/2/hi/science/nature/7324564.stm|tag|http://www.semanlink.net/tag/insectes_fossiles +http://news.bbc.co.uk/2/hi/science/nature/7324564.stm|tag|http://www.semanlink.net/tag/ambre +http://news.bbc.co.uk/2/hi/science/nature/7324564.stm|tag|http://www.semanlink.net/tag/plastic_print +http://news.bbc.co.uk/2/hi/science/nature/7324564.stm|title|BBC NEWS Science/Nature Secret 'dino bugs' revealed +http://news.bbc.co.uk/2/hi/science/nature/7324564.stm|creationTime|2008-04-10T10:35:02Z +http://www.snee.com/bobdc.blog/2011/02/what-skos-xl-adds-to-skos.html|creationDate|2011-02-08 +http://www.snee.com/bobdc.blog/2011/02/what-skos-xl-adds-to-skos.html|tag|http://www.semanlink.net/tag/skos +http://www.snee.com/bobdc.blog/2011/02/what-skos-xl-adds-to-skos.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2011/02/what-skos-xl-adds-to-skos.html|title|What SKOS-XL adds to SKOS - bobdc.blog +http://www.snee.com/bobdc.blog/2011/02/what-skos-xl-adds-to-skos.html|creationTime|2011-02-08T22:16:29Z 
+http://www.zerodeconduite.net/sistersinlaw/|creationDate|2008-06-11 +http://www.zerodeconduite.net/sistersinlaw/|tag|http://www.semanlink.net/tag/justice +http://www.zerodeconduite.net/sistersinlaw/|tag|http://www.semanlink.net/tag/femme +http://www.zerodeconduite.net/sistersinlaw/|tag|http://www.semanlink.net/tag/cameroun +http://www.zerodeconduite.net/sistersinlaw/|tag|http://www.semanlink.net/tag/film +http://www.zerodeconduite.net/sistersinlaw/|tag|http://www.semanlink.net/tag/documentaire +http://www.zerodeconduite.net/sistersinlaw/|title|Sisters in law, un film de Kim Longinotto et Florence Ayisi +http://www.zerodeconduite.net/sistersinlaw/|creationTime|2008-06-11T00:43:59Z +http://nlp.town/blog/sentence-similarity/|creationDate|2018-05-25 +http://nlp.town/blog/sentence-similarity/|tag|http://www.semanlink.net/tag/survey +http://nlp.town/blog/sentence-similarity/|tag|http://www.semanlink.net/tag/yves_peirsman +http://nlp.town/blog/sentence-similarity/|tag|http://www.semanlink.net/tag/embedding_evaluation +http://nlp.town/blog/sentence-similarity/|tag|http://www.semanlink.net/tag/sentence_similarity +http://nlp.town/blog/sentence-similarity/|tag|http://www.semanlink.net/tag/good +http://nlp.town/blog/sentence-similarity/|comment|"[notebook](/doc/?uri=https%3A%2F%2Fgithub.com%2Fnlptown%2Fnlp-notebooks%2Fblob%2Fmaster%2FSimple%2520Sentence%2520Similarity.ipynb) +" +http://nlp.town/blog/sentence-similarity/|relatedDoc|https://github.com/nlptown/nlp-notebooks/blob/master/Simple%20Sentence%20Similarity.ipynb +http://nlp.town/blog/sentence-similarity/|title|Comparing Sentence Similarity Methods +http://nlp.town/blog/sentence-similarity/|creationTime|2018-05-25T23:53:41Z +http://www.cnes.fr/web/CNES-fr/11559-gp-recit-d-une-journee-historique-au-sonc.php|creationDate|2014-11-18 +http://www.cnes.fr/web/CNES-fr/11559-gp-recit-d-une-journee-historique-au-sonc.php|tag|http://www.semanlink.net/tag/philae +http://www.cnes.fr/web/CNES-fr/11559-gp-recit-d-une-journee-historique-au-sonc.php|title|GP - Récit d’une journée historique au SONC - CNES +http://www.cnes.fr/web/CNES-fr/11559-gp-recit-d-une-journee-historique-au-sonc.php|creationTime|2014-11-18T09:07:28Z +http://news.bbc.co.uk/1/hi/sci/tech/6661987.stm|creationDate|2007-05-20 +http://news.bbc.co.uk/1/hi/sci/tech/6661987.stm|tag|http://www.semanlink.net/tag/antarctique +http://news.bbc.co.uk/1/hi/sci/tech/6661987.stm|tag|http://www.semanlink.net/tag/decouverte_d_especes_inconnues +http://news.bbc.co.uk/1/hi/sci/tech/6661987.stm|comment|An extraordinarily diverse array of marine life (more than 700 new species) has been discovered in the deep, dark waters around Antarctica. 
+http://news.bbc.co.uk/1/hi/sci/tech/6661987.stm|title|BBC NEWS Antarctic 'treasure trove' found +http://news.bbc.co.uk/1/hi/sci/tech/6661987.stm|creationTime|2007-05-20T13:16:53Z +http://news.bbc.co.uk/1/hi/sci/tech/6661987.stm|source|BBC +http://www.theserverside.com/tt/articles/article.tss?l=RedirectAfterPost|creationDate|2007-01-07 +http://www.theserverside.com/tt/articles/article.tss?l=RedirectAfterPost|tag|http://www.semanlink.net/tag/web_dev +http://www.theserverside.com/tt/articles/article.tss?l=RedirectAfterPost|tag|http://www.semanlink.net/tag/java_dev +http://www.theserverside.com/tt/articles/article.tss?l=RedirectAfterPost|tag|http://www.semanlink.net/tag/http +http://www.theserverside.com/tt/articles/article.tss?l=RedirectAfterPost|title|Redirect After Post +http://dig.csail.mit.edu/breadcrumbs/node/62|creationDate|2007-01-02 +http://dig.csail.mit.edu/breadcrumbs/node/62|tag|http://www.semanlink.net/tag/tim_berners_lee +http://dig.csail.mit.edu/breadcrumbs/node/62|tag|http://www.semanlink.net/tag/ajar +http://dig.csail.mit.edu/breadcrumbs/node/62|tag|http://www.semanlink.net/tag/linked_data +http://dig.csail.mit.edu/breadcrumbs/node/62|title|Links on the Semantic Web +http://www.semanlink.net/doc/2019/05/sebastian_ruder_sur_twitter_|creationDate|2019-05-20 +http://www.semanlink.net/doc/2019/05/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2019/05/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/doc/2019/05/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/doc/2019/05/sebastian_ruder_sur_twitter_|title|"Sebastian Ruder sur Twitter : ""Besides the obvious things (ELMo, BERT, etc.), is there anything that we should definitely discuss at the NAACL ""Transfer Learning in NLP"" tutorial?""" +http://www.semanlink.net/doc/2019/05/sebastian_ruder_sur_twitter_|bookmarkOf|https://twitter.com/seb_ruder/status/1130050862204821504 +http://www.semanlink.net/doc/2019/05/sebastian_ruder_sur_twitter_|creationTime|2019-05-20T11:26:32Z +http://ruder.io/deep-learning-nlp-best-practices/index.html|creationDate|2017-09-18 +http://ruder.io/deep-learning-nlp-best-practices/index.html|tag|http://www.semanlink.net/tag/deep_nlp +http://ruder.io/deep-learning-nlp-best-practices/index.html|tag|http://www.semanlink.net/tag/best_practices +http://ruder.io/deep-learning-nlp-best-practices/index.html|title|Deep Learning for NLP Best Practices +http://ruder.io/deep-learning-nlp-best-practices/index.html|creationTime|2017-09-18T15:30:46Z +https://github.com/marcotcr/lime|creationDate|2018-09-09 +https://github.com/marcotcr/lime|tag|http://www.semanlink.net/tag/github_project +https://github.com/marcotcr/lime|tag|http://www.semanlink.net/tag/lime +https://github.com/marcotcr/lime|title|GitHub - marcotcr/lime: Lime: Explaining the predictions of any machine learning classifier +https://github.com/marcotcr/lime|creationTime|2018-09-09T15:25:49Z +https://arxiv.org/abs/1604.00289|creationDate|2018-10-28 +https://arxiv.org/abs/1604.00289|tag|http://www.semanlink.net/tag/human_like_ai +https://arxiv.org/abs/1604.00289|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1604.00289|arxiv_author|Tomer D. Ullman +https://arxiv.org/abs/1604.00289|arxiv_author|Brenden M. Lake +https://arxiv.org/abs/1604.00289|arxiv_author|Joshua B. Tenenbaum +https://arxiv.org/abs/1604.00289|arxiv_author|Samuel J. 
Gershman +https://arxiv.org/abs/1604.00289|comment|"> we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations +" +https://arxiv.org/abs/1604.00289|title|[1604.00289] Building Machines That Learn and Think Like People +https://arxiv.org/abs/1604.00289|creationTime|2018-10-28T17:08:00Z +https://arxiv.org/abs/1604.00289|arxiv_summary|"Recent progress in artificial intelligence (AI) has renewed interest in +building systems that learn and think like people. Many advances have come from +using deep neural networks trained end-to-end in tasks such as object +recognition, video games, and board games, achieving performance that equals or +even beats humans in some respects. Despite their biological inspiration and +performance achievements, these systems differ from human intelligence in +crucial ways. We review progress in cognitive science suggesting that truly +human-like learning and thinking machines will have to reach beyond current +engineering trends in both what they learn, and how they learn it. +Specifically, we argue that these machines should (a) build causal models of +the world that support explanation and understanding, rather than merely +solving pattern recognition problems; (b) ground learning in intuitive theories +of physics and psychology, to support and enrich the knowledge that is learned; +and (c) harness compositionality and learning-to-learn to rapidly acquire and +generalize knowledge to new tasks and situations. We suggest concrete +challenges and promising routes towards these goals that can combine the +strengths of recent neural network advances with more structured cognitive +models." +https://arxiv.org/abs/1604.00289|arxiv_firstAuthor|Brenden M. 
Lake +https://arxiv.org/abs/1604.00289|arxiv_updated|2016-11-02T17:26:50Z +https://arxiv.org/abs/1604.00289|arxiv_title|Building Machines That Learn and Think Like People +https://arxiv.org/abs/1604.00289|arxiv_published|2016-04-01T15:37:57Z +https://arxiv.org/abs/1604.00289|arxiv_num|1604.00289 +http://projectmosul.itn-dch.net/|creationDate|2015-03-17 +http://projectmosul.itn-dch.net/|comment|looking for volunteers to help virtually restore the Mosul Museum +http://projectmosul.itn-dch.net/|title|Projectmosul +http://projectmosul.itn-dch.net/|creationTime|2015-03-17T20:23:54Z +http://dig.csail.mit.edu/issues/tabulator/issue258|creationDate|2008-04-01 +http://dig.csail.mit.edu/issues/tabulator/issue258|tag|http://www.semanlink.net/tag/javascript_rdf_parser_in_ie +http://dig.csail.mit.edu/issues/tabulator/issue258|tag|http://www.semanlink.net/tag/richard_cyganiak +http://dig.csail.mit.edu/issues/tabulator/issue258|title|Demande 258: Patch for improved IE support - Tabulator Issue Tracker +http://dig.csail.mit.edu/issues/tabulator/issue258|creationTime|2008-04-01T15:02:47Z +http://www.w3.org/2011/09/LinkedData/|creationDate|2011-10-08 +http://www.w3.org/2011/09/LinkedData/|tag|http://www.semanlink.net/tag/enterprise_data +http://www.w3.org/2011/09/LinkedData/|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://www.w3.org/2011/09/LinkedData/|tag|http://www.semanlink.net/tag/linked_data +http://www.w3.org/2011/09/LinkedData/|tag|http://www.semanlink.net/tag/design_pattern +http://www.w3.org/2011/09/LinkedData/|title|Linked Enterprise Data Patterns +http://www.w3.org/2011/09/LinkedData/|creationTime|2011-10-08T21:29:12Z +http://ckan.org/case-studies/publicdata-eu/|creationDate|2012-10-22 +http://ckan.org/case-studies/publicdata-eu/|tag|http://www.semanlink.net/tag/data_publica +http://ckan.org/case-studies/publicdata-eu/|tag|http://www.semanlink.net/tag/ckan +http://ckan.org/case-studies/publicdata-eu/|title|PublicData.eu ckan - The open source data portal software +http://ckan.org/case-studies/publicdata-eu/|creationTime|2012-10-22T23:11:58Z +http://www.shopafrica53.com/|creationDate|2013-08-25 +http://www.shopafrica53.com/|tag|http://www.semanlink.net/tag/winch5 +http://www.shopafrica53.com/|tag|http://www.semanlink.net/tag/new_africa +http://www.shopafrica53.com/|tag|http://www.semanlink.net/tag/ghana +http://www.shopafrica53.com/|title|shopafrica53 +http://www.shopafrica53.com/|creationTime|2013-08-25T12:52:41Z +http://www.miximum.fr/pour-enfin-comprendre-javascript.html|creationDate|2015-06-28 +http://www.miximum.fr/pour-enfin-comprendre-javascript.html|tag|http://www.semanlink.net/tag/javascript_and_tutorial +http://www.miximum.fr/pour-enfin-comprendre-javascript.html|tag|http://www.semanlink.net/tag/javascript +http://www.miximum.fr/pour-enfin-comprendre-javascript.html|title|Miximum – Pour enfin comprendre Javascript +http://www.miximum.fr/pour-enfin-comprendre-javascript.html|creationTime|2015-06-28T01:57:26Z +http://www.linkedin.com/in/francoispaulservant|creationDate|2008-10-16 +http://www.linkedin.com/in/francoispaulservant|tag|http://www.semanlink.net/tag/fps +http://www.linkedin.com/in/francoispaulservant|tag|http://www.semanlink.net/tag/linkedin +http://www.linkedin.com/in/francoispaulservant|comment|my public profile +http://www.linkedin.com/in/francoispaulservant|title|François-Paul Servant - LinkedIn +http://www.linkedin.com/in/francoispaulservant|creationTime|2008-10-16T23:30:01Z +http://km.aifb.kit.edu/sites/spark/|creationDate|2011-03-07 
+http://km.aifb.kit.edu/sites/spark/|tag|http://www.semanlink.net/tag/sparql_en_javascript +http://km.aifb.kit.edu/sites/spark/|comment|"Spark library for visualizing SPARQL result sets +
+Spark is a fully HTML5 compliant JavaScript library which allows you to include results from SPARQL queries in any website (as a ""spark""), or, by using the QCrumb, data from any RDF file out there. Spark provides an easy extensible mechanism to define renders for SPARQL result sets."
+http://km.aifb.kit.edu/sites/spark/|title|Spark
+http://km.aifb.kit.edu/sites/spark/|creationTime|2011-03-07T08:24:58Z
+https://sites.google.com/site/restframework/service-descriptors|creationDate|2017-02-04
+https://sites.google.com/site/restframework/service-descriptors|tag|http://www.semanlink.net/tag/service_descriptors
+https://sites.google.com/site/restframework/service-descriptors|tag|http://www.semanlink.net/tag/rest
+https://sites.google.com/site/restframework/service-descriptors|title|Service Descriptors - REST Framework
+https://sites.google.com/site/restframework/service-descriptors|creationTime|2017-02-04T11:36:58Z
+https://www.youtube.com/watch?v=5NuZxUxHN0o|creationDate|2014-10-04
+https://www.youtube.com/watch?v=5NuZxUxHN0o|tag|http://www.semanlink.net/tag/youtube_video
+https://www.youtube.com/watch?v=5NuZxUxHN0o|tag|http://www.semanlink.net/tag/janis_joplin
+https://www.youtube.com/watch?v=5NuZxUxHN0o|comment|"Ball and chain *** and Piece of my heart ***
+"
+https://www.youtube.com/watch?v=5NuZxUxHN0o|title|Janis Joplin ~ Live in Frankfurt
+https://www.youtube.com/watch?v=5NuZxUxHN0o|creationTime|2014-10-04T18:06:15Z
+https://openreview.net/forum?id=S1HlA-ZAZ|creationDate|2018-12-06
+https://openreview.net/forum?id=S1HlA-ZAZ|tag|http://www.semanlink.net/tag/memory_in_deep_learning
+https://openreview.net/forum?id=S1HlA-ZAZ|tag|http://www.semanlink.net/tag/variational_autoencoder_vae
+https://openreview.net/forum?id=S1HlA-ZAZ|tag|http://www.semanlink.net/tag/sparse_distributed_memory
+https://openreview.net/forum?id=S1HlA-ZAZ|tag|http://www.semanlink.net/tag/google_deepmind
+https://openreview.net/forum?id=S1HlA-ZAZ|comment|A generative memory model that combines slow-learning neural networks and a fast-adapting linear Gaussian model as memory
+https://openreview.net/forum?id=S1HlA-ZAZ|title|The Kanerva Machine: A Generative Distributed Memory OpenReview (2018)
+https://openreview.net/forum?id=S1HlA-ZAZ|creationTime|2018-12-06T12:50:01Z
+http://www.howtocreate.co.uk/tutorials/javascript/objects|creationDate|2007-11-27
+http://www.howtocreate.co.uk/tutorials/javascript/objects|tag|http://www.semanlink.net/tag/javascript_and_tutorial
+http://www.howtocreate.co.uk/tutorials/javascript/objects|comment|"Includes how to add new methods to a class using the ""prototype"" keyword"
+http://www.howtocreate.co.uk/tutorials/javascript/objects|title|JavaScript tutorial - Creating objects
+http://www.howtocreate.co.uk/tutorials/javascript/objects|creationTime|2007-11-27T14:22:35Z
+https://www.nature.com/articles/s42256-018-0015-y.epdf?author_access_token=HIFIT_s3XXRdKKF3DTspd9RgN0jAjWel9jnR3ZoTv0P7sMl50Mvxe5hygHWfkIWjiyJe1kEkFLNBiorlpBWGyE5yRNu7SaSa6rWLAwmUPf1dL47QUigBag24erZ3G6Ue-9ZkZNtWzrZVVkxMrGE8eA%3D%3D|creationDate|2019-01-29
+https://www.nature.com/articles/s42256-018-0015-y.epdf?author_access_token=HIFIT_s3XXRdKKF3DTspd9RgN0jAjWel9jnR3ZoTv0P7sMl50Mvxe5hygHWfkIWjiyJe1kEkFLNBiorlpBWGyE5yRNu7SaSa6rWLAwmUPf1dL47QUigBag24erZ3G6Ue-9ZkZNtWzrZVVkxMrGE8eA%3D%3D|tag|http://www.semanlink.net/tag/spiking_neural_network
+https://www.nature.com/articles/s42256-018-0015-y.epdf?author_access_token=HIFIT_s3XXRdKKF3DTspd9RgN0jAjWel9jnR3ZoTv0P7sMl50Mvxe5hygHWfkIWjiyJe1kEkFLNBiorlpBWGyE5yRNu7SaSa6rWLAwmUPf1dL47QUigBag24erZ3G6Ue-9ZkZNtWzrZVVkxMrGE8eA%3D%3D|comment|"> Here, we describe a new approach to training SNNs, where the ANN training is to not only learn the task, but to produce a SNN in the process. Specifically, if the training procedure can include the eventual objective of low-precision communication between nodes, the training process of a SNN can be nearly as effective as a comparable ANN. This method, which we term Whetstone inspired by the tool to sharpen a dull knife, is intentionally agnostic to both the type of ANN being trained and the targeted neuromorphic hardware. Rather, the intent is to provide a straightforward interface for machine learning researchers to leverage the powerful capabilities of low-power neuromorphic hardware on a wide range of deep learning applications
+
+Whetstone can train neural nets through Keras to be ""spiking"" without an expansion of the network or an expensive temporal code
+"
+https://www.nature.com/articles/s42256-018-0015-y.epdf?author_access_token=HIFIT_s3XXRdKKF3DTspd9RgN0jAjWel9jnR3ZoTv0P7sMl50Mvxe5hygHWfkIWjiyJe1kEkFLNBiorlpBWGyE5yRNu7SaSa6rWLAwmUPf1dL47QUigBag24erZ3G6Ue-9ZkZNtWzrZVVkxMrGE8eA%3D%3D|title|Training deep neural networks for binary communication with the Whetstone method Nature Machine Intelligence
+https://www.nature.com/articles/s42256-018-0015-y.epdf?author_access_token=HIFIT_s3XXRdKKF3DTspd9RgN0jAjWel9jnR3ZoTv0P7sMl50Mvxe5hygHWfkIWjiyJe1kEkFLNBiorlpBWGyE5yRNu7SaSa6rWLAwmUPf1dL47QUigBag24erZ3G6Ue-9ZkZNtWzrZVVkxMrGE8eA%3D%3D|creationTime|2019-01-29T01:16:07Z
+https://www.deepl.com/|creationDate|2017-08-30
+https://www.deepl.com/|tag|http://www.semanlink.net/tag/deep_nlp
+https://www.deepl.com/|tag|http://www.semanlink.net/tag/machine_translation
+https://www.deepl.com/|title|DeepL
+https://www.deepl.com/|creationTime|2017-08-30T11:23:25Z
+http://weblog.burningbird.net/2005/01/27/cheap-eats-at-the-semantic-web-cafe/|creationDate|2006-02-28
+http://weblog.burningbird.net/2005/01/27/cheap-eats-at-the-semantic-web-cafe/|tag|http://www.semanlink.net/tag/shelley_powers
+http://weblog.burningbird.net/2005/01/27/cheap-eats-at-the-semantic-web-cafe/|tag|http://www.semanlink.net/tag/folksonomies_vs_ontologies
+http://weblog.burningbird.net/2005/01/27/cheap-eats-at-the-semantic-web-cafe/|tag|http://www.semanlink.net/tag/technorati
+http://weblog.burningbird.net/2005/01/27/cheap-eats-at-the-semantic-web-cafe/|tag|http://www.semanlink.net/tag/tagging
+http://weblog.burningbird.net/2005/01/27/cheap-eats-at-the-semantic-web-cafe/|title|Burningbird » Cheap Eats at the Semantic Web Café
+http://www.aseantravelandtours.com/china/sightseeings/visite_pekin.htm|creationDate|2008-04-13
+http://www.aseantravelandtours.com/china/sightseeings/visite_pekin.htm|tag|http://www.semanlink.net/tag/pekin
+http://www.aseantravelandtours.com/china/sightseeings/visite_pekin.htm|tag|http://www.semanlink.net/tag/voyage_en_chine
+http://www.aseantravelandtours.com/china/sightseeings/visite_pekin.htm|title|Guide Touristique Pekin
+http://www.aseantravelandtours.com/china/sightseeings/visite_pekin.htm|creationTime|2008-04-13T13:47:37Z
+http://www.rfi.fr/radiofr/editions/072/edition_13_20060531.asp|creationDate|2008-06-07
+http://www.rfi.fr/radiofr/editions/072/edition_13_20060531.asp|tag|http://www.semanlink.net/tag/niamey
+http://www.rfi.fr/radiofr/editions/072/edition_13_20060531.asp|tag|http://www.semanlink.net/tag/rfi +http://www.rfi.fr/radiofr/editions/072/edition_13_20060531.asp|tag|http://www.semanlink.net/tag/musique_du_niger +http://www.rfi.fr/radiofr/editions/072/edition_13_20060531.asp|title|RFI - Couleurs tropicales - émission enregistrée à Niamey +http://www.rfi.fr/radiofr/editions/072/edition_13_20060531.asp|creationTime|2008-06-07T09:29:11Z +http://www.w3.org/TR/owl-guide/|creationDate|2007-09-19 +http://www.w3.org/TR/owl-guide/|tag|http://www.semanlink.net/tag/w3c_recommendation +http://www.w3.org/TR/owl-guide/|tag|http://www.semanlink.net/tag/owl +http://www.w3.org/TR/owl-guide/|title|OWL Web Ontology Language Guide +http://www.w3.org/TR/owl-guide/|creationTime|2007-09-19T01:01:28Z +https://rubenverborgh.github.io/Solid-DeSemWeb-2018/|creationDate|2018-10-09 +https://rubenverborgh.github.io/Solid-DeSemWeb-2018/|tag|http://www.semanlink.net/tag/ruben_verborgh +https://rubenverborgh.github.io/Solid-DeSemWeb-2018/|tag|http://www.semanlink.net/tag/tim_berners_lee +https://rubenverborgh.github.io/Solid-DeSemWeb-2018/|tag|http://www.semanlink.net/tag/solid +https://rubenverborgh.github.io/Solid-DeSemWeb-2018/|title|Solid: Linked Data for personal data management +https://rubenverborgh.github.io/Solid-DeSemWeb-2018/|creationTime|2018-10-09T10:39:39Z +https://www.youtube.com/watch?v=kkTTrVotetI|creationDate|2016-01-07 +https://www.youtube.com/watch?v=kkTTrVotetI|tag|http://www.semanlink.net/tag/louvre +https://www.youtube.com/watch?v=kkTTrVotetI|tag|http://www.semanlink.net/tag/heroisme +https://www.youtube.com/watch?v=kkTTrVotetI|tag|http://www.semanlink.net/tag/esprit_de_resistance +https://www.youtube.com/watch?v=kkTTrVotetI|tag|http://www.semanlink.net/tag/2eme_guerre_mondiale +https://www.youtube.com/watch?v=kkTTrVotetI|tag|http://www.semanlink.net/tag/documentaire_tv +https://www.youtube.com/watch?v=kkTTrVotetI|comment|"Diriger, c'est prévoir, Jaujard avait prévu +" +https://www.youtube.com/watch?v=kkTTrVotetI|title|Comment Jacques Jaujard a sauvé le Louvre +https://www.youtube.com/watch?v=kkTTrVotetI|creationTime|2016-01-07T19:28:14Z +http://news.bbc.co.uk/1/hi/magazine/5048238.stm|creationDate|2006-06-08 +http://news.bbc.co.uk/1/hi/magazine/5048238.stm|tag|http://www.semanlink.net/tag/football +http://news.bbc.co.uk/1/hi/magazine/5048238.stm|tag|http://www.semanlink.net/tag/coupe_du_monde_2006 +http://news.bbc.co.uk/1/hi/magazine/5048238.stm|tag|http://www.semanlink.net/tag/physique +http://news.bbc.co.uk/1/hi/magazine/5048238.stm|title|BBC NEWS - The science behind the swerve +http://news.bbc.co.uk/1/hi/magazine/5048238.stm|source|BBC +http://www.thefigtrees.net/lee/blog/2008/10/videos_anzo_for_excel_in_actio.html|creationDate|2008-10-23 +http://www.thefigtrees.net/lee/blog/2008/10/videos_anzo_for_excel_in_actio.html|tag|http://www.semanlink.net/tag/excel +http://www.thefigtrees.net/lee/blog/2008/10/videos_anzo_for_excel_in_actio.html|tag|http://www.semanlink.net/tag/anzo +http://www.thefigtrees.net/lee/blog/2008/10/videos_anzo_for_excel_in_actio.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2008/10/videos_anzo_for_excel_in_actio.html|tag|http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf +http://www.thefigtrees.net/lee/blog/2008/10/videos_anzo_for_excel_in_actio.html|title|TechnicaLee Speaking: Videos: Anzo for Excel in action 
+http://www.thefigtrees.net/lee/blog/2008/10/videos_anzo_for_excel_in_actio.html|creationTime|2008-10-23T18:30:40Z +http://evgenymorozov.tumblr.com/post/107976715275/my-oped-in-ft|creationDate|2015-01-14 +http://evgenymorozov.tumblr.com/post/107976715275/my-oped-in-ft|tag|http://www.semanlink.net/tag/google +http://evgenymorozov.tumblr.com/post/107976715275/my-oped-in-ft|tag|http://www.semanlink.net/tag/evgeny_morozov +http://evgenymorozov.tumblr.com/post/107976715275/my-oped-in-ft|comment|The problem with Google is not that it is too big but that it hoovers up data that does not belong to it. +http://evgenymorozov.tumblr.com/post/107976715275/my-oped-in-ft|title|My oped in FT - Notes EM +http://evgenymorozov.tumblr.com/post/107976715275/my-oped-in-ft|creationTime|2015-01-14T17:27:33Z +http://neuralnetworksanddeeplearning.com/chap1.html|creationDate|2015-12-20 +http://neuralnetworksanddeeplearning.com/chap1.html|tag|http://www.semanlink.net/tag/handwriting_recognition +http://neuralnetworksanddeeplearning.com/chap1.html|tag|http://www.semanlink.net/tag/tutorial +http://neuralnetworksanddeeplearning.com/chap1.html|tag|http://www.semanlink.net/tag/artificial_neural_network +http://neuralnetworksanddeeplearning.com/chap1.html|title|Using neural nets to recognize handwritten digits +http://neuralnetworksanddeeplearning.com/chap1.html|creationTime|2015-12-20T14:33:06Z +https://arxiv.org/abs/1711.09677|creationDate|2019-02-02 +https://arxiv.org/abs/1711.09677|tag|http://www.semanlink.net/tag/three_way_decisions +https://arxiv.org/abs/1711.09677|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1711.09677|arxiv_author|Ljubomir Buturovic +https://arxiv.org/abs/1711.09677|arxiv_author|Damjan Krstajic +https://arxiv.org/abs/1711.09677|arxiv_author|Simon Thomas +https://arxiv.org/abs/1711.09677|arxiv_author|David E Leahy +https://arxiv.org/abs/1711.09677|title|"[1711.09677] Binary classification models with ""Uncertain"" predictions" +https://arxiv.org/abs/1711.09677|creationTime|2019-02-02T15:22:02Z +https://arxiv.org/abs/1711.09677|arxiv_summary|"Binary classification models which can assign probabilities to categories +such as ""the tissue is 75% likely to be tumorous"" or ""the chemical is 25% +likely to be toxic"" are well understood statistically, but their utility as an +input to decision making is less well explored. We argue that users need to +know which is the most probable outcome, how likely that is to be true and, in +addition, whether the model is capable enough to provide an answer. It is the +last case, where the potential outcomes of the model explicitly include ""don't +know"" that is addressed in this paper. Including this outcome would better +separate those predictions that can lead directly to a decision from those +where more data is needed. Where models produce an ""Uncertain"" answer similar +to a human reply of ""don't know"" or ""50:50"" in the examples we refer to +earlier, this would translate to actions such as ""operate on tumour"" or ""remove +compound from use"" where the models give a ""more true than not"" answer. Where +the models judge the result ""Uncertain"" the practical decision might be ""carry +out more detailed laboratory testing of compound"" or ""commission new tissue +analyses"". The paper presents several examples where we first analyse the +effect of its introduction, then present a methodology for separating +""Uncertain"" from binary predictions and finally, we provide arguments for its +use in practice." 
+https://arxiv.org/abs/1711.09677|arxiv_firstAuthor|Damjan Krstajic +https://arxiv.org/abs/1711.09677|arxiv_updated|2017-12-04T15:10:52Z +https://arxiv.org/abs/1711.09677|arxiv_title|"Binary classification models with ""Uncertain"" predictions" +https://arxiv.org/abs/1711.09677|arxiv_published|2017-11-27T13:29:42Z +https://arxiv.org/abs/1711.09677|arxiv_num|1711.09677 +http://www.terramadre.info/|creationDate|2008-08-08 +http://www.terramadre.info/|title|TerraMadre Rete delle comunità del cibo +http://www.terramadre.info/|creationTime|2008-08-08T19:27:14Z +http://www.osxfaq.com|creationDate|2005-03-29 +http://www.osxfaq.com|tag|http://www.semanlink.net/tag/os_x_unix +http://www.osxfaq.com|tag|http://www.semanlink.net/tag/mac_os_x +http://stackoverflow.com/questions/757066/what-is-a-good-javascript-rdfa-parser-implementation|creationDate|2012-09-01 +http://stackoverflow.com/questions/757066/what-is-a-good-javascript-rdfa-parser-implementation|tag|http://www.semanlink.net/tag/rdfa +http://stackoverflow.com/questions/757066/what-is-a-good-javascript-rdfa-parser-implementation|tag|http://www.semanlink.net/tag/javascript +http://stackoverflow.com/questions/757066/what-is-a-good-javascript-rdfa-parser-implementation|title|What is a good Javascript RDFa parser implementation? - Stack Overflow +http://stackoverflow.com/questions/757066/what-is-a-good-javascript-rdfa-parser-implementation|creationTime|2012-09-01T15:18:42Z +http://www.w3.org/TR/swbp-skos-core-guide/|creationDate|2005-11-04 +http://www.w3.org/TR/swbp-skos-core-guide/|tag|http://www.semanlink.net/tag/w3c_working_draft +http://www.w3.org/TR/swbp-skos-core-guide/|tag|http://www.semanlink.net/tag/skos_w3c_document +http://www.w3.org/TR/swbp-skos-core-guide/|tag|http://www.semanlink.net/tag/deprecated +http://www.w3.org/TR/swbp-skos-core-guide/|title|SKOS Core Guide +http://odaf.org/events/odaf_europe_2010.php|creationDate|2010-07-16 +http://odaf.org/events/odaf_europe_2010.php|tag|http://www.semanlink.net/tag/semantic_statistics +http://odaf.org/events/odaf_europe_2010.php|title|ODaF Europe 2010: Semantic Statistics +http://odaf.org/events/odaf_europe_2010.php|creationTime|2010-07-16T14:22:46Z +http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=1095551|creationDate|2011-02-09 +http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=1095551|tag|http://www.semanlink.net/tag/google_rich_snippets +http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=1095551|title|Rich snippets: Shopping and products +http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=1095551|creationTime|2011-02-09T01:05:27Z +http://thmanager.sourceforge.net/|creationDate|2007-01-09 +http://thmanager.sourceforge.net/|tag|http://www.semanlink.net/tag/skos_editor +http://thmanager.sourceforge.net/|comment|Open Source Tool for creating and visualizing SKOS RDF vocabularies +http://thmanager.sourceforge.net/|title|ThManager - metadata editor +http://www.nytimes.com/2006/04/07/science/07evolve.html?_r=1&oref=slogin|creationDate|2006-04-09 +http://www.nytimes.com/2006/04/07/science/07evolve.html?_r=1&oref=slogin|tag|http://www.semanlink.net/tag/evolution +http://www.nytimes.com/2006/04/07/science/07evolve.html?_r=1&oref=slogin|comment|By reconstructing ancient genes from long-extinct animals, scientists have for the first time demonstrated the step-by-step progression of how evolution created a new piece of molecular machinery by reusing and modifying existing parts. 
+http://www.nytimes.com/2006/04/07/science/07evolve.html?_r=1&oref=slogin|title|Study, in a First, Explains Evolution's Molecular Advance - New York Times
+http://vimeo.com/46304267|creationDate|2012-08-04
+http://vimeo.com/46304267|tag|http://www.semanlink.net/tag/video
+http://vimeo.com/46304267|tag|http://www.semanlink.net/tag/anticipation
+http://vimeo.com/46304267|tag|http://www.semanlink.net/tag/realite_augmentee
+http://vimeo.com/46304267|comment|A short futuristic film by Eran May-raz and Daniel Lazo
+http://vimeo.com/46304267|title|Sight
+http://vimeo.com/46304267|creationTime|2012-08-04T11:42:34Z
+http://allthingsd.com/20130906/how-a-man-in-austria-used-legos-to-hack-amazons-kindle-e-book-security/|creationDate|2013-09-09
+http://allthingsd.com/20130906/how-a-man-in-austria-used-legos-to-hack-amazons-kindle-e-book-security/|tag|http://www.semanlink.net/tag/kindle
+http://allthingsd.com/20130906/how-a-man-in-austria-used-legos-to-hack-amazons-kindle-e-book-security/|tag|http://www.semanlink.net/tag/lego
+http://allthingsd.com/20130906/how-a-man-in-austria-used-legos-to-hack-amazons-kindle-e-book-security/|tag|http://www.semanlink.net/tag/hack
+http://allthingsd.com/20130906/how-a-man-in-austria-used-legos-to-hack-amazons-kindle-e-book-security/|tag|http://www.semanlink.net/tag/drm
+http://allthingsd.com/20130906/how-a-man-in-austria-used-legos-to-hack-amazons-kindle-e-book-security/|tag|http://www.semanlink.net/tag/diy
+http://allthingsd.com/20130906/how-a-man-in-austria-used-legos-to-hack-amazons-kindle-e-book-security/|comment|DIY kindle scanner is a LEGO MINDSTORMS project. It combats the removal of old-established rights by DRM systems.
+http://allthingsd.com/20130906/how-a-man-in-austria-used-legos-to-hack-amazons-kindle-e-book-security/|title|How a Man in Austria Used Legos to Hack Amazon's Kindle E-Book Security - Arik Hesseldahl - News - AllThingsD
+http://allthingsd.com/20130906/how-a-man-in-austria-used-legos-to-hack-amazons-kindle-e-book-security/|creationTime|2013-09-09T14:13:33Z
+http://automotive.dfki.de/index.php/en/home|creationDate|2012-09-20
+http://automotive.dfki.de/index.php/en/home|tag|http://www.semanlink.net/tag/automobile
+http://automotive.dfki.de/index.php/en/home|tag|http://www.semanlink.net/tag/ui
+http://automotive.dfki.de/index.php/en/home|title|Automotive IUI - Car oriented multimodal interface architectures
+http://automotive.dfki.de/index.php/en/home|creationTime|2012-09-20T14:37:52Z
+http://www.canal-u.tv/canalu/producteurs/universite_de_tous_les_savoirs|creationDate|2010-01-18
+http://www.canal-u.tv/canalu/producteurs/universite_de_tous_les_savoirs|tag|http://www.semanlink.net/tag/conferences
+http://www.canal-u.tv/canalu/producteurs/universite_de_tous_les_savoirs|tag|http://www.semanlink.net/tag/online_course_materials
+http://www.canal-u.tv/canalu/producteurs/universite_de_tous_les_savoirs|title|Canal-U - Université de tous les savoirs
+http://www.canal-u.tv/canalu/producteurs/universite_de_tous_les_savoirs|creationTime|2010-01-18T10:53:34Z
+http://www.dailymotion.com/related/3766044/video/x1y9ko_amywinehouseconcertpart1_music|creationDate|2007-10-30
+http://www.dailymotion.com/related/3766044/video/x1y9ko_amywinehouseconcertpart1_music|tag|http://www.semanlink.net/tag/amy_winehouse
+http://www.dailymotion.com/related/3766044/video/x1y9ko_amywinehouseconcertpart1_music|comment|Includes Back to Black
+http://www.dailymotion.com/related/3766044/video/x1y9ko_amywinehouseconcertpart1_music|title|Amy Winehouse in Concert - Dailymotion
+http://www.dailymotion.com/related/3766044/video/x1y9ko_amywinehouseconcertpart1_music|creationTime|2007-10-30T09:59:32Z +http://www.lemonde.fr/campus/article/2017/11/15/paradise-papers-faire-la-morale-ne-suffit-pas_5215401_4401467.html|creationDate|2017-12-05 +http://www.lemonde.fr/campus/article/2017/11/15/paradise-papers-faire-la-morale-ne-suffit-pas_5215401_4401467.html|tag|http://www.semanlink.net/tag/paradise_papers +http://www.lemonde.fr/campus/article/2017/11/15/paradise-papers-faire-la-morale-ne-suffit-pas_5215401_4401467.html|title|« Paradise Papers » : faire la morale ne suffit pas +http://www.lemonde.fr/campus/article/2017/11/15/paradise-papers-faire-la-morale-ne-suffit-pas_5215401_4401467.html|creationTime|2017-12-05T19:24:55Z +https://arxiv.org/abs/1508.01991|creationDate|2018-03-05 +https://arxiv.org/abs/1508.01991|tag|http://www.semanlink.net/tag/named_entity_recognition +https://arxiv.org/abs/1508.01991|tag|http://www.semanlink.net/tag/bi_lstm +https://arxiv.org/abs/1508.01991|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1508.01991|tag|http://www.semanlink.net/tag/conditional_random_field +https://arxiv.org/abs/1508.01991|tag|http://www.semanlink.net/tag/sequence_labeling +https://arxiv.org/abs/1508.01991|arxiv_author|Kai Yu +https://arxiv.org/abs/1508.01991|arxiv_author|Zhiheng Huang +https://arxiv.org/abs/1508.01991|arxiv_author|Wei Xu +https://arxiv.org/abs/1508.01991|title|[1508.01991] Bidirectional LSTM-CRF Models for Sequence Tagging +https://arxiv.org/abs/1508.01991|creationTime|2018-03-05T19:03:20Z +https://arxiv.org/abs/1508.01991|arxiv_summary|"In this paper, we propose a variety of Long Short-Term Memory (LSTM) based +models for sequence tagging. These models include LSTM networks, bidirectional +LSTM (BI-LSTM) networks, LSTM with a Conditional Random Field (CRF) layer +(LSTM-CRF) and bidirectional LSTM with a CRF layer (BI-LSTM-CRF). Our work is +the first to apply a bidirectional LSTM CRF (denoted as BI-LSTM-CRF) model to +NLP benchmark sequence tagging data sets. We show that the BI-LSTM-CRF model +can efficiently use both past and future input features thanks to a +bidirectional LSTM component. It can also use sentence level tag information +thanks to a CRF layer. The BI-LSTM-CRF model can produce state of the art (or +close to) accuracy on POS, chunking and NER data sets. In addition, it is +robust and has less dependence on word embedding as compared to previous +observations." +https://arxiv.org/abs/1508.01991|arxiv_firstAuthor|Zhiheng Huang +https://arxiv.org/abs/1508.01991|arxiv_updated|2015-08-09T06:32:47Z +https://arxiv.org/abs/1508.01991|arxiv_title|Bidirectional LSTM-CRF Models for Sequence Tagging +https://arxiv.org/abs/1508.01991|arxiv_published|2015-08-09T06:32:47Z +https://arxiv.org/abs/1508.01991|arxiv_num|1508.01991 +http://cdixon.org/2013/12/31/why-im-interested-in-bitcoin/|creationDate|2014-01-02 +http://cdixon.org/2013/12/31/why-im-interested-in-bitcoin/|tag|http://www.semanlink.net/tag/bitcoin +http://cdixon.org/2013/12/31/why-im-interested-in-bitcoin/|comment|It seemed to me that there were two ways to improve the [financial] system: from above through regulation (which I support), or from below through competition. 
+http://cdixon.org/2013/12/31/why-im-interested-in-bitcoin/|title|Why I’m interested in Bitcoin - Chris Dixon
+http://cdixon.org/2013/12/31/why-im-interested-in-bitcoin/|creationTime|2014-01-02T12:20:48Z
+http://singularitysummit.com/|creationDate|2012-11-03
+http://singularitysummit.com/|tag|http://www.semanlink.net/tag/technological_singularity
+http://singularitysummit.com/|title|The Singularity Summit
+http://singularitysummit.com/|creationTime|2012-11-03T11:14:18Z
+http://linkededucation.org/|creationDate|2011-09-14
+http://linkededucation.org/|tag|http://www.semanlink.net/tag/linked_data
+http://linkededucation.org/|tag|http://www.semanlink.net/tag/education
+http://linkededucation.org/|title|linkededucation.org
+http://linkededucation.org/|creationTime|2011-09-14T18:17:57Z
+http://www.livescience.com/scienceoffiction/060707_pacman_insects.html|creationDate|2006-07-14
+http://www.livescience.com/scienceoffiction/060707_pacman_insects.html|tag|http://www.semanlink.net/tag/criquet
+http://www.livescience.com/scienceoffiction/060707_pacman_insects.html|tag|http://www.semanlink.net/tag/computer_game
+http://www.livescience.com/scienceoffiction/060707_pacman_insects.html|comment|"Remember Pac-Man? ""Instead of computer code, I wanted to have animals controlling the ghosts. To enable this, I built a real maze for the animals to walk around in, with its proportions and layout matching the maze of the computer game. The position of the animals in the maze is detected using colour-tracking via a camera, and linked to the ghosts in the game. This way, the real animals are directly controlling the virtual ghosts."" Crickets' tendency to flee from vibration (a sign of approaching predators) allows human players to interact with them."
+http://www.livescience.com/scienceoffiction/060707_pacman_insects.html|title|LiveScience.com - Live Insects Challenge Humans in Bizarre Computer Game
+http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/package-summary.html|creationDate|2005-11-01
+http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/package-summary.html|tag|http://www.semanlink.net/tag/java_concurrency
+http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/package-summary.html|title|EDU.oswego.cs.dl.util.concurrent
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-11.pdf|creationDate|2012-04-14
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-11.pdf|tag|http://www.semanlink.net/tag/bbc_semantic_publishing
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-11.pdf|tag|http://www.semanlink.net/tag/automatic_tagging
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-11.pdf|tag|http://www.semanlink.net/tag/ldow2012
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-11.pdf|tag|http://www.semanlink.net/tag/yves_raymond
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-11.pdf|tag|http://www.semanlink.net/tag/nlp
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-11.pdf|title|Automated interlinking of speech radio archives
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-11.pdf|creationTime|2012-04-14T12:03:28Z
+https://www.nytimes.com/2019/03/29/science/dinosaurs-extinction-asteroid.html|creationDate|2019-03-30
+https://www.nytimes.com/2019/03/29/science/dinosaurs-extinction-asteroid.html|tag|http://www.semanlink.net/tag/tanis_kt
+https://www.nytimes.com/2019/03/29/science/dinosaurs-extinction-asteroid.html|title|Fossil Site Reveals Day That Meteor Hit Earth and, Maybe, Wiped Out Dinosaurs - The New York Times
+https://www.nytimes.com/2019/03/29/science/dinosaurs-extinction-asteroid.html|creationTime|2019-03-30T13:22:01Z
+http://hci-matters.com/blog/?p=9|creationDate|2007-07-10
+http://hci-matters.com/blog/?p=9|tag|http://www.semanlink.net/tag/gui
+http://hci-matters.com/blog/?p=9|title|The New Interface Advocate :: The misused mouse, part 2: A proposal for a nearly mouseless interface.
+http://hci-matters.com/blog/?p=9|creationTime|2007-07-10T23:04:13Z
+http://www.slideshare.net/sopekmir/how-can-blockchain-amplify-digital-identifiers-improving-data-persistence-openness-and-trust-in-the-modern-world-68539778|creationDate|2016-11-10
+http://www.slideshare.net/sopekmir/how-can-blockchain-amplify-digital-identifiers-improving-data-persistence-openness-and-trust-in-the-modern-world-68539778|tag|http://www.semanlink.net/tag/blockchain
+http://www.slideshare.net/sopekmir/how-can-blockchain-amplify-digital-identifiers-improving-data-persistence-openness-and-trust-in-the-modern-world-68539778|tag|http://www.semanlink.net/tag/mirek_sopek
+http://www.slideshare.net/sopekmir/how-can-blockchain-amplify-digital-identifiers-improving-data-persistence-openness-and-trust-in-the-modern-world-68539778|tag|http://www.semanlink.net/tag/slideshare
+http://www.slideshare.net/sopekmir/how-can-blockchain-amplify-digital-identifiers-improving-data-persistence-openness-and-trust-in-the-modern-world-68539778|title|How Can Blockchain amplify Digital Identifiers? Improving Data Persis…
+http://www.slideshare.net/sopekmir/how-can-blockchain-amplify-digital-identifiers-improving-data-persistence-openness-and-trust-in-the-modern-world-68539778|creationTime|2016-11-10T09:49:38Z
+http://www.economist.com/blogs/freeexchange/2013/08/labour-markets-0?fsrc=rss|creationDate|2016-04-23
+http://www.economist.com/blogs/freeexchange/2013/08/labour-markets-0?fsrc=rss|tag|http://www.semanlink.net/tag/travail
+http://www.economist.com/blogs/freeexchange/2013/08/labour-markets-0?fsrc=rss|title|"Labour markets: On ""bullshit jobs"" The Economist" +http://www.economist.com/blogs/freeexchange/2013/08/labour-markets-0?fsrc=rss|creationTime|2016-04-23T17:32:05Z +https://gallica.bnf.fr/ark:/12148/bpt6k33245388|creationDate|2018-08-02 +https://gallica.bnf.fr/ark:/12148/bpt6k33245388|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +https://gallica.bnf.fr/ark:/12148/bpt6k33245388|tag|http://www.semanlink.net/tag/histoire_des_jermas +https://gallica.bnf.fr/ark:/12148/bpt6k33245388|tag|http://www.semanlink.net/tag/boube_gado +https://gallica.bnf.fr/ark:/12148/bpt6k33245388|tag|http://www.semanlink.net/tag/histoire_du_niger +https://gallica.bnf.fr/ark:/12148/bpt6k33245388|comment|[pdf](https://storage.googleapis.com/cantookhub-media-eden/45/6ae1e47bbb8a3f93751e43e51f4e8a54f892fd.pdf) +https://gallica.bnf.fr/ark:/12148/bpt6k33245388|title|Le Zarmatarey : contribution à l'histoire des populations d'entre Niger et Dallol Mawri / par Boubé Gado Gallica +https://gallica.bnf.fr/ark:/12148/bpt6k33245388|creationTime|2018-08-02T21:27:53Z +https://www.weforum.org/agenda/2017/01/why-we-should-all-have-a-basic-income?utm_content=buffer711e3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-01-16 +https://www.weforum.org/agenda/2017/01/why-we-should-all-have-a-basic-income?utm_content=buffer711e3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/guaranteed_basic_income +https://www.weforum.org/agenda/2017/01/why-we-should-all-have-a-basic-income?utm_content=buffer711e3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/davos +https://www.weforum.org/agenda/2017/01/why-we-should-all-have-a-basic-income?utm_content=buffer711e3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Why we should all have a basic income World Economic Forum +https://www.weforum.org/agenda/2017/01/why-we-should-all-have-a-basic-income?utm_content=buffer711e3&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-01-16T00:42:08Z +http://virtuoso.openlinksw.com/wiki/main/Main/VirtInstallationEC2|creationDate|2009-03-31 +http://virtuoso.openlinksw.com/wiki/main/Main/VirtInstallationEC2|tag|http://www.semanlink.net/tag/virtuoso +http://virtuoso.openlinksw.com/wiki/main/Main/VirtInstallationEC2|tag|http://www.semanlink.net/tag/amazon +http://virtuoso.openlinksw.com/wiki/main/Main/VirtInstallationEC2|title|VOS: Virtuoso Universal Server AMI for Amazon EC2 Instantiation Guide +http://virtuoso.openlinksw.com/wiki/main/Main/VirtInstallationEC2|creationTime|2009-03-31T14:21:47Z +https://www.washingtonpost.com/news/morning-mix/wp/2016/07/28/anthrax-sickens-13-in-western-siberia-and-a-thawed-out-reindeer-corpse-may-be-to-blame/?postshare=2951469801095885&tid=ss_tw|creationDate|2016-07-29 +https://www.washingtonpost.com/news/morning-mix/wp/2016/07/28/anthrax-sickens-13-in-western-siberia-and-a-thawed-out-reindeer-corpse-may-be-to-blame/?postshare=2951469801095885&tid=ss_tw|tag|http://www.semanlink.net/tag/rechauffement_climatique +https://www.washingtonpost.com/news/morning-mix/wp/2016/07/28/anthrax-sickens-13-in-western-siberia-and-a-thawed-out-reindeer-corpse-may-be-to-blame/?postshare=2951469801095885&tid=ss_tw|tag|http://www.semanlink.net/tag/siberie 
+https://www.washingtonpost.com/news/morning-mix/wp/2016/07/28/anthrax-sickens-13-in-western-siberia-and-a-thawed-out-reindeer-corpse-may-be-to-blame/?postshare=2951469801095885&tid=ss_tw|tag|http://www.semanlink.net/tag/anthrax +https://www.washingtonpost.com/news/morning-mix/wp/2016/07/28/anthrax-sickens-13-in-western-siberia-and-a-thawed-out-reindeer-corpse-may-be-to-blame/?postshare=2951469801095885&tid=ss_tw|title|Anthrax sickens 13 in western Siberia, and a thawed-out reindeer corpse may be to blame - The Washington Post +https://www.washingtonpost.com/news/morning-mix/wp/2016/07/28/anthrax-sickens-13-in-western-siberia-and-a-thawed-out-reindeer-corpse-may-be-to-blame/?postshare=2951469801095885&tid=ss_tw|creationTime|2016-07-29T21:33:29Z +http://pandas.pydata.org/pandas-docs/stable/|creationDate|2016-02-09 +http://pandas.pydata.org/pandas-docs/stable/|tag|http://www.semanlink.net/tag/documentation +http://pandas.pydata.org/pandas-docs/stable/|tag|http://www.semanlink.net/tag/pandas +http://pandas.pydata.org/pandas-docs/stable/|title|pandas documentation +http://pandas.pydata.org/pandas-docs/stable/|creationTime|2016-02-09T11:25:29Z +http://www.heppresearch.com/gr4google|creationDate|2011-03-24 +http://www.heppresearch.com/gr4google|tag|http://www.semanlink.net/tag/rdfa +http://www.heppresearch.com/gr4google|tag|http://www.semanlink.net/tag/google +http://www.heppresearch.com/gr4google|tag|http://www.semanlink.net/tag/goodrelations +http://www.heppresearch.com/gr4google|title|Semantic SEO for Google with GoodRelations and RDFa Hepp Research GmbH +http://www.heppresearch.com/gr4google|creationTime|2011-03-24T21:54:23Z +http://php-java-bridge.sourceforge.net/doc/tomcat6.php|creationDate|2011-04-05 +http://php-java-bridge.sourceforge.net/doc/tomcat6.php|tag|http://www.semanlink.net/tag/php +http://php-java-bridge.sourceforge.net/doc/tomcat6.php|tag|http://www.semanlink.net/tag/tomcat +http://php-java-bridge.sourceforge.net/doc/tomcat6.php|comment|Running PHP applications in Tomcat 6 +http://php-java-bridge.sourceforge.net/doc/tomcat6.php|title|PHP/Java Bridge +http://php-java-bridge.sourceforge.net/doc/tomcat6.php|creationTime|2011-04-05T18:28:36Z +https://lejournal.cnrs.fr/diaporamas/une-plongee-dans-lafrique-antique|creationDate|2018-04-09 +https://lejournal.cnrs.fr/diaporamas/une-plongee-dans-lafrique-antique|tag|http://www.semanlink.net/tag/soudan +https://lejournal.cnrs.fr/diaporamas/une-plongee-dans-lafrique-antique|tag|http://www.semanlink.net/tag/meroe +https://lejournal.cnrs.fr/diaporamas/une-plongee-dans-lafrique-antique|tag|http://www.semanlink.net/tag/antiquite_africaine +https://lejournal.cnrs.fr/diaporamas/une-plongee-dans-lafrique-antique|comment|Le site archéologique de Sedeinga, dans le nord du Soudan, offre un témoignage inédit des rites funéraires des royaumes de Napata et de Méroé qui régnèrent sur cette région du VIIe siècle avant notre ère jusqu’au IVe siècle +https://lejournal.cnrs.fr/diaporamas/une-plongee-dans-lafrique-antique|title|Une plongée dans l’Afrique antique CNRS Le journal +https://lejournal.cnrs.fr/diaporamas/une-plongee-dans-lafrique-antique|creationTime|2018-04-09T22:15:14Z +http://images.apple.com/quicktime/pdf/QuickTime7_User_Guide.pdf|creationDate|2007-09-15 +http://images.apple.com/quicktime/pdf/QuickTime7_User_Guide.pdf|tag|http://www.semanlink.net/tag/quicktime +http://images.apple.com/quicktime/pdf/QuickTime7_User_Guide.pdf|title|QuickTime 7 User Guide 
+http://images.apple.com/quicktime/pdf/QuickTime7_User_Guide.pdf|creationTime|2007-09-15T14:25:13Z +http://www.w3.org/community/markdown/wiki/MarkdownImplementations|creationDate|2015-10-11 +http://www.w3.org/community/markdown/wiki/MarkdownImplementations|tag|http://www.semanlink.net/tag/markdown +http://www.w3.org/community/markdown/wiki/MarkdownImplementations|title|MarkdownImplementations - Markdown Community Group +http://www.w3.org/community/markdown/wiki/MarkdownImplementations|creationTime|2015-10-11T09:42:26Z +https://explosion.ai/blog/sense2vec-with-spacy|creationDate|2018-04-08 +https://explosion.ai/blog/sense2vec-with-spacy|tag|http://www.semanlink.net/tag/spacy +https://explosion.ai/blog/sense2vec-with-spacy|tag|http://www.semanlink.net/tag/sense2vec +https://explosion.ai/blog/sense2vec-with-spacy|tag|http://www.semanlink.net/tag/matthew_honnibal +https://explosion.ai/blog/sense2vec-with-spacy|title|Sense2vec with spaCy and Gensim · Blog · Explosion AI +https://explosion.ai/blog/sense2vec-with-spacy|creationTime|2018-04-08T15:28:14Z +http://winch5.blog.lemonde.fr/chapitre-2-innover-creer-des-opportunites/|creationDate|2013-09-10 +http://winch5.blog.lemonde.fr/chapitre-2-innover-creer-des-opportunites/|tag|http://www.semanlink.net/tag/innovation +http://winch5.blog.lemonde.fr/chapitre-2-innover-creer-des-opportunites/|tag|http://www.semanlink.net/tag/winch5 +http://winch5.blog.lemonde.fr/chapitre-2-innover-creer-des-opportunites/|title|Chapitre 2 — Innover = créer des opportunités Winch 5 +http://winch5.blog.lemonde.fr/chapitre-2-innover-creer-des-opportunites/|creationTime|2013-09-10T01:12:37Z +http://stackoverflow.com/questions/11758676/resolve-multiple-slf4j-bindings-in-maven-project|creationDate|2012-08-24 +http://stackoverflow.com/questions/11758676/resolve-multiple-slf4j-bindings-in-maven-project|tag|http://www.semanlink.net/tag/maven +http://stackoverflow.com/questions/11758676/resolve-multiple-slf4j-bindings-in-maven-project|title|Resolve multiple SLF4J bindings in maven project - Stack Overflow +http://stackoverflow.com/questions/11758676/resolve-multiple-slf4j-bindings-in-maven-project|creationTime|2012-08-24T03:18:34Z +http://www.zdnet.com/thanks-for-nothing-jerkface-7000030306/|creationDate|2014-06-08 +http://www.zdnet.com/thanks-for-nothing-jerkface-7000030306/|tag|http://www.semanlink.net/tag/googleplus +http://www.zdnet.com/thanks-for-nothing-jerkface-7000030306/|tag|http://www.semanlink.net/tag/privacy_and_internet +http://www.zdnet.com/thanks-for-nothing-jerkface-7000030306/|tag|http://www.semanlink.net/tag/google +http://www.zdnet.com/thanks-for-nothing-jerkface-7000030306/|title|Thanks for nothing, jerkface ZDNet +http://www.zdnet.com/thanks-for-nothing-jerkface-7000030306/|creationTime|2014-06-08T21:16:05Z +http://blog.sparna.fr/2018/01/23/vocabulaires-thesaurus-web-donnees-skos-open-source/|creationDate|2018-01-23 +http://blog.sparna.fr/2018/01/23/vocabulaires-thesaurus-web-donnees-skos-open-source/|tag|http://www.semanlink.net/tag/tools +http://blog.sparna.fr/2018/01/23/vocabulaires-thesaurus-web-donnees-skos-open-source/|tag|http://www.semanlink.net/tag/skos +http://blog.sparna.fr/2018/01/23/vocabulaires-thesaurus-web-donnees-skos-open-source/|tag|http://www.semanlink.net/tag/skos_editor +http://blog.sparna.fr/2018/01/23/vocabulaires-thesaurus-web-donnees-skos-open-source/|title|Vocabulaires dans le web de données : quels outils open-source ? 
- Sparna Blog +http://blog.sparna.fr/2018/01/23/vocabulaires-thesaurus-web-donnees-skos-open-source/|creationTime|2018-01-23T18:20:31Z +http://www.pbs.org/wgbh/nova/bonediggers/evolution.html|creationDate|2007-05-23 +http://www.pbs.org/wgbh/nova/bonediggers/evolution.html|tag|http://www.semanlink.net/tag/placentaires_marsupiaux_et_monotremes +http://www.pbs.org/wgbh/nova/bonediggers/evolution.html|tag|http://www.semanlink.net/tag/australia_s_evolutionary_history +http://www.pbs.org/wgbh/nova/bonediggers/evolution.html|comment|Why did Australia get a preponderance of pouched and egg-laying mammals? And, at the same time, precious few of the kind of mammal that dominates every other land in the world?
Today no monotremes exist outside of Australia (and New Guinea), and no placental mammals that didn't fly or swim there—for example, bats or dugongs—exist in Australia except for rodents (which arrived only about five million years ago) and mammals that were introduced by people (who arrived by 60,000 years ago). +http://www.pbs.org/wgbh/nova/bonediggers/evolution.html|title|NOVA Portrait of Australia's unique evolutionary history. +http://www.pbs.org/wgbh/nova/bonediggers/evolution.html|creationTime|2007-05-23T01:18:38Z +http://www.w3.org/blog/SW/2008/12/03/eleven_w3c_publications_related_to_owl_2|creationDate|2008-12-08 +http://www.w3.org/blog/SW/2008/12/03/eleven_w3c_publications_related_to_owl_2|tag|http://www.semanlink.net/tag/owl_2 +http://www.w3.org/blog/SW/2008/12/03/eleven_w3c_publications_related_to_owl_2|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/blog/SW/2008/12/03/eleven_w3c_publications_related_to_owl_2|title|W3C Semantic Web Activity News - Eleven W3C Publications Related to OWL 2 +http://www.w3.org/blog/SW/2008/12/03/eleven_w3c_publications_related_to_owl_2|creationTime|2008-12-08T11:50:03Z +http://www.laquadrature.net/files/LaQuadratureduNet-Riposte-Graduee_reponse-inefficace-inapplicable-dangereuse-a-un-faux-probleme.pdf|creationDate|2009-02-18 +http://www.laquadrature.net/files/LaQuadratureduNet-Riposte-Graduee_reponse-inefficace-inapplicable-dangereuse-a-un-faux-probleme.pdf|tag|http://www.semanlink.net/tag/hadopi_riposte_graduee +http://www.laquadrature.net/files/LaQuadratureduNet-Riposte-Graduee_reponse-inefficace-inapplicable-dangereuse-a-un-faux-probleme.pdf|title|HADOPI, « Riposte graduée » : Une réponse inefficace, inapplicable et dangereuse à un faux problème. +http://www.laquadrature.net/files/LaQuadratureduNet-Riposte-Graduee_reponse-inefficace-inapplicable-dangereuse-a-un-faux-probleme.pdf|creationTime|2009-02-18T01:10:47Z +http://esw.w3.org/topic/SparqlImplementations|creationDate|2007-10-13 +http://esw.w3.org/topic/SparqlImplementations|tag|http://www.semanlink.net/tag/sparql +http://esw.w3.org/topic/SparqlImplementations|title|SparqlImplementations - ESW Wiki +http://esw.w3.org/topic/SparqlImplementations|creationTime|2007-10-13T19:30:26Z +http://ecologie.blog.lemonde.fr/2013/09/03/aux-etats-unis-des-chevaux-de-course-clones-entrent-en-piste/|creationDate|2013-09-03 +http://ecologie.blog.lemonde.fr/2013/09/03/aux-etats-unis-des-chevaux-de-course-clones-entrent-en-piste/|tag|http://www.semanlink.net/tag/cheval +http://ecologie.blog.lemonde.fr/2013/09/03/aux-etats-unis-des-chevaux-de-course-clones-entrent-en-piste/|tag|http://www.semanlink.net/tag/jeux +http://ecologie.blog.lemonde.fr/2013/09/03/aux-etats-unis-des-chevaux-de-course-clones-entrent-en-piste/|tag|http://www.semanlink.net/tag/clonage +http://ecologie.blog.lemonde.fr/2013/09/03/aux-etats-unis-des-chevaux-de-course-clones-entrent-en-piste/|title|Aux Etats-Unis, des chevaux de course clonés entrent en piste Eco(lo) +http://ecologie.blog.lemonde.fr/2013/09/03/aux-etats-unis-des-chevaux-de-course-clones-entrent-en-piste/|creationTime|2013-09-03T19:35:41Z +https://www.analyticsvidhya.com/blog/2016/01/12-pandas-techniques-python-data-manipulation/|creationDate|2017-06-19 +https://www.analyticsvidhya.com/blog/2016/01/12-pandas-techniques-python-data-manipulation/|tag|http://www.semanlink.net/tag/pandas +https://www.analyticsvidhya.com/blog/2016/01/12-pandas-techniques-python-data-manipulation/|title|12 Useful Pandas Techniques in Python for Data Manipulation 
+https://www.analyticsvidhya.com/blog/2016/01/12-pandas-techniques-python-data-manipulation/|creationTime|2017-06-19T11:00:03Z +http://webmaster.yandex.ru/microtest.xml|creationDate|2013-06-25 +http://webmaster.yandex.ru/microtest.xml|tag|http://www.semanlink.net/tag/schema_org +http://webmaster.yandex.ru/microtest.xml|tag|http://www.semanlink.net/tag/validation +http://webmaster.yandex.ru/microtest.xml|tag|http://www.semanlink.net/tag/rdfa_tool +http://webmaster.yandex.ru/microtest.xml|title|Yandex checker +http://webmaster.yandex.ru/microtest.xml|creationTime|2013-06-25T16:22:18Z +http://lists.w3.org/Archives/Public/semantic-web/2005Apr/0157.html|creationDate|2005-04-26 +http://lists.w3.org/Archives/Public/semantic-web/2005Apr/0157.html|tag|http://www.semanlink.net/tag/tagging +http://lists.w3.org/Archives/Public/semantic-web/2005Apr/0157.html|title|When flickr meets del.icio.us meets SKOS. +http://www.cnrs.fr/inee/communication/breves/b098.html|creationDate|2015-02-16 +http://www.cnrs.fr/inee/communication/breves/b098.html|tag|http://www.semanlink.net/tag/zombie +http://www.cnrs.fr/inee/communication/breves/b098.html|tag|http://www.semanlink.net/tag/parasitisme +http://www.cnrs.fr/inee/communication/breves/b098.html|tag|http://www.semanlink.net/tag/manipulation +http://www.cnrs.fr/inee/communication/breves/b098.html|tag|http://www.semanlink.net/tag/guepe +http://www.cnrs.fr/inee/communication/breves/b098.html|tag|http://www.semanlink.net/tag/virus +http://www.cnrs.fr/inee/communication/breves/b098.html|title|Un virus transforme les coccinelles en zombies au profit d’une guêpe parasitoïde +http://www.cnrs.fr/inee/communication/breves/b098.html|creationTime|2015-02-16T19:20:30Z +https://www.simonsfoundation.org/quanta/20140122-a-new-physics-theory-of-life/|creationDate|2014-01-26 +https://www.simonsfoundation.org/quanta/20140122-a-new-physics-theory-of-life/|tag|http://www.semanlink.net/tag/origine_de_la_vie +https://www.simonsfoundation.org/quanta/20140122-a-new-physics-theory-of-life/|tag|http://www.semanlink.net/tag/thermodynamique +https://www.simonsfoundation.org/quanta/20140122-a-new-physics-theory-of-life/|title|A New Thermodynamics Theory of the Origin of Life Simons Foundation +https://www.simonsfoundation.org/quanta/20140122-a-new-physics-theory-of-life/|creationTime|2014-01-26T20:26:54Z +http://www.semanlink.net/doc/2019/05/robust_language_representation_|creationDate|2019-05-19 +http://www.semanlink.net/doc/2019/05/robust_language_representation_|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2019/05/robust_language_representation_|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/05/robust_language_representation_|tag|http://www.semanlink.net/tag/ensemble_learning +http://www.semanlink.net/doc/2019/05/robust_language_representation_|comment|Related to [this](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1901.11504). 
+http://www.semanlink.net/doc/2019/05/robust_language_representation_|relatedDoc|https://arxiv.org/abs/1901.11504 +http://www.semanlink.net/doc/2019/05/robust_language_representation_|title|Robust Language Representation Learning via Multi-task Knowledge Distillation - Microsoft Research +http://www.semanlink.net/doc/2019/05/robust_language_representation_|bookmarkOf|https://www.microsoft.com/en-us/research/blog/robust-language-representation-learning-via-multi-task-knowledge-distillation/ +http://www.semanlink.net/doc/2019/05/robust_language_representation_|creationTime|2019-05-19T23:16:17Z +http://video.google.com/videoplay?docid=-9050474362583451279|creationDate|2008-10-10 +http://video.google.com/videoplay?docid=-9050474362583451279|tag|http://www.semanlink.net/tag/crise_financiere +http://video.google.com/videoplay?docid=-9050474362583451279|tag|http://www.semanlink.net/tag/banque +http://video.google.com/videoplay?docid=-9050474362583451279|tag|http://www.semanlink.net/tag/money +http://video.google.com/videoplay?docid=-9050474362583451279|title|Money As Debt +http://video.google.com/videoplay?docid=-9050474362583451279|creationTime|2008-10-10T21:23:24Z +http://developer.apple.com/internet/opensource/osdb.html|creationDate|2008-10-20 +http://developer.apple.com/internet/opensource/osdb.html|tag|http://www.semanlink.net/tag/apple_developer_connection +http://developer.apple.com/internet/opensource/osdb.html|tag|http://www.semanlink.net/tag/mysql +http://developer.apple.com/internet/opensource/osdb.html|tag|http://www.semanlink.net/tag/mac_os_x +http://developer.apple.com/internet/opensource/osdb.html|title|MySQL on Mac OS X +http://developer.apple.com/internet/opensource/osdb.html|creationTime|2008-10-20T10:51:13Z +http://lin-clark.com/blog/2013/12/08/drop-rdfa-drupal-8/|creationDate|2014-11-18 +http://lin-clark.com/blog/2013/12/08/drop-rdfa-drupal-8/|tag|http://www.semanlink.net/tag/drupal +http://lin-clark.com/blog/2013/12/08/drop-rdfa-drupal-8/|tag|http://www.semanlink.net/tag/json_ld +http://lin-clark.com/blog/2013/12/08/drop-rdfa-drupal-8/|tag|http://www.semanlink.net/tag/rdfa +http://lin-clark.com/blog/2013/12/08/drop-rdfa-drupal-8/|title|Why Drupal 8 should drop RDFa (and microdata) in favor of JSON Lin Clark +http://lin-clark.com/blog/2013/12/08/drop-rdfa-drupal-8/|creationTime|2014-11-18T10:39:43Z +http://www.ldodds.com/blog/archives/000322.html|creationDate|2008-02-01 +http://www.ldodds.com/blog/archives/000322.html|tag|http://www.semanlink.net/tag/sparql +http://www.ldodds.com/blog/archives/000322.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/archives/000322.html|comment|"""ADC pattern"" (ASK, DESCRIBE, CONSTRUCT): a way to probe a remote data set to see if it has information that is of interest and then extract information from that data set with increasing levels of precision and control. " +http://www.ldodds.com/blog/archives/000322.html|title|Lost Boy: Bee Node Deconstructed +http://www.ldodds.com/blog/archives/000322.html|creationTime|2008-02-01T18:26:36Z +http://www.supprimerlechomage.org/|creationDate|2006-03-22 +http://www.supprimerlechomage.org/|tag|http://www.semanlink.net/tag/attali +http://www.supprimerlechomage.org/|tag|http://www.semanlink.net/tag/chomage +http://www.supprimerlechomage.org/|comment|Notre proposition : reconnaître que se former et chercher un emploi constitue une activité socialement utile. 
Qu'elle mérite un statut sous la forme d’un contrat d’évolution, avec tous les attributs d'un contrat de travail : rémunération, protection sociale, encadrement et débouché professionnel. +http://www.gymglish.com/workbook/showlesson?e=fps%40semanlink.net&s=O6qCR70UXs&t=L|creationDate|2013-08-20 +http://www.gymglish.com/workbook/showlesson?e=fps%40semanlink.net&s=O6qCR70UXs&t=L|tag|http://www.semanlink.net/tag/english_grammar +http://www.gymglish.com/workbook/showlesson?e=fps%40semanlink.net&s=O6qCR70UXs&t=L|title|Gymglish Lesson - Concordance des temps: Style indirect +http://www.gymglish.com/workbook/showlesson?e=fps%40semanlink.net&s=O6qCR70UXs&t=L|creationTime|2013-08-20T09:06:32Z +http://www.tamtaminfo.com/inquietudes-sur-le-projet-de-constrution-de-la-voie-ferree-par-le-groupe-bollore/|creationDate|2015-08-15 +http://www.tamtaminfo.com/inquietudes-sur-le-projet-de-constrution-de-la-voie-ferree-par-le-groupe-bollore/|tag|http://www.semanlink.net/tag/boucle_ferroviaire_d_afrique_de_l_ouest +http://www.tamtaminfo.com/inquietudes-sur-le-projet-de-constrution-de-la-voie-ferree-par-le-groupe-bollore/|title|Inquiétudes sur le projet de constrution de la voie ferrée par le groupe Bolloré Tamtaminfo +http://www.tamtaminfo.com/inquietudes-sur-le-projet-de-constrution-de-la-voie-ferree-par-le-groupe-bollore/|creationTime|2015-08-15T13:24:25Z +http://www.cnrs.fr/inee/communication/breves/b390.html|creationDate|2018-07-16 +http://www.cnrs.fr/inee/communication/breves/b390.html|tag|http://www.semanlink.net/tag/african_origin_of_modern_humans +http://www.cnrs.fr/inee/communication/breves/b390.html|tag|http://www.semanlink.net/tag/origines_de_l_homme +http://www.cnrs.fr/inee/communication/breves/b390.html|title|Out of Africa : nos origines multiples +http://www.cnrs.fr/inee/communication/breves/b390.html|creationTime|2018-07-16T12:25:25Z +http://www.streamingwizard.com/|creationDate|2014-11-17 +http://www.streamingwizard.com/|title|Streaming services and solutions provider; specialists in live broadcasting and on demand video +http://www.streamingwizard.com/|creationTime|2014-11-17T20:56:12Z +https://github.com/innoq/iqvoc/wiki/|creationDate|2011-06-09 +https://github.com/innoq/iqvoc/wiki/|tag|http://www.semanlink.net/tag/skos_editor +https://github.com/innoq/iqvoc/wiki/|title|iQvoc +https://github.com/innoq/iqvoc/wiki/|creationTime|2011-06-09T22:50:55Z +http://arc.semsol.org/|creationDate|2008-02-04 +http://arc.semsol.org/|tag|http://www.semanlink.net/tag/rdf_dev +http://arc.semsol.org/|tag|http://www.semanlink.net/tag/php +http://arc.semsol.org/|tag|http://www.semanlink.net/tag/benjamin_nowack +http://arc.semsol.org/|title|Easy RDF and SPARQL for LAMP systems - ARC RDF Classes for PHP +http://arc.semsol.org/|creationTime|2008-02-04T15:37:39Z +http://www.lemonde.fr/technologies/article/2013/08/26/google-investit-dans-le-service-de-taxis-uber_3466504_651865.html#|creationDate|2013-08-26 +http://www.lemonde.fr/technologies/article/2013/08/26/google-investit-dans-le-service-de-taxis-uber_3466504_651865.html#|tag|http://www.semanlink.net/tag/google_car +http://www.lemonde.fr/technologies/article/2013/08/26/google-investit-dans-le-service-de-taxis-uber_3466504_651865.html#|title|Google investit dans le service de taxis Uber +http://www.lemonde.fr/technologies/article/2013/08/26/google-investit-dans-le-service-de-taxis-uber_3466504_651865.html#|creationTime|2013-08-26T23:28:51Z +http://allforces.com/2005/08/22/wordpress-on-mac-phpandmysql/|creationDate|2007-07-07 
+http://allforces.com/2005/08/22/wordpress-on-mac-phpandmysql/|tag|http://www.semanlink.net/tag/mysql +http://allforces.com/2005/08/22/wordpress-on-mac-phpandmysql/|tag|http://www.semanlink.net/tag/installing_wordpress +http://allforces.com/2005/08/22/wordpress-on-mac-phpandmysql/|tag|http://www.semanlink.net/tag/php +http://allforces.com/2005/08/22/wordpress-on-mac-phpandmysql/|title|WordPress on Mac Part 1: PHP & MySQL All Forces +http://allforces.com/2005/08/22/wordpress-on-mac-phpandmysql/|creationTime|2007-07-07T15:28:04Z +http://passeurdesciences.blog.lemonde.fr/2014/07/06/un-mystere-astronomique-de-470-millions-dannees/|creationDate|2014-07-06 +http://passeurdesciences.blog.lemonde.fr/2014/07/06/un-mystere-astronomique-de-470-millions-dannees/|tag|http://www.semanlink.net/tag/meteorite +http://passeurdesciences.blog.lemonde.fr/2014/07/06/un-mystere-astronomique-de-470-millions-dannees/|title|Un mystère astronomique de 470 millions d’années Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2014/07/06/un-mystere-astronomique-de-470-millions-dannees/|creationTime|2014-07-06T19:50:48Z +http://www.ebusiness-unibw.org/tools/goodrelations-annotator/|creationDate|2010-07-19 +http://www.ebusiness-unibw.org/tools/goodrelations-annotator/|tag|http://www.semanlink.net/tag/goodrelations +http://www.ebusiness-unibw.org/tools/goodrelations-annotator/|tag|http://www.semanlink.net/tag/data_web +http://www.ebusiness-unibw.org/tools/goodrelations-annotator/|tag|http://www.semanlink.net/tag/linked_data +http://www.ebusiness-unibw.org/tools/goodrelations-annotator/|comment|GoodRelations Annotator and GoodRelations Snippet Generator: The easy way to advertise your business on the Web of Linked Data. +http://www.ebusiness-unibw.org/tools/goodrelations-annotator/|title|GoodRelations Annotator +http://www.ebusiness-unibw.org/tools/goodrelations-annotator/|creationTime|2010-07-19T11:20:20Z +https://github.com/evilstreak/markdown-js|creationDate|2015-10-11 +https://github.com/evilstreak/markdown-js|tag|http://www.semanlink.net/tag/evilstreak_markdown_js +https://github.com/evilstreak/markdown-js|title|evilstreak/markdown-js +https://github.com/evilstreak/markdown-js|creationTime|2015-10-11T09:51:45Z +http://mashable.com/2007/05/15/16-awesome-data-visualization-tools/|creationDate|2008-02-15 +http://mashable.com/2007/05/15/16-awesome-data-visualization-tools/|tag|http://www.semanlink.net/tag/data_visualization_tools +http://mashable.com/2007/05/15/16-awesome-data-visualization-tools/|title|16 Awesome Data Visualization Tools +http://mashable.com/2007/05/15/16-awesome-data-visualization-tools/|creationTime|2008-02-15T23:50:08Z +http://chris.eldredge.io/blog/2015/04/02/solr-jetty-cors/|creationDate|2015-06-20 +http://chris.eldredge.io/blog/2015/04/02/solr-jetty-cors/|tag|http://www.semanlink.net/tag/cross_origin_resource_sharing +http://chris.eldredge.io/blog/2015/04/02/solr-jetty-cors/|tag|http://www.semanlink.net/tag/solr +http://chris.eldredge.io/blog/2015/04/02/solr-jetty-cors/|title|Solr, Jetty and CORS - Chris Eldredge +http://chris.eldredge.io/blog/2015/04/02/solr-jetty-cors/|creationTime|2015-06-20T09:24:03Z +http://blog.swayy.co/post/61672584784/an-algorithm-for-generating-automatic-hashtags|creationDate|2017-05-24 +http://blog.swayy.co/post/61672584784/an-algorithm-for-generating-automatic-hashtags|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://blog.swayy.co/post/61672584784/an-algorithm-for-generating-automatic-hashtags|tag|http://www.semanlink.net/tag/hashtag 
+http://blog.swayy.co/post/61672584784/an-algorithm-for-generating-automatic-hashtags|tag|http://www.semanlink.net/tag/nlp_sample_code +http://blog.swayy.co/post/61672584784/an-algorithm-for-generating-automatic-hashtags|comment|"pretty basic, use word frequency, stemming and stopwords. +" +http://blog.swayy.co/post/61672584784/an-algorithm-for-generating-automatic-hashtags|title|Swayy Blog — An algorithm for generating automatic hashtags +http://blog.swayy.co/post/61672584784/an-algorithm-for-generating-automatic-hashtags|creationTime|2017-05-24T18:07:27Z +http://www.planeteafrique.com/niger/ONG_Search.asp|creationDate|2005-03-03 +http://www.planeteafrique.com/niger/ONG_Search.asp|tag|http://www.semanlink.net/tag/ong +http://www.planeteafrique.com/niger/ONG_Search.asp|tag|http://www.semanlink.net/tag/niger +http://www.planeteafrique.com/niger/ONG_Search.asp|title|Annuaire des Associations et ONG oeuvrant au Niger +http://ivan-herman.name/2008/12/03/bridge-between-sw-communities-owl-rl/|creationDate|2010-08-27 +http://ivan-herman.name/2008/12/03/bridge-between-sw-communities-owl-rl/|tag|http://www.semanlink.net/tag/owl_rl +http://ivan-herman.name/2008/12/03/bridge-between-sw-communities-owl-rl/|title|Bridge between SW communities: OWL RL « Ivan’s private site +http://ivan-herman.name/2008/12/03/bridge-between-sw-communities-owl-rl/|creationTime|2010-08-27T13:15:21Z +https://www.typepad.com/t/app/weblog/manage?blog_id=284632|creationDate|2006-01-14 +https://www.typepad.com/t/app/weblog/manage?blog_id=284632|tag|http://www.semanlink.net/tag/fps_blog +https://www.typepad.com/t/app/weblog/manage?blog_id=284632|tag|http://www.semanlink.net/tag/noos +https://www.typepad.com/t/app/weblog/manage?blog_id=284632|title|Mon blog sur Noos - Edition +https://www.slideshare.net/moustaki/time-context-and-causality-in-recommender-systems|creationDate|2018-10-18 +https://www.slideshare.net/moustaki/time-context-and-causality-in-recommender-systems|tag|http://www.semanlink.net/tag/slides +https://www.slideshare.net/moustaki/time-context-and-causality-in-recommender-systems|tag|http://www.semanlink.net/tag/yves_raymond +https://www.slideshare.net/moustaki/time-context-and-causality-in-recommender-systems|tag|http://www.semanlink.net/tag/france_is_ai_2018 +https://www.slideshare.net/moustaki/time-context-and-causality-in-recommender-systems|tag|http://www.semanlink.net/tag/recommender_systems +https://www.slideshare.net/moustaki/time-context-and-causality-in-recommender-systems|title|Time, Context and Causality in Recommender Systems +https://www.slideshare.net/moustaki/time-context-and-causality-in-recommender-systems|creationTime|2018-10-18T13:53:35Z +http://dannyayers.com/code/sparql-editor|creationDate|2007-01-09 +http://dannyayers.com/code/sparql-editor|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/code/sparql-editor|tag|http://www.semanlink.net/tag/sparql +http://dannyayers.com/code/sparql-editor|title|Online SPARQL Editor +http://web.archive.org/web/19981206171549/www.hypersolutions.fr/|creationDate|2010-05-17 +http://web.archive.org/web/19981206171549/www.hypersolutions.fr/|tag|http://www.semanlink.net/tag/hypersolutions +http://web.archive.org/web/19981206171549/www.hypersolutions.fr/|title|hyperSOLutions - Home page +http://web.archive.org/web/19981206171549/www.hypersolutions.fr/|creationTime|2010-05-17T09:16:29Z +http://petrole.blog.lemonde.fr/2011/11/06/trop-tard-pour-limiter-le-rechauffement-a-2%C2%B0c-selon-nature/|creationDate|2011-11-06 
+http://petrole.blog.lemonde.fr/2011/11/06/trop-tard-pour-limiter-le-rechauffement-a-2%C2%B0c-selon-nature/|tag|http://www.semanlink.net/tag/rechauffement_climatique +http://petrole.blog.lemonde.fr/2011/11/06/trop-tard-pour-limiter-le-rechauffement-a-2%C2%B0c-selon-nature/|title|Trop tard pour limiter le réchauffement à 2°C, d’après ‘Nature’ Oil Man +http://petrole.blog.lemonde.fr/2011/11/06/trop-tard-pour-limiter-le-rechauffement-a-2%C2%B0c-selon-nature/|creationTime|2011-11-06T16:24:09Z +http://people.ds.cam.ac.uk/iv250/tutorial/xlingrep-tutorial.pdf|creationDate|2018-11-05 +http://people.ds.cam.ac.uk/iv250/tutorial/xlingrep-tutorial.pdf|tag|http://www.semanlink.net/tag/manaal_faruqui +http://people.ds.cam.ac.uk/iv250/tutorial/xlingrep-tutorial.pdf|tag|http://www.semanlink.net/tag/cross_lingual_word_embeddings +http://people.ds.cam.ac.uk/iv250/tutorial/xlingrep-tutorial.pdf|tag|http://www.semanlink.net/tag/tutorial +http://people.ds.cam.ac.uk/iv250/tutorial/xlingrep-tutorial.pdf|title|Cross-Lingual Word Representations: Induction and Evaluation (Tutorial EMNLP 2017) +http://people.ds.cam.ac.uk/iv250/tutorial/xlingrep-tutorial.pdf|creationTime|2018-11-05T14:12:58Z +http://www.knowledgesearch.org/|creationDate|2006-10-09 +http://www.knowledgesearch.org/|tag|http://www.semanlink.net/tag/semantic_indexing +http://www.knowledgesearch.org/|comment|Creating tools to identify the latent knowledge found in text +http://www.knowledgesearch.org/|title|The Semantic Indexing Project +https://tryolabs.com/blog/2018/12/19/major-advancements-deep-learning-2018/|creationDate|2018-12-20 +https://tryolabs.com/blog/2018/12/19/major-advancements-deep-learning-2018/|tag|http://www.semanlink.net/tag/2018 +https://tryolabs.com/blog/2018/12/19/major-advancements-deep-learning-2018/|tag|http://www.semanlink.net/tag/deep_learning +https://tryolabs.com/blog/2018/12/19/major-advancements-deep-learning-2018/|tag|http://www.semanlink.net/tag/survey +https://tryolabs.com/blog/2018/12/19/major-advancements-deep-learning-2018/|title|The major advancements in Deep Learning in 2018 Tryolabs Blog +https://tryolabs.com/blog/2018/12/19/major-advancements-deep-learning-2018/|creationTime|2018-12-20T11:42:21Z +http://www.semanticdesktop.org/xwiki/bin/download/Wiki/AMultiOntologyApproachForPersonalInformationManagement/32_xiaocruz_multiontology_final.pdf|creationDate|2006-02-26 +http://www.semanticdesktop.org/xwiki/bin/download/Wiki/AMultiOntologyApproachForPersonalInformationManagement/32_xiaocruz_multiontology_final.pdf|tag|http://www.semanlink.net/tag/personal_information_management +http://www.semanticdesktop.org/xwiki/bin/download/Wiki/AMultiOntologyApproachForPersonalInformationManagement/32_xiaocruz_multiontology_final.pdf|tag|http://www.semanlink.net/tag/semantic_desktop +http://www.semanticdesktop.org/xwiki/bin/download/Wiki/AMultiOntologyApproachForPersonalInformationManagement/32_xiaocruz_multiontology_final.pdf|comment|"In this paper, we propose a layered and semantic ontology-based framework for personal information management, and we discuss its annotations, associations, and navigation. We also discuss query processing in two cases: query rewriting in a single personal information application, PIA, and that between two PIAs. 
+" +http://www.semanticdesktop.org/xwiki/bin/download/Wiki/AMultiOntologyApproachForPersonalInformationManagement/32_xiaocruz_multiontology_final.pdf|title|A Multi-Ontology Approach for Personal Information Management +http://www.semanticdesktop.org/xwiki/bin/download/Wiki/AMultiOntologyApproachForPersonalInformationManagement/32_xiaocruz_multiontology_final.pdf|date|2005-11 +http://www.talis.com/nodalities/pdf/nodalities_issue8.pdf|creationDate|2009-12-02 +http://www.talis.com/nodalities/pdf/nodalities_issue8.pdf|tag|http://www.semanlink.net/tag/nodalities +http://www.talis.com/nodalities/pdf/nodalities_issue8.pdf|tag|http://www.semanlink.net/tag/data_web +http://www.talis.com/nodalities/pdf/nodalities_issue8.pdf|comment|good title! +http://www.talis.com/nodalities/pdf/nodalities_issue8.pdf|title|The Data Web as an OS +http://www.talis.com/nodalities/pdf/nodalities_issue8.pdf|creationTime|2009-12-02T00:49:40Z +http://www.readwriteweb.com/archives/extractiv_launches_semantics_as_a_service_platform.php|creationDate|2010-07-30 +http://www.readwriteweb.com/archives/extractiv_launches_semantics_as_a_service_platform.php|tag|http://www.semanlink.net/tag/readwriteweb_com +http://www.readwriteweb.com/archives/extractiv_launches_semantics_as_a_service_platform.php|tag|http://www.semanlink.net/tag/text_to_semantic_data +http://www.readwriteweb.com/archives/extractiv_launches_semantics_as_a_service_platform.php|tag|http://www.semanlink.net/tag/semantic_web_tools +http://www.readwriteweb.com/archives/extractiv_launches_semantics_as_a_service_platform.php|tag|http://www.semanlink.net/tag/calais +http://www.readwriteweb.com/archives/extractiv_launches_semantics_as_a_service_platform.php|comment|"A service that crawls the Web for text on a specific topic, then transforms it into ""structured semantic data."" It's a direct competitor to Thomson Reuters' Calais product." +http://www.readwriteweb.com/archives/extractiv_launches_semantics_as_a_service_platform.php|title|"Extractiv Launches ""Semantics as a Service"" Platform" +http://www.readwriteweb.com/archives/extractiv_launches_semantics_as_a_service_platform.php|creationTime|2010-07-30T14:37:04Z +https://news.cnrs.fr/articles/are-digital-devices-robbing-our-memories?utm_content=bufferb159d&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-01-04 +https://news.cnrs.fr/articles/are-digital-devices-robbing-our-memories?utm_content=bufferb159d&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/bernard_stiegler +https://news.cnrs.fr/articles/are-digital-devices-robbing-our-memories?utm_content=bufferb159d&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/memoire_informatique +https://news.cnrs.fr/articles/are-digital-devices-robbing-our-memories?utm_content=bufferb159d&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/memoire +https://news.cnrs.fr/articles/are-digital-devices-robbing-our-memories?utm_content=bufferb159d&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|comment|" +" +https://news.cnrs.fr/articles/are-digital-devices-robbing-our-memories?utm_content=bufferb159d&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Are Digital Devices Robbing our Memories? 
CNRS News +https://news.cnrs.fr/articles/are-digital-devices-robbing-our-memories?utm_content=bufferb159d&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-01-04T14:13:10Z +http://nicola.io/future-rdf/2015/|creationDate|2015-09-12 +http://nicola.io/future-rdf/2015/|tag|http://www.semanlink.net/tag/javascript_rdf +http://nicola.io/future-rdf/2015/|title|Towards the future RDF library +http://nicola.io/future-rdf/2015/|creationTime|2015-09-12T13:54:07Z +http://www.mkbergman.com/1771/a-decade-in-the-trenches-of-the-semantic-web/|creationDate|2014-07-24 +http://www.mkbergman.com/1771/a-decade-in-the-trenches-of-the-semantic-web/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/1771/a-decade-in-the-trenches-of-the-semantic-web/|tag|http://www.semanlink.net/tag/semantic_web +http://www.mkbergman.com/1771/a-decade-in-the-trenches-of-the-semantic-web/|title|A Decade in the Trenches of the Semantic Web AI3:::Adaptive Information +http://www.mkbergman.com/1771/a-decade-in-the-trenches-of-the-semantic-web/|creationTime|2014-07-24T23:19:52Z +http://www.ibm.com/developerworks/xml/library/x-dita10/|creationDate|2011-02-15 +http://www.ibm.com/developerworks/xml/library/x-dita10/|tag|http://www.semanlink.net/tag/dita +http://www.ibm.com/developerworks/xml/library/x-dita10/|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.ibm.com/developerworks/xml/library/x-dita10/|tag|http://www.semanlink.net/tag/skos +http://www.ibm.com/developerworks/xml/library/x-dita10/|title|Subject classification with DITA and SKOS +http://www.ibm.com/developerworks/xml/library/x-dita10/|creationTime|2011-02-15T11:55:39Z +http://www.worldpressphoto.nl|creationDate|2005-06-15 +http://www.worldpressphoto.nl|tag|http://www.semanlink.net/tag/journal +http://www.worldpressphoto.nl|tag|http://www.semanlink.net/tag/photo_journalisme +http://www.worldpressphoto.nl|title|World Press Photo +http://arxiv.org/abs/1002.2284v2|creationDate|2013-05-11 +http://arxiv.org/abs/1002.2284v2|tag|http://www.semanlink.net/tag/p_np +http://arxiv.org/abs/1002.2284v2|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/1002.2284v2|tag|http://www.semanlink.net/tag/markets +http://arxiv.org/abs/1002.2284v2|arxiv_author|Philip Maymin +http://arxiv.org/abs/1002.2284v2|comment|Hmm wow +http://arxiv.org/abs/1002.2284v2|title|[1002.2284] Markets are efficient if and only if P = NP +http://arxiv.org/abs/1002.2284v2|creationTime|2013-05-11T11:18:22Z +http://arxiv.org/abs/1002.2284v2|arxiv_summary|"I prove that if markets are weak-form efficient, meaning current prices fully +reflect all information available in past prices, then P = NP, meaning every +computational problem whose solution can be verified in polynomial time can +also be solved in polynomial time. I also prove the converse by showing how we +can ""program"" the market to solve NP-complete problems. Since P probably does +not equal NP, markets are probably not efficient. Specifically, markets become +increasingly inefficient as the time series lengthens or becomes more frequent. +An illustration by way of partitioning the excess returns to momentum +strategies based on data availability confirms this prediction." 
+http://arxiv.org/abs/1002.2284v2|arxiv_firstAuthor|Philip Maymin +http://arxiv.org/abs/1002.2284v2|arxiv_updated|2010-05-13T07:26:53Z +http://arxiv.org/abs/1002.2284v2|arxiv_title|Markets are efficient if and only if P = NP +http://arxiv.org/abs/1002.2284v2|arxiv_published|2010-02-11T05:56:16Z +http://arxiv.org/abs/1002.2284v2|arxiv_num|1002.2284 +http://www.talisaspire.com/|creationDate|2012-04-17 +http://www.talisaspire.com/|tag|http://www.semanlink.net/tag/tom_heath +http://www.talisaspire.com/|tag|http://www.semanlink.net/tag/talis +http://www.talisaspire.com/|tag|http://www.semanlink.net/tag/semantic_cms +http://www.talisaspire.com/|tag|http://www.semanlink.net/tag/linked_learning_2012 +http://www.talisaspire.com/|tag|http://www.semanlink.net/tag/linked_learning +http://www.talisaspire.com/|title|Talis Aspire +http://www.talisaspire.com/|creationTime|2012-04-17T11:51:35Z +http://news.bbc.co.uk/2/hi/science_and_environment/10132762.stm|creationDate|2010-05-20 +http://news.bbc.co.uk/2/hi/science_and_environment/10132762.stm|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://news.bbc.co.uk/2/hi/science_and_environment/10132762.stm|tag|http://www.semanlink.net/tag/artificial_life +http://news.bbc.co.uk/2/hi/science_and_environment/10132762.stm|tag|http://www.semanlink.net/tag/craig_venter +http://news.bbc.co.uk/2/hi/science_and_environment/10132762.stm|title|BBC News - 'Artificial life' breakthrough announced by scientists +http://news.bbc.co.uk/2/hi/science_and_environment/10132762.stm|creationTime|2010-05-20T22:31:25Z +http://news.bbc.co.uk/2/hi/science_and_environment/10132762.stm|source|BBC +https://towardsdatascience.com/advances-in-few-shot-learning-a-guided-tour-36bc10a68b77|creationDate|2018-12-02 +https://towardsdatascience.com/advances-in-few-shot-learning-a-guided-tour-36bc10a68b77|tag|http://www.semanlink.net/tag/few_shot_learning +https://towardsdatascience.com/advances-in-few-shot-learning-a-guided-tour-36bc10a68b77|tag|http://www.semanlink.net/tag/survey +https://towardsdatascience.com/advances-in-few-shot-learning-a-guided-tour-36bc10a68b77|title|Advances in few-shot learning: a guided tour – Towards Data Science +https://towardsdatascience.com/advances-in-few-shot-learning-a-guided-tour-36bc10a68b77|creationTime|2018-12-02T10:16:29Z +http://linuxgizmos.com/google-launches-android-automotive-consortium/|creationDate|2014-01-07 +http://linuxgizmos.com/google-launches-android-automotive-consortium/|tag|http://www.semanlink.net/tag/automobile +http://linuxgizmos.com/google-launches-android-automotive-consortium/|tag|http://www.semanlink.net/tag/android +http://linuxgizmos.com/google-launches-android-automotive-consortium/|tag|http://www.semanlink.net/tag/google +http://linuxgizmos.com/google-launches-android-automotive-consortium/|title|Google launches Android automotive consortium ·  LinuxGizmos.com +http://linuxgizmos.com/google-launches-android-automotive-consortium/|creationTime|2014-01-07T11:45:52Z +http://torrez.us/archives/2005/09/13/393|creationDate|2005-09-14 +http://torrez.us/archives/2005/09/13/393|tag|http://www.semanlink.net/tag/rdf_vs_xml +http://torrez.us/archives/2005/09/13/393|title|Elias Torres » From XML to RDF: how semantic web technologies will change the design of ‘omic’ standards +http://www.newscientist.com/article/mg21128323.200-the-vast-asian-realm-of-the-lost-humans.html#.UpfAXaVkiww|creationDate|2013-11-28 
+http://www.newscientist.com/article/mg21128323.200-the-vast-asian-realm-of-the-lost-humans.html#.UpfAXaVkiww|tag|http://www.semanlink.net/tag/denisovan +http://www.newscientist.com/article/mg21128323.200-the-vast-asian-realm-of-the-lost-humans.html#.UpfAXaVkiww|title|The vast Asian realm of the lost humans - life - 29 September 2011 - New Scientist +http://www.newscientist.com/article/mg21128323.200-the-vast-asian-realm-of-the-lost-humans.html#.UpfAXaVkiww|creationTime|2013-11-28T23:17:15Z +http://artic.ac-besancon.fr/ecoles_25/ia/maitrlng/victorhugo_06.htm|creationDate|2007-09-26 +http://artic.ac-besancon.fr/ecoles_25/ia/maitrlng/victorhugo_06.htm|tag|http://www.semanlink.net/tag/hugo +http://artic.ac-besancon.fr/ecoles_25/ia/maitrlng/victorhugo_06.htm|tag|http://www.semanlink.net/tag/bibliotheque +http://artic.ac-besancon.fr/ecoles_25/ia/maitrlng/victorhugo_06.htm|tag|http://www.semanlink.net/tag/crise_des_banlieues +http://artic.ac-besancon.fr/ecoles_25/ia/maitrlng/victorhugo_06.htm|comment|"Tu viens d'incendier la Bibliothèque ?
+- Oui,
+J'ai mis le feu là.
+- Mais, c'est un crime inouï !
+Crime commis par toi contre toi-même, infâme !
+Mais tu viens de tuer le rayon de ton âme !
+[snip]
+Voilà ce que tu perds, hélas, et par ta faute !
+Le livre est ta richesse à toi ! c'est le savoir,
+Le droit, la vérité, la vertu, le devoir,
+Le progrès, la raison dissipant tout délire.
+Et tu détruis cela toi !
+- Je ne sais pas lire." +http://artic.ac-besancon.fr/ecoles_25/ia/maitrlng/victorhugo_06.htm|title|Victor Hugo : A qui la faute ? +http://artic.ac-besancon.fr/ecoles_25/ia/maitrlng/victorhugo_06.htm|creationTime|2007-09-26T21:42:05Z +https://drive.google.com/file/d/0BxKBnD5y2M8NbWN6XzM5UXkwNDA/view?pli=1|creationDate|2015-10-24 +https://drive.google.com/file/d/0BxKBnD5y2M8NbWN6XzM5UXkwNDA/view?pli=1|tag|http://www.semanlink.net/tag/representation_learning +https://drive.google.com/file/d/0BxKBnD5y2M8NbWN6XzM5UXkwNDA/view?pli=1|tag|http://www.semanlink.net/tag/artificial_intelligence +https://drive.google.com/file/d/0BxKBnD5y2M8NbWN6XzM5UXkwNDA/view?pli=1|tag|http://www.semanlink.net/tag/unsupervised_machine_learning +https://drive.google.com/file/d/0BxKBnD5y2M8NbWN6XzM5UXkwNDA/view?pli=1|title|Obstacles on the Path to AI +https://drive.google.com/file/d/0BxKBnD5y2M8NbWN6XzM5UXkwNDA/view?pli=1|creationTime|2015-10-24T00:49:43Z +http://oldman.readthedocs.org/en/latest/|creationDate|2015-02-18 +http://oldman.readthedocs.org/en/latest/|tag|http://www.semanlink.net/tag/public_hydra_w3_org +http://oldman.readthedocs.org/en/latest/|comment|"OldMan is a Python Object Linked Data Mapper (OLDM).
+An OLDM let you create, retrieve and update RDF representations of Web Resources by manipulating them as Python objects.
+OldMan has one main objective: help you to declare your models using RDF triples and JSON-LD contexts instead of programming Python model classes yourself.
+However, OldMan does not force you to express all your domain logic in a declarative style. OldMan makes easy for you to add dynamically plain-old Python methods to resource objects. + + +" +http://oldman.readthedocs.org/en/latest/|title|OldMan: Python OLDM +http://oldman.readthedocs.org/en/latest/|creationTime|2015-02-18T23:48:16Z +http://www.prototypejs.org/|creationDate|2008-11-03 +http://www.prototypejs.org/|tag|http://www.semanlink.net/tag/javascript_librairies +http://www.prototypejs.org/|comment|used by dbPedia mobile +http://www.prototypejs.org/|title|Prototype JavaScript framework +http://www.prototypejs.org/|creationTime|2008-11-03T10:21:59Z +http://www.businessinsider.com/facebook-is-quietly-ramping-up-a-product-that-kills-us-says-yahoo-source-2012-11|creationDate|2012-11-22 +http://www.businessinsider.com/facebook-is-quietly-ramping-up-a-product-that-kills-us-says-yahoo-source-2012-11|tag|http://www.semanlink.net/tag/facebook +http://www.businessinsider.com/facebook-is-quietly-ramping-up-a-product-that-kills-us-says-yahoo-source-2012-11|tag|http://www.semanlink.net/tag/yahoo +http://www.businessinsider.com/facebook-is-quietly-ramping-up-a-product-that-kills-us-says-yahoo-source-2012-11|tag|http://www.semanlink.net/tag/publicite_internet +http://www.businessinsider.com/facebook-is-quietly-ramping-up-a-product-that-kills-us-says-yahoo-source-2012-11|title|Facebook Is Quietly Ramping Up A Product That 'Kills Us,' Says Yahoo Source - Business Insider +http://www.businessinsider.com/facebook-is-quietly-ramping-up-a-product-that-kills-us-says-yahoo-source-2012-11|creationTime|2012-11-22T00:46:05Z +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0102.html|creationDate|2007-01-02 +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0102.html|tag|http://www.semanlink.net/tag/content_negotiation +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0102.html|tag|http://www.semanlink.net/tag/richard_cyganiak +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0102.html|tag|http://www.semanlink.net/tag/httprange_14 +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0102.html|comment|If foo is another kind of resources (e.g. a person), then you *must* do a 303 redirect to the location where a description of foo is available. You can 303-redirect to different locations based on accept headers. Post-httpRange-14, the only way to serve a description of a non-information resource without a second request is to use hash URIs. +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0102.html|title|"Re: AW: Content negotiation flamewar (was: Re: ""Hash URIs"" and content negotiation) from Richard Cyganiak on 2006-11-13 (semantic-web@w3.org from November 2006)" +http://www.franz.com/|creationDate|2010-08-24 +http://www.franz.com/|tag|http://www.semanlink.net/tag/semantic_web_company +http://www.franz.com/|tag|http://www.semanlink.net/tag/allegrograph +http://www.franz.com/|title|Franz Inc. Web 3.0's Database
+http://www.franz.com/|creationTime|2010-08-24T23:51:52Z +http://www.lemonde.fr/international/article/2010/11/26/le-nombre-de-pays-tres-pauvres-a-double-en-quarante-ans_1445160_3210.html|creationDate|2010-11-26 +http://www.lemonde.fr/international/article/2010/11/26/le-nombre-de-pays-tres-pauvres-a-double-en-quarante-ans_1445160_3210.html|tag|http://www.semanlink.net/tag/chiffres +http://www.lemonde.fr/international/article/2010/11/26/le-nombre-de-pays-tres-pauvres-a-double-en-quarante-ans_1445160_3210.html|tag|http://www.semanlink.net/tag/pauvrete +http://www.lemonde.fr/international/article/2010/11/26/le-nombre-de-pays-tres-pauvres-a-double-en-quarante-ans_1445160_3210.html|comment|"Le nombre d'individus vivant dans l'extrême pauvreté a ainsi ""augmenté de 3 millions par an entre 2002 et 2007"", qui ont été pourtant des années de forte croissance économique (avec des moyennes de 7 %), pour atteindre 421 millions d'individus en 2007. +" +http://www.lemonde.fr/international/article/2010/11/26/le-nombre-de-pays-tres-pauvres-a-double-en-quarante-ans_1445160_3210.html|title|Le nombre de pays très pauvres a doublé en quarante ans +http://www.lemonde.fr/international/article/2010/11/26/le-nombre-de-pays-tres-pauvres-a-double-en-quarante-ans_1445160_3210.html|creationTime|2010-11-26T22:12:32Z +http://www.lemonde.fr/international/article/2010/11/26/le-nombre-de-pays-tres-pauvres-a-double-en-quarante-ans_1445160_3210.html|source|Le Monde +http://esw.w3.org/topic/RdfStoreBenchmarking|creationDate|2009-02-16 +http://esw.w3.org/topic/RdfStoreBenchmarking|tag|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://esw.w3.org/topic/RdfStoreBenchmarking|tag|http://www.semanlink.net/tag/benchmark +http://esw.w3.org/topic/RdfStoreBenchmarking|title|RdfStoreBenchmarking - ESW Wiki +http://esw.w3.org/topic/RdfStoreBenchmarking|creationTime|2009-02-16T17:06:29Z +http://www.bbc.co.uk/blogs/bbcinternet/2010/07/bbc_world_cup_2010_dynamic_sem.html|creationDate|2011-02-04 +http://www.bbc.co.uk/blogs/bbcinternet/2010/07/bbc_world_cup_2010_dynamic_sem.html|tag|http://www.semanlink.net/tag/bbc_semantic_publishing +http://www.bbc.co.uk/blogs/bbcinternet/2010/07/bbc_world_cup_2010_dynamic_sem.html|title|BBC - BBC Internet Blog: BBC World Cup 2010 dynamic semantic publishing +http://www.bbc.co.uk/blogs/bbcinternet/2010/07/bbc_world_cup_2010_dynamic_sem.html|creationTime|2011-02-04T21:33:53Z +http://scienceblog.com/47894/gamers-succeed-where-scientists-fail/|creationDate|2011-09-20 +http://scienceblog.com/47894/gamers-succeed-where-scientists-fail/|tag|http://www.semanlink.net/tag/crowd_sourcing +http://scienceblog.com/47894/gamers-succeed-where-scientists-fail/|tag|http://www.semanlink.net/tag/retrovirus +http://scienceblog.com/47894/gamers-succeed-where-scientists-fail/|tag|http://www.semanlink.net/tag/jeux_en_ligne +http://scienceblog.com/47894/gamers-succeed-where-scientists-fail/|comment|Gamers have solved the structure of a retrovirus enzyme whose configuration had stumped scientists for more than a decade. The gamers achieved their discovery by playing Foldit, an online game that allows players to collaborate and compete in predicting the structure of protein molecules.
+http://scienceblog.com/47894/gamers-succeed-where-scientists-fail/|title|Gamers succeed where scientists fail ScienceBlog.com +http://scienceblog.com/47894/gamers-succeed-where-scientists-fail/|creationTime|2011-09-20T08:59:51Z +http://www.enswers.net/|creationDate|2013-08-25 +http://www.enswers.net/|tag|http://www.semanlink.net/tag/enswers +http://www.enswers.net/|tag|http://www.semanlink.net/tag/coree_du_sud +http://www.enswers.net/|tag|http://www.semanlink.net/tag/winch5 +http://www.enswers.net/|title|Enswers +http://www.enswers.net/|creationTime|2013-08-25T12:59:03Z +http://www.cazencott.info/dotclear/public/lectures/2017-06-26-intro-ml.pdf|creationDate|2017-06-27 +http://www.cazencott.info/dotclear/public/lectures/2017-06-26-intro-ml.pdf|tag|http://www.semanlink.net/tag/machine_learning +http://www.cazencott.info/dotclear/public/lectures/2017-06-26-intro-ml.pdf|tag|http://www.semanlink.net/tag/slides +http://www.cazencott.info/dotclear/public/lectures/2017-06-26-intro-ml.pdf|title|"""Around Machine Learning in 90 Minutes"" slides" +http://www.cazencott.info/dotclear/public/lectures/2017-06-26-intro-ml.pdf|creationTime|2017-06-27T15:22:42Z +https://github.com/D2KLab/entity2rec|creationDate|2018-06-04 +https://github.com/D2KLab/entity2rec|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +https://github.com/D2KLab/entity2rec|tag|http://www.semanlink.net/tag/github_project +https://github.com/D2KLab/entity2rec|tag|http://www.semanlink.net/tag/raphael_troncy +https://github.com/D2KLab/entity2rec|tag|http://www.semanlink.net/tag/recommended_reading +https://github.com/D2KLab/entity2rec|title|D2KLab/entity2rec: entity2rec generates item recommendation from knowledge graphs +https://github.com/D2KLab/entity2rec|creationTime|2018-06-04T00:10:02Z +http://news.bbc.co.uk/1/hi/sci/tech/4089538.stm|creationDate|2005-06-15 +http://news.bbc.co.uk/1/hi/sci/tech/4089538.stm|tag|http://www.semanlink.net/tag/carbon_sequestration +http://news.bbc.co.uk/1/hi/sci/tech/4089538.stm|comment|The UK government has announced £25m of funding for a plan to capture greenhouse gases such as carbon dioxide and store them under the North Sea. 
+http://news.bbc.co.uk/1/hi/sci/tech/4089538.stm|title|BBC NEWS - Funds for greenhouse gas storage +http://news.bbc.co.uk/1/hi/sci/tech/4089538.stm|source|BBC +http://www.mkbergman.com/1731/spring-dawns-on-artificial-intelligence/|creationDate|2014-07-25 +http://www.mkbergman.com/1731/spring-dawns-on-artificial-intelligence/|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.mkbergman.com/1731/spring-dawns-on-artificial-intelligence/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/1731/spring-dawns-on-artificial-intelligence/|title|Spring Dawns on Artificial Intelligence AI3:::Adaptive Information +http://www.mkbergman.com/1731/spring-dawns-on-artificial-intelligence/|creationTime|2014-07-25T01:53:12Z +http://lib.store.yahoo.net/lib/paulgraham/bbnexcerpts.txt|creationDate|2005-11-24 +http://lib.store.yahoo.net/lib/paulgraham/bbnexcerpts.txt|tag|http://www.semanlink.net/tag/lisp +http://lib.store.yahoo.net/lib/paulgraham/bbnexcerpts.txt|tag|http://www.semanlink.net/tag/paul_graham +http://lib.store.yahoo.net/lib/paulgraham/bbnexcerpts.txt|title|Lisp for Web-Based Applications +http://www.answers.com/topic/the-sea-bat-1|creationDate|2009-07-20 +http://www.answers.com/topic/the-sea-bat-1|tag|http://www.semanlink.net/tag/film_americain +http://www.answers.com/topic/the-sea-bat-1|title|The Sea Bat 1930: Movie and film review from Answers.com +http://www.answers.com/topic/the-sea-bat-1|creationTime|2009-07-20T00:57:18Z +http://en.wikipedia.org/wiki/Guess_Who's_Coming_to_Dinner|creationDate|2008-06-23 +http://en.wikipedia.org/wiki/Guess_Who's_Coming_to_Dinner|tag|http://www.semanlink.net/tag/film_americain +http://en.wikipedia.org/wiki/Guess_Who's_Coming_to_Dinner|tag|http://www.semanlink.net/tag/couple_mixte +http://en.wikipedia.org/wiki/Guess_Who's_Coming_to_Dinner|comment|a 1967 comedy-drama film starring Spencer Tracy, Sidney Poitier, Katharine Hepburn, and Katharine Houghton. +http://en.wikipedia.org/wiki/Guess_Who's_Coming_to_Dinner|title|Guess Who's Coming to Dinner +http://en.wikipedia.org/wiki/Guess_Who's_Coming_to_Dinner|creationTime|2008-06-23T00:28:54Z +http://rdrpostagger.sourceforge.net/|creationDate|2017-07-11 +http://rdrpostagger.sourceforge.net/|tag|http://www.semanlink.net/tag/part_of_speech_tagging +http://rdrpostagger.sourceforge.net/|tag|http://www.semanlink.net/tag/nlp_tools +http://rdrpostagger.sourceforge.net/|comment|approach to automatically construct tagging rules in the form of a binary tree. Python and java +http://rdrpostagger.sourceforge.net/|title|RDRPOSTagger: A Rule-based Part-of-Speech and Morphological Tagging Toolkit +http://rdrpostagger.sourceforge.net/|creationTime|2017-07-11T15:46:46Z +http://www.machineversus.me/2012/08/if-youve-upgraded-to-eclipse-juno.html|creationDate|2012-09-06 +http://www.machineversus.me/2012/08/if-youve-upgraded-to-eclipse-juno.html|tag|http://www.semanlink.net/tag/m2eclipse +http://www.machineversus.me/2012/08/if-youve-upgraded-to-eclipse-juno.html|tag|http://www.semanlink.net/tag/maven +http://www.machineversus.me/2012/08/if-youve-upgraded-to-eclipse-juno.html|tag|http://www.semanlink.net/tag/egit +http://www.machineversus.me/2012/08/if-youve-upgraded-to-eclipse-juno.html|tag|http://www.semanlink.net/tag/eclipse_juno +http://www.machineversus.me/2012/08/if-youve-upgraded-to-eclipse-juno.html|title|Eclipse Juno, Maven, M2E and EGit Compatibility Problem and Solution Machine vs. Me
+http://www.machineversus.me/2012/08/if-youve-upgraded-to-eclipse-juno.html|creationTime|2012-09-06T14:58:52Z +https://www.brainpickings.org/2016/10/06/bertrand-russell-oswald-mosley/|creationDate|2017-12-16 +https://www.brainpickings.org/2016/10/06/bertrand-russell-oswald-mosley/|tag|http://www.semanlink.net/tag/bien_envoye +https://www.brainpickings.org/2016/10/06/bertrand-russell-oswald-mosley/|tag|http://www.semanlink.net/tag/bertrand_russell +https://www.brainpickings.org/2016/10/06/bertrand-russell-oswald-mosley/|tag|http://www.semanlink.net/tag/antifascisme +https://www.brainpickings.org/2016/10/06/bertrand-russell-oswald-mosley/|title|When Debate Is Futile: Bertrand Russell’s Remarkable Response to a Fascist’s Provocation – Brain Pickings +https://www.brainpickings.org/2016/10/06/bertrand-russell-oswald-mosley/|creationTime|2017-12-16T15:03:15Z +http://www.semanlink.net/doc/2019/05/microsoft_makes_google_s_bert_n|creationDate|2019-05-19 +http://www.semanlink.net/doc/2019/05/microsoft_makes_google_s_bert_n|title|Microsoft makes Google's BERT NLP model better +http://www.semanlink.net/doc/2019/05/microsoft_makes_google_s_bert_n|bookmarkOf|https://venturebeat.com/2019/05/16/microsoft-makes-googles-bert-nlp-model-better/ +http://www.semanlink.net/doc/2019/05/microsoft_makes_google_s_bert_n|creationTime|2019-05-19T23:13:06Z +http://www.semanlink.net/doc/2019/05/microsoft_makes_google_s_bert_n|mainDoc|http://www.semanlink.net/doc/2019/05/robust_language_representation_ +https://datascience.stackexchange.com/questions/10299/what-is-a-good-explanation-of-non-negative-matrix-factorization/15438|creationDate|2017-11-13 +https://datascience.stackexchange.com/questions/10299/what-is-a-good-explanation-of-non-negative-matrix-factorization/15438|tag|http://www.semanlink.net/tag/non_negative_matrix_factorization +https://datascience.stackexchange.com/questions/10299/what-is-a-good-explanation-of-non-negative-matrix-factorization/15438|title|nlp - What is a good explanation of Non Negative Matrix Factorization? - Data Science Stack Exchange
+https://datascience.stackexchange.com/questions/10299/what-is-a-good-explanation-of-non-negative-matrix-factorization/15438|creationTime|2017-11-13T11:31:34Z +http://timesofindia.indiatimes.com/tech/tech-news/Facebook-is-selling-old-wine-Internet-org-in-a-new-bottle-Free-Basicsusers-be-aware/articleshow/50219009.cms|creationDate|2016-01-03 +http://timesofindia.indiatimes.com/tech/tech-news/Facebook-is-selling-old-wine-Internet-org-in-a-new-bottle-Free-Basicsusers-be-aware/articleshow/50219009.cms|tag|http://www.semanlink.net/tag/facebook +http://timesofindia.indiatimes.com/tech/tech-news/Facebook-is-selling-old-wine-Internet-org-in-a-new-bottle-Free-Basicsusers-be-aware/articleshow/50219009.cms|tag|http://www.semanlink.net/tag/net_neutrality +http://timesofindia.indiatimes.com/tech/tech-news/Facebook-is-selling-old-wine-Internet-org-in-a-new-bottle-Free-Basicsusers-be-aware/articleshow/50219009.cms|tag|http://www.semanlink.net/tag/inde +http://timesofindia.indiatimes.com/tech/tech-news/Facebook-is-selling-old-wine-Internet-org-in-a-new-bottle-Free-Basicsusers-be-aware/articleshow/50219009.cms|title|Facebook is selling old wine (Internet.org) in a new bottle (Free Basics), users be aware - Times of India +http://timesofindia.indiatimes.com/tech/tech-news/Facebook-is-selling-old-wine-Internet-org-in-a-new-bottle-Free-Basicsusers-be-aware/articleshow/50219009.cms|creationTime|2016-01-03T12:25:36Z +http://itmanagement.earthweb.com/features/article.php/12297_3867751_3/Business-Intelligence-Software-Ten-Leaders.htm|creationDate|2010-09-21 +http://itmanagement.earthweb.com/features/article.php/12297_3867751_3/Business-Intelligence-Software-Ten-Leaders.htm|tag|http://www.semanlink.net/tag/business_intelligence +http://itmanagement.earthweb.com/features/article.php/12297_3867751_3/Business-Intelligence-Software-Ten-Leaders.htm|title|Business Intelligence Software: Ten Leaders +http://itmanagement.earthweb.com/features/article.php/12297_3867751_3/Business-Intelligence-Software-Ten-Leaders.htm|creationTime|2010-09-21T12:03:29Z +http://iipdigital.usembassy.gov/st/english/publication/2012/11/20121114138624.html#axzz2TOAwLweQ|creationDate|2013-05-15 +http://iipdigital.usembassy.gov/st/english/publication/2012/11/20121114138624.html#axzz2TOAwLweQ|tag|http://www.semanlink.net/tag/luis_von_ahn +http://iipdigital.usembassy.gov/st/english/publication/2012/11/20121114138624.html#axzz2TOAwLweQ|tag|http://www.semanlink.net/tag/captcha +http://iipdigital.usembassy.gov/st/english/publication/2012/11/20121114138624.html#axzz2TOAwLweQ|tag|http://www.semanlink.net/tag/spam +http://iipdigital.usembassy.gov/st/english/publication/2012/11/20121114138624.html#axzz2TOAwLweQ|comment|Throughout his career, von Ahn has focused on how to leverage human brainpower to solve computational problems that computers cannot solve on their own.
+http://iipdigital.usembassy.gov/st/english/publication/2012/11/20121114138624.html#axzz2TOAwLweQ|title|Luis von Ahn: Gamer Defeats Spam IIP Digital +http://iipdigital.usembassy.gov/st/english/publication/2012/11/20121114138624.html#axzz2TOAwLweQ|creationTime|2013-05-15T15:54:06Z +http://passeurdesciences.blog.lemonde.fr/2014/06/25/une-nouvelle-arme-contre-les-superbacteries/|creationDate|2014-06-26 +http://passeurdesciences.blog.lemonde.fr/2014/06/25/une-nouvelle-arme-contre-les-superbacteries/|tag|http://www.semanlink.net/tag/antibiotic_resistance +http://passeurdesciences.blog.lemonde.fr/2014/06/25/une-nouvelle-arme-contre-les-superbacteries/|title|Une nouvelle arme contre les superbactéries Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2014/06/25/une-nouvelle-arme-contre-les-superbacteries/|creationTime|2014-06-26T00:58:47Z +http://www.snl-e.salk.edu/publications/Chichilnisky2001.pdf|creationDate|2014-01-20 +http://www.snl-e.salk.edu/publications/Chichilnisky2001.pdf|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.snl-e.salk.edu/publications/Chichilnisky2001.pdf|title|"""A simple white noise analysis of neuronal light responses"", E.J. Chichinisky 2000" +http://www.snl-e.salk.edu/publications/Chichilnisky2001.pdf|creationTime|2014-01-20T13:32:56Z +http://www.configworks.com/mz/AI_EDAM_2004.pdf|creationDate|2012-02-15 +http://www.configworks.com/mz/AI_EDAM_2004.pdf|tag|http://www.semanlink.net/tag/configuration_and_sw +http://www.configworks.com/mz/AI_EDAM_2004.pdf|comment|"we give a +description logic based definition of a configuration problem and show its equivalence with existing consistency-based +definitions, thus joining the two major streams in knowledge-based configuration (description logics and predicate +logic/constraint based configuration)."
+http://www.configworks.com/mz/AI_EDAM_2004.pdf|title|Configuration knowledge representations for Semantic Web applications +http://www.configworks.com/mz/AI_EDAM_2004.pdf|creationTime|2012-02-15T00:48:51Z +https://developer.mozilla.org/en-US/docs/Web/JavaScript/A_re-introduction_to_JavaScript|creationDate|2014-09-25 +https://developer.mozilla.org/en-US/docs/Web/JavaScript/A_re-introduction_to_JavaScript|tag|http://www.semanlink.net/tag/javascript_and_tutorial +https://developer.mozilla.org/en-US/docs/Web/JavaScript/A_re-introduction_to_JavaScript|tag|http://www.semanlink.net/tag/memory_leak +https://developer.mozilla.org/en-US/docs/Web/JavaScript/A_re-introduction_to_JavaScript|title|A re-introduction to JavaScript (JS Tutorial) +https://developer.mozilla.org/en-US/docs/Web/JavaScript/A_re-introduction_to_JavaScript|creationTime|2014-09-25T11:26:40Z +http://discussions.apple.com/thread.jspa?messageID=7143270|creationDate|2008-12-18 +http://discussions.apple.com/thread.jspa?messageID=7143270|tag|http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur +http://discussions.apple.com/thread.jspa?messageID=7143270|title|Apple - Support - Discussions - The Backlit Keyboard on MacBook Air its working upside down +http://discussions.apple.com/thread.jspa?messageID=7143270|creationTime|2008-12-18T18:19:15Z +http://www.offconvex.org/2018/06/17/textembeddings/|creationDate|2018-06-25 +http://www.offconvex.org/2018/06/17/textembeddings/|tag|http://www.semanlink.net/tag/sanjeev_arora +http://www.offconvex.org/2018/06/17/textembeddings/|tag|http://www.semanlink.net/tag/sif_embeddings +http://www.offconvex.org/2018/06/17/textembeddings/|tag|http://www.semanlink.net/tag/sentence_embeddings +http://www.offconvex.org/2018/06/17/textembeddings/|comment|"> introduction to extremely simple ways of computing sentence +embeddings, which on many standard tasks, beat many state-of-the-art deep learning +methods. 
+ +Related to [this paper](/doc/?uri=https%3A%2F%2Fopenreview.net%2Fforum%3Fid%3DSyK00v5xx) + +(BTW, contains a good intro to text embeddings) + +" +http://www.offconvex.org/2018/06/17/textembeddings/|relatedDoc|https://openreview.net/forum?id=SyK00v5xx +http://www.offconvex.org/2018/06/17/textembeddings/|title|Deep-learning-free Text and Sentence Embedding, Part 1 – Off the convex path +http://www.offconvex.org/2018/06/17/textembeddings/|creationTime|2018-06-25T21:00:24Z +http://www.digitalspy.co.uk/tech/news/a596872/french-bank-becomes-first-to-allow-twitter-users-to-send-money.html#~oQ0CvHwvUdoCBY|creationDate|2014-09-16 +http://www.digitalspy.co.uk/tech/news/a596872/french-bank-becomes-first-to-allow-twitter-users-to-send-money.html#~oQ0CvHwvUdoCBY|tag|http://www.semanlink.net/tag/money +http://www.digitalspy.co.uk/tech/news/a596872/french-bank-becomes-first-to-allow-twitter-users-to-send-money.html#~oQ0CvHwvUdoCBY|tag|http://www.semanlink.net/tag/twitter +http://www.digitalspy.co.uk/tech/news/a596872/french-bank-becomes-first-to-allow-twitter-users-to-send-money.html#~oQ0CvHwvUdoCBY|title|French bank becomes first to allow Twitter users to send money - Tech News - Digital Spy +http://www.digitalspy.co.uk/tech/news/a596872/french-bank-becomes-first-to-allow-twitter-users-to-send-money.html#~oQ0CvHwvUdoCBY|creationTime|2014-09-16T10:10:28Z +http://www.regular-expressions.info/|creationDate|2012-03-07 +http://www.regular-expressions.info/|tag|http://www.semanlink.net/tag/nlp_class +http://www.regular-expressions.info/|tag|http://www.semanlink.net/tag/regex +http://www.regular-expressions.info/|title|Regular-Expressions.info - Regex Tutorial, Examples and Reference - Regexp Patterns +http://www.regular-expressions.info/|creationTime|2012-03-07T13:08:56Z +http://esw.w3.org/topic/Job_Mart|creationDate|2008-08-18 +http://esw.w3.org/topic/Job_Mart|tag|http://www.semanlink.net/tag/offres_d_emploi +http://esw.w3.org/topic/Job_Mart|tag|http://www.semanlink.net/tag/semantic_web +http://esw.w3.org/topic/Job_Mart|comment|This page is for listing Semantic Web-related positions available (in academia, industry, government or whatever) as well as individuals looking for work +http://esw.w3.org/topic/Job_Mart|title|Job Mart - ESW Wiki +http://esw.w3.org/topic/Job_Mart|creationTime|2008-08-18T23:37:00Z +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_mining|creationDate|2019-05-28 +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_mining|tag|http://www.semanlink.net/tag/microsoft_concept_graph +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_mining|tag|http://www.semanlink.net/tag/nlp_short_texts +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_mining|comment|"Microsoft Concept Graph, a knowledge graph engine that provides concept tagging APIs to facilitate the understanding of human languages. Built upon Probase, a universal probabilistic taxonomy consisting of instances and concepts mined from the Web. 
+ +The conceptualization model (also known as the Concept Tagging Model) aims to map text into semantic concept categories with some probabilities +" +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_mining|title|Microsoft Concept Graph: Mining Semantic Concepts for Short Text Understanding MIT Press Journals (2019) +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_mining|bookmarkOf|https://www.mitpressjournals.org/doi/full/10.1162/dint_a_00013 +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_mining|creationTime|2019-05-28T16:13:35Z +http://www.newscientist.com/article.ns?id=dn8383&print=true|creationDate|2005-11-28 +http://www.newscientist.com/article.ns?id=dn8383&print=true|tag|http://www.semanlink.net/tag/ntic +http://www.newscientist.com/article.ns?id=dn8383&print=true|tag|http://www.semanlink.net/tag/musique +http://www.newscientist.com/article.ns?id=dn8383&print=true|tag|http://www.semanlink.net/tag/jdd_apple +http://www.newscientist.com/article.ns?id=dn8383&print=true|title|Air guitarists’ rock dreams come true - New Scientist +http://www.christian-faure.net/2015/09/13/la-blockchain-et-lemergence-des-distributed-consensus-engines/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|creationDate|2015-09-13 +http://www.christian-faure.net/2015/09/13/la-blockchain-et-lemergence-des-distributed-consensus-engines/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|tag|http://www.semanlink.net/tag/christian_faure +http://www.christian-faure.net/2015/09/13/la-blockchain-et-lemergence-des-distributed-consensus-engines/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|tag|http://www.semanlink.net/tag/blockchain +http://www.christian-faure.net/2015/09/13/la-blockchain-et-lemergence-des-distributed-consensus-engines/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|tag|http://www.semanlink.net/tag/bitcoin +http://www.christian-faure.net/2015/09/13/la-blockchain-et-lemergence-des-distributed-consensus-engines/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|comment|l’avènement d’un réseau décentralisé de transaction +http://www.christian-faure.net/2015/09/13/la-blockchain-et-lemergence-des-distributed-consensus-engines/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|title|La Blockchain et l’émergence des « distributed consensus engines » +http://www.christian-faure.net/2015/09/13/la-blockchain-et-lemergence-des-distributed-consensus-engines/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|creationTime|2015-09-13T12:20:18Z +http://sealedabstract.com/rants/why-mobile-web-apps-are-slow/|creationDate|2013-07-11 +http://sealedabstract.com/rants/why-mobile-web-apps-are-slow/|tag|http://www.semanlink.net/tag/mobile_apps +http://sealedabstract.com/rants/why-mobile-web-apps-are-slow/|tag|http://www.semanlink.net/tag/garbage_collector +http://sealedabstract.com/rants/why-mobile-web-apps-are-slow/|tag|http://www.semanlink.net/tag/memoire_informatique +http://sealedabstract.com/rants/why-mobile-web-apps-are-slow/|tag|http://www.semanlink.net/tag/javascript +http://sealedabstract.com/rants/why-mobile-web-apps-are-slow/|title|Why mobile web apps are slow Sealed Abstract 
+http://sealedabstract.com/rants/why-mobile-web-apps-are-slow/|creationTime|2013-07-11T00:52:14Z +http://www-128.ibm.com/developerworks/web/library/wa-semweb/|creationDate|2006-01-30 +http://www-128.ibm.com/developerworks/web/library/wa-semweb/|tag|http://www.semanlink.net/tag/semantic_web_introduction +http://www-128.ibm.com/developerworks/web/library/wa-semweb/|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www-128.ibm.com/developerworks/web/library/wa-semweb/|title|The future of the Web is Semantic +http://mikehadlow.blogspot.fr/2014/06/heisenberg-developers.html|creationDate|2015-05-17 +http://mikehadlow.blogspot.fr/2014/06/heisenberg-developers.html|tag|http://www.semanlink.net/tag/management +http://mikehadlow.blogspot.fr/2014/06/heisenberg-developers.html|tag|http://www.semanlink.net/tag/dev +http://mikehadlow.blogspot.fr/2014/06/heisenberg-developers.html|title|Code rant: Heisenberg Developers +http://mikehadlow.blogspot.fr/2014/06/heisenberg-developers.html|creationTime|2015-05-17T00:17:46Z +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1343|creationDate|2008-05-04 +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1343|tag|http://www.semanlink.net/tag/ldow2008 +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1343|tag|http://www.semanlink.net/tag/kingsley_idehen +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1343|title|Linked Data Trip Report - Part 1 (WWW2008) +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1343|creationTime|2008-05-04T15:49:31Z +http://www.canal.ird.fr|creationDate|2005-09-04 +http://www.canal.ird.fr|tag|http://www.semanlink.net/tag/ird +http://www.canal.ird.fr|title|Canal IRD +http://tools.ietf.org/html/rfc6596|creationDate|2013-01-03 +http://tools.ietf.org/html/rfc6596|tag|http://www.semanlink.net/tag/synonym_uris +http://tools.ietf.org/html/rfc6596|tag|http://www.semanlink.net/tag/uri +http://tools.ietf.org/html/rfc6596|title|RFC 6596 - The Canonical Link Relation +http://tools.ietf.org/html/rfc6596|creationTime|2013-01-03T16:13:07Z +http://www.ibm.com/developerworks/views/web/libraryview.jsp?search_by=Mastering+Ajax|creationDate|2007-12-06 +http://www.ibm.com/developerworks/views/web/libraryview.jsp?search_by=Mastering+Ajax|tag|http://www.semanlink.net/tag/ajax +http://www.ibm.com/developerworks/views/web/libraryview.jsp?search_by=Mastering+Ajax|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.ibm.com/developerworks/views/web/libraryview.jsp?search_by=Mastering+Ajax|tag|http://www.semanlink.net/tag/howto +http://www.ibm.com/developerworks/views/web/libraryview.jsp?search_by=Mastering+Ajax|title|"""Mastering Ajax"" - developerWorks : Web development : Technical library view" +http://www.ibm.com/developerworks/views/web/libraryview.jsp?search_by=Mastering+Ajax|creationTime|2007-12-06T00:50:14Z +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906553,0.html|creationDate|2007-05-08 +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906553,0.html|tag|http://www.semanlink.net/tag/provocation_policiere +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906553,0.html|tag|http://www.semanlink.net/tag/sarkozy 
+http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906553,0.html|tag|http://www.semanlink.net/tag/presidentielles_2007 +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906553,0.html|tag|http://www.semanlink.net/tag/banlieue +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906553,0.html|title|"A Aulnay, la victoire de M. Sarkozy vécue comme une ""grosse claque""" +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906553,0.html|creationTime|2007-05-08T10:22:00Z +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906553,0.html|source|Le Monde +http://greggkellogg.net/2012/08/21/json-ld-and-mongodb|creationDate|2012-08-23 +http://greggkellogg.net/2012/08/21/json-ld-and-mongodb|tag|http://www.semanlink.net/tag/json_ld +http://greggkellogg.net/2012/08/21/json-ld-and-mongodb|title|JSON-LD and MongoDB Gregg Kellogg +http://greggkellogg.net/2012/08/21/json-ld-and-mongodb|creationTime|2012-08-23T17:33:05Z +https://www.virtualbox.org/|creationDate|2017-11-20 +https://www.virtualbox.org/|tag|http://www.semanlink.net/tag/virtualbox +https://www.virtualbox.org/|title|Oracle VM VirtualBox +https://www.virtualbox.org/|creationTime|2017-11-20T09:14:41Z +http://stackoverflow.com/questions/899102/how-do-i-store-javascript-functions-in-a-queue-for-them-to-be-executed-eventually|creationDate|2010-12-30 +http://stackoverflow.com/questions/899102/how-do-i-store-javascript-functions-in-a-queue-for-them-to-be-executed-eventually|tag|http://www.semanlink.net/tag/javascript_tips +http://stackoverflow.com/questions/899102/how-do-i-store-javascript-functions-in-a-queue-for-them-to-be-executed-eventually|title|How do I store javascript functions in a queue for them to be executed eventually - Stack Overflow +http://stackoverflow.com/questions/899102/how-do-i-store-javascript-functions-in-a-queue-for-them-to-be-executed-eventually|creationTime|2010-12-30T23:10:39Z +http://www.sciam.com/print_version.cfm?articleID=0005FB4D-6102-140F-A10283414B7F0000|creationDate|2006-03-11 +http://www.sciam.com/print_version.cfm?articleID=0005FB4D-6102-140F-A10283414B7F0000|tag|http://www.semanlink.net/tag/genetique +http://www.sciam.com/print_version.cfm?articleID=0005FB4D-6102-140F-A10283414B7F0000|tag|http://www.semanlink.net/tag/evolution +http://www.sciam.com/print_version.cfm?articleID=0005FB4D-6102-140F-A10283414B7F0000|tag|http://www.semanlink.net/tag/separation_of_man_and_ape +http://www.sciam.com/print_version.cfm?articleID=0005FB4D-6102-140F-A10283414B7F0000|comment|"First empirical demonstration in higher primates that evolution may often work through changes in gene regulation +" +http://www.sciam.com/print_version.cfm?articleID=0005FB4D-6102-140F-A10283414B7F0000|title|Scientific American: Separation of Man and Ape Down to Gene Expression +https://twitter.com/honnibal/status/1063108730219315201|creationDate|2018-11-18 +https://twitter.com/honnibal/status/1063108730219315201|tag|http://www.semanlink.net/tag/unsupervised_deep_pre_training +https://twitter.com/honnibal/status/1063108730219315201|tag|http://www.semanlink.net/tag/spacy +https://twitter.com/honnibal/status/1063108730219315201|tag|http://www.semanlink.net/tag/matthew_honnibal +https://twitter.com/honnibal/status/1063108730219315201|title|"Matthew Honnibal sur Twitter : ""Have been experimenting with an unsupervised pre-training technique for @spacy_io, similar to ULMFit/Elmo/BERT etc." 
+https://twitter.com/honnibal/status/1063108730219315201|creationTime|2018-11-18T10:39:39Z +http://my.opera.com/tomheath/blog/show.dml/306694|creationDate|2007-08-22 +http://my.opera.com/tomheath/blog/show.dml/306694|tag|http://www.semanlink.net/tag/tom_heath +http://my.opera.com/tomheath/blog/show.dml/306694|tag|http://www.semanlink.net/tag/linkto_semanlink +http://my.opera.com/tomheath/blog/show.dml/306694|tag|http://www.semanlink.net/tag/jena +http://my.opera.com/tomheath/blog/show.dml/306694|title|Applications Built on Jena - Tom Heath's Displacement Activities - by tomheath +http://my.opera.com/tomheath/blog/show.dml/306694|creationTime|2007-08-22T21:52:32Z +https://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|creationDate|2019-03-29 +https://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|tag|http://www.semanlink.net/tag/javascript_tips +https://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|tag|http://www.semanlink.net/tag/json +https://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|title|How can I pretty-print JSON using JavaScript? - Stack Overflow +https://stackoverflow.com/questions/4810841/how-can-i-pretty-print-json-using-javascript|creationTime|2019-03-29T17:56:44Z +http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/|creationDate|2008-06-12 +http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/|tag|http://www.semanlink.net/tag/social_software +http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/|tag|http://www.semanlink.net/tag/sweo_interest_group +http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/|tag|http://www.semanlink.net/tag/semantic_web_web_2_0 +http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/|tag|http://www.semanlink.net/tag/alexandre_passant +http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/|tag|http://www.semanlink.net/tag/semantic_web_use_cases +http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/|tag|http://www.semanlink.net/tag/edf +http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/|title|Case Study: Enhancement and Integration of Corporate Social Software Using the Semantic Web +http://www.w3.org/2001/sw/sweo/public/UseCases/EDF/|creationTime|2008-06-12T08:16:25Z +https://blog.ouseful.info/2017/09/04/simple-text-analysis-using-python-identifying-named-entities-tagging-fuzzy-string-matching-and-topic-modelling/|creationDate|2019-04-24 +https://blog.ouseful.info/2017/09/04/simple-text-analysis-using-python-identifying-named-entities-tagging-fuzzy-string-matching-and-topic-modelling/|tag|http://www.semanlink.net/tag/text_processing +https://blog.ouseful.info/2017/09/04/simple-text-analysis-using-python-identifying-named-entities-tagging-fuzzy-string-matching-and-topic-modelling/|title|Simple Text Analysis Using Python – Identifying Named Entities, Tagging, Fuzzy String Matching and Topic Modelling – OUseful.Info, the blog… +https://blog.ouseful.info/2017/09/04/simple-text-analysis-using-python-identifying-named-entities-tagging-fuzzy-string-matching-and-topic-modelling/|creationTime|2019-04-24T11:35:53Z +http://www.washingtonpost.com/blogs/wonkblog/wp/2013/07/13/when-space-weather-attacks/|creationDate|2013-07-14 +http://www.washingtonpost.com/blogs/wonkblog/wp/2013/07/13/when-space-weather-attacks/|tag|http://www.semanlink.net/tag/catastrophe_naturelle 
+http://www.washingtonpost.com/blogs/wonkblog/wp/2013/07/13/when-space-weather-attacks/|tag|http://www.semanlink.net/tag/solar_storm +http://www.washingtonpost.com/blogs/wonkblog/wp/2013/07/13/when-space-weather-attacks/|title|When space weather attacks! +http://www.washingtonpost.com/blogs/wonkblog/wp/2013/07/13/when-space-weather-attacks/|creationTime|2013-07-14T17:06:24Z +https://bitcoinfoundation.org/blog/?p=418|creationDate|2014-03-02 +https://bitcoinfoundation.org/blog/?p=418|tag|http://www.semanlink.net/tag/bitcoin +https://bitcoinfoundation.org/blog/?p=418|title|Contrary to Mt. Gox’s Statement, Bitcoin is not at fault - Bitcoin Foundation: Blog +https://bitcoinfoundation.org/blog/?p=418|creationTime|2014-03-02T01:56:29Z +http://2007.xtech.org/|creationDate|2007-05-02 +http://2007.xtech.org/|tag|http://www.semanlink.net/tag/xtech_2007 +http://2007.xtech.org/|title|XTech 2007: The Ubiquitous Web: 15-18 May 2007, Paris, France +http://2007.xtech.org/|creationTime|2007-05-02T21:56:41Z +http://backfeed.cc/|creationDate|2016-03-27 +http://backfeed.cc/|tag|http://www.semanlink.net/tag/henry_story +http://backfeed.cc/|comment|A Social Operating System  for Decentralized Organizations +http://backfeed.cc/|title|Backfeed Decentralizing the Present +http://backfeed.cc/|creationTime|2016-03-27T12:25:01Z +http://www.scientificamerican.com/article/device-lets-blind-see-with-tongues/|creationDate|2016-01-04 +http://www.scientificamerican.com/article/device-lets-blind-see-with-tongues/|tag|http://www.semanlink.net/tag/one_learning_algorithm_hypothesis +http://www.scientificamerican.com/article/device-lets-blind-see-with-tongues/|tag|http://www.semanlink.net/tag/neuroscience +http://www.scientificamerican.com/article/device-lets-blind-see-with-tongues/|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.scientificamerican.com/article/device-lets-blind-see-with-tongues/|title|"Tasting the Light: Device Lets the Blind ""See"" with Their Tongues - Scientific American" +http://www.scientificamerican.com/article/device-lets-blind-see-with-tongues/|creationTime|2016-01-04T14:01:21Z +http://www.lespetitescases.net/rdfaiser-votre-blog-2-la-pratique|creationDate|2008-05-20 +http://www.lespetitescases.net/rdfaiser-votre-blog-2-la-pratique|tag|http://www.semanlink.net/tag/rdfa +http://www.lespetitescases.net/rdfaiser-votre-blog-2-la-pratique|tag|http://www.semanlink.net/tag/gautier_poupeau +http://www.lespetitescases.net/rdfaiser-votre-blog-2-la-pratique|tag|http://www.semanlink.net/tag/blog +http://www.lespetitescases.net/rdfaiser-votre-blog-2-la-pratique|tag|http://www.semanlink.net/tag/howto +http://www.lespetitescases.net/rdfaiser-votre-blog-2-la-pratique|title|RDFaiser votre blog, 2ème partie : la pratique Les petites cases +http://www.lespetitescases.net/rdfaiser-votre-blog-2-la-pratique|creationTime|2008-05-20T15:29:01Z +https://rare-technologies.com/sent2vec-an-unsupervised-approach-towards-learning-sentence-embeddings/|creationDate|2019-03-25 +https://rare-technologies.com/sent2vec-an-unsupervised-approach-towards-learning-sentence-embeddings/|tag|http://www.semanlink.net/tag/gensim +https://rare-technologies.com/sent2vec-an-unsupervised-approach-towards-learning-sentence-embeddings/|tag|http://www.semanlink.net/tag/sent2vec +https://rare-technologies.com/sent2vec-an-unsupervised-approach-towards-learning-sentence-embeddings/|title|Sent2Vec: An unsupervised approach towards learning sentence embeddings RARE Technologies 
+https://rare-technologies.com/sent2vec-an-unsupervised-approach-towards-learning-sentence-embeddings/|creationTime|2019-03-25T15:37:59Z +http://www.theverge.com/2017/4/12/15259400/burger-king-google-home-ad-wikipedia|creationDate|2017-04-13 +http://www.theverge.com/2017/4/12/15259400/burger-king-google-home-ad-wikipedia|tag|http://www.semanlink.net/tag/publicite +http://www.theverge.com/2017/4/12/15259400/burger-king-google-home-ad-wikipedia|tag|http://www.semanlink.net/tag/speech_recognition +http://www.theverge.com/2017/4/12/15259400/burger-king-google-home-ad-wikipedia|tag|http://www.semanlink.net/tag/malbouffe +http://www.theverge.com/2017/4/12/15259400/burger-king-google-home-ad-wikipedia|tag|http://www.semanlink.net/tag/wikipedia +http://www.theverge.com/2017/4/12/15259400/burger-king-google-home-ad-wikipedia|title|Burger King’s new ad forces Google Home to advertise the Whopper - The Verge +http://www.theverge.com/2017/4/12/15259400/burger-king-google-home-ad-wikipedia|creationTime|2017-04-13T18:16:28Z +http://www.wired.co.uk/article/china-artificial-intelligence-education-superpower|creationDate|2018-04-21 +http://www.wired.co.uk/article/china-artificial-intelligence-education-superpower|tag|http://www.semanlink.net/tag/chine +http://www.wired.co.uk/article/china-artificial-intelligence-education-superpower|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.wired.co.uk/article/china-artificial-intelligence-education-superpower|tag|http://www.semanlink.net/tag/education +http://www.wired.co.uk/article/china-artificial-intelligence-education-superpower|comment|China wants to be the world leader in artificial intelligence by 2030. To get there, it's reinventing the way children are taught: evolving from a model in which the mastery of routine skills is the end of education, to one in which they’re a means to the end of creative inquiry +http://www.wired.co.uk/article/china-artificial-intelligence-education-superpower|title|China’s children are its secret weapon in the global AI arms race WIRED UK +http://www.wired.co.uk/article/china-artificial-intelligence-education-superpower|creationTime|2018-04-21T11:25:45Z +http://www.boabinteractive.com.au/|creationDate|2008-05-04 +http://www.boabinteractive.com.au/|tag|http://www.semanlink.net/tag/www08 +http://www.boabinteractive.com.au/|tag|http://www.semanlink.net/tag/semantic_web_company +http://www.boabinteractive.com.au/|comment|BoaB is exploring a collaboration with leading Semantic Web organizations and natural resource management agencies such as the Great Barrier Reef Marine Park Authority (Climate Change) to develop cooperative information systems — systems that make sense of distributed data; built with an open software architecture; running on the global infrastructure of the web. 
+http://www.boabinteractive.com.au/|title|BoaB interactive - Web design, graphic design, multimedia, Content Management System (CMS)
+http://www.boabinteractive.com.au/|creationTime|2008-05-04T14:27:10Z
+http://today.java.net/pub/a/today/2007/03/01/building-web-applications-with-maven-2.html|creationDate|2012-08-18
+http://today.java.net/pub/a/today/2007/03/01/building-web-applications-with-maven-2.html|tag|http://www.semanlink.net/tag/maven
+http://today.java.net/pub/a/today/2007/03/01/building-web-applications-with-maven-2.html|tag|http://www.semanlink.net/tag/web
+http://today.java.net/pub/a/today/2007/03/01/building-web-applications-with-maven-2.html|title|Building Web Applications with Maven 2 Java.net
+http://today.java.net/pub/a/today/2007/03/01/building-web-applications-with-maven-2.html|creationTime|2012-08-18T17:20:20Z
+http://www.w3.org/2005/Talks/0517-boit-tbl/|creationDate|2005-11-23
+http://www.w3.org/2005/Talks/0517-boit-tbl/|tag|http://www.semanlink.net/tag/rdf_bus
+http://www.w3.org/2005/Talks/0517-boit-tbl/|tag|http://www.semanlink.net/tag/tim_berners_lee
+http://www.w3.org/2005/Talks/0517-boit-tbl/|tag|http://www.semanlink.net/tag/semantic_web_introduction
+http://www.w3.org/2005/Talks/0517-boit-tbl/|tag|http://www.semanlink.net/tag/semantic_integration_hub
+http://www.w3.org/2005/Talks/0517-boit-tbl/|comment|"""Keep your existing systems running, adapt them"""
+http://www.w3.org/2005/Talks/0517-boit-tbl/|title|Berners-Lee - Sem Web Life Sciences - Bio-IT world
+https://rdipietro.github.io/friendly-intro-to-cross-entropy-loss/|creationDate|2019-01-14
+https://rdipietro.github.io/friendly-intro-to-cross-entropy-loss/|tag|http://www.semanlink.net/tag/kullback_leibler_divergence
+https://rdipietro.github.io/friendly-intro-to-cross-entropy-loss/|tag|http://www.semanlink.net/tag/cross_entropy
+https://rdipietro.github.io/friendly-intro-to-cross-entropy-loss/|comment|"> If we think of a distribution y as the tool we use to encode symbols, then
+entropy measures the number of bits we'll need if we use the correct tool y.
+This is optimal, in that we can't encode the symbols using fewer bits on
+average.
+>
+> In contrast, cross entropy is the
+number of bits we'll need if we encode symbols from y using the wrong tool ŷ.
+This consists of encoding the i-th symbol using log(1/ŷi) bits instead of log(1/yi)
+bits.
+" +https://rdipietro.github.io/friendly-intro-to-cross-entropy-loss/|title|A Friendly Introduction to Cross-Entropy Loss +https://rdipietro.github.io/friendly-intro-to-cross-entropy-loss/|creationTime|2019-01-14T15:59:17Z +http://www.recherches-sur-le-terrorisme.com/Documentsterrorisme/niger.html|creationDate|2014-07-29 +http://www.recherches-sur-le-terrorisme.com/Documentsterrorisme/niger.html|tag|http://www.semanlink.net/tag/niger +http://www.recherches-sur-le-terrorisme.com/Documentsterrorisme/niger.html|tag|http://www.semanlink.net/tag/aqmi +http://www.recherches-sur-le-terrorisme.com/Documentsterrorisme/niger.html|tag|http://www.semanlink.net/tag/boko_haram +http://www.recherches-sur-le-terrorisme.com/Documentsterrorisme/niger.html|tag|http://www.semanlink.net/tag/islamisme +http://www.recherches-sur-le-terrorisme.com/Documentsterrorisme/niger.html|tag|http://www.semanlink.net/tag/trafic_de_drogue +http://www.recherches-sur-le-terrorisme.com/Documentsterrorisme/niger.html|title|Le Niger face à ses difficultés, l’islamisme rampant exporté de l’étranger, la menace d’Al-Qaïda, l’AQMI, l’utilisation du territoire comme voie de passage de la drogue et la question Touarègue +http://www.recherches-sur-le-terrorisme.com/Documentsterrorisme/niger.html|creationTime|2014-07-29T16:23:31Z +http://www2.cnrs.fr/presse/communique/5253.htm|creationDate|2017-10-16 +http://www2.cnrs.fr/presse/communique/5253.htm|tag|http://www.semanlink.net/tag/ondes_gravitationnelles +http://www2.cnrs.fr/presse/communique/5253.htm|tag|http://www.semanlink.net/tag/astronomie_multi_signaux +http://www2.cnrs.fr/presse/communique/5253.htm|title|Les ondes gravitationnelles font la première lumière sur la fusion d'étoiles à neutrons - Communiqués et dossiers de presse - CNRS +http://www2.cnrs.fr/presse/communique/5253.htm|creationTime|2017-10-16T18:00:46Z +https://micvog.com/2013/09/08/storm-first-story-detection/|creationDate|2017-07-26 +https://micvog.com/2013/09/08/storm-first-story-detection/|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +https://micvog.com/2013/09/08/storm-first-story-detection/|tag|http://www.semanlink.net/tag/nlp_short_texts +https://micvog.com/2013/09/08/storm-first-story-detection/|tag|http://www.semanlink.net/tag/twitter +https://micvog.com/2013/09/08/storm-first-story-detection/|tag|http://www.semanlink.net/tag/similarity_queries +https://micvog.com/2013/09/08/storm-first-story-detection/|title|How to spot first stories on Twitter using Storm Michael Vogiatzis +https://micvog.com/2013/09/08/storm-first-story-detection/|creationTime|2017-07-26T13:28:53Z +http://events.linkeddata.org/ldow2009/|creationDate|2009-03-18 +http://events.linkeddata.org/ldow2009/|tag|http://www.semanlink.net/tag/ldow +http://events.linkeddata.org/ldow2009/|title|Linked Data on the Web (LDOW2009) - Workshop at WWW2009, Madrid, Spain +http://events.linkeddata.org/ldow2009/|creationTime|2009-03-18T00:20:47Z +http://danakil.ethiopia.free.fr/index.htm|creationDate|2008-11-21 +http://danakil.ethiopia.free.fr/index.htm|tag|http://www.semanlink.net/tag/ethiopie +http://danakil.ethiopia.free.fr/index.htm|tag|http://www.semanlink.net/tag/djibouti +http://danakil.ethiopia.free.fr/index.htm|tag|http://www.semanlink.net/tag/volcan +http://danakil.ethiopia.free.fr/index.htm|tag|http://www.semanlink.net/tag/carnet_de_voyage +http://danakil.ethiopia.free.fr/index.htm|title|Le Triangle Afar: entre Ethiopie et Djibouti +http://danakil.ethiopia.free.fr/index.htm|creationTime|2008-11-21T23:21:23Z 
+https://www.technologyreview.com/s/610576/how-to-manipulate-facebook-and-twitter-instead-of-letting-them-manipulate-you/|creationDate|2018-03-21 +https://www.technologyreview.com/s/610576/how-to-manipulate-facebook-and-twitter-instead-of-letting-them-manipulate-you/|tag|http://www.semanlink.net/tag/facebook +https://www.technologyreview.com/s/610576/how-to-manipulate-facebook-and-twitter-instead-of-letting-them-manipulate-you/|tag|http://www.semanlink.net/tag/twitter +https://www.technologyreview.com/s/610576/how-to-manipulate-facebook-and-twitter-instead-of-letting-them-manipulate-you/|title|How to manipulate Facebook and Twitter instead of letting them manipulate you - MIT Technology Review +https://www.technologyreview.com/s/610576/how-to-manipulate-facebook-and-twitter-instead-of-letting-them-manipulate-you/|creationTime|2018-03-21T09:47:45Z +http://dannyayers.com/2011/07/24/Sitemap-notes|creationDate|2011-07-25 +http://dannyayers.com/2011/07/24/Sitemap-notes|tag|http://www.semanlink.net/tag/sitemaps +http://dannyayers.com/2011/07/24/Sitemap-notes|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2011/07/24/Sitemap-notes|title|Sitemaps notes - Danny Ayers : Raw Blog +http://dannyayers.com/2011/07/24/Sitemap-notes|creationTime|2011-07-25T17:29:01Z +http://www.iks-project.eu/sites/default/files/IKS-Introduction-and-Overview.pdf|creationDate|2012-06-25 +http://www.iks-project.eu/sites/default/files/IKS-Introduction-and-Overview.pdf|tag|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://www.iks-project.eu/sites/default/files/IKS-Introduction-and-Overview.pdf|tag|http://www.semanlink.net/tag/interactive_knowledge_stack +http://www.iks-project.eu/sites/default/files/IKS-Introduction-and-Overview.pdf|title|IKS: introduction and overview +http://www.iks-project.eu/sites/default/files/IKS-Introduction-and-Overview.pdf|creationTime|2012-06-25T18:26:07Z +http://blog.sgo.to/2014/04/what-your-api-would-look-like-as-webpage.html|creationDate|2014-11-15 +http://blog.sgo.to/2014/04/what-your-api-would-look-like-as-webpage.html|tag|http://www.semanlink.net/tag/samuel_goto +http://blog.sgo.to/2014/04/what-your-api-would-look-like-as-webpage.html|tag|http://www.semanlink.net/tag/resources_oriented_web_services +http://blog.sgo.to/2014/04/what-your-api-would-look-like-as-webpage.html|title|Hello World: What your API would look like as a WebPage +http://blog.sgo.to/2014/04/what-your-api-would-look-like-as-webpage.html|creationTime|2014-11-15T14:15:01Z +http://www.nytimes.com/2006/05/19/science/19tiny.html?ei=5088&en=5e2b672c1e3fc1a9&ex=1305691200&partner=rssnyt&emc=rss&pagewanted=print|creationDate|2006-05-25 +http://www.nytimes.com/2006/05/19/science/19tiny.html?ei=5088&en=5e2b672c1e3fc1a9&ex=1305691200&partner=rssnyt&emc=rss&pagewanted=print|tag|http://www.semanlink.net/tag/homme_de_flores +http://www.nytimes.com/2006/05/19/science/19tiny.html?ei=5088&en=5e2b672c1e3fc1a9&ex=1305691200&partner=rssnyt&emc=rss&pagewanted=print|comment|"Not all scientists agree that the 18,000-year-old ""little people"" fossils found on the Indonesian island of Flores should be designated an extinct human-related species." 
+http://www.nytimes.com/2006/05/19/science/19tiny.html?ei=5088&en=5e2b672c1e3fc1a9&ex=1305691200&partner=rssnyt&emc=rss&pagewanted=print|title|Debate on Little Human Fossil Enters Major Scientific Forum - New York Times +https://web.stanford.edu/class/cs276/handouts/lecture20-distributed-representations.pdf|creationDate|2017-10-01 +https://web.stanford.edu/class/cs276/handouts/lecture20-distributed-representations.pdf|tag|http://www.semanlink.net/tag/word2vec +https://web.stanford.edu/class/cs276/handouts/lecture20-distributed-representations.pdf|tag|http://www.semanlink.net/tag/word_embedding +https://web.stanford.edu/class/cs276/handouts/lecture20-distributed-representations.pdf|tag|http://www.semanlink.net/tag/chris_manning +https://web.stanford.edu/class/cs276/handouts/lecture20-distributed-representations.pdf|tag|http://www.semanlink.net/tag/embeddings_in_ir +https://web.stanford.edu/class/cs276/handouts/lecture20-distributed-representations.pdf|comment|"includes description of word2vec +" +https://web.stanford.edu/class/cs276/handouts/lecture20-distributed-representations.pdf|title|Distributed Word Representations for Information Retrieval +https://web.stanford.edu/class/cs276/handouts/lecture20-distributed-representations.pdf|creationTime|2017-10-01T19:10:39Z +http://jekyllrb.com/|creationDate|2017-06-08 +http://jekyllrb.com/|tag|http://www.semanlink.net/tag/blog_software +http://jekyllrb.com/|comment|Transform your plain text into static websites and blogs +http://jekyllrb.com/|title|Jekyll • Simple, blog-aware, static sites +http://jekyllrb.com/|creationTime|2017-06-08T16:50:16Z +https://news.cnrs.fr/opinions/energy-hydrogens-great-promise|creationDate|2018-02-10 +https://news.cnrs.fr/opinions/energy-hydrogens-great-promise|tag|http://www.semanlink.net/tag/hydrogen +https://news.cnrs.fr/opinions/energy-hydrogens-great-promise|title|Energy: Hydrogen's Great Promise CNRS News +https://news.cnrs.fr/opinions/energy-hydrogens-great-promise|creationTime|2018-02-10T00:24:55Z +http://www.ajaxpatterns.org/|creationDate|2005-11-16 +http://www.ajaxpatterns.org/|tag|http://www.semanlink.net/tag/ajax +http://www.ajaxpatterns.org/|title|Main Page - Ajax Patterns Ajax Patterns +http://www.dailymotion.com/video/xbbh6l_f-gall-v-sanson-la-groupie-du-piani_music|creationDate|2012-08-05 +http://www.dailymotion.com/video/xbbh6l_f-gall-v-sanson-la-groupie-du-piani_music|tag|http://www.semanlink.net/tag/musique +http://www.dailymotion.com/video/xbbh6l_f-gall-v-sanson-la-groupie-du-piani_music|tag|http://www.semanlink.net/tag/video +http://www.dailymotion.com/video/xbbh6l_f-gall-v-sanson-la-groupie-du-piani_music|title|F.Gall/V.Sanson: la groupie du pianiste - Vidéo Dailymotion +http://www.dailymotion.com/video/xbbh6l_f-gall-v-sanson-la-groupie-du-piani_music|creationTime|2012-08-05T16:25:04Z +http://miniajax.com/|creationDate|2007-09-05 +http://miniajax.com/|tag|http://www.semanlink.net/tag/ajax +http://miniajax.com/|title|MiniAjax.com / A showroom of nice looking simple downloadable DHTML and AJAX scripts +http://miniajax.com/|creationTime|2007-09-05T21:15:04Z +https://rajarshd.github.io/papers/acl2015.pdf|creationDate|2017-11-21 +https://rajarshd.github.io/papers/acl2015.pdf|tag|http://www.semanlink.net/tag/latent_dirichlet_allocation +https://rajarshd.github.io/papers/acl2015.pdf|tag|http://www.semanlink.net/tag/word_embedding +https://rajarshd.github.io/papers/acl2015.pdf|title|Gaussian LDA for Topic Models with Word Embeddings (2015) 
+https://rajarshd.github.io/papers/acl2015.pdf|creationTime|2017-11-21T01:16:00Z +https://www.aiforhumanity.fr/|creationDate|2018-03-28 +https://www.aiforhumanity.fr/|tag|http://www.semanlink.net/tag/rapport_villani_sur_l_ia +https://www.aiforhumanity.fr/|title|AI for humanity +https://www.aiforhumanity.fr/|creationTime|2018-03-28T23:57:41Z +http://www.offconvex.org/2018/06/25/textembeddings/|creationDate|2018-06-25 +http://www.offconvex.org/2018/06/25/textembeddings/|tag|http://www.semanlink.net/tag/sentence_embeddings +http://www.offconvex.org/2018/06/25/textembeddings/|tag|http://www.semanlink.net/tag/sanjeev_arora +http://www.offconvex.org/2018/06/25/textembeddings/|tag|http://www.semanlink.net/tag/sif_embeddings +http://www.offconvex.org/2018/06/25/textembeddings/|comment|"> Can we design a text embedding with the simplicity and transparency of SIF while also incorporating word order information? + +yes we can. +" +http://www.offconvex.org/2018/06/25/textembeddings/|title|Deep-learning-free Text and Sentence Embedding, Part 2 – Off the convex path +http://www.offconvex.org/2018/06/25/textembeddings/|creationTime|2018-06-25T21:04:28Z +https://dessia.tech/|creationDate|2019-05-07 +https://dessia.tech/|tag|http://www.semanlink.net/tag/ai_startups +https://dessia.tech/|comment|"> DessIA: le premier logiciel de conception mécanique basé sur l'intelligence artificielle +" +https://dessia.tech/|title|DessIA Technologies +https://dessia.tech/|creationTime|2019-05-07T11:29:27Z +http://www.wired.com/2016/06/compression-keeps-24-tons-stone-collapsing/#slide-1|creationDate|2016-06-09 +http://www.wired.com/2016/06/compression-keeps-24-tons-stone-collapsing/#slide-1|tag|http://www.semanlink.net/tag/architecture +http://www.wired.com/2016/06/compression-keeps-24-tons-stone-collapsing/#slide-1|tag|http://www.semanlink.net/tag/geometrie +http://www.wired.com/2016/06/compression-keeps-24-tons-stone-collapsing/#slide-1|title|24 Tons of Towering Stone, Held Together With Compression WIRED +http://www.wired.com/2016/06/compression-keeps-24-tons-stone-collapsing/#slide-1|creationTime|2016-06-09T22:47:53Z +http://www.flax.co.uk/blog/2013/12/11/principles-of-solr-application-design-part-1-of-2/|creationDate|2015-03-14 +http://www.flax.co.uk/blog/2013/12/11/principles-of-solr-application-design-part-1-of-2/|tag|http://www.semanlink.net/tag/solr +http://www.flax.co.uk/blog/2013/12/11/principles-of-solr-application-design-part-1-of-2/|title|Principles of Solr application design – part 1 of 2 +http://www.flax.co.uk/blog/2013/12/11/principles-of-solr-application-design-part-1-of-2/|creationTime|2015-03-14T23:35:16Z +http://sparcool.net/|creationDate|2009-04-21 +http://sparcool.net/|tag|http://www.semanlink.net/tag/sparql +http://sparcool.net/|tag|http://www.semanlink.net/tag/linked_data +http://sparcool.net/|tag|http://www.semanlink.net/tag/alexandre_passant +http://sparcool.net/|comment|SPARCool offers a way to run SPARQL queries about any URI that follows the Linked Data principles by calling a simple URL based on the http://sparcool.net/format/predicate[;l=lang]/URI pattern. 
+http://sparcool.net/|title|SPARCool +http://sparcool.net/|creationTime|2009-04-21T13:05:21Z +http://www.lejdd.fr/societe/sciences/a-la-rencontre-du-blob-cet-organisme-ni-animal-ni-vegetal-ni-champignon-3347009#xtor=CS1-4|creationDate|2017-06-01 +http://www.lejdd.fr/societe/sciences/a-la-rencontre-du-blob-cet-organisme-ni-animal-ni-vegetal-ni-champignon-3347009#xtor=CS1-4|tag|http://www.semanlink.net/tag/slime_mold +http://www.lejdd.fr/societe/sciences/a-la-rencontre-du-blob-cet-organisme-ni-animal-ni-vegetal-ni-champignon-3347009#xtor=CS1-4|title|A la rencontre du blob, cet organisme ni animal, ni végétal, ni champignon +http://www.lejdd.fr/societe/sciences/a-la-rencontre-du-blob-cet-organisme-ni-animal-ni-vegetal-ni-champignon-3347009#xtor=CS1-4|creationTime|2017-06-01T23:53:29Z +http://www.ibm.com/developerworks/xml/library/x-semweb.html|creationDate|2008-10-28 +http://www.ibm.com/developerworks/xml/library/x-semweb.html|tag|http://www.semanlink.net/tag/jena +http://www.ibm.com/developerworks/xml/library/x-semweb.html|tag|http://www.semanlink.net/tag/database_to_rdf_mapping +http://www.ibm.com/developerworks/xml/library/x-semweb.html|tag|http://www.semanlink.net/tag/ldap +http://www.ibm.com/developerworks/xml/library/x-semweb.html|title|Expose LDAP directories to the Semantic Web with SquirrelRDF +http://www.ibm.com/developerworks/xml/library/x-semweb.html|creationTime|2008-10-28T22:40:41Z +http://www.openlinksw.com/weblog/oerling/?id=1504|creationDate|2008-12-17 +http://www.openlinksw.com/weblog/oerling/?id=1504|tag|http://www.semanlink.net/tag/virtuoso +http://www.openlinksw.com/weblog/oerling/?id=1504|tag|http://www.semanlink.net/tag/orri_erling +http://www.openlinksw.com/weblog/oerling/?id=1504|tag|http://www.semanlink.net/tag/tutorial +http://www.openlinksw.com/weblog/oerling/?id=1504|title|Virtuoso RDF: Getting Started for the Developer +http://www.openlinksw.com/weblog/oerling/?id=1504|creationTime|2008-12-17T16:46:33Z +http://muyueh.com/greenhoney/|creationDate|2014-11-27 +http://muyueh.com/greenhoney/|tag|http://www.semanlink.net/tag/langage +http://muyueh.com/greenhoney/|tag|http://www.semanlink.net/tag/couleur +http://muyueh.com/greenhoney/|comment|"Language represents our view of the world, and knowing its limits helps us understand how our perception works. I used the data from Wikipedia’s “Color” entry for different languages. 
My assumption was: ""Different languages have different ways to describe color.” +" +http://muyueh.com/greenhoney/|title|Green Honey: Different languages have different ways to describe color +http://muyueh.com/greenhoney/|creationTime|2014-11-27T13:44:10Z +http://onlinehub.stanford.edu/cs224|creationDate|2019-04-03 +http://onlinehub.stanford.edu/cs224|tag|http://www.semanlink.net/tag/nlp_stanford +http://onlinehub.stanford.edu/cs224|title|CS224n: Natural Language Processing with Deep Learning +http://onlinehub.stanford.edu/cs224|creationTime|2019-04-03T22:27:36Z +http://data.semanticweb.org/conference/eswc/2012/html|creationDate|2012-05-15 +http://data.semanticweb.org/conference/eswc/2012/html|tag|http://www.semanlink.net/tag/eswc_2012 +http://data.semanticweb.org/conference/eswc/2012/html|title|9th Extended Semantic Web Conference Semantic Web Dog Food +http://data.semanticweb.org/conference/eswc/2012/html|creationTime|2012-05-15T10:10:16Z +https://inc.cnrs.fr/fr/cnrsinfo/un-reseau-precurseur-de-la-biochimie-du-vivant-identifie|creationDate|2019-05-03 +https://inc.cnrs.fr/fr/cnrsinfo/un-reseau-precurseur-de-la-biochimie-du-vivant-identifie|tag|http://www.semanlink.net/tag/origine_de_la_vie +https://inc.cnrs.fr/fr/cnrsinfo/un-reseau-precurseur-de-la-biochimie-du-vivant-identifie|title|Un réseau précurseur de la biochimie du vivant identifié +https://inc.cnrs.fr/fr/cnrsinfo/un-reseau-precurseur-de-la-biochimie-du-vivant-identifie|creationTime|2019-05-03T17:04:20Z +http://www.dynamicorange.com/blog/archives/internet-technical/ldow2008.html|creationDate|2008-04-24 +http://www.dynamicorange.com/blog/archives/internet-technical/ldow2008.html|tag|http://www.semanlink.net/tag/ldow2008 +http://www.dynamicorange.com/blog/archives/internet-technical/ldow2008.html|title|I Really _Don't_ Know: LDOW2008 +http://www.dynamicorange.com/blog/archives/internet-technical/ldow2008.html|creationTime|2008-04-24T14:10:09Z +http://www.w3.org/Submission/2003/SUBM-rdf-netapi-20031002/|creationDate|2008-01-09 +http://www.w3.org/Submission/2003/SUBM-rdf-netapi-20031002/|tag|http://www.semanlink.net/tag/andy_seaborne +http://www.w3.org/Submission/2003/SUBM-rdf-netapi-20031002/|tag|http://www.semanlink.net/tag/joseki +http://www.w3.org/Submission/2003/SUBM-rdf-netapi-20031002/|tag|http://www.semanlink.net/tag/rdf_net_api +http://www.w3.org/Submission/2003/SUBM-rdf-netapi-20031002/|comment|A simple network interface to remote RDF models. There are two sets of functionality: a basic access mechanism that provides query capability to a collection of RDF statements, and an update mechanism where, subject to security constraints (not discussed here), changes to the RDF model can be made. 
+http://www.w3.org/Submission/2003/SUBM-rdf-netapi-20031002/|title|RDF Net API +http://www.w3.org/Submission/2003/SUBM-rdf-netapi-20031002/|creationTime|2008-01-09T23:59:15Z +https://www.kaggle.com/cpmpml/spell-checker-using-word2vec?scriptVersionId=1152488|creationDate|2017-11-03 +https://www.kaggle.com/cpmpml/spell-checker-using-word2vec?scriptVersionId=1152488|tag|http://www.semanlink.net/tag/word2vec +https://www.kaggle.com/cpmpml/spell-checker-using-word2vec?scriptVersionId=1152488|tag|http://www.semanlink.net/tag/spellchecker +https://www.kaggle.com/cpmpml/spell-checker-using-word2vec?scriptVersionId=1152488|title|Spell Checker using Word2vec Kaggle +https://www.kaggle.com/cpmpml/spell-checker-using-word2vec?scriptVersionId=1152488|creationTime|2017-11-03T10:46:08Z +https://arxiv.org/abs/1703.00993|creationDate|2017-08-28 +https://arxiv.org/abs/1703.00993|tag|http://www.semanlink.net/tag/word_embedding +https://arxiv.org/abs/1703.00993|tag|http://www.semanlink.net/tag/nlp_reading_comprehension +https://arxiv.org/abs/1703.00993|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +https://arxiv.org/abs/1703.00993|tag|http://www.semanlink.net/tag/oov +https://arxiv.org/abs/1703.00993|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1703.00993|arxiv_author|Hanxiao Liu +https://arxiv.org/abs/1703.00993|arxiv_author|Ruslan Salakhutdinov +https://arxiv.org/abs/1703.00993|arxiv_author|William W. Cohen +https://arxiv.org/abs/1703.00993|arxiv_author|Bhuwan Dhingra +https://arxiv.org/abs/1703.00993|comment|"abstract: +The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on + +1. the use of pre-trained word embeddings, and +2. the representation of out-of-vocabulary tokens at test time, + +can turn out to have a larger impact than architectural choices on the final performance + + + +" +https://arxiv.org/abs/1703.00993|title|[1703.00993] A Comparative Study of Word Embeddings for Reading Comprehension +https://arxiv.org/abs/1703.00993|creationTime|2017-08-28T00:22:38Z +https://arxiv.org/abs/1703.00993|arxiv_summary|"The focus of past machine learning research for Reading Comprehension tasks +has been primarily on the design of novel deep learning architectures. Here we +show that seemingly minor choices made on (1) the use of pre-trained word +embeddings, and (2) the representation of out-of-vocabulary tokens at test +time, can turn out to have a larger impact than architectural choices on the +final performance. We systematically explore several options for these choices, +and provide recommendations to researchers working in this area." 
+https://arxiv.org/abs/1703.00993|arxiv_firstAuthor|Bhuwan Dhingra +https://arxiv.org/abs/1703.00993|arxiv_updated|2017-03-02T23:58:54Z +https://arxiv.org/abs/1703.00993|arxiv_title|A Comparative Study of Word Embeddings for Reading Comprehension +https://arxiv.org/abs/1703.00993|arxiv_published|2017-03-02T23:58:54Z +https://arxiv.org/abs/1703.00993|arxiv_num|1703.00993 +http://youtube.com/results?search_query=moussa+poussi&search=Search|creationDate|2007-09-20 +http://youtube.com/results?search_query=moussa+poussi&search=Search|tag|http://www.semanlink.net/tag/youtube +http://youtube.com/results?search_query=moussa+poussi&search=Search|tag|http://www.semanlink.net/tag/moussa_poussi +http://youtube.com/results?search_query=moussa+poussi&search=Search|title|"YouTube - Search results for ""Moussa Poussi""" +http://youtube.com/results?search_query=moussa+poussi&search=Search|creationTime|2007-09-20T22:46:46Z +http://musicontology.com/|creationDate|2007-09-27 +http://musicontology.com/|tag|http://www.semanlink.net/tag/musique +http://musicontology.com/|tag|http://www.semanlink.net/tag/frederick_giasson +http://musicontology.com/|tag|http://www.semanlink.net/tag/ontologies +http://musicontology.com/|tag|http://www.semanlink.net/tag/yves_raymond +http://musicontology.com/|title|Music Ontology Specification +http://musicontology.com/|creationTime|2007-09-27T00:49:12Z +https://arxiv.org/pdf/1701.00185.pdf|creationDate|2017-11-04 +https://arxiv.org/pdf/1701.00185.pdf|tag|http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp +https://arxiv.org/pdf/1701.00185.pdf|tag|http://www.semanlink.net/tag/surprises_me +https://arxiv.org/pdf/1701.00185.pdf|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/pdf/1701.00185.pdf|tag|http://www.semanlink.net/tag/short_text_clustering +https://arxiv.org/pdf/1701.00185.pdf|arxiv_author|Peng Wang +https://arxiv.org/pdf/1701.00185.pdf|arxiv_author|Jun Zhao +https://arxiv.org/pdf/1701.00185.pdf|arxiv_author|Suncong Zheng +https://arxiv.org/pdf/1701.00185.pdf|arxiv_author|Guanhua Tian +https://arxiv.org/pdf/1701.00185.pdf|arxiv_author|Jiaming Xu +https://arxiv.org/pdf/1701.00185.pdf|arxiv_author|Bo Xu +https://arxiv.org/pdf/1701.00185.pdf|comment|"> We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. +> +> Non-biased deep feature representations can be learned through our self- taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. + +> The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. + +[conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [gitgub repo (matlab)](https://github.com/jacoxu/STC2) +" +https://arxiv.org/pdf/1701.00185.pdf|title|[1701.00185] Self-Taught Convolutional Neural Networks for Short Text Clustering +https://arxiv.org/pdf/1701.00185.pdf|creationTime|2017-11-04T19:27:04Z +https://arxiv.org/pdf/1701.00185.pdf|arxiv_summary|"Short text clustering is a challenging problem due to its sparseness of text +representation. 
Here we propose a flexible Self-Taught Convolutional neural +network framework for Short Text Clustering (dubbed STC^2), which can flexibly +and successfully incorporate more useful semantic features and learn non-biased +deep text representation in an unsupervised manner. In our framework, the +original raw text features are firstly embedded into compact binary codes by +using one existing unsupervised dimensionality reduction methods. Then, word +embeddings are explored and fed into convolutional neural networks to learn +deep feature representations, meanwhile the output units are used to fit the +pre-trained binary codes in the training process. Finally, we get the optimal +clusters by employing K-means to cluster the learned representations. Extensive +experimental results demonstrate that the proposed framework is effective, +flexible and outperform several popular clustering methods when tested on three +public short text datasets." +https://arxiv.org/pdf/1701.00185.pdf|arxiv_firstAuthor|Jiaming Xu +https://arxiv.org/pdf/1701.00185.pdf|arxiv_updated|2017-01-01T01:57:59Z +https://arxiv.org/pdf/1701.00185.pdf|arxiv_title|Self-Taught Convolutional Neural Networks for Short Text Clustering +https://arxiv.org/pdf/1701.00185.pdf|arxiv_published|2017-01-01T01:57:59Z +https://arxiv.org/pdf/1701.00185.pdf|arxiv_num|1701.00185 +http://www.youtube.com/watch?v=y7WrYSwMHKA|creationDate|2008-06-21 +http://www.youtube.com/watch?v=y7WrYSwMHKA|tag|http://www.semanlink.net/tag/ours_polaire +http://www.youtube.com/watch?v=y7WrYSwMHKA|tag|http://www.semanlink.net/tag/islande +http://www.youtube.com/watch?v=y7WrYSwMHKA|title|Polar bear executed in Iceland +http://www.youtube.com/watch?v=y7WrYSwMHKA|creationTime|2008-06-21T00:21:58Z +http://pisani.blog.lemonde.fr/2007/03/15/google-une-mine-de-mp3/|creationDate|2007-03-15 +http://pisani.blog.lemonde.fr/2007/03/15/google-une-mine-de-mp3/|tag|http://www.semanlink.net/tag/mp3 +http://pisani.blog.lemonde.fr/2007/03/15/google-une-mine-de-mp3/|tag|http://www.semanlink.net/tag/google +http://pisani.blog.lemonde.fr/2007/03/15/google-une-mine-de-mp3/|title|Google: une mine de mp3 +http://pisani.blog.lemonde.fr/2007/03/15/google-une-mine-de-mp3/|creationTime|2007-03-15T22:46:59Z +http://www.courrierinternational.com/article/2009/08/01/un-tour-du-monde-de-l-humour|creationDate|2009-11-13 +http://www.courrierinternational.com/article/2009/08/01/un-tour-du-monde-de-l-humour|tag|http://www.semanlink.net/tag/humour +http://www.courrierinternational.com/article/2009/08/01/un-tour-du-monde-de-l-humour|title|SUPPLÉMENT RIRE • Un tour du monde de l’humour Courrier international +http://www.courrierinternational.com/article/2009/08/01/un-tour-du-monde-de-l-humour|creationTime|2009-11-13T23:11:21Z +http://www.newscientist.com/article/dn26272-cosmic-inflation-is-dead-long-live-cosmic-inflation.html?full=true#.VCR1jUtGuww|creationDate|2014-09-25 +http://www.newscientist.com/article/dn26272-cosmic-inflation-is-dead-long-live-cosmic-inflation.html?full=true#.VCR1jUtGuww|tag|http://www.semanlink.net/tag/cosmic_inflation +http://www.newscientist.com/article/dn26272-cosmic-inflation-is-dead-long-live-cosmic-inflation.html?full=true#.VCR1jUtGuww|title|Cosmic inflation is dead, long live cosmic inflation! 
- 25 September 2014 - New Scientist +http://www.newscientist.com/article/dn26272-cosmic-inflation-is-dead-long-live-cosmic-inflation.html?full=true#.VCR1jUtGuww|creationTime|2014-09-25T22:09:09Z +http://www3.nationalgeographic.com/places/countries/country_niger.html|creationDate|2007-07-08 +http://www3.nationalgeographic.com/places/countries/country_niger.html|tag|http://www.semanlink.net/tag/niger +http://www3.nationalgeographic.com/places/countries/country_niger.html|title|Niger facts, Niger travel videos, flags, photos - National Geographic +http://www3.nationalgeographic.com/places/countries/country_niger.html|creationTime|2007-07-08T02:34:45Z +http://www.mamiwata.com/mamiwata.html|creationDate|2007-09-05 +http://www.mamiwata.com/mamiwata.html|tag|http://www.semanlink.net/tag/mami_wata +http://www.mamiwata.com/mamiwata.html|title|MAMI WATA IN THE AFRICAN-AMERICAN DIASPORA +http://www.mamiwata.com/mamiwata.html|creationTime|2007-09-05T00:39:09Z +http://www.wired.com/2016/03/apple-ios-9s-universal-links-recall-the-bad-old-days-of-internet-explorer/|creationDate|2016-03-27 +http://www.wired.com/2016/03/apple-ios-9s-universal-links-recall-the-bad-old-days-of-internet-explorer/|tag|http://www.semanlink.net/tag/apple +http://www.wired.com/2016/03/apple-ios-9s-universal-links-recall-the-bad-old-days-of-internet-explorer/|tag|http://www.semanlink.net/tag/deep_links +http://www.wired.com/2016/03/apple-ios-9s-universal-links-recall-the-bad-old-days-of-internet-explorer/|tag|http://www.semanlink.net/tag/ios +http://www.wired.com/2016/03/apple-ios-9s-universal-links-recall-the-bad-old-days-of-internet-explorer/|title|Apple’s iOS 9 Links Recall the Bad Old Days of Internet Explorer WIRED +http://www.wired.com/2016/03/apple-ios-9s-universal-links-recall-the-bad-old-days-of-internet-explorer/|creationTime|2016-03-27T18:17:13Z +http://whoo.ps/2015/02/23/futures-of-text|creationDate|2015-03-27 +http://whoo.ps/2015/02/23/futures-of-text|tag|http://www.semanlink.net/tag/markus_lanthaler +http://whoo.ps/2015/02/23/futures-of-text|tag|http://www.semanlink.net/tag/ui +http://whoo.ps/2015/02/23/futures-of-text|tag|http://www.semanlink.net/tag/text +http://whoo.ps/2015/02/23/futures-of-text|tag|http://www.semanlink.net/tag/mobile_apps +http://whoo.ps/2015/02/23/futures-of-text|title|Futures of text Whoops by Jonathan Libov +http://whoo.ps/2015/02/23/futures-of-text|creationTime|2015-03-27T23:02:33Z +http://www.mnot.net/cache_docs/|creationDate|2008-01-07 +http://www.mnot.net/cache_docs/|tag|http://www.semanlink.net/tag/http_cache +http://www.mnot.net/cache_docs/|tag|http://www.semanlink.net/tag/tutorial +http://www.mnot.net/cache_docs/|title|Caching tutorial +http://www.mnot.net/cache_docs/|creationTime|2008-01-07T00:35:28Z +https://www.elastic.co/guide/en/elasticsearch/guide/master/languages.html|creationDate|2017-07-18 +https://www.elastic.co/guide/en/elasticsearch/guide/master/languages.html|tag|http://www.semanlink.net/tag/elasticsearch +https://www.elastic.co/guide/en/elasticsearch/guide/master/languages.html|tag|http://www.semanlink.net/tag/nlp_tools +https://www.elastic.co/guide/en/elasticsearch/guide/master/languages.html|title|Dealing with Human Language Elasticsearch: The Definitive Guide [master] +https://www.elastic.co/guide/en/elasticsearch/guide/master/languages.html|creationTime|2017-07-18T14:49:17Z +http://www.latribune.fr/technos-medias/internet/20101122trib000574964/comment-dublin-a-permis-a-google-de-reduire-ses-impots-en-europe.html|creationDate|2010-11-22 
+http://www.latribune.fr/technos-medias/internet/20101122trib000574964/comment-dublin-a-permis-a-google-de-reduire-ses-impots-en-europe.html|tag|http://www.semanlink.net/tag/google +http://www.latribune.fr/technos-medias/internet/20101122trib000574964/comment-dublin-a-permis-a-google-de-reduire-ses-impots-en-europe.html|tag|http://www.semanlink.net/tag/impot +http://www.latribune.fr/technos-medias/internet/20101122trib000574964/comment-dublin-a-permis-a-google-de-reduire-ses-impots-en-europe.html|tag|http://www.semanlink.net/tag/irlande +http://www.latribune.fr/technos-medias/internet/20101122trib000574964/comment-dublin-a-permis-a-google-de-reduire-ses-impots-en-europe.html|title|Comment Dublin a permis à Google de réduire ses impôts en Europe +http://www.latribune.fr/technos-medias/internet/20101122trib000574964/comment-dublin-a-permis-a-google-de-reduire-ses-impots-en-europe.html|creationTime|2010-11-22T16:50:25Z +http://www.graphdracula.net/|creationDate|2011-02-15 +http://www.graphdracula.net/|tag|http://www.semanlink.net/tag/javascript +http://www.graphdracula.net/|tag|http://www.semanlink.net/tag/graph_visualization +http://www.graphdracula.net/|title|Dracula Graph Library +http://www.graphdracula.net/|creationTime|2011-02-15T15:11:56Z +https://www.kickstarter.com/projects/1755283828/open-source-edition-of-livecode|creationDate|2014-11-23 +https://www.kickstarter.com/projects/1755283828/open-source-edition-of-livecode|tag|http://www.semanlink.net/tag/hypercard +https://www.kickstarter.com/projects/1755283828/open-source-edition-of-livecode|title|Next Generation LiveCode (Open Source) by RunRev Ltd — Kickstarter +https://www.kickstarter.com/projects/1755283828/open-source-edition-of-livecode|creationTime|2014-11-23T14:54:34Z +http://www.lemonde.fr/sciences/article/2012/09/20/qui-a-peur-des-verites-scientifiques_1763270_1650684.html|creationDate|2012-09-24 +http://www.lemonde.fr/sciences/article/2012/09/20/qui-a-peur-des-verites-scientifiques_1763270_1650684.html|tag|http://www.semanlink.net/tag/obscurantisme +http://www.lemonde.fr/sciences/article/2012/09/20/qui-a-peur-des-verites-scientifiques_1763270_1650684.html|tag|http://www.semanlink.net/tag/science +http://www.lemonde.fr/sciences/article/2012/09/20/qui-a-peur-des-verites-scientifiques_1763270_1650684.html|title|Qui a peur des vérités scientifiques ? 
+http://www.lemonde.fr/sciences/article/2012/09/20/qui-a-peur-des-verites-scientifiques_1763270_1650684.html|creationTime|2012-09-24T11:20:25Z +http://www.neo4j.org/|creationDate|2013-03-12 +http://www.neo4j.org/|tag|http://www.semanlink.net/tag/neo4j +http://www.neo4j.org/|title|Neo4j: The World's Leading Graph Database +http://www.neo4j.org/|creationTime|2013-03-12T14:59:48Z +http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAM0wMfRs6fW9axUr_Jo++=afX9YRhkzLqTpt84eHFe8WMmnG-Q@mail.gmail.com%3e|creationDate|2015-02-19 +http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAM0wMfRs6fW9axUr_Jo++=afX9YRhkzLqTpt84eHFe8WMmnG-Q@mail.gmail.com%3e|tag|http://www.semanlink.net/tag/javascript_rdf_parser +http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAM0wMfRs6fW9axUr_Jo++=afX9YRhkzLqTpt84eHFe8WMmnG-Q@mail.gmail.com%3e|tag|http://www.semanlink.net/tag/olivier_rossel +http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAM0wMfRs6fW9axUr_Jo++=afX9YRhkzLqTpt84eHFe8WMmnG-Q@mail.gmail.com%3e|comment|https://www.npmjs.com/package/n3 +http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAM0wMfRs6fW9axUr_Jo++=afX9YRhkzLqTpt84eHFe8WMmnG-Q@mail.gmail.com%3e|title|Re: HELP about jena fuseki and NodeJS +http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAM0wMfRs6fW9axUr_Jo++=afX9YRhkzLqTpt84eHFe8WMmnG-Q@mail.gmail.com%3e|creationTime|2015-02-19T01:13:22Z +http://www.google.com/search?hl=en&q=site%3Aety.com&btnG=Google+Search|creationDate|2008-06-22 +http://www.google.com/search?hl=en&q=site%3Aety.com&btnG=Google+Search|tag|http://www.semanlink.net/tag/google +http://www.google.com/search?hl=en&q=site%3Aety.com&btnG=Google+Search|tag|http://www.semanlink.net/tag/censorship +http://www.google.com/search?hl=en&q=site%3Aety.com&btnG=Google+Search|comment|"Cette recherche sur google.fr retourne 0 résultats
+En réponse à une demande légale adressée à Google, nous avons retiré 326 résultat(s) de cette page. Si vous souhaitez en savoir plus sur cette demande, vous pouvez consulter le site ChillingEffects.org." +http://www.google.com/search?hl=en&q=site%3Aety.com&btnG=Google+Search|title|site:ety.com - Google Search +http://www.google.com/search?hl=en&q=site%3Aety.com&btnG=Google+Search|creationTime|2008-06-22T03:11:37Z +http://answers.semanticweb.com/questions/26220/sparql-query-to-return-all-triples-recursively-that-make-up-an-rdfsclass-definition|creationDate|2016-02-13 +http://answers.semanticweb.com/questions/26220/sparql-query-to-return-all-triples-recursively-that-make-up-an-rdfsclass-definition|tag|http://www.semanlink.net/tag/sparql_tips +http://answers.semanticweb.com/questions/26220/sparql-query-to-return-all-triples-recursively-that-make-up-an-rdfsclass-definition|title|Concise Bounded Descriptions with Two CONSTRUCT Queries +http://answers.semanticweb.com/questions/26220/sparql-query-to-return-all-triples-recursively-that-make-up-an-rdfsclass-definition|creationTime|2016-02-13T11:34:43Z +http://aksw.org/Projects/OntoWiki.html|creationDate|2013-09-10 +http://aksw.org/Projects/OntoWiki.html|tag|http://www.semanlink.net/tag/ontowiki +http://aksw.org/Projects/OntoWiki.html|comment|Semantic data wiki as well as Linked Data publishing engine +http://aksw.org/Projects/OntoWiki.html|title|OntoWiki — Agile Knowledge Management and Semantic Web (AKSW) +http://aksw.org/Projects/OntoWiki.html|creationTime|2013-09-10T01:23:52Z +https://research.fb.com/facebook-research-at-emnlp/|creationDate|2018-11-01 +https://research.fb.com/facebook-research-at-emnlp/|tag|http://www.semanlink.net/tag/nlp_facebook +https://research.fb.com/facebook-research-at-emnlp/|tag|http://www.semanlink.net/tag/emnlp_2018 +https://research.fb.com/facebook-research-at-emnlp/|title|Facebook Research at EMNLP – Facebook Research +https://research.fb.com/facebook-research-at-emnlp/|creationTime|2018-11-01T17:12:02Z +http://jquery.org/|creationDate|2011-04-06 +http://jquery.org/|tag|http://www.semanlink.net/tag/jquery +http://jquery.org/|title|jQuery Project +http://jquery.org/|creationTime|2011-04-06T13:40:31Z +http://eunis.eea.europa.eu/|creationDate|2012-01-03 +http://eunis.eea.europa.eu/|tag|http://www.semanlink.net/tag/database +http://eunis.eea.europa.eu/|tag|http://www.semanlink.net/tag/linked_data +http://eunis.eea.europa.eu/|tag|http://www.semanlink.net/tag/biodiversity_data +http://eunis.eea.europa.eu/|tag|http://www.semanlink.net/tag/biodiversite +http://eunis.eea.europa.eu/|title|EUNIS biodiversity database +http://eunis.eea.europa.eu/|creationTime|2012-01-03T12:06:10Z +http://selberg.org/2008/04/23/themes-from-beijing/|creationDate|2008-06-22 +http://selberg.org/2008/04/23/themes-from-beijing/|tag|http://www.semanlink.net/tag/www08 +http://selberg.org/2008/04/23/themes-from-beijing/|title|WWW 2008 keynotes - Erik Selberg » Blog Archive » Themes from Beijing +http://selberg.org/2008/04/23/themes-from-beijing/|creationTime|2008-06-22T02:39:20Z +http://www.snee.com/bobdc.blog/2010/06/restful-sparql-queries-of-rdfa.html|creationDate|2010-06-04 +http://www.snee.com/bobdc.blog/2010/06/restful-sparql-queries-of-rdfa.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2010/06/restful-sparql-queries-of-rdfa.html|tag|http://www.semanlink.net/tag/rdfa +http://www.snee.com/bobdc.blog/2010/06/restful-sparql-queries-of-rdfa.html|tag|http://www.semanlink.net/tag/sparql 
+http://www.snee.com/bobdc.blog/2010/06/restful-sparql-queries-of-rdfa.html|comment|"I can say ""extract the RDF triples from the RDFa on that web page and then run this SPARQL query against it"" all with a single URL." +http://www.snee.com/bobdc.blog/2010/06/restful-sparql-queries-of-rdfa.html|title|RESTful SPARQL queries of RDFa - bobdc.blog +http://www.snee.com/bobdc.blog/2010/06/restful-sparql-queries-of-rdfa.html|creationTime|2010-06-04T11:49:00Z +https://en.wikipedia.org/wiki/Poor_Law_Amendment_Act_1834|creationDate|2017-08-10 +https://en.wikipedia.org/wiki/Poor_Law_Amendment_Act_1834|tag|http://www.semanlink.net/tag/histoire_anglaise +https://en.wikipedia.org/wiki/Poor_Law_Amendment_Act_1834|tag|http://www.semanlink.net/tag/pauvrete +https://en.wikipedia.org/wiki/Poor_Law_Amendment_Act_1834|tag|http://www.semanlink.net/tag/david_ricardo +https://en.wikipedia.org/wiki/Poor_Law_Amendment_Act_1834|title|Poor Law Amendment Act 1834 - Wikipedia +https://en.wikipedia.org/wiki/Poor_Law_Amendment_Act_1834|creationTime|2017-08-10T00:05:32Z +http://webid.myxwiki.org/xwiki|creationDate|2011-01-18 +http://webid.myxwiki.org/xwiki|tag|http://www.semanlink.net/tag/webid +http://webid.myxwiki.org/xwiki|title|Web ID XWiki +http://webid.myxwiki.org/xwiki|creationTime|2011-01-18T10:40:06Z +http://internetactu.blog.lemonde.fr/2011/10/12/la-prochaine-revolution-faites-la-vous-meme/|creationDate|2011-10-13 +http://internetactu.blog.lemonde.fr/2011/10/12/la-prochaine-revolution-faites-la-vous-meme/|tag|http://www.semanlink.net/tag/diy +http://internetactu.blog.lemonde.fr/2011/10/12/la-prochaine-revolution-faites-la-vous-meme/|tag|http://www.semanlink.net/tag/bricolage +http://internetactu.blog.lemonde.fr/2011/10/12/la-prochaine-revolution-faites-la-vous-meme/|title|La prochaine révolution ? Faites-la vous même ! 
InternetActu +http://internetactu.blog.lemonde.fr/2011/10/12/la-prochaine-revolution-faites-la-vous-meme/|creationTime|2011-10-13T23:44:19Z +http://www.lemonde.fr/emploi/article/2015/11/17/bernard-stiegler-je-propose-la-mise-en-place-d-un-revenu-contributif-qui-favorise-l-engagement-dans-des-projets_4812202_1698637.html|creationDate|2016-03-11 +http://www.lemonde.fr/emploi/article/2015/11/17/bernard-stiegler-je-propose-la-mise-en-place-d-un-revenu-contributif-qui-favorise-l-engagement-dans-des-projets_4812202_1698637.html|tag|http://www.semanlink.net/tag/travail +http://www.lemonde.fr/emploi/article/2015/11/17/bernard-stiegler-je-propose-la-mise-en-place-d-un-revenu-contributif-qui-favorise-l-engagement-dans-des-projets_4812202_1698637.html|tag|http://www.semanlink.net/tag/bernard_stiegler +http://www.lemonde.fr/emploi/article/2015/11/17/bernard-stiegler-je-propose-la-mise-en-place-d-un-revenu-contributif-qui-favorise-l-engagement-dans-des-projets_4812202_1698637.html|tag|http://www.semanlink.net/tag/jobbotization +http://www.lemonde.fr/emploi/article/2015/11/17/bernard-stiegler-je-propose-la-mise-en-place-d-un-revenu-contributif-qui-favorise-l-engagement-dans-des-projets_4812202_1698637.html|title|Bernard Stiegler : « Je propose la mise en place d’un revenu contributif, qui favorise l’engagement dans des projets » +http://www.lemonde.fr/emploi/article/2015/11/17/bernard-stiegler-je-propose-la-mise-en-place-d-un-revenu-contributif-qui-favorise-l-engagement-dans-des-projets_4812202_1698637.html|creationTime|2016-03-11T20:39:51Z +http://www.datasciencecentral.com/profiles/blogs/document-similarity-analysis-using-elasticsearch-and-python|creationDate|2017-07-19 +http://www.datasciencecentral.com/profiles/blogs/document-similarity-analysis-using-elasticsearch-and-python|tag|http://www.semanlink.net/tag/python_sample_code +http://www.datasciencecentral.com/profiles/blogs/document-similarity-analysis-using-elasticsearch-and-python|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.datasciencecentral.com/profiles/blogs/document-similarity-analysis-using-elasticsearch-and-python|tag|http://www.semanlink.net/tag/elasticsearch +http://www.datasciencecentral.com/profiles/blogs/document-similarity-analysis-using-elasticsearch-and-python|tag|http://www.semanlink.net/tag/text_similarity +http://www.datasciencecentral.com/profiles/blogs/document-similarity-analysis-using-elasticsearch-and-python|title|Document Similarity Analysis Using ElasticSearch and Python - Data Science Central +http://www.datasciencecentral.com/profiles/blogs/document-similarity-analysis-using-elasticsearch-and-python|creationTime|2017-07-19T14:23:50Z +http://www.jpl.nasa.gov/news/news.php?release=2013-120|creationDate|2013-04-04 +http://www.jpl.nasa.gov/news/news.php?release=2013-120|tag|http://www.semanlink.net/tag/titan +http://www.jpl.nasa.gov/news/news.php?release=2013-120|title|NASA Team Investigates Complex Chemistry at Titan - NASA Jet Propulsion Laboratory +http://www.jpl.nasa.gov/news/news.php?release=2013-120|creationTime|2013-04-04T14:15:59Z +http://prefuse.sourceforge.net/|creationDate|2005-09-25 +http://prefuse.sourceforge.net/|tag|http://www.semanlink.net/tag/graph_visualization +http://prefuse.sourceforge.net/|title|prefuse: an interactive visualization toolkit +https://fr.slideshare.net/BhaskarMitra3/a-simple-introduction-to-word-embeddings|creationDate|2017-06-16 +https://fr.slideshare.net/BhaskarMitra3/a-simple-introduction-to-word-embeddings|tag|http://www.semanlink.net/tag/slideshare 
+https://fr.slideshare.net/BhaskarMitra3/a-simple-introduction-to-word-embeddings|tag|http://www.semanlink.net/tag/introduction +https://fr.slideshare.net/BhaskarMitra3/a-simple-introduction-to-word-embeddings|tag|http://www.semanlink.net/tag/word_embedding +https://fr.slideshare.net/BhaskarMitra3/a-simple-introduction-to-word-embeddings|comment|topical vs typical relatedness +https://fr.slideshare.net/BhaskarMitra3/a-simple-introduction-to-word-embeddings|title|A Simple Introduction to Word Embeddings +https://fr.slideshare.net/BhaskarMitra3/a-simple-introduction-to-word-embeddings|creationTime|2017-06-16T01:54:23Z +http://bugbrother.blog.lemonde.fr/2013/10/17/un-quarteron-deurodeputes-va-brader-nos-vies-privees/|creationDate|2013-10-18 +http://bugbrother.blog.lemonde.fr/2013/10/17/un-quarteron-deurodeputes-va-brader-nos-vies-privees/|tag|http://www.semanlink.net/tag/vie_privee +http://bugbrother.blog.lemonde.fr/2013/10/17/un-quarteron-deurodeputes-va-brader-nos-vies-privees/|tag|http://www.semanlink.net/tag/parlement_europeen +http://bugbrother.blog.lemonde.fr/2013/10/17/un-quarteron-deurodeputes-va-brader-nos-vies-privees/|tag|http://www.semanlink.net/tag/lobby +http://bugbrother.blog.lemonde.fr/2013/10/17/un-quarteron-deurodeputes-va-brader-nos-vies-privees/|tag|http://www.semanlink.net/tag/privacy_and_internet +http://bugbrother.blog.lemonde.fr/2013/10/17/un-quarteron-deurodeputes-va-brader-nos-vies-privees/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2013/10/17/un-quarteron-deurodeputes-va-brader-nos-vies-privees/|title|Un quarteron d’eurodéputés va brader nos vies privées BUG BROTHER +http://bugbrother.blog.lemonde.fr/2013/10/17/un-quarteron-deurodeputes-va-brader-nos-vies-privees/|creationTime|2013-10-18T00:04:12Z +https://medium.com/@tcherry/understanding-the-persian-empire-50b73bdcae58|creationDate|2019-05-15 +https://medium.com/@tcherry/understanding-the-persian-empire-50b73bdcae58|tag|http://www.semanlink.net/tag/perse +https://medium.com/@tcherry/understanding-the-persian-empire-50b73bdcae58|comment|> When Cyrus eventually conquered Babylon, he freed the Jews and allowed them to return to Israel to rebuild their cities. +https://medium.com/@tcherry/understanding-the-persian-empire-50b73bdcae58|title|Understanding the Persian Empire – Cher Yi – Medium +https://medium.com/@tcherry/understanding-the-persian-empire-50b73bdcae58|creationTime|2019-05-15T18:06:54Z +http://danbri.org/words/2008/07/04/361|creationDate|2008-07-04 +http://danbri.org/words/2008/07/04/361|tag|http://www.semanlink.net/tag/semantic_wiki +http://danbri.org/words/2008/07/04/361|tag|http://www.semanlink.net/tag/semantic_media_wiki +http://danbri.org/words/2008/07/04/361|tag|http://www.semanlink.net/tag/hosting +http://danbri.org/words/2008/07/04/361|comment|referata.com: a hosting site for SMW-based semantic wikis. This is not the first site to offer hosting of wikis using Semantic MediaWiki (that’s Wikia, as of a few months ago), but it is the first to also offer the usage of Semantic Forms, Semantic Drilldown, Semantic Calendar, Semantic Google Maps and some of the other related extensions you’ve probably heard about; Widgets, Header Tabs, etc. As such, I consider it the first site that lets people create true collaborative databases, where many people can work together on a set of well-structured data. 
+http://danbri.org/words/2008/07/04/361|title|danbri’s foaf stories » Referata, a Semantic Media Wiki hosting site +http://danbri.org/words/2008/07/04/361|creationTime|2008-07-04T14:07:48Z +http://www.omnytex.com/articles/xhrstruts/|creationDate|2005-05-20 +http://www.omnytex.com/articles/xhrstruts/|tag|http://www.semanlink.net/tag/howto +http://www.omnytex.com/articles/xhrstruts/|tag|http://www.semanlink.net/tag/struts +http://www.omnytex.com/articles/xhrstruts/|tag|http://www.semanlink.net/tag/ajax +http://www.omnytex.com/articles/xhrstruts/|title|Ajax using XMLHttpRequest and Struts +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3224,50-1176699,0.html|creationDate|2009-04-04 +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3224,50-1176699,0.html|tag|http://www.semanlink.net/tag/revolution_francaise +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3224,50-1176699,0.html|tag|http://www.semanlink.net/tag/crise_financiere +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3224,50-1176699,0.html|tag|http://www.semanlink.net/tag/la_france_est_un_pays_regicide +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3224,50-1176699,0.html|comment|Après 1789, 2009 ? +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3224,50-1176699,0.html|title|Après 1789, 2009 ? +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3224,50-1176699,0.html|creationTime|2009-04-04T19:06:42Z +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3224,50-1176699,0.html|source|Le Monde +https://github.com/zalandoresearch/flair|creationDate|2018-08-24 +https://github.com/zalandoresearch/flair|tag|http://www.semanlink.net/tag/github_project +https://github.com/zalandoresearch/flair|tag|http://www.semanlink.net/tag/nlp_tools +https://github.com/zalandoresearch/flair|tag|http://www.semanlink.net/tag/flair +https://github.com/zalandoresearch/flair|tag|http://www.semanlink.net/tag/word_embedding +https://github.com/zalandoresearch/flair|tag|http://www.semanlink.net/tag/sequence_labeling +https://github.com/zalandoresearch/flair|comment|"> A very simple framework for state-of-the-art NLP. Developed by Zalando Research. 
+ +paper: [""Contextual String Embeddings for Sequence Labeling (2018)""](/doc/?uri=http%3A%2F%2Faclweb.org%2Fanthology%2FC18-1139) +" +https://github.com/zalandoresearch/flair|relatedDoc|http://aclweb.org/anthology/C18-1139 +https://github.com/zalandoresearch/flair|title|zalandoresearch/flair: A very simple framework for state-of-the-art NLP +https://github.com/zalandoresearch/flair|creationTime|2018-08-24T10:13:33Z +http://jena.sourceforge.net/DB/index.html|creationDate|2008-09-10 +http://jena.sourceforge.net/DB/index.html|tag|http://www.semanlink.net/tag/jena_and_database +http://jena.sourceforge.net/DB/index.html|title|Jena Relational Database backend +http://jena.sourceforge.net/DB/index.html|creationTime|2008-09-10T18:25:58Z +http://emnlp2014.org/tutorials/8_notes.pdf|creationDate|2017-11-12 +http://emnlp2014.org/tutorials/8_notes.pdf|tag|http://www.semanlink.net/tag/antoine_bordes +http://emnlp2014.org/tutorials/8_notes.pdf|tag|http://www.semanlink.net/tag/embeddings +http://emnlp2014.org/tutorials/8_notes.pdf|tag|http://www.semanlink.net/tag/nlp_facebook +http://emnlp2014.org/tutorials/8_notes.pdf|comment|"- Part 1: Unsupervised and Supervised Embeddings +- Part 2: Embeddings for Multi-relational Data" +http://emnlp2014.org/tutorials/8_notes.pdf|title|Embeddings methods for NLP (2014) (tutorial - Jason Weston - Facebook Research) +http://emnlp2014.org/tutorials/8_notes.pdf|creationTime|2017-11-12T11:42:07Z +http://www.coolestguyplanettech.com/downtown/install-and-configure-apache-mysql-php-and-phpmyadmin-osx-108-mountain-lion|creationDate|2012-11-05 +http://www.coolestguyplanettech.com/downtown/install-and-configure-apache-mysql-php-and-phpmyadmin-osx-108-mountain-lion|tag|http://www.semanlink.net/tag/mac_os_x_web_serving +http://www.coolestguyplanettech.com/downtown/install-and-configure-apache-mysql-php-and-phpmyadmin-osx-108-mountain-lion|tag|http://www.semanlink.net/tag/mac_os_x_10_8 +http://www.coolestguyplanettech.com/downtown/install-and-configure-apache-mysql-php-and-phpmyadmin-osx-108-mountain-lion|tag|http://www.semanlink.net/tag/apache_on_my_mac +http://www.coolestguyplanettech.com/downtown/install-and-configure-apache-mysql-php-and-phpmyadmin-osx-108-mountain-lion|title|Install and configure Apache, MySQL, PHP on OSX 10.8 Mountain Lion +http://www.coolestguyplanettech.com/downtown/install-and-configure-apache-mysql-php-and-phpmyadmin-osx-108-mountain-lion|creationTime|2012-11-05T08:02:55Z +http://www.lemonde.fr/planete/article/2010/07/09/bp-pret-a-lancer-un-forage-tres-perilleux-en-arctique_1385731_3244.html#ens_id=1341498|creationDate|2010-07-15 +http://www.lemonde.fr/planete/article/2010/07/09/bp-pret-a-lancer-un-forage-tres-perilleux-en-arctique_1385731_3244.html#ens_id=1341498|tag|http://www.semanlink.net/tag/bp +http://www.lemonde.fr/planete/article/2010/07/09/bp-pret-a-lancer-un-forage-tres-perilleux-en-arctique_1385731_3244.html#ens_id=1341498|tag|http://www.semanlink.net/tag/arctique +http://www.lemonde.fr/planete/article/2010/07/09/bp-pret-a-lancer-un-forage-tres-perilleux-en-arctique_1385731_3244.html#ens_id=1341498|title|BP prêt à lancer un forage très périlleux en Arctique +http://www.lemonde.fr/planete/article/2010/07/09/bp-pret-a-lancer-un-forage-tres-perilleux-en-arctique_1385731_3244.html#ens_id=1341498|creationTime|2010-07-15T10:47:30Z +http://www.lemonde.fr/planete/article/2010/07/09/bp-pret-a-lancer-un-forage-tres-perilleux-en-arctique_1385731_3244.html#ens_id=1341498|source|Le Monde 
+http://www.lemonde.fr/planete/article/2010/07/09/bp-pret-a-lancer-un-forage-tres-perilleux-en-arctique_1385731_3244.html#ens_id=1341498|date|2010-07-10
+http://www.scoop.it/|creationDate|2013-12-17
+http://www.scoop.it/|title|Build engaged audiences through publishing by curation. Scoop.it
+http://www.scoop.it/|creationTime|2013-12-17T00:51:21Z
+http://scikit-learn.org/stable/modules/pipeline.html|creationDate|2015-10-21
+http://scikit-learn.org/stable/modules/pipeline.html|comment|"Sample code: Feature Union with Heterogeneous Data Sources
+
+
+"
+http://scikit-learn.org/stable/modules/pipeline.html|title|Pipeline and FeatureUnion: combining estimators — scikit-learn documentation
+http://scikit-learn.org/stable/modules/pipeline.html|creationTime|2015-10-21T23:04:49Z
+http://www.lemonde.fr/afrique/article/2012/07/01/la-destruction-des-remparts-protecteurs-de-tombouctou_1727539_3212.html|creationDate|2012-07-01
+http://www.lemonde.fr/afrique/article/2012/07/01/la-destruction-des-remparts-protecteurs-de-tombouctou_1727539_3212.html|tag|http://www.semanlink.net/tag/tombouctou
+http://www.lemonde.fr/afrique/article/2012/07/01/la-destruction-des-remparts-protecteurs-de-tombouctou_1727539_3212.html|title|"Les mausolées, ""remparts protecteurs"" de Tombouctou, détruits par Ansar Eddine"
+http://www.lemonde.fr/afrique/article/2012/07/01/la-destruction-des-remparts-protecteurs-de-tombouctou_1727539_3212.html|creationTime|2012-07-01T15:26:23Z
+http://dig.csail.mit.edu/breadcrumbs/node/215|creationDate|2007-11-23
+http://dig.csail.mit.edu/breadcrumbs/node/215|tag|http://www.semanlink.net/tag/tim_berners_lee
+http://dig.csail.mit.edu/breadcrumbs/node/215|tag|http://www.semanlink.net/tag/graph
+http://dig.csail.mit.edu/breadcrumbs/node/215|title|Giant Global Graph Decentralized Information Group (DIG) Breadcrumbs
+http://dig.csail.mit.edu/breadcrumbs/node/215|creationTime|2007-11-23T13:46:52Z
+https://pipelines.puppet.com/docs/tutorials/build-and-deploy-python-with-docker/|creationDate|2018-03-26
+https://pipelines.puppet.com/docs/tutorials/build-and-deploy-python-with-docker/|tag|http://www.semanlink.net/tag/docker_python
+https://pipelines.puppet.com/docs/tutorials/build-and-deploy-python-with-docker/|title|How to Build and Deploy a Python Application on Docker Distelli
+https://pipelines.puppet.com/docs/tutorials/build-and-deploy-python-with-docker/|creationTime|2018-03-26T08:26:50Z
+https://theintercept.com/2019/04/17/green-new-deal-short-film-alexandria-ocasio-cortez/|creationDate|2019-04-26
+https://theintercept.com/2019/04/17/green-new-deal-short-film-alexandria-ocasio-cortez/|tag|http://www.semanlink.net/tag/green_new_deal
+https://theintercept.com/2019/04/17/green-new-deal-short-film-alexandria-ocasio-cortez/|tag|http://www.semanlink.net/tag/alexandria_ocasio_cortez
+https://theintercept.com/2019/04/17/green-new-deal-short-film-alexandria-ocasio-cortez/|title|A Message From the Future With Alexandria Ocasio-Cortez
+https://theintercept.com/2019/04/17/green-new-deal-short-film-alexandria-ocasio-cortez/|creationTime|2019-04-26T23:02:15Z
+http://www.mollio.org/|creationDate|2006-03-28
+http://www.mollio.org/|tag|http://www.semanlink.net/tag/css_html_templates
+http://www.mollio.org/|tag|http://www.semanlink.net/tag/css
+http://www.mollio.org/|comment|Mollio is a simple set of html/css templates. The aim was to create a set of page templates that use css for layout as well as some sample basic content which has also had some css applied.
+http://www.mollio.org/|title|Mollio CSS/HTML Templates
+http://www.pbs.org/mediashift/2013/04/why-facebook-will-have-trouble-achieving-search-success104|creationDate|2013-05-06
+http://www.pbs.org/mediashift/2013/04/why-facebook-will-have-trouble-achieving-search-success104|tag|http://www.semanlink.net/tag/facebook_graph_search
+http://www.pbs.org/mediashift/2013/04/why-facebook-will-have-trouble-achieving-search-success104|title|Why Facebook’s Graph Search Could Be Doomed Mediashift PBS
+http://www.pbs.org/mediashift/2013/04/why-facebook-will-have-trouble-achieving-search-success104|creationTime|2013-05-06T17:10:21Z
+http://ksl.stanford.edu/|creationDate|2006-02-02
+http://ksl.stanford.edu/|tag|http://www.semanlink.net/tag/stanford
+http://ksl.stanford.edu/|tag|http://www.semanlink.net/tag/artificial_intelligence
+http://ksl.stanford.edu/|tag|http://www.semanlink.net/tag/semantic_web
+http://ksl.stanford.edu/|comment|"KSL conducts research in the areas of knowledge representation and automated
+reasoning in the Artificial Intelligence Laboratory of the Department of Computer Science at Stanford University. Current work focuses on enabling technology for the Semantic Web, hybrid reasoning, explaining answers from heterogeneous applications, deductive question-answering, representing and reasoning with multiple contexts, knowledge aggregation, ontology engineering, and knowledge-based technology for intelligence analysts and other knowledge workers."
+http://ksl.stanford.edu/|title|Stanford Knowledge Systems, AI Laboratory
+http://www.npr.org/templates/story/story.php?storyId=96564952|creationDate|2008-12-13
+http://www.npr.org/templates/story/story.php?storyId=96564952|tag|http://www.semanlink.net/tag/industrie_miniere
+http://www.npr.org/templates/story/story.php?storyId=96564952|tag|http://www.semanlink.net/tag/diamant
+http://www.npr.org/templates/story/story.php?storyId=96564952|tag|http://www.semanlink.net/tag/canada
+http://www.npr.org/templates/story/story.php?storyId=96564952|title|Mining For Diamonds In The Canadian Rough
+http://www.npr.org/templates/story/story.php?storyId=96564952|creationTime|2008-12-13T12:30:56Z
+https://www.nytimes.com/2018/07/03/technology/personaltech/safari-reader-mode.html|creationDate|2019-03-23
+https://www.nytimes.com/2018/07/03/technology/personaltech/safari-reader-mode.html|tag|http://www.semanlink.net/tag/reader_mode_browsers
+https://www.nytimes.com/2018/07/03/technology/personaltech/safari-reader-mode.html|tag|http://www.semanlink.net/tag/new_york_times
+https://www.nytimes.com/2018/07/03/technology/personaltech/safari-reader-mode.html|tag|http://www.semanlink.net/tag/times
+https://www.nytimes.com/2018/07/03/technology/personaltech/safari-reader-mode.html|tag|http://www.semanlink.net/tag/new_york
+https://www.nytimes.com/2018/07/03/technology/personaltech/safari-reader-mode.html|tag|http://www.semanlink.net/tag/safari
+https://www.nytimes.com/2018/07/03/technology/personaltech/safari-reader-mode.html|title|Reader Mode in Safari - The New York Times
+https://www.nytimes.com/2018/07/03/technology/personaltech/safari-reader-mode.html|creationTime|2019-03-23T14:24:44Z
+http://lists.w3.org/Archives/Public/public-lod/2013Aug/0021.html|creationDate|2013-08-06
+http://lists.w3.org/Archives/Public/public-lod/2013Aug/0021.html|tag|http://www.semanlink.net/tag/webid
+http://lists.w3.org/Archives/Public/public-lod/2013Aug/0021.html|tag|http://www.semanlink.net/tag/public_lod_w3_org
+http://lists.w3.org/Archives/Public/public-lod/2013Aug/0021.html|tag|http://www.semanlink.net/tag/hugh_glaser
+http://lists.w3.org/Archives/Public/public-lod/2013Aug/0021.html|title|WebID Frustration from Hugh Glaser on 2013-08-06 (public-lod@w3.org from August 2013)
+http://lists.w3.org/Archives/Public/public-lod/2013Aug/0021.html|creationTime|2013-08-06T20:09:21Z
+https://workflowy.com/|creationDate|2012-01-17
+https://workflowy.com/|tag|http://www.semanlink.net/tag/danny_ayers
+https://workflowy.com/|tag|http://www.semanlink.net/tag/web_tools
+https://workflowy.com/|tag|http://www.semanlink.net/tag/organizer
+https://workflowy.com/|title|WorkFlowy - Organize your brain.
+https://workflowy.com/|creationTime|2012-01-17T01:09:04Z
+https://jpetazzo.github.io/2015/01/19/dockerfile-and-data-in-volumes/|creationDate|2016-04-14
+https://jpetazzo.github.io/2015/01/19/dockerfile-and-data-in-volumes/|tag|http://www.semanlink.net/tag/docker_volumes
+https://jpetazzo.github.io/2015/01/19/dockerfile-and-data-in-volumes/|comment|Now what if you really want the data to be on a volume, because you need native I/O speeds?
+https://jpetazzo.github.io/2015/01/19/dockerfile-and-data-in-volumes/|title|Putting data in a volume in a Dockerfile
+https://jpetazzo.github.io/2015/01/19/dockerfile-and-data-in-volumes/|creationTime|2016-04-14T01:07:45Z
+http://www.afrik.com/article4010.html|creationDate|2007-08-24
+http://www.afrik.com/article4010.html|tag|http://www.semanlink.net/tag/rigolo
+http://www.afrik.com/article4010.html|tag|http://www.semanlink.net/tag/gabon
+http://www.afrik.com/article4010.html|tag|http://www.semanlink.net/tag/film
+http://www.afrik.com/article4010.html|tag|http://www.semanlink.net/tag/comedie
+http://www.afrik.com/article4010.html|tag|http://www.semanlink.net/tag/corruption
+http://www.afrik.com/article4010.html|tag|http://www.semanlink.net/tag/politique
+http://www.afrik.com/article4010.html|tag|http://www.semanlink.net/tag/sexe
+http://www.afrik.com/article4010.html|tag|http://www.semanlink.net/tag/cinema_africain
+http://www.afrik.com/article4010.html|tag|http://www.semanlink.net/tag/elephant
+http://www.afrik.com/article4010.html|comment|"Comédie gabonaise
+""Ce n'est pas parce que l'éléphant est maigre que ses couilles ne remplissent pas une calebasse.""
+Une très bonne rengaine accompagne le film : ""My Son"", de Wasis Diop
+"
+http://www.afrik.com/article4010.html|title|Les couilles de l'éléphant
+http://www.afrik.com/article4010.html|creationTime|2007-08-24T23:37:52Z
+http://www.talis.com/tdn/platform/user/bigfoot/tour|creationDate|2008-07-20
+http://www.talis.com/tdn/platform/user/bigfoot/tour|tag|http://www.semanlink.net/tag/talis_platform
+http://www.talis.com/tdn/platform/user/bigfoot/tour|comment|initial introduction to Bigfoot Stores and the APIs and services you can expect from one.
+http://www.talis.com/tdn/platform/user/bigfoot/tour|title|Bigfoot - An initial tour Talis Developer Network +http://www.talis.com/tdn/platform/user/bigfoot/tour|creationTime|2008-07-20T15:41:49Z +https://arxiv.org/abs/1605.07427|creationDate|2018-11-14 +https://arxiv.org/abs/1605.07427|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1605.07427|tag|http://www.semanlink.net/tag/hierarchical_memory_networks +https://arxiv.org/abs/1605.07427|tag|http://www.semanlink.net/tag/yoshua_bengio +https://arxiv.org/abs/1605.07427|arxiv_author|Yoshua Bengio +https://arxiv.org/abs/1605.07427|arxiv_author|Hugo Larochelle +https://arxiv.org/abs/1605.07427|arxiv_author|Gerald Tesauro +https://arxiv.org/abs/1605.07427|arxiv_author|Sarath Chandar +https://arxiv.org/abs/1605.07427|arxiv_author|Sungjin Ahn +https://arxiv.org/abs/1605.07427|arxiv_author|Pascal Vincent +https://arxiv.org/abs/1605.07427|comment|> hybrid between hard and soft attention memory networks. The memory is organized in a hierarchical structure such that reading from it is done with less computation than soft attention over a flat memory, while also being easier to train than hard attention over a flat memory +https://arxiv.org/abs/1605.07427|title|[1605.07427] Hierarchical Memory Networks +https://arxiv.org/abs/1605.07427|creationTime|2018-11-14T01:42:02Z +https://arxiv.org/abs/1605.07427|arxiv_summary|"Memory networks are neural networks with an explicit memory component that +can be both read and written to by the network. The memory is often addressed +in a soft way using a softmax function, making end-to-end training with +backpropagation possible. However, this is not computationally scalable for +applications which require the network to read from extremely large memories. +On the other hand, it is well known that hard attention mechanisms based on +reinforcement learning are challenging to train successfully. In this paper, we +explore a form of hierarchical memory network, which can be considered as a +hybrid between hard and soft attention memory networks. The memory is organized +in a hierarchical structure such that reading from it is done with less +computation than soft attention over a flat memory, while also being easier to +train than hard attention over a flat memory. Specifically, we propose to +incorporate Maximum Inner Product Search (MIPS) in the training and inference +procedures for our hierarchical memory network. We explore the use of various +state-of-the art approximate MIPS techniques and report results on +SimpleQuestions, a challenging large scale factoid question answering task." 
+https://arxiv.org/abs/1605.07427|arxiv_firstAuthor|Sarath Chandar
+https://arxiv.org/abs/1605.07427|arxiv_updated|2016-05-24T12:48:19Z
+https://arxiv.org/abs/1605.07427|arxiv_title|Hierarchical Memory Networks
+https://arxiv.org/abs/1605.07427|arxiv_published|2016-05-24T12:48:19Z
+https://arxiv.org/abs/1605.07427|arxiv_num|1605.07427
+https://stats.stackexchange.com/questions/244616/how-sampling-works-in-word2vec-can-someone-please-make-me-understand-nce-and-ne/245452#245452|creationDate|2018-07-07
+https://stats.stackexchange.com/questions/244616/how-sampling-works-in-word2vec-can-someone-please-make-me-understand-nce-and-ne/245452#245452|tag|http://www.semanlink.net/tag/word2vec
+https://stats.stackexchange.com/questions/244616/how-sampling-works-in-word2vec-can-someone-please-make-me-understand-nce-and-ne/245452#245452|tag|http://www.semanlink.net/tag/noise_contrastive_estimation
+https://stats.stackexchange.com/questions/244616/how-sampling-works-in-word2vec-can-someone-please-make-me-understand-nce-and-ne/245452#245452|comment|"> In order to deal with the issue of the expensive computation of the softmax, Word2Vec uses a technique called noise-contrastive estimation... **The basic idea is to convert a multinomial classification problem (as it is the problem of predicting the next word) to a binary classification problem.**
+"
+https://stats.stackexchange.com/questions/244616/how-sampling-works-in-word2vec-can-someone-please-make-me-understand-nce-and-ne/245452#245452|title|How sampling works in Word2vec? Can someone please make me understand NCE and negative sampling? - Cross Validated
+https://stats.stackexchange.com/questions/244616/how-sampling-works-in-word2vec-can-someone-please-make-me-understand-nce-and-ne/245452#245452|creationTime|2018-07-07T15:02:59Z
+http://www.liberation.fr/actualite/societe/272511.FR.php|creationDate|2007-09-25
+http://www.liberation.fr/actualite/societe/272511.FR.php|tag|http://www.semanlink.net/tag/ca_craint
+http://www.liberation.fr/actualite/societe/272511.FR.php|tag|http://www.semanlink.net/tag/juif
+http://www.liberation.fr/actualite/societe/272511.FR.php|tag|http://www.semanlink.net/tag/carte_d_identite
+http://www.liberation.fr/actualite/societe/272511.FR.php|tag|http://www.semanlink.net/tag/certificat_de_nationalite
+http://www.liberation.fr/actualite/societe/272511.FR.php|comment|"""Le pire étant lorsque après avoir été français pendant plusieurs décennies, on vient vous dire : ha, mais en fait, non vous n'êtes pas français, vous allez devoir retourner dans votre pays car vous êtes en plus sans-papiers ! 
»" +http://www.liberation.fr/actualite/societe/272511.FR.php|title|"«Ma grand-mère a sorti sa carte avec la mention ""juif""»" +http://www.liberation.fr/actualite/societe/272511.FR.php|creationTime|2007-09-25T22:09:58Z +http://steveharris.tumblr.com/post/4590579712/construct-json|creationDate|2012-02-20 +http://steveharris.tumblr.com/post/4590579712/construct-json|tag|http://www.semanlink.net/tag/rdf_in_json +http://steveharris.tumblr.com/post/4590579712/construct-json|tag|http://www.semanlink.net/tag/sparql_construct +http://steveharris.tumblr.com/post/4590579712/construct-json|title|Misc Thoughts, CONSTRUCT JSON +http://steveharris.tumblr.com/post/4590579712/construct-json|creationTime|2012-02-20T22:26:57Z +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0099.html|creationDate|2013-07-07 +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0099.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0099.html|tag|http://www.semanlink.net/tag/bernard_vatant +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0099.html|title|schema:domain / Re: Official OWL version outdated from Bernard Vatant on 2013-05-13 (public-vocabs@w3.org from May 2013) +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0099.html|creationTime|2013-07-07T00:20:05Z +http://internetactu.blog.lemonde.fr/2014/08/27/modifier-le-vivant-pour-sauver-la-planete/|creationDate|2014-08-28 +http://internetactu.blog.lemonde.fr/2014/08/27/modifier-le-vivant-pour-sauver-la-planete/|tag|http://www.semanlink.net/tag/sauver_la_planete +http://internetactu.blog.lemonde.fr/2014/08/27/modifier-le-vivant-pour-sauver-la-planete/|tag|http://www.semanlink.net/tag/synthetic_biology +http://internetactu.blog.lemonde.fr/2014/08/27/modifier-le-vivant-pour-sauver-la-planete/|tag|http://www.semanlink.net/tag/biodiversite +http://internetactu.blog.lemonde.fr/2014/08/27/modifier-le-vivant-pour-sauver-la-planete/|title|Modifier le vivant pour sauver la planète ? InternetActu +http://internetactu.blog.lemonde.fr/2014/08/27/modifier-le-vivant-pour-sauver-la-planete/|creationTime|2014-08-28T11:56:31Z +https://dl.acm.org/citation.cfm?id=3186000|creationDate|2018-04-28 +https://dl.acm.org/citation.cfm?id=3186000|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://dl.acm.org/citation.cfm?id=3186000|tag|http://www.semanlink.net/tag/knowledge_discovery +https://dl.acm.org/citation.cfm?id=3186000|comment|"**Best paper award** at theWebConf 2018. + +An approach to harvest higher-arity facts from textual sources. Our method is distantly supervised by seed facts, and uses the fact-pattern duality principle to gather fact candidates with high recall. For high precision, we devise a constraint-based reasoning method to eliminate false candidates. A major novelty is in coping with the difficulty that higher-arity facts are often expressed only partially in texts and strewn across multiple sources. For example, one sentence may refer to a drug, a disease and a group of patients, whereas another sentence talks about the drug, its dosage and the target group without mentioning the disease. Our methods cope well with such partially observed facts, at both pattern-learning and constraint-reasoning stages. 
+ +" +https://dl.acm.org/citation.cfm?id=3186000|title|HighLife: Higher-arity Fact Harvesting +https://dl.acm.org/citation.cfm?id=3186000|creationTime|2018-04-28T01:06:34Z +http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2.html#(1)|creationDate|2008-06-19 +http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2.html#(1)|tag|http://www.semanlink.net/tag/kingsley_idehen +http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2.html#(1)|tag|http://www.semanlink.net/tag/slides +http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2.html#(1)|tag|http://www.semanlink.net/tag/linked_data +http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2.html#(1)|title|Creating, Deploying and Exploiting Linked Data +http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2.html#(1)|creationTime|2008-06-19T18:22:52Z +https://medium.com/swlh/chatbots-were-the-next-big-thing-what-happened-5fc49dd6fa61|creationDate|2018-06-08 +https://medium.com/swlh/chatbots-were-the-next-big-thing-what-happened-5fc49dd6fa61|tag|http://www.semanlink.net/tag/chatbot +https://medium.com/swlh/chatbots-were-the-next-big-thing-what-happened-5fc49dd6fa61|title|Chatbots were the next big thing: what happened? – The Startup – Medium +https://medium.com/swlh/chatbots-were-the-next-big-thing-what-happened-5fc49dd6fa61|creationTime|2018-06-08T00:20:41Z +https://news.dartmouth.edu/events/event?event=53673#.W9RjaS_pPMU|creationDate|2018-10-27 +https://news.dartmouth.edu/events/event?event=53673#.W9RjaS_pPMU|tag|http://www.semanlink.net/tag/self_supervised_learning +https://news.dartmouth.edu/events/event?event=53673#.W9RjaS_pPMU|tag|http://www.semanlink.net/tag/yann_lecun +https://news.dartmouth.edu/events/event?event=53673#.W9RjaS_pPMU|comment|"I will propose the hypothesis that **self-supervised learning of predictive world models is an essential missing ingredient of current approaches to AI**. With such models, one can predict outcomes and plan courses of actions. One could argue that prediction is the essence of intelligence.  Good predictive models may be the basis of intuition, reasoning and ""common sense"", allowing us to fill in missing information: predicting the future from the past and present, or inferring the state of the world from noisy percepts." 
+https://news.dartmouth.edu/events/event?event=53673#.W9RjaS_pPMU|title|Self-Supervised Learning, Yann LeCun, Facebook AI Research Dartmouth News +https://news.dartmouth.edu/events/event?event=53673#.W9RjaS_pPMU|creationTime|2018-10-27T15:12:16Z +http://datajournalism.stanford.edu/|creationDate|2010-10-25 +http://datajournalism.stanford.edu/|tag|http://www.semanlink.net/tag/information_visualization +http://datajournalism.stanford.edu/|tag|http://www.semanlink.net/tag/journalisme +http://datajournalism.stanford.edu/|title|Journalism in the Age of Data: A Video Report on Data Visualization by Geoff McGhee +http://datajournalism.stanford.edu/|creationTime|2010-10-25T23:19:07Z +http://dev.data2000.no/sgvizler/|creationDate|2015-08-14 +http://dev.data2000.no/sgvizler/|tag|http://www.semanlink.net/tag/sparql +http://dev.data2000.no/sgvizler/|tag|http://www.semanlink.net/tag/javascript +http://dev.data2000.no/sgvizler/|tag|http://www.semanlink.net/tag/sparql_en_javascript +http://dev.data2000.no/sgvizler/|comment|a javascript which renders the result of SPARQL SELECT queries into charts or html elements. +http://dev.data2000.no/sgvizler/|title|Sgvizler +http://dev.data2000.no/sgvizler/|creationTime|2015-08-14T15:00:14Z +http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050721.html|creationDate|2005-11-02 +http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050721.html|tag|http://www.semanlink.net/tag/what_is_life +http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050721.html|title|NOVA scienceNOW Dispatches: What We're Thinking About: What Is Life? PBS +https://www.researchgate.net/publication/291098860_Modifications_for_the_Cluster_Content_Discovery_and_the_Cluster_Label_Induction_Phases_of_the_Lingo_Algorithm|creationDate|2017-11-11 +https://www.researchgate.net/publication/291098860_Modifications_for_the_Cluster_Content_Discovery_and_the_Cluster_Label_Induction_Phases_of_the_Lingo_Algorithm|tag|http://www.semanlink.net/tag/lingo +https://www.researchgate.net/publication/291098860_Modifications_for_the_Cluster_Content_Discovery_and_the_Cluster_Label_Induction_Phases_of_the_Lingo_Algorithm|comment|"two modifications for the Lingo algorithm in order to increase recall. 
+
+(Includes description of original algo, of ""current"" v3.7.1 algo, and of 2 proposed modifications)"
+https://www.researchgate.net/publication/291098860_Modifications_for_the_Cluster_Content_Discovery_and_the_Cluster_Label_Induction_Phases_of_the_Lingo_Algorithm|title|Modifications for the Cluster Content Discovery and the Cluster Label Induction Phases of the Lingo Algorithm (2014)
+https://www.researchgate.net/publication/291098860_Modifications_for_the_Cluster_Content_Discovery_and_the_Cluster_Label_Induction_Phases_of_the_Lingo_Algorithm|creationTime|2017-11-11T16:26:16Z
+http://redlink.co/adding-semantic-search-to-apache-solr/|creationDate|2014-03-30
+http://redlink.co/adding-semantic-search-to-apache-solr/|tag|http://www.semanlink.net/tag/solr
+http://redlink.co/adding-semantic-search-to-apache-solr/|tag|http://www.semanlink.net/tag/semantic_search
+http://redlink.co/adding-semantic-search-to-apache-solr/|title|Adding Semantic Search to Apache Solr
+http://redlink.co/adding-semantic-search-to-apache-solr/|creationTime|2014-03-30T16:50:08Z
+http://www.semanlink.net/doc/2019/05/python_speech_to_text_with_pock|creationDate|2019-05-27
+http://www.semanlink.net/doc/2019/05/python_speech_to_text_with_pock|tag|http://www.semanlink.net/tag/sample_code
+http://www.semanlink.net/doc/2019/05/python_speech_to_text_with_pock|tag|http://www.semanlink.net/tag/pocketsphinx
+http://www.semanlink.net/doc/2019/05/python_speech_to_text_with_pock|title|Python speech to text with PocketSphinx – sophie's blog
+http://www.semanlink.net/doc/2019/05/python_speech_to_text_with_pock|bookmarkOf|http://blog.justsophie.com/python-speech-to-text-with-pocketsphinx/
+http://www.semanlink.net/doc/2019/05/python_speech_to_text_with_pock|creationTime|2019-05-27T19:18:30Z
+https://ajax.dev.java.net/|creationDate|2006-05-29
+https://ajax.dev.java.net/|tag|http://www.semanlink.net/tag/java
+https://ajax.dev.java.net/|tag|http://www.semanlink.net/tag/sun_microsystems
+https://ajax.dev.java.net/|tag|http://www.semanlink.net/tag/ajax
+https://ajax.dev.java.net/|comment|jMaki is all about enabling Java developers to use JavaScript in their Java based applications as either a JSP tag library or a JSF component.
+https://ajax.dev.java.net/|title|ajax: Project jMaki
+http://news.stanford.edu/pr/2014/pr-neurogrid-boahen-engineering-042814.html|creationDate|2014-04-30
+http://news.stanford.edu/pr/2014/pr-neurogrid-boahen-engineering-042814.html|tag|http://www.semanlink.net/tag/stanford
+http://news.stanford.edu/pr/2014/pr-neurogrid-boahen-engineering-042814.html|tag|http://www.semanlink.net/tag/neuromorphic_system
+http://news.stanford.edu/pr/2014/pr-neurogrid-boahen-engineering-042814.html|tag|http://www.semanlink.net/tag/computational_neuroscience
+http://news.stanford.edu/pr/2014/pr-neurogrid-boahen-engineering-042814.html|title|Stanford bioengineers create circuit board modeled on the human brain Stanford News Release
+http://news.stanford.edu/pr/2014/pr-neurogrid-boahen-engineering-042814.html|creationTime|2014-04-30T13:59:32Z
+http://vuejs.org/|creationDate|2015-10-11
+http://vuejs.org/|tag|http://www.semanlink.net/tag/javascript_librairies
+http://vuejs.org/|comment|Vue.js is a library for building modern web interfaces. It provides data-reactive components with a simple and flexible API.
+http://vuejs.org/|title|vue.js
+http://vuejs.org/|creationTime|2015-10-11T09:46:57Z
+http://www.w3.org/DesignIssues/Abstractions.html|creationDate|2007-11-14
+http://www.w3.org/DesignIssues/Abstractions.html|tag|http://www.semanlink.net/tag/tim_berners_lee
+http://www.w3.org/DesignIssues/Abstractions.html|tag|http://www.semanlink.net/tag/architecture_of_the_world_wide_web
+http://www.w3.org/DesignIssues/Abstractions.html|comment|The Semantic Web's realization is: It isn't the documents which are actually interesting, it is the things they are about!
+http://www.w3.org/DesignIssues/Abstractions.html|title|Abstractions in Web architecture - Design Issues
+http://www.w3.org/DesignIssues/Abstractions.html|creationTime|2007-11-14T14:29:01Z
+https://selfdrivingcars.mit.edu/|creationDate|2018-01-09
+https://selfdrivingcars.mit.edu/|tag|http://www.semanlink.net/tag/deep_learning
+https://selfdrivingcars.mit.edu/|tag|http://www.semanlink.net/tag/driverless_car
+https://selfdrivingcars.mit.edu/|title|MIT 6.S094: Deep Learning for Self-Driving Cars
+https://selfdrivingcars.mit.edu/|creationTime|2018-01-09T13:43:25Z
+http://obscuredclarity.blogspot.fr/2012/05/using-maven-offline.html|creationDate|2013-09-21
+http://obscuredclarity.blogspot.fr/2012/05/using-maven-offline.html|tag|http://www.semanlink.net/tag/maven_tips
+http://obscuredclarity.blogspot.fr/2012/05/using-maven-offline.html|tag|http://www.semanlink.net/tag/maven
+http://obscuredclarity.blogspot.fr/2012/05/using-maven-offline.html|comment|"""Could not resolve dependencies for project"" from local repository: explanations ;
+workaround:<br/>
+cd ~/.m2
+find . -name '_maven.repositories' -type f -delete
+2016-10, similar situation:
+find . -name '_remote.repositories' -type f -delete
+eclipse pref, maven check offline<br/>
+
+
+
+
+
+"
+http://obscuredclarity.blogspot.fr/2012/05/using-maven-offline.html|title|Obscured Clarity: Using Maven Offline
+http://obscuredclarity.blogspot.fr/2012/05/using-maven-offline.html|creationTime|2013-09-21T01:35:30Z
+http://www.joelonsoftware.com/articles/Wrong.html|creationDate|2006-12-10
+http://www.joelonsoftware.com/articles/Wrong.html|tag|http://www.semanlink.net/tag/dev
+http://www.joelonsoftware.com/articles/Wrong.html|tag|http://www.semanlink.net/tag/microsoft
+http://www.joelonsoftware.com/articles/Wrong.html|title|Making Wrong Code Look Wrong - Joel on Software
+http://www.bbc.com/news/health-34857015|creationDate|2015-11-26
+http://www.bbc.com/news/health-34857015|tag|http://www.semanlink.net/tag/antibiotic_resistance
+http://www.bbc.com/news/health-34857015|title|Antibiotic resistance: World on cusp of 'post-antibiotic era' - BBC News
+http://www.bbc.com/news/health-34857015|creationTime|2015-11-26T00:05:39Z
+http://www.geocities.com/anpipniger/index.html|creationDate|2006-04-02
+http://www.geocities.com/anpipniger/index.html|tag|http://www.semanlink.net/tag/irrigation
+http://www.geocities.com/anpipniger/index.html|tag|http://www.semanlink.net/tag/verger_de_gado_a_niamey
+http://www.geocities.com/anpipniger/index.html|comment|PROJET DE PROMOTION DE L’IRRIGATION PRIVEE Immeuble Leyma 2ème étage Niamey NIGER Tél : 73 38 07 Fax : 73 62 93 Email: info@anpip.org anpip@intnet.ne
+http://www.geocities.com/anpipniger/index.html|title|AGENCE NIGERIENNE POUR LA PROMOTION DE L'IRRIGATION PRIVEE Agence Nigérienne pour la Promotion de l'Irrigation Privée
+http://www.w3schools.com/cssref/css_selectors.asp|creationDate|2013-07-12
+http://www.w3schools.com/cssref/css_selectors.asp|tag|http://www.semanlink.net/tag/css
+http://www.w3schools.com/cssref/css_selectors.asp|title|CSS Selector Reference
+http://www.w3schools.com/cssref/css_selectors.asp|creationTime|2013-07-12T15:42:46Z
+http://wiki.apache.org/solr/SolrTomcat#URI_Charset_Config|creationDate|2012-11-26
+http://wiki.apache.org/solr/SolrTomcat#URI_Charset_Config|tag|http://www.semanlink.net/tag/tomcat
+http://wiki.apache.org/solr/SolrTomcat#URI_Charset_Config|tag|http://www.semanlink.net/tag/solr_not_english_only
+http://wiki.apache.org/solr/SolrTomcat#URI_Charset_Config|title|SolrTomcat - URI Charset Config
+http://wiki.apache.org/solr/SolrTomcat#URI_Charset_Config|creationTime|2012-11-26T00:17:14Z
+http://tomcat.apache.org/tomcat-5.0-doc/config/context.html|creationDate|2006-09-13
+http://tomcat.apache.org/tomcat-5.0-doc/config/context.html|tag|http://www.semanlink.net/tag/developer_documentation
+http://tomcat.apache.org/tomcat-5.0-doc/config/context.html|tag|http://www.semanlink.net/tag/tomcat
+http://tomcat.apache.org/tomcat-5.0-doc/config/context.html|title|Server Configuration Reference - The Context Container
+http://searchengineland.com/5-ways-optimize-markup-knowledge-graph-semantic-search-186755|creationDate|2014-03-26
+http://searchengineland.com/5-ways-optimize-markup-knowledge-graph-semantic-search-186755|tag|http://www.semanlink.net/tag/knowledge_graph
+http://searchengineland.com/5-ways-optimize-markup-knowledge-graph-semantic-search-186755|tag|http://www.semanlink.net/tag/search_engines
+http://searchengineland.com/5-ways-optimize-markup-knowledge-graph-semantic-search-186755|tag|http://www.semanlink.net/tag/seo
+http://searchengineland.com/5-ways-optimize-markup-knowledge-graph-semantic-search-186755|tag|http://www.semanlink.net/tag/semantic_web_introduction
+http://searchengineland.com/5-ways-optimize-markup-knowledge-graph-semantic-search-186755|tag|http://www.semanlink.net/tag/schema_org +http://searchengineland.com/5-ways-optimize-markup-knowledge-graph-semantic-search-186755|title|"How To Tell Search Engines What ""Entities"" Are On Your Web Pages" +http://searchengineland.com/5-ways-optimize-markup-knowledge-graph-semantic-search-186755|creationTime|2014-03-26T13:12:20Z +http://www.geoportail.fr/|creationDate|2006-09-23 +http://www.geoportail.fr/|tag|http://www.semanlink.net/tag/carte +http://www.geoportail.fr/|tag|http://www.semanlink.net/tag/ign +http://www.geoportail.fr/|tag|http://www.semanlink.net/tag/la_terre_vue_du_ciel +http://www.geoportail.fr/|title|GéoPortail +http://www.lemonde.fr/idees/article/2017/10/05/monsanto-papers-des-derives-inadmissibles_5196563_3232.html|creationDate|2017-10-06 +http://www.lemonde.fr/idees/article/2017/10/05/monsanto-papers-des-derives-inadmissibles_5196563_3232.html|tag|http://www.semanlink.net/tag/monsanto +http://www.lemonde.fr/idees/article/2017/10/05/monsanto-papers-des-derives-inadmissibles_5196563_3232.html|title|« Monsanto Papers » : des dérives inadmissibles +http://www.lemonde.fr/idees/article/2017/10/05/monsanto-papers-des-derives-inadmissibles_5196563_3232.html|creationTime|2017-10-06T00:40:01Z +http://fr.wikipedia.org/wiki/Paradoxe_du_singe_savant|creationDate|2013-08-07 +http://fr.wikipedia.org/wiki/Paradoxe_du_singe_savant|tag|http://www.semanlink.net/tag/paradoxe +http://fr.wikipedia.org/wiki/Paradoxe_du_singe_savant|tag|http://www.semanlink.net/tag/singe +http://fr.wikipedia.org/wiki/Paradoxe_du_singe_savant|tag|http://www.semanlink.net/tag/infini +http://fr.wikipedia.org/wiki/Paradoxe_du_singe_savant|comment|"Dans la mesure où la réalisation de tous les possibles équivaut à une absence totale d'information, une boutade classique de physiciens consiste à dire qu'il se peut tout simplement que nous ne soyons que l'une des formes possibles du ""rien""" +http://fr.wikipedia.org/wiki/Paradoxe_du_singe_savant|title|Paradoxe du singe savant - Wikipédia +http://fr.wikipedia.org/wiki/Paradoxe_du_singe_savant|creationTime|2013-08-07T00:39:22Z +http://adventuresinmachinelearning.com/recurrent-neural-networks-lstm-tutorial-tensorflow/|creationDate|2017-10-23 +http://adventuresinmachinelearning.com/recurrent-neural-networks-lstm-tutorial-tensorflow/|tag|http://www.semanlink.net/tag/tensorflow +http://adventuresinmachinelearning.com/recurrent-neural-networks-lstm-tutorial-tensorflow/|tag|http://www.semanlink.net/tag/lstm_networks +http://adventuresinmachinelearning.com/recurrent-neural-networks-lstm-tutorial-tensorflow/|tag|http://www.semanlink.net/tag/recurrent_neural_network +http://adventuresinmachinelearning.com/recurrent-neural-networks-lstm-tutorial-tensorflow/|tag|http://www.semanlink.net/tag/tutorial +http://adventuresinmachinelearning.com/recurrent-neural-networks-lstm-tutorial-tensorflow/|title|Recurrent neural networks and LSTM tutorial in Python and TensorFlow - Adventures in Machine Learning +http://adventuresinmachinelearning.com/recurrent-neural-networks-lstm-tutorial-tensorflow/|creationTime|2017-10-23T08:53:16Z +http://en.wikipedia.org/wiki/Pre-Siberian_American_Aborigines#Lagoa_Santa|creationDate|2008-10-26 +http://en.wikipedia.org/wiki/Pre-Siberian_American_Aborigines#Lagoa_Santa|tag|http://www.semanlink.net/tag/first_americans +http://en.wikipedia.org/wiki/Pre-Siberian_American_Aborigines#Lagoa_Santa|title|Pre-Siberian American Aborigines - Wikipedia, the free encyclopedia 
+http://en.wikipedia.org/wiki/Pre-Siberian_American_Aborigines#Lagoa_Santa|creationTime|2008-10-26T23:26:44Z
+http://www.redmelon.net/tstme/4corners/|creationDate|2005-05-25
+http://www.redmelon.net/tstme/4corners/|tag|http://www.semanlink.net/tag/css
+http://www.redmelon.net/tstme/4corners/|title|CSS Rounded Corners
+https://en.wikipedia.org/wiki/Madonna:_Truth_or_Dare|creationDate|2018-08-04
+https://en.wikipedia.org/wiki/Madonna:_Truth_or_Dare|tag|http://www.semanlink.net/tag/madonna
+https://en.wikipedia.org/wiki/Madonna:_Truth_or_Dare|tag|http://www.semanlink.net/tag/film_americain
+https://en.wikipedia.org/wiki/Madonna:_Truth_or_Dare|comment|"Documentary film chronicling Madonna's 1990 World Tour
+"
+https://en.wikipedia.org/wiki/Madonna:_Truth_or_Dare|title|In Bed with Madonna
+https://en.wikipedia.org/wiki/Madonna:_Truth_or_Dare|creationTime|2018-08-04T01:21:08Z
+http://poetes.com/hugo/nuitdu4.htm|creationDate|2006-03-11
+http://poetes.com/hugo/nuitdu4.htm|tag|http://www.semanlink.net/tag/hugo
+http://poetes.com/hugo/nuitdu4.htm|tag|http://www.semanlink.net/tag/poesie
+http://poetes.com/hugo/nuitdu4.htm|comment|"C'est pour cela qu'il faut que les vieilles grand-mères,<br/>
+De leurs pauvres doigts gris que fait trembler
+le temps,<br/>
+Cousent dans le linceul des enfants de sept ans.
+
+"
+http://poetes.com/hugo/nuitdu4.htm|title|Souvenir de la nuit du 4
+https://nlp.stanford.edu/software/tmt/tmt-0.2/|creationDate|2017-05-23
+https://nlp.stanford.edu/software/tmt/tmt-0.2/|tag|http://www.semanlink.net/tag/topic_modeling
+https://nlp.stanford.edu/software/tmt/tmt-0.2/|tag|http://www.semanlink.net/tag/nlp_stanford
+https://nlp.stanford.edu/software/tmt/tmt-0.2/|title|Stanford Topic Modeling Toolbox
+https://nlp.stanford.edu/software/tmt/tmt-0.2/|creationTime|2017-05-23T15:16:18Z
+http://karpathy.github.io/2016/09/07/phd/|creationDate|2017-08-27
+http://karpathy.github.io/2016/09/07/phd/|tag|http://www.semanlink.net/tag/andrej_karpathy
+http://karpathy.github.io/2016/09/07/phd/|tag|http://www.semanlink.net/tag/phd
+http://karpathy.github.io/2016/09/07/phd/|title|A Survival Guide to a PhD
+http://karpathy.github.io/2016/09/07/phd/|creationTime|2017-08-27T02:20:49Z
+http://www.semantic-web.at/1.36.resource.271.adrian-paschke-x22-corporate-semantic-web-also-addresses-the-pragmatic-aspects-of-using-se.htm|creationDate|2009-01-27
+http://www.semantic-web.at/1.36.resource.271.adrian-paschke-x22-corporate-semantic-web-also-addresses-the-pragmatic-aspects-of-using-se.htm|tag|http://www.semanlink.net/tag/semantic_enterprise
+http://www.semantic-web.at/1.36.resource.271.adrian-paschke-x22-corporate-semantic-web-also-addresses-the-pragmatic-aspects-of-using-se.htm|title|"Adrian Paschke: ""Corporate Semantic Web also addresses the pragmatic aspects of using Semantic Web technologies."""
+http://www.semantic-web.at/1.36.resource.271.adrian-paschke-x22-corporate-semantic-web-also-addresses-the-pragmatic-aspects-of-using-se.htm|creationTime|2009-01-27T19:44:53Z
+http://fr.wikipedia.org/wiki/Nouvelles_en_trois_lignes|creationDate|2014-05-27
+http://fr.wikipedia.org/wiki/Nouvelles_en_trois_lignes|tag|http://www.semanlink.net/tag/twitterature
+http://fr.wikipedia.org/wiki/Nouvelles_en_trois_lignes|comment|Madame Fournier, M. Voisin, M. Septeuil se sont pendus : neurasthénie, cancer, chômage.
+http://fr.wikipedia.org/wiki/Nouvelles_en_trois_lignes|title|Nouvelles en trois lignes — Wikipédia
+http://fr.wikipedia.org/wiki/Nouvelles_en_trois_lignes|creationTime|2014-05-27T14:09:39Z
+http://www.mmds.org/|creationDate|2014-08-29
+http://www.mmds.org/|tag|http://www.semanlink.net/tag/coursera
+http://www.mmds.org/|tag|http://www.semanlink.net/tag/data_mining
+http://www.mmds.org/|title|Mining of Massive Datasets
+http://www.mmds.org/|creationTime|2014-08-29T00:42:53Z
+https://medium.com/@TechMagic/reactjs-vs-angular5-vs-vue-js-what-to-choose-in-2018-b91e028fa91d|creationDate|2019-03-04
+https://medium.com/@TechMagic/reactjs-vs-angular5-vs-vue-js-what-to-choose-in-2018-b91e028fa91d|tag|http://www.semanlink.net/tag/javascript_frameork
+https://medium.com/@TechMagic/reactjs-vs-angular5-vs-vue-js-what-to-choose-in-2018-b91e028fa91d|tag|http://www.semanlink.net/tag/survey
+https://medium.com/@TechMagic/reactjs-vs-angular5-vs-vue-js-what-to-choose-in-2018-b91e028fa91d|title|ReactJS vs Angular5 vs Vue.js — What to choose in 2018?
+https://medium.com/@TechMagic/reactjs-vs-angular5-vs-vue-js-what-to-choose-in-2018-b91e028fa91d|creationTime|2019-03-04T08:50:42Z
+http://docs.info.apple.com/article.html?artnum=106290|creationDate|2008-02-07
+http://docs.info.apple.com/article.html?artnum=106290|tag|http://www.semanlink.net/tag/mac_os_x
+http://docs.info.apple.com/article.html?artnum=106290|tag|http://www.semanlink.net/tag/howto
+http://docs.info.apple.com/article.html?artnum=106290|title|"Enabling and using the ""root"" user in Mac OS X"
+http://docs.info.apple.com/article.html?artnum=106290|creationTime|2008-02-07T22:18:50Z
+http://apassant.net/blog/2012/01/22/about-json-ld-and-content-negotiation/|creationDate|2013-09-10
+http://apassant.net/blog/2012/01/22/about-json-ld-and-content-negotiation/|tag|http://www.semanlink.net/tag/content_negotiation
+http://apassant.net/blog/2012/01/22/about-json-ld-and-content-negotiation/|tag|http://www.semanlink.net/tag/json_ld
+http://apassant.net/blog/2012/01/22/about-json-ld-and-content-negotiation/|tag|http://www.semanlink.net/tag/alexandre_passant
+http://apassant.net/blog/2012/01/22/about-json-ld-and-content-negotiation/|title|About JSON-LD and Content-Negotiation Alexandre Passant
+http://apassant.net/blog/2012/01/22/about-json-ld-and-content-negotiation/|creationTime|2013-09-10T12:34:14Z
+http://readwrite.com/2008/09/02/twitter_novels_not_big_success_stories#awesm=~oFt2tEn7VYxTp4|creationDate|2014-05-27
+http://readwrite.com/2008/09/02/twitter_novels_not_big_success_stories#awesm=~oFt2tEn7VYxTp4|tag|http://www.semanlink.net/tag/twitterature
+http://readwrite.com/2008/09/02/twitter_novels_not_big_success_stories#awesm=~oFt2tEn7VYxTp4|title|Twitter Novels: Not Big Success Stories Yet – ReadWrite
+http://readwrite.com/2008/09/02/twitter_novels_not_big_success_stories#awesm=~oFt2tEn7VYxTp4|creationTime|2014-05-27T14:11:57Z
+https://www.youtube.com/watch?v=qThJEKhNgvY|creationDate|2014-11-17
+https://www.youtube.com/watch?v=qThJEKhNgvY|tag|http://www.semanlink.net/tag/souvenirs
+https://www.youtube.com/watch?v=qThJEKhNgvY|tag|http://www.semanlink.net/tag/musique_bresilienne
+https://www.youtube.com/watch?v=qThJEKhNgvY|tag|http://www.semanlink.net/tag/youtube_video
+https://www.youtube.com/watch?v=qThJEKhNgvY|comment|"ou ici"
+https://www.youtube.com/watch?v=qThJEKhNgvY|title|"Alceu Valença - Morena Tropicana ""Ao Vivo"" HD - YouTube"
+https://www.youtube.com/watch?v=qThJEKhNgvY|creationTime|2014-11-17T15:01:40Z
+http://www.w3.org/DesignIssues/Diff|creationDate|2005-12-16
+http://www.w3.org/DesignIssues/Diff|tag|http://www.semanlink.net/tag/dev
+http://www.w3.org/DesignIssues/Diff|tag|http://www.semanlink.net/tag/dan_connolly
+http://www.w3.org/DesignIssues/Diff|tag|http://www.semanlink.net/tag/rdf_graphs
+http://www.w3.org/DesignIssues/Diff|tag|http://www.semanlink.net/tag/tim_berners_lee
+http://www.w3.org/DesignIssues/Diff|title|Delta: an ontology for the distribution of differences between RDF graphs
+http://stackoverflow.com/questions/1389736/how-do-i-create-a-unique-id-in-java|creationDate|2017-04-07
+http://stackoverflow.com/questions/1389736/how-do-i-create-a-unique-id-in-java|tag|http://www.semanlink.net/tag/stack_overflow
+http://stackoverflow.com/questions/1389736/how-do-i-create-a-unique-id-in-java|tag|http://www.semanlink.net/tag/java_tip
+http://stackoverflow.com/questions/1389736/how-do-i-create-a-unique-id-in-java|title|uniqueidentifier - How do I create a unique ID in Java? - Stack Overflow
+http://stackoverflow.com/questions/1389736/how-do-i-create-a-unique-id-in-java|creationTime|2017-04-07T01:19:14Z
+http://www.toptal.com/machine-learning/an-introduction-to-deep-learning-from-perceptrons-to-deep-networks|creationDate|2014-03-28
+http://www.toptal.com/machine-learning/an-introduction-to-deep-learning-from-perceptrons-to-deep-networks|tag|http://www.semanlink.net/tag/java_tool
+http://www.toptal.com/machine-learning/an-introduction-to-deep-learning-from-perceptrons-to-deep-networks|tag|http://www.semanlink.net/tag/artificial_neural_network
+http://www.toptal.com/machine-learning/an-introduction-to-deep-learning-from-perceptrons-to-deep-networks|tag|http://www.semanlink.net/tag/deep_learning
+http://www.toptal.com/machine-learning/an-introduction-to-deep-learning-from-perceptrons-to-deep-networks|tag|http://www.semanlink.net/tag/perceptron
+http://www.toptal.com/machine-learning/an-introduction-to-deep-learning-from-perceptrons-to-deep-networks|tag|http://www.semanlink.net/tag/introduction
+http://www.toptal.com/machine-learning/an-introduction-to-deep-learning-from-perceptrons-to-deep-networks|title|An Introduction to Deep Learning (in Java): From Perceptrons to Deep Networks Toptal
+http://www.toptal.com/machine-learning/an-introduction-to-deep-learning-from-perceptrons-to-deep-networks|creationTime|2014-03-28T00:47:28Z
+http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/paper04.pdf|creationDate|2007-07-13
+http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/paper04.pdf|tag|http://www.semanlink.net/tag/owled_2007_and_fps
+http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/paper04.pdf|tag|http://www.semanlink.net/tag/fps_paper
+http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/paper04.pdf|comment|Mon papier à OWLED 2007
+http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/paper04.pdf|title|"""Semantic Web Technologies in Technical Automotive"" - CEUR-WS.org/Vol-258 - OWL: Experiences and Directions 2007"
+http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-258/paper04.pdf|creationTime|2007-07-13T18:47:11Z
+http://lists.w3.org/Archives/Public/public-awwsw/2011Jan/0021.html|creationDate|2012-04-11
+http://lists.w3.org/Archives/Public/public-awwsw/2011Jan/0021.html|tag|http://www.semanlink.net/tag/httprange_14
+http://lists.w3.org/Archives/Public/public-awwsw/2011Jan/0021.html|title|Re: [Fwd: Reversing HTTP Range 14 and SemWeb Cool URIs decision]
+http://lists.w3.org/Archives/Public/public-awwsw/2011Jan/0021.html|creationTime|2012-04-11T13:40:09Z
+http://www.philippelamberts.eu/referendum-uk-les-eurodeputes-reagissent/|creationDate|2016-06-29
+http://www.philippelamberts.eu/referendum-uk-les-eurodeputes-reagissent/|tag|http://www.semanlink.net/tag/verts
+http://www.philippelamberts.eu/referendum-uk-les-eurodeputes-reagissent/|tag|http://www.semanlink.net/tag/brexit
+http://www.philippelamberts.eu/referendum-uk-les-eurodeputes-reagissent/|title|Brexit: les Eurodéputés réagissent Philippe Lamberts
+http://www.philippelamberts.eu/referendum-uk-les-eurodeputes-reagissent/|creationTime|2016-06-29T02:11:02Z
+http://fr.wikipedia.org/wiki/Exp%C3%A9dition_Endurance|creationDate|2009-01-14
+http://fr.wikipedia.org/wiki/Exp%C3%A9dition_Endurance|tag|http://www.semanlink.net/tag/aventure
+http://fr.wikipedia.org/wiki/Exp%C3%A9dition_Endurance|tag|http://www.semanlink.net/tag/antarctique
+http://fr.wikipedia.org/wiki/Exp%C3%A9dition_Endurance|title|Expédition Endurance - Wikipédia
+http://fr.wikipedia.org/wiki/Exp%C3%A9dition_Endurance|creationTime|2009-01-14T21:23:09Z
+https://jakearchibald.com/2014/browser-cache-vary-broken/|creationDate|2016-03-29
+https://jakearchibald.com/2014/browser-cache-vary-broken/|tag|http://www.semanlink.net/tag/http_cache
+https://jakearchibald.com/2014/browser-cache-vary-broken/|tag|http://www.semanlink.net/tag/brouteur
+https://jakearchibald.com/2014/browser-cache-vary-broken/|tag|http://www.semanlink.net/tag/cache
+https://jakearchibald.com/2014/browser-cache-vary-broken/|tag|http://www.semanlink.net/tag/vary_header
+https://jakearchibald.com/2014/browser-cache-vary-broken/|title|The browser cache is Vary broken - JakeArchibald.com
+https://jakearchibald.com/2014/browser-cache-vary-broken/|creationTime|2016-03-29T16:47:41Z
+http://www.nytimes.com/external/readwriteweb/2010/07/01/01readwriteweb-how-best-buy-is-using-the-semantic-web-23031.html|creationDate|2010-07-30
+http://www.nytimes.com/external/readwriteweb/2010/07/01/01readwriteweb-how-best-buy-is-using-the-semantic-web-23031.html|tag|http://www.semanlink.net/tag/rdfa
+http://www.nytimes.com/external/readwriteweb/2010/07/01/01readwriteweb-how-best-buy-is-using-the-semantic-web-23031.html|tag|http://www.semanlink.net/tag/goodrelations
+http://www.nytimes.com/external/readwriteweb/2010/07/01/01readwriteweb-how-best-buy-is-using-the-semantic-web-23031.html|tag|http://www.semanlink.net/tag/semantic_enterprise
+http://www.nytimes.com/external/readwriteweb/2010/07/01/01readwriteweb-how-best-buy-is-using-the-semantic-web-23031.html|tag|http://www.semanlink.net/tag/semantic_web_use_cases
+http://www.nytimes.com/external/readwriteweb/2010/07/01/01readwriteweb-how-best-buy-is-using-the-semantic-web-23031.html|title|How Best Buy Is Using The Semantic Web - NYTimes.com
+http://www.nytimes.com/external/readwriteweb/2010/07/01/01readwriteweb-how-best-buy-is-using-the-semantic-web-23031.html|creationTime|2010-07-30T14:44:16Z
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=22190|creationDate|2007-11-15
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=22190|tag|http://www.semanlink.net/tag/giovanni_tummarello
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=22190|tag|http://www.semanlink.net/tag/lod_mailing_list
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=22190|title|[Linking-open-data] ann: Semantic Web Pipes
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=22190|creationTime|2007-11-15T08:47:02Z
+https://uwspace.uwaterloo.ca/bitstream/handle/10012/3000/thesis.pdf|creationDate|2018-10-27
+https://uwspace.uwaterloo.ca/bitstream/handle/10012/3000/thesis.pdf|tag|http://www.semanlink.net/tag/imbalanced_data
+https://uwspace.uwaterloo.ca/bitstream/handle/10012/3000/thesis.pdf|tag|http://www.semanlink.net/tag/statistical_classification
+https://uwspace.uwaterloo.ca/bitstream/handle/10012/3000/thesis.pdf|tag|http://www.semanlink.net/tag/survey
+https://uwspace.uwaterloo.ca/bitstream/handle/10012/3000/thesis.pdf|tag|http://www.semanlink.net/tag/adaboost
+https://uwspace.uwaterloo.ca/bitstream/handle/10012/3000/thesis.pdf|tag|http://www.semanlink.net/tag/boosting
+https://uwspace.uwaterloo.ca/bitstream/handle/10012/3000/thesis.pdf|comment|The proposed cost-sensitive boosting algorithms are applicable to any base classifier where AdaBoost can be applied
+https://uwspace.uwaterloo.ca/bitstream/handle/10012/3000/thesis.pdf|title|Cost-Sensitive Boosting for Classification of Imbalanced Data (2007)
+https://uwspace.uwaterloo.ca/bitstream/handle/10012/3000/thesis.pdf|creationTime|2018-10-27T23:55:46Z
+http://www.macosxhints.com/|creationDate|2007-03-02
+http://www.macosxhints.com/|tag|http://www.semanlink.net/tag/howto
+http://www.macosxhints.com/|tag|http://www.semanlink.net/tag/mac_os_x
+http://www.macosxhints.com/|title|macosxhints.com - OS X tips and tricks!
+http://www.macosxhints.com/|creationTime|2007-03-02T21:16:06Z
+http://www.lemonde.fr/international/article/2013/01/29/les-manuscrits-de-tombouctou-victimes-des-islamistes-et-de-la-corruption_1823836_3210.html|creationDate|2013-01-29
+http://www.lemonde.fr/international/article/2013/01/29/les-manuscrits-de-tombouctou-victimes-des-islamistes-et-de-la-corruption_1823836_3210.html|tag|http://www.semanlink.net/tag/abrutis
+http://www.lemonde.fr/international/article/2013/01/29/les-manuscrits-de-tombouctou-victimes-des-islamistes-et-de-la-corruption_1823836_3210.html|tag|http://www.semanlink.net/tag/destruction_de_vestiges_antiques
+http://www.lemonde.fr/international/article/2013/01/29/les-manuscrits-de-tombouctou-victimes-des-islamistes-et-de-la-corruption_1823836_3210.html|tag|http://www.semanlink.net/tag/manuscrits_de_tombouctou
+http://www.lemonde.fr/international/article/2013/01/29/les-manuscrits-de-tombouctou-victimes-des-islamistes-et-de-la-corruption_1823836_3210.html|tag|http://www.semanlink.net/tag/corruption
+http://www.lemonde.fr/international/article/2013/01/29/les-manuscrits-de-tombouctou-victimes-des-islamistes-et-de-la-corruption_1823836_3210.html|tag|http://www.semanlink.net/tag/aqmi
+http://www.lemonde.fr/international/article/2013/01/29/les-manuscrits-de-tombouctou-victimes-des-islamistes-et-de-la-corruption_1823836_3210.html|title|Les manuscrits de Tombouctou, victimes des islamistes et de la corruption
+http://www.lemonde.fr/international/article/2013/01/29/les-manuscrits-de-tombouctou-victimes-des-islamistes-et-de-la-corruption_1823836_3210.html|creationTime|2013-01-29T18:14:33Z
+http://programminghistorian.org/lessons/topic-modeling-and-mallet|creationDate|2012-09-20
+http://programminghistorian.org/lessons/topic-modeling-and-mallet|tag|http://www.semanlink.net/tag/mallet
+http://programminghistorian.org/lessons/topic-modeling-and-mallet|tag|http://www.semanlink.net/tag/tutorial
+http://programminghistorian.org/lessons/topic-modeling-and-mallet|tag|http://www.semanlink.net/tag/topic_modeling
+http://programminghistorian.org/lessons/topic-modeling-and-mallet|comment|"- what topic modeling is and why you might want to employ it<br/>
+- how to install and work with the MALLET natural language processing toolkit to do so"
+http://programminghistorian.org/lessons/topic-modeling-and-mallet|title|Getting Started with Topic Modeling and MALLET
+http://programminghistorian.org/lessons/topic-modeling-and-mallet|creationTime|2012-09-20T10:47:05Z
+https://www.tensorflow.org/install/install_mac|creationDate|2017-10-23
+https://www.tensorflow.org/install/install_mac|tag|http://www.semanlink.net/tag/tensorflow
+https://www.tensorflow.org/install/install_mac|title|Installing TensorFlow on Mac OS X    TensorFlow
+https://www.tensorflow.org/install/install_mac|creationTime|2017-10-23T00:19:06Z
+http://fr.wikipedia.org/wiki/Plasmide|creationDate|2013-07-12
+http://fr.wikipedia.org/wiki/Plasmide|tag|http://www.semanlink.net/tag/genetique
+http://fr.wikipedia.org/wiki/Plasmide|tag|http://www.semanlink.net/tag/manipulations_genetiques
+http://fr.wikipedia.org/wiki/Plasmide|title|Plasmide - Wikipédia
+http://fr.wikipedia.org/wiki/Plasmide|creationTime|2013-07-12T12:48:27Z
+http://blog.iandavis.com/2011/08/21/google-author-rich-snippets/|creationDate|2011-08-22
+http://blog.iandavis.com/2011/08/21/google-author-rich-snippets/|tag|http://www.semanlink.net/tag/google_rich_snippets
+http://blog.iandavis.com/2011/08/21/google-author-rich-snippets/|tag|http://www.semanlink.net/tag/ian_davis
+http://blog.iandavis.com/2011/08/21/google-author-rich-snippets/|title|Google Author Rich Snippets Internet Alchemy
+http://blog.iandavis.com/2011/08/21/google-author-rich-snippets/|creationTime|2011-08-22T19:36:24Z
+http://colah.github.io/posts/2015-09-Visual-Information/|creationDate|2015-11-11
+http://colah.github.io/posts/2015-09-Visual-Information/|tag|http://www.semanlink.net/tag/information_theory
+http://colah.github.io/posts/2015-09-Visual-Information/|tag|http://www.semanlink.net/tag/christopher_olah
+http://colah.github.io/posts/2015-09-Visual-Information/|title|Visual Information Theory -- colah's blog
+http://colah.github.io/posts/2015-09-Visual-Information/|creationTime|2015-11-11T11:24:32Z
+http://blog.schema.org/2015/05/schema.html|creationDate|2015-05-14
+http://blog.schema.org/2015/05/schema.html|tag|http://www.semanlink.net/tag/schema_org
+http://blog.schema.org/2015/05/schema.html|title|Schema.org 2.0
+http://blog.schema.org/2015/05/schema.html|creationTime|2015-05-14T12:26:54Z
+http://www.offconvex.org/2018/09/18/alacarte/|creationDate|2018-09-18
+http://www.offconvex.org/2018/09/18/alacarte/|tag|http://www.semanlink.net/tag/word_embedding
+http://www.offconvex.org/2018/09/18/alacarte/|tag|http://www.semanlink.net/tag/n_gram
+http://www.offconvex.org/2018/09/18/alacarte/|tag|http://www.semanlink.net/tag/a_la_carte_embedding
+http://www.offconvex.org/2018/09/18/alacarte/|tag|http://www.semanlink.net/tag/good
+http://www.offconvex.org/2018/09/18/alacarte/|tag|http://www.semanlink.net/tag/nlp_rare_words
+http://www.offconvex.org/2018/09/18/alacarte/|tag|http://www.semanlink.net/tag/sentence_embeddings
+http://www.offconvex.org/2018/09/18/alacarte/|tag|http://www.semanlink.net/tag/sanjeev_arora
+http://www.offconvex.org/2018/09/18/alacarte/|comment|"A La Carte embeddings
+
+> Distributional methods for capturing meaning, such as word embeddings, often require observing many examples of words in context. But most humans can infer a reasonable meaning from very few or even a single occurrence...
+> +> “Porgies live in shallow temperate marine waters” +> +> Inducing word embedding from their contexts: a surprising linear relationship" +http://www.offconvex.org/2018/09/18/alacarte/|title|Simple and efficient semantic embeddings for rare words, n-grams, and language features – Off the convex path +http://www.offconvex.org/2018/09/18/alacarte/|creationTime|2018-09-18T18:07:01Z +http://rdfa.info/|creationDate|2008-04-21 +http://rdfa.info/|tag|http://www.semanlink.net/tag/rdfa +http://rdfa.info/|comment|Linked Data in HTML +http://rdfa.info/|title|RDFa +http://rdfa.info/|creationTime|2008-04-21T15:50:02Z +http://www.faz.net/aktuell/feuilleton/debatten/mathias-doepfner-s-open-letter-to-eric-schmidt-12900860.html?printPagedArticle=true#pageIndex_2|creationDate|2014-04-26 +http://www.faz.net/aktuell/feuilleton/debatten/mathias-doepfner-s-open-letter-to-eric-schmidt-12900860.html?printPagedArticle=true#pageIndex_2|tag|http://www.semanlink.net/tag/google_uber_alles +http://www.faz.net/aktuell/feuilleton/debatten/mathias-doepfner-s-open-letter-to-eric-schmidt-12900860.html?printPagedArticle=true#pageIndex_2|tag|http://www.semanlink.net/tag/eric_schmidt +http://www.faz.net/aktuell/feuilleton/debatten/mathias-doepfner-s-open-letter-to-eric-schmidt-12900860.html?printPagedArticle=true#pageIndex_2|title|"""Why we fear Google"", Mathias Döpfner’s open letter to Eric Schmidt" +http://www.faz.net/aktuell/feuilleton/debatten/mathias-doepfner-s-open-letter-to-eric-schmidt-12900860.html?printPagedArticle=true#pageIndex_2|creationTime|2014-04-26T11:41:33Z +https://en.wikipedia.org/wiki/Sima_Humboldt|creationDate|2019-03-27 +https://en.wikipedia.org/wiki/Sima_Humboldt|tag|http://www.semanlink.net/tag/venezuela +https://en.wikipedia.org/wiki/Sima_Humboldt|tag|http://www.semanlink.net/tag/tepuys +https://en.wikipedia.org/wiki/Sima_Humboldt|title|Sima Humboldt - Wikipedia +https://en.wikipedia.org/wiki/Sima_Humboldt|creationTime|2019-03-27T13:01:19Z +http://www.nltk.org/_modules/nltk/tag/stanford.html|creationDate|2017-07-11 +http://www.nltk.org/_modules/nltk/tag/stanford.html|tag|http://www.semanlink.net/tag/nltk +http://www.nltk.org/_modules/nltk/tag/stanford.html|tag|http://www.semanlink.net/tag/stanford_pos_tagger +http://www.nltk.org/_modules/nltk/tag/stanford.html|title|Source code for nltk.tag.stanford — NLTK documentation +http://www.nltk.org/_modules/nltk/tag/stanford.html|creationTime|2017-07-11T16:13:00Z +http://sigmajs.org/|creationDate|2014-07-23 +http://sigmajs.org/|tag|http://www.semanlink.net/tag/sigma_js +http://sigmajs.org/|comment|JavaScript library dedicated to graph drawing +http://sigmajs.org/|title|Sigma js +http://sigmajs.org/|creationTime|2014-07-23T19:34:29Z +http://internetactu.blog.lemonde.fr/2016/02/20/le-vertigineux-avenir-des-echanges-executables/|creationDate|2016-02-22 +http://internetactu.blog.lemonde.fr/2016/02/20/le-vertigineux-avenir-des-echanges-executables/|tag|http://www.semanlink.net/tag/blockchain +http://internetactu.blog.lemonde.fr/2016/02/20/le-vertigineux-avenir-des-echanges-executables/|title|Le vertigineux avenir des échanges exécutables InternetActu +http://internetactu.blog.lemonde.fr/2016/02/20/le-vertigineux-avenir-des-echanges-executables/|creationTime|2016-02-22T14:10:02Z +http://www.snee.com/bobdc.blog/2010/09/fallback-with-sparql.html|creationDate|2010-09-22 +http://www.snee.com/bobdc.blog/2010/09/fallback-with-sparql.html|tag|http://www.semanlink.net/tag/sparql_1_1 
+http://www.snee.com/bobdc.blog/2010/09/fallback-with-sparql.html|tag|http://www.semanlink.net/tag/sparql_tips +http://www.snee.com/bobdc.blog/2010/09/fallback-with-sparql.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2010/09/fallback-with-sparql.html|title|Fallback with SPARQL - bobdc.blog +http://www.snee.com/bobdc.blog/2010/09/fallback-with-sparql.html|creationTime|2010-09-22T21:48:38Z +http://stevelosh.com/blog/2018/08/a-road-to-common-lisp/|creationDate|2018-08-28 +http://stevelosh.com/blog/2018/08/a-road-to-common-lisp/|tag|http://www.semanlink.net/tag/lisp +http://stevelosh.com/blog/2018/08/a-road-to-common-lisp/|title|A Road to Common Lisp / Steve Losh +http://stevelosh.com/blog/2018/08/a-road-to-common-lisp/|creationTime|2018-08-28T09:58:45Z +http://www.semanticoverflow.com/|creationDate|2009-11-12 +http://www.semanticoverflow.com/|tag|http://www.semanlink.net/tag/semantic_overflow +http://www.semanticoverflow.com/|comment|For questions about semantic web techniques and technologies. +http://www.semanticoverflow.com/|title|Semantic Overflow +http://www.semanticoverflow.com/|creationTime|2009-11-12T18:16:02Z +https://arxiv.org/abs/1806.04470|creationDate|2018-06-28 +https://arxiv.org/abs/1806.04470|tag|http://www.semanlink.net/tag/sequence_labeling +https://arxiv.org/abs/1806.04470|tag|http://www.semanlink.net/tag/named_entity_recognition +https://arxiv.org/abs/1806.04470|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1806.04470|arxiv_author|Yue Zhang +https://arxiv.org/abs/1806.04470|arxiv_author|Shuailong Liang +https://arxiv.org/abs/1806.04470|arxiv_author|Jie Yang +https://arxiv.org/abs/1806.04470|comment|design challenges of constructing effective and efficient neural sequence labeling systems +https://arxiv.org/abs/1806.04470|title|[1806.04470] Design Challenges and Misconceptions in Neural Sequence Labeling +https://arxiv.org/abs/1806.04470|creationTime|2018-06-28T01:21:31Z +https://arxiv.org/abs/1806.04470|arxiv_summary|"We investigate the design challenges of constructing effective and efficient +neural sequence labeling systems, by reproducing twelve neural sequence +labeling models, which include most of the state-of-the-art structures, and +conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, +and POS tagging). Misconceptions and inconsistent conclusions in existing +literature are examined and clarified under statistical experiments. In the +comparison and analysis process, we reach several practical conclusions which +can be useful to practitioners." 
+https://arxiv.org/abs/1806.04470|arxiv_firstAuthor|Jie Yang +https://arxiv.org/abs/1806.04470|arxiv_updated|2018-07-12T09:31:10Z +https://arxiv.org/abs/1806.04470|arxiv_title|Design Challenges and Misconceptions in Neural Sequence Labeling +https://arxiv.org/abs/1806.04470|arxiv_published|2018-06-12T12:43:42Z +https://arxiv.org/abs/1806.04470|arxiv_num|1806.04470 +http://sujitpal.blogspot.fr/2013/03/implementing-rake-algorithm-with-nltk.html|creationDate|2017-06-26 +http://sujitpal.blogspot.fr/2013/03/implementing-rake-algorithm-with-nltk.html|tag|http://www.semanlink.net/tag/nltk +http://sujitpal.blogspot.fr/2013/03/implementing-rake-algorithm-with-nltk.html|tag|http://www.semanlink.net/tag/rake +http://sujitpal.blogspot.fr/2013/03/implementing-rake-algorithm-with-nltk.html|title|Salmon Run: Implementing the RAKE Algorithm with NLTK +http://sujitpal.blogspot.fr/2013/03/implementing-rake-algorithm-with-nltk.html|creationTime|2017-06-26T14:56:18Z +http://news.bbc.co.uk/1/hi/sci/tech/4986668.stm|creationDate|2006-05-16 +http://news.bbc.co.uk/1/hi/sci/tech/4986668.stm|tag|http://www.semanlink.net/tag/neandertal +http://news.bbc.co.uk/1/hi/sci/tech/4986668.stm|tag|http://www.semanlink.net/tag/adn +http://news.bbc.co.uk/1/hi/sci/tech/4986668.stm|comment|The first sequences of nuclear DNA to be taken from a Neanderthal have been reported at a US science meeting. +http://news.bbc.co.uk/1/hi/sci/tech/4986668.stm|title|BBC NEWS - Neanderthal yields nuclear DNA +http://news.bbc.co.uk/1/hi/sci/tech/4986668.stm|source|BBC +https://www.theverge.com/2018/5/17/17344250/google-x-selfish-ledger-video-data-privacy|creationDate|2018-05-20 +https://www.theverge.com/2018/5/17/17344250/google-x-selfish-ledger-video-data-privacy|tag|http://www.semanlink.net/tag/what_could_go_wrong +https://www.theverge.com/2018/5/17/17344250/google-x-selfish-ledger-video-data-privacy|tag|http://www.semanlink.net/tag/google +https://www.theverge.com/2018/5/17/17344250/google-x-selfish-ledger-video-data-privacy|comment|This internal video from 2016 shows a Google concept for how total data collection could reshape society +https://www.theverge.com/2018/5/17/17344250/google-x-selfish-ledger-video-data-privacy|title|Google’s Selfish Ledger is an unsettling vision of Silicon Valley social engineering - The Verge +https://www.theverge.com/2018/5/17/17344250/google-x-selfish-ledger-video-data-privacy|creationTime|2018-05-20T18:18:06Z +http://www.ccfd.asso.fr/2007/affiche-migrants-g.jpg|creationDate|2007-04-09 +http://www.ccfd.asso.fr/2007/affiche-migrants-g.jpg|tag|http://www.semanlink.net/tag/presidentielles_2007 +http://www.ccfd.asso.fr/2007/affiche-migrants-g.jpg|tag|http://www.semanlink.net/tag/ccfd +http://www.ccfd.asso.fr/2007/affiche-migrants-g.jpg|tag|http://www.semanlink.net/tag/immigration +http://www.ccfd.asso.fr/2007/affiche-migrants-g.jpg|title|Un migrant doit être traité comme un délinquant. NON ! 
+http://www.ccfd.asso.fr/2007/affiche-migrants-g.jpg|creationTime|2007-04-09T23:32:04Z +https://medium.com/@kurtcagle/why-you-dont-need-data-scientists-a9654cc9f0e4|creationDate|2018-06-17 +https://medium.com/@kurtcagle/why-you-dont-need-data-scientists-a9654cc9f0e4|tag|http://www.semanlink.net/tag/data_scientists +https://medium.com/@kurtcagle/why-you-dont-need-data-scientists-a9654cc9f0e4|tag|http://www.semanlink.net/tag/datalakes +https://medium.com/@kurtcagle/why-you-dont-need-data-scientists-a9654cc9f0e4|tag|http://www.semanlink.net/tag/semantic_enterprise +https://medium.com/@kurtcagle/why-you-dont-need-data-scientists-a9654cc9f0e4|title|Why You Don’t Need Data Scientists – Kurt Cagle – Medium +https://medium.com/@kurtcagle/why-you-dont-need-data-scientists-a9654cc9f0e4|creationTime|2018-06-17T12:27:17Z +http://www.nigerime.com/|creationDate|2008-06-07 +http://www.nigerime.com/|tag|http://www.semanlink.net/tag/musique_du_niger +http://www.nigerime.com/|tag|http://www.semanlink.net/tag/rap +http://www.nigerime.com/|tag|http://www.semanlink.net/tag/hip_hop +http://www.nigerime.com/|title|Nigerime: Le Portail du Rap et du Hip Hop Nigerien +http://www.nigerime.com/|creationTime|2008-06-07T14:51:07Z +http://www.newscientist.com/article/dn24598-neanderthal-virus-dna-spotted-hiding-in-modern-humans.html|creationDate|2013-11-19 +http://www.newscientist.com/article/dn24598-neanderthal-virus-dna-spotted-hiding-in-modern-humans.html|tag|http://www.semanlink.net/tag/virus +http://www.newscientist.com/article/dn24598-neanderthal-virus-dna-spotted-hiding-in-modern-humans.html|tag|http://www.semanlink.net/tag/paleoanthropology_genetics +http://www.newscientist.com/article/dn24598-neanderthal-virus-dna-spotted-hiding-in-modern-humans.html|tag|http://www.semanlink.net/tag/neandertal +http://www.newscientist.com/article/dn24598-neanderthal-virus-dna-spotted-hiding-in-modern-humans.html|title|Neanderthal virus DNA spotted hiding in modern humans - life - 18 November 2013 - New Scientist +http://www.newscientist.com/article/dn24598-neanderthal-virus-dna-spotted-hiding-in-modern-humans.html|creationTime|2013-11-19T00:47:43Z +http://siren.solutions/siren/overview/|creationDate|2015-03-06 +http://siren.solutions/siren/overview/|tag|http://www.semanlink.net/tag/siren +http://siren.solutions/siren/overview/|title|SIREn Solutions Solr & Elasticsearch Consultancy – Overview +http://siren.solutions/siren/overview/|creationTime|2015-03-06T15:30:54Z +http://lists.w3.org/Archives/Public/www-tag/2007Dec/0024.html|creationDate|2008-01-03 +http://lists.w3.org/Archives/Public/www-tag/2007Dec/0024.html|tag|http://www.semanlink.net/tag/httprange_14 +http://lists.w3.org/Archives/Public/www-tag/2007Dec/0024.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://lists.w3.org/Archives/Public/www-tag/2007Dec/0024.html|comment|"I did wonder about the following: in the case when the URI is not of +document, when currently we use 303, +then the server can return a document *about* it with an extra +header to explain to the browser +that it is actually giving you a description of it not the content of +it." 
+http://lists.w3.org/Archives/Public/www-tag/2007Dec/0024.html|title|Alternative to 303 response: Description-ID: header From: Tim Berners-Lee +http://lists.w3.org/Archives/Public/www-tag/2007Dec/0024.html|creationTime|2008-01-03T12:07:21Z +https://blog.novatec-gmbh.de/the-problems-with-swagger/|creationDate|2018-01-05 +https://blog.novatec-gmbh.de/the-problems-with-swagger/|comment|"Swagger imposes some constraints, like the lack of hypermedia... if you are using swagger, you are probably giving up one of the most powerful feature of RESTful APIs. You are giving up evolvability! +" +https://blog.novatec-gmbh.de/the-problems-with-swagger/|title|The problems with Swagger - NovaTec Blog +https://blog.novatec-gmbh.de/the-problems-with-swagger/|creationTime|2018-01-05T21:22:22Z +http://blog.octo.com/introduction-a-la-technologie-blockchain/|creationDate|2016-06-29 +http://blog.octo.com/introduction-a-la-technologie-blockchain/|tag|http://www.semanlink.net/tag/blockchain +http://blog.octo.com/introduction-a-la-technologie-blockchain/|title|Introduction à la technologie Blockchain OCTO talks ! +http://blog.octo.com/introduction-a-la-technologie-blockchain/|creationTime|2016-06-29T11:18:48Z +http://stackoverflow.com/questions/18496940/how-to-deal-with-persistent-storage-e-g-databases-in-docker|creationDate|2016-04-13 +http://stackoverflow.com/questions/18496940/how-to-deal-with-persistent-storage-e-g-databases-in-docker|tag|http://www.semanlink.net/tag/docker_volumes +http://stackoverflow.com/questions/18496940/how-to-deal-with-persistent-storage-e-g-databases-in-docker|title|How to deal with persistent storage (e.g. databases) in docker - Stack Overflow +http://stackoverflow.com/questions/18496940/how-to-deal-with-persistent-storage-e-g-databases-in-docker|creationTime|2016-04-13T17:54:26Z +http://developer.yahoo.net/ypatterns/|creationDate|2006-02-17 +http://developer.yahoo.net/ypatterns/|tag|http://www.semanlink.net/tag/yahoo +http://developer.yahoo.net/ypatterns/|tag|http://www.semanlink.net/tag/ajax +http://developer.yahoo.net/ypatterns/|title|Yahoo! 
Design Pattern Library +https://bitbucket.org/visuaLOD/visualod.bitbucket.org/overview|creationDate|2015-08-29 +https://bitbucket.org/visuaLOD/visualod.bitbucket.org/overview|tag|http://www.semanlink.net/tag/angularjs +https://bitbucket.org/visuaLOD/visualod.bitbucket.org/overview|tag|http://www.semanlink.net/tag/json_ld +https://bitbucket.org/visuaLOD/visualod.bitbucket.org/overview|comment|a Linked Data View Builder +https://bitbucket.org/visuaLOD/visualod.bitbucket.org/overview|title|visuaLOD / visualod.bitbucket.org — Bitbucket +https://bitbucket.org/visuaLOD/visualod.bitbucket.org/overview|creationTime|2015-08-29T16:58:08Z +http://spendmatters.com/2015/11/09/why-bitcoins-blockchain-technology-could-revolutionize-supply-chain-transparency/|creationDate|2016-01-31 +http://spendmatters.com/2015/11/09/why-bitcoins-blockchain-technology-could-revolutionize-supply-chain-transparency/|tag|http://www.semanlink.net/tag/supply_chain +http://spendmatters.com/2015/11/09/why-bitcoins-blockchain-technology-could-revolutionize-supply-chain-transparency/|tag|http://www.semanlink.net/tag/blockchain +http://spendmatters.com/2015/11/09/why-bitcoins-blockchain-technology-could-revolutionize-supply-chain-transparency/|title|Bitcoin’s Blockchain Can Revolutionize Supply Chain Transparency Spend Matters +http://spendmatters.com/2015/11/09/why-bitcoins-blockchain-technology-could-revolutionize-supply-chain-transparency/|creationTime|2016-01-31T13:22:54Z +http://www.ined.fr/fichier/t_publication/1300/publi_pdf1_435.pdf|creationDate|2007-06-24 +http://www.ined.fr/fichier/t_publication/1300/publi_pdf1_435.pdf|tag|http://www.semanlink.net/tag/population_mondiale +http://www.ined.fr/fichier/t_publication/1300/publi_pdf1_435.pdf|tag|http://www.semanlink.net/tag/ville +http://www.ined.fr/fichier/t_publication/1300/publi_pdf1_435.pdf|tag|http://www.semanlink.net/tag/urbanisation +http://www.ined.fr/fichier/t_publication/1300/publi_pdf1_435.pdf|comment|"Les villes rassemblent désormais la moitié de +l’humanité (3,3 milliards d’hommes). Les pays en +développement, qui sont les plus peuplés, comptent +aujourd’hui le plus grand nombre de citadins. +L’Asie notamment, où habitent trois cinquièmes +des hommes, +abrite un citadin du monde sur deux. +Depuis 50 ans, la population des villes s’accroît +bien plus vite dans les pays pauvres que dans les +pays riches (4,3 % par an en moyenne en Afrique, +1,2% en Europe). C’est aussi dans les pays en développement +que se trouvent 15 des 20 plus grandes +agglomérations du monde aujourd’hui. Alors que +l’urbanisation est historiquement un des moteurs +du progrès économique et social, la forte croissance +urbaine actuelle dans les pays du Sud, avec les +difficultés de logement, d’emploi ou de transport +qu’elle engendre, semble au contraire ralentir leur +développement." 
+http://www.ined.fr/fichier/t_publication/1300/publi_pdf1_435.pdf|title|La moitié de la population mondiale vit en ville +http://www.ined.fr/fichier/t_publication/1300/publi_pdf1_435.pdf|creationTime|2007-06-24T21:30:16Z +http://semanticweb.com/volkswagen-das-auto-company-is-das-semantic-web-company_b23233|creationDate|2011-09-23 +http://semanticweb.com/volkswagen-das-auto-company-is-das-semantic-web-company_b23233|tag|http://www.semanlink.net/tag/semantic_web +http://semanticweb.com/volkswagen-das-auto-company-is-das-semantic-web-company_b23233|tag|http://www.semanlink.net/tag/car_options_ontology +http://semanticweb.com/volkswagen-das-auto-company-is-das-semantic-web-company_b23233|tag|http://www.semanlink.net/tag/automotive_and_web_technologies +http://semanticweb.com/volkswagen-das-auto-company-is-das-semantic-web-company_b23233|tag|http://www.semanlink.net/tag/semantic_web_company +http://semanticweb.com/volkswagen-das-auto-company-is-das-semantic-web-company_b23233|tag|http://www.semanlink.net/tag/volkswagen +http://semanticweb.com/volkswagen-das-auto-company-is-das-semantic-web-company_b23233|title|Volkswagen: Das Auto Company is Das Semantic Web Company! - semanticweb.com +http://semanticweb.com/volkswagen-das-auto-company-is-das-semantic-web-company_b23233|creationTime|2011-09-23T01:00:57Z +http://en.wikibooks.org/wiki/Octave_Programming_Tutorial/Getting_started|creationDate|2013-12-30 +http://en.wikibooks.org/wiki/Octave_Programming_Tutorial/Getting_started|tag|http://www.semanlink.net/tag/gnu_octave +http://en.wikibooks.org/wiki/Octave_Programming_Tutorial/Getting_started|tag|http://www.semanlink.net/tag/tutorial +http://en.wikibooks.org/wiki/Octave_Programming_Tutorial/Getting_started|title|Octave Programming Tutorial/Getting started - Wikibooks, open books for an open world +http://en.wikibooks.org/wiki/Octave_Programming_Tutorial/Getting_started|creationTime|2013-12-30T11:07:39Z +http://www.lemonde.fr/economie/article/2012/08/07/les-deboires-de-knight-capital-specialiste-du-trading-haute-frequence_1743230_3234.html|creationDate|2012-08-07 +http://www.lemonde.fr/economie/article/2012/08/07/les-deboires-de-knight-capital-specialiste-du-trading-haute-frequence_1743230_3234.html|tag|http://www.semanlink.net/tag/high_frequency_trading +http://www.lemonde.fr/economie/article/2012/08/07/les-deboires-de-knight-capital-specialiste-du-trading-haute-frequence_1743230_3234.html|tag|http://www.semanlink.net/tag/bug +http://www.lemonde.fr/economie/article/2012/08/07/les-deboires-de-knight-capital-specialiste-du-trading-haute-frequence_1743230_3234.html|title|Les déboires de Knight Capital, spécialiste du trading haute fréquence +http://www.lemonde.fr/economie/article/2012/08/07/les-deboires-de-knight-capital-specialiste-du-trading-haute-frequence_1743230_3234.html|creationTime|2012-08-07T15:42:55Z +https://discuss.elastic.co/t/loading-json-ld-into-es/19970|creationDate|2017-04-21 +https://discuss.elastic.co/t/loading-json-ld-into-es/19970|tag|http://www.semanlink.net/tag/json_ld +https://discuss.elastic.co/t/loading-json-ld-into-es/19970|tag|http://www.semanlink.net/tag/elasticsearch +https://discuss.elastic.co/t/loading-json-ld-into-es/19970|title|Loading JSON-LD into ES - Elasticsearch - Discuss the Elastic Stack +https://discuss.elastic.co/t/loading-json-ld-into-es/19970|creationTime|2017-04-21T13:33:35Z +http://www.nytimes.com/2007/06/17/magazine/17lootfarmers-t.html?ei=5088&en=a6282d1ddf608fc1&ex=1339732800&partner=rssnyt&emc=rss&pagewanted=all|creationDate|2007-06-18 
+http://www.nytimes.com/2007/06/17/magazine/17lootfarmers-t.html?ei=5088&en=a6282d1ddf608fc1&ex=1339732800&partner=rssnyt&emc=rss&pagewanted=all|tag|http://www.semanlink.net/tag/massively_multiplayer_online_games +http://www.nytimes.com/2007/06/17/magazine/17lootfarmers-t.html?ei=5088&en=a6282d1ddf608fc1&ex=1339732800&partner=rssnyt&emc=rss&pagewanted=all|tag|http://www.semanlink.net/tag/chine +http://www.nytimes.com/2007/06/17/magazine/17lootfarmers-t.html?ei=5088&en=a6282d1ddf608fc1&ex=1339732800&partner=rssnyt&emc=rss&pagewanted=all|title|Video Games - The Life of the Chinese Gold Farmer - New York Times +http://www.nytimes.com/2007/06/17/magazine/17lootfarmers-t.html?ei=5088&en=a6282d1ddf608fc1&ex=1339732800&partner=rssnyt&emc=rss&pagewanted=all|creationTime|2007-06-18T22:09:46Z +http://dl.free.fr/|creationDate|2009-12-18 +http://dl.free.fr/|tag|http://www.semanlink.net/tag/ftp +http://dl.free.fr/|title|Free FTP +http://dl.free.fr/|creationTime|2009-12-18T11:31:45Z +https://en.wikipedia.org/wiki/Letters_from_Iwo_Jima|creationDate|2018-01-15 +https://en.wikipedia.org/wiki/Letters_from_Iwo_Jima|tag|http://www.semanlink.net/tag/japon +https://en.wikipedia.org/wiki/Letters_from_Iwo_Jima|tag|http://www.semanlink.net/tag/film +https://en.wikipedia.org/wiki/Letters_from_Iwo_Jima|tag|http://www.semanlink.net/tag/clint_eastwood +https://en.wikipedia.org/wiki/Letters_from_Iwo_Jima|tag|http://www.semanlink.net/tag/2eme_guerre_mondiale +https://en.wikipedia.org/wiki/Letters_from_Iwo_Jima|comment|film de Clint Eastwood +https://en.wikipedia.org/wiki/Letters_from_Iwo_Jima|title|Letters from Iwo Jima +https://en.wikipedia.org/wiki/Letters_from_Iwo_Jima|creationTime|2018-01-15T23:03:20Z +http://semanticweb.com/report-from-day-5-at-iswc_b24326|creationDate|2011-11-14 +http://semanticweb.com/report-from-day-5-at-iswc_b24326|tag|http://www.semanlink.net/tag/iswc +http://semanticweb.com/report-from-day-5-at-iswc_b24326|title|Report from Day 5 at ISWC - semanticweb.com +http://semanticweb.com/report-from-day-5-at-iswc_b24326|creationTime|2011-11-14T23:00:03Z +http://fr.scribd.com/doc/138527966/Facebook-Natural-Language-Engineering|creationDate|2013-05-07 +http://fr.scribd.com/doc/138527966/Facebook-Natural-Language-Engineering|tag|http://www.semanlink.net/tag/facebook_graph_search +http://fr.scribd.com/doc/138527966/Facebook-Natural-Language-Engineering|tag|http://www.semanlink.net/tag/nlp +http://fr.scribd.com/doc/138527966/Facebook-Natural-Language-Engineering|title|Facebook Natural Language Engineering +http://fr.scribd.com/doc/138527966/Facebook-Natural-Language-Engineering|creationTime|2013-05-07T18:53:25Z +http://world.honda.com/ASIMO/history/|creationDate|2006-06-01 +http://world.honda.com/ASIMO/history/|tag|http://www.semanlink.net/tag/honda +http://world.honda.com/ASIMO/history/|tag|http://www.semanlink.net/tag/robot_humanoide +http://world.honda.com/ASIMO/history/|comment|History of Honda humanoid robot +http://world.honda.com/ASIMO/history/|title|Honda Worldwide ASIMO History +http://en.wikipedia.org/wiki/Tulip_mania|creationDate|2008-01-25 +http://en.wikipedia.org/wiki/Tulip_mania|tag|http://www.semanlink.net/tag/tulipe +http://en.wikipedia.org/wiki/Tulip_mania|tag|http://www.semanlink.net/tag/bulle_speculative +http://en.wikipedia.org/wiki/Tulip_mania|title|Tulip mania - Wikipedia +http://en.wikipedia.org/wiki/Tulip_mania|creationTime|2008-01-25T15:05:42Z +https://www.thecut.com/2018/05/lil-miquela-digital-avatar-instagram-influencer.html|creationDate|2018-11-05 
+https://www.thecut.com/2018/05/lil-miquela-digital-avatar-instagram-influencer.html|tag|http://www.semanlink.net/tag/avatar +https://www.thecut.com/2018/05/lil-miquela-digital-avatar-instagram-influencer.html|tag|http://www.semanlink.net/tag/trump +https://www.thecut.com/2018/05/lil-miquela-digital-avatar-instagram-influencer.html|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +https://www.thecut.com/2018/05/lil-miquela-digital-avatar-instagram-influencer.html|tag|http://www.semanlink.net/tag/troll +https://www.thecut.com/2018/05/lil-miquela-digital-avatar-instagram-influencer.html|comment|"1 million followers on Instagram and recently hacked by a Trump troll. But it's not human. + +La guerre des robots a commencé +" +https://www.thecut.com/2018/05/lil-miquela-digital-avatar-instagram-influencer.html|title|Lil Miquela: The Digital Avatar Instagram Influencer +https://www.thecut.com/2018/05/lil-miquela-digital-avatar-instagram-influencer.html|creationTime|2018-11-05T18:26:40Z +http://spraakdata.gu.se/svedd/papers/courses/masterThes.pdf|creationDate|2019-04-03 +http://spraakdata.gu.se/svedd/papers/courses/masterThes.pdf|tag|http://www.semanlink.net/tag/acronyms_nlp +http://spraakdata.gu.se/svedd/papers/courses/masterThes.pdf|title|Acronym Recognition - Recognizing acronyms in Swedish texts +http://spraakdata.gu.se/svedd/papers/courses/masterThes.pdf|creationTime|2019-04-03T09:36:09Z +http://ant4eclipse.sourceforge.net/ant-for-eclipse-faq.html|creationDate|2009-02-22 +http://ant4eclipse.sourceforge.net/ant-for-eclipse-faq.html|tag|http://www.semanlink.net/tag/eclipse +http://ant4eclipse.sourceforge.net/ant-for-eclipse-faq.html|tag|http://www.semanlink.net/tag/ant +http://ant4eclipse.sourceforge.net/ant-for-eclipse-faq.html|title|Ant for eclipse - access eclipse configurations from within ant +http://ant4eclipse.sourceforge.net/ant-for-eclipse-faq.html|creationTime|2009-02-22T17:14:09Z +http://www.realgoodfood.com/ciao_vito.html|creationDate|2006-01-23 +http://www.realgoodfood.com/ciao_vito.html|tag|http://www.semanlink.net/tag/ciao_vito +http://www.realgoodfood.com/ciao_vito.html|title|Real Good Food Ciao Vito +http://www.snee.com/bobdc.blog/2014/02/querying-my-own-mp3-image-and.html|creationDate|2014-02-18 +http://www.snee.com/bobdc.blog/2014/02/querying-my-own-mp3-image-and.html|tag|http://www.semanlink.net/tag/desktop_search +http://www.snee.com/bobdc.blog/2014/02/querying-my-own-mp3-image-and.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2014/02/querying-my-own-mp3-image-and.html|tag|http://www.semanlink.net/tag/sparql +http://www.snee.com/bobdc.blog/2014/02/querying-my-own-mp3-image-and.html|tag|http://www.semanlink.net/tag/ubuntu +http://www.snee.com/bobdc.blog/2014/02/querying-my-own-mp3-image-and.html|title|Querying my own MP3, image, and other file metadata with SPARQL - bobdc.blog +http://www.snee.com/bobdc.blog/2014/02/querying-my-own-mp3-image-and.html|creationTime|2014-02-18T00:04:05Z +https://en.wikipedia.org/wiki/All_About_Eve|creationDate|2017-12-18 +https://en.wikipedia.org/wiki/All_About_Eve|tag|http://www.semanlink.net/tag/film_americain +https://en.wikipedia.org/wiki/All_About_Eve|comment|"film de Mankiewicz. 
Une actrice de théâtre célèbre mais vieillissante (Bette Davis), et une jeunette qui prend sa place +" +https://en.wikipedia.org/wiki/All_About_Eve|title|All About Eve +https://en.wikipedia.org/wiki/All_About_Eve|creationTime|2017-12-18T23:06:12Z +http://www.seoskeptic.com/json-ld-google-knowledge-graph-schema-org-seo/|creationDate|2014-03-19 +http://www.seoskeptic.com/json-ld-google-knowledge-graph-schema-org-seo/|tag|http://www.semanlink.net/tag/seo +http://www.seoskeptic.com/json-ld-google-knowledge-graph-schema-org-seo/|tag|http://www.semanlink.net/tag/schema_org +http://www.seoskeptic.com/json-ld-google-knowledge-graph-schema-org-seo/|tag|http://www.semanlink.net/tag/google_knowledge_graph +http://www.seoskeptic.com/json-ld-google-knowledge-graph-schema-org-seo/|tag|http://www.semanlink.net/tag/json_ld +http://www.seoskeptic.com/json-ld-google-knowledge-graph-schema-org-seo/|title|JSON-LD, the Google Knowledge Graph and schema.org SEO +http://www.seoskeptic.com/json-ld-google-knowledge-graph-schema-org-seo/|creationTime|2014-03-19T15:12:53Z +http://www.nature.com/srep/2013/130228/srep01319/full/srep01319.html|creationDate|2013-02-28 +http://www.nature.com/srep/2013/130228/srep01319/full/srep01319.html|tag|http://www.semanlink.net/tag/cerveau +http://www.nature.com/srep/2013/130228/srep01319/full/srep01319.html|tag|http://www.semanlink.net/tag/brain_to_brain_interface +http://www.nature.com/srep/2013/130228/srep01319/full/srep01319.html|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.nature.com/srep/2013/130228/srep01319/full/srep01319.html|title|A Brain-to-Brain Interface for Real-Time Sharing of Sensorimotor Information : Scientific Reports : Nature Publishing Group +http://www.nature.com/srep/2013/130228/srep01319/full/srep01319.html|creationTime|2013-02-28T22:49:55Z +http://www.offconvex.org/|creationDate|2018-09-09 +http://www.offconvex.org/|tag|http://www.semanlink.net/tag/sanjeev_arora +http://www.offconvex.org/|tag|http://www.semanlink.net/tag/ml_nlp_blog +http://www.offconvex.org/|title|Off the convex path +http://www.offconvex.org/|creationTime|2018-09-09T15:38:14Z +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080328_004611.html|creationDate|2008-04-04 +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080328_004611.html|tag|http://www.semanlink.net/tag/education +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080328_004611.html|tag|http://www.semanlink.net/tag/cringely +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080328_004611.html|title|I, Cringely . The Pulpit . 
Amish Paradise PBS +http://www.pbs.org/cringely/pulpit/2008/pulpit_20080328_004611.html|creationTime|2008-04-04T22:51:40Z +http://duckduckgo.com/|creationDate|2012-02-20 +http://duckduckgo.com/|tag|http://www.semanlink.net/tag/search_engines +http://duckduckgo.com/|title|DuckDuckGo +http://duckduckgo.com/|creationTime|2012-02-20T21:17:40Z +http://ngm.nationalgeographic.com/2013/04/species-revival/zimmer-text|creationDate|2013-03-23 +http://ngm.nationalgeographic.com/2013/04/species-revival/zimmer-text|tag|http://www.semanlink.net/tag/national_geographic +http://ngm.nationalgeographic.com/2013/04/species-revival/zimmer-text|tag|http://www.semanlink.net/tag/de_extinction +http://ngm.nationalgeographic.com/2013/04/species-revival/zimmer-text|title|Bringing Extinct Species Back to Life - Pictures, More From National Geographic Magazine +http://ngm.nationalgeographic.com/2013/04/species-revival/zimmer-text|creationTime|2013-03-23T01:07:06Z +http://data.semanticweb.org/conference/eswc/2012/paper/research/132/html|creationDate|2012-06-24 +http://data.semanticweb.org/conference/eswc/2012/paper/research/132/html|tag|http://www.semanlink.net/tag/rdf +http://data.semanticweb.org/conference/eswc/2012/paper/research/132/html|tag|http://www.semanlink.net/tag/machine_learning_semantic_web +http://data.semanticweb.org/conference/eswc/2012/paper/research/132/html|title|Graph Kernels for RDF data Semantic Web Dog Food +http://data.semanticweb.org/conference/eswc/2012/paper/research/132/html|creationTime|2012-06-24T11:33:01Z +https://dannyayers.wordpress.com/2014/12/30/browser-sparql-server-wiki/|creationDate|2014-12-30 +https://dannyayers.wordpress.com/2014/12/30/browser-sparql-server-wiki/|tag|http://www.semanlink.net/tag/danny_ayers +https://dannyayers.wordpress.com/2014/12/30/browser-sparql-server-wiki/|tag|http://www.semanlink.net/tag/wiki +https://dannyayers.wordpress.com/2014/12/30/browser-sparql-server-wiki/|tag|http://www.semanlink.net/tag/sparql_en_javascript +https://dannyayers.wordpress.com/2014/12/30/browser-sparql-server-wiki/|tag|http://www.semanlink.net/tag/sparql +https://dannyayers.wordpress.com/2014/12/30/browser-sparql-server-wiki/|title|Browser + SPARQL Server = Wiki Danny Ayers' Other Alternate Weblog +https://dannyayers.wordpress.com/2014/12/30/browser-sparql-server-wiki/|creationTime|2014-12-30T19:19:23Z +http://www.semanlink.net/2013/07/coldrdfa/|creationDate|2013-09-02 +http://www.semanlink.net/2013/07/coldrdfa/|tag|http://www.semanlink.net/tag/schema_org +http://www.semanlink.net/2013/07/coldrdfa/|tag|http://www.semanlink.net/tag/c2gweb_rdf +http://www.semanlink.net/2013/07/coldrdfa/|tag|http://www.semanlink.net/tag/c2gweb_and_product_description +http://www.semanlink.net/2013/07/coldrdfa/|tag|http://www.semanlink.net/tag/rdfa +http://www.semanlink.net/2013/07/coldrdfa/|comment|"et aussi http://www.semanlink.net/2013/07/coldrdfa/?url=confnew.html +" +http://www.semanlink.net/2013/07/coldrdfa/|title|RDFa / schema.org test +http://www.semanlink.net/2013/07/coldrdfa/|creationTime|2013-09-02T16:45:53Z +http://uk.co.rplug.renault.com/product/gen?embed=true|creationDate|2014-02-07 +http://uk.co.rplug.renault.com/product/gen?embed=true|tag|http://www.semanlink.net/tag/c2gweb_on_the_web +http://uk.co.rplug.renault.com/product/gen?embed=true|tag|http://www.semanlink.net/tag/c2gweb +http://uk.co.rplug.renault.com/product/gen?embed=true|tag|http://www.semanlink.net/tag/rdfa +http://uk.co.rplug.renault.com/product/gen?embed=true|tag|http://www.semanlink.net/tag/c2gweb_rdf 
+http://uk.co.rplug.renault.com/product/gen?embed=true|comment|"More complete description in JSON-LD and in turtle.
+You can test the HTML/RDFa content.
+Note: this page only provides links to the models. It could, and probably should, also provide links involving other specifications. For instance, the choice ""Diesel"" could link to the URI of the ""Diesel"" specification, which includes the links to the corresponding configurations (one for each model that supports a diesel engine):
+http://uk.co.rplug.renault.com/product/gen/spec/PT1628_diesel/-?embed=true
+Again, a more complete description in turtle
+It wouldn't be easily possible, however, to have links to things such as ""climate control"", because this kind of specification depends on the model: the climate control on the Clio is not the same thing as the climate control on the Laguna. There are therefore different URIs for the different ""climate control"" specifications (one for each model that supports some form of climate control). Here's the specification ""climate control"" on ""New Clio"":
+http://uk.co.rplug.renault.com/product/model/CL4/new-clio/spec/CAREG/automatic-climate-control?embed=true
+And here is the ""New Clio with climate control"" configuration, along with the corresponding test page. + + + + + + + + + + + + + + +" +http://uk.co.rplug.renault.com/product/gen?embed=true|title|Renault range with RDFa markup +http://uk.co.rplug.renault.com/product/gen?embed=true|creationTime|2014-02-07T18:42:46Z +http://ole-martin.net/hbase-tutorial-for-beginners/|creationDate|2013-03-12 +http://ole-martin.net/hbase-tutorial-for-beginners/|tag|http://www.semanlink.net/tag/hbase +http://ole-martin.net/hbase-tutorial-for-beginners/|tag|http://www.semanlink.net/tag/tutorial +http://ole-martin.net/hbase-tutorial-for-beginners/|title|HBase tutorial for beginners - a blog by Ole-Martin Mørk +http://ole-martin.net/hbase-tutorial-for-beginners/|creationTime|2013-03-12T11:20:05Z +http://www.w3.org/blog/SW/2012/03/08/rdfa-microdata-and-rdf-two-notes-published-by-the-w3c-html-data-task-force/|creationDate|2012-03-10 +http://www.w3.org/blog/SW/2012/03/08/rdfa-microdata-and-rdf-two-notes-published-by-the-w3c-html-data-task-force/|tag|http://www.semanlink.net/tag/microdata +http://www.w3.org/blog/SW/2012/03/08/rdfa-microdata-and-rdf-two-notes-published-by-the-w3c-html-data-task-force/|tag|http://www.semanlink.net/tag/html_data +http://www.w3.org/blog/SW/2012/03/08/rdfa-microdata-and-rdf-two-notes-published-by-the-w3c-html-data-task-force/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/blog/SW/2012/03/08/rdfa-microdata-and-rdf-two-notes-published-by-the-w3c-html-data-task-force/|tag|http://www.semanlink.net/tag/rdfa +http://www.w3.org/blog/SW/2012/03/08/rdfa-microdata-and-rdf-two-notes-published-by-the-w3c-html-data-task-force/|tag|http://www.semanlink.net/tag/semantic_markup_in_html +http://www.w3.org/blog/SW/2012/03/08/rdfa-microdata-and-rdf-two-notes-published-by-the-w3c-html-data-task-force/|title|RDFa, Microdata, and RDF (Two Notes Published by the W3C HTML Data Task Force) Semantic Web Activity News +http://www.w3.org/blog/SW/2012/03/08/rdfa-microdata-and-rdf-two-notes-published-by-the-w3c-html-data-task-force/|creationTime|2012-03-10T22:30:22Z +http://www.pnas.org/content/early/2016/04/13/1520084113|creationDate|2016-04-22 +http://www.pnas.org/content/early/2016/04/13/1520084113|tag|http://www.semanlink.net/tag/conscience +http://www.pnas.org/content/early/2016/04/13/1520084113|tag|http://www.semanlink.net/tag/insecte +http://www.pnas.org/content/early/2016/04/13/1520084113|title|What insects can tell us about the origins of consciousness +http://www.pnas.org/content/early/2016/04/13/1520084113|creationTime|2016-04-22T12:59:01Z +https://arxiv.org/abs/1506.02142|creationDate|2019-05-13 +https://arxiv.org/abs/1506.02142|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1506.02142|tag|http://www.semanlink.net/tag/bayesian_deep_learning +https://arxiv.org/abs/1506.02142|tag|http://www.semanlink.net/tag/uncertainty_in_deep_learning +https://arxiv.org/abs/1506.02142|tag|http://www.semanlink.net/tag/dropout +https://arxiv.org/abs/1506.02142|arxiv_author|Zoubin Ghahramani +https://arxiv.org/abs/1506.02142|arxiv_author|Yarin Gal +https://arxiv.org/abs/1506.02142|title|[1506.02142] Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning +https://arxiv.org/abs/1506.02142|creationTime|2019-05-13T09:11:32Z +https://arxiv.org/abs/1506.02142|arxiv_summary|"Deep learning tools have gained tremendous attention in applied machine +learning. However such tools for regression and classification do not capture +model uncertainty. 
In comparison, Bayesian models offer a mathematically +grounded framework to reason about model uncertainty, but usually come with a +prohibitive computational cost. In this paper we develop a new theoretical +framework casting dropout training in deep neural networks (NNs) as approximate +Bayesian inference in deep Gaussian processes. A direct result of this theory +gives us tools to model uncertainty with dropout NNs -- extracting information +from existing models that has been thrown away so far. This mitigates the +problem of representing uncertainty in deep learning without sacrificing either +computational complexity or test accuracy. We perform an extensive study of the +properties of dropout's uncertainty. Various network architectures and +non-linearities are assessed on tasks of regression and classification, using +MNIST as an example. We show a considerable improvement in predictive +log-likelihood and RMSE compared to existing state-of-the-art methods, and +finish by using dropout's uncertainty in deep reinforcement learning." +https://arxiv.org/abs/1506.02142|arxiv_firstAuthor|Yarin Gal +https://arxiv.org/abs/1506.02142|arxiv_updated|2016-10-04T16:50:26Z +https://arxiv.org/abs/1506.02142|arxiv_title|Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning +https://arxiv.org/abs/1506.02142|arxiv_published|2015-06-06T12:30:43Z +https://arxiv.org/abs/1506.02142|arxiv_num|1506.02142 +https://sgugger.github.io/|creationDate|2019-01-03 +https://sgugger.github.io/|tag|http://www.semanlink.net/tag/ml_nlp_blog +https://sgugger.github.io/|tag|http://www.semanlink.net/tag/sylvain_gugger +https://sgugger.github.io/|title|Another data science student's blog (Sylvain Gugger) +https://sgugger.github.io/|creationTime|2019-01-03T17:45:41Z +http://www.quora.com/What-is-best-for-graph-rendering-sigma-js-or-d3-js-Why|creationDate|2015-01-26 +http://www.quora.com/What-is-best-for-graph-rendering-sigma-js-or-d3-js-Why|tag|http://www.semanlink.net/tag/sigma_js +http://www.quora.com/What-is-best-for-graph-rendering-sigma-js-or-d3-js-Why|tag|http://www.semanlink.net/tag/graph +http://www.quora.com/What-is-best-for-graph-rendering-sigma-js-or-d3-js-Why|title|What Is Best For Graph Rendering: Sigma.js Or D3.js? Why? - Quora +http://www.quora.com/What-is-best-for-graph-rendering-sigma-js-or-d3-js-Why|creationTime|2015-01-26T14:53:10Z +http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela|creationDate|2018-01-05 +http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela|tag|http://www.semanlink.net/tag/transe +http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela|tag|http://www.semanlink.net/tag/antoine_bordes +http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela|comment|"This work focuses on modeling multi-relational +data from KBs (Wordnet and Freebase in this paper), with the goal of providing an efficient +tool to complete them by automatically adding new facts, without requiring extra knowledge. + +**Embedding entities and relationships of multirelational +data**: a method which **models relationships by interpreting them as translations** operating on the +low-dimensional embeddings of the entities. Motivation: +- hierarchical relationships are extremely common in KBs and translations are the natural transformations for representing them. +- cf. 
word embeddings and the “capital of” relationship between countries and cities, which are (coincidentally rather than willingly) represented by the model as translations in the embedding space. This suggests that there may exist embedding spaces in which 1-to-1 relationships between entities of different types may, as well, be represented by translations. The intention of our model is to enforce such a structure of the embedding space. + + +[Good blog post by PY Vandenbussche](http://pyvandenbussche.info/2017/translating-embeddings-transe/)" +http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela|relatedDoc|http://pyvandenbussche.info/2017/translating-embeddings-transe/ +http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela|title|Translating Embeddings for Modeling Multi-relational Data (2013) +http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela|creationTime|2018-01-05T14:46:46Z +http://ohshitgit.com/|creationDate|2018-01-06 +http://ohshitgit.com/|tag|http://www.semanlink.net/tag/git +http://ohshitgit.com/|title|Oh, shit, git! +http://ohshitgit.com/|creationTime|2018-01-06T14:46:32Z +http://wiki.apache.org/tomcat/FAQ/CharacterEncoding|creationDate|2008-10-12 +http://wiki.apache.org/tomcat/FAQ/CharacterEncoding|tag|http://www.semanlink.net/tag/servlet +http://wiki.apache.org/tomcat/FAQ/CharacterEncoding|tag|http://www.semanlink.net/tag/uri_encoding +http://wiki.apache.org/tomcat/FAQ/CharacterEncoding|tag|http://www.semanlink.net/tag/sparql +http://wiki.apache.org/tomcat/FAQ/CharacterEncoding|tag|http://www.semanlink.net/tag/faq +http://wiki.apache.org/tomcat/FAQ/CharacterEncoding|tag|http://www.semanlink.net/tag/tomcat +http://wiki.apache.org/tomcat/FAQ/CharacterEncoding|comment|"The fact is: there is no char encoding in a GET request. So an unmodified installation of Tomcat will parse the URI in a GET as ISO-8859-1 (""the Servlet spec requires it""). But a SPARQL request, for instance, requires it to be UTF-8 encoded. So it can't work!!!
+Tomcat people insist that their behavior is a feature. Maybe it's not a bug, but I doubt it is a feature. To get tomcat working otherwise, you have to set a parameter in the server.xml (connector, URIEncoding) What if you don't control the tomcat install? Maybe the best thing to do is to parse the parameters from request.getQueryString(). In my web apps until now, I double UTF-8 encode the params in a GET (for instance, the semanlink bookmarklet) + + + + +" +http://wiki.apache.org/tomcat/FAQ/CharacterEncoding|title|FAQ/CharacterEncoding - Tomcat Wiki +http://wiki.apache.org/tomcat/FAQ/CharacterEncoding|creationTime|2008-10-12T17:29:10Z +http://robohub.org/artificial-general-intelligence-that-plays-atari-video-games-how-did-deepmind-do-it/|creationDate|2014-09-26 +http://robohub.org/artificial-general-intelligence-that-plays-atari-video-games-how-did-deepmind-do-it/|tag|http://www.semanlink.net/tag/google_deepmind +http://robohub.org/artificial-general-intelligence-that-plays-atari-video-games-how-did-deepmind-do-it/|tag|http://www.semanlink.net/tag/video_games +http://robohub.org/artificial-general-intelligence-that-plays-atari-video-games-how-did-deepmind-do-it/|title|Artificial General Intelligence that plays Atari video games: How did DeepMind do it? Robohub +http://robohub.org/artificial-general-intelligence-that-plays-atari-video-games-how-did-deepmind-do-it/|creationTime|2014-09-26T16:38:02Z +http://esw.w3.org/topic/SkosDev|creationDate|2006-07-06 +http://esw.w3.org/topic/SkosDev|tag|http://www.semanlink.net/tag/w3c +http://esw.w3.org/topic/SkosDev|tag|http://www.semanlink.net/tag/semanlink_related +http://esw.w3.org/topic/SkosDev|tag|http://www.semanlink.net/tag/skos +http://esw.w3.org/topic/SkosDev|comment|Best practices for naming resources of type skos:Concept with HTTP URIs +http://esw.w3.org/topic/SkosDev|title|SkosDev - ESW Wiki +http://smethur.st/posts/176135843|creationDate|2014-09-26 +http://smethur.st/posts/176135843|tag|http://www.semanlink.net/tag/url +http://smethur.st/posts/176135843|tag|http://www.semanlink.net/tag/bbc +http://smethur.st/posts/176135843|comment|Your organisation / brand / “product” / whatever is already at the mercy of search engines because that’s how real people use the web +http://smethur.st/posts/176135843|title|Yet another post about Google (not really) removing the URL bar from Chrome Smethurst +http://smethur.st/posts/176135843|creationTime|2014-09-26T11:05:57Z +http://www.flickr.com/photos/hyperfp|creationDate|2006-02-14 +http://www.flickr.com/photos/hyperfp|tag|http://www.semanlink.net/tag/flickr +http://www.flickr.com/photos/hyperfp|tag|http://www.semanlink.net/tag/fps +http://www.flickr.com/photos/hyperfp|title|Flickr: Photos from hyperfp +http://chatlogs.planetrdf.com/swig/2007-07-04.html#T13-34-27|creationDate|2008-05-08 +http://chatlogs.planetrdf.com/swig/2007-07-04.html#T13-34-27|tag|http://www.semanlink.net/tag/rdf_forms +http://chatlogs.planetrdf.com/swig/2007-07-04.html#T13-34-27|title|Semantic Web Interest Group IRC Chat Logs for 2007-07-04 +http://chatlogs.planetrdf.com/swig/2007-07-04.html#T13-34-27|creationTime|2008-05-08T14:18:00Z +http://www.youtube.com/watch?v=blzl4JmrjuE|creationDate|2012-09-15 +http://www.youtube.com/watch?v=blzl4JmrjuE|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=blzl4JmrjuE|tag|http://www.semanlink.net/tag/niger +http://www.youtube.com/watch?v=blzl4JmrjuE|tag|http://www.semanlink.net/tag/lutte_traditionnelle +http://www.youtube.com/watch?v=blzl4JmrjuE|title|Finale lutte 
traditionnelle Niger 2012 - YouTube +http://www.youtube.com/watch?v=blzl4JmrjuE|creationTime|2012-09-15T00:01:52Z +http://blog.bitflux.ch/wiki/LiveSearch|creationDate|2005-05-18 +http://blog.bitflux.ch/wiki/LiveSearch|tag|http://www.semanlink.net/tag/blog +http://blog.bitflux.ch/wiki/LiveSearch|tag|http://www.semanlink.net/tag/wiki +http://blog.bitflux.ch/wiki/LiveSearch|tag|http://www.semanlink.net/tag/livesearch +http://blog.bitflux.ch/wiki/LiveSearch|title|LiveSearch - Bitflux Blog Wiki +http://www.slf4j.org/|creationDate|2009-05-28 +http://www.slf4j.org/|tag|http://www.semanlink.net/tag/java_dev +http://www.slf4j.org/|tag|http://www.semanlink.net/tag/log4j +http://www.slf4j.org/|title|SLF4J: Simple Logging Facade for Java +http://www.slf4j.org/|creationTime|2009-05-28T15:58:16Z +http://www.lemonde.fr/afrique/article/2017/08/31/l-incendie-au-musee-d-abomey-relance-le-debat-sur-la-conservation-des-tresors-du-benin_5179074_3212.html|creationDate|2017-08-31 +http://www.lemonde.fr/afrique/article/2017/08/31/l-incendie-au-musee-d-abomey-relance-le-debat-sur-la-conservation-des-tresors-du-benin_5179074_3212.html|tag|http://www.semanlink.net/tag/musees_africains +http://www.lemonde.fr/afrique/article/2017/08/31/l-incendie-au-musee-d-abomey-relance-le-debat-sur-la-conservation-des-tresors-du-benin_5179074_3212.html|tag|http://www.semanlink.net/tag/benin +http://www.lemonde.fr/afrique/article/2017/08/31/l-incendie-au-musee-d-abomey-relance-le-debat-sur-la-conservation-des-tresors-du-benin_5179074_3212.html|title|L’incendie au musée d’Abomey relance le débat sur la conservation des trésors du Bénin +http://www.lemonde.fr/afrique/article/2017/08/31/l-incendie-au-musee-d-abomey-relance-le-debat-sur-la-conservation-des-tresors-du-benin_5179074_3212.html|creationTime|2017-08-31T13:14:53Z +https://medium.com/huggingface/learning-meaning-in-natural-language-processing-the-semantics-mega-thread-9c0332dfe28e|creationDate|2018-08-14 +https://medium.com/huggingface/learning-meaning-in-natural-language-processing-the-semantics-mega-thread-9c0332dfe28e|tag|http://www.semanlink.net/tag/meaning_in_nlp +https://medium.com/huggingface/learning-meaning-in-natural-language-processing-the-semantics-mega-thread-9c0332dfe28e|title|Learning Meaning in Natural Language Processing - The Semantics Mega-Thread +https://medium.com/huggingface/learning-meaning-in-natural-language-processing-the-semantics-mega-thread-9c0332dfe28e|creationTime|2018-08-14T22:02:18Z +http://en.wikipedia.org/wiki/Myxococcus_xanthus|creationDate|2011-04-04 +http://en.wikipedia.org/wiki/Myxococcus_xanthus|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://en.wikipedia.org/wiki/Myxococcus_xanthus|tag|http://www.semanlink.net/tag/bacteries +http://en.wikipedia.org/wiki/Myxococcus_xanthus|comment|A swarm of M. xanthus is a distributed system: a population of millions of identical entities that communicate among themselves in a non-centralized fashion, thus behaving as a single entity. 
+http://en.wikipedia.org/wiki/Myxococcus_xanthus|title|Myxococcus xanthus - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Myxococcus_xanthus|creationTime|2011-04-04T15:11:54Z +http://tech.groups.yahoo.com/group/jena-dev/message/35867|creationDate|2009-06-26 +http://tech.groups.yahoo.com/group/jena-dev/message/35867|tag|http://www.semanlink.net/tag/jena_dev +http://tech.groups.yahoo.com/group/jena-dev/message/35867|tag|http://www.semanlink.net/tag/jena_and_database +http://tech.groups.yahoo.com/group/jena-dev/message/35867|tag|http://www.semanlink.net/tag/joseki +http://tech.groups.yahoo.com/group/jena-dev/message/35867|title|jena-dev : Message: Joseki requirements +http://tech.groups.yahoo.com/group/jena-dev/message/35867|creationTime|2009-06-26T12:40:23Z +http://cinema.arte.tv/fr/securite-nucleaire-le-grand-mensonge|creationDate|2017-12-07 +http://cinema.arte.tv/fr/securite-nucleaire-le-grand-mensonge|tag|http://www.semanlink.net/tag/nuclear_power_no_thanks +http://cinema.arte.tv/fr/securite-nucleaire-le-grand-mensonge|tag|http://www.semanlink.net/tag/flippant +http://cinema.arte.tv/fr/securite-nucleaire-le-grand-mensonge|tag|http://www.semanlink.net/tag/industrie_nucleaire +http://cinema.arte.tv/fr/securite-nucleaire-le-grand-mensonge|tag|http://www.semanlink.net/tag/terrorisme +http://cinema.arte.tv/fr/securite-nucleaire-le-grand-mensonge|tag|http://www.semanlink.net/tag/documentaire_tv +http://cinema.arte.tv/fr/securite-nucleaire-le-grand-mensonge|title|Sécurité nucléaire : le grand mensonge ARTE Cinema +http://cinema.arte.tv/fr/securite-nucleaire-le-grand-mensonge|creationTime|2017-12-07T00:10:57Z +http://blog.semantic-web.at/2008/11/10/umbel-dbpedia-futureweb-ecology-interview/|creationDate|2008-11-12 +http://blog.semantic-web.at/2008/11/10/umbel-dbpedia-futureweb-ecology-interview/|tag|http://www.semanlink.net/tag/mike_bergman +http://blog.semantic-web.at/2008/11/10/umbel-dbpedia-futureweb-ecology-interview/|tag|http://www.semanlink.net/tag/umbel +http://blog.semantic-web.at/2008/11/10/umbel-dbpedia-futureweb-ecology-interview/|tag|http://www.semanlink.net/tag/dbpedia +http://blog.semantic-web.at/2008/11/10/umbel-dbpedia-futureweb-ecology-interview/|title|The Semantic Puzzle DBpedia, UMBEL & the Future Web’s Ecology - interview with Mike Bergman & Sören Auer +http://blog.semantic-web.at/2008/11/10/umbel-dbpedia-futureweb-ecology-interview/|creationTime|2008-11-12T15:28:32Z +https://www.pnas.org/content/early/2019/03/27/1817407116|creationDate|2019-04-02 +https://www.pnas.org/content/early/2019/03/27/1817407116|tag|http://www.semanlink.net/tag/tanis_kt +https://www.pnas.org/content/early/2019/03/27/1817407116|title|A seismically induced onshore surge deposit at the KPg boundary, North Dakota PNAS +https://www.pnas.org/content/early/2019/03/27/1817407116|creationTime|2019-04-02T20:44:12Z +https://medium.com/@francois.chollet/the-memories-around-us-a3ba7d3a66a2|creationDate|2018-12-30 +https://medium.com/@francois.chollet/the-memories-around-us-a3ba7d3a66a2|tag|http://www.semanlink.net/tag/antiquite_romaine +https://medium.com/@francois.chollet/the-memories-around-us-a3ba7d3a66a2|tag|http://www.semanlink.net/tag/francois_chollet +https://medium.com/@francois.chollet/the-memories-around-us-a3ba7d3a66a2|tag|http://www.semanlink.net/tag/archeologie +https://medium.com/@francois.chollet/the-memories-around-us-a3ba7d3a66a2|title|The memories around us – François Chollet – Medium 
+https://medium.com/@francois.chollet/the-memories-around-us-a3ba7d3a66a2|creationTime|2018-12-30T14:27:14Z +http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref|creationDate|2007-11-17 +http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref|tag|http://www.semanlink.net/tag/uri_reference +http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref|tag|http://www.semanlink.net/tag/semanlink_todo +http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref|tag|http://www.semanlink.net/tag/rdf +http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref|comment|"Because of the risk of confusion between RDF URI references that would be equivalent if derefenced, the use of %-escaped characters in RDF URI references is strongly discouraged.
+J'ai l'impression d'être à l'ouest dans semanlink" +http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref|title|"Definition of ""URI References"" in RDF (Concepts and Abstract Syntax)" +http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref|creationTime|2007-11-17T16:14:11Z +http://www.ihes.fr/~lafforgue/dem/courriel.html|creationDate|2005-11-25 +http://www.ihes.fr/~lafforgue/dem/courriel.html|tag|http://www.semanlink.net/tag/laurent_lafforgue +http://www.ihes.fr/~lafforgue/dem/courriel.html|tag|http://www.semanlink.net/tag/education +http://www.ihes.fr/~lafforgue/dem/courriel.html|title|Pourquoi j'ai démissionné du Haut Conseil de l'Education - Laurent Lafforgue +http://www.futura-sciences.com/magazines/terre/infos/actu/d/paleontologie-dinosaures-ont-disparu-mammiferes-nen-menaient-pas-large-63250/|creationDate|2016-06-25 +http://www.futura-sciences.com/magazines/terre/infos/actu/d/paleontologie-dinosaures-ont-disparu-mammiferes-nen-menaient-pas-large-63250/|tag|http://www.semanlink.net/tag/dinosaures +http://www.futura-sciences.com/magazines/terre/infos/actu/d/paleontologie-dinosaures-ont-disparu-mammiferes-nen-menaient-pas-large-63250/|title|Quand les dinosaures ont disparu, les mammifères n'en menaient pas large +http://www.futura-sciences.com/magazines/terre/infos/actu/d/paleontologie-dinosaures-ont-disparu-mammiferes-nen-menaient-pas-large-63250/|creationTime|2016-06-25T15:50:21Z +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-13.pdf|creationDate|2013-05-14 +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-13.pdf|tag|http://www.semanlink.net/tag/web_of_needs +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-13.pdf|tag|http://www.semanlink.net/tag/ldow2013 +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-13.pdf|comment|"""Tell what you need, and give it a URI""" +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-13.pdf|title|Building a Web of Needs, Florian Kleedorfer +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-13.pdf|creationTime|2013-05-14T18:37:26Z +http://dig.csail.mit.edu/breadcrumbs/node/194|creationDate|2007-05-19 +http://dig.csail.mit.edu/breadcrumbs/node/194|tag|http://www.semanlink.net/tag/dan_connolly +http://dig.csail.mit.edu/breadcrumbs/node/194|tag|http://www.semanlink.net/tag/grddl +http://dig.csail.mit.edu/breadcrumbs/node/194|tag|http://www.semanlink.net/tag/sparql +http://dig.csail.mit.edu/breadcrumbs/node/194|tag|http://www.semanlink.net/tag/linked_data +http://dig.csail.mit.edu/breadcrumbs/node/194|title|Linked Data at WWW2007: GRDDL, SPARQL, and Wikipedia, oh my! +http://dig.csail.mit.edu/breadcrumbs/node/194|creationTime|2007-05-19T14:27:01Z +http://xmlarmyknife.org/blog/archives/000285.html|creationDate|2006-05-28 +http://xmlarmyknife.org/blog/archives/000285.html|tag|http://www.semanlink.net/tag/grddl +http://xmlarmyknife.org/blog/archives/000285.html|tag|http://www.semanlink.net/tag/sparql +http://xmlarmyknife.org/blog/archives/000285.html|tag|http://www.semanlink.net/tag/semantic_markup_in_html +http://xmlarmyknife.org/blog/archives/000285.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://xmlarmyknife.org/blog/archives/000285.html|comment|"Embedded RDF is a method of embedding (a subset of) RDF within XHTML and HTML documents. A simple XSLT transformation can be used to extract the RDF from within the document. +
+A related and more generalised technology is GRDDL which defines how to associate transformation algorithms (i.e. XSLT stylesheets) with XHTML profiles or microformats so that there's a clear mapping from embedded metadata into RDF. +
+I've been experimenting with adding support for both of these technologies in the XMLArmyKnife SPARQL query service. This provides a means to directly query RDF embedded in XHTML documents." +http://xmlarmyknife.org/blog/archives/000285.html|title|XMLArmyKnife - Experimenting with EmbeddedRDF and GRDDL Support +http://www.mkbergman.com/?p=417|creationDate|2008-03-04 +http://www.mkbergman.com/?p=417|tag|http://www.semanlink.net/tag/linked_data +http://www.mkbergman.com/?p=417|tag|http://www.semanlink.net/tag/wikipedia +http://www.mkbergman.com/?p=417|title|99 Wikipedia Sources Aiding the Semantic Web » AI3:::Adaptive Information +http://www.mkbergman.com/?p=417|creationTime|2008-03-04T22:59:21Z +http://riese.joanneum.at/|creationDate|2010-07-16 +http://riese.joanneum.at/|tag|http://www.semanlink.net/tag/linked_data +http://riese.joanneum.at/|tag|http://www.semanlink.net/tag/semantic_statistics +http://riese.joanneum.at/|title|RDFizing and Interlinking the EuroStat Data Set Effort - riese +http://riese.joanneum.at/|creationTime|2010-07-16T15:55:10Z +http://www.spiegel.de/international/germany/inside-the-nsa-s-war-on-internet-security-a-1010361.html|creationDate|2014-12-29 +http://www.spiegel.de/international/germany/inside-the-nsa-s-war-on-internet-security-a-1010361.html|tag|http://www.semanlink.net/tag/edward_snowden +http://www.spiegel.de/international/germany/inside-the-nsa-s-war-on-internet-security-a-1010361.html|tag|http://www.semanlink.net/tag/online_security +http://www.spiegel.de/international/germany/inside-the-nsa-s-war-on-internet-security-a-1010361.html|tag|http://www.semanlink.net/tag/nsa +http://www.spiegel.de/international/germany/inside-the-nsa-s-war-on-internet-security-a-1010361.html|comment|New Snowden documents show that some forms of encryption still cause problems for the NSA. +http://www.spiegel.de/international/germany/inside-the-nsa-s-war-on-internet-security-a-1010361.html|title|Inside the NSA's War on Internet Security - SPIEGEL ONLINE +http://www.spiegel.de/international/germany/inside-the-nsa-s-war-on-internet-security-a-1010361.html|creationTime|2014-12-29T12:28:32Z +https://lists.w3.org/Archives/Public/public-linked-json/2011Aug/0078.html|creationDate|2016-05-05 +https://lists.w3.org/Archives/Public/public-linked-json/2011Aug/0078.html|tag|http://www.semanlink.net/tag/json_ld_frame +https://lists.w3.org/Archives/Public/public-linked-json/2011Aug/0078.html|tag|http://www.semanlink.net/tag/public_linked_json_w3_org +https://lists.w3.org/Archives/Public/public-linked-json/2011Aug/0078.html|title|Framing explanation and framing return values from Dave Longley on 2011-08-24 (public-linked-json@w3.org from August 2011) +https://lists.w3.org/Archives/Public/public-linked-json/2011Aug/0078.html|creationTime|2016-05-05T16:34:24Z +https://www.techdirt.com/blog/?tag=monkey+selfie|creationDate|2016-07-29 +https://www.techdirt.com/blog/?tag=monkey+selfie|tag|http://www.semanlink.net/tag/selfie +https://www.techdirt.com/blog/?tag=monkey+selfie|title|Monkey Selfie stories at Techdirt. 
+https://www.techdirt.com/blog/?tag=monkey+selfie|creationTime|2016-07-29T21:50:26Z +http://www.theguardian.com/world/2013/sep/05/nsa-how-to-remain-secure-surveillance|creationDate|2013-09-06 +http://www.theguardian.com/world/2013/sep/05/nsa-how-to-remain-secure-surveillance|tag|http://www.semanlink.net/tag/privacy_and_internet +http://www.theguardian.com/world/2013/sep/05/nsa-how-to-remain-secure-surveillance|tag|http://www.semanlink.net/tag/nsa_spying_scandal +http://www.theguardian.com/world/2013/sep/05/nsa-how-to-remain-secure-surveillance|tag|http://www.semanlink.net/tag/howto +http://www.theguardian.com/world/2013/sep/05/nsa-how-to-remain-secure-surveillance|tag|http://www.semanlink.net/tag/nsa +http://www.theguardian.com/world/2013/sep/05/nsa-how-to-remain-secure-surveillance|title|NSA surveillance: how to stay secure Bruce Schneier theguardian.com +http://www.theguardian.com/world/2013/sep/05/nsa-how-to-remain-secure-surveillance|creationTime|2013-09-06T22:14:06Z +http://www.insu.cnrs.fr/node/5745|creationDate|2016-03-27 +http://www.insu.cnrs.fr/node/5745|tag|http://www.semanlink.net/tag/exomars +http://www.insu.cnrs.fr/node/5745|title|L'Europe est à nouveau à la conquête de Mars +http://www.insu.cnrs.fr/node/5745|creationTime|2016-03-27T13:31:22Z +http://www.w3.org/2001/sw/sweo/public/UseCases/Faviki/|creationDate|2008-12-10 +http://www.w3.org/2001/sw/sweo/public/UseCases/Faviki/|tag|http://www.semanlink.net/tag/semanlink_related +http://www.w3.org/2001/sw/sweo/public/UseCases/Faviki/|tag|http://www.semanlink.net/tag/semantic_tagging +http://www.w3.org/2001/sw/sweo/public/UseCases/Faviki/|tag|http://www.semanlink.net/tag/wikipedia +http://www.w3.org/2001/sw/sweo/public/UseCases/Faviki/|tag|http://www.semanlink.net/tag/social_bookmarking +http://www.w3.org/2001/sw/sweo/public/UseCases/Faviki/|comment|Faviki is a social bookmarking tool that allows users to annotate the contents of web pages by Wikipedia concepts. 
+http://www.w3.org/2001/sw/sweo/public/UseCases/Faviki/|title|Case Study: Semantic Tags +http://www.w3.org/2001/sw/sweo/public/UseCases/Faviki/|creationTime|2008-12-10T14:06:17Z +https://deepmind.com/blog/alphago-zero-learning-scratch/|creationDate|2017-10-18 +https://deepmind.com/blog/alphago-zero-learning-scratch/|tag|http://www.semanlink.net/tag/reinforcement_learning +https://deepmind.com/blog/alphago-zero-learning-scratch/|tag|http://www.semanlink.net/tag/alphago +https://deepmind.com/blog/alphago-zero-learning-scratch/|title|AlphaGo Zero: Learning from scratch DeepMind +https://deepmind.com/blog/alphago-zero-learning-scratch/|creationTime|2017-10-18T22:43:19Z +https://medium.com/@hadyelsahar/writing-code-for-natural-language-processing-research-emnlp2018-nlproc-a87367cc5146|creationDate|2018-11-05 +https://medium.com/@hadyelsahar/writing-code-for-natural-language-processing-research-emnlp2018-nlproc-a87367cc5146|tag|http://www.semanlink.net/tag/emnlp_2018 +https://medium.com/@hadyelsahar/writing-code-for-natural-language-processing-research-emnlp2018-nlproc-a87367cc5146|tag|http://www.semanlink.net/tag/slides +https://medium.com/@hadyelsahar/writing-code-for-natural-language-processing-research-emnlp2018-nlproc-a87367cc5146|tag|http://www.semanlink.net/tag/tutorial +https://medium.com/@hadyelsahar/writing-code-for-natural-language-processing-research-emnlp2018-nlproc-a87367cc5146|tag|http://www.semanlink.net/tag/code +https://medium.com/@hadyelsahar/writing-code-for-natural-language-processing-research-emnlp2018-nlproc-a87367cc5146|title|Writing code for Natural language processing Research +https://medium.com/@hadyelsahar/writing-code-for-natural-language-processing-research-emnlp2018-nlproc-a87367cc5146|creationTime|2018-11-05T18:48:58Z +http://www.w3.org/DesignIssues/Principles.html|creationDate|2009-04-14 +http://www.w3.org/DesignIssues/Principles.html|tag|http://www.semanlink.net/tag/test_of_independent_invention +http://www.w3.org/DesignIssues/Principles.html|tag|http://www.semanlink.net/tag/architecture +http://www.w3.org/DesignIssues/Principles.html|tag|http://www.semanlink.net/tag/web +http://www.w3.org/DesignIssues/Principles.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/DesignIssues/Principles.html|tag|http://www.semanlink.net/tag/principle_of_least_power +http://www.w3.org/DesignIssues/Principles.html|tag|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.w3.org/DesignIssues/Principles.html|title|-- Axioms of Web architecture +http://www.w3.org/DesignIssues/Principles.html|creationTime|2009-04-14T01:12:12Z +http://clesnes.blog.lemonde.fr/2017/06/20/dans-la-silicon-valley-loracle-francais-de-la-complexite/|creationDate|2017-07-05 +http://clesnes.blog.lemonde.fr/2017/06/20/dans-la-silicon-valley-loracle-francais-de-la-complexite/|tag|http://www.semanlink.net/tag/startups +http://clesnes.blog.lemonde.fr/2017/06/20/dans-la-silicon-valley-loracle-francais-de-la-complexite/|tag|http://www.semanlink.net/tag/complexite +http://clesnes.blog.lemonde.fr/2017/06/20/dans-la-silicon-valley-loracle-francais-de-la-complexite/|title|Dans la Silicon Valley, « l’Oracle » français de la complexité tech and berries +http://clesnes.blog.lemonde.fr/2017/06/20/dans-la-silicon-valley-loracle-francais-de-la-complexite/|creationTime|2017-07-05T18:36:07Z +https://www.newscientist.com/article/2087924-world-war-zero-brought-down-mystery-civilisation-of-sea-people/#.V0HMbfG3hxY.twitter|creationDate|2016-05-23 
+https://www.newscientist.com/article/2087924-world-war-zero-brought-down-mystery-civilisation-of-sea-people/#.V0HMbfG3hxY.twitter|tag|http://www.semanlink.net/tag/hittite +https://www.newscientist.com/article/2087924-world-war-zero-brought-down-mystery-civilisation-of-sea-people/#.V0HMbfG3hxY.twitter|tag|http://www.semanlink.net/tag/homere +https://www.newscientist.com/article/2087924-world-war-zero-brought-down-mystery-civilisation-of-sea-people/#.V0HMbfG3hxY.twitter|tag|http://www.semanlink.net/tag/sea_peoples +https://www.newscientist.com/article/2087924-world-war-zero-brought-down-mystery-civilisation-of-sea-people/#.V0HMbfG3hxY.twitter|title|World War Zero brought down mystery civilisation of 'sea people' New Scientist +https://www.newscientist.com/article/2087924-world-war-zero-brought-down-mystery-civilisation-of-sea-people/#.V0HMbfG3hxY.twitter|creationTime|2016-05-23T08:43:29Z +http://www-128.ibm.com/developerworks/web/library/wa-ajaxintro3/?ca=dgr-lnxw01MasterAJAX3|creationDate|2006-02-17 +http://www-128.ibm.com/developerworks/web/library/wa-ajaxintro3/?ca=dgr-lnxw01MasterAJAX3|tag|http://www.semanlink.net/tag/ajax +http://www-128.ibm.com/developerworks/web/library/wa-ajaxintro3/?ca=dgr-lnxw01MasterAJAX3|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www-128.ibm.com/developerworks/web/library/wa-ajaxintro3/?ca=dgr-lnxw01MasterAJAX3|title|Mastering Ajax, Part 3: Advanced requests and responses in Ajax +https://www.quantamagazine.org/20170404-quantum-physicists-attack-the-riemann-hypothesis/|creationDate|2017-04-20 +https://www.quantamagazine.org/20170404-quantum-physicists-attack-the-riemann-hypothesis/|tag|http://www.semanlink.net/tag/hypothese_de_riemann +https://www.quantamagazine.org/20170404-quantum-physicists-attack-the-riemann-hypothesis/|title|Quantum Physicists Attack the Riemann Hypothesis Quanta Magazine +https://www.quantamagazine.org/20170404-quantum-physicists-attack-the-riemann-hypothesis/|creationTime|2017-04-20T20:13:08Z +http://www.semanticoverflow.com/questions/830/describe-a-topic-of-a-concept|creationDate|2010-12-22 +http://www.semanticoverflow.com/questions/830/describe-a-topic-of-a-concept|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.semanticoverflow.com/questions/830/describe-a-topic-of-a-concept|tag|http://www.semanlink.net/tag/skos +http://www.semanticoverflow.com/questions/830/describe-a-topic-of-a-concept|title|Describe a topic of a concept - Semantic Overflow +http://www.semanticoverflow.com/questions/830/describe-a-topic-of-a-concept|creationTime|2010-12-22T17:12:37Z +http://news.bbc.co.uk/1/hi/sci/tech/4166076.stm|creationDate|2005-08-22 +http://news.bbc.co.uk/1/hi/sci/tech/4166076.stm|tag|http://www.semanlink.net/tag/genome +http://news.bbc.co.uk/1/hi/sci/tech/4166076.stm|title|BBC NEWS Ocean bug has 'smallest genome' +http://news.bbc.co.uk/1/hi/sci/tech/4166076.stm|source|BBC +https://fr.wikipedia.org/wiki/Good_Morning_England|creationDate|2017-07-16 +https://fr.wikipedia.org/wiki/Good_Morning_England|tag|http://www.semanlink.net/tag/film +https://fr.wikipedia.org/wiki/Good_Morning_England|tag|http://www.semanlink.net/tag/rock +https://fr.wikipedia.org/wiki/Good_Morning_England|title|Good Morning England (The boat that Rocked) +https://fr.wikipedia.org/wiki/Good_Morning_England|creationTime|2017-07-16T23:09:44Z +http://beckr.org/marbles|creationDate|2008-06-10 +http://beckr.org/marbles|tag|http://www.semanlink.net/tag/rdf_browser +http://beckr.org/marbles|title|Marbles 
+http://beckr.org/marbles|creationTime|2008-06-10T22:01:39Z +http://doc.carrot2.org/|creationDate|2017-05-23 +http://doc.carrot2.org/|tag|http://www.semanlink.net/tag/carrot2 +http://doc.carrot2.org/|title|Carrot2 manual +http://doc.carrot2.org/|creationTime|2017-05-23T17:42:55Z +http://www.youtube.com/watch?v=bIeCF0LjVDw|creationDate|2008-11-14 +http://www.youtube.com/watch?v=bIeCF0LjVDw|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=bIeCF0LjVDw|tag|http://www.semanlink.net/tag/opera_do_malandro +http://www.youtube.com/watch?v=bIeCF0LjVDw|comment|"Memorável cena do filme ""Ópera do Malandro"", em que Elba Ramalho e Cláudia Ohana se enfrentam." +http://www.youtube.com/watch?v=bIeCF0LjVDw|title|"Elba Ramalho e Claudia Ohana em ""O Meu Amor""" +http://www.youtube.com/watch?v=bIeCF0LjVDw|creationTime|2008-11-14T22:39:42Z +http://stackoverflow.com/questions/10582054/maven-project-version-inheritance-do-i-have-to-specify-the-parent-version|creationDate|2016-11-14 +http://stackoverflow.com/questions/10582054/maven-project-version-inheritance-do-i-have-to-specify-the-parent-version|tag|http://www.semanlink.net/tag/maven_tips +http://stackoverflow.com/questions/10582054/maven-project-version-inheritance-do-i-have-to-specify-the-parent-version|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/10582054/maven-project-version-inheritance-do-i-have-to-specify-the-parent-version|title|maven project version inheritance - do I have to specify the parent version? - Stack Overflow +http://stackoverflow.com/questions/10582054/maven-project-version-inheritance-do-i-have-to-specify-the-parent-version|creationTime|2016-11-14T01:36:38Z +https://developers.google.com/gmail/schemas/testing-your-schema#schema_validator|creationDate|2013-07-15 +https://developers.google.com/gmail/schemas/testing-your-schema#schema_validator|tag|http://www.semanlink.net/tag/validation +https://developers.google.com/gmail/schemas/testing-your-schema#schema_validator|tag|http://www.semanlink.net/tag/gmail +https://developers.google.com/gmail/schemas/testing-your-schema#schema_validator|tag|http://www.semanlink.net/tag/schema_org +https://developers.google.com/gmail/schemas/testing-your-schema#schema_validator|title|Testing Your Schemas - Schemas in Gmail — Google Developers +https://developers.google.com/gmail/schemas/testing-your-schema#schema_validator|creationTime|2013-07-15T18:52:59Z +https://www.loggly.com/blog/loggly-chose-elasticsearch-reliable-scalable-log-management/|creationDate|2015-03-11 +https://www.loggly.com/blog/loggly-chose-elasticsearch-reliable-scalable-log-management/|tag|http://www.semanlink.net/tag/solr +https://www.loggly.com/blog/loggly-chose-elasticsearch-reliable-scalable-log-management/|tag|http://www.semanlink.net/tag/elasticsearch +https://www.loggly.com/blog/loggly-chose-elasticsearch-reliable-scalable-log-management/|title|ElasticSearch vs Solr +https://www.loggly.com/blog/loggly-chose-elasticsearch-reliable-scalable-log-management/|creationTime|2015-03-11T18:16:05Z +http://fr.openoffice.org/docs/MacOSX_Install_fr_HowTo_OOo2_V1.4.pdf|creationDate|2007-02-13 +http://fr.openoffice.org/docs/MacOSX_Install_fr_HowTo_OOo2_V1.4.pdf|tag|http://www.semanlink.net/tag/openoffice +http://fr.openoffice.org/docs/MacOSX_Install_fr_HowTo_OOo2_V1.4.pdf|title|Installation d'OpenOffice.org 2 (version X11) Mac OS X PowerPC et Mac Intel +http://fr.openoffice.org/docs/MacOSX_Install_fr_HowTo_OOo2_V1.4.pdf|creationTime|2007-02-13T21:26:18Z 
+http://www.nytimes.com/2015/01/08/opinion/the-charlie-hebdo-massacre-in-paris.html?hp&action=click&pgtype=Homepage&module=c-column-top-span-region&region=c-column-top-span-region&WT.nav=c-column-top-span-region&_r=1|creationDate|2015-01-08
+http://www.nytimes.com/2015/01/08/opinion/the-charlie-hebdo-massacre-in-paris.html?hp&action=click&pgtype=Homepage&module=c-column-top-span-region&region=c-column-top-span-region&WT.nav=c-column-top-span-region&_r=1|tag|http://www.semanlink.net/tag/charlie_hebdo
+http://www.nytimes.com/2015/01/08/opinion/the-charlie-hebdo-massacre-in-paris.html?hp&action=click&pgtype=Homepage&module=c-column-top-span-region&region=c-column-top-span-region&WT.nav=c-column-top-span-region&_r=1|tag|http://www.semanlink.net/tag/new_york_times
+http://www.nytimes.com/2015/01/08/opinion/the-charlie-hebdo-massacre-in-paris.html?hp&action=click&pgtype=Homepage&module=c-column-top-span-region&region=c-column-top-span-region&WT.nav=c-column-top-span-region&_r=1|title|The Charlie Hebdo Massacre in Paris - NYTimes.com
+http://www.nytimes.com/2015/01/08/opinion/the-charlie-hebdo-massacre-in-paris.html?hp&action=click&pgtype=Homepage&module=c-column-top-span-region&region=c-column-top-span-region&WT.nav=c-column-top-span-region&_r=1|creationTime|2015-01-08T16:07:17Z
+http://blogs.oracle.com/bblfish/entry/serialising_java_objects_to_rdf|creationDate|2011-08-19
+http://blogs.oracle.com/bblfish/entry/serialising_java_objects_to_rdf|tag|http://www.semanlink.net/tag/jersey_rdf
+http://blogs.oracle.com/bblfish/entry/serialising_java_objects_to_rdf|tag|http://www.semanlink.net/tag/henry_story
+http://blogs.oracle.com/bblfish/entry/serialising_java_objects_to_rdf|title|Serialising Java Objects to RDF with Jersey (The Sun BabelFish Blog)
+http://blogs.oracle.com/bblfish/entry/serialising_java_objects_to_rdf|creationTime|2011-08-19T11:23:39Z
+https://levyomer.wordpress.com/2014/04/25/dependency-based-word-embeddings/|creationDate|2017-11-08
+https://levyomer.wordpress.com/2014/04/25/dependency-based-word-embeddings/|tag|http://www.semanlink.net/tag/yoav_goldberg
+https://levyomer.wordpress.com/2014/04/25/dependency-based-word-embeddings/|tag|http://www.semanlink.net/tag/word_embedding
+https://levyomer.wordpress.com/2014/04/25/dependency-based-word-embeddings/|comment|"> While continuous word embeddings are gaining popularity, current models are based solely on linear contexts. In this work, we generalize the skip-gram model with negative sampling introduced by Mikolov et al. to include arbitrary contexts.

> Experiments with dependency-based contexts show that they produce markedly different kinds of similarities.
> In particular, the bag-of-words nature of the contexts in the “original” SKIPGRAM model yield broad topical similarities, while the dependency-based contexts yield more functional similarities of a cohyponym nature.
"
+https://levyomer.wordpress.com/2014/04/25/dependency-based-word-embeddings/|title|Dependency-Based Word Embeddings Omer Levy
+https://levyomer.wordpress.com/2014/04/25/dependency-based-word-embeddings/|creationTime|2017-11-08T14:07:28Z
+http://mactips.info/blog/?p=1867|creationDate|2007-07-07
+http://mactips.info/blog/?p=1867|tag|http://www.semanlink.net/tag/installing_wordpress
+http://mactips.info/blog/?p=1867|comment|Fortunately there are comprehensive instructions at MacZealots.com. Unfortunately, the instructions were written in May 2005 for WordPress 1.5, and are slightly dated.
There’s also a small area of uncertainty around editing the file called httpd.conf. This entry provides some useful extra tips.
+http://mactips.info/blog/?p=1867|title|Prepare Mac OS X for WordPress
+http://mactips.info/blog/?p=1867|creationTime|2007-07-07T15:25:31Z
+http://www.cs.vu.nl/~pmika/swc-2008/paggr-bnowack_paggr_2008_10_01.pdf|creationDate|2008-11-03
+http://www.cs.vu.nl/~pmika/swc-2008/paggr-bnowack_paggr_2008_10_01.pdf|tag|http://www.semanlink.net/tag/paggr
+http://www.cs.vu.nl/~pmika/swc-2008/paggr-bnowack_paggr_2008_10_01.pdf|comment|"paggr is an interactive application that simplifies the organization and integration of distributed web information. In a few simple steps, users can create personalized, ""smart data"" portals from a variety of sources and formats such as RDF, remote SPARQL endpoints, microformats, RSS, Atom, RDFa, or selected APIs. Data items in paggr can be linked to each other using an intuitive drag&drop mechanism. Additionally, paggr offers a developer zone where everyone can collaborate on reusable data widgets via simple web forms and a SPARQL-based scripting language with templating mechanism."
+http://www.cs.vu.nl/~pmika/swc-2008/paggr-bnowack_paggr_2008_10_01.pdf|title|paggr - Smart Data Portals
+http://www.cs.vu.nl/~pmika/swc-2008/paggr-bnowack_paggr_2008_10_01.pdf|creationTime|2008-11-03T10:14:02Z
+https://dzone.com/articles/functional-programming-java-8|creationDate|2016-09-30
+https://dzone.com/articles/functional-programming-java-8|tag|http://www.semanlink.net/tag/java_8_lambdas
+https://dzone.com/articles/functional-programming-java-8|tag|http://www.semanlink.net/tag/java_8
+https://dzone.com/articles/functional-programming-java-8|tag|http://www.semanlink.net/tag/functional_programming
+https://dzone.com/articles/functional-programming-java-8|title|Functional Programming with Java 8 Functions - DZone Java
+https://dzone.com/articles/functional-programming-java-8|creationTime|2016-09-30T10:48:03Z
+http://www.laserbox.fr/ere-du-laser|creationDate|2012-01-04
+http://www.laserbox.fr/ere-du-laser|tag|http://www.semanlink.net/tag/sylvain
+http://www.laserbox.fr/ere-du-laser|tag|http://www.semanlink.net/tag/laser
+http://www.laserbox.fr/ere-du-laser|tag|http://www.semanlink.net/tag/enseignement_scientifique
+http://www.laserbox.fr/ere-du-laser|title|L’ère du laser Laserbox
+http://www.laserbox.fr/ere-du-laser|creationTime|2012-01-04T21:32:59Z
+http://linkeddatabook.com/editions/1.0/|creationDate|2012-11-22
+http://linkeddatabook.com/editions/1.0/|tag|http://www.semanlink.net/tag/chris_bizer
+http://linkeddatabook.com/editions/1.0/|tag|http://www.semanlink.net/tag/linked_data
+http://linkeddatabook.com/editions/1.0/|tag|http://www.semanlink.net/tag/howto
+http://linkeddatabook.com/editions/1.0/|tag|http://www.semanlink.net/tag/tom_heath
+http://linkeddatabook.com/editions/1.0/|tag|http://www.semanlink.net/tag/data_web
+http://linkeddatabook.com/editions/1.0/|comment|"Supersedes the paper by C. Bizer, T.
Heath and cygri +" +http://linkeddatabook.com/editions/1.0/|title|Linked Data: Evolving the Web into a Global Data Space +http://linkeddatabook.com/editions/1.0/|creationTime|2012-11-22T01:59:17Z +https://aws.amazon.com/fr/blogs/machine-learning/introduction-to-amazon-sagemaker-object2vec/|creationDate|2018-11-10 +https://aws.amazon.com/fr/blogs/machine-learning/introduction-to-amazon-sagemaker-object2vec/|tag|http://www.semanlink.net/tag/embeddings +https://aws.amazon.com/fr/blogs/machine-learning/introduction-to-amazon-sagemaker-object2vec/|tag|http://www.semanlink.net/tag/aws_machine_learning +https://aws.amazon.com/fr/blogs/machine-learning/introduction-to-amazon-sagemaker-object2vec/|comment|"ex of uses: + +- Collaborative recommendation system +- Multi-label document classification +- Sentence embeddings" +https://aws.amazon.com/fr/blogs/machine-learning/introduction-to-amazon-sagemaker-object2vec/|title|Introduction to Amazon SageMaker Object2Vec  AWS Machine Learning Blog +https://aws.amazon.com/fr/blogs/machine-learning/introduction-to-amazon-sagemaker-object2vec/|creationTime|2018-11-10T11:53:00Z +http://news.bbc.co.uk/2/hi/business/10124807.stm|creationDate|2010-05-20 +http://news.bbc.co.uk/2/hi/business/10124807.stm|tag|http://www.semanlink.net/tag/allemagne +http://news.bbc.co.uk/2/hi/business/10124807.stm|tag|http://www.semanlink.net/tag/angela_merkel +http://news.bbc.co.uk/2/hi/business/10124807.stm|tag|http://www.semanlink.net/tag/euro_crisis +http://news.bbc.co.uk/2/hi/business/10124807.stm|title|BBC News - Shares hit by German short-selling ban +http://news.bbc.co.uk/2/hi/business/10124807.stm|creationTime|2010-05-20T00:33:38Z +http://news.bbc.co.uk/2/hi/business/10124807.stm|source|BBC +http://aarkangel.wordpress.com/2007/04/15/je-suis-un-chef-noir-%E2%80%93-heart-of-darkness/|creationDate|2007-04-30 +http://aarkangel.wordpress.com/2007/04/15/je-suis-un-chef-noir-%E2%80%93-heart-of-darkness/|tag|http://www.semanlink.net/tag/mission_voulet_chanoine +http://aarkangel.wordpress.com/2007/04/15/je-suis-un-chef-noir-%E2%80%93-heart-of-darkness/|title|Je suis un chef noir – Heart of Darkness +http://aarkangel.wordpress.com/2007/04/15/je-suis-un-chef-noir-%E2%80%93-heart-of-darkness/|creationTime|2007-04-30T22:12:59Z +http://confluence.atlassian.com/display/DOC/Installing+the+Confluence+EAR-WAR+edition|creationDate|2006-09-12 +http://confluence.atlassian.com/display/DOC/Installing+the+Confluence+EAR-WAR+edition|title|Installing the Confluence EAR-WAR edition - Confluence +http://www.greenpeace.org/france/fr/campagnes/nucleaire/Nucleaire--au-coeur-du-reacteur--liveblog/|creationDate|2011-12-05 +http://www.greenpeace.org/france/fr/campagnes/nucleaire/Nucleaire--au-coeur-du-reacteur--liveblog/|tag|http://www.semanlink.net/tag/industrie_nucleaire +http://www.greenpeace.org/france/fr/campagnes/nucleaire/Nucleaire--au-coeur-du-reacteur--liveblog/|tag|http://www.semanlink.net/tag/greenpeace +http://www.greenpeace.org/france/fr/campagnes/nucleaire/Nucleaire--au-coeur-du-reacteur--liveblog/|comment|Ce 5 décembre à l’aube, des militants de Greenpeace se sont introduits dans la centrale nucléaire de Nogent-sur-Seine (Aube) à 95 kilomètres au sud-est de Paris pour porter un message: “Le nucléaire sûr n’existe pas”. 
+http://www.greenpeace.org/france/fr/campagnes/nucleaire/Nucleaire--au-coeur-du-reacteur--liveblog/|title|Nucléaire : au coeur du réacteur – liveblog Greenpeace France +http://www.greenpeace.org/france/fr/campagnes/nucleaire/Nucleaire--au-coeur-du-reacteur--liveblog/|creationTime|2011-12-05T17:45:30Z +https://sgfin.github.io/files/cheatsheets/Python_cheatsheet_pandas.pdf|creationDate|2018-11-08 +https://sgfin.github.io/files/cheatsheets/Python_cheatsheet_pandas.pdf|tag|http://www.semanlink.net/tag/pandas +https://sgfin.github.io/files/cheatsheets/Python_cheatsheet_pandas.pdf|tag|http://www.semanlink.net/tag/cheat_sheet +https://sgfin.github.io/files/cheatsheets/Python_cheatsheet_pandas.pdf|title|pandas cheat sheet +https://sgfin.github.io/files/cheatsheets/Python_cheatsheet_pandas.pdf|creationTime|2018-11-08T15:32:07Z +http://rdfa.info/2010/05/27/newsweek-using-rdfa/|creationDate|2010-05-28 +http://rdfa.info/2010/05/27/newsweek-using-rdfa/|tag|http://www.semanlink.net/tag/rdfa +http://rdfa.info/2010/05/27/newsweek-using-rdfa/|title|Newsweek using RDFa +http://rdfa.info/2010/05/27/newsweek-using-rdfa/|creationTime|2010-05-28T18:36:34Z +http://esw.w3.org/topic/SparqlCalendarDemo|creationDate|2006-03-24 +http://esw.w3.org/topic/SparqlCalendarDemo|tag|http://www.semanlink.net/tag/ajax +http://esw.w3.org/topic/SparqlCalendarDemo|tag|http://www.semanlink.net/tag/sparql +http://esw.w3.org/topic/SparqlCalendarDemo|comment|This SPARQL Calendar Demo demonstrates the use of SPARQL queries over AJAX to integrate data from a myriad of native RDF and non-RDF sources. Currently, it finds calendar information from FOAF files, discovers shared interests between people, and suggests events that people with shared interests can attend together. +http://esw.w3.org/topic/SparqlCalendarDemo|title|SparqlCalendarDemo +https://tools.ietf.org/html/draft-kelly-json-hal-06|creationDate|2017-05-15 +https://tools.ietf.org/html/draft-kelly-json-hal-06|tag|http://www.semanlink.net/tag/hal +https://tools.ietf.org/html/draft-kelly-json-hal-06|tag|http://www.semanlink.net/tag/hateoas +https://tools.ietf.org/html/draft-kelly-json-hal-06|comment|The JSON Hypertext Application Language (HAL) is a standard which establishes conventions for expressing hypermedia controls, such as links, with JSON +https://tools.ietf.org/html/draft-kelly-json-hal-06|title|JSON Hypertext Application Language +https://tools.ietf.org/html/draft-kelly-json-hal-06|creationTime|2017-05-15T10:59:28Z +http://www.lemonde.fr/opinions/article/2010/05/25/guerre-aux-paysans-par-herve-kempf_1362750_3232.html|creationDate|2010-05-25 +http://www.lemonde.fr/opinions/article/2010/05/25/guerre-aux-paysans-par-herve-kempf_1362750_3232.html|tag|http://www.semanlink.net/tag/herve_kempf +http://www.lemonde.fr/opinions/article/2010/05/25/guerre-aux-paysans-par-herve-kempf_1362750_3232.html|tag|http://www.semanlink.net/tag/biodiversite +http://www.lemonde.fr/opinions/article/2010/05/25/guerre-aux-paysans-par-herve-kempf_1362750_3232.html|tag|http://www.semanlink.net/tag/agriculture +http://www.lemonde.fr/opinions/article/2010/05/25/guerre-aux-paysans-par-herve-kempf_1362750_3232.html|comment|"""La biodiversité ? 
Il ne s'agit pas que de petits oiseaux, mais aussi du conflit violent et très humain entre la course au profit maximal et le souci de vivre dignement de son travail et de la terre.""" +http://www.lemonde.fr/opinions/article/2010/05/25/guerre-aux-paysans-par-herve-kempf_1362750_3232.html|title|Guerre aux paysans, par Hervé Kempf +http://www.lemonde.fr/opinions/article/2010/05/25/guerre-aux-paysans-par-herve-kempf_1362750_3232.html|creationTime|2010-05-25T18:18:53Z +http://www.lemonde.fr/opinions/article/2010/05/25/guerre-aux-paysans-par-herve-kempf_1362750_3232.html|source|Le Monde +http://www.lemonde.fr/opinions/article/2010/05/25/guerre-aux-paysans-par-herve-kempf_1362750_3232.html|date|2010-05-26 +http://cacm.acm.org/magazines/2016/9/206254-a-new-look-at-the-semantic-web/fulltext|creationDate|2017-04-03 +http://cacm.acm.org/magazines/2016/9/206254-a-new-look-at-the-semantic-web/fulltext|tag|http://www.semanlink.net/tag/artificial_intelligence +http://cacm.acm.org/magazines/2016/9/206254-a-new-look-at-the-semantic-web/fulltext|tag|http://www.semanlink.net/tag/semantic_web +http://cacm.acm.org/magazines/2016/9/206254-a-new-look-at-the-semantic-web/fulltext|comment|some of the semantic knowledge that researchers had to construct manually they can now learn automatically +http://cacm.acm.org/magazines/2016/9/206254-a-new-look-at-the-semantic-web/fulltext|title|A New Look at the Semantic Web September 2016 Communications of the ACM +http://cacm.acm.org/magazines/2016/9/206254-a-new-look-at-the-semantic-web/fulltext|creationTime|2017-04-03T10:15:11Z +http://www.republicain-niger.com/Index.asp?affiche=News_Display.asp&articleid=5043&rub=Arts+et+culture|creationDate|2009-02-22 +http://www.republicain-niger.com/Index.asp?affiche=News_Display.asp&articleid=5043&rub=Arts+et+culture|tag|http://www.semanlink.net/tag/litterature +http://www.republicain-niger.com/Index.asp?affiche=News_Display.asp&articleid=5043&rub=Arts+et+culture|tag|http://www.semanlink.net/tag/jerma +http://www.republicain-niger.com/Index.asp?affiche=News_Display.asp&articleid=5043&rub=Arts+et+culture|tag|http://www.semanlink.net/tag/livre_a_lire +http://www.republicain-niger.com/Index.asp?affiche=News_Display.asp&articleid=5043&rub=Arts+et+culture|title|Anthologie de la littérature orale songhay-zarma par Mme Fatimata Mounkaïla (Ed. L’Harmattan 2008) - Le Republicain-Niger +http://www.republicain-niger.com/Index.asp?affiche=News_Display.asp&articleid=5043&rub=Arts+et+culture|creationTime|2009-02-22T16:15:35Z +http://www.amiens.iufm.fr/amiens/cahier/biblio/Heredia/Rome.htm#IDH_LA_TREBBIA|creationDate|2006-04-03 +http://www.amiens.iufm.fr/amiens/cahier/biblio/Heredia/Rome.htm#IDH_LA_TREBBIA|tag|http://www.semanlink.net/tag/hannibal +http://www.amiens.iufm.fr/amiens/cahier/biblio/Heredia/Rome.htm#IDH_LA_TREBBIA|tag|http://www.semanlink.net/tag/heredia +http://www.amiens.iufm.fr/amiens/cahier/biblio/Heredia/Rome.htm#IDH_LA_TREBBIA|tag|http://www.semanlink.net/tag/poesie +http://www.amiens.iufm.fr/amiens/cahier/biblio/Heredia/Rome.htm#IDH_LA_TREBBIA|comment|"Et là-bas, sous le pont, adossé contre une arche,
+Hannibal écoutait, pensif et triomphant,
+Le piétinement sourd des légions en marche."
+http://www.amiens.iufm.fr/amiens/cahier/biblio/Heredia/Rome.htm#IDH_LA_TREBBIA|title|Heredia - Les Trophées - La Trebbia : L'aube d'un jour sinistre a blanchi les hauteurs
+http://passeurdesciences.blog.lemonde.fr/2013/11/20/comment-un-arbre-mene-des-fourmis-a-lesclavage/|creationDate|2013-11-21
+http://passeurdesciences.blog.lemonde.fr/2013/11/20/comment-un-arbre-mene-des-fourmis-a-lesclavage/|tag|http://www.semanlink.net/tag/mutualisme
+http://passeurdesciences.blog.lemonde.fr/2013/11/20/comment-un-arbre-mene-des-fourmis-a-lesclavage/|tag|http://www.semanlink.net/tag/arbres
+http://passeurdesciences.blog.lemonde.fr/2013/11/20/comment-un-arbre-mene-des-fourmis-a-lesclavage/|tag|http://www.semanlink.net/tag/curiosite_naturelle
+http://passeurdesciences.blog.lemonde.fr/2013/11/20/comment-un-arbre-mene-des-fourmis-a-lesclavage/|tag|http://www.semanlink.net/tag/esclavage
+http://passeurdesciences.blog.lemonde.fr/2013/11/20/comment-un-arbre-mene-des-fourmis-a-lesclavage/|tag|http://www.semanlink.net/tag/fourmi
+http://passeurdesciences.blog.lemonde.fr/2013/11/20/comment-un-arbre-mene-des-fourmis-a-lesclavage/|title|Comment un arbre mène des fourmis à l’esclavage Passeur de sciences
+http://passeurdesciences.blog.lemonde.fr/2013/11/20/comment-un-arbre-mene-des-fourmis-a-lesclavage/|creationTime|2013-11-21T02:15:43Z
+http://bolossdesbelleslettres.tumblr.com/|creationDate|2015-03-24
+http://bolossdesbelleslettres.tumblr.com/|tag|http://www.semanlink.net/tag/madame_bovary
+http://bolossdesbelleslettres.tumblr.com/|tag|http://www.semanlink.net/tag/rigolo
+http://bolossdesbelleslettres.tumblr.com/|title|Les boloss des Belles Lettres
+http://bolossdesbelleslettres.tumblr.com/|creationTime|2015-03-24T09:38:08Z
+http://species.wikipedia.org/wiki/Main_Page|creationDate|2005-10-11
+http://species.wikipedia.org/wiki/Main_Page|tag|http://www.semanlink.net/tag/biologie
+http://species.wikipedia.org/wiki/Main_Page|tag|http://www.semanlink.net/tag/wiki
+http://species.wikipedia.org/wiki/Main_Page|comment|Wikispecies is free. Because life is public domain!
+http://species.wikipedia.org/wiki/Main_Page|title|Main Page - Wikispecies
+https://papers.nips.cc/paper/3708-ranking-measures-and-loss-functions-in-learning-to-rank|creationDate|2019-04-18
+https://papers.nips.cc/paper/3708-ranking-measures-and-loss-functions-in-learning-to-rank|tag|http://www.semanlink.net/tag/nlp_microsoft
+https://papers.nips.cc/paper/3708-ranking-measures-and-loss-functions-in-learning-to-rank|tag|http://www.semanlink.net/tag/machine_learned_ranking
+https://papers.nips.cc/paper/3708-ranking-measures-and-loss-functions-in-learning-to-rank|comment|"> While most learning-to-rank methods learn the ranking function by minimizing the loss functions, it is the ranking measures (such as NDCG and MAP) that are used to evaluate the performance of the learned ranking function. In this work, we reveal the relationship between ranking measures and loss functions in learning-to-rank methods, such as Ranking SVM, RankBoost, RankNet, and ListMLE.

> we have proved that many pairwise/listwise losses in learning to rank are actually upper bounds of measure-based ranking errors. As a result, the minimization of these loss functions will lead to the maximization of the ranking measures. 
The key to obtaining this result is to model ranking as a sequence of classification tasks, and define a so-called essential loss as the weighted sum of the classification errors of individual tasks in the sequence. + +> We have also shown a way to improve existing methods +by introducing appropriate weights to their loss functions." +https://papers.nips.cc/paper/3708-ranking-measures-and-loss-functions-in-learning-to-rank|title|Ranking Measures and Loss Functions in Learning to Rank (2009) +https://papers.nips.cc/paper/3708-ranking-measures-and-loss-functions-in-learning-to-rank|creationTime|2019-04-18T01:04:13Z +https://www.coursera.org/course/usefulgenetics|creationDate|2013-09-09 +https://www.coursera.org/course/usefulgenetics|tag|http://www.semanlink.net/tag/coursera +https://www.coursera.org/course/usefulgenetics|tag|http://www.semanlink.net/tag/genetique +https://www.coursera.org/course/usefulgenetics|title|Useful Genetics Coursera +https://www.coursera.org/course/usefulgenetics|creationTime|2013-09-09T21:37:49Z +http://semanticweb.com/semtechbiz-berlin-day-2_b26545|creationDate|2012-02-08 +http://semanticweb.com/semtechbiz-berlin-day-2_b26545|tag|http://www.semanlink.net/tag/configuration_as_linked_data +http://semanticweb.com/semtechbiz-berlin-day-2_b26545|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://semanticweb.com/semtechbiz-berlin-day-2_b26545|title|#SemTechBiz Berlin – Day 2 - semanticweb.com +http://semanticweb.com/semtechbiz-berlin-day-2_b26545|creationTime|2012-02-08T12:50:27Z +http://lifehacker.com/5711409/how-to-search-for-hidden-packaged-and-system-files-in-os-x|creationDate|2013-09-08 +http://lifehacker.com/5711409/how-to-search-for-hidden-packaged-and-system-files-in-os-x|tag|http://www.semanlink.net/tag/mac_os_x_tip +http://lifehacker.com/5711409/how-to-search-for-hidden-packaged-and-system-files-in-os-x|title|How to Search for Hidden, Packaged, and System Files in OS X +http://lifehacker.com/5711409/how-to-search-for-hidden-packaged-and-system-files-in-os-x|creationTime|2013-09-08T14:15:56Z +http://www-128.ibm.com/developerworks/java/library/j-threads3.html|creationDate|2005-10-29 +http://www-128.ibm.com/developerworks/java/library/j-threads3.html|tag|http://www.semanlink.net/tag/java_concurrency +http://www-128.ibm.com/developerworks/java/library/j-threads3.html|title|Threading lightly, Part 3: Sometimes it's best not to share +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/health/4579207.stm|creationDate|2005-05-25 +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/health/4579207.stm|tag|http://www.semanlink.net/tag/richesses_sous_marines +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/health/4579207.stm|tag|http://www.semanlink.net/tag/bacteries +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/health/4579207.stm|tag|http://www.semanlink.net/tag/biodiversite +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/health/4579207.stm|comment|A bacterium found 300 metres below sea could be used to fight the superbug MRSA, scientists believe. 
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/health/4579207.stm|title|BBC NEWS - Deep sea weapon against superbug +http://m.informationweek.in/software/12-04-09/how_is_ford_using_google_s_prediction_engine_to_build_self_learning_cars.aspx|creationDate|2013-02-28 +http://m.informationweek.in/software/12-04-09/how_is_ford_using_google_s_prediction_engine_to_build_self_learning_cars.aspx|tag|http://www.semanlink.net/tag/ford +http://m.informationweek.in/software/12-04-09/how_is_ford_using_google_s_prediction_engine_to_build_self_learning_cars.aspx|tag|http://www.semanlink.net/tag/google +http://m.informationweek.in/software/12-04-09/how_is_ford_using_google_s_prediction_engine_to_build_self_learning_cars.aspx|tag|http://www.semanlink.net/tag/destination_prediction +http://m.informationweek.in/software/12-04-09/how_is_ford_using_google_s_prediction_engine_to_build_self_learning_cars.aspx|title|InformationWeek – Software > How is Ford using Google’s prediction engine to build self learning cars +http://m.informationweek.in/software/12-04-09/how_is_ford_using_google_s_prediction_engine_to_build_self_learning_cars.aspx|creationTime|2013-02-28T14:05:43Z +http://www.openlinksw.com/weblog/oerling/?id=1515|creationDate|2009-01-16 +http://www.openlinksw.com/weblog/oerling/?id=1515|tag|http://www.semanlink.net/tag/faceted_search +http://www.openlinksw.com/weblog/oerling/?id=1515|tag|http://www.semanlink.net/tag/orri_erling +http://www.openlinksw.com/weblog/oerling/?id=1515|title|Faceted Search: Unlimited Data in Interactive Time +http://www.openlinksw.com/weblog/oerling/?id=1515|creationTime|2009-01-16T21:50:19Z +http://whc.unesco.org/en/tentativelists/5041/|creationDate|2008-12-11 +http://whc.unesco.org/en/tentativelists/5041/|tag|http://www.semanlink.net/tag/histoire_du_niger +http://whc.unesco.org/en/tentativelists/5041/|tag|http://www.semanlink.net/tag/jermakoye +http://whc.unesco.org/en/tentativelists/5041/|tag|http://www.semanlink.net/tag/dosso +http://whc.unesco.org/en/tentativelists/5041/|comment|Le palais du Chef de province de Dosso a été construit en 1904, peu de temps après l'accession du Zarmakoye Abdou Aoûta au trône. Le règne des Zarrmakoye a débuté au XVème siècle, lorsque Boukar fils de Tagur Gana s'installa dans le Zigui. 
+http://whc.unesco.org/en/tentativelists/5041/|title|Palais du Zarmakoye de Dosso - UNESCO World Heritage Centre
+http://whc.unesco.org/en/tentativelists/5041/|creationTime|2008-12-11T23:20:08Z
+http://db.uwaterloo.ca/LDQTut2013/|creationDate|2013-05-18
+http://db.uwaterloo.ca/LDQTut2013/|tag|http://www.semanlink.net/tag/linked_data
+http://db.uwaterloo.ca/LDQTut2013/|tag|http://www.semanlink.net/tag/tutorial
+http://db.uwaterloo.ca/LDQTut2013/|tag|http://www.semanlink.net/tag/olaf_hartig
+http://db.uwaterloo.ca/LDQTut2013/|tag|http://www.semanlink.net/tag/www_2013
+http://db.uwaterloo.ca/LDQTut2013/|title|Tutorial: Linked Data Query Processing
+http://db.uwaterloo.ca/LDQTut2013/|creationTime|2013-05-18T22:32:02Z
+https://www.quora.com/What-is-the-difference-between-gradient-boosting-and-adaboost|creationDate|2019-01-15
+https://www.quora.com/What-is-the-difference-between-gradient-boosting-and-adaboost|tag|http://www.semanlink.net/tag/gradient_boosting
+https://www.quora.com/What-is-the-difference-between-gradient-boosting-and-adaboost|tag|http://www.semanlink.net/tag/adaboost
+https://www.quora.com/What-is-the-difference-between-gradient-boosting-and-adaboost|tag|http://www.semanlink.net/tag/boosting
+https://www.quora.com/What-is-the-difference-between-gradient-boosting-and-adaboost|title|What is the difference between gradient boosting and adaboost? - Quora
+https://www.quora.com/What-is-the-difference-between-gradient-boosting-and-adaboost|creationTime|2019-01-15T11:43:35Z
+http://swoogle.umbc.edu/about.php|creationDate|2005-06-15
+http://swoogle.umbc.edu/about.php|tag|http://www.semanlink.net/tag/finding_rdf_documents
+http://swoogle.umbc.edu/about.php|tag|http://www.semanlink.net/tag/swoogle
+http://swoogle.umbc.edu/about.php|tag|http://www.semanlink.net/tag/metadata_indexing
+http://swoogle.umbc.edu/about.php|comment|"Swoogle is a crawler-based indexing and retrieval system for the Semantic Web -- RDF and OWL documents encoded in XML or N3. Swoogle extracts metadata for each discovered document, and computes relations among them. Discovered documents are also indexed by an information retrieval system which can use either character N-Gram or URIrefs as keywords to find relevant documents and to compute the similarity among a set of documents. One of the interesting properties we compute is ontology rank, a measure of the importance of a Semantic Web document."
+https://medium.com/@octskyward/the-resolution-of-the-bitcoin-experiment-dabb30201f7#.fluiz2ocl|creationDate|2016-01-16 +https://medium.com/@octskyward/the-resolution-of-the-bitcoin-experiment-dabb30201f7#.fluiz2ocl|tag|http://www.semanlink.net/tag/bitcoin +https://medium.com/@octskyward/the-resolution-of-the-bitcoin-experiment-dabb30201f7#.fluiz2ocl|comment|Mike Hearn +https://medium.com/@octskyward/the-resolution-of-the-bitcoin-experiment-dabb30201f7#.fluiz2ocl|title|The resolution of the Bitcoin experiment — Medium +https://medium.com/@octskyward/the-resolution-of-the-bitcoin-experiment-dabb30201f7#.fluiz2ocl|creationTime|2016-01-16T00:36:11Z +http://blog.aylien.com/acl-2018-highlights-understanding-representations-and-evaluation-in-more-challenging-settings/|creationDate|2018-07-26 +http://blog.aylien.com/acl-2018-highlights-understanding-representations-and-evaluation-in-more-challenging-settings/|tag|http://www.semanlink.net/tag/nlp_conference +http://blog.aylien.com/acl-2018-highlights-understanding-representations-and-evaluation-in-more-challenging-settings/|tag|http://www.semanlink.net/tag/sebastian_ruder +http://blog.aylien.com/acl-2018-highlights-understanding-representations-and-evaluation-in-more-challenging-settings/|comment|"> Two themes were most prominent for me at #ACL2018: +> 1. Understanding representations. +> 2. Evaluating models in more challenging settings. + +> Deep Learning has not changed our understanding of language. Its main contribution in this regard is to demonstrate that a neural network aka a computational model can perform certain NLP tasks, which shows that these tasks are not indicators of intelligence"" + +" +http://blog.aylien.com/acl-2018-highlights-understanding-representations-and-evaluation-in-more-challenging-settings/|title|ACL 2018 Highlights: Understanding Representations and Evaluation in More Challenging Settings - AYLIEN +http://blog.aylien.com/acl-2018-highlights-understanding-representations-and-evaluation-in-more-challenging-settings/|creationTime|2018-07-26T16:49:55Z +http://www.w3.org/2007/02/turtle/primer/|creationDate|2008-03-18 +http://www.w3.org/2007/02/turtle/primer/|tag|http://www.semanlink.net/tag/rdf +http://www.w3.org/2007/02/turtle/primer/|tag|http://www.semanlink.net/tag/turtle +http://www.w3.org/2007/02/turtle/primer/|tag|http://www.semanlink.net/tag/w3c_working_draft +http://www.w3.org/2007/02/turtle/primer/|title|RDF Primer — Turtle version +http://www.w3.org/2007/02/turtle/primer/|creationTime|2008-03-18T16:07:41Z +http://www.cnes.fr/web/CNES-fr/11630-gp-de-nouvelles-informations-sur-philae-et-rosetta.php|creationDate|2014-12-20 +http://www.cnes.fr/web/CNES-fr/11630-gp-de-nouvelles-informations-sur-philae-et-rosetta.php|tag|http://www.semanlink.net/tag/rosetta +http://www.cnes.fr/web/CNES-fr/11630-gp-de-nouvelles-informations-sur-philae-et-rosetta.php|tag|http://www.semanlink.net/tag/philae +http://www.cnes.fr/web/CNES-fr/11630-gp-de-nouvelles-informations-sur-philae-et-rosetta.php|title|GP - De nouvelles informations sur Philae et Rosetta - CNES +http://www.cnes.fr/web/CNES-fr/11630-gp-de-nouvelles-informations-sur-philae-et-rosetta.php|creationTime|2014-12-20T10:17:16Z +http://histography.io/|creationDate|2015-10-10 +http://histography.io/|tag|http://www.semanlink.net/tag/timeline +http://histography.io/|tag|http://www.semanlink.net/tag/wikipedia +http://histography.io/|tag|http://www.semanlink.net/tag/histoire +http://histography.io/|title|Histography - Timeline of History 
+http://histography.io/|creationTime|2015-10-10T20:47:50Z
+http://blog.dbtune.org/post/2010/07/13/First-BBC-microsite-powered-by-a-triple-store|creationDate|2010-07-16
+http://blog.dbtune.org/post/2010/07/13/First-BBC-microsite-powered-by-a-triple-store|tag|http://www.semanlink.net/tag/bbc
+http://blog.dbtune.org/post/2010/07/13/First-BBC-microsite-powered-by-a-triple-store|tag|http://www.semanlink.net/tag/yves_raymond
+http://blog.dbtune.org/post/2010/07/13/First-BBC-microsite-powered-by-a-triple-store|tag|http://www.semanlink.net/tag/triple_store_powered_site
+http://blog.dbtune.org/post/2010/07/13/First-BBC-microsite-powered-by-a-triple-store|title|First BBC microsite powered by a triple-store - DBTune blog
+http://blog.dbtune.org/post/2010/07/13/First-BBC-microsite-powered-by-a-triple-store|creationTime|2010-07-16T14:41:57Z
+http://www.lemonde.fr/les-decodeurs/article/2015/11/15/attentats-le-beau-texte-du-new-york-times-etait-en-fait-un-commentaire_4810332_4355770.html|creationDate|2015-11-15
+http://www.lemonde.fr/les-decodeurs/article/2015/11/15/attentats-le-beau-texte-du-new-york-times-etait-en-fait-un-commentaire_4810332_4355770.html|tag|http://www.semanlink.net/tag/france
+http://www.lemonde.fr/les-decodeurs/article/2015/11/15/attentats-le-beau-texte-du-new-york-times-etait-en-fait-un-commentaire_4810332_4355770.html|tag|http://www.semanlink.net/tag/new_york_times
+http://www.lemonde.fr/les-decodeurs/article/2015/11/15/attentats-le-beau-texte-du-new-york-times-etait-en-fait-un-commentaire_4810332_4355770.html|tag|http://www.semanlink.net/tag/attentats_13_11_2015
+http://www.lemonde.fr/les-decodeurs/article/2015/11/15/attentats-le-beau-texte-du-new-york-times-etait-en-fait-un-commentaire_4810332_4355770.html|comment|"France embodies everything religious zealots everywhere hate: enjoyment of life here on earth in a myriad little ways: a fragrant cup of coffee and buttery croissant in the morning, beautiful women in short dresses smiling freely on the street, the smell of warm bread, a bottle of wine shared with friends, a dab of perfume, children playing in the Luxembourg Gardens, the right not to believe in any god, not to worry about calories, to flirt and smoke and enjoy sex outside of marriage, to take vacations, to read any book you want, to go to school for free, to play, to laugh, to argue, to make fun of prelates and politicians alike, to leave worrying about the afterlife to the dead.
+No country does life on earth better than the French.
+Paris, we love you. We cry for you. You are mourning tonight, and we with you. We know you will laugh again, and sing again, and make love, and heal, because loving life is your essence. The forces of darkness will ebb. They will lose. They always do."
+http://www.lemonde.fr/les-decodeurs/article/2015/11/15/attentats-le-beau-texte-du-new-york-times-etait-en-fait-un-commentaire_4810332_4355770.html|title|Attentats : le beau texte du « New York Times » était en fait un… commentaire
+http://www.lemonde.fr/les-decodeurs/article/2015/11/15/attentats-le-beau-texte-du-new-york-times-etait-en-fait-un-commentaire_4810332_4355770.html|creationTime|2015-11-15T10:56:59Z
+http://www.pbs.org/wgbh/nova/neutrino/missing.html|creationDate|2006-02-17
+http://www.pbs.org/wgbh/nova/neutrino/missing.html|tag|http://www.semanlink.net/tag/pauli
+http://www.pbs.org/wgbh/nova/neutrino/missing.html|tag|http://www.semanlink.net/tag/pbs_program
+http://www.pbs.org/wgbh/nova/neutrino/missing.html|tag|http://www.semanlink.net/tag/neutrino
+http://www.pbs.org/wgbh/nova/neutrino/missing.html|comment|"How can you look inside the sun to see how it shines? In the mid-1960s, Ray Davis and John Bahcall thought they had a way. Drawing on advances made by other physicists earlier in the century, they intended to use notoriously elusive particles called neutrinos to verify ideas about the sun's inner workings. Theorist Bahcall calculated the number of neutrinos they expected to find, and experimentalist Davis tried to catch them. But for more than three decades, their results didn't jibe. In the chronology below, follow the case of the missing neutrinos, which ultimately led not only to a triumph for Davis and Bahcall but also to a surprising breakthrough in particle physics."
+http://www.pbs.org/wgbh/nova/neutrino/missing.html|title|NOVA The Ghost Particle Case of the Missing Particles PBS
+http://www.bbc.co.uk/news/world-africa-18657463|creationDate|2012-06-30
+http://www.bbc.co.uk/news/world-africa-18657463|tag|http://www.semanlink.net/tag/destruction_de_vestiges_antiques
+http://www.bbc.co.uk/news/world-africa-18657463|tag|http://www.semanlink.net/tag/tombouctou
+http://www.bbc.co.uk/news/world-africa-18657463|tag|http://www.semanlink.net/tag/abrutis
+http://www.bbc.co.uk/news/world-africa-18657463|title|BBC News - Timbuktu shrines damaged by Mali Ansar Dine Islamists
+http://www.bbc.co.uk/news/world-africa-18657463|creationTime|2012-06-30T14:54:22Z
+http://www.nytimes.com/2015/10/01/opinion/nicholas-kristof-the-most-important-thing-and-its-almost-a-secret.html?_r=0|creationDate|2015-10-13
+http://www.nytimes.com/2015/10/01/opinion/nicholas-kristof-the-most-important-thing-and-its-almost-a-secret.html?_r=0|tag|http://www.semanlink.net/tag/pauvrete
+http://www.nytimes.com/2015/10/01/opinion/nicholas-kristof-the-most-important-thing-and-its-almost-a-secret.html?_r=0|comment|The most important thing going on in the world today is something we almost never cover: a rapid decline in poverty, illiteracy and disease.
+http://www.nytimes.com/2015/10/01/opinion/nicholas-kristof-the-most-important-thing-and-its-almost-a-secret.html?_r=0|title|The Most Important Thing, and It’s Almost a Secret - The New York Times +http://www.nytimes.com/2015/10/01/opinion/nicholas-kristof-the-most-important-thing-and-its-almost-a-secret.html?_r=0|creationTime|2015-10-13T10:09:33Z +https://lejournal.cnrs.fr/articles/un-ticket-pour-le-soleil?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530126148|creationDate|2018-06-28 +https://lejournal.cnrs.fr/articles/un-ticket-pour-le-soleil?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530126148|tag|http://www.semanlink.net/tag/soleil +https://lejournal.cnrs.fr/articles/un-ticket-pour-le-soleil?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530126148|tag|http://www.semanlink.net/tag/missions_spatiales +https://lejournal.cnrs.fr/articles/un-ticket-pour-le-soleil?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530126148|comment|la sonde Parker Solar Probe partira de Floride cet été pour s’avancer au plus près de l’étoile +https://lejournal.cnrs.fr/articles/un-ticket-pour-le-soleil?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530126148|title|Un ticket pour le Soleil CNRS Le journal +https://lejournal.cnrs.fr/articles/un-ticket-pour-le-soleil?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530126148|creationTime|2018-06-28T01:30:27Z +http://drupal.org/node/560326|creationDate|2011-09-16 +http://drupal.org/node/560326|tag|http://www.semanlink.net/tag/drupal_rdf +http://drupal.org/node/560326|tag|http://www.semanlink.net/tag/skos +http://drupal.org/node/560326|title|RDF export/import should be W3C standard SKOS - here is the patch drupal.org +http://drupal.org/node/560326|creationTime|2011-09-16T18:11:02Z +http://danakil.ethiopia.free.fr/dallol.htm|creationDate|2008-11-21 +http://danakil.ethiopia.free.fr/dallol.htm|tag|http://www.semanlink.net/tag/dallol +http://danakil.ethiopia.free.fr/dallol.htm|title|Le Dallol (photos) +http://danakil.ethiopia.free.fr/dallol.htm|creationTime|2008-11-21T23:23:43Z +http://www.bbc.co.uk/rd/blog/2014/06/comma-shining-a-light-into-the-archives|creationDate|2014-06-23 +http://www.bbc.co.uk/rd/blog/2014/06/comma-shining-a-light-into-the-archives|tag|http://www.semanlink.net/tag/yves_raymond +http://www.bbc.co.uk/rd/blog/2014/06/comma-shining-a-light-into-the-archives|tag|http://www.semanlink.net/tag/bbc +http://www.bbc.co.uk/rd/blog/2014/06/comma-shining-a-light-into-the-archives|title|COMMA - shining a light into the archives - Blog - BBC R&D +http://www.bbc.co.uk/rd/blog/2014/06/comma-shining-a-light-into-the-archives|creationTime|2014-06-23T11:58:39Z +http://lavue.fr/ce-qui-est-arrive-dans-ce-cinema-a-laisse-tout-le-monde-sans-voix-la-campagne-choc/|creationDate|2014-06-27 +http://lavue.fr/ce-qui-est-arrive-dans-ce-cinema-a-laisse-tout-le-monde-sans-voix-la-campagne-choc/|tag|http://www.semanlink.net/tag/publicite +http://lavue.fr/ce-qui-est-arrive-dans-ce-cinema-a-laisse-tout-le-monde-sans-voix-la-campagne-choc/|title|Ce qui est arrivé dans ce cinéma a laissé tout le monde sans voix, la campagne choc +http://lavue.fr/ce-qui-est-arrive-dans-ce-cinema-a-laisse-tout-le-monde-sans-voix-la-campagne-choc/|creationTime|2014-06-27T01:20:47Z +http://vocab.deri.ie/csp|creationDate|2011-06-29 
+http://vocab.deri.ie/csp|tag|http://www.semanlink.net/tag/rdf_vocabularies +http://vocab.deri.ie/csp|tag|http://www.semanlink.net/tag/constraint_satisfaction_problem +http://vocab.deri.ie/csp|tag|http://www.semanlink.net/tag/fadi_badra +http://vocab.deri.ie/csp|tag|http://www.semanlink.net/tag/fps_ontologies +http://vocab.deri.ie/csp|tag|http://www.semanlink.net/tag/osema_2011 +http://vocab.deri.ie/csp|tag|http://www.semanlink.net/tag/fps_paper +http://vocab.deri.ie/csp|tag|http://www.semanlink.net/tag/osema_deri_renault_paper +http://vocab.deri.ie/csp|title|CSP - A vocabulary to represent Constraint Satisfaction Problems. DERI Vocabularies +http://vocab.deri.ie/csp|creationTime|2011-06-29T11:17:13Z +http://www.lespetitescases.net/patrimoine-web-de-donnees|creationDate|2011-09-01 +http://www.lespetitescases.net/patrimoine-web-de-donnees|tag|http://www.semanlink.net/tag/digital_humanities +http://www.lespetitescases.net/patrimoine-web-de-donnees|tag|http://www.semanlink.net/tag/patrimoine +http://www.lespetitescases.net/patrimoine-web-de-donnees|tag|http://www.semanlink.net/tag/les_petites_cases +http://www.lespetitescases.net/patrimoine-web-de-donnees|tag|http://www.semanlink.net/tag/linked_data +http://www.lespetitescases.net/patrimoine-web-de-donnees|title|Patrimoine et Web de données Les petites cases +http://www.lespetitescases.net/patrimoine-web-de-donnees|creationTime|2011-09-01T11:40:01Z +http://schema.org/docs/datamodel.html|creationDate|2011-06-09 +http://schema.org/docs/datamodel.html|tag|http://www.semanlink.net/tag/schema_org +http://schema.org/docs/datamodel.html|tag|http://www.semanlink.net/tag/rdfa_lite +http://schema.org/docs/datamodel.html|comment|"Some good points:
The decision to allow multiple domains and ranges was purely pragmatic. While the computational properties of systems with a single domain and range are easier to understand, in practice, this forces the creation of a lot of artificial types, which are there purely to act as the domain/range of some properties.
+While we would like all the markup we get to follow the schema, in practice, we expect a lot of data that does not. We expect schema.org properties to be used with new types. We also expect that often, where we expect a property value of type Person, Place, Organization or some other subClassOf Thing, we will get a text string. In the spirit of ""some data is better than none"", we will accept this markup and do the best we can." +http://schema.org/docs/datamodel.html|title|schema.org - Data Model / Mapping to RDFa 1.1 +http://schema.org/docs/datamodel.html|creationTime|2011-06-09T14:29:18Z +http://dbpedia.org/docs/|creationDate|2007-04-04 +http://dbpedia.org/docs/|tag|http://www.semanlink.net/tag/dbpedia +http://dbpedia.org/docs/|comment|dbpedia.org is a community effort to extract structured information from Wikipedia and to make this information available on the Web. dbpedia allows you to ask sophisticated queries against Wikipedia and to link other datasets on the Web to Wikipedia data. +http://dbpedia.org/docs/|title|dbpedia.org - Using Wikipedia as a Web Database +http://dbpedia.org/docs/|creationTime|2007-04-04T22:26:03Z +http://www.deseretnews.com/article/print/705298649/Universities-will-be-irrelevant-by-2020-Y-professor-says.html|creationDate|2009-04-22 +http://www.deseretnews.com/article/print/705298649/Universities-will-be-irrelevant-by-2020-Y-professor-says.html|tag|http://www.semanlink.net/tag/universite +http://www.deseretnews.com/article/print/705298649/Universities-will-be-irrelevant-by-2020-Y-professor-says.html|tag|http://www.semanlink.net/tag/e_learning +http://www.deseretnews.com/article/print/705298649/Universities-will-be-irrelevant-by-2020-Y-professor-says.html|comment|knowledge is a public good and should be fully shared +http://www.deseretnews.com/article/print/705298649/Universities-will-be-irrelevant-by-2020-Y-professor-says.html|title|Deseret News Universities will be 'irrelevant' by 2020, Y. professor says +http://www.deseretnews.com/article/print/705298649/Universities-will-be-irrelevant-by-2020-Y-professor-says.html|creationTime|2009-04-22T23:24:30Z +http://www.w3.org/DesignIssues/TagLabel.html|creationDate|2007-02-08 +http://www.w3.org/DesignIssues/TagLabel.html|tag|http://www.semanlink.net/tag/tagging +http://www.w3.org/DesignIssues/TagLabel.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/DesignIssues/TagLabel.html|tag|http://www.semanlink.net/tag/semanlink_related +http://www.w3.org/DesignIssues/TagLabel.html|comment|"This article discusses how the user interface metaphor of a luggage label can be used to associate metadata from well-defined ontology with tags from a particular context.
I heard of one online community which was considering making a system to allow one formally to state when one has committed to use a given tag in the same way as another person, or a growing mesh of people. That would be a very interesting feature, as it would allow a useful definition to gain growing acceptance, to progressively move from being a private idea to being a group global standard.
+" +http://www.w3.org/DesignIssues/TagLabel.html|title|Using labels to give semantics to tags - Design Issues +http://www.w3.org/DesignIssues/TagLabel.html|creationTime|2007-02-08T21:51:18Z +https://neurovenge.antonomase.fr/|creationDate|2018-11-18 +https://neurovenge.antonomase.fr/|tag|http://www.semanlink.net/tag/artificial_intelligence +https://neurovenge.antonomase.fr/|comment|L’invention des machines inductives et la controverse de l’intelligence artificielle +https://neurovenge.antonomase.fr/|title|The Revenge of Neurons +https://neurovenge.antonomase.fr/|creationTime|2018-11-18T19:11:00Z +https://web.stanford.edu/~jurafsky/slp3/16.pdf|creationDate|2017-11-11 +https://web.stanford.edu/~jurafsky/slp3/16.pdf|tag|http://www.semanlink.net/tag/dan_jurafsky +https://web.stanford.edu/~jurafsky/slp3/16.pdf|tag|http://www.semanlink.net/tag/latent_semantic_analysis +https://web.stanford.edu/~jurafsky/slp3/16.pdf|tag|http://www.semanlink.net/tag/singular_value_decomposition +https://web.stanford.edu/~jurafsky/slp3/16.pdf|tag|http://www.semanlink.net/tag/word_embedding +https://web.stanford.edu/~jurafsky/slp3/16.pdf|comment|"> We will introduce three methods of generating very dense, short vectors: +> 1. using dimensionality reduction methods like SVD, +> 2. using neural nets like the popular skip-gram or CBOW approaches. +> 3. a quite different approach based on neighboring words called Brown clustering. + +" +https://web.stanford.edu/~jurafsky/slp3/16.pdf|title|Semantics with Dense Vectors +https://web.stanford.edu/~jurafsky/slp3/16.pdf|creationTime|2017-11-11T02:39:48Z +http://www.mindcad.com/|creationDate|2006-04-29 +http://www.mindcad.com/|tag|http://www.semanlink.net/tag/outliner +http://www.mindcad.com/|tag|http://www.semanlink.net/tag/thinking_tools +http://www.mindcad.com/|tag|http://www.semanlink.net/tag/mac_os_x +http://www.mindcad.com/|comment|Thinking Tools for OS X +http://www.mindcad.com/|title|MindCad +http://www.w3.org/Submission/CBD/|creationDate|2008-11-25 +http://www.w3.org/Submission/CBD/|tag|http://www.semanlink.net/tag/w3c_submission +http://www.w3.org/Submission/CBD/|tag|http://www.semanlink.net/tag/concise_bounded_description +http://www.w3.org/Submission/CBD/|comment|W3C Member Submission 3 June 2005 +http://www.w3.org/Submission/CBD/|title|CBD - Concise Bounded Description +http://www.w3.org/Submission/CBD/|creationTime|2008-11-25T12:20:20Z +https://twitter.com/yuvalpi/status/1057909000551964673|creationDate|2018-11-01 +https://twitter.com/yuvalpi/status/1057909000551964673|tag|http://www.semanlink.net/tag/blackboxnlp_workshop_2018 +https://twitter.com/yuvalpi/status/1057909000551964673|tag|http://www.semanlink.net/tag/yoav_goldberg +https://twitter.com/yuvalpi/status/1057909000551964673|title|Trying to Understand Recurrent Neural Networks for Language Processing (tweets) +https://twitter.com/yuvalpi/status/1057909000551964673|creationTime|2018-11-01T16:58:32Z +http://www.lemonde.fr/europe/article/2014/06/26/le-combat-perdu-de-m-cameron-contre-m-juncker_4445641_3214.html|creationDate|2014-06-27 +http://www.lemonde.fr/europe/article/2014/06/26/le-combat-perdu-de-m-cameron-contre-m-juncker_4445641_3214.html|tag|http://www.semanlink.net/tag/david_cameron +http://www.lemonde.fr/europe/article/2014/06/26/le-combat-perdu-de-m-cameron-contre-m-juncker_4445641_3214.html|tag|http://www.semanlink.net/tag/europe_and_uk 
+http://www.lemonde.fr/europe/article/2014/06/26/le-combat-perdu-de-m-cameron-contre-m-juncker_4445641_3214.html|tag|http://www.semanlink.net/tag/commission_europeenne +http://www.lemonde.fr/europe/article/2014/06/26/le-combat-perdu-de-m-cameron-contre-m-juncker_4445641_3214.html|tag|http://www.semanlink.net/tag/jean_claude_juncker +http://www.lemonde.fr/europe/article/2014/06/26/le-combat-perdu-de-m-cameron-contre-m-juncker_4445641_3214.html|title|Le combat perdu de David Cameron contre Jean-Claude Juncker +http://www.lemonde.fr/europe/article/2014/06/26/le-combat-perdu-de-m-cameron-contre-m-juncker_4445641_3214.html|creationTime|2014-06-27T00:50:08Z +http://www.agora21.org/unesco/7savoirs/|creationDate|2005-05-27 +http://www.agora21.org/unesco/7savoirs/|tag|http://www.semanlink.net/tag/education +http://www.agora21.org/unesco/7savoirs/|tag|http://www.semanlink.net/tag/edgar_morin +http://www.agora21.org/unesco/7savoirs/|tag|http://www.semanlink.net/tag/jean_paul +http://www.agora21.org/unesco/7savoirs/|title|Edgar Morin : Les sept savoirs nécessaires à l’éducation du futur +http://www.agora21.org/unesco/7savoirs/|seeAlso|http://www.semanlink.net/doc/2005/05/Edgar%20Morin%20-%20Les%20sept%20savoirs%20n%C3%A9cessaires%20%C3%A0%20l%E2%80%99%C3%A9ducation%20du%20futur%20-%20CHAPITRE%20VII.html +http://www.agora21.org/unesco/7savoirs/|seeAlso|http://www.semanlink.net/doc/2005/05/Edgar%20Morin%20-%20Les%20sept%20savoirs%20n%C3%A9cessaires%20%C3%A0%20l%E2%80%99%C3%A9ducation%20du%20futur%20-%20CHAPITRE%20II.html +http://www.agora21.org/unesco/7savoirs/|seeAlso|http://www.semanlink.net/doc/2005/05/Edgar%20Morin%20-%20Les%20sept%20savoirs%20n%C3%A9cessaires%20%C3%A0%20l%E2%80%99%C3%A9ducation%20du%20futur%20-%20CHAPITRE%20IV.html +http://www.agora21.org/unesco/7savoirs/|seeAlso|http://www.semanlink.net/doc/2005/05/Edgar%20Morin%20-%20Les%20sept%20savoirs%20n%C3%A9cessaires%20%C3%A0%20l%E2%80%99%C3%A9ducation%20du%20futur%20-%20Avant-propos.html +http://www.agora21.org/unesco/7savoirs/|seeAlso|http://www.semanlink.net/doc/2005/05/Edgar%20Morin%20-%20Les%20sept%20savoirs%20n%C3%A9cessaires%20%C3%A0%20l%E2%80%99%C3%A9ducation%20du%20futur%20-%20CHAPITRE%20III.html +http://www.agora21.org/unesco/7savoirs/|seeAlso|http://www.semanlink.net/doc/2005/05/Edgar%20Morin%20-%20Les%20sept%20savoirs%20n%C3%A9cessaires%20%C3%A0%20l%E2%80%99%C3%A9ducation%20du%20futur%20-%20CHAPITRE%20V.html +http://www.agora21.org/unesco/7savoirs/|seeAlso|http://www.semanlink.net/doc/2005/05/Edgar%20Morin%20-%20Les%20sept%20savoirs%20n%C3%A9cessaires%20%C3%A0%20l%E2%80%99%C3%A9ducation%20du%20futur%20-%20CHAPITRE%20VI.html +http://www.agora21.org/unesco/7savoirs/|seeAlso|http://www.semanlink.net/doc/2005/05/Edgar%20Morin%20-%20Les%20sept%20savoirs%20n%C3%A9cessaires%20%C3%A0%20l%E2%80%99%C3%A9ducation%20du%20futur%20-%20CHAPITRE%20I.html +http://www.agora21.org/unesco/7savoirs/|seeAlso|http://www.semanlink.net/doc/2005/05/Edgar%20Morin%20-%20Les%20sept%20savoirs%20n%C3%A9cessaires%20%C3%A0%20l%E2%80%99%C3%A9ducation%20du%20futur%20-%20bibliographie.html +http://www.greenpeace.fr/zero-deforestation/index.php|creationDate|2010-06-17 +http://www.greenpeace.fr/zero-deforestation/index.php|tag|http://www.semanlink.net/tag/deforestation +http://www.greenpeace.fr/zero-deforestation/index.php|tag|http://www.semanlink.net/tag/greenpeace +http://www.greenpeace.fr/zero-deforestation/index.php|comment|Greenpeace appelle à « zéro déforestation d'ici 2020 » +http://www.greenpeace.fr/zero-deforestation/index.php|title|Zéro déforestation 
Mobilisez-vous pour la sauvegarde des forêts et du climat +http://www.greenpeace.fr/zero-deforestation/index.php|creationTime|2010-06-17T01:55:53Z +http://googlewebmastercentral.blogspot.fr/2015/03/easier-website-development-with-web.html|creationDate|2015-03-14 +http://googlewebmastercentral.blogspot.fr/2015/03/easier-website-development-with-web.html|tag|http://www.semanlink.net/tag/webcomponents +http://googlewebmastercentral.blogspot.fr/2015/03/easier-website-development-with-web.html|tag|http://www.semanlink.net/tag/json_ld +http://googlewebmastercentral.blogspot.fr/2015/03/easier-website-development-with-web.html|title|Official Google Webmaster Central Blog: Easier website development with Web Components and JSON-LD +http://googlewebmastercentral.blogspot.fr/2015/03/easier-website-development-with-web.html|creationTime|2015-03-14T14:51:45Z +http://www.youtube.com/watch?v=aA-gTNxy1rw|creationDate|2013-05-24 +http://www.youtube.com/watch?v=aA-gTNxy1rw|tag|http://www.semanlink.net/tag/youtube +http://www.youtube.com/watch?v=aA-gTNxy1rw|tag|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.youtube.com/watch?v=aA-gTNxy1rw|title|Bayes networks: How to use D-separation - illustrative examples - YouTube +http://www.youtube.com/watch?v=aA-gTNxy1rw|creationTime|2013-05-24T03:55:18Z +https://towardsdatascience.com/outlier-detection-with-isolation-forest-3d190448d45e|creationDate|2018-10-15 +https://towardsdatascience.com/outlier-detection-with-isolation-forest-3d190448d45e|tag|http://www.semanlink.net/tag/out_of_distribution_detection +https://towardsdatascience.com/outlier-detection-with-isolation-forest-3d190448d45e|tag|http://www.semanlink.net/tag/random_forest +https://towardsdatascience.com/outlier-detection-with-isolation-forest-3d190448d45e|title|Outlier Detection with Isolation Forest – Towards Data Science +https://towardsdatascience.com/outlier-detection-with-isolation-forest-3d190448d45e|creationTime|2018-10-15T13:27:09Z +https://newrepublic.com/article/117429/capital-twenty-first-century-thomas-piketty-reviewed|creationDate|2016-07-28 +https://newrepublic.com/article/117429/capital-twenty-first-century-thomas-piketty-reviewed|tag|http://www.semanlink.net/tag/thomas_piketty +https://newrepublic.com/article/117429/capital-twenty-first-century-thomas-piketty-reviewed|title|'Capital in the Twenty-First Century' by Thomas Piketty, reviewed New Republic +https://newrepublic.com/article/117429/capital-twenty-first-century-thomas-piketty-reviewed|creationTime|2016-07-28T11:43:46Z +http://tempsreel.nouvelobs.com/economie/20160212.OBS4574/blockchain-revolution-technologique-ou-mirage.html|creationDate|2016-02-14 +http://tempsreel.nouvelobs.com/economie/20160212.OBS4574/blockchain-revolution-technologique-ou-mirage.html|tag|http://www.semanlink.net/tag/blockchain +http://tempsreel.nouvelobs.com/economie/20160212.OBS4574/blockchain-revolution-technologique-ou-mirage.html|title|Blockchain : révolution technologique ou mirage ? 
- L'Obs +http://tempsreel.nouvelobs.com/economie/20160212.OBS4574/blockchain-revolution-technologique-ou-mirage.html|creationTime|2016-02-14T18:41:13Z +https://www.nytimes.com/2017/10/19/world/asia/myanmar-democracy-rohingya.html?_r=0|creationDate|2017-10-20 +https://www.nytimes.com/2017/10/19/world/asia/myanmar-democracy-rohingya.html?_r=0|tag|http://www.semanlink.net/tag/birmanie +https://www.nytimes.com/2017/10/19/world/asia/myanmar-democracy-rohingya.html?_r=0|title|Myanmar, Once a Hope for Democracy, Is Now a Study in How It Fails - The New York Times +https://www.nytimes.com/2017/10/19/world/asia/myanmar-democracy-rohingya.html?_r=0|creationTime|2017-10-20T15:37:22Z +http://ebiquity.umbc.edu/blogger/index.php?p=261|creationDate|2005-06-23 +http://ebiquity.umbc.edu/blogger/index.php?p=261|tag|http://www.semanlink.net/tag/skos +http://ebiquity.umbc.edu/blogger/index.php?p=261|title|EBB: ebiquity blog at UMBC » SKOS: Simple Knowledge Organization System +http://www.heppnetz.de/projects/eclassowl/|creationDate|2013-03-02 +http://www.heppnetz.de/projects/eclassowl/|tag|http://www.semanlink.net/tag/eclassowl +http://www.heppnetz.de/projects/eclassowl/|title|eClassOWL - The Web Ontology for Products and Services +http://www.heppnetz.de/projects/eclassowl/|creationTime|2013-03-02T21:25:27Z +http://www.der-mo.net/ASADO/|creationDate|2006-10-09 +http://www.der-mo.net/ASADO/|tag|http://www.semanlink.net/tag/airbus +http://www.der-mo.net/ASADO/|tag|http://www.semanlink.net/tag/technical_documentation +http://www.der-mo.net/ASADO/|tag|http://www.semanlink.net/tag/graph_visualization +http://www.der-mo.net/ASADO/|comment|"I wrote my bachelor's thesis about ""Projection Techniques for Document Maps"" in the context of this study project. On behalf of the aircraft manufacturer AIRBUS, we researched methodologies and technologies to analyse and structure the huge amount of documentation produced during aircraft construction." 
+http://www.der-mo.net/ASADO/|title|der-mo.net - Moritz Stefaner - Asado +https://www.lemonde.fr/idees/article/2019/05/06/biodiversite-l-humanite-face-a-ses-responsabilites_5458837_3232.html|creationDate|2019-05-06 +https://www.lemonde.fr/idees/article/2019/05/06/biodiversite-l-humanite-face-a-ses-responsabilites_5458837_3232.html|tag|http://www.semanlink.net/tag/biodiversite +https://www.lemonde.fr/idees/article/2019/05/06/biodiversite-l-humanite-face-a-ses-responsabilites_5458837_3232.html|title|Biodiversité : l’humanité face à ses responsabilités +https://www.lemonde.fr/idees/article/2019/05/06/biodiversite-l-humanite-face-a-ses-responsabilites_5458837_3232.html|creationTime|2019-05-06T16:41:01Z +https://www.lemonde.fr/afrique/article/2018/07/13/en-ethiopie-la-guerre-du-teff-aura-bien-lieu_5331059_3212.html|creationDate|2018-07-13 +https://www.lemonde.fr/afrique/article/2018/07/13/en-ethiopie-la-guerre-du-teff-aura-bien-lieu_5331059_3212.html|tag|http://www.semanlink.net/tag/propriete_intellectuelle +https://www.lemonde.fr/afrique/article/2018/07/13/en-ethiopie-la-guerre-du-teff-aura-bien-lieu_5331059_3212.html|tag|http://www.semanlink.net/tag/ethiopie +https://www.lemonde.fr/afrique/article/2018/07/13/en-ethiopie-la-guerre-du-teff-aura-bien-lieu_5331059_3212.html|tag|http://www.semanlink.net/tag/vive_le_capitalisme +https://www.lemonde.fr/afrique/article/2018/07/13/en-ethiopie-la-guerre-du-teff-aura-bien-lieu_5331059_3212.html|comment|Une farine traditionnelle éthiopienne, produite depuis 3000 ans, sous le coup d'un brevet hollandais +https://www.lemonde.fr/afrique/article/2018/07/13/en-ethiopie-la-guerre-du-teff-aura-bien-lieu_5331059_3212.html|title|En Ethiopie, la guerre du teff aura bien lieu +https://www.lemonde.fr/afrique/article/2018/07/13/en-ethiopie-la-guerre-du-teff-aura-bien-lieu_5331059_3212.html|creationTime|2018-07-13T23:10:06Z +https://www.youtube.com/watch?v=sD0NjbwqlYw&feature=youtu.be|creationDate|2016-12-27 +https://www.youtube.com/watch?v=sD0NjbwqlYw&feature=youtu.be|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=sD0NjbwqlYw&feature=youtu.be|tag|http://www.semanlink.net/tag/hypothese_de_riemann +https://www.youtube.com/watch?v=sD0NjbwqlYw&feature=youtu.be|title|Visualizing the Riemann zeta function and analytic continuation - YouTube +https://www.youtube.com/watch?v=sD0NjbwqlYw&feature=youtu.be|creationTime|2016-12-27T16:51:47Z +http://www.lemonde.fr/big-browser/article/2016/07/12/les-medias-dans-l-ere-de-la-politique-post-verite_4968559_4832693.html|creationDate|2016-09-18 +http://www.lemonde.fr/big-browser/article/2016/07/12/les-medias-dans-l-ere-de-la-politique-post-verite_4968559_4832693.html|tag|http://www.semanlink.net/tag/politique +http://www.lemonde.fr/big-browser/article/2016/07/12/les-medias-dans-l-ere-de-la-politique-post-verite_4968559_4832693.html|tag|http://www.semanlink.net/tag/medias +http://www.lemonde.fr/big-browser/article/2016/07/12/les-medias-dans-l-ere-de-la-politique-post-verite_4968559_4832693.html|tag|http://www.semanlink.net/tag/post_verite +http://www.lemonde.fr/big-browser/article/2016/07/12/les-medias-dans-l-ere-de-la-politique-post-verite_4968559_4832693.html|tag|http://www.semanlink.net/tag/verite +http://www.lemonde.fr/big-browser/article/2016/07/12/les-medias-dans-l-ere-de-la-politique-post-verite_4968559_4832693.html|comment|« Est-ce que la vérité compte encore ? 
» +http://www.lemonde.fr/big-browser/article/2016/07/12/les-medias-dans-l-ere-de-la-politique-post-verite_4968559_4832693.html|title|Les médias dans l’ère « de la politique post-vérité » +http://www.lemonde.fr/big-browser/article/2016/07/12/les-medias-dans-l-ere-de-la-politique-post-verite_4968559_4832693.html|creationTime|2016-09-18T11:25:32Z +http://publicdata.eu/|creationDate|2012-10-23 +http://publicdata.eu/|tag|http://www.semanlink.net/tag/public_data +http://publicdata.eu/|tag|http://www.semanlink.net/tag/europe +http://publicdata.eu/|tag|http://www.semanlink.net/tag/ckan +http://publicdata.eu/|title|PublicData.eu - Europe's Public Data +http://publicdata.eu/|creationTime|2012-10-23T01:05:03Z +http://emmeesse.wordpress.com/2006/09/29/folksonomies-e-tagging3/|creationDate|2007-04-03 +http://emmeesse.wordpress.com/2006/09/29/folksonomies-e-tagging3/|tag|http://www.semanlink.net/tag/linkto_semanlink +http://emmeesse.wordpress.com/2006/09/29/folksonomies-e-tagging3/|title|Folksonomies e tagging/3 « emmeesse +http://emmeesse.wordpress.com/2006/09/29/folksonomies-e-tagging3/|creationTime|2007-04-03T23:33:00Z +http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/|creationDate|2017-09-10 +http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/|tag|http://www.semanlink.net/tag/good +http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/|tag|http://www.semanlink.net/tag/word2vec +http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/|tag|http://www.semanlink.net/tag/tutorial +http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/|comment|skip-gram +http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/|title|Word2Vec Tutorial - The Skip-Gram Model · Chris McCormick +http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/|creationTime|2017-09-10T17:16:26Z +http://www.wired.com/science/discoveries/news/2008/01/synthetic_genome|creationDate|2008-01-25 +http://www.wired.com/science/discoveries/news/2008/01/synthetic_genome|tag|http://www.semanlink.net/tag/genome +http://www.wired.com/science/discoveries/news/2008/01/synthetic_genome|tag|http://www.semanlink.net/tag/synthetic_life +http://www.wired.com/science/discoveries/news/2008/01/synthetic_genome|tag|http://www.semanlink.net/tag/craig_venter +http://www.wired.com/science/discoveries/news/2008/01/synthetic_genome|title|Scientists Build First Man-Made Genome; Synthetic Life Comes Next +http://www.wired.com/science/discoveries/news/2008/01/synthetic_genome|creationTime|2008-01-25T21:38:25Z +http://morenews.blogspot.com/2008/04/update-from-www2008.html|creationDate|2008-05-16 +http://morenews.blogspot.com/2008/04/update-from-www2008.html|tag|http://www.semanlink.net/tag/www08 +http://morenews.blogspot.com/2008/04/update-from-www2008.html|title|More News: Update from WWW2008 +http://morenews.blogspot.com/2008/04/update-from-www2008.html|creationTime|2008-05-16T15:51:21Z +http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf|creationDate|2018-05-10 +http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf|tag|http://www.semanlink.net/tag/graph_based_text_representations +http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf|tag|http://www.semanlink.net/tag/slides +http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf|tag|http://www.semanlink.net/tag/thewebconf_2018 +http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf|comment|Slides of 
[tutorial](https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/) +http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf|relatedDoc|https://www2018.thewebconf.org/program/tutorials-track/tutorial-213/ +http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf|title|TUTORIAL: Graph-based Text Representations (SLIDES) +http://www.lix.polytechnique.fr/~mvazirg/gow_tutorial_webconf_2018.pdf|creationTime|2018-05-10T14:02:48Z +http://comm.semanticweb.org/|creationDate|2008-04-23 +http://comm.semanticweb.org/|tag|http://www.semanlink.net/tag/ontologies +http://comm.semanticweb.org/|tag|http://www.semanlink.net/tag/www08 +http://comm.semanticweb.org/|tag|http://www.semanlink.net/tag/multimedia +http://comm.semanticweb.org/|comment|Semantic descriptions of non-textual media available on the web can be used to facilitate retrieval and presentation of media assets and documents containing them. While technologies for multimedia semantic descriptions already exist, there is as yet no formal description of a high quality multimedia ontology that is compatible with existing (semantic) web technologies. We propose COMM - A Core Ontology for Multimedia based on both the MPEG-7 standard and the DOLCE foundational ontology. +http://comm.semanticweb.org/|title|COMM: Core Ontology on Multimedia +http://comm.semanticweb.org/|creationTime|2008-04-23T14:16:37Z +https://apps.facebook.com/friendsmusicquizz/|creationDate|2014-03-13 +https://apps.facebook.com/friendsmusicquizz/|tag|http://www.semanlink.net/tag/jeux_en_ligne +https://apps.facebook.com/friendsmusicquizz/|tag|http://www.semanlink.net/tag/alexandre_passant +https://apps.facebook.com/friendsmusicquizz/|tag|http://www.semanlink.net/tag/musique +https://apps.facebook.com/friendsmusicquizz/|tag|http://www.semanlink.net/tag/quizz +https://apps.facebook.com/friendsmusicquizz/|title|Friends music quizz, social music game +https://apps.facebook.com/friendsmusicquizz/|creationTime|2014-03-13T12:36:00Z +http://blog.cryptographyengineering.com/2013/12/how-does-nsa-break-ssl.html|creationDate|2013-12-04 +http://blog.cryptographyengineering.com/2013/12/how-does-nsa-break-ssl.html|tag|http://www.semanlink.net/tag/ssl +http://blog.cryptographyengineering.com/2013/12/how-does-nsa-break-ssl.html|tag|http://www.semanlink.net/tag/nsa +http://blog.cryptographyengineering.com/2013/12/how-does-nsa-break-ssl.html|title|A Few Thoughts on Cryptographic Engineering: How does the NSA break SSL? +http://blog.cryptographyengineering.com/2013/12/how-does-nsa-break-ssl.html|creationTime|2013-12-04T13:59:46Z +https://github.com/tatsy/markdown-it-imsize/issues/5|creationDate|2017-04-02 +https://github.com/tatsy/markdown-it-imsize/issues/5|tag|http://www.semanlink.net/tag/markdown_ittt +https://github.com/tatsy/markdown-it-imsize/issues/5|comment|RequireJS +https://github.com/tatsy/markdown-it-imsize/issues/5|title|Using this plugin in browser? · Issue #5 · tatsy/markdown-it-imsize +https://github.com/tatsy/markdown-it-imsize/issues/5|creationTime|2017-04-02T15:26:02Z +http://www.w3.org/2007/OWL/wiki/Different_Kinds_Of_Semantics|creationDate|2011-01-14 +http://www.w3.org/2007/OWL/wiki/Different_Kinds_Of_Semantics|tag|http://www.semanlink.net/tag/semantics +http://www.w3.org/2007/OWL/wiki/Different_Kinds_Of_Semantics|comment|"Various ways that one can specify semantics for a representation or modelling formalism, by Peter F. 
Patel-Schneider +" +http://www.w3.org/2007/OWL/wiki/Different_Kinds_Of_Semantics|title|Different Kinds Of Semantics +http://www.w3.org/2007/OWL/wiki/Different_Kinds_Of_Semantics|creationTime|2011-01-14T15:53:13Z +http://www.nytimes.com/2010/11/29/world/29cables.html|creationDate|2010-11-29 +http://www.nytimes.com/2010/11/29/world/29cables.html|tag|http://www.semanlink.net/tag/wikileaks +http://www.nytimes.com/2010/11/29/world/29cables.html|title|WikiLeaks Archive — Cables Uncloak U.S. Diplomacy - NYTimes.com +http://www.nytimes.com/2010/11/29/world/29cables.html|creationTime|2010-11-29T09:08:57Z +http://www.djerma.nl/|creationDate|2007-05-03 +http://www.djerma.nl/|tag|http://www.semanlink.net/tag/jerma +http://www.djerma.nl/|title|The Zarma website, the site about the Zarma (Djerma) language and culture. +http://www.djerma.nl/|creationTime|2007-05-03T01:11:42Z +http://lists.w3.org/Archives/Public/public-esw-thes/2005Jun/0043|creationDate|2007-01-03 +http://lists.w3.org/Archives/Public/public-esw-thes/2005Jun/0043|tag|http://www.semanlink.net/tag/httprange_14_solution +http://lists.w3.org/Archives/Public/public-esw-thes/2005Jun/0043|tag|http://www.semanlink.net/tag/skos +http://lists.w3.org/Archives/Public/public-esw-thes/2005Jun/0043|title|HTTP behaviour for SKOS Concepts from Miles, AJ \(Alistair\) on 2005-06-21 (public-esw-thes@w3.org from June 2005) +http://stackoverflow.com/questions/32979254/using-word2vec-for-topic-modeling|creationDate|2017-05-19 +http://stackoverflow.com/questions/32979254/using-word2vec-for-topic-modeling|tag|http://www.semanlink.net/tag/topic_modeling +http://stackoverflow.com/questions/32979254/using-word2vec-for-topic-modeling|tag|http://www.semanlink.net/tag/word2vec +http://stackoverflow.com/questions/32979254/using-word2vec-for-topic-modeling|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/32979254/using-word2vec-for-topic-modeling|title|Using Word2Vec for topic modeling - Stack Overflow +http://stackoverflow.com/questions/32979254/using-word2vec-for-topic-modeling|creationTime|2017-05-19T00:22:06Z +http://developer.apple.com/internet/webcontent/|creationDate|2006-01-20 +http://developer.apple.com/internet/webcontent/|tag|http://www.semanlink.net/tag/apple_developer_connection +http://developer.apple.com/internet/webcontent/|tag|http://www.semanlink.net/tag/mac_os_x_web_serving +http://developer.apple.com/internet/webcontent/|tag|http://www.semanlink.net/tag/web_dev +http://developer.apple.com/internet/webcontent/|comment|Articles sur la création de sites web et les technos qui vont avec, par Apple +http://developer.apple.com/internet/webcontent/|title|Web Content Articles +https://medium.com/huggingface/universal-word-sentence-embeddings-ce48ddc8fc3a|creationDate|2018-05-30 +https://medium.com/huggingface/universal-word-sentence-embeddings-ce48ddc8fc3a|tag|http://www.semanlink.net/tag/word_embedding +https://medium.com/huggingface/universal-word-sentence-embeddings-ce48ddc8fc3a|tag|http://www.semanlink.net/tag/sentence_embeddings +https://medium.com/huggingface/universal-word-sentence-embeddings-ce48ddc8fc3a|tag|http://www.semanlink.net/tag/survey +https://medium.com/huggingface/universal-word-sentence-embeddings-ce48ddc8fc3a|comment|"Word embeddings SOTA: [ELMo](/tag/elmo) + +Sentence embeddings: While unsupervised representation learning of sentences had been the +norm for quite some time, with simple baselines like averaging word embeddings, a few novel unsupervised and supervised +approaches, as well as multi-task 
learning schemes, have emerged in late +2017-early 2018. + +" +https://medium.com/huggingface/universal-word-sentence-embeddings-ce48ddc8fc3a|title|The Current Best of Universal Word Embeddings and Sentence Embeddings (2018) +https://medium.com/huggingface/universal-word-sentence-embeddings-ce48ddc8fc3a|creationTime|2018-05-30T22:57:02Z +http://beta.slashdot.org/story/200313|creationDate|2014-04-05 +http://beta.slashdot.org/story/200313|tag|http://www.semanlink.net/tag/mecanique_quantique +http://beta.slashdot.org/story/200313|tag|http://www.semanlink.net/tag/p_np +http://beta.slashdot.org/story/200313|title|P vs. NP Problem Linked To the Quantum Nature of the Universe - Slashdot +http://beta.slashdot.org/story/200313|creationTime|2014-04-05T23:55:46Z +http://www.xml.com/pub/a/2005/11/16/introducing-sparql-querying-semantic-web-tutorial.html|creationDate|2005-11-18 +http://www.xml.com/pub/a/2005/11/16/introducing-sparql-querying-semantic-web-tutorial.html|tag|http://www.semanlink.net/tag/sparql +http://www.xml.com/pub/a/2005/11/16/introducing-sparql-querying-semantic-web-tutorial.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.xml.com/pub/a/2005/11/16/introducing-sparql-querying-semantic-web-tutorial.html|title|XML.com: Introducing SPARQL: Querying the Semantic Web +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0008.html|creationDate|2007-01-09 +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0008.html|tag|http://www.semanlink.net/tag/chris_bizer +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0008.html|tag|http://www.semanlink.net/tag/semantic_web_w3_org +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0008.html|tag|http://www.semanlink.net/tag/rdf_data_source +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0008.html|title|D2R Server publishing the DBLP Bibliography as Linked Data (Semantic Web grows 10%) from Chris Bizer +http://lists.w3.org/Archives/Public/semantic-web/2006Nov/0008.html|date|2006-11-02 +https://www.lemonde.fr/pixels/article/2019/03/12/tim-berners-lee-il-n-est-pas-trop-tard-pour-changer-le-web_5434682_4408996.html|creationDate|2019-03-12 +https://www.lemonde.fr/pixels/article/2019/03/12/tim-berners-lee-il-n-est-pas-trop-tard-pour-changer-le-web_5434682_4408996.html|tag|http://www.semanlink.net/tag/tim_berners_lee +https://www.lemonde.fr/pixels/article/2019/03/12/tim-berners-lee-il-n-est-pas-trop-tard-pour-changer-le-web_5434682_4408996.html|title|30 ans du Web : « Il n’est pas trop tard pour changer le Web », affirme Tim Berners-Lee +https://www.lemonde.fr/pixels/article/2019/03/12/tim-berners-lee-il-n-est-pas-trop-tard-pour-changer-le-web_5434682_4408996.html|creationTime|2019-03-12T11:49:36Z +http://ebiquity.umbc.edu/project/html/id/59/|creationDate|2005-07-04 +http://ebiquity.umbc.edu/project/html/id/59/|tag|http://www.semanlink.net/tag/owl +http://ebiquity.umbc.edu/project/html/id/59/|tag|http://www.semanlink.net/tag/reseaux_bayesiens +http://ebiquity.umbc.edu/project/html/id/59/|title|UMBC eBiquity Project: Bayes OWL +http://solutoire.com/2008/06/12/sending-javascript-functions-over-json/|creationDate|2015-03-21 +http://solutoire.com/2008/06/12/sending-javascript-functions-over-json/|tag|http://www.semanlink.net/tag/javascript +http://solutoire.com/2008/06/12/sending-javascript-functions-over-json/|tag|http://www.semanlink.net/tag/json +http://solutoire.com/2008/06/12/sending-javascript-functions-over-json/|title|Sending Javascript Functions Over JSON Solutoire.com 
+http://solutoire.com/2008/06/12/sending-javascript-functions-over-json/|creationTime|2015-03-21T19:14:16Z +http://idealliance.org/proceedings/xtech05/papers/02-07-02/|creationDate|2005-09-22 +http://idealliance.org/proceedings/xtech05/papers/02-07-02/|tag|http://www.semanlink.net/tag/reparation_automobile +http://idealliance.org/proceedings/xtech05/papers/02-07-02/|tag|http://www.semanlink.net/tag/ontologies +http://idealliance.org/proceedings/xtech05/papers/02-07-02/|title|How can ontologies help repair your car? +http://flickrvision.com/|creationDate|2007-05-16 +http://flickrvision.com/|tag|http://www.semanlink.net/tag/flickr +http://flickrvision.com/|title|flickrvision (beta) +http://flickrvision.com/|creationTime|2007-05-16T01:08:21Z +https://pdfs.semanticscholar.org/1f65/6b9c686c1e5db2a4d41f1ce7e270965def3e.pdf|creationDate|2017-05-20 +https://pdfs.semanticscholar.org/1f65/6b9c686c1e5db2a4d41f1ce7e270965def3e.pdf|tag|http://www.semanlink.net/tag/topic_models_word_embedding +https://pdfs.semanticscholar.org/1f65/6b9c686c1e5db2a4d41f1ce7e270965def3e.pdf|title|Improving Topic Models with Latent Feature Word Representations (slides) +https://pdfs.semanticscholar.org/1f65/6b9c686c1e5db2a4d41f1ce7e270965def3e.pdf|creationTime|2017-05-20T14:50:46Z +http://developers.slashdot.org/story/13/12/07/210237/excite-kids-to-code-by-focusing-less-on-coding?utm_source=rss1.0mainlinkanon&utm_medium=feed|creationDate|2013-12-09 +http://developers.slashdot.org/story/13/12/07/210237/excite-kids-to-code-by-focusing-less-on-coding?utm_source=rss1.0mainlinkanon&utm_medium=feed|tag|http://www.semanlink.net/tag/tutorial +http://developers.slashdot.org/story/13/12/07/210237/excite-kids-to-code-by-focusing-less-on-coding?utm_source=rss1.0mainlinkanon&utm_medium=feed|tag|http://www.semanlink.net/tag/kids +http://developers.slashdot.org/story/13/12/07/210237/excite-kids-to-code-by-focusing-less-on-coding?utm_source=rss1.0mainlinkanon&utm_medium=feed|tag|http://www.semanlink.net/tag/programming +http://developers.slashdot.org/story/13/12/07/210237/excite-kids-to-code-by-focusing-less-on-coding?utm_source=rss1.0mainlinkanon&utm_medium=feed|title|Excite Kids To Code By Focusing Less On Coding - Slashdot +http://developers.slashdot.org/story/13/12/07/210237/excite-kids-to-code-by-focusing-less-on-coding?utm_source=rss1.0mainlinkanon&utm_medium=feed|creationTime|2013-12-09T15:17:16Z +http://www.youtube.com/watch?v=87HhuYZePZs|creationDate|2009-01-15 +http://www.youtube.com/watch?v=87HhuYZePZs|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=87HhuYZePZs|tag|http://www.semanlink.net/tag/niger +http://www.youtube.com/watch?v=87HhuYZePZs|title|Niger +http://www.youtube.com/watch?v=87HhuYZePZs|creationTime|2009-01-15T22:18:02Z +http://dannyayers.com/2006/04/28/triplestores-virtuosity|creationDate|2006-04-30 +http://dannyayers.com/2006/04/28/triplestores-virtuosity|tag|http://www.semanlink.net/tag/tools +http://dannyayers.com/2006/04/28/triplestores-virtuosity|tag|http://www.semanlink.net/tag/triplestore +http://dannyayers.com/2006/04/28/triplestores-virtuosity|comment|"Virtuoso provides:
  • Object-Relational DBMS Engine (ORDBMS like PostgreSQL and DBMS engine like MySQL)
  • XML Data Management (with support for XQuery, XPath, XSLT, and XML Schema)
  • RDF Triple Store (or Database) that supports SPARQL (Query Language, Transport Protocol, and XML Results Serialization format)
  • Service Oriented Architecture
  • Web Application Server (supports HTTP/WebDAV)
  • NNTP compliant Discussion Server
" +http://dannyayers.com/2006/04/28/triplestores-virtuosity|title|Triplestores! Virtuosity! Marsupials! +http://www.w3.org/Submission/2006/SUBM-owl11-overview-20061219/|creationDate|2007-09-19 +http://www.w3.org/Submission/2006/SUBM-owl11-overview-20061219/|tag|http://www.semanlink.net/tag/owl_1_1 +http://www.w3.org/Submission/2006/SUBM-owl11-overview-20061219/|title|OWL 1.1 Web Ontology Language Overview +http://www.w3.org/Submission/2006/SUBM-owl11-overview-20061219/|creationTime|2007-09-19T01:07:56Z +http://ascensionsemantica.blogspot.com/2009/09/new-spin-cycle.html|creationDate|2011-01-09 +http://ascensionsemantica.blogspot.com/2009/09/new-spin-cycle.html|tag|http://www.semanlink.net/tag/spin_functions +http://ascensionsemantica.blogspot.com/2009/09/new-spin-cycle.html|title|Ascension Semantica: A New SPIN Cycle +http://ascensionsemantica.blogspot.com/2009/09/new-spin-cycle.html|creationTime|2011-01-09T23:48:19Z +http://bugbrother.blog.lemonde.fr/2009/04/22/comment-contourner-la-cybersurveillance/|creationDate|2009-04-22 +http://bugbrother.blog.lemonde.fr/2009/04/22/comment-contourner-la-cybersurveillance/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2009/04/22/comment-contourner-la-cybersurveillance/|tag|http://www.semanlink.net/tag/cybersurveillance +http://bugbrother.blog.lemonde.fr/2009/04/22/comment-contourner-la-cybersurveillance/|title|Comment contourner la cybersurveillance ? - BUG BROTHER - Blog LeMonde.fr +http://bugbrother.blog.lemonde.fr/2009/04/22/comment-contourner-la-cybersurveillance/|creationTime|2009-04-22T18:00:15Z +http://docs.info.apple.com/article.html?artnum=302983-fr|creationDate|2006-05-02 +http://docs.info.apple.com/article.html?artnum=302983-fr|tag|http://www.semanlink.net/tag/java_1_5_mac_os_x +http://docs.info.apple.com/article.html?artnum=302983-fr|comment|After installing J2SE 5.0 Release 4, J2SE 5.0 becomes preferred over Java 1.4.2, which will still be installed on your Mac. 
+http://docs.info.apple.com/article.html?artnum=302983-fr|title|About Java 2 Standard Edition (J2SE) 5.0 Release 4 +http://smwforum.ontoprise.com/smwforum/index.php/Querying_Semantic_Data_from_ISWC_Wiki_(USB_Version)#Download|creationDate|2008-11-07 +http://smwforum.ontoprise.com/smwforum/index.php/Querying_Semantic_Data_from_ISWC_Wiki_(USB_Version)#Download|tag|http://www.semanlink.net/tag/excel +http://smwforum.ontoprise.com/smwforum/index.php/Querying_Semantic_Data_from_ISWC_Wiki_(USB_Version)#Download|tag|http://www.semanlink.net/tag/sparql +http://smwforum.ontoprise.com/smwforum/index.php/Querying_Semantic_Data_from_ISWC_Wiki_(USB_Version)#Download|title|Querying Semantic Data from ISWC Wiki (USB Version) - SMWForum +http://smwforum.ontoprise.com/smwforum/index.php/Querying_Semantic_Data_from_ISWC_Wiki_(USB_Version)#Download|creationTime|2008-11-07T13:11:42Z +http://createjs.org/|creationDate|2012-02-07 +http://createjs.org/|tag|http://www.semanlink.net/tag/interactive_knowledge_stack +http://createjs.org/|tag|http://www.semanlink.net/tag/henri_bergius +http://createjs.org/|tag|http://www.semanlink.net/tag/jquery +http://createjs.org/|tag|http://www.semanlink.net/tag/create_js +http://createjs.org/|comment|Henri Bergius +http://createjs.org/|title|Create — A new kind of web editing interface +http://createjs.org/|creationTime|2012-02-07T20:58:12Z +https://looselyconnected.wordpress.com/2010/11/12/is-oauth-stateless-can-it-work-for-rest/|creationDate|2015-11-16 +https://looselyconnected.wordpress.com/2010/11/12/is-oauth-stateless-can-it-work-for-rest/|tag|http://www.semanlink.net/tag/rest +https://looselyconnected.wordpress.com/2010/11/12/is-oauth-stateless-can-it-work-for-rest/|tag|http://www.semanlink.net/tag/oauth +https://looselyconnected.wordpress.com/2010/11/12/is-oauth-stateless-can-it-work-for-rest/|title|Is OAuth Stateless? Can it work for REST? 
Loosely Connected +https://looselyconnected.wordpress.com/2010/11/12/is-oauth-stateless-can-it-work-for-rest/|creationTime|2015-11-16T12:11:29Z +https://www.wikitribune.com/|creationDate|2017-04-26 +https://www.wikitribune.com/|tag|http://www.semanlink.net/tag/fact_checking +https://www.wikitribune.com/|tag|http://www.semanlink.net/tag/journal +https://www.wikitribune.com/|title|Wikitribune – Evidence-based journalism +https://www.wikitribune.com/|creationTime|2017-04-26T12:34:07Z +http://instagram.com/mirrorsme|creationDate|2014-07-14 +http://instagram.com/mirrorsme|tag|http://www.semanlink.net/tag/selfie +http://instagram.com/mirrorsme|title|mirrorsme on Instagram +http://instagram.com/mirrorsme|creationTime|2014-07-14T13:11:15Z +http://www.lemonde.fr/biodiversite/article/2018/03/20/les-oiseaux-disparaissent-des-campagnes-francaises-a-une-vitesse-vertigineuse_5273420_1652692.html|creationDate|2018-03-20 +http://www.lemonde.fr/biodiversite/article/2018/03/20/les-oiseaux-disparaissent-des-campagnes-francaises-a-une-vitesse-vertigineuse_5273420_1652692.html|tag|http://www.semanlink.net/tag/oiseau +http://www.lemonde.fr/biodiversite/article/2018/03/20/les-oiseaux-disparaissent-des-campagnes-francaises-a-une-vitesse-vertigineuse_5273420_1652692.html|tag|http://www.semanlink.net/tag/biodiversite +http://www.lemonde.fr/biodiversite/article/2018/03/20/les-oiseaux-disparaissent-des-campagnes-francaises-a-une-vitesse-vertigineuse_5273420_1652692.html|tag|http://www.semanlink.net/tag/biodiversite_declin +http://www.lemonde.fr/biodiversite/article/2018/03/20/les-oiseaux-disparaissent-des-campagnes-francaises-a-une-vitesse-vertigineuse_5273420_1652692.html|tag|http://www.semanlink.net/tag/agriculture_francaise +http://www.lemonde.fr/biodiversite/article/2018/03/20/les-oiseaux-disparaissent-des-campagnes-francaises-a-une-vitesse-vertigineuse_5273420_1652692.html|tag|http://www.semanlink.net/tag/agriculture_industrielle +http://www.lemonde.fr/biodiversite/article/2018/03/20/les-oiseaux-disparaissent-des-campagnes-francaises-a-une-vitesse-vertigineuse_5273420_1652692.html|title|Les oiseaux disparaissent des campagnes françaises à une vitesse « vertigineuse » +http://www.lemonde.fr/biodiversite/article/2018/03/20/les-oiseaux-disparaissent-des-campagnes-francaises-a-une-vitesse-vertigineuse_5273420_1652692.html|creationTime|2018-03-20T08:32:20Z +http://www.nuxeo.com/fr/content/download/4453/147878/file/Nuxeo%20Platform%20in%2015%20Minutes.pdf|creationDate|2012-08-02 +http://www.nuxeo.com/fr/content/download/4453/147878/file/Nuxeo%20Platform%20in%2015%20Minutes.pdf|tag|http://www.semanlink.net/tag/nuxeo +http://www.nuxeo.com/fr/content/download/4453/147878/file/Nuxeo%20Platform%20in%2015%20Minutes.pdf|title|Nuxeo Platform in 15 Minutes +http://www.nuxeo.com/fr/content/download/4453/147878/file/Nuxeo%20Platform%20in%2015%20Minutes.pdf|creationTime|2012-08-02T18:27:44Z +https://link.springer.com/article/10.1007/s10618-015-0430-1|creationDate|2018-01-27 +https://link.springer.com/article/10.1007/s10618-015-0430-1|tag|http://www.semanlink.net/tag/lip6 +https://link.springer.com/article/10.1007/s10618-015-0430-1|tag|http://www.semanlink.net/tag/patrick_gallinari +https://link.springer.com/article/10.1007/s10618-015-0430-1|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +https://link.springer.com/article/10.1007/s10618-015-0430-1|tag|http://www.semanlink.net/tag/knowledge_graph_completion +https://link.springer.com/article/10.1007/s10618-015-0430-1|comment|"Embedding entities and relations in the 
knowledge base to low dimensional vector representations and then predict the possible truth of additional facts to extend the knowledge base +" +https://link.springer.com/article/10.1007/s10618-015-0430-1|title|Knowledge base completion by learning pairwise-interaction differentiated embeddings SpringerLink (2015) +https://link.springer.com/article/10.1007/s10618-015-0430-1|creationTime|2018-01-27T13:21:31Z +https://www.youtube.com/watch?v=5FFRoYhTJQQ|creationDate|2017-12-18 +https://www.youtube.com/watch?v=5FFRoYhTJQQ|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=5FFRoYhTJQQ|tag|http://www.semanlink.net/tag/rigolo +https://www.youtube.com/watch?v=5FFRoYhTJQQ|tag|http://www.semanlink.net/tag/speech_recognition +https://www.youtube.com/watch?v=5FFRoYhTJQQ|tag|http://www.semanlink.net/tag/scotland +https://www.youtube.com/watch?v=5FFRoYhTJQQ|title|Burnistoun S1E1 - Voice Recognition Elevator - ELEVEN! - YouTube +https://www.youtube.com/watch?v=5FFRoYhTJQQ|creationTime|2017-12-18T13:50:24Z +http://fr.groups.yahoo.com/group/SemanticCampParis/|creationDate|2008-04-06 +http://fr.groups.yahoo.com/group/SemanticCampParis/|tag|http://www.semanlink.net/tag/semantic_camp_paris +http://fr.groups.yahoo.com/group/SemanticCampParis/|title|Yahoo! Groupes : SemanticCampParis +http://fr.groups.yahoo.com/group/SemanticCampParis/|creationTime|2008-04-06T23:15:57Z +http://www.wired.com/opinion/2013/10/google-in-jeopardy-what-if-watson-beat-the-search-giant/|creationDate|2013-10-05 +http://www.wired.com/opinion/2013/10/google-in-jeopardy-what-if-watson-beat-the-search-giant/|tag|http://www.semanlink.net/tag/google +http://www.wired.com/opinion/2013/10/google-in-jeopardy-what-if-watson-beat-the-search-giant/|tag|http://www.semanlink.net/tag/ibm +http://www.wired.com/opinion/2013/10/google-in-jeopardy-what-if-watson-beat-the-search-giant/|title|Google in Jeopardy: What If IBM's Watson Dethroned the King of Search? 
Wired Opinion Wired.com +http://www.wired.com/opinion/2013/10/google-in-jeopardy-what-if-watson-beat-the-search-giant/|creationTime|2013-10-05T23:56:29Z +http://blog.schema.org/2014/04/announcing-schemaorg-actions.html|creationDate|2014-09-08 +http://blog.schema.org/2014/04/announcing-schemaorg-actions.html|tag|http://www.semanlink.net/tag/schema_org_actions +http://blog.schema.org/2014/04/announcing-schemaorg-actions.html|title|Schema.org Actions +http://blog.schema.org/2014/04/announcing-schemaorg-actions.html|creationTime|2014-09-08T15:25:50Z +http://porterhead.blogspot.fr/2013/01/writing-rest-services-in-java-part-6.html|creationDate|2014-09-26 +http://porterhead.blogspot.fr/2013/01/writing-rest-services-in-java-part-6.html|tag|http://www.semanlink.net/tag/security +http://porterhead.blogspot.fr/2013/01/writing-rest-services-in-java-part-6.html|tag|http://www.semanlink.net/tag/jersey +http://porterhead.blogspot.fr/2013/01/writing-rest-services-in-java-part-6.html|tag|http://www.semanlink.net/tag/security_and_rest +http://porterhead.blogspot.fr/2013/01/writing-rest-services-in-java-part-6.html|title|Code and tech stuff: Writing REST Services in Java: Part 6 Security & Authorization +http://porterhead.blogspot.fr/2013/01/writing-rest-services-in-java-part-6.html|creationTime|2014-09-26T01:03:17Z +http://virtuoso.openlinksw.com/wiki/main|creationDate|2008-08-26 +http://virtuoso.openlinksw.com/wiki/main|tag|http://www.semanlink.net/tag/virtuoso_open_source_edition +http://virtuoso.openlinksw.com/wiki/main|title|OpenLink Virtuoso: Open-Source Edition +http://virtuoso.openlinksw.com/wiki/main|creationTime|2008-08-26T11:53:56Z +http://www.wired.com/2015/03/johnny-haglund-the-earth-is-on-fire#slide-1|creationDate|2015-12-30 +http://www.wired.com/2015/03/johnny-haglund-the-earth-is-on-fire#slide-1|tag|http://www.semanlink.net/tag/catastrophe_industrielle +http://www.wired.com/2015/03/johnny-haglund-the-earth-is-on-fire#slide-1|tag|http://www.semanlink.net/tag/fire +http://www.wired.com/2015/03/johnny-haglund-the-earth-is-on-fire#slide-1|tag|http://www.semanlink.net/tag/inde +http://www.wired.com/2015/03/johnny-haglund-the-earth-is-on-fire#slide-1|title|This Hellish Underground Fire Has Burned for 100 Years WIRED +http://www.wired.com/2015/03/johnny-haglund-the-earth-is-on-fire#slide-1|creationTime|2015-12-30T20:27:42Z +http://www.w3.org/DesignIssues/Evolution.html|creationDate|2009-04-14 +http://www.w3.org/DesignIssues/Evolution.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/DesignIssues/Evolution.html|tag|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.w3.org/DesignIssues/Evolution.html|tag|http://www.semanlink.net/tag/test_of_independent_invention +http://www.w3.org/DesignIssues/Evolution.html|title|The Evolution of a specification -- Commentary on Web +http://www.w3.org/DesignIssues/Evolution.html|creationTime|2009-04-14T01:31:12Z +http://news.bbc.co.uk/2/hi/americas/4808342.stm|creationDate|2006-03-18 +http://news.bbc.co.uk/2/hi/americas/4808342.stm|tag|http://www.semanlink.net/tag/insecte +http://news.bbc.co.uk/2/hi/americas/4808342.stm|tag|http://www.semanlink.net/tag/pentagon +http://news.bbc.co.uk/2/hi/americas/4808342.stm|tag|http://www.semanlink.net/tag/darpa +http://news.bbc.co.uk/2/hi/americas/4808342.stm|title|BBC NEWS-Pentagon plans cyber-insect army +http://news.bbc.co.uk/2/hi/americas/4808342.stm|source|BBC +http://tools.wmflabs.org/reasonator/|creationDate|2014-01-26 
+http://tools.wmflabs.org/reasonator/|tag|http://www.semanlink.net/tag/wikidata +http://tools.wmflabs.org/reasonator/|comment|Wikidata - in pretty! +http://tools.wmflabs.org/reasonator/|title|Reasonator +http://tools.wmflabs.org/reasonator/|creationTime|2014-01-26T20:56:26Z +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0159.html|creationDate|2012-04-11 +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0159.html|tag|http://www.semanlink.net/tag/jeni_tennison +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0159.html|tag|http://www.semanlink.net/tag/httprange_14 +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0159.html|tag|http://www.semanlink.net/tag/hugh_glaser +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0159.html|title|Re: Change Proposal for HttpRange-14 from Jeni Tennison +http://lists.w3.org/Archives/Public/public-lod/2012Mar/0159.html|creationTime|2012-04-11T17:11:59Z +https://twitter.com/radekosmulski/status/1124766298469277696|creationDate|2019-05-05 +https://twitter.com/radekosmulski/status/1124766298469277696|tag|http://www.semanlink.net/tag/sparse_matrix +https://twitter.com/radekosmulski/status/1124766298469277696|tag|http://www.semanlink.net/tag/scikit_learn +https://twitter.com/radekosmulski/status/1124766298469277696|tag|http://www.semanlink.net/tag/fast_ai +https://twitter.com/radekosmulski/status/1124766298469277696|tag|http://www.semanlink.net/tag/python_tips +https://twitter.com/radekosmulski/status/1124766298469277696|title|"Radek Osmulski sur Twitter : ""You would expect a difference in row access times depending on the type of a sparse matrix, but I didn't realize the difference would be so big!" +https://twitter.com/radekosmulski/status/1124766298469277696|creationTime|2019-05-05T10:32:30Z +http://paleodb.org/cgi-bin/bridge.pl|creationDate|2008-05-12 +http://paleodb.org/cgi-bin/bridge.pl|tag|http://www.semanlink.net/tag/paleontologie +http://paleodb.org/cgi-bin/bridge.pl|tag|http://www.semanlink.net/tag/database +http://paleodb.org/cgi-bin/bridge.pl|comment|The Paleobiology Database is an international scientific organization run by paleontological researchers from many institutions. We are bringing together taxonomic and distributional information about the entire fossil record of plants and animals. Our goal is to educate the public, summarize the literature for professionals, and foster statistical analyses of mass extinctions and other aspects of biodiversity. 
+http://paleodb.org/cgi-bin/bridge.pl|title|The Paleobiology Database +http://paleodb.org/cgi-bin/bridge.pl|creationTime|2008-05-12T09:49:50Z +http://www.flickr.com/photos/hyperfp/508241234/|creationDate|2007-06-27 +http://www.flickr.com/photos/hyperfp/508241234/|tag|http://www.semanlink.net/tag/gado +http://www.flickr.com/photos/hyperfp/508241234/|title|Gado sur Flickr +http://www.flickr.com/photos/hyperfp/508241234/|creationTime|2007-06-27T21:56:56Z +http://blog.samaltman.com/the-merge|creationDate|2017-12-07 +http://blog.samaltman.com/the-merge|tag|http://www.semanlink.net/tag/technological_singularity +http://blog.samaltman.com/the-merge|title|The Merge - Sam Altman +http://blog.samaltman.com/the-merge|creationTime|2017-12-07T18:31:41Z +http://www.dfki.uni-kl.de/~horak/mypubs/ConTag.pdf|creationDate|2007-04-20 +http://www.dfki.uni-kl.de/~horak/mypubs/ConTag.pdf|tag|http://www.semanlink.net/tag/semantic_desktop +http://www.dfki.uni-kl.de/~horak/mypubs/ConTag.pdf|tag|http://www.semanlink.net/tag/linkto_semanlink +http://www.dfki.uni-kl.de/~horak/mypubs/ConTag.pdf|tag|http://www.semanlink.net/tag/web_2_0 +http://www.dfki.uni-kl.de/~horak/mypubs/ConTag.pdf|tag|http://www.semanlink.net/tag/tagging +http://www.dfki.uni-kl.de/~horak/mypubs/ConTag.pdf|comment|Diploma Thesis qui cite Semanlink +http://www.dfki.uni-kl.de/~horak/mypubs/ConTag.pdf|title|ConTag A Tagging System: Linking the Semantic Desktop with Web 2.0 +http://www.dfki.uni-kl.de/~horak/mypubs/ConTag.pdf|creationTime|2007-04-20T20:52:43Z +http://maximilian.developpez.com/mysql/queryCache/|creationDate|2008-11-18 +http://maximilian.developpez.com/mysql/queryCache/|tag|http://www.semanlink.net/tag/cache +http://maximilian.developpez.com/mysql/queryCache/|tag|http://www.semanlink.net/tag/mysql +http://maximilian.developpez.com/mysql/queryCache/|title|Etude pratique du cache de requêtes MySQL +http://maximilian.developpez.com/mysql/queryCache/|creationTime|2008-11-18T18:47:22Z +https://tryolabs.com/blog/2017/12/12/deep-learning-for-nlp-advancements-and-trends-in-2017/|creationDate|2017-12-13 +https://tryolabs.com/blog/2017/12/12/deep-learning-for-nlp-advancements-and-trends-in-2017/|tag|http://www.semanlink.net/tag/deep_nlp +https://tryolabs.com/blog/2017/12/12/deep-learning-for-nlp-advancements-and-trends-in-2017/|title|Deep Learning for NLP, advancements and trends in 2017 - Tryolabs Blog +https://tryolabs.com/blog/2017/12/12/deep-learning-for-nlp-advancements-and-trends-in-2017/|creationTime|2017-12-13T11:22:56Z +https://aeon.co/essays/how-the-internet-flips-elections-and-alters-our-thoughts|creationDate|2016-03-30 +https://aeon.co/essays/how-the-internet-flips-elections-and-alters-our-thoughts|tag|http://www.semanlink.net/tag/big_brother +https://aeon.co/essays/how-the-internet-flips-elections-and-alters-our-thoughts|tag|http://www.semanlink.net/tag/mind_control +https://aeon.co/essays/how-the-internet-flips-elections-and-alters-our-thoughts|comment|The new mind control
The internet has spawned subtle forms of influence that can flip elections and manipulate everything we say, think and do +https://aeon.co/essays/how-the-internet-flips-elections-and-alters-our-thoughts|title|How the internet flips elections and alters our thoughts Aeon Essays +https://aeon.co/essays/how-the-internet-flips-elections-and-alters-our-thoughts|creationTime|2016-03-30T02:11:36Z +https://github.com/Linkurious/linkurious.js/wiki/How-to-integrate-with-Angular.js|creationDate|2015-08-29 +https://github.com/Linkurious/linkurious.js/wiki/How-to-integrate-with-Angular.js|tag|http://www.semanlink.net/tag/sigma_js +https://github.com/Linkurious/linkurious.js/wiki/How-to-integrate-with-Angular.js|tag|http://www.semanlink.net/tag/angularjs +https://github.com/Linkurious/linkurious.js/wiki/How-to-integrate-with-Angular.js|tag|http://www.semanlink.net/tag/github_project +https://github.com/Linkurious/linkurious.js/wiki/How-to-integrate-with-Angular.js|title|How to to integrate Linkurious.js into Angular.js +https://github.com/Linkurious/linkurious.js/wiki/How-to-integrate-with-Angular.js|creationTime|2015-08-29T19:10:54Z +http://www.wired.com/rawfile/2013/09/michael-light-aerial-photos|creationDate|2013-09-14 +http://www.wired.com/rawfile/2013/09/michael-light-aerial-photos|tag|http://www.semanlink.net/tag/photo_aerienne +http://www.wired.com/rawfile/2013/09/michael-light-aerial-photos|tag|http://www.semanlink.net/tag/crise_financiere +http://www.wired.com/rawfile/2013/09/michael-light-aerial-photos|tag|http://www.semanlink.net/tag/urbanisation +http://www.wired.com/rawfile/2013/09/michael-light-aerial-photos|title|Economic Collapse Seen Through Aerial Photos of Abandoned Mansions Raw File Wired.com +http://www.wired.com/rawfile/2013/09/michael-light-aerial-photos|creationTime|2013-09-14T00:29:55Z +https://www.youtube.com/watch?v=WKgyI3wK8Ws&list=PLFDDD39192668FEFF|creationDate|2014-03-29 +https://www.youtube.com/watch?v=WKgyI3wK8Ws&list=PLFDDD39192668FEFF|tag|http://www.semanlink.net/tag/souvenirs +https://www.youtube.com/watch?v=WKgyI3wK8Ws&list=PLFDDD39192668FEFF|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=WKgyI3wK8Ws&list=PLFDDD39192668FEFF|tag|http://www.semanlink.net/tag/musique_du_niger +https://www.youtube.com/watch?v=WKgyI3wK8Ws&list=PLFDDD39192668FEFF|comment|Me rappelle le festival de la jeunesse à Zinder en 1986 +https://www.youtube.com/watch?v=WKgyI3wK8Ws&list=PLFDDD39192668FEFF|title|Marietou - YouTube +https://www.youtube.com/watch?v=WKgyI3wK8Ws&list=PLFDDD39192668FEFF|creationTime|2014-03-29T17:25:54Z +https://sigmoidal.io/boosting-your-solutions-with-nlp/|creationDate|2018-02-18 +https://sigmoidal.io/boosting-your-solutions-with-nlp/|tag|http://www.semanlink.net/tag/nlp +https://sigmoidal.io/boosting-your-solutions-with-nlp/|tag|http://www.semanlink.net/tag/deep_nlp +https://sigmoidal.io/boosting-your-solutions-with-nlp/|title|Natural Language Processing Algorithms (NLP AI) - Sigmoidal +https://sigmoidal.io/boosting-your-solutions-with-nlp/|creationTime|2018-02-18T16:01:24Z +http://developer.apple.com/internet/safari/faq.html#|creationDate|2008-03-18 +http://developer.apple.com/internet/safari/faq.html#|tag|http://www.semanlink.net/tag/faq +http://developer.apple.com/internet/safari/faq.html#|tag|http://www.semanlink.net/tag/safari +http://developer.apple.com/internet/safari/faq.html#|title|Safari Developer FAQ +http://developer.apple.com/internet/safari/faq.html#|creationTime|2008-03-18T08:26:58Z 
+http://www.lemonde.fr/economie/article/2013/03/18/la-competitivite-est-aussi-culturelle_1849760_3234.html|creationDate|2013-03-18 +http://www.lemonde.fr/economie/article/2013/03/18/la-competitivite-est-aussi-culturelle_1849760_3234.html|tag|http://www.semanlink.net/tag/competitivite +http://www.lemonde.fr/economie/article/2013/03/18/la-competitivite-est-aussi-culturelle_1849760_3234.html|tag|http://www.semanlink.net/tag/culture +http://www.lemonde.fr/economie/article/2013/03/18/la-competitivite-est-aussi-culturelle_1849760_3234.html|title|La compétitivité est aussi culturelle +http://www.lemonde.fr/economie/article/2013/03/18/la-competitivite-est-aussi-culturelle_1849760_3234.html|creationTime|2013-03-18T16:37:13Z +http://www.llf.cnrs.fr/Gens/Abeille/French-Treebank-fr.php|creationDate|2017-06-20 +http://www.llf.cnrs.fr/Gens/Abeille/French-Treebank-fr.php|tag|http://www.semanlink.net/tag/nlp_french +http://www.llf.cnrs.fr/Gens/Abeille/French-Treebank-fr.php|tag|http://www.semanlink.net/tag/text_corpora_and_lexical_resources +http://www.llf.cnrs.fr/Gens/Abeille/French-Treebank-fr.php|title|Corpus arboré pour le français / French Treebank +http://www.llf.cnrs.fr/Gens/Abeille/French-Treebank-fr.php|creationTime|2017-06-20T13:48:17Z +http://www.openlinksw.com/blog/~kidehen/?id=1238|creationDate|2007-08-06 +http://www.openlinksw.com/blog/~kidehen/?id=1238|tag|http://www.semanlink.net/tag/openlink_ajax_toolkit_oat +http://www.openlinksw.com/blog/~kidehen/?id=1238|title|OpenLink Ajax Toolkit (OAT) 2.6 Released! +http://www.openlinksw.com/blog/~kidehen/?id=1238|creationTime|2007-08-06T18:50:45Z +http://bugbrother.blog.lemonde.fr/2013/04/24/la-guerre-aux-migrants-a-fait-18-000-morts-au-moins/|creationDate|2013-04-25 +http://bugbrother.blog.lemonde.fr/2013/04/24/la-guerre-aux-migrants-a-fait-18-000-morts-au-moins/|tag|http://www.semanlink.net/tag/immigration +http://bugbrother.blog.lemonde.fr/2013/04/24/la-guerre-aux-migrants-a-fait-18-000-morts-au-moins/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2013/04/24/la-guerre-aux-migrants-a-fait-18-000-morts-au-moins/|title|La guerre aux migrants a fait 18 000 morts (au moins) BUG BROTHER +http://bugbrother.blog.lemonde.fr/2013/04/24/la-guerre-aux-migrants-a-fait-18-000-morts-au-moins/|creationTime|2013-04-25T00:02:03Z +http://www.vanityfair.com/society/features/2011/05/top-one-percent-201105?currentPage=all|creationDate|2011-04-05 +http://www.vanityfair.com/society/features/2011/05/top-one-percent-201105?currentPage=all|tag|http://www.semanlink.net/tag/inegalites +http://www.vanityfair.com/society/features/2011/05/top-one-percent-201105?currentPage=all|tag|http://www.semanlink.net/tag/money +http://www.vanityfair.com/society/features/2011/05/top-one-percent-201105?currentPage=all|comment|The top 1 percent have the best houses, the best educations, the best doctors, and the best lifestyles, but there is one thing that money doesn’t seem to have bought: an understanding that their fate is bound up with how the other 99 percent live. Throughout history, this is something that the top 1 percent eventually do learn. Too late. 
+http://www.vanityfair.com/society/features/2011/05/top-one-percent-201105?currentPage=all|title|Of the 1%, by the 1%, for the 1% Society Vanity Fair +http://www.vanityfair.com/society/features/2011/05/top-one-percent-201105?currentPage=all|creationTime|2011-04-05T10:25:47Z +http://seaborne.blogspot.com/2008/01/jena-mulgara-example-of-implementing.html|creationDate|2009-09-18 +http://seaborne.blogspot.com/2008/01/jena-mulgara-example-of-implementing.html|tag|http://www.semanlink.net/tag/andy_seaborne +http://seaborne.blogspot.com/2008/01/jena-mulgara-example-of-implementing.html|tag|http://www.semanlink.net/tag/implementing_a_jena_graph +http://seaborne.blogspot.com/2008/01/jena-mulgara-example-of-implementing.html|title|ARQtick: Jena-Mulgara : example of implementing a Jena graph +http://seaborne.blogspot.com/2008/01/jena-mulgara-example-of-implementing.html|creationTime|2009-09-18T19:18:49Z +http://herschel.cea.fr/|creationDate|2009-06-19 +http://herschel.cea.fr/|tag|http://www.semanlink.net/tag/herschel_telescope +http://herschel.cea.fr/|title|HERSCHEL : lumière sur les mondes enfouis de l'Univers (herschel.cea.fr) +http://herschel.cea.fr/|creationTime|2009-06-19T17:08:26Z +https://aip.scitation.org/doi/abs/10.1063/1.5042250|creationDate|2018-10-21 +https://aip.scitation.org/doi/abs/10.1063/1.5042250|tag|http://www.semanlink.net/tag/brains_in_silicon +https://aip.scitation.org/doi/abs/10.1063/1.5042250|tag|http://www.semanlink.net/tag/julie_grollier +https://aip.scitation.org/doi/abs/10.1063/1.5042250|title|Overcoming device unreliability with continuous learning in a population coding based computing system (2018 - Journal of Applied Physics) +https://aip.scitation.org/doi/abs/10.1063/1.5042250|creationTime|2018-10-21T16:42:53Z +https://stackoverflow.com/questions/12952729/how-to-understand-locality-sensitive-hashing/12967538#12967538|creationDate|2017-07-26 +https://stackoverflow.com/questions/12952729/how-to-understand-locality-sensitive-hashing/12967538#12967538|tag|http://www.semanlink.net/tag/stack_overflow +https://stackoverflow.com/questions/12952729/how-to-understand-locality-sensitive-hashing/12967538#12967538|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +https://stackoverflow.com/questions/12952729/how-to-understand-locality-sensitive-hashing/12967538#12967538|title|How to understand Locality Sensitive Hashing? - Stack Overflow +https://stackoverflow.com/questions/12952729/how-to-understand-locality-sensitive-hashing/12967538#12967538|creationTime|2017-07-26T01:35:31Z +http://www.semanticoverflow.com/questions/2518/sparql-more-than-a-query-language|creationDate|2010-12-27 +http://www.semanticoverflow.com/questions/2518/sparql-more-than-a-query-language|tag|http://www.semanlink.net/tag/sparql +http://www.semanticoverflow.com/questions/2518/sparql-more-than-a-query-language|tag|http://www.semanlink.net/tag/semantic_overflow +http://www.semanticoverflow.com/questions/2518/sparql-more-than-a-query-language|tag|http://www.semanlink.net/tag/topbraid_spin +http://www.semanticoverflow.com/questions/2518/sparql-more-than-a-query-language|title|SPARQL more than a query language! 
- Semantic Overflow +http://www.semanticoverflow.com/questions/2518/sparql-more-than-a-query-language|creationTime|2010-12-27T13:35:58Z +https://news.cnrs.fr/articles/bourbaki-and-the-foundations-of-modern-mathematics|creationDate|2019-03-14 +https://news.cnrs.fr/articles/bourbaki-and-the-foundations-of-modern-mathematics|tag|http://www.semanlink.net/tag/bourbaki +https://news.cnrs.fr/articles/bourbaki-and-the-foundations-of-modern-mathematics|title|Bourbaki and the Foundations of Modern Mathematics CNRS News +https://news.cnrs.fr/articles/bourbaki-and-the-foundations-of-modern-mathematics|creationTime|2019-03-14T22:47:32Z +http://neuralnetworksanddeeplearning.com/|creationDate|2017-09-12 +http://neuralnetworksanddeeplearning.com/|tag|http://www.semanlink.net/tag/artificial_neural_network +http://neuralnetworksanddeeplearning.com/|tag|http://www.semanlink.net/tag/deep_learning +http://neuralnetworksanddeeplearning.com/|tag|http://www.semanlink.net/tag/tutorial +http://neuralnetworksanddeeplearning.com/|comment|free online book +http://neuralnetworksanddeeplearning.com/|title|Neural networks and deep learning +http://neuralnetworksanddeeplearning.com/|creationTime|2017-09-12T13:39:15Z +http://ourcodeworld.com/articles/read/359/top-7-best-markdown-editors-javascript-and-jquery-plugins|creationDate|2017-02-12 +http://ourcodeworld.com/articles/read/359/top-7-best-markdown-editors-javascript-and-jquery-plugins|tag|http://www.semanlink.net/tag/markown_javascript +http://ourcodeworld.com/articles/read/359/top-7-best-markdown-editors-javascript-and-jquery-plugins|title|Top 7: Best Markdown editors Javascript and jQuery plugins Our Code World +http://ourcodeworld.com/articles/read/359/top-7-best-markdown-editors-javascript-and-jquery-plugins|creationTime|2017-02-12T18:27:10Z +http://internetactu.blog.lemonde.fr/2018/02/10/demanteler-les-gafa/|creationDate|2018-02-11 +http://internetactu.blog.lemonde.fr/2018/02/10/demanteler-les-gafa/|tag|http://www.semanlink.net/tag/vive_le_capitalisme +http://internetactu.blog.lemonde.fr/2018/02/10/demanteler-les-gafa/|tag|http://www.semanlink.net/tag/gafa +http://internetactu.blog.lemonde.fr/2018/02/10/demanteler-les-gafa/|title|Démanteler les GAFA ? InternetActu +http://internetactu.blog.lemonde.fr/2018/02/10/demanteler-les-gafa/|creationTime|2018-02-11T01:04:19Z +http://www.wired.com/wired/archive/8.04/joy.html|creationDate|2005-10-19 +http://www.wired.com/wired/archive/8.04/joy.html|tag|http://www.semanlink.net/tag/nanotechnologies +http://www.wired.com/wired/archive/8.04/joy.html|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.wired.com/wired/archive/8.04/joy.html|tag|http://www.semanlink.net/tag/technological_singularity +http://www.wired.com/wired/archive/8.04/joy.html|tag|http://www.semanlink.net/tag/dystopia +http://www.wired.com/wired/archive/8.04/joy.html|tag|http://www.semanlink.net/tag/anticipation +http://www.wired.com/wired/archive/8.04/joy.html|tag|http://www.semanlink.net/tag/bill_joy +http://www.wired.com/wired/archive/8.04/joy.html|title|Wired 8.04: Why the future doesn't need us. +http://nadbordrozd.github.io/blog/2017/12/05/what-they-dont-tell-you-about-data-science-1/|creationDate|2017-12-10 +http://nadbordrozd.github.io/blog/2017/12/05/what-they-dont-tell-you-about-data-science-1/|tag|http://www.semanlink.net/tag/data_science +http://nadbordrozd.github.io/blog/2017/12/05/what-they-dont-tell-you-about-data-science-1/|title|What They Don't Tell You About Data Science. 
1: You Are a Software Engineer First - DS lore +http://nadbordrozd.github.io/blog/2017/12/05/what-they-dont-tell-you-about-data-science-1/|creationTime|2017-12-10T19:54:07Z +http://en.wikipedia.org/wiki/Bowling_for_Columbine|creationDate|2008-11-04 +http://en.wikipedia.org/wiki/Bowling_for_Columbine|tag|http://www.semanlink.net/tag/film_americain +http://en.wikipedia.org/wiki/Bowling_for_Columbine|tag|http://www.semanlink.net/tag/michael_moore +http://en.wikipedia.org/wiki/Bowling_for_Columbine|title|Bowling for Columbine +http://en.wikipedia.org/wiki/Bowling_for_Columbine|creationTime|2008-11-04T00:48:05Z +http://wiredreach.org/|creationDate|2005-09-05 +http://wiredreach.org/|tag|http://www.semanlink.net/tag/content_sharing +http://wiredreach.org/|tag|http://www.semanlink.net/tag/open_source +http://wiredreach.org/|tag|http://www.semanlink.net/tag/peer_to_peer +http://wiredreach.org/|comment|The WiredReach Platform allows users to selectively share content with others in a completely decentralized and secure manner. That means your content does not have to be uploaded to any central servers but rather can be shared right from your desktop or device. +http://wiredreach.org/|title|WiredReach A Universal Content Sharing Platform +http://stackoverflow.com/questions/603765/how-do-i-redirect-from-apache-to-tomcat|creationDate|2012-03-02 +http://stackoverflow.com/questions/603765/how-do-i-redirect-from-apache-to-tomcat|tag|http://www.semanlink.net/tag/integrating_tomcat_with_apache +http://stackoverflow.com/questions/603765/how-do-i-redirect-from-apache-to-tomcat|title|How do I redirect from Apache to Tomcat? +http://stackoverflow.com/questions/603765/how-do-i-redirect-from-apache-to-tomcat|creationTime|2012-03-02T10:36:44Z +http://www.songhay.org/|creationDate|2015-01-17 +http://www.songhay.org/|tag|http://www.semanlink.net/tag/songhai +http://www.songhay.org/|title|SONGHAY.ORG +http://www.songhay.org/|creationTime|2015-01-17T14:33:31Z +http://scienceblogs.com/startswithabang/2013/01/30/the-solar-storm-of-a-lifetime/|creationDate|2013-02-04 +http://scienceblogs.com/startswithabang/2013/01/30/the-solar-storm-of-a-lifetime/|tag|http://www.semanlink.net/tag/soleil +http://scienceblogs.com/startswithabang/2013/01/30/the-solar-storm-of-a-lifetime/|title|The Solar Storm of a Lifetime – Starts With A Bang +http://scienceblogs.com/startswithabang/2013/01/30/the-solar-storm-of-a-lifetime/|creationTime|2013-02-04T12:10:33Z +http://googleblog.blogspot.co.uk/2012/05/introducing-knowledge-graph-things-not.html|creationDate|2012-05-17 +http://googleblog.blogspot.co.uk/2012/05/introducing-knowledge-graph-things-not.html|tag|http://www.semanlink.net/tag/google_knowledge_graph +http://googleblog.blogspot.co.uk/2012/05/introducing-knowledge-graph-things-not.html|title|Introducing the Knowledge Graph: things, not strings Official Google Blog +http://googleblog.blogspot.co.uk/2012/05/introducing-knowledge-graph-things-not.html|creationTime|2012-05-17T00:24:00Z +http://alignapi.gforge.inria.fr/edoal.html|creationDate|2012-11-10 +http://alignapi.gforge.inria.fr/edoal.html|tag|http://www.semanlink.net/tag/francois_scharffe +http://alignapi.gforge.inria.fr/edoal.html|tag|http://www.semanlink.net/tag/ontology_mapping +http://alignapi.gforge.inria.fr/edoal.html|title|EDOAL: Expressive and Declarative Ontology Alignment Language +http://alignapi.gforge.inria.fr/edoal.html|creationTime|2012-11-10T01:59:08Z 
+https://www.sciencesetavenir.fr/archeo-paleo/archeologie/decouverte-d-un-rare-cimetiere-d-urnes-funeraires-en-amazonie_127402|creationDate|2018-09-15 +https://www.sciencesetavenir.fr/archeo-paleo/archeologie/decouverte-d-un-rare-cimetiere-d-urnes-funeraires-en-amazonie_127402|tag|http://www.semanlink.net/tag/archeologie_amazonienne +https://www.sciencesetavenir.fr/archeo-paleo/archeologie/decouverte-d-un-rare-cimetiere-d-urnes-funeraires-en-amazonie_127402|title|Découverte d’un rare cimetière d’urnes funéraires en Amazonie +https://www.sciencesetavenir.fr/archeo-paleo/archeologie/decouverte-d-un-rare-cimetiere-d-urnes-funeraires-en-amazonie_127402|creationTime|2018-09-15T16:24:21Z +http://data.gov.uk/blog/publishing-local-open-data-important-lessons-open-election-data-project|creationDate|2010-07-30 +http://data.gov.uk/blog/publishing-local-open-data-important-lessons-open-election-data-project|tag|http://www.semanlink.net/tag/election +http://data.gov.uk/blog/publishing-local-open-data-important-lessons-open-election-data-project|tag|http://www.semanlink.net/tag/linked_data +http://data.gov.uk/blog/publishing-local-open-data-important-lessons-open-election-data-project|tag|http://www.semanlink.net/tag/government_data_as_linked_data +http://data.gov.uk/blog/publishing-local-open-data-important-lessons-open-election-data-project|title|Publishing Local Open Data - Important Lessons from the Open Election Data project data.gov.uk +http://data.gov.uk/blog/publishing-local-open-data-important-lessons-open-election-data-project|creationTime|2010-07-30T16:37:44Z +http://www.defectivebydesign.org/|creationDate|2013-04-27 +http://www.defectivebydesign.org/|tag|http://www.semanlink.net/tag/drm +http://www.defectivebydesign.org/|title|We oppose DRM. Defective by Design +http://www.defectivebydesign.org/|creationTime|2013-04-27T20:05:28Z +https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf|creationDate|2018-08-28 +https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf|tag|http://www.semanlink.net/tag/embeddings +https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf|tag|http://www.semanlink.net/tag/chris_manning +https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf|tag|http://www.semanlink.net/tag/meaning_in_nlp +https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf|tag|http://www.semanlink.net/tag/slides +https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf|comment|"[YouTube](/doc/?uri=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DnFCxTtBqF5U) +" +https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf|relatedDoc|https://www.youtube.com/watch?v=nFCxTtBqF5U +https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf|title|Representations for Language: From Word Embeddings to Sentence Meanings (2017) - Slides +https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf|creationTime|2018-08-28T10:35:07Z +http://dret.net/glossary/grddl|creationDate|2005-12-16 +http://dret.net/glossary/grddl|tag|http://www.semanlink.net/tag/grddl +http://dret.net/glossary/grddl|title|Definition: GRDDL (Gleaning Resource Descriptions from Dialects of Languages) [Web and XML Glossary] +http://www.lemonde.fr/planete/article/2015/04/01/a-la-recherche-des-c-urs-perdus-des-reacteurs-nucleaires-de-fukushima_4607166_3244.html|creationDate|2015-04-01 
+http://www.lemonde.fr/planete/article/2015/04/01/a-la-recherche-des-c-urs-perdus-des-reacteurs-nucleaires-de-fukushima_4607166_3244.html|tag|http://www.semanlink.net/tag/fukushima +http://www.lemonde.fr/planete/article/2015/04/01/a-la-recherche-des-c-urs-perdus-des-reacteurs-nucleaires-de-fukushima_4607166_3244.html|comment|" +" +http://www.lemonde.fr/planete/article/2015/04/01/a-la-recherche-des-c-urs-perdus-des-reacteurs-nucleaires-de-fukushima_4607166_3244.html|title|A la recherche des cœurs perdus des réacteurs nucléaires de Fukushima +http://www.lemonde.fr/planete/article/2015/04/01/a-la-recherche-des-c-urs-perdus-des-reacteurs-nucleaires-de-fukushima_4607166_3244.html|creationTime|2015-04-01T21:23:38Z +https://amp.theguardian.com/commentisfree/2018/mar/12/climate-change-is-a-disaster-foretold-just-like-the-first-world-war?CMP=share_btn_tw&__twitter_impression=true|creationDate|2018-03-12 +https://amp.theguardian.com/commentisfree/2018/mar/12/climate-change-is-a-disaster-foretold-just-like-the-first-world-war?CMP=share_btn_tw&__twitter_impression=true|tag|http://www.semanlink.net/tag/rechauffement_climatique +https://amp.theguardian.com/commentisfree/2018/mar/12/climate-change-is-a-disaster-foretold-just-like-the-first-world-war?CMP=share_btn_tw&__twitter_impression=true|title|Climate change is a disaster foretold, just like the first world war Jeff Sparrow Opinion The Guardian +https://amp.theguardian.com/commentisfree/2018/mar/12/climate-change-is-a-disaster-foretold-just-like-the-first-world-war?CMP=share_btn_tw&__twitter_impression=true|creationTime|2018-03-12T13:03:43Z +http://www.revistanossopara.com.br/conteudo.php?edicao=7&indice=71|creationDate|2007-09-11 +http://www.revistanossopara.com.br/conteudo.php?edicao=7&indice=71|tag|http://www.semanlink.net/tag/restaurant +http://www.revistanossopara.com.br/conteudo.php?edicao=7&indice=71|tag|http://www.semanlink.net/tag/gastronomie +http://www.revistanossopara.com.br/conteudo.php?edicao=7&indice=71|tag|http://www.semanlink.net/tag/ofir +http://www.revistanossopara.com.br/conteudo.php?edicao=7&indice=71|tag|http://www.semanlink.net/tag/para +http://www.revistanossopara.com.br/conteudo.php?edicao=7&indice=71|title|Revista Nosso Pará - On line +http://www.revistanossopara.com.br/conteudo.php?edicao=7&indice=71|creationTime|2007-09-11T21:26:37Z +http://contrecourant.france2.fr/article.php3?id_article=169|creationDate|2006-05-17 +http://contrecourant.france2.fr/article.php3?id_article=169|tag|http://www.semanlink.net/tag/origines_du_sida +http://contrecourant.france2.fr/article.php3?id_article=169|tag|http://www.semanlink.net/tag/chimpanze +http://contrecourant.france2.fr/article.php3?id_article=169|comment|"Documentaire accusant un vaccin contre la polio, fabriqué avec des reins de chimpanzés et utilisé au Congo, d'être à l'origine de la transmission du sida à l'homme. Le film est effrayant - ce qui ne veut pas dire qu'il ait raison. 
+" +http://contrecourant.france2.fr/article.php3?id_article=169|title|Les origines du Sida +https://www.theguardian.com/media/2016/jul/12/how-technology-disrupted-the-truth|creationDate|2016-09-18 +https://www.theguardian.com/media/2016/jul/12/how-technology-disrupted-the-truth|tag|http://www.semanlink.net/tag/verite +https://www.theguardian.com/media/2016/jul/12/how-technology-disrupted-the-truth|tag|http://www.semanlink.net/tag/post_verite +https://www.theguardian.com/media/2016/jul/12/how-technology-disrupted-the-truth|title|How technology disrupted the truth Katharine Viner Media The Guardian +https://www.theguardian.com/media/2016/jul/12/how-technology-disrupted-the-truth|creationTime|2016-09-18T11:24:14Z +http://requirejs.org/|creationDate|2013-01-02 +http://requirejs.org/|tag|http://www.semanlink.net/tag/javascript_librairies +http://requirejs.org/|comment|JavaScript file and module loader +http://requirejs.org/|title|RequireJS +http://requirejs.org/|creationTime|2013-01-02T10:52:18Z +http://stackoverflow.com/questions/231767/the-python-yield-keyword-explained|creationDate|2013-04-24 +http://stackoverflow.com/questions/231767/the-python-yield-keyword-explained|tag|http://www.semanlink.net/tag/python +http://stackoverflow.com/questions/231767/the-python-yield-keyword-explained|title|iterator - The Python yield keyword explained - Stack Overflow +http://stackoverflow.com/questions/231767/the-python-yield-keyword-explained|creationTime|2013-04-24T00:44:49Z +http://www.semanlink.net/doc/2019/05/introducing_metadata_enhanced_u|creationDate|2019-05-20 +http://www.semanlink.net/doc/2019/05/introducing_metadata_enhanced_u|tag|http://www.semanlink.net/tag/ulmfit +http://www.semanlink.net/doc/2019/05/introducing_metadata_enhanced_u|tag|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp +http://www.semanlink.net/doc/2019/05/introducing_metadata_enhanced_u|comment|> Our first idea was to combine a structured data model with the text model from fast.ai. Later, when thinking about Jeremy Howard’s “Introduction of Language Modeling”2 in the 2018 course, we remembered his example of generating technical abstracts for papers. He had special flags that indicated the two sections of the abstract, , which indicated the category and , which was the text of the abstract. We realized that you might be able to pass the model information in a similar fashion +http://www.semanlink.net/doc/2019/05/introducing_metadata_enhanced_u|title|Introducing Metadata Enhanced ULMFiT Novetta Nexus +http://www.semanlink.net/doc/2019/05/introducing_metadata_enhanced_u|bookmarkOf|https://www.novetta.com/2019/03/introducing_me_ulmfit/ +http://www.semanlink.net/doc/2019/05/introducing_metadata_enhanced_u|creationTime|2019-05-20T18:31:37Z +https://devcenter.heroku.com/articles/increasing-application-performance-with-http-cache-headers|creationDate|2017-04-11 +https://devcenter.heroku.com/articles/increasing-application-performance-with-http-cache-headers|tag|http://www.semanlink.net/tag/http_cache +https://devcenter.heroku.com/articles/increasing-application-performance-with-http-cache-headers|comment|Without the cache-control header set, no other caching headers will yield any results. 
+https://devcenter.heroku.com/articles/increasing-application-performance-with-http-cache-headers|title|Increasing Application Performance with HTTP Cache Headers Heroku Dev Center +https://devcenter.heroku.com/articles/increasing-application-performance-with-http-cache-headers|creationTime|2017-04-11T18:57:21Z +http://www.webmonkey.com/2010/09/microdata-html5s-best-kept-secret/|creationDate|2011-06-08 +http://www.webmonkey.com/2010/09/microdata-html5s-best-kept-secret/|tag|http://www.semanlink.net/tag/html5 +http://www.webmonkey.com/2010/09/microdata-html5s-best-kept-secret/|tag|http://www.semanlink.net/tag/microdata +http://www.webmonkey.com/2010/09/microdata-html5s-best-kept-secret/|title|Microdata: HTML5’s Best-Kept Secret Webmonkey  Wired.com +http://www.webmonkey.com/2010/09/microdata-html5s-best-kept-secret/|creationTime|2011-06-08T23:20:55Z +https://github.com/omarsar/nlp_overview|creationDate|2018-11-17 +https://github.com/omarsar/nlp_overview|tag|http://www.semanlink.net/tag/overview +https://github.com/omarsar/nlp_overview|tag|http://www.semanlink.net/tag/nlp_current_state +https://github.com/omarsar/nlp_overview|tag|http://www.semanlink.net/tag/deep_nlp +https://github.com/omarsar/nlp_overview|title|omarsar/nlp_overview: Modern Deep Learning Techniques Applied to Natural Language Processing +https://github.com/omarsar/nlp_overview|creationTime|2018-11-17T15:17:33Z +http://www.lemonde.fr/planete/article/2013/06/30/les-varietes-paysannes-de-ble-en-voie-de-disparition_3436092_3244.html|creationDate|2013-06-30 +http://www.lemonde.fr/planete/article/2013/06/30/les-varietes-paysannes-de-ble-en-voie-de-disparition_3436092_3244.html|tag|http://www.semanlink.net/tag/ble +http://www.lemonde.fr/planete/article/2013/06/30/les-varietes-paysannes-de-ble-en-voie-de-disparition_3436092_3244.html|tag|http://www.semanlink.net/tag/lobby_agroalimentaire +http://www.lemonde.fr/planete/article/2013/06/30/les-varietes-paysannes-de-ble-en-voie-de-disparition_3436092_3244.html|title|Les variétés paysannes de blé en voie de disparition +http://www.lemonde.fr/planete/article/2013/06/30/les-varietes-paysannes-de-ble-en-voie-de-disparition_3436092_3244.html|creationTime|2013-06-30T21:56:04Z +https://solid.github.io/dweb-summit-2018/#decentralized-data|creationDate|2018-08-03 +https://solid.github.io/dweb-summit-2018/#decentralized-data|tag|http://www.semanlink.net/tag/slides +https://solid.github.io/dweb-summit-2018/#decentralized-data|tag|http://www.semanlink.net/tag/solid +https://solid.github.io/dweb-summit-2018/#decentralized-data|tag|http://www.semanlink.net/tag/ruben_verborgh +https://solid.github.io/dweb-summit-2018/#decentralized-data|title|Solid: Empowering people through choice +https://solid.github.io/dweb-summit-2018/#decentralized-data|creationTime|2018-08-03T09:00:53Z +http://banlieue.blog.lemonde.fr/2014/05/25/ce-jeune-prof-de-drancy-qui-voulait-changer-le-monde-et-sa-classe/|creationDate|2014-05-26 +http://banlieue.blog.lemonde.fr/2014/05/25/ce-jeune-prof-de-drancy-qui-voulait-changer-le-monde-et-sa-classe/|tag|http://www.semanlink.net/tag/banlieue +http://banlieue.blog.lemonde.fr/2014/05/25/ce-jeune-prof-de-drancy-qui-voulait-changer-le-monde-et-sa-classe/|tag|http://www.semanlink.net/tag/lycee +http://banlieue.blog.lemonde.fr/2014/05/25/ce-jeune-prof-de-drancy-qui-voulait-changer-le-monde-et-sa-classe/|tag|http://www.semanlink.net/tag/9_3 +http://banlieue.blog.lemonde.fr/2014/05/25/ce-jeune-prof-de-drancy-qui-voulait-changer-le-monde-et-sa-classe/|title|Ce jeune prof de Drancy qui voulait 
« changer le monde et sa classe » Au centre, la banlieue +http://banlieue.blog.lemonde.fr/2014/05/25/ce-jeune-prof-de-drancy-qui-voulait-changer-le-monde-et-sa-classe/|creationTime|2014-05-26T00:04:05Z +http://www.lefigaro.fr/reportage/20070817.FIG000000021_bal_lugubre_dans_les_monarchies_africaines.html|creationDate|2007-08-21 +http://www.lefigaro.fr/reportage/20070817.FIG000000021_bal_lugubre_dans_les_monarchies_africaines.html|tag|http://www.semanlink.net/tag/corruption +http://www.lefigaro.fr/reportage/20070817.FIG000000021_bal_lugubre_dans_les_monarchies_africaines.html|tag|http://www.semanlink.net/tag/chine_afrique +http://www.lefigaro.fr/reportage/20070817.FIG000000021_bal_lugubre_dans_les_monarchies_africaines.html|tag|http://www.semanlink.net/tag/france_afrique +http://www.lefigaro.fr/reportage/20070817.FIG000000021_bal_lugubre_dans_les_monarchies_africaines.html|tag|http://www.semanlink.net/tag/gabon +http://www.lefigaro.fr/reportage/20070817.FIG000000021_bal_lugubre_dans_les_monarchies_africaines.html|title|Bal lugubre dans les monarchies africaines +http://www.lefigaro.fr/reportage/20070817.FIG000000021_bal_lugubre_dans_les_monarchies_africaines.html|creationTime|2007-08-21T22:19:35Z +http://en.wikipedia.org/wiki/Caral|creationDate|2010-06-29 +http://en.wikipedia.org/wiki/Caral|tag|http://www.semanlink.net/tag/perou +http://en.wikipedia.org/wiki/Caral|tag|http://www.semanlink.net/tag/civilisations_precolombiennes +http://en.wikipedia.org/wiki/Caral|title|Caral +http://en.wikipedia.org/wiki/Caral|creationTime|2010-06-29T22:56:21Z +https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4|creationDate|2018-03-22 +https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4|tag|http://www.semanlink.net/tag/topic_modeling +https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4|tag|http://www.semanlink.net/tag/tagging +https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4|tag|http://www.semanlink.net/tag/semantic_hashing +https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4|tag|http://www.semanlink.net/tag/nlp_facebook +https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4|comment|"Semantic Hashing using Tags and Topic Modeling, to incorporate both the tag information and the similarity information from probabilistic topic modeling. [Comments about the paper](https://sutheeblog.wordpress.com/2016/10/28/paper-reading-semantic-hashing-using-tags-and-topic-modeling-sigir13/). 
[Code on Github](https://github.com/zhuoxiongzhao/code-for-SHTTM) +" +https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4|title|Semantic hashing using tags and topic modeling (2013) +https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4|creationTime|2018-03-22T00:41:03Z +http://www.lawfareblog.com/2014/10/how-a-blonde-tattooed-texas-girl-became-an-isis-twitter-star/|creationDate|2014-10-03 +http://www.lawfareblog.com/2014/10/how-a-blonde-tattooed-texas-girl-became-an-isis-twitter-star/|tag|http://www.semanlink.net/tag/twitter +http://www.lawfareblog.com/2014/10/how-a-blonde-tattooed-texas-girl-became-an-isis-twitter-star/|tag|http://www.semanlink.net/tag/ei +http://www.lawfareblog.com/2014/10/how-a-blonde-tattooed-texas-girl-became-an-isis-twitter-star/|title|Lawfare › How a Blonde Tattooed Texas Girl Became an ISIS Twitter Star +http://www.lawfareblog.com/2014/10/how-a-blonde-tattooed-texas-girl-became-an-isis-twitter-star/|creationTime|2014-10-03T15:23:56Z +http://ceur-ws.org/Vol-717/paper2.pdf|creationDate|2011-12-17 +http://ceur-ws.org/Vol-717/paper2.pdf|tag|http://www.semanlink.net/tag/linked_learning +http://ceur-ws.org/Vol-717/paper2.pdf|comment|"Best paper award at Linked Learning 2011
+In this paper we have presented our research, which focuses on integrating Linked Data results into e-book navigation tools for students in order to enhance social learning. We base our work on user annotations, we process these annotations with Linked Data technologies, we use the produced synthetized results to modify our e-book display; we hope to maximize reading and learning efficiency, to create common environments and implicitly, communication opportunities. + + +" +http://ceur-ws.org/Vol-717/paper2.pdf|title|Using Linked Data to Reduce Learning Latency for e-Book Readers +http://ceur-ws.org/Vol-717/paper2.pdf|creationTime|2011-12-17T12:28:38Z +http://wiki.apache.org/incubator/StanbolProposal|creationDate|2010-12-17 +http://wiki.apache.org/incubator/StanbolProposal|tag|http://www.semanlink.net/tag/apache_stanbol +http://wiki.apache.org/incubator/StanbolProposal|comment|Apache Stanbol is a modular software stack and reusable set of components for semantic content management +http://wiki.apache.org/incubator/StanbolProposal|title|StanbolProposal - Incubator Wiki +http://wiki.apache.org/incubator/StanbolProposal|creationTime|2010-12-17T00:19:37Z +http://dannyayers.com/2006/04/13/jena-user-conference-|creationDate|2006-04-13 +http://dannyayers.com/2006/04/13/jena-user-conference-|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2006/04/13/jena-user-conference-|tag|http://www.semanlink.net/tag/jena_user_conference +http://dannyayers.com/2006/04/13/jena-user-conference-|title|Jena User Conference - programme up +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/FastText_Tutorial.ipynb|creationDate|2017-06-28 +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/FastText_Tutorial.ipynb|tag|http://www.semanlink.net/tag/gensim +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/FastText_Tutorial.ipynb|tag|http://www.semanlink.net/tag/fasttext +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/FastText_Tutorial.ipynb|title|gensim/FastText_Tutorial.ipynb +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/FastText_Tutorial.ipynb|creationTime|2017-06-28T01:07:02Z +http://meta.wikimedia.org/wiki/Wikidata/Development/RDF|creationDate|2013-09-12 +http://meta.wikimedia.org/wiki/Wikidata/Development/RDF|tag|http://www.semanlink.net/tag/wikidata_rdf +http://meta.wikimedia.org/wiki/Wikidata/Development/RDF|title|Wikidata/Development/RDF +http://meta.wikimedia.org/wiki/Wikidata/Development/RDF|creationTime|2013-09-12T00:07:25Z +https://github.com/ansell/aterms|creationDate|2015-04-13 +https://github.com/ansell/aterms|tag|http://www.semanlink.net/tag/aterm +https://github.com/ansell/aterms|tag|http://www.semanlink.net/tag/github_project +https://github.com/ansell/aterms|title|ATerm library on GitHub +https://github.com/ansell/aterms|creationTime|2015-04-13T12:10:32Z +http://stackoverflow.com/questions/3738137/javascript-variable-scope-question?rq=1|creationDate|2012-09-17 +http://stackoverflow.com/questions/3738137/javascript-variable-scope-question?rq=1|tag|http://www.semanlink.net/tag/javascript_closures +http://stackoverflow.com/questions/3738137/javascript-variable-scope-question?rq=1|title|jquery - Javascript variable scope question - Stack Overflow +http://stackoverflow.com/questions/3738137/javascript-variable-scope-question?rq=1|creationTime|2012-09-17T14:50:51Z 
+http://webseitz.fluxent.com/wiki/HackYourLifeWithAPrivateWikiNotebookGettingThingsDoneAndOtherSystems|creationDate|2015-03-15 +http://webseitz.fluxent.com/wiki/HackYourLifeWithAPrivateWikiNotebookGettingThingsDoneAndOtherSystems|tag|http://www.semanlink.net/tag/danny_ayers +http://webseitz.fluxent.com/wiki/HackYourLifeWithAPrivateWikiNotebookGettingThingsDoneAndOtherSystems|tag|http://www.semanlink.net/tag/private_wiki +http://webseitz.fluxent.com/wiki/HackYourLifeWithAPrivateWikiNotebookGettingThingsDoneAndOtherSystems|title|Hack Your Life With A Private Wiki Notebook Getting Things Done And Other Systems - WebSeitz/wiki +http://webseitz.fluxent.com/wiki/HackYourLifeWithAPrivateWikiNotebookGettingThingsDoneAndOtherSystems|creationTime|2015-03-15T23:30:37Z +http://www.w3.org/TR/sw-oosd-primer/|creationDate|2007-02-06 +http://www.w3.org/TR/sw-oosd-primer/|tag|http://www.semanlink.net/tag/dynamic_object_model_pattern +http://www.w3.org/TR/sw-oosd-primer/|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.w3.org/TR/sw-oosd-primer/|tag|http://www.semanlink.net/tag/semantic_web_and_oop +http://www.w3.org/TR/sw-oosd-primer/|comment|Rather unnoticed from the main software engineering camps, the World Wide Web Consortium (W3C) has designed some very interesting technology in the context of its Semantic Web vision. This technology has been originally designed with the goal of making web pages easier to understand for intelligent agents and web services. Interestingly, however, it turns out that Semantic Web languages and tools could also play a major role in software development in general. +http://www.w3.org/TR/sw-oosd-primer/|title|A Semantic Web Primer for Object-Oriented Software Developers +http://www.w3.org/TR/sw-oosd-primer/|creationTime|2007-02-06T21:45:16Z +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000506.html|creationDate|2013-01-29 +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000506.html|tag|http://www.semanlink.net/tag/goodrelations +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000506.html|tag|http://www.semanlink.net/tag/fps_post +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000506.html|title|[goodrelations] gr:ProductFeature +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000506.html|creationTime|2013-01-29T15:15:45Z +http://lists.w3.org/Archives/Public/public-lod/2009May/0252.html|creationDate|2009-05-25 +http://lists.w3.org/Archives/Public/public-lod/2009May/0252.html|tag|http://www.semanlink.net/tag/lod_mailing_list +http://lists.w3.org/Archives/Public/public-lod/2009May/0252.html|tag|http://www.semanlink.net/tag/global_human_sensor_net +http://lists.w3.org/Archives/Public/public-lod/2009May/0252.html|tag|http://www.semanlink.net/tag/linking_open_data +http://lists.w3.org/Archives/Public/public-lod/2009May/0252.html|tag|http://www.semanlink.net/tag/biodiversity_data +http://lists.w3.org/Archives/Public/public-lod/2009May/0252.html|comment|"I recently invited participants in the upcoming e-Biosphere conference +(June 1-3, London) to join me in a collective demonstration of the +semantic web in action. +
+The short story is that we'll be integrating wildlife observations with +background biodiversity data to enable as many interesting queries (e.g., +""show species out of range"") as we can. +
The concept we're trying to illustrate: a global human sensor net" +http://lists.w3.org/Archives/Public/public-lod/2009May/0252.html|title|Guerilla science: what can we do in 10 days? +http://lists.w3.org/Archives/Public/public-lod/2009May/0252.html|creationTime|2009-05-25T17:57:20Z +http://internetactu.blog.lemonde.fr/2014/01/31/nous-ne-prenons-pas-assez-au-serieux-les-implications-politiques-des-entreprises-du-numerique/|creationDate|2014-02-01 +http://internetactu.blog.lemonde.fr/2014/01/31/nous-ne-prenons-pas-assez-au-serieux-les-implications-politiques-des-entreprises-du-numerique/|tag|http://www.semanlink.net/tag/evgeny_morozov +http://internetactu.blog.lemonde.fr/2014/01/31/nous-ne-prenons-pas-assez-au-serieux-les-implications-politiques-des-entreprises-du-numerique/|tag|http://www.semanlink.net/tag/big_brother +http://internetactu.blog.lemonde.fr/2014/01/31/nous-ne-prenons-pas-assez-au-serieux-les-implications-politiques-des-entreprises-du-numerique/|title|Nous ne prenons pas assez au sérieux les implications politiques du numérique InternetActu +http://internetactu.blog.lemonde.fr/2014/01/31/nous-ne-prenons-pas-assez-au-serieux-les-implications-politiques-des-entreprises-du-numerique/|creationTime|2014-02-01T10:04:17Z +https://www2.deloitte.com/content/dam/Deloitte/us/Documents/deloitte-analytics/us-da-2017-deloitte-state-of-cognitive-survey.pdf|creationDate|2018-06-07 +https://www2.deloitte.com/content/dam/Deloitte/us/Documents/deloitte-analytics/us-da-2017-deloitte-state-of-cognitive-survey.pdf|tag|http://www.semanlink.net/tag/survey +https://www2.deloitte.com/content/dam/Deloitte/us/Documents/deloitte-analytics/us-da-2017-deloitte-state-of-cognitive-survey.pdf|tag|http://www.semanlink.net/tag/ai_business_perspectives +https://www2.deloitte.com/content/dam/Deloitte/us/Documents/deloitte-analytics/us-da-2017-deloitte-state-of-cognitive-survey.pdf|tag|http://www.semanlink.net/tag/deloitte +https://www2.deloitte.com/content/dam/Deloitte/us/Documents/deloitte-analytics/us-da-2017-deloitte-state-of-cognitive-survey.pdf|title|2017 Deloitte State of Cognitive Survey +https://www2.deloitte.com/content/dam/Deloitte/us/Documents/deloitte-analytics/us-da-2017-deloitte-state-of-cognitive-survey.pdf|creationTime|2018-06-07T23:58:17Z +https://doi.org/10.1145/3178876.3186007|creationDate|2018-05-10 +https://doi.org/10.1145/3178876.3186007|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://doi.org/10.1145/3178876.3186007|tag|http://www.semanlink.net/tag/embedding_evaluation +https://doi.org/10.1145/3178876.3186007|tag|http://www.semanlink.net/tag/word_embedding +https://doi.org/10.1145/3178876.3186007|tag|http://www.semanlink.net/tag/word_embeddings_with_lexical_resources +https://doi.org/10.1145/3178876.3186007|tag|http://www.semanlink.net/tag/word_embedding_compositionality +https://doi.org/10.1145/3178876.3186007|comment|"comment obtenir les meilleures représentations de texte à partir de représentations de mots (word embeddings) ? L'auteur utilise des ressources lexicographiques (wordnet) pour ses tests : l'embedding obtenu pour la définition d'un mot est-il proche de celui du mot ? + +Le papier s'appuie sur une [thèse du même auteur](/doc/?uri=https%3A%2F%2Fesc.fnwi.uva.nl%2Fthesis%2Fcentraal%2Ffiles%2Ff1554608041.pdf), claire et bien écrite. 
+" +https://doi.org/10.1145/3178876.3186007|relatedDoc|https://esc.fnwi.uva.nl/thesis/centraal/files/f1554608041.pdf +https://doi.org/10.1145/3178876.3186007|title|Improving Word Embedding Compositionality using Lexicographic Definitions +https://doi.org/10.1145/3178876.3186007|creationTime|2018-05-10T16:29:46Z +http://www.paulgraham.com/progbot.html|creationDate|2005-11-26 +http://www.paulgraham.com/progbot.html|tag|http://www.semanlink.net/tag/paul_graham +http://www.paulgraham.com/progbot.html|tag|http://www.semanlink.net/tag/lisp +http://www.paulgraham.com/progbot.html|comment|J'ai vu des sourcils se froncer hier quand j'ai évoqué l'effet pernicieux sur la réutilisabilité de la démarche d'analyse top-down pérenisée par toutes les méthodes de conception, y compris Expert, et que j'ai dit que seule une démarche bottom-up permet de créer des services naturellement et largement réutilisables. Ce très court texte d'un des gourous du Lisp vous explique cela de manière particulièrement limpide. BP +http://www.paulgraham.com/progbot.html|title|Programming Bottom-Up +https://github.com/shellac/java-rdfa|creationDate|2013-07-05 +https://github.com/shellac/java-rdfa|tag|http://www.semanlink.net/tag/damian_steer +https://github.com/shellac/java-rdfa|tag|http://www.semanlink.net/tag/rdfa_parser +https://github.com/shellac/java-rdfa|comment|"""The cruftiest RDFa parser in the world, I'll bet."" (sic)" +https://github.com/shellac/java-rdfa|title|shellac/java-rdfa ; github (Damian Steer) +https://github.com/shellac/java-rdfa|creationTime|2013-07-05T14:05:51Z +http://www.wysigot.com|creationDate|2005-09-21 +http://www.wysigot.com|tag|http://www.semanlink.net/tag/semanlink_related +http://www.wysigot.com|title|Wysigot - browse, capture and monitor the web +https://internetpolicy.mit.edu/blog-2018-fb-cambridgeanalytica/|creationDate|2018-03-21 +https://internetpolicy.mit.edu/blog-2018-fb-cambridgeanalytica/|tag|http://www.semanlink.net/tag/facebook_cambridge_analytica +https://internetpolicy.mit.edu/blog-2018-fb-cambridgeanalytica/|title|Facebook/Cambridge Analytica: Privacy lessons and a way forward Internet Policy Research Initiative @ MIT +https://internetpolicy.mit.edu/blog-2018-fb-cambridgeanalytica/|creationTime|2018-03-21T09:32:06Z +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/drafts/LinkedDataTutorial/|creationDate|2007-07-12 +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/drafts/LinkedDataTutorial/|tag|http://www.semanlink.net/tag/richard_cyganiak +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/drafts/LinkedDataTutorial/|tag|http://www.semanlink.net/tag/tom_heath +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/drafts/LinkedDataTutorial/|tag|http://www.semanlink.net/tag/linked_data +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/drafts/LinkedDataTutorial/|tag|http://www.semanlink.net/tag/chris_bizer +http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/drafts/LinkedDataTutorial/|title|How to publish Linked Data on the Web? 
+http://sites.wiwiss.fu-berlin.de/suhl/bizer/pub/drafts/LinkedDataTutorial/|creationTime|2007-07-12T22:58:26Z +http://developers.facebook.com/docs/reference/plugins/like|creationDate|2011-02-01 +http://developers.facebook.com/docs/reference/plugins/like|tag|http://www.semanlink.net/tag/facebook +http://developers.facebook.com/docs/reference/plugins/like|title|Facebook - Like button +http://developers.facebook.com/docs/reference/plugins/like|creationTime|2011-02-01T13:17:11Z +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-06.pdf|creationDate|2012-04-16 +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-06.pdf|tag|http://www.semanlink.net/tag/federated_sparql_queries +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-06.pdf|tag|http://www.semanlink.net/tag/void_vocabulary_of_interlinked_datasets +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-06.pdf|tag|http://www.semanlink.net/tag/ldow2012 +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-06.pdf|title|Querying the Web of Interlinked Datasets using VOID Descriptions +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-06.pdf|creationTime|2012-04-16T11:43:41Z +http://papers.cnl.salk.edu/PDFs/23%20Problems%20in%20Systems%20Neuroscience%202005-2921.pdf|creationDate|2016-09-04 +http://papers.cnl.salk.edu/PDFs/23%20Problems%20in%20Systems%20Neuroscience%202005-2921.pdf|tag|http://www.semanlink.net/tag/computational_neuroscience +http://papers.cnl.salk.edu/PDFs/23%20Problems%20in%20Systems%20Neuroscience%202005-2921.pdf|tag|http://www.semanlink.net/tag/neuroscience +http://papers.cnl.salk.edu/PDFs/23%20Problems%20in%20Systems%20Neuroscience%202005-2921.pdf|title|23 problems in systems neuroscience +http://papers.cnl.salk.edu/PDFs/23%20Problems%20in%20Systems%20Neuroscience%202005-2921.pdf|creationTime|2016-09-04T12:52:16Z +https://www.infoq.com/articles/apache-shiro|creationDate|2018-08-05 +https://www.infoq.com/articles/apache-shiro|tag|http://www.semanlink.net/tag/apache_shiro +https://www.infoq.com/articles/apache-shiro|title|Application Security With Apache Shiro +https://www.infoq.com/articles/apache-shiro|creationTime|2018-08-05T18:53:39Z +http://www.bobdylan.com/songs/rolling.html|creationDate|2006-10-10 +http://www.bobdylan.com/songs/rolling.html|tag|http://www.semanlink.net/tag/lyrics +http://www.bobdylan.com/songs/rolling.html|tag|http://www.semanlink.net/tag/bob_dylan +http://www.bobdylan.com/songs/rolling.html|title|Bob Dylan: Like a Rolling Stone +http://www.wired.com/2014/12/if-you-think-deep-links-are-a-big-deal-now-just-wait/|creationDate|2014-12-20 +http://www.wired.com/2014/12/if-you-think-deep-links-are-a-big-deal-now-just-wait/|tag|http://www.semanlink.net/tag/search_engines +http://www.wired.com/2014/12/if-you-think-deep-links-are-a-big-deal-now-just-wait/|tag|http://www.semanlink.net/tag/deep_links +http://www.wired.com/2014/12/if-you-think-deep-links-are-a-big-deal-now-just-wait/|title|If You Think Deep Links Are a Big Deal Now, Just Wait WIRED +http://www.wired.com/2014/12/if-you-think-deep-links-are-a-big-deal-now-just-wait/|creationTime|2014-12-20T10:28:04Z +http://www.w3.org/2012/ldp/wiki/Main_Page|creationDate|2014-09-25 +http://www.w3.org/2012/ldp/wiki/Main_Page|tag|http://www.semanlink.net/tag/ldp_w3c +http://www.w3.org/2012/ldp/wiki/Main_Page|tag|http://www.semanlink.net/tag/wiki +http://www.w3.org/2012/ldp/wiki/Main_Page|title|Linked Data Platform - wiki +http://www.w3.org/2012/ldp/wiki/Main_Page|creationTime|2014-09-25T13:26:31Z 
+http://apassant.net/2014/05/09/sex-and-drugs-and-rocknroll-analysing-the-lyrics-of-the-rolling-stone-500-greatest-songs-of-all-time/|creationDate|2014-07-03 +http://apassant.net/2014/05/09/sex-and-drugs-and-rocknroll-analysing-the-lyrics-of-the-rolling-stone-500-greatest-songs-of-all-time/|tag|http://www.semanlink.net/tag/rock +http://apassant.net/2014/05/09/sex-and-drugs-and-rocknroll-analysing-the-lyrics-of-the-rolling-stone-500-greatest-songs-of-all-time/|tag|http://www.semanlink.net/tag/nlp +http://apassant.net/2014/05/09/sex-and-drugs-and-rocknroll-analysing-the-lyrics-of-the-rolling-stone-500-greatest-songs-of-all-time/|tag|http://www.semanlink.net/tag/alexandre_passant +http://apassant.net/2014/05/09/sex-and-drugs-and-rocknroll-analysing-the-lyrics-of-the-rolling-stone-500-greatest-songs-of-all-time/|tag|http://www.semanlink.net/tag/lyrics +http://apassant.net/2014/05/09/sex-and-drugs-and-rocknroll-analysing-the-lyrics-of-the-rolling-stone-500-greatest-songs-of-all-time/|title|Sex and drugs and Rock’n’roll: Analysing the lyrics of the Rolling Stone 500 greatest songs of all time Alexandre Passant +http://apassant.net/2014/05/09/sex-and-drugs-and-rocknroll-analysing-the-lyrics-of-the-rolling-stone-500-greatest-songs-of-all-time/|creationTime|2014-07-03T00:14:59Z +http://www.google.com/maps?f=d&hl=fr&saddr=170+Rue+de+Lourmel,+75015+Paris,+France+(170+Rue+de+Lourmel)+%4048.839050,2.282307&daddr=2203+NE+Alberta+Portland,+OR+97211&sll=45.401955,-60.178655&sspn=86.629429,150.820313&layer=&ie=UTF8&z=3&om=1|creationDate|2007-04-01 +http://www.google.com/maps?f=d&hl=fr&saddr=170+Rue+de+Lourmel,+75015+Paris,+France+(170+Rue+de+Lourmel)+%4048.839050,2.282307&daddr=2203+NE+Alberta+Portland,+OR+97211&sll=45.401955,-60.178655&sspn=86.629429,150.820313&layer=&ie=UTF8&z=3&om=1|tag|http://www.semanlink.net/tag/rigolo +http://www.google.com/maps?f=d&hl=fr&saddr=170+Rue+de+Lourmel,+75015+Paris,+France+(170+Rue+de+Lourmel)+%4048.839050,2.282307&daddr=2203+NE+Alberta+Portland,+OR+97211&sll=45.401955,-60.178655&sspn=86.629429,150.820313&layer=&ie=UTF8&z=3&om=1|tag|http://www.semanlink.net/tag/google_maps +http://www.google.com/maps?f=d&hl=fr&saddr=170+Rue+de+Lourmel,+75015+Paris,+France+(170+Rue+de+Lourmel)+%4048.839050,2.282307&daddr=2203+NE+Alberta+Portland,+OR+97211&sll=45.401955,-60.178655&sspn=86.629429,150.820313&layer=&ie=UTF8&z=3&om=1|tag|http://www.semanlink.net/tag/ciao_vito +http://www.google.com/maps?f=d&hl=fr&saddr=170+Rue+de+Lourmel,+75015+Paris,+France+(170+Rue+de+Lourmel)+%4048.839050,2.282307&daddr=2203+NE+Alberta+Portland,+OR+97211&sll=45.401955,-60.178655&sspn=86.629429,150.820313&layer=&ie=UTF8&z=3&om=1|comment|16. 
Traverser l'océan Atlantique à la nage 3462 mi +http://www.google.com/maps?f=d&hl=fr&saddr=170+Rue+de+Lourmel,+75015+Paris,+France+(170+Rue+de+Lourmel)+%4048.839050,2.282307&daddr=2203+NE+Alberta+Portland,+OR+97211&sll=45.401955,-60.178655&sspn=86.629429,150.820313&layer=&ie=UTF8&z=3&om=1|title|from: 170 Rue de Lourmel to: Ciao Vito 2203 NE Alberta Portland, OR 97211 - Google Maps +http://www.google.com/maps?f=d&hl=fr&saddr=170+Rue+de+Lourmel,+75015+Paris,+France+(170+Rue+de+Lourmel)+%4048.839050,2.282307&daddr=2203+NE+Alberta+Portland,+OR+97211&sll=45.401955,-60.178655&sspn=86.629429,150.820313&layer=&ie=UTF8&z=3&om=1|creationTime|2007-04-01T22:04:51Z +http://www.google.com/maps?f=d&hl=fr&saddr=170+Rue+de+Lourmel,+75015+Paris,+France+(170+Rue+de+Lourmel)+%4048.839050,2.282307&daddr=2203+NE+Alberta+Portland,+OR+97211&sll=45.401955,-60.178655&sspn=86.629429,150.820313&layer=&ie=UTF8&z=3&om=1|seeAlso|http://www.semanlink.net/doc/2007/04/tociaovito.jpg +http://karpathy.github.io/neuralnets/|creationDate|2017-06-08 +http://karpathy.github.io/neuralnets/|tag|http://www.semanlink.net/tag/ann_introduction +http://karpathy.github.io/neuralnets/|title|Hacker's guide to Neural Networks +http://karpathy.github.io/neuralnets/|creationTime|2017-06-08T16:28:09Z +http://patterns.dataincubator.org/book/index.html|creationDate|2010-05-10 +http://patterns.dataincubator.org/book/index.html|tag|http://www.semanlink.net/tag/linked_data +http://patterns.dataincubator.org/book/index.html|tag|http://www.semanlink.net/tag/design_pattern +http://patterns.dataincubator.org/book/index.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://patterns.dataincubator.org/book/index.html|title|Linked Data Patterns +http://patterns.dataincubator.org/book/index.html|creationTime|2010-05-10T11:29:10Z +https://realpython.com/python-f-strings/#f-strings-a-new-and-improved-way-to-format-strings-in-python|creationDate|2018-10-10 +https://realpython.com/python-f-strings/#f-strings-a-new-and-improved-way-to-format-strings-in-python|tag|http://www.semanlink.net/tag/python +https://realpython.com/python-f-strings/#f-strings-a-new-and-improved-way-to-format-strings-in-python|title|Python 3's f-Strings: An Improved String Formatting Syntax (Guide) – Real Python +https://realpython.com/python-f-strings/#f-strings-a-new-and-improved-way-to-format-strings-in-python|creationTime|2018-10-10T11:22:58Z +http://ivan-herman.name/2013/03/01/rdfa-1-1-microdata-and-turtle-in-html-now-in-the-core-distribution-of-rdflib/|creationDate|2013-09-06 +http://ivan-herman.name/2013/03/01/rdfa-1-1-microdata-and-turtle-in-html-now-in-the-core-distribution-of-rdflib/|tag|http://www.semanlink.net/tag/turtle_in_html +http://ivan-herman.name/2013/03/01/rdfa-1-1-microdata-and-turtle-in-html-now-in-the-core-distribution-of-rdflib/|tag|http://www.semanlink.net/tag/rdfa_1_1 +http://ivan-herman.name/2013/03/01/rdfa-1-1-microdata-and-turtle-in-html-now-in-the-core-distribution-of-rdflib/|tag|http://www.semanlink.net/tag/microdata +http://ivan-herman.name/2013/03/01/rdfa-1-1-microdata-and-turtle-in-html-now-in-the-core-distribution-of-rdflib/|tag|http://www.semanlink.net/tag/rdflib +http://ivan-herman.name/2013/03/01/rdfa-1-1-microdata-and-turtle-in-html-now-in-the-core-distribution-of-rdflib/|title|RDFa 1.1, microdata, and turtle-in-HTML now in the core distribution of RDFLib Ivan’s private site +http://ivan-herman.name/2013/03/01/rdfa-1-1-microdata-and-turtle-in-html-now-in-the-core-distribution-of-rdflib/|creationTime|2013-09-06T18:27:25Z 
+http://www.w3.org/TR/ldpatch/|creationDate|2014-09-22
+http://www.w3.org/TR/ldpatch/|tag|http://www.semanlink.net/tag/http_patch
+http://www.w3.org/TR/ldpatch/|tag|http://www.semanlink.net/tag/ld_patch
+http://www.w3.org/TR/ldpatch/|title|Linked Data Patch Format
+http://www.w3.org/TR/ldpatch/|creationTime|2014-09-22T09:52:59Z
+http://apple.stackexchange.com/questions/265428/how-to-remove-the-could-not-connect-to-the-itunes-store-error-message-when-you|creationDate|2017-01-06
+http://apple.stackexchange.com/questions/265428/how-to-remove-the-could-not-connect-to-the-itunes-store-error-message-when-you|tag|http://www.semanlink.net/tag/itunes
+http://apple.stackexchange.com/questions/265428/how-to-remove-the-could-not-connect-to-the-itunes-store-error-message-when-you|tag|http://www.semanlink.net/tag/apple_sucks
+http://apple.stackexchange.com/questions/265428/how-to-remove-the-could-not-connect-to-the-itunes-store-error-message-when-you|tag|http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur
+http://apple.stackexchange.com/questions/265428/how-to-remove-the-could-not-connect-to-the-itunes-store-error-message-when-you|title|"How to remove the ""Could not connect to the iTunes store"" error message when your network does not allow access to the iTunes Store - Ask Different"
+http://apple.stackexchange.com/questions/265428/how-to-remove-the-could-not-connect-to-the-itunes-store-error-message-when-you|creationTime|2017-01-06T18:56:50Z
+http://www.ft.com/cms/s/473893dc-ccde-11db-a938-000b5df10621.html|creationDate|2007-03-16
+http://www.ft.com/cms/s/473893dc-ccde-11db-a938-000b5df10621.html|tag|http://www.semanlink.net/tag/bill_gates
+http://www.ft.com/cms/s/473893dc-ccde-11db-a938-000b5df10621.html|tag|http://www.semanlink.net/tag/immigration
+http://www.ft.com/cms/s/473893dc-ccde-11db-a938-000b5df10621.html|title|Gates warns on US immigration curbs
+http://www.ft.com/cms/s/473893dc-ccde-11db-a938-000b5df10621.html|creationTime|2007-03-16T01:11:22Z
+https://www.microsoft.com/en-us/research/project/dssm/|creationDate|2017-12-30
+https://www.microsoft.com/en-us/research/project/dssm/|tag|http://www.semanlink.net/tag/dssm_deep_semantic_similarity_model
+https://www.microsoft.com/en-us/research/project/dssm/|comment|Deep neural network modeling technique for representing text strings (sentences, queries, predicates, entity mentions, etc.) in a continuous semantic space and modeling semantic similarity between two text strings
+https://www.microsoft.com/en-us/research/project/dssm/|title|"DSSM (""Deep Semantic Similarity Model"") - Microsoft Research"
+https://www.microsoft.com/en-us/research/project/dssm/|creationTime|2017-12-30T02:04:48Z
+http://www.openlinksw.com/blog/~kidehen/?id=1237|creationDate|2007-07-28
+http://www.openlinksw.com/blog/~kidehen/?id=1237|tag|http://www.semanlink.net/tag/kingsley_idehen
+http://www.openlinksw.com/blog/~kidehen/?id=1237|tag|http://www.semanlink.net/tag/facebook
+http://www.openlinksw.com/blog/~kidehen/?id=1237|tag|http://www.semanlink.net/tag/linked_data
+http://www.openlinksw.com/blog/~kidehen/?id=1237|title|Injecting Facebook Data into the Semantic Data Web
+http://www.openlinksw.com/blog/~kidehen/?id=1237|creationTime|2007-07-28T17:37:55Z
+https://fr.slideshare.net/andrewkoo/textrank-algorithm|creationDate|2017-07-12
+https://fr.slideshare.net/andrewkoo/textrank-algorithm|tag|http://www.semanlink.net/tag/textrank
+https://fr.slideshare.net/andrewkoo/textrank-algorithm|title|How does Textrank work? (slides)
+https://fr.slideshare.net/andrewkoo/textrank-algorithm|creationTime|2017-07-12T00:48:39Z
+http://ivanherman.wordpress.com/2008/11/09/open-archive-initiative-aggregation-vocabulary/|creationDate|2008-11-12
+http://ivanherman.wordpress.com/2008/11/09/open-archive-initiative-aggregation-vocabulary/|tag|http://www.semanlink.net/tag/rdf_vocabularies
+http://ivanherman.wordpress.com/2008/11/09/open-archive-initiative-aggregation-vocabulary/|comment|RDF vocabulary to identify an aggregation and to describe its constituents
+http://ivanherman.wordpress.com/2008/11/09/open-archive-initiative-aggregation-vocabulary/|title|Open Archive Initiative’s aggregation vocabulary « Ivan’s private site
+http://ivanherman.wordpress.com/2008/11/09/open-archive-initiative-aggregation-vocabulary/|creationTime|2008-11-12T15:32:24Z
+http://www.lifehacker.com/software/feature/special-geek-to-live-129141.php|creationDate|2005-10-13
+http://www.lifehacker.com/software/feature/special-geek-to-live-129141.php|tag|http://www.semanlink.net/tag/bookmarklet
+http://www.lifehacker.com/software/feature/special-geek-to-live-129141.php|title|Ten Must-Have Bookmarklets
+http://www.histoiredelantiquite.net/archeologie-egyptienne/naucratis-ville-grecque-ou-egyptienne-une-question-de-methode/|creationDate|2018-03-19
+http://www.histoiredelantiquite.net/archeologie-egyptienne/naucratis-ville-grecque-ou-egyptienne-une-question-de-methode/|tag|http://www.semanlink.net/tag/archeologie
+http://www.histoiredelantiquite.net/archeologie-egyptienne/naucratis-ville-grecque-ou-egyptienne-une-question-de-methode/|tag|http://www.semanlink.net/tag/egypte_antique
+http://www.histoiredelantiquite.net/archeologie-egyptienne/naucratis-ville-grecque-ou-egyptienne-une-question-de-methode/|tag|http://www.semanlink.net/tag/herodote
+http://www.histoiredelantiquite.net/archeologie-egyptienne/naucratis-ville-grecque-ou-egyptienne-une-question-de-methode/|tag|http://www.semanlink.net/tag/grece_antique
+http://www.histoiredelantiquite.net/archeologie-egyptienne/naucratis-ville-grecque-ou-egyptienne-une-question-de-methode/|title|Naucratis ville grecque ou égyptienne, une question de méthode - Art, Archéologie et Antiquité
+http://www.histoiredelantiquite.net/archeologie-egyptienne/naucratis-ville-grecque-ou-egyptienne-une-question-de-methode/|creationTime|2018-03-19T08:37:52Z
+http://www.google.com/fusiontables/Home/|creationDate|2012-05-31
+http://www.google.com/fusiontables/Home/|tag|http://www.semanlink.net/tag/google_fusion_tables
+http://www.google.com/fusiontables/Home/|tag|http://www.semanlink.net/tag/database
+http://www.google.com/fusiontables/Home/|tag|http://www.semanlink.net/tag/cloud
+http://www.google.com/fusiontables/Home/|title|Google Fusion Tables - Gather, visualize, and share data tables online
+http://www.google.com/fusiontables/Home/|creationTime|2012-05-31T09:16:56Z
+http://www.bubblecode.net/fr/2013/03/10/comprendre-oauth2/|creationDate|2015-11-16
+http://www.bubblecode.net/fr/2013/03/10/comprendre-oauth2/|tag|http://www.semanlink.net/tag/oauth2
+http://www.bubblecode.net/fr/2013/03/10/comprendre-oauth2/|title|Comprendre OAuth2 « BubbleCode by Johann Reinke
+http://www.bubblecode.net/fr/2013/03/10/comprendre-oauth2/|creationTime|2015-11-16T11:53:19Z
+http://www.cs.vu.nl/~pmika/swc-2008/DBpedia%20Mobile-A%20Location-Aware%20Semantic%20Web%20Client-DBpediaMobile.pdf|creationDate|2008-11-03
+http://www.cs.vu.nl/~pmika/swc-2008/DBpedia%20Mobile-A%20Location-Aware%20Semantic%20Web%20Client-DBpediaMobile.pdf|tag|http://www.semanlink.net/tag/iphone
+http://www.cs.vu.nl/~pmika/swc-2008/DBpedia%20Mobile-A%20Location-Aware%20Semantic%20Web%20Client-DBpediaMobile.pdf|tag|http://www.semanlink.net/tag/dbpedia_mobile
+http://www.cs.vu.nl/~pmika/swc-2008/DBpedia%20Mobile-A%20Location-Aware%20Semantic%20Web%20Client-DBpediaMobile.pdf|title|DBpedia Mobile (ISWC2008 presentation)
+http://www.cs.vu.nl/~pmika/swc-2008/DBpedia%20Mobile-A%20Location-Aware%20Semantic%20Web%20Client-DBpediaMobile.pdf|creationTime|2008-11-03T10:20:25Z
+http://www.nytimes.com/2012/05/07/opinion/krugman-those-revolting-europeans.html|creationDate|2012-05-08
+http://www.nytimes.com/2012/05/07/opinion/krugman-those-revolting-europeans.html|tag|http://www.semanlink.net/tag/paul_krugman
+http://www.nytimes.com/2012/05/07/opinion/krugman-those-revolting-europeans.html|tag|http://www.semanlink.net/tag/crise_de_la_dette_publique_grecque
+http://www.nytimes.com/2012/05/07/opinion/krugman-those-revolting-europeans.html|tag|http://www.semanlink.net/tag/presidentielles_2012
+http://www.nytimes.com/2012/05/07/opinion/krugman-those-revolting-europeans.html|title|Paul Krugman: Those Revolting Europeans - NYTimes.com
+http://www.nytimes.com/2012/05/07/opinion/krugman-those-revolting-europeans.html|creationTime|2012-05-08T11:48:45Z
+http://www.wildml.com/2018/02/introduction-to-learning-to-trade-with-reinforcement-learning/|creationDate|2018-02-11
+http://www.wildml.com/2018/02/introduction-to-learning-to-trade-with-reinforcement-learning/|tag|http://www.semanlink.net/tag/denny_britz
+http://www.wildml.com/2018/02/introduction-to-learning-to-trade-with-reinforcement-learning/|tag|http://www.semanlink.net/tag/bitcoin
+http://www.wildml.com/2018/02/introduction-to-learning-to-trade-with-reinforcement-learning/|tag|http://www.semanlink.net/tag/bourse
+http://www.wildml.com/2018/02/introduction-to-learning-to-trade-with-reinforcement-learning/|tag|http://www.semanlink.net/tag/reinforcement_learning
+http://www.wildml.com/2018/02/introduction-to-learning-to-trade-with-reinforcement-learning/|title|Introduction to Learning to Trade with Reinforcement Learning – WildML
+http://www.wildml.com/2018/02/introduction-to-learning-to-trade-with-reinforcement-learning/|creationTime|2018-02-11T12:20:30Z
+http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1653|creationDate|2011-01-22
+http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1653|tag|http://www.semanlink.net/tag/sparql_en_javascript
+http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1653|tag|http://www.semanlink.net/tag/kingsley_idehen
+http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1653|title|SPARQL guide for the Javascript Developer
+http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1653|creationTime|2011-01-22T15:02:20Z
+http://fgiasson.com/blog/index.php/2009/04/29/rdf-aggregates-and-full-text-search-on-steroids-with-solr/|creationDate|2012-05-10
+http://fgiasson.com/blog/index.php/2009/04/29/rdf-aggregates-and-full-text-search-on-steroids-with-solr/|tag|http://www.semanlink.net/tag/solr
+http://fgiasson.com/blog/index.php/2009/04/29/rdf-aggregates-and-full-text-search-on-steroids-with-solr/|tag|http://www.semanlink.net/tag/openstructs
+http://fgiasson.com/blog/index.php/2009/04/29/rdf-aggregates-and-full-text-search-on-steroids-with-solr/|tag|http://www.semanlink.net/tag/frederick_giasson
+http://fgiasson.com/blog/index.php/2009/04/29/rdf-aggregates-and-full-text-search-on-steroids-with-solr/|tag|http://www.semanlink.net/tag/solr_rdf
+http://fgiasson.com/blog/index.php/2009/04/29/rdf-aggregates-and-full-text-search-on-steroids-with-solr/|title|RDF Aggregates and Full Text Search on Steroids with Solr at Frederick Giasson’s Weblog
+http://fgiasson.com/blog/index.php/2009/04/29/rdf-aggregates-and-full-text-search-on-steroids-with-solr/|creationTime|2012-05-10T00:23:59Z
+http://www.nytimes.com/2010/07/27/business/global/27iht-renuke.html?_r=1&pagewanted=print|creationDate|2010-08-20
+http://www.nytimes.com/2010/07/27/business/global/27iht-renuke.html?_r=1&pagewanted=print|tag|http://www.semanlink.net/tag/energie_solaire
+http://www.nytimes.com/2010/07/27/business/global/27iht-renuke.html?_r=1&pagewanted=print|tag|http://www.semanlink.net/tag/industrie_nucleaire
+http://www.nytimes.com/2010/07/27/business/global/27iht-renuke.html?_r=1&pagewanted=print|title|Nuclear Energy Loses Cost Advantage - NYTimes.com
+http://www.nytimes.com/2010/07/27/business/global/27iht-renuke.html?_r=1&pagewanted=print|creationTime|2010-08-20T14:56:58Z
+https://en.wikipedia.org/wiki/Serial_Mom|creationDate|2016-03-27
+https://en.wikipedia.org/wiki/Serial_Mom|tag|http://www.semanlink.net/tag/film_americain
+https://en.wikipedia.org/wiki/Serial_Mom|tag|http://www.semanlink.net/tag/comedie
+https://en.wikipedia.org/wiki/Serial_Mom|comment|avec Kathleen Turner
+https://en.wikipedia.org/wiki/Serial_Mom|title|Serial Mom
+https://en.wikipedia.org/wiki/Serial_Mom|creationTime|2016-03-27T22:48:26Z
+https://api-platform.com/docs/distribution/|creationDate|2017-04-12
+https://api-platform.com/docs/distribution/|tag|http://www.semanlink.net/tag/api
+https://api-platform.com/docs/distribution/|tag|http://www.semanlink.net/tag/json_ld
+https://api-platform.com/docs/distribution/|title|API Platform: Creating your First API with API Platform, in 5 Minutes
+https://api-platform.com/docs/distribution/|creationTime|2017-04-12T13:19:33Z
+http://www.theguardian.com/world/2015/jun/13/pope-francis-intervention-transforms-climate-change-debate?CMP=share_btn_tw|creationDate|2015-06-14
+http://www.theguardian.com/world/2015/jun/13/pope-francis-intervention-transforms-climate-change-debate?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/rechauffement_climatique
+http://www.theguardian.com/world/2015/jun/13/pope-francis-intervention-transforms-climate-change-debate?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/pape_francois
+http://www.theguardian.com/world/2015/jun/13/pope-francis-intervention-transforms-climate-change-debate?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/tyrannical_exploitation_of_nature_by_mankind
+http://www.theguardian.com/world/2015/jun/13/pope-francis-intervention-transforms-climate-change-debate?CMP=share_btn_tw|comment|The most anticipated papal letter for decades calls for an end to the ‘tyrannical’ exploitation of nature by mankind. Could it lead to a step-change in the battle against global warming?
+http://www.theguardian.com/world/2015/jun/13/pope-francis-intervention-transforms-climate-change-debate?CMP=share_btn_tw|title|Explosive intervention by Pope Francis set to transform climate change debate The Guardian
+http://www.theguardian.com/world/2015/jun/13/pope-francis-intervention-transforms-climate-change-debate?CMP=share_btn_tw|creationTime|2015-06-14T14:31:31Z
+http://planb.nicecupoftea.org/archives/001293.html|creationDate|2005-04-21
+http://planb.nicecupoftea.org/archives/001293.html|tag|http://www.semanlink.net/tag/rdf
+http://planb.nicecupoftea.org/archives/001293.html|tag|http://www.semanlink.net/tag/firefox
+http://planb.nicecupoftea.org/archives/001293.html|title|Pigsty - a Firefox extension for RDF galleries
+https://arxiv.org/abs/1710.06632|creationDate|2018-10-09
+https://arxiv.org/abs/1710.06632|tag|http://www.semanlink.net/tag/lexical_ambiguity
+https://arxiv.org/abs/1710.06632|tag|http://www.semanlink.net/tag/using_word_embedding
+https://arxiv.org/abs/1710.06632|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1710.06632|tag|http://www.semanlink.net/tag/sense_embeddings
+https://arxiv.org/abs/1710.06632|arxiv_author|Mohammad Taher Pilehvar
+https://arxiv.org/abs/1710.06632|arxiv_author|Nigel Collier
+https://arxiv.org/abs/1710.06632|arxiv_author|Jose Camacho-Collados
+https://arxiv.org/abs/1710.06632|arxiv_author|Roberto Navigli
+https://arxiv.org/abs/1710.06632|title|[1710.06632] Towards a Seamless Integration of Word Senses into Downstream NLP Applications
+https://arxiv.org/abs/1710.06632|creationTime|2018-10-09T15:08:40Z
+https://arxiv.org/abs/1710.06632|arxiv_summary|"Lexical ambiguity can impede NLP systems from accurate understanding of
+semantics. Despite its potential benefits, the integration of sense-level
+information into NLP systems has remained understudied. By incorporating a
+novel disambiguation algorithm into a state-of-the-art classification model, we
+create a pipeline to integrate sense-level information into downstream NLP
+applications. We show that a simple disambiguation of the input text can lead
+to consistent performance improvement on multiple topic categorization and
+polarity detection datasets, particularly when the fine granularity of the
+underlying sense inventory is reduced and the document is sufficiently large.
+Our results also point to the need for sense representation research to focus
+more on in vivo evaluations which target the performance in downstream NLP
+applications rather than artificial benchmarks."
+https://arxiv.org/abs/1710.06632|arxiv_firstAuthor|Mohammad Taher Pilehvar
+https://arxiv.org/abs/1710.06632|arxiv_updated|2017-10-18T09:13:06Z
+https://arxiv.org/abs/1710.06632|arxiv_title|Towards a Seamless Integration of Word Senses into Downstream NLP Applications
+https://arxiv.org/abs/1710.06632|arxiv_published|2017-10-18T09:13:06Z
+https://arxiv.org/abs/1710.06632|arxiv_num|1710.06632
+http://passeurdesciences.blog.lemonde.fr/2015/08/06/comment-les-abeilles-vaccinent-leurs-petits/|creationDate|2015-08-06
+http://passeurdesciences.blog.lemonde.fr/2015/08/06/comment-les-abeilles-vaccinent-leurs-petits/|tag|http://www.semanlink.net/tag/abeille
+http://passeurdesciences.blog.lemonde.fr/2015/08/06/comment-les-abeilles-vaccinent-leurs-petits/|tag|http://www.semanlink.net/tag/vaccin
+http://passeurdesciences.blog.lemonde.fr/2015/08/06/comment-les-abeilles-vaccinent-leurs-petits/|title|Comment les abeilles vaccinent leurs petits Passeur de sciences
+http://passeurdesciences.blog.lemonde.fr/2015/08/06/comment-les-abeilles-vaccinent-leurs-petits/|creationTime|2015-08-06T23:32:47Z
+http://www.lemonde.fr/opinions/article/2009/05/12/il-est-stupide-d-aller-contre-internet-avec-baton-casque-et-ciseaux-par-eric-rochant_1192030_3232.html|creationDate|2009-05-12
+http://www.lemonde.fr/opinions/article/2009/05/12/il-est-stupide-d-aller-contre-internet-avec-baton-casque-et-ciseaux-par-eric-rochant_1192030_3232.html|tag|http://www.semanlink.net/tag/hadopi_riposte_graduee
+http://www.lemonde.fr/opinions/article/2009/05/12/il-est-stupide-d-aller-contre-internet-avec-baton-casque-et-ciseaux-par-eric-rochant_1192030_3232.html|tag|http://www.semanlink.net/tag/bien_envoye
+http://www.lemonde.fr/opinions/article/2009/05/12/il-est-stupide-d-aller-contre-internet-avec-baton-casque-et-ciseaux-par-eric-rochant_1192030_3232.html|comment|"Et voilà que nos parlementaires, d'anciens ministres de la culture, le gouvernement français, la France, quoi, s'avance, imbue d'elle-même...
+nous venons annoncer que nous sommes vieux, fatigués, affaiblis, apeurés, mais que - ô gloire - nous pouvons encore donner quelques coups de matraque.
+
+Cette loi non seulement ne protège pas les droits des créateurs, non seulement attente à la liberté sous de nombreux aspects, mais surtout agresse, voire insulte cette partie de la population qui - jeune peut-être - vit la mondialisation sans états d'âme, celle qui épouse le mouvement de l'histoire avec gaieté, curiosité, effronterie, qui utilise, crée, exploite, détourne l'outil du siècle."
+http://www.lemonde.fr/opinions/article/2009/05/12/il-est-stupide-d-aller-contre-internet-avec-baton-casque-et-ciseaux-par-eric-rochant_1192030_3232.html|title|Il est stupide d'aller contre Internet avec bâton, casque et ciseaux, par Eric Rochant
+http://www.lemonde.fr/opinions/article/2009/05/12/il-est-stupide-d-aller-contre-internet-avec-baton-casque-et-ciseaux-par-eric-rochant_1192030_3232.html|creationTime|2009-05-12T23:01:01Z
+http://www.lemonde.fr/opinions/article/2009/05/12/il-est-stupide-d-aller-contre-internet-avec-baton-casque-et-ciseaux-par-eric-rochant_1192030_3232.html|source|Le Monde
+http://www.lemonde.fr/international/article/2015/04/04/une-video-de-l-ei-montre-le-saccage-de-la-cite-antique-d-hatra-en-irak_4609703_3210.html|creationDate|2015-04-04
+http://www.lemonde.fr/international/article/2015/04/04/une-video-de-l-ei-montre-le-saccage-de-la-cite-antique-d-hatra-en-irak_4609703_3210.html|tag|http://www.semanlink.net/tag/parthe
+http://www.lemonde.fr/international/article/2015/04/04/une-video-de-l-ei-montre-le-saccage-de-la-cite-antique-d-hatra-en-irak_4609703_3210.html|tag|http://www.semanlink.net/tag/irak
+http://www.lemonde.fr/international/article/2015/04/04/une-video-de-l-ei-montre-le-saccage-de-la-cite-antique-d-hatra-en-irak_4609703_3210.html|tag|http://www.semanlink.net/tag/ei
+http://www.lemonde.fr/international/article/2015/04/04/une-video-de-l-ei-montre-le-saccage-de-la-cite-antique-d-hatra-en-irak_4609703_3210.html|title|Une vidéo de l'EI montre le saccage de la cité antique d'Hatra, en Irak
+http://www.lemonde.fr/international/article/2015/04/04/une-video-de-l-ei-montre-le-saccage-de-la-cite-antique-d-hatra-en-irak_4609703_3210.html|creationTime|2015-04-04T16:55:14Z
+http://duolingo.com/|creationDate|2013-05-15
+http://duolingo.com/|tag|http://www.semanlink.net/tag/education
+http://duolingo.com/|tag|http://www.semanlink.net/tag/langues_vivantes
+http://duolingo.com/|tag|http://www.semanlink.net/tag/anglais
+http://duolingo.com/|tag|http://www.semanlink.net/tag/luis_von_ahn
+http://duolingo.com/|title|Duolingo Apprends gratuitement l'anglais, l'espagnol, l'allemand, le portuguais et l'italien
+http://duolingo.com/|creationTime|2013-05-15T15:33:33Z
+http://apassant.net/blog/2009/05/18/sparcool-supports-jsonp-callbacks|creationDate|2009-05-20
+http://apassant.net/blog/2009/05/18/sparcool-supports-jsonp-callbacks|tag|http://www.semanlink.net/tag/jsonp
+http://apassant.net/blog/2009/05/18/sparcool-supports-jsonp-callbacks|tag|http://www.semanlink.net/tag/alexandre_passant
+http://apassant.net/blog/2009/05/18/sparcool-supports-jsonp-callbacks|title|SPARCool supports JSONP callbacks Alexandre Passant
+http://apassant.net/blog/2009/05/18/sparcool-supports-jsonp-callbacks|creationTime|2009-05-20T23:51:32Z
+http://www.ldodds.com/blog/archives/000219.html|creationDate|2005-06-24
+http://www.ldodds.com/blog/archives/000219.html|tag|http://www.semanlink.net/tag/jena_rules
+http://www.ldodds.com/blog/archives/000219.html|tag|http://www.semanlink.net/tag/tagging
+http://www.ldodds.com/blog/archives/000219.html|comment|Extracting richer metadata from tagging conventions
+http://www.ldodds.com/blog/archives/000219.html|title|Lost Boy: Fun with Jena Rules
+http://money.cnn.com/2006/07/13/pf/rfid_passports/index.htm?cnn=yes|creationDate|2006-07-20
+http://money.cnn.com/2006/07/13/pf/rfid_passports/index.htm?cnn=yes|tag|http://www.semanlink.net/tag/rfid_passports
+http://money.cnn.com/2006/07/13/pf/rfid_passports/index.htm?cnn=yes|comment|Basically, you've given everybody a little radio-frequency doodad that silently declares 'Hey, I'm a foreigner'
+http://money.cnn.com/2006/07/13/pf/rfid_passports/index.htm?cnn=yes|title|Technologists object to U.S. RFID passports
+http://ebiquity.umbc.edu/v2.1/blogger/index.php?p=222|creationDate|2005-06-02
+http://ebiquity.umbc.edu/v2.1/blogger/index.php?p=222|tag|http://www.semanlink.net/tag/finding_rdf_documents
+http://ebiquity.umbc.edu/v2.1/blogger/index.php?p=222|tag|http://www.semanlink.net/tag/country_ontologies
+http://ebiquity.umbc.edu/v2.1/blogger/index.php?p=222|tag|http://www.semanlink.net/tag/swoogle
+http://ebiquity.umbc.edu/v2.1/blogger/index.php?p=222|comment|Searching for country ontologies
+http://ebiquity.umbc.edu/v2.1/blogger/index.php?p=222|title|Finding RDF instance data with Swoogle
+http://www.dlib.org/dlib/january06/guy/01guy.html|creationDate|2006-02-17
+http://www.dlib.org/dlib/january06/guy/01guy.html|tag|http://www.semanlink.net/tag/tagging
+http://www.dlib.org/dlib/january06/guy/01guy.html|tag|http://www.semanlink.net/tag/folksonomy
+http://www.dlib.org/dlib/january06/guy/01guy.html|tag|http://www.semanlink.net/tag/semanlink_related
+http://www.dlib.org/dlib/january06/guy/01guy.html|comment|"We begin by looking at the issue of ""sloppy tags"", and ask if there are ways the folksonomy community could offset such problems and create systems that are conducive to searching, sorting and classifying. We then go on to question this ""tidying up"" approach and its underlying assumptions, highlighting issues surrounding removal of low-quality, redundant or nonsense metadata, and the potential risks of tidying too neatly and thereby losing the very openness that has made folksonomies so popular."
+http://www.dlib.org/dlib/january06/guy/01guy.html|title|Folksonomies: Tidying up Tags?
+http://www.lemonde.fr/economie/article/2014/01/20/les-dangers-de-la-montee-des-inegalites-au-menu-du-forum-de-davos_4350849_3234.html|creationDate|2014-01-20
+http://www.lemonde.fr/economie/article/2014/01/20/les-dangers-de-la-montee-des-inegalites-au-menu-du-forum-de-davos_4350849_3234.html|tag|http://www.semanlink.net/tag/davos
+http://www.lemonde.fr/economie/article/2014/01/20/les-dangers-de-la-montee-des-inegalites-au-menu-du-forum-de-davos_4350849_3234.html|tag|http://www.semanlink.net/tag/inegalites
+http://www.lemonde.fr/economie/article/2014/01/20/les-dangers-de-la-montee-des-inegalites-au-menu-du-forum-de-davos_4350849_3234.html|comment|"Après avoir longtemps cru dur comme fer que les inégalités de revenus étaient nécessaires pour récompenser le talent et inciter à l'innovation, le Forum commence à écouter les économistes qui alertaient sur la dangerosité du fossé en train de s'élargir entre les riches et les pauvres, à l'instar de Joseph Stiglitz, prix Nobel d'économie en 2001, ou François Bourguignon, ancien chef économiste de la Banque mondiale (2003-2007).
+"
+http://www.lemonde.fr/economie/article/2014/01/20/les-dangers-de-la-montee-des-inegalites-au-menu-du-forum-de-davos_4350849_3234.html|title|Les dangers de la montée des inégalités au menu du Forum de Davos
+http://www.lemonde.fr/economie/article/2014/01/20/les-dangers-de-la-montee-des-inegalites-au-menu-du-forum-de-davos_4350849_3234.html|creationTime|2014-01-20T13:45:53Z
+http://www.sciencemag.org/news/2016/10/europe-attempts-mars-landing|creationDate|2016-10-18
+http://www.sciencemag.org/news/2016/10/europe-attempts-mars-landing|tag|http://www.semanlink.net/tag/exomars
+http://www.sciencemag.org/news/2016/10/europe-attempts-mars-landing|title|Europe attempts Mars landing Science AAAS
+http://www.sciencemag.org/news/2016/10/europe-attempts-mars-landing|creationTime|2016-10-18T11:37:03Z
+http://bibd.uni-giessen.de/gdoc/2002/uni/d020057.pdf|creationDate|2006-04-02
+http://bibd.uni-giessen.de/gdoc/2002/uni/d020057.pdf|tag|http://www.semanlink.net/tag/irrigation
+http://bibd.uni-giessen.de/gdoc/2002/uni/d020057.pdf|comment|"
+
+ Computer-based expert system to optimize the water supply for modern irrigation systems in selected regions in Egypt Computer-based expert system to optimize the water supply for modern irrigation systems in selected regions in Egypt
+"
+http://bibd.uni-giessen.de/gdoc/2002/uni/d020057.pdf|title|Computer-based expert system to optimize the water supply for modern irrigation systems in selected regions in Egypt
+http://www.newscientistspace.com/article.ns?id=dn9337|creationDate|2006-06-21
+http://www.newscientistspace.com/article.ns?id=dn9337|tag|http://www.semanlink.net/tag/experience_scientifique
+http://www.newscientistspace.com/article.ns?id=dn9337|tag|http://www.semanlink.net/tag/antimatiere
+http://www.newscientistspace.com/article.ns?id=dn9337|tag|http://www.semanlink.net/tag/matiere_noire
+http://www.newscientistspace.com/article.ns?id=dn9337|comment|A cosmic ray detector that will attempt to unlock the secrets of both antimatter and dark matter launched on Thursday. Called PAMELA, the experiment is set to spend at least three years in orbit – providing far longer coverage than the few days of data collected by previous space-based detectors.
+http://www.newscientistspace.com/article.ns?id=dn9337|title|New Scientist - Antimatter and dark matter are new probe's prey
+http://www.lemonde.fr/opinions/article/2008/07/07/les-fichiers-policiers-et-la-grenouille-ebouillantee-par-luc-bronner_1067157_3232.html|creationDate|2008-07-08
+http://www.lemonde.fr/opinions/article/2008/07/07/les-fichiers-policiers-et-la-grenouille-ebouillantee-par-luc-bronner_1067157_3232.html|tag|http://www.semanlink.net/tag/etat_policier
+http://www.lemonde.fr/opinions/article/2008/07/07/les-fichiers-policiers-et-la-grenouille-ebouillantee-par-luc-bronner_1067157_3232.html|tag|http://www.semanlink.net/tag/fichage
+http://www.lemonde.fr/opinions/article/2008/07/07/les-fichiers-policiers-et-la-grenouille-ebouillantee-par-luc-bronner_1067157_3232.html|comment|"Une grenouille plongée dans une casserole d'eau bouillante essaiera de se débattre et de s'enfuir. Le même batracien plongé dans de l'eau tiède se sentira bien. Montez la température, il se laissera engourdir et finira par mourir ébouillanté, sans avoir jamais réagi.
+" +http://www.lemonde.fr/opinions/article/2008/07/07/les-fichiers-policiers-et-la-grenouille-ebouillantee-par-luc-bronner_1067157_3232.html|title|Les fichiers policiers et la grenouille ébouillantée, par Luc Bronner +http://www.lemonde.fr/opinions/article/2008/07/07/les-fichiers-policiers-et-la-grenouille-ebouillantee-par-luc-bronner_1067157_3232.html|creationTime|2008-07-08T21:16:51Z +http://www.lemonde.fr/opinions/article/2008/07/07/les-fichiers-policiers-et-la-grenouille-ebouillantee-par-luc-bronner_1067157_3232.html|source|Le Monde +https://redis.io/|creationDate|2018-07-12 +https://redis.io/|tag|http://www.semanlink.net/tag/redis +https://redis.io/|title|Redis +https://redis.io/|creationTime|2018-07-12T23:23:57Z +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/|creationDate|2007-07-13 +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/|tag|http://www.semanlink.net/tag/ng4j +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/|title|NG4J - Named Graphs API for Jena +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/|creationTime|2007-07-13T19:00:19Z +https://arxiv.org/abs/1704.05358|creationDate|2018-10-06 +https://arxiv.org/abs/1704.05358|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1704.05358|tag|http://www.semanlink.net/tag/sentence_embeddings +https://arxiv.org/abs/1704.05358|tag|http://www.semanlink.net/tag/sentence_similarity +https://arxiv.org/abs/1704.05358|arxiv_author|Pramod Viswanath +https://arxiv.org/abs/1704.05358|arxiv_author|Suma Bhat +https://arxiv.org/abs/1704.05358|arxiv_author|Jiaqi Mu +https://arxiv.org/abs/1704.05358|comment|"> We observe a simple geometry of sentences -- the word representations of a given sentence roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. + +A sentence of N words is a matrix (300, N) (if 300 is the dim of the word embeddings space). We take the eg. 4 (hyperparam) heaviest singular values -> a subspace with dim 4 + +Similarity between docs: principal angle between the subspaces (reminiscent of cosine similarity) + +" +https://arxiv.org/abs/1704.05358|title|[1704.05358] Representing Sentences as Low-Rank Subspaces +https://arxiv.org/abs/1704.05358|creationTime|2018-10-06T11:22:58Z +https://arxiv.org/abs/1704.05358|arxiv_summary|"Sentences are important semantic units of natural language. A generic, +distributional representation of sentences that can capture the latent +semantics is beneficial to multiple downstream applications. We observe a +simple geometry of sentences -- the word representations of a given sentence +(on average 10.23 words in all SemEval datasets with a standard deviation 4.84) +roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this +observation, we represent a sentence by the low-rank subspace spanned by its +word vectors. Such an unsupervised representation is empirically validated via +semantic textual similarity tasks on 19 different datasets, where it +outperforms the sophisticated neural network models, including skip-thought +vectors, by 15% on average." 
+https://arxiv.org/abs/1704.05358|arxiv_firstAuthor|Jiaqi Mu +https://arxiv.org/abs/1704.05358|arxiv_updated|2017-04-18T14:30:32Z +https://arxiv.org/abs/1704.05358|arxiv_title|Representing Sentences as Low-Rank Subspaces +https://arxiv.org/abs/1704.05358|arxiv_published|2017-04-18T14:30:32Z +https://arxiv.org/abs/1704.05358|arxiv_num|1704.05358 +https://arxiv.org/abs/1902.10618|creationDate|2019-02-28 +https://arxiv.org/abs/1902.10618|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1902.10618|tag|http://www.semanlink.net/tag/contextualised_word_representations +https://arxiv.org/abs/1902.10618|tag|http://www.semanlink.net/tag/word_embedding_compositionality +https://arxiv.org/abs/1902.10618|arxiv_author|Ido Dagan +https://arxiv.org/abs/1902.10618|arxiv_author|Vered Shwartz +https://arxiv.org/abs/1902.10618|comment|"How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift (""give in"" is different from ""give"") but much worse with revealing implicit meaning (""hot tea"" is about temperature, ""hot debate"" isn't)." +https://arxiv.org/abs/1902.10618|title|[1902.10618] Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition +https://arxiv.org/abs/1902.10618|creationTime|2019-02-28T13:10:48Z +https://arxiv.org/abs/1902.10618|arxiv_summary|"Building meaningful phrase representations is challenging because phrase +meanings are not simply the sum of their constituent meanings. Lexical +composition can shift the meanings of the constituent words and introduce +implicit information. We tested a broad range of textual representations for +their capacity to address these issues. We found that as expected, +contextualized word representations perform better than static word embeddings, +more so on detecting meaning shift than in recovering implicit information, in +which their performance is still far from that of humans. Our evaluation suite, +including 5 tasks related to lexical composition effects, can serve future +research aiming to improve such representations." 
+https://arxiv.org/abs/1902.10618|arxiv_firstAuthor|Vered Shwartz
+https://arxiv.org/abs/1902.10618|arxiv_updated|2019-05-19T13:47:16Z
+https://arxiv.org/abs/1902.10618|arxiv_title|Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition
+https://arxiv.org/abs/1902.10618|arxiv_published|2019-02-27T16:16:37Z
+https://arxiv.org/abs/1902.10618|arxiv_num|1902.10618
+http://www.slideshare.net/gkellogg1/jsonld-and-mongodb|creationDate|2016-11-17
+http://www.slideshare.net/gkellogg1/jsonld-and-mongodb|tag|http://www.semanlink.net/tag/slideshare
+http://www.slideshare.net/gkellogg1/jsonld-and-mongodb|tag|http://www.semanlink.net/tag/jsonld_mongodb
+http://www.slideshare.net/gkellogg1/jsonld-and-mongodb|tag|http://www.semanlink.net/tag/gregg_kellogg
+http://www.slideshare.net/gkellogg1/jsonld-and-mongodb|title|JSON-LD and MongoDB
+http://www.slideshare.net/gkellogg1/jsonld-and-mongodb|creationTime|2016-11-17T00:33:39Z
+http://developer.apple.com/tools/rubyonrails.html|creationDate|2006-03-01
+http://developer.apple.com/tools/rubyonrails.html|tag|http://www.semanlink.net/tag/mac_os_x
+http://developer.apple.com/tools/rubyonrails.html|tag|http://www.semanlink.net/tag/ruby_on_rails
+http://developer.apple.com/tools/rubyonrails.html|title|Using Ruby on Rails for Web Development on Mac OS X
+http://www.bbc.co.uk/news/science-environment-12851772|creationDate|2011-03-25
+http://www.bbc.co.uk/news/science-environment-12851772|tag|http://www.semanlink.net/tag/first_americans
+http://www.bbc.co.uk/news/science-environment-12851772|title|BBC News - Stone tools 'demand new American story'
+http://www.bbc.co.uk/news/science-environment-12851772|creationTime|2011-03-25T17:48:47Z
+http://www.thenation.com/article/164497/capitalism-vs-climate?page=full|creationDate|2012-02-23
+http://www.thenation.com/article/164497/capitalism-vs-climate?page=full|tag|http://www.semanlink.net/tag/naomi_klein
+http://www.thenation.com/article/164497/capitalism-vs-climate?page=full|tag|http://www.semanlink.net/tag/critique_du_capitalisme
+http://www.thenation.com/article/164497/capitalism-vs-climate?page=full|tag|http://www.semanlink.net/tag/rechauffement_climatique
+http://www.thenation.com/article/164497/capitalism-vs-climate?page=full|comment|Responding to climate change requires that we break every rule in the free-market playbook and that we do so with great urgency. We will need to rebuild the public sphere, reverse privatizations, relocalize large parts of economies, scale back overconsumption, bring back long-term planning, heavily regulate and tax corporations, maybe even nationalize some of them, cut military spending and recognize our debts to the global South. Of course, none of this has a hope in hell of happening unless it is accompanied by a massive, broad-based effort to radically reduce the influence that corporations have over the political process. That means, at a minimum, publicly funded elections and stripping corporations of their status as “people” under the law. In short, climate change supercharges the pre-existing case for virtually every progressive demand on the books, binding them into a coherent agenda based on a clear scientific imperative.
+http://www.thenation.com/article/164497/capitalism-vs-climate?page=full|title|Capitalism vs. the Climate The Nation
+http://www.thenation.com/article/164497/capitalism-vs-climate?page=full|creationTime|2012-02-23T20:45:17Z
+https://www2018.thewebconf.org/proceedings/|creationDate|2018-04-23
+https://www2018.thewebconf.org/proceedings/|tag|http://www.semanlink.net/tag/thewebconf_2018
+https://www2018.thewebconf.org/proceedings/|title|PROCEEDINGS – The Web Conference in Lyon
+https://www2018.thewebconf.org/proceedings/|creationTime|2018-04-23T17:33:50Z
+http://www.lemonde.fr/international/article/2015/02/26/une-video-montre-l-etat-islamique-ravageant-un-musee-en-irak_4584260_3210.html|creationDate|2015-02-27
+http://www.lemonde.fr/international/article/2015/02/26/une-video-montre-l-etat-islamique-ravageant-un-musee-en-irak_4584260_3210.html|tag|http://www.semanlink.net/tag/destruction_de_vestiges_antiques
+http://www.lemonde.fr/international/article/2015/02/26/une-video-montre-l-etat-islamique-ravageant-un-musee-en-irak_4584260_3210.html|tag|http://www.semanlink.net/tag/musee
+http://www.lemonde.fr/international/article/2015/02/26/une-video-montre-l-etat-islamique-ravageant-un-musee-en-irak_4584260_3210.html|tag|http://www.semanlink.net/tag/irak
+http://www.lemonde.fr/international/article/2015/02/26/une-video-montre-l-etat-islamique-ravageant-un-musee-en-irak_4584260_3210.html|tag|http://www.semanlink.net/tag/ei
+http://www.lemonde.fr/international/article/2015/02/26/une-video-montre-l-etat-islamique-ravageant-un-musee-en-irak_4584260_3210.html|tag|http://www.semanlink.net/tag/abrutis
+http://www.lemonde.fr/international/article/2015/02/26/une-video-montre-l-etat-islamique-ravageant-un-musee-en-irak_4584260_3210.html|title|Les images du musée de Mossoul saccagé par des djihadistes en Irak
+http://www.lemonde.fr/international/article/2015/02/26/une-video-montre-l-etat-islamique-ravageant-un-musee-en-irak_4584260_3210.html|creationTime|2015-02-27T13:54:39Z
+http://www.newsweek.com/id/157516|creationDate|2008-09-09
+http://www.newsweek.com/id/157516|tag|http://www.semanlink.net/tag/lhc
+http://www.newsweek.com/id/157516|title|Large Hadron Collider May Explain Atom's Mysteries Newsweek.com
+http://www.newsweek.com/id/157516|creationTime|2008-09-09T18:22:44Z
+https://news.mit.edu/2018/machines-learn-language-human-interaction-1031|creationDate|2018-11-01
+https://news.mit.edu/2018/machines-learn-language-human-interaction-1031|tag|http://www.semanlink.net/tag/mit
+https://news.mit.edu/2018/machines-learn-language-human-interaction-1031|tag|http://www.semanlink.net/tag/grounded_language_learning
+https://news.mit.edu/2018/machines-learn-language-human-interaction-1031|title|Machines that learn language more like kids do MIT News
+https://news.mit.edu/2018/machines-learn-language-human-interaction-1031|creationTime|2018-11-01T17:00:14Z
+http://www.esa.int/SPECIALS/Herschel/index.html|creationDate|2009-06-19
+http://www.esa.int/SPECIALS/Herschel/index.html|tag|http://www.semanlink.net/tag/esa
+http://www.esa.int/SPECIALS/Herschel/index.html|tag|http://www.semanlink.net/tag/herschel_telescope
+http://www.esa.int/SPECIALS/Herschel/index.html|title|ESA - Herschel
+http://www.esa.int/SPECIALS/Herschel/index.html|creationTime|2009-06-19T17:11:28Z
+https://dev.to/reactiveconf/why-i-believe-graphql-will-come-to-replace-rest|creationDate|2016-10-08
+https://dev.to/reactiveconf/why-i-believe-graphql-will-come-to-replace-rest|tag|http://www.semanlink.net/tag/graphql
+https://dev.to/reactiveconf/why-i-believe-graphql-will-come-to-replace-rest|tag|http://www.semanlink.net/tag/rest
+https://dev.to/reactiveconf/why-i-believe-graphql-will-come-to-replace-rest|comment|let clients specify their own data needs against the capabilities exposed by a server
+https://dev.to/reactiveconf/why-i-believe-graphql-will-come-to-replace-rest|title|Why I believe GraphQL will come to replace REST
+https://dev.to/reactiveconf/why-i-believe-graphql-will-come-to-replace-rest|creationTime|2016-10-08T14:59:30Z
+http://www.lemonde.fr/sciences/article/2012/05/20/la-difficile-ascension-vers-la-resolution-d-un-probleme-mathematique_1704410_1650684.html|creationDate|2012-05-20
+http://www.lemonde.fr/sciences/article/2012/05/20/la-difficile-ascension-vers-la-resolution-d-un-probleme-mathematique_1704410_1650684.html|tag|http://www.semanlink.net/tag/conjecture_de_goldbach
+http://www.lemonde.fr/sciences/article/2012/05/20/la-difficile-ascension-vers-la-resolution-d-un-probleme-mathematique_1704410_1650684.html|title|La difficile ascension vers la résolution d'un problème mathématique
+http://www.lemonde.fr/sciences/article/2012/05/20/la-difficile-ascension-vers-la-resolution-d-un-probleme-mathematique_1704410_1650684.html|creationTime|2012-05-20T23:00:56Z
+http://semwebcentral.org|creationDate|2005-06-15
+http://semwebcentral.org|tag|http://www.semanlink.net/tag/semantic_web
+http://semwebcentral.org|title|SemWebCentral - SemWebCentral Home Page
+http://pingthesemanticweb.com/|creationDate|2007-04-25
+http://pingthesemanticweb.com/|tag|http://www.semanlink.net/tag/frederick_giasson
+http://pingthesemanticweb.com/|tag|http://www.semanlink.net/tag/linked_data
+http://pingthesemanticweb.com/|tag|http://www.semanlink.net/tag/rdf_service
+http://pingthesemanticweb.com/|title|Ping the Semantic Web.com - Share your RDF data with the World!
+http://pingthesemanticweb.com/|creationTime|2007-04-25T15:39:05Z
+http://apilama.com/2016/01/05/apis-and-linked-data-a-match-made-in-heaven/|creationDate|2016-02-25
+http://apilama.com/2016/01/05/apis-and-linked-data-a-match-made-in-heaven/|tag|http://www.semanlink.net/tag/apis_and_linked_data
+http://apilama.com/2016/01/05/apis-and-linked-data-a-match-made-in-heaven/|title|APIs and Linked Data: A match made in Heaven APILama
+http://apilama.com/2016/01/05/apis-and-linked-data-a-match-made-in-heaven/|creationTime|2016-02-25T03:10:32Z
+http://jersey.java.net/nonav/apidocs/latest/jersey/com/sun/jersey/api/container/filter/UriConnegFilter.html|creationDate|2011-04-18
+http://jersey.java.net/nonav/apidocs/latest/jersey/com/sun/jersey/api/container/filter/UriConnegFilter.html|tag|http://www.semanlink.net/tag/jersey
+http://jersey.java.net/nonav/apidocs/latest/jersey/com/sun/jersey/api/container/filter/UriConnegFilter.html|comment|"permet de traiter facilement les .html, .rdf, etc... en remplacant ces suffixes par le accept-header correspondant avant de traiter la requête
+This filter may be used when the accetable media type and acceptable language need to be declared in the URI.
+"
+http://jersey.java.net/nonav/apidocs/latest/jersey/com/sun/jersey/api/container/filter/UriConnegFilter.html|title|UriConnegFilter (jersey-bundle 1.6 API)
+http://jersey.java.net/nonav/apidocs/latest/jersey/com/sun/jersey/api/container/filter/UriConnegFilter.html|creationTime|2011-04-18T18:12:31Z
+http://www.w3.org/TR/2012/WD-json-ld-syntax-20120712/|creationDate|2012-09-05
+http://www.w3.org/TR/2012/WD-json-ld-syntax-20120712/|tag|http://www.semanlink.net/tag/json_ld
+http://www.w3.org/TR/2012/WD-json-ld-syntax-20120712/|comment|application/ld+json
+http://www.w3.org/TR/2012/WD-json-ld-syntax-20120712/|title|JSON-LD Syntax 1.0
+http://www.w3.org/TR/2012/WD-json-ld-syntax-20120712/|creationTime|2012-09-05T16:14:16Z
+http://www.lemonde.fr/mode/article/2013/08/27/sur-les-vetements-le-tattoo-est-tabou_3467040_1383317.html|creationDate|2013-08-27
+http://www.lemonde.fr/mode/article/2013/08/27/sur-les-vetements-le-tattoo-est-tabou_3467040_1383317.html|tag|http://www.semanlink.net/tag/nike
+http://www.lemonde.fr/mode/article/2013/08/27/sur-les-vetements-le-tattoo-est-tabou_3467040_1383317.html|tag|http://www.semanlink.net/tag/sigmund_freud
+http://www.lemonde.fr/mode/article/2013/08/27/sur-les-vetements-le-tattoo-est-tabou_3467040_1383317.html|tag|http://www.semanlink.net/tag/tattoo
+http://www.lemonde.fr/mode/article/2013/08/27/sur-les-vetements-le-tattoo-est-tabou_3467040_1383317.html|tag|http://www.semanlink.net/tag/polynesians
+http://www.lemonde.fr/mode/article/2013/08/27/sur-les-vetements-le-tattoo-est-tabou_3467040_1383317.html|tag|http://www.semanlink.net/tag/magie
+http://www.lemonde.fr/mode/article/2013/08/27/sur-les-vetements-le-tattoo-est-tabou_3467040_1383317.html|comment|"""Nike a donc cédé face à la puissance de la magie""
+
+
+"
+http://www.lemonde.fr/mode/article/2013/08/27/sur-les-vetements-le-tattoo-est-tabou_3467040_1383317.html|title|"Sur les vêtements, le ""tattoo"" est tabou"
+http://www.lemonde.fr/mode/article/2013/08/27/sur-les-vetements-le-tattoo-est-tabou_3467040_1383317.html|creationTime|2013-08-27T13:41:50Z
+http://spasche.net/openinbrowser/|creationDate|2012-07-02
+http://spasche.net/openinbrowser/|tag|http://www.semanlink.net/tag/firefox_extension
+http://spasche.net/openinbrowser/|title|Open in Browser Extension
+http://spasche.net/openinbrowser/|creationTime|2012-07-02T01:46:30Z
+http://barcamp.org/SemanticCampParis|creationDate|2008-02-16
+http://barcamp.org/SemanticCampParis|tag|http://www.semanlink.net/tag/wiki
+http://barcamp.org/SemanticCampParis|tag|http://www.semanlink.net/tag/semantic_camp_paris
+http://barcamp.org/SemanticCampParis|title|BarCamp wiki / SemanticCampParis
+http://barcamp.org/SemanticCampParis|creationTime|2008-02-16T22:00:46Z
+https://fr.wikipedia.org/wiki/Culture_Yamna|creationDate|2019-04-21
+https://fr.wikipedia.org/wiki/Culture_Yamna|tag|http://www.semanlink.net/tag/equitation
+https://fr.wikipedia.org/wiki/Culture_Yamna|tag|http://www.semanlink.net/tag/genetique_histoire
+https://fr.wikipedia.org/wiki/Culture_Yamna|tag|http://www.semanlink.net/tag/indo_europeen
+https://fr.wikipedia.org/wiki/Culture_Yamna|tag|http://www.semanlink.net/tag/prehistoire
+https://fr.wikipedia.org/wiki/Culture_Yamna|title|Culture Yamna — Wikipédia
+https://fr.wikipedia.org/wiki/Culture_Yamna|creationTime|2019-04-21T14:38:15Z
+http://www.wired.com/science/planetearth/news/2008/01/gm_insects|creationDate|2008-01-25
+http://www.wired.com/science/planetearth/news/2008/01/gm_insects|tag|http://www.semanlink.net/tag/moustique
+http://www.wired.com/science/planetearth/news/2008/01/gm_insects|tag|http://www.semanlink.net/tag/technique_de_l_insecte_sterile
+http://www.wired.com/science/planetearth/news/2008/01/gm_insects|tag|http://www.semanlink.net/tag/dengue
+http://www.wired.com/science/planetearth/news/2008/01/gm_insects|tag|http://www.semanlink.net/tag/ogm
+http://www.wired.com/science/planetearth/news/2008/01/gm_insects|title|Engineered Mosquitoes Could Wipe Out Dengue Fever
+http://www.wired.com/science/planetearth/news/2008/01/gm_insects|creationTime|2008-01-25T21:32:22Z
+http://milicicvuk.com/blog/2014/08/26/can-json-and-rdf-be-friends/|creationDate|2017-01-06
+http://milicicvuk.com/blog/2014/08/26/can-json-and-rdf-be-friends/|tag|http://www.semanlink.net/tag/rdf_in_json
+http://milicicvuk.com/blog/2014/08/26/can-json-and-rdf-be-friends/|title|Can JSON and RDF be friends?
+http://milicicvuk.com/blog/2014/08/26/can-json-and-rdf-be-friends/|creationTime|2017-01-06T16:58:01Z
+http://billchambers.me/tutorials/2015/01/14/python-nlp-cheatsheet-nltk-scikit-learn.html|creationDate|2015-10-21
+http://billchambers.me/tutorials/2015/01/14/python-nlp-cheatsheet-nltk-scikit-learn.html|tag|http://www.semanlink.net/tag/nlp_sample_code
+http://billchambers.me/tutorials/2015/01/14/python-nlp-cheatsheet-nltk-scikit-learn.html|tag|http://www.semanlink.net/tag/python_nlp
+http://billchambers.me/tutorials/2015/01/14/python-nlp-cheatsheet-nltk-scikit-learn.html|tag|http://www.semanlink.net/tag/nltk
+http://billchambers.me/tutorials/2015/01/14/python-nlp-cheatsheet-nltk-scikit-learn.html|comment|Tokenization, POS Tagging, removing punctuation, stopwords, stemming, frequency distributions, Collocations, Bigrams, Trigrams, chunking, Splitting Training Sets + Test Sets, classifiers & scikit-learn, Cross Validating Classifiers, pipelines for classifiers
+http://billchambers.me/tutorials/2015/01/14/python-nlp-cheatsheet-nltk-scikit-learn.html|title|Python NLP - NLTK and scikit-learn
+http://billchambers.me/tutorials/2015/01/14/python-nlp-cheatsheet-nltk-scikit-learn.html|creationTime|2015-10-21T18:36:28Z
+http://www.newscientisttech.com/article.ns?id=mg19025566.400|creationDate|2006-06-22
+http://www.newscientisttech.com/article.ns?id=mg19025566.400|tag|http://www.semanlink.net/tag/cybersex
+http://www.newscientisttech.com/article.ns?id=mg19025566.400|tag|http://www.semanlink.net/tag/second_life
+http://www.newscientisttech.com/article.ns?id=mg19025566.400|title|New Scientist Tech - The irresistible rise of cybersex
+http://lists.apple.com/archives/java-dev/2006/Aug/msg00325.html|creationDate|2006-08-23
+http://lists.apple.com/archives/java-dev/2006/Aug/msg00325.html|tag|http://www.semanlink.net/tag/fps_post
+http://lists.apple.com/archives/java-dev/2006/Aug/msg00325.html|tag|http://www.semanlink.net/tag/apple_java
+http://lists.apple.com/archives/java-dev/2006/Aug/msg00325.html|tag|http://www.semanlink.net/tag/diacritics_in_uri
+http://lists.apple.com/archives/java-dev/2006/Aug/msg00325.html|comment|"A message I posted to Apple java-dev mailing list about a problem with uri of ""ééé.html"" on Mac.
+"
+http://lists.apple.com/archives/java-dev/2006/Aug/msg00325.html|title|file to uri to file madness
+http://lists.w3.org/Archives/Public/public-lod/2010May/0010.html|creationDate|2010-05-04
+http://lists.w3.org/Archives/Public/public-lod/2010May/0010.html|tag|http://www.semanlink.net/tag/email
+http://lists.w3.org/Archives/Public/public-lod/2010May/0010.html|tag|http://www.semanlink.net/tag/foaf_ssl
+http://lists.w3.org/Archives/Public/public-lod/2010May/0010.html|tag|http://www.semanlink.net/tag/atom
+http://lists.w3.org/Archives/Public/public-lod/2010May/0010.html|tag|http://www.semanlink.net/tag/henry_story
+http://lists.w3.org/Archives/Public/public-lod/2010May/0010.html|title|replacing email with atom and foaf+ssl
+http://lists.w3.org/Archives/Public/public-lod/2010May/0010.html|creationTime|2010-05-04T08:56:26Z
+http://techcrunch.com/2012/05/09/move-over-harvard-and-mit-stanford-has-the-real-revolution-in-education/|creationDate|2012-08-21
+http://techcrunch.com/2012/05/09/move-over-harvard-and-mit-stanford-has-the-real-revolution-in-education/|tag|http://www.semanlink.net/tag/stanford
+http://techcrunch.com/2012/05/09/move-over-harvard-and-mit-stanford-has-the-real-revolution-in-education/|tag|http://www.semanlink.net/tag/online_course_materials
+http://techcrunch.com/2012/05/09/move-over-harvard-and-mit-stanford-has-the-real-revolution-in-education/|tag|http://www.semanlink.net/tag/technology_enhanced_learning
+http://techcrunch.com/2012/05/09/move-over-harvard-and-mit-stanford-has-the-real-revolution-in-education/|tag|http://www.semanlink.net/tag/education
+http://techcrunch.com/2012/05/09/move-over-harvard-and-mit-stanford-has-the-real-revolution-in-education/|title|Move Over Harvard And MIT, Stanford Has The Real “Revolution In Education” TechCrunch
+http://techcrunch.com/2012/05/09/move-over-harvard-and-mit-stanford-has-the-real-revolution-in-education/|creationTime|2012-08-21T09:50:26Z
+http://manu.sporny.org/category/json-ld/|creationDate|2012-02-20
+http://manu.sporny.org/category/json-ld/|tag|http://www.semanlink.net/tag/json_ld
+http://manu.sporny.org/category/json-ld/|tag|http://www.semanlink.net/tag/manu_sporny
+http://manu.sporny.org/category/json-ld/|title|JSON-LD The Beautiful, Tormented Machine
+http://manu.sporny.org/category/json-ld/|creationTime|2012-02-20T22:48:21Z
+http://www.bbc.co.uk/news/science-environment-17436365|creationDate|2012-04-22
+http://www.bbc.co.uk/news/science-environment-17436365|tag|http://www.semanlink.net/tag/synthetic_biology
+http://www.bbc.co.uk/news/science-environment-17436365|tag|http://www.semanlink.net/tag/adn
+http://www.bbc.co.uk/news/science-environment-17436365|title|BBC News - The strange new craft of making life from scratch
+http://www.bbc.co.uk/news/science-environment-17436365|creationTime|2012-04-22T16:07:37Z
+http://www.yworks.com/en/products_yfiles_about.htm|creationDate|2005-09-25
+http://www.yworks.com/en/products_yfiles_about.htm|tag|http://www.semanlink.net/tag/graph_visualization
+http://www.yworks.com/en/products_yfiles_about.htm|comment|PAYANT
+http://www.yworks.com/en/products_yfiles_about.htm|title|yFiles - Java Graph Layout and Visualization Library
+http://www.hyperorg.com/blogger/mtarchive/ibm_shows_delicious_for_the_en.html|creationDate|2005-11-10
+http://www.hyperorg.com/blogger/mtarchive/ibm_shows_delicious_for_the_en.html|tag|http://www.semanlink.net/tag/ibm
+http://www.hyperorg.com/blogger/mtarchive/ibm_shows_delicious_for_the_en.html|tag|http://www.semanlink.net/tag/social_software
+http://www.hyperorg.com/blogger/mtarchive/ibm_shows_delicious_for_the_en.html|tag|http://www.semanlink.net/tag/del_icio_us
+http://www.hyperorg.com/blogger/mtarchive/ibm_shows_delicious_for_the_en.html|title|Joho the Blog: IBM shows del.icio.us for the enterprise, and more
+http://www.youtube.com/watch?v=yp8AjMBG87g|creationDate|2013-05-23
+http://www.youtube.com/watch?v=yp8AjMBG87g|tag|http://www.semanlink.net/tag/youtube
+http://www.youtube.com/watch?v=yp8AjMBG87g|tag|http://www.semanlink.net/tag/google_knowledge_graph
+http://www.youtube.com/watch?v=yp8AjMBG87g|tag|http://www.semanlink.net/tag/dan_brickley
+http://www.youtube.com/watch?v=yp8AjMBG87g|title|Google I/O 2013 - From Structured Data to the Knowledge Graph - YouTube
+http://www.youtube.com/watch?v=yp8AjMBG87g|creationTime|2013-05-23T14:23:28Z
+http://helper.ipam.ucla.edu/publications/dlt2018/dlt2018_14506.pdf|creationDate|2018-05-05
+http://helper.ipam.ucla.edu/publications/dlt2018/dlt2018_14506.pdf|tag|http://www.semanlink.net/tag/convolutional_neural_network
+http://helper.ipam.ucla.edu/publications/dlt2018/dlt2018_14506.pdf|tag|http://www.semanlink.net/tag/graph_embeddings
+http://helper.ipam.ucla.edu/publications/dlt2018/dlt2018_14506.pdf|tag|http://www.semanlink.net/tag/spectral_clustering
+http://helper.ipam.ucla.edu/publications/dlt2018/dlt2018_14506.pdf|tag|http://www.semanlink.net/tag/slides
+http://helper.ipam.ucla.edu/publications/dlt2018/dlt2018_14506.pdf|title|Convolutional Neural Networks on Graphs
+http://helper.ipam.ucla.edu/publications/dlt2018/dlt2018_14506.pdf|creationTime|2018-05-05T13:37:51Z
+http://semanticweb.com/keynote-video-and-updates-from-the-amsterdam-fire-department_b32566#bart-keynote|creationDate|2012-11-08
+http://semanticweb.com/keynote-video-and-updates-from-the-amsterdam-fire-department_b32566#bart-keynote|tag|http://www.semanlink.net/tag/linked_data
+http://semanticweb.com/keynote-video-and-updates-from-the-amsterdam-fire-department_b32566#bart-keynote|tag|http://www.semanlink.net/tag/amsterdam
+http://semanticweb.com/keynote-video-and-updates-from-the-amsterdam-fire-department_b32566#bart-keynote|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012
+http://semanticweb.com/keynote-video-and-updates-from-the-amsterdam-fire-department_b32566#bart-keynote|title|Keynote Video and Updates from the Amsterdam Fire Department - semanticweb.com
+http://semanticweb.com/keynote-video-and-updates-from-the-amsterdam-fire-department_b32566#bart-keynote|creationTime|2012-11-08T01:10:43Z
+http://www.nationalgeographic.com/magazine/2017/09/holland-agriculture-sustainable-farming/|creationDate|2017-09-03
+http://www.nationalgeographic.com/magazine/2017/09/holland-agriculture-sustainable-farming/|tag|http://www.semanlink.net/tag/pays_bas
+http://www.nationalgeographic.com/magazine/2017/09/holland-agriculture-sustainable-farming/|tag|http://www.semanlink.net/tag/agriculture
+http://www.nationalgeographic.com/magazine/2017/09/holland-agriculture-sustainable-farming/|comment|The Netherlands has become an agricultural giant by showing what the future of farming could look like.
+http://www.nationalgeographic.com/magazine/2017/09/holland-agriculture-sustainable-farming/|title|This Tiny Country Feeds the World
+http://www.nationalgeographic.com/magazine/2017/09/holland-agriculture-sustainable-farming/|creationTime|2017-09-03T12:16:14Z
+http://delivery.acm.org/10.1145/3200000/3191554/p1171-sopek.html?ip=37.71.228.186&id=3191554&acc=OPEN&key=4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E6D218144511F3437&__acm__=1524690407_a6f0908759ebcbdcb90ed0cfa942743c|creationDate|2018-04-25
+http://delivery.acm.org/10.1145/3200000/3191554/p1171-sopek.html?ip=37.71.228.186&id=3191554&acc=OPEN&key=4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E6D218144511F3437&__acm__=1524690407_a6f0908759ebcbdcb90ed0cfa942743c|tag|http://www.semanlink.net/tag/rdf
+http://delivery.acm.org/10.1145/3200000/3191554/p1171-sopek.html?ip=37.71.228.186&id=3191554&acc=OPEN&key=4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E6D218144511F3437&__acm__=1524690407_a6f0908759ebcbdcb90ed0cfa942743c|tag|http://www.semanlink.net/tag/thewebconf_2018
+http://delivery.acm.org/10.1145/3200000/3191554/p1171-sopek.html?ip=37.71.228.186&id=3191554&acc=OPEN&key=4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E6D218144511F3437&__acm__=1524690407_a6f0908759ebcbdcb90ed0cfa942743c|tag|http://www.semanlink.net/tag/blockchain
+http://delivery.acm.org/10.1145/3200000/3191554/p1171-sopek.html?ip=37.71.228.186&id=3191554&acc=OPEN&key=4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E6D218144511F3437&__acm__=1524690407_a6f0908759ebcbdcb90ed0cfa942743c|tag|http://www.semanlink.net/tag/mirek_sopek
+http://delivery.acm.org/10.1145/3200000/3191554/p1171-sopek.html?ip=37.71.228.186&id=3191554&acc=OPEN&key=4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E6D218144511F3437&__acm__=1524690407_a6f0908759ebcbdcb90ed0cfa942743c|title|GraphChain – A Distributed Database with Explicit Semantics and Chained RDF Graphs
+http://delivery.acm.org/10.1145/3200000/3191554/p1171-sopek.html?ip=37.71.228.186&id=3191554&acc=OPEN&key=4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35%2E6D218144511F3437&__acm__=1524690407_a6f0908759ebcbdcb90ed0cfa942743c|creationTime|2018-04-25T23:03:51Z
+http://www.theguardian.com/politics/2014/jun/25/david-cameron-jean-claude-juncker-eu-summit|creationDate|2014-06-27
+http://www.theguardian.com/politics/2014/jun/25/david-cameron-jean-claude-juncker-eu-summit|tag|http://www.semanlink.net/tag/jean_claude_juncker
+http://www.theguardian.com/politics/2014/jun/25/david-cameron-jean-claude-juncker-eu-summit|tag|http://www.semanlink.net/tag/david_cameron
+http://www.theguardian.com/politics/2014/jun/25/david-cameron-jean-claude-juncker-eu-summit|tag|http://www.semanlink.net/tag/europe_and_uk
+http://www.theguardian.com/politics/2014/jun/25/david-cameron-jean-claude-juncker-eu-summit|tag|http://www.semanlink.net/tag/union_europeenne
+http://www.theguardian.com/politics/2014/jun/25/david-cameron-jean-claude-juncker-eu-summit|tag|http://www.semanlink.net/tag/commission_europeenne
+http://www.theguardian.com/politics/2014/jun/25/david-cameron-jean-claude-juncker-eu-summit|title|David Cameron faces defeat in Juncker row as EU summit begins The Guardian
+http://www.theguardian.com/politics/2014/jun/25/david-cameron-jean-claude-juncker-eu-summit|creationTime|2014-06-27T00:44:01Z
+http://www.meetup.com/paris-web-of-data/events/19464221/?a=md1p_grp&rv=md1p|creationDate|2011-06-08
+http://www.meetup.com/paris-web-of-data/events/19464221/?a=md1p_grp&rv=md1p|tag|http://www.semanlink.net/tag/interactive_knowledge_stack
+http://www.meetup.com/paris-web-of-data/events/19464221/?a=md1p_grp&rv=md1p|tag|http://www.semanlink.net/tag/meetup_web_semantique
+http://www.meetup.com/paris-web-of-data/events/19464221/?a=md1p_grp&rv=md1p|title|Rencontre avec Aldo Gangemi et Valentina Presutti - Les rencontres du Web de données (Paris) - Meetup
+http://www.meetup.com/paris-web-of-data/events/19464221/?a=md1p_grp&rv=md1p|creationTime|2011-06-08T22:35:00Z
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4505516.stm|creationDate|2006-07-20
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4505516.stm|tag|http://www.semanlink.net/tag/changement_climatique
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4505516.stm|tag|http://www.semanlink.net/tag/eve_africaine
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4505516.stm|comment|Scientists have identified a major climate crisis that struck Africa about 70,000 years ago and which may have changed the course of human history.
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4505516.stm|title|BBC NEWS Ancient drought 'changed history' BBC NEWS Science/Nature Ancient drought 'changed history'
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4505516.stm|source|BBC
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4505516.stm|date|2005-12-07
+http://osxdaily.com/2012/05/22/install-wget-mac-os-x/|creationDate|2018-05-16
+http://osxdaily.com/2012/05/22/install-wget-mac-os-x/|tag|http://www.semanlink.net/tag/installing_apps
+http://osxdaily.com/2012/05/22/install-wget-mac-os-x/|tag|http://www.semanlink.net/tag/mac_os_x
+http://osxdaily.com/2012/05/22/install-wget-mac-os-x/|title|Install wget in Mac OS X Without Homebrew or MacPorts
+http://osxdaily.com/2012/05/22/install-wget-mac-os-x/|creationTime|2018-05-16T23:46:13Z
+http://www.linux.com/feature/144853|creationDate|2008-08-29
+http://www.linux.com/feature/144853|tag|http://www.semanlink.net/tag/kde
+http://www.linux.com/feature/144853|tag|http://www.semanlink.net/tag/nepomuk
+http://www.linux.com/feature/144853|title|Linux.com :: Nepomuk and KDE to introduce the semantic desktop
+http://www.linux.com/feature/144853|creationTime|2008-08-29T12:05:57Z
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5196362.stm|creationDate|2006-07-20
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5196362.stm|tag|http://www.semanlink.net/tag/meteorite
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5196362.stm|tag|http://www.semanlink.net/tag/toutankhamon
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5196362.stm|title|BBC NEWS Tut's gem hints at space impact
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5196362.stm|source|BBC
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5196362.stm|date|2006-07-19
+http://marc.info/?l=tomcat-user&m=121762788714431&w=2|creationDate|2008-10-12
+http://marc.info/?l=tomcat-user&m=121762788714431&w=2|tag|http://www.semanlink.net/tag/uri_encoding
+http://marc.info/?l=tomcat-user&m=121762788714431&w=2|title|'Re: request parameters mishandle utf-8 encoding' - MARC
+http://marc.info/?l=tomcat-user&m=121762788714431&w=2|creationTime|2008-10-12T03:30:56Z +http://foreignpolicy.com/2015/09/14/this-is-your-brain-this-is-your-brain-as-a-weapon-darpa-dual-use-neuroscience/|creationDate|2015-09-19 +http://foreignpolicy.com/2015/09/14/this-is-your-brain-this-is-your-brain-as-a-weapon-darpa-dual-use-neuroscience/|tag|http://www.semanlink.net/tag/cerveau +http://foreignpolicy.com/2015/09/14/this-is-your-brain-this-is-your-brain-as-a-weapon-darpa-dual-use-neuroscience/|tag|http://www.semanlink.net/tag/neuroscience +http://foreignpolicy.com/2015/09/14/this-is-your-brain-this-is-your-brain-as-a-weapon-darpa-dual-use-neuroscience/|title|This Is Your Brain. This Is Your Brain as a Weapon. Foreign Policy +http://foreignpolicy.com/2015/09/14/this-is-your-brain-this-is-your-brain-as-a-weapon-darpa-dual-use-neuroscience/|creationTime|2015-09-19T03:20:21Z +http://hardware.slashdot.org/story/11/12/31/2022225/best-software-for-putting-lectures-online?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29|creationDate|2012-01-02 +http://hardware.slashdot.org/story/11/12/31/2022225/best-software-for-putting-lectures-online?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29|tag|http://www.semanlink.net/tag/e_learning +http://hardware.slashdot.org/story/11/12/31/2022225/best-software-for-putting-lectures-online?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29|title|Best Software For Putting Lectures Online? - Slashdot +http://hardware.slashdot.org/story/11/12/31/2022225/best-software-for-putting-lectures-online?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29|creationTime|2012-01-02T09:49:48Z +http://www.nytimes.com/2013/03/17/opinion/sunday/how-beer-gave-us-civilization.html?_r=0|creationDate|2013-03-18 +http://www.nytimes.com/2013/03/17/opinion/sunday/how-beer-gave-us-civilization.html?_r=0|tag|http://www.semanlink.net/tag/beer +http://www.nytimes.com/2013/03/17/opinion/sunday/how-beer-gave-us-civilization.html?_r=0|tag|http://www.semanlink.net/tag/prehistoire +http://www.nytimes.com/2013/03/17/opinion/sunday/how-beer-gave-us-civilization.html?_r=0|comment|With the help of the new psychopharmacological brew, humans could quell the angst of defying those herd instincts. Conversations around the campfire, no doubt, took on a new dimension: the painfully shy, their angst suddenly quelled, could now speak their minds. 
+http://www.nytimes.com/2013/03/17/opinion/sunday/how-beer-gave-us-civilization.html?_r=0|title|How Beer Gave Us Civilization - NYTimes.com +http://www.nytimes.com/2013/03/17/opinion/sunday/how-beer-gave-us-civilization.html?_r=0|creationTime|2013-03-18T22:18:37Z +https://www.bbc.com/news/world-europe-47941794|creationDate|2019-04-15 +https://www.bbc.com/news/world-europe-47941794|tag|http://www.semanlink.net/tag/notre_dame_de_paris +https://www.bbc.com/news/world-europe-47941794|title|Notre-Dame cathedral: Firefighters tackle blaze in Paris - BBC News +https://www.bbc.com/news/world-europe-47941794|creationTime|2019-04-15T23:07:22Z +http://www.slideshare.net/lanthaler/from-strings-to-things-to-a-web-of-services|creationDate|2016-10-07 +http://www.slideshare.net/lanthaler/from-strings-to-things-to-a-web-of-services|tag|http://www.semanlink.net/tag/slideshare +http://www.slideshare.net/lanthaler/from-strings-to-things-to-a-web-of-services|tag|http://www.semanlink.net/tag/json_ld +http://www.slideshare.net/lanthaler/from-strings-to-things-to-a-web-of-services|tag|http://www.semanlink.net/tag/semantic_web_presentation +http://www.slideshare.net/lanthaler/from-strings-to-things-to-a-web-of-services|tag|http://www.semanlink.net/tag/hydra +http://www.slideshare.net/lanthaler/from-strings-to-things-to-a-web-of-services|tag|http://www.semanlink.net/tag/markus_lanthaler +http://www.slideshare.net/lanthaler/from-strings-to-things-to-a-web-of-services|title|From Strings to Things to a Web of Services +http://www.slideshare.net/lanthaler/from-strings-to-things-to-a-web-of-services|creationTime|2016-10-07T13:49:33Z +http://www.lemonde.fr/idees/article/2010/08/17/l-amour-de-soi-et-la-haine-des-autres_1399704_3232.html|creationDate|2010-08-18 +http://www.lemonde.fr/idees/article/2010/08/17/l-amour-de-soi-et-la-haine-des-autres_1399704_3232.html|tag|http://www.semanlink.net/tag/sarkozyland +http://www.lemonde.fr/idees/article/2010/08/17/l-amour-de-soi-et-la-haine-des-autres_1399704_3232.html|comment|"la République ""assure l'égalité devant la loi de tous les citoyens, sans distinction d'origine, de race ou de religion"". 
+" +http://www.lemonde.fr/idees/article/2010/08/17/l-amour-de-soi-et-la-haine-des-autres_1399704_3232.html|title|L'amour de soi et la haine des autres +http://www.lemonde.fr/idees/article/2010/08/17/l-amour-de-soi-et-la-haine-des-autres_1399704_3232.html|creationTime|2010-08-18T11:03:35Z +http://www.lemonde.fr/idees/article/2010/08/17/l-amour-de-soi-et-la-haine-des-autres_1399704_3232.html|source|Le Monde +http://www.lemonde.fr/idees/article/2010/08/17/l-amour-de-soi-et-la-haine-des-autres_1399704_3232.html|date|2010-08-18 +http://www.webdavsystem.com/ajax/programming/cross_origin_requests|creationDate|2013-02-06 +http://www.webdavsystem.com/ajax/programming/cross_origin_requests|tag|http://www.semanlink.net/tag/cross_origin_resource_sharing +http://www.webdavsystem.com/ajax/programming/cross_origin_requests|title|Cross-Origin Requests (CORS) in Internet Explorer, Firefox, Safari and Chrome +http://www.webdavsystem.com/ajax/programming/cross_origin_requests|creationTime|2013-02-06T16:59:30Z +https://www.w3.org/community/hydra/wiki/Collection_Design|creationDate|2015-02-18 +https://www.w3.org/community/hydra/wiki/Collection_Design|tag|http://www.semanlink.net/tag/hydra +https://www.w3.org/community/hydra/wiki/Collection_Design|title|Collection Design - Hydra Community Group +https://www.w3.org/community/hydra/wiki/Collection_Design|creationTime|2015-02-18T23:25:30Z +https://eng.uber.com/deep-neuroevolution/|creationDate|2017-12-19 +https://eng.uber.com/deep-neuroevolution/|tag|http://www.semanlink.net/tag/deep_learning +https://eng.uber.com/deep-neuroevolution/|tag|http://www.semanlink.net/tag/reinforcement_learning +https://eng.uber.com/deep-neuroevolution/|tag|http://www.semanlink.net/tag/uber +https://eng.uber.com/deep-neuroevolution/|tag|http://www.semanlink.net/tag/neuroevolution +https://eng.uber.com/deep-neuroevolution/|tag|http://www.semanlink.net/tag/evolutionary_algorithm +https://eng.uber.com/deep-neuroevolution/|comment|> a suite of five papers that support the emerging realization that neuroevolution, where neural networks are optimized through evolutionary algorithms, is also an effective method to train deep neural networks for reinforcement learning (RL) problems. 
+https://eng.uber.com/deep-neuroevolution/|title|Welcoming the Era of Deep Neuroevolution - Uber Engineering Blog +https://eng.uber.com/deep-neuroevolution/|creationTime|2017-12-19T09:26:01Z +http://news.bbc.co.uk/1/hi/sci/tech/4857868.stm|creationDate|2006-04-09 +http://news.bbc.co.uk/1/hi/sci/tech/4857868.stm|tag|http://www.semanlink.net/tag/craig_venter +http://news.bbc.co.uk/1/hi/sci/tech/4857868.stm|tag|http://www.semanlink.net/tag/artificial_life +http://news.bbc.co.uk/1/hi/sci/tech/4857868.stm|tag|http://www.semanlink.net/tag/genetique +http://news.bbc.co.uk/1/hi/sci/tech/4857868.stm|tag|http://www.semanlink.net/tag/what_is_life +http://news.bbc.co.uk/1/hi/sci/tech/4857868.stm|title|BBC NEWS 'More genes' needed to make life +http://news.bbc.co.uk/1/hi/sci/tech/4857868.stm|source|BBC +http://link.springer.com/chapter/10.1007%2F978-3-319-11964-9_17|creationDate|2014-10-23 +http://link.springer.com/chapter/10.1007%2F978-3-319-11964-9_17|tag|http://www.semanlink.net/tag/iswc +http://link.springer.com/chapter/10.1007%2F978-3-319-11964-9_17|tag|http://www.semanlink.net/tag/peter_patel_schneider +http://link.springer.com/chapter/10.1007%2F978-3-319-11964-9_17|tag|http://www.semanlink.net/tag/schema_org +http://link.springer.com/chapter/10.1007%2F978-3-319-11964-9_17|comment|Schema.org is a way to add machine-understandable information to web pages that is processed by the major search engines to improve search performance. The definition of schema.org is provided as a set of web pages plus a partial mapping into RDF triples with unusual properties, and is incomplete in a number of places. This analysis of and formal semantics for schema.org provides a complete basis for a plausible version of what schema.org should be. +http://link.springer.com/chapter/10.1007%2F978-3-319-11964-9_17|title|Analyzing Schema.org - Peter F. 
Patel-Schneider - ISWC 2014 +http://link.springer.com/chapter/10.1007%2F978-3-319-11964-9_17|creationTime|2014-10-23T23:43:41Z +https://github.com/linkeddata/rdflib.js/|creationDate|2013-07-15 +https://github.com/linkeddata/rdflib.js/|tag|http://www.semanlink.net/tag/tabulator +https://github.com/linkeddata/rdflib.js/|tag|http://www.semanlink.net/tag/linked_data_api +https://github.com/linkeddata/rdflib.js/|tag|http://www.semanlink.net/tag/javascript +https://github.com/linkeddata/rdflib.js/|title|Linked Data API for JavaScript +https://github.com/linkeddata/rdflib.js/|creationTime|2013-07-15T14:39:21Z +http://www.mkbergman.com/1773/big-structure-at-the-nexus-of-knowledge-bases-the-semantic-web-and-artificial-intelligence/|creationDate|2014-07-24 +http://www.mkbergman.com/1773/big-structure-at-the-nexus-of-knowledge-bases-the-semantic-web-and-artificial-intelligence/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/1773/big-structure-at-the-nexus-of-knowledge-bases-the-semantic-web-and-artificial-intelligence/|tag|http://www.semanlink.net/tag/semantic_web +http://www.mkbergman.com/1773/big-structure-at-the-nexus-of-knowledge-bases-the-semantic-web-and-artificial-intelligence/|tag|http://www.semanlink.net/tag/knowledge_based_ai +http://www.mkbergman.com/1773/big-structure-at-the-nexus-of-knowledge-bases-the-semantic-web-and-artificial-intelligence/|tag|http://www.semanlink.net/tag/data_interoperability +http://www.mkbergman.com/1773/big-structure-at-the-nexus-of-knowledge-bases-the-semantic-web-and-artificial-intelligence/|comment|"Neither the semantic Web nor linked data has developed the practices, tooling or experience to actually interoperate data across the Web... Where it is compliant and from authoritative information sources, linked data can be a gold standard in data publishing. But, linked data is neither necessary nor essential, and may even be a diversion if it sucks the air from the room for what is more broadly useful.
+Interoperability requires reference structures, what we are calling Big Structure.
+Most analysts see firms that are actively pursuing data integration innovations as forward-thinking and more competitive.
+The semantic Web, in our view, is properly understood as a sub-domain of artificial intelligence. Semantic technologies mesh smoothly with natural language tasks and objectives. But, as we noted in a recent review article, artificial intelligence is itself undergoing a renaissance. These advances are coming about because of the use of knowledge-based AI (KBAI), which combines knowledge bases with machine learning and other AI approaches. Natural language and spoken interfaces combined with background knowledge and a few machine-language utilities are what underlie Apple’s Siri, for example.
+" +http://www.mkbergman.com/1773/big-structure-at-the-nexus-of-knowledge-bases-the-semantic-web-and-artificial-intelligence/|title|Big Structure: At The Nexus of Knowledge Bases, the Semantic Web and Artificial Intelligence AI3:::Adaptive Information +http://www.mkbergman.com/1773/big-structure-at-the-nexus-of-knowledge-bases-the-semantic-web-and-artificial-intelligence/|creationTime|2014-07-24T23:21:08Z +http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers|creationDate|2008-02-04 +http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers|tag|http://www.semanlink.net/tag/http_cache +http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers|title|The Fishbowl: HTTP Conditional Get for RSS Hackers +http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers|creationTime|2008-02-04T16:20:43Z +http://www.xml.com/lpt/a/2005/12/21/json-dynamic-script-tag.html|creationDate|2006-02-07 +http://www.xml.com/lpt/a/2005/12/21/json-dynamic-script-tag.html|tag|http://www.semanlink.net/tag/json +http://www.xml.com/lpt/a/2005/12/21/json-dynamic-script-tag.html|tag|http://www.semanlink.net/tag/web_services_for_javascript +http://www.xml.com/lpt/a/2005/12/21/json-dynamic-script-tag.html|title|XML.com: JSON and the Dynamic Script Tag: Easy, XML-less Web Services for JavaScript +https://fr.wikipedia.org/wiki/Guerre_du_P%C3%A9loponn%C3%A8se|creationDate|2017-07-02 +https://fr.wikipedia.org/wiki/Guerre_du_P%C3%A9loponn%C3%A8se|tag|http://www.semanlink.net/tag/xenophon +https://fr.wikipedia.org/wiki/Guerre_du_P%C3%A9loponn%C3%A8se|tag|http://www.semanlink.net/tag/athenes +https://fr.wikipedia.org/wiki/Guerre_du_P%C3%A9loponn%C3%A8se|tag|http://www.semanlink.net/tag/sparte +https://fr.wikipedia.org/wiki/Guerre_du_P%C3%A9loponn%C3%A8se|tag|http://www.semanlink.net/tag/citation +https://fr.wikipedia.org/wiki/Guerre_du_P%C3%A9loponn%C3%A8se|tag|http://www.semanlink.net/tag/thucydide +https://fr.wikipedia.org/wiki/Guerre_du_P%C3%A9loponn%C3%A8se|comment|"> les Lacédémoniens déclarèrent qu'ils ne réduiraient pas en servitude une ville grecque qui avait rendu un grand service à la Grèce, quand elle était menacée des plus grands dangers + +Xénophon" +https://fr.wikipedia.org/wiki/Guerre_du_P%C3%A9loponn%C3%A8se|title|Guerre du Péloponnèse — Wikipédia +https://fr.wikipedia.org/wiki/Guerre_du_P%C3%A9loponn%C3%A8se|creationTime|2017-07-02T13:14:40Z +https://www.tulevaisuustalo.fi/en/articles/basic-income-new-universalism/|creationDate|2017-02-11 +https://www.tulevaisuustalo.fi/en/articles/basic-income-new-universalism/|tag|http://www.semanlink.net/tag/guaranteed_basic_income +https://www.tulevaisuustalo.fi/en/articles/basic-income-new-universalism/|title|Basic income and the new universalism - Tulevaisuustalo +https://www.tulevaisuustalo.fi/en/articles/basic-income-new-universalism/|creationTime|2017-02-11T19:38:47Z +https://www.quora.com/How-does-one-apply-deep-learning-to-time-series-forecasting|creationDate|2017-10-22 +https://www.quora.com/How-does-one-apply-deep-learning-to-time-series-forecasting|tag|http://www.semanlink.net/tag/recurrent_neural_network +https://www.quora.com/How-does-one-apply-deep-learning-to-time-series-forecasting|tag|http://www.semanlink.net/tag/yoshua_bengio +https://www.quora.com/How-does-one-apply-deep-learning-to-time-series-forecasting|tag|http://www.semanlink.net/tag/time_series +https://www.quora.com/How-does-one-apply-deep-learning-to-time-series-forecasting|comment|> I would use the state-of-the-art 
[recurrent nets](/tag/recurrent_neural_network.html) (using gated units and multiple layers) to make predictions at each time step for some future horizon of interest. The RNN is then updated with the next observation to be ready for making the next prediction +https://www.quora.com/How-does-one-apply-deep-learning-to-time-series-forecasting|title|How does one apply deep learning to time series forecasting? - Quora +https://www.quora.com/How-does-one-apply-deep-learning-to-time-series-forecasting|creationTime|2017-10-22T13:45:32Z +http://net.tutsplus.com/tutorials/ruby/how-to-install-ruby-on-a-mac/|creationDate|2013-08-22 +http://net.tutsplus.com/tutorials/ruby/how-to-install-ruby-on-a-mac/|tag|http://www.semanlink.net/tag/ruby +http://net.tutsplus.com/tutorials/ruby/how-to-install-ruby-on-a-mac/|tag|http://www.semanlink.net/tag/mac_os_x_tip +http://net.tutsplus.com/tutorials/ruby/how-to-install-ruby-on-a-mac/|tag|http://www.semanlink.net/tag/howto +http://net.tutsplus.com/tutorials/ruby/how-to-install-ruby-on-a-mac/|title|How to install Ruby on Mac Nettuts+ +http://net.tutsplus.com/tutorials/ruby/how-to-install-ruby-on-a-mac/|creationTime|2013-08-22T15:09:19Z +http://www-128.ibm.com/developerworks/xml/library/j-sparql/|creationDate|2007-04-20 +http://www-128.ibm.com/developerworks/xml/library/j-sparql/|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www-128.ibm.com/developerworks/xml/library/j-sparql/|tag|http://www.semanlink.net/tag/sparql_and_jena +http://www-128.ibm.com/developerworks/xml/library/j-sparql/|title|Search RDF data with SPARQL (and Jena) +http://www-128.ibm.com/developerworks/xml/library/j-sparql/|creationTime|2007-04-20T20:58:42Z +http://www.wired.com/reviews/2012/10/infotainment-systems/?pid=2696&viewall=true|creationDate|2012-11-30 +http://www.wired.com/reviews/2012/10/infotainment-systems/?pid=2696&viewall=true|tag|http://www.semanlink.net/tag/automobile +http://www.wired.com/reviews/2012/10/infotainment-systems/?pid=2696&viewall=true|title|Infotainment Systems Product Reviews Wired.com +http://www.wired.com/reviews/2012/10/infotainment-systems/?pid=2696&viewall=true|creationTime|2012-11-30T22:01:58Z +https://docs.google.com/spreadsheet/ccc?key=0ArCcY-3xSj-vdGVnYVM5bWViVHhlLUlkUTNISUdmYmc#gid=0|creationDate|2012-11-26 +https://docs.google.com/spreadsheet/ccc?key=0ArCcY-3xSj-vdGVnYVM5bWViVHhlLUlkUTNISUdmYmc#gid=0|tag|http://www.semanlink.net/tag/edf +https://docs.google.com/spreadsheet/ccc?key=0ArCcY-3xSj-vdGVnYVM5bWViVHhlLUlkUTNISUdmYmc#gid=0|tag|http://www.semanlink.net/tag/semantic_web +https://docs.google.com/spreadsheet/ccc?key=0ArCcY-3xSj-vdGVnYVM5bWViVHhlLUlkUTNISUdmYmc#gid=0|title|Programme Séminaire EDF Web Sémantique +https://docs.google.com/spreadsheet/ccc?key=0ArCcY-3xSj-vdGVnYVM5bWViVHhlLUlkUTNISUdmYmc#gid=0|creationTime|2012-11-26T16:41:21Z +https://towardsdatascience.com/a-short-introduction-to-nlp-in-python-with-spacy-d0aa819af3ad|creationDate|2018-04-05 +https://towardsdatascience.com/a-short-introduction-to-nlp-in-python-with-spacy-d0aa819af3ad|tag|http://www.semanlink.net/tag/introduction +https://towardsdatascience.com/a-short-introduction-to-nlp-in-python-with-spacy-d0aa819af3ad|tag|http://www.semanlink.net/tag/spacy +https://towardsdatascience.com/a-short-introduction-to-nlp-in-python-with-spacy-d0aa819af3ad|title|A short introduction to NLP in Python with spaCy – Towards Data Science +https://towardsdatascience.com/a-short-introduction-to-nlp-in-python-with-spacy-d0aa819af3ad|creationTime|2018-04-05T01:50:39Z 
+https://global.handelsblatt.com/politics/germany-mass-surveillance-social-credit-china-big-data-886786|creationDate|2018-02-22 +https://global.handelsblatt.com/politics/germany-mass-surveillance-social-credit-china-big-data-886786|tag|http://www.semanlink.net/tag/allemagne +https://global.handelsblatt.com/politics/germany-mass-surveillance-social-credit-china-big-data-886786|tag|http://www.semanlink.net/tag/china_s_social_credit_system +https://global.handelsblatt.com/politics/germany-mass-surveillance-social-credit-china-big-data-886786|title|Warning: Germany edges toward Chinese-style rating of citizens +https://global.handelsblatt.com/politics/germany-mass-surveillance-social-credit-china-big-data-886786|creationTime|2018-02-22T13:27:50Z +http://rubenverborgh.github.io/WebFundamentals/web-apis/#|creationDate|2017-03-02 +http://rubenverborgh.github.io/WebFundamentals/web-apis/#|tag|http://www.semanlink.net/tag/web_apis +http://rubenverborgh.github.io/WebFundamentals/web-apis/#|tag|http://www.semanlink.net/tag/ruben_verborgh +http://rubenverborgh.github.io/WebFundamentals/web-apis/#|tag|http://www.semanlink.net/tag/hydra +http://rubenverborgh.github.io/WebFundamentals/web-apis/#|tag|http://www.semanlink.net/tag/rest +http://rubenverborgh.github.io/WebFundamentals/web-apis/#|tag|http://www.semanlink.net/tag/slides +http://rubenverborgh.github.io/WebFundamentals/web-apis/#|title|Web APIs +http://rubenverborgh.github.io/WebFundamentals/web-apis/#|creationTime|2017-03-02T12:25:50Z +https://www.youtube.com/watch?v=GBPZAQfs6JY|creationDate|2017-01-06 +https://www.youtube.com/watch?v=GBPZAQfs6JY|tag|http://www.semanlink.net/tag/music_of_africa +https://www.youtube.com/watch?v=GBPZAQfs6JY|comment|"ici aussi" +https://www.youtube.com/watch?v=GBPZAQfs6JY|title|"Acoustic Africa, ""Mayole"" (Africa Festival 2013)" +https://www.youtube.com/watch?v=GBPZAQfs6JY|creationTime|2017-01-06T14:05:58Z +http://news.bbc.co.uk/2/hi/science/nature/8027269.stm|creationDate|2009-05-06 +http://news.bbc.co.uk/2/hi/science/nature/8027269.stm|tag|http://www.semanlink.net/tag/histoire_de_l_afrique +http://news.bbc.co.uk/2/hi/science/nature/8027269.stm|tag|http://www.semanlink.net/tag/genetique_humaine +http://news.bbc.co.uk/2/hi/science/nature/8027269.stm|title|BBC NEWS Africa's genetic secrets unlocked +http://news.bbc.co.uk/2/hi/science/nature/8027269.stm|creationTime|2009-05-06T14:39:26Z +http://www.mkbergman.com/962/structured-web-gets-massive-boost/|creationDate|2011-06-07 +http://www.mkbergman.com/962/structured-web-gets-massive-boost/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/962/structured-web-gets-massive-boost/|tag|http://www.semanlink.net/tag/microdata +http://www.mkbergman.com/962/structured-web-gets-massive-boost/|title|Structured Web Gets Massive Boost » AI3:::Adaptive Information +http://www.mkbergman.com/962/structured-web-gets-massive-boost/|creationTime|2011-06-07T15:50:02Z +http://gigaom.com/2013/11/03/three-reasons-why-the-semantic-web-has-failed/|creationDate|2014-02-03 +http://gigaom.com/2013/11/03/three-reasons-why-the-semantic-web-has-failed/|tag|http://www.semanlink.net/tag/synaptic_web +http://gigaom.com/2013/11/03/three-reasons-why-the-semantic-web-has-failed/|tag|http://www.semanlink.net/tag/sw_has_failed +http://gigaom.com/2013/11/03/three-reasons-why-the-semantic-web-has-failed/|title|Three reasons why the Semantic Web has failed — Tech News and Analysis 
+http://gigaom.com/2013/11/03/three-reasons-why-the-semantic-web-has-failed/|creationTime|2014-02-03T22:53:41Z +https://github.com/Graphity/graphity-client/blob/master/src/main/webapp/static/org/graphity/client/xsl/rdfxml2json-ld.xsl|creationDate|2015-05-25 +https://github.com/Graphity/graphity-client/blob/master/src/main/webapp/static/org/graphity/client/xsl/rdfxml2json-ld.xsl|tag|http://www.semanlink.net/tag/xslt +https://github.com/Graphity/graphity-client/blob/master/src/main/webapp/static/org/graphity/client/xsl/rdfxml2json-ld.xsl|tag|http://www.semanlink.net/tag/json_ld +https://github.com/Graphity/graphity-client/blob/master/src/main/webapp/static/org/graphity/client/xsl/rdfxml2json-ld.xsl|tag|http://www.semanlink.net/tag/jena +https://github.com/Graphity/graphity-client/blob/master/src/main/webapp/static/org/graphity/client/xsl/rdfxml2json-ld.xsl|title|An XSLT stylesheet transforming Jena's RDF/XML format to JSON-LD. +https://github.com/Graphity/graphity-client/blob/master/src/main/webapp/static/org/graphity/client/xsl/rdfxml2json-ld.xsl|creationTime|2015-05-25T10:58:42Z +http://www.liberation.fr/monde/2015/02/17/au-niger-les-predicateurs-remplissent-le-vide-laisse-par-l-etat_1204555|creationDate|2015-02-18 +http://www.liberation.fr/monde/2015/02/17/au-niger-les-predicateurs-remplissent-le-vide-laisse-par-l-etat_1204555|tag|http://www.semanlink.net/tag/niger +http://www.liberation.fr/monde/2015/02/17/au-niger-les-predicateurs-remplissent-le-vide-laisse-par-l-etat_1204555|tag|http://www.semanlink.net/tag/boko_haram +http://www.liberation.fr/monde/2015/02/17/au-niger-les-predicateurs-remplissent-le-vide-laisse-par-l-etat_1204555|title|«Au Niger, les prédicateurs remplissent le vide laissé par l'Etat» - Libération +http://www.liberation.fr/monde/2015/02/17/au-niger-les-predicateurs-remplissent-le-vide-laisse-par-l-etat_1204555|creationTime|2015-02-18T13:43:58Z +http://rterp.wordpress.com/2012/03/16/stamping-version-number-and-build-time-in-properties-file-with-maven/|creationDate|2013-09-23 +http://rterp.wordpress.com/2012/03/16/stamping-version-number-and-build-time-in-properties-file-with-maven/|tag|http://www.semanlink.net/tag/maven +http://rterp.wordpress.com/2012/03/16/stamping-version-number-and-build-time-in-properties-file-with-maven/|title|Stamping Version Number and Build Time in a Properties File with Maven Rob's Blog +http://rterp.wordpress.com/2012/03/16/stamping-version-number-and-build-time-in-properties-file-with-maven/|creationTime|2013-09-23T00:43:34Z +http://www.lemonde.fr/economie/article/2010/11/23/de-la-grece-a-l-irlande-des-strategies-economiques-illusoires_1443521_3234.html|creationDate|2010-11-23 +http://www.lemonde.fr/economie/article/2010/11/23/de-la-grece-a-l-irlande-des-strategies-economiques-illusoires_1443521_3234.html|tag|http://www.semanlink.net/tag/irlande +http://www.lemonde.fr/economie/article/2010/11/23/de-la-grece-a-l-irlande-des-strategies-economiques-illusoires_1443521_3234.html|tag|http://www.semanlink.net/tag/grece +http://www.lemonde.fr/economie/article/2010/11/23/de-la-grece-a-l-irlande-des-strategies-economiques-illusoires_1443521_3234.html|tag|http://www.semanlink.net/tag/crise_financiere +http://www.lemonde.fr/economie/article/2010/11/23/de-la-grece-a-l-irlande-des-strategies-economiques-illusoires_1443521_3234.html|title|De la Grèce à l'Irlande, des stratégies économiques illusoires 
+http://www.lemonde.fr/economie/article/2010/11/23/de-la-grece-a-l-irlande-des-strategies-economiques-illusoires_1443521_3234.html|creationTime|2010-11-23T17:33:58Z +http://www.lemonde.fr/economie/article/2010/11/23/de-la-grece-a-l-irlande-des-strategies-economiques-illusoires_1443521_3234.html|source|Le Monde +http://www.lemonde.fr/economie/article/2010/11/23/de-la-grece-a-l-irlande-des-strategies-economiques-illusoires_1443521_3234.html|date|2010-11-23 +https://stackoverflow.com/questions/9663918/how-can-i-tag-and-chunk-french-text-using-nltk-and-python?rq=1|creationDate|2017-06-29 +https://stackoverflow.com/questions/9663918/how-can-i-tag-and-chunk-french-text-using-nltk-and-python?rq=1|tag|http://www.semanlink.net/tag/part_of_speech_tagging +https://stackoverflow.com/questions/9663918/how-can-i-tag-and-chunk-french-text-using-nltk-and-python?rq=1|tag|http://www.semanlink.net/tag/nltk +https://stackoverflow.com/questions/9663918/how-can-i-tag-and-chunk-french-text-using-nltk-and-python?rq=1|tag|http://www.semanlink.net/tag/nlp_french +https://stackoverflow.com/questions/9663918/how-can-i-tag-and-chunk-french-text-using-nltk-and-python?rq=1|title|How can I tag and chunk French text using NLTK and Python? - Stack Overflow +https://stackoverflow.com/questions/9663918/how-can-i-tag-and-chunk-french-text-using-nltk-and-python?rq=1|creationTime|2017-06-29T11:54:27Z +http://blog-s.greenpeace.fr/uploads/2009/12/affiche.jpg|creationDate|2009-12-19 +http://blog-s.greenpeace.fr/uploads/2009/12/affiche.jpg|tag|http://www.semanlink.net/tag/greenpeace +http://blog-s.greenpeace.fr/uploads/2009/12/affiche.jpg|tag|http://www.semanlink.net/tag/sarkozy +http://blog-s.greenpeace.fr/uploads/2009/12/affiche.jpg|tag|http://www.semanlink.net/tag/deforestation +http://blog-s.greenpeace.fr/uploads/2009/12/affiche.jpg|title|Greenpeace: zéro déforestation d'ici 2020 +http://blog-s.greenpeace.fr/uploads/2009/12/affiche.jpg|creationTime|2009-12-19T11:47:59Z +http://www.decafbad.com/twiki/bin/view/Main/AgentFrank|creationDate|2005-06-15 +http://www.decafbad.com/twiki/bin/view/Main/AgentFrank|tag|http://www.semanlink.net/tag/rdf +http://www.decafbad.com/twiki/bin/view/Main/AgentFrank|tag|http://www.semanlink.net/tag/internet_tool +http://www.decafbad.com/twiki/bin/view/Main/AgentFrank|comment|The goal of Agent Frank is to be a personal intelligent intermediary and companion to internet infovores during their daily hunter/gatherer excursions. +http://www.decafbad.com/twiki/bin/view/Main/AgentFrank|title|AgentFrank - Main - Wiki - 0xDECAFBAD +http://lists.w3.org/Archives/Public/public-lod/2013Jul/0096.html|creationDate|2013-07-15 +http://lists.w3.org/Archives/Public/public-lod/2013Jul/0096.html|tag|http://www.semanlink.net/tag/dan_brickley +http://lists.w3.org/Archives/Public/public-lod/2013Jul/0096.html|tag|http://www.semanlink.net/tag/schema_org +http://lists.w3.org/Archives/Public/public-lod/2013Jul/0096.html|title|Re: Schema.org Autodiscovery? 
from Dan Brickley on 2013-07-12 (public-lod@w3.org from July 2013) +http://lists.w3.org/Archives/Public/public-lod/2013Jul/0096.html|creationTime|2013-07-15T18:54:37Z +http://www.aclweb.org/anthology/N15-1099|creationDate|2018-06-08 +http://www.aclweb.org/anthology/N15-1099|tag|http://www.semanlink.net/tag/word_embedding_compositionality +http://www.aclweb.org/anthology/N15-1099|tag|http://www.semanlink.net/tag/word_embedding +http://www.aclweb.org/anthology/N15-1099|tag|http://www.semanlink.net/tag/multiword_expressions +http://www.aclweb.org/anthology/N15-1099|title|A Word Embedding Approach to Predicting the Compositionality of Multiword Expressions (2015) +http://www.aclweb.org/anthology/N15-1099|creationTime|2018-06-08T07:46:42Z +http://blog.heppresearch.com/2014/03/24/json-ld-finally-google-honors-invisible-data-for-seo/|creationDate|2014-03-26 +http://blog.heppresearch.com/2014/03/24/json-ld-finally-google-honors-invisible-data-for-seo/|tag|http://www.semanlink.net/tag/json_ld +http://blog.heppresearch.com/2014/03/24/json-ld-finally-google-honors-invisible-data-for-seo/|tag|http://www.semanlink.net/tag/martin_hepp +http://blog.heppresearch.com/2014/03/24/json-ld-finally-google-honors-invisible-data-for-seo/|tag|http://www.semanlink.net/tag/google +http://blog.heppresearch.com/2014/03/24/json-ld-finally-google-honors-invisible-data-for-seo/|tag|http://www.semanlink.net/tag/seo +http://blog.heppresearch.com/2014/03/24/json-ld-finally-google-honors-invisible-data-for-seo/|tag|http://www.semanlink.net/tag/schema_org +http://blog.heppresearch.com/2014/03/24/json-ld-finally-google-honors-invisible-data-for-seo/|title|JSON-LD: Finally, Google Honors Invisible Data for SEO The Hepp Research Blog on Marketing with Data +http://blog.heppresearch.com/2014/03/24/json-ld-finally-google-honors-invisible-data-for-seo/|creationTime|2014-03-26T14:07:27Z +http://www.hackdiary.com/archives/000070.html|creationDate|2005-09-05 +http://www.hackdiary.com/archives/000070.html|tag|http://www.semanlink.net/tag/dev +http://www.hackdiary.com/archives/000070.html|tag|http://www.semanlink.net/tag/semanlink_related +http://www.hackdiary.com/archives/000070.html|tag|http://www.semanlink.net/tag/yahoo +http://www.hackdiary.com/archives/000070.html|tag|http://www.semanlink.net/tag/wikipedia +http://www.hackdiary.com/archives/000070.html|title|hackdiary: Using Wikipedia and the Yahoo API to give structure to flat lists +http://bnode.org/blog/2009/06/12/commontag-too-complicated|creationDate|2009-06-15 +http://bnode.org/blog/2009/06/12/commontag-too-complicated|tag|http://www.semanlink.net/tag/common_tag +http://bnode.org/blog/2009/06/12/commontag-too-complicated|tag|http://www.semanlink.net/tag/benjamin_nowack +http://bnode.org/blog/2009/06/12/commontag-too-complicated|title|CommonTag too complicated? - benjamin nowack's blog +http://bnode.org/blog/2009/06/12/commontag-too-complicated|creationTime|2009-06-15T09:17:08Z +http://semwebdev.keithalexander.co.uk/blog/posts/abductive-template|creationDate|2007-07-04 +http://semwebdev.keithalexander.co.uk/blog/posts/abductive-template|tag|http://www.semanlink.net/tag/rdf_templating +http://semwebdev.keithalexander.co.uk/blog/posts/abductive-template|tag|http://www.semanlink.net/tag/semantic_markup_in_html +http://semwebdev.keithalexander.co.uk/blog/posts/abductive-template|comment|"Suppose you are developing an RDF-based web application that has these requirements: +
+
+  • The application has to display certain known types of resources in a particular way.
+  • The application has to accept and store types of resources that may be unknown to you, the developer.
+  • The application has to be smart enough to display those unknown types in a reasonable way.
" +http://semwebdev.keithalexander.co.uk/blog/posts/abductive-template|title|Abductive reasoning, template generation, and RDF-in-HTML +http://semwebdev.keithalexander.co.uk/blog/posts/abductive-template|creationTime|2007-07-04T22:10:47Z +http://www.mimul.com:80/pebble/default/2007/11/24/1195909680000.html|creationDate|2007-12-30 +http://www.mimul.com:80/pebble/default/2007/11/24/1195909680000.html|tag|http://www.semanlink.net/tag/linkto_semanlink +http://www.mimul.com:80/pebble/default/2007/11/24/1195909680000.html|title|semanlink 소개 - Mimul's Developer World +http://www.mimul.com:80/pebble/default/2007/11/24/1195909680000.html|creationTime|2007-12-30T22:21:55Z +http://www.boingboing.net/2005/11/01/hollywood_after_the_.html|creationDate|2005-11-02 +http://www.boingboing.net/2005/11/01/hollywood_after_the_.html|tag|http://www.semanlink.net/tag/mpaa +http://www.boingboing.net/2005/11/01/hollywood_after_the_.html|tag|http://www.semanlink.net/tag/drm +http://www.boingboing.net/2005/11/01/hollywood_after_the_.html|comment|Under a new proposed Analog Hole bill, it will be illegal to make anything capable of digitizing video unless it either has all its outputs approved by the Hollywood studios, or is closed-source, proprietary and tamper-resistant. The idea is to make it impossible to create an MPEG from a video signal unless Hollywood approves it. +http://www.boingboing.net/2005/11/01/hollywood_after_the_.html|title|Boing Boing: Hollywood after the Anal. Hole again +http://www.meetup.com/paris-web-of-data/calendar/15099450/|creationDate|2010-12-01 +http://www.meetup.com/paris-web-of-data/calendar/15099450/|tag|http://www.semanlink.net/tag/christian_faure +http://www.meetup.com/paris-web-of-data/calendar/15099450/|tag|http://www.semanlink.net/tag/gautier_poupeau +http://www.meetup.com/paris-web-of-data/calendar/15099450/|tag|http://www.semanlink.net/tag/meetup_web_semantique +http://www.meetup.com/paris-web-of-data/calendar/15099450/|title|Comment réconcilier le SI legacy et le Web par le Web sémantique ? 
- Les rencontres du Web de données (Paris) - Meetup +http://www.meetup.com/paris-web-of-data/calendar/15099450/|creationTime|2010-12-01T02:11:13Z +http://tomheath.com/papers/bizer-heath-berners-lee-ijswis-linked-data.pdf|creationDate|2011-03-08 +http://tomheath.com/papers/bizer-heath-berners-lee-ijswis-linked-data.pdf|tag|http://www.semanlink.net/tag/tom_heath +http://tomheath.com/papers/bizer-heath-berners-lee-ijswis-linked-data.pdf|tag|http://www.semanlink.net/tag/chris_bizer +http://tomheath.com/papers/bizer-heath-berners-lee-ijswis-linked-data.pdf|tag|http://www.semanlink.net/tag/linked_data +http://tomheath.com/papers/bizer-heath-berners-lee-ijswis-linked-data.pdf|tag|http://www.semanlink.net/tag/tim_berners_lee +http://tomheath.com/papers/bizer-heath-berners-lee-ijswis-linked-data.pdf|title|Linked Data - The Story So Far +http://tomheath.com/papers/bizer-heath-berners-lee-ijswis-linked-data.pdf|creationTime|2011-03-08T09:08:15Z +http://jondavidjohn.com/javascript-closure-explained-using-events/|creationDate|2014-10-14 +http://jondavidjohn.com/javascript-closure-explained-using-events/|tag|http://www.semanlink.net/tag/javascript_closures +http://jondavidjohn.com/javascript-closure-explained-using-events/|title|Javascript closure explained using events +http://jondavidjohn.com/javascript-closure-explained-using-events/|creationTime|2014-10-14T10:32:47Z +http://www.zotero.org/|creationDate|2007-06-09 +http://www.zotero.org/|tag|http://www.semanlink.net/tag/zotero +http://www.zotero.org/|tag|http://www.semanlink.net/tag/frederick_giasson +http://www.zotero.org/|tag|http://www.semanlink.net/tag/firefox_extension +http://www.zotero.org/|comment|Zotero [zoh-TAIR-oh] is a free, easy-to-use Firefox extension to help you collect, manage, and cite your research sources. It lives right where you do your work — in the web browser itself. +http://www.zotero.org/|title|Zotero - The Next-Generation Research Tool +http://www.zotero.org/|creationTime|2007-06-09T00:09:24Z +http://www.aaai.org/AITopics/html/welcome.html|creationDate|2006-10-09 +http://www.aaai.org/AITopics/html/welcome.html|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.aaai.org/AITopics/html/welcome.html|comment|"AI TOPICS is a special web site provided by the American Association for Artificial Intelligence [AAAI] for students, teachers, journalists, and everyone who would like to explore what artificial intelligence is, and what AI scientists do. +Our goal is to offer a limited number of exemplary, non-technical resources that we have organized and annotated to provide you with meaningful access to basic, understandable information about the AI universe." 
+http://www.aaai.org/AITopics/html/welcome.html|title|Welcome to AI Topics (American Association for Artificial Intelligence) +https://neo4j.com/blog/navigate-knowledge-spring-neo4j/?utm_content=buffer0812e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-02-04 +https://neo4j.com/blog/navigate-knowledge-spring-neo4j/?utm_content=buffer0812e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/neo4j +https://neo4j.com/blog/navigate-knowledge-spring-neo4j/?utm_content=buffer0812e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/wikipedia +https://neo4j.com/blog/navigate-knowledge-spring-neo4j/?utm_content=buffer0812e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Navigate All the Knowledge with Spring + Neo4j +https://neo4j.com/blog/navigate-knowledge-spring-neo4j/?utm_content=buffer0812e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-02-04T03:58:25Z +http://aclweb.org/anthology/Q16-1002|creationDate|2018-08-23 +http://aclweb.org/anthology/Q16-1002|tag|http://www.semanlink.net/tag/word_embeddings_with_lexical_resources +http://aclweb.org/anthology/Q16-1002|tag|http://www.semanlink.net/tag/yoshua_bengio +http://aclweb.org/anthology/Q16-1002|comment|> The composed meaning of the words in a dictionary definition (a tall, long-necked, spotted ruminant of Africa) should correspond to the meaning of the word they define (giraffe) +http://aclweb.org/anthology/Q16-1002|title|Learning to Understand Phrases by Embedding the Dictionary (2016) +http://aclweb.org/anthology/Q16-1002|creationTime|2018-08-23T22:28:38Z +http://news.yahoo.com/s/ap/20060525/ap_on_hi_te/honda_robot|creationDate|2006-05-26 +http://news.yahoo.com/s/ap/20060525/ap_on_hi_te/honda_robot|tag|http://www.semanlink.net/tag/thought_alone_controlled_device +http://news.yahoo.com/s/ap/20060525/ap_on_hi_te/honda_robot|tag|http://www.semanlink.net/tag/honda +http://news.yahoo.com/s/ap/20060525/ap_on_hi_te/honda_robot|comment|In a video demonstration in Tokyo, brain signals detected by a magnetic resonance imaging scanner were relayed to a robotic hand. A person in the MRI machine made a fist, spread his fingers and then made a V-sign. Several seconds later, a robotic hand mimicked the movements. +http://news.yahoo.com/s/ap/20060525/ap_on_hi_te/honda_robot|title|Honda says brain waves control robot +http://www.nytimes.com/2014/01/07/opinion/nocera-will-digital-networks-ruin-us.html|creationDate|2014-01-07 +http://www.nytimes.com/2014/01/07/opinion/nocera-will-digital-networks-ruin-us.html|tag|http://www.semanlink.net/tag/digital_economy +http://www.nytimes.com/2014/01/07/opinion/nocera-will-digital-networks-ruin-us.html|comment|"""A digital economy that appears to give things away for free — in return for being able to invade the privacy of its customers for commercial gain — isn’t free at all""" +http://www.nytimes.com/2014/01/07/opinion/nocera-will-digital-networks-ruin-us.html|title|Will Digital Networks Ruin Us? 
- NYTimes.com +http://www.nytimes.com/2014/01/07/opinion/nocera-will-digital-networks-ruin-us.html|creationTime|2014-01-07T17:46:01Z +http://news.bbc.co.uk/2/hi/science/nature/2975862.stm|creationDate|2009-05-06 +http://news.bbc.co.uk/2/hi/science/nature/2975862.stm|tag|http://www.semanlink.net/tag/origines_de_l_homme +http://news.bbc.co.uk/2/hi/science/nature/2975862.stm|tag|http://www.semanlink.net/tag/genetique_humaine +http://news.bbc.co.uk/2/hi/science/nature/2975862.stm|title|BBC NEWS Science/Nature When humans faced extinction +http://news.bbc.co.uk/2/hi/science/nature/2975862.stm|creationTime|2009-05-06T14:26:41Z +https://www.ina.fr/video/AFE85008572/a-t-on-oublie-ou-mene-la-croix-gammee-video.html|creationDate|2019-02-17 +https://www.ina.fr/video/AFE85008572/a-t-on-oublie-ou-mene-la-croix-gammee-video.html|tag|http://www.semanlink.net/tag/shoah +https://www.ina.fr/video/AFE85008572/a-t-on-oublie-ou-mene-la-croix-gammee-video.html|tag|http://www.semanlink.net/tag/antisemitisme +https://www.ina.fr/video/AFE85008572/a-t-on-oublie-ou-mene-la-croix-gammee-video.html|tag|http://www.semanlink.net/tag/video_ina_fr +https://www.ina.fr/video/AFE85008572/a-t-on-oublie-ou-mene-la-croix-gammee-video.html|comment|"> Vous voulez jouer avec la croix gammitte ? Regardez. Des millions d'hommes en sont morts... Vous voulez recommencer ? Vous êtes fous +" +https://www.ina.fr/video/AFE85008572/a-t-on-oublie-ou-mene-la-croix-gammee-video.html|title|A t'on oublié où mène la croix gammée ? - Vidéo Ina.fr +https://www.ina.fr/video/AFE85008572/a-t-on-oublie-ou-mene-la-croix-gammee-video.html|creationTime|2019-02-17T19:30:43Z +http://benlog.com/2008/06/19/dont-hash-secrets/|creationDate|2014-06-22 +http://benlog.com/2008/06/19/dont-hash-secrets/|tag|http://www.semanlink.net/tag/cryptography +http://benlog.com/2008/06/19/dont-hash-secrets/|tag|http://www.semanlink.net/tag/ben_adida +http://benlog.com/2008/06/19/dont-hash-secrets/|comment|So the next time you’re using a hash function on anything, ask yourself: is any of the stuff I’m hashing supposed to stay secret? If so, don’t hash. Instead, use HMAC. 
+http://benlog.com/2008/06/19/dont-hash-secrets/|title|Don’t Hash Secrets Benlog +http://benlog.com/2008/06/19/dont-hash-secrets/|creationTime|2014-06-22T22:45:58Z +http://appuirwanda.free.fr/article.php3?id_article=20|creationDate|2006-12-30 +http://appuirwanda.free.fr/article.php3?id_article=20|tag|http://www.semanlink.net/tag/romeo_dallaire +http://appuirwanda.free.fr/article.php3?id_article=20|tag|http://www.semanlink.net/tag/genocide_rwandais +http://appuirwanda.free.fr/article.php3?id_article=20|comment|Documentaire +http://appuirwanda.free.fr/article.php3?id_article=20|title|Roméo Dallaire, le dernier des justes +https://www.theguardian.com/commentisfree/2018/apr/29/in-charge-our-own-data-personal-information-facebook-scandal?utm_source=esp&utm_medium=Email&utm_campaign=GU+Today+main+NEW+H+categories&utm_term=273132&subid=8643697&CMP=EMCNEWEML6619I2|creationDate|2018-04-29 +https://www.theguardian.com/commentisfree/2018/apr/29/in-charge-our-own-data-personal-information-facebook-scandal?utm_source=esp&utm_medium=Email&utm_campaign=GU+Today+main+NEW+H+categories&utm_term=273132&subid=8643697&CMP=EMCNEWEML6619I2|tag|http://www.semanlink.net/tag/data_ownership +https://www.theguardian.com/commentisfree/2018/apr/29/in-charge-our-own-data-personal-information-facebook-scandal?utm_source=esp&utm_medium=Email&utm_campaign=GU+Today+main+NEW+H+categories&utm_term=273132&subid=8643697&CMP=EMCNEWEML6619I2|tag|http://www.semanlink.net/tag/privacy_and_internet +https://www.theguardian.com/commentisfree/2018/apr/29/in-charge-our-own-data-personal-information-facebook-scandal?utm_source=esp&utm_medium=Email&utm_campaign=GU+Today+main+NEW+H+categories&utm_term=273132&subid=8643697&CMP=EMCNEWEML6619I2|tag|http://www.semanlink.net/tag/solid +https://www.theguardian.com/commentisfree/2018/apr/29/in-charge-our-own-data-personal-information-facebook-scandal?utm_source=esp&utm_medium=Email&utm_campaign=GU+Today+main+NEW+H+categories&utm_term=273132&subid=8643697&CMP=EMCNEWEML6619I2|title|Who should hold the keys to our data? Nigel Shadbolt and Roger Hampson Opinion The Guardian +https://www.theguardian.com/commentisfree/2018/apr/29/in-charge-our-own-data-personal-information-facebook-scandal?utm_source=esp&utm_medium=Email&utm_campaign=GU+Today+main+NEW+H+categories&utm_term=273132&subid=8643697&CMP=EMCNEWEML6619I2|creationTime|2018-04-29T13:16:48Z +http://news.bbc.co.uk/1/hi/sci/tech/4102780.stm|creationDate|2005-06-23 +http://news.bbc.co.uk/1/hi/sci/tech/4102780.stm|tag|http://www.semanlink.net/tag/desertification +http://news.bbc.co.uk/1/hi/sci/tech/4102780.stm|tag|http://www.semanlink.net/tag/rechauffement_climatique +http://news.bbc.co.uk/1/hi/sci/tech/4102780.stm|comment|Drylands cover 41% of the planet's land surface, and are growing. They are home to over two billion people, including the world's most impoverished, in areas such as central Asia and northern Africa. 
+http://news.bbc.co.uk/1/hi/sci/tech/4102780.stm|title|BBC NEWS Science/Nature World's dry regions set to expand +http://news.bbc.co.uk/1/hi/sci/tech/4102780.stm|source|BBC +http://www.readwriteweb.com/archives/the_best_tools_for_visualization.php|creationDate|2008-03-17 +http://www.readwriteweb.com/archives/the_best_tools_for_visualization.php|tag|http://www.semanlink.net/tag/data_visualization_tools +http://www.readwriteweb.com/archives/the_best_tools_for_visualization.php|title|The Best Tools for Visualization - ReadWriteWeb +http://www.readwriteweb.com/archives/the_best_tools_for_visualization.php|creationTime|2008-03-17T13:25:19Z +http://www.cambridgesemantics.com/2008/09/sparql-by-example/|creationDate|2008-09-24 +http://www.cambridgesemantics.com/2008/09/sparql-by-example/|tag|http://www.semanlink.net/tag/sparql_tutorial +http://www.cambridgesemantics.com/2008/09/sparql-by-example/|tag|http://www.semanlink.net/tag/sparql_sample_code +http://www.cambridgesemantics.com/2008/09/sparql-by-example/|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.cambridgesemantics.com/2008/09/sparql-by-example/|title|SPARQL By Example (Lee Feigenbaum) +http://www.cambridgesemantics.com/2008/09/sparql-by-example/|creationTime|2008-09-24T22:57:23Z +http://www.bbc.co.uk/science/horizon/1999/nasca.shtml|creationDate|2007-12-27 +http://www.bbc.co.uk/science/horizon/1999/nasca.shtml|tag|http://www.semanlink.net/tag/nasca +http://www.bbc.co.uk/science/horizon/1999/nasca.shtml|comment|Cahuachi is now revealed to have been abandoned after a series of natural disasters destroyed the city. But before they left it, the Nasca people covered the city in the arid pampa sand where, until recently, it has remained a barely visible mound in the desert. +http://www.bbc.co.uk/science/horizon/1999/nasca.shtml|title|BBC - Science & Nature - Cahuachi: The Lost City of Nasca +http://www.bbc.co.uk/science/horizon/1999/nasca.shtml|creationTime|2007-12-27T22:48:21Z +http://www.bbc.co.uk/science/horizon/1999/nasca.shtml|source|BBC +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0168.html|creationDate|2007-01-09 +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0168.html|tag|http://www.semanlink.net/tag/semanlink_related +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0168.html|tag|http://www.semanlink.net/tag/semantic_web_w3_org +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0168.html|tag|http://www.semanlink.net/tag/rdf +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0168.html|comment|'tagtriples' - a experimental format for exchanging and aggregating structured metadata. It's based on RDF ideas, but emphasises simplicity over precision. 
2005-03-23 +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0168.html|title|tagtriples from Phil Dawes +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0168.html|date|2005-03-23 +http://milan2.free.fr/leopardPhpSQL/index.html|creationDate|2008-10-06 +http://milan2.free.fr/leopardPhpSQL/index.html|tag|http://www.semanlink.net/tag/leopard +http://milan2.free.fr/leopardPhpSQL/index.html|tag|http://www.semanlink.net/tag/mysql +http://milan2.free.fr/leopardPhpSQL/index.html|tag|http://www.semanlink.net/tag/php +http://milan2.free.fr/leopardPhpSQL/index.html|title|Initialiser PHP et MySQL avec Léopard 10.5 +http://milan2.free.fr/leopardPhpSQL/index.html|creationTime|2008-10-06T00:27:52Z +http://www.paulgraham.com/hiring.html|creationDate|2005-05-12 +http://www.paulgraham.com/hiring.html|title|Hiring is Obsolete +http://www.lemonde.fr/idees/article/2013/12/20/enseigner-est-une-science_4338294_3232.html|creationDate|2013-12-22 +http://www.lemonde.fr/idees/article/2013/12/20/enseigner-est-une-science_4338294_3232.html|tag|http://www.semanlink.net/tag/neuroscience +http://www.lemonde.fr/idees/article/2013/12/20/enseigner-est-une-science_4338294_3232.html|tag|http://www.semanlink.net/tag/enseignement_francais +http://www.lemonde.fr/idees/article/2013/12/20/enseigner-est-une-science_4338294_3232.html|tag|http://www.semanlink.net/tag/ecole +http://www.lemonde.fr/idees/article/2013/12/20/enseigner-est-une-science_4338294_3232.html|tag|http://www.semanlink.net/tag/ecole_montessori +http://www.lemonde.fr/idees/article/2013/12/20/enseigner-est-une-science_4338294_3232.html|title|Enseigner est une science +http://www.lemonde.fr/idees/article/2013/12/20/enseigner-est-une-science_4338294_3232.html|creationTime|2013-12-22T15:02:51Z +http://en.wikipedia.org/wiki/Anton_Wilhelm_Amo|creationDate|2014-10-14 +http://en.wikipedia.org/wiki/Anton_Wilhelm_Amo|tag|http://www.semanlink.net/tag/afrique +http://en.wikipedia.org/wiki/Anton_Wilhelm_Amo|tag|http://www.semanlink.net/tag/esclavage +http://en.wikipedia.org/wiki/Anton_Wilhelm_Amo|tag|http://www.semanlink.net/tag/philosophe +http://en.wikipedia.org/wiki/Anton_Wilhelm_Amo|title|Anton Wilhelm Amo - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Anton_Wilhelm_Amo|creationTime|2014-10-14T22:40:27Z +https://stackoverflow.com/questions/15087322/how-to-predict-a-continuous-value-time-from-text-documents|creationDate|2018-02-06 +https://stackoverflow.com/questions/15087322/how-to-predict-a-continuous-value-time-from-text-documents|tag|http://www.semanlink.net/tag/predicting_numeric_values_from_text +https://stackoverflow.com/questions/15087322/how-to-predict-a-continuous-value-time-from-text-documents|title|How to predict a continuous value (time) from text documents? 
- Stack Overflow +https://stackoverflow.com/questions/15087322/how-to-predict-a-continuous-value-time-from-text-documents|creationTime|2018-02-06T16:45:28Z +http://www.html5rocks.com/en/tutorials/es6/promises/|creationDate|2015-03-05 +http://www.html5rocks.com/en/tutorials/es6/promises/|tag|http://www.semanlink.net/tag/javascript_promises +http://www.html5rocks.com/en/tutorials/es6/promises/|title|JavaScript Promises: There and back again - HTML5 Rocks +http://www.html5rocks.com/en/tutorials/es6/promises/|creationTime|2015-03-05T10:54:45Z +http://triplify.org/|creationDate|2008-03-18 +http://triplify.org/|tag|http://www.semanlink.net/tag/semantic_web_tools +http://triplify.org/|tag|http://www.semanlink.net/tag/linked_data +http://triplify.org/|comment|Triplify provides a building block for the “semantification” of Web applications. Triplify is a small plugin for Web applications, which reveals the semantic structures encoded in relational databases by making database content available as RDF, JSON or Linked Data. Triplify is very light weight +http://triplify.org/|title|triplify: expose semantics +http://triplify.org/|creationTime|2008-03-18T08:13:56Z +http://www.lemonde.fr/pixels/article/2018/04/25/le-web-a-developpe-des-resistances-antibiotiques-a-la-democratie_5290627_4408996.html|creationDate|2018-04-26 +http://www.lemonde.fr/pixels/article/2018/04/25/le-web-a-developpe-des-resistances-antibiotiques-a-la-democratie_5290627_4408996.html|tag|http://www.semanlink.net/tag/thewebconf_2018 +http://www.lemonde.fr/pixels/article/2018/04/25/le-web-a-developpe-des-resistances-antibiotiques-a-la-democratie_5290627_4408996.html|tag|http://www.semanlink.net/tag/web_pollution +http://www.lemonde.fr/pixels/article/2018/04/25/le-web-a-developpe-des-resistances-antibiotiques-a-la-democratie_5290627_4408996.html|tag|http://www.semanlink.net/tag/democratie +http://www.lemonde.fr/pixels/article/2018/04/25/le-web-a-developpe-des-resistances-antibiotiques-a-la-democratie_5290627_4408996.html|title|« Le Web a développé des résistances antibiotiques à la démocratie » +http://www.lemonde.fr/pixels/article/2018/04/25/le-web-a-developpe-des-resistances-antibiotiques-a-la-democratie_5290627_4408996.html|creationTime|2018-04-26T08:17:19Z +http://www.lemonde.fr/afrique/article/2013/02/04/le-niger-aux-avant-postes-de-la-menace-islamiste-au-sahel_1826764_3212.html|creationDate|2013-10-12 +http://www.lemonde.fr/afrique/article/2013/02/04/le-niger-aux-avant-postes-de-la-menace-islamiste-au-sahel_1826764_3212.html|tag|http://www.semanlink.net/tag/sahel +http://www.lemonde.fr/afrique/article/2013/02/04/le-niger-aux-avant-postes-de-la-menace-islamiste-au-sahel_1826764_3212.html|tag|http://www.semanlink.net/tag/niger +http://www.lemonde.fr/afrique/article/2013/02/04/le-niger-aux-avant-postes-de-la-menace-islamiste-au-sahel_1826764_3212.html|tag|http://www.semanlink.net/tag/aqmi +http://www.lemonde.fr/afrique/article/2013/02/04/le-niger-aux-avant-postes-de-la-menace-islamiste-au-sahel_1826764_3212.html|title|Le Niger, aux avant-postes de la menace islamiste au Sahel +http://www.lemonde.fr/afrique/article/2013/02/04/le-niger-aux-avant-postes-de-la-menace-islamiste-au-sahel_1826764_3212.html|creationTime|2013-10-12T17:18:43Z +http://internetactu.blog.lemonde.fr/2017/11/25/reinventer-la-programmation/|creationDate|2017-11-25 +http://internetactu.blog.lemonde.fr/2017/11/25/reinventer-la-programmation/|tag|http://www.semanlink.net/tag/programming 
+http://internetactu.blog.lemonde.fr/2017/11/25/reinventer-la-programmation/|tag|http://www.semanlink.net/tag/emmanuel_ledinot +http://internetactu.blog.lemonde.fr/2017/11/25/reinventer-la-programmation/|title|Réinventer la programmation ? InternetActu +http://internetactu.blog.lemonde.fr/2017/11/25/reinventer-la-programmation/|creationTime|2017-11-25T19:01:28Z +http://www.xml.com/pub/a/2005/11/09/fixing-ajax-xmlhttprequest-considered-harmful.html|creationDate|2006-02-07 +http://www.xml.com/pub/a/2005/11/09/fixing-ajax-xmlhttprequest-considered-harmful.html|tag|http://www.semanlink.net/tag/ajax +http://www.xml.com/pub/a/2005/11/09/fixing-ajax-xmlhttprequest-considered-harmful.html|comment|Hacks to fool your user's web browser into thinking that the data is coming from the same domain as the web page. +http://www.xml.com/pub/a/2005/11/09/fixing-ajax-xmlhttprequest-considered-harmful.html|title|XML.com: Fixing AJAX: XMLHttpRequest Considered Harmful +https://de.wikipedia.org/wiki/Die_M%C3%B6rder_sind_unter_uns|creationDate|2016-05-25 +https://de.wikipedia.org/wiki/Die_M%C3%B6rder_sind_unter_uns|tag|http://www.semanlink.net/tag/apres_guerre +https://de.wikipedia.org/wiki/Die_M%C3%B6rder_sind_unter_uns|tag|http://www.semanlink.net/tag/berlin +https://de.wikipedia.org/wiki/Die_M%C3%B6rder_sind_unter_uns|tag|http://www.semanlink.net/tag/film_allemand +https://de.wikipedia.org/wiki/Die_M%C3%B6rder_sind_unter_uns|tag|http://www.semanlink.net/tag/nazisme +https://de.wikipedia.org/wiki/Die_M%C3%B6rder_sind_unter_uns|title|Die Mörder sind unter uns (les assassins sont parmi nous) +https://de.wikipedia.org/wiki/Die_M%C3%B6rder_sind_unter_uns|creationTime|2016-05-25T23:48:13Z +http://www.huma-num.fr/service/nakala|creationDate|2014-09-22 +http://www.huma-num.fr/service/nakala|tag|http://www.semanlink.net/tag/linked_data_service +http://www.huma-num.fr/service/nakala|comment|Partant du constat que de nombreux producteurs de données scientifiques ne disposent pas de l'infrastructure numérique nécessaire qui permettrait un accès persistant et interopérable à leurs données, la TGIR Huma-Num a mis en oeuvre un nouveau service d'exposition de données appelé NAKALA.  NAKALA propose deux grands types de services : des services d'accès aux données elles-mêmes et des services de présentation des métadonnées.  Les producteurs de données numériques ainsi soulagés de la gestion purement technique, peuvent ainsi se consacrer à la valorisation scientifique de leurs données. 
+http://www.huma-num.fr/service/nakala|title|NAKALA TGIR Huma-Num +http://www.huma-num.fr/service/nakala|creationTime|2014-09-22T12:37:12Z +http://www.dbis.informatik.hu-berlin.de/fileadmin/research/papers/posters/2012_wwwposter_hartig.pdf|creationDate|2013-01-07 +http://www.dbis.informatik.hu-berlin.de/fileadmin/research/papers/posters/2012_wwwposter_hartig.pdf|tag|http://www.semanlink.net/tag/linked_data_cache +http://www.dbis.informatik.hu-berlin.de/fileadmin/research/papers/posters/2012_wwwposter_hartig.pdf|tag|http://www.semanlink.net/tag/personal_data +http://www.dbis.informatik.hu-berlin.de/fileadmin/research/papers/posters/2012_wwwposter_hartig.pdf|tag|http://www.semanlink.net/tag/tom_heath +http://www.dbis.informatik.hu-berlin.de/fileadmin/research/papers/posters/2012_wwwposter_hartig.pdf|title|Populating Personal Linked Data Caches using Context Models [PDF] +http://www.dbis.informatik.hu-berlin.de/fileadmin/research/papers/posters/2012_wwwposter_hartig.pdf|creationTime|2013-01-07T01:12:25Z +http://socialmedia.net/2010/07/27/linked-data-an-introduction|creationDate|2010-09-02 +http://socialmedia.net/2010/07/27/linked-data-an-introduction|tag|http://www.semanlink.net/tag/linked_data +http://socialmedia.net/2010/07/27/linked-data-an-introduction|tag|http://www.semanlink.net/tag/semantic_web_presentation +http://socialmedia.net/2010/07/27/linked-data-an-introduction|title|Linked Data: An Introduction Navigating New Horizons +http://socialmedia.net/2010/07/27/linked-data-an-introduction|creationTime|2010-09-02T23:57:06Z +http://moodle.org/|creationDate|2010-08-30 +http://moodle.org/|tag|http://www.semanlink.net/tag/e_learning +http://moodle.org/|tag|http://www.semanlink.net/tag/open_source +http://moodle.org/|tag|http://www.semanlink.net/tag/web_tools +http://moodle.org/|comment|Moodle is a Course Management System (CMS), also known as a Learning Management System (LMS) or a Virtual Learning Environment (VLE). It is a Free web application that educators can use to create effective online learning sites. 
+http://moodle.org/|title|Moodle.org: open-source community-based tools for learning +http://moodle.org/|creationTime|2010-08-30T15:28:10Z +http://data.worldbank.org/|creationDate|2010-11-16 +http://data.worldbank.org/|tag|http://www.semanlink.net/tag/world_bank +http://data.worldbank.org/|title|Data The World Bank +http://data.worldbank.org/|creationTime|2010-11-16T17:27:27Z +https://gephi.org/|creationDate|2013-09-03 +https://gephi.org/|tag|http://www.semanlink.net/tag/graph_visualization +https://gephi.org/|tag|http://www.semanlink.net/tag/open_source +https://gephi.org/|title|Gephi, an open source graph visualization and manipulation software +https://gephi.org/|creationTime|2013-09-03T19:18:37Z +http://www.mail-archive.com/public-lod@w3.org/msg07612.html|creationDate|2011-02-14 +http://www.mail-archive.com/public-lod@w3.org/msg07612.html|tag|http://www.semanlink.net/tag/lod_mailing_list +http://www.mail-archive.com/public-lod@w3.org/msg07612.html|tag|http://www.semanlink.net/tag/freebase +http://www.mail-archive.com/public-lod@w3.org/msg07612.html|tag|http://www.semanlink.net/tag/linked_data +http://www.mail-archive.com/public-lod@w3.org/msg07612.html|tag|http://www.semanlink.net/tag/google +http://www.mail-archive.com/public-lod@w3.org/msg07612.html|title|Google's structured seach talk / Google squared UI +http://www.mail-archive.com/public-lod@w3.org/msg07612.html|creationTime|2011-02-14T09:36:27Z +http://forum.quechoisir.org/divulgation-donnees-personnelles-par-numericable-t11122.html|creationDate|2013-10-12 +http://forum.quechoisir.org/divulgation-donnees-personnelles-par-numericable-t11122.html|tag|http://www.semanlink.net/tag/arnaque +http://forum.quechoisir.org/divulgation-donnees-personnelles-par-numericable-t11122.html|tag|http://www.semanlink.net/tag/numericable +http://forum.quechoisir.org/divulgation-donnees-personnelles-par-numericable-t11122.html|title|Arnaque et Divulgation données personnelles par Numéricable : Internet - Forum Que Choisir +http://forum.quechoisir.org/divulgation-donnees-personnelles-par-numericable-t11122.html|creationTime|2013-10-12T12:44:07Z +http://scikit-learn.org/stable/auto_examples/index.html#|creationDate|2017-06-20 +http://scikit-learn.org/stable/auto_examples/index.html#|tag|http://www.semanlink.net/tag/documentation +http://scikit-learn.org/stable/auto_examples/index.html#|tag|http://www.semanlink.net/tag/scikit_learn +http://scikit-learn.org/stable/auto_examples/index.html#|tag|http://www.semanlink.net/tag/good +http://scikit-learn.org/stable/auto_examples/index.html#|title|scikit-learn documentation: General examples +http://scikit-learn.org/stable/auto_examples/index.html#|creationTime|2017-06-20T10:11:03Z +http://data.nytimes.com/|creationDate|2010-12-13 +http://data.nytimes.com/|tag|http://www.semanlink.net/tag/new_york_times +http://data.nytimes.com/|tag|http://www.semanlink.net/tag/linked_data +http://data.nytimes.com/|title|New York Times - Linked Open Data +http://data.nytimes.com/|creationTime|2010-12-13T16:29:52Z +http://www.vivement-lundi.com/vivement_lundi/Le_petit_blanc.html|creationDate|2009-03-11 +http://www.vivement-lundi.com/vivement_lundi/Le_petit_blanc.html|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.vivement-lundi.com/vivement_lundi/Le_petit_blanc.html|tag|http://www.semanlink.net/tag/rene_vautier +http://www.vivement-lundi.com/vivement_lundi/Le_petit_blanc.html|title|Le petit blanc à la caméra rouge +http://www.vivement-lundi.com/vivement_lundi/Le_petit_blanc.html|creationTime|2009-03-11T01:17:38Z 
+http://idle.slashdot.org/story/12/04/25/1519208/travelling-salesman-thriller-set-in-a-world-where-pnp?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29|creationDate|2012-04-27 +http://idle.slashdot.org/story/12/04/25/1519208/travelling-salesman-thriller-set-in-a-world-where-pnp?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29|tag|http://www.semanlink.net/tag/cinema +http://idle.slashdot.org/story/12/04/25/1519208/travelling-salesman-thriller-set-in-a-world-where-pnp?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29|tag|http://www.semanlink.net/tag/p_np +http://idle.slashdot.org/story/12/04/25/1519208/travelling-salesman-thriller-set-in-a-world-where-pnp?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29|title|Travelling Salesman, Thriller Set In a World Where P=NP - Slashdot +http://idle.slashdot.org/story/12/04/25/1519208/travelling-salesman-thriller-set-in-a-world-where-pnp?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+slashdot%2FeqWf+%28Slashdot%3A+Slashdot%29|creationTime|2012-04-27T23:32:07Z +https://rominirani.com/2015/07/31/docker-tutorial-series-part-7-data-volumes/|creationDate|2016-04-13 +https://rominirani.com/2015/07/31/docker-tutorial-series-part-7-data-volumes/|tag|http://www.semanlink.net/tag/docker_volumes +https://rominirani.com/2015/07/31/docker-tutorial-series-part-7-data-volumes/|title|Docker Tutorial Series : Part 7 : Data Volumes iRomin +https://rominirani.com/2015/07/31/docker-tutorial-series-part-7-data-volumes/|creationTime|2016-04-13T18:40:18Z +https://www.newscientist.com/article/dn28687-new-species-of-human-may-have-shared-our-caves-and-beds/|creationDate|2015-12-21 +https://www.newscientist.com/article/dn28687-new-species-of-human-may-have-shared-our-caves-and-beds/|tag|http://www.semanlink.net/tag/paleoanthropology +https://www.newscientist.com/article/dn28687-new-species-of-human-may-have-shared-our-caves-and-beds/|title|New species of human may have shared our caves – and beds New Scientist +https://www.newscientist.com/article/dn28687-new-species-of-human-may-have-shared-our-caves-and-beds/|creationTime|2015-12-21T14:17:48Z +http://inkdroid.org/journal/2013/01/05/fielding-notes/|creationDate|2015-02-17 +http://inkdroid.org/journal/2013/01/05/fielding-notes/|tag|http://www.semanlink.net/tag/roy_t_fielding +http://inkdroid.org/journal/2013/01/05/fielding-notes/|tag|http://www.semanlink.net/tag/citation +http://inkdroid.org/journal/2013/01/05/fielding-notes/|title|Usually when [...] 
building an application the only thing that lasts forever is the data, at least if you’re lucky +http://inkdroid.org/journal/2013/01/05/fielding-notes/|creationTime|2015-02-17T16:27:09Z +http://labs.unwieldy.net/moowheel/|creationDate|2010-08-27 +http://labs.unwieldy.net/moowheel/|tag|http://www.semanlink.net/tag/data_visualization_tools +http://labs.unwieldy.net/moowheel/|tag|http://www.semanlink.net/tag/javascript +http://labs.unwieldy.net/moowheel/|title|MooWheel: a javascript connections visualization library +http://labs.unwieldy.net/moowheel/|creationTime|2010-08-27T17:06:36Z +http://www.openlinksw.com/virtuoso/FAQ/index.htm|creationDate|2008-08-26 +http://www.openlinksw.com/virtuoso/FAQ/index.htm|tag|http://www.semanlink.net/tag/faq +http://www.openlinksw.com/virtuoso/FAQ/index.htm|tag|http://www.semanlink.net/tag/virtuoso +http://www.openlinksw.com/virtuoso/FAQ/index.htm|title|OpenLink Universal Integration Middleware - Virtuoso Product Family - FAQ +http://www.openlinksw.com/virtuoso/FAQ/index.htm|creationTime|2008-08-26T11:48:37Z +http://en.wikipedia.org/wiki/Mulholland_Drive_(film)|creationDate|2010-06-01 +http://en.wikipedia.org/wiki/Mulholland_Drive_(film)|tag|http://www.semanlink.net/tag/film +http://en.wikipedia.org/wiki/Mulholland_Drive_(film)|comment|"Film de David Lynch +" +http://en.wikipedia.org/wiki/Mulholland_Drive_(film)|title|Mulholland Drive +http://en.wikipedia.org/wiki/Mulholland_Drive_(film)|creationTime|2010-06-01T23:07:48Z +http://www.mitpressjournals.org/doi/pdf/10.1162/COLI_a_00239|creationDate|2016-09-10 +http://www.mitpressjournals.org/doi/pdf/10.1162/COLI_a_00239|tag|http://www.semanlink.net/tag/deep_learning +http://www.mitpressjournals.org/doi/pdf/10.1162/COLI_a_00239|tag|http://www.semanlink.net/tag/nlp +http://www.mitpressjournals.org/doi/pdf/10.1162/COLI_a_00239|tag|http://www.semanlink.net/tag/chris_manning +http://www.mitpressjournals.org/doi/pdf/10.1162/COLI_a_00239|title|Computational Linguistics and Deep Learning +http://www.mitpressjournals.org/doi/pdf/10.1162/COLI_a_00239|creationTime|2016-09-10T14:20:15Z +http://news.bbc.co.uk/1/hi/technology/4685231.stm|creationDate|2005-08-22 +http://news.bbc.co.uk/1/hi/technology/4685231.stm|tag|http://www.semanlink.net/tag/anticipation +http://news.bbc.co.uk/1/hi/technology/4685231.stm|tag|http://www.semanlink.net/tag/vie_artificielle +http://news.bbc.co.uk/1/hi/technology/4685231.stm|title|BBC NEWS A future full of hopes and fears +http://news.bbc.co.uk/1/hi/technology/4685231.stm|source|BBC +http://openjdk.java.net/projects/jdk7/features/|creationDate|2010-04-28 +http://openjdk.java.net/projects/jdk7/features/|tag|http://www.semanlink.net/tag/java_7 +http://openjdk.java.net/projects/jdk7/features/|title|JDK 7 Features +http://openjdk.java.net/projects/jdk7/features/|creationTime|2010-04-28T14:14:30Z +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/|creationDate|2011-02-06 +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/|tag|http://www.semanlink.net/tag/darwin +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/|tag|http://www.semanlink.net/tag/france_inter +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/|tag|http://www.semanlink.net/tag/genetique +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/|title|France Inter > Les émissions > Sur les épaules de Darwin +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/|creationTime|2011-02-06T19:35:58Z 
+http://www.nytimes.com/2010/10/07/science/07bees.html?_r=1&ref=global-home|creationDate|2010-10-12 +http://www.nytimes.com/2010/10/07/science/07bees.html?_r=1&ref=global-home|title|Honeybee Killer Found by Army and Entomologists - NYTimes.com +http://www.nytimes.com/2010/10/07/science/07bees.html?_r=1&ref=global-home|creationTime|2010-10-12T09:08:33Z +http://stackoverflow.com/questions/6352208/how-to-solve-plugin-execution-not-covered-by-lifecycle-configuration-for-sprin%20--%3E|creationDate|2015-03-13 +http://stackoverflow.com/questions/6352208/how-to-solve-plugin-execution-not-covered-by-lifecycle-configuration-for-sprin%20--%3E|tag|http://www.semanlink.net/tag/maven +http://stackoverflow.com/questions/6352208/how-to-solve-plugin-execution-not-covered-by-lifecycle-configuration-for-sprin%20--%3E|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/6352208/how-to-solve-plugin-execution-not-covered-by-lifecycle-configuration-for-sprin%20--%3E|tag|http://www.semanlink.net/tag/eclipse +http://stackoverflow.com/questions/6352208/how-to-solve-plugin-execution-not-covered-by-lifecycle-configuration-for-sprin%20--%3E|title|"eclipse - How to solve ""Plugin execution not covered by lifecycle configuration"" for Spring Data Maven Builds - Stack Overflow" +http://stackoverflow.com/questions/6352208/how-to-solve-plugin-execution-not-covered-by-lifecycle-configuration-for-sprin%20--%3E|creationTime|2015-03-13T16:05:03Z +https://brinxmat.wordpress.com/2015/10/28/json-ld-currently/|creationDate|2016-04-09 +https://brinxmat.wordpress.com/2015/10/28/json-ld-currently/|tag|http://www.semanlink.net/tag/brinxmat +https://brinxmat.wordpress.com/2015/10/28/json-ld-currently/|tag|http://www.semanlink.net/tag/json_ld +https://brinxmat.wordpress.com/2015/10/28/json-ld-currently/|title|JSON-LD, currently… Brinxmat's blog +https://brinxmat.wordpress.com/2015/10/28/json-ld-currently/|creationTime|2016-04-09T01:16:01Z +http://news.bbc.co.uk/2/hi/science/nature/8115148.stm|creationDate|2009-06-25 +http://news.bbc.co.uk/2/hi/science/nature/8115148.stm|tag|http://www.semanlink.net/tag/cassini +http://news.bbc.co.uk/2/hi/science/nature/8115148.stm|tag|http://www.semanlink.net/tag/encelade +http://news.bbc.co.uk/2/hi/science/nature/8115148.stm|comment|Nasa's Cassini spacecraft has obtained strong evidence that Saturn's tiny moon Enceladus retains liquid water. +http://news.bbc.co.uk/2/hi/science/nature/8115148.stm|title|BBC NEWS Science & Environment 'Misty caverns' on Enceladus moon +http://news.bbc.co.uk/2/hi/science/nature/8115148.stm|creationTime|2009-06-25T22:34:51Z +http://news.bbc.co.uk/2/hi/science/nature/8115148.stm|source|BBC +https://github.com/ijkilchenko/Fuzbal|creationDate|2018-03-29 +https://github.com/ijkilchenko/Fuzbal|tag|http://www.semanlink.net/tag/chrome_extension +https://github.com/ijkilchenko/Fuzbal|tag|http://www.semanlink.net/tag/using_word_embedding +https://github.com/ijkilchenko/Fuzbal|title|GitHub - ijkilchenko/Fuzbal: Chrome extension: Gives Ctrl+F like find results which include non-exact (fuzzy) matches using string edit-distance and GloVe/Word2Vec. Also searches by regular expressions. 
+https://github.com/ijkilchenko/Fuzbal|creationTime|2018-03-29T16:45:36Z +http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html|creationDate|2006-05-22 +http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html|tag|http://www.semanlink.net/tag/json +http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html|tag|http://www.semanlink.net/tag/sparql +http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html|tag|http://www.semanlink.net/tag/javascript +http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_a_sparql.html|title|TechnicaLee Speaking: SPARQL Calendar Demo: A SPARQL JavaScript Library +https://stats.stackexchange.com/questions/164876/tradeoff-batch-size-vs-number-of-iterations-to-train-a-neural-network|creationDate|2018-08-06 +https://stats.stackexchange.com/questions/164876/tradeoff-batch-size-vs-number-of-iterations-to-train-a-neural-network|tag|http://www.semanlink.net/tag/artificial_neural_network +https://stats.stackexchange.com/questions/164876/tradeoff-batch-size-vs-number-of-iterations-to-train-a-neural-network|tag|http://www.semanlink.net/tag/backpropagation +https://stats.stackexchange.com/questions/164876/tradeoff-batch-size-vs-number-of-iterations-to-train-a-neural-network|tag|http://www.semanlink.net/tag/nn_tips +https://stats.stackexchange.com/questions/164876/tradeoff-batch-size-vs-number-of-iterations-to-train-a-neural-network|title|Tradeoff batch size vs. number of iterations to train a neural network - Cross Validated +https://stats.stackexchange.com/questions/164876/tradeoff-batch-size-vs-number-of-iterations-to-train-a-neural-network|creationTime|2018-08-06T18:22:42Z +http://www.opencms.org/en/|creationDate|2012-06-13 +http://www.opencms.org/en/|tag|http://www.semanlink.net/tag/cms +http://www.opencms.org/en/|tag|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://www.opencms.org/en/|title|OpenCms, the Open Source Java Web Content Management System / CMS +http://www.opencms.org/en/|creationTime|2012-06-13T12:29:35Z +https://www.meetup.com/fr-FR/Paris-NLP/events/242014884/?comment_table_id=493219381&comment_table_name=event_comment|creationDate|2018-07-26 +https://www.meetup.com/fr-FR/Paris-NLP/events/242014884/?comment_table_id=493219381&comment_table_name=event_comment|tag|http://www.semanlink.net/tag/paris_nlp_meetup +https://www.meetup.com/fr-FR/Paris-NLP/events/242014884/?comment_table_id=493219381&comment_table_name=event_comment|tag|http://www.semanlink.net/tag/nlp_human_resources +https://www.meetup.com/fr-FR/Paris-NLP/events/242014884/?comment_table_id=493219381&comment_table_name=event_comment|tag|http://www.semanlink.net/tag/discounted_cumulative_gain +https://www.meetup.com/fr-FR/Paris-NLP/events/242014884/?comment_table_id=493219381&comment_table_name=event_comment|comment|[blog post](https://nlpparis.wordpress.com/2018/07/26/paris-nlp-meetup-6-season-2-linkvalue/) +https://www.meetup.com/fr-FR/Paris-NLP/events/242014884/?comment_table_id=493219381&comment_table_name=event_comment|title|Paris NLP Meetup #6 +https://www.meetup.com/fr-FR/Paris-NLP/events/242014884/?comment_table_id=493219381&comment_table_name=event_comment|creationTime|2018-07-26T00:26:25Z +http://ebiquity.umbc.edu/papers/select/search/html/613a353a7b693a303b643a303b693a313b643a303b693a323b733a303a22223b693a333b733a31313a22756e6365727461696e7479223b693a343b643a303b7d/|creationDate|2006-05-01 
+http://ebiquity.umbc.edu/papers/select/search/html/613a353a7b693a303b643a303b693a313b643a303b693a323b733a303a22223b693a333b733a31313a22756e6365727461696e7479223b693a343b643a303b7d/|tag|http://www.semanlink.net/tag/owl
+http://ebiquity.umbc.edu/papers/select/search/html/613a353a7b693a303b643a303b693a313b643a303b693a323b733a303a22223b693a333b733a31313a22756e6365727461696e7479223b693a343b643a303b7d/|tag|http://www.semanlink.net/tag/reseaux_bayesiens
+http://ebiquity.umbc.edu/papers/select/search/html/613a353a7b693a303b643a303b693a313b643a303b693a323b733a303a22223b693a333b733a31313a22756e6365727461696e7479223b693a343b643a303b7d/|title|UMBC eBiquity - Publications - Probabilistic Framework for Semantic Web - OWL and Bayesian Networks
+https://blog.codecentric.de/en/2012/05/writing-lightweight-rest-integration-tests-with-the-jersey-test-framework/|creationDate|2015-04-09
+https://blog.codecentric.de/en/2012/05/writing-lightweight-rest-integration-tests-with-the-jersey-test-framework/|tag|http://www.semanlink.net/tag/jersey
+https://blog.codecentric.de/en/2012/05/writing-lightweight-rest-integration-tests-with-the-jersey-test-framework/|tag|http://www.semanlink.net/tag/junit
+https://blog.codecentric.de/en/2012/05/writing-lightweight-rest-integration-tests-with-the-jersey-test-framework/|tag|http://www.semanlink.net/tag/tests
+https://blog.codecentric.de/en/2012/05/writing-lightweight-rest-integration-tests-with-the-jersey-test-framework/|title|Writing lightweight REST integration tests with the Jersey Test Framework
+https://blog.codecentric.de/en/2012/05/writing-lightweight-rest-integration-tests-with-the-jersey-test-framework/|creationTime|2015-04-09T10:13:50Z
+https://futurism.com/how-automation-is-going-to-redefine-what-it-means-to-work/|creationDate|2017-02-26
+https://futurism.com/how-automation-is-going-to-redefine-what-it-means-to-work/|tag|http://www.semanlink.net/tag/guaranteed_basic_income
+https://futurism.com/how-automation-is-going-to-redefine-what-it-means-to-work/|comment|jobs are for machines, and life is for people
+https://futurism.com/how-automation-is-going-to-redefine-what-it-means-to-work/|title|How Automation is Going to Redefine What it Means to Work
+https://futurism.com/how-automation-is-going-to-redefine-what-it-means-to-work/|creationTime|2017-02-26T10:57:29Z
+http://www.nissaba.nl/nisphp/viewtopic.php?start=50&t=64|creationDate|2007-11-21
+http://www.nissaba.nl/nisphp/viewtopic.php?start=50&t=64|tag|http://www.semanlink.net/tag/neolithique
+http://www.nissaba.nl/nisphp/viewtopic.php?start=50&t=64|tag|http://www.semanlink.net/tag/venus_prehistoriques
+http://www.nissaba.nl/nisphp/viewtopic.php?start=50&t=64|tag|http://www.semanlink.net/tag/paleolithique
+http://www.nissaba.nl/nisphp/viewtopic.php?start=50&t=64|tag|http://www.semanlink.net/tag/danny_ayers
+http://www.nissaba.nl/nisphp/viewtopic.php?start=50&t=64|title|nissaba - paleolitische en neolithische vrouwenbeeldjes
+http://www.nissaba.nl/nisphp/viewtopic.php?start=50&t=64|creationTime|2007-11-21T15:41:42Z
+http://www.mkbergman.com/?p=240|creationDate|2008-01-19
+http://www.mkbergman.com/?p=240|tag|http://www.semanlink.net/tag/semantic_interoperability
+http://www.mkbergman.com/?p=240|title|Models of Semantic Interoperability » AI3:::Adaptive Information
+http://www.mkbergman.com/?p=240|creationTime|2008-01-19T17:24:21Z
+https://groups.google.com/forum/#!topic/json-ld/55WBF7NWQSE|creationDate|2012-09-02
+https://groups.google.com/forum/#!topic/json-ld/55WBF7NWQSE|tag|http://www.semanlink.net/tag/mime_type +https://groups.google.com/forum/#!topic/json-ld/55WBF7NWQSE|tag|http://www.semanlink.net/tag/talis_rdf_json +https://groups.google.com/forum/#!topic/json-ld/55WBF7NWQSE|tag|http://www.semanlink.net/tag/json_ld +https://groups.google.com/forum/#!topic/json-ld/55WBF7NWQSE|tag|http://www.semanlink.net/tag/mark_birbeck +https://groups.google.com/forum/#!topic/json-ld/55WBF7NWQSE|tag|http://www.semanlink.net/tag/olivier_grisel +https://groups.google.com/forum/#!topic/json-ld/55WBF7NWQSE|title|Recommended mimetype for JSON-LD payload? - Google Groups +https://groups.google.com/forum/#!topic/json-ld/55WBF7NWQSE|creationTime|2012-09-02T15:36:00Z +http://identity.mozilla.com/post/57712756801/persona-makes-signing-in-easy-for-gmail-users|creationDate|2013-08-09 +http://identity.mozilla.com/post/57712756801/persona-makes-signing-in-easy-for-gmail-users|tag|http://www.semanlink.net/tag/mozilla +http://identity.mozilla.com/post/57712756801/persona-makes-signing-in-easy-for-gmail-users|tag|http://www.semanlink.net/tag/privacy_and_internet +http://identity.mozilla.com/post/57712756801/persona-makes-signing-in-easy-for-gmail-users|tag|http://www.semanlink.net/tag/passwords +http://identity.mozilla.com/post/57712756801/persona-makes-signing-in-easy-for-gmail-users|title|Identity at Mozilla +http://identity.mozilla.com/post/57712756801/persona-makes-signing-in-easy-for-gmail-users|creationTime|2013-08-09T13:16:45Z +http://www4.wiwiss.fu-berlin.de/is-group/snorql/|creationDate|2007-10-13 +http://www4.wiwiss.fu-berlin.de/is-group/snorql/|tag|http://www.semanlink.net/tag/snorql +http://www4.wiwiss.fu-berlin.de/is-group/snorql/|title|SPARQL Explorer for http://dbpedia.org/sparql +http://www4.wiwiss.fu-berlin.de/is-group/snorql/|creationTime|2007-10-13T00:05:16Z +http://searchengineland.com/bing-says-goodbye-to-bing-shopping-hello-product-search-with-rich-captions-product-ads-170524|creationDate|2013-09-02 +http://searchengineland.com/bing-says-goodbye-to-bing-shopping-hello-product-search-with-rich-captions-product-ads-170524|tag|http://www.semanlink.net/tag/bing +http://searchengineland.com/bing-says-goodbye-to-bing-shopping-hello-product-search-with-rich-captions-product-ads-170524|comment|Bing announced that Bing Shopping is being replaced by Product Search, which simply integrates product results within Bing search results rather than in a separate destination. Unlike Google, Bing also continues to offer both paid and free ways for merchants to have product listings. 
(August 23rd, 2013)
+http://searchengineland.com/bing-says-goodbye-to-bing-shopping-hello-product-search-with-rich-captions-product-ads-170524|title|Bing Says Goodbye To Bing Shopping, Hello Product Search With Rich Captions & Product Ads
+http://searchengineland.com/bing-says-goodbye-to-bing-shopping-hello-product-search-with-rich-captions-product-ads-170524|creationTime|2013-09-02T16:39:02Z
+http://danbri.org/words/2010/07/09/557|creationDate|2010-07-16
+http://danbri.org/words/2010/07/09/557|tag|http://www.semanlink.net/tag/semantic_statistics
+http://danbri.org/words/2010/07/09/557|tag|http://www.semanlink.net/tag/dan_brickley
+http://danbri.org/words/2010/07/09/557|tag|http://www.semanlink.net/tag/universal_decimal_classification
+http://danbri.org/words/2010/07/09/557|title|Subject classification and Statistics
+http://danbri.org/words/2010/07/09/557|creationTime|2010-07-16T13:52:14Z
+https://quantdare.com/what-is-the-difference-between-bagging-and-boosting/|creationDate|2018-11-08
+https://quantdare.com/what-is-the-difference-between-bagging-and-boosting/|tag|http://www.semanlink.net/tag/ensemble_learning
+https://quantdare.com/what-is-the-difference-between-bagging-and-boosting/|tag|http://www.semanlink.net/tag/bootstrap_aggregating_bagging
+https://quantdare.com/what-is-the-difference-between-bagging-and-boosting/|tag|http://www.semanlink.net/tag/boosting
+https://quantdare.com/what-is-the-difference-between-bagging-and-boosting/|title|What is the difference between Bagging and Boosting?
+https://quantdare.com/what-is-the-difference-between-bagging-and-boosting/|creationTime|2018-11-08T18:13:31Z
+http://blog.schema.org/2012/11/good-relations-and-schemaorg.html|creationDate|2013-02-15
+http://blog.schema.org/2012/11/good-relations-and-schemaorg.html|tag|http://www.semanlink.net/tag/guha
+http://blog.schema.org/2012/11/good-relations-and-schemaorg.html|tag|http://www.semanlink.net/tag/goodrelations
+http://blog.schema.org/2012/11/good-relations-and-schemaorg.html|tag|http://www.semanlink.net/tag/schema_org
+http://blog.schema.org/2012/11/good-relations-and-schemaorg.html|title|schema blog: Good Relations and Schema.org
+http://blog.schema.org/2012/11/good-relations-and-schemaorg.html|creationTime|2013-02-15T02:41:07Z
+http://www.netvibes.com/|creationDate|2006-04-24
+http://www.netvibes.com/|tag|http://www.semanlink.net/tag/web_2_0
+http://www.netvibes.com/|title|www.netvibes.com Netvibes
+http://www.mkbergman.com/986/umbel-services-part-2-full-text-faceted-search/|creationDate|2011-11-13
+http://www.mkbergman.com/986/umbel-services-part-2-full-text-faceted-search/|tag|http://www.semanlink.net/tag/faceted_search
+http://www.mkbergman.com/986/umbel-services-part-2-full-text-faceted-search/|tag|http://www.semanlink.net/tag/mike_bergman
+http://www.mkbergman.com/986/umbel-services-part-2-full-text-faceted-search/|tag|http://www.semanlink.net/tag/solr
+http://www.mkbergman.com/986/umbel-services-part-2-full-text-faceted-search/|tag|http://www.semanlink.net/tag/umbel
+http://www.mkbergman.com/986/umbel-services-part-2-full-text-faceted-search/|comment|"Search uses the integration of RDF and inferencing with full-text, faceted search using Solr
+"
+http://www.mkbergman.com/986/umbel-services-part-2-full-text-faceted-search/|title|UMBEL Services, Part 2: Full-text, Faceted Search » AI3:::Adaptive Information
+http://www.mkbergman.com/986/umbel-services-part-2-full-text-faceted-search/|creationTime|2011-11-13T14:25:52Z
+http://web.archive.org/web/19990203112043/http://www.hypersolutions.fr/mmm/continent6/|creationDate|2010-05-17 +http://web.archive.org/web/19990203112043/http://www.hypersolutions.fr/mmm/continent6/|tag|http://www.semanlink.net/tag/hypersolutions +http://web.archive.org/web/19990203112043/http://www.hypersolutions.fr/mmm/continent6/|tag|http://www.semanlink.net/tag/m3_multi_media_museum +http://web.archive.org/web/19990203112043/http://www.hypersolutions.fr/mmm/continent6/|title|Le 6eme continent +http://web.archive.org/web/19990203112043/http://www.hypersolutions.fr/mmm/continent6/|creationTime|2010-05-17T12:28:08Z +http://www.businessinsider.com/why-programmers-work-at-night-2013-1|creationDate|2013-01-18 +http://www.businessinsider.com/why-programmers-work-at-night-2013-1|tag|http://www.semanlink.net/tag/night +http://www.businessinsider.com/why-programmers-work-at-night-2013-1|tag|http://www.semanlink.net/tag/programmers +http://www.businessinsider.com/why-programmers-work-at-night-2013-1|title|Why Programmers Work At Night - Business Insider +http://www.businessinsider.com/why-programmers-work-at-night-2013-1|creationTime|2013-01-18T00:52:25Z +http://www.inf.unibz.it/~franconi/dl/course/|creationDate|2007-07-09 +http://www.inf.unibz.it/~franconi/dl/course/|tag|http://www.semanlink.net/tag/description_logic +http://www.inf.unibz.it/~franconi/dl/course/|title|DESCRIPTION LOGICS course - Enrico Franconi +http://www.inf.unibz.it/~franconi/dl/course/|creationTime|2007-07-09T23:20:49Z +http://www.biodiversitylibrary.org/About.aspx|creationDate|2007-05-11 +http://www.biodiversitylibrary.org/About.aspx|tag|http://www.semanlink.net/tag/encyclopedia_of_life +http://www.biodiversitylibrary.org/About.aspx|comment|Ten major natural history museum libraries, botanical libraries, and research institutions have joined to form the Biodiversity Heritage Library Project. The group is developing a strategy and operational plan to digitize the published literature of biodiversity held in their respective collections. This literature will be available through a global “biodiversity commons.” +http://www.biodiversitylibrary.org/About.aspx|title|Biodiversity Heritage Library +http://www.biodiversitylibrary.org/About.aspx|creationTime|2007-05-11T00:19:59Z +http://lists.w3.org/Archives/Public/www-tag/2002Apr/0235.html|creationDate|2006-06-25 +http://lists.w3.org/Archives/Public/www-tag/2002Apr/0235.html|tag|http://www.semanlink.net/tag/soap +http://lists.w3.org/Archives/Public/www-tag/2002Apr/0235.html|tag|http://www.semanlink.net/tag/soap_vs_rest +http://lists.w3.org/Archives/Public/www-tag/2002Apr/0235.html|tag|http://www.semanlink.net/tag/roy_t_fielding +http://lists.w3.org/Archives/Public/www-tag/2002Apr/0235.html|tag|http://www.semanlink.net/tag/w3c +http://lists.w3.org/Archives/Public/www-tag/2002Apr/0235.html|comment|"I can say with authority that the W3C +was created by big businesses specifically to prevent their own marketing +departments from destroying the value inherent in the Web through their +own, and their competitors', short-sighted, quarterly-revenue-driven +pursuit of profits
The only reason SOAP remains in the W3C for standardization is because +all of the other forums either rejected the concept out of hand or +refused to rubber-stamp a poor implementation of a bad idea. + +" +http://lists.w3.org/Archives/Public/www-tag/2002Apr/0235.html|title|Re: FW: draft findings on Unsafe Methods (whenToUseGet-7) from Roy T. Fielding on 2002-04-23 +http://semanticweb.com/studio-ousia-envisions-world-semantic-augmented-reality_b42189|creationDate|2014-03-26 +http://semanticweb.com/studio-ousia-envisions-world-semantic-augmented-reality_b42189|tag|http://www.semanlink.net/tag/semantic_search +http://semanticweb.com/studio-ousia-envisions-world-semantic-augmented-reality_b42189|tag|http://www.semanlink.net/tag/nlp_use_cases +http://semanticweb.com/studio-ousia-envisions-world-semantic-augmented-reality_b42189|title|Studio Ousia Envisions A World Of Semantic Augmented Reality - Semanticweb.com +http://semanticweb.com/studio-ousia-envisions-world-semantic-augmented-reality_b42189|creationTime|2014-03-26T13:14:32Z +http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/|creationDate|2014-07-29 +http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/|tag|http://www.semanlink.net/tag/income_inequality +http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/|tag|http://www.semanlink.net/tag/critique_du_capitalisme +http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/|tag|http://www.semanlink.net/tag/classe_moyenne +http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/|tag|http://www.semanlink.net/tag/capitalisme +http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/|tag|http://www.semanlink.net/tag/minimum_wage +http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/|tag|http://www.semanlink.net/tag/billionaires +http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/|comment|"The difference between a poor society and a wealthy one is largely the difference in the number of solutions to human problems we have available to us.
+“Concentrating wealth at the top essentially creates a death spiral of falling demand.”
+if you want prosperity and growth, the more people you include, the more prosperity and growth you have
+this idea that the more greedy we are as individuals, the better off the society is, in general, is a lie.
" +http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/|title|Why capitalism has nothing to do with supply and demand Making Sen$e PBS NewsHour +http://www.pbs.org/newshour/making-sense/why-capitalism-has-nothing-to-do-with-supply-and-demand/|creationTime|2014-07-29T19:18:40Z +http://evernote.com|creationDate|2012-12-16 +http://evernote.com|tag|http://www.semanlink.net/tag/to_see +http://evernote.com|tag|http://www.semanlink.net/tag/semanlink_related +http://evernote.com|title|Evernote Rappelez-vous tout avec Evernote +http://evernote.com|creationTime|2012-12-16T21:52:02Z +http://combat.blog.lemonde.fr/2014/04/25/comment-jai-humilie-loic-pietri-champion-du-monde-de-judo/|creationDate|2015-08-07 +http://combat.blog.lemonde.fr/2014/04/25/comment-jai-humilie-loic-pietri-champion-du-monde-de-judo/|tag|http://www.semanlink.net/tag/rigolo +http://combat.blog.lemonde.fr/2014/04/25/comment-jai-humilie-loic-pietri-champion-du-monde-de-judo/|tag|http://www.semanlink.net/tag/judo +http://combat.blog.lemonde.fr/2014/04/25/comment-jai-humilie-loic-pietri-champion-du-monde-de-judo/|title|Comment j’ai humilié Loïc Pietri, champion du monde de judo Au tapis ! +http://combat.blog.lemonde.fr/2014/04/25/comment-jai-humilie-loic-pietri-champion-du-monde-de-judo/|creationTime|2015-08-07T00:08:50Z +http://arxiv.org/abs/0807.4145|creationDate|2008-08-17 +http://arxiv.org/abs/0807.4145|tag|http://www.semanlink.net/tag/hypothese_de_riemann +http://arxiv.org/abs/0807.4145|tag|http://www.semanlink.net/tag/jean_paul +http://arxiv.org/abs/0807.4145|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/0807.4145|arxiv_author|Jean-Paul Cardinal +http://arxiv.org/abs/0807.4145|comment|> we explore a class of equivalence relations over N* from which is constructed a sequence of symetric matrices related to the Mertens function. From numerical experimentations we suggest a conjecture, about the growth of the quadratic norm of these matrices, which implies the Riemann hypothesis. This suggests that matrix analysis methods may play a more important part in this classical and difficult problem. +http://arxiv.org/abs/0807.4145|title|[0807.4145] Une suite de matrices symétriques en rapport avec la fonction de Mertens +http://arxiv.org/abs/0807.4145|creationTime|2008-08-17T12:29:55Z +http://arxiv.org/abs/0807.4145|arxiv_summary|"In this paper we explore a class of equivalence relations over $\N^\ast$ from +which is constructed a sequence of symetric matrices related to the Mertens +function. From numerical experimentations we suggest a conjecture, about the +growth of the quadratic norm of these matrices, which implies the Riemann +hypothesis. This suggests that matrix analysis methods may play a more +important part in this classical and difficult problem." 
+http://arxiv.org/abs/0807.4145|arxiv_firstAuthor|Jean-Paul Cardinal
+http://arxiv.org/abs/0807.4145|arxiv_updated|2016-02-27T19:27:43Z
+http://arxiv.org/abs/0807.4145|arxiv_title|Une suite de matrices symétriques en rapport avec la fonction de Mertens
+http://arxiv.org/abs/0807.4145|arxiv_published|2008-07-25T17:34:34Z
+http://arxiv.org/abs/0807.4145|arxiv_num|0807.4145
+https://arxiv.org/abs/1405.4053|creationDate|2017-07-10
+https://arxiv.org/abs/1405.4053|tag|http://www.semanlink.net/tag/tomas_mikolov
+https://arxiv.org/abs/1405.4053|tag|http://www.semanlink.net/tag/doc2vec
+https://arxiv.org/abs/1405.4053|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1405.4053|arxiv_author|Tomas Mikolov
+https://arxiv.org/abs/1405.4053|arxiv_author|Quoc V. Le
+https://arxiv.org/abs/1405.4053|comment|"Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantics of words)
+
+
+"
+https://arxiv.org/abs/1405.4053|title|[1405.4053] Distributed Representations of Sentences and Documents
+https://arxiv.org/abs/1405.4053|creationTime|2017-07-10T16:20:03Z
+https://arxiv.org/abs/1405.4053|arxiv_summary|"Many machine learning algorithms require the input to be represented as a
+fixed-length feature vector. When it comes to texts, one of the most common
+fixed-length features is bag-of-words. Despite their popularity, bag-of-words
+features have two major weaknesses: they lose the ordering of the words and
+they also ignore semantics of the words. For example, ""powerful,"" ""strong"" and
+""Paris"" are equally distant. In this paper, we propose Paragraph Vector, an
+unsupervised algorithm that learns fixed-length feature representations from
+variable-length pieces of texts, such as sentences, paragraphs, and documents.
+Our algorithm represents each document by a dense vector which is trained to
+predict words in the document. Its construction gives our algorithm the
+potential to overcome the weaknesses of bag-of-words models. Empirical results
+show that Paragraph Vectors outperform bag-of-words models as well as other
+techniques for text representations. Finally, we achieve new state-of-the-art
+results on several text classification and sentiment analysis tasks."
+https://arxiv.org/abs/1405.4053|arxiv_firstAuthor|Quoc V. Le
+https://arxiv.org/abs/1405.4053|arxiv_updated|2014-05-22T23:23:19Z
+https://arxiv.org/abs/1405.4053|arxiv_title|Distributed Representations of Sentences and Documents
+https://arxiv.org/abs/1405.4053|arxiv_published|2014-05-16T07:12:16Z
+https://arxiv.org/abs/1405.4053|arxiv_num|1405.4053
+https://arxiv.org/abs/1710.04099|creationDate|2018-02-13
+https://arxiv.org/abs/1710.04099|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1710.04099|tag|http://www.semanlink.net/tag/word2vec
+https://arxiv.org/abs/1710.04099|tag|http://www.semanlink.net/tag/gensim
+https://arxiv.org/abs/1710.04099|tag|http://www.semanlink.net/tag/knowledge_graph
+https://arxiv.org/abs/1710.04099|arxiv_author|Finn Årup Nielsen
+https://arxiv.org/abs/1710.04099|comment|web service for querying an embedding of entities in the Wikidata knowledge graph. 
The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk +https://arxiv.org/abs/1710.04099|title|[1710.04099] Wembedder: Wikidata entity embedding web service +https://arxiv.org/abs/1710.04099|creationTime|2018-02-13T19:14:37Z +https://arxiv.org/abs/1710.04099|arxiv_summary|"I present a web service for querying an embedding of entities in the Wikidata +knowledge graph. The embedding is trained on the Wikidata dump using Gensim's +Word2Vec implementation and a simple graph walk. A REST API is implemented. +Together with the Wikidata API the web service exposes a multilingual resource +for over 600'000 Wikidata items and properties." +https://arxiv.org/abs/1710.04099|arxiv_firstAuthor|Finn Årup Nielsen +https://arxiv.org/abs/1710.04099|arxiv_updated|2017-10-11T14:56:27Z +https://arxiv.org/abs/1710.04099|arxiv_title|Wembedder: Wikidata entity embedding web service +https://arxiv.org/abs/1710.04099|arxiv_published|2017-10-11T14:56:27Z +https://arxiv.org/abs/1710.04099|arxiv_num|1710.04099 +http://www.rfi.fr/actufr/articles/075/article_42263.asp|creationDate|2006-03-06 +http://www.rfi.fr/actufr/articles/075/article_42263.asp|tag|http://www.semanlink.net/tag/pillage_de_vestiges_antiques +http://www.rfi.fr/actufr/articles/075/article_42263.asp|tag|http://www.semanlink.net/tag/archeologie_du_niger +http://www.rfi.fr/actufr/articles/075/article_42263.asp|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://www.rfi.fr/actufr/articles/075/article_42263.asp|comment|Des statuettes, des poteries et des haches polies... Ce sont en tout 6 400 pièces archéologiques qui ont été rapatriées la semaine dernière au Niger. Ces objets avaient été exportés illégalement par des trafiquants d'art, puis saisis par les douanes françaises à l'aéroport Roissy-Charles-de-Gaulle. Qui sont les trafiquants ? Comment empêcher le pillage des sites archéologiques du Sahel ? Abdoulaye Maga, archéologue nigérien, est également directeur de l'Institut de recherche en sciences humaines de Niamey. +http://www.rfi.fr/actufr/articles/075/article_42263.asp|title|RFI - Abdoulaye Maga - Invité Afrique RFI - 6 400 pièces archéologiques rapatriées la semaine dernière au Niger. 
+https://www.ipbes.net/news/media-release-worsening-worldwide-land-degradation-now-%E2%80%98critical%E2%80%99-undermining-well-being-32|creationDate|2018-03-26 +https://www.ipbes.net/news/media-release-worsening-worldwide-land-degradation-now-%E2%80%98critical%E2%80%99-undermining-well-being-32|tag|http://www.semanlink.net/tag/biodiversite_declin +https://www.ipbes.net/news/media-release-worsening-worldwide-land-degradation-now-%E2%80%98critical%E2%80%99-undermining-well-being-32|tag|http://www.semanlink.net/tag/land_degradation +https://www.ipbes.net/news/media-release-worsening-worldwide-land-degradation-now-%E2%80%98critical%E2%80%99-undermining-well-being-32|title|Worsening Worldwide Land Degradation Now ‘Critical’, Undermining Well-Being of 3.2 Billion People IPBES +https://www.ipbes.net/news/media-release-worsening-worldwide-land-degradation-now-%E2%80%98critical%E2%80%99-undermining-well-being-32|creationTime|2018-03-26T23:22:37Z +http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers#awesm=~oAB0wn16tHAyrB|creationDate|2014-04-05 +http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers#awesm=~oAB0wn16tHAyrB|tag|http://www.semanlink.net/tag/facebook +http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers#awesm=~oAB0wn16tHAyrB|comment|people need to know that Facebook is making things to improve the human experience, not just spending billions to make even more billions off our personal information +http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers#awesm=~oAB0wn16tHAyrB|title|Why No One Trusts Facebook To Power The Future – ReadWrite +http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers#awesm=~oAB0wn16tHAyrB|creationTime|2014-04-05T23:36:53Z +https://pdfs.semanticscholar.org/2a3f/862199883ceff5e3c74126f0c80770653e05.pdf|creationDate|2018-01-30 +https://pdfs.semanticscholar.org/2a3f/862199883ceff5e3c74126f0c80770653e05.pdf|tag|http://www.semanlink.net/tag/transe +https://pdfs.semanticscholar.org/2a3f/862199883ceff5e3c74126f0c80770653e05.pdf|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +https://pdfs.semanticscholar.org/2a3f/862199883ceff5e3c74126f0c80770653e05.pdf|comment|> we start by analyzing the problems of TransE on reflexive/one-to-many/many-to-one/many-to-many relations. Accordingly we propose a method named translation on hyperplanes (TransH) which interprets a relation as a translating operation on a hyperplane +https://pdfs.semanticscholar.org/2a3f/862199883ceff5e3c74126f0c80770653e05.pdf|title|Knowledge Graph Embedding by Translating on Hyperplanes (2014) +https://pdfs.semanticscholar.org/2a3f/862199883ceff5e3c74126f0c80770653e05.pdf|creationTime|2018-01-30T13:35:21Z +http://worrydream.com/Engelbart/|creationDate|2013-07-05 +http://worrydream.com/Engelbart/|tag|http://www.semanlink.net/tag/engelbart +http://worrydream.com/Engelbart/|comment|"Here's the most facile interpretation of Engelbart, splendidly exhibited by this New York Times headline:
+Douglas C. Engelbart, Inventor of the Computer Mouse, Dies at 88 +
+This is as if you found the person who invented writing, and credited them for inventing the pencil." +http://worrydream.com/Engelbart/|title|A few words on Doug Engelbart +http://worrydream.com/Engelbart/|creationTime|2013-07-05T13:17:50Z +http://greasemonkey.mozdev.org|creationDate|2005-05-17 +http://greasemonkey.mozdev.org|tag|http://www.semanlink.net/tag/greasemonkey +http://greasemonkey.mozdev.org|title|mozdev.org - greasemonkey +http://news.bbc.co.uk/1/hi/sci/tech/4697883.stm|creationDate|2005-08-22 +http://news.bbc.co.uk/1/hi/sci/tech/4697883.stm|tag|http://www.semanlink.net/tag/recherche +http://news.bbc.co.uk/1/hi/sci/tech/4697883.stm|tag|http://www.semanlink.net/tag/declin_de_l_europe +http://news.bbc.co.uk/1/hi/sci/tech/4697883.stm|title|BBC NEWS Science/Nature Europe moving in 'R&D slow lane' +http://news.bbc.co.uk/1/hi/sci/tech/4697883.stm|source|BBC +https://datascience.stackexchange.com/questions/12649/how-to-calculate-the-mini-batch-memory-impact-when-training-deep-learning-models|creationDate|2019-05-14 +https://datascience.stackexchange.com/questions/12649/how-to-calculate-the-mini-batch-memory-impact-when-training-deep-learning-models|tag|http://www.semanlink.net/tag/memory_requirements_in_nn +https://datascience.stackexchange.com/questions/12649/how-to-calculate-the-mini-batch-memory-impact-when-training-deep-learning-models|title|tensorflow - How to calculate the mini-batch memory impact when training deep learning models? - Data Science Stack Exchange +https://datascience.stackexchange.com/questions/12649/how-to-calculate-the-mini-batch-memory-impact-when-training-deep-learning-models|creationTime|2019-05-14T23:11:39Z +http://check.rdfa.info/|creationDate|2011-01-18 +http://check.rdfa.info/|tag|http://www.semanlink.net/tag/rdfa +http://check.rdfa.info/|tag|http://www.semanlink.net/tag/dev_tools +http://check.rdfa.info/|title|check.rdfa +http://check.rdfa.info/|creationTime|2011-01-18T12:03:20Z +https://github.com/tensorflow/models|creationDate|2018-02-28 +https://github.com/tensorflow/models|tag|http://www.semanlink.net/tag/sample_code +https://github.com/tensorflow/models|tag|http://www.semanlink.net/tag/tensorflow +https://github.com/tensorflow/models|tag|http://www.semanlink.net/tag/github_project +https://github.com/tensorflow/models|title|GitHub - tensorflow/models: Models and examples built with TensorFlow +https://github.com/tensorflow/models|creationTime|2018-02-28T23:55:28Z +http://sourceforge.net/projects/delicious-java|creationDate|2006-09-25 +http://sourceforge.net/projects/delicious-java|tag|http://www.semanlink.net/tag/delicious_java +http://sourceforge.net/projects/delicious-java|tag|http://www.semanlink.net/tag/sourceforge +http://sourceforge.net/projects/delicious-java|title|SourceForge.net: del.icio.us Java API +http://lists.w3.org/Archives/Public/ietf-http-wg/2008AprJun/0017.html|creationDate|2008-04-07 +http://lists.w3.org/Archives/Public/ietf-http-wg/2008AprJun/0017.html|tag|http://www.semanlink.net/tag/henry_story +http://lists.w3.org/Archives/Public/ietf-http-wg/2008AprJun/0017.html|tag|http://www.semanlink.net/tag/authentication +http://lists.w3.org/Archives/Public/ietf-http-wg/2008AprJun/0017.html|tag|http://www.semanlink.net/tag/rdf_and_social_networks +http://lists.w3.org/Archives/Public/ietf-http-wg/2008AprJun/0017.html|title|Sketch of a simple authentication protocol +http://lists.w3.org/Archives/Public/ietf-http-wg/2008AprJun/0017.html|creationTime|2008-04-07T22:43:27Z 
+http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_using_spa.html|creationDate|2008-10-29 +http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_using_spa.html|tag|http://www.semanlink.net/tag/sparql_tips +http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_using_spa.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_using_spa.html|comment|suppose we want to use the value of the Dublin Core title predicate (dc:title) if it exists, and otherwise use the value of the rdfs:label predicate +http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_using_spa.html|title|TechnicaLee Speaking: using OPTIONAL to select a single value based on an ordered list of predicates which might appear in the data +http://www.thefigtrees.net/lee/blog/2006/04/sparql_calendar_demo_using_spa.html|creationTime|2008-10-29T17:40:43Z +http://en.wikipedia.org/wiki/Slime_mold|creationDate|2011-03-04 +http://en.wikipedia.org/wiki/Slime_mold|tag|http://www.semanlink.net/tag/slime_mold +http://en.wikipedia.org/wiki/Slime_mold|title|Slime mold - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Slime_mold|creationTime|2011-03-04T17:03:56Z +https://discussions.apple.com/thread/7366807|creationDate|2018-04-30 +https://discussions.apple.com/thread/7366807|tag|http://www.semanlink.net/tag/apple_sucks +https://discussions.apple.com/thread/7366807|tag|http://www.semanlink.net/tag/howto +https://discussions.apple.com/thread/7366807|tag|http://www.semanlink.net/tag/photo_numerique +https://discussions.apple.com/thread/7366807|tag|http://www.semanlink.net/tag/apple_photos +https://discussions.apple.com/thread/7366807|title|Can you correct skew or perspective with … - Apple Community +https://discussions.apple.com/thread/7366807|creationTime|2018-04-30T13:05:09Z +https://twitter.com/asutoshsahoo_97/status/1062407088436113409|creationDate|2018-11-14 +https://twitter.com/asutoshsahoo_97/status/1062407088436113409|tag|http://www.semanlink.net/tag/slides +https://twitter.com/asutoshsahoo_97/status/1062407088436113409|tag|http://www.semanlink.net/tag/ulmfit +https://twitter.com/asutoshsahoo_97/status/1062407088436113409|title|"Asutosh Sahoo sur Twitter : ""Slides of my seminar on ULMFIT""" +https://twitter.com/asutoshsahoo_97/status/1062407088436113409|creationTime|2018-11-14T22:09:48Z +http://digg.com/|creationDate|2006-05-23 +http://digg.com/|tag|http://www.semanlink.net/tag/news_website +http://digg.com/|tag|http://www.semanlink.net/tag/web_2_0 +http://digg.com/|tag|http://www.semanlink.net/tag/social_software +http://digg.com/|comment|Digg is a technology news website that combines social bookmarking, blogging, RSS, and non-hierarchical editorial control. With digg, users submit stories for review, but rather than allow an editor to decide which stories go on the homepage, the users do. 
+http://digg.com/|title|digg +https://www.w3.org/community/hydra/wiki/Main_Page|creationDate|2015-02-19 +https://www.w3.org/community/hydra/wiki/Main_Page|tag|http://www.semanlink.net/tag/wiki +https://www.w3.org/community/hydra/wiki/Main_Page|tag|http://www.semanlink.net/tag/hydra +https://www.w3.org/community/hydra/wiki/Main_Page|title|Hydra Community Group - wiki +https://www.w3.org/community/hydra/wiki/Main_Page|creationTime|2015-02-19T12:45:27Z +http://wiki.surf.nl/display/vp/4.3+'InContext'+Visualiser|creationDate|2012-05-21 +http://wiki.surf.nl/display/vp/4.3+'InContext'+Visualiser|tag|http://www.semanlink.net/tag/rdf_data_visualization +http://wiki.surf.nl/display/vp/4.3+'InContext'+Visualiser|title|'InContext' Visualiser +http://wiki.surf.nl/display/vp/4.3+'InContext'+Visualiser|creationTime|2012-05-21T17:47:08Z +http://hive.apache.org/|creationDate|2013-03-12 +http://hive.apache.org/|tag|http://www.semanlink.net/tag/apache_hive +http://hive.apache.org/|comment|data warehouse system for Hadoop +http://hive.apache.org/|title|Welcome to Hive! +http://hive.apache.org/|creationTime|2013-03-12T11:31:01Z +http://ourworld.compuserve.com/homepages/rajm/twooesef.htm|creationDate|2009-01-06 +http://ourworld.compuserve.com/homepages/rajm/twooesef.htm|tag|http://www.semanlink.net/tag/reseaux_bayesiens +http://ourworld.compuserve.com/homepages/rajm/twooesef.htm|tag|http://www.semanlink.net/tag/recherche +http://ourworld.compuserve.com/homepages/rajm/twooesef.htm|tag|http://www.semanlink.net/tag/statistics +http://ourworld.compuserve.com/homepages/rajm/twooesef.htm|title|Facts versus Factions: the use and abuse of subjectivity in scientific research - PART 2 +http://ourworld.compuserve.com/homepages/rajm/twooesef.htm|creationTime|2009-01-06T22:45:40Z +http://www-128.ibm.com/developerworks/xml/library/x-tiphttp.html|creationDate|2005-10-20 +http://www-128.ibm.com/developerworks/xml/library/x-tiphttp.html|tag|http://www.semanlink.net/tag/wsdl +http://www-128.ibm.com/developerworks/xml/library/x-tiphttp.html|tag|http://www.semanlink.net/tag/web_services +http://www-128.ibm.com/developerworks/xml/library/x-tiphttp.html|tag|http://www.semanlink.net/tag/rest +http://www-128.ibm.com/developerworks/xml/library/x-tiphttp.html|title|Tip: Use XML directly over HTTP for Web services (where appropriate) +http://www.hrw.org/french/reports/rw94/rwandamai94.htm#_1_7|creationDate|2004-04-08 +http://www.hrw.org/french/reports/rw94/rwandamai94.htm#_1_7|tag|http://www.semanlink.net/tag/genocide_rwandais +http://www.hrw.org/french/reports/rw94/rwandamai94.htm#_1_7|source|Human Right Watch +http://local.google.com/maps?q=2203+NE+Alberta+St+Portland&spn=0.007102,0.006802&t=k&hl=fr|creationDate|2005-04-15 +http://local.google.com/maps?q=2203+NE+Alberta+St+Portland&spn=0.007102,0.006802&t=k&hl=fr|tag|http://www.semanlink.net/tag/vito +http://local.google.com/maps?q=2203+NE+Alberta+St+Portland&spn=0.007102,0.006802&t=k&hl=fr|tag|http://www.semanlink.net/tag/google_maps +http://local.google.com/maps?q=2203+NE+Alberta+St+Portland&spn=0.007102,0.006802&t=k&hl=fr|title|Ciao Vito (google maps) +http://mobile.nytimes.com/2015/09/21/business/a-huge-overnight-increase-in-a-drugs-price-raises-protests.html?_r=0|creationDate|2015-09-21 +http://mobile.nytimes.com/2015/09/21/business/a-huge-overnight-increase-in-a-drugs-price-raises-protests.html?_r=0|tag|http://www.semanlink.net/tag/vive_le_capitalisme 
+http://mobile.nytimes.com/2015/09/21/business/a-huge-overnight-increase-in-a-drugs-price-raises-protests.html?_r=0|tag|http://www.semanlink.net/tag/medicaments
+http://mobile.nytimes.com/2015/09/21/business/a-huge-overnight-increase-in-a-drugs-price-raises-protests.html?_r=0|title|Drug Goes From $13.50 a Tablet to $750, Overnight - NYTimes.com
+http://mobile.nytimes.com/2015/09/21/business/a-huge-overnight-increase-in-a-drugs-price-raises-protests.html?_r=0|creationTime|2015-09-21T10:55:40Z
+http://archeo.blog.lemonde.fr/2014/01/28/les-resurrections-de-la-peste-de-lantiquite-au-moyen-age/|creationDate|2014-01-29
+http://archeo.blog.lemonde.fr/2014/01/28/les-resurrections-de-la-peste-de-lantiquite-au-moyen-age/|tag|http://www.semanlink.net/tag/antiquite
+http://archeo.blog.lemonde.fr/2014/01/28/les-resurrections-de-la-peste-de-lantiquite-au-moyen-age/|tag|http://www.semanlink.net/tag/peste
+http://archeo.blog.lemonde.fr/2014/01/28/les-resurrections-de-la-peste-de-lantiquite-au-moyen-age/|tag|http://www.semanlink.net/tag/moyen_age
+http://archeo.blog.lemonde.fr/2014/01/28/les-resurrections-de-la-peste-de-lantiquite-au-moyen-age/|title|Les résurrections de la peste, de l’Antiquité au Moyen Âge Dans les pas des archéologues
+http://archeo.blog.lemonde.fr/2014/01/28/les-resurrections-de-la-peste-de-lantiquite-au-moyen-age/|creationTime|2014-01-29T22:40:33Z
+https://www.oreilly.com/ideas/machine-learning-in-the-wild|creationDate|2015-12-30
+https://www.oreilly.com/ideas/machine-learning-in-the-wild|tag|http://www.semanlink.net/tag/control_theory
+https://www.oreilly.com/ideas/machine-learning-in-the-wild|tag|http://www.semanlink.net/tag/machine_learning
+https://www.oreilly.com/ideas/machine-learning-in-the-wild|title|Machine learning in the wild: A bridge between robust control and reinforcement learning.
+https://www.oreilly.com/ideas/machine-learning-in-the-wild|creationTime|2015-12-30T20:12:08Z
+https://motherboard.vice.com/en_us/article/qv955p/3d-human-mini-brain-made-of-stem-cells-can-live-for-months|creationDate|2018-10-20
+https://motherboard.vice.com/en_us/article/qv955p/3d-human-mini-brain-made-of-stem-cells-can-live-for-months|tag|http://www.semanlink.net/tag/stem_cell
+https://motherboard.vice.com/en_us/article/qv955p/3d-human-mini-brain-made-of-stem-cells-can-live-for-months|tag|http://www.semanlink.net/tag/cerveau
+https://motherboard.vice.com/en_us/article/qv955p/3d-human-mini-brain-made-of-stem-cells-can-live-for-months|title|This 3D Human 'Mini-Brain' Is Made of Stem Cells and Can Live For Months - Motherboard
+https://motherboard.vice.com/en_us/article/qv955p/3d-human-mini-brain-made-of-stem-cells-can-live-for-months|creationTime|2018-10-20T15:27:05Z
+http://bitworking.org/news/193/Do-we-need-WADL|creationDate|2014-10-22
+http://bitworking.org/news/193/Do-we-need-WADL|tag|http://www.semanlink.net/tag/hateoas
+http://bitworking.org/news/193/Do-we-need-WADL|tag|http://www.semanlink.net/tag/rest
+http://bitworking.org/news/193/Do-we-need-WADL|tag|http://www.semanlink.net/tag/api
+http://bitworking.org/news/193/Do-we-need-WADL|comment|Yes, people want to describe interfaces, and those descriptions are brittle. If I download a WADL and compile my client today it will break tomorrow when you change the service. If, instead, you use hypertext, link following and request construction based on the hypertext and client state, then the interface won't break when you change servers, or URI structures.
+http://bitworking.org/news/193/Do-we-need-WADL|title|Do we need WADL? BitWorking
+http://bitworking.org/news/193/Do-we-need-WADL|creationTime|2014-10-22T01:19:07Z
+http://erik.eae.net/archives/2005/05/27/18.55.22/|creationDate|2006-02-07
+http://erik.eae.net/archives/2005/05/27/18.55.22/|tag|http://www.semanlink.net/tag/encoding
+http://erik.eae.net/archives/2005/05/27/18.55.22/|tag|http://www.semanlink.net/tag/ajax
+http://erik.eae.net/archives/2005/05/27/18.55.22/|tag|http://www.semanlink.net/tag/javascript
+http://erik.eae.net/archives/2005/05/27/18.55.22/|title|JS, Encoding and XMLHttpRequest
+https://www.theatlantic.com/technology/archive/2016/04/cashless-society/477411/|creationDate|2018-08-02
+https://www.theatlantic.com/technology/archive/2016/04/cashless-society/477411/|tag|http://www.semanlink.net/tag/big_brother
+https://www.theatlantic.com/technology/archive/2016/04/cashless-society/477411/|tag|http://www.semanlink.net/tag/money
+https://www.theatlantic.com/technology/archive/2016/04/cashless-society/477411/|title|How a Cashless Society Could Embolden Big Brother - The Atlantic
+https://www.theatlantic.com/technology/archive/2016/04/cashless-society/477411/|creationTime|2018-08-02T21:30:21Z
+https://ruben.verborgh.org/blog/2017/12/20/paradigm-shifts-for-the-decentralized-web/|creationDate|2017-12-21
+https://ruben.verborgh.org/blog/2017/12/20/paradigm-shifts-for-the-decentralized-web/|tag|http://www.semanlink.net/tag/re_decentralize_the_web
+https://ruben.verborgh.org/blog/2017/12/20/paradigm-shifts-for-the-decentralized-web/|tag|http://www.semanlink.net/tag/ruben_verborgh
+https://ruben.verborgh.org/blog/2017/12/20/paradigm-shifts-for-the-decentralized-web/|comment|"
+"
+https://ruben.verborgh.org/blog/2017/12/20/paradigm-shifts-for-the-decentralized-web/|title|Paradigm shifts for the decentralized Web Ruben Verborgh
+https://ruben.verborgh.org/blog/2017/12/20/paradigm-shifts-for-the-decentralized-web/|creationTime|2017-12-21T00:42:04Z
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf|creationDate|2012-04-16
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf|tag|http://www.semanlink.net/tag/entity_mining
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf|tag|http://www.semanlink.net/tag/uri_reference
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf|tag|http://www.semanlink.net/tag/good
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf|tag|http://www.semanlink.net/tag/ldow2012
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf|tag|http://www.semanlink.net/tag/identity_crisis_in_linked_data
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf|tag|http://www.semanlink.net/tag/uri_identity
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf|comment|"Entity mining is still a troublesome open problem. In past years many approaches allowed to automate the generation of equivalence links between references using schema matching or various heuristics based on the recognition of similar property values. In contrast, few of them considered the analysis of the network of equivalence links (“equivalence network”) as an indication of the likelihood and strength of the equivalence.
+
+Could a URI reference (URIRef) be thought as exactly “attached” to its referent? Could it make sense to talk about entity “identifiers” or would it be better to talk about more ambiguous “references”, i.e., placeholders for any model that satisfies the formal semantics of the Semantic Web (Hayes)? Booth observes that the aforementioned question, which in the past has been often regarded as fundamental in the debate about identity on the Web, is relatively unimportant. As long as an entity, identified by whatsoever URIRef, is associated to at least one description containing machine understandable information, this information can be automatically processed and used by applications.
+"
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf|title|A Spectrometry of Linked Data
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-15.pdf|creationTime|2012-04-16T16:23:34Z
+https://guillaumegenthial.github.io/serving.html|creationDate|2018-05-21
+https://guillaumegenthial.github.io/serving.html|tag|http://www.semanlink.net/tag/guillaume_genthial
+https://guillaumegenthial.github.io/serving.html|tag|http://www.semanlink.net/tag/flask
+https://guillaumegenthial.github.io/serving.html|title|Serving a model with Flask
+https://guillaumegenthial.github.io/serving.html|creationTime|2018-05-21T12:05:53Z
+http://fr.wikipedia.org/wiki/Roger_II_de_Sicile|creationDate|2015-04-02
+http://fr.wikipedia.org/wiki/Roger_II_de_Sicile|tag|http://www.semanlink.net/tag/tolerance
+http://fr.wikipedia.org/wiki/Roger_II_de_Sicile|tag|http://www.semanlink.net/tag/sicile
+http://fr.wikipedia.org/wiki/Roger_II_de_Sicile|tag|http://www.semanlink.net/tag/bon_chef_d_etat
+http://fr.wikipedia.org/wiki/Roger_II_de_Sicile|title|Roger II de Sicile
+http://fr.wikipedia.org/wiki/Roger_II_de_Sicile|creationTime|2015-04-02T22:17:21Z
+http://videolectures.net/andrew_ng/|creationDate|2014-10-06
+http://videolectures.net/andrew_ng/|tag|http://www.semanlink.net/tag/ng
+http://videolectures.net/andrew_ng/|title|Andrew Ng - Computer Science Department, Stanford University - VideoLectures.NET
+http://videolectures.net/andrew_ng/|creationTime|2014-10-06T00:43:14Z
+http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2_TimBL_v3.html#(1)|creationDate|2009-01-15
+http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2_TimBL_v3.html#(1)|tag|http://www.semanlink.net/tag/linked_data
+http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2_TimBL_v3.html#(1)|tag|http://www.semanlink.net/tag/kingsley_idehen
+http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2_TimBL_v3.html#(1)|tag|http://www.semanlink.net/tag/slides
+http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2_TimBL_v3.html#(1)|title|Creating, Deploying and Exploiting Linked Data
+http://virtuoso.openlinksw.com/presentations/Creating_Deploying_Exploiting_Linked_Data2/Creating_Deploying_Exploiting_Linked_Data2_TimBL_v3.html#(1)|creationTime|2009-01-15T18:40:49Z
+http://www.lemonde.fr/sciences/article/2013/01/24/andre-choulika-sculpteur-de-genes_1822310_1650684.html|creationDate|2013-02-01
+http://www.lemonde.fr/sciences/article/2013/01/24/andre-choulika-sculpteur-de-genes_1822310_1650684.html|tag|http://www.semanlink.net/tag/manipulations_genetiques
+http://www.lemonde.fr/sciences/article/2013/01/24/andre-choulika-sculpteur-de-genes_1822310_1650684.html|title|André Choulika, sculpteur de gènes
+http://www.lemonde.fr/sciences/article/2013/01/24/andre-choulika-sculpteur-de-genes_1822310_1650684.html|creationTime|2013-02-01T20:25:59Z +http://www.manageability.org/blog/stuff/java-open-source-social-network|creationDate|2008-05-17 +http://www.manageability.org/blog/stuff/java-open-source-social-network|tag|http://www.semanlink.net/tag/linkto_semanlink +http://www.manageability.org/blog/stuff/java-open-source-social-network|title|Manageability - Open Source Social Networking Applications Written in Java +http://www.manageability.org/blog/stuff/java-open-source-social-network|creationTime|2008-05-17T23:44:38Z +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_and_con|creationDate|2019-05-28 +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_and_con|tag|http://www.semanlink.net/tag/microsoft_concept_graph +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_and_con|title|Microsoft Concept Graph and Concept Tagging Release +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_and_con|bookmarkOf|https://concept.research.microsoft.com/Home/Introduction +http://www.semanlink.net/doc/2019/05/microsoft_concept_graph_and_con|creationTime|2019-05-28T16:54:19Z +http://whc.unesco.org/fr/list/1225|creationDate|2011-01-04 +http://whc.unesco.org/fr/list/1225|tag|http://www.semanlink.net/tag/archeologie +http://whc.unesco.org/fr/list/1225|tag|http://www.semanlink.net/tag/sites_du_patrimoine_mondial_de_l_unesco +http://whc.unesco.org/fr/list/1225|tag|http://www.semanlink.net/tag/burkina_faso +http://whc.unesco.org/fr/list/1225|title|Ruines de Loropéni - UNESCO World Heritage Centre +http://whc.unesco.org/fr/list/1225|creationTime|2011-01-04T00:52:28Z +http://www.lemonde.fr/technologies/article/2013/06/12/pourquoi-stocker-toutes-nos-vies-sur-des-serveurs-aux-etats-unis_3428857_651865.html|creationDate|2013-06-13 +http://www.lemonde.fr/technologies/article/2013/06/12/pourquoi-stocker-toutes-nos-vies-sur-des-serveurs-aux-etats-unis_3428857_651865.html|tag|http://www.semanlink.net/tag/prism_surveillance_program +http://www.lemonde.fr/technologies/article/2013/06/12/pourquoi-stocker-toutes-nos-vies-sur-des-serveurs-aux-etats-unis_3428857_651865.html|title|"""Pourquoi stocker toutes nos vies sur des serveurs aux Etats-Unis ?""" +http://www.lemonde.fr/technologies/article/2013/06/12/pourquoi-stocker-toutes-nos-vies-sur-des-serveurs-aux-etats-unis_3428857_651865.html|creationTime|2013-06-13T00:20:26Z +http://www.openlinksw.com/blog/~kidehen/?id=1224|creationDate|2007-06-14 +http://www.openlinksw.com/blog/~kidehen/?id=1224|tag|http://www.semanlink.net/tag/sql_to_rdf_mapping +http://www.openlinksw.com/blog/~kidehen/?id=1224|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.openlinksw.com/blog/~kidehen/?id=1224|tag|http://www.semanlink.net/tag/kingsley_idehen +http://www.openlinksw.com/blog/~kidehen/?id=1224|comment|Actual SQL to RDF Mapping Demo / Experiment +http://www.openlinksw.com/blog/~kidehen/?id=1224|title|Enterprise 0.0, Linked Data, and Semantic Data Web +http://www.openlinksw.com/blog/~kidehen/?id=1224|creationTime|2007-06-14T21:51:49Z +http://dannyayers.com/2006/05/31/system-one-screencast|creationDate|2006-06-08 +http://dannyayers.com/2006/05/31/system-one-screencast|tag|http://www.semanlink.net/tag/knowledge_management +http://dannyayers.com/2006/05/31/system-one-screencast|title|System One screencast +https://www.quora.com/Who-is-doing-interesting-NLP-research-for-low-resource-languages|creationDate|2018-07-03 
+https://www.quora.com/Who-is-doing-interesting-NLP-research-for-low-resource-languages|tag|http://www.semanlink.net/tag/cross_lingual_nlp +https://www.quora.com/Who-is-doing-interesting-NLP-research-for-low-resource-languages|title|Who is doing interesting NLP research for low resource languages? - Quora +https://www.quora.com/Who-is-doing-interesting-NLP-research-for-low-resource-languages|creationTime|2018-07-03T11:14:36Z +https://github.com/antoniogarrote/rdfstore-js#readme|creationDate|2011-08-28 +https://github.com/antoniogarrote/rdfstore-js#readme|tag|http://www.semanlink.net/tag/sparql_en_javascript +https://github.com/antoniogarrote/rdfstore-js#readme|tag|http://www.semanlink.net/tag/javascript_rdf +https://github.com/antoniogarrote/rdfstore-js#readme|tag|http://www.semanlink.net/tag/triplestore +https://github.com/antoniogarrote/rdfstore-js#readme|comment|rdfstore-js is a pure Javascript implementation of a RDF graph store with support for the SPARQL query and data manipulation language. +https://github.com/antoniogarrote/rdfstore-js#readme|title|rdfstore-js +https://github.com/antoniogarrote/rdfstore-js#readme|creationTime|2011-08-28T22:53:43Z +https://www.w3.org/TR/2016/PR-dwbp-20161215/|creationDate|2017-01-14 +https://www.w3.org/TR/2016/PR-dwbp-20161215/|tag|http://www.semanlink.net/tag/data_web +https://www.w3.org/TR/2016/PR-dwbp-20161215/|tag|http://www.semanlink.net/tag/w3c_data_activity +https://www.w3.org/TR/2016/PR-dwbp-20161215/|tag|http://www.semanlink.net/tag/best_practices +https://www.w3.org/TR/2016/PR-dwbp-20161215/|title|Data on the Web Best Practices +https://www.w3.org/TR/2016/PR-dwbp-20161215/|creationTime|2017-01-14T12:41:48Z +http://www.w3.org/TR/rdf-sparql-protocol/|creationDate|2007-10-13 +http://www.w3.org/TR/rdf-sparql-protocol/|tag|http://www.semanlink.net/tag/w3c_recommendation +http://www.w3.org/TR/rdf-sparql-protocol/|tag|http://www.semanlink.net/tag/sparql +http://www.w3.org/TR/rdf-sparql-protocol/|comment|W3C Candidate Recommendation 6 April 2006 +http://www.w3.org/TR/rdf-sparql-protocol/|title|SPARQL Protocol for RDF +http://www.w3.org/TR/rdf-sparql-protocol/|creationTime|2007-10-13T14:46:56Z +http://robohub.org/finding-perfection-in-the-imperfect-applying-darwinian-neuro-evolution-to-robotics/|creationDate|2014-03-26 +http://robohub.org/finding-perfection-in-the-imperfect-applying-darwinian-neuro-evolution-to-robotics/|tag|http://www.semanlink.net/tag/robotique +http://robohub.org/finding-perfection-in-the-imperfect-applying-darwinian-neuro-evolution-to-robotics/|tag|http://www.semanlink.net/tag/artificial_neural_network +http://robohub.org/finding-perfection-in-the-imperfect-applying-darwinian-neuro-evolution-to-robotics/|tag|http://www.semanlink.net/tag/evolution +http://robohub.org/finding-perfection-in-the-imperfect-applying-darwinian-neuro-evolution-to-robotics/|title|Finding perfection in the imperfect: Applying Darwinian neuro-evolution to robotics Robohub +http://robohub.org/finding-perfection-in-the-imperfect-applying-darwinian-neuro-evolution-to-robotics/|creationTime|2014-03-26T10:46:30Z +http://www.activesplit.com/rdfa_project/|creationDate|2012-02-21 +http://www.activesplit.com/rdfa_project/|tag|http://www.semanlink.net/tag/rdfquery +http://www.activesplit.com/rdfa_project/|tag|http://www.semanlink.net/tag/rdfa +http://www.activesplit.com/rdfa_project/|title|RDFa demonstracija +http://www.activesplit.com/rdfa_project/|creationTime|2012-02-21T09:42:07Z 
+https://lejournal.cnrs.fr/articles/un-univers-sans-matiere-noire-0?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1528437890|creationDate|2018-06-08
+https://lejournal.cnrs.fr/articles/un-univers-sans-matiere-noire-0?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1528437890|tag|http://www.semanlink.net/tag/antimatiere
+https://lejournal.cnrs.fr/articles/un-univers-sans-matiere-noire-0?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1528437890|tag|http://www.semanlink.net/tag/matiere_noire
+https://lejournal.cnrs.fr/articles/un-univers-sans-matiere-noire-0?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1528437890|title|Un Univers sans matière noire? CNRS Le journal
+https://lejournal.cnrs.fr/articles/un-univers-sans-matiere-noire-0?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1528437890|creationTime|2018-06-08T14:02:56Z
+http://www.ericandthetrip.com/|creationDate|2016-05-03
+http://www.ericandthetrip.com/|tag|http://www.semanlink.net/tag/eric_baetens
+http://www.ericandthetrip.com/|tag|http://www.semanlink.net/tag/voyage
+http://www.ericandthetrip.com/|title|Eric & The Trip. Plus qu'un voyage, le votre.
+http://www.ericandthetrip.com/|creationTime|2016-05-03T02:09:04Z
+http://motherboard.vice.com/blog/one-last-interview-with-barnaby-jack|creationDate|2013-07-29
+http://motherboard.vice.com/blog/one-last-interview-with-barnaby-jack|tag|http://www.semanlink.net/tag/barnaby_jack
+http://motherboard.vice.com/blog/one-last-interview-with-barnaby-jack|title|Hacker Barnaby Jack Dies Days Before Revealing His Pacemaker Exploit: One Last Interview Motherboard
+http://motherboard.vice.com/blog/one-last-interview-with-barnaby-jack|creationTime|2013-07-29T10:40:40Z
+http://mlexplained.com/2017/12/29/attention-is-all-you-need-explained/|creationDate|2018-10-12
+http://mlexplained.com/2017/12/29/attention-is-all-you-need-explained/|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+http://mlexplained.com/2017/12/29/attention-is-all-you-need-explained/|title|"Paper Dissected: ""Attention is All You Need"" Explained Machine Learning Explained"
+http://mlexplained.com/2017/12/29/attention-is-all-you-need-explained/|creationTime|2018-10-12T19:05:09Z
+http://www.ibm.com/developerworks/xml/library/x-skostaxonomy/index.html|creationDate|2011-05-12
+http://www.ibm.com/developerworks/xml/library/x-skostaxonomy/index.html|tag|http://www.semanlink.net/tag/bob_ducharme
+http://www.ibm.com/developerworks/xml/library/x-skostaxonomy/index.html|tag|http://www.semanlink.net/tag/ibm_developerworks
+http://www.ibm.com/developerworks/xml/library/x-skostaxonomy/index.html|tag|http://www.semanlink.net/tag/skos
+http://www.ibm.com/developerworks/xml/library/x-skostaxonomy/index.html|title|Improve your taxonomy management using the W3C SKOS standard
+http://www.ibm.com/developerworks/xml/library/x-skostaxonomy/index.html|creationTime|2011-05-12T21:53:51Z
+http://faviki.com/|creationDate|2008-09-29
+http://faviki.com/|tag|http://www.semanlink.net/tag/semantic_web
+http://faviki.com/|tag|http://www.semanlink.net/tag/semantic_tagging
+http://faviki.com/|tag|http://www.semanlink.net/tag/wikipedia
+http://faviki.com/|tag|http://www.semanlink.net/tag/social_bookmarking
+http://faviki.com/|tag|http://www.semanlink.net/tag/dbpedia
+http://faviki.com/|comment|Faviki is a tool that brings together social bookmarking and Wikipedia. It lets you bookmark web pages using Wikipedia's terms. In Faviki, everybody uses the same names for tags from the world's largest collection of knowledge!
+http://faviki.com/|title|Faviki - Social bookmarking tool using smart semantic Wikipedia (DBpedia) tags
+http://faviki.com/|creationTime|2008-09-29T13:17:48Z
+http://fusiongrokker.com/post/using-git-as-your-subversion-client|creationDate|2016-03-25
+http://fusiongrokker.com/post/using-git-as-your-subversion-client|tag|http://www.semanlink.net/tag/svn
+http://fusiongrokker.com/post/using-git-as-your-subversion-client|tag|http://www.semanlink.net/tag/git
+http://fusiongrokker.com/post/using-git-as-your-subversion-client|title|Using Git as your Subversion Client • FusionGrokker
+http://fusiongrokker.com/post/using-git-as-your-subversion-client|creationTime|2016-03-25T17:05:17Z
+http://code.google.com/p/rdfquery/issues/detail?id=32|creationDate|2012-09-20
+http://code.google.com/p/rdfquery/issues/detail?id=32|tag|http://www.semanlink.net/tag/memory_leak
+http://code.google.com/p/rdfquery/issues/detail?id=32|tag|http://www.semanlink.net/tag/rdfquery
+http://code.google.com/p/rdfquery/issues/detail?id=32|title|Issue 32 - rdfquery - memory leaks/perfomance issues caused by $.rdf#databanks member (array) - RDF processing in your browser - Google Project Hosting
+http://code.google.com/p/rdfquery/issues/detail?id=32|creationTime|2012-09-20T00:14:05Z
+http://www.w3.org/2001/sw/sweo/public/BusinessCase/Slides.pdf|creationDate|2010-05-31
+http://www.w3.org/2001/sw/sweo/public/BusinessCase/Slides.pdf|tag|http://www.semanlink.net/tag/semantic_web_presentation
+http://www.w3.org/2001/sw/sweo/public/BusinessCase/Slides.pdf|tag|http://www.semanlink.net/tag/semantic_enterprise
+http://www.w3.org/2001/sw/sweo/public/BusinessCase/Slides.pdf|tag|http://www.semanlink.net/tag/slides
+http://www.w3.org/2001/sw/sweo/public/BusinessCase/Slides.pdf|comment|W3C Semantic Web Education and Outreach Interest Group Presentation
+http://www.w3.org/2001/sw/sweo/public/BusinessCase/Slides.pdf|title|Business Case for Semantic Web Technologies (slides)
+http://www.w3.org/2001/sw/sweo/public/BusinessCase/Slides.pdf|creationTime|2010-05-31T09:46:00Z
+http://en.wikipedia.org/wiki/Attack_the_Block|creationDate|2015-06-10
+http://en.wikipedia.org/wiki/Attack_the_Block|tag|http://www.semanlink.net/tag/science_fiction
+http://en.wikipedia.org/wiki/Attack_the_Block|tag|http://www.semanlink.net/tag/film
+http://en.wikipedia.org/wiki/Attack_the_Block|tag|http://www.semanlink.net/tag/comedie
+http://en.wikipedia.org/wiki/Attack_the_Block|comment|2011 British science fiction comedy film
+http://en.wikipedia.org/wiki/Attack_the_Block|title|Attack the Block
+http://en.wikipedia.org/wiki/Attack_the_Block|creationTime|2015-06-10T22:13:42Z
+http://dannyayers.com/2006/04/12/decent-outliner-still|creationDate|2006-04-12
+http://dannyayers.com/2006/04/12/decent-outliner-still|tag|http://www.semanlink.net/tag/semanlink_related
+http://dannyayers.com/2006/04/12/decent-outliner-still|tag|http://www.semanlink.net/tag/danny_ayers
+http://dannyayers.com/2006/04/12/decent-outliner-still|tag|http://www.semanlink.net/tag/outliner
+http://dannyayers.com/2006/04/12/decent-outliner-still|comment|"""Until recently I'd drop little notes into a local Wiki (or this blog), but my Wiki's got to the stage where it needs a lot of gardening, so I've reverted to mostly text notes (Aquamacs emacs). What's lacking is the ability to tag/file these in a useful fashion (to get the benefit of SemWeb tech)."""
+http://dannyayers.com/2006/04/12/decent-outliner-still|title|Decent outliner still wanted
+http://www.offconvex.org/2019/03/19/CURL/|creationDate|2019-03-20
+http://www.offconvex.org/2019/03/19/CURL/|tag|http://www.semanlink.net/tag/word2vec
+http://www.offconvex.org/2019/03/19/CURL/|tag|http://www.semanlink.net/tag/sanjeev_arora
+http://www.offconvex.org/2019/03/19/CURL/|tag|http://www.semanlink.net/tag/embeddings
+http://www.offconvex.org/2019/03/19/CURL/|tag|http://www.semanlink.net/tag/contrastive_self_supervised_learning
+http://www.offconvex.org/2019/03/19/CURL/|comment|"[paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1902.09229).
+
+Why do objectives similar to the one used by word2vec succeed in such diverse settings? (""Contrastive Unsupervised Representation
+Learning"" (CURL): **methods that leverage similar pairs of data points**)
+
+> In contrastive learning the objective used at test time is very different from the training objective: generalization error is not the right
+way to think about this. -> a framework that formalizes the notion of semantic
+similarity that is implicitly used by these algorithms
+
+> **if the unsupervised loss happens to be small at the end of contrastive learning then the resulting
+representations perform well on downstream classification**
+
+"
+http://www.offconvex.org/2019/03/19/CURL/|relatedDoc|https://arxiv.org/abs/1902.09229
+http://www.offconvex.org/2019/03/19/CURL/|title|Contrastive Unsupervised Learning of Semantic Representations: A Theoretical Framework – Off the convex path (2019-03)
+http://www.offconvex.org/2019/03/19/CURL/|creationTime|2019-03-20T16:15:33Z
+https://aeon.co/essays/the-hard-problem-of-consciousness-is-a-distraction-from-the-real-one|creationDate|2017-08-25
+https://aeon.co/essays/the-hard-problem-of-consciousness-is-a-distraction-from-the-real-one|tag|http://www.semanlink.net/tag/prediction
+https://aeon.co/essays/the-hard-problem-of-consciousness-is-a-distraction-from-the-real-one|tag|http://www.semanlink.net/tag/cerveau
+https://aeon.co/essays/the-hard-problem-of-consciousness-is-a-distraction-from-the-real-one|tag|http://www.semanlink.net/tag/conscience
+https://aeon.co/essays/the-hard-problem-of-consciousness-is-a-distraction-from-the-real-one|tag|http://www.semanlink.net/tag/models_of_consciousness
+https://aeon.co/essays/the-hard-problem-of-consciousness-is-a-distraction-from-the-real-one|comment|In the 19th century, the German polymath Hermann von Helmholtz proposed that the brain is a prediction machine, and that what we see, hear and feel are nothing more than the brain’s best guesses about the causes of its sensory inputs.
+https://aeon.co/essays/the-hard-problem-of-consciousness-is-a-distraction-from-the-real-one|title|The hard problem of consciousness is a distraction from the real one Aeon Essays +https://aeon.co/essays/the-hard-problem-of-consciousness-is-a-distraction-from-the-real-one|creationTime|2017-08-25T15:56:15Z +http://www.nltk.org/|creationDate|2015-10-21 +http://www.nltk.org/|tag|http://www.semanlink.net/tag/nltk +http://www.nltk.org/|title|NLTK (Natural Language Toolkit) - home +http://www.nltk.org/|creationTime|2015-10-21T18:38:33Z +http://stko.geog.ucsb.edu/sw2022/sw2022_paper11.pdf|creationDate|2012-12-01 +http://stko.geog.ucsb.edu/sw2022/sw2022_paper11.pdf|tag|http://www.semanlink.net/tag/sem_web_future +http://stko.geog.ucsb.edu/sw2022/sw2022_paper11.pdf|tag|http://www.semanlink.net/tag/internet_of_things +http://stko.geog.ucsb.edu/sw2022/sw2022_paper11.pdf|tag|http://www.semanlink.net/tag/siri +http://stko.geog.ucsb.edu/sw2022/sw2022_paper11.pdf|comment|"Abstract. In the next 10 years, we will see a Semantic Web that is +infused with a richer set of verbs: the ability not just to represent knowl- +edge about static datasets but the ability to use knowledge to perform +actions or operations. We argue that there are three trends that make +this outcome likely, namely, demand from current web applications (e.g. +Facebook's Like), the ubiquity of Javascript and the increasing instrumentation of the real world. +" +http://stko.geog.ucsb.edu/sw2022/sw2022_paper11.pdf|title|The rise of the verb +http://stko.geog.ucsb.edu/sw2022/sw2022_paper11.pdf|creationTime|2012-12-01T13:43:36Z +http://simile.mit.edu/mail/BrowseList?listName=Linking%20Open%20Data&by=thread&from=11547|creationDate|2007-03-20 +http://simile.mit.edu/mail/BrowseList?listName=Linking%20Open%20Data&by=thread&from=11547|tag|http://www.semanlink.net/tag/fps_post +http://simile.mit.edu/mail/BrowseList?listName=Linking%20Open%20Data&by=thread&from=11547|tag|http://www.semanlink.net/tag/linked_data +http://simile.mit.edu/mail/BrowseList?listName=Linking%20Open%20Data&by=thread&from=11547|tag|http://www.semanlink.net/tag/rdf_forms +http://simile.mit.edu/mail/BrowseList?listName=Linking%20Open%20Data&by=thread&from=11547|tag|http://www.semanlink.net/tag/lod_mailing_list +http://simile.mit.edu/mail/BrowseList?listName=Linking%20Open%20Data&by=thread&from=11547|comment|"Beside ""href"" links, forms are an important feature of the hypertext web. How +does this transpose to the web of data? Shouldn't there be a standardized way to +""include forms"" in RDF data? 
+ + +" +http://simile.mit.edu/mail/BrowseList?listName=Linking%20Open%20Data&by=thread&from=11547|title|[Linking-open-data] Forms in the web of data +http://simile.mit.edu/mail/BrowseList?listName=Linking%20Open%20Data&by=thread&from=11547|creationTime|2007-03-20T21:20:59Z +http://www.digg.com/|creationDate|2005-05-18 +http://www.digg.com/|tag|http://www.semanlink.net/tag/social_bookmarking +http://www.digg.com/|title|digg +http://www.digg.com/|creationTime|2005-05-18T22:00:00Z +http://nuevomedio.com/tepuy-vox/tepuys.html|creationDate|2005-11-10 +http://nuevomedio.com/tepuy-vox/tepuys.html|tag|http://www.semanlink.net/tag/tepuys +http://nuevomedio.com/tepuy-vox/tepuys.html|title|Tepuys y sus Saltos de Agua +http://winch5.blog.lemonde.fr/2013/08/23/commencez-a-lire-le-livre-de-winch5/|creationDate|2013-08-25 +http://winch5.blog.lemonde.fr/2013/08/23/commencez-a-lire-le-livre-de-winch5/|tag|http://www.semanlink.net/tag/winch5 +http://winch5.blog.lemonde.fr/2013/08/23/commencez-a-lire-le-livre-de-winch5/|title|Commencez à lire le Livre de Winch5 Winch 5 +http://winch5.blog.lemonde.fr/2013/08/23/commencez-a-lire-le-livre-de-winch5/|creationTime|2013-08-25T12:47:25Z +http://journal.dajobe.org/journal/|creationDate|2008-04-08 +http://journal.dajobe.org/journal/|tag|http://www.semanlink.net/tag/david_beckett +http://journal.dajobe.org/journal/|title|Dave Beckett - Journalblog +http://journal.dajobe.org/journal/|creationTime|2008-04-08T11:48:41Z +http://www.mnot.net/blog/2005/08/13/excel_microformats|creationDate|2007-07-06 +http://www.mnot.net/blog/2005/08/13/excel_microformats|tag|http://www.semanlink.net/tag/grddl +http://www.mnot.net/blog/2005/08/13/excel_microformats|tag|http://www.semanlink.net/tag/mapping_data_from_spreadsheets_to_rdf +http://www.mnot.net/blog/2005/08/13/excel_microformats|tag|http://www.semanlink.net/tag/excel +http://www.mnot.net/blog/2005/08/13/excel_microformats|tag|http://www.semanlink.net/tag/microformats +http://www.mnot.net/blog/2005/08/13/excel_microformats|title|mnot’s Web log: Adding Semantics to Excel with Microformats and GRDDL +http://www.mnot.net/blog/2005/08/13/excel_microformats|creationTime|2007-07-06T22:40:07Z +http://bugbrother.blog.lemonde.fr/2012/12/17/calais-un-etat-policier-en-situation-de-guerre/|creationDate|2012-12-19 +http://bugbrother.blog.lemonde.fr/2012/12/17/calais-un-etat-policier-en-situation-de-guerre/|tag|http://www.semanlink.net/tag/clandestins +http://bugbrother.blog.lemonde.fr/2012/12/17/calais-un-etat-policier-en-situation-de-guerre/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2012/12/17/calais-un-etat-policier-en-situation-de-guerre/|tag|http://www.semanlink.net/tag/immigration +http://bugbrother.blog.lemonde.fr/2012/12/17/calais-un-etat-policier-en-situation-de-guerre/|tag|http://www.semanlink.net/tag/etat_policier +http://bugbrother.blog.lemonde.fr/2012/12/17/calais-un-etat-policier-en-situation-de-guerre/|title|Calais : un « État policier en situation de guerre » BUG BROTHER +http://bugbrother.blog.lemonde.fr/2012/12/17/calais-un-etat-policier-en-situation-de-guerre/|creationTime|2012-12-19T00:01:50Z +http://semtechbiznyc2012.semanticweb.com/sessionPop.cfm?confid=68&proposalid=5022|creationDate|2012-07-30 +http://semtechbiznyc2012.semanticweb.com/sessionPop.cfm?confid=68&proposalid=5022|tag|http://www.semanlink.net/tag/knowledge_representation +http://semtechbiznyc2012.semanticweb.com/sessionPop.cfm?confid=68&proposalid=5022|tag|http://www.semanlink.net/tag/chris_welty 
+http://semtechbiznyc2012.semanticweb.com/sessionPop.cfm?confid=68&proposalid=5022|tag|http://www.semanlink.net/tag/i_b_m_s_watson +http://semtechbiznyc2012.semanticweb.com/sessionPop.cfm?confid=68&proposalid=5022|tag|http://www.semanlink.net/tag/semantic_technology +http://semtechbiznyc2012.semanticweb.com/sessionPop.cfm?confid=68&proposalid=5022|comment|In the traditional vision of AI, understanding flowed from perception through language to knowledge. It had always been envisioned that this understanding would be in some precise and unambiguous knowledge representation, and that all meaning processing would happen in this representation. This is the root of all semantic technology today. However, over time, the failure of the AI community to achieve this end-to-end vision made many, especially those in NLP, question the endpoint. In other words, to doubt the value of semantic technology. In this talk, we show that it was the vision, not the technology, that deserved to be doubted. Semantic technology has significant value in accomplishing tasks that require understanding, but it is not the endpoint. +http://semtechbiznyc2012.semanticweb.com/sessionPop.cfm?confid=68&proposalid=5022|title|Watson Goes Back to School - And what it tells us about the evolving role of semantic technology +http://semtechbiznyc2012.semanticweb.com/sessionPop.cfm?confid=68&proposalid=5022|creationTime|2012-07-30T23:59:28Z +http://www.armadillo.fr|creationDate|2011-01-23 +http://www.armadillo.fr|tag|http://www.semanlink.net/tag/semweb_pro_2011 +http://www.armadillo.fr|tag|http://www.semanlink.net/tag/ged +http://www.armadillo.fr|tag|http://www.semanlink.net/tag/triplestore +http://www.armadillo.fr|tag|http://www.semanlink.net/tag/multimedia +http://www.armadillo.fr|title|Armadillo - Gestion documentaire multimédia +http://www.armadillo.fr|creationTime|2011-01-23T23:21:20Z +http://www.armadillo.fr|seeAlso|http://fr.linkedin.com/pub/laurent-bel/1a/339/4b6 +http://blog.someben.com/2013/01/hashing-lang/#footnote3|creationDate|2016-01-08 +http://blog.someben.com/2013/01/hashing-lang/#footnote3|tag|http://www.semanlink.net/tag/feature_hashing +http://blog.someben.com/2013/01/hashing-lang/#footnote3|title|Hashing Language Some Ben? +http://blog.someben.com/2013/01/hashing-lang/#footnote3|creationTime|2016-01-08T11:48:50Z +http://schema.rdfs.org/faq.html|creationDate|2011-06-16 +http://schema.rdfs.org/faq.html|tag|http://www.semanlink.net/tag/schema_org +http://schema.rdfs.org/faq.html|tag|http://www.semanlink.net/tag/faq +http://schema.rdfs.org/faq.html|title|schema.rdfs.org - FAQ +http://schema.rdfs.org/faq.html|creationTime|2011-06-16T00:10:38Z +https://www.researchgate.net/post/Are_there_any_efficient_stemming_algorithms_in_addition_to_the_Porter_and_Carry_algorithms|creationDate|2017-06-28 +https://www.researchgate.net/post/Are_there_any_efficient_stemming_algorithms_in_addition_to_the_Porter_and_Carry_algorithms|tag|http://www.semanlink.net/tag/stemming +https://www.researchgate.net/post/Are_there_any_efficient_stemming_algorithms_in_addition_to_the_Porter_and_Carry_algorithms|tag|http://www.semanlink.net/tag/nlp_french +https://www.researchgate.net/post/Are_there_any_efficient_stemming_algorithms_in_addition_to_the_Porter_and_Carry_algorithms|title|Are there any efficient stemming algorithms in addition to the Porter and Carry algorithms? 
+https://www.researchgate.net/post/Are_there_any_efficient_stemming_algorithms_in_addition_to_the_Porter_and_Carry_algorithms|creationTime|2017-06-28T16:57:04Z +https://www.quora.com/What-should-I-do-to-increase-my-skills-in-deep-learning|creationDate|2017-08-16 +https://www.quora.com/What-should-I-do-to-increase-my-skills-in-deep-learning|tag|http://www.semanlink.net/tag/deep_learning +https://www.quora.com/What-should-I-do-to-increase-my-skills-in-deep-learning|title|What should I do to increase my skills in deep learning? - Quora +https://www.quora.com/What-should-I-do-to-increase-my-skills-in-deep-learning|creationTime|2017-08-16T10:36:57Z +http://www.ibm.com/developerworks/web/library/wa-rdf/|creationDate|2011-09-15 +http://www.ibm.com/developerworks/web/library/wa-rdf/|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.ibm.com/developerworks/web/library/wa-rdf/|tag|http://www.semanlink.net/tag/drupal_rdf +http://www.ibm.com/developerworks/web/library/wa-rdf/|title|The Semantic Web, Linked Data and Drupal, Part 1: Expose your data using RDF +http://www.ibm.com/developerworks/web/library/wa-rdf/|creationTime|2011-09-15T13:55:44Z +http://www.openlinksw.com:80/blog/~kidehen/index.vspx?page=&id=1144|creationDate|2007-05-21 +http://www.openlinksw.com:80/blog/~kidehen/index.vspx?page=&id=1144|tag|http://www.semanlink.net/tag/kingsley_idehen +http://www.openlinksw.com:80/blog/~kidehen/index.vspx?page=&id=1144|tag|http://www.semanlink.net/tag/linked_data +http://www.openlinksw.com:80/blog/~kidehen/index.vspx?page=&id=1144|comment|Exemples de connexion à des sources de données RDF via ajax (ne marche pas avec Safari) +http://www.openlinksw.com:80/blog/~kidehen/index.vspx?page=&id=1144|title|"Hello Data Web (Take 3 - Feel The ""RDF"" Force)" +http://www.openlinksw.com:80/blog/~kidehen/index.vspx?page=&id=1144|creationTime|2007-05-21T23:27:32Z +http://scikit-learn.org/stable/modules/svm.html|creationDate|2016-01-11 +http://scikit-learn.org/stable/modules/svm.html|tag|http://www.semanlink.net/tag/support_vector_machine +http://scikit-learn.org/stable/modules/svm.html|tag|http://www.semanlink.net/tag/scikit_learn +http://scikit-learn.org/stable/modules/svm.html|title|Support Vector Machines — scikit-learn documentation +http://scikit-learn.org/stable/modules/svm.html|creationTime|2016-01-11T17:20:26Z +https://www.technologyreview.com/s/604087/the-dark-secret-at-the-heart-of-ai/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review|creationDate|2017-12-31 +https://www.technologyreview.com/s/604087/the-dark-secret-at-the-heart-of-ai/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review|tag|http://www.semanlink.net/tag/ai_black_box +https://www.technologyreview.com/s/604087/the-dark-secret-at-the-heart-of-ai/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review|comment|> No one really knows how the most advanced algorithms do what they do. That could be a problem. 
+https://www.technologyreview.com/s/604087/the-dark-secret-at-the-heart-of-ai/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review|title|The Dark Secret at the Heart of AI - MIT Technology Review +https://www.technologyreview.com/s/604087/the-dark-secret-at-the-heart-of-ai/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review|creationTime|2017-12-31T10:51:53Z +http://www.cavdar.net/2008/07/21/junit-4-in-60-seconds/|creationDate|2009-04-25 +http://www.cavdar.net/2008/07/21/junit-4-in-60-seconds/|tag|http://www.semanlink.net/tag/junit +http://www.cavdar.net/2008/07/21/junit-4-in-60-seconds/|tag|http://www.semanlink.net/tag/tutorial +http://www.cavdar.net/2008/07/21/junit-4-in-60-seconds/|title|JUnit 4 in 60 Seconds at cavdar.net +http://www.cavdar.net/2008/07/21/junit-4-in-60-seconds/|creationTime|2009-04-25T03:41:29Z +https://arxiv.org/abs/1804.04526|creationDate|2018-04-15 +https://arxiv.org/abs/1804.04526|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1804.04526|tag|http://www.semanlink.net/tag/knowledge_graph +https://arxiv.org/abs/1804.04526|tag|http://www.semanlink.net/tag/rdf +https://arxiv.org/abs/1804.04526|arxiv_author|Elena Demidova +https://arxiv.org/abs/1804.04526|arxiv_author|Simon Gottschalk +https://arxiv.org/abs/1804.04526|comment|690 thousand contemporary and historical events and over 2.3 million temporal relations +https://arxiv.org/abs/1804.04526|title|[1804.04526] EventKG: A Multilingual Event-Centric Temporal Knowledge Graph +https://arxiv.org/abs/1804.04526|creationTime|2018-04-15T08:43:10Z +https://arxiv.org/abs/1804.04526|arxiv_summary|"One of the key requirements to facilitate semantic analytics of information +regarding contemporary and historical events on the Web, in the news and in +social media is the availability of reference knowledge repositories containing +comprehensive representations of events and temporal relations. Existing +knowledge graphs, with popular examples including DBpedia, YAGO and Wikidata, +focus mostly on entity-centric information and are insufficient in terms of +their coverage and completeness with respect to events and temporal relations. +EventKG presented in this paper is a multilingual event-centric temporal +knowledge graph that addresses this gap. EventKG incorporates over 690 thousand +contemporary and historical events and over 2.3 million temporal relations +extracted from several large-scale knowledge graphs and semi-structured sources +and makes them available through a canonical representation." +https://arxiv.org/abs/1804.04526|arxiv_firstAuthor|Simon Gottschalk +https://arxiv.org/abs/1804.04526|arxiv_updated|2018-04-12T14:12:48Z +https://arxiv.org/abs/1804.04526|arxiv_title|EventKG: A Multilingual Event-Centric Temporal Knowledge Graph +https://arxiv.org/abs/1804.04526|arxiv_published|2018-04-12T14:12:48Z +https://arxiv.org/abs/1804.04526|arxiv_num|1804.04526 +http://simile.mit.edu/exhibit/|creationDate|2006-12-11 +http://simile.mit.edu/exhibit/|tag|http://www.semanlink.net/tag/simile_exhibit +http://simile.mit.edu/exhibit/|comment|Exhibit is a lightweight structured data publishing framework that lets you create web pages with support for sorting, filtering, and rich visualizations by writing only HTML and optionally some CSS and Javascript code. 
+http://simile.mit.edu/exhibit/|title|SIMILE Exhibit
+http://www.sitepoint.com/blogs/2006/03/15/do-you-know-your-character-encodings/|creationDate|2006-03-28
+http://www.sitepoint.com/blogs/2006/03/15/do-you-know-your-character-encodings/|tag|http://www.semanlink.net/tag/encoding
+http://www.sitepoint.com/blogs/2006/03/15/do-you-know-your-character-encodings/|title|SitePoint Blogs » Do you know your character encodings?
+http://www.libdems.org.uk/latest_news_detail.aspx?title=New_Politics:_Nick_Clegg%27s_speech_on_constitutional_reform&pPK=2c639a58-0da9-40c2-9ea4-3ac96cc7daa3|creationDate|2010-05-25
+http://www.libdems.org.uk/latest_news_detail.aspx?title=New_Politics:_Nick_Clegg%27s_speech_on_constitutional_reform&pPK=2c639a58-0da9-40c2-9ea4-3ac96cc7daa3|tag|http://www.semanlink.net/tag/big_brother
+http://www.libdems.org.uk/latest_news_detail.aspx?title=New_Politics:_Nick_Clegg%27s_speech_on_constitutional_reform&pPK=2c639a58-0da9-40c2-9ea4-3ac96cc7daa3|tag|http://www.semanlink.net/tag/nick_clegg
+http://www.libdems.org.uk/latest_news_detail.aspx?title=New_Politics:_Nick_Clegg%27s_speech_on_constitutional_reform&pPK=2c639a58-0da9-40c2-9ea4-3ac96cc7daa3|comment|And we will end practices that risk making Britain a place where our children grow up so used to their liberty being infringed that they accept it without question.
+http://www.libdems.org.uk/latest_news_detail.aspx?title=New_Politics:_Nick_Clegg%27s_speech_on_constitutional_reform&pPK=2c639a58-0da9-40c2-9ea4-3ac96cc7daa3|title|Nick Clegg's speech on constitutional reform The Liberal Democrats: Latest News Detail
+http://www.libdems.org.uk/latest_news_detail.aspx?title=New_Politics:_Nick_Clegg%27s_speech_on_constitutional_reform&pPK=2c639a58-0da9-40c2-9ea4-3ac96cc7daa3|creationTime|2010-05-25T13:30:43Z
+https://stackoverflow.com/questions/24541786/how-to-debug-stream-map-with-lambda-expressions#|creationDate|2018-09-17
+https://stackoverflow.com/questions/24541786/how-to-debug-stream-map-with-lambda-expressions#|tag|http://www.semanlink.net/tag/debug
+https://stackoverflow.com/questions/24541786/how-to-debug-stream-map-with-lambda-expressions#|tag|http://www.semanlink.net/tag/java_8_lambdas
+https://stackoverflow.com/questions/24541786/how-to-debug-stream-map-with-lambda-expressions#|title|java - How to debug stream().map(...) with lambda expressions? - Stack Overflow
+https://stackoverflow.com/questions/24541786/how-to-debug-stream-map-with-lambda-expressions#|creationTime|2018-09-17T12:35:46Z
+http://lov.okfn.org/dataset/lov/index.html|creationDate|2012-09-05
+http://lov.okfn.org/dataset/lov/index.html|tag|http://www.semanlink.net/tag/lov_linked_open_vocabularies
+http://lov.okfn.org/dataset/lov/index.html|title|(LOV) Linked Open Vocabularies
+http://lov.okfn.org/dataset/lov/index.html|creationTime|2012-09-05T22:52:46Z
+http://cap2018.litislab.fr/slides_AB.pdf|creationDate|2018-10-26
+http://cap2018.litislab.fr/slides_AB.pdf|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation
+http://cap2018.litislab.fr/slides_AB.pdf|tag|http://www.semanlink.net/tag/nlu
+http://cap2018.litislab.fr/slides_AB.pdf|tag|http://www.semanlink.net/tag/nlp_facebook
+http://cap2018.litislab.fr/slides_AB.pdf|tag|http://www.semanlink.net/tag/slides
+http://cap2018.litislab.fr/slides_AB.pdf|tag|http://www.semanlink.net/tag/antoine_bordes
+http://cap2018.litislab.fr/slides_AB.pdf|tag|http://www.semanlink.net/tag/human_like_ai
+http://cap2018.litislab.fr/slides_AB.pdf|tag|http://www.semanlink.net/tag/france_is_ai_2018
+http://cap2018.litislab.fr/slides_AB.pdf|comment|mentions [Building machines that learn and think like people](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1604.00289)
+http://cap2018.litislab.fr/slides_AB.pdf|relatedDoc|https://arxiv.org/abs/1604.00289
+http://cap2018.litislab.fr/slides_AB.pdf|title|Teaching Machines to Understand Natural Language (2018)
+http://cap2018.litislab.fr/slides_AB.pdf|creationTime|2018-10-26T01:45:25Z
+http://science.monstersandcritics.com/news/printer_1168946.php|creationDate|2006-06-06
+http://science.monstersandcritics.com/news/printer_1168946.php|tag|http://www.semanlink.net/tag/meteorite
+http://science.monstersandcritics.com/news/printer_1168946.php|tag|http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien
+http://science.monstersandcritics.com/news/printer_1168946.php|tag|http://www.semanlink.net/tag/antarctique
+http://science.monstersandcritics.com/news/printer_1168946.php|title|Gigantic meteor crater found in Antarctica
+http://www.slaney.org/malcolm/yahoo/Slaney2008-LSHTutorial.pdf|creationDate|2013-04-02
+http://www.slaney.org/malcolm/yahoo/Slaney2008-LSHTutorial.pdf|tag|http://www.semanlink.net/tag/locality_sensitive_hashing
+http://www.slaney.org/malcolm/yahoo/Slaney2008-LSHTutorial.pdf|title|Locality sensitive hashing for finding nearest neighbors
+http://www.slaney.org/malcolm/yahoo/Slaney2008-LSHTutorial.pdf|creationTime|2013-04-02T00:22:43Z
+http://rdf2h.github.io/rdf2h/|creationDate|2018-02-20
+http://rdf2h.github.io/rdf2h/|tag|http://www.semanlink.net/tag/rdf2h_browser
+http://rdf2h.github.io/rdf2h/|title|Try RDF2h in your web browser
+http://rdf2h.github.io/rdf2h/|creationTime|2018-02-20T22:45:56Z
+http://www.amnh.org/nationalcenter/youngnaturalistawards/2011/aidan.html|creationDate|2011-08-20
+http://www.amnh.org/nationalcenter/youngnaturalistawards/2011/aidan.html|tag|http://www.semanlink.net/tag/fibonacci
+http://www.amnh.org/nationalcenter/youngnaturalistawards/2011/aidan.html|tag|http://www.semanlink.net/tag/energie_solaire
+http://www.amnh.org/nationalcenter/youngnaturalistawards/2011/aidan.html|comment|see http://leventtourne.free.fr/livreouvert/NombreOr/phyllotaxie.html
+http://www.amnh.org/nationalcenter/youngnaturalistawards/2011/aidan.html|title|The Secret of the Fibonacci Sequence in Trees
+http://www.amnh.org/nationalcenter/youngnaturalistawards/2011/aidan.html|creationTime|2011-08-20T23:09:24Z
+http://commonground.ca/iss/0401150/percy_schmeiser.shtml|creationDate|2007-10-23
+http://commonground.ca/iss/0401150/percy_schmeiser.shtml|tag|http://www.semanlink.net/tag/monsanto
+http://commonground.ca/iss/0401150/percy_schmeiser.shtml|tag|http://www.semanlink.net/tag/colza_transgenique
+http://commonground.ca/iss/0401150/percy_schmeiser.shtml|tag|http://www.semanlink.net/tag/justice
+http://commonground.ca/iss/0401150/percy_schmeiser.shtml|title|Common Ground - January 2004 - Percy Schmeiser vs. Monsanto by Percy Schmeiser
+http://commonground.ca/iss/0401150/percy_schmeiser.shtml|creationTime|2007-10-23T00:48:13Z
+http://blog.xebia.fr/2009/09/15/servlet-3-0-les-3-points-marquants/#Lexcutionasynchronepourlesarch|creationDate|2011-02-03
+http://blog.xebia.fr/2009/09/15/servlet-3-0-les-3-points-marquants/#Lexcutionasynchronepourlesarch|tag|http://www.semanlink.net/tag/servlet_3_0
+http://blog.xebia.fr/2009/09/15/servlet-3-0-les-3-points-marquants/#Lexcutionasynchronepourlesarch|title|Servlet 3.0, les 3 points marquants Blog Xebia France
+http://blog.xebia.fr/2009/09/15/servlet-3-0-les-3-points-marquants/#Lexcutionasynchronepourlesarch|creationTime|2011-02-03T23:08:27Z
+http://maisouvaleweb.fr/dans-la-disruption-allons-nous-devenir-fous-bernard-stiegler-et-lurgence-dentrer-dans-le-neguanthropocene/|creationDate|2016-06-26
+http://maisouvaleweb.fr/dans-la-disruption-allons-nous-devenir-fous-bernard-stiegler-et-lurgence-dentrer-dans-le-neguanthropocene/|tag|http://www.semanlink.net/tag/disruption
+http://maisouvaleweb.fr/dans-la-disruption-allons-nous-devenir-fous-bernard-stiegler-et-lurgence-dentrer-dans-le-neguanthropocene/|tag|http://www.semanlink.net/tag/bernard_stiegler
+http://maisouvaleweb.fr/dans-la-disruption-allons-nous-devenir-fous-bernard-stiegler-et-lurgence-dentrer-dans-le-neguanthropocene/|title|Dans la disruption, allons-nous devenir fous ? Bernard Stiegler et l'urgence d'entrer dans le néguanthropocène -
+http://maisouvaleweb.fr/dans-la-disruption-allons-nous-devenir-fous-bernard-stiegler-et-lurgence-dentrer-dans-le-neguanthropocene/|creationTime|2016-06-26T01:05:23Z
+http://www.snee.com/bobdc.blog/2007/08/automated-rdfa-output-from-dit.html|creationDate|2011-02-15
+http://www.snee.com/bobdc.blog/2007/08/automated-rdfa-output-from-dit.html|tag|http://www.semanlink.net/tag/bob_ducharme
+http://www.snee.com/bobdc.blog/2007/08/automated-rdfa-output-from-dit.html|tag|http://www.semanlink.net/tag/rdfa
+http://www.snee.com/bobdc.blog/2007/08/automated-rdfa-output-from-dit.html|tag|http://www.semanlink.net/tag/dita
+http://www.snee.com/bobdc.blog/2007/08/automated-rdfa-output-from-dit.html|title|Automated RDFa Output from DITA Open Toolkit - bobdc.blog
+http://www.snee.com/bobdc.blog/2007/08/automated-rdfa-output-from-dit.html|creationTime|2011-02-15T12:18:10Z
+https://medium.com/octavian-ai/deep-learning-with-knowledge-graphs-3df0b469a61a|creationDate|2018-12-09
+https://medium.com/octavian-ai/deep-learning-with-knowledge-graphs-3df0b469a61a|tag|http://www.semanlink.net/tag/knowledge_graph_deep_learning
+https://medium.com/octavian-ai/deep-learning-with-knowledge-graphs-3df0b469a61a|title|Deep Learning with Knowledge Graphs – Octavian – Medium
+https://medium.com/octavian-ai/deep-learning-with-knowledge-graphs-3df0b469a61a|creationTime|2018-12-09T10:31:55Z
+http://capitalism3.com/files/Capitalism_3.0_Peter_Barnes.pdf|creationDate|2014-04-23
+http://capitalism3.com/files/Capitalism_3.0_Peter_Barnes.pdf|tag|http://www.semanlink.net/tag/critique_du_capitalisme
+http://capitalism3.com/files/Capitalism_3.0_Peter_Barnes.pdf|tag|http://www.semanlink.net/tag/capitalisme
+http://capitalism3.com/files/Capitalism_3.0_Peter_Barnes.pdf|title|Capitalism 3.0 - A guide to reclaiming the commons
+http://capitalism3.com/files/Capitalism_3.0_Peter_Barnes.pdf|creationTime|2014-04-23T21:47:22Z
+http://www.businessweek.com/innovate/content/may2006/id20060504_282582.htm|creationDate|2006-05-05
+http://www.businessweek.com/innovate/content/may2006/id20060504_282582.htm|tag|http://www.semanlink.net/tag/automobile
+http://www.businessweek.com/innovate/content/may2006/id20060504_282582.htm|tag|http://www.semanlink.net/tag/web_2_0_businesses
+http://www.businessweek.com/innovate/content/may2006/id20060504_282582.htm|comment|"Zipcar, the urban car-sharing company that now operates in eight U.S. cities, has brought a Webby mindset -- automation, transparency, community -- to the rubber-meets-road business of car rental.
+Zipcar removes human operators from the rental process entirely. Customers sign up online, then reserve cars either through Zipcar's Web site or by calling an automated phone system that recognizes their cell-phone caller ID. The cars are parked in local lots and garages, unattended. Each car has a card reader mounted behind its windshield. If a customer has a reservation for that car during that time slot, the vehicle unlocks when she waves her ""Zipcard"" at it. The keys are inside. +" +http://www.businessweek.com/innovate/content/may2006/id20060504_282582.htm|title|A Self-Service Rental Car +http://linkeddatafragments.org/in-depth/#tpf|creationDate|2015-01-30 +http://linkeddatafragments.org/in-depth/#tpf|tag|http://www.semanlink.net/tag/triple_pattern_fragment +http://linkeddatafragments.org/in-depth/#tpf|title|Triple Pattern Fragment +http://linkeddatafragments.org/in-depth/#tpf|creationTime|2015-01-30T23:24:10Z +http://www.cnrs.fr/insis/recherche/actualites/2017/04/neurone-artificiel.htm?utm_content=buffer15c9e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-04-22 +http://www.cnrs.fr/insis/recherche/actualites/2017/04/neurone-artificiel.htm?utm_content=buffer15c9e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/neuroscience +http://www.cnrs.fr/insis/recherche/actualites/2017/04/neurone-artificiel.htm?utm_content=buffer15c9e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/cnrs +http://www.cnrs.fr/insis/recherche/actualites/2017/04/neurone-artificiel.htm?utm_content=buffer15c9e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/artificial_neurons +http://www.cnrs.fr/insis/recherche/actualites/2017/04/neurone-artificiel.htm?utm_content=buffer15c9e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|CNRS INSIS - Un neurone artificiel mille fois plus économe en énergie qu’un neurone biologique +http://www.cnrs.fr/insis/recherche/actualites/2017/04/neurone-artificiel.htm?utm_content=buffer15c9e&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-04-22T18:24:52Z +https://towardsdatascience.com/gradient-descent-vs-neuroevolution-f907dace010f|creationDate|2018-01-06 +https://towardsdatascience.com/gradient-descent-vs-neuroevolution-f907dace010f|tag|http://www.semanlink.net/tag/gradient_descent +https://towardsdatascience.com/gradient-descent-vs-neuroevolution-f907dace010f|tag|http://www.semanlink.net/tag/neuroevolution +https://towardsdatascience.com/gradient-descent-vs-neuroevolution-f907dace010f|title|Gradient descent vs. neuroevolution +https://towardsdatascience.com/gradient-descent-vs-neuroevolution-f907dace010f|creationTime|2018-01-06T15:24:23Z +http://www.uow.edu.au/arts/sts/bmartin/dissent/documents/AIDS/|creationDate|2006-05-26 +http://www.uow.edu.au/arts/sts/bmartin/dissent/documents/AIDS/|tag|http://www.semanlink.net/tag/origines_du_sida +http://www.uow.edu.au/arts/sts/bmartin/dissent/documents/AIDS/|comment|"One theory of the origin of AIDS is that it developed from contaminated vaccines (cultured on monkey kidneys) used in the world's first mass immunisation for polio, in Congo, between 1957 and 1960. 
+
+"
+http://www.uow.edu.au/arts/sts/bmartin/dissent/documents/AIDS/|title|Polio vaccines and the origin of AIDS
+http://robohub.org/how-friendly-is-your-ai-it-depends-on-the-rewards/|creationDate|2016-01-09
+http://robohub.org/how-friendly-is-your-ai-it-depends-on-the-rewards/|tag|http://www.semanlink.net/tag/google_deepmind
+http://robohub.org/how-friendly-is-your-ai-it-depends-on-the-rewards/|tag|http://www.semanlink.net/tag/multiagent_ai
+http://robohub.org/how-friendly-is-your-ai-it-depends-on-the-rewards/|title|How friendly is your AI? It depends on the rewards Robohub
+http://robohub.org/how-friendly-is-your-ai-it-depends-on-the-rewards/|creationTime|2016-01-09T00:50:37Z
+http://meta.wikimedia.org/wiki/Wikidata/Notes/Data_model_primer|creationDate|2013-09-12
+http://meta.wikimedia.org/wiki/Wikidata/Notes/Data_model_primer|tag|http://www.semanlink.net/tag/wikidata
+http://meta.wikimedia.org/wiki/Wikidata/Notes/Data_model_primer|title|Wikidata/Data model primer
+http://meta.wikimedia.org/wiki/Wikidata/Notes/Data_model_primer|creationTime|2013-09-12T00:02:23Z
+https://stackoverflow.com/questions/15206695/how-is-reader-mode-in-firefox-triggered|creationDate|2017-05-27
+https://stackoverflow.com/questions/15206695/how-is-reader-mode-in-firefox-triggered|tag|http://www.semanlink.net/tag/reader_mode_browsers
+https://stackoverflow.com/questions/15206695/how-is-reader-mode-in-firefox-triggered|tag|http://www.semanlink.net/tag/firefox
+https://stackoverflow.com/questions/15206695/how-is-reader-mode-in-firefox-triggered|tag|http://www.semanlink.net/tag/stack_overflow
+https://stackoverflow.com/questions/15206695/how-is-reader-mode-in-firefox-triggered|title|android - How is 'reader mode' in Firefox triggered? - Stack Overflow
+https://stackoverflow.com/questions/15206695/how-is-reader-mode-in-firefox-triggered|creationTime|2017-05-27T13:07:29Z
+https://www.meetup.com/fr-FR/Paris-NLP/events/xzstdqyxmbjc/|creationDate|2018-09-27
+https://www.meetup.com/fr-FR/Paris-NLP/events/xzstdqyxmbjc/|tag|http://www.semanlink.net/tag/guillaume_lample
+https://www.meetup.com/fr-FR/Paris-NLP/events/xzstdqyxmbjc/|tag|http://www.semanlink.net/tag/unsupervised_machine_translation
+https://www.meetup.com/fr-FR/Paris-NLP/events/xzstdqyxmbjc/|tag|http://www.semanlink.net/tag/nlp_facebook
+https://www.meetup.com/fr-FR/Paris-NLP/events/xzstdqyxmbjc/|tag|http://www.semanlink.net/tag/paris_nlp_meetup
+https://www.meetup.com/fr-FR/Paris-NLP/events/xzstdqyxmbjc/|title|Paris NLP Season 3 Meetup #1 Meetup
+https://www.meetup.com/fr-FR/Paris-NLP/events/xzstdqyxmbjc/|creationTime|2018-09-27T11:29:18Z
+http://www.arte.tv/fr/videos/046598-000-A/le-sable-enquete-sur-une-disparition|creationDate|2017-08-01
+http://www.arte.tv/fr/videos/046598-000-A/le-sable-enquete-sur-une-disparition|tag|http://www.semanlink.net/tag/documentaire_tv
+http://www.arte.tv/fr/videos/046598-000-A/le-sable-enquete-sur-une-disparition|tag|http://www.semanlink.net/tag/sable
+http://www.arte.tv/fr/videos/046598-000-A/le-sable-enquete-sur-une-disparition|title|Le sable - Enquête sur une disparition ARTE+7
+http://www.arte.tv/fr/videos/046598-000-A/le-sable-enquete-sur-une-disparition|creationTime|2017-08-01T19:01:59Z
+http://www.bbc.com/news/science-environment-43115485|creationDate|2018-02-22
+http://www.bbc.com/news/science-environment-43115485|tag|http://www.semanlink.net/tag/migrations_humaines
+http://www.bbc.com/news/science-environment-43115485|tag|http://www.semanlink.net/tag/neolithique
+http://www.bbc.com/news/science-environment-43115485|tag|http://www.semanlink.net/tag/histoire_anglaise
+http://www.bbc.com/news/science-environment-43115485|title|Ancient Britons 'replaced' by newcomers
+http://www.bbc.com/news/science-environment-43115485|creationTime|2018-02-22T00:12:56Z
+http://www.manageability.org/blog/stuff/nail-in-soaps-coffin|creationDate|2005-10-13
+http://www.manageability.org/blog/stuff/nail-in-soaps-coffin|tag|http://www.semanlink.net/tag/soap_vs_rest
+http://www.manageability.org/blog/stuff/nail-in-soaps-coffin|title|More Nails For SOAP's Coffin
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|creationDate|2019-05-31
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|tag|http://www.semanlink.net/tag/constraint_satisfaction_problem
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|arxiv_author|Priya L. Donti
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|arxiv_author|Bryan Wilder
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|arxiv_author|Zico Kolter
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|arxiv_author|Po-Wei Wang
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|title|[1905.12149] SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|bookmarkOf|https://arxiv.org/abs/1905.12149
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|creationTime|2019-05-31T10:38:41Z
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|arxiv_summary|"Integrating logical reasoning within deep learning architectures has been a
+major goal of modern AI systems. In this paper, we propose a new direction
+toward this goal by introducing a differentiable (smoothed) maximum
+satisfiability (MAXSAT) solver that can be integrated into the loop of larger
+deep learning systems. Our (approximate) solver is based upon a fast coordinate
+descent approach to solving the semidefinite program (SDP) associated with the
+MAXSAT problem. We show how to analytically differentiate through the solution
+to this SDP and efficiently solve the associated backward pass. We demonstrate
+that by integrating this solver into end-to-end learning systems, we can learn
+the logical structure of challenging problems in a minimally supervised
+fashion. In particular, we show that we can learn the parity function using
+single-bit supervision (a traditionally hard task for deep networks) and learn
+how to play 9x9 Sudoku solely from examples. We also solve a ""visual Sudoku""
+problem that maps images of Sudoku puzzles to their associated logical
+solutions by combining our MAXSAT solver with a traditional convolutional
+architecture. Our approach thus shows promise in integrating logical structures
+within deep learning."
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|arxiv_firstAuthor|Po-Wei Wang
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|arxiv_updated|2019-05-29T00:47:35Z
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|arxiv_title|SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|arxiv_published|2019-05-29T00:47:35Z
+http://www.semanlink.net/doc/2019/05/_1905_12149_satnet_bridging_d|arxiv_num|1905.12149
+http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/|creationDate|2017-09-10
+http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/|tag|http://www.semanlink.net/tag/word2vec
+http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/|tag|http://www.semanlink.net/tag/tutorial
+http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/|tag|http://www.semanlink.net/tag/negative_sampling
+http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/|comment|the tweaks to make training feasible
+http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/|title|Word2Vec Tutorial Part 2 - Negative Sampling · Chris McCormick
+http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/|creationTime|2017-09-10T17:23:52Z
+http://phonetics.ucla.edu/appendix/languages/orowin/orowin.html|creationDate|2005-12-16
+http://phonetics.ucla.edu/appendix/languages/orowin/orowin.html|tag|http://www.semanlink.net/tag/disparition_de_langues_vivantes
+http://phonetics.ucla.edu/appendix/languages/orowin/orowin.html|comment|A language spoken in Brazil. There are only four known speakers. Oro Win has a bilabial trill, preceded by a dental stop, forming a single unit.
+http://phonetics.ucla.edu/appendix/languages/orowin/orowin.html|title|Oro-Win +http://wiki.blojsom.com/wiki/display/blojsom/blojsom+Quickstart|creationDate|2006-02-04 +http://wiki.blojsom.com/wiki/display/blojsom/blojsom+Quickstart|tag|http://www.semanlink.net/tag/blojsom +http://wiki.blojsom.com/wiki/display/blojsom/blojsom+Quickstart|title|blojsom Quickstart +http://www.bbc.co.uk/news/science-environment-23288620|creationDate|2013-07-15 +http://www.bbc.co.uk/news/science-environment-23288620|tag|http://www.semanlink.net/tag/chene +http://www.bbc.co.uk/news/science-environment-23288620|tag|http://www.semanlink.net/tag/maladie +http://www.bbc.co.uk/news/science-environment-23288620|tag|http://www.semanlink.net/tag/foret +http://www.bbc.co.uk/news/science-environment-23288620|title|BBC News - Deadly oak disease 'spreading' in UK +http://www.bbc.co.uk/news/science-environment-23288620|creationTime|2013-07-15T10:25:00Z +http://passeurdesciences.blog.lemonde.fr/2013/02/03/physique-la-masse-se-mesure-aussi-en-secondes/|creationDate|2013-02-03 +http://passeurdesciences.blog.lemonde.fr/2013/02/03/physique-la-masse-se-mesure-aussi-en-secondes/|tag|http://www.semanlink.net/tag/de_broglie +http://passeurdesciences.blog.lemonde.fr/2013/02/03/physique-la-masse-se-mesure-aussi-en-secondes/|tag|http://www.semanlink.net/tag/einstein +http://passeurdesciences.blog.lemonde.fr/2013/02/03/physique-la-masse-se-mesure-aussi-en-secondes/|tag|http://www.semanlink.net/tag/mesure_du_temps +http://passeurdesciences.blog.lemonde.fr/2013/02/03/physique-la-masse-se-mesure-aussi-en-secondes/|tag|http://www.semanlink.net/tag/mecanique_quantique +http://passeurdesciences.blog.lemonde.fr/2013/02/03/physique-la-masse-se-mesure-aussi-en-secondes/|title|La masse se mesure aussi en secondes Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2013/02/03/physique-la-masse-se-mesure-aussi-en-secondes/|creationTime|2013-02-03T20:31:57Z +http://www.javascriptkit.com/javatutors/loadjavascriptcss.shtml|creationDate|2008-02-07 +http://www.javascriptkit.com/javatutors/loadjavascriptcss.shtml|tag|http://www.semanlink.net/tag/ajax +http://www.javascriptkit.com/javatutors/loadjavascriptcss.shtml|tag|http://www.semanlink.net/tag/javascript +http://www.javascriptkit.com/javatutors/loadjavascriptcss.shtml|tag|http://www.semanlink.net/tag/howto +http://www.javascriptkit.com/javatutors/loadjavascriptcss.shtml|tag|http://www.semanlink.net/tag/css +http://www.javascriptkit.com/javatutors/loadjavascriptcss.shtml|title|Dynamically loading an external JavaScript or CSS file +http://www.javascriptkit.com/javatutors/loadjavascriptcss.shtml|creationTime|2008-02-07T18:33:25Z +http://bigbrowser.blog.lemonde.fr/2012/09/06/presse-cheetah-le-robot-qui-court-plus-vite-quusain-bolt/|creationDate|2012-09-07 +http://bigbrowser.blog.lemonde.fr/2012/09/06/presse-cheetah-le-robot-qui-court-plus-vite-quusain-bolt/|tag|http://www.semanlink.net/tag/robotique +http://bigbrowser.blog.lemonde.fr/2012/09/06/presse-cheetah-le-robot-qui-court-plus-vite-quusain-bolt/|title|Cheetah, le robot qui court plus vite qu’Usain Bolt Big Browser +http://bigbrowser.blog.lemonde.fr/2012/09/06/presse-cheetah-le-robot-qui-court-plus-vite-quusain-bolt/|creationTime|2012-09-07T08:14:59Z +http://www.lemonde.fr/sciences/article/2014/11/14/philae-debut-du-forage-incertitudes-sur-l-energie-du-robot_4523823_1650684.html|creationDate|2014-11-15 
+http://www.lemonde.fr/sciences/article/2014/11/14/philae-debut-du-forage-incertitudes-sur-l-energie-du-robot_4523823_1650684.html|tag|http://www.semanlink.net/tag/philae
+http://www.lemonde.fr/sciences/article/2014/11/14/philae-debut-du-forage-incertitudes-sur-l-energie-du-robot_4523823_1650684.html|title|Philae a pu transmettre les données de son forage avant de couper le contact
+http://www.lemonde.fr/sciences/article/2014/11/14/philae-debut-du-forage-incertitudes-sur-l-energie-du-robot_4523823_1650684.html|creationTime|2014-11-15T13:11:36Z
+http://bugbrother.blog.lemonde.fr/2013/06/05/du-droit-a-violer-la-vie-privee-des-internautes-au-foyer/|creationDate|2013-06-08
+http://bugbrother.blog.lemonde.fr/2013/06/05/du-droit-a-violer-la-vie-privee-des-internautes-au-foyer/|tag|http://www.semanlink.net/tag/vie_privee
+http://bugbrother.blog.lemonde.fr/2013/06/05/du-droit-a-violer-la-vie-privee-des-internautes-au-foyer/|tag|http://www.semanlink.net/tag/lobby
+http://bugbrother.blog.lemonde.fr/2013/06/05/du-droit-a-violer-la-vie-privee-des-internautes-au-foyer/|tag|http://www.semanlink.net/tag/privacy_and_internet
+http://bugbrother.blog.lemonde.fr/2013/06/05/du-droit-a-violer-la-vie-privee-des-internautes-au-foyer/|tag|http://www.semanlink.net/tag/bug_brother
+http://bugbrother.blog.lemonde.fr/2013/06/05/du-droit-a-violer-la-vie-privee-des-internautes-au-foyer/|title|Du droit à violer la vie privée des internautes au foyer BUG BROTHER
+http://bugbrother.blog.lemonde.fr/2013/06/05/du-droit-a-violer-la-vie-privee-des-internautes-au-foyer/|creationTime|2013-06-08T09:35:58Z
+http://demo.citizen-dan.org/conStruct/explorer|creationDate|2012-05-10
+http://demo.citizen-dan.org/conStruct/explorer|tag|http://www.semanlink.net/tag/graph_visualization
+http://demo.citizen-dan.org/conStruct/explorer|tag|http://www.semanlink.net/tag/openstructs
+http://demo.citizen-dan.org/conStruct/explorer|title|A Community Instance of the Open Semantic Framework
+http://demo.citizen-dan.org/conStruct/explorer|creationTime|2012-05-10T00:46:55Z
+https://query.wikidata.org/|creationDate|2015-09-08
+https://query.wikidata.org/|tag|http://www.semanlink.net/tag/wikidata_query_service
+https://query.wikidata.org/|title|Wikidata Query Service
+https://query.wikidata.org/|creationTime|2015-09-08T23:42:22Z
+http://ruder.io/text-classification-tensorflow-estimators/|creationDate|2018-04-17
+http://ruder.io/text-classification-tensorflow-estimators/|tag|http://www.semanlink.net/tag/tensorflow
+http://ruder.io/text-classification-tensorflow-estimators/|tag|http://www.semanlink.net/tag/nlp_text_classification
+http://ruder.io/text-classification-tensorflow-estimators/|tag|http://www.semanlink.net/tag/sebastian_ruder
+http://ruder.io/text-classification-tensorflow-estimators/|title|Text Classification with TensorFlow Estimators
+http://ruder.io/text-classification-tensorflow-estimators/|creationTime|2018-04-17T14:19:22Z
+http://www.technorati.com/help/tags.html|creationDate|2005-06-02
+http://www.technorati.com/help/tags.html|tag|http://www.semanlink.net/tag/technorati
+http://www.technorati.com/help/tags.html|title|Technorati: Using Technorati Tags
+http://json-ld.org/playground/|creationDate|2014-11-23
+http://json-ld.org/playground/|tag|http://www.semanlink.net/tag/json_ld
+http://json-ld.org/playground/|comment|"to play with Hydra, use this one"
+http://json-ld.org/playground/|title|JSON-LD Playground
+http://json-ld.org/playground/|creationTime|2014-11-23T15:00:10Z
+http://linkedopencommerce.com/|creationDate|2011-06-23
+http://linkedopencommerce.com/|tag|http://www.semanlink.net/tag/goodrelations +http://linkedopencommerce.com/|tag|http://www.semanlink.net/tag/virtuoso +http://linkedopencommerce.com/|title|LOC - The Linked Open Commerce Dataspace +http://linkedopencommerce.com/|creationTime|2011-06-23T16:20:52Z +http://www.openlinksw.com/weblog/oerling/?id=1471|creationDate|2009-02-16 +http://www.openlinksw.com/weblog/oerling/?id=1471|tag|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.openlinksw.com/weblog/oerling/?id=1471|tag|http://www.semanlink.net/tag/orri_erling +http://www.openlinksw.com/weblog/oerling/?id=1471|tag|http://www.semanlink.net/tag/benchmark +http://www.openlinksw.com/weblog/oerling/?id=1471|comment|" 1. mapping relational to RDF, where possible, is faster than triple storage; and
+ 2. the equivalent relational solution can be some 10x faster than the pure triples representation. +" +http://www.openlinksw.com/weblog/oerling/?id=1471|title|ISWC 2008: The Scalable Knowledge Systems Workshop +http://www.openlinksw.com/weblog/oerling/?id=1471|creationTime|2009-02-16T17:04:42Z +http://www.mkbergman.com/?p=457|creationDate|2008-10-06 +http://www.mkbergman.com/?p=457|tag|http://www.semanlink.net/tag/linking_open_data +http://www.mkbergman.com/?p=457|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/?p=457|comment|Class-level Mappings Now Generalize Semantic Web Connectivity +http://www.mkbergman.com/?p=457|title|A New Constellation in the Linking Open Data (LOD) Sky » AI3:::Adaptive Information +http://www.mkbergman.com/?p=457|creationTime|2008-10-06T22:40:07Z +http://bugbrother.blog.lemonde.fr/2012/06/10/je-nai-pas-le-droit-de-lire-le-livre-que-jai-achete/|creationDate|2012-06-11 +http://bugbrother.blog.lemonde.fr/2012/06/10/je-nai-pas-le-droit-de-lire-le-livre-que-jai-achete/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2012/06/10/je-nai-pas-le-droit-de-lire-le-livre-que-jai-achete/|tag|http://www.semanlink.net/tag/propriete_intellectuelle +http://bugbrother.blog.lemonde.fr/2012/06/10/je-nai-pas-le-droit-de-lire-le-livre-que-jai-achete/|title|Je n’ai pas le droit de lire le livre que j’ai acheté BUG BROTHER +http://bugbrother.blog.lemonde.fr/2012/06/10/je-nai-pas-le-droit-de-lire-le-livre-que-jai-achete/|creationTime|2012-06-11T17:03:00Z +https://papers.nips.cc/paper/4027-label-embedding-trees-for-large-multi-class-tasks|creationDate|2018-03-16 +https://papers.nips.cc/paper/4027-label-embedding-trees-for-large-multi-class-tasks|tag|http://www.semanlink.net/tag/samy_bengio +https://papers.nips.cc/paper/4027-label-embedding-trees-for-large-multi-class-tasks|tag|http://www.semanlink.net/tag/multi_label_classification +https://papers.nips.cc/paper/4027-label-embedding-trees-for-large-multi-class-tasks|comment|> Multi-class classification becomes challenging at test time when the number of classes is very large and testing against every possible class can become computationally infeasible. **This problem can be alleviated by imposing (or learning) a structure over the set of classes**. We propose **an algorithm for learning a tree-structure of classifiers** which, by optimizing the overall tree loss, provides superior accuracy to existing tree labeling methods. We also propose **a method that learns to embed labels in a low dimensional space** that is faster than non-embedding approaches and has superior accuracy to existing embedding approaches. Finally we combine the two ideas resulting in the label embedding tree that outperforms alternative methods including One-vs-Rest while being orders of magnitude faster. 
+https://papers.nips.cc/paper/4027-label-embedding-trees-for-large-multi-class-tasks|title|Label Embedding Trees for Large Multi-Class Tasks (2010) +https://papers.nips.cc/paper/4027-label-embedding-trees-for-large-multi-class-tasks|creationTime|2018-03-16T23:57:54Z +http://www.w3.org/blog/SW/2008/04/15/report_of_the_uncertainty_reasoning_for_|creationDate|2008-04-17 +http://www.w3.org/blog/SW/2008/04/15/report_of_the_uncertainty_reasoning_for_|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/blog/SW/2008/04/15/report_of_the_uncertainty_reasoning_for_|tag|http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web +http://www.w3.org/blog/SW/2008/04/15/report_of_the_uncertainty_reasoning_for_|title|W3C Semantic Web Activity News - Report of the “Uncertainty Reasoning for the World Wide Web” Incubator Group at W3C +http://www.w3.org/blog/SW/2008/04/15/report_of_the_uncertainty_reasoning_for_|creationTime|2008-04-17T13:16:23Z +http://cs.stanford.edu/people/karpathy/convnetjs/index.html|creationDate|2014-12-31 +http://cs.stanford.edu/people/karpathy/convnetjs/index.html|tag|http://www.semanlink.net/tag/stanford +http://cs.stanford.edu/people/karpathy/convnetjs/index.html|tag|http://www.semanlink.net/tag/javascript +http://cs.stanford.edu/people/karpathy/convnetjs/index.html|tag|http://www.semanlink.net/tag/andrej_karpathy +http://cs.stanford.edu/people/karpathy/convnetjs/index.html|tag|http://www.semanlink.net/tag/convolutional_neural_network +http://cs.stanford.edu/people/karpathy/convnetjs/index.html|tag|http://www.semanlink.net/tag/deep_learning +http://cs.stanford.edu/people/karpathy/convnetjs/index.html|comment|ConvNetJS is a Javascript library for training Deep Learning models (mainly Neural Networks) entirely in your browser +http://cs.stanford.edu/people/karpathy/convnetjs/index.html|title|ConvNetJS: Deep Learning in your browser +http://cs.stanford.edu/people/karpathy/convnetjs/index.html|creationTime|2014-12-31T02:07:14Z +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-12.pdf|creationDate|2012-04-16 +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-12.pdf|tag|http://www.semanlink.net/tag/ldow2012 +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-12.pdf|tag|http://www.semanlink.net/tag/linked_data_gui +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-12.pdf|tag|http://www.semanlink.net/tag/semantic_mashups +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-12.pdf|tag|http://www.semanlink.net/tag/data_web +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-12.pdf|title|Interacting with the Web of Data through a Web of Inter-connected Lenses +http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-12.pdf|creationTime|2012-04-16T15:13:44Z +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|creationDate|2019-01-24 +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|tag|http://www.semanlink.net/tag/paris_nlp_meetup +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|tag|http://www.semanlink.net/tag/nlp_long_documents +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|tag|http://www.semanlink.net/tag/bert +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|tag|http://www.semanlink.net/tag/knowledge_graph_completion +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|tag|http://www.semanlink.net/tag/information_extraction 
+https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|tag|http://www.semanlink.net/tag/nlp_juridique +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|tag|http://www.semanlink.net/tag/knowledge_graph_construction +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|tag|http://www.semanlink.net/tag/good +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|tag|http://www.semanlink.net/tag/slides +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|comment|"> Hyperlex is a contract analytics and management solution powered by artificial intelligence. Hyperlex helps companies manage and make the most of their contract portfolio by identifying relevant information and data to manage key contractual commitments. + +> Take-home message: +> +> - Sentence representation starts to be well understood empirically +> - Large document representation is still an open (and interesting) problem! + " +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|title|Romain Vial (Hyperlex) at Paris NLP meetup, slides +https://nlpparis.files.wordpress.com/2019/01/hyperlex_meetup23011.pdf|creationTime|2019-01-24T17:21:48Z +http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/|creationDate|2008-06-04 +http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/|tag|http://www.semanlink.net/tag/semantic_tagging +http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/|tag|http://www.semanlink.net/tag/semantic_desktop +http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/|tag|http://www.semanlink.net/tag/sweo_interest_group +http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/|tag|http://www.semanlink.net/tag/kde +http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/|tag|http://www.semanlink.net/tag/nepomuk +http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/|tag|http://www.semanlink.net/tag/leo_sauermann +http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/|comment|Semantic Web technologies (RDF, RDFS, OWL) are used as a backend metadata architecture in KDE 4.0 to tag, comment, annotate, etc, all files under Linux regardless of their file format, and to initiate corresponding search actions. +http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/|title|Case Study: KDE 4.0 Semantic Desktop Search and Tagging +http://www.w3.org/2001/sw/sweo/public/UseCases/Nepomuk/|creationTime|2008-06-04T23:05:40Z +https://www.wikitribune.com/story/2018/01/05/free_speech/qa-edward-snowden-on-rights-privacy-secrets-and-leaks-in-conversation-with-jimmy-wales/26810/|creationDate|2018-01-05 +https://www.wikitribune.com/story/2018/01/05/free_speech/qa-edward-snowden-on-rights-privacy-secrets-and-leaks-in-conversation-with-jimmy-wales/26810/|tag|http://www.semanlink.net/tag/edward_snowden +https://www.wikitribune.com/story/2018/01/05/free_speech/qa-edward-snowden-on-rights-privacy-secrets-and-leaks-in-conversation-with-jimmy-wales/26810/|comment|"> 'Rights are for the powerless. They’re for the minority. They’re for the different. 
They’re for the weak’ + +" +https://www.wikitribune.com/story/2018/01/05/free_speech/qa-edward-snowden-on-rights-privacy-secrets-and-leaks-in-conversation-with-jimmy-wales/26810/|title|Q&A: Edward Snowden on rights, privacy, secrets and leaks in conversation with Jimmy Wales – Wikitribune +https://www.wikitribune.com/story/2018/01/05/free_speech/qa-edward-snowden-on-rights-privacy-secrets-and-leaks-in-conversation-with-jimmy-wales/26810/|creationTime|2018-01-05T21:07:14Z +http://www.youtube.com/watch?v=o3NJwyzFlTE&playnext=1|creationDate|2007-01-07 +http://www.youtube.com/watch?v=o3NJwyzFlTE&playnext=1|tag|http://www.semanlink.net/tag/musique +http://www.youtube.com/watch?v=o3NJwyzFlTE&playnext=1|tag|http://www.semanlink.net/tag/nelson_mandela +http://www.youtube.com/watch?v=o3NJwyzFlTE&playnext=1|tag|http://www.semanlink.net/tag/youtube +http://www.youtube.com/watch?v=o3NJwyzFlTE&playnext=1|comment|"Free Nelson Mandela! +i-Tunes +" +http://www.youtube.com/watch?v=o3NJwyzFlTE&playnext=1|title|YouTube - Nelson Mandela - Special AKA +http://linkedu.eu/devtalk/?p=29|creationDate|2013-05-08 +http://linkedu.eu/devtalk/?p=29|tag|http://www.semanlink.net/tag/sparql +http://linkedu.eu/devtalk/?p=29|tag|http://www.semanlink.net/tag/r +http://linkedu.eu/devtalk/?p=29|tag|http://www.semanlink.net/tag/open_university +http://linkedu.eu/devtalk/?p=29|title|Showing distribution of Open University course topics with R and SPARQL « LinkedUp DevTalk +http://linkedu.eu/devtalk/?p=29|creationTime|2013-05-08T15:06:09Z +https://colab.research.google.com|creationDate|2018-05-31 +https://colab.research.google.com|tag|http://www.semanlink.net/tag/google_colab +https://colab.research.google.com|title|Colaboratory +https://colab.research.google.com|creationTime|2018-05-31T08:28:27Z +http://poolparty.punkt.at/|creationDate|2010-08-31 +http://poolparty.punkt.at/|tag|http://www.semanlink.net/tag/text_to_semantic_data +http://poolparty.punkt.at/|tag|http://www.semanlink.net/tag/skos_editor +http://poolparty.punkt.at/|tag|http://www.semanlink.net/tag/semantic_search +http://poolparty.punkt.at/|comment|"PoolParty is a thesaurus management system and a SKOS editor for the Semantic Web including text mining and linked data capabilities. The system helps to build and maintain multilingual thesauri providing an easy-to-use interface. PoolParty server provides semantic services to integrate semantic search or recommender systems into systems like CMS, DMS, CRM or Wikis
+But, it is expensive." +http://poolparty.punkt.at/|title|PoolParty » SKOS Thesaurus Management – Semantic Search – Linked Data +http://poolparty.punkt.at/|creationTime|2010-08-31T09:46:57Z +https://arxiv.org/abs/1411.4166|creationDate|2018-02-25 +https://arxiv.org/abs/1411.4166|tag|http://www.semanlink.net/tag/word_embeddings_with_lexical_resources +https://arxiv.org/abs/1411.4166|tag|http://www.semanlink.net/tag/manaal_faruqui +https://arxiv.org/abs/1411.4166|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1411.4166|tag|http://www.semanlink.net/tag/these_irit_renault_biblio_initiale +https://arxiv.org/abs/1411.4166|arxiv_author|Chris Dyer +https://arxiv.org/abs/1411.4166|arxiv_author|Jesse Dodge +https://arxiv.org/abs/1411.4166|arxiv_author|Eduard Hovy +https://arxiv.org/abs/1411.4166|arxiv_author|Manaal Faruqui +https://arxiv.org/abs/1411.4166|arxiv_author|Sujay K. Jauhar +https://arxiv.org/abs/1411.4166|arxiv_author|Noah A. Smith +https://arxiv.org/abs/1411.4166|comment|"Method for refining vector space representations using relational information from semantic lexicons **by encouraging linked words to have similar vector representations**, and it makes no assumptions about how the input vectors were constructed. + +Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a **post-processing step** by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. + +[github](https://github.com/mfaruqui/retrofitting) + +" +https://arxiv.org/abs/1411.4166|title|[1411.4166] Retrofitting Word Vectors to Semantic Lexicons +https://arxiv.org/abs/1411.4166|creationTime|2018-02-25T18:06:07Z +https://arxiv.org/abs/1411.4166|arxiv_summary|"Vector space word representations are learned from distributional information +of words in large corpora. Although such statistics are semantically +informative, they disregard the valuable information that is contained in +semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This +paper proposes a method for refining vector space representations using +relational information from semantic lexicons by encouraging linked words to +have similar vector representations, and it makes no assumptions about how the +input vectors were constructed. Evaluated on a battery of standard lexical +semantic evaluation tasks in several languages, we obtain substantial +improvements starting with a variety of word vector models. Our refinement +method outperforms prior techniques for incorporating semantic lexicons into +the word vector training algorithms." 
+https://arxiv.org/abs/1411.4166|arxiv_firstAuthor|Manaal Faruqui +https://arxiv.org/abs/1411.4166|arxiv_updated|2015-03-22T17:55:20Z +https://arxiv.org/abs/1411.4166|arxiv_title|Retrofitting Word Vectors to Semantic Lexicons +https://arxiv.org/abs/1411.4166|arxiv_published|2014-11-15T17:34:20Z +https://arxiv.org/abs/1411.4166|arxiv_num|1411.4166 +http://pisani.blog.lemonde.fr/pisani/2006/03/aperus_sur_la_t.html|creationDate|2006-03-10 +http://pisani.blog.lemonde.fr/pisani/2006/03/aperus_sur_la_t.html|tag|http://www.semanlink.net/tag/transnets +http://pisani.blog.lemonde.fr/pisani/2006/03/aperus_sur_la_t.html|tag|http://www.semanlink.net/tag/web_2_0 +http://pisani.blog.lemonde.fr/pisani/2006/03/aperus_sur_la_t.html|title|Transnets, des gadgets aux réseaux: Aperçus sur la toile de demain +http://www.guardian.co.uk/technology/2012/apr/18/tim-berners-lee-google-facebook|creationDate|2012-04-18 +http://www.guardian.co.uk/technology/2012/apr/18/tim-berners-lee-google-facebook|tag|http://www.semanlink.net/tag/personal_data +http://www.guardian.co.uk/technology/2012/apr/18/tim-berners-lee-google-facebook|tag|http://www.semanlink.net/tag/facebook +http://www.guardian.co.uk/technology/2012/apr/18/tim-berners-lee-google-facebook|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.guardian.co.uk/technology/2012/apr/18/tim-berners-lee-google-facebook|title|Tim Berners-Lee: demand your data from Google and Facebook Technology guardian.co.uk +http://www.guardian.co.uk/technology/2012/apr/18/tim-berners-lee-google-facebook|creationTime|2012-04-18T10:00:49Z +http://www.lemonde.fr/idees/article/2014/09/16/surmonter-la-crise-politique-grace-a-une-economie-de-la-gratuite_4488343_3232.html|creationDate|2015-01-08 +http://www.lemonde.fr/idees/article/2014/09/16/surmonter-la-crise-politique-grace-a-une-economie-de-la-gratuite_4488343_3232.html|tag|http://www.semanlink.net/tag/economie_de_la_gratuite +http://www.lemonde.fr/idees/article/2014/09/16/surmonter-la-crise-politique-grace-a-une-economie-de-la-gratuite_4488343_3232.html|tag|http://www.semanlink.net/tag/attali +http://www.lemonde.fr/idees/article/2014/09/16/surmonter-la-crise-politique-grace-a-une-economie-de-la-gratuite_4488343_3232.html|tag|http://www.semanlink.net/tag/bernard_maris +http://www.lemonde.fr/idees/article/2014/09/16/surmonter-la-crise-politique-grace-a-une-economie-de-la-gratuite_4488343_3232.html|comment|la vraie dimension de la gauche, c'est de donner à chacun la liberté d'usage de son temps. Pour en faire du bon temps. 
+http://www.lemonde.fr/idees/article/2014/09/16/surmonter-la-crise-politique-grace-a-une-economie-de-la-gratuite_4488343_3232.html|title|Surmonter la crise politique grâce à une économie de la gratuité +http://www.lemonde.fr/idees/article/2014/09/16/surmonter-la-crise-politique-grace-a-une-economie-de-la-gratuite_4488343_3232.html|creationTime|2015-01-08T15:55:43Z +http://www.fiercebiotechit.com/story/big-data-sheds-light-pharmas-small-data-problems/2013-03-27|creationDate|2013-04-02 +http://www.fiercebiotechit.com/story/big-data-sheds-light-pharmas-small-data-problems/2013-03-27|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.fiercebiotechit.com/story/big-data-sheds-light-pharmas-small-data-problems/2013-03-27|tag|http://www.semanlink.net/tag/big_data +http://www.fiercebiotechit.com/story/big-data-sheds-light-pharmas-small-data-problems/2013-03-27|tag|http://www.semanlink.net/tag/industrie_pharmaceutique +http://www.fiercebiotechit.com/story/big-data-sheds-light-pharmas-small-data-problems/2013-03-27|comment|while Big Data is a real problem, it's not a Big Problem. What is a Big Problem... is the challenge of dealing with the diverse variety of (small) data that's needed for decision-making +http://www.fiercebiotechit.com/story/big-data-sheds-light-pharmas-small-data-problems/2013-03-27|title|Big Data sheds light on pharma's 'Small Data' problems - FierceBiotechIT +http://www.fiercebiotechit.com/story/big-data-sheds-light-pharmas-small-data-problems/2013-03-27|creationTime|2013-04-02T14:07:03Z +http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0|creationDate|2012-11-30 +http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0|tag|http://www.semanlink.net/tag/machine_translation +http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0|tag|http://www.semanlink.net/tag/deep_learning +http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0|tag|http://www.semanlink.net/tag/machine_learning +http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0|tag|http://www.semanlink.net/tag/ng +http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0|tag|http://www.semanlink.net/tag/microsoft +http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0|tag|http://www.semanlink.net/tag/speech_recognition +http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0|comment|A voice recognition program translated a speech given by Richard F. Rashid, Microsoft’s top scientist, into Mandarin Chinese. 
+http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0|title|Scientists See Advances in Deep Learning, a Part of Artificial Intelligence - NYTimes.com +http://www.nytimes.com/2012/11/24/science/scientists-see-advances-in-deep-learning-a-part-of-artificial-intelligence.html?pagewanted=all&_r=1&&_r=0|creationTime|2012-11-30T22:48:49Z +https://docs.google.com/presentation/d/1AgpzDNp0z5GVrizQOc_CFZOj7UpttwJDd99fg-JliF8/edit#slide=id.p|creationDate|2013-08-12 +https://docs.google.com/presentation/d/1AgpzDNp0z5GVrizQOc_CFZOj7UpttwJDd99fg-JliF8/edit#slide=id.p|tag|http://www.semanlink.net/tag/wikidata +https://docs.google.com/presentation/d/1AgpzDNp0z5GVrizQOc_CFZOj7UpttwJDd99fg-JliF8/edit#slide=id.p|title|State of Wikidata (Wikimania 2013) - Google Drive +https://docs.google.com/presentation/d/1AgpzDNp0z5GVrizQOc_CFZOj7UpttwJDd99fg-JliF8/edit#slide=id.p|creationTime|2013-08-12T10:43:34Z +http://www.figoblog.org/node/2013|creationDate|2013-03-23 +http://www.figoblog.org/node/2013|tag|http://www.semanlink.net/tag/emmanuelle_bernes +http://www.figoblog.org/node/2013|tag|http://www.semanlink.net/tag/bnf +http://www.figoblog.org/node/2013|tag|http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles +http://www.figoblog.org/node/2013|tag|http://www.semanlink.net/tag/droit_d_auteur +http://www.figoblog.org/node/2013|title|Le droit de ReLIRE Figoblog +http://www.figoblog.org/node/2013|creationTime|2013-03-23T10:05:23Z +http://itu.dk/people/sathi/papers/kctob.pdf|creationDate|2009-01-15 +http://itu.dk/people/sathi/papers/kctob.pdf|tag|http://www.semanlink.net/tag/knowledge_compilation +http://itu.dk/people/sathi/papers/kctob.pdf|title|Knowledge Compilation Properties of Tree-of-BDDs +http://itu.dk/people/sathi/papers/kctob.pdf|creationTime|2009-01-15T22:22:26Z +https://www.iflscience.com/editors-blog/scientists-have-reconstructed-ancient-greek-music-and-you-can-listen-to-it/all/|creationDate|2018-10-22 +https://www.iflscience.com/editors-blog/scientists-have-reconstructed-ancient-greek-music-and-you-can-listen-to-it/all/|tag|http://www.semanlink.net/tag/grece_antique +https://www.iflscience.com/editors-blog/scientists-have-reconstructed-ancient-greek-music-and-you-can-listen-to-it/all/|tag|http://www.semanlink.net/tag/musique +https://www.iflscience.com/editors-blog/scientists-have-reconstructed-ancient-greek-music-and-you-can-listen-to-it/all/|title|Scientists Have Reconstructed Ancient Greek Music And You Can Listen To It IFLScience +https://www.iflscience.com/editors-blog/scientists-have-reconstructed-ancient-greek-music-and-you-can-listen-to-it/all/|creationTime|2018-10-22T16:25:41Z +http://europa.eu/rapid/pressReleasesAction.do?reference=SPEECH/08/317&format=HTML&aged=0&language=EN&guiLanguage=en|creationDate|2008-06-12 +http://europa.eu/rapid/pressReleasesAction.do?reference=SPEECH/08/317&format=HTML&aged=0&language=EN&guiLanguage=en|tag|http://www.semanlink.net/tag/open_standards +http://europa.eu/rapid/pressReleasesAction.do?reference=SPEECH/08/317&format=HTML&aged=0&language=EN&guiLanguage=en|tag|http://www.semanlink.net/tag/commission_europeenne +http://europa.eu/rapid/pressReleasesAction.do?reference=SPEECH/08/317&format=HTML&aged=0&language=EN&guiLanguage=en|comment|"'Where interoperability information is protected as a trade secret, there may be a lot of truth in the saying that the information is valuable because it is secret, rather than being secret because it is valuable... 
we should only standardize when there are demonstrable benefits, and we should not rush to standardize on a particular technology too early... I fail to see the interest of customers in including proprietary technology in standards when there are no clear and demonstrable benefits over non-proprietary alternatives.'""" +http://europa.eu/rapid/pressReleasesAction.do?reference=SPEECH/08/317&format=HTML&aged=0&language=EN&guiLanguage=en|title|Neelie Kroes European Commissioner for Competition Policy - Being open about standards +http://europa.eu/rapid/pressReleasesAction.do?reference=SPEECH/08/317&format=HTML&aged=0&language=EN&guiLanguage=en|creationTime|2008-06-12T23:47:27Z +http://www.technologyreview.com/view/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|creationDate|2015-11-18 +http://www.technologyreview.com/view/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|tag|http://www.semanlink.net/tag/neuroscience +http://www.technologyreview.com/view/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|tag|http://www.semanlink.net/tag/jeff_hawkins +http://www.technologyreview.com/view/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|tag|http://www.semanlink.net/tag/neocortex +http://www.technologyreview.com/view/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|comment|Biologists have long puzzled over why neurons have thousands of synapses. Now neuroscientists have shown they are crucial not just for recognizing patterns but for learning the sequence in which they appear. +http://www.technologyreview.com/view/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|title|Why Neurons Have Thousands of Synapses, A Theory of Sequence Memory in Neocortex - Single Artificial Neuron Taught to Recognize Hundreds of Patterns MIT Technology Review +http://www.technologyreview.com/view/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|creationTime|2015-11-18T17:37:54Z +http://www.ina.fr/pub/divers/video/PUB901507050/france-telecom-coupe-du-monde-image-douaniers.fr.html|creationDate|2009-06-25 +http://www.ina.fr/pub/divers/video/PUB901507050/france-telecom-coupe-du-monde-image-douaniers.fr.html|tag|http://www.semanlink.net/tag/france_telecom +http://www.ina.fr/pub/divers/video/PUB901507050/france-telecom-coupe-du-monde-image-douaniers.fr.html|tag|http://www.semanlink.net/tag/rigolo +http://www.ina.fr/pub/divers/video/PUB901507050/france-telecom-coupe-du-monde-image-douaniers.fr.html|tag|http://www.semanlink.net/tag/publicite +http://www.ina.fr/pub/divers/video/PUB901507050/france-telecom-coupe-du-monde-image-douaniers.fr.html|tag|http://www.semanlink.net/tag/ina +http://www.ina.fr/pub/divers/video/PUB901507050/france-telecom-coupe-du-monde-image-douaniers.fr.html|tag|http://www.semanlink.net/tag/coupe_du_monde_1998 +http://www.ina.fr/pub/divers/video/PUB901507050/france-telecom-coupe-du-monde-image-douaniers.fr.html|title|France telecom coupe du monde +http://www.ina.fr/pub/divers/video/PUB901507050/france-telecom-coupe-du-monde-image-douaniers.fr.html|creationTime|2009-06-25T08:22:31Z +http://blog.thinkphp.de/archives/124-An-alternative-Approach-to-Tagging.html|creationDate|2006-07-12 +http://blog.thinkphp.de/archives/124-An-alternative-Approach-to-Tagging.html|tag|http://www.semanlink.net/tag/semanlink_related +http://blog.thinkphp.de/archives/124-An-alternative-Approach-to-Tagging.html|tag|http://www.semanlink.net/tag/tagging 
+http://blog.thinkphp.de/archives/124-An-alternative-Approach-to-Tagging.html|comment|A method that combines the flexibility of tagging with the search-narrowing power of a deep hierarchy is to combine the tags to an 'instant hierarchy'.
+http://blog.thinkphp.de/archives/124-An-alternative-Approach-to-Tagging.html|title|An alternative Approach to Tagging - ThinkPHP /dev/blog
+http://web.archive.org/web/20001010205701/http://www.hypersolutions.fr/MuseeNiamey/MuseeNiamey.ssi|creationDate|2010-05-17
+http://web.archive.org/web/20001010205701/http://www.hypersolutions.fr/MuseeNiamey/MuseeNiamey.ssi|tag|http://www.semanlink.net/tag/m3_multi_media_museum
+http://web.archive.org/web/20001010205701/http://www.hypersolutions.fr/MuseeNiamey/MuseeNiamey.ssi|tag|http://www.semanlink.net/tag/musee_de_niamey
+http://web.archive.org/web/20001010205701/http://www.hypersolutions.fr/MuseeNiamey/MuseeNiamey.ssi|title|Projet multimedia et internet au Musée de Niamey
+http://web.archive.org/web/20001010205701/http://www.hypersolutions.fr/MuseeNiamey/MuseeNiamey.ssi|creationTime|2010-05-17T12:13:07Z
+http://blog.okfn.org/2012/07/10/announcing-linked-open-vocabularies-lov-enabling-the-vocabulary-commons/|creationDate|2012-07-31
+http://blog.okfn.org/2012/07/10/announcing-linked-open-vocabularies-lov-enabling-the-vocabulary-commons/|tag|http://www.semanlink.net/tag/lov_linked_open_vocabularies
+http://blog.okfn.org/2012/07/10/announcing-linked-open-vocabularies-lov-enabling-the-vocabulary-commons/|title|Announcing: Linked Open Vocabularies (LOV), enabling the vocabulary commons Open Knowledge Foundation Blog
+http://blog.okfn.org/2012/07/10/announcing-linked-open-vocabularies-lov-enabling-the-vocabulary-commons/|creationTime|2012-07-31T09:22:38Z
+http://populaction.com/guinee-mali-la-charte-du-manden-1222-et-si-les-droits-de-lhomme-avaient-ete-inventes-en-afrique/|creationDate|2013-01-07
+http://populaction.com/guinee-mali-la-charte-du-manden-1222-et-si-les-droits-de-lhomme-avaient-ete-inventes-en-afrique/|tag|http://www.semanlink.net/tag/mali
+http://populaction.com/guinee-mali-la-charte-du-manden-1222-et-si-les-droits-de-lhomme-avaient-ete-inventes-en-afrique/|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest
+http://populaction.com/guinee-mali-la-charte-du-manden-1222-et-si-les-droits-de-lhomme-avaient-ete-inventes-en-afrique/|tag|http://www.semanlink.net/tag/droits_de_l_homme
+http://populaction.com/guinee-mali-la-charte-du-manden-1222-et-si-les-droits-de-lhomme-avaient-ete-inventes-en-afrique/|title|Mali-Guinée : la Charte du Manden de 1222… Et si les droits de l’homme avaient été inventés en Afrique ?
+http://populaction.com/guinee-mali-la-charte-du-manden-1222-et-si-les-droits-de-lhomme-avaient-ete-inventes-en-afrique/|creationTime|2013-01-07T22:32:59Z +http://www.seoskeptic.com/what-schema-org-means-for-seo-and-beyond/|creationDate|2011-06-10 +http://www.seoskeptic.com/what-schema-org-means-for-seo-and-beyond/|tag|http://www.semanlink.net/tag/seo +http://www.seoskeptic.com/what-schema-org-means-for-seo-and-beyond/|tag|http://www.semanlink.net/tag/schema_org +http://www.seoskeptic.com/what-schema-org-means-for-seo-and-beyond/|title|What Schema.org Means for SEO and Beyond +http://www.seoskeptic.com/what-schema-org-means-for-seo-and-beyond/|creationTime|2011-06-10T12:04:17Z +http://passeurdesciences.blog.lemonde.fr/2012/09/09/comment-on-peut-mourir-de-trop-se-defendre-contre-une-bacterie/|creationDate|2012-09-10 +http://passeurdesciences.blog.lemonde.fr/2012/09/09/comment-on-peut-mourir-de-trop-se-defendre-contre-une-bacterie/|tag|http://www.semanlink.net/tag/bacteries +http://passeurdesciences.blog.lemonde.fr/2012/09/09/comment-on-peut-mourir-de-trop-se-defendre-contre-une-bacterie/|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://passeurdesciences.blog.lemonde.fr/2012/09/09/comment-on-peut-mourir-de-trop-se-defendre-contre-une-bacterie/|comment|comme seuls les arthropodes femelles passent le parasite à leur progéniture via leurs ovocytes contaminés, chez le cloporte commun, en photo ci-dessus, Wolbachia est capable de... métamorphoser les mâles en femelles. +http://passeurdesciences.blog.lemonde.fr/2012/09/09/comment-on-peut-mourir-de-trop-se-defendre-contre-une-bacterie/|title|Comment on peut mourir de trop se défendre contre une bactérie Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2012/09/09/comment-on-peut-mourir-de-trop-se-defendre-contre-une-bacterie/|creationTime|2012-09-10T08:59:02Z +http://html5doctor.com/html5-custom-data-attributes/|creationDate|2015-06-12 +http://html5doctor.com/html5-custom-data-attributes/|tag|http://www.semanlink.net/tag/webcomponents +http://html5doctor.com/html5-custom-data-attributes/|tag|http://www.semanlink.net/tag/html5 +http://html5doctor.com/html5-custom-data-attributes/|title|HTML5 Custom Data Attributes (data-*) HTML5 Doctor +http://html5doctor.com/html5-custom-data-attributes/|creationTime|2015-06-12T00:40:33Z +http://developers.google.com/speed/pagespeed/insights/|creationDate|2013-08-24 +http://developers.google.com/speed/pagespeed/insights/|tag|http://www.semanlink.net/tag/web_tools +http://developers.google.com/speed/pagespeed/insights/|tag|http://www.semanlink.net/tag/webmasters_google +http://developers.google.com/speed/pagespeed/insights/|comment|Make your web pages fast on all devices. 
+http://developers.google.com/speed/pagespeed/insights/|title|PageSpeed Insights +http://developers.google.com/speed/pagespeed/insights/|creationTime|2013-08-24T01:19:49Z +http://benalman.com/news/2010/11/immediately-invoked-function-expression/|creationDate|2012-10-18 +http://benalman.com/news/2010/11/immediately-invoked-function-expression/|tag|http://www.semanlink.net/tag/javascript_patterns +http://benalman.com/news/2010/11/immediately-invoked-function-expression/|title|Ben Alman » Immediately-Invoked Function Expression (IIFE) +http://benalman.com/news/2010/11/immediately-invoked-function-expression/|creationTime|2012-10-18T17:43:35Z +https://www.reddit.com/r/IPython/comments/27zash/can_i_increase_notebook_cell_width_on_wide_screens/|creationDate|2017-06-30 +https://www.reddit.com/r/IPython/comments/27zash/can_i_increase_notebook_cell_width_on_wide_screens/|tag|http://www.semanlink.net/tag/ipython +https://www.reddit.com/r/IPython/comments/27zash/can_i_increase_notebook_cell_width_on_wide_screens/|title|Can I increase notebook cell width on wide screens? : IPython +https://www.reddit.com/r/IPython/comments/27zash/can_i_increase_notebook_cell_width_on_wide_screens/|creationTime|2017-06-30T16:31:25Z +http://formcept.com/blog/stanbol/|creationDate|2012-06-24 +http://formcept.com/blog/stanbol/|tag|http://www.semanlink.net/tag/apache_stanbol +http://formcept.com/blog/stanbol/|tag|http://www.semanlink.net/tag/tutorial +http://formcept.com/blog/stanbol/|title|Apache Stanbol - How to to create an enhancement engine +http://formcept.com/blog/stanbol/|creationTime|2012-06-24T11:40:28Z +http://www.macintouch.com/|creationDate|2007-11-09 +http://www.macintouch.com/|tag|http://www.semanlink.net/tag/tips +http://www.macintouch.com/|tag|http://www.semanlink.net/tag/news +http://www.macintouch.com/|tag|http://www.semanlink.net/tag/macintosh +http://www.macintouch.com/|title|MacInTouch: timely news and tips about the Apple Macintosh +http://www.macintouch.com/|creationTime|2007-11-09T13:06:46Z +https://www.data.gouv.fr/dataset/emissions-de-co2-et-de-polluants-des-vehicules-commercialises-en-france|creationDate|2014-09-10 +https://www.data.gouv.fr/dataset/emissions-de-co2-et-de-polluants-des-vehicules-commercialises-en-france|tag|http://www.semanlink.net/tag/co_ +https://www.data.gouv.fr/dataset/emissions-de-co2-et-de-polluants-des-vehicules-commercialises-en-france|tag|http://www.semanlink.net/tag/data_gouv_fr +https://www.data.gouv.fr/dataset/emissions-de-co2-et-de-polluants-des-vehicules-commercialises-en-france|title|Emissions de CO2 et de polluants des véhicules commercialisés en France - data.gouv.fr +https://www.data.gouv.fr/dataset/emissions-de-co2-et-de-polluants-des-vehicules-commercialises-en-france|creationTime|2014-09-10T09:47:34Z +http://dannyayers.com/archives/2005/06/28/im-saying-nothing/|creationDate|2005-06-29 +http://dannyayers.com/archives/2005/06/28/im-saying-nothing/|tag|http://www.semanlink.net/tag/rss_extensions +http://dannyayers.com/archives/2005/06/28/im-saying-nothing/|tag|http://www.semanlink.net/tag/rdf_vs_xml +http://dannyayers.com/archives/2005/06/28/im-saying-nothing/|tag|http://www.semanlink.net/tag/rss +http://dannyayers.com/archives/2005/06/28/im-saying-nothing/|comment|I know how I’m going to support these multiple extensions in my own code. I’ll have the RDF model internally and map to it (XSLT for now), taking advantage of subclass/subproperty inference for dealing with the semantic differences between the different formats (as Suzan suggests). 
SPARQL will allow me to query across the diffferent properties out of the box. Best of luck to everyone else. +http://dannyayers.com/archives/2005/06/28/im-saying-nothing/|title|Danny Ayers, Raw Blog - RSS extensions +http://www.html5rocks.com/en/tutorials/developertools/async-call-stack/|creationDate|2015-07-22 +http://www.html5rocks.com/en/tutorials/developertools/async-call-stack/|tag|http://www.semanlink.net/tag/javascript +http://www.html5rocks.com/en/tutorials/developertools/async-call-stack/|tag|http://www.semanlink.net/tag/debug +http://www.html5rocks.com/en/tutorials/developertools/async-call-stack/|title|Debugging Asynchronous JavaScript with Chrome DevTools - HTML5 Rocks +http://www.html5rocks.com/en/tutorials/developertools/async-call-stack/|creationTime|2015-07-22T15:18:34Z +https://www.nytimes.com/2018/10/24/opinion/caetano-veloso-brazil-bolsonaro.html|creationDate|2018-10-25 +https://www.nytimes.com/2018/10/24/opinion/caetano-veloso-brazil-bolsonaro.html|tag|http://www.semanlink.net/tag/bolsonaro +https://www.nytimes.com/2018/10/24/opinion/caetano-veloso-brazil-bolsonaro.html|tag|http://www.semanlink.net/tag/caetano_veloso +https://www.nytimes.com/2018/10/24/opinion/caetano-veloso-brazil-bolsonaro.html|title|Caetano Veloso: Dark Times Are Coming for My Country - The New York Times +https://www.nytimes.com/2018/10/24/opinion/caetano-veloso-brazil-bolsonaro.html|creationTime|2018-10-25T19:51:19Z +http://www.ft.com/cms/s/0/fcfbfd0a-7f64-11dd-a3da-000077b07658.html|creationDate|2008-09-11 +http://www.ft.com/cms/s/0/fcfbfd0a-7f64-11dd-a3da-000077b07658.html|tag|http://www.semanlink.net/tag/petrole +http://www.ft.com/cms/s/0/fcfbfd0a-7f64-11dd-a3da-000077b07658.html|tag|http://www.semanlink.net/tag/bresil +http://www.ft.com/cms/s/0/fcfbfd0a-7f64-11dd-a3da-000077b07658.html|tag|http://www.semanlink.net/tag/lula +http://www.ft.com/cms/s/0/fcfbfd0a-7f64-11dd-a3da-000077b07658.html|title|FT.com / Lula’s new lucre: Brazil may keep full control of offshore oil +http://www.ft.com/cms/s/0/fcfbfd0a-7f64-11dd-a3da-000077b07658.html|creationTime|2008-09-11T13:36:38Z +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/08.pdf|creationDate|2006-12-23 +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/08.pdf|tag|http://www.semanlink.net/tag/ontology_mapping +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/08.pdf|comment|"State of the art languages for +ontology mapping enable to express semantic relations between +homogeneous components of different ontologies, namely they +allow to map concepts into concepts, individuals into individuals, +and properties into properties. Many real cases, however, +highlight the necessity to establish semantic relations between +heterogeneous components. For example to map a concept into +a relation or vice versa." 
+http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/08.pdf|title|Reconciling concepts and relations in heterogeneous ontologies +http://www.semweb.pro/|creationDate|2010-12-06 +http://www.semweb.pro/|tag|http://www.semanlink.net/tag/workshop +http://www.semweb.pro/|tag|http://www.semanlink.net/tag/semantic_web +http://www.semweb.pro/|tag|http://www.semanlink.net/tag/paris +http://www.semweb.pro/|title|SemWeb.Pro 2011 - January 17-18 Paris (SemWeb.Pro) +http://www.semweb.pro/|creationTime|2010-12-06T00:33:12Z +https://github.com/danja/seki|creationDate|2013-08-31 +https://github.com/danja/seki|tag|http://www.semanlink.net/tag/node_js +https://github.com/danja/seki|tag|http://www.semanlink.net/tag/sparql +https://github.com/danja/seki|tag|http://www.semanlink.net/tag/semantic_cms +https://github.com/danja/seki|tag|http://www.semanlink.net/tag/danny_ayers +https://github.com/danja/seki|comment|Seki is a front-end to an independent SPARQL server using node.js +https://github.com/danja/seki|title|danja/seki +https://github.com/danja/seki|creationTime|2013-08-31T22:37:03Z +http://www.fujitsu.com/uk/Images/Linked-data-connecting-and-exploiting-big-data-%28v1.0%29.pdf|creationDate|2012-04-25 +http://www.fujitsu.com/uk/Images/Linked-data-connecting-and-exploiting-big-data-%28v1.0%29.pdf|tag|http://www.semanlink.net/tag/linked_data +http://www.fujitsu.com/uk/Images/Linked-data-connecting-and-exploiting-big-data-%28v1.0%29.pdf|tag|http://www.semanlink.net/tag/big_data +http://www.fujitsu.com/uk/Images/Linked-data-connecting-and-exploiting-big-data-%28v1.0%29.pdf|tag|http://www.semanlink.net/tag/fujitsu +http://www.fujitsu.com/uk/Images/Linked-data-connecting-and-exploiting-big-data-%28v1.0%29.pdf|title|Linked-data-connecting-and-exploiting-big-data-(v1.0).pdf +http://www.fujitsu.com/uk/Images/Linked-data-connecting-and-exploiting-big-data-%28v1.0%29.pdf|creationTime|2012-04-25T14:05:55Z +https://github.com/3Top/word2vec-api|creationDate|2017-06-09 +https://github.com/3Top/word2vec-api|tag|http://www.semanlink.net/tag/github_project +https://github.com/3Top/word2vec-api|tag|http://www.semanlink.net/tag/francois_scharffe +https://github.com/3Top/word2vec-api|tag|http://www.semanlink.net/tag/word2vec +https://github.com/3Top/word2vec-api|tag|http://www.semanlink.net/tag/gensim +https://github.com/3Top/word2vec-api|comment|"Simple web service providing a word embedding API. The methods are based on Gensim Word2Vec implementation.
+List of word2vec datasets +" +https://github.com/3Top/word2vec-api|title|word2vec-api +https://github.com/3Top/word2vec-api|creationTime|2017-06-09T17:24:25Z +http://www.boxmp3.com.br/167439700/24-bai-o-blues-renato-matos.html|creationDate|2014-11-17 +http://www.boxmp3.com.br/167439700/24-bai-o-blues-renato-matos.html|tag|http://www.semanlink.net/tag/renato_matos +http://www.boxmp3.com.br/167439700/24-bai-o-blues-renato-matos.html|tag|http://www.semanlink.net/tag/souvenirs +http://www.boxmp3.com.br/167439700/24-bai-o-blues-renato-matos.html|comment|"vou prometer, a mim mesmo, nunca mais soffrir de tedio
+cantar meu baiao blues sera o menu melhor remedio
+o meu baiao é vermelho embora seja azul é verde é amarelo
+o meu baiao é reggae embora seja blue + +" +http://www.boxmp3.com.br/167439700/24-bai-o-blues-renato-matos.html|title|Baião Blues (Renato Matos - Box MP3 - Baixar músicas grátis +http://www.boxmp3.com.br/167439700/24-bai-o-blues-renato-matos.html|creationTime|2014-11-17T14:16:44Z +https://medium.com/analytics-vidhya/deploy-your-first-deep-learning-model-on-kubernetes-with-python-keras-flask-and-docker-575dc07d9e76|creationDate|2018-10-07 +https://medium.com/analytics-vidhya/deploy-your-first-deep-learning-model-on-kubernetes-with-python-keras-flask-and-docker-575dc07d9e76|tag|http://www.semanlink.net/tag/google_cloud_platform +https://medium.com/analytics-vidhya/deploy-your-first-deep-learning-model-on-kubernetes-with-python-keras-flask-and-docker-575dc07d9e76|tag|http://www.semanlink.net/tag/flask +https://medium.com/analytics-vidhya/deploy-your-first-deep-learning-model-on-kubernetes-with-python-keras-flask-and-docker-575dc07d9e76|tag|http://www.semanlink.net/tag/keras +https://medium.com/analytics-vidhya/deploy-your-first-deep-learning-model-on-kubernetes-with-python-keras-flask-and-docker-575dc07d9e76|tag|http://www.semanlink.net/tag/docker +https://medium.com/analytics-vidhya/deploy-your-first-deep-learning-model-on-kubernetes-with-python-keras-flask-and-docker-575dc07d9e76|tag|http://www.semanlink.net/tag/docker_python +https://medium.com/analytics-vidhya/deploy-your-first-deep-learning-model-on-kubernetes-with-python-keras-flask-and-docker-575dc07d9e76|tag|http://www.semanlink.net/tag/kubernetes +https://medium.com/analytics-vidhya/deploy-your-first-deep-learning-model-on-kubernetes-with-python-keras-flask-and-docker-575dc07d9e76|title|Deploy Your First Deep Learning Model On Kubernetes With Python, Keras, Flask, and Docker +https://medium.com/analytics-vidhya/deploy-your-first-deep-learning-model-on-kubernetes-with-python-keras-flask-and-docker-575dc07d9e76|creationTime|2018-10-07T12:50:42Z +http://sparkjava.com/|creationDate|2017-05-15 +http://sparkjava.com/|tag|http://www.semanlink.net/tag/spark_java_web_framework +http://sparkjava.com/|title|Spark Framework: A tiny Java web framework +http://sparkjava.com/|creationTime|2017-05-15T18:21:16Z +http://en.wikipedia.org/wiki/Double_Indemnity_(film)|creationDate|2011-04-25 +http://en.wikipedia.org/wiki/Double_Indemnity_(film)|tag|http://www.semanlink.net/tag/film_noir +http://en.wikipedia.org/wiki/Double_Indemnity_(film)|tag|http://www.semanlink.net/tag/film_americain +http://en.wikipedia.org/wiki/Double_Indemnity_(film)|comment|"""Assurance sur la mort"" film noir américain de Billy Wilder" +http://en.wikipedia.org/wiki/Double_Indemnity_(film)|title|Double Indemnity +http://en.wikipedia.org/wiki/Double_Indemnity_(film)|creationTime|2011-04-25T23:07:35Z +http://archive.timesonline.co.uk/|creationDate|2008-07-08 +http://archive.timesonline.co.uk/|tag|http://www.semanlink.net/tag/times +http://archive.timesonline.co.uk/|title|Times online +http://archive.timesonline.co.uk/|creationTime|2008-07-08T21:22:22Z +http://www.w3.org/TR/json-ld-api/|creationDate|2014-10-29 +http://www.w3.org/TR/json-ld-api/|tag|http://www.semanlink.net/tag/json_ld +http://www.w3.org/TR/json-ld-api/|tag|http://www.semanlink.net/tag/w3c_recommendation +http://www.w3.org/TR/json-ld-api/|title|JSON-LD 1.0 Processing Algorithms and API +http://www.w3.org/TR/json-ld-api/|creationTime|2014-10-29T00:47:36Z +http://static.flickr.com/23/35123655_661c699b9f.jpg|creationDate|2006-03-30 
+http://static.flickr.com/23/35123655_661c699b9f.jpg|tag|http://www.semanlink.net/tag/chameau +http://static.flickr.com/23/35123655_661c699b9f.jpg|tag|http://www.semanlink.net/tag/maria +http://static.flickr.com/23/35123655_661c699b9f.jpg|tag|http://www.semanlink.net/tag/euphrasie +http://static.flickr.com/23/35123655_661c699b9f.jpg|title|Euphrasie et Maria sont sur un chameau +http://www.viadeo.com/hub/forums/detaildiscussion/?containerId=0022beslm82oak7l&action=messageDetail&messageId=0021ndnqzpanhurn&forumId=00210bj4riztfylb|creationDate|2012-03-05 +http://www.viadeo.com/hub/forums/detaildiscussion/?containerId=0022beslm82oak7l&action=messageDetail&messageId=0021ndnqzpanhurn&forumId=00210bj4riztfylb|tag|http://www.semanlink.net/tag/open_data +http://www.viadeo.com/hub/forums/detaildiscussion/?containerId=0022beslm82oak7l&action=messageDetail&messageId=0021ndnqzpanhurn&forumId=00210bj4riztfylb|tag|http://www.semanlink.net/tag/france +http://www.viadeo.com/hub/forums/detaildiscussion/?containerId=0022beslm82oak7l&action=messageDetail&messageId=0021ndnqzpanhurn&forumId=00210bj4riztfylb|tag|http://www.semanlink.net/tag/linked_data +http://www.viadeo.com/hub/forums/detaildiscussion/?containerId=0022beslm82oak7l&action=messageDetail&messageId=0021ndnqzpanhurn&forumId=00210bj4riztfylb|tag|http://www.semanlink.net/tag/antidot +http://www.viadeo.com/hub/forums/detaildiscussion/?containerId=0022beslm82oak7l&action=messageDetail&messageId=0021ndnqzpanhurn&forumId=00210bj4riztfylb|title|Le plus grand projet Open Data / Linked Data français bientôt ouvert +http://www.viadeo.com/hub/forums/detaildiscussion/?containerId=0022beslm82oak7l&action=messageDetail&messageId=0021ndnqzpanhurn&forumId=00210bj4riztfylb|creationTime|2012-03-05T19:44:44Z +http://hal.ccsd.cnrs.fr/ccsd-00017763/en/|creationDate|2006-10-18 +http://hal.ccsd.cnrs.fr/ccsd-00017763/en/|tag|http://www.semanlink.net/tag/koskas +http://hal.ccsd.cnrs.fr/ccsd-00017763/en/|tag|http://www.semanlink.net/tag/radix_trees +http://hal.ccsd.cnrs.fr/ccsd-00017763/en/|comment|This paper describes a new algorithm2 dealing with databases. This algorithm allow to fully manage a database, but their most natural field of applications is the datawarehouse (OLAP). It lies on a de-normalized representation of the database. The data is stored in thesauruses and radix trees (a hierarchical representation of bitmaps) which have interesting properties. 
+http://hal.ccsd.cnrs.fr/ccsd-00017763/en/|title|A Hierarchical Database Manager +http://www.atmosphere-production.com/ouzbekistan-la-vallee-du-ferghana-details-98.html|creationDate|2010-06-24 +http://www.atmosphere-production.com/ouzbekistan-la-vallee-du-ferghana-details-98.html|tag|http://www.semanlink.net/tag/ouzbekistan +http://www.atmosphere-production.com/ouzbekistan-la-vallee-du-ferghana-details-98.html|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.atmosphere-production.com/ouzbekistan-la-vallee-du-ferghana-details-98.html|tag|http://www.semanlink.net/tag/shoira_otabekova +http://www.atmosphere-production.com/ouzbekistan-la-vallee-du-ferghana-details-98.html|comment|"il y a une très jolie musique +" +http://www.atmosphere-production.com/ouzbekistan-la-vallee-du-ferghana-details-98.html|title|Ouzbekistan la vallée du Ferghana +http://www.atmosphere-production.com/ouzbekistan-la-vallee-du-ferghana-details-98.html|creationTime|2010-06-24T02:25:11Z +http://brondsema.net/blog/index.php/2006/11/25/javascript_rdfparser_from_tabulator|creationDate|2007-07-04 +http://brondsema.net/blog/index.php/2006/11/25/javascript_rdfparser_from_tabulator|tag|http://www.semanlink.net/tag/javascript_rdf_parser +http://brondsema.net/blog/index.php/2006/11/25/javascript_rdfparser_from_tabulator|tag|http://www.semanlink.net/tag/tabulator +http://brondsema.net/blog/index.php/2006/11/25/javascript_rdfparser_from_tabulator|tag|http://www.semanlink.net/tag/sample_code +http://brondsema.net/blog/index.php/2006/11/25/javascript_rdfparser_from_tabulator|title|Dave Brondsema's Blog - Javascript RDFParser from Tabulator +http://brondsema.net/blog/index.php/2006/11/25/javascript_rdfparser_from_tabulator|creationTime|2007-07-04T00:16:34Z +http://web.stanford.edu/class/cs224n/reports/6896582.pdf|creationDate|2018-04-05 +http://web.stanford.edu/class/cs224n/reports/6896582.pdf|tag|http://www.semanlink.net/tag/nlp_stanford +http://web.stanford.edu/class/cs224n/reports/6896582.pdf|tag|http://www.semanlink.net/tag/bi_lstm +http://web.stanford.edu/class/cs224n/reports/6896582.pdf|tag|http://www.semanlink.net/tag/named_entity_recognition +http://web.stanford.edu/class/cs224n/reports/6896582.pdf|title|Exploring neural architectures for NER (CS224N 2018) +http://web.stanford.edu/class/cs224n/reports/6896582.pdf|creationTime|2018-04-05T01:57:50Z +http://linksailor.com/nav|creationDate|2011-04-19 +http://linksailor.com/nav|tag|http://www.semanlink.net/tag/rdf_browser +http://linksailor.com/nav|tag|http://www.semanlink.net/tag/talis_platform +http://linksailor.com/nav|title|LinkSailor +http://linksailor.com/nav|creationTime|2011-04-19T00:13:25Z +http://mith.umd.edu/topic-modeling-in-the-humanities-an-overview/|creationDate|2017-05-19 +http://mith.umd.edu/topic-modeling-in-the-humanities-an-overview/|tag|http://www.semanlink.net/tag/nlp_and_humanities +http://mith.umd.edu/topic-modeling-in-the-humanities-an-overview/|tag|http://www.semanlink.net/tag/topic_modeling +http://mith.umd.edu/topic-modeling-in-the-humanities-an-overview/|title|Topic Modeling in the Humanities: An Overview - Maryland Institute for Technology in the Humanities +http://mith.umd.edu/topic-modeling-in-the-humanities-an-overview/|creationTime|2017-05-19T08:24:26Z +http://www.meta-environment.org/twiki/view/Meta-Environment/ATerms?skin=print|creationDate|2012-02-15 +http://www.meta-environment.org/twiki/view/Meta-Environment/ATerms?skin=print|tag|http://www.semanlink.net/tag/aterm 
+http://www.meta-environment.org/twiki/view/Meta-Environment/ATerms?skin=print|comment|ATerm (short for Annotated Term) is an abstract data type designed for the exchange of tree-like data structures between distributed applications.
+http://www.meta-environment.org/twiki/view/Meta-Environment/ATerms?skin=print|title|The Meta-Environment - ATerms
+http://www.meta-environment.org/twiki/view/Meta-Environment/ATerms?skin=print|creationTime|2012-02-15T00:11:37Z
+http://www.lemonde.fr/vous/article/2013/01/24/demain-c-est-big-brother-qui-conduit_1822135_3238.html|creationDate|2013-01-24
+http://www.lemonde.fr/vous/article/2013/01/24/demain-c-est-big-brother-qui-conduit_1822135_3238.html|tag|http://www.semanlink.net/tag/driverless_car
+http://www.lemonde.fr/vous/article/2013/01/24/demain-c-est-big-brother-qui-conduit_1822135_3238.html|title|Demain, c'est Big Brother qui conduit
+http://www.lemonde.fr/vous/article/2013/01/24/demain-c-est-big-brother-qui-conduit_1822135_3238.html|creationTime|2013-01-24T23:46:31Z
+http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-2-au-quotidien_1132440_3232.html|creationDate|2008-12-18
+http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-2-au-quotidien_1132440_3232.html|tag|http://www.semanlink.net/tag/recherche
+http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-2-au-quotidien_1132440_3232.html|tag|http://www.semanlink.net/tag/universite
+http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-2-au-quotidien_1132440_3232.html|comment|Alors, de grâce, laissons les chercheurs en paix en n'oubliant pas qu'aucune loi n'empêchera leur cerveau de travailler le dimanche.
+http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-2-au-quotidien_1132440_3232.html|title|Chercher et enseigner à l'Université. 2- Au quotidien - Opinions - Le Monde.fr
+http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-2-au-quotidien_1132440_3232.html|creationTime|2008-12-18T22:18:29Z
+http://www.lemonde.fr/opinions/chronique/2008/12/18/chercher-et-enseigner-a-l-universite-2-au-quotidien_1132440_3232.html|source|Le Monde
+http://moat-project.org/|creationDate|2008-01-20
+http://moat-project.org/|tag|http://www.semanlink.net/tag/moat
+http://moat-project.org/|title|MOAT: Meaning Of A Tag
+http://moat-project.org/|creationTime|2008-01-20T15:42:42Z
+http://dossierdoc.typepad.com/descripteurs/2012/05/de-retour-de-websem-pro-2012.html|creationDate|2012-05-15
+http://dossierdoc.typepad.com/descripteurs/2012/05/de-retour-de-websem-pro-2012.html|tag|http://www.semanlink.net/tag/semweb_pro_2012
+http://dossierdoc.typepad.com/descripteurs/2012/05/de-retour-de-websem-pro-2012.html|title|Descripteurs: De retour de SemWeb Pro 2012
+http://dossierdoc.typepad.com/descripteurs/2012/05/de-retour-de-websem-pro-2012.html|creationTime|2012-05-15T09:43:16Z
+http://www.mkbergman.com/?p=291|creationDate|2006-11-07
+http://www.mkbergman.com/?p=291|tag|http://www.semanlink.net/tag/web_tools
+http://www.mkbergman.com/?p=291|tag|http://www.semanlink.net/tag/semantic_web
+http://www.mkbergman.com/?p=291|tag|http://www.semanlink.net/tag/semantic_web_tools
+http://www.mkbergman.com/?p=291|title|AI3 - Comprehensive Listing of 250 Semantic Web Tools
+https://staltz.com/the-web-began-dying-in-2014-heres-how.html|creationDate|2017-11-01
+https://staltz.com/the-web-began-dying-in-2014-heres-how.html|tag|http://www.semanlink.net/tag/the_web_is_dying
+https://staltz.com/the-web-began-dying-in-2014-heres-how.html|comment|The Web may die like most other technologies do: simply by becoming less attractive than newer technologies
+https://staltz.com/the-web-began-dying-in-2014-heres-how.html|title|André Staltz - The Web began dying in 2014, here's how
+https://staltz.com/the-web-began-dying-in-2014-heres-how.html|creationTime|2017-11-01T10:32:13Z
+http://jaranto.blogspot.fr/2012/08/os-x-unable-to-execute-clang-no-such.html|creationDate|2013-05-30
+http://jaranto.blogspot.fr/2012/08/os-x-unable-to-execute-clang-no-such.html|tag|http://www.semanlink.net/tag/installing_apps
+http://jaranto.blogspot.fr/2012/08/os-x-unable-to-execute-clang-no-such.html|tag|http://www.semanlink.net/tag/mac_os_x
+http://jaranto.blogspot.fr/2012/08/os-x-unable-to-execute-clang-no-such.html|tag|http://www.semanlink.net/tag/xcode
+http://jaranto.blogspot.fr/2012/08/os-x-unable-to-execute-clang-no-such.html|title|OS X: unable to execute clang: No such file or directory . Perpetuum Mobile .
+http://jaranto.blogspot.fr/2012/08/os-x-unable-to-execute-clang-no-such.html|creationTime|2013-05-30T02:07:30Z
+https://www.quora.com/What-are-the-semantic-models-except-word2vec-and-what-are-their-benefits|creationDate|2018-08-19
+https://www.quora.com/What-are-the-semantic-models-except-word2vec-and-what-are-their-benefits|tag|http://www.semanlink.net/tag/sentence_embeddings
+https://www.quora.com/What-are-the-semantic-models-except-word2vec-and-what-are-their-benefits|tag|http://www.semanlink.net/tag/survey
+https://www.quora.com/What-are-the-semantic-models-except-word2vec-and-what-are-their-benefits|tag|http://www.semanlink.net/tag/word_embedding
+https://www.quora.com/What-are-the-semantic-models-except-word2vec-and-what-are-their-benefits|title|what are the pros and cons of the various unsupervised word and sentence/ document embedding models? - Quora
+https://www.quora.com/What-are-the-semantic-models-except-word2vec-and-what-are-their-benefits|creationTime|2018-08-19T13:28:39Z
+https://distill.pub/|creationDate|2018-09-09
+https://distill.pub/|tag|http://www.semanlink.net/tag/christopher_olah
+https://distill.pub/|tag|http://www.semanlink.net/tag/ml_nlp_blog
+https://distill.pub/|title|Distill — Latest articles about machine learning
+https://distill.pub/|creationTime|2018-09-09T15:41:18Z
+http://data.blog.lemonde.fr/2014/05/10/null-ctrl-lenquete-inquietante-sur-la-securite-informatique-en-norvege/|creationDate|2014-05-11
+http://data.blog.lemonde.fr/2014/05/10/null-ctrl-lenquete-inquietante-sur-la-securite-informatique-en-norvege/|tag|http://www.semanlink.net/tag/norvege
+http://data.blog.lemonde.fr/2014/05/10/null-ctrl-lenquete-inquietante-sur-la-securite-informatique-en-norvege/|tag|http://www.semanlink.net/tag/securite_informatique
+http://data.blog.lemonde.fr/2014/05/10/null-ctrl-lenquete-inquietante-sur-la-securite-informatique-en-norvege/|title|« Null CTRL », l’enquête inquiétante sur la sécurité informatique en Norvège J'ai du bon data
+http://data.blog.lemonde.fr/2014/05/10/null-ctrl-lenquete-inquietante-sur-la-securite-informatique-en-norvege/|creationTime|2014-05-11T22:56:15Z
+http://webusers.xula.edu/jrotondo/Kingdoms/welcome.html|creationDate|2004-09-27
+http://webusers.xula.edu/jrotondo/Kingdoms/welcome.html|tag|http://www.semanlink.net/tag/songhai
+http://webusers.xula.edu/jrotondo/Kingdoms/welcome.html|tag|http://www.semanlink.net/tag/haoussa
+http://webusers.xula.edu/jrotondo/Kingdoms/welcome.html|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest
+http://webusers.xula.edu/jrotondo/Kingdoms/welcome.html|comment|"""Kingdoms of the Medieval Sudan,"" an electronic exploration of the history of the African states of Songhay, Kanem-Bornu, and Hausaland.
+"
+http://www.mkbergman.com/?p=414|creationDate|2008-02-15
+http://www.mkbergman.com/?p=414|tag|http://www.semanlink.net/tag/rdf_tools
+http://www.mkbergman.com/?p=414|tag|http://www.semanlink.net/tag/information_visualization
+http://www.mkbergman.com/?p=414|tag|http://www.semanlink.net/tag/graph_visualization
+http://www.mkbergman.com/?p=414|tag|http://www.semanlink.net/tag/rdf_data_visualization
+http://www.mkbergman.com/?p=414|title|Large-scale RDF Graph Visualization Tools » AI3:::Adaptive Information
+http://www.mkbergman.com/?p=414|creationTime|2008-02-15T23:25:57Z
+http://www.mkbergman.com/?page_id=346|creationDate|2008-06-26
+http://www.mkbergman.com/?page_id=346|tag|http://www.semanlink.net/tag/semantic_web_tools
+http://www.mkbergman.com/?page_id=346|tag|http://www.semanlink.net/tag/mike_bergman
+http://www.mkbergman.com/?page_id=346|title|Sweet Tools (Sem Web) - Simple Version » AI3:::Adaptive Information
+http://www.mkbergman.com/?page_id=346|creationTime|2008-06-26T22:47:34Z
+http://www2012.wwwconference.org/proceedings/proceedings/p271.pdf|creationDate|2012-04-18
+http://www2012.wwwconference.org/proceedings/proceedings/p271.pdf|tag|http://www.semanlink.net/tag/machine_learning_semantic_web
+http://www2012.wwwconference.org/proceedings/proceedings/p271.pdf|tag|http://www.semanlink.net/tag/linked_data
+http://www2012.wwwconference.org/proceedings/proceedings/p271.pdf|tag|http://www.semanlink.net/tag/yago
+http://www2012.wwwconference.org/proceedings/proceedings/p271.pdf|tag|http://www.semanlink.net/tag/www_2012
+http://www2012.wwwconference.org/proceedings/proceedings/p271.pdf|comment|"RESCAL is a relational learning approach that can be applied to complete knowledge bases in the LOD cloud
+code, etc. "
+http://www2012.wwwconference.org/proceedings/proceedings/p271.pdf|title|Factorizing Yago: scalable machine learning for the sw
+http://www2012.wwwconference.org/proceedings/proceedings/p271.pdf|creationTime|2012-04-18T16:17:13Z
+http://www.dustindiaz.com/css-shorthand/|creationDate|2005-12-02
+http://www.dustindiaz.com/css-shorthand/|tag|http://www.semanlink.net/tag/dev_tips
+http://www.dustindiaz.com/css-shorthand/|tag|http://www.semanlink.net/tag/css
+http://www.dustindiaz.com/css-shorthand/|title|CSS Shorthand Guide
+http://apassant.net/2012/02/20/easy-copy-and-paste-from-the-web-to-latex-with-sparql/|creationDate|2012-02-23
+http://apassant.net/2012/02/20/easy-copy-and-paste-from-the-web-to-latex-with-sparql/|tag|http://www.semanlink.net/tag/alexandre_passant
+http://apassant.net/2012/02/20/easy-copy-and-paste-from-the-web-to-latex-with-sparql/|tag|http://www.semanlink.net/tag/latex
+http://apassant.net/2012/02/20/easy-copy-and-paste-from-the-web-to-latex-with-sparql/|tag|http://www.semanlink.net/tag/sparql
+http://apassant.net/2012/02/20/easy-copy-and-paste-from-the-web-to-latex-with-sparql/|comment|sparqlTeX - a LaTeX class / python script to easily embed SPARQL results into TeX files. Using it, one can directly integrate data from SPARQL endpoints, from RDFa-enabled content (such as the previous talk pages), but also from any microformatted page (any Linkedin profile as they're using hResume) into a LaTeX document. For the last one, I'm using any23 to convert such data into RDF (actually, any23 is used for any file-based query as it extracts RDF from HTML pages even if they're not W3C-valid)
For the last one, I’m using any23 to convert such data into RDF (actually, any23 is used for any file-based query as it extracts RDF from HTML pages even if they’re not W3C-valid) +http://apassant.net/2012/02/20/easy-copy-and-paste-from-the-web-to-latex-with-sparql/|title|apassant.net » Easy “copy and paste” from the Web to LaTeX with SPARQL +http://apassant.net/2012/02/20/easy-copy-and-paste-from-the-web-to-latex-with-sparql/|creationTime|2012-02-23T22:44:13Z +http://www.blogmarks.net/tag/rdf|creationDate|2005-04-26 +http://www.blogmarks.net/tag/rdf|tag|http://www.semanlink.net/tag/blogmarks +http://www.blogmarks.net/tag/rdf|tag|http://www.semanlink.net/tag/rdf +https://twitter.com/benadida/status/1116200296764436480|creationDate|2019-04-11 +https://twitter.com/benadida/status/1116200296764436480|tag|http://www.semanlink.net/tag/wiki +https://twitter.com/benadida/status/1116200296764436480|tag|http://www.semanlink.net/tag/ben_adida +https://twitter.com/benadida/status/1116200296764436480|title|"Ben Adida sur Twitter : ""Alright Twitter friends, I want to host a wiki for an open-source project. MediaWiki? Something else? Any host recommendations? I need clear editorial control with ease of submitting contributions, even small one-offs.""" +https://twitter.com/benadida/status/1116200296764436480|creationTime|2019-04-11T08:07:32Z +http://linkeduniversities.org/lu/|creationDate|2014-02-11 +http://linkeduniversities.org/lu/|tag|http://www.semanlink.net/tag/education_and_linked_data +http://linkeduniversities.org/lu/|tag|http://www.semanlink.net/tag/universite +http://linkeduniversities.org/lu/|title|Linked Universities :: Home +http://linkeduniversities.org/lu/|creationTime|2014-02-11T16:29:22Z +http://www.uni-weimar.de/medien/webis/publications/papers/stein_2005a.pdf|creationDate|2013-05-31 +http://www.uni-weimar.de/medien/webis/publications/papers/stein_2005a.pdf|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.uni-weimar.de/medien/webis/publications/papers/stein_2005a.pdf|tag|http://www.semanlink.net/tag/text_similarity +http://www.uni-weimar.de/medien/webis/publications/papers/stein_2005a.pdf|tag|http://www.semanlink.net/tag/information_retrieval_techniques +http://www.uni-weimar.de/medien/webis/publications/papers/stein_2005a.pdf|title|Fuzzy-Fingerprints for Text-Based Information Retrieval +http://www.uni-weimar.de/medien/webis/publications/papers/stein_2005a.pdf|creationTime|2013-05-31T15:22:46Z +http://fgiasson.com/blog/index.php/2007/05/17/free-text-search-on-musicbrainz-literals-using-virtuoso-rdf-views/|creationDate|2007-05-23 +http://fgiasson.com/blog/index.php/2007/05/17/free-text-search-on-musicbrainz-literals-using-virtuoso-rdf-views/|tag|http://www.semanlink.net/tag/musicbrainz +http://fgiasson.com/blog/index.php/2007/05/17/free-text-search-on-musicbrainz-literals-using-virtuoso-rdf-views/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2007/05/17/free-text-search-on-musicbrainz-literals-using-virtuoso-rdf-views/|title|Free text search on Musicbrainz literals using Virtuoso RDF Views at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2007/05/17/free-text-search-on-musicbrainz-literals-using-virtuoso-rdf-views/|creationTime|2007-05-23T19:52:04Z +http://stefansavev.com/blog/custom-similarity-for-elasticsearch/|creationDate|2018-04-03 +http://stefansavev.com/blog/custom-similarity-for-elasticsearch/|tag|http://www.semanlink.net/tag/elasticsearch 
+http://stefansavev.com/blog/custom-similarity-for-elasticsearch/|tag|http://www.semanlink.net/tag/similarity_queries +http://stefansavev.com/blog/custom-similarity-for-elasticsearch/|title|Custom Similarity for ElasticSearch - Algorithms for Big Data +http://stefansavev.com/blog/custom-similarity-for-elasticsearch/|creationTime|2018-04-03T16:12:21Z +http://www.wired.com/wiredscience/2012/03/dynamic-mercury-geology/?pid=3477|creationDate|2012-04-04 +http://www.wired.com/wiredscience/2012/03/dynamic-mercury-geology/?pid=3477|tag|http://www.semanlink.net/tag/mercure +http://www.wired.com/wiredscience/2012/03/dynamic-mercury-geology/?pid=3477|tag|http://www.semanlink.net/tag/messenger +http://www.wired.com/wiredscience/2012/03/dynamic-mercury-geology/?pid=3477|title|Planet Mercury Even Weirder Than We Thought Wired Science Wired.com +http://www.wired.com/wiredscience/2012/03/dynamic-mercury-geology/?pid=3477|creationTime|2012-04-04T13:20:49Z +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=14215|creationDate|2008-05-15 +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=14215|tag|http://www.semanlink.net/tag/richard_cyganiak +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=14215|tag|http://www.semanlink.net/tag/bernard_vatant +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=14215|tag|http://www.semanlink.net/tag/dbpedia +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=14215|tag|http://www.semanlink.net/tag/linked_data +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=14215|tag|http://www.semanlink.net/tag/semantics_of_skos_concept +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=14215|title|[Linking-open-data] Fw: linking geonames concepts to wikipedia and other concept +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=14215|creationTime|2008-05-15T23:24:03Z +https://www.wired.com/story/bitcoin-global-warming/|creationDate|2017-12-17 +https://www.wired.com/story/bitcoin-global-warming/|tag|http://www.semanlink.net/tag/bitcoin +https://www.wired.com/story/bitcoin-global-warming/|comment|“Had bitcoin been mined by doing something useful, then there would be a correspondence between useful work and the number of bitcoins you get … That creates a mental anchor point in people’s mind for how much a bitcoin should cost.” +https://www.wired.com/story/bitcoin-global-warming/|title|The Hard Math Behind Bitcoin's Global Warming Problem WIRED +https://www.wired.com/story/bitcoin-global-warming/|creationTime|2017-12-17T11:35:02Z +http://semanticweb.com/update-semantic-web-emergency-response-systems_b40188|creationDate|2013-10-18 +http://semanticweb.com/update-semantic-web-emergency-response-systems_b40188|tag|http://www.semanlink.net/tag/amsterdam +http://semanticweb.com/update-semantic-web-emergency-response-systems_b40188|tag|http://www.semanlink.net/tag/government_data_as_linked_data +http://semanticweb.com/update-semantic-web-emergency-response-systems_b40188|tag|http://www.semanlink.net/tag/government_data +http://semanticweb.com/update-semantic-web-emergency-response-systems_b40188|tag|http://www.semanlink.net/tag/bart_van_leeuwen +http://semanticweb.com/update-semantic-web-emergency-response-systems_b40188|title|Semantic Web in Emergency Response Systems – UPDATE - semanticweb.com +http://semanticweb.com/update-semantic-web-emergency-response-systems_b40188|creationTime|2013-10-18T22:34:08Z 
+https://towardsdatascience.com/a-gentle-introduction-to-graph-neural-network-basics-deepwalk-and-graphsage-db5d540d50b3|creationDate|2019-03-09
+https://towardsdatascience.com/a-gentle-introduction-to-graph-neural-network-basics-deepwalk-and-graphsage-db5d540d50b3|tag|http://www.semanlink.net/tag/graph_neural_networks
+https://towardsdatascience.com/a-gentle-introduction-to-graph-neural-network-basics-deepwalk-and-graphsage-db5d540d50b3|title|A Gentle Introduction to Graph Neural Network (Basics, DeepWalk, and GraphSage)
+https://towardsdatascience.com/a-gentle-introduction-to-graph-neural-network-basics-deepwalk-and-graphsage-db5d540d50b3|creationTime|2019-03-09T15:12:22Z
+http://ontologydesignpatterns.org/wiki/Ontology_Design_Patterns_._org_%28ODP%29|creationDate|2012-08-10
+http://ontologydesignpatterns.org/wiki/Ontology_Design_Patterns_._org_%28ODP%29|tag|http://www.semanlink.net/tag/ontologies
+http://ontologydesignpatterns.org/wiki/Ontology_Design_Patterns_._org_%28ODP%29|tag|http://www.semanlink.net/tag/design_pattern
+http://ontologydesignpatterns.org/wiki/Ontology_Design_Patterns_._org_%28ODP%29|title|Ontology Design Patterns . org (ODP)
+http://ontologydesignpatterns.org/wiki/Ontology_Design_Patterns_._org_%28ODP%29|creationTime|2012-08-10T12:11:26Z
+http://web.mit.edu/newsoffice/2011/mitx-education-initiative-1219.html|creationDate|2011-12-21
+http://web.mit.edu/newsoffice/2011/mitx-education-initiative-1219.html|tag|http://www.semanlink.net/tag/mit
+http://web.mit.edu/newsoffice/2011/mitx-education-initiative-1219.html|tag|http://www.semanlink.net/tag/e_learning
+http://web.mit.edu/newsoffice/2011/mitx-education-initiative-1219.html|comment|MIT will couple online learning with research on learning
+http://web.mit.edu/newsoffice/2011/mitx-education-initiative-1219.html|title|MIT launches online learning initiative - MIT News Office
+http://web.mit.edu/newsoffice/2011/mitx-education-initiative-1219.html|creationTime|2011-12-21T23:13:46Z
+http://www.nytimes.com/2010/01/28/technology/personaltech/28basics.html?ref=technology|creationDate|2010-03-07
+http://www.nytimes.com/2010/01/28/technology/personaltech/28basics.html?ref=technology|tag|http://www.semanlink.net/tag/e_learning
+http://www.nytimes.com/2010/01/28/technology/personaltech/28basics.html?ref=technology|tag|http://www.semanlink.net/tag/langues_vivantes
+http://www.nytimes.com/2010/01/28/technology/personaltech/28basics.html?ref=technology|title|The Web Way to Learn a Language
+http://www.nytimes.com/2010/01/28/technology/personaltech/28basics.html?ref=technology|creationTime|2010-03-07T22:33:47Z
+http://bugbrother.blog.lemonde.fr/2013/09/11/comment-securiser-son-telephone-mouchard-portable/|creationDate|2013-09-15
+http://bugbrother.blog.lemonde.fr/2013/09/11/comment-securiser-son-telephone-mouchard-portable/|tag|http://www.semanlink.net/tag/smartphone
+http://bugbrother.blog.lemonde.fr/2013/09/11/comment-securiser-son-telephone-mouchard-portable/|tag|http://www.semanlink.net/tag/bug_brother
+http://bugbrother.blog.lemonde.fr/2013/09/11/comment-securiser-son-telephone-mouchard-portable/|title|Comment sécuriser son téléphone mouchard portable? BUG BROTHER
+http://bugbrother.blog.lemonde.fr/2013/09/11/comment-securiser-son-telephone-mouchard-portable/|creationTime|2013-09-15T16:30:38Z
+http://nissannews.com/en-US/nissan/usa/releases/nissan-announces-unprecedented-autonomous-drive-benchmarks|creationDate|2013-08-28
+http://nissannews.com/en-US/nissan/usa/releases/nissan-announces-unprecedented-autonomous-drive-benchmarks|tag|http://www.semanlink.net/tag/driverless_car
+http://nissannews.com/en-US/nissan/usa/releases/nissan-announces-unprecedented-autonomous-drive-benchmarks|tag|http://www.semanlink.net/tag/nissan
+http://nissannews.com/en-US/nissan/usa/releases/nissan-announces-unprecedented-autonomous-drive-benchmarks|title|Nissan Announces Unprecedented Autonomous Drive Benchmarks - Nissan Online Newsroom
+http://nissannews.com/en-US/nissan/usa/releases/nissan-announces-unprecedented-autonomous-drive-benchmarks|creationTime|2013-08-28T12:29:02Z
+http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-1.html|creationDate|2009-01-15
+http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-1.html|tag|http://www.semanlink.net/tag/bob_ducharme
+http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-1.html|tag|http://www.semanlink.net/tag/semantic_web_crm
+http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-1.html|title|Semantic Web project ideas number 2 (CRM) - bobdc.blog
+http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-1.html|creationTime|2009-01-15T18:29:21Z
+http://blog.monstuff.com/archives/000252.html|creationDate|2005-09-01
+http://blog.monstuff.com/archives/000252.html|tag|http://www.semanlink.net/tag/debug
+http://blog.monstuff.com/archives/000252.html|tag|http://www.semanlink.net/tag/greasemonkey
+http://blog.monstuff.com/archives/000252.html|tag|http://www.semanlink.net/tag/ajax
+http://blog.monstuff.com/archives/000252.html|title|AJAX Debugging with Greasemonkey
+http://lists.w3.org/Archives/Public/semantic-web/2006Sep/0090.html|creationDate|2007-01-02
+http://lists.w3.org/Archives/Public/semantic-web/2006Sep/0090.html|tag|http://www.semanlink.net/tag/httprange_14
+http://lists.w3.org/Archives/Public/semantic-web/2006Sep/0090.html|title|A URI for your Favourite Pub: httpRange-14 Question from T.Heath on 2006-09-21 (semantic-web@w3.org from September 2006)
+http://saturn.jpl.nasa.gov/news/features/feature20080326.cfm|creationDate|2008-04-07
+http://saturn.jpl.nasa.gov/news/features/feature20080326.cfm|tag|http://www.semanlink.net/tag/extremophiles
+http://saturn.jpl.nasa.gov/news/features/feature20080326.cfm|tag|http://www.semanlink.net/tag/vie_extraterrestre
+http://saturn.jpl.nasa.gov/news/features/feature20080326.cfm|tag|http://www.semanlink.net/tag/encelade
+http://saturn.jpl.nasa.gov/news/features/feature20080326.cfm|tag|http://www.semanlink.net/tag/cassini_huygens
+http://saturn.jpl.nasa.gov/news/features/feature20080326.cfm|comment|"In recent years, life forms have been found on Earth that thrive in places where the sun doesn't shine and oxygen is not present because no photosynthesis takes place...
+There are three such ecosystems found on Earth that would conceivably be a basis for life on Enceladus. Two are based on methanogens, which belong to an ancient group related to bacteria, called the archaea -- the rugged survivalists of bacteria that thrive in harsh environments without oxygen.
+Deep volcanic rocks along the Columbia River and in Idaho Falls host two of these ecosystems, which pull their energy from the chemical interaction of different rocks. The third ecosystem is powered by the energy produced in the radioactive decay in rocks, and was found deep below the surface in a mine in South Africa.
+
+"
+http://saturn.jpl.nasa.gov/news/features/feature20080326.cfm|title|Cassini-Huygens: Enceladus: A Perspective on Life on Enceladus: A World of Possibilities
+http://saturn.jpl.nasa.gov/news/features/feature20080326.cfm|creationTime|2008-04-07T21:00:32Z
+http://www.bbc.co.uk/news/science-environment-23274175|creationDate|2013-07-12
+http://www.bbc.co.uk/news/science-environment-23274175|tag|http://www.semanlink.net/tag/synthetic_biology
+http://www.bbc.co.uk/news/science-environment-23274175|title|BBC News - Will synthetic biology become a GM-style battleground?
+http://www.bbc.co.uk/news/science-environment-23274175|creationTime|2013-07-12T13:22:49Z
+http://www.openrdf.org/|creationDate|2005-11-03
+http://www.openrdf.org/|tag|http://www.semanlink.net/tag/sesame
+http://www.openrdf.org/|title|openRDF.org openRDF.org, home of Sesame
+http://www.w3.org/blog/SW/2011/04/15/rdf_working_group_meets_face_to_face_in_|creationDate|2011-04-18
+http://www.w3.org/blog/SW/2011/04/15/rdf_working_group_meets_face_to_face_in_|tag|http://www.semanlink.net/tag/rdf_in_json
+http://www.w3.org/blog/SW/2011/04/15/rdf_working_group_meets_face_to_face_in_|tag|http://www.semanlink.net/tag/rdf_working_group
+http://www.w3.org/blog/SW/2011/04/15/rdf_working_group_meets_face_to_face_in_|title|W3C Semantic Web Activity News - RDF Working Group meets face-to-face in Amsterdam
+http://www.w3.org/blog/SW/2011/04/15/rdf_working_group_meets_face_to_face_in_|creationTime|2011-04-18T20:31:54Z
+https://www.microsoft.com/en-us/research/blog/bringing-low-resource-languages-spoken-dialects-play-semi-supervised-universal-neural-machine-translation/|creationDate|2018-05-18
+https://www.microsoft.com/en-us/research/blog/bringing-low-resource-languages-spoken-dialects-play-semi-supervised-universal-neural-machine-translation/|tag|http://www.semanlink.net/tag/cross_lingual_nlp
+https://www.microsoft.com/en-us/research/blog/bringing-low-resource-languages-spoken-dialects-play-semi-supervised-universal-neural-machine-translation/|tag|http://www.semanlink.net/tag/microsoft_research
+https://www.microsoft.com/en-us/research/blog/bringing-low-resource-languages-spoken-dialects-play-semi-supervised-universal-neural-machine-translation/|tag|http://www.semanlink.net/tag/nlp_microsoft
+https://www.microsoft.com/en-us/research/blog/bringing-low-resource-languages-spoken-dialects-play-semi-supervised-universal-neural-machine-translation/|title|Bringing low-resource languages and spoken dialects into play with Semi-Supervised Universal Neural Machine Translation - Microsoft Research
+https://www.microsoft.com/en-us/research/blog/bringing-low-resource-languages-spoken-dialects-play-semi-supervised-universal-neural-machine-translation/|creationTime|2018-05-18T15:49:11Z
+http://ressources.ingall-niger.org/documents/livres/lectures/niger_litterature_1991.pdf|creationDate|2019-05-10
+http://ressources.ingall-niger.org/documents/livres/lectures/niger_litterature_1991.pdf|tag|http://www.semanlink.net/tag/niger
+http://ressources.ingall-niger.org/documents/livres/lectures/niger_litterature_1991.pdf|tag|http://www.semanlink.net/tag/litterature
+http://ressources.ingall-niger.org/documents/livres/lectures/niger_litterature_1991.pdf|tag|http://www.semanlink.net/tag/histoire_du_niger +http://ressources.ingall-niger.org/documents/livres/lectures/niger_litterature_1991.pdf|title|Littérature nigérienne +http://ressources.ingall-niger.org/documents/livres/lectures/niger_litterature_1991.pdf|creationTime|2019-05-10T23:53:40Z +http://www.youtube.com/watch?v=5KnfJibBx7c|creationDate|2009-09-01 +http://www.youtube.com/watch?v=5KnfJibBx7c|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=5KnfJibBx7c|tag|http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003 +http://www.youtube.com/watch?v=5KnfJibBx7c|title|4*400m, championnats du monde 1993, Paris +http://www.youtube.com/watch?v=5KnfJibBx7c|creationTime|2009-09-01T10:53:11Z +http://en.wikipedia.org/wiki/Tampopo|creationDate|2007-03-03 +http://en.wikipedia.org/wiki/Tampopo|tag|http://www.semanlink.net/tag/film_japonais +http://en.wikipedia.org/wiki/Tampopo|comment|"Tampopo begins when a pair of truck drivers happen onto a decrepit roadside fast food stop selling ramen noodles. The business is not doing too well, and after getting involved in a fight, the heroes decide to help the young owner, Tampopo, turn her establishment into a paragon of the ""art of noodle soup making""." +http://en.wikipedia.org/wiki/Tampopo|title|Tampopo +http://en.wikipedia.org/wiki/Tampopo|creationTime|2007-03-03T00:33:32Z +https://www.kaggle.com/mschumacher/using-fasttext-models-for-robust-embeddings|creationDate|2019-01-29 +https://www.kaggle.com/mschumacher/using-fasttext-models-for-robust-embeddings|tag|http://www.semanlink.net/tag/fasttext +https://www.kaggle.com/mschumacher/using-fasttext-models-for-robust-embeddings|tag|http://www.semanlink.net/tag/oov +https://www.kaggle.com/mschumacher/using-fasttext-models-for-robust-embeddings|title|Using FastText models (not vectors) for robust embeddings Kaggle +https://www.kaggle.com/mschumacher/using-fasttext-models-for-robust-embeddings|creationTime|2019-01-29T11:36:01Z +http://romiawasthy.blogspot.fi/2014/06/configure-solr-suggester.html|creationDate|2015-06-27 +http://romiawasthy.blogspot.fi/2014/06/configure-solr-suggester.html|tag|http://www.semanlink.net/tag/solr_autocomplete +http://romiawasthy.blogspot.fi/2014/06/configure-solr-suggester.html|title|Romi's blog: Configure Solr -Suggester +http://romiawasthy.blogspot.fi/2014/06/configure-solr-suggester.html|creationTime|2015-06-27T01:46:24Z +http://dataconomy.com/2016/01/understanding-dimensionality-reduction/|creationDate|2017-06-24 +http://dataconomy.com/2016/01/understanding-dimensionality-reduction/|tag|http://www.semanlink.net/tag/principal_component_analysis +http://dataconomy.com/2016/01/understanding-dimensionality-reduction/|tag|http://www.semanlink.net/tag/dimensionality_reduction +http://dataconomy.com/2016/01/understanding-dimensionality-reduction/|title|Understanding Dimensionality Reduction and its Applications - Dataconomy +http://dataconomy.com/2016/01/understanding-dimensionality-reduction/|creationTime|2017-06-24T01:55:07Z +http://link.springer.com/chapter/10.1007/978-1-4419-7665-9_5|creationDate|2013-04-17 +http://link.springer.com/chapter/10.1007/978-1-4419-7665-9_5|tag|http://www.semanlink.net/tag/alexandre_passant +http://link.springer.com/chapter/10.1007/978-1-4419-7665-9_5|tag|http://www.semanlink.net/tag/semantic_enterprise 
+http://link.springer.com/chapter/10.1007/978-1-4419-7665-9_5|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://link.springer.com/chapter/10.1007/978-1-4419-7665-9_5|tag|http://www.semanlink.net/tag/link_to_me +http://link.springer.com/chapter/10.1007/978-1-4419-7665-9_5|title|Enhancing Enterprise 2.0 Ecosystems Using Semantic Web and Linked Data Technologies:The SemSLATES Approach - Springer +http://link.springer.com/chapter/10.1007/978-1-4419-7665-9_5|creationTime|2013-04-17T10:06:32Z +http://en.wikipedia.org/wiki/Hausdorff_distance|creationDate|2013-04-23 +http://en.wikipedia.org/wiki/Hausdorff_distance|tag|http://www.semanlink.net/tag/destination_prediction +http://en.wikipedia.org/wiki/Hausdorff_distance|tag|http://www.semanlink.net/tag/mathematiques +http://en.wikipedia.org/wiki/Hausdorff_distance|title|Hausdorff distance - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Hausdorff_distance|creationTime|2013-04-23T17:03:16Z +http://www.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf|creationDate|2016-01-31 +http://www.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf|tag|http://www.semanlink.net/tag/hidden_markov_model +http://www.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf|tag|http://www.semanlink.net/tag/speech_recognition +http://www.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf|tag|http://www.semanlink.net/tag/tutorial +http://www.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf|title|A tutorial on hidden markov models in speech recognition applications +http://www.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf|creationTime|2016-01-31T13:12:44Z +http://blogs.sun.com/bblfish/entry/web_finger_proposals_overview|creationDate|2010-01-20 +http://blogs.sun.com/bblfish/entry/web_finger_proposals_overview|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/bblfish/entry/web_finger_proposals_overview|tag|http://www.semanlink.net/tag/rdf_forms +http://blogs.sun.com/bblfish/entry/web_finger_proposals_overview|title|Web Finger proposals overview +http://blogs.sun.com/bblfish/entry/web_finger_proposals_overview|creationTime|2010-01-20T18:38:09Z +http://wiki.eclipse.org/EGit/User_Guide#Getting_Started|creationDate|2013-09-23 +http://wiki.eclipse.org/EGit/User_Guide#Getting_Started|tag|http://www.semanlink.net/tag/tutorial +http://wiki.eclipse.org/EGit/User_Guide#Getting_Started|tag|http://www.semanlink.net/tag/egit +http://wiki.eclipse.org/EGit/User_Guide#Getting_Started|title|EGit/User Guide - Eclipsepedia +http://wiki.eclipse.org/EGit/User_Guide#Getting_Started|creationTime|2013-09-23T13:50:54Z +https://pdfs.semanticscholar.org/e398/d9d7e090a8d6f906b5da59925da212f6bc51.pdf|creationDate|2017-07-21 +https://pdfs.semanticscholar.org/e398/d9d7e090a8d6f906b5da59925da212f6bc51.pdf|tag|http://www.semanlink.net/tag/representation_learning +https://pdfs.semanticscholar.org/e398/d9d7e090a8d6f906b5da59925da212f6bc51.pdf|tag|http://www.semanlink.net/tag/using_word_embedding +https://pdfs.semanticscholar.org/e398/d9d7e090a8d6f906b5da59925da212f6bc51.pdf|comment|hmm, déjà [bookmarké sur arxiv](https://arxiv.org/abs/1607.00570) +https://pdfs.semanticscholar.org/e398/d9d7e090a8d6f906b5da59925da212f6bc51.pdf|relatedDoc|https://arxiv.org/abs/1607.00570 
+https://pdfs.semanticscholar.org/e398/d9d7e090a8d6f906b5da59925da212f6bc51.pdf|title|Representation learning for very short texts using weighted word embedding aggregation +https://pdfs.semanticscholar.org/e398/d9d7e090a8d6f906b5da59925da212f6bc51.pdf|creationTime|2017-07-21T01:49:18Z +https://pdfs.semanticscholar.org/e398/d9d7e090a8d6f906b5da59925da212f6bc51.pdf|seeAlso|https://github.com/cedricdeboom/RepresentationLearning +https://github.com/bergie/noflo|creationDate|2012-02-28 +https://github.com/bergie/noflo|tag|http://www.semanlink.net/tag/henri_bergius +https://github.com/bergie/noflo|tag|http://www.semanlink.net/tag/node_js +https://github.com/bergie/noflo|comment|NoFlo is a simple flow-based programming implementation for Node.js +https://github.com/bergie/noflo|title|bergie/noflo - GitHub +https://github.com/bergie/noflo|creationTime|2012-02-28T11:04:34Z +http://ruben.verborgh.org/blog/2013/12/31/promiscuous-promises/|creationDate|2015-03-05 +http://ruben.verborgh.org/blog/2013/12/31/promiscuous-promises/|tag|http://www.semanlink.net/tag/ruben_verborgh +http://ruben.verborgh.org/blog/2013/12/31/promiscuous-promises/|tag|http://www.semanlink.net/tag/javascript_promises +http://ruben.verborgh.org/blog/2013/12/31/promiscuous-promises/|title|Promiscuous promises Ruben Verborgh +http://ruben.verborgh.org/blog/2013/12/31/promiscuous-promises/|creationTime|2015-03-05T10:55:31Z +https://eng.uber.com/mysql-migration/|creationDate|2016-08-02 +https://eng.uber.com/mysql-migration/|tag|http://www.semanlink.net/tag/mysql +https://eng.uber.com/mysql-migration/|tag|http://www.semanlink.net/tag/uber +https://eng.uber.com/mysql-migration/|comment|MySQL and PostgreSQL differences and the effect they can have +https://eng.uber.com/mysql-migration/|title|Why Uber Engineering Switched from Postgres to MySQL - Uber Engineering Blog +https://eng.uber.com/mysql-migration/|creationTime|2016-08-02T15:11:59Z +http://www.w3.org/Submission/ldbp/|creationDate|2012-09-30 +http://www.w3.org/Submission/ldbp/|tag|http://www.semanlink.net/tag/linked_data +http://www.w3.org/Submission/ldbp/|tag|http://www.semanlink.net/tag/read_write_linked_data +http://www.w3.org/Submission/ldbp/|comment|A set of best practices and simple approach for a read-write Linked Data architecture, based on HTTP access to web resources that describe their state using RDF. 
+http://www.w3.org/Submission/ldbp/|title|Linked Data Basic Profile 1.0 +http://www.w3.org/Submission/ldbp/|creationTime|2012-09-30T10:38:23Z +http://searchdatamanagement.techtarget.com/news/4500269406/IBM-Watson-APIs-hold-key-to-broader-cognitive-computing-use|creationDate|2015-12-30 +http://searchdatamanagement.techtarget.com/news/4500269406/IBM-Watson-APIs-hold-key-to-broader-cognitive-computing-use|tag|http://www.semanlink.net/tag/i_b_m_s_watson +http://searchdatamanagement.techtarget.com/news/4500269406/IBM-Watson-APIs-hold-key-to-broader-cognitive-computing-use|tag|http://www.semanlink.net/tag/cognitive_computing +http://searchdatamanagement.techtarget.com/news/4500269406/IBM-Watson-APIs-hold-key-to-broader-cognitive-computing-use|title|IBM Watson APIs hold key to broader cognitive computing use +http://searchdatamanagement.techtarget.com/news/4500269406/IBM-Watson-APIs-hold-key-to-broader-cognitive-computing-use|creationTime|2015-12-30T20:19:54Z +https://www.theguardian.com/music/2010/jul/15/gilberto-gil-caetano-veloso-london|creationDate|2018-10-25 +https://www.theguardian.com/music/2010/jul/15/gilberto-gil-caetano-veloso-london|tag|http://www.semanlink.net/tag/caetano_veloso +https://www.theguardian.com/music/2010/jul/15/gilberto-gil-caetano-veloso-london|tag|http://www.semanlink.net/tag/gilberto_gil +https://www.theguardian.com/music/2010/jul/15/gilberto-gil-caetano-veloso-london|tag|http://www.semanlink.net/tag/londres +https://www.theguardian.com/music/2010/jul/15/gilberto-gil-caetano-veloso-london|title|Gilberto Gil and Caetano Veloso in London Music The Guardian +https://www.theguardian.com/music/2010/jul/15/gilberto-gil-caetano-veloso-london|creationTime|2018-10-25T11:21:04Z +https://brinxmat.wordpress.com/2016/03/24/creating-functional-linked-data-solutions/|creationDate|2016-04-09 +https://brinxmat.wordpress.com/2016/03/24/creating-functional-linked-data-solutions/|tag|http://www.semanlink.net/tag/brinxmat +https://brinxmat.wordpress.com/2016/03/24/creating-functional-linked-data-solutions/|tag|http://www.semanlink.net/tag/linked_data +https://brinxmat.wordpress.com/2016/03/24/creating-functional-linked-data-solutions/|title|Creating functional linked-data solutions Brinxmat's blog +https://brinxmat.wordpress.com/2016/03/24/creating-functional-linked-data-solutions/|creationTime|2016-04-09T00:53:48Z +http://dalelane.co.uk/blog/?p=3403|creationDate|2019-04-18 +http://dalelane.co.uk/blog/?p=3403|tag|http://www.semanlink.net/tag/discounted_cumulative_gain +http://dalelane.co.uk/blog/?p=3403|title|Normalised Discounted Cumulative Gain +http://dalelane.co.uk/blog/?p=3403|creationTime|2019-04-18T08:56:13Z +http://stackoverflow.com/questions/2902427/user-authentication-on-a-jersey-rest-service|creationDate|2014-09-26 +http://stackoverflow.com/questions/2902427/user-authentication-on-a-jersey-rest-service|tag|http://www.semanlink.net/tag/authentication +http://stackoverflow.com/questions/2902427/user-authentication-on-a-jersey-rest-service|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/2902427/user-authentication-on-a-jersey-rest-service|tag|http://www.semanlink.net/tag/jersey +http://stackoverflow.com/questions/2902427/user-authentication-on-a-jersey-rest-service|title|User authentication on a Jersey REST service - Stack Overflow +http://stackoverflow.com/questions/2902427/user-authentication-on-a-jersey-rest-service|creationTime|2014-09-26T00:59:43Z +http://www.coconut-palm-software.com/the_visual_editor/?p=25|creationDate|2007-07-07 
+http://www.coconut-palm-software.com/the_visual_editor/?p=25|tag|http://www.semanlink.net/tag/java +http://www.coconut-palm-software.com/the_visual_editor/?p=25|tag|http://www.semanlink.net/tag/duck_typing +http://www.coconut-palm-software.com/the_visual_editor/?p=25|title|The Visual Editor » Java does Duck Typing +http://www.coconut-palm-software.com/the_visual_editor/?p=25|creationTime|2007-07-07T13:54:02Z +https://wit.ai/|creationDate|2015-01-06 +https://wit.ai/|tag|http://www.semanlink.net/tag/speech_recognition +https://wit.ai/|tag|http://www.semanlink.net/tag/nlp +https://wit.ai/|tag|http://www.semanlink.net/tag/internet_of_things +https://wit.ai/|comment|"We... turn speech into actionable data Your users give us voice or text, you get back structured data. +" +https://wit.ai/|title|Wit — Natural language for the Internet of Things +https://wit.ai/|creationTime|2015-01-06T11:31:49Z +http://lists.w3.org/Archives/Public/semantic-web/2014Aug/0078.html|creationDate|2014-08-30 +http://lists.w3.org/Archives/Public/semantic-web/2014Aug/0078.html|tag|http://www.semanlink.net/tag/https +http://lists.w3.org/Archives/Public/semantic-web/2014Aug/0078.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://lists.w3.org/Archives/Public/semantic-web/2014Aug/0078.html|title|The ability to automatically upgrade a reference to HTTPS from HTTP from Tim Berners-Lee on 2014-08-22 (semantic-web@w3.org from August 2014) +http://lists.w3.org/Archives/Public/semantic-web/2014Aug/0078.html|creationTime|2014-08-30T12:42:19Z +http://fr.academic.ru/pictures/frwiki/78/Nok_sculpture_Louvre_70-1998-11-1.jpg|creationDate|2011-01-04 +http://fr.academic.ru/pictures/frwiki/78/Nok_sculpture_Louvre_70-1998-11-1.jpg|tag|http://www.semanlink.net/tag/nok +http://fr.academic.ru/pictures/frwiki/78/Nok_sculpture_Louvre_70-1998-11-1.jpg|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://fr.academic.ru/pictures/frwiki/78/Nok_sculpture_Louvre_70-1998-11-1.jpg|tag|http://www.semanlink.net/tag/louvre +http://fr.academic.ru/pictures/frwiki/78/Nok_sculpture_Louvre_70-1998-11-1.jpg|tag|http://www.semanlink.net/tag/terre_cuite +http://fr.academic.ru/pictures/frwiki/78/Nok_sculpture_Louvre_70-1998-11-1.jpg|title|Nok sculpture, Louvre +http://fr.academic.ru/pictures/frwiki/78/Nok_sculpture_Louvre_70-1998-11-1.jpg|creationTime|2011-01-04T00:49:43Z +http://www.edge.org/3rd_culture/krauss06/krauss06.2_index.html|creationDate|2006-07-20 +http://www.edge.org/3rd_culture/krauss06/krauss06.2_index.html|tag|http://www.semanlink.net/tag/energie_du_vide +http://www.edge.org/3rd_culture/krauss06/krauss06.2_index.html|title|Edge: THE ENERGY OF EMPTY SPACE THAT ISN'T ZERO: A Talk with Lawrence Krauss +http://www.wired.com/2016/04/mathematician-solves-centuries-old-sphere-problem-higher-dimensions/|creationDate|2016-04-03 +http://www.wired.com/2016/04/mathematician-solves-centuries-old-sphere-problem-higher-dimensions/|tag|http://www.semanlink.net/tag/sphere_packing +http://www.wired.com/2016/04/mathematician-solves-centuries-old-sphere-problem-higher-dimensions/|title|Mathematician Solves the Centuries-Old Sphere Problem in Higher Dimensions WIRED +http://www.wired.com/2016/04/mathematician-solves-centuries-old-sphere-problem-higher-dimensions/|creationTime|2016-04-03T13:46:32Z +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0041.html|creationDate|2013-07-07 +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0041.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org 
+http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0041.html|tag|http://www.semanlink.net/tag/units_of_measure
+http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0041.html|title|[SUMMARY] QuantitativeValue / Units of Measure - Proposal from Alex Milowski on 2013-06-06 (public-vocabs@w3.org from June 2013)
+http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0041.html|creationTime|2013-07-07T00:29:37Z
+http://www.nytimes.com/2016/06/08/business/economy/threatened-by-machines-a-once-stupid-concern-gains-respect.html?_r=0|creationDate|2016-06-09
+http://www.nytimes.com/2016/06/08/business/economy/threatened-by-machines-a-once-stupid-concern-gains-respect.html?_r=0|tag|http://www.semanlink.net/tag/universal_income
+http://www.nytimes.com/2016/06/08/business/economy/threatened-by-machines-a-once-stupid-concern-gains-respect.html?_r=0|comment|Cutting horses’ oat rations might have delayed their replacement by tractors, but it wouldn’t have stopped it
+http://www.nytimes.com/2016/06/08/business/economy/threatened-by-machines-a-once-stupid-concern-gains-respect.html?_r=0|title|Jobs Threatened by Machines: A Once ‘Stupid’ Concern Gains Respect - The New York Times
+http://www.nytimes.com/2016/06/08/business/economy/threatened-by-machines-a-once-stupid-concern-gains-respect.html?_r=0|creationTime|2016-06-09T22:59:15Z
+http://strapdownjs.com/|creationDate|2015-10-10
+http://strapdownjs.com/|tag|http://www.semanlink.net/tag/markown_javascript
+http://strapdownjs.com/|title|Strapdown.js - Instant and elegant Markdown documents
+http://strapdownjs.com/|creationTime|2015-10-10T20:57:09Z
+http://www.sindicetech.com/|creationDate|2012-07-31
+http://www.sindicetech.com/|tag|http://www.semanlink.net/tag/big_data
+http://www.sindicetech.com/|tag|http://www.semanlink.net/tag/cloud
+http://www.sindicetech.com/|tag|http://www.semanlink.net/tag/linked_data
+http://www.sindicetech.com/|tag|http://www.semanlink.net/tag/sindice
+http://www.sindicetech.com/|comment|Big Data meets Semantic Web. And it all makes sense. We're helping large enterprises build their own, Private Linked Data Clouds.
+http://www.sindicetech.com/|title|Sindicetech: enterprise linked data clouds
+http://www.sindicetech.com/|creationTime|2012-07-31T10:02:59Z
+http://jboye.com/blogpost/semantic-technologies-what-is-in-it-for-the-unhappy-cms-customer/|creationDate|2011-10-08
+http://jboye.com/blogpost/semantic-technologies-what-is-in-it-for-the-unhappy-cms-customer/|tag|http://www.semanlink.net/tag/semantic_cms
+http://jboye.com/blogpost/semantic-technologies-what-is-in-it-for-the-unhappy-cms-customer/|title|Semantic Technologies: What is in it for the (unhappy) CMS customer? J. Boye
+http://jboye.com/blogpost/semantic-technologies-what-is-in-it-for-the-unhappy-cms-customer/|creationTime|2011-10-08T21:30:22Z
+http://wiki.ontoworld.org/index.php/Semantic_Wiki_State_Of_The_Art|creationDate|2006-02-17
+http://wiki.ontoworld.org/index.php/Semantic_Wiki_State_Of_The_Art|tag|http://www.semanlink.net/tag/semantic_wiki
+http://wiki.ontoworld.org/index.php/Semantic_Wiki_State_Of_The_Art|comment|Liste de logiciels SW Wiki
+http://wiki.ontoworld.org/index.php/Semantic_Wiki_State_Of_The_Art|title|Semantic Wiki State Of The Art - Wiki@OntoWorld
+http://tartarus.org/martin/PorterStemmer/|creationDate|2012-03-18
+http://tartarus.org/martin/PorterStemmer/|tag|http://www.semanlink.net/tag/nlp_class
+http://tartarus.org/martin/PorterStemmer/|tag|http://www.semanlink.net/tag/stemming
+http://tartarus.org/martin/PorterStemmer/|title|Porter Stemming Algorithm
+http://tartarus.org/martin/PorterStemmer/|creationTime|2012-03-18T12:29:41Z
+http://pouchdb.com/2015/05/18/we-have-a-problem-with-promises.html|creationDate|2015-07-16
+http://pouchdb.com/2015/05/18/we-have-a-problem-with-promises.html|tag|http://www.semanlink.net/tag/javascript_promises
+http://pouchdb.com/2015/05/18/we-have-a-problem-with-promises.html|title|We have a problem with promises
+http://pouchdb.com/2015/05/18/we-have-a-problem-with-promises.html|creationTime|2015-07-16T12:18:35Z
+http://data.parliament.uk/writtenevidence/committeeevidence.svc/evidencedocument/treasury-committee/the-uks-economic-relationship-with-the-european-union/oral/72053.html|creationDate|2017-11-03
+http://data.parliament.uk/writtenevidence/committeeevidence.svc/evidencedocument/treasury-committee/the-uks-economic-relationship-with-the-european-union/oral/72053.html|tag|http://www.semanlink.net/tag/brexit
+http://data.parliament.uk/writtenevidence/committeeevidence.svc/evidencedocument/treasury-committee/the-uks-economic-relationship-with-the-european-union/oral/72053.html|comment|"Ordered by the House of Commons
+
+> All trade deals in history are struck between people that are trying to get closer together. This is the first trade deal in history struck between partners who are trying to get further apart."
+http://data.parliament.uk/writtenevidence/committeeevidence.svc/evidencedocument/treasury-committee/the-uks-economic-relationship-with-the-european-union/oral/72053.html|title|Oral evidence - The UK's economic relationship with the European Union - 25 Oct 2017 +http://data.parliament.uk/writtenevidence/committeeevidence.svc/evidencedocument/treasury-committee/the-uks-economic-relationship-with-the-european-union/oral/72053.html|creationTime|2017-11-03T00:07:32Z +http://fauconnier.github.io/|creationDate|2017-07-20 +http://fauconnier.github.io/|tag|http://www.semanlink.net/tag/nlp_french +http://fauconnier.github.io/|tag|http://www.semanlink.net/tag/word2vec +http://fauconnier.github.io/|title|Some pre-trained word2vec models for French +http://fauconnier.github.io/|creationTime|2017-07-20T13:00:27Z +http://kashori.com/2006/11/claims-about-power-of-uri.html|creationDate|2008-01-25 +http://kashori.com/2006/11/claims-about-power-of-uri.html|tag|http://www.semanlink.net/tag/uri +http://kashori.com/2006/11/claims-about-power-of-uri.html|title|Claims About the Power of URI References +http://kashori.com/2006/11/claims-about-power-of-uri.html|creationTime|2008-01-25T08:45:14Z +http://www.fastly.com/blog/best-practices-for-using-the-vary-header/|creationDate|2015-05-14 +http://www.fastly.com/blog/best-practices-for-using-the-vary-header/|tag|http://www.semanlink.net/tag/vary_header +http://www.fastly.com/blog/best-practices-for-using-the-vary-header/|title|Best Practices for Using the Vary Header Fastly - The Next Gen CDN +http://www.fastly.com/blog/best-practices-for-using-the-vary-header/|creationTime|2015-05-14T15:21:55Z +http://neurosciencenews.com/decision-making-eeg-free-will-3333/|creationDate|2016-01-04 +http://neurosciencenews.com/decision-making-eeg-free-will-3333/|tag|http://www.semanlink.net/tag/neuroscience +http://neurosciencenews.com/decision-making-eeg-free-will-3333/|tag|http://www.semanlink.net/tag/free_will +http://neurosciencenews.com/decision-making-eeg-free-will-3333/|title|Do We Have Free Will? The Brain-Computer Duel – Neuroscience News +http://neurosciencenews.com/decision-making-eeg-free-will-3333/|creationTime|2016-01-04T18:31:25Z +http://docs.info.apple.com/article.html?artnum=302412|creationDate|2006-01-15 +http://docs.info.apple.com/article.html?artnum=302412|tag|http://www.semanlink.net/tag/java_1_5_mac_os_x +http://docs.info.apple.com/article.html?artnum=302412|comment|Java 2 Platform, Standard Edition (J2SE) 5.0 Release 3 allows applications and applets developed for the J2SE 5.0 platform to run on Mac OS X v 10.4.2 and later. This update does not change the default version of Java on your Mac from Java 1.4.2 to J2SE 5.0, though Java applications that require J2SE 5.0 may specifically request it. You can change the preferred Java version for applications and applets by using the new Java Preferences utility. This utility is installed by the J2SE 5.0 update at /Applications/Utilities/Java/J2SE 5.0/. 
+http://docs.info.apple.com/article.html?artnum=302412|title|About Java 2 Platform Standard Edition (J2SE) 5.0 Release 3 for Mac OS X v 10.4.2 or later +http://www.eurotexte.fr/100pieges/index.htm|creationDate|2007-02-16 +http://www.eurotexte.fr/100pieges/index.htm|tag|http://www.semanlink.net/tag/les_100_pieges_de_l_anglais +http://www.eurotexte.fr/100pieges/index.htm|title|Les 100 pièges de l'Anglais +http://www.eurotexte.fr/100pieges/index.htm|creationTime|2007-02-16T21:57:30Z +http://www2013.org/companion/p1253.pdf|creationDate|2013-05-30 +http://www2013.org/companion/p1253.pdf|tag|http://www.semanlink.net/tag/skos +http://www2013.org/companion/p1253.pdf|tag|http://www.semanlink.net/tag/www_2013 +http://www2013.org/companion/p1253.pdf|tag|http://www.semanlink.net/tag/bernhard_haslhofer +http://www2013.org/companion/p1253.pdf|tag|http://www.semanlink.net/tag/web_search +http://www2013.org/companion/p1253.pdf|title|Using SKOS vocabularies for improving Web Search +http://www2013.org/companion/p1253.pdf|creationTime|2013-05-30T09:26:35Z +http://www.w3.org/TR/2008/NOTE-swbp-vocab-pub-20080828/|creationDate|2008-09-01 +http://www.w3.org/TR/2008/NOTE-swbp-vocab-pub-20080828/|tag|http://www.semanlink.net/tag/publishing_rdf_vocabularies +http://www.w3.org/TR/2008/NOTE-swbp-vocab-pub-20080828/|tag|http://www.semanlink.net/tag/w3c_note +http://www.w3.org/TR/2008/NOTE-swbp-vocab-pub-20080828/|comment|This document describes best practice recipes for publishing vocabularies or ontologies on the Web (in RDF Schema or OWL). +http://www.w3.org/TR/2008/NOTE-swbp-vocab-pub-20080828/|title|Best Practice Recipes for Publishing RDF Vocabularies +http://www.w3.org/TR/2008/NOTE-swbp-vocab-pub-20080828/|creationTime|2008-09-01T13:39:14Z +http://weblog.clarkparsia.com/2010/05/26/another-reason-semantic-web-kicks-ass|creationDate|2010-12-17 +http://weblog.clarkparsia.com/2010/05/26/another-reason-semantic-web-kicks-ass|tag|http://www.semanlink.net/tag/clark_and_parsia +http://weblog.clarkparsia.com/2010/05/26/another-reason-semantic-web-kicks-ass|tag|http://www.semanlink.net/tag/government_data_as_linked_data +http://weblog.clarkparsia.com/2010/05/26/another-reason-semantic-web-kicks-ass|tag|http://www.semanlink.net/tag/sw_demo +http://weblog.clarkparsia.com/2010/05/26/another-reason-semantic-web-kicks-ass|comment|Painless, efficient, and mostly meeting-free information integration is another reason the Semantic Web kicks ass. 
+http://weblog.clarkparsia.com/2010/05/26/another-reason-semantic-web-kicks-ass|title|Another Reason Semantic Web Kicks Ass—Clark & Parsia: Thinking Clearly +http://weblog.clarkparsia.com/2010/05/26/another-reason-semantic-web-kicks-ass|creationTime|2010-12-17T00:46:57Z +http://composing-the-semantic-web.blogspot.com/2010/04/where-owl-fails.html|creationDate|2010-12-17 +http://composing-the-semantic-web.blogspot.com/2010/04/where-owl-fails.html|tag|http://www.semanlink.net/tag/topbraid_spin +http://composing-the-semantic-web.blogspot.com/2010/04/where-owl-fails.html|tag|http://www.semanlink.net/tag/owl +http://composing-the-semantic-web.blogspot.com/2010/04/where-owl-fails.html|title|WHERE OWL fails +http://composing-the-semantic-web.blogspot.com/2010/04/where-owl-fails.html|creationTime|2010-12-17T00:49:10Z +http://www.slideshare.net/cygri/investigating-community-implementation-of-the-goodrelations-ontology|creationDate|2011-04-05 +http://www.slideshare.net/cygri/investigating-community-implementation-of-the-goodrelations-ontology|tag|http://www.semanlink.net/tag/goodrelations +http://www.slideshare.net/cygri/investigating-community-implementation-of-the-goodrelations-ontology|tag|http://www.semanlink.net/tag/richard_cyganiak +http://www.slideshare.net/cygri/investigating-community-implementation-of-the-goodrelations-ontology|title|Investigating Community Implementation of the GoodRelations Ontology +http://www.slideshare.net/cygri/investigating-community-implementation-of-the-goodrelations-ontology|creationTime|2011-04-05T10:27:51Z +http://www.panic.com/transmit/index.html|creationDate|2007-03-16 +http://www.panic.com/transmit/index.html|tag|http://www.semanlink.net/tag/ftp +http://www.panic.com/transmit/index.html|tag|http://www.semanlink.net/tag/os_x_app +http://www.panic.com/transmit/index.html|title|Panic - Transmit 3 - The next-generation Mac OS X FTP client! +http://www.panic.com/transmit/index.html|creationTime|2007-03-16T01:23:35Z +https://towardsdatascience.com/lda2vec-word-embeddings-in-topic-models-4ee3fc4b2843|creationDate|2017-12-11 +https://towardsdatascience.com/lda2vec-word-embeddings-in-topic-models-4ee3fc4b2843|tag|http://www.semanlink.net/tag/lda2vec +https://towardsdatascience.com/lda2vec-word-embeddings-in-topic-models-4ee3fc4b2843|title|LDA2vec: Word Embeddings in Topic Models – Towards Data Science +https://towardsdatascience.com/lda2vec-word-embeddings-in-topic-models-4ee3fc4b2843|creationTime|2017-12-11T13:46:53Z +https://threadreaderapp.com/thread/1065841141201989632.html|creationDate|2018-11-23 +https://threadreaderapp.com/thread/1065841141201989632.html|tag|http://www.semanlink.net/tag/uncontacted_peoples +https://threadreaderapp.com/thread/1065841141201989632.html|comment|let’s do them a favor and leave them alone. +https://threadreaderapp.com/thread/1065841141201989632.html|title|"Thread by @RespectableLaw: ""There's been a lot of talk about the missionary killed by the natives of North Sentinel Island. 
They're probably so aggressive because of th […]""" +https://threadreaderapp.com/thread/1065841141201989632.html|creationTime|2018-11-23T10:12:54Z +http://www.norconex.com/serving-autocomplete-suggestions-fast/|creationDate|2015-06-15 +http://www.norconex.com/serving-autocomplete-suggestions-fast/|tag|http://www.semanlink.net/tag/solr_autocomplete +http://www.norconex.com/serving-autocomplete-suggestions-fast/|tag|http://www.semanlink.net/tag/elasticsearch +http://www.norconex.com/serving-autocomplete-suggestions-fast/|title|Serving autocomplete suggestions fast! +http://www.norconex.com/serving-autocomplete-suggestions-fast/|creationTime|2015-06-15T17:49:18Z +http://www.datasciencecentral.com/profiles/blogs/new-batch-of-machine-learning-resources-and-articles-from-niche?overrideMobileRedirect=1|creationDate|2014-07-24 +http://www.datasciencecentral.com/profiles/blogs/new-batch-of-machine-learning-resources-and-articles-from-niche?overrideMobileRedirect=1|tag|http://www.semanlink.net/tag/machine_learning +http://www.datasciencecentral.com/profiles/blogs/new-batch-of-machine-learning-resources-and-articles-from-niche?overrideMobileRedirect=1|title|New batch of machine learning resources and articles from niche bloggers - Data Science Central +http://www.datasciencecentral.com/profiles/blogs/new-batch-of-machine-learning-resources-and-articles-from-niche?overrideMobileRedirect=1|creationTime|2014-07-24T02:24:18Z +http://www.dustindiaz.com/top-ten-javascript/|creationDate|2005-12-02 +http://www.dustindiaz.com/top-ten-javascript/|tag|http://www.semanlink.net/tag/javascript +http://www.dustindiaz.com/top-ten-javascript/|tag|http://www.semanlink.net/tag/dev_tips +http://www.dustindiaz.com/top-ten-javascript/|title|Top 10 custom JavaScript functions of all time +http://www.youtube.com/watch?v=drhgAfLFG7M|creationDate|2007-01-03 +http://www.youtube.com/watch?v=drhgAfLFG7M|tag|http://www.semanlink.net/tag/apple +http://www.youtube.com/watch?v=drhgAfLFG7M|tag|http://www.semanlink.net/tag/anticipation +http://www.youtube.com/watch?v=drhgAfLFG7M|title|YouTube - Apple's Knowledge Navigator Video +http://www.developer.com/java/web/article.php/3904871/Top-7-Features-in-Tomcat-7-The-New-and-the-Improved.htm|creationDate|2012-04-10 +http://www.developer.com/java/web/article.php/3904871/Top-7-Features-in-Tomcat-7-The-New-and-the-Improved.htm|tag|http://www.semanlink.net/tag/tomcat_7 +http://www.developer.com/java/web/article.php/3904871/Top-7-Features-in-Tomcat-7-The-New-and-the-Improved.htm|title|Top 7 Features in Tomcat 7: The New and the Improved - Developer.com +http://www.developer.com/java/web/article.php/3904871/Top-7-Features-in-Tomcat-7-The-New-and-the-Improved.htm|creationTime|2012-04-10T12:48:09Z +http://bertails.org/2014/09/20/why-ldpatch|creationDate|2014-09-25 +http://bertails.org/2014/09/20/why-ldpatch|tag|http://www.semanlink.net/tag/ld_patch +http://bertails.org/2014/09/20/why-ldpatch|tag|http://www.semanlink.net/tag/alexandre_bertails +http://bertails.org/2014/09/20/why-ldpatch|title|Why LD-PATCH (Alexandre Bertails) +http://bertails.org/2014/09/20/why-ldpatch|creationTime|2014-09-25T12:01:19Z +https://github.com/zazukoians/rdf-ext|creationDate|2015-02-25 +https://github.com/zazukoians/rdf-ext|tag|http://www.semanlink.net/tag/javascript_rdf +https://github.com/zazukoians/rdf-ext|title|RDF Interfaces Extension +https://github.com/zazukoians/rdf-ext|creationTime|2015-02-25T12:49:00Z 
+http://blog.newrelic.com/2012/10/09/helpful-javascript-patterns/?utm_source=BLOG&utm_medium=content&utm_content=designpatterns&utm_campaign=RPM&utm_term=JavaScript&mpc=CN-BLOG-RPM-EN-100-Helpful-JavaScript|creationDate|2012-10-11 +http://blog.newrelic.com/2012/10/09/helpful-javascript-patterns/?utm_source=BLOG&utm_medium=content&utm_content=designpatterns&utm_campaign=RPM&utm_term=JavaScript&mpc=CN-BLOG-RPM-EN-100-Helpful-JavaScript|tag|http://www.semanlink.net/tag/javascript_patterns +http://blog.newrelic.com/2012/10/09/helpful-javascript-patterns/?utm_source=BLOG&utm_medium=content&utm_content=designpatterns&utm_campaign=RPM&utm_term=JavaScript&mpc=CN-BLOG-RPM-EN-100-Helpful-JavaScript|title|Helpful JavaScript Patterns New Relic blog +http://blog.newrelic.com/2012/10/09/helpful-javascript-patterns/?utm_source=BLOG&utm_medium=content&utm_content=designpatterns&utm_campaign=RPM&utm_term=JavaScript&mpc=CN-BLOG-RPM-EN-100-Helpful-JavaScript|creationTime|2012-10-11T15:02:04Z +http://www.crockford.com/JSON/|creationDate|2006-02-07 +http://www.crockford.com/JSON/|tag|http://www.semanlink.net/tag/json +http://www.crockford.com/JSON/|title|JSON Home: introducing JSON Introducing JSON +http://blog.elliottkember.com/chromes-insane-password-security-strategy|creationDate|2013-08-07 +http://blog.elliottkember.com/chromes-insane-password-security-strategy|tag|http://www.semanlink.net/tag/security +http://blog.elliottkember.com/chromes-insane-password-security-strategy|tag|http://www.semanlink.net/tag/chrome +http://blog.elliottkember.com/chromes-insane-password-security-strategy|tag|http://www.semanlink.net/tag/passwords +http://blog.elliottkember.com/chromes-insane-password-security-strategy|title|Chrome’s insane password security strategy • Elliott Kember +http://blog.elliottkember.com/chromes-insane-password-security-strategy|creationTime|2013-08-07T01:10:20Z +http://software.newsforge.com/article.pl?sid=06/05/12/1539231|creationDate|2006-05-23 +http://software.newsforge.com/article.pl?sid=06/05/12/1539231|tag|http://www.semanlink.net/tag/howto +http://software.newsforge.com/article.pl?sid=06/05/12/1539231|tag|http://www.semanlink.net/tag/wiki_software +http://software.newsforge.com/article.pl?sid=06/05/12/1539231|title|Putting MediaWiki to use in an organization +https://nlp.h-its.org/bpemb/|creationDate|2019-01-31 +https://nlp.h-its.org/bpemb/|tag|http://www.semanlink.net/tag/subword_embeddings +https://nlp.h-its.org/bpemb/|tag|http://www.semanlink.net/tag/encoding +https://nlp.h-its.org/bpemb/|tag|http://www.semanlink.net/tag/embeddings +https://nlp.h-its.org/bpemb/|tag|http://www.semanlink.net/tag/wikipedia +https://nlp.h-its.org/bpemb/|comment|a collection of pre-trained subword embeddings in 275 languages, based on Byte-Pair Encoding (BPE) and trained on Wikipedia +https://nlp.h-its.org/bpemb/|title|BPEmb: Subword Embeddings +https://nlp.h-its.org/bpemb/|creationTime|2019-01-31T23:53:03Z +https://en.wikipedia.org/wiki/Sentinelese|creationDate|2017-05-26 +https://en.wikipedia.org/wiki/Sentinelese|tag|http://www.semanlink.net/tag/uncontacted_peoples +https://en.wikipedia.org/wiki/Sentinelese|title|Sentinelese - Wikipedia +https://en.wikipedia.org/wiki/Sentinelese|creationTime|2017-05-26T00:45:00Z +http://www.readwriteweb.com/archives/semantify_automate_your_semantic_web_seo_in_five_minutes.php|creationDate|2010-07-30 +http://www.readwriteweb.com/archives/semantify_automate_your_semantic_web_seo_in_five_minutes.php|tag|http://www.semanlink.net/tag/readwriteweb_com 
+http://www.readwriteweb.com/archives/semantify_automate_your_semantic_web_seo_in_five_minutes.php|tag|http://www.semanlink.net/tag/semantic_web_tools +http://www.readwriteweb.com/archives/semantify_automate_your_semantic_web_seo_in_five_minutes.php|title|Semantify - Automate Your Semantic Web SEO in Five Minutes +http://www.readwriteweb.com/archives/semantify_automate_your_semantic_web_seo_in_five_minutes.php|creationTime|2010-07-30T14:50:25Z +https://lejournal.cnrs.fr/articles/le-bel-avenir-des-biopiles|creationDate|2014-07-25 +https://lejournal.cnrs.fr/articles/le-bel-avenir-des-biopiles|tag|http://www.semanlink.net/tag/cnrs +https://lejournal.cnrs.fr/articles/le-bel-avenir-des-biopiles|tag|http://www.semanlink.net/tag/biopiles +https://lejournal.cnrs.fr/articles/le-bel-avenir-des-biopiles|title|Le bel avenir des biopiles CNRS le journal +https://lejournal.cnrs.fr/articles/le-bel-avenir-des-biopiles|creationTime|2014-07-25T15:55:37Z +http://dannyayers.com/archives/2005/07/04/skin-deep/|creationDate|2005-07-06 +http://dannyayers.com/archives/2005/07/04/skin-deep/|tag|http://www.semanlink.net/tag/owl +http://dannyayers.com/archives/2005/07/04/skin-deep/|tag|http://www.semanlink.net/tag/taxonomies +http://dannyayers.com/archives/2005/07/04/skin-deep/|title|Danny Ayers, Raw Blog - Taxonomies in OWL +http://www.content-space.de/dokuwiki/blog/2008/semanlink_-_semantische_bookmarks|creationDate|2008-04-10 +http://www.content-space.de/dokuwiki/blog/2008/semanlink_-_semantische_bookmarks|tag|http://www.semanlink.net/tag/linkto_semanlink +http://www.content-space.de/dokuwiki/blog/2008/semanlink_-_semantische_bookmarks|title|Semanlink - semantische Bookmarks +http://www.content-space.de/dokuwiki/blog/2008/semanlink_-_semantische_bookmarks|creationTime|2008-04-10T22:31:09Z +http://www.w3schools.com/js/default.asp|creationDate|2006-07-27 +http://www.w3schools.com/js/default.asp|tag|http://www.semanlink.net/tag/good +http://www.w3schools.com/js/default.asp|tag|http://www.semanlink.net/tag/javascript_and_tutorial +http://www.w3schools.com/js/default.asp|title|JavaScript Tutorial (w3schools.com) +https://www.youtube.com/watch?v=eHGt7z-br5g|creationDate|2016-10-12 +https://www.youtube.com/watch?v=eHGt7z-br5g|tag|http://www.semanlink.net/tag/monsanto +https://www.youtube.com/watch?v=eHGt7z-br5g|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=eHGt7z-br5g|title|Manu Chao: Te lo digo, te lo canto: FUERA MONSANTO!! +https://www.youtube.com/watch?v=eHGt7z-br5g|creationTime|2016-10-12T00:03:30Z +http://www.mkbergman.com/1006/pragmatic-approaches-to-the-semantic-web/|creationDate|2012-05-15 +http://www.mkbergman.com/1006/pragmatic-approaches-to-the-semantic-web/|tag|http://www.semanlink.net/tag/good +http://www.mkbergman.com/1006/pragmatic-approaches-to-the-semantic-web/|tag|http://www.semanlink.net/tag/data_web +http://www.mkbergman.com/1006/pragmatic-approaches-to-the-semantic-web/|tag|http://www.semanlink.net/tag/linked_data +http://www.mkbergman.com/1006/pragmatic-approaches-to-the-semantic-web/|tag|http://www.semanlink.net/tag/mike_bergman +http://www.mkbergman.com/1006/pragmatic-approaches-to-the-semantic-web/|tag|http://www.semanlink.net/tag/knowledge_representation +http://www.mkbergman.com/1006/pragmatic-approaches-to-the-semantic-web/|comment|Semantic technologies are fundamentally about knowledge representation, not data transfer. 
+http://www.mkbergman.com/1006/pragmatic-approaches-to-the-semantic-web/|title|Pragmatic Approaches to the Semantic Web » AI3:::Adaptive Information +http://www.mkbergman.com/1006/pragmatic-approaches-to-the-semantic-web/|creationTime|2012-05-15T14:15:04Z +https://code.fb.com/ai-research/laser-multilingual-sentence-embeddings/|creationDate|2019-01-23 +https://code.fb.com/ai-research/laser-multilingual-sentence-embeddings/|tag|http://www.semanlink.net/tag/nlp_facebook +https://code.fb.com/ai-research/laser-multilingual-sentence-embeddings/|comment|> We are open-sourcing a newly expanded and enhanced version of our natural language processing toolkit, LASER. It now performs zero-shot cross-lingual transfer with more than 90 languages, written in 28 different alphabets. +https://code.fb.com/ai-research/laser-multilingual-sentence-embeddings/|title|LASER natural language processing toolkit - Facebook Code +https://code.fb.com/ai-research/laser-multilingual-sentence-embeddings/|creationTime|2019-01-23T17:55:56Z +http://beckr.org/DBpediaMobile/|creationDate|2008-11-03 +http://beckr.org/DBpediaMobile/|tag|http://www.semanlink.net/tag/dbpedia_mobile +http://beckr.org/DBpediaMobile/|title|DBpedia Mobile (site) +http://beckr.org/DBpediaMobile/|creationTime|2008-11-03T10:17:54Z +http://www.gnu.org/software/octave/|creationDate|2012-04-24 +http://www.gnu.org/software/octave/|tag|http://www.semanlink.net/tag/gnu_octave +http://www.gnu.org/software/octave/|comment|GNU Octave is a high-level interpreted language, primarily intended for numerical computations. +http://www.gnu.org/software/octave/|title|GNU Octave +http://www.gnu.org/software/octave/|creationTime|2012-04-24T13:00:53Z +http://css.maxdesign.com.au/floatutorial/|creationDate|2007-08-10 +http://css.maxdesign.com.au/floatutorial/|tag|http://www.semanlink.net/tag/tutorial +http://css.maxdesign.com.au/floatutorial/|tag|http://www.semanlink.net/tag/css +http://css.maxdesign.com.au/floatutorial/|title|Floatutorial: Step by step CSS float tutorial +http://css.maxdesign.com.au/floatutorial/|creationTime|2007-08-10T17:09:59Z +http://www.cringely.com/2016/02/19/the-fbi-v-apple-isnt-at-all-the-way-you-think-it-is/|creationDate|2016-02-19 +http://www.cringely.com/2016/02/19/the-fbi-v-apple-isnt-at-all-the-way-you-think-it-is/|tag|http://www.semanlink.net/tag/fbi_v_apple +http://www.cringely.com/2016/02/19/the-fbi-v-apple-isnt-at-all-the-way-you-think-it-is/|title|I, Cringely The FBI v. 
Apple isn’t at all the way you think it is - I, Cringely +http://www.cringely.com/2016/02/19/the-fbi-v-apple-isnt-at-all-the-way-you-think-it-is/|creationTime|2016-02-19T13:51:39Z +https://nlpparis.wordpress.com/|creationDate|2018-11-29 +https://nlpparis.wordpress.com/|tag|http://www.semanlink.net/tag/paris_nlp_meetup +https://nlpparis.wordpress.com/|title|Paris NLP - blog +https://nlpparis.wordpress.com/|creationTime|2018-11-29T12:58:54Z +http://semanticweb.com/at-semtechbiz-knowledge-graphs-are-everywhere_b37724?goback=%2Egde_1630687_member_247880484|creationDate|2013-06-18 +http://semanticweb.com/at-semtechbiz-knowledge-graphs-are-everywhere_b37724?goback=%2Egde_1630687_member_247880484|tag|http://www.semanlink.net/tag/wikidata +http://semanticweb.com/at-semtechbiz-knowledge-graphs-are-everywhere_b37724?goback=%2Egde_1630687_member_247880484|tag|http://www.semanlink.net/tag/semtechbiz +http://semanticweb.com/at-semtechbiz-knowledge-graphs-are-everywhere_b37724?goback=%2Egde_1630687_member_247880484|tag|http://www.semanlink.net/tag/knowledge_graph +http://semanticweb.com/at-semtechbiz-knowledge-graphs-are-everywhere_b37724?goback=%2Egde_1630687_member_247880484|tag|http://www.semanlink.net/tag/yahoo +http://semanticweb.com/at-semtechbiz-knowledge-graphs-are-everywhere_b37724?goback=%2Egde_1630687_member_247880484|title|At SemTechBiz, Knowledge Graphs Are Everywhere - semanticweb.com +http://semanticweb.com/at-semtechbiz-knowledge-graphs-are-everywhere_b37724?goback=%2Egde_1630687_member_247880484|creationTime|2013-06-18T00:57:44Z +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-915879,0.html|creationDate|2007-05-28 +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-915879,0.html|tag|http://www.semanlink.net/tag/services_publics +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-915879,0.html|tag|http://www.semanlink.net/tag/bbc +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-915879,0.html|comment|"""La BBC est jugée trop dure envers les banques, les hypermarchés et les compagnies pétrolières,... +L'agriculture bio et le petit commerce bénéficient d'un traitement de faveur.""
+Ca, c'est du service public. God save the BBC! +" +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-915879,0.html|title|La BBC n'aime pas les patrons, par Marc Roche - Le Monde.fr +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-915879,0.html|creationTime|2007-05-28T21:11:47Z +http://www.lemonde.fr/web/article/0,1-0@2-3232,36-915879,0.html|source|Le Monde +http://www.aaronsw.com/weblog/mylifewithtim|creationDate|2008-09-02 +http://www.aaronsw.com/weblog/mylifewithtim|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.aaronsw.com/weblog/mylifewithtim|tag|http://www.semanlink.net/tag/aaron_swartz +http://www.aaronsw.com/weblog/mylifewithtim|title|My Life With Tim (Aaron Swartz's Raw Thought) +http://www.aaronsw.com/weblog/mylifewithtim|creationTime|2008-09-02T14:18:04Z +https://github.com/json-ld/json-ld.org/issues/343|creationDate|2016-08-02 +https://github.com/json-ld/json-ld.org/issues/343|tag|http://www.semanlink.net/tag/stackoverflow_q +https://github.com/json-ld/json-ld.org/issues/343|tag|http://www.semanlink.net/tag/json_ld +https://github.com/json-ld/json-ld.org/issues/343|tag|http://www.semanlink.net/tag/markdown +https://github.com/json-ld/json-ld.org/issues/343|title|markdown to json-ld +https://github.com/json-ld/json-ld.org/issues/343|creationTime|2016-08-02T11:59:46Z +http://druid.io/|creationDate|2014-12-18 +http://druid.io/|tag|http://www.semanlink.net/tag/big_data +http://druid.io/|tag|http://www.semanlink.net/tag/real_time +http://druid.io/|tag|http://www.semanlink.net/tag/analytics +http://druid.io/|title|Druid Real-time Exploratory Analytics on Large Datasets +http://druid.io/|creationTime|2014-12-18T11:38:57Z +http://www.seedwiki.com/wiki/itag/power_i-tags.cfm?wpid=220488|creationDate|2006-04-29 +http://www.seedwiki.com/wiki/itag/power_i-tags.cfm?wpid=220488|tag|http://www.semanlink.net/tag/xri +http://www.seedwiki.com/wiki/itag/power_i-tags.cfm?wpid=220488|tag|http://www.semanlink.net/tag/rdf +http://www.seedwiki.com/wiki/itag/power_i-tags.cfm?wpid=220488|comment|"A general usage pattern that is emerging in the use of XRIs is the +pattern of including standardized 'triples' in the XRI to identify a +resource." 
+http://www.seedwiki.com/wiki/itag/power_i-tags.cfm?wpid=220488|title|Power I-Tags +http://colah.github.io/posts/2015-08-Understanding-LSTMs/|creationDate|2015-10-16 +http://colah.github.io/posts/2015-08-Understanding-LSTMs/|tag|http://www.semanlink.net/tag/lstm_networks +http://colah.github.io/posts/2015-08-Understanding-LSTMs/|tag|http://www.semanlink.net/tag/christopher_olah +http://colah.github.io/posts/2015-08-Understanding-LSTMs/|title|Understanding LSTM Networks -- colah's blog +http://colah.github.io/posts/2015-08-Understanding-LSTMs/|creationTime|2015-10-16T14:24:57Z +http://www.seasr.org/wp-content/plugins/meandre/rdfapi-php/doc/|creationDate|2010-04-01 +http://www.seasr.org/wp-content/plugins/meandre/rdfapi-php/doc/|tag|http://www.semanlink.net/tag/rdf +http://www.seasr.org/wp-content/plugins/meandre/rdfapi-php/doc/|tag|http://www.semanlink.net/tag/php +http://www.seasr.org/wp-content/plugins/meandre/rdfapi-php/doc/|tag|http://www.semanlink.net/tag/rdf_tools +http://www.seasr.org/wp-content/plugins/meandre/rdfapi-php/doc/|title|RAP - Rdf API for PHP V0.9.6 - Home +http://www.seasr.org/wp-content/plugins/meandre/rdfapi-php/doc/|creationTime|2010-04-01T15:52:32Z +http://www.w3.org/2007/powder/blog|creationDate|2010-07-01 +http://www.w3.org/2007/powder/blog|tag|http://www.semanlink.net/tag/powder +http://www.w3.org/2007/powder/blog|title|POWDER Working Group Blog +http://www.w3.org/2007/powder/blog|creationTime|2010-07-01T11:48:51Z +https://www.bbc.co.uk/news/world-africa-45262081|creationDate|2018-09-03 +https://www.bbc.co.uk/news/world-africa-45262081|tag|http://www.semanlink.net/tag/nigeria +https://www.bbc.co.uk/news/world-africa-45262081|tag|http://www.semanlink.net/tag/langues_vivantes +https://www.bbc.co.uk/news/world-africa-45262081|title|Ubang: The Nigerian village where men and women speak different languages - BBC News +https://www.bbc.co.uk/news/world-africa-45262081|creationTime|2018-09-03T11:45:46Z +http://www.lemonde.fr/afrique/article/2018/01/19/la-classe-africaine-notre-serie-consacree-a-l-education_5243976_3212.html|creationDate|2018-01-21 +http://www.lemonde.fr/afrique/article/2018/01/19/la-classe-africaine-notre-serie-consacree-a-l-education_5243976_3212.html|tag|http://www.semanlink.net/tag/enseignement_en_afrique +http://www.lemonde.fr/afrique/article/2018/01/19/la-classe-africaine-notre-serie-consacree-a-l-education_5243976_3212.html|title|La classe africaine +http://www.lemonde.fr/afrique/article/2018/01/19/la-classe-africaine-notre-serie-consacree-a-l-education_5243976_3212.html|creationTime|2018-01-21T19:41:14Z +http://passeurdesciences.blog.lemonde.fr/2017/05/04/un-pas-de-plus-vers-la-greffe-de-tete/|creationDate|2017-05-04 +http://passeurdesciences.blog.lemonde.fr/2017/05/04/un-pas-de-plus-vers-la-greffe-de-tete/|tag|http://www.semanlink.net/tag/greffe_de_tete +http://passeurdesciences.blog.lemonde.fr/2017/05/04/un-pas-de-plus-vers-la-greffe-de-tete/|comment|Greffe de tête = Greffe du corps +http://passeurdesciences.blog.lemonde.fr/2017/05/04/un-pas-de-plus-vers-la-greffe-de-tete/|title|Un pas de plus vers la greffe de tête Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2017/05/04/un-pas-de-plus-vers-la-greffe-de-tete/|creationTime|2017-05-04T20:07:04Z +http://www.cs.princeton.edu/~blei/topicmodeling.html|creationDate|2013-08-19 +http://www.cs.princeton.edu/~blei/topicmodeling.html|tag|http://www.semanlink.net/tag/topic_modeling +http://www.cs.princeton.edu/~blei/topicmodeling.html|tag|http://www.semanlink.net/tag/david_blei 
+http://www.cs.princeton.edu/~blei/topicmodeling.html|comment|links to introductory materials, corpus browsers based on topic models, and open source software (from my research group) for topic modeling. +http://www.cs.princeton.edu/~blei/topicmodeling.html|title|David M. Blei: Topic modeling +http://www.cs.princeton.edu/~blei/topicmodeling.html|creationTime|2013-08-19T17:02:40Z +http://news.bbc.co.uk/2/hi/science/nature/7329505.stm|creationDate|2008-04-13 +http://news.bbc.co.uk/2/hi/science/nature/7329505.stm|tag|http://www.semanlink.net/tag/first_americans +http://news.bbc.co.uk/2/hi/science/nature/7329505.stm|title|BBC NEWS Faeces hint at first Americans +http://news.bbc.co.uk/2/hi/science/nature/7329505.stm|creationTime|2008-04-13T13:12:18Z +http://news.bbc.co.uk/2/hi/science/nature/7329505.stm|source|BBC +http://www.youtube.com/watch?v=3-UGbrhWcak|creationDate|2013-02-22 +http://www.youtube.com/watch?v=3-UGbrhWcak|tag|http://www.semanlink.net/tag/souvenirs +http://www.youtube.com/watch?v=3-UGbrhWcak|tag|http://www.semanlink.net/tag/saudade +http://www.youtube.com/watch?v=3-UGbrhWcak|tag|http://www.semanlink.net/tag/blues +http://www.youtube.com/watch?v=3-UGbrhWcak|tag|http://www.semanlink.net/tag/youtube +http://www.youtube.com/watch?v=3-UGbrhWcak|title|Champion Jack Dupree - Christina, Christina Blues - YouTube +http://www.youtube.com/watch?v=3-UGbrhWcak|creationTime|2013-02-22T14:11:21Z +https://www.bbc.com/news/entertainment-arts-44820536|creationDate|2018-08-13 +https://www.bbc.com/news/entertainment-arts-44820536|tag|http://www.semanlink.net/tag/news +https://www.bbc.com/news/entertainment-arts-44820536|tag|http://www.semanlink.net/tag/bbc +https://www.bbc.com/news/entertainment-arts-44820536|title|Obituary: VS Naipaul - BBC News +https://www.bbc.com/news/entertainment-arts-44820536|creationTime|2018-08-13T18:07:12Z +https://www.nytimes.com/2017/05/09/world/europe/hackers-came-but-the-french-were-prepared.html?smid=tw-nytimes&smtyp=cur|creationDate|2017-05-10 +https://www.nytimes.com/2017/05/09/world/europe/hackers-came-but-the-french-were-prepared.html?smid=tw-nytimes&smtyp=cur|tag|http://www.semanlink.net/tag/macron +https://www.nytimes.com/2017/05/09/world/europe/hackers-came-but-the-french-were-prepared.html?smid=tw-nytimes&smtyp=cur|tag|http://www.semanlink.net/tag/hackers +https://www.nytimes.com/2017/05/09/world/europe/hackers-came-but-the-french-were-prepared.html?smid=tw-nytimes&smtyp=cur|title|Hackers Came, but the French Were Prepared - The New York Times +https://www.nytimes.com/2017/05/09/world/europe/hackers-came-but-the-french-were-prepared.html?smid=tw-nytimes&smtyp=cur|creationTime|2017-05-10T21:00:59Z +https://distill.pub/2017/ctc/|creationDate|2017-11-28 +https://distill.pub/2017/ctc/|tag|http://www.semanlink.net/tag/sequence_to_sequence_learning +https://distill.pub/2017/ctc/|comment|A visual guide to Connectionist Temporal Classification, an algorithm used to train deep neural networks in speech recognition, handwriting recognition and other sequence problems. 
+https://distill.pub/2017/ctc/|title|Sequence Modeling with CTC +https://distill.pub/2017/ctc/|creationTime|2017-11-28T08:49:33Z +https://class.coursera.org/ml-005/forum/thread?thread_id=122|creationDate|2014-04-28 +https://class.coursera.org/ml-005/forum/thread?thread_id=122|tag|http://www.semanlink.net/tag/coursera_machine_learning +https://class.coursera.org/ml-005/forum/thread?thread_id=122|tag|http://www.semanlink.net/tag/time_series +https://class.coursera.org/ml-005/forum/thread?thread_id=122|title|How can Neural Networks be applied to Time Series Forecasting? +https://class.coursera.org/ml-005/forum/thread?thread_id=122|creationTime|2014-04-28T15:40:13Z +http://www.ijcis.info/Vol4N2/pp63-71.pdf|creationDate|2011-04-04 +http://www.ijcis.info/Vol4N2/pp63-71.pdf|tag|http://www.semanlink.net/tag/semantic_web_assisted_learning +http://www.ijcis.info/Vol4N2/pp63-71.pdf|tag|http://www.semanlink.net/tag/e_learning +http://www.ijcis.info/Vol4N2/pp63-71.pdf|title|E-Learning Model Based On Semantic Web Technology +http://www.ijcis.info/Vol4N2/pp63-71.pdf|creationTime|2011-04-04T15:52:15Z +https://fr.slideshare.net/fpservant/ec-webslides|creationDate|2017-04-12 +https://fr.slideshare.net/fpservant/ec-webslides|tag|http://www.semanlink.net/tag/fps_ec_web_14 +https://fr.slideshare.net/fpservant/ec-webslides|tag|http://www.semanlink.net/tag/fpservant_slideshare +https://fr.slideshare.net/fpservant/ec-webslides|title|"EC-WEB 2014 ""Automotive ranges as e-commerce data""" +https://fr.slideshare.net/fpservant/ec-webslides|creationTime|2017-04-12T13:48:51Z +http://fr.wikipedia.org/wiki/Bassas_da_India|creationDate|2013-06-01 +http://fr.wikipedia.org/wiki/Bassas_da_India|tag|http://www.semanlink.net/tag/atoll +http://fr.wikipedia.org/wiki/Bassas_da_India|tag|http://www.semanlink.net/tag/ocean_indien +http://fr.wikipedia.org/wiki/Bassas_da_India|title|Bassas da India - Wikipédia +http://fr.wikipedia.org/wiki/Bassas_da_India|creationTime|2013-06-01T21:39:02Z +http://www.snee.com/bobdc.blog/2013/11/using-sparql-queries-from-nati.html|creationDate|2013-11-28 +http://www.snee.com/bobdc.blog/2013/11/using-sparql-queries-from-nati.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2013/11/using-sparql-queries-from-nati.html|tag|http://www.semanlink.net/tag/sparql +http://www.snee.com/bobdc.blog/2013/11/using-sparql-queries-from-nati.html|tag|http://www.semanlink.net/tag/android +http://www.snee.com/bobdc.blog/2013/11/using-sparql-queries-from-nati.html|title|Using SPARQL queries from native Android apps - bobdc.blog +http://www.snee.com/bobdc.blog/2013/11/using-sparql-queries-from-nati.html|creationTime|2013-11-28T23:50:18Z +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1347|creationDate|2008-05-04 +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1347|tag|http://www.semanlink.net/tag/orri_erling +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1347|tag|http://www.semanlink.net/tag/linked_data +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1347|tag|http://www.semanlink.net/tag/ldow2008 +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1347|title|Linked Data and Information Architecture +http://www.openlinksw.com/dataspace/oerling/weblog/Orri%20Erling's%20Blog/1347|creationTime|2008-05-04T14:49:00Z +https://tech.goibibo.com/key-topics-extraction-and-contextual-sentiment-of-users-reviews-20e63c0fd7ca|creationDate|2018-09-18 
+https://tech.goibibo.com/key-topics-extraction-and-contextual-sentiment-of-users-reviews-20e63c0fd7ca|tag|http://www.semanlink.net/tag/nlp_topic_extraction +https://tech.goibibo.com/key-topics-extraction-and-contextual-sentiment-of-users-reviews-20e63c0fd7ca|tag|http://www.semanlink.net/tag/sentiment_analysis +https://tech.goibibo.com/key-topics-extraction-and-contextual-sentiment-of-users-reviews-20e63c0fd7ca|tag|http://www.semanlink.net/tag/spacy +https://tech.goibibo.com/key-topics-extraction-and-contextual-sentiment-of-users-reviews-20e63c0fd7ca|title|Key topics extraction and contextual sentiment of users’ reviews +https://tech.goibibo.com/key-topics-extraction-and-contextual-sentiment-of-users-reviews-20e63c0fd7ca|creationTime|2018-09-18T15:05:58Z +http://glish.com/css/7.asp|creationDate|2005-08-29 +http://glish.com/css/7.asp|tag|http://www.semanlink.net/tag/css +http://glish.com/css/7.asp|title|glish.com : CSS layout techniques : 3 columns, the holy grail +http://www.siteduzero.com/tutoriel-3-14663-memo-pour-les-regex.html#ss_part_1|creationDate|2010-01-27 +http://www.siteduzero.com/tutoriel-3-14663-memo-pour-les-regex.html#ss_part_1|tag|http://www.semanlink.net/tag/regex +http://www.siteduzero.com/tutoriel-3-14663-memo-pour-les-regex.html#ss_part_1|title|Mémo pour les Regex +http://www.siteduzero.com/tutoriel-3-14663-memo-pour-les-regex.html#ss_part_1|creationTime|2010-01-27T00:16:52Z +http://www.www2015.it/documents/proceedings/proceedings/p864.pdf|creationDate|2015-05-22 +http://www.www2015.it/documents/proceedings/proceedings/p864.pdf|tag|http://www.semanlink.net/tag/rdf +http://www.www2015.it/documents/proceedings/proceedings/p864.pdf|tag|http://www.semanlink.net/tag/www_2015 +http://www.www2015.it/documents/proceedings/proceedings/p864.pdf|title|Deriving an Emergent Relational Schema from RDF Data +http://www.www2015.it/documents/proceedings/proceedings/p864.pdf|creationTime|2015-05-22T00:19:25Z +http://www.youtube.com/watch?v=c4d24NQua6U|creationDate|2010-06-24 +http://www.youtube.com/watch?v=c4d24NQua6U|tag|http://www.semanlink.net/tag/shoira_otabekova +http://www.youtube.com/watch?v=c4d24NQua6U|title|Seni sogindim - Shoira Otabekova +http://www.youtube.com/watch?v=c4d24NQua6U|creationTime|2010-06-24T23:56:36Z +http://www.wired.com/2016/08/dear-college-students-take-geology/|creationDate|2016-09-01 +http://www.wired.com/2016/08/dear-college-students-take-geology/|tag|http://www.semanlink.net/tag/geologie +http://www.wired.com/2016/08/dear-college-students-take-geology/|title|Dear College Students: You Should Take Geology WIRED +http://www.wired.com/2016/08/dear-college-students-take-geology/|creationTime|2016-09-01T21:57:49Z +http://www.w3.org/TR/2007/WD-rif-bld-20071030/|creationDate|2007-11-07 +http://www.w3.org/TR/2007/WD-rif-bld-20071030/|tag|http://www.semanlink.net/tag/w3c_working_draft +http://www.w3.org/TR/2007/WD-rif-bld-20071030/|tag|http://www.semanlink.net/tag/rif +http://www.w3.org/TR/2007/WD-rif-bld-20071030/|comment|"basic format that allows logic rules to be exchanged between rule-based systems.
+W3C Working Draft 30 October 2007" +http://www.w3.org/TR/2007/WD-rif-bld-20071030/|title|RIF Basic Logic Dialect +http://www.w3.org/TR/2007/WD-rif-bld-20071030/|creationTime|2007-11-07T17:02:44Z +https://www.courrierinternational.com/article/france-uruguay-tuer-ou-se-faire-tuer-les-uruguayens-connaissent-la-regle-de-la-coupe-du?utm_campaign=partenariatmondial2018&utm_medium=article&utm_source=lemonde.fr|creationDate|2018-07-07 +https://www.courrierinternational.com/article/france-uruguay-tuer-ou-se-faire-tuer-les-uruguayens-connaissent-la-regle-de-la-coupe-du?utm_campaign=partenariatmondial2018&utm_medium=article&utm_source=lemonde.fr|tag|http://www.semanlink.net/tag/uruguay +https://www.courrierinternational.com/article/france-uruguay-tuer-ou-se-faire-tuer-les-uruguayens-connaissent-la-regle-de-la-coupe-du?utm_campaign=partenariatmondial2018&utm_medium=article&utm_source=lemonde.fr|tag|http://www.semanlink.net/tag/coupe_du_monde_2018 +https://www.courrierinternational.com/article/france-uruguay-tuer-ou-se-faire-tuer-les-uruguayens-connaissent-la-regle-de-la-coupe-du?utm_campaign=partenariatmondial2018&utm_medium=article&utm_source=lemonde.fr|comment|L’Uruguay, le pays où le football est roi +https://www.courrierinternational.com/article/france-uruguay-tuer-ou-se-faire-tuer-les-uruguayens-connaissent-la-regle-de-la-coupe-du?utm_campaign=partenariatmondial2018&utm_medium=article&utm_source=lemonde.fr|title|France-Uruguay. “Tuer ou se faire tuer : les Uruguayens connaissent la règle de la Coupe du monde” Courrier international +https://www.courrierinternational.com/article/france-uruguay-tuer-ou-se-faire-tuer-les-uruguayens-connaissent-la-regle-de-la-coupe-du?utm_campaign=partenariatmondial2018&utm_medium=article&utm_source=lemonde.fr|creationTime|2018-07-07T15:14:34Z +http://reseaux.blog.lemonde.fr/2013/01/03/economie-numerique-robots-retour/|creationDate|2013-01-03 +http://reseaux.blog.lemonde.fr/2013/01/03/economie-numerique-robots-retour/|tag|http://www.semanlink.net/tag/robotique +http://reseaux.blog.lemonde.fr/2013/01/03/economie-numerique-robots-retour/|tag|http://www.semanlink.net/tag/economie +http://reseaux.blog.lemonde.fr/2013/01/03/economie-numerique-robots-retour/|title|Économie numérique : Robots, le retour Lois des réseaux +http://reseaux.blog.lemonde.fr/2013/01/03/economie-numerique-robots-retour/|creationTime|2013-01-03T23:15:44Z +http://www.telegraph.co.uk/culture/books/4248401/100-novels-everyone-should-read.html|creationDate|2014-06-06 +http://www.telegraph.co.uk/culture/books/4248401/100-novels-everyone-should-read.html|tag|http://www.semanlink.net/tag/livre_a_lire +http://www.telegraph.co.uk/culture/books/4248401/100-novels-everyone-should-read.html|title|100 novels everyone should read - Telegraph +http://www.telegraph.co.uk/culture/books/4248401/100-novels-everyone-should-read.html|creationTime|2014-06-06T22:52:36Z +http://www.w3.org/2009/03/xbrl/linked-data.png|creationDate|2010-07-01 +http://www.w3.org/2009/03/xbrl/linked-data.png|tag|http://www.semanlink.net/tag/linked_data +http://www.w3.org/2009/03/xbrl/linked-data.png|title|linked-data.png +http://www.w3.org/2009/03/xbrl/linked-data.png|creationTime|2010-07-01T18:17:21Z +https://www.nytimes.com/2019/04/02/health/klotho-brain-enhancement-dementia-alzheimers.html|creationDate|2019-04-17 +https://www.nytimes.com/2019/04/02/health/klotho-brain-enhancement-dementia-alzheimers.html|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne 
+https://www.nytimes.com/2019/04/02/health/klotho-brain-enhancement-dementia-alzheimers.html|tag|http://www.semanlink.net/tag/cerveau +https://www.nytimes.com/2019/04/02/health/klotho-brain-enhancement-dementia-alzheimers.html|tag|http://www.semanlink.net/tag/alzheimer +https://www.nytimes.com/2019/04/02/health/klotho-brain-enhancement-dementia-alzheimers.html|comment|I should +https://www.nytimes.com/2019/04/02/health/klotho-brain-enhancement-dementia-alzheimers.html|title|One Day There May Be a Drug to Turbocharge the Brain. Who Should Get It? - The New York Times +https://www.nytimes.com/2019/04/02/health/klotho-brain-enhancement-dementia-alzheimers.html|creationTime|2019-04-17T13:15:13Z +http://nlpprogress.com/english/entity_linking.html|creationDate|2019-04-25 +http://nlpprogress.com/english/entity_linking.html|tag|http://www.semanlink.net/tag/sebastian_ruder +http://nlpprogress.com/english/entity_linking.html|tag|http://www.semanlink.net/tag/entity_linking +http://nlpprogress.com/english/entity_linking.html|title|Entity Linking NLP-progress +http://nlpprogress.com/english/entity_linking.html|creationTime|2019-04-25T13:47:01Z +http://onjava.com/lpt/a/6293|creationDate|2005-10-28 +http://onjava.com/lpt/a/6293|tag|http://www.semanlink.net/tag/browser_back_button +http://onjava.com/lpt/a/6293|tag|http://www.semanlink.net/tag/ajax +http://onjava.com/lpt/a/6293|title|ONJava.com: AJAX: How to Handle Bookmarks and Back Buttons +http://harry.hchen1.com/2006/01/29/255|creationDate|2006-01-30 +http://harry.hchen1.com/2006/01/29/255|tag|http://www.semanlink.net/tag/tagging +http://harry.hchen1.com/2006/01/29/255|tag|http://www.semanlink.net/tag/semanlink_related +http://harry.hchen1.com/2006/01/29/255|title|Harry Chen Thinks Aloud » Can ThinkMap Make Better Flickr and Technorati? 
+http://jena.sourceforge.net/assembler/assembler-howto.html|creationDate|2008-11-17 +http://jena.sourceforge.net/assembler/assembler-howto.html|tag|http://www.semanlink.net/tag/jena_assembler +http://jena.sourceforge.net/assembler/assembler-howto.html|tag|http://www.semanlink.net/tag/howto +http://jena.sourceforge.net/assembler/assembler-howto.html|title|the Assembler howto +http://jena.sourceforge.net/assembler/assembler-howto.html|creationTime|2008-11-17T00:44:50Z +http://www.blogmarks.net|creationDate|2005-04-09 +http://www.blogmarks.net|tag|http://www.semanlink.net/tag/blogmarks +http://www.heppnetz.de/ontologies/vso/ns|creationDate|2011-03-24 +http://www.heppnetz.de/ontologies/vso/ns|tag|http://www.semanlink.net/tag/vso +http://www.heppnetz.de/ontologies/vso/ns|title|The Vehicle Sales Ontology (VSO) +http://www.heppnetz.de/ontologies/vso/ns|creationTime|2011-03-24T18:28:53Z +https://arxiv.org/abs/1902.05196v1|creationDate|2019-02-18 +https://arxiv.org/abs/1902.05196v1|tag|http://www.semanlink.net/tag/nlp_text_classification +https://arxiv.org/abs/1902.05196v1|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1902.05196v1|tag|http://www.semanlink.net/tag/category_embedding +https://arxiv.org/abs/1902.05196v1|tag|http://www.semanlink.net/tag/categorical_variables +https://arxiv.org/abs/1902.05196v1|arxiv_author|Minji Seo +https://arxiv.org/abs/1902.05196v1|arxiv_author|Seung-won Hwang +https://arxiv.org/abs/1902.05196v1|arxiv_author|Sua Sung +https://arxiv.org/abs/1902.05196v1|arxiv_author|Jihyeok Kim +https://arxiv.org/abs/1902.05196v1|arxiv_author|Kyungjae Lee +https://arxiv.org/abs/1902.05196v1|arxiv_author|Reinald Kim Amplayo +https://arxiv.org/abs/1902.05196v1|comment|> We observe that **current representation methods for categorical metadata... are not as effective as claimed** in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category +https://arxiv.org/abs/1902.05196v1|title|[1902.05196] Categorical Metadata Representation for Customized Text Classification +https://arxiv.org/abs/1902.05196v1|creationTime|2019-02-18T08:20:43Z +https://arxiv.org/abs/1902.05196v1|arxiv_summary|"The performance of text classification has improved tremendously using +intelligently engineered neural-based models, especially those injecting +categorical metadata as additional information, e.g., using user/product +information for sentiment classification. These information have been used to +modify parts of the model (e.g., word embeddings, attention mechanisms) such +that results can be customized according to the metadata. We observe that +current representation methods for categorical metadata, which are devised for +human consumption, are not as effective as claimed in popular classification +methods, outperformed even by simple concatenation of categorical features in +the final layer of the sentence encoder. We conjecture that categorical +features are harder to represent for machine use, as available context only +indirectly describes the category, and even such context is often scarce (for +tail category). To this end, we propose to use basis vectors to effectively +incorporate categorical metadata on various parts of a neural-based model. 
This +additionally decreases the number of parameters dramatically, especially when +the number of categorical features is large. Extensive experiments on various +datasets with different properties are performed and show that through our +method, we can represent categorical metadata more effectively to customize +parts of the model, including unexplored ones, and increase the performance of +the model greatly." +https://arxiv.org/abs/1902.05196v1|arxiv_firstAuthor|Jihyeok Kim +https://arxiv.org/abs/1902.05196v1|arxiv_updated|2019-02-14T03:07:53Z +https://arxiv.org/abs/1902.05196v1|arxiv_title|Categorical Metadata Representation for Customized Text Classification +https://arxiv.org/abs/1902.05196v1|arxiv_published|2019-02-14T03:07:53Z +https://arxiv.org/abs/1902.05196v1|arxiv_num|1902.05196 +http://www.bbc.co.uk/news/entertainment-arts-11056840|creationDate|2010-08-23 +http://www.bbc.co.uk/news/entertainment-arts-11056840|tag|http://www.semanlink.net/tag/industrie_du_disque +http://www.bbc.co.uk/news/entertainment-arts-11056840|title|BBC News - How commonplace is autotune? +http://www.bbc.co.uk/news/entertainment-arts-11056840|creationTime|2010-08-23T19:29:25Z +http://composing-the-semantic-web.blogspot.com/2007/11/creating-documents-with-sparql-and-jsp.html|creationDate|2009-06-06 +http://composing-the-semantic-web.blogspot.com/2007/11/creating-documents-with-sparql-and-jsp.html|tag|http://www.semanlink.net/tag/jsp +http://composing-the-semantic-web.blogspot.com/2007/11/creating-documents-with-sparql-and-jsp.html|tag|http://www.semanlink.net/tag/sparql +http://composing-the-semantic-web.blogspot.com/2007/11/creating-documents-with-sparql-and-jsp.html|title|Composing the Semantic Web: Creating documents with SPARQL and JSP +http://composing-the-semantic-web.blogspot.com/2007/11/creating-documents-with-sparql-and-jsp.html|creationTime|2009-06-06T01:16:29Z +http://www.rfi.fr/afrique/20130301-architecture-terre-une-solution-le-sahel|creationDate|2013-03-05 +http://www.rfi.fr/afrique/20130301-architecture-terre-une-solution-le-sahel|tag|http://www.semanlink.net/tag/sahel +http://www.rfi.fr/afrique/20130301-architecture-terre-une-solution-le-sahel|tag|http://www.semanlink.net/tag/niger +http://www.rfi.fr/afrique/20130301-architecture-terre-une-solution-le-sahel|tag|http://www.semanlink.net/tag/architecture_en_terre +http://www.rfi.fr/afrique/20130301-architecture-terre-une-solution-le-sahel|tag|http://www.semanlink.net/tag/banco +http://www.rfi.fr/afrique/20130301-architecture-terre-une-solution-le-sahel|title|L’architecture en terre: une solution pour le Sahel - NIGER - RFI +http://www.rfi.fr/afrique/20130301-architecture-terre-une-solution-le-sahel|creationTime|2013-03-05T00:18:05Z +https://medium.com/@mikeal/ive-seen-the-future-it-s-full-of-html-2577246f2210|creationDate|2017-09-29 +https://medium.com/@mikeal/ive-seen-the-future-it-s-full-of-html-2577246f2210|tag|http://www.semanlink.net/tag/npm +https://medium.com/@mikeal/ive-seen-the-future-it-s-full-of-html-2577246f2210|tag|http://www.semanlink.net/tag/webcomponents +https://medium.com/@mikeal/ive-seen-the-future-it-s-full-of-html-2577246f2210|comment|What if we could leverage all the code in npm and allow people to use our libraries with as little effort as a <script> include? +https://medium.com/@mikeal/ive-seen-the-future-it-s-full-of-html-2577246f2210|title|I’ve seen the future, it’s full of HTML. 
– Mikeal – Medium +https://medium.com/@mikeal/ive-seen-the-future-it-s-full-of-html-2577246f2210|creationTime|2017-09-29T01:29:54Z +http://www.semanlink.net/doc/2019/01/most-important-ai-nlp-research|creationDate|2019-01-23 +http://www.semanlink.net/doc/2019/01/most-important-ai-nlp-research|tag|http://www.semanlink.net/tag/2018 +http://www.semanlink.net/doc/2019/01/most-important-ai-nlp-research|tag|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/doc/2019/01/most-important-ai-nlp-research|tag|http://www.semanlink.net/tag/research_papers +http://www.semanlink.net/doc/2019/01/most-important-ai-nlp-research|title|14 NLP Research Breakthroughs You Can Apply To Your Business - 2018 +http://www.semanlink.net/doc/2019/01/most-important-ai-nlp-research|bookmarkOf|https://www.topbots.com/most-important-ai-nlp-research/ +http://www.semanlink.net/doc/2019/01/most-important-ai-nlp-research|creationTime|2019-01-23T22:56:57Z +http://vimeo.com/45633052|creationDate|2012-07-12 +http://vimeo.com/45633052|tag|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://vimeo.com/45633052|tag|http://www.semanlink.net/tag/henri_bergius +http://vimeo.com/45633052|tag|http://www.semanlink.net/tag/vie_vienna_iks_editables +http://vimeo.com/45633052|tag|http://www.semanlink.net/tag/create_js +http://vimeo.com/45633052|title|Decoupling Content Management with CreateJS and VIE on Vimeo +http://vimeo.com/45633052|creationTime|2012-07-12T20:01:47Z +http://colah.github.io/posts/2015-09-NN-Types-FP/|creationDate|2015-09-04 +http://colah.github.io/posts/2015-09-NN-Types-FP/|tag|http://www.semanlink.net/tag/artificial_neural_network +http://colah.github.io/posts/2015-09-NN-Types-FP/|tag|http://www.semanlink.net/tag/functional_programming +http://colah.github.io/posts/2015-09-NN-Types-FP/|title|Neural Networks, Types, and Functional Programming -- colah's blog +http://colah.github.io/posts/2015-09-NN-Types-FP/|creationTime|2015-09-04T10:04:37Z +http://danzinde.wordpress.com/2007/07/31/cognitive-aspects-of-semantic-desktop-to-support-pim/|creationDate|2007-08-01 +http://danzinde.wordpress.com/2007/07/31/cognitive-aspects-of-semantic-desktop-to-support-pim/|tag|http://www.semanlink.net/tag/semantic_desktop +http://danzinde.wordpress.com/2007/07/31/cognitive-aspects-of-semantic-desktop-to-support-pim/|tag|http://www.semanlink.net/tag/personal_information_management +http://danzinde.wordpress.com/2007/07/31/cognitive-aspects-of-semantic-desktop-to-support-pim/|title|Cognitive Aspects of Semantic Desktop to Support PIM « Danzinde +http://danzinde.wordpress.com/2007/07/31/cognitive-aspects-of-semantic-desktop-to-support-pim/|creationTime|2007-08-01T17:03:36Z +http://www.planetastronomy.com/articles/mesure-distance.htm|creationDate|2008-05-17 +http://www.planetastronomy.com/articles/mesure-distance.htm|tag|http://www.semanlink.net/tag/histoire_de_l_astronomie +http://www.planetastronomy.com/articles/mesure-distance.htm|tag|http://www.semanlink.net/tag/astronomie +http://www.planetastronomy.com/articles/mesure-distance.htm|title|La mesure des distances en astronomie des origines à nos jours +http://www.planetastronomy.com/articles/mesure-distance.htm|creationTime|2008-05-17T16:52:44Z +http://passeurdesciences.blog.lemonde.fr/2013/05/05/decouverte-de-deux-planetes-oceans/|creationDate|2013-08-21 +http://passeurdesciences.blog.lemonde.fr/2013/05/05/decouverte-de-deux-planetes-oceans/|tag|http://www.semanlink.net/tag/exoplanetes 
+http://passeurdesciences.blog.lemonde.fr/2013/05/05/decouverte-de-deux-planetes-oceans/|title|Découverte de deux planètes-océans Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2013/05/05/decouverte-de-deux-planetes-oceans/|creationTime|2013-08-21T23:36:04Z +http://www.w3.org/RDF/Validator/|creationDate|2007-01-19 +http://www.w3.org/RDF/Validator/|tag|http://www.semanlink.net/tag/rdf_validator +http://www.w3.org/RDF/Validator/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/RDF/Validator/|tag|http://www.semanlink.net/tag/sw_online_tools +http://www.w3.org/RDF/Validator/|title|W3C RDF Validation Service +http://stgil.e-monsite.com/pages/personnages-de-st-gilles-croix-de-vie/paul-imbert.html|creationDate|2017-08-09 +http://stgil.e-monsite.com/pages/personnages-de-st-gilles-croix-de-vie/paul-imbert.html|tag|http://www.semanlink.net/tag/esclavage +http://stgil.e-monsite.com/pages/personnages-de-st-gilles-croix-de-vie/paul-imbert.html|tag|http://www.semanlink.net/tag/tombouctou +http://stgil.e-monsite.com/pages/personnages-de-st-gilles-croix-de-vie/paul-imbert.html|comment|"marin français, réduit en esclavage au Maroc, qui aurait accompagné son maître à Tombouctou (16-17e siècle) +" +http://stgil.e-monsite.com/pages/personnages-de-st-gilles-croix-de-vie/paul-imbert.html|title|Paul IMBERT +http://stgil.e-monsite.com/pages/personnages-de-st-gilles-croix-de-vie/paul-imbert.html|creationTime|2017-08-09T16:47:48Z +https://towardsdatascience.com/deep-learning-meets-physics-restricted-boltzmann-machines-part-i-6df5c4918c15|creationDate|2018-05-31 +https://towardsdatascience.com/deep-learning-meets-physics-restricted-boltzmann-machines-part-i-6df5c4918c15|tag|http://www.semanlink.net/tag/recommender_systems +https://towardsdatascience.com/deep-learning-meets-physics-restricted-boltzmann-machines-part-i-6df5c4918c15|tag|http://www.semanlink.net/tag/restricted_boltzmann_machine +https://towardsdatascience.com/deep-learning-meets-physics-restricted-boltzmann-machines-part-i-6df5c4918c15|comment|Theory behind Restricted Boltzmann Machines — A powerful Tool for Recomender Systems +https://towardsdatascience.com/deep-learning-meets-physics-restricted-boltzmann-machines-part-i-6df5c4918c15|title|Deep Learning meets Physics: Restricted Boltzmann Machines Part I +https://towardsdatascience.com/deep-learning-meets-physics-restricted-boltzmann-machines-part-i-6df5c4918c15|creationTime|2018-05-31T08:19:32Z +http://www.abigailsee.com/2018/02/21/deep-learning-structure-and-innate-priors.html|creationDate|2018-02-25 +http://www.abigailsee.com/2018/02/21/deep-learning-structure-and-innate-priors.html|tag|http://www.semanlink.net/tag/yann_lecun +http://www.abigailsee.com/2018/02/21/deep-learning-structure-and-innate-priors.html|tag|http://www.semanlink.net/tag/chris_manning +http://www.abigailsee.com/2018/02/21/deep-learning-structure-and-innate-priors.html|comment|> Reward should be innate – that is, understanding the world correctly should be its own reward +http://www.abigailsee.com/2018/02/21/deep-learning-structure-and-innate-priors.html|title|Deep Learning, Structure and Innate Priors - A Discussion between Yann LeCun and Christopher Manning Abigail See +http://www.abigailsee.com/2018/02/21/deep-learning-structure-and-innate-priors.html|creationTime|2018-02-25T11:38:36Z +http://www.paulgraham.com/avg.html|creationDate|2005-11-24 +http://www.paulgraham.com/avg.html|tag|http://www.semanlink.net/tag/paul_graham +http://www.paulgraham.com/avg.html|tag|http://www.semanlink.net/tag/lisp 
+http://www.paulgraham.com/avg.html|tag|http://www.semanlink.net/tag/yahoo +http://www.paulgraham.com/avg.html|comment|When you choose technology, you have to ignore what other people are doing, and consider only what will work the best. +http://www.paulgraham.com/avg.html|title|Beating the Averages +http://www.objectlearn.com|creationDate|2006-01-07 +http://www.objectlearn.com|tag|http://www.semanlink.net/tag/jsp +http://www.objectlearn.com|tag|http://www.semanlink.net/tag/eclipse +http://www.objectlearn.com|title|ObjectLearn - Home of Lomboz +http://www.bbc.co.uk/music/beta|creationDate|2008-08-20 +http://www.bbc.co.uk/music/beta|tag|http://www.semanlink.net/tag/bbc +http://www.bbc.co.uk/music/beta|tag|http://www.semanlink.net/tag/musicbrainz +http://www.bbc.co.uk/music/beta|title|BBC - Music - Beta +http://www.bbc.co.uk/music/beta|creationTime|2008-08-20T22:21:34Z +http://blog.wavii.com/2012/08/23/duped-by-dupes/|creationDate|2012-08-25 +http://blog.wavii.com/2012/08/23/duped-by-dupes/|tag|http://www.semanlink.net/tag/nlp +http://blog.wavii.com/2012/08/23/duped-by-dupes/|tag|http://www.semanlink.net/tag/named_entity_recognition +http://blog.wavii.com/2012/08/23/duped-by-dupes/|comment|using cosine similarity normalized by TF-IDF +http://blog.wavii.com/2012/08/23/duped-by-dupes/|title|Duped by Dupes Wavii Blog +http://blog.wavii.com/2012/08/23/duped-by-dupes/|creationTime|2012-08-25T20:13:35Z +http://www.sciencesetavenir.fr/decouvrir/tele-cinema/20150926.OBS6576/tele-terres-nucleaires-une-histoire-du-plutonium.html|creationDate|2016-02-09 +http://www.sciencesetavenir.fr/decouvrir/tele-cinema/20150926.OBS6576/tele-terres-nucleaires-une-histoire-du-plutonium.html|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.sciencesetavenir.fr/decouvrir/tele-cinema/20150926.OBS6576/tele-terres-nucleaires-une-histoire-du-plutonium.html|tag|http://www.semanlink.net/tag/bombe_atomique +http://www.sciencesetavenir.fr/decouvrir/tele-cinema/20150926.OBS6576/tele-terres-nucleaires-une-histoire-du-plutonium.html|tag|http://www.semanlink.net/tag/lobby_nucleaire +http://www.sciencesetavenir.fr/decouvrir/tele-cinema/20150926.OBS6576/tele-terres-nucleaires-une-histoire-du-plutonium.html|tag|http://www.semanlink.net/tag/industrie_nucleaire +http://www.sciencesetavenir.fr/decouvrir/tele-cinema/20150926.OBS6576/tele-terres-nucleaires-une-histoire-du-plutonium.html|tag|http://www.semanlink.net/tag/plutonium +http://www.sciencesetavenir.fr/decouvrir/tele-cinema/20150926.OBS6576/tele-terres-nucleaires-une-histoire-du-plutonium.html|tag|http://www.semanlink.net/tag/ca_craint +http://www.sciencesetavenir.fr/decouvrir/tele-cinema/20150926.OBS6576/tele-terres-nucleaires-une-histoire-du-plutonium.html|title|"""Terres nucléaires"" : une histoire du plutonium" +http://www.sciencesetavenir.fr/decouvrir/tele-cinema/20150926.OBS6576/tele-terres-nucleaires-une-histoire-du-plutonium.html|creationTime|2016-02-09T23:36:30Z +http://www.lemonde.fr/europe/article/2014/04/17/daniel-cohn-bendit-insiste-une-derniere-fois-sur-la-necessite-d-europe_4402807_3214.html|creationDate|2014-04-17 +http://www.lemonde.fr/europe/article/2014/04/17/daniel-cohn-bendit-insiste-une-derniere-fois-sur-la-necessite-d-europe_4402807_3214.html|tag|http://www.semanlink.net/tag/europe +http://www.lemonde.fr/europe/article/2014/04/17/daniel-cohn-bendit-insiste-une-derniere-fois-sur-la-necessite-d-europe_4402807_3214.html|tag|http://www.semanlink.net/tag/cohn_bendit 
+http://www.lemonde.fr/europe/article/2014/04/17/daniel-cohn-bendit-insiste-une-derniere-fois-sur-la-necessite-d-europe_4402807_3214.html|title|Daniel Cohn-Bendit insiste, une dernière fois, sur la «nécessité d’Europe » +http://www.lemonde.fr/europe/article/2014/04/17/daniel-cohn-bendit-insiste-une-derniere-fois-sur-la-necessite-d-europe_4402807_3214.html|creationTime|2014-04-17T13:55:35Z +http://www.volcano.si.edu/world/volcano.cfm?vnum=0201-041&volpage=photos&photo=099008|creationDate|2008-11-21 +http://www.volcano.si.edu/world/volcano.cfm?vnum=0201-041&volpage=photos&photo=099008|tag|http://www.semanlink.net/tag/dallol +http://www.volcano.si.edu/world/volcano.cfm?vnum=0201-041&volpage=photos&photo=099008|title|Global Volcanism Program Dallol.jpg +http://www.volcano.si.edu/world/volcano.cfm?vnum=0201-041&volpage=photos&photo=099008|creationTime|2008-11-21T23:01:53Z +http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=EB97C3236E6A06E7A5592EC92A7D0F54?doi=10.1.1.261.2530&rep=rep1&type=pdf|creationDate|2017-11-12 +http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=EB97C3236E6A06E7A5592EC92A7D0F54?doi=10.1.1.261.2530&rep=rep1&type=pdf|tag|http://www.semanlink.net/tag/using_word_embedding +http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=EB97C3236E6A06E7A5592EC92A7D0F54?doi=10.1.1.261.2530&rep=rep1&type=pdf|tag|http://www.semanlink.net/tag/text_similarity +http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=EB97C3236E6A06E7A5592EC92A7D0F54?doi=10.1.1.261.2530&rep=rep1&type=pdf|comment|How a phrase or a sentence can be represented as a vector using the vectors of its constituent words? See also [Evaluating Neural Word Representations in Tensor-Based Compositional Settings](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.698.4036&rep=rep1&type=pdf) +http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=EB97C3236E6A06E7A5592EC92A7D0F54?doi=10.1.1.261.2530&rep=rep1&type=pdf|title|A Comparison of Vector-based Representations for Semantic Composition (Blacoe and Lapata - 2012) +http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=EB97C3236E6A06E7A5592EC92A7D0F54?doi=10.1.1.261.2530&rep=rep1&type=pdf|creationTime|2017-11-12T02:18:15Z +http://wikisem.makolab.pl|creationDate|2013-06-03 +http://wikisem.makolab.pl|tag|http://www.semanlink.net/tag/wiki +http://wikisem.makolab.pl|tag|http://www.semanlink.net/tag/gao +http://wikisem.makolab.pl|title|GAO wiki +http://wikisem.makolab.pl|creationTime|2013-06-03T13:29:02Z +http://pgm.stanford.edu/intro.pdf|creationDate|2013-05-23 +http://pgm.stanford.edu/intro.pdf|tag|http://www.semanlink.net/tag/probabilistic_graphical_models +http://pgm.stanford.edu/intro.pdf|tag|http://www.semanlink.net/tag/daphne_koller +http://pgm.stanford.edu/intro.pdf|title|Probabilistic Graphical Models (book, Koller) +http://pgm.stanford.edu/intro.pdf|creationTime|2013-05-23T08:41:30Z +https://www.lemonde.fr/referendum-sur-le-brexit/article/2018/08/13/jim-ratcliffe-le-brexiteur-milliardaire-qui-part-se-refugier-a-monaco_5342077_4872498.html|creationDate|2018-08-14 +https://www.lemonde.fr/referendum-sur-le-brexit/article/2018/08/13/jim-ratcliffe-le-brexiteur-milliardaire-qui-part-se-refugier-a-monaco_5342077_4872498.html|tag|http://www.semanlink.net/tag/brexit +https://www.lemonde.fr/referendum-sur-le-brexit/article/2018/08/13/jim-ratcliffe-le-brexiteur-milliardaire-qui-part-se-refugier-a-monaco_5342077_4872498.html|tag|http://www.semanlink.net/tag/milliardaire 
+https://www.lemonde.fr/referendum-sur-le-brexit/article/2018/08/13/jim-ratcliffe-le-brexiteur-milliardaire-qui-part-se-refugier-a-monaco_5342077_4872498.html|comment|Jim Ratcliffe, l’homme le plus riche du Royaume-Uni, partisan du Brexit, est un fervent soutien de la déréglementation et des impôts les plus bas +https://www.lemonde.fr/referendum-sur-le-brexit/article/2018/08/13/jim-ratcliffe-le-brexiteur-milliardaire-qui-part-se-refugier-a-monaco_5342077_4872498.html|title|Jim Ratcliffe, le brexiteur milliardaire qui part se réfugier… à Monaco +https://www.lemonde.fr/referendum-sur-le-brexit/article/2018/08/13/jim-ratcliffe-le-brexiteur-milliardaire-qui-part-se-refugier-a-monaco_5342077_4872498.html|creationTime|2018-08-14T14:22:59Z +http://www.nemrud.nl/|creationDate|2005-11-21 +http://www.nemrud.nl/|tag|http://www.semanlink.net/tag/nemrud +http://www.nemrud.nl/|title|International Nemrud Foundation - Word Heritage Monument in Turkey +http://www.lemonde.fr/big-browser/article/2016/08/01/pooper-la-fausse-application-de-ramassage-de-crottes-qui-ridiculise-l-uberisation_4977196_4832693.html|creationDate|2016-08-01 +http://www.lemonde.fr/big-browser/article/2016/08/01/pooper-la-fausse-application-de-ramassage-de-crottes-qui-ridiculise-l-uberisation_4977196_4832693.html|tag|http://www.semanlink.net/tag/rigolo +http://www.lemonde.fr/big-browser/article/2016/08/01/pooper-la-fausse-application-de-ramassage-de-crottes-qui-ridiculise-l-uberisation_4977196_4832693.html|tag|http://www.semanlink.net/tag/uberisation +http://www.lemonde.fr/big-browser/article/2016/08/01/pooper-la-fausse-application-de-ramassage-de-crottes-qui-ridiculise-l-uberisation_4977196_4832693.html|comment|"Poop with us
+Leave the mess to someone else
+And get back to what really matters - you and your dog." +http://www.lemonde.fr/big-browser/article/2016/08/01/pooper-la-fausse-application-de-ramassage-de-crottes-qui-ridiculise-l-uberisation_4977196_4832693.html|title|Pooper, la fausse application de ramassage de crottes qui ridiculise « l’ubérisation » +http://www.lemonde.fr/big-browser/article/2016/08/01/pooper-la-fausse-application-de-ramassage-de-crottes-qui-ridiculise-l-uberisation_4977196_4832693.html|creationTime|2016-08-01T23:09:54Z +http://fr.wikipedia.org/wiki/Le_Baron_de_l%27%C3%A9cluse_(film)|creationDate|2012-12-10 +http://fr.wikipedia.org/wiki/Le_Baron_de_l%27%C3%A9cluse_(film)|tag|http://www.semanlink.net/tag/film_francais +http://fr.wikipedia.org/wiki/Le_Baron_de_l%27%C3%A9cluse_(film)|comment|film français de J. Delannoy avec Gabin, dialogues de Audiard +http://fr.wikipedia.org/wiki/Le_Baron_de_l%27%C3%A9cluse_(film)|title|Le Baron de l'écluse (film) +http://fr.wikipedia.org/wiki/Le_Baron_de_l%27%C3%A9cluse_(film)|creationTime|2012-12-10T01:50:25Z +http://www.w3.org/2009/03/xbrl/report.html|creationDate|2010-07-01 +http://www.w3.org/2009/03/xbrl/report.html|tag|http://www.semanlink.net/tag/edward_curry +http://www.w3.org/2009/03/xbrl/report.html|tag|http://www.semanlink.net/tag/xbrl +http://www.w3.org/2009/03/xbrl/report.html|title|Report for the Workshop on Improving Access to Financial Data on the Web +http://www.w3.org/2009/03/xbrl/report.html|creationTime|2010-07-01T18:11:05Z +https://www.theguardian.com/technology/2017/nov/15/tim-berners-lee-world-wide-web-net-neutrality?CMP=share_btn_tw|creationDate|2017-11-16 +https://www.theguardian.com/technology/2017/nov/15/tim-berners-lee-world-wide-web-net-neutrality?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/the_web_is_dying +https://www.theguardian.com/technology/2017/nov/15/tim-berners-lee-world-wide-web-net-neutrality?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/tim_berners_lee +https://www.theguardian.com/technology/2017/nov/15/tim-berners-lee-world-wide-web-net-neutrality?CMP=share_btn_tw|tag|http://www.semanlink.net/tag/future_of_the_web +https://www.theguardian.com/technology/2017/nov/15/tim-berners-lee-world-wide-web-net-neutrality?CMP=share_btn_tw|comment|> The way ad revenue works with clickbait is not fulfilling the goal of helping humanity promote truth and democracy. So I am concerned +https://www.theguardian.com/technology/2017/nov/15/tim-berners-lee-world-wide-web-net-neutrality?CMP=share_btn_tw|title|Tim Berners-Lee on the future of the web: 'The system is failing' Technology The Guardian +https://www.theguardian.com/technology/2017/nov/15/tim-berners-lee-world-wide-web-net-neutrality?CMP=share_btn_tw|creationTime|2017-11-16T22:58:39Z +https://tedunderwood.com/2012/04/07/topic-modeling-made-just-simple-enough/|creationDate|2017-05-22 +https://tedunderwood.com/2012/04/07/topic-modeling-made-just-simple-enough/|tag|http://www.semanlink.net/tag/topic_modeling +https://tedunderwood.com/2012/04/07/topic-modeling-made-just-simple-enough/|title|Topic modeling made just simple enough. 
The Stone and the Shell +https://tedunderwood.com/2012/04/07/topic-modeling-made-just-simple-enough/|creationTime|2017-05-22T11:37:25Z +http://www.semanlink.net/doc/2019/05/uber_lyft_drivers_manipulate_f|creationDate|2019-05-20 +http://www.semanlink.net/doc/2019/05/uber_lyft_drivers_manipulate_f|tag|http://www.semanlink.net/tag/uber +http://www.semanlink.net/doc/2019/05/uber_lyft_drivers_manipulate_f|tag|http://www.semanlink.net/tag/syndicalisme +http://www.semanlink.net/doc/2019/05/uber_lyft_drivers_manipulate_f|title|Uber, Lyft drivers manipulate fares at Reagan National causing artificial price surges WJLA +http://www.semanlink.net/doc/2019/05/uber_lyft_drivers_manipulate_f|bookmarkOf|https://wjla.com/news/local/uber-and-lyft-drivers-fares-at-reagan-national +http://www.semanlink.net/doc/2019/05/uber_lyft_drivers_manipulate_f|creationTime|2019-05-20T23:05:26Z +https://github.com/NatLibFi/Skosmos|creationDate|2015-03-02 +https://github.com/NatLibFi/Skosmos|tag|http://www.semanlink.net/tag/skos +https://github.com/NatLibFi/Skosmos|title|Skosmos +https://github.com/NatLibFi/Skosmos|creationTime|2015-03-02T09:31:21Z +http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_Cesa-BianchiGZ06.pdf|creationDate|2014-05-18 +http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_Cesa-BianchiGZ06.pdf|tag|http://www.semanlink.net/tag/nlp_hierarchical_text_classification +http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_Cesa-BianchiGZ06.pdf|tag|http://www.semanlink.net/tag/support_vector_machine +http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_Cesa-BianchiGZ06.pdf|tag|http://www.semanlink.net/tag/bayesian_classification +http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_Cesa-BianchiGZ06.pdf|title|Hierarchical classification: Combining Bayes with SVM +http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_Cesa-BianchiGZ06.pdf|creationTime|2014-05-18T11:31:25Z +http://allforces.com/2005/08/23/wordpress-on-mac-subdomains/|creationDate|2007-07-07 +http://allforces.com/2005/08/23/wordpress-on-mac-subdomains/|tag|http://www.semanlink.net/tag/wordpress +http://allforces.com/2005/08/23/wordpress-on-mac-subdomains/|title|WordPress on Mac Part 2: Sub-Domains All Forces +http://allforces.com/2005/08/23/wordpress-on-mac-subdomains/|creationTime|2007-07-07T15:31:12Z +http://archeo.blog.lemonde.fr/2017/05/13/lhomme-prehistorique-aimait-les-pains-moutardes/|creationDate|2017-05-13 +http://archeo.blog.lemonde.fr/2017/05/13/lhomme-prehistorique-aimait-les-pains-moutardes/|tag|http://www.semanlink.net/tag/origine_de_l_agriculture +http://archeo.blog.lemonde.fr/2017/05/13/lhomme-prehistorique-aimait-les-pains-moutardes/|tag|http://www.semanlink.net/tag/pain +http://archeo.blog.lemonde.fr/2017/05/13/lhomme-prehistorique-aimait-les-pains-moutardes/|tag|http://www.semanlink.net/tag/archeologie +http://archeo.blog.lemonde.fr/2017/05/13/lhomme-prehistorique-aimait-les-pains-moutardes/|tag|http://www.semanlink.net/tag/catalhoyuk +http://archeo.blog.lemonde.fr/2017/05/13/lhomme-prehistorique-aimait-les-pains-moutardes/|title|L’homme préhistorique aimait les pains moutardés Dans les pas des archéologues +http://archeo.blog.lemonde.fr/2017/05/13/lhomme-prehistorique-aimait-les-pains-moutardes/|creationTime|2017-05-13T18:39:53Z +http://dannyayers.com|creationDate|2005-07-04 +http://dannyayers.com|tag|http://www.semanlink.net/tag/semantic_blog +http://dannyayers.com|title|Danny Ayers, Raw Blog +http://mspace.fm/|creationDate|2006-05-22 
+http://mspace.fm/|tag|http://www.semanlink.net/tag/mspace +http://mspace.fm/|title|mSpace +http://data.semanticweb.org/person/francois-paul-servant/html|creationDate|2008-11-11 +http://data.semanticweb.org/person/francois-paul-servant/html|tag|http://www.semanlink.net/tag/link_to_me +http://data.semanticweb.org/person/francois-paul-servant/html|title|François-Paul Servant Semantic Web Dog Food +http://data.semanticweb.org/person/francois-paul-servant/html|creationTime|2008-11-11T12:40:05Z +http://simile.mit.edu/RDFizers/|creationDate|2006-03-31 +http://simile.mit.edu/RDFizers/|tag|http://www.semanlink.net/tag/rdfizers +http://simile.mit.edu/RDFizers/|tag|http://www.semanlink.net/tag/simile +http://simile.mit.edu/RDFizers/|comment|RDFizers are tools that allow to transform existing data into an RDF representation. +http://simile.mit.edu/RDFizers/|title|SIMILE RDFizers +http://louvre-boite.viabloga.com/news/50.shtml|creationDate|2005-04-30 +http://louvre-boite.viabloga.com/news/50.shtml|tag|http://www.semanlink.net/tag/tagging +http://hathaway.cc/post/69201163472/how-to-edit-your-path-environment-variables-on-mac|creationDate|2015-02-27 +http://hathaway.cc/post/69201163472/how-to-edit-your-path-environment-variables-on-mac|tag|http://www.semanlink.net/tag/unix_howto +http://hathaway.cc/post/69201163472/how-to-edit-your-path-environment-variables-on-mac|tag|http://www.semanlink.net/tag/path +http://hathaway.cc/post/69201163472/how-to-edit-your-path-environment-variables-on-mac|tag|http://www.semanlink.net/tag/os_x_unix +http://hathaway.cc/post/69201163472/how-to-edit-your-path-environment-variables-on-mac|tag|http://www.semanlink.net/tag/mac_os_x_tip +http://hathaway.cc/post/69201163472/how-to-edit-your-path-environment-variables-on-mac|title|How To Edit Your PATH Environment Variables On Mac OS X +http://hathaway.cc/post/69201163472/how-to-edit-your-path-environment-variables-on-mac|creationTime|2015-02-27T14:13:10Z +http://www.experimentgarden.com/2010/06/loading-javascript-functions-via-ajax.html|creationDate|2012-07-13 +http://www.experimentgarden.com/2010/06/loading-javascript-functions-via-ajax.html|tag|http://www.semanlink.net/tag/ajax +http://www.experimentgarden.com/2010/06/loading-javascript-functions-via-ajax.html|tag|http://www.semanlink.net/tag/download_execute_javascript +http://www.experimentgarden.com/2010/06/loading-javascript-functions-via-ajax.html|title|Loading JavaScript Functions Via AJAX - Experiment Garden +http://www.experimentgarden.com/2010/06/loading-javascript-functions-via-ajax.html|creationTime|2012-07-13T02:05:44Z +http://www.myspace.com/bandalaya|creationDate|2007-01-30 +http://www.myspace.com/bandalaya|tag|http://www.semanlink.net/tag/renato_matos +http://www.myspace.com/bandalaya|title|Renato Matos (myspace) +http://www.myspace.com/bandalaya|creationTime|2007-01-30T21:39:25Z +http://www.lemonde.fr/afrique/article/2016/03/28/la-chine-fait-main-basse-sur-les-forets-africaines_4891052_3212.html|creationDate|2016-03-28 +http://www.lemonde.fr/afrique/article/2016/03/28/la-chine-fait-main-basse-sur-les-forets-africaines_4891052_3212.html|tag|http://www.semanlink.net/tag/chine_afrique +http://www.lemonde.fr/afrique/article/2016/03/28/la-chine-fait-main-basse-sur-les-forets-africaines_4891052_3212.html|tag|http://www.semanlink.net/tag/deforestation +http://www.lemonde.fr/afrique/article/2016/03/28/la-chine-fait-main-basse-sur-les-forets-africaines_4891052_3212.html|title|La Chine fait main basse sur les forêts africaines 
+http://www.lemonde.fr/afrique/article/2016/03/28/la-chine-fait-main-basse-sur-les-forets-africaines_4891052_3212.html|creationTime|2016-03-28T16:59:59Z +http://www1.planeteafrique.com/Liberation/Index.asp?affiche=News_Display.asp&articleid=1219|creationDate|2008-06-20 +http://www1.planeteafrique.com/Liberation/Index.asp?affiche=News_Display.asp&articleid=1219|tag|http://www.semanlink.net/tag/chine_afrique +http://www1.planeteafrique.com/Liberation/Index.asp?affiche=News_Display.asp&articleid=1219|tag|http://www.semanlink.net/tag/niger_petrole +http://www1.planeteafrique.com/Liberation/Index.asp?affiche=News_Display.asp&articleid=1219|tag|http://www.semanlink.net/tag/diffa +http://www1.planeteafrique.com/Liberation/Index.asp?affiche=News_Display.asp&articleid=1219|title|Accord pétrolier entre le CNODC et le Niger +http://www1.planeteafrique.com/Liberation/Index.asp?affiche=News_Display.asp&articleid=1219|creationTime|2008-06-20T23:52:24Z +http://www.w3.org/2007/03/RdfRDB/report|creationDate|2009-02-10 +http://www.w3.org/2007/03/RdfRDB/report|tag|http://www.semanlink.net/tag/rdf_access_to_relational_databases +http://www.w3.org/2007/03/RdfRDB/report|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2007/03/RdfRDB/report|title|Report from the W3C Workshop on RDF Access to Relational Databases +http://www.w3.org/2007/03/RdfRDB/report|creationTime|2009-02-10T22:38:25Z +http://sourceforge.net/projects/eulergui/|creationDate|2011-01-04 +http://sourceforge.net/projects/eulergui/|tag|http://www.semanlink.net/tag/n3 +http://sourceforge.net/projects/eulergui/|tag|http://www.semanlink.net/tag/reasoning +http://sourceforge.net/projects/eulergui/|comment|A lightweight IDE for Artificial Intelligence. Started as GUI for the Euler reasoning engine. The sources can be N3, RDF, OWL, UML, eCore, plain XML or XSD, files or URL's. Wraps Drools (or CWM, FuXi) as N3 rules engines. Model based app. generation. 
+http://sourceforge.net/projects/eulergui/|title|EulerGUI +http://sourceforge.net/projects/eulergui/|creationTime|2011-01-04T22:32:56Z +http://googleresearch.blogspot.fr/2013/03/learning-from-big-data-40-million.html|creationDate|2013-03-12 +http://googleresearch.blogspot.fr/2013/03/learning-from-big-data-40-million.html|tag|http://www.semanlink.net/tag/wikilinks_corpus +http://googleresearch.blogspot.fr/2013/03/learning-from-big-data-40-million.html|title|Learning from Big Data: 40 Million Entities in Context +http://googleresearch.blogspot.fr/2013/03/learning-from-big-data-40-million.html|creationTime|2013-03-12T14:49:55Z +http://semanticweb.com/get-more-robust-access-control-courtesy-of-semantic-technology_b24530|creationDate|2011-11-13 +http://semanticweb.com/get-more-robust-access-control-courtesy-of-semantic-technology_b24530|tag|http://www.semanlink.net/tag/access_control +http://semanticweb.com/get-more-robust-access-control-courtesy-of-semantic-technology_b24530|tag|http://www.semanlink.net/tag/semantic_web +http://semanticweb.com/get-more-robust-access-control-courtesy-of-semantic-technology_b24530|title|Get More Robust Access Control, Courtesy of Semantic Technology - semanticweb.com +http://semanticweb.com/get-more-robust-access-control-courtesy-of-semantic-technology_b24530|creationTime|2011-11-13T14:22:20Z +http://www.bing.com/blogs/site_blogs/b/search/archive/2013/03/21/satorii.aspx|creationDate|2013-03-25 +http://www.bing.com/blogs/site_blogs/b/search/archive/2013/03/21/satorii.aspx|tag|http://www.semanlink.net/tag/bing +http://www.bing.com/blogs/site_blogs/b/search/archive/2013/03/21/satorii.aspx|tag|http://www.semanlink.net/tag/satori +http://www.bing.com/blogs/site_blogs/b/search/archive/2013/03/21/satorii.aspx|title|Understand Your World with Bing - Search Blog +http://www.bing.com/blogs/site_blogs/b/search/archive/2013/03/21/satorii.aspx|creationTime|2013-03-25T13:15:21Z +http://blog.schema.org/2013/06/schemaorg-and-json-ld.html|creationDate|2013-06-06 +http://blog.schema.org/2013/06/schemaorg-and-json-ld.html|tag|http://www.semanlink.net/tag/schema_org +http://blog.schema.org/2013/06/schemaorg-and-json-ld.html|tag|http://www.semanlink.net/tag/json_ld +http://blog.schema.org/2013/06/schemaorg-and-json-ld.html|tag|http://www.semanlink.net/tag/dan_brickley +http://blog.schema.org/2013/06/schemaorg-and-json-ld.html|title|schema blog: Schema.org and JSON-LD +http://blog.schema.org/2013/06/schemaorg-and-json-ld.html|creationTime|2013-06-06T11:51:01Z +http://news.dice.com/2012/05/16/r-octave-python-suits-your-analysis-needs/|creationDate|2015-01-28 +http://news.dice.com/2012/05/16/r-octave-python-suits-your-analysis-needs/|tag|http://www.semanlink.net/tag/gnu_octave +http://news.dice.com/2012/05/16/r-octave-python-suits-your-analysis-needs/|tag|http://www.semanlink.net/tag/programming_language +http://news.dice.com/2012/05/16/r-octave-python-suits-your-analysis-needs/|tag|http://www.semanlink.net/tag/python +http://news.dice.com/2012/05/16/r-octave-python-suits-your-analysis-needs/|tag|http://www.semanlink.net/tag/r +http://news.dice.com/2012/05/16/r-octave-python-suits-your-analysis-needs/|comment|R’s strength is in statistical analysis. Octave is good for developing Machine Learning algorithms for numeric problems. Python is a general programming language strong in algorithm building for both number and text mining. +http://news.dice.com/2012/05/16/r-octave-python-suits-your-analysis-needs/|title|R, Octave, and Python: Which Suits Your Analysis Needs? 
- Dice News +http://news.dice.com/2012/05/16/r-octave-python-suits-your-analysis-needs/|creationTime|2015-01-28T01:22:55Z +http://cs.stanford.edu/people/karpathy/|creationDate|2017-08-27 +http://cs.stanford.edu/people/karpathy/|tag|http://www.semanlink.net/tag/andrej_karpathy +http://cs.stanford.edu/people/karpathy/|title|Andrej Karpathy Academic Website +http://cs.stanford.edu/people/karpathy/|creationTime|2017-08-27T01:37:54Z +https://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/|creationDate|2017-05-24 +https://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/|tag|http://www.semanlink.net/tag/nlp_topic_extraction +https://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/|tag|http://www.semanlink.net/tag/nlp_sample_code +https://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/|tag|http://www.semanlink.net/tag/brown_corpus +https://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/|tag|http://www.semanlink.net/tag/python_nlp +https://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/|tag|http://www.semanlink.net/tag/part_of_speech_tagging +https://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/|comment|"based on simple POS tagging (using the Brown corpus), less accurate than the default NLTK tools, but faster +" +https://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/|title|An Efficient Way to Extract the Main Topics from a Sentence The Tokenizer +https://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/|creationTime|2017-05-24T17:58:13Z +http://www.rue89.com/2013/09/04/nouvelles-revelations-lunite-contre-espionnage-wikileaks-245374|creationDate|2013-09-15 +http://www.rue89.com/2013/09/04/nouvelles-revelations-lunite-contre-espionnage-wikileaks-245374|tag|http://www.semanlink.net/tag/marchands_d_arme +http://www.rue89.com/2013/09/04/nouvelles-revelations-lunite-contre-espionnage-wikileaks-245374|tag|http://www.semanlink.net/tag/wikileaks +http://www.rue89.com/2013/09/04/nouvelles-revelations-lunite-contre-espionnage-wikileaks-245374|tag|http://www.semanlink.net/tag/cybersurveillance +http://www.rue89.com/2013/09/04/nouvelles-revelations-lunite-contre-espionnage-wikileaks-245374|title|WikiLeaks a traqué les vendeurs d’armes de surveillance Rue89 +http://www.rue89.com/2013/09/04/nouvelles-revelations-lunite-contre-espionnage-wikileaks-245374|creationTime|2013-09-15T14:29:30Z +http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/|creationDate|2017-05-18 +http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/|tag|http://www.semanlink.net/tag/python_sample_code +http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/|tag|http://www.semanlink.net/tag/nlp_sample_code +http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/|tag|http://www.semanlink.net/tag/nlp_text_classification +http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/|tag|http://www.semanlink.net/tag/word2vec +http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/|comment|"> Overall, we won’t be throwing away our SVMs any time soon in favor of word2vec but it has it’s place in text classification. +> +> 1. SVM’s are pretty great at text classification tasks +> 2. 
Models based on simple averaging of word-vectors can be surprisingly good too (given how much information is lost in taking the average)
+> 3. but they only seem to have a clear advantage when there is ridiculously little labeled training data
+>
+> Update 2017: actually, the best way to utilise the pretrained embeddings would probably be this [using keras](https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html)
+
+Sample code to benchmark a few text categorization models to test whether word embeddings like word2vec can improve text classification accuracy.
+Sample code (based on scikit-learn) includes an embedding vectorizer that is given an embedding dataset and vectorizes texts by taking the mean of all the vectors corresponding to individual words.
+
+"
+http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/|relatedDoc|https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
+http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/|title|Text Classification With Word2Vec - DS lore (2016)
+http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/|creationTime|2017-05-18T23:42:46Z
+http://blogs.sun.com/bblfish/entry/search_tagging_and_wikis|creationDate|2010-05-21
+http://blogs.sun.com/bblfish/entry/search_tagging_and_wikis|tag|http://www.semanlink.net/tag/tagging
+http://blogs.sun.com/bblfish/entry/search_tagging_and_wikis|tag|http://www.semanlink.net/tag/henry_story
+http://blogs.sun.com/bblfish/entry/search_tagging_and_wikis|title|Search, Tagging and Wikis (2007)
+http://blogs.sun.com/bblfish/entry/search_tagging_and_wikis|creationTime|2010-05-21T14:05:43Z
+https://www.buzzfeed.com/tedchiang/the-real-danger-to-civilization-isnt-ai-its-runaway|creationDate|2017-12-19
+https://www.buzzfeed.com/tedchiang/the-real-danger-to-civilization-isnt-ai-its-runaway|tag|http://www.semanlink.net/tag/silicon_valley
+https://www.buzzfeed.com/tedchiang/the-real-danger-to-civilization-isnt-ai-its-runaway|tag|http://www.semanlink.net/tag/ai_dangers
+https://www.buzzfeed.com/tedchiang/the-real-danger-to-civilization-isnt-ai-its-runaway|tag|http://www.semanlink.net/tag/capitalisme
+https://www.buzzfeed.com/tedchiang/the-real-danger-to-civilization-isnt-ai-its-runaway|comment|Who pursues their goals with monomaniacal focus, oblivious to the possibility of negative consequences?
+https://www.buzzfeed.com/tedchiang/the-real-danger-to-civilization-isnt-ai-its-runaway|title|Silicon Valley Is Turning Into Its Own Worst Fear +https://www.buzzfeed.com/tedchiang/the-real-danger-to-civilization-isnt-ai-its-runaway|creationTime|2017-12-19T13:49:44Z +https://pouannes.github.io/blog/decorators/|creationDate|2019-03-24 +https://pouannes.github.io/blog/decorators/|tag|http://www.semanlink.net/tag/python +https://pouannes.github.io/blog/decorators/|title|Finally understanding decorators in Python • Pierre Ouannes +https://pouannes.github.io/blog/decorators/|creationTime|2019-03-24T19:07:30Z +http://peacecorpsonline.org/messages/messages/2629/2025470.html|creationDate|2006-01-23 +http://peacecorpsonline.org/messages/messages/2629/2025470.html|tag|http://www.semanlink.net/tag/vito +http://peacecorpsonline.org/messages/messages/2629/2025470.html|title|Peace Corps Online - RPCV Vito DiLullo is looking forward to welcoming friends to his table +http://www.bbc.com/future/story/20170126-the-untranslatable-emotions-you-never-knew-you-had|creationDate|2017-01-29 +http://www.bbc.com/future/story/20170126-the-untranslatable-emotions-you-never-knew-you-had|tag|http://www.semanlink.net/tag/langues +http://www.bbc.com/future/story/20170126-the-untranslatable-emotions-you-never-knew-you-had|tag|http://www.semanlink.net/tag/saudade +http://www.bbc.com/future/story/20170126-the-untranslatable-emotions-you-never-knew-you-had|tag|http://www.semanlink.net/tag/words +http://www.bbc.com/future/story/20170126-the-untranslatable-emotions-you-never-knew-you-had|tag|http://www.semanlink.net/tag/sagesse_du_langage +http://www.bbc.com/future/story/20170126-the-untranslatable-emotions-you-never-knew-you-had|title|BBC - Future - The ‘untranslatable’ emotions you never knew you had +http://www.bbc.com/future/story/20170126-the-untranslatable-emotions-you-never-knew-you-had|creationTime|2017-01-29T14:53:05Z +http://www.w3.org/community/hydra/|creationDate|2014-11-26 +http://www.w3.org/community/hydra/|tag|http://www.semanlink.net/tag/hydra +http://www.w3.org/community/hydra/|title|Hydra Community Group @ W3C +http://www.w3.org/community/hydra/|creationTime|2014-11-26T13:10:17Z +http://www.nytimes.com/2014/01/26/opinion/sunday/what-drives-success.html?_r=1|creationDate|2014-02-18 +http://www.nytimes.com/2014/01/26/opinion/sunday/what-drives-success.html?_r=1|comment|Why some cultural groups are more successful +http://www.nytimes.com/2014/01/26/opinion/sunday/what-drives-success.html?_r=1|title|What Drives Success? - NYTimes.com +http://www.nytimes.com/2014/01/26/opinion/sunday/what-drives-success.html?_r=1|creationTime|2014-02-18T01:16:53Z +http://www.washingtonpost.com/sf/opinions/wp/2016/07/08/one-year-out/?tid=sm_tw|creationDate|2016-07-11 +http://www.washingtonpost.com/sf/opinions/wp/2016/07/08/one-year-out/?tid=sm_tw|tag|http://www.semanlink.net/tag/obama +http://www.washingtonpost.com/sf/opinions/wp/2016/07/08/one-year-out/?tid=sm_tw|tag|http://www.semanlink.net/tag/war_on_drugs +http://www.washingtonpost.com/sf/opinions/wp/2016/07/08/one-year-out/?tid=sm_tw|tag|http://www.semanlink.net/tag/prison +http://www.washingtonpost.com/sf/opinions/wp/2016/07/08/one-year-out/?tid=sm_tw|comment|On July 13, 2015, President Obama commuted the prison sentences of 46 nonviolent drug offenders. Here’s what their lives are like now. +http://www.washingtonpost.com/sf/opinions/wp/2016/07/08/one-year-out/?tid=sm_tw|title|Obama sent these people home from prison early. Now what? 
The Washington Post +http://www.washingtonpost.com/sf/opinions/wp/2016/07/08/one-year-out/?tid=sm_tw|creationTime|2016-07-11T18:58:50Z +http://paigrain.debatpublic.net/?p=6827|creationDate|2013-03-24 +http://paigrain.debatpublic.net/?p=6827|tag|http://www.semanlink.net/tag/corruption +http://paigrain.debatpublic.net/?p=6827|tag|http://www.semanlink.net/tag/loi_sur_les_oeuvres_indisponibles +http://paigrain.debatpublic.net/?p=6827|title|Corruption des institutions – Communs / Commons +http://paigrain.debatpublic.net/?p=6827|creationTime|2013-03-24T10:17:32Z +http://www.cortical.io/|creationDate|2017-07-10 +http://www.cortical.io/|tag|http://www.semanlink.net/tag/cortical_io +http://www.cortical.io/|comment|"""semantic fingerprint"" representation of words +" +http://www.cortical.io/|title|Cortical.io - Fast, precise, intuitive NLP +http://www.cortical.io/|creationTime|2017-07-10T14:57:06Z +http://lists.w3.org/Archives/Public/semantic-web/2010Oct/0226.html|creationDate|2011-06-28 +http://lists.w3.org/Archives/Public/semantic-web/2010Oct/0226.html|tag|http://www.semanlink.net/tag/javascript +http://lists.w3.org/Archives/Public/semantic-web/2010Oct/0226.html|tag|http://www.semanlink.net/tag/howto +http://lists.w3.org/Archives/Public/semantic-web/2010Oct/0226.html|tag|http://www.semanlink.net/tag/nathan_rixham +http://lists.w3.org/Archives/Public/semantic-web/2010Oct/0226.html|tag|http://www.semanlink.net/tag/cross_origin_resource_sharing +http://lists.w3.org/Archives/Public/semantic-web/2010Oct/0226.html|title|Please allow JS access to Ontologies and LOD +http://lists.w3.org/Archives/Public/semantic-web/2010Oct/0226.html|creationTime|2011-06-28T16:12:13Z +http://www.theguardian.com/world/2013/sep/20/usaf-atomic-bomb-north-carolina-1961|creationDate|2013-09-21 +http://www.theguardian.com/world/2013/sep/20/usaf-atomic-bomb-north-carolina-1961|tag|http://www.semanlink.net/tag/the_guardian +http://www.theguardian.com/world/2013/sep/20/usaf-atomic-bomb-north-carolina-1961|tag|http://www.semanlink.net/tag/bombe_atomique +http://www.theguardian.com/world/2013/sep/20/usaf-atomic-bomb-north-carolina-1961|title|US nearly detonated atomic bomb over North Carolina – secret document World news The Guardian +http://www.theguardian.com/world/2013/sep/20/usaf-atomic-bomb-north-carolina-1961|creationTime|2013-09-21T15:02:44Z +http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/|creationDate|2015-11-08 +http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/|tag|http://www.semanlink.net/tag/survey +http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/|tag|http://www.semanlink.net/tag/denny_britz +http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/|tag|http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp +http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/|title|Understanding Convolutional Neural Networks for NLP WildML +http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/|creationTime|2015-11-08T11:53:24Z +http://www.mkbergman.com/?p=326|creationDate|2007-01-24 +http://www.mkbergman.com/?p=326|tag|http://www.semanlink.net/tag/simile_exhibit +http://www.mkbergman.com/?p=326|tag|http://www.semanlink.net/tag/howto +http://www.mkbergman.com/?p=326|title|AI3: Converting ‘Sweet Tools’ to an Exhibit +http://www.lrec-conf.org/proceedings/lrec2012/pdf/544_Paper.pdf|creationDate|2013-04-17 
+http://www.lrec-conf.org/proceedings/lrec2012/pdf/544_Paper.pdf|tag|http://www.semanlink.net/tag/semantic_wiki
+http://www.lrec-conf.org/proceedings/lrec2012/pdf/544_Paper.pdf|tag|http://www.semanlink.net/tag/linked_data_collaborative_editing
+http://www.lrec-conf.org/proceedings/lrec2012/pdf/544_Paper.pdf|comment|"The creation of language resources is a time-consuming process requiring the efforts of many people. The use of resources collaboratively created by non-linguists can potentially ameliorate this situation. However, such resources often contain more errors compared to resources created by experts. For the particular case of lexica, we analyse the case of Wiktionary, a resource created along wiki principles and argue that through the use of a principled lexicon model, namely lemon, the resulting data could be better understandable to machines. We then present a platform called lemon source that supports the creation of linked lexical data along the lemon model. This tool builds on the concept of a semantic wiki to enable collaborative editing of the resources by many users concurrently. In this paper, we describe the model, the tool and present an evaluation of its usability based on a small group of users."
+http://www.lrec-conf.org/proceedings/lrec2012/pdf/544_Paper.pdf|title|Collaborative semantic editing of linked data lexica
+http://www.lrec-conf.org/proceedings/lrec2012/pdf/544_Paper.pdf|creationTime|2013-04-17T10:13:59Z
+http://data.blog.lemonde.fr/2015/10/23/le-fact-checking-peut-il-sautomatiser/|creationDate|2015-10-31
+http://data.blog.lemonde.fr/2015/10/23/le-fact-checking-peut-il-sautomatiser/|tag|http://www.semanlink.net/tag/fact_checking
+http://data.blog.lemonde.fr/2015/10/23/le-fact-checking-peut-il-sautomatiser/|title|Le fact-checking peut-il s’automatiser ? J'ai du bon data
+http://data.blog.lemonde.fr/2015/10/23/le-fact-checking-peut-il-sautomatiser/|creationTime|2015-10-31T10:16:58Z
+http://www.nltk.org/api/nltk.tag.html#module-nltk.tag.stanford|creationDate|2017-07-11
+http://www.nltk.org/api/nltk.tag.html#module-nltk.tag.stanford|tag|http://www.semanlink.net/tag/stanford_pos_tagger
+http://www.nltk.org/api/nltk.tag.html#module-nltk.tag.stanford|tag|http://www.semanlink.net/tag/nltk
+http://www.nltk.org/api/nltk.tag.html#module-nltk.tag.stanford|comment|A module for interfacing with the Stanford taggers.
+http://www.nltk.org/api/nltk.tag.html#module-nltk.tag.stanford|title|nltk.tag.stanford module — NLTK documentation +http://www.nltk.org/api/nltk.tag.html#module-nltk.tag.stanford|creationTime|2017-07-11T15:43:03Z +http://www.webdeveloperjuice.com/2011/09/28/7-beautiful-web-based-timeline-using-javascript-and-css/|creationDate|2012-11-28 +http://www.webdeveloperjuice.com/2011/09/28/7-beautiful-web-based-timeline-using-javascript-and-css/|tag|http://www.semanlink.net/tag/timeline +http://www.webdeveloperjuice.com/2011/09/28/7-beautiful-web-based-timeline-using-javascript-and-css/|title|7 Beautiful Web Based Timeline Using Javascript and CSS +http://www.webdeveloperjuice.com/2011/09/28/7-beautiful-web-based-timeline-using-javascript-and-css/|creationTime|2012-11-28T00:17:46Z +https://www.eff.org/deeplinks/2013/11/drm-cars-will-drive-consumers-crazy|creationDate|2013-11-18 +https://www.eff.org/deeplinks/2013/11/drm-cars-will-drive-consumers-crazy|tag|http://www.semanlink.net/tag/drm +https://www.eff.org/deeplinks/2013/11/drm-cars-will-drive-consumers-crazy|title|DRM in Cars Will Drive Consumers Crazy Electronic Frontier Foundation +https://www.eff.org/deeplinks/2013/11/drm-cars-will-drive-consumers-crazy|creationTime|2013-11-18T10:57:51Z +http://blogs.sun.com/bblfish/entry/bof_5911_building_a_web|creationDate|2008-05-08 +http://blogs.sun.com/bblfish/entry/bof_5911_building_a_web|tag|http://www.semanlink.net/tag/dataportability +http://blogs.sun.com/bblfish/entry/bof_5911_building_a_web|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/bblfish/entry/bof_5911_building_a_web|tag|http://www.semanlink.net/tag/address_book +http://blogs.sun.com/bblfish/entry/bof_5911_building_a_web|tag|http://www.semanlink.net/tag/social_software +http://blogs.sun.com/bblfish/entry/bof_5911_building_a_web|tag|http://www.semanlink.net/tag/rdf_and_social_networks +http://blogs.sun.com/bblfish/entry/bof_5911_building_a_web|title|BOF-5911: Building a Web 3.0 Address Book +http://blogs.sun.com/bblfish/entry/bof_5911_building_a_web|creationTime|2008-05-08T03:32:50Z +https://github.s3.amazonaws.com/media/progit.en.pdf|creationDate|2013-09-08 +https://github.s3.amazonaws.com/media/progit.en.pdf|tag|http://www.semanlink.net/tag/git +https://github.s3.amazonaws.com/media/progit.en.pdf|comment|Git documentation +https://github.s3.amazonaws.com/media/progit.en.pdf|title|Pro Git +https://github.s3.amazonaws.com/media/progit.en.pdf|creationTime|2013-09-08T14:18:44Z +http://www.lespetitescases.net/semweblabs/drupalModules.php|creationDate|2011-09-15 +http://www.lespetitescases.net/semweblabs/drupalModules.php|tag|http://www.semanlink.net/tag/gautier_poupeau +http://www.lespetitescases.net/semweblabs/drupalModules.php|tag|http://www.semanlink.net/tag/drupal_rdf +http://www.lespetitescases.net/semweblabs/drupalModules.php|title|Modules pour Drupal +http://www.lespetitescases.net/semweblabs/drupalModules.php|creationTime|2011-09-15T10:40:07Z +http://ivan-herman.name/2009/01/14/a-different-usage-of-rdfa/|creationDate|2009-01-16 +http://ivan-herman.name/2009/01/14/a-different-usage-of-rdfa/|tag|http://www.semanlink.net/tag/ivan_herman +http://ivan-herman.name/2009/01/14/a-different-usage-of-rdfa/|tag|http://www.semanlink.net/tag/rdfa +http://ivan-herman.name/2009/01/14/a-different-usage-of-rdfa/|title|A different usage of RDFa… « Ivan’s private site +http://ivan-herman.name/2009/01/14/a-different-usage-of-rdfa/|creationTime|2009-01-16T21:43:54Z 
+http://www.christian-faure.net/2013/05/10/theorie-du-drone-de-gregoire-chamayou-la-guerre-est-finie|creationDate|2013-08-21 +http://www.christian-faure.net/2013/05/10/theorie-du-drone-de-gregoire-chamayou-la-guerre-est-finie|tag|http://www.semanlink.net/tag/christian_faure +http://www.christian-faure.net/2013/05/10/theorie-du-drone-de-gregoire-chamayou-la-guerre-est-finie|tag|http://www.semanlink.net/tag/guerre +http://www.christian-faure.net/2013/05/10/theorie-du-drone-de-gregoire-chamayou-la-guerre-est-finie|tag|http://www.semanlink.net/tag/drones +http://www.christian-faure.net/2013/05/10/theorie-du-drone-de-gregoire-chamayou-la-guerre-est-finie|title|« Théorie du drone », de Grégoire Chamayou : la guerre est finie +http://www.christian-faure.net/2013/05/10/theorie-du-drone-de-gregoire-chamayou-la-guerre-est-finie|creationTime|2013-08-21T01:12:46Z +https://github.com/dmlc/dgl|creationDate|2018-12-12 +https://github.com/dmlc/dgl|tag|http://www.semanlink.net/tag/graph_neural_networks +https://github.com/dmlc/dgl|tag|http://www.semanlink.net/tag/github_project +https://github.com/dmlc/dgl|comment|DGL is a Python package that interfaces between existing tensor libraries and data being expressed as graphs. It makes implementing graph neural networks (including Graph Convolution Networks, TreeLSTM, and many others) easy while maintaining high computation efficiency +https://github.com/dmlc/dgl|title|dmlc/dgl: Python package built to ease deep learning on graph, on top of existing DL frameworks. +https://github.com/dmlc/dgl|creationTime|2018-12-12T22:41:46Z +http://edition.cnn.com/2008/POLITICS/11/04/obama.transcript/index.html?iref=mpstoryview|creationDate|2008-11-05 +http://edition.cnn.com/2008/POLITICS/11/04/obama.transcript/index.html?iref=mpstoryview|tag|http://www.semanlink.net/tag/obama +http://edition.cnn.com/2008/POLITICS/11/04/obama.transcript/index.html?iref=mpstoryview|title|Transcript: 'This is your victory,' says Obama - CNN.com +http://edition.cnn.com/2008/POLITICS/11/04/obama.transcript/index.html?iref=mpstoryview|creationTime|2008-11-05T13:17:15Z +http://maps.google.fr/maps?f=q&source=s_q&hl=fr&geocode=&q=niamey&sll=46.75984,1.738281&sspn=14.077708,19.599609&ie=UTF8&hq=&hnear=Niamey,+Niger&ll=13.590133,2.100331&spn=0.002438,0.002393&t=h&z=19|creationDate|2011-01-13 +http://maps.google.fr/maps?f=q&source=s_q&hl=fr&geocode=&q=niamey&sll=46.75984,1.738281&sspn=14.077708,19.599609&ie=UTF8&hq=&hnear=Niamey,+Niger&ll=13.590133,2.100331&spn=0.002438,0.002393&t=h&z=19|tag|http://www.semanlink.net/tag/affaires_de_gado_a_niamey +http://maps.google.fr/maps?f=q&source=s_q&hl=fr&geocode=&q=niamey&sll=46.75984,1.738281&sspn=14.077708,19.599609&ie=UTF8&hq=&hnear=Niamey,+Niger&ll=13.590133,2.100331&spn=0.002438,0.002393&t=h&z=19|title|Gado, parcelles Tchangarey 8714 pqrs +http://maps.google.fr/maps?f=q&source=s_q&hl=fr&geocode=&q=niamey&sll=46.75984,1.738281&sspn=14.077708,19.599609&ie=UTF8&hq=&hnear=Niamey,+Niger&ll=13.590133,2.100331&spn=0.002438,0.002393&t=h&z=19|creationTime|2011-01-13T22:15:49Z +http://research.talis.com/2005/erdf/wiki/Main/RdfInHtml|creationDate|2006-05-29 +http://research.talis.com/2005/erdf/wiki/Main/RdfInHtml|tag|http://www.semanlink.net/tag/semantic_markup_in_html +http://research.talis.com/2005/erdf/wiki/Main/RdfInHtml|comment|This document describes how a subset of RDF can be embedded into XHTML or HTML by using common idioms and attributes. No new elements or attributes have been invented and the usages of the HTML attributes are within normal bounds. 
This scheme is designed to work with CSS and other HTML support technologies. +http://research.talis.com/2005/erdf/wiki/Main/RdfInHtml|title|Embedded RDF Wiki :: Talis +http://www.businessweek.com/print/magazine/content/06_18/b3982001.htm?chan=gl|creationDate|2006-05-02 +http://www.businessweek.com/print/magazine/content/06_18/b3982001.htm?chan=gl|tag|http://www.semanlink.net/tag/massively_multiplayer_online_games +http://www.businessweek.com/print/magazine/content/06_18/b3982001.htm?chan=gl|tag|http://www.semanlink.net/tag/second_life +http://www.businessweek.com/print/magazine/content/06_18/b3982001.htm?chan=gl|comment|A journey into a place in cyberspace where thousands of people have imaginary lives. Some even make a good living. Big advertisers are taking notice +http://www.businessweek.com/print/magazine/content/06_18/b3982001.htm?chan=gl|title|My Virtual Life +http://www.w3.org/2005/Incubator/mmsem/XGR-vocabularies/|creationDate|2007-08-01 +http://www.w3.org/2005/Incubator/mmsem/XGR-vocabularies/|tag|http://www.semanlink.net/tag/semantic_web +http://www.w3.org/2005/Incubator/mmsem/XGR-vocabularies/|tag|http://www.semanlink.net/tag/multimedia +http://www.w3.org/2005/Incubator/mmsem/XGR-vocabularies/|comment|This document gives an overview on the state-of-the-art of multimedia metadata formats. Initially, practical relevant vocabularies for developers of Semantic Web applications are listed according to their modality scope. In the second part of this document, the focus is set on the integration of the multimedia vocabularies into the Semantic Web, that is to say, formal representations of the vocabularies are discussed. +http://www.w3.org/2005/Incubator/mmsem/XGR-vocabularies/|title|Multimedia Vocabularies on the Semantic Web +http://www.w3.org/2005/Incubator/mmsem/XGR-vocabularies/|creationTime|2007-08-01T17:00:39Z +https://www.newscientist.com/article/mg21128311.800-a-brief-history-of-the-brain|creationDate|2017-04-12 +https://www.newscientist.com/article/mg21128311.800-a-brief-history-of-the-brain|tag|http://www.semanlink.net/tag/cerveau +https://www.newscientist.com/article/mg21128311.800-a-brief-history-of-the-brain|tag|http://www.semanlink.net/tag/evolution +https://www.newscientist.com/article/mg21128311.800-a-brief-history-of-the-brain|comment|the evolution of our brain from its origin in ancient seas to its dramatic expansion in one ape +https://www.newscientist.com/article/mg21128311.800-a-brief-history-of-the-brain|title|A brief history of the brain New Scientist +https://www.newscientist.com/article/mg21128311.800-a-brief-history-of-the-brain|creationTime|2017-04-12T21:24:01Z +https://watson-api-explorer.mybluemix.net/|creationDate|2017-06-06 +https://watson-api-explorer.mybluemix.net/|tag|http://www.semanlink.net/tag/i_b_m_s_watson +https://watson-api-explorer.mybluemix.net/|tag|http://www.semanlink.net/tag/api +https://watson-api-explorer.mybluemix.net/|title|Watson API Explorer +https://watson-api-explorer.mybluemix.net/|creationTime|2017-06-06T11:57:28Z +http://www.fofomag.com/index.asp?affiche=news_Display.asp&ArticleID=920|creationDate|2008-05-18 +http://www.fofomag.com/index.asp?affiche=news_Display.asp&ArticleID=920|tag|http://www.semanlink.net/tag/moussa_poussi +http://www.fofomag.com/index.asp?affiche=news_Display.asp&ArticleID=920|title|Moussa Poussi hospitalisé +http://www.fofomag.com/index.asp?affiche=news_Display.asp&ArticleID=920|creationTime|2008-05-18T17:16:20Z +https://github.com/keon/awesome-nlp|creationDate|2018-05-22 
+https://github.com/keon/awesome-nlp|tag|http://www.semanlink.net/tag/links +https://github.com/keon/awesome-nlp|tag|http://www.semanlink.net/tag/nlp +https://github.com/keon/awesome-nlp|comment|"A curated list of resources dedicated to NLP +" +https://github.com/keon/awesome-nlp|title|keon/awesome-nlp: A curated list of resources dedicated to Natural Language Processing (NLP) +https://github.com/keon/awesome-nlp|creationTime|2018-05-22T23:54:13Z +http://www.bbc.co.uk/science/horizon/1999/nasca_script.shtml|creationDate|2007-12-27 +http://www.bbc.co.uk/science/horizon/1999/nasca_script.shtml|tag|http://www.semanlink.net/tag/nasca +http://www.bbc.co.uk/science/horizon/1999/nasca_script.shtml|comment|"On a barren desert in South America is one of the greatest archaeological wonders of the world. Etched in the surface of the pampa are hundreds of straight lines, geometric shapes and the images of animals and birds. These are the Nasca lines, built by the Nasca people, but why they were created has defied explanation. Now archaeologists have begun to uncover the lost world of the line builders. + +" +http://www.bbc.co.uk/science/horizon/1999/nasca_script.shtml|title|BBC - Science & Nature - The Lost City of Nasca +http://www.bbc.co.uk/science/horizon/1999/nasca_script.shtml|creationTime|2007-12-27T22:51:11Z +http://www.bbc.co.uk/science/horizon/1999/nasca_script.shtml|source|BBC +http://www.commentcamarche.net/forum/affich-23600010-arnaque-a-blinkogold|creationDate|2014-01-31 +http://www.commentcamarche.net/forum/affich-23600010-arnaque-a-blinkogold|tag|http://www.semanlink.net/tag/arnaque +http://www.commentcamarche.net/forum/affich-23600010-arnaque-a-blinkogold|tag|http://www.semanlink.net/tag/orange +http://www.commentcamarche.net/forum/affich-23600010-arnaque-a-blinkogold|title|Arnaque à Blinkogold / orange +http://www.commentcamarche.net/forum/affich-23600010-arnaque-a-blinkogold|creationTime|2014-01-31T09:37:23Z +http://www.semanticscripting.org/SFSW2008/|creationDate|2008-01-23 +http://www.semanticscripting.org/SFSW2008/|tag|http://www.semanlink.net/tag/workshop +http://www.semanticscripting.org/SFSW2008/|title|Scripting for the Semantic Web (SFSW2008) +http://www.semanticscripting.org/SFSW2008/|creationTime|2008-01-23T23:09:59Z +https://www.continuum.io/anaconda-overview|creationDate|2017-05-28 +https://www.continuum.io/anaconda-overview|tag|http://www.semanlink.net/tag/anaconda +https://www.continuum.io/anaconda-overview|title|Anaconda Continuum +https://www.continuum.io/anaconda-overview|creationTime|2017-05-28T18:55:56Z +https://cims.nyu.edu/~brenden/LakeEtAl2015Science.pdf|creationDate|2018-01-04 +https://cims.nyu.edu/~brenden/LakeEtAl2015Science.pdf|tag|http://www.semanlink.net/tag/concept_learning +https://cims.nyu.edu/~brenden/LakeEtAl2015Science.pdf|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +https://cims.nyu.edu/~brenden/LakeEtAl2015Science.pdf|tag|http://www.semanlink.net/tag/one_shot_generalization +https://cims.nyu.edu/~brenden/LakeEtAl2015Science.pdf|comment|> People learning new concepts can often generalize successfully from just a single example, yet machine learning algorithms typically require tens or hundreds of examples to perform with similar accuracy... 
We present a computational model that captures these human learning abilities for a large class of simple visual concepts: handwritten characters from the world’s alphabets +https://cims.nyu.edu/~brenden/LakeEtAl2015Science.pdf|title|Human-level concept learning through probabilistic program induction (2015) +https://cims.nyu.edu/~brenden/LakeEtAl2015Science.pdf|creationTime|2018-01-04T14:56:17Z +http://scot-project.org/?page_id=2|creationDate|2007-08-24 +http://scot-project.org/?page_id=2|tag|http://www.semanlink.net/tag/tagging +http://scot-project.org/?page_id=2|tag|http://www.semanlink.net/tag/social_networks +http://scot-project.org/?page_id=2|tag|http://www.semanlink.net/tag/skos +http://scot-project.org/?page_id=2|comment|The SCOT(Social Semantic Cloud Of Tags) ontology is to semantically represent the structure and semantics of a collection of tags and to represent social networks among users based on the tags. +http://scot-project.org/?page_id=2|title|SCOT:Let’s Share Tags! » About SCOT +http://scot-project.org/?page_id=2|creationTime|2007-08-24T00:12:32Z +https://en.wikipedia.org/wiki/Zanj_Rebellion|creationDate|2017-05-18 +https://en.wikipedia.org/wiki/Zanj_Rebellion|tag|http://www.semanlink.net/tag/esclavage +https://en.wikipedia.org/wiki/Zanj_Rebellion|tag|http://www.semanlink.net/tag/irak +https://en.wikipedia.org/wiki/Zanj_Rebellion|tag|http://www.semanlink.net/tag/histoire +https://en.wikipedia.org/wiki/Zanj_Rebellion|tag|http://www.semanlink.net/tag/revolte +https://en.wikipedia.org/wiki/Zanj_Rebellion|title|Zanj Rebellion - Wikipedia +https://en.wikipedia.org/wiki/Zanj_Rebellion|creationTime|2017-05-18T22:24:19Z +http://www.zdnet.com/blog/networking/freedom-box-freeing-the-internet-one-server-at-a-time/698|creationDate|2011-09-09 +http://www.zdnet.com/blog/networking/freedom-box-freeing-the-internet-one-server-at-a-time/698|tag|http://www.semanlink.net/tag/freedom_box +http://www.zdnet.com/blog/networking/freedom-box-freeing-the-internet-one-server-at-a-time/698|title|Freedom Box: Freeing the Internet one Server at a time ZDNet +http://www.zdnet.com/blog/networking/freedom-box-freeing-the-internet-one-server-at-a-time/698|creationTime|2011-09-09T10:57:23Z +https://blog.usievents.com/interview-technique-a-poison-remede/|creationDate|2017-02-05 +https://blog.usievents.com/interview-technique-a-poison-remede/|tag|http://www.semanlink.net/tag/proletarisation +https://blog.usievents.com/interview-technique-a-poison-remede/|tag|http://www.semanlink.net/tag/christian_faure +https://blog.usievents.com/interview-technique-a-poison-remede/|tag|http://www.semanlink.net/tag/citation +https://blog.usievents.com/interview-technique-a-poison-remede/|comment|"“La prolétarisation est ce qui consiste à priver un sujet (producteur, consommateur, concepteur) de ses savoirs (savoir-faire, savoir-vivre, savoir concevoir, savoir décider)""
+Savoir / saveur
+Adopter plutôt que s'adapter
+Pratiques / usages
+Contre la prolétarisation, l'amateur
+Amateur / spéculateur. Il n'y a pas plus sérieux, il n'y a pas mieux fait qu'un travail d'amateur. Professionnel, ça veut dire à vendre, c'est tout.
+ +" +https://blog.usievents.com/interview-technique-a-poison-remede/|title|"Interview : ""La technique est à la fois notre poison et notre remède"" - USI Events - Blog" +https://blog.usievents.com/interview-technique-a-poison-remede/|creationTime|2017-02-05T02:06:36Z +http://www.tatuagemdaboa.com.br/|creationDate|2008-05-22 +http://www.tatuagemdaboa.com.br/|tag|http://www.semanlink.net/tag/rigolo +http://www.tatuagemdaboa.com.br/|tag|http://www.semanlink.net/tag/bresil +http://www.tatuagemdaboa.com.br/|comment|A Juliana está de tatuagem nova. Veja aqui. +http://www.tatuagemdaboa.com.br/|title|Bar da Boa +http://www.tatuagemdaboa.com.br/|creationTime|2008-05-22T21:26:58Z +https://github.com/FasterXML/jackson-databind/|creationDate|2015-03-10 +https://github.com/FasterXML/jackson-databind/|tag|http://www.semanlink.net/tag/jackson +https://github.com/FasterXML/jackson-databind/|tag|http://www.semanlink.net/tag/github_project +https://github.com/FasterXML/jackson-databind/|comment|The general-purpose data-binding functionality and tree-model for Jackson Data Processor. It builds on core streaming parser/generator package, and uses Jackson Annotations for configuration. +https://github.com/FasterXML/jackson-databind/|title|FasterXML/jackson-databind +https://github.com/FasterXML/jackson-databind/|creationTime|2015-03-10T10:37:47Z +https://wit.ai/blog/2014/12/19/dan-jurafsky-food|creationDate|2015-01-06 +https://wit.ai/blog/2014/12/19/dan-jurafsky-food|tag|http://www.semanlink.net/tag/dan_jurafsky +https://wit.ai/blog/2014/12/19/dan-jurafsky-food|title|The Language of Food (and Dating), by Dan Jurafsky +https://wit.ai/blog/2014/12/19/dan-jurafsky-food|creationTime|2015-01-06T11:28:59Z +http://www.slideshare.net/bengee/bnowack-from-ideatoweb|creationDate|2012-02-11 +http://www.slideshare.net/bengee/bnowack-from-ideatoweb|tag|http://www.semanlink.net/tag/rest +http://www.slideshare.net/bengee/bnowack-from-ideatoweb|tag|http://www.semanlink.net/tag/benjamin_nowack +http://www.slideshare.net/bengee/bnowack-from-ideatoweb|tag|http://www.semanlink.net/tag/linked_data_dev +http://www.slideshare.net/bengee/bnowack-from-ideatoweb|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://www.slideshare.net/bengee/bnowack-from-ideatoweb|title|From Idea to Web - Creating Linked Data Apps +http://www.slideshare.net/bengee/bnowack-from-ideatoweb|creationTime|2012-02-11T10:16:27Z +http://stackoverflow.com/questions/26232346/in-json-ld-is-it-possible-to-extend-a-context/26236423#26236423|creationDate|2016-04-09 +http://stackoverflow.com/questions/26232346/in-json-ld-is-it-possible-to-extend-a-context/26236423#26236423|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/26232346/in-json-ld-is-it-possible-to-extend-a-context/26236423#26236423|tag|http://www.semanlink.net/tag/json_ld +http://stackoverflow.com/questions/26232346/in-json-ld-is-it-possible-to-extend-a-context/26236423#26236423|title|semantic web - In JSON-LD, is it possible to extend a context? 
- Stack Overflow
+http://stackoverflow.com/questions/26232346/in-json-ld-is-it-possible-to-extend-a-context/26236423#26236423|creationTime|2016-04-09T11:32:57Z
+https://www.bbvaopenmind.com/en/brain-implants-and-the-brain-initiative-lights-and-shadows/?utm_source=twitter&utm_medium=techreview&utm_campaign=MITcompany&utm_content=ImplantesBRAIN|creationDate|2018-01-03
+https://www.bbvaopenmind.com/en/brain-implants-and-the-brain-initiative-lights-and-shadows/?utm_source=twitter&utm_medium=techreview&utm_campaign=MITcompany&utm_content=ImplantesBRAIN|tag|http://www.semanlink.net/tag/brain_implants
+https://www.bbvaopenmind.com/en/brain-implants-and-the-brain-initiative-lights-and-shadows/?utm_source=twitter&utm_medium=techreview&utm_campaign=MITcompany&utm_content=ImplantesBRAIN|tag|http://www.semanlink.net/tag/brain_initiative
+https://www.bbvaopenmind.com/en/brain-implants-and-the-brain-initiative-lights-and-shadows/?utm_source=twitter&utm_medium=techreview&utm_campaign=MITcompany&utm_content=ImplantesBRAIN|comment|Last November, researchers from the University of Southern California announced the successful results achieved with brain implants to improve memory
+https://www.bbvaopenmind.com/en/brain-implants-and-the-brain-initiative-lights-and-shadows/?utm_source=twitter&utm_medium=techreview&utm_campaign=MITcompany&utm_content=ImplantesBRAIN|title|Brain Implants and the BRAIN Initiative: lights and Shadows - OpenMind
+https://www.bbvaopenmind.com/en/brain-implants-and-the-brain-initiative-lights-and-shadows/?utm_source=twitter&utm_medium=techreview&utm_campaign=MITcompany&utm_content=ImplantesBRAIN|creationTime|2018-01-03T00:55:29Z
+http://www.wired.com/threatlevel/2013/05/nsa-manual-on-hacking-internet/|creationDate|2013-05-12
+http://www.wired.com/threatlevel/2013/05/nsa-manual-on-hacking-internet/|tag|http://www.semanlink.net/tag/nsa
+http://www.wired.com/threatlevel/2013/05/nsa-manual-on-hacking-internet/|tag|http://www.semanlink.net/tag/tips
+http://www.wired.com/threatlevel/2013/05/nsa-manual-on-hacking-internet/|tag|http://www.semanlink.net/tag/google
+http://www.wired.com/threatlevel/2013/05/nsa-manual-on-hacking-internet/|title|Use These Secret NSA Google Search Tips to Become Your Own Spy Agency Threat Level Wired.com
+http://www.wired.com/threatlevel/2013/05/nsa-manual-on-hacking-internet/|creationTime|2013-05-12T13:22:14Z
+https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html|creationDate|2017-07-19
+https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html|tag|http://www.semanlink.net/tag/latent_semantic_analysis
+https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html|tag|http://www.semanlink.net/tag/chris_manning
+https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html|tag|http://www.semanlink.net/tag/synonymy
+https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html|tag|http://www.semanlink.net/tag/lexical_ambiguity
+https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html|comment|"VSM : problem with synonymy and polysemy (e.g. synonyms are accorded separate dimensions)
+
+Could we use the co-occurrences of terms to capture the latent semantic associations of terms and alleviate these problems?
+
+Concluding remarks:
+
+- computational cost of the SVD is significant
+  - biggest obstacle to the widespread adoption of LSI.
+  - One approach to this obstacle: build the LSI representation on a randomly sampled subset of the documents, following which the remaining documents are ""folded in"" (cf Gensim tutorial ""[Random Projection (used as an option to speed up LSI)](https://radimrehurek.com/gensim/models/rpmodel.html)"")
+- As we reduce k, recall tends to increase, as expected.
+- **Most surprisingly**, a value of k in the low hundreds can actually increase precision. **This appears to suggest that for a suitable value of *k*, LSI addresses some of the challenges of synonymy**.
+- LSI works best in applications where there is little overlap between queries and documents. (--??)
+
+The experiments also documented some modes where LSI failed to match the effectiveness of more traditional indexes and score computations.
+
+LSI shares two basic drawbacks of vector space retrieval:
+
+- no good way of expressing negations
+- no way of enforcing Boolean conditions.
+
+LSI can be viewed as soft clustering by interpreting each dimension of the reduced space as a cluster and the value that a document has on that dimension as its fractional membership in that cluster.
+
+"
+https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html|title|"Latent semantic indexing (""Introduction to Information Retrieval"" Manning 2008)"
+https://nlp.stanford.edu/IR-book/html/htmledition/latent-semantic-indexing-1.html|creationTime|2017-07-19T09:54:04Z
+http://www.technologyreview.com/Infotech/19627/?a=f|creationDate|2007-11-08
+http://www.technologyreview.com/Infotech/19627/?a=f|tag|http://www.semanlink.net/tag/twine
+http://www.technologyreview.com/Infotech/19627/?a=f|comment|Radar Networks' free tool provides a smarter way to find information and increase productivity.
+http://www.technologyreview.com/Infotech/19627/?a=f|title|Technology Review: Twine +http://www.technologyreview.com/Infotech/19627/?a=f|creationTime|2007-11-08T16:28:50Z +https://medium.com/swlh/a-machine-learning-model-to-understand-fancy-abbreviations-trained-on-tolkien-36601b73ecbb|creationDate|2019-04-03 +https://medium.com/swlh/a-machine-learning-model-to-understand-fancy-abbreviations-trained-on-tolkien-36601b73ecbb|tag|http://www.semanlink.net/tag/spellchecker +https://medium.com/swlh/a-machine-learning-model-to-understand-fancy-abbreviations-trained-on-tolkien-36601b73ecbb|tag|http://www.semanlink.net/tag/acronyms_nlp +https://medium.com/swlh/a-machine-learning-model-to-understand-fancy-abbreviations-trained-on-tolkien-36601b73ecbb|title|A machine learning model to understand fancy abbreviations, trained on Tolkien +https://medium.com/swlh/a-machine-learning-model-to-understand-fancy-abbreviations-trained-on-tolkien-36601b73ecbb|creationTime|2019-04-03T13:13:20Z +http://www.pbs.org/wgbh/nova/megaflood/scab-nf.html|creationDate|2008-08-25 +http://www.pbs.org/wgbh/nova/megaflood/scab-nf.html|tag|http://www.semanlink.net/tag/multnomah_falls +http://www.pbs.org/wgbh/nova/megaflood/scab-nf.html|tag|http://www.semanlink.net/tag/missoula_floods +http://www.pbs.org/wgbh/nova/megaflood/scab-nf.html|title|NOVA Mystery of the Megaflood +http://www.pbs.org/wgbh/nova/megaflood/scab-nf.html|creationTime|2008-08-25T14:12:28Z +http://biopython.org|creationDate|2015-01-04 +http://biopython.org|tag|http://www.semanlink.net/tag/bioinformatics +http://biopython.org|tag|http://www.semanlink.net/tag/python +http://biopython.org|comment|The Biopython Project is an international association of developers of freely available Python tools for computational molecular biology +http://biopython.org|title|Biopython +http://biopython.org|creationTime|2015-01-04T19:36:25Z +http://www.deeplearningbook.org/contents/representation.html|creationDate|2017-12-16 +http://www.deeplearningbook.org/contents/representation.html|tag|http://www.semanlink.net/tag/representation_learning +http://www.deeplearningbook.org/contents/representation.html|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.deeplearningbook.org/contents/representation.html|tag|http://www.semanlink.net/tag/ian_goodfellow +http://www.deeplearningbook.org/contents/representation.html|title|"Representation learning (in ""Deep Learning"", Ian Goodfellow and Yoshua Bengio and Aaron Courville)" +http://www.deeplearningbook.org/contents/representation.html|creationTime|2017-12-16T14:31:43Z +http://www.youtube.com/watch?v=SA9_3cxfHyI|creationDate|2008-05-18 +http://www.youtube.com/watch?v=SA9_3cxfHyI|tag|http://www.semanlink.net/tag/femme +http://www.youtube.com/watch?v=SA9_3cxfHyI|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=SA9_3cxfHyI|tag|http://www.semanlink.net/tag/musique_du_niger +http://www.youtube.com/watch?v=SA9_3cxfHyI|comment|"Sauvez la femme
+C'est la mère de l'Humanité" +http://www.youtube.com/watch?v=SA9_3cxfHyI|title|Adams Junior - La mère de l'Humanité - YouTube +http://www.youtube.com/watch?v=SA9_3cxfHyI|creationTime|2008-05-18T17:33:32Z +https://github.com/taurenshaman/semantic-web/blob/master/data/semanlink-schema2001.rdf|creationDate|2013-09-13 +https://github.com/taurenshaman/semantic-web/blob/master/data/semanlink-schema2001.rdf|tag|http://www.semanlink.net/tag/semanlink +https://github.com/taurenshaman/semantic-web/blob/master/data/semanlink-schema2001.rdf|tag|http://www.semanlink.net/tag/github_project +https://github.com/taurenshaman/semantic-web/blob/master/data/semanlink-schema2001.rdf|title|Old semanlink schema in a github project! +https://github.com/taurenshaman/semantic-web/blob/master/data/semanlink-schema2001.rdf|creationTime|2013-09-13T00:08:24Z +http://www.ldodds.com/blog/archives/000248.html|creationDate|2005-11-04 +http://www.ldodds.com/blog/archives/000248.html|tag|http://www.semanlink.net/tag/amazon_mechanical_turk +http://www.ldodds.com/blog/archives/000248.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/archives/000248.html|title|Lost Boy: It's Like the Ultimate Lazy Web +http://science.sciencemag.org/content/324/5923/81|creationDate|2017-10-22 +http://science.sciencemag.org/content/324/5923/81|tag|http://www.semanlink.net/tag/science +http://science.sciencemag.org/content/324/5923/81|tag|http://www.semanlink.net/tag/machine_learning +http://science.sciencemag.org/content/324/5923/81|tag|http://www.semanlink.net/tag/physique +http://science.sciencemag.org/content/324/5923/81|comment|Without any prior knowledge about physics, kinematics, or geometry, the algorithm discovered Hamiltonians, Lagrangians, and other laws of geometric and momentum conservation +http://science.sciencemag.org/content/324/5923/81|title|Distilling Free-Form Natural Laws from Experimental Data Science +http://science.sciencemag.org/content/324/5923/81|creationTime|2017-10-22T13:56:59Z +http://www.thefigtrees.net/lee/blog/2008/01/why_sparql.html|creationDate|2008-01-25 +http://www.thefigtrees.net/lee/blog/2008/01/why_sparql.html|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.thefigtrees.net/lee/blog/2008/01/why_sparql.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2008/01/why_sparql.html|tag|http://www.semanlink.net/tag/sparql +http://www.thefigtrees.net/lee/blog/2008/01/why_sparql.html|title|TechnicaLee Speaking: Why SPARQL? 
+http://www.thefigtrees.net/lee/blog/2008/01/why_sparql.html|creationTime|2008-01-25T13:10:07Z +http://junit.sourceforge.net/doc/faq/faq.htm|creationDate|2006-07-22 +http://junit.sourceforge.net/doc/faq/faq.htm|tag|http://www.semanlink.net/tag/faq +http://junit.sourceforge.net/doc/faq/faq.htm|tag|http://www.semanlink.net/tag/junit +http://junit.sourceforge.net/doc/faq/faq.htm|title|JUnit FAQ +http://www.calacademy.org/exhibits/xtremelife/life_on_earth.php|creationDate|2006-05-23 +http://www.calacademy.org/exhibits/xtremelife/life_on_earth.php|tag|http://www.semanlink.net/tag/extremophiles +http://www.calacademy.org/exhibits/xtremelife/life_on_earth.php|title|Extreme Life on Earth (California Academy of Sciences - Natural History) +http://www.pbs.org/cringely/pulpit/pulpit20050609.html|creationDate|2005-06-10 +http://www.pbs.org/cringely/pulpit/pulpit20050609.html|tag|http://www.semanlink.net/tag/microsoft +http://www.pbs.org/cringely/pulpit/pulpit20050609.html|tag|http://www.semanlink.net/tag/apple_intel +http://www.pbs.org/cringely/pulpit/pulpit20050609.html|comment|Apple's Decision to Use Intel Processors Is Nothing Less Than an Attempt to Dethrone Microsoft. Really. +http://www.pbs.org/cringely/pulpit/pulpit20050609.html|title|PBS I, Cringely . June 9, 2005 - Going for Broke +https://github.com/aneesha/RAKE|creationDate|2017-06-26 +https://github.com/aneesha/RAKE|tag|http://www.semanlink.net/tag/github_project +https://github.com/aneesha/RAKE|tag|http://www.semanlink.net/tag/rake +https://github.com/aneesha/RAKE|tag|http://www.semanlink.net/tag/python_nlp +https://github.com/aneesha/RAKE|title|RAKE: A python implementation of the Rapid Automatic Keyword Extraction +https://github.com/aneesha/RAKE|creationTime|2017-06-26T14:43:49Z +http://msc2010.org/mscwork/|creationDate|2012-02-23 +http://msc2010.org/mscwork/|tag|http://www.semanlink.net/tag/mathematiques +http://msc2010.org/mscwork/|tag|http://www.semanlink.net/tag/skos +http://msc2010.org/mscwork/|comment|The Mathematics Subject Classification (MSC) is being converted into a SKOS form incorporating much other related material. +http://msc2010.org/mscwork/|title|Mathematics Subject Classification MSC2010 +http://msc2010.org/mscwork/|creationTime|2012-02-23T21:55:24Z +http://www.wired.com/2015/07/pluto-new-horizons-2/|creationDate|2015-07-11 +http://www.wired.com/2015/07/pluto-new-horizons-2/|tag|http://www.semanlink.net/tag/new_horizons +http://www.wired.com/2015/07/pluto-new-horizons-2/|title|New Horizons' Long, Dark, Amazing Journey to Pluto...And Beyond WIRED +http://www.wired.com/2015/07/pluto-new-horizons-2/|creationTime|2015-07-11T15:23:33Z +http://www.lemonde.fr/idees/article/2014/04/03/comment-devient-on-un-bourreau_4395245_3232.html|creationDate|2014-04-05 +http://www.lemonde.fr/idees/article/2014/04/03/comment-devient-on-un-bourreau_4395245_3232.html|tag|http://www.semanlink.net/tag/genocide_rwandais +http://www.lemonde.fr/idees/article/2014/04/03/comment-devient-on-un-bourreau_4395245_3232.html|tag|http://www.semanlink.net/tag/genocide +http://www.lemonde.fr/idees/article/2014/04/03/comment-devient-on-un-bourreau_4395245_3232.html|comment|" « Quelques-uns l’ont voulu, d’autres l’ont fait, tous l’ont laissé faire. » (""A shocking crime was committed on the unscrupulous initiative of few individuals, with the blessing of more, and amid the passive acquiescence of all"") (Tacite)
+« S’il y a eu un génocide, il peut y en avoir un autre, puisque la cause est toujours là et qu’on ne la connaît pas. » (rapporté par Jean Hatzfeld) +" +http://www.lemonde.fr/idees/article/2014/04/03/comment-devient-on-un-bourreau_4395245_3232.html|title|Comment devient-on un bourreau ? +http://www.lemonde.fr/idees/article/2014/04/03/comment-devient-on-un-bourreau_4395245_3232.html|creationTime|2014-04-05T18:37:31Z +http://www.lemonde.fr/culture/article/2013/02/07/les-manuscrits-sauves-de-tombouctou_1828672_3246.html|creationDate|2013-02-18 +http://www.lemonde.fr/culture/article/2013/02/07/les-manuscrits-sauves-de-tombouctou_1828672_3246.html|tag|http://www.semanlink.net/tag/manuscrits_de_tombouctou +http://www.lemonde.fr/culture/article/2013/02/07/les-manuscrits-sauves-de-tombouctou_1828672_3246.html|title|Les manuscrits sauvés de Tombouctou +http://www.lemonde.fr/culture/article/2013/02/07/les-manuscrits-sauves-de-tombouctou_1828672_3246.html|creationTime|2013-02-18T10:55:19Z +http://www.w3.org/2001/tag/awwsw/issue57/20110625/#id35291|creationDate|2011-06-26 +http://www.w3.org/2001/tag/awwsw/issue57/20110625/#id35291|tag|http://www.semanlink.net/tag/httprange_14 +http://www.w3.org/2001/tag/awwsw/issue57/20110625/#id35291|title|Providing and discovering definitions of URIs +http://www.w3.org/2001/tag/awwsw/issue57/20110625/#id35291|creationTime|2011-06-26T14:38:17Z +http://stackoverflow.com/questions/3595515/xmlhttprequest-error-origin-null-is-not-allowed-by-access-control-allow-origin|creationDate|2012-08-27 +http://stackoverflow.com/questions/3595515/xmlhttprequest-error-origin-null-is-not-allowed-by-access-control-allow-origin|tag|http://www.semanlink.net/tag/javascript +http://stackoverflow.com/questions/3595515/xmlhttprequest-error-origin-null-is-not-allowed-by-access-control-allow-origin|tag|http://www.semanlink.net/tag/access_control +http://stackoverflow.com/questions/3595515/xmlhttprequest-error-origin-null-is-not-allowed-by-access-control-allow-origin|tag|http://www.semanlink.net/tag/ajax +http://stackoverflow.com/questions/3595515/xmlhttprequest-error-origin-null-is-not-allowed-by-access-control-allow-origin|title|javascript - XmlHttpRequest error: Origin null is not allowed by Access-Control-Allow-Origin - Stack Overflow +http://stackoverflow.com/questions/3595515/xmlhttprequest-error-origin-null-is-not-allowed-by-access-control-allow-origin|creationTime|2012-08-27T18:56:02Z +http://www.hyperdata.it/|creationDate|2015-02-09 +http://www.hyperdata.it/|tag|http://www.semanlink.net/tag/danny_ayers +http://www.hyperdata.it/|title|FooWiki - danja +http://www.hyperdata.it/|creationTime|2015-02-09T09:46:34Z +http://www.kryogenix.org/code/browser/jses/|creationDate|2005-05-25 +http://www.kryogenix.org/code/browser/jses/|tag|http://www.semanlink.net/tag/javascript +http://www.kryogenix.org/code/browser/jses/|tag|http://www.semanlink.net/tag/howto +http://www.kryogenix.org/code/browser/jses/|title|JavaScript Event Sheets +http://www.google.fr/search?hl=fr&source=hp&q=Linking+enterprise+data&btnG=Recherche+Google&meta=&aq=f&oq=|creationDate|2009-09-21 +http://www.google.fr/search?hl=fr&source=hp&q=Linking+enterprise+data&btnG=Recherche+Google&meta=&aq=f&oq=|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://www.google.fr/search?hl=fr&source=hp&q=Linking+enterprise+data&btnG=Recherche+Google&meta=&aq=f&oq=|tag|http://www.semanlink.net/tag/link_to_me 
+http://www.google.fr/search?hl=fr&source=hp&q=Linking+enterprise+data&btnG=Recherche+Google&meta=&aq=f&oq=|tag|http://www.semanlink.net/tag/google +http://www.google.fr/search?hl=fr&source=hp&q=Linking+enterprise+data&btnG=Recherche+Google&meta=&aq=f&oq=|comment|Premier sur la liste ! (2009-09-21) +http://www.google.fr/search?hl=fr&source=hp&q=Linking+enterprise+data&btnG=Recherche+Google&meta=&aq=f&oq=|title|Linking enterprise data - Recherche Google +http://www.google.fr/search?hl=fr&source=hp&q=Linking+enterprise+data&btnG=Recherche+Google&meta=&aq=f&oq=|creationTime|2009-09-21T22:53:01Z +http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEM7ZTULWFE_0.html|creationDate|2005-11-30 +http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEM7ZTULWFE_0.html|tag|http://www.semanlink.net/tag/eau_de_mars +http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEM7ZTULWFE_0.html|tag|http://www.semanlink.net/tag/mars_express +http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEM7ZTULWFE_0.html|title|ESA - Results from Mars Express - Buried craters and underground ice - Mars Express uncovers depths of Mars +http://ecologie.blog.lemonde.fr/2011/01/14/bienvenue-dans-une-nouvelle-ere-geologique-lanthropocene/|creationDate|2011-01-17 +http://ecologie.blog.lemonde.fr/2011/01/14/bienvenue-dans-une-nouvelle-ere-geologique-lanthropocene/|tag|http://www.semanlink.net/tag/anthropocene +http://ecologie.blog.lemonde.fr/2011/01/14/bienvenue-dans-une-nouvelle-ere-geologique-lanthropocene/|tag|http://www.semanlink.net/tag/rechauffement_climatique +http://ecologie.blog.lemonde.fr/2011/01/14/bienvenue-dans-une-nouvelle-ere-geologique-lanthropocene/|title|Bienvenue dans une nouvelle ère géologique, l’anthropocène - Eco(lo) - Blog LeMonde.fr +http://ecologie.blog.lemonde.fr/2011/01/14/bienvenue-dans-une-nouvelle-ere-geologique-lanthropocene/|creationTime|2011-01-17T21:47:19Z +http://www.lemonde.fr/planete/article/2018/03/13/le-krill-un-petit-crustace-tres-convoite_5270007_3244.html|creationDate|2018-03-13 +http://www.lemonde.fr/planete/article/2018/03/13/le-krill-un-petit-crustace-tres-convoite_5270007_3244.html|tag|http://www.semanlink.net/tag/antarctique +http://www.lemonde.fr/planete/article/2018/03/13/le-krill-un-petit-crustace-tres-convoite_5270007_3244.html|tag|http://www.semanlink.net/tag/ressources_halieutiques +http://www.lemonde.fr/planete/article/2018/03/13/le-krill-un-petit-crustace-tres-convoite_5270007_3244.html|tag|http://www.semanlink.net/tag/peche +http://www.lemonde.fr/planete/article/2018/03/13/le-krill-un-petit-crustace-tres-convoite_5270007_3244.html|tag|http://www.semanlink.net/tag/krill +http://www.lemonde.fr/planete/article/2018/03/13/le-krill-un-petit-crustace-tres-convoite_5270007_3244.html|tag|http://www.semanlink.net/tag/greenpeace +http://www.lemonde.fr/planete/article/2018/03/13/le-krill-un-petit-crustace-tres-convoite_5270007_3244.html|title|Greenpeace alerte sur le boom de la pêche au krill en Antarctique +http://www.lemonde.fr/planete/article/2018/03/13/le-krill-un-petit-crustace-tres-convoite_5270007_3244.html|creationTime|2018-03-13T14:11:33Z +http://code.google.com/p/backplanejs/wiki/UsingRdfa|creationDate|2012-03-19 +http://code.google.com/p/backplanejs/wiki/UsingRdfa|tag|http://www.semanlink.net/tag/rdfa +http://code.google.com/p/backplanejs/wiki/UsingRdfa|tag|http://www.semanlink.net/tag/backplanejs +http://code.google.com/p/backplanejs/wiki/UsingRdfa|title|backplanejs: A basic introduction to using the RDFa Parser in your 
web pages +http://code.google.com/p/backplanejs/wiki/UsingRdfa|creationTime|2012-03-19T22:48:35Z +http://www.bnode.org/archives2/47|creationDate|2005-11-16 +http://www.bnode.org/archives2/47|tag|http://www.semanlink.net/tag/sparql +http://www.bnode.org/archives2/47|tag|http://www.semanlink.net/tag/triplestore +http://www.bnode.org/archives2/47|tag|http://www.semanlink.net/tag/rdf_performance_issues +http://www.bnode.org/archives2/47|title|Quad store performance issues +http://www.nytimes.com/2007/07/22/business/yourmoney/22rwanda.html?ei=5088&en=83f0d24da11aabd1&ex=1342756800&adxnnl=1&partner=rssnyt&emc=rss&adxnnlx=1185641013-NY6K9QLvEmOIlTll3V9Y1w|creationDate|2007-07-28 +http://www.nytimes.com/2007/07/22/business/yourmoney/22rwanda.html?ei=5088&en=83f0d24da11aabd1&ex=1342756800&adxnnl=1&partner=rssnyt&emc=rss&adxnnlx=1185641013-NY6K9QLvEmOIlTll3V9Y1w|tag|http://www.semanlink.net/tag/rwanda +http://www.nytimes.com/2007/07/22/business/yourmoney/22rwanda.html?ei=5088&en=83f0d24da11aabd1&ex=1342756800&adxnnl=1&partner=rssnyt&emc=rss&adxnnlx=1185641013-NY6K9QLvEmOIlTll3V9Y1w|tag|http://www.semanlink.net/tag/internet_en_afrique +http://www.nytimes.com/2007/07/22/business/yourmoney/22rwanda.html?ei=5088&en=83f0d24da11aabd1&ex=1342756800&adxnnl=1&partner=rssnyt&emc=rss&adxnnlx=1185641013-NY6K9QLvEmOIlTll3V9Y1w|title|Africa, Offline: Waiting for the Web - New York Times +http://www.nytimes.com/2007/07/22/business/yourmoney/22rwanda.html?ei=5088&en=83f0d24da11aabd1&ex=1342756800&adxnnl=1&partner=rssnyt&emc=rss&adxnnlx=1185641013-NY6K9QLvEmOIlTll3V9Y1w|creationTime|2007-07-28T18:46:58Z +http://www.arte.tv/fr/semaine/244,broadcastingNum=1232779,day=6,week=2,year=2011.html|creationDate|2011-01-11 +http://www.arte.tv/fr/semaine/244,broadcastingNum=1232779,day=6,week=2,year=2011.html|tag|http://www.semanlink.net/tag/la_ronde_de_nuit +http://www.arte.tv/fr/semaine/244,broadcastingNum=1232779,day=6,week=2,year=2011.html|title|La Ronde de Nuit, secrets d'un tableau - Peter Greenaway +http://www.arte.tv/fr/semaine/244,broadcastingNum=1232779,day=6,week=2,year=2011.html|creationTime|2011-01-11T01:04:13Z +http://www.fubiz.net/2017/03/29/incredible-optical-illusion-body-painting-by-trina-merry/|creationDate|2017-04-23 +http://www.fubiz.net/2017/03/29/incredible-optical-illusion-body-painting-by-trina-merry/|tag|http://www.semanlink.net/tag/art +http://www.fubiz.net/2017/03/29/incredible-optical-illusion-body-painting-by-trina-merry/|tag|http://www.semanlink.net/tag/insolite +http://www.fubiz.net/2017/03/29/incredible-optical-illusion-body-painting-by-trina-merry/|title|Incredible Optical Illusion Body Painting by Trina Merry – Fubiz Media +http://www.fubiz.net/2017/03/29/incredible-optical-illusion-body-painting-by-trina-merry/|creationTime|2017-04-23T13:34:59Z +http://www.theguardian.com/commentisfree/2013/sep/05/government-betrayed-internet-nsa-spying|creationDate|2013-09-06 +http://www.theguardian.com/commentisfree/2013/sep/05/government-betrayed-internet-nsa-spying|tag|http://www.semanlink.net/tag/nsa_spying_scandal +http://www.theguardian.com/commentisfree/2013/sep/05/government-betrayed-internet-nsa-spying|tag|http://www.semanlink.net/tag/the_guardian +http://www.theguardian.com/commentisfree/2013/sep/05/government-betrayed-internet-nsa-spying|title|The US government has betrayed the internet. 
We need to take it back Bruce Schneier The Guardian +http://www.theguardian.com/commentisfree/2013/sep/05/government-betrayed-internet-nsa-spying|creationTime|2013-09-06T21:44:27Z +https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-similarity.html|creationDate|2017-07-19 +https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-similarity.html|tag|http://www.semanlink.net/tag/text_similarity +https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-similarity.html|tag|http://www.semanlink.net/tag/elasticsearch +https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-similarity.html|title|Similarity module Elasticsearch Reference +https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-similarity.html|creationTime|2017-07-19T14:38:13Z +https://www.slideshare.net/andyseaborne/201602-graphs-pgrdf|creationDate|2018-04-08 +https://www.slideshare.net/andyseaborne/201602-graphs-pgrdf|tag|http://www.semanlink.net/tag/andy_seaborne +https://www.slideshare.net/andyseaborne/201602-graphs-pgrdf|tag|http://www.semanlink.net/tag/rdf_and_property_graphs +https://www.slideshare.net/andyseaborne/201602-graphs-pgrdf|title|Two graph data models : RDF and Property Graphs +https://www.slideshare.net/andyseaborne/201602-graphs-pgrdf|creationTime|2018-04-08T12:45:59Z +http://en.wikipedia.org/wiki/War/Dance|creationDate|2009-01-13 +http://en.wikipedia.org/wiki/War/Dance|tag|http://www.semanlink.net/tag/enfants +http://en.wikipedia.org/wiki/War/Dance|tag|http://www.semanlink.net/tag/creve_coeur +http://en.wikipedia.org/wiki/War/Dance|tag|http://www.semanlink.net/tag/ouganda +http://en.wikipedia.org/wiki/War/Dance|tag|http://www.semanlink.net/tag/musique +http://en.wikipedia.org/wiki/War/Dance|tag|http://www.semanlink.net/tag/documentaire +http://en.wikipedia.org/wiki/War/Dance|tag|http://www.semanlink.net/tag/guerre_civile +http://en.wikipedia.org/wiki/War/Dance|comment|"C'est la brousse ici : ce n'est pas prudent de pleurer aussi fort. 
+
"
+http://en.wikipedia.org/wiki/War/Dance|title|War Dance
+http://en.wikipedia.org/wiki/War/Dance|creationTime|2009-01-13T01:21:17Z
+http://ontologies.makolab.com/uco/ns.html|creationDate|2012-12-13
+http://ontologies.makolab.com/uco/ns.html|tag|http://www.semanlink.net/tag/makolab
+http://ontologies.makolab.com/uco/ns.html|tag|http://www.semanlink.net/tag/automotive_and_web_technologies
+http://ontologies.makolab.com/uco/ns.html|title|Used Cars Ontology Language Reference
+http://ontologies.makolab.com/uco/ns.html|creationTime|2012-12-13T11:38:22Z
+http://passeurdesciences.blog.lemonde.fr/2013/03/06/un-virus-prend-la-bacterie-du-cholera-a-son-propre-piege/|creationDate|2013-03-07
+http://passeurdesciences.blog.lemonde.fr/2013/03/06/un-virus-prend-la-bacterie-du-cholera-a-son-propre-piege/|tag|http://www.semanlink.net/tag/immune_system
+http://passeurdesciences.blog.lemonde.fr/2013/03/06/un-virus-prend-la-bacterie-du-cholera-a-son-propre-piege/|tag|http://www.semanlink.net/tag/bacteries
+http://passeurdesciences.blog.lemonde.fr/2013/03/06/un-virus-prend-la-bacterie-du-cholera-a-son-propre-piege/|tag|http://www.semanlink.net/tag/virus
+http://passeurdesciences.blog.lemonde.fr/2013/03/06/un-virus-prend-la-bacterie-du-cholera-a-son-propre-piege/|title|Un virus prend la bactérie du choléra à son propre piège Passeur de sciences
+http://passeurdesciences.blog.lemonde.fr/2013/03/06/un-virus-prend-la-bacterie-du-cholera-a-son-propre-piege/|creationTime|2013-03-07T00:49:03Z
+https://class.coursera.org/datasci-001/wiki/view?page=syllabus|creationDate|2013-06-16
+https://class.coursera.org/datasci-001/wiki/view?page=syllabus|tag|http://www.semanlink.net/tag/coursera_introduction_to_data_science
+https://class.coursera.org/datasci-001/wiki/view?page=syllabus|title|Syllabus Introduction to Data Science
+https://class.coursera.org/datasci-001/wiki/view?page=syllabus|creationTime|2013-06-16T10:44:31Z
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fde.wikipedia.org%2Fwiki%2FBerlin%3E+%7D%0D%0A%7D|creationDate|2007-10-13
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fde.wikipedia.org%2Fwiki%2FBerlin%3E+%7D%0D%0A%7D|tag|http://www.semanlink.net/tag/wikipedia_page_to_concept
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fde.wikipedia.org%2Fwiki%2FBerlin%3E+%7D%0D%0A%7D|comment|"SELECT ?s ?p
+WHERE {
+ { ?s ?p <http://de.wikipedia.org/wiki/Berlin> }
+}
+
+
+
+"
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fde.wikipedia.org%2Fwiki%2FBerlin%3E+%7D%0D%0A%7D|title|SPARQL query to find the dbPedia concept corresponding to a wikipedia page
+http://dbpedia.org/snorql/?query=SELECT+%3Fsubject+%3Fp%0D%0AWHERE+%7B%0D%0A++%7B+%3Fsubject+%3Fp+%3Chttp%3A%2F%2Fde.wikipedia.org%2Fwiki%2FBerlin%3E+%7D%0D%0A%7D|creationTime|2007-10-13T00:28:42Z
+http://neurosciencenews.com/light-crispr-cas9-4917/|creationDate|2016-08-27
+http://neurosciencenews.com/light-crispr-cas9-4917/|tag|http://www.semanlink.net/tag/genome_editing
+http://neurosciencenews.com/light-crispr-cas9-4917/|tag|http://www.semanlink.net/tag/crispr_cas9
+http://neurosciencenews.com/light-crispr-cas9-4917/|title|Using Light to Control Genome Editing – Neuroscience News
+http://neurosciencenews.com/light-crispr-cas9-4917/|creationTime|2016-08-27T10:22:40Z
+http://bblfish.net/coop/|creationDate|2015-10-18
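The SPARQL bookmark above is a small how-to in itself: find the DBpedia resource whose triples point at a given Wikipedia page. A sketch of running the same query programmatically, assuming the SPARQLWrapper package and a reachable DBpedia endpoint:

```python
# Run the bookmarked query against DBpedia (SPARQLWrapper assumed).
from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
    SELECT ?subject ?p
    WHERE { ?subject ?p <http://de.wikipedia.org/wiki/Berlin> }
""")
sparql.setReturnFormat(JSON)

# Each binding names a DBpedia resource and the property linking it
# to the Wikipedia page
for binding in sparql.query().convert()["results"]["bindings"]:
    print(binding["subject"]["value"], binding["p"]["value"])
```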
+http://bblfish.net/coop/|tag|http://www.semanlink.net/tag/henry_story +http://bblfish.net/coop/|title|co-operating.systems +http://bblfish.net/coop/|creationTime|2015-10-18T19:51:16Z +http://fr.slideshare.net/julienplu/extraction-de-lasemantique|creationDate|2014-06-18 +http://fr.slideshare.net/julienplu/extraction-de-lasemantique|tag|http://www.semanlink.net/tag/named_entity_recognition +http://fr.slideshare.net/julienplu/extraction-de-lasemantique|tag|http://www.semanlink.net/tag/nlp_sem_web +http://fr.slideshare.net/julienplu/extraction-de-lasemantique|tag|http://www.semanlink.net/tag/orange +http://fr.slideshare.net/julienplu/extraction-de-lasemantique|tag|http://www.semanlink.net/tag/part_of_speech_tagging +http://fr.slideshare.net/julienplu/extraction-de-lasemantique|title|Extraction de la semantique +http://fr.slideshare.net/julienplu/extraction-de-lasemantique|creationTime|2014-06-18T09:29:38Z +http://semanticweb.com/linked-data-on-the-web-workshop-at-www-2012_b28328|creationDate|2012-04-20 +http://semanticweb.com/linked-data-on-the-web-workshop-at-www-2012_b28328|tag|http://www.semanlink.net/tag/data_web +http://semanticweb.com/linked-data-on-the-web-workshop-at-www-2012_b28328|tag|http://www.semanlink.net/tag/linked_data +http://semanticweb.com/linked-data-on-the-web-workshop-at-www-2012_b28328|tag|http://www.semanlink.net/tag/workshop +http://semanticweb.com/linked-data-on-the-web-workshop-at-www-2012_b28328|tag|http://www.semanlink.net/tag/www_2012 +http://semanticweb.com/linked-data-on-the-web-workshop-at-www-2012_b28328|title|Linked Data on the Web Workshop at WWW 2012 - semanticweb.com +http://semanticweb.com/linked-data-on-the-web-workshop-at-www-2012_b28328|creationTime|2012-04-20T01:26:08Z +http://www.lemonde.fr/opinions/chronique/2009/07/20/et-si-on-rentrait-tous-chez-nous-pensees-d-une-immigree_1220598_3232.html|creationDate|2009-07-21 +http://www.lemonde.fr/opinions/chronique/2009/07/20/et-si-on-rentrait-tous-chez-nous-pensees-d-une-immigree_1220598_3232.html|tag|http://www.semanlink.net/tag/immigration +http://www.lemonde.fr/opinions/chronique/2009/07/20/et-si-on-rentrait-tous-chez-nous-pensees-d-une-immigree_1220598_3232.html|tag|http://www.semanlink.net/tag/liberte_egalite_fraternite +http://www.lemonde.fr/opinions/chronique/2009/07/20/et-si-on-rentrait-tous-chez-nous-pensees-d-une-immigree_1220598_3232.html|title|Et si on rentrait tous chez nous ? 
Pensées d’une immigrée - Opinions - Le Monde.fr +http://www.lemonde.fr/opinions/chronique/2009/07/20/et-si-on-rentrait-tous-chez-nous-pensees-d-une-immigree_1220598_3232.html|creationTime|2009-07-21T18:39:10Z +http://www.lemonde.fr/opinions/chronique/2009/07/20/et-si-on-rentrait-tous-chez-nous-pensees-d-une-immigree_1220598_3232.html|source|Le Monde +http://www.margueritte.fr/bloc/?p=496|creationDate|2016-06-09 +http://www.margueritte.fr/bloc/?p=496|tag|http://www.semanlink.net/tag/courtadon +http://www.margueritte.fr/bloc/?p=496|title|Pierre de Lave au Palais Royal, Paris BLOC +http://www.margueritte.fr/bloc/?p=496|creationTime|2016-06-09T22:41:31Z +http://www.youtube.com/watch?v=mXlyDwywq3Q|creationDate|2013-12-19 +http://www.youtube.com/watch?v=mXlyDwywq3Q|tag|http://www.semanlink.net/tag/chanson +http://www.youtube.com/watch?v=mXlyDwywq3Q|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=mXlyDwywq3Q|title|ZAZ On ira +http://www.youtube.com/watch?v=mXlyDwywq3Q|creationTime|2013-12-19T16:32:27Z +http://lists.w3.org/Archives/Public/public-lod/2009Apr/0105.html|creationDate|2009-04-28 +http://lists.w3.org/Archives/Public/public-lod/2009Apr/0105.html|tag|http://www.semanlink.net/tag/bnf +http://lists.w3.org/Archives/Public/public-lod/2009Apr/0105.html|tag|http://www.semanlink.net/tag/skos +http://lists.w3.org/Archives/Public/public-lod/2009Apr/0105.html|tag|http://www.semanlink.net/tag/linked_data +http://lists.w3.org/Archives/Public/public-lod/2009Apr/0105.html|comment|"Announce: release an *experimental* service [1] that provides the RAMEAU subject headings as open linked data. +Rameau [2] is the main subject vocabulary used at the French national library (BnF) and many other French institutions. This site, a result of the TELplus project, and a collaboration between the Vrije Universiteit Amsterdam and BnF, aims at encouraging experimentation with Rameau on the web of da" +http://lists.w3.org/Archives/Public/public-lod/2009Apr/0105.html|title|SKOS implementation: Rameau subjects as linked data +http://lists.w3.org/Archives/Public/public-lod/2009Apr/0105.html|creationTime|2009-04-28T11:11:15Z +http://www.nigerportal.com/home1/modules.php?name=News&file=article&sid=132|creationDate|2008-06-05 +http://www.nigerportal.com/home1/modules.php?name=News&file=article&sid=132|tag|http://www.semanlink.net/tag/niger +http://www.nigerportal.com/home1/modules.php?name=News&file=article&sid=132|tag|http://www.semanlink.net/tag/moussa_poussi +http://www.nigerportal.com/home1/modules.php?name=News&file=article&sid=132|tag|http://www.semanlink.net/tag/niamey +http://www.nigerportal.com/home1/modules.php?name=News&file=article&sid=132|title|Nigerportal le portail du Niger - Décès à Niamey du chanteur Moussa Poussy +http://www.nigerportal.com/home1/modules.php?name=News&file=article&sid=132|creationTime|2008-06-05T22:29:27Z +http://www.wired.com/wiredscience/2013/07/solitary-confinement-2/|creationDate|2013-07-11 +http://www.wired.com/wiredscience/2013/07/solitary-confinement-2/|tag|http://www.semanlink.net/tag/torture +http://www.wired.com/wiredscience/2013/07/solitary-confinement-2/|tag|http://www.semanlink.net/tag/jail +http://www.wired.com/wiredscience/2013/07/solitary-confinement-2/|tag|http://www.semanlink.net/tag/usa +http://www.wired.com/wiredscience/2013/07/solitary-confinement-2/|comment|What’s emerged from the reports and testimonies reads like a mix of medieval cruelty and sci-fi dystopia. 
+http://www.wired.com/wiredscience/2013/07/solitary-confinement-2/|title|The Horrible Psychology of Solitary Confinement Wired Science Wired.com +http://www.wired.com/wiredscience/2013/07/solitary-confinement-2/|creationTime|2013-07-11T01:27:19Z +https://moz.com/blog/301-redirection-rules-for-seo|creationDate|2017-10-19 +https://moz.com/blog/301-redirection-rules-for-seo|tag|http://www.semanlink.net/tag/http_redirect +https://moz.com/blog/301-redirection-rules-for-seo|tag|http://www.semanlink.net/tag/google_seo +https://moz.com/blog/301-redirection-rules-for-seo|title|301 Redirects Rules Change: What You Need to Know for SEO - Moz +https://moz.com/blog/301-redirection-rules-for-seo|creationTime|2017-10-19T22:35:06Z +http://www.w3.org/TR/xhtml-rdfa-primer/|creationDate|2006-05-23 +http://www.w3.org/TR/xhtml-rdfa-primer/|tag|http://www.semanlink.net/tag/rdfa +http://www.w3.org/TR/xhtml-rdfa-primer/|title|RDF/A Primer 1.0: Embedding RDF in XHTML +http://apassant.net/files/publications/these-apassant.pdf|creationDate|2009-07-07 +http://apassant.net/files/publications/these-apassant.pdf|tag|http://www.semanlink.net/tag/alexandre_passant +http://apassant.net/files/publications/these-apassant.pdf|tag|http://www.semanlink.net/tag/semantic_enterprise +http://apassant.net/files/publications/these-apassant.pdf|title|"""Technologies du Web Sémantique pour l'Entreprise 2.0"" - Thèse - Alexandre Passant" +http://apassant.net/files/publications/these-apassant.pdf|creationTime|2009-07-07T19:25:36Z +https://www.dropbox.com/s/y59petiffzq63gt/main.pdf?dl=0|creationDate|2018-08-08 +https://www.dropbox.com/s/y59petiffzq63gt/main.pdf?dl=0|tag|http://www.semanlink.net/tag/sanjeev_arora +https://www.dropbox.com/s/y59petiffzq63gt/main.pdf?dl=0|title|Mathematics of Machine Learning: An introduction +https://www.dropbox.com/s/y59petiffzq63gt/main.pdf?dl=0|creationTime|2018-08-08T13:53:29Z +http://www.semanlink.net/doc/2019/05/introducing_fastbert_a_simple|creationDate|2019-05-23 +http://www.semanlink.net/doc/2019/05/introducing_fastbert_a_simple|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/05/introducing_fastbert_a_simple|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2019/05/introducing_fastbert_a_simple|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2019/05/introducing_fastbert_a_simple|title|Introducing FastBert — A simple Deep Learning library for BERT Models +http://www.semanlink.net/doc/2019/05/introducing_fastbert_a_simple|bookmarkOf|https://medium.com/huggingface/introducing-fastbert-a-simple-deep-learning-library-for-bert-models-89ff763ad384 +http://www.semanlink.net/doc/2019/05/introducing_fastbert_a_simple|creationTime|2019-05-23T08:23:28Z +http://umbel.org/intro.xhtml|creationDate|2008-04-25 +http://umbel.org/intro.xhtml|tag|http://www.semanlink.net/tag/umbel +http://umbel.org/intro.xhtml|title|UMBEL Intro +http://umbel.org/intro.xhtml|creationTime|2008-04-25T08:59:50Z +http://semtechbizberlin2012.semanticweb.com/|creationDate|2011-11-02 +http://semtechbizberlin2012.semanticweb.com/|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://semtechbizberlin2012.semanticweb.com/|title|SEMTECHBIZ Berlin 2012: The Semantic Tech & Business Conference +http://semtechbizberlin2012.semanticweb.com/|creationTime|2011-11-02T23:21:35Z +https://blogs.msdn.microsoft.com/ieinternals/2009/06/17/vary-with-care/|creationDate|2016-03-29 
+https://blogs.msdn.microsoft.com/ieinternals/2009/06/17/vary-with-care/|tag|http://www.semanlink.net/tag/vary_header +https://blogs.msdn.microsoft.com/ieinternals/2009/06/17/vary-with-care/|tag|http://www.semanlink.net/tag/internet_explorer +https://blogs.msdn.microsoft.com/ieinternals/2009/06/17/vary-with-care/|title|Vary with Care IEInternals +https://blogs.msdn.microsoft.com/ieinternals/2009/06/17/vary-with-care/|creationTime|2016-03-29T16:52:26Z +http://www.slideshare.net/terraces/semantic-enterprise-20-enabling-semantic-web-technologies-in-enterprise-20-environment|creationDate|2010-05-31 +http://www.slideshare.net/terraces/semantic-enterprise-20-enabling-semantic-web-technologies-in-enterprise-20-environment|tag|http://www.semanlink.net/tag/deri +http://www.slideshare.net/terraces/semantic-enterprise-20-enabling-semantic-web-technologies-in-enterprise-20-environment|tag|http://www.semanlink.net/tag/semantic_web_web_2_0 +http://www.slideshare.net/terraces/semantic-enterprise-20-enabling-semantic-web-technologies-in-enterprise-20-environment|tag|http://www.semanlink.net/tag/alexandre_passant +http://www.slideshare.net/terraces/semantic-enterprise-20-enabling-semantic-web-technologies-in-enterprise-20-environment|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.slideshare.net/terraces/semantic-enterprise-20-enabling-semantic-web-technologies-in-enterprise-20-environment|tag|http://www.semanlink.net/tag/slides +http://www.slideshare.net/terraces/semantic-enterprise-20-enabling-semantic-web-technologies-in-enterprise-20-environment|title|Semantic Enterprise 2.0 - Enabling Semantic Web technologies +http://www.slideshare.net/terraces/semantic-enterprise-20-enabling-semantic-web-technologies-in-enterprise-20-environment|creationTime|2010-05-31T09:30:31Z +http://www.wired.com/wiredenterprise/2013/06/observos-internet-of-places/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|creationDate|2013-07-11 +http://www.wired.com/wiredenterprise/2013/06/observos-internet-of-places/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|tag|http://www.semanlink.net/tag/arduino +http://www.wired.com/wiredenterprise/2013/06/observos-internet-of-places/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|title|Ex-Restaurant Man Erects the 'Internet of Places' Wired Enterprise Wired.com +http://www.wired.com/wiredenterprise/2013/06/observos-internet-of-places/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|creationTime|2013-07-11T08:54:53Z +https://www.youtube.com/watch?v=TnokFAwi1yc|creationDate|2015-08-07 +https://www.youtube.com/watch?v=TnokFAwi1yc|tag|http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003 +https://www.youtube.com/watch?v=TnokFAwi1yc|title|Eunice Barber, Paris 2003 Long Jump Women +https://www.youtube.com/watch?v=TnokFAwi1yc|creationTime|2015-08-07T19:25:20Z +http://news.com.com/2102-1041_3-6020603.html?tag=st.util.print|creationDate|2006-01-06 +http://news.com.com/2102-1041_3-6020603.html?tag=st.util.print|tag|http://www.semanlink.net/tag/robotique +http://news.com.com/2102-1041_3-6020603.html?tag=st.util.print|tag|http://www.semanlink.net/tag/lego +http://news.com.com/2102-1041_3-6020603.html?tag=st.util.print|comment|A sophisticated and open-ended robot development kit masquerading as a kid's toy. 
+http://news.com.com/2102-1041_3-6020603.html?tag=st.util.print|title|Lego Mindstorms no kids' toy CNET News.com +http://semanticweb.com/discovering-french-monuments-with-the-help-of-the-semantic-web_b25564|creationDate|2011-12-26 +http://semanticweb.com/discovering-french-monuments-with-the-help-of-the-semantic-web_b25564|tag|http://www.semanlink.net/tag/monuments_historiques +http://semanticweb.com/discovering-french-monuments-with-the-help-of-the-semantic-web_b25564|tag|http://www.semanlink.net/tag/antidot +http://semanticweb.com/discovering-french-monuments-with-the-help-of-the-semantic-web_b25564|tag|http://www.semanlink.net/tag/sem_web_demo +http://semanticweb.com/discovering-french-monuments-with-the-help-of-the-semantic-web_b25564|comment|An app to discover 44,000 French monuments. Completed in four days. By one person. Without coding. +http://semanticweb.com/discovering-french-monuments-with-the-help-of-the-semantic-web_b25564|title|Discovering French Monuments, With the Help of the Semantic Web - semanticweb.com +http://semanticweb.com/discovering-french-monuments-with-the-help-of-the-semantic-web_b25564|creationTime|2011-12-26T18:08:34Z +http://www.corequant.com/?p=1|creationDate|2013-09-11 +http://www.corequant.com/?p=1|tag|http://www.semanlink.net/tag/sentiment_analysis +http://www.corequant.com/?p=1|tag|http://www.semanlink.net/tag/tutorial +http://www.corequant.com/?p=1|tag|http://www.semanlink.net/tag/rapidminer +http://www.corequant.com/?p=1|title|Sentiment Analysis in RapidMiner / Technology Blog +http://www.corequant.com/?p=1|creationTime|2013-09-11T16:56:50Z +https://www.franceculture.fr/architecture/untergunther-reparateurs-clandestins-du-patrimoine|creationDate|2017-02-04 +https://www.franceculture.fr/architecture/untergunther-reparateurs-clandestins-du-patrimoine|tag|http://www.semanlink.net/tag/insolite +https://www.franceculture.fr/architecture/untergunther-reparateurs-clandestins-du-patrimoine|tag|http://www.semanlink.net/tag/patrimoine +https://www.franceculture.fr/architecture/untergunther-reparateurs-clandestins-du-patrimoine|tag|http://www.semanlink.net/tag/pantheon_paris +https://www.franceculture.fr/architecture/untergunther-reparateurs-clandestins-du-patrimoine|title|Untergunther, réparateurs clandestins du patrimoine +https://www.franceculture.fr/architecture/untergunther-reparateurs-clandestins-du-patrimoine|creationTime|2017-02-04T15:08:56Z +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|creationDate|2018-01-05 +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|tag|http://www.semanlink.net/tag/knowledge_graph_completion +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|tag|http://www.semanlink.net/tag/microsoft_research +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|tag|http://www.semanlink.net/tag/combining_word_and_entity_embeddings +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|tag|http://www.semanlink.net/tag/these_irit_renault_biblio_initiale +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|tag|http://www.semanlink.net/tag/nlp_microsoft +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|comment|"method of **jointly embedding knowledge graphs and a text corpus** so that **entities and words/phrases are represented in the same vector space**. 
+ +Promising improvement in the accuracy of predicting facts, compared to separately embedding knowledge graphs and text (in particular, enables the prediction of facts containing entities out of the knowledge graph) + +[cité par J. Moreno](/doc/?uri=https%3A%2F%2Fhal.archives-ouvertes.fr%2Fhal-01626196%2Fdocument) + +" +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|relatedDoc|https://hal.archives-ouvertes.fr/hal-01626196/document +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|title|Knowledge Graph and Text Jointly Embedding (2014) +http://emnlp2014.org/papers/pdf/EMNLP2014167.pdf|creationTime|2018-01-05T15:41:19Z +http://www.w3.org/2011/09/LinkedData/ledp2011_submission_13.pdf|creationDate|2011-12-17 +http://www.w3.org/2011/09/LinkedData/ledp2011_submission_13.pdf|tag|http://www.semanlink.net/tag/identity_crisis_in_linked_data +http://www.w3.org/2011/09/LinkedData/ledp2011_submission_13.pdf|tag|http://www.semanlink.net/tag/ora_lassila +http://www.w3.org/2011/09/LinkedData/ledp2011_submission_13.pdf|title|Identity Crisis in Linked Data +http://www.w3.org/2011/09/LinkedData/ledp2011_submission_13.pdf|creationTime|2011-12-17T15:20:31Z +http://www.freakzion.com/index.php/The-Blog-November-2014/jsapps-101-angularjs-in-a-nutshell.html|creationDate|2015-08-29 +http://www.freakzion.com/index.php/The-Blog-November-2014/jsapps-101-angularjs-in-a-nutshell.html|tag|http://www.semanlink.net/tag/tutorial +http://www.freakzion.com/index.php/The-Blog-November-2014/jsapps-101-angularjs-in-a-nutshell.html|tag|http://www.semanlink.net/tag/angularjs +http://www.freakzion.com/index.php/The-Blog-November-2014/jsapps-101-angularjs-in-a-nutshell.html|title|JSApps 101: AngularJS In A Nutshell +http://www.freakzion.com/index.php/The-Blog-November-2014/jsapps-101-angularjs-in-a-nutshell.html|creationTime|2015-08-29T16:49:22Z +http://del.icio.us/|creationDate|2005-04-04 +http://del.icio.us/|tag|http://www.semanlink.net/tag/bookmarks +http://del.icio.us/|tag|http://www.semanlink.net/tag/semantic_web_sites +http://del.icio.us/|tag|http://www.semanlink.net/tag/del_icio +http://del.icio.us/|title|http://del.icio.us +http://del.icio.us/|creationTime|2005-04-04T22:00:00Z +https://stackoverflow.com/questions/6550700/inversion-of-control-vs-dependency-injection?rq=1|creationDate|2018-08-04 +https://stackoverflow.com/questions/6550700/inversion-of-control-vs-dependency-injection?rq=1|tag|http://www.semanlink.net/tag/inversion_of_control +https://stackoverflow.com/questions/6550700/inversion-of-control-vs-dependency-injection?rq=1|tag|http://www.semanlink.net/tag/dependency_injection +https://stackoverflow.com/questions/6550700/inversion-of-control-vs-dependency-injection?rq=1|title|Inversion of Control vs Dependency Injection - Stack Overflow +https://stackoverflow.com/questions/6550700/inversion-of-control-vs-dependency-injection?rq=1|creationTime|2018-08-04T22:44:28Z +http://www.thefigtrees.net/lee/blog/2009/07/constructing_quads.html|creationDate|2009-07-08 +http://www.thefigtrees.net/lee/blog/2009/07/constructing_quads.html|tag|http://www.semanlink.net/tag/quads +http://www.thefigtrees.net/lee/blog/2009/07/constructing_quads.html|tag|http://www.semanlink.net/tag/anzo +http://www.thefigtrees.net/lee/blog/2009/07/constructing_quads.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2009/07/constructing_quads.html|title|CONSTRUCTing Quads - TechnicaLee Speaking +http://www.thefigtrees.net/lee/blog/2009/07/constructing_quads.html|creationTime|2009-07-08T16:41:31Z 
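The EMNLP 2014 note above describes embedding a knowledge graph and a text corpus so that entities and words share one vector space. The paper's alignment objective is more involved than this, but a rough approximation of the shared-space idea (explicitly not the paper's method) is to train a single skip-gram model over text in which entity mentions are replaced by entity identifiers. A toy sketch, assuming gensim 4; the sentences and identifiers are invented:

```python
# Rough shared-space approximation (not the EMNLP 2014 objective):
# one skip-gram model over text whose entity mentions were replaced
# by entity identifiers, so words and entities co-occur in one space.
from gensim.models import Word2Vec

sentences = [
    ["ENTITY/semantic_web", "links", "data", "on", "the", "web"],
    ["linked", "data", "uses", "ENTITY/rdf", "triples"],
    ["ENTITY/rdf", "is", "a", "graph", "data", "model"],
]

model = Word2Vec(sentences, vector_size=16, window=3,
                 min_count=1, sg=1, epochs=50)

# Entities and plain words can now be compared directly
print(model.wv.similarity("ENTITY/rdf", "data"))
```

Because entities occur as ordinary tokens, `model.wv` can compare an entity against a word directly, which is the property the joint-embedding work above exploits for predicting facts about entities outside the knowledge graph.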
+https://www.theguardian.com/politics/2017/feb/26/robert-mercer-breitbart-war-on-media-steve-bannon-donald-trump-nigel-farage|creationDate|2017-02-26 +https://www.theguardian.com/politics/2017/feb/26/robert-mercer-breitbart-war-on-media-steve-bannon-donald-trump-nigel-farage|tag|http://www.semanlink.net/tag/big_data +https://www.theguardian.com/politics/2017/feb/26/robert-mercer-breitbart-war-on-media-steve-bannon-donald-trump-nigel-farage|tag|http://www.semanlink.net/tag/mainstream_media +https://www.theguardian.com/politics/2017/feb/26/robert-mercer-breitbart-war-on-media-steve-bannon-donald-trump-nigel-farage|tag|http://www.semanlink.net/tag/trump +https://www.theguardian.com/politics/2017/feb/26/robert-mercer-breitbart-war-on-media-steve-bannon-donald-trump-nigel-farage|title|Robert Mercer: the big data billionaire waging war on mainstream media Politics The Guardian +https://www.theguardian.com/politics/2017/feb/26/robert-mercer-breitbart-war-on-media-steve-bannon-donald-trump-nigel-farage|creationTime|2017-02-26T16:21:37Z +http://www.techempower.com/blog/2013/03/26/everything-about-java-8/|creationDate|2013-03-28 +http://www.techempower.com/blog/2013/03/26/everything-about-java-8/|tag|http://www.semanlink.net/tag/java_8 +http://www.techempower.com/blog/2013/03/26/everything-about-java-8/|title|Everything about Java 8 - TechEmpower Blog +http://www.techempower.com/blog/2013/03/26/everything-about-java-8/|creationTime|2013-03-28T23:49:09Z +https://junyanz.github.io/CycleGAN/|creationDate|2017-06-09 +https://junyanz.github.io/CycleGAN/|tag|http://www.semanlink.net/tag/generative_adversarial_network +https://junyanz.github.io/CycleGAN/|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +https://junyanz.github.io/CycleGAN/|title|Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks +https://junyanz.github.io/CycleGAN/|creationTime|2017-06-09T17:46:05Z +http://www.calacademy.org/|creationDate|2006-05-23 +http://www.calacademy.org/|tag|http://www.semanlink.net/tag/museum_d_histoire_naturelle +http://www.calacademy.org/|comment|Natural History Museum +http://www.calacademy.org/|title|California Academy of Sciences +http://www.hlt.utdallas.edu/~vince/papers/coling10-keyphrase.pdf|creationDate|2017-06-14 +http://www.hlt.utdallas.edu/~vince/papers/coling10-keyphrase.pdf|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.hlt.utdallas.edu/~vince/papers/coling10-keyphrase.pdf|comment|"evaluation and analysis of Unsupervised Keyphrase Extraction algorithms on a variety of standard evaluation dataset. 
+ +Author Kazi Saidul Hasan provides C++ [implementations of the discussed algos](http://www.hlt.utdallas.edu/~saidul/code.html) (Tf-Idf, TextRank, SingleRank, ExpandRank) + +[by same author](/doc/?uri=http%3A%2F%2Facl2014.org%2Facl2014%2FP14-1%2Fpdf%2FP14-1119.pdf) + + + +" +http://www.hlt.utdallas.edu/~vince/papers/coling10-keyphrase.pdf|relatedDoc|http://acl2014.org/acl2014/P14-1/pdf/P14-1119.pdf +http://www.hlt.utdallas.edu/~vince/papers/coling10-keyphrase.pdf|title|Conundrums in Unsupervised Keyphrase Extraction: Making Sense of the State-of-the-Art (2010) +http://www.hlt.utdallas.edu/~vince/papers/coling10-keyphrase.pdf|creationTime|2017-06-14T00:51:23Z +http://www.hlt.utdallas.edu/~vince/papers/coling10-keyphrase.pdf|date|2010 +http://nonaedvige.ras.eu.org/|creationDate|2008-09-03 +http://nonaedvige.ras.eu.org/|tag|http://www.semanlink.net/tag/edvige +http://nonaedvige.ras.eu.org/|tag|http://www.semanlink.net/tag/fichage +http://nonaedvige.ras.eu.org/|tag|http://www.semanlink.net/tag/etat_policier +http://nonaedvige.ras.eu.org/|tag|http://www.semanlink.net/tag/petition +http://nonaedvige.ras.eu.org/|tag|http://www.semanlink.net/tag/sarkozy +http://nonaedvige.ras.eu.org/|comment|"""They who can give up essential liberty to obtain a little temporary safety, deserve neither liberty nor safety."" (Benjamin Franklin)
+Et si le gouvernement de Vichy avait hérité d'un fichier Edvige ? + + +" +http://nonaedvige.ras.eu.org/|title|Pour obtenir l'abandon du fichier EDVIGE +http://nonaedvige.ras.eu.org/|creationTime|2008-09-03T21:27:23Z +http://www.aclweb.org/anthology/D14-1181|creationDate|2017-11-07 +http://www.aclweb.org/anthology/D14-1181|tag|http://www.semanlink.net/tag/frequently_cited_paper +http://www.aclweb.org/anthology/D14-1181|tag|http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp +http://www.aclweb.org/anthology/D14-1181|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.aclweb.org/anthology/D14-1181|comment|"experiments with convolutional neural networks (CNN) trained on top of pre-trained word vectors for sentence-level classification tasks. + +[Github project](https://github.com/yoonkim/CNN_sentence) with code, updates to paper, and links to valuable resources, such as a [Denny Britz](/tag/denny_britz)'s [implementation in TensorFlow](https://github.com/dennybritz/cnn-text-classification-tf) + +" +http://www.aclweb.org/anthology/D14-1181|relatedDoc|https://datawarrior.wordpress.com/2016/10/12/short-text-categorization-using-deep-neural-networks-and-word-embedding-models/ +http://www.aclweb.org/anthology/D14-1181|title|Convolutional Neural Networks for Sentence Classification (2014) +http://www.aclweb.org/anthology/D14-1181|creationTime|2017-11-07T09:47:58Z +http://www.csie.ntu.edu.tw/~cjlin/libshorttext/|creationDate|2014-03-25 +http://www.csie.ntu.edu.tw/~cjlin/libshorttext/|tag|http://www.semanlink.net/tag/libshorttext +http://www.csie.ntu.edu.tw/~cjlin/libshorttext/|comment|by the Machine Learning Group at National Taiwan University +http://www.csie.ntu.edu.tw/~cjlin/libshorttext/|title|LibShortText: A Library for Short-text Classification and Analysis +http://www.csie.ntu.edu.tw/~cjlin/libshorttext/|creationTime|2014-03-25T11:09:28Z +https://github.com/hubgit/md-ld|creationDate|2015-10-04 +https://github.com/hubgit/md-ld|tag|http://www.semanlink.net/tag/json_ld +https://github.com/hubgit/md-ld|tag|http://www.semanlink.net/tag/markdown +https://github.com/hubgit/md-ld|tag|http://www.semanlink.net/tag/github_project +https://github.com/hubgit/md-ld|comment|MD-LD extends Markdown's reference link syntax to allow easy authoring of structured data. 
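The Kim (2014) note above (convolutions over pre-trained word vectors for sentence-level classification) reduces to a short model. A minimal sketch, assuming PyTorch; the vocabulary size, filter counts and random input are placeholders, and in the paper the embedding table would be initialized from word2vec rather than at random:

```python
# Minimal CNN-for-sentence-classification sketch (PyTorch assumed).
import torch
import torch.nn as nn

class TextCNN(nn.Module):
    def __init__(self, vocab_size=1000, emb_dim=50, n_filters=16,
                 kernel_sizes=(3, 4, 5), n_classes=2):
        super().__init__()
        # In the paper this table is initialized from pre-trained word vectors
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        self.convs = nn.ModuleList(
            [nn.Conv1d(emb_dim, n_filters, k) for k in kernel_sizes])
        self.fc = nn.Linear(n_filters * len(kernel_sizes), n_classes)

    def forward(self, token_ids):              # (batch, seq_len)
        x = self.embedding(token_ids)          # (batch, seq_len, emb_dim)
        x = x.transpose(1, 2)                  # Conv1d wants channels first
        # One feature map per kernel size, max-pooled over time
        pooled = [conv(x).relu().max(dim=2).values for conv in self.convs]
        return self.fc(torch.cat(pooled, dim=1))

logits = TextCNN()(torch.randint(0, 1000, (4, 20)))  # 4 sentences, 20 tokens
print(logits.shape)  # torch.Size([4, 2])
```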
+https://github.com/hubgit/md-ld|title|MD-LD +https://github.com/hubgit/md-ld|creationTime|2015-10-04T11:40:05Z +http://stackoverflow.com/questions/16042885/swagger-hashmap-property-type|creationDate|2017-04-20 +http://stackoverflow.com/questions/16042885/swagger-hashmap-property-type|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/16042885/swagger-hashmap-property-type|title|api - Swagger HashMap property type - Stack Overflow +http://stackoverflow.com/questions/16042885/swagger-hashmap-property-type|creationTime|2017-04-20T17:07:50Z +http://www.lemonde.fr/sciences/article/2013/02/28/deux-rats-separes-par-un-continent-mais-relies-par-le-cerveau_1840845_1650684.html|creationDate|2013-02-28 +http://www.lemonde.fr/sciences/article/2013/02/28/deux-rats-separes-par-un-continent-mais-relies-par-le-cerveau_1840845_1650684.html|tag|http://www.semanlink.net/tag/cerveau +http://www.lemonde.fr/sciences/article/2013/02/28/deux-rats-separes-par-un-continent-mais-relies-par-le-cerveau_1840845_1650684.html|tag|http://www.semanlink.net/tag/brain_to_brain_interface +http://www.lemonde.fr/sciences/article/2013/02/28/deux-rats-separes-par-un-continent-mais-relies-par-le-cerveau_1840845_1650684.html|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.lemonde.fr/sciences/article/2013/02/28/deux-rats-separes-par-un-continent-mais-relies-par-le-cerveau_1840845_1650684.html|title|Deux rats séparés par un continent mais reliés par le cerveau +http://www.lemonde.fr/sciences/article/2013/02/28/deux-rats-separes-par-un-continent-mais-relies-par-le-cerveau_1840845_1650684.html|creationTime|2013-02-28T22:51:37Z +http://www.datasciencecentral.com/profiles/blogs/great-list-of-resources-nosql-big-data-ml-and-much-more-posted-on?overrideMobileRedirect=1|creationDate|2014-07-23 +http://www.datasciencecentral.com/profiles/blogs/great-list-of-resources-nosql-big-data-ml-and-much-more-posted-on?overrideMobileRedirect=1|tag|http://www.semanlink.net/tag/big_data +http://www.datasciencecentral.com/profiles/blogs/great-list-of-resources-nosql-big-data-ml-and-much-more-posted-on?overrideMobileRedirect=1|tag|http://www.semanlink.net/tag/machine_learning_tool +http://www.datasciencecentral.com/profiles/blogs/great-list-of-resources-nosql-big-data-ml-and-much-more-posted-on?overrideMobileRedirect=1|tag|http://www.semanlink.net/tag/machine_learning +http://www.datasciencecentral.com/profiles/blogs/great-list-of-resources-nosql-big-data-ml-and-much-more-posted-on?overrideMobileRedirect=1|tag|http://www.semanlink.net/tag/nosql +http://www.datasciencecentral.com/profiles/blogs/great-list-of-resources-nosql-big-data-ml-and-much-more-posted-on?overrideMobileRedirect=1|title|Great list of resources - NoSQL, Big Data, Machine Learning and more GitHub - Data Science Central +http://www.datasciencecentral.com/profiles/blogs/great-list-of-resources-nosql-big-data-ml-and-much-more-posted-on?overrideMobileRedirect=1|creationTime|2014-07-23T19:35:28Z +http://worrydream.com/TheWebOfAlexandria/|creationDate|2015-07-14 +http://worrydream.com/TheWebOfAlexandria/|tag|http://www.semanlink.net/tag/web +http://worrydream.com/TheWebOfAlexandria/|tag|http://www.semanlink.net/tag/library_of_alexandria +http://worrydream.com/TheWebOfAlexandria/|title|The Web of Alexandria +http://worrydream.com/TheWebOfAlexandria/|creationTime|2015-07-14T23:48:06Z +https://blog.sicara.com/train-ner-model-with-nltk-stanford-tagger-english-french-german-6d90573a9486|creationDate|2018-05-20 
+https://blog.sicara.com/train-ner-model-with-nltk-stanford-tagger-english-french-german-6d90573a9486|tag|http://www.semanlink.net/tag/nltk +https://blog.sicara.com/train-ner-model-with-nltk-stanford-tagger-english-french-german-6d90573a9486|tag|http://www.semanlink.net/tag/stanford_ner +https://blog.sicara.com/train-ner-model-with-nltk-stanford-tagger-english-french-german-6d90573a9486|title|How to Train your Own Model with NLTK and Stanford NER Tagger? (for English, French, German…) +https://blog.sicara.com/train-ner-model-with-nltk-stanford-tagger-english-french-german-6d90573a9486|creationTime|2018-05-20T15:33:35Z +http://www.cigref.fr/wp/wp-content/uploads/2016/09/Gouvernance-IA-CIGREF-LEXING-2016.pdf|creationDate|2017-12-14 +http://www.cigref.fr/wp/wp-content/uploads/2016/09/Gouvernance-IA-CIGREF-LEXING-2016.pdf|title|Gouvernance de l'intelligence artificielle dans les grandes entreprises +http://www.cigref.fr/wp/wp-content/uploads/2016/09/Gouvernance-IA-CIGREF-LEXING-2016.pdf|creationTime|2017-12-14T21:32:21Z +http://www.irt.org/script/script.htm|creationDate|2006-07-27 +http://www.irt.org/script/script.htm|tag|http://www.semanlink.net/tag/javascript_and_tutorial +http://www.irt.org/script/script.htm|tag|http://www.semanlink.net/tag/faq +http://www.irt.org/script/script.htm|title|irt.org - JavaScript FAQ Knowledge Base +http://arxiv.org/abs/1601.07752|creationDate|2016-05-28 +http://arxiv.org/abs/1601.07752|tag|http://www.semanlink.net/tag/polynomial +http://arxiv.org/abs/1601.07752|tag|http://www.semanlink.net/tag/jean_paul +http://arxiv.org/abs/1601.07752|tag|http://www.semanlink.net/tag/algorithmes +http://arxiv.org/abs/1601.07752|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/1601.07752|arxiv_author|Victor Y. Pan +http://arxiv.org/abs/1601.07752|title|[1601.07752] Enhancing the Power of Cardinal's Algorithm +http://arxiv.org/abs/1601.07752|creationTime|2016-05-28T09:14:36Z +http://arxiv.org/abs/1601.07752|arxiv_summary|"Cardinal's factorization algorithm of 1996 splits a univariate polynomial +into two factors with root sets separated by the imaginary axis, which is an +important goal itself and a basic step toward root-finding. The novelty of the +algorithm and its potential power have been well recognized by experts +immediately, but by 2016, that is, two decades later, its practical value still +remains nil, particularly because of the high computational cost of performing +its final stage by means of computing approximate greatest common divisor of +two polynomials. We briefly recall Cardinal's algorithm and its difficulties, +amend it based on some works performed since 1996, extend its power to +splitting out factors of a more general class, and reduce the final stage of +the algorithm to quite manageable computations with structured matrices. Some +of our techniques can be of independent interest for matrix computations." +http://arxiv.org/abs/1601.07752|arxiv_firstAuthor|Victor Y. 
Pan +http://arxiv.org/abs/1601.07752|arxiv_updated|2017-04-13T15:53:50Z +http://arxiv.org/abs/1601.07752|arxiv_title|Enhancing the Power of Cardinal's Algorithm +http://arxiv.org/abs/1601.07752|arxiv_published|2016-01-28T13:30:37Z +http://arxiv.org/abs/1601.07752|arxiv_num|1601.07752 +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Similarity_Queries.ipynb|creationDate|2017-07-19 +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Similarity_Queries.ipynb|tag|http://www.semanlink.net/tag/text_similarity +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Similarity_Queries.ipynb|tag|http://www.semanlink.net/tag/latent_semantic_analysis +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Similarity_Queries.ipynb|tag|http://www.semanlink.net/tag/gensim +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Similarity_Queries.ipynb|tag|http://www.semanlink.net/tag/python_sample_code +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Similarity_Queries.ipynb|tag|http://www.semanlink.net/tag/nlp_sample_code +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Similarity_Queries.ipynb|comment|"> ""The thing to note here is that documents no. 2 would never be returned by a standard boolean fulltext search, because they do not share any common words with query string""" +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Similarity_Queries.ipynb|title|Gensim tutorial: Similarity Queries +https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Similarity_Queries.ipynb|creationTime|2017-07-19T14:54:26Z +http://www2.cnrs.fr/presse/communique/3633.htm|creationDate|2014-06-26 +http://www2.cnrs.fr/presse/communique/3633.htm|tag|http://www.semanlink.net/tag/gabon +http://www2.cnrs.fr/presse/communique/3633.htm|tag|http://www.semanlink.net/tag/histoire_de_la_vie +http://www2.cnrs.fr/presse/communique/3633.htm|tag|http://www.semanlink.net/tag/fossile +http://www2.cnrs.fr/presse/communique/3633.htm|comment|Les plus vieux fossiles d'organismes pluricellulaires jamais observés. Plus de 400 fossiles vieux de 2,1 milliards d'années dont des dizaines de nouveaux spécimens. 
+http://www2.cnrs.fr/presse/communique/3633.htm|title|La plus vieille biodiversité découverte dans un écosystème marin au Gabon - CNRS +http://www2.cnrs.fr/presse/communique/3633.htm|creationTime|2014-06-26T13:23:44Z +http://www.nytimes.com/2014/11/05/opinion/why-sand-is-disappearing.html?ref=international&_r=2|creationDate|2014-11-08 +http://www.nytimes.com/2014/11/05/opinion/why-sand-is-disappearing.html?ref=international&_r=2|tag|http://www.semanlink.net/tag/sand +http://www.nytimes.com/2014/11/05/opinion/why-sand-is-disappearing.html?ref=international&_r=2|title|Why Sand Is Disappearing - NYTimes.com +http://www.nytimes.com/2014/11/05/opinion/why-sand-is-disappearing.html?ref=international&_r=2|creationTime|2014-11-08T08:02:24Z +https://www.youtube.com/watch?v=nFCxTtBqF5U|creationDate|2018-05-29 +https://www.youtube.com/watch?v=nFCxTtBqF5U|tag|http://www.semanlink.net/tag/embeddings +https://www.youtube.com/watch?v=nFCxTtBqF5U|tag|http://www.semanlink.net/tag/chris_manning +https://www.youtube.com/watch?v=nFCxTtBqF5U|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=nFCxTtBqF5U|tag|http://www.semanlink.net/tag/meaning_in_nlp +https://www.youtube.com/watch?v=nFCxTtBqF5U|comment|"[Slides](/doc/?uri=https%3A%2F%2Fnlp.stanford.edu%2Fmanning%2Ftalks%2FSimons-Institute-Manning-2017.pdf) + +**What's special about human language? the only hope for explainable intelligence**. + +Symbols are not just an invention of logic / classical AI. + +Meaning: a solution via distributional similarity based representations. One of the most successfull ideas of modern NLP. + +> You shall know a word by the company it keeps (JR Firth 1957) + +The BiLSTM hegemony + +Neural Bag of words + +> ""Surprisingly effective for many tasks :-("" [cf ""DAN"", Deep Averaging Network, Iyyver et al.](/doc/?uri=http%3A%2F%2Fwww.cs.cornell.edu%2Fcourses%2Fcs5740%2F2016sp%2Fresources%2Fdans.pdf) + + +" +https://www.youtube.com/watch?v=nFCxTtBqF5U|relatedDoc|https://nlp.stanford.edu/manning/talks/Simons-Institute-Manning-2017.pdf +https://www.youtube.com/watch?v=nFCxTtBqF5U|relatedDoc|http://www.cs.cornell.edu/courses/cs5740/2016sp/resources/dans.pdf +https://www.youtube.com/watch?v=nFCxTtBqF5U|title|Representations for Language: From Word Embeddings to Sentence Meanings (2017) - YouTube +https://www.youtube.com/watch?v=nFCxTtBqF5U|creationTime|2018-05-29T12:33:55Z +http://www.lemonde.fr/sciences/article/2012/06/28/des-marees-sous-titan_1726205_1650684.html|creationDate|2012-06-30 +http://www.lemonde.fr/sciences/article/2012/06/28/des-marees-sous-titan_1726205_1650684.html|tag|http://www.semanlink.net/tag/vie_extraterrestre +http://www.lemonde.fr/sciences/article/2012/06/28/des-marees-sous-titan_1726205_1650684.html|tag|http://www.semanlink.net/tag/eau_extraterrestre +http://www.lemonde.fr/sciences/article/2012/06/28/des-marees-sous-titan_1726205_1650684.html|tag|http://www.semanlink.net/tag/titan +http://www.lemonde.fr/sciences/article/2012/06/28/des-marees-sous-titan_1726205_1650684.html|title|Des marées sous Titan, une lune de Saturne +http://www.lemonde.fr/sciences/article/2012/06/28/des-marees-sous-titan_1726205_1650684.html|creationTime|2012-06-30T00:58:26Z +http://vancouverdata.blogspot.fr/2010/11/text-analytics-with-rapidminer-loading.html|creationDate|2013-09-05 +http://vancouverdata.blogspot.fr/2010/11/text-analytics-with-rapidminer-loading.html|tag|http://www.semanlink.net/tag/tutorial 
+http://vancouverdata.blogspot.fr/2010/11/text-analytics-with-rapidminer-loading.html|tag|http://www.semanlink.net/tag/rapidminer +http://vancouverdata.blogspot.fr/2010/11/text-analytics-with-rapidminer-loading.html|tag|http://www.semanlink.net/tag/text_mining +http://vancouverdata.blogspot.fr/2010/11/text-analytics-with-rapidminer-loading.html|title|Vancouver Data Blog by Neil McGuigan: Text Analytics with RapidMiner Part 1 of 6 - Loading Text +http://vancouverdata.blogspot.fr/2010/11/text-analytics-with-rapidminer-loading.html|creationTime|2013-09-05T11:05:54Z +https://github.com/facebookresearch/fastText|creationDate|2017-06-28 +https://github.com/facebookresearch/fastText|tag|http://www.semanlink.net/tag/github_project +https://github.com/facebookresearch/fastText|tag|http://www.semanlink.net/tag/fasttext +https://github.com/facebookresearch/fastText|title|facebookresearch/fastText: Library for fast text representation and classification. +https://github.com/facebookresearch/fastText|creationTime|2017-06-28T01:02:51Z +http://www.scottsantens.com/should-the-amount-of-basic-income-vary-with-cost-of-living-differences|creationDate|2017-02-26 +http://www.scottsantens.com/should-the-amount-of-basic-income-vary-with-cost-of-living-differences|tag|http://www.semanlink.net/tag/guaranteed_basic_income +http://www.scottsantens.com/should-the-amount-of-basic-income-vary-with-cost-of-living-differences|title|Scott Santens - Should the Amount of Basic Income Vary With Cost of Living Differences? +http://www.scottsantens.com/should-the-amount-of-basic-income-vary-with-cost-of-living-differences|creationTime|2017-02-26T11:15:39Z +http://arxiv.org/pdf/1301.3781.pdf|creationDate|2016-01-13 +http://arxiv.org/pdf/1301.3781.pdf|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/pdf/1301.3781.pdf|tag|http://www.semanlink.net/tag/word2vec +http://arxiv.org/pdf/1301.3781.pdf|tag|http://www.semanlink.net/tag/word_embedding +http://arxiv.org/pdf/1301.3781.pdf|tag|http://www.semanlink.net/tag/tomas_mikolov +http://arxiv.org/pdf/1301.3781.pdf|arxiv_author|Kai Chen +http://arxiv.org/pdf/1301.3781.pdf|arxiv_author|Greg Corrado +http://arxiv.org/pdf/1301.3781.pdf|arxiv_author|Tomas Mikolov +http://arxiv.org/pdf/1301.3781.pdf|arxiv_author|Jeffrey Dean +http://arxiv.org/pdf/1301.3781.pdf|comment|"We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. +" +http://arxiv.org/pdf/1301.3781.pdf|title|[1301.3781] Efficient Estimation of Word Representations in Vector Space +http://arxiv.org/pdf/1301.3781.pdf|creationTime|2016-01-13T23:07:45Z +http://arxiv.org/pdf/1301.3781.pdf|arxiv_summary|"We propose two novel model architectures for computing continuous vector +representations of words from very large data sets. The quality of these +representations is measured in a word similarity task, and the results are +compared to the previously best performing techniques based on different types +of neural networks. 
We observe large improvements in accuracy at much lower +computational cost, i.e. it takes less than a day to learn high quality word +vectors from a 1.6 billion words data set. Furthermore, we show that these +vectors provide state-of-the-art performance on our test set for measuring +syntactic and semantic word similarities." +http://arxiv.org/pdf/1301.3781.pdf|arxiv_firstAuthor|Tomas Mikolov +http://arxiv.org/pdf/1301.3781.pdf|arxiv_updated|2013-09-07T00:30:40Z +http://arxiv.org/pdf/1301.3781.pdf|arxiv_title|Efficient Estimation of Word Representations in Vector Space +http://arxiv.org/pdf/1301.3781.pdf|arxiv_published|2013-01-16T18:24:43Z +http://arxiv.org/pdf/1301.3781.pdf|arxiv_num|1301.3781 +https://arxiv.org/pdf/1705.08039.pdf|creationDate|2017-12-16 +https://arxiv.org/pdf/1705.08039.pdf|tag|http://www.semanlink.net/tag/nlp_facebook +https://arxiv.org/pdf/1705.08039.pdf|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/pdf/1705.08039.pdf|tag|http://www.semanlink.net/tag/these_irit_renault_biblio_initiale +https://arxiv.org/pdf/1705.08039.pdf|tag|http://www.semanlink.net/tag/poincare_embeddings +https://arxiv.org/pdf/1705.08039.pdf|arxiv_author|Douwe Kiela +https://arxiv.org/pdf/1705.08039.pdf|arxiv_author|Maximilian Nickel +https://arxiv.org/pdf/1705.08039.pdf|comment|> While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space +https://arxiv.org/pdf/1705.08039.pdf|title|[1705.08039] Poincaré Embeddings for Learning Hierarchical Representations +https://arxiv.org/pdf/1705.08039.pdf|creationTime|2017-12-16T14:41:31Z +https://arxiv.org/pdf/1705.08039.pdf|arxiv_summary|"Representation learning has become an invaluable approach for learning from +symbolic data such as text and graphs. However, while complex symbolic datasets +often exhibit a latent hierarchical structure, state-of-the-art methods +typically learn embeddings in Euclidean vector spaces, which do not account for +this property. For this purpose, we introduce a new approach for learning +hierarchical representations of symbolic data by embedding them into hyperbolic +space -- or more precisely into an n-dimensional Poincar\'e ball. Due to the +underlying hyperbolic geometry, this allows us to learn parsimonious +representations of symbolic data by simultaneously capturing hierarchy and +similarity. We introduce an efficient algorithm to learn the embeddings based +on Riemannian optimization and show experimentally that Poincar\'e embeddings +outperform Euclidean embeddings significantly on data with latent hierarchies, +both in terms of representation capacity and in terms of generalization +ability." 
+https://arxiv.org/pdf/1705.08039.pdf|arxiv_firstAuthor|Maximilian Nickel +https://arxiv.org/pdf/1705.08039.pdf|arxiv_updated|2017-05-26T17:40:55Z +https://arxiv.org/pdf/1705.08039.pdf|arxiv_title|Poincaré Embeddings for Learning Hierarchical Representations +https://arxiv.org/pdf/1705.08039.pdf|arxiv_published|2017-05-22T23:14:36Z +https://arxiv.org/pdf/1705.08039.pdf|arxiv_num|1705.08039 +http://www.snee.com/bobdc.blog/2014/01/storing-and-querying-rdf-in-ne.html|creationDate|2015-01-19 +http://www.snee.com/bobdc.blog/2014/01/storing-and-querying-rdf-in-ne.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2014/01/storing-and-querying-rdf-in-ne.html|tag|http://www.semanlink.net/tag/triplestore +http://www.snee.com/bobdc.blog/2014/01/storing-and-querying-rdf-in-ne.html|tag|http://www.semanlink.net/tag/neo4j +http://www.snee.com/bobdc.blog/2014/01/storing-and-querying-rdf-in-ne.html|title|Storing and querying RDF in Neo4j - bobdc.blog +http://www.snee.com/bobdc.blog/2014/01/storing-and-querying-rdf-in-ne.html|creationTime|2015-01-19T15:23:02Z +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/disco/|creationDate|2007-01-15 +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/disco/|tag|http://www.semanlink.net/tag/disco_hyperdata_browser +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/disco/|tag|http://www.semanlink.net/tag/good +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/disco/|comment|Simple browser for navigating the Semantic Web as an unbound set of data sources. The browser renders all information, that it can find on the Semantic Web about a specific resource, as an HTML page. This resource description contains hyperlinks that allow you to navigate between resources. While you move from resource to resource, the browser dynamically retrieves information by dereferencing HTTP URIs and by following rdfs:seeAlso links. +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/disco/|title|Disco Hyperdata Browser +http://www.nature.com/news/2010/100630/full/news.2010.323.html|creationDate|2010-06-30 +http://www.nature.com/news/2010/100630/full/news.2010.323.html|tag|http://www.semanlink.net/tag/paleontologie +http://www.nature.com/news/2010/100630/full/news.2010.323.html|comment|Two-billion-year-old fossils could indicate steps towards multicellularity. 
+http://www.nature.com/news/2010/100630/full/news.2010.323.html|title|Ancient macrofossils unearthed in West Africa : Nature News +http://www.nature.com/news/2010/100630/full/news.2010.323.html|creationTime|2010-06-30T21:48:47Z +http://yyue.blogspot.ca/2015/01/a-brief-overview-of-deep-learning.html|creationDate|2015-01-15 +http://yyue.blogspot.ca/2015/01/a-brief-overview-of-deep-learning.html|tag|http://www.semanlink.net/tag/deep_learning +http://yyue.blogspot.ca/2015/01/a-brief-overview-of-deep-learning.html|title|Random Ponderings: A Brief Overview of Deep Learning +http://yyue.blogspot.ca/2015/01/a-brief-overview-of-deep-learning.html|creationTime|2015-01-15T22:06:16Z +http://opendatapress.org/|creationDate|2014-01-28 +http://opendatapress.org/|tag|http://www.semanlink.net/tag/linked_data_publishing +http://opendatapress.org/|tag|http://www.semanlink.net/tag/google +http://opendatapress.org/|title|Open Data Press - Google Sheets to Open Data +http://opendatapress.org/|creationTime|2014-01-28T10:06:44Z +http://www.mail-archive.com/public-lod@w3.org/msg05020.html|creationDate|2010-04-27 +http://www.mail-archive.com/public-lod@w3.org/msg05020.html|tag|http://www.semanlink.net/tag/minting_uris +http://www.mail-archive.com/public-lod@w3.org/msg05020.html|tag|http://www.semanlink.net/tag/lod_mailing_list +http://www.mail-archive.com/public-lod@w3.org/msg05020.html|tag|http://www.semanlink.net/tag/uri_opacity +http://www.mail-archive.com/public-lod@w3.org/msg05020.html|comment|"The “URI opacity” axiom does not say that URIs should be opaque. It says that clients should *treat them* as opaque. +" +http://www.mail-archive.com/public-lod@w3.org/msg05020.html|title|CoIN: Composition of Identifier Names +http://www.mail-archive.com/public-lod@w3.org/msg05020.html|creationTime|2010-04-27T10:07:02Z +http://deliprao.com/|creationDate|2016-08-24 +http://deliprao.com/|tag|http://www.semanlink.net/tag/delip_rao +http://deliprao.com/|title|Delip Rao +http://deliprao.com/|creationTime|2016-08-24T14:47:51Z +https://medium.com/s/futurehuman/survival-of-the-richest-9ef6cddd0cc1|creationDate|2018-07-07 +https://medium.com/s/futurehuman/survival-of-the-richest-9ef6cddd0cc1|tag|http://www.semanlink.net/tag/riches +https://medium.com/s/futurehuman/survival-of-the-richest-9ef6cddd0cc1|tag|http://www.semanlink.net/tag/anticipation +https://medium.com/s/futurehuman/survival-of-the-richest-9ef6cddd0cc1|tag|http://www.semanlink.net/tag/douglas_rushkoff +https://medium.com/s/futurehuman/survival-of-the-richest-9ef6cddd0cc1|title|Survival of the Richest +https://medium.com/s/futurehuman/survival-of-the-richest-9ef6cddd0cc1|creationTime|2018-07-07T09:54:50Z +https://gist.github.com/andyferra/2554919|creationDate|2017-04-14 +https://gist.github.com/andyferra/2554919|title|Github Markdown CSS - for Markdown Editor Preview +https://gist.github.com/andyferra/2554919|creationTime|2017-04-14T11:06:46Z +http://crise.blog.lemonde.fr/2012/12/01/jardinier-resistant-echange-graines-de-laitues-contre-plants-de-tomates/|creationDate|2012-12-09 +http://crise.blog.lemonde.fr/2012/12/01/jardinier-resistant-echange-graines-de-laitues-contre-plants-de-tomates/|tag|http://www.semanlink.net/tag/jardinage +http://crise.blog.lemonde.fr/2012/12/01/jardinier-resistant-echange-graines-de-laitues-contre-plants-de-tomates/|tag|http://www.semanlink.net/tag/esprit_de_resistance +http://crise.blog.lemonde.fr/2012/12/01/jardinier-resistant-echange-graines-de-laitues-contre-plants-de-tomates/|tag|http://www.semanlink.net/tag/semencier 
+http://crise.blog.lemonde.fr/2012/12/01/jardinier-resistant-echange-graines-de-laitues-contre-plants-de-tomates/|comment|"dans le commerce, l'immense majorité des graines sont de type ""hybride F1"", des croisements sélectionnés auto dégénérescents, qui ne donneront rien si on les replante d'une année sur l'autre" +http://crise.blog.lemonde.fr/2012/12/01/jardinier-resistant-echange-graines-de-laitues-contre-plants-de-tomates/|title|Jardinier résistant, échange graines de laitues contre plants de tomates Une Année en France +http://crise.blog.lemonde.fr/2012/12/01/jardinier-resistant-echange-graines-de-laitues-contre-plants-de-tomates/|creationTime|2012-12-09T11:56:00Z +https://github.com/josephmisiti/awesome-machine-learning|creationDate|2014-07-17 +https://github.com/josephmisiti/awesome-machine-learning|tag|http://www.semanlink.net/tag/machine_learning +https://github.com/josephmisiti/awesome-machine-learning|tag|http://www.semanlink.net/tag/software +https://github.com/josephmisiti/awesome-machine-learning|title|A curated list of awesome Machine Learning frameworks, libraries and software. +https://github.com/josephmisiti/awesome-machine-learning|creationTime|2014-07-17T14:51:22Z +http://www.semanticweb.com/features/index_to_the_creative_destruction_7_act_play_161403.asp|creationDate|2010-07-31 +http://www.semanticweb.com/features/index_to_the_creative_destruction_7_act_play_161403.asp|tag|http://www.semanlink.net/tag/semantic_web_business +http://www.semanticweb.com/features/index_to_the_creative_destruction_7_act_play_161403.asp|title|Index To The Creative Destruction 7 Act Play - Semantic Web +http://www.semanticweb.com/features/index_to_the_creative_destruction_7_act_play_161403.asp|creationTime|2010-07-31T14:11:15Z +http://vene.ro/blog/word-movers-distance-in-python.html|creationDate|2017-11-12 +http://vene.ro/blog/word-movers-distance-in-python.html|tag|http://www.semanlink.net/tag/word_mover_s_distance +http://vene.ro/blog/word-movers-distance-in-python.html|title|Word Mover’s Distance in Python +http://vene.ro/blog/word-movers-distance-in-python.html|creationTime|2017-11-12T02:54:16Z +http://jamendo.org/|creationDate|2007-06-13 +http://jamendo.org/|tag|http://www.semanlink.net/tag/jamendo +http://jamendo.org/|comment|Large collection of Creative Commons licensed songs +http://jamendo.org/|title|Jamendo.org +http://jamendo.org/|creationTime|2007-06-13T23:29:25Z +https://www.researchgate.net/publication/273397652_Entity_Linking_with_a_Knowledge_Base_Issues_Techniques_and_Solutions|creationDate|2019-05-14 +https://www.researchgate.net/publication/273397652_Entity_Linking_with_a_Knowledge_Base_Issues_Techniques_and_Solutions|tag|http://www.semanlink.net/tag/survey +https://www.researchgate.net/publication/273397652_Entity_Linking_with_a_Knowledge_Base_Issues_Techniques_and_Solutions|tag|http://www.semanlink.net/tag/entity_linking +https://www.researchgate.net/publication/273397652_Entity_Linking_with_a_Knowledge_Base_Issues_Techniques_and_Solutions|title|Entity Linking with a Knowledge Base: Issues, Techniques, and Solutions. W Shen (2015) +https://www.researchgate.net/publication/273397652_Entity_Linking_with_a_Knowledge_Base_Issues_Techniques_and_Solutions|creationTime|2019-05-14T18:29:05Z +http://www.sindice.com/|creationDate|2007-06-13 +http://www.sindice.com/|tag|http://www.semanlink.net/tag/sindice +http://www.sindice.com/|comment|"Sindice is a simple lookup index for Semantic Web resources. 
Sindice indexes the Semantic Web and can tell you which sources mention a certain resource URI. Sindice does not store the RDF, does not answer queries, and does not try to be a ""Semantic Web search engine"". You can simply use Sindice from within your application to implement a ""find more information"" button." +http://www.sindice.com/|title|sindice +http://www.sindice.com/|creationTime|2007-06-13T23:42:06Z +http://danbri.org/words/2008/01/06/246|creationDate|2008-11-07 +http://danbri.org/words/2008/01/06/246|tag|http://www.semanlink.net/tag/spreadsheets +http://danbri.org/words/2008/01/06/246|tag|http://www.semanlink.net/tag/sparql +http://danbri.org/words/2008/01/06/246|tag|http://www.semanlink.net/tag/dan_brickley +http://danbri.org/words/2008/01/06/246|title|danbri’s foaf stories » SPARQL results in spreadsheets +http://danbri.org/words/2008/01/06/246|creationTime|2008-11-07T13:11:13Z +http://www.cnrs.fr/inc/communication/direct_labos/moran2.htm|creationDate|2018-05-13 +http://www.cnrs.fr/inc/communication/direct_labos/moran2.htm|tag|http://www.semanlink.net/tag/co_ +http://www.cnrs.fr/inc/communication/direct_labos/moran2.htm|tag|http://www.semanlink.net/tag/origine_de_la_vie +http://www.cnrs.fr/inc/communication/direct_labos/moran2.htm|comment|Une simple réaction entre le dioxyde de carbone (CO2) et divers métaux dans de l’eau salée donne une nouvelle piste pour comprendre l’origine de biomolécules – et donc de la vie – sur Terre. +http://www.cnrs.fr/inc/communication/direct_labos/moran2.htm|title|Le CO2 à l’origine de la vie ? +http://www.cnrs.fr/inc/communication/direct_labos/moran2.htm|creationTime|2018-05-13T10:23:27Z +https://www.quora.com/What-is-Noise-Contrastive-estimation-NCE|creationDate|2018-07-21 +https://www.quora.com/What-is-Noise-Contrastive-estimation-NCE|tag|http://www.semanlink.net/tag/noise_contrastive_estimation +https://www.quora.com/What-is-Noise-Contrastive-estimation-NCE|title|What is Noise Contrastive estimation (NCE)? - Quora +https://www.quora.com/What-is-Noise-Contrastive-estimation-NCE|creationTime|2018-07-21T09:50:08Z +http://scot.curriculum.edu.au/index.html|creationDate|2010-07-26 +http://scot.curriculum.edu.au/index.html|tag|http://www.semanlink.net/tag/thesaurus +http://scot.curriculum.edu.au/index.html|tag|http://www.semanlink.net/tag/ims_vdex +http://scot.curriculum.edu.au/index.html|tag|http://www.semanlink.net/tag/skos +http://scot.curriculum.edu.au/index.html|tag|http://www.semanlink.net/tag/education +http://scot.curriculum.edu.au/index.html|comment|"The Schools Online Thesaurus (ScOT) provides a controlled vocabulary of terms used in Australian and New Zealand schools. Schools Online Thesaurus web service (ScOTwS) allows systems to get ScOT terms by ID, name or by other parameters. 
Data is available in SKOS RDF and VDEX xml +" +http://scot.curriculum.edu.au/index.html|title|Schools Online Thesaurus +http://scot.curriculum.edu.au/index.html|creationTime|2010-07-26T18:35:50Z +http://iandavis.com/blog/2009/08/time-in-rdf-6|creationDate|2009-08-27 +http://iandavis.com/blog/2009/08/time-in-rdf-6|tag|http://www.semanlink.net/tag/time_in_rdf +http://iandavis.com/blog/2009/08/time-in-rdf-6|title|Internet Alchemy » Representing Time in RDF Part 6 +http://iandavis.com/blog/2009/08/time-in-rdf-6|creationTime|2009-08-27T13:52:24Z +https://watson-api-explorer.mybluemix.net/apis/alchemy-language-v1|creationDate|2017-07-18 +https://watson-api-explorer.mybluemix.net/apis/alchemy-language-v1|tag|http://www.semanlink.net/tag/i_b_m_s_watson +https://watson-api-explorer.mybluemix.net/apis/alchemy-language-v1|tag|http://www.semanlink.net/tag/nlp_tools +https://watson-api-explorer.mybluemix.net/apis/alchemy-language-v1|comment|The AlchemyLanguage API uses natural language processing technology and machine learning algorithms to extract semantic meta-data from content, such as information on people, places, companies, topics, facts, relationships, authors, and languages. +https://watson-api-explorer.mybluemix.net/apis/alchemy-language-v1|title|Watson: Alchemy Language v1 API Explorer +https://watson-api-explorer.mybluemix.net/apis/alchemy-language-v1|creationTime|2017-07-18T18:04:05Z +http://judyshapiro.sharedby.co/share/FGN8rQ|creationDate|2013-02-08 +http://judyshapiro.sharedby.co/share/FGN8rQ|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://judyshapiro.sharedby.co/share/FGN8rQ|tag|http://www.semanlink.net/tag/science +http://judyshapiro.sharedby.co/share/FGN8rQ|title|27 Science Fictions That Became Science Facts In 2012 +http://judyshapiro.sharedby.co/share/FGN8rQ|creationTime|2013-02-08T19:58:32Z +https://www.technologyreview.com/lists/technologies/2017/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review|creationDate|2017-12-31 +https://www.technologyreview.com/lists/technologies/2017/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +https://www.technologyreview.com/lists/technologies/2017/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review|title|10 Breakthrough Technologies 2017 - MIT Technology Review +https://www.technologyreview.com/lists/technologies/2017/?utm_source=twitter.com&utm_medium=social&utm_content=2017-12-31&utm_campaign=Technology+Review|creationTime|2017-12-31T10:43:46Z +https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/|creationDate|2017-10-25 +https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/|tag|http://www.semanlink.net/tag/tutorial +https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/|tag|http://www.semanlink.net/tag/keras +https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/|tag|http://www.semanlink.net/tag/time_series +https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/|tag|http://www.semanlink.net/tag/sample_code +https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/|title|Multivariate Time Series Forecasting with LSTMs in Keras - Machine Learning Mastery 
+https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/|creationTime|2017-10-25T15:58:38Z +http://www.wired.com/autopia/2014/02/feds-v2v/|creationDate|2014-02-03 +http://www.wired.com/autopia/2014/02/feds-v2v/|tag|http://www.semanlink.net/tag/vehicular_communication_systems +http://www.wired.com/autopia/2014/02/feds-v2v/|tag|http://www.semanlink.net/tag/automobile +http://www.wired.com/autopia/2014/02/feds-v2v/|title|Feds Will Require All New Vehicles to Talk to Each Other Autopia Wired.com +http://www.wired.com/autopia/2014/02/feds-v2v/|creationTime|2014-02-03T22:10:32Z +http://www.semanlink.net/doc/2019/05/how_to_combine_categorical_and_|creationDate|2019-05-20 +http://www.semanlink.net/doc/2019/05/how_to_combine_categorical_and_|tag|http://www.semanlink.net/tag/combining_numerical_and_text_features +http://www.semanlink.net/doc/2019/05/how_to_combine_categorical_and_|title|How to combine categorical and continuous input features for neural network training - Data Science Stack Exchange +http://www.semanlink.net/doc/2019/05/how_to_combine_categorical_and_|bookmarkOf|https://datascience.stackexchange.com/questions/29634/how-to-combine-categorical-and-continuous-input-features-for-neural-network-trai +http://www.semanlink.net/doc/2019/05/how_to_combine_categorical_and_|creationTime|2019-05-20T19:07:47Z +http://siliconvalley.blog.lemonde.fr/2015/09/03/comment-les-travailleurs-1099-menacent-luber-economie/|creationDate|2015-09-15 +http://siliconvalley.blog.lemonde.fr/2015/09/03/comment-les-travailleurs-1099-menacent-luber-economie/|tag|http://www.semanlink.net/tag/uber +http://siliconvalley.blog.lemonde.fr/2015/09/03/comment-les-travailleurs-1099-menacent-luber-economie/|tag|http://www.semanlink.net/tag/proletaire +http://siliconvalley.blog.lemonde.fr/2015/09/03/comment-les-travailleurs-1099-menacent-luber-economie/|title|Comment les « travailleurs 1099 » menacent l’Uber-économie Silicon 2.0 +http://siliconvalley.blog.lemonde.fr/2015/09/03/comment-les-travailleurs-1099-menacent-luber-economie/|creationTime|2015-09-15T13:29:12Z +https://js.tensorflow.org/|creationDate|2018-10-10 +https://js.tensorflow.org/|tag|http://www.semanlink.net/tag/tensorflow +https://js.tensorflow.org/|tag|http://www.semanlink.net/tag/javascript +https://js.tensorflow.org/|title|TensorFlow.js +https://js.tensorflow.org/|creationTime|2018-10-10T11:27:13Z +http://n2.talis.com/wiki/Bounded_Descriptions_in_RDF|creationDate|2009-02-07 +http://n2.talis.com/wiki/Bounded_Descriptions_in_RDF|tag|http://www.semanlink.net/tag/concise_bounded_description +http://n2.talis.com/wiki/Bounded_Descriptions_in_RDF|title|Bounded Descriptions in RDF - n² wiki +http://n2.talis.com/wiki/Bounded_Descriptions_in_RDF|creationTime|2009-02-07T23:13:02Z +http://wikileaks.org/Transcript-Meeting-Assange-Schmidt|creationDate|2013-04-28 +http://wikileaks.org/Transcript-Meeting-Assange-Schmidt|tag|http://www.semanlink.net/tag/julian_assange +http://wikileaks.org/Transcript-Meeting-Assange-Schmidt|tag|http://www.semanlink.net/tag/eric_schmidt +http://wikileaks.org/Transcript-Meeting-Assange-Schmidt|title|Transcript of secret meeting between Julian Assange and Google CEO Eric Schmidt +http://wikileaks.org/Transcript-Meeting-Assange-Schmidt|creationTime|2013-04-28T00:09:28Z +http://prefix.cc/|creationDate|2012-04-25 +http://prefix.cc/|tag|http://www.semanlink.net/tag/rdf_dev +http://prefix.cc/|tag|http://www.semanlink.net/tag/richard_cyganiak +http://prefix.cc/|tag|http://www.semanlink.net/tag/rdf_tools 
+http://prefix.cc/|title|namespace lookup for RDF developers prefix.cc +http://prefix.cc/|creationTime|2012-04-25T11:48:10Z +https://openreview.net/forum?id=rJXMpikCZ|creationDate|2018-11-14 +https://openreview.net/forum?id=rJXMpikCZ|tag|http://www.semanlink.net/tag/yoshua_bengio +https://openreview.net/forum?id=rJXMpikCZ|tag|http://www.semanlink.net/tag/attention_in_graphs +https://openreview.net/forum?id=rJXMpikCZ|comment|A novel approach to processing graph-structured data by neural networks, leveraging **masked self-attentional layers over a node's neighborhood**. (-> different weights to different nodes in a neighborhood, without requiring any kind of computationally intensive matrix operation or depending on knowing the graph structure upfront). +https://openreview.net/forum?id=rJXMpikCZ|title|Graph Attention Networks (2018) +https://openreview.net/forum?id=rJXMpikCZ|creationTime|2018-11-14T02:10:45Z +http://www.picklematrix.net/archives/000979.html|creationDate|2005-09-23 +http://www.picklematrix.net/archives/000979.html|tag|http://www.semanlink.net/tag/rdf +http://www.picklematrix.net/archives/000979.html|tag|http://www.semanlink.net/tag/oracle +http://www.picklematrix.net/archives/000979.html|comment|Oracle 10g Support for RDF +http://www.picklematrix.net/archives/000979.html|title|SemErgence: Oracle 10g Support for RDF +http://thefigtrees.net/lee/sw/sparql-faq|creationDate|2006-10-09 +http://thefigtrees.net/lee/sw/sparql-faq|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://thefigtrees.net/lee/sw/sparql-faq|tag|http://www.semanlink.net/tag/sparql +http://thefigtrees.net/lee/sw/sparql-faq|tag|http://www.semanlink.net/tag/faq +http://thefigtrees.net/lee/sw/sparql-faq|title|SPARQL Protocol and Query Language: Frequently AskedQuestions +https://thegradient.pub/nlp-imagenet/|creationDate|2018-07-09 +https://thegradient.pub/nlp-imagenet/|tag|http://www.semanlink.net/tag/sebastian_ruder +https://thegradient.pub/nlp-imagenet/|tag|http://www.semanlink.net/tag/language_model +https://thegradient.pub/nlp-imagenet/|comment|"Pretrained word embeddings have a major limitation: they only incorporate previous knowledge in the first layer of the model---the rest of the network still needs to be trained from scratch + +> The long reign of word vectors as NLP’s core representation technique has seen an exciting new line of challengers emerge: ELMo, ULMFiT, and the OpenAI transformer. These works made headlines by demonstrating that pretrained language models can be used to achieve state-of-the-art results on a wide range of NLP tasks. + +> it only seems to be a question of time until pretrained word embeddings will be dethroned and replaced by pretrained language models in the toolbox of every NLP practitioner. This will likely open many new applications for NLP in settings with limited amounts of labeled data. +" +https://thegradient.pub/nlp-imagenet/|title|NLP's ImageNet moment has arrived +https://thegradient.pub/nlp-imagenet/|creationTime|2018-07-09T17:13:24Z +http://bob.pythonmac.org/archives/2005/12/05/remote-json-jsonp/|creationDate|2009-05-20 +http://bob.pythonmac.org/archives/2005/12/05/remote-json-jsonp/|tag|http://www.semanlink.net/tag/jsonp +http://bob.pythonmac.org/archives/2005/12/05/remote-json-jsonp/|comment|The browser security model dictates that XMLHttpRequest, frames, etc. must have the same domain in order to communicate. That's not a terrible idea, for security reasons, but it sure does make distributed web development suck. 
There are traditionally three solutions to solving this problem. Local proxy, Flash, Script tag... I'm proposing a new technology agnostic standard methodology for the script tag method for cross-domain data fetching: JSON with Padding, or simply JSONP. +http://bob.pythonmac.org/archives/2005/12/05/remote-json-jsonp/|title|from __future__ import * » Remote JSON - JSONP +http://bob.pythonmac.org/archives/2005/12/05/remote-json-jsonp/|creationTime|2009-05-20T23:08:05Z +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/15.pdf|creationDate|2006-12-23 +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/15.pdf|tag|http://www.semanlink.net/tag/ontologies +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/15.pdf|tag|http://www.semanlink.net/tag/archeologie +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/15.pdf|title|A Framework for Ontological Description of Archaeological Scientific Publications +http://www.w3.org/2006/07/SWD/wiki/RDFa|creationDate|2007-10-06 +http://www.w3.org/2006/07/SWD/wiki/RDFa|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2006/07/SWD/wiki/RDFa|tag|http://www.semanlink.net/tag/rdfa +http://www.w3.org/2006/07/SWD/wiki/RDFa|title|RDFa - W3C Semantic Web Deployment Wiki +http://www.w3.org/2006/07/SWD/wiki/RDFa|creationTime|2007-10-06T02:22:42Z +http://answers.semanticweb.com/search/?q=schema.org&Submit=search&t=question|creationDate|2013-07-07 +http://answers.semanticweb.com/search/?q=schema.org&Submit=search&t=question|tag|http://www.semanlink.net/tag/schema_org +http://answers.semanticweb.com/search/?q=schema.org&Submit=search&t=question|title|semanticweb.com: questions matching 'schema.org' +http://answers.semanticweb.com/search/?q=schema.org&Submit=search&t=question|creationTime|2013-07-07T17:16:40Z +http://e-claire.org/index.php?2005/04/17/25-outils-onlinegestionnaires-de-favorisbookmarks-social-bookmarking-applications|creationDate|2005-04-28 +http://e-claire.org/index.php?2005/04/17/25-outils-onlinegestionnaires-de-favorisbookmarks-social-bookmarking-applications|tag|http://www.semanlink.net/tag/social_bookmarking +http://e-claire.org/index.php?2005/04/17/25-outils-onlinegestionnaires-de-favorisbookmarks-social-bookmarking-applications|tag|http://www.semanlink.net/tag/tagging +http://www.semanlink.net/doc/2019/05/gm_fungus_rapidly_kills_99_of_|creationDate|2019-05-31 +http://www.semanlink.net/doc/2019/05/gm_fungus_rapidly_kills_99_of_|tag|http://www.semanlink.net/tag/ogm +http://www.semanlink.net/doc/2019/05/gm_fungus_rapidly_kills_99_of_|tag|http://www.semanlink.net/tag/paludisme +http://www.semanlink.net/doc/2019/05/gm_fungus_rapidly_kills_99_of_|title|GM fungus rapidly kills 99% of malaria mosquitoes, study suggests - BBC News +http://www.semanlink.net/doc/2019/05/gm_fungus_rapidly_kills_99_of_|bookmarkOf|https://www.bbc.com/news/health-48464510 +http://www.semanlink.net/doc/2019/05/gm_fungus_rapidly_kills_99_of_|creationTime|2019-05-31T18:47:23Z +http://herschel.esac.esa.int/|creationDate|2009-06-19 +http://herschel.esac.esa.int/|tag|http://www.semanlink.net/tag/herschel_telescope +http://herschel.esac.esa.int/|title|Herschel Science Centre +http://herschel.esac.esa.int/|creationTime|2009-06-19T17:11:15Z +https://c2gweb.qa.heliosnissan.net/c2gweb/product/gen?embed=true|creationDate|2014-11-07 +https://c2gweb.qa.heliosnissan.net/c2gweb/product/gen?embed=true|tag|http://www.semanlink.net/tag/c2gweb 
+https://c2gweb.qa.heliosnissan.net/c2gweb/product/gen?embed=true|title|C2GWeb Helios +https://c2gweb.qa.heliosnissan.net/c2gweb/product/gen?embed=true|creationTime|2014-11-07T16:26:57Z +http://www.theguardian.com/info/developer-blog/2014/mar/20/inside-the-guardians-cms-meet-scribe-an-extensible-rich-text-editor|creationDate|2014-03-21 +http://www.theguardian.com/info/developer-blog/2014/mar/20/inside-the-guardians-cms-meet-scribe-an-extensible-rich-text-editor|tag|http://www.semanlink.net/tag/text_editor +http://www.theguardian.com/info/developer-blog/2014/mar/20/inside-the-guardians-cms-meet-scribe-an-extensible-rich-text-editor|tag|http://www.semanlink.net/tag/cms +http://www.theguardian.com/info/developer-blog/2014/mar/20/inside-the-guardians-cms-meet-scribe-an-extensible-rich-text-editor|tag|http://www.semanlink.net/tag/the_guardian +http://www.theguardian.com/info/developer-blog/2014/mar/20/inside-the-guardians-cms-meet-scribe-an-extensible-rich-text-editor|title|Inside the Guardian’s CMS: meet Scribe, an extensible rich text editor Info theguardian.com +http://www.theguardian.com/info/developer-blog/2014/mar/20/inside-the-guardians-cms-meet-scribe-an-extensible-rich-text-editor|creationTime|2014-03-21T12:05:08Z +https://www.owasp.org/index.php/REST_Security_Cheat_Sheet|creationDate|2015-11-16 +https://www.owasp.org/index.php/REST_Security_Cheat_Sheet|tag|http://www.semanlink.net/tag/rest_security +https://www.owasp.org/index.php/REST_Security_Cheat_Sheet|tag|http://www.semanlink.net/tag/cheat_sheet +https://www.owasp.org/index.php/REST_Security_Cheat_Sheet|title|REST Security Cheat Sheet - OWASP +https://www.owasp.org/index.php/REST_Security_Cheat_Sheet|creationTime|2015-11-16T12:00:06Z +http://www.psikopat.com/html/spirale.htm|creationDate|2008-10-28 +http://www.psikopat.com/html/spirale.htm|tag|http://www.semanlink.net/tag/illusion_d_optique +http://www.psikopat.com/html/spirale.htm|title|Le disk wahouuu +http://www.psikopat.com/html/spirale.htm|creationTime|2008-10-28T22:57:04Z +https://www.youtube.com/watch?v=jk2rZTwesp4&index=1&list=RDjk2rZTwesp4|creationDate|2017-01-07 +https://www.youtube.com/watch?v=jk2rZTwesp4&index=1&list=RDjk2rZTwesp4|tag|http://www.semanlink.net/tag/kassav +https://www.youtube.com/watch?v=jk2rZTwesp4&index=1&list=RDjk2rZTwesp4|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=jk2rZTwesp4&index=1&list=RDjk2rZTwesp4|tag|http://www.semanlink.net/tag/j_y_etais +https://www.youtube.com/watch?v=jk2rZTwesp4&index=1&list=RDjk2rZTwesp4|title|Kassav Zenith 89 - YouTube +https://www.youtube.com/watch?v=jk2rZTwesp4&index=1&list=RDjk2rZTwesp4|creationTime|2017-01-07T01:20:32Z +http://bitcoin.org/bitcoin.pdf|creationDate|2013-12-23 +http://bitcoin.org/bitcoin.pdf|tag|http://www.semanlink.net/tag/bitcoin +http://bitcoin.org/bitcoin.pdf|title|Bitcoin: A Peer-to-Peer Electronic Cash System ; Satoshi Nakamoto +http://bitcoin.org/bitcoin.pdf|creationTime|2013-12-23T11:57:41Z +http://container42.com/2014/11/18/data-only-container-madness/|creationDate|2016-04-14 +http://container42.com/2014/11/18/data-only-container-madness/|tag|http://www.semanlink.net/tag/docker_volumes +http://container42.com/2014/11/18/data-only-container-madness/|title|Data-only container madness · Container42 +http://container42.com/2014/11/18/data-only-container-madness/|creationTime|2016-04-14T17:31:34Z +http://eclipse.developpez.com/faq/?page=gestionDeVersion#installerSubclipseClient|creationDate|2012-06-27 
+http://eclipse.developpez.com/faq/?page=gestionDeVersion#installerSubclipseClient|tag|http://www.semanlink.net/tag/faq +http://eclipse.developpez.com/faq/?page=gestionDeVersion#installerSubclipseClient|tag|http://www.semanlink.net/tag/svn +http://eclipse.developpez.com/faq/?page=gestionDeVersion#installerSubclipseClient|tag|http://www.semanlink.net/tag/eclipse +http://eclipse.developpez.com/faq/?page=gestionDeVersion#installerSubclipseClient|title|FAQ Eclipse / subclipse install +http://eclipse.developpez.com/faq/?page=gestionDeVersion#installerSubclipseClient|creationTime|2012-06-27T16:46:37Z +http://dev.eclipse.org/newslists/news.eclipse.webtools/msg18066.html|creationDate|2009-02-25 +http://dev.eclipse.org/newslists/news.eclipse.webtools/msg18066.html|tag|http://www.semanlink.net/tag/wtp +http://dev.eclipse.org/newslists/news.eclipse.webtools/msg18066.html|tag|http://www.semanlink.net/tag/eclipse +http://dev.eclipse.org/newslists/news.eclipse.webtools/msg18066.html|title|[news.eclipse.webtools] sharing resources between dynamic web projects +http://dev.eclipse.org/newslists/news.eclipse.webtools/msg18066.html|creationTime|2009-02-25T00:03:09Z +http://rollerweblogger.org/wiki/Wiki.jsp?page=InstallationGuide12|creationDate|2006-09-12 +http://rollerweblogger.org/wiki/Wiki.jsp?page=InstallationGuide12|comment|"By default, Roller saves uploaded files under the directory ${user.home}/roller_data/uploads . Here ${user.home} is the Java system property that normally evaluates to the home directory of the user identity executing the server's JVM process. + +In most cases, this default will probably work fine for you. However, for security reasons, some Tomcat installations (and other containers as well) are set up to run as a server user identity whose home directory does not exist or is not writable by the server user itself. If this is the case for your site, override the property uploads.dir in roller.properties (see ConfigurationGuide for details)." 
+http://rollerweblogger.org/wiki/Wiki.jsp?page=InstallationGuide12|title|RollerWiki: InstallationGuide12 +http://www.lemonde.fr/sante/article/2012/10/25/la-pollution-fait-autant-de-ravages-sur-la-sante-que-le-paludisme_1780288_1651302.html|creationDate|2012-10-26 +http://www.lemonde.fr/sante/article/2012/10/25/la-pollution-fait-autant-de-ravages-sur-la-sante-que-le-paludisme_1780288_1651302.html|tag|http://www.semanlink.net/tag/pollution +http://www.lemonde.fr/sante/article/2012/10/25/la-pollution-fait-autant-de-ravages-sur-la-sante-que-le-paludisme_1780288_1651302.html|tag|http://www.semanlink.net/tag/sante +http://www.lemonde.fr/sante/article/2012/10/25/la-pollution-fait-autant-de-ravages-sur-la-sante-que-le-paludisme_1780288_1651302.html|tag|http://www.semanlink.net/tag/paludisme +http://www.lemonde.fr/sante/article/2012/10/25/la-pollution-fait-autant-de-ravages-sur-la-sante-que-le-paludisme_1780288_1651302.html|title|La pollution industrielle fait autant de ravages sur la santé que le paludisme +http://www.lemonde.fr/sante/article/2012/10/25/la-pollution-fait-autant-de-ravages-sur-la-sante-que-le-paludisme_1780288_1651302.html|creationTime|2012-10-26T01:09:07Z +http://www.aims.ac.za/|creationDate|2015-04-26 +http://www.aims.ac.za/|tag|http://www.semanlink.net/tag/new_africa +http://www.aims.ac.za/|tag|http://www.semanlink.net/tag/mathematiques +http://www.aims.ac.za/|title|African Institute for Mathematical Sciences AIMS +http://www.aims.ac.za/|creationTime|2015-04-26T13:26:08Z +https://www.theguardian.com/commentisfree/2017/jan/12/universal-basic-income-finland-uk|creationDate|2017-03-11 +https://www.theguardian.com/commentisfree/2017/jan/12/universal-basic-income-finland-uk|tag|http://www.semanlink.net/tag/guaranteed_basic_income +https://www.theguardian.com/commentisfree/2017/jan/12/universal-basic-income-finland-uk|title|Universal basic income is becoming an urgent necessity Guy Standing Opinion The Guardian +https://www.theguardian.com/commentisfree/2017/jan/12/universal-basic-income-finland-uk|creationTime|2017-03-11T00:42:57Z +http://www.inhotim.org.br/|creationDate|2014-08-02 +http://www.inhotim.org.br/|tag|http://www.semanlink.net/tag/belo_horizonte +http://www.inhotim.org.br/|title|Inhotim +http://www.inhotim.org.br/|creationTime|2014-08-02T12:16:02Z +http://forum.macbidouille.com/lofiversion/index.php/t137183.html|creationDate|2006-09-16 +http://forum.macbidouille.com/lofiversion/index.php/t137183.html|tag|http://www.semanlink.net/tag/airport +http://forum.macbidouille.com/lofiversion/index.php/t137183.html|tag|http://www.semanlink.net/tag/noos +http://forum.macbidouille.com/lofiversion/index.php/t137183.html|comment|"noos est un FAI assez mesquin : si tu lis les conditions générales de ventes de cet opérateur, tu verras que tu n'as droit qu'à un seul ordinateur connecté à moins de leur louer un modem wifi à 5 euros par moi. Et donc, les modems fournis par noos ne fonctionnent qu'avec une seul adresse MAC et pas n'importe laquelle. Dès que tu changes l'ordi connecté au modem noos, il faut que tu reset le modem pour que celui-ci s'initialise avec l'adresse MAC du nouvel ordi. Mais, et c'est là que noos est mesquin, ce n'est pas vrai avec les routeurs. 
L'astuce utilisée par beaucoup est de cloner l'adresse MAC de leur ordi sur leur routeur (beaucoup de routeurs le permettent mais pas l'airport express) +" +http://forum.macbidouille.com/lofiversion/index.php/t137183.html|title|Forums MacBidouille > Noos avec airport express +http://finance.blog.lemonde.fr/2012/02/12/la-banque-centrale-europeenne-risque-t-elle-dexploser/|creationDate|2012-02-13 +http://finance.blog.lemonde.fr/2012/02/12/la-banque-centrale-europeenne-risque-t-elle-dexploser/|tag|http://www.semanlink.net/tag/banque_centrale_europeenne +http://finance.blog.lemonde.fr/2012/02/12/la-banque-centrale-europeenne-risque-t-elle-dexploser/|title|La Banque Centrale Européenne risque-t-elle d’exploser en prêtant aux banques? Démystifier la finance +http://finance.blog.lemonde.fr/2012/02/12/la-banque-centrale-europeenne-risque-t-elle-dexploser/|creationTime|2012-02-13T09:11:19Z +http://www.honeynet.org/papers/webapp/index.html|creationDate|2007-03-16 +http://www.honeynet.org/papers/webapp/index.html|tag|http://www.semanlink.net/tag/web_application_threats +http://www.honeynet.org/papers/webapp/index.html|title|Know your Enemy: Web Application Threats +http://www.honeynet.org/papers/webapp/index.html|creationTime|2007-03-16T18:28:14Z +http://www.twine.com/|creationDate|2007-11-08 +http://www.twine.com/|tag|http://www.semanlink.net/tag/twine +http://www.twine.com/|comment|A revolutionary new way to share, organize, and find information. Use Twine to better leverage and contribute to the collective intelligence of your friends, colleagues, groups and teams. Twine ties it all together. +http://www.twine.com/|title|Twine +http://www.twine.com/|creationTime|2007-11-08T23:03:02Z +http://www.eetimes.com/news/semi/showArticle.jhtml?articleID=180201688|creationDate|2006-02-17 +http://www.eetimes.com/news/semi/showArticle.jhtml?articleID=180201688|tag|http://www.semanlink.net/tag/rfid +http://www.eetimes.com/news/semi/showArticle.jhtml?articleID=180201688|title|EETimes.com - Cellphone could crack RFID tags, says cryptographer +http://fonnesbeck.github.io/ScipySuperpack/|creationDate|2014-07-16 +http://fonnesbeck.github.io/ScipySuperpack/|tag|http://www.semanlink.net/tag/github_project +http://fonnesbeck.github.io/ScipySuperpack/|tag|http://www.semanlink.net/tag/python +http://fonnesbeck.github.io/ScipySuperpack/|comment|A script for building the Python scientific stack on OS X +http://fonnesbeck.github.io/ScipySuperpack/|title|fonnesbeck/ScipySuperpack @ GitHub +http://fonnesbeck.github.io/ScipySuperpack/|creationTime|2014-07-16T17:20:14Z +https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html|creationDate|2018-03-16 +https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html|tag|http://www.semanlink.net/tag/mixture_distribution +https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html|tag|http://www.semanlink.net/tag/k_means_clustering +https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html|comment|"The non-probabilistic nature of [k-means](/tag/k_means_clustering) and its use of simple distance-from-cluster-center to assign cluster membership leads to poor performance for many real-world situations. We take a look at Gaussian mixture models (GMMs), which can be viewed as an extension of the ideas behind k-means, but can also be a powerful tool for estimation beyond simple clustering. 
+ +A Gaussian mixture model (GMM) attempts to find a mixture of multi-dimensional Gaussian probability distributions that best model any input dataset. In the simplest case, GMMs can be used for finding clusters in the same manner as k-means. + +But because GMM contains a probabilistic model under the hood, it is also possible to find probabilistic cluster assignments (in Scikit-Learn, using predict_proba) + +> Though GMM is often categorized as a clustering algorithm, fundamentally it is an algorithm for density estimation. That is to say, the result of a GMM fit to some data is technically not a clustering model, but a generative probabilistic model describing the distribution of the data. + +-> a natural means of determining the optimal number of components for a given dataset +" +https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html|title|In Depth: Gaussian Mixture Models Python Data Science Handbook +https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html|creationTime|2018-03-16T16:45:54Z +http://spec.commonmark.org/|creationDate|2017-01-05 +http://spec.commonmark.org/|tag|http://www.semanlink.net/tag/markdown +http://spec.commonmark.org/|tag|http://www.semanlink.net/tag/spec +http://spec.commonmark.org/|title|CommonMark Spec +http://spec.commonmark.org/|creationTime|2017-01-05T01:05:49Z +http://www.w3.org/2007/08/grddl/|creationDate|2007-12-09 +http://www.w3.org/2007/08/grddl/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2007/08/grddl/|tag|http://www.semanlink.net/tag/grddl +http://www.w3.org/2007/08/grddl/|title|W3C GRDDL service +http://www.w3.org/2007/08/grddl/|creationTime|2007-12-09T22:27:30Z +http://homepages.inf.ed.ac.uk/lzhang10/maxent.html|creationDate|2014-04-24 +http://homepages.inf.ed.ac.uk/lzhang10/maxent.html|tag|http://www.semanlink.net/tag/maxent_models +http://homepages.inf.ed.ac.uk/lzhang10/maxent.html|comment|Collection of links, papers, software... 
+http://homepages.inf.ed.ac.uk/lzhang10/maxent.html|title|Maximum Entropy Modeling +http://homepages.inf.ed.ac.uk/lzhang10/maxent.html|creationTime|2014-04-24T01:05:58Z +http://www.jpl.nasa.gov/news/news.php?release=2012-381|creationDate|2012-12-09 +http://www.jpl.nasa.gov/news/news.php?release=2012-381|tag|http://www.semanlink.net/tag/voyager +http://www.jpl.nasa.gov/news/news.php?release=2012-381|title|NASA Voyager 1 Encounters New Region in Deep Space - NASA Jet Propulsion Laboratory +http://www.jpl.nasa.gov/news/news.php?release=2012-381|creationTime|2012-12-09T13:09:29Z +http://code.google.com/webtoolkit/|creationDate|2006-05-17 +http://code.google.com/webtoolkit/|tag|http://www.semanlink.net/tag/java +http://code.google.com/webtoolkit/|tag|http://www.semanlink.net/tag/ajax +http://code.google.com/webtoolkit/|tag|http://www.semanlink.net/tag/google +http://code.google.com/webtoolkit/|title|Google Web Toolkit - Build AJAX apps in the Java language +http://scikit-learn.org/stable/modules/feature_extraction.html|creationDate|2015-10-21 +http://scikit-learn.org/stable/modules/feature_extraction.html|tag|http://www.semanlink.net/tag/feature_extraction +http://scikit-learn.org/stable/modules/feature_extraction.html|tag|http://www.semanlink.net/tag/feature_hashing +http://scikit-learn.org/stable/modules/feature_extraction.html|title|Feature extraction — scikit-learn documentation +http://scikit-learn.org/stable/modules/feature_extraction.html|creationTime|2015-10-21T17:12:20Z +http://liza.io/ijcai-session-notes-learning-common-sense/|creationDate|2018-07-23 +http://liza.io/ijcai-session-notes-learning-common-sense/|tag|http://www.semanlink.net/tag/ijcai +http://liza.io/ijcai-session-notes-learning-common-sense/|tag|http://www.semanlink.net/tag/common_sense +http://liza.io/ijcai-session-notes-learning-common-sense/|title|IJCAI Session Notes: Learning Common Sense · Liza +http://liza.io/ijcai-session-notes-learning-common-sense/|creationTime|2018-07-23T12:52:24Z +http://norman.walsh.name/threads/webservices|creationDate|2005-10-13 +http://norman.walsh.name/threads/webservices|tag|http://www.semanlink.net/tag/norman_walsh +http://norman.walsh.name/threads/webservices|tag|http://www.semanlink.net/tag/web_services +http://norman.walsh.name/threads/webservices|comment|"If I'm ever going to understand Web Services, I'm going to have +to build one." 
+http://norman.walsh.name/threads/webservices|title|Norman Walsh - Thread: Web Services +http://passeurdesciences.blog.lemonde.fr/2013/09/04/un-nouveau-champignon-tueur-attaque-amphibiens-biodiversite/|creationDate|2013-09-04 +http://passeurdesciences.blog.lemonde.fr/2013/09/04/un-nouveau-champignon-tueur-attaque-amphibiens-biodiversite/|tag|http://www.semanlink.net/tag/disparition_d_especes +http://passeurdesciences.blog.lemonde.fr/2013/09/04/un-nouveau-champignon-tueur-attaque-amphibiens-biodiversite/|tag|http://www.semanlink.net/tag/especes_menacees +http://passeurdesciences.blog.lemonde.fr/2013/09/04/un-nouveau-champignon-tueur-attaque-amphibiens-biodiversite/|tag|http://www.semanlink.net/tag/champignon +http://passeurdesciences.blog.lemonde.fr/2013/09/04/un-nouveau-champignon-tueur-attaque-amphibiens-biodiversite/|tag|http://www.semanlink.net/tag/amphibiens +http://passeurdesciences.blog.lemonde.fr/2013/09/04/un-nouveau-champignon-tueur-attaque-amphibiens-biodiversite/|tag|http://www.semanlink.net/tag/epidemie +http://passeurdesciences.blog.lemonde.fr/2013/09/04/un-nouveau-champignon-tueur-attaque-amphibiens-biodiversite/|title|Un nouveau champignon-tueur s’attaque aux amphibiens Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2013/09/04/un-nouveau-champignon-tueur-attaque-amphibiens-biodiversite/|creationTime|2013-09-04T21:26:04Z +http://www.ldodds.com/blog/archives/000289.html|creationDate|2006-06-08 +http://www.ldodds.com/blog/archives/000289.html|tag|http://www.semanlink.net/tag/google +http://www.ldodds.com/blog/archives/000289.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/archives/000289.html|tag|http://www.semanlink.net/tag/sparql +http://www.ldodds.com/blog/archives/000289.html|comment|"Last night I took my first look at Google Co-op, in particular the ""Subscribed Links"" feature which allows users to add services to Google search results." 
+http://www.ldodds.com/blog/archives/000289.html|title|Lost Boy: Feeding Google Co-Op with SPARQL +http://ecologie.blog.lemonde.fr/2011/11/08/reduire-nos-emissions-de-30-et-favoriser-l%E2%80%99emploi-le-rapport-qui-derange/|creationDate|2011-11-09 +http://ecologie.blog.lemonde.fr/2011/11/08/reduire-nos-emissions-de-30-et-favoriser-l%E2%80%99emploi-le-rapport-qui-derange/|tag|http://www.semanlink.net/tag/economie_ecologique +http://ecologie.blog.lemonde.fr/2011/11/08/reduire-nos-emissions-de-30-et-favoriser-l%E2%80%99emploi-le-rapport-qui-derange/|tag|http://www.semanlink.net/tag/taxe_carbone +http://ecologie.blog.lemonde.fr/2011/11/08/reduire-nos-emissions-de-30-et-favoriser-l%E2%80%99emploi-le-rapport-qui-derange/|tag|http://www.semanlink.net/tag/rechauffement_climatique +http://ecologie.blog.lemonde.fr/2011/11/08/reduire-nos-emissions-de-30-et-favoriser-l%E2%80%99emploi-le-rapport-qui-derange/|tag|http://www.semanlink.net/tag/gouvernement_sarkozy +http://ecologie.blog.lemonde.fr/2011/11/08/reduire-nos-emissions-de-30-et-favoriser-l%E2%80%99emploi-le-rapport-qui-derange/|title|Réduire nos émissions de 30 % pour favoriser l’emploi, le rapport qui dérange Eco(lo) +http://ecologie.blog.lemonde.fr/2011/11/08/reduire-nos-emissions-de-30-et-favoriser-l%E2%80%99emploi-le-rapport-qui-derange/|creationTime|2011-11-09T00:54:05Z +http://machinelearningmastery.com/useful-things-to-know-about-machine-learning/|creationDate|2015-12-22 +http://machinelearningmastery.com/useful-things-to-know-about-machine-learning/|tag|http://www.semanlink.net/tag/machine_learning +http://machinelearningmastery.com/useful-things-to-know-about-machine-learning/|title|Useful Things To Know About Machine Learning - Machine Learning Mastery +http://machinelearningmastery.com/useful-things-to-know-about-machine-learning/|creationTime|2015-12-22T19:34:04Z +https://www.arte.tv/fr/videos/082806-000-A/comment-trump-a-manipule-l-amerique/|creationDate|2018-10-13 +https://www.arte.tv/fr/videos/082806-000-A/comment-trump-a-manipule-l-amerique/|tag|http://www.semanlink.net/tag/trump +https://www.arte.tv/fr/videos/082806-000-A/comment-trump-a-manipule-l-amerique/|tag|http://www.semanlink.net/tag/facebook_cambridge_analytica +https://www.arte.tv/fr/videos/082806-000-A/comment-trump-a-manipule-l-amerique/|tag|http://www.semanlink.net/tag/documentaire_tv +https://www.arte.tv/fr/videos/082806-000-A/comment-trump-a-manipule-l-amerique/|title|Comment Trump a manipulé l'Amérique ARTE +https://www.arte.tv/fr/videos/082806-000-A/comment-trump-a-manipule-l-amerique/|creationTime|2018-10-13T10:46:21Z +https://www.mnot.net/cache_docs/|creationDate|2015-05-14 +https://www.mnot.net/cache_docs/|tag|http://www.semanlink.net/tag/http_cache +https://www.mnot.net/cache_docs/|title|Un tutoriel de la mise en cache +https://www.mnot.net/cache_docs/|creationTime|2015-05-14T15:49:59Z +http://www.w3.org/2001/sw/sweo/|creationDate|2006-10-09 +http://www.w3.org/2001/sw/sweo/|tag|http://www.semanlink.net/tag/semantic_web +http://www.w3.org/2001/sw/sweo/|tag|http://www.semanlink.net/tag/education +http://www.w3.org/2001/sw/sweo/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2001/sw/sweo/|comment|W3C is pleased to announce the launch of the Semantic Web Education and Outreach Interest Group, chaired by Susie Stephens (Oracle). 
The group is chartered to collect proof-of-concept business cases, demonstration prototypes, etc, based on successful implementations of Semantic Web technologies, collect user experiences, develop and facilitate community outreach strategies, training and educational resources.
+http://www.w3.org/2001/sw/sweo/|title|Semantic Web Education and Outreach (SWEO) Interest Group
+https://staff.fnwi.uva.nl/m.derijke/wp-content/papercite-data/pdf/kenter-short-2015.pdf|creationDate|2017-05-18
+https://staff.fnwi.uva.nl/m.derijke/wp-content/papercite-data/pdf/kenter-short-2015.pdf|tag|http://www.semanlink.net/tag/using_word_embedding
+https://staff.fnwi.uva.nl/m.derijke/wp-content/papercite-data/pdf/kenter-short-2015.pdf|tag|http://www.semanlink.net/tag/text_similarity
+https://staff.fnwi.uva.nl/m.derijke/wp-content/papercite-data/pdf/kenter-short-2015.pdf|tag|http://www.semanlink.net/tag/word_embedding
+https://staff.fnwi.uva.nl/m.derijke/wp-content/papercite-data/pdf/kenter-short-2015.pdf|tag|http://www.semanlink.net/tag/nlp_short_texts
+https://staff.fnwi.uva.nl/m.derijke/wp-content/papercite-data/pdf/kenter-short-2015.pdf|comment|"We investigate whether determining short text similarity is possible
+using only semantic features.
+
+A novel feature of our
+approach is that an arbitrary number of word embedding sets can be
+incorporated."
+https://staff.fnwi.uva.nl/m.derijke/wp-content/papercite-data/pdf/kenter-short-2015.pdf|title|Short Text Similarity with Word Embeddings
+https://staff.fnwi.uva.nl/m.derijke/wp-content/papercite-data/pdf/kenter-short-2015.pdf|creationTime|2017-05-18T01:58:44Z
+http://www.liberation.fr/debats/2016/07/01/bernard-stiegler-l-acceleration-de-l-innovation-court-circuite-tout-ce-qui-contribue-a-l-elaboration_1463430|creationDate|2016-07-03
+http://www.liberation.fr/debats/2016/07/01/bernard-stiegler-l-acceleration-de-l-innovation-court-circuite-tout-ce-qui-contribue-a-l-elaboration_1463430|tag|http://www.semanlink.net/tag/disruption
+http://www.liberation.fr/debats/2016/07/01/bernard-stiegler-l-acceleration-de-l-innovation-court-circuite-tout-ce-qui-contribue-a-l-elaboration_1463430|tag|http://www.semanlink.net/tag/bernard_stiegler
+http://www.liberation.fr/debats/2016/07/01/bernard-stiegler-l-acceleration-de-l-innovation-court-circuite-tout-ce-qui-contribue-a-l-elaboration_1463430|tag|http://www.semanlink.net/tag/innovation
+http://www.liberation.fr/debats/2016/07/01/bernard-stiegler-l-acceleration-de-l-innovation-court-circuite-tout-ce-qui-contribue-a-l-elaboration_1463430|title|Bernard Stiegler: «L’accélération de l’innovation court-circuite tout ce qui contribue à l’élaboration de la civilisation» - Libération
+http://www.liberation.fr/debats/2016/07/01/bernard-stiegler-l-acceleration-de-l-innovation-court-circuite-tout-ce-qui-contribue-a-l-elaboration_1463430|creationTime|2016-07-03T01:54:35Z
+https://www.wired.com/story/ai-can-recognize-images-but-understand-headline/amp?__twitter_impression=true|creationDate|2018-09-08
+https://www.wired.com/story/ai-can-recognize-images-but-understand-headline/amp?__twitter_impression=true|tag|http://www.semanlink.net/tag/ulmfit
+https://www.wired.com/story/ai-can-recognize-images-but-understand-headline/amp?__twitter_impression=true|tag|http://www.semanlink.net/tag/elmo
+https://www.wired.com/story/ai-can-recognize-images-but-understand-headline/amp?__twitter_impression=true|title|AI Can Recognize Images, But Text Has Been Tricky—Until Now WIRED
+https://www.wired.com/story/ai-can-recognize-images-but-understand-headline/amp?__twitter_impression=true|creationTime|2018-09-08T00:19:53Z +http://nautil.us/issue/34/adaptation/sex-is-a-coping-mechanism|creationDate|2016-07-17 +http://nautil.us/issue/34/adaptation/sex-is-a-coping-mechanism|tag|http://www.semanlink.net/tag/histoire_de_la_vie +http://nautil.us/issue/34/adaptation/sex-is-a-coping-mechanism|tag|http://www.semanlink.net/tag/lynn_margulis +http://nautil.us/issue/34/adaptation/sex-is-a-coping-mechanism|tag|http://www.semanlink.net/tag/sexe +http://nautil.us/issue/34/adaptation/sex-is-a-coping-mechanism|tag|http://www.semanlink.net/tag/mitochondries +http://nautil.us/issue/34/adaptation/sex-is-a-coping-mechanism|comment|Eukaryotes are bound by two features—mitochondria and sex—and we believe there is a neglected link here +http://nautil.us/issue/34/adaptation/sex-is-a-coping-mechanism|title|Sex Seems Like a Waste—So Why Do So Many Creatures Need It to Reproduce? +http://nautil.us/issue/34/adaptation/sex-is-a-coping-mechanism|creationTime|2016-07-17T10:39:42Z +https://www.quora.com/What-product-breakthroughs-will-recent-advances-in-deep-learning-enable/answer/Eric-Jang|creationDate|2017-10-03 +https://www.quora.com/What-product-breakthroughs-will-recent-advances-in-deep-learning-enable/answer/Eric-Jang|tag|http://www.semanlink.net/tag/deep_learning +https://www.quora.com/What-product-breakthroughs-will-recent-advances-in-deep-learning-enable/answer/Eric-Jang|comment|Eric Jang, Research Engineer at Google Brain +https://www.quora.com/What-product-breakthroughs-will-recent-advances-in-deep-learning-enable/answer/Eric-Jang|title|What product breakthroughs will recent advances in deep learning enable? - Quora +https://www.quora.com/What-product-breakthroughs-will-recent-advances-in-deep-learning-enable/answer/Eric-Jang|creationTime|2017-10-03T13:58:50Z +http://bayosphere.com/why-drupal|creationDate|2005-05-17 +http://bayosphere.com/why-drupal|title|Why Drupal? Bayosphere +http://cs229.stanford.edu/proj2013/ChaseGenainKarniolTambour-LearningMulti-LabelTopicClassificationofNewsArticles.pdf|creationDate|2014-04-08 +http://cs229.stanford.edu/proj2013/ChaseGenainKarniolTambour-LearningMulti-LabelTopicClassificationofNewsArticles.pdf|tag|http://www.semanlink.net/tag/text_multi_label_classification +http://cs229.stanford.edu/proj2013/ChaseGenainKarniolTambour-LearningMulti-LabelTopicClassificationofNewsArticles.pdf|comment|"> The notion of ’tip-off’ words (words that are highly indicative of the article belonging to a particular topic) suggested to us that fairly robust multi-label classification should be achievable with only a limited set of high-information words, and moreover, without access to any explicit priors on class labels + +> On the whole our research validated the common +approach of using binary-classifiers to learn multi-label +topic classifications for new articles. 
The tfidf approach +captures some interesting aspects of the intuition behind +how people may classify news articles, but we were +not able to lower the error produced by the tfidf model +sufficiently to make it practically competitive with the +binary classification scheme" +http://cs229.stanford.edu/proj2013/ChaseGenainKarniolTambour-LearningMulti-LabelTopicClassificationofNewsArticles.pdf|title|Learning Multilabel classification of news articles (2013) +http://cs229.stanford.edu/proj2013/ChaseGenainKarniolTambour-LearningMulti-LabelTopicClassificationofNewsArticles.pdf|creationTime|2014-04-08T17:20:45Z +https://www.youtube.com/watch?v=L3TcSwwQL_g|creationDate|2016-03-26 +https://www.youtube.com/watch?v=L3TcSwwQL_g|tag|http://www.semanlink.net/tag/souvenirs +https://www.youtube.com/watch?v=L3TcSwwQL_g|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=L3TcSwwQL_g|tag|http://www.semanlink.net/tag/musique_du_niger +https://www.youtube.com/watch?v=L3TcSwwQL_g|title|Djamila +https://www.youtube.com/watch?v=L3TcSwwQL_g|creationTime|2016-03-26T13:21:31Z +https://medium.com/@chacon/how-to-really-learn-a-language-maybe-c1b07223278c#.fkbf2sjxc|creationDate|2016-05-07 +https://medium.com/@chacon/how-to-really-learn-a-language-maybe-c1b07223278c#.fkbf2sjxc|tag|http://www.semanlink.net/tag/japonais +https://medium.com/@chacon/how-to-really-learn-a-language-maybe-c1b07223278c#.fkbf2sjxc|tag|http://www.semanlink.net/tag/apprendre_une_langue +https://medium.com/@chacon/how-to-really-learn-a-language-maybe-c1b07223278c#.fkbf2sjxc|title|How to Really Learn a Language (Maybe) — Medium +https://medium.com/@chacon/how-to-really-learn-a-language-maybe-c1b07223278c#.fkbf2sjxc|creationTime|2016-05-07T00:14:51Z +http://trice.semsol.org/|creationDate|2008-09-09 +http://trice.semsol.org/|tag|http://www.semanlink.net/tag/rdf +http://trice.semsol.org/|tag|http://www.semanlink.net/tag/php +http://trice.semsol.org/|tag|http://www.semanlink.net/tag/web_dev_framework +http://trice.semsol.org/|comment|Trice is a Web development framework that uses RDF technology to increase productivity and flexibility for everyday Web programming.
+http://trice.semsol.org/|title|Trice +http://trice.semsol.org/|creationTime|2008-09-09T14:50:11Z +http://www.ted.com/talks/rajesh_rao_computing_a_rosetta_stone_for_the_indus_script.html|creationDate|2014-01-11 +http://www.ted.com/talks/rajesh_rao_computing_a_rosetta_stone_for_the_indus_script.html|tag|http://www.semanlink.net/tag/civilisation_de_l_indus +http://www.ted.com/talks/rajesh_rao_computing_a_rosetta_stone_for_the_indus_script.html|tag|http://www.semanlink.net/tag/ted +http://www.ted.com/talks/rajesh_rao_computing_a_rosetta_stone_for_the_indus_script.html|tag|http://www.semanlink.net/tag/coursera_computational_neuroscience +http://www.ted.com/talks/rajesh_rao_computing_a_rosetta_stone_for_the_indus_script.html|title|Rajesh Rao: Une Pierre de Rosette pour l'écriture de l'Indus Video on TED.com +http://www.ted.com/talks/rajesh_rao_computing_a_rosetta_stone_for_the_indus_script.html|creationTime|2014-01-11T11:06:05Z +https://datascience.stackexchange.com/questions/987/text-categorization-combining-different-kind-of-features|creationDate|2017-12-06 +https://datascience.stackexchange.com/questions/987/text-categorization-combining-different-kind-of-features|tag|http://www.semanlink.net/tag/features_machine_learning +https://datascience.stackexchange.com/questions/987/text-categorization-combining-different-kind-of-features|tag|http://www.semanlink.net/tag/using_word_embedding +https://datascience.stackexchange.com/questions/987/text-categorization-combining-different-kind-of-features|tag|http://www.semanlink.net/tag/nlp_text_classification +https://datascience.stackexchange.com/questions/987/text-categorization-combining-different-kind-of-features|tag|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp +https://datascience.stackexchange.com/questions/987/text-categorization-combining-different-kind-of-features|comment|l2-normalize the dense vectors. +https://datascience.stackexchange.com/questions/987/text-categorization-combining-different-kind-of-features|title|machine learning - Text categorization: combining different kind of features - Data Science Stack Exchange +https://datascience.stackexchange.com/questions/987/text-categorization-combining-different-kind-of-features|creationTime|2017-12-06T16:51:37Z +http://lists.w3.org/Archives/Public/semantic-web/2010Jun/0077.html|creationDate|2010-06-16 +http://lists.w3.org/Archives/Public/semantic-web/2010Jun/0077.html|tag|http://www.semanlink.net/tag/rdf_graph_versioning +http://lists.w3.org/Archives/Public/semantic-web/2010Jun/0077.html|title|Diff/versioning as metadata for dynamic graphs? +http://lists.w3.org/Archives/Public/semantic-web/2010Jun/0077.html|creationTime|2010-06-16T08:40:25Z +http://www.websemanticsjournal.org/ps/pub/2005-15|creationDate|2007-02-06 +http://www.websemanticsjournal.org/ps/pub/2005-15|tag|http://www.semanlink.net/tag/entailment +http://www.websemanticsjournal.org/ps/pub/2005-15|tag|http://www.semanlink.net/tag/owl +http://www.websemanticsjournal.org/ps/pub/2005-15|tag|http://www.semanlink.net/tag/computational_complexity +http://www.websemanticsjournal.org/ps/pub/2005-15|tag|http://www.semanlink.net/tag/rdf_schema +http://www.websemanticsjournal.org/ps/pub/2005-15|comment|We prove that entailment for RDFS (RDF Schema) is decidable, NP-complete, and in P if the target graph does not contain blank nodes. We show that the standard set of entailment rules for RDFS is incomplete and that this can be corrected by allowing blank nodes in predicate position.
We define semantic extensions of RDFS that involve datatypes and a subset of the OWL vocabulary that includes the property-related vocabulary (e.g. FunctionalProperty), the comparisons (e.g. sameAs and differentFrom) and the value restrictions (e.g. allValuesFrom). These semantic extensions are in line with the 'if-semantics' of RDFS and weaker than the 'iff-semantics' of D-entailment and OWL (DL or Full). For these semantic extensions we present entailment rules, prove completeness results, prove that consistency is in P and that, just as for RDFS, entailment is NP-complete, and in P if the target graph does not contain blank nodes. There are no restrictions on use to obtain decidability: classes can be used as instances. +http://www.websemanticsjournal.org/ps/pub/2005-15|title|Horst, Herman J. ter: Completeness, decidability and complexity of entailment for RDF Schema and a semantic extension involving the OWL vocabulary +http://www.websemanticsjournal.org/ps/pub/2005-15|creationTime|2007-02-06T23:27:39Z +http://applidium.com/en/news/cracking_siri/|creationDate|2011-11-15 +http://applidium.com/en/news/cracking_siri/|tag|http://www.semanlink.net/tag/hack +http://applidium.com/en/news/cracking_siri/|tag|http://www.semanlink.net/tag/siri +http://applidium.com/en/news/cracking_siri/|title|Applidium — Cracking Siri +http://applidium.com/en/news/cracking_siri/|creationTime|2011-11-15T21:19:15Z +http://bnode.org/blog/2011/06/06/schema-org-threat-or-opportunity|creationDate|2011-06-07 +http://bnode.org/blog/2011/06/06/schema-org-threat-or-opportunity|tag|http://www.semanlink.net/tag/microdata +http://bnode.org/blog/2011/06/06/schema-org-threat-or-opportunity|tag|http://www.semanlink.net/tag/schema_org +http://bnode.org/blog/2011/06/06/schema-org-threat-or-opportunity|tag|http://www.semanlink.net/tag/benjamin_nowack +http://bnode.org/blog/2011/06/06/schema-org-threat-or-opportunity|title|Schema.org - Threat or Opportunity?
- benjamin nowack's blog +http://bnode.org/blog/2011/06/06/schema-org-threat-or-opportunity|creationTime|2011-06-07T14:03:30Z +https://www.iro.umontreal.ca/~bengioy/papers/ftml_book.pdf|creationDate|2018-11-06 +https://www.iro.umontreal.ca/~bengioy/papers/ftml_book.pdf|tag|http://www.semanlink.net/tag/good +https://www.iro.umontreal.ca/~bengioy/papers/ftml_book.pdf|tag|http://www.semanlink.net/tag/deep_learning +https://www.iro.umontreal.ca/~bengioy/papers/ftml_book.pdf|tag|http://www.semanlink.net/tag/yoshua_bengio +https://www.iro.umontreal.ca/~bengioy/papers/ftml_book.pdf|title|Learning Deep Architectures for AI By Yoshua Bengio (2009) +https://www.iro.umontreal.ca/~bengioy/papers/ftml_book.pdf|creationTime|2018-11-06T10:29:46Z +http://www.snee.com/bobdc.blog/2010/08/converting-csv-to-rdf.html|creationDate|2010-08-12 +http://www.snee.com/bobdc.blog/2010/08/converting-csv-to-rdf.html|tag|http://www.semanlink.net/tag/n3 +http://www.snee.com/bobdc.blog/2010/08/converting-csv-to-rdf.html|tag|http://www.semanlink.net/tag/csv +http://www.snee.com/bobdc.blog/2010/08/converting-csv-to-rdf.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2010/08/converting-csv-to-rdf.html|tag|http://www.semanlink.net/tag/tips +http://www.snee.com/bobdc.blog/2010/08/converting-csv-to-rdf.html|title|Converting CSV to RDF - bobdc.blog +http://www.snee.com/bobdc.blog/2010/08/converting-csv-to-rdf.html|creationTime|2010-08-12T16:05:24Z +http://www.abc.net.au/tv/fora/stories/2009/04/24/2552097-p.htm|creationDate|2009-07-20 +http://www.abc.net.au/tv/fora/stories/2009/04/24/2552097-p.htm|tag|http://www.semanlink.net/tag/rosetta_project +http://www.abc.net.au/tv/fora/stories/2009/04/24/2552097-p.htm|title|Daniel Everett: Endangered Languages and Lost Knowledge +http://www.abc.net.au/tv/fora/stories/2009/04/24/2552097-p.htm|creationTime|2009-07-20T18:54:30Z +http://www.zdnet.com/article/a-little-semantics-goes-a-long-way/|creationDate|2016-09-17 +http://www.zdnet.com/article/a-little-semantics-goes-a-long-way/|tag|http://www.semanlink.net/tag/enterprise_knowledge_graph +http://www.zdnet.com/article/a-little-semantics-goes-a-long-way/|tag|http://www.semanlink.net/tag/ibm +http://www.zdnet.com/article/a-little-semantics-goes-a-long-way/|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://www.zdnet.com/article/a-little-semantics-goes-a-long-way/|tag|http://www.semanlink.net/tag/semantic_enterprise_architecture +http://www.zdnet.com/article/a-little-semantics-goes-a-long-way/|title|What IBM, the Semantic Web Company, and Siemens are doing with semantic technologies ZDNet +http://www.zdnet.com/article/a-little-semantics-goes-a-long-way/|creationTime|2016-09-17T15:06:38Z +https://cmusphinx.github.io/wiki/phonemerecognition/|creationDate|2019-04-16 +https://cmusphinx.github.io/wiki/phonemerecognition/|tag|http://www.semanlink.net/tag/speech_recognition +https://cmusphinx.github.io/wiki/phonemerecognition/|tag|http://www.semanlink.net/tag/acronyms_nlp +https://cmusphinx.github.io/wiki/phonemerecognition/|comment|Frequently, people want to use Sphinx to do phoneme recognition. In other words, they would like to convert speech to a stream of phonemes rather than words. This is possible, although the results can be disappointing. The reason is that automatic speech recognition relies heavily on contextual constraints (i.e. language modeling) to guide the search algorithm. 
+https://cmusphinx.github.io/wiki/phonemerecognition/|title|Phoneme Recognition (caveat emptor) – CMUSphinx Open Source Speech Recognition +https://cmusphinx.github.io/wiki/phonemerecognition/|creationTime|2019-04-16T23:14:33Z +https://drive.google.com/folderview?id=0BxKBnD5y2M8NclFWSXNxa0JlZTg&usp=drive_web|creationDate|2016-02-20 +https://drive.google.com/folderview?id=0BxKBnD5y2M8NclFWSXNxa0JlZTg&usp=drive_web|tag|http://www.semanlink.net/tag/yann_lecun +https://drive.google.com/folderview?id=0BxKBnD5y2M8NclFWSXNxa0JlZTg&usp=drive_web|tag|http://www.semanlink.net/tag/college_de_france +https://drive.google.com/folderview?id=0BxKBnD5y2M8NclFWSXNxa0JlZTg&usp=drive_web|title|Yann LeCun, Collège de France +https://drive.google.com/folderview?id=0BxKBnD5y2M8NclFWSXNxa0JlZTg&usp=drive_web|creationTime|2016-02-20T14:38:31Z +http://simile.mit.edu/mail/ReadMsg?listId=14&msgId=20926|creationDate|2007-09-19 +http://simile.mit.edu/mail/ReadMsg?listId=14&msgId=20926|tag|http://www.semanlink.net/tag/tim_berners_lee +http://simile.mit.edu/mail/ReadMsg?listId=14&msgId=20926|tag|http://www.semanlink.net/tag/richard_cyganiak +http://simile.mit.edu/mail/ReadMsg?listId=14&msgId=20926|tag|http://www.semanlink.net/tag/lod_mailing_list +http://simile.mit.edu/mail/ReadMsg?listId=14&msgId=20926|tag|http://www.semanlink.net/tag/lod_limitations_on_browseable_data +http://simile.mit.edu/mail/ReadMsg?listId=14&msgId=20926|tag|http://www.semanlink.net/tag/linked_data +http://simile.mit.edu/mail/ReadMsg?listId=14&msgId=20926|comment|"The idea is to formalize the notion “If you're interested in values of a certain +property, go look over there.” +" +http://simile.mit.edu/mail/ReadMsg?listId=14&msgId=20926|title|[Linking-open-data] Returning to backlinks +http://simile.mit.edu/mail/ReadMsg?listId=14&msgId=20926|creationTime|2007-09-19T14:06:51Z +https://www.oreilly.com/ideas/open-endedness-the-last-grand-challenge-youve-never-heard-of|creationDate|2018-03-30 +https://www.oreilly.com/ideas/open-endedness-the-last-grand-challenge-youve-never-heard-of|tag|http://www.semanlink.net/tag/open_endedness +https://www.oreilly.com/ideas/open-endedness-the-last-grand-challenge-youve-never-heard-of|title|Open-endedness: The last grand challenge you’ve never heard of - O'Reilly Media +https://www.oreilly.com/ideas/open-endedness-the-last-grand-challenge-youve-never-heard-of|creationTime|2018-03-30T13:55:20Z +http://www.economist.com/news/briefing/21677228-technology-behind-bitcoin-lets-people-who-do-not-know-or-trust-each-other-build-dependable?fsrc=scn/tw/te/pe/ed/blockchains|creationDate|2015-10-31 +http://www.economist.com/news/briefing/21677228-technology-behind-bitcoin-lets-people-who-do-not-know-or-trust-each-other-build-dependable?fsrc=scn/tw/te/pe/ed/blockchains|tag|http://www.semanlink.net/tag/blockchain +http://www.economist.com/news/briefing/21677228-technology-behind-bitcoin-lets-people-who-do-not-know-or-trust-each-other-build-dependable?fsrc=scn/tw/te/pe/ed/blockchains|title|The great chain of being sure about things The Economist +http://www.economist.com/news/briefing/21677228-technology-behind-bitcoin-lets-people-who-do-not-know-or-trust-each-other-build-dependable?fsrc=scn/tw/te/pe/ed/blockchains|creationTime|2015-10-31T23:53:56Z +http://ebusiness-unibw.org/pipermail/goodrelations/2012-November/thread.html|creationDate|2012-11-15 +http://ebusiness-unibw.org/pipermail/goodrelations/2012-November/thread.html|tag|http://www.semanlink.net/tag/goodrelations 
+http://ebusiness-unibw.org/pipermail/goodrelations/2012-November/thread.html|tag|http://www.semanlink.net/tag/fps_post +http://ebusiness-unibw.org/pipermail/goodrelations/2012-November/thread.html|title|The goodrelations November 2012 Archive by thread +http://ebusiness-unibw.org/pipermail/goodrelations/2012-November/thread.html|creationTime|2012-11-15T14:56:18Z +http://neuro.imm.dtu.dk/wiki/ESWC_2012|creationDate|2012-05-30 +http://neuro.imm.dtu.dk/wiki/ESWC_2012|tag|http://www.semanlink.net/tag/eswc_2012 +http://neuro.imm.dtu.dk/wiki/ESWC_2012|title|9th Extended Semantic Web Conference - Brede Wiki +http://neuro.imm.dtu.dk/wiki/ESWC_2012|creationTime|2012-05-30T14:59:37Z +http://michaeldoig.net/4/installing-mamp-and-wordpress.htm|creationDate|2007-07-07 +http://michaeldoig.net/4/installing-mamp-and-wordpress.htm|tag|http://www.semanlink.net/tag/installing_wordpress +http://michaeldoig.net/4/installing-mamp-and-wordpress.htm|title|Installing Wordpress Locally Using MAMP — Michael Doig +http://michaeldoig.net/4/installing-mamp-and-wordpress.htm|creationTime|2007-07-07T15:38:13Z +https://bcomposes.wordpress.com/2015/11/26/simple-end-to-end-tensorflow:-examples/?utm_content=buffer46554&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2015-12-21 +https://bcomposes.wordpress.com/2015/11/26/simple-end-to-end-tensorflow:-examples/?utm_content=buffer46554&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/tutorial +https://bcomposes.wordpress.com/2015/11/26/simple-end-to-end-tensorflow:-examples/?utm_content=buffer46554&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/tensorflow +https://bcomposes.wordpress.com/2015/11/26/simple-end-to-end-tensorflow:-examples/?utm_content=buffer46554&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Simple end-to-end TensorFlow examples Bcomposes +https://bcomposes.wordpress.com/2015/11/26/simple-end-to-end-tensorflow:-examples/?utm_content=buffer46554&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2015-12-21T19:05:46Z +https://www.quora.com/When-should-we-not-use-Elasticsearch|creationDate|2017-06-18 +https://www.quora.com/When-should-we-not-use-Elasticsearch|tag|http://www.semanlink.net/tag/elasticsearch +https://www.quora.com/When-should-we-not-use-Elasticsearch|title|When should we not use Elasticsearch? 
- Quora +https://www.quora.com/When-should-we-not-use-Elasticsearch|creationTime|2017-06-18T13:35:15Z +http://n2.talis.com/wiki/SPARQL_intro|creationDate|2008-03-04 +http://n2.talis.com/wiki/SPARQL_intro|tag|http://www.semanlink.net/tag/sparql_tutorial +http://n2.talis.com/wiki/SPARQL_intro|tag|http://www.semanlink.net/tag/talis +http://n2.talis.com/wiki/SPARQL_intro|title|SPARQL intro - N2 wiki +http://n2.talis.com/wiki/SPARQL_intro|creationTime|2008-03-04T23:00:39Z +http://www.xml.com/pub/a/2005/11/09/fixing-ajax-xmlhttprequest-considered-harmful.html?page=2|creationDate|2009-05-20 +http://www.xml.com/pub/a/2005/11/09/fixing-ajax-xmlhttprequest-considered-harmful.html?page=2|tag|http://www.semanlink.net/tag/script_tag_hack +http://www.xml.com/pub/a/2005/11/09/fixing-ajax-xmlhttprequest-considered-harmful.html?page=2|title|XML.com: Fixing AJAX: XMLHttpRequest Considered Harmful +http://www.xml.com/pub/a/2005/11/09/fixing-ajax-xmlhttprequest-considered-harmful.html?page=2|creationTime|2009-05-20T23:41:13Z +http://pisani.blog.lemonde.fr/pisani/2005/05/mind_manager_vi.html|creationDate|2005-05-16 +http://pisani.blog.lemonde.fr/pisani/2005/05/mind_manager_vi.html|tag|http://www.semanlink.net/tag/transnets +http://pisani.blog.lemonde.fr/pisani/2005/05/mind_manager_vi.html|title|Transnets : Mind Manager : Visualiser la complexité, penser autrement +https://www.monumentaltrees.com/fr/|creationDate|2019-04-09 +https://www.monumentaltrees.com/fr/|tag|http://www.semanlink.net/tag/arbres_remarquables +https://www.monumentaltrees.com/fr/|title|Arbres monumentaux · un inventaire des arbres gros et anciens +https://www.monumentaltrees.com/fr/|creationTime|2019-04-09T19:49:56Z +http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su|creationDate|2019-05-27 +http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su|tag|http://www.semanlink.net/tag/kd_mkb_biblio +http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su|comment|"- ""facts only"" based embeddings + - Translational Distance Models, distance-based scoring functions + - TransE and co: entities and relations as points in vector space + - TransH: relation-specific hyperplanes. + - Gaussian Embeddings: entities and relations as random variables + - Semantic matching models, similarity-based scoring functions + - RESCAL (entities as a vector, relation as a matrix) and family + - DistMult (relation as diagonal matrix) + - Holographic Embeddings (HolE) + - etc.
+ - Matching with Neural Networks + - SME, etc +- incorporating other info: entity types, relation paths, +textual descriptions, as well as logical rules + - entity types + - semantically smooth embedding +(SSE): entities of the same type to stay close +to each other in the embedding space + - type-embodied knowledge representation +learning (TKRL), which can handle hierarchical +entity categories and multiple category labels +- Applications in downstream tasks" +http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su|title|Knowledge Graph Embedding: A Survey of Approaches and Applications - IEEE (2017) +http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su|bookmarkOf|https://ieeexplore.ieee.org/document/8047276 +http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su|creationTime|2019-05-27T18:42:06Z +http://ikewiki.salzburgresearch.at/|creationDate|2008-04-25 +http://ikewiki.salzburgresearch.at/|tag|http://www.semanlink.net/tag/semantic_wiki +http://ikewiki.salzburgresearch.at/|tag|http://www.semanlink.net/tag/kiwi_project +http://ikewiki.salzburgresearch.at/|title|IkeWiki +http://ikewiki.salzburgresearch.at/|creationTime|2008-04-25T14:53:46Z +http://www.wired.com/2014/06/the-future-of-biotech-crops/|creationDate|2014-06-24 +http://www.wired.com/2014/06/the-future-of-biotech-crops/|tag|http://www.semanlink.net/tag/ogm +http://www.wired.com/2014/06/the-future-of-biotech-crops/|title|The Next Generation of GM Crops Has Arrived—And So Has the Controversy Science WIRED +http://www.wired.com/2014/06/the-future-of-biotech-crops/|creationTime|2014-06-24T23:25:18Z +http://www.lemonde.fr/planete/article/2013/04/11/filmer-la-cruaute-envers-les-animaux-devient-un-crime-aux-etats-unis_3157361_3244.html|creationDate|2013-04-11 +http://www.lemonde.fr/planete/article/2013/04/11/filmer-la-cruaute-envers-les-animaux-devient-un-crime-aux-etats-unis_3157361_3244.html|tag|http://www.semanlink.net/tag/usa +http://www.lemonde.fr/planete/article/2013/04/11/filmer-la-cruaute-envers-les-animaux-devient-un-crime-aux-etats-unis_3157361_3244.html|tag|http://www.semanlink.net/tag/censure_et_maltraitance_animale +http://www.lemonde.fr/planete/article/2013/04/11/filmer-la-cruaute-envers-les-animaux-devient-un-crime-aux-etats-unis_3157361_3244.html|title|Filmer la cruauté envers les animaux devient un crime aux Etats-Unis +http://www.lemonde.fr/planete/article/2013/04/11/filmer-la-cruaute-envers-les-animaux-devient-un-crime-aux-etats-unis_3157361_3244.html|creationTime|2013-04-11T21:40:32Z +http://www.sitemaps.org/protocol.html|creationDate|2013-10-21 +http://www.sitemaps.org/protocol.html|tag|http://www.semanlink.net/tag/sitemaps +http://www.sitemaps.org/protocol.html|title|sitemaps.org - Protocol +http://www.sitemaps.org/protocol.html|creationTime|2013-10-21T17:02:27Z +https://openreview.net/forum?id=SyK00v5xx|creationDate|2018-05-10 +https://openreview.net/forum?id=SyK00v5xx|tag|http://www.semanlink.net/tag/good +https://openreview.net/forum?id=SyK00v5xx|tag|http://www.semanlink.net/tag/sentence_embeddings +https://openreview.net/forum?id=SyK00v5xx|tag|http://www.semanlink.net/tag/sanjeev_arora +https://openreview.net/forum?id=SyK00v5xx|tag|http://www.semanlink.net/tag/sif_embeddings +https://openreview.net/forum?id=SyK00v5xx|tag|http://www.semanlink.net/tag/singular_value_decomposition +https://openreview.net/forum?id=SyK00v5xx|comment|"> Use word embeddings computed using one of the popular methods on unlabeled corpus like Wikipedia, represent the sentence by a weighted 
average of the word vectors, and then modify them a bit using PCA/SVD + +[github project](https://github.com/PrincetonML/SIF) + +[blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2018%2F06%2F17%2Ftextembeddings%2F) + +See also [youtube: Sanjeev Arora on ""A theoretical approach to semantic representations""](https://www.youtube.com/watch?v=KR46z_V0BVw) + + + + + +" +https://openreview.net/forum?id=SyK00v5xx|relatedDoc|https://www.youtube.com/watch?v=KR46z_V0BVw +https://openreview.net/forum?id=SyK00v5xx|relatedDoc|http://www.offconvex.org/2018/06/17/textembeddings/ +https://openreview.net/forum?id=SyK00v5xx|title|A Simple but Tough-to-Beat Baseline for Sentence Embeddings (2017) +https://openreview.net/forum?id=SyK00v5xx|creationTime|2018-05-10T17:08:40Z +http://news.bbc.co.uk/2/hi/science/nature/6518161.stm|creationDate|2007-04-05 +http://news.bbc.co.uk/2/hi/science/nature/6518161.stm|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://news.bbc.co.uk/2/hi/science/nature/6518161.stm|tag|http://www.semanlink.net/tag/mineralogie +http://news.bbc.co.uk/2/hi/science/nature/6518161.stm|comment|With lengths over 11m, the giant gypsum crystals found in Mexico's Cueva de los Cristales are a great natural wonder. +http://news.bbc.co.uk/2/hi/science/nature/6518161.stm|title|Giant crystals enjoyed perfection +http://news.bbc.co.uk/2/hi/science/nature/6518161.stm|creationTime|2007-04-05T21:51:31Z +http://news.bbc.co.uk/2/hi/science/nature/6518161.stm|source|BBC +http://tinyclouds.org/colorize/|creationDate|2016-01-09 +http://tinyclouds.org/colorize/|tag|http://www.semanlink.net/tag/deep_learning +http://tinyclouds.org/colorize/|tag|http://www.semanlink.net/tag/convolutional_neural_network +http://tinyclouds.org/colorize/|title|Colorizing Black and White Photos with deep learning +http://tinyclouds.org/colorize/|creationTime|2016-01-09T00:27:11Z +http://data.semanticweb.org/conference/www/2012/paper/809/html|creationDate|2012-04-19 +http://data.semanticweb.org/conference/www/2012/paper/809/html|tag|http://www.semanlink.net/tag/www_2012 +http://data.semanticweb.org/conference/www/2012/paper/809/html|tag|http://www.semanlink.net/tag/sparql_1_1 +http://data.semanticweb.org/conference/www/2012/paper/809/html|comment|"Best paper award at www 2012. +see also +" +http://data.semanticweb.org/conference/www/2012/paper/809/html|title|Counting beyond a Yottabyte, or how SPARQL 1.1 Property Paths will prevent adoption of the standard Semantic Web Dog Food +http://data.semanticweb.org/conference/www/2012/paper/809/html|creationTime|2012-04-19T10:32:00Z +http://linter.structured-data.org/|creationDate|2013-06-24 +http://linter.structured-data.org/|tag|http://www.semanlink.net/tag/validation +http://linter.structured-data.org/|tag|http://www.semanlink.net/tag/rdfa_tool +http://linter.structured-data.org/|comment|Enter a URL to see what structured data your page contains. 
+http://linter.structured-data.org/|title|Structured Data Linter +http://linter.structured-data.org/|creationTime|2013-06-24T23:31:02Z +https://backchannel.com/an-exclusive-look-at-how-ai-and-machine-learning-work-at-apple-8dbfb131932b#.3jskir300|creationDate|2016-08-25 +https://backchannel.com/an-exclusive-look-at-how-ai-and-machine-learning-work-at-apple-8dbfb131932b#.3jskir300|tag|http://www.semanlink.net/tag/apple +https://backchannel.com/an-exclusive-look-at-how-ai-and-machine-learning-work-at-apple-8dbfb131932b#.3jskir300|tag|http://www.semanlink.net/tag/machine_learning +https://backchannel.com/an-exclusive-look-at-how-ai-and-machine-learning-work-at-apple-8dbfb131932b#.3jskir300|title|An Exclusive Look at How AI and Machine Learning Work at Apple – Backchannel +https://backchannel.com/an-exclusive-look-at-how-ai-and-machine-learning-work-at-apple-8dbfb131932b#.3jskir300|creationTime|2016-08-25T01:00:18Z +http://www.nytimes.com/2014/02/20/opinion/kristof-is-that-sausage-worth-this.html?_r=0|creationDate|2014-02-20 +http://www.nytimes.com/2014/02/20/opinion/kristof-is-that-sausage-worth-this.html?_r=0|tag|http://www.semanlink.net/tag/elevage_industriel +http://www.nytimes.com/2014/02/20/opinion/kristof-is-that-sausage-worth-this.html?_r=0|tag|http://www.semanlink.net/tag/elevage_porcin +http://www.nytimes.com/2014/02/20/opinion/kristof-is-that-sausage-worth-this.html?_r=0|tag|http://www.semanlink.net/tag/censure_et_maltraitance_animale +http://www.nytimes.com/2014/02/20/opinion/kristof-is-that-sausage-worth-this.html?_r=0|comment|Deeply disassociated from so much of the animal cruelty in our society +http://www.nytimes.com/2014/02/20/opinion/kristof-is-that-sausage-worth-this.html?_r=0|title|Is That Sausage Worth This? - NYTimes.com +http://www.nytimes.com/2014/02/20/opinion/kristof-is-that-sausage-worth-this.html?_r=0|creationTime|2014-02-20T20:40:41Z +http://messenger.jhuapl.edu/|creationDate|2008-01-15 +http://messenger.jhuapl.edu/|tag|http://www.semanlink.net/tag/messenger +http://messenger.jhuapl.edu/|title|MESSENGER Web Site +http://messenger.jhuapl.edu/|creationTime|2008-01-15T14:02:08Z +https://www.ghostery.com|creationDate|2015-01-24 +https://www.ghostery.com|tag|http://www.semanlink.net/tag/privacy_and_internet +https://www.ghostery.com|title|Ghostery, Inc. +https://www.ghostery.com|creationTime|2015-01-24T11:36:26Z +http://www.alphaworks.ibm.com/tech/sher|creationDate|2008-07-19 +http://www.alphaworks.ibm.com/tech/sher|tag|http://www.semanlink.net/tag/ontologies +http://www.alphaworks.ibm.com/tech/sher|tag|http://www.semanlink.net/tag/reasoning +http://www.alphaworks.ibm.com/tech/sher|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.alphaworks.ibm.com/tech/sher|comment|Scalable Highly Expressive Reasoner (SHER) is a breakthrough technology that provides ontology analytics over highly expressive ontologies (OWL-DL without nominals). SHER does not do any inferencing on load; hence it deals better with quickly changing data (the downside is, of course, that reasoning is performed at query time). The tool can reason on approximately seven million triples in seconds, and it scales to data sets with 60 million triples, responding to queries in minutes. It has been used to semantically index 300 million triples from medical literature. SHER tolerates logical inconsistencies in the data, and it can quickly point you to these inconsistencies in the data and help you clean up inconsistencies before issuing semantic queries. 
The tool explains (or justifies) why a particular result set is an answer to the query; this explanation is useful for validation by domain experts. +http://www.alphaworks.ibm.com/tech/sher|title|alphaWorks : Scalable Highly Expressive Reasoner : Overview +http://www.alphaworks.ibm.com/tech/sher|creationTime|2008-07-19T18:14:06Z +http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/|creationDate|2014-03-01 +http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/|tag|http://www.semanlink.net/tag/cerveau +http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/|tag|http://www.semanlink.net/tag/guepe +http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/|tag|http://www.semanlink.net/tag/zombie +http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/|tag|http://www.semanlink.net/tag/parasitisme +http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/|tag|http://www.semanlink.net/tag/cafard +http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/|tag|http://www.semanlink.net/tag/free_will +http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/|title|Absurd Creature of the Week: The Wasp That Enslaves Cockroaches With a Sting to the Brain - Wired Science +http://www.wired.com/wiredscience/2014/02/absurd-creature-of-the-week-jewel-wasp/|creationTime|2014-03-01T21:49:58Z +http://www.nytimes.com/2013/09/17/science/dna-double-take.html|creationDate|2013-09-22 +http://www.nytimes.com/2013/09/17/science/dna-double-take.html|tag|http://www.semanlink.net/tag/chimere +http://www.nytimes.com/2013/09/17/science/dna-double-take.html|tag|http://www.semanlink.net/tag/genetique +http://www.nytimes.com/2013/09/17/science/dna-double-take.html|tag|http://www.semanlink.net/tag/adn +http://www.nytimes.com/2013/09/17/science/dna-double-take.html|title|DNA Double Take - NYTimes.com +http://www.nytimes.com/2013/09/17/science/dna-double-take.html|creationTime|2013-09-22T11:32:23Z +http://www.lemonde.fr/festival/article/2014/07/03/21-juillet-2012-le-monde-change-la-vie-de-darina-al-joundi_4450404_4415198.html|creationDate|2014-07-03 +http://www.lemonde.fr/festival/article/2014/07/03/21-juillet-2012-le-monde-change-la-vie-de-darina-al-joundi_4450404_4415198.html|tag|http://www.semanlink.net/tag/immigration +http://www.lemonde.fr/festival/article/2014/07/03/21-juillet-2012-le-monde-change-la-vie-de-darina-al-joundi_4450404_4415198.html|tag|http://www.semanlink.net/tag/valls +http://www.lemonde.fr/festival/article/2014/07/03/21-juillet-2012-le-monde-change-la-vie-de-darina-al-joundi_4450404_4415198.html|title|Quand un article du « Monde », lu par Manuel Valls, a changé la vie de Darina Al-Joundi +http://www.lemonde.fr/festival/article/2014/07/03/21-juillet-2012-le-monde-change-la-vie-de-darina-al-joundi_4450404_4415198.html|creationTime|2014-07-03T21:53:06Z +http://crave.cnet.co.uk/desktops/0,39029426,49256662,00.htm|creationDate|2006-03-11 +http://crave.cnet.co.uk/desktops/0,39029426,49256662,00.htm|tag|http://www.semanlink.net/tag/center_media_microsoft +http://crave.cnet.co.uk/desktops/0,39029426,49256662,00.htm|tag|http://www.semanlink.net/tag/mac_mini +http://crave.cnet.co.uk/desktops/0,39029426,49256662,00.htm|title|Mac Mini vs. 
Microsoft Media Center: Round 1 - Crave at CNET.co.uk +http://www.asteraweke.com/11162.jpg|creationDate|2007-09-29 +http://www.asteraweke.com/11162.jpg|tag|http://www.semanlink.net/tag/aster_aweke +http://www.asteraweke.com/11162.jpg|title|Aster Aweke +http://www.asteraweke.com/11162.jpg|creationTime|2007-09-29T01:59:49Z +http://www.aclweb.org/anthology/Q15-1016|creationDate|2017-11-11 +http://www.aclweb.org/anthology/Q15-1016|tag|http://www.semanlink.net/tag/word_embedding +http://www.aclweb.org/anthology/Q15-1016|tag|http://www.semanlink.net/tag/distributional_semantics +http://www.aclweb.org/anthology/Q15-1016|tag|http://www.semanlink.net/tag/pointwise_mutual_information +http://www.aclweb.org/anthology/Q15-1016|tag|http://www.semanlink.net/tag/yoav_goldberg +http://www.aclweb.org/anthology/Q15-1016|comment|"> We reveal that much of the performance gains of word embeddings are due to certain system design choices and hyperparameter optimizations, rather than the embedding algorithms themselves. Furthermore, we show that these modifications can be transferred to traditional distributional models, yielding similar gains. In contrast to prior reports, we observe mostly local or insignificant performance differences between the methods, with no global advantage to any single approach over the others. + +" +http://www.aclweb.org/anthology/Q15-1016|title|Improving Distributional Similarity with Lessons Learned from Word Embeddings (O Levy - 2015) +http://www.aclweb.org/anthology/Q15-1016|creationTime|2017-11-11T22:01:16Z +http://www.nature.com/news/2006/060605/full/060605-7.html|creationDate|2006-06-08 +http://www.nature.com/news/2006/060605/full/060605-7.html|tag|http://www.semanlink.net/tag/origine_de_la_vie +http://www.nature.com/news/2006/060605/full/060605-7.html|tag|http://www.semanlink.net/tag/stromatolithes +http://www.nature.com/news/2006/060605/full/060605-7.html|comment|New studies of the Strelley Pool stromatolites in Australia, which are more than 3.4 billion years old, suggest that they were shaped by living organisms.
+http://www.nature.com/news/2006/060605/full/060605-7.html|title|Complex ecosystems arrived early +http://www.nature.com/news/2006/060605/full/060605-7.html|source|Nature +http://news.bbc.co.uk/2/hi/science/nature/8040073.stm|creationDate|2009-05-12 +http://news.bbc.co.uk/2/hi/science/nature/8040073.stm|tag|http://www.semanlink.net/tag/herschel_telescope +http://news.bbc.co.uk/2/hi/science/nature/8040073.stm|title|BBC NEWS World's most daunting parking job +http://news.bbc.co.uk/2/hi/science/nature/8040073.stm|creationTime|2009-05-12T13:37:16Z +http://news.bbc.co.uk/2/hi/science/nature/8040073.stm|source|BBC +http://www.lemonde.fr/pixels/article/2014/06/04/l-etrange-disparition-du-logiciel-truecrypt_4431134_4408996.html|creationDate|2014-06-05 +http://www.lemonde.fr/pixels/article/2014/06/04/l-etrange-disparition-du-logiciel-truecrypt_4431134_4408996.html|tag|http://www.semanlink.net/tag/encryption +http://www.lemonde.fr/pixels/article/2014/06/04/l-etrange-disparition-du-logiciel-truecrypt_4431134_4408996.html|tag|http://www.semanlink.net/tag/edward_snowden +http://www.lemonde.fr/pixels/article/2014/06/04/l-etrange-disparition-du-logiciel-truecrypt_4431134_4408996.html|title|L'étrange disparition du logiciel de chiffrement TrueCrypt +http://www.lemonde.fr/pixels/article/2014/06/04/l-etrange-disparition-du-logiciel-truecrypt_4431134_4408996.html|creationTime|2014-06-05T08:37:10Z +http://trimc-devops.blogspot.fr/2015/03/running-docker-applications-apache.html|creationDate|2016-04-06 +http://trimc-devops.blogspot.fr/2015/03/running-docker-applications-apache.html|tag|http://www.semanlink.net/tag/docker_tomcat +http://trimc-devops.blogspot.fr/2015/03/running-docker-applications-apache.html|title|DevOps: Docker and Apache Tomcat +http://trimc-devops.blogspot.fr/2015/03/running-docker-applications-apache.html|creationTime|2016-04-06T15:34:06Z +http://google.blognewschannel.com/index.php/archives/2005/10/10/google-adds-tagging/|creationDate|2005-10-13 +http://google.blognewschannel.com/index.php/archives/2005/10/10/google-adds-tagging/|tag|http://www.semanlink.net/tag/tagging +http://google.blognewschannel.com/index.php/archives/2005/10/10/google-adds-tagging/|tag|http://www.semanlink.net/tag/google +http://google.blognewschannel.com/index.php/archives/2005/10/10/google-adds-tagging/|comment|Google has silently added a Bookmarks feature to My Search History, enabling you to quickly tag and comment any web page you’ve visited. +http://google.blognewschannel.com/index.php/archives/2005/10/10/google-adds-tagging/|title|Google Adds Tagging +http://www.lyricsbay.com/o_tempo_nao_para_lyrics-cazuza.html|creationDate|2008-09-12 +http://www.lyricsbay.com/o_tempo_nao_para_lyrics-cazuza.html|tag|http://www.semanlink.net/tag/lyrics +http://www.lyricsbay.com/o_tempo_nao_para_lyrics-cazuza.html|tag|http://www.semanlink.net/tag/bresil +http://www.lyricsbay.com/o_tempo_nao_para_lyrics-cazuza.html|tag|http://www.semanlink.net/tag/cazuza +http://www.lyricsbay.com/o_tempo_nao_para_lyrics-cazuza.html|comment|"Te chamam de ladrao, de bicha, maconheiro
+Transformam o pais inteiro num puteiro
+Pois assim se ganha mais dinheiro" +http://www.lyricsbay.com/o_tempo_nao_para_lyrics-cazuza.html|title|O TEMPO NAO PARA lyrics CAZUZA +http://www.lyricsbay.com/o_tempo_nao_para_lyrics-cazuza.html|creationTime|2008-09-12T18:54:17Z +http://lists.w3.org/Archives/Public/public-swd-wg/2007Apr/0015.html|creationDate|2008-05-14 +http://lists.w3.org/Archives/Public/public-swd-wg/2007Apr/0015.html|tag|http://www.semanlink.net/tag/semantics_of_skos_concept +http://lists.w3.org/Archives/Public/public-swd-wg/2007Apr/0015.html|tag|http://www.semanlink.net/tag/semantic_tagging +http://lists.w3.org/Archives/Public/public-swd-wg/2007Apr/0015.html|tag|http://www.semanlink.net/tag/powder +http://lists.w3.org/Archives/Public/public-swd-wg/2007Apr/0015.html|title|Using tags for POWDER content labels +http://lists.w3.org/Archives/Public/public-swd-wg/2007Apr/0015.html|creationTime|2008-05-14T21:30:27Z +http://commontag.org/mappings|creationDate|2011-01-03 +http://commontag.org/mappings|tag|http://www.semanlink.net/tag/common_tag +http://commontag.org/mappings|title|Mappings between the CommonTag vocabulary and existing vocabularies +http://commontag.org/mappings|creationTime|2011-01-03T11:23:30Z +http://www.newyorker.com/news/daily-comment/isis-murdered-kenji-goto|creationDate|2015-02-06 +http://www.newyorker.com/news/daily-comment/isis-murdered-kenji-goto|tag|http://www.semanlink.net/tag/new_yorker +http://www.newyorker.com/news/daily-comment/isis-murdered-kenji-goto|tag|http://www.semanlink.net/tag/ei +http://www.newyorker.com/news/daily-comment/isis-murdered-kenji-goto|title|Why ISIS Murdered Kenji Goto - The New Yorker +http://www.newyorker.com/news/daily-comment/isis-murdered-kenji-goto|creationTime|2015-02-06T10:55:27Z +http://rdf.greggkellogg.net/distiller|creationDate|2014-07-21 +http://rdf.greggkellogg.net/distiller|tag|http://www.semanlink.net/tag/rdf_validator +http://rdf.greggkellogg.net/distiller|title|RDF Distiller +http://rdf.greggkellogg.net/distiller|creationTime|2014-07-21T02:24:12Z +http://zitgist.com/|creationDate|2007-05-23 +http://zitgist.com/|tag|http://www.semanlink.net/tag/zitgist +http://zitgist.com/|title|Zitgist: the Semantic Web Query Service +http://zitgist.com/|creationTime|2007-05-23T21:11:11Z +http://www.arte-tv.com/fr/search__results/1095558.html|creationDate|2006-01-29 +http://www.arte-tv.com/fr/search__results/1095558.html|tag|http://www.semanlink.net/tag/aratta +http://www.arte-tv.com/fr/search__results/1095558.html|tag|http://www.semanlink.net/tag/sumer +http://www.arte-tv.com/fr/search__results/1095558.html|tag|http://www.semanlink.net/tag/jiroft +http://www.arte-tv.com/fr/search__results/1095558.html|tag|http://www.semanlink.net/tag/civilisation_elamite +http://www.arte-tv.com/fr/search__results/1095558.html|tag|http://www.semanlink.net/tag/mesopotamie +http://www.arte-tv.com/fr/search__results/1095558.html|title|Sélection de liens sur Aratta - ARTE +http://www.simongbrown.com/blog/2005/07/06/delicious_jsp.html|creationDate|2006-09-25 +http://www.simongbrown.com/blog/2005/07/06/delicious_jsp.html|tag|http://www.semanlink.net/tag/delicious_java +http://www.simongbrown.com/blog/2005/07/06/delicious_jsp.html|title|delicious-jsp - Simon Brown +http://internetalchemy.org/2005/11/naked-metadata-using-embedded-rdf|creationDate|2005-11-10 +http://internetalchemy.org/2005/11/naked-metadata-using-embedded-rdf|tag|http://www.semanlink.net/tag/semantic_markup_in_html +http://internetalchemy.org/2005/11/naked-metadata-using-embedded-rdf|title|Internet Alchemy Naked Metadata Using 
Embedded RDF +http://www.w3.org/2005/Incubator/urw3/|creationDate|2007-03-16 +http://www.w3.org/2005/Incubator/urw3/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2005/Incubator/urw3/|tag|http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web +http://www.w3.org/2005/Incubator/urw3/|title|W3C Uncertainty Reasoning for the World Wide Web Incubator Group +http://www.w3.org/2005/Incubator/urw3/|creationTime|2007-03-16T18:38:53Z +http://news.bbc.co.uk/2/hi/science_and_environment/10196807.stm|creationDate|2010-06-04 +http://news.bbc.co.uk/2/hi/science_and_environment/10196807.stm|tag|http://www.semanlink.net/tag/hayabusa +http://news.bbc.co.uk/2/hi/science_and_environment/10196807.stm|title|BBC News - Asteroid probe 'on home straight' +http://news.bbc.co.uk/2/hi/science_and_environment/10196807.stm|creationTime|2010-06-04T13:25:08Z +https://www.theguardian.com/us-news/2017/may/13/chelsea-manning-freedom-us-military-wikileaks|creationDate|2017-05-13 +https://www.theguardian.com/us-news/2017/may/13/chelsea-manning-freedom-us-military-wikileaks|tag|http://www.semanlink.net/tag/chelsea_manning +https://www.theguardian.com/us-news/2017/may/13/chelsea-manning-freedom-us-military-wikileaks|title|Chelsea Manning prepares for freedom: 'I want to breathe the warm spring air' US news The Guardian +https://www.theguardian.com/us-news/2017/may/13/chelsea-manning-freedom-us-military-wikileaks|creationTime|2017-05-13T18:30:00Z +https://www.lemonde.fr/sciences/article/2018/11/26/des-bebes-genetiquement-modifies-seraient-nes-en-chine_5388942_1650684.html|creationDate|2018-11-27 +https://www.lemonde.fr/sciences/article/2018/11/26/des-bebes-genetiquement-modifies-seraient-nes-en-chine_5388942_1650684.html|tag|http://www.semanlink.net/tag/crispr_cas9 +https://www.lemonde.fr/sciences/article/2018/11/26/des-bebes-genetiquement-modifies-seraient-nes-en-chine_5388942_1650684.html|tag|http://www.semanlink.net/tag/modification_du_genome_humain +https://www.lemonde.fr/sciences/article/2018/11/26/des-bebes-genetiquement-modifies-seraient-nes-en-chine_5388942_1650684.html|title|Des bébés génétiquement modifiés seraient nés en Chine +https://www.lemonde.fr/sciences/article/2018/11/26/des-bebes-genetiquement-modifies-seraient-nes-en-chine_5388942_1650684.html|creationTime|2018-11-27T00:11:45Z +http://fr.wikipedia.org/wiki/Matthieu_Pigasse|creationDate|2012-03-16 +http://fr.wikipedia.org/wiki/Matthieu_Pigasse|tag|http://www.semanlink.net/tag/punk +http://fr.wikipedia.org/wiki/Matthieu_Pigasse|tag|http://www.semanlink.net/tag/banque +http://fr.wikipedia.org/wiki/Matthieu_Pigasse|tag|http://www.semanlink.net/tag/parti_socialiste +http://fr.wikipedia.org/wiki/Matthieu_Pigasse|title|Matthieu Pigasse - Wikipédia +http://fr.wikipedia.org/wiki/Matthieu_Pigasse|creationTime|2012-03-16T00:52:08Z +http://www.w3.org/Submission/2008/SUBM-SPARQL-Update-20080715/|creationDate|2008-10-21 +http://www.w3.org/Submission/2008/SUBM-SPARQL-Update-20080715/|tag|http://www.semanlink.net/tag/w3c_submission +http://www.w3.org/Submission/2008/SUBM-SPARQL-Update-20080715/|tag|http://www.semanlink.net/tag/sparql_update +http://www.w3.org/Submission/2008/SUBM-SPARQL-Update-20080715/|title|SPARQL Update +http://www.w3.org/Submission/2008/SUBM-SPARQL-Update-20080715/|creationTime|2008-10-21T14:36:34Z +https://lejournal.cnrs.fr/articles/france-terre-de-dinosaures?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530943412|creationDate|2018-07-07 
+https://lejournal.cnrs.fr/articles/france-terre-de-dinosaures?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530943412|tag|http://www.semanlink.net/tag/dinosaures +https://lejournal.cnrs.fr/articles/france-terre-de-dinosaures?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530943412|title|France, terre de dinosaures CNRS Le journal +https://lejournal.cnrs.fr/articles/france-terre-de-dinosaures?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#Echobox=1530943412|creationTime|2018-07-07T10:04:33Z +http://www.csie.ntu.edu.tw/~cjlin/libshorttext/doc/|creationDate|2014-03-26 +http://www.csie.ntu.edu.tw/~cjlin/libshorttext/doc/|tag|http://www.semanlink.net/tag/libshorttext +http://www.csie.ntu.edu.tw/~cjlin/libshorttext/doc/|title|Welcome to LibShortText documentation! — LibShortText 1.1 documentation +http://www.csie.ntu.edu.tw/~cjlin/libshorttext/doc/|creationTime|2014-03-26T14:56:13Z +http://www.smartlogic.com/|creationDate|2013-10-04 +http://www.smartlogic.com/|tag|http://www.semanlink.net/tag/makolab_semantic_day +http://www.smartlogic.com/|title|Content Intelligence Software for automatic content classification, text analysis and information visualization. +http://www.smartlogic.com/|creationTime|2013-10-04T00:22:34Z +http://christop.club/2014/05/06/using-gensim-for-lda/|creationDate|2017-06-02 +http://christop.club/2014/05/06/using-gensim-for-lda/|tag|http://www.semanlink.net/tag/latent_dirichlet_allocation +http://christop.club/2014/05/06/using-gensim-for-lda/|tag|http://www.semanlink.net/tag/gensim +http://christop.club/2014/05/06/using-gensim-for-lda/|tag|http://www.semanlink.net/tag/nlp_sample_code +http://christop.club/2014/05/06/using-gensim-for-lda/|title|Using Gensim for LDA (notebook) +http://christop.club/2014/05/06/using-gensim-for-lda/|creationTime|2017-06-02T02:45:23Z +http://blog.datagraph.org/2010/04/rdf-nosql-diff|creationDate|2014-10-14 +http://blog.datagraph.org/2010/04/rdf-nosql-diff|tag|http://www.semanlink.net/tag/triplestore +http://blog.datagraph.org/2010/04/rdf-nosql-diff|tag|http://www.semanlink.net/tag/nosql +http://blog.datagraph.org/2010/04/rdf-nosql-diff|title|How RDF Databases Differ from Other NoSQL Solutions - The Datagraph Blog +http://blog.datagraph.org/2010/04/rdf-nosql-diff|creationTime|2014-10-14T02:15:52Z +http://www.comtech-serv.com/dita.shtml|creationDate|2011-05-20 +http://www.comtech-serv.com/dita.shtml|tag|http://www.semanlink.net/tag/dita +http://www.comtech-serv.com/dita.shtml|title|Hot Topics: DITA - Darwin Information Typing Architecture +http://www.comtech-serv.com/dita.shtml|creationTime|2011-05-20T14:55:42Z +http://docs.api.talis.com/platform-api/output-types/rdf-json|creationDate|2012-02-20 +http://docs.api.talis.com/platform-api/output-types/rdf-json|tag|http://www.semanlink.net/tag/talis_rdf_json +http://docs.api.talis.com/platform-api/output-types/rdf-json|title|RDF JSON - docs.api (Talis) +http://docs.api.talis.com/platform-api/output-types/rdf-json|creationTime|2012-02-20T23:32:29Z +https://console.ng.bluemix.net/data/exchange/public/entry/view/ac418581e657fc785fe9573c1013c3a6|creationDate|2017-06-08 +https://console.ng.bluemix.net/data/exchange/public/entry/view/ac418581e657fc785fe9573c1013c3a6|tag|http://www.semanlink.net/tag/i_b_m_s_watson +https://console.ng.bluemix.net/data/exchange/public/entry/view/ac418581e657fc785fe9573c1013c3a6|tag|http://www.semanlink.net/tag/survey_analysis 
+https://console.ng.bluemix.net/data/exchange/public/entry/view/ac418581e657fc785fe9573c1013c3a6|comment|Use this storybook to analyze results of surveys from online tools such as SurveyMonkey +https://console.ng.bluemix.net/data/exchange/public/entry/view/ac418581e657fc785fe9573c1013c3a6|title|Survey results analysis - Analytics Exchange +https://console.ng.bluemix.net/data/exchange/public/entry/view/ac418581e657fc785fe9573c1013c3a6|creationTime|2017-06-08T14:06:31Z +http://web.comlab.ox.ac.uk/oucl/work/boris.motik/publications/mgs08-structured-objects.pdf|creationDate|2008-04-23 +http://web.comlab.ox.ac.uk/oucl/work/boris.motik/publications/mgs08-structured-objects.pdf|tag|http://www.semanlink.net/tag/owl_dl +http://web.comlab.ox.ac.uk/oucl/work/boris.motik/publications/mgs08-structured-objects.pdf|tag|http://www.semanlink.net/tag/rules +http://web.comlab.ox.ac.uk/oucl/work/boris.motik/publications/mgs08-structured-objects.pdf|tag|http://www.semanlink.net/tag/reasoning +http://web.comlab.ox.ac.uk/oucl/work/boris.motik/publications/mgs08-structured-objects.pdf|tag|http://www.semanlink.net/tag/www08 +http://web.comlab.ox.ac.uk/oucl/work/boris.motik/publications/mgs08-structured-objects.pdf|comment|"Very good presentation at WWW 2008. Nominated for the best paper award
+Abstract: Applications of semantic technologies often require the representation of and reasoning with structured objects—that is, objects composed of parts connected in complex ways. Although OWL is a general and powerful language, its class descriptions and axioms cannot be used to describe arbitrarily connected structures. An OWL representation of structured objects can thus be underconstrained, which reduces the inferences that can be drawn and causes performance problems in reasoning. To address these problems, we extend OWL with description graphs, which allow for the description of structured objects in a simple and precise way. To represent conditional aspects of the domain, we also allow for SWRL-like rules over description graphs. Based on an observation about the nature of structured objects, we ensure decidability of our formalism. We also present a hypertableau-based decision procedure, which we implemented in the HermiT reasoner. To evaluate its performance, we have extracted description graphs from the GALEN and FMA ontologies, classified them successfully, and even detected a modeling error in GALEN. + +" +http://web.comlab.ox.ac.uk/oucl/work/boris.motik/publications/mgs08-structured-objects.pdf|title|Structured Objects in OWL: Representation and Reasoning. In Proc. of the 17th Int. World Wide Web Conference (WWW 2008), Beijing +http://web.comlab.ox.ac.uk/oucl/work/boris.motik/publications/mgs08-structured-objects.pdf|creationTime|2008-04-23T11:14:12Z +https://spark.apache.org/|creationDate|2014-12-18 +https://spark.apache.org/|tag|http://www.semanlink.net/tag/apache_spark +https://spark.apache.org/|comment|"""a fast and general engine for large-scale data processing"" ""fast and general-purpose cluster computing system""
+Apache Spark is a fast and general-purpose cluster computing system. It provides high-level APIs in Java, Scala and Python, and an optimized engine that supports general execution graphs. It also supports a rich set of higher-level tools including Spark SQL for SQL and structured data processing, MLlib for machine learning, GraphX for graph processing, and Spark Streaming. + + +" +https://spark.apache.org/|title|Apache Spark +https://spark.apache.org/|creationTime|2014-12-18T11:43:50Z +http://www.brockman.se/writing/method-references.html.utf8|creationDate|2006-06-19 +http://www.brockman.se/writing/method-references.html.utf8|tag|http://www.semanlink.net/tag/javascript +http://www.brockman.se/writing/method-references.html.utf8|comment|"A general solution to the simple but common problem of attaching + event listeners to HTML elements owned by + object-oriented code is presented. The solution does not rely on + the questionable method of injecting hidden backreferences to the + application logic into the HTML elements; rather, + the listeners are connected to the application logic in a purely + hygienic fashion through the use of lexical closures." +http://www.brockman.se/writing/method-references.html.utf8|title|Object-Oriented Event Listening through Partial Application in JavaScript +https://www.theguardian.com/news/2017/nov/05/paradise-papers-leak-reveals-secrets-of-world-elites-hidden-wealth?CMP=twt_gu|creationDate|2017-11-06 +https://www.theguardian.com/news/2017/nov/05/paradise-papers-leak-reveals-secrets-of-world-elites-hidden-wealth?CMP=twt_gu|tag|http://www.semanlink.net/tag/paradise_papers +https://www.theguardian.com/news/2017/nov/05/paradise-papers-leak-reveals-secrets-of-world-elites-hidden-wealth?CMP=twt_gu|title|Paradise Papers leak reveals secrets of the world elite's hidden wealth News The Guardian +https://www.theguardian.com/news/2017/nov/05/paradise-papers-leak-reveals-secrets-of-world-elites-hidden-wealth?CMP=twt_gu|creationTime|2017-11-06T13:12:07Z +http://ejohn.org/blog/processingjs/|creationDate|2008-05-11 +http://ejohn.org/blog/processingjs/|tag|http://www.semanlink.net/tag/javascript +http://ejohn.org/blog/processingjs/|comment|a Javascript port of the Processing Visualization Language and a first step towards Javascript being a rival to Flash for online graphics content +http://ejohn.org/blog/processingjs/|title|John Resig - Processing.js +http://ejohn.org/blog/processingjs/|creationTime|2008-05-11T22:30:44Z +http://www.vocabs.org/|creationDate|2013-03-18 +http://www.vocabs.org/|tag|http://www.semanlink.net/tag/online_tool +http://www.vocabs.org/|tag|http://www.semanlink.net/tag/publishing_rdf_vocabularies +http://www.vocabs.org/|tag|http://www.semanlink.net/tag/rdf_tools +http://www.vocabs.org/|comment|This site uses powerful community-driven voting-based approaches, such as the ones used on StackOverflow or Hacker News, to allow you to discuss about an ontology and write triple definitions directly in the comments you're making.... So in essence this is very similar to opening your text editor, and writing some Turtle syntax down. The cool part is that it happens online, so other people can immediately comment and validate what you wrote.
+http://www.vocabs.org/|title|RDF vocabularies +http://www.vocabs.org/|creationTime|2013-03-18T13:39:40Z +http://www.rdfabout.com/demo/validator/|creationDate|2009-04-01 +http://www.rdfabout.com/demo/validator/|tag|http://www.semanlink.net/tag/sw_online_tools +http://www.rdfabout.com/demo/validator/|tag|http://www.semanlink.net/tag/rdf_validator +http://www.rdfabout.com/demo/validator/|tag|http://www.semanlink.net/tag/n3 +http://www.rdfabout.com/demo/validator/|title|Online N3 Validator +http://www.rdfabout.com/demo/validator/|creationTime|2009-04-01T01:43:35Z +http://passeurdesciences.blog.lemonde.fr/2013/08/18/la-strategie-diabolique-des-futures-plantes-ogm/|creationDate|2013-08-19 +http://passeurdesciences.blog.lemonde.fr/2013/08/18/la-strategie-diabolique-des-futures-plantes-ogm/|tag|http://www.semanlink.net/tag/arn +http://passeurdesciences.blog.lemonde.fr/2013/08/18/la-strategie-diabolique-des-futures-plantes-ogm/|tag|http://www.semanlink.net/tag/ogm +http://passeurdesciences.blog.lemonde.fr/2013/08/18/la-strategie-diabolique-des-futures-plantes-ogm/|title|La stratégie diabolique des futures plantes OGM Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2013/08/18/la-strategie-diabolique-des-futures-plantes-ogm/|creationTime|2013-08-19T09:13:30Z +http://owled2007.iut-velizy.uvsq.fr/PapersPDF/submission_18.pdf|creationDate|2007-06-07 +http://owled2007.iut-velizy.uvsq.fr/PapersPDF/submission_18.pdf|tag|http://www.semanlink.net/tag/owled_2007 +http://owled2007.iut-velizy.uvsq.fr/PapersPDF/submission_18.pdf|tag|http://www.semanlink.net/tag/owl +http://owled2007.iut-velizy.uvsq.fr/PapersPDF/submission_18.pdf|comment|"Rob Shearer The University of Manchester
+(nice demo of an Ajax script at OWLED 2007)
+Abstract. This paper presents a simple data model for the representation +of OWL ontologies (including the new features of OWL 1.1). The +model is built from basic structures native to all common programming +environments, so it can be used directly as an API for ontology analysis +and manipulation. Furthermore, serialization of these structures using +the widely-supported YAML standard yields a readable text format +suitable for ontology authoring by average users with text editors and +code-management tools. +" +http://owled2007.iut-velizy.uvsq.fr/PapersPDF/submission_18.pdf|title|Structured Ontology Format +http://owled2007.iut-velizy.uvsq.fr/PapersPDF/submission_18.pdf|creationTime|2007-06-07T16:08:28Z +http://fr.wikipedia.org/wiki/Milou_en_mai|creationDate|2006-09-20 +http://fr.wikipedia.org/wiki/Milou_en_mai|tag|http://www.semanlink.net/tag/film_francais +http://fr.wikipedia.org/wiki/Milou_en_mai|tag|http://www.semanlink.net/tag/mai_68 +http://fr.wikipedia.org/wiki/Milou_en_mai|comment|Film français de Louis Malle (1990) avec Michel Piccoli et Miou-Miou. +http://fr.wikipedia.org/wiki/Milou_en_mai|title|Milou en mai +http://blocodosargentopimenta.com.br/|creationDate|2013-05-21 +http://blocodosargentopimenta.com.br/|tag|http://www.semanlink.net/tag/musique +http://blocodosargentopimenta.com.br/|tag|http://www.semanlink.net/tag/www_2013 +http://blocodosargentopimenta.com.br/|tag|http://www.semanlink.net/tag/beatles +http://blocodosargentopimenta.com.br/|tag|http://www.semanlink.net/tag/samba +http://blocodosargentopimenta.com.br/|tag|http://www.semanlink.net/tag/rio_de_janeiro +http://blocodosargentopimenta.com.br/|title|Bloco do Sargento Pimenta +http://blocodosargentopimenta.com.br/|creationTime|2013-05-21T08:48:25Z +http://www.oracle.com/technetwork/articles/java/micro-1925135.html|creationDate|2014-04-22 +http://www.oracle.com/technetwork/articles/java/micro-1925135.html|tag|http://www.semanlink.net/tag/topic_modeling +http://www.oracle.com/technetwork/articles/java/micro-1925135.html|tag|http://www.semanlink.net/tag/mallet +http://www.oracle.com/technetwork/articles/java/micro-1925135.html|tag|http://www.semanlink.net/tag/microblogs +http://www.oracle.com/technetwork/articles/java/micro-1925135.html|tag|http://www.semanlink.net/tag/latent_dirichlet_allocation +http://www.oracle.com/technetwork/articles/java/micro-1925135.html|title|Real-Time Topic Modeling of Microblogs +http://www.oracle.com/technetwork/articles/java/micro-1925135.html|creationTime|2014-04-22T18:21:08Z +http://maps.google.com/maps?f=q&hl=fr&q=170+rue+de+Lourmel+Paris+France&ie=UTF8&om=1&z=19&ll=48.839252,2.281881&spn=0.001326,0.003224&t=k|creationDate|2006-09-23 +http://maps.google.com/maps?f=q&hl=fr&q=170+rue+de+Lourmel+Paris+France&ie=UTF8&om=1&z=19&ll=48.839252,2.281881&spn=0.001326,0.003224&t=k|tag|http://www.semanlink.net/tag/google_maps +http://maps.google.com/maps?f=q&hl=fr&q=170+rue+de+Lourmel+Paris+France&ie=UTF8&om=1&z=19&ll=48.839252,2.281881&spn=0.001326,0.003224&t=k|tag|http://www.semanlink.net/tag/170_rue_de_lourmel +http://maps.google.com/maps?f=q&hl=fr&q=170+rue+de+Lourmel+Paris+France&ie=UTF8&om=1&z=19&ll=48.839252,2.281881&spn=0.001326,0.003224&t=k|title|170, rue de Lourmel +http://stackoverflow.com/questions/240546/removing-html-from-a-java-string|creationDate|2013-08-20 +http://stackoverflow.com/questions/240546/removing-html-from-a-java-string|tag|http://www.semanlink.net/tag/stack_overflow 
+http://stackoverflow.com/questions/240546/removing-html-from-a-java-string|tag|http://www.semanlink.net/tag/html_parsing +http://stackoverflow.com/questions/240546/removing-html-from-a-java-string|tag|http://www.semanlink.net/tag/java_dev +http://stackoverflow.com/questions/240546/removing-html-from-a-java-string|tag|http://www.semanlink.net/tag/java +http://stackoverflow.com/questions/240546/removing-html-from-a-java-string|title|Removing HTML from a Java String - Stack Overflow +http://stackoverflow.com/questions/240546/removing-html-from-a-java-string|creationTime|2013-08-20T17:11:03Z +https://www.monumentaltrees.com/fr/fra/orne/landepatry/2208_cimetieredelalandepatry/3306/|creationDate|2019-04-09 +https://www.monumentaltrees.com/fr/fra/orne/landepatry/2208_cimetieredelalandepatry/3306/|tag|http://www.semanlink.net/tag/orne +https://www.monumentaltrees.com/fr/fra/orne/landepatry/2208_cimetieredelalandepatry/3306/|tag|http://www.semanlink.net/tag/arbres_remarquables +https://www.monumentaltrees.com/fr/fra/orne/landepatry/2208_cimetieredelalandepatry/3306/|title|Les ifs amoureux de la Lande-Patry +https://www.monumentaltrees.com/fr/fra/orne/landepatry/2208_cimetieredelalandepatry/3306/|creationTime|2019-04-09T19:45:34Z +http://news.bbc.co.uk/1/hi/uk_politics/4102144.stm|creationDate|2005-06-23 +http://news.bbc.co.uk/1/hi/uk_politics/4102144.stm|tag|http://www.semanlink.net/tag/rechauffement_climatique +http://news.bbc.co.uk/1/hi/uk_politics/4102144.stm|tag|http://www.semanlink.net/tag/g8 +http://news.bbc.co.uk/1/hi/uk_politics/4102144.stm|comment|A leaked copy of a document on climate change being drafted for the G8 summit suggests plans have been watered down. +http://news.bbc.co.uk/1/hi/uk_politics/4102144.stm|title|BBC NEWS Politics G8 climate plans 'watered down' +http://news.bbc.co.uk/1/hi/uk_politics/4102144.stm|source|BBC +http://www.lemonde.fr/biodiversite/article/2017/07/10/la-sixieme-extinction-de-masse-des-animaux-s-accelere-de-maniere-dramatique_5158718_1652692.html|creationDate|2017-07-10 +http://www.lemonde.fr/biodiversite/article/2017/07/10/la-sixieme-extinction-de-masse-des-animaux-s-accelere-de-maniere-dramatique_5158718_1652692.html|tag|http://www.semanlink.net/tag/extinction_de_masse +http://www.lemonde.fr/biodiversite/article/2017/07/10/la-sixieme-extinction-de-masse-des-animaux-s-accelere-de-maniere-dramatique_5158718_1652692.html|title|La sixième extinction de masse des animaux s’accélère +http://www.lemonde.fr/biodiversite/article/2017/07/10/la-sixieme-extinction-de-masse-des-animaux-s-accelere-de-maniere-dramatique_5158718_1652692.html|creationTime|2017-07-10T22:20:40Z +http://tool-man.org/examples/|creationDate|2005-05-04 +http://tool-man.org/examples/|tag|http://www.semanlink.net/tag/javascript +http://tool-man.org/examples/|tag|http://www.semanlink.net/tag/css +http://tool-man.org/examples/|tag|http://www.semanlink.net/tag/dev +http://tool-man.org/examples/|title|Direct Manipulation Using JavaScript and CSS +http://googleresearch.blogspot.fr/2006/08/all-our-n-gram-are-belong-to-you.html|creationDate|2012-03-24 +http://googleresearch.blogspot.fr/2006/08/all-our-n-gram-are-belong-to-you.html|tag|http://www.semanlink.net/tag/n_gram +http://googleresearch.blogspot.fr/2006/08/all-our-n-gram-are-belong-to-you.html|tag|http://www.semanlink.net/tag/google +http://googleresearch.blogspot.fr/2006/08/all-our-n-gram-are-belong-to-you.html|title|All Our N-gram are Belong to You 
+http://googleresearch.blogspot.fr/2006/08/all-our-n-gram-are-belong-to-you.html|creationTime|2012-03-24T09:03:58Z +http://dannyayers.com/archives/2005/10/24/yet-another-rss-history/|creationDate|2005-10-25 +http://dannyayers.com/archives/2005/10/24/yet-another-rss-history/|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/archives/2005/10/24/yet-another-rss-history/|tag|http://www.semanlink.net/tag/rss +http://dannyayers.com/archives/2005/10/24/yet-another-rss-history/|tag|http://www.semanlink.net/tag/rigolo +http://dannyayers.com/archives/2005/10/24/yet-another-rss-history/|title|Danny Ayers, Raw Blog : » Yet Another RSS History +https://lists.w3.org/Archives/Public/public-vocabs/2015May/0042.html|creationDate|2015-05-11 +https://lists.w3.org/Archives/Public/public-vocabs/2015May/0042.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +https://lists.w3.org/Archives/Public/public-vocabs/2015May/0042.html|tag|http://www.semanlink.net/tag/bernard_vatant +https://lists.w3.org/Archives/Public/public-vocabs/2015May/0042.html|tag|http://www.semanlink.net/tag/ontologies +https://lists.w3.org/Archives/Public/public-vocabs/2015May/0042.html|tag|http://www.semanlink.net/tag/uri +https://lists.w3.org/Archives/Public/public-vocabs/2015May/0042.html|comment|2015-05-07 5:36 GMT-03:00 Bernard Vatant : > (...) But to put in bluntly, in many cases, well-maintained codes for > standardized identities (languages, countries, towns, units ...) are more > sustainable ways to share identities than URIs, +https://lists.w3.org/Archives/Public/public-vocabs/2015May/0042.html|title|Re: Sustainable Codes vs Volatile URIs Re: URIs / Ontology for Physical Units and Quantities from Peter Krauss on 2015-05-07 (public-vocabs@w3.org from May 2015) +https://lists.w3.org/Archives/Public/public-vocabs/2015May/0042.html|creationTime|2015-05-11T08:45:48Z +http://blog.semantic-web.at/2010/08/31/why-skos-thesauri-matter-the-next-generation-of-semantic-technologies/|creationDate|2010-08-31 +http://blog.semantic-web.at/2010/08/31/why-skos-thesauri-matter-the-next-generation-of-semantic-technologies/|tag|http://www.semanlink.net/tag/skos +http://blog.semantic-web.at/2010/08/31/why-skos-thesauri-matter-the-next-generation-of-semantic-technologies/|title|The Semantic Puzzle Why SKOS thesauri matter – the next generation of semantic technologies +http://blog.semantic-web.at/2010/08/31/why-skos-thesauri-matter-the-next-generation-of-semantic-technologies/|creationTime|2010-08-31T09:42:57Z +http://coding.smashingmagazine.com/2008/09/16/jquery-examples-and-best-practices/|creationDate|2012-07-13 +http://coding.smashingmagazine.com/2008/09/16/jquery-examples-and-best-practices/|tag|http://www.semanlink.net/tag/jquery +http://coding.smashingmagazine.com/2008/09/16/jquery-examples-and-best-practices/|title|jQuery and JavaScript Coding: Examples and Best Practices Smashing Coding +http://coding.smashingmagazine.com/2008/09/16/jquery-examples-and-best-practices/|creationTime|2012-07-13T02:20:24Z +http://www.semanlink.net/doc/2019/05/knowledge_graph_technology_and_|creationDate|2019-05-23 +http://www.semanlink.net/doc/2019/05/knowledge_graph_technology_and_|tag|http://www.semanlink.net/tag/denny_vrandecic +http://www.semanlink.net/doc/2019/05/knowledge_graph_technology_and_|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2019/05/knowledge_graph_technology_and_|tag|http://www.semanlink.net/tag/thewebconf_2019 
+http://www.semanlink.net/doc/2019/05/knowledge_graph_technology_and_|title|Knowledge Graph Technology and Applications 2019 - Simia +http://www.semanlink.net/doc/2019/05/knowledge_graph_technology_and_|bookmarkOf|http://simia.net/wiki/Knowledge_Graph_Technology_and_Applications_2019 +http://www.semanlink.net/doc/2019/05/knowledge_graph_technology_and_|creationTime|2019-05-23T21:00:13Z +http://clair.si.umich.edu/~radev/papers/SIGKDD2010.pdf|creationDate|2017-06-27 +http://clair.si.umich.edu/~radev/papers/SIGKDD2010.pdf|tag|http://www.semanlink.net/tag/machine_learned_ranking +http://clair.si.umich.edu/~radev/papers/SIGKDD2010.pdf|comment|ranking method that balances prestige and diversity. Unlike PageRank, DivRank employs a time-variant random walk process, which facilitates the rich-gets-richer mechanism in ranking. Diversity is achieved through the “competition” process between adjacent vertices. +http://clair.si.umich.edu/~radev/papers/SIGKDD2010.pdf|title|DivRank: the Interplay of Prestige and Diversity in Information Networks +http://clair.si.umich.edu/~radev/papers/SIGKDD2010.pdf|creationTime|2017-06-27T10:56:08Z +http://ercim-news.ercim.eu/en96/special/browsing-and-traversing-linked-data-with-lodmilla|creationDate|2014-09-05 +http://ercim-news.ercim.eu/en96/special/browsing-and-traversing-linked-data-with-lodmilla|tag|http://www.semanlink.net/tag/linked_data_browser +http://ercim-news.ercim.eu/en96/special/browsing-and-traversing-linked-data-with-lodmilla|tag|http://www.semanlink.net/tag/linked_data +http://ercim-news.ercim.eu/en96/special/browsing-and-traversing-linked-data-with-lodmilla|title|Browsing and Traversing Linked Data with LODmilla +http://ercim-news.ercim.eu/en96/special/browsing-and-traversing-linked-data-with-lodmilla|creationTime|2014-09-05T18:55:14Z +http://dev.uriqr.com/|creationDate|2007-06-13 +http://dev.uriqr.com/|tag|http://www.semanlink.net/tag/uriqr +http://dev.uriqr.com/|comment|"""URI everything, and everything is cool"", right? But where do you find +the URIs if you're doing this on a small scale? We have a great +opportunity to spread the Linked Data message if we help people to make +their FOAF files Linked Data, we just need to give them the tools. +

+So, I created Uriqr (as in ""eureka""), a simple little search engine
+aimed squarely at FOAF hackers and other RDF-aware people. It looks in a
+SPARQL endpoint of crawled data for any URIs rdfs:label'led or
+foaf:name'd with your search term(s). The results are then looked up
+against Sindice and ranked according to how many other documents
+reference them. This is cool when I want to get rid of some of the
+bNodes in my FOAF file, as it helps me decide which URIs to use for
+other people without having to look in their FOAF file.
+"
+http://dev.uriqr.com/|title|Uriqr - A URI Search Engine
+http://dev.uriqr.com/|creationTime|2007-06-13T23:09:47Z
+http://rdfquery.googlecode.com/svn/trunk/demos/markup/markup.html|creationDate|2012-08-09
+http://rdfquery.googlecode.com/svn/trunk/demos/markup/markup.html|tag|http://www.semanlink.net/tag/rdfquery
+http://rdfquery.googlecode.com/svn/trunk/demos/markup/markup.html|tag|http://www.semanlink.net/tag/jeni_tennison
+http://rdfquery.googlecode.com/svn/trunk/demos/markup/markup.html|title|Mark it up! (nice rdfQuery demo)
+http://rdfquery.googlecode.com/svn/trunk/demos/markup/markup.html|creationTime|2012-08-09T15:09:38Z
+http://petrole.blog.lemonde.fr/2013/03/29/debat-sur-la-transition-energetique-vous-pouvez-repeter-la-question/|creationDate|2013-03-30
+http://petrole.blog.lemonde.fr/2013/03/29/debat-sur-la-transition-energetique-vous-pouvez-repeter-la-question/|tag|http://www.semanlink.net/tag/transition_energetique
+http://petrole.blog.lemonde.fr/2013/03/29/debat-sur-la-transition-energetique-vous-pouvez-repeter-la-question/|tag|http://www.semanlink.net/tag/energies_fossiles_non_conventionnelles
+http://petrole.blog.lemonde.fr/2013/03/29/debat-sur-la-transition-energetique-vous-pouvez-repeter-la-question/|tag|http://www.semanlink.net/tag/pic_de_hubbert
+http://petrole.blog.lemonde.fr/2013/03/29/debat-sur-la-transition-energetique-vous-pouvez-repeter-la-question/|title|Débat sur la transition énergétique : vous pouvez répéter la question ? Oil Man
+http://petrole.blog.lemonde.fr/2013/03/29/debat-sur-la-transition-energetique-vous-pouvez-repeter-la-question/|creationTime|2013-03-30T10:22:29Z
+http://www.sarahmei.com/blog/2013/11/11/why-you-should-never-use-mongodb/|creationDate|2015-12-10
+http://www.sarahmei.com/blog/2013/11/11/why-you-should-never-use-mongodb/|tag|http://www.semanlink.net/tag/mongodb
+http://www.sarahmei.com/blog/2013/11/11/why-you-should-never-use-mongodb/|title|Why You Should Never Use MongoDB « Sarah Mei
+http://www.sarahmei.com/blog/2013/11/11/why-you-should-never-use-mongodb/|creationTime|2015-12-10T03:15:29Z
+https://nlp.stanford.edu/projects/glove/|creationDate|2017-05-18
+https://nlp.stanford.edu/projects/glove/|tag|http://www.semanlink.net/tag/chris_manning
+https://nlp.stanford.edu/projects/glove/|tag|http://www.semanlink.net/tag/nlp_stanford
+https://nlp.stanford.edu/projects/glove/|tag|http://www.semanlink.net/tag/glove
+https://nlp.stanford.edu/projects/glove/|tag|http://www.semanlink.net/tag/richard_socher
+https://nlp.stanford.edu/projects/glove/|title|GloVe: Global Vectors for Word Representation
+https://nlp.stanford.edu/projects/glove/|creationTime|2017-05-18T22:49:32Z
+http://www.wired.com/2014/09/metanautix/|creationDate|2014-09-08
+http://www.wired.com/2014/09/metanautix/|tag|http://www.semanlink.net/tag/dremel
+http://www.wired.com/2014/09/metanautix/|tag|http://www.semanlink.net/tag/big_data
+http://www.wired.com/2014/09/metanautix/|title|Ex-Googler Shares His Big-Data Secrets With the Masses Enterprise WIRED
+http://www.wired.com/2014/09/metanautix/|creationTime|2014-09-08T22:05:59Z
+http://sourceforge.net/projects/delicious-java/|creationDate|2006-05-13
+http://sourceforge.net/projects/delicious-java/|tag|http://www.semanlink.net/tag/del_icio_us
+http://sourceforge.net/projects/delicious-java/|tag|http://www.semanlink.net/tag/java_dev
+http://sourceforge.net/projects/delicious-java/|comment|delicious-java is a Java API for interacting with the del.icio.us social bookmarks service. BSD license
+http://sourceforge.net/projects/delicious-java/|title|SourceForge.net: del.icio.us Java API
+http://ebiquity.umbc.edu/blogger/2008/05/26/faviki-uses-wikipedia-and-dbpedia-for-semantic-tagging/|creationDate|2008-06-25
+http://ebiquity.umbc.edu/blogger/2008/05/26/faviki-uses-wikipedia-and-dbpedia-for-semantic-tagging/|tag|http://www.semanlink.net/tag/semantic_tagging
+http://ebiquity.umbc.edu/blogger/2008/05/26/faviki-uses-wikipedia-and-dbpedia-for-semantic-tagging/|tag|http://www.semanlink.net/tag/wikipedia
+http://ebiquity.umbc.edu/blogger/2008/05/26/faviki-uses-wikipedia-and-dbpedia-for-semantic-tagging/|tag|http://www.semanlink.net/tag/dbpedia
+http://ebiquity.umbc.edu/blogger/2008/05/26/faviki-uses-wikipedia-and-dbpedia-for-semantic-tagging/|title|Faviki uses Wikipedia and DBpedia for semantic tagging
+http://ebiquity.umbc.edu/blogger/2008/05/26/faviki-uses-wikipedia-and-dbpedia-for-semantic-tagging/|creationTime|2008-06-25T19:50:08Z
+https://www.newscientist.com/article/2077533-test-driving-the-hydrogen-car-that-makes-a-little-go-a-long-way/|creationDate|2016-02-18
+https://www.newscientist.com/article/2077533-test-driving-the-hydrogen-car-that-makes-a-little-go-a-long-way/|tag|http://www.semanlink.net/tag/hydrogen_cars
+https://www.newscientist.com/article/2077533-test-driving-the-hydrogen-car-that-makes-a-little-go-a-long-way/|title|Test-driving the hydrogen car that makes a little go a long way New Scientist
+https://www.newscientist.com/article/2077533-test-driving-the-hydrogen-car-that-makes-a-little-go-a-long-way/|creationTime|2016-02-18T00:20:26Z
+http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050707.html|creationDate|2005-11-02
+http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050707.html|tag|http://www.semanlink.net/tag/bacteries
+http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050707.html|tag|http://www.semanlink.net/tag/craig_venter
+http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050707.html|tag|http://www.semanlink.net/tag/what_is_life
+http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050707.html|tag|http://www.semanlink.net/tag/adn
+http://www.pbs.org/wgbh/nova/sciencenow/dispatches/050707.html|title|NOVA scienceNOW Dispatches: What We're Thinking About: Keep It Very Simple PBS
+http://schema.org/|creationDate|2011-06-07
+http://schema.org/|tag|http://www.semanlink.net/tag/schema_org
+http://schema.org/|tag|http://www.semanlink.net/tag/search_engines
+http://schema.org/|comment|This site provides a collection of schemas, i.e., html tags, that webmasters can use to markup their pages in ways recognized by major search providers. 
+http://schema.org/|title|schema.org +http://schema.org/|creationTime|2011-06-07T13:45:07Z +http://www.w3.org/2009/12/rdf-ws/Report.html|creationDate|2010-08-20 +http://www.w3.org/2009/12/rdf-ws/Report.html|tag|http://www.semanlink.net/tag/rdf_next_steps +http://www.w3.org/2009/12/rdf-ws/Report.html|title|W3C Workshop — RDF Next Steps: Workshop Report +http://www.w3.org/2009/12/rdf-ws/Report.html|creationTime|2010-08-20T13:14:17Z +http://fr.slideshare.net/OraLassila/size-does-not-matter-if-you-data-is-in-a-silo|creationDate|2013-08-08 +http://fr.slideshare.net/OraLassila/size-does-not-matter-if-you-data-is-in-a-silo|tag|http://www.semanlink.net/tag/slides +http://fr.slideshare.net/OraLassila/size-does-not-matter-if-you-data-is-in-a-silo|tag|http://www.semanlink.net/tag/big_data_semantic_web +http://fr.slideshare.net/OraLassila/size-does-not-matter-if-you-data-is-in-a-silo|tag|http://www.semanlink.net/tag/ora_lassila +http://fr.slideshare.net/OraLassila/size-does-not-matter-if-you-data-is-in-a-silo|title|Size does not matter (if your data is in a silo) +http://fr.slideshare.net/OraLassila/size-does-not-matter-if-you-data-is-in-a-silo|creationTime|2013-08-08T23:57:11Z +http://www.lemonde.fr/planete/article/2013/12/10/le-parlement-europeen-rejette-l-interdiction-du-chalutage-en-eaux-profondes_3528525_3244.html|creationDate|2013-12-10 +http://www.lemonde.fr/planete/article/2013/12/10/le-parlement-europeen-rejette-l-interdiction-du-chalutage-en-eaux-profondes_3528525_3244.html|tag|http://www.semanlink.net/tag/parlement_europeen +http://www.lemonde.fr/planete/article/2013/12/10/le-parlement-europeen-rejette-l-interdiction-du-chalutage-en-eaux-profondes_3528525_3244.html|tag|http://www.semanlink.net/tag/chalutage_en_eaux_profondes +http://www.lemonde.fr/planete/article/2013/12/10/le-parlement-europeen-rejette-l-interdiction-du-chalutage-en-eaux-profondes_3528525_3244.html|title|Le Parlement européen rejette l'interdiction du chalutage en eaux profondes +http://www.lemonde.fr/planete/article/2013/12/10/le-parlement-europeen-rejette-l-interdiction-du-chalutage-en-eaux-profondes_3528525_3244.html|creationTime|2013-12-10T14:31:52Z +http://www.lemonde.fr/technologies/visuel/2013/07/07/biohackers-les-bricoleurs-d-adn_3441946_651865.html|creationDate|2013-07-08 +http://www.lemonde.fr/technologies/visuel/2013/07/07/biohackers-les-bricoleurs-d-adn_3441946_651865.html|tag|http://www.semanlink.net/tag/adn +http://www.lemonde.fr/technologies/visuel/2013/07/07/biohackers-les-bricoleurs-d-adn_3441946_651865.html|tag|http://www.semanlink.net/tag/biohackers +http://www.lemonde.fr/technologies/visuel/2013/07/07/biohackers-les-bricoleurs-d-adn_3441946_651865.html|title|Biohackers : les bricoleurs d'ADN +http://www.lemonde.fr/technologies/visuel/2013/07/07/biohackers-les-bricoleurs-d-adn_3441946_651865.html|creationTime|2013-07-08T16:01:23Z +http://www.w3.org/wiki/WebSchemas|creationDate|2013-06-24 +http://www.w3.org/wiki/WebSchemas|tag|http://www.semanlink.net/tag/schema_org +http://www.w3.org/wiki/WebSchemas|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/wiki/WebSchemas|tag|http://www.semanlink.net/tag/web_schemas_task_force +http://www.w3.org/wiki/WebSchemas|tag|http://www.semanlink.net/tag/wiki +http://www.w3.org/wiki/WebSchemas|title|WebSchemas - W3C Wiki +http://www.w3.org/wiki/WebSchemas|creationTime|2013-06-24T18:08:43Z +http://www.acmqueue.com/modules.php?name=Content&pa=showpage&pid=388|creationDate|2006-05-21 
+http://www.acmqueue.com/modules.php?name=Content&pa=showpage&pid=388|tag|http://www.semanlink.net/tag/web_services +http://www.acmqueue.com/modules.php?name=Content&pa=showpage&pid=388|tag|http://www.semanlink.net/tag/amazon +http://www.acmqueue.com/modules.php?name=Content&pa=showpage&pid=388|comment|"Developers are like artists; they produce their best work if they have the freedom to do so, but they need good tools
Giving developers operational responsibilities has greatly enhanced the quality of the services, both from a customer and a technology point of view. The traditional model is that you take your software to the wall that separates development and operations, and throw it over and then forget about it. Not at Amazon. You build it, you run it. +" +http://www.acmqueue.com/modules.php?name=Content&pa=showpage&pid=388|title|Learning from the Amazon technology platform (a Conversation with Werner Vogel) +https://medium.com/@faizanmukardam/simple-guide-to-neural-arithmetic-logic-units-nalu-explanation-intuition-and-code-64bc22605712|creationDate|2018-08-21 +https://medium.com/@faizanmukardam/simple-guide-to-neural-arithmetic-logic-units-nalu-explanation-intuition-and-code-64bc22605712|tag|http://www.semanlink.net/tag/ml_google +https://medium.com/@faizanmukardam/simple-guide-to-neural-arithmetic-logic-units-nalu-explanation-intuition-and-code-64bc22605712|comment|"a neural network model that can learn simple to complex numerical functions with great extrapolation (generalisation) ability +" +https://medium.com/@faizanmukardam/simple-guide-to-neural-arithmetic-logic-units-nalu-explanation-intuition-and-code-64bc22605712|title|Simple guide to Neural Arithmetic Logic Units (NALU): Explanation, Intuition and Code +https://medium.com/@faizanmukardam/simple-guide-to-neural-arithmetic-logic-units-nalu-explanation-intuition-and-code-64bc22605712|creationTime|2018-08-21T17:25:23Z +http://nneitz.wordpress.com/2010/09/18/now-we-must-redefine-tool-redefine-man-or-accept-chimpanzees-as-humans/|creationDate|2014-02-23 +http://nneitz.wordpress.com/2010/09/18/now-we-must-redefine-tool-redefine-man-or-accept-chimpanzees-as-humans/|tag|http://www.semanlink.net/tag/separation_of_man_and_ape +http://nneitz.wordpress.com/2010/09/18/now-we-must-redefine-tool-redefine-man-or-accept-chimpanzees-as-humans/|tag|http://www.semanlink.net/tag/citation +http://nneitz.wordpress.com/2010/09/18/now-we-must-redefine-tool-redefine-man-or-accept-chimpanzees-as-humans/|tag|http://www.semanlink.net/tag/chimpanze +http://nneitz.wordpress.com/2010/09/18/now-we-must-redefine-tool-redefine-man-or-accept-chimpanzees-as-humans/|comment|Louis Leakey to Jane Goodall +http://nneitz.wordpress.com/2010/09/18/now-we-must-redefine-tool-redefine-man-or-accept-chimpanzees-as-humans/|title|“Now we must redefine ‘tool’, redefine ‘man’, or accept chimpanzees as humans.” Sufficiently Radical +http://nneitz.wordpress.com/2010/09/18/now-we-must-redefine-tool-redefine-man-or-accept-chimpanzees-as-humans/|creationTime|2014-02-23T01:55:24Z +https://fr.wikipedia.org/wiki/Pain,_Amour_et_Fantaisie|creationDate|2016-10-17 +https://fr.wikipedia.org/wiki/Pain,_Amour_et_Fantaisie|tag|http://www.semanlink.net/tag/amour +https://fr.wikipedia.org/wiki/Pain,_Amour_et_Fantaisie|tag|http://www.semanlink.net/tag/film_italien +https://fr.wikipedia.org/wiki/Pain,_Amour_et_Fantaisie|title|Pain, Amour et Fantaisie — Wikipédia +https://fr.wikipedia.org/wiki/Pain,_Amour_et_Fantaisie|creationTime|2016-10-17T00:52:16Z +https://en.wikipedia.org/wiki/Capitalism:_A_Love_Story|creationDate|2017-11-12 +https://en.wikipedia.org/wiki/Capitalism:_A_Love_Story|tag|http://www.semanlink.net/tag/film_americain +https://en.wikipedia.org/wiki/Capitalism:_A_Love_Story|tag|http://www.semanlink.net/tag/critique_du_capitalisme +https://en.wikipedia.org/wiki/Capitalism:_A_Love_Story|tag|http://www.semanlink.net/tag/crise_des_subprimes 
+https://en.wikipedia.org/wiki/Capitalism:_A_Love_Story|tag|http://www.semanlink.net/tag/michael_moore +https://en.wikipedia.org/wiki/Capitalism:_A_Love_Story|comment|[youtube](https://www.youtube.com/watch?v=i75a8YexReY) +https://en.wikipedia.org/wiki/Capitalism:_A_Love_Story|title|Capitalism: A Love Story - Wikipedia +https://en.wikipedia.org/wiki/Capitalism:_A_Love_Story|creationTime|2017-11-12T01:24:32Z +http://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html|creationDate|2011-11-14 +http://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest +http://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html|tag|http://www.semanlink.net/tag/boura +http://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html|title|Détours des Mondes: Les sites archéologiques de Bura Asinda-Sikka au Niger +http://detoursdesmondes.typepad.com/dtours_des_mondes/2011/01/bura-asinda-sikka-vallees-du-niger-terracotas.html|creationTime|2011-11-14T14:09:28Z +http://www.businessweek.com/technology/ibms-five-predictions-for-the-next-five-years-12192011.html|creationDate|2011-12-21 +http://www.businessweek.com/technology/ibms-five-predictions-for-the-next-five-years-12192011.html|tag|http://www.semanlink.net/tag/prediction +http://www.businessweek.com/technology/ibms-five-predictions-for-the-next-five-years-12192011.html|tag|http://www.semanlink.net/tag/ibm +http://www.businessweek.com/technology/ibms-five-predictions-for-the-next-five-years-12192011.html|title|IBM's Five Predictions for the Next Five Years - BusinessWeek +http://www.businessweek.com/technology/ibms-five-predictions-for-the-next-five-years-12192011.html|creationTime|2011-12-21T23:11:52Z +https://distill.pub/2017/aia/|creationDate|2017-12-05 +https://distill.pub/2017/aia/|tag|http://www.semanlink.net/tag/gui +https://distill.pub/2017/aia/|tag|http://www.semanlink.net/tag/artificial_intelligence +https://distill.pub/2017/aia/|comment|By creating user interfaces which let us work with the representations inside machine learning models, we can give people new tools for reasoning. 
+https://distill.pub/2017/aia/|title|Using Artificial Intelligence to Augment Human Intelligence +https://distill.pub/2017/aia/|creationTime|2017-12-05T18:28:06Z +http://del.icio.us/help/api/|creationDate|2006-09-25 +http://del.icio.us/help/api/|tag|http://www.semanlink.net/tag/delicious_api +http://del.icio.us/help/api/|tag|http://www.semanlink.net/tag/del_icio_us +http://del.icio.us/help/api/|title|del.icio.us/help/api +http://www.kk.org/2008/08/out-of-control-the-illustrated.php|creationDate|2011-06-22 +http://www.kk.org/2008/08/out-of-control-the-illustrated.php|tag|http://www.semanlink.net/tag/self_organizing_systems +http://www.kk.org/2008/08/out-of-control-the-illustrated.php|title|Out Of Control - The New Biology of Machines, Social Systems, & the Economic World - Kevin Kelly +http://www.kk.org/2008/08/out-of-control-the-illustrated.php|creationTime|2011-06-22T15:14:01Z +http://www.kurzweilai.net/|creationDate|2013-09-28 +http://www.kurzweilai.net/|tag|http://www.semanlink.net/tag/ray_kurzweil +http://www.kurzweilai.net/|title|Kurzweil Accelerating Intelligence +http://www.kurzweilai.net/|creationTime|2013-09-28T12:29:20Z +http://twitter.bug.quietbabylon.com/|creationDate|2017-09-24 +http://twitter.bug.quietbabylon.com/|tag|http://www.semanlink.net/tag/temps +http://twitter.bug.quietbabylon.com/|tag|http://www.semanlink.net/tag/science_fiction +http://twitter.bug.quietbabylon.com/|tag|http://www.semanlink.net/tag/twitter +http://twitter.bug.quietbabylon.com/|tag|http://www.semanlink.net/tag/rigolo +http://twitter.bug.quietbabylon.com/|comment|A short sci-fi story in the form of a Twitter bug report (2013) +http://twitter.bug.quietbabylon.com/|title|Tweets out of Context +http://twitter.bug.quietbabylon.com/|creationTime|2017-09-24T16:07:05Z +http://news.bbc.co.uk/2/hi/science/nature/7864087.stm|creationDate|2009-05-12 +http://news.bbc.co.uk/2/hi/science/nature/7864087.stm|tag|http://www.semanlink.net/tag/herschel_telescope +http://news.bbc.co.uk/2/hi/science/nature/7864087.stm|title|BBC NEWS 'Silver sensation' seeks cold cosmos +http://news.bbc.co.uk/2/hi/science/nature/7864087.stm|creationTime|2009-05-12T13:36:21Z +http://news.bbc.co.uk/2/hi/science/nature/7864087.stm|source|BBC +http://edition.cnn.com/2013/04/17/opinion/rinaudo-robots/index.html?iid=article_sidebar|creationDate|2013-04-19 +http://edition.cnn.com/2013/04/17/opinion/rinaudo-robots/index.html?iid=article_sidebar|tag|http://www.semanlink.net/tag/robotique +http://edition.cnn.com/2013/04/17/opinion/rinaudo-robots/index.html?iid=article_sidebar|title|Opinion: Why robots are ready for takeoff - CNN.com +http://edition.cnn.com/2013/04/17/opinion/rinaudo-robots/index.html?iid=article_sidebar|creationTime|2013-04-19T14:04:16Z +http://www.roughlydrafted.com/2008/06/14/cocoa-for-windows-flash-killer-sproutcore/|creationDate|2008-06-17 +http://www.roughlydrafted.com/2008/06/14/cocoa-for-windows-flash-killer-sproutcore/|tag|http://www.semanlink.net/tag/apple +http://www.roughlydrafted.com/2008/06/14/cocoa-for-windows-flash-killer-sproutcore/|tag|http://www.semanlink.net/tag/sproutcore +http://www.roughlydrafted.com/2008/06/14/cocoa-for-windows-flash-killer-sproutcore/|title|Cocoa for Windows + Flash Killer = SproutCore — RoughlyDrafted Magazine +http://www.roughlydrafted.com/2008/06/14/cocoa-for-windows-flash-killer-sproutcore/|creationTime|2008-06-17T23:23:45Z +https://hackernoon.com/life-after-1-year-of-using-neo4j-4eca5ce95bf5#.wa9wncbe4|creationDate|2017-02-26 
+https://hackernoon.com/life-after-1-year-of-using-neo4j-4eca5ce95bf5#.wa9wncbe4|tag|http://www.semanlink.net/tag/neo4j
+https://hackernoon.com/life-after-1-year-of-using-neo4j-4eca5ce95bf5#.wa9wncbe4|title|Life after 1 year of using Neo4J
+https://hackernoon.com/life-after-1-year-of-using-neo4j-4eca5ce95bf5#.wa9wncbe4|creationTime|2017-02-26T10:41:44Z
+http://blog.stephenwolfram.com/2014/06/wolfram-programming-cloud-is-live/|creationDate|2014-06-24
+http://blog.stephenwolfram.com/2014/06/wolfram-programming-cloud-is-live/|tag|http://www.semanlink.net/tag/cloud
+http://blog.stephenwolfram.com/2014/06/wolfram-programming-cloud-is-live/|tag|http://www.semanlink.net/tag/wolfram_language
+http://blog.stephenwolfram.com/2014/06/wolfram-programming-cloud-is-live/|title|Wolfram Programming Cloud Is Live!—Stephen Wolfram Blog
+http://blog.stephenwolfram.com/2014/06/wolfram-programming-cloud-is-live/|creationTime|2014-06-24T00:36:27Z
+http://www4.wiwiss.fu-berlin.de/rdf_browser/|creationDate|2007-04-03
+http://www4.wiwiss.fu-berlin.de/rdf_browser/|tag|http://www.semanlink.net/tag/disco_hyperdata_browser
+http://www4.wiwiss.fu-berlin.de/rdf_browser/|comment|Try it here!
+http://www4.wiwiss.fu-berlin.de/rdf_browser/|title|Disco - Hyperdata Browser
+http://www4.wiwiss.fu-berlin.de/rdf_browser/|creationTime|2007-04-03T22:52:30Z
+https://www.datacamp.com/community/tutorials/pandas-multi-index|creationDate|2018-10-08
+https://www.datacamp.com/community/tutorials/pandas-multi-index|tag|http://www.semanlink.net/tag/pandas
+https://www.datacamp.com/community/tutorials/pandas-multi-index|title|pandas Multi-index and groupbys (article) - DataCamp
+https://www.datacamp.com/community/tutorials/pandas-multi-index|creationTime|2018-10-08T09:42:43Z
+http://www.lemonde.fr/technologies/article/2011/03/11/pourquoi-la-reutilisation-des-donnees-publiques-a-des-fins-commerciales-doit-etre-gratuite_1491922_651865.html#ens_id=1502935|creationDate|2011-04-05
+http://www.lemonde.fr/technologies/article/2011/03/11/pourquoi-la-reutilisation-des-donnees-publiques-a-des-fins-commerciales-doit-etre-gratuite_1491922_651865.html#ens_id=1502935|tag|http://www.semanlink.net/tag/public_data
+http://www.lemonde.fr/technologies/article/2011/03/11/pourquoi-la-reutilisation-des-donnees-publiques-a-des-fins-commerciales-doit-etre-gratuite_1491922_651865.html#ens_id=1502935|title|Pourquoi la réutilisation des données publiques à des fins commerciales doit être gratuite
+http://www.lemonde.fr/technologies/article/2011/03/11/pourquoi-la-reutilisation-des-donnees-publiques-a-des-fins-commerciales-doit-etre-gratuite_1491922_651865.html#ens_id=1502935|creationTime|2011-04-05T10:14:00Z
+http://www.lemonde.fr/technologies/article/2011/03/11/pourquoi-la-reutilisation-des-donnees-publiques-a-des-fins-commerciales-doit-etre-gratuite_1491922_651865.html#ens_id=1502935|source|Le Monde
+http://www.lemonde.fr/technologies/article/2011/03/11/pourquoi-la-reutilisation-des-donnees-publiques-a-des-fins-commerciales-doit-etre-gratuite_1491922_651865.html#ens_id=1502935|date|2011-03-11
+https://dl.acm.org/citation.cfm?doid=3209542.3209561|creationDate|2018-07-12
+https://dl.acm.org/citation.cfm?doid=3209542.3209561|tag|http://www.semanlink.net/tag/lynda_tamine
+https://dl.acm.org/citation.cfm?doid=3209542.3209561|tag|http://www.semanlink.net/tag/nlp_short_texts
+https://dl.acm.org/citation.cfm?doid=3209542.3209561|tag|http://www.semanlink.net/tag/twitter
+https://dl.acm.org/citation.cfm?doid=3209542.3209561|title|Studying the Spatio-Temporal Dynamics of Small-Scale Events in Twitter
+https://dl.acm.org/citation.cfm?doid=3209542.3209561|creationTime|2018-07-12T00:27:39Z
+https://blog.kovalevskyi.com/deeplearning-images-revision-m13-14-pytorch-1-0-git-integration-smaller-boot-time-1cb5bda59968|creationDate|2018-12-12
+https://blog.kovalevskyi.com/deeplearning-images-revision-m13-14-pytorch-1-0-git-integration-smaller-boot-time-1cb5bda59968|tag|http://www.semanlink.net/tag/pytorch
+https://blog.kovalevskyi.com/deeplearning-images-revision-m13-14-pytorch-1-0-git-integration-smaller-boot-time-1cb5bda59968|tag|http://www.semanlink.net/tag/google_cloud_platform
+https://blog.kovalevskyi.com/deeplearning-images-revision-m13-14-pytorch-1-0-git-integration-smaller-boot-time-1cb5bda59968|tag|http://www.semanlink.net/tag/fast_ai
+https://blog.kovalevskyi.com/deeplearning-images-revision-m13-14-pytorch-1-0-git-integration-smaller-boot-time-1cb5bda59968|title|DeepLearning Images Revision M13/14. PyTorch 1.0. Git Integration. Smaller Boot Time.
+https://blog.kovalevskyi.com/deeplearning-images-revision-m13-14-pytorch-1-0-git-integration-smaller-boot-time-1cb5bda59968|creationTime|2018-12-12T08:29:37Z
+http://khaidoan.wikidot.com/solr|creationDate|2012-05-14
+http://khaidoan.wikidot.com/solr|tag|http://www.semanlink.net/tag/faq
+http://khaidoan.wikidot.com/solr|tag|http://www.semanlink.net/tag/solr
+http://khaidoan.wikidot.com/solr|title|Apache Solr - Do only what matters
+http://khaidoan.wikidot.com/solr|creationTime|2012-05-14T16:01:04Z
+http://gingertech.net/2014/01/08/use-deck-js-as-a-remote-presentation-tool/|creationDate|2014-01-08
+http://gingertech.net/2014/01/08/use-deck-js-as-a-remote-presentation-tool/|tag|http://www.semanlink.net/tag/javascript
+http://gingertech.net/2014/01/08/use-deck-js-as-a-remote-presentation-tool/|tag|http://www.semanlink.net/tag/presentation_tool
+http://gingertech.net/2014/01/08/use-deck-js-as-a-remote-presentation-tool/|tag|http://www.semanlink.net/tag/html5
+http://gingertech.net/2014/01/08/use-deck-js-as-a-remote-presentation-tool/|title|Use deck.js as a remote presentation tool ginger's thoughts
+http://gingertech.net/2014/01/08/use-deck-js-as-a-remote-presentation-tool/|creationTime|2014-01-08T14:11:49Z
+http://www.youtube.com/watch?v=ow40LQs0ue4|creationDate|2008-11-10
+http://www.youtube.com/watch?v=ow40LQs0ue4|tag|http://www.semanlink.net/tag/miriam_makeba
+http://www.youtube.com/watch?v=ow40LQs0ue4|tag|http://www.semanlink.net/tag/afrique_du_sud
+http://www.youtube.com/watch?v=ow40LQs0ue4|tag|http://www.semanlink.net/tag/hymne_national
+http://www.youtube.com/watch?v=ow40LQs0ue4|tag|http://www.semanlink.net/tag/youtube_video
+http://www.youtube.com/watch?v=ow40LQs0ue4|title|N'Kosi Sikeleli Africa- With Miriam Makeba
+http://www.youtube.com/watch?v=ow40LQs0ue4|creationTime|2008-11-10T10:21:15Z
+http://www.macworld.com/article/51830/2006/07/showallfinder.html|creationDate|2011-07-18
+http://www.macworld.com/article/51830/2006/07/showallfinder.html|tag|http://www.semanlink.net/tag/mac_os_x_tip
+http://www.macworld.com/article/51830/2006/07/showallfinder.html|tag|http://www.semanlink.net/tag/finder
+http://www.macworld.com/article/51830/2006/07/showallfinder.html|comment|"in a terminal window:
+defaults write com.apple.finder AppleShowAllFiles -boolean true
+This was working before Yosemite:
+defaults write com.apple.Finder AppleShowAllFiles YES
+Then relaunch Finder (from the ""force quit"" menu -- option-click Finder in the Dock / relaunch doesn't seem to work anymore)
+
+
+"
+http://www.macworld.com/article/51830/2006/07/showallfinder.html|title|Show all files in the Finder Software Mac OS X Hints Macworld
+http://www.macworld.com/article/51830/2006/07/showallfinder.html|creationTime|2011-07-18T23:08:11Z
+http://dallemang.typepad.com/my_weblog/2010/08/extending-owl-rl-.html|creationDate|2010-08-27
+http://dallemang.typepad.com/my_weblog/2010/08/extending-owl-rl-.html|tag|http://www.semanlink.net/tag/owl_rl
+http://dallemang.typepad.com/my_weblog/2010/08/extending-owl-rl-.html|tag|http://www.semanlink.net/tag/dean_allemang
+http://dallemang.typepad.com/my_weblog/2010/08/extending-owl-rl-.html|title|S is for Semantics: Extending OWL RL
+http://dallemang.typepad.com/my_weblog/2010/08/extending-owl-rl-.html|creationTime|2010-08-27T13:16:36Z
+http://products.enterpriseitplanet.com/dms/rdbms/1228848416.html|creationDate|2009-02-06
+http://products.enterpriseitplanet.com/dms/rdbms/1228848416.html|tag|http://www.semanlink.net/tag/virtuoso_review
+http://products.enterpriseitplanet.com/dms/rdbms/1228848416.html|title|Virtuoso (OpenLink Software, Inc) - Data Management/Storage/Relational Databases - Enterprise IT Planet Product Guide
+http://products.enterpriseitplanet.com/dms/rdbms/1228848416.html|creationTime|2009-02-06T22:33:26Z
+http://blogs.wsj.com/cio/2016/06/20/downfall-of-dao-digital-currency-fund-shows-blockchain-reputational-risk/|creationDate|2016-06-21
+http://blogs.wsj.com/cio/2016/06/20/downfall-of-dao-digital-currency-fund-shows-blockchain-reputational-risk/|tag|http://www.semanlink.net/tag/dao_attack
+http://blogs.wsj.com/cio/2016/06/20/downfall-of-dao-digital-currency-fund-shows-blockchain-reputational-risk/|tag|http://www.semanlink.net/tag/blockchain
+http://blogs.wsj.com/cio/2016/06/20/downfall-of-dao-digital-currency-fund-shows-blockchain-reputational-risk/|title|Downfall of DAO Digital Currency Fund Shows Blockchain Reputational Risk - CIO Journal. - WSJ
+http://blogs.wsj.com/cio/2016/06/20/downfall-of-dao-digital-currency-fund-shows-blockchain-reputational-risk/|creationTime|2016-06-21T10:26:37Z
+http://ruben.verborgh.org/blog/2014/03/11/towards-web-scale-web-querying/|creationDate|2015-01-30
+http://ruben.verborgh.org/blog/2014/03/11/towards-web-scale-web-querying/|tag|http://www.semanlink.net/tag/linked_data_fragments
+http://ruben.verborgh.org/blog/2014/03/11/towards-web-scale-web-querying/|tag|http://www.semanlink.net/tag/ruben_verborgh
+http://ruben.verborgh.org/blog/2014/03/11/towards-web-scale-web-querying/|comment|So what kinds of questions should a server answer? Well, we have to design questions in such a way that their answers enable clients to solve queries. 
+http://ruben.verborgh.org/blog/2014/03/11/towards-web-scale-web-querying/|title|Towards Web-scale Web querying Ruben Verborgh
+http://ruben.verborgh.org/blog/2014/03/11/towards-web-scale-web-querying/|creationTime|2015-01-30T21:48:41Z
+http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&tp=&arnumber=6805187|creationDate|2014-04-30
+http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&tp=&arnumber=6805187|tag|http://www.semanlink.net/tag/neuromorphic_system
+http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&tp=&arnumber=6805187|tag|http://www.semanlink.net/tag/computational_neuroscience
+http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&tp=&arnumber=6805187|comment|In this paper, we describe the design of Neurogrid, a neuromorphic system for simulating large-scale neural models in real time. Neuromorphic systems realize the function of biological neural systems by emulating their structure. Designers of such systems face three major design choices: 1) whether to emulate the four neural elements—axonal arbor, synapse, dendritic tree, and soma—with dedicated or shared electronic circuits; 2) whether to implement these electronic circuits in an analog or digital manner; and 3) whether to interconnect arrays of these silicon neurons with a mesh or a tree network. The choices we made were: 1) we emulated all neural elements except the soma with shared electronic circuits; this choice maximized the number of synaptic connections; 2) we realized all electronic circuits except those for axonal arbors in an analog manner; this choice maximized energy efficiency; and 3) we interconnected neural arrays in a tree network; this choice maximized throughput. These three choices made it possible to simulate a million neurons with billions of synaptic connections in real time—for the first time—using 16 Neurocores integrated on a board that consumes three watts. 
+http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&tp=&arnumber=6805187|title|IEEE Xplore Abstract - Neurogrid: A Mixed-Analog-Digital Multichip System for Large-Scale Neural Simulations +http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&tp=&arnumber=6805187|creationTime|2014-04-30T14:01:36Z +http://www-connex.lip6.fr/~denoyer/wordpress/wp-content/uploads/2014/09/criteo_2017.pdf|creationDate|2018-01-23 +http://www-connex.lip6.fr/~denoyer/wordpress/wp-content/uploads/2014/09/criteo_2017.pdf|tag|http://www.semanlink.net/tag/lip6 +http://www-connex.lip6.fr/~denoyer/wordpress/wp-content/uploads/2014/09/criteo_2017.pdf|tag|http://www.semanlink.net/tag/patrick_gallinari +http://www-connex.lip6.fr/~denoyer/wordpress/wp-content/uploads/2014/09/criteo_2017.pdf|tag|http://www.semanlink.net/tag/embeddings +http://www-connex.lip6.fr/~denoyer/wordpress/wp-content/uploads/2014/09/criteo_2017.pdf|tag|http://www.semanlink.net/tag/gaussian_embedding +http://www-connex.lip6.fr/~denoyer/wordpress/wp-content/uploads/2014/09/criteo_2017.pdf|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www-connex.lip6.fr/~denoyer/wordpress/wp-content/uploads/2014/09/criteo_2017.pdf|title|LEARNING GRAPH EMBEDDINGS FOR NODE LABELING AND INFORMATION DIFFUSION IN SOCIAL NETWORKS (2017) +http://www-connex.lip6.fr/~denoyer/wordpress/wp-content/uploads/2014/09/criteo_2017.pdf|creationTime|2018-01-23T14:42:50Z +https://pdfs.semanticscholar.org/f6d1/6eaf975af03a172c73843ff506592c952a04.pdf|creationDate|2019-05-07 +https://pdfs.semanticscholar.org/f6d1/6eaf975af03a172c73843ff506592c952a04.pdf|tag|http://www.semanlink.net/tag/wikidata +https://pdfs.semanticscholar.org/f6d1/6eaf975af03a172c73843ff506592c952a04.pdf|tag|http://www.semanlink.net/tag/linked_data +https://pdfs.semanticscholar.org/f6d1/6eaf975af03a172c73843ff506592c952a04.pdf|title|Technical usability of Wikidata’s linked data +https://pdfs.semanticscholar.org/f6d1/6eaf975af03a172c73843ff506592c952a04.pdf|creationTime|2019-05-07T15:53:41Z +https://staltz.com/a-plan-to-rescue-the-web-from-the-internet.html|creationDate|2018-02-03 +https://staltz.com/a-plan-to-rescue-the-web-from-the-internet.html|tag|http://www.semanlink.net/tag/the_web_is_dying +https://staltz.com/a-plan-to-rescue-the-web-from-the-internet.html|title|André Staltz - A plan to rescue the Web from the Internet +https://staltz.com/a-plan-to-rescue-the-web-from-the-internet.html|creationTime|2018-02-03T12:53:24Z +http://fgiasson.com/blog/index.php/2007/06/04/my-personal-library-and-the-semantic-web/|creationDate|2007-06-08 +http://fgiasson.com/blog/index.php/2007/06/04/my-personal-library-and-the-semantic-web/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2007/06/04/my-personal-library-and-the-semantic-web/|tag|http://www.semanlink.net/tag/good +http://fgiasson.com/blog/index.php/2007/06/04/my-personal-library-and-the-semantic-web/|tag|http://www.semanlink.net/tag/livre +http://fgiasson.com/blog/index.php/2007/06/04/my-personal-library-and-the-semantic-web/|tag|http://www.semanlink.net/tag/hypercard +http://fgiasson.com/blog/index.php/2007/06/04/my-personal-library-and-the-semantic-web/|tag|http://www.semanlink.net/tag/linked_data +http://fgiasson.com/blog/index.php/2007/06/04/my-personal-library-and-the-semantic-web/|title|My Personal Library and the Semantic Web at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2007/06/04/my-personal-library-and-the-semantic-web/|creationTime|2007-06-08T23:55:27Z 
+http://news.bbc.co.uk/2/hi/africa/5129350.stm|creationDate|2006-09-15 +http://news.bbc.co.uk/2/hi/africa/5129350.stm|tag|http://www.semanlink.net/tag/horreur +http://news.bbc.co.uk/2/hi/africa/5129350.stm|tag|http://www.semanlink.net/tag/lord_s_resistance_army +http://news.bbc.co.uk/2/hi/africa/5129350.stm|title|BBC NEWS - LRA victim: 'I cannot forget and forgive' +http://news.bbc.co.uk/2/hi/africa/5129350.stm|source|BBC +http://news.bbc.co.uk/2/hi/africa/5129350.stm|seeAlso|http://www.semanlink.net/doc/2006/09/bbc_ochol203x250.jpg +http://dcentproject.eu/|creationDate|2014-09-16 +http://dcentproject.eu/|tag|http://www.semanlink.net/tag/privacy_and_internet +http://dcentproject.eu/|tag|http://www.semanlink.net/tag/decentralized_social_network +http://dcentproject.eu/|comment|D-CENT is a Europe-wide project creating privacy-aware tools and applications for direct democracy and economic empowerment. Together with the citizens and developers, we are creating a decentralised social networking platform for large-scale collaboration and decision-making. +http://dcentproject.eu/|title|D-CENT +http://dcentproject.eu/|creationTime|2014-09-16T23:58:34Z +https://github.com/carrot2/carrot2|creationDate|2017-05-23 +https://github.com/carrot2/carrot2|tag|http://www.semanlink.net/tag/carrot2 +https://github.com/carrot2/carrot2|tag|http://www.semanlink.net/tag/github_project +https://github.com/carrot2/carrot2|comment|Open Source Search Results Clustering Engine. It can automatically organize small collections of documents (like, ehm, search results), into thematic categories. +https://github.com/carrot2/carrot2|title|Carrot2: Text Clustering Algorithms and Applications +https://github.com/carrot2/carrot2|creationTime|2017-05-23T12:12:49Z +http://www.lemonde.fr/idees/article/2014/07/18/les-entrepreneurs-africains-un-atout-pour-la-france_4458954_3232.html|creationDate|2014-07-19 +http://www.lemonde.fr/idees/article/2014/07/18/les-entrepreneurs-africains-un-atout-pour-la-france_4458954_3232.html|tag|http://www.semanlink.net/tag/new_africa +http://www.lemonde.fr/idees/article/2014/07/18/les-entrepreneurs-africains-un-atout-pour-la-france_4458954_3232.html|tag|http://www.semanlink.net/tag/france_afrique +http://www.lemonde.fr/idees/article/2014/07/18/les-entrepreneurs-africains-un-atout-pour-la-france_4458954_3232.html|title|Les entrepreneurs africains, un atout pour la France +http://www.lemonde.fr/idees/article/2014/07/18/les-entrepreneurs-africains-un-atout-pour-la-france_4458954_3232.html|creationTime|2014-07-19T08:49:26Z +http://data.gov.uk/blog/guest-post-developers-guide-linked-data-apis-jeni-tennison|creationDate|2011-10-12 +http://data.gov.uk/blog/guest-post-developers-guide-linked-data-apis-jeni-tennison|tag|http://www.semanlink.net/tag/jeni_tennison +http://data.gov.uk/blog/guest-post-developers-guide-linked-data-apis-jeni-tennison|tag|http://www.semanlink.net/tag/government_data_as_linked_data +http://data.gov.uk/blog/guest-post-developers-guide-linked-data-apis-jeni-tennison|title|Guest Post: A Developers' Guide to the Linked Data APIs - Jeni Tennison data.gov.uk +http://data.gov.uk/blog/guest-post-developers-guide-linked-data-apis-jeni-tennison|creationTime|2011-10-12T16:34:18Z +http://www.lemonde.fr/europe/article/2017/11/22/le-verdict-du-proces-de-ratko-mladic-accuse-de-genocide-attendu-a-la-haye_5218588_3214.html|creationDate|2017-11-22 
+http://www.lemonde.fr/europe/article/2017/11/22/le-verdict-du-proces-de-ratko-mladic-accuse-de-genocide-attendu-a-la-haye_5218588_3214.html|tag|http://www.semanlink.net/tag/tribunal_penal_international +http://www.lemonde.fr/europe/article/2017/11/22/le-verdict-du-proces-de-ratko-mladic-accuse-de-genocide-attendu-a-la-haye_5218588_3214.html|tag|http://www.semanlink.net/tag/mladic +http://www.lemonde.fr/europe/article/2017/11/22/le-verdict-du-proces-de-ratko-mladic-accuse-de-genocide-attendu-a-la-haye_5218588_3214.html|title|Les crimes de Ratko Mladic, « parmi les plus haineux de l’humanité » +http://www.lemonde.fr/europe/article/2017/11/22/le-verdict-du-proces-de-ratko-mladic-accuse-de-genocide-attendu-a-la-haye_5218588_3214.html|creationTime|2017-11-22T19:55:12Z +http://internetactu.blog.lemonde.fr/2012/05/16/big-data-grande-illusion/|creationDate|2012-05-21 +http://internetactu.blog.lemonde.fr/2012/05/16/big-data-grande-illusion/|tag|http://www.semanlink.net/tag/big_data +http://internetactu.blog.lemonde.fr/2012/05/16/big-data-grande-illusion/|title|Big Data, grande illusion ? InternetActu +http://internetactu.blog.lemonde.fr/2012/05/16/big-data-grande-illusion/|creationTime|2012-05-21T17:29:35Z +http://www.nytimes.com/2012/01/22/business/apple-america-and-a-squeezed-middle-class.html|creationDate|2012-01-26 +http://www.nytimes.com/2012/01/22/business/apple-america-and-a-squeezed-middle-class.html|tag|http://www.semanlink.net/tag/steve_jobs +http://www.nytimes.com/2012/01/22/business/apple-america-and-a-squeezed-middle-class.html|tag|http://www.semanlink.net/tag/travail +http://www.nytimes.com/2012/01/22/business/apple-america-and-a-squeezed-middle-class.html|tag|http://www.semanlink.net/tag/chine_technologie +http://www.nytimes.com/2012/01/22/business/apple-america-and-a-squeezed-middle-class.html|tag|http://www.semanlink.net/tag/outsourcing +http://www.nytimes.com/2012/01/22/business/apple-america-and-a-squeezed-middle-class.html|tag|http://www.semanlink.net/tag/apple +http://www.nytimes.com/2012/01/22/business/apple-america-and-a-squeezed-middle-class.html|tag|http://www.semanlink.net/tag/delocalisations +http://www.nytimes.com/2012/01/22/business/apple-america-and-a-squeezed-middle-class.html|title|Apple, America and a Squeezed Middle Class - NYTimes.com +http://www.nytimes.com/2012/01/22/business/apple-america-and-a-squeezed-middle-class.html|creationTime|2012-01-26T08:28:47Z +http://cassandra.apache.org/|creationDate|2013-02-14 +http://cassandra.apache.org/|tag|http://www.semanlink.net/tag/cassandra +http://cassandra.apache.org/|title|The Apache Cassandra Project +http://cassandra.apache.org/|creationTime|2013-02-14T11:30:21Z +http://aws.amazon.com/fr/architecture/|creationDate|2014-07-02 +http://aws.amazon.com/fr/architecture/|tag|http://www.semanlink.net/tag/aws +http://aws.amazon.com/fr/architecture/|title|Centre d'architecture AWS +http://aws.amazon.com/fr/architecture/|creationTime|2014-07-02T17:04:51Z +http://www.technologyreview.com/view/541276/deep-learning-machine-teaches-itself-chess-in-72-hours-plays-at-international-master/|creationDate|2015-09-15 +http://www.technologyreview.com/view/541276/deep-learning-machine-teaches-itself-chess-in-72-hours-plays-at-international-master/|tag|http://www.semanlink.net/tag/jeu_d_echecs +http://www.technologyreview.com/view/541276/deep-learning-machine-teaches-itself-chess-in-72-hours-plays-at-international-master/|tag|http://www.semanlink.net/tag/deep_learning 
+http://www.technologyreview.com/view/541276/deep-learning-machine-teaches-itself-chess-in-72-hours-plays-at-international-master/|comment|Giraffe: Using Deep Reinforcement Learning to Play Chess +http://www.technologyreview.com/view/541276/deep-learning-machine-teaches-itself-chess-in-72-hours-plays-at-international-master/|title|Deep Learning Machine Teaches Itself Chess in 72 Hours, Plays at International Master Level MIT Technology Review +http://www.technologyreview.com/view/541276/deep-learning-machine-teaches-itself-chess-in-72-hours-plays-at-international-master/|creationTime|2015-09-15T09:59:50Z +https://www.pbs.org/wgbh/nova/article/200000-marine-viruses/|creationDate|2019-04-27 +https://www.pbs.org/wgbh/nova/article/200000-marine-viruses/|tag|http://www.semanlink.net/tag/virus +https://www.pbs.org/wgbh/nova/article/200000-marine-viruses/|comment|Each liter of seawater on this planet is home to about 100 billion viral particles +https://www.pbs.org/wgbh/nova/article/200000-marine-viruses/|title|Scientists Just Found 200,000 New Marine Viruses NOVA PBS +https://www.pbs.org/wgbh/nova/article/200000-marine-viruses/|creationTime|2019-04-27T20:09:29Z +http://thoughton.co.uk/digitallife/weblog/2007/03/from_imovie_to.php|creationDate|2007-09-15 +http://thoughton.co.uk/digitallife/weblog/2007/03/from_imovie_to.php|tag|http://www.semanlink.net/tag/youtube +http://thoughton.co.uk/digitallife/weblog/2007/03/from_imovie_to.php|tag|http://www.semanlink.net/tag/imovie +http://thoughton.co.uk/digitallife/weblog/2007/03/from_imovie_to.php|title|From iMovie to YouTube, a.s.a.p. +http://thoughton.co.uk/digitallife/weblog/2007/03/from_imovie_to.php|creationTime|2007-09-15T23:31:55Z +https://arxiv.org/abs/1712.01208v1|creationDate|2017-12-11 +https://arxiv.org/abs/1712.01208v1|tag|http://www.semanlink.net/tag/learned_index_structures +https://arxiv.org/abs/1712.01208v1|tag|http://www.semanlink.net/tag/nips_2017 +https://arxiv.org/abs/1712.01208v1|tag|http://www.semanlink.net/tag/semantic_hashing +https://arxiv.org/abs/1712.01208v1|tag|http://www.semanlink.net/tag/ml_google +https://arxiv.org/abs/1712.01208v1|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1712.01208v1|arxiv_author|Alex Beutel +https://arxiv.org/abs/1712.01208v1|arxiv_author|Tim Kraska +https://arxiv.org/abs/1712.01208v1|arxiv_author|Neoklis Polyzotis +https://arxiv.org/abs/1712.01208v1|arxiv_author|Ed H. Chi +https://arxiv.org/abs/1712.01208v1|arxiv_author|Jeffrey Dean +https://arxiv.org/abs/1712.01208v1|comment|"> we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs +> +> Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes." 
+https://arxiv.org/abs/1712.01208v1|title|[1712.01208] The Case for Learned Index Structures +https://arxiv.org/abs/1712.01208v1|creationTime|2017-12-11T19:25:09Z +https://arxiv.org/abs/1712.01208v1|arxiv_summary|"Indexes are models: a B-Tree-Index can be seen as a model to map a key to the +position of a record within a sorted array, a Hash-Index as a model to map a +key to a position of a record within an unsorted array, and a BitMap-Index as a +model to indicate if a data record exists or not. In this exploratory research +paper, we start from this premise and posit that all existing index structures +can be replaced with other types of models, including deep-learning models, +which we term learned indexes. The key idea is that a model can learn the sort +order or structure of lookup keys and use this signal to effectively predict +the position or existence of records. We theoretically analyze under which +conditions learned indexes outperform traditional index structures and describe +the main challenges in designing learned index structures. Our initial results +show, that by using neural nets we are able to outperform cache-optimized +B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over +several real-world data sets. More importantly though, we believe that the idea +of replacing core components of a data management system through learned models +has far reaching implications for future systems designs and that this work +just provides a glimpse of what might be possible." +https://arxiv.org/abs/1712.01208v1|arxiv_firstAuthor|Tim Kraska +https://arxiv.org/abs/1712.01208v1|arxiv_updated|2018-04-30T07:54:41Z +https://arxiv.org/abs/1712.01208v1|arxiv_title|The Case for Learned Index Structures +https://arxiv.org/abs/1712.01208v1|arxiv_published|2017-12-04T17:18:41Z +https://arxiv.org/abs/1712.01208v1|arxiv_num|1712.01208 +https://www.theguardian.com/lifeandstyle/2016/aug/28/inner-life-does-knowledge-matter-in-the-age-of-google|creationDate|2016-08-28 +https://www.theguardian.com/lifeandstyle/2016/aug/28/inner-life-does-knowledge-matter-in-the-age-of-google|tag|http://www.semanlink.net/tag/knowledge +https://www.theguardian.com/lifeandstyle/2016/aug/28/inner-life-does-knowledge-matter-in-the-age-of-google|comment|Knowledge is not wisdom, but it is a prerequisite for wisdom – and that’s one thing the digital revolution hasn’t changed. +https://www.theguardian.com/lifeandstyle/2016/aug/28/inner-life-does-knowledge-matter-in-the-age-of-google|title|Does knowledge matter in the age of Google? 
The Guardian +https://www.theguardian.com/lifeandstyle/2016/aug/28/inner-life-does-knowledge-matter-in-the-age-of-google|creationTime|2016-08-28T15:50:45Z +http://www.nasa.gov/mission_pages/voyager/termination_shock.html|creationDate|2008-05-17 +http://www.nasa.gov/mission_pages/voyager/termination_shock.html|tag|http://www.semanlink.net/tag/voyager +http://www.nasa.gov/mission_pages/voyager/termination_shock.html|tag|http://www.semanlink.net/tag/heliosphere +http://www.nasa.gov/mission_pages/voyager/termination_shock.html|title|Voyager 2 Proves the Solar System is Squashed +http://www.nasa.gov/mission_pages/voyager/termination_shock.html|creationTime|2008-05-17T14:02:12Z +http://www.manudibango.net/|creationDate|2009-11-05 +http://www.manudibango.net/|tag|http://www.semanlink.net/tag/manu_dibango +http://www.manudibango.net/|title|Manu DIBANGO - Official Website - Site Web Officiel +http://www.manudibango.net/|creationTime|2009-11-05T19:09:30Z +http://www.asimovinstitute.org/neural-network-zoo/|creationDate|2017-10-07 +http://www.asimovinstitute.org/neural-network-zoo/|tag|http://www.semanlink.net/tag/artificial_neural_network +http://www.asimovinstitute.org/neural-network-zoo/|title|The Neural Network Zoo - The Asimov Institute +http://www.asimovinstitute.org/neural-network-zoo/|creationTime|2017-10-07T11:12:32Z +http://www.flickr.com/photos/milstan/sets/72157623935704725/|creationDate|2010-05-14 +http://www.flickr.com/photos/milstan/sets/72157623935704725/|tag|http://www.semanlink.net/tag/hypiosvocampparismay2010 +http://www.flickr.com/photos/milstan/sets/72157623935704725/|title|Hypios VoCamp Paris 2010 - a set on Flickr +http://www.flickr.com/photos/milstan/sets/72157623935704725/|creationTime|2010-05-14T23:15:03Z +https://github.com/jwise/28c3-doctorow/blob/master/transcript.md|creationDate|2012-01-01 +https://github.com/jwise/28c3-doctorow/blob/master/transcript.md|tag|http://www.semanlink.net/tag/cory_doctorow +https://github.com/jwise/28c3-doctorow/blob/master/transcript.md|tag|http://www.semanlink.net/tag/propriete_intellectuelle +https://github.com/jwise/28c3-doctorow/blob/master/transcript.md|comment|As a member of the Walkman generation, I have made peace with the fact that I will require a hearing aid long before I die, and of course, it won't be a hearing aid, it will be a computer I put in my body. So when I get into a car -- a computer I put my body into -- with my hearing aid -- a computer I put inside my body -- I want to know that these technologies are not designed to keep secrets from me, and to prevent me from terminating processes on them that work against my interests. 
+https://github.com/jwise/28c3-doctorow/blob/master/transcript.md|title|The Coming War on General Computation - Cory Doctorow +https://github.com/jwise/28c3-doctorow/blob/master/transcript.md|creationTime|2012-01-01T22:58:36Z +http://telecom.ebizproduction.com/sites/default/files/files/open%20data%20F%20bancilhon.pdf|creationDate|2012-10-23 +http://telecom.ebizproduction.com/sites/default/files/files/open%20data%20F%20bancilhon.pdf|tag|http://www.semanlink.net/tag/slides +http://telecom.ebizproduction.com/sites/default/files/files/open%20data%20F%20bancilhon.pdf|tag|http://www.semanlink.net/tag/data_publica +http://telecom.ebizproduction.com/sites/default/files/files/open%20data%20F%20bancilhon.pdf|title|Data publica : open data (slides) +http://telecom.ebizproduction.com/sites/default/files/files/open%20data%20F%20bancilhon.pdf|creationTime|2012-10-23T01:12:56Z +https://onionesquereality.wordpress.com/2016/05/18/where-does-the-sigmoid-in-logistic-regression-come-from/|creationDate|2016-06-05 +https://onionesquereality.wordpress.com/2016/05/18/where-does-the-sigmoid-in-logistic-regression-come-from/|tag|http://www.semanlink.net/tag/logistic_regression +https://onionesquereality.wordpress.com/2016/05/18/where-does-the-sigmoid-in-logistic-regression-come-from/|comment|the log-odds is not an arbitrary choice, infact a very natural choice. The sigmoid is simply a consequence of modeling the log-odds with a linear function +https://onionesquereality.wordpress.com/2016/05/18/where-does-the-sigmoid-in-logistic-regression-come-from/|title|Where does the Sigmoid in Logistic Regression come from? Onionesque Reality +https://onionesquereality.wordpress.com/2016/05/18/where-does-the-sigmoid-in-logistic-regression-come-from/|creationTime|2016-06-05T00:50:24Z +http://new.bizvortex.com/wp-content/uploads/2010/05/Business-Case-Tips.pdf|creationDate|2010-07-24 +http://new.bizvortex.com/wp-content/uploads/2010/05/Business-Case-Tips.pdf|tag|http://www.semanlink.net/tag/tips +http://new.bizvortex.com/wp-content/uploads/2010/05/Business-Case-Tips.pdf|tag|http://www.semanlink.net/tag/business_case +http://new.bizvortex.com/wp-content/uploads/2010/05/Business-Case-Tips.pdf|title|Business Case Tips +http://new.bizvortex.com/wp-content/uploads/2010/05/Business-Case-Tips.pdf|creationTime|2010-07-24T10:41:13Z +http://stackoverflow.com/questions/10934316/jersey-default-cache-control-to-no-cache|creationDate|2015-05-14 +http://stackoverflow.com/questions/10934316/jersey-default-cache-control-to-no-cache|tag|http://www.semanlink.net/tag/annotations +http://stackoverflow.com/questions/10934316/jersey-default-cache-control-to-no-cache|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/10934316/jersey-default-cache-control-to-no-cache|tag|http://www.semanlink.net/tag/jersey_cache_control +http://stackoverflow.com/questions/10934316/jersey-default-cache-control-to-no-cache|comment|Cache-Control Using Annotations With Jersey +http://stackoverflow.com/questions/10934316/jersey-default-cache-control-to-no-cache|title|java - Jersey: Default Cache Control to no-cache - Stack Overflow +http://stackoverflow.com/questions/10934316/jersey-default-cache-control-to-no-cache|creationTime|2015-05-14T13:12:15Z +http://www.jenitennison.com/blog/node/145|creationDate|2010-08-27 +http://www.jenitennison.com/blog/node/145|tag|http://www.semanlink.net/tag/freebase +http://www.jenitennison.com/blog/node/145|tag|http://www.semanlink.net/tag/linked_data 
+http://www.jenitennison.com/blog/node/145|tag|http://www.semanlink.net/tag/howto +http://www.jenitennison.com/blog/node/145|title|Using Freebase Gridworks to Create Linked Data Jeni's Musings +http://www.jenitennison.com/blog/node/145|creationTime|2010-08-27T14:02:07Z +http://www.cornell.edu/video/jeff-hawkins-on-how-brain-science-will-change-computing|creationDate|2013-05-25 +http://www.cornell.edu/video/jeff-hawkins-on-how-brain-science-will-change-computing|tag|http://www.semanlink.net/tag/neuroscience_and_ai +http://www.cornell.edu/video/jeff-hawkins-on-how-brain-science-will-change-computing|tag|http://www.semanlink.net/tag/memory_prediction_framework +http://www.cornell.edu/video/jeff-hawkins-on-how-brain-science-will-change-computing|tag|http://www.semanlink.net/tag/cerveau +http://www.cornell.edu/video/jeff-hawkins-on-how-brain-science-will-change-computing|tag|http://www.semanlink.net/tag/jeff_hawkins +http://www.cornell.edu/video/jeff-hawkins-on-how-brain-science-will-change-computing|comment|"The intuitive but incorrect assumption that kept us from understanding brains and building intelligent machines is: ""Intelligence is defined by behavior""... Intelligence is defined by prediction " +http://www.cornell.edu/video/jeff-hawkins-on-how-brain-science-will-change-computing|title|Jeff Hawkins: How brain science will change computing - CornellCast +http://www.cornell.edu/video/jeff-hawkins-on-how-brain-science-will-change-computing|creationTime|2013-05-25T14:55:36Z +http://www.moula-moula.de|creationDate|2005-04-17 +http://www.moula-moula.de|tag|http://www.semanlink.net/tag/desert +http://www.moula-moula.de|tag|http://www.semanlink.net/tag/photos_du_niger +https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf|creationDate|2018-05-21 +https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf|tag|http://www.semanlink.net/tag/sequence_labeling +https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf|tag|http://www.semanlink.net/tag/named_entity_recognition +https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf|tag|http://www.semanlink.net/tag/bi_lstm +https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf|tag|http://www.semanlink.net/tag/github_project +https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf|comment|([linked from here](http://nlp.town/blog/ner-and-the-road-to-deep-learning/)) +https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf|relatedDoc|http://nlp.town/blog/ner-and-the-road-to-deep-learning/ +https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf|title|BiLSTM-CNN-CRF Implementation for Sequence Tagging +https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf|creationTime|2018-05-21T12:59:21Z +https://rare-technologies.com/text-summarization-with-gensim/|creationDate|2017-06-02 +https://rare-technologies.com/text-summarization-with-gensim/|tag|http://www.semanlink.net/tag/gensim +https://rare-technologies.com/text-summarization-with-gensim/|tag|http://www.semanlink.net/tag/textrank +https://rare-technologies.com/text-summarization-with-gensim/|tag|http://www.semanlink.net/tag/automatic_summarization +https://rare-technologies.com/text-summarization-with-gensim/|title|Text Summarization with Gensim +https://rare-technologies.com/text-summarization-with-gensim/|creationTime|2017-06-02T01:02:20Z +https://arxiv.org/abs/1810.07150|creationDate|2018-10-22 +https://arxiv.org/abs/1810.07150|tag|http://www.semanlink.net/tag/semantic_hashing +https://arxiv.org/abs/1810.07150|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1810.07150|arxiv_author|Pedro Alonso 
+https://arxiv.org/abs/1810.07150|arxiv_author|Marcus Liwicki +https://arxiv.org/abs/1810.07150|arxiv_author|Foteini Simistira +https://arxiv.org/abs/1810.07150|arxiv_author|Gyorgy Kovacs +https://arxiv.org/abs/1810.07150|arxiv_author|Ayushman Dash +https://arxiv.org/abs/1810.07150|arxiv_author|Kumar Shridhar +https://arxiv.org/abs/1810.07150|arxiv_author|Vinaychandran Pondenkandath +https://arxiv.org/abs/1810.07150|arxiv_author|Amit Sahu +https://arxiv.org/abs/1810.07150|arxiv_author|Gustav Grund Pihlgren +https://arxiv.org/abs/1810.07150|title|[1810.07150] Subword Semantic Hashing for Intent Classification on Small Datasets +https://arxiv.org/abs/1810.07150|creationTime|2018-10-22T14:23:00Z +https://arxiv.org/abs/1810.07150|arxiv_summary|"In this paper, we introduce the use of Semantic Hashing as embedding for the +task of Intent Classification and achieve state-of-the-art performance on three +frequently used benchmarks. Intent Classification on a small dataset is a +challenging task for data-hungry state-of-the-art Deep Learning based systems. +Semantic Hashing is an attempt to overcome such a challenge and learn robust +text classification. Current word embedding based are dependent on +vocabularies. One of the major drawbacks of such methods is out-of-vocabulary +terms, especially when having small training datasets and using a wider +vocabulary. This is the case in Intent Classification for chatbots, where +typically small datasets are extracted from internet communication. Two +problems arise by the use of internet communication. First, such datasets miss +a lot of terms in the vocabulary to use word embeddings efficiently. Second, +users frequently make spelling errors. Typically, the models for intent +classification are not trained with spelling errors and it is difficult to +think about ways in which users will make mistakes. Models depending on a word +vocabulary will always face such issues. An ideal classifier should handle +spelling errors inherently. With Semantic Hashing, we overcome these challenges +and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and +Web Application. 
Our benchmarks are available online: +https://github.com/kumar-shridhar/Know-Your-Intent" +https://arxiv.org/abs/1810.07150|arxiv_firstAuthor|Kumar Shridhar +https://arxiv.org/abs/1810.07150|arxiv_updated|2019-09-14T15:42:30Z +https://arxiv.org/abs/1810.07150|arxiv_title|Subword Semantic Hashing for Intent Classification on Small Datasets +https://arxiv.org/abs/1810.07150|arxiv_published|2018-10-16T17:25:22Z +https://arxiv.org/abs/1810.07150|arxiv_num|1810.07150 +http://www.lemonde.fr/pixels/article/2015/05/08/pour-snowden-le-scandale-des-ecoutes-en-allemagne-confirme-un-espionnage-de-masse_4630493_4408996.html|creationDate|2015-05-12 +http://www.lemonde.fr/pixels/article/2015/05/08/pour-snowden-le-scandale-des-ecoutes-en-allemagne-confirme-un-espionnage-de-masse_4630493_4408996.html|tag|http://www.semanlink.net/tag/edward_snowden +http://www.lemonde.fr/pixels/article/2015/05/08/pour-snowden-le-scandale-des-ecoutes-en-allemagne-confirme-un-espionnage-de-masse_4630493_4408996.html|tag|http://www.semanlink.net/tag/scandale_des_ecoutes_en_allemagne +http://www.lemonde.fr/pixels/article/2015/05/08/pour-snowden-le-scandale-des-ecoutes-en-allemagne-confirme-un-espionnage-de-masse_4630493_4408996.html|title|Pour Snowden, le scandale des écoutes en Allemagne confirme un espionnage « de masse » +http://www.lemonde.fr/pixels/article/2015/05/08/pour-snowden-le-scandale-des-ecoutes-en-allemagne-confirme-un-espionnage-de-masse_4630493_4408996.html|creationTime|2015-05-12T02:02:46Z +http://www.lafcpug.org/Tutorials/basic_you_tube.html|creationDate|2007-09-16 +http://www.lafcpug.org/Tutorials/basic_you_tube.html|tag|http://www.semanlink.net/tag/youtube +http://www.lafcpug.org/Tutorials/basic_you_tube.html|tag|http://www.semanlink.net/tag/imovie +http://www.lafcpug.org/Tutorials/basic_you_tube.html|title|uploading your movies for YOU Tube +http://www.lafcpug.org/Tutorials/basic_you_tube.html|creationTime|2007-09-16T00:06:07Z +https://www.theguardian.com/science/2017/dec/13/why-are-palaeontologists-suing-trump|creationDate|2017-12-16 +https://www.theguardian.com/science/2017/dec/13/why-are-palaeontologists-suing-trump|tag|http://www.semanlink.net/tag/trump +https://www.theguardian.com/science/2017/dec/13/why-are-palaeontologists-suing-trump|tag|http://www.semanlink.net/tag/paleontologie +https://www.theguardian.com/science/2017/dec/13/why-are-palaeontologists-suing-trump|title|Why are palaeontologists suing Trump? 
Elsa Panciroli Science The Guardian +https://www.theguardian.com/science/2017/dec/13/why-are-palaeontologists-suing-trump|creationTime|2017-12-16T17:56:28Z +http://googlewebmastercentral.blogspot.fr/2008/09/demystifying-duplicate-content-penalty.html|creationDate|2013-08-23 +http://googlewebmastercentral.blogspot.fr/2008/09/demystifying-duplicate-content-penalty.html|tag|http://www.semanlink.net/tag/webmasters_google +http://googlewebmastercentral.blogspot.fr/2008/09/demystifying-duplicate-content-penalty.html|title|"Official Google Webmaster Central Blog: Demystifying the ""duplicate content penalty""" +http://googlewebmastercentral.blogspot.fr/2008/09/demystifying-duplicate-content-penalty.html|creationTime|2013-08-23T14:20:44Z +http://bnode.org/blog/2010/08/13/dynamic-semantic-publishing-for-any-blog-part-2-linked-readwriteweb|creationDate|2010-08-20 +http://bnode.org/blog/2010/08/13/dynamic-semantic-publishing-for-any-blog-part-2-linked-readwriteweb|tag|http://www.semanlink.net/tag/dynamic_semantic_publishing +http://bnode.org/blog/2010/08/13/dynamic-semantic-publishing-for-any-blog-part-2-linked-readwriteweb|tag|http://www.semanlink.net/tag/benjamin_nowack +http://bnode.org/blog/2010/08/13/dynamic-semantic-publishing-for-any-blog-part-2-linked-readwriteweb|title|Dynamic Semantic Publishing for any Blog (Part 2: Linked ReadWriteWeb) - benjamin nowack's blog +http://bnode.org/blog/2010/08/13/dynamic-semantic-publishing-for-any-blog-part-2-linked-readwriteweb|creationTime|2010-08-20T12:49:18Z +http://i-semantics.tugraz.at/industry-track/richard-cyganiak-sigmaee|creationDate|2010-07-01 +http://i-semantics.tugraz.at/industry-track/richard-cyganiak-sigmaee|tag|http://www.semanlink.net/tag/sig_ma +http://i-semantics.tugraz.at/industry-track/richard-cyganiak-sigmaee|tag|http://www.semanlink.net/tag/richard_cyganiak +http://i-semantics.tugraz.at/industry-track/richard-cyganiak-sigmaee|tag|http://www.semanlink.net/tag/rdf_bus +http://i-semantics.tugraz.at/industry-track/richard-cyganiak-sigmaee|comment|Richard Cyganiak had a brilliant talk about how corporate data integration can benefit from RDF-Solutions, because a RDF based data concept can be developed step-by-step in contradiction to a “conservative corporate data integration” which always goes with a general redesign of the whole data-structure of a company. 
Richard calls this “pay as you go” – and I think this is what the industry looks for.” (from http://blog.semantic-web.at/2010/09/06/the-review-in-a-car/) +http://i-semantics.tugraz.at/industry-track/richard-cyganiak-sigmaee|title|Richard Cyganiak: Sigma EE – Reaping low-hanging fruits in RDF-based data integration – I-SEMANTICS +http://i-semantics.tugraz.at/industry-track/richard-cyganiak-sigmaee|creationTime|2010-07-01T16:25:52Z +https://lejournal.cnrs.fr/articles/crispr-cas9-des-ciseaux-genetiques-pour-le-cerveau|creationDate|2016-05-03 +https://lejournal.cnrs.fr/articles/crispr-cas9-des-ciseaux-genetiques-pour-le-cerveau|tag|http://www.semanlink.net/tag/crispr_cas9 +https://lejournal.cnrs.fr/articles/crispr-cas9-des-ciseaux-genetiques-pour-le-cerveau|title|CRISPR-Cas9: des ciseaux génétiques pour le cerveau CNRS Le journal +https://lejournal.cnrs.fr/articles/crispr-cas9-des-ciseaux-genetiques-pour-le-cerveau|creationTime|2016-05-03T16:44:04Z +http://www.crystalfighters.com/|creationDate|2017-02-18 +http://www.crystalfighters.com/|tag|http://www.semanlink.net/tag/musique +http://www.crystalfighters.com/|title|Crystal Fighters +http://www.crystalfighters.com/|creationTime|2017-02-18T02:31:40Z +http://mpld3.github.io/index.html|creationDate|2017-06-28 +http://mpld3.github.io/index.html|tag|http://www.semanlink.net/tag/matplotlib +http://mpld3.github.io/index.html|tag|http://www.semanlink.net/tag/d3js +http://mpld3.github.io/index.html|title|mpld3 — Bringing Matplotlib to the Browser +http://mpld3.github.io/index.html|creationTime|2017-06-28T14:58:29Z +http://docs.codehaus.org/display/SONAR/Installing+SonarQube+in+Eclipse|creationDate|2013-11-29 +http://docs.codehaus.org/display/SONAR/Installing+SonarQube+in+Eclipse|tag|http://www.semanlink.net/tag/eclipse +http://docs.codehaus.org/display/SONAR/Installing+SonarQube+in+Eclipse|tag|http://www.semanlink.net/tag/sonarqube +http://docs.codehaus.org/display/SONAR/Installing+SonarQube+in+Eclipse|title|Installing SonarQube in Eclipse - SonarQube - Codehaus +http://docs.codehaus.org/display/SONAR/Installing+SonarQube+in+Eclipse|creationTime|2013-11-29T11:32:08Z +http://www.nytimes.com/2013/10/18/science/fossil-skull-may-rewrite-humans-evolutionary-story.html?adxnnl=1&partner=rss&emc=rss&adxnnlx=1382129702-eokKMk+XDdhKASJOK4RWCg|creationDate|2013-10-18 +http://www.nytimes.com/2013/10/18/science/fossil-skull-may-rewrite-humans-evolutionary-story.html?adxnnl=1&partner=rss&emc=rss&adxnnlx=1382129702-eokKMk+XDdhKASJOK4RWCg|tag|http://www.semanlink.net/tag/paleoanthropology +http://www.nytimes.com/2013/10/18/science/fossil-skull-may-rewrite-humans-evolutionary-story.html?adxnnl=1&partner=rss&emc=rss&adxnnlx=1382129702-eokKMk+XDdhKASJOK4RWCg|title|Skull Fossil Suggests Simpler Human Lineage - NYTimes.com +http://www.nytimes.com/2013/10/18/science/fossil-skull-may-rewrite-humans-evolutionary-story.html?adxnnl=1&partner=rss&emc=rss&adxnnlx=1382129702-eokKMk+XDdhKASJOK4RWCg|creationTime|2013-10-18T22:57:49Z +http://publib.boulder.ibm.com/infocenter/wsiihelp/v8r3/index.jsp?topic=/com.ibm.websphere.ii.foundation.appdev.content.doc/developing/iiyvwdg12.htm|creationDate|2007-11-20 +http://publib.boulder.ibm.com/infocenter/wsiihelp/v8r3/index.jsp?topic=/com.ibm.websphere.ii.foundation.appdev.content.doc/developing/iiyvwdg12.htm|tag|http://www.semanlink.net/tag/encoding +http://publib.boulder.ibm.com/infocenter/wsiihelp/v8r3/index.jsp?topic=/com.ibm.websphere.ii.foundation.appdev.content.doc/developing/iiyvwdg12.htm|title|Internationalisation des composants Web 
+http://publib.boulder.ibm.com/infocenter/wsiihelp/v8r3/index.jsp?topic=/com.ibm.websphere.ii.foundation.appdev.content.doc/developing/iiyvwdg12.htm|creationTime|2007-11-20T23:01:29Z +https://www.wired.com/story/facebook-alternatives/|creationDate|2018-03-24 +https://www.wired.com/story/facebook-alternatives/|tag|http://www.semanlink.net/tag/facebook +https://www.wired.com/story/facebook-alternatives/|title|The Best Alternative For Every Facebook Feature WIRED +https://www.wired.com/story/facebook-alternatives/|creationTime|2018-03-24T14:28:29Z +http://apassant.net/blog/2006/10/05/from-rss-to-sioc-using-sparql/|creationDate|2008-04-08 +http://apassant.net/blog/2006/10/05/from-rss-to-sioc-using-sparql/|tag|http://www.semanlink.net/tag/rss +http://apassant.net/blog/2006/10/05/from-rss-to-sioc-using-sparql/|tag|http://www.semanlink.net/tag/alexandre_passant +http://apassant.net/blog/2006/10/05/from-rss-to-sioc-using-sparql/|tag|http://www.semanlink.net/tag/sioc +http://apassant.net/blog/2006/10/05/from-rss-to-sioc-using-sparql/|tag|http://www.semanlink.net/tag/sparql +http://apassant.net/blog/2006/10/05/from-rss-to-sioc-using-sparql/|title|From RSS to SIOC using SPARQL : Alexandre Passant +http://apassant.net/blog/2006/10/05/from-rss-to-sioc-using-sparql/|creationTime|2008-04-08T12:59:33Z +http://simile.mit.edu/wiki/Piggy_Bank|creationDate|2007-01-02 +http://simile.mit.edu/wiki/Piggy_Bank|tag|http://www.semanlink.net/tag/piggy_bank +http://simile.mit.edu/wiki/Piggy_Bank|title|Piggy Bank - Home page +http://bugbrother.blog.lemonde.fr/files/2009/04/hermessortezcouvert.1240345050.pdf|creationDate|2009-04-22 +http://bugbrother.blog.lemonde.fr/files/2009/04/hermessortezcouvert.1240345050.pdf|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/files/2009/04/hermessortezcouvert.1240345050.pdf|tag|http://www.semanlink.net/tag/cybersurveillance +http://bugbrother.blog.lemonde.fr/files/2009/04/hermessortezcouvert.1240345050.pdf|tag|http://www.semanlink.net/tag/privacy_and_internet +http://bugbrother.blog.lemonde.fr/files/2009/04/hermessortezcouvert.1240345050.pdf|tag|http://www.semanlink.net/tag/hackers +http://bugbrother.blog.lemonde.fr/files/2009/04/hermessortezcouvert.1240345050.pdf|title|Comment contourner les systèmes de traçabilité ? 
+http://bugbrother.blog.lemonde.fr/files/2009/04/hermessortezcouvert.1240345050.pdf|creationTime|2009-04-22T22:56:05Z +http://www.w3.org/2001/tag/doc/whenToUseGet.html#checklist|creationDate|2010-06-04 +http://www.w3.org/2001/tag/doc/whenToUseGet.html#checklist|tag|http://www.semanlink.net/tag/http_get_vs_post +http://www.w3.org/2001/tag/doc/whenToUseGet.html#checklist|title|URIs, Addressability, and the use of HTTP GET and POST +http://www.w3.org/2001/tag/doc/whenToUseGet.html#checklist|creationTime|2010-06-04T09:55:43Z +http://aclweb.org/anthology/P18-1002|creationDate|2018-09-18 +http://aclweb.org/anthology/P18-1002|tag|http://www.semanlink.net/tag/a_la_carte_embedding +http://aclweb.org/anthology/P18-1002|title|A La Carte Embedding: Cheap but Effective Induction of Semantic Feature Vectors (2018) +http://aclweb.org/anthology/P18-1002|creationTime|2018-09-18T18:15:49Z +http://lists.w3.org/Archives/Public/public-esw-thes/2011Feb/0021.html|creationDate|2011-02-16 +http://lists.w3.org/Archives/Public/public-esw-thes/2011Feb/0021.html|tag|http://www.semanlink.net/tag/skos +http://lists.w3.org/Archives/Public/public-esw-thes/2011Feb/0021.html|tag|http://www.semanlink.net/tag/graph_visualization +http://lists.w3.org/Archives/Public/public-esw-thes/2011Feb/0021.html|title|GraphViz and SKOS... +http://lists.w3.org/Archives/Public/public-esw-thes/2011Feb/0021.html|creationTime|2011-02-16T00:07:14Z +http://cs231n.github.io/convolutional-networks/|creationDate|2016-01-18 +http://cs231n.github.io/convolutional-networks/|tag|http://www.semanlink.net/tag/computer_vision +http://cs231n.github.io/convolutional-networks/|tag|http://www.semanlink.net/tag/convolutional_neural_network +http://cs231n.github.io/convolutional-networks/|title|CS231n Convolutional Neural Networks for Visual Recognition +http://cs231n.github.io/convolutional-networks/|creationTime|2016-01-18T00:29:55Z +http://www.brainjar.com/|creationDate|2007-11-27 +http://www.brainjar.com/|tag|http://www.semanlink.net/tag/css +http://www.brainjar.com/|tag|http://www.semanlink.net/tag/javascript_and_tutorial +http://www.brainjar.com/|title|BrainJar.com: Experiments in Web Programming +http://www.brainjar.com/|creationTime|2007-11-27T14:25:55Z +http://blogs.zdnet.com/semantic-web/?p=132|creationDate|2008-05-17 +http://blogs.zdnet.com/semantic-web/?p=132|tag|http://www.semanlink.net/tag/www08 +http://blogs.zdnet.com/semantic-web/?p=132|tag|http://www.semanlink.net/tag/commercialising_the_semantic_web +http://blogs.zdnet.com/semantic-web/?p=132|tag|http://www.semanlink.net/tag/fps_and_www_2008 +http://blogs.zdnet.com/semantic-web/?p=132|tag|http://www.semanlink.net/tag/paul_miller +http://blogs.zdnet.com/semantic-web/?p=132|title|Commercialising the Semantic Web (panel at www 2008) +http://blogs.zdnet.com/semantic-web/?p=132|creationTime|2008-05-17T23:12:12Z +http://weblog.burningbird.net/2005/10/12/portable-data/|creationDate|2005-10-14 +http://weblog.burningbird.net/2005/10/12/portable-data/|tag|http://www.semanlink.net/tag/rdf_in_files +http://weblog.burningbird.net/2005/10/12/portable-data/|tag|http://www.semanlink.net/tag/semanlink_related +http://weblog.burningbird.net/2005/10/12/portable-data/|tag|http://www.semanlink.net/tag/shelley_powers +http://weblog.burningbird.net/2005/10/12/portable-data/|comment|This is the true power of RDF over relational: relational doesn’t work well with isolated, discrete objects, while RDF does. It is a truly portable database. 
Anyone can drop the data in at their sites without worry about having to create a database, or manage it. As for portability: how easy can you copy files? +http://weblog.burningbird.net/2005/10/12/portable-data/|title|Burningbird » Portable Data +http://events.linkeddata.org/ldow2008/papers/21-servant-linking-enterprise-data.pdf|creationDate|2008-05-08 +http://events.linkeddata.org/ldow2008/papers/21-servant-linking-enterprise-data.pdf|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://events.linkeddata.org/ldow2008/papers/21-servant-linking-enterprise-data.pdf|tag|http://www.semanlink.net/tag/fps_paper +http://events.linkeddata.org/ldow2008/papers/21-servant-linking-enterprise-data.pdf|tag|http://www.semanlink.net/tag/fps_and_ldow2008 +http://events.linkeddata.org/ldow2008/papers/21-servant-linking-enterprise-data.pdf|tag|http://www.semanlink.net/tag/rdf_forms +http://events.linkeddata.org/ldow2008/papers/21-servant-linking-enterprise-data.pdf|tag|http://www.semanlink.net/tag/semantic_enterprise +http://events.linkeddata.org/ldow2008/papers/21-servant-linking-enterprise-data.pdf|comment|My paper at LDOW2008 +http://events.linkeddata.org/ldow2008/papers/21-servant-linking-enterprise-data.pdf|title|Linking Enterprise Data +http://events.linkeddata.org/ldow2008/papers/21-servant-linking-enterprise-data.pdf|creationTime|2008-05-08T14:21:36Z +http://www.cerimes.education.fr/index.php?page=fiches,view,1920,4,7,248,,,search_this_word,10|creationDate|2009-06-17 +http://www.cerimes.education.fr/index.php?page=fiches,view,1920,4,7,248,,,search_this_word,10|tag|http://www.semanlink.net/tag/histoire_des_sciences +http://www.cerimes.education.fr/index.php?page=fiches,view,1920,4,7,248,,,search_this_word,10|tag|http://www.semanlink.net/tag/physique +http://www.cerimes.education.fr/index.php?page=fiches,view,1920,4,7,248,,,search_this_word,10|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.cerimes.education.fr/index.php?page=fiches,view,1920,4,7,248,,,search_this_word,10|tag|http://www.semanlink.net/tag/histoire_du_xxe_siecle +http://www.cerimes.education.fr/index.php?page=fiches,view,1920,4,7,248,,,search_this_word,10|title|Un siècle de progrès sans merci +http://www.cerimes.education.fr/index.php?page=fiches,view,1920,4,7,248,,,search_this_word,10|creationTime|2009-06-17T00:59:34Z +http://nowiknow.com/the-lichen-loophole/|creationDate|2015-12-22 +http://nowiknow.com/the-lichen-loophole/|tag|http://www.semanlink.net/tag/amazon +http://nowiknow.com/the-lichen-loophole/|title|The Lichen Loophole +http://nowiknow.com/the-lichen-loophole/|creationTime|2015-12-22T19:30:02Z +http://wiki.iks-project.eu/index.php/VIE|creationDate|2011-09-13 +http://wiki.iks-project.eu/index.php/VIE|tag|http://www.semanlink.net/tag/rdfa +http://wiki.iks-project.eu/index.php/VIE|tag|http://www.semanlink.net/tag/annotations +http://wiki.iks-project.eu/index.php/VIE|tag|http://www.semanlink.net/tag/javascript +http://wiki.iks-project.eu/index.php/VIE|tag|http://www.semanlink.net/tag/interactive_knowledge_stack +http://wiki.iks-project.eu/index.php/VIE|comment|VIE, or Vienna IKS Editables is a project to make the content of your web pages editable through annotations. This means that by simply including some RDFa on your pages you can built rich user interactions and interfaces on the JavaScript level. 
+http://wiki.iks-project.eu/index.php/VIE|title|VIE - IKS Project +http://wiki.iks-project.eu/index.php/VIE|creationTime|2011-09-13T14:22:53Z +http://www.jgoodwin.net/?p=1043|creationDate|2013-08-21 +http://www.jgoodwin.net/?p=1043|tag|http://www.semanlink.net/tag/dynamic_topic_model +http://www.jgoodwin.net/?p=1043|title|Experimenting with Dynamic Topic Models Jonathan Goodwin +http://www.jgoodwin.net/?p=1043|creationTime|2013-08-21T18:45:40Z +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.8399|creationDate|2012-04-24 +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.8399|tag|http://www.semanlink.net/tag/maximum_entropy +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.8399|tag|http://www.semanlink.net/tag/nlp +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.8399|title|CiteSeerX — A Maximum Entropy Approach to Natural Language Processing +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.8399|creationTime|2012-04-24T00:39:13Z +http://www.cea.fr/multimedia/Documents/publications/clefs-cea/CLEFS66-FR-FINAL.pdf|creationDate|2018-07-09 +http://www.cea.fr/multimedia/Documents/publications/clefs-cea/CLEFS66-FR-FINAL.pdf|tag|http://www.semanlink.net/tag/cea +http://www.cea.fr/multimedia/Documents/publications/clefs-cea/CLEFS66-FR-FINAL.pdf|tag|http://www.semanlink.net/tag/quantum_computing +http://www.cea.fr/multimedia/Documents/publications/clefs-cea/CLEFS66-FR-FINAL.pdf|tag|http://www.semanlink.net/tag/mecanique_quantique +http://www.cea.fr/multimedia/Documents/publications/clefs-cea/CLEFS66-FR-FINAL.pdf|title|Révolutions quantiques - CEA +http://www.cea.fr/multimedia/Documents/publications/clefs-cea/CLEFS66-FR-FINAL.pdf|creationTime|2018-07-09T09:09:47Z +http://www.youtube.com/watch?v=tM6VOtsGVt0&NR=1|creationDate|2009-01-14 +http://www.youtube.com/watch?v=tM6VOtsGVt0&NR=1|tag|http://www.semanlink.net/tag/youtube_video +http://www.youtube.com/watch?v=tM6VOtsGVt0&NR=1|tag|http://www.semanlink.net/tag/musique_du_niger +http://www.youtube.com/watch?v=tM6VOtsGVt0&NR=1|tag|http://www.semanlink.net/tag/seyni_kountche +http://www.youtube.com/watch?v=tM6VOtsGVt0&NR=1|title|Seyni Kountché +http://www.youtube.com/watch?v=tM6VOtsGVt0&NR=1|creationTime|2009-01-14T22:50:07Z +http://ebiquity.umbc.edu/blogger/2007/08/18/rdf123-maps-spreadsheet-data-to-rdf/|creationDate|2007-08-22 +http://ebiquity.umbc.edu/blogger/2007/08/18/rdf123-maps-spreadsheet-data-to-rdf/|tag|http://www.semanlink.net/tag/rdf123 +http://ebiquity.umbc.edu/blogger/2007/08/18/rdf123-maps-spreadsheet-data-to-rdf/|title|RDF123 maps spreadsheet data to RDF +http://ebiquity.umbc.edu/blogger/2007/08/18/rdf123-maps-spreadsheet-data-to-rdf/|creationTime|2007-08-22T00:16:18Z +https://www.technologyreview.com/s/522066/solving-the-neural-code-conundrum-digital-or-analog/|creationDate|2017-12-04 +https://www.technologyreview.com/s/522066/solving-the-neural-code-conundrum-digital-or-analog/|tag|http://www.semanlink.net/tag/computational_neuroscience +https://www.technologyreview.com/s/522066/solving-the-neural-code-conundrum-digital-or-analog/|tag|http://www.semanlink.net/tag/neural_coding +https://www.technologyreview.com/s/522066/solving-the-neural-code-conundrum-digital-or-analog/|comment|They analyse a neuronal signal and then try to reproduce it using the empirical Bayes model and then using the hidden Markov model. They then decide whether it is digital or analog depending on the model that best simulates the characteristics of the original signal. 
+https://www.technologyreview.com/s/522066/solving-the-neural-code-conundrum-digital-or-analog/|title|Solving the Neural Code Conundrum: Digital or Analog? - MIT Technology Review +https://www.technologyreview.com/s/522066/solving-the-neural-code-conundrum-digital-or-analog/|creationTime|2017-12-04T09:03:51Z +http://start.aimpages.com/|creationDate|2006-05-22 +http://start.aimpages.com/|tag|http://www.semanlink.net/tag/web_2_0 +http://start.aimpages.com/|tag|http://www.semanlink.net/tag/aol +http://start.aimpages.com/|comment|"AOL’s social networking product +" +http://start.aimpages.com/|title|AIM Pages +http://stackoverflow.com/questions/5319754/cross-reference-named-anchor-in-markdown|creationDate|2015-10-12 +http://stackoverflow.com/questions/5319754/cross-reference-named-anchor-in-markdown|tag|http://www.semanlink.net/tag/markdown +http://stackoverflow.com/questions/5319754/cross-reference-named-anchor-in-markdown|tag|http://www.semanlink.net/tag/stackoverflow_q +http://stackoverflow.com/questions/5319754/cross-reference-named-anchor-in-markdown|title|html - Cross-reference (named anchor) in markdown - Stack Overflow +http://stackoverflow.com/questions/5319754/cross-reference-named-anchor-in-markdown|creationTime|2015-10-12T19:21:09Z +https://diuf.unifr.ch/main/xi/diplodocus|creationDate|2012-05-31 +https://diuf.unifr.ch/main/xi/diplodocus|tag|http://www.semanlink.net/tag/philippe_cudre_mauroux +https://diuf.unifr.ch/main/xi/diplodocus|tag|http://www.semanlink.net/tag/rdf +https://diuf.unifr.ch/main/xi/diplodocus|tag|http://www.semanlink.net/tag/rdf_tools +https://diuf.unifr.ch/main/xi/diplodocus|comment|"dipLODocus[RDF] is a new system for RDF data processing supporting both simple transactional queries and complex analytics efficiently. dipLODocus[RDF] is based on a novel hybrid storage model considering RDF data both from a graph perspective (by storing RDF subgraphs or RDF molecules) and from a ""vertical"" analytics perspective (by storing compact lists of literal values for a given attribute)." +https://diuf.unifr.ch/main/xi/diplodocus|title|dipLODocus[RDF] +https://diuf.unifr.ch/main/xi/diplodocus|creationTime|2012-05-31T12:09:26Z +http://www.dajobe.org/2004/01/turtle/|creationDate|2008-04-08 +http://www.dajobe.org/2004/01/turtle/|tag|http://www.semanlink.net/tag/turtle +http://www.dajobe.org/2004/01/turtle/|tag|http://www.semanlink.net/tag/david_beckett +http://www.dajobe.org/2004/01/turtle/|title|Turtle - Terse RDF Triple Language +http://www.dajobe.org/2004/01/turtle/|creationTime|2008-04-08T14:36:59Z +http://www.permadi.com/tutorial/jsFunc/|creationDate|2008-01-05 +http://www.permadi.com/tutorial/jsFunc/|tag|http://www.semanlink.net/tag/javascript_and_tutorial +http://www.permadi.com/tutorial/jsFunc/|title|"Introduction and Features of JavaScript ""Function"" Objects" +http://www.permadi.com/tutorial/jsFunc/|creationTime|2008-01-05T16:20:30Z +http://www.openrdf.org|creationDate|2005-04-26 +http://www.openrdf.org|tag|http://www.semanlink.net/tag/rdf_schema_querying +http://www.openrdf.org|tag|http://www.semanlink.net/tag/rdf_schema_inferencing +http://www.openrdf.org|tag|http://www.semanlink.net/tag/triplestore +http://www.openrdf.org|comment|Sesame is an open source RDF database with support for RDF Schema inferencing and querying. 
+https://www.ted.com/talks/douglas_rushkoff_how_to_be_team_human_in_the_digital_future#t-731427|creationDate|2019-01-02 +https://www.ted.com/talks/douglas_rushkoff_how_to_be_team_human_in_the_digital_future#t-731427|tag|http://www.semanlink.net/tag/douglas_rushkoff +https://www.ted.com/talks/douglas_rushkoff_how_to_be_team_human_in_the_digital_future#t-731427|title|"Douglas Rushkoff: How to be ""Team Human"" in the digital future TED Talk" +https://www.ted.com/talks/douglas_rushkoff_how_to_be_team_human_in_the_digital_future#t-731427|creationTime|2019-01-02T13:00:39Z +http://ast2014.fzi.de/|creationDate|2014-05-06 +http://ast2014.fzi.de/|tag|http://www.semanlink.net/tag/ast_workshop +http://ast2014.fzi.de/|title|Applications of Semantic Technologies - AST 2014 8th International Workshop at INFORMATIK 2014, September 22-26, 2014, Stuttgart (Germany) +http://ast2014.fzi.de/|creationTime|2014-05-06T15:33:36Z +https://www.youtube.com/watch?v=j5iFupLkwXo|creationDate|2015-08-07 +https://www.youtube.com/watch?v=j5iFupLkwXo|tag|http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003 +https://www.youtube.com/watch?v=j5iFupLkwXo|title|Les français vice-champions du monde du 4x400m (WC Paris 2003) HQ - YouTube +https://www.youtube.com/watch?v=j5iFupLkwXo|creationTime|2015-08-07T19:13:20Z +http://www.markbaker.ca/2003/05/RDF-Forms/|creationDate|2007-03-20 +http://www.markbaker.ca/2003/05/RDF-Forms/|tag|http://www.semanlink.net/tag/rdf_forms +http://www.markbaker.ca/2003/05/RDF-Forms/|title|RDF Forms +http://www.markbaker.ca/2003/05/RDF-Forms/|creationTime|2007-03-20T22:28:43Z +https://lists.w3.org/Archives/Public/www-tag/2015Sep/0000.html|creationDate|2015-09-17 +https://lists.w3.org/Archives/Public/www-tag/2015Sep/0000.html|tag|http://www.semanlink.net/tag/tim_berners_lee +https://lists.w3.org/Archives/Public/www-tag/2015Sep/0000.html|tag|http://www.semanlink.net/tag/public_key_cryptography_in_browsers +https://lists.w3.org/Archives/Public/www-tag/2015Sep/0000.html|title|keygen being destroyed when we need it from Tim Berners-Lee on 2015-09-01 (www-tag@w3.org from September 2015) +https://lists.w3.org/Archives/Public/www-tag/2015Sep/0000.html|creationTime|2015-09-17T22:55:39Z +https://www.theguardian.com/books/2016/apr/15/neoliberalism-ideology-problem-george-monbiot|creationDate|2016-07-12 +https://www.theguardian.com/books/2016/apr/15/neoliberalism-ideology-problem-george-monbiot|tag|http://www.semanlink.net/tag/neoliberalism +https://www.theguardian.com/books/2016/apr/15/neoliberalism-ideology-problem-george-monbiot|tag|http://www.semanlink.net/tag/critique_du_liberalisme +https://www.theguardian.com/books/2016/apr/15/neoliberalism-ideology-problem-george-monbiot|title|Neoliberalism – the ideology at the root of all our problems Books The Guardian +https://www.theguardian.com/books/2016/apr/15/neoliberalism-ideology-problem-george-monbiot|creationTime|2016-07-12T13:07:06Z +http://amid.fish/reproducing-deep-rl|creationDate|2018-04-10 +http://amid.fish/reproducing-deep-rl|tag|http://www.semanlink.net/tag/deep_learning_implementing +http://amid.fish/reproducing-deep-rl|tag|http://www.semanlink.net/tag/reinforcement_learning +http://amid.fish/reproducing-deep-rl|title|Lessons Learned Reproducing a Deep Reinforcement Learning Paper +http://amid.fish/reproducing-deep-rl|creationTime|2018-04-10T13:33:49Z +http://www.readwriteweb.com/archives/semantic_web_patterns.php|creationDate|2010-07-30 
+http://www.readwriteweb.com/archives/semantic_web_patterns.php|tag|http://www.semanlink.net/tag/readwriteweb_com +http://www.readwriteweb.com/archives/semantic_web_patterns.php|tag|http://www.semanlink.net/tag/semantic_web +http://www.readwriteweb.com/archives/semantic_web_patterns.php|tag|http://www.semanlink.net/tag/design_pattern +http://www.readwriteweb.com/archives/semantic_web_patterns.php|title|Semantic Web Patterns: A Guide to Semantic Technologies +http://www.readwriteweb.com/archives/semantic_web_patterns.php|creationTime|2010-07-30T14:47:12Z +https://nlpparis.wordpress.com/2018/11/29/paris-nlp-season-3-meetup-2/|creationDate|2018-12-04 +https://nlpparis.wordpress.com/2018/11/29/paris-nlp-season-3-meetup-2/|tag|http://www.semanlink.net/tag/paris_nlp_meetup +https://nlpparis.wordpress.com/2018/11/29/paris-nlp-season-3-meetup-2/|tag|http://www.semanlink.net/tag/neural_machine_translation +https://nlpparis.wordpress.com/2018/11/29/paris-nlp-season-3-meetup-2/|tag|http://www.semanlink.net/tag/francois_yvon +https://nlpparis.wordpress.com/2018/11/29/paris-nlp-season-3-meetup-2/|comment|"- François Yvon, LIMSI/CNRS Using monolingual data in Neural Machine Translation +- Kezhan SHI, Data Science manager at Allianz France," +https://nlpparis.wordpress.com/2018/11/29/paris-nlp-season-3-meetup-2/|title|Paris NLP Season 3 Meetup #2 – Paris NLP +https://nlpparis.wordpress.com/2018/11/29/paris-nlp-season-3-meetup-2/|creationTime|2018-12-04T17:52:21Z +https://news.ycombinator.com/item?id=18085765|creationDate|2018-10-01 +https://news.ycombinator.com/item?id=18085765|tag|http://www.semanlink.net/tag/documentaire_tv +https://news.ycombinator.com/item?id=18085765|title|Ask HN: What are some of the best documentaries you've seen? Hacker News +https://news.ycombinator.com/item?id=18085765|creationTime|2018-10-01T15:06:12Z +http://teleobs.nouvelobs.com/tv_programs/2008/11/7/chaine/planete/2/30/darfour-le-diable-arrive-a-cheval|creationDate|2008-11-27 +http://teleobs.nouvelobs.com/tv_programs/2008/11/7/chaine/planete/2/30/darfour-le-diable-arrive-a-cheval|tag|http://www.semanlink.net/tag/darfour +http://teleobs.nouvelobs.com/tv_programs/2008/11/7/chaine/planete/2/30/darfour-le-diable-arrive-a-cheval|tag|http://www.semanlink.net/tag/documentaire_tv +http://teleobs.nouvelobs.com/tv_programs/2008/11/7/chaine/planete/2/30/darfour-le-diable-arrive-a-cheval|title|Darfour, le diable arrive à cheval +http://teleobs.nouvelobs.com/tv_programs/2008/11/7/chaine/planete/2/30/darfour-le-diable-arrive-a-cheval|creationTime|2008-11-27T01:39:10Z +http://www.semanticoverflow.com/questions/1476/expressing-constraints-using-rdf-owl-or-something-else|creationDate|2010-09-06 +http://www.semanticoverflow.com/questions/1476/expressing-constraints-using-rdf-owl-or-something-else|tag|http://www.semanlink.net/tag/owl +http://www.semanticoverflow.com/questions/1476/expressing-constraints-using-rdf-owl-or-something-else|tag|http://www.semanlink.net/tag/open_world_assumption +http://www.semanticoverflow.com/questions/1476/expressing-constraints-using-rdf-owl-or-something-else|tag|http://www.semanlink.net/tag/semantic_overflow +http://www.semanticoverflow.com/questions/1476/expressing-constraints-using-rdf-owl-or-something-else|title|Expressing Constraints using RDF/OWL or something else? 
- Semantic Overflow +http://www.semanticoverflow.com/questions/1476/expressing-constraints-using-rdf-owl-or-something-else|creationTime|2010-09-06T21:57:43Z +http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/|creationDate|2017-11-06 +http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/|tag|http://www.semanlink.net/tag/denny_britz +http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/|tag|http://www.semanlink.net/tag/convolutional_neural_network_and_nn_4_nlp +http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/|tag|http://www.semanlink.net/tag/tensorflow +http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/|title|Implementing a CNN for Text Classification in TensorFlow – WildML +http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/|creationTime|2017-11-06T18:56:50Z +http://lists.w3.org/Archives/Public/public-vocabs/|creationDate|2013-06-14 +http://lists.w3.org/Archives/Public/public-vocabs/|tag|http://www.semanlink.net/tag/schema_org +http://lists.w3.org/Archives/Public/public-vocabs/|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/|tag|http://www.semanlink.net/tag/web_schemas_task_force +http://lists.w3.org/Archives/Public/public-vocabs/|tag|http://www.semanlink.net/tag/mailing_list +http://lists.w3.org/Archives/Public/public-vocabs/|title|public-vocabs@w3.org Mail Archives +http://lists.w3.org/Archives/Public/public-vocabs/|creationTime|2013-06-14T15:36:18Z +http://rdf2h-browser.linked.solutions|creationDate|2018-02-20 +http://rdf2h-browser.linked.solutions|tag|http://www.semanlink.net/tag/rdf2h_browser +http://rdf2h-browser.linked.solutions|title|RDF2h Browser +http://rdf2h-browser.linked.solutions|creationTime|2018-02-20T22:42:36Z +https://jalammar.github.io/illustrated-transformer/|creationDate|2018-07-09 +https://jalammar.github.io/illustrated-transformer/|tag|http://www.semanlink.net/tag/attention_is_all_you_need +https://jalammar.github.io/illustrated-transformer/|tag|http://www.semanlink.net/tag/neural_machine_translation +https://jalammar.github.io/illustrated-transformer/|tag|http://www.semanlink.net/tag/deep_learning_attention +https://jalammar.github.io/illustrated-transformer/|comment|"a model that uses attention to boost the speed with which neural machine translation models can be trained, cf. 
""Attention is all you need""" +https://jalammar.github.io/illustrated-transformer/|title|The Illustrated Transformer – Jay Alammar +https://jalammar.github.io/illustrated-transformer/|creationTime|2018-07-09T17:27:17Z +http://semanticweb.com/catching-up-with-the-w3c-and-its-focus-on-the-enterprise_b23669|creationDate|2011-10-08 +http://semanticweb.com/catching-up-with-the-w3c-and-its-focus-on-the-enterprise_b23669|tag|http://www.semanlink.net/tag/semantic_enterprise +http://semanticweb.com/catching-up-with-the-w3c-and-its-focus-on-the-enterprise_b23669|tag|http://www.semanlink.net/tag/w3c +http://semanticweb.com/catching-up-with-the-w3c-and-its-focus-on-the-enterprise_b23669|title|Catching Up With the W3C And Its Focus On the Enterprise - semanticweb.com +http://semanticweb.com/catching-up-with-the-w3c-and-its-focus-on-the-enterprise_b23669|creationTime|2011-10-08T21:28:07Z +https://en.wikipedia.org/wiki/A_People%27s_History_of_the_United_States|creationDate|2017-08-30 +https://en.wikipedia.org/wiki/A_People%27s_History_of_the_United_States|tag|http://www.semanlink.net/tag/livre_a_lire +https://en.wikipedia.org/wiki/A_People%27s_History_of_the_United_States|tag|http://www.semanlink.net/tag/usa_histoire +https://en.wikipedia.org/wiki/A_People%27s_History_of_the_United_States|title|A People's History of the United States - Wikipedia +https://en.wikipedia.org/wiki/A_People%27s_History_of_the_United_States|creationTime|2017-08-30T23:21:29Z +http://blog.dbtune.org/post/2009/10/27/Music-recommendation-and-Linked-Data|creationDate|2009-11-12 +http://blog.dbtune.org/post/2009/10/27/Music-recommendation-and-Linked-Data|tag|http://www.semanlink.net/tag/yves_raymond +http://blog.dbtune.org/post/2009/10/27/Music-recommendation-and-Linked-Data|tag|http://www.semanlink.net/tag/dbtune +http://blog.dbtune.org/post/2009/10/27/Music-recommendation-and-Linked-Data|tag|http://www.semanlink.net/tag/linked_data +http://blog.dbtune.org/post/2009/10/27/Music-recommendation-and-Linked-Data|title|Music recommendation and Linked Data - DBTune blog +http://blog.dbtune.org/post/2009/10/27/Music-recommendation-and-Linked-Data|creationTime|2009-11-12T13:58:14Z +http://www.spurl.net/|creationDate|2005-04-27 +http://www.spurl.net/|tag|http://www.semanlink.net/tag/spurl +http://www.spurl.net/|title|http://www.spurl.net +http://www.spurl.net/|creationTime|2005-04-27T22:00:00Z +http://www.javaworld.com/javaworld/jw-04-2004/jw-0419-httpunit.html|creationDate|2008-11-18 +http://www.javaworld.com/javaworld/jw-04-2004/jw-0419-httpunit.html|tag|http://www.semanlink.net/tag/httpunit +http://www.javaworld.com/javaworld/jw-04-2004/jw-0419-httpunit.html|title|Test Web applications with HttpUnit - JavaWorld +http://www.javaworld.com/javaworld/jw-04-2004/jw-0419-httpunit.html|creationTime|2008-11-18T11:33:59Z +https://prodi.gy/|creationDate|2018-12-09 +https://prodi.gy/|tag|http://www.semanlink.net/tag/nlp_tools +https://prodi.gy/|tag|http://www.semanlink.net/tag/machine_learning_tool +https://prodi.gy/|tag|http://www.semanlink.net/tag/spacy +https://prodi.gy/|comment|> a machine teaching tool +https://prodi.gy/|title|Prodigy · An annotation tool for AI, Machine Learning & NLP +https://prodi.gy/|creationTime|2018-12-09T09:52:31Z +https://arxiv.org/abs/1802.01021|creationDate|2019-04-25 +https://arxiv.org/abs/1802.01021|tag|http://www.semanlink.net/tag/entity_linking +https://arxiv.org/abs/1802.01021|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1802.01021|arxiv_author|Jonathan Raiman 
+https://arxiv.org/abs/1802.01021|arxiv_author|Olivier Raiman +https://arxiv.org/abs/1802.01021|title|[1802.01021] DeepType: Multilingual Entity Linking by Neural Type System Evolution +https://arxiv.org/abs/1802.01021|creationTime|2019-04-25T16:06:44Z +https://arxiv.org/abs/1802.01021|arxiv_summary|"The wealth of structured (e.g. Wikidata) and unstructured data about the +world available today presents an incredible opportunity for tomorrow's +Artificial Intelligence. So far, integration of these two different modalities +is a difficult process, involving many decisions concerning how best to +represent the information so that it will be captured or useful, and +hand-labeling large amounts of data. DeepType overcomes this challenge by +explicitly integrating symbolic information into the reasoning process of a +neural network with a type system. First we construct a type system, and +second, we use it to constrain the outputs of a neural network to respect the +symbolic structure. We achieve this by reformulating the design problem into a +mixed integer problem: create a type system and subsequently train a neural +network with it. In this reformulation discrete variables select which +parent-child relations from an ontology are types within the type system, while +continuous variables control a classifier fit to the type system. The original +problem cannot be solved exactly, so we propose a 2-step algorithm: 1) +heuristic search or stochastic optimization over discrete variables that define +a type system informed by an Oracle and a Learnability heuristic, 2) gradient +descent to fit classifier parameters. We apply DeepType to the problem of +Entity Linking on three standard datasets (i.e. WikiDisamb30, CoNLL (YAGO), TAC +KBP 2010) and find that it outperforms all existing solutions by a wide margin, +including approaches that rely on a human-designed type system or recent deep +learning-based entity embeddings, while explicitly using symbolic information +lets it integrate new entities without retraining." +https://arxiv.org/abs/1802.01021|arxiv_firstAuthor|Jonathan Raiman +https://arxiv.org/abs/1802.01021|arxiv_updated|2018-02-03T20:13:42Z +https://arxiv.org/abs/1802.01021|arxiv_title|DeepType: Multilingual Entity Linking by Neural Type System Evolution +https://arxiv.org/abs/1802.01021|arxiv_published|2018-02-03T20:13:42Z +https://arxiv.org/abs/1802.01021|arxiv_num|1802.01021 +http://www.lespetitescases.net/photos-livres-musiques-what-else|creationDate|2010-02-22 +http://www.lespetitescases.net/photos-livres-musiques-what-else|tag|http://www.semanlink.net/tag/linked_data +http://www.lespetitescases.net/photos-livres-musiques-what-else|tag|http://www.semanlink.net/tag/mp3 +http://www.lespetitescases.net/photos-livres-musiques-what-else|tag|http://www.semanlink.net/tag/les_petites_cases +http://www.lespetitescases.net/photos-livres-musiques-what-else|comment|Prototype né de l'envie de transformer en RDF les métadonnées ID3 embarquées dans mes fichiers MP3 et de les relier aux données du Linked Data +http://www.lespetitescases.net/photos-livres-musiques-what-else|title|Photos, livres, musiques, what else ? 
Les petites cases +http://www.lespetitescases.net/photos-livres-musiques-what-else|creationTime|2010-02-22T17:51:33Z +http://link.springer.com/book/10.1007/978-3-319-10491-1|creationDate|2014-09-13 +http://link.springer.com/book/10.1007/978-3-319-10491-1|tag|http://www.semanlink.net/tag/ec_web_14 +http://link.springer.com/book/10.1007/978-3-319-10491-1|title|EC-WEB 2014 proceedings - Springer +http://link.springer.com/book/10.1007/978-3-319-10491-1|creationTime|2014-09-13T12:07:28Z +http://bugbrother.blog.lemonde.fr/2009/02/26/tout-ce-que-vous-avez-toujours-voulu-pirater-sans-jamais-savoir-comment-proceder/|creationDate|2009-02-26 +http://bugbrother.blog.lemonde.fr/2009/02/26/tout-ce-que-vous-avez-toujours-voulu-pirater-sans-jamais-savoir-comment-proceder/|tag|http://www.semanlink.net/tag/hackers +http://bugbrother.blog.lemonde.fr/2009/02/26/tout-ce-que-vous-avez-toujours-voulu-pirater-sans-jamais-savoir-comment-proceder/|title|Tout ce que vous avez toujours voulu pirater sans jamais savoir comment procéder - BUG BROTHER - Blog LeMonde.fr +http://bugbrother.blog.lemonde.fr/2009/02/26/tout-ce-que-vous-avez-toujours-voulu-pirater-sans-jamais-savoir-comment-proceder/|creationTime|2009-02-26T23:18:15Z +https://developers.googleblog.com/2018/04/text-embedding-models-contain-bias.html|creationDate|2018-04-14 +https://developers.googleblog.com/2018/04/text-embedding-models-contain-bias.html|tag|http://www.semanlink.net/tag/text_embeddings +https://developers.googleblog.com/2018/04/text-embedding-models-contain-bias.html|tag|http://www.semanlink.net/tag/bias +https://developers.googleblog.com/2018/04/text-embedding-models-contain-bias.html|title|Google Developers Blog: Text Embedding Models Contain Bias. Here's Why That Matters. +https://developers.googleblog.com/2018/04/text-embedding-models-contain-bias.html|creationTime|2018-04-14T11:35:00Z +https://scholarlyrepository.miami.edu/oa_dissertations/2145/|creationDate|2018-11-08 +https://scholarlyrepository.miami.edu/oa_dissertations/2145/|tag|http://www.semanlink.net/tag/imbalanced_data +https://scholarlyrepository.miami.edu/oa_dissertations/2145/|tag|http://www.semanlink.net/tag/bootstrap_aggregating_bagging +https://scholarlyrepository.miami.edu/oa_dissertations/2145/|comment|"(phD thesis) CNN with a bootstrapping strategy +" +https://scholarlyrepository.miami.edu/oa_dissertations/2145/|title|"""Deep Learning Based Imbalanced Data Classification and Information Retrieval for Multimedia Big Data"" by Yilin Yan" +https://scholarlyrepository.miami.edu/oa_dissertations/2145/|creationTime|2018-11-08T18:24:42Z +https://www.jstage.jst.go.jp/article/transinf/E94.D/10/E94.D_10_1854/_article|creationDate|2019-04-18 +https://www.jstage.jst.go.jp/article/transinf/E94.D/10/E94.D_10_1854/_article|tag|http://www.semanlink.net/tag/nlp_microsoft +https://www.jstage.jst.go.jp/article/transinf/E94.D/10/E94.D_10_1854/_article|tag|http://www.semanlink.net/tag/machine_learned_ranking +https://www.jstage.jst.go.jp/article/transinf/E94.D/10/E94.D_10_1854/_article|title|A Short Introduction to Learning to Rank (2010) +https://www.jstage.jst.go.jp/article/transinf/E94.D/10/E94.D_10_1854/_article|creationTime|2019-04-18T16:27:37Z +http://lexpansion.lexpress.fr/economie/les-vraies-raisons-du-succes-des-verts-allemands_251414.html|creationDate|2011-03-29 +http://lexpansion.lexpress.fr/economie/les-vraies-raisons-du-succes-des-verts-allemands_251414.html|tag|http://www.semanlink.net/tag/economie_ecologique 
+http://lexpansion.lexpress.fr/economie/les-vraies-raisons-du-succes-des-verts-allemands_251414.html|tag|http://www.semanlink.net/tag/verts +http://lexpansion.lexpress.fr/economie/les-vraies-raisons-du-succes-des-verts-allemands_251414.html|tag|http://www.semanlink.net/tag/allemagne +http://lexpansion.lexpress.fr/economie/les-vraies-raisons-du-succes-des-verts-allemands_251414.html|comment|« Aujourd'hui, l'économie verte s'appuie sur des chaires universitaires et des centres de recherches de plus en plus nombreux », rappelle Félix Christian Matthes, directeur du département Energie du très renommé Institut de recherches appliquées en Ecologie de Fribourg. Les verts ont donc une longueur d'avance sur bien des sujets centraux de notre époque, un capital politique qui a de l'avenir. +http://lexpansion.lexpress.fr/economie/les-vraies-raisons-du-succes-des-verts-allemands_251414.html|title|Les vraies raisons du succès des Verts allemands - LExpansion.com +http://lexpansion.lexpress.fr/economie/les-vraies-raisons-du-succes-des-verts-allemands_251414.html|creationTime|2011-03-29T14:09:03Z +http://ontologies.makolab.com/gc/gc.html|creationDate|2013-04-16 +http://ontologies.makolab.com/gc/gc.html|tag|http://www.semanlink.net/tag/mirek_sopek +http://ontologies.makolab.com/gc/gc.html|title|Gainesville Core +http://ontologies.makolab.com/gc/gc.html|creationTime|2013-04-16T18:06:53Z +http://www.ibm.com/developerworks/web/library/wa-memleak/|creationDate|2012-09-14 +http://www.ibm.com/developerworks/web/library/wa-memleak/|tag|http://www.semanlink.net/tag/javascript +http://www.ibm.com/developerworks/web/library/wa-memleak/|tag|http://www.semanlink.net/tag/memory_leak +http://www.ibm.com/developerworks/web/library/wa-memleak/|title|Memory leak patterns in JavaScript +http://www.ibm.com/developerworks/web/library/wa-memleak/|creationTime|2012-09-14T01:30:28Z +http://platon.escet.urjc.es/~axel/sparqltutorial/|creationDate|2007-06-13 +http://platon.escet.urjc.es/~axel/sparqltutorial/|tag|http://www.semanlink.net/tag/sparql +http://platon.escet.urjc.es/~axel/sparqltutorial/|tag|http://www.semanlink.net/tag/tutorial +http://platon.escet.urjc.es/~axel/sparqltutorial/|tag|http://www.semanlink.net/tag/eswc_2007 +http://platon.escet.urjc.es/~axel/sparqltutorial/|title|ESWC 2007 Tutorial: SPARQL - Where are we? Current state, theory and practice +http://platon.escet.urjc.es/~axel/sparqltutorial/|creationTime|2007-06-13T23:38:49Z +https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html|creationDate|2018-03-05 +https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html|tag|http://www.semanlink.net/tag/named_entity_recognition +https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html|tag|http://www.semanlink.net/tag/guillaume_genthial +https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html|tag|http://www.semanlink.net/tag/bi_lstm +https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html|tag|http://www.semanlink.net/tag/conditional_random_field +https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html|tag|http://www.semanlink.net/tag/part_of_speech_tagging +https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html|comment|"bi-LSTM + CRF with character embeddings for NER and POS. 
+[linked from here](http://nlp.town/blog/ner-and-the-road-to-deep-learning/)"
+https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html|relatedDoc|http://nlp.town/blog/ner-and-the-road-to-deep-learning/
+https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html|title|Sequence Tagging with Tensorflow
+https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html|creationTime|2018-03-05T18:51:35Z
+http://meta.wikimedia.org/wiki/Wikidata/Data_model|creationDate|2013-09-12
+http://meta.wikimedia.org/wiki/Wikidata/Data_model|tag|http://www.semanlink.net/tag/wikidata
+http://meta.wikimedia.org/wiki/Wikidata/Data_model|title|Wikidata/Data model primer
+http://meta.wikimedia.org/wiki/Wikidata/Data_model|creationTime|2013-09-12T00:03:16Z
+http://dowhatimean.net/2006/08/exporting-the-os-x-address-book-to-foaf|creationDate|2006-10-09
+http://dowhatimean.net/2006/08/exporting-the-os-x-address-book-to-foaf|tag|http://www.semanlink.net/tag/mac_os_x
+http://dowhatimean.net/2006/08/exporting-the-os-x-address-book-to-foaf|tag|http://www.semanlink.net/tag/foaf
+http://dowhatimean.net/2006/08/exporting-the-os-x-address-book-to-foaf|title|dowhatimean.net » Exporting the OS X Address Book to FOAF
+https://www.quora.com/What-is-life-like-in-China/answer/Kaiser-Kuo?srid=h6K|creationDate|2015-11-01
+https://www.quora.com/What-is-life-like-in-China/answer/Kaiser-Kuo?srid=h6K|tag|http://www.semanlink.net/tag/chine
+https://www.quora.com/What-is-life-like-in-China/answer/Kaiser-Kuo?srid=h6K|title|Kaiser Kuo's answer to What is life like in China? - Quora
+https://www.quora.com/What-is-life-like-in-China/answer/Kaiser-Kuo?srid=h6K|creationTime|2015-11-01T14:33:44Z
+https://www.youtube.com/watch?v=VIRCybGgHts|creationDate|2018-01-22
+https://www.youtube.com/watch?v=VIRCybGgHts|tag|http://www.semanlink.net/tag/neuroscience_and_ai
+https://www.youtube.com/watch?v=VIRCybGgHts|tag|http://www.semanlink.net/tag/backpropagation_vs_biology
+https://www.youtube.com/watch?v=VIRCybGgHts|tag|http://www.semanlink.net/tag/youtube_video
+https://www.youtube.com/watch?v=VIRCybGgHts|tag|http://www.semanlink.net/tag/geoffrey_hinton
+https://www.youtube.com/watch?v=VIRCybGgHts|title|"Stanford Seminar - ""Can the brain do back-propagation?"" - Geoffrey Hinton"
+https://www.youtube.com/watch?v=VIRCybGgHts|creationTime|2018-01-22T11:16:36Z
+http://blogs.sun.com/mr/entry/closures|creationDate|2010-04-28
+http://blogs.sun.com/mr/entry/closures|tag|http://www.semanlink.net/tag/closure
+http://blogs.sun.com/mr/entry/closures|tag|http://www.semanlink.net/tag/java_7
+http://blogs.sun.com/mr/entry/closures|title|Closures for java - Mark Reinhold’s Blog
+http://blogs.sun.com/mr/entry/closures|creationTime|2010-04-28T14:13:33Z
+https://www.technologyreview.com/s/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|creationDate|2016-04-25
+https://www.technologyreview.com/s/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|tag|http://www.semanlink.net/tag/jeff_hawkins
+https://www.technologyreview.com/s/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|tag|http://www.semanlink.net/tag/neuroscience
+https://www.technologyreview.com/s/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|comment|Why Neurons Have Thousands of Synapses, A Theory of Sequence Memory in Neocortex
+https://www.technologyreview.com/s/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|title|Single Artificial Neuron Taught to Recognize Hundreds of Patterns
+https://www.technologyreview.com/s/543486/single-artificial-neuron-taught-to-recognize-hundreds-of-patterns/|creationTime|2016-04-25T14:52:20Z
+http://www.npr.org/sections/goatsandsoda/2016/08/25/491261766/new-virus-breaks-the-rules-of-infection|creationDate|2016-08-27
+http://www.npr.org/sections/goatsandsoda/2016/08/25/491261766/new-virus-breaks-the-rules-of-infection|tag|http://www.semanlink.net/tag/virus
+http://www.npr.org/sections/goatsandsoda/2016/08/25/491261766/new-virus-breaks-the-rules-of-infection|title|New Study Finds A Virus That Breaks The Rules Of Viral Infection : Goats and Soda : NPR
+http://www.npr.org/sections/goatsandsoda/2016/08/25/491261766/new-virus-breaks-the-rules-of-infection|creationTime|2016-08-27T00:48:50Z
+https://www.eventbrite.com/e/2019-knowledge-graph-conference-tickets-54867900367?aff=efbneb|creationDate|2019-04-11
+https://www.eventbrite.com/e/2019-knowledge-graph-conference-tickets-54867900367?aff=efbneb|tag|http://www.semanlink.net/tag/knowledge_graph_conference_2019
+https://www.eventbrite.com/e/2019-knowledge-graph-conference-tickets-54867900367?aff=efbneb|title|2019 Knowledge Graph Conference - Knowledge Graphs for AI in the Enterprise
+https://www.eventbrite.com/e/2019-knowledge-graph-conference-tickets-54867900367?aff=efbneb|creationTime|2019-04-11T00:48:58Z
+http://en.wikipedia.org/wiki/Ashoka|creationDate|2012-12-12
+http://en.wikipedia.org/wiki/Ashoka|tag|http://www.semanlink.net/tag/personnage_historique
+http://en.wikipedia.org/wiki/Ashoka|tag|http://www.semanlink.net/tag/boudhisme
+http://en.wikipedia.org/wiki/Ashoka|tag|http://www.semanlink.net/tag/antiquite_de_l_inde
+http://en.wikipedia.org/wiki/Ashoka|title|Ashoka
+http://en.wikipedia.org/wiki/Ashoka|creationTime|2012-12-12T00:08:27Z
+https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/annoytutorial.ipynb|creationDate|2017-07-10
+https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/annoytutorial.ipynb|tag|http://www.semanlink.net/tag/similarity_queries
+https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/annoytutorial.ipynb|tag|http://www.semanlink.net/tag/gensim
+https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/annoytutorial.ipynb|tag|http://www.semanlink.net/tag/tutorial
+https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/annoytutorial.ipynb|comment|Using the (Annoy Approximate Nearest Neighbors Oh Yeah) library for similarity queries with a Word2Vec model built with gensim.
+https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/annoytutorial.ipynb|title|gensim : Similarity Queries using Annoy (Tutorial)
+https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/annoytutorial.ipynb|creationTime|2017-07-10T19:15:18Z
+https://www.quora.com/For-what-tasks-is-Pytorch-preferable-to-Tensorflow|creationDate|2018-08-28
+https://www.quora.com/For-what-tasks-is-Pytorch-preferable-to-Tensorflow|tag|http://www.semanlink.net/tag/pytorch
+https://www.quora.com/For-what-tasks-is-Pytorch-preferable-to-Tensorflow|tag|http://www.semanlink.net/tag/tensorflow
+https://www.quora.com/For-what-tasks-is-Pytorch-preferable-to-Tensorflow|title|For what tasks is Pytorch preferable to Tensorflow? - Quora
+https://www.quora.com/For-what-tasks-is-Pytorch-preferable-to-Tensorflow|creationTime|2018-08-28T09:23:39Z
+https://www.quora.com/How-does-doc2vec-represent-feature-vector-of-a-document-Can-anyone-explain-mathematically-how-the-process-is-done/answer/Piyush-Bhardwaj-7|creationDate|2018-02-14
+https://www.quora.com/How-does-doc2vec-represent-feature-vector-of-a-document-Can-anyone-explain-mathematically-how-the-process-is-done/answer/Piyush-Bhardwaj-7|tag|http://www.semanlink.net/tag/doc2vec
+https://www.quora.com/How-does-doc2vec-represent-feature-vector-of-a-document-Can-anyone-explain-mathematically-how-the-process-is-done/answer/Piyush-Bhardwaj-7|title|Explanation for Doc2Vec - Quora
+https://www.quora.com/How-does-doc2vec-represent-feature-vector-of-a-document-Can-anyone-explain-mathematically-how-the-process-is-done/answer/Piyush-Bhardwaj-7|creationTime|2018-02-14T01:19:08Z
+http://www.lepoint.fr/actualites-technologie-internet/l-avis-confidentiel-de-la-cnil-contre-la-loi-creation-et-internet/1387/0/288345|creationDate|2008-11-10
+http://www.lepoint.fr/actualites-technologie-internet/l-avis-confidentiel-de-la-cnil-contre-la-loi-creation-et-internet/1387/0/288345|tag|http://www.semanlink.net/tag/loi_sur_le_telechargement
+http://www.lepoint.fr/actualites-technologie-internet/l-avis-confidentiel-de-la-cnil-contre-la-loi-creation-et-internet/1387/0/288345|tag|http://www.semanlink.net/tag/cnil
+http://www.lepoint.fr/actualites-technologie-internet/l-avis-confidentiel-de-la-cnil-contre-la-loi-creation-et-internet/1387/0/288345|title|L'avis confidentiel de la Cnil contre la loi Création et Internet, actualité Tech & Net : Le Point
+http://www.lepoint.fr/actualites-technologie-internet/l-avis-confidentiel-de-la-cnil-contre-la-loi-creation-et-internet/1387/0/288345|creationTime|2008-11-10T00:57:40Z
+http://www.w3.org/TR/2008/NOTE-hcls-senselab-20080604/|creationDate|2008-06-12
+http://www.w3.org/TR/2008/NOTE-hcls-senselab-20080604/|tag|http://www.semanlink.net/tag/semantic_web_life_sciences
+http://www.w3.org/TR/2008/NOTE-hcls-senselab-20080604/|tag|http://www.semanlink.net/tag/database_to_rdf_mapping
+http://www.w3.org/TR/2008/NOTE-hcls-senselab-20080604/|tag|http://www.semanlink.net/tag/owl
+http://www.w3.org/TR/2008/NOTE-hcls-senselab-20080604/|title|Experiences with the conversion of SenseLab databases to RDF/OWL
+http://www.w3.org/TR/2008/NOTE-hcls-senselab-20080604/|creationTime|2008-06-12T08:35:10Z
+http://www.mulgara.org/|creationDate|2006-10-09
+http://www.mulgara.org/|tag|http://www.semanlink.net/tag/triplestore
+http://www.mulgara.org/|comment|"Open Source fork of Kowari.
+"
+http://www.mulgara.org/|title|Mulgara: open source scalable RDF database written entirely in Java.
+http://blog.intrapromote.com/google-for-auto-dealers-mechanics-and-auto-parts-stores/|creationDate|2014-05-01 +http://blog.intrapromote.com/google-for-auto-dealers-mechanics-and-auto-parts-stores/|tag|http://www.semanlink.net/tag/googleplus +http://blog.intrapromote.com/google-for-auto-dealers-mechanics-and-auto-parts-stores/|tag|http://www.semanlink.net/tag/automobile +http://blog.intrapromote.com/google-for-auto-dealers-mechanics-and-auto-parts-stores/|title|Google+ For Auto Dealers, Mechanics, and Auto Parts Stores Above the Fold & Socially Acceptable +http://blog.intrapromote.com/google-for-auto-dealers-mechanics-and-auto-parts-stores/|creationTime|2014-05-01T00:55:54Z +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/africa/5176110.stm|creationDate|2006-07-20 +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/africa/5176110.stm|tag|http://www.semanlink.net/tag/afrique +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/africa/5176110.stm|tag|http://www.semanlink.net/tag/unesco +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/africa/5176110.stm|tag|http://www.semanlink.net/tag/rockart +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/africa/5176110.stm|title|BBC NEWS Five new heritage sites in Africa +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/africa/5176110.stm|source|BBC +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/world/africa/5176110.stm|date|2006-07-13 +http://nlp.seas.harvard.edu/latent-nlp-tutorial.html|creationDate|2018-11-01 +http://nlp.seas.harvard.edu/latent-nlp-tutorial.html|tag|http://www.semanlink.net/tag/tutorial +http://nlp.seas.harvard.edu/latent-nlp-tutorial.html|tag|http://www.semanlink.net/tag/deep_latent_variable_models +http://nlp.seas.harvard.edu/latent-nlp-tutorial.html|tag|http://www.semanlink.net/tag/variational_bayesian_methods +http://nlp.seas.harvard.edu/latent-nlp-tutorial.html|tag|http://www.semanlink.net/tag/nlp_harvard +http://nlp.seas.harvard.edu/latent-nlp-tutorial.html|tag|http://www.semanlink.net/tag/emnlp_2018 +http://nlp.seas.harvard.edu/latent-nlp-tutorial.html|comment|[arxiv](https://arxiv.org/abs/1812.06834.pdf) +http://nlp.seas.harvard.edu/latent-nlp-tutorial.html|title|Deep Latent-Variable Models for Natural Language - Tutorial - harvardnlp +http://nlp.seas.harvard.edu/latent-nlp-tutorial.html|creationTime|2018-11-01T22:28:15Z +http://blog.antidot.net/2010/12/22/compte-rendu-du-premier-meetup-web-semantique/|creationDate|2011-01-09 +http://blog.antidot.net/2010/12/22/compte-rendu-du-premier-meetup-web-semantique/|tag|http://www.semanlink.net/tag/christian_faure +http://blog.antidot.net/2010/12/22/compte-rendu-du-premier-meetup-web-semantique/|tag|http://www.semanlink.net/tag/gautier_poupeau +http://blog.antidot.net/2010/12/22/compte-rendu-du-premier-meetup-web-semantique/|tag|http://www.semanlink.net/tag/meetup_web_semantique +http://blog.antidot.net/2010/12/22/compte-rendu-du-premier-meetup-web-semantique/|title|Compte-rendu du premier Meetup Web Sémantique « Blog Antidot +http://blog.antidot.net/2010/12/22/compte-rendu-du-premier-meetup-web-semantique/|creationTime|2011-01-09T21:38:11Z +http://pt.wikipedia.org/wiki/Milton_Santos|creationDate|2008-11-10 +http://pt.wikipedia.org/wiki/Milton_Santos|tag|http://www.semanlink.net/tag/geographie +http://pt.wikipedia.org/wiki/Milton_Santos|tag|http://www.semanlink.net/tag/bresil 
+http://pt.wikipedia.org/wiki/Milton_Santos|tag|http://www.semanlink.net/tag/ofir +http://pt.wikipedia.org/wiki/Milton_Santos|title|Milton Santos - Wikipédia, a enciclopédia livre +http://pt.wikipedia.org/wiki/Milton_Santos|creationTime|2008-11-10T18:45:15Z +http://danbri.org/words/2005/08/06/121|creationDate|2005-08-18 +http://danbri.org/words/2005/08/06/121|tag|http://www.semanlink.net/tag/sparql_and_jena +http://danbri.org/words/2005/08/06/121|tag|http://www.semanlink.net/tag/dan_brickley +http://danbri.org/words/2005/08/06/121|tag|http://www.semanlink.net/tag/owl +http://danbri.org/words/2005/08/06/121|tag|http://www.semanlink.net/tag/protege +http://danbri.org/words/2005/08/06/121|title|danbri’s foaf stories » SPARQLing Protégé-OWL Jena integration +http://www.w3.org/2007/Talks/1119-Beijing-IH/HTML/img40.html|creationDate|2008-03-04 +http://www.w3.org/2007/Talks/1119-Beijing-IH/HTML/img40.html|tag|http://www.semanlink.net/tag/referentiel_des_operations +http://www.w3.org/2007/Talks/1119-Beijing-IH/HTML/img40.html|tag|http://www.semanlink.net/tag/ivan_herman +http://www.w3.org/2007/Talks/1119-Beijing-IH/HTML/img40.html|tag|http://www.semanlink.net/tag/link_to_me +http://www.w3.org/2007/Talks/1119-Beijing-IH/HTML/img40.html|tag|http://www.semanlink.net/tag/automobile_and_w3c +http://www.w3.org/2007/Talks/1119-Beijing-IH/HTML/img40.html|comment|Invited talk given at the First China Semantic Web Symposium (CSWS 2007), in Beijing, China, on November 19, 2007. +http://www.w3.org/2007/Talks/1119-Beijing-IH/HTML/img40.html|title|Ivan Herman - Semantic Web Adoption +http://www.w3.org/2007/Talks/1119-Beijing-IH/HTML/img40.html|creationTime|2008-03-04T22:51:46Z +https://www.quora.com/How-do-I-find-out-if-an-idea-I-want-to-patent-is-already-patented-by-someone-else|creationDate|2019-02-20 +https://www.quora.com/How-do-I-find-out-if-an-idea-I-want-to-patent-is-already-patented-by-someone-else|tag|http://www.semanlink.net/tag/patent_finding +https://www.quora.com/How-do-I-find-out-if-an-idea-I-want-to-patent-is-already-patented-by-someone-else|title|How to find out if an idea I want to patent is already patented by someone else - Quora +https://www.quora.com/How-do-I-find-out-if-an-idea-I-want-to-patent-is-already-patented-by-someone-else|creationTime|2019-02-20T11:47:40Z +http://2015.eswc-conferences.org/lisegetoor|creationDate|2014-11-27 +http://2015.eswc-conferences.org/lisegetoor|tag|http://www.semanlink.net/tag/combining_statistics_and_semantics +http://2015.eswc-conferences.org/lisegetoor|tag|http://www.semanlink.net/tag/eswc +http://2015.eswc-conferences.org/lisegetoor|title|Combining Statistics and Semantics to Turn Data into Knowledge - Lise Getoor, University of California, US 12th ESWC 2015 +http://2015.eswc-conferences.org/lisegetoor|creationTime|2014-11-27T13:37:03Z +http://addyosmani.com/blog/essential-js-namespacing/#beginners|creationDate|2012-10-17 +http://addyosmani.com/blog/essential-js-namespacing/#beginners|tag|http://www.semanlink.net/tag/javascript_patterns +http://addyosmani.com/blog/essential-js-namespacing/#beginners|title|Essential JavaScript Namespacing Patterns +http://addyosmani.com/blog/essential-js-namespacing/#beginners|creationTime|2012-10-17T02:02:36Z +http://www.macdevcenter.com/pub/a/mac/2003/02/25/apple_scripting.html|creationDate|2006-01-24 +http://www.macdevcenter.com/pub/a/mac/2003/02/25/apple_scripting.html|tag|http://www.semanlink.net/tag/apple_java 
+http://www.macdevcenter.com/pub/a/mac/2003/02/25/apple_scripting.html|tag|http://www.semanlink.net/tag/applescript +http://www.macdevcenter.com/pub/a/mac/2003/02/25/apple_scripting.html|title|MacDevCenter.com: Controlling Your Mac with AppleScript and Java +http://www.nytimes.com/2005/09/09/international/africa/09wheat.html?ei=5088&en=fa9323f74b1f82b0&ex=1283918400&partner=rssnyt&emc=rss&pagewanted=print|creationDate|2005-09-12 +http://www.nytimes.com/2005/09/09/international/africa/09wheat.html?ei=5088&en=fa9323f74b1f82b0&ex=1283918400&partner=rssnyt&emc=rss&pagewanted=print|tag|http://www.semanlink.net/tag/agriculture +http://www.nytimes.com/2005/09/09/international/africa/09wheat.html?ei=5088&en=fa9323f74b1f82b0&ex=1283918400&partner=rssnyt&emc=rss&pagewanted=print|tag|http://www.semanlink.net/tag/maladie +http://www.nytimes.com/2005/09/09/international/africa/09wheat.html?ei=5088&en=fa9323f74b1f82b0&ex=1283918400&partner=rssnyt&emc=rss&pagewanted=print|comment|Biologists warned Thursday that a virulent new strain of a previously controlled plant disease had emerged in East Africa and could wipe out 10 percent of the world's wheat production if its spread is not halted. +http://www.nytimes.com/2005/09/09/international/africa/09wheat.html?ei=5088&en=fa9323f74b1f82b0&ex=1283918400&partner=rssnyt&emc=rss&pagewanted=print|title|New Strain of Wheat Rust Appears in Africa - New York Times +http://www.semanticwave.com/blog/archives/000225.jsp|creationDate|2007-05-25 +http://www.semanticwave.com/blog/archives/000225.jsp|tag|http://www.semanlink.net/tag/semanlink_related +http://www.semanticwave.com/blog/archives/000225.jsp|tag|http://www.semanlink.net/tag/tagging +http://www.semanticwave.com/blog/archives/000225.jsp|comment|"""Apps can allow for the entry of tags for a particular subject through different predicates.""" +http://www.semanticwave.com/blog/archives/000225.jsp|title|Semantic Wave: Breaking Tags Out of Their Existential Crisis +http://www.semanticwave.com/blog/archives/000225.jsp|creationTime|2007-05-25T15:10:02Z +http://www.megapixel.net|creationDate|2005-05-26 +http://www.megapixel.net|tag|http://www.semanlink.net/tag/photo_numerique +http://www.megapixel.net|tag|http://www.semanlink.net/tag/guide_d_achat +http://www.megapixel.net|title|megapixel.net Webzine: Digital Camera Reviews and Information +http://www.wired.com/wiredenterprise/2013/05/neuro-artificial-intelligence/all/|creationDate|2013-05-25 +http://www.wired.com/wiredenterprise/2013/05/neuro-artificial-intelligence/all/|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.wired.com/wiredenterprise/2013/05/neuro-artificial-intelligence/all/|tag|http://www.semanlink.net/tag/google +http://www.wired.com/wiredenterprise/2013/05/neuro-artificial-intelligence/all/|tag|http://www.semanlink.net/tag/jeff_hawkins +http://www.wired.com/wiredenterprise/2013/05/neuro-artificial-intelligence/all/|tag|http://www.semanlink.net/tag/ng +http://www.wired.com/wiredenterprise/2013/05/neuro-artificial-intelligence/all/|tag|http://www.semanlink.net/tag/one_learning_algorithm_hypothesis +http://www.wired.com/wiredenterprise/2013/05/neuro-artificial-intelligence/all/|tag|http://www.semanlink.net/tag/neuroscience_and_ai +http://www.wired.com/wiredenterprise/2013/05/neuro-artificial-intelligence/all/|title|The Man Behind the Google Brain: Andrew Ng and the Quest for the New AI Wired Enterprise Wired.com +http://www.wired.com/wiredenterprise/2013/05/neuro-artificial-intelligence/all/|creationTime|2013-05-25T22:37:02Z 
+http://robohub.org/why-robots-will-not-be-smarter-than-humans-by-2029/|creationDate|2014-03-08 +http://robohub.org/why-robots-will-not-be-smarter-than-humans-by-2029/|tag|http://www.semanlink.net/tag/technological_singularity +http://robohub.org/why-robots-will-not-be-smarter-than-humans-by-2029/|tag|http://www.semanlink.net/tag/robotique +http://robohub.org/why-robots-will-not-be-smarter-than-humans-by-2029/|title|Why robots will not be smarter than humans by 2029 Robohub +http://robohub.org/why-robots-will-not-be-smarter-than-humans-by-2029/|creationTime|2014-03-08T14:08:13Z +http://microformats.org/wiki/faqs-for-rdf|creationDate|2005-11-09 +http://microformats.org/wiki/faqs-for-rdf|tag|http://www.semanlink.net/tag/microformats +http://microformats.org/wiki/faqs-for-rdf|tag|http://www.semanlink.net/tag/rdf +http://microformats.org/wiki/faqs-for-rdf|title|faqs-for-rdf - Microformats +https://www.npmjs.com/package/markdown-it-replace-link|creationDate|2017-04-01 +https://www.npmjs.com/package/markdown-it-replace-link|tag|http://www.semanlink.net/tag/markdown_ittt +https://www.npmjs.com/package/markdown-it-replace-link|title|markdown-it-replace-link +https://www.npmjs.com/package/markdown-it-replace-link|creationTime|2017-04-01T14:26:34Z +http://salsa.blog.lemonde.fr/2013/01/10/lafrique-enchantee-a-la-decouverte-lafro-cubain/|creationDate|2013-01-12 +http://salsa.blog.lemonde.fr/2013/01/10/lafrique-enchantee-a-la-decouverte-lafro-cubain/|tag|http://www.semanlink.net/tag/france_inter +http://salsa.blog.lemonde.fr/2013/01/10/lafrique-enchantee-a-la-decouverte-lafro-cubain/|tag|http://www.semanlink.net/tag/cuba +http://salsa.blog.lemonde.fr/2013/01/10/lafrique-enchantee-a-la-decouverte-lafro-cubain/|tag|http://www.semanlink.net/tag/music_of_africa +http://salsa.blog.lemonde.fr/2013/01/10/lafrique-enchantee-a-la-decouverte-lafro-cubain/|title|L’Afrique enchantée : à la découverte l’afro-cubain Mundo Latino +http://salsa.blog.lemonde.fr/2013/01/10/lafrique-enchantee-a-la-decouverte-lafro-cubain/|creationTime|2013-01-12T23:56:18Z +http://www.cnrs.fr/insb/recherche/parutions/articles2016/d-devienne.html|creationDate|2017-01-05 +http://www.cnrs.fr/insb/recherche/parutions/articles2016/d-devienne.html|tag|http://www.semanlink.net/tag/tree_of_life +http://www.cnrs.fr/insb/recherche/parutions/articles2016/d-devienne.html|title|Une plongée vertigineuse dans la diversité du monde vivant - CNRS - Sciences biologiques - Parutions +http://www.cnrs.fr/insb/recherche/parutions/articles2016/d-devienne.html|creationTime|2017-01-05T13:29:02Z +http://server1.fandm.edu/departments/Anthropology/mami.html|creationDate|2007-09-05 +http://server1.fandm.edu/departments/Anthropology/mami.html|tag|http://www.semanlink.net/tag/mami_wata +http://server1.fandm.edu/departments/Anthropology/mami.html|title|Mami Wata. 
+http://server1.fandm.edu/departments/Anthropology/mami.html|creationTime|2007-09-05T00:45:31Z +http://stackoverflow.com/questions/23183931/maven-java-ee-configuration|creationDate|2014-09-15 +http://stackoverflow.com/questions/23183931/maven-java-ee-configuration|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/23183931/maven-java-ee-configuration|tag|http://www.semanlink.net/tag/eclipse +http://stackoverflow.com/questions/23183931/maven-java-ee-configuration|tag|http://www.semanlink.net/tag/maven +http://stackoverflow.com/questions/23183931/maven-java-ee-configuration|title|eclipse - Maven Java EE Configuration - Stack Overflow +http://stackoverflow.com/questions/23183931/maven-java-ee-configuration|creationTime|2014-09-15T17:08:38Z +http://openspring.net/sites/openspring.net/files/slides_iswc2009_final2.pdf|creationDate|2011-09-16 +http://openspring.net/sites/openspring.net/files/slides_iswc2009_final2.pdf|tag|http://www.semanlink.net/tag/slides +http://openspring.net/sites/openspring.net/files/slides_iswc2009_final2.pdf|tag|http://www.semanlink.net/tag/drupal_rdf +http://openspring.net/sites/openspring.net/files/slides_iswc2009_final2.pdf|title|Produce and Consume Linked Data with Drupal! (slides) +http://openspring.net/sites/openspring.net/files/slides_iswc2009_final2.pdf|creationTime|2011-09-16T00:01:33Z +https://haveibeenpwned.com/|creationDate|2017-12-17 +https://haveibeenpwned.com/|tag|http://www.semanlink.net/tag/securite_informatique +https://haveibeenpwned.com/|title|Have I been pwned? Check if your email has been compromised in a data breach +https://haveibeenpwned.com/|creationTime|2017-12-17T12:02:04Z +http://www.cnrs.fr/inee/communication/breves/b198.html?utm_content=buffer004b7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-07-31 +http://www.cnrs.fr/inee/communication/breves/b198.html?utm_content=buffer004b7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/genetique_humaine +http://www.cnrs.fr/inee/communication/breves/b198.html?utm_content=buffer004b7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/histoire_de_l_europe +http://www.cnrs.fr/inee/communication/breves/b198.html?utm_content=buffer004b7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/paleoanthropology_genetics +http://www.cnrs.fr/inee/communication/breves/b198.html?utm_content=buffer004b7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|L’histoire génétique des Européens en partie dévoilée (CNRS) +http://www.cnrs.fr/inee/communication/breves/b198.html?utm_content=buffer004b7&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-07-31T19:35:33Z +http://www.coolestguyplanet.net/fastest-way-of-installing-drupal-7-4-on-mac-os-x-10-6/|creationDate|2011-09-15 +http://www.coolestguyplanet.net/fastest-way-of-installing-drupal-7-4-on-mac-os-x-10-6/|tag|http://www.semanlink.net/tag/installing_apps +http://www.coolestguyplanet.net/fastest-way-of-installing-drupal-7-4-on-mac-os-x-10-6/|tag|http://www.semanlink.net/tag/mac_os_x +http://www.coolestguyplanet.net/fastest-way-of-installing-drupal-7-4-on-mac-os-x-10-6/|tag|http://www.semanlink.net/tag/drupal +http://www.coolestguyplanet.net/fastest-way-of-installing-drupal-7-4-on-mac-os-x-10-6/|title|Fastest Way of Installing Drupal 7.8 on Mac OS X 10.7 Lion & 10.6 and fix Clean URLs using the Terminal coolestguyplanet.net Neil Gee 
+http://www.coolestguyplanet.net/fastest-way-of-installing-drupal-7-4-on-mac-os-x-10-6/|creationTime|2011-09-15T17:55:58Z +https://towardsdatascience.com/structured-deep-learning-b8ca4138b848|creationDate|2017-11-27 +https://towardsdatascience.com/structured-deep-learning-b8ca4138b848|tag|http://www.semanlink.net/tag/entity_embeddings +https://towardsdatascience.com/structured-deep-learning-b8ca4138b848|comment|creating embeddings for categorical data +https://towardsdatascience.com/structured-deep-learning-b8ca4138b848|title|Structured Deep Learning – Towards Data Science +https://towardsdatascience.com/structured-deep-learning-b8ca4138b848|creationTime|2017-11-27T16:40:50Z +http://www.wired.com/gadgetlab/2012/08/apple-amazon-mat-honan-hacking/3/|creationDate|2012-08-08 +http://www.wired.com/gadgetlab/2012/08/apple-amazon-mat-honan-hacking/3/|tag|http://www.semanlink.net/tag/apple +http://www.wired.com/gadgetlab/2012/08/apple-amazon-mat-honan-hacking/3/|tag|http://www.semanlink.net/tag/online_security +http://www.wired.com/gadgetlab/2012/08/apple-amazon-mat-honan-hacking/3/|tag|http://www.semanlink.net/tag/hack +http://www.wired.com/gadgetlab/2012/08/apple-amazon-mat-honan-hacking/3/|tag|http://www.semanlink.net/tag/amazon +http://www.wired.com/gadgetlab/2012/08/apple-amazon-mat-honan-hacking/3/|title|How Apple and Amazon Security Flaws Led to My Epic Hacking Gadget Lab Wired.com +http://www.wired.com/gadgetlab/2012/08/apple-amazon-mat-honan-hacking/3/|creationTime|2012-08-08T10:59:23Z +http://www.liberationdelacroissance.fr/files/home.php|creationDate|2007-09-28 +http://www.liberationdelacroissance.fr/files/home.php|tag|http://www.semanlink.net/tag/attali +http://www.liberationdelacroissance.fr/files/home.php|tag|http://www.semanlink.net/tag/croissance +http://www.liberationdelacroissance.fr/files/home.php|comment|La Commission pour la Libération de la Croissance Française va travailler pendant quatre mois pour proposer les réformes permettant de sortir de cette situation. Tous les sujets y seront abordés. Toutes les propositions seront étudiées, chiffrées, analysées. 
+http://www.liberationdelacroissance.fr/files/home.php|title|Libération de la Croissance Française
+http://www.liberationdelacroissance.fr/files/home.php|creationTime|2007-09-28T23:53:11Z
+https://www.wired.com/story/everipedia-blockchain/|creationDate|2017-12-17
+https://www.wired.com/story/everipedia-blockchain/|tag|http://www.semanlink.net/tag/wikipedia
+https://www.wired.com/story/everipedia-blockchain/|tag|http://www.semanlink.net/tag/everipedia
+https://www.wired.com/story/everipedia-blockchain/|tag|http://www.semanlink.net/tag/blockchain
+https://www.wired.com/story/everipedia-blockchain/|title|The Wikipedia Competitor That's Harnessing Blockchain For Epistemological Supremacy WIRED
+https://www.wired.com/story/everipedia-blockchain/|creationTime|2017-12-17T11:55:50Z
+http://space.newscientist.com/channel/astronomy/cosmology/dn9988|creationDate|2008-08-28
+http://space.newscientist.com/channel/astronomy/cosmology/dn9988|tag|http://www.semanlink.net/tag/cosmologie
+http://space.newscientist.com/channel/astronomy/cosmology/dn9988|title|Cosmology -04 September 2006 - New Scientist Space
+http://space.newscientist.com/channel/astronomy/cosmology/dn9988|creationTime|2008-08-28T13:45:04Z
+https://colab.research.google.com/notebooks/mlcc/intro_to_pandas.ipynb?hl=fr#scrollTo=rHLcriKWLRe4|creationDate|2018-03-12
+https://colab.research.google.com/notebooks/mlcc/intro_to_pandas.ipynb?hl=fr#scrollTo=rHLcriKWLRe4|tag|http://www.semanlink.net/tag/pandas
+https://colab.research.google.com/notebooks/mlcc/intro_to_pandas.ipynb?hl=fr#scrollTo=rHLcriKWLRe4|title|Présentation rapide de Pandas
+https://colab.research.google.com/notebooks/mlcc/intro_to_pandas.ipynb?hl=fr#scrollTo=rHLcriKWLRe4|creationTime|2018-03-12T12:55:55Z
+https://semanti.ca/blog/?how-to-prepare-for-a-machine-learning-interview|creationDate|2018-08-07
+https://semanti.ca/blog/?how-to-prepare-for-a-machine-learning-interview|tag|http://www.semanlink.net/tag/machine_learning
+https://semanti.ca/blog/?how-to-prepare-for-a-machine-learning-interview|comment|the things you have to master to become a Machine Learning expert
+https://semanti.ca/blog/?how-to-prepare-for-a-machine-learning-interview|title|How to Prepare for a Machine Learning Interview - Semantic Bits
+https://semanti.ca/blog/?how-to-prepare-for-a-machine-learning-interview|creationTime|2018-08-07T10:34:14Z
+http://planete.websemantique.org/|creationDate|2006-10-25
+http://planete.websemantique.org/|tag|http://www.semanlink.net/tag/semantic_web
+http://planete.websemantique.org/|comment|Cette planète rassemble les principaux carnets web d'auteurs francophones traitant du Web Sémantique.
+http://planete.websemantique.org/|title|Planète Web Sémantique
+http://lists.w3.org/Archives/Public/public-esw-thes/2005Oct/0006.html|creationDate|2007-01-03
+http://lists.w3.org/Archives/Public/public-esw-thes/2005Oct/0006.html|tag|http://www.semanlink.net/tag/apache
+http://lists.w3.org/Archives/Public/public-esw-thes/2005Oct/0006.html|tag|http://www.semanlink.net/tag/skos
+http://lists.w3.org/Archives/Public/public-esw-thes/2005Oct/0006.html|tag|http://www.semanlink.net/tag/httprange_14
+http://lists.w3.org/Archives/Public/public-esw-thes/2005Oct/0006.html|title|RE: [VM] content-dependent redirects in apache ... help! from Miles, AJ \(Alistair\) on 2005-10-03 (public-esw-thes@w3.org from October 2005)
+http://news.bbc.co.uk/1/hi/sci/tech/4202734.stm|creationDate|2005-09-05
+http://news.bbc.co.uk/1/hi/sci/tech/4202734.stm|tag|http://www.semanlink.net/tag/grands_singes
+http://news.bbc.co.uk/1/hi/sci/tech/4202734.stm|tag|http://www.semanlink.net/tag/especes_menacees
+http://news.bbc.co.uk/1/hi/sci/tech/4202734.stm|title|BBC NEWS - Apes 'extinct in a generation'
+http://news.bbc.co.uk/1/hi/sci/tech/4202734.stm|source|BBC
+http://www.bbc.com/news/science-environment-36286548|creationDate|2016-05-14
+http://www.bbc.com/news/science-environment-36286548|tag|http://www.semanlink.net/tag/first_americans
+http://www.bbc.com/news/science-environment-36286548|title|Mastodon meal scraps revise US prehistory - BBC News
+http://www.bbc.com/news/science-environment-36286548|creationTime|2016-05-14T20:00:13Z
+http://masinter.blogspot.fr/2010/03/resources-are-angels-urls-are-pins.html|creationDate|2013-07-20
+http://masinter.blogspot.fr/2010/03/resources-are-angels-urls-are-pins.html|tag|http://www.semanlink.net/tag/identity_crisis_in_linked_data
+http://masinter.blogspot.fr/2010/03/resources-are-angels-urls-are-pins.html|tag|http://www.semanlink.net/tag/uri_identity
+http://masinter.blogspot.fr/2010/03/resources-are-angels-urls-are-pins.html|title|Larry Masinter Musings: Resources are Angels; URLs are Pins
+http://masinter.blogspot.fr/2010/03/resources-are-angels-urls-are-pins.html|creationTime|2013-07-20T02:35:53Z
+http://vimeo.com/20781278|creationDate|2011-09-20
+http://vimeo.com/20781278|tag|http://www.semanlink.net/tag/hateoas
+http://vimeo.com/20781278|tag|http://www.semanlink.net/tag/rest
+http://vimeo.com/20781278|comment|"HyperMedia As The Engine Of The Application State<br/>
+""using XHTML as a media type for our APIs (instead of the more traditional JSON or Atom types) provides a lot of power in terms of scalability and loose coupling between client and server"" +" +http://vimeo.com/20781278|title|Hypermedia APIs - Jon Moore on Vimeo +http://vimeo.com/20781278|creationTime|2011-09-20T14:13:49Z +http://www.guha.com/sw002.html|creationDate|2008-01-21 +http://www.guha.com/sw002.html|tag|http://www.semanlink.net/tag/tap +http://www.guha.com/sw002.html|title|A System for integrating Web Services into a Global Knowledge Base (2002) +http://www.guha.com/sw002.html|creationTime|2008-01-21T23:10:38Z +http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers/|creationDate|2008-09-19 +http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers/|tag|http://www.semanlink.net/tag/rss +http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers/|tag|http://www.semanlink.net/tag/http +http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers/|title|HTTP Conditional Get for RSS Hackers - The Fishbowl +http://fishbowl.pastiche.org/2002/10/21/http_conditional_get_for_rss_hackers/|creationTime|2008-09-19T13:04:53Z +https://www.smashingmagazine.com/2018/01/drag-drop-file-uploader-vanilla-js/|creationDate|2019-04-19 +https://www.smashingmagazine.com/2018/01/drag-drop-file-uploader-vanilla-js/|tag|http://www.semanlink.net/tag/drag_and_drop +https://www.smashingmagazine.com/2018/01/drag-drop-file-uploader-vanilla-js/|title|How To Make A Drag-and-Drop File Uploader With Vanilla JavaScript — Smashing Magazine +https://www.smashingmagazine.com/2018/01/drag-drop-file-uploader-vanilla-js/|creationTime|2019-04-19T17:29:20Z +http://fr.wikipedia.org/wiki/Marianne_de_ma_jeunesse|creationDate|2009-11-09 +http://fr.wikipedia.org/wiki/Marianne_de_ma_jeunesse|tag|http://www.semanlink.net/tag/film +http://fr.wikipedia.org/wiki/Marianne_de_ma_jeunesse|comment|Film de Duvivier +http://fr.wikipedia.org/wiki/Marianne_de_ma_jeunesse|title|Marianne de ma jeunesse +http://fr.wikipedia.org/wiki/Marianne_de_ma_jeunesse|creationTime|2009-11-09T01:16:40Z +http://www.elwatan.com/spip.php?page=article&id_article=83720|creationDate|2008-01-10 +http://www.elwatan.com/spip.php?page=article&id_article=83720|tag|http://www.semanlink.net/tag/uranium +http://www.elwatan.com/spip.php?page=article&id_article=83720|tag|http://www.semanlink.net/tag/rebellion_touaregue +http://www.elwatan.com/spip.php?page=article&id_article=83720|tag|http://www.semanlink.net/tag/niger +http://www.elwatan.com/spip.php?page=article&id_article=83720|title|El Watan - Rébellion Touareg et enjeux de l’uranium La poudrière du Nord-Niger +http://www.elwatan.com/spip.php?page=article&id_article=83720|creationTime|2008-01-10T00:56:27Z +https://issues.apache.org/jira/browse/STANBOL-594?subTaskView=unresolved|creationDate|2012-06-21 +https://issues.apache.org/jira/browse/STANBOL-594?subTaskView=unresolved|tag|http://www.semanlink.net/tag/apache_stanbol +https://issues.apache.org/jira/browse/STANBOL-594?subTaskView=unresolved|tag|http://www.semanlink.net/tag/google_refine +https://issues.apache.org/jira/browse/STANBOL-594?subTaskView=unresolved|title|[#STANBOL-594] Google Refine Reconciliation Service support - ASF JIRA +https://issues.apache.org/jira/browse/STANBOL-594?subTaskView=unresolved|creationTime|2012-06-21T12:03:30Z +http://www.alphaworks.ibm.com/tech/wom?open&S_TACT=105AGX59&S_CMP=GR|creationDate|2006-04-29 
+http://www.alphaworks.ibm.com/tech/wom?open&S_TACT=105AGX59&S_CMP=GR|tag|http://www.semanlink.net/tag/java +http://www.alphaworks.ibm.com/tech/wom?open&S_TACT=105AGX59&S_CMP=GR|tag|http://www.semanlink.net/tag/owl +http://www.alphaworks.ibm.com/tech/wom?open&S_TACT=105AGX59&S_CMP=GR|tag|http://www.semanlink.net/tag/ibm +http://www.alphaworks.ibm.com/tech/wom?open&S_TACT=105AGX59&S_CMP=GR|tag|http://www.semanlink.net/tag/ontologies +http://www.alphaworks.ibm.com/tech/wom?open&S_TACT=105AGX59&S_CMP=GR|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.alphaworks.ibm.com/tech/wom?open&S_TACT=105AGX59&S_CMP=GR|tag|http://www.semanlink.net/tag/jena +http://www.alphaworks.ibm.com/tech/wom?open&S_TACT=105AGX59&S_CMP=GR|comment|"IBM Web Ontology Manager is a lightweight, Web-based tool for managing ontologies expressed in Web Ontology Language (OWL). With this technology, users can browse, search, and submit ontologies to an ontology repository. This technology includes a Web interface for easy uploading of ontologies in an .owl format by any user of the system. It also includes an interface for generating (using Jastor) Java™ APIs from uploaded ontology files. +
+IBM Web Ontology Manager differs from IBM Ontology Management System (a former alphaWorks technology, now merged into the IBM Integrated Ontology Development Toolkit) in that it does not include a statement repository. Instead, based on the ontologies visible to the system, it can generate Java classes for accessing any Jena-compatible RDF statement repository.
+
+"
+http://www.alphaworks.ibm.com/tech/wom?open&S_TACT=105AGX59&S_CMP=GR|title|alphaWorks : IBM Web Ontology Manager
+https://en.wikipedia.org/wiki/The_Son_(Nesb%C3%B8_novel)|creationDate|2017-12-29
+https://en.wikipedia.org/wiki/The_Son_(Nesb%C3%B8_novel)|tag|http://www.semanlink.net/tag/roman
+https://en.wikipedia.org/wiki/The_Son_(Nesb%C3%B8_novel)|tag|http://www.semanlink.net/tag/thriller
+https://en.wikipedia.org/wiki/The_Son_(Nesb%C3%B8_novel)|title|The Son (Nesbø novel)
+https://en.wikipedia.org/wiki/The_Son_(Nesb%C3%B8_novel)|creationTime|2017-12-29T19:05:26Z
+https://drive.google.com/file/d/0B8i61jl8OE3XdHRCSkV1VFNqTWc/edit|creationDate|2018-10-20
+https://drive.google.com/file/d/0B8i61jl8OE3XdHRCSkV1VFNqTWc/edit|tag|http://www.semanlink.net/tag/geoffrey_hinton
+https://drive.google.com/file/d/0B8i61jl8OE3XdHRCSkV1VFNqTWc/edit|tag|http://www.semanlink.net/tag/thought_vector
+https://drive.google.com/file/d/0B8i61jl8OE3XdHRCSkV1VFNqTWc/edit|title|Aetherial Symbols
+https://drive.google.com/file/d/0B8i61jl8OE3XdHRCSkV1VFNqTWc/edit|creationTime|2018-10-20T17:49:59Z
+https://www.quora.com/Can-I-use-word2vec-representation-to-train-a-weka-classifier|creationDate|2017-07-20
+https://www.quora.com/Can-I-use-word2vec-representation-to-train-a-weka-classifier|tag|http://www.semanlink.net/tag/word2vec_howto
+https://www.quora.com/Can-I-use-word2vec-representation-to-train-a-weka-classifier|title|Can I use word2vec representation to train a weka classifier? - Quora
+https://www.quora.com/Can-I-use-word2vec-representation-to-train-a-weka-classifier|creationTime|2017-07-20T13:45:20Z
+http://www.hydra-cg.com/spec/latest/schema.org/|creationDate|2015-02-18
+http://www.hydra-cg.com/spec/latest/schema.org/|tag|http://www.semanlink.net/tag/hydra
+http://www.hydra-cg.com/spec/latest/schema.org/|tag|http://www.semanlink.net/tag/schema_org
+http://www.hydra-cg.com/spec/latest/schema.org/|title|Integration of Hydra into Schema.org
+http://www.hydra-cg.com/spec/latest/schema.org/|creationTime|2015-02-18T02:40:20Z
+https://theclevermachine.wordpress.com/2014/09/11/a-gentle-introduction-to-artificial-neural-networks/|creationDate|2016-09-17
+https://theclevermachine.wordpress.com/2014/09/11/a-gentle-introduction-to-artificial-neural-networks/|tag|http://www.semanlink.net/tag/ann_introduction
+https://theclevermachine.wordpress.com/2014/09/11/a-gentle-introduction-to-artificial-neural-networks/|title|A Gentle Introduction to Artificial Neural Networks The Clever Machine
+https://theclevermachine.wordpress.com/2014/09/11/a-gentle-introduction-to-artificial-neural-networks/|creationTime|2016-09-17T19:04:51Z
+http://emnlp2018.org/program/tutorials/|creationDate|2018-10-31
+http://emnlp2018.org/program/tutorials/|tag|http://www.semanlink.net/tag/emnlp_2018
+http://emnlp2018.org/program/tutorials/|tag|http://www.semanlink.net/tag/tutorial
+http://emnlp2018.org/program/tutorials/|title|Tutorials - EMNLP 2018
+http://emnlp2018.org/program/tutorials/|creationTime|2018-10-31T15:56:28Z
+http://stackoverflow.com/questions/8130277/why-visualvm-sampler-does-not-provide-full-information-about-cpu-load-method-ti|creationDate|2015-02-10
+http://stackoverflow.com/questions/8130277/why-visualvm-sampler-does-not-provide-full-information-about-cpu-load-method-ti|tag|http://www.semanlink.net/tag/java_profiling
+http://stackoverflow.com/questions/8130277/why-visualvm-sampler-does-not-provide-full-information-about-cpu-load-method-ti|tag|http://www.semanlink.net/tag/stack_overflow
+http://stackoverflow.com/questions/8130277/why-visualvm-sampler-does-not-provide-full-information-about-cpu-load-method-ti|title|profiler - Why VisualVM Sampler does not provide full information about CPU load (method time execution)? - Stack Overflow
+http://stackoverflow.com/questions/8130277/why-visualvm-sampler-does-not-provide-full-information-about-cpu-load-method-ti|creationTime|2015-02-10T00:15:42Z
+http://www.cs.cmu.edu/~nlao/publication/2014.kdd.pdf|creationDate|2014-08-27
+http://www.cs.cmu.edu/~nlao/publication/2014.kdd.pdf|tag|http://www.semanlink.net/tag/knowledge_vault
+http://www.cs.cmu.edu/~nlao/publication/2014.kdd.pdf|comment|"Recent years have witnessed a proliferation of large-scale
+knowledge bases, includingWikipedia, Freebase, YAGO,Microsoft's Satori, and Google's Knowledge Graph. To increase the scale even further, we need to explore automatic
+methods for constructing knowledge bases. Previous approaches have primarily focused on text-based extraction,
+which can be very noisy. Here we introduce Knowledge
+Vault, **a Web-scale probabilistic knowledge base** that combines extractions from Web content (obtained via analysis of
+text, tabular data, page structure, and human annotations)
+with prior knowledge derived from existing knowledge repositories. We employ **supervised machine learning methods for
+fusing these distinct information sources**.
+" +http://www.cs.cmu.edu/~nlao/publication/2014.kdd.pdf|title|Knowledge vault: a web scale approach to probabilistic knowledge fusion +http://www.cs.cmu.edu/~nlao/publication/2014.kdd.pdf|creationTime|2014-08-27T15:01:11Z +https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730|creationDate|2017-12-05 +https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730|tag|http://www.semanlink.net/tag/topic_modeling +https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730|tag|http://www.semanlink.net/tag/nlp_sample_code +https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730|tag|http://www.semanlink.net/tag/non_negative_matrix_factorization +https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730|tag|http://www.semanlink.net/tag/latent_dirichlet_allocation +https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730|tag|http://www.semanlink.net/tag/scikit_learn +https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730|title|Topic Modeling with Scikit Learn – Aneesha Bakharia – Medium +https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730|creationTime|2017-12-05T09:54:22Z +http://code.google.com/p/topic-modeling-tool/|creationDate|2014-04-23 +http://code.google.com/p/topic-modeling-tool/|tag|http://www.semanlink.net/tag/topic_modeling +http://code.google.com/p/topic-modeling-tool/|tag|http://www.semanlink.net/tag/mallet +http://code.google.com/p/topic-modeling-tool/|title|topic-modeling-tool - A graphical user interface tool for topic modeling - Google Project Hosting +http://code.google.com/p/topic-modeling-tool/|creationTime|2014-04-23T10:56:23Z +http://www.ted.com/talks/juliana_rotich_meet_brck_internet_access_built_for_africa.html|creationDate|2013-08-24 +http://www.ted.com/talks/juliana_rotich_meet_brck_internet_access_built_for_africa.html|tag|http://www.semanlink.net/tag/juliana_rotich +http://www.ted.com/talks/juliana_rotich_meet_brck_internet_access_built_for_africa.html|tag|http://www.semanlink.net/tag/ted +http://www.ted.com/talks/juliana_rotich_meet_brck_internet_access_built_for_africa.html|tag|http://www.semanlink.net/tag/internet_en_afrique +http://www.ted.com/talks/juliana_rotich_meet_brck_internet_access_built_for_africa.html|tag|http://www.semanlink.net/tag/ntic_et_developpement +http://www.ted.com/talks/juliana_rotich_meet_brck_internet_access_built_for_africa.html|comment|"BRCK offers resilient connectivity for the developing world.
+So we've got a joke in Ushahidi where we say, ""If it works in Africa, it'll work anywhere.""
+What if the solutions to the world's problem came from places like Africa?" +http://www.ted.com/talks/juliana_rotich_meet_brck_internet_access_built_for_africa.html|title|Juliana Rotich: Meet BRCK, Internet access built for Africa Video on TED.com +http://www.ted.com/talks/juliana_rotich_meet_brck_internet_access_built_for_africa.html|creationTime|2013-08-24T18:03:38Z +http://www.foreignaffairs.com/articles/143002/edited-by-emmanuel-akyeampong-robert-h-bates-nathan-nunn-and-jam/africas-development-in-historical-perspective|creationDate|2015-04-18 +http://www.foreignaffairs.com/articles/143002/edited-by-emmanuel-akyeampong-robert-h-bates-nathan-nunn-and-jam/africas-development-in-historical-perspective|tag|http://www.semanlink.net/tag/afrique +http://www.foreignaffairs.com/articles/143002/edited-by-emmanuel-akyeampong-robert-h-bates-nathan-nunn-and-jam/africas-development-in-historical-perspective|tag|http://www.semanlink.net/tag/livre_a_lire +http://www.foreignaffairs.com/articles/143002/edited-by-emmanuel-akyeampong-robert-h-bates-nathan-nunn-and-jam/africas-development-in-historical-perspective|title|Africa’s Development in Historical Perspective Foreign Affairs +http://www.foreignaffairs.com/articles/143002/edited-by-emmanuel-akyeampong-robert-h-bates-nathan-nunn-and-jam/africas-development-in-historical-perspective|creationTime|2015-04-18T09:24:08Z +http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf|creationDate|2014-04-25 +http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf|tag|http://www.semanlink.net/tag/support_vector_machine +http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf|comment|By the authors of LIBSVM +http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf|title|A practical guide to Support Vector classification +http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf|creationTime|2014-04-25T13:33:06Z +https://blog.codeship.com/json-ld-building-meaningful-data-apis/#disqus_thread|creationDate|2016-09-24 +https://blog.codeship.com/json-ld-building-meaningful-data-apis/#disqus_thread|tag|http://www.semanlink.net/tag/json_ld +https://blog.codeship.com/json-ld-building-meaningful-data-apis/#disqus_thread|title|JSON-LD: Building Meaningful Data APIs +https://blog.codeship.com/json-ld-building-meaningful-data-apis/#disqus_thread|creationTime|2016-09-24T16:02:25Z +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-173/pos_paper5.pdf|creationDate|2007-09-27 +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-173/pos_paper5.pdf|tag|http://www.semanlink.net/tag/rdf +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-173/pos_paper5.pdf|tag|http://www.semanlink.net/tag/probabilites +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-173/pos_paper5.pdf|title|Representing Probabilistic Relations in RDF +http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-173/pos_paper5.pdf|creationTime|2007-09-27T21:26:35Z +http://news.bbc.co.uk/2/hi/africa/4459671.stm|creationDate|2006-05-01 +http://news.bbc.co.uk/2/hi/africa/4459671.stm|tag|http://www.semanlink.net/tag/obelisque_d_axoum +http://news.bbc.co.uk/2/hi/africa/4459671.stm|title|BBC NEWS - Ethiopians celebrate obelisk return +http://news.bbc.co.uk/2/hi/africa/4459671.stm|source|BBC +http://news.bbc.co.uk/2/hi/africa/4459671.stm|date|2005-04-19 +http://bwnt.businessweek.com/interactive_reports/innovative_companies/?chan=magazine+channel_special+report|creationDate|2008-05-04 
+http://bwnt.businessweek.com/interactive_reports/innovative_companies/?chan=magazine+channel_special+report|tag|http://www.semanlink.net/tag/entreprise +http://bwnt.businessweek.com/interactive_reports/innovative_companies/?chan=magazine+channel_special+report|tag|http://www.semanlink.net/tag/innovation +http://bwnt.businessweek.com/interactive_reports/innovative_companies/?chan=magazine+channel_special+report|title|The World's 50 Most Innovative Companies Interactive Scoreboard +http://bwnt.businessweek.com/interactive_reports/innovative_companies/?chan=magazine+channel_special+report|creationTime|2008-05-04T16:00:49Z +http://www.idealliance.org/papers/extreme/proceedings/html/2007/Bryan01/EML2007Bryan01.html|creationDate|2007-10-05 +http://www.idealliance.org/papers/extreme/proceedings/html/2007/Bryan01/EML2007Bryan01.html|tag|http://www.semanlink.net/tag/mycarevent +http://www.idealliance.org/papers/extreme/proceedings/html/2007/Bryan01/EML2007Bryan01.html|title|MYCAREVENT: OWL and the automotive repair information supply chain - Proceedings of Extreme Markup Languages® +http://www.idealliance.org/papers/extreme/proceedings/html/2007/Bryan01/EML2007Bryan01.html|creationTime|2007-10-05T21:16:17Z +http://lists.w3.org/Archives/Public/semantic-web/2008Sep/0128.html|creationDate|2008-09-24 +http://lists.w3.org/Archives/Public/semantic-web/2008Sep/0128.html|tag|http://www.semanlink.net/tag/jena_and_database +http://lists.w3.org/Archives/Public/semantic-web/2008Sep/0128.html|tag|http://www.semanlink.net/tag/sparql +http://lists.w3.org/Archives/Public/semantic-web/2008Sep/0128.html|tag|http://www.semanlink.net/tag/benchmark +http://lists.w3.org/Archives/Public/semantic-web/2008Sep/0128.html|title|Berlin SPARQL Benchmark V2 - Results for Sesame, Virtuoso, Jena TDB, D2R Server, and MySQL +http://lists.w3.org/Archives/Public/semantic-web/2008Sep/0128.html|creationTime|2008-09-24T22:51:36Z +http://topquadrantblog.blogspot.com/|creationDate|2011-01-09 +http://topquadrantblog.blogspot.com/|tag|http://www.semanlink.net/tag/semantic_web_blog +http://topquadrantblog.blogspot.com/|tag|http://www.semanlink.net/tag/semantic_enterprise +http://topquadrantblog.blogspot.com/|tag|http://www.semanlink.net/tag/topquadrant +http://topquadrantblog.blogspot.com/|comment|TopQuadrant's blog +http://topquadrantblog.blogspot.com/|title|VOYAGES OF THE SEMANTIC ENTERPRISE +http://topquadrantblog.blogspot.com/|creationTime|2011-01-09T21:48:48Z +http://topquadrantblog.blogspot.com/|type|http://rdfs.org/sioc/types#Weblog +http://linkeddatafragments.org/|creationDate|2014-10-28 +http://linkeddatafragments.org/|tag|http://www.semanlink.net/tag/linked_data_fragments +http://linkeddatafragments.org/|comment|make the Web of Data Web-scale by moving intelligence from servers to clients. 
+http://linkeddatafragments.org/|title|Linked Data Fragments +http://linkeddatafragments.org/|creationTime|2014-10-28T22:44:40Z +https://towardsdatascience.com/spiking-neural-networks-the-next-generation-of-machine-learning-84e167f4eb2b|creationDate|2019-01-29 +https://towardsdatascience.com/spiking-neural-networks-the-next-generation-of-machine-learning-84e167f4eb2b|tag|http://www.semanlink.net/tag/spiking_neural_network +https://towardsdatascience.com/spiking-neural-networks-the-next-generation-of-machine-learning-84e167f4eb2b|title|Spiking Neural Networks, the Next Generation of Machine Learning (2018) +https://towardsdatascience.com/spiking-neural-networks-the-next-generation-of-machine-learning-84e167f4eb2b|creationTime|2019-01-29T01:23:57Z +http://www.w3.org/TR/json-ld/|creationDate|2014-11-15 +http://www.w3.org/TR/json-ld/|tag|http://www.semanlink.net/tag/w3c_recommendation +http://www.w3.org/TR/json-ld/|tag|http://www.semanlink.net/tag/json_ld +http://www.w3.org/TR/json-ld/|tag|http://www.semanlink.net/tag/manu_sporny +http://www.w3.org/TR/json-ld/|tag|http://www.semanlink.net/tag/markus_lanthaler +http://www.w3.org/TR/json-ld/|title|JSON-LD 1.0 +http://www.w3.org/TR/json-ld/|creationTime|2014-11-15T16:33:57Z +https://heartbeat.fritz.ai/capsule-networks-a-new-and-attractive-ai-architecture-bd1198cc8ad4|creationDate|2018-12-30 +https://heartbeat.fritz.ai/capsule-networks-a-new-and-attractive-ai-architecture-bd1198cc8ad4|tag|http://www.semanlink.net/tag/capsule_networks +https://heartbeat.fritz.ai/capsule-networks-a-new-and-attractive-ai-architecture-bd1198cc8ad4|title|Capsule Networks +https://heartbeat.fritz.ai/capsule-networks-a-new-and-attractive-ai-architecture-bd1198cc8ad4|creationTime|2018-12-30T14:23:57Z +http://hadoop.apache.org/|creationDate|2013-02-18 +http://hadoop.apache.org/|tag|http://www.semanlink.net/tag/hadoop +http://hadoop.apache.org/|title|Apache Hadoop - Home page +http://hadoop.apache.org/|creationTime|2013-02-18T16:11:42Z +http://maps.google.com/maps?q=40.452107,93.742118&hl=de&ll=39.541977,83.94104&spn=0.031903,0.033002&num=1&t=h&vpsrc=6&z=15|creationDate|2011-11-15 +http://maps.google.com/maps?q=40.452107,93.742118&hl=de&ll=39.541977,83.94104&spn=0.031903,0.033002&num=1&t=h&vpsrc=6&z=15|tag|http://www.semanlink.net/tag/google_maps +http://maps.google.com/maps?q=40.452107,93.742118&hl=de&ll=39.541977,83.94104&spn=0.031903,0.033002&num=1&t=h&vpsrc=6&z=15|title|40.452107,93.742118 - Google Maps +http://maps.google.com/maps?q=40.452107,93.742118&hl=de&ll=39.541977,83.94104&spn=0.031903,0.033002&num=1&t=h&vpsrc=6&z=15|creationTime|2011-11-15T21:26:17Z +http://www2003.org/cdrom/papers/refereed/p050/p50-horrocks.html|creationDate|2010-07-19 +http://www2003.org/cdrom/papers/refereed/p050/p50-horrocks.html|tag|http://www.semanlink.net/tag/logic_and_semantic_web +http://www2003.org/cdrom/papers/refereed/p050/p50-horrocks.html|comment|The Semantic Web is vitally dependent on a formal meaning for the constructs of its languages. For Semantic Web languages to work well together their formal meanings must employ a common view (or thesis) of representation, otherwise it will not be possible to reconcile documents written in different languages. The thesis of representation underlying RDF and RDFS is particularly troublesome in this regard, as it has several unusual aspects, both semantic and syntactic. A more-standard thesis of representation would result in the ability to reuse existing results and tools in the Semantic Web. 
+http://www2003.org/cdrom/papers/refereed/p050/p50-horrocks.html|title|Three Theses of Representation in the Semantic Web +http://www2003.org/cdrom/papers/refereed/p050/p50-horrocks.html|creationTime|2010-07-19T12:11:42Z +http://fr.slideshare.net/rvguha/sem-tech2014c|creationDate|2014-09-05 +http://fr.slideshare.net/rvguha/sem-tech2014c|tag|http://www.semanlink.net/tag/schema_org +http://fr.slideshare.net/rvguha/sem-tech2014c|tag|http://www.semanlink.net/tag/guha +http://fr.slideshare.net/rvguha/sem-tech2014c|title|Semantic Web and Schema.org +http://fr.slideshare.net/rvguha/sem-tech2014c|creationTime|2014-09-05T18:53:08Z +http://www.lemonde.fr/idees/article/2012/04/30/nous-ne-voulons-pas-mourir-dans-les-decombres-du-neoliberalisme_1693201_3232.html|creationDate|2012-05-01 +http://www.lemonde.fr/idees/article/2012/04/30/nous-ne-voulons-pas-mourir-dans-les-decombres-du-neoliberalisme_1693201_3232.html|tag|http://www.semanlink.net/tag/crise_financiere +http://www.lemonde.fr/idees/article/2012/04/30/nous-ne-voulons-pas-mourir-dans-les-decombres-du-neoliberalisme_1693201_3232.html|tag|http://www.semanlink.net/tag/critique_du_liberalisme +http://www.lemonde.fr/idees/article/2012/04/30/nous-ne-voulons-pas-mourir-dans-les-decombres-du-neoliberalisme_1693201_3232.html|tag|http://www.semanlink.net/tag/pierre_larrouturou +http://www.lemonde.fr/idees/article/2012/04/30/nous-ne-voulons-pas-mourir-dans-les-decombres-du-neoliberalisme_1693201_3232.html|title|"""Nous ne voulons pas mourir dans les décombres du néolibéralisme !""" +http://www.lemonde.fr/idees/article/2012/04/30/nous-ne-voulons-pas-mourir-dans-les-decombres-du-neoliberalisme_1693201_3232.html|creationTime|2012-05-01T12:08:57Z +https://cwiki.apache.org/confluence/display/MAHOUT/Algorithms|creationDate|2013-09-09 +https://cwiki.apache.org/confluence/display/MAHOUT/Algorithms|tag|http://www.semanlink.net/tag/apache_mahout +https://cwiki.apache.org/confluence/display/MAHOUT/Algorithms|tag|http://www.semanlink.net/tag/algorithmes +https://cwiki.apache.org/confluence/display/MAHOUT/Algorithms|title|Algorithms - Apache Mahout - Apache Software Foundation +https://cwiki.apache.org/confluence/display/MAHOUT/Algorithms|creationTime|2013-09-09T15:23:04Z +http://webdatacommons.org/|creationDate|2013-07-06 +http://webdatacommons.org/|tag|http://www.semanlink.net/tag/chris_bizer +http://webdatacommons.org/|tag|http://www.semanlink.net/tag/html_data +http://webdatacommons.org/|comment|More and more websites have started to embed structured data describing products, people, organizations, places, events into their HTML pages using markup standards such as RDFa, Microdata and Microformats. The Web Data Commons project extracts this data from several billion web pages. The project provides the extracted data for download and publishes statistics about the deployment of the different formats. 
+http://webdatacommons.org/|title|Web Data Commons +http://webdatacommons.org/|creationTime|2013-07-06T17:14:58Z +http://www.bbc.com/future/bespoke/story/20150430-rosetta-the-whole-story/index.html|creationDate|2015-05-11 +http://www.bbc.com/future/bespoke/story/20150430-rosetta-the-whole-story/index.html|tag|http://www.semanlink.net/tag/rosetta +http://www.bbc.com/future/bespoke/story/20150430-rosetta-the-whole-story/index.html|title|Rosetta: The whole story +http://www.bbc.com/future/bespoke/story/20150430-rosetta-the-whole-story/index.html|creationTime|2015-05-11T15:35:18Z +https://www.reddit.com/r/MachineLearning/comments/30xo25/how_to_reduce_dimension_for_tfidf_bow_vector/|creationDate|2017-06-26 +https://www.reddit.com/r/MachineLearning/comments/30xo25/how_to_reduce_dimension_for_tfidf_bow_vector/|tag|http://www.semanlink.net/tag/dimensionality_reduction +https://www.reddit.com/r/MachineLearning/comments/30xo25/how_to_reduce_dimension_for_tfidf_bow_vector/|tag|http://www.semanlink.net/tag/tf_idf +https://www.reddit.com/r/MachineLearning/comments/30xo25/how_to_reduce_dimension_for_tfidf_bow_vector/|title|How to reduce dimension for TfIdf / BOW vector? : MachineLearning +https://www.reddit.com/r/MachineLearning/comments/30xo25/how_to_reduce_dimension_for_tfidf_bow_vector/|creationTime|2017-06-26T09:39:21Z +https://lejournal.cnrs.fr/articles/patrick-boucheron-bouscule-lhistoire?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1539108273|creationDate|2018-10-10 +https://lejournal.cnrs.fr/articles/patrick-boucheron-bouscule-lhistoire?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1539108273|tag|http://www.semanlink.net/tag/histoire +https://lejournal.cnrs.fr/articles/patrick-boucheron-bouscule-lhistoire?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1539108273|tag|http://www.semanlink.net/tag/historien +https://lejournal.cnrs.fr/articles/patrick-boucheron-bouscule-lhistoire?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1539108273|tag|http://www.semanlink.net/tag/moyen_age +https://lejournal.cnrs.fr/articles/patrick-boucheron-bouscule-lhistoire?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1539108273|title|Patrick Boucheron bouscule l’histoire CNRS Le journal +https://lejournal.cnrs.fr/articles/patrick-boucheron-bouscule-lhistoire?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1539108273|creationTime|2018-10-10T11:30:10Z +http://www.joehewitt.com/software/firebug/|creationDate|2006-01-26 +http://www.joehewitt.com/software/firebug/|tag|http://www.semanlink.net/tag/debug +http://www.joehewitt.com/software/firebug/|tag|http://www.semanlink.net/tag/ajax +http://www.joehewitt.com/software/firebug/|tag|http://www.semanlink.net/tag/javascript +http://www.joehewitt.com/software/firebug/|comment|FireBug is a new tool for Firefox that aids with debugging Javascript, DHTML, and Ajax. It is like a combination of the Javascript Console, DOM Inspector, and a command line Javascript interpreter. 
+http://www.joehewitt.com/software/firebug/|title|FireBug - JoeHewitt.com +http://jena.hpl.hp.com:3040/index.html|creationDate|2005-10-06 +http://jena.hpl.hp.com:3040/index.html|tag|http://www.semanlink.net/tag/jena +http://jena.hpl.hp.com:3040/index.html|tag|http://www.semanlink.net/tag/bbc +http://jena.hpl.hp.com:3040/index.html|title|BBC - Backstage +http://fgiasson.com/blog/index.php/2016/10/24/create-a-domain-text-classifier-using-cognonto/|creationDate|2016-10-25 +http://fgiasson.com/blog/index.php/2016/10/24/create-a-domain-text-classifier-using-cognonto/|tag|http://www.semanlink.net/tag/nlp_text_classification +http://fgiasson.com/blog/index.php/2016/10/24/create-a-domain-text-classifier-using-cognonto/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2016/10/24/create-a-domain-text-classifier-using-cognonto/|title|Create a Domain Text Classifier Using Cognonto Frederick Giasson +http://fgiasson.com/blog/index.php/2016/10/24/create-a-domain-text-classifier-using-cognonto/|creationTime|2016-10-25T10:06:20Z +http://lists.w3.org/Archives/Public/public-lod/2013Aug/0033.html|creationDate|2013-08-06 +http://lists.w3.org/Archives/Public/public-lod/2013Aug/0033.html|tag|http://www.semanlink.net/tag/webid +http://lists.w3.org/Archives/Public/public-lod/2013Aug/0033.html|tag|http://www.semanlink.net/tag/public_lod_w3_org +http://lists.w3.org/Archives/Public/public-lod/2013Aug/0033.html|tag|http://www.semanlink.net/tag/kingsley_idehen +http://lists.w3.org/Archives/Public/public-lod/2013Aug/0033.html|title|Simple WebID, WebID+TLS Protocol, and ACL Dogfood Demo from Kingsley Idehen on 2013-08-06 (public-lod@w3.org from August 2013) +http://lists.w3.org/Archives/Public/public-lod/2013Aug/0033.html|creationTime|2013-08-06T20:10:37Z +http://archeo.blog.lemonde.fr/2015/07/18/il-y-a-3500-ans-en-crete-une-invention-provoque-lexode-dune-civilisation/|creationDate|2015-07-18 +http://archeo.blog.lemonde.fr/2015/07/18/il-y-a-3500-ans-en-crete-une-invention-provoque-lexode-dune-civilisation/|tag|http://www.semanlink.net/tag/minoen +http://archeo.blog.lemonde.fr/2015/07/18/il-y-a-3500-ans-en-crete-une-invention-provoque-lexode-dune-civilisation/|tag|http://www.semanlink.net/tag/crete +http://archeo.blog.lemonde.fr/2015/07/18/il-y-a-3500-ans-en-crete-une-invention-provoque-lexode-dune-civilisation/|title|Il y a 3500 ans en Crète, une invention provoque l’exode d’une civilisation Dans les pas des archéologues +http://archeo.blog.lemonde.fr/2015/07/18/il-y-a-3500-ans-en-crete-une-invention-provoque-lexode-dune-civilisation/|creationTime|2015-07-18T22:25:34Z +https://www.pbs.org/wgbh/nova/article/new-fossils-mass-extinction-wiped-out-dinosaurs/|creationDate|2019-04-03 +https://www.pbs.org/wgbh/nova/article/new-fossils-mass-extinction-wiped-out-dinosaurs/|tag|http://www.semanlink.net/tag/tanis_kt +https://www.pbs.org/wgbh/nova/article/new-fossils-mass-extinction-wiped-out-dinosaurs/|title|A Fossil Snapshot of Mass Extinction NOVA PBS +https://www.pbs.org/wgbh/nova/article/new-fossils-mass-extinction-wiped-out-dinosaurs/|creationTime|2019-04-03T23:47:41Z +http://www.icefox.net/articles/typemanager.php|creationDate|2005-11-17 +http://www.icefox.net/articles/typemanager.php|tag|http://www.semanlink.net/tag/desktop_applications +http://www.icefox.net/articles/typemanager.php|comment|The future of desktop applications, the Type Manager +http://www.icefox.net/articles/typemanager.php|title|Type Manager 
+http://ruben.verborgh.org/blog/2015/10/06/turtles-all-the-way-down/|creationDate|2015-10-07 +http://ruben.verborgh.org/blog/2015/10/06/turtles-all-the-way-down/|tag|http://www.semanlink.net/tag/hydra +http://ruben.verborgh.org/blog/2015/10/06/turtles-all-the-way-down/|tag|http://www.semanlink.net/tag/sparql_shortcomings +http://ruben.verborgh.org/blog/2015/10/06/turtles-all-the-way-down/|tag|http://www.semanlink.net/tag/ruben_verborgh +http://ruben.verborgh.org/blog/2015/10/06/turtles-all-the-way-down/|comment|APIs are more than just data: context and controls also belong in the message. +http://ruben.verborgh.org/blog/2015/10/06/turtles-all-the-way-down/|title|Turtles all the way down Ruben Verborgh +http://ruben.verborgh.org/blog/2015/10/06/turtles-all-the-way-down/|creationTime|2015-10-07T22:42:37Z +https://medium.com/p/1321fa0298c3|creationDate|2014-04-07 +https://medium.com/p/1321fa0298c3|tag|http://www.semanlink.net/tag/droit +https://medium.com/p/1321fa0298c3|comment|Augusta Cantwell’s boss wants a new slideshow. Will Daria Norton be of any help? +https://medium.com/p/1321fa0298c3|title|Data is the new NEW — WHAT? +https://medium.com/p/1321fa0298c3|creationTime|2014-04-07T00:00:05Z +http://www.peterchilson.com/wp-content/uploads/2011/05/Colonel-Tandjas-CountryFourth-Genre2.pdf|creationDate|2016-08-20 +http://www.peterchilson.com/wp-content/uploads/2011/05/Colonel-Tandjas-CountryFourth-Genre2.pdf|tag|http://www.semanlink.net/tag/peter_chilson +http://www.peterchilson.com/wp-content/uploads/2011/05/Colonel-Tandjas-CountryFourth-Genre2.pdf|tag|http://www.semanlink.net/tag/tandja +http://www.peterchilson.com/wp-content/uploads/2011/05/Colonel-Tandjas-CountryFourth-Genre2.pdf|tag|http://www.semanlink.net/tag/niger +http://www.peterchilson.com/wp-content/uploads/2011/05/Colonel-Tandjas-CountryFourth-Genre2.pdf|title|Colonel Tandja's country +http://www.peterchilson.com/wp-content/uploads/2011/05/Colonel-Tandjas-CountryFourth-Genre2.pdf|creationTime|2016-08-20T11:52:48Z +http://web.archive.org/web/19981202132847/http://www.hypersolutions.fr/m3/|creationDate|2013-10-01 +http://web.archive.org/web/19981202132847/http://www.hypersolutions.fr/m3/|tag|http://www.semanlink.net/tag/m3_multi_media_museum +http://web.archive.org/web/19981202132847/http://www.hypersolutions.fr/m3/|title|Multi Media Museum - M3 +http://web.archive.org/web/19981202132847/http://www.hypersolutions.fr/m3/|creationTime|2013-10-01T17:13:32Z +http://www.w3.org/TR/ld-bp/|creationDate|2014-03-25 +http://www.w3.org/TR/ld-bp/|tag|http://www.semanlink.net/tag/linked_data_publishing +http://www.w3.org/TR/ld-bp/|tag|http://www.semanlink.net/tag/linked_data +http://www.w3.org/TR/ld-bp/|title|Best Practices for Publishing Linked Data +http://www.w3.org/TR/ld-bp/|creationTime|2014-03-25T15:06:38Z +http://www.universitygames.fr/|creationDate|2006-11-20 +http://www.universitygames.fr/|tag|http://www.semanlink.net/tag/jeux +http://www.universitygames.fr/|tag|http://www.semanlink.net/tag/langues_vivantes +https://www.buzzfeednews.com/article/ryanhatesthis/france-paris-yellow-jackets-facebook|creationDate|2018-12-06 +https://www.buzzfeednews.com/article/ryanhatesthis/france-paris-yellow-jackets-facebook|tag|http://www.semanlink.net/tag/facebook +https://www.buzzfeednews.com/article/ryanhatesthis/france-paris-yellow-jackets-facebook|tag|http://www.semanlink.net/tag/gilets_jaunes +https://www.buzzfeednews.com/article/ryanhatesthis/france-paris-yellow-jackets-facebook|title|"The ""Yellow Jackets"" Riots In France Are What Happens When 
Facebook Gets Involved With Local News" +https://www.buzzfeednews.com/article/ryanhatesthis/france-paris-yellow-jackets-facebook|creationTime|2018-12-06T09:56:10Z +https://web.hypothes.is/|creationDate|2017-08-23 +https://web.hypothes.is/|tag|http://www.semanlink.net/tag/hypothes_is +https://web.hypothes.is/|title|Hypothesis – The Internet, peer reviewed. +https://web.hypothes.is/|creationTime|2017-08-23T18:40:49Z +http://blog.cloudfoundry.org/2012/10/09/securing-restful-web-services-with-oauth2/|creationDate|2015-08-25 +http://blog.cloudfoundry.org/2012/10/09/securing-restful-web-services-with-oauth2/|tag|http://www.semanlink.net/tag/oauth2 +http://blog.cloudfoundry.org/2012/10/09/securing-restful-web-services-with-oauth2/|title|Securing RESTful Web Services with OAuth2 Cloud Foundry Blog +http://blog.cloudfoundry.org/2012/10/09/securing-restful-web-services-with-oauth2/|creationTime|2015-08-25T17:09:35Z +https://fr.wikipedia.org/wiki/Matthieu_Ricard|creationDate|2016-04-24 +https://fr.wikipedia.org/wiki/Matthieu_Ricard|tag|http://www.semanlink.net/tag/boudhisme +https://fr.wikipedia.org/wiki/Matthieu_Ricard|tag|http://www.semanlink.net/tag/neuroscience +https://fr.wikipedia.org/wiki/Matthieu_Ricard|title|Matthieu Ricard +https://fr.wikipedia.org/wiki/Matthieu_Ricard|creationTime|2016-04-24T00:24:44Z +http://www.nytimes.com/2007/07/26/business/26poker.html?_r=2&adxnnl=1&oref=slogin&adxnnlx=1185467840-t4htAYaCiLZLHBVvKhSRyQ&oref=slogin|creationDate|2007-07-28 +http://www.nytimes.com/2007/07/26/business/26poker.html?_r=2&adxnnl=1&oref=slogin&adxnnlx=1185467840-t4htAYaCiLZLHBVvKhSRyQ&oref=slogin|tag|http://www.semanlink.net/tag/poker +http://www.nytimes.com/2007/07/26/business/26poker.html?_r=2&adxnnl=1&oref=slogin&adxnnlx=1185467840-t4htAYaCiLZLHBVvKhSRyQ&oref=slogin|tag|http://www.semanlink.net/tag/computer_game +http://www.nytimes.com/2007/07/26/business/26poker.html?_r=2&adxnnl=1&oref=slogin&adxnnlx=1185467840-t4htAYaCiLZLHBVvKhSRyQ&oref=slogin|title|In Poker Match Against a Machine, Humans Are Better Bluffers - New York Times +http://www.nytimes.com/2007/07/26/business/26poker.html?_r=2&adxnnl=1&oref=slogin&adxnnlx=1185467840-t4htAYaCiLZLHBVvKhSRyQ&oref=slogin|creationTime|2007-07-28T17:58:27Z +https://github.com/facebookresearch/fastText/issues/189|creationDate|2017-12-07 +https://github.com/facebookresearch/fastText/issues/189|tag|http://www.semanlink.net/tag/pointwise_mutual_information +https://github.com/facebookresearch/fastText/issues/189|tag|http://www.semanlink.net/tag/fasttext +https://github.com/facebookresearch/fastText/issues/189|tag|http://www.semanlink.net/tag/cosine_similarity +https://github.com/facebookresearch/fastText/issues/189|comment|"**the norm of a word vector is somewhat related to the overall frequency** of which words occur in the training corpus (so a common word like ""frog"" will still be similar to a less frequent word like ""Anura"" which is it's scientific name) (Hence the use of cosine-distance) + +> That the inner product relates to the PMI between the vectors is for the most part an empirical result and there is very little theoretical background behind this finding" +https://github.com/facebookresearch/fastText/issues/189|title|(fastText) Euclidean distance instead of cosine-similarity? 
+https://github.com/facebookresearch/fastText/issues/189|creationTime|2017-12-07T16:06:35Z +http://www.w3.org/2005/04/fresnel-info/|creationDate|2007-01-02 +http://www.w3.org/2005/04/fresnel-info/|tag|http://www.semanlink.net/tag/rdf +http://www.w3.org/2005/04/fresnel-info/|tag|http://www.semanlink.net/tag/ihm +http://www.w3.org/2005/04/fresnel-info/|tag|http://www.semanlink.net/tag/rdf_browser +http://www.w3.org/2005/04/fresnel-info/|comment|Presenting Semantic Web content in a human-readable way consists in addressing two issues: specifying what information contained in an RDF graph should be presented and how this information should be presented.
We developed Fresnel as a browser-independent vocabulary of core RDF display concepts applicable across different representation paradigms and output formats. +http://www.w3.org/2005/04/fresnel-info/|title|Fresnel - Display Vocabulary for RDF +http://kleenexsosforet.com|creationDate|2005-03-03 +http://kleenexsosforet.com|tag|http://www.semanlink.net/tag/deforestation +http://kleenexsosforet.com|tag|http://www.semanlink.net/tag/kleenex +http://kleenexsosforet.com|tag|http://www.semanlink.net/tag/foret +http://kleenexsosforet.com|tag|http://www.semanlink.net/tag/canada +http://kleenexsosforet.com|title|Kleenex rase nos forêts boréales ! +http://aclweb.org/anthology/D17-1024|creationDate|2018-05-11 +http://aclweb.org/anthology/D17-1024|tag|http://www.semanlink.net/tag/word_embedding +http://aclweb.org/anthology/D17-1024|tag|http://www.semanlink.net/tag/word_embeddings_with_lexical_resources +http://aclweb.org/anthology/D17-1024|title|Dict2vec : Learning Word Embeddings using Lexical Dictionaries +http://aclweb.org/anthology/D17-1024|creationTime|2018-05-11T00:56:25Z +http://mark-shepherd.com/blog/springgraph-flex-component/|creationDate|2008-02-15 +http://mark-shepherd.com/blog/springgraph-flex-component/|tag|http://www.semanlink.net/tag/graph_visualization +http://mark-shepherd.com/blog/springgraph-flex-component/|comment|SpringGraph is a Adobe Flex 2.0 component that displays a set of items that are linked to each other. The component calculates the layout for the items using an organic-looking annealing algorithm based on the size and links of each item, and draws lines to represent the links. +http://mark-shepherd.com/blog/springgraph-flex-component/|title|mark-shepherd.com » SpringGraph Flex Component +http://mark-shepherd.com/blog/springgraph-flex-component/|creationTime|2008-02-15T23:58:33Z +http://www.coindesk.com/understanding-dao-hack-journalists/|creationDate|2016-06-29 +http://www.coindesk.com/understanding-dao-hack-journalists/|tag|http://www.semanlink.net/tag/dao_attack +http://www.coindesk.com/understanding-dao-hack-journalists/|title|Understanding The DAO Attack - CoinDesk +http://www.coindesk.com/understanding-dao-hack-journalists/|creationTime|2016-06-29T11:38:21Z +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-216/submission_12.pdf|creationDate|2011-07-05 +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-216/submission_12.pdf|tag|http://www.semanlink.net/tag/design_pattern +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-216/submission_12.pdf|tag|http://www.semanlink.net/tag/owl +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-216/submission_12.pdf|title|Putting OWL in Order: Patterns for Sequences in OWL +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-216/submission_12.pdf|creationTime|2011-07-05T19:14:56Z +http://www.teebweb.org/|creationDate|2012-10-15 +http://www.teebweb.org/|tag|http://www.semanlink.net/tag/economie_ecologique +http://www.teebweb.org/|tag|http://www.semanlink.net/tag/biodiversite +http://www.teebweb.org/|title|The Economics of Ecosystems and Biodiversity (TEEB) +http://www.teebweb.org/|creationTime|2012-10-15T21:04:13Z +http://internetactu.blog.lemonde.fr/2013/03/01/cours-en-ligne-massifs-et-ouverts-la-standardisation-ou-linnovation/|creationDate|2013-03-04 +http://internetactu.blog.lemonde.fr/2013/03/01/cours-en-ligne-massifs-et-ouverts-la-standardisation-ou-linnovation/|tag|http://www.semanlink.net/tag/mooc 
+http://internetactu.blog.lemonde.fr/2013/03/01/cours-en-ligne-massifs-et-ouverts-la-standardisation-ou-linnovation/|title|Cours en ligne massifs et ouverts : la standardisation ou l’innovation ? InternetActu +http://internetactu.blog.lemonde.fr/2013/03/01/cours-en-ligne-massifs-et-ouverts-la-standardisation-ou-linnovation/|creationTime|2013-03-04T14:07:19Z +http://firblitz.com/2006/12/9/itude-0-1-released|creationDate|2007-12-30 +http://firblitz.com/2006/12/9/itude-0-1-released|tag|http://www.semanlink.net/tag/drm +http://firblitz.com/2006/12/9/itude-0-1-released|tag|http://www.semanlink.net/tag/itunes +http://firblitz.com/2006/12/9/itude-0-1-released|comment|(The following is from http://www.apple.com/support/itunes/musicstore/authorization/ ) : How do I deauthorize all of my computers? If you have authorized five computers, a button labeled “Deauthorize All” will appear in your Account Information screen. This button will deauthorize all computers associated with your account. You can then reauthorize up to 5 computers. Note: You can only use this feature once a year. +http://firblitz.com/2006/12/9/itude-0-1-released|title|Firblitz: iTude 0.1 released +http://firblitz.com/2006/12/9/itude-0-1-released|creationTime|2007-12-30T13:17:08Z +http://www.javascriptkit.com/domref/|creationDate|2007-11-27 +http://www.javascriptkit.com/domref/|tag|http://www.semanlink.net/tag/javascript_and_tutorial +http://www.javascriptkit.com/domref/|tag|http://www.semanlink.net/tag/javascript_dom +http://www.javascriptkit.com/domref/|title|JavaScript Kit- DOM (Document Object Model) Reference +http://www.javascriptkit.com/domref/|creationTime|2007-11-27T14:25:11Z +http://www.newmediacampaigns.com/blog/browser-rest-http-accept-headers|creationDate|2016-03-30 +http://www.newmediacampaigns.com/blog/browser-rest-http-accept-headers|tag|http://www.semanlink.net/tag/content_negotiation +http://www.newmediacampaigns.com/blog/browser-rest-http-accept-headers|comment|If you're building APIs for other developers to consume, consider using Accept-based content-negotiation. If you're building consumer facing web apps: ignore the Accept header until WebKit and IE get their acts together. 
+http://www.newmediacampaigns.com/blog/browser-rest-http-accept-headers|title|Understanding Browser HTTP Accept Headers: Firefox, Internet Explorer, Opera, and WebKit (Safari / Chrome) +http://www.newmediacampaigns.com/blog/browser-rest-http-accept-headers|creationTime|2016-03-30T02:06:30Z +http://www.bitcoin.org/|creationDate|2011-06-15 +http://www.bitcoin.org/|tag|http://www.semanlink.net/tag/bitcoin +http://www.bitcoin.org/|title|Bitcoin P2P Virtual Currency +http://www.bitcoin.org/|creationTime|2011-06-15T15:30:02Z +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|creationDate|2013-01-22 +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|tag|http://www.semanlink.net/tag/azawad +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|tag|http://www.semanlink.net/tag/mali +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|tag|http://www.semanlink.net/tag/aqmi +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|tag|http://www.semanlink.net/tag/prohibition_des_narcotiques +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|tag|http://www.semanlink.net/tag/kadhafi +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|tag|http://www.semanlink.net/tag/intervention_francaise_au_mali +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|tag|http://www.semanlink.net/tag/trafic_de_drogue +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|tag|http://www.semanlink.net/tag/france_afrique +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|title|Chronique d'une faillite programmée au Mali +http://www.lemonde.fr/idees/article/2013/01/22/chronique-d-une-faillite-programmee-au-mali_1820681_3232.html|creationTime|2013-01-22T22:21:22Z +http://ruder.io/word-embeddings-2017/|creationDate|2017-11-06 +http://ruder.io/word-embeddings-2017/|tag|http://www.semanlink.net/tag/sebastian_ruder +http://ruder.io/word-embeddings-2017/|tag|http://www.semanlink.net/tag/word_embedding +http://ruder.io/word-embeddings-2017/|comment|"- Subword-level embeddings: several methods: + +> Word embeddings have been augmented with subword-level information for many applications such as named entity recognition, POS, ..., Language Modeling. +> Most of these models employ a CNN or a BiLSTM that takes as input the characters of a word and outputs a character-based word representation. + +> For incorporating character information into pre-trained embeddings, however, **character n-grams features** have been shown to be more powerful. [#FastText] + +> Subword units based on **byte-pair encoding** have been found to be particularly useful for machine translation where they have replaced words as the standard input units + +- Out-of-vocabulary (OOV) words + +- Polysemy. Multi-sense embeddings + - [Towards a Seamless Integration of Word Senses into Downstream NLP Applications](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1710.06632) + +- ..." 
+http://ruder.io/word-embeddings-2017/|relatedDoc|https://arxiv.org/abs/1710.06632 +http://ruder.io/word-embeddings-2017/|title|Word embeddings in 2017: Trends and future directions +http://ruder.io/word-embeddings-2017/|creationTime|2017-11-06T12:00:59Z +https://lejournal.cnrs.fr/articles/zinder-renoue-avec-son-passe|creationDate|2019-02-25 +https://lejournal.cnrs.fr/articles/zinder-renoue-avec-son-passe|tag|http://www.semanlink.net/tag/zinder +https://lejournal.cnrs.fr/articles/zinder-renoue-avec-son-passe|comment|[Zinder (Camille Lefebvre Langarchiv)](doc:2021/04/camille_lefebvre_%7C_langarchiv) +https://lejournal.cnrs.fr/articles/zinder-renoue-avec-son-passe|relatedDoc|http://www.semanlink.net/doc/2021/04/camille_lefebvre_%7C_langarchiv +https://lejournal.cnrs.fr/articles/zinder-renoue-avec-son-passe|title|Zinder renoue avec son passé CNRS Le journal +https://lejournal.cnrs.fr/articles/zinder-renoue-avec-son-passe|creationTime|2019-02-25T14:34:25Z +http://viralpatel.net/blogs/generate-dynamic-web-project-maven-eclipse-wtp/|creationDate|2012-08-14 +http://viralpatel.net/blogs/generate-dynamic-web-project-maven-eclipse-wtp/|tag|http://www.semanlink.net/tag/eclipse_tip +http://viralpatel.net/blogs/generate-dynamic-web-project-maven-eclipse-wtp/|tag|http://www.semanlink.net/tag/maven +http://viralpatel.net/blogs/generate-dynamic-web-project-maven-eclipse-wtp/|title|Creating Dynamic Web Project using Maven in Eclipse. Eclipse Maven Web project +http://viralpatel.net/blogs/generate-dynamic-web-project-maven-eclipse-wtp/|creationTime|2012-08-14T00:16:21Z +http://bugbrother.blog.lemonde.fr/2011/05/21/sarkozy-expulse-les-libertes-de-son-internet-civilise/|creationDate|2011-05-23 +http://bugbrother.blog.lemonde.fr/2011/05/21/sarkozy-expulse-les-libertes-de-son-internet-civilise/|tag|http://www.semanlink.net/tag/liberte_d_expression +http://bugbrother.blog.lemonde.fr/2011/05/21/sarkozy-expulse-les-libertes-de-son-internet-civilise/|tag|http://www.semanlink.net/tag/droit_et_internet +http://bugbrother.blog.lemonde.fr/2011/05/21/sarkozy-expulse-les-libertes-de-son-internet-civilise/|tag|http://www.semanlink.net/tag/g8 +http://bugbrother.blog.lemonde.fr/2011/05/21/sarkozy-expulse-les-libertes-de-son-internet-civilise/|tag|http://www.semanlink.net/tag/sarkozy +http://bugbrother.blog.lemonde.fr/2011/05/21/sarkozy-expulse-les-libertes-de-son-internet-civilise/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2011/05/21/sarkozy-expulse-les-libertes-de-son-internet-civilise/|tag|http://www.semanlink.net/tag/sarkozyland +http://bugbrother.blog.lemonde.fr/2011/05/21/sarkozy-expulse-les-libertes-de-son-internet-civilise/|title|Sarkozy expulse les libertés de son “internet civilisé” - BUG BROTHER - Blog LeMonde.fr +http://bugbrother.blog.lemonde.fr/2011/05/21/sarkozy-expulse-les-libertes-de-son-internet-civilise/|creationTime|2011-05-23T13:29:20Z +http://ebiquity.umbc.edu/blogger/2009/02/12/yahoo-adds-rdf-support-to-searchmonkey-and-boss/|creationDate|2009-02-13 +http://ebiquity.umbc.edu/blogger/2009/02/12/yahoo-adds-rdf-support-to-searchmonkey-and-boss/|tag|http://www.semanlink.net/tag/yahoo +http://ebiquity.umbc.edu/blogger/2009/02/12/yahoo-adds-rdf-support-to-searchmonkey-and-boss/|tag|http://www.semanlink.net/tag/searchmonkey +http://ebiquity.umbc.edu/blogger/2009/02/12/yahoo-adds-rdf-support-to-searchmonkey-and-boss/|tag|http://www.semanlink.net/tag/rdf +http://ebiquity.umbc.edu/blogger/2009/02/12/yahoo-adds-rdf-support-to-searchmonkey-and-boss/|title|Yahoo! 
adds RDF support to SearchMonkey and BOSS +http://ebiquity.umbc.edu/blogger/2009/02/12/yahoo-adds-rdf-support-to-searchmonkey-and-boss/|creationTime|2009-02-13T18:19:32Z +http://www.ted.com/index.php/talks/tim_berners_lee_on_the_next_web.html|creationDate|2009-05-05 +http://www.ted.com/index.php/talks/tim_berners_lee_on_the_next_web.html|tag|http://www.semanlink.net/tag/ted +http://www.ted.com/index.php/talks/tim_berners_lee_on_the_next_web.html|tag|http://www.semanlink.net/tag/linked_data +http://www.ted.com/index.php/talks/tim_berners_lee_on_the_next_web.html|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.ted.com/index.php/talks/tim_berners_lee_on_the_next_web.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.ted.com/index.php/talks/tim_berners_lee_on_the_next_web.html|title|Tim Berners-Lee on the next Web Video on TED.com +http://www.ted.com/index.php/talks/tim_berners_lee_on_the_next_web.html|creationTime|2009-05-05T10:23:56Z +https://medium.com/@theonlytoby/history-tells-us-what-will-happen-next-with-brexit-trump-a3fefd154714#.p499yhrrb|creationDate|2016-11-14 +https://medium.com/@theonlytoby/history-tells-us-what-will-happen-next-with-brexit-trump-a3fefd154714#.p499yhrrb|tag|http://www.semanlink.net/tag/trump +https://medium.com/@theonlytoby/history-tells-us-what-will-happen-next-with-brexit-trump-a3fefd154714#.p499yhrrb|tag|http://www.semanlink.net/tag/brexit +https://medium.com/@theonlytoby/history-tells-us-what-will-happen-next-with-brexit-trump-a3fefd154714#.p499yhrrb|tag|http://www.semanlink.net/tag/histoire +https://medium.com/@theonlytoby/history-tells-us-what-will-happen-next-with-brexit-trump-a3fefd154714#.p499yhrrb|title|History tells us what may happen next with Brexit & Trump – Medium +https://medium.com/@theonlytoby/history-tells-us-what-will-happen-next-with-brexit-trump-a3fefd154714#.p499yhrrb|creationTime|2016-11-14T15:09:30Z +http://jena.hpl.hp.com/wiki/SDB|creationDate|2007-07-02 +http://jena.hpl.hp.com/wiki/SDB|tag|http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena +http://jena.hpl.hp.com/wiki/SDB|comment|"a new database subsystem for Jena. It uses a conventional relational +database, rewriting SPARQL to SQL. The relational database tools for +load balancing, security, clustering, backup and administration can all +be used to manage the installation. Currently it works with Oracle, MS +SQL Server, PostgreSQL, MySQL, Apache Derby and HSQLDB databases." 
+http://jena.hpl.hp.com/wiki/SDB|title|SDB - Jena wiki +http://jena.hpl.hp.com/wiki/SDB|creationTime|2007-07-02T22:26:32Z +http://danakil.ethiopia.free.fr/ertaale.htm|creationDate|2008-11-21 +http://danakil.ethiopia.free.fr/ertaale.htm|tag|http://www.semanlink.net/tag/erta_ale +http://danakil.ethiopia.free.fr/ertaale.htm|title|L'Erta Alé, volcan actif dans le Danakil +http://danakil.ethiopia.free.fr/ertaale.htm|creationTime|2008-11-21T23:26:34Z +http://recode.net/2015/02/15/white-house-red-chair-obama-meets-swisher/|creationDate|2015-02-21 +http://recode.net/2015/02/15/white-house-red-chair-obama-meets-swisher/|tag|http://www.semanlink.net/tag/obama +http://recode.net/2015/02/15/white-house-red-chair-obama-meets-swisher/|title|President Barack Obama Speaks with Kara Swisher (Full Transcript) Re/code +http://recode.net/2015/02/15/white-house-red-chair-obama-meets-swisher/|creationTime|2015-02-21T16:50:23Z +https://hackernoon.com/understanding-promises-in-javascript-13d99df067c1|creationDate|2018-07-22 +https://hackernoon.com/understanding-promises-in-javascript-13d99df067c1|tag|http://www.semanlink.net/tag/javascript_promises +https://hackernoon.com/understanding-promises-in-javascript-13d99df067c1|title|Understanding promises in Javascript – Hacker Noon +https://hackernoon.com/understanding-promises-in-javascript-13d99df067c1|creationTime|2018-07-22T11:45:44Z +https://www.stardog.com/|creationDate|2018-05-31 +https://www.stardog.com/|tag|http://www.semanlink.net/tag/enterprise_knowledge_graph +https://www.stardog.com/|title|Stardog: The Enterprise Knowledge Graph Platform +https://www.stardog.com/|creationTime|2018-05-31T00:50:09Z +http://dev.mysql.com/doc/refman/5.0/en/index.html|creationDate|2008-10-20 +http://dev.mysql.com/doc/refman/5.0/en/index.html|tag|http://www.semanlink.net/tag/mysql +http://dev.mysql.com/doc/refman/5.0/en/index.html|title|MySQL 5.0 Reference Manual +http://dev.mysql.com/doc/refman/5.0/en/index.html|creationTime|2008-10-20T14:50:04Z +http://www.coindesk.com/ethereum-response-dao-kill/|creationDate|2016-06-21 +http://www.coindesk.com/ethereum-response-dao-kill/|tag|http://www.semanlink.net/tag/dao_attack +http://www.coindesk.com/ethereum-response-dao-kill/|tag|http://www.semanlink.net/tag/ethereum +http://www.coindesk.com/ethereum-response-dao-kill/|title|Why the Wrong Response to The DAO Attack Could Kill Ethereum - CoinDesk +http://www.coindesk.com/ethereum-response-dao-kill/|creationTime|2016-06-21T10:27:15Z +http://www.openlinksw.com/weblog/oerling/?id=1777|creationDate|2014-02-27 +http://www.openlinksw.com/weblog/oerling/?id=1777|tag|http://www.semanlink.net/tag/chris_bizer +http://www.openlinksw.com/weblog/oerling/?id=1777|tag|http://www.semanlink.net/tag/orri_erling +http://www.openlinksw.com/weblog/oerling/?id=1777|tag|http://www.semanlink.net/tag/lod2 +http://www.openlinksw.com/weblog/oerling/?id=1777|title|LOD2 Plenary and Open Data Meet-up in Mannheim +http://www.openlinksw.com/weblog/oerling/?id=1777|creationTime|2014-02-27T01:01:37Z +https://course-v3.fast.ai/start_gcp.html|creationDate|2018-12-05 +https://course-v3.fast.ai/start_gcp.html|tag|http://www.semanlink.net/tag/google_cloud_platform +https://course-v3.fast.ai/start_gcp.html|tag|http://www.semanlink.net/tag/fast_ai +https://course-v3.fast.ai/start_gcp.html|title|GCP fast.ai course v3 +https://course-v3.fast.ai/start_gcp.html|creationTime|2018-12-05T17:56:46Z +http://docs.jquery.com/Tutorials:Getting_Started_with_jQuery|creationDate|2011-04-05 
+http://docs.jquery.com/Tutorials:Getting_Started_with_jQuery|tag|http://www.semanlink.net/tag/tutorial +http://docs.jquery.com/Tutorials:Getting_Started_with_jQuery|tag|http://www.semanlink.net/tag/jquery +http://docs.jquery.com/Tutorials:Getting_Started_with_jQuery|title|Tutorials:Getting Started with jQuery +http://docs.jquery.com/Tutorials:Getting_Started_with_jQuery|creationTime|2011-04-05T17:24:41Z +http://www.lemonde.fr/big-browser/article/2017/06/30/pourquoi-les-canadiens-se-moquent-eperdument-des-150-ans-du-canada_5153587_4832693.html|creationDate|2017-07-01 +http://www.lemonde.fr/big-browser/article/2017/06/30/pourquoi-les-canadiens-se-moquent-eperdument-des-150-ans-du-canada_5153587_4832693.html|tag|http://www.semanlink.net/tag/canada +http://www.lemonde.fr/big-browser/article/2017/06/30/pourquoi-les-canadiens-se-moquent-eperdument-des-150-ans-du-canada_5153587_4832693.html|comment|le patriotisme, un truc de losers +http://www.lemonde.fr/big-browser/article/2017/06/30/pourquoi-les-canadiens-se-moquent-eperdument-des-150-ans-du-canada_5153587_4832693.html|title|Pourquoi les Canadiens se moquent éperdument des 150 ans du Canada +http://www.lemonde.fr/big-browser/article/2017/06/30/pourquoi-les-canadiens-se-moquent-eperdument-des-150-ans-du-canada_5153587_4832693.html|creationTime|2017-07-01T20:01:31Z +http://www.newscientist.com/article/dn18834-zoologger-the-most-bizarre-life-story-on-earth.html|creationDate|2010-05-04 +http://www.newscientist.com/article/dn18834-zoologger-the-most-bizarre-life-story-on-earth.html|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://www.newscientist.com/article/dn18834-zoologger-the-most-bizarre-life-story-on-earth.html|title|Zoologger: The most bizarre life story on Earth? - life - 28 April 2010 - New Scientist +http://www.newscientist.com/article/dn18834-zoologger-the-most-bizarre-life-story-on-earth.html|creationTime|2010-05-04T21:54:54Z +http://lafeuille.blog.lemonde.fr/2013/09/07/un-github-pour-les-ecrivains-%e2%80%a8/|creationDate|2013-09-09 +http://lafeuille.blog.lemonde.fr/2013/09/07/un-github-pour-les-ecrivains-%e2%80%a8/|tag|http://www.semanlink.net/tag/github +http://lafeuille.blog.lemonde.fr/2013/09/07/un-github-pour-les-ecrivains-%e2%80%a8/|title|Un GitHub pour les écrivains ? La Feuille +http://lafeuille.blog.lemonde.fr/2013/09/07/un-github-pour-les-ecrivains-%e2%80%a8/|creationTime|2013-09-09T12:48:25Z +https://medium.com/starts-with-a-bang/the-disappearing-universe-d7447467c63a|creationDate|2014-06-09 +https://medium.com/starts-with-a-bang/the-disappearing-universe-d7447467c63a|tag|http://www.semanlink.net/tag/cosmologie +https://medium.com/starts-with-a-bang/the-disappearing-universe-d7447467c63a|title|The Disappearing Universe — Starts With A Bang! 
— Medium +https://medium.com/starts-with-a-bang/the-disappearing-universe-d7447467c63a|creationTime|2014-06-09T00:34:15Z +https://www.google.fr/?gfe_rd=ssl&ei=Qck3V_S2KISDaL74kuAG#q=Loriane+Zacharie&stick=H4sIAAAAAAAAAONgFuLVT9c3NEwyjy9PN7NIV0LlaklmJ1vp55YWZybrJxaVZBaXWBVnpqSWJ1YWAwAXCNBNOwAAAA|creationDate|2016-05-15 +https://www.google.fr/?gfe_rd=ssl&ei=Qck3V_S2KISDaL74kuAG#q=Loriane+Zacharie&stick=H4sIAAAAAAAAAONgFuLVT9c3NEwyjy9PN7NIV0LlaklmJ1vp55YWZybrJxaVZBaXWBVnpqSWJ1YWAwAXCNBNOwAAAA|tag|http://www.semanlink.net/tag/zouk +https://www.google.fr/?gfe_rd=ssl&ei=Qck3V_S2KISDaL74kuAG#q=Loriane+Zacharie&stick=H4sIAAAAAAAAAONgFuLVT9c3NEwyjy9PN7NIV0LlaklmJ1vp55YWZybrJxaVZBaXWBVnpqSWJ1YWAwAXCNBNOwAAAA|title|Loriane Zacharie +https://www.google.fr/?gfe_rd=ssl&ei=Qck3V_S2KISDaL74kuAG#q=Loriane+Zacharie&stick=H4sIAAAAAAAAAONgFuLVT9c3NEwyjy9PN7NIV0LlaklmJ1vp55YWZybrJxaVZBaXWBVnpqSWJ1YWAwAXCNBNOwAAAA|creationTime|2016-05-15T02:59:13Z +http://crunchgear.com/2007/07/16/help-key-how-to-optimize-your-video-for-youtube/|creationDate|2007-09-15 +http://crunchgear.com/2007/07/16/help-key-how-to-optimize-your-video-for-youtube/|tag|http://www.semanlink.net/tag/youtube +http://crunchgear.com/2007/07/16/help-key-how-to-optimize-your-video-for-youtube/|tag|http://www.semanlink.net/tag/howto +http://crunchgear.com/2007/07/16/help-key-how-to-optimize-your-video-for-youtube/|title|Help-Key: How to Optimize Your Video for YouTube +http://crunchgear.com/2007/07/16/help-key-how-to-optimize-your-video-for-youtube/|creationTime|2007-09-15T21:29:06Z +http://www.slideshare.net/LeeFeigenbaum/evolution-towards-web-30-the-semantic-web|creationDate|2011-05-31 +http://www.slideshare.net/LeeFeigenbaum/evolution-towards-web-30-the-semantic-web|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.slideshare.net/LeeFeigenbaum/evolution-towards-web-30-the-semantic-web|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.slideshare.net/LeeFeigenbaum/evolution-towards-web-30-the-semantic-web|title|Evolution Towards Web 3.0: The Semantic Web +http://www.slideshare.net/LeeFeigenbaum/evolution-towards-web-30-the-semantic-web|creationTime|2011-05-31T17:41:28Z +http://www.sciencemag.org|creationDate|2005-03-03 +http://www.sciencemag.org|tag|http://www.semanlink.net/tag/science +http://www.sciencemag.org|title|Science (site officiel de la revue) +http://linkeddataorchestration.com/2014/03/04/on-apis-json-linked-data-attitude-and-opportunities/|creationDate|2017-05-15 +http://linkeddataorchestration.com/2014/03/04/on-apis-json-linked-data-attitude-and-opportunities/|tag|http://www.semanlink.net/tag/hal +http://linkeddataorchestration.com/2014/03/04/on-apis-json-linked-data-attitude-and-opportunities/|tag|http://www.semanlink.net/tag/apis_and_linked_data +http://linkeddataorchestration.com/2014/03/04/on-apis-json-linked-data-attitude-and-opportunities/|tag|http://www.semanlink.net/tag/json_ld +http://linkeddataorchestration.com/2014/03/04/on-apis-json-linked-data-attitude-and-opportunities/|title|On APIs, JSON, Linked Data, attitude and opportunities Linked Data Orchestration +http://linkeddataorchestration.com/2014/03/04/on-apis-json-linked-data-attitude-and-opportunities/|creationTime|2017-05-15T11:07:13Z +http://web.stanford.edu/class/cs224n/reports.html|creationDate|2018-04-05 +http://web.stanford.edu/class/cs224n/reports.html|tag|http://www.semanlink.net/tag/deep_nlp +http://web.stanford.edu/class/cs224n/reports.html|tag|http://www.semanlink.net/tag/nlp_stanford 
+http://web.stanford.edu/class/cs224n/reports.html|comment|Course Project Reports for 2018. [Notes on reddit](https://www.reddit.com/r/MachineLearning/comments/89i9h8/ps_the_2018_stanford_cs224n_nlp_course_projects/) +http://web.stanford.edu/class/cs224n/reports.html|title|CS224n: Natural Language Processing with Deep Learning +http://web.stanford.edu/class/cs224n/reports.html|creationTime|2018-04-05T01:55:59Z +http://semanticweb.com/plumbing-depths-deep-learning_b41996|creationDate|2014-03-07 +http://semanticweb.com/plumbing-depths-deep-learning_b41996|tag|http://www.semanlink.net/tag/deep_learning +http://semanticweb.com/plumbing-depths-deep-learning_b41996|title|Plumbing The Depths Of Deep Learning - Semanticweb.com +http://semanticweb.com/plumbing-depths-deep-learning_b41996|creationTime|2014-03-07T13:43:47Z +http://www.greenpeace.org/africa/fr/notre-action/Defendons-nos-oceans/Peche-frauduleuse-/|creationDate|2015-05-08 +http://www.greenpeace.org/africa/fr/notre-action/Defendons-nos-oceans/Peche-frauduleuse-/|tag|http://www.semanlink.net/tag/peche +http://www.greenpeace.org/africa/fr/notre-action/Defendons-nos-oceans/Peche-frauduleuse-/|tag|http://www.semanlink.net/tag/greenpeace +http://www.greenpeace.org/africa/fr/notre-action/Defendons-nos-oceans/Peche-frauduleuse-/|tag|http://www.semanlink.net/tag/chine_afrique +http://www.greenpeace.org/africa/fr/notre-action/Defendons-nos-oceans/Peche-frauduleuse-/|tag|http://www.semanlink.net/tag/peche_industrielle +http://www.greenpeace.org/africa/fr/notre-action/Defendons-nos-oceans/Peche-frauduleuse-/|title|Pêche frauduleuse Greenpeace Afrique +http://www.greenpeace.org/africa/fr/notre-action/Defendons-nos-oceans/Peche-frauduleuse-/|creationTime|2015-05-08T13:19:21Z +http://marmotta.incubator.apache.org/|creationDate|2013-04-26 +http://marmotta.incubator.apache.org/|tag|http://www.semanlink.net/tag/apache_marmotta +http://marmotta.incubator.apache.org/|title|Apache Marmotta +http://marmotta.incubator.apache.org/|creationTime|2013-04-26T13:21:57Z +http://www.jenitennison.com/blog/node/170|creationDate|2012-05-12 +http://www.jenitennison.com/blog/node/170|tag|http://www.semanlink.net/tag/jeni_tennison +http://www.jenitennison.com/blog/node/170|tag|http://www.semanlink.net/tag/httprange_14 +http://www.jenitennison.com/blog/node/170|title|"Using ""Punning"" to Answer httpRange-14 Jeni's Musings" +http://www.jenitennison.com/blog/node/170|creationTime|2012-05-12T18:11:19Z +http://en.wikipedia.org/wiki/The_Chaser_(film)|creationDate|2015-04-27 +http://en.wikipedia.org/wiki/The_Chaser_(film)|tag|http://www.semanlink.net/tag/coree_du_sud +http://en.wikipedia.org/wiki/The_Chaser_(film)|tag|http://www.semanlink.net/tag/film +http://en.wikipedia.org/wiki/The_Chaser_(film)|title|The Chaser (film) +http://en.wikipedia.org/wiki/The_Chaser_(film)|creationTime|2015-04-27T00:16:14Z +https://www.bbc.com/news/science-environment-47873072|creationDate|2019-04-11 +https://www.bbc.com/news/science-environment-47873072|tag|http://www.semanlink.net/tag/paleoanthropology +https://www.bbc.com/news/science-environment-47873072|title|New human species found in Philippines +https://www.bbc.com/news/science-environment-47873072|creationTime|2019-04-11T00:37:26Z +http://www.snee.com/xml/xml2006/owlrdbms.html|creationDate|2008-06-04 +http://www.snee.com/xml/xml2006/owlrdbms.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/xml/xml2006/owlrdbms.html|tag|http://www.semanlink.net/tag/oracle 
+http://www.snee.com/xml/xml2006/owlrdbms.html|tag|http://www.semanlink.net/tag/d2rq +http://www.snee.com/xml/xml2006/owlrdbms.html|tag|http://www.semanlink.net/tag/triplestore +http://www.snee.com/xml/xml2006/owlrdbms.html|title|Relational database integration with RDF/OWL +http://www.snee.com/xml/xml2006/owlrdbms.html|creationTime|2008-06-04T22:31:39Z +http://www.ldodds.com/blog/archives/000269.html|creationDate|2006-04-13 +http://www.ldodds.com/blog/archives/000269.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/archives/000269.html|tag|http://www.semanlink.net/tag/jena_user_conference +http://www.ldodds.com/blog/archives/000269.html|title|Lost Boy: 2006 Jena User Conference Programme +https://en.wikipedia.org/wiki/Chimamanda_Ngozi_Adichie|creationDate|2016-08-27 +https://en.wikipedia.org/wiki/Chimamanda_Ngozi_Adichie|tag|http://www.semanlink.net/tag/nigeria +https://en.wikipedia.org/wiki/Chimamanda_Ngozi_Adichie|tag|http://www.semanlink.net/tag/livre_a_lire +https://en.wikipedia.org/wiki/Chimamanda_Ngozi_Adichie|title|Chimamanda Ngozi Adichie +https://en.wikipedia.org/wiki/Chimamanda_Ngozi_Adichie|creationTime|2016-08-27T00:41:16Z +http://semanticweb.com/semantic-seo-comes-to-prestashop-e-commerce-sites_b25279|creationDate|2011-12-17 +http://semanticweb.com/semantic-seo-comes-to-prestashop-e-commerce-sites_b25279|tag|http://www.semanlink.net/tag/semantic_seo +http://semanticweb.com/semantic-seo-comes-to-prestashop-e-commerce-sites_b25279|tag|http://www.semanlink.net/tag/makolab +http://semanticweb.com/semantic-seo-comes-to-prestashop-e-commerce-sites_b25279|title|Semantic SEO Comes to Prestashop e-commerce Sites - semanticweb.com +http://semanticweb.com/semantic-seo-comes-to-prestashop-e-commerce-sites_b25279|creationTime|2011-12-17T12:05:33Z +http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html|creationDate|2015-10-21 +http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html|tag|http://www.semanlink.net/tag/scikit_learn +http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html|tag|http://www.semanlink.net/tag/general_nlp_tasks +http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html|tag|http://www.semanlink.net/tag/python_nlp +http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html|tag|http://www.semanlink.net/tag/python_sample_code +http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html|tag|http://www.semanlink.net/tag/nlp_sample_code +http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html|tag|http://www.semanlink.net/tag/tutorial +http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html|comment|"scikit-learn tutorial about analysing a collection of labelled text documents : + +- load the file contents and the categories +- extract feature vectors (count, tf, tf-idf) +- train a linear model to perform categorization +- use a grid search strategy (to find a good configuration of both the feature extraction components and the classifier) +" +http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html|title|Working With Text Data — scikit-learn documentation +http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html|creationTime|2015-10-21T10:08:08Z +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/|creationDate|2006-12-23 
+http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/|tag|http://www.semanlink.net/tag/workshop +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/|tag|http://www.semanlink.net/tag/semantic_web_application +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS//Vol-201/|title|CEUR-WS.org/Vol-201 - SWAP, Semantic Web Applications and Perspectives, 2nd Italian Semantic Web Workshop +http://searchivarius.org/blog/brief-overview-querysentence-similarity-functions|creationDate|2017-07-21 +http://searchivarius.org/blog/brief-overview-querysentence-similarity-functions|tag|http://www.semanlink.net/tag/text_similarity +http://searchivarius.org/blog/brief-overview-querysentence-similarity-functions|title|A brief overview of query/sentence similarity functions searchivarius.org +http://searchivarius.org/blog/brief-overview-querysentence-similarity-functions|creationTime|2017-07-21T12:47:02Z +http://stackoverflow.com/questions/10213009/solr-vs-elasticsearch|creationDate|2015-03-14 +http://stackoverflow.com/questions/10213009/solr-vs-elasticsearch|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/10213009/solr-vs-elasticsearch|tag|http://www.semanlink.net/tag/elasticsearch +http://stackoverflow.com/questions/10213009/solr-vs-elasticsearch|tag|http://www.semanlink.net/tag/solr +http://stackoverflow.com/questions/10213009/solr-vs-elasticsearch|title|search - Solr vs. ElasticSearch - Stack Overflow +http://stackoverflow.com/questions/10213009/solr-vs-elasticsearch|creationTime|2015-03-14T22:23:34Z +http://karpathy.github.io/2015/05/21/rnn-effectiveness/|creationDate|2016-02-28 +http://karpathy.github.io/2015/05/21/rnn-effectiveness/|tag|http://www.semanlink.net/tag/andrej_karpathy +http://karpathy.github.io/2015/05/21/rnn-effectiveness/|tag|http://www.semanlink.net/tag/lstm_networks +http://karpathy.github.io/2015/05/21/rnn-effectiveness/|tag|http://www.semanlink.net/tag/recurrent_neural_network +http://karpathy.github.io/2015/05/21/rnn-effectiveness/|comment|"What character-level language models based on RNNs are capable of + +> The core reason that recurrent nets are more exciting is that they allow us to operate over sequences of vectors: Sequences in the input, the output, or in the most general case both. 
+" +http://karpathy.github.io/2015/05/21/rnn-effectiveness/|title|The Unreasonable Effectiveness of Recurrent Neural Networks +http://karpathy.github.io/2015/05/21/rnn-effectiveness/|creationTime|2016-02-28T22:55:16Z +http://www.sitepoint.com/article/javascript-library|creationDate|2006-06-25 +http://www.sitepoint.com/article/javascript-library|tag|http://www.semanlink.net/tag/javascript_dom +http://www.sitepoint.com/article/javascript-library|tag|http://www.semanlink.net/tag/javascript_librairies +http://www.sitepoint.com/article/javascript-library|title|The JavaScript Library World Cup [JavaScript & DHTML Tutorials] +http://www.newscientist.com/article/dn17895-freeflying-cyborg-insects-steered-from-a-distance.html|creationDate|2009-11-13 +http://www.newscientist.com/article/dn17895-freeflying-cyborg-insects-steered-from-a-distance.html|tag|http://www.semanlink.net/tag/cyborg +http://www.newscientist.com/article/dn17895-freeflying-cyborg-insects-steered-from-a-distance.html|tag|http://www.semanlink.net/tag/cafard +http://www.newscientist.com/article/dn17895-freeflying-cyborg-insects-steered-from-a-distance.html|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.newscientist.com/article/dn17895-freeflying-cyborg-insects-steered-from-a-distance.html|title|Free-flying cyborg insects steered from a distance - tech - 01 October 2009 - New Scientist +http://www.newscientist.com/article/dn17895-freeflying-cyborg-insects-steered-from-a-distance.html|creationTime|2009-11-13T21:27:07Z +http://www.kartoo.com/|creationDate|2008-02-15 +http://www.kartoo.com/|tag|http://www.semanlink.net/tag/search_engines +http://www.kartoo.com/|tag|http://www.semanlink.net/tag/information_visualization +http://www.kartoo.com/|title|KartOO Métamoteur de recherche +http://www.kartoo.com/|creationTime|2008-02-15T23:48:48Z +http://pcottle.github.io/learnGitBranching/|creationDate|2015-03-16 +http://pcottle.github.io/learnGitBranching/|tag|http://www.semanlink.net/tag/git +http://pcottle.github.io/learnGitBranching/|tag|http://www.semanlink.net/tag/tutorial +http://pcottle.github.io/learnGitBranching/|title|Learn Git Branching +http://pcottle.github.io/learnGitBranching/|creationTime|2015-03-16T11:41:47Z +http://www.spurl.net|creationDate|2005-04-28 +http://www.spurl.net|tag|http://www.semanlink.net/tag/spurl +http://docs.info.apple.com/article.html?artnum=301415|creationDate|2007-09-15 +http://docs.info.apple.com/article.html?artnum=301415|tag|http://www.semanlink.net/tag/imovie +http://docs.info.apple.com/article.html?artnum=301415|title|iMovie HD: No sound after applying a title, effect, or transition to an MPEG-4 video clip +http://docs.info.apple.com/article.html?artnum=301415|creationTime|2007-09-15T19:05:33Z +http://www.w3.org/TR/json-ld-syntax/#embedding-json-ld-in-html-documents|creationDate|2013-06-25 +http://www.w3.org/TR/json-ld-syntax/#embedding-json-ld-in-html-documents|tag|http://www.semanlink.net/tag/json_ld +http://www.w3.org/TR/json-ld-syntax/#embedding-json-ld-in-html-documents|tag|http://www.semanlink.net/tag/html_data +http://www.w3.org/TR/json-ld-syntax/#embedding-json-ld-in-html-documents|title|JSON-LD 1.0: Embedding JSON-LD in HTML Documents +http://www.w3.org/TR/json-ld-syntax/#embedding-json-ld-in-html-documents|creationTime|2013-06-25T08:50:28Z +https://github.com/google/WebFundamentals/blob/master/src/content/en/updates/posts/2015/03/creating-semantic-sites-with-web-components-and-jsonld.markdown|creationDate|2016-08-02 
+https://github.com/google/WebFundamentals/blob/master/src/content/en/updates/posts/2015/03/creating-semantic-sites-with-web-components-and-jsonld.markdown|tag|http://www.semanlink.net/tag/json_ld +https://github.com/google/WebFundamentals/blob/master/src/content/en/updates/posts/2015/03/creating-semantic-sites-with-web-components-and-jsonld.markdown|tag|http://www.semanlink.net/tag/semantic_components +https://github.com/google/WebFundamentals/blob/master/src/content/en/updates/posts/2015/03/creating-semantic-sites-with-web-components-and-jsonld.markdown|tag|http://www.semanlink.net/tag/webcomponents +https://github.com/google/WebFundamentals/blob/master/src/content/en/updates/posts/2015/03/creating-semantic-sites-with-web-components-and-jsonld.markdown|title|creating-semantic-sites-with-web-components-and-jsonld.markdown +https://github.com/google/WebFundamentals/blob/master/src/content/en/updates/posts/2015/03/creating-semantic-sites-with-web-components-and-jsonld.markdown|creationTime|2016-08-02T12:04:31Z +http://news.bbc.co.uk/1/hi/in_depth/sci_tech/2004/planet/default.stm|creationDate|2005-06-23 +http://news.bbc.co.uk/1/hi/in_depth/sci_tech/2004/planet/default.stm|tag|http://www.semanlink.net/tag/planet_under_pressure +http://news.bbc.co.uk/1/hi/in_depth/sci_tech/2004/planet/default.stm|title|BBC NEWS - Planet under pressure +http://news.bbc.co.uk/1/hi/in_depth/sci_tech/2004/planet/default.stm|source|BBC +http://www.oracle.com/technology/sample_code/tech/java/j2ee/jintdemo/tutorials/webservices.html|creationDate|2005-10-13 +http://www.oracle.com/technology/sample_code/tech/java/j2ee/jintdemo/tutorials/webservices.html|tag|http://www.semanlink.net/tag/web_services +http://www.oracle.com/technology/sample_code/tech/java/j2ee/jintdemo/tutorials/webservices.html|title|Document Style Web Services And Dynamic Invocation Of Web Services +http://news.bbc.co.uk/onthisday/|creationDate|2005-03-24 +http://news.bbc.co.uk/onthisday/|tag|http://www.semanlink.net/tag/bbc +http://news.bbc.co.uk/onthisday/|comment|Video and audio from the BBC news archive WWII and 1950-2004 +http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html|creationDate|2009-10-04 +http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html|tag|http://www.semanlink.net/tag/herve_kempf +http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html|tag|http://www.semanlink.net/tag/nicolas_hulot +http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html|tag|http://www.semanlink.net/tag/crise_ecologique +http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html|tag|http://www.semanlink.net/tag/critique_du_liberalisme +http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html|comment|"""Je suis perdu parce que je ne comprends pas qu'il faille autant d'énergie pour placer des évidences auprès de nos élites. Des gens qui ont une intelligence parfois fulgurante ont des angles morts, c'est-à-dire qu'ils n'arrivent pas à comprendre que leur modèle économique ne tiendra pas.""
+...Ce qu'Hulot appelle les élites, c'est aujourd'hui une oligarchie. Elle ne veut pas entendre l'évidence de la crise écologique et de la désagrégation sociale, parce que le but principal de l'oligarchie est de maintenir ses intérêts et ses privilèges. +" +http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html|title|Nicolas Hulot et l'oligarchie, par Hervé Kempf +http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html|creationTime|2009-10-04T11:12:00Z +http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html|source|Le Monde +http://www.lemonde.fr/opinions/article/2009/10/03/nicolas-hulot-et-l-oligarchie-par-herve-kempf_1248846_3232.html|date|2009-10-04 +http://www.cnrs.fr/ins2i/spip.php?article2581&utm_content=buffer07b2c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-07-05 +http://www.cnrs.fr/ins2i/spip.php?article2581&utm_content=buffer07b2c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/type_system +http://www.cnrs.fr/ins2i/spip.php?article2581&utm_content=buffer07b2c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Développement web : concilier sûreté et flexibilité avec le typage graduel +http://www.cnrs.fr/ins2i/spip.php?article2581&utm_content=buffer07b2c&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-07-05T00:13:56Z +http://www.archive.org/|creationDate|2007-11-13 +http://www.archive.org/|tag|http://www.semanlink.net/tag/web +http://www.archive.org/|comment|Archive.org a archivé près de 86 milliards de pages web, correspondant à 65 millions de sites Web, en 37 langues. Sa base de données pèse près de 2 pétaoctets, soit 2 millions de gigaoctets, l'équivalent de deux cents fois le contenu de la Bibliothèque du Congrès américain. 
+http://www.archive.org/|title|Internet Archive +http://www.archive.org/|creationTime|2007-11-13T22:12:37Z +http://news.bbc.co.uk/2/hi/health/7114587.stm|creationDate|2007-12-11 +http://news.bbc.co.uk/2/hi/health/7114587.stm|tag|http://www.semanlink.net/tag/medecine +http://news.bbc.co.uk/2/hi/health/7114587.stm|title|BBC NEWS Health Amputees 'regain sense of touch' +http://news.bbc.co.uk/2/hi/health/7114587.stm|creationTime|2007-12-11T23:49:54Z +http://news.bbc.co.uk/2/hi/health/7114587.stm|source|BBC +http://bugbrother.blog.lemonde.fr/2009/09/26/calais-et-ses-refugies-aux-doigts-brules/|creationDate|2012-04-29 +http://bugbrother.blog.lemonde.fr/2009/09/26/calais-et-ses-refugies-aux-doigts-brules/|tag|http://www.semanlink.net/tag/immigration +http://bugbrother.blog.lemonde.fr/2009/09/26/calais-et-ses-refugies-aux-doigts-brules/|tag|http://www.semanlink.net/tag/etat_policier +http://bugbrother.blog.lemonde.fr/2009/09/26/calais-et-ses-refugies-aux-doigts-brules/|tag|http://www.semanlink.net/tag/refugies +http://bugbrother.blog.lemonde.fr/2009/09/26/calais-et-ses-refugies-aux-doigts-brules/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2009/09/26/calais-et-ses-refugies-aux-doigts-brules/|title|Calais: des réfugiés aux doigts brûlés BUG BROTHER +http://bugbrother.blog.lemonde.fr/2009/09/26/calais-et-ses-refugies-aux-doigts-brules/|creationTime|2012-04-29T18:48:58Z +http://robohub.org/baidus-self-driving-tech-plans-revealed/|creationDate|2017-07-10 +http://robohub.org/baidus-self-driving-tech-plans-revealed/|tag|http://www.semanlink.net/tag/driverless_car +http://robohub.org/baidus-self-driving-tech-plans-revealed/|tag|http://www.semanlink.net/tag/baidu +http://robohub.org/baidus-self-driving-tech-plans-revealed/|title|Baidu’s self-driving tech plans revealed Robohub +http://robohub.org/baidus-self-driving-tech-plans-revealed/|creationTime|2017-07-10T11:34:38Z +http://www.wired.com/opinion/2013/01/forget-the-internet-of-things-here-comes-the-internet-of-cars/|creationDate|2013-01-08 +http://www.wired.com/opinion/2013/01/forget-the-internet-of-things-here-comes-the-internet-of-cars/|tag|http://www.semanlink.net/tag/automotive_and_web_technologies +http://www.wired.com/opinion/2013/01/forget-the-internet-of-things-here-comes-the-internet-of-cars/|title|Forget the Internet of Things: Here Comes the Internet of Cars Wired Opinion Wired.com +http://www.wired.com/opinion/2013/01/forget-the-internet-of-things-here-comes-the-internet-of-cars/|creationTime|2013-01-08T12:00:03Z +http://n2.talis.com/wiki/SPARQL_Demo|creationDate|2008-05-19 +http://n2.talis.com/wiki/SPARQL_Demo|tag|http://www.semanlink.net/tag/sparql_sample_code +http://n2.talis.com/wiki/SPARQL_Demo|tag|http://www.semanlink.net/tag/sparql_endpoint +http://n2.talis.com/wiki/SPARQL_Demo|tag|http://www.semanlink.net/tag/talis +http://n2.talis.com/wiki/SPARQL_Demo|title|SPARQL Demo - n² wiki +http://n2.talis.com/wiki/SPARQL_Demo|creationTime|2008-05-19T18:30:42Z +http://www.javaworld.com/javaworld/jw-11-2004/jw-1101-spider.html|creationDate|2005-05-04 +http://www.javaworld.com/javaworld/jw-11-2004/jw-1101-spider.html|tag|http://www.semanlink.net/tag/html_parsing +http://www.javaworld.com/javaworld/jw-11-2004/jw-1101-spider.html|tag|http://www.semanlink.net/tag/java_dev +http://www.javaworld.com/javaworld/jw-11-2004/jw-1101-spider.html|title|Create intelligent Web spiders +http://www.lespetitescases.net/semweblabs/linkedmymusic/|creationDate|2011-01-18 
+http://www.lespetitescases.net/semweblabs/linkedmymusic/|tag|http://www.semanlink.net/tag/gautier_poupeau +http://www.lespetitescases.net/semweblabs/linkedmymusic/|tag|http://www.semanlink.net/tag/linked_data +http://www.lespetitescases.net/semweblabs/linkedmymusic/|tag|http://www.semanlink.net/tag/semantic_mashups +http://www.lespetitescases.net/semweblabs/linkedmymusic/|tag|http://www.semanlink.net/tag/musique +http://www.lespetitescases.net/semweblabs/linkedmymusic/|title|Linked My Music +http://www.lespetitescases.net/semweblabs/linkedmymusic/|creationTime|2011-01-18T22:58:41Z +http://sappingattention.blogspot.fr/2012/11/when-you-have-mallet-everything-looks.html|creationDate|2014-04-25 +http://sappingattention.blogspot.fr/2012/11/when-you-have-mallet-everything-looks.html|tag|http://www.semanlink.net/tag/clustering_of_text_documents +http://sappingattention.blogspot.fr/2012/11/when-you-have-mallet-everything-looks.html|tag|http://www.semanlink.net/tag/nlp_and_humanities +http://sappingattention.blogspot.fr/2012/11/when-you-have-mallet-everything-looks.html|tag|http://www.semanlink.net/tag/topic_modeling +http://sappingattention.blogspot.fr/2012/11/when-you-have-mallet-everything-looks.html|tag|http://www.semanlink.net/tag/mallet +http://sappingattention.blogspot.fr/2012/11/when-you-have-mallet-everything-looks.html|title|Sapping Attention: When you have a MALLET, everything looks like a nail +http://sappingattention.blogspot.fr/2012/11/when-you-have-mallet-everything-looks.html|creationTime|2014-04-25T12:44:00Z +https://semantic-ui.com/|creationDate|2017-05-10 +https://semantic-ui.com/|title|Semantic UI +https://semantic-ui.com/|creationTime|2017-05-10T07:54:40Z +http://fgiasson.com/blog/index.php/2011/10/11/volkswagens-rdf-data-management-workflow/|creationDate|2011-10-12 +http://fgiasson.com/blog/index.php/2011/10/11/volkswagens-rdf-data-management-workflow/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2011/10/11/volkswagens-rdf-data-management-workflow/|tag|http://www.semanlink.net/tag/volkswagen +http://fgiasson.com/blog/index.php/2011/10/11/volkswagens-rdf-data-management-workflow/|tag|http://www.semanlink.net/tag/openstructs +http://fgiasson.com/blog/index.php/2011/10/11/volkswagens-rdf-data-management-workflow/|title|Volkswagen’s RDF Data Management Workflow at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2011/10/11/volkswagens-rdf-data-management-workflow/|creationTime|2011-10-12T23:43:35Z +http://www.thebeijingguide.com/|creationDate|2008-04-11 +http://www.thebeijingguide.com/|tag|http://www.semanlink.net/tag/pekin +http://www.thebeijingguide.com/|title|The China Guide: China Tours, Hotels, Plane and Train Tickets, Photographs and Pictures +http://www.thebeijingguide.com/|creationTime|2008-04-11T23:19:22Z +http://hsivonen.iki.fi/schema-org-and-communities/|creationDate|2011-06-10 +http://hsivonen.iki.fi/schema-org-and-communities/|tag|http://www.semanlink.net/tag/schema_org +http://hsivonen.iki.fi/schema-org-and-communities/|tag|http://www.semanlink.net/tag/hixie +http://hsivonen.iki.fi/schema-org-and-communities/|tag|http://www.semanlink.net/tag/microdata +http://hsivonen.iki.fi/schema-org-and-communities/|comment|one of the key insights of Microdata is that the properties of an item are tightly coupled with the type of the item, so the properties don’t need to be orthogonal with the item type and applicable to items of any type... 
If you are preparing schema.org in secret, it may look like the right thing to do to create your own item types instead of sticking some new properties onto someone else’s item type. +http://hsivonen.iki.fi/schema-org-and-communities/|title|Schema.org and Pre-Existing Communities +http://hsivonen.iki.fi/schema-org-and-communities/|creationTime|2011-06-10T00:15:22Z +http://www.lemonde.fr/planete/article/2013/04/05/comment-monsanto-a-mis-les-ogm-au-dessus-de-la-loi-aux-etats-unis_3154615_3244.html|creationDate|2013-04-06 +http://www.lemonde.fr/planete/article/2013/04/05/comment-monsanto-a-mis-les-ogm-au-dessus-de-la-loi-aux-etats-unis_3154615_3244.html|tag|http://www.semanlink.net/tag/ogm +http://www.lemonde.fr/planete/article/2013/04/05/comment-monsanto-a-mis-les-ogm-au-dessus-de-la-loi-aux-etats-unis_3154615_3244.html|tag|http://www.semanlink.net/tag/monsanto +http://www.lemonde.fr/planete/article/2013/04/05/comment-monsanto-a-mis-les-ogm-au-dessus-de-la-loi-aux-etats-unis_3154615_3244.html|title|"Le ""Monsanto act"" met les OGM au-dessus de la loi aux Etats-Unis" +http://www.lemonde.fr/planete/article/2013/04/05/comment-monsanto-a-mis-les-ogm-au-dessus-de-la-loi-aux-etats-unis_3154615_3244.html|creationTime|2013-04-06T11:11:45Z +http://www.wired.com/design/2013/07/a-market-that-lets-you-buy-from-your-neighbors/|creationDate|2013-07-11 +http://www.wired.com/design/2013/07/a-market-that-lets-you-buy-from-your-neighbors/|tag|http://www.semanlink.net/tag/slow_food +http://www.wired.com/design/2013/07/a-market-that-lets-you-buy-from-your-neighbors/|tag|http://www.semanlink.net/tag/good_idea +http://www.wired.com/design/2013/07/a-market-that-lets-you-buy-from-your-neighbors/|title|Student Invents Clever Online Market for Eating Locally Wired Design Wired.com +http://www.wired.com/design/2013/07/a-market-that-lets-you-buy-from-your-neighbors/|creationTime|2013-07-11T01:31:55Z +http://siren.sindice.com/index.html|creationDate|2012-05-10 +http://siren.sindice.com/index.html|tag|http://www.semanlink.net/tag/lucene +http://siren.sindice.com/index.html|tag|http://www.semanlink.net/tag/solr +http://siren.sindice.com/index.html|comment|Efficient semi-structured Information Retrieval for Lucene +http://siren.sindice.com/index.html|title|SIREn: Semantic Information Retrieval Engine +http://siren.sindice.com/index.html|creationTime|2012-05-10T01:30:00Z +https://blog.openai.com/evolution-strategies/|creationDate|2018-01-06 +https://blog.openai.com/evolution-strategies/|tag|http://www.semanlink.net/tag/reinforcement_learning +https://blog.openai.com/evolution-strategies/|tag|http://www.semanlink.net/tag/neuroevolution +https://blog.openai.com/evolution-strategies/|title|Evolution Strategies as a Scalable Alternative to Reinforcement Learning +https://blog.openai.com/evolution-strategies/|creationTime|2018-01-06T15:11:28Z +http://fgiasson.com/blog/index.php/2007/05/24/the-music-data-space/|creationDate|2007-05-31 +http://fgiasson.com/blog/index.php/2007/05/24/the-music-data-space/|tag|http://www.semanlink.net/tag/musique +http://fgiasson.com/blog/index.php/2007/05/24/the-music-data-space/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2007/05/24/the-music-data-space/|tag|http://www.semanlink.net/tag/musicbrainz +http://fgiasson.com/blog/index.php/2007/05/24/the-music-data-space/|tag|http://www.semanlink.net/tag/linked_data +http://fgiasson.com/blog/index.php/2007/05/24/the-music-data-space/|title|The Music Data Space at Frederick Giasson’s Weblog 
+http://fgiasson.com/blog/index.php/2007/05/24/the-music-data-space/|creationTime|2007-05-31T01:07:48Z +http://www.wired.com/2016/02/the-hunt-for-the-algorithms-that-drive-life-on-earth/|creationDate|2016-02-08 +http://www.wired.com/2016/02/the-hunt-for-the-algorithms-that-drive-life-on-earth/|tag|http://www.semanlink.net/tag/evolution +http://www.wired.com/2016/02/the-hunt-for-the-algorithms-that-drive-life-on-earth/|tag|http://www.semanlink.net/tag/machine_learning +http://www.wired.com/2016/02/the-hunt-for-the-algorithms-that-drive-life-on-earth/|title|The Hunt for the Algorithms That Drive Life on Earth WIRED +http://www.wired.com/2016/02/the-hunt-for-the-algorithms-that-drive-life-on-earth/|creationTime|2016-02-08T16:48:57Z +http://www.w3.org/2001/sw/wiki/Linking_patterns|creationDate|2012-01-03 +http://www.w3.org/2001/sw/wiki/Linking_patterns|tag|http://www.semanlink.net/tag/linked_data_dev +http://www.w3.org/2001/sw/wiki/Linking_patterns|tag|http://www.semanlink.net/tag/design_pattern +http://www.w3.org/2001/sw/wiki/Linking_patterns|title|Linking patterns - Semantic Web Standards +http://www.w3.org/2001/sw/wiki/Linking_patterns|creationTime|2012-01-03T12:04:36Z +http://fleursenpoche.antiopa.info/fleursenpoche.htm|creationDate|2010-02-23 +http://fleursenpoche.antiopa.info/fleursenpoche.htm|tag|http://www.semanlink.net/tag/iphone +http://fleursenpoche.antiopa.info/fleursenpoche.htm|tag|http://www.semanlink.net/tag/botanique +http://fleursenpoche.antiopa.info/fleursenpoche.htm|title|Fleurs en poche : identifiez les fleurs sauvages avec votre iPhone +http://fleursenpoche.antiopa.info/fleursenpoche.htm|creationTime|2010-02-23T10:55:49Z +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0167.html|creationDate|2007-01-09 +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0167.html|tag|http://www.semanlink.net/tag/tag_ontology +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0167.html|tag|http://www.semanlink.net/tag/semantic_web_w3_org +http://lists.w3.org/Archives/Public/semantic-web/2005Mar/0167.html|title|Tag ontology RFC from Richard Newman on 2005-03-23 (semantic-web@w3.org from March 2005) +http://www.gnowsis.org/|creationDate|2005-04-21 +http://www.gnowsis.org/|tag|http://www.semanlink.net/tag/semanlink_related +http://www.gnowsis.org/|tag|http://www.semanlink.net/tag/semantic_desktop +http://www.gnowsis.org/|title|http://www.gnowsis.org +http://www.gnowsis.org/|creationTime|2005-04-21T22:00:00Z +http://www.cnes.fr/web/CNES-fr/11590-gp-des-resultats-a-venir-pour-ptolemy.php|creationDate|2014-12-06 +http://www.cnes.fr/web/CNES-fr/11590-gp-des-resultats-a-venir-pour-ptolemy.php|tag|http://www.semanlink.net/tag/philae +http://www.cnes.fr/web/CNES-fr/11590-gp-des-resultats-a-venir-pour-ptolemy.php|title|Des résultats à venir pour PTOLEMY - CNES +http://www.cnes.fr/web/CNES-fr/11590-gp-des-resultats-a-venir-pour-ptolemy.php|creationTime|2014-12-06T11:17:58Z +http://semanticweb.com/semtechbiz-berlin-to-explore-semantics-in-the-auto-industry_b26151|creationDate|2012-02-08 +http://semanticweb.com/semtechbiz-berlin-to-explore-semantics-in-the-auto-industry_b26151|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://semanticweb.com/semtechbiz-berlin-to-explore-semantics-in-the-auto-industry_b26151|title|SemTechBiz Berlin to Explore Semantics in the Auto Industry - semanticweb.com +http://semanticweb.com/semtechbiz-berlin-to-explore-semantics-in-the-auto-industry_b26151|creationTime|2012-02-08T17:55:03Z 
+https://neo4j.com/blog/go-neo4j-financial-times/?utm_content=bufferc23f8&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-07-21 +https://neo4j.com/blog/go-neo4j-financial-times/?utm_content=bufferc23f8&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/semantic_data +https://neo4j.com/blog/go-neo4j-financial-times/?utm_content=bufferc23f8&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/neo4j +https://neo4j.com/blog/go-neo4j-financial-times/?utm_content=bufferc23f8&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/rdf_fails +https://neo4j.com/blog/go-neo4j-financial-times/?utm_content=bufferc23f8&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|comment|Why the FT team moved away from a failing semantic linked data platform +https://neo4j.com/blog/go-neo4j-financial-times/?utm_content=bufferc23f8&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|How Go + Neo4j Enabled the Financial Times to Deliver at Speed +https://neo4j.com/blog/go-neo4j-financial-times/?utm_content=bufferc23f8&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-07-21T18:12:44Z +http://news.bbc.co.uk/2/hi/programmes/click_online/4364168.stm|creationDate|2005-10-25 +http://news.bbc.co.uk/2/hi/programmes/click_online/4364168.stm|tag|http://www.semanlink.net/tag/rural_india +http://news.bbc.co.uk/2/hi/programmes/click_online/4364168.stm|tag|http://www.semanlink.net/tag/ntic_et_developpement +http://news.bbc.co.uk/2/hi/programmes/click_online/4364168.stm|title|BBC NEWS - Getting connected in rural India BBC NEWS Programmes Click Online Getting connected in rural India +http://news.bbc.co.uk/2/hi/programmes/click_online/4364168.stm|source|BBC +https://medium.com/predict/physicists-discover-time-can-move-in-discrete-chunks-ec5e826a7395|creationDate|2019-04-21 +https://medium.com/predict/physicists-discover-time-can-move-in-discrete-chunks-ec5e826a7395|tag|http://www.semanlink.net/tag/physique +https://medium.com/predict/physicists-discover-time-can-move-in-discrete-chunks-ec5e826a7395|tag|http://www.semanlink.net/tag/temps +https://medium.com/predict/physicists-discover-time-can-move-in-discrete-chunks-ec5e826a7395|title|Physicists discover time may move in discrete ‘chunks’ +https://medium.com/predict/physicists-discover-time-can-move-in-discrete-chunks-ec5e826a7395|creationTime|2019-04-21T12:43:09Z +http://code.google.com/p/owl1-1/wiki/S1NotesOwled2007|creationDate|2007-06-29 +http://code.google.com/p/owl1-1/wiki/S1NotesOwled2007|tag|http://www.semanlink.net/tag/owled_2007_and_fps +http://code.google.com/p/owl1-1/wiki/S1NotesOwled2007|title|S1NotesOwled2007 - owl1-1 - Google Code +http://code.google.com/p/owl1-1/wiki/S1NotesOwled2007|creationTime|2007-06-29T20:07:55Z +http://help.eclipse.org/help30/index.jsp?topic=/org.eclipse.jdt.doc.user/gettingStarted/qs-junit.htm|creationDate|2006-07-22 +http://help.eclipse.org/help30/index.jsp?topic=/org.eclipse.jdt.doc.user/gettingStarted/qs-junit.htm|tag|http://www.semanlink.net/tag/eclipse +http://help.eclipse.org/help30/index.jsp?topic=/org.eclipse.jdt.doc.user/gettingStarted/qs-junit.htm|tag|http://www.semanlink.net/tag/junit +http://help.eclipse.org/help30/index.jsp?topic=/org.eclipse.jdt.doc.user/gettingStarted/qs-junit.htm|title|Help - Eclipse Platform - JUnit 
+https://www.theverge.com/2019/3/10/18258134/alexandria-ocasio-cortez-automation-sxsw-2019|creationDate|2019-03-24 +https://www.theverge.com/2019/3/10/18258134/alexandria-ocasio-cortez-automation-sxsw-2019|tag|http://www.semanlink.net/tag/alexandria_ocasio_cortez +https://www.theverge.com/2019/3/10/18258134/alexandria-ocasio-cortez-automation-sxsw-2019|tag|http://www.semanlink.net/tag/jobbotization +https://www.theverge.com/2019/3/10/18258134/alexandria-ocasio-cortez-automation-sxsw-2019|tag|http://www.semanlink.net/tag/travail +https://www.theverge.com/2019/3/10/18258134/alexandria-ocasio-cortez-automation-sxsw-2019|comment|"> We should be working the least amount we’ve ever worked, if we were actually paid based on how much wealth we were producing. +> But we’re not. We’re paid on how little we’re desperate enough to accept" +https://www.theverge.com/2019/3/10/18258134/alexandria-ocasio-cortez-automation-sxsw-2019|title|Alexandria Ocasio-Cortez says ‘we should be excited about automation’ - The Verge +https://www.theverge.com/2019/3/10/18258134/alexandria-ocasio-cortez-automation-sxsw-2019|creationTime|2019-03-24T19:26:02Z +http://www.johndcook.com/blog/r_language_for_programmers/|creationDate|2015-01-13 +http://www.johndcook.com/blog/r_language_for_programmers/|tag|http://www.semanlink.net/tag/r +http://www.johndcook.com/blog/r_language_for_programmers/|title|The R language, for programmers John D. Cook +http://www.johndcook.com/blog/r_language_for_programmers/|creationTime|2015-01-13T21:32:49Z +http://www.lemonde.fr/planete/article/2011/11/28/le-conseil-d-etat-annule-la-suspension-de-culture-de-l-ogm-mon-810_1610317_3244.html#ens_id=1610322|creationDate|2011-11-28 +http://www.lemonde.fr/planete/article/2011/11/28/le-conseil-d-etat-annule-la-suspension-de-culture-de-l-ogm-mon-810_1610317_3244.html#ens_id=1610322|tag|http://www.semanlink.net/tag/mais_ogm +http://www.lemonde.fr/planete/article/2011/11/28/le-conseil-d-etat-annule-la-suspension-de-culture-de-l-ogm-mon-810_1610317_3244.html#ens_id=1610322|tag|http://www.semanlink.net/tag/cour_europeenne_de_justice +http://www.lemonde.fr/planete/article/2011/11/28/le-conseil-d-etat-annule-la-suspension-de-culture-de-l-ogm-mon-810_1610317_3244.html#ens_id=1610322|tag|http://www.semanlink.net/tag/monsanto +http://www.lemonde.fr/planete/article/2011/11/28/le-conseil-d-etat-annule-la-suspension-de-culture-de-l-ogm-mon-810_1610317_3244.html#ens_id=1610322|title|Le Conseil d'Etat annule la suspension de culture de l'OGM MON 810 +http://www.lemonde.fr/planete/article/2011/11/28/le-conseil-d-etat-annule-la-suspension-de-culture-de-l-ogm-mon-810_1610317_3244.html#ens_id=1610322|creationTime|2011-11-28T17:30:34Z +http://www.lemonde.fr/planete/article/2011/11/28/le-conseil-d-etat-annule-la-suspension-de-culture-de-l-ogm-mon-810_1610317_3244.html#ens_id=1610322|source|Le Monde +http://www.nytimes.com/video/world/africa/100000003107917/dying-of-ebola-at-the-hospital-door.html?partner=rss&emc=rss|creationDate|2014-09-15 +http://www.nytimes.com/video/world/africa/100000003107917/dying-of-ebola-at-the-hospital-door.html?partner=rss&emc=rss|tag|http://www.semanlink.net/tag/ebola +http://www.nytimes.com/video/world/africa/100000003107917/dying-of-ebola-at-the-hospital-door.html?partner=rss&emc=rss|title|Dying of Ebola at the Hospital Door - Video - NYTimes.com +http://www.nytimes.com/video/world/africa/100000003107917/dying-of-ebola-at-the-hospital-door.html?partner=rss&emc=rss|creationTime|2014-09-15T11:55:30Z 
+http://www.lemonde.fr/pixels/article/2014/12/29/le-ss7-le-reseau-des-operateurs-qui-permet-de-surveiller-vos-telephones-portables_4547194_4408996.html|creationDate|2014-12-30 +http://www.lemonde.fr/pixels/article/2014/12/29/le-ss7-le-reseau-des-operateurs-qui-permet-de-surveiller-vos-telephones-portables_4547194_4408996.html|tag|http://www.semanlink.net/tag/privacy_and_internet +http://www.lemonde.fr/pixels/article/2014/12/29/le-ss7-le-reseau-des-operateurs-qui-permet-de-surveiller-vos-telephones-portables_4547194_4408996.html|tag|http://www.semanlink.net/tag/mobile_phone +http://www.lemonde.fr/pixels/article/2014/12/29/le-ss7-le-reseau-des-operateurs-qui-permet-de-surveiller-vos-telephones-portables_4547194_4408996.html|title|Le SS7, le réseau des opérateurs qui permet de surveiller vos téléphones portables +http://www.lemonde.fr/pixels/article/2014/12/29/le-ss7-le-reseau-des-operateurs-qui-permet-de-surveiller-vos-telephones-portables_4547194_4408996.html|creationTime|2014-12-30T12:52:27Z +http://fr.wikipedia.org/wiki/Cet_obscur_objet_du_d%C3%A9sir|creationDate|2014-03-04 +http://fr.wikipedia.org/wiki/Cet_obscur_objet_du_d%C3%A9sir|tag|http://www.semanlink.net/tag/film +http://fr.wikipedia.org/wiki/Cet_obscur_objet_du_d%C3%A9sir|tag|http://www.semanlink.net/tag/luis_bunuel +http://fr.wikipedia.org/wiki/Cet_obscur_objet_du_d%C3%A9sir|title|Cet obscur objet du désir +http://fr.wikipedia.org/wiki/Cet_obscur_objet_du_d%C3%A9sir|creationTime|2014-03-04T01:46:19Z +http://semanticweb.com/semtechbiz-puts-spotlight-on-financial-services-business-ontology_b36768|creationDate|2013-04-27 +http://semanticweb.com/semtechbiz-puts-spotlight-on-financial-services-business-ontology_b36768|tag|http://www.semanlink.net/tag/semtechbiz +http://semanticweb.com/semtechbiz-puts-spotlight-on-financial-services-business-ontology_b36768|tag|http://www.semanlink.net/tag/fibo +http://semanticweb.com/semtechbiz-puts-spotlight-on-financial-services-business-ontology_b36768|title|SemTechBiz Puts Spotlight On Financial Industry Business Ontology - semanticweb.com +http://semanticweb.com/semtechbiz-puts-spotlight-on-financial-services-business-ontology_b36768|creationTime|2013-04-27T14:37:52Z +http://spinrdf.org/spin.html|creationDate|2011-01-10 +http://spinrdf.org/spin.html|tag|http://www.semanlink.net/tag/topbraid_spin +http://spinrdf.org/spin.html|comment|The basic idea is to use specific RDF properties to link classes with SPARQL queries so that those SPARQL queries can be executed with a given context. 
+http://spinrdf.org/spin.html|title|SPIN Modeling Vocabulary +http://spinrdf.org/spin.html|creationTime|2011-01-10T01:10:32Z +http://fr.slideshare.net/fabien_gandon/discovery-hub-onthefly-linked-data-exploratory-search|creationDate|2013-09-07 +http://fr.slideshare.net/fabien_gandon/discovery-hub-onthefly-linked-data-exploratory-search|tag|http://www.semanlink.net/tag/slides +http://fr.slideshare.net/fabien_gandon/discovery-hub-onthefly-linked-data-exploratory-search|tag|http://www.semanlink.net/tag/fabien_gandon +http://fr.slideshare.net/fabien_gandon/discovery-hub-onthefly-linked-data-exploratory-search|tag|http://www.semanlink.net/tag/linked_data_exploration +http://fr.slideshare.net/fabien_gandon/discovery-hub-onthefly-linked-data-exploratory-search|title|Discovery Hub: on-the-fly linked data exploratory search +http://fr.slideshare.net/fabien_gandon/discovery-hub-onthefly-linked-data-exploratory-search|creationTime|2013-09-07T09:42:40Z +ftp://reports.stanford.edu/pub/cstr/reports/csl/tr/89/400/CSL-TR-89-400.pdf|creationDate|2013-04-06 +ftp://reports.stanford.edu/pub/cstr/reports/csl/tr/89/400/CSL-TR-89-400.pdf|tag|http://www.semanlink.net/tag/sparse_distributed_memory +ftp://reports.stanford.edu/pub/cstr/reports/csl/tr/89/400/CSL-TR-89-400.pdf|title|"""Sparse Distributed Memory: Principles and Operation""" +ftp://reports.stanford.edu/pub/cstr/reports/csl/tr/89/400/CSL-TR-89-400.pdf|creationTime|2013-04-06T02:53:02Z +http://stackoverflow.com/questions/7651984/disable-the-cache-busting-in-dynamically-linked-external-javascript-files|creationDate|2012-02-01 +http://stackoverflow.com/questions/7651984/disable-the-cache-busting-in-dynamically-linked-external-javascript-files|tag|http://www.semanlink.net/tag/javascript +http://stackoverflow.com/questions/7651984/disable-the-cache-busting-in-dynamically-linked-external-javascript-files|tag|http://www.semanlink.net/tag/jquery +http://stackoverflow.com/questions/7651984/disable-the-cache-busting-in-dynamically-linked-external-javascript-files|tag|http://www.semanlink.net/tag/cache_buster +http://stackoverflow.com/questions/7651984/disable-the-cache-busting-in-dynamically-linked-external-javascript-files|title|JQuery / Disable the Cache Busting in Dynamically Linked External Javascript Files? 
+http://stackoverflow.com/questions/7651984/disable-the-cache-busting-in-dynamically-linked-external-javascript-files|creationTime|2012-02-01T00:38:04Z
http://www.w3.org/TR/powder-dr/|creationDate|2008-05-14
http://www.w3.org/TR/powder-dr/|tag|http://www.semanlink.net/tag/w3c_working_draft
http://www.w3.org/TR/powder-dr/|tag|http://www.semanlink.net/tag/powder
http://www.w3.org/TR/powder-dr/|title|Protocol for Web Description Resources (POWDER): Description Resources
http://www.w3.org/TR/powder-dr/|creationTime|2008-05-14T21:20:50Z
http://archeo.blog.lemonde.fr/2018/01/23/lhistoire-du-femur-de-toumai/|creationDate|2018-01-23
http://archeo.blog.lemonde.fr/2018/01/23/lhistoire-du-femur-de-toumai/|tag|http://www.semanlink.net/tag/toumai
http://archeo.blog.lemonde.fr/2018/01/23/lhistoire-du-femur-de-toumai/|title|L’histoire du fémur de Toumaï Dans les pas des archéologues
http://archeo.blog.lemonde.fr/2018/01/23/lhistoire-du-femur-de-toumai/|creationTime|2018-01-23T13:44:20Z
http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1427|creationDate|2008-09-01
http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1427|tag|http://www.semanlink.net/tag/virtuoso
http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1427|tag|http://www.semanlink.net/tag/kingsley_idehen
http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1427|tag|http://www.semanlink.net/tag/linked_data
http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1427|title|Connecting Freebase, Wikipedia, DBpedia, and other Linked Data Spaces
http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1427|creationTime|2008-09-01T13:41:19Z
http://www.lavoutenubienne.org/|creationDate|2009-01-23
http://www.lavoutenubienne.org/|tag|http://www.semanlink.net/tag/voute_nubienne
http://www.lavoutenubienne.org/|comment|La voûte nubienne est une technique africaine de construction de toits en terre Au Sahel, c’est la seule alternative à l’utilisation de bois rare et de tôles inadaptées - chaudes et chères - qui obligent les populations à une architecture les enfermant dans un cercle vicieux de pauvreté
http://www.lavoutenubienne.org/|title|Voûte nubienne
http://www.lavoutenubienne.org/|creationTime|2009-01-23T20:38:04Z
http://productdb.org/|creationDate|2011-06-23
http://productdb.org/|tag|http://www.semanlink.net/tag/ian_davis
http://productdb.org/|tag|http://www.semanlink.net/tag/linked_data
http://productdb.org/|comment|ProductDB aims to be the World's most comprehensive and open source of product data. Not only do we want to create a page for every product in the world, we want to connect the underlying structured data together into one huge interlinked dataset. All the data is published as Linked Data.
+http://productdb.org/|title|ProductDB +http://productdb.org/|creationTime|2011-06-23T16:27:34Z +http://www.w3.org/2000/10/swap/doc/Rules|creationDate|2007-02-08 +http://www.w3.org/2000/10/swap/doc/Rules|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/2000/10/swap/doc/Rules|tag|http://www.semanlink.net/tag/n3 +http://www.w3.org/2000/10/swap/doc/Rules|tag|http://www.semanlink.net/tag/rules +http://www.w3.org/2000/10/swap/doc/Rules|title|Rules and Formulae +http://www.w3.org/2000/10/swap/doc/Rules|creationTime|2007-02-08T23:34:27Z +http://www.enterprisedatajournal.com/article/bringing-semantic-technologies-enterprise-data.html|creationDate|2010-09-19 +http://www.enterprisedatajournal.com/article/bringing-semantic-technologies-enterprise-data.html|tag|http://www.semanlink.net/tag/master_data_management +http://www.enterprisedatajournal.com/article/bringing-semantic-technologies-enterprise-data.html|tag|http://www.semanlink.net/tag/business_intelligence_and_semantic_web +http://www.enterprisedatajournal.com/article/bringing-semantic-technologies-enterprise-data.html|tag|http://www.semanlink.net/tag/semantic_enterprise +http://www.enterprisedatajournal.com/article/bringing-semantic-technologies-enterprise-data.html|tag|http://www.semanlink.net/tag/jeffrey_t_pollock +http://www.enterprisedatajournal.com/article/bringing-semantic-technologies-enterprise-data.html|tag|http://www.semanlink.net/tag/sap +http://www.enterprisedatajournal.com/article/bringing-semantic-technologies-enterprise-data.html|tag|http://www.semanlink.net/tag/paul_miller +http://www.enterprisedatajournal.com/article/bringing-semantic-technologies-enterprise-data.html|title|Bringing Semantic Technologies to Enterprise Data Enterprise Data Journal +http://www.enterprisedatajournal.com/article/bringing-semantic-technologies-enterprise-data.html|creationTime|2010-09-19T17:53:23Z +http://hdalab.iri-research.org/hdalab/|creationDate|2012-10-09 +http://hdalab.iri-research.org/hdalab/|tag|http://www.semanlink.net/tag/histoire_de_l_art +http://hdalab.iri-research.org/hdalab/|tag|http://www.semanlink.net/tag/bertrand_sajus +http://hdalab.iri-research.org/hdalab/|tag|http://www.semanlink.net/tag/lod_use_case +http://hdalab.iri-research.org/hdalab/|tag|http://www.semanlink.net/tag/alexandre_monnin +http://hdalab.iri-research.org/hdalab/|tag|http://www.semanlink.net/tag/patrimoine +http://hdalab.iri-research.org/hdalab/|tag|http://www.semanlink.net/tag/culture_et_sem_web +http://hdalab.iri-research.org/hdalab/|comment|Explorez autrement le portail Histoire des Arts +http://hdalab.iri-research.org/hdalab/|title|HdA Lab +http://hdalab.iri-research.org/hdalab/|creationTime|2012-10-09T11:28:47Z +http://europeecologie22mars.org/|creationDate|2010-03-22 +http://europeecologie22mars.org/|tag|http://www.semanlink.net/tag/cohn_bendit +http://europeecologie22mars.org/|tag|http://www.semanlink.net/tag/europe_ecologie +http://europeecologie22mars.org/|comment|par Daniel Cohn-Bendit +http://europeecologie22mars.org/|title|L'appel du 22 mars +http://europeecologie22mars.org/|creationTime|2010-03-22T12:09:50Z +http://www.projectliberty.org/resources/whitepapers/Web_Services_Nokia.pdf|creationDate|2005-10-25 +http://www.projectliberty.org/resources/whitepapers/Web_Services_Nokia.pdf|tag|http://www.semanlink.net/tag/soa +http://www.projectliberty.org/resources/whitepapers/Web_Services_Nokia.pdf|tag|http://www.semanlink.net/tag/web_services +http://www.cs.princeton.edu/~blei/blei-mlss-2012.pdf|creationDate|2013-08-21 
+http://www.cs.princeton.edu/~blei/blei-mlss-2012.pdf|tag|http://www.semanlink.net/tag/slides
http://www.cs.princeton.edu/~blei/blei-mlss-2012.pdf|tag|http://www.semanlink.net/tag/topic_modeling
http://www.cs.princeton.edu/~blei/blei-mlss-2012.pdf|tag|http://www.semanlink.net/tag/david_blei
http://www.cs.princeton.edu/~blei/blei-mlss-2012.pdf|title|Probabilistic Topic Models - blei-mlss-2012.pdf (slides)
http://www.cs.princeton.edu/~blei/blei-mlss-2012.pdf|creationTime|2013-08-21T18:25:31Z
http://esw.w3.org/topic/SemanticWebTools|creationDate|2007-06-23
http://esw.w3.org/topic/SemanticWebTools|tag|http://www.semanlink.net/tag/semantic_web_tools
http://esw.w3.org/topic/SemanticWebTools|title|SemanticWebTools - ESW Wiki
http://esw.w3.org/topic/SemanticWebTools|creationTime|2007-06-23T13:32:51Z
https://lejournal.cnrs.fr/articles/la-langue-electronique-plus-forte-que-nos-papilles|creationDate|2014-11-25
https://lejournal.cnrs.fr/articles/la-langue-electronique-plus-forte-que-nos-papilles|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne
https://lejournal.cnrs.fr/articles/la-langue-electronique-plus-forte-que-nos-papilles|tag|http://www.semanlink.net/tag/langue_electronique
https://lejournal.cnrs.fr/articles/la-langue-electronique-plus-forte-que-nos-papilles|title|La langue électronique plus forte que nos papilles ? CNRS le journal
https://lejournal.cnrs.fr/articles/la-langue-electronique-plus-forte-que-nos-papilles|creationTime|2014-11-25T15:35:18Z
http://www.w3.org/2001/sw/sweo/public/BusinessCase/|creationDate|2010-07-30
http://www.w3.org/2001/sw/sweo/public/BusinessCase/|tag|http://www.semanlink.net/tag/business_case_semantic_web
http://www.w3.org/2001/sw/sweo/public/BusinessCase/|tag|http://www.semanlink.net/tag/jeffrey_t_pollock
http://www.w3.org/2001/sw/sweo/public/BusinessCase/|tag|http://www.semanlink.net/tag/sweo_interest_group
http://www.w3.org/2001/sw/sweo/public/BusinessCase/|title|Semantic Web Education and Outreach (SWEO) Interest Group / Business case
http://www.w3.org/2001/sw/sweo/public/BusinessCase/|creationTime|2010-07-30T10:46:56Z
http://codahale.com/what-makes-jersey-interesting-injection-providers/|creationDate|2014-09-16
http://codahale.com/what-makes-jersey-interesting-injection-providers/|tag|http://www.semanlink.net/tag/jersey
http://codahale.com/what-makes-jersey-interesting-injection-providers/|comment|"Jersey has an internal dependency injection system which allows you to write small, focused classes to extract aspects of an HTTP request–in our case, the request’s locale–and inject them into your resource classes as an object of an appropriate type.
+" +http://codahale.com/what-makes-jersey-interesting-injection-providers/|title|What Makes Jersey Interesting: Injection Providers codahale.com +http://codahale.com/what-makes-jersey-interesting-injection-providers/|creationTime|2014-09-16T13:56:01Z +http://news.bbc.co.uk/onthisday/hi/themes/science_and_technology/space/|creationDate|2005-03-24 +http://news.bbc.co.uk/onthisday/hi/themes/science_and_technology/space/|tag|http://www.semanlink.net/tag/espace +http://news.bbc.co.uk/onthisday/hi/themes/science_and_technology/space/|tag|http://www.semanlink.net/tag/bbc +http://news.bbc.co.uk/onthisday/hi/themes/science_and_technology/space/|title|BBC - On This Day - Space +http://www.rochester.edu/news/show.php?id=2963|creationDate|2007-09-13 +http://www.rochester.edu/news/show.php?id=2963|tag|http://www.semanlink.net/tag/bacterial_to_animal_gene_transfer +http://www.rochester.edu/news/show.php?id=2963|tag|http://www.semanlink.net/tag/sequencage_du_genome +http://www.rochester.edu/news/show.php?id=2963|tag|http://www.semanlink.net/tag/craig_venter_institute +http://www.rochester.edu/news/show.php?id=2963|tag|http://www.semanlink.net/tag/genetique +http://www.rochester.edu/news/show.php?id=2963|tag|http://www.semanlink.net/tag/horizontal_gene_transfer +http://www.rochester.edu/news/show.php?id=2963|tag|http://www.semanlink.net/tag/evolution +http://www.rochester.edu/news/show.php?id=2963|comment|Bacterial to Animal Gene Transfers Now Shown to be Widespread, with Implications for Evolution and Control of Diseases and Pests +http://www.rochester.edu/news/show.php?id=2963|title|One Species' Genome Discovered Inside Another's - University of Rochester Press Releases +http://www.rochester.edu/news/show.php?id=2963|creationTime|2007-09-13T22:09:27Z +http://www.ldodds.com/blog/archives/000283.html|creationDate|2006-05-22 +http://www.ldodds.com/blog/archives/000283.html|tag|http://www.semanlink.net/tag/ajax +http://www.ldodds.com/blog/archives/000283.html|tag|http://www.semanlink.net/tag/xtech_2006 +http://www.ldodds.com/blog/archives/000283.html|tag|http://www.semanlink.net/tag/sparql +http://www.ldodds.com/blog/archives/000283.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/archives/000283.html|tag|http://www.semanlink.net/tag/json +http://www.ldodds.com/blog/archives/000283.html|comment|An overview of SPARQL, with an emphasis on the SPARQL protocol and how SPARQL can benefit Web 2.0/AJAX applications. +http://www.ldodds.com/blog/archives/000283.html|title|Lost Boy: XTech 2006: SPARQLing Services +http://www.pbs.org/wgbh/nova/nature/tongue-parasite.html|creationDate|2013-04-06 +http://www.pbs.org/wgbh/nova/nature/tongue-parasite.html|tag|http://www.semanlink.net/tag/crustace +http://www.pbs.org/wgbh/nova/nature/tongue-parasite.html|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://www.pbs.org/wgbh/nova/nature/tongue-parasite.html|tag|http://www.semanlink.net/tag/pbs_program +http://www.pbs.org/wgbh/nova/nature/tongue-parasite.html|tag|http://www.semanlink.net/tag/parasitisme +http://www.pbs.org/wgbh/nova/nature/tongue-parasite.html|comment|Meet the tongue-eating isopod, Cymothoa exigua. This marine parasite feasts on a fish's tongue and then becomes its tongue. You'll never eat seafood again. 
+http://www.pbs.org/wgbh/nova/nature/tongue-parasite.html|title|NOVA The Tongue-Eating Parasite +http://www.pbs.org/wgbh/nova/nature/tongue-parasite.html|creationTime|2013-04-06T18:07:22Z +http://www.newscientist.com/article.ns?id=dn8826&print=true|creationDate|2006-03-11 +http://www.newscientist.com/article.ns?id=dn8826&print=true|tag|http://www.semanlink.net/tag/mental_typewriter +http://www.newscientist.com/article.ns?id=dn8826&print=true|comment|...says users can operate the device just 20 minutes after going through 150 cursor moves in their minds. This is because the device rapidly learns to recognise activity in the area of a person's motor cortex, the area of the brain associated with movement. +http://www.newscientist.com/article.ns?id=dn8826&print=true|title|'Mental typewriter' controlled by thought alone - Breaking News New Scientist +http://lists.w3.org/Archives/Public/public-linked-json/2014Dec/0001.html|creationDate|2015-02-19 +http://lists.w3.org/Archives/Public/public-linked-json/2014Dec/0001.html|tag|http://www.semanlink.net/tag/json +http://lists.w3.org/Archives/Public/public-linked-json/2014Dec/0001.html|tag|http://www.semanlink.net/tag/json_ld +http://lists.w3.org/Archives/Public/public-linked-json/2014Dec/0001.html|tag|http://www.semanlink.net/tag/public_linked_json_w3_org +http://lists.w3.org/Archives/Public/public-linked-json/2014Dec/0001.html|title|Re: Limitations on JSON which conforms to JSON-LD spec? from Nicholas Bollweg on 2014-12-01 (public-linked-json@w3.org from December 2014) +http://lists.w3.org/Archives/Public/public-linked-json/2014Dec/0001.html|creationTime|2015-02-19T01:31:53Z +http://ebiquity.umbc.edu/blogger/2006/05/01/2007-darpa-grand-challenge-urban-driving/|creationDate|2006-05-05 +http://ebiquity.umbc.edu/blogger/2006/05/01/2007-darpa-grand-challenge-urban-driving/|tag|http://www.semanlink.net/tag/grand_challenge +http://ebiquity.umbc.edu/blogger/2006/05/01/2007-darpa-grand-challenge-urban-driving/|comment|"To succeed, vehicles must autonomously obey traffic laws while merging +into moving traffic, navigating traffic circles, negotiating busy +intersections and avoiding obstacles." +http://ebiquity.umbc.edu/blogger/2006/05/01/2007-darpa-grand-challenge-urban-driving/|title|2007 DARPA Grand Challenge: urban driving +http://blog.semantic-web.at/2010/02/16/linking-open-data-to-thesaurus-management/|creationDate|2010-03-07 +http://blog.semantic-web.at/2010/02/16/linking-open-data-to-thesaurus-management/|tag|http://www.semanlink.net/tag/skos +http://blog.semantic-web.at/2010/02/16/linking-open-data-to-thesaurus-management/|tag|http://www.semanlink.net/tag/linked_data +http://blog.semantic-web.at/2010/02/16/linking-open-data-to-thesaurus-management/|tag|http://www.semanlink.net/tag/thesaurus +http://blog.semantic-web.at/2010/02/16/linking-open-data-to-thesaurus-management/|comment|PoolParty service, a SKOS-based thesaurus management tool with linked data capabilities. 
+http://blog.semantic-web.at/2010/02/16/linking-open-data-to-thesaurus-management/|title|The Semantic Puzzle Linking Open Data to Thesaurus Management
http://blog.semantic-web.at/2010/02/16/linking-open-data-to-thesaurus-management/|creationTime|2010-03-07T21:44:20Z
http://blog.semantic-web.at/2008/11/05/short-semantic-mediawiki-tutorial/|creationDate|2008-11-06
http://blog.semantic-web.at/2008/11/05/short-semantic-mediawiki-tutorial/|tag|http://www.semanlink.net/tag/tutorial
http://blog.semantic-web.at/2008/11/05/short-semantic-mediawiki-tutorial/|tag|http://www.semanlink.net/tag/semantic_media_wiki
http://blog.semantic-web.at/2008/11/05/short-semantic-mediawiki-tutorial/|comment|We opted for Semantic MediaWiki (SMW) and the extensions Semantic Forms and Semantic Drilldown. In this blog post we’ll take a look at the handy features you get with these.
http://blog.semantic-web.at/2008/11/05/short-semantic-mediawiki-tutorial/|title|The Semantic Puzzle Short Semantic MediaWiki Tutorial (with link to sandbox)
http://blog.semantic-web.at/2008/11/05/short-semantic-mediawiki-tutorial/|creationTime|2008-11-06T14:25:14Z
http://blog.octo.com/strategie-d-architecture-api/|creationDate|2017-05-15
http://blog.octo.com/strategie-d-architecture-api/|tag|http://www.semanlink.net/tag/api_management
http://blog.octo.com/strategie-d-architecture-api/|tag|http://www.semanlink.net/tag/api
http://blog.octo.com/strategie-d-architecture-api/|tag|http://www.semanlink.net/tag/octo
http://blog.octo.com/strategie-d-architecture-api/|title|Stratégie d’architecture API OCTO talks !
http://blog.octo.com/strategie-d-architecture-api/|creationTime|2017-05-15T11:16:17Z
https://patentpdw.files.wordpress.com/2017/10/3-younge-and-kuhn.pdf|creationDate|2019-02-19
https://patentpdw.files.wordpress.com/2017/10/3-younge-and-kuhn.pdf|tag|http://www.semanlink.net/tag/ip_ir_ml_ia
https://patentpdw.files.wordpress.com/2017/10/3-younge-and-kuhn.pdf|title|Patent Similarity.
A Big Data Method for Patent Analysis (2015)
+https://patentpdw.files.wordpress.com/2017/10/3-younge-and-kuhn.pdf|creationTime|2019-02-19T21:08:41Z
http://www.netvouz.com/action/searchBookmarksI?query=semanlink|creationDate|2006-05-28
http://www.netvouz.com/action/searchBookmarksI?query=semanlink|tag|http://www.semanlink.net/tag/linkto_semanlink
http://www.netvouz.com/action/searchBookmarksI?query=semanlink|comment|Netvouz is an online social bookmark manager
http://www.netvouz.com/action/searchBookmarksI?query=semanlink|title|Semanlink on Netvouz
http://labs.antidot.net/museesdefrance|creationDate|2014-02-28
http://labs.antidot.net/museesdefrance|tag|http://www.semanlink.net/tag/musee
http://labs.antidot.net/museesdefrance|tag|http://www.semanlink.net/tag/antidot
http://labs.antidot.net/museesdefrance|tag|http://www.semanlink.net/tag/gautier_poupeau
http://labs.antidot.net/museesdefrance|title|Les musées en France
http://labs.antidot.net/museesdefrance|creationTime|2014-02-28T14:42:14Z
http://rdf123.umbc.edu/|creationDate|2007-09-05
http://rdf123.umbc.edu/|tag|http://www.semanlink.net/tag/rdf123
http://rdf123.umbc.edu/|title|RDF123 Homepage
http://rdf123.umbc.edu/|creationTime|2007-09-05T15:06:42Z
https://fr.slideshare.net/apigee/hateoas-101-opinionated-introduction-to-a-rest-api-style|creationDate|2017-04-12
https://fr.slideshare.net/apigee/hateoas-101-opinionated-introduction-to-a-rest-api-style|tag|http://www.semanlink.net/tag/apigee
https://fr.slideshare.net/apigee/hateoas-101-opinionated-introduction-to-a-rest-api-style|tag|http://www.semanlink.net/tag/hateoas
https://fr.slideshare.net/apigee/hateoas-101-opinionated-introduction-to-a-rest-api-style|tag|http://www.semanlink.net/tag/api_design
https://fr.slideshare.net/apigee/hateoas-101-opinionated-introduction-to-a-rest-api-style|tag|http://www.semanlink.net/tag/slideshare
https://fr.slideshare.net/apigee/hateoas-101-opinionated-introduction-to-a-rest-api-style|comment|bookmarked because I do not understand
https://fr.slideshare.net/apigee/hateoas-101-opinionated-introduction-to-a-rest-api-style|title|HATEOAS 101 - Opinionated Introduction to a REST API Style
https://fr.slideshare.net/apigee/hateoas-101-opinionated-introduction-to-a-rest-api-style|creationTime|2017-04-12T13:41:35Z
https://sgfin.github.io/learning-resources/|creationDate|2018-11-08
https://sgfin.github.io/learning-resources/|tag|http://www.semanlink.net/tag/machine_learning
https://sgfin.github.io/learning-resources/|tag|http://www.semanlink.net/tag/cheat_sheet
https://sgfin.github.io/learning-resources/|tag|http://www.semanlink.net/tag/tutorial
https://sgfin.github.io/learning-resources/|title|ML Resources
https://sgfin.github.io/learning-resources/|creationTime|2018-11-08T15:34:27Z
http://www.bbc.co.uk/nature/13982886|creationDate|2011-07-05
http://www.bbc.co.uk/nature/13982886|tag|http://www.semanlink.net/tag/curiosite_naturelle
http://www.bbc.co.uk/nature/13982886|tag|http://www.semanlink.net/tag/fourmi
http://www.bbc.co.uk/nature/13982886|comment|The ant colonies are often invaded by slavemaker ants, which steal their pupae
http://www.bbc.co.uk/nature/13982886|title|BBC Nature - Warring ants know their enemies
http://www.bbc.co.uk/nature/13982886|creationTime|2011-07-05T13:36:42Z
https://stanford.edu/~shervine/teaching/cs-229.html|creationDate|2018-09-20
https://stanford.edu/~shervine/teaching/cs-229.html|tag|http://www.semanlink.net/tag/machine_learning
+https://stanford.edu/~shervine/teaching/cs-229.html|tag|http://www.semanlink.net/tag/cheat_sheet
https://stanford.edu/~shervine/teaching/cs-229.html|title|Machine Learning - Cheatsheet (Teaching - CS 229)
https://stanford.edu/~shervine/teaching/cs-229.html|creationTime|2018-09-20T13:21:47Z
http://wiki.apache.org/solr/SchemaXml|creationDate|2015-03-09
http://wiki.apache.org/solr/SchemaXml|tag|http://www.semanlink.net/tag/solr_documentation
http://wiki.apache.org/solr/SchemaXml|title|SchemaXml - Solr Wiki
http://wiki.apache.org/solr/SchemaXml|creationTime|2015-03-09T11:06:53Z
http://3spots.blogspot.com/2006/02/30-social-bookmarks-add-to-footer.html|creationDate|2006-05-28
http://3spots.blogspot.com/2006/02/30-social-bookmarks-add-to-footer.html|tag|http://www.semanlink.net/tag/bookmarks
http://3spots.blogspot.com/2006/02/30-social-bookmarks-add-to-footer.html|title|3spots: 30 Social Bookmarks 'Add to' footer links for blogs
http://www.ikelin.com/implementing-etag-caching-jersey/|creationDate|2015-05-14
http://www.ikelin.com/implementing-etag-caching-jersey/|tag|http://www.semanlink.net/tag/jersey_cache_control
http://www.ikelin.com/implementing-etag-caching-jersey/|title|Implementing ETag Caching with Jersey
http://www.ikelin.com/implementing-etag-caching-jersey/|creationTime|2015-05-14T13:19:19Z
https://jep-taln2016.limsi.fr/actes/Actes%20JTR-2016/Papers/T83.pdf|creationDate|2017-07-17
https://jep-taln2016.limsi.fr/actes/Actes%20JTR-2016/Papers/T83.pdf|tag|http://www.semanlink.net/tag/maali_mnasri
https://jep-taln2016.limsi.fr/actes/Actes%20JTR-2016/Papers/T83.pdf|tag|http://www.semanlink.net/tag/automatic_summarization
https://jep-taln2016.limsi.fr/actes/Actes%20JTR-2016/Papers/T83.pdf|tag|http://www.semanlink.net/tag/gael_de_chalendar
https://jep-taln2016.limsi.fr/actes/Actes%20JTR-2016/Papers/T83.pdf|title|Intégration de la similarité entre phrases comme critère pour le résumé multi-document (2016)
https://jep-taln2016.limsi.fr/actes/Actes%20JTR-2016/Papers/T83.pdf|creationTime|2017-07-17T00:21:08Z
http://www.proxml.be/users/paul/weblog/40127/SPARQL_for_SKOS_integrity_constraints.html|creationDate|2010-12-16
http://www.proxml.be/users/paul/weblog/40127/SPARQL_for_SKOS_integrity_constraints.html|tag|http://www.semanlink.net/tag/constraints_in_the_sw
http://www.proxml.be/users/paul/weblog/40127/SPARQL_for_SKOS_integrity_constraints.html|tag|http://www.semanlink.net/tag/skos
http://www.proxml.be/users/paul/weblog/40127/SPARQL_for_SKOS_integrity_constraints.html|tag|http://www.semanlink.net/tag/pellet
http://www.proxml.be/users/paul/weblog/40127/SPARQL_for_SKOS_integrity_constraints.html|tag|http://www.semanlink.net/tag/sparql
http://www.proxml.be/users/paul/weblog/40127/SPARQL_for_SKOS_integrity_constraints.html|tag|http://www.semanlink.net/tag/topbraid_spin
http://www.proxml.be/users/paul/weblog/40127/SPARQL_for_SKOS_integrity_constraints.html|title|SPARQL for SKOS integrity constraints
http://www.proxml.be/users/paul/weblog/40127/SPARQL_for_SKOS_integrity_constraints.html|creationTime|2010-12-16T15:41:04Z
http://lavryengineering.com/pdfs/lavry-sampling-theory.pdf|creationDate|2012-12-30
http://lavryengineering.com/pdfs/lavry-sampling-theory.pdf|tag|http://www.semanlink.net/tag/digital_audio
http://lavryengineering.com/pdfs/lavry-sampling-theory.pdf|title|Sampling theory for digital audio
http://lavryengineering.com/pdfs/lavry-sampling-theory.pdf|creationTime|2012-12-30T01:28:49Z
https://arxiv.org/abs/1307.5101|creationDate|2018-03-04
+https://arxiv.org/abs/1307.5101|tag|http://www.semanlink.net/tag/multi_label_classification +https://arxiv.org/abs/1307.5101|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1307.5101|arxiv_author|Inderjit S. Dhillon +https://arxiv.org/abs/1307.5101|arxiv_author|Hsiang-Fu Yu +https://arxiv.org/abs/1307.5101|arxiv_author|Prateek Jain +https://arxiv.org/abs/1307.5101|arxiv_author|Purushottam Kar +https://arxiv.org/abs/1307.5101|title|[1307.5101] Large-scale Multi-label Learning with Missing Labels +https://arxiv.org/abs/1307.5101|creationTime|2018-03-04T17:05:39Z +https://arxiv.org/abs/1307.5101|arxiv_summary|"The multi-label classification problem has generated significant interest in +recent years. However, existing approaches do not adequately address two key +challenges: (a) the ability to tackle problems with a large number (say +millions) of labels, and (b) the ability to handle data with missing labels. In +this paper, we directly address both these problems by studying the multi-label +problem in a generic empirical risk minimization (ERM) framework. Our +framework, despite being simple, is surprisingly able to encompass several +recent label-compression based methods which can be derived as special cases of +our method. To optimize the ERM problem, we develop techniques that exploit the +structure of specific loss functions - such as the squared loss function - to +offer efficient algorithms. We further show that our learning framework admits +formal excess risk bounds even in the presence of missing labels. Our risk +bounds are tight and demonstrate better generalization performance for low-rank +promoting trace-norm regularization when compared to (rank insensitive) +Frobenius norm regularization. Finally, we present extensive empirical results +on a variety of benchmark datasets and show that our methods perform +significantly better than existing label compression based methods and can +scale up to very large datasets such as the Wikipedia dataset." +https://arxiv.org/abs/1307.5101|arxiv_firstAuthor|Hsiang-Fu Yu +https://arxiv.org/abs/1307.5101|arxiv_updated|2013-11-25T16:57:43Z +https://arxiv.org/abs/1307.5101|arxiv_title|Large-scale Multi-label Learning with Missing Labels +https://arxiv.org/abs/1307.5101|arxiv_published|2013-07-18T23:55:55Z +https://arxiv.org/abs/1307.5101|arxiv_num|1307.5101 +http://wiki.goodrelations-vocabulary.org/GoodRelations_for_manufacturers|creationDate|2011-06-10 +http://wiki.goodrelations-vocabulary.org/GoodRelations_for_manufacturers|tag|http://www.semanlink.net/tag/goodrelations +http://wiki.goodrelations-vocabulary.org/GoodRelations_for_manufacturers|comment|If you are a manufacturer of commodities, you can use GoodRelations to help every small Web shop on the globe to sell your products. 
+http://wiki.goodrelations-vocabulary.org/GoodRelations_for_manufacturers|title|GoodRelations for manufacturers
http://wiki.goodrelations-vocabulary.org/GoodRelations_for_manufacturers|creationTime|2011-06-10T13:03:24Z
http://www.holygoat.co.uk/applications/iphoto-rdf/iphoto-rdf|creationDate|2005-04-21
http://www.holygoat.co.uk/applications/iphoto-rdf/iphoto-rdf|tag|http://www.semanlink.net/tag/iphoto
http://www.holygoat.co.uk/applications/iphoto-rdf/iphoto-rdf|tag|http://www.semanlink.net/tag/rdf
http://www.holygoat.co.uk/applications/iphoto-rdf/iphoto-rdf|title|RDF Exporter Plugin for iPhoto
http://owl-workshop.man.ac.uk/acceptedPosition/submission_31.pdf|creationDate|2007-04-02
http://owl-workshop.man.ac.uk/acceptedPosition/submission_31.pdf|tag|http://www.semanlink.net/tag/thesaurus
http://owl-workshop.man.ac.uk/acceptedPosition/submission_31.pdf|tag|http://www.semanlink.net/tag/fao
http://owl-workshop.man.ac.uk/acceptedPosition/submission_31.pdf|tag|http://www.semanlink.net/tag/skos
http://owl-workshop.man.ac.uk/acceptedPosition/submission_31.pdf|tag|http://www.semanlink.net/tag/owl
http://owl-workshop.man.ac.uk/acceptedPosition/submission_31.pdf|tag|http://www.semanlink.net/tag/ontologies
http://owl-workshop.man.ac.uk/acceptedPosition/submission_31.pdf|comment|From AGROVOC to the Agricultural Ontology Service / Concept Server
http://owl-workshop.man.ac.uk/acceptedPosition/submission_31.pdf|title|An OWL model for managing ontologies in the agricultural domain
http://owl-workshop.man.ac.uk/acceptedPosition/submission_31.pdf|creationTime|2007-04-02T22:12:19Z
http://www.esa.int/esaCP/SEMEV82DU8E_index_2.html|creationDate|2005-06-27
http://www.esa.int/esaCP/SEMEV82DU8E_index_2.html|tag|http://www.semanlink.net/tag/mars_express
http://www.esa.int/esaCP/SEMEV82DU8E_index_2.html|title|ESA Portal - Mars Express radar ready to work
https://research.googleblog.com/2017/08/transformer-novel-neural-network.html|creationDate|2017-09-01
https://research.googleblog.com/2017/08/transformer-novel-neural-network.html|tag|http://www.semanlink.net/tag/neural_machine_translation
https://research.googleblog.com/2017/08/transformer-novel-neural-network.html|tag|http://www.semanlink.net/tag/google_research
https://research.googleblog.com/2017/08/transformer-novel-neural-network.html|title|Research Blog: Transformer: A Novel Neural Network Architecture for Language Understanding
https://research.googleblog.com/2017/08/transformer-novel-neural-network.html|creationTime|2017-09-01T18:52:27Z
https://biotext.berkeley.edu/papers/psb03.pdf|creationDate|2019-04-03
https://biotext.berkeley.edu/papers/psb03.pdf|tag|http://www.semanlink.net/tag/acronyms_nlp
https://biotext.berkeley.edu/papers/psb03.pdf|tag|http://www.semanlink.net/tag/frequently_cited_paper
https://biotext.berkeley.edu/papers/psb03.pdf|title|A SIMPLE ALGORITHM FOR IDENTIFYING ABBREVIATION DEFINITIONS IN BIOMEDICAL TEXT
https://biotext.berkeley.edu/papers/psb03.pdf|creationTime|2019-04-03T13:10:41Z
http://blog.ted.com/2014/01/31/the-attack-on-our-higher-education-system-and-why-we-should-welcome-it/|creationDate|2014-02-03
http://blog.ted.com/2014/01/31/the-attack-on-our-higher-education-system-and-why-we-should-welcome-it/|tag|http://www.semanlink.net/tag/mooc
http://blog.ted.com/2014/01/31/the-attack-on-our-higher-education-system-and-why-we-should-welcome-it/|title|The attack on higher ed — and why we should welcome it TED Blog
+http://blog.ted.com/2014/01/31/the-attack-on-our-higher-education-system-and-why-we-should-welcome-it/|creationTime|2014-02-03T23:34:26Z +http://www.w3.org/2012/ldp/charter.html|creationDate|2012-07-30 +http://www.w3.org/2012/ldp/charter.html|tag|http://www.semanlink.net/tag/ldp_w3c +http://www.w3.org/2012/ldp/charter.html|title|W3C Linked Data Platform Working Group Charter +http://www.w3.org/2012/ldp/charter.html|creationTime|2012-07-30T23:52:21Z +http://internetactu.blog.lemonde.fr/2014/03/21/linternet-a-t-il-vraiment-fait-la-demonstration-de-notre-capacite-a-collaborer/|creationDate|2014-03-22 +http://internetactu.blog.lemonde.fr/2014/03/21/linternet-a-t-il-vraiment-fait-la-demonstration-de-notre-capacite-a-collaborer/|tag|http://www.semanlink.net/tag/management +http://internetactu.blog.lemonde.fr/2014/03/21/linternet-a-t-il-vraiment-fait-la-demonstration-de-notre-capacite-a-collaborer/|title|Internet : outil de collaboration ou de domination ? InternetActu +http://internetactu.blog.lemonde.fr/2014/03/21/linternet-a-t-il-vraiment-fait-la-demonstration-de-notre-capacite-a-collaborer/|creationTime|2014-03-22T11:09:47Z +http://www.lemonde.fr/afrique/article/2017/04/24/l-afrique-au-c-ur-de-la-nouvelle-route-de-la-soie_5116739_3212.html|creationDate|2018-02-11 +http://www.lemonde.fr/afrique/article/2017/04/24/l-afrique-au-c-ur-de-la-nouvelle-route-de-la-soie_5116739_3212.html|tag|http://www.semanlink.net/tag/chine_afrique +http://www.lemonde.fr/afrique/article/2017/04/24/l-afrique-au-c-ur-de-la-nouvelle-route-de-la-soie_5116739_3212.html|tag|http://www.semanlink.net/tag/nouvelle_route_de_la_soie +http://www.lemonde.fr/afrique/article/2017/04/24/l-afrique-au-c-ur-de-la-nouvelle-route-de-la-soie_5116739_3212.html|title|L’Afrique au cœur de la nouvelle Route de la soie +http://www.lemonde.fr/afrique/article/2017/04/24/l-afrique-au-c-ur-de-la-nouvelle-route-de-la-soie_5116739_3212.html|creationTime|2018-02-11T10:40:00Z +https://medium.com/opla/small-data-is-big-in-ai-train-spotting-at-france-is-ai-4afb24168e4c|creationDate|2018-10-26 +https://medium.com/opla/small-data-is-big-in-ai-train-spotting-at-france-is-ai-4afb24168e4c|tag|http://www.semanlink.net/tag/maali_mnasri +https://medium.com/opla/small-data-is-big-in-ai-train-spotting-at-france-is-ai-4afb24168e4c|tag|http://www.semanlink.net/tag/france_is_ai_2018 +https://medium.com/opla/small-data-is-big-in-ai-train-spotting-at-france-is-ai-4afb24168e4c|title|Small Data is Big in AI : Train-spotting at France is AI +https://medium.com/opla/small-data-is-big-in-ai-train-spotting-at-france-is-ai-4afb24168e4c|creationTime|2018-10-26T01:22:41Z +http://www.slideshare.net/rumito/eswc2008-relational2rdf-mapping-relational-databases-to-rdf-with-openlink-virtuoso|creationDate|2009-02-06 +http://www.slideshare.net/rumito/eswc2008-relational2rdf-mapping-relational-databases-to-rdf-with-openlink-virtuoso|tag|http://www.semanlink.net/tag/orri_erling +http://www.slideshare.net/rumito/eswc2008-relational2rdf-mapping-relational-databases-to-rdf-with-openlink-virtuoso|tag|http://www.semanlink.net/tag/database_to_rdf_mapping +http://www.slideshare.net/rumito/eswc2008-relational2rdf-mapping-relational-databases-to-rdf-with-openlink-virtuoso|title|ESWC2008 Relational2RDF - Mapping Relational Databases to RDF with OpenLink Virtuoso +http://www.slideshare.net/rumito/eswc2008-relational2rdf-mapping-relational-databases-to-rdf-with-openlink-virtuoso|creationTime|2009-02-06T23:25:15Z 
+http://www.snee.com/bobdc.blog/2013/05/a-nineteenth-century-linking-a.html|creationDate|2013-05-07 +http://www.snee.com/bobdc.blog/2013/05/a-nineteenth-century-linking-a.html|tag|http://www.semanlink.net/tag/justice +http://www.snee.com/bobdc.blog/2013/05/a-nineteenth-century-linking-a.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2013/05/a-nineteenth-century-linking-a.html|title|A nineteenth-century linking application - bobdc.blog +http://www.snee.com/bobdc.blog/2013/05/a-nineteenth-century-linking-a.html|creationTime|2013-05-07T14:16:20Z +http://wiki.apache.org/solr/Suggester|creationDate|2015-06-27 +http://wiki.apache.org/solr/Suggester|tag|http://www.semanlink.net/tag/solr_autocomplete +http://wiki.apache.org/solr/Suggester|title|Suggester - Solr Wiki +http://wiki.apache.org/solr/Suggester|creationTime|2015-06-27T02:09:45Z +http://www.ciaovito.net/|creationDate|2007-04-01 +http://www.ciaovito.net/|tag|http://www.semanlink.net/tag/ciao_vito +http://www.ciaovito.net/|title|www.ciaovito.net +http://www.ciaovito.net/|creationTime|2007-04-01T22:12:46Z +http://www.lemonde.fr/idees/article/2013/01/07/comment-l-etat-peut-favoriser-l-essor-des-startups-de-technologie_1813509_3232.html|creationDate|2013-01-07 +http://www.lemonde.fr/idees/article/2013/01/07/comment-l-etat-peut-favoriser-l-essor-des-startups-de-technologie_1813509_3232.html|tag|http://www.semanlink.net/tag/startups +http://www.lemonde.fr/idees/article/2013/01/07/comment-l-etat-peut-favoriser-l-essor-des-startups-de-technologie_1813509_3232.html|tag|http://www.semanlink.net/tag/digital_economy +http://www.lemonde.fr/idees/article/2013/01/07/comment-l-etat-peut-favoriser-l-essor-des-startups-de-technologie_1813509_3232.html|title|Comment l'Etat peut favoriser l'essor des startups de technologie +http://www.lemonde.fr/idees/article/2013/01/07/comment-l-etat-peut-favoriser-l-essor-des-startups-de-technologie_1813509_3232.html|creationTime|2013-01-07T13:09:47Z +http://web.archive.org/web/19981202055424/www.hypersolutions.fr/proverbes/default.ssi|creationDate|2007-11-13 +http://web.archive.org/web/19981202055424/www.hypersolutions.fr/proverbes/default.ssi|tag|http://www.semanlink.net/tag/semanlink_archives +http://web.archive.org/web/19981202055424/www.hypersolutions.fr/proverbes/default.ssi|tag|http://www.semanlink.net/tag/meta_content_framework +http://web.archive.org/web/19981202055424/www.hypersolutions.fr/proverbes/default.ssi|tag|http://www.semanlink.net/tag/hypersolutions +http://web.archive.org/web/19981202055424/www.hypersolutions.fr/proverbes/default.ssi|comment|Utilisation de Semanlink sur le site hyperSOLutions (datée 1998) +http://web.archive.org/web/19981202055424/www.hypersolutions.fr/proverbes/default.ssi|title|Proverbes (site hyperSOLutions) +http://web.archive.org/web/19981202055424/www.hypersolutions.fr/proverbes/default.ssi|creationTime|2007-11-13T22:52:14Z +https://usbeketrica.com/article/comment-les-webs-alternatifs-veulent-reinventer-internet|creationDate|2017-03-10 +https://usbeketrica.com/article/comment-les-webs-alternatifs-veulent-reinventer-internet|tag|http://www.semanlink.net/tag/webs_alternatifs +https://usbeketrica.com/article/comment-les-webs-alternatifs-veulent-reinventer-internet|title|Comment les webs alternatifs veulent réinventer Internet Usbek & Rica +https://usbeketrica.com/article/comment-les-webs-alternatifs-veulent-reinventer-internet|creationTime|2017-03-10T14:12:47Z +http://www.youtube.com/watch?v=Jd3-eiid-Uw|creationDate|2008-03-19 
+http://www.youtube.com/watch?v=Jd3-eiid-Uw|tag|http://www.semanlink.net/tag/wii +http://www.youtube.com/watch?v=Jd3-eiid-Uw|tag|http://www.semanlink.net/tag/realite_virtuelle +http://www.youtube.com/watch?v=Jd3-eiid-Uw|title|Head Tracking for Desktop VR Displays using the WiiRemote +http://www.youtube.com/watch?v=Jd3-eiid-Uw|creationTime|2008-03-19T22:17:47Z +http://www.barefootpower.com/|creationDate|2011-01-02 +http://www.barefootpower.com/|tag|http://www.semanlink.net/tag/developing_countries +http://www.barefootpower.com/|tag|http://www.semanlink.net/tag/energie_solaire +http://www.barefootpower.com/|comment|"Barefoot Power is a social entrepreneurial business. We design and manufacture technology products specifically for poor people that have the potential to reduce poverty in developing countries. +
+We believe that energy access is one of the key building blocks of economic development. Our first area of focus, therefore, is on the most basic use of electricity - lighting." +http://www.barefootpower.com/|title|Barefoot Power +http://www.barefootpower.com/|creationTime|2011-01-02T16:08:09Z +http://www.jeux-geographiques.com/|creationDate|2009-02-23 +http://www.jeux-geographiques.com/|tag|http://www.semanlink.net/tag/geographie +http://www.jeux-geographiques.com/|tag|http://www.semanlink.net/tag/quizz +http://www.jeux-geographiques.com/|title|Jeux Géographiques : quizz sur la France, le monde, les villes et les départements. +http://www.jeux-geographiques.com/|creationTime|2009-02-23T22:50:50Z +http://www.graphviz.org/|creationDate|2010-05-26 +http://www.graphviz.org/|tag|http://www.semanlink.net/tag/graph_visualization +http://www.graphviz.org/|title|Graphviz - Graph Visualization Software +http://www.graphviz.org/|creationTime|2010-05-26T14:50:09Z +http://www.iro.umontreal.ca/~bengioy/talks/MIT-18oct2018.pdf|creationDate|2018-10-23 +http://www.iro.umontreal.ca/~bengioy/talks/MIT-18oct2018.pdf|tag|http://www.semanlink.net/tag/slides +http://www.iro.umontreal.ca/~bengioy/talks/MIT-18oct2018.pdf|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.iro.umontreal.ca/~bengioy/talks/MIT-18oct2018.pdf|tag|http://www.semanlink.net/tag/brain_vs_deep_learning +http://www.iro.umontreal.ca/~bengioy/talks/MIT-18oct2018.pdf|comment|"> Underlying Assumption: There are principles giving rise to intelligence (machine, human +or animal) via learning, simple enough that they can be +described compactly, similarly to the laws of physics, i.e., our +intelligence is not just the result of a huge bag of tricks and +pieces of knowledge, but of general mechanisms to acquire +knowledge." 
+http://www.iro.umontreal.ca/~bengioy/talks/MIT-18oct2018.pdf|title|Towards bridging the gap between deep learning and brains +http://www.iro.umontreal.ca/~bengioy/talks/MIT-18oct2018.pdf|creationTime|2018-10-23T22:41:09Z +http://stackoverflow.com/questions/1465974/content-negotiation-ignored-when-using-browser-back-button|creationDate|2015-05-14 +http://stackoverflow.com/questions/1465974/content-negotiation-ignored-when-using-browser-back-button|tag|http://www.semanlink.net/tag/browser_back_button +http://stackoverflow.com/questions/1465974/content-negotiation-ignored-when-using-browser-back-button|tag|http://www.semanlink.net/tag/http_cache +http://stackoverflow.com/questions/1465974/content-negotiation-ignored-when-using-browser-back-button|tag|http://www.semanlink.net/tag/content_negotiation +http://stackoverflow.com/questions/1465974/content-negotiation-ignored-when-using-browser-back-button|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/1465974/content-negotiation-ignored-when-using-browser-back-button|tag|http://www.semanlink.net/tag/vary_header +http://stackoverflow.com/questions/1465974/content-negotiation-ignored-when-using-browser-back-button|title|ajax - Content negotiation ignored when using browser Back button - Stack Overflow +http://stackoverflow.com/questions/1465974/content-negotiation-ignored-when-using-browser-back-button|creationTime|2015-05-14T13:09:59Z +http://en.wikipedia.org/wiki/The_Skeleton_Key|creationDate|2008-03-11 +http://en.wikipedia.org/wiki/The_Skeleton_Key|tag|http://www.semanlink.net/tag/film_fantastique +http://en.wikipedia.org/wiki/The_Skeleton_Key|tag|http://www.semanlink.net/tag/film_americain +http://en.wikipedia.org/wiki/The_Skeleton_Key|tag|http://www.semanlink.net/tag/magie_noire +http://en.wikipedia.org/wiki/The_Skeleton_Key|title|The Skeleton Key +http://en.wikipedia.org/wiki/The_Skeleton_Key|creationTime|2008-03-11T00:39:16Z +https://medium.com/@francois.chollet/the-bitcoin-network-at-risk-bd54a1473a1d|creationDate|2018-11-26 +https://medium.com/@francois.chollet/the-bitcoin-network-at-risk-bd54a1473a1d|tag|http://www.semanlink.net/tag/francois_chollet +https://medium.com/@francois.chollet/the-bitcoin-network-at-risk-bd54a1473a1d|tag|http://www.semanlink.net/tag/bitcoin +https://medium.com/@francois.chollet/the-bitcoin-network-at-risk-bd54a1473a1d|title|The Bitcoin network at risk – François Chollet +https://medium.com/@francois.chollet/the-bitcoin-network-at-risk-bd54a1473a1d|creationTime|2018-11-26T19:14:06Z +https://www.nytimes.com/2018/01/02/science/donkeys-africa-china-ejiao.html|creationDate|2018-01-02 +https://www.nytimes.com/2018/01/02/science/donkeys-africa-china-ejiao.html|tag|http://www.semanlink.net/tag/ane +https://www.nytimes.com/2018/01/02/science/donkeys-africa-china-ejiao.html|tag|http://www.semanlink.net/tag/chine_afrique +https://www.nytimes.com/2018/01/02/science/donkeys-africa-china-ejiao.html|title|To Sate China’s Demand, African Donkeys Are Stolen and Skinned - The New York Times +https://www.nytimes.com/2018/01/02/science/donkeys-africa-china-ejiao.html|creationTime|2018-01-02T19:19:30Z +http://www.cs.vu.nl/~jbroeks/papers/ISWC02.pdf|creationDate|2005-10-28 +http://www.cs.vu.nl/~jbroeks/papers/ISWC02.pdf|tag|http://www.semanlink.net/tag/triplestore +http://www.cs.vu.nl/~jbroeks/papers/ISWC02.pdf|tag|http://www.semanlink.net/tag/rdf_schema +http://www.cs.vu.nl/~jbroeks/papers/ISWC02.pdf|tag|http://www.semanlink.net/tag/rdf 
+http://www.cs.vu.nl/~jbroeks/papers/ISWC02.pdf|comment|Sesame: A Generic Architecture for Storing and Querying RDF and RDF Schema +http://www.cs.vu.nl/~jbroeks/papers/ISWC02.pdf|title|Sesame: A Generic Architecture for Storing and Querying RDF and RDF Schema +http://www.ldodds.com/blog/archives/000314.html|creationDate|2008-07-06 +http://www.ldodds.com/blog/archives/000314.html|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.ldodds.com/blog/archives/000314.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/archives/000314.html|tag|http://www.semanlink.net/tag/data_integration +http://www.ldodds.com/blog/archives/000314.html|comment|Leigh Dods tries to elucidate the ways in which technologies like RDF and OWL actually help with data integration? +http://www.ldodds.com/blog/archives/000314.html|title|Lost Boy: How Shall I Integrate Thee? Let Me Count the Ways... +http://www.ldodds.com/blog/archives/000314.html|creationTime|2008-07-06T04:39:52Z +http://maps.google.com/maps?ie=UTF8&z=13&ll=69.351578,88.197899&spn=0.044674,0.134754&t=k&om=1|creationDate|2006-10-28 +http://maps.google.com/maps?ie=UTF8&z=13&ll=69.351578,88.197899&spn=0.044674,0.134754&t=k&om=1|tag|http://www.semanlink.net/tag/google_maps +http://maps.google.com/maps?ie=UTF8&z=13&ll=69.351578,88.197899&spn=0.044674,0.134754&t=k&om=1|tag|http://www.semanlink.net/tag/norilsk +http://maps.google.com/maps?ie=UTF8&z=13&ll=69.351578,88.197899&spn=0.044674,0.134754&t=k&om=1|title|Norilsk - Google Maps +http://www.icij.org/|creationDate|2013-04-04 +http://www.icij.org/|tag|http://www.semanlink.net/tag/offshore_leaks +http://www.icij.org/|title|International Consortium of Investigative Journalists The World’s Best Cross-Border Investigative Team +http://www.icij.org/|creationTime|2013-04-04T22:57:57Z +https://github.com/sebastianruder/NLP-progress|creationDate|2018-06-23 +https://github.com/sebastianruder/NLP-progress|tag|http://www.semanlink.net/tag/nlp_problem +https://github.com/sebastianruder/NLP-progress|tag|http://www.semanlink.net/tag/sebastian_ruder +https://github.com/sebastianruder/NLP-progress|tag|http://www.semanlink.net/tag/nlp_datasets +https://github.com/sebastianruder/NLP-progress|tag|http://www.semanlink.net/tag/sota +https://github.com/sebastianruder/NLP-progress|tag|http://www.semanlink.net/tag/nlp_current_state +https://github.com/sebastianruder/NLP-progress|tag|http://www.semanlink.net/tag/nlp +https://github.com/sebastianruder/NLP-progress|title|sebastianruder/NLP-progress: Repository to track the progress in Natural Language Processing (NLP), including the datasets and the current state-of-the-art for the most common NLP tasks. 
+https://github.com/sebastianruder/NLP-progress|creationTime|2018-06-23T01:04:30Z +http://wikiworkshop.org/2019/papers/Wiki_Workshop_2019_paper_1.pdf|creationDate|2019-05-03 +http://wikiworkshop.org/2019/papers/Wiki_Workshop_2019_paper_1.pdf|tag|http://www.semanlink.net/tag/entities_to_topics +http://wikiworkshop.org/2019/papers/Wiki_Workshop_2019_paper_1.pdf|tag|http://www.semanlink.net/tag/wikidata +http://wikiworkshop.org/2019/papers/Wiki_Workshop_2019_paper_1.pdf|title|Learning to Map Wikidata Entities To Predefined Topics +http://wikiworkshop.org/2019/papers/Wiki_Workshop_2019_paper_1.pdf|creationTime|2019-05-03T19:49:56Z +http://www.bofaml.com/content/dam/boamlimages/documents/articles/D3_006/11511357.pdf|creationDate|2015-11-08 +http://www.bofaml.com/content/dam/boamlimages/documents/articles/D3_006/11511357.pdf|tag|http://www.semanlink.net/tag/robotisation +http://www.bofaml.com/content/dam/boamlimages/documents/articles/D3_006/11511357.pdf|tag|http://www.semanlink.net/tag/disruption +http://www.bofaml.com/content/dam/boamlimages/documents/articles/D3_006/11511357.pdf|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.bofaml.com/content/dam/boamlimages/documents/articles/D3_006/11511357.pdf|title|Bank of America's report on creative disruption, AI and robotisation +http://www.bofaml.com/content/dam/boamlimages/documents/articles/D3_006/11511357.pdf|creationTime|2015-11-08T11:50:56Z +https://cleantechnica.com/2019/02/09/tesla-model-3-1-best-selling-electric-car-in-world-7-of-global-ev-market/|creationDate|2019-02-15 +https://cleantechnica.com/2019/02/09/tesla-model-3-1-best-selling-electric-car-in-world-7-of-global-ev-market/|tag|http://www.semanlink.net/tag/electric_car +https://cleantechnica.com/2019/02/09/tesla-model-3-1-best-selling-electric-car-in-world-7-of-global-ev-market/|tag|http://www.semanlink.net/tag/tesla_inc +https://cleantechnica.com/2019/02/09/tesla-model-3-1-best-selling-electric-car-in-world-7-of-global-ev-market/|title|Tesla Model 3 = #1 Best Selling Electric Car in World +https://cleantechnica.com/2019/02/09/tesla-model-3-1-best-selling-electric-car-in-world-7-of-global-ev-market/|creationTime|2019-02-15T13:45:49Z +https://arxiv.org/abs/1511.06335|creationDate|2019-02-19 +https://arxiv.org/abs/1511.06335|tag|http://www.semanlink.net/tag/embeddings +https://arxiv.org/abs/1511.06335|tag|http://www.semanlink.net/tag/cluster_analysis +https://arxiv.org/abs/1511.06335|tag|http://www.semanlink.net/tag/arxiv_doc +https://arxiv.org/abs/1511.06335|arxiv_author|Ali Farhadi +https://arxiv.org/abs/1511.06335|arxiv_author|Ross Girshick +https://arxiv.org/abs/1511.06335|arxiv_author|Junyuan Xie +https://arxiv.org/abs/1511.06335|comment|Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective +https://arxiv.org/abs/1511.06335|title|[1511.06335] Unsupervised Deep Embedding for Clustering Analysis +https://arxiv.org/abs/1511.06335|creationTime|2019-02-19T19:06:06Z +https://arxiv.org/abs/1511.06335|arxiv_summary|"Clustering is central to many data-driven application domains and has been +studied extensively in terms of distance functions and grouping algorithms. +Relatively little work has focused on learning representations for clustering. 
+In this paper, we propose Deep Embedded Clustering (DEC), a method that +simultaneously learns feature representations and cluster assignments using +deep neural networks. DEC learns a mapping from the data space to a +lower-dimensional feature space in which it iteratively optimizes a clustering +objective. Our experimental evaluations on image and text corpora show +significant improvement over state-of-the-art methods." +https://arxiv.org/abs/1511.06335|arxiv_firstAuthor|Junyuan Xie +https://arxiv.org/abs/1511.06335|arxiv_updated|2016-05-24T22:27:35Z +https://arxiv.org/abs/1511.06335|arxiv_title|Unsupervised Deep Embedding for Clustering Analysis +https://arxiv.org/abs/1511.06335|arxiv_published|2015-11-19T20:06:14Z +https://arxiv.org/abs/1511.06335|arxiv_num|1511.06335 +http://hanishblogger.blogspot.fr/2013/07/data-categorization-using-opennlp.html|creationDate|2014-03-27 +http://hanishblogger.blogspot.fr/2013/07/data-categorization-using-opennlp.html|tag|http://www.semanlink.net/tag/apache_opennlp +http://hanishblogger.blogspot.fr/2013/07/data-categorization-using-opennlp.html|tag|http://www.semanlink.net/tag/howto +http://hanishblogger.blogspot.fr/2013/07/data-categorization-using-opennlp.html|tag|http://www.semanlink.net/tag/nlp_text_classification +http://hanishblogger.blogspot.fr/2013/07/data-categorization-using-opennlp.html|title|Data Categorization using OpenNLP +http://hanishblogger.blogspot.fr/2013/07/data-categorization-using-opennlp.html|creationTime|2014-03-27T11:40:41Z +http://www.nytimes.com/2006/04/02/world/asia/02robot.html?ex=1301634000&en=7d5fcaf014309078&ei=5088&partner=rssnyt&emc=rss|creationDate|2006-04-05 +http://www.nytimes.com/2006/04/02/world/asia/02robot.html?ex=1301634000&en=7d5fcaf014309078&ei=5088&partner=rssnyt&emc=rss|tag|http://www.semanlink.net/tag/robotique +http://www.nytimes.com/2006/04/02/world/asia/02robot.html?ex=1301634000&en=7d5fcaf014309078&ei=5088&partner=rssnyt&emc=rss|tag|http://www.semanlink.net/tag/coree_du_sud +http://www.nytimes.com/2006/04/02/world/asia/02robot.html?ex=1301634000&en=7d5fcaf014309078&ei=5088&partner=rssnyt&emc=rss|comment|South Korea, the world's most wired country, is rushing to turn what sounds like science fiction into everyday life. The government, which succeeded in getting broadband Internet into 72 percent of all households in the last half decade, has marshaled an army of scientists and business leaders to make robots full members of society. 
+http://www.nytimes.com/2006/04/02/world/asia/02robot.html?ex=1301634000&en=7d5fcaf014309078&ei=5088&partner=rssnyt&emc=rss|title|In a Wired South Korea, Robots Will Feel Right at Home - New York Times +https://class.coursera.org/rprog-010|creationDate|2015-01-06 +https://class.coursera.org/rprog-010|tag|http://www.semanlink.net/tag/coursera_r_programming +https://class.coursera.org/rprog-010|title|R Programming Coursera +https://class.coursera.org/rprog-010|creationTime|2015-01-06T17:51:13Z +http://fr.wikipedia.org/wiki/Tchin-Tabaraden|creationDate|2007-12-05 +http://fr.wikipedia.org/wiki/Tchin-Tabaraden|tag|http://www.semanlink.net/tag/rebellion_touaregue +http://fr.wikipedia.org/wiki/Tchin-Tabaraden|title|Tchin-Tabaraden - Wikipédia +http://fr.wikipedia.org/wiki/Tchin-Tabaraden|creationTime|2007-12-05T23:20:10Z +https://nlpparis.wordpress.com/2019/01/24/paris-nlp-season-3-meetup-3/|creationDate|2019-01-29 +https://nlpparis.wordpress.com/2019/01/24/paris-nlp-season-3-meetup-3/|tag|http://www.semanlink.net/tag/paris_nlp_meetup +https://nlpparis.wordpress.com/2019/01/24/paris-nlp-season-3-meetup-3/|comment|"Trois présentations : + +- une première décrivant l'utilisation de données textuelles dans le cadre de la conception d'une opération marketing (définition de l'orientation à donner à l'image d'une marque). Les techniques NLP utilisées sont simples, mais leur utilisation comme outil pour aider les créatifs du marketing est originale. + +- une deuxième dans le domaine juridique, très intéressante en termes de techniques mises en oeuvre. L'application vise à la valorisation de bases de contrats (un corpus de textes, sur lequel on souhaite effectuer des recherches complexes), en s'appuyant à la fois sur les techniques récentes de représentation de textes, et sur un knowledge graph (ontologie de termes juridiques). Pour ce qui est de le représentation de textes, ils utilisent Google BERT. Ce que permet BERT, c'est une forme de transfert d'apprentissage : BERT est un réseau de neurones profond entrainé de façon non supervisée, par et chez Google, sur une énorme quantité de textes, de façon à emmagasiner une connaissance sur une langue (""pre-trained language model""). Ces données (c'est à dire ce réseau pré-entrainé) sont mises à disposition par Google. Chacun peut ainsi affiner l'entrainement du réseau sur son propre corpus de textes, et ses propres données labellisées concernant le problème qu'il souhaite effectivement résoudre (par exemple, dans le cas juridique, la reconnaissance d'entités dans les contrats). + +Le speaker rapporte des résultats sensiblement améliorés par rapport à ce qu'ils obtenaient auparavant en ce qui concerne la qualité de la représentation de phrases, et des problèmes de type classification de phrases ou reconnaissance d'entités (il note que la représentation de textes longs reste un problème ouvert). Les temps d'entrainement de BERT sur leur problème ne sont pas exorbitants (il parle de quelques heures de GPU, pas de jours ou de semaines de TPU comme pour l'entrainement initial). + +- le dernier speaker quant à lui a présenté deux papiers de recherche, justement sur les techniques au cœur de BERT (""Transformer architecture""). 
+" +https://nlpparis.wordpress.com/2019/01/24/paris-nlp-season-3-meetup-3/|title|Paris NLP Season 3 Meetup #3 at Doctrine – Paris NLP +https://nlpparis.wordpress.com/2019/01/24/paris-nlp-season-3-meetup-3/|creationTime|2019-01-29T09:33:31Z +http://www.theinquirer.net/default.aspx?article=34523|creationDate|2006-09-22 +http://www.theinquirer.net/default.aspx?article=34523|tag|http://www.semanlink.net/tag/microsoft +http://www.theinquirer.net/default.aspx?article=34523|tag|http://www.semanlink.net/tag/windows_media_player +http://www.theinquirer.net/default.aspx?article=34523|tag|http://www.semanlink.net/tag/drm +http://www.theinquirer.net/default.aspx?article=34523|title|Microsoft Media Player shreds your rights +http://www.usinenouvelle.com/article/la-pile-a-combustible-a-la-francaise-une-filiere-complete-sans-les-constructeurs-automobiles.N263587|creationDate|2015-01-06 +http://www.usinenouvelle.com/article/la-pile-a-combustible-a-la-francaise-une-filiere-complete-sans-les-constructeurs-automobiles.N263587|tag|http://www.semanlink.net/tag/hydrogen_economy +http://www.usinenouvelle.com/article/la-pile-a-combustible-a-la-francaise-une-filiere-complete-sans-les-constructeurs-automobiles.N263587|title|La pile à combustible à la française : une filière complète... sans les constructeurs automobiles - L'Usine Auto +http://www.usinenouvelle.com/article/la-pile-a-combustible-a-la-francaise-une-filiere-complete-sans-les-constructeurs-automobiles.N263587|creationTime|2015-01-06T14:33:17Z +http://www.jeuneafrique.com/Article/ARTJAJA2530p032-034.xml0/-arrestation-opposition-president-Mamadou-Tandja-Tandja-l-apprenti-sorcier.html|creationDate|2009-07-14 +http://www.jeuneafrique.com/Article/ARTJAJA2530p032-034.xml0/-arrestation-opposition-president-Mamadou-Tandja-Tandja-l-apprenti-sorcier.html|tag|http://www.semanlink.net/tag/tandja +http://www.jeuneafrique.com/Article/ARTJAJA2530p032-034.xml0/-arrestation-opposition-president-Mamadou-Tandja-Tandja-l-apprenti-sorcier.html|title|Tandja, l'apprenti sorcier : Jeuneafrique.com +http://www.jeuneafrique.com/Article/ARTJAJA2530p032-034.xml0/-arrestation-opposition-president-Mamadou-Tandja-Tandja-l-apprenti-sorcier.html|creationTime|2009-07-14T13:12:06Z +http://code.google.com/p/oort/wiki/SparqlTree|creationDate|2011-03-07 +http://code.google.com/p/oort/wiki/SparqlTree|tag|http://www.semanlink.net/tag/treeview +http://code.google.com/p/oort/wiki/SparqlTree|tag|http://www.semanlink.net/tag/sparql_en_javascript +http://code.google.com/p/oort/wiki/SparqlTree|title|SparqlTree - oort - Automatic treeification of SPARQL query results. 
- Project Hosting on Google Code +http://code.google.com/p/oort/wiki/SparqlTree|creationTime|2011-03-07T21:20:15Z +http://www.pbs.org/wgbh/nova/tech/mystery-masterpiece.html|creationDate|2017-01-14 +http://www.pbs.org/wgbh/nova/tech/mystery-masterpiece.html|tag|http://www.semanlink.net/tag/leonardo_da_vinci +http://www.pbs.org/wgbh/nova/tech/mystery-masterpiece.html|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.pbs.org/wgbh/nova/tech/mystery-masterpiece.html|title|NOVA - Official Website Mystery of a Masterpiece +http://www.pbs.org/wgbh/nova/tech/mystery-masterpiece.html|creationTime|2017-01-14T12:46:38Z +http://www.nytimes.com/2016/06/04/science/rna-c2c2-gene-editing-dna-crispr.html?_r=0|creationDate|2016-06-06 +http://www.nytimes.com/2016/06/04/science/rna-c2c2-gene-editing-dna-crispr.html?_r=0|tag|http://www.semanlink.net/tag/crispr_cas9 +http://www.nytimes.com/2016/06/04/science/rna-c2c2-gene-editing-dna-crispr.html?_r=0|comment|Some parts of C2c2 genes share a common evolutionary origin with the defense systems seen in other bacterial species. Over billions of years, Dr. Koonin said, evolution has blindly tinkered with these genes in order to generate new ways to protect against viruses. +http://www.nytimes.com/2016/06/04/science/rna-c2c2-gene-editing-dna-crispr.html?_r=0|title|Scientists Find Form of Crispr Gene Editing With New Capabilities - The New York Times +http://www.nytimes.com/2016/06/04/science/rna-c2c2-gene-editing-dna-crispr.html?_r=0|creationTime|2016-06-06T13:19:32Z +https://www.sciencenews.org/article/body%E2%80%99s-bacteria-don%E2%80%99t-outnumber-human-cells-so-much-after-all|creationDate|2016-09-27 +https://www.sciencenews.org/article/body%E2%80%99s-bacteria-don%E2%80%99t-outnumber-human-cells-so-much-after-all|tag|http://www.semanlink.net/tag/bacteries +https://www.sciencenews.org/article/body%E2%80%99s-bacteria-don%E2%80%99t-outnumber-human-cells-so-much-after-all|tag|http://www.semanlink.net/tag/chiffres +https://www.sciencenews.org/article/body%E2%80%99s-bacteria-don%E2%80%99t-outnumber-human-cells-so-much-after-all|tag|http://www.semanlink.net/tag/biologie +https://www.sciencenews.org/article/body%E2%80%99s-bacteria-don%E2%80%99t-outnumber-human-cells-so-much-after-all|comment|Body’s bacteria don’t outnumber human cells so much after all New calculations suggest roughly equal populations, not 10-to-1 ratio +https://www.sciencenews.org/article/body%E2%80%99s-bacteria-don%E2%80%99t-outnumber-human-cells-so-much-after-all|title|Body’s bacteria don’t outnumber human cells so much after all Science News +https://www.sciencenews.org/article/body%E2%80%99s-bacteria-don%E2%80%99t-outnumber-human-cells-so-much-after-all|creationTime|2016-09-27T23:59:31Z +http://ebiquity.umbc.edu/blogger/2014/01/01/google-knowledge-maps-demonstration/|creationDate|2014-01-05 +http://ebiquity.umbc.edu/blogger/2014/01/01/google-knowledge-maps-demonstration/|tag|http://www.semanlink.net/tag/google_maps +http://ebiquity.umbc.edu/blogger/2014/01/01/google-knowledge-maps-demonstration/|tag|http://www.semanlink.net/tag/freebase +http://ebiquity.umbc.edu/blogger/2014/01/01/google-knowledge-maps-demonstration/|tag|http://www.semanlink.net/tag/javascript +http://ebiquity.umbc.edu/blogger/2014/01/01/google-knowledge-maps-demonstration/|title|Freebase knowledge maps +http://ebiquity.umbc.edu/blogger/2014/01/01/google-knowledge-maps-demonstration/|creationTime|2014-01-05T10:17:25Z +http://blog.aksw.org/2013/ontowiki-feature-of-the-week-document-management/|creationDate|2013-09-10 
+http://blog.aksw.org/2013/ontowiki-feature-of-the-week-document-management/|tag|http://www.semanlink.net/tag/ontowiki +http://blog.aksw.org/2013/ontowiki-feature-of-the-week-document-management/|title|blog.aksw.org » Blog Archive » OntoWiki Feature of the Week: Document Management +http://blog.aksw.org/2013/ontowiki-feature-of-the-week-document-management/|creationTime|2013-09-10T01:30:39Z +http://www.semanticuniverse.com/articles-pfizer-idea-project-interview-franz-jans-aasman-and-io-informatics-robert-stanley.html|creationDate|2010-08-24 +http://www.semanticuniverse.com/articles-pfizer-idea-project-interview-franz-jans-aasman-and-io-informatics-robert-stanley.html|tag|http://www.semanlink.net/tag/allegrograph +http://www.semanticuniverse.com/articles-pfizer-idea-project-interview-franz-jans-aasman-and-io-informatics-robert-stanley.html|tag|http://www.semanlink.net/tag/pfizer +http://www.semanticuniverse.com/articles-pfizer-idea-project-interview-franz-jans-aasman-and-io-informatics-robert-stanley.html|title|The Pfizer IDEA project: An Interview with Franz' Jans Aasman and IO Informatics' Robert Stanley Semantic Universe +http://www.semanticuniverse.com/articles-pfizer-idea-project-interview-franz-jans-aasman-and-io-informatics-robert-stanley.html|creationTime|2010-08-24T23:56:27Z +https://www.lifewire.com/patent-search-1616728|creationDate|2019-02-09 +https://www.lifewire.com/patent-search-1616728|tag|http://www.semanlink.net/tag/google_patents +https://www.lifewire.com/patent-search-1616728|title|What Is Google Patents Search? +https://www.lifewire.com/patent-search-1616728|creationTime|2019-02-09T00:52:01Z +http://www.ssi.gouv.fr/|creationDate|2015-04-10 +http://www.ssi.gouv.fr/|tag|http://www.semanlink.net/tag/securite_informatique +http://www.ssi.gouv.fr/|title|ANSSI Agence nationale de la sécurité des systèmes d'information +http://www.ssi.gouv.fr/|creationTime|2015-04-10T23:01:23Z +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-7d2250723780|creationDate|2019-01-30 +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-7d2250723780|tag|http://www.semanlink.net/tag/graph_convolutional_networks +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-7d2250723780|comment|"Part 1: A High-Level Introduction to Graph Convolutional Networks + +[Part 2](/doc/?uri=https%3A%2F%2Ftowardsdatascience.com%2Fhow-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0)" +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-7d2250723780|relatedDoc|https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0 +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-7d2250723780|title|How to do Deep Learning on Graphs with Graph Convolutional Networks - Part 1 +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-7d2250723780|creationTime|2019-01-30T12:54:08Z +http://jena.hpl.hp.com/~stecay/downloads/snippetmanager-iswc-draft008.pdf|creationDate|2006-03-11 +http://jena.hpl.hp.com/~stecay/downloads/snippetmanager-iswc-draft008.pdf|tag|http://www.semanlink.net/tag/steve_cayzer +http://jena.hpl.hp.com/~stecay/downloads/snippetmanager-iswc-draft008.pdf|tag|http://www.semanlink.net/tag/manager_snippet 
+http://jena.hpl.hp.com/~stecay/downloads/snippetmanager-iswc-draft008.pdf|comment|"In our research group, there is a need to capture, organize and share +resources associated with a domain of exploration. We are building a tool for +this task, based on previous experience in the knowledge management domain. +In this position paper, we present our thoughts on what works (and what +doesn’t work), together with details of our initial implementation." +http://jena.hpl.hp.com/~stecay/downloads/snippetmanager-iswc-draft008.pdf|title|Howto build a Snippet Manager +https://www.newscientist.com/article/2127625-entire-nervous-system-of-an-animal-recorded-for-the-first-time/|creationDate|2017-04-12 +https://www.newscientist.com/article/2127625-entire-nervous-system-of-an-animal-recorded-for-the-first-time/|tag|http://www.semanlink.net/tag/neuroscience +https://www.newscientist.com/article/2127625-entire-nervous-system-of-an-animal-recorded-for-the-first-time/|title|Entire nervous system of an animal recorded for the first time New Scientist +https://www.newscientist.com/article/2127625-entire-nervous-system-of-an-animal-recorded-for-the-first-time/|creationTime|2017-04-12T21:26:09Z +https://twitter.com/jeremyphoward/status/891421041410531329|creationDate|2019-03-02 +https://twitter.com/jeremyphoward/status/891421041410531329|tag|http://www.semanlink.net/tag/jeremy_howard +https://twitter.com/jeremyphoward/status/891421041410531329|tag|http://www.semanlink.net/tag/memory_networks +https://twitter.com/jeremyphoward/status/891421041410531329|title|"Jeremy Howard sur Twitter : ""Memory networks are the most overhyped and disappointing DL ""advance"" I've seen yet… """ +https://twitter.com/jeremyphoward/status/891421041410531329|creationTime|2019-03-02T16:21:26Z +http://internetactu.blog.lemonde.fr/2014/02/07/comment-apprendre-a-apprendre/|creationDate|2014-02-09 +http://internetactu.blog.lemonde.fr/2014/02/07/comment-apprendre-a-apprendre/|tag|http://www.semanlink.net/tag/apprendre_a_apprendre +http://internetactu.blog.lemonde.fr/2014/02/07/comment-apprendre-a-apprendre/|title|Comment apprendre à apprendre ? InternetActu +http://internetactu.blog.lemonde.fr/2014/02/07/comment-apprendre-a-apprendre/|creationTime|2014-02-09T19:10:36Z +http://homepages.inf.ed.ac.uk/lzhang10/maxent.html#intro|creationDate|2012-04-14 +http://homepages.inf.ed.ac.uk/lzhang10/maxent.html#intro|tag|http://www.semanlink.net/tag/maxent_models +http://homepages.inf.ed.ac.uk/lzhang10/maxent.html#intro|comment|List of tutorials, software, papers... +http://homepages.inf.ed.ac.uk/lzhang10/maxent.html#intro|title|Maximum Entropy Modeling +http://homepages.inf.ed.ac.uk/lzhang10/maxent.html#intro|creationTime|2012-04-14T21:46:14Z +http://www.cs.put.poznan.pl/dweiss/site/publications/slides/iipwm2004-dweiss-lingo.pdf|creationDate|2017-07-11 +http://www.cs.put.poznan.pl/dweiss/site/publications/slides/iipwm2004-dweiss-lingo.pdf|tag|http://www.semanlink.net/tag/slides +http://www.cs.put.poznan.pl/dweiss/site/publications/slides/iipwm2004-dweiss-lingo.pdf|tag|http://www.semanlink.net/tag/singular_value_decomposition +http://www.cs.put.poznan.pl/dweiss/site/publications/slides/iipwm2004-dweiss-lingo.pdf|tag|http://www.semanlink.net/tag/lingo +http://www.cs.put.poznan.pl/dweiss/site/publications/slides/iipwm2004-dweiss-lingo.pdf|comment|"2 independent phases in the process: + +- cluster label candidate discovery, (based on phrases discovery — usually good label indicators) +- clusters discovery (based on SVD) + +Lingo: description comes first. 
+" +http://www.cs.put.poznan.pl/dweiss/site/publications/slides/iipwm2004-dweiss-lingo.pdf|title|Lingo: Search Results Clustering Algorithm Based on Singular Value Decomposition (slides) +http://www.cs.put.poznan.pl/dweiss/site/publications/slides/iipwm2004-dweiss-lingo.pdf|creationTime|2017-07-11T17:13:55Z +http://docs.python-guide.org/en/latest/writing/structure/|creationDate|2018-03-27 +http://docs.python-guide.org/en/latest/writing/structure/|tag|http://www.semanlink.net/tag/python +http://docs.python-guide.org/en/latest/writing/structure/|title|Structuring Your Project — The Hitchhiker's Guide to Python +http://docs.python-guide.org/en/latest/writing/structure/|creationTime|2018-03-27T14:06:06Z +https://en.wikipedia.org/wiki/Nashorn_(JavaScript_engine)|creationDate|2019-02-26 +https://en.wikipedia.org/wiki/Nashorn_(JavaScript_engine)|tag|http://www.semanlink.net/tag/javascript +https://en.wikipedia.org/wiki/Nashorn_(JavaScript_engine)|tag|http://www.semanlink.net/tag/java_8 +https://en.wikipedia.org/wiki/Nashorn_(JavaScript_engine)|comment|Nashorn +https://en.wikipedia.org/wiki/Nashorn_(JavaScript_engine)|title|Nashorn (JavaScript engine) +https://en.wikipedia.org/wiki/Nashorn_(JavaScript_engine)|creationTime|2019-02-26T22:28:54Z +http://www.besmart.company/wp-content/uploads/2014/11/briefoverview01.pdf|creationDate|2017-06-08 +http://www.besmart.company/wp-content/uploads/2014/11/briefoverview01.pdf|tag|http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys +http://www.besmart.company/wp-content/uploads/2014/11/briefoverview01.pdf|comment|Learn how IBM SPSS Text Analytics for Surveys gives you greater insight +http://www.besmart.company/wp-content/uploads/2014/11/briefoverview01.pdf|title|Analyzing survey text: a brief overview +http://www.besmart.company/wp-content/uploads/2014/11/briefoverview01.pdf|creationTime|2017-06-08T00:46:32Z +http://rdfweb.org/pipermail/rdfweb-dev/2003-July/011457.html|creationDate|2005-02-10 +http://rdfweb.org/pipermail/rdfweb-dev/2003-July/011457.html|tag|http://www.semanlink.net/tag/foaf +http://rdfweb.org/pipermail/rdfweb-dev/2003-July/011457.html|tag|http://www.semanlink.net/tag/rdf_vs_xml +http://rdfweb.org/pipermail/rdfweb-dev/2003-July/011457.html|tag|http://www.semanlink.net/tag/dev +http://rdfweb.org/pipermail/rdfweb-dev/2003-July/011457.html|title|Might FOAF be a plain XML language? 
+http://www.wired.com/wiredscience/2013/11/christof-koch-panpsychism-consciousness/|creationDate|2013-11-18
+http://www.wired.com/wiredscience/2013/11/christof-koch-panpsychism-consciousness/|tag|http://www.semanlink.net/tag/conscience
+http://www.wired.com/wiredscience/2013/11/christof-koch-panpsychism-consciousness/|title|A Neuroscientist's Radical Theory of How Networks Become Conscious - Wired Science
+http://www.wired.com/wiredscience/2013/11/christof-koch-panpsychism-consciousness/|creationTime|2013-11-18T09:59:43Z
+https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3168314/|creationDate|2018-08-13
+https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3168314/|tag|http://www.semanlink.net/tag/medical_data
+https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3168314/|tag|http://www.semanlink.net/tag/concept_extraction
+https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3168314/|title|Using machine learning for concept extraction on clinical documents from multiple data sources (2011)
+https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3168314/|creationTime|2018-08-13T17:46:50Z
+http://gigaom.com/2013/07/10/google-adobe-and-best-buy-are-working-on-an-ecommerce-web-data-standard/|creationDate|2013-07-12
+http://gigaom.com/2013/07/10/google-adobe-and-best-buy-are-working-on-an-ecommerce-web-data-standard/|tag|http://www.semanlink.net/tag/w3c
+http://gigaom.com/2013/07/10/google-adobe-and-best-buy-are-working-on-an-ecommerce-web-data-standard/|tag|http://www.semanlink.net/tag/google
+http://gigaom.com/2013/07/10/google-adobe-and-best-buy-are-working-on-an-ecommerce-web-data-standard/|title|Google, Adobe and Best Buy are working on an ecommerce web data standard — Tech News and Analysis
+http://gigaom.com/2013/07/10/google-adobe-and-best-buy-are-working-on-an-ecommerce-web-data-standard/|creationTime|2013-07-12T11:35:07Z
+http://onlinetools.org/articles/unobtrusivejavascript/chapter4.html|creationDate|2008-02-08
+http://onlinetools.org/articles/unobtrusivejavascript/chapter4.html|tag|http://www.semanlink.net/tag/javascript
+http://onlinetools.org/articles/unobtrusivejavascript/chapter4.html|tag|http://www.semanlink.net/tag/javascript_tips
+http://onlinetools.org/articles/unobtrusivejavascript/chapter4.html|comment|Better than adding onload to the body element
+http://onlinetools.org/articles/unobtrusivejavascript/chapter4.html|title|How to call scripts - Unobtrusive Javascript
+http://onlinetools.org/articles/unobtrusivejavascript/chapter4.html|creationTime|2008-02-08T00:24:25Z
+https://www.edge.org/response-detail/26794|creationDate|2018-01-14
+https://www.edge.org/response-detail/26794|tag|http://www.semanlink.net/tag/differentiable_programming
+https://www.edge.org/response-detail/26794|tag|http://www.semanlink.net/tag/functional_programming
+https://www.edge.org/response-detail/26794|tag|http://www.semanlink.net/tag/deep_learning
+https://www.edge.org/response-detail/26794|title|Differentiable Programming
+https://www.edge.org/response-detail/26794|creationTime|2018-01-14T19:20:20Z
+https://jersey.java.net/documentation/latest/security.html|creationDate|2014-09-26
+https://jersey.java.net/documentation/latest/security.html|tag|http://www.semanlink.net/tag/jersey
+https://jersey.java.net/documentation/latest/security.html|tag|http://www.semanlink.net/tag/security
+https://jersey.java.net/documentation/latest/security.html|title|Jersey user guide. Chapter 15. Security
+https://jersey.java.net/documentation/latest/security.html|creationTime|2014-09-26T00:56:30Z
+http://www.semanticuniverse.com/articles-bringing-semantic-technologies-enterprise-data.html|creationDate|2010-08-24
+http://www.semanticuniverse.com/articles-bringing-semantic-technologies-enterprise-data.html|tag|http://www.semanlink.net/tag/paul_miller
+http://www.semanticuniverse.com/articles-bringing-semantic-technologies-enterprise-data.html|tag|http://www.semanlink.net/tag/semantic_enterprise
+http://www.semanticuniverse.com/articles-bringing-semantic-technologies-enterprise-data.html|tag|http://www.semanlink.net/tag/jeffrey_t_pollock
+http://www.semanticuniverse.com/articles-bringing-semantic-technologies-enterprise-data.html|title|Bringing Semantic Technologies to Enterprise Data Semantic Universe
+http://www.semanticuniverse.com/articles-bringing-semantic-technologies-enterprise-data.html|creationTime|2010-08-24T23:04:44Z
+http://www.semanticuniverse.com/articles-bringing-semantic-technologies-enterprise-data.html|creator|http://www.semanlink.net/tag/paul_miller
+https://medium.freecodecamp.com/the-business-implications-of-machine-learning-11480b99184d#.gj82fw9h1|creationDate|2016-08-19
+https://medium.freecodecamp.com/the-business-implications-of-machine-learning-11480b99184d#.gj82fw9h1|tag|http://www.semanlink.net/tag/machine_learning_business
+https://medium.freecodecamp.com/the-business-implications-of-machine-learning-11480b99184d#.gj82fw9h1|tag|http://www.semanlink.net/tag/machine_learning
+https://medium.freecodecamp.com/the-business-implications-of-machine-learning-11480b99184d#.gj82fw9h1|title|The Business Implications of Machine Learning — Free Code Camp
+https://medium.freecodecamp.com/the-business-implications-of-machine-learning-11480b99184d#.gj82fw9h1|creationTime|2016-08-19T12:30:49Z
+http://bigbrowser.blog.lemonde.fr/2014/07/11/je-dis-ca-je-dis-rien-lancienne-directrice-de-la-redaction-du-new-york-times-denonce-le-secret-de-ladministration-obama/|creationDate|2014-07-15
+http://bigbrowser.blog.lemonde.fr/2014/07/11/je-dis-ca-je-dis-rien-lancienne-directrice-de-la-redaction-du-new-york-times-denonce-le-secret-de-ladministration-obama/|tag|http://www.semanlink.net/tag/obama
+http://bigbrowser.blog.lemonde.fr/2014/07/11/je-dis-ca-je-dis-rien-lancienne-directrice-de-la-redaction-du-new-york-times-denonce-le-secret-de-ladministration-obama/|tag|http://www.semanlink.net/tag/new_york_times
+http://bigbrowser.blog.lemonde.fr/2014/07/11/je-dis-ca-je-dis-rien-lancienne-directrice-de-la-redaction-du-new-york-times-denonce-le-secret-de-ladministration-obama/|tag|http://www.semanlink.net/tag/liberte_d_expression
+http://bigbrowser.blog.lemonde.fr/2014/07/11/je-dis-ca-je-dis-rien-lancienne-directrice-de-la-redaction-du-new-york-times-denonce-le-secret-de-ladministration-obama/|comment|L’administration Obama à nouveau accusée d’étouffer la liberté d’expression
+http://bigbrowser.blog.lemonde.fr/2014/07/11/je-dis-ca-je-dis-rien-lancienne-directrice-de-la-redaction-du-new-york-times-denonce-le-secret-de-ladministration-obama/|title|JE DIS ÇA JE DIS RIEN – L’ancienne directrice de la rédaction du « New York Times » dénonce le secret de l’administration Obama Big Browser
+http://bigbrowser.blog.lemonde.fr/2014/07/11/je-dis-ca-je-dis-rien-lancienne-directrice-de-la-redaction-du-new-york-times-denonce-le-secret-de-ladministration-obama/|creationTime|2014-07-15T18:18:26Z
+https://ai.googleblog.com/2017/08/transformer-novel-neural-network.html|creationDate|2018-08-17
+https://ai.googleblog.com/2017/08/transformer-novel-neural-network.html|tag|http://www.semanlink.net/tag/machine_translation +https://ai.googleblog.com/2017/08/transformer-novel-neural-network.html|tag|http://www.semanlink.net/tag/google_research +https://ai.googleblog.com/2017/08/transformer-novel-neural-network.html|title|Google AI Blog: Transformer: A Novel Neural Network Architecture for Language Understanding +https://ai.googleblog.com/2017/08/transformer-novel-neural-network.html|creationTime|2018-08-17T10:03:28Z +http://www.bbc.co.uk/blogs/researchanddevelopment/2012/04/notes-from-the-www12-conferenc.shtml|creationDate|2012-04-30 +http://www.bbc.co.uk/blogs/researchanddevelopment/2012/04/notes-from-the-www12-conferenc.shtml|tag|http://www.semanlink.net/tag/yves_raymond +http://www.bbc.co.uk/blogs/researchanddevelopment/2012/04/notes-from-the-www12-conferenc.shtml|tag|http://www.semanlink.net/tag/www_2012 +http://www.bbc.co.uk/blogs/researchanddevelopment/2012/04/notes-from-the-www12-conferenc.shtml|title|Notes from the WWW 2012 conference +http://www.bbc.co.uk/blogs/researchanddevelopment/2012/04/notes-from-the-www12-conferenc.shtml|creationTime|2012-04-30T12:00:08Z +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1497|creationDate|2008-12-29 +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1497|tag|http://www.semanlink.net/tag/kingsley_idehen +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1497|comment|Why Applications Are Like Fish and Data is Like Wine +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1497|title|Cool URIs, Fish, and Wine +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1497|creationTime|2008-12-29T17:36:56Z +http://www.w3.org/TR/rdfa-syntax/|creationDate|2010-12-28 +http://www.w3.org/TR/rdfa-syntax/|tag|http://www.semanlink.net/tag/rdfa +http://www.w3.org/TR/rdfa-syntax/|tag|http://www.semanlink.net/tag/w3c_recommendation +http://www.w3.org/TR/rdfa-syntax/|title|RDFa in XHTML: Syntax and Processing +http://www.w3.org/TR/rdfa-syntax/|creationTime|2010-12-28T18:34:40Z +https://github.com/Accenture/AmpliGraph|creationDate|2019-03-25 +https://github.com/Accenture/AmpliGraph|tag|http://www.semanlink.net/tag/github_project +https://github.com/Accenture/AmpliGraph|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +https://github.com/Accenture/AmpliGraph|comment|Open source Python library that predicts links between concepts in a knowledge graph. 
+https://github.com/Accenture/AmpliGraph|title|Accenture/AmpliGraph: Python library for Representation Learning on Knowledge Graphs +https://github.com/Accenture/AmpliGraph|creationTime|2019-03-25T18:19:17Z +http://www.lemonde.fr/technologies/article/2012/01/07/surfer-sans-entraves_1627059_651865.html#ens_id=1280415|creationDate|2012-01-08 +http://www.lemonde.fr/technologies/article/2012/01/07/surfer-sans-entraves_1627059_651865.html#ens_id=1280415|tag|http://www.semanlink.net/tag/internet_libre +http://www.lemonde.fr/technologies/article/2012/01/07/surfer-sans-entraves_1627059_651865.html#ens_id=1280415|tag|http://www.semanlink.net/tag/freedom_box +http://www.lemonde.fr/technologies/article/2012/01/07/surfer-sans-entraves_1627059_651865.html#ens_id=1280415|title|Surfer sans entraves (pirate box, freedom box) +http://www.lemonde.fr/technologies/article/2012/01/07/surfer-sans-entraves_1627059_651865.html#ens_id=1280415|creationTime|2012-01-08T10:48:03Z +http://www.lemonde.fr/technologies/article/2012/01/07/surfer-sans-entraves_1627059_651865.html#ens_id=1280415|source|Le Monde +http://www.lemonde.fr/technologies/article/2012/01/07/surfer-sans-entraves_1627059_651865.html#ens_id=1280415|date|2012-01-08 +https://www.tbray.org/ongoing/When/201x/2014/11/14/Being-Secret|creationDate|2014-11-17 +https://www.tbray.org/ongoing/When/201x/2014/11/14/Being-Secret|tag|http://www.semanlink.net/tag/howto +https://www.tbray.org/ongoing/When/201x/2014/11/14/Being-Secret|tag|http://www.semanlink.net/tag/tim_bray +https://www.tbray.org/ongoing/When/201x/2014/11/14/Being-Secret|tag|http://www.semanlink.net/tag/privacy_and_internet +https://www.tbray.org/ongoing/When/201x/2014/11/14/Being-Secret|title|ongoing by Tim Bray · How To Be Secret +https://www.tbray.org/ongoing/When/201x/2014/11/14/Being-Secret|creationTime|2014-11-17T20:07:08Z +http://www.readwriteweb.com/hack/2010/12/how-to-semantically-analyze-we.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|creationDate|2010-12-21 +http://www.readwriteweb.com/hack/2010/12/how-to-semantically-analyze-we.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|tag|http://www.semanlink.net/tag/del_icio_us +http://www.readwriteweb.com/hack/2010/12/how-to-semantically-analyze-we.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|title|How to Semantically Analyze Web Pages With Delicious +http://www.readwriteweb.com/hack/2010/12/how-to-semantically-analyze-we.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|creationTime|2010-12-21T15:07:07Z +http://vapour.sourceforge.net/|creationDate|2007-11-08 +http://vapour.sourceforge.net/|tag|http://www.semanlink.net/tag/publishing_rdf_vocabularies +http://vapour.sourceforge.net/|tag|http://www.semanlink.net/tag/rdf_tools +http://vapour.sourceforge.net/|tag|http://www.semanlink.net/tag/validation +http://vapour.sourceforge.net/|title|Vapour, a web-based validator tool to check best practices for publishing RDF vocabularies +http://vapour.sourceforge.net/|creationTime|2007-11-08T18:36:13Z +https://www.wired.com/story/how-dirt-could-save-humanity-from-an-infectious-apocalypse/|creationDate|2018-01-15 +https://www.wired.com/story/how-dirt-could-save-humanity-from-an-infectious-apocalypse/|tag|http://www.semanlink.net/tag/antibiotic_resistance 
+https://www.wired.com/story/how-dirt-could-save-humanity-from-an-infectious-apocalypse/|tag|http://www.semanlink.net/tag/metagenomics
+https://www.wired.com/story/how-dirt-could-save-humanity-from-an-infectious-apocalypse/|title|How Dirt Could Save Us From Antibiotic-Resistant Superbugs WIRED
+https://www.wired.com/story/how-dirt-could-save-humanity-from-an-infectious-apocalypse/|creationTime|2018-01-15T10:26:08Z
+http://technology.burningbird.net/|creationDate|2007-01-24
+http://technology.burningbird.net/|tag|http://www.semanlink.net/tag/shelley_powers
+http://technology.burningbird.net/|title|Mad Techie Woman
+http://news.bbc.co.uk/2/hi/science/nature/7753267.stm|creationDate|2009-01-07
+http://news.bbc.co.uk/2/hi/science/nature/7753267.stm|tag|http://www.semanlink.net/tag/biodiversite
+http://news.bbc.co.uk/2/hi/science/nature/7753267.stm|tag|http://www.semanlink.net/tag/riz
+http://news.bbc.co.uk/2/hi/science/nature/7753267.stm|tag|http://www.semanlink.net/tag/agriculture
+http://news.bbc.co.uk/2/hi/science/nature/7753267.stm|title|BBC NEWS Science & Environment Crop diversity: Eat it or lose it
+http://news.bbc.co.uk/2/hi/science/nature/7753267.stm|creationTime|2009-01-07T18:15:11Z
+http://stackoverflow.com/questions/15377290/unsupervised-automatic-tagging-algorithms|creationDate|2014-04-24
+http://stackoverflow.com/questions/15377290/unsupervised-automatic-tagging-algorithms|tag|http://www.semanlink.net/tag/topic_modeling
+http://stackoverflow.com/questions/15377290/unsupervised-automatic-tagging-algorithms|tag|http://www.semanlink.net/tag/stack_overflow
+http://stackoverflow.com/questions/15377290/unsupervised-automatic-tagging-algorithms|tag|http://www.semanlink.net/tag/unsupervised_machine_learning
+http://stackoverflow.com/questions/15377290/unsupervised-automatic-tagging-algorithms|tag|http://www.semanlink.net/tag/automatic_tagging
+http://stackoverflow.com/questions/15377290/unsupervised-automatic-tagging-algorithms|title|machine learning - Unsupervised automatic tagging algorithms? - Stack Overflow
+http://stackoverflow.com/questions/15377290/unsupervised-automatic-tagging-algorithms|creationTime|2014-04-24T00:00:04Z
+http://www.epimorphics.com/web/wiki/rdf-json-converter-google-app-engine|creationDate|2012-01-11
+http://www.epimorphics.com/web/wiki/rdf-json-converter-google-app-engine|tag|http://www.semanlink.net/tag/epimorphics_json_rdf
+http://www.epimorphics.com/web/wiki/rdf-json-converter-google-app-engine|title|An RDF to JSON converter on Google App Engine Epimorphics
+http://www.epimorphics.com/web/wiki/rdf-json-converter-google-app-engine|creationTime|2012-01-11T16:40:43Z
+http://linkedup-challenge.org/veni.html|creationDate|2013-07-15
+http://linkedup-challenge.org/veni.html|tag|http://www.semanlink.net/tag/education_and_linked_data
+http://linkedup-challenge.org/veni.html|title|Veni Competition Submissions
+http://linkedup-challenge.org/veni.html|creationTime|2013-07-15T18:07:44Z
+http://www.bloomberg.com/view/articles/2016-06-17/blockchain-company-s-smart-contracts-were-dumb|creationDate|2016-06-20
+http://www.bloomberg.com/view/articles/2016-06-17/blockchain-company-s-smart-contracts-were-dumb|tag|http://www.semanlink.net/tag/smart_contracts
+http://www.bloomberg.com/view/articles/2016-06-17/blockchain-company-s-smart-contracts-were-dumb|tag|http://www.semanlink.net/tag/dao_attack
+http://www.bloomberg.com/view/articles/2016-06-17/blockchain-company-s-smart-contracts-were-dumb|title|Blockchain Company's Smart Contracts Were Dumb - Bloomberg View
+http://www.bloomberg.com/view/articles/2016-06-17/blockchain-company-s-smart-contracts-were-dumb|creationTime|2016-06-20T14:25:32Z
+http://www.ieml.org/spip.php?rubrique49&lang=fr|creationDate|2007-06-27
+http://www.ieml.org/spip.php?rubrique49&lang=fr|tag|http://www.semanlink.net/tag/ieml
+http://www.ieml.org/spip.php?rubrique49&lang=fr|comment|"""Une sémantique computationnelle au service de l’intelligence collective""
+"
+http://www.ieml.org/spip.php?rubrique49&lang=fr|title|ieml
+http://www.ieml.org/spip.php?rubrique49&lang=fr|creationTime|2007-06-27T21:32:53Z
+http://www.snee.com/bobdc.blog/2009/06/searchmonkey-and-rdfa.html|creationDate|2009-06-15
+http://www.snee.com/bobdc.blog/2009/06/searchmonkey-and-rdfa.html|tag|http://www.semanlink.net/tag/bob_ducharme
+http://www.snee.com/bobdc.blog/2009/06/searchmonkey-and-rdfa.html|tag|http://www.semanlink.net/tag/rdfa
+http://www.snee.com/bobdc.blog/2009/06/searchmonkey-and-rdfa.html|tag|http://www.semanlink.net/tag/searchmonkey
+http://www.snee.com/bobdc.blog/2009/06/searchmonkey-and-rdfa.html|title|SearchMonkey and RDFa - bobdc.blog
+http://www.snee.com/bobdc.blog/2009/06/searchmonkey-and-rdfa.html|creationTime|2009-06-15T09:40:38Z
+http://www.dataversity.net/enterprise-nosql-drives-synthesized-meaningful-data-next-gen-apps-processes/|creationDate|2016-06-06
+http://www.dataversity.net/enterprise-nosql-drives-synthesized-meaningful-data-next-gen-apps-processes/|tag|http://www.semanlink.net/tag/marklogic
+http://www.dataversity.net/enterprise-nosql-drives-synthesized-meaningful-data-next-gen-apps-processes/|comment|If you’re not going to tell the database anything about the data, then there’s only a limited set of things the database has the opportunity to do
+http://www.dataversity.net/enterprise-nosql-drives-synthesized-meaningful-data-next-gen-apps-processes/|title|MarkLogic's New Enterprise NoSQL Solution Drives Next-Gen Apps and Processes - DATAVERSITY
+http://www.dataversity.net/enterprise-nosql-drives-synthesized-meaningful-data-next-gen-apps-processes/|creationTime|2016-06-06T13:27:56Z +http://www.ac-nice.fr/clea/lunap/html/Interf/InterfActiv.html|creationDate|2009-11-22 +http://www.ac-nice.fr/clea/lunap/html/Interf/InterfActiv.html|tag|http://www.semanlink.net/tag/optique +http://www.ac-nice.fr/clea/lunap/html/Interf/InterfActiv.html|title|Diffraction et interférences +http://www.ac-nice.fr/clea/lunap/html/Interf/InterfActiv.html|creationTime|2009-11-22T15:12:15Z +http://fr.slideshare.net/EUDataForum/edf2013-talk-of-european-data-innovator-award-winner-michael-gorriz-cio-of-daimler-from-old-to-new-economy|creationDate|2013-05-07 +http://fr.slideshare.net/EUDataForum/edf2013-talk-of-european-data-innovator-award-winner-michael-gorriz-cio-of-daimler-from-old-to-new-economy|tag|http://www.semanlink.net/tag/daimler +http://fr.slideshare.net/EUDataForum/edf2013-talk-of-european-data-innovator-award-winner-michael-gorriz-cio-of-daimler-from-old-to-new-economy|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://fr.slideshare.net/EUDataForum/edf2013-talk-of-european-data-innovator-award-winner-michael-gorriz-cio-of-daimler-from-old-to-new-economy|title|EDF2013: Talk of European Data Innovator Award Winner: Michael Gorriz, CIO of Daimler: From Old to New Economy +http://fr.slideshare.net/EUDataForum/edf2013-talk-of-european-data-innovator-award-winner-michael-gorriz-cio-of-daimler-from-old-to-new-economy|creationTime|2013-05-07T11:57:50Z +https://neo4j.com/developer/graph-database/#property-graph|creationDate|2019-03-01 +https://neo4j.com/developer/graph-database/#property-graph|tag|http://www.semanlink.net/tag/neo4j +https://neo4j.com/developer/graph-database/#property-graph|tag|http://www.semanlink.net/tag/property_graphs +https://neo4j.com/developer/graph-database/#property-graph|title|Property Graph Model Neo4j +https://neo4j.com/developer/graph-database/#property-graph|creationTime|2019-03-01T00:14:08Z +http://en.wikipedia.org/wiki/Spark_of_Life_(novel)|creationDate|2012-08-23 +http://en.wikipedia.org/wiki/Spark_of_Life_(novel)|tag|http://www.semanlink.net/tag/roman +http://en.wikipedia.org/wiki/Spark_of_Life_(novel)|tag|http://www.semanlink.net/tag/erich_maria_remarque +http://en.wikipedia.org/wiki/Spark_of_Life_(novel)|tag|http://www.semanlink.net/tag/kz +http://en.wikipedia.org/wiki/Spark_of_Life_(novel)|title|Spark of Life (novel) +http://en.wikipedia.org/wiki/Spark_of_Life_(novel)|creationTime|2012-08-23T00:56:33Z +http://www.theguardian.com/commentisfree/2012/sep/10/france-tu-vous-distinction-twitter-generation|creationDate|2014-07-15 +http://www.theguardian.com/commentisfree/2012/sep/10/france-tu-vous-distinction-twitter-generation|tag|http://www.semanlink.net/tag/langue_francaise +http://www.theguardian.com/commentisfree/2012/sep/10/france-tu-vous-distinction-twitter-generation|tag|http://www.semanlink.net/tag/sarkozy +http://www.theguardian.com/commentisfree/2012/sep/10/france-tu-vous-distinction-twitter-generation|title|Don't let French lose the tu/vous distinction Agnès Poirier theguardian.com +http://www.theguardian.com/commentisfree/2012/sep/10/france-tu-vous-distinction-twitter-generation|creationTime|2014-07-15T18:14:27Z +https://cse.snu.ac.kr/en/node/30084|creationDate|2018-10-31 +https://cse.snu.ac.kr/en/node/30084|tag|http://www.semanlink.net/tag/nlp +https://cse.snu.ac.kr/en/node/30084|tag|http://www.semanlink.net/tag/variational_autoencoder_vae 
+https://cse.snu.ac.kr/en/node/30084|tag|http://www.semanlink.net/tag/deep_latent_variable_models +https://cse.snu.ac.kr/en/node/30084|tag|http://www.semanlink.net/tag/generative_adversarial_network +https://cse.snu.ac.kr/en/node/30084|comment|"Both GANs and VAEs have been remarkably effective at modeling images, and the learned latent representations often correspond to interesting, semantically-meaningful representations of the observed data. In contrast, GANs and VAEs have been less successful at modeling natural language, but for different reasons. + +- GANs have difficulty dealing with discrete output spaces (such as natural language) as the resulting objective is no longer differentiable with respect to the generator. +- VAEs can deal with discrete output spaces, but when a powerful model (e.g. LSTM) is used as a generator, the model learns to ignore the latent variable and simply becomes a language model." +https://cse.snu.ac.kr/en/node/30084|title|[Seminar] Deep Latent Variable Models of Natural Language +https://cse.snu.ac.kr/en/node/30084|creationTime|2018-10-31T23:27:31Z +https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html|creationDate|2018-11-05 +https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html|tag|http://www.semanlink.net/tag/bert +https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html|title|Google AI Blog: Open Sourcing BERT: State-of-the-Art Pre-training for Natural Language Processing +https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html|creationTime|2018-11-05T15:13:01Z +http://www.slideshare.net/fpservant/product-customization-as-linked-data|creationDate|2012-06-03 +http://www.slideshare.net/fpservant/product-customization-as-linked-data|tag|http://www.semanlink.net/tag/c2gweb +http://www.slideshare.net/fpservant/product-customization-as-linked-data|tag|http://www.semanlink.net/tag/configuration_as_linked_data +http://www.slideshare.net/fpservant/product-customization-as-linked-data|tag|http://www.semanlink.net/tag/fpservant_slideshare +http://www.slideshare.net/fpservant/product-customization-as-linked-data|tag|http://www.semanlink.net/tag/eswc_2012 +http://www.slideshare.net/fpservant/product-customization-as-linked-data|title|Product Customization as Linked Data @slideshare +http://www.slideshare.net/fpservant/product-customization-as-linked-data|creationTime|2012-06-03T17:03:00Z +http://www.newscientist.com/article.ns?id=dn8105&print=true|creationDate|2005-10-06 +http://www.newscientist.com/article.ns?id=dn8105&print=true|tag|http://www.semanlink.net/tag/nanotechnologies +http://www.newscientist.com/article.ns?id=dn8105&print=true|tag|http://www.semanlink.net/tag/genetically_engineered_micro_and_nanodevices +http://www.newscientist.com/article.ns?id=dn8105&print=true|title|Micro-organisms may be turned into nano-circuitry - New Scientist +https://gist.github.com/JeniT/2927644|creationDate|2014-09-08 +https://gist.github.com/JeniT/2927644|tag|http://www.semanlink.net/tag/uri_template +https://gist.github.com/JeniT/2927644|tag|http://www.semanlink.net/tag/jeni_tennison +https://gist.github.com/JeniT/2927644|tag|http://www.semanlink.net/tag/rdf_forms +https://gist.github.com/JeniT/2927644|tag|http://www.semanlink.net/tag/leigh_dodds +https://gist.github.com/JeniT/2927644|title|Possible way to provide POSTable URI in RDF +https://gist.github.com/JeniT/2927644|creationTime|2014-09-08T13:57:38Z +http://en.wikipedia.org/wiki/Poor_Law|creationDate|2008-01-14 
+http://en.wikipedia.org/wiki/Poor_Law|tag|http://www.semanlink.net/tag/securite_sociale +http://en.wikipedia.org/wiki/Poor_Law|tag|http://www.semanlink.net/tag/royaume_uni +http://en.wikipedia.org/wiki/Poor_Law|tag|http://www.semanlink.net/tag/pauvrete +http://en.wikipedia.org/wiki/Poor_Law|title|Poor Law - Wikipedia +http://en.wikipedia.org/wiki/Poor_Law|creationTime|2008-01-14T13:22:42Z +http://www.technologyreview.com/blog/arxiv/25331/|creationDate|2010-06-23 +http://www.technologyreview.com/blog/arxiv/25331/|tag|http://www.semanlink.net/tag/mecanique_quantique +http://www.technologyreview.com/blog/arxiv/25331/|tag|http://www.semanlink.net/tag/relativite_generale +http://www.technologyreview.com/blog/arxiv/25331/|tag|http://www.semanlink.net/tag/gravitation +http://www.technologyreview.com/blog/arxiv/25331/|title|Technology Review: Blogs: arXiv blog: New Quantum Theory Separates Gravitational and Inertial Mass +http://www.technologyreview.com/blog/arxiv/25331/|creationTime|2010-06-23T00:43:16Z +http://www.tagcloud.com|creationDate|2005-06-15 +http://www.tagcloud.com|tag|http://www.semanlink.net/tag/tagging +http://www.tagcloud.com|tag|http://www.semanlink.net/tag/folksonomy +http://www.tagcloud.com|comment|TagCloud is an automated Folksonomy tool. Essentially, TagCloud searches any number of RSS feeds you specify, extracts keywords from the content and lists them according to prevalence within the RSS feeds. Clicking on the tag’s link will display a list of all the article abstracts associated with that keyword. +http://www.tagcloud.com|title|TagCloud - Home +http://manu.sporny.org/2014/github-adds-json-ld-support/|creationDate|2014-09-20 +http://manu.sporny.org/2014/github-adds-json-ld-support/|tag|http://www.semanlink.net/tag/schema_org_actions +http://manu.sporny.org/2014/github-adds-json-ld-support/|tag|http://www.semanlink.net/tag/github +http://manu.sporny.org/2014/github-adds-json-ld-support/|tag|http://www.semanlink.net/tag/json_ld +http://manu.sporny.org/2014/github-adds-json-ld-support/|title|Github adds JSON-LD support in core product The Beautiful, Tormented Machine +http://manu.sporny.org/2014/github-adds-json-ld-support/|creationTime|2014-09-20T16:52:00Z +http://www.indiana.edu/~arch/saa/matrix/saa/saa_mod02.html|creationDate|2007-02-26 +http://www.indiana.edu/~arch/saa/matrix/saa/saa_mod02.html|tag|http://www.semanlink.net/tag/first_americans +http://www.indiana.edu/~arch/saa/matrix/saa/saa_mod02.html|title|South American Archaeology: Paleo-Indian +http://www.indiana.edu/~arch/saa/matrix/saa/saa_mod02.html|creationTime|2007-02-26T22:21:14Z +http://meta.wikimedia.org/wiki/Running_MediaWiki_on_Mac_OS_X#Required_software|creationDate|2005-10-11 +http://meta.wikimedia.org/wiki/Running_MediaWiki_on_Mac_OS_X#Required_software|tag|http://www.semanlink.net/tag/mac_os_x +http://meta.wikimedia.org/wiki/Running_MediaWiki_on_Mac_OS_X#Required_software|tag|http://www.semanlink.net/tag/wiki_software +http://meta.wikimedia.org/wiki/Running_MediaWiki_on_Mac_OS_X#Required_software|title|Help:Running MediaWiki on Mac OS X - Meta +https://www.theatlantic.com/business/archive/2016/08/why-arent-any-bankers-in-prison-for-causing-the-financial-crisis/496232/?single_page=true|creationDate|2017-12-17 +https://www.theatlantic.com/business/archive/2016/08/why-arent-any-bankers-in-prison-for-causing-the-financial-crisis/496232/?single_page=true|tag|http://www.semanlink.net/tag/crise_financiere 
+https://www.theatlantic.com/business/archive/2016/08/why-arent-any-bankers-in-prison-for-causing-the-financial-crisis/496232/?single_page=true|tag|http://www.semanlink.net/tag/justice_americaine +https://www.theatlantic.com/business/archive/2016/08/why-arent-any-bankers-in-prison-for-causing-the-financial-crisis/496232/?single_page=true|comment|It’s what corporations were designed to do: Let a bunch of people get together, take some strategic risks they might otherwise not take, and then make sure none of them is devastated individually if things go south. +https://www.theatlantic.com/business/archive/2016/08/why-arent-any-bankers-in-prison-for-causing-the-financial-crisis/496232/?single_page=true|title|Why Aren’t Any Bankers in Prison for Causing the Financial Crisis? - The Atlantic +https://www.theatlantic.com/business/archive/2016/08/why-arent-any-bankers-in-prison-for-causing-the-financial-crisis/496232/?single_page=true|creationTime|2017-12-17T00:37:56Z +http://acl2014.org/acl2014/P14-1/pdf/P14-1119.pdf|creationDate|2018-08-10 +http://acl2014.org/acl2014/P14-1/pdf/P14-1119.pdf|tag|http://www.semanlink.net/tag/survey +http://acl2014.org/acl2014/P14-1/pdf/P14-1119.pdf|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://acl2014.org/acl2014/P14-1/pdf/P14-1119.pdf|comment|[same author](/doc/?uri=http%3A%2F%2Fwww.hlt.utdallas.edu%2F%7Evince%2Fpapers%2Fcoling10-keyphrase.pdf) +http://acl2014.org/acl2014/P14-1/pdf/P14-1119.pdf|relatedDoc|http://www.hlt.utdallas.edu/~vince/papers/coling10-keyphrase.pdf +http://acl2014.org/acl2014/P14-1/pdf/P14-1119.pdf|title|Automatic Keyphrase Extraction: A Survey of the State of the Art (2014) +http://acl2014.org/acl2014/P14-1/pdf/P14-1119.pdf|creationTime|2018-08-10T10:51:50Z +http://www.memo.fr/dossier.asp?ID=79|creationDate|2005-04-17 +http://www.memo.fr/dossier.asp?ID=79|tag|http://www.semanlink.net/tag/histoire +http://www.memo.fr/dossier.asp?ID=79|tag|http://www.semanlink.net/tag/mongol +http://www.lemonde.fr/sciences/article/2012/03/06/un-robot-a-quatre-pattes-bat-un-record-de-vitesse_1652844_1650684.html|creationDate|2012-03-07 +http://www.lemonde.fr/sciences/article/2012/03/06/un-robot-a-quatre-pattes-bat-un-record-de-vitesse_1652844_1650684.html|tag|http://www.semanlink.net/tag/darpa +http://www.lemonde.fr/sciences/article/2012/03/06/un-robot-a-quatre-pattes-bat-un-record-de-vitesse_1652844_1650684.html|tag|http://www.semanlink.net/tag/robotique +http://www.lemonde.fr/sciences/article/2012/03/06/un-robot-a-quatre-pattes-bat-un-record-de-vitesse_1652844_1650684.html|title|Un robot à quatre pattes bat un record de vitesse +http://www.lemonde.fr/sciences/article/2012/03/06/un-robot-a-quatre-pattes-bat-un-record-de-vitesse_1652844_1650684.html|creationTime|2012-03-07T01:07:24Z +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0105.html|creationDate|2013-07-21 +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0105.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0105.html|tag|http://www.semanlink.net/tag/rdfa +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0105.html|comment|Is there an equivalent method for doing itemref in RDFa? 
+http://lists.w3.org/Archives/Public/public-vocabs/2013May/0105.html|title|Re: Offer data in separate places in HTML from Robert Kost on 2013-05-14 (public-vocabs@w3.org from May 2013) +http://lists.w3.org/Archives/Public/public-vocabs/2013May/0105.html|creationTime|2013-07-21T12:12:33Z +http://marketplace.eclipse.org/content/maven-integration-eclipse-wtp#.UDPK1ELUN9q|creationDate|2012-08-21 +http://marketplace.eclipse.org/content/maven-integration-eclipse-wtp#.UDPK1ELUN9q|tag|http://www.semanlink.net/tag/wtp +http://marketplace.eclipse.org/content/maven-integration-eclipse-wtp#.UDPK1ELUN9q|tag|http://www.semanlink.net/tag/eclipse +http://marketplace.eclipse.org/content/maven-integration-eclipse-wtp#.UDPK1ELUN9q|tag|http://www.semanlink.net/tag/maven +http://marketplace.eclipse.org/content/maven-integration-eclipse-wtp#.UDPK1ELUN9q|title|Maven Integration for Eclipse WTP Eclipse Plugins, Bundles and Products - Eclipse Marketplace +http://marketplace.eclipse.org/content/maven-integration-eclipse-wtp#.UDPK1ELUN9q|creationTime|2012-08-21T20:19:31Z +http://www4.wiwiss.fu-berlin.de/bizer/rdfapi/|creationDate|2011-01-22 +http://www4.wiwiss.fu-berlin.de/bizer/rdfapi/|tag|http://www.semanlink.net/tag/rdf_dev +http://www4.wiwiss.fu-berlin.de/bizer/rdfapi/|tag|http://www.semanlink.net/tag/php +http://www4.wiwiss.fu-berlin.de/bizer/rdfapi/|title|RAP - Rdf API for PHP +http://www4.wiwiss.fu-berlin.de/bizer/rdfapi/|creationTime|2011-01-22T12:00:39Z +http://news.bbc.co.uk/2/hi/technology/4072704.stm|creationDate|2005-06-08 +http://news.bbc.co.uk/2/hi/technology/4072704.stm|tag|http://www.semanlink.net/tag/fait_divers +http://news.bbc.co.uk/2/hi/technology/4072704.stm|tag|http://www.semanlink.net/tag/massively_multiplayer_online_games +http://news.bbc.co.uk/2/hi/technology/4072704.stm|tag|http://www.semanlink.net/tag/chine +http://news.bbc.co.uk/2/hi/technology/4072704.stm|comment|"Qui Chengwei stabbed Zhu Caoyuan in the chest when he found out he had sold his virtual sword for 7,200 Yuan (£473). +The sword, which Mr Qui had lent to Mr Zhu, was won in the popular online game Legend of Mir 3. +Attempts to take the dispute to the police failed because there is currently no law in China to protect virtual property." 
+http://news.bbc.co.uk/2/hi/technology/4072704.stm|title|BBC NEWS Technology Chinese gamer sentenced to life
+http://news.bbc.co.uk/2/hi/technology/4072704.stm|source|BBC
+http://maven.apache.org/guides/mini/guide-ide-eclipse.html|creationDate|2012-08-14
+http://maven.apache.org/guides/mini/guide-ide-eclipse.html|tag|http://www.semanlink.net/tag/eclipse
+http://maven.apache.org/guides/mini/guide-ide-eclipse.html|tag|http://www.semanlink.net/tag/maven
+http://maven.apache.org/guides/mini/guide-ide-eclipse.html|title|Maven - Guide to using Eclipse with Maven 2.x
+http://maven.apache.org/guides/mini/guide-ide-eclipse.html|creationTime|2012-08-14T00:20:41Z
+http://www.automotive-ontology.org/|creationDate|2014-07-24
+http://www.automotive-ontology.org/|tag|http://www.semanlink.net/tag/automotive_ontology_working_group
+http://www.automotive-ontology.org/|title|The Automotive Ontology Working Group
+http://www.automotive-ontology.org/|creationTime|2014-07-24T15:06:40Z
+http://www.dlib.org/dlib/april05/hammond/04hammond.html|creationDate|2005-05-23
+http://www.dlib.org/dlib/april05/hammond/04hammond.html|tag|http://www.semanlink.net/tag/social_bookmarking
+http://www.dlib.org/dlib/april05/hammond/04hammond.html|tag|http://www.semanlink.net/tag/tagging
+http://www.dlib.org/dlib/april05/hammond/04hammond.html|title|Social Bookmarking Tools (I): A General Review
+http://del.icio.us/doc/api|creationDate|2005-10-13
+http://del.icio.us/doc/api|tag|http://www.semanlink.net/tag/del_icio_us
+http://del.icio.us/doc/api|title|del.icio.us/doc/api
+http://www.christian-faure.net/2016/10/02/ainsi-parlait-vraiment-zarathoustra/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|creationDate|2016-10-02
+http://www.christian-faure.net/2016/10/02/ainsi-parlait-vraiment-zarathoustra/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|tag|http://www.semanlink.net/tag/friedrich_nietzsche
+http://www.christian-faure.net/2016/10/02/ainsi-parlait-vraiment-zarathoustra/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|tag|http://www.semanlink.net/tag/christian_faure
+http://www.christian-faure.net/2016/10/02/ainsi-parlait-vraiment-zarathoustra/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|tag|http://www.semanlink.net/tag/zoroastre
+http://www.christian-faure.net/2016/10/02/ainsi-parlait-vraiment-zarathoustra/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|tag|http://www.semanlink.net/tag/philosophie
+http://www.christian-faure.net/2016/10/02/ainsi-parlait-vraiment-zarathoustra/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|comment|"le dualisme du bien et du mal n’existe que dans la pensée. Il n’y a pas d’objectivité extérieure au bien et au mal
+
+Les Gathas
+
+**le but de notre existence est de mener une vie heureuse et joyeuse sur cette terre**, et la raison de notre création est de **prendre part activement à une amélioration du monde afin que tous les êtres vivants, humains, animaux, plantes, vivent en paix et en plénitude**
+
+mantra : parole qui éveille la pensée
+
+**Zoroastre : celui qui s’adresse à son dieu comme un ami à un ami**
+
+accès à la maitrise de soi : pas un acte de volonté, pas maitrise des désirs, ni raisonnement. Elle provient d’un regard spécifique sur la vie qui se construit au fil du temps avec de l’expérience qui nous fait comprendre et interpréter ce que nous vivons d’une certaine manière.
+
+vous avez le choix : chacun décide.
+
+entre bien et le mal, seule la sagesse peut aider à choisir, et la sagesse est différente de la connaissance."
+http://www.christian-faure.net/2016/10/02/ainsi-parlait-vraiment-zarathoustra/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|title|Ainsi parlait vraiment Zarathoustra
+http://www.christian-faure.net/2016/10/02/ainsi-parlait-vraiment-zarathoustra/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|creationTime|2016-10-02T12:08:15Z
+http://news.bbc.co.uk/2/hi/africa/7246985.stm|creationDate|2011-11-17
+http://news.bbc.co.uk/2/hi/africa/7246985.stm|tag|http://www.semanlink.net/tag/genocide_rwandais
+http://news.bbc.co.uk/2/hi/africa/7246985.stm|title|BBC NEWS Africa Genocide hatred lingers in Rwanda schools
+http://news.bbc.co.uk/2/hi/africa/7246985.stm|creationTime|2011-11-17T14:27:59Z
+http://leobard.twoday.net/stories/1302436/|creationDate|2005-12-21
+http://leobard.twoday.net/stories/1302436/|tag|http://www.semanlink.net/tag/personal_ontology
+http://leobard.twoday.net/stories/1302436/|tag|http://www.semanlink.net/tag/semanlink_related
+http://leobard.twoday.net/stories/1302436/|tag|http://www.semanlink.net/tag/semantic_desktop
+http://leobard.twoday.net/stories/1302436/|tag|http://www.semanlink.net/tag/gnowsis
+http://leobard.twoday.net/stories/1302436/|comment|Connecting data bits from here and there to an information space for yourself. ...Personal ontologies, where you express your mental model and link it to data. Not just data: data with meaning to people.
+http://leobard.twoday.net/stories/1302436/|title|Semantic World and Cyberspace: the heart of gnowsis
+http://fr.wikipedia.org/wiki/Cascade_du_D%C3%A9roc|creationDate|2013-07-31
+http://fr.wikipedia.org/wiki/Cascade_du_D%C3%A9roc|tag|http://www.semanlink.net/tag/aubrac
+http://fr.wikipedia.org/wiki/Cascade_du_D%C3%A9roc|tag|http://www.semanlink.net/tag/cascade
+http://fr.wikipedia.org/wiki/Cascade_du_D%C3%A9roc|title|Cascade du Déroc - Wikipédia
+http://fr.wikipedia.org/wiki/Cascade_du_D%C3%A9roc|creationTime|2013-07-31T10:56:47Z
+http://www.cnrs.fr/insb/recherche/parutions/articles2017/j-curot.html?utm_content=bufferd6cca&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2017-05-28
+http://www.cnrs.fr/insb/recherche/parutions/articles2017/j-curot.html?utm_content=bufferd6cca&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/souvenirs
+http://www.cnrs.fr/insb/recherche/parutions/articles2017/j-curot.html?utm_content=bufferd6cca&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/memoire_humaine
+http://www.cnrs.fr/insb/recherche/parutions/articles2017/j-curot.html?utm_content=bufferd6cca&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|CNRS - Quand les souvenirs refont surface grâce à la stimulation électrique cérébrale…
+http://www.cnrs.fr/insb/recherche/parutions/articles2017/j-curot.html?utm_content=bufferd6cca&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2017-05-28T12:26:10Z
+https://www.youtube.com/watch?v=vZY5ehnYfPo|creationDate|2017-05-20
+https://www.youtube.com/watch?v=vZY5ehnYfPo|tag|http://www.semanlink.net/tag/enfance
+https://www.youtube.com/watch?v=vZY5ehnYfPo|tag|http://www.semanlink.net/tag/livre
+https://www.youtube.com/watch?v=vZY5ehnYfPo|tag|http://www.semanlink.net/tag/singe
+https://www.youtube.com/watch?v=vZY5ehnYfPo|title|Comment Jacko, le petit singe savant, retrouva sa maman - John S. Goodall
+https://www.youtube.com/watch?v=vZY5ehnYfPo|creationTime|2017-05-20T15:56:31Z
+http://code.google.com/p/ontology-browser/|creationDate|2011-07-19
+http://code.google.com/p/ontology-browser/|tag|http://www.semanlink.net/tag/linked_data_browser
+http://code.google.com/p/ontology-browser/|tag|http://www.semanlink.net/tag/rdf_browser
+http://code.google.com/p/ontology-browser/|tag|http://www.semanlink.net/tag/rdf_owl_documentation_tool
+http://code.google.com/p/ontology-browser/|tag|http://www.semanlink.net/tag/owl_ontology_browser
+http://code.google.com/p/ontology-browser/|title|ontology-browser - An OWL Ontology and RDF (Linked Open Data) Browser - Google Project Hosting
+http://code.google.com/p/ontology-browser/|creationTime|2011-07-19T01:59:54Z
+http://johnvey.com/features/deliciousdirector/|creationDate|2005-10-05
+http://johnvey.com/features/deliciousdirector/|tag|http://www.semanlink.net/tag/ajax
+http://johnvey.com/features/deliciousdirector/|tag|http://www.semanlink.net/tag/del_icio_us
+http://johnvey.com/features/deliciousdirector/|title|del.icio.us direc.tor: Delivering A High-Performance AJAX Web Service Broker :: Johnvey
+https://towardsdatascience.com/beginners-guide-to-data-science-python-docker-3181fd321a5c|creationDate|2018-03-26
+https://towardsdatascience.com/beginners-guide-to-data-science-python-docker-3181fd321a5c|tag|http://www.semanlink.net/tag/docker_python
+https://towardsdatascience.com/beginners-guide-to-data-science-python-docker-3181fd321a5c|title|Beginner’s guide to Data Science — Python + Docker – Towards Data Science
+https://towardsdatascience.com/beginners-guide-to-data-science-python-docker-3181fd321a5c|creationTime|2018-03-26T08:29:44Z
+http://www.francesoir.fr/uploads/articles/d4273f5740d926b588b3f2dbc04ea8c1.jpg|creationDate|2008-11-27
+http://www.francesoir.fr/uploads/articles/d4273f5740d926b588b3f2dbc04ea8c1.jpg|tag|http://www.semanlink.net/tag/darfour
+http://www.francesoir.fr/uploads/articles/d4273f5740d926b588b3f2dbc04ea8c1.jpg|comment|"""Nous sommes venus vous défier, et vous n'êtes même pas capables de défendre vos femmes"""
+http://www.francesoir.fr/uploads/articles/d4273f5740d926b588b3f2dbc04ea8c1.jpg|title|Darfour, le diable arrive à cheval.jpg
+http://www.francesoir.fr/uploads/articles/d4273f5740d926b588b3f2dbc04ea8c1.jpg|creationTime|2008-11-27T01:26:44Z
+http://fr.wikipedia.org/wiki/Un_linceul_n'a_pas_de_poches|creationDate|2011-09-02
+http://fr.wikipedia.org/wiki/Un_linceul_n'a_pas_de_poches|tag|http://www.semanlink.net/tag/film_francais
+http://fr.wikipedia.org/wiki/Un_linceul_n'a_pas_de_poches|comment|film français de Jean-Pierre Mocky
+http://fr.wikipedia.org/wiki/Un_linceul_n'a_pas_de_poches|title|Un linceul n'a pas de poches
+http://fr.wikipedia.org/wiki/Un_linceul_n'a_pas_de_poches|creationTime|2011-09-02T22:43:52Z
+http://www-sop.inria.fr/edelweiss/software/corese/|creationDate|2010-05-14
+http://www-sop.inria.fr/edelweiss/software/corese/|tag|http://www.semanlink.net/tag/semantic_search
+http://www-sop.inria.fr/edelweiss/software/corese/|tag|http://www.semanlink.net/tag/inria
+http://www-sop.inria.fr/edelweiss/software/corese/|comment|COnceptual REsource Search Engine
+http://www-sop.inria.fr/edelweiss/software/corese/|title|Corese
+http://www-sop.inria.fr/edelweiss/software/corese/|creationTime|2010-05-14T15:23:10Z
+http://www.wolframscience.com/nks/|creationDate|2017-05-15
+http://www.wolframscience.com/nks/|tag|http://www.semanlink.net/tag/wolfram
+http://www.wolframscience.com/nks/|tag|http://www.semanlink.net/tag/computational_universe +http://www.wolframscience.com/nks/|title|Stephen Wolfram: A New Kind of Science +http://www.wolframscience.com/nks/|creationTime|2017-05-15T09:19:09Z +http://developer.apple.com/referencelibrary/Java/|creationDate|2006-01-15 +http://developer.apple.com/referencelibrary/Java/|tag|http://www.semanlink.net/tag/apple_java +http://developer.apple.com/referencelibrary/Java/|title|Apple: Java Reference Library +http://www.weebly.com/|creationDate|2015-10-13 +http://www.weebly.com/|tag|http://www.semanlink.net/tag/paul_graham +http://www.weebly.com/|tag|http://www.semanlink.net/tag/website_creation +http://www.weebly.com/|title|Weebly Website Builder: Create a Free Website, Store or Blog +http://www.weebly.com/|creationTime|2015-10-13T10:18:14Z +https://ds9a.nl/amazing-dna/|creationDate|2018-01-26 +https://ds9a.nl/amazing-dna/|tag|http://www.semanlink.net/tag/adn +https://ds9a.nl/amazing-dna/|tag|http://www.semanlink.net/tag/programming +https://ds9a.nl/amazing-dna/|title|DNA seen through the eyes of a coder +https://ds9a.nl/amazing-dna/|creationTime|2018-01-26T15:01:21Z +http://www.perceptualedge.com/articles/visual_business_intelligence/big_data_big_ruse.pdf|creationDate|2013-01-02 +http://www.perceptualedge.com/articles/visual_business_intelligence/big_data_big_ruse.pdf|tag|http://www.semanlink.net/tag/big_data +http://www.perceptualedge.com/articles/visual_business_intelligence/big_data_big_ruse.pdf|title|Big Data, Big Ruse +http://www.perceptualedge.com/articles/visual_business_intelligence/big_data_big_ruse.pdf|creationTime|2013-01-02T11:17:39Z +http://dannyayers.com/2011/10/15/A-Role-Model-of-Consciousness|creationDate|2011-10-18 +http://dannyayers.com/2011/10/15/A-Role-Model-of-Consciousness|tag|http://www.semanlink.net/tag/conscience +http://dannyayers.com/2011/10/15/A-Role-Model-of-Consciousness|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2011/10/15/A-Role-Model-of-Consciousness|title|A Role Model of Consciousness +http://dannyayers.com/2011/10/15/A-Role-Model-of-Consciousness|creationTime|2011-10-18T00:36:38Z +http://www.tamtaminfo.com/tamforum/viewtopic.php?f=4&t=1337|creationDate|2014-03-29 +http://www.tamtaminfo.com/tamforum/viewtopic.php?f=4&t=1337|tag|http://www.semanlink.net/tag/zinder +http://www.tamtaminfo.com/tamforum/viewtopic.php?f=4&t=1337|tag|http://www.semanlink.net/tag/pyramide +http://www.tamtaminfo.com/tamforum/viewtopic.php?f=4&t=1337|tag|http://www.semanlink.net/tag/archeologie +http://www.tamtaminfo.com/tamforum/viewtopic.php?f=4&t=1337|tag|http://www.semanlink.net/tag/niger +http://www.tamtaminfo.com/tamforum/viewtopic.php?f=4&t=1337|title|TamTaminfo.com •Zinder : découverte d'une pyramide et d'un sphinx +http://www.tamtaminfo.com/tamforum/viewtopic.php?f=4&t=1337|creationTime|2014-03-29T17:34:15Z +http://www.ipo.org/wp-content/uploads/2017/03/Free-Search-Tools-Bulletin_-20170301.pdf|creationDate|2019-02-20 +http://www.ipo.org/wp-content/uploads/2017/03/Free-Search-Tools-Bulletin_-20170301.pdf|tag|http://www.semanlink.net/tag/patent_finding +http://www.ipo.org/wp-content/uploads/2017/03/Free-Search-Tools-Bulletin_-20170301.pdf|tag|http://www.semanlink.net/tag/survey +http://www.ipo.org/wp-content/uploads/2017/03/Free-Search-Tools-Bulletin_-20170301.pdf|tag|http://www.semanlink.net/tag/ip_ir_ml_ia +http://www.ipo.org/wp-content/uploads/2017/03/Free-Search-Tools-Bulletin_-20170301.pdf|title|Patent finding using free search tools 
+http://www.ipo.org/wp-content/uploads/2017/03/Free-Search-Tools-Bulletin_-20170301.pdf|creationTime|2019-02-20T11:54:29Z +http://www.slate.fr/story/4085/lettre-ouverte-%C3%A0-un-repr%C3%A9sentant-de-la-nation|creationDate|2009-04-22 +http://www.slate.fr/story/4085/lettre-ouverte-%C3%A0-un-repr%C3%A9sentant-de-la-nation|tag|http://www.semanlink.net/tag/hadopi_riposte_graduee +http://www.slate.fr/story/4085/lettre-ouverte-%C3%A0-un-repr%C3%A9sentant-de-la-nation|title|Hadopi: lettre ouverte à un représentant de la nation slate +http://www.slate.fr/story/4085/lettre-ouverte-%C3%A0-un-repr%C3%A9sentant-de-la-nation|creationTime|2009-04-22T17:58:42Z +http://wiki.apache.org/solr/SolrCloud|creationDate|2013-03-20 +http://wiki.apache.org/solr/SolrCloud|tag|http://www.semanlink.net/tag/solrcloud +http://wiki.apache.org/solr/SolrCloud|title|SolrCloud +http://wiki.apache.org/solr/SolrCloud|creationTime|2013-03-20T00:18:11Z +https://academic.oup.com/bioinformatics/article/33/14/i37/3953940|creationDate|2018-03-05 +https://academic.oup.com/bioinformatics/article/33/14/i37/3953940|tag|http://www.semanlink.net/tag/named_entity_recognition +https://academic.oup.com/bioinformatics/article/33/14/i37/3953940|tag|http://www.semanlink.net/tag/bioinformatics +https://academic.oup.com/bioinformatics/article/33/14/i37/3953940|tag|http://www.semanlink.net/tag/word_embedding +https://academic.oup.com/bioinformatics/article/33/14/i37/3953940|title|Deep learning with word embeddings improves biomedical named entity recognition Bioinformatics Oxford Academic (2017) +https://academic.oup.com/bioinformatics/article/33/14/i37/3953940|creationTime|2018-03-05T19:28:35Z +http://www.snee.com/bobdc.blog/2009/03/querying-a-set-of-named-rdf-gr.html|creationDate|2009-03-31 +http://www.snee.com/bobdc.blog/2009/03/querying-a-set-of-named-rdf-gr.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2009/03/querying-a-set-of-named-rdf-gr.html|tag|http://www.semanlink.net/tag/named_graphs +http://www.snee.com/bobdc.blog/2009/03/querying-a-set-of-named-rdf-gr.html|tag|http://www.semanlink.net/tag/sparql_tips +http://www.snee.com/bobdc.blog/2009/03/querying-a-set-of-named-rdf-gr.html|title|Querying a set of named RDF graphs without naming the graphs - bobdc.blog +http://www.snee.com/bobdc.blog/2009/03/querying-a-set-of-named-rdf-gr.html|creationTime|2009-03-31T14:47:23Z +http://rdf4food.org/moin.cgi/SparqlPress|creationDate|2008-02-17 +http://rdf4food.org/moin.cgi/SparqlPress|tag|http://www.semanlink.net/tag/sparqlpress +http://rdf4food.org/moin.cgi/SparqlPress|comment|SparqlPress explores the addition of an RDF store to the Wordpress weblogging system through PHP-based extensions, providing a basic Personal Semantic Web Aggregator that can integrate interesting data from nearby in the Web, exposing it to local and remote applications via the SPARQL query language and protocol. The primary goal is to populate the local store with an interesting subset of the nearby Semantic Web, through discovery and crawling of RDF data from the websites (typically blogs; initially Wordpress blogs running the FOAF/SKOS plugin). 
+http://rdf4food.org/moin.cgi/SparqlPress|title|SparqlPress - SPARQL-ing days
+http://rdf4food.org/moin.cgi/SparqlPress|creationTime|2008-02-17T02:37:06Z
+https://web.stanford.edu/class/cs124/lec/sem|creationDate|2017-07-20
+https://web.stanford.edu/class/cs124/lec/sem|tag|http://www.semanlink.net/tag/text_similarity
+https://web.stanford.edu/class/cs124/lec/sem|tag|http://www.semanlink.net/tag/slides
+https://web.stanford.edu/class/cs124/lec/sem|tag|http://www.semanlink.net/tag/dan_jurafsky
+https://web.stanford.edu/class/cs124/lec/sem|tag|http://www.semanlink.net/tag/nlp_stanford
+https://web.stanford.edu/class/cs124/lec/sem|comment|"thesaurus based meaning, Distributional models of meaning
+
+Term-Context matrix. Term-document matrix: use tf-idf instead of raw term counts, for the term-context matrix, use Positive Pointwise Mutual Information (PPMI: Do words x and y co-occur more than if they were independent?)
+
+"
+https://web.stanford.edu/class/cs124/lec/sem|title|Word Meaning and Similarity - Stanford University
+https://web.stanford.edu/class/cs124/lec/sem|creationTime|2017-07-20T00:09:07Z
+http://www.semanticblogging.org|creationDate|2006-03-11
+http://www.semanticblogging.org|tag|http://www.semanlink.net/tag/semblog
+http://www.semanticblogging.org|title|Semantic Blogging Demonstrator
+http://www.activewidgets.com/javascript.forum.6114.21/dynamic-load-javascript-from-javascript.html|creationDate|2012-07-13
+http://www.activewidgets.com/javascript.forum.6114.21/dynamic-load-javascript-from-javascript.html|tag|http://www.semanlink.net/tag/download_execute_javascript
+http://www.activewidgets.com/javascript.forum.6114.21/dynamic-load-javascript-from-javascript.html|title|ActiveWidgets • dynamic load javascript from javascript • java javascript download
+http://www.activewidgets.com/javascript.forum.6114.21/dynamic-load-javascript-from-javascript.html|creationTime|2012-07-13T02:07:00Z
+https://arxiv.org/abs/1601.00670|creationDate|2018-08-07
+https://arxiv.org/abs/1601.00670|tag|http://www.semanlink.net/tag/survey
+https://arxiv.org/abs/1601.00670|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1601.00670|tag|http://www.semanlink.net/tag/variational_bayesian_methods
+https://arxiv.org/abs/1601.00670|tag|http://www.semanlink.net/tag/statistics
+https://arxiv.org/abs/1601.00670|arxiv_author|Alp Kucukelbir
+https://arxiv.org/abs/1601.00670|arxiv_author|David M. Blei
+https://arxiv.org/abs/1601.00670|arxiv_author|Jon D. McAuliffe
+https://arxiv.org/abs/1601.00670|title|[1601.00670] Variational Inference: A Review for Statisticians
+https://arxiv.org/abs/1601.00670|creationTime|2018-08-07T10:37:09Z
+https://arxiv.org/abs/1601.00670|arxiv_summary|"One of the core problems of modern statistics is to approximate
+difficult-to-compute probability densities. This problem is especially
+important in Bayesian statistics, which frames all inference about unknown
+quantities as a calculation involving the posterior density. In this paper, we
+review variational inference (VI), a method from machine learning that
+approximates probability densities through optimization. VI has been used in
+many applications and tends to be faster than classical methods, such as Markov
+chain Monte Carlo sampling. The idea behind VI is to first posit a family of
+densities and then to find the member of that family which is close to the
+target. Closeness is measured by Kullback-Leibler divergence. We review the
+ideas behind mean-field variational inference, discuss the special case of VI
+applied to exponential family models, present a full example with a Bayesian
+mixture of Gaussians, and derive a variant that uses stochastic optimization to
+scale up to massive data. We discuss modern research in VI and highlight
+important open problems. VI is powerful, but it is not yet well understood. Our
+hope in writing this paper is to catalyze statistical research on this class of
+algorithms."
+https://arxiv.org/abs/1601.00670|arxiv_firstAuthor|David M. Blei
+https://arxiv.org/abs/1601.00670|arxiv_updated|2018-05-09T20:52:28Z
+https://arxiv.org/abs/1601.00670|arxiv_title|Variational Inference: A Review for Statisticians
+https://arxiv.org/abs/1601.00670|arxiv_published|2016-01-04T21:28:04Z
+https://arxiv.org/abs/1601.00670|arxiv_num|1601.00670
+http://www.modamag.com/8milereview.htm|creationDate|2006-02-24
+http://www.modamag.com/8milereview.htm|tag|http://www.semanlink.net/tag/eminem
+http://www.modamag.com/8milereview.htm|tag|http://www.semanlink.net/tag/film_americain
+http://www.modamag.com/8milereview.htm|tag|http://www.semanlink.net/tag/rap
+http://www.modamag.com/8milereview.htm|tag|http://www.semanlink.net/tag/detroit
+http://www.modamag.com/8milereview.htm|comment|American movie with Eminem. Rap battles in Detroit
+http://www.modamag.com/8milereview.htm|title|8 Mile
+https://s2-eu4.ixquick.com/|creationDate|2013-09-02
+https://s2-eu4.ixquick.com/|tag|http://www.semanlink.net/tag/search_engines
+https://s2-eu4.ixquick.com/|title|Ixquick Web Recherche
+https://s2-eu4.ixquick.com/|creationTime|2013-09-02T13:53:11Z
+http://dataliftcamp.eventbrite.fr/|creationDate|2012-09-17
+http://dataliftcamp.eventbrite.fr/|tag|http://www.semanlink.net/tag/datalift
+http://dataliftcamp.eventbrite.fr/|title|Datalift Camp - 9-10 octobre 2012
+http://dataliftcamp.eventbrite.fr/|creationTime|2012-09-17T23:49:30Z
+http://spinrdf.org/|creationDate|2010-10-01
+http://spinrdf.org/|tag|http://www.semanlink.net/tag/topbraid_spin
+http://spinrdf.org/|comment|"SPIN is a collection of RDF vocabularies enabling the use of SPARQL to define constraints and inference rules on Semantic Web models. SPIN also provides meta-modeling capabilities that allow users to define their own SPARQL functions and query templates. Finally, SPIN includes a ready to use library of common functions.<br/>
+Instead of relying on the textual representation of SPARQL expressions, SPIN offers an RDF Schema for SPARQL. As a result, SPARQL queries can be stored as RDF triples together with any RDF domain model. This enables linkage of RDF resources with the associated SPARQL queries as well as sharing and reuse of SPARQL queries as part of Semantic Web models. An RDF vocabulary for SPARQL is the first layer in the SPIN framework. +
+Using the SPARQL RDF syntax, SPIN defines a light-weight collection of RDF properties that allow domain modelers to attach inference rules (as SPARQL Construct queries) and constraint checks/unit tests (as Construct or Ask queries) to RDFS or OWL class definitions +" +http://spinrdf.org/|title|SPIN - SPARQL Inferencing Notation +http://spinrdf.org/|creationTime|2010-10-01T01:30:53Z +https://www.letemps.ch/sciences/2017/07/07/bacterie-tueuse-doliviers-progresse-europe|creationDate|2017-07-09 +https://www.letemps.ch/sciences/2017/07/07/bacterie-tueuse-doliviers-progresse-europe|tag|http://www.semanlink.net/tag/oliviers +https://www.letemps.ch/sciences/2017/07/07/bacterie-tueuse-doliviers-progresse-europe|title|La bactérie tueuse d’oliviers progresse en Europe - Le Temps +https://www.letemps.ch/sciences/2017/07/07/bacterie-tueuse-doliviers-progresse-europe|creationTime|2017-07-09T10:32:29Z +http://planetrdf.com|creationDate|2005-05-20 +http://planetrdf.com|tag|http://www.semanlink.net/tag/rdf +http://planetrdf.com|title|Planet RDF +http://www.google.com/support/youtube/bin/answer.py?hl=en-GB&answer=132460|creationDate|2011-12-04 +http://www.google.com/support/youtube/bin/answer.py?hl=en-GB&answer=132460|tag|http://www.semanlink.net/tag/youtube +http://www.google.com/support/youtube/bin/answer.py?hl=en-GB&answer=132460|title|Optimising your video uploads - YouTube Help +http://www.google.com/support/youtube/bin/answer.py?hl=en-GB&answer=132460|creationTime|2011-12-04T01:39:47Z +http://aclweb.org/anthology/P14-3006|creationDate|2018-05-12 +http://aclweb.org/anthology/P14-3006|tag|http://www.semanlink.net/tag/word_embeddings_with_lexical_resources +http://aclweb.org/anthology/P14-3006|tag|http://www.semanlink.net/tag/phrase_embeddings +http://aclweb.org/anthology/P14-3006|comment|"> generalized phrases are part +of the inventory of linguistic units that we should +compute embeddings for and we have shown that +such embeddings are superior to word form embeddings +in a coreference resolution task and standard +paraphrase identification task" +http://aclweb.org/anthology/P14-3006|title|An Exploration of Embeddings for Generalized Phrases (2014) +http://aclweb.org/anthology/P14-3006|creationTime|2018-05-12T16:04:39Z +https://www.youtube.com/watch?v=x2AK5eIKL8c&index=1&list=LLFla3d0JK7zqq9JJkwgvqMQ|creationDate|2016-09-08 +https://www.youtube.com/watch?v=x2AK5eIKL8c&index=1&list=LLFla3d0JK7zqq9JJkwgvqMQ|tag|http://www.semanlink.net/tag/patti_smith +https://www.youtube.com/watch?v=x2AK5eIKL8c&index=1&list=LLFla3d0JK7zqq9JJkwgvqMQ|title|Patti Smith - Because The Night (1979) Germany - YouTube +https://www.youtube.com/watch?v=x2AK5eIKL8c&index=1&list=LLFla3d0JK7zqq9JJkwgvqMQ|creationTime|2016-09-08T01:54:58Z +http://www.dfki.uni-kl.de/~sauermann/2006/11/cooluris/|creationDate|2007-04-03 +http://www.dfki.uni-kl.de/~sauermann/2006/11/cooluris/|tag|http://www.semanlink.net/tag/leo_sauermann +http://www.dfki.uni-kl.de/~sauermann/2006/11/cooluris/|tag|http://www.semanlink.net/tag/hash_uris +http://www.dfki.uni-kl.de/~sauermann/2006/11/cooluris/|tag|http://www.semanlink.net/tag/richard_cyganiak +http://www.dfki.uni-kl.de/~sauermann/2006/11/cooluris/|tag|http://www.semanlink.net/tag/httprange_14 +http://www.dfki.uni-kl.de/~sauermann/2006/11/cooluris/|tag|http://www.semanlink.net/tag/httprange_14_solution +http://www.dfki.uni-kl.de/~sauermann/2006/11/cooluris/|comment|303 URIs and hash URIs +http://www.dfki.uni-kl.de/~sauermann/2006/11/cooluris/|title|Cool URIs for the Semantic Web 
+http://www.dfki.uni-kl.de/~sauermann/2006/11/cooluris/|creationTime|2007-04-03T23:26:43Z +http://www.technologyreview.com/computing/22702/?a=f|creationDate|2010-06-23 +http://www.technologyreview.com/computing/22702/?a=f|tag|http://www.semanlink.net/tag/i_b_m_s_watson +http://www.technologyreview.com/computing/22702/?a=f|comment|IBM's Watson will showcase the latest tricks in natural-language processing. +http://www.technologyreview.com/computing/22702/?a=f|title|How IBM Plans to Win Jeopardy! +http://www.technologyreview.com/computing/22702/?a=f|creationTime|2010-06-23T00:26:50Z +https://medium.com/prism-truth/82a1791c94d3|creationDate|2013-06-13 +https://medium.com/prism-truth/82a1791c94d3|tag|http://www.semanlink.net/tag/prism_surveillance_program +https://medium.com/prism-truth/82a1791c94d3|title|The PRISM Details Matter — PRISM Truth — Medium +https://medium.com/prism-truth/82a1791c94d3|creationTime|2013-06-13T16:35:01Z +http://www.antipope.org/charlie/blog-static/2013/12/why-i-want-bitcoin-to-die-in-a.html|creationDate|2013-12-20 +http://www.antipope.org/charlie/blog-static/2013/12/why-i-want-bitcoin-to-die-in-a.html|tag|http://www.semanlink.net/tag/bitcoin +http://www.antipope.org/charlie/blog-static/2013/12/why-i-want-bitcoin-to-die-in-a.html|title|Why I want Bitcoin to die in a fire - Charlie's Diary +http://www.antipope.org/charlie/blog-static/2013/12/why-i-want-bitcoin-to-die-in-a.html|creationTime|2013-12-20T13:40:06Z +http://spectrum.ieee.org/green-tech/solar/a-tower-of-molten-salt-will-deliver-solar-power-after-sunset|creationDate|2015-10-24 +http://spectrum.ieee.org/green-tech/solar/a-tower-of-molten-salt-will-deliver-solar-power-after-sunset|tag|http://www.semanlink.net/tag/energie_solaire +http://spectrum.ieee.org/green-tech/solar/a-tower-of-molten-salt-will-deliver-solar-power-after-sunset|comment|For the first time, solar thermal can compete with natural gas during nighttime peak demand +http://spectrum.ieee.org/green-tech/solar/a-tower-of-molten-salt-will-deliver-solar-power-after-sunset|title|A Tower of Molten Salt Will Deliver Solar Power After Sunset - IEEE Spectrum +http://spectrum.ieee.org/green-tech/solar/a-tower-of-molten-salt-will-deliver-solar-power-after-sunset|creationTime|2015-10-24T22:28:00Z +http://solr.pl/en/2010/11/15/solr-and-autocomplete-part-2/|creationDate|2015-06-20 +http://solr.pl/en/2010/11/15/solr-and-autocomplete-part-2/|tag|http://www.semanlink.net/tag/solr_autocomplete +http://solr.pl/en/2010/11/15/solr-and-autocomplete-part-2/|title|Solr and autocomplete (part 2) Solr Enterprise Search +http://solr.pl/en/2010/11/15/solr-and-autocomplete-part-2/|creationTime|2015-06-20T09:49:46Z +http://www.snee.com/bobdc.blog/2011/10/displaying-sparql-results-on-a.html|creationDate|2011-10-05 +http://www.snee.com/bobdc.blog/2011/10/displaying-sparql-results-on-a.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2011/10/displaying-sparql-results-on-a.html|tag|http://www.semanlink.net/tag/xslt +http://www.snee.com/bobdc.blog/2011/10/displaying-sparql-results-on-a.html|tag|http://www.semanlink.net/tag/sparql +http://www.snee.com/bobdc.blog/2011/10/displaying-sparql-results-on-a.html|tag|http://www.semanlink.net/tag/smartphone +http://www.snee.com/bobdc.blog/2011/10/displaying-sparql-results-on-a.html|title|Displaying SPARQL results on a mobile phone - bobdc.blog +http://www.snee.com/bobdc.blog/2011/10/displaying-sparql-results-on-a.html|creationTime|2011-10-05T08:32:17Z 
+http://www.semanticuniverse.com/topquadrant-monthly-column/group-blog-entry-building-competency-semantic-web-technology.html|creationDate|2010-08-24 +http://www.semanticuniverse.com/topquadrant-monthly-column/group-blog-entry-building-competency-semantic-web-technology.html|tag|http://www.semanlink.net/tag/semantic_web_training +http://www.semanticuniverse.com/topquadrant-monthly-column/group-blog-entry-building-competency-semantic-web-technology.html|tag|http://www.semanlink.net/tag/dean_allemang +http://www.semanticuniverse.com/topquadrant-monthly-column/group-blog-entry-building-competency-semantic-web-technology.html|title|Building Competency in Semantic Web Technology Semantic Universe +http://www.semanticuniverse.com/topquadrant-monthly-column/group-blog-entry-building-competency-semantic-web-technology.html|creationTime|2010-08-24T22:39:11Z +http://www.digital-web.com/articles/seven_javascript_techniques/|creationDate|2007-06-23 +http://www.digital-web.com/articles/seven_javascript_techniques/|tag|http://www.semanlink.net/tag/javascript +http://www.digital-web.com/articles/seven_javascript_techniques/|title|Digital Web Magazine - Seven JavaScript Techniques You Should Be Using Today +http://www.digital-web.com/articles/seven_javascript_techniques/|creationTime|2007-06-23T13:41:10Z +http://arstechnica.com/news.ars/post/20060521-6880.html|creationDate|2006-05-23 +http://arstechnica.com/news.ars/post/20060521-6880.html|tag|http://www.semanlink.net/tag/hollywood +http://arstechnica.com/news.ars/post/20060521-6880.html|tag|http://www.semanlink.net/tag/hdmi +http://arstechnica.com/news.ars/post/20060521-6880.html|tag|http://www.semanlink.net/tag/dvd +http://arstechnica.com/news.ars/post/20060521-6880.html|comment|"One of the most controversial aspects of these next-generation products is something called the Image Constraint Token (ICT), a security ""feature"" that allows studios to force-downgrade video quality on players that lack a special video output that was designed to thwart piracy. This ""HDMI"" connector standard is part of a ""protected pathway"" for video that was meant to combat piracy by making it impossible for pirates to tap into high-definition video output and press ""Record,"" as it were." 
+http://arstechnica.com/news.ars/post/20060521-6880.html|title|Hollywood reportedly in agreement to delay forced quality downgrades for Blu-ray, HD DVD
+http://www.lemonde.fr/planete/article/2013/05/16/le-lobbying-agressif-du-departement-d-etat-americain-pour-les-ogm_3239450_3244.html|creationDate|2013-05-17
+http://www.lemonde.fr/planete/article/2013/05/16/le-lobbying-agressif-du-departement-d-etat-americain-pour-les-ogm_3239450_3244.html|tag|http://www.semanlink.net/tag/diplomatie_americaine
+http://www.lemonde.fr/planete/article/2013/05/16/le-lobbying-agressif-du-departement-d-etat-americain-pour-les-ogm_3239450_3244.html|tag|http://www.semanlink.net/tag/lobby_agroalimentaire
+http://www.lemonde.fr/planete/article/2013/05/16/le-lobbying-agressif-du-departement-d-etat-americain-pour-les-ogm_3239450_3244.html|tag|http://www.semanlink.net/tag/ogm
+http://www.lemonde.fr/planete/article/2013/05/16/le-lobbying-agressif-du-departement-d-etat-americain-pour-les-ogm_3239450_3244.html|title|Le lobbying agressif de Washington pour les OGM
+http://www.lemonde.fr/planete/article/2013/05/16/le-lobbying-agressif-du-departement-d-etat-americain-pour-les-ogm_3239450_3244.html|creationTime|2013-05-17T20:05:35Z
+http://www.newyorker.com/fact/content/articles/060828fa_fact2|creationDate|2006-08-28
+http://www.newyorker.com/fact/content/articles/060828fa_fact2|tag|http://www.semanlink.net/tag/new_yorker
+http://www.newyorker.com/fact/content/articles/060828fa_fact2|tag|http://www.semanlink.net/tag/conjecture_de_poincare
+http://www.newyorker.com/fact/content/articles/060828fa_fact2|tag|http://www.semanlink.net/tag/perelman
+http://www.newyorker.com/fact/content/articles/060828fa_fact2|tag|http://www.semanlink.net/tag/medaille_fields
+http://www.newyorker.com/fact/content/articles/060828fa_fact2|comment|"""It [the Fields Medal] was completely irrelevant for me” Perelman said. “Everybody understood that if the proof is correct then no other recognition is needed.”"
+http://www.newyorker.com/fact/content/articles/060828fa_fact2|title|MANIFOLD DESTINY - The New Yorker
+http://www.deri.ie/about/press/coverage/details/?uid=307&ref=213|creationDate|2013-04-04
+http://www.deri.ie/about/press/coverage/details/?uid=307&ref=213|tag|http://www.semanlink.net/tag/deri
+http://www.deri.ie/about/press/coverage/details/?uid=307&ref=213|tag|http://www.semanlink.net/tag/cloud_based_lod_platform
+http://www.deri.ie/about/press/coverage/details/?uid=307&ref=213|tag|http://www.semanlink.net/tag/fujitsu
+http://www.deri.ie/about/press/coverage/details/?uid=307&ref=213|title|Fujitsu Labs And DERI To Offer Free, Cloud-Based Platform To Store And Query Linked Open Data
+http://www.deri.ie/about/press/coverage/details/?uid=307&ref=213|creationTime|2013-04-04T13:15:07Z
+http://www.semanlink.net/doc/2019/06/the_quiet_semi_supervised_revol|creationDate|2019-06-01
+http://www.semanlink.net/doc/2019/06/the_quiet_semi_supervised_revol|tag|http://www.semanlink.net/tag/semi_supervised_learning
+http://www.semanlink.net/doc/2019/06/the_quiet_semi_supervised_revol|title|The Quiet Semi-Supervised Revolution – Towards Data Science
+http://www.semanlink.net/doc/2019/06/the_quiet_semi_supervised_revol|bookmarkOf|https://towardsdatascience.com/the-quiet-semi-supervised-revolution-edec1e9ad8c
+http://www.semanlink.net/doc/2019/06/the_quiet_semi_supervised_revol|creationTime|2019-06-01T14:56:33Z
+http://semanticweb.com/introducing-the-used-cars-ontology_b34029#more-34029|creationDate|2012-12-13
+http://semanticweb.com/introducing-the-used-cars-ontology_b34029#more-34029|tag|http://www.semanlink.net/tag/makolab
+http://semanticweb.com/introducing-the-used-cars-ontology_b34029#more-34029|tag|http://www.semanlink.net/tag/automotive_and_web_technologies
+http://semanticweb.com/introducing-the-used-cars-ontology_b34029#more-34029|title|Introducing the Used Cars Ontology - semanticweb.com
+http://semanticweb.com/introducing-the-used-cars-ontology_b34029#more-34029|creationTime|2012-12-13T09:39:42Z
+http://wolke23.at/2010/06/creating-applications-from-shell-scripts-on-os-x/|creationDate|2012-03-15
+http://wolke23.at/2010/06/creating-applications-from-shell-scripts-on-os-x/|tag|http://www.semanlink.net/tag/mac_os_x
+http://wolke23.at/2010/06/creating-applications-from-shell-scripts-on-os-x/|title|Creating Executable Applications from Shell Scripts on OS X
+http://wolke23.at/2010/06/creating-applications-from-shell-scripts-on-os-x/|creationTime|2012-03-15T13:27:23Z
+http://www.omg.org/hot-topics/fibo.htm|creationDate|2013-04-16
+http://www.omg.org/hot-topics/fibo.htm|tag|http://www.semanlink.net/tag/fibo
+http://www.omg.org/hot-topics/fibo.htm|title|Financial Industry Business Ontology (FIBO)
+http://www.omg.org/hot-topics/fibo.htm|creationTime|2013-04-16T18:12:30Z
+http://www.ird.fr|creationDate|2005-09-04
+http://www.ird.fr|tag|http://www.semanlink.net/tag/ird
+http://www.ird.fr|title|Institut de recherche pour le Développement
+http://www.figoblog.org|creationDate|2005-08-17
+http://www.figoblog.org|tag|http://www.semanlink.net/tag/bibliotheconomie
+http://www.figoblog.org|tag|http://www.semanlink.net/tag/blog
+http://www.figoblog.org|title|Figoblog. Un blog sur Internet, la bibliothéconomie et la confiture de figues
+http://www.rojo.com/|creationDate|2006-04-24
+http://www.rojo.com/|tag|http://www.semanlink.net/tag/web_2_0
+http://www.rojo.com/|tag|http://www.semanlink.net/tag/feed_aggregator
+http://www.rojo.com/|comment|Web-based feed reader
+http://www.rojo.com/|title|Rojo
+http://vowl.visualdataweb.org/webvowl.html|creationDate|2014-12-30
+http://vowl.visualdataweb.org/webvowl.html|tag|http://www.semanlink.net/tag/ontologie_visualization
+http://vowl.visualdataweb.org/webvowl.html|title|WebVOWL - Web-based Visualization of Ontologies
+http://vowl.visualdataweb.org/webvowl.html|creationTime|2014-12-30T15:00:41Z
+http://bnode.org/blog/2009/08/19/skos-dc-linked-data-semantic-tagging|creationDate|2009-08-27
+http://bnode.org/blog/2009/08/19/skos-dc-linked-data-semantic-tagging|tag|http://www.semanlink.net/tag/semantic_tagging
+http://bnode.org/blog/2009/08/19/skos-dc-linked-data-semantic-tagging|tag|http://www.semanlink.net/tag/skos
+http://bnode.org/blog/2009/08/19/skos-dc-linked-data-semantic-tagging|tag|http://www.semanlink.net/tag/benjamin_nowack
+http://bnode.org/blog/2009/08/19/skos-dc-linked-data-semantic-tagging|tag|http://www.semanlink.net/tag/semanlink_related
+http://bnode.org/blog/2009/08/19/skos-dc-linked-data-semantic-tagging|tag|http://www.semanlink.net/tag/linked_data
+http://bnode.org/blog/2009/08/19/skos-dc-linked-data-semantic-tagging|title|SKOS + DC + Linked Data = Semantic Tagging? - benjamin nowack's blog
+http://bnode.org/blog/2009/08/19/skos-dc-linked-data-semantic-tagging|creationTime|2009-08-27T13:49:47Z
+http://www.w3.org/2005/Incubator/rdb2rdf/RDB2RDF_SurveyReport.pdf|creationDate|2010-07-06
+http://www.w3.org/2005/Incubator/rdb2rdf/RDB2RDF_SurveyReport.pdf|tag|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web
+http://www.w3.org/2005/Incubator/rdb2rdf/RDB2RDF_SurveyReport.pdf|title|A Survey of Current Approaches for Mapping of Relational Databases to RDF
+http://www.w3.org/2005/Incubator/rdb2rdf/RDB2RDF_SurveyReport.pdf|creationTime|2010-07-06T10:14:41Z
+http://siri.com/|creationDate|2010-07-01
+http://siri.com/|tag|http://www.semanlink.net/tag/virtual_personal_assistant
+http://siri.com/|tag|http://www.semanlink.net/tag/speech_recognition
+http://siri.com/|tag|http://www.semanlink.net/tag/iphone
+http://siri.com/|tag|http://www.semanlink.net/tag/mobile_search
+http://siri.com/|tag|http://www.semanlink.net/tag/apple
+http://siri.com/|title|Siri - Your Virtual Personal Assistant
+http://siri.com/|creationTime|2010-07-01T16:12:55Z
+http://www.lemonde.fr/idees/article/2016/04/23/repenser-l-humanite-apres-tchernobyl_4907654_3232.html|creationDate|2016-04-23
+http://www.lemonde.fr/idees/article/2016/04/23/repenser-l-humanite-apres-tchernobyl_4907654_3232.html|tag|http://www.semanlink.net/tag/tchernobyl
+http://www.lemonde.fr/idees/article/2016/04/23/repenser-l-humanite-apres-tchernobyl_4907654_3232.html|title|Repenser l’humanité après Tchernobyl
+http://www.lemonde.fr/idees/article/2016/04/23/repenser-l-humanite-apres-tchernobyl_4907654_3232.html|creationTime|2016-04-23T17:41:14Z
+http://emnlp2018.org/|creationDate|2018-08-23
+http://emnlp2018.org/|tag|http://www.semanlink.net/tag/emnlp_2018
+http://emnlp2018.org/|title|2018 Conference on Empirical Methods in Natural Language Processing - EMNLP 2018
+http://emnlp2018.org/|creationTime|2018-08-23T22:37:54Z
+http://emnlp2018.org/|homepage|http://emnlp2018.org/
+http://michaelcotterell.com/blog/2015/2/calculus-via-lambdas-in-java-8|creationDate|2016-05-03 +http://michaelcotterell.com/blog/2015/2/calculus-via-lambdas-in-java-8|tag|http://www.semanlink.net/tag/java_8 +http://michaelcotterell.com/blog/2015/2/calculus-via-lambdas-in-java-8|tag|http://www.semanlink.net/tag/lambda_calculus +http://michaelcotterell.com/blog/2015/2/calculus-via-lambdas-in-java-8|comment|derivatives of functions expressed as lambda expressions +http://michaelcotterell.com/blog/2015/2/calculus-via-lambdas-in-java-8|title|Calculus via Lambdas in Java 8 - Michael E. Cotterell +http://michaelcotterell.com/blog/2015/2/calculus-via-lambdas-in-java-8|creationTime|2016-05-03T23:15:59Z +https://upload.wikimedia.org/wikipedia/commons/4/43/2013-09_Introduction_%C3%A0_Wikidata.pdf|creationDate|2013-09-11 +https://upload.wikimedia.org/wikipedia/commons/4/43/2013-09_Introduction_%C3%A0_Wikidata.pdf|tag|http://www.semanlink.net/tag/wikidata +https://upload.wikimedia.org/wikipedia/commons/4/43/2013-09_Introduction_%C3%A0_Wikidata.pdf|tag|http://www.semanlink.net/tag/introduction +https://upload.wikimedia.org/wikipedia/commons/4/43/2013-09_Introduction_%C3%A0_Wikidata.pdf|title|Introduction à Wikidata +https://upload.wikimedia.org/wikipedia/commons/4/43/2013-09_Introduction_%C3%A0_Wikidata.pdf|creationTime|2013-09-11T23:58:57Z +http://blog.bnf.fr/gallica/?p=7874|creationDate|2013-09-23 +http://blog.bnf.fr/gallica/?p=7874|tag|http://www.semanlink.net/tag/exalead +http://blog.bnf.fr/gallica/?p=7874|tag|http://www.semanlink.net/tag/bnf +http://blog.bnf.fr/gallica/?p=7874|title|Un nouveau moteur de recherche pour Gallica Gallica +http://blog.bnf.fr/gallica/?p=7874|creationTime|2013-09-23T13:39:12Z +http://www.restlet.org/|creationDate|2008-05-08 +http://www.restlet.org/|tag|http://www.semanlink.net/tag/rest +http://www.restlet.org/|tag|http://www.semanlink.net/tag/java_dev +http://www.restlet.org/|title|Restlet - Lightweight REST framework for Java +http://www.restlet.org/|creationTime|2008-05-08T02:52:35Z +http://www.librarything.com/catalog/hyperfp|creationDate|2006-05-30 +http://www.librarything.com/catalog/hyperfp|tag|http://www.semanlink.net/tag/livre +http://www.librarything.com/catalog/hyperfp|tag|http://www.semanlink.net/tag/fps +http://www.librarything.com/catalog/hyperfp|tag|http://www.semanlink.net/tag/web_2_0_application +http://www.librarything.com/catalog/hyperfp|title|LibraryThing - my online catalog of books +http://googleresearch.blogspot.fr/2013/07/natural-language-understanding-focused.html|creationDate|2013-07-10 +http://googleresearch.blogspot.fr/2013/07/natural-language-understanding-focused.html|tag|http://www.semanlink.net/tag/google_research +http://googleresearch.blogspot.fr/2013/07/natural-language-understanding-focused.html|tag|http://www.semanlink.net/tag/nlp +http://googleresearch.blogspot.fr/2013/07/natural-language-understanding-focused.html|tag|http://www.semanlink.net/tag/chris_manning +http://googleresearch.blogspot.fr/2013/07/natural-language-understanding-focused.html|title|Natural Language Understanding-focused awards announced +http://googleresearch.blogspot.fr/2013/07/natural-language-understanding-focused.html|creationTime|2013-07-10T22:08:52Z +https://blackboxnlp.github.io/|creationDate|2018-11-06 +https://blackboxnlp.github.io/|tag|http://www.semanlink.net/tag/blackboxnlp_workshop_2018 +https://blackboxnlp.github.io/|title|Analyzing and interpreting neural networks for NLP (Workshop's Home page) 
+https://blackboxnlp.github.io/|creationTime|2018-11-06T09:58:57Z +http://www.sheaflight.com/|creationDate|2008-09-02 +http://www.sheaflight.com/|tag|http://www.semanlink.net/tag/microsoft +http://www.sheaflight.com/|tag|http://www.semanlink.net/tag/rdf_browser +http://www.sheaflight.com/|tag|http://www.semanlink.net/tag/linked_data +http://www.sheaflight.com/|title|Sheaflight Home +http://www.sheaflight.com/|creationTime|2008-09-02T14:24:24Z +http://select.nytimes.com/gst/abstract.html?res=F3081FFE3E5D0C728CDDA80894DD404482&fta=y&incamp=archive:article_related|creationDate|2005-11-27 +http://blogs.sun.com/bblfish/entry/building_secure_and_distributed_social|creationDate|2008-09-05 +http://blogs.sun.com/bblfish/entry/building_secure_and_distributed_social|tag|http://www.semanlink.net/tag/security +http://blogs.sun.com/bblfish/entry/building_secure_and_distributed_social|tag|http://www.semanlink.net/tag/securite_informatique +http://blogs.sun.com/bblfish/entry/building_secure_and_distributed_social|tag|http://www.semanlink.net/tag/rdf_and_social_networks +http://blogs.sun.com/bblfish/entry/building_secure_and_distributed_social|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/bblfish/entry/building_secure_and_distributed_social|title|Building Secure, Open and Distributed Social Network Applications +http://blogs.sun.com/bblfish/entry/building_secure_and_distributed_social|creationTime|2008-09-05T21:21:14Z +https://www.w3.org/community/rdfjs/|creationDate|2015-02-19 +https://www.w3.org/community/rdfjs/|tag|http://www.semanlink.net/tag/javascript_rdf +https://www.w3.org/community/rdfjs/|tag|http://www.semanlink.net/tag/w3c_community_group +https://www.w3.org/community/rdfjs/|title|RDF JavaScript Libraries Community Group +https://www.w3.org/community/rdfjs/|creationTime|2015-02-19T01:16:07Z +http://piketty.pse.ens.fr/fichiers/public/Piketty2003c.pdf|creationDate|2014-06-09 +http://piketty.pse.ens.fr/fichiers/public/Piketty2003c.pdf|tag|http://www.semanlink.net/tag/politique_francaise +http://piketty.pse.ens.fr/fichiers/public/Piketty2003c.pdf|tag|http://www.semanlink.net/tag/thomas_piketty +http://piketty.pse.ens.fr/fichiers/public/Piketty2003c.pdf|tag|http://www.semanlink.net/tag/inegalites +http://piketty.pse.ens.fr/fichiers/public/Piketty2003c.pdf|title|Attitude vis à vis des inégalités de revenu en France : existerait-il un consensus ? +http://piketty.pse.ens.fr/fichiers/public/Piketty2003c.pdf|creationTime|2014-06-09T10:57:00Z +https://web.cs.dal.ca/~eem/cvWeb/pubs/tdmm-2007-final.pdf|creationDate|2017-06-24 +https://web.cs.dal.ca/~eem/cvWeb/pubs/tdmm-2007-final.pdf|tag|http://www.semanlink.net/tag/clustering_of_text_documents +https://web.cs.dal.ca/~eem/cvWeb/pubs/tdmm-2007-final.pdf|tag|http://www.semanlink.net/tag/dimensionality_reduction +https://web.cs.dal.ca/~eem/cvWeb/pubs/tdmm-2007-final.pdf|title|Document Representation and Dimension Reduction for Text Clustering +https://web.cs.dal.ca/~eem/cvWeb/pubs/tdmm-2007-final.pdf|creationTime|2017-06-24T10:19:46Z +https://www.quora.com/Are-there-any-more-modern-alternatives-to-word2vec|creationDate|2017-05-23 +https://www.quora.com/Are-there-any-more-modern-alternatives-to-word2vec|tag|http://www.semanlink.net/tag/word2vec +https://www.quora.com/Are-there-any-more-modern-alternatives-to-word2vec|tag|http://www.semanlink.net/tag/word_embedding +https://www.quora.com/Are-there-any-more-modern-alternatives-to-word2vec|title|alternatives to word2vec? - Quora +https://www.quora.com/Are-there-any-more-modern-alternatives-to-word2vec|creationTime|2017-05-23T15:06:24Z +http://www.cnrs.fr/inc/communication/direct_labos/rudiuk.htm?utm_content=buffere58b9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-07-19 +http://www.cnrs.fr/inc/communication/direct_labos/rudiuk.htm?utm_content=buffere58b9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/dna_nanotechnology +http://www.cnrs.fr/inc/communication/direct_labos/rudiuk.htm?utm_content=buffere58b9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|La lumière, une alternative pour assembler l'ADN à température constante +http://www.cnrs.fr/inc/communication/direct_labos/rudiuk.htm?utm_content=buffere58b9&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-07-19T11:33:23Z +https://github.com/tkurz/skosjs|creationDate|2012-07-10 +https://github.com/tkurz/skosjs|tag|http://www.semanlink.net/tag/javascript +https://github.com/tkurz/skosjs|tag|http://www.semanlink.net/tag/linked_media_framework +https://github.com/tkurz/skosjs|tag|http://www.semanlink.net/tag/skos_editor +https://github.com/tkurz/skosjs|comment|A javascript-based skos editor +https://github.com/tkurz/skosjs|title|tkurz/skosjs · GitHub +https://github.com/tkurz/skosjs|creationTime|2012-07-10T15:57:49Z +http://java.sun.com/javase/6/docs/technotes/guides/jdbc/|creationDate|2009-06-26 +http://java.sun.com/javase/6/docs/technotes/guides/jdbc/|tag|http://www.semanlink.net/tag/jdbc +http://java.sun.com/javase/6/docs/technotes/guides/jdbc/|title|JDK 6 Java Database Connectivity (JDBC)-related APIs & Developer Guides -- from Sun Microsystems +http://java.sun.com/javase/6/docs/technotes/guides/jdbc/|creationTime|2009-06-26T12:14:23Z +http://www.nature.com/news/neuroscientists-rethink-how-the-brain-recognizes-faces-1.22091|creationDate|2017-06-05 +http://www.nature.com/news/neuroscientists-rethink-how-the-brain-recognizes-faces-1.22091|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.nature.com/news/neuroscientists-rethink-how-the-brain-recognizes-faces-1.22091|tag|http://www.semanlink.net/tag/face_recognition +http://www.nature.com/news/neuroscientists-rethink-how-the-brain-recognizes-faces-1.22091|title|Neuroscientists rethink how the brain recognizes faces : Nature News & Comment +http://www.nature.com/news/neuroscientists-rethink-how-the-brain-recognizes-faces-1.22091|creationTime|2017-06-05T10:12:20Z +http://chris.photobooks.com/json/default.htm|creationDate|2014-12-09 +http://chris.photobooks.com/json/default.htm|tag|http://www.semanlink.net/tag/json_visualization +http://chris.photobooks.com/json/default.htm|title|JSON Visualization +http://chris.photobooks.com/json/default.htm|creationTime|2014-12-09T15:28:33Z +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/4679220.stm|creationDate|2006-02-15 +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/4679220.stm|tag|http://www.semanlink.net/tag/matiere_noire +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/4679220.stm|comment|Astronomers have for the first time put some real numbers on the physical characteristics of dark matter.
+http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/2/hi/science/nature/4679220.stm|title|BBC NEWS - Dark matter comes out of the cold BBC NEWS Science/Nature Dark matter comes out of the cold +http://www.lemonde.fr/web/article/0,1-0@2-3224,36-933528@51-933119,0.html|creationDate|2007-07-11 +http://www.lemonde.fr/web/article/0,1-0@2-3224,36-933528@51-933119,0.html|tag|http://www.semanlink.net/tag/securite +http://www.lemonde.fr/web/article/0,1-0@2-3224,36-933528@51-933119,0.html|tag|http://www.semanlink.net/tag/etat_policier +http://www.lemonde.fr/web/article/0,1-0@2-3224,36-933528@51-933119,0.html|tag|http://www.semanlink.net/tag/videosurveillance +http://www.lemonde.fr/web/article/0,1-0@2-3224,36-933528@51-933119,0.html|tag|http://www.semanlink.net/tag/liberte +http://www.lemonde.fr/web/article/0,1-0@2-3224,36-933528@51-933119,0.html|comment|"Dans un futur proche, nous aurons des caméras de surveillance à la maison. Pour notre sécurité. Croyez-vous que les terroristes préparent leurs bombes dans la rue, au nez des caméras publiques ? Et si vous n'avez rien à vous reprocher, vous n'avez rien à cacher. ""Aussi dans mes toilettes ?"" Bien sûr - et pourquoi pas ? N'ayez crainte pour votre pudeur : des programmes informatiques analyseront ces images, sans intervention humaine. Pas de raison de vous émouvoir plus que lorsque vous êtes nu devant votre miroir. + + + +" +http://www.lemonde.fr/web/article/0,1-0@2-3224,36-933528@51-933119,0.html|title|"La CNIL s'inquiète d'une ""société de surveillance"" qui menace les libertés" +http://www.lemonde.fr/web/article/0,1-0@2-3224,36-933528@51-933119,0.html|creationTime|2007-07-11T20:19:50Z +http://www.lemonde.fr/web/article/0,1-0@2-3224,36-933528@51-933119,0.html|source|Le Monde +http://www.research.att.com/~john/Grappa/|creationDate|2005-11-19 +http://www.research.att.com/~john/Grappa/|tag|http://www.semanlink.net/tag/graph_visualization +http://www.research.att.com/~john/Grappa/|comment|A Java Graph Package +http://www.research.att.com/~john/Grappa/|title|AT&T Labs Research - Grappa +http://matthewjamestaylor.com/blog/ultimate-multi-column-liquid-layouts-em-and-pixel-widths|creationDate|2008-06-23 +http://matthewjamestaylor.com/blog/ultimate-multi-column-liquid-layouts-em-and-pixel-widths|tag|http://www.semanlink.net/tag/css +http://matthewjamestaylor.com/blog/ultimate-multi-column-liquid-layouts-em-and-pixel-widths|title|Ultimate multi-column liquid layouts (em and pixel widths) +http://matthewjamestaylor.com/blog/ultimate-multi-column-liquid-layouts-em-and-pixel-widths|creationTime|2008-06-23T19:30:49Z +http://semwebdev.keithalexander.co.uk/blog/posts/eRDFt-tonic.html|creationDate|2007-07-02 +http://semwebdev.keithalexander.co.uk/blog/posts/eRDFt-tonic.html|tag|http://www.semanlink.net/tag/rdf_driven_web_sites +http://semwebdev.keithalexander.co.uk/blog/posts/eRDFt-tonic.html|tag|http://www.semanlink.net/tag/erdf +http://semwebdev.keithalexander.co.uk/blog/posts/eRDFt-tonic.html|comment|eRDF templating. 
+http://semwebdev.keithalexander.co.uk/blog/posts/eRDFt-tonic.html|title|eRDF-T with Tonic and Smarty +http://semwebdev.keithalexander.co.uk/blog/posts/eRDFt-tonic.html|creationTime|2007-07-02T23:08:23Z +http://semwebdev.keithalexander.co.uk/blog/posts/eRDFt-tonic.html|seeAlso|http://semwebdev.keithalexander.co.uk/snap.html +http://searchengineland.com/unzip-the-secrets-behind-leveraging-unique-identifiers-structured-data-in-e-commerce-178692|creationDate|2013-12-12 +http://searchengineland.com/unzip-the-secrets-behind-leveraging-unique-identifiers-structured-data-in-e-commerce-178692|tag|http://www.semanlink.net/tag/semantic_seo +http://searchengineland.com/unzip-the-secrets-behind-leveraging-unique-identifiers-structured-data-in-e-commerce-178692|title|How Online Retailers Can Leverage Unique Identifiers & Structured Data +http://searchengineland.com/unzip-the-secrets-behind-leveraging-unique-identifiers-structured-data-in-e-commerce-178692|creationTime|2013-12-12T19:54:32Z +http://www.emeraldinsight.com/doi/abs/10.1108/JD-07-2013-0098|creationDate|2015-02-19 +http://www.emeraldinsight.com/doi/abs/10.1108/JD-07-2013-0098|tag|http://www.semanlink.net/tag/rest +http://www.emeraldinsight.com/doi/abs/10.1108/JD-07-2013-0098|tag|http://www.semanlink.net/tag/api +http://www.emeraldinsight.com/doi/abs/10.1108/JD-07-2013-0098|tag|http://www.semanlink.net/tag/ruben_verborgh +http://www.emeraldinsight.com/doi/abs/10.1108/JD-07-2013-0098|title|The fallacy of the multi-API culture: The fallacy of the multi-API culture: Journal of Documentation: Vol 71, No 2 +http://www.emeraldinsight.com/doi/abs/10.1108/JD-07-2013-0098|creationTime|2015-02-19T01:21:32Z +http://www.wsj.com/articles/what-the-world-will-speak-in-2115-1420234648|creationDate|2015-01-03 +http://www.wsj.com/articles/what-the-world-will-speak-in-2115-1420234648|tag|http://www.semanlink.net/tag/langues +http://www.wsj.com/articles/what-the-world-will-speak-in-2115-1420234648|tag|http://www.semanlink.net/tag/disparition_de_langues_vivantes +http://www.wsj.com/articles/what-the-world-will-speak-in-2115-1420234648|comment|A century from now, expect fewer but simpler languages on every continent +http://www.wsj.com/articles/what-the-world-will-speak-in-2115-1420234648|title|What the World Will Speak in 2115 - WSJ +http://www.wsj.com/articles/what-the-world-will-speak-in-2115-1420234648|creationTime|2015-01-03T10:54:51Z +http://www.lemonde.fr/festival/article/2014/09/04/celine-alvarez-une-instit-revolutionnaire_4481540_4415198.html|creationDate|2014-09-05 +http://www.lemonde.fr/festival/article/2014/09/04/celine-alvarez-une-instit-revolutionnaire_4481540_4415198.html|tag|http://www.semanlink.net/tag/ecole_montessori +http://www.lemonde.fr/festival/article/2014/09/04/celine-alvarez-une-instit-revolutionnaire_4481540_4415198.html|tag|http://www.semanlink.net/tag/education +http://www.lemonde.fr/festival/article/2014/09/04/celine-alvarez-une-instit-revolutionnaire_4481540_4415198.html|title|Céline Alvarez, une institutrice révolutionnaire +http://www.lemonde.fr/festival/article/2014/09/04/celine-alvarez-une-instit-revolutionnaire_4481540_4415198.html|creationTime|2014-09-05T10:33:25Z +http://internetactu.blog.lemonde.fr/2013/10/25/lecole-inversee-ou-comment-la-technologie-produit-sa-disparition/|creationDate|2013-10-26 +http://internetactu.blog.lemonde.fr/2013/10/25/lecole-inversee-ou-comment-la-technologie-produit-sa-disparition/|tag|http://www.semanlink.net/tag/mooc 
+http://internetactu.blog.lemonde.fr/2013/10/25/lecole-inversee-ou-comment-la-technologie-produit-sa-disparition/|tag|http://www.semanlink.net/tag/ecole +http://internetactu.blog.lemonde.fr/2013/10/25/lecole-inversee-ou-comment-la-technologie-produit-sa-disparition/|title|L’école inversée ou comment la technologie produit sa disparition InternetActu +http://internetactu.blog.lemonde.fr/2013/10/25/lecole-inversee-ou-comment-la-technologie-produit-sa-disparition/|creationTime|2013-10-26T09:56:59Z +http://textsummarization.net/text-summarizer|creationDate|2017-07-07 +http://textsummarization.net/text-summarizer|tag|http://www.semanlink.net/tag/automatic_summarization +http://textsummarization.net/text-summarizer|tag|http://www.semanlink.net/tag/online_tool +http://textsummarization.net/text-summarizer|title|Text Summarizer - Text Summarization Online +http://textsummarization.net/text-summarizer|creationTime|2017-07-07T17:11:37Z +http://wodka.over-blog.com/article-5492410.html|creationDate|2007-02-23 +http://wodka.over-blog.com/article-5492410.html|tag|http://www.semanlink.net/tag/kapuscinski +http://wodka.over-blog.com/article-5492410.html|title|Ryszard KAPUSCINSKI (1932-2007) - WODKA +http://wodka.over-blog.com/article-5492410.html|creationTime|2007-02-23T21:14:32Z +http://dataliberate.com/2012/02/wikidata-announcing-wikipedias-next-big-thing/|creationDate|2012-02-08 +http://dataliberate.com/2012/02/wikidata-announcing-wikipedias-next-big-thing/|tag|http://www.semanlink.net/tag/wikidata +http://dataliberate.com/2012/02/wikidata-announcing-wikipedias-next-big-thing/|title|WikiData – Announcing Wikipedia’s Next Big Thing Data Liberate +http://dataliberate.com/2012/02/wikidata-announcing-wikipedias-next-big-thing/|creationTime|2012-02-08T13:41:46Z +http://www.411song.com/|creationDate|2005-12-06 +http://www.411song.com/|tag|http://www.semanlink.net/tag/musique_en_ligne +http://www.411song.com/|tag|http://www.semanlink.net/tag/cool +http://www.411song.com/|title|411-SONG +http://blog.valotas.com/2011/01/resteasy-form-annotation-for-jersey.html|creationDate|2012-07-10 +http://blog.valotas.com/2011/01/resteasy-form-annotation-for-jersey.html|tag|http://www.semanlink.net/tag/jersey +http://blog.valotas.com/2011/01/resteasy-form-annotation-for-jersey.html|title|Things to remember: Resteasy Form annotation for Jersey +http://blog.valotas.com/2011/01/resteasy-form-annotation-for-jersey.html|creationTime|2012-07-10T16:54:35Z +http://lists.w3.org/Archives/Public/public-lod/2010May/0099.html|creationDate|2010-05-22 +http://lists.w3.org/Archives/Public/public-lod/2010May/0099.html|tag|http://www.semanlink.net/tag/java +http://lists.w3.org/Archives/Public/public-lod/2010May/0099.html|tag|http://www.semanlink.net/tag/content_negotiation +http://lists.w3.org/Archives/Public/public-lod/2010May/0099.html|title|Java Framework for Content Negotiation +http://lists.w3.org/Archives/Public/public-lod/2010May/0099.html|creationTime|2010-05-22T12:26:17Z +http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers|creationDate|2014-07-26 +http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers|tag|http://www.semanlink.net/tag/trust +http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers|tag|http://www.semanlink.net/tag/facebook +http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers|comment|Facebook has a perception problem—consumers just don't trust it. 
+http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers|title|Why No One Trusts Facebook To Power The Future - ReadWrite +http://readwrite.com/2014/04/03/facebook-whatsapp-oculus-drones-lasers|creationTime|2014-07-26T00:10:17Z +http://milicicvuk.com/blog/2014/09/06/what-semantic-web-can-learn-from-javascript/#disqus_thread|creationDate|2017-01-04 +http://milicicvuk.com/blog/2014/09/06/what-semantic-web-can-learn-from-javascript/#disqus_thread|tag|http://www.semanlink.net/tag/javascript +http://milicicvuk.com/blog/2014/09/06/what-semantic-web-can-learn-from-javascript/#disqus_thread|tag|http://www.semanlink.net/tag/rdf +http://milicicvuk.com/blog/2014/09/06/what-semantic-web-can-learn-from-javascript/#disqus_thread|title|What the Semantic Web can learn from JavaScript +http://milicicvuk.com/blog/2014/09/06/what-semantic-web-can-learn-from-javascript/#disqus_thread|creationTime|2017-01-04T17:37:17Z +http://www.w3schools.com/xsl/xsl_client.asp|creationDate|2008-02-23 +http://www.w3schools.com/xsl/xsl_client.asp|tag|http://www.semanlink.net/tag/client_side_xslt +http://www.w3schools.com/xsl/xsl_client.asp|title|XSLT on the Client +http://www.w3schools.com/xsl/xsl_client.asp|creationTime|2008-02-23T09:14:22Z +http://www-128.ibm.com/developerworks/xml/library/x-matters41.html|creationDate|2007-12-03 +http://www-128.ibm.com/developerworks/xml/library/x-matters41.html|tag|http://www.semanlink.net/tag/javascript_dom +http://www-128.ibm.com/developerworks/xml/library/x-matters41.html|tag|http://www.semanlink.net/tag/tips +http://www-128.ibm.com/developerworks/xml/library/x-matters41.html|comment|Tips and tricks for a friendlier DOM +http://www-128.ibm.com/developerworks/xml/library/x-matters41.html|title|XML Matters: Beyond the DOM +http://www-128.ibm.com/developerworks/xml/library/x-matters41.html|creationTime|2007-12-03T11:44:41Z +https://www.ibm.com/us-en/marketplace/spss-text-analytics-for-surveys|creationDate|2017-07-13 +https://www.ibm.com/us-en/marketplace/spss-text-analytics-for-surveys|tag|http://www.semanlink.net/tag/ibm_spss_text_analytics_for_surveys +https://www.ibm.com/us-en/marketplace/spss-text-analytics-for-surveys|title|IBM SPSS Text Analytics for Surveys +https://www.ibm.com/us-en/marketplace/spss-text-analytics-for-surveys|creationTime|2017-07-13T10:38:21Z +http://www.geocities.com/anpipniger/aspiro.htm|creationDate|2006-04-02 +http://www.geocities.com/anpipniger/aspiro.htm|tag|http://www.semanlink.net/tag/pompe_a_eau +http://www.geocities.com/anpipniger/aspiro.htm|tag|http://www.semanlink.net/tag/verger_de_gado_a_niamey +http://www.geocities.com/anpipniger/aspiro.htm|title|Pompe aspirante refoulante ordinaire +http://tagsonomy.com/index.php/tagging-for-business-and-education/|creationDate|2005-12-06 +http://tagsonomy.com/index.php/tagging-for-business-and-education/|tag|http://www.semanlink.net/tag/tagging +http://tagsonomy.com/index.php/tagging-for-business-and-education/|tag|http://www.semanlink.net/tag/education +http://tagsonomy.com/index.php/tagging-for-business-and-education/|title|Tagging for business and education +http://www.pbs.org/newshour/bb/business/jan-june10/makingsense_04-15.html|creationDate|2010-05-03 +http://www.pbs.org/newshour/bb/business/jan-june10/makingsense_04-15.html|tag|http://www.semanlink.net/tag/open_source +http://www.pbs.org/newshour/bb/business/jan-june10/makingsense_04-15.html|tag|http://www.semanlink.net/tag/pbs +http://www.pbs.org/newshour/bb/business/jan-june10/makingsense_04-15.html|tag|http://www.semanlink.net/tag/bonus 
+http://www.pbs.org/newshour/bb/business/jan-june10/makingsense_04-15.html|title|What Drives Motivation in the Modern Workplace? PBS NewsHour April 15, 2010 PBS +http://www.pbs.org/newshour/bb/business/jan-june10/makingsense_04-15.html|creationTime|2010-05-03T09:30:04Z +http://glinden.blogspot.com/2008/04/keynotes-at-www-2008.html|creationDate|2008-06-22 +http://glinden.blogspot.com/2008/04/keynotes-at-www-2008.html|tag|http://www.semanlink.net/tag/www08 +http://glinden.blogspot.com/2008/04/keynotes-at-www-2008.html|comment|the guy is at ms +http://glinden.blogspot.com/2008/04/keynotes-at-www-2008.html|title|Keynotes at WWW 2008 - Geeking with Greg +http://glinden.blogspot.com/2008/04/keynotes-at-www-2008.html|creationTime|2008-06-22T02:40:54Z +http://www.3rd1000.com/chronology/chrono.htm|creationDate|2005-08-17 +http://www.3rd1000.com/chronology/chrono.htm|tag|http://www.semanlink.net/tag/science +http://www.3rd1000.com/chronology/chrono.htm|title|Chronology of Events in Science, Mathematics, and Technology +http://www-128.ibm.com/developerworks/webservices/library/ws-wsajax/?ca=dgr-lnxw03SOAP-AJAX|creationDate|2006-02-05 +http://www-128.ibm.com/developerworks/webservices/library/ws-wsajax/?ca=dgr-lnxw03SOAP-AJAX|tag|http://www.semanlink.net/tag/ajax +http://www-128.ibm.com/developerworks/webservices/library/ws-wsajax/?ca=dgr-lnxw03SOAP-AJAX|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www-128.ibm.com/developerworks/webservices/library/ws-wsajax/?ca=dgr-lnxw03SOAP-AJAX|tag|http://www.semanlink.net/tag/web_services +http://www-128.ibm.com/developerworks/webservices/library/ws-wsajax/?ca=dgr-lnxw03SOAP-AJAX|tag|http://www.semanlink.net/tag/soap +http://www-128.ibm.com/developerworks/webservices/library/ws-wsajax/?ca=dgr-lnxw03SOAP-AJAX|comment|Implement a Web browser-based SOAP Web services client using the Asynchronous JavaScript and XML (Ajax) design pattern. +http://www-128.ibm.com/developerworks/webservices/library/ws-wsajax/?ca=dgr-lnxw03SOAP-AJAX|title|Call SOAP Web services with Ajax +http://www.vogella.de/articles/REST/article.html|creationDate|2011-08-23 +http://www.vogella.de/articles/REST/article.html|tag|http://www.semanlink.net/tag/jersey +http://www.vogella.de/articles/REST/article.html|tag|http://www.semanlink.net/tag/tutorial +http://www.vogella.de/articles/REST/article.html|title|REST with Java (JAX-RS) using Jersey - Tutorial +http://www.vogella.de/articles/REST/article.html|creationTime|2011-08-23T01:06:57Z +http://www.rfi.fr/ameriques/20180326-soja-deforestation-amerique-sud-alimentation-ogm-mighty-earth|creationDate|2018-03-26 +http://www.rfi.fr/ameriques/20180326-soja-deforestation-amerique-sud-alimentation-ogm-mighty-earth|tag|http://www.semanlink.net/tag/ogm +http://www.rfi.fr/ameriques/20180326-soja-deforestation-amerique-sud-alimentation-ogm-mighty-earth|tag|http://www.semanlink.net/tag/elevage +http://www.rfi.fr/ameriques/20180326-soja-deforestation-amerique-sud-alimentation-ogm-mighty-earth|tag|http://www.semanlink.net/tag/soja +http://www.rfi.fr/ameriques/20180326-soja-deforestation-amerique-sud-alimentation-ogm-mighty-earth|tag|http://www.semanlink.net/tag/deforestation +http://www.rfi.fr/ameriques/20180326-soja-deforestation-amerique-sud-alimentation-ogm-mighty-earth|comment|Aucune mention de l'origine du soja qui alimente les animaux d'élevage français n'est obligatoire sur les étiquettes. 
+http://www.rfi.fr/ameriques/20180326-soja-deforestation-amerique-sud-alimentation-ogm-mighty-earth|title|[Enquête] Soja: quand la déforestation s'invite dans nos assiettes - Amériques - RFI +http://www.rfi.fr/ameriques/20180326-soja-deforestation-amerique-sud-alimentation-ogm-mighty-earth|creationTime|2018-03-26T17:46:05Z +http://secondlife.com/|creationDate|2006-02-17 +http://secondlife.com/|tag|http://www.semanlink.net/tag/3d +http://secondlife.com/|tag|http://www.semanlink.net/tag/second_life +http://secondlife.com/|comment|"""Second Life is a 3-D virtual world entirely built and owned by its residents.""" +http://secondlife.com/|title|Second Life: Your World. Your Imagination. +https://edgecoders.com/learning-react-js-is-easier-than-you-think-fbd6dc4d935a|creationDate|2017-08-26 +https://edgecoders.com/learning-react-js-is-easier-than-you-think-fbd6dc4d935a|tag|http://www.semanlink.net/tag/tutorial +https://edgecoders.com/learning-react-js-is-easier-than-you-think-fbd6dc4d935a|tag|http://www.semanlink.net/tag/react_js +https://edgecoders.com/learning-react-js-is-easier-than-you-think-fbd6dc4d935a|title|Learning React.js is easier than you think – EdgeCoders +https://edgecoders.com/learning-react-js-is-easier-than-you-think-fbd6dc4d935a|creationTime|2017-08-26T11:14:33Z +http://www.gartner.com/newsroom/id/2359715|creationDate|2013-03-21 +http://www.gartner.com/newsroom/id/2359715|tag|http://www.semanlink.net/tag/gartner +http://www.gartner.com/newsroom/id/2359715|title|Gartner Identifies Top Technology Trends Impacting Information Infrastructure in 2013 +http://www.gartner.com/newsroom/id/2359715|creationTime|2013-03-21T22:19:21Z +http://semanticweb.com/introduction-linked-data-platform_b43472|creationDate|2014-07-12 +http://semanticweb.com/introduction-linked-data-platform_b43472|tag|http://www.semanlink.net/tag/linked_data_platform +http://semanticweb.com/introduction-linked-data-platform_b43472|tag|http://www.semanlink.net/tag/introduction +http://semanticweb.com/introduction-linked-data-platform_b43472|title|Introduction to: Linked Data Platform - Semanticweb.com +http://semanticweb.com/introduction-linked-data-platform_b43472|creationTime|2014-07-12T13:06:30Z +http://www.solrtutorial.com/solr-query-syntax.html|creationDate|2015-03-14 +http://www.solrtutorial.com/solr-query-syntax.html|tag|http://www.semanlink.net/tag/tutorial +http://www.solrtutorial.com/solr-query-syntax.html|tag|http://www.semanlink.net/tag/solr +http://www.solrtutorial.com/solr-query-syntax.html|title|Query Syntax - SolrTutorial.com +http://www.solrtutorial.com/solr-query-syntax.html|creationTime|2015-03-14T20:34:20Z +http://www.wired.com/wiredscience/2014/02/cryptography-breakthrough/|creationDate|2014-02-03 +http://www.wired.com/wiredscience/2014/02/cryptography-breakthrough/|tag|http://www.semanlink.net/tag/encryption +http://www.wired.com/wiredscience/2014/02/cryptography-breakthrough/|tag|http://www.semanlink.net/tag/cryptography +http://www.wired.com/wiredscience/2014/02/cryptography-breakthrough/|title|Cryptography Breakthrough Could Make Software Unhackable - Wired Science +http://www.wired.com/wiredscience/2014/02/cryptography-breakthrough/|creationTime|2014-02-03T22:30:35Z +http://www.youtube.com/user/LACANNASWING|creationDate|2010-11-04 +http://www.youtube.com/user/LACANNASWING|tag|http://www.semanlink.net/tag/tours +http://www.youtube.com/user/LACANNASWING|tag|http://www.semanlink.net/tag/vito +http://www.youtube.com/user/LACANNASWING|tag|http://www.semanlink.net/tag/jazz 
+http://www.youtube.com/user/LACANNASWING|title|La canne à swing +http://www.youtube.com/user/LACANNASWING|creationTime|2010-11-04T01:15:02Z +http://blogs.sun.com/bblfish/entry/restful_web_services_the_book|creationDate|2008-05-08 +http://blogs.sun.com/bblfish/entry/restful_web_services_the_book|tag|http://www.semanlink.net/tag/semantic_web_introduction +http://blogs.sun.com/bblfish/entry/restful_web_services_the_book|tag|http://www.semanlink.net/tag/restful_web_services +http://blogs.sun.com/bblfish/entry/restful_web_services_the_book|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/bblfish/entry/restful_web_services_the_book|tag|http://www.semanlink.net/tag/rdf +http://blogs.sun.com/bblfish/entry/restful_web_services_the_book|comment|"That's RDF for you. It's so simple I am able to explain it to people in bars within a minute.
+Here is an example, which says that my name is Henry: ""Henry Story"".
+Click on the URLs and you will GET their meaning. Since resources can return any number of representations, different user agents can get the representation they prefer. + +" +http://blogs.sun.com/bblfish/entry/restful_web_services_the_book|title|RESTful Web Services: the book +http://blogs.sun.com/bblfish/entry/restful_web_services_the_book|creationTime|2008-05-08T14:06:30Z +http://pisani.blog.lemonde.fr/2007/11/14/porno-20-a-la-traine/|creationDate|2007-11-14 +http://pisani.blog.lemonde.fr/2007/11/14/porno-20-a-la-traine/|tag|http://www.semanlink.net/tag/transnets +http://pisani.blog.lemonde.fr/2007/11/14/porno-20-a-la-traine/|tag|http://www.semanlink.net/tag/cybersex +http://pisani.blog.lemonde.fr/2007/11/14/porno-20-a-la-traine/|tag|http://www.semanlink.net/tag/web_2_0 +http://pisani.blog.lemonde.fr/2007/11/14/porno-20-a-la-traine/|title|Transnets » Blog Archive » Porno 2.0: à la traîne +http://pisani.blog.lemonde.fr/2007/11/14/porno-20-a-la-traine/|creationTime|2007-11-14T16:34:20Z +http://www.dehora.net/journal/2005/08/automated_mapping_between_rdf_and_forms_part_i.html|creationDate|2007-11-12 +http://www.dehora.net/journal/2005/08/automated_mapping_between_rdf_and_forms_part_i.html|tag|http://www.semanlink.net/tag/bill_de_hora +http://www.dehora.net/journal/2005/08/automated_mapping_between_rdf_and_forms_part_i.html|tag|http://www.semanlink.net/tag/rdf_forms +http://www.dehora.net/journal/2005/08/automated_mapping_between_rdf_and_forms_part_i.html|title|Bill de hÓra: Automated mapping between RDF and forms, part I +http://www.dehora.net/journal/2005/08/automated_mapping_between_rdf_and_forms_part_i.html|creationTime|2007-11-12T13:55:20Z +http://www.snee.com/bobdc.blog/2010/06/replace-facebook-with-foaf-twi.html|creationDate|2010-06-23 +http://www.snee.com/bobdc.blog/2010/06/replace-facebook-with-foaf-twi.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2010/06/replace-facebook-with-foaf-twi.html|tag|http://www.semanlink.net/tag/foaf +http://www.snee.com/bobdc.blog/2010/06/replace-facebook-with-foaf-twi.html|tag|http://www.semanlink.net/tag/facebook +http://www.snee.com/bobdc.blog/2010/06/replace-facebook-with-foaf-twi.html|tag|http://www.semanlink.net/tag/twitter +http://www.snee.com/bobdc.blog/2010/06/replace-facebook-with-foaf-twi.html|title|Replace Facebook with FOAF + twitter + ? - bobdc.blog +http://www.snee.com/bobdc.blog/2010/06/replace-facebook-with-foaf-twi.html|creationTime|2010-06-23T00:19:53Z +https://githubengineering.com/towards-natural-language-semantic-code-search/|creationDate|2018-09-19 +https://githubengineering.com/towards-natural-language-semantic-code-search/|tag|http://www.semanlink.net/tag/semantic_search +https://githubengineering.com/towards-natural-language-semantic-code-search/|tag|http://www.semanlink.net/tag/github +https://githubengineering.com/towards-natural-language-semantic-code-search/|title|Towards Natural Language Semantic Code Search GitHub Engineering +https://githubengineering.com/towards-natural-language-semantic-code-search/|creationTime|2018-09-19T16:59:33Z +http://zepheira.com/community/LED/|creationDate|2008-06-25 +http://zepheira.com/community/LED/|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://zepheira.com/community/LED/|title|Zepheira :: Community :: Linking Enterprise Data +http://zepheira.com/community/LED/|creationTime|2008-06-25T19:53:41Z +http://stackoverflow.com/questions/32777630/locality-sensitive-hashing-elasticsearch|creationDate|2017-04-27 +http://stackoverflow.com/questions/32777630/locality-sensitive-hashing-elasticsearch|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/32777630/locality-sensitive-hashing-elasticsearch|tag|http://www.semanlink.net/tag/elasticsearch +http://stackoverflow.com/questions/32777630/locality-sensitive-hashing-elasticsearch|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +http://stackoverflow.com/questions/32777630/locality-sensitive-hashing-elasticsearch|title|Locality-sensitive hashing - Elasticsearch - Stack Overflow +http://stackoverflow.com/questions/32777630/locality-sensitive-hashing-elasticsearch|creationTime|2017-04-27T17:11:37Z +http://fr.slideshare.net/troncy/live-topic-generation-from-event-streams|creationDate|2013-05-17 +http://fr.slideshare.net/troncy/live-topic-generation-from-event-streams|tag|http://www.semanlink.net/tag/raphael_troncy +http://fr.slideshare.net/troncy/live-topic-generation-from-event-streams|tag|http://www.semanlink.net/tag/www_2013 +http://fr.slideshare.net/troncy/live-topic-generation-from-event-streams|title|Live topic generation from event streams +http://fr.slideshare.net/troncy/live-topic-generation-from-event-streams|creationTime|2013-05-17T11:09:05Z +https://pdfs.semanticscholar.org/873e/ea884de581f79b1e783052f8e9fa60726fc8.pdf|creationDate|2018-08-07 +https://pdfs.semanticscholar.org/873e/ea884de581f79b1e783052f8e9fa60726fc8.pdf|tag|http://www.semanlink.net/tag/automatic_tagging +https://pdfs.semanticscholar.org/873e/ea884de581f79b1e783052f8e9fa60726fc8.pdf|tag|http://www.semanlink.net/tag/multi_label_classification +https://pdfs.semanticscholar.org/873e/ea884de581f79b1e783052f8e9fa60726fc8.pdf|comment|[Supplement to this](/doc/?uri=https%3A%2F%2Fdl.acm.org%2Fcitation.cfm%3Fid%3D3159660) +https://pdfs.semanticscholar.org/873e/ea884de581f79b1e783052f8e9fa60726fc8.pdf|relatedDoc|https://dl.acm.org/citation.cfm?id=3159660 +https://pdfs.semanticscholar.org/873e/ea884de581f79b1e783052f8e9fa60726fc8.pdf|title|Supplementary : Extreme Multi-label Learning with Label Features for Warm-start Tagging, Ranking & Recommendation +https://pdfs.semanticscholar.org/873e/ea884de581f79b1e783052f8e9fa60726fc8.pdf|creationTime|2018-08-07T14:57:57Z +http://www.wired.com/2014/11/delphi-automated-driving-system/|creationDate|2014-11-18
+http://www.wired.com/2014/11/delphi-automated-driving-system/|tag|http://www.semanlink.net/tag/driverless_car +http://www.wired.com/2014/11/delphi-automated-driving-system/|title|A System That Any Automaker Can Use to Build Self-Driving Cars WIRED +http://www.wired.com/2014/11/delphi-automated-driving-system/|creationTime|2014-11-18T13:24:47Z +http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1613|creationDate|2010-06-17 +http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1613|tag|http://www.semanlink.net/tag/kingsley_idehen +http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1613|tag|http://www.semanlink.net/tag/uriburner_com +http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1613|title|URIBurner: Painless Generation & Exploitation of Linked Data +http://www.openlinksw.com/blog/kidehen@openlinksw.com/blog/?id=1613|creationTime|2010-06-17T01:24:28Z +https://bugs.eclipse.org/bugs/show_bug.cgi?id=389738|creationDate|2013-09-19 +https://bugs.eclipse.org/bugs/show_bug.cgi?id=389738|tag|http://www.semanlink.net/tag/eclipse +https://bugs.eclipse.org/bugs/show_bug.cgi?id=389738|tag|http://www.semanlink.net/tag/eclipse_juno +https://bugs.eclipse.org/bugs/show_bug.cgi?id=389738|tag|http://www.semanlink.net/tag/j_ai_un_petit_probleme_avec_mon_ordinateur +https://bugs.eclipse.org/bugs/show_bug.cgi?id=389738|tag|http://www.semanlink.net/tag/eclipse_tip +https://bugs.eclipse.org/bugs/show_bug.cgi?id=389738|comment|"in the log:
+...java.lang.NullPointerException
+ at org.eclipse.ui.internal.Workbench.createWorkbenchWindow(Workbench.java:1228)
+ at org.eclipse.ui.internal.Workbench.getActiveWorkbenchWindow(Workbench.java:1221)
+remove workbench.xmi file from workspace .metadata\.plugins\org.eclipse.e4.workbench +" +https://bugs.eclipse.org/bugs/show_bug.cgi?id=389738|title|Eclipse crashes on startup +https://bugs.eclipse.org/bugs/show_bug.cgi?id=389738|creationTime|2013-09-19T16:51:31Z +https://github.com/kheyer/Genomic-ULMFiT|creationDate|2019-04-02 +https://github.com/kheyer/Genomic-ULMFiT|tag|http://www.semanlink.net/tag/genomique +https://github.com/kheyer/Genomic-ULMFiT|tag|http://www.semanlink.net/tag/ml_sequential_data +https://github.com/kheyer/Genomic-ULMFiT|tag|http://www.semanlink.net/tag/ulmfit +https://github.com/kheyer/Genomic-ULMFiT|tag|http://www.semanlink.net/tag/bioinformatics +https://github.com/kheyer/Genomic-ULMFiT|title|kheyer/Genomic-ULMFiT: ULMFiT for Genomic Sequence Data +https://github.com/kheyer/Genomic-ULMFiT|creationTime|2019-04-02T10:38:46Z +http://sweet.jpl.nasa.gov/|creationDate|2014-10-26 +http://sweet.jpl.nasa.gov/|tag|http://www.semanlink.net/tag/owl_ontology +http://sweet.jpl.nasa.gov/|tag|http://www.semanlink.net/tag/jpl +http://sweet.jpl.nasa.gov/|title|SWEET Semantic Web for Earth and Environmental Terminology +http://sweet.jpl.nasa.gov/|creationTime|2014-10-26T12:48:05Z +https://www.telerama.fr/livre/lecrivain-v.-s.-naipaul-est-mort,-et-cetait-lun-des-prix-nobel-de-litterature-les-moins-consensuels,n5761201.php|creationDate|2018-08-12 +https://www.telerama.fr/livre/lecrivain-v.-s.-naipaul-est-mort,-et-cetait-lun-des-prix-nobel-de-litterature-les-moins-consensuels,n5761201.php|tag|http://www.semanlink.net/tag/v_s_naipaul +https://www.telerama.fr/livre/lecrivain-v.-s.-naipaul-est-mort,-et-cetait-lun-des-prix-nobel-de-litterature-les-moins-consensuels,n5761201.php|title|L'écrivain V. S. Naipaul est mort, et c'était un prix Nobel de littérature tout sauf consensuel - Livres - Télérama.fr +https://www.telerama.fr/livre/lecrivain-v.-s.-naipaul-est-mort,-et-cetait-lun-des-prix-nobel-de-litterature-les-moins-consensuels,n5761201.php|creationTime|2018-08-12T20:02:52Z +http://stackoverflow.com/questions/3001713/where-in-maven-projects-path-should-i-put-configuration-files-that-are-not-cons|creationDate|2013-09-16 +http://stackoverflow.com/questions/3001713/where-in-maven-projects-path-should-i-put-configuration-files-that-are-not-cons|tag|http://www.semanlink.net/tag/maven +http://stackoverflow.com/questions/3001713/where-in-maven-projects-path-should-i-put-configuration-files-that-are-not-cons|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/3001713/where-in-maven-projects-path-should-i-put-configuration-files-that-are-not-cons|title|Where in maven project's path should I put configuration files that are not considered resources - Stack Overflow +http://stackoverflow.com/questions/3001713/where-in-maven-projects-path-should-i-put-configuration-files-that-are-not-cons|creationTime|2013-09-16T19:20:32Z +http://tech.groups.yahoo.com/group/jena-dev/message/27990|creationDate|2008-11-20 +http://tech.groups.yahoo.com/group/jena-dev/message/27990|tag|http://www.semanlink.net/tag/jena_dev +http://tech.groups.yahoo.com/group/jena-dev/message/27990|tag|http://www.semanlink.net/tag/converting_data_into_rdf +http://tech.groups.yahoo.com/group/jena-dev/message/27990|comment|How to present the data from a legacy system (not an RDB) as RDF and thought I'd use Jena as a facade? 
+http://tech.groups.yahoo.com/group/jena-dev/message/27990|title|jena-dev : Message: Creating custom model for legacy system (Newbie question) +http://tech.groups.yahoo.com/group/jena-dev/message/27990|creationTime|2008-11-20T15:55:54Z +http://www.macosxhints.com/article.php?story=20071127011627796|creationDate|2008-01-03 +http://www.macosxhints.com/article.php?story=20071127011627796|tag|http://www.semanlink.net/tag/leopard +http://www.macosxhints.com/article.php?story=20071127011627796|tag|http://www.semanlink.net/tag/firewall +http://www.macosxhints.com/article.php?story=20071127011627796|tag|http://www.semanlink.net/tag/mac_os_x_web_serving +http://www.macosxhints.com/article.php?story=20071127011627796|comment|"to block any incoming connection to your local web server from outside your Mac: In the Finder (Go » Go to Folder...), go to folder /usr/sbin and locate the file httpd Open System Preferences » Security » Firewall), and select the ""Set access for specific services and applications"" option. Click on the ""+"" button at the bottom of the list, then drag the file httpd from /usr/sbin in the Finder to the Open dialog, then click Add (Validate). In the list of services, locate httpd, then select ""Block incoming connections"" in the popup menu." +http://www.macosxhints.com/article.php?story=20071127011627796|title|Leopard: Use the built-in firewall to block web sharing +http://www.macosxhints.com/article.php?story=20071127011627796|creationTime|2008-01-03T23:17:23Z +http://www.lemonde.fr/pixels/article/2016/01/27/premiere-defaite-d-un-professionnel-du-go-contre-une-intelligence-artificielle_4854886_4408996.html|creationDate|2016-01-27 +http://www.lemonde.fr/pixels/article/2016/01/27/premiere-defaite-d-un-professionnel-du-go-contre-une-intelligence-artificielle_4854886_4408996.html|tag|http://www.semanlink.net/tag/go_game +http://www.lemonde.fr/pixels/article/2016/01/27/premiere-defaite-d-un-professionnel-du-go-contre-une-intelligence-artificielle_4854886_4408996.html|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.lemonde.fr/pixels/article/2016/01/27/premiere-defaite-d-un-professionnel-du-go-contre-une-intelligence-artificielle_4854886_4408996.html|title|Première défaite d’un professionnel du go contre une intelligence artificielle +http://www.lemonde.fr/pixels/article/2016/01/27/premiere-defaite-d-un-professionnel-du-go-contre-une-intelligence-artificielle_4854886_4408996.html|creationTime|2016-01-27T23:59:26Z +http://www.wired.co.uk/news/archive/2011-12/07/semantic-seo-volkswagen|creationDate|2011-12-21 +http://www.wired.co.uk/news/archive/2011-12/07/semantic-seo-volkswagen|tag|http://www.semanlink.net/tag/semantic_search +http://www.wired.co.uk/news/archive/2011-12/07/semantic-seo-volkswagen|tag|http://www.semanlink.net/tag/volkswagen +http://www.wired.co.uk/news/archive/2011-12/07/semantic-seo-volkswagen|title|Volkswagen adopts semantic search technology (Wired UK) +http://www.wired.co.uk/news/archive/2011-12/07/semantic-seo-volkswagen|creationTime|2011-12-21T23:01:15Z +http://javaluator.sourceforge.net|creationDate|2015-04-16 +http://javaluator.sourceforge.net|comment|Javaluator is a simple, but powerful, infix expression evaluator for Java. 
+http://javaluator.sourceforge.net|title|Javaluator +http://javaluator.sourceforge.net|creationTime|2015-04-16T16:42:10Z +http://chronicle.com/article/Badges-Earned-Online-Pose/130241/|creationDate|2012-01-10 +http://chronicle.com/article/Badges-Earned-Online-Pose/130241/|tag|http://www.semanlink.net/tag/e_learning +http://chronicle.com/article/Badges-Earned-Online-Pose/130241/|tag|http://www.semanlink.net/tag/education +http://chronicle.com/article/Badges-Earned-Online-Pose/130241/|comment|We live in a world where anyone can learn anything, anytime, anywhere, but we haven't remotely reorganized our workplace or school for this age +http://chronicle.com/article/Badges-Earned-Online-Pose/130241/|title|'Badges' Earned Online Pose Challenge to Traditional College Diplomas - College 2.0 - The Chronicle of Higher Education +http://chronicle.com/article/Badges-Earned-Online-Pose/130241/|creationTime|2012-01-10T00:45:42Z +http://www.semanlink.net/doc/2019/02/keywords2vec|creationDate|2019-02-09 +http://www.semanlink.net/doc/2019/02/keywords2vec|tag|http://www.semanlink.net/tag/word2vec +http://www.semanlink.net/doc/2019/02/keywords2vec|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/doc/2019/02/keywords2vec|tag|http://www.semanlink.net/tag/simple_idea +http://www.semanlink.net/doc/2019/02/keywords2vec|tag|http://www.semanlink.net/tag/rake +http://www.semanlink.net/doc/2019/02/keywords2vec|tag|http://www.semanlink.net/tag/medical_ir_ml_ia +http://www.semanlink.net/doc/2019/02/keywords2vec|tag|http://www.semanlink.net/tag/phrase_mining +http://www.semanlink.net/doc/2019/02/keywords2vec|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2019/02/keywords2vec|comment|"To generate a word2vec model, but using keywords instead of one word. 
+Tokenize on stopwords + non word characters + +(This remembers me author of [FlashText algorithm](tag:flashtext_algorithm.html) saying he had developed it to create word2vec models)" +http://www.semanlink.net/doc/2019/02/keywords2vec|title|Keywords2vec +http://www.semanlink.net/doc/2019/02/keywords2vec|bookmarkOf|https://github.com/dperezrada/keywords2vec +http://www.semanlink.net/doc/2019/02/keywords2vec|creationTime|2019-02-09T01:43:55Z +http://www.haut-vernet.com/indexesclangon.html|creationDate|2011-10-02 +http://www.haut-vernet.com/indexesclangon.html|tag|http://www.semanlink.net/tag/geologie +http://www.haut-vernet.com/indexesclangon.html|tag|http://www.semanlink.net/tag/ecole_des_mines +http://www.haut-vernet.com/indexesclangon.html|title|Le Haut Vernet Randonnées réserve géologique Haute Provence Serre d'Esclangon +http://www.haut-vernet.com/indexesclangon.html|creationTime|2011-10-02T22:48:48Z +https://blogs.oracle.com/nbprofiler/entry/profiling_with_visualvm_part_2|creationDate|2015-02-09 +https://blogs.oracle.com/nbprofiler/entry/profiling_with_visualvm_part_2|tag|http://www.semanlink.net/tag/jvisualvm +https://blogs.oracle.com/nbprofiler/entry/profiling_with_visualvm_part_2|title|Profiling With VisualVM, Part 2 (NetBeans Profiler) +https://blogs.oracle.com/nbprofiler/entry/profiling_with_visualvm_part_2|creationTime|2015-02-09T21:21:47Z +http://news.sciencemag.org/sciencenow/2013/03/microbes-likely-abundant-hundred.html?ref=hp|creationDate|2013-03-19 +http://news.sciencemag.org/sciencenow/2013/03/microbes-likely-abundant-hundred.html?ref=hp|tag|http://www.semanlink.net/tag/extremophiles +http://news.sciencemag.org/sciencenow/2013/03/microbes-likely-abundant-hundred.html?ref=hp|title|Microbes Likely Abundant Hundreds of Meters Below Sea Floor - ScienceNOW +http://news.sciencemag.org/sciencenow/2013/03/microbes-likely-abundant-hundred.html?ref=hp|creationTime|2013-03-19T17:47:45Z +http://www.redmonk.com/jgovernor/archives/000474.html|creationDate|2005-10-13 +http://www.redmonk.com/jgovernor/archives/000474.html|tag|http://www.semanlink.net/tag/soap_vs_rest +http://www.redmonk.com/jgovernor/archives/000474.html|title|SOAP is boring +http://www.newyorker.com/reporting/2007/08/27/070827fa_fact_gopnik|creationDate|2007-09-14 +http://www.newyorker.com/reporting/2007/08/27/070827fa_fact_gopnik|tag|http://www.semanlink.net/tag/sarkozy +http://www.newyorker.com/reporting/2007/08/27/070827fa_fact_gopnik|tag|http://www.semanlink.net/tag/la_france_vue_de_l_etranger +http://www.newyorker.com/reporting/2007/08/27/070827fa_fact_gopnik|title|Letter from France: The Human Bomb: The New Yorker +http://www.newyorker.com/reporting/2007/08/27/070827fa_fact_gopnik|creationTime|2007-09-14T23:44:05Z +http://www.oregonfossilguy.com/|creationDate|2008-05-19 +http://www.oregonfossilguy.com/|tag|http://www.semanlink.net/tag/oregon +http://www.oregonfossilguy.com/|tag|http://www.semanlink.net/tag/paleontologie +http://www.oregonfossilguy.com/|title|Oregon Fossil Guy - Plant & Animal Fossils - Geologic History +http://www.oregonfossilguy.com/|creationTime|2008-05-19T00:02:00Z +http://www.computerweekly.com/news/4500252192/Amazon-Web-Services-opens-up-machine-learning-service-to-European-developers|creationDate|2015-08-25 +http://www.computerweekly.com/news/4500252192/Amazon-Web-Services-opens-up-machine-learning-service-to-European-developers|tag|http://www.semanlink.net/tag/amazon
+http://www.computerweekly.com/news/4500252192/Amazon-Web-Services-opens-up-machine-learning-service-to-European-developers|tag|http://www.semanlink.net/tag/ml_as_a_service +http://www.computerweekly.com/news/4500252192/Amazon-Web-Services-opens-up-machine-learning-service-to-European-developers|tag|http://www.semanlink.net/tag/aws +http://www.computerweekly.com/news/4500252192/Amazon-Web-Services-opens-up-machine-learning-service-to-European-developers|tag|http://www.semanlink.net/tag/machine_learning +http://www.computerweekly.com/news/4500252192/Amazon-Web-Services-opens-up-machine-learning-service-to-European-developers|title|Amazon Web Services opens up machine learning service to European developers +http://www.computerweekly.com/news/4500252192/Amazon-Web-Services-opens-up-machine-learning-service-to-European-developers|creationTime|2015-08-25T10:55:04Z +http://lists.w3.org/Archives/Public/public-vocabs/2013Aug/0033.html|creationDate|2013-08-20 +http://lists.w3.org/Archives/Public/public-vocabs/2013Aug/0033.html|tag|http://www.semanlink.net/tag/tables +http://lists.w3.org/Archives/Public/public-vocabs/2013Aug/0033.html|tag|http://www.semanlink.net/tag/schema_org +http://lists.w3.org/Archives/Public/public-vocabs/2013Aug/0033.html|tag|http://www.semanlink.net/tag/csv +http://lists.w3.org/Archives/Public/public-vocabs/2013Aug/0033.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2013Aug/0033.html|comment|"Many useful datasets on the Web take the form of tables. The goal of this proposal is to provide a simple, schema.org-friendly way to ""look inside"" these tables, and map their contents into triples. This is an early draft proposal developed at Google. We're seeking feedback from the community." 
+http://lists.w3.org/Archives/Public/public-vocabs/2013Aug/0033.html|title|Proposal: Looking inside tables from عمر بنجلون on 2013-08-13 (public-vocabs@w3.org from August 2013) +http://lists.w3.org/Archives/Public/public-vocabs/2013Aug/0033.html|creationTime|2013-08-20T16:21:30Z +http://www.lemonde.fr/pixels/article/2018/03/28/intelligence-artificielle-ce-qu-il-faut-retenir-du-rapport-de-cedric-villani_5277697_4408996.html|creationDate|2018-03-29 +http://www.lemonde.fr/pixels/article/2018/03/28/intelligence-artificielle-ce-qu-il-faut-retenir-du-rapport-de-cedric-villani_5277697_4408996.html|tag|http://www.semanlink.net/tag/rapport_villani_sur_l_ia +http://www.lemonde.fr/pixels/article/2018/03/28/intelligence-artificielle-ce-qu-il-faut-retenir-du-rapport-de-cedric-villani_5277697_4408996.html|title|Intelligence artificielle : ce qu’il faut retenir du rapport de Cédric Villani +http://www.lemonde.fr/pixels/article/2018/03/28/intelligence-artificielle-ce-qu-il-faut-retenir-du-rapport-de-cedric-villani_5277697_4408996.html|creationTime|2018-03-29T00:37:46Z +https://www.lemonde.fr/planete/article/2019/02/11/le-declin-des-insectes-une-menace-grandissante-pour-les-ecosystemes-naturels_5422018_3244.html|creationDate|2019-02-11 +https://www.lemonde.fr/planete/article/2019/02/11/le-declin-des-insectes-une-menace-grandissante-pour-les-ecosystemes-naturels_5422018_3244.html|tag|http://www.semanlink.net/tag/insect_collapse +https://www.lemonde.fr/planete/article/2019/02/11/le-declin-des-insectes-une-menace-grandissante-pour-les-ecosystemes-naturels_5422018_3244.html|title|Les insectes pourraient avoir complètement disparu dans cent ans +https://www.lemonde.fr/planete/article/2019/02/11/le-declin-des-insectes-une-menace-grandissante-pour-les-ecosystemes-naturels_5422018_3244.html|creationTime|2019-02-11T13:39:10Z +http://stackoverflow.com/questions/16673347/multi-label-document-classification|creationDate|2014-04-25 +http://stackoverflow.com/questions/16673347/multi-label-document-classification|tag|http://www.semanlink.net/tag/multi_label_classification +http://stackoverflow.com/questions/16673347/multi-label-document-classification|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/16673347/multi-label-document-classification|title|java - Multi-Label Document Classification - Stack Overflow +http://stackoverflow.com/questions/16673347/multi-label-document-classification|creationTime|2014-04-25T19:22:48Z +http://www.amazon.com/gp/product/0804793069?redirect=true&tag=centerforimmigra&utm%5Fsource=E-mail%20Updates&utm%5Fcampaign=d1e41890b0-Immigration%5FReading%5F5%5F14%5F155%5F14%5F2015&utm%5Fmedium=email&utm%5Fterm=0%5F7dc4c5d977-d1e41890b0-44163693&pldnSite=1|creationDate|2015-05-17 +http://www.amazon.com/gp/product/0804793069?redirect=true&tag=centerforimmigra&utm%5Fsource=E-mail%20Updates&utm%5Fcampaign=d1e41890b0-Immigration%5FReading%5F5%5F14%5F155%5F14%5F2015&utm%5Fmedium=email&utm%5Fterm=0%5F7dc4c5d977-d1e41890b0-44163693&pldnSite=1|tag|http://www.semanlink.net/tag/livre_a_lire +http://www.amazon.com/gp/product/0804793069?redirect=true&tag=centerforimmigra&utm%5Fsource=E-mail%20Updates&utm%5Fcampaign=d1e41890b0-Immigration%5FReading%5F5%5F14%5F155%5F14%5F2015&utm%5Fmedium=email&utm%5Fterm=0%5F7dc4c5d977-d1e41890b0-44163693&pldnSite=1|tag|http://www.semanlink.net/tag/immigration 
+http://www.amazon.com/gp/product/0804793069?redirect=true&tag=centerforimmigra&utm%5Fsource=E-mail%20Updates&utm%5Fcampaign=d1e41890b0-Immigration%5FReading%5F5%5F14%5F155%5F14%5F2015&utm%5Fmedium=email&utm%5Fterm=0%5F7dc4c5d977-d1e41890b0-44163693&pldnSite=1|comment|The book addresses head on three of the most salient aspects of immigration control: the denial of rights to non-citizens, their physical removal and exclusion from the polity through deportation, and their deprivation of liberty and freedom of movement in immigration detention. +http://www.amazon.com/gp/product/0804793069?redirect=true&tag=centerforimmigra&utm%5Fsource=E-mail%20Updates&utm%5Fcampaign=d1e41890b0-Immigration%5FReading%5F5%5F14%5F155%5F14%5F2015&utm%5Fmedium=email&utm%5Fterm=0%5F7dc4c5d977-d1e41890b0-44163693&pldnSite=1|title|Rights, Deportation, and Detention in the Age of Immigration Control: Tom Wong: 9780804793063: Amazon.com: Books +http://www.amazon.com/gp/product/0804793069?redirect=true&tag=centerforimmigra&utm%5Fsource=E-mail%20Updates&utm%5Fcampaign=d1e41890b0-Immigration%5FReading%5F5%5F14%5F155%5F14%5F2015&utm%5Fmedium=email&utm%5Fterm=0%5F7dc4c5d977-d1e41890b0-44163693&pldnSite=1|creationTime|2015-05-17T00:19:54Z +http://www.w3.org/DesignIssues/RDB-RDF.html|creationDate|2010-11-07 +http://www.w3.org/DesignIssues/RDB-RDF.html|tag|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.w3.org/DesignIssues/RDB-RDF.html|tag|http://www.semanlink.net/tag/sql_to_rdf_mapping +http://www.w3.org/DesignIssues/RDB-RDF.html|title|Relational Databases and the Semantic Web (in Design Issues) +http://www.w3.org/DesignIssues/RDB-RDF.html|creationTime|2010-11-07T12:56:02Z +http://www.lemagit.fr/etude/Canal-troque-son-mainframe-contre-un-SI-Cloud|creationDate|2016-05-14 +http://www.lemagit.fr/etude/Canal-troque-son-mainframe-contre-un-SI-Cloud|tag|http://www.semanlink.net/tag/cloud +http://www.lemagit.fr/etude/Canal-troque-son-mainframe-contre-un-SI-Cloud|tag|http://www.semanlink.net/tag/canal +http://www.lemagit.fr/etude/Canal-troque-son-mainframe-contre-un-SI-Cloud|title|Canal+ troque son mainframe contre un SI Cloud +http://www.lemagit.fr/etude/Canal-troque-son-mainframe-contre-un-SI-Cloud|creationTime|2016-05-14T11:56:09Z +http://news.efinancialcareers.com/uk-en/285249/machine-learning-and-big-data-j-p-morgan?utm_campaign=Data%2BElixir&utm_medium=email&utm_source=Data_Elixir_136|creationDate|2017-06-24 +http://news.efinancialcareers.com/uk-en/285249/machine-learning-and-big-data-j-p-morgan?utm_campaign=Data%2BElixir&utm_medium=email&utm_source=Data_Elixir_136|tag|http://www.semanlink.net/tag/machine_learning +http://news.efinancialcareers.com/uk-en/285249/machine-learning-and-big-data-j-p-morgan?utm_campaign=Data%2BElixir&utm_medium=email&utm_source=Data_Elixir_136|tag|http://www.semanlink.net/tag/finance +http://news.efinancialcareers.com/uk-en/285249/machine-learning-and-big-data-j-p-morgan?utm_campaign=Data%2BElixir&utm_medium=email&utm_source=Data_Elixir_136|title|JPMorgan's massive guide to machine learning jobs in finance +http://news.efinancialcareers.com/uk-en/285249/machine-learning-and-big-data-j-p-morgan?utm_campaign=Data%2BElixir&utm_medium=email&utm_source=Data_Elixir_136|creationTime|2017-06-24T18:40:17Z +http://blogs.lessthandot.com/index.php/artificial-intelligence/automated-keyword-extraction-tf-idf-rake-and-textrank/|creationDate|2017-06-13 
+http://blogs.lessthandot.com/index.php/artificial-intelligence/automated-keyword-extraction-tf-idf-rake-and-textrank/|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://blogs.lessthandot.com/index.php/artificial-intelligence/automated-keyword-extraction-tf-idf-rake-and-textrank/|tag|http://www.semanlink.net/tag/rake +http://blogs.lessthandot.com/index.php/artificial-intelligence/automated-keyword-extraction-tf-idf-rake-and-textrank/|tag|http://www.semanlink.net/tag/python_sample_code +http://blogs.lessthandot.com/index.php/artificial-intelligence/automated-keyword-extraction-tf-idf-rake-and-textrank/|tag|http://www.semanlink.net/tag/nlp_sample_code +http://blogs.lessthandot.com/index.php/artificial-intelligence/automated-keyword-extraction-tf-idf-rake-and-textrank/|tag|http://www.semanlink.net/tag/textrank +http://blogs.lessthandot.com/index.php/artificial-intelligence/automated-keyword-extraction-tf-idf-rake-and-textrank/|title|Automated Keyword Extraction – TF-IDF, RAKE, and TextRank (Less Than Dot - Blog) +http://blogs.lessthandot.com/index.php/artificial-intelligence/automated-keyword-extraction-tf-idf-rake-and-textrank/|creationTime|2017-06-13T23:23:35Z +https://nlp.stanford.edu/seminar/details/jdevlin.pdf|creationDate|2019-02-11 +https://nlp.stanford.edu/seminar/details/jdevlin.pdf|tag|http://www.semanlink.net/tag/slides +https://nlp.stanford.edu/seminar/details/jdevlin.pdf|tag|http://www.semanlink.net/tag/bert +https://nlp.stanford.edu/seminar/details/jdevlin.pdf|comment|Includes new results such as the effect of the masking strategy, using synthetic training data,... +https://nlp.stanford.edu/seminar/details/jdevlin.pdf|title|Jacob Devlin talks about BERT at the Stanford NLP seminar +https://nlp.stanford.edu/seminar/details/jdevlin.pdf|creationTime|2019-02-11T11:20:39Z +http://www.w3.org/2001/sw/SW-FAQ#rulesandonts|creationDate|2010-10-17 +http://www.w3.org/2001/sw/SW-FAQ#rulesandonts|tag|http://www.semanlink.net/tag/owl +http://www.w3.org/2001/sw/SW-FAQ#rulesandonts|tag|http://www.semanlink.net/tag/rules +http://www.w3.org/2001/sw/SW-FAQ#rulesandonts|title|How do I know when to use OWL and when to Rules? How can I use them both together? 
+http://www.w3.org/2001/sw/SW-FAQ#rulesandonts|creationTime|2010-10-17T12:28:52Z +https://medium.com/@erushton214/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26|creationDate|2018-05-25 +https://medium.com/@erushton214/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26|tag|http://www.semanlink.net/tag/word_embedding +https://medium.com/@erushton214/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26|tag|http://www.semanlink.net/tag/spellchecker +https://medium.com/@erushton214/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26|title|A simple spell checker built from word vectors – Ed Rushton – Medium +https://medium.com/@erushton214/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26|creationTime|2018-05-25T00:16:06Z +http://www.w3.org/2007/Talks/0223-Bangalore-IH/Slides.html|creationDate|2007-04-03 +http://www.w3.org/2007/Talks/0223-Bangalore-IH/Slides.html|tag|http://www.semanlink.net/tag/semantic_web_presentation +http://www.w3.org/2007/Talks/0223-Bangalore-IH/Slides.html|tag|http://www.semanlink.net/tag/ivan_herman +http://www.w3.org/2007/Talks/0223-Bangalore-IH/Slides.html|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2007/Talks/0223-Bangalore-IH/Slides.html|title|State of the Semantic Web - Bangalore, 23 February, 2007 - Ivan Herman, W3C +http://www.w3.org/2007/Talks/0223-Bangalore-IH/Slides.html|creationTime|2007-04-03T23:04:39Z +http://www.mkbergman.com/?p=437|creationDate|2008-04-15 +http://www.mkbergman.com/?p=437|tag|http://www.semanlink.net/tag/semantic_web +http://www.mkbergman.com/?p=437|tag|http://www.semanlink.net/tag/information +http://www.mkbergman.com/?p=437|tag|http://www.semanlink.net/tag/httprange_14 +http://www.mkbergman.com/?p=437|title|Semantic Web Semantics: Arcane, but Important » AI3:::Adaptive Information +http://www.mkbergman.com/?p=437|creationTime|2008-04-15T15:24:02Z +http://afromusing.com/|creationDate|2013-08-24 +http://afromusing.com/|tag|http://www.semanlink.net/tag/juliana_rotich +http://afromusing.com/|title|Afromusing Africa and Beyond! 
(the personal blog of Juliana Rotich) +http://afromusing.com/|creationTime|2013-08-24T19:24:31Z +http://tuukka.iki.fi/tmp/swig-2008-04-22.html|creationDate|2008-05-04 +http://tuukka.iki.fi/tmp/swig-2008-04-22.html|tag|http://www.semanlink.net/tag/fps_and_ldow2008 +http://tuukka.iki.fi/tmp/swig-2008-04-22.html|comment|Includes notes about my talk at LDOW 2008 +http://tuukka.iki.fi/tmp/swig-2008-04-22.html|title|swig-2008-04-22 +http://tuukka.iki.fi/tmp/swig-2008-04-22.html|creationTime|2008-05-04T14:55:43Z +http://doc.rplug.renault.com/car-configurator/overview.html|creationDate|2014-01-15 +http://doc.rplug.renault.com/car-configurator/overview.html|tag|http://www.semanlink.net/tag/documentation +http://doc.rplug.renault.com/car-configurator/overview.html|tag|http://www.semanlink.net/tag/c2gweb_on_the_web +http://doc.rplug.renault.com/car-configurator/overview.html|tag|http://www.semanlink.net/tag/c2gweb +http://doc.rplug.renault.com/car-configurator/overview.html|title|C2GWEB Overview +http://doc.rplug.renault.com/car-configurator/overview.html|creationTime|2014-01-15T11:55:01Z +http://semwebdev.keithalexander.co.uk/snap.html|creationDate|2007-07-03 +http://semwebdev.keithalexander.co.uk/snap.html|tag|http://www.semanlink.net/tag/erdf +http://semwebdev.keithalexander.co.uk/snap.html|tag|http://www.semanlink.net/tag/semantic_markup_in_html +http://semwebdev.keithalexander.co.uk/snap.html|tag|http://www.semanlink.net/tag/javascript +http://semwebdev.keithalexander.co.uk/snap.html|comment|RDF-in-HTML offers interesting possibilities for end-user experience. This page demonstrates one of those possibilities. Here, some JavaScript pre-fetches eRDF data related to the URI being linked to. +http://semwebdev.keithalexander.co.uk/snap.html|title|A use for embedded semantics? eRDF Link Preview Demo +http://semwebdev.keithalexander.co.uk/snap.html|creationTime|2007-07-03T00:55:37Z +https://www.quora.com/What-is-a-simple-but-detailed-explanation-of-Textrank|creationDate|2017-07-12 +https://www.quora.com/What-is-a-simple-but-detailed-explanation-of-Textrank|tag|http://www.semanlink.net/tag/textrank +https://www.quora.com/What-is-a-simple-but-detailed-explanation-of-Textrank|title|What is a simple but detailed explanation of Textrank? 
- Quora +https://www.quora.com/What-is-a-simple-but-detailed-explanation-of-Textrank|creationTime|2017-07-12T00:58:03Z +http://www.snee.com/bobdc.blog/2012/12/normalizing-company-names-with.html|creationDate|2012-12-09 +http://www.snee.com/bobdc.blog/2012/12/normalizing-company-names-with.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2012/12/normalizing-company-names-with.html|tag|http://www.semanlink.net/tag/dbpedia +http://www.snee.com/bobdc.blog/2012/12/normalizing-company-names-with.html|tag|http://www.semanlink.net/tag/sparql_tips +http://www.snee.com/bobdc.blog/2012/12/normalizing-company-names-with.html|title|Normalizing company names with SPARQL and DBpedia - bobdc.blog +http://www.snee.com/bobdc.blog/2012/12/normalizing-company-names-with.html|creationTime|2012-12-09T12:35:40Z +http://fr.wikipedia.org/wiki/La_Passion_de_Jeanne_d'Arc|creationDate|2009-09-22 +http://fr.wikipedia.org/wiki/La_Passion_de_Jeanne_d'Arc|tag|http://www.semanlink.net/tag/jeanne_d_arc +http://fr.wikipedia.org/wiki/La_Passion_de_Jeanne_d'Arc|tag|http://www.semanlink.net/tag/religion +http://fr.wikipedia.org/wiki/La_Passion_de_Jeanne_d'Arc|tag|http://www.semanlink.net/tag/antonin_artaud +http://fr.wikipedia.org/wiki/La_Passion_de_Jeanne_d'Arc|tag|http://www.semanlink.net/tag/justice +http://fr.wikipedia.org/wiki/La_Passion_de_Jeanne_d'Arc|tag|http://www.semanlink.net/tag/film_francais +http://fr.wikipedia.org/wiki/La_Passion_de_Jeanne_d'Arc|title|La Passion de Jeanne d'Arc, film de Dreyer +http://fr.wikipedia.org/wiki/La_Passion_de_Jeanne_d'Arc|creationTime|2009-09-22T00:09:34Z +http://2007.xtech.org/public/schedule/paper/40|creationDate|2007-05-18 +http://2007.xtech.org/public/schedule/paper/40|tag|http://www.semanlink.net/tag/microformats +http://2007.xtech.org/public/schedule/paper/40|tag|http://www.semanlink.net/tag/rdfa +http://2007.xtech.org/public/schedule/paper/40|tag|http://www.semanlink.net/tag/grddl +http://2007.xtech.org/public/schedule/paper/40|tag|http://www.semanlink.net/tag/xtech_2007 +http://2007.xtech.org/public/schedule/paper/40|tag|http://www.semanlink.net/tag/elias_torres +http://2007.xtech.org/public/schedule/paper/40|tag|http://www.semanlink.net/tag/semantic_markup_in_html +http://2007.xtech.org/public/schedule/paper/40|comment|Tour through the latest techniques available to embed or extract semantic markup from HTML pages. 
+http://2007.xtech.org/public/schedule/paper/40|title|XTech 2007: Open Data in HTML: GRDDL, eRDF and RDFa +http://2007.xtech.org/public/schedule/paper/40|creationTime|2007-05-18T21:47:02Z +http://www.dehora.net/journal/2008/08/15/rest-as-an-engineering-discipline/|creationDate|2008-08-18 +http://www.dehora.net/journal/2008/08/15/rest-as-an-engineering-discipline/|tag|http://www.semanlink.net/tag/bill_de_hora +http://www.dehora.net/journal/2008/08/15/rest-as-an-engineering-discipline/|tag|http://www.semanlink.net/tag/rest +http://www.dehora.net/journal/2008/08/15/rest-as-an-engineering-discipline/|title|Bill de hÓra: REST as an engineering discipline +http://www.dehora.net/journal/2008/08/15/rest-as-an-engineering-discipline/|creationTime|2008-08-18T17:34:24Z +http://www.lamap.fr/|creationDate|2005-11-19 +http://www.lamap.fr/|tag|http://www.semanlink.net/tag/la_main_a_la_pate +http://www.lamap.fr/|title|La main à la pâte +http://lists.w3.org/Archives/Public/public-hydra/2014Dec/0002.html|creationDate|2015-02-19 +http://lists.w3.org/Archives/Public/public-hydra/2014Dec/0002.html|tag|http://www.semanlink.net/tag/public_hydra_w3_org +http://lists.w3.org/Archives/Public/public-hydra/2014Dec/0002.html|tag|http://www.semanlink.net/tag/hydra_templated_links +http://lists.w3.org/Archives/Public/public-hydra/2014Dec/0002.html|tag|http://www.semanlink.net/tag/hydra +http://lists.w3.org/Archives/Public/public-hydra/2014Dec/0002.html|title|Local identifiers in IriTemplates from Dietrich Schulten on 2014-12-13 (public-hydra@w3.org from December 2014) +http://lists.w3.org/Archives/Public/public-hydra/2014Dec/0002.html|creationTime|2015-02-19T14:02:49Z +http://www.christian-faure.net/2016/02/07/architecture-des-apis-dans-les-systemes-distribues/|creationDate|2016-02-08 +http://www.christian-faure.net/2016/02/07/architecture-des-apis-dans-les-systemes-distribues/|tag|http://www.semanlink.net/tag/systemes_distribues +http://www.christian-faure.net/2016/02/07/architecture-des-apis-dans-les-systemes-distribues/|tag|http://www.semanlink.net/tag/api +http://www.christian-faure.net/2016/02/07/architecture-des-apis-dans-les-systemes-distribues/|tag|http://www.semanlink.net/tag/christian_faure +http://www.christian-faure.net/2016/02/07/architecture-des-apis-dans-les-systemes-distribues/|title|Architecture des APIs dans les systèmes distribués +http://www.christian-faure.net/2016/02/07/architecture-des-apis-dans-les-systemes-distribues/|creationTime|2016-02-08T13:34:46Z +http://www.nytimes.com/2013/04/10/opinion/eating-with-our-eyes-closed.html?_r=1&|creationDate|2013-04-11 +http://www.nytimes.com/2013/04/10/opinion/eating-with-our-eyes-closed.html?_r=1&|tag|http://www.semanlink.net/tag/usa +http://www.nytimes.com/2013/04/10/opinion/eating-with-our-eyes-closed.html?_r=1&|tag|http://www.semanlink.net/tag/censure_et_maltraitance_animale +http://www.nytimes.com/2013/04/10/opinion/eating-with-our-eyes-closed.html?_r=1&|title|With Ag-Gag Laws, We’re Eating With Our Eyes Closed - NYTimes.com +http://www.nytimes.com/2013/04/10/opinion/eating-with-our-eyes-closed.html?_r=1&|creationTime|2013-04-11T21:43:36Z +http://news.bbc.co.uk/1/hi/world/europe/4714103.stm|creationDate|2005-08-22 +http://news.bbc.co.uk/1/hi/world/europe/4714103.stm|tag|http://www.semanlink.net/tag/thrace +http://news.bbc.co.uk/1/hi/world/europe/4714103.stm|title|BBC NEWS Bulgaria unearths Thracian riches +http://news.bbc.co.uk/1/hi/world/europe/4714103.stm|source|BBC 
+http://www.masternewmedia.org/news/2005/04/01/where_to_find_great_free.htm|creationDate|2005-05-04 +http://www.masternewmedia.org/news/2005/04/01/where_to_find_great_free.htm|tag|http://www.semanlink.net/tag/photos_online +http://www.masternewmedia.org/news/2005/04/01/where_to_find_great_free.htm|title|Where To Find Great Free Photographs And Visuals For Your Own Online Articles +http://ivan-herman.name/2012/04/17/linked-data-on-the-web-workshop-lyon/|creationDate|2012-04-17 +http://ivan-herman.name/2012/04/17/linked-data-on-the-web-workshop-lyon/|tag|http://www.semanlink.net/tag/ldow2012 +http://ivan-herman.name/2012/04/17/linked-data-on-the-web-workshop-lyon/|tag|http://www.semanlink.net/tag/ivan_herman +http://ivan-herman.name/2012/04/17/linked-data-on-the-web-workshop-lyon/|title|Linked Data on the Web Workshop, Lyon « Ivan’s private site +http://ivan-herman.name/2012/04/17/linked-data-on-the-web-workshop-lyon/|creationTime|2012-04-17T11:22:29Z +https://rare-technologies.com/word2vec-tutorial/|creationDate|2017-06-01 +https://rare-technologies.com/word2vec-tutorial/|tag|http://www.semanlink.net/tag/word2vec +https://rare-technologies.com/word2vec-tutorial/|tag|http://www.semanlink.net/tag/gensim +https://rare-technologies.com/word2vec-tutorial/|tag|http://www.semanlink.net/tag/tutorial +https://rare-technologies.com/word2vec-tutorial/|title|Word2vec in gensim Tutorial RaRe Technologies +https://rare-technologies.com/word2vec-tutorial/|creationTime|2017-06-01T02:22:33Z +http://www.les-ernest.fr/orlean|creationDate|2010-08-16 +http://www.les-ernest.fr/orlean|tag|http://www.semanlink.net/tag/marches_financiers +http://www.les-ernest.fr/orlean|tag|http://www.semanlink.net/tag/normale_sup +http://www.les-ernest.fr/orlean|title|André Orléan: L'instabilité des marchés financiers Les Ernest +http://www.les-ernest.fr/orlean|creationTime|2010-08-16T23:06:30Z +http://www.w3.org/DesignIssues/HTTP-URI.html|creationDate|2007-01-02 +http://www.w3.org/DesignIssues/HTTP-URI.html|tag|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.w3.org/DesignIssues/HTTP-URI.html|tag|http://www.semanlink.net/tag/uri +http://www.w3.org/DesignIssues/HTTP-URI.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/DesignIssues/HTTP-URI.html|tag|http://www.semanlink.net/tag/httprange_14 +http://www.w3.org/DesignIssues/HTTP-URI.html|comment|"""This was a result of my being in a minority with this opinion on the Technical Architecture Group, and yet finding it the only one I could accept. This is related to TAG issue HTTPRange-14."" (TBL)" +http://www.w3.org/DesignIssues/HTTP-URI.html|title|What do HTTP URIs Identify? 
- Design Issues +http://www.lemonde.fr/afrique/article/2015/02/09/la-chine-et-la-france-dans-un-menage-a-trois-avec-l-afrique_4572735_3212.html|creationDate|2015-02-09 +http://www.lemonde.fr/afrique/article/2015/02/09/la-chine-et-la-france-dans-un-menage-a-trois-avec-l-afrique_4572735_3212.html|tag|http://www.semanlink.net/tag/france_afrique +http://www.lemonde.fr/afrique/article/2015/02/09/la-chine-et-la-france-dans-un-menage-a-trois-avec-l-afrique_4572735_3212.html|tag|http://www.semanlink.net/tag/chine_afrique +http://www.lemonde.fr/afrique/article/2015/02/09/la-chine-et-la-france-dans-un-menage-a-trois-avec-l-afrique_4572735_3212.html|title|La Chine et la France dans un ménage à trois avec l’Afrique +http://www.lemonde.fr/afrique/article/2015/02/09/la-chine-et-la-france-dans-un-menage-a-trois-avec-l-afrique_4572735_3212.html|creationTime|2015-02-09T22:20:11Z +http://www.bbc.com/news/science-environment-30058176|creationDate|2014-11-15 +http://www.bbc.com/news/science-environment-30058176|tag|http://www.semanlink.net/tag/philae +http://www.bbc.com/news/science-environment-30058176|title|BBC News - Philae comet lander sends more data before losing power +http://www.bbc.com/news/science-environment-30058176|creationTime|2014-11-15T10:49:08Z +http://www.google.com/search?sourceid=mozclient&ie=utf-8&oe=utf-8&q=-inurl%3A(htm%7Chtml%7Cphp)+intitle%3A%22index+of%22+%2B%22last+modified%22+%2B%22parent+directory%22+%2Bdescription+%2Bsize+%2B(jpg%7Cgif)+|creationDate|2005-04-27 +http://www.google.com/search?sourceid=mozclient&ie=utf-8&oe=utf-8&q=-inurl%3A(htm%7Chtml%7Cphp)+intitle%3A%22index+of%22+%2B%22last+modified%22+%2B%22parent+directory%22+%2Bdescription+%2Bsize+%2B(jpg%7Cgif)+|tag|http://www.semanlink.net/tag/hack +http://www.google.com/search?sourceid=mozclient&ie=utf-8&oe=utf-8&q=-inurl%3A(htm%7Chtml%7Cphp)+intitle%3A%22index+of%22+%2B%22last+modified%22+%2B%22parent+directory%22+%2Bdescription+%2Bsize+%2B(jpg%7Cgif)+|tag|http://www.semanlink.net/tag/google +http://www.google.com/search?sourceid=mozclient&ie=utf-8&oe=utf-8&q=-inurl%3A(htm%7Chtml%7Cphp)+intitle%3A%22index+of%22+%2B%22last+modified%22+%2B%22parent+directory%22+%2Bdescription+%2Bsize+%2B(jpg%7Cgif)+|title|Google hack example +http://novaspivack.typepad.com/nova_spivacks_weblog/2006/11/minding_the_pla.html|creationDate|2006-11-07 +http://novaspivack.typepad.com/nova_spivacks_weblog/2006/11/minding_the_pla.html|tag|http://www.semanlink.net/tag/anticipation +http://novaspivack.typepad.com/nova_spivacks_weblog/2006/11/minding_the_pla.html|tag|http://www.semanlink.net/tag/global_brain +http://novaspivack.typepad.com/nova_spivacks_weblog/2006/11/minding_the_pla.html|tag|http://www.semanlink.net/tag/semantic_web +http://novaspivack.typepad.com/nova_spivacks_weblog/2006/11/minding_the_pla.html|comment|long term vision of the evolution of the web and the world by Nova Spivack, who runs a Semantic Web startup company Radar Networks. 
http://www.mindingtheplanet.net +http://novaspivack.typepad.com/nova_spivacks_weblog/2006/11/minding_the_pla.html|title|Minding the Planet: Minding The Planet -- The Meaning and Future of the Semantic Web +http://manu.sporny.org/2014/json-ld-origins-2/|creationDate|2014-10-20 +http://manu.sporny.org/2014/json-ld-origins-2/|tag|http://www.semanlink.net/tag/manu_sporny +http://manu.sporny.org/2014/json-ld-origins-2/|tag|http://www.semanlink.net/tag/json_ld +http://manu.sporny.org/2014/json-ld-origins-2/|title|JSON-LD and Why I Hate the Semantic Web The Beautiful, Tormented Machine +http://manu.sporny.org/2014/json-ld-origins-2/|creationTime|2014-10-20T02:47:51Z +https://news.cnrs.fr/articles/ramanujan-the-man-who-knew-infinity?utm_content=buffer4160a&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-04-10 +https://news.cnrs.fr/articles/ramanujan-the-man-who-knew-infinity?utm_content=buffer4160a&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/ramanujan +https://news.cnrs.fr/articles/ramanujan-the-man-who-knew-infinity?utm_content=buffer4160a&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Ramanujan: The Man Who Knew Infinity CNRS News +https://news.cnrs.fr/articles/ramanujan-the-man-who-knew-infinity?utm_content=buffer4160a&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-04-10T18:41:21Z +http://ids.snu.ac.kr/w/images/f/f8/SC12.pdf|creationDate|2013-03-05 +http://ids.snu.ac.kr/w/images/f/f8/SC12.pdf|tag|http://www.semanlink.net/tag/product_description +http://ids.snu.ac.kr/w/images/f/f8/SC12.pdf|comment|Taehee Lee, Ig-hoon Lee, Suekyung Lee, Sang-goo Lee, Dongkyu Kim, Jonghoon Chun, Hyunja ... +http://ids.snu.ac.kr/w/images/f/f8/SC12.pdf|title|Building an operational product ontology system +http://ids.snu.ac.kr/w/images/f/f8/SC12.pdf|creationTime|2013-03-05T11:37:02Z +https://www.youtube.com/watch?v=GlUUmiIEinY&t=13m19s|creationDate|2017-03-11 +https://www.youtube.com/watch?v=GlUUmiIEinY&t=13m19s|tag|http://www.semanlink.net/tag/bob_dylan +https://www.youtube.com/watch?v=GlUUmiIEinY&t=13m19s|tag|http://www.semanlink.net/tag/immigration +https://www.youtube.com/watch?v=GlUUmiIEinY&t=13m19s|comment|"(firefox)" +https://www.youtube.com/watch?v=GlUUmiIEinY&t=13m19s|title|I pity the poor immigrant +https://www.youtube.com/watch?v=GlUUmiIEinY&t=13m19s|creationTime|2017-03-11T00:04:15Z +http://www.futura-sciences.com/magazines/terre/infos/actu/d/terre-extinction-masse-permien-elle-due-micro-organismes-53129/|creationDate|2014-04-05 +http://www.futura-sciences.com/magazines/terre/infos/actu/d/terre-extinction-masse-permien-elle-due-micro-organismes-53129/|tag|http://www.semanlink.net/tag/extinction_de_masse_de_la_fin_du_permien +http://www.futura-sciences.com/magazines/terre/infos/actu/d/terre-extinction-masse-permien-elle-due-micro-organismes-53129/|title|L'extinction de masse du Permien est-elle due à des micro-organismes ? 
+http://www.futura-sciences.com/magazines/terre/infos/actu/d/terre-extinction-masse-permien-elle-due-micro-organismes-53129/|creationTime|2014-04-05T17:43:46Z +https://athalhammer.github.io/wikidata-autocomplete/wikidata.html|creationDate|2017-10-21 +https://athalhammer.github.io/wikidata-autocomplete/wikidata.html|tag|http://www.semanlink.net/tag/wikidata +https://athalhammer.github.io/wikidata-autocomplete/wikidata.html|tag|http://www.semanlink.net/tag/solr_autocomplete +https://athalhammer.github.io/wikidata-autocomplete/wikidata.html|tag|http://www.semanlink.net/tag/pagerank +https://athalhammer.github.io/wikidata-autocomplete/wikidata.html|tag|http://www.semanlink.net/tag/solr +https://athalhammer.github.io/wikidata-autocomplete/wikidata.html|title|#PageRank-based #Wikidata autocomplete powered by #Apache #Solr +https://athalhammer.github.io/wikidata-autocomplete/wikidata.html|creationTime|2017-10-21T11:55:13Z +http://www.newscientist.com/article.ns?id=mg18925423.600&print=true|creationDate|2006-03-11 +http://www.newscientist.com/article.ns?id=mg18925423.600&print=true|tag|http://www.semanlink.net/tag/matiere_noire +http://www.newscientist.com/article.ns?id=mg18925423.600&print=true|tag|http://www.semanlink.net/tag/trou_noir +http://www.newscientist.com/article.ns?id=mg18925423.600&print=true|tag|http://www.semanlink.net/tag/energie_sombre +http://www.newscientist.com/article.ns?id=mg18925423.600&print=true|comment|DARK energy and dark matter, two of the greatest mysteries confronting physicists, may be two sides of the same coin. A new and as yet undiscovered kind of star could explain both phenomena and, in turn, remove black holes from the lexicon of cosmology. +http://www.newscientist.com/article.ns?id=mg18925423.600&print=true|title|Three cosmic enigmas, one audacious answer - New Scientist Three cosmic enigmas, one audacious answer - News Print New Scientist +http://www.w3.org/TR/2014/REC-vocab-data-cube-20140116/#cubes-model|creationDate|2014-01-19 +http://www.w3.org/TR/2014/REC-vocab-data-cube-20140116/#cubes-model|tag|http://www.semanlink.net/tag/rdf_vocabularies +http://www.w3.org/TR/2014/REC-vocab-data-cube-20140116/#cubes-model|tag|http://www.semanlink.net/tag/sdmx_rdf +http://www.w3.org/TR/2014/REC-vocab-data-cube-20140116/#cubes-model|title|The RDF Data Cube Vocabulary +http://www.w3.org/TR/2014/REC-vocab-data-cube-20140116/#cubes-model|creationTime|2014-01-19T14:30:29Z +https://cloud.google.com/tpu/|creationDate|2018-05-31 +https://cloud.google.com/tpu/|tag|http://www.semanlink.net/tag/tensorflow +https://cloud.google.com/tpu/|tag|http://www.semanlink.net/tag/google_cloud +https://cloud.google.com/tpu/|title|Cloud TPU – Accélérateurs de ML pour TensorFlow    Google Cloud +https://cloud.google.com/tpu/|creationTime|2018-05-31T16:23:57Z +http://www.arcticphoto.co.uk/|creationDate|2006-10-28 +http://www.arcticphoto.co.uk/|tag|http://www.semanlink.net/tag/regions_polaires +http://www.arcticphoto.co.uk/|title|Arctic & Antarctic pictures +http://horicky.blogspot.co.at/2012/09/location-sensitive-hashing-in-map-reduce.html|creationDate|2013-04-23 +http://horicky.blogspot.co.at/2012/09/location-sensitive-hashing-in-map-reduce.html|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +http://horicky.blogspot.co.at/2012/09/location-sensitive-hashing-in-map-reduce.html|tag|http://www.semanlink.net/tag/map_reduce +http://horicky.blogspot.co.at/2012/09/location-sensitive-hashing-in-map-reduce.html|title|Pragmatic Programming Techniques: Location Sensitive Hashing in Map Reduce 
+http://horicky.blogspot.co.at/2012/09/location-sensitive-hashing-in-map-reduce.html|creationTime|2013-04-23T02:21:19Z +https://en.wikipedia.org/wiki/Naked_Blood|creationDate|2017-12-03 +https://en.wikipedia.org/wiki/Naked_Blood|tag|http://www.semanlink.net/tag/film_japonais +https://en.wikipedia.org/wiki/Naked_Blood|title|Naked Blood +https://en.wikipedia.org/wiki/Naked_Blood|creationTime|2017-12-03T14:18:06Z +http://arstechnica.com/information-technology/2015/12/facebooks-open-sourcing-of-ai-hardware-is-the-start-of-the-deep-learning-revolution/|creationDate|2015-12-22 +http://arstechnica.com/information-technology/2015/12/facebooks-open-sourcing-of-ai-hardware-is-the-start-of-the-deep-learning-revolution/|tag|http://www.semanlink.net/tag/deep_learning +http://arstechnica.com/information-technology/2015/12/facebooks-open-sourcing-of-ai-hardware-is-the-start-of-the-deep-learning-revolution/|tag|http://www.semanlink.net/tag/facebook +http://arstechnica.com/information-technology/2015/12/facebooks-open-sourcing-of-ai-hardware-is-the-start-of-the-deep-learning-revolution/|title|Facebook’s open-sourcing of AI hardware is the start of the deep-learning revolution Ars Technica +http://arstechnica.com/information-technology/2015/12/facebooks-open-sourcing-of-ai-hardware-is-the-start-of-the-deep-learning-revolution/|creationTime|2015-12-22T19:46:57Z +http://people.csail.mit.edu/dfhuynh/projects/www-conferences/www-conferences.html|creationDate|2007-05-24 +http://people.csail.mit.edu/dfhuynh/projects/www-conferences/www-conferences.html|tag|http://www.semanlink.net/tag/simile_exhibit +http://people.csail.mit.edu/dfhuynh/projects/www-conferences/www-conferences.html|tag|http://www.semanlink.net/tag/www2007 +http://people.csail.mit.edu/dfhuynh/projects/www-conferences/www-conferences.html|comment|Exhibit based view of the WWW2007 paper +http://people.csail.mit.edu/dfhuynh/projects/www-conferences/www-conferences.html|title|WWW Conference Exhibits +http://people.csail.mit.edu/dfhuynh/projects/www-conferences/www-conferences.html|creationTime|2007-05-24T13:13:00Z +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|creationDate|2009-01-31 +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|tag|http://www.semanlink.net/tag/herve_kempf +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|tag|http://www.semanlink.net/tag/xavier_bertrand +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|tag|http://www.semanlink.net/tag/travailler_le_dimanche +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|tag|http://www.semanlink.net/tag/travailler_moins +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|tag|http://www.semanlink.net/tag/crise_ecologique +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|tag|http://www.semanlink.net/tag/good +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|comment|"Monsieur Bertrand, si on travaille toujours plus, on fabriquera et on vendra toujours plus, et on polluera toujours plus. 
Cela s'appelle la crise écologique.
+ +Depuis vingt ans, en France, la productivité du travail a augmenté en moyenne de 2 % par an. Cela signifie qu'avec la même quantité de travail, on produit toujours plus.
+ +Donc, si l'on veut limiter notre impact écologique, on doit travailler moins. Travailler moins pour gagner autant, et faire autre chose de son temps" +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|title|Le dimanche de M. Bertrand, par Hervé Kempf +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|creationTime|2009-01-31T20:35:11Z +http://www.lemonde.fr/opinions/article/2009/01/31/le-dimanche-de-m-bertrand-par-herve-kempf_1149008_3232.html#ens_id=629165|source|Le Monde +http://www.eswc2007.org/|creationDate|2007-05-03 +http://www.eswc2007.org/|tag|http://www.semanlink.net/tag/eswc_2007 +http://www.eswc2007.org/|title|4th European Semantic Web Conference 2007 +http://www.eswc2007.org/|creationTime|2007-05-03T00:07:53Z +https://www.cs.cmu.edu/~rsalakhu/NY_2019_v3.pdf|creationDate|2019-03-03 +https://www.cs.cmu.edu/~rsalakhu/NY_2019_v3.pdf|tag|http://www.semanlink.net/tag/domain_knowledge_deep_learning +https://www.cs.cmu.edu/~rsalakhu/NY_2019_v3.pdf|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +https://www.cs.cmu.edu/~rsalakhu/NY_2019_v3.pdf|tag|http://www.semanlink.net/tag/slides +https://www.cs.cmu.edu/~rsalakhu/NY_2019_v3.pdf|title|Integrating Domain-Knowledge into Deep Learning (2019) +https://www.cs.cmu.edu/~rsalakhu/NY_2019_v3.pdf|creationTime|2019-03-03T09:38:54Z +http://n2.talis.com/wiki/Platform_FAQ|creationDate|2008-07-20 +http://n2.talis.com/wiki/Platform_FAQ|tag|http://www.semanlink.net/tag/faq +http://n2.talis.com/wiki/Platform_FAQ|tag|http://www.semanlink.net/tag/talis_platform +http://n2.talis.com/wiki/Platform_FAQ|title|Platform FAQ - n² wiki +http://n2.talis.com/wiki/Platform_FAQ|creationTime|2008-07-20T02:12:58Z +https://www.airpair.com/angularjs/posts/top-10-mistakes-angularjs-developers-make|creationDate|2015-08-29 +https://www.airpair.com/angularjs/posts/top-10-mistakes-angularjs-developers-make|tag|http://www.semanlink.net/tag/angularjs +https://www.airpair.com/angularjs/posts/top-10-mistakes-angularjs-developers-make|title|10 Top Mistakes AngularJS Developers Make +https://www.airpair.com/angularjs/posts/top-10-mistakes-angularjs-developers-make|creationTime|2015-08-29T19:08:21Z +http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-2.html|creationDate|2009-01-15 +http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-2.html|tag|http://www.semanlink.net/tag/semantic_web_crm +http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-2.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-2.html|title|Semantic Web project ideas number 3 - (ERP) bobdc.blog +http://www.snee.com/bobdc.blog/2007/04/semantic-web-project-ideas-num-2.html|creationTime|2009-01-15T18:19:45Z +https://github.com/linkeddata/cimba|creationDate|2014-11-05 +https://github.com/linkeddata/cimba|tag|http://www.semanlink.net/tag/cimba +https://github.com/linkeddata/cimba|comment|"CIMBA is a privacy-friendly, decentralized microblogging application that runs in your browser. It is built using the latest HTML5 technologies and Web standards. With CIMBA, people get a microblogging app that behaves like Twitter, built entirely out of parts they can control. 
+ +To use CIMBA, people must have an account at some Data Server (also called a “personal data store”) which implements the Linked Data Platform (LDP) Web standard with appropriate extensions. Users may choose to run their own Data Server, use one provided by an employer/school/government, or even pay for a Data Server service. Whatever their choice, they can easily switch to another Data Server whenever they want or even concurrently use different Data Servers for different aspects of their life." +https://github.com/linkeddata/cimba|title|CIMBA Client-Integrated Micro-Blogging Architecture application +https://github.com/linkeddata/cimba|creationTime|2014-11-05T22:14:12Z +http://www.google.com/webmasters/tools/richsnippets|creationDate|2013-06-13 +http://www.google.com/webmasters/tools/richsnippets|tag|http://www.semanlink.net/tag/google_rich_snippets +http://www.google.com/webmasters/tools/richsnippets|tag|http://www.semanlink.net/tag/schema_org +http://www.google.com/webmasters/tools/richsnippets|tag|http://www.semanlink.net/tag/validation +http://www.google.com/webmasters/tools/richsnippets|tag|http://www.semanlink.net/tag/webmasters_google +http://www.google.com/webmasters/tools/richsnippets|title|Google Structured Data Testing Tool +http://www.google.com/webmasters/tools/richsnippets|creationTime|2013-06-13T10:35:28Z +http://www.newscientist.com/article/mg22229722.800-venus-death-dive-to-unlock-secrets-of-earths-evil-twin.html|creationDate|2014-06-09 +http://www.newscientist.com/article/mg22229722.800-venus-death-dive-to-unlock-secrets-of-earths-evil-twin.html|tag|http://www.semanlink.net/tag/venus_express +http://www.newscientist.com/article/mg22229722.800-venus-death-dive-to-unlock-secrets-of-earths-evil-twin.html|title|Venus death dive to unlock secrets of Earth's evil twin - space - 05 June 2014 - New Scientist +http://www.newscientist.com/article/mg22229722.800-venus-death-dive-to-unlock-secrets-of-earths-evil-twin.html|creationTime|2014-06-09T00:01:53Z +http://webbackplane.com/mark-birbeck/blog/2009/04/getting-started-with-rdfa|creationDate|2011-03-24 +http://webbackplane.com/mark-birbeck/blog/2009/04/getting-started-with-rdfa|tag|http://www.semanlink.net/tag/rdfa +http://webbackplane.com/mark-birbeck/blog/2009/04/getting-started-with-rdfa|tag|http://www.semanlink.net/tag/foaf +http://webbackplane.com/mark-birbeck/blog/2009/04/getting-started-with-rdfa|title|Getting started with RDFa: Creating a basic FOAF profile webBackplane +http://webbackplane.com/mark-birbeck/blog/2009/04/getting-started-with-rdfa|creationTime|2011-03-24T22:13:02Z +http://stackoverflow.com/questions/13603882/feature-selection-and-reduction-for-text-classification|creationDate|2014-03-15 +http://stackoverflow.com/questions/13603882/feature-selection-and-reduction-for-text-classification|tag|http://www.semanlink.net/tag/nlp_text_classification +http://stackoverflow.com/questions/13603882/feature-selection-and-reduction-for-text-classification|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/13603882/feature-selection-and-reduction-for-text-classification|title|nlp - Feature Selection and Reduction for Text Classification - Stack Overflow +http://stackoverflow.com/questions/13603882/feature-selection-and-reduction-for-text-classification|creationTime|2014-03-15T17:41:20Z +https://groups.google.com/forum/?fromgroups=#!topic/publishing-statistical-data/gMojBEtvHqQ|creationDate|2013-02-15 
+https://groups.google.com/forum/?fromgroups=#!topic/publishing-statistical-data/gMojBEtvHqQ|tag|http://www.semanlink.net/tag/units_of_measure +https://groups.google.com/forum/?fromgroups=#!topic/publishing-statistical-data/gMojBEtvHqQ|tag|http://www.semanlink.net/tag/richard_cyganiak +https://groups.google.com/forum/?fromgroups=#!topic/publishing-statistical-data/gMojBEtvHqQ|title|Code lists for units of measure? - Google Groups +https://groups.google.com/forum/?fromgroups=#!topic/publishing-statistical-data/gMojBEtvHqQ|creationTime|2013-02-15T11:42:35Z +http://gotze.eu/2003/01/coding-for-automation.html|creationDate|2007-09-25 +http://gotze.eu/2003/01/coding-for-automation.html|tag|http://www.semanlink.net/tag/bookmarklet +http://gotze.eu/2003/01/coding-for-automation.html|comment|Extracting link rel=”alternate” in a bookmarklet +http://gotze.eu/2003/01/coding-for-automation.html|title|GotzeBlogged » Blog Archive » Coding for automation +http://gotze.eu/2003/01/coding-for-automation.html|creationTime|2007-09-25T02:10:08Z +http://www.lemonde.fr/pixels/article/2016/03/12/a-berlin-edward-snowden-repond-a-barack-obama-sur-le-chiffrement_4881829_4408996.html|creationDate|2016-03-12 +http://www.lemonde.fr/pixels/article/2016/03/12/a-berlin-edward-snowden-repond-a-barack-obama-sur-le-chiffrement_4881829_4408996.html|tag|http://www.semanlink.net/tag/fbi_v_apple +http://www.lemonde.fr/pixels/article/2016/03/12/a-berlin-edward-snowden-repond-a-barack-obama-sur-le-chiffrement_4881829_4408996.html|tag|http://www.semanlink.net/tag/encryption +http://www.lemonde.fr/pixels/article/2016/03/12/a-berlin-edward-snowden-repond-a-barack-obama-sur-le-chiffrement_4881829_4408996.html|tag|http://www.semanlink.net/tag/edward_snowden +http://www.lemonde.fr/pixels/article/2016/03/12/a-berlin-edward-snowden-repond-a-barack-obama-sur-le-chiffrement_4881829_4408996.html|title|A Berlin, Edward Snowden répond à Barack Obama sur le chiffrement +http://www.lemonde.fr/pixels/article/2016/03/12/a-berlin-edward-snowden-repond-a-barack-obama-sur-le-chiffrement_4881829_4408996.html|creationTime|2016-03-12T21:45:27Z +https://www.theguardian.com/news/2018/mar/17/cambridge-analytica-facebook-influence-us-election|creationDate|2018-03-18 +https://www.theguardian.com/news/2018/mar/17/cambridge-analytica-facebook-influence-us-election|tag|http://www.semanlink.net/tag/facebook_cambridge_analytica +https://www.theguardian.com/news/2018/mar/17/cambridge-analytica-facebook-influence-us-election|title|Revealed: 50 million Facebook profiles harvested for Cambridge Analytica in major data breach News The Guardian +https://www.theguardian.com/news/2018/mar/17/cambridge-analytica-facebook-influence-us-election|creationTime|2018-03-18T10:38:51Z +http://benlog.com/2015/01/23/your-information-wants-to-be-free-obamacare-edition/|creationDate|2015-01-24 +http://benlog.com/2015/01/23/your-information-wants-to-be-free-obamacare-edition/|tag|http://www.semanlink.net/tag/privacy_and_internet +http://benlog.com/2015/01/23/your-information-wants-to-be-free-obamacare-edition/|tag|http://www.semanlink.net/tag/ben_adida +http://benlog.com/2015/01/23/your-information-wants-to-be-free-obamacare-edition/|tag|http://www.semanlink.net/tag/obamacare +http://benlog.com/2015/01/23/your-information-wants-to-be-free-obamacare-edition/|comment|Ghostery +http://benlog.com/2015/01/23/your-information-wants-to-be-free-obamacare-edition/|title|(your) information wants to be free – obamacare edition Benlog 
+http://benlog.com/2015/01/23/your-information-wants-to-be-free-obamacare-edition/|creationTime|2015-01-24T11:33:28Z +http://tomcat.apache.org/tomcat-5.0-doc/appdev/sample/build.xml|creationDate|2006-09-03 +http://tomcat.apache.org/tomcat-5.0-doc/appdev/sample/build.xml|tag|http://www.semanlink.net/tag/java_dev +http://tomcat.apache.org/tomcat-5.0-doc/appdev/sample/build.xml|tag|http://www.semanlink.net/tag/tomcat +http://tomcat.apache.org/tomcat-5.0-doc/appdev/sample/build.xml|tag|http://www.semanlink.net/tag/servlet +http://tomcat.apache.org/tomcat-5.0-doc/appdev/sample/build.xml|comment|including enhanced support for deploying directly to a Tomcat 5 based server. +http://tomcat.apache.org/tomcat-5.0-doc/appdev/sample/build.xml|title|General purpose build script for web applications and web services +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906026,0.html|creationDate|2007-05-07 +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906026,0.html|tag|http://www.semanlink.net/tag/fichage_genetique +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906026,0.html|tag|http://www.semanlink.net/tag/sarkozy +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906026,0.html|tag|http://www.semanlink.net/tag/j_hallucine +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906026,0.html|title|A 8 et 11 ans, ils sont menacés de fichage génétique pour vol de jouets +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906026,0.html|creationTime|2007-05-07T11:42:03Z +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906026,0.html|source|Le Monde +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-823448,50-906026,0.html|date|2007-05-05 +http://www.flickr.com/photos/tags/ldow2008/|creationDate|2008-05-06 +http://www.flickr.com/photos/tags/ldow2008/|tag|http://www.semanlink.net/tag/ldow2008 +http://www.flickr.com/photos/tags/ldow2008/|title|Flickr: Items tagged with ldow2008 +http://www.flickr.com/photos/tags/ldow2008/|creationTime|2008-05-06T21:28:42Z +https://developers.google.com/knowledge-graph/|creationDate|2015-12-19 +https://developers.google.com/knowledge-graph/|tag|http://www.semanlink.net/tag/google_knowledge_graph +https://developers.google.com/knowledge-graph/|title|Google Knowledge Graph Search API    Knowledge Graph Search API    Google Developers +https://developers.google.com/knowledge-graph/|creationTime|2015-12-19T23:58:46Z +http://www.lemonde.fr/education/article/2017/11/15/ashesi-laboratoire-du-ghana-de-demain_5214968_1473685.html|creationDate|2017-11-15 +http://www.lemonde.fr/education/article/2017/11/15/ashesi-laboratoire-du-ghana-de-demain_5214968_1473685.html|tag|http://www.semanlink.net/tag/new_africa +http://www.lemonde.fr/education/article/2017/11/15/ashesi-laboratoire-du-ghana-de-demain_5214968_1473685.html|tag|http://www.semanlink.net/tag/education +http://www.lemonde.fr/education/article/2017/11/15/ashesi-laboratoire-du-ghana-de-demain_5214968_1473685.html|tag|http://www.semanlink.net/tag/ghana +http://www.lemonde.fr/education/article/2017/11/15/ashesi-laboratoire-du-ghana-de-demain_5214968_1473685.html|comment|L’université, qui a remporté le Wise Prize for Education, propose à ses étudiants de plancher collectivement sur des projets pour résoudre les problèmes du pays +http://www.lemonde.fr/education/article/2017/11/15/ashesi-laboratoire-du-ghana-de-demain_5214968_1473685.html|title|Ashesi, laboratoire du Ghana de demain 
+http://www.lemonde.fr/education/article/2017/11/15/ashesi-laboratoire-du-ghana-de-demain_5214968_1473685.html|creationTime|2017-11-15T08:59:23Z +http://www.numenta.com/htm-overview/education/HTM_CorticalLearningAlgorithms.pdf|creationDate|2014-01-19 +http://www.numenta.com/htm-overview/education/HTM_CorticalLearningAlgorithms.pdf|tag|http://www.semanlink.net/tag/neuroscience_and_ai +http://www.numenta.com/htm-overview/education/HTM_CorticalLearningAlgorithms.pdf|tag|http://www.semanlink.net/tag/hierarchical_temporal_memory +http://www.numenta.com/htm-overview/education/HTM_CorticalLearningAlgorithms.pdf|tag|http://www.semanlink.net/tag/coursera_computational_neuroscience +http://www.numenta.com/htm-overview/education/HTM_CorticalLearningAlgorithms.pdf|title|Numenta: Hierarchical Temporal Memory, including HTM cortical learning algorithms +http://www.numenta.com/htm-overview/education/HTM_CorticalLearningAlgorithms.pdf|creationTime|2014-01-19T21:01:19Z +http://www.apple.com/sitemap/|creationDate|2008-01-25 +http://www.apple.com/sitemap/|tag|http://www.semanlink.net/tag/semanlink_todo +http://www.apple.com/sitemap/|tag|http://www.semanlink.net/tag/sl_gui +http://www.apple.com/sitemap/|title|Apple - Site Map (example of website with good hierarchy) +http://www.apple.com/sitemap/|creationTime|2008-01-25T13:52:13Z +http://www.cnrs.fr/inee/communication/breves/b353.html|creationDate|2018-04-12 +http://www.cnrs.fr/inee/communication/breves/b353.html|tag|http://www.semanlink.net/tag/horizontal_gene_transfer +http://www.cnrs.fr/inee/communication/breves/b353.html|tag|http://www.semanlink.net/tag/histoire_de_la_vie +http://www.cnrs.fr/inee/communication/breves/b353.html|title|CNRS - Des fossiles dans les génomes pour dater l’arbre du vivant +http://www.cnrs.fr/inee/communication/breves/b353.html|creationTime|2018-04-12T23:46:27Z +http://archeo.blog.lemonde.fr/2014/10/16/une-magnifique-mosaique-decouverte-dans-la-tombe-damphipolis/|creationDate|2014-10-18 +http://archeo.blog.lemonde.fr/2014/10/16/une-magnifique-mosaique-decouverte-dans-la-tombe-damphipolis/|tag|http://www.semanlink.net/tag/tombe_d_amphipolis +http://archeo.blog.lemonde.fr/2014/10/16/une-magnifique-mosaique-decouverte-dans-la-tombe-damphipolis/|title|Une magnifique mosaïque découverte dans la tombe d’Amphipolis Dans les pas des archéologues +http://archeo.blog.lemonde.fr/2014/10/16/une-magnifique-mosaique-decouverte-dans-la-tombe-damphipolis/|creationTime|2014-10-18T09:21:46Z +http://en.wikipedia.org/wiki/The_Brother_from_Another_Planet|creationDate|2010-11-18 +http://en.wikipedia.org/wiki/The_Brother_from_Another_Planet|tag|http://www.semanlink.net/tag/vito +http://en.wikipedia.org/wiki/The_Brother_from_Another_Planet|tag|http://www.semanlink.net/tag/film_americain +http://en.wikipedia.org/wiki/The_Brother_from_Another_Planet|title|The Brother from Another Planet +http://en.wikipedia.org/wiki/The_Brother_from_Another_Planet|creationTime|2010-11-18T01:44:29Z +http://java.sun.com/j2se/1.5.0/docs/guide/jmx/tutorial/essential.html|creationDate|2010-02-11 +http://java.sun.com/j2se/1.5.0/docs/guide/jmx/tutorial/essential.html|tag|http://www.semanlink.net/tag/java_profiling +http://java.sun.com/j2se/1.5.0/docs/guide/jmx/tutorial/essential.html|title|Essentials of the JMX API +http://java.sun.com/j2se/1.5.0/docs/guide/jmx/tutorial/essential.html|creationTime|2010-02-11T15:58:23Z +http://news.bbc.co.uk/2/hi/science/nature/8040982.stm|creationDate|2009-05-12 
+http://news.bbc.co.uk/2/hi/science/nature/8040982.stm|tag|http://www.semanlink.net/tag/hubble +http://news.bbc.co.uk/2/hi/science/nature/8040982.stm|title|BBC NEWS Science & Environment Peering into Hubble's future +http://news.bbc.co.uk/2/hi/science/nature/8040982.stm|creationTime|2009-05-12T00:26:26Z +http://news.bbc.co.uk/2/hi/science/nature/8040982.stm|source|BBC +http://www.autorepair.eu.com/oasis.htm|creationDate|2008-03-25 +http://www.autorepair.eu.com/oasis.htm|tag|http://www.semanlink.net/tag/oasis_specs +http://www.autorepair.eu.com/oasis.htm|title|Auto Repair Information - OASIS Documentation +http://www.autorepair.eu.com/oasis.htm|creationTime|2008-03-25T11:42:27Z +https://youtu.be/OV692rxN_LI|creationDate|2016-04-08 +https://youtu.be/OV692rxN_LI|tag|http://www.semanlink.net/tag/patti_smith +https://youtu.be/OV692rxN_LI|comment|"1:05:15' Because the night +https://www.youtube.com/watch?v=x2AK5eIKL8c 2:10" +https://youtu.be/OV692rxN_LI|title|Patti Smith Montreux 2005 +https://youtu.be/OV692rxN_LI|creationTime|2016-04-08T02:02:23Z +http://www.lemonde.fr/sciences/article/2012/08/05/a-la-recherche-d-une-vie-passee-sur-mars_1742465_1650684.html|creationDate|2012-08-06 +http://www.lemonde.fr/sciences/article/2012/08/05/a-la-recherche-d-une-vie-passee-sur-mars_1742465_1650684.html|tag|http://www.semanlink.net/tag/vie_sur_mars +http://www.lemonde.fr/sciences/article/2012/08/05/a-la-recherche-d-une-vie-passee-sur-mars_1742465_1650684.html|tag|http://www.semanlink.net/tag/mars_curiosity +http://www.lemonde.fr/sciences/article/2012/08/05/a-la-recherche-d-une-vie-passee-sur-mars_1742465_1650684.html|title|A la recherche d'une vie passée sur Mars +http://www.lemonde.fr/sciences/article/2012/08/05/a-la-recherche-d-une-vie-passee-sur-mars_1742465_1650684.html|creationTime|2012-08-06T09:34:10Z +http://theodi.org/|creationDate|2013-07-18 +http://theodi.org/|tag|http://www.semanlink.net/tag/open_data +http://theodi.org/|tag|http://www.semanlink.net/tag/tom_heath +http://theodi.org/|title|Open Data Institute Knowledge for everyone +http://theodi.org/|creationTime|2013-07-18T16:26:30Z +https://fr.wikipedia.org/wiki/Eug%C3%A8ne_de_Savoie-Carignan#Guerre_contre_l.27Empire_ottoman|creationDate|2017-07-12 +https://fr.wikipedia.org/wiki/Eug%C3%A8ne_de_Savoie-Carignan#Guerre_contre_l.27Empire_ottoman|tag|http://www.semanlink.net/tag/personnage_historique +https://fr.wikipedia.org/wiki/Eug%C3%A8ne_de_Savoie-Carignan#Guerre_contre_l.27Empire_ottoman|title|Eugène de Savoie +https://fr.wikipedia.org/wiki/Eug%C3%A8ne_de_Savoie-Carignan#Guerre_contre_l.27Empire_ottoman|creationTime|2017-07-12T00:05:30Z +http://en.wikipedia.org/wiki/Bombay_(film)|creationDate|2007-10-18 +http://en.wikipedia.org/wiki/Bombay_(film)|tag|http://www.semanlink.net/tag/film_indien +http://en.wikipedia.org/wiki/Bombay_(film)|tag|http://www.semanlink.net/tag/hindu_muslim_riots +http://en.wikipedia.org/wiki/Bombay_(film)|tag|http://www.semanlink.net/tag/bombay +http://en.wikipedia.org/wiki/Bombay_(film)|title|Bombay (film) +http://en.wikipedia.org/wiki/Bombay_(film)|creationTime|2007-10-18T01:29:54Z +http://www.mathworks.fr/videos/solving-a-sudoku-puzzle-using-a-webcam-68773.html|creationDate|2014-04-02 +http://www.mathworks.fr/videos/solving-a-sudoku-puzzle-using-a-webcam-68773.html|tag|http://www.semanlink.net/tag/coursera_machine_learning +http://www.mathworks.fr/videos/solving-a-sudoku-puzzle-using-a-webcam-68773.html|tag|http://www.semanlink.net/tag/matlab 
+http://www.mathworks.fr/videos/solving-a-sudoku-puzzle-using-a-webcam-68773.html|title|Digit recognition to solve Sudoku puzzles automatically with a webcam - MATLAB Vidéo +http://www.mathworks.fr/videos/solving-a-sudoku-puzzle-using-a-webcam-68773.html|creationTime|2014-04-02T18:36:43Z +http://news.bbc.co.uk/1/hi/health/4228712.stm|creationDate|2005-09-12 +http://news.bbc.co.uk/1/hi/health/4228712.stm|tag|http://www.semanlink.net/tag/medecine +http://news.bbc.co.uk/1/hi/health/4228712.stm|tag|http://www.semanlink.net/tag/three_parent_embryo +http://news.bbc.co.uk/1/hi/health/4228712.stm|tag|http://www.semanlink.net/tag/adn_mitochondrial +http://news.bbc.co.uk/1/hi/health/4228712.stm|title|BBC NEWS - Concern over three-parent embryo +http://news.bbc.co.uk/1/hi/health/4228712.stm|source|BBC +http://markmail.org/thread/kzsg3qntovmqzbje|creationDate|2010-04-26 +http://markmail.org/thread/kzsg3qntovmqzbje|tag|http://www.semanlink.net/tag/semantic_pingback +http://markmail.org/thread/kzsg3qntovmqzbje|tag|http://www.semanlink.net/tag/foaf +http://markmail.org/thread/kzsg3qntovmqzbje|tag|http://www.semanlink.net/tag/henry_story +http://markmail.org/thread/kzsg3qntovmqzbje|title|[foaf-protocols] semantic pingback improvement request for foaf - Story Henry +http://markmail.org/thread/kzsg3qntovmqzbje|creationTime|2010-04-26T11:37:13Z +http://www.lelombrik.net/videos/30536/coiffeur-virtuel.html|creationDate|2010-11-12 +http://www.lelombrik.net/videos/30536/coiffeur-virtuel.html|tag|http://www.semanlink.net/tag/son_3d +http://www.lelombrik.net/videos/30536/coiffeur-virtuel.html|title|Coiffeur virtuel - SON 3D +http://www.lelombrik.net/videos/30536/coiffeur-virtuel.html|creationTime|2010-11-12T15:35:50Z +http://m240.net81-67-17.noos.fr/~fps/sicg/c2g/2004/08/eproc_sendingcats_notes.htm|creationDate|2005-11-06 +http://www.lemonde.fr/education/article/2012/08/09/la-salle-de-classe-planetaire_1742909_1473685.html|creationDate|2012-08-09 +http://www.lemonde.fr/education/article/2012/08/09/la-salle-de-classe-planetaire_1742909_1473685.html|tag|http://www.semanlink.net/tag/daphne_koller +http://www.lemonde.fr/education/article/2012/08/09/la-salle-de-classe-planetaire_1742909_1473685.html|tag|http://www.semanlink.net/tag/e_learning +http://www.lemonde.fr/education/article/2012/08/09/la-salle-de-classe-planetaire_1742909_1473685.html|tag|http://www.semanlink.net/tag/coursera +http://www.lemonde.fr/education/article/2012/08/09/la-salle-de-classe-planetaire_1742909_1473685.html|tag|http://www.semanlink.net/tag/ng +http://www.lemonde.fr/education/article/2012/08/09/la-salle-de-classe-planetaire_1742909_1473685.html|tag|http://www.semanlink.net/tag/education +http://www.lemonde.fr/education/article/2012/08/09/la-salle-de-classe-planetaire_1742909_1473685.html|title|La salle de classe planétaire +http://www.lemonde.fr/education/article/2012/08/09/la-salle-de-classe-planetaire_1742909_1473685.html|creationTime|2012-08-09T22:36:10Z +http://www.theguardian.com/commentisfree/2015/sep/20/silicon-valley-end-of-social-democracy|creationDate|2015-09-20 +http://www.theguardian.com/commentisfree/2015/sep/20/silicon-valley-end-of-social-democracy|tag|http://www.semanlink.net/tag/silicon_valley +http://www.theguardian.com/commentisfree/2015/sep/20/silicon-valley-end-of-social-democracy|tag|http://www.semanlink.net/tag/uber +http://www.theguardian.com/commentisfree/2015/sep/20/silicon-valley-end-of-social-democracy|tag|http://www.semanlink.net/tag/social_democracy 
+http://www.theguardian.com/commentisfree/2015/sep/20/silicon-valley-end-of-social-democracy|tag|http://www.semanlink.net/tag/evgeny_morozov +http://www.theguardian.com/commentisfree/2015/sep/20/silicon-valley-end-of-social-democracy|comment|"Inasmuch as “post-capitalism” emerges out of weakened social protections and industry regulations, we might as well be precise in our definitions: if Silicon Valley represents a shift to anything, it’s probably to “pre-capitalism”. +" +http://www.theguardian.com/commentisfree/2015/sep/20/silicon-valley-end-of-social-democracy|title|Does Silicon Valley’s reign herald the end of social democracy? The Guardian +http://www.theguardian.com/commentisfree/2015/sep/20/silicon-valley-end-of-social-democracy|creationTime|2015-09-20T10:24:48Z +https://plus.google.com/112399767740508618350/posts/6mwKR8pF37Z|creationDate|2013-01-25 +https://plus.google.com/112399767740508618350/posts/6mwKR8pF37Z|tag|http://www.semanlink.net/tag/kingsley_idehen +https://plus.google.com/112399767740508618350/posts/6mwKR8pF37Z|tag|http://www.semanlink.net/tag/google_knowledge_graph +https://plus.google.com/112399767740508618350/posts/6mwKR8pF37Z|title|Kingsley Idehen - Google+ - Google Knowledge Graph to Linked Data Here are some simple… +https://plus.google.com/112399767740508618350/posts/6mwKR8pF37Z|creationTime|2013-01-25T02:18:17Z +http://www.w3.org/wiki/WebSchemas/SchemaDotOrgProposals|creationDate|2013-06-25 +http://www.w3.org/wiki/WebSchemas/SchemaDotOrgProposals|tag|http://www.semanlink.net/tag/schema_org +http://www.w3.org/wiki/WebSchemas/SchemaDotOrgProposals|title|WebSchemas/SchemaDotOrgProposals - W3C Wiki +http://www.w3.org/wiki/WebSchemas/SchemaDotOrgProposals|creationTime|2013-06-25T11:31:05Z +http://www.temis.com/press-releases/-/asset_publisher/PBR3sbzpVJ9d/content/press-release-oecd?p_r_p_22296479_owat=content&p_r_p_22296479_owagid=10488&p_r_p_22296479_owaut=press-release-oecd&redirect=http://www.temis.com/press-releases?p_p_id=101_INSTANCE_PBR3sbzpVJ9d&p_p_lifecycle=0&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&utm_content=buffer5d3ce&utm_source=buffer&utm_medium=twitter&utm_campaign=Buffer|creationDate|2013-11-23 +http://www.temis.com/press-releases/-/asset_publisher/PBR3sbzpVJ9d/content/press-release-oecd?p_r_p_22296479_owat=content&p_r_p_22296479_owagid=10488&p_r_p_22296479_owaut=press-release-oecd&redirect=http://www.temis.com/press-releases?p_p_id=101_INSTANCE_PBR3sbzpVJ9d&p_p_lifecycle=0&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&utm_content=buffer5d3ce&utm_source=buffer&utm_medium=twitter&utm_campaign=Buffer|tag|http://www.semanlink.net/tag/temis +http://www.temis.com/press-releases/-/asset_publisher/PBR3sbzpVJ9d/content/press-release-oecd?p_r_p_22296479_owat=content&p_r_p_22296479_owagid=10488&p_r_p_22296479_owaut=press-release-oecd&redirect=http://www.temis.com/press-releases?p_p_id=101_INSTANCE_PBR3sbzpVJ9d&p_p_lifecycle=0&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&utm_content=buffer5d3ce&utm_source=buffer&utm_medium=twitter&utm_campaign=Buffer|title|Press release - OECD - Press Releases - TEMIS 
+http://www.temis.com/press-releases/-/asset_publisher/PBR3sbzpVJ9d/content/press-release-oecd?p_r_p_22296479_owat=content&p_r_p_22296479_owagid=10488&p_r_p_22296479_owaut=press-release-oecd&redirect=http://www.temis.com/press-releases?p_p_id=101_INSTANCE_PBR3sbzpVJ9d&p_p_lifecycle=0&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&utm_content=buffer5d3ce&utm_source=buffer&utm_medium=twitter&utm_campaign=Buffer|creationTime|2013-11-23T00:00:14Z +http://informationality.com/tagglywiki|creationDate|2005-05-23 +http://informationality.com/tagglywiki|tag|http://www.semanlink.net/tag/tagging +http://informationality.com/tagglywiki|tag|http://www.semanlink.net/tag/web_notebook +http://informationality.com/tagglywiki|title|TagglyWiki - a taggable, reusable, non-linear personal web notebook +https://devcenter.heroku.com/articles/jax-rs-http-caching|creationDate|2015-05-14 +https://devcenter.heroku.com/articles/jax-rs-http-caching|tag|http://www.semanlink.net/tag/jax_rs +https://devcenter.heroku.com/articles/jax-rs-http-caching|tag|http://www.semanlink.net/tag/jersey_cache_control +https://devcenter.heroku.com/articles/jax-rs-http-caching|title|HTTP Caching in Java with JAX-RS Heroku Dev Center +https://devcenter.heroku.com/articles/jax-rs-http-caching|creationTime|2015-05-14T13:13:28Z +http://www.lemonde.fr/planete/article/2016/09/21/l-onu-mobilise-sur-la-menace-fondamentale-que-represente-la-resistance-aux-antibiotiques_5001511_3244.html|creationDate|2016-09-21 +http://www.lemonde.fr/planete/article/2016/09/21/l-onu-mobilise-sur-la-menace-fondamentale-que-represente-la-resistance-aux-antibiotiques_5001511_3244.html|tag|http://www.semanlink.net/tag/antibiotic_resistance +http://www.lemonde.fr/planete/article/2016/09/21/l-onu-mobilise-sur-la-menace-fondamentale-que-represente-la-resistance-aux-antibiotiques_5001511_3244.html|tag|http://www.semanlink.net/tag/onu +http://www.lemonde.fr/planete/article/2016/09/21/l-onu-mobilise-sur-la-menace-fondamentale-que-represente-la-resistance-aux-antibiotiques_5001511_3244.html|title|L’ONU mobilise sur la « menace fondamentale » que représente la résistance aux antibiotiques +http://www.lemonde.fr/planete/article/2016/09/21/l-onu-mobilise-sur-la-menace-fondamentale-que-represente-la-resistance-aux-antibiotiques_5001511_3244.html|creationTime|2016-09-21T21:51:22Z +http://jena.sourceforge.net/|creationDate|2006-11-06 +http://jena.sourceforge.net/|tag|http://www.semanlink.net/tag/jena +http://jena.sourceforge.net/|tag|http://www.semanlink.net/tag/sourceforge +http://jena.sourceforge.net/|comment|HOME DE JENA +http://jena.sourceforge.net/|title|Jena on Sourceforge +http://my.opera.com/tomheath/blog/index.dml/tag/web|creationDate|2007-09-18 +http://my.opera.com/tomheath/blog/index.dml/tag/web|tag|http://www.semanlink.net/tag/tom_heath +http://my.opera.com/tomheath/blog/index.dml/tag/web|tag|http://www.semanlink.net/tag/linkto_semanlink +http://my.opera.com/tomheath/blog/index.dml/tag/web|title|web - Tom Heath's Displacement Activities +http://my.opera.com/tomheath/blog/index.dml/tag/web|creationTime|2007-09-18T01:39:08Z +https://en.wikipedia.org/wiki/Crash_(2004_film)|creationDate|2015-07-05 +https://en.wikipedia.org/wiki/Crash_(2004_film)|tag|http://www.semanlink.net/tag/film +https://en.wikipedia.org/wiki/Crash_(2004_film)|title|Crash (2004 film) +https://en.wikipedia.org/wiki/Crash_(2004_film)|creationTime|2015-07-05T22:16:02Z +http://www.bbc.com/news/health-36439260|creationDate|2016-08-19 
+http://www.bbc.com/news/health-36439260|tag|http://www.semanlink.net/tag/gene_editing
+http://www.bbc.com/news/health-36439260|tag|http://www.semanlink.net/tag/therapie_genique
+http://www.bbc.com/news/health-36439260|title|Gene editing technique could transform future - BBC News
+http://www.bbc.com/news/health-36439260|creationTime|2016-08-19T12:37:49Z
+https://github.com/aaronsw/html2text|creationDate|2017-06-13
+https://github.com/aaronsw/html2text|tag|http://www.semanlink.net/tag/markdown
+https://github.com/aaronsw/html2text|tag|http://www.semanlink.net/tag/html
+https://github.com/aaronsw/html2text|tag|http://www.semanlink.net/tag/nlp_tools
+https://github.com/aaronsw/html2text|comment|Python script that converts a page of HTML into clean, easy-to-read plain ASCII text. Better yet, that ASCII also happens to be valid Markdown
+https://github.com/aaronsw/html2text|title|html2text
+https://github.com/aaronsw/html2text|creationTime|2017-06-13T23:32:45Z
+http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4431|creationDate|2012-02-11
+http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4431|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012
+http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4431|title|KEYNOTE: Real-time Emergency Response Using Semantic Web Technology
+http://semtechbizberlin2012.semanticweb.com/sessionPop.cfm?confid=66&proposalid=4431|creationTime|2012-02-11T10:21:13Z
+http://www.mkbergman.com/?cat=22|creationDate|2008-05-16
+http://www.mkbergman.com/?cat=22|tag|http://www.semanlink.net/tag/umbel
+http://www.mkbergman.com/?cat=22|title|UMBEL » AI3:::Adaptive Information
+http://www.mkbergman.com/?cat=22|creationTime|2008-05-16T00:18:17Z
+https://medium.com/@apiltamang/learning-entity-embeddings-in-one-breath-b35da807b596|creationDate|2017-12-03
+https://medium.com/@apiltamang/learning-entity-embeddings-in-one-breath-b35da807b596|tag|http://www.semanlink.net/tag/entity_embeddings
+https://medium.com/@apiltamang/learning-entity-embeddings-in-one-breath-b35da807b596|comment|example of a recommender system, with person-item matrix
+https://medium.com/@apiltamang/learning-entity-embeddings-in-one-breath-b35da807b596|title|Learning Entity Embeddings in one breath – Apil Tamang – Medium
+https://medium.com/@apiltamang/learning-entity-embeddings-in-one-breath-b35da807b596|creationTime|2017-12-03T10:52:02Z
+https://towardsdatascience.com/how-to-use-dataset-in-tensorflow-c758ef9e4428|creationDate|2018-04-21
+https://towardsdatascience.com/how-to-use-dataset-in-tensorflow-c758ef9e4428|tag|http://www.semanlink.net/tag/tensorflow
+https://towardsdatascience.com/how-to-use-dataset-in-tensorflow-c758ef9e4428|tag|http://www.semanlink.net/tag/tutorial
+https://towardsdatascience.com/how-to-use-dataset-in-tensorflow-c758ef9e4428|title|How to use Dataset in TensorFlow – Towards Data Science
+https://towardsdatascience.com/how-to-use-dataset-in-tensorflow-c758ef9e4428|creationTime|2018-04-21T11:41:38Z
+https://medium.com/@bklyn_newton/is-growth-necessary-for-a-thriving-economy-59258df196b4#.hiq4du2py|creationDate|2016-04-27
+https://medium.com/@bklyn_newton/is-growth-necessary-for-a-thriving-economy-59258df196b4#.hiq4du2py|tag|http://www.semanlink.net/tag/douglas_rushkoff
+https://medium.com/@bklyn_newton/is-growth-necessary-for-a-thriving-economy-59258df196b4#.hiq4du2py|title|Is growth necessary for a thriving economy? — Medium
+https://medium.com/@bklyn_newton/is-growth-necessary-for-a-thriving-economy-59258df196b4#.hiq4du2py|creationTime|2016-04-27T14:24:25Z
+http://www.gizmag.com/extinct-gastric-brooding-frog-cloned/26687/|creationDate|2013-03-18
+http://www.gizmag.com/extinct-gastric-brooding-frog-cloned/26687/|tag|http://www.semanlink.net/tag/de_extinction
+http://www.gizmag.com/extinct-gastric-brooding-frog-cloned/26687/|tag|http://www.semanlink.net/tag/clonage
+http://www.gizmag.com/extinct-gastric-brooding-frog-cloned/26687/|tag|http://www.semanlink.net/tag/frog
+http://www.gizmag.com/extinct-gastric-brooding-frog-cloned/26687/|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne
+http://www.gizmag.com/extinct-gastric-brooding-frog-cloned/26687/|tag|http://www.semanlink.net/tag/disparition_d_especes
+http://www.gizmag.com/extinct-gastric-brooding-frog-cloned/26687/|title|Scientists clone extinct frog that gives birth from its mouth
+http://www.gizmag.com/extinct-gastric-brooding-frog-cloned/26687/|creationTime|2013-03-18T22:05:22Z
+http://ben.bolte.cc/blog/2016/gensim.html|creationDate|2017-10-23
+http://ben.bolte.cc/blog/2016/gensim.html|tag|http://www.semanlink.net/tag/gensim
+http://ben.bolte.cc/blog/2016/gensim.html|tag|http://www.semanlink.net/tag/word2vec
+http://ben.bolte.cc/blog/2016/gensim.html|tag|http://www.semanlink.net/tag/nlp_sample_code
+http://ben.bolte.cc/blog/2016/gensim.html|tag|http://www.semanlink.net/tag/keras
+http://ben.bolte.cc/blog/2016/gensim.html|title|Using Gensim Word2Vec Embeddings in Keras Ben Bolte's Blog
+http://ben.bolte.cc/blog/2016/gensim.html|creationTime|2017-10-23T09:05:11Z
+http://www.nytimes.com/2010/12/25/science/earth/25fossil.html|creationDate|2011-01-02
+http://www.nytimes.com/2010/12/25/science/earth/25fossil.html|tag|http://www.semanlink.net/tag/developpement_durable
+http://www.nytimes.com/2010/12/25/science/earth/25fossil.html|tag|http://www.semanlink.net/tag/developing_countries
+http://www.nytimes.com/2010/12/25/science/earth/25fossil.html|tag|http://www.semanlink.net/tag/energie_solaire
+http://www.nytimes.com/2010/12/25/science/earth/25fossil.html|tag|http://www.semanlink.net/tag/kenya
+http://www.nytimes.com/2010/12/25/science/earth/25fossil.html|title|In Kenya, Huts Far Off the Grid Harness the Sun - NYTimes.com
+http://www.nytimes.com/2010/12/25/science/earth/25fossil.html|creationTime|2011-01-02T16:04:59Z
+http://jan2012.ml-class.org/|creationDate|2012-01-18
+http://jan2012.ml-class.org/|tag|http://www.semanlink.net/tag/stanford
+http://jan2012.ml-class.org/|tag|http://www.semanlink.net/tag/machine_learning
+http://jan2012.ml-class.org/|title|Machine Learning
+http://jan2012.ml-class.org/|creationTime|2012-01-18T08:47:07Z
+https://realpython.com/python-keras-text-classification/|creationDate|2018-10-25
+https://realpython.com/python-keras-text-classification/|tag|http://www.semanlink.net/tag/nlp_sample_code
+https://realpython.com/python-keras-text-classification/|tag|http://www.semanlink.net/tag/nlp_text_classification
+https://realpython.com/python-keras-text-classification/|tag|http://www.semanlink.net/tag/keras
+https://realpython.com/python-keras-text-classification/|title|Practical Text Classification With Python and Keras – Real Python
+https://realpython.com/python-keras-text-classification/|creationTime|2018-10-25T08:39:17Z
+http://dannyayers.com/misc/grddl-reference|creationDate|2007-07-17
+http://dannyayers.com/misc/grddl-reference|tag|http://www.semanlink.net/tag/grddl
+http://dannyayers.com/misc/grddl-reference|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/misc/grddl-reference|title|GRDDL Quick reference card +http://dannyayers.com/misc/grddl-reference|creationTime|2007-07-17T23:03:22Z +http://lifehacker.com/5643460/how-to-track-and-potentially-recover-your-stolen-laptop-or-android-with-prey|creationDate|2013-05-07 +http://lifehacker.com/5643460/how-to-track-and-potentially-recover-your-stolen-laptop-or-android-with-prey|tag|http://www.semanlink.net/tag/security +http://lifehacker.com/5643460/how-to-track-and-potentially-recover-your-stolen-laptop-or-android-with-prey|title|How to Track and (Potentially) Recover Your Stolen Laptop or Android with Prey +http://lifehacker.com/5643460/how-to-track-and-potentially-recover-your-stolen-laptop-or-android-with-prey|creationTime|2013-05-07T16:19:23Z +http://eprints.ecs.soton.ac.uk/17054/1/das05r-www2009-semanticweb-futurebrowsing.pdf|creationDate|2009-03-05 +http://eprints.ecs.soton.ac.uk/17054/1/das05r-www2009-semanticweb-futurebrowsing.pdf|tag|http://www.semanlink.net/tag/faceted_search +http://eprints.ecs.soton.ac.uk/17054/1/das05r-www2009-semanticweb-futurebrowsing.pdf|tag|http://www.semanlink.net/tag/www_2009 +http://eprints.ecs.soton.ac.uk/17054/1/das05r-www2009-semanticweb-futurebrowsing.pdf|title|Interactively using Semantic Web knowledge: Creating scalable abstractions with FacetOntology +http://eprints.ecs.soton.ac.uk/17054/1/das05r-www2009-semanticweb-futurebrowsing.pdf|creationTime|2009-03-05T10:40:50Z +https://blog.semantic-web.at/2016/08/15/introducing-a-graph-based-semantic-layer-in-enterprises/|creationDate|2016-09-11 +https://blog.semantic-web.at/2016/08/15/introducing-a-graph-based-semantic-layer-in-enterprises/|tag|http://www.semanlink.net/tag/poolparty +https://blog.semantic-web.at/2016/08/15/introducing-a-graph-based-semantic-layer-in-enterprises/|tag|http://www.semanlink.net/tag/semantic_enterprise +https://blog.semantic-web.at/2016/08/15/introducing-a-graph-based-semantic-layer-in-enterprises/|comment|“An Enterprise without a Semantic Layer is like a Country without a Map. +https://blog.semantic-web.at/2016/08/15/introducing-a-graph-based-semantic-layer-in-enterprises/|title|Introducing a Graph-based Semantic Layer in Enterprises The Semantic Puzzle +https://blog.semantic-web.at/2016/08/15/introducing-a-graph-based-semantic-layer-in-enterprises/|creationTime|2016-09-11T01:09:13Z +http://danbri.org/words/2008/02/09/273|creationDate|2008-02-11 +http://danbri.org/words/2008/02/09/273|tag|http://www.semanlink.net/tag/dan_brickley +http://danbri.org/words/2008/02/09/273|tag|http://www.semanlink.net/tag/sparql +http://danbri.org/words/2008/02/09/273|title|danbri’s foaf stories » Graph URIs in SPARQL: Using UUIDs as named views +http://danbri.org/words/2008/02/09/273|creationTime|2008-02-11T23:24:43Z +http://www.w3.org/2004/02/skos/core.rdf|creationDate|2007-01-19 +http://www.w3.org/2004/02/skos/core.rdf|tag|http://www.semanlink.net/tag/skos +http://www.w3.org/2004/02/skos/core.rdf|title|SKOS core (rdf file) +http://people.xiph.org/~xiphmont/demo/neil-young.html|creationDate|2012-03-06 +http://people.xiph.org/~xiphmont/demo/neil-young.html|tag|http://www.semanlink.net/tag/robert +http://people.xiph.org/~xiphmont/demo/neil-young.html|tag|http://www.semanlink.net/tag/digital_audio +http://people.xiph.org/~xiphmont/demo/neil-young.html|comment|"see also
+slashdot
+
+"
+http://people.xiph.org/~xiphmont/demo/neil-young.html|title|24/192 Music Downloads are Very Silly Indeed
+http://people.xiph.org/~xiphmont/demo/neil-young.html|creationTime|2012-03-06T11:48:17Z
+http://googleresearch.blogspot.co.uk/2012/05/from-words-to-concepts-and-back.html|creationDate|2012-05-22
+http://googleresearch.blogspot.co.uk/2012/05/from-words-to-concepts-and-back.html|tag|http://www.semanlink.net/tag/entities
+http://googleresearch.blogspot.co.uk/2012/05/from-words-to-concepts-and-back.html|tag|http://www.semanlink.net/tag/nlp_google
+http://googleresearch.blogspot.co.uk/2012/05/from-words-to-concepts-and-back.html|title|From Words to Concepts and Back: Dictionaries for Linking Text, Entities and Ideas
+http://googleresearch.blogspot.co.uk/2012/05/from-words-to-concepts-and-back.html|creationTime|2012-05-22T12:04:25Z
+http://www.heppnetz.de/projects/goodrelations/GoodRelations-TR-final.pdf|creationDate|2014-04-21
+http://www.heppnetz.de/projects/goodrelations/GoodRelations-TR-final.pdf|tag|http://www.semanlink.net/tag/goodrelations
+http://www.heppnetz.de/projects/goodrelations/GoodRelations-TR-final.pdf|title|GoodRelations: technical report
+http://www.heppnetz.de/projects/goodrelations/GoodRelations-TR-final.pdf|creationTime|2014-04-21T12:20:28Z
+http://www.djlu.fr/|creationDate|2018-07-24
+http://www.djlu.fr/|tag|http://www.semanlink.net/tag/tools
+http://www.djlu.fr/|tag|http://www.semanlink.net/tag/research_papers
+http://www.djlu.fr/|title|DjLu - The simple and free tool to organize your research papers
+http://www.djlu.fr/|creationTime|2018-07-24T23:35:15Z
+http://publishing-statistical-data.googlecode.com/svn/trunk/specs/src/main/html/index.html|creationDate|2010-07-16
+http://publishing-statistical-data.googlecode.com/svn/trunk/specs/src/main/html/index.html|tag|http://www.semanlink.net/tag/semantic_statistics
+http://publishing-statistical-data.googlecode.com/svn/trunk/specs/src/main/html/index.html|tag|http://www.semanlink.net/tag/richard_cyganiak
+http://publishing-statistical-data.googlecode.com/svn/trunk/specs/src/main/html/index.html|tag|http://www.semanlink.net/tag/sdmx_rdf
+http://publishing-statistical-data.googlecode.com/svn/trunk/specs/src/main/html/index.html|title|Expressing Statistical Data in RDF with SDMX-RDF
+http://publishing-statistical-data.googlecode.com/svn/trunk/specs/src/main/html/index.html|creationTime|2010-07-16T14:26:21Z
+http://www.newscientist.com/article/mg22630164.700-did-neurons-evolve-more-than-once-on-earth.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.VShXyVyn8bw|creationDate|2015-04-11
+http://www.newscientist.com/article/mg22630164.700-did-neurons-evolve-more-than-once-on-earth.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.VShXyVyn8bw|tag|http://www.semanlink.net/tag/cerveau
+http://www.newscientist.com/article/mg22630164.700-did-neurons-evolve-more-than-once-on-earth.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.VShXyVyn8bw|tag|http://www.semanlink.net/tag/neurones
+http://www.newscientist.com/article/mg22630164.700-did-neurons-evolve-more-than-once-on-earth.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.VShXyVyn8bw|tag|http://www.semanlink.net/tag/evolution
+http://www.newscientist.com/article/mg22630164.700-did-neurons-evolve-more-than-once-on-earth.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.VShXyVyn8bw|title|Did neurons evolve more than once on Earth? - life - 10 April 2015 - New Scientist
+http://www.newscientist.com/article/mg22630164.700-did-neurons-evolve-more-than-once-on-earth.html?cmpid=RSS%7CNSNS%7C2012-GLOBAL%7Conline-news#.VShXyVyn8bw|creationTime|2015-04-11T01:14:20Z
+https://lejournal.cnrs.fr/articles/enrichir-les-sols-en-carbone-pour-lutter-contre-le-changement-climatique|creationDate|2018-12-03
+https://lejournal.cnrs.fr/articles/enrichir-les-sols-en-carbone-pour-lutter-contre-le-changement-climatique|tag|http://www.semanlink.net/tag/changement_climatique
+https://lejournal.cnrs.fr/articles/enrichir-les-sols-en-carbone-pour-lutter-contre-le-changement-climatique|tag|http://www.semanlink.net/tag/agriculture
+https://lejournal.cnrs.fr/articles/enrichir-les-sols-en-carbone-pour-lutter-contre-le-changement-climatique|title|Enrichir les sols en carbone pour lutter contre le changement climatique CNRS Le journal
+https://lejournal.cnrs.fr/articles/enrichir-les-sols-en-carbone-pour-lutter-contre-le-changement-climatique|creationTime|2018-12-03T22:42:29Z
+http://www.openlinksw.com/weblog/oerling/index.vspx|creationDate|2008-05-04
+http://www.openlinksw.com/weblog/oerling/index.vspx|tag|http://www.semanlink.net/tag/orri_erling
+http://www.openlinksw.com/weblog/oerling/index.vspx|title|Orri Erling's Weblog
+http://www.openlinksw.com/weblog/oerling/index.vspx|creationTime|2008-05-04T14:47:19Z
+http://osxdaily.com/2006/11/30/how-to-manually-adjust-the-macbook-pro-keyboard-backlight/|creationDate|2008-12-17
+http://osxdaily.com/2006/11/30/how-to-manually-adjust-the-macbook-pro-keyboard-backlight/|tag|http://www.semanlink.net/tag/mac_os_x
+http://osxdaily.com/2006/11/30/how-to-manually-adjust-the-macbook-pro-keyboard-backlight/|title|How-to: Manually adjust the MacBook Pro Keyboard Backlight - OS X Daily
+http://osxdaily.com/2006/11/30/how-to-manually-adjust-the-macbook-pro-keyboard-backlight/|creationTime|2008-12-17T19:04:34Z
+http://regexpal.com/|creationDate|2012-03-07
+http://regexpal.com/|tag|http://www.semanlink.net/tag/regex
+http://regexpal.com/|title|Regex Tester – RegexPal
+http://regexpal.com/|creationTime|2012-03-07T13:33:26Z
+http://www.threetags.com/|creationDate|2009-06-23
+http://www.threetags.com/|tag|http://www.semanlink.net/tag/hierarchical_tags
+http://www.threetags.com/|title|threetags.com: secure your data online
+http://www.threetags.com/|creationTime|2009-06-23T08:55:36Z
+http://developer.apple.com/documentation/Java/Conceptual/Java14Development/05-CoreJavaAPIs/CoreJavaAPIs.html|creationDate|2008-12-02
+http://developer.apple.com/documentation/Java/Conceptual/Java14Development/05-CoreJavaAPIs/CoreJavaAPIs.html|tag|http://www.semanlink.net/tag/mac_os_x
+http://developer.apple.com/documentation/Java/Conceptual/Java14Development/05-CoreJavaAPIs/CoreJavaAPIs.html|tag|http://www.semanlink.net/tag/java_jni
+http://developer.apple.com/documentation/Java/Conceptual/Java14Development/05-CoreJavaAPIs/CoreJavaAPIs.html|tag|http://www.semanlink.net/tag/mac_dev
+http://developer.apple.com/documentation/Java/Conceptual/Java14Development/05-CoreJavaAPIs/CoreJavaAPIs.html|title|Java Development Guide for Mac OS X: Core Java APIs and the Java Runtime on Mac OS X
+http://developer.apple.com/documentation/Java/Conceptual/Java14Development/05-CoreJavaAPIs/CoreJavaAPIs.html|creationTime|2008-12-02T00:18:32Z
+http://www.cs.cmu.edu/~rsalakhu/|creationDate|2017-08-28
+http://www.cs.cmu.edu/~rsalakhu/|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov
+http://www.cs.cmu.edu/~rsalakhu/|title|Ruslan Salakhutdinov - Carnegie Mellon School of Computer Science
+http://www.cs.cmu.edu/~rsalakhu/|creationTime|2017-08-28T00:19:53Z
+http://www.bizcoder.com/the-insanity-of-the-vary-header|creationDate|2016-03-30
+http://www.bizcoder.com/the-insanity-of-the-vary-header|tag|http://www.semanlink.net/tag/vary_header
+http://www.bizcoder.com/the-insanity-of-the-vary-header|comment|encoding
+http://www.bizcoder.com/the-insanity-of-the-vary-header|title|Bizcoder - The Insanity of the Vary Header
+http://www.bizcoder.com/the-insanity-of-the-vary-header|creationTime|2016-03-30T18:20:44Z
+http://www.bbc.com/news/science-environment-30097648|creationDate|2014-11-19
+http://www.bbc.com/news/science-environment-30097648|tag|http://www.semanlink.net/tag/philae
+http://www.bbc.com/news/science-environment-30097648|title|BBC News - Comet landing: Organic molecules detected by Philae
+http://www.bbc.com/news/science-environment-30097648|creationTime|2014-11-19T01:17:09Z
+https://static.googleusercontent.com/external_content/untrusted_dlcp/www.google.com/en//webmasters/docs/search-engine-optimization-starter-guide.pdf|creationDate|2013-08-23
+https://static.googleusercontent.com/external_content/untrusted_dlcp/www.google.com/en//webmasters/docs/search-engine-optimization-starter-guide.pdf|tag|http://www.semanlink.net/tag/seo
+https://static.googleusercontent.com/external_content/untrusted_dlcp/www.google.com/en//webmasters/docs/search-engine-optimization-starter-guide.pdf|tag|http://www.semanlink.net/tag/webmasters_google
+https://static.googleusercontent.com/external_content/untrusted_dlcp/www.google.com/en//webmasters/docs/search-engine-optimization-starter-guide.pdf|title|search-engine-optimization-starter-guide.pdf
+https://static.googleusercontent.com/external_content/untrusted_dlcp/www.google.com/en//webmasters/docs/search-engine-optimization-starter-guide.pdf|creationTime|2013-08-23T14:50:46Z
+http://www.armadillo.fr/|creationDate|2011-03-21
+http://www.armadillo.fr/|tag|http://www.semanlink.net/tag/ged
+http://www.armadillo.fr/|title|Armadillo - Gestion documentaire multimédia
+http://www.armadillo.fr/|creationTime|2011-03-21T12:12:02Z
+http://www.panimages.org/index.jsp?displang=eng|creationDate|2007-09-14
+http://www.panimages.org/index.jsp?displang=eng|tag|http://www.semanlink.net/tag/google
+http://www.panimages.org/index.jsp?displang=eng|tag|http://www.semanlink.net/tag/search_engines
+http://www.panimages.org/index.jsp?displang=eng|tag|http://www.semanlink.net/tag/multilinguisme
+http://www.panimages.org/index.jsp?displang=eng|tag|http://www.semanlink.net/tag/flickr
+http://www.panimages.org/index.jsp?displang=eng|comment|Search Google Images and Flickr in 100's of languages using automatic query translation.
+http://www.panimages.org/index.jsp?displang=eng|title|PanImages: cross-lingual image search +http://www.panimages.org/index.jsp?displang=eng|creationTime|2007-09-14T13:37:03Z +https://en.wikipedia.org/wiki/The_Grand_Budapest_Hotel|creationDate|2018-09-16 +https://en.wikipedia.org/wiki/The_Grand_Budapest_Hotel|tag|http://www.semanlink.net/tag/film +https://en.wikipedia.org/wiki/The_Grand_Budapest_Hotel|tag|http://www.semanlink.net/tag/comedie +https://en.wikipedia.org/wiki/The_Grand_Budapest_Hotel|title|The Grand Budapest Hotel +https://en.wikipedia.org/wiki/The_Grand_Budapest_Hotel|creationTime|2018-09-16T22:39:24Z +http://passeurdesciences.blog.lemonde.fr/2015/03/15/bienvenue-a-gattaca-sera-t-il-bientot-realite/|creationDate|2015-03-15 +http://passeurdesciences.blog.lemonde.fr/2015/03/15/bienvenue-a-gattaca-sera-t-il-bientot-realite/|tag|http://www.semanlink.net/tag/genome_editing +http://passeurdesciences.blog.lemonde.fr/2015/03/15/bienvenue-a-gattaca-sera-t-il-bientot-realite/|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://passeurdesciences.blog.lemonde.fr/2015/03/15/bienvenue-a-gattaca-sera-t-il-bientot-realite/|title|« Bienvenue à Gattaca » sera-t-il bientôt réalité ? Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2015/03/15/bienvenue-a-gattaca-sera-t-il-bientot-realite/|creationTime|2015-03-15T18:51:50Z +http://ruder.io/multi-task/|creationDate|2018-10-02 +http://ruder.io/multi-task/|tag|http://www.semanlink.net/tag/sebastian_ruder +http://ruder.io/multi-task/|tag|http://www.semanlink.net/tag/multi_task_learning +http://ruder.io/multi-task/|title|An Overview of Multi-Task Learning for Deep Learning +http://ruder.io/multi-task/|creationTime|2018-10-02T10:08:30Z +http://www.numerama.com/politique/149590-loi-penale-chiffrement-blocage-de-sites-les-amendements-a-suivre.html|creationDate|2016-03-11 +http://www.numerama.com/politique/149590-loi-penale-chiffrement-blocage-de-sites-les-amendements-a-suivre.html|tag|http://www.semanlink.net/tag/politique_francaise +http://www.numerama.com/politique/149590-loi-penale-chiffrement-blocage-de-sites-les-amendements-a-suivre.html|tag|http://www.semanlink.net/tag/droit_et_internet +http://www.numerama.com/politique/149590-loi-penale-chiffrement-blocage-de-sites-les-amendements-a-suivre.html|title|Chiffrement, blocage de sites, lectures interdites : les amendements de la Loi Pénale à suivre - Politique - Numerama +http://www.numerama.com/politique/149590-loi-penale-chiffrement-blocage-de-sites-les-amendements-a-suivre.html|creationTime|2016-03-11T00:30:44Z +http://techwiki.openstructs.org/index.php/Solr|creationDate|2012-05-10 +http://techwiki.openstructs.org/index.php/Solr|tag|http://www.semanlink.net/tag/solr +http://techwiki.openstructs.org/index.php/Solr|tag|http://www.semanlink.net/tag/openstructs +http://techwiki.openstructs.org/index.php/Solr|comment|we: 1) changed standard RDF practice to also record literals in addition to URI identifiers; and 2) integrated our structured data store with Solr +http://techwiki.openstructs.org/index.php/Solr|title|Solr - OpenStructs TechWiki +http://techwiki.openstructs.org/index.php/Solr|creationTime|2012-05-10T00:13:28Z +http://2006.xmlconference.org/proceedings/188/presentation.html|creationDate|2009-02-07 +http://2006.xmlconference.org/proceedings/188/presentation.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://2006.xmlconference.org/proceedings/188/presentation.html|tag|http://www.semanlink.net/tag/d2rq 
+http://2006.xmlconference.org/proceedings/188/presentation.html|tag|http://www.semanlink.net/tag/database_to_rdf_mapping
+http://2006.xmlconference.org/proceedings/188/presentation.html|title|Relational database integration with RDF/OWL
+http://2006.xmlconference.org/proceedings/188/presentation.html|creationTime|2009-02-07T19:49:51Z
+http://virtuoso.openlinksw.com/Whitepapers/html/VirtLinkedDataDeployment.html|creationDate|2008-09-20
+http://virtuoso.openlinksw.com/Whitepapers/html/VirtLinkedDataDeployment.html|tag|http://www.semanlink.net/tag/linked_data
+http://virtuoso.openlinksw.com/Whitepapers/html/VirtLinkedDataDeployment.html|tag|http://www.semanlink.net/tag/virtuoso
+http://virtuoso.openlinksw.com/Whitepapers/html/VirtLinkedDataDeployment.html|title|Deploying Linked Data (Virtuoso)
+http://virtuoso.openlinksw.com/Whitepapers/html/VirtLinkedDataDeployment.html|creationTime|2008-09-20T11:42:56Z
+http://munich2012.drupal.org/program/sessions/decoupling-content-management|creationDate|2012-09-04
+http://munich2012.drupal.org/program/sessions/decoupling-content-management|tag|http://www.semanlink.net/tag/henri_bergius
+http://munich2012.drupal.org/program/sessions/decoupling-content-management|comment|video at 16'50
+http://munich2012.drupal.org/program/sessions/decoupling-content-management|title|Decoupling Content Management DrupalCon Munich 2012
+http://munich2012.drupal.org/program/sessions/decoupling-content-management|creationTime|2012-09-04T18:52:20Z
+https://www.media.mit.edu/projects/grounded-language-learning-and-understanding/overview/|creationDate|2018-10-26
+https://www.media.mit.edu/projects/grounded-language-learning-and-understanding/overview/|tag|http://www.semanlink.net/tag/grounded_language_learning
+https://www.media.mit.edu/projects/grounded-language-learning-and-understanding/overview/|tag|http://www.semanlink.net/tag/mit
+https://www.media.mit.edu/projects/grounded-language-learning-and-understanding/overview/|comment|"Language is grounded in experience. Unlike dictionaries which define words in terms of other words, humans understand many basic words in terms of associations with sensory-motor experiences. People must interact physically with their world to grasp the essence of words like ""red,"" ""heavy,"" and ""above."""
+https://www.media.mit.edu/projects/grounded-language-learning-and-understanding/overview/|title|Grounded Language Learning and Understanding — MIT Media Lab (1999-2001)
+https://www.media.mit.edu/projects/grounded-language-learning-and-understanding/overview/|creationTime|2018-10-26T00:33:06Z
+http://www.xml.com/pub/a/2006/08/09/solr-indexing-xml-with-lucene-andrest.html|creationDate|2012-05-03
+http://www.xml.com/pub/a/2006/08/09/solr-indexing-xml-with-lucene-andrest.html|tag|http://www.semanlink.net/tag/solr_not_english_only
+http://www.xml.com/pub/a/2006/08/09/solr-indexing-xml-with-lucene-andrest.html|tag|http://www.semanlink.net/tag/solr
+http://www.xml.com/pub/a/2006/08/09/solr-indexing-xml-with-lucene-andrest.html|title|XML.com: Solr: Indexing XML with Lucene and REST
+http://www.xml.com/pub/a/2006/08/09/solr-indexing-xml-with-lucene-andrest.html|creationTime|2012-05-03T16:27:21Z
+http://www.youtube-mp3.org/fr|creationDate|2016-09-24
+http://www.youtube-mp3.org/fr|tag|http://www.semanlink.net/tag/propriete_intellectuelle
+http://www.youtube-mp3.org/fr|tag|http://www.semanlink.net/tag/mp3
+http://www.youtube-mp3.org/fr|tag|http://www.semanlink.net/tag/youtube
+http://www.youtube-mp3.org/fr|comment|Doesn't exist anymore. [What Happened to YouTube-mp3.org](https://www.joyoshare.com/convert-video/what-happened-to-youtube-mp3-org.html)
+http://www.youtube-mp3.org/fr|title|Convertisseur YouTube vers mp3
+http://www.youtube-mp3.org/fr|creationTime|2016-09-24T14:29:12Z
+http://junit.sourceforge.net/doc/cookstour/cookstour.htm|creationDate|2008-12-04
+http://junit.sourceforge.net/doc/cookstour/cookstour.htm|tag|http://www.semanlink.net/tag/design_pattern
+http://junit.sourceforge.net/doc/cookstour/cookstour.htm|tag|http://www.semanlink.net/tag/junit
+http://junit.sourceforge.net/doc/cookstour/cookstour.htm|comment|How the JUnit framework is constructed
+http://junit.sourceforge.net/doc/cookstour/cookstour.htm|title|JUnit: A Cook’s Tour
+http://junit.sourceforge.net/doc/cookstour/cookstour.htm|creationTime|2008-12-04T18:23:47Z
+http://www.eol.org/|creationDate|2007-05-11
+http://www.eol.org/|tag|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.eol.org/|title|Encyclopedia of Life
+http://www.eol.org/|creationTime|2007-05-11T00:12:12Z
+http://blogs.sun.com/bblfish/entry/it_s_all_about_context|creationDate|2008-11-07
+http://blogs.sun.com/bblfish/entry/it_s_all_about_context|tag|http://www.semanlink.net/tag/sem_web_context
+http://blogs.sun.com/bblfish/entry/it_s_all_about_context|title|Keeping track of Context in Life and on the Web
+http://blogs.sun.com/bblfish/entry/it_s_all_about_context|creationTime|2008-11-07T13:48:21Z
+http://vocamp.org/wiki/HypiosVoCampParisMay2010#Annotations_Ontology|creationDate|2010-05-20
+http://vocamp.org/wiki/HypiosVoCampParisMay2010#Annotations_Ontology|tag|http://www.semanlink.net/tag/hypiosvocampparismay2010
+http://vocamp.org/wiki/HypiosVoCampParisMay2010#Annotations_Ontology|tag|http://www.semanlink.net/tag/annotations
+http://vocamp.org/wiki/HypiosVoCampParisMay2010#Annotations_Ontology|title|Annotations Ontology
+http://vocamp.org/wiki/HypiosVoCampParisMay2010#Annotations_Ontology|creationTime|2010-05-20T01:24:23Z
+http://huet.blog.lemonde.fr/2016/12/12/comment-produire-nobels-et-medailles-fields/|creationDate|2016-12-13
+http://huet.blog.lemonde.fr/2016/12/12/comment-produire-nobels-et-medailles-fields/|tag|http://www.semanlink.net/tag/recherche_francaise
+http://huet.blog.lemonde.fr/2016/12/12/comment-produire-nobels-et-medailles-fields/|title|Comment produire Nobels et médailles Fields ? {Sciences²}
+http://huet.blog.lemonde.fr/2016/12/12/comment-produire-nobels-et-medailles-fields/|creationTime|2016-12-13T11:26:42Z
+http://en.wikipedia.org/wiki/Great_Hall_of_the_People|creationDate|2008-04-16
+http://en.wikipedia.org/wiki/Great_Hall_of_the_People|tag|http://www.semanlink.net/tag/www08
+http://en.wikipedia.org/wiki/Great_Hall_of_the_People|title|Great Hall of the People - Wikipedia, the free encyclopedia
+http://en.wikipedia.org/wiki/Great_Hall_of_the_People|creationTime|2008-04-16T21:25:47Z
+https://www.groundai.com/project/convolutional-2d-knowledge-graph-embeddings/|creationDate|2018-02-26
+https://www.groundai.com/project/convolutional-2d-knowledge-graph-embeddings/|tag|http://www.semanlink.net/tag/link_prediction
+https://www.groundai.com/project/convolutional-2d-knowledge-graph-embeddings/|tag|http://www.semanlink.net/tag/convolutional_knowledge_graph_embeddings
+https://www.groundai.com/project/convolutional-2d-knowledge-graph-embeddings/|title|Convolutional 2D Knowledge Graph Embeddings (2017)
+https://www.groundai.com/project/convolutional-2d-knowledge-graph-embeddings/|creationTime|2018-02-26T10:02:49Z
+https://ieeexplore.ieee.org/abstract/document/8320777|creationDate|2019-04-24
+https://ieeexplore.ieee.org/abstract/document/8320777|tag|http://www.semanlink.net/tag/list_only_entity_linking
+https://ieeexplore.ieee.org/abstract/document/8320777|comment|"the task of **mapping ambiguous mentions in texts to target entities in a group of entity lists** (-\> sparse information on the
+entity side)
+Harness entity co-occurrences information to mine both textual
+description of entities and (explicit and implicit) relations among entities. (Relevant when there are several mentions in one document). Constructs an entity graph to
+capture relations among entities, and uses a kind of pagerank algo."
+https://ieeexplore.ieee.org/abstract/document/8320777|title|Collective List-Only Entity Linking: A Graph-Based Approach - IEEE Journals & Magazine (2018)
+https://ieeexplore.ieee.org/abstract/document/8320777|creationTime|2019-04-24T16:24:21Z
+http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065|creationDate|2009-11-21
+http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065|tag|http://www.semanlink.net/tag/europe_ecologie
+http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065|tag|http://www.semanlink.net/tag/union_europeenne
+http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065|tag|http://www.semanlink.net/tag/cohn_bendit
+http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065|tag|http://www.semanlink.net/tag/sommet_de_copenhague
+http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065|comment|On ne négocie pas avec la planète, on ne négocie pas avec la science... Il y a urgence et il n'y a pas de plan B
+http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065|title|L'Union européenne détient les clefs du succès, par Jean-Paul Besset, Daniel Cohn-Bendit, Yannick Jadot, Eva Joly
+http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065|creationTime|2009-11-21T17:34:51Z
+http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065|source|Le Monde
+http://www.lemonde.fr/opinions/article/2009/11/20/l-union-europeenne-detient-les-clefs-du-succes-par-jean-paul-besset-daniel-cohn-bendit-yannick-jadot-eva-joly_1269895_3232.html#ens_id=647065|date|2009-11-21
+http://www.aclweb.org/anthology/P/P14/P14-1119.xhtml|creationDate|2017-06-20
+http://www.aclweb.org/anthology/P/P14/P14-1119.xhtml|tag|http://www.semanlink.net/tag/nlp_topic_extraction
+http://www.aclweb.org/anthology/P/P14/P14-1119.xhtml|title|Automatic Keyphrase Extraction: A Survey of the State of the Art (2014)
+http://www.aclweb.org/anthology/P/P14/P14-1119.xhtml|creationTime|2017-06-20T14:01:17Z
+http://www.aclweb.org/anthology/P/P14/P14-1119.xhtml|date|2014
+http://projectwordsworth.com/the-paradox-of-the-proof/?_ga=1.18937784.1815790288.1389170208|creationDate|2017-12-12
+http://projectwordsworth.com/the-paradox-of-the-proof/?_ga=1.18937784.1815790288.1389170208|tag|http://www.semanlink.net/tag/grands_problemes_mathematiques
+http://projectwordsworth.com/the-paradox-of-the-proof/?_ga=1.18937784.1815790288.1389170208|title|The Paradox of the Proof Project Wordsworth
+http://projectwordsworth.com/the-paradox-of-the-proof/?_ga=1.18937784.1815790288.1389170208|creationTime|2017-12-12T21:46:59Z
+https://www.groundai.com/project/evaluation-of-sentence-embeddings-in-downstream-and-linguistic-probing-tasks/|creationDate|2018-06-27
+https://www.groundai.com/project/evaluation-of-sentence-embeddings-in-downstream-and-linguistic-probing-tasks/|tag|http://www.semanlink.net/tag/sentence_embeddings
+https://www.groundai.com/project/evaluation-of-sentence-embeddings-in-downstream-and-linguistic-probing-tasks/|tag|http://www.semanlink.net/tag/survey
+https://www.groundai.com/project/evaluation-of-sentence-embeddings-in-downstream-and-linguistic-probing-tasks/|tag|http://www.semanlink.net/tag/embedding_evaluation
+https://www.groundai.com/project/evaluation-of-sentence-embeddings-in-downstream-and-linguistic-probing-tasks/|title|Evaluation of sentence embeddings in downstream and linguistic probing tasks
+https://www.groundai.com/project/evaluation-of-sentence-embeddings-in-downstream-and-linguistic-probing-tasks/|creationTime|2018-06-27T11:48:33Z
+http://ld2sd.deri.org/lod-ng-tutorial/|creationDate|2011-03-24
+http://ld2sd.deri.org/lod-ng-tutorial/|tag|http://www.semanlink.net/tag/richard_cyganiak
+http://ld2sd.deri.org/lod-ng-tutorial/|tag|http://www.semanlink.net/tag/michael_hausenblas
+http://ld2sd.deri.org/lod-ng-tutorial/|tag|http://www.semanlink.net/tag/rdfa
+http://ld2sd.deri.org/lod-ng-tutorial/|tag|http://www.semanlink.net/tag/linked_data
+http://ld2sd.deri.org/lod-ng-tutorial/|tag|http://www.semanlink.net/tag/tutorial
+http://ld2sd.deri.org/lod-ng-tutorial/|title|Linked Data Tutorial - Publishing and consuming linked data with RDFa
+http://ld2sd.deri.org/lod-ng-tutorial/|creationTime|2011-03-24T22:04:07Z
+http://www.bnode.org/archives2/58|creationDate|2006-05-29
+http://www.bnode.org/archives2/58|tag|http://www.semanlink.net/tag/rdfa
+http://www.bnode.org/archives2/58|tag|http://www.semanlink.net/tag/semantic_markup_in_html
+http://www.bnode.org/archives2/58|tag|http://www.semanlink.net/tag/microformats
+http://www.bnode.org/archives2/58|tag|http://www.semanlink.net/tag/rdf_parser
+http://www.bnode.org/archives2/58|comment|While searching for a suitable output format for a new RDF framework, I've been looking at the various semantic hypertext approaches, namely microformats, Structured Blogging, RDFa, and Embedded RDF (eRDF). Each one has its pros and cons...
+http://www.bnode.org/archives2/58|title|ARC Embedded RDF (eRDF) Parser for PHP
+http://www.lexpress.fr/actualite/politique/le-front-national-sera-majoritaire-predit-le-philosophe-bernard-stiegler_1280994.html|creationDate|2015-12-20
+http://www.lexpress.fr/actualite/politique/le-front-national-sera-majoritaire-predit-le-philosophe-bernard-stiegler_1280994.html|tag|http://www.semanlink.net/tag/fn
+http://www.lexpress.fr/actualite/politique/le-front-national-sera-majoritaire-predit-le-philosophe-bernard-stiegler_1280994.html|tag|http://www.semanlink.net/tag/bernard_stiegler
+http://www.lexpress.fr/actualite/politique/le-front-national-sera-majoritaire-predit-le-philosophe-bernard-stiegler_1280994.html|title|"""Le Front national sera majoritaire"", prédit le philosophe Bernard Stiegler - L'Express"
+http://www.lexpress.fr/actualite/politique/le-front-national-sera-majoritaire-predit-le-philosophe-bernard-stiegler_1280994.html|creationTime|2015-12-20T00:10:25Z
+http://hiltmon.com/blog/2015/06/27/a-simple-markdown-spotlight-importer/|creationDate|2015-10-14
+http://hiltmon.com/blog/2015/06/27/a-simple-markdown-spotlight-importer/|tag|http://www.semanlink.net/tag/spotlight_osx
+http://hiltmon.com/blog/2015/06/27/a-simple-markdown-spotlight-importer/|tag|http://www.semanlink.net/tag/markdown
+http://hiltmon.com/blog/2015/06/27/a-simple-markdown-spotlight-importer/|comment|pour faire en sorte que spotlight indexe bien les fichiers md
+http://hiltmon.com/blog/2015/06/27/a-simple-markdown-spotlight-importer/|title|A Simple Markdown Spotlight Importer - Hiltmon
+http://hiltmon.com/blog/2015/06/27/a-simple-markdown-spotlight-importer/|creationTime|2015-10-14T13:32:14Z
+http://fr.slideshare.net/vrandezo/wikidata-presentation-at-semtechbiz-berlin-2012|creationDate|2013-07-10
+http://fr.slideshare.net/vrandezo/wikidata-presentation-at-semtechbiz-berlin-2012|tag|http://www.semanlink.net/tag/wikidata
+http://fr.slideshare.net/vrandezo/wikidata-presentation-at-semtechbiz-berlin-2012|tag|http://www.semanlink.net/tag/denny_vrandecic
+http://fr.slideshare.net/vrandezo/wikidata-presentation-at-semtechbiz-berlin-2012|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012
+http://fr.slideshare.net/vrandezo/wikidata-presentation-at-semtechbiz-berlin-2012|title|Wikidata presentation at SemTechBiz Berlin 2012
+http://fr.slideshare.net/vrandezo/wikidata-presentation-at-semtechbiz-berlin-2012|creationTime|2013-07-10T13:26:19Z
+http://gigaom.com/2014/05/12/alchemyapi-rolls-out-deep-learning-based-computer-vision-as-a-service/|creationDate|2014-05-14
+http://gigaom.com/2014/05/12/alchemyapi-rolls-out-deep-learning-based-computer-vision-as-a-service/|tag|http://www.semanlink.net/tag/computer_vision
+http://gigaom.com/2014/05/12/alchemyapi-rolls-out-deep-learning-based-computer-vision-as-a-service/|tag|http://www.semanlink.net/tag/deep_learning +http://gigaom.com/2014/05/12/alchemyapi-rolls-out-deep-learning-based-computer-vision-as-a-service/|title|AlchemyAPI rolls out deep-learning-based computer vision as a service — Tech News and Analysis +http://gigaom.com/2014/05/12/alchemyapi-rolls-out-deep-learning-based-computer-vision-as-a-service/|creationTime|2014-05-14T13:08:07Z +http://www.metmuseum.org/toah/splash.htm|creationDate|2006-05-26 +http://www.metmuseum.org/toah/splash.htm|tag|http://www.semanlink.net/tag/metropolitan_museum_of_art +http://www.metmuseum.org/toah/splash.htm|tag|http://www.semanlink.net/tag/histoire_de_l_art +http://www.metmuseum.org/toah/splash.htm|comment|The Timeline of Art History is a chronological, geographical, and thematic exploration of the history of art from around the world, as illustrated especially by the Metropolitan Museum of Art's collection. +http://www.metmuseum.org/toah/splash.htm|title|Timeline of Art History The Metropolitan Museum of Art +http://putaindecode.io/fr/articles/js/react/|creationDate|2016-07-09 +http://putaindecode.io/fr/articles/js/react/|tag|http://www.semanlink.net/tag/react_js +http://putaindecode.io/fr/articles/js/react/|title|Introduction à ReactJS +http://putaindecode.io/fr/articles/js/react/|creationTime|2016-07-09T16:45:41Z +http://www.vogella.com/tutorials/JavaConcurrency/article.html|creationDate|2014-09-18 +http://www.vogella.com/tutorials/JavaConcurrency/article.html|tag|http://www.semanlink.net/tag/java_concurrency +http://www.vogella.com/tutorials/JavaConcurrency/article.html|tag|http://www.semanlink.net/tag/tutorial +http://www.vogella.com/tutorials/JavaConcurrency/article.html|title|Java concurrency (multi-threading) - Tutorial +http://www.vogella.com/tutorials/JavaConcurrency/article.html|creationTime|2014-09-18T10:27:17Z +http://www.seobythesea.com/2009/10/how-search-engines-might-expand-abbreviations-in-queries/|creationDate|2019-04-03 +http://www.seobythesea.com/2009/10/how-search-engines-might-expand-abbreviations-in-queries/|tag|http://www.semanlink.net/tag/acronyms_nlp +http://www.seobythesea.com/2009/10/how-search-engines-might-expand-abbreviations-in-queries/|title|How Search Engines Might Expand Abbreviations in Queries - SEO by the Sea ⚓ +http://www.seobythesea.com/2009/10/how-search-engines-might-expand-abbreviations-in-queries/|creationTime|2019-04-03T13:01:42Z +http://xmlarmyknife.org/docs/rdf/sparql/|creationDate|2007-01-24 +http://xmlarmyknife.org/docs/rdf/sparql/|tag|http://www.semanlink.net/tag/sparql +http://xmlarmyknife.org/docs/rdf/sparql/|tag|http://www.semanlink.net/tag/leigh_dodds +http://xmlarmyknife.org/docs/rdf/sparql/|title|XMLArmyKnife -- SPARQL Query Service +https://theintercept.com/2017/12/22/snowdens-new-app-uses-your-smartphone-to-physically-guard-your-laptop/|creationDate|2017-12-23 +https://theintercept.com/2017/12/22/snowdens-new-app-uses-your-smartphone-to-physically-guard-your-laptop/|tag|http://www.semanlink.net/tag/edward_snowden +https://theintercept.com/2017/12/22/snowdens-new-app-uses-your-smartphone-to-physically-guard-your-laptop/|title|Edward Snowden’s New App Uses Your Smartphone to Physically Guard Your Laptop +https://theintercept.com/2017/12/22/snowdens-new-app-uses-your-smartphone-to-physically-guard-your-laptop/|creationTime|2017-12-23T11:38:42Z 
+http://nbviewer.jupyter.org/github/bmabey/pyLDAvis/blob/master/notebooks/pyLDAvis_overview.ipynb#topic=0&lambda=0.6&term=|creationDate|2017-06-02 +http://nbviewer.jupyter.org/github/bmabey/pyLDAvis/blob/master/notebooks/pyLDAvis_overview.ipynb#topic=0&lambda=0.6&term=|tag|http://www.semanlink.net/tag/latent_dirichlet_allocation +http://nbviewer.jupyter.org/github/bmabey/pyLDAvis/blob/master/notebooks/pyLDAvis_overview.ipynb#topic=0&lambda=0.6&term=|tag|http://www.semanlink.net/tag/data_visualization_tools +http://nbviewer.jupyter.org/github/bmabey/pyLDAvis/blob/master/notebooks/pyLDAvis_overview.ipynb#topic=0&lambda=0.6&term=|tag|http://www.semanlink.net/tag/gensim +http://nbviewer.jupyter.org/github/bmabey/pyLDAvis/blob/master/notebooks/pyLDAvis_overview.ipynb#topic=0&lambda=0.6&term=|tag|http://www.semanlink.net/tag/topic_modeling +http://nbviewer.jupyter.org/github/bmabey/pyLDAvis/blob/master/notebooks/pyLDAvis_overview.ipynb#topic=0&lambda=0.6&term=|comment|"Python library for interactive topic model visualization. Designed to help users interpret the topics.
+
+see also another notebook dedicated to using it with gensim (include nltk_stopwords,...)
+
+"
+http://nbviewer.jupyter.org/github/bmabey/pyLDAvis/blob/master/notebooks/pyLDAvis_overview.ipynb#topic=0&lambda=0.6&term=|title|pyLDAvis
+http://nbviewer.jupyter.org/github/bmabey/pyLDAvis/blob/master/notebooks/pyLDAvis_overview.ipynb#topic=0&lambda=0.6&term=|creationTime|2017-06-02T00:51:10Z
+http://db.tidbits.com/article/9544|creationDate|2008-04-10
+http://db.tidbits.com/article/9544|tag|http://www.semanlink.net/tag/instant_messaging
+http://db.tidbits.com/article/9544|title|TidBITS Opinion: Instant Messaging for Introverts
+http://db.tidbits.com/article/9544|creationTime|2008-04-10T10:30:02Z
+http://www.w3.org/Submission/2006/10/|creationDate|2007-09-19
+http://www.w3.org/Submission/2006/10/|tag|http://www.semanlink.net/tag/owl_1_1
+http://www.w3.org/Submission/2006/10/|tag|http://www.semanlink.net/tag/w3c_submission
+http://www.w3.org/Submission/2006/10/|tag|http://www.semanlink.net/tag/ian_horrocks
+http://www.w3.org/Submission/2006/10/|title|Submission Request to W3C: OWL 1.1 Web Ontology Language
+http://www.w3.org/Submission/2006/10/|creationTime|2007-09-19T01:04:46Z
+http://jastor.sourceforge.net|creationDate|2005-06-24
+http://jastor.sourceforge.net|tag|http://www.semanlink.net/tag/java
+http://jastor.sourceforge.net|tag|http://www.semanlink.net/tag/owl
+http://jastor.sourceforge.net|comment|Jastor generates Java interfaces, implementations, factories, and listeners based on the properties and class hierarchies in the Web Ontologies.
+http://jastor.sourceforge.net|title|Jastor
+http://www.w3.org/TR/rdfa-api/|creationDate|2010-06-11
+http://www.w3.org/TR/rdfa-api/|tag|http://www.semanlink.net/tag/rdfa
+http://www.w3.org/TR/rdfa-api/|title|RDFa API
+http://www.w3.org/TR/rdfa-api/|creationTime|2010-06-11T13:54:29Z
+http://drdrak.over-blog.com/article-288506.html|creationDate|2005-04-30
+http://drdrak.over-blog.com/article-288506.html|tag|http://www.semanlink.net/tag/mp3
+http://drdrak.over-blog.com/article-288506.html|tag|http://www.semanlink.net/tag/google
+http://drdrak.over-blog.com/article-288506.html|title|Comment trouver des MP3 avec Google
+http://www.wired.co.uk/news/archive/2014-01/27/maidsafe-bitcloud|creationDate|2014-05-12
+http://www.wired.co.uk/news/archive/2014-01/27/maidsafe-bitcloud|tag|http://www.semanlink.net/tag/maidsafe
+http://www.wired.co.uk/news/archive/2014-01/27/maidsafe-bitcloud|title|Scottish company Maidsafe claims to have built a Bitcloud-like system (Wired UK)
+http://www.wired.co.uk/news/archive/2014-01/27/maidsafe-bitcloud|creationTime|2014-05-12T00:00:30Z
+http://www.lemonde.fr/afrique/article/2013/05/23/au-cameroun-un-projet-geant-d-huile-de-palme-fait-scandale_3416319_3212.html|creationDate|2013-07-30
+http://www.lemonde.fr/afrique/article/2013/05/23/au-cameroun-un-projet-geant-d-huile-de-palme-fait-scandale_3416319_3212.html|tag|http://www.semanlink.net/tag/vive_le_capitalisme
+http://www.lemonde.fr/afrique/article/2013/05/23/au-cameroun-un-projet-geant-d-huile-de-palme-fait-scandale_3416319_3212.html|tag|http://www.semanlink.net/tag/cameroun
+http://www.lemonde.fr/afrique/article/2013/05/23/au-cameroun-un-projet-geant-d-huile-de-palme-fait-scandale_3416319_3212.html|tag|http://www.semanlink.net/tag/huile_de_palme
+http://www.lemonde.fr/afrique/article/2013/05/23/au-cameroun-un-projet-geant-d-huile-de-palme-fait-scandale_3416319_3212.html|title|Au Cameroun, un projet géant d'huile de palme fait scandale
+http://www.lemonde.fr/afrique/article/2013/05/23/au-cameroun-un-projet-geant-d-huile-de-palme-fait-scandale_3416319_3212.html|creationTime|2013-07-30T14:30:51Z
+http://www.technologyreview.com/news/506466/given-tablets-but-no-teachers-ethiopian-children-teach-themselves/|creationDate|2012-11-02
+http://www.technologyreview.com/news/506466/given-tablets-but-no-teachers-ethiopian-children-teach-themselves/|tag|http://www.semanlink.net/tag/one_laptop_per_child
+http://www.technologyreview.com/news/506466/given-tablets-but-no-teachers-ethiopian-children-teach-themselves/|tag|http://www.semanlink.net/tag/ethiopie
+http://www.technologyreview.com/news/506466/given-tablets-but-no-teachers-ethiopian-children-teach-themselves/|title|Given Tablets but No Teachers, Ethiopian Children Teach Themselves MIT Technology Review
+http://www.technologyreview.com/news/506466/given-tablets-but-no-teachers-ethiopian-children-teach-themselves/|creationTime|2012-11-02T11:05:29Z
+https://www.quora.com/What-is-the-best-tutorial-on-RNN-LSTM-BRNN-and-BLSTM-with-visualization|creationDate|2017-09-26
+https://www.quora.com/What-is-the-best-tutorial-on-RNN-LSTM-BRNN-and-BLSTM-with-visualization|tag|http://www.semanlink.net/tag/lstm_networks
+https://www.quora.com/What-is-the-best-tutorial-on-RNN-LSTM-BRNN-and-BLSTM-with-visualization|tag|http://www.semanlink.net/tag/recurrent_neural_network
+https://www.quora.com/What-is-the-best-tutorial-on-RNN-LSTM-BRNN-and-BLSTM-with-visualization|tag|http://www.semanlink.net/tag/tutorial
+https://www.quora.com/What-is-the-best-tutorial-on-RNN-LSTM-BRNN-and-BLSTM-with-visualization|title|What is the best tutorial on RNN, LSTM, BRNN, and BLSTM with visualization? - Quora
+https://www.quora.com/What-is-the-best-tutorial-on-RNN-LSTM-BRNN-and-BLSTM-with-visualization|creationTime|2017-09-26T12:13:46Z
+https://krapooarboricole.wordpress.com/2009/11/02/le-chene-de-la-loupe-eure-et-loire/|creationDate|2017-10-06
+https://krapooarboricole.wordpress.com/2009/11/02/le-chene-de-la-loupe-eure-et-loire/|tag|http://www.semanlink.net/tag/chene
+https://krapooarboricole.wordpress.com/2009/11/02/le-chene-de-la-loupe-eure-et-loire/|tag|http://www.semanlink.net/tag/arbres_remarquables
+https://krapooarboricole.wordpress.com/2009/11/02/le-chene-de-la-loupe-eure-et-loire/|title|Le vénérable chêne de La Loupe, Meaucé (Eure-et-Loir) Krapo arboricole
+https://krapooarboricole.wordpress.com/2009/11/02/le-chene-de-la-loupe-eure-et-loire/|creationTime|2017-10-06T22:19:08Z
+https://arxiv.org/abs/1510.00726|creationDate|2017-07-20
+https://arxiv.org/abs/1510.00726|tag|http://www.semanlink.net/tag/nn_4_nlp
+https://arxiv.org/abs/1510.00726|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/abs/1510.00726|arxiv_author|Yoav Goldberg
+https://arxiv.org/abs/1510.00726|title|[1510.00726] A Primer on Neural Network Models for Natural Language Processing
+https://arxiv.org/abs/1510.00726|creationTime|2017-07-20T13:22:06Z
+https://arxiv.org/abs/1510.00726|arxiv_summary|"Over the past few years, neural networks have re-emerged as powerful
+machine-learning models, yielding state-of-the-art results in fields such as
+image recognition and speech processing. More recently, neural network models
+started to be applied also to textual natural language signals, again with very
+promising results. This tutorial surveys neural network models from the
+perspective of natural language processing research, in an attempt to bring
+natural-language researchers up to speed with the neural techniques. The
+tutorial covers input encoding for natural language tasks, feed-forward
+networks, convolutional networks, recurrent networks and recursive networks, as
+well as the computation graph abstraction for automatic gradient computation."
+https://arxiv.org/abs/1510.00726|arxiv_firstAuthor|Yoav Goldberg
+https://arxiv.org/abs/1510.00726|arxiv_updated|2015-10-02T20:17:33Z
+https://arxiv.org/abs/1510.00726|arxiv_title|A Primer on Neural Network Models for Natural Language Processing
+https://arxiv.org/abs/1510.00726|arxiv_published|2015-10-02T20:17:33Z
+https://arxiv.org/abs/1510.00726|arxiv_num|1510.00726
+https://www.bbc.co.uk/news/science-environment-45049024|creationDate|2018-08-03
+https://www.bbc.co.uk/news/science-environment-45049024|tag|http://www.semanlink.net/tag/homme_de_flores
+https://www.bbc.co.uk/news/science-environment-45049024|title|Small height evolved twice on 'Hobbit' island of Flores - BBC News
+https://www.bbc.co.uk/news/science-environment-45049024|creationTime|2018-08-03T10:38:01Z
+http://composing-the-semantic-web.blogspot.com/2009/01/object-oriented-semantic-web-with-spin.html|creationDate|2010-12-13
+http://composing-the-semantic-web.blogspot.com/2009/01/object-oriented-semantic-web-with-spin.html|tag|http://www.semanlink.net/tag/topbraid_spin
+http://composing-the-semantic-web.blogspot.com/2009/01/object-oriented-semantic-web-with-spin.html|title|Composing the Semantic Web: The Object-Oriented Semantic Web with SPIN
+http://composing-the-semantic-web.blogspot.com/2009/01/object-oriented-semantic-web-with-spin.html|creationTime|2010-12-13T17:20:51Z
+http://semanticweb.com/structured-data-gets-a-lift_b23820#more-23820|creationDate|2011-10-11
+http://semanticweb.com/structured-data-gets-a-lift_b23820#more-23820|tag|http://www.semanlink.net/tag/datalift
+http://semanticweb.com/structured-data-gets-a-lift_b23820#more-23820|title|Structured Data Gets a Lift - semanticweb.com
+http://semanticweb.com/structured-data-gets-a-lift_b23820#more-23820|creationTime|2011-10-11T00:27:32Z
+http://particletree.com/features/lightbox-gone-wild/|creationDate|2006-10-11
+http://particletree.com/features/lightbox-gone-wild/|tag|http://www.semanlink.net/tag/dialogs_in_javascript
+http://particletree.com/features/lightbox-gone-wild/|title|Particletree · Lightbox Gone Wild! (Dialogs in javascript)
+http://blog.liu.se/olafhartig/2019/01/10/position-statement-rdf-star-and-sparql-star/|creationDate|2019-03-01
+http://blog.liu.se/olafhartig/2019/01/10/position-statement-rdf-star-and-sparql-star/|tag|http://www.semanlink.net/tag/rdf_star
+http://blog.liu.se/olafhartig/2019/01/10/position-statement-rdf-star-and-sparql-star/|tag|http://www.semanlink.net/tag/olaf_hartig
+http://blog.liu.se/olafhartig/2019/01/10/position-statement-rdf-star-and-sparql-star/|tag|http://www.semanlink.net/tag/rdf_and_property_graphs
+http://blog.liu.se/olafhartig/2019/01/10/position-statement-rdf-star-and-sparql-star/|title|Position Statement: The RDF* and SPARQL* Approach to Annotate Statements in RDF and to Reconcile RDF and Property Graphs Olaf Hartig
+http://blog.liu.se/olafhartig/2019/01/10/position-statement-rdf-star-and-sparql-star/|creationTime|2019-03-01T00:16:18Z
+http://webr3.org/blog/|creationDate|2011-08-11
+http://webr3.org/blog/|tag|http://www.semanlink.net/tag/nathan_rixham
+http://webr3.org/blog/|title|webr3.org
+http://webr3.org/blog/|creationTime|2011-08-11T19:10:07Z
+http://radimrehurek.com/gensim/models/phrases.html|creationDate|2017-07-10
+http://radimrehurek.com/gensim/models/phrases.html|tag|http://www.semanlink.net/tag/nlp_topic_extraction
+http://radimrehurek.com/gensim/models/phrases.html|tag|http://www.semanlink.net/tag/gensim
+http://radimrehurek.com/gensim/models/phrases.html|comment|"Automatically detect common phrases – aka multi-word expressions, word n-gram collocations – from a stream of sentences.
+
+[see also](http://www.markhneedham.com/blog/2015/02/12/pythongensim-creating-bigrams-over-how-i-met-your-mother-transcripts/#disqus_thread)"
+http://radimrehurek.com/gensim/models/phrases.html|title|gensim: models.phrases – Phrase (collocation) detection
+http://radimrehurek.com/gensim/models/phrases.html|creationTime|2017-07-10T19:05:37Z
+https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet|creationDate|2015-05-14
+https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet|tag|http://www.semanlink.net/tag/markdown
+https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet|tag|http://www.semanlink.net/tag/cheat_sheet
+https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet|title|Markdown Cheatsheet
+https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet|creationTime|2015-05-14T17:46:47Z
+http://philcalcado.com/2017/03/22/pattern_using_seudo-uris_with_microservices.html|creationDate|2017-03-28
+http://philcalcado.com/2017/03/22/pattern_using_seudo-uris_with_microservices.html|tag|http://www.semanlink.net/tag/programming
+http://philcalcado.com/2017/03/22/pattern_using_seudo-uris_with_microservices.html|tag|http://www.semanlink.net/tag/uri
+http://philcalcado.com/2017/03/22/pattern_using_seudo-uris_with_microservices.html|title|Pattern: Using Pseudo-URIs with Microservices
+http://philcalcado.com/2017/03/22/pattern_using_seudo-uris_with_microservices.html|creationTime|2017-03-28T12:13:32Z
+http://histropedia.com/|creationDate|2015-02-19
+http://histropedia.com/|tag|http://www.semanlink.net/tag/timeline
+http://histropedia.com/|title|Histropedia - The Timeline of Everything
+http://histropedia.com/|creationTime|2015-02-19T01:05:17Z
+http://stackoverflow.com/questions/11106911/jersey-how-to-get-the-uri-of-a-resource|creationDate|2016-10-28
+http://stackoverflow.com/questions/11106911/jersey-how-to-get-the-uri-of-a-resource|tag|http://www.semanlink.net/tag/stack_overflow
+http://stackoverflow.com/questions/11106911/jersey-how-to-get-the-uri-of-a-resource|tag|http://www.semanlink.net/tag/jersey +http://stackoverflow.com/questions/11106911/jersey-how-to-get-the-uri-of-a-resource|comment|"but ""No way to look up a resource by its URI""" +http://stackoverflow.com/questions/11106911/jersey-how-to-get-the-uri-of-a-resource|title|java - Jersey: How to get the URI of a resource? - Stack Overflow +http://stackoverflow.com/questions/11106911/jersey-how-to-get-the-uri-of-a-resource|creationTime|2016-10-28T18:59:07Z +https://github.com/zazi/rdf2rdfaizer|creationDate|2013-08-27 +https://github.com/zazi/rdf2rdfaizer|tag|http://www.semanlink.net/tag/rdf_owl_documentation_tool +https://github.com/zazi/rdf2rdfaizer|tag|http://www.semanlink.net/tag/rdf2rdfa +https://github.com/zazi/rdf2rdfaizer|tag|http://www.semanlink.net/tag/github_project +https://github.com/zazi/rdf2rdfaizer|comment|RDF2RDFa-izer is currently a deadly simple web frontend of the RDFa Serializer plugin (written by Keith Alexander) for ARC, a Semantic Web Framework written in PHP. +https://github.com/zazi/rdf2rdfaizer|title|RDF2RDFa-izer +https://github.com/zazi/rdf2rdfaizer|creationTime|2013-08-27T16:50:23Z +http://www.cnrs.fr/inee/communication/breves/b175.html?utm_content=bufferef172&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-03-20 +http://www.cnrs.fr/inee/communication/breves/b175.html?utm_content=bufferef172&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/biodiversite +http://www.cnrs.fr/inee/communication/breves/b175.html?utm_content=bufferef172&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/insectes_fossiles +http://www.cnrs.fr/inee/communication/breves/b175.html?utm_content=bufferef172&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/insecte +http://www.cnrs.fr/inee/communication/breves/b175.html?utm_content=bufferef172&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/evolution +http://www.cnrs.fr/inee/communication/breves/b175.html?utm_content=bufferef172&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Les origines de l'incroyable biodiversité des insectes remises en cause +http://www.cnrs.fr/inee/communication/breves/b175.html?utm_content=bufferef172&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-03-20T12:39:07Z +http://bugbrother.blog.lemonde.fr/2012/07/12/plus-belle-la-videosurveillance/|creationDate|2012-07-12 +http://bugbrother.blog.lemonde.fr/2012/07/12/plus-belle-la-videosurveillance/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2012/07/12/plus-belle-la-videosurveillance/|tag|http://www.semanlink.net/tag/videosurveillance +http://bugbrother.blog.lemonde.fr/2012/07/12/plus-belle-la-videosurveillance/|title|Plus belle la vidéosurveillance BUG BROTHER +http://bugbrother.blog.lemonde.fr/2012/07/12/plus-belle-la-videosurveillance/|creationTime|2012-07-12T19:43:06Z +http://www.cringely.com/2016/05/14/apple-and-didi-is-about-foreign-cash-and-the-future-of-motoring/|creationDate|2016-05-15 +http://www.cringely.com/2016/05/14/apple-and-didi-is-about-foreign-cash-and-the-future-of-motoring/|tag|http://www.semanlink.net/tag/apple +http://www.cringely.com/2016/05/14/apple-and-didi-is-about-foreign-cash-and-the-future-of-motoring/|tag|http://www.semanlink.net/tag/driverless_car 
+http://www.cringely.com/2016/05/14/apple-and-didi-is-about-foreign-cash-and-the-future-of-motoring/|tag|http://www.semanlink.net/tag/uber +http://www.cringely.com/2016/05/14/apple-and-didi-is-about-foreign-cash-and-the-future-of-motoring/|tag|http://www.semanlink.net/tag/cringely +http://www.cringely.com/2016/05/14/apple-and-didi-is-about-foreign-cash-and-the-future-of-motoring/|title|Apple and Didi is about foreign cash and the future of motoring - I, Cringely +http://www.cringely.com/2016/05/14/apple-and-didi-is-about-foreign-cash-and-the-future-of-motoring/|creationTime|2016-05-15T11:23:41Z +http://www.springerlink.com/content/u3588666k780ng58/|creationDate|2011-02-02 +http://www.springerlink.com/content/u3588666k780ng58/|tag|http://www.semanlink.net/tag/yves_raymond +http://www.springerlink.com/content/u3588666k780ng58/|tag|http://www.semanlink.net/tag/semantic_web +http://www.springerlink.com/content/u3588666k780ng58/|tag|http://www.semanlink.net/tag/semantic_web_sites +http://www.springerlink.com/content/u3588666k780ng58/|tag|http://www.semanlink.net/tag/bbc +http://www.springerlink.com/content/u3588666k780ng58/|tag|http://www.semanlink.net/tag/bbc_programmes +http://www.springerlink.com/content/u3588666k780ng58/|title|Use of Semantic Web technologies on the BBC Web Sites +http://www.springerlink.com/content/u3588666k780ng58/|creationTime|2011-02-02T22:27:45Z +http://mindraider.sourceforge.net/|creationDate|2005-12-31 +http://mindraider.sourceforge.net/|tag|http://www.semanlink.net/tag/semantic_web_outliner +http://mindraider.sourceforge.net/|tag|http://www.semanlink.net/tag/mindmap +http://mindraider.sourceforge.net/|comment|MindRaider is Semantic Web outliner. It aims to connect the tradition of outline editors with emerging technologies. MindRaider mission is to organize not only the content of your hard drive but also your cognitive base and social relationships in a way that enables quick navigation, concise representation and inferencing. 
+http://mindraider.sourceforge.net/|title|MindRaider - Semantic Web Outliner
+http://www.fastcompany.com/1708896/wiki-bee-leaks-epa-document-reveals-agency-knowingly-allowed-use-of-bee-toxic-pesticide|creationDate|2010-12-14
+http://www.fastcompany.com/1708896/wiki-bee-leaks-epa-document-reveals-agency-knowingly-allowed-use-of-bee-toxic-pesticide|tag|http://www.semanlink.net/tag/disparition_des_abeilles
+http://www.fastcompany.com/1708896/wiki-bee-leaks-epa-document-reveals-agency-knowingly-allowed-use-of-bee-toxic-pesticide|tag|http://www.semanlink.net/tag/pesticide
+http://www.fastcompany.com/1708896/wiki-bee-leaks-epa-document-reveals-agency-knowingly-allowed-use-of-bee-toxic-pesticide|title|Wik-Bee Leaks: EPA Document Shows It Knowingly Allowed Pesticide That Kills Honey Bees Fast Company
+http://www.fastcompany.com/1708896/wiki-bee-leaks-epa-document-reveals-agency-knowingly-allowed-use-of-bee-toxic-pesticide|creationTime|2010-12-14T23:31:45Z
+http://cordis.europa.eu/fp7/ict/content-knowledge/docs/report-ws-pan-eu-dat-porta_en.pdf|creationDate|2012-10-23
+http://cordis.europa.eu/fp7/ict/content-knowledge/docs/report-ws-pan-eu-dat-porta_en.pdf|tag|http://www.semanlink.net/tag/data_portal
+http://cordis.europa.eu/fp7/ict/content-knowledge/docs/report-ws-pan-eu-dat-porta_en.pdf|tag|http://www.semanlink.net/tag/data_publica
+http://cordis.europa.eu/fp7/ict/content-knowledge/docs/report-ws-pan-eu-dat-porta_en.pdf|comment|2010
+http://cordis.europa.eu/fp7/ict/content-knowledge/docs/report-ws-pan-eu-dat-porta_en.pdf|title|Technical workshop on the goals and requirements for a pan-European data portal
+http://cordis.europa.eu/fp7/ict/content-knowledge/docs/report-ws-pan-eu-dat-porta_en.pdf|creationTime|2012-10-23T00:54:09Z
+http://continuations.com/post/91111911845/more-on-basic-income-and-robots|creationDate|2015-11-09
+http://continuations.com/post/91111911845/more-on-basic-income-and-robots|tag|http://www.semanlink.net/tag/guaranteed_basic_income
+http://continuations.com/post/91111911845/more-on-basic-income-and-robots|title|Continuations : More On Basic Income (and Robots)
+http://continuations.com/post/91111911845/more-on-basic-income-and-robots|creationTime|2015-11-09T13:28:48Z
+http://parachutes.tv/pages/beauty.html|creationDate|2013-12-14
+http://parachutes.tv/pages/beauty.html|tag|http://www.semanlink.net/tag/mathematiques
+http://parachutes.tv/pages/beauty.html|title|BEAUTY OF MATHEMATICS - PARACHUTES
+http://parachutes.tv/pages/beauty.html|creationTime|2013-12-14T19:07:27Z
+https://github.com/google-research/bert|creationDate|2018-11-05
+https://github.com/google-research/bert|tag|http://www.semanlink.net/tag/github
+https://github.com/google-research/bert|tag|http://www.semanlink.net/tag/bert
+https://github.com/google-research/bert|comment|"Code and pretrained weights for BERT.
+Includes scripts to reproduce results. BERT-Base can be fine-tuned on a standard GPU; for BERT-Large, a Cloud TPU is required"
+https://github.com/google-research/bert|title|GitHub - google-research/bert: TensorFlow code and pre-trained models for BERT
+https://github.com/google-research/bert|creationTime|2018-11-05T15:04:06Z
+https://github.com/markdown-it/markdown-it|creationDate|2017-02-11
+https://github.com/markdown-it/markdown-it|tag|http://www.semanlink.net/tag/markdown_ittt
+https://github.com/markdown-it/markdown-it|tag|http://www.semanlink.net/tag/github_project
+https://github.com/markdown-it/markdown-it|title|markdown-it
+https://github.com/markdown-it/markdown-it|creationTime|2017-02-11T17:04:16Z
+https://www.wired.com/story/how-to-protect-yourself-after-the-next-big-corporate-hack/|creationDate|2017-12-17
+https://www.wired.com/story/how-to-protect-yourself-after-the-next-big-corporate-hack/|tag|http://www.semanlink.net/tag/securite_informatique
+https://www.wired.com/story/how-to-protect-yourself-after-the-next-big-corporate-hack/|tag|http://www.semanlink.net/tag/hack
+https://www.wired.com/story/how-to-protect-yourself-after-the-next-big-corporate-hack/|title|How to Protect Yourself After the Next Big Corporate Hack WIRED
+https://www.wired.com/story/how-to-protect-yourself-after-the-next-big-corporate-hack/|creationTime|2017-12-17T12:04:53Z
+https://cloud.google.com/datalab/|creationDate|2018-08-22
+https://cloud.google.com/datalab/|tag|http://www.semanlink.net/tag/google_cloud_platform
+https://cloud.google.com/datalab/|title|Cloud Datalab – Outil interactif d'analyse de données    Google Cloud
+https://cloud.google.com/datalab/|creationTime|2018-08-22T15:20:22Z
+https://stanfordnlp.github.io/stanfordnlp/|creationDate|2019-01-30
+https://stanfordnlp.github.io/stanfordnlp/|tag|http://www.semanlink.net/tag/python_nlp
+https://stanfordnlp.github.io/stanfordnlp/|tag|http://www.semanlink.net/tag/nlp_stanford
+https://stanfordnlp.github.io/stanfordnlp/|title|StanfordNLP StanfordNLP
+https://stanfordnlp.github.io/stanfordnlp/|creationTime|2019-01-30T22:52:47Z
+http://arstechnica.com/science/2016/07/algorithms-used-to-study-brain-activity-may-be-exaggerating-results/|creationDate|2016-07-03
+http://arstechnica.com/science/2016/07/algorithms-used-to-study-brain-activity-may-be-exaggerating-results/|tag|http://www.semanlink.net/tag/neuroscience
+http://arstechnica.com/science/2016/07/algorithms-used-to-study-brain-activity-may-be-exaggerating-results/|title|Software faults raise questions about the validity of brain studies Ars Technica
+http://arstechnica.com/science/2016/07/algorithms-used-to-study-brain-activity-may-be-exaggerating-results/|creationTime|2016-07-03T01:58:50Z
+http://www.france5.fr/maternelles/scolarite/w00337/12/114620.cfm|creationDate|2008-04-08
+http://www.france5.fr/maternelles/scolarite/w00337/12/114620.cfm|tag|http://www.semanlink.net/tag/filets_a_nuages
+http://www.france5.fr/maternelles/scolarite/w00337/12/114620.cfm|tag|http://www.semanlink.net/tag/la_main_a_la_pate
+http://www.france5.fr/maternelles/scolarite/w00337/12/114620.cfm|tag|http://www.semanlink.net/tag/secheresse
+http://www.france5.fr/maternelles/scolarite/w00337/12/114620.cfm|comment|Grâce à ces filets à nuages (il y en a une centaine, hauts de 4 mètres et long de 12 mètres) tendus le long d'une crête, à 800 mètres d'altitude, l'eau des brumes persistantes est retenue dans les mailles, sous forme de gouttelettes qui ruissellent le long de gouttières, s'écoulent dans une tuyauterie jusqu'à une citerne installée dans le village. Résultat, grâce à ce système, 15 000 litres peuvent être récupérés chaque jour.
+http://www.france5.fr/maternelles/scolarite/w00337/12/114620.cfm|title|France 5 : Les maternelles - Ecologie (Les Petits Débrouillards)
+http://www.france5.fr/maternelles/scolarite/w00337/12/114620.cfm|creationTime|2008-04-08T13:05:44Z
+http://www.wiwiss.fu-berlin.de/suhl/bizer/toolkits/|creationDate|2006-01-27
+http://www.wiwiss.fu-berlin.de/suhl/bizer/toolkits/|tag|http://www.semanlink.net/tag/semantic_web_dev
+http://www.wiwiss.fu-berlin.de/suhl/bizer/toolkits/|title|Developers Guide to Semantic Web Toolkits for different Programming Languages
+https://www.theatlantic.com/technology/archive/2017/04/the-tragedy-of-google-books/523320/|creationDate|2017-06-02
+https://www.theatlantic.com/technology/archive/2017/04/the-tragedy-of-google-books/523320/|tag|http://www.semanlink.net/tag/library_of_alexandria
+https://www.theatlantic.com/technology/archive/2017/04/the-tragedy-of-google-books/523320/|title|Torching the Modern-Day Library of Alexandria - The Atlantic
+https://www.theatlantic.com/technology/archive/2017/04/the-tragedy-of-google-books/523320/|creationTime|2017-06-02T09:04:56Z
+http://swik.net/|creationDate|2007-01-24
+http://swik.net/|tag|http://www.semanlink.net/tag/wiki
+http://swik.net/|tag|http://www.semanlink.net/tag/open_source
+http://swik.net/|comment|SWiK is a community driven resource for open source software.
+http://swik.net/|title|The Open Software Wiki - SWiK
+https://medium.com/basic-income/true-freedom-comes-with-basic-income-7ff1368e170#.bsbnlrs5h|creationDate|2017-01-24
+https://medium.com/basic-income/true-freedom-comes-with-basic-income-7ff1368e170#.bsbnlrs5h|tag|http://www.semanlink.net/tag/guaranteed_basic_income
+https://medium.com/basic-income/true-freedom-comes-with-basic-income-7ff1368e170#.bsbnlrs5h|title|True Freedom Comes With Basic Income
+https://medium.com/basic-income/true-freedom-comes-with-basic-income-7ff1368e170#.bsbnlrs5h|creationTime|2017-01-24T23:39:32Z
+http://code.tutsplus.com/tutorials/8-regular-expressions-you-should-know--net-6149|creationDate|2015-02-25
+http://code.tutsplus.com/tutorials/8-regular-expressions-you-should-know--net-6149|tag|http://www.semanlink.net/tag/regex
+http://code.tutsplus.com/tutorials/8-regular-expressions-you-should-know--net-6149|title|8 Regular Expressions You Should Know - Tuts+ Code Tutorial
+http://code.tutsplus.com/tutorials/8-regular-expressions-you-should-know--net-6149|creationTime|2015-02-25T17:18:59Z
+https://app.box.com/notes/238671166577|creationDate|2017-10-24
+https://app.box.com/notes/238671166577|comment|"CR (Franck Boudinet) du workshop au Plessis
+"
+https://app.box.com/notes/238671166577|title|Workshop 17Oct2017
+https://app.box.com/notes/238671166577|creationTime|2017-10-24T10:53:45Z
+http://www.niso.org/news/events/2013/dcmi/developing/|creationDate|2013-08-26
+http://www.niso.org/news/events/2013/dcmi/developing/|tag|http://www.semanlink.net/tag/developing_countries
+http://www.niso.org/news/events/2013/dcmi/developing/|tag|http://www.semanlink.net/tag/linked_data
+http://www.niso.org/news/events/2013/dcmi/developing/|comment|Implementing Linked Data in Developing Countries and Low-Resource Conditions
+http://www.niso.org/news/events/2013/dcmi/developing/|title|September 25: Linked Data in Developing Countries - National Information Standards Organization
+http://www.niso.org/news/events/2013/dcmi/developing/|creationTime|2013-08-26T14:55:44Z
+http://pisani.blog.lemonde.fr/pisani/2005/07/le_moteur_de_re.html|creationDate|2005-07-08 +http://pisani.blog.lemonde.fr/pisani/2005/07/le_moteur_de_re.html|tag|http://www.semanlink.net/tag/yahoo_my_web_2_0 +http://pisani.blog.lemonde.fr/pisani/2005/07/le_moteur_de_re.html|tag|http://www.semanlink.net/tag/search_engines +http://pisani.blog.lemonde.fr/pisani/2005/07/le_moteur_de_re.html|tag|http://www.semanlink.net/tag/transnets +http://pisani.blog.lemonde.fr/pisani/2005/07/le_moteur_de_re.html|title|Transnets, des gadgets aux réseaux: Le moteur de recherche : interface et miroir (1) +http://jsoup.org/cookbook/cleaning-html/whitelist-sanitizer|creationDate|2013-08-20 +http://jsoup.org/cookbook/cleaning-html/whitelist-sanitizer|tag|http://www.semanlink.net/tag/xss +http://jsoup.org/cookbook/cleaning-html/whitelist-sanitizer|tag|http://www.semanlink.net/tag/html_parsing +http://jsoup.org/cookbook/cleaning-html/whitelist-sanitizer|comment|You want to allow untrusted users to supply HTML for output on your website (e.g. as comment submission). You need to clean this HTML to avoid cross-site scripting (XSS) attacks. +http://jsoup.org/cookbook/cleaning-html/whitelist-sanitizer|title|Prevent cross site scripting with jsoup +http://jsoup.org/cookbook/cleaning-html/whitelist-sanitizer|creationTime|2013-08-20T17:09:02Z +http://sleeplessinslc.blogspot.fr/2009/03/example-of-caching-with-rest-using.html|creationDate|2012-05-07 +http://sleeplessinslc.blogspot.fr/2009/03/example-of-caching-with-rest-using.html|tag|http://www.semanlink.net/tag/jersey +http://sleeplessinslc.blogspot.fr/2009/03/example-of-caching-with-rest-using.html|tag|http://www.semanlink.net/tag/http_cache +http://sleeplessinslc.blogspot.fr/2009/03/example-of-caching-with-rest-using.html|title|Sleepless in Salt Lake City: An Example of Caching with REST using Jersey JAX-RS +http://sleeplessinslc.blogspot.fr/2009/03/example-of-caching-with-rest-using.html|creationTime|2012-05-07T19:03:45Z +http://www.jenitennison.com/blog/node/149|creationDate|2012-08-24 +http://www.jenitennison.com/blog/node/149|tag|http://www.semanlink.net/tag/jeni_tennison +http://www.jenitennison.com/blog/node/149|tag|http://www.semanlink.net/tag/rdf +http://www.jenitennison.com/blog/node/149|title|Priorities for RDF Jeni's Musings +http://www.jenitennison.com/blog/node/149|creationTime|2012-08-24T18:24:58Z +http://code.google.com/p/owl1-1/wiki/UserRequirements|creationDate|2008-01-08 +http://code.google.com/p/owl1-1/wiki/UserRequirements|tag|http://www.semanlink.net/tag/owl_1_1 +http://code.google.com/p/owl1-1/wiki/UserRequirements|tag|http://www.semanlink.net/tag/owled +http://code.google.com/p/owl1-1/wiki/UserRequirements|tag|http://www.semanlink.net/tag/christine_golbreich +http://code.google.com/p/owl1-1/wiki/UserRequirements|comment|Home page de la task force animée par Christine Golbreich +http://code.google.com/p/owl1-1/wiki/UserRequirements|title|UserRequirements - owl1-1 - Google Code +http://code.google.com/p/owl1-1/wiki/UserRequirements|creationTime|2008-01-08T21:38:23Z +http://www.newscientist.com/article.ns?id=dn8251&print=true|creationDate|2005-11-02 +http://www.newscientist.com/article.ns?id=dn8251&print=true|tag|http://www.semanlink.net/tag/sexe +http://www.newscientist.com/article.ns?id=dn8251&print=true|title|Hormone levels predict attractiveness of women - New Scientist 
+http://www.newscientist.com/article.ns?id=dn8251&print=true|seeAlso|http://www.semanlink.net/doc/2005/11/Hormone%20levels%20predict%20attractiveness%20of%20women%20-%20New%20Scientist.jpg
+http://www.lightbluetouchpaper.org/2010/12/25/a-merry-christmas-to-all-bankers/|creationDate|2010-12-27
+http://www.lightbluetouchpaper.org/2010/12/25/a-merry-christmas-to-all-bankers/|tag|http://www.semanlink.net/tag/banque
+http://www.lightbluetouchpaper.org/2010/12/25/a-merry-christmas-to-all-bankers/|tag|http://www.semanlink.net/tag/credit_card
+http://www.lightbluetouchpaper.org/2010/12/25/a-merry-christmas-to-all-bankers/|title|Light Blue Touchpaper - A Merry Christmas to all Bankers
+http://www.lightbluetouchpaper.org/2010/12/25/a-merry-christmas-to-all-bankers/|creationTime|2010-12-27T13:53:08Z
+http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=100821|creationDate|2011-03-27
+http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=100821|tag|http://www.semanlink.net/tag/heredite
+http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=100821|tag|http://www.semanlink.net/tag/transposon
+http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=100821|tag|http://www.semanlink.net/tag/jean_claude_ameisen
+http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=100821|comment|L’intérieur et l’extérieur s’interpénètrent, et un organisme vivant est à la fois le produit et le lieu de cette interaction
+http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=100821|title|France Inter > Sur les épaules de Darwin - A la recherche des mystères de l’hérédité
+http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=100821|creationTime|2011-03-27T12:24:52Z
+http://osds.openlinksw.com/#DownloadChrome|creationDate|2019-05-04
+http://osds.openlinksw.com/#DownloadChrome|tag|http://www.semanlink.net/tag/openlink
+http://osds.openlinksw.com/#DownloadChrome|tag|http://www.semanlink.net/tag/data_sniffer
+http://osds.openlinksw.com/#DownloadChrome|title|OpenLink Structured Data Sniffer
+http://osds.openlinksw.com/#DownloadChrome|creationTime|2019-05-04T13:14:54Z
+https://www.ibm.com/watson/developercloud/|creationDate|2017-06-06
+https://www.ibm.com/watson/developercloud/|tag|http://www.semanlink.net/tag/i_b_m_s_watson
+https://www.ibm.com/watson/developercloud/|title|IBM Watson Developer Cloud
+https://www.ibm.com/watson/developercloud/|creationTime|2017-06-06T11:53:54Z
+http://nytimes.blogrunner.com|creationDate|2005-04-05
+http://nytimes.blogrunner.com|tag|http://www.semanlink.net/tag/new_york_times
+http://nytimes.blogrunner.com|tag|http://www.semanlink.net/tag/blog
+http://www.infosdelaplanete.org/4447/mali-coupe-de-bois-contre-hopital-et-3e-pont.html|creationDate|2008-09-24
+http://www.infosdelaplanete.org/4447/mali-coupe-de-bois-contre-hopital-et-3e-pont.html|tag|http://www.semanlink.net/tag/chine_afrique
+http://www.infosdelaplanete.org/4447/mali-coupe-de-bois-contre-hopital-et-3e-pont.html|tag|http://www.semanlink.net/tag/deforestation
+http://www.infosdelaplanete.org/4447/mali-coupe-de-bois-contre-hopital-et-3e-pont.html|tag|http://www.semanlink.net/tag/mali
+http://www.infosdelaplanete.org/4447/mali-coupe-de-bois-contre-hopital-et-3e-pont.html|title|Infos de la Planète - Mali: Coupe de bois contre hôpital et 3e pont ? - Les Echos (Mali) - 2008-09-16
+http://www.infosdelaplanete.org/4447/mali-coupe-de-bois-contre-hopital-et-3e-pont.html|creationTime|2008-09-24T23:02:07Z
+https://fr.slideshare.net/BhaskarMitra3/vectorland-brief-notes-from-using-text-embeddings-for-search|creationDate|2017-09-18
+https://fr.slideshare.net/BhaskarMitra3/vectorland-brief-notes-from-using-text-embeddings-for-search|tag|http://www.semanlink.net/tag/embeddings_in_ir
+https://fr.slideshare.net/BhaskarMitra3/vectorland-brief-notes-from-using-text-embeddings-for-search|tag|http://www.semanlink.net/tag/bhaskar_mitra
+https://fr.slideshare.net/BhaskarMitra3/vectorland-brief-notes-from-using-text-embeddings-for-search|tag|http://www.semanlink.net/tag/slideshare
+https://fr.slideshare.net/BhaskarMitra3/vectorland-brief-notes-from-using-text-embeddings-for-search|tag|http://www.semanlink.net/tag/nlp_microsoft
+https://fr.slideshare.net/BhaskarMitra3/vectorland-brief-notes-from-using-text-embeddings-for-search|comment|"> the elegance is in the learning model, but the magic is in the structure of the information we model
+
+> The source-target training pairs dictate **what notion of ""relatedness""** will be modeled in the embedding space
+
+> is Eminem more similar to Rihanna or rap?
+
+
+
+"
+https://fr.slideshare.net/BhaskarMitra3/vectorland-brief-notes-from-using-text-embeddings-for-search|title|Vectorland: Brief Notes from Using Text Embeddings for Search
+https://fr.slideshare.net/BhaskarMitra3/vectorland-brief-notes-from-using-text-embeddings-for-search|creationTime|2017-09-18T18:58:10Z
+http://driving.stanford.edu/papers.html|creationDate|2016-09-16
+http://driving.stanford.edu/papers.html|tag|http://www.semanlink.net/tag/driverless_car
+http://driving.stanford.edu/papers.html|title|Stanford Autonomous Driving Team - Publications
+http://driving.stanford.edu/papers.html|creationTime|2016-09-16T16:16:13Z
+http://linuxfr.org/news/api-platform-2-un-cadriciel-pour-creer-des-api-web-hypermedia-en-quelques-minutes|creationDate|2017-04-01
+http://linuxfr.org/news/api-platform-2-un-cadriciel-pour-creer-des-api-web-hypermedia-en-quelques-minutes|tag|http://www.semanlink.net/tag/hateoas
+http://linuxfr.org/news/api-platform-2-un-cadriciel-pour-creer-des-api-web-hypermedia-en-quelques-minutes|title|API Platform 2 : un cadriciel pour créer des API Web hypermédia en quelques minutes - LinuxFr.org
+http://linuxfr.org/news/api-platform-2-un-cadriciel-pour-creer-des-api-web-hypermedia-en-quelques-minutes|creationTime|2017-04-01T18:51:47Z
+https://voyageenbarbarie.wordpress.com/|creationDate|2015-08-22
+https://voyageenbarbarie.wordpress.com/|tag|http://www.semanlink.net/tag/torture
+https://voyageenbarbarie.wordpress.com/|tag|http://www.semanlink.net/tag/sinai
+https://voyageenbarbarie.wordpress.com/|tag|http://www.semanlink.net/tag/documentaire_tv
+https://voyageenbarbarie.wordpress.com/|tag|http://www.semanlink.net/tag/erythree
+https://voyageenbarbarie.wordpress.com/|tag|http://www.semanlink.net/tag/immigration
+https://voyageenbarbarie.wordpress.com/|title|Voyage en Barbarie, de Delphine Deloget et Cécile Allegra
+https://voyageenbarbarie.wordpress.com/|creationTime|2015-08-22T23:48:19Z
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=20931|creationDate|2007-09-19
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=20931|tag|http://www.semanlink.net/tag/lod_limitations_on_browseable_data
+http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=20931|tag|http://www.semanlink.net/tag/linked_data +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=20931|tag|http://www.semanlink.net/tag/chris_bizer +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=20931|title|[Linking-open-data] Returning to backlinks - Additional requirement: Paging +http://simile.mit.edu/mail/ReadMsg?listName=Linking%20Open%20Data&msgId=20931|creationTime|2007-09-19T14:20:57Z +http://www2.cnrs.fr/presse/communique/4673.htm?utm_content=buffer4527f&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-09-05 +http://www2.cnrs.fr/presse/communique/4673.htm?utm_content=buffer4527f&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/virus +http://www2.cnrs.fr/presse/communique/4673.htm?utm_content=buffer4527f&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Placenta chez les femelles, masse musculaire chez les mâles : le double héritage d'un virus - Communiqués et dossiers de presse - CNRS +http://www2.cnrs.fr/presse/communique/4673.htm?utm_content=buffer4527f&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-09-05T11:50:56Z +http://apassant.net/blog/2008/01/12/one-foaf-fits-all/|creationDate|2008-04-07 +http://apassant.net/blog/2008/01/12/one-foaf-fits-all/|tag|http://www.semanlink.net/tag/foaf +http://apassant.net/blog/2008/01/12/one-foaf-fits-all/|tag|http://www.semanlink.net/tag/alexandre_passant +http://apassant.net/blog/2008/01/12/one-foaf-fits-all/|title|One FOAF fits all : Alexandre Passant +http://apassant.net/blog/2008/01/12/one-foaf-fits-all/|creationTime|2008-04-07T22:21:26Z +http://piketty.blog.lemonde.fr/2015/12/03/pourquoi-le-gouvernement-protege-t-il-les-multinationales/|creationDate|2015-12-04 +http://piketty.blog.lemonde.fr/2015/12/03/pourquoi-le-gouvernement-protege-t-il-les-multinationales/|tag|http://www.semanlink.net/tag/thomas_piketty +http://piketty.blog.lemonde.fr/2015/12/03/pourquoi-le-gouvernement-protege-t-il-les-multinationales/|title|Pourquoi le gouvernement protège-t-il les multinationales ? Le blog de Thomas Piketty +http://piketty.blog.lemonde.fr/2015/12/03/pourquoi-le-gouvernement-protege-t-il-les-multinationales/|creationTime|2015-12-04T14:22:32Z +http://press.web.cern.ch/press/PressReleases/Releases2010/PR07.10E.html|creationDate|2010-03-30 +http://press.web.cern.ch/press/PressReleases/Releases2010/PR07.10E.html|tag|http://www.semanlink.net/tag/lhc +http://press.web.cern.ch/press/PressReleases/Releases2010/PR07.10E.html|tag|http://www.semanlink.net/tag/bonne_nouvelle +http://press.web.cern.ch/press/PressReleases/Releases2010/PR07.10E.html|comment|Beams collided at 7 TeV in the LHC at 13:06 CEST, marking the start of the LHC research programme. 
+http://press.web.cern.ch/press/PressReleases/Releases2010/PR07.10E.html|title|LHC research programme gets underway +http://press.web.cern.ch/press/PressReleases/Releases2010/PR07.10E.html|creationTime|2010-03-30T15:26:04Z +http://www.lemonde.fr/afrique/article/2016/03/03/la-guerre-du-sexe-passe-de-l-afrique-aux-ghettos-urbains-de-chicago_4876235_3212.html|creationDate|2016-07-25 +http://www.lemonde.fr/afrique/article/2016/03/03/la-guerre-du-sexe-passe-de-l-afrique-aux-ghettos-urbains-de-chicago_4876235_3212.html|tag|http://www.semanlink.net/tag/cinema_americain +http://www.lemonde.fr/afrique/article/2016/03/03/la-guerre-du-sexe-passe-de-l-afrique-aux-ghettos-urbains-de-chicago_4876235_3212.html|tag|http://www.semanlink.net/tag/greve_du_sexe +http://www.lemonde.fr/afrique/article/2016/03/03/la-guerre-du-sexe-passe-de-l-afrique-aux-ghettos-urbains-de-chicago_4876235_3212.html|title|La guerre du sexe passe de l’Afrique aux ghettos urbains de Chicago +http://www.lemonde.fr/afrique/article/2016/03/03/la-guerre-du-sexe-passe-de-l-afrique-aux-ghettos-urbains-de-chicago_4876235_3212.html|creationTime|2016-07-25T16:54:44Z +http://realitesbiomedicales.blog.lemonde.fr/2017/08/08/interview-confession-du-plus-grand-serial-killer-le-moustique/|creationDate|2017-08-24 +http://realitesbiomedicales.blog.lemonde.fr/2017/08/08/interview-confession-du-plus-grand-serial-killer-le-moustique/|tag|http://www.semanlink.net/tag/moustique +http://realitesbiomedicales.blog.lemonde.fr/2017/08/08/interview-confession-du-plus-grand-serial-killer-le-moustique/|title|Interview-confession du plus grand serial-killer : le moustique Réalités Biomédicales +http://realitesbiomedicales.blog.lemonde.fr/2017/08/08/interview-confession-du-plus-grand-serial-killer-le-moustique/|creationTime|2017-08-24T00:26:21Z +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.190.8467|creationDate|2013-04-16 +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.190.8467|tag|http://www.semanlink.net/tag/linked_data_application +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.190.8467|tag|http://www.semanlink.net/tag/collaborative_ontologie_creation +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.190.8467|tag|http://www.semanlink.net/tag/points_of_interest +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.190.8467|comment|The novel mobile application csxPOI (short for: collaborative, semantic, and context-aware points-of-interest) enables its users to collaboratively create, share, and modify semantic points of interest (POI). Semantic POIs describe geographic places with explicit semantic properties of a collaboratively created ontology. As the ontology includes multiple subclassifications and instantiations and as it links to DBpedia, the richness of annotation goes far beyond mere textual annotations such as text. With the intuitive interface of csxPOI, users can easily create, delete, and modify their POIs and those shared by others. Thereby, the users adapt the structure of the ontology underlying the semantic annotations of the POIs. Data mining techniques are employed to cluster and thus improve the quality of the collaboratively created POIs. The semantic POIs and collaborative POI ontology are published as Linked Open Data. 
+http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.190.8467|title|CiteSeerX — Collaborative Creation of Semantic Points of Interest as Linked Data on the Mobile Phone +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.190.8467|creationTime|2013-04-16T15:08:23Z +https://semantic-web.com/|creationDate|2017-10-08 +https://semantic-web.com/|tag|http://www.semanlink.net/tag/semantic_web_company +https://semantic-web.com/|tag|http://www.semanlink.net/tag/poolparty +https://semantic-web.com/|title|Semantic Web Company +https://semantic-web.com/|creationTime|2017-10-08T12:27:46Z +https://docs.angularjs.org/api/ng/service/$q|creationDate|2015-06-12 +https://docs.angularjs.org/api/ng/service/$q|tag|http://www.semanlink.net/tag/angularjs +https://docs.angularjs.org/api/ng/service/$q|tag|http://www.semanlink.net/tag/javascript_promises +https://docs.angularjs.org/api/ng/service/$q|comment|This is an implementation of promises/deferred objects inspired by Kris Kowal's Q. +https://docs.angularjs.org/api/ng/service/$q|title|AngularJS: API: $q +https://docs.angularjs.org/api/ng/service/$q|creationTime|2015-06-12T00:44:15Z +http://www.newstatesman.com/martin-robbins/2012/09/trouble-ted-talks|creationDate|2013-12-23 +http://www.newstatesman.com/martin-robbins/2012/09/trouble-ted-talks|tag|http://www.semanlink.net/tag/ted +http://www.newstatesman.com/martin-robbins/2012/09/trouble-ted-talks|title|The trouble with TED talks +http://www.newstatesman.com/martin-robbins/2012/09/trouble-ted-talks|creationTime|2013-12-23T15:41:59Z +http://www.simonsfoundation.org/quanta/20140624-fluid-tests-hint-at-concrete-quantum-reality/|creationDate|2014-06-29 +http://www.simonsfoundation.org/quanta/20140624-fluid-tests-hint-at-concrete-quantum-reality/|tag|http://www.semanlink.net/tag/mecanique_quantique +http://www.simonsfoundation.org/quanta/20140624-fluid-tests-hint-at-concrete-quantum-reality/|title|Fluid Experiments Support Deterministic “Pilot-Wave” Quantum Theory Simons Foundation +http://www.simonsfoundation.org/quanta/20140624-fluid-tests-hint-at-concrete-quantum-reality/|creationTime|2014-06-29T00:33:44Z +http://pipl.com/|creationDate|2010-04-26 +http://pipl.com/|tag|http://www.semanlink.net/tag/web_search +http://pipl.com/|comment|"""The most comprehensive people search on the web""" +http://pipl.com/|title|Pipl - People Search +http://pipl.com/|creationTime|2010-04-26T13:19:32Z +http://www.semanticuniverse.com/articles-binding-java-objects-rdf.html|creationDate|2009-05-14 +http://www.semanticuniverse.com/articles-binding-java-objects-rdf.html|tag|http://www.semanlink.net/tag/semantic_web_and_oop +http://www.semanticuniverse.com/articles-binding-java-objects-rdf.html|tag|http://www.semanlink.net/tag/jena_dev +http://www.semanticuniverse.com/articles-binding-java-objects-rdf.html|title|Binding Java Objects to RDF +http://www.semanticuniverse.com/articles-binding-java-objects-rdf.html|creationTime|2009-05-14T02:38:10Z +http://blogs.computerworld.com/android/22806/google-knows-nearly-every-wi-fi-password-world|creationDate|2013-09-16 +http://blogs.computerworld.com/android/22806/google-knows-nearly-every-wi-fi-password-world|tag|http://www.semanlink.net/tag/google +http://blogs.computerworld.com/android/22806/google-knows-nearly-every-wi-fi-password-world|tag|http://www.semanlink.net/tag/wifi +http://blogs.computerworld.com/android/22806/google-knows-nearly-every-wi-fi-password-world|tag|http://www.semanlink.net/tag/privacy_and_internet 
+http://blogs.computerworld.com/android/22806/google-knows-nearly-every-wi-fi-password-world|tag|http://www.semanlink.net/tag/passwords
+http://blogs.computerworld.com/android/22806/google-knows-nearly-every-wi-fi-password-world|title|Google knows nearly every Wi-Fi password in the world Computerworld Blogs
+http://blogs.computerworld.com/android/22806/google-knows-nearly-every-wi-fi-password-world|creationTime|2013-09-16T10:04:50Z
+http://www.bbc.com/news/science-environment-29054889|creationDate|2014-09-04
+http://www.bbc.com/news/science-environment-29054889|tag|http://www.semanlink.net/tag/tree_of_life
+http://www.bbc.com/news/science-environment-29054889|tag|http://www.semanlink.net/tag/decouverte_d_especes_inconnues
+http://www.bbc.com/news/science-environment-29054889|title|BBC News - Deep sea 'mushroom' may be new branch of life
+http://www.bbc.com/news/science-environment-29054889|creationTime|2014-09-04T10:35:22Z
+http://internetactu.blog.lemonde.fr/2012/12/21/methodes-agiles-gerer-les-entreprises-comme-des-logiciels/|creationDate|2012-12-22
+http://internetactu.blog.lemonde.fr/2012/12/21/methodes-agiles-gerer-les-entreprises-comme-des-logiciels/|tag|http://www.semanlink.net/tag/methodes_agiles
+http://internetactu.blog.lemonde.fr/2012/12/21/methodes-agiles-gerer-les-entreprises-comme-des-logiciels/|tag|http://www.semanlink.net/tag/automobile
+http://internetactu.blog.lemonde.fr/2012/12/21/methodes-agiles-gerer-les-entreprises-comme-des-logiciels/|title|Méthodes agiles : gérer les entreprises comme des logiciels InternetActu
+http://internetactu.blog.lemonde.fr/2012/12/21/methodes-agiles-gerer-les-entreprises-comme-des-logiciels/|creationTime|2012-12-22T14:46:13Z
+https://www.newyorker.com/magazine/2018/09/17/can-mark-zuckerberg-fix-facebook-before-it-breaks-democracy?utm_campaign=NLP%20News&utm_medium=email&utm_source=Revue%20newsletter|creationDate|2018-09-17
+https://www.newyorker.com/magazine/2018/09/17/can-mark-zuckerberg-fix-facebook-before-it-breaks-democracy?utm_campaign=NLP%20News&utm_medium=email&utm_source=Revue%20newsletter|tag|http://www.semanlink.net/tag/mark_zuckerberg
+https://www.newyorker.com/magazine/2018/09/17/can-mark-zuckerberg-fix-facebook-before-it-breaks-democracy?utm_campaign=NLP%20News&utm_medium=email&utm_source=Revue%20newsletter|tag|http://www.semanlink.net/tag/facebook
+https://www.newyorker.com/magazine/2018/09/17/can-mark-zuckerberg-fix-facebook-before-it-breaks-democracy?utm_campaign=NLP%20News&utm_medium=email&utm_source=Revue%20newsletter|tag|http://www.semanlink.net/tag/fix_it
+https://www.newyorker.com/magazine/2018/09/17/can-mark-zuckerberg-fix-facebook-before-it-breaks-democracy?utm_campaign=NLP%20News&utm_medium=email&utm_source=Revue%20newsletter|tag|http://www.semanlink.net/tag/new_yorker
+https://www.newyorker.com/magazine/2018/09/17/can-mark-zuckerberg-fix-facebook-before-it-breaks-democracy?utm_campaign=NLP%20News&utm_medium=email&utm_source=Revue%20newsletter|title|Can Mark Zuckerberg Fix Facebook Before It Breaks Democracy? The New Yorker
+https://www.newyorker.com/magazine/2018/09/17/can-mark-zuckerberg-fix-facebook-before-it-breaks-democracy?utm_campaign=NLP%20News&utm_medium=email&utm_source=Revue%20newsletter|creationTime|2018-09-17T13:00:18Z
+http://karpathy.github.io/2015/11/14/ai/|creationDate|2017-06-08
+http://karpathy.github.io/2015/11/14/ai/|tag|http://www.semanlink.net/tag/artificial_intelligence
+http://karpathy.github.io/2015/11/14/ai/|tag|http://www.semanlink.net/tag/science_fiction
+http://karpathy.github.io/2015/11/14/ai/|tag|http://www.semanlink.net/tag/andrej_karpathy
+http://karpathy.github.io/2015/11/14/ai/|title|Short Story on AI: A Cognitive Discontinuity.
+http://karpathy.github.io/2015/11/14/ai/|creationTime|2017-06-08T16:06:15Z
+http://data.semanticweb.org/conference/eswc/2008/html|creationDate|2008-05-29
+http://data.semanticweb.org/conference/eswc/2008/html|tag|http://www.semanlink.net/tag/chris_bizer
+http://data.semanticweb.org/conference/eswc/2008/html|tag|http://www.semanlink.net/tag/eswc
+http://data.semanticweb.org/conference/eswc/2008/html|tag|http://www.semanlink.net/tag/linked_data
+http://data.semanticweb.org/conference/eswc/2008/html|tag|http://www.semanlink.net/tag/richard_cyganiak
+http://data.semanticweb.org/conference/eswc/2008/html|comment|This page provides an overview about different access mechanisms to the RDF dataset about the 5th European Semantic Web Conference (ESWC2008) and explains how the dataset can be used within different Semantic Web applications.
+http://data.semanticweb.org/conference/eswc/2008/html|title|ESWC2008 Conference Data
+http://data.semanticweb.org/conference/eswc/2008/html|creationTime|2008-05-29T21:00:40Z
+http://www.christian-faure.net/2011/04/18/sens-et-enjeux-des-modeles-de-stockage-et-dacces-aux-donnees/|creationDate|2016-02-22
+http://www.christian-faure.net/2011/04/18/sens-et-enjeux-des-modeles-de-stockage-et-dacces-aux-donnees/|tag|http://www.semanlink.net/tag/data_model
+http://www.christian-faure.net/2011/04/18/sens-et-enjeux-des-modeles-de-stockage-et-dacces-aux-donnees/|tag|http://www.semanlink.net/tag/christian_faure
+http://www.christian-faure.net/2011/04/18/sens-et-enjeux-des-modeles-de-stockage-et-dacces-aux-donnees/|tag|http://www.semanlink.net/tag/nosql_vs_sql
+http://www.christian-faure.net/2011/04/18/sens-et-enjeux-des-modeles-de-stockage-et-dacces-aux-donnees/|title|Sens et enjeux des modèles de stockage et d’accès aux données
+http://www.christian-faure.net/2011/04/18/sens-et-enjeux-des-modeles-de-stockage-et-dacces-aux-donnees/|creationTime|2016-02-22T13:55:34Z
+https://support.google.com/webmasters/answer/146898|creationDate|2013-07-06
+https://support.google.com/webmasters/answer/146898|tag|http://www.semanlink.net/tag/schema_org
+https://support.google.com/webmasters/answer/146898|tag|http://www.semanlink.net/tag/rdfa
+https://support.google.com/webmasters/answer/146898|title|schema.org: About RDFa - Webmaster Tools Help
+https://support.google.com/webmasters/answer/146898|creationTime|2013-07-06T01:12:22Z
+http://www.businessweek.com/innovate/content/sep2006/id20060907_525435.htm|creationDate|2006-09-12
+http://www.businessweek.com/innovate/content/sep2006/id20060907_525435.htm|tag|http://www.semanlink.net/tag/mit
+http://www.businessweek.com/innovate/content/sep2006/id20060907_525435.htm|tag|http://www.semanlink.net/tag/robotique
+http://www.businessweek.com/innovate/content/sep2006/id20060907_525435.htm|tag|http://www.semanlink.net/tag/jeux
+http://www.businessweek.com/innovate/content/sep2006/id20060907_525435.htm|tag|http://www.semanlink.net/tag/lego +http://www.businessweek.com/innovate/content/sep2006/id20060907_525435.htm|title|Invasion of the DIY Robots +http://informationweek.com/news/showArticle.jhtml?articleID=201805939|creationDate|2007-09-13 +http://informationweek.com/news/showArticle.jhtml?articleID=201805939|tag|http://www.semanlink.net/tag/fair_use +http://informationweek.com/news/showArticle.jhtml?articleID=201805939|tag|http://www.semanlink.net/tag/copyright +http://informationweek.com/news/showArticle.jhtml?articleID=201805939|comment|"By one measure -- ""value added,"" which the report defines as ""an industry's gross output minus its purchased intermediate inputs"" -- the fair use economy is greater than the copyright economy." +http://informationweek.com/news/showArticle.jhtml?articleID=201805939|title|Fair Use Worth More to Economy Than Copyright, CCIA Says +http://informationweek.com/news/showArticle.jhtml?articleID=201805939|creationTime|2007-09-13T22:23:25Z +https://www.scripted.com/scripted-updates/nlp-hacking-in-python|creationDate|2017-07-10 +https://www.scripted.com/scripted-updates/nlp-hacking-in-python|tag|http://www.semanlink.net/tag/python_sample_code +https://www.scripted.com/scripted-updates/nlp-hacking-in-python|tag|http://www.semanlink.net/tag/nlp_sample_code +https://www.scripted.com/scripted-updates/nlp-hacking-in-python|tag|http://www.semanlink.net/tag/tutorial +https://www.scripted.com/scripted-updates/nlp-hacking-in-python|tag|http://www.semanlink.net/tag/python_nlp +https://www.scripted.com/scripted-updates/nlp-hacking-in-python|title|Teaching a Computer to Read: - Scripted +https://www.scripted.com/scripted-updates/nlp-hacking-in-python|creationTime|2017-07-10T18:32:29Z +http://www.jaxa.jp/press/2007/11/20071107_kaguya_movie_e.html|creationDate|2007-11-09 +http://www.jaxa.jp/press/2007/11/20071107_kaguya_movie_e.html|tag|http://www.semanlink.net/tag/kaguya +http://www.jaxa.jp/press/2007/11/20071107_kaguya_movie_e.html|title|JAXA Moving image of the Moon shot by the HDTV camera of the KAGUYA (SELENE) +http://www.jaxa.jp/press/2007/11/20071107_kaguya_movie_e.html|creationTime|2007-11-09T13:24:24Z +http://java.sun.com/products/jfc/tsc/articles/bookmarks/|creationDate|2005-03-05 +http://java.sun.com/products/jfc/tsc/articles/bookmarks/|tag|http://www.semanlink.net/tag/swing +http://java.sun.com/products/jfc/tsc/articles/bookmarks/|tag|http://www.semanlink.net/tag/html_parsing +http://java.sun.com/products/jfc/tsc/articles/bookmarks/|title|The Swing HTML Parser - Parsing a Netscape Navigator Bookmarks File +https://radimrehurek.com/gensim/models/word2vec.html|creationDate|2017-06-01 +https://radimrehurek.com/gensim/models/word2vec.html|tag|http://www.semanlink.net/tag/word2vec +https://radimrehurek.com/gensim/models/word2vec.html|tag|http://www.semanlink.net/tag/gensim +https://radimrehurek.com/gensim/models/word2vec.html|title|gensim: models.word2vec – Deep learning with word2vec +https://radimrehurek.com/gensim/models/word2vec.html|creationTime|2017-06-01T13:05:30Z +http://chillyinside.com/blog/?p=15|creationDate|2006-05-26 +http://chillyinside.com/blog/?p=15|tag|http://www.semanlink.net/tag/jena_user_conference +http://chillyinside.com/blog/?p=15|tag|http://www.semanlink.net/tag/linkto_semanlink +http://chillyinside.com/blog/?p=15|title|chillyinside.com » Blog Archive » 2006 Jena User Conference 
+http://www.semanticuniverse.com/articles-putting-powder-work.html-0|creationDate|2010-07-01 +http://www.semanticuniverse.com/articles-putting-powder-work.html-0|tag|http://www.semanlink.net/tag/powder +http://www.semanticuniverse.com/articles-putting-powder-work.html-0|title|Putting POWDER to Work Semantic Universe +http://www.semanticuniverse.com/articles-putting-powder-work.html-0|creationTime|2010-07-01T16:11:51Z +https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf|creationDate|2018-09-29 +https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf|tag|http://www.semanlink.net/tag/guillaume_lample +https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf|tag|http://www.semanlink.net/tag/machine_translation +https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf|tag|http://www.semanlink.net/tag/unsupervised_machine_translation +https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf|tag|http://www.semanlink.net/tag/nlp_facebook +https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf|tag|http://www.semanlink.net/tag/slides +https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf|comment|slides présentées au [Paris NLP meetup](/doc/?uri=https%3A%2F%2Fwww.meetup.com%2Ffr-FR%2FParis-NLP%2Fevents%2Fxzstdqyxmbjc%2F) +https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf|relatedDoc|https://www.meetup.com/fr-FR/Paris-NLP/events/xzstdqyxmbjc/ +https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf|title|Unsupervised Machine Translation. G. Lample (slides) +https://nlpparis.files.wordpress.com/2018/09/talk_meetup_nlp_guillaume_lample.pdf|creationTime|2018-09-29T10:29:24Z +http://www.icmu.org/icmu2012/papers/FP-5.pdf|creationDate|2013-03-21 +http://www.icmu.org/icmu2012/papers/FP-5.pdf|tag|http://www.semanlink.net/tag/destination_prediction +http://www.icmu.org/icmu2012/papers/FP-5.pdf|title|A Destination Prediction Method Based on Behavioral Pattern Analysis of Nonperiodic Position Logs. 
+http://www.icmu.org/icmu2012/papers/FP-5.pdf|creationTime|2013-03-21T18:03:04Z +http://www.ldh-toulon.net/|creationDate|2007-04-29 +http://www.ldh-toulon.net/|tag|http://www.semanlink.net/tag/droits_de_l_homme +http://www.ldh-toulon.net/|title|Ligue des droits de l'Homme- [LDH-Toulon] +http://www.ldh-toulon.net/|creationTime|2007-04-29T23:51:02Z +http://www.facebook.com/profile.php?id=705574873|creationDate|2007-07-06 +http://www.facebook.com/profile.php?id=705574873|tag|http://www.semanlink.net/tag/facebook +http://www.facebook.com/profile.php?id=705574873|tag|http://www.semanlink.net/tag/fps +http://www.facebook.com/profile.php?id=705574873|title|Facebook François-Paul Servant +http://www.facebook.com/profile.php?id=705574873|creationTime|2007-07-06T21:00:00Z +https://class.coursera.org/wh1300-002/class|creationDate|2013-10-09 +https://class.coursera.org/wh1300-002/class|tag|http://www.semanlink.net/tag/a_history_of_the_world_since_1300 +https://class.coursera.org/wh1300-002/class|title|Announcements A History of the World since 1300 +https://class.coursera.org/wh1300-002/class|creationTime|2013-10-09T21:45:35Z +http://www.infosdelaplanete.org/5931/le-jatropha-curcas-comme-alternative-dans-le-sahel-burkinabe.html|creationDate|2010-06-08 +http://www.infosdelaplanete.org/5931/le-jatropha-curcas-comme-alternative-dans-le-sahel-burkinabe.html|tag|http://www.semanlink.net/tag/agriculture_africaine +http://www.infosdelaplanete.org/5931/le-jatropha-curcas-comme-alternative-dans-le-sahel-burkinabe.html|tag|http://www.semanlink.net/tag/burkina_faso +http://www.infosdelaplanete.org/5931/le-jatropha-curcas-comme-alternative-dans-le-sahel-burkinabe.html|tag|http://www.semanlink.net/tag/sahel +http://www.infosdelaplanete.org/5931/le-jatropha-curcas-comme-alternative-dans-le-sahel-burkinabe.html|title|Infos de la Planète - Le jatropha curcas comme alternative dans le sahel burkinabé - Sidwaya (Burkina Faso) - 2010-04-28 +http://www.infosdelaplanete.org/5931/le-jatropha-curcas-comme-alternative-dans-le-sahel-burkinabe.html|creationTime|2010-06-08T22:08:38Z +https://blog.insightdatascience.com/using-bert-for-state-of-the-art-pre-training-for-natural-language-processing-1d87142c29e7|creationDate|2019-02-14 +https://blog.insightdatascience.com/using-bert-for-state-of-the-art-pre-training-for-natural-language-processing-1d87142c29e7|tag|http://www.semanlink.net/tag/bert +https://blog.insightdatascience.com/using-bert-for-state-of-the-art-pre-training-for-natural-language-processing-1d87142c29e7|tag|http://www.semanlink.net/tag/howto +https://blog.insightdatascience.com/using-bert-for-state-of-the-art-pre-training-for-natural-language-processing-1d87142c29e7|title|Using BERT for state-of-the-art pre-training for natural language processing +https://blog.insightdatascience.com/using-bert-for-state-of-the-art-pre-training-for-natural-language-processing-1d87142c29e7|creationTime|2019-02-14T16:45:56Z +http://m3pe.org/semperwiki/|creationDate|2005-11-01 +http://m3pe.org/semperwiki/|tag|http://www.semanlink.net/tag/semantic_wiki +http://m3pe.org/semperwiki/|comment|SemperWiki is an open-source semantic personal Wiki for Gnome. 
+http://m3pe.org/semperwiki/|title|SemperWiki
+https://www.coursera.org/course/datasci|creationDate|2013-03-29
+https://www.coursera.org/course/datasci|tag|http://www.semanlink.net/tag/coursera_introduction_to_data_science
+https://www.coursera.org/course/datasci|title|Introduction to Data Science Coursera
+https://www.coursera.org/course/datasci|creationTime|2013-03-29T01:22:13Z
+https://www.youtube.com/watch?v=e-5obm1G_FY|creationDate|2017-01-04
+https://www.youtube.com/watch?v=e-5obm1G_FY|tag|http://www.semanlink.net/tag/youtube_video
+https://www.youtube.com/watch?v=e-5obm1G_FY|tag|http://www.semanlink.net/tag/functional_programming
+https://www.youtube.com/watch?v=e-5obm1G_FY|title|Anjana Vakil: Learning Functional Programming with JavaScript - JSUnconf 2016 - YouTube
+https://www.youtube.com/watch?v=e-5obm1G_FY|creationTime|2017-01-04T16:24:12Z
+http://www.sciencedaily.com/releases/2014/09/140917131812.htm|creationDate|2014-09-19
+http://www.sciencedaily.com/releases/2014/09/140917131812.htm|tag|http://www.semanlink.net/tag/prehistoire
+http://www.sciencedaily.com/releases/2014/09/140917131812.htm|tag|http://www.semanlink.net/tag/genetique_humaine
+http://www.sciencedaily.com/releases/2014/09/140917131812.htm|tag|http://www.semanlink.net/tag/histoire_de_l_europe
+http://www.sciencedaily.com/releases/2014/09/140917131812.htm|title|New branch added to European family tree: Europeans descended from at least 3, not 2, groups of ancient humans -- ScienceDaily
+http://www.sciencedaily.com/releases/2014/09/140917131812.htm|creationTime|2014-09-19T22:14:51Z
+http://www.w3.org/2012/08/web-and-automotive/Overview.html|creationDate|2012-11-23
+http://www.w3.org/2012/08/web-and-automotive/Overview.html|tag|http://www.semanlink.net/tag/workshop
+http://www.w3.org/2012/08/web-and-automotive/Overview.html|tag|http://www.semanlink.net/tag/automobile_and_w3c
+http://www.w3.org/2012/08/web-and-automotive/Overview.html|title|Shift into High Gear on the Web : W3C Web and Automotive Workshop - 14-15 November 2012
+http://www.w3.org/2012/08/web-and-automotive/Overview.html|creationTime|2012-11-23T13:46:32Z
+http://www.w3.org/blog/data/2014/01/06/vocabularies-at-w3c/|creationDate|2014-01-06
+http://www.w3.org/blog/data/2014/01/06/vocabularies-at-w3c/|tag|http://www.semanlink.net/tag/w3c_data_activity
+http://www.w3.org/blog/data/2014/01/06/vocabularies-at-w3c/|title|Vocabularies at W3C W3C Data Activity
+http://www.w3.org/blog/data/2014/01/06/vocabularies-at-w3c/|creationTime|2014-01-06T13:50:15Z
+http://fr.slideshare.net/RobVesse/quadrupling-your-elephants-rdf-and-the-hadoop-ecosystem|creationDate|2014-11-19
+http://fr.slideshare.net/RobVesse/quadrupling-your-elephants-rdf-and-the-hadoop-ecosystem|tag|http://www.semanlink.net/tag/rdf
+http://fr.slideshare.net/RobVesse/quadrupling-your-elephants-rdf-and-the-hadoop-ecosystem|tag|http://www.semanlink.net/tag/hadoop
+http://fr.slideshare.net/RobVesse/quadrupling-your-elephants-rdf-and-the-hadoop-ecosystem|title|Quadrupling your elephants - RDF and the Hadoop ecosystem
+http://fr.slideshare.net/RobVesse/quadrupling-your-elephants-rdf-and-the-hadoop-ecosystem|creationTime|2014-11-19T16:59:50Z
+http://piketty.pse.ens.fr/files/Piketty2013Capital21c.pdf|creationDate|2015-01-01
+http://piketty.pse.ens.fr/files/Piketty2013Capital21c.pdf|tag|http://www.semanlink.net/tag/thomas_piketty
+http://piketty.pse.ens.fr/files/Piketty2013Capital21c.pdf|tag|http://www.semanlink.net/tag/slides
+http://piketty.pse.ens.fr/files/Piketty2013Capital21c.pdf|title|Le capital au 21eme siècle - Slides
+http://piketty.pse.ens.fr/files/Piketty2013Capital21c.pdf|creationTime|2015-01-01T18:06:51Z
+http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0298.html|creationDate|2014-04-30
+http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0298.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org
+http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0298.html|tag|http://www.semanlink.net/tag/fps_post
+http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0298.html|tag|http://www.semanlink.net/tag/hepp_s_propertyvalue
+http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0298.html|title|Re: Generic Property-Value Proposal for Schema.org from Francois-Paul Servant on 2014-04-30 (public-vocabs@w3.org from April 2014)
+http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0298.html|creationTime|2014-04-30T17:20:29Z
+https://theclevermachine.wordpress.com/|creationDate|2016-09-17
+https://theclevermachine.wordpress.com/|tag|http://www.semanlink.net/tag/blog
+https://theclevermachine.wordpress.com/|tag|http://www.semanlink.net/tag/computational_neuroscience
+https://theclevermachine.wordpress.com/|tag|http://www.semanlink.net/tag/neuroscience_and_ai
+https://theclevermachine.wordpress.com/|title|The Clever Machine Topics in Computational Neuroscience & Machine Learning
+https://theclevermachine.wordpress.com/|creationTime|2016-09-17T18:39:49Z
+http://payswarm.com|creationDate|2010-12-03
+http://payswarm.com|tag|http://www.semanlink.net/tag/micropayments_on_the_web
+http://payswarm.com|title|Rewarding 'awesome' on the Web. The Universal Payment Standard.
+http://payswarm.com|creationTime|2010-12-03T12:01:05Z
+http://markets.businessinsider.com/news/stocks/bill-gates-artificial-intellegence-doesnt-think-ai-taking-everyones-jobs-is-a-bad-thing-2018-1-1014021350|creationDate|2018-02-11
+http://markets.businessinsider.com/news/stocks/bill-gates-artificial-intellegence-doesnt-think-ai-taking-everyones-jobs-is-a-bad-thing-2018-1-1014021350|tag|http://www.semanlink.net/tag/jobbotization
+http://markets.businessinsider.com/news/stocks/bill-gates-artificial-intellegence-doesnt-think-ai-taking-everyones-jobs-is-a-bad-thing-2018-1-1014021350|tag|http://www.semanlink.net/tag/bill_gates
+http://markets.businessinsider.com/news/stocks/bill-gates-artificial-intellegence-doesnt-think-ai-taking-everyones-jobs-is-a-bad-thing-2018-1-1014021350|title|Bill Gates thinks AI taking everyone's jobs could be a good thing (MSFT)
+http://markets.businessinsider.com/news/stocks/bill-gates-artificial-intellegence-doesnt-think-ai-taking-everyones-jobs-is-a-bad-thing-2018-1-1014021350|creationTime|2018-02-11T18:26:15Z
+http://sentientdevelopments.blogspot.com/2007/08/fermi-paradox-back-with-vengeance.html|creationDate|2007-08-08
+http://sentientdevelopments.blogspot.com/2007/08/fermi-paradox-back-with-vengeance.html|tag|http://www.semanlink.net/tag/fermi_paradox
+http://sentientdevelopments.blogspot.com/2007/08/fermi-paradox-back-with-vengeance.html|title|Sentient Developments: The Fermi Paradox: Back with a vengeance
+http://sentientdevelopments.blogspot.com/2007/08/fermi-paradox-back-with-vengeance.html|creationTime|2007-08-08T17:32:43Z
+http://www.pbs.org/wgbh/nova/physics/fabric-of-cosmos.html|creationDate|2011-11-19
+http://www.pbs.org/wgbh/nova/physics/fabric-of-cosmos.html|tag|http://www.semanlink.net/tag/cosmologie
+http://www.pbs.org/wgbh/nova/physics/fabric-of-cosmos.html|comment|Surprising clues indicate that space is very much something and not nothing
+http://www.pbs.org/wgbh/nova/physics/fabric-of-cosmos.html|title|NOVA The Fabric of the Cosmos +http://www.pbs.org/wgbh/nova/physics/fabric-of-cosmos.html|creationTime|2011-11-19T13:36:08Z +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-11.pdf|creationDate|2013-04-25 +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-11.pdf|tag|http://www.semanlink.net/tag/configuration_ontology +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-11.pdf|tag|http://www.semanlink.net/tag/fps_ldow_2013 +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-11.pdf|tag|http://www.semanlink.net/tag/configuration_as_linked_data +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-11.pdf|title|Describing Customizable Products on the Web of Data, LDOW 2013 +http://events.linkeddata.org/ldow2013/papers/ldow2013-paper-11.pdf|creationTime|2013-04-25T23:56:38Z +http://www.w3.org/DesignIssues/CloudStorage.html|creationDate|2011-03-08 +http://www.w3.org/DesignIssues/CloudStorage.html|tag|http://www.semanlink.net/tag/social_web +http://www.w3.org/DesignIssues/CloudStorage.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/DesignIssues/CloudStorage.html|tag|http://www.semanlink.net/tag/webid +http://www.w3.org/DesignIssues/CloudStorage.html|tag|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.w3.org/DesignIssues/CloudStorage.html|tag|http://www.semanlink.net/tag/cloud +http://www.w3.org/DesignIssues/CloudStorage.html|comment|"There is an architecture in which a few existing or Web protocols are gathered together with some glue to make a world wide system in which applications (desktop or Web Application) can work on top of a layer of commodity read-write storage. Crucial design issues are that principals (users) and groups are identifies by URIs, and so are global in scope, and that elements of storage are access controlled using those global identifiers. The result is that storage becomes a commodity, independent of the application running on it. +" +http://www.w3.org/DesignIssues/CloudStorage.html|title|Socially aware cloud storage - Design Issues +http://www.w3.org/DesignIssues/CloudStorage.html|creationTime|2011-03-08T09:04:05Z +http://del.icio.us/gambina|creationDate|2005-04-28 +http://del.icio.us/gambina|tag|http://www.semanlink.net/tag/del_icio_us +http://del.icio.us/gambina|title|My delicious : http://del.icio.us/gambina +http://prdchroniques.blog.lemonde.fr/2007/11/14/comment-prononcez-vous-colonna/|creationDate|2007-11-14 +http://prdchroniques.blog.lemonde.fr/2007/11/14/comment-prononcez-vous-colonna/|tag|http://www.semanlink.net/tag/corse +http://prdchroniques.blog.lemonde.fr/2007/11/14/comment-prononcez-vous-colonna/|tag|http://www.semanlink.net/tag/justice +http://prdchroniques.blog.lemonde.fr/2007/11/14/comment-prononcez-vous-colonna/|title|Chroniques judiciaires » Blog Archive » Comment prononcez-vous Colonna? 
+http://prdchroniques.blog.lemonde.fr/2007/11/14/comment-prononcez-vous-colonna/|creationTime|2007-11-14T21:48:30Z +http://blog.semantic-web.at/2011/10/17/geological-survey-austria-launches-thesaurus-project/|creationDate|2011-10-18 +http://blog.semantic-web.at/2011/10/17/geological-survey-austria-launches-thesaurus-project/|tag|http://www.semanlink.net/tag/thesaurus +http://blog.semantic-web.at/2011/10/17/geological-survey-austria-launches-thesaurus-project/|tag|http://www.semanlink.net/tag/skos +http://blog.semantic-web.at/2011/10/17/geological-survey-austria-launches-thesaurus-project/|tag|http://www.semanlink.net/tag/geologie +http://blog.semantic-web.at/2011/10/17/geological-survey-austria-launches-thesaurus-project/|title|Geological Survey Austria launches thesaurus project The Semantic Puzzle +http://blog.semantic-web.at/2011/10/17/geological-survey-austria-launches-thesaurus-project/|creationTime|2011-10-18T00:35:52Z +http://www.w3.org/2001/sw/sweo/public/UseCases/|creationDate|2007-04-28 +http://www.w3.org/2001/sw/sweo/public/UseCases/|tag|http://www.semanlink.net/tag/ivan_herman +http://www.w3.org/2001/sw/sweo/public/UseCases/|tag|http://www.semanlink.net/tag/semantic_web_use_cases +http://www.w3.org/2001/sw/sweo/public/UseCases/|tag|http://www.semanlink.net/tag/sweo_interest_group +http://www.w3.org/2001/sw/sweo/public/UseCases/|title|Semantic Web Education and Outreach Interest Group Case Studies and Use Cases +http://www.w3.org/2001/sw/sweo/public/UseCases/|creationTime|2007-04-28T16:49:24Z +http://www.information-age.com/channels/information-management/news/2111803/talis-shuts-down-semantic-web-operations.thtml|creationDate|2012-07-11 +http://www.information-age.com/channels/information-management/news/2111803/talis-shuts-down-semantic-web-operations.thtml|tag|http://www.semanlink.net/tag/talis +http://www.information-age.com/channels/information-management/news/2111803/talis-shuts-down-semantic-web-operations.thtml|tag|http://www.semanlink.net/tag/linked_data +http://www.information-age.com/channels/information-management/news/2111803/talis-shuts-down-semantic-web-operations.thtml|tag|http://www.semanlink.net/tag/gartner +http://www.information-age.com/channels/information-management/news/2111803/talis-shuts-down-semantic-web-operations.thtml|comment|"We feel that there isn't and won't be an addressable market that coalesces around the technologies themselves. Rather, they are being absorbed into mainstream use.
+Both data.gov.uk and the BBC were highlighted by analyst company Gartner in a recent research report that described linked data as ""the best way of opening, integrating and sharing data to meet the needs of the evolving web"".
+
+"
+http://www.information-age.com/channels/information-management/news/2111803/talis-shuts-down-semantic-web-operations.thtml|title|Talis shuts down semantic web operations 
+http://www.information-age.com/channels/information-management/news/2111803/talis-shuts-down-semantic-web-operations.thtml|creationTime|2012-07-11T21:03:01Z
+http://www.dailyrazor.com/java/overview.php|creationDate|2005-11-05
+http://www.dailyrazor.com/java/overview.php|tag|http://www.semanlink.net/tag/isp_servlet_hosting
+http://www.dailyrazor.com/java/overview.php|title|DailyRazor Hosting - Advanced Java Hosting, JSP Hosting, Servlets, ASP.NET, ASP 3.0, PHP Hosting
+http://aloha-editor.org/|creationDate|2012-08-06
+http://aloha-editor.org/|tag|http://www.semanlink.net/tag/html5
+http://aloha-editor.org/|tag|http://www.semanlink.net/tag/html_editor
+http://aloha-editor.org/|title|Aloha Editor - HTML5 WYSIWYG Editor
+http://aloha-editor.org/|creationTime|2012-08-06T14:29:12Z
+http://denisnddo.free.fr/html/zarma.htm|creationDate|2007-09-19
+http://denisnddo.free.fr/html/zarma.htm|tag|http://www.semanlink.net/tag/jerma
+http://denisnddo.free.fr/html/zarma.htm|title|Notions élémentaires de Zarma
+http://denisnddo.free.fr/html/zarma.htm|creationTime|2007-09-19T23:07:25Z
+http://environment.newscientist.com/article.ns?id=dn12433|creationDate|2007-08-08
+http://environment.newscientist.com/article.ns?id=dn12433|tag|http://www.semanlink.net/tag/curiosite_naturelle
+http://environment.newscientist.com/article.ns?id=dn12433|tag|http://www.semanlink.net/tag/glaciologie
+http://environment.newscientist.com/article.ns?id=dn12433|tag|http://www.semanlink.net/tag/bacteries
+http://environment.newscientist.com/article.ns?id=dn12433|title|Eight-million-year-old bug is alive and growing - New Scientist Environment
+http://environment.newscientist.com/article.ns?id=dn12433|creationTime|2007-08-08T17:35:00Z
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-01.pdf|creationDate|2012-04-16
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-01.pdf|tag|http://www.semanlink.net/tag/ldow2012
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-01.pdf|tag|http://www.semanlink.net/tag/linked_data
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-01.pdf|tag|http://www.semanlink.net/tag/multimedia_ld
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-01.pdf|title|Synote: Weaving Media Fragments and Linked Data
+http://events.linkeddata.org/ldow2012/papers/ldow2012-paper-01.pdf|creationTime|2012-04-16T09:28:44Z
+http://dannyayers.com/archives/2005/10/03/semantic-web-starting-points/|creationDate|2005-10-04
+http://dannyayers.com/archives/2005/10/03/semantic-web-starting-points/|tag|http://www.semanlink.net/tag/links
+http://dannyayers.com/archives/2005/10/03/semantic-web-starting-points/|tag|http://www.semanlink.net/tag/semantic_web_introduction
+http://dannyayers.com/archives/2005/10/03/semantic-web-starting-points/|title|Danny Ayers, Raw Blog : » Semantic Web Starting Points
+http://semanticweb.com/applying-semantic-technology-to-big-data_b28569|creationDate|2012-05-01
+http://semanticweb.com/applying-semantic-technology-to-big-data_b28569|tag|http://www.semanlink.net/tag/big_data
+http://semanticweb.com/applying-semantic-technology-to-big-data_b28569|title|Applying Semantic Technology to Big Data - semanticweb.com
+http://semanticweb.com/applying-semantic-technology-to-big-data_b28569|creationTime|2012-05-01T11:54:34Z
+http://chemicalsemantics.com/|creationDate|2013-04-11
+http://chemicalsemantics.com/|tag|http://www.semanlink.net/tag/chimie
+http://chemicalsemantics.com/|tag|http://www.semanlink.net/tag/mirek_sopek
+http://chemicalsemantics.com/|title|Chemical Semantics
+http://chemicalsemantics.com/|creationTime|2013-04-11T09:06:44Z
+http://www.robopolis.com/produit/280/20/Robots-programmables/Lego-Mindstorms---Robotics-Invention-System-20.php|creationDate|2006-05-26
+http://www.robopolis.com/produit/280/20/Robots-programmables/Lego-Mindstorms---Robotics-Invention-System-20.php|tag|http://www.semanlink.net/tag/mit
+http://www.robopolis.com/produit/280/20/Robots-programmables/Lego-Mindstorms---Robotics-Invention-System-20.php|tag|http://www.semanlink.net/tag/robotique
+http://www.robopolis.com/produit/280/20/Robots-programmables/Lego-Mindstorms---Robotics-Invention-System-20.php|tag|http://www.semanlink.net/tag/lego
+http://www.robopolis.com/produit/280/20/Robots-programmables/Lego-Mindstorms---Robotics-Invention-System-20.php|comment|Développé au MIT pour Lego, le Robotic Invention System n'est pas un robot, mais plutôt un ensemble de pièces pour construire des robots comme dans la philosophie LEGO.
+http://www.robopolis.com/produit/280/20/Robots-programmables/Lego-Mindstorms---Robotics-Invention-System-20.php|title|Lego Mindstorms
+http://www.aventureforth.com/2005/09/16/top-10-ajax-applications-part-2/|creationDate|2005-10-05
+http://www.aventureforth.com/2005/09/16/top-10-ajax-applications-part-2/|tag|http://www.semanlink.net/tag/ajax_applications
+http://www.aventureforth.com/2005/09/16/top-10-ajax-applications-part-2/|title|A Venture Forth » Blog Archive » Top 10 Ajax Applications (Part 2)
+http://www.lemonde.fr/idees/article/2012/07/23/tombouctou-ou-notre-pari-de-civilisation_1736511_3232.html|creationDate|2012-07-23
+http://www.lemonde.fr/idees/article/2012/07/23/tombouctou-ou-notre-pari-de-civilisation_1736511_3232.html|tag|http://www.semanlink.net/tag/tombouctou
+http://www.lemonde.fr/idees/article/2012/07/23/tombouctou-ou-notre-pari-de-civilisation_1736511_3232.html|comment|> Tombouctou n'appartient pas à un pays. Ses vestiges historiques ont dépassé les contours de notre continent, ils sont un bien commun à la communauté des Nations. Chaque individu, quel qu'il soit, détient une parcelle de ce joyau.
+http://www.lemonde.fr/idees/article/2012/07/23/tombouctou-ou-notre-pari-de-civilisation_1736511_3232.html|title|"Tombouctou ou notre ""pari de civilisation"""
+http://www.lemonde.fr/idees/article/2012/07/23/tombouctou-ou-notre-pari-de-civilisation_1736511_3232.html|creationTime|2012-07-23T20:20:10Z
+http://www.lemonde.fr/societe/article/2014/06/06/un-veteran-s-echappe-de-sa-maison-de-retraite-pour-aller-aux-commemorations-du-debarquement_4433952_3224.html|creationDate|2014-06-06
+http://www.lemonde.fr/societe/article/2014/06/06/un-veteran-s-echappe-de-sa-maison-de-retraite-pour-aller-aux-commemorations-du-debarquement_4433952_3224.html|tag|http://www.semanlink.net/tag/liberte_liberte_cherie
+http://www.lemonde.fr/societe/article/2014/06/06/un-veteran-s-echappe-de-sa-maison-de-retraite-pour-aller-aux-commemorations-du-debarquement_4433952_3224.html|tag|http://www.semanlink.net/tag/debarquement
+http://www.lemonde.fr/societe/article/2014/06/06/un-veteran-s-echappe-de-sa-maison-de-retraite-pour-aller-aux-commemorations-du-debarquement_4433952_3224.html|tag|http://www.semanlink.net/tag/vieux
+http://www.lemonde.fr/societe/article/2014/06/06/un-veteran-s-echappe-de-sa-maison-de-retraite-pour-aller-aux-commemorations-du-debarquement_4433952_3224.html|title|Un vétéran s'échappe de sa maison de retraite pour aller aux commémorations du Débarquement
+http://www.lemonde.fr/societe/article/2014/06/06/un-veteran-s-echappe-de-sa-maison-de-retraite-pour-aller-aux-commemorations-du-debarquement_4433952_3224.html|creationTime|2014-06-06T22:56:49Z
+http://petrole.blog.lemonde.fr/2011/10/31/lempire-romain-et-la-societe-dopulence-energetique-un-parallele/|creationDate|2012-04-18
+http://petrole.blog.lemonde.fr/2011/10/31/lempire-romain-et-la-societe-dopulence-energetique-un-parallele/|tag|http://www.semanlink.net/tag/empire_romain
+http://petrole.blog.lemonde.fr/2011/10/31/lempire-romain-et-la-societe-dopulence-energetique-un-parallele/|tag|http://www.semanlink.net/tag/energie
+http://petrole.blog.lemonde.fr/2011/10/31/lempire-romain-et-la-societe-dopulence-energetique-un-parallele/|title|L’empire romain et la société d’opulence énergétique : un parallèle Oil Man
+http://petrole.blog.lemonde.fr/2011/10/31/lempire-romain-et-la-societe-dopulence-energetique-un-parallele/|creationTime|2012-04-18T11:07:43Z
+http://www.lisperati.com/casting.html|creationDate|2010-11-07
+http://www.lisperati.com/casting.html|tag|http://www.semanlink.net/tag/lisp
+http://www.lisperati.com/casting.html|title|Casting SPELs in Lisp
+http://www.lisperati.com/casting.html|creationTime|2010-11-07T14:02:47Z
+https://www.polymer-project.org/0.5/|creationDate|2015-03-10
+https://www.polymer-project.org/0.5/|tag|http://www.semanlink.net/tag/webcomponents
+https://www.polymer-project.org/0.5/|title|Polymer
+https://www.polymer-project.org/0.5/|creationTime|2015-03-10T13:07:31Z
+http://www.semanticweb.com/features/introducing_the_creative_destruction_7_act_play_152454.asp|creationDate|2010-08-30
+http://www.semanticweb.com/features/introducing_the_creative_destruction_7_act_play_152454.asp|tag|http://www.semanlink.net/tag/semantic_web
+http://www.semanticweb.com/features/introducing_the_creative_destruction_7_act_play_152454.asp|tag|http://www.semanlink.net/tag/crise_financiere
+http://www.semanticweb.com/features/introducing_the_creative_destruction_7_act_play_152454.asp|tag|http://www.semanlink.net/tag/disruptive_change
+http://www.semanticweb.com/features/introducing_the_creative_destruction_7_act_play_152454.asp|title|Introducing The Creative Destruction 7 Act Play - Semantic Web
+http://www.semanticweb.com/features/introducing_the_creative_destruction_7_act_play_152454.asp|creationTime|2010-08-30T16:15:30Z
+http://www.ldh-toulon.net/spip.php?article1628|creationDate|2007-04-30
+http://www.ldh-toulon.net/spip.php?article1628|tag|http://www.semanlink.net/tag/videosurveillance
+http://www.ldh-toulon.net/spip.php?article1628|tag|http://www.semanlink.net/tag/grande_bretagne
+http://www.ldh-toulon.net/spip.php?article1628|tag|http://www.semanlink.net/tag/1984
+http://www.ldh-toulon.net/spip.php?article1628|comment|Le rapport souligne que la vidéosurveillance a pris un essor considérable depuis le début de la « Guerre contre le Terrorisme » des Etats-Unis, auxquels est alliée la Grande-Bretagne. Il détaille ce processus de généralisation en quatre phases : d’abord, la surveillance se donne un but précis et public. Puis elle devient une « routine », s’intègre au paysage, avant de se faire « systématique ». Dernière étape : la vidéosurveillance devient « focalisée ». Elle ne se contente plus d’observer, mais cherche d’elle-même des détails, croise les informations, les classe, et les échange. Avec parfois des erreurs.
+http://www.ldh-toulon.net/spip.php?article1628|title|[LDH-Toulon] un tableau accablant de la vidéosurveillance en Grande-Bretagne
+http://www.ldh-toulon.net/spip.php?article1628|creationTime|2007-04-30T01:51:29Z
+http://www.w3.org/2001/sw/sweo/public/UseCases/Volkswagen/|creationDate|2011-10-11
+http://www.w3.org/2001/sw/sweo/public/UseCases/Volkswagen/|tag|http://www.semanlink.net/tag/sweo_interest_group
+http://www.w3.org/2001/sw/sweo/public/UseCases/Volkswagen/|tag|http://www.semanlink.net/tag/automobile_and_w3c
+http://www.w3.org/2001/sw/sweo/public/UseCases/Volkswagen/|tag|http://www.semanlink.net/tag/volkswagen
+http://www.w3.org/2001/sw/sweo/public/UseCases/Volkswagen/|title|Case Study: Contextual Search for Volkswagen and the Automotive Industry
+http://www.w3.org/2001/sw/sweo/public/UseCases/Volkswagen/|creationTime|2011-10-11T00:21:44Z
+http://www.nextinpact.com/news/95754-le-cnnum-flingue-loi-renseignement-devant-conseil-constitutionnel.htm|creationDate|2015-07-11
+http://www.nextinpact.com/news/95754-le-cnnum-flingue-loi-renseignement-devant-conseil-constitutionnel.htm|tag|http://www.semanlink.net/tag/loi_renseignement
+http://www.nextinpact.com/news/95754-le-cnnum-flingue-loi-renseignement-devant-conseil-constitutionnel.htm|comment|la surveillance de masse créée une prison dans l’esprit qui est bien plus subtile mais bien plus efficace pour favoriser la conformité aux normes sociales, bien plus effective que la force physique ne pourra jamais l’être
+http://www.nextinpact.com/news/95754-le-cnnum-flingue-loi-renseignement-devant-conseil-constitutionnel.htm|title|Le CNNum flingue la loi Renseignement devant le Conseil constitutionnel - Next INpact
+http://www.nextinpact.com/news/95754-le-cnnum-flingue-loi-renseignement-devant-conseil-constitutionnel.htm|creationTime|2015-07-11T00:37:20Z
+http://www2012.org/proceedings/companion/p115.pdf|creationDate|2012-04-20
+http://www2012.org/proceedings/companion/p115.pdf|tag|http://www.semanlink.net/tag/linked_data
+http://www2012.org/proceedings/companion/p115.pdf|tag|http://www.semanlink.net/tag/www_2012
+http://www2012.org/proceedings/companion/p115.pdf|tag|http://www.semanlink.net/tag/uri_identity
+http://www2012.org/proceedings/companion/p115.pdf|tag|http://www.semanlink.net/tag/entity_linking
+http://www2012.org/proceedings/companion/p115.pdf|title|From Linked Data to Linked Entities: a Migration Path - Giovanni Bartolomeo, Stefano Salsano
+http://www2012.org/proceedings/companion/p115.pdf|creationTime|2012-04-20T11:58:23Z
+http://www.webreference.com/programming/ajax_tech2/index.html|creationDate|2008-06-14
+http://www.webreference.com/programming/ajax_tech2/index.html|tag|http://www.semanlink.net/tag/cookie
+http://www.webreference.com/programming/ajax_tech2/index.html|tag|http://www.semanlink.net/tag/javascript_tips
+http://www.webreference.com/programming/ajax_tech2/index.html|title|How to use images and cookies to enable client-server communication
+http://www.webreference.com/programming/ajax_tech2/index.html|creationTime|2008-06-14T13:53:30Z
+https://www.lemonde.fr/planete/article/2019/05/03/le-senat-rejette-l-inscription-du-crime-d-ecocide-dans-le-droit-penal_5458028_3244.html|creationDate|2019-05-03
+https://www.lemonde.fr/planete/article/2019/05/03/le-senat-rejette-l-inscription-du-crime-d-ecocide-dans-le-droit-penal_5458028_3244.html|tag|http://www.semanlink.net/tag/ecocide
+https://www.lemonde.fr/planete/article/2019/05/03/le-senat-rejette-l-inscription-du-crime-d-ecocide-dans-le-droit-penal_5458028_3244.html|title|Le Sénat rejette l’inscription du « crime d’écocide » dans le droit pénal
+https://www.lemonde.fr/planete/article/2019/05/03/le-senat-rejette-l-inscription-du-crime-d-ecocide-dans-le-droit-penal_5458028_3244.html|creationTime|2019-05-03T19:40:48Z
+https://github.com/spotify/annoy|creationDate|2018-03-12
+https://github.com/spotify/annoy|tag|http://www.semanlink.net/tag/approximate_nearest_neighbor
+https://github.com/spotify/annoy|tag|http://www.semanlink.net/tag/github_project
+https://github.com/spotify/annoy|title|GitHub - spotify/annoy: Approximate Nearest Neighbors in C++/Python optimized for memory usage and loading/saving to disk
+https://github.com/spotify/annoy|creationTime|2018-03-12T11:13:44Z
+https://www.quantamagazine.org/new-theory-cracks-open-the-black-box-of-deep-learning-20170921/|creationDate|2017-12-30
+https://www.quantamagazine.org/new-theory-cracks-open-the-black-box-of-deep-learning-20170921/|tag|http://www.semanlink.net/tag/dl_why_does_it_work
+https://www.quantamagazine.org/new-theory-cracks-open-the-black-box-of-deep-learning-20170921/|tag|http://www.semanlink.net/tag/information_theory_and_deep_learning
+https://www.quantamagazine.org/new-theory-cracks-open-the-black-box-of-deep-learning-20170921/|comment|A new idea called the “information bottleneck” is helping to explain the puzzling success of today’s artificial-intelligence algorithms — and might also explain how human brains learn.
+https://www.quantamagazine.org/new-theory-cracks-open-the-black-box-of-deep-learning-20170921/|title|New Theory Cracks Open the Black Box of Deep Learning Quanta Magazine +https://www.quantamagazine.org/new-theory-cracks-open-the-black-box-of-deep-learning-20170921/|creationTime|2017-12-30T11:07:53Z +https://bitcoinmagazine.com/21919/decentralist-perspective-bitcoin-might-need-small-blocks/|creationDate|2015-09-14 +https://bitcoinmagazine.com/21919/decentralist-perspective-bitcoin-might-need-small-blocks/|tag|http://www.semanlink.net/tag/bitcoin +https://bitcoinmagazine.com/21919/decentralist-perspective-bitcoin-might-need-small-blocks/|title|The Decentralist Perspective, or Why Bitcoin Might Need Small Blocks – Bitcoin Magazine +https://bitcoinmagazine.com/21919/decentralist-perspective-bitcoin-might-need-small-blocks/|creationTime|2015-09-14T01:08:59Z +http://blog.aylien.com/a-review-of-the-recent-history-of-natural-language-processing/|creationDate|2018-10-02 +http://blog.aylien.com/a-review-of-the-recent-history-of-natural-language-processing/|tag|http://www.semanlink.net/tag/good +http://blog.aylien.com/a-review-of-the-recent-history-of-natural-language-processing/|tag|http://www.semanlink.net/tag/sebastian_ruder +http://blog.aylien.com/a-review-of-the-recent-history-of-natural-language-processing/|tag|http://www.semanlink.net/tag/nlp +http://blog.aylien.com/a-review-of-the-recent-history-of-natural-language-processing/|comment|[slides included here](/doc/?uri=https%3A%2F%2Fdrive.google.com%2Ffile%2Fd%2F15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP%2Fview) +http://blog.aylien.com/a-review-of-the-recent-history-of-natural-language-processing/|relatedDoc|https://drive.google.com/file/d/15ehMIJ7wY9A7RSmyJPNmrBMuC7se0PMP/view +http://blog.aylien.com/a-review-of-the-recent-history-of-natural-language-processing/|title|A Review of the Recent History of Natural Language Processing - AYLIEN +http://blog.aylien.com/a-review-of-the-recent-history-of-natural-language-processing/|creationTime|2018-10-02T10:02:54Z +http://decoupledcms.org/|creationDate|2015-01-30 +http://decoupledcms.org/|tag|http://www.semanlink.net/tag/semantic_cms +http://decoupledcms.org/|title|Decoupling Content Management +http://decoupledcms.org/|creationTime|2015-01-30T07:36:47Z +http://www.readwriteweb.com/archives/is_schemaorg_really_a_google_land_grab.php|creationDate|2011-06-08 +http://www.readwriteweb.com/archives/is_schemaorg_really_a_google_land_grab.php|tag|http://www.semanlink.net/tag/schema_org +http://www.readwriteweb.com/archives/is_schemaorg_really_a_google_land_grab.php|title|Is Schema.org Really a Google Land Grab? 
+http://www.readwriteweb.com/archives/is_schemaorg_really_a_google_land_grab.php|creationTime|2011-06-08T23:16:33Z +http://www.simile-widgets.org/timeline/|creationDate|2012-11-28 +http://www.simile-widgets.org/timeline/|tag|http://www.semanlink.net/tag/simile_timeline +http://www.simile-widgets.org/timeline/|title|SIMILE Widgets Timeline +http://www.simile-widgets.org/timeline/|creationTime|2012-11-28T00:13:00Z +http://www.mnh.si.edu/africanvoices/|creationDate|2005-04-20 +http://www.mnh.si.edu/africanvoices/|tag|http://www.semanlink.net/tag/web_site_design +http://www.mnh.si.edu/africanvoices/|tag|http://www.semanlink.net/tag/afrique +http://maps.google.com|creationDate|2005-04-15 +http://maps.google.com|tag|http://www.semanlink.net/tag/google_maps +https://lejournal.cnrs.fr/articles/crowdsourcing-tous-chercheurs|creationDate|2014-09-16 +https://lejournal.cnrs.fr/articles/crowdsourcing-tous-chercheurs|tag|http://www.semanlink.net/tag/crowd_sourcing +https://lejournal.cnrs.fr/articles/crowdsourcing-tous-chercheurs|tag|http://www.semanlink.net/tag/cnrs +https://lejournal.cnrs.fr/articles/crowdsourcing-tous-chercheurs|title|Crowdsourcing : tous chercheurs ! CNRS le journal +https://lejournal.cnrs.fr/articles/crowdsourcing-tous-chercheurs|creationTime|2014-09-16T16:18:38Z +https://lejournal.cnrs.fr/articles/quand-la-terre-etait-une-boule-de-neige|creationDate|2017-12-07 +https://lejournal.cnrs.fr/articles/quand-la-terre-etait-une-boule-de-neige|tag|http://www.semanlink.net/tag/periodes_glacieres +https://lejournal.cnrs.fr/articles/quand-la-terre-etait-une-boule-de-neige|tag|http://www.semanlink.net/tag/climat +https://lejournal.cnrs.fr/articles/quand-la-terre-etait-une-boule-de-neige|tag|http://www.semanlink.net/tag/geologie +https://lejournal.cnrs.fr/articles/quand-la-terre-etait-une-boule-de-neige|title|Quand la Terre était une boule de neige CNRS Le journal +https://lejournal.cnrs.fr/articles/quand-la-terre-etait-une-boule-de-neige|creationTime|2017-12-07T22:49:44Z +http://www.lemonde.fr/economie/article/2014/04/23/thomas-piketty-les-etats-unis-ont-une-relation-tres-compliquee-avec-les-inegalites_4405523_3234.html|creationDate|2014-04-23 +http://www.lemonde.fr/economie/article/2014/04/23/thomas-piketty-les-etats-unis-ont-une-relation-tres-compliquee-avec-les-inegalites_4405523_3234.html|tag|http://www.semanlink.net/tag/inegalites +http://www.lemonde.fr/economie/article/2014/04/23/thomas-piketty-les-etats-unis-ont-une-relation-tres-compliquee-avec-les-inegalites_4405523_3234.html|tag|http://www.semanlink.net/tag/usa +http://www.lemonde.fr/economie/article/2014/04/23/thomas-piketty-les-etats-unis-ont-une-relation-tres-compliquee-avec-les-inegalites_4405523_3234.html|tag|http://www.semanlink.net/tag/livre_a_lire +http://www.lemonde.fr/economie/article/2014/04/23/thomas-piketty-les-etats-unis-ont-une-relation-tres-compliquee-avec-les-inegalites_4405523_3234.html|tag|http://www.semanlink.net/tag/thomas_piketty +http://www.lemonde.fr/economie/article/2014/04/23/thomas-piketty-les-etats-unis-ont-une-relation-tres-compliquee-avec-les-inegalites_4405523_3234.html|title|Thomas Piketty : « Le retour des inégalités inquiète aux Etats-Unis » +http://www.lemonde.fr/economie/article/2014/04/23/thomas-piketty-les-etats-unis-ont-une-relation-tres-compliquee-avec-les-inegalites_4405523_3234.html|creationTime|2014-04-23T21:58:12Z +https://github.com/ldodds/dowl|creationDate|2012-09-02 +https://github.com/ldodds/dowl|tag|http://www.semanlink.net/tag/leigh_dodds 
+https://github.com/ldodds/dowl|tag|http://www.semanlink.net/tag/owl_tool +https://github.com/ldodds/dowl|tag|http://www.semanlink.net/tag/rdf_schema +https://github.com/ldodds/dowl|tag|http://www.semanlink.net/tag/rdf_owl_documentation_tool +https://github.com/ldodds/dowl|comment|A simple command-line tool for generating HTML documentation from RDFS/OWL schemas +https://github.com/ldodds/dowl|title|ldodds/dowl · GitHub +https://github.com/ldodds/dowl|creationTime|2012-09-02T10:31:57Z +http://berardjean.blog.lemonde.fr/2010/07/05/ernst-wiechert-fluchtversuch-aus-dem-kz-tentative-devasion-du-camp-de-concentration/|creationDate|2016-12-29 +http://berardjean.blog.lemonde.fr/2010/07/05/ernst-wiechert-fluchtversuch-aus-dem-kz-tentative-devasion-du-camp-de-concentration/|tag|http://www.semanlink.net/tag/yves_roth +http://berardjean.blog.lemonde.fr/2010/07/05/ernst-wiechert-fluchtversuch-aus-dem-kz-tentative-devasion-du-camp-de-concentration/|tag|http://www.semanlink.net/tag/kz +http://berardjean.blog.lemonde.fr/2010/07/05/ernst-wiechert-fluchtversuch-aus-dem-kz-tentative-devasion-du-camp-de-concentration/|title|Ernst Wiechert: Fluchtversuch aus dem KZ ( Tentative d’évasion du Camp de concentration) Raison garder! +http://berardjean.blog.lemonde.fr/2010/07/05/ernst-wiechert-fluchtversuch-aus-dem-kz-tentative-devasion-du-camp-de-concentration/|creationTime|2016-12-29T11:45:26Z +http://pro.europeana.eu/web/network/europeana-tech/-/wiki/Main/Market+study+on+technical+options+for+semantic+feature+extraction|creationDate|2014-03-08 +http://pro.europeana.eu/web/network/europeana-tech/-/wiki/Main/Market+study+on+technical+options+for+semantic+feature+extraction|tag|http://www.semanlink.net/tag/semantic_feature_extraction +http://pro.europeana.eu/web/network/europeana-tech/-/wiki/Main/Market+study+on+technical+options+for+semantic+feature+extraction|tag|http://www.semanlink.net/tag/europeana +http://pro.europeana.eu/web/network/europeana-tech/-/wiki/Main/Market+study+on+technical+options+for+semantic+feature+extraction|title|Europeana Professional - Market study on technical options for semantic feature extraction - Europeana Tech +http://pro.europeana.eu/web/network/europeana-tech/-/wiki/Main/Market+study+on+technical+options+for+semantic+feature+extraction|creationTime|2014-03-08T15:35:00Z +http://news.bbc.co.uk/1/shared/spl/hi/picture_gallery/05/africa_night_commuters/html/1.stm|creationDate|2006-09-15 +http://news.bbc.co.uk/1/shared/spl/hi/picture_gallery/05/africa_night_commuters/html/1.stm|tag|http://www.semanlink.net/tag/ouganda +http://news.bbc.co.uk/1/shared/spl/hi/picture_gallery/05/africa_night_commuters/html/1.stm|tag|http://www.semanlink.net/tag/lord_s_resistance_army +http://news.bbc.co.uk/1/shared/spl/hi/picture_gallery/05/africa_night_commuters/html/1.stm|comment|"Every night in northern Uganda, tens of thousands of children, known as night commuters, flow into town centres.
+They come seeking safety in shelters set up by aid agencies, with the Ugandan government unable to end a brutal 18-year war and protect them from rebel attacks. At a centre known as the Arc in Gulu town, hundreds of boys will share the floor."
+http://news.bbc.co.uk/1/shared/spl/hi/picture_gallery/05/africa_night_commuters/html/1.stm|title|BBC NEWS Ouganda: Night commuters
+http://news.bbc.co.uk/1/shared/spl/hi/picture_gallery/05/africa_night_commuters/html/1.stm|source|BBC
+http://blog.newsweek.com/blogs/techtonicshifts/archive/2010/04/22/facebook-f8-internet-open-social-graph-semantic-web-twitter.aspx|creationDate|2010-04-26
+http://blog.newsweek.com/blogs/techtonicshifts/archive/2010/04/22/facebook-f8-internet-open-social-graph-semantic-web-twitter.aspx|tag|http://www.semanlink.net/tag/facebook
+http://blog.newsweek.com/blogs/techtonicshifts/archive/2010/04/22/facebook-f8-internet-open-social-graph-semantic-web-twitter.aspx|title|Facebook's Play to Take Over the Entire Internet - Techtonic Shifts Blog - Newsweek.com
+http://blog.newsweek.com/blogs/techtonicshifts/archive/2010/04/22/facebook-f8-internet-open-social-graph-semantic-web-twitter.aspx|creationTime|2010-04-26T13:08:33Z
+http://www.x-tags.org/|creationDate|2014-01-02
+http://www.x-tags.org/|tag|http://www.semanlink.net/tag/javascript
+http://www.x-tags.org/|tag|http://www.semanlink.net/tag/mozilla
+http://www.x-tags.org/|comment|X-Tag is a small JavaScript library, created and supported by Mozilla, that brings Web Components Custom Element capabilities to all modern browsers.
+http://www.x-tags.org/|title|X-Tag - Web Components Custom Element Polylib
+http://www.x-tags.org/|creationTime|2014-01-02T10:11:52Z
+https://www.nytimes.com/2019/04/06/health/drug-resistant-candida-auris.html?action=click&module=RelatedCoverage&pgtype=Article&region=Footer|creationDate|2019-04-10
+https://www.nytimes.com/2019/04/06/health/drug-resistant-candida-auris.html?action=click&module=RelatedCoverage&pgtype=Article&region=Footer|tag|http://www.semanlink.net/tag/fungal_infections
+https://www.nytimes.com/2019/04/06/health/drug-resistant-candida-auris.html?action=click&module=RelatedCoverage&pgtype=Article&region=Footer|tag|http://www.semanlink.net/tag/drug_resistant_germs
+https://www.nytimes.com/2019/04/06/health/drug-resistant-candida-auris.html?action=click&module=RelatedCoverage&pgtype=Article&region=Footer|title|A Mysterious Infection, Spanning the Globe in a Climate of Secrecy - The New York Times
+https://www.nytimes.com/2019/04/06/health/drug-resistant-candida-auris.html?action=click&module=RelatedCoverage&pgtype=Article&region=Footer|creationTime|2019-04-10T00:00:57Z
+http://schema.org/docs/schema_org_rdfa.html|creationDate|2013-06-13
+http://schema.org/docs/schema_org_rdfa.html|tag|http://www.semanlink.net/tag/schema_org
+http://schema.org/docs/schema_org_rdfa.html|comment|"
+"
+http://schema.org/docs/schema_org_rdfa.html|title|Schema.org core schema as RDFa Lite
+http://schema.org/docs/schema_org_rdfa.html|creationTime|2013-06-13T11:42:06Z
+https://docs.google.com/presentation/d/1O6ozzZHHxGzU-McpvEG09hl7K6oQDd2Taw0FOlnxJc8/preview#slide=id.p|creationDate|2016-01-18
+https://docs.google.com/presentation/d/1O6ozzZHHxGzU-McpvEG09hl7K6oQDd2Taw0FOlnxJc8/preview#slide=id.p|tag|http://www.semanlink.net/tag/google
+https://docs.google.com/presentation/d/1O6ozzZHHxGzU-McpvEG09hl7K6oQDd2Taw0FOlnxJc8/preview#slide=id.p|tag|http://www.semanlink.net/tag/machine_learning_basics
+https://docs.google.com/presentation/d/1O6ozzZHHxGzU-McpvEG09hl7K6oQDd2Taw0FOlnxJc8/preview#slide=id.p|tag|http://www.semanlink.net/tag/machine_learning
+https://docs.google.com/presentation/d/1O6ozzZHHxGzU-McpvEG09hl7K6oQDd2Taw0FOlnxJc8/preview#slide=id.p|tag|http://www.semanlink.net/tag/slides +https://docs.google.com/presentation/d/1O6ozzZHHxGzU-McpvEG09hl7K6oQDd2Taw0FOlnxJc8/preview#slide=id.p|title|Introduction to Machine learning - Google Slides +https://docs.google.com/presentation/d/1O6ozzZHHxGzU-McpvEG09hl7K6oQDd2Taw0FOlnxJc8/preview#slide=id.p|creationTime|2016-01-18T00:10:41Z +http://paolo.evectors.it/stories/entKcollectorWWWW.html|creationDate|2006-01-17 +http://paolo.evectors.it/stories/entKcollectorWWWW.html|tag|http://www.semanlink.net/tag/aggregators +http://paolo.evectors.it/stories/entKcollectorWWWW.html|tag|http://www.semanlink.net/tag/social_content_services +http://paolo.evectors.it/stories/entKcollectorWWWW.html|tag|http://www.semanlink.net/tag/social_software +http://paolo.evectors.it/stories/entKcollectorWWWW.html|comment|"Our main goal is the development of an enterprise news aggregator that leverages the power of shared topics to present new ways of finding and combining the real knowledge in your organization. Goal: a system to organize contents created with weblogs and personal aggregators and allow people to easily browse these contents at any time.
+One of the keys of this system is that there are no pre-existing lists of topics that must be set up and configured. Topics get on the server via RSS feeds while users create them, and they are distributed to all other users within the same cloud via TopicRolls.
+
+The basic idea is to allow for topics and posts to somehow self-organize without any top-down design approach"
+http://paolo.evectors.it/stories/entKcollectorWWWW.html|title|Topics, aggregators, K-collector and other assorted stuff. Paolo Valdemarin Weblog
+http://www.luern.fr/|creationDate|2008-05-08
+http://www.luern.fr/|tag|http://www.semanlink.net/tag/corent
+http://www.luern.fr/|title|Fouilles de Corent
+http://www.luern.fr/|creationTime|2008-05-08T12:15:26Z
+http://blogs.zdnet.com/Google/?p=18|creationDate|2005-11-02
+http://blogs.zdnet.com/Google/?p=18|tag|http://www.semanlink.net/tag/google
+http://blogs.zdnet.com/Google/?p=18|title|Google: Thinking about the future of TV ads - ZDNet.com
+http://www.webrtc.org/home|creationDate|2014-01-08
+http://www.webrtc.org/home|tag|http://www.semanlink.net/tag/javascript
+http://www.webrtc.org/home|tag|http://www.semanlink.net/tag/google
+http://www.webrtc.org/home|tag|http://www.semanlink.net/tag/real_time_communications
+http://www.webrtc.org/home|comment|WebRTC is a free, open project that enables web browsers with Real-Time Communications (RTC) capabilities via simple JavaScript APIs.
+http://www.webrtc.org/home|title|WebRTC
+http://www.webrtc.org/home|creationTime|2014-01-08T14:15:00Z
+https://lejournal.cnrs.fr/dossiers/2018-une-annee-de-science?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1545903944|creationDate|2018-12-27
+https://lejournal.cnrs.fr/dossiers/2018-une-annee-de-science?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1545903944|tag|http://www.semanlink.net/tag/cnrs
+https://lejournal.cnrs.fr/dossiers/2018-une-annee-de-science?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1545903944|tag|http://www.semanlink.net/tag/2018
+https://lejournal.cnrs.fr/dossiers/2018-une-annee-de-science?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1545903944|tag|http://www.semanlink.net/tag/science
+https://lejournal.cnrs.fr/dossiers/2018-une-annee-de-science?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1545903944|title|2018, une année de science CNRS Le journal
+https://lejournal.cnrs.fr/dossiers/2018-une-annee-de-science?utm_term=Autofeed&utm_medium=Social&utm_source=Twitter#Echobox=1545903944|creationTime|2018-12-27T12:01:34Z
+https://lejournal.cnrs.fr/articles/etoiles-a-neutrons-une-fusion-qui-vaut-de-lor|creationDate|2017-10-18
+https://lejournal.cnrs.fr/articles/etoiles-a-neutrons-une-fusion-qui-vaut-de-lor|tag|http://www.semanlink.net/tag/astronomie_multi_signaux
+https://lejournal.cnrs.fr/articles/etoiles-a-neutrons-une-fusion-qui-vaut-de-lor|tag|http://www.semanlink.net/tag/ondes_gravitationnelles
+https://lejournal.cnrs.fr/articles/etoiles-a-neutrons-une-fusion-qui-vaut-de-lor|title|Étoiles à neutrons : une fusion qui vaut de l’or CNRS Le journal
+https://lejournal.cnrs.fr/articles/etoiles-a-neutrons-une-fusion-qui-vaut-de-lor|creationTime|2017-10-18T13:44:05Z
+https://ajolicoeur.wordpress.com/RelativisticGAN/|creationDate|2018-07-05
+https://ajolicoeur.wordpress.com/RelativisticGAN/|tag|http://www.semanlink.net/tag/generative_adversarial_network
+https://ajolicoeur.wordpress.com/RelativisticGAN/|comment|> In this paper, I argue that standard GAN (SGAN) is missing a fundamental property, i.e., training the generator should not only increase the probability that fake data is real but also decrease the probability that real data is real
+https://ajolicoeur.wordpress.com/RelativisticGAN/|title|The relativistic discriminator: a key element missing from standard GAN (2018) – Alexia Jolicoeur-Martineau
+https://ajolicoeur.wordpress.com/RelativisticGAN/|creationTime|2018-07-05T09:10:15Z
+http://piketty.blog.lemonde.fr/2015/11/28/les-pollueurs-du-monde-doivent-payer/|creationDate|2015-11-28
+http://piketty.blog.lemonde.fr/2015/11/28/les-pollueurs-du-monde-doivent-payer/|tag|http://www.semanlink.net/tag/thomas_piketty
+http://piketty.blog.lemonde.fr/2015/11/28/les-pollueurs-du-monde-doivent-payer/|tag|http://www.semanlink.net/tag/pollueurs_payeurs
+http://piketty.blog.lemonde.fr/2015/11/28/les-pollueurs-du-monde-doivent-payer/|title|Les pollueurs du monde doivent payer Le blog de Thomas Piketty
+http://piketty.blog.lemonde.fr/2015/11/28/les-pollueurs-du-monde-doivent-payer/|creationTime|2015-11-28T16:25:08Z
+http://openstructs.org|creationDate|2011-09-20
+http://openstructs.org|tag|http://www.semanlink.net/tag/frederick_giasson
+http://openstructs.org|tag|http://www.semanlink.net/tag/open_source
+http://openstructs.org|tag|http://www.semanlink.net/tag/semantic_framework
+http://openstructs.org|title|OpenStructs Open source data structs and semantic frameworks
+http://openstructs.org|creationTime|2011-09-20T08:55:34Z
+http://googleresearch.blogspot.fr/2014/12/automatically-making-sense-of-data.html|creationDate|2014-12-04
+http://googleresearch.blogspot.fr/2014/12/automatically-making-sense-of-data.html|tag|http://www.semanlink.net/tag/google_research
+http://googleresearch.blogspot.fr/2014/12/automatically-making-sense-of-data.html|tag|http://www.semanlink.net/tag/time_series
+http://googleresearch.blogspot.fr/2014/12/automatically-making-sense-of-data.html|title|Research Blog: Automatically making sense of data
+http://googleresearch.blogspot.fr/2014/12/automatically-making-sense-of-data.html|creationTime|2014-12-04T13:38:28Z
+http://blog.aylien.com/12-of-the-best-free-natural-language-processing-and-machine-learning-educational-resources/?utm_content=66559950&utm_medium=social&utm_source=twitter|creationDate|2018-02-03
+http://blog.aylien.com/12-of-the-best-free-natural-language-processing-and-machine-learning-educational-resources/?utm_content=66559950&utm_medium=social&utm_source=twitter|tag|http://www.semanlink.net/tag/machine_learning
+http://blog.aylien.com/12-of-the-best-free-natural-language-processing-and-machine-learning-educational-resources/?utm_content=66559950&utm_medium=social&utm_source=twitter|tag|http://www.semanlink.net/tag/educational_resources
+http://blog.aylien.com/12-of-the-best-free-natural-language-processing-and-machine-learning-educational-resources/?utm_content=66559950&utm_medium=social&utm_source=twitter|tag|http://www.semanlink.net/tag/links
+http://blog.aylien.com/12-of-the-best-free-natural-language-processing-and-machine-learning-educational-resources/?utm_content=66559950&utm_medium=social&utm_source=twitter|tag|http://www.semanlink.net/tag/nlp
+http://blog.aylien.com/12-of-the-best-free-natural-language-processing-and-machine-learning-educational-resources/?utm_content=66559950&utm_medium=social&utm_source=twitter|title|12 of the best free Natural Language Processing and Machine Learning educational resources - AYLIEN
+http://blog.aylien.com/12-of-the-best-free-natural-language-processing-and-machine-learning-educational-resources/?utm_content=66559950&utm_medium=social&utm_source=twitter|creationTime|2018-02-03T14:51:30Z
+http://www.bbc.co.uk/blogs/researchanddevelopment/2012/03/automatically-tagging-the-worl.shtml|creationDate|2012-03-23
+http://www.bbc.co.uk/blogs/researchanddevelopment/2012/03/automatically-tagging-the-worl.shtml|tag|http://www.semanlink.net/tag/automatic_tagging
+http://www.bbc.co.uk/blogs/researchanddevelopment/2012/03/automatically-tagging-the-worl.shtml|tag|http://www.semanlink.net/tag/bbc
+http://www.bbc.co.uk/blogs/researchanddevelopment/2012/03/automatically-tagging-the-worl.shtml|tag|http://www.semanlink.net/tag/yves_raymond
+http://www.bbc.co.uk/blogs/researchanddevelopment/2012/03/automatically-tagging-the-worl.shtml|title|BBC - Research and Development: Automatically tagging the World Service archive
+http://www.bbc.co.uk/blogs/researchanddevelopment/2012/03/automatically-tagging-the-worl.shtml|creationTime|2012-03-23T22:33:14Z
+http://thenextweb.com/insider/2015/11/01/technological-innovation-for-the-third-world/|creationDate|2015-11-01
+http://thenextweb.com/insider/2015/11/01/technological-innovation-for-the-third-world/|tag|http://www.semanlink.net/tag/ntic_et_developpement
+http://thenextweb.com/insider/2015/11/01/technological-innovation-for-the-third-world/|title|9 ways tech is transforming the developing world
+http://thenextweb.com/insider/2015/11/01/technological-innovation-for-the-third-world/|creationTime|2015-11-01T20:51:42Z
+https://www.bbc.com/news/world-asia-china-47667880|creationDate|2019-03-24
+https://www.bbc.com/news/world-asia-china-47667880|tag|http://www.semanlink.net/tag/fossile
+https://www.bbc.com/news/world-asia-china-47667880|tag|http://www.semanlink.net/tag/chine
+https://www.bbc.com/news/world-asia-china-47667880|comment|"The fossils are estimated to be about 518 million years old, and are particularly unusual because the soft body tissue of many creatures, including their skin, eyes, and internal organs, have been ""exquisitely"" well preserved."
+https://www.bbc.com/news/world-asia-china-47667880|title|Huge fossil discovery made in China's Hubei province - BBC News
+https://www.bbc.com/news/world-asia-china-47667880|creationTime|2019-03-24T19:14:26Z
+http://www.cs.man.ac.uk/~bmotik/publications/papers/motik05metamodeling.pdf|creationDate|2006-12-01
+http://www.cs.man.ac.uk/~bmotik/publications/papers/motik05metamodeling.pdf|tag|http://www.semanlink.net/tag/undecidability
+http://www.cs.man.ac.uk/~bmotik/publications/papers/motik05metamodeling.pdf|tag|http://www.semanlink.net/tag/owl_dl
+http://www.cs.man.ac.uk/~bmotik/publications/papers/motik05metamodeling.pdf|tag|http://www.semanlink.net/tag/inference
+http://www.cs.man.ac.uk/~bmotik/publications/papers/motik05metamodeling.pdf|tag|http://www.semanlink.net/tag/owl_full
+http://www.cs.man.ac.uk/~bmotik/publications/papers/motik05metamodeling.pdf|tag|http://www.semanlink.net/tag/conceptual_modeling
+http://www.cs.man.ac.uk/~bmotik/publications/papers/motik05metamodeling.pdf|tag|http://www.semanlink.net/tag/owl
+http://www.cs.man.ac.uk/~bmotik/publications/papers/motik05metamodeling.pdf|comment|"Abstract: A common practice in conceptual modeling is to separate the intensional from the extensional model. Although very intuitive, this approach is inadequate for many complex domains, where the borderline between the two models is not clear-cut. Therefore, OWL-Full, the most expressive of the Semantic Web ontology languages, allows combining the intensional and the extensional model by a feature we refer to as metamodeling. In this paper, we show that the semantics of metamodeling adopted in OWL-Full leads to undecidability of basic inference problems, due to free mixing of logical and metalogical symbols. Based on this result, we propose two alternative semantics for metamodeling: the contextual and the HiLog semantics. We show that SHOIQ— a description logic underlying OWL-DL— extended with metamodeling under either semantics is decidable. Finally, we show how the latter semantics can be used in practice to axiomatize the logical interaction between concepts and metaconcepts.
+
+Eagle is a type of RedListSpecies. Thus, RedListSpecies acts as a
+metaconcept for Eagle
+
+The examples such as the one given above are often dismissed with an argument that “eagle as a species” and “eagle as a set of all individual eagles”
+are not the one and the same thing, and should not be referred to using the
+same symbol...we simply observe that the word “eagle” in most people’s minds invokes a notion of a “mighty bird of prey.” The interpretation of
+this notion as a concept or as an individual is secondary and is often context-dependent, so using different symbols for the same intuitive notion makes the
+model unnecessarily complex.
+
+
+
+
+"
+http://www.cs.man.ac.uk/~bmotik/publications/papers/motik05metamodeling.pdf|title|On the properties of metamodeling in OWL
+https://escholarship.org/uc/item/48z2p287|creationDate|2019-02-19
+https://escholarship.org/uc/item/48z2p287|tag|http://www.semanlink.net/tag/ip_ir_ml_ia
+https://escholarship.org/uc/item/48z2p287|title|Machine learning and natural language processing on the patent corpus: Data, tools, and new measures (2015)
+https://escholarship.org/uc/item/48z2p287|creationTime|2019-02-19T21:14:16Z
+https://medium.com/s/story/yes-you-should-delete-facebook-heres-why-bc623a3b4625|creationDate|2018-11-01
+https://medium.com/s/story/yes-you-should-delete-facebook-heres-why-bc623a3b4625|tag|http://www.semanlink.net/tag/facebook
+https://medium.com/s/story/yes-you-should-delete-facebook-heres-why-bc623a3b4625|title|Yes, You Should Delete Facebook
+https://medium.com/s/story/yes-you-should-delete-facebook-heres-why-bc623a3b4625|creationTime|2018-11-01T21:18:03Z
+http://semanticweb.com/the-semantic-web-has-gone-mainstream-wanna-bet_b27329|creationDate|2012-05-10
+http://semanticweb.com/the-semantic-web-has-gone-mainstream-wanna-bet_b27329|tag|http://www.semanlink.net/tag/linking_enterprise_data
+http://semanticweb.com/the-semantic-web-has-gone-mainstream-wanna-bet_b27329|tag|http://www.semanlink.net/tag/juan_sequeda
+http://semanticweb.com/the-semantic-web-has-gone-mainstream-wanna-bet_b27329|tag|http://www.semanlink.net/tag/semantic_web
+http://semanticweb.com/the-semantic-web-has-gone-mainstream-wanna-bet_b27329|tag|http://www.semanlink.net/tag/peter_mika
+http://semanticweb.com/the-semantic-web-has-gone-mainstream-wanna-bet_b27329|title|The Semantic Web Has Gone Mainstream! Wanna Bet? - semanticweb.com
+http://semanticweb.com/the-semantic-web-has-gone-mainstream-wanna-bet_b27329|creationTime|2012-05-10T08:51:03Z
+http://blog.morrisjohns.com/javascript_closures_for_dummies|creationDate|2006-09-25
+http://blog.morrisjohns.com/javascript_closures_for_dummies|tag|http://www.semanlink.net/tag/javascript
+http://blog.morrisjohns.com/javascript_closures_for_dummies|comment|"a closure is the local variables for a function - kept alive after the function has returned, or
+a closure is a stack-frame which is not deallocated when the function returns. (as if a 'stack-frame' were malloc'ed instead of being on the stack!)"
+http://blog.morrisjohns.com/javascript_closures_for_dummies|title|JavaScript Closures for Dummies Developing thoughts — Morris Johns
+http://json-ld.org/spec/latest/json-ld-framing/|creationDate|2014-10-29
+http://json-ld.org/spec/latest/json-ld-framing/|tag|http://www.semanlink.net/tag/json_ld
+http://json-ld.org/spec/latest/json-ld-framing/|comment|"An Application Programming Interface for the JSON-LD Syntax
+A Frame can be used by a developer on a JSON-LD document to specify a deterministic layout for a graph." +http://json-ld.org/spec/latest/json-ld-framing/|title|JSON-LD Framing 1.0 +http://json-ld.org/spec/latest/json-ld-framing/|creationTime|2014-10-29T01:11:20Z +https://productforums.google.com/forum/#!topicsearchin/webmasters/category$3Astructured-data$20jsonld|creationDate|2017-09-26 +https://productforums.google.com/forum/#!topicsearchin/webmasters/category$3Astructured-data$20jsonld|tag|http://www.semanlink.net/tag/json_ld +https://productforums.google.com/forum/#!topicsearchin/webmasters/category$3Astructured-data$20jsonld|tag|http://www.semanlink.net/tag/google_structured_data_testing_tool +https://productforums.google.com/forum/#!topicsearchin/webmasters/category$3Astructured-data$20jsonld|title|category:structured-data jsonld (in Webmaster Central Help Forum) - Google Product Forums +https://productforums.google.com/forum/#!topicsearchin/webmasters/category$3Astructured-data$20jsonld|creationTime|2017-09-26T15:55:40Z +http://ajaxian.com/|creationDate|2006-06-25 +http://ajaxian.com/|tag|http://www.semanlink.net/tag/ajax +http://ajaxian.com/|title|Ajaxian +http://www.cringely.com/2012/10/28/steve-ballmers-dilemma/|creationDate|2013-09-14 +http://www.cringely.com/2012/10/28/steve-ballmers-dilemma/|tag|http://www.semanlink.net/tag/cringely +http://www.cringely.com/2012/10/28/steve-ballmers-dilemma/|tag|http://www.semanlink.net/tag/ballmer +http://www.cringely.com/2012/10/28/steve-ballmers-dilemma/|tag|http://www.semanlink.net/tag/microsoft +http://www.cringely.com/2012/10/28/steve-ballmers-dilemma/|title|I, Cringely Steve Ballmer's Dilemma ~ I, Cringely +http://www.cringely.com/2012/10/28/steve-ballmers-dilemma/|creationTime|2013-09-14T01:10:52Z +http://lists.w3.org/Archives/Public/public-vocabs/2012Jun/0031.html|creationDate|2013-07-06 +http://lists.w3.org/Archives/Public/public-vocabs/2012Jun/0031.html|tag|http://www.semanlink.net/tag/microdata +http://lists.w3.org/Archives/Public/public-vocabs/2012Jun/0031.html|tag|http://www.semanlink.net/tag/dan_brickley +http://lists.w3.org/Archives/Public/public-vocabs/2012Jun/0031.html|tag|http://www.semanlink.net/tag/schema_org +http://lists.w3.org/Archives/Public/public-vocabs/2012Jun/0031.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2012Jun/0031.html|tag|http://www.semanlink.net/tag/rdfa +http://lists.w3.org/Archives/Public/public-vocabs/2012Jun/0031.html|comment|"""The reason of adding this property is to patch microdata...""
+C'est ici que les Athéniens s'atteignirent"
+http://lists.w3.org/Archives/Public/public-vocabs/2012Jun/0031.html|title|additionalType property, vs extending Microdata syntax for multiple types from Dan Brickley on 2012-06-15 (public-vocabs@w3.org from June 2012)
+http://lists.w3.org/Archives/Public/public-vocabs/2012Jun/0031.html|creationTime|2013-07-06T17:35:09Z
+http://www.marekrei.com/blog/paper-summaries/|creationDate|2018-01-17
+http://www.marekrei.com/blog/paper-summaries/|tag|http://www.semanlink.net/tag/nlp
+http://www.marekrei.com/blog/paper-summaries/|tag|http://www.semanlink.net/tag/machine_learning
+http://www.marekrei.com/blog/paper-summaries/|tag|http://www.semanlink.net/tag/survey
+http://www.marekrei.com/blog/paper-summaries/|title|57 Summaries of Machine Learning and NLP Research - Marek Rei
+http://www.marekrei.com/blog/paper-summaries/|creationTime|2018-01-17T21:02:46Z
+https://duckduckgo.com/|creationDate|2013-06-13
+https://duckduckgo.com/|tag|http://www.semanlink.net/tag/duckduckgo
+https://duckduckgo.com/|title|DuckDuckGo
+https://duckduckgo.com/|creationTime|2013-06-13T00:14:56Z
+http://www.w3.org/blog/SW/2010/07/17/powder_not_so_quiet|creationDate|2010-07-18
+http://www.w3.org/blog/SW/2010/07/17/powder_not_so_quiet|tag|http://www.semanlink.net/tag/powder
+http://www.w3.org/blog/SW/2010/07/17/powder_not_so_quiet|title|W3C Semantic Web Activity News - POWDER: Not So Quiet
+http://www.w3.org/blog/SW/2010/07/17/powder_not_so_quiet|creationTime|2010-07-18T19:22:21Z
+https://docs.docker.com/mac/|creationDate|2016-04-01
+https://docs.docker.com/mac/|tag|http://www.semanlink.net/tag/docker
+https://docs.docker.com/mac/|title|Get Started with Docker
+https://docs.docker.com/mac/|creationTime|2016-04-01T02:09:52Z
+http://createjs.org/guide/|creationDate|2012-08-10
+http://createjs.org/guide/|tag|http://www.semanlink.net/tag/create_js
+http://createjs.org/guide/|title|Create.js - Create.js Integration Guide
+http://createjs.org/guide/|creationTime|2012-08-10T15:54:00Z
+http://stardust.jpl.nasa.gov/|creationDate|2005-12-08
+http://stardust.jpl.nasa.gov/|tag|http://www.semanlink.net/tag/nasa
+http://stardust.jpl.nasa.gov/|tag|http://www.semanlink.net/tag/stardust
+http://stardust.jpl.nasa.gov/|tag|http://www.semanlink.net/tag/jpl
+http://stardust.jpl.nasa.gov/|comment|"On January 15, 2006, after more than 7 years and billions of miles of travel
+through space, the Stardust spacecraft will finally return to Earth with some
+precious cargo -- pristine samples of comet and interstellar dust."
+http://stardust.jpl.nasa.gov/|title|Stardust JPL NASA +http://www.w3.org/TR/swbp-n-aryRelations/|creationDate|2006-04-18 +http://www.w3.org/TR/swbp-n-aryRelations/|tag|http://www.semanlink.net/tag/n_ary_relations_on_the_semantic_web +http://www.w3.org/TR/swbp-n-aryRelations/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/TR/swbp-n-aryRelations/|title|Defining N-ary Relations on the Semantic Web +http://news.stanford.edu/pr/2015/pr-memory-monitor-biox-061715.html|creationDate|2015-06-25 +http://news.stanford.edu/pr/2015/pr-memory-monitor-biox-061715.html|tag|http://www.semanlink.net/tag/neuroscience +http://news.stanford.edu/pr/2015/pr-memory-monitor-biox-061715.html|title|Brain connections last as long as the memories they store, Stanford neuroscientist finds Stanford News Release +http://news.stanford.edu/pr/2015/pr-memory-monitor-biox-061715.html|creationTime|2015-06-25T18:16:14Z +http://www.ifixit.com|creationDate|2006-07-14 +http://www.ifixit.com|tag|http://www.semanlink.net/tag/howto +http://www.ifixit.com|tag|http://www.semanlink.net/tag/bricolage_mac +http://www.ifixit.com|comment|iFixit makes repairing and upgrading your Mac easy +http://www.ifixit.com|title|iFixit - iBook & PowerBook Parts +http://www.wired.com/2015/01/beth-moon-ancient-trees/#slide-1|creationDate|2015-12-30 +http://www.wired.com/2015/01/beth-moon-ancient-trees/#slide-1|tag|http://www.semanlink.net/tag/arbres +http://www.wired.com/2015/01/beth-moon-ancient-trees/#slide-1|title|The Most Ancient and Magnificent Trees From Around the World WIRED +http://www.wired.com/2015/01/beth-moon-ancient-trees/#slide-1|creationTime|2015-12-30T20:35:09Z +http://www.newscientist.com/article/mg20627621.000-language-lessons-you-are-what-you-speak.html?full=true|creationDate|2010-06-02 +http://www.newscientist.com/article/mg20627621.000-language-lessons-you-are-what-you-speak.html?full=true|tag|http://www.semanlink.net/tag/langage +http://www.newscientist.com/article/mg20627621.000-language-lessons-you-are-what-you-speak.html?full=true|title|Language lessons: You are what you speak - life - 01 June 2010 - New Scientist +http://www.newscientist.com/article/mg20627621.000-language-lessons-you-are-what-you-speak.html?full=true|creationTime|2010-06-02T22:28:36Z +https://fr.wikipedia.org/wiki/A%C3%AFcha_(s%C3%A9rie_de_t%C3%A9l%C3%A9films)|creationDate|2015-08-27 +https://fr.wikipedia.org/wiki/A%C3%AFcha_(s%C3%A9rie_de_t%C3%A9l%C3%A9films)|tag|http://www.semanlink.net/tag/immigration +https://fr.wikipedia.org/wiki/A%C3%AFcha_(s%C3%A9rie_de_t%C3%A9l%C3%A9films)|tag|http://www.semanlink.net/tag/film_francais +https://fr.wikipedia.org/wiki/A%C3%AFcha_(s%C3%A9rie_de_t%C3%A9l%C3%A9films)|title|Aïcha (série de téléfilms) — Wikipédia +https://fr.wikipedia.org/wiki/A%C3%AFcha_(s%C3%A9rie_de_t%C3%A9l%C3%A9films)|creationTime|2015-08-27T01:47:26Z +https://blog.makk.es/java-libs-for-processing-wiki-markup.html|creationDate|2014-09-29 +https://blog.makk.es/java-libs-for-processing-wiki-markup.html|tag|http://www.semanlink.net/tag/wiki_markup +https://blog.makk.es/java-libs-for-processing-wiki-markup.html|title|Java libs for processing wiki markup – Makk.es +https://blog.makk.es/java-libs-for-processing-wiki-markup.html|creationTime|2014-09-29T23:03:31Z +https://medium.com/@madrugado/interesting-stuff-at-emnlp-part-ii-ce92ac928f16|creationDate|2018-11-25 +https://medium.com/@madrugado/interesting-stuff-at-emnlp-part-ii-ce92ac928f16|tag|http://www.semanlink.net/tag/emnlp_2018 
+https://medium.com/@madrugado/interesting-stuff-at-emnlp-part-ii-ce92ac928f16|title|Interesting Stuff at EMNLP (part II) – Valentin Malykh – Medium
+https://medium.com/@madrugado/interesting-stuff-at-emnlp-part-ii-ce92ac928f16|creationTime|2018-11-25T15:55:26Z
+http://www.geocities.com/hollywood/set/8100/frroman.html|creationDate|2006-04-10
+http://www.geocities.com/hollywood/set/8100/frroman.html|tag|http://www.semanlink.net/tag/film_francais
+http://www.geocities.com/hollywood/set/8100/frroman.html|tag|http://www.semanlink.net/tag/sacha_guitry
+http://www.geocities.com/hollywood/set/8100/frroman.html|comment|"Film français de Sacha Guitry (1936)
+Le héros du film commence par voler six sous dans l'épicerie familiale. A cause de ce vol, il sera privé de champignons. A cause des champignons, il devient orphelin. Placé chez un oncle et une tante qui n'aspirent qu'à le dépouiller de son héritage, l'orphelin est amené à s'enfuir et il devient groom dans un hôtel, puis croupier à Monaco. Son goût pour les femmes, son attraction pour la richesse, son absence de scrupules feront de lui un joueur, un tricheur joyeux, un homme sinon constamment heureux, en tous cas indépendant, autonome et vaillant. (François Truffaut, Le cinéma et moi, Ramsay)"
+http://www.geocities.com/hollywood/set/8100/frroman.html|title|Le roman d'un tricheur
+http://www.infoworld.com/article/06/10/03/HNfrenchodf_1.html|creationDate|2006-10-06
+http://www.infoworld.com/article/06/10/03/HNfrenchodf_1.html|tag|http://www.semanlink.net/tag/gouvernement_chirac
+http://www.infoworld.com/article/06/10/03/HNfrenchodf_1.html|tag|http://www.semanlink.net/tag/odf
+http://www.infoworld.com/article/06/10/03/HNfrenchodf_1.html|tag|http://www.semanlink.net/tag/open_source
+http://www.infoworld.com/article/06/10/03/HNfrenchodf_1.html|tag|http://www.semanlink.net/tag/bonne_nouvelle
+http://www.infoworld.com/article/06/10/03/HNfrenchodf_1.html|title|French gov't report recommends standardizing on ODF
+http://www.telegraph.co.uk/finance/comment/ambroseevans_pritchard/11150306/German-model-is-ruinous-for-Germany-and-deadly-for-Europe.html|creationDate|2014-10-11
+http://www.telegraph.co.uk/finance/comment/ambroseevans_pritchard/11150306/German-model-is-ruinous-for-Germany-and-deadly-for-Europe.html|tag|http://www.semanlink.net/tag/allemagne
+http://www.telegraph.co.uk/finance/comment/ambroseevans_pritchard/11150306/German-model-is-ruinous-for-Germany-and-deadly-for-Europe.html|tag|http://www.semanlink.net/tag/economie_allemande
+http://www.telegraph.co.uk/finance/comment/ambroseevans_pritchard/11150306/German-model-is-ruinous-for-Germany-and-deadly-for-Europe.html|title|German model is ruinous for Germany, and deadly for Europe - Telegraph
+http://www.telegraph.co.uk/finance/comment/ambroseevans_pritchard/11150306/German-model-is-ruinous-for-Germany-and-deadly-for-Europe.html|creationTime|2014-10-11T00:52:36Z
+http://diveintogreasemonkey.org|creationDate|2005-05-17
+http://diveintogreasemonkey.org|tag|http://www.semanlink.net/tag/greasemonkey
+http://diveintogreasemonkey.org|title|Dive Into Greasemonkey
+http://dev2ops.org/2010/02/what-is-devops/|creationDate|2014-01-24
+http://dev2ops.org/2010/02/what-is-devops/|tag|http://www.semanlink.net/tag/devops
+http://dev2ops.org/2010/02/what-is-devops/|title|What is DevOps? - dev2ops
+http://dev2ops.org/2010/02/what-is-devops/|creationTime|2014-01-24T14:36:12Z
+http://www.newscientist.com/article/dn25538-how-your-ancestors-farms-shaped-your-thinking.html#.U2_ooMYoqwy|creationDate|2014-05-11
+http://www.newscientist.com/article/dn25538-how-your-ancestors-farms-shaped-your-thinking.html#.U2_ooMYoqwy|tag|http://www.semanlink.net/tag/chine_europe
+http://www.newscientist.com/article/dn25538-how-your-ancestors-farms-shaped-your-thinking.html#.U2_ooMYoqwy|tag|http://www.semanlink.net/tag/riz
+http://www.newscientist.com/article/dn25538-how-your-ancestors-farms-shaped-your-thinking.html#.U2_ooMYoqwy|tag|http://www.semanlink.net/tag/ble
+http://www.newscientist.com/article/dn25538-how-your-ancestors-farms-shaped-your-thinking.html#.U2_ooMYoqwy|tag|http://www.semanlink.net/tag/agriculture
+http://www.newscientist.com/article/dn25538-how-your-ancestors-farms-shaped-your-thinking.html#.U2_ooMYoqwy|comment|Rice farming seems to have fostered collective thinking while wheat farming favoured individualism.
+http://www.newscientist.com/article/dn25538-how-your-ancestors-farms-shaped-your-thinking.html#.U2_ooMYoqwy|title|How your ancestors' farms shaped your thinking - New Scientist
+http://www.newscientist.com/article/dn25538-how-your-ancestors-farms-shaped-your-thinking.html#.U2_ooMYoqwy|creationTime|2014-05-11T23:20:34Z
+https://www.crummy.com/software/BeautifulSoup/|creationDate|2017-06-28
+https://www.crummy.com/software/BeautifulSoup/|tag|http://www.semanlink.net/tag/python_tools
+https://www.crummy.com/software/BeautifulSoup/|title|Beautiful Soup
+https://www.crummy.com/software/BeautifulSoup/|creationTime|2017-06-28T23:29:02Z
+http://blogs.zdnet.com/semantic-web/?p=243|creationDate|2008-12-10
+http://blogs.zdnet.com/semantic-web/?p=243|tag|http://www.semanlink.net/tag/zemanta
+http://blogs.zdnet.com/semantic-web/?p=243|title|Zemanta talks Linked Data with SDK and commercial API The Semantic Web ZDNet.com
+http://blogs.zdnet.com/semantic-web/?p=243|creationTime|2008-12-10T14:14:50Z
+http://unsupervised.cs.princeton.edu/ICMtalk/aroraplenary.html|creationDate|2018-08-08
+http://unsupervised.cs.princeton.edu/ICMtalk/aroraplenary.html|tag|http://www.semanlink.net/tag/sanjeev_arora
+http://unsupervised.cs.princeton.edu/ICMtalk/aroraplenary.html|comment|[article](/doc/?uri=https%3A%2F%2Fwww.dropbox.com%2Fs%2Fy59petiffzq63gt%2Fmain.pdf%3Fdl%3D0)
+http://unsupervised.cs.princeton.edu/ICMtalk/aroraplenary.html|relatedDoc|https://www.dropbox.com/s/y59petiffzq63gt/main.pdf?dl=0
+http://unsupervised.cs.princeton.edu/ICMtalk/aroraplenary.html|title|Mathematics of Machine Learning and Deep Learning - Plenary talk at International Congress of Mathematicians 2018
+http://unsupervised.cs.princeton.edu/ICMtalk/aroraplenary.html|creationTime|2018-08-08T13:48:49Z
+http://sourceforge.net/projects/touchgraph/|creationDate|2005-09-25
+http://sourceforge.net/projects/touchgraph/|tag|http://www.semanlink.net/tag/touchgraph
+http://sourceforge.net/projects/touchgraph/|title|SourceForge.net: Project Info - TouchGraph
+http://linkedup-challenge.org/|creationDate|2013-06-14
+http://linkedup-challenge.org/|tag|http://www.semanlink.net/tag/education_and_linked_data
+http://linkedup-challenge.org/|title|LinkedUp Challenge
+http://linkedup-challenge.org/|creationTime|2013-06-14T16:31:54Z
+http://blogs.guardian.co.uk/news/archives/2005/07/07/bomb_blasts_plunge_london_into_chaos.html|creationDate|2005-07-08
+http://blogs.guardian.co.uk/news/archives/2005/07/07/bomb_blasts_plunge_london_into_chaos.html|tag|http://www.semanlink.net/tag/blog
+http://blogs.guardian.co.uk/news/archives/2005/07/07/bomb_blasts_plunge_london_into_chaos.html|tag|http://www.semanlink.net/tag/terrorisme
+http://blogs.guardian.co.uk/news/archives/2005/07/07/bomb_blasts_plunge_london_into_chaos.html|comment|"Contemporaria
+This post was last changed at 10:29 AM, July 7 2005, at a time when the top headline on Guardian Unlimited was London wins 2012 Olympics, and the top headline from the BBC was Several hurt in Tube 'explosion', and there were posts elsewhere tagged with these same keywords:
+
+The post was written by Neil McIntosh. You can email the author at neil.mcintosh@guardian.co.uk"
+http://blogs.guardian.co.uk/news/archives/2005/07/07/bomb_blasts_plunge_london_into_chaos.html|title|Bomb blasts plunge London into chaos from Guardian Unlimited: Newsblog
+https://www.analyticsvidhya.com/blog/2016/02/time-series-forecasting-codes-python/|creationDate|2017-06-15
+https://www.analyticsvidhya.com/blog/2016/02/time-series-forecasting-codes-python/|tag|http://www.semanlink.net/tag/time_series
+https://www.analyticsvidhya.com/blog/2016/02/time-series-forecasting-codes-python/|tag|http://www.semanlink.net/tag/arima
+https://www.analyticsvidhya.com/blog/2016/02/time-series-forecasting-codes-python/|title|Complete guide to create a Time Series Forecast (with Codes in Python)
+https://www.analyticsvidhya.com/blog/2016/02/time-series-forecasting-codes-python/|creationTime|2017-06-15T13:26:28Z
+http://www.diplomatie.gouv.fr/fr/conseils-aux-voyageurs_909/pays_12191/niger_12300/index.html|creationDate|2007-06-24
+http://www.diplomatie.gouv.fr/fr/conseils-aux-voyageurs_909/pays_12191/niger_12300/index.html|tag|http://www.semanlink.net/tag/niger
+http://www.diplomatie.gouv.fr/fr/conseils-aux-voyageurs_909/pays_12191/niger_12300/index.html|title|Ministère des Affaires Etrangères. Conseils aux voyageurs au Niger
+http://www.diplomatie.gouv.fr/fr/conseils-aux-voyageurs_909/pays_12191/niger_12300/index.html|creationTime|2007-06-24T20:55:26Z
+http://ceur-ws.org/Vol-2180/ISWC_2018_Outrageous_Ideas_paper_6.pdf|creationDate|2018-10-16
+http://ceur-ws.org/Vol-2180/ISWC_2018_Outrageous_Ideas_paper_6.pdf|tag|http://www.semanlink.net/tag/iswc
+http://ceur-ws.org/Vol-2180/ISWC_2018_Outrageous_Ideas_paper_6.pdf|tag|http://www.semanlink.net/tag/wikipedia
+http://ceur-ws.org/Vol-2180/ISWC_2018_Outrageous_Ideas_paper_6.pdf|tag|http://www.semanlink.net/tag/denny_vrandecic
+http://ceur-ws.org/Vol-2180/ISWC_2018_Outrageous_Ideas_paper_6.pdf|title|Capturing meaning: Toward an abstract Wikipedia (ISWC 2018)
+http://ceur-ws.org/Vol-2180/ISWC_2018_Outrageous_Ideas_paper_6.pdf|creationTime|2018-10-16T00:20:49Z
+https://webflow.com/|creationDate|2015-10-13
+https://webflow.com/|tag|http://www.semanlink.net/tag/paul_graham
+https://webflow.com/|tag|http://www.semanlink.net/tag/website_creation
+https://webflow.com/|tag|http://www.semanlink.net/tag/cms
+https://webflow.com/|title|Web design tool, CMS, and hosting platform Webflow
+https://webflow.com/|creationTime|2015-10-13T10:23:07Z
+https://nadesnotes.wordpress.com/2016/04/10/natural-language-processing-nlp-fundamentals-finite-state-transducers-fsts/|creationDate|2018-11-11
+https://nadesnotes.wordpress.com/2016/04/10/natural-language-processing-nlp-fundamentals-finite-state-transducers-fsts/|tag|http://www.semanlink.net/tag/finite_state_transducer
+https://nadesnotes.wordpress.com/2016/04/10/natural-language-processing-nlp-fundamentals-finite-state-transducers-fsts/|title|Natural Language Processing (NLP) Fundamentals: Finite State Transducers (FSTs) – Nade's Notes
+https://nadesnotes.wordpress.com/2016/04/10/natural-language-processing-nlp-fundamentals-finite-state-transducers-fsts/|creationTime|2018-11-11T13:19:07Z
+http://blog.socialcast.com/javascript-memory-management/|creationDate|2012-09-14
+http://blog.socialcast.com/javascript-memory-management/|tag|http://www.semanlink.net/tag/javascript
+http://blog.socialcast.com/javascript-memory-management/|tag|http://www.semanlink.net/tag/memory_leak
+http://blog.socialcast.com/javascript-memory-management/|title|JavaScript Memory Management
+http://blog.socialcast.com/javascript-memory-management/|creationTime|2012-09-14T01:36:27Z
+http://jena.sourceforge.net/grddl/index.html|creationDate|2007-07-17
+http://jena.sourceforge.net/grddl/index.html|tag|http://www.semanlink.net/tag/jena_grddl_reader
+http://jena.sourceforge.net/grddl/index.html|title|Jena GRDDL Reader
+http://jena.sourceforge.net/grddl/index.html|creationTime|2007-07-17T23:09:56Z
+http://www.ihes.fr/~lafforgue/textes/lettresaucollege.pdf|creationDate|2005-12-21
+http://w3-org.9356.n7.nabble.com/RDF-Thrift-A-binary-format-for-RDF-data-td289661.html|creationDate|2014-09-08
+http://w3-org.9356.n7.nabble.com/RDF-Thrift-A-binary-format-for-RDF-data-td289661.html|tag|http://www.semanlink.net/tag/rdf_thrift
+http://w3-org.9356.n7.nabble.com/RDF-Thrift-A-binary-format-for-RDF-data-td289661.html|comment|a binary format for RDF graphs, datasets and SPARQL result sets that is fast to process.
+http://w3-org.9356.n7.nabble.com/RDF-Thrift-A-binary-format-for-RDF-data-td289661.html|title|RDF Thrift : A binary format for RDF data
+http://w3-org.9356.n7.nabble.com/RDF-Thrift-A-binary-format-for-RDF-data-td289661.html|creationTime|2014-09-08T18:08:17Z
+http://www.readwriteweb.com/archives/10_semantic_apps_to_watch_one_year_later.php|creationDate|2008-11-26
+http://www.readwriteweb.com/archives/10_semantic_apps_to_watch_one_year_later.php|tag|http://www.semanlink.net/tag/semantic_web_application
+http://www.readwriteweb.com/archives/10_semantic_apps_to_watch_one_year_later.php|title|Semantic apps to watch - ReadWriteWeb
+http://www.readwriteweb.com/archives/10_semantic_apps_to_watch_one_year_later.php|creationTime|2008-11-26T20:46:35Z
+http://googlewebmastercentral.blogspot.com/2008/04/crawling-through-html-forms.html|creationDate|2008-09-02
+http://googlewebmastercentral.blogspot.com/2008/04/crawling-through-html-forms.html|tag|http://www.semanlink.net/tag/google
+http://googlewebmastercentral.blogspot.com/2008/04/crawling-through-html-forms.html|tag|http://www.semanlink.net/tag/forms
+http://googlewebmastercentral.blogspot.com/2008/04/crawling-through-html-forms.html|comment|"When we encounter a FORM element on a high-quality site, we might choose to do a small number of queries using the form. For text boxes, our computers automatically choose words from the site that has the form...Only a small number of particularly useful sites receive this treatment, and our crawl agent, the ever-friendly Googlebot, always adheres to robots.txt, nofollow, and noindex directives. That means that if a search form is forbidden in robots.txt, we won't crawl any of the URLs that a form would generate.
+"
+http://googlewebmastercentral.blogspot.com/2008/04/crawling-through-html-forms.html|title|Official Google Webmaster Central Blog: Crawling through HTML forms
+http://googlewebmastercentral.blogspot.com/2008/04/crawling-through-html-forms.html|creationTime|2008-09-02T13:05:27Z
+http://java.net/projects/sommer|creationDate|2012-03-29
+http://java.net/projects/sommer|tag|http://www.semanlink.net/tag/jersey_rdf
+http://java.net/projects/sommer|tag|http://www.semanlink.net/tag/henry_story
+http://java.net/projects/sommer|comment|Semantic Object (Medata) Mapper
+http://java.net/projects/sommer|title|Sommer — Java.net
+http://java.net/projects/sommer|creationTime|2012-03-29T01:43:00Z
+http://www.w3.org/wiki/TagIssue57Responses|creationDate|2012-04-13
+http://www.w3.org/wiki/TagIssue57Responses|tag|http://www.semanlink.net/tag/httprange_14
+http://www.w3.org/wiki/TagIssue57Responses|title|TagIssue57Responses - W3C Wiki
+http://www.w3.org/wiki/TagIssue57Responses|creationTime|2012-04-13T17:30:48Z
+http://www.kuro5hin.org/story/2005/6/12/143721/743|creationDate|2005-06-15
+http://www.kuro5hin.org/story/2005/6/12/143721/743|tag|http://www.semanlink.net/tag/google
+http://www.kuro5hin.org/story/2005/6/12/143721/743|title|Who Will Google Buy Next?
+http://www.nytimes.com/2008/07/31/opinion/31kristof.html|creationDate|2009-01-16
+http://www.nytimes.com/2008/07/31/opinion/31kristof.html|tag|http://www.semanlink.net/tag/animal_rights
+http://www.nytimes.com/2008/07/31/opinion/31kristof.html|tag|http://www.semanlink.net/tag/agriculture
+http://www.nytimes.com/2008/07/31/opinion/31kristof.html|comment|Very often, one goose would bravely step away from the panicked flock and walk tremulously toward me. It would be the mate of the one I had caught, male or female, and it would step right up to me, protesting pitifully. It would be frightened out of its wits, but still determined to stand with and comfort its lover.
+http://www.nytimes.com/2008/07/31/opinion/31kristof.html|title|A Farm Boy Reflects
+http://www.nytimes.com/2008/07/31/opinion/31kristof.html|creationTime|2009-01-16T01:07:59Z
+https://medium.com/@chriszhu12/highlights-of-emnlp-2018-55892fba4247|creationDate|2018-11-25
+https://medium.com/@chriszhu12/highlights-of-emnlp-2018-55892fba4247|tag|http://www.semanlink.net/tag/emnlp_2018
+https://medium.com/@chriszhu12/highlights-of-emnlp-2018-55892fba4247|title|Highlights of EMNLP 2018 – Chris Zhu – Medium
+https://medium.com/@chriszhu12/highlights-of-emnlp-2018-55892fba4247|creationTime|2018-11-25T17:24:27Z
+http://www.xml.com/pub/a/2004/02/11/googlexml.html|creationDate|2007-12-20
+http://www.xml.com/pub/a/2004/02/11/googlexml.html|tag|http://www.semanlink.net/tag/googling
+http://www.xml.com/pub/a/2004/02/11/googlexml.html|tag|http://www.semanlink.net/tag/tips
+http://www.xml.com/pub/a/2004/02/11/googlexml.html|comment|Googling for Specific Document Types
+http://www.xml.com/pub/a/2004/02/11/googlexml.html|title|XML.com: Googling for XML
+http://www.xml.com/pub/a/2004/02/11/googlexml.html|creationTime|2007-12-20T15:24:57Z
+https://www.nytimes.com/2018/11/18/technology/artificial-intelligence-language.html|creationDate|2018-11-19
+https://www.nytimes.com/2018/11/18/technology/artificial-intelligence-language.html|tag|http://www.semanlink.net/tag/new_york_times
+https://www.nytimes.com/2018/11/18/technology/artificial-intelligence-language.html|tag|http://www.semanlink.net/tag/nlp_current_state
+https://www.nytimes.com/2018/11/18/technology/artificial-intelligence-language.html|title|Finally, a Machine That Can Finish Your Sentence - The New York Times
+https://www.nytimes.com/2018/11/18/technology/artificial-intelligence-language.html|creationTime|2018-11-19T09:00:24Z
+http://www.ldodds.com/blog/archives/000272.html|creationDate|2006-04-27
+http://www.ldodds.com/blog/archives/000272.html|tag|http://www.semanlink.net/tag/sparql
+http://www.ldodds.com/blog/archives/000272.html|tag|http://www.semanlink.net/tag/leigh_dodds
+http://www.ldodds.com/blog/archives/000272.html|tag|http://www.semanlink.net/tag/bbc
+http://www.ldodds.com/blog/archives/000272.html|title|Lost Boy: SPARQLing the BBC Programme Catalogue
+http://perso.orange.fr/jacques.morel67/ccfo/crimcol.pdf|creationDate|2007-05-02
+http://perso.orange.fr/jacques.morel67/ccfo/crimcol.pdf|tag|http://www.semanlink.net/tag/empire_colonial_francais
+http://perso.orange.fr/jacques.morel67/ccfo/crimcol.pdf|tag|http://www.semanlink.net/tag/colonisation
+http://perso.orange.fr/jacques.morel67/ccfo/crimcol.pdf|title|Calendrier des crimes de la France outre-mer
+http://perso.orange.fr/jacques.morel67/ccfo/crimcol.pdf|creationTime|2007-05-02T00:00:13Z
+http://drupal.org/project/usage|creationDate|2012-01-23
+http://drupal.org/project/usage|tag|http://www.semanlink.net/tag/drupal_modules
+http://drupal.org/project/usage|title|Project usage overview drupal.org
+http://drupal.org/project/usage|creationTime|2012-01-23T11:51:47Z
+http://blogs.sun.com/bblfish/entry/sparqling_altavista_the_meaning_of|creationDate|2007-08-23
+http://blogs.sun.com/bblfish/entry/sparqling_altavista_the_meaning_of|tag|http://www.semanlink.net/tag/sparql
+http://blogs.sun.com/bblfish/entry/sparqling_altavista_the_meaning_of|tag|http://www.semanlink.net/tag/henry_story
+http://blogs.sun.com/bblfish/entry/sparqling_altavista_the_meaning_of|tag|http://www.semanlink.net/tag/rdf_forms +http://blogs.sun.com/bblfish/entry/sparqling_altavista_the_meaning_of|tag|http://www.semanlink.net/tag/altavista +http://blogs.sun.com/bblfish/entry/sparqling_altavista_the_meaning_of|comment|The AltaVista engineers developed a clever mapping between html forms and SPARQL queries. +http://blogs.sun.com/bblfish/entry/sparqling_altavista_the_meaning_of|title|SPARQLing AltaVista: the meaning of forms +http://blogs.sun.com/bblfish/entry/sparqling_altavista_the_meaning_of|creationTime|2007-08-23T23:50:06Z +http://www.bbc.co.uk/news/science-environment-21150047|creationDate|2013-12-27 +http://www.bbc.co.uk/news/science-environment-21150047|tag|http://www.semanlink.net/tag/quantum_biology +http://www.bbc.co.uk/news/science-environment-21150047|title|BBC News - Quantum biology: Do weird physics effects abound in nature? +http://www.bbc.co.uk/news/science-environment-21150047|creationTime|2013-12-27T14:29:21Z +http://julialang.org/|creationDate|2014-03-03 +http://julialang.org/|tag|http://www.semanlink.net/tag/programming_language +http://julialang.org/|comment|Julia is a high-level, high-performance dynamic programming language for technical computing +http://julialang.org/|title|The Julia Language +http://julialang.org/|creationTime|2014-03-03T22:57:44Z +http://www.ibm.com/developerworks/library/wa-datasets/|creationDate|2011-09-16 +http://www.ibm.com/developerworks/library/wa-datasets/|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.ibm.com/developerworks/library/wa-datasets/|tag|http://www.semanlink.net/tag/drupal_rdf +http://www.ibm.com/developerworks/library/wa-datasets/|title|The Semantic Web, Linked Data and Drupal, Part 2: Combine linked datasets with Drupal 7 and SPARQL Views +http://www.ibm.com/developerworks/library/wa-datasets/|creationTime|2011-09-16T16:04:55Z +http://chrispederick.com/work/webdeveloper/|creationDate|2006-07-21 +http://chrispederick.com/work/webdeveloper/|tag|http://www.semanlink.net/tag/web_dev +http://chrispederick.com/work/webdeveloper/|tag|http://www.semanlink.net/tag/web_tools +http://chrispederick.com/work/webdeveloper/|tag|http://www.semanlink.net/tag/firefox_extension +http://chrispederick.com/work/webdeveloper/|comment|The Web Developer extension adds a menu and a toolbar to the browser with various web developer tools. It is designed for Firefox, Flock, Mozilla and Seamonkey. +http://chrispederick.com/work/webdeveloper/|title|Web Developer Extension +http://deeplearning4j.org/neuralnet-overview.html|creationDate|2016-02-13 +http://deeplearning4j.org/neuralnet-overview.html|tag|http://www.semanlink.net/tag/deep_learning +http://deeplearning4j.org/neuralnet-overview.html|title|Introduction to Deep Neural Networks - Deeplearning4j: Open-source, distributed deep learning for the JVM +http://deeplearning4j.org/neuralnet-overview.html|creationTime|2016-02-13T00:15:50Z +http://ask.slashdot.org/article.pl?sid=06/08/27/000248|creationDate|2008-12-11 +http://ask.slashdot.org/article.pl?sid=06/08/27/000248|tag|http://www.semanlink.net/tag/programming +http://ask.slashdot.org/article.pl?sid=06/08/27/000248|tag|http://www.semanlink.net/tag/education +http://ask.slashdot.org/article.pl?sid=06/08/27/000248|title|Slashdot Teaching Primary School Students Programming? 
+http://ask.slashdot.org/article.pl?sid=06/08/27/000248|creationTime|2008-12-11T21:42:07Z
+http://web.archive.org/web/19981201182307/http://www.hypersolutions.fr/M3/AfricanMuseums/default.ssi|creationDate|2013-10-01
+http://web.archive.org/web/19981201182307/http://www.hypersolutions.fr/M3/AfricanMuseums/default.ssi|tag|http://www.semanlink.net/tag/musees_africains
+http://web.archive.org/web/19981201182307/http://www.hypersolutions.fr/M3/AfricanMuseums/default.ssi|tag|http://www.semanlink.net/tag/m3_multi_media_museum
+http://web.archive.org/web/19981201182307/http://www.hypersolutions.fr/M3/AfricanMuseums/default.ssi|title|Multi Media Museum et les musées africains
+http://web.archive.org/web/19981201182307/http://www.hypersolutions.fr/M3/AfricanMuseums/default.ssi|creationTime|2013-10-01T17:18:42Z
+http://clusty.com|creationDate|2005-05-10
+http://clusty.com|tag|http://www.semanlink.net/tag/search_engines
+http://clusty.com|title|Clusty the Clustering Engine
+http://blog.howarddierking.com/2016/10/07/swagger-ain-t-rest-is-that-ok/|creationDate|2017-04-01
+http://blog.howarddierking.com/2016/10/07/swagger-ain-t-rest-is-that-ok/|tag|http://www.semanlink.net/tag/rest
+http://blog.howarddierking.com/2016/10/07/swagger-ain-t-rest-is-that-ok/|title|Swagger Ain't REST - is that OK?
+http://blog.howarddierking.com/2016/10/07/swagger-ain-t-rest-is-that-ok/|creationTime|2017-04-01T18:45:32Z
+http://bioinformaticsalgorithms.com/index.htm|creationDate|2015-01-04
+http://bioinformaticsalgorithms.com/index.htm|tag|http://www.semanlink.net/tag/bioinformatics
+http://bioinformaticsalgorithms.com/index.htm|title|Bioinformatics Algorithms: an Active Learning Approach
+http://bioinformaticsalgorithms.com/index.htm|creationTime|2015-01-04T19:29:44Z
+http://bergie.iki.fi/blog/decoupling_content_management/|creationDate|2012-08-06
+http://bergie.iki.fi/blog/decoupling_content_management/|tag|http://www.semanlink.net/tag/henri_bergius
+http://bergie.iki.fi/blog/decoupling_content_management/|tag|http://www.semanlink.net/tag/cms
+http://bergie.iki.fi/blog/decoupling_content_management/|tag|http://www.semanlink.net/tag/vie_vienna_iks_editables
+http://bergie.iki.fi/blog/decoupling_content_management/|title|Decoupling Content Management - Henri Bergius
+http://bergie.iki.fi/blog/decoupling_content_management/|creationTime|2012-08-06T16:05:14Z
+http://nodejs.org/|creationDate|2011-09-07
+http://nodejs.org/|tag|http://www.semanlink.net/tag/node_js
+http://nodejs.org/|comment|Evented I/O for V8 JavaScript.
+http://nodejs.org/|title|node.js
+http://nodejs.org/|creationTime|2011-09-07T17:10:28Z
+https://www.xlpat.com/|creationDate|2019-02-15
+https://www.xlpat.com/|tag|http://www.semanlink.net/tag/ip_ir_ml_ia
+https://www.xlpat.com/|title|Online Patent Search Tools Patent Analytics - XLPAT Labs
+https://www.xlpat.com/|creationTime|2019-02-15T14:52:32Z
+http://code.google.com/p/timemap/|creationDate|2012-11-28
+http://code.google.com/p/timemap/|tag|http://www.semanlink.net/tag/simile_timeline
+http://code.google.com/p/timemap/|tag|http://www.semanlink.net/tag/google_maps
+http://code.google.com/p/timemap/|title|timemap - Javascript library to help use a SIMILE timeline with online maps including Google, OpenLayers, and Bing. - Google Project Hosting
+http://code.google.com/p/timemap/|creationTime|2012-11-28T00:15:27Z
+http://www.onlinetools.org/articles/unobtrusivejavascript/chapter4.html|creationDate|2009-04-09
+http://www.onlinetools.org/articles/unobtrusivejavascript/chapter4.html|tag|http://www.semanlink.net/tag/javascript_tips
+http://www.onlinetools.org/articles/unobtrusivejavascript/chapter4.html|title|How to call scripts - Unobtrusive Javascript
+http://www.onlinetools.org/articles/unobtrusivejavascript/chapter4.html|creationTime|2009-04-09T16:08:26Z
+http://code.google.com/p/google-refine/|creationDate|2011-01-18
+http://code.google.com/p/google-refine/|tag|http://www.semanlink.net/tag/freebase
+http://code.google.com/p/google-refine/|tag|http://www.semanlink.net/tag/google_refine
+http://code.google.com/p/google-refine/|comment|Google Refine is a power tool for working with messy data, cleaning it up, transforming it from one format into another, extending it with web services, and linking it to databases like Freebase.
+http://code.google.com/p/google-refine/|title|google-refine - Project Hosting on Google Code
+http://code.google.com/p/google-refine/|creationTime|2011-01-18T15:38:03Z
+http://webseitz.fluxent.com/wiki/MindMapping|creationDate|2005-09-25
+http://webseitz.fluxent.com/wiki/MindMapping|tag|http://www.semanlink.net/tag/mind_mapping
+http://webseitz.fluxent.com/wiki/MindMapping|tag|http://www.semanlink.net/tag/graph_visualization
+http://webseitz.fluxent.com/wiki/MindMapping|title|Mind Mapping (WebSeitz/wikilog)
+https://act.greenpeace.org/page/14369/petition/1|creationDate|2017-12-07
+https://act.greenpeace.org/page/14369/petition/1|tag|http://www.semanlink.net/tag/industrie_nucleaire
+https://act.greenpeace.org/page/14369/petition/1|tag|http://www.semanlink.net/tag/greenpeace
+https://act.greenpeace.org/page/14369/petition/1|tag|http://www.semanlink.net/tag/edf
+https://act.greenpeace.org/page/14369/petition/1|title|EDF : c’est pas bientôt fini le nucléaire ?
+https://act.greenpeace.org/page/14369/petition/1|creationTime|2017-12-07T00:38:02Z +http://www.pbs.org/wgbh/nova/einstein/|creationDate|2005-10-10 +http://www.pbs.org/wgbh/nova/einstein/|tag|http://www.semanlink.net/tag/einstein +http://www.pbs.org/wgbh/nova/einstein/|tag|http://www.semanlink.net/tag/pbs +http://www.pbs.org/wgbh/nova/einstein/|title|NOVA Einstein's Big Idea PBS +https://www.letemps.ch/culture/2012/09/24/voila-lallemand-met-verbe-fin?utm_source=twitter&utm_medium=share&utm_campaign=article|creationDate|2016-12-12 +https://www.letemps.ch/culture/2012/09/24/voila-lallemand-met-verbe-fin?utm_source=twitter&utm_medium=share&utm_campaign=article|tag|http://www.semanlink.net/tag/langues_vivantes +https://www.letemps.ch/culture/2012/09/24/voila-lallemand-met-verbe-fin?utm_source=twitter&utm_medium=share&utm_campaign=article|tag|http://www.semanlink.net/tag/deutsch +https://www.letemps.ch/culture/2012/09/24/voila-lallemand-met-verbe-fin?utm_source=twitter&utm_medium=share&utm_campaign=article|title|Et voilà pourquoi l’allemand met le verbe à la fin - Le Temps +https://www.letemps.ch/culture/2012/09/24/voila-lallemand-met-verbe-fin?utm_source=twitter&utm_medium=share&utm_campaign=article|creationTime|2016-12-12T19:26:56Z +http://readwrite.com/2013/10/17/evernote-addons#awesm=~okELddEpQbFm2M|creationDate|2013-10-19 +http://readwrite.com/2013/10/17/evernote-addons#awesm=~okELddEpQbFm2M|tag|http://www.semanlink.net/tag/evernote +http://readwrite.com/2013/10/17/evernote-addons#awesm=~okELddEpQbFm2M|title|10 Ways To Make Evernote Rock Harder – ReadWrite +http://readwrite.com/2013/10/17/evernote-addons#awesm=~okELddEpQbFm2M|creationTime|2013-10-19T00:16:22Z +http://alias-i.com/lingpipe/index.html|creationDate|2010-05-14 +http://alias-i.com/lingpipe/index.html|tag|http://www.semanlink.net/tag/nlp_tools +http://alias-i.com/lingpipe/index.html|comment|"Suite of Java libraries for the linguistic analysis of human language. +" +http://alias-i.com/lingpipe/index.html|title|LingPipe +http://alias-i.com/lingpipe/index.html|creationTime|2010-05-14T13:29:19Z +http://www.nzlinux.org.nz/blogs/2006/03/03/scoping-a-semantic-wiki/|creationDate|2006-03-03 +http://www.nzlinux.org.nz/blogs/2006/03/03/scoping-a-semantic-wiki/|tag|http://www.semanlink.net/tag/semantic_wiki +http://www.nzlinux.org.nz/blogs/2006/03/03/scoping-a-semantic-wiki/|title|Visions of Aestia » Scoping a Semantic Wiki +http://www.wikimedia.fr/afripedia|creationDate|2012-11-19 +http://www.wikimedia.fr/afripedia|tag|http://www.semanlink.net/tag/afripedia +http://www.wikimedia.fr/afripedia|title|Afripedia : un partenariat avec l'Agence universitaire de la francophonie et l'Institut français pour développer Wikipédia en Afrique Wikimédia France +http://www.wikimedia.fr/afripedia|creationTime|2012-11-19T16:52:50Z +http://semweb.salzburgresearch.at/apps/rdf-gravity/index.html|creationDate|2005-09-25 +http://semweb.salzburgresearch.at/apps/rdf-gravity/index.html|tag|http://www.semanlink.net/tag/graph_visualization +http://semweb.salzburgresearch.at/apps/rdf-gravity/index.html|tag|http://www.semanlink.net/tag/rdf +http://semweb.salzburgresearch.at/apps/rdf-gravity/index.html|comment|RDF Gravity is a tool for visualising RDF/OWL Graphs/ ontologies. 
+http://semweb.salzburgresearch.at/apps/rdf-gravity/index.html|title|RDF-Gravity
+http://maps.google.fr/maps/place?hl=fr&um=1&ie=UTF-8&q=paris+%22francois+bonvin%22+restaurant&fb=1&gl=fr&hq=%22francois+bonvin%22+restaurant&hnear=Paris&cid=13847008823922781253|creationDate|2010-07-17
+http://maps.google.fr/maps/place?hl=fr&um=1&ie=UTF-8&q=paris+%22francois+bonvin%22+restaurant&fb=1&gl=fr&hq=%22francois+bonvin%22+restaurant&hnear=Paris&cid=13847008823922781253|tag|http://www.semanlink.net/tag/restaurant
+http://maps.google.fr/maps/place?hl=fr&um=1&ie=UTF-8&q=paris+%22francois+bonvin%22+restaurant&fb=1&gl=fr&hq=%22francois+bonvin%22+restaurant&hnear=Paris&cid=13847008823922781253|title|Le Troquet - restau rue François Bonvin, 15e
+http://maps.google.fr/maps/place?hl=fr&um=1&ie=UTF-8&q=paris+%22francois+bonvin%22+restaurant&fb=1&gl=fr&hq=%22francois+bonvin%22+restaurant&hnear=Paris&cid=13847008823922781253|creationTime|2010-07-17T14:54:42Z
+http://www.w3.org/TR/grddl-primer/|creationDate|2007-07-17
+http://www.w3.org/TR/grddl-primer/|tag|http://www.semanlink.net/tag/grddl
+http://www.w3.org/TR/grddl-primer/|tag|http://www.semanlink.net/tag/w3c
+http://www.w3.org/TR/grddl-primer/|title|GRDDL Primer
+http://www.w3.org/TR/grddl-primer/|creationTime|2007-07-17T22:57:02Z
+http://pro.01net.com/editorial/565289/semweb-pro-le-web-semantique-demontre-son-utilite-en-entreprise/|creationDate|2012-05-03
+http://pro.01net.com/editorial/565289/semweb-pro-le-web-semantique-demontre-son-utilite-en-entreprise/|tag|http://www.semanlink.net/tag/semweb_pro_2012
+http://pro.01net.com/editorial/565289/semweb-pro-le-web-semantique-demontre-son-utilite-en-entreprise/|tag|http://www.semanlink.net/tag/c2gweb
+http://pro.01net.com/editorial/565289/semweb-pro-le-web-semantique-demontre-son-utilite-en-entreprise/|title|SemWeb.Pro : le web sémantique démontre son utilité en entreprise #semwebpro
+http://pro.01net.com/editorial/565289/semweb-pro-le-web-semantique-demontre-son-utilite-en-entreprise/|creationTime|2012-05-03T22:11:14Z
+https://thetokenizer.com/2013/04/28/build-your-own-summary-tool/|creationDate|2017-05-24
+https://thetokenizer.com/2013/04/28/build-your-own-summary-tool/|tag|http://www.semanlink.net/tag/nlp_sample_code
+https://thetokenizer.com/2013/04/28/build-your-own-summary-tool/|tag|http://www.semanlink.net/tag/automatic_summarization
+https://thetokenizer.com/2013/04/28/build-your-own-summary-tool/|title|Build your own summary tool! The Tokenizer
+https://thetokenizer.com/2013/04/28/build-your-own-summary-tool/|creationTime|2017-05-24T17:56:43Z
+http://www.slideshare.net/lespetitescases/lapport-des-technologies-du-web-smantique-la-gestion-des-donnes-structures|creationDate|2009-04-06
+http://www.slideshare.net/lespetitescases/lapport-des-technologies-du-web-smantique-la-gestion-des-donnes-structures|tag|http://www.semanlink.net/tag/semantic_web_evangelization
+http://www.slideshare.net/lespetitescases/lapport-des-technologies-du-web-smantique-la-gestion-des-donnes-structures|tag|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web
+http://www.slideshare.net/lespetitescases/lapport-des-technologies-du-web-smantique-la-gestion-des-donnes-structures|tag|http://www.semanlink.net/tag/gautier_poupeau
+http://www.slideshare.net/lespetitescases/lapport-des-technologies-du-web-smantique-la-gestion-des-donnes-structures|title|L’apport des technologies du Web sémantique à la gestion des données structurées
+http://www.slideshare.net/lespetitescases/lapport-des-technologies-du-web-smantique-la-gestion-des-donnes-structures|creationTime|2009-04-06T15:00:59Z
+http://www.salon.com/technology/feature/2006/09/14/basic|creationDate|2011-07-01
+http://www.salon.com/technology/feature/2006/09/14/basic|tag|http://www.semanlink.net/tag/education
+http://www.salon.com/technology/feature/2006/09/14/basic|tag|http://www.semanlink.net/tag/basic
+http://www.salon.com/technology/feature/2006/09/14/basic|comment|Microsoft and Apple and all the big-time education-computerizing reformers of the MIT Media Lab... seem bent on providing information consumption devices, not tools that teach creative thinking and technological mastery.
+http://www.salon.com/technology/feature/2006/09/14/basic|title|Why Johnny can't code
+http://www.salon.com/technology/feature/2006/09/14/basic|creationTime|2011-07-01T14:37:52Z
+http://www.mkbergman.com/466/thinking-inside-the-box-with-description-logics/|creationDate|2011-01-05
+http://www.mkbergman.com/466/thinking-inside-the-box-with-description-logics/|tag|http://www.semanlink.net/tag/linked_data
+http://www.mkbergman.com/466/thinking-inside-the-box-with-description-logics/|tag|http://www.semanlink.net/tag/mike_bergman
+http://www.mkbergman.com/466/thinking-inside-the-box-with-description-logics/|tag|http://www.semanlink.net/tag/description_logic
+http://www.mkbergman.com/466/thinking-inside-the-box-with-description-logics/|title|Thinking ‘Inside the Box’ with Description Logics » AI3:::Adaptive Information
+http://www.mkbergman.com/466/thinking-inside-the-box-with-description-logics/|creationTime|2011-01-05T16:56:00Z
+http://en.wikipedia.org/wiki/Terra_preta|creationDate|2012-12-15
+http://en.wikipedia.org/wiki/Terra_preta|tag|http://www.semanlink.net/tag/amazonie
+http://en.wikipedia.org/wiki/Terra_preta|title|Terra preta - Wikipedia, the free encyclopedia
+http://en.wikipedia.org/wiki/Terra_preta|creationTime|2012-12-15T16:46:59Z
+http://www.ensor.cc/2011/06/mavens-war-overlay-what-are-war.html|creationDate|2012-08-14
+http://www.ensor.cc/2011/06/mavens-war-overlay-what-are-war.html|tag|http://www.semanlink.net/tag/maven
+http://www.ensor.cc/2011/06/mavens-war-overlay-what-are-war.html|comment|Overlays allow you to combine many similar resources, for example, images, CSS and JavaScript files all into one project. At build time, the overlay will be predictably merged into a master WAR project.
+http://www.ensor.cc/2011/06/mavens-war-overlay-what-are-war.html|title|Mike's Site: Maven's WAR Overlay: What are WAR Overlays? +http://www.ensor.cc/2011/06/mavens-war-overlay-what-are-war.html|creationTime|2012-08-14T23:31:24Z +https://www.quora.com/How-do-RNNs-differ-from-Markov-Chains|creationDate|2018-11-15 +https://www.quora.com/How-do-RNNs-differ-from-Markov-Chains|tag|http://www.semanlink.net/tag/recurrent_neural_network +https://www.quora.com/How-do-RNNs-differ-from-Markov-Chains|tag|http://www.semanlink.net/tag/markov_model +https://www.quora.com/How-do-RNNs-differ-from-Markov-Chains|title|How do RNNs differ from Markov Chains? - Quora +https://www.quora.com/How-do-RNNs-differ-from-Markov-Chains|creationTime|2018-11-15T01:06:06Z +http://www.thebrain.com|creationDate|2005-09-25 +http://www.thebrain.com|tag|http://www.semanlink.net/tag/graph +http://www.thebrain.com|title|TheBrain Technologies Corporation +http://www.opencontentalliance.org/|creationDate|2005-11-16 +http://www.opencontentalliance.org/|tag|http://www.semanlink.net/tag/bibliotheque_numerique +http://www.opencontentalliance.org/|comment|The Open Content Alliance (OCA) represents the collaborative efforts of a group of cultural, technology, nonprofit, and governmental organizations from around the world that will help build a permanent archive of multilingual digitized text and multimedia content. Content in the OCA archive will be accessible soon through this website and through Yahoo! +http://www.opencontentalliance.org/|title|Open Content Alliance (OCA) - Home +http://www.lemonde.fr/web/recherche_breve/1,13-0,37-955744,0.html|creationDate|2006-08-29 +http://www.lemonde.fr/web/recherche_breve/1,13-0,37-955744,0.html|tag|http://www.semanlink.net/tag/conjecture_de_poincare +http://www.lemonde.fr/web/recherche_breve/1,13-0,37-955744,0.html|title|Elémentaire, mon cher Poincaré +http://www.lemonde.fr/web/recherche_breve/1,13-0,37-955744,0.html|source|Le Monde +http://www.lemonde.fr/web/recherche_breve/1,13-0,37-955744,0.html|date|2006-08-20 +http://www.betaversion.org/~stefano/linotype/news/93/|creationDate|2005-10-12 +http://www.betaversion.org/~stefano/linotype/news/93/|tag|http://www.semanlink.net/tag/good +http://www.betaversion.org/~stefano/linotype/news/93/|tag|http://www.semanlink.net/tag/software_design +http://www.betaversion.org/~stefano/linotype/news/93/|tag|http://www.semanlink.net/tag/stefano_mazzocchi +http://www.betaversion.org/~stefano/linotype/news/93/|tag|http://www.semanlink.net/tag/semantic_web +http://www.betaversion.org/~stefano/linotype/news/93/|tag|http://www.semanlink.net/tag/rdf +http://www.betaversion.org/~stefano/linotype/news/93/|title|Stefano's Linotype ~ Data First vs. Structure First +http://arstechnica.com/articles/culture/drmhacks.ars/1|creationDate|2006-07-20 +http://arstechnica.com/articles/culture/drmhacks.ars/1|tag|http://www.semanlink.net/tag/no_more_drm +http://arstechnica.com/articles/culture/drmhacks.ars/1|title|Hacking Digital Rights Management +https://twitter.com/seanjtaylor/status/1073632404286275584|creationDate|2018-12-15 +https://twitter.com/seanjtaylor/status/1073632404286275584|tag|http://www.semanlink.net/tag/reseaux_bayesiens +https://twitter.com/seanjtaylor/status/1073632404286275584|title|"Sean J. Taylor sur Twitter : ""A couple days ago another team asked me to speak about Bayesian data analysis..." 
+https://twitter.com/seanjtaylor/status/1073632404286275584|creationTime|2018-12-15T10:14:41Z +http://cain.ice.ucdavis.edu/semanticnaturalist/|creationDate|2007-09-13 +http://cain.ice.ucdavis.edu/semanticnaturalist/|tag|http://www.semanlink.net/tag/histoire_naturelle +http://cain.ice.ucdavis.edu/semanticnaturalist/|tag|http://www.semanlink.net/tag/linked_data +http://cain.ice.ucdavis.edu/semanticnaturalist/|tag|http://www.semanlink.net/tag/biodiversity_data +http://cain.ice.ucdavis.edu/semanticnaturalist/|tag|http://www.semanlink.net/tag/semantic_web_blog +http://cain.ice.ucdavis.edu/semanticnaturalist/|comment|Musings on natural history, geography, and the semantic web (Blog). +http://cain.ice.ucdavis.edu/semanticnaturalist/|title|The Semantic Naturalist +http://cain.ice.ucdavis.edu/semanticnaturalist/|creationTime|2007-09-13T21:52:57Z +http://www.lemagit.fr/actualites/2240205636/Open-Data-le-Premier-Ministre-met-la-pression-sur-les-ministeres?goback=%2Egde_4158686_member_275093262#%21|creationDate|2013-09-22 +http://www.lemagit.fr/actualites/2240205636/Open-Data-le-Premier-Ministre-met-la-pression-sur-les-ministeres?goback=%2Egde_4158686_member_275093262#%21|tag|http://www.semanlink.net/tag/data_gouv_fr +http://www.lemagit.fr/actualites/2240205636/Open-Data-le-Premier-Ministre-met-la-pression-sur-les-ministeres?goback=%2Egde_4158686_member_275093262#%21|tag|http://www.semanlink.net/tag/open_data +http://www.lemagit.fr/actualites/2240205636/Open-Data-le-Premier-Ministre-met-la-pression-sur-les-ministeres?goback=%2Egde_4158686_member_275093262#%21|tag|http://www.semanlink.net/tag/ayrault +http://www.lemagit.fr/actualites/2240205636/Open-Data-le-Premier-Ministre-met-la-pression-sur-les-ministeres?goback=%2Egde_4158686_member_275093262#%21|title|Open Data : le Premier Ministre met la pression sur les ministères +http://www.lemagit.fr/actualites/2240205636/Open-Data-le-Premier-Ministre-met-la-pression-sur-les-ministeres?goback=%2Egde_4158686_member_275093262#%21|creationTime|2013-09-22T12:22:01Z +http://www.lemonde.fr/culture/article/2018/03/29/en-syrie-le-plus-ancien-palais-de-l-humanite-detruit-par-l-organisation-etat-islamique_5278308_3246.html|creationDate|2018-03-31 +http://www.lemonde.fr/culture/article/2018/03/29/en-syrie-le-plus-ancien-palais-de-l-humanite-detruit-par-l-organisation-etat-islamique_5278308_3246.html|tag|http://www.semanlink.net/tag/ei +http://www.lemonde.fr/culture/article/2018/03/29/en-syrie-le-plus-ancien-palais-de-l-humanite-detruit-par-l-organisation-etat-islamique_5278308_3246.html|tag|http://www.semanlink.net/tag/abrutis +http://www.lemonde.fr/culture/article/2018/03/29/en-syrie-le-plus-ancien-palais-de-l-humanite-detruit-par-l-organisation-etat-islamique_5278308_3246.html|tag|http://www.semanlink.net/tag/destruction_de_vestiges_antiques +http://www.lemonde.fr/culture/article/2018/03/29/en-syrie-le-plus-ancien-palais-de-l-humanite-detruit-par-l-organisation-etat-islamique_5278308_3246.html|tag|http://www.semanlink.net/tag/syrian_civil_war +http://www.lemonde.fr/culture/article/2018/03/29/en-syrie-le-plus-ancien-palais-de-l-humanite-detruit-par-l-organisation-etat-islamique_5278308_3246.html|title|En Syrie, le plus ancien palais de l’humanité détruit par l’organisation Etat islamique +http://www.lemonde.fr/culture/article/2018/03/29/en-syrie-le-plus-ancien-palais-de-l-humanite-detruit-par-l-organisation-etat-islamique_5278308_3246.html|creationTime|2018-03-31T10:02:36Z +http://rww.io/help.php|creationDate|2013-09-22 
+http://rww.io/help.php|tag|http://www.semanlink.net/tag/to_see +http://rww.io/help.php|tag|http://www.semanlink.net/tag/personal_cloud +http://rww.io/help.php|tag|http://www.semanlink.net/tag/read_write_linked_data +http://rww.io/help.php|title|rww.io: data cloud +http://rww.io/help.php|creationTime|2013-09-22T13:14:59Z +http://rapid-i.com/rapidforum/index.php|creationDate|2013-09-13 +http://rapid-i.com/rapidforum/index.php|tag|http://www.semanlink.net/tag/rapidminer +http://rapid-i.com/rapidforum/index.php|title|Rapid-I Forum +http://rapid-i.com/rapidforum/index.php|creationTime|2013-09-13T00:27:13Z +http://www.culturecommunication.gouv.fr/Ressources/HADOC/Referentiels2/Les-vocabulaires-scientifiques-et-techniques/L-application-GINCO|creationDate|2014-07-26 +http://www.culturecommunication.gouv.fr/Ressources/HADOC/Referentiels2/Les-vocabulaires-scientifiques-et-techniques/L-application-GINCO|tag|http://www.semanlink.net/tag/ginco_culture +http://www.culturecommunication.gouv.fr/Ressources/HADOC/Referentiels2/Les-vocabulaires-scientifiques-et-techniques/L-application-GINCO|title|L'application GINCO - Ministère de la Culture et de la Communication +http://www.culturecommunication.gouv.fr/Ressources/HADOC/Referentiels2/Les-vocabulaires-scientifiques-et-techniques/L-application-GINCO|creationTime|2014-07-26T02:11:45Z +https://fr.slideshare.net/PetarRistoski/rdf2vec-rdf-graph-embeddings-for-data-mining|creationDate|2018-01-03 +https://fr.slideshare.net/PetarRistoski/rdf2vec-rdf-graph-embeddings-for-data-mining|tag|http://www.semanlink.net/tag/rdf2vec +https://fr.slideshare.net/PetarRistoski/rdf2vec-rdf-graph-embeddings-for-data-mining|title|RDF2Vec: RDF Graph Embeddings for Data Mining +https://fr.slideshare.net/PetarRistoski/rdf2vec-rdf-graph-embeddings-for-data-mining|creationTime|2018-01-03T16:50:08Z +http://bblfish.net/tmp/2014/11/05/SemWebPro2014.pdf|creationDate|2014-11-08 +http://bblfish.net/tmp/2014/11/05/SemWebPro2014.pdf|tag|http://www.semanlink.net/tag/read_write_linked_data +http://bblfish.net/tmp/2014/11/05/SemWebPro2014.pdf|tag|http://www.semanlink.net/tag/henry_story +http://bblfish.net/tmp/2014/11/05/SemWebPro2014.pdf|tag|http://www.semanlink.net/tag/linked_data_platform +http://bblfish.net/tmp/2014/11/05/SemWebPro2014.pdf|tag|http://www.semanlink.net/tag/read_write_secure_data_web +http://bblfish.net/tmp/2014/11/05/SemWebPro2014.pdf|title|The Read-Write Secure Data Web, Henry Story at semwebpro 2014 +http://bblfish.net/tmp/2014/11/05/SemWebPro2014.pdf|creationTime|2014-11-08T15:08:37Z +https://machinelearningmastery.com/keras-functional-api-deep-learning/|creationDate|2018-12-12 +https://machinelearningmastery.com/keras-functional-api-deep-learning/|tag|http://www.semanlink.net/tag/keras_functional_api +https://machinelearningmastery.com/keras-functional-api-deep-learning/|title|How to Use the Keras Functional API for Deep Learning +https://machinelearningmastery.com/keras-functional-api-deep-learning/|creationTime|2018-12-12T11:35:54Z +http://today.java.net/lpt/a/225|creationDate|2006-12-19 +http://today.java.net/lpt/a/225|tag|http://www.semanlink.net/tag/java_dev +http://today.java.net/lpt/a/225|tag|http://www.semanlink.net/tag/xss +http://today.java.net/lpt/a/225|tag|http://www.semanlink.net/tag/web_dev +http://today.java.net/lpt/a/225|title|Handling Java Web Application Input +http://www.mkbergman.com/287/comprehensive-listing-of-175-semantic-web-tools/|creationDate|2011-01-22 
+http://www.mkbergman.com/287/comprehensive-listing-of-175-semantic-web-tools/|tag|http://www.semanlink.net/tag/mike_bergman
+http://www.mkbergman.com/287/comprehensive-listing-of-175-semantic-web-tools/|tag|http://www.semanlink.net/tag/semantic_web_tools
+http://www.mkbergman.com/287/comprehensive-listing-of-175-semantic-web-tools/|title|Comprehensive Listing of 175 Semantic Web Tools » AI3:::Adaptive Information
+http://www.mkbergman.com/287/comprehensive-listing-of-175-semantic-web-tools/|creationTime|2011-01-22T00:03:07Z
+http://dannyayers.com/2011/06/15/httpRange-14-Reflux|creationDate|2011-06-15
+http://dannyayers.com/2011/06/15/httpRange-14-Reflux|tag|http://www.semanlink.net/tag/danny_ayers
+http://dannyayers.com/2011/06/15/httpRange-14-Reflux|tag|http://www.semanlink.net/tag/microdata
+http://dannyayers.com/2011/06/15/httpRange-14-Reflux|tag|http://www.semanlink.net/tag/httprange_14
+http://dannyayers.com/2011/06/15/httpRange-14-Reflux|comment|you can't squeeze a dog over the wire with HTTP, but that's just a limitation of the protocol
+http://dannyayers.com/2011/06/15/httpRange-14-Reflux|title|httpRange-14 Reflux - Danny Ayers : Raw Blog
+http://dannyayers.com/2011/06/15/httpRange-14-Reflux|creationTime|2011-06-15T17:46:03Z
+http://www.w3.org/TR/urls-in-data/|creationDate|2013-07-11
+http://www.w3.org/TR/urls-in-data/|tag|http://www.semanlink.net/tag/httprange_14
+http://www.w3.org/TR/urls-in-data/|tag|http://www.semanlink.net/tag/jeni_tennison
+http://www.w3.org/TR/urls-in-data/|title|URLs in Data Primer
+http://www.w3.org/TR/urls-in-data/|creationTime|2013-07-11T15:45:15Z
+http://www.fiftyfoureleven.com/resources/programming/javascript|creationDate|2005-05-20
+http://www.fiftyfoureleven.com/resources/programming/javascript|tag|http://www.semanlink.net/tag/ajax
+http://www.fiftyfoureleven.com/resources/programming/javascript|tag|http://www.semanlink.net/tag/javascript
+http://www.fiftyfoureleven.com/resources/programming/javascript|tag|http://www.semanlink.net/tag/links
+http://www.fiftyfoureleven.com/resources/programming/javascript|title|Javascript - Links and Resources, Fiftyfoureleven.com
+http://stefansavev.com/blog/beyond-cosine-similarity/|creationDate|2017-07-21
+http://stefansavev.com/blog/beyond-cosine-similarity/|tag|http://www.semanlink.net/tag/cosine_similarity
+http://stefansavev.com/blog/beyond-cosine-similarity/|title|Beyond Cosine Similarity - Algorithms for Big Data
+http://stefansavev.com/blog/beyond-cosine-similarity/|creationTime|2017-07-21T11:44:52Z
+http://answers.semanticweb.com/questions/12147/whats-the-best-way-to-parameterize-sparql-queries|creationDate|2014-10-19
+http://answers.semanticweb.com/questions/12147/whats-the-best-way-to-parameterize-sparql-queries|tag|http://www.semanlink.net/tag/sparql
+http://answers.semanticweb.com/questions/12147/whats-the-best-way-to-parameterize-sparql-queries|title|What's the best way to parameterize SPARQL queries? - ANSWERS
+http://answers.semanticweb.com/questions/12147/whats-the-best-way-to-parameterize-sparql-queries|creationTime|2014-10-19T18:44:59Z
+http://www.livescience.com/43270-new-burgess-shale-fossils-canada.html|creationDate|2014-02-17
+http://www.livescience.com/43270-new-burgess-shale-fossils-canada.html|tag|http://www.semanlink.net/tag/explosion_cambrienne
+http://www.livescience.com/43270-new-burgess-shale-fossils-canada.html|title|'Mother Lode' of Amazingly Preserved Fossils Discovered in Canada LiveScience
+http://www.livescience.com/43270-new-burgess-shale-fossils-canada.html|creationTime|2014-02-17T00:05:44Z
+https://github.com/ffftzh/BTM-Java|creationDate|2017-06-08
+https://github.com/ffftzh/BTM-Java|tag|http://www.semanlink.net/tag/biterm_topic_model
+https://github.com/ffftzh/BTM-Java|title|ffftzh/BTM-Java: A java implement of Biterm Topic Model
+https://github.com/ffftzh/BTM-Java|creationTime|2017-06-08T01:01:00Z
+https://www.w3.org/2016/04/blockchain-workshop/report|creationDate|2016-08-29
+https://www.w3.org/2016/04/blockchain-workshop/report|tag|http://www.semanlink.net/tag/w3c
+https://www.w3.org/2016/04/blockchain-workshop/report|tag|http://www.semanlink.net/tag/blockchain
+https://www.w3.org/2016/04/blockchain-workshop/report|title|W3C Blockchains and the Web Workshop Report
+https://www.w3.org/2016/04/blockchain-workshop/report|creationTime|2016-08-29T18:35:15Z
+https://blog.wikimedia.org/2016/10/30/histropedia/|creationDate|2017-11-01
+https://blog.wikimedia.org/2016/10/30/histropedia/|tag|http://www.semanlink.net/tag/histropedia
+https://blog.wikimedia.org/2016/10/30/histropedia/|title|Histropedia: “The power of data visualisation combined with free knowledge” – Wikimedia Blog
+https://blog.wikimedia.org/2016/10/30/histropedia/|creationTime|2017-11-01T14:02:25Z
+http://orange.biolab.si/|creationDate|2013-05-26
+http://orange.biolab.si/|tag|http://www.semanlink.net/tag/orange_data_mining
+http://orange.biolab.si/|title|Orange – Data Mining Fruitful & Fun
+http://orange.biolab.si/|creationTime|2013-05-26T14:50:09Z
+https://docs.google.com/presentation/d/17CGWPwu59GB7miyY1ErTjr4Wb-kS-rM7dB3MAMVO9HU/pub?slide=id.p|creationDate|2015-07-16
+https://docs.google.com/presentation/d/17CGWPwu59GB7miyY1ErTjr4Wb-kS-rM7dB3MAMVO9HU/pub?slide=id.p|tag|http://www.semanlink.net/tag/firefox
+https://docs.google.com/presentation/d/17CGWPwu59GB7miyY1ErTjr4Wb-kS-rM7dB3MAMVO9HU/pub?slide=id.p|title|Pin the Web - Firefox OS Design Concept
+https://docs.google.com/presentation/d/17CGWPwu59GB7miyY1ErTjr4Wb-kS-rM7dB3MAMVO9HU/pub?slide=id.p|creationTime|2015-07-16T11:49:41Z
+https://mike.place/talks/serverless/|creationDate|2018-08-29
+https://mike.place/talks/serverless/|tag|http://www.semanlink.net/tag/serverless
+https://mike.place/talks/serverless/|title|Serverless for data scientists
+https://mike.place/talks/serverless/|creationTime|2018-08-29T16:17:29Z
+http://www.vub.ac.be/BIBLIO/nieuwenhuysen/african-art/african-art-links.html|creationDate|2005-04-07
+http://www.vub.ac.be/BIBLIO/nieuwenhuysen/african-art/african-art-links.html|tag|http://www.semanlink.net/tag/links
+http://www.vub.ac.be/BIBLIO/nieuwenhuysen/african-art/african-art-links.html|tag|http://www.semanlink.net/tag/art_d_afrique
+http://www.vub.ac.be/BIBLIO/nieuwenhuysen/african-art/african-art-links.html|title|African art links
+http://blogs.esa.int/rosetta/2014/11/19/did-philae-drill-the-comet/|creationDate|2014-11-19
+http://blogs.esa.int/rosetta/2014/11/19/did-philae-drill-the-comet/|tag|http://www.semanlink.net/tag/philae +http://blogs.esa.int/rosetta/2014/11/19/did-philae-drill-the-comet/|title|Did Philae drill the comet? Rosetta - ESA's comet chaser +http://blogs.esa.int/rosetta/2014/11/19/did-philae-drill-the-comet/|creationTime|2014-11-19T17:19:17Z +http://www.mpi-inf.mpg.de/~suchanek/downloads/yago/|creationDate|2007-05-23 +http://www.mpi-inf.mpg.de/~suchanek/downloads/yago/|tag|http://www.semanlink.net/tag/yago +http://www.mpi-inf.mpg.de/~suchanek/downloads/yago/|title|Yago - A Core of Semantic Knowledge +http://www.mpi-inf.mpg.de/~suchanek/downloads/yago/|creationTime|2007-05-23T21:32:51Z +https://en.wikipedia.org/wiki/Viva_Zapata!#Awards|creationDate|2017-02-05 +https://en.wikipedia.org/wiki/Viva_Zapata!#Awards|tag|http://www.semanlink.net/tag/film_americain +https://en.wikipedia.org/wiki/Viva_Zapata!#Awards|tag|http://www.semanlink.net/tag/zapata +https://en.wikipedia.org/wiki/Viva_Zapata!#Awards|tag|http://www.semanlink.net/tag/marlon_brando +https://en.wikipedia.org/wiki/Viva_Zapata!#Awards|tag|http://www.semanlink.net/tag/john_steinbeck +https://en.wikipedia.org/wiki/Viva_Zapata!#Awards|title|Viva Zapata! +https://en.wikipedia.org/wiki/Viva_Zapata!#Awards|creationTime|2017-02-05T00:57:17Z +http://lucene.apache.org/solr/|creationDate|2012-05-03 +http://lucene.apache.org/solr/|tag|http://www.semanlink.net/tag/solr +http://lucene.apache.org/solr/|tag|http://www.semanlink.net/tag/apache_org +http://lucene.apache.org/solr/|title|Apache Solr +http://lucene.apache.org/solr/|creationTime|2012-05-03T14:47:07Z +http://www.mycarevent.com/default.aspx|creationDate|2006-06-02 +http://www.mycarevent.com/default.aspx|tag|http://www.semanlink.net/tag/automobile +http://www.mycarevent.com/default.aspx|comment|"MYCAREVENT - MobilitY and CollAboRative Work in European Vehicle Emergency NeTworks – is an Integrated Project sponsored by the European Commission in support of the Strategic Objective “Information Society Technologies (IST)” in the Sixth Framework Program.
+The After Sales Market of the European Automotive Industry has become a very important market. The introduction of innovative mobile applications will enable new ways of working and collaboration among car manufacturers, workshops, road assistance services and the customer, who will all benefit from this.
+"
+http://www.mycarevent.com/default.aspx|title|MYCAREVENT
+http://www.devx.com/semantic/Article/42543|creationDate|2011-02-15
+http://www.devx.com/semantic/Article/42543|tag|http://www.semanlink.net/tag/bob_ducharme
+http://www.devx.com/semantic/Article/42543|tag|http://www.semanlink.net/tag/rdfa
+http://www.devx.com/semantic/Article/42543|tag|http://www.semanlink.net/tag/dita
+http://www.devx.com/semantic/Article/42543|title|Using RDFa with DITA and DocBook
+http://www.devx.com/semantic/Article/42543|creationTime|2011-02-15T11:51:15Z
+http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/|creationDate|2016-09-18
+http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/|tag|http://www.semanlink.net/tag/fact_checking
+http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/|tag|http://www.semanlink.net/tag/brexit
+http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/|tag|http://www.semanlink.net/tag/social_networks
+http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/|tag|http://www.semanlink.net/tag/verite
+http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/|tag|http://www.semanlink.net/tag/big_data
+http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/|tag|http://www.semanlink.net/tag/post_verite
+http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/|tag|http://www.semanlink.net/tag/algorithmes
+http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/|title|La propagande des algorithmes ? Vraiment ? InternetActu
+http://internetactu.blog.lemonde.fr/2016/09/17/la-propagande-des-algorithmes-vraiment/|creationTime|2016-09-18T11:21:00Z
+https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html|creationDate|2017-09-30
+https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html|tag|http://www.semanlink.net/tag/keras
+https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html|tag|http://www.semanlink.net/tag/sequence_to_sequence_learning
+https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html|comment|Sequence-to-sequence learning (Seq2Seq) is about training models to convert sequences from one domain (e.g. sentences in English) to sequences in another domain (e.g. the same sentences translated to French).
+https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html|title|A ten-minute introduction to sequence-to-sequence learning in Keras +https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html|creationTime|2017-09-30T10:54:53Z +https://pdfs.semanticscholar.org/8b40/b159c2316dbea297a301a9c561b1d9873c4a.pdf|creationDate|2017-11-12 +https://pdfs.semanticscholar.org/8b40/b159c2316dbea297a301a9c561b1d9873c4a.pdf|tag|http://www.semanlink.net/tag/cross_lingual_nlp +https://pdfs.semanticscholar.org/8b40/b159c2316dbea297a301a9c561b1d9873c4a.pdf|tag|http://www.semanlink.net/tag/information_retrieval +https://pdfs.semanticscholar.org/8b40/b159c2316dbea297a301a9c561b1d9873c4a.pdf|tag|http://www.semanlink.net/tag/using_word_embedding +https://pdfs.semanticscholar.org/8b40/b159c2316dbea297a301a9c561b1d9873c4a.pdf|title|Monolingual and Cross-Lingual Information Retrieval Models Based on (Bilingual) Word Embeddings (2015) +https://pdfs.semanticscholar.org/8b40/b159c2316dbea297a301a9c561b1d9873c4a.pdf|creationTime|2017-11-12T02:35:24Z +http://www.radioactivehq.org|creationDate|2005-10-10 +http://www.radioactivehq.org|tag|http://www.semanlink.net/tag/rfid +http://www.radioactivehq.org|tag|http://www.semanlink.net/tag/open_source +http://www.radioactivehq.org|title|The RadioActive Foundation: The standards based open source RFID project. +http://www.thefigtrees.net/lee/blog/2009/03/named_graphs_in_open_anzo.html|creationDate|2009-03-03 +http://www.thefigtrees.net/lee/blog/2009/03/named_graphs_in_open_anzo.html|tag|http://www.semanlink.net/tag/named_graphs +http://www.thefigtrees.net/lee/blog/2009/03/named_graphs_in_open_anzo.html|tag|http://www.semanlink.net/tag/anzo +http://www.thefigtrees.net/lee/blog/2009/03/named_graphs_in_open_anzo.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2009/03/named_graphs_in_open_anzo.html|title|Named graphs in Open Anzo - TechnicaLee Speaking +http://www.thefigtrees.net/lee/blog/2009/03/named_graphs_in_open_anzo.html|creationTime|2009-03-03T18:52:08Z +http://www.ysearchblog.com/archives/000527.html|creationDate|2008-03-17 +http://www.ysearchblog.com/archives/000527.html|tag|http://www.semanlink.net/tag/rdfa +http://www.ysearchblog.com/archives/000527.html|tag|http://www.semanlink.net/tag/yahoo +http://www.ysearchblog.com/archives/000527.html|tag|http://www.semanlink.net/tag/microformats +http://www.ysearchblog.com/archives/000527.html|title|Yahoo! Search Blog: The Yahoo! Search Open Ecosystem +http://www.ysearchblog.com/archives/000527.html|creationTime|2008-03-17T13:26:28Z +http://www.lepoint.fr/culture/socrate-ennemi-de-la-democratie-22-08-2013-1812427_3.php|creationDate|2017-07-03 +http://www.lepoint.fr/culture/socrate-ennemi-de-la-democratie-22-08-2013-1812427_3.php|tag|http://www.semanlink.net/tag/socrate +http://www.lepoint.fr/culture/socrate-ennemi-de-la-democratie-22-08-2013-1812427_3.php|tag|http://www.semanlink.net/tag/democratie +http://www.lepoint.fr/culture/socrate-ennemi-de-la-democratie-22-08-2013-1812427_3.php|title|Socrate, ennemi de la démocratie ? 
+http://www.lepoint.fr/culture/socrate-ennemi-de-la-democratie-22-08-2013-1812427_3.php|creationTime|2017-07-03T00:03:53Z +http://www.newyorker.com/magazine/2011/05/23/the-secret-sharer|creationDate|2016-08-31 +http://www.newyorker.com/magazine/2011/05/23/the-secret-sharer|tag|http://www.semanlink.net/tag/new_yorker +http://www.newyorker.com/magazine/2011/05/23/the-secret-sharer|tag|http://www.semanlink.net/tag/whistleblower +http://www.newyorker.com/magazine/2011/05/23/the-secret-sharer|comment|The only people Obama has prosecuted are the whistle-blowers. +http://www.newyorker.com/magazine/2011/05/23/the-secret-sharer|title|The Secret Sharer - The New Yorker +http://www.newyorker.com/magazine/2011/05/23/the-secret-sharer|creationTime|2016-08-31T00:01:50Z +http://www.inforbix.com/volkswagen-and-product-data-semantics/|creationDate|2011-10-05 +http://www.inforbix.com/volkswagen-and-product-data-semantics/|tag|http://www.semanlink.net/tag/volkswagen +http://www.inforbix.com/volkswagen-and-product-data-semantics/|title|Volkswagen and Product Data Semantics +http://www.inforbix.com/volkswagen-and-product-data-semantics/|creationTime|2011-10-05T08:26:08Z +http://en.wikipedia.org/wiki/Business_case|creationDate|2010-07-24 +http://en.wikipedia.org/wiki/Business_case|tag|http://www.semanlink.net/tag/business_case +http://en.wikipedia.org/wiki/Business_case|title|Business case +http://en.wikipedia.org/wiki/Business_case|creationTime|2010-07-24T01:40:23Z +http://www.againsttcpa.com/tcpa-faq-en.html|creationDate|2005-11-04 +http://www.againsttcpa.com/tcpa-faq-en.html|tag|http://www.semanlink.net/tag/trusted_computing +http://www.againsttcpa.com/tcpa-faq-en.html|title|Against TCPA TCPA would TAKE your FREEDOM This is NO FAKE +http://mathenjeans.free.fr/amej/edition/actes/actespdf/94231238.pdf|creationDate|2012-03-20 +http://mathenjeans.free.fr/amej/edition/actes/actespdf/94231238.pdf|tag|http://www.semanlink.net/tag/jean_paul +http://mathenjeans.free.fr/amej/edition/actes/actespdf/94231238.pdf|tag|http://www.semanlink.net/tag/algebre +http://mathenjeans.free.fr/amej/edition/actes/actespdf/94231238.pdf|title|l’algèbre à travers les équations +http://mathenjeans.free.fr/amej/edition/actes/actespdf/94231238.pdf|creationTime|2012-03-20T23:49:03Z +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3216,50-692127,0.html|creationDate|2005-09-23 +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3216,50-692127,0.html|tag|http://www.semanlink.net/tag/chine +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3216,50-692127,0.html|tag|http://www.semanlink.net/tag/ca_craint +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3216,50-692127,0.html|tag|http://www.semanlink.net/tag/politique_de_l_enfant_unique +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3216,50-692127,0.html|title|En Chine, le planning familial du Shandong a imposé une violente campagne de stérilisation et d'avortement +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3216,50-692127,0.html|source|Le Monde +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3216,50-692127,0.html|date|2005-09-24 +http://www.lights.com/pickalink/bookmarks/|creationDate|2005-05-23 +http://www.lights.com/pickalink/bookmarks/|tag|http://www.semanlink.net/tag/bookmark_managers +http://www.lights.com/pickalink/bookmarks/|tag|http://www.semanlink.net/tag/tagging +http://www.lights.com/pickalink/bookmarks/|title|Free Bookmark Managers +https://www.bbc.co.uk/news/science-environment-44904298|creationDate|2018-08-03 
+https://www.bbc.co.uk/news/science-environment-44904298|tag|http://www.semanlink.net/tag/parfum +https://www.bbc.co.uk/news/science-environment-44904298|title|Petrichor: why does rain smell so good? - BBC News +https://www.bbc.co.uk/news/science-environment-44904298|creationTime|2018-08-03T14:18:12Z +http://www.coral-lab.org/|creationDate|2006-05-04 +http://www.coral-lab.org/|tag|http://www.semanlink.net/tag/robotique +http://www.coral-lab.org/|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.coral-lab.org/|comment|Research in the Cognition, Robotics, and Learning (CORAL) lab seeks to understand how artificial systems can acquire grounded knowledge from sensori-motor interaction with their environment that enables cognitive activities like natural language communication and planning. +http://www.coral-lab.org/|title|Cognition, Robotics, and Learning (CORAL) Lab Home +http://www.w3.org/2006/07/SWD/track/issues/54|creationDate|2008-05-12 +http://www.w3.org/2006/07/SWD/track/issues/54|tag|http://www.semanlink.net/tag/semantics_of_skos_concept +http://www.w3.org/2006/07/SWD/track/issues/54|comment|semantics of skos:Concept? +http://www.w3.org/2006/07/SWD/track/issues/54|title|ISSUE-54 - SWD +http://www.w3.org/2006/07/SWD/track/issues/54|creationTime|2008-05-12T19:23:34Z +http://commons.wikimedia.org/wiki/User_talk:Fps61|creationDate|2011-10-18 +http://commons.wikimedia.org/wiki/User_talk:Fps61|tag|http://www.semanlink.net/tag/wikimedia +http://commons.wikimedia.org/wiki/User_talk:Fps61|tag|http://www.semanlink.net/tag/fps +http://commons.wikimedia.org/wiki/User_talk:Fps61|title|User talk:Fps61 - Wikimedia Commons +http://commons.wikimedia.org/wiki/User_talk:Fps61|creationTime|2011-10-18T09:25:55Z +http://adage.com/article/digital/facebook-partner-acxiom-epsilon-match-store-purchases-user-profiles/239967/|creationDate|2013-04-24 +http://adage.com/article/digital/facebook-partner-acxiom-epsilon-match-store-purchases-user-profiles/239967/|tag|http://www.semanlink.net/tag/facebook +http://adage.com/article/digital/facebook-partner-acxiom-epsilon-match-store-purchases-user-profiles/239967/|tag|http://www.semanlink.net/tag/publicite_internet +http://adage.com/article/digital/facebook-partner-acxiom-epsilon-match-store-purchases-user-profiles/239967/|comment|Facebook is testing out a new kind of ad targeting that will let brands market to users based on what they've bought in stores, according to execs briefed on their plans. +http://adage.com/article/digital/facebook-partner-acxiom-epsilon-match-store-purchases-user-profiles/239967/|title|Facebook to Partner With Acxiom, Epsilon to Match Store Purchases With User Profiles Digital - Advertising Age +http://adage.com/article/digital/facebook-partner-acxiom-epsilon-match-store-purchases-user-profiles/239967/|creationTime|2013-04-24T18:17:10Z +http://stackoverflow.com/questions/3459795/how-to-choose-between-jersey-apache-wink-and-jboss-resteasy/10922135#10922135|creationDate|2016-04-01 +http://stackoverflow.com/questions/3459795/how-to-choose-between-jersey-apache-wink-and-jboss-resteasy/10922135#10922135|tag|http://www.semanlink.net/tag/jersey +http://stackoverflow.com/questions/3459795/how-to-choose-between-jersey-apache-wink-and-jboss-resteasy/10922135#10922135|tag|http://www.semanlink.net/tag/resteasy +http://stackoverflow.com/questions/3459795/how-to-choose-between-jersey-apache-wink-and-jboss-resteasy/10922135#10922135|title|How to choose between Jersey, Apache Wink and JBoss RESTEasy? 
- Stack Overflow +http://stackoverflow.com/questions/3459795/how-to-choose-between-jersey-apache-wink-and-jboss-resteasy/10922135#10922135|creationTime|2016-04-01T10:41:56Z +http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=4184|creationDate|2007-10-12 +http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=4184|tag|http://www.semanlink.net/tag/niger +http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=4184|tag|http://www.semanlink.net/tag/touareg +http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=4184|title|Le Republicain-Niger: CAUSERIE RADIOTÉLÉVISÉE. Un appel au génocide ! +http://www.planeteafrique.com/Republicain-Niger/Index.asp?affiche=News_Display.asp&articleid=4184|creationTime|2007-10-12T02:01:40Z +http://www.christian-faure.net/2015/10/02/conference-la-blockchain-au-dela-du-bitcoin/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|creationDate|2015-10-03 +http://www.christian-faure.net/2015/10/02/conference-la-blockchain-au-dela-du-bitcoin/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|tag|http://www.semanlink.net/tag/christian_faure +http://www.christian-faure.net/2015/10/02/conference-la-blockchain-au-dela-du-bitcoin/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|tag|http://www.semanlink.net/tag/blockchain +http://www.christian-faure.net/2015/10/02/conference-la-blockchain-au-dela-du-bitcoin/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|title|Conférence : La blockchain au delà du Bitcoin +http://www.christian-faure.net/2015/10/02/conference-la-blockchain-au-dela-du-bitcoin/?utm_source=feedburner&utm_medium=twitter&utm_campaign=Feed%3A+christian-faure%2FwMuM+%28Blog%29|creationTime|2015-10-03T12:43:10Z +http://www.lemonde.fr/afrique/article/2016/09/20/mckinsey-continue-de-croire-en-la-capacite-economique-des-lions-d-afrique_5000545_3212.html|creationDate|2016-09-20 +http://www.lemonde.fr/afrique/article/2016/09/20/mckinsey-continue-de-croire-en-la-capacite-economique-des-lions-d-afrique_5000545_3212.html|tag|http://www.semanlink.net/tag/new_africa +http://www.lemonde.fr/afrique/article/2016/09/20/mckinsey-continue-de-croire-en-la-capacite-economique-des-lions-d-afrique_5000545_3212.html|tag|http://www.semanlink.net/tag/nigeria +http://www.lemonde.fr/afrique/article/2016/09/20/mckinsey-continue-de-croire-en-la-capacite-economique-des-lions-d-afrique_5000545_3212.html|title|McKinsey continue de croire en la capacité économique des « lions » d’Afrique +http://www.lemonde.fr/afrique/article/2016/09/20/mckinsey-continue-de-croire-en-la-capacite-economique-des-lions-d-afrique_5000545_3212.html|creationTime|2016-09-20T11:46:24Z +http://passeurdesciences.blog.lemonde.fr/2014/06/19/archeologie-les-traces-dune-epidemie-antique-decouvertes-en-egypte/|creationDate|2014-06-21 +http://passeurdesciences.blog.lemonde.fr/2014/06/19/archeologie-les-traces-dune-epidemie-antique-decouvertes-en-egypte/|tag|http://www.semanlink.net/tag/epidemie +http://passeurdesciences.blog.lemonde.fr/2014/06/19/archeologie-les-traces-dune-epidemie-antique-decouvertes-en-egypte/|tag|http://www.semanlink.net/tag/egypte_antique 
+http://passeurdesciences.blog.lemonde.fr/2014/06/19/archeologie-les-traces-dune-epidemie-antique-decouvertes-en-egypte/|title|Archéologie : les traces d’une épidémie antique découvertes en Egypte Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2014/06/19/archeologie-les-traces-dune-epidemie-antique-decouvertes-en-egypte/|creationTime|2014-06-21T00:08:48Z +http://www.w3.org/TR/ldp/|creationDate|2012-10-29 +http://www.w3.org/TR/ldp/|tag|http://www.semanlink.net/tag/ldp_w3c +http://www.w3.org/TR/ldp/|title|Linked Data Platform 1.0 +http://www.w3.org/TR/ldp/|creationTime|2012-10-29T17:48:56Z +http://www.squeak.org/|creationDate|2007-09-10 +http://www.squeak.org/|tag|http://www.semanlink.net/tag/squeak +http://www.squeak.org/|comment|"Squeak is a modern, open source full-featured implementation of the powerful Smalltalk programming language and environment.
+The One Laptop Per Child (OLPC) initiative leverages Squeak to power the Etoys application shipped with every laptop around the world. (Etoys is a powerful script-based environment to learn science and math by encouraging exploration and experimentation.) + +" +http://www.squeak.org/|title|Squeak +http://www.squeak.org/|creationTime|2007-09-10T19:48:29Z +http://www.lemonde.fr/planete/article/2015/06/16/climat-le-blame-du-pape-aux-pays-riches_4655037_3244.html|creationDate|2015-06-18 +http://www.lemonde.fr/planete/article/2015/06/16/climat-le-blame-du-pape-aux-pays-riches_4655037_3244.html|tag|http://www.semanlink.net/tag/ecologie +http://www.lemonde.fr/planete/article/2015/06/16/climat-le-blame-du-pape-aux-pays-riches_4655037_3244.html|tag|http://www.semanlink.net/tag/pape_francois +http://www.lemonde.fr/planete/article/2015/06/16/climat-le-blame-du-pape-aux-pays-riches_4655037_3244.html|title|Climat : le pape s’attaque à la « culture du déchet » des pays riches +http://www.lemonde.fr/planete/article/2015/06/16/climat-le-blame-du-pape-aux-pays-riches_4655037_3244.html|creationTime|2015-06-18T19:37:14Z +http://www.hlt.utdallas.edu/~vince/papers/acl14-keyphrase-poster.jpg|creationDate|2017-06-20 +http://www.hlt.utdallas.edu/~vince/papers/acl14-keyphrase-poster.jpg|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.hlt.utdallas.edu/~vince/papers/acl14-keyphrase-poster.jpg|title|Automatic Keyphrase Extraction (Poster): A Survey of the State of the Art (2014) +http://www.hlt.utdallas.edu/~vince/papers/acl14-keyphrase-poster.jpg|creationTime|2017-06-20T14:04:04Z +http://jena.hpl.hp.com/juc2006/schedule.html|creationDate|2006-04-13 +http://jena.hpl.hp.com/juc2006/schedule.html|tag|http://www.semanlink.net/tag/jena_user_conference +http://jena.hpl.hp.com/juc2006/schedule.html|title|2006 Jena User Conference - schedule +http://www.sciencemag.org/news/2016/05/gene-editor-crispr-won-t-fully-fix-sick-people-anytime-soon-here-s-why|creationDate|2016-08-02 +http://www.sciencemag.org/news/2016/05/gene-editor-crispr-won-t-fully-fix-sick-people-anytime-soon-here-s-why|tag|http://www.semanlink.net/tag/gene_therapy +http://www.sciencemag.org/news/2016/05/gene-editor-crispr-won-t-fully-fix-sick-people-anytime-soon-here-s-why|tag|http://www.semanlink.net/tag/crispr_cas9 +http://www.sciencemag.org/news/2016/05/gene-editor-crispr-won-t-fully-fix-sick-people-anytime-soon-here-s-why|title|The gene editor CRISPR won’t fully fix sick people anytime soon. Here’s why Science AAAS +http://www.sciencemag.org/news/2016/05/gene-editor-crispr-won-t-fully-fix-sick-people-anytime-soon-here-s-why|creationTime|2016-08-02T10:08:01Z +http://www.w3.org/TR/rdfa-primer/|creationDate|2013-06-15 +http://www.w3.org/TR/rdfa-primer/|tag|http://www.semanlink.net/tag/rdfa +http://www.w3.org/TR/rdfa-primer/|tag|http://www.semanlink.net/tag/rdfa_1_1 +http://www.w3.org/TR/rdfa-primer/|title|RDFa 1.1 Primer +http://www.w3.org/TR/rdfa-primer/|creationTime|2013-06-15T17:49:31Z +http://www.slideshare.net/cygri/what-is-sdmxrdf-4696043|creationDate|2010-07-16 +http://www.slideshare.net/cygri/what-is-sdmxrdf-4696043|tag|http://www.semanlink.net/tag/richard_cyganiak +http://www.slideshare.net/cygri/what-is-sdmxrdf-4696043|tag|http://www.semanlink.net/tag/slides +http://www.slideshare.net/cygri/what-is-sdmxrdf-4696043|tag|http://www.semanlink.net/tag/sdmx_rdf +http://www.slideshare.net/cygri/what-is-sdmxrdf-4696043|title|What is SDMX-RDF? 
+http://www.slideshare.net/cygri/what-is-sdmxrdf-4696043|creationTime|2010-07-16T14:20:12Z +http://stackoverflow.com/questions/3113428/classifying-documents-into-categories?rq=1|creationDate|2014-03-15 +http://stackoverflow.com/questions/3113428/classifying-documents-into-categories?rq=1|tag|http://www.semanlink.net/tag/nlp_text_classification +http://stackoverflow.com/questions/3113428/classifying-documents-into-categories?rq=1|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/3113428/classifying-documents-into-categories?rq=1|title|python - Classifying Documents into Categories - Stack Overflow +http://stackoverflow.com/questions/3113428/classifying-documents-into-categories?rq=1|creationTime|2014-03-15T19:23:24Z +http://jena.sourceforge.net/ARQ/property_paths.html|creationDate|2010-06-10 +http://jena.sourceforge.net/ARQ/property_paths.html|tag|http://www.semanlink.net/tag/arq +http://jena.sourceforge.net/ARQ/property_paths.html|title|ARQ - Property Paths +http://jena.sourceforge.net/ARQ/property_paths.html|creationTime|2010-06-10T00:36:55Z +http://backbonetutorials.com/|creationDate|2012-08-31 +http://backbonetutorials.com/|tag|http://www.semanlink.net/tag/tutorial +http://backbonetutorials.com/|tag|http://www.semanlink.net/tag/backbone_js +http://backbonetutorials.com/|title|Backbone.js Tutorials +http://backbonetutorials.com/|creationTime|2012-08-31T17:31:07Z +http://www.inter-locale.com/codeset1.jsp|creationDate|2005-05-14 +http://www.inter-locale.com/codeset1.jsp|tag|http://www.semanlink.net/tag/encoding +http://www.inter-locale.com/codeset1.jsp|tag|http://www.semanlink.net/tag/jsp +http://www.inter-locale.com/codeset1.jsp|title|Unicode JSP Primer +http://blog.sindice.com/2014/03/07/the-siren-1-0-open-source-release-and-its-use-in-the-semantic-web-community/|creationDate|2015-03-06 +http://blog.sindice.com/2014/03/07/the-siren-1-0-open-source-release-and-its-use-in-the-semantic-web-community/|tag|http://www.semanlink.net/tag/siren +http://blog.sindice.com/2014/03/07/the-siren-1-0-open-source-release-and-its-use-in-the-semantic-web-community/|tag|http://www.semanlink.net/tag/solr_rdf +http://blog.sindice.com/2014/03/07/the-siren-1-0-open-source-release-and-its-use-in-the-semantic-web-community/|tag|http://www.semanlink.net/tag/sindice +http://blog.sindice.com/2014/03/07/the-siren-1-0-open-source-release-and-its-use-in-the-semantic-web-community/|title|The SIREn 1.0 Open Source Release and its Use in the Semantic Web Community - Sindice Blog +http://blog.sindice.com/2014/03/07/the-siren-1-0-open-source-release-and-its-use-in-the-semantic-web-community/|creationTime|2015-03-06T15:28:09Z +http://www.youtube.com/watch?v=Itcir2iiH6E|creationDate|2008-12-29 +http://www.youtube.com/watch?v=Itcir2iiH6E|tag|http://www.semanlink.net/tag/mbilia_bel +http://www.youtube.com/watch?v=Itcir2iiH6E|comment|'Yamba Ngai' is Mbilia's tribute to the institution we call 'marriage'. 
+http://www.youtube.com/watch?v=Itcir2iiH6E|title|Mbilia Bel - Yamba Ngai +http://www.youtube.com/watch?v=Itcir2iiH6E|creationTime|2008-12-29T22:34:09Z +http://www.scholarpedia.org/article/Text_categorization|creationDate|2014-04-08 +http://www.scholarpedia.org/article/Text_categorization|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.scholarpedia.org/article/Text_categorization|title|Text categorization - Scholarpedia +http://www.scholarpedia.org/article/Text_categorization|creationTime|2014-04-08T18:50:37Z +http://arxiv.org/abs/1511.08154|creationDate|2016-01-12 +http://arxiv.org/abs/1511.08154|tag|http://www.semanlink.net/tag/hypothese_de_riemann +http://arxiv.org/abs/1511.08154|tag|http://www.semanlink.net/tag/jean_paul +http://arxiv.org/abs/1511.08154|tag|http://www.semanlink.net/tag/arxiv_doc +http://arxiv.org/abs/1511.08154|arxiv_author|David Montague +http://arxiv.org/abs/1511.08154|arxiv_author|Jeffrey C. Lagarias +http://arxiv.org/abs/1511.08154|title|[1511.08154] Notes on Cardinal's Matrices +http://arxiv.org/abs/1511.08154|creationTime|2016-01-12T23:36:39Z +http://arxiv.org/abs/1511.08154|arxiv_summary|"These notes are motivated by the work of Jean-Paul Cardinal on symmetric +matrices related to the Mertens function. He showed that certain norm bounds on +his matrices implied the Riemann hypothesis. Using a different matrix norm we +show an equivalence of the Riemann hypothesis to suitable norm bounds on his +matrices in the new norm. Then we specify a deformed version of his Mertens +function matrices that unconditionally satisfies a norm bound that is of the +same strength as his Riemann hypothesis bound." +http://arxiv.org/abs/1511.08154|arxiv_firstAuthor|Jeffrey C. Lagarias +http://arxiv.org/abs/1511.08154|arxiv_updated|2015-11-25T19:02:43Z +http://arxiv.org/abs/1511.08154|arxiv_title|Notes on Cardinal's Matrices +http://arxiv.org/abs/1511.08154|arxiv_published|2015-11-25T19:02:43Z +http://arxiv.org/abs/1511.08154|arxiv_num|1511.08154 +http://www.apesmanifesto.org|creationDate|2008-10-22 +http://www.apesmanifesto.org|tag|http://www.semanlink.net/tag/grands_singes +http://www.apesmanifesto.org|tag|http://www.semanlink.net/tag/petition +http://www.apesmanifesto.org|comment|Tropical forests are disappearing at an excessive speed and with them the last populations of great apes. All specialists are unanimous: if we do nothing gorillas, chimpanzees and bonobos will have disappeared by the middle of the 21st century. The situation of orangoutans is even more dramatic; in 20 years time, they might only exist in zoos. Today, it is important to become active in order to stop this Ecocide! We, citizens of the Earth, ask our governments and international authorities to accept as their superior duty to save and protect primates. 
+http://www.apesmanifesto.org|title|Manifesto for Apes and nature [mAn] +http://www.apesmanifesto.org|creationTime|2008-10-22T20:00:57Z +http://semanticday.mondeca.makolab.fr/|creationDate|2013-09-23 +http://semanticday.mondeca.makolab.fr/|tag|http://www.semanlink.net/tag/makolab_semantic_day +http://semanticday.mondeca.makolab.fr/|title|Semantic Web’s significance confirmed II MakoLab Semantic Day +http://semanticday.mondeca.makolab.fr/|creationTime|2013-09-23T10:55:42Z +https://aclanthology.coli.uni-saarland.de/events/emnlp-2018|creationDate|2018-11-02 +https://aclanthology.coli.uni-saarland.de/events/emnlp-2018|tag|http://www.semanlink.net/tag/emnlp_2018 +https://aclanthology.coli.uni-saarland.de/events/emnlp-2018|title|EMNLP (2018) - ACL Anthology - Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing +https://aclanthology.coli.uni-saarland.de/events/emnlp-2018|creationTime|2018-11-02T23:16:49Z +http://www.joseki.org/|creationDate|2008-01-04 +http://www.joseki.org/|tag|http://www.semanlink.net/tag/joseki +http://www.joseki.org/|title|Joseki - A SPARQL Server for Jena +http://www.joseki.org/|creationTime|2008-01-04T01:40:12Z +http://tools.wmflabs.org/sqid/#/|creationDate|2016-04-22 +http://tools.wmflabs.org/sqid/#/|tag|http://www.semanlink.net/tag/wikidata +http://tools.wmflabs.org/sqid/#/|title|Wikidata Class Browser +http://tools.wmflabs.org/sqid/#/|creationTime|2016-04-22T12:55:29Z +https://www.theguardian.com/commentisfree/2017/jan/08/blaming-fake-news-not-the-answer-democracy-crisis|creationDate|2017-01-08 +https://www.theguardian.com/commentisfree/2017/jan/08/blaming-fake-news-not-the-answer-democracy-crisis|tag|http://www.semanlink.net/tag/evgeny_morozov +https://www.theguardian.com/commentisfree/2017/jan/08/blaming-fake-news-not-the-answer-democracy-crisis|tag|http://www.semanlink.net/tag/fake_news +https://www.theguardian.com/commentisfree/2017/jan/08/blaming-fake-news-not-the-answer-democracy-crisis|title|Democracy is in crisis, but blaming fake news is not the answer Evgeny Morozov Opinion The Guardian +https://www.theguardian.com/commentisfree/2017/jan/08/blaming-fake-news-not-the-answer-democracy-crisis|creationTime|2017-01-08T13:45:50Z +https://medium.com/@writingben/when-roman-barbarians-met-the-asian-enlightenment-2be064d7af9b#.b5gor5ymx|creationDate|2016-10-05 +https://medium.com/@writingben/when-roman-barbarians-met-the-asian-enlightenment-2be064d7af9b#.b5gor5ymx|tag|http://www.semanlink.net/tag/histoire_de_l_asie +https://medium.com/@writingben/when-roman-barbarians-met-the-asian-enlightenment-2be064d7af9b#.b5gor5ymx|title|When Roman “Barbarians” Met the Asian Enlightenment – Medium +https://medium.com/@writingben/when-roman-barbarians-met-the-asian-enlightenment-2be064d7af9b#.b5gor5ymx|creationTime|2016-10-05T21:57:42Z +http://www.sciencemag.org/news/2016/03/viral-fossils-our-dna-may-help-us-fight-infection|creationDate|2016-03-28 +http://www.sciencemag.org/news/2016/03/viral-fossils-our-dna-may-help-us-fight-infection|tag|http://www.semanlink.net/tag/adn +http://www.sciencemag.org/news/2016/03/viral-fossils-our-dna-may-help-us-fight-infection|tag|http://www.semanlink.net/tag/virus +http://www.sciencemag.org/news/2016/03/viral-fossils-our-dna-may-help-us-fight-infection|tag|http://www.semanlink.net/tag/immune_system +http://www.sciencemag.org/news/2016/03/viral-fossils-our-dna-may-help-us-fight-infection|title|Viral ‘fossils’ in our DNA may help us fight infection Science AAAS 
+http://www.sciencemag.org/news/2016/03/viral-fossils-our-dna-may-help-us-fight-infection|creationTime|2016-03-28T10:46:12Z +http://research.microsoft.com:8082/pubs/73532/AF1-1.pdf|creationDate|2015-10-20 +http://research.microsoft.com:8082/pubs/73532/AF1-1.pdf|tag|http://www.semanlink.net/tag/microsoft_research +http://research.microsoft.com:8082/pubs/73532/AF1-1.pdf|tag|http://www.semanlink.net/tag/nlp_microsoft +http://research.microsoft.com:8082/pubs/73532/AF1-1.pdf|tag|http://www.semanlink.net/tag/email_classification +http://research.microsoft.com:8082/pubs/73532/AF1-1.pdf|comment|"JD Brutlag, C Meek - ICML, 2000 - research.microsoft.com
+Interactive classification of email into a userdefined hierarchy of folders is a natural +domain for application of text classification methods. This domain presents several +challenges. First, the user's changing mailfiling habits mandate classification technology ..." +http://research.microsoft.com:8082/pubs/73532/AF1-1.pdf|title|Challenges of the email domain for text classification +http://research.microsoft.com:8082/pubs/73532/AF1-1.pdf|creationTime|2015-10-20T11:00:30Z +https://www.sitepoint.com/getting-started-browserify/|creationDate|2017-04-02 +https://www.sitepoint.com/getting-started-browserify/|tag|http://www.semanlink.net/tag/node_js +https://www.sitepoint.com/getting-started-browserify/|tag|http://www.semanlink.net/tag/javascript +https://www.sitepoint.com/getting-started-browserify/|title|Getting Started with Browserify +https://www.sitepoint.com/getting-started-browserify/|creationTime|2017-04-02T14:53:23Z +https://machinelearning.apple.com/2018/09/27/can-global-semantic-context-improve-neural-language-models.html|creationDate|2018-09-27 +https://machinelearning.apple.com/2018/09/27/can-global-semantic-context-improve-neural-language-models.html|tag|http://www.semanlink.net/tag/global_semantic_context +https://machinelearning.apple.com/2018/09/27/can-global-semantic-context-improve-neural-language-models.html|tag|http://www.semanlink.net/tag/word_embedding +https://machinelearning.apple.com/2018/09/27/can-global-semantic-context-improve-neural-language-models.html|tag|http://www.semanlink.net/tag/bi_lstm +https://machinelearning.apple.com/2018/09/27/can-global-semantic-context-improve-neural-language-models.html|tag|http://www.semanlink.net/tag/apple +https://machinelearning.apple.com/2018/09/27/can-global-semantic-context-improve-neural-language-models.html|tag|http://www.semanlink.net/tag/language_model +https://machinelearning.apple.com/2018/09/27/can-global-semantic-context-improve-neural-language-models.html|title|Can Global Semantic Context Improve Neural Language Models? 
- Apple (2018) +https://machinelearning.apple.com/2018/09/27/can-global-semantic-context-improve-neural-language-models.html|creationTime|2018-09-27T21:37:54Z +https://medium.com/@keremturgutlu/understanding-building-blocks-of-ulmfit-818d3775325b|creationDate|2019-02-10 +https://medium.com/@keremturgutlu/understanding-building-blocks-of-ulmfit-818d3775325b|tag|http://www.semanlink.net/tag/ulmfit +https://medium.com/@keremturgutlu/understanding-building-blocks-of-ulmfit-818d3775325b|title|Understanding building blocks of ULMFIT – Kerem Turgutlu – Medium +https://medium.com/@keremturgutlu/understanding-building-blocks-of-ulmfit-818d3775325b|creationTime|2019-02-10T19:18:31Z +https://www.youtube.com/watch?v=FNQSM4ipZog|creationDate|2015-09-13 +https://www.youtube.com/watch?v=FNQSM4ipZog|tag|http://www.semanlink.net/tag/exponential_organizations +https://www.youtube.com/watch?v=FNQSM4ipZog|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=FNQSM4ipZog|title|Exponential Organizations - Salim Ismail, at USI - YouTube +https://www.youtube.com/watch?v=FNQSM4ipZog|creationTime|2015-09-13T14:50:13Z +http://www.w3.org/MarkUp/2009/rdfa-for-html-authors|creationDate|2009-05-15 +http://www.w3.org/MarkUp/2009/rdfa-for-html-authors|tag|http://www.semanlink.net/tag/rdfa +http://www.w3.org/MarkUp/2009/rdfa-for-html-authors|tag|http://www.semanlink.net/tag/tutorial +http://www.w3.org/MarkUp/2009/rdfa-for-html-authors|title|RDFa for HTML Authors +http://www.w3.org/MarkUp/2009/rdfa-for-html-authors|creationTime|2009-05-15T14:01:56Z +http://stackoverflow.com/questions/19221289/how-to-do-text-classification-with-label-probabilities?rq=1|creationDate|2014-04-25 +http://stackoverflow.com/questions/19221289/how-to-do-text-classification-with-label-probabilities?rq=1|tag|http://www.semanlink.net/tag/nlp_text_classification +http://stackoverflow.com/questions/19221289/how-to-do-text-classification-with-label-probabilities?rq=1|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/19221289/how-to-do-text-classification-with-label-probabilities?rq=1|title|How to do text classification with label probabilities? 
- Stack Overflow +http://stackoverflow.com/questions/19221289/how-to-do-text-classification-with-label-probabilities?rq=1|creationTime|2014-04-25T19:10:57Z +http://www.theguardian.com/world/interactive/2013/nov/01/snowden-nsa-files-surveillance-revelations-decoded?CMP=twt_gu#section/1|creationDate|2013-12-03 +http://www.theguardian.com/world/interactive/2013/nov/01/snowden-nsa-files-surveillance-revelations-decoded?CMP=twt_gu#section/1|tag|http://www.semanlink.net/tag/the_guardian +http://www.theguardian.com/world/interactive/2013/nov/01/snowden-nsa-files-surveillance-revelations-decoded?CMP=twt_gu#section/1|tag|http://www.semanlink.net/tag/nsa +http://www.theguardian.com/world/interactive/2013/nov/01/snowden-nsa-files-surveillance-revelations-decoded?CMP=twt_gu#section/1|tag|http://www.semanlink.net/tag/edward_snowden +http://www.theguardian.com/world/interactive/2013/nov/01/snowden-nsa-files-surveillance-revelations-decoded?CMP=twt_gu#section/1|title|NSA files decoded: Edward Snowden's surveillance revelations explained World news theguardian.com +http://www.theguardian.com/world/interactive/2013/nov/01/snowden-nsa-files-surveillance-revelations-decoded?CMP=twt_gu#section/1|creationTime|2013-12-03T13:46:48Z +http://www.wired.com/2014/06/microbe-symbiosis-evolution/|creationDate|2014-06-08 +http://www.wired.com/2014/06/microbe-symbiosis-evolution/|tag|http://www.semanlink.net/tag/symbiose +http://www.wired.com/2014/06/microbe-symbiosis-evolution/|tag|http://www.semanlink.net/tag/evolution +http://www.wired.com/2014/06/microbe-symbiosis-evolution/|tag|http://www.semanlink.net/tag/lynn_margulis +http://www.wired.com/2014/06/microbe-symbiosis-evolution/|comment|how symbiotic microbes might affect evolution +http://www.wired.com/2014/06/microbe-symbiosis-evolution/|title|Microbes May Drive Evolution of New Animal Species Science WIRED +http://www.wired.com/2014/06/microbe-symbiosis-evolution/|creationTime|2014-06-08T23:39:13Z +http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0295.html|creationDate|2014-05-02 +http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0295.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org +http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0295.html|tag|http://www.semanlink.net/tag/fps_post +http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0295.html|tag|http://www.semanlink.net/tag/product_description +http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0295.html|tag|http://www.semanlink.net/tag/hepp_s_propertyvalue +http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0295.html|comment|"see also
+Niklas Lindström" +http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0295.html|title|Generic Property-Value Proposal for Schema.org from martin.hepp@ebusiness-unibw.org on 2014-04-29 (public-vocabs@w3.org from April 2014) +http://lists.w3.org/Archives/Public/public-vocabs/2014Apr/0295.html|creationTime|2014-05-02T19:32:06Z +http://www.practicalembeddedjava.com/tools/eclipse_tips.html|creationDate|2007-10-19 +http://www.practicalembeddedjava.com/tools/eclipse_tips.html|tag|http://www.semanlink.net/tag/eclipse_tip +http://www.practicalembeddedjava.com/tools/eclipse_tips.html|title|Eclipse Tips +http://www.practicalembeddedjava.com/tools/eclipse_tips.html|creationTime|2007-10-19T01:45:03Z +https://medium.com/@racheltho/why-you-yes-you-should-blog-7d2544ac1045|creationDate|2019-03-14 +https://medium.com/@racheltho/why-you-yes-you-should-blog-7d2544ac1045|tag|http://www.semanlink.net/tag/blog +https://medium.com/@racheltho/why-you-yes-you-should-blog-7d2544ac1045|tag|http://www.semanlink.net/tag/rachel_thomas +https://medium.com/@racheltho/why-you-yes-you-should-blog-7d2544ac1045|title|Why you (yes, you) should blog – Rachel Thomas – Medium +https://medium.com/@racheltho/why-you-yes-you-should-blog-7d2544ac1045|creationTime|2019-03-14T21:02:34Z +https://trackography.org/|creationDate|2014-12-30 +https://trackography.org/|tag|http://www.semanlink.net/tag/privacy_and_internet +https://trackography.org/|comment|Find out who is tracking you when you are reading your favourite news online. +https://trackography.org/|title|Trackography - Who tracks you online? +https://trackography.org/|creationTime|2014-12-30T13:02:38Z +http://www.herodote.net|creationDate|2005-04-17 +http://www.herodote.net|tag|http://www.semanlink.net/tag/histoire +http://stackoverflow.com/questions/3781343/jquerys-getjson-not-setting-accept-header-correctly|creationDate|2012-02-01 +http://stackoverflow.com/questions/3781343/jquerys-getjson-not-setting-accept-header-correctly|tag|http://www.semanlink.net/tag/jsonp +http://stackoverflow.com/questions/3781343/jquerys-getjson-not-setting-accept-header-correctly|tag|http://www.semanlink.net/tag/jquery +http://stackoverflow.com/questions/3781343/jquerys-getjson-not-setting-accept-header-correctly|comment|"As a matter of fact, no way for JQuery to set it, as it is the ""script"" hack" +http://stackoverflow.com/questions/3781343/jquerys-getjson-not-setting-accept-header-correctly|title|JQuery's getJSON() not setting Accept header correctly? 
+http://stackoverflow.com/questions/3781343/jquerys-getjson-not-setting-accept-header-correctly|creationTime|2012-02-01T14:07:56Z +http://mike2.openmethodology.org/wiki/Guiding_Principles_for_the_Open_Semantic_Enterprise|creationDate|2010-07-01 +http://mike2.openmethodology.org/wiki/Guiding_Principles_for_the_Open_Semantic_Enterprise|tag|http://www.semanlink.net/tag/mike_bergman +http://mike2.openmethodology.org/wiki/Guiding_Principles_for_the_Open_Semantic_Enterprise|tag|http://www.semanlink.net/tag/semantic_enterprise +http://mike2.openmethodology.org/wiki/Guiding_Principles_for_the_Open_Semantic_Enterprise|title|Guiding Principles for the Open Semantic Enterprise +http://mike2.openmethodology.org/wiki/Guiding_Principles_for_the_Open_Semantic_Enterprise|creationTime|2010-07-01T18:01:01Z +http://pt.wikipedia.org/wiki/Cidade_de_Deus_(filme)|creationDate|2013-08-28 +http://pt.wikipedia.org/wiki/Cidade_de_Deus_(filme)|tag|http://www.semanlink.net/tag/film_bresilien +http://pt.wikipedia.org/wiki/Cidade_de_Deus_(filme)|title|Cidade de Deus (filme) – Wikipédia, a enciclopédia livre +http://pt.wikipedia.org/wiki/Cidade_de_Deus_(filme)|creationTime|2013-08-28T01:47:23Z +http://fr.wikipedia.org/wiki/Sur_la_route_de_Madison|creationDate|2007-01-14 +http://fr.wikipedia.org/wiki/Sur_la_route_de_Madison|tag|http://www.semanlink.net/tag/film_americain +http://fr.wikipedia.org/wiki/Sur_la_route_de_Madison|tag|http://www.semanlink.net/tag/pont_couvert +http://fr.wikipedia.org/wiki/Sur_la_route_de_Madison|tag|http://www.semanlink.net/tag/clint_eastwood +http://fr.wikipedia.org/wiki/Sur_la_route_de_Madison|tag|http://www.semanlink.net/tag/meryl_streep +http://fr.wikipedia.org/wiki/Sur_la_route_de_Madison|comment|Film américain (1995) de et avec Clint Eastwood, Meryl Streep +http://fr.wikipedia.org/wiki/Sur_la_route_de_Madison|title|The Bridges of Madison County +http://lists.w3.org/Archives/Public/public-lod/2013Mar/0115.html|creationDate|2013-03-25 +http://lists.w3.org/Archives/Public/public-lod/2013Mar/0115.html|tag|http://www.semanlink.net/tag/httprange_14 +http://lists.w3.org/Archives/Public/public-lod/2013Mar/0115.html|tag|http://www.semanlink.net/tag/kingsley_idehen +http://lists.w3.org/Archives/Public/public-lod/2013Mar/0115.html|title|Important Change to HTTP semantics re. 
hashless URIs from Kingsley Idehen on 2013-03-24 (public-lod@w3.org from March 2013) +http://lists.w3.org/Archives/Public/public-lod/2013Mar/0115.html|creationTime|2013-03-25T16:01:42Z +http://www.w3.org/History/1989/proposal.html|creationDate|2008-11-04 +http://www.w3.org/History/1989/proposal.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/History/1989/proposal.html|title|The original proposal of the WWW, HTMLized +http://www.w3.org/History/1989/proposal.html|creationTime|2008-11-04T13:58:23Z +http://recode.net/2014/05/27/googles-new-self-driving-car-ditches-the-steering-wheel/|creationDate|2014-05-28 +http://recode.net/2014/05/27/googles-new-self-driving-car-ditches-the-steering-wheel/|tag|http://www.semanlink.net/tag/google_car +http://recode.net/2014/05/27/googles-new-self-driving-car-ditches-the-steering-wheel/|tag|http://www.semanlink.net/tag/driverless_car +http://recode.net/2014/05/27/googles-new-self-driving-car-ditches-the-steering-wheel/|title|Google Introduces New Self Driving Car at the Code Conference Re/code +http://recode.net/2014/05/27/googles-new-self-driving-car-ditches-the-steering-wheel/|creationTime|2014-05-28T13:24:24Z +http://openspring.net/blog/2009/10/22/produce-and-consume-linked-data-with-drupal|creationDate|2011-09-15 +http://openspring.net/blog/2009/10/22/produce-and-consume-linked-data-with-drupal|tag|http://www.semanlink.net/tag/drupal_rdf +http://openspring.net/blog/2009/10/22/produce-and-consume-linked-data-with-drupal|title|Produce and Consume Linked Data with Drupal! +http://openspring.net/blog/2009/10/22/produce-and-consume-linked-data-with-drupal|creationTime|2011-09-15T14:16:31Z +http://hinchcliffe.org/archive/2008/04/08/16627.aspx|creationDate|2008-04-15 +http://hinchcliffe.org/archive/2008/04/08/16627.aspx|tag|http://www.semanlink.net/tag/soap_vs_rest +http://hinchcliffe.org/archive/2008/04/08/16627.aspx|tag|http://www.semanlink.net/tag/rest +http://hinchcliffe.org/archive/2008/04/08/16627.aspx|title|12 Things You Should Know About REST and WOA +http://hinchcliffe.org/archive/2008/04/08/16627.aspx|creationTime|2008-04-15T15:25:39Z +http://bigbrowser.blog.lemonde.fr/2014/01/23/mouchard-le-mysterieux-texto-recu-par-les-manifestants-de-kiev/|creationDate|2014-01-24 +http://bigbrowser.blog.lemonde.fr/2014/01/23/mouchard-le-mysterieux-texto-recu-par-les-manifestants-de-kiev/|tag|http://www.semanlink.net/tag/big_brother +http://bigbrowser.blog.lemonde.fr/2014/01/23/mouchard-le-mysterieux-texto-recu-par-les-manifestants-de-kiev/|tag|http://www.semanlink.net/tag/ukraine +http://bigbrowser.blog.lemonde.fr/2014/01/23/mouchard-le-mysterieux-texto-recu-par-les-manifestants-de-kiev/|tag|http://www.semanlink.net/tag/geolocalisation +http://bigbrowser.blog.lemonde.fr/2014/01/23/mouchard-le-mysterieux-texto-recu-par-les-manifestants-de-kiev/|tag|http://www.semanlink.net/tag/mouchard +http://bigbrowser.blog.lemonde.fr/2014/01/23/mouchard-le-mysterieux-texto-recu-par-les-manifestants-de-kiev/|title|MOUCHARD – Le mystérieux texto reçu par les manifestants de Kiev Big Browser +http://bigbrowser.blog.lemonde.fr/2014/01/23/mouchard-le-mysterieux-texto-recu-par-les-manifestants-de-kiev/|creationTime|2014-01-24T00:20:54Z +http://www.cs.washington.edu/homes/rao/indus.html|creationDate|2012-06-02 +http://www.cs.washington.edu/homes/rao/indus.html|tag|http://www.semanlink.net/tag/civilisation_de_l_indus +http://www.cs.washington.edu/homes/rao/indus.html|tag|http://www.semanlink.net/tag/nlp 
+http://www.cs.washington.edu/homes/rao/indus.html|title|Probabilistic Analysis of the 4000-year-old Indus Script +http://www.cs.washington.edu/homes/rao/indus.html|creationTime|2012-06-02T08:25:54Z +http://chronicle.com/blogs/wiredcampus/stanford-u-and-edx-will-jointly-build-open-source-software-to-deliver-moocs/43301|creationDate|2013-07-22 +http://chronicle.com/blogs/wiredcampus/stanford-u-and-edx-will-jointly-build-open-source-software-to-deliver-moocs/43301|tag|http://www.semanlink.net/tag/open_source +http://chronicle.com/blogs/wiredcampus/stanford-u-and-edx-will-jointly-build-open-source-software-to-deliver-moocs/43301|tag|http://www.semanlink.net/tag/mooc +http://chronicle.com/blogs/wiredcampus/stanford-u-and-edx-will-jointly-build-open-source-software-to-deliver-moocs/43301|tag|http://www.semanlink.net/tag/stanford +http://chronicle.com/blogs/wiredcampus/stanford-u-and-edx-will-jointly-build-open-source-software-to-deliver-moocs/43301|title|Stanford U. and edX Will Jointly Build Open-Source Software to Deliver MOOCs - Wired Campus - The Chronicle of Higher Education +http://chronicle.com/blogs/wiredcampus/stanford-u-and-edx-will-jointly-build-open-source-software-to-deliver-moocs/43301|creationTime|2013-07-22T09:31:07Z +http://www.snee.com/bobdc.blog/2010/01/federated-sparql-queries.html|creationDate|2010-01-20 +http://www.snee.com/bobdc.blog/2010/01/federated-sparql-queries.html|tag|http://www.semanlink.net/tag/sparql_and_jena +http://www.snee.com/bobdc.blog/2010/01/federated-sparql-queries.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2010/01/federated-sparql-queries.html|title|Federated SPARQL queries - bobdc.blog +http://www.snee.com/bobdc.blog/2010/01/federated-sparql-queries.html|creationTime|2010-01-20T18:24:57Z +http://www.atala.org/taln_archives/RECITAL/RECITAL-2015/recital-2015-long-004.pdf?lipi=urn%3Ali%3Apage%3Ad_flagship3_profile_view_base%3BCaTtEmAlR7eq0au%2FgIHveg%3D%3D|creationDate|2017-07-17 +http://www.atala.org/taln_archives/RECITAL/RECITAL-2015/recital-2015-long-004.pdf?lipi=urn%3Ali%3Apage%3Ad_flagship3_profile_view_base%3BCaTtEmAlR7eq0au%2FgIHveg%3D%3D|tag|http://www.semanlink.net/tag/maali_mnasri +http://www.atala.org/taln_archives/RECITAL/RECITAL-2015/recital-2015-long-004.pdf?lipi=urn%3Ali%3Apage%3Ad_flagship3_profile_view_base%3BCaTtEmAlR7eq0au%2FgIHveg%3D%3D|tag|http://www.semanlink.net/tag/automatic_summarization +http://www.atala.org/taln_archives/RECITAL/RECITAL-2015/recital-2015-long-004.pdf?lipi=urn%3Ali%3Apage%3Ad_flagship3_profile_view_base%3BCaTtEmAlR7eq0au%2FgIHveg%3D%3D|title|Résumé Automatique Multi-Document Dynamique : État de l’Art (2015) +http://www.atala.org/taln_archives/RECITAL/RECITAL-2015/recital-2015-long-004.pdf?lipi=urn%3Ali%3Apage%3Ad_flagship3_profile_view_base%3BCaTtEmAlR7eq0au%2FgIHveg%3D%3D|creationTime|2017-07-17T00:17:43Z +http://hbase.apache.org/|creationDate|2013-02-14 +http://hbase.apache.org/|tag|http://www.semanlink.net/tag/hbase +http://hbase.apache.org/|title|HBase - Apache HBase™ Home +http://hbase.apache.org/|creationTime|2013-02-14T11:29:10Z +http://hbase.apache.org/|homepage|http://hbase.apache.org/ +http://hueniverse.com/oauth/guide/|creationDate|2014-09-26 +http://hueniverse.com/oauth/guide/|tag|http://www.semanlink.net/tag/oauth +http://hueniverse.com/oauth/guide/|title|The OAuth 1.0 Guide +http://hueniverse.com/oauth/guide/|creationTime|2014-09-26T00:27:29Z 
+http://www.nytimes.com/2006/08/24/technology/24basics.html?ei=5088&en=58c571f0b4ae0ed8&ex=1314072000&partner=rssnyt&emc=rss&pagewanted=print|creationDate|2006-09-26 +http://www.nytimes.com/2006/08/24/technology/24basics.html?ei=5088&en=58c571f0b4ae0ed8&ex=1314072000&partner=rssnyt&emc=rss&pagewanted=print|tag|http://www.semanlink.net/tag/batteries +http://www.nytimes.com/2006/08/24/technology/24basics.html?ei=5088&en=58c571f0b4ae0ed8&ex=1314072000&partner=rssnyt&emc=rss&pagewanted=print|title|About Batteries: Tips on Longevity and Reviving the Dead - New York Times +http://www.linuxjournal.com/article/9301?page=0,0|creationDate|2012-11-28 +http://www.linuxjournal.com/article/9301?page=0,0|tag|http://www.semanlink.net/tag/timeline +http://www.linuxjournal.com/article/9301?page=0,0|title|Ajax Timelines and the Semantic Web Linux Journal +http://www.linuxjournal.com/article/9301?page=0,0|creationTime|2012-11-28T00:01:38Z +http://callimachusproject.org/|creationDate|2014-05-17 +http://callimachusproject.org/|tag|http://www.semanlink.net/tag/linked_data_application +http://callimachusproject.org/|tag|http://www.semanlink.net/tag/linked_data +http://callimachusproject.org/|tag|http://www.semanlink.net/tag/rdf_template +http://callimachusproject.org/|tag|http://www.semanlink.net/tag/semanlink2_related +http://callimachusproject.org/|comment|Build Linked Data Applications ... directly in your Web Browser +http://callimachusproject.org/|title|Callimachus - Data-driven applications made easy +http://callimachusproject.org/|creationTime|2014-05-17T17:40:33Z +https://www.atlassian.com/git/tutorials/atlassian-git-cheatsheet|creationDate|2018-11-08 +https://www.atlassian.com/git/tutorials/atlassian-git-cheatsheet|tag|http://www.semanlink.net/tag/cheat_sheet +https://www.atlassian.com/git/tutorials/atlassian-git-cheatsheet|tag|http://www.semanlink.net/tag/git +https://www.atlassian.com/git/tutorials/atlassian-git-cheatsheet|title|Git cheat sheet +https://www.atlassian.com/git/tutorials/atlassian-git-cheatsheet|creationTime|2018-11-08T10:17:47Z +http://ivanherman.wordpress.com/2008/10/15/semantic-web-and-uncertainty/|creationDate|2008-10-16 +http://ivanherman.wordpress.com/2008/10/15/semantic-web-and-uncertainty/|tag|http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web +http://ivanherman.wordpress.com/2008/10/15/semantic-web-and-uncertainty/|tag|http://www.semanlink.net/tag/ivan_herman +http://ivanherman.wordpress.com/2008/10/15/semantic-web-and-uncertainty/|title|Semantic Web and uncertainty « Ivan’s private site +http://ivanherman.wordpress.com/2008/10/15/semantic-web-and-uncertainty/|creationTime|2008-10-16T21:58:36Z +https://www.youtube.com/watch?v=A6wq16Ow5Ec|creationDate|2015-09-04 +https://www.youtube.com/watch?v=A6wq16Ow5Ec|tag|http://www.semanlink.net/tag/youtube_tutorial +https://www.youtube.com/watch?v=A6wq16Ow5Ec|tag|http://www.semanlink.net/tag/angularjs +https://www.youtube.com/watch?v=A6wq16Ow5Ec|tag|http://www.semanlink.net/tag/markdown +https://www.youtube.com/watch?v=A6wq16Ow5Ec|title|AngularJS - Custom Components - Part 1 - YouTube +https://www.youtube.com/watch?v=A6wq16Ow5Ec|creationTime|2015-09-04T22:30:38Z +http://mail-archives.apache.org/mod_mbox/jena-users/201211.mbox/%3c50B89950.6010705@it.ox.ac.uk%3e|creationDate|2012-12-01 +http://mail-archives.apache.org/mod_mbox/jena-users/201211.mbox/%3c50B89950.6010705@it.ox.ac.uk%3e|tag|http://www.semanlink.net/tag/jena_dev 
+http://mail-archives.apache.org/mod_mbox/jena-users/201211.mbox/%3c50B89950.6010705@it.ox.ac.uk%3e|title|Re: Dealing with expensive queries (jena users mailing list) +http://mail-archives.apache.org/mod_mbox/jena-users/201211.mbox/%3c50B89950.6010705@it.ox.ac.uk%3e|creationTime|2012-12-01T13:21:04Z +http://dev2dev.bea.com/pub/a/2006/01/ajax-back-button.html?page=1|creationDate|2006-06-20 +http://dev2dev.bea.com/pub/a/2006/01/ajax-back-button.html?page=1|tag|http://www.semanlink.net/tag/ajax +http://dev2dev.bea.com/pub/a/2006/01/ajax-back-button.html?page=1|tag|http://www.semanlink.net/tag/browser_back_button +http://dev2dev.bea.com/pub/a/2006/01/ajax-back-button.html?page=1|title|Developing Ajax Applications That Preserve Standard Browser Functionality +http://tiffanybbrown.com/2011/03/23/html5-does-not-allow-self-closing-tags/|creationDate|2013-06-15 +http://tiffanybbrown.com/2011/03/23/html5-does-not-allow-self-closing-tags/|tag|http://www.semanlink.net/tag/html5 +http://tiffanybbrown.com/2011/03/23/html5-does-not-allow-self-closing-tags/|title|HTML5 does NOT allow “self-closing” tags • Tiffany B. Brown +http://tiffanybbrown.com/2011/03/23/html5-does-not-allow-self-closing-tags/|creationTime|2013-06-15T18:08:15Z +http://www.lexpress.fr/informations/le-livre-de-sang-d-andrei-platonov_618697.html|creationDate|2014-01-02 +http://www.lexpress.fr/informations/le-livre-de-sang-d-andrei-platonov_618697.html|tag|http://www.semanlink.net/tag/livre +http://www.lexpress.fr/informations/le-livre-de-sang-d-andrei-platonov_618697.html|tag|http://www.semanlink.net/tag/platonov +http://www.lexpress.fr/informations/le-livre-de-sang-d-andrei-platonov_618697.html|title|Le livre de sang d'Andreï Platonov +http://www.lexpress.fr/informations/le-livre-de-sang-d-andrei-platonov_618697.html|creationTime|2014-01-02T11:44:20Z +https://twitter.com/enahpets/status/1052537794764128257|creationDate|2018-10-18 +https://twitter.com/enahpets/status/1052537794764128257|tag|http://www.semanlink.net/tag/france_is_ai_2018 +https://twitter.com/enahpets/status/1052537794764128257|title|#franceisai, petit récapitulatif des choses vues et entendues ce matin +https://twitter.com/enahpets/status/1052537794764128257|creationTime|2018-10-18T14:18:46Z +http://www.theatlantic.com/features/archive/2014/09/why-i-hope-to-die-at-75/379329/|creationDate|2014-09-23 +http://www.theatlantic.com/features/archive/2014/09/why-i-hope-to-die-at-75/379329/|tag|http://www.semanlink.net/tag/mort +http://www.theatlantic.com/features/archive/2014/09/why-i-hope-to-die-at-75/379329/|title|Why I Hope to Die at 75 - The Atlantic +http://www.theatlantic.com/features/archive/2014/09/why-i-hope-to-die-at-75/379329/|creationTime|2014-09-23T16:10:44Z +http://www.infomesh.net/2001/swintro|creationDate|2005-01-05 +http://www.infomesh.net/2001/swintro|tag|http://www.semanlink.net/tag/semantic_web_introduction +http://www.infomesh.net/2001/swintro|title|The Semantic Web - An Introduction. +http://youtube.com/watch?v=Jk3o1hAgBlE|creationDate|2007-09-18 +http://youtube.com/watch?v=Jk3o1hAgBlE|tag|http://www.semanlink.net/tag/youtube +http://youtube.com/watch?v=Jk3o1hAgBlE|tag|http://www.semanlink.net/tag/moussa_poussi +http://youtube.com/watch?v=Jk3o1hAgBlE|comment|"I made this song for the abandoned children. It is about all the children of the world.
+Cette chanson, je l'ai composée pour les enfants abandonnés, et elle parle de tous les enfants du monde.
+Recorded live in Niamey, August 19, 2007.
+ +" +http://youtube.com/watch?v=Jk3o1hAgBlE|title|YouTube - Kokeïna - Moussa Poussi +http://youtube.com/watch?v=Jk3o1hAgBlE|creationTime|2007-09-18T01:21:04Z +http://www.lemonde.fr/a-la-une/article/2010/09/04/lettre-au-president-de-la-republique-par-tahar-ben-jelloun_1406745_3208.html|creationDate|2010-09-04 +http://www.lemonde.fr/a-la-une/article/2010/09/04/lettre-au-president-de-la-republique-par-tahar-ben-jelloun_1406745_3208.html|tag|http://www.semanlink.net/tag/tahar_ben_jelloun +http://www.lemonde.fr/a-la-une/article/2010/09/04/lettre-au-president-de-la-republique-par-tahar-ben-jelloun_1406745_3208.html|tag|http://www.semanlink.net/tag/sarkozy +http://www.lemonde.fr/a-la-une/article/2010/09/04/lettre-au-president-de-la-republique-par-tahar-ben-jelloun_1406745_3208.html|comment|La crise économique n'est pas une excuse. La crise morale est un fait. Il revient à vous, Monsieur le Président, de rétablir l'image de la France dans ce qu'elle a de plus beau, d'enviable et d'universel, à savoir son statut de pays des droits de l'homme, pays de la solidarité et de la fraternité proclamées, terre généreuse, riche de ses différences, riche de ses couleurs et de ses épices, prouvant entre autres que l'islam est tout à fait compatible avec la démocratie et la laïcité. Pour cela, Monsieur Le Président, effacez, je vous prie, de votre discours les idées malheureuses qu'un parti d'extrême droite diffuse dans le but de fermer ce pays sur lui-même, de l'isoler et de trahir ses valeurs fondamentales. +http://www.lemonde.fr/a-la-une/article/2010/09/04/lettre-au-president-de-la-republique-par-tahar-ben-jelloun_1406745_3208.html|title|Lettre au président de la République, par Tahar Ben Jelloun +http://www.lemonde.fr/a-la-une/article/2010/09/04/lettre-au-president-de-la-republique-par-tahar-ben-jelloun_1406745_3208.html|creationTime|2010-09-04T21:36:26Z +http://www.lemonde.fr/a-la-une/article/2010/09/04/lettre-au-president-de-la-republique-par-tahar-ben-jelloun_1406745_3208.html|source|Le Monde +http://www.lemonde.fr/a-la-une/article/2010/09/04/lettre-au-president-de-la-republique-par-tahar-ben-jelloun_1406745_3208.html|date|2010-09-05 +http://shanghailectures.org/lectures|creationDate|2014-03-09 +http://shanghailectures.org/lectures|tag|http://www.semanlink.net/tag/artificial_intelligence +http://shanghailectures.org/lectures|title|About the ShanghAI Lectures ShanghAI Lectures +http://shanghailectures.org/lectures|creationTime|2014-03-09T00:43:21Z +https://www.lesswrong.com/posts/xg3hXCYQPJkwHyik2/the-best-textbooks-on-every-subject|creationDate|2018-08-19 +https://www.lesswrong.com/posts/xg3hXCYQPJkwHyik2/the-best-textbooks-on-every-subject|tag|http://www.semanlink.net/tag/recommended_reading +https://www.lesswrong.com/posts/xg3hXCYQPJkwHyik2/the-best-textbooks-on-every-subject|tag|http://www.semanlink.net/tag/livre +https://www.lesswrong.com/posts/xg3hXCYQPJkwHyik2/the-best-textbooks-on-every-subject|comment|"> Make progress by accumulation, not random walks. + +> What if we could compile a list of the best textbooks on every subject? That would be extremely useful." 
+https://www.lesswrong.com/posts/xg3hXCYQPJkwHyik2/the-best-textbooks-on-every-subject|title|The Best Textbooks on Every Subject +https://www.lesswrong.com/posts/xg3hXCYQPJkwHyik2/the-best-textbooks-on-every-subject|creationTime|2018-08-19T22:42:09Z +http://www.cis.pku.edu.cn/faculty/system/zhangyan/papers/DMMOOC2017-jiang.pdf|creationDate|2018-08-12 +http://www.cis.pku.edu.cn/faculty/system/zhangyan/papers/DMMOOC2017-jiang.pdf|tag|http://www.semanlink.net/tag/conditional_random_field +http://www.cis.pku.edu.cn/faculty/system/zhangyan/papers/DMMOOC2017-jiang.pdf|tag|http://www.semanlink.net/tag/mooc +http://www.cis.pku.edu.cn/faculty/system/zhangyan/papers/DMMOOC2017-jiang.pdf|tag|http://www.semanlink.net/tag/sequence_labeling +http://www.cis.pku.edu.cn/faculty/system/zhangyan/papers/DMMOOC2017-jiang.pdf|tag|http://www.semanlink.net/tag/concept_extraction +http://www.cis.pku.edu.cn/faculty/system/zhangyan/papers/DMMOOC2017-jiang.pdf|title|A Framework for Semi supervised Concept Extraction from MOOC content (2017) +http://www.cis.pku.edu.cn/faculty/system/zhangyan/papers/DMMOOC2017-jiang.pdf|creationTime|2018-08-12T18:29:53Z +https://www.youtube.com/watch?v=nMK9-E-LUnc|creationDate|2014-04-13 +https://www.youtube.com/watch?v=nMK9-E-LUnc|tag|http://www.semanlink.net/tag/francophonie +https://www.youtube.com/watch?v=nMK9-E-LUnc|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=nMK9-E-LUnc|tag|http://www.semanlink.net/tag/niger +https://www.youtube.com/watch?v=nMK9-E-LUnc|title|"""Na am Francophonie"" Sogha Niger - YouTube" +https://www.youtube.com/watch?v=nMK9-E-LUnc|creationTime|2014-04-13T10:21:23Z +https://theconversation.com/bogou-faire-voyager-lexpertise-au-coeur-des-deserts-medicaux-africains-106369?utm_medium=email&utm_campaign=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449&utm_content=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449+CID_6ffe4a3e2829d97988a5d922642c2038&utm_source=campaign_monitor_fr&utm_term=Bogou%20%20faire%20voyager%20lexpertise%20au%20cur%20des%20dserts%20mdicaux%20africains|creationDate|2018-11-09 +https://theconversation.com/bogou-faire-voyager-lexpertise-au-coeur-des-deserts-medicaux-africains-106369?utm_medium=email&utm_campaign=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449&utm_content=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449+CID_6ffe4a3e2829d97988a5d922642c2038&utm_source=campaign_monitor_fr&utm_term=Bogou%20%20faire%20voyager%20lexpertise%20au%20cur%20des%20dserts%20mdicaux%20africains|tag|http://www.semanlink.net/tag/medecine +https://theconversation.com/bogou-faire-voyager-lexpertise-au-coeur-des-deserts-medicaux-africains-106369?utm_medium=email&utm_campaign=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449&utm_content=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449+CID_6ffe4a3e2829d97988a5d922642c2038&utm_source=campaign_monitor_fr&utm_term=Bogou%20%20faire%20voyager%20lexpertise%20au%20cur%20des%20dserts%20mdicaux%20africains|tag|http://www.semanlink.net/tag/new_africa 
+https://theconversation.com/bogou-faire-voyager-lexpertise-au-coeur-des-deserts-medicaux-africains-106369?utm_medium=email&utm_campaign=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449&utm_content=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449+CID_6ffe4a3e2829d97988a5d922642c2038&utm_source=campaign_monitor_fr&utm_term=Bogou%20%20faire%20voyager%20lexpertise%20au%20cur%20des%20dserts%20mdicaux%20africains|title|« Bogou », faire voyager l’expertise au cœur des déserts médicaux africains +https://theconversation.com/bogou-faire-voyager-lexpertise-au-coeur-des-deserts-medicaux-africains-106369?utm_medium=email&utm_campaign=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449&utm_content=La%20lettre%20de%20The%20Conversation%20France%20du%208%20novembre%202018%20-%201155510449+CID_6ffe4a3e2829d97988a5d922642c2038&utm_source=campaign_monitor_fr&utm_term=Bogou%20%20faire%20voyager%20lexpertise%20au%20cur%20des%20dserts%20mdicaux%20africains|creationTime|2018-11-09T14:02:04Z +http://blogs.sun.com/bblfish/entry/serialising_java_objects_to_rdf|creationDate|2010-05-12 +http://blogs.sun.com/bblfish/entry/serialising_java_objects_to_rdf|tag|http://www.semanlink.net/tag/restful_web_services +http://blogs.sun.com/bblfish/entry/serialising_java_objects_to_rdf|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/bblfish/entry/serialising_java_objects_to_rdf|tag|http://www.semanlink.net/tag/java_dev +http://blogs.sun.com/bblfish/entry/serialising_java_objects_to_rdf|tag|http://www.semanlink.net/tag/rdf +http://blogs.sun.com/bblfish/entry/serialising_java_objects_to_rdf|comment|Jersey is the reference implementation of JSR311 (JAX-RS) the Java API for RESTful Web Services. In short JSR311 makes it easy to publish graphs of Java Objects to the web, and implement update and POST semantics - all this using simple java annotations. It makes it easy for Java developers to do the right thing when writing data to the web. 
+
http://blogs.sun.com/bblfish/entry/serialising_java_objects_to_rdf|title|Serialising Java Objects to RDF with Jersey
http://blogs.sun.com/bblfish/entry/serialising_java_objects_to_rdf|creationTime|2010-05-12T21:45:22Z
http://www.journaldunet.com/solutions/cloud-computing/1146290-cloud-pourquoi-docker-peut-tout-changer/|creationDate|2016-02-06
http://www.journaldunet.com/solutions/cloud-computing/1146290-cloud-pourquoi-docker-peut-tout-changer/|tag|http://www.semanlink.net/tag/docker
http://www.journaldunet.com/solutions/cloud-computing/1146290-cloud-pourquoi-docker-peut-tout-changer/|title|Cloud : pourquoi Docker peut tout changer - JDN
http://www.journaldunet.com/solutions/cloud-computing/1146290-cloud-pourquoi-docker-peut-tout-changer/|creationTime|2016-02-06T00:38:51Z
http://passeurdesciences.blog.lemonde.fr/2017/03/26/des-archeologues-chinois-decouvrent-un-tresor-mythique/|creationDate|2017-03-26
http://passeurdesciences.blog.lemonde.fr/2017/03/26/des-archeologues-chinois-decouvrent-un-tresor-mythique/|tag|http://www.semanlink.net/tag/archeologie_chinoise
http://passeurdesciences.blog.lemonde.fr/2017/03/26/des-archeologues-chinois-decouvrent-un-tresor-mythique/|title|Des archéologues chinois découvrent un trésor mythique Passeur de sciences
http://passeurdesciences.blog.lemonde.fr/2017/03/26/des-archeologues-chinois-decouvrent-un-tresor-mythique/|creationTime|2017-03-26T19:17:08Z
http://www.ldodds.com/blog/2009/11/describing-sparql-extension-functions/|creationDate|2011-01-10
http://www.ldodds.com/blog/2009/11/describing-sparql-extension-functions/|tag|http://www.semanlink.net/tag/sparql_extension_functions
http://www.ldodds.com/blog/2009/11/describing-sparql-extension-functions/|tag|http://www.semanlink.net/tag/leigh_dodds
http://www.ldodds.com/blog/2009/11/describing-sparql-extension-functions/|tag|http://www.semanlink.net/tag/spin_functions
http://www.ldodds.com/blog/2009/11/describing-sparql-extension-functions/|title|Describing SPARQL Extension Functions « Lost Boy
http://www.ldodds.com/blog/2009/11/describing-sparql-extension-functions/|creationTime|2011-01-10T09:32:10Z
http://digital.cityofchicago.org/index.php/how-a-table-becomes-a-dataset-openrefine/|creationDate|2014-02-15
http://digital.cityofchicago.org/index.php/how-a-table-becomes-a-dataset-openrefine/|tag|http://www.semanlink.net/tag/openrefine
http://digital.cityofchicago.org/index.php/how-a-table-becomes-a-dataset-openrefine/|title|Digital Hub How a Table Becomes a Dataset, OpenRefine
http://digital.cityofchicago.org/index.php/how-a-table-becomes-a-dataset-openrefine/|creationTime|2014-02-15T17:45:56Z
http://freemind.sourceforge.net/wiki/index.php/Main_Page|creationDate|2005-09-25
http://freemind.sourceforge.net/wiki/index.php/Main_Page|tag|http://www.semanlink.net/tag/mind_mapping
http://freemind.sourceforge.net/wiki/index.php/Main_Page|tag|http://www.semanlink.net/tag/outliner
http://freemind.sourceforge.net/wiki/index.php/Main_Page|comment|FreeMind is a premier free mind-mapping software written in Java. The recent development has hopefully turned it into a high-productivity tool.
+http://freemind.sourceforge.net/wiki/index.php/Main_Page|title|Main Page - FreeMind - free mind mapping software +http://rdf4food.org/moin.cgi/DanBriSlides|creationDate|2008-02-17 +http://rdf4food.org/moin.cgi/DanBriSlides|tag|http://www.semanlink.net/tag/sparql +http://rdf4food.org/moin.cgi/DanBriSlides|tag|http://www.semanlink.net/tag/sparqlpress +http://rdf4food.org/moin.cgi/DanBriSlides|tag|http://www.semanlink.net/tag/dan_brickley +http://rdf4food.org/moin.cgi/DanBriSlides|comment|Notes from danbri talk, and motivation for SparqlPress +http://rdf4food.org/moin.cgi/DanBriSlides|title|DanBriSlides - SPARQL-ing days +http://rdf4food.org/moin.cgi/DanBriSlides|creationTime|2008-02-17T02:34:17Z +https://hai.stanford.edu/news/the_intertwined_quest_for_understanding_biological_intelligence_and_creating_artificial_intelligence/|creationDate|2018-12-29 +https://hai.stanford.edu/news/the_intertwined_quest_for_understanding_biological_intelligence_and_creating_artificial_intelligence/|tag|http://www.semanlink.net/tag/artificial_intelligence +https://hai.stanford.edu/news/the_intertwined_quest_for_understanding_biological_intelligence_and_creating_artificial_intelligence/|tag|http://www.semanlink.net/tag/computational_neuroscience +https://hai.stanford.edu/news/the_intertwined_quest_for_understanding_biological_intelligence_and_creating_artificial_intelligence/|title|The intertwined quest for understanding biological intelligence and creating artificial intelligence +https://hai.stanford.edu/news/the_intertwined_quest_for_understanding_biological_intelligence_and_creating_artificial_intelligence/|creationTime|2018-12-29T00:31:00Z +http://www.openlinksw.com/weblog/oerling/?id=1550|creationDate|2009-05-01 +http://www.openlinksw.com/weblog/oerling/?id=1550|tag|http://www.semanlink.net/tag/virtuoso +http://www.openlinksw.com/weblog/oerling/?id=1550|tag|http://www.semanlink.net/tag/orri_erling +http://www.openlinksw.com/weblog/oerling/?id=1550|title|Short Recap of Virtuoso Basics +http://www.openlinksw.com/weblog/oerling/?id=1550|creationTime|2009-05-01T21:53:52Z +http://www.w3.org/People/maxf/textorizer/|creationDate|2007-05-21 +http://www.w3.org/People/maxf/textorizer/|tag|http://www.semanlink.net/tag/svg +http://www.w3.org/People/maxf/textorizer/|tag|http://www.semanlink.net/tag/tools +http://www.w3.org/People/maxf/textorizer/|title|SVG Textorizer Tool +http://www.w3.org/People/maxf/textorizer/|creationTime|2007-05-21T23:01:52Z +http://ivanherman.wordpress.com/2007/07/29/from-wikipedia-uri-s-to-dbpedia-uri%E2%80%A6/|creationDate|2007-08-01 +http://ivanherman.wordpress.com/2007/07/29/from-wikipedia-uri-s-to-dbpedia-uri%E2%80%A6/|tag|http://www.semanlink.net/tag/wikipedia_page_to_concept +http://ivanherman.wordpress.com/2007/07/29/from-wikipedia-uri-s-to-dbpedia-uri%E2%80%A6/|tag|http://www.semanlink.net/tag/ivan_herman +http://ivanherman.wordpress.com/2007/07/29/from-wikipedia-uri-s-to-dbpedia-uri%E2%80%A6/|tag|http://www.semanlink.net/tag/richard_cyganiak +http://ivanherman.wordpress.com/2007/07/29/from-wikipedia-uri-s-to-dbpedia-uri%E2%80%A6/|title|From Wikipedia URI-s to DBpedia URI… « Ivan’s blog +http://ivanherman.wordpress.com/2007/07/29/from-wikipedia-uri-s-to-dbpedia-uri%E2%80%A6/|creationTime|2007-08-01T17:02:03Z +http://www.nytimes.com/2014/05/09/science/a-synthetic-biology-conference-lures-an-intriguing-audience.html?partner=rss&emc=rss|creationDate|2014-05-12 
+http://www.nytimes.com/2014/05/09/science/a-synthetic-biology-conference-lures-an-intriguing-audience.html?partner=rss&emc=rss|tag|http://www.semanlink.net/tag/synthetic_biology +http://www.nytimes.com/2014/05/09/science/a-synthetic-biology-conference-lures-an-intriguing-audience.html?partner=rss&emc=rss|title|A Synthetic Biology Conference Lures an Intriguing Audience - NYTimes.com +http://www.nytimes.com/2014/05/09/science/a-synthetic-biology-conference-lures-an-intriguing-audience.html?partner=rss&emc=rss|creationTime|2014-05-12T23:18:50Z +http://fgiasson.com/blog/index.php/2007/11/30/zitgist-dataviewer/|creationDate|2007-12-08 +http://fgiasson.com/blog/index.php/2007/11/30/zitgist-dataviewer/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2007/11/30/zitgist-dataviewer/|tag|http://www.semanlink.net/tag/zitgist +http://fgiasson.com/blog/index.php/2007/11/30/zitgist-dataviewer/|title|Zitgist DataViewer at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2007/11/30/zitgist-dataviewer/|creationTime|2007-12-08T16:05:43Z +http://www.w3.org/2005/Incubator/urw3/XGR-urw3/|creationDate|2008-05-04 +http://www.w3.org/2005/Incubator/urw3/XGR-urw3/|tag|http://www.semanlink.net/tag/w3c_incubator_group_report +http://www.w3.org/2005/Incubator/urw3/XGR-urw3/|tag|http://www.semanlink.net/tag/uncertainty_reasoning_and_semantic_web +http://www.w3.org/2005/Incubator/urw3/XGR-urw3/|title|Uncertainty Reasoning for the World Wide Web +http://www.w3.org/2005/Incubator/urw3/XGR-urw3/|creationTime|2008-05-04T15:56:48Z +http://www.w3.org/2008/09/msnws/papers/|creationDate|2009-02-07 +http://www.w3.org/2008/09/msnws/papers/|tag|http://www.semanlink.net/tag/workshop +http://www.w3.org/2008/09/msnws/papers/|tag|http://www.semanlink.net/tag/social_networks +http://www.w3.org/2008/09/msnws/papers/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2008/09/msnws/papers/|title|Papers submitted to W3C Workshop on the Future of Social Networking +http://www.w3.org/2008/09/msnws/papers/|creationTime|2009-02-07T23:10:11Z +http://www.prescod.net/rest/|creationDate|2006-06-25 +http://www.prescod.net/rest/|tag|http://www.semanlink.net/tag/rest +http://www.prescod.net/rest/|title|REST Resources +http://www.heppnetz.de/projects/skos2owl/|creationDate|2013-02-08 +http://www.heppnetz.de/projects/skos2owl/|tag|http://www.semanlink.net/tag/martin_hepp +http://www.heppnetz.de/projects/skos2owl/|tag|http://www.semanlink.net/tag/skos_owl +http://www.heppnetz.de/projects/skos2owl/|title|SKOS2OWL: Online tool for deriving OWL ontologies from SKOS categorization schemas +http://www.heppnetz.de/projects/skos2owl/|creationTime|2013-02-08T10:37:30Z +https://github.com/keon/awesome-nlp/blob/master/README.md|creationDate|2018-02-14 +https://github.com/keon/awesome-nlp/blob/master/README.md|tag|http://www.semanlink.net/tag/links +https://github.com/keon/awesome-nlp/blob/master/README.md|tag|http://www.semanlink.net/tag/nlp +https://github.com/keon/awesome-nlp/blob/master/README.md|title|awesome NLP: A curated list of resources dedicated to Natural Language Processing +https://github.com/keon/awesome-nlp/blob/master/README.md|creationTime|2018-02-14T17:29:54Z +http://www.bakerbotts.com/ideas/publications/2019/february/what-is-unity-a-look-at-the-usptos-ai-development-efforts|creationDate|2019-02-18 +http://www.bakerbotts.com/ideas/publications/2019/february/what-is-unity-a-look-at-the-usptos-ai-development-efforts|tag|http://www.semanlink.net/tag/ip_ir_ml_ia 
+http://www.bakerbotts.com/ideas/publications/2019/february/what-is-unity-a-look-at-the-usptos-ai-development-efforts|title|A Look at the USPTO’s AI Development Efforts +http://www.bakerbotts.com/ideas/publications/2019/february/what-is-unity-a-look-at-the-usptos-ai-development-efforts|creationTime|2019-02-18T15:10:06Z +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3232,50-647202,0.html|creationDate|2005-05-08 +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3232,50-647202,0.html|tag|http://www.semanlink.net/tag/web +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3232,50-647202,0.html|source|Le Monde +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3232,50-647202,0.html|date|2005-05-08 +http://www.documentarymania.com/player.php?title=Steve%20Jobs%20the%20Lost%20Interview|creationDate|2018-12-22 +http://www.documentarymania.com/player.php?title=Steve%20Jobs%20the%20Lost%20Interview|tag|http://www.semanlink.net/tag/steve_jobs +http://www.documentarymania.com/player.php?title=Steve%20Jobs%20the%20Lost%20Interview|title|Steve Jobs the Lost Interview - Documentary Mania +http://www.documentarymania.com/player.php?title=Steve%20Jobs%20the%20Lost%20Interview|creationTime|2018-12-22T10:37:13Z +https://nlp.stanford.edu/pubs/hancock2018babble.pdf|creationDate|2018-06-23 +https://nlp.stanford.edu/pubs/hancock2018babble.pdf|tag|http://www.semanlink.net/tag/nlp_stanford +https://nlp.stanford.edu/pubs/hancock2018babble.pdf|tag|http://www.semanlink.net/tag/nlp_text_classification +https://nlp.stanford.edu/pubs/hancock2018babble.pdf|comment|> a framework for training classifiers in which an **annotator** provides a natural language explanation for each labeling decision. A semantic parser converts these explanations into programmatic labeling functions that generate noisy labels for an arbitrary amount of unlabeled data, which is used to train a classifier. 
On three relation extraction tasks, we find that users are able to train classifiers with comparable F1 scores from 5–100× faster by providing explanations instead of just labels
https://nlp.stanford.edu/pubs/hancock2018babble.pdf|title|Training Classifiers with Natural Language Explanations
https://nlp.stanford.edu/pubs/hancock2018babble.pdf|creationTime|2018-06-23T00:55:49Z
http://www.detailedbalance.net/glove-compartment/2005/05/14/osx-tomcat55-mod_jk|creationDate|2006-01-22
http://www.detailedbalance.net/glove-compartment/2005/05/14/osx-tomcat55-mod_jk|tag|http://www.semanlink.net/tag/tomcat
http://www.detailedbalance.net/glove-compartment/2005/05/14/osx-tomcat55-mod_jk|tag|http://www.semanlink.net/tag/mac_os_x_web_serving
http://www.detailedbalance.net/glove-compartment/2005/05/14/osx-tomcat55-mod_jk|tag|http://www.semanlink.net/tag/apache
http://www.detailedbalance.net/glove-compartment/2005/05/14/osx-tomcat55-mod_jk|title|The Glove Compartment » Tomcat 5.5, mod_jk, and OS X’s built-in Apache
http://www.uncontactedtribes.org/|creationDate|2011-02-07
http://www.uncontactedtribes.org/|tag|http://www.semanlink.net/tag/amazonie
http://www.uncontactedtribes.org/|tag|http://www.semanlink.net/tag/peuples
http://www.uncontactedtribes.org/|title|Uncontacted Tribes
http://www.uncontactedtribes.org/|creationTime|2011-02-07T17:27:08Z
https://ontotext.com/|creationDate|2018-10-13
https://ontotext.com/|tag|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp
https://ontotext.com/|tag|http://www.semanlink.net/tag/knowledge_graph
https://ontotext.com/|comment|"> Ontotext transforms how organizations **identify meaning across** diverse databases and massive amounts of unstructured data by **combining a semantic graph database with text mining, and machine learning**.
"
https://ontotext.com/|title|Ontotext Semantic Technology Developer
https://ontotext.com/|creationTime|2018-10-13T11:01:58Z
https://thegradient.pub/structure-learning/|creationDate|2018-05-07
https://thegradient.pub/structure-learning/|tag|http://www.semanlink.net/tag/graph_embeddings
https://thegradient.pub/structure-learning/|title|How do we capture structure in relational data?
+https://thegradient.pub/structure-learning/|creationTime|2018-05-07T12:48:15Z +http://en.wikipedia.org/wiki/Big_Lebowski|creationDate|2013-11-06 +http://en.wikipedia.org/wiki/Big_Lebowski|tag|http://www.semanlink.net/tag/film_americain +http://en.wikipedia.org/wiki/Big_Lebowski|tag|http://www.semanlink.net/tag/comedie +http://en.wikipedia.org/wiki/Big_Lebowski|title|The Big Lebowski +http://en.wikipedia.org/wiki/Big_Lebowski|creationTime|2013-11-06T00:38:19Z +http://blog.datagraph.org/2010/03/rdf-isomorphism|creationDate|2014-12-10 +http://blog.datagraph.org/2010/03/rdf-isomorphism|tag|http://www.semanlink.net/tag/algorithmes +http://blog.datagraph.org/2010/03/rdf-isomorphism|tag|http://www.semanlink.net/tag/graph +http://blog.datagraph.org/2010/03/rdf-isomorphism|tag|http://www.semanlink.net/tag/rdf +http://blog.datagraph.org/2010/03/rdf-isomorphism|title|The Curious Case of RDF Graph Isomorphism - The Datagraph Blog +http://blog.datagraph.org/2010/03/rdf-isomorphism|creationTime|2014-12-10T18:40:09Z +https://www.theatlantic.com/technology/archive/2017/09/saving-the-world-from-code/540393/|creationDate|2017-11-25 +https://www.theatlantic.com/technology/archive/2017/09/saving-the-world-from-code/540393/|tag|http://www.semanlink.net/tag/emmanuel_ledinot +https://www.theatlantic.com/technology/archive/2017/09/saving-the-world-from-code/540393/|tag|http://www.semanlink.net/tag/programming +https://www.theatlantic.com/technology/archive/2017/09/saving-the-world-from-code/540393/|title|The Coming Software Apocalypse - The Atlantic +https://www.theatlantic.com/technology/archive/2017/09/saving-the-world-from-code/540393/|creationTime|2017-11-25T18:59:49Z +https://lejournal.cnrs.fr/articles/une-riche-cite-decouverte-aux-portes-de-la-mesopotamie|creationDate|2019-03-18 +https://lejournal.cnrs.fr/articles/une-riche-cite-decouverte-aux-portes-de-la-mesopotamie|tag|http://www.semanlink.net/tag/mesopotamie +https://lejournal.cnrs.fr/articles/une-riche-cite-decouverte-aux-portes-de-la-mesopotamie|tag|http://www.semanlink.net/tag/decouverte_archeologique +https://lejournal.cnrs.fr/articles/une-riche-cite-decouverte-aux-portes-de-la-mesopotamie|comment|"Au Kurdistan d’Irak, les fouilles menées par une mission archéologique française ont révélé une ville antique inédite, sur le site de Kunara. Vers la fin du IIIe millénaire av. J.-C., cette ville s’élevait au cœur d’un royaume méconnu : celui du peuple des montagnes, demeuré jusque-là dans l’ombre de ses puissants voisins mésopotamiens. 
+" +https://lejournal.cnrs.fr/articles/une-riche-cite-decouverte-aux-portes-de-la-mesopotamie|title|Une riche cité découverte aux portes de la Mésopotamie CNRS Le journal +https://lejournal.cnrs.fr/articles/une-riche-cite-decouverte-aux-portes-de-la-mesopotamie|creationTime|2019-03-18T15:26:57Z +https://www.voyage-est.com/blogs/voyage-est/index.php?post/2014/03/20/Utiliser-un-dictionnaire-chinois|creationDate|2018-03-04 +https://www.voyage-est.com/blogs/voyage-est/index.php?post/2014/03/20/Utiliser-un-dictionnaire-chinois|tag|http://www.semanlink.net/tag/chinois +https://www.voyage-est.com/blogs/voyage-est/index.php?post/2014/03/20/Utiliser-un-dictionnaire-chinois|tag|http://www.semanlink.net/tag/dictionnaire +https://www.voyage-est.com/blogs/voyage-est/index.php?post/2014/03/20/Utiliser-un-dictionnaire-chinois|title|Utiliser un dictionnaire chinois - 东游记 +https://www.voyage-est.com/blogs/voyage-est/index.php?post/2014/03/20/Utiliser-un-dictionnaire-chinois|creationTime|2018-03-04T11:03:02Z +http://www.roosevelt2012.fr/|creationDate|2012-05-01 +http://www.roosevelt2012.fr/|tag|http://www.semanlink.net/tag/roosevelt +http://www.roosevelt2012.fr/|title|Roosevelt 2012 - un collectif et 15 réformes pour changer la donne +http://www.roosevelt2012.fr/|creationTime|2012-05-01T12:10:13Z +http://semanticweb.com/gmail-meet-json-ld_b37211|creationDate|2013-05-17 +http://semanticweb.com/gmail-meet-json-ld_b37211|tag|http://www.semanlink.net/tag/dan_brickley +http://semanticweb.com/gmail-meet-json-ld_b37211|tag|http://www.semanlink.net/tag/gmail +http://semanticweb.com/gmail-meet-json-ld_b37211|tag|http://www.semanlink.net/tag/json_ld +http://semanticweb.com/gmail-meet-json-ld_b37211|title|Gmail, Meet JSON-LD - semanticweb.com +http://semanticweb.com/gmail-meet-json-ld_b37211|creationTime|2013-05-17T14:23:37Z +https://blog.insightdatascience.com/reinforcement-learning-from-scratch-819b65f074d8|creationDate|2018-06-09 +https://blog.insightdatascience.com/reinforcement-learning-from-scratch-819b65f074d8|tag|http://www.semanlink.net/tag/reinforcement_learning +https://blog.insightdatascience.com/reinforcement-learning-from-scratch-819b65f074d8|title|Reinforcement Learning from scratch – Insight Data +https://blog.insightdatascience.com/reinforcement-learning-from-scratch-819b65f074d8|creationTime|2018-06-09T09:26:53Z +http://www.lemonde.fr/web/article/0,1-0@2-3244,36-970565,0.html|creationDate|2007-10-25 +http://www.lemonde.fr/web/article/0,1-0@2-3244,36-970565,0.html|tag|http://www.semanlink.net/tag/sarkozy_et_la_recherche +http://www.lemonde.fr/web/article/0,1-0@2-3244,36-970565,0.html|tag|http://www.semanlink.net/tag/cnrs +http://www.lemonde.fr/web/article/0,1-0@2-3244,36-970565,0.html|tag|http://www.semanlink.net/tag/prix_nobel +http://www.lemonde.fr/web/article/0,1-0@2-3244,36-970565,0.html|comment|"Auriez-vous décroché le Nobel avec le financement de la recherche sur projet que met en place le gouvernement ? Non, s'il n'y avait eu qu'un financement sur projet.
Je n'ai pas démarré mes travaux en me disant que j'allais augmenter la capacité de stockage des disques durs. Le paysage final n'est jamais visible du point de départ. + + +" +http://www.lemonde.fr/web/article/0,1-0@2-3244,36-970565,0.html|title|Le Prix Nobel Albert Fert plaide pour une recherche libre +http://www.lemonde.fr/web/article/0,1-0@2-3244,36-970565,0.html|creationTime|2007-10-25T13:02:18Z +http://www.lemonde.fr/web/article/0,1-0@2-3244,36-970565,0.html|source|Le Monde +https://github.com/jprante/elasticsearch-langdetect|creationDate|2017-04-28 +https://github.com/jprante/elasticsearch-langdetect|tag|http://www.semanlink.net/tag/elasticsearch +https://github.com/jprante/elasticsearch-langdetect|title|A plugin for language detection in Elasticsearch using Nakatani Shuyo's language detector +https://github.com/jprante/elasticsearch-langdetect|creationTime|2017-04-28T22:26:59Z +http://musique.rfi.fr/artiste/zouk/kassav|creationDate|2017-12-08 +http://musique.rfi.fr/artiste/zouk/kassav|tag|http://www.semanlink.net/tag/kassav +http://musique.rfi.fr/artiste/zouk/kassav|title|Kassav ' - Biographie, discographie et fiche artiste – RFI Musique +http://musique.rfi.fr/artiste/zouk/kassav|creationTime|2017-12-08T17:16:00Z +http://data.semanticweb.org/person/michael-k-bergman/html|creationDate|2012-02-20 +http://data.semanticweb.org/person/michael-k-bergman/html|tag|http://www.semanlink.net/tag/mike_bergman +http://data.semanticweb.org/person/michael-k-bergman/html|title|Mike Bergman Semantic Web Dog Food +http://data.semanticweb.org/person/michael-k-bergman/html|creationTime|2012-02-20T21:14:23Z +https://github.com/solid/react-components/tree/v1.4.0#-create-your-own-components|creationDate|2019-03-02 +https://github.com/solid/react-components/tree/v1.4.0#-create-your-own-components|tag|http://www.semanlink.net/tag/solid +https://github.com/solid/react-components/tree/v1.4.0#-create-your-own-components|tag|http://www.semanlink.net/tag/ruben_verborgh +https://github.com/solid/react-components/tree/v1.4.0#-create-your-own-components|tag|http://www.semanlink.net/tag/react_js +https://github.com/solid/react-components/tree/v1.4.0#-create-your-own-components|tag|http://www.semanlink.net/tag/github_project +https://github.com/solid/react-components/tree/v1.4.0#-create-your-own-components|comment|Basic React components for building your own Solid components and apps +https://github.com/solid/react-components/tree/v1.4.0#-create-your-own-components|title|GitHub - solid/react-components at v1.4.0 +https://github.com/solid/react-components/tree/v1.4.0#-create-your-own-components|creationTime|2019-03-02T08:39:58Z +https://twitter.com/random_walker/status/1079759096272818178|creationDate|2019-01-01 +https://twitter.com/random_walker/status/1079759096272818178|tag|http://www.semanlink.net/tag/blockchain +https://twitter.com/random_walker/status/1079759096272818178|title|"Arvind Narayanan sur Twitter : ""In 2018 the blockchain/decentralization story fell apart. 
For example, a study of 43 use cases found a 0% success rate""" +https://twitter.com/random_walker/status/1079759096272818178|creationTime|2019-01-01T12:39:29Z +http://ercim-news.ercim.eu/en96/special/open-education-a-growing-high-impact-area-for-linked-open-data|creationDate|2014-01-21 +http://ercim-news.ercim.eu/en96/special/open-education-a-growing-high-impact-area-for-linked-open-data|tag|http://www.semanlink.net/tag/mathieu_d_aquin +http://ercim-news.ercim.eu/en96/special/open-education-a-growing-high-impact-area-for-linked-open-data|tag|http://www.semanlink.net/tag/open_education +http://ercim-news.ercim.eu/en96/special/open-education-a-growing-high-impact-area-for-linked-open-data|title|Open Education: A Growing, High Impact Area for Linked Open Data +http://ercim-news.ercim.eu/en96/special/open-education-a-growing-high-impact-area-for-linked-open-data|creationTime|2014-01-21T19:07:07Z +http://www.lemonde.fr/planete/article/2017/06/24/deforestation-au-bresil-l-operation-greenwashing-de-michel-temer-fait-long-feu_5150613_3244.html|creationDate|2017-06-24 +http://www.lemonde.fr/planete/article/2017/06/24/deforestation-au-bresil-l-operation-greenwashing-de-michel-temer-fait-long-feu_5150613_3244.html|tag|http://www.semanlink.net/tag/deforestation +http://www.lemonde.fr/planete/article/2017/06/24/deforestation-au-bresil-l-operation-greenwashing-de-michel-temer-fait-long-feu_5150613_3244.html|tag|http://www.semanlink.net/tag/bresil +http://www.lemonde.fr/planete/article/2017/06/24/deforestation-au-bresil-l-operation-greenwashing-de-michel-temer-fait-long-feu_5150613_3244.html|title|Déforestation au Brésil : l’opération « greenwashing » de Michel Temer fait long feu +http://www.lemonde.fr/planete/article/2017/06/24/deforestation-au-bresil-l-operation-greenwashing-de-michel-temer-fait-long-feu_5150613_3244.html|creationTime|2017-06-24T17:27:42Z +http://silentcircle.wordpress.com/2013/09/11/the-battle-for-your-digital-soul/?utm_content=buffer9771d&utm_source=buffer&utm_medium=twitter&utm_campaign=Buffer|creationDate|2013-09-14 +http://silentcircle.wordpress.com/2013/09/11/the-battle-for-your-digital-soul/?utm_content=buffer9771d&utm_source=buffer&utm_medium=twitter&utm_campaign=Buffer|tag|http://www.semanlink.net/tag/nsa_spying_scandal +http://silentcircle.wordpress.com/2013/09/11/the-battle-for-your-digital-soul/?utm_content=buffer9771d&utm_source=buffer&utm_medium=twitter&utm_campaign=Buffer|title|The Battle for Your Digital Soul Silent Circle Blog +http://silentcircle.wordpress.com/2013/09/11/the-battle-for-your-digital-soul/?utm_content=buffer9771d&utm_source=buffer&utm_medium=twitter&utm_campaign=Buffer|creationTime|2013-09-14T00:10:43Z +https://www.edge.org/response-detail/26693|creationDate|2018-01-14 +https://www.edge.org/response-detail/26693|tag|http://www.semanlink.net/tag/ebola +https://www.edge.org/response-detail/26693|title|We May All Die Horribly +https://www.edge.org/response-detail/26693|creationTime|2018-01-14T19:19:51Z +http://www.youtube.com/watch?v=6eGcsGPgUTw|creationDate|2008-04-01 +http://www.youtube.com/watch?v=6eGcsGPgUTw|tag|http://www.semanlink.net/tag/fun +http://www.youtube.com/watch?v=6eGcsGPgUTw|tag|http://www.semanlink.net/tag/rock +http://www.youtube.com/watch?v=6eGcsGPgUTw|tag|http://www.semanlink.net/tag/danny_ayers +http://www.youtube.com/watch?v=6eGcsGPgUTw|tag|http://www.semanlink.net/tag/dataportability +http://www.youtube.com/watch?v=6eGcsGPgUTw|title|You Tube - DataPortability and Me (Get Your Data Out!) 
+http://www.youtube.com/watch?v=6eGcsGPgUTw|creationTime|2008-04-01T15:30:54Z +http://skhole.fr/petite-poucette-la-douteuse-fable-de-michel-serres|creationDate|2016-10-11 +http://skhole.fr/petite-poucette-la-douteuse-fable-de-michel-serres|tag|http://www.semanlink.net/tag/michel_serres +http://skhole.fr/petite-poucette-la-douteuse-fable-de-michel-serres|title|Petite Poucette : la douteuse fable de Michel Serres Revue Skhole.fr +http://skhole.fr/petite-poucette-la-douteuse-fable-de-michel-serres|creationTime|2016-10-11T10:19:59Z +http://www.w3.org/2007/powder/|creationDate|2010-07-01 +http://www.w3.org/2007/powder/|tag|http://www.semanlink.net/tag/powder +http://www.w3.org/2007/powder/|title|W3C POWDER Working Group +http://www.w3.org/2007/powder/|creationTime|2010-07-01T11:16:11Z +http://www.kanai.net/weblog/archive/2007/01/26/00h53m55s#003095|creationDate|2007-02-13 +http://www.kanai.net/weblog/archive/2007/01/26/00h53m55s#003095|tag|http://www.semanlink.net/tag/coree_du_sud +http://www.kanai.net/weblog/archive/2007/01/26/00h53m55s#003095|tag|http://www.semanlink.net/tag/windows_vista +http://www.kanai.net/weblog/archive/2007/01/26/00h53m55s#003095|comment|Without enough time to re-architect Korean websites, 3 S. Korean governmental ministries, warned S. Korean users that upgrading to Vista would disable the user from making any secure transaction online. +http://www.kanai.net/weblog/archive/2007/01/26/00h53m55s#003095|title|The cost of monoculture +http://www.kanai.net/weblog/archive/2007/01/26/00h53m55s#003095|creationTime|2007-02-13T00:23:44Z +http://www.ecrans.fr/Denonce-par-Albanel-vire-par-TF1,7137.html|creationDate|2009-05-11 +http://www.ecrans.fr/Denonce-par-Albanel-vire-par-TF1,7137.html|tag|http://www.semanlink.net/tag/sarkozyland +http://www.ecrans.fr/Denonce-par-Albanel-vire-par-TF1,7137.html|tag|http://www.semanlink.net/tag/gouvernement_sarkozy +http://www.ecrans.fr/Denonce-par-Albanel-vire-par-TF1,7137.html|tag|http://www.semanlink.net/tag/tf1 +http://www.ecrans.fr/Denonce-par-Albanel-vire-par-TF1,7137.html|tag|http://www.semanlink.net/tag/hadopi_riposte_graduee +http://www.ecrans.fr/Denonce-par-Albanel-vire-par-TF1,7137.html|comment|Un cadre de TF1 hostile à la loi Hadopi a écrit à sa députée, Françoise de Panafieu. Qui a fait suivre à la ministre de la Culture. Qui a transmis à la chaîne. Qui l’a licencié. +http://www.ecrans.fr/Denonce-par-Albanel-vire-par-TF1,7137.html|title|Dénoncé par Albanel, viré par TF1 +http://www.ecrans.fr/Denonce-par-Albanel-vire-par-TF1,7137.html|creationTime|2009-05-11T23:01:56Z +http://tomheath.com/blog/2010/06/why-carry-the-cost-of-linked-data/|creationDate|2010-07-30 +http://tomheath.com/blog/2010/06/why-carry-the-cost-of-linked-data/|tag|http://www.semanlink.net/tag/cost_of_linked_data +http://tomheath.com/blog/2010/06/why-carry-the-cost-of-linked-data/|tag|http://www.semanlink.net/tag/government_data_as_linked_data +http://tomheath.com/blog/2010/06/why-carry-the-cost-of-linked-data/|tag|http://www.semanlink.net/tag/tom_heath +http://tomheath.com/blog/2010/06/why-carry-the-cost-of-linked-data/|title|Why Carry the Cost of Linked Data? 
– Tom Heath’s Displacement Activities +http://tomheath.com/blog/2010/06/why-carry-the-cost-of-linked-data/|creationTime|2010-07-30T14:06:00Z +http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html#example-model-selection-grid-search-text-feature-extraction-py|creationDate|2016-01-12 +http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html#example-model-selection-grid-search-text-feature-extraction-py|tag|http://www.semanlink.net/tag/text_feature_extraction +http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html#example-model-selection-grid-search-text-feature-extraction-py|tag|http://www.semanlink.net/tag/scikit_learn +http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html#example-model-selection-grid-search-text-feature-extraction-py|title|Sample pipeline for text feature extraction and evaluation — scikit-learn documentation +http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html#example-model-selection-grid-search-text-feature-extraction-py|creationTime|2016-01-12T00:45:15Z +http://opinionator.blogs.nytimes.com/2013/10/09/turning-education-upside-down/?_r=3|creationDate|2013-10-26 +http://opinionator.blogs.nytimes.com/2013/10/09/turning-education-upside-down/?_r=3|tag|http://www.semanlink.net/tag/education +http://opinionator.blogs.nytimes.com/2013/10/09/turning-education-upside-down/?_r=3|tag|http://www.semanlink.net/tag/mooc +http://opinionator.blogs.nytimes.com/2013/10/09/turning-education-upside-down/?_r=3|title|Turning Education Upside Down - NYTimes.com +http://opinionator.blogs.nytimes.com/2013/10/09/turning-education-upside-down/?_r=3|creationTime|2013-10-26T10:04:49Z +http://apassant.net/blog/2008/06/20/eswc2008-slides/|creationDate|2008-06-20 +http://apassant.net/blog/2008/06/20/eswc2008-slides/|tag|http://www.semanlink.net/tag/alexandre_passant +http://apassant.net/blog/2008/06/20/eswc2008-slides/|tag|http://www.semanlink.net/tag/eswc_2008 +http://apassant.net/blog/2008/06/20/eswc2008-slides/|tag|http://www.semanlink.net/tag/slides +http://apassant.net/blog/2008/06/20/eswc2008-slides/|title|ESWC2008 slides : Alexandre Passant +http://apassant.net/blog/2008/06/20/eswc2008-slides/|creationTime|2008-06-20T19:02:06Z +http://freedomboxfoundation.org/|creationDate|2012-01-06 +http://freedomboxfoundation.org/|tag|http://www.semanlink.net/tag/freedom_box +http://freedomboxfoundation.org/|title|FreedomBox Foundation +http://freedomboxfoundation.org/|creationTime|2012-01-06T21:14:31Z +http://www.lemonde.fr/proche-orient/article/2013/05/27/syrie-le-monde-temoin-d-attaques-toxiques_3417225_3218.html|creationDate|2013-05-27 +http://www.lemonde.fr/proche-orient/article/2013/05/27/syrie-le-monde-temoin-d-attaques-toxiques_3417225_3218.html|tag|http://www.semanlink.net/tag/syrie +http://www.lemonde.fr/proche-orient/article/2013/05/27/syrie-le-monde-temoin-d-attaques-toxiques_3417225_3218.html|tag|http://www.semanlink.net/tag/guerre_chimique +http://www.lemonde.fr/proche-orient/article/2013/05/27/syrie-le-monde-temoin-d-attaques-toxiques_3417225_3218.html|title|"Syrie : ""Le Monde"" témoin d'attaques toxiques" +http://www.lemonde.fr/proche-orient/article/2013/05/27/syrie-le-monde-temoin-d-attaques-toxiques_3417225_3218.html|creationTime|2013-05-27T07:48:43Z +http://www.youtube.com/watch?v=G4UX6UNdLUM|creationDate|2009-04-19 
+http://www.youtube.com/watch?v=G4UX6UNdLUM|tag|http://www.semanlink.net/tag/bush +http://www.youtube.com/watch?v=G4UX6UNdLUM|tag|http://www.semanlink.net/tag/patti_smith +http://www.youtube.com/watch?v=G4UX6UNdLUM|tag|http://www.semanlink.net/tag/contestation +http://www.youtube.com/watch?v=G4UX6UNdLUM|title|Patti Smith Indicts George W. Bush - Dream of life +http://www.youtube.com/watch?v=G4UX6UNdLUM|creationTime|2009-04-19T00:38:55Z +http://opensourceconnections.com/blog/2015/10/16/bm25-the-next-generation-of-lucene-relevation/|creationDate|2018-02-12 +http://opensourceconnections.com/blog/2015/10/16/bm25-the-next-generation-of-lucene-relevation/|tag|http://www.semanlink.net/tag/lucene +http://opensourceconnections.com/blog/2015/10/16/bm25-the-next-generation-of-lucene-relevation/|tag|http://www.semanlink.net/tag/okapi_bm25 +http://opensourceconnections.com/blog/2015/10/16/bm25-the-next-generation-of-lucene-relevation/|title|BM25 The Next Generation of Lucene Relevance +http://opensourceconnections.com/blog/2015/10/16/bm25-the-next-generation-of-lucene-relevation/|creationTime|2018-02-12T18:14:27Z +http://events.linkeddata.org/ldow2013/|creationDate|2013-03-02 +http://events.linkeddata.org/ldow2013/|tag|http://www.semanlink.net/tag/ldow2013 +http://events.linkeddata.org/ldow2013/|title|Linked Data on the Web (LDOW2013) - Workshop at WWW2013, Rio de Janeiro, Brazil +http://events.linkeddata.org/ldow2013/|creationTime|2013-03-02T16:44:11Z +http://en.wikipedia.org/wiki/JSON|creationDate|2006-02-07 +http://en.wikipedia.org/wiki/JSON|tag|http://www.semanlink.net/tag/json +http://en.wikipedia.org/wiki/JSON|comment|"JSON, which stands for ""JavaScript Object Notation"", is a lightweight computer data interchange format. JSON is a subset of the object literal notation of JavaScript but its use does not require JavaScript." +http://en.wikipedia.org/wiki/JSON|title|JSON - Wikipedia, the free encyclopedia +http://www.wired.com/2014/05/the-world-of-computer-go|creationDate|2014-12-27 +http://www.wired.com/2014/05/the-world-of-computer-go|tag|http://www.semanlink.net/tag/computers +http://www.wired.com/2014/05/the-world-of-computer-go|tag|http://www.semanlink.net/tag/go_game +http://www.wired.com/2014/05/the-world-of-computer-go|title|The Mystery of Go, the Ancient Game That Computers Still Can't Win WIRED +http://www.wired.com/2014/05/the-world-of-computer-go|creationTime|2014-12-27T14:49:19Z +http://www.desertec.org/|creationDate|2009-07-02 +http://www.desertec.org/|tag|http://www.semanlink.net/tag/desert +http://www.desertec.org/|tag|http://www.semanlink.net/tag/energie_solaire +http://www.desertec.org/|title|DESERTEC Foundation +http://www.desertec.org/|creationTime|2009-07-02T23:10:56Z +http://html5doctor.com/interview-with-ian-hickson-html-editor/|creationDate|2013-01-22 +http://html5doctor.com/interview-with-ian-hickson-html-editor/|tag|http://www.semanlink.net/tag/hixie +http://html5doctor.com/interview-with-ian-hickson-html-editor/|comment|"""Using something like JSON makes sense if what you want to serialise is a static JavaScript structure with no cross-references. If your data structure is something else, then using JSON doesn’t generally make more sense than using a dedicated format — either way, you’re going to have to get the data out and piece it back together into your own internal representation. 
""" +http://html5doctor.com/interview-with-ian-hickson-html-editor/|title|Interview with Ian Hickson, HTML editor HTML5 Doctor +http://html5doctor.com/interview-with-ian-hickson-html-editor/|creationTime|2013-01-22T23:21:09Z +http://www.snee.com/bobdc.blog/2012/02/pull-rdf-metadata-out-of-jpegs.html|creationDate|2012-03-10 +http://www.snee.com/bobdc.blog/2012/02/pull-rdf-metadata-out-of-jpegs.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2012/02/pull-rdf-metadata-out-of-jpegs.html|title|Pull RDF metadata out of JPEGs, MP3s, and more - bobdc.blog +http://www.snee.com/bobdc.blog/2012/02/pull-rdf-metadata-out-of-jpegs.html|creationTime|2012-03-10T22:38:06Z +http://www.insu.cnrs.fr/node/6047|creationDate|2016-10-07 +http://www.insu.cnrs.fr/node/6047|tag|http://www.semanlink.net/tag/exoplanetes +http://www.insu.cnrs.fr/node/6047|title|Proxima b, une exoplanète recouverte d’un océan ? +http://www.insu.cnrs.fr/node/6047|creationTime|2016-10-07T11:37:52Z +http://dev.eclipse.org/blogs/wayne/2007/07/25/many-classpaths-to-enlightenment/#comments|creationDate|2007-11-12 +http://dev.eclipse.org/blogs/wayne/2007/07/25/many-classpaths-to-enlightenment/#comments|tag|http://www.semanlink.net/tag/wtp +http://dev.eclipse.org/blogs/wayne/2007/07/25/many-classpaths-to-enlightenment/#comments|tag|http://www.semanlink.net/tag/eclipse +http://dev.eclipse.org/blogs/wayne/2007/07/25/many-classpaths-to-enlightenment/#comments|title|Eclipse hints, tips, and random musings » Blog Archive » Many classpaths to enlightenment +http://dev.eclipse.org/blogs/wayne/2007/07/25/many-classpaths-to-enlightenment/#comments|creationTime|2007-11-12T01:07:26Z +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.7265|creationDate|2011-10-26 +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.7265|tag|http://www.semanlink.net/tag/configuration +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.7265|title|CiteSeerX — Fast backtrack-free product configuration using a precompiled solution space representation +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.7265|creationTime|2011-10-26T14:36:03Z +http://topquadrantblog.blogspot.com/2010/12/how-to-convert-spreadsheet-to-skos.html|creationDate|2011-01-09 +http://topquadrantblog.blogspot.com/2010/12/how-to-convert-spreadsheet-to-skos.html|tag|http://www.semanlink.net/tag/skos +http://topquadrantblog.blogspot.com/2010/12/how-to-convert-spreadsheet-to-skos.html|tag|http://www.semanlink.net/tag/topquadrant +http://topquadrantblog.blogspot.com/2010/12/how-to-convert-spreadsheet-to-skos.html|tag|http://www.semanlink.net/tag/spreadsheets +http://topquadrantblog.blogspot.com/2010/12/how-to-convert-spreadsheet-to-skos.html|title|VOYAGES OF THE SEMANTIC ENTERPRISE: How to: convert a spreadsheet to SKOS +http://topquadrantblog.blogspot.com/2010/12/how-to-convert-spreadsheet-to-skos.html|creationTime|2011-01-09T21:31:14Z +http://www.appleinsider.com/articles/08/06/16/apples_open_secret_sproutcore_is_cocoa_for_the_web.html|creationDate|2008-06-17 +http://www.appleinsider.com/articles/08/06/16/apples_open_secret_sproutcore_is_cocoa_for_the_web.html|tag|http://www.semanlink.net/tag/sproutcore +http://www.appleinsider.com/articles/08/06/16/apples_open_secret_sproutcore_is_cocoa_for_the_web.html|title|AppleInsider Apple's open secret: SproutCore is Cocoa for the Web +http://www.appleinsider.com/articles/08/06/16/apples_open_secret_sproutcore_is_cocoa_for_the_web.html|creationTime|2008-06-17T23:26:03Z 
+https://www.youtube.com/watch?v=2cEUKKJoAdU|creationDate|2017-10-21 +https://www.youtube.com/watch?v=2cEUKKJoAdU|tag|http://www.semanlink.net/tag/open_knowledge_network +https://www.youtube.com/watch?v=2cEUKKJoAdU|title|"Andrew Moore on ""TOKeN: The Open Knowledge Network"" - YouTube" +https://www.youtube.com/watch?v=2cEUKKJoAdU|creationTime|2017-10-21T14:31:45Z +http://www.slideshare.net/candp/sem-tech-pelletdb|creationDate|2010-12-16 +http://www.slideshare.net/candp/sem-tech-pelletdb|tag|http://www.semanlink.net/tag/reasoning +http://www.slideshare.net/candp/sem-tech-pelletdb|tag|http://www.semanlink.net/tag/pellet +http://www.slideshare.net/candp/sem-tech-pelletdb|title|PelletDb: Scalable Reasoning for Enterprise Semantics +http://www.slideshare.net/candp/sem-tech-pelletdb|creationTime|2010-12-16T15:44:19Z +http://www-sul.stanford.edu/depts/ssrg/africa/guide.html|creationDate|2006-01-02 +http://www-sul.stanford.edu/depts/ssrg/africa/guide.html|tag|http://www.semanlink.net/tag/afrique_subsaharienne +http://www-sul.stanford.edu/depts/ssrg/africa/guide.html|tag|http://www.semanlink.net/tag/links +http://www-sul.stanford.edu/depts/ssrg/africa/guide.html|tag|http://www.semanlink.net/tag/stanford +http://www-sul.stanford.edu/depts/ssrg/africa/guide.html|title|Africa South of the Sahara: Selected Internet Resources - Stanford University +http://www.snee.com/bobdc.blog/2008/10/download_sparql_results_direct.html|creationDate|2008-10-30 +http://www.snee.com/bobdc.blog/2008/10/download_sparql_results_direct.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2008/10/download_sparql_results_direct.html|tag|http://www.semanlink.net/tag/spreadsheets +http://www.snee.com/bobdc.blog/2008/10/download_sparql_results_direct.html|tag|http://www.semanlink.net/tag/sparql +http://www.snee.com/bobdc.blog/2008/10/download_sparql_results_direct.html|title|Download SPARQL results directly into a spreadsheet - bobdc.blog +http://www.snee.com/bobdc.blog/2008/10/download_sparql_results_direct.html|creationTime|2008-10-30T13:21:06Z +https://blog.miguelgrinberg.com/post/using-celery-with-flask|creationDate|2018-04-09 +https://blog.miguelgrinberg.com/post/using-celery-with-flask|tag|http://www.semanlink.net/tag/asynchronous +https://blog.miguelgrinberg.com/post/using-celery-with-flask|tag|http://www.semanlink.net/tag/flask +https://blog.miguelgrinberg.com/post/using-celery-with-flask|title|Using Celery With Flask - miguelgrinberg.com +https://blog.miguelgrinberg.com/post/using-celery-with-flask|creationTime|2018-04-09T10:46:54Z +https://tilloy.wordpress.com/teaching/physique-pour-tous/|creationDate|2017-10-01 +https://tilloy.wordpress.com/teaching/physique-pour-tous/|tag|http://www.semanlink.net/tag/physique +https://tilloy.wordpress.com/teaching/physique-pour-tous/|title|“Physique pour tous” Antoine Tilloy's research log +https://tilloy.wordpress.com/teaching/physique-pour-tous/|creationTime|2017-10-01T17:20:12Z +http://insideanalysis.com/2015/01/the-graph-database-and-the-rdf-database/?utm_content=buffer88439&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2015-01-26 +http://insideanalysis.com/2015/01/the-graph-database-and-the-rdf-database/?utm_content=buffer88439&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/graph_database 
+http://insideanalysis.com/2015/01/the-graph-database-and-the-rdf-database/?utm_content=buffer88439&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/triplestore +http://insideanalysis.com/2015/01/the-graph-database-and-the-rdf-database/?utm_content=buffer88439&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|The Graph Database and the RDF Database Inside Analysis +http://insideanalysis.com/2015/01/the-graph-database-and-the-rdf-database/?utm_content=buffer88439&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2015-01-26T14:48:53Z +https://www.theguardian.com/politics/2018/jan/21/capitalism-new-crisis-can-private-sector-be-trusted-carillion-privatisation?CMP=Share_iOSApp_Other|creationDate|2018-01-22 +https://www.theguardian.com/politics/2018/jan/21/capitalism-new-crisis-can-private-sector-be-trusted-carillion-privatisation?CMP=Share_iOSApp_Other|tag|http://www.semanlink.net/tag/services_publics +https://www.theguardian.com/politics/2018/jan/21/capitalism-new-crisis-can-private-sector-be-trusted-carillion-privatisation?CMP=Share_iOSApp_Other|tag|http://www.semanlink.net/tag/royaume_uni +https://www.theguardian.com/politics/2018/jan/21/capitalism-new-crisis-can-private-sector-be-trusted-carillion-privatisation?CMP=Share_iOSApp_Other|tag|http://www.semanlink.net/tag/capitalisme +https://www.theguardian.com/politics/2018/jan/21/capitalism-new-crisis-can-private-sector-be-trusted-carillion-privatisation?CMP=Share_iOSApp_Other|title|Capitalism’s new crisis: after Carillion, can the private sector ever be trusted? Politics The Guardian +https://www.theguardian.com/politics/2018/jan/21/capitalism-new-crisis-can-private-sector-be-trusted-carillion-privatisation?CMP=Share_iOSApp_Other|creationTime|2018-01-22T18:22:52Z +http://www.dfki.uni-kl.de/~sauermann/2006/01-pimo-report/pimOntologyLanguageReport.html|creationDate|2006-05-10 +http://www.dfki.uni-kl.de/~sauermann/2006/01-pimo-report/pimOntologyLanguageReport.html|tag|http://www.semanlink.net/tag/pimo +http://www.dfki.uni-kl.de/~sauermann/2006/01-pimo-report/pimOntologyLanguageReport.html|comment|"In this report, a new ontology +language is proposed, the PIMO ontology language, which addresses the +requirements of the Semantic Desktop and uses existing solutions as an +inspiration to build a suitable solution. The language contains a core upper +ontology, defining basic classes for things, concepts, resources, persons, etc. +and also stops at these basic entities. Extending the ontology definitions of +classes and relations is possible by PIMO-domain ontologies. The core +application area of the PIMO-language is to allow individual persons to express +their own mental models in a structured way, the different mental models can +then be integrated based on matching algorithms or on domain ontologies. 
+" +http://www.dfki.uni-kl.de/~sauermann/2006/01-pimo-report/pimOntologyLanguageReport.html|title|PIMO-a PIM Ontology for the Semantic Desktop +https://www.quora.com/How-do-RBMs-work-What-are-some-good-use-cases-and-some-good-recent-papers-on-the-topic|creationDate|2017-10-30 +https://www.quora.com/How-do-RBMs-work-What-are-some-good-use-cases-and-some-good-recent-papers-on-the-topic|tag|http://www.semanlink.net/tag/restricted_boltzmann_machine +https://www.quora.com/How-do-RBMs-work-What-are-some-good-use-cases-and-some-good-recent-papers-on-the-topic|tag|http://www.semanlink.net/tag/yoshua_bengio +https://www.quora.com/How-do-RBMs-work-What-are-some-good-use-cases-and-some-good-recent-papers-on-the-topic|comment|> You can think of it a little bit like you think about Principal Components Analysis, in that it is trained by unsupervised learning so as to capture the leading variations in the data, and it yields a new representation of the data +https://www.quora.com/How-do-RBMs-work-What-are-some-good-use-cases-and-some-good-recent-papers-on-the-topic|title|How do RBMs work? - Quora +https://www.quora.com/How-do-RBMs-work-What-are-some-good-use-cases-and-some-good-recent-papers-on-the-topic|creationTime|2017-10-30T12:36:20Z +http://etienne.chouard.free.fr/Europe/|creationDate|2005-04-25 +http://etienne.chouard.free.fr/Europe/|tag|http://www.semanlink.net/tag/constitution_europeenne +https://distill.pub/2018/building-blocks/|creationDate|2018-03-07 +https://distill.pub/2018/building-blocks/|tag|http://www.semanlink.net/tag/christopher_olah +https://distill.pub/2018/building-blocks/|tag|http://www.semanlink.net/tag/neural_network_interpretability +https://distill.pub/2018/building-blocks/|comment|"> Interpretability techniques are normally studied in isolation. We explore the powerful interfaces that arise when you combine them — and the rich structure of this combinatorial space. 
+" +https://distill.pub/2018/building-blocks/|title|The Building Blocks of Interpretability +https://distill.pub/2018/building-blocks/|creationTime|2018-03-07T14:32:55Z +https://fr.slideshare.net/lysander07/combining-semantics-an-deep-learning-for-intelligent-information-services|creationDate|2018-01-03 +https://fr.slideshare.net/lysander07/combining-semantics-an-deep-learning-for-intelligent-information-services|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +https://fr.slideshare.net/lysander07/combining-semantics-an-deep-learning-for-intelligent-information-services|tag|http://www.semanlink.net/tag/rdf_embeddings +https://fr.slideshare.net/lysander07/combining-semantics-an-deep-learning-for-intelligent-information-services|tag|http://www.semanlink.net/tag/slideshare +https://fr.slideshare.net/lysander07/combining-semantics-an-deep-learning-for-intelligent-information-services|title|Combining semantics and deep learning for intelligent information services +https://fr.slideshare.net/lysander07/combining-semantics-an-deep-learning-for-intelligent-information-services|creationTime|2018-01-03T17:15:17Z +http://www.gazeoftime.com/|creationDate|2018-03-06 +http://www.gazeoftime.com/|tag|http://www.semanlink.net/tag/timeline +http://www.gazeoftime.com/|tag|http://www.semanlink.net/tag/wikidata +http://www.gazeoftime.com/|title|Gaze of time Timelines and Chronologies curated by the community +http://www.gazeoftime.com/|creationTime|2018-03-06T14:27:11Z +https://aclanthology.coli.uni-saarland.de/volumes/proceedings-of-the-2018-emnlp-workshop-blackboxnlp-analyzing-and-interpreting-neural-networks-for-nlp|creationDate|2018-11-06 +https://aclanthology.coli.uni-saarland.de/volumes/proceedings-of-the-2018-emnlp-workshop-blackboxnlp-analyzing-and-interpreting-neural-networks-for-nlp|tag|http://www.semanlink.net/tag/blackboxnlp_workshop_2018 +https://aclanthology.coli.uni-saarland.de/volumes/proceedings-of-the-2018-emnlp-workshop-blackboxnlp-analyzing-and-interpreting-neural-networks-for-nlp|title|PROCEEDINGS of the BlackboxNLP Workshop +https://aclanthology.coli.uni-saarland.de/volumes/proceedings-of-the-2018-emnlp-workshop-blackboxnlp-analyzing-and-interpreting-neural-networks-for-nlp|creationTime|2018-11-06T10:06:41Z +https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory|creationDate|2017-06-04 +https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory|tag|http://www.semanlink.net/tag/stack_overflow +https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory|tag|http://www.semanlink.net/tag/python_install +https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory|title|installation - How do I find the location of my Python site-packages directory? - Stack Overflow +https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory|creationTime|2017-06-04T11:41:40Z +https://www.quora.com/How-is-information-coded-in-neural-activity|creationDate|2017-12-04 +https://www.quora.com/How-is-information-coded-in-neural-activity|tag|http://www.semanlink.net/tag/neural_coding +https://www.quora.com/How-is-information-coded-in-neural-activity|title|How is information coded in neural activity? 
(Quora) +https://www.quora.com/How-is-information-coded-in-neural-activity|creationTime|2017-12-04T09:05:41Z +http://rdfa.info/play/|creationDate|2012-08-25 +http://rdfa.info/play/|tag|http://www.semanlink.net/tag/manu_sporny +http://rdfa.info/play/|tag|http://www.semanlink.net/tag/rdfa_tool +http://rdfa.info/play/|title|RDFa / Play: visualizer for RDFa by @manusporny +http://rdfa.info/play/|creationTime|2012-08-25T20:12:48Z +https://github.com/facebookresearch/fastText/issues/26|creationDate|2017-11-25 +https://github.com/facebookresearch/fastText/issues/26|tag|http://www.semanlink.net/tag/fasttext +https://github.com/facebookresearch/fastText/issues/26|tag|http://www.semanlink.net/tag/document_embeddings +https://github.com/facebookresearch/fastText/issues/26|title|How can we get the vector of a paragraph? · Issue #26 · facebookresearch/fastText +https://github.com/facebookresearch/fastText/issues/26|creationTime|2017-11-25T19:39:45Z +http://www.webreference.com/programming/javascript/index.html|creationDate|2006-07-26 +http://www.webreference.com/programming/javascript/index.html|tag|http://www.semanlink.net/tag/javascript +http://www.webreference.com/programming/javascript/index.html|tag|http://www.semanlink.net/tag/howto +http://www.webreference.com/programming/javascript/index.html|title|WebReference JavaScript Articles +http://timeline.verite.co/|creationDate|2012-11-28 +http://timeline.verite.co/|tag|http://www.semanlink.net/tag/timeline +http://timeline.verite.co/|title|Timeline JS - Beautifully crafted timelines that are easy, and intuitive to use. +http://timeline.verite.co/|creationTime|2012-11-28T00:09:37Z +http://www2.cnrs.fr/presse/communique/4519.htm|creationDate|2016-04-27 +http://www2.cnrs.fr/presse/communique/4519.htm|tag|http://www.semanlink.net/tag/cnrs +http://www2.cnrs.fr/presse/communique/4519.htm|tag|http://www.semanlink.net/tag/apprentissage +http://www2.cnrs.fr/presse/communique/4519.htm|title|Un organisme unicellulaire capable d'apprendre - Communiqués et dossiers de presse - CNRS +http://www2.cnrs.fr/presse/communique/4519.htm|creationTime|2016-04-27T14:21:53Z +http://blog.tcrouzet.com/la-quatrieme-theorie/la-quatrieme-theorie-liens/|creationDate|2014-05-27 +http://blog.tcrouzet.com/la-quatrieme-theorie/la-quatrieme-theorie-liens/|tag|http://www.semanlink.net/tag/twitterature +http://blog.tcrouzet.com/la-quatrieme-theorie/la-quatrieme-theorie-liens/|title|Twittérature +http://blog.tcrouzet.com/la-quatrieme-theorie/la-quatrieme-theorie-liens/|creationTime|2014-05-27T14:07:23Z +http://news.bbc.co.uk/2/hi/technology/4766755.stm|creationDate|2006-05-14 +http://news.bbc.co.uk/2/hi/technology/4766755.stm|tag|http://www.semanlink.net/tag/second_life +http://news.bbc.co.uk/2/hi/technology/4766755.stm|tag|http://www.semanlink.net/tag/bbc +http://news.bbc.co.uk/2/hi/technology/4766755.stm|comment|The BBC has staked a claim to a virtual tropical island where it can stage online music festivals and throw exclusive celebrity parties. 
+http://news.bbc.co.uk/2/hi/technology/4766755.stm|title|BBC NEWS Technology BBC starts to rock online world +http://news.bbc.co.uk/2/hi/technology/4766755.stm|source|BBC +https://www.reddit.com/r/MachineLearning/comments/4ssk6u/is_it_true_that_r_programming_is_dying/?st=iqmztsrx&sh=b08ddfe5|creationDate|2016-07-15 +https://www.reddit.com/r/MachineLearning/comments/4ssk6u/is_it_true_that_r_programming_is_dying/?st=iqmztsrx&sh=b08ddfe5|tag|http://www.semanlink.net/tag/r +https://www.reddit.com/r/MachineLearning/comments/4ssk6u/is_it_true_that_r_programming_is_dying/?st=iqmztsrx&sh=b08ddfe5|title|Is it true that R programming is dying? : MachineLearning +https://www.reddit.com/r/MachineLearning/comments/4ssk6u/is_it_true_that_r_programming_is_dying/?st=iqmztsrx&sh=b08ddfe5|creationTime|2016-07-15T02:18:05Z +http://restlet.tigris.org/issues/show_bug.cgi?id=463|creationDate|2011-08-23 +http://restlet.tigris.org/issues/show_bug.cgi?id=463|tag|http://www.semanlink.net/tag/jersey +http://restlet.tigris.org/issues/show_bug.cgi?id=463|title|Support variant selection based on file extension +http://restlet.tigris.org/issues/show_bug.cgi?id=463|creationTime|2011-08-23T00:27:45Z +http://www.w3.org/TR/2015/CR-ldpatch-20150303/|creationDate|2015-03-05 +http://www.w3.org/TR/2015/CR-ldpatch-20150303/|tag|http://www.semanlink.net/tag/ld_patch +http://www.w3.org/TR/2015/CR-ldpatch-20150303/|comment|W3C Candidate Recommendation 03 March 2015 +http://www.w3.org/TR/2015/CR-ldpatch-20150303/|title|Linked Data Patch Format +http://www.w3.org/TR/2015/CR-ldpatch-20150303/|creationTime|2015-03-05T11:00:19Z +http://cs231n.github.io/neural-networks-1/#feedforward|creationDate|2015-12-26 +http://cs231n.github.io/neural-networks-1/#feedforward|tag|http://www.semanlink.net/tag/computer_vision +http://cs231n.github.io/neural-networks-1/#feedforward|tag|http://www.semanlink.net/tag/artificial_neural_network +http://cs231n.github.io/neural-networks-1/#feedforward|tag|http://www.semanlink.net/tag/convolutional_neural_network +http://cs231n.github.io/neural-networks-1/#feedforward|tag|http://www.semanlink.net/tag/good +http://cs231n.github.io/neural-networks-1/#feedforward|title|CS231n Convolutional Neural Networks for Visual Recognition +http://cs231n.github.io/neural-networks-1/#feedforward|creationTime|2015-12-26T01:28:43Z +http://www.hugin.com/|creationDate|2006-02-17 +http://www.hugin.com/|tag|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.hugin.com/|title|Hugin +http://www.iledefrance-est.cnrs.fr/com/documents/gene_syncitine.pdf|creationDate|2011-03-27 +http://www.iledefrance-est.cnrs.fr/com/documents/gene_syncitine.pdf|tag|http://www.semanlink.net/tag/genetique +http://www.iledefrance-est.cnrs.fr/com/documents/gene_syncitine.pdf|tag|http://www.semanlink.net/tag/retrovirus +http://www.iledefrance-est.cnrs.fr/com/documents/gene_syncitine.pdf|tag|http://www.semanlink.net/tag/evolution +http://www.iledefrance-est.cnrs.fr/com/documents/gene_syncitine.pdf|title|Un gène d’origine rétrovirale essentiel pour la formation du placenta +http://www.iledefrance-est.cnrs.fr/com/documents/gene_syncitine.pdf|creationTime|2011-03-27T14:35:38Z +https://twitter.com/feiliu_nlp/status/1058985012945735680|creationDate|2018-11-06 +https://twitter.com/feiliu_nlp/status/1058985012945735680|tag|http://www.semanlink.net/tag/emnlp_2018 +https://twitter.com/feiliu_nlp/status/1058985012945735680|tag|http://www.semanlink.net/tag/multi_document_summarization +https://twitter.com/feiliu_nlp/status/1058985012945735680|title|Adapting the 
Neural Encoder-Decoder Framework from Single to Multi-Document Summarization +https://twitter.com/feiliu_nlp/status/1058985012945735680|creationTime|2018-11-06T23:11:24Z +http://www.subbu.org/blog/2012/07/mvc-may-be-slowing-down-your-site|creationDate|2013-01-03 +http://www.subbu.org/blog/2012/07/mvc-may-be-slowing-down-your-site|tag|http://www.semanlink.net/tag/mvc +http://www.subbu.org/blog/2012/07/mvc-may-be-slowing-down-your-site|title|Your MVC may be Slowing Down Your Site +http://www.subbu.org/blog/2012/07/mvc-may-be-slowing-down-your-site|creationTime|2013-01-03T13:16:30Z +http://www.xfront.com/REST-Web-Services.html|creationDate|2005-09-01 +http://www.xfront.com/REST-Web-Services.html|tag|http://www.semanlink.net/tag/web_services +http://www.xfront.com/REST-Web-Services.html|tag|http://www.semanlink.net/tag/rest +http://www.xfront.com/REST-Web-Services.html|comment|First provides a brief introduction to REST and then describes how to build Web services in the REST style. +http://www.xfront.com/REST-Web-Services.html|title|Building Web Services the REST Way +http://www.lemonde.fr/afrique/article/2017/09/07/alors-que-s-eteignent-les-elephants-d-afrique-les-chinois-ont-pris-le-controle-des-routes-de-l-ivoire_5182142_3212.html|creationDate|2017-09-07 +http://www.lemonde.fr/afrique/article/2017/09/07/alors-que-s-eteignent-les-elephants-d-afrique-les-chinois-ont-pris-le-controle-des-routes-de-l-ivoire_5182142_3212.html|tag|http://www.semanlink.net/tag/chine_afrique +http://www.lemonde.fr/afrique/article/2017/09/07/alors-que-s-eteignent-les-elephants-d-afrique-les-chinois-ont-pris-le-controle-des-routes-de-l-ivoire_5182142_3212.html|tag|http://www.semanlink.net/tag/elephant +http://www.lemonde.fr/afrique/article/2017/09/07/alors-que-s-eteignent-les-elephants-d-afrique-les-chinois-ont-pris-le-controle-des-routes-de-l-ivoire_5182142_3212.html|title|Alors que s’éteignent les éléphants d’Afrique, les Chinois ont pris le contrôle des routes de l’ivoire +http://www.lemonde.fr/afrique/article/2017/09/07/alors-que-s-eteignent-les-elephants-d-afrique-les-chinois-ont-pris-le-controle-des-routes-de-l-ivoire_5182142_3212.html|creationTime|2017-09-07T20:01:10Z +http://isicil.inria.fr/v2/res/docs/articles/webscience10_flimpens_paper.pdf|creationDate|2012-02-20 +http://isicil.inria.fr/v2/res/docs/articles/webscience10_flimpens_paper.pdf|tag|http://www.semanlink.net/tag/linkto_semanlink +http://isicil.inria.fr/v2/res/docs/articles/webscience10_flimpens_paper.pdf|tag|http://www.semanlink.net/tag/folksonomy +http://isicil.inria.fr/v2/res/docs/articles/webscience10_flimpens_paper.pdf|title|Helping online communities to semantically enrich folksonomies +http://isicil.inria.fr/v2/res/docs/articles/webscience10_flimpens_paper.pdf|creationTime|2012-02-20T21:12:13Z +https://theintercept.com/2018/07/08/ethiopia-garment-industry/|creationDate|2018-07-09 +https://theintercept.com/2018/07/08/ethiopia-garment-industry/|tag|http://www.semanlink.net/tag/new_africa +https://theintercept.com/2018/07/08/ethiopia-garment-industry/|tag|http://www.semanlink.net/tag/ethiopie +https://theintercept.com/2018/07/08/ethiopia-garment-industry/|tag|http://www.semanlink.net/tag/vive_le_capitalisme +https://theintercept.com/2018/07/08/ethiopia-garment-industry/|tag|http://www.semanlink.net/tag/industrie_textile +https://theintercept.com/2018/07/08/ethiopia-garment-industry/|title|Ethiopia Touts Good Conditions in Factories for Brands Like H&M and Calvin Klein, but Workers Scrape By On $1 a Day 
+https://theintercept.com/2018/07/08/ethiopia-garment-industry/|creationTime|2018-07-09T15:10:57Z +http://thinkingmachines.mit.edu/blog/unreasonable-reputation-neural-networks|creationDate|2016-01-13 +http://thinkingmachines.mit.edu/blog/unreasonable-reputation-neural-networks|tag|http://www.semanlink.net/tag/artificial_general_intelligence +http://thinkingmachines.mit.edu/blog/unreasonable-reputation-neural-networks|tag|http://www.semanlink.net/tag/artificial_neural_network +http://thinkingmachines.mit.edu/blog/unreasonable-reputation-neural-networks|title|The Unreasonable Reputation of Neural Networks [ thinking machines ] +http://thinkingmachines.mit.edu/blog/unreasonable-reputation-neural-networks|creationTime|2016-01-13T23:04:14Z +https://drive.google.com/file/d/1kmNAwrSlFYo0cN_DcURMOArBwe9FxWxR/view|creationDate|2018-11-05 +https://drive.google.com/file/d/1kmNAwrSlFYo0cN_DcURMOArBwe9FxWxR/view|tag|http://www.semanlink.net/tag/language_model +https://drive.google.com/file/d/1kmNAwrSlFYo0cN_DcURMOArBwe9FxWxR/view|tag|http://www.semanlink.net/tag/sebastian_ruder +https://drive.google.com/file/d/1kmNAwrSlFYo0cN_DcURMOArBwe9FxWxR/view|tag|http://www.semanlink.net/tag/emnlp_2018 +https://drive.google.com/file/d/1kmNAwrSlFYo0cN_DcURMOArBwe9FxWxR/view|tag|http://www.semanlink.net/tag/transfer_learning +https://drive.google.com/file/d/1kmNAwrSlFYo0cN_DcURMOArBwe9FxWxR/view|title|Transfer learning with language models +https://drive.google.com/file/d/1kmNAwrSlFYo0cN_DcURMOArBwe9FxWxR/view|creationTime|2018-11-05T13:50:50Z +http://fishbowl.pastiche.org/2005/04/27/the_string_memory_gotcha|creationDate|2007-10-31 +http://fishbowl.pastiche.org/2005/04/27/the_string_memory_gotcha|tag|http://www.semanlink.net/tag/memory_leak +http://fishbowl.pastiche.org/2005/04/27/the_string_memory_gotcha|tag|http://www.semanlink.net/tag/java_dev +http://fishbowl.pastiche.org/2005/04/27/the_string_memory_gotcha|comment|substring() can be dangerous +http://fishbowl.pastiche.org/2005/04/27/the_string_memory_gotcha|title|The Fishbowl: The String Memory Gotcha +http://fishbowl.pastiche.org/2005/04/27/the_string_memory_gotcha|creationTime|2007-10-31T15:50:42Z +http://www.rdfhdt.org/|creationDate|2014-09-04 +http://www.rdfhdt.org/|tag|http://www.semanlink.net/tag/rdf_binary +http://www.rdfhdt.org/|comment|"HDT (Header, Dictionary, Triples), a compact data structure and binary serialization format for RDF.
+""HDT compresses big RDF datasets while maintaining search and browse operations"" +" +http://www.rdfhdt.org/|title|RDF HDT +http://www.rdfhdt.org/|creationTime|2014-09-04T14:36:19Z +http://sig.ma/search?q=francois-paul+servant|creationDate|2009-08-27 +http://sig.ma/search?q=francois-paul+servant|tag|http://www.semanlink.net/tag/sig_ma +http://sig.ma/search?q=francois-paul+servant|tag|http://www.semanlink.net/tag/fps +http://sig.ma/search?q=francois-paul+servant|title|fps on sig.ma - Semantic Information MAshup +http://sig.ma/search?q=francois-paul+servant|creationTime|2009-08-27T14:28:50Z +http://blog.outer-court.com/archive/2005-05-22-n83.html|creationDate|2005-05-31 +http://blog.outer-court.com/archive/2005-05-22-n83.html|tag|http://www.semanlink.net/tag/nlp_google +http://blog.outer-court.com/archive/2005-05-22-n83.html|tag|http://www.semanlink.net/tag/statistical_machine_translation +http://blog.outer-court.com/archive/2005-05-22-n83.html|title|Google Translator: The Universal Language +http://www.eurecom.fr/~michiard/teaching/slides/clouds/tutorial-hbase.pdf|creationDate|2013-03-13 +http://www.eurecom.fr/~michiard/teaching/slides/clouds/tutorial-hbase.pdf|tag|http://www.semanlink.net/tag/hbase +http://www.eurecom.fr/~michiard/teaching/slides/clouds/tutorial-hbase.pdf|tag|http://www.semanlink.net/tag/tutorial +http://www.eurecom.fr/~michiard/teaching/slides/clouds/tutorial-hbase.pdf|title|HBase tutorial +http://www.eurecom.fr/~michiard/teaching/slides/clouds/tutorial-hbase.pdf|creationTime|2013-03-13T17:27:23Z +http://blog.aylien.com/word-embeddings-and-their-challenges/|creationDate|2017-07-20 +http://blog.aylien.com/word-embeddings-and-their-challenges/|comment|Perhaps the biggest problem with word2vec is the inability to handle unknown or out-of-vocabulary (OOV) words. 
+http://blog.aylien.com/word-embeddings-and-their-challenges/|title|Word Embeddings and Their Challenges - AYLIEN +http://blog.aylien.com/word-embeddings-and-their-challenges/|creationTime|2017-07-20T15:49:59Z +http://www.readwriteweb.com/archives/4_tools_for_teaching_kids_to_code.php#more|creationDate|2010-08-30 +http://www.readwriteweb.com/archives/4_tools_for_teaching_kids_to_code.php#more|tag|http://www.semanlink.net/tag/teaching_kids_to_code +http://www.readwriteweb.com/archives/4_tools_for_teaching_kids_to_code.php#more|title|4 Tools for Teaching Kids to Code +http://www.readwriteweb.com/archives/4_tools_for_teaching_kids_to_code.php#more|creationTime|2010-08-30T14:49:08Z +http://www.tombouctoumanuscripts.org/|creationDate|2013-01-29 +http://www.tombouctoumanuscripts.org/|tag|http://www.semanlink.net/tag/manuscrits_de_tombouctou +http://www.tombouctoumanuscripts.org/|title|Tombouctou Manuscripts Project +http://www.tombouctoumanuscripts.org/|creationTime|2013-01-29T18:17:55Z +http://pisani.blog.lemonde.fr/pisani/|creationDate|2006-04-18 +http://pisani.blog.lemonde.fr/pisani/|tag|http://www.semanlink.net/tag/web_2_0 +http://pisani.blog.lemonde.fr/pisani/|tag|http://www.semanlink.net/tag/transnets +http://pisani.blog.lemonde.fr/pisani/|title|Transnets, des gadgets aux réseaux +http://semtech2010.semanticuniverse.com/|creationDate|2010-06-30 +http://semtech2010.semanticuniverse.com/|tag|http://www.semanlink.net/tag/semantic_web +http://semtech2010.semanticuniverse.com/|tag|http://www.semanlink.net/tag/conferences +http://semtech2010.semanticuniverse.com/|title|SemTech 2010 +http://semtech2010.semanticuniverse.com/|creationTime|2010-06-30T13:25:55Z +http://www.honeynet.org/papers/phishing/|creationDate|2005-05-25 +http://www.honeynet.org/papers/phishing/|tag|http://www.semanlink.net/tag/phishing +http://www.honeynet.org/papers/phishing/|title|Know your Enemy: Phishing +http://www.zdnet.com/article/back-to-the-future-does-graph-database-success-hang-on-query-language/|creationDate|2018-03-13 +http://www.zdnet.com/article/back-to-the-future-does-graph-database-success-hang-on-query-language/|tag|http://www.semanlink.net/tag/graph_database +http://www.zdnet.com/article/back-to-the-future-does-graph-database-success-hang-on-query-language/|title|Back to the future: Does graph database success hang on query language? 
ZDNet +http://www.zdnet.com/article/back-to-the-future-does-graph-database-success-hang-on-query-language/|creationTime|2018-03-13T14:52:56Z +http://www.ccfd.asso.fr/|creationDate|2007-04-10 +http://www.ccfd.asso.fr/|tag|http://www.semanlink.net/tag/ccfd +http://www.ccfd.asso.fr/|tag|http://www.semanlink.net/tag/faim +http://www.ccfd.asso.fr/|tag|http://www.semanlink.net/tag/developpement +http://www.ccfd.asso.fr/|title|CCFD : Comité Catholique contre la Faim et pour le Développement +http://www.ccfd.asso.fr/|creationTime|2007-04-10T00:01:46Z +http://lab.linkeddata.deri.ie/2010/grefine-rdf-extension/|creationDate|2011-01-18 +http://lab.linkeddata.deri.ie/2010/grefine-rdf-extension/|tag|http://www.semanlink.net/tag/rdf_tools +http://lab.linkeddata.deri.ie/2010/grefine-rdf-extension/|tag|http://www.semanlink.net/tag/deri +http://lab.linkeddata.deri.ie/2010/grefine-rdf-extension/|tag|http://www.semanlink.net/tag/google_refine +http://lab.linkeddata.deri.ie/2010/grefine-rdf-extension/|title|RDF Extension for Google Refine +http://lab.linkeddata.deri.ie/2010/grefine-rdf-extension/|creationTime|2011-01-18T15:39:07Z +http://www.lemonde.fr/planete/article/2014/11/10/les-partisans-des-ogm-marquent-des-points-aux-etats-unis_4521152_3244.html|creationDate|2014-11-10 +http://www.lemonde.fr/planete/article/2014/11/10/les-partisans-des-ogm-marquent-des-points-aux-etats-unis_4521152_3244.html|tag|http://www.semanlink.net/tag/ogm +http://www.lemonde.fr/planete/article/2014/11/10/les-partisans-des-ogm-marquent-des-points-aux-etats-unis_4521152_3244.html|tag|http://www.semanlink.net/tag/usa +http://www.lemonde.fr/planete/article/2014/11/10/les-partisans-des-ogm-marquent-des-points-aux-etats-unis_4521152_3244.html|title|Les partisans des OGM marquent des points aux Etats-Unis +http://www.lemonde.fr/planete/article/2014/11/10/les-partisans-des-ogm-marquent-des-points-aux-etats-unis_4521152_3244.html|creationTime|2014-11-10T12:52:54Z +https://medium.com/scaleabout/a-gentle-introduction-to-doc2vec-db3e8c0cce5e|creationDate|2018-02-14 +https://medium.com/scaleabout/a-gentle-introduction-to-doc2vec-db3e8c0cce5e|tag|http://www.semanlink.net/tag/doc2vec +https://medium.com/scaleabout/a-gentle-introduction-to-doc2vec-db3e8c0cce5e|title|A gentle introduction to Doc2Vec – ScaleAbout – Medium +https://medium.com/scaleabout/a-gentle-introduction-to-doc2vec-db3e8c0cce5e|creationTime|2018-02-14T01:34:05Z +http://mobile.nytimes.com/2014/01/19/opinion/sunday/for-the-love-of-money.html|creationDate|2014-01-20 +http://mobile.nytimes.com/2014/01/19/opinion/sunday/for-the-love-of-money.html|tag|http://www.semanlink.net/tag/money +http://mobile.nytimes.com/2014/01/19/opinion/sunday/for-the-love-of-money.html|tag|http://www.semanlink.net/tag/traders +http://mobile.nytimes.com/2014/01/19/opinion/sunday/for-the-love-of-money.html|title|For the Love of Money - NYTimes.com +http://mobile.nytimes.com/2014/01/19/opinion/sunday/for-the-love-of-money.html|creationTime|2014-01-20T11:35:53Z +http://www.wiseclerk.com/group-news/countries/france-the-situation-of-social-lending-in-france/|creationDate|2008-09-08 +http://www.wiseclerk.com/group-news/countries/france-the-situation-of-social-lending-in-france/|tag|http://www.semanlink.net/tag/banque +http://www.wiseclerk.com/group-news/countries/france-the-situation-of-social-lending-in-france/|title|P2P-Banking.com » The situation of social lending in France +http://www.wiseclerk.com/group-news/countries/france-the-situation-of-social-lending-in-france/|creationTime|2008-09-08T21:58:47Z 
+http://interface.fh-potsdam.de/incom/code/projekte/projekt_anzeigen.php?4,260,17,0,0,281|creationDate|2006-10-09 +http://interface.fh-potsdam.de/incom/code/projekte/projekt_anzeigen.php?4,260,17,0,0,281|tag|http://www.semanlink.net/tag/tag_cloud +http://interface.fh-potsdam.de/incom/code/projekte/projekt_anzeigen.php?4,260,17,0,0,281|comment|Based on co–occurence of tags, I calculate similarity measures and map them to 2D coordinates. The result are semantically ordered tag clouds. +http://interface.fh-potsdam.de/incom/code/projekte/projekt_anzeigen.php?4,260,17,0,0,281|title|incom projekt {Tag Clouds 5.0} +https://www.bbc.com/news/science-environment-47540792|creationDate|2019-03-24 +https://www.bbc.com/news/science-environment-47540792|tag|http://www.semanlink.net/tag/genetique_histoire +https://www.bbc.com/news/science-environment-47540792|tag|http://www.semanlink.net/tag/age_du_bronze +https://www.bbc.com/news/science-environment-47540792|tag|http://www.semanlink.net/tag/archeologie_europeenne +https://www.bbc.com/news/science-environment-47540792|comment|A migration from Central Europe transformed the genetic make-up of people in Spain during the Bronze Age +https://www.bbc.com/news/science-environment-47540792|title|Ancient migration transformed Spain's DNA - BBC News +https://www.bbc.com/news/science-environment-47540792|creationTime|2019-03-24T19:49:26Z +https://www.ntu.edu.sg/home/axsun/paper/sigir16text.pdf|creationDate|2017-06-08 +https://www.ntu.edu.sg/home/axsun/paper/sigir16text.pdf|tag|http://www.semanlink.net/tag/topic_models_word_embedding +https://www.ntu.edu.sg/home/axsun/paper/sigir16text.pdf|tag|http://www.semanlink.net/tag/topic_modeling_over_short_texts +https://www.ntu.edu.sg/home/axsun/paper/sigir16text.pdf|title|Topic Modeling for Short Texts with Auxiliary Word Embeddings +https://www.ntu.edu.sg/home/axsun/paper/sigir16text.pdf|creationTime|2017-06-08T01:07:49Z +http://dl.acm.org/citation.cfm?id=2505526|creationDate|2017-07-21 +http://dl.acm.org/citation.cfm?id=2505526|tag|http://www.semanlink.net/tag/text_similarity +http://dl.acm.org/citation.cfm?id=2505526|tag|http://www.semanlink.net/tag/cosine_similarity +http://dl.acm.org/citation.cfm?id=2505526|tag|http://www.semanlink.net/tag/okapi_bm25 +http://dl.acm.org/citation.cfm?id=2505526|title|Effective measures for inter-document similarity +http://dl.acm.org/citation.cfm?id=2505526|creationTime|2017-07-21T12:45:10Z +http://stackoverflow.com/questions/23544282/what-is-the-best-way-to-manage-permissions-for-docker-shared-volumes|creationDate|2016-04-13 +http://stackoverflow.com/questions/23544282/what-is-the-best-way-to-manage-permissions-for-docker-shared-volumes|tag|http://www.semanlink.net/tag/docker_volumes +http://stackoverflow.com/questions/23544282/what-is-the-best-way-to-manage-permissions-for-docker-shared-volumes|comment|As of Docker 1.9.0, Docker has named volumes which replace data-only containers. 
+http://stackoverflow.com/questions/23544282/what-is-the-best-way-to-manage-permissions-for-docker-shared-volumes|title|What is the (best) way to manage permissions for docker shared volumes - Stack Overflow +http://stackoverflow.com/questions/23544282/what-is-the-best-way-to-manage-permissions-for-docker-shared-volumes|creationTime|2016-04-13T17:57:15Z +http://www.nytimes.com/2014/11/18/science/earth/hydrogen-cars-join-electric-models-in-showrooms.html?partner=rss&emc=rss|creationDate|2014-11-18 +http://www.nytimes.com/2014/11/18/science/earth/hydrogen-cars-join-electric-models-in-showrooms.html?partner=rss&emc=rss|tag|http://www.semanlink.net/tag/hydrogen_cars +http://www.nytimes.com/2014/11/18/science/earth/hydrogen-cars-join-electric-models-in-showrooms.html?partner=rss&emc=rss|title|Hydrogen Cars Join Electric Models in Showrooms - NYTimes.com +http://www.nytimes.com/2014/11/18/science/earth/hydrogen-cars-join-electric-models-in-showrooms.html?partner=rss&emc=rss|creationTime|2014-11-18T13:16:33Z +http://www.nytimes.com/2013/05/19/automobiles/a-benz-with-a-virtual-chauffeur.html?pagewanted=all|creationDate|2013-05-22 +http://www.nytimes.com/2013/05/19/automobiles/a-benz-with-a-virtual-chauffeur.html?pagewanted=all|tag|http://www.semanlink.net/tag/driverless_car +http://www.nytimes.com/2013/05/19/automobiles/a-benz-with-a-virtual-chauffeur.html?pagewanted=all|tag|http://www.semanlink.net/tag/daimler +http://www.nytimes.com/2013/05/19/automobiles/a-benz-with-a-virtual-chauffeur.html?pagewanted=all|title|A Benz With a Virtual Chauffeur - NYTimes.com +http://www.nytimes.com/2013/05/19/automobiles/a-benz-with-a-virtual-chauffeur.html?pagewanted=all|creationTime|2013-05-22T18:55:47Z +http://www.rfi.fr/radiofr/editions/072/edition_57_20070726.asp|creationDate|2007-07-26 +http://www.rfi.fr/radiofr/editions/072/edition_57_20070726.asp|tag|http://www.semanlink.net/tag/niger +http://www.rfi.fr/radiofr/editions/072/edition_57_20070726.asp|tag|http://www.semanlink.net/tag/mines_d_or +http://www.rfi.fr/radiofr/editions/072/edition_57_20070726.asp|comment|Dans ces mines d’or situées à 270km au nord de Niamey, les conditions de vie des milliers d’orpailleurs venus de toute la sous-région sont extrêmement précaires et dangereuses. La promiscuité, l’insalubrité et les risques liés à l’activité minière sont à l’origine d’une situation sanitaire complètement détériorée. 
+http://www.rfi.fr/radiofr/editions/072/edition_57_20070726.asp|title|Dans les mines d’or de Komabangou, au Niger +http://www.rfi.fr/radiofr/editions/072/edition_57_20070726.asp|creationTime|2007-07-26T13:00:53Z +http://winch5.blog.lemonde.fr/2013/08/22/comment-le-mobile-change-lafrique/|creationDate|2013-08-25 +http://winch5.blog.lemonde.fr/2013/08/22/comment-le-mobile-change-lafrique/|tag|http://www.semanlink.net/tag/mobile_phone +http://winch5.blog.lemonde.fr/2013/08/22/comment-le-mobile-change-lafrique/|tag|http://www.semanlink.net/tag/winch5 +http://winch5.blog.lemonde.fr/2013/08/22/comment-le-mobile-change-lafrique/|tag|http://www.semanlink.net/tag/new_africa +http://winch5.blog.lemonde.fr/2013/08/22/comment-le-mobile-change-lafrique/|title|Comment le mobile change l’Afrique Winch 5 +http://winch5.blog.lemonde.fr/2013/08/22/comment-le-mobile-change-lafrique/|creationTime|2013-08-25T14:53:43Z +http://www.lemonde.fr/sciences/video/2014/06/29/vers-des-panneaux-solaires-inspires-des-plantes_4447419_1650684.html|creationDate|2014-06-29 +http://www.lemonde.fr/sciences/video/2014/06/29/vers-des-panneaux-solaires-inspires-des-plantes_4447419_1650684.html|tag|http://www.semanlink.net/tag/energie_solaire +http://www.lemonde.fr/sciences/video/2014/06/29/vers-des-panneaux-solaires-inspires-des-plantes_4447419_1650684.html|comment|En répondant à la question « comment crée-t-on de l'électricité avec de la lumière », cette vidéo produite par Universcience.tv explique les lois physiques qui régissent le fonctionnement des panneaux photovoltaïques. Des lois qui permettent également de comprendre les limites, en termes de rendement, desdits panneaux. Pour les surmonter, les chercheurs travaillent sur une solution inspirée par la photosynthèse : les cellules solaires à pigments photosensibles, aussi appelées cellules Grätzel, du nom de leur concepteur, le chimiste suisse d'origine allemande Michael Grätzel qui a reçu en 2010 le Millenium Technology Prize pour son invention. +http://www.lemonde.fr/sciences/video/2014/06/29/vers-des-panneaux-solaires-inspires-des-plantes_4447419_1650684.html|title|Vers des panneaux solaires inspirés des plantes +http://www.lemonde.fr/sciences/video/2014/06/29/vers-des-panneaux-solaires-inspires-des-plantes_4447419_1650684.html|creationTime|2014-06-29T09:36:39Z +https://medium.freecodecamp.org/how-to-turn-your-website-into-a-mobile-app-with-7-lines-of-json-631c9c9895f5|creationDate|2018-04-03 +https://medium.freecodecamp.org/how-to-turn-your-website-into-a-mobile-app-with-7-lines-of-json-631c9c9895f5|tag|http://www.semanlink.net/tag/mobile_apps_dev +https://medium.freecodecamp.org/how-to-turn-your-website-into-a-mobile-app-with-7-lines-of-json-631c9c9895f5|title|How to Turn Your Website into a Mobile App with 7 Lines of JSON +https://medium.freecodecamp.org/how-to-turn-your-website-into-a-mobile-app-with-7-lines-of-json-631c9c9895f5|creationTime|2018-04-03T08:51:00Z +http://www.alvit.de/web-dev/index.html|creationDate|2005-05-19 +http://www.alvit.de/web-dev/index.html|title|Essential bookmarks for web-designers and webdevelopers CSS, Color Tools, Royalty free photos, Usability etc. 
+http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0094.html|creationDate|2013-06-24 +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0094.html|tag|http://www.semanlink.net/tag/yahoo +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0094.html|tag|http://www.semanlink.net/tag/peter_mika +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0094.html|tag|http://www.semanlink.net/tag/semantic_web_search_engine +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0094.html|tag|http://www.semanlink.net/tag/semantic_search +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0094.html|comment|The Semantic Search research group at Yahoo! Labs is pleased to announce the open-source code release and public demo of Glimmer, a search engine for RDF data +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0094.html|title|[ANN] Public release of Glimmer RDF search engine and demo from Peter Mika on 2013-06-20 (public-vocabs@w3.org from June 2013) +http://lists.w3.org/Archives/Public/public-vocabs/2013Jun/0094.html|creationTime|2013-06-24T17:48:57Z +http://www.thefigtrees.net/lee/blog/2007/02/updates_to_sparqljs.html|creationDate|2007-04-20 +http://www.thefigtrees.net/lee/blog/2007/02/updates_to_sparqljs.html|tag|http://www.semanlink.net/tag/sparql_en_javascript +http://www.thefigtrees.net/lee/blog/2007/02/updates_to_sparqljs.html|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.thefigtrees.net/lee/blog/2007/02/updates_to_sparqljs.html|title|TechnicaLee Speaking: Updates to sparql.js +http://www.thefigtrees.net/lee/blog/2007/02/updates_to_sparqljs.html|creationTime|2007-04-20T21:00:15Z +http://news.bbc.co.uk/2/hi/africa/7981964.stm|creationDate|2011-11-17 +http://news.bbc.co.uk/2/hi/africa/7981964.stm|tag|http://www.semanlink.net/tag/genocide_rwandais +http://news.bbc.co.uk/2/hi/africa/7981964.stm|comment|In many ways, the perpetrators of the genocide have succeeded. They have managed to encase the whole country in a gigantic airless bubble where everybody pretends that life goes on but where, in many ways, it actually stopped on 7 April 1994. +http://news.bbc.co.uk/2/hi/africa/7981964.stm|title|BBC NEWS Africa Rwanda's ghosts refuse to be buried +http://news.bbc.co.uk/2/hi/africa/7981964.stm|creationTime|2011-11-17T14:46:14Z +http://www.w3.org/2005/Incubator/rdb2rdf/XGR-rdb2rdf/|creationDate|2009-03-03 +http://www.w3.org/2005/Incubator/rdb2rdf/XGR-rdb2rdf/|tag|http://www.semanlink.net/tag/w3c_incubator_group_report +http://www.w3.org/2005/Incubator/rdb2rdf/XGR-rdb2rdf/|tag|http://www.semanlink.net/tag/relational_databases_and_the_semantic_web +http://www.w3.org/2005/Incubator/rdb2rdf/XGR-rdb2rdf/|title|W3C RDB2RDF Incubator Group Report +http://www.w3.org/2005/Incubator/rdb2rdf/XGR-rdb2rdf/|creationTime|2009-03-03T20:46:14Z +https://www.fastcompany.com/40435064/what-alan-kay-thinks-about-the-iphone-and-technology-now|creationDate|2017-09-17 +https://www.fastcompany.com/40435064/what-alan-kay-thinks-about-the-iphone-and-technology-now|tag|http://www.semanlink.net/tag/alan_kay +https://www.fastcompany.com/40435064/what-alan-kay-thinks-about-the-iphone-and-technology-now|tag|http://www.semanlink.net/tag/mobile_computing +https://www.fastcompany.com/40435064/what-alan-kay-thinks-about-the-iphone-and-technology-now|comment|"> Suppose you do something on the iPhone and you don’t like it, how do you undo it? + +> We can eliminate the learning curve for reading by getting rid of reading and going to recordings. 
That’s basically what they’re doing: Basically, let’s revert back to a pre-tool time. + +> “Simple things should be simple, complex things should be possible.” They’ve got simple things being simple and they have complex things being impossible, so that’s wrong. + + +" +https://www.fastcompany.com/40435064/what-alan-kay-thinks-about-the-iphone-and-technology-now|title|The Father Of Mobile Computing Is Not Impressed +https://www.fastcompany.com/40435064/what-alan-kay-thinks-about-the-iphone-and-technology-now|creationTime|2017-09-17T16:42:30Z +http://openjena.org/|creationDate|2009-11-12 +http://openjena.org/|tag|http://www.semanlink.net/tag/jena +http://openjena.org/|title|Jena Semantic Web Framework +http://openjena.org/|creationTime|2009-11-12T13:39:02Z +http://www.bbc.co.uk/news/science-environment-15181187|creationDate|2011-12-26 +http://www.bbc.co.uk/news/science-environment-15181187|tag|http://www.semanlink.net/tag/prix_nobel +http://www.bbc.co.uk/news/science-environment-15181187|tag|http://www.semanlink.net/tag/quasicrystals +http://www.bbc.co.uk/news/science-environment-15181187|title|2011: Nobel win for the discovery of the structure of quasicrystals +http://www.bbc.co.uk/news/science-environment-15181187|creationTime|2011-12-26T11:42:20Z +http://pingthesemanticweb.com/namespaces.php|creationDate|2007-04-20 +http://pingthesemanticweb.com/namespaces.php|tag|http://www.semanlink.net/tag/rdf_repository +http://pingthesemanticweb.com/namespaces.php|comment|"Share your RDF Data with the World!
+PingtheSemanticWeb.com is a repository for RDF documents. You can notify this service that you created/updated a RDF document on your web site. + +" +http://pingthesemanticweb.com/namespaces.php|title|Ping the Semantic Web.com +http://pingthesemanticweb.com/namespaces.php|creationTime|2007-04-20T20:47:43Z +http://www.math.union.edu/~dpvc/jsMath/welcome.html|creationDate|2007-07-09 +http://www.math.union.edu/~dpvc/jsMath/welcome.html|tag|http://www.semanlink.net/tag/mathematiques +http://www.math.union.edu/~dpvc/jsMath/welcome.html|tag|http://www.semanlink.net/tag/javascript +http://www.math.union.edu/~dpvc/jsMath/welcome.html|comment|A Method of Including Mathematics in Web Pages +http://www.math.union.edu/~dpvc/jsMath/welcome.html|title|jsMath Home Page +http://www.math.union.edu/~dpvc/jsMath/welcome.html|creationTime|2007-07-09T22:32:45Z +http://net.tutsplus.com/tutorials/other/easy-version-control-with-git/|creationDate|2013-09-23 +http://net.tutsplus.com/tutorials/other/easy-version-control-with-git/|tag|http://www.semanlink.net/tag/git +http://net.tutsplus.com/tutorials/other/easy-version-control-with-git/|tag|http://www.semanlink.net/tag/tutorial +http://net.tutsplus.com/tutorials/other/easy-version-control-with-git/|title|Tutorial: Easy Version Control with Git Nettuts+ +http://net.tutsplus.com/tutorials/other/easy-version-control-with-git/|creationTime|2013-09-23T10:57:41Z +http://code.google.com/p/linked-data-api/wiki/AssumptionsAndGoals|creationDate|2012-01-11 +http://code.google.com/p/linked-data-api/wiki/AssumptionsAndGoals|tag|http://www.semanlink.net/tag/linked_data_api +http://code.google.com/p/linked-data-api/wiki/AssumptionsAndGoals|title|linked-data-api: assumptions and goals +http://code.google.com/p/linked-data-api/wiki/AssumptionsAndGoals|creationTime|2012-01-11T22:51:10Z +http://www.markus-lanthaler.com/research/third-generation-web-apis-bridging-the-gap-between-rest-and-linked-data.pdf|creationDate|2014-12-22 +http://www.markus-lanthaler.com/research/third-generation-web-apis-bridging-the-gap-between-rest-and-linked-data.pdf|tag|http://www.semanlink.net/tag/markus_lanthaler +http://www.markus-lanthaler.com/research/third-generation-web-apis-bridging-the-gap-between-rest-and-linked-data.pdf|title|Markus Lanthaler - Doctoral Dissertation +http://www.markus-lanthaler.com/research/third-generation-web-apis-bridging-the-gap-between-rest-and-linked-data.pdf|creationTime|2014-12-22T23:35:26Z +http://edition.cnn.com/2012/01/13/tech/innovation/ces-future-driving/index.html|creationDate|2012-01-15 +http://edition.cnn.com/2012/01/13/tech/innovation/ces-future-driving/index.html|tag|http://www.semanlink.net/tag/automobile +http://edition.cnn.com/2012/01/13/tech/innovation/ces-future-driving/index.html|title|'Augmented-reality' windshields and the future of driving - CNN.com +http://edition.cnn.com/2012/01/13/tech/innovation/ces-future-driving/index.html|creationTime|2012-01-15T09:54:34Z +http://stackoverflow.com/questions/30424894/java-syntax-with-mongodb|creationDate|2016-11-17 +http://stackoverflow.com/questions/30424894/java-syntax-with-mongodb|tag|http://www.semanlink.net/tag/java_8_lambdas +http://stackoverflow.com/questions/30424894/java-syntax-with-mongodb|tag|http://www.semanlink.net/tag/mongodb +http://stackoverflow.com/questions/30424894/java-syntax-with-mongodb|title|Java syntax with MongoDB - Stack Overflow +http://stackoverflow.com/questions/30424894/java-syntax-with-mongodb|creationTime|2016-11-17T02:37:20Z 
+http://www.w3.org/2005/rules/wiki/RIF_FAQ|creationDate|2010-06-28 +http://www.w3.org/2005/rules/wiki/RIF_FAQ|tag|http://www.semanlink.net/tag/rif +http://www.w3.org/2005/rules/wiki/RIF_FAQ|tag|http://www.semanlink.net/tag/faq +http://www.w3.org/2005/rules/wiki/RIF_FAQ|title|RIF FAQ +http://www.w3.org/2005/rules/wiki/RIF_FAQ|creationTime|2010-06-28T16:49:31Z +http://alternatives.blog.lemonde.fr/2012/08/01/avec-giradora-pedaler-cest-laver-et-ca-change-la-vie/|creationDate|2012-08-02 +http://alternatives.blog.lemonde.fr/2012/08/01/avec-giradora-pedaler-cest-laver-et-ca-change-la-vie/|tag|http://www.semanlink.net/tag/innovation +http://alternatives.blog.lemonde.fr/2012/08/01/avec-giradora-pedaler-cest-laver-et-ca-change-la-vie/|tag|http://www.semanlink.net/tag/tiers_monde +http://alternatives.blog.lemonde.fr/2012/08/01/avec-giradora-pedaler-cest-laver-et-ca-change-la-vie/|title|Giradora, la machine à laver à pédale qui change la vie Même pas mal +http://alternatives.blog.lemonde.fr/2012/08/01/avec-giradora-pedaler-cest-laver-et-ca-change-la-vie/|creationTime|2012-08-02T13:21:35Z +http://www.itworld.com/software/382730/ibm-offer-watson-supercomputer-cloud-development-platform|creationDate|2013-11-18 +http://www.itworld.com/software/382730/ibm-offer-watson-supercomputer-cloud-development-platform|tag|http://www.semanlink.net/tag/i_b_m_s_watson +http://www.itworld.com/software/382730/ibm-offer-watson-supercomputer-cloud-development-platform|title|IBM to offer Watson supercomputer as cloud development platform ITworld +http://www.itworld.com/software/382730/ibm-offer-watson-supercomputer-cloud-development-platform|creationTime|2013-11-18T09:57:23Z +http://www.cigref.fr/etude-cigref-enjeux-de-mise-en-oeuvre-de-l-intelligence-artificielle-pour-l-entreprise|creationDate|2017-12-18 +http://www.cigref.fr/etude-cigref-enjeux-de-mise-en-oeuvre-de-l-intelligence-artificielle-pour-l-entreprise|title|Etude CIGREF : enjeux de mise en œuvre de l’Intelligence Artificielle pour l’entreprise… – CIGREF +http://www.cigref.fr/etude-cigref-enjeux-de-mise-en-oeuvre-de-l-intelligence-artificielle-pour-l-entreprise|creationTime|2017-12-18T10:50:06Z +http://framablog.org/2016/01/30/la-blockchain-au-dela-du-bitcoin/|creationDate|2016-02-01 +http://framablog.org/2016/01/30/la-blockchain-au-dela-du-bitcoin/|tag|http://www.semanlink.net/tag/bitcoin +http://framablog.org/2016/01/30/la-blockchain-au-dela-du-bitcoin/|tag|http://www.semanlink.net/tag/blockchain +http://framablog.org/2016/01/30/la-blockchain-au-dela-du-bitcoin/|title|La Blockchain, au-delà du Bitcoin – Framablog +http://framablog.org/2016/01/30/la-blockchain-au-dela-du-bitcoin/|creationTime|2016-02-01T02:00:06Z +https://huyenchip.com/2018/10/04/sotawhat.html|creationDate|2018-10-05 +https://huyenchip.com/2018/10/04/sotawhat.html|tag|http://www.semanlink.net/tag/arxiv +https://huyenchip.com/2018/10/04/sotawhat.html|tag|http://www.semanlink.net/tag/sota +https://huyenchip.com/2018/10/04/sotawhat.html|title|SOTAWHAT - A script to keep track of state-of-the-art AI research +https://huyenchip.com/2018/10/04/sotawhat.html|creationTime|2018-10-05T08:26:47Z +http://arstechnica.com/reviews/os/macosx-10.4.ars/7|creationDate|2005-05-25 +http://arstechnica.com/reviews/os/macosx-10.4.ars/7|tag|http://www.semanlink.net/tag/mac_os_x +http://arstechnica.com/reviews/os/macosx-10.4.ars/7|tag|http://www.semanlink.net/tag/tiger +http://arstechnica.com/reviews/os/macosx-10.4.ars/7|title|Mac OS X 10.4 Tiger : Page 7 Mac OS X 10.4 Tiger 
+http://www.wollemipine.com|creationDate|2005-05-12 +http://www.wollemipine.com|tag|http://www.semanlink.net/tag/decouverte_d_especes_inconnues +http://www.wollemipine.com|tag|http://www.semanlink.net/tag/fossile_vivant +http://www.wollemipine.com|tag|http://www.semanlink.net/tag/arbres +http://www.wollemipine.com|tag|http://www.semanlink.net/tag/australie +http://www.wollemipine.com|title|WollemiPine.com - the official home of the Wollemi Pine. +http://www.linkedin.com/groupAnswers?viewQuestionAndAnswers=&discussionID=254113207&gid=49970&commentID=148780198&trk=view_disc&fromEmail=&ut=0BXwfSCrIXc5Q1|creationDate|2013-07-06 +http://www.linkedin.com/groupAnswers?viewQuestionAndAnswers=&discussionID=254113207&gid=49970&commentID=148780198&trk=view_disc&fromEmail=&ut=0BXwfSCrIXc5Q1|tag|http://www.semanlink.net/tag/linkedin +http://www.linkedin.com/groupAnswers?viewQuestionAndAnswers=&discussionID=254113207&gid=49970&commentID=148780198&trk=view_disc&fromEmail=&ut=0BXwfSCrIXc5Q1|tag|http://www.semanlink.net/tag/kingsley_idehen +http://www.linkedin.com/groupAnswers?viewQuestionAndAnswers=&discussionID=254113207&gid=49970&commentID=148780198&trk=view_disc&fromEmail=&ut=0BXwfSCrIXc5Q1|tag|http://www.semanlink.net/tag/c2gweb +http://www.linkedin.com/groupAnswers?viewQuestionAndAnswers=&discussionID=254113207&gid=49970&commentID=148780198&trk=view_disc&fromEmail=&ut=0BXwfSCrIXc5Q1|tag|http://www.semanlink.net/tag/fps_post +http://www.linkedin.com/groupAnswers?viewQuestionAndAnswers=&discussionID=254113207&gid=49970&commentID=148780198&trk=view_disc&fromEmail=&ut=0BXwfSCrIXc5Q1|title|Renault Exploitation of Linked Data LinkedIn, discussion started by Kingsley Idehen +http://www.linkedin.com/groupAnswers?viewQuestionAndAnswers=&discussionID=254113207&gid=49970&commentID=148780198&trk=view_disc&fromEmail=&ut=0BXwfSCrIXc5Q1|creationTime|2013-07-06T00:57:20Z +http://webofdata.wordpress.com/2010/09/27/linked-enterprise-data-in-a-nutshell/|creationDate|2010-09-30 +http://webofdata.wordpress.com/2010/09/27/linked-enterprise-data-in-a-nutshell/|tag|http://www.semanlink.net/tag/linked_data_demo +http://webofdata.wordpress.com/2010/09/27/linked-enterprise-data-in-a-nutshell/|tag|http://www.semanlink.net/tag/michael_hausenblas +http://webofdata.wordpress.com/2010/09/27/linked-enterprise-data-in-a-nutshell/|tag|http://www.semanlink.net/tag/4store +http://webofdata.wordpress.com/2010/09/27/linked-enterprise-data-in-a-nutshell/|title|Linked Enterprise Data in a nutshell « Web of Data +http://webofdata.wordpress.com/2010/09/27/linked-enterprise-data-in-a-nutshell/|creationTime|2010-09-30T23:23:41Z +http://www.ldh-toulon.net/spip.php?article399|creationDate|2007-04-30 +http://www.ldh-toulon.net/spip.php?article399|tag|http://www.semanlink.net/tag/mission_voulet_chanoine +http://www.ldh-toulon.net/spip.php?article399|comment|"Pourquoi cette violence extrême du colonisateur ? +
+Les idées racialistes ont forcément joué. Un théoricien disait : il faut expulser [de la métropole] la violence des classes ""dangereuses"" et lui permettre de se débrider ailleurs. Là-bas, l’individu, livré à lui-même, transgresse tous les interdits. C’est le thème d’ Au coeur des ténèbres de Conrad (1899). En Europe, au XXe siècle, on osera transgresser parce qu’on l’a déjà fait en Afrique." +http://www.ldh-toulon.net/spip.php?article399|title|[LDH-Toulon] la colonne infernale de Voulet-Chanoine +http://www.ldh-toulon.net/spip.php?article399|creationTime|2007-04-30T01:32:31Z +http://www.freenet.org.nz/misc/google-privacy.html|creationDate|2006-08-19 +http://www.freenet.org.nz/misc/google-privacy.html|tag|http://www.semanlink.net/tag/google +http://www.freenet.org.nz/misc/google-privacy.html|comment|"At present, when you submit a search to Google, you need to assume that your +search keywords, along with your name, address and social security number, are +being logged to a government database. + +The purpose of this article is to inform people that there are alternatives, that +they have a right to search for whatever information they want in complete privacy, +and there are simple technical means of exercising that right." +http://www.freenet.org.nz/misc/google-privacy.html|title|Protect Your Privacy from Google +http://spaces.live.com/editorial/rayozzie/demo/liveclip/liveclipsample/clipboardexample.html|creationDate|2008-01-15 +http://spaces.live.com/editorial/rayozzie/demo/liveclip/liveclipsample/clipboardexample.html|tag|http://www.semanlink.net/tag/live_clipboard +http://spaces.live.com/editorial/rayozzie/demo/liveclip/liveclipsample/clipboardexample.html|comment|Doesn't seem to work in safari +http://spaces.live.com/editorial/rayozzie/demo/liveclip/liveclipsample/clipboardexample.html|title|Live Clipboard Example +http://spaces.live.com/editorial/rayozzie/demo/liveclip/liveclipsample/clipboardexample.html|creationTime|2008-01-15T02:14:22Z +http://www.rentacoder.com|creationDate|2006-01-26 +http://www.rentacoder.com|tag|http://www.semanlink.net/tag/delocalisation_des_services +http://www.rentacoder.com|tag|http://www.semanlink.net/tag/web_marchand +http://www.rentacoder.com|tag|http://www.semanlink.net/tag/software +http://www.rentacoder.com|title|Rent A Coder: How Software Gets Done +http://bighugelabs.com/flickr/|creationDate|2007-10-31 +http://bighugelabs.com/flickr/|tag|http://www.semanlink.net/tag/flickr +http://bighugelabs.com/flickr/|title|fd's Flickr Toys: Fun toys for your digital photographs +http://bighugelabs.com/flickr/|creationTime|2007-10-31T15:47:08Z +http://www.lemonde.fr/afrique/article/2015/06/10/dans-le-parc-congolais-des-virunga-l-armee-a-la-solde-du-petrolier-soco_4650727_3212.html|creationDate|2015-06-10 +http://www.lemonde.fr/afrique/article/2015/06/10/dans-le-parc-congolais-des-virunga-l-armee-a-la-solde-du-petrolier-soco_4650727_3212.html|tag|http://www.semanlink.net/tag/congo_kinshasa +http://www.lemonde.fr/afrique/article/2015/06/10/dans-le-parc-congolais-des-virunga-l-armee-a-la-solde-du-petrolier-soco_4650727_3212.html|tag|http://www.semanlink.net/tag/gorille +http://www.lemonde.fr/afrique/article/2015/06/10/dans-le-parc-congolais-des-virunga-l-armee-a-la-solde-du-petrolier-soco_4650727_3212.html|tag|http://www.semanlink.net/tag/petrole_et_corruption +http://www.lemonde.fr/afrique/article/2015/06/10/dans-le-parc-congolais-des-virunga-l-armee-a-la-solde-du-petrolier-soco_4650727_3212.html|title|Dans le parc congolais des Virunga, l’armée à la solde du 
pétrolier SOCO +http://www.lemonde.fr/afrique/article/2015/06/10/dans-le-parc-congolais-des-virunga-l-armee-a-la-solde-du-petrolier-soco_4650727_3212.html|creationTime|2015-06-10T19:34:16Z +http://blog.ted.com/2014/09/26/a-ted-fellow-wields-genes-to-protect-the-amazon/|creationDate|2014-09-26 +http://blog.ted.com/2014/09/26/a-ted-fellow-wields-genes-to-protect-the-amazon/|tag|http://www.semanlink.net/tag/amazonie +http://blog.ted.com/2014/09/26/a-ted-fellow-wields-genes-to-protect-the-amazon/|tag|http://www.semanlink.net/tag/invasion_d_especes_etrangeres +http://blog.ted.com/2014/09/26/a-ted-fellow-wields-genes-to-protect-the-amazon/|title|Invasion of the golden mussel: wielding genes to protect the Amazon TED Blog +http://blog.ted.com/2014/09/26/a-ted-fellow-wields-genes-to-protect-the-amazon/|creationTime|2014-09-26T16:04:56Z +https://www.reador.net/|creationDate|2013-09-07 +https://www.reador.net/|tag|http://www.semanlink.net/tag/to_see +https://www.reador.net/|tag|http://www.semanlink.net/tag/news +https://www.reador.net/|title|Reador - aggregateur de news sémantique +https://www.reador.net/|creationTime|2013-09-07T09:52:00Z +http://denisnddo.free.fr/html/zarma.htm#ancre16|creationDate|2006-03-30 +http://denisnddo.free.fr/html/zarma.htm#ancre16|tag|http://www.semanlink.net/tag/jerma +http://denisnddo.free.fr/html/zarma.htm#ancre16|title|Zarma: notions de base Zarma : notions de base +http://clarkparsia.com/weblog/2007/08/27/understanding-swrl-part-2-dl-safety/|creationDate|2007-08-27 +http://clarkparsia.com/weblog/2007/08/27/understanding-swrl-part-2-dl-safety/|tag|http://www.semanlink.net/tag/swrl +http://clarkparsia.com/weblog/2007/08/27/understanding-swrl-part-2-dl-safety/|tag|http://www.semanlink.net/tag/bijan_parsia +http://clarkparsia.com/weblog/2007/08/27/understanding-swrl-part-2-dl-safety/|title|Thinking Clearly» Understanding SWRL (Part 2): DL Safety +http://clarkparsia.com/weblog/2007/08/27/understanding-swrl-part-2-dl-safety/|creationTime|2007-08-27T22:39:25Z +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.43.7803|creationDate|2019-04-18 +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.43.7803|tag|http://www.semanlink.net/tag/frequently_cited_paper +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.43.7803|tag|http://www.semanlink.net/tag/information_retrieval +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.43.7803|tag|http://www.semanlink.net/tag/ranking_information_retrieval +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.43.7803|comment|"> ""**Turn the search problem around to predict the input**"" + +> We propose a new probabilistic approach to information retrieval based upon the ideas and methods of statistical machine translation. The central ingredient in this approach is **a statistical model of how a user might distill or ""translate"" a given document into a query**. To assess the relevance of a document to a user's query, **we estimate the probability that the query would have been generated as a translation of the document**, and factor in the user's general preferences in the form of a prior distribution over documents. 
We propose a simple, well motivated model of the document-to-query translation process, and describe an algorithm for learning the parameters of this model in an unsupervised manner from a collection of documents + + + + +" +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.43.7803|title|Information Retrieval as Statistical Translation (Adam Berger , John Lafferty, 1999) +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.43.7803|creationTime|2019-04-18T10:30:01Z +http://en.wikipedia.org/wiki/Perdita_Durango|creationDate|2014-12-07 +http://en.wikipedia.org/wiki/Perdita_Durango|tag|http://www.semanlink.net/tag/film +http://en.wikipedia.org/wiki/Perdita_Durango|title|Perdita Durango - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Perdita_Durango|creationTime|2014-12-07T19:45:04Z +https://stackoverflow.com/questions/39063676/how-to-boost-a-keras-based-neural-network-using-adaboost|creationDate|2018-10-28 +https://stackoverflow.com/questions/39063676/how-to-boost-a-keras-based-neural-network-using-adaboost|tag|http://www.semanlink.net/tag/keras +https://stackoverflow.com/questions/39063676/how-to-boost-a-keras-based-neural-network-using-adaboost|tag|http://www.semanlink.net/tag/adaboost +https://stackoverflow.com/questions/39063676/how-to-boost-a-keras-based-neural-network-using-adaboost|title|How to boost a Keras based neural network using AdaBoost? - Stack Overflow +https://stackoverflow.com/questions/39063676/how-to-boost-a-keras-based-neural-network-using-adaboost|creationTime|2018-10-28T01:00:34Z +https://transacl.org/ojs/index.php/tacl/article/download/742/204|creationDate|2017-12-07 +https://transacl.org/ojs/index.php/tacl/article/download/742/204|tag|http://www.semanlink.net/tag/pointwise_mutual_information +https://transacl.org/ojs/index.php/tacl/article/download/742/204|tag|http://www.semanlink.net/tag/yoav_goldberg +https://transacl.org/ojs/index.php/tacl/article/download/742/204|tag|http://www.semanlink.net/tag/word_embedding +https://transacl.org/ojs/index.php/tacl/article/download/742/204|comment|"(improves on [Yoav Goldberg](/tag/yoav_goldberg)'s findings) +" +https://transacl.org/ojs/index.php/tacl/article/download/742/204|title|A Latent Variable Model Approach to PMI-basedWord Embeddings +https://transacl.org/ojs/index.php/tacl/article/download/742/204|creationTime|2017-12-07T16:20:59Z +http://ebiquity.umbc.edu/v2.1/blogger/|creationDate|2005-06-02 +http://ebiquity.umbc.edu/v2.1/blogger/|tag|http://www.semanlink.net/tag/semantic_web_blog +http://ebiquity.umbc.edu/v2.1/blogger/|title|EBB: ebiquity blog at UMBC +http://blog.shakirm.com/2015/11/talk-memory-based-bayesian-reasoning-and-deep-learning/|creationDate|2015-11-20 +http://blog.shakirm.com/2015/11/talk-memory-based-bayesian-reasoning-and-deep-learning/|tag|http://www.semanlink.net/tag/bayesian_reasoning +http://blog.shakirm.com/2015/11/talk-memory-based-bayesian-reasoning-and-deep-learning/|tag|http://www.semanlink.net/tag/deep_learning +http://blog.shakirm.com/2015/11/talk-memory-based-bayesian-reasoning-and-deep-learning/|tag|http://www.semanlink.net/tag/google_guy +http://blog.shakirm.com/2015/11/talk-memory-based-bayesian-reasoning-and-deep-learning/|title|Talk: Memory-based Bayesian Reasoning and Deep Learning ← The Spectator +http://blog.shakirm.com/2015/11/talk-memory-based-bayesian-reasoning-and-deep-learning/|creationTime|2015-11-20T17:27:08Z +https://dvcs.w3.org/hg/htmldata/raw-file/default/html-data-guide/index.html|creationDate|2012-01-04 
+https://dvcs.w3.org/hg/htmldata/raw-file/default/html-data-guide/index.html|tag|http://www.semanlink.net/tag/html_data +https://dvcs.w3.org/hg/htmldata/raw-file/default/html-data-guide/index.html|comment|Microformats, RDFa and microdata all enable consumers to extract data from HTML pages. This data may be embedded within enhanced search engine results, exposed to users through browser extensions, aggregated across websites or used by scripts running within those HTML pages. This guide aims to help publishers and consumers of HTML data use it well. With several syntaxes and vocabularies to choose from, it provides guidance about how to decide which meets the publisher's or consumer's needs. It discusses when it is necessary to mix syntaxes and vocabularies and how to publish and consume data that uses multiple formats. It describes how to create vocabularies that can be used in multiple syntaxes and general best practices about the publication and consumption of HTML data. +https://dvcs.w3.org/hg/htmldata/raw-file/default/html-data-guide/index.html|title|HTML Data Guide +https://dvcs.w3.org/hg/htmldata/raw-file/default/html-data-guide/index.html|creationTime|2012-01-04T21:44:56Z +http://community.kde.org/Baloo#Why_change_Nepomuk.3F|creationDate|2014-02-05 +http://community.kde.org/Baloo#Why_change_Nepomuk.3F|tag|http://www.semanlink.net/tag/kde +http://community.kde.org/Baloo#Why_change_Nepomuk.3F|tag|http://www.semanlink.net/tag/nepomuk +http://community.kde.org/Baloo#Why_change_Nepomuk.3F|title|Baloo - KDE Community Wiki +http://community.kde.org/Baloo#Why_change_Nepomuk.3F|creationTime|2014-02-05T16:39:41Z +http://www.javaworld.com/article/2077602/learn-java/java-tip-98--reflect-on-the-visitor-design-pattern.html|creationDate|2015-04-16 +http://www.javaworld.com/article/2077602/learn-java/java-tip-98--reflect-on-the-visitor-design-pattern.html|tag|http://www.semanlink.net/tag/design_pattern +http://www.javaworld.com/article/2077602/learn-java/java-tip-98--reflect-on-the-visitor-design-pattern.html|comment|By using Java reflection, you can enhance the Visitor design pattern to provide a powerful way to operate on object structures, giving the flexibility to add new +http://www.javaworld.com/article/2077602/learn-java/java-tip-98--reflect-on-the-visitor-design-pattern.html|title|Java Tip 98: Reflect on the Visitor design pattern JavaWorld +http://www.javaworld.com/article/2077602/learn-java/java-tip-98--reflect-on-the-visitor-design-pattern.html|creationTime|2015-04-16T08:46:19Z +http://home.ccil.org/~cowan/XML/tagsoup/|creationDate|2007-01-24 +http://home.ccil.org/~cowan/XML/tagsoup/|tag|http://www.semanlink.net/tag/tools +http://home.ccil.org/~cowan/XML/tagsoup/|tag|http://www.semanlink.net/tag/html_parsing +http://home.ccil.org/~cowan/XML/tagsoup/|comment|SAX-compliant parser written in Java that, instead of parsing well-formed or valid XML, parses HTML as it is found in the wild: poor, nasty and brutish. By providing a SAX interface, it allows standard XML tools to be applied to even the worst HTML. 
Free and Open Source software, licensed under the Academic Free License +http://home.ccil.org/~cowan/XML/tagsoup/|title|TagSoup +https://support.apple.com/en-us/HT201300|creationDate|2015-09-26 +https://support.apple.com/en-us/HT201300|tag|http://www.semanlink.net/tag/bricolage_mac +https://support.apple.com/en-us/HT201300|title|How to identify MacBook Pro models - Apple Support +https://support.apple.com/en-us/HT201300|creationTime|2015-09-26T10:23:07Z +http://infolab.stanford.edu/gps/|creationDate|2014-06-19 +http://infolab.stanford.edu/gps/|tag|http://www.semanlink.net/tag/graph +http://infolab.stanford.edu/gps/|title|GPS: A Graph Processing System +http://infolab.stanford.edu/gps/|creationTime|2014-06-19T13:37:26Z +http://www.jenitennison.com/blog/node/154|creationDate|2011-08-11 +http://www.jenitennison.com/blog/node/154|tag|http://www.semanlink.net/tag/jeni_tennison +http://www.jenitennison.com/blog/node/154|tag|http://www.semanlink.net/tag/hash_bang_uris +http://www.jenitennison.com/blog/node/154|title|Hash URIs Jeni's Musings +http://www.jenitennison.com/blog/node/154|creationTime|2011-08-11T11:30:17Z +http://www.geocities.com/anpipniger/CompC.htm|creationDate|2006-04-02 +http://www.geocities.com/anpipniger/CompC.htm|tag|http://www.semanlink.net/tag/irrigation +http://www.geocities.com/anpipniger/CompC.htm|tag|http://www.semanlink.net/tag/verger_de_gado_a_niamey +http://www.geocities.com/anpipniger/CompC.htm|title|CIMA.html Irrigation Manuelle Améliorée +http://internetactu.blog.lemonde.fr/2013/05/31/de-la-fabrique-des-automates-politiques/|creationDate|2013-08-21 +http://internetactu.blog.lemonde.fr/2013/05/31/de-la-fabrique-des-automates-politiques/|tag|http://www.semanlink.net/tag/drones +http://internetactu.blog.lemonde.fr/2013/05/31/de-la-fabrique-des-automates-politiques/|title|De la fabrique des automates politiques InternetActu +http://internetactu.blog.lemonde.fr/2013/05/31/de-la-fabrique-des-automates-politiques/|creationTime|2013-08-21T01:11:34Z +http://alistapart.com/column/windows-on-the-web|creationDate|2013-05-23 +http://alistapart.com/column/windows-on-the-web|tag|http://www.semanlink.net/tag/seamless_journey +http://alistapart.com/column/windows-on-the-web|tag|http://www.semanlink.net/tag/smartphone +http://alistapart.com/column/windows-on-the-web|tag|http://www.semanlink.net/tag/multidevice +http://alistapart.com/column/windows-on-the-web|comment|Users imagine that each device is its own window onto the web. 
+http://alistapart.com/column/windows-on-the-web|title|Windows on the Web ∙ An A List Apart Column
+http://alistapart.com/column/windows-on-the-web|creationTime|2013-05-23T14:17:10Z
+http://dig.csail.mit.edu/breadcrumbs/node/71|creationDate|2006-01-27
+http://dig.csail.mit.edu/breadcrumbs/node/71|tag|http://www.semanlink.net/tag/tim_berners_lee
+http://dig.csail.mit.edu/breadcrumbs/node/71|tag|http://www.semanlink.net/tag/uri
+http://dig.csail.mit.edu/breadcrumbs/node/71|tag|http://www.semanlink.net/tag/foaf
+http://dig.csail.mit.edu/breadcrumbs/node/71|title|Give yourself a URI
+https://lists.w3.org/Archives/Public/public-linked-json/2015Jul/0007.html|creationDate|2015-08-06
+https://lists.w3.org/Archives/Public/public-linked-json/2015Jul/0007.html|tag|http://www.semanlink.net/tag/niklas_lindstrom
+https://lists.w3.org/Archives/Public/public-linked-json/2015Jul/0007.html|tag|http://www.semanlink.net/tag/json_ld
+https://lists.w3.org/Archives/Public/public-linked-json/2015Jul/0007.html|tag|http://www.semanlink.net/tag/public_linked_json_w3_org
+https://lists.w3.org/Archives/Public/public-linked-json/2015Jul/0007.html|title|Re: Querying JSON-LD on client side from David I. Lehn on 2015-07-17 (public-linked-json@w3.org from July 2015)
+https://lists.w3.org/Archives/Public/public-linked-json/2015Jul/0007.html|creationTime|2015-08-06T17:39:34Z
+http://www.newscientist.com/article/mg22329832.700-googles-factchecking-bots-build-vast-knowledge-bank.html?full=true#.U_3IxEtGuwx|creationDate|2014-08-27
+http://www.newscientist.com/article/mg22329832.700-googles-factchecking-bots-build-vast-knowledge-bank.html?full=true#.U_3IxEtGuwx|tag|http://www.semanlink.net/tag/knowledge_vault
+http://www.newscientist.com/article/mg22329832.700-googles-factchecking-bots-build-vast-knowledge-bank.html?full=true#.U_3IxEtGuwx|title|Google's fact-checking bots build vast knowledge bank - tech - 20 August 2014 - New Scientist
+http://www.newscientist.com/article/mg22329832.700-googles-factchecking-bots-build-vast-knowledge-bank.html?full=true#.U_3IxEtGuwx|creationTime|2014-08-27T14:59:34Z
+http://en.wikipedia.org/wiki/Koumbi_Saleh|creationDate|2013-01-06
+http://en.wikipedia.org/wiki/Koumbi_Saleh|tag|http://www.semanlink.net/tag/histoire_de_l_afrique_and_afrique_de_l_ouest
+http://en.wikipedia.org/wiki/Koumbi_Saleh|tag|http://www.semanlink.net/tag/ghana_empire
+http://en.wikipedia.org/wiki/Koumbi_Saleh|tag|http://www.semanlink.net/tag/mauritanie
+http://en.wikipedia.org/wiki/Koumbi_Saleh|title|Koumbi Saleh - Wikipedia, the free encyclopedia
+http://en.wikipedia.org/wiki/Koumbi_Saleh|creationTime|2013-01-06T18:56:21Z
+http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20route%20prediction%20-%20camera%20ready%20v3.pdf|creationDate|2013-04-10
+http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20route%20prediction%20-%20camera%20ready%20v3.pdf|tag|http://www.semanlink.net/tag/microsoft_research
+http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20route%20prediction%20-%20camera%20ready%20v3.pdf|tag|http://www.semanlink.net/tag/destination_prediction
+http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20route%20prediction%20-%20camera%20ready%20v3.pdf|comment|"This paper develops and tests algorithms for predicting the end-to-end route of a vehicle based on GPS observations of the vehicle’s past trips. We show that a large portion of a typical driver’s trips are repeated. Our algorithms exploit this fact for prediction by matching the first part of a driver’s current trip with one of the set of previously observed trips. Rather than predicting upcoming road segments, our focus is on making long term predictions of the route."
+http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20route%20prediction%20-%20camera%20ready%20v3.pdf|title|Route Prediction from Trip Observations
+http://research.microsoft.com/en-us/um/people/jckrumm/Publications%202008/sae%20route%20prediction%20-%20camera%20ready%20v3.pdf|creationTime|2013-04-10T13:21:37Z
+http://idlewords.com/talks/website_obesity.htm|creationDate|2016-01-05
+http://idlewords.com/talks/website_obesity.htm|tag|http://www.semanlink.net/tag/web_site_design
+http://idlewords.com/talks/website_obesity.htm|tag|http://www.semanlink.net/tag/good
+http://idlewords.com/talks/website_obesity.htm|title|The Website Obesity Crisis
+http://idlewords.com/talks/website_obesity.htm|creationTime|2016-01-05T10:55:01Z
+http://bigbrowser.blog.lemonde.fr/2016/02/26/un-poeme-anglais-imprononcable/|creationDate|2016-02-27
+http://bigbrowser.blog.lemonde.fr/2016/02/26/un-poeme-anglais-imprononcable/|tag|http://www.semanlink.net/tag/anglais
+http://bigbrowser.blog.lemonde.fr/2016/02/26/un-poeme-anglais-imprononcable/|title|Un poème anglais imprononçable Big Browser
+http://bigbrowser.blog.lemonde.fr/2016/02/26/un-poeme-anglais-imprononcable/|creationTime|2016-02-27T01:31:30Z
+http://time.com/4193747/el-salvador-zika-virus-pregnancy/|creationDate|2016-01-27
+http://time.com/4193747/el-salvador-zika-virus-pregnancy/|tag|http://www.semanlink.net/tag/zika
+http://time.com/4193747/el-salvador-zika-virus-pregnancy/|title|El Salvador Asks People Not to Have Children for Two Years Due to Zika Virus TIME
+http://time.com/4193747/el-salvador-zika-virus-pregnancy/|creationTime|2016-01-27T13:57:55Z
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|creationDate|2019-05-29
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|tag|http://www.semanlink.net/tag/graph_embeddings
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|tag|http://www.semanlink.net/tag/survey
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|arxiv_author|Hongyun Cai
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|arxiv_author|Vincent W. Zheng
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|arxiv_author|Kevin Chen-Chuan Chang
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|title|[1709.07604] A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|bookmarkOf|https://arxiv.org/abs/1709.07604
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|creationTime|2019-05-29T17:26:26Z
+http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|arxiv_summary|"Graph is an important data representation which appears in a wide diversity
+of real-world scenarios. Effective graph analytics provides users a deeper
+understanding of what is behind the data, and thus can benefit a lot of useful
+applications such as node classification, node recommendation, link prediction,
+etc. However, most graph analytics methods suffer the high computation and
+space cost. 
Graph embedding is an effective yet efficient way to solve the +graph analytics problem. It converts the graph data into a low dimensional +space in which the graph structural information and graph properties are +maximally preserved. In this survey, we conduct a comprehensive review of the +literature in graph embedding. We first introduce the formal definition of +graph embedding as well as the related concepts. After that, we propose two +taxonomies of graph embedding which correspond to what challenges exist in +different graph embedding problem settings and how the existing work address +these challenges in their solutions. Finally, we summarize the applications +that graph embedding enables and suggest four promising future research +directions in terms of computation efficiency, problem settings, techniques and +application scenarios." +http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|arxiv_firstAuthor|Hongyun Cai +http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|arxiv_updated|2018-02-02T07:01:22Z +http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|arxiv_title|A Comprehensive Survey of Graph Embedding: Problems, Techniques and Applications +http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|arxiv_published|2017-09-22T05:54:16Z +http://www.semanlink.net/doc/2019/05/_1709_07604_a_comprehensive_su|arxiv_num|1709.07604 +http://jobs.semanticweb.com/jobboard.php|creationDate|2012-05-15 +http://jobs.semanticweb.com/jobboard.php|tag|http://www.semanlink.net/tag/job_openings +http://jobs.semanticweb.com/jobboard.php|tag|http://www.semanlink.net/tag/semantic_web +http://jobs.semanticweb.com/jobboard.php|comment|Search for semantic technology jobs +http://jobs.semanticweb.com/jobboard.php|title|Semantic Web Jobs Semantic Web Job Board +http://jobs.semanticweb.com/jobboard.php|creationTime|2012-05-15T09:50:17Z +http://jex.im/regulex/#!embed=false&flags=&re=%5E(a%7Cb)*%3F%24|creationDate|2015-02-05 +http://jex.im/regulex/#!embed=false&flags=&re=%5E(a%7Cb)*%3F%24|tag|http://www.semanlink.net/tag/regex +http://jex.im/regulex/#!embed=false&flags=&re=%5E(a%7Cb)*%3F%24|title|Regulex:JavaScript Regular Expression Visualizer. 
+http://jex.im/regulex/#!embed=false&flags=&re=%5E(a%7Cb)*%3F%24|creationTime|2015-02-05T00:59:56Z
+http://www-128.ibm.com/developerworks/opensource/library/os-php-read/|creationDate|2006-03-28
+http://www-128.ibm.com/developerworks/opensource/library/os-php-read/|tag|http://www.semanlink.net/tag/ibm_developerworks
+http://www-128.ibm.com/developerworks/opensource/library/os-php-read/|tag|http://www.semanlink.net/tag/php
+http://www-128.ibm.com/developerworks/opensource/library/os-php-read/|title|Recommended PHP reading list
+http://www.ilrt.bris.ac.uk/discovery/rdf/resources/|creationDate|2005-01-27
+http://www.ilrt.bris.ac.uk/discovery/rdf/resources/|tag|http://www.semanlink.net/tag/links
+http://www.ilrt.bris.ac.uk/discovery/rdf/resources/|tag|http://www.semanlink.net/tag/semantic_web
+http://www.lemonde.fr/religions/article/2016/05/10/assimiler-la-radicalisation-islamiste-a-un-phenomene-sectaire-pose-probleme_4917030_1653130.html|creationDate|2016-05-11
+http://www.lemonde.fr/religions/article/2016/05/10/assimiler-la-radicalisation-islamiste-a-un-phenomene-sectaire-pose-probleme_4917030_1653130.html|tag|http://www.semanlink.net/tag/islamisme
+http://www.lemonde.fr/religions/article/2016/05/10/assimiler-la-radicalisation-islamiste-a-un-phenomene-sectaire-pose-probleme_4917030_1653130.html|tag|http://www.semanlink.net/tag/psychanalyse
+http://www.lemonde.fr/religions/article/2016/05/10/assimiler-la-radicalisation-islamiste-a-un-phenomene-sectaire-pose-probleme_4917030_1653130.html|title|Comment penser le désir sacrificiel au nom de l’islam
+http://www.lemonde.fr/religions/article/2016/05/10/assimiler-la-radicalisation-islamiste-a-un-phenomene-sectaire-pose-probleme_4917030_1653130.html|creationTime|2016-05-11T00:45:36Z
+http://www.w3.org/2001/sw/sweo/public/UseCases/Audi/|creationDate|2008-06-17
+http://www.w3.org/2001/sw/sweo/public/UseCases/Audi/|tag|http://www.semanlink.net/tag/sweo_interest_group
+http://www.w3.org/2001/sw/sweo/public/UseCases/Audi/|tag|http://www.semanlink.net/tag/automobile_and_w3c
+http://www.w3.org/2001/sw/sweo/public/UseCases/Audi/|tag|http://www.semanlink.net/tag/audi
+http://www.w3.org/2001/sw/sweo/public/UseCases/Audi/|title|Use Case: How Ontologies and Rules Help to Advance Automobile Development
+http://www.w3.org/2001/sw/sweo/public/UseCases/Audi/|creationTime|2008-06-17T21:43:43Z
+http://www.mkbergman.com/?p=415|creationDate|2008-02-15
+http://www.mkbergman.com/?p=415|tag|http://www.semanlink.net/tag/graph_visualization
+http://www.mkbergman.com/?p=415|title|Cytoscape: Hands-down Winner for Large-scale Graph Visualization » AI3:::Adaptive Information
+http://www.mkbergman.com/?p=415|creationTime|2008-02-15T23:36:49Z
+http://www.nytimes.com/2012/06/26/technology/in-a-big-network-of-computers-evidence-of-machine-learning.html?pagewanted=1&_r=2|creationDate|2012-08-07
+http://www.nytimes.com/2012/06/26/technology/in-a-big-network-of-computers-evidence-of-machine-learning.html?pagewanted=1&_r=2|tag|http://www.semanlink.net/tag/google
+http://www.nytimes.com/2012/06/26/technology/in-a-big-network-of-computers-evidence-of-machine-learning.html?pagewanted=1&_r=2|tag|http://www.semanlink.net/tag/machine_learning
+http://www.nytimes.com/2012/06/26/technology/in-a-big-network-of-computers-evidence-of-machine-learning.html?pagewanted=1&_r=2|title|In a Big Network of Computers, Evidence of Machine Learning - NYTimes.com
+http://www.nytimes.com/2012/06/26/technology/in-a-big-network-of-computers-evidence-of-machine-learning.html?pagewanted=1&_r=2|creationTime|2012-08-07T16:04:37Z +http://www.fastcoexist.com/1679201/fungi-discovered-in-the-amazon-will-eat-your-plastic|creationDate|2012-05-03 +http://www.fastcoexist.com/1679201/fungi-discovered-in-the-amazon-will-eat-your-plastic|tag|http://www.semanlink.net/tag/champignon +http://www.fastcoexist.com/1679201/fungi-discovered-in-the-amazon-will-eat-your-plastic|tag|http://www.semanlink.net/tag/plastic +http://www.fastcoexist.com/1679201/fungi-discovered-in-the-amazon-will-eat-your-plastic|tag|http://www.semanlink.net/tag/amazonie +http://www.fastcoexist.com/1679201/fungi-discovered-in-the-amazon-will-eat-your-plastic|title|Fungi Discovered In The Amazon Will Eat Your Plastic Co.Exist: World changing ideas and innovation +http://www.fastcoexist.com/1679201/fungi-discovered-in-the-amazon-will-eat-your-plastic|creationTime|2012-05-03T22:24:57Z +http://www.fiftyfoureleven.com/resources/programming/xmlhttprequest/examples|creationDate|2005-05-14 +http://www.fiftyfoureleven.com/resources/programming/xmlhttprequest/examples|tag|http://www.semanlink.net/tag/ajax +http://www.fiftyfoureleven.com/resources/programming/xmlhttprequest/examples|tag|http://www.semanlink.net/tag/links +http://www.fiftyfoureleven.com/resources/programming/xmlhttprequest/examples|title|XMLHttpRequest & Ajax Working Examples - Links and Resources, Fiftyfoureleven.com +https://twitter.com/trbouma/status/1050188438786502656|creationDate|2019-02-06 +https://twitter.com/trbouma/status/1050188438786502656|tag|http://www.semanlink.net/tag/facebook +https://twitter.com/trbouma/status/1050188438786502656|title|"Tim Bouma sur Twitter : ""I take the bus home. I stand behind two people. I notice the weird-looking shoes tied to their backpacks. 
After a few moments, I realize they are rock-climbers and those are grippy shoes...""" +https://twitter.com/trbouma/status/1050188438786502656|creationTime|2019-02-06T23:50:12Z +http://www.lehall.com/galerie/africains/|creationDate|2005-12-11 +http://www.lehall.com/galerie/africains/|tag|http://www.semanlink.net/tag/music_of_africa +http://www.lehall.com/galerie/africains/|title|Les Africains de la Chanson Francophone +http://www.lefigaro.fr/sciences/2018/04/09/01008-20180409ARTFIG00009-des-civilisations-entieres-peuplaient-l-amazonie-au-moyen-ge.php|creationDate|2018-04-09 +http://www.lefigaro.fr/sciences/2018/04/09/01008-20180409ARTFIG00009-des-civilisations-entieres-peuplaient-l-amazonie-au-moyen-ge.php|tag|http://www.semanlink.net/tag/archeologie_amazonienne +http://www.lefigaro.fr/sciences/2018/04/09/01008-20180409ARTFIG00009-des-civilisations-entieres-peuplaient-l-amazonie-au-moyen-ge.php|title|Des civilisations oubliées peuplaient l'Amazonie au Moyen-Âge +http://www.lefigaro.fr/sciences/2018/04/09/01008-20180409ARTFIG00009-des-civilisations-entieres-peuplaient-l-amazonie-au-moyen-ge.php|creationTime|2018-04-09T22:02:34Z +http://raimond.me.uk/slides/isemantics-2013/#/|creationDate|2013-09-07 +http://raimond.me.uk/slides/isemantics-2013/#/|tag|http://www.semanlink.net/tag/bbc_semantic_publishing +http://raimond.me.uk/slides/isemantics-2013/#/|tag|http://www.semanlink.net/tag/bbc +http://raimond.me.uk/slides/isemantics-2013/#/|tag|http://www.semanlink.net/tag/slides +http://raimond.me.uk/slides/isemantics-2013/#/|tag|http://www.semanlink.net/tag/yves_raymond +http://raimond.me.uk/slides/isemantics-2013/#/|title|Current and future uses of Semantic Web technologies at the BBC +http://raimond.me.uk/slides/isemantics-2013/#/|creationTime|2013-09-07T09:45:03Z +http://www.lemonde.fr/planete/article/2013/04/04/comment-la-peche-chinoise-pille-les-oceans-de-la-planete_3154101_3244.html|creationDate|2013-04-04 +http://www.lemonde.fr/planete/article/2013/04/04/comment-la-peche-chinoise-pille-les-oceans-de-la-planete_3154101_3244.html|tag|http://www.semanlink.net/tag/peche +http://www.lemonde.fr/planete/article/2013/04/04/comment-la-peche-chinoise-pille-les-oceans-de-la-planete_3154101_3244.html|tag|http://www.semanlink.net/tag/chine +http://www.lemonde.fr/planete/article/2013/04/04/comment-la-peche-chinoise-pille-les-oceans-de-la-planete_3154101_3244.html|title|Comment la pêche chinoise pille les océans de la planète +http://www.lemonde.fr/planete/article/2013/04/04/comment-la-peche-chinoise-pille-les-oceans-de-la-planete_3154101_3244.html|creationTime|2013-04-04T22:59:36Z +http://smethur.st/posts/176135860|creationDate|2014-09-26 +http://smethur.st/posts/176135860|tag|http://www.semanlink.net/tag/bbc_semantic_publishing +http://smethur.st/posts/176135860|tag|http://www.semanlink.net/tag/bbc_programmes +http://smethur.st/posts/176135860|tag|http://www.semanlink.net/tag/bbc +http://smethur.st/posts/176135860|tag|http://www.semanlink.net/tag/url +http://smethur.st/posts/176135860|title|Designing a URL structure for BBC programmes Smethurst +http://smethur.st/posts/176135860|creationTime|2014-09-26T10:51:44Z +http://www.jonathanboutelle.com/mt/archives/2006/01/howto_debug_jav.html|creationDate|2008-06-04 +http://www.jonathanboutelle.com/mt/archives/2006/01/howto_debug_jav.html|tag|http://www.semanlink.net/tag/debug +http://www.jonathanboutelle.com/mt/archives/2006/01/howto_debug_jav.html|tag|http://www.semanlink.net/tag/internet_explorer 
+http://www.jonathanboutelle.com/mt/archives/2006/01/howto_debug_jav.html|tag|http://www.semanlink.net/tag/javascript +http://www.jonathanboutelle.com/mt/archives/2006/01/howto_debug_jav.html|title|HOW-TO: Debug JavaScript in Internet Explorer +http://www.jonathanboutelle.com/mt/archives/2006/01/howto_debug_jav.html|creationTime|2008-06-04T22:39:29Z +http://www.lemonde.fr/planete/article/2010/05/27/au-danemark-la-mutation-d-aalborg-en-ville-ecologique_1363815_3244.html|creationDate|2010-05-28 +http://www.lemonde.fr/planete/article/2010/05/27/au-danemark-la-mutation-d-aalborg-en-ville-ecologique_1363815_3244.html|tag|http://www.semanlink.net/tag/danemark +http://www.lemonde.fr/planete/article/2010/05/27/au-danemark-la-mutation-d-aalborg-en-ville-ecologique_1363815_3244.html|tag|http://www.semanlink.net/tag/ville +http://www.lemonde.fr/planete/article/2010/05/27/au-danemark-la-mutation-d-aalborg-en-ville-ecologique_1363815_3244.html|tag|http://www.semanlink.net/tag/economie_ecologique +http://www.lemonde.fr/planete/article/2010/05/27/au-danemark-la-mutation-d-aalborg-en-ville-ecologique_1363815_3244.html|title|Au Danemark, la mutation d'Aalborg en ville écologique +http://www.lemonde.fr/planete/article/2010/05/27/au-danemark-la-mutation-d-aalborg-en-ville-ecologique_1363815_3244.html|creationTime|2010-05-28T00:57:01Z +http://www.lemonde.fr/planete/article/2010/05/27/au-danemark-la-mutation-d-aalborg-en-ville-ecologique_1363815_3244.html|source|Le Monde +http://www.lemonde.fr/planete/article/2010/05/27/au-danemark-la-mutation-d-aalborg-en-ville-ecologique_1363815_3244.html|date|2010-05-28 +https://blog.manash.me/how-to-use-pre-trained-word-vectors-from-facebooks-fasttext-a71e6d55f27|creationDate|2017-11-25 +https://blog.manash.me/how-to-use-pre-trained-word-vectors-from-facebooks-fasttext-a71e6d55f27|tag|http://www.semanlink.net/tag/gensim +https://blog.manash.me/how-to-use-pre-trained-word-vectors-from-facebooks-fasttext-a71e6d55f27|tag|http://www.semanlink.net/tag/python_sample_code +https://blog.manash.me/how-to-use-pre-trained-word-vectors-from-facebooks-fasttext-a71e6d55f27|tag|http://www.semanlink.net/tag/fasttext +https://blog.manash.me/how-to-use-pre-trained-word-vectors-from-facebooks-fasttext-a71e6d55f27|tag|http://www.semanlink.net/tag/nlp_sample_code +https://blog.manash.me/how-to-use-pre-trained-word-vectors-from-facebooks-fasttext-a71e6d55f27|title|How to use pre-trained word vectors from Facebook’s fastText +https://blog.manash.me/how-to-use-pre-trained-word-vectors-from-facebooks-fasttext-a71e6d55f27|creationTime|2017-11-25T00:37:09Z +https://developers.facebook.com/docs/opengraph/action-guides|creationDate|2014-10-29 +https://developers.facebook.com/docs/opengraph/action-guides|tag|http://www.semanlink.net/tag/facebook_open_graph +https://developers.facebook.com/docs/opengraph/action-guides|comment|"Actions are the ""verbs"" or the things that people can do in your app." 
+https://developers.facebook.com/docs/opengraph/action-guides|title|Open Graph Actions +https://developers.facebook.com/docs/opengraph/action-guides|creationTime|2014-10-29T02:35:24Z +http://spectrum.ieee.org/energy/nuclear/24-hours-at-fukushima/0|creationDate|2011-11-01 +http://spectrum.ieee.org/energy/nuclear/24-hours-at-fukushima/0|tag|http://www.semanlink.net/tag/fukushima +http://spectrum.ieee.org/energy/nuclear/24-hours-at-fukushima/0|title|24 Hours at Fukushima - IEEE Spectrum +http://spectrum.ieee.org/energy/nuclear/24-hours-at-fukushima/0|creationTime|2011-11-01T23:35:58Z +https://academic.microsoft.com/|creationDate|2019-02-25 +https://academic.microsoft.com/|tag|http://www.semanlink.net/tag/thewebconf_2018 +https://academic.microsoft.com/|tag|http://www.semanlink.net/tag/nlp_microsoft +https://academic.microsoft.com/|tag|http://www.semanlink.net/tag/scientific_information_extraction +https://academic.microsoft.com/|tag|http://www.semanlink.net/tag/publication_scientifique +https://academic.microsoft.com/|title|Microsoft Academic +https://academic.microsoft.com/|creationTime|2019-02-25T10:15:38Z +http://www.cigref.fr/wp/wp-content/uploads/2017/10/CIGREF-Cercle-IA-2017-Mise-en-oeuvre-operationnelle-IA-en-Entreprises.pdf|creationDate|2017-12-18 +http://www.cigref.fr/wp/wp-content/uploads/2017/10/CIGREF-Cercle-IA-2017-Mise-en-oeuvre-operationnelle-IA-en-Entreprises.pdf|comment|"> il ne faut pas oublier qu’il y a toujours deux dimensions dans le sujet de l’IA : +> +>- Celle de l’homme augmenté avec l’analytics, l’aide à la décision, le machine learning +>- Celle de l’homme remplacé avec des tâches entières de l’entreprise qui sont déléguées à la décision de robots intelligents. +> +> Nous ne sommes pas encore à la phase de remplacement, mais plutôt à celle d’augmentation. + +A propos de chatbots dans les RH : +> limiter au maximum l’augmentation de l’homme pour ne pas avoir à baisser les effectifs. + +! 
+




"
+http://www.cigref.fr/wp/wp-content/uploads/2017/10/CIGREF-Cercle-IA-2017-Mise-en-oeuvre-operationnelle-IA-en-Entreprises.pdf|title|Les enjeux de mise en œuvre opérationnelle de l’intelligence artificielle dans les grandes entreprises
+http://www.cigref.fr/wp/wp-content/uploads/2017/10/CIGREF-Cercle-IA-2017-Mise-en-oeuvre-operationnelle-IA-en-Entreprises.pdf|creationTime|2017-12-18T10:47:41Z
+http://www.ldodds.com/blog/archives/000255.html|creationDate|2005-11-18
+http://www.ldodds.com/blog/archives/000255.html|title|Lost Boy: SPARQLing
+http://www.alwayssunny.com/blog/?cat=16|creationDate|2009-04-27
+http://www.alwayssunny.com/blog/?cat=16|tag|http://www.semanlink.net/tag/tomcat_tips
+http://www.alwayssunny.com/blog/?cat=16|tag|http://www.semanlink.net/tag/tomcat_in_eclipse
+http://www.alwayssunny.com/blog/?cat=16|tag|http://www.semanlink.net/tag/eclipse_tip
+http://www.alwayssunny.com/blog/?cat=16|tag|http://www.semanlink.net/tag/memoire_informatique
+http://www.alwayssunny.com/blog/?cat=16|comment|including how to increase mem for tomcat in eclipse
+http://www.alwayssunny.com/blog/?cat=16|title|Bits n Bobs » tomcat
+http://www.alwayssunny.com/blog/?cat=16|creationTime|2009-04-27T23:26:39Z
+http://mashable.com/2009/04/05/europe-social-media/|creationDate|2009-04-14
+http://mashable.com/2009/04/05/europe-social-media/|tag|http://www.semanlink.net/tag/startups
+http://mashable.com/2009/04/05/europe-social-media/|tag|http://www.semanlink.net/tag/europe
+http://mashable.com/2009/04/05/europe-social-media/|tag|http://www.semanlink.net/tag/web
+http://mashable.com/2009/04/05/europe-social-media/|title|European Social Media: 19 Web Startups to Watch
+http://mashable.com/2009/04/05/europe-social-media/|creationTime|2009-04-14T01:02:51Z
+http://lists.w3.org/Archives/Public/www-tag/2005Jun/0092.html|creationDate|2007-11-20
+http://lists.w3.org/Archives/Public/www-tag/2005Jun/0092.html|tag|http://www.semanlink.net/tag/information_resources
+http://lists.w3.org/Archives/Public/www-tag/2005Jun/0092.html|tag|http://www.semanlink.net/tag/roy_t_fielding
+http://lists.w3.org/Archives/Public/www-tag/2005Jun/0092.html|title|Re: More on distinguishing information resources from other resources Roy T. Fielding
+http://lists.w3.org/Archives/Public/www-tag/2005Jun/0092.html|creationTime|2007-11-20T21:32:46Z
+http://www.nasa.gov/mission_pages/cassini/multimedia/N00092001-9-12-07.html|creationDate|2007-09-13
+http://www.nasa.gov/mission_pages/cassini/multimedia/N00092001-9-12-07.html|tag|http://www.semanlink.net/tag/cassini
+http://www.nasa.gov/mission_pages/cassini/multimedia/N00092001-9-12-07.html|tag|http://www.semanlink.net/tag/iapetus
+http://www.nasa.gov/mission_pages/cassini/multimedia/N00092001-9-12-07.html|title|NASA - Close-Up of Iapetus
+http://www.nasa.gov/mission_pages/cassini/multimedia/N00092001-9-12-07.html|creationTime|2007-09-13T22:02:11Z
+https://stackoverflow.com/questions/41752405/running-flask-app-in-a-docker-container|creationDate|2018-03-27
+https://stackoverflow.com/questions/41752405/running-flask-app-in-a-docker-container|tag|http://www.semanlink.net/tag/flask
+https://stackoverflow.com/questions/41752405/running-flask-app-in-a-docker-container|tag|http://www.semanlink.net/tag/docker_python
+https://stackoverflow.com/questions/41752405/running-flask-app-in-a-docker-container|tag|http://www.semanlink.net/tag/stack_overflow
+https://stackoverflow.com/questions/41752405/running-flask-app-in-a-docker-container|title|python - Running Flask app in a Docker container - Stack Overflow
+https://stackoverflow.com/questions/41752405/running-flask-app-in-a-docker-container|creationTime|2018-03-27T13:42:54Z
+https://www.ibm.com/developerworks/community/blogs/c06ef551-0127-483d-a104-cdd02b1cee31/entry/february_3_2014_1_47_pm?lang=en|creationDate|2014-02-05
+https://www.ibm.com/developerworks/community/blogs/c06ef551-0127-483d-a104-cdd02b1cee31/entry/february_3_2014_1_47_pm?lang=en|tag|http://www.semanlink.net/tag/ibm_developerworks
+https://www.ibm.com/developerworks/community/blogs/c06ef551-0127-483d-a104-cdd02b1cee31/entry/february_3_2014_1_47_pm?lang=en|tag|http://www.semanlink.net/tag/rdf
+https://www.ibm.com/developerworks/community/blogs/c06ef551-0127-483d-a104-cdd02b1cee31/entry/february_3_2014_1_47_pm?lang=en|tag|http://www.semanlink.net/tag/json
+https://www.ibm.com/developerworks/community/blogs/c06ef551-0127-483d-a104-cdd02b1cee31/entry/february_3_2014_1_47_pm?lang=en|title|Reflections on a Year spent developing with RDF and JSON (Software Development on the Cloud Exploration)
+https://www.ibm.com/developerworks/community/blogs/c06ef551-0127-483d-a104-cdd02b1cee31/entry/february_3_2014_1_47_pm?lang=en|creationTime|2014-02-05T14:04:09Z
+http://passeurdesciences.blog.lemonde.fr/2013/07/07/decouverte-nouveau-monstre-du-cosmos-astronomie/|creationDate|2013-07-08
+http://passeurdesciences.blog.lemonde.fr/2013/07/07/decouverte-nouveau-monstre-du-cosmos-astronomie/|tag|http://www.semanlink.net/tag/pulsar
+http://passeurdesciences.blog.lemonde.fr/2013/07/07/decouverte-nouveau-monstre-du-cosmos-astronomie/|tag|http://www.semanlink.net/tag/trou_noir
+http://passeurdesciences.blog.lemonde.fr/2013/07/07/decouverte-nouveau-monstre-du-cosmos-astronomie/|tag|http://www.semanlink.net/tag/cosmologie
+http://passeurdesciences.blog.lemonde.fr/2013/07/07/decouverte-nouveau-monstre-du-cosmos-astronomie/|title|Découverte d’un nouveau monstre du cosmos Passeur de sciences
+http://passeurdesciences.blog.lemonde.fr/2013/07/07/decouverte-nouveau-monstre-du-cosmos-astronomie/|creationTime|2013-07-08T09:54:27Z
+http://qudt.org/|creationDate|2011-07-11
+http://qudt.org/|tag|http://www.semanlink.net/tag/rdf_vocabularies
+http://qudt.org/|tag|http://www.semanlink.net/tag/nasa
+http://qudt.org/|tag|http://www.semanlink.net/tag/ontologies +http://qudt.org/|tag|http://www.semanlink.net/tag/topquadrant +http://qudt.org/|title|QUDT - Quantities, Units, Dimensions and Data Types in OWL and XML +http://qudt.org/|creationTime|2011-07-11T21:25:34Z +http://www.nytimes.com/2006/05/28/world/asia/28everest.html|creationDate|2006-05-28 +http://www.nytimes.com/2006/05/28/world/asia/28everest.html|tag|http://www.semanlink.net/tag/alpinisme +http://www.nytimes.com/2006/05/28/world/asia/28everest.html|tag|http://www.semanlink.net/tag/everest +http://www.nytimes.com/2006/05/28/world/asia/28everest.html|title|'Dead' Climber's Survival Impugns Mount Everest Ethics - New York Times +http://www.w3.org/2000/10/swap/doc/|creationDate|2007-02-08 +http://www.w3.org/2000/10/swap/doc/|tag|http://www.semanlink.net/tag/tutorial +http://www.w3.org/2000/10/swap/doc/|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/2000/10/swap/doc/|tag|http://www.semanlink.net/tag/n3 +http://www.w3.org/2000/10/swap/doc/|title|Semantic Web Tutorial Using N3 +http://www.w3.org/2000/10/swap/doc/|creationTime|2007-02-08T23:31:16Z +http://www-sop.inria.fr/members/Fabien.Gandon/docs/www2013/WWW2013_Tutorial_WebSem_FabienGandon.pdf|creationDate|2013-05-14 +http://www-sop.inria.fr/members/Fabien.Gandon/docs/www2013/WWW2013_Tutorial_WebSem_FabienGandon.pdf|tag|http://www.semanlink.net/tag/fabien_gandon +http://www-sop.inria.fr/members/Fabien.Gandon/docs/www2013/WWW2013_Tutorial_WebSem_FabienGandon.pdf|tag|http://www.semanlink.net/tag/semantic_web_introduction +http://www-sop.inria.fr/members/Fabien.Gandon/docs/www2013/WWW2013_Tutorial_WebSem_FabienGandon.pdf|tag|http://www.semanlink.net/tag/www_2013 +http://www-sop.inria.fr/members/Fabien.Gandon/docs/www2013/WWW2013_Tutorial_WebSem_FabienGandon.pdf|title|An introduction to semantic web and linked data +http://www-sop.inria.fr/members/Fabien.Gandon/docs/www2013/WWW2013_Tutorial_WebSem_FabienGandon.pdf|creationTime|2013-05-14T18:33:20Z +http://www.dbgroup.unimo.it/swae08/index.html|creationDate|2008-01-10 +http://www.dbgroup.unimo.it/swae08/index.html|tag|http://www.semanlink.net/tag/workshop +http://www.dbgroup.unimo.it/swae08/index.html|comment|2nd International Workshop on Semantic Web Architectures For Enterprises +http://www.dbgroup.unimo.it/swae08/index.html|title|SWAE 08 +http://www.dbgroup.unimo.it/swae08/index.html|creationTime|2008-01-10T21:45:49Z +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104140|creationDate|2011-05-13 +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104140|tag|http://www.semanlink.net/tag/heredite +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104140|tag|http://www.semanlink.net/tag/jean_claude_ameisen +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104140|title|France Inter > Sur les épaules de Darwin > A la recherche des mystères de l'hérédité (2) +http://sites.radiofrance.fr/franceinter/em/sur-les-epaules-de-darwin/index.php?id=104140|creationTime|2011-05-13T00:21:49Z +http://dannyayers.com/2006/05/01/personal-gnowledge|creationDate|2006-05-06 +http://dannyayers.com/2006/05/01/personal-gnowledge|tag|http://www.semanlink.net/tag/gnowsis +http://dannyayers.com/2006/05/01/personal-gnowledge|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2006/05/01/personal-gnowledge|comment|"""one particular thing I believe they've got strongly right"" (the notion that the 
individual user of the system will have their own personal ontology, derived from a handful of base terms.)
+The user creates describing properties on the go. If a new property is needed, it is instantly created. There is no strong domain/range typing, if the user wants to use the property in an ""unintended"" way, the user is always right to do so. + +" +http://dannyayers.com/2006/05/01/personal-gnowledge|title|Personal Gnowledge +http://ebiquity.umbc.edu/blogger/2006/09/15/nissan-to-test-intelligent-transportation-system/|creationDate|2006-09-19 +http://ebiquity.umbc.edu/blogger/2006/09/15/nissan-to-test-intelligent-transportation-system/|tag|http://www.semanlink.net/tag/nissan +http://ebiquity.umbc.edu/blogger/2006/09/15/nissan-to-test-intelligent-transportation-system/|comment|Nissan announced that it will test a new car-to-road communication system aimed at making Japanese roads safer and less congested. The system sends signals between vehicles and roadside structures such as traffic lights and electricity poles to gauge vehicles’ speed, distance and acceleration, sending a warning signal if a car is in danger of running a red light or colliding with another vehicle. It will be deployed as an upgrade to Nissan’s SkyWings satellite navigation system in use in thousands of the company’s cars in Japan. +http://ebiquity.umbc.edu/blogger/2006/09/15/nissan-to-test-intelligent-transportation-system/|title|Nissan to test intelligent transportation system +http://www.gbif.org/|creationDate|2007-11-14 +http://www.gbif.org/|tag|http://www.semanlink.net/tag/gbif +http://www.gbif.org/|title|GBIF portal: Home +http://www.gbif.org/|creationTime|2007-11-14T13:49:23Z +https://twitter.com/karpathy/status/1013244313327681536|creationDate|2018-07-01 +https://twitter.com/karpathy/status/1013244313327681536|tag|http://www.semanlink.net/tag/nn_tips +https://twitter.com/karpathy/status/1013244313327681536|tag|http://www.semanlink.net/tag/andrej_karpathy +https://twitter.com/karpathy/status/1013244313327681536|comment|"> 1) you didn't try to overfit a single batch first. + +[""if you can't overfit on a tiny batch size, things are definitely broken""](https://youtu.be/gYpoJMlgyXA?t=1h1m22s) + +> 2) you forgot to toggle train/eval mode for the net. +> 3) you forgot to .zero_grad() (in pytorch) before .backward(). +>4) you passed softmaxed outputs to a loss that expects raw logits. 
+ +> 5) you didn't use bias=False for your Linear/Conv2d layer when using BatchNorm, or conversely forget to include it for the output layer .This one won't make you silently fail, but they are spurious parameters + +> 6) thinking view() and permute() are the same thing (& incorrectly using view) + + + +" +https://twitter.com/karpathy/status/1013244313327681536|title|Andrej Karpathy sur Twitter : most common neural net mistakes +https://twitter.com/karpathy/status/1013244313327681536|creationTime|2018-07-01T11:03:14Z +http://demo.openlinksw.com/DAV/JS/rdfbrowser/index.html|creationDate|2007-03-29 +http://demo.openlinksw.com/DAV/JS/rdfbrowser/index.html|tag|http://www.semanlink.net/tag/rdf_browser +http://demo.openlinksw.com/DAV/JS/rdfbrowser/index.html|title|OpenLink RDF Browser +http://demo.openlinksw.com/DAV/JS/rdfbrowser/index.html|creationTime|2007-03-29T00:43:11Z +https://www.fastcompany.com/40432885/its-time-to-break-up-amazon|creationDate|2017-06-20 +https://www.fastcompany.com/40432885/its-time-to-break-up-amazon|tag|http://www.semanlink.net/tag/douglas_rushkoff +https://www.fastcompany.com/40432885/its-time-to-break-up-amazon|tag|http://www.semanlink.net/tag/amazon +https://www.fastcompany.com/40432885/its-time-to-break-up-amazon|comment|Amazon treated the book industry the same way companies like Walmart once treated the territories into which they expanded: Use a war chest of capital to undercut prices, put competitors out of business, become the sole employer in the community, turn employees into part-time shift workers, lobby for deregulation, and effectively extract all the value from a given region before closing up shop and moving to the next one. +https://www.fastcompany.com/40432885/its-time-to-break-up-amazon|title|It’s Time To Break Up Amazon +https://www.fastcompany.com/40432885/its-time-to-break-up-amazon|creationTime|2017-06-20T16:00:36Z +http://www.google.com/googlenotebook/overview.html|creationDate|2006-08-21 +http://www.google.com/googlenotebook/overview.html|tag|http://www.semanlink.net/tag/google +http://www.google.com/googlenotebook/overview.html|title|Google Notebook +http://www.eff.org/deeplinks/archives/004910.php|creationDate|2006-09-19 +http://www.eff.org/deeplinks/archives/004910.php|tag|http://www.semanlink.net/tag/microsoft +http://www.eff.org/deeplinks/archives/004910.php|tag|http://www.semanlink.net/tag/zune +http://www.eff.org/deeplinks/archives/004910.php|tag|http://www.semanlink.net/tag/drm +http://www.eff.org/deeplinks/archives/004910.php|title|EFF: Microsoft's Zune Won't Play Protected Windows Media +http://stackoverflow.com/questions/12313164/installing-subversion-javahl-library-in-mac-os-x|creationDate|2013-09-27 +http://stackoverflow.com/questions/12313164/installing-subversion-javahl-library-in-mac-os-x|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/12313164/installing-subversion-javahl-library-in-mac-os-x|tag|http://www.semanlink.net/tag/mac_os_x +http://stackoverflow.com/questions/12313164/installing-subversion-javahl-library-in-mac-os-x|tag|http://www.semanlink.net/tag/svn +http://stackoverflow.com/questions/12313164/installing-subversion-javahl-library-in-mac-os-x|tag|http://www.semanlink.net/tag/eclipse +http://stackoverflow.com/questions/12313164/installing-subversion-javahl-library-in-mac-os-x|comment|"switch to the pure-Java implementation via ""Window -> Preferences -> Team -> SVN -> SVN inteface"" and choose the entry with ""SVNKit""." 
+http://stackoverflow.com/questions/12313164/installing-subversion-javahl-library-in-mac-os-x|title|eclipse - Installing Subversion JavaHL library in Mac OS X - Stack Overflow +http://stackoverflow.com/questions/12313164/installing-subversion-javahl-library-in-mac-os-x|creationTime|2013-09-27T00:02:54Z +http://www.w3.org/TR/skos-ucr/|creationDate|2008-05-12 +http://www.w3.org/TR/skos-ucr/|tag|http://www.semanlink.net/tag/w3c_working_draft +http://www.w3.org/TR/skos-ucr/|tag|http://www.semanlink.net/tag/skos_w3c_document +http://www.w3.org/TR/skos-ucr/|title|SKOS Use Cases and Requirements +http://www.w3.org/TR/skos-ucr/|creationTime|2008-05-12T17:58:03Z +http://asynchronous.org/blog/archives/2006/01/25/logging_in_json.html|creationDate|2006-02-07 +http://asynchronous.org/blog/archives/2006/01/25/logging_in_json.html|tag|http://www.semanlink.net/tag/json +http://asynchronous.org/blog/archives/2006/01/25/logging_in_json.html|title|AsynchronousBlog: Logging in JSON +http://www.w3.org/wiki/N3inHTML|creationDate|2013-09-06 +http://www.w3.org/wiki/N3inHTML|tag|http://www.semanlink.net/tag/turtle_in_html +http://www.w3.org/wiki/N3inHTML|title|N3inHTML - W3C Wiki +http://www.w3.org/wiki/N3inHTML|creationTime|2013-09-06T18:46:19Z +http://www.xfront.com/sld001.htm|creationDate|2006-06-25 +http://www.xfront.com/sld001.htm|tag|http://www.semanlink.net/tag/soap_vs_rest +http://www.xfront.com/sld001.htm|tag|http://www.semanlink.net/tag/rest +http://www.xfront.com/sld001.htm|title|REST (slides) +https://lejournal.cnrs.fr/billets/energie-les-promesses-de-lhydrogene?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#link_time=1512760037|creationDate|2017-12-08 +https://lejournal.cnrs.fr/billets/energie-les-promesses-de-lhydrogene?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#link_time=1512760037|tag|http://www.semanlink.net/tag/hydrogen_economy +https://lejournal.cnrs.fr/billets/energie-les-promesses-de-lhydrogene?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#link_time=1512760037|title|Énergie : les promesses de l'hydrogène CNRS Le journal +https://lejournal.cnrs.fr/billets/energie-les-promesses-de-lhydrogene?utm_term=Autofeed&utm_campaign=Echobox&utm_medium=Social&utm_source=Twitter#link_time=1512760037|creationTime|2017-12-08T23:32:32Z +http://arstechnica.com/science/news/2012/04/decision-to-entangle-effects-results-of-measurements-taken-beforehand.ars|creationDate|2012-04-28 +http://arstechnica.com/science/news/2012/04/decision-to-entangle-effects-results-of-measurements-taken-beforehand.ars|tag|http://www.semanlink.net/tag/mecanique_quantique +http://arstechnica.com/science/news/2012/04/decision-to-entangle-effects-results-of-measurements-taken-beforehand.ars|title|Quantum decision affects results of measurements taken earlier in time +http://arstechnica.com/science/news/2012/04/decision-to-entangle-effects-results-of-measurements-taken-beforehand.ars|creationTime|2012-04-28T09:55:55Z +http://esw.w3.org/WebID|creationDate|2011-01-18 +http://esw.w3.org/WebID|tag|http://www.semanlink.net/tag/webid +http://esw.w3.org/WebID|title|WebID - ESW Wiki +http://esw.w3.org/WebID|creationTime|2011-01-18T09:25:52Z +http://eprints.ecs.soton.ac.uk/13024/1/wi2006-TR.pdf|creationDate|2011-04-04 +http://eprints.ecs.soton.ac.uk/13024/1/wi2006-TR.pdf|tag|http://www.semanlink.net/tag/semantic_web_assisted_learning +http://eprints.ecs.soton.ac.uk/13024/1/wi2006-TR.pdf|tag|http://www.semanlink.net/tag/e_learning 
+http://eprints.ecs.soton.ac.uk/13024/1/wi2006-TR.pdf|title|Semantic Web Assisted Learning Experience Management – Architecture and Strategy for Collaborative Learning Experience Sharing
+http://eprints.ecs.soton.ac.uk/13024/1/wi2006-TR.pdf|creationTime|2011-04-04T15:41:48Z
+http://maestric.com/doc/mac/apache_php_mysql_snow_leopard|creationDate|2011-07-18
+http://maestric.com/doc/mac/apache_php_mysql_snow_leopard|tag|http://www.semanlink.net/tag/howto
+http://maestric.com/doc/mac/apache_php_mysql_snow_leopard|tag|http://www.semanlink.net/tag/mysql
+http://maestric.com/doc/mac/apache_php_mysql_snow_leopard|tag|http://www.semanlink.net/tag/php
+http://maestric.com/doc/mac/apache_php_mysql_snow_leopard|tag|http://www.semanlink.net/tag/os_x_10_6_snow_leopard
+http://maestric.com/doc/mac/apache_php_mysql_snow_leopard|comment|"Solved for me a pb trying to install neologism: impossible de me connecter à mySQL via PHP. cf php.ini (seulement php.ini.default dans /etc) + modif à l'intérieur. Voir 2008/10/Installing mySQL
"
+http://maestric.com/doc/mac/apache_php_mysql_snow_leopard|title|Install Apache/PHP/MySQL on Snow Leopard
+http://maestric.com/doc/mac/apache_php_mysql_snow_leopard|creationTime|2011-07-18T18:52:23Z
+http://recollection.zepheira.com/|creationDate|2011-09-12
+http://recollection.zepheira.com/|tag|http://www.semanlink.net/tag/digital_collections
+http://recollection.zepheira.com/|comment|Recollection is a free platform for generating and customizing views, (interactive maps, timelines, facets, tag clouds) that allow users to experience your digital collections.
+http://recollection.zepheira.com/|title|Recollection
+http://recollection.zepheira.com/|creationTime|2011-09-12T21:37:23Z
+http://spectrum.ieee.org/computing/software/the-future-of-music/0|creationDate|2013-02-10
+http://spectrum.ieee.org/computing/software/the-future-of-music/0|tag|http://www.semanlink.net/tag/loudness_war
+http://spectrum.ieee.org/computing/software/the-future-of-music/0|title|The Future of Music - Tearing Down the Wall of Noise - IEEE Spectrum
+http://spectrum.ieee.org/computing/software/the-future-of-music/0|creationTime|2013-02-10T22:09:41Z
+http://blog.xebia.fr/2014/03/17/post-vs-put-la-confusion/|creationDate|2017-04-27
+http://blog.xebia.fr/2014/03/17/post-vs-put-la-confusion/|tag|http://www.semanlink.net/tag/http
+http://blog.xebia.fr/2014/03/17/post-vs-put-la-confusion/|title|POST vs. PUT : la confusion Blog Xebia - Cabinet de conseil IT
+http://blog.xebia.fr/2014/03/17/post-vs-put-la-confusion/|creationTime|2017-04-27T11:23:06Z
+https://www.lemonde.fr/societe/article/2018/06/24/banksy-peint-les-murs-de-paris-pour-illustrer-la-crise-des-migrants_5320435_3224.html|creationDate|2018-06-24
+https://www.lemonde.fr/societe/article/2018/06/24/banksy-peint-les-murs-de-paris-pour-illustrer-la-crise-des-migrants_5320435_3224.html|tag|http://www.semanlink.net/tag/crise_des_migrants
+https://www.lemonde.fr/societe/article/2018/06/24/banksy-peint-les-murs-de-paris-pour-illustrer-la-crise-des-migrants_5320435_3224.html|tag|http://www.semanlink.net/tag/banksy
+https://www.lemonde.fr/societe/article/2018/06/24/banksy-peint-les-murs-de-paris-pour-illustrer-la-crise-des-migrants_5320435_3224.html|tag|http://www.semanlink.net/tag/paris
+https://www.lemonde.fr/societe/article/2018/06/24/banksy-peint-les-murs-de-paris-pour-illustrer-la-crise-des-migrants_5320435_3224.html|title|Banksy peint les murs de Paris pour illustrer la crise des migrants
+https://www.lemonde.fr/societe/article/2018/06/24/banksy-peint-les-murs-de-paris-pour-illustrer-la-crise-des-migrants_5320435_3224.html|creationTime|2018-06-24T20:06:31Z
+http://www.bioshare.net/News/NEWS1178699092|creationDate|2007-11-14
+http://www.bioshare.net/News/NEWS1178699092|tag|http://www.semanlink.net/tag/encyclopedia_of_life
+http://www.bioshare.net/News/NEWS1178699092|title|Bioshare: A home page for every species
+http://www.bioshare.net/News/NEWS1178699092|creationTime|2007-11-14T13:47:33Z
+http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.164.416&rep=rep1&type=pdf|creationDate|2013-09-09
+http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.164.416&rep=rep1&type=pdf|tag|http://www.semanlink.net/tag/data_mining
+http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.164.416&rep=rep1&type=pdf|title|Predictive analytics and data mining
+http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.164.416&rep=rep1&type=pdf|creationTime|2013-09-09T15:48:57Z
+http://www.bbc.co.uk/news/science-environment-17769529|creationDate|2012-04-22
+http://www.bbc.co.uk/news/science-environment-17769529|tag|http://www.semanlink.net/tag/synthetic_biology
+http://www.bbc.co.uk/news/science-environment-17769529|tag|http://www.semanlink.net/tag/evolution
+http://www.bbc.co.uk/news/science-environment-17769529|tag|http://www.semanlink.net/tag/adn
+http://www.bbc.co.uk/news/science-environment-17769529|title|Evolution seen in 'synthetic DNA'
+http://www.bbc.co.uk/news/science-environment-17769529|creationTime|2012-04-22T16:12:01Z
+http://www.offconvex.org/2015/12/12/word-embeddings-1/|creationDate|2017-11-21
+http://www.offconvex.org/2015/12/12/word-embeddings-1/|tag|http://www.semanlink.net/tag/good
+http://www.offconvex.org/2015/12/12/word-embeddings-1/|tag|http://www.semanlink.net/tag/sanjeev_arora
+http://www.offconvex.org/2015/12/12/word-embeddings-1/|tag|http://www.semanlink.net/tag/word_embedding
+http://www.offconvex.org/2015/12/12/word-embeddings-1/|comment|([Part 2](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2016%2F02%2F14%2Fword-embeddings-2%2F))
+http://www.offconvex.org/2015/12/12/word-embeddings-1/|relatedDoc|http://www.offconvex.org/2016/02/14/word-embeddings-2/
+http://www.offconvex.org/2015/12/12/word-embeddings-1/|title|Semantic Word Embeddings – Off the convex path
+http://www.offconvex.org/2015/12/12/word-embeddings-1/|creationTime|2017-11-21T01:31:18Z
+http://www.jacuba.com|creationDate|2005-10-31
+http://www.jacuba.com|tag|http://www.semanlink.net/tag/spellchecker +http://www.jacuba.com|tag|http://www.semanlink.net/tag/ajax +http://www.jacuba.com|title|Welcome to Jacuba - Free Online Spellchecker +http://blog.diniscruz.com/2014/02/using-angularjs-in-eclipse-part-1.html|creationDate|2016-03-22 +http://blog.diniscruz.com/2014/02/using-angularjs-in-eclipse-part-1.html|tag|http://www.semanlink.net/tag/tutorial +http://blog.diniscruz.com/2014/02/using-angularjs-in-eclipse-part-1.html|tag|http://www.semanlink.net/tag/angularjs +http://blog.diniscruz.com/2014/02/using-angularjs-in-eclipse-part-1.html|tag|http://www.semanlink.net/tag/eclipse +http://blog.diniscruz.com/2014/02/using-angularjs-in-eclipse-part-1.html|title|Dinis Cruz Blog: Using AngularJS in Eclipse, Part 1) The Basics +http://blog.diniscruz.com/2014/02/using-angularjs-in-eclipse-part-1.html|creationTime|2016-03-22T00:39:23Z +http://www.ldodds.com/blog/2009/12/approaches-to-publishing-linked-data-via-named-graphs/|creationDate|2010-01-20 +http://www.ldodds.com/blog/2009/12/approaches-to-publishing-linked-data-via-named-graphs/|tag|http://www.semanlink.net/tag/named_graphs +http://www.ldodds.com/blog/2009/12/approaches-to-publishing-linked-data-via-named-graphs/|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/2009/12/approaches-to-publishing-linked-data-via-named-graphs/|title|Approaches to Publishing Linked Data via Named Graphs « Lost Boy +http://www.ldodds.com/blog/2009/12/approaches-to-publishing-linked-data-via-named-graphs/|creationTime|2010-01-20T18:39:36Z +http://dacamo76.wordpress.com/2011/07/22/embedding-rapidminer-as-a-library-in-an-application/|creationDate|2013-09-11 +http://dacamo76.wordpress.com/2011/07/22/embedding-rapidminer-as-a-library-in-an-application/|tag|http://www.semanlink.net/tag/rapidminer_java +http://dacamo76.wordpress.com/2011/07/22/embedding-rapidminer-as-a-library-in-an-application/|comment|"github" +http://dacamo76.wordpress.com/2011/07/22/embedding-rapidminer-as-a-library-in-an-application/|title|Embedding RapidMiner as a library in an application Dacamo76's Blog +http://dacamo76.wordpress.com/2011/07/22/embedding-rapidminer-as-a-library-in-an-application/|creationTime|2013-09-11T00:40:02Z +http://www.lemonde.fr/afrique/article/2016/07/26/panama-papers-le-roi-de-l-autobus-au-niger-pratique-la-finance-aux-seychelles_4974817_3212.html|creationDate|2016-07-28 +http://www.lemonde.fr/afrique/article/2016/07/26/panama-papers-le-roi-de-l-autobus-au-niger-pratique-la-finance-aux-seychelles_4974817_3212.html|tag|http://www.semanlink.net/tag/panama_papers +http://www.lemonde.fr/afrique/article/2016/07/26/panama-papers-le-roi-de-l-autobus-au-niger-pratique-la-finance-aux-seychelles_4974817_3212.html|tag|http://www.semanlink.net/tag/niger +http://www.lemonde.fr/afrique/article/2016/07/26/panama-papers-le-roi-de-l-autobus-au-niger-pratique-la-finance-aux-seychelles_4974817_3212.html|title|« Panama Papers » : le roi de l’autobus au Niger pratique la finance aux Seychelles +http://www.lemonde.fr/afrique/article/2016/07/26/panama-papers-le-roi-de-l-autobus-au-niger-pratique-la-finance-aux-seychelles_4974817_3212.html|creationTime|2016-07-28T10:50:17Z +http://blog.octo.com/designer-une-api-rest/#case_body|creationDate|2017-05-15 +http://blog.octo.com/designer-une-api-rest/#case_body|tag|http://www.semanlink.net/tag/rest +http://blog.octo.com/designer-une-api-rest/#case_body|tag|http://www.semanlink.net/tag/api 
+http://blog.octo.com/designer-une-api-rest/#case_body|tag|http://www.semanlink.net/tag/octo
+http://blog.octo.com/designer-une-api-rest/#case_body|title|Designer une API REST OCTO talks !
+http://blog.octo.com/designer-une-api-rest/#case_body|creationTime|2017-05-15T11:32:18Z
+https://arxiv.org/pdf/1507.07998.pdf|creationDate|2017-08-20
+https://arxiv.org/pdf/1507.07998.pdf|tag|http://www.semanlink.net/tag/christopher_olah
+https://arxiv.org/pdf/1507.07998.pdf|tag|http://www.semanlink.net/tag/doc2vec
+https://arxiv.org/pdf/1507.07998.pdf|tag|http://www.semanlink.net/tag/arxiv_doc
+https://arxiv.org/pdf/1507.07998.pdf|arxiv_author|Andrew M. Dai
+https://arxiv.org/pdf/1507.07998.pdf|arxiv_author|Christopher Olah
+https://arxiv.org/pdf/1507.07998.pdf|arxiv_author|Quoc V. Le
+https://arxiv.org/pdf/1507.07998.pdf|title|[1507.07998] Document Embedding with Paragraph Vectors
+https://arxiv.org/pdf/1507.07998.pdf|creationTime|2017-08-20T23:29:27Z
+https://arxiv.org/pdf/1507.07998.pdf|arxiv_summary|"Paragraph Vectors has been recently proposed as an unsupervised method for
learning distributed representations for pieces of texts. In their work, the
authors showed that the method can learn an embedding of movie review texts
which can be leveraged for sentiment analysis. That proof of concept, while
encouraging, was rather narrow. Here we consider tasks other than sentiment
analysis, provide a more thorough comparison of Paragraph Vectors to other
document modelling algorithms such as Latent Dirichlet Allocation, and evaluate
performance of the method as we vary the dimensionality of the learned
representation. We benchmarked the models on two document similarity data sets,
one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method
performs significantly better than other methods, and propose a simple
improvement to enhance embedding quality. Somewhat surprisingly, we also show
that much like word embeddings, vector operations on Paragraph Vectors can
perform useful semantic results."
+https://arxiv.org/pdf/1507.07998.pdf|arxiv_firstAuthor|Andrew M. Dai
+https://arxiv.org/pdf/1507.07998.pdf|arxiv_updated|2015-07-29T01:04:28Z
+https://arxiv.org/pdf/1507.07998.pdf|arxiv_title|Document Embedding with Paragraph Vectors
+https://arxiv.org/pdf/1507.07998.pdf|arxiv_published|2015-07-29T01:04:28Z
+https://arxiv.org/pdf/1507.07998.pdf|arxiv_num|1507.07998
+http://news.stanford.edu/news/2010/february1/shelley-pikes-peak-020310.html|creationDate|2010-03-07
+http://news.stanford.edu/news/2010/february1/shelley-pikes-peak-020310.html|tag|http://www.semanlink.net/tag/automobile
+http://news.stanford.edu/news/2010/february1/shelley-pikes-peak-020310.html|tag|http://www.semanlink.net/tag/audi
+http://news.stanford.edu/news/2010/february1/shelley-pikes-peak-020310.html|tag|http://www.semanlink.net/tag/stanford
+http://news.stanford.edu/news/2010/february1/shelley-pikes-peak-020310.html|title|Stanford's robotic Audi to brave Pikes Peak without a driver
+http://news.stanford.edu/news/2010/february1/shelley-pikes-peak-020310.html|creationTime|2010-03-07T22:21:27Z
+http://owled2007.iut-velizy.uvsq.fr/FinalProgramme.html|creationDate|2007-06-04
+http://owled2007.iut-velizy.uvsq.fr/FinalProgramme.html|tag|http://www.semanlink.net/tag/owled_2007
+http://owled2007.iut-velizy.uvsq.fr/FinalProgramme.html|title|OWLED 2007 Program
+http://owled2007.iut-velizy.uvsq.fr/FinalProgramme.html|creationTime|2007-06-04T14:06:00Z
+http://semanticweb.com/schema-org-chat-googles-r-v-guha_b40607|creationDate|2013-11-15
+http://semanticweb.com/schema-org-chat-googles-r-v-guha_b40607|tag|http://www.semanlink.net/tag/schema_org
+http://semanticweb.com/schema-org-chat-googles-r-v-guha_b40607|tag|http://www.semanlink.net/tag/guha
+http://semanticweb.com/schema-org-chat-googles-r-v-guha_b40607|comment|about 15 percent of the pages we crawl have schema.org markup... over 5 million sites are using it.
+http://semanticweb.com/schema-org-chat-googles-r-v-guha_b40607|title|Where Schema.org Is At: A Chat With Google’s R.V. Guha - Semanticweb.com
+http://semanticweb.com/schema-org-chat-googles-r-v-guha_b40607|creationTime|2013-11-15T14:48:56Z
+http://www.nature.com/news/2011/110228/full/471013a.html|creationDate|2011-03-02
+http://www.nature.com/news/2011/110228/full/471013a.html|tag|http://www.semanlink.net/tag/lhc
+http://www.nature.com/news/2011/110228/full/471013a.html|tag|http://www.semanlink.net/tag/physique_des_particules
+http://www.nature.com/news/2011/110228/full/471013a.html|comment|Latest results from the LHC are casting doubt on the theory of supersymmetry.
+http://www.nature.com/news/2011/110228/full/471013a.html|title|Beautiful theory collides with smashing particle data : Nature News +http://www.nature.com/news/2011/110228/full/471013a.html|creationTime|2011-03-02T22:37:17Z +http://allemagne.blog.lemonde.fr/2016/07/21/un-bilan-tres-personnel-des-annees-merkel/|creationDate|2016-07-31 +http://allemagne.blog.lemonde.fr/2016/07/21/un-bilan-tres-personnel-des-annees-merkel/|tag|http://www.semanlink.net/tag/franco_allemand +http://allemagne.blog.lemonde.fr/2016/07/21/un-bilan-tres-personnel-des-annees-merkel/|tag|http://www.semanlink.net/tag/angela_merkel +http://allemagne.blog.lemonde.fr/2016/07/21/un-bilan-tres-personnel-des-annees-merkel/|title|Un bilan (très personnel) des années Merkel Merkel, acte III +http://allemagne.blog.lemonde.fr/2016/07/21/un-bilan-tres-personnel-des-annees-merkel/|creationTime|2016-07-31T18:40:25Z +https://maps.google.fr/maps?q=belleme+61&hl=en&ie=UTF8&ll=48.346139,0.54348&spn=0.012678,0.022595&sll=48.858859,2.34706&sspn=0.200805,0.361519&t=h&hnear=Bell%C3%AAme,+Orne,+Lower+Normandy&z=16|creationDate|2012-11-18 +https://maps.google.fr/maps?q=belleme+61&hl=en&ie=UTF8&ll=48.346139,0.54348&spn=0.012678,0.022595&sll=48.858859,2.34706&sspn=0.200805,0.361519&t=h&hnear=Bell%C3%AAme,+Orne,+Lower+Normandy&z=16|tag|http://www.semanlink.net/tag/belleme +https://maps.google.fr/maps?q=belleme+61&hl=en&ie=UTF8&ll=48.346139,0.54348&spn=0.012678,0.022595&sll=48.858859,2.34706&sspn=0.200805,0.361519&t=h&hnear=Bell%C3%AAme,+Orne,+Lower+Normandy&z=16|tag|http://www.semanlink.net/tag/crochemelier +https://maps.google.fr/maps?q=belleme+61&hl=en&ie=UTF8&ll=48.346139,0.54348&spn=0.012678,0.022595&sll=48.858859,2.34706&sspn=0.200805,0.361519&t=h&hnear=Bell%C3%AAme,+Orne,+Lower+Normandy&z=16|title|Crochemelier +https://maps.google.fr/maps?q=belleme+61&hl=en&ie=UTF8&ll=48.346139,0.54348&spn=0.012678,0.022595&sll=48.858859,2.34706&sspn=0.200805,0.361519&t=h&hnear=Bell%C3%AAme,+Orne,+Lower+Normandy&z=16|creationTime|2012-11-18T01:38:29Z +http://blog.datumbox.com/new-open-source-machine-learning-framework-written-in-java/|creationDate|2014-10-20 +http://blog.datumbox.com/new-open-source-machine-learning-framework-written-in-java/|tag|http://www.semanlink.net/tag/java +http://blog.datumbox.com/new-open-source-machine-learning-framework-written-in-java/|tag|http://www.semanlink.net/tag/machine_learning +http://blog.datumbox.com/new-open-source-machine-learning-framework-written-in-java/|tag|http://www.semanlink.net/tag/open_source +http://blog.datumbox.com/new-open-source-machine-learning-framework-written-in-java/|title|New open-source Machine Learning Framework written in Java Datumbox +http://blog.datumbox.com/new-open-source-machine-learning-framework-written-in-java/|creationTime|2014-10-20T02:34:54Z +http://www.newsweek.com/id/124122|creationDate|2008-11-05 +http://www.newsweek.com/id/124122|tag|http://www.semanlink.net/tag/obama +http://www.newsweek.com/id/124122|comment|One for the History Books Newsweek.com +http://www.newsweek.com/id/124122|title|Obama's audacious—and risky—address on race +http://www.newsweek.com/id/124122|creationTime|2008-11-05T21:47:42Z +http://semanticweb.com/build-knowledge-graph-video_b43358|creationDate|2014-06-27 +http://semanticweb.com/build-knowledge-graph-video_b43358|tag|http://www.semanlink.net/tag/knowledge_graph +http://semanticweb.com/build-knowledge-graph-video_b43358|tag|http://www.semanlink.net/tag/howto 
+http://semanticweb.com/build-knowledge-graph-video_b43358|tag|http://www.semanlink.net/tag/google +http://semanticweb.com/build-knowledge-graph-video_b43358|tag|http://www.semanlink.net/tag/video +http://semanticweb.com/build-knowledge-graph-video_b43358|title|How to Build Your Own Knowledge Graph (Video – Part 1) - Semanticweb.com +http://semanticweb.com/build-knowledge-graph-video_b43358|creationTime|2014-06-27T16:11:14Z +http://blogs.zdnet.com/BTL/?p=3626|creationDate|2006-09-19 +http://blogs.zdnet.com/BTL/?p=3626|tag|http://www.semanlink.net/tag/drm +http://blogs.zdnet.com/BTL/?p=3626|tag|http://www.semanlink.net/tag/zune +http://blogs.zdnet.com/BTL/?p=3626|title|» EFF on Zune: Risk of DRM/DMCA checkmate no longer a risk. It's reality ZDNet.com +http://www.ft.com/intl/cms/s/0/562692b0-898c-11e2-ad3f-00144feabdc0.html|creationDate|2013-08-15 +http://www.ft.com/intl/cms/s/0/562692b0-898c-11e2-ad3f-00144feabdc0.html|tag|http://www.semanlink.net/tag/colonisation +http://www.ft.com/intl/cms/s/0/562692b0-898c-11e2-ad3f-00144feabdc0.html|tag|http://www.semanlink.net/tag/chine_afrique +http://www.ft.com/intl/cms/s/0/562692b0-898c-11e2-ad3f-00144feabdc0.html|comment|We may not be able to compete immediately in selling manufactured goods to Europe. But in the short term, with the right infrastructure, we have a huge domestic market. Here, we must see China for what it is: a competitor. +http://www.ft.com/intl/cms/s/0/562692b0-898c-11e2-ad3f-00144feabdc0.html|title|Africa must get real about Chinese ties - FT.com +http://www.ft.com/intl/cms/s/0/562692b0-898c-11e2-ad3f-00144feabdc0.html|creationTime|2013-08-15T16:00:55Z +https://shiro.apache.org/|creationDate|2018-08-05 +https://shiro.apache.org/|tag|http://www.semanlink.net/tag/apache_shiro +https://shiro.apache.org/|title|Apache Shiro Simple. Java. Security. 
+https://shiro.apache.org/|creationTime|2018-08-05T18:50:15Z
+http://lemonde-educ.blog.lemonde.fr/2012/12/20/avec-gymglish-le-monde-sembarque-dans-une-aventure-pedago/|creationDate|2012-12-22
+http://lemonde-educ.blog.lemonde.fr/2012/12/20/avec-gymglish-le-monde-sembarque-dans-une-aventure-pedago/|title|Avec Gymglish, « Le Monde » s’embarque dans une aventure « pédago » Peut mieux faire
+http://lemonde-educ.blog.lemonde.fr/2012/12/20/avec-gymglish-le-monde-sembarque-dans-une-aventure-pedago/|creationTime|2012-12-22T14:52:06Z
+http://lists.w3.org/Archives/Public/public-vocabs/2012May/0096.html|creationDate|2013-07-06
+http://lists.w3.org/Archives/Public/public-vocabs/2012May/0096.html|tag|http://www.semanlink.net/tag/schema_org
+http://lists.w3.org/Archives/Public/public-vocabs/2012May/0096.html|tag|http://www.semanlink.net/tag/public_vocabs_w3_org
+http://lists.w3.org/Archives/Public/public-vocabs/2012May/0096.html|tag|http://www.semanlink.net/tag/automotive_ontologies
+http://lists.w3.org/Archives/Public/public-vocabs/2012May/0096.html|title|Schema for vehicles from Richard Le Poidevin on 2012-05-22 (public-vocabs@w3.org from May 2012)
+http://lists.w3.org/Archives/Public/public-vocabs/2012May/0096.html|creationTime|2013-07-06T17:26:56Z
+http://medias.lemonde.fr/mmpub/edt/doc/20071004/963161_avis100.pdf|creationDate|2007-10-04
+http://medias.lemonde.fr/mmpub/edt/doc/20071004/963161_avis100.pdf|tag|http://www.semanlink.net/tag/regroupement_familial_et_test_adn_de_filiation
+http://medias.lemonde.fr/mmpub/edt/doc/20071004/963161_avis100.pdf|comment|"Le CCNE attire l'attention sur la dimension profondément symbolique dans la société de toute mesure qui demande à la vérité biologique d'être l'ultime arbitre dans des questions qui touchent à l'identité sociale et culturelle. Elle conduirait furtivement à généraliser de telles identifications génétiques, qui pourraient se révéler à terme attentatoires aux libertés individuelles. Elle risquerait d'inscrire dans l'univers culturel et social la banalisation de l'identification génétique avec ses risques afférents de discrimination.
Le CCNE redoute les modalités concrètes d'application dans des réalités culturelles très différentes des nôtres. Nos concitoyens comprendraient peut-être mieux l'exacte réalité de tels enjeux s'ils étaient confrontés à des exigences analogues lors de leur propre demande de visa. +
"
+http://medias.lemonde.fr/mmpub/edt/doc/20071004/963161_avis100.pdf|title|Rapport du Comité consultatif national d'éthique sur l'amendement ADN
+http://medias.lemonde.fr/mmpub/edt/doc/20071004/963161_avis100.pdf|creationTime|2007-10-04T22:17:44Z
+http://www.sitepoint.com/blogs/2008/04/22/www2008-beijing-day-1-linked-data-on-the-web-ldow-2008-workshop/|creationDate|2008-05-04
+http://www.sitepoint.com/blogs/2008/04/22/www2008-beijing-day-1-linked-data-on-the-web-ldow-2008-workshop/|tag|http://www.semanlink.net/tag/tabulator
+http://www.sitepoint.com/blogs/2008/04/22/www2008-beijing-day-1-linked-data-on-the-web-ldow-2008-workshop/|tag|http://www.semanlink.net/tag/tim_berners_lee
+http://www.sitepoint.com/blogs/2008/04/22/www2008-beijing-day-1-linked-data-on-the-web-ldow-2008-workshop/|tag|http://www.semanlink.net/tag/ldow2008
+http://www.sitepoint.com/blogs/2008/04/22/www2008-beijing-day-1-linked-data-on-the-web-ldow-2008-workshop/|tag|http://www.semanlink.net/tag/david_peterson
+http://www.sitepoint.com/blogs/2008/04/22/www2008-beijing-day-1-linked-data-on-the-web-ldow-2008-workshop/|title|SitePoint Blogs » WWW2008 Beijing: Day 1 - Linked Data on the Web (LDOW 2008) Workshop
+http://www.sitepoint.com/blogs/2008/04/22/www2008-beijing-day-1-linked-data-on-the-web-ldow-2008-workshop/|creationTime|2008-05-04T13:52:58Z
+http://www.bbc.co.uk/news/technology-21769537|creationDate|2013-03-15
+http://www.bbc.co.uk/news/technology-21769537|tag|http://www.semanlink.net/tag/microscope
+http://www.bbc.co.uk/news/technology-21769537|tag|http://www.semanlink.net/tag/iphone
+http://www.bbc.co.uk/news/technology-21769537|tag|http://www.semanlink.net/tag/diy
+http://www.bbc.co.uk/news/technology-21769537|title|BBC News - Worms detected by converted iPhone microscope
+http://www.bbc.co.uk/news/technology-21769537|creationTime|2013-03-15T12:30:34Z
+https://www.quora.com/Can-I-use-word2vec-to-train-a-machine-learning-classifier|creationDate|2017-07-20
+https://www.quora.com/Can-I-use-word2vec-to-train-a-machine-learning-classifier|tag|http://www.semanlink.net/tag/word2vec_howto
+https://www.quora.com/Can-I-use-word2vec-to-train-a-machine-learning-classifier|title|Can I use word2vec to train a machine learning classifier? - Quora
+https://www.quora.com/Can-I-use-word2vec-to-train-a-machine-learning-classifier|creationTime|2017-07-20T13:42:49Z
+http://mccormickml.com/2016/04/27/word2vec-resources/|creationDate|2017-09-12
+http://mccormickml.com/2016/04/27/word2vec-resources/|tag|http://www.semanlink.net/tag/word2vec
+http://mccormickml.com/2016/04/27/word2vec-resources/|tag|http://www.semanlink.net/tag/links
+http://mccormickml.com/2016/04/27/word2vec-resources/|title|Word2Vec Resources · Chris McCormick
+http://mccormickml.com/2016/04/27/word2vec-resources/|creationTime|2017-09-12T12:21:25Z
+http://piketty.pse.ens.fr/fr/capital21c|creationDate|2015-01-01
+http://piketty.pse.ens.fr/fr/capital21c|tag|http://www.semanlink.net/tag/thomas_piketty
+http://piketty.pse.ens.fr/fr/capital21c|title|Thomas Piketty - Le capital au 21e siècle - Web site
+http://piketty.pse.ens.fr/fr/capital21c|creationTime|2015-01-01T18:08:18Z
+http://www.wired.com/2015/08/coding-physics-course/|creationDate|2015-08-17
+http://www.wired.com/2015/08/coding-physics-course/|tag|http://www.semanlink.net/tag/physique
+http://www.wired.com/2015/08/coding-physics-course/|tag|http://www.semanlink.net/tag/enseignement_scientifique
+http://www.wired.com/2015/08/coding-physics-course/|title|You Should Be Coding in Your Physics Course WIRED
+http://www.wired.com/2015/08/coding-physics-course/|creationTime|2015-08-17T13:01:06Z
+http://www.youtube.com/watch?v=n2aaIbeKQvo&feature=related|creationDate|2009-01-14
+http://www.youtube.com/watch?v=n2aaIbeKQvo&feature=related|tag|http://www.semanlink.net/tag/rap
+http://www.youtube.com/watch?v=n2aaIbeKQvo&feature=related|title|Moussa toujours en retard
+http://www.youtube.com/watch?v=n2aaIbeKQvo&feature=related|creationTime|2009-01-14T23:35:06Z
+http://www.infoworld.com/d/the-industry-standard/hackers-find-new-way-cheat-wall-street-everyones-peril-699|creationDate|2011-01-08
+http://www.infoworld.com/d/the-industry-standard/hackers-find-new-way-cheat-wall-street-everyones-peril-699|tag|http://www.semanlink.net/tag/high_frequency_trading
+http://www.infoworld.com/d/the-industry-standard/hackers-find-new-way-cheat-wall-street-everyones-peril-699|tag|http://www.semanlink.net/tag/hackers
+http://www.infoworld.com/d/the-industry-standard/hackers-find-new-way-cheat-wall-street-everyones-peril-699|title|Hackers find new way to cheat on Wall Street -- to everyone's peril
+http://www.infoworld.com/d/the-industry-standard/hackers-find-new-way-cheat-wall-street-everyones-peril-699|creationTime|2011-01-08T00:25:09Z
+http://www.w3.org/2001/sw/sweo/public/UseCases/BBC/|creationDate|2011-02-04
+http://www.w3.org/2001/sw/sweo/public/UseCases/BBC/|tag|http://www.semanlink.net/tag/bbc_semantic_publishing
+http://www.w3.org/2001/sw/sweo/public/UseCases/BBC/|title|Case Study: Use of Semantic Web Technologies on the BBC Web Sites
+http://www.w3.org/2001/sw/sweo/public/UseCases/BBC/|creationTime|2011-02-04T21:36:46Z
+https://plantnet.org/|creationDate|2018-06-06
+https://plantnet.org/|tag|http://www.semanlink.net/tag/botanique
+https://plantnet.org/|comment|identifiez une plante à partir d'une photo
+https://plantnet.org/|title|Pl@ntNet
+https://plantnet.org/|creationTime|2018-06-06T22:17:42Z
+http://documentcloud.github.com/backbone/docs/todos.html|creationDate|2012-08-28
+http://documentcloud.github.com/backbone/docs/todos.html|tag|http://www.semanlink.net/tag/backbone_js
+http://documentcloud.github.com/backbone/docs/todos.html|title|todos.js
+http://documentcloud.github.com/backbone/docs/todos.html|creationTime|2012-08-28T15:40:16Z +http://www.cs.utexas.edu/~jsequeda/meetings.html|creationDate|2007-09-18 +http://www.cs.utexas.edu/~jsequeda/meetings.html|tag|http://www.semanlink.net/tag/linkto_semanlink +http://www.cs.utexas.edu/~jsequeda/meetings.html|title|Juan Sequeda - Meetings +http://www.cs.utexas.edu/~jsequeda/meetings.html|creationTime|2007-09-18T01:37:55Z +http://www-lipn.univ-paris13.fr/~cerin/documents/mine05.pdf|creationDate|2006-10-18 +http://www-lipn.univ-paris13.fr/~cerin/documents/mine05.pdf|tag|http://www.semanlink.net/tag/koskas +http://www-lipn.univ-paris13.fr/~cerin/documents/mine05.pdf|title|Efficient Data-Structures and Parallel Algorithms for Association Rules Discovery +http://www.salsasud.com/spip.php?article=734|creationDate|2007-04-18 +http://www.salsasud.com/spip.php?article=734|tag|http://www.semanlink.net/tag/salsa +http://www.salsasud.com/spip.php?article=734|title|Orquesta Aragon - Salsasud +http://www.salsasud.com/spip.php?article=734|creationTime|2007-04-18T13:22:06Z +https://supernlp.github.io/2018/11/26/sentreps/|creationDate|2018-11-27 +https://supernlp.github.io/2018/11/26/sentreps/|tag|http://www.semanlink.net/tag/good +https://supernlp.github.io/2018/11/26/sentreps/|tag|http://www.semanlink.net/tag/survey +https://supernlp.github.io/2018/11/26/sentreps/|tag|http://www.semanlink.net/tag/sentence_embeddings +https://supernlp.github.io/2018/11/26/sentreps/|comment|can we go beyond word embeddings and represent sentences in a way that is both principled and generalizable across a wide array of tasks? +https://supernlp.github.io/2018/11/26/sentreps/|title|On sentence representations, pt. 1: what can you fit into a single #$!%@*&% blog post? · Supernatural Language Processing +https://supernlp.github.io/2018/11/26/sentreps/|creationTime|2018-11-27T11:09:42Z +https://www.newscientist.com/article/mg23531444-600-spontaneous-collapses-may-show-how-to-unite-quantum-theory-and-gravity/?utm_campaign=RSS%7CNSNS&utm_source=NSNS&utm_medium=RSS&utm_content=news&campaign_id=RSS%7CNSNS-news|creationDate|2017-10-01 +https://www.newscientist.com/article/mg23531444-600-spontaneous-collapses-may-show-how-to-unite-quantum-theory-and-gravity/?utm_campaign=RSS%7CNSNS&utm_source=NSNS&utm_medium=RSS&utm_content=news&campaign_id=RSS%7CNSNS-news|tag|http://www.semanlink.net/tag/mecanique_quantique +https://www.newscientist.com/article/mg23531444-600-spontaneous-collapses-may-show-how-to-unite-quantum-theory-and-gravity/?utm_campaign=RSS%7CNSNS&utm_source=NSNS&utm_medium=RSS&utm_content=news&campaign_id=RSS%7CNSNS-news|tag|http://www.semanlink.net/tag/gravitation +https://www.newscientist.com/article/mg23531444-600-spontaneous-collapses-may-show-how-to-unite-quantum-theory-and-gravity/?utm_campaign=RSS%7CNSNS&utm_source=NSNS&utm_medium=RSS&utm_content=news&campaign_id=RSS%7CNSNS-news|title|Gravity may be created by strange flashes in the quantum realm New Scientist +https://www.newscientist.com/article/mg23531444-600-spontaneous-collapses-may-show-how-to-unite-quantum-theory-and-gravity/?utm_campaign=RSS%7CNSNS&utm_source=NSNS&utm_medium=RSS&utm_content=news&campaign_id=RSS%7CNSNS-news|creationTime|2017-10-01T17:20:30Z +http://apex.sjtu.edu.cn/|creationDate|2008-04-23 +http://apex.sjtu.edu.cn/|tag|http://www.semanlink.net/tag/semantic_web_company +http://apex.sjtu.edu.cn/|tag|http://www.semanlink.net/tag/chine +http://apex.sjtu.edu.cn/|tag|http://www.semanlink.net/tag/www08 +http://apex.sjtu.edu.cn/|comment|Apex Data & 
Knowledge Management Lab focuses on the research and development in the data and knowledge management area. Current interests include Next Generation Search and Retrieval, Ontology Theory and Engineering, and Semantic Web. +http://apex.sjtu.edu.cn/|title|Apex Data & Knowledge Management Lab +http://apex.sjtu.edu.cn/|creationTime|2008-04-23T14:42:51Z +http://www2008.org/program/program-overview.html|creationDate|2008-04-14 +http://www2008.org/program/program-overview.html|tag|http://www.semanlink.net/tag/www08 +http://www2008.org/program/program-overview.html|title|WWW2008 Conference: program +http://www2008.org/program/program-overview.html|creationTime|2008-04-14T20:50:43Z +https://lejournal.cnrs.fr/billets/energie-les-promesses-de-lhydrogene|creationDate|2018-08-09 +https://lejournal.cnrs.fr/billets/energie-les-promesses-de-lhydrogene|tag|http://www.semanlink.net/tag/hydrogen +https://lejournal.cnrs.fr/billets/energie-les-promesses-de-lhydrogene|title|Énergie : les promesses de l'hydrogène CNRS Le journal +https://lejournal.cnrs.fr/billets/energie-les-promesses-de-lhydrogene|creationTime|2018-08-09T11:04:47Z +https://class.coursera.org/bigdata-002/class/index|creationDate|2013-04-02 +https://class.coursera.org/bigdata-002/class/index|tag|http://www.semanlink.net/tag/coursera_web_intelligence_and_big_data +https://class.coursera.org/bigdata-002/class/index|title|Web Intelligence and Big Data - Coursera +https://class.coursera.org/bigdata-002/class/index|creationTime|2013-04-02T01:23:24Z +http://www.csie.ntu.edu.tw/~cjlin/papers/libshorttext.pdf|creationDate|2014-03-26 +http://www.csie.ntu.edu.tw/~cjlin/papers/libshorttext.pdf|tag|http://www.semanlink.net/tag/libshorttext +http://www.csie.ntu.edu.tw/~cjlin/papers/libshorttext.pdf|comment|(documentation) +http://www.csie.ntu.edu.tw/~cjlin/papers/libshorttext.pdf|title|H.-F. Yu, C.-H. Ho, Y.-C. Juan, and C.-J. Lin. 
LibShortText: A Library for Short-text Classification and Analysis +http://www.csie.ntu.edu.tw/~cjlin/papers/libshorttext.pdf|creationTime|2014-03-26T10:23:18Z +http://apassant.net/blog/2009/06/11/commontag-easy-use-vocabulary-semantic-tagging|creationDate|2009-06-15 +http://apassant.net/blog/2009/06/11/commontag-easy-use-vocabulary-semantic-tagging|tag|http://www.semanlink.net/tag/alexandre_passant +http://apassant.net/blog/2009/06/11/commontag-easy-use-vocabulary-semantic-tagging|tag|http://www.semanlink.net/tag/semantic_tagging +http://apassant.net/blog/2009/06/11/commontag-easy-use-vocabulary-semantic-tagging|tag|http://www.semanlink.net/tag/common_tag +http://apassant.net/blog/2009/06/11/commontag-easy-use-vocabulary-semantic-tagging|title|CommonTag - An easy-to-use vocabulary for Semantic Tagging Alexandre Passant +http://apassant.net/blog/2009/06/11/commontag-easy-use-vocabulary-semantic-tagging|creationTime|2009-06-15T09:15:44Z +http://www.sphere-engineering.com/blog/quickanswers-io-a-new-algorithm.html|creationDate|2014-07-04 +http://www.sphere-engineering.com/blog/quickanswers-io-a-new-algorithm.html|tag|http://www.semanlink.net/tag/nlp +http://www.sphere-engineering.com/blog/quickanswers-io-a-new-algorithm.html|tag|http://www.semanlink.net/tag/nlp_sem_web +http://www.sphere-engineering.com/blog/quickanswers-io-a-new-algorithm.html|tag|http://www.semanlink.net/tag/machine_learning +http://www.sphere-engineering.com/blog/quickanswers-io-a-new-algorithm.html|comment|QuickAnswers.io: a new algorithm Adventures in NLP and the semantic web +http://www.sphere-engineering.com/blog/quickanswers-io-a-new-algorithm.html|title|Sphere Engineering - Machine Learning Solutions - QuickAnswers.io: a new algorithm +http://www.sphere-engineering.com/blog/quickanswers-io-a-new-algorithm.html|creationTime|2014-07-04T09:45:19Z +http://www.easychair.org/OWLED2007/|creationDate|2007-05-05 +http://www.easychair.org/OWLED2007/|tag|http://www.semanlink.net/tag/owled_2007 +http://www.easychair.org/OWLED2007/|title|EasyChair. Login Page for OWLED 2007 +http://www.easychair.org/OWLED2007/|creationTime|2007-05-05T10:19:24Z +https://crossbario.com/blog/Going-Asynchronous-from-Flask-to-Twisted-Klein/|creationDate|2018-04-09 +https://crossbario.com/blog/Going-Asynchronous-from-Flask-to-Twisted-Klein/|tag|http://www.semanlink.net/tag/asynchronous +https://crossbario.com/blog/Going-Asynchronous-from-Flask-to-Twisted-Klein/|tag|http://www.semanlink.net/tag/flask +https://crossbario.com/blog/Going-Asynchronous-from-Flask-to-Twisted-Klein/|comment|Introduction to Twisted Klein, which is like Flask, but allows running asynchronous code. 
+https://crossbario.com/blog/Going-Asynchronous-from-Flask-to-Twisted-Klein/|title|Going asynchronous: from Flask to Twisted Klein +https://crossbario.com/blog/Going-Asynchronous-from-Flask-to-Twisted-Klein/|creationTime|2018-04-09T11:08:17Z +http://semtechbizuk2011.semanticweb.com/sessionPop.cfm?confid=63&proposalid=4385|creationDate|2011-08-16 +http://semtechbizuk2011.semanticweb.com/sessionPop.cfm?confid=63&proposalid=4385|tag|http://www.semanlink.net/tag/automobile +http://semtechbizuk2011.semanticweb.com/sessionPop.cfm?confid=63&proposalid=4385|tag|http://www.semanlink.net/tag/semtechbiz +http://semtechbizuk2011.semanticweb.com/sessionPop.cfm?confid=63&proposalid=4385|tag|http://www.semanlink.net/tag/martin_hepp +http://semtechbizuk2011.semanticweb.com/sessionPop.cfm?confid=63&proposalid=4385|tag|http://www.semanlink.net/tag/seo +http://semtechbizuk2011.semanticweb.com/sessionPop.cfm?confid=63&proposalid=4385|title|Semantic SEO for Automotive +http://semtechbizuk2011.semanticweb.com/sessionPop.cfm?confid=63&proposalid=4385|creationTime|2011-08-16T15:00:16Z +http://webr3.org/apps/specs/jsn3/|creationDate|2012-02-20 +http://webr3.org/apps/specs/jsn3/|tag|http://www.semanlink.net/tag/rdf_in_json +http://webr3.org/apps/specs/jsn3/|tag|http://www.semanlink.net/tag/nathan_rixham +http://webr3.org/apps/specs/jsn3/|title|JSN3 +http://webr3.org/apps/specs/jsn3/|creationTime|2012-02-20T23:05:52Z +http://thegeekyway.com/css-regex-selector-using-regular-expression-css/|creationDate|2017-11-02 +http://thegeekyway.com/css-regex-selector-using-regular-expression-css/|tag|http://www.semanlink.net/tag/css +http://thegeekyway.com/css-regex-selector-using-regular-expression-css/|tag|http://www.semanlink.net/tag/regex +http://thegeekyway.com/css-regex-selector-using-regular-expression-css/|title|CSS Regex selector – Using Regular Expression in CSS – The Geeky Way +http://thegeekyway.com/css-regex-selector-using-regular-expression-css/|creationTime|2017-11-02T17:28:02Z +http://ruben.verborgh.org/blog/2015/01/30/600000-queryable-datasets-and-counting/|creationDate|2015-01-30 +http://ruben.verborgh.org/blog/2015/01/30/600000-queryable-datasets-and-counting/|tag|http://www.semanlink.net/tag/linked_data_fragments +http://ruben.verborgh.org/blog/2015/01/30/600000-queryable-datasets-and-counting/|tag|http://www.semanlink.net/tag/ruben_verborgh +http://ruben.verborgh.org/blog/2015/01/30/600000-queryable-datasets-and-counting/|comment|The LOD Laundromat and Triple Pattern Fragments offer scalable Web querying. +http://ruben.verborgh.org/blog/2015/01/30/600000-queryable-datasets-and-counting/|title|600,000 queryable datasets—and counting Ruben Verborgh +http://ruben.verborgh.org/blog/2015/01/30/600000-queryable-datasets-and-counting/|creationTime|2015-01-30T15:34:01Z +http://googleresearch.blogspot.ca/2013/05/launching-quantum-artificial.html|creationDate|2013-05-17 +http://googleresearch.blogspot.ca/2013/05/launching-quantum-artificial.html|tag|http://www.semanlink.net/tag/quantum_computing +http://googleresearch.blogspot.ca/2013/05/launching-quantum-artificial.html|tag|http://www.semanlink.net/tag/google_research +http://googleresearch.blogspot.ca/2013/05/launching-quantum-artificial.html|tag|http://www.semanlink.net/tag/machine_learning +http://googleresearch.blogspot.ca/2013/05/launching-quantum-artificial.html|comment|We believe quantum computing may help solve some of the most challenging computer science problems, particularly in machine learning. 
+http://googleresearch.blogspot.ca/2013/05/launching-quantum-artificial.html|title|Launching the Quantum Artificial Intelligence Lab +http://googleresearch.blogspot.ca/2013/05/launching-quantum-artificial.html|creationTime|2013-05-17T14:26:41Z +http://www.ldodds.com/blog/2009/11/managing-rdf-using-named-graphs/|creationDate|2009-11-12 +http://www.ldodds.com/blog/2009/11/managing-rdf-using-named-graphs/|tag|http://www.semanlink.net/tag/named_graphs +http://www.ldodds.com/blog/2009/11/managing-rdf-using-named-graphs/|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/2009/11/managing-rdf-using-named-graphs/|title|Managing RDF Using Named Graphs « Lost Boy +http://www.ldodds.com/blog/2009/11/managing-rdf-using-named-graphs/|creationTime|2009-11-12T14:05:38Z +http://www.fr.atosorigin.com/fr-fr/nos_activites/solutions/business_intelligence/default.htm|creationDate|2010-09-21 +http://www.fr.atosorigin.com/fr-fr/nos_activites/solutions/business_intelligence/default.htm|tag|http://www.semanlink.net/tag/business_intelligence +http://www.fr.atosorigin.com/fr-fr/nos_activites/solutions/business_intelligence/default.htm|tag|http://www.semanlink.net/tag/atos_origin +http://www.fr.atosorigin.com/fr-fr/nos_activites/solutions/business_intelligence/default.htm|title|Business Intelligence selon Atos Origin +http://www.fr.atosorigin.com/fr-fr/nos_activites/solutions/business_intelligence/default.htm|creationTime|2010-09-21T14:40:50Z +https://medium.com/@jimmoeller149/programmatic-patent-searches-using-googles-bigquery-public-patent-data-293adad3d30c|creationDate|2019-02-19 +https://medium.com/@jimmoeller149/programmatic-patent-searches-using-googles-bigquery-public-patent-data-293adad3d30c|tag|http://www.semanlink.net/tag/google_patents +https://medium.com/@jimmoeller149/programmatic-patent-searches-using-googles-bigquery-public-patent-data-293adad3d30c|tag|http://www.semanlink.net/tag/bigquery +https://medium.com/@jimmoeller149/programmatic-patent-searches-using-googles-bigquery-public-patent-data-293adad3d30c|title|Programmatic Patent Searches Using Google’s BigQuery & Public Patent Data +https://medium.com/@jimmoeller149/programmatic-patent-searches-using-googles-bigquery-public-patent-data-293adad3d30c|creationTime|2019-02-19T19:21:48Z +http://weblog.burningbird.net/2006/04/13/allo-jena/|creationDate|2006-04-13 +http://weblog.burningbird.net/2006/04/13/allo-jena/|tag|http://www.semanlink.net/tag/jena_user_conference +http://weblog.burningbird.net/2006/04/13/allo-jena/|tag|http://www.semanlink.net/tag/shelley_powers +http://weblog.burningbird.net/2006/04/13/allo-jena/|title|Burningbird » ‘allo Jena +http://ns.inria.fr/nicetag/2010/09/09/voc.html|creationDate|2013-08-09 +http://ns.inria.fr/nicetag/2010/09/09/voc.html|tag|http://www.semanlink.net/tag/ontologies +http://ns.inria.fr/nicetag/2010/09/09/voc.html|tag|http://www.semanlink.net/tag/tag_ontology +http://ns.inria.fr/nicetag/2010/09/09/voc.html|tag|http://www.semanlink.net/tag/fabien_gandon +http://ns.inria.fr/nicetag/2010/09/09/voc.html|title|Nice Tag ontology +http://ns.inria.fr/nicetag/2010/09/09/voc.html|creationTime|2013-08-09T12:41:04Z +https://research.google.com/pubs/pub45482.html|creationDate|2016-08-14 +https://research.google.com/pubs/pub45482.html|tag|http://www.semanlink.net/tag/language_model +https://research.google.com/pubs/pub45482.html|tag|http://www.semanlink.net/tag/lstm_networks +https://research.google.com/pubs/pub45482.html|comment|This clearly demonstrates the significant benefit of using context 
appropriately in natural language (NL) tasks +https://research.google.com/pubs/pub45482.html|title|Contextual LSTM: A Step towards Hierarchical Language Modeling +https://research.google.com/pubs/pub45482.html|creationTime|2016-08-14T21:25:04Z +http://blogs.sun.com/roller/page/bblfish?entry=rest_without_rdf_is_only|creationDate|2006-07-11 +http://blogs.sun.com/roller/page/bblfish?entry=rest_without_rdf_is_only|tag|http://www.semanlink.net/tag/atom +http://blogs.sun.com/roller/page/bblfish?entry=rest_without_rdf_is_only|tag|http://www.semanlink.net/tag/henry_story +http://blogs.sun.com/roller/page/bblfish?entry=rest_without_rdf_is_only|tag|http://www.semanlink.net/tag/semantic_web_services_vs_soap +http://blogs.sun.com/roller/page/bblfish?entry=rest_without_rdf_is_only|title|REST without RDF is only half as bad as SOAP - The Sun BabelFish Blog +http://www.lemonde.fr/proche-orient/article/2015/05/22/il-faut-sauver-palmyre_4638788_3218.html|creationDate|2015-06-21 +http://www.lemonde.fr/proche-orient/article/2015/05/22/il-faut-sauver-palmyre_4638788_3218.html|tag|http://www.semanlink.net/tag/ei +http://www.lemonde.fr/proche-orient/article/2015/05/22/il-faut-sauver-palmyre_4638788_3218.html|tag|http://www.semanlink.net/tag/palmyra +http://www.lemonde.fr/proche-orient/article/2015/05/22/il-faut-sauver-palmyre_4638788_3218.html|title|« L’Etat islamique à Palmyre, c’est l’Etat islamique dans la cour du Louvre » +http://www.lemonde.fr/proche-orient/article/2015/05/22/il-faut-sauver-palmyre_4638788_3218.html|creationTime|2015-06-21T22:49:22Z +http://leobard.twoday.net/stories/982264/|creationDate|2005-09-17 +http://leobard.twoday.net/stories/982264/|tag|http://www.semanlink.net/tag/ajax +http://leobard.twoday.net/stories/982264/|tag|http://www.semanlink.net/tag/sparql +http://leobard.twoday.net/stories/982264/|title|Semantic World and Cyberspace: SPARQL has some use - Kendall Clark +http://education.okfn.org/12-open-education-days-of-christmas/|creationDate|2013-12-18 +http://education.okfn.org/12-open-education-days-of-christmas/|tag|http://www.semanlink.net/tag/open_education +http://education.okfn.org/12-open-education-days-of-christmas/|title|12 Open Education Days of Christmas Open Education Working Group +http://education.okfn.org/12-open-education-days-of-christmas/|creationTime|2013-12-18T16:30:31Z +http://ecologie.blog.lemonde.fr/2013/04/03/peut-on-nettoyer-les-oceans-des-dechets-plastiques/|creationDate|2013-04-04 +http://ecologie.blog.lemonde.fr/2013/04/03/peut-on-nettoyer-les-oceans-des-dechets-plastiques/|tag|http://www.semanlink.net/tag/continent_de_plastique +http://ecologie.blog.lemonde.fr/2013/04/03/peut-on-nettoyer-les-oceans-des-dechets-plastiques/|title|Peut-on nettoyer les océans des déchets plastiques ? 
Eco(lo) +http://ecologie.blog.lemonde.fr/2013/04/03/peut-on-nettoyer-les-oceans-des-dechets-plastiques/|creationTime|2013-04-04T01:14:19Z +https://www.msn.com/en-xl/europe/top-stories/uk-could-cancel-brexit-and-stay-in-eu-on-same-terms-says-french-government/ar-BBL6cyE|creationDate|2018-07-27 +https://www.msn.com/en-xl/europe/top-stories/uk-could-cancel-brexit-and-stay-in-eu-on-same-terms-says-french-government/ar-BBL6cyE|tag|http://www.semanlink.net/tag/brexit +https://www.msn.com/en-xl/europe/top-stories/uk-could-cancel-brexit-and-stay-in-eu-on-same-terms-says-french-government/ar-BBL6cyE|title|UK could cancel Brexit and stay in EU on same terms, says French government +https://www.msn.com/en-xl/europe/top-stories/uk-could-cancel-brexit-and-stay-in-eu-on-same-terms-says-french-government/ar-BBL6cyE|creationTime|2018-07-27T01:28:58Z +https://www.newyorker.com/sporting-scene/replay/world-cup-2018-kylian-mbappe-and-france-troll-their-way-to-the-final|creationDate|2018-07-12 +https://www.newyorker.com/sporting-scene/replay/world-cup-2018-kylian-mbappe-and-france-troll-their-way-to-the-final|tag|http://www.semanlink.net/tag/coupe_du_monde_2018 +https://www.newyorker.com/sporting-scene/replay/world-cup-2018-kylian-mbappe-and-france-troll-their-way-to-the-final|comment|Reason is a lie. Talent makes its own rules. France is in the World Cup final. Vive l’anarchie! +https://www.newyorker.com/sporting-scene/replay/world-cup-2018-kylian-mbappe-and-france-troll-their-way-to-the-final|title|World Cup 2018: Kylian Mbappé and France Troll Their Way to the Final The New Yorker +https://www.newyorker.com/sporting-scene/replay/world-cup-2018-kylian-mbappe-and-france-troll-their-way-to-the-final|creationTime|2018-07-12T00:14:08Z +http://stackoverflow.com/questions/677480/which-browsers-have-problems-caching-xmlhttprequest-responses|creationDate|2009-04-07 +http://stackoverflow.com/questions/677480/which-browsers-have-problems-caching-xmlhttprequest-responses|tag|http://www.semanlink.net/tag/http_cache +http://stackoverflow.com/questions/677480/which-browsers-have-problems-caching-xmlhttprequest-responses|tag|http://www.semanlink.net/tag/ajax +http://stackoverflow.com/questions/677480/which-browsers-have-problems-caching-xmlhttprequest-responses|title|Which browsers have problems caching XMLHTTPRequest responses? - Stack Overflow +http://stackoverflow.com/questions/677480/which-browsers-have-problems-caching-xmlhttprequest-responses|creationTime|2009-04-07T17:07:11Z +http://www.jair.org/media/2934/live-2934-4846-jair.pdf|creationDate|2017-07-10 +http://www.jair.org/media/2934/live-2934-4846-jair.pdf|tag|http://www.semanlink.net/tag/good +http://www.jair.org/media/2934/live-2934-4846-jair.pdf|tag|http://www.semanlink.net/tag/survey +http://www.jair.org/media/2934/live-2934-4846-jair.pdf|tag|http://www.semanlink.net/tag/vector_space_model +http://www.jair.org/media/2934/live-2934-4846-jair.pdf|comment|"Good survey of VSMs, of their 3 classes (based either on term-document, word-context, or pair-pattern matrices), and of their applications. A detailed look at a specific open source project in each category. 
+ + + +" +http://www.jair.org/media/2934/live-2934-4846-jair.pdf|title|From Frequency to Meaning: Vector Space Models of Semantics (2010) +http://www.jair.org/media/2934/live-2934-4846-jair.pdf|creationTime|2017-07-10T15:18:19Z +http://www.theguardian.com/technology/2014/feb/22/robots-google-ray-kurzweil-terminator-singularity-artificial-intelligence|creationDate|2014-03-08 +http://www.theguardian.com/technology/2014/feb/22/robots-google-ray-kurzweil-terminator-singularity-artificial-intelligence|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.theguardian.com/technology/2014/feb/22/robots-google-ray-kurzweil-terminator-singularity-artificial-intelligence|tag|http://www.semanlink.net/tag/google +http://www.theguardian.com/technology/2014/feb/22/robots-google-ray-kurzweil-terminator-singularity-artificial-intelligence|tag|http://www.semanlink.net/tag/ray_kurzweil +http://www.theguardian.com/technology/2014/feb/22/robots-google-ray-kurzweil-terminator-singularity-artificial-intelligence|tag|http://www.semanlink.net/tag/technological_singularity +http://www.theguardian.com/technology/2014/feb/22/robots-google-ray-kurzweil-terminator-singularity-artificial-intelligence|title|Are the robots about to rise? Google's new director of engineering thinks so… Technology The Observer +http://www.theguardian.com/technology/2014/feb/22/robots-google-ray-kurzweil-terminator-singularity-artificial-intelligence|creationTime|2014-03-08T14:35:32Z +http://www.w3.org/DesignIssues/Fractal.html|creationDate|2010-08-23 +http://www.w3.org/DesignIssues/Fractal.html|tag|http://www.semanlink.net/tag/architecture_of_the_world_wide_web +http://www.w3.org/DesignIssues/Fractal.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/DesignIssues/Fractal.html|comment|The Scale-free nature of the Web +http://www.w3.org/DesignIssues/Fractal.html|title|Fractal Web - Commentary on Web Architecture +http://www.w3.org/DesignIssues/Fractal.html|creationTime|2010-08-23T11:23:40Z +http://del.icio.us|creationDate|2005-04-05 +http://del.icio.us|tag|http://www.semanlink.net/tag/del_icio_us +http://del.icio.us|tag|http://www.semanlink.net/tag/bookmarks +http://www.npr.org/blogs/krulwich/2013/01/16/169511949/a-mysterious-patch-of-light-shows-up-in-the-north-dakota-dark|creationDate|2013-01-30 +http://www.npr.org/blogs/krulwich/2013/01/16/169511949/a-mysterious-patch-of-light-shows-up-in-the-north-dakota-dark|tag|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.npr.org/blogs/krulwich/2013/01/16/169511949/a-mysterious-patch-of-light-shows-up-in-the-north-dakota-dark|tag|http://www.semanlink.net/tag/exploitation_petroliere +http://www.npr.org/blogs/krulwich/2013/01/16/169511949/a-mysterious-patch-of-light-shows-up-in-the-north-dakota-dark|title|A Mysterious Patch Of Light Shows Up In The North Dakota Dark : Krulwich Wonders... 
: NPR +http://www.npr.org/blogs/krulwich/2013/01/16/169511949/a-mysterious-patch-of-light-shows-up-in-the-north-dakota-dark|creationTime|2013-01-30T16:57:20Z +http://www.oezratty.net/wordpress/2016/avancees-intelligence-artificielle-2/|creationDate|2016-03-14 +http://www.oezratty.net/wordpress/2016/avancees-intelligence-artificielle-2/|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.oezratty.net/wordpress/2016/avancees-intelligence-artificielle-2/|title|Les avancées de l’intelligence artificielle +http://www.oezratty.net/wordpress/2016/avancees-intelligence-artificielle-2/|creationTime|2016-03-14T22:17:09Z +http://ontorule-project.eu/parrot/parrot|creationDate|2013-03-12 +http://ontorule-project.eu/parrot/parrot|tag|http://www.semanlink.net/tag/parrot +http://ontorule-project.eu/parrot/parrot|title|Parrot, a RIF and OWL documentation service +http://ontorule-project.eu/parrot/parrot|creationTime|2013-03-12T10:19:50Z +http://www.telegraph.co.uk/science/9066393/Ancient-seagrass-Oldest-living-thing-on-earth-discovered-in-Mediterranean-Sea.html|creationDate|2012-02-07 +http://www.telegraph.co.uk/science/9066393/Ancient-seagrass-Oldest-living-thing-on-earth-discovered-in-Mediterranean-Sea.html|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://www.telegraph.co.uk/science/9066393/Ancient-seagrass-Oldest-living-thing-on-earth-discovered-in-Mediterranean-Sea.html|comment|Ancient patches of a giant seagrass in the Mediterranean Sea are now considered the oldest living organism on Earth after scientists dated them as up to 200,000 years old. +http://www.telegraph.co.uk/science/9066393/Ancient-seagrass-Oldest-living-thing-on-earth-discovered-in-Mediterranean-Sea.html|title|Ancient seagrass: 'Oldest living thing on earth' discovered in Mediterranean Sea - Telegraph +http://www.telegraph.co.uk/science/9066393/Ancient-seagrass-Oldest-living-thing-on-earth-discovered-in-Mediterranean-Sea.html|creationTime|2012-02-07T21:04:58Z +http://www-wds.worldbank.org/servlet/WDSContentServer/WDSP/IB/2002/03/15/000094946_02030604011943/Rendered/INDEX/multi0page.txt|creationDate|2006-04-02 +http://www-wds.worldbank.org/servlet/WDSContentServer/WDSP/IB/2002/03/15/000094946_02030604011943/Rendered/INDEX/multi0page.txt|tag|http://www.semanlink.net/tag/niger_agriculture +http://www-wds.worldbank.org/servlet/WDSContentServer/WDSP/IB/2002/03/15/000094946_02030604011943/Rendered/INDEX/multi0page.txt|tag|http://www.semanlink.net/tag/banque_mondiale +http://www-wds.worldbank.org/servlet/WDSContentServer/WDSP/IB/2002/03/15/000094946_02030604011943/Rendered/INDEX/multi0page.txt|tag|http://www.semanlink.net/tag/irrigation +http://www-wds.worldbank.org/servlet/WDSContentServer/WDSP/IB/2002/03/15/000094946_02030604011943/Rendered/INDEX/multi0page.txt|comment|PROJECT APPRAISAL DOCUMENT ON A PROPOSED CREDIT TO THE REP OF NIGER FOR THE PRIVATE IRRIGATION PROMOTION PROJECT (February 21, 2002) +http://www-wds.worldbank.org/servlet/WDSContentServer/WDSP/IB/2002/03/15/000094946_02030604011943/Rendered/INDEX/multi0page.txt|title|World Bank - PROJECT APPRAISAL DOCUMENT ON A PROPOSED CREDIT TO THE REP OF NIGER FOR THE PRIVATE IRRIGATION PROMOTION PROJECT (February 21, 2002) World Bank - Private irrigation promotion project in Niger +http://dspace.mit.edu/handle/1721.1/96942|creationDate|2016-01-13 +http://dspace.mit.edu/handle/1721.1/96942|tag|http://www.semanlink.net/tag/convolutional_neural_network +http://dspace.mit.edu/handle/1721.1/96942|comment|Object detectors emerge from training CNNs to perform scene 
classification...With object detectors emerging as a result of learning to recognize scenes, our work demonstrates that the same network can perform both scene recognition and object localization in a single forward-pass, without ever having been explicitly taught the notion of objects. +http://dspace.mit.edu/handle/1721.1/96942|title|DSpace@MIT: Object detectors emerge in Deep Scene CNNs +http://dspace.mit.edu/handle/1721.1/96942|creationTime|2016-01-13T23:57:15Z +https://www.quora.com/Is-Swagger-any-good|creationDate|2017-04-01 +https://www.quora.com/Is-Swagger-any-good|title|Is Swagger any good? - Quora +https://www.quora.com/Is-Swagger-any-good|creationTime|2017-04-01T18:54:35Z +http://www.fbo.gov/spg/ODA/DARPA/CMO/BAA07-21/Modification%2002.html|creationDate|2007-04-10 +http://www.fbo.gov/spg/ODA/DARPA/CMO/BAA07-21/Modification%2002.html|tag|http://www.semanlink.net/tag/darpa +http://www.fbo.gov/spg/ODA/DARPA/CMO/BAA07-21/Modification%2002.html|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://www.fbo.gov/spg/ODA/DARPA/CMO/BAA07-21/Modification%2002.html|comment|"The Defense Advanced Research Projects Agency (DARPA) is seeking innovative proposals to develop Chemical Robots (ChemBots): soft, flexible, mobile objects that can identify and maneuver through openings smaller than their static structural dimensions; reconstitute size, shape, and functionality after traversal... +" +http://www.fbo.gov/spg/ODA/DARPA/CMO/BAA07-21/Modification%2002.html|title|Chemical robots - DEFENSE SCIENCES RESEARCH AND TECHNOLOGY +http://www.fbo.gov/spg/ODA/DARPA/CMO/BAA07-21/Modification%2002.html|creationTime|2007-04-10T23:11:21Z +http://www.youtube.com/watch?v=E3tgI9AjrCg|creationDate|2009-11-21 +http://www.youtube.com/watch?v=E3tgI9AjrCg|tag|http://www.semanlink.net/tag/economies_d_energie +http://www.youtube.com/watch?v=E3tgI9AjrCg|tag|http://www.semanlink.net/tag/geste_ecologique +http://www.youtube.com/watch?v=E3tgI9AjrCg|tag|http://www.semanlink.net/tag/lumiere +http://www.youtube.com/watch?v=E3tgI9AjrCg|title|Éteins la lumière +http://www.youtube.com/watch?v=E3tgI9AjrCg|creationTime|2009-11-21T18:08:41Z +http://pathway.screenager.be/index.html|creationDate|2006-10-06 +http://pathway.screenager.be/index.html|tag|http://www.semanlink.net/tag/wikipedia +http://pathway.screenager.be/index.html|tag|http://www.semanlink.net/tag/os_x_app +http://pathway.screenager.be/index.html|comment|"Pathway's aim is to help you discover Wikipedia without having to worry whether you'll have enough time to read everything you want, or if you'll get lost. +
+It accomplishes this by presenting you with a graphical ""network"" representation of your visited article pages. A node represents an article, a connection between two nodes means, of course, that you've gone from the first article to the second one. You can save the network you've created to disk and recover it." +http://pathway.screenager.be/index.html|title|Pathway +http://www.w3.org/2005/Talks/1214-Trento-IH/|creationDate|2005-12-16 +http://www.w3.org/2005/Talks/1214-Trento-IH/|tag|http://www.semanlink.net/tag/tutorial +http://www.w3.org/2005/Talks/1214-Trento-IH/|tag|http://www.semanlink.net/tag/semantic_web_introduction +http://www.w3.org/2005/Talks/1214-Trento-IH/|title|Tutorial on Semantic Web Technologies (1) +https://vimeo.com/141675862|creationDate|2015-12-22 +https://vimeo.com/141675862|tag|http://www.semanlink.net/tag/google +https://vimeo.com/141675862|title|10x Not 10% : Product management by orders of magnitude by Ken Norton at Mind the Product 2015 sur Vimeo +https://vimeo.com/141675862|creationTime|2015-12-22T19:31:42Z +http://machinelearningmastery.com/how-to-learn-any-machine-learning-tool/|creationDate|2015-12-30 +http://machinelearningmastery.com/how-to-learn-any-machine-learning-tool/|tag|http://www.semanlink.net/tag/machine_learning_basics +http://machinelearningmastery.com/how-to-learn-any-machine-learning-tool/|tag|http://www.semanlink.net/tag/machine_learning_tool +http://machinelearningmastery.com/how-to-learn-any-machine-learning-tool/|title|How To Learn Any Machine Learning Tool - Machine Learning Mastery +http://machinelearningmastery.com/how-to-learn-any-machine-learning-tool/|creationTime|2015-12-30T00:27:54Z +http://www.guardian.co.uk/world/2013/jun/17/edward-snowden-nsa-files-whistleblower|creationDate|2013-06-17 +http://www.guardian.co.uk/world/2013/jun/17/edward-snowden-nsa-files-whistleblower|tag|http://www.semanlink.net/tag/edward_snowden +http://www.guardian.co.uk/world/2013/jun/17/edward-snowden-nsa-files-whistleblower|title|Edward Snowden Q&A: Dick Cheney traitor charge is 'the highest honor' World news guardian.co.uk +http://www.guardian.co.uk/world/2013/jun/17/edward-snowden-nsa-files-whistleblower|creationTime|2013-06-17T22:35:45Z +http://arxiv.org/pdf/cs.DS/0310019|creationDate|2006-10-18 +http://arxiv.org/pdf/cs.DS/0310019|tag|http://www.semanlink.net/tag/koskas +http://arxiv.org/pdf/cs.DS/0310019|title|A hierarchical Algorithm to Solve the Shortest Path Problem in Valued Graphs +http://www.ldodds.com/blog/archives/000251.html|creationDate|2008-07-06 +http://www.ldodds.com/blog/archives/000251.html|tag|http://www.semanlink.net/tag/arq +http://www.ldodds.com/blog/archives/000251.html|tag|http://www.semanlink.net/tag/sparql_tips +http://www.ldodds.com/blog/archives/000251.html|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/archives/000251.html|title|Lost Boy: Parameterised Queries with SPARQL and ARQ +http://www.ldodds.com/blog/archives/000251.html|creationTime|2008-07-06T04:28:28Z +http://energie-climat.greenpeace.fr/greenpeace-denonce-un-fiasco-climatique-les-politiciens-en-restent-aux-declarations-d%E2%80%99intentions|creationDate|2009-12-19 +http://energie-climat.greenpeace.fr/greenpeace-denonce-un-fiasco-climatique-les-politiciens-en-restent-aux-declarations-d%E2%80%99intentions|tag|http://www.semanlink.net/tag/sommet_de_copenhague 
+http://energie-climat.greenpeace.fr/greenpeace-denonce-un-fiasco-climatique-les-politiciens-en-restent-aux-declarations-d%E2%80%99intentions|tag|http://www.semanlink.net/tag/greenpeace +http://energie-climat.greenpeace.fr/greenpeace-denonce-un-fiasco-climatique-les-politiciens-en-restent-aux-declarations-d%E2%80%99intentions|title|Politicians talk, leaders act +http://energie-climat.greenpeace.fr/greenpeace-denonce-un-fiasco-climatique-les-politiciens-en-restent-aux-declarations-d%E2%80%99intentions|creationTime|2009-12-19T12:35:35Z +http://wiki.leibnizcenter.org/index.php/OWLED_2007|creationDate|2007-06-23 +http://wiki.leibnizcenter.org/index.php/OWLED_2007|tag|http://www.semanlink.net/tag/owled_2007_and_fps +http://wiki.leibnizcenter.org/index.php/OWLED_2007|title|OWLED 2007 - LeibnizWiki +http://wiki.leibnizcenter.org/index.php/OWLED_2007|creationTime|2007-06-23T15:17:56Z +http://twitter.com/cern/|creationDate|2010-03-30 +http://twitter.com/cern/|tag|http://www.semanlink.net/tag/cern +http://twitter.com/cern/|title|CERN (CERN) on Twitter +http://twitter.com/cern/|creationTime|2010-03-30T15:27:16Z +https://sizovs.net/2018/12/17/stop-learning-frameworks/|creationDate|2018-12-19 +https://sizovs.net/2018/12/17/stop-learning-frameworks/|tag|http://www.semanlink.net/tag/frameworks +https://sizovs.net/2018/12/17/stop-learning-frameworks/|tag|http://www.semanlink.net/tag/don_t_waste_my_time +https://sizovs.net/2018/12/17/stop-learning-frameworks/|title|Stop Learning Frameworks – Lifehacks for Developers by Eduards Sizovs +https://sizovs.net/2018/12/17/stop-learning-frameworks/|creationTime|2018-12-19T13:08:07Z +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4394080.stm|creationDate|2005-11-04 +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4394080.stm|tag|http://www.semanlink.net/tag/galileo +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4394080.stm|title|BBC NEWS Science/Nature Smart directions for green ideas +http://ebiquity.umbc.edu/blogger/2009/07/02/nosql-distributed-key-value-data-stores/|creationDate|2009-07-07 +http://ebiquity.umbc.edu/blogger/2009/07/02/nosql-distributed-key-value-data-stores/|tag|http://www.semanlink.net/tag/nosql +http://ebiquity.umbc.edu/blogger/2009/07/02/nosql-distributed-key-value-data-stores/|title|NOSQL: distributed key-value data stores +http://ebiquity.umbc.edu/blogger/2009/07/02/nosql-distributed-key-value-data-stores/|creationTime|2009-07-07T19:26:43Z +http://www.bbc.com/news/science-environment-43116836|creationDate|2018-02-22 +http://www.bbc.com/news/science-environment-43116836|tag|http://www.semanlink.net/tag/paleontologie +http://www.bbc.com/news/science-environment-43116836|tag|http://www.semanlink.net/tag/plante +http://www.bbc.com/news/science-environment-43116836|tag|http://www.semanlink.net/tag/molecular_clock +http://www.bbc.com/news/science-environment-43116836|title|Origins of land plants pushed back in time +http://www.bbc.com/news/science-environment-43116836|creationTime|2018-02-22T00:34:02Z +https://addons.mozilla.org/en-US/firefox/addon/8062|creationDate|2008-09-02 +https://addons.mozilla.org/en-US/firefox/addon/8062|tag|http://www.semanlink.net/tag/openlink +https://addons.mozilla.org/en-US/firefox/addon/8062|tag|http://www.semanlink.net/tag/firefox +https://addons.mozilla.org/en-US/firefox/addon/8062|title|OpenLink Data Explorer :: Firefox Add-ons +https://addons.mozilla.org/en-US/firefox/addon/8062|creationTime|2008-09-02T16:11:32Z 
+https://www.facebook.com/notes/blake-ross/aphantasia-how-it-feels-to-be-blind-in-your-mind/10156834777480504|creationDate|2016-04-26 +https://www.facebook.com/notes/blake-ross/aphantasia-how-it-feels-to-be-blind-in-your-mind/10156834777480504|tag|http://www.semanlink.net/tag/insolite +https://www.facebook.com/notes/blake-ross/aphantasia-how-it-feels-to-be-blind-in-your-mind/10156834777480504|title|Aphantasia: How It Feels To Be Blind In Your Mind +https://www.facebook.com/notes/blake-ross/aphantasia-how-it-feels-to-be-blind-in-your-mind/10156834777480504|creationTime|2016-04-26T01:02:30Z +http://www.bing.com/widget/knowledge|creationDate|2014-04-04 +http://www.bing.com/widget/knowledge|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.bing.com/widget/knowledge|tag|http://www.semanlink.net/tag/bing +http://www.bing.com/widget/knowledge|tag|http://www.semanlink.net/tag/entity_mining +http://www.bing.com/widget/knowledge|title|Bing - Knowledge Widget (Beta) +http://www.bing.com/widget/knowledge|creationTime|2014-04-04T13:19:32Z +https://www.youtube.com/watch?v=ZmNOAtZIgIk|creationDate|2014-10-06 +https://www.youtube.com/watch?v=ZmNOAtZIgIk|tag|http://www.semanlink.net/tag/deep_learning +https://www.youtube.com/watch?v=ZmNOAtZIgIk|tag|http://www.semanlink.net/tag/ng +https://www.youtube.com/watch?v=ZmNOAtZIgIk|tag|http://www.semanlink.net/tag/sparse_dictionary_learning +https://www.youtube.com/watch?v=ZmNOAtZIgIk|tag|http://www.semanlink.net/tag/embeddings +https://www.youtube.com/watch?v=ZmNOAtZIgIk|tag|http://www.semanlink.net/tag/feature_learning +https://www.youtube.com/watch?v=ZmNOAtZIgIk|tag|http://www.semanlink.net/tag/googletechtalks +https://www.youtube.com/watch?v=ZmNOAtZIgIk|comment|"Lessons from neuroscience: one algorithm for all kinds of learning
+Looking for better representations of the input (features)
+Feature learning via sparse coding (sparse linear combinations. Edge detection, quantitatively similar to primary visual cortex)
+Then learning features hierarchies (several layers. ""sparse DBN"" ""deep belief nets"")
+Scaling see 25'07 (algos) ; using GPUs
+Learning recursive representations. ""Generic"" hierarchies on text doesn't make sense; learn feature vector that represent sentences + + + + + + + + +" +https://www.youtube.com/watch?v=ZmNOAtZIgIk|title|Bay Area Vision Meeting: Unsupervised Feature Learning and Deep Learning - YouTube +https://www.youtube.com/watch?v=ZmNOAtZIgIk|creationTime|2014-10-06T00:53:05Z +https://twitter.com/i/web/status/1010499867330871296|creationDate|2018-06-24 +https://twitter.com/i/web/status/1010499867330871296|tag|http://www.semanlink.net/tag/banksy +https://twitter.com/i/web/status/1010499867330871296|comment|"Télérama sur Twitter : ""Bansky Paris Invasion ! Venu incognito comme toujours, le célèbre street artist a déjà laissé deux œuvres qui témoignent de son passage dans les 18e et 19e arrondissements. Elles livrent un message fort au gouvernement français. #Banksy #Paris #streetart https://t.co/AbiT6RfsEw… https://t.co/t302gOpZri""" +https://twitter.com/i/web/status/1010499867330871296|title|"""Bansky Paris Invasion !""" +https://twitter.com/i/web/status/1010499867330871296|creationTime|2018-06-24T20:03:47Z +http://www.theguardian.com/artanddesign/2016/jan/24/banksy-uses-new-artwork-to-criticise-use-of-teargas-in-calais-refugee-camp|creationDate|2016-01-27 +http://www.theguardian.com/artanddesign/2016/jan/24/banksy-uses-new-artwork-to-criticise-use-of-teargas-in-calais-refugee-camp|tag|http://www.semanlink.net/tag/calais_jungle +http://www.theguardian.com/artanddesign/2016/jan/24/banksy-uses-new-artwork-to-criticise-use-of-teargas-in-calais-refugee-camp|tag|http://www.semanlink.net/tag/banksy +http://www.theguardian.com/artanddesign/2016/jan/24/banksy-uses-new-artwork-to-criticise-use-of-teargas-in-calais-refugee-camp|tag|http://www.semanlink.net/tag/censorship +http://www.theguardian.com/artanddesign/2016/jan/24/banksy-uses-new-artwork-to-criticise-use-of-teargas-in-calais-refugee-camp|tag|http://www.semanlink.net/tag/hugo +http://www.theguardian.com/artanddesign/2016/jan/24/banksy-uses-new-artwork-to-criticise-use-of-teargas-in-calais-refugee-camp|title|Banksy's new artwork criticises use of teargas in Calais refugee camp Art and design The Guardian +http://www.theguardian.com/artanddesign/2016/jan/24/banksy-uses-new-artwork-to-criticise-use-of-teargas-in-calais-refugee-camp|creationTime|2016-01-27T00:22:11Z +http://dpreview.com/|creationDate|2005-06-23 +http://dpreview.com/|tag|http://www.semanlink.net/tag/photo_numerique +http://dpreview.com/|tag|http://www.semanlink.net/tag/guide_d_achat +http://dpreview.com/|title|Digital Camera Reviews and News: Digital Photography Review: Forums, Glossary, FAQ +http://sebastianruder.com/optimizing-gradient-descent/|creationDate|2016-01-21 +http://sebastianruder.com/optimizing-gradient-descent/|tag|http://www.semanlink.net/tag/gradient_descent +http://sebastianruder.com/optimizing-gradient-descent/|title|An overview of gradient descent optimization algorithms +http://sebastianruder.com/optimizing-gradient-descent/|creationTime|2016-01-21T14:05:16Z +http://leobard.twoday.net/stories/191619/|creationDate|2008-10-29 +http://leobard.twoday.net/stories/191619/|tag|http://www.semanlink.net/tag/leo_sauermann +http://leobard.twoday.net/stories/191619/|tag|http://www.semanlink.net/tag/converting_data_into_rdf +http://leobard.twoday.net/stories/191619/|comment|Ever tried to convert data into RDF? Extract something from iCalendar or an MP3 file and then use a bit of RDF? Have it all in a graph? 
Then you may be interested in how to choose your weapons wisely: If you want a fast and easy way for RDF integration, follow Patrick Stickler and his URIQA ideas. +http://leobard.twoday.net/stories/191619/|title|semantic weltbild 2.0: why I love Patrick Sticklers URIQA approach (2004) +http://leobard.twoday.net/stories/191619/|creationTime|2008-10-29T17:52:58Z +https://blog.paralleldots.com/data-science/named-entity-recognition-milestone-models-papers-and-technologies/|creationDate|2018-05-20 +https://blog.paralleldots.com/data-science/named-entity-recognition-milestone-models-papers-and-technologies/|tag|http://www.semanlink.net/tag/named_entity_recognition +https://blog.paralleldots.com/data-science/named-entity-recognition-milestone-models-papers-and-technologies/|tag|http://www.semanlink.net/tag/survey +https://blog.paralleldots.com/data-science/named-entity-recognition-milestone-models-papers-and-technologies/|title|Named Entity Recognition: Milestone Papers, Models and Technologies (2017) +https://blog.paralleldots.com/data-science/named-entity-recognition-milestone-models-papers-and-technologies/|creationTime|2018-05-20T22:25:45Z +https://developers.google.com/machine-learning/crash-course/?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|creationDate|2018-03-12 +https://developers.google.com/machine-learning/crash-course/?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|tag|http://www.semanlink.net/tag/google +https://developers.google.com/machine-learning/crash-course/?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|tag|http://www.semanlink.net/tag/machine_learning_course +https://developers.google.com/machine-learning/crash-course/?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|title|Google's Machine Learning Crash Course    Google Developers +https://developers.google.com/machine-learning/crash-course/?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|creationTime|2018-03-12T12:51:52Z +https://www.nytimes.com/2019/05/15/science/synthetic-genome-bacteria.html|creationDate|2019-05-15 +https://www.nytimes.com/2019/05/15/science/synthetic-genome-bacteria.html|tag|http://www.semanlink.net/tag/artificial_life +https://www.nytimes.com/2019/05/15/science/synthetic-genome-bacteria.html|tag|http://www.semanlink.net/tag/synthetic_genome +https://www.nytimes.com/2019/05/15/science/synthetic-genome-bacteria.html|title|Scientists Created Bacteria With a Synthetic Genome. Is This Artificial Life? 
- The New York Times +https://www.nytimes.com/2019/05/15/science/synthetic-genome-bacteria.html|creationTime|2019-05-15T23:11:06Z +http://robohub.org/engineers-design-artificial-synapse-for-brain-on-a-chip-hardware/|creationDate|2018-01-23 +http://robohub.org/engineers-design-artificial-synapse-for-brain-on-a-chip-hardware/|tag|http://www.semanlink.net/tag/brains_in_silicon +http://robohub.org/engineers-design-artificial-synapse-for-brain-on-a-chip-hardware/|title|Engineers design artificial synapse for “brain-on-a-chip” hardware Robohub +http://robohub.org/engineers-design-artificial-synapse-for-brain-on-a-chip-hardware/|creationTime|2018-01-23T01:50:22Z +http://openstructs.org/|creationDate|2010-08-12 +http://openstructs.org/|tag|http://www.semanlink.net/tag/openstructs +http://openstructs.org/|title|OpenStructs Open source data structs and semantic frameworks +http://openstructs.org/|creationTime|2010-08-12T16:19:44Z +https://lejournal.cnrs.fr/articles/en-ethiopie-lhistoire-de-lalibela-se-revele-peu-a-peu|creationDate|2019-03-11 +https://lejournal.cnrs.fr/articles/en-ethiopie-lhistoire-de-lalibela-se-revele-peu-a-peu|tag|http://www.semanlink.net/tag/lalibela +https://lejournal.cnrs.fr/articles/en-ethiopie-lhistoire-de-lalibela-se-revele-peu-a-peu|title|En Éthiopie, l’histoire de Lalibela se révèle peu à peu CNRS Le journal +https://lejournal.cnrs.fr/articles/en-ethiopie-lhistoire-de-lalibela-se-revele-peu-a-peu|creationTime|2019-03-11T19:53:09Z +http://backstage.bbc.co.uk|creationDate|2005-05-16 +http://backstage.bbc.co.uk|tag|http://www.semanlink.net/tag/bbc +http://backstage.bbc.co.uk|tag|http://www.semanlink.net/tag/dev +http://backstage.bbc.co.uk|tag|http://www.semanlink.net/tag/rss +http://backstage.bbc.co.uk|comment|Build what you want using BBC content +http://backstage.bbc.co.uk|title|BBC Backstage :: Front Page :: BBC Backstage +http://blog.netapsys.fr/index.php/post/2012/06/02/m2eclipse-%3A-Maven-dans-Eclipse|creationDate|2012-09-06 +http://blog.netapsys.fr/index.php/post/2012/06/02/m2eclipse-%3A-Maven-dans-Eclipse|tag|http://www.semanlink.net/tag/m2eclipse +http://blog.netapsys.fr/index.php/post/2012/06/02/m2eclipse-%3A-Maven-dans-Eclipse|tag|http://www.semanlink.net/tag/maven +http://blog.netapsys.fr/index.php/post/2012/06/02/m2eclipse-%3A-Maven-dans-Eclipse|title|m2eclipse : Déclarez vos projets Maven dans Eclipse - Netapsys Blog +http://blog.netapsys.fr/index.php/post/2012/06/02/m2eclipse-%3A-Maven-dans-Eclipse|creationTime|2012-09-06T15:38:32Z +http://via-sahel-toulouse.over-blog.com/article-967205-6.html|creationDate|2007-09-20 +http://via-sahel-toulouse.over-blog.com/article-967205-6.html|tag|http://www.semanlink.net/tag/sahel +http://via-sahel-toulouse.over-blog.com/article-967205-6.html|tag|http://www.semanlink.net/tag/plantation_d_arbres +http://via-sahel-toulouse.over-blog.com/article-967205-6.html|comment|"Technique de plantation : Effectuer la plantation deuxième quinzaine de juillet afin que les arbres puissent survivre sans ou presque sans arrosage ultérieur. Il faut toutefois être très présent les premières semaines pour éviter l’assèchement au moment du repiquage. La croissance de l’arbre sera par ailleurs très influencée par les soins ultérieurs reçus par l’arbre (désherbage, paillage, arrosage, engrais). Faire un trou de 60 cm x 60 cm et 50 cm de profondeur. Mettre au fond la terre qui était au dessus et rajouter si possible du fumier. Couper la partie inférieure du pot inciser la partie latérale du pot. Insérer le pot dans le trou sans retirer le plastique. 
Mettre un peu de terre puis retirer le plastique. Mettre encore de la terre puis tasser fermement. Pour améliorer le taux de survie, on peut planter une dizaine de tiges de mil séchées autour du plant, ou une gaine IRRIGASC. On peut également couper un partie des feuilles pour limiter l’évapotranspiration et ne garder que la partie supérieure. Dans le même objectif, recouvrir le sol d’herbes ou de tiges de mil a pied de l’arbre (paillage). Une famille peut au maximum s’occuper de 100 arbres, étant donné qu’elle doit dans le même temps être dans les champs de mil. Si les arbres sont espacés de 10 m, la superficie forestière est donc de 100m x 100 m = 1 Hectare pour 100 arbres. +" +http://via-sahel-toulouse.over-blog.com/article-967205-6.html|title|Reforestation en Pays dogon +http://via-sahel-toulouse.over-blog.com/article-967205-6.html|creationTime|2007-09-20T22:08:15Z +http://readwrite.com/2013/11/27/github-pages-explained#awesm=~oowPZQBq7kbIj5|creationDate|2013-11-28 +http://readwrite.com/2013/11/27/github-pages-explained#awesm=~oowPZQBq7kbIj5|tag|http://www.semanlink.net/tag/github_pages +http://readwrite.com/2013/11/27/github-pages-explained#awesm=~oowPZQBq7kbIj5|title|How To Use GitHub Pages To Make Web Sites While Learning Code – ReadWrite +http://readwrite.com/2013/11/27/github-pages-explained#awesm=~oowPZQBq7kbIj5|creationTime|2013-11-28T23:38:39Z +https://www.stardog.com/blog/extending-nlp/|creationDate|2018-06-14 +https://www.stardog.com/blog/extending-nlp/|tag|http://www.semanlink.net/tag/stardog +https://www.stardog.com/blog/extending-nlp/|comment|How to extend Stardog’s NLP pipeline +https://www.stardog.com/blog/extending-nlp/|title|Extending NLP - Stardog +https://www.stardog.com/blog/extending-nlp/|creationTime|2018-06-14T13:21:28Z +http://www.apple.com/customer-letter/|creationDate|2016-02-18 +http://www.apple.com/customer-letter/|tag|http://www.semanlink.net/tag/fbi_v_apple +http://www.apple.com/customer-letter/|title|A Message to Our Customers - Apple +http://www.apple.com/customer-letter/|creationTime|2016-02-18T00:03:35Z +https://news.cnrs.fr/opinions/imagine-living-in-a-parallel-world?utm_content=bufferbcc95&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-04-01 +https://news.cnrs.fr/opinions/imagine-living-in-a-parallel-world?utm_content=bufferbcc95&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/evolution +https://news.cnrs.fr/opinions/imagine-living-in-a-parallel-world?utm_content=bufferbcc95&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|comment|the evolutionary trajectory of life is no longer considered to be as sensitive to initial conditions as it was in the 1990s +https://news.cnrs.fr/opinions/imagine-living-in-a-parallel-world?utm_content=bufferbcc95&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Imagine Living in a Parallel World CNRS News +https://news.cnrs.fr/opinions/imagine-living-in-a-parallel-world?utm_content=bufferbcc95&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-04-01T00:14:38Z +http://www.protopage.com|creationDate|2005-10-05 +http://www.protopage.com|tag|http://www.semanlink.net/tag/social_content_services +http://www.protopage.com|tag|http://www.semanlink.net/tag/ajax +http://www.protopage.com|title|Protopage - AJAX personal bookmarks springboard +https://github.com/epfml/sent2vec|creationDate|2019-03-25 +https://github.com/epfml/sent2vec|tag|http://www.semanlink.net/tag/sent2vec 
+https://github.com/epfml/sent2vec|comment|> **Think of it as an unsupervised version of FastText, and an extension of word2vec (CBOW) to sentences.** The method uses a simple but efficient unsupervised objective to train distributed representations of sentences. +https://github.com/epfml/sent2vec|title|epfml/sent2vec: General purpose unsupervised sentence representations +https://github.com/epfml/sent2vec|creationTime|2019-03-25T15:35:00Z +http://www.itworld.com/security/177163/30-ridiculously-dumb-tech-warning-labels|creationDate|2011-07-01 +http://www.itworld.com/security/177163/30-ridiculously-dumb-tech-warning-labels|tag|http://www.semanlink.net/tag/rigolo +http://www.itworld.com/security/177163/30-ridiculously-dumb-tech-warning-labels|title|30 ridiculously dumb tech warning labels ITworld +http://www.itworld.com/security/177163/30-ridiculously-dumb-tech-warning-labels|creationTime|2011-07-01T14:56:22Z +http://itunes.unice.fr/|creationDate|2009-03-14 +http://itunes.unice.fr/|tag|http://www.semanlink.net/tag/universite +http://itunes.unice.fr/|tag|http://www.semanlink.net/tag/itunes +http://itunes.unice.fr/|tag|http://www.semanlink.net/tag/online_course_materials +http://itunes.unice.fr/|title|UNS sur iTunes U +http://itunes.unice.fr/|creationTime|2009-03-14T15:45:18Z +http://code.google.com/p/linked-data-api/wiki/JSONFormats|creationDate|2012-02-21 +http://code.google.com/p/linked-data-api/wiki/JSONFormats|tag|http://www.semanlink.net/tag/rdf_in_json +http://code.google.com/p/linked-data-api/wiki/JSONFormats|tag|http://www.semanlink.net/tag/linked_data_api +http://code.google.com/p/linked-data-api/wiki/JSONFormats|title|Survey - Existing JSON RDF formats +http://code.google.com/p/linked-data-api/wiki/JSONFormats|creationTime|2012-02-21T10:05:46Z +http://json-ld.org/|creationDate|2010-12-03 +http://json-ld.org/|tag|http://www.semanlink.net/tag/json_ld +http://json-ld.org/|title|JSON-LD - Expressing Linked Data in JSON +http://json-ld.org/|creationTime|2010-12-03T12:04:49Z +http://www.wdl.org/en/|creationDate|2009-04-22 +http://www.wdl.org/en/|tag|http://www.semanlink.net/tag/unesco +http://www.wdl.org/en/|tag|http://www.semanlink.net/tag/bibliotheque_numerique +http://www.wdl.org/en/|title|World Digital Library Home +http://www.wdl.org/en/|creationTime|2009-04-22T23:14:24Z +http://sig.ma/|creationDate|2009-08-27 +http://sig.ma/|tag|http://www.semanlink.net/tag/sig_ma +http://sig.ma/|title|sig.ma - Semantic Information MAshup +http://sig.ma/|creationTime|2009-08-27T14:27:04Z +https://blog.openai.com/language-unsupervised/|creationDate|2018-06-12 +https://blog.openai.com/language-unsupervised/|tag|http://www.semanlink.net/tag/language_model +https://blog.openai.com/language-unsupervised/|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +https://blog.openai.com/language-unsupervised/|tag|http://www.semanlink.net/tag/unsupervised_deep_pre_training +https://blog.openai.com/language-unsupervised/|tag|http://www.semanlink.net/tag/openai_gpt +https://blog.openai.com/language-unsupervised/|tag|http://www.semanlink.net/tag/attention_is_all_you_need +https://blog.openai.com/language-unsupervised/|comment|"> can we develop one model, train it in an unsupervised way on a large amount of data, and then fine-tune the model to achieve good performance on many different tasks? Our results indicate that this approach works surprisingly well; the same core model can be fine-tuned for very different tasks with minimal adaptation. 
+
a scalable, task-agnostic system based on a combination of two existing ideas: transformers and unsupervised pre-training.
+
unsupervised generative pre-training of language models followed by discriminative fine-tuning.
+"
+https://blog.openai.com/language-unsupervised/|title|Improving Language Understanding with Unsupervised Learning
+https://blog.openai.com/language-unsupervised/|creationTime|2018-06-12T09:16:15Z
+http://www.bladi.net/forum//archive/index.php?t-19078.html|creationDate|2006-10-07
+http://www.bladi.net/forum//archive/index.php?t-19078.html|tag|http://www.semanlink.net/tag/oum_kalsoum
+http://www.bladi.net/forum//archive/index.php?t-19078.html|title|chansons de oum khaltoum [Archives] - Bladi.net
+http://www.lemonde.fr/sciences/article/2012/06/21/supervirus-h5n1-les-secrets-de-fabrication-du-mutant-devoiles_1722681_1650684.html|creationDate|2012-06-30
+http://www.lemonde.fr/sciences/article/2012/06/21/supervirus-h5n1-les-secrets-de-fabrication-du-mutant-devoiles_1722681_1650684.html|tag|http://www.semanlink.net/tag/bioterrorisme
+http://www.lemonde.fr/sciences/article/2012/06/21/supervirus-h5n1-les-secrets-de-fabrication-du-mutant-devoiles_1722681_1650684.html|tag|http://www.semanlink.net/tag/recherche
+http://www.lemonde.fr/sciences/article/2012/06/21/supervirus-h5n1-les-secrets-de-fabrication-du-mutant-devoiles_1722681_1650684.html|tag|http://www.semanlink.net/tag/epidemie
+http://www.lemonde.fr/sciences/article/2012/06/21/supervirus-h5n1-les-secrets-de-fabrication-du-mutant-devoiles_1722681_1650684.html|tag|http://www.semanlink.net/tag/h5n1
+http://www.lemonde.fr/sciences/article/2012/06/21/supervirus-h5n1-les-secrets-de-fabrication-du-mutant-devoiles_1722681_1650684.html|tag|http://www.semanlink.net/tag/publication_scientifique
+http://www.lemonde.fr/sciences/article/2012/06/21/supervirus-h5n1-les-secrets-de-fabrication-du-mutant-devoiles_1722681_1650684.html|title|Supervirus H5N1 : les secrets de fabrication du mutant dévoilés
+http://www.lemonde.fr/sciences/article/2012/06/21/supervirus-h5n1-les-secrets-de-fabrication-du-mutant-devoiles_1722681_1650684.html|creationTime|2012-06-30T00:54:49Z
+http://wikis.sun.com/display/Jersey/Overview+of+JAX-RS+1.0+Features|creationDate|2011-08-19
+http://wikis.sun.com/display/Jersey/Overview+of+JAX-RS+1.0+Features|tag|http://www.semanlink.net/tag/jersey
+http://wikis.sun.com/display/Jersey/Overview+of+JAX-RS+1.0+Features|tag|http://www.semanlink.net/tag/restful_web_services
+http://wikis.sun.com/display/Jersey/Overview+of+JAX-RS+1.0+Features|title|Overview of JAX-RS 1.0 Features - Jersey: RESTful Web services made easy - wikis.sun.com
+http://wikis.sun.com/display/Jersey/Overview+of+JAX-RS+1.0+Features|creationTime|2011-08-19T11:14:42Z
+http://news.bbc.co.uk/1/hi/sci/tech/4940654.stm|creationDate|2006-05-05
+http://news.bbc.co.uk/1/hi/sci/tech/4940654.stm|tag|http://www.semanlink.net/tag/bioinformatics
+http://news.bbc.co.uk/1/hi/sci/tech/4940654.stm|tag|http://www.semanlink.net/tag/ibm
+http://news.bbc.co.uk/1/hi/sci/tech/4940654.stm|tag|http://www.semanlink.net/tag/junk_dna
+http://news.bbc.co.uk/1/hi/sci/tech/4940654.stm|comment|"A mathematical analysis of the human genome suggests that so-called ""junk DNA"" might not be so useless after all."
+http://news.bbc.co.uk/1/hi/sci/tech/4940654.stm|title|BBC NEWS - Salvage prospect for 'junk' DNA +http://news.bbc.co.uk/1/hi/sci/tech/4940654.stm|source|BBC +http://bugbrother.blog.lemonde.fr/2013/06/30/pourquoi-la-nsa-espionne-aussi-votre-papa-oupas/|creationDate|2013-06-30 +http://bugbrother.blog.lemonde.fr/2013/06/30/pourquoi-la-nsa-espionne-aussi-votre-papa-oupas/|tag|http://www.semanlink.net/tag/bug_brother +http://bugbrother.blog.lemonde.fr/2013/06/30/pourquoi-la-nsa-espionne-aussi-votre-papa-oupas/|tag|http://www.semanlink.net/tag/nsa +http://bugbrother.blog.lemonde.fr/2013/06/30/pourquoi-la-nsa-espionne-aussi-votre-papa-oupas/|title|Pourquoi la NSA espionne aussi votre papa (#oupas) BUG BROTHER +http://bugbrother.blog.lemonde.fr/2013/06/30/pourquoi-la-nsa-espionne-aussi-votre-papa-oupas/|creationTime|2013-06-30T19:19:09Z +http://jpspan.sourceforge.net/wiki/doku.php?id=javascript:xmlhttprequest:snippets:request|creationDate|2005-05-20 +http://jpspan.sourceforge.net/wiki/doku.php?id=javascript:xmlhttprequest:snippets:request|tag|http://www.semanlink.net/tag/ajax +http://jpspan.sourceforge.net/wiki/doku.php?id=javascript:xmlhttprequest:snippets:request|tag|http://www.semanlink.net/tag/javascript +http://jpspan.sourceforge.net/wiki/doku.php?id=javascript:xmlhttprequest:snippets:request|title|javascript:xmlhttprequest:snippets:request [JPSPAN] +http://answers.semanticweb.com/questions/10161/is-visually-hidden-rdfa-an-anti-pattern|creationDate|2013-06-16 +http://answers.semanticweb.com/questions/10161/is-visually-hidden-rdfa-an-anti-pattern|tag|http://www.semanlink.net/tag/css +http://answers.semanticweb.com/questions/10161/is-visually-hidden-rdfa-an-anti-pattern|tag|http://www.semanlink.net/tag/rdfa +http://answers.semanticweb.com/questions/10161/is-visually-hidden-rdfa-an-anti-pattern|title|Is visually hidden RDFa an anti-pattern? 
- ANSWERS +http://answers.semanticweb.com/questions/10161/is-visually-hidden-rdfa-an-anti-pattern|creationTime|2013-06-16T11:13:25Z +https://www.monkeyoverflow.com/#/hologan-unsupervised-learning-of-3d-representations-from-natural-images/|creationDate|2019-04-05 +https://www.monkeyoverflow.com/#/hologan-unsupervised-learning-of-3d-representations-from-natural-images/|tag|http://www.semanlink.net/tag/generative_adversarial_network +https://www.monkeyoverflow.com/#/hologan-unsupervised-learning-of-3d-representations-from-natural-images/|tag|http://www.semanlink.net/tag/3d +https://www.monkeyoverflow.com/#/hologan-unsupervised-learning-of-3d-representations-from-natural-images/|title|HOLOGAN: UNSUPERVISED LEARNING OF 3D REPRESENTATIONS FROM NATURAL IMAGES +https://www.monkeyoverflow.com/#/hologan-unsupervised-learning-of-3d-representations-from-natural-images/|creationTime|2019-04-05T09:08:38Z +https://insis.cnrs.fr/fr/personne/thierry-poinsot|creationDate|2019-04-17 +https://insis.cnrs.fr/fr/personne/thierry-poinsot|tag|http://www.semanlink.net/tag/hydrogen +https://insis.cnrs.fr/fr/personne/thierry-poinsot|tag|http://www.semanlink.net/tag/energies_renouvelables +https://insis.cnrs.fr/fr/personne/thierry-poinsot|title|Utilisation de l’hydrogène comme moyen de stockage des énergies renouvelables - Thierry Poinsot INSIS +https://insis.cnrs.fr/fr/personne/thierry-poinsot|creationTime|2019-04-17T12:58:39Z +https://forums.docker.com/t/where-are-images-stored-on-mac-os-x/17165|creationDate|2017-04-04 +https://forums.docker.com/t/where-are-images-stored-on-mac-os-x/17165|tag|http://www.semanlink.net/tag/docker_mac +https://forums.docker.com/t/where-are-images-stored-on-mac-os-x/17165|title|Where are images stored on Mac OS X? - Docker for Mac - Docker Forums +https://forums.docker.com/t/where-are-images-stored-on-mac-os-x/17165|creationTime|2017-04-04T15:37:33Z +http://www.bbc.com/news/science-environment-25576718|creationDate|2014-12-28 +http://www.bbc.com/news/science-environment-25576718|tag|http://www.semanlink.net/tag/beijing_genomics_institute +http://www.bbc.com/news/science-environment-25576718|title|BBC News - China cloning on an 'industrial scale' +http://www.bbc.com/news/science-environment-25576718|creationTime|2014-12-28T10:36:11Z +http://bnode.org/blog/2008/01/22/grawiki-a-wiki-and-aggregator-for-graph-shaped-data|creationDate|2008-01-25 +http://bnode.org/blog/2008/01/22/grawiki-a-wiki-and-aggregator-for-graph-shaped-data|tag|http://www.semanlink.net/tag/to_see +http://bnode.org/blog/2008/01/22/grawiki-a-wiki-and-aggregator-for-graph-shaped-data|tag|http://www.semanlink.net/tag/benjamin_nowack +http://bnode.org/blog/2008/01/22/grawiki-a-wiki-and-aggregator-for-graph-shaped-data|tag|http://www.semanlink.net/tag/sparql +http://bnode.org/blog/2008/01/22/grawiki-a-wiki-and-aggregator-for-graph-shaped-data|tag|http://www.semanlink.net/tag/wiki +http://bnode.org/blog/2008/01/22/grawiki-a-wiki-and-aggregator-for-graph-shaped-data|comment|Grawiki is a SPARQL-based Data Wiki, a little bit inspired by freebase, less impressive, feature-rich, scalable and all that, but, well, OpenSource, SemWeb-enabled, and decentralized (each Grawiki installation can import selected graphs from other ones, back-POSTing is in the works). As it seems that I forgot to write-protect the instance mentioned above, you can play with it if you like. You'll most probably encounter bugs, the built-in inferencer is still at alpha stage, and editing of consolidated bnodes is quite tricky to implement. 
I'll tweak things in a day or two. With Grawiki, I think I finally have (the start of) a tool that could work nicely for ad-hoc RDF editing and aggregation (it can import RDF and certain microformats). Oh, and a personal URI, and a FOAF file. At last ;-) +http://bnode.org/blog/2008/01/22/grawiki-a-wiki-and-aggregator-for-graph-shaped-data|title|Grawiki - A Wiki (and aggregator) for graph-shaped data - benjamin nowack's blog +http://bnode.org/blog/2008/01/22/grawiki-a-wiki-and-aggregator-for-graph-shaped-data|creationTime|2008-01-25T08:47:18Z +http://www.blacksmithinstitute.org/ten.php|creationDate|2006-10-28 +http://www.blacksmithinstitute.org/ten.php|tag|http://www.semanlink.net/tag/polluted_places +http://www.blacksmithinstitute.org/ten.php|title|The World’s Worst Polluted Places - Blacksmith Institute +http://www.seoskeptic.com/bing-mounts-a-personal-offensive-against-googles-knowledge-grap/|creationDate|2013-04-02 +http://www.seoskeptic.com/bing-mounts-a-personal-offensive-against-googles-knowledge-grap/|tag|http://www.semanlink.net/tag/satori +http://www.seoskeptic.com/bing-mounts-a-personal-offensive-against-googles-knowledge-grap/|title|Bing Mounts a Personal Offensive Against Google's Knowledge Graph +http://www.seoskeptic.com/bing-mounts-a-personal-offensive-against-googles-knowledge-grap/|creationTime|2013-04-02T14:03:35Z +http://google-opensource.blogspot.com/2008/07/protocol-buffers-googles-data.html|creationDate|2008-07-16 +http://google-opensource.blogspot.com/2008/07/protocol-buffers-googles-data.html|tag|http://www.semanlink.net/tag/open_source +http://google-opensource.blogspot.com/2008/07/protocol-buffers-googles-data.html|tag|http://www.semanlink.net/tag/data_interchange_format +http://google-opensource.blogspot.com/2008/07/protocol-buffers-googles-data.html|tag|http://www.semanlink.net/tag/google +http://google-opensource.blogspot.com/2008/07/protocol-buffers-googles-data.html|title|Google Open Source Blog: Protocol Buffers: Google's Data Interchange Format +http://google-opensource.blogspot.com/2008/07/protocol-buffers-googles-data.html|creationTime|2008-07-16T12:59:33Z +http://www2012.wwwconference.org/program/accepted-papers/|creationDate|2012-04-18 +http://www2012.wwwconference.org/program/accepted-papers/|tag|http://www.semanlink.net/tag/www_2012 +http://www2012.wwwconference.org/program/accepted-papers/|title|Tracks & Accepted papers www2012 +http://www2012.wwwconference.org/program/accepted-papers/|creationTime|2012-04-18T17:05:53Z +http://www.nytimes.com/2010/01/11/opinion/11krugman.html?th&emc=th|creationDate|2010-01-12 +http://www.nytimes.com/2010/01/11/opinion/11krugman.html?th&emc=th|tag|http://www.semanlink.net/tag/paul_krugman +http://www.nytimes.com/2010/01/11/opinion/11krugman.html?th&emc=th|tag|http://www.semanlink.net/tag/obamacare +http://www.nytimes.com/2010/01/11/opinion/11krugman.html?th&emc=th|tag|http://www.semanlink.net/tag/us_vs_europe +http://www.nytimes.com/2010/01/11/opinion/11krugman.html?th&emc=th|comment|The real lesson from Europe is actually the opposite of what conservatives claim: Europe is an economic success, and that success shows that social democracy works. 
+http://www.nytimes.com/2010/01/11/opinion/11krugman.html?th&emc=th|title|Learning From Europe +http://www.nytimes.com/2010/01/11/opinion/11krugman.html?th&emc=th|creationTime|2010-01-12T14:49:31Z +https://github.com/apigee-labs/rapier|creationDate|2017-01-06 +https://github.com/apigee-labs/rapier|tag|http://www.semanlink.net/tag/apigee +https://github.com/apigee-labs/rapier|comment|"You specify an API with Rapier by specifying in YAML the entities and relationships of the data model that underlies the API, along with query paths traversing the relationships. The details of the API's HTTP messages are deduced from this specification using the standard patterns described in the HTTP specifications, plus a few conventions that we have added. Rapier thereby eliminates the need to repetitively document individual URLs and their methods, which vary only in the entities they accept and return or the queries they express. +
+Rapier takes a data-oriented approach to API design, which aligns with the model of the world-wide-web. If your mental model of an API is a network of HTTP resources identified and located using URLs, you should be comfortable with Rapier. If you think of a web API as a set of 'end-points' with 'parameters' (a traditional service-oriented or RPC model), the Rapier approach may not resonate with you. +
+Since the Rapier specification language is not yet widely known and adopted, we provide a tool that will generate an OpenAPI (formerly known as Swagger) document from a Rapier specification." +https://github.com/apigee-labs/rapier|title|Rapier: specification language created by Apigee +https://github.com/apigee-labs/rapier|creationTime|2017-01-06T13:21:50Z +https://web.stanford.edu/group/brainsinsilicon/|creationDate|2017-09-07 +https://web.stanford.edu/group/brainsinsilicon/|tag|http://www.semanlink.net/tag/stanford +https://web.stanford.edu/group/brainsinsilicon/|tag|http://www.semanlink.net/tag/brains_in_silicon +https://web.stanford.edu/group/brainsinsilicon/|comment|silicon chips that combine analog computation with digital communication, emulating the brain's unique mix of analog and digital techniques. +https://web.stanford.edu/group/brainsinsilicon/|title|Brains in Silicon +https://web.stanford.edu/group/brainsinsilicon/|creationTime|2017-09-07T13:04:04Z +http://www.henriverdier.com/|creationDate|2014-03-27 +http://www.henriverdier.com/|tag|http://www.semanlink.net/tag/henri_verdier +http://www.henriverdier.com/|title|Henri Verdier Blog +http://www.henriverdier.com/|creationTime|2014-03-27T22:42:49Z +http://lsa.colorado.edu/papers/JASIS.lsi.90.pdf|creationDate|2017-07-18 +http://lsa.colorado.edu/papers/JASIS.lsi.90.pdf|tag|http://www.semanlink.net/tag/latent_semantic_analysis +http://lsa.colorado.edu/papers/JASIS.lsi.90.pdf|tag|http://www.semanlink.net/tag/frequently_cited_paper +http://lsa.colorado.edu/papers/JASIS.lsi.90.pdf|comment|"LSI seminal article. Cité plus de 12000 fois + + +" +http://lsa.colorado.edu/papers/JASIS.lsi.90.pdf|title|Indexing by Latent Semantic Analysis - Deerwester et al. (1990) +http://lsa.colorado.edu/papers/JASIS.lsi.90.pdf|creationTime|2017-07-18T15:46:17Z +http://www.internetactu.net/2015/10/12/crispr-la-technologie-qui-bouleverse-la-biotech/|creationDate|2016-06-24 +http://www.internetactu.net/2015/10/12/crispr-la-technologie-qui-bouleverse-la-biotech/|tag|http://www.semanlink.net/tag/crispr_cas9 +http://www.internetactu.net/2015/10/12/crispr-la-technologie-qui-bouleverse-la-biotech/|title|CRISPR, la technologie qui bouleverse la biotech « InternetActu.net +http://www.internetactu.net/2015/10/12/crispr-la-technologie-qui-bouleverse-la-biotech/|creationTime|2016-06-24T22:49:15Z +http://pipes.deri.org/|creationDate|2008-08-18 +http://pipes.deri.org/|tag|http://www.semanlink.net/tag/deri +http://pipes.deri.org/|tag|http://www.semanlink.net/tag/yahoo +http://pipes.deri.org/|tag|http://www.semanlink.net/tag/semantic_mashups +http://pipes.deri.org/|comment|Inspired by Yahoo's Pipes, DERI Web Data Pipes implement a generalization which can also deal with formats such as RDF (RDFa), Microformats and generic XML. 
+http://pipes.deri.org/|title|DERI Pipes
+http://pipes.deri.org/|creationTime|2008-08-18T23:33:28Z
+https://brinxmat.wordpress.com/2014/01/27/rdf-its-difficult-nasty-horrible-and-i-hate-it/|creationDate|2016-04-09
+https://brinxmat.wordpress.com/2014/01/27/rdf-its-difficult-nasty-horrible-and-i-hate-it/|tag|http://www.semanlink.net/tag/brinxmat
+https://brinxmat.wordpress.com/2014/01/27/rdf-its-difficult-nasty-horrible-and-i-hate-it/|tag|http://www.semanlink.net/tag/about_rdf
+https://brinxmat.wordpress.com/2014/01/27/rdf-its-difficult-nasty-horrible-and-i-hate-it/|comment|Now, as long as you have a fixed view of what you want to know (title, author, creation date,…), then RDF is largely pointless, please use whichever row/column-oriented tool you choose. As soon as you’re unsure about the structure of your data, use RDF.
+https://brinxmat.wordpress.com/2014/01/27/rdf-its-difficult-nasty-horrible-and-i-hate-it/|title|RDF, it’s difficult, nasty, horrible and I hate it Brinxmat's blog
+https://brinxmat.wordpress.com/2014/01/27/rdf-its-difficult-nasty-horrible-and-i-hate-it/|creationTime|2016-04-09T01:02:50Z
+http://incubator.apache.org/clerezza/|creationDate|2010-12-16
+http://incubator.apache.org/clerezza/|tag|http://www.semanlink.net/tag/clerezza
+http://incubator.apache.org/clerezza/|comment|Apache Clerezza is an OSGi-based modular application and set of components (bundles) for building RESTful Semantic Web applications and services.
+http://incubator.apache.org/clerezza/|title|Welcome to Apache Clerezza
+http://incubator.apache.org/clerezza/|creationTime|2010-12-16T23:46:23Z
+http://makolab.com/fr/software/semantic/|creationDate|2011-02-16
+http://makolab.com/fr/software/semantic/|tag|http://www.semanlink.net/tag/makolab
+http://makolab.com/fr/software/semantic/|title|Réseau des Significations
+http://makolab.com/fr/software/semantic/|creationTime|2011-02-16T13:21:59Z
+http://www.inf.unibz.it/~franconi/dl/course/dlhb/dlhb-01.pdf|creationDate|2007-07-09
+http://www.inf.unibz.it/~franconi/dl/course/dlhb/dlhb-01.pdf|tag|http://www.semanlink.net/tag/description_logic
+http://www.inf.unibz.it/~franconi/dl/course/dlhb/dlhb-01.pdf|title|An introduction to Description Logics - Daniele Nardi, Ronald J. Brachman
+http://www.inf.unibz.it/~franconi/dl/course/dlhb/dlhb-01.pdf|creationTime|2007-07-09T23:18:59Z
+http://www.nytimes.com/2010/06/20/magazine/20Computer-t.html|creationDate|2010-06-23
+http://www.nytimes.com/2010/06/20/magazine/20Computer-t.html|tag|http://www.semanlink.net/tag/i_b_m_s_watson
+http://www.nytimes.com/2010/06/20/magazine/20Computer-t.html|title|Smarter Than You Think - I.B.M.'s Supercomputer to Challenge 'Jeopardy!'
Champions - NYTimes.com +http://www.nytimes.com/2010/06/20/magazine/20Computer-t.html|creationTime|2010-06-23T00:28:34Z +http://www.grokker.com|creationDate|2005-05-10 +http://www.grokker.com|tag|http://www.semanlink.net/tag/search_engines +http://www.grokker.com|title|Grokker - A New Way to Look at Search +http://scuttle.org/|creationDate|2006-05-28 +http://scuttle.org/|tag|http://www.semanlink.net/tag/tagging +http://scuttle.org/|title|Scuttle: Store, share and tag your favourite links +http://arethuse1.free.fr/|creationDate|2006-01-29 +http://arethuse1.free.fr/|tag|http://www.semanlink.net/tag/mesopotamie +http://arethuse1.free.fr/|title|Mésopotamie : un portail de l'Orient ancien +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000508.html|creationDate|2013-01-29 +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000508.html|tag|http://www.semanlink.net/tag/vso +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000508.html|tag|http://www.semanlink.net/tag/fps_post +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000508.html|tag|http://www.semanlink.net/tag/goodrelations +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000508.html|title|[goodrelations] vso:feature, dbPedia and productontology +http://ebusiness-unibw.org/pipermail/goodrelations/2013-January/000508.html|creationTime|2013-01-29T15:17:03Z +http://www.space.com/scienceastronomy/050509_blackhole_birth.html|creationDate|2005-05-10 +http://www.space.com/scienceastronomy/050509_blackhole_birth.html|tag|http://www.semanlink.net/tag/trou_noir +http://www.space.com/scienceastronomy/050509_blackhole_birth.html|title|Creation of Black Hole Detected +http://www.jgraph.com/index.html|creationDate|2005-09-26 +http://www.jgraph.com/index.html|tag|http://www.semanlink.net/tag/graph_visualization +http://www.jgraph.com/index.html|title|Java Graph Visualization and Layout +https://benlog.com/2016/02/18/on-apple-and-the-fbi/|creationDate|2016-02-19 +https://benlog.com/2016/02/18/on-apple-and-the-fbi/|tag|http://www.semanlink.net/tag/fbi_v_apple +https://benlog.com/2016/02/18/on-apple-and-the-fbi/|title|On Apple and the FBI Benlog +https://benlog.com/2016/02/18/on-apple-and-the-fbi/|creationTime|2016-02-19T13:53:22Z +https://jakevdp.github.io/PythonDataScienceHandbook/index.html|creationDate|2018-06-29 +https://jakevdp.github.io/PythonDataScienceHandbook/index.html|tag|http://www.semanlink.net/tag/pandas +https://jakevdp.github.io/PythonDataScienceHandbook/index.html|tag|http://www.semanlink.net/tag/python_4_data_science +https://jakevdp.github.io/PythonDataScienceHandbook/index.html|tag|http://www.semanlink.net/tag/numpy +https://jakevdp.github.io/PythonDataScienceHandbook/index.html|tag|http://www.semanlink.net/tag/matplotlib +https://jakevdp.github.io/PythonDataScienceHandbook/index.html|title|Python Data Science Handbook (Jake VanderPlas) +https://jakevdp.github.io/PythonDataScienceHandbook/index.html|creationTime|2018-06-29T10:50:30Z +https://www.researchgate.net/publication/221501367_Lingo_Search_Results_Clustering_Algorithm_Based_on_Singular_Value_Decomposition|creationDate|2017-07-11 +https://www.researchgate.net/publication/221501367_Lingo_Search_Results_Clustering_Algorithm_Based_on_Singular_Value_Decomposition|tag|http://www.semanlink.net/tag/clustering_of_text_documents 
+https://www.researchgate.net/publication/221501367_Lingo_Search_Results_Clustering_Algorithm_Based_on_Singular_Value_Decomposition|tag|http://www.semanlink.net/tag/singular_value_decomposition +https://www.researchgate.net/publication/221501367_Lingo_Search_Results_Clustering_Algorithm_Based_on_Singular_Value_Decomposition|tag|http://www.semanlink.net/tag/lingo +https://www.researchgate.net/publication/221501367_Lingo_Search_Results_Clustering_Algorithm_Based_on_Singular_Value_Decomposition|comment|lingo: algorithm for clustering search results, which emphasizes cluster description quality. Implemented in Carrot2. +https://www.researchgate.net/publication/221501367_Lingo_Search_Results_Clustering_Algorithm_Based_on_Singular_Value_Decomposition|title|Lingo: Search Results Clustering Algorithm Based on Singular Value Decomposition (2004) (paper) +https://www.researchgate.net/publication/221501367_Lingo_Search_Results_Clustering_Algorithm_Based_on_Singular_Value_Decomposition|creationTime|2017-07-11T16:58:42Z +https://www.france-universite-numerique-mooc.fr/|creationDate|2015-03-03 +https://www.france-universite-numerique-mooc.fr/|tag|http://www.semanlink.net/tag/mooc +https://www.france-universite-numerique-mooc.fr/|title|FUN - France Université Numérique +https://www.france-universite-numerique-mooc.fr/|creationTime|2015-03-03T23:00:07Z +http://www.infoworld.com/d/open-source-software/jeff-hawkins-where-open-source-and-machine-learning-meet-big-data-224069|creationDate|2013-09-10 +http://www.infoworld.com/d/open-source-software/jeff-hawkins-where-open-source-and-machine-learning-meet-big-data-224069|tag|http://www.semanlink.net/tag/jeff_hawkins +http://www.infoworld.com/d/open-source-software/jeff-hawkins-where-open-source-and-machine-learning-meet-big-data-224069|title|Jeff Hawkins: Where open source and machine learning meet big data Open Source Software - InfoWorld +http://www.infoworld.com/d/open-source-software/jeff-hawkins-where-open-source-and-machine-learning-meet-big-data-224069|creationTime|2013-09-10T02:02:16Z +http://www.mturk.com/mturk/welcome|creationDate|2005-11-04 +http://www.mturk.com/mturk/welcome|tag|http://www.semanlink.net/tag/amazon_mechanical_turk +http://www.mturk.com/mturk/welcome|title|Amazon Mechanical Turk - Welcome +http://tagcentral.net/index.php?tag=rdf&submit=Get+Tag|creationDate|2005-04-30 +http://tagcentral.net/index.php?tag=rdf&submit=Get+Tag|tag|http://www.semanlink.net/tag/tagging +http://architects.dzone.com/articles/solr-hadoop-big-data-love|creationDate|2013-03-20 +http://architects.dzone.com/articles/solr-hadoop-big-data-love|tag|http://www.semanlink.net/tag/hadoop +http://architects.dzone.com/articles/solr-hadoop-big-data-love|tag|http://www.semanlink.net/tag/solr +http://architects.dzone.com/articles/solr-hadoop-big-data-love|title|Solr + Hadoop = Big Data Love Architects Zone +http://architects.dzone.com/articles/solr-hadoop-big-data-love|creationTime|2013-03-20T00:20:15Z +https://www.youtube.com/watch?v=fJCtaNRxg9M&feature=youtu.be|creationDate|2014-12-18 +https://www.youtube.com/watch?v=fJCtaNRxg9M&feature=youtu.be|tag|http://www.semanlink.net/tag/json_ld +https://www.youtube.com/watch?v=fJCtaNRxg9M&feature=youtu.be|tag|http://www.semanlink.net/tag/tutorial +https://www.youtube.com/watch?v=fJCtaNRxg9M&feature=youtu.be|tag|http://www.semanlink.net/tag/hydra +https://www.youtube.com/watch?v=fJCtaNRxg9M&feature=youtu.be|tag|http://www.semanlink.net/tag/youtube_video 
+https://www.youtube.com/watch?v=fJCtaNRxg9M&feature=youtu.be|tag|http://www.semanlink.net/tag/markus_lanthaler +https://www.youtube.com/watch?v=fJCtaNRxg9M&feature=youtu.be|title|How to Build Linked Data APIs with JSON LD and Hydra - YouTube +https://www.youtube.com/watch?v=fJCtaNRxg9M&feature=youtu.be|creationTime|2014-12-18T22:24:23Z +http://planb.nicecupoftea.org/archives/001292.html|creationDate|2005-04-21 +http://planb.nicecupoftea.org/archives/001292.html|tag|http://www.semanlink.net/tag/rdf +http://planb.nicecupoftea.org/archives/001292.html|tag|http://www.semanlink.net/tag/iphoto +http://planb.nicecupoftea.org/archives/001292.html|title|sparqling days / iphoto rdf export +http://www.opendatasoft.com/fr/|creationDate|2013-09-29 +http://www.opendatasoft.com/fr/|tag|http://www.semanlink.net/tag/open_data +http://www.opendatasoft.com/fr/|title|OpenDataSoft Plateforme clef-en-main pour l'open data +http://www.opendatasoft.com/fr/|creationTime|2013-09-29T17:30:11Z +http://www.lespetitescases.net/quel-evenement-ou-comment-contextualiser-le-triplet|creationDate|2010-08-20 +http://www.lespetitescases.net/quel-evenement-ou-comment-contextualiser-le-triplet|tag|http://www.semanlink.net/tag/rdf_context +http://www.lespetitescases.net/quel-evenement-ou-comment-contextualiser-le-triplet|tag|http://www.semanlink.net/tag/les_petites_cases +http://www.lespetitescases.net/quel-evenement-ou-comment-contextualiser-le-triplet|title|Quel événement !? Ou comment contextualiser le triplet Les petites cases +http://www.lespetitescases.net/quel-evenement-ou-comment-contextualiser-le-triplet|creationTime|2010-08-20T12:24:44Z +http://www.foreignpolicy.com/articles/2014/04/28/france_thomas_piketty_capital_in_the_twenty_first_century|creationDate|2014-06-24 +http://www.foreignpolicy.com/articles/2014/04/28/france_thomas_piketty_capital_in_the_twenty_first_century|tag|http://www.semanlink.net/tag/thomas_piketty +http://www.foreignpolicy.com/articles/2014/04/28/france_thomas_piketty_capital_in_the_twenty_first_century|tag|http://www.semanlink.net/tag/france +http://www.foreignpolicy.com/articles/2014/04/28/france_thomas_piketty_capital_in_the_twenty_first_century|title|France Is Not Impressed with Thomas Piketty +http://www.foreignpolicy.com/articles/2014/04/28/france_thomas_piketty_capital_in_the_twenty_first_century|creationTime|2014-06-24T23:05:44Z +http://music.blog.lemonde.fr/|creationDate|2010-06-07 +http://music.blog.lemonde.fr/|tag|http://www.semanlink.net/tag/musique +http://music.blog.lemonde.fr/|title|A la recherche des sons perdus - Blog LeMonde.fr +http://music.blog.lemonde.fr/|creationTime|2010-06-07T13:51:30Z +http://www.nuxeo.com/fr|creationDate|2012-05-02 +http://www.nuxeo.com/fr|tag|http://www.semanlink.net/tag/semweb_pro_2012 +http://www.nuxeo.com/fr|tag|http://www.semanlink.net/tag/ged +http://www.nuxeo.com/fr|tag|http://www.semanlink.net/tag/open_source +http://www.nuxeo.com/fr|tag|http://www.semanlink.net/tag/apache_stanbol +http://www.nuxeo.com/fr|tag|http://www.semanlink.net/tag/nuxeo +http://www.nuxeo.com/fr|title|Gestion Documentaire (GED) Open Source par Nuxeo +http://www.nuxeo.com/fr|creationTime|2012-05-02T15:59:11Z +http://dl.acm.org/citation.cfm?id=1498283|creationDate|2012-03-05 +http://dl.acm.org/citation.cfm?id=1498283|tag|http://www.semanlink.net/tag/configuration_and_sw +http://dl.acm.org/citation.cfm?id=1498283|title|Product configuration knowledge modeling using ontology web language +http://dl.acm.org/citation.cfm?id=1498283|creationTime|2012-03-05T22:30:59Z 
+http://blogs.forbes.com/ciocentral/2011/01/20/danger-america-is-losing-its-edge-in-innovation/|creationDate|2011-01-25 +http://blogs.forbes.com/ciocentral/2011/01/20/danger-america-is-losing-its-edge-in-innovation/|tag|http://www.semanlink.net/tag/usa +http://blogs.forbes.com/ciocentral/2011/01/20/danger-america-is-losing-its-edge-in-innovation/|tag|http://www.semanlink.net/tag/innovation +http://blogs.forbes.com/ciocentral/2011/01/20/danger-america-is-losing-its-edge-in-innovation/|tag|http://www.semanlink.net/tag/science +http://blogs.forbes.com/ciocentral/2011/01/20/danger-america-is-losing-its-edge-in-innovation/|tag|http://www.semanlink.net/tag/enseignement_scientifique +http://blogs.forbes.com/ciocentral/2011/01/20/danger-america-is-losing-its-edge-in-innovation/|comment|Scientists and engineers are celebrities in most countries. They’re not seen as geeks or misfits, as they too often are in the U.S., but rather as society’s leaders and innovators. In China, eight of the top nine political posts are held by engineers. In the U.S., almost no engineers or scientists are engaged in high-level politics, and there is a virtual absence of engineers in our public policy debates. +http://blogs.forbes.com/ciocentral/2011/01/20/danger-america-is-losing-its-edge-in-innovation/|title|Danger: America Is Losing Its Edge In Innovation - Forbes +http://blogs.forbes.com/ciocentral/2011/01/20/danger-america-is-losing-its-edge-in-innovation/|creationTime|2011-01-25T10:54:09Z +http://mondeca.wordpress.com/2008/01/19/un-moteur-de-transformation-rdf-base-sur-sparql-2/|creationDate|2008-08-27 +http://mondeca.wordpress.com/2008/01/19/un-moteur-de-transformation-rdf-base-sur-sparql-2/|tag|http://www.semanlink.net/tag/sparql_construct +http://mondeca.wordpress.com/2008/01/19/un-moteur-de-transformation-rdf-base-sur-sparql-2/|tag|http://www.semanlink.net/tag/skos +http://mondeca.wordpress.com/2008/01/19/un-moteur-de-transformation-rdf-base-sur-sparql-2/|tag|http://www.semanlink.net/tag/bernard_vatant +http://mondeca.wordpress.com/2008/01/19/un-moteur-de-transformation-rdf-base-sur-sparql-2/|title|Un moteur de transformation RDF basé sur SPARQL (2) « Leçons de Choses +http://mondeca.wordpress.com/2008/01/19/un-moteur-de-transformation-rdf-base-sur-sparql-2/|creationTime|2008-08-27T17:53:28Z +http://dannyayers.com/2007/12/30/another-rdf-syntax|creationDate|2007-12-31 +http://dannyayers.com/2007/12/30/another-rdf-syntax|tag|http://www.semanlink.net/tag/rdf_net_api +http://dannyayers.com/2007/12/30/another-rdf-syntax|tag|http://www.semanlink.net/tag/danny_ayers +http://dannyayers.com/2007/12/30/another-rdf-syntax|tag|http://www.semanlink.net/tag/identifying_triples +http://dannyayers.com/2007/12/30/another-rdf-syntax|comment|"Let a URI constructed like this: URIbase/ars?s=encodedURIs&p=encodedURIp&o=encodedURIo identify the statement: URIs URIp URIo +" +http://dannyayers.com/2007/12/30/another-rdf-syntax|title|Another RDF Syntax! 
(URI-embedded RDF) +http://dannyayers.com/2007/12/30/another-rdf-syntax|creationTime|2007-12-31T16:25:18Z +http://www.altova.com/products_semanticworks.html|creationDate|2005-10-06 +http://www.altova.com/products_semanticworks.html|tag|http://www.semanlink.net/tag/rdf_editor +http://www.altova.com/products_semanticworks.html|tag|http://www.semanlink.net/tag/owl_editor +http://www.altova.com/products_semanticworks.html|comment|RDF/OWL editor from the creators of XMLSpy +http://www.altova.com/products_semanticworks.html|title|Altova SemanticWorks +https://hub.docker.com/_/python/|creationDate|2018-03-26 +https://hub.docker.com/_/python/|tag|http://www.semanlink.net/tag/docker_python +https://hub.docker.com/_/python/|title|library/python - Docker Hub +https://hub.docker.com/_/python/|creationTime|2018-03-26T08:33:35Z +http://singularityhub.com/2010/05/23/toyotas-robot-violinist-wows-crowd-at-shanghai-expo-2010-video/|creationDate|2010-05-24 +http://singularityhub.com/2010/05/23/toyotas-robot-violinist-wows-crowd-at-shanghai-expo-2010-video/|tag|http://www.semanlink.net/tag/shanghai_expo_2010 +http://singularityhub.com/2010/05/23/toyotas-robot-violinist-wows-crowd-at-shanghai-expo-2010-video/|tag|http://www.semanlink.net/tag/robotique +http://singularityhub.com/2010/05/23/toyotas-robot-violinist-wows-crowd-at-shanghai-expo-2010-video/|tag|http://www.semanlink.net/tag/toyota +http://singularityhub.com/2010/05/23/toyotas-robot-violinist-wows-crowd-at-shanghai-expo-2010-video/|title|Toyota’s Robot Violinist Wows Crowd At Shanghai Expo 2010 (Video) Singularity Hub +http://singularityhub.com/2010/05/23/toyotas-robot-violinist-wows-crowd-at-shanghai-expo-2010-video/|creationTime|2010-05-24T22:28:29Z +https://issues.apache.org/jira/browse/JENA-203|creationDate|2012-02-02 +https://issues.apache.org/jira/browse/JENA-203|tag|http://www.semanlink.net/tag/jena +https://issues.apache.org/jira/browse/JENA-203|tag|http://www.semanlink.net/tag/henry_story +https://issues.apache.org/jira/browse/JENA-203|title|[#JENA-203] support for Non Blocking Parsers - ASF JIRA +https://issues.apache.org/jira/browse/JENA-203|creationTime|2012-02-02T21:17:59Z +http://developer.apple.com/internet/|creationDate|2005-05-14 +http://developer.apple.com/internet/|tag|http://www.semanlink.net/tag/apple_developer_connection +http://developer.apple.com/internet/|tag|http://www.semanlink.net/tag/mac_os_x_web_serving +http://developer.apple.com/internet/|title|Apple Developer Connection - Internet & Web +http://codahale.com/what-makes-jersey-interesting-parameter-classes/|creationDate|2012-01-19 +http://codahale.com/what-makes-jersey-interesting-parameter-classes/|tag|http://www.semanlink.net/tag/jersey +http://codahale.com/what-makes-jersey-interesting-parameter-classes/|tag|http://www.semanlink.net/tag/tutorial +http://codahale.com/what-makes-jersey-interesting-parameter-classes/|title|What Makes Jersey Interesting: Parameter Classes codahale.com +http://codahale.com/what-makes-jersey-interesting-parameter-classes/|creationTime|2012-01-19T11:01:08Z +http://websearch.alexa.com/static.html?show=webtour/start|creationDate|2005-12-17 +http://websearch.alexa.com/static.html?show=webtour/start|tag|http://www.semanlink.net/tag/amazon_alexa +http://websearch.alexa.com/static.html?show=webtour/start|title|Building your own search engine with Alexa +http://carlchenet.com/2016/01/22/le-danger-github/|creationDate|2016-01-23 +http://carlchenet.com/2016/01/22/le-danger-github/|tag|http://www.semanlink.net/tag/github 
+http://carlchenet.com/2016/01/22/le-danger-github/|tag|http://www.semanlink.net/tag/blog +http://carlchenet.com/2016/01/22/le-danger-github/|title|Le danger Github – Le blog de Carl Chenet +http://carlchenet.com/2016/01/22/le-danger-github/|creationTime|2016-01-23T21:43:58Z +http://bratton.com/?p=5|creationDate|2007-04-25 +http://bratton.com/?p=5|tag|http://www.semanlink.net/tag/mac_os_x +http://bratton.com/?p=5|tag|http://www.semanlink.net/tag/cvs +http://bratton.com/?p=5|title|The Bratton Blog » Setting up a CVS server on Mac OSX in 4 steps and 4 minutes +http://bratton.com/?p=5|creationTime|2007-04-25T22:04:37Z +http://fr.wikipedia.org/wiki/L'Usage_du_monde|creationDate|2008-01-23 +http://fr.wikipedia.org/wiki/L'Usage_du_monde|tag|http://www.semanlink.net/tag/livre_a_lire +http://fr.wikipedia.org/wiki/L'Usage_du_monde|tag|http://www.semanlink.net/tag/recit_de_voyage +http://fr.wikipedia.org/wiki/L'Usage_du_monde|title|L'Usage du monde - Nicolas Bouvier +http://fr.wikipedia.org/wiki/L'Usage_du_monde|creationTime|2008-01-23T23:06:44Z +http://lists.w3.org/Archives/Public/public-rdf-in-xhtml-tf/2007May/0050.html|creationDate|2007-05-31 +http://lists.w3.org/Archives/Public/public-rdf-in-xhtml-tf/2007May/0050.html|tag|http://www.semanlink.net/tag/rdfa +http://lists.w3.org/Archives/Public/public-rdf-in-xhtml-tf/2007May/0050.html|tag|http://www.semanlink.net/tag/grddl +http://lists.w3.org/Archives/Public/public-rdf-in-xhtml-tf/2007May/0050.html|tag|http://www.semanlink.net/tag/jena +http://lists.w3.org/Archives/Public/public-rdf-in-xhtml-tf/2007May/0050.html|tag|http://www.semanlink.net/tag/jeremy_carroll +http://lists.w3.org/Archives/Public/public-rdf-in-xhtml-tf/2007May/0050.html|title|RDFa support in Jena GRDDL Reader (SVN) +http://lists.w3.org/Archives/Public/public-rdf-in-xhtml-tf/2007May/0050.html|creationTime|2007-05-31T01:03:08Z +https://www.forbes.com/sites/parmyolson/2018/10/30/on-mute-how-facebook-fell-behind-on-voice-technology/#22baabcf3b50|creationDate|2018-11-01 +https://www.forbes.com/sites/parmyolson/2018/10/30/on-mute-how-facebook-fell-behind-on-voice-technology/#22baabcf3b50|tag|http://www.semanlink.net/tag/voice_ai +https://www.forbes.com/sites/parmyolson/2018/10/30/on-mute-how-facebook-fell-behind-on-voice-technology/#22baabcf3b50|tag|http://www.semanlink.net/tag/facebook +https://www.forbes.com/sites/parmyolson/2018/10/30/on-mute-how-facebook-fell-behind-on-voice-technology/#22baabcf3b50|title|How Facebook Failed To Build A Better Alexa (Or Siri) +https://www.forbes.com/sites/parmyolson/2018/10/30/on-mute-how-facebook-fell-behind-on-voice-technology/#22baabcf3b50|creationTime|2018-11-01T21:19:41Z +http://users.jena.apache.narkive.com/NF0pn3kq/controlling-json-ld-output|creationDate|2016-04-09 +http://users.jena.apache.narkive.com/NF0pn3kq/controlling-json-ld-output|tag|http://www.semanlink.net/tag/jsonld_jena +http://users.jena.apache.narkive.com/NF0pn3kq/controlling-json-ld-output|title|Jena: controlling JSON-LD output +http://users.jena.apache.narkive.com/NF0pn3kq/controlling-json-ld-output|creationTime|2016-04-09T11:46:38Z +http://clesnes.blog.lemonde.fr/2013/06/10/le-defi-dedward-snowden-a-barack-obama/|creationDate|2013-06-13 +http://clesnes.blog.lemonde.fr/2013/06/10/le-defi-dedward-snowden-a-barack-obama/|tag|http://www.semanlink.net/tag/edward_snowden +http://clesnes.blog.lemonde.fr/2013/06/10/le-defi-dedward-snowden-a-barack-obama/|title|Le défi d’Edward Snowden à Barack Obama Big Picture 
+http://clesnes.blog.lemonde.fr/2013/06/10/le-defi-dedward-snowden-a-barack-obama/|creationTime|2013-06-13T13:19:14Z +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-776783,0.html|creationDate|2006-05-27 +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-776783,0.html|tag|http://www.semanlink.net/tag/innovation +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-776783,0.html|title|Quand les ingénieurs en herbe se projettent en 2020 +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-776783,0.html|source|Le Monde +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-776783,0.html|date|2006-05-28 +http://www.w3.org/2005/ajar/tab|creationDate|2007-01-06 +http://www.w3.org/2005/ajar/tab|tag|http://www.semanlink.net/tag/tabulator +http://www.w3.org/2005/ajar/tab|title|Tabulator: Generic data browser +http://theswitchboard.ca|creationDate|2005-03-07 +http://theswitchboard.ca|tag|http://www.semanlink.net/tag/voip +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-369/|creationDate|2010-09-16 +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-369/|tag|http://www.semanlink.net/tag/ldow2008 +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-369/|title|Proceedings of the WWW2008 Workshop on Linked Data on the Web +http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-369/|creationTime|2010-09-16T22:26:22Z +http://www.forbes.com/sites/stevedenning/2016/01/23/why-the-system-is-rigged/#7b1345d8223d|creationDate|2016-01-31 +http://www.forbes.com/sites/stevedenning/2016/01/23/why-the-system-is-rigged/#7b1345d8223d|tag|http://www.semanlink.net/tag/economie +http://www.forbes.com/sites/stevedenning/2016/01/23/why-the-system-is-rigged/#7b1345d8223d|comment|"In an effort to offset declining performance and profits due to increased competition, these companies embraced the notion that the very purpose of a corporation is to maximize shareholder value as reflected in the current stock price.
+see also" +http://www.forbes.com/sites/stevedenning/2016/01/23/why-the-system-is-rigged/#7b1345d8223d|title|Why 'The System' Is Rigged And The U.S. Electorate Is Angry - Forbes +http://www.forbes.com/sites/stevedenning/2016/01/23/why-the-system-is-rigged/#7b1345d8223d|creationTime|2016-01-31T13:15:03Z +http://readwrite.com/2016/02/17/apple-wont-build-backdoor?utm_source=feedly&utm_medium=webfeeds|creationDate|2016-02-17 +http://readwrite.com/2016/02/17/apple-wont-build-backdoor?utm_source=feedly&utm_medium=webfeeds|tag|http://www.semanlink.net/tag/fbi_v_apple +http://readwrite.com/2016/02/17/apple-wont-build-backdoor?utm_source=feedly&utm_medium=webfeeds|title|Why Apple Is Fighting Back - ReadWrite +http://readwrite.com/2016/02/17/apple-wont-build-backdoor?utm_source=feedly&utm_medium=webfeeds|creationTime|2016-02-17T23:44:31Z +http://lists.w3.org/Archives/Public/public-lod/2013Jan/0013.html|creationDate|2013-01-04 +http://lists.w3.org/Archives/Public/public-lod/2013Jan/0013.html|tag|http://www.semanlink.net/tag/uri +http://lists.w3.org/Archives/Public/public-lod/2013Jan/0013.html|tag|http://www.semanlink.net/tag/fps_post +http://lists.w3.org/Archives/Public/public-lod/2013Jan/0013.html|tag|http://www.semanlink.net/tag/synonym_uris +http://lists.w3.org/Archives/Public/public-lod/2013Jan/0013.html|title|canonicURI property (public-lod@w3.org from January 2013) +http://lists.w3.org/Archives/Public/public-lod/2013Jan/0013.html|creationTime|2013-01-04T14:00:30Z +https://lilianweng.github.io/lil-log/|creationDate|2018-11-06 +https://lilianweng.github.io/lil-log/|tag|http://www.semanlink.net/tag/nlp +https://lilianweng.github.io/lil-log/|tag|http://www.semanlink.net/tag/blog +https://lilianweng.github.io/lil-log/|tag|http://www.semanlink.net/tag/lilian_weng +https://lilianweng.github.io/lil-log/|title|Lil'Log +https://lilianweng.github.io/lil-log/|creationTime|2018-11-06T19:50:46Z +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1424|creationDate|2008-09-02 +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1424|tag|http://www.semanlink.net/tag/virtuoso +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1424|tag|http://www.semanlink.net/tag/kingsley_idehen +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1424|title|Crunchbase & Semantic Web Interview (Remix - Update 1) +http://www.openlinksw.com/dataspace/kidehen@openlinksw.com/weblog/kidehen@openlinksw.com's%20BLOG%20%5B127%5D/1424|creationTime|2008-09-02T22:17:45Z +http://www.ccs.neu.edu/home/kenb/csg112/synchronize.html|creationDate|2005-10-29 +http://www.ccs.neu.edu/home/kenb/csg112/synchronize.html|tag|http://www.semanlink.net/tag/java_concurrency +http://www.ccs.neu.edu/home/kenb/csg112/synchronize.html|tag|http://www.semanlink.net/tag/tutorial +http://www.ccs.neu.edu/home/kenb/csg112/synchronize.html|title|Java Synchronization Tutorial +http://en.wikipedia.org/wiki/Carnival_in_Flanders_(film)|creationDate|2007-12-27 +http://en.wikipedia.org/wiki/Carnival_in_Flanders_(film)|tag|http://www.semanlink.net/tag/louis_jouvet +http://en.wikipedia.org/wiki/Carnival_in_Flanders_(film)|tag|http://www.semanlink.net/tag/film_francais +http://en.wikipedia.org/wiki/Carnival_in_Flanders_(film)|title|La Kermesse héroïque - Wikipedia, the free encyclopedia 
+http://en.wikipedia.org/wiki/Carnival_in_Flanders_(film)|creationTime|2007-12-27T22:18:00Z +http://www.nltk.org/book/ch02.html|creationDate|2017-06-20 +http://www.nltk.org/book/ch02.html|tag|http://www.semanlink.net/tag/text_corpora_and_lexical_resources +http://www.nltk.org/book/ch02.html|tag|http://www.semanlink.net/tag/nltk +http://www.nltk.org/book/ch02.html|title|Accessing Text Corpora and Lexical Resources +http://www.nltk.org/book/ch02.html|creationTime|2017-06-20T13:35:50Z +http://www.liip.ch/fr|creationDate|2012-06-13 +http://www.liip.ch/fr|tag|http://www.semanlink.net/tag/semantic_cms +http://www.liip.ch/fr|tag|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://www.liip.ch/fr|title|Liip AG +http://www.liip.ch/fr|creationTime|2012-06-13T11:38:31Z +https://www.slideshare.net/TessFerrandez/notes-from-coursera-deep-learning-courses-by-andrew-ng|creationDate|2018-03-05 +https://www.slideshare.net/TessFerrandez/notes-from-coursera-deep-learning-courses-by-andrew-ng|tag|http://www.semanlink.net/tag/slideshare +https://www.slideshare.net/TessFerrandez/notes-from-coursera-deep-learning-courses-by-andrew-ng|tag|http://www.semanlink.net/tag/coursera_deep_learning +https://www.slideshare.net/TessFerrandez/notes-from-coursera-deep-learning-courses-by-andrew-ng|title|Notes from Coursera Deep Learning courses by Andrew Ng +https://www.slideshare.net/TessFerrandez/notes-from-coursera-deep-learning-courses-by-andrew-ng|creationTime|2018-03-05T18:34:13Z +https://blog.twitter.com/engineering/en_us/topics/insights/2018/embeddingsattwitter.html|creationDate|2018-09-21 +https://blog.twitter.com/engineering/en_us/topics/insights/2018/embeddingsattwitter.html|tag|http://www.semanlink.net/tag/twitter +https://blog.twitter.com/engineering/en_us/topics/insights/2018/embeddingsattwitter.html|tag|http://www.semanlink.net/tag/embeddings +https://blog.twitter.com/engineering/en_us/topics/insights/2018/embeddingsattwitter.html|title|Embeddings@Twitter +https://blog.twitter.com/engineering/en_us/topics/insights/2018/embeddingsattwitter.html|creationTime|2018-09-21T13:40:35Z +http://stackoverflow.com/questions/5692256/maven-best-way-of-linking-custom-external-jar-to-my-project|creationDate|2012-08-18 +http://stackoverflow.com/questions/5692256/maven-best-way-of-linking-custom-external-jar-to-my-project|tag|http://www.semanlink.net/tag/maven +http://stackoverflow.com/questions/5692256/maven-best-way-of-linking-custom-external-jar-to-my-project|title|java - Maven: best way of linking custom external JAR to my project? 
- Stack Overflow +http://stackoverflow.com/questions/5692256/maven-best-way-of-linking-custom-external-jar-to-my-project|creationTime|2012-08-18T14:56:54Z +http://africamix.blog.lemonde.fr/2009/03/28/les-statues-africaines-nen-finissent-pas-de-mourir/|creationDate|2009-04-03 +http://africamix.blog.lemonde.fr/2009/03/28/les-statues-africaines-nen-finissent-pas-de-mourir/|tag|http://www.semanlink.net/tag/art_d_afrique +http://africamix.blog.lemonde.fr/2009/03/28/les-statues-africaines-nen-finissent-pas-de-mourir/|tag|http://www.semanlink.net/tag/anticolonialisme +http://africamix.blog.lemonde.fr/2009/03/28/les-statues-africaines-nen-finissent-pas-de-mourir/|title|Les statues africaines n’en finissent pas de mourir - africamix - Blog LeMonde.fr +http://africamix.blog.lemonde.fr/2009/03/28/les-statues-africaines-nen-finissent-pas-de-mourir/|creationTime|2009-04-03T01:28:22Z +http://africamix.blog.lemonde.fr/category/genocide/|creationDate|2009-01-19 +http://africamix.blog.lemonde.fr/category/genocide/|tag|http://www.semanlink.net/tag/genocide_rwandais +http://africamix.blog.lemonde.fr/category/genocide/|tag|http://www.semanlink.net/tag/responsabilite_de_la_france +http://africamix.blog.lemonde.fr/category/genocide/|title|Génocide - africamix - Blog LeMonde.fr +http://africamix.blog.lemonde.fr/category/genocide/|creationTime|2009-01-19T00:35:15Z +https://github.com/jsonld-java/jsonld-java/issues/101|creationDate|2016-04-09 +https://github.com/jsonld-java/jsonld-java/issues/101|tag|http://www.semanlink.net/tag/jsonld_jena +https://github.com/jsonld-java/jsonld-java/issues/101|title|Serialization of JSON-LD using appropriate context · Issue #101 · jsonld-java/jsonld-java +https://github.com/jsonld-java/jsonld-java/issues/101|creationTime|2016-04-09T12:19:49Z +http://arstechnica.com/tech-policy/news/2009/03/french-anti-p2p-law-toughest-in-the-world.ars|creationDate|2009-05-11 +http://arstechnica.com/tech-policy/news/2009/03/french-anti-p2p-law-toughest-in-the-world.ars|tag|http://www.semanlink.net/tag/hadopi_riposte_graduee +http://arstechnica.com/tech-policy/news/2009/03/french-anti-p2p-law-toughest-in-the-world.ars|title|French anti-P2P law toughest in the world +http://arstechnica.com/tech-policy/news/2009/03/french-anti-p2p-law-toughest-in-the-world.ars|creationTime|2009-05-11T23:05:38Z +http://particletree.com/features/the-importance-of-rss|creationDate|2005-06-17 +http://particletree.com/features/the-importance-of-rss|tag|http://www.semanlink.net/tag/rss +http://particletree.com/features/the-importance-of-rss|tag|http://www.semanlink.net/tag/google +http://particletree.com/features/the-importance-of-rss|title|particletree · The Importance of RSS +http://jena.hpl.hp.com/wiki/SDB/Store_Description|creationDate|2008-11-17 +http://jena.hpl.hp.com/wiki/SDB/Store_Description|tag|http://www.semanlink.net/tag/sdb_a_sparql_database_for_jena +http://jena.hpl.hp.com/wiki/SDB/Store_Description|tag|http://www.semanlink.net/tag/jena_assembler +http://jena.hpl.hp.com/wiki/SDB/Store_Description|title|SDB/Store Description - Jena wiki +http://jena.hpl.hp.com/wiki/SDB/Store_Description|creationTime|2008-11-17T00:51:22Z +http://www.semweb.pro/file/2285?vid=download|creationDate|2011-01-21 +http://www.semweb.pro/file/2285?vid=download|tag|http://www.semanlink.net/tag/semweb_pro_2011 +http://www.semweb.pro/file/2285?vid=download|title|SemWeb.Pro 2011 : dossier de presse +http://www.semweb.pro/file/2285?vid=download|creationTime|2011-01-21T23:46:04Z +http://linkeddata.jiscpress.org/|creationDate|2010-03-02 
+http://linkeddata.jiscpress.org/|tag|http://www.semanlink.net/tag/linked_data +http://linkeddata.jiscpress.org/|tag|http://www.semanlink.net/tag/paul_miller +http://linkeddata.jiscpress.org/|comment|This Linked Data Horizon Scan was commissioned from Paul Miller of the Cloud of Data by the Joint Information Systems Committee (JISC). The work was intended to provide an overview of current developments with respect to Linked Data, and to make a series of recommendations to JISC and the wider community +http://linkeddata.jiscpress.org/|title|Linked Data Horizon Scan +http://linkeddata.jiscpress.org/|creationTime|2010-03-02T10:38:57Z +http://lab.arc90.com/2006/07/link_thumbnail.php#examples|creationDate|2006-10-11 +http://lab.arc90.com/2006/07/link_thumbnail.php#examples|tag|http://www.semanlink.net/tag/thumbnails +http://lab.arc90.com/2006/07/link_thumbnail.php#examples|tag|http://www.semanlink.net/tag/tools +http://lab.arc90.com/2006/07/link_thumbnail.php#examples|tag|http://www.semanlink.net/tag/javascript +http://lab.arc90.com/2006/07/link_thumbnail.php#examples|title|arc90 lab : tools : Link Thumbnail +http://robobees.info/|creationDate|2014-04-29 +http://robobees.info/|tag|http://www.semanlink.net/tag/greenpeace +http://robobees.info/|tag|http://www.semanlink.net/tag/abeille +http://robobees.info/|tag|http://www.semanlink.net/tag/robobees +http://robobees.info/|title|Robobees +http://robobees.info/|creationTime|2014-04-29T01:29:55Z +https://www.kdnuggets.com/2018/02/google-colab-free-gpu-tutorial-tensorflow-keras-pytorch.html|creationDate|2018-05-31 +https://www.kdnuggets.com/2018/02/google-colab-free-gpu-tutorial-tensorflow-keras-pytorch.html|tag|http://www.semanlink.net/tag/google_colab +https://www.kdnuggets.com/2018/02/google-colab-free-gpu-tutorial-tensorflow-keras-pytorch.html|title|Deep Learning Development with Google Colab, TensorFlow, Keras & PyTorch +https://www.kdnuggets.com/2018/02/google-colab-free-gpu-tutorial-tensorflow-keras-pytorch.html|creationTime|2018-05-31T08:30:22Z +http://decentralyze.com/2010/06/04/from-json-to-rdf-in-six-easy-steps-with-jron/|creationDate|2010-06-04 +http://decentralyze.com/2010/06/04/from-json-to-rdf-in-six-easy-steps-with-jron/|tag|http://www.semanlink.net/tag/rdf +http://decentralyze.com/2010/06/04/from-json-to-rdf-in-six-easy-steps-with-jron/|tag|http://www.semanlink.net/tag/rdf_in_json +http://decentralyze.com/2010/06/04/from-json-to-rdf-in-six-easy-steps-with-jron/|title|From JSON to RDF in Six Easy Steps with JRON +http://decentralyze.com/2010/06/04/from-json-to-rdf-in-six-easy-steps-with-jron/|creationTime|2010-06-04T11:54:33Z +http://purl.org/configurationontology|creationDate|2012-02-07 +http://purl.org/configurationontology|tag|http://www.semanlink.net/tag/configuration_ontology +http://purl.org/configurationontology|title|Configuration ontology +http://purl.org/configurationontology|creationTime|2012-02-07T14:17:40Z +http://news.bbc.co.uk/2/hi/technology/6291746.stm|creationDate|2007-07-13 +http://news.bbc.co.uk/2/hi/technology/6291746.stm|tag|http://www.semanlink.net/tag/robotique +http://news.bbc.co.uk/2/hi/technology/6291746.stm|tag|http://www.semanlink.net/tag/robot_humanoide +http://news.bbc.co.uk/2/hi/technology/6291746.stm|comment|Roboticists are using the lessons of a 1930s human physiologist to build the world's fastest walking robot. 
+http://news.bbc.co.uk/2/hi/technology/6291746.stm|title|BBC NEWS Technology Robot unravels mystery of walking +http://news.bbc.co.uk/2/hi/technology/6291746.stm|creationTime|2007-07-13T18:41:57Z +http://news.bbc.co.uk/2/hi/technology/6291746.stm|source|BBC +http://eprints.cs.univie.ac.at/4106/1/Mader2014_lod2.pdf|creationDate|2015-08-30 +http://eprints.cs.univie.ac.at/4106/1/Mader2014_lod2.pdf|tag|http://www.semanlink.net/tag/linked_data_exploration +http://eprints.cs.univie.ac.at/4106/1/Mader2014_lod2.pdf|tag|http://www.semanlink.net/tag/linked_data_browser +http://eprints.cs.univie.ac.at/4106/1/Mader2014_lod2.pdf|title|Facilitating the Exploration and Visualization of Linked Data +http://eprints.cs.univie.ac.at/4106/1/Mader2014_lod2.pdf|creationTime|2015-08-30T16:37:09Z +http://freeculture.org/blog/2013/04/23/dont-let-the-myths-fool-you-the-w3cs-plan-for-drm-in-html5-is-a-betrayal-to-all-web-users/|creationDate|2013-04-27 +http://freeculture.org/blog/2013/04/23/dont-let-the-myths-fool-you-the-w3cs-plan-for-drm-in-html5-is-a-betrayal-to-all-web-users/|tag|http://www.semanlink.net/tag/drm_in_html_5 +http://freeculture.org/blog/2013/04/23/dont-let-the-myths-fool-you-the-w3cs-plan-for-drm-in-html5-is-a-betrayal-to-all-web-users/|title|Free Culture Foundation » Blog Archive » Don’t let the myths fool you: the W3C’s plan for DRM in HTML5 is a betrayal to all Web users. +http://freeculture.org/blog/2013/04/23/dont-let-the-myths-fool-you-the-w3cs-plan-for-drm-in-html5-is-a-betrayal-to-all-web-users/|creationTime|2013-04-27T20:03:45Z +http://open.vocab.org/docs/AutomobileCataologType|creationDate|2011-07-11 +http://open.vocab.org/docs/AutomobileCataologType|tag|http://www.semanlink.net/tag/automotive_and_web_technologies +http://open.vocab.org/docs/AutomobileCataologType|title|ov:AutomobileCataologType, a class in the OpenVocab RDF schema +http://open.vocab.org/docs/AutomobileCataologType|creationTime|2011-07-11T16:32:37Z +http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAKdC0PsHEe4M=OZe=_vE3A+jH5k4HgdiyX=sJ4j7Uhf+m0=Wzw@mail.gmail.com%3e|creationDate|2015-02-19 +http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAKdC0PsHEe4M=OZe=_vE3A+jH5k4HgdiyX=sJ4j7Uhf+m0=Wzw@mail.gmail.com%3e|title|HELP about jena fuseki and NodeJS +http://mail-archives.apache.org/mod_mbox/jena-users/201502.mbox/%3cCAKdC0PsHEe4M=OZe=_vE3A+jH5k4HgdiyX=sJ4j7Uhf+m0=Wzw@mail.gmail.com%3e|creationTime|2015-02-19T01:12:22Z +http://www.hpl.hp.com/personal/bwm/rdf/jena/rssinjena.htm|creationDate|2005-04-30 +http://www.hpl.hp.com/personal/bwm/rdf/jena/rssinjena.htm|tag|http://www.semanlink.net/tag/rss_dev +http://www.hpl.hp.com/personal/bwm/rdf/jena/rssinjena.htm|tag|http://www.semanlink.net/tag/jena +http://www.hpl.hp.com/personal/bwm/rdf/jena/rssinjena.htm|title|Processing A Site Summary Format with Jena +http://elda.googlecode.com/hg/deliver-elda/src/main/docs/index.html|creationDate|2011-01-24 +http://elda.googlecode.com/hg/deliver-elda/src/main/docs/index.html|tag|http://www.semanlink.net/tag/elda +http://elda.googlecode.com/hg/deliver-elda/src/main/docs/index.html|title|Elda -- an implementation of the Linked Data API +http://elda.googlecode.com/hg/deliver-elda/src/main/docs/index.html|creationTime|2011-01-24T19:01:22Z +http://www.growing-gardens.org/|creationDate|2008-08-17 +http://www.growing-gardens.org/|tag|http://www.semanlink.net/tag/portland_or +http://www.growing-gardens.org/|tag|http://www.semanlink.net/tag/jardin 
+http://www.growing-gardens.org/|tag|http://www.semanlink.net/tag/vito +http://www.growing-gardens.org/|title|Gardening in Portland Oregon Growing Gardens Digs at the Root of Hunger +http://www.growing-gardens.org/|creationTime|2008-08-17T23:29:36Z +http://fr.wikipedia.org/wiki/Liste_des_monuments_historiques_de_l'Orne|creationDate|2011-09-26 +http://fr.wikipedia.org/wiki/Liste_des_monuments_historiques_de_l'Orne|tag|http://www.semanlink.net/tag/wikipedia +http://fr.wikipedia.org/wiki/Liste_des_monuments_historiques_de_l'Orne|tag|http://www.semanlink.net/tag/monuments_historiques +http://fr.wikipedia.org/wiki/Liste_des_monuments_historiques_de_l'Orne|tag|http://www.semanlink.net/tag/orne +http://fr.wikipedia.org/wiki/Liste_des_monuments_historiques_de_l'Orne|title|Liste des monuments historiques de l'Orne - Wikipédia +http://fr.wikipedia.org/wiki/Liste_des_monuments_historiques_de_l'Orne|creationTime|2011-09-26T14:40:54Z +http://blog.intrapromote.com/category/seo-for-automotive/|creationDate|2014-05-01 +http://blog.intrapromote.com/category/seo-for-automotive/|tag|http://www.semanlink.net/tag/automobile +http://blog.intrapromote.com/category/seo-for-automotive/|tag|http://www.semanlink.net/tag/seo +http://blog.intrapromote.com/category/seo-for-automotive/|title|Seo For Automotive Above the Fold & Socially Acceptable +http://blog.intrapromote.com/category/seo-for-automotive/|creationTime|2014-05-01T00:57:17Z +http://www.fast.ai/|creationDate|2017-11-28 +http://www.fast.ai/|tag|http://www.semanlink.net/tag/fast_ai +http://www.fast.ai/|title|fast.ai · Making neural nets uncool again +http://www.fast.ai/|creationTime|2017-11-28T23:58:07Z +http://www.wired.com/autopia/2012/09/connected-car-innovation/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|creationDate|2013-01-08 +http://www.wired.com/autopia/2012/09/connected-car-innovation/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|tag|http://www.semanlink.net/tag/automotive_and_web_technologies +http://www.wired.com/autopia/2012/09/connected-car-innovation/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|title|Why Connected-Car Innovation Could Come From Outside the Auto Industry Autopia Wired.com +http://www.wired.com/autopia/2012/09/connected-car-innovation/?utm_source=Contextly&utm_medium=RelatedLinks&utm_campaign=Previous|creationTime|2013-01-08T11:59:07Z +http://codebrief.com/2012/01/the-top-10-javascript-mvc-frameworks-reviewed/|creationDate|2017-01-21 +http://codebrief.com/2012/01/the-top-10-javascript-mvc-frameworks-reviewed/|tag|http://www.semanlink.net/tag/javascript +http://codebrief.com/2012/01/the-top-10-javascript-mvc-frameworks-reviewed/|tag|http://www.semanlink.net/tag/mvc +http://codebrief.com/2012/01/the-top-10-javascript-mvc-frameworks-reviewed/|title|The Top 10 Javascript MVC Frameworks Reviewed +http://codebrief.com/2012/01/the-top-10-javascript-mvc-frameworks-reviewed/|creationTime|2017-01-21T23:14:37Z +http://www.comtech-serv.com/webinar-rdf.shtml|creationDate|2011-05-20 +http://www.comtech-serv.com/webinar-rdf.shtml|tag|http://www.semanlink.net/tag/dita +http://www.comtech-serv.com/webinar-rdf.shtml|title|How Effective Use of Metadata and the Resource Description Framework (RDF) Can Be an Answer to Your DITA Nightmares +http://www.comtech-serv.com/webinar-rdf.shtml|creationTime|2011-05-20T14:51:56Z +http://blog.iks-project.eu/iks-salzburg-workshop-june-2012/|creationDate|2012-06-21 
+http://blog.iks-project.eu/iks-salzburg-workshop-june-2012/|tag|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://blog.iks-project.eu/iks-salzburg-workshop-june-2012/|title|IKS Salzburg Workshop June 2012 IKS Blog – The Semantic CMS Community +http://blog.iks-project.eu/iks-salzburg-workshop-june-2012/|creationTime|2012-06-21T11:58:31Z +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0|creationDate|2019-01-30 +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0|tag|http://www.semanlink.net/tag/graph_convolutional_networks +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0|tag|http://www.semanlink.net/tag/semi_supervised_learning +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0|comment|"Part 2: Semi-Supervised Learning with Spectral Graph Convolutions + +[Part 1](/doc/?uri=https%3A%2F%2Ftowardsdatascience.com%2Fhow-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-7d2250723780)" +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0|relatedDoc|https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-7d2250723780 +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0|title|How to do Deep Learning on Graphs with Graph Convolutional Networks Part 2 +https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0|creationTime|2019-01-30T13:09:41Z +http://lists.w3.org/Archives/Public/public-esw-thes/2008May/0010.html|creationDate|2008-05-15 +http://lists.w3.org/Archives/Public/public-esw-thes/2008May/0010.html|tag|http://www.semanlink.net/tag/fps_post +http://lists.w3.org/Archives/Public/public-esw-thes/2008May/0010.html|tag|http://www.semanlink.net/tag/semantics_of_skos_concept +http://lists.w3.org/Archives/Public/public-esw-thes/2008May/0010.html|tag|http://www.semanlink.net/tag/uri_synonymity +http://lists.w3.org/Archives/Public/public-esw-thes/2008May/0010.html|title|"skos:Concept and ""real world things""" +http://lists.w3.org/Archives/Public/public-esw-thes/2008May/0010.html|creationTime|2008-05-15T22:26:50Z +https://www.youtube.com/watch?v=uBr_jOyrdbk|creationDate|2015-08-07 +https://www.youtube.com/watch?v=uBr_jOyrdbk|tag|http://www.semanlink.net/tag/championnats_du_monde_a_paris_saint_denis_2003 +https://www.youtube.com/watch?v=uBr_jOyrdbk|title|Les françaises championnes du monde du 4x100m (WC Paris 2003) HQ - YouTube +https://www.youtube.com/watch?v=uBr_jOyrdbk|creationTime|2015-08-07T19:15:38Z +http://news.zdnet.com/2102-3513_22-6078976.html?tag=printthis|creationDate|2006-06-06 +http://news.zdnet.com/2102-3513_22-6078976.html?tag=printthis|tag|http://www.semanlink.net/tag/social_software +http://news.zdnet.com/2102-3513_22-6078976.html?tag=printthis|tag|http://www.semanlink.net/tag/open_source +http://news.zdnet.com/2102-3513_22-6078976.html?tag=printthis|tag|http://www.semanlink.net/tag/red_hat +http://news.zdnet.com/2102-3513_22-6078976.html?tag=printthis|tag|http://www.semanlink.net/tag/digital_media +http://news.zdnet.com/2102-3513_22-6078976.html?tag=printthis|comment|Red Hat has launched a social-networking project called Mugshot to promote the use of open-source software for digital media. 
+http://news.zdnet.com/2102-3513_22-6078976.html?tag=printthis|title|Red Hat project brings open-source to digital media Tech News on ZDNet
+http://www.projectforum.com|creationDate|2005-10-11
+http://www.projectforum.com|tag|http://www.semanlink.net/tag/wiki_software
+http://www.projectforum.com|title|CourseForum Technologies Home
+https://blog.algolia.com/inside-the-engine-part-6-handling-synonyms-the-right-way/?utm_source=twitter&utm_campaign=enginepart6&utm_medium=cpm|creationDate|2016-12-10
+https://blog.algolia.com/inside-the-engine-part-6-handling-synonyms-the-right-way/?utm_source=twitter&utm_campaign=enginepart6&utm_medium=cpm|tag|http://www.semanlink.net/tag/text_search
+https://blog.algolia.com/inside-the-engine-part-6-handling-synonyms-the-right-way/?utm_source=twitter&utm_campaign=enginepart6&utm_medium=cpm|title|Inside the Algolia Engine Part 6 — Handling Synonyms the Right Way Milliseconds Matter
+https://blog.algolia.com/inside-the-engine-part-6-handling-synonyms-the-right-way/?utm_source=twitter&utm_campaign=enginepart6&utm_medium=cpm|creationTime|2016-12-10T10:37:39Z
+http://www.jguru.com/faq/view.jsp?EID=1030399|creationDate|2008-10-11
+http://www.jguru.com/faq/view.jsp?EID=1030399|tag|http://www.semanlink.net/tag/diacritics_in_uri
+http://www.jguru.com/faq/view.jsp?EID=1030399|tag|http://www.semanlink.net/tag/tomcat
+http://www.jguru.com/faq/view.jsp?EID=1030399|tag|http://www.semanlink.net/tag/servlet
+http://www.jguru.com/faq/view.jsp?EID=1030399|title|How to get international unicode characters from a a form input field/servlet parameter into a string?
+http://www.jguru.com/faq/view.jsp?EID=1030399|creationTime|2008-10-11T16:56:56Z
+https://www.lemonde.fr/afrique/article/2019/03/20/ghana-le-petit-pays-qui-voit-grand_5438952_3212.html|creationDate|2019-03-21
+https://www.lemonde.fr/afrique/article/2019/03/20/ghana-le-petit-pays-qui-voit-grand_5438952_3212.html|tag|http://www.semanlink.net/tag/ghana
+https://www.lemonde.fr/afrique/article/2019/03/20/ghana-le-petit-pays-qui-voit-grand_5438952_3212.html|title|Ghana, le petit pays qui voit grand
+https://www.lemonde.fr/afrique/article/2019/03/20/ghana-le-petit-pays-qui-voit-grand_5438952_3212.html|creationTime|2019-03-21T13:13:49Z
+https://github.com/explosion/talks/blob/master/2018-04-12__Embed-Encode-Attend-Predict.pdf|creationDate|2018-04-12
+https://github.com/explosion/talks/blob/master/2018-04-12__Embed-Encode-Attend-Predict.pdf|tag|http://www.semanlink.net/tag/matthew_honnibal
+https://github.com/explosion/talks/blob/master/2018-04-12__Embed-Encode-Attend-Predict.pdf|tag|http://www.semanlink.net/tag/slides
+https://github.com/explosion/talks/blob/master/2018-04-12__Embed-Encode-Attend-Predict.pdf|tag|http://www.semanlink.net/tag/nlp
+https://github.com/explosion/talks/blob/master/2018-04-12__Embed-Encode-Attend-Predict.pdf|title|talks/2018-04-12__Embed-Encode-Attend-Predict.pdf at master · explosion/talks · GitHub
+https://github.com/explosion/talks/blob/master/2018-04-12__Embed-Encode-Attend-Predict.pdf|creationTime|2018-04-12T23:39:42Z
+http://www.guardian.co.uk/world/2010/nov/30/nicolas-sarkozy-personality-embassy-cables|creationDate|2010-12-02
+http://www.guardian.co.uk/world/2010/nov/30/nicolas-sarkozy-personality-embassy-cables|tag|http://www.semanlink.net/tag/wikileaks
+http://www.guardian.co.uk/world/2010/nov/30/nicolas-sarkozy-personality-embassy-cables|tag|http://www.semanlink.net/tag/sarkozy
+http://www.guardian.co.uk/world/2010/nov/30/nicolas-sarkozy-personality-embassy-cables|title|WikiLeaks cables: 'Nicolas Sarkozy thin-skinned and authoritarian' World news guardian.co.uk
+http://www.guardian.co.uk/world/2010/nov/30/nicolas-sarkozy-personality-embassy-cables|creationTime|2010-12-02T00:08:20Z
+https://supernlp.github.io/2018/11/10/emnlp-2018/|creationDate|2018-11-13
+https://supernlp.github.io/2018/11/10/emnlp-2018/|tag|http://www.semanlink.net/tag/emnlp_2018
+https://supernlp.github.io/2018/11/10/emnlp-2018/|title|EMNLP 2018 Thoughts and Notes · Supernatural Language Processing
+https://supernlp.github.io/2018/11/10/emnlp-2018/|creationTime|2018-11-13T00:22:21Z
+https://medium.com/thoughts-on-media/death-of-hyperlink-the-aftermath-cb10ce79e014|creationDate|2015-11-11
+https://medium.com/thoughts-on-media/death-of-hyperlink-the-aftermath-cb10ce79e014|tag|http://www.semanlink.net/tag/facebook
+https://medium.com/thoughts-on-media/death-of-hyperlink-the-aftermath-cb10ce79e014|tag|http://www.semanlink.net/tag/death_of_hyperlink
+https://medium.com/thoughts-on-media/death-of-hyperlink-the-aftermath-cb10ce79e014|tag|http://www.semanlink.net/tag/iran
+https://medium.com/thoughts-on-media/death-of-hyperlink-the-aftermath-cb10ce79e014|title|Death of Hyperlink: The Aftermath — Thoughts on Media — Medium
+https://medium.com/thoughts-on-media/death-of-hyperlink-the-aftermath-cb10ce79e014|creationTime|2015-11-11T20:02:06Z
+http://benlog.com/articles/2008/05/21/why-im-switching-to-yahoo-search/|creationDate|2008-05-22
+http://benlog.com/articles/2008/05/21/why-im-switching-to-yahoo-search/|tag|http://www.semanlink.net/tag/yahoo
+http://benlog.com/articles/2008/05/21/why-im-switching-to-yahoo-search/|tag|http://www.semanlink.net/tag/searchmonkey
+http://benlog.com/articles/2008/05/21/why-im-switching-to-yahoo-search/|title|Benlog » Why I’m switching to Yahoo Search
+http://benlog.com/articles/2008/05/21/why-im-switching-to-yahoo-search/|creationTime|2008-05-22T14:28:05Z
+http://www.sysdeo.com/eclipse/tomcatpluginfr|creationDate|2006-01-07
+http://www.sysdeo.com/eclipse/tomcatpluginfr|tag|http://www.semanlink.net/tag/eclipse
+http://www.sysdeo.com/eclipse/tomcatpluginfr|tag|http://www.semanlink.net/tag/tomcat
+http://www.sysdeo.com/eclipse/tomcatpluginfr|title|Sysdeo Eclipse Tomcat plugin
+http://nkos.slis.kent.edu/|creationDate|2008-05-06
+http://nkos.slis.kent.edu/|tag|http://www.semanlink.net/tag/nkos
+http://nkos.slis.kent.edu/|title|NKOS Networked Knowledge Organization Systems and Services
+http://nkos.slis.kent.edu/|creationTime|2008-05-06T21:34:49Z
+http://gigaom.com/2014/05/02/darpa-is-working-on-its-own-deep-learning-project-for-natural-language-processing/?utm_content=buffer0a1bb&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2014-05-10
+http://gigaom.com/2014/05/02/darpa-is-working-on-its-own-deep-learning-project-for-natural-language-processing/?utm_content=buffer0a1bb&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/deep_learning
+http://gigaom.com/2014/05/02/darpa-is-working-on-its-own-deep-learning-project-for-natural-language-processing/?utm_content=buffer0a1bb&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/darpa
+http://gigaom.com/2014/05/02/darpa-is-working-on-its-own-deep-learning-project-for-natural-language-processing/?utm_content=buffer0a1bb&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/nlp
+http://gigaom.com/2014/05/02/darpa-is-working-on-its-own-deep-learning-project-for-natural-language-processing/?utm_content=buffer0a1bb&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|DARPA is working on its own deep-learning project for natural-language processing — Tech News and Analysis
+http://gigaom.com/2014/05/02/darpa-is-working-on-its-own-deep-learning-project-for-natural-language-processing/?utm_content=buffer0a1bb&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2014-05-10T14:11:50Z
+http://googleresearch.blogspot.co.uk/2012/04/working-with-your-data-easier-and-more.html|creationDate|2012-05-19
+http://googleresearch.blogspot.co.uk/2012/04/working-with-your-data-easier-and-more.html|tag|http://www.semanlink.net/tag/google_fusion_tables
+http://googleresearch.blogspot.co.uk/2012/04/working-with-your-data-easier-and-more.html|title|Working with your Data: Easier and More Fun
+http://googleresearch.blogspot.co.uk/2012/04/working-with-your-data-easier-and-more.html|creationTime|2012-05-19T19:22:55Z
+http://www.republicain-niger.com/index.asp?affiche=News_Display.asp&articleid=5614|creationDate|2009-08-08
+http://www.republicain-niger.com/index.asp?affiche=News_Display.asp&articleid=5614|tag|http://www.semanlink.net/tag/niger
+http://www.republicain-niger.com/index.asp?affiche=News_Display.asp&articleid=5614|title|UN NIGÉRIEN HONORÉ EN FRANCE Habibou Maïtournam, lauréat du prix PLUMEY 2009 de l’Académie des sciences - Le Republicain-Niger
+http://www.republicain-niger.com/index.asp?affiche=News_Display.asp&articleid=5614|creationTime|2009-08-08T11:04:53Z
+https://reporterre.net/Exclusif-L-ex-Premier-ministre-du-Japon-Naoto-Kan-raconte-la-catastrophe-de|creationDate|2018-03-12
+https://reporterre.net/Exclusif-L-ex-Premier-ministre-du-Japon-Naoto-Kan-raconte-la-catastrophe-de|tag|http://www.semanlink.net/tag/fukushima
+https://reporterre.net/Exclusif-L-ex-Premier-ministre-du-Japon-Naoto-Kan-raconte-la-catastrophe-de|title|L’ex-Premier ministre du Japon, Naoto Kan, raconte la catastrophe de Fukushima
+https://reporterre.net/Exclusif-L-ex-Premier-ministre-du-Japon-Naoto-Kan-raconte-la-catastrophe-de|creationTime|2018-03-12T13:16:46Z
+http://taskonomy.stanford.edu/|creationDate|2018-06-21
+http://taskonomy.stanford.edu/|tag|http://www.semanlink.net/tag/transfer_learning
+http://taskonomy.stanford.edu/|title|Taskonomy Stanford
+http://taskonomy.stanford.edu/|creationTime|2018-06-21T13:12:11Z
+http://blogs.esa.int/rosetta/|creationDate|2014-11-15
+http://blogs.esa.int/rosetta/|tag|http://www.semanlink.net/tag/rosetta
+http://blogs.esa.int/rosetta/|title|Rosetta blog
+http://blogs.esa.int/rosetta/|creationTime|2014-11-15T10:07:22Z
+http://www.twine.com/user/fps|creationDate|2008-07-07
+http://www.twine.com/user/fps|tag|http://www.semanlink.net/tag/twine
+http://www.twine.com/user/fps|tag|http://www.semanlink.net/tag/fps
+http://www.twine.com/user/fps|title|François-Paul Servant Twine
+http://www.twine.com/user/fps|creationTime|2008-07-07T21:17:38Z
+http://xtech06.usefulinc.com/schedule/detail/38|creationDate|2006-04-24
+http://xtech06.usefulinc.com/schedule/detail/38|tag|http://www.semanlink.net/tag/semantic_browsing
+http://xtech06.usefulinc.com/schedule/detail/38|tag|http://www.semanlink.net/tag/a_suivre
+http://xtech06.usefulinc.com/schedule/detail/38|tag|http://www.semanlink.net/tag/xtech_2006
+http://xtech06.usefulinc.com/schedule/detail/38|comment|As a case study, a rich visual interface is applied to a data set based on current events, which makes use of keywords to define connections between events as they unfold over time. The resulting experience presents a novel perspective on the material which conforms to its underlying meanings. The user may frame information by making positive decisions about how it is displayed, following their own trajectory through interconnected news items.
+http://xtech06.usefulinc.com/schedule/detail/38|title|XTech 2006: Making Connections: Exploring new forms of semantic browsing
+http://passeurdesciences.blog.lemonde.fr/2013/10/01/le-mystere-de-la-plus-grande-eruption-volcanique-du-dernier-millenaire-est-resolu/|creationDate|2013-10-01
+http://passeurdesciences.blog.lemonde.fr/2013/10/01/le-mystere-de-la-plus-grande-eruption-volcanique-du-dernier-millenaire-est-resolu/|tag|http://www.semanlink.net/tag/catastrophe_naturelle
+http://passeurdesciences.blog.lemonde.fr/2013/10/01/le-mystere-de-la-plus-grande-eruption-volcanique-du-dernier-millenaire-est-resolu/|tag|http://www.semanlink.net/tag/eruption_volcanique
+http://passeurdesciences.blog.lemonde.fr/2013/10/01/le-mystere-de-la-plus-grande-eruption-volcanique-du-dernier-millenaire-est-resolu/|title|Le mystère de la plus grande éruption volcanique du dernier millénaire est résolu Passeur de sciences
+http://passeurdesciences.blog.lemonde.fr/2013/10/01/le-mystere-de-la-plus-grande-eruption-volcanique-du-dernier-millenaire-est-resolu/|creationTime|2013-10-01T21:49:22Z
+http://www.stanford.edu/class/cs124/kwc-unix-for-poets.pdf|creationDate|2012-03-19
+http://www.stanford.edu/class/cs124/kwc-unix-for-poets.pdf|tag|http://www.semanlink.net/tag/unix
+http://www.stanford.edu/class/cs124/kwc-unix-for-poets.pdf|tag|http://www.semanlink.net/tag/nlp_class
+http://www.stanford.edu/class/cs124/kwc-unix-for-poets.pdf|title|Unix for Poets
+http://www.stanford.edu/class/cs124/kwc-unix-for-poets.pdf|creationTime|2012-03-19T00:02:46Z
+http://remacle.org/bloodwolf/textes/thucyeloge.htm|creationDate|2009-03-05
+http://remacle.org/bloodwolf/textes/thucyeloge.htm|tag|http://www.semanlink.net/tag/citation
+http://remacle.org/bloodwolf/textes/thucyeloge.htm|tag|http://www.semanlink.net/tag/thucydide
+http://remacle.org/bloodwolf/textes/thucyeloge.htm|tag|http://www.semanlink.net/tag/pericles
+http://remacle.org/bloodwolf/textes/thucyeloge.htm|comment|"> Les hommes éminents ont la terre entière pour tombeau.
+>
+> les peuples qui proposent à la vertu de magnifiques récompenses ont aussi les meilleurs citoyens"
+http://remacle.org/bloodwolf/textes/thucyeloge.htm|title|Thucydide, la guerre du Péloponèse : oraison funèbre prononcée par Périclès
+http://remacle.org/bloodwolf/textes/thucyeloge.htm|creationTime|2009-03-05T01:36:51Z
+http://www.rfimusique.com/musiquefr/articles/091/article_16904.asp|creationDate|2007-07-14
+http://www.rfimusique.com/musiquefr/articles/091/article_16904.asp|tag|http://www.semanlink.net/tag/l_afrique_a_la_bastille_13_juillet_2007
+http://www.rfimusique.com/musiquefr/articles/091/article_16904.asp|comment|"Grand concert de musique africaine à la Bastille, le 13 juillet 2007. 
+" +http://www.rfimusique.com/musiquefr/articles/091/article_16904.asp|title|RFI Musique - L'Afrique prend la Bastille +http://www.rfimusique.com/musiquefr/articles/091/article_16904.asp|creationTime|2007-07-14T00:54:23Z +http://taddei.zapto.org/TINS/|creationDate|2014-07-20 +http://taddei.zapto.org/TINS/|tag|http://www.semanlink.net/tag/gilles_taddei +http://taddei.zapto.org/TINS/|title|TINS Is Not Sharepoint +http://taddei.zapto.org/TINS/|creationTime|2014-07-20T22:29:04Z +http://readwrite.com/2013/07/12/how-an-engineering-toy-for-girls-went-from-kickstarter-to-bestseller?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed:+readwriteweb+(ReadWriteWeb)#awesm=~obnkAaJdbjjbl1|creationDate|2013-07-12 +http://readwrite.com/2013/07/12/how-an-engineering-toy-for-girls-went-from-kickstarter-to-bestseller?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed:+readwriteweb+(ReadWriteWeb)#awesm=~obnkAaJdbjjbl1|tag|http://www.semanlink.net/tag/jouet +http://readwrite.com/2013/07/12/how-an-engineering-toy-for-girls-went-from-kickstarter-to-bestseller?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed:+readwriteweb+(ReadWriteWeb)#awesm=~obnkAaJdbjjbl1|title|How An Engineering Toy For Girls Went From Kickstarter To Bestseller – ReadWrite +http://readwrite.com/2013/07/12/how-an-engineering-toy-for-girls-went-from-kickstarter-to-bestseller?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed:+readwriteweb+(ReadWriteWeb)#awesm=~obnkAaJdbjjbl1|creationTime|2013-07-12T13:20:28Z +https://lejournal.cnrs.fr/articles/le-vivant-a-sa-matiere-noire|creationDate|2016-01-19 +https://lejournal.cnrs.fr/articles/le-vivant-a-sa-matiere-noire|tag|http://www.semanlink.net/tag/metagenomics +https://lejournal.cnrs.fr/articles/le-vivant-a-sa-matiere-noire|tag|http://www.semanlink.net/tag/biodiversite +https://lejournal.cnrs.fr/articles/le-vivant-a-sa-matiere-noire|title|Le vivant a sa matière noire CNRS Le journal +https://lejournal.cnrs.fr/articles/le-vivant-a-sa-matiere-noire|creationTime|2016-01-19T15:53:54Z +http://riteshkrmodi.blogspot.fr/2014/06/swagger-integration-jersey.html|creationDate|2017-04-03 +http://riteshkrmodi.blogspot.fr/2014/06/swagger-integration-jersey.html|tag|http://www.semanlink.net/tag/jersey +http://riteshkrmodi.blogspot.fr/2014/06/swagger-integration-jersey.html|tag|http://www.semanlink.net/tag/tutorial +http://riteshkrmodi.blogspot.fr/2014/06/swagger-integration-jersey.html|title|Swagger integration with Jersey +http://riteshkrmodi.blogspot.fr/2014/06/swagger-integration-jersey.html|creationTime|2017-04-03T14:20:56Z +http://blog.dbtune.org/post/2011/06/28/Using-RDFa-for-testing-templates|creationDate|2011-06-29 +http://blog.dbtune.org/post/2011/06/28/Using-RDFa-for-testing-templates|tag|http://www.semanlink.net/tag/unit_test +http://blog.dbtune.org/post/2011/06/28/Using-RDFa-for-testing-templates|tag|http://www.semanlink.net/tag/rdfa +http://blog.dbtune.org/post/2011/06/28/Using-RDFa-for-testing-templates|tag|http://www.semanlink.net/tag/web_dev +http://blog.dbtune.org/post/2011/06/28/Using-RDFa-for-testing-templates|title|Using RDFa for testing templates - DBTune blog +http://blog.dbtune.org/post/2011/06/28/Using-RDFa-for-testing-templates|creationTime|2011-06-29T18:07:16Z +http://lifemap.univ-lyon1.fr/|creationDate|2017-12-23 +http://lifemap.univ-lyon1.fr/|tag|http://www.semanlink.net/tag/tree_of_life +http://lifemap.univ-lyon1.fr/|comment|Exploring the Entire Tree of Life +http://lifemap.univ-lyon1.fr/|title|Lifemap +http://lifemap.univ-lyon1.fr/|creationTime|2017-12-23T11:09:13Z 
+http://www.fastcoexist.com/3041719/change-generation/work-is-bullshit-the-argument-for-antiwork|creationDate|2015-02-03 +http://www.fastcoexist.com/3041719/change-generation/work-is-bullshit-the-argument-for-antiwork|tag|http://www.semanlink.net/tag/antiwork +http://www.fastcoexist.com/3041719/change-generation/work-is-bullshit-the-argument-for-antiwork|title|"Work Is Bullshit: The Argument For ""Antiwork"" Co.Exist ideas + impact" +http://www.fastcoexist.com/3041719/change-generation/work-is-bullshit-the-argument-for-antiwork|creationTime|2015-02-03T18:34:40Z +http://www.kingtutone.com|creationDate|2005-05-14 +http://www.kingtutone.com|tag|http://www.semanlink.net/tag/egypte_antique +http://www.kingtutone.com|tag|http://www.semanlink.net/tag/toutankhamon +http://www.kingtutone.com|title|Ancient Egypt - Includes pyramids, pharaohs, queens, and more. +http://manu.sporny.org/2013/microdata-downward-spiral/|creationDate|2013-08-22 +http://manu.sporny.org/2013/microdata-downward-spiral/|tag|http://www.semanlink.net/tag/microdata +http://manu.sporny.org/2013/microdata-downward-spiral/|tag|http://www.semanlink.net/tag/manu_sporny +http://manu.sporny.org/2013/microdata-downward-spiral/|title|The Downward Spiral of Microdata The Beautiful, Tormented Machine +http://manu.sporny.org/2013/microdata-downward-spiral/|creationTime|2013-08-22T21:44:49Z +https://docs.docker.com/engine/userguide/|creationDate|2016-04-06 +https://docs.docker.com/engine/userguide/|tag|http://www.semanlink.net/tag/docker +https://docs.docker.com/engine/userguide/|title|Docker User guide +https://docs.docker.com/engine/userguide/|creationTime|2016-04-06T15:36:16Z +http://jmlr.org/proceedings/papers/v28/bi13.pdf|creationDate|2014-04-25 +http://jmlr.org/proceedings/papers/v28/bi13.pdf|tag|http://www.semanlink.net/tag/multi_label_classification +http://jmlr.org/proceedings/papers/v28/bi13.pdf|tag|http://www.semanlink.net/tag/nlp_text_classification +http://jmlr.org/proceedings/papers/v28/bi13.pdf|title|Efficient Multi-label Classification with Many Labels (2013) +http://jmlr.org/proceedings/papers/v28/bi13.pdf|creationTime|2014-04-25T19:21:16Z +http://events.linkeddata.org/ldow2008/papers/23-bouquet-stoermer-entity-name-system.pdf|creationDate|2008-04-05 +http://events.linkeddata.org/ldow2008/papers/23-bouquet-stoermer-entity-name-system.pdf|tag|http://www.semanlink.net/tag/giovanni_tummarello +http://events.linkeddata.org/ldow2008/papers/23-bouquet-stoermer-entity-name-system.pdf|tag|http://www.semanlink.net/tag/okkam +http://events.linkeddata.org/ldow2008/papers/23-bouquet-stoermer-entity-name-system.pdf|tag|http://www.semanlink.net/tag/ldow2008 +http://events.linkeddata.org/ldow2008/papers/23-bouquet-stoermer-entity-name-system.pdf|comment|"The Semantic Web should provide a global space for the seamless integration of small knowledge bases (or local “semantic webs”) +into a global, open, decentralized and scalable knowledge +space.
+In this paper, we will try to defend the view that the practical realization of the grand vision of the Semantic Web as +a huge graph of interlinked data would be much easier and +faster if we could count on a service which, by analogy with +the DNS, we call an Entity Name System (ENS), namely a +service which stores and makes available for reuse URIs for +any type of entity in a fully decentralized and open knowledge publication space. +" +http://events.linkeddata.org/ldow2008/papers/23-bouquet-stoermer-entity-name-system.pdf|title|An Entity Name System for Linking Semantic Web Data +http://events.linkeddata.org/ldow2008/papers/23-bouquet-stoermer-entity-name-system.pdf|creationTime|2008-04-05T00:28:10Z +https://www.qwant.com/|creationDate|2015-01-01 +https://www.qwant.com/|tag|http://www.semanlink.net/tag/search_engines +https://www.qwant.com/|title|Qwant +https://www.qwant.com/|creationTime|2015-01-01T15:31:30Z +http://lib.tkk.fi/Diss/2007/isbn9789512289851/|creationDate|2008-01-01 +http://lib.tkk.fi/Diss/2007/isbn9789512289851/|tag|http://www.semanlink.net/tag/ora_lassila +http://lib.tkk.fi/Diss/2007/isbn9789512289851/|tag|http://www.semanlink.net/tag/knowledge_representation +http://lib.tkk.fi/Diss/2007/isbn9789512289851/|comment|"Dissertation for the degree of Doctor of Science in Technology
+In the approach taken in this dissertation, it is proposed that RDF graphs can be exposed +through a node-centric (i.e., “frame system”) API. Central to this API is a slot access function +Alookup (f , s, G ): “given graph G , give me the values of slot s of frame f ”... This +basic API can be extended by supporting a query language which allows complex access +paths – expressed as regular expressions of slot names (i.e., RDF properties) – to be used +in place of atomic slot names. + +" +http://lib.tkk.fi/Diss/2007/isbn9789512289851/|title|Ora Lassila - Programming Semantic Web Applications: A Synthesis of Knowledge Representation and Semi-Structured Data +http://lib.tkk.fi/Diss/2007/isbn9789512289851/|creationTime|2008-01-01T23:26:27Z +http://en.lodlive.it/|creationDate|2012-11-10 +http://en.lodlive.it/|tag|http://www.semanlink.net/tag/javascript_rdf +http://en.lodlive.it/|tag|http://www.semanlink.net/tag/data_web +http://en.lodlive.it/|title|LodLive - browsing the Web of Data +http://en.lodlive.it/|creationTime|2012-11-10T18:30:20Z +http://blog.bitflux.ch/archive/2004/7/5/spotlight_like_livesearch_added.html|creationDate|2005-05-13 +http://blog.bitflux.ch/archive/2004/7/5/spotlight_like_livesearch_added.html|tag|http://www.semanlink.net/tag/ihm_web +http://blog.bitflux.ch/archive/2004/7/5/spotlight_like_livesearch_added.html|tag|http://www.semanlink.net/tag/blog +http://blog.bitflux.ch/archive/2004/7/5/spotlight_like_livesearch_added.html|tag|http://www.semanlink.net/tag/livesearch +http://blog.bitflux.ch/archive/2004/7/5/spotlight_like_livesearch_added.html|tag|http://www.semanlink.net/tag/dev +http://blog.bitflux.ch/archive/2004/7/5/spotlight_like_livesearch_added.html|tag|http://www.semanlink.net/tag/good +http://blog.bitflux.ch/archive/2004/7/5/spotlight_like_livesearch_added.html|title|Spotlight-like livesearch +http://blog.bitflux.ch/archive/2004/7/5/spotlight_like_livesearch_added.html|seeAlso|http://blog.bitflux.ch/archive/2004/07/13/livesearch_roundup.html +http://torrez.us/code/www-rdfa/|creationDate|2008-04-21 +http://torrez.us/code/www-rdfa/|tag|http://www.semanlink.net/tag/rdfa +http://torrez.us/code/www-rdfa/|tag|http://www.semanlink.net/tag/elias_torres +http://torrez.us/code/www-rdfa/|tag|http://www.semanlink.net/tag/javascript +http://torrez.us/code/www-rdfa/|tag|http://www.semanlink.net/tag/tutorial +http://torrez.us/code/www-rdfa/|title|RDFa Tutorial WWW2008 (demo files) - Elias Torres +http://torrez.us/code/www-rdfa/|creationTime|2008-04-21T16:14:56Z +http://www.geekexchange.com/elementary-my-dear-watson-will-ibms-quiz-show-champion-outgrow-humankind-73517.html|creationDate|2013-07-31 +http://www.geekexchange.com/elementary-my-dear-watson-will-ibms-quiz-show-champion-outgrow-humankind-73517.html|tag|http://www.semanlink.net/tag/james_hendler +http://www.geekexchange.com/elementary-my-dear-watson-will-ibms-quiz-show-champion-outgrow-humankind-73517.html|tag|http://www.semanlink.net/tag/i_b_m_s_watson +http://www.geekexchange.com/elementary-my-dear-watson-will-ibms-quiz-show-champion-outgrow-humankind-73517.html|title|Elementary, My Dear Watson - Will IBM’s quiz show champion outgrow humankind? 
+http://www.geekexchange.com/elementary-my-dear-watson-will-ibms-quiz-show-champion-outgrow-humankind-73517.html|creationTime|2013-07-31T09:53:53Z
+https://plus.google.com/112609322932428633493/posts/N9ZjjdfHr4v|creationDate|2012-02-20
+https://plus.google.com/112609322932428633493/posts/N9ZjjdfHr4v|tag|http://www.semanlink.net/tag/rdf_in_json
+https://plus.google.com/112609322932428633493/posts/N9ZjjdfHr4v|tag|http://www.semanlink.net/tag/danny_ayers
+https://plus.google.com/112609322932428633493/posts/N9ZjjdfHr4v|tag|http://www.semanlink.net/tag/jena
+https://plus.google.com/112609322932428633493/posts/N9ZjjdfHr4v|title|Danny Ayers - Google+ - I've knocked together a serializer for Jena to the minimal…
+https://plus.google.com/112609322932428633493/posts/N9ZjjdfHr4v|creationTime|2012-02-20T22:56:19Z
+https://stackoverflow.com/questions/13131139/lemmatize-french-text|creationDate|2017-06-28
+https://stackoverflow.com/questions/13131139/lemmatize-french-text|tag|http://www.semanlink.net/tag/nlp_french
+https://stackoverflow.com/questions/13131139/lemmatize-french-text|title|Lemmatize French text - Stack Overflow
+https://stackoverflow.com/questions/13131139/lemmatize-french-text|creationTime|2017-06-28T16:58:37Z
+https://www.quora.com/What-are-some-good-papers-about-topic-modeling-on-Tweets|creationDate|2017-06-08
+https://www.quora.com/What-are-some-good-papers-about-topic-modeling-on-Tweets|tag|http://www.semanlink.net/tag/topic_modeling_over_short_texts
+https://www.quora.com/What-are-some-good-papers-about-topic-modeling-on-Tweets|title|What are some good papers about topic modeling on Tweets? - Quora
+https://www.quora.com/What-are-some-good-papers-about-topic-modeling-on-Tweets|creationTime|2017-06-08T01:04:29Z
+https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html|creationDate|2017-07-11
+https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html|tag|http://www.semanlink.net/tag/part_of_speech_tagging
+https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html|comment|Alphabetical list of part-of-speech tags used in the Penn Treebank Project
+https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html|title|Penn Treebank P.O.S. Tags
+https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html|creationTime|2017-07-11T14:48:26Z
+http://www.tumblr.com/|creationDate|2010-06-29
+http://www.tumblr.com/|tag|http://www.semanlink.net/tag/blog
+http://www.tumblr.com/|title|Tumblr
+http://www.tumblr.com/|creationTime|2010-06-29T14:05:31Z
+http://manu.sporny.org/2013/drm-in-html5/|creationDate|2013-04-27
+http://manu.sporny.org/2013/drm-in-html5/|tag|http://www.semanlink.net/tag/manu_sporny
+http://manu.sporny.org/2013/drm-in-html5/|tag|http://www.semanlink.net/tag/drm_in_html_5
+http://manu.sporny.org/2013/drm-in-html5/|title|DRM in HTML5 The Beautiful, Tormented Machine
+http://manu.sporny.org/2013/drm-in-html5/|creationTime|2013-04-27T19:56:15Z
+http://www.lemonde.fr/international/article_interactif/2010/12/04/huit-ecrivains-africains-racontent-l-afrique-qui-vient_1447623_3210.html|creationDate|2010-12-04
+http://www.lemonde.fr/international/article_interactif/2010/12/04/huit-ecrivains-africains-racontent-l-afrique-qui-vient_1447623_3210.html|tag|http://www.semanlink.net/tag/litterature_africaine
+http://www.lemonde.fr/international/article_interactif/2010/12/04/huit-ecrivains-africains-racontent-l-afrique-qui-vient_1447623_3210.html|title|Huit écrivains africains racontent l'Afrique qui vient
+http://www.lemonde.fr/international/article_interactif/2010/12/04/huit-ecrivains-africains-racontent-l-afrique-qui-vient_1447623_3210.html|creationTime|2010-12-04T12:23:59Z
+http://www.lemonde.fr/international/article_interactif/2010/12/04/huit-ecrivains-africains-racontent-l-afrique-qui-vient_1447623_3210.html|source|Le Monde
+https://www.lemonde.fr/planete/article/2018/08/20/l-huile-de-palme-menace-aussi-les-primates-d-afrique_5344271_3244.html|creationDate|2018-08-20
+https://www.lemonde.fr/planete/article/2018/08/20/l-huile-de-palme-menace-aussi-les-primates-d-afrique_5344271_3244.html|tag|http://www.semanlink.net/tag/huile_de_palme
+https://www.lemonde.fr/planete/article/2018/08/20/l-huile-de-palme-menace-aussi-les-primates-d-afrique_5344271_3244.html|title|L’huile de palme menace aussi les primates d’Afrique
+https://www.lemonde.fr/planete/article/2018/08/20/l-huile-de-palme-menace-aussi-les-primates-d-afrique_5344271_3244.html|creationTime|2018-08-20T22:50:33Z
+http://apassant.net/blog/2009/07/02/technologies-du-web-s%C3%A9mantique-pour-lentreprise-20-th%C3%A8se-et-slides-en-ligne|creationDate|2009-07-07
+http://apassant.net/blog/2009/07/02/technologies-du-web-s%C3%A9mantique-pour-lentreprise-20-th%C3%A8se-et-slides-en-ligne|tag|http://www.semanlink.net/tag/alexandre_passant
+http://apassant.net/blog/2009/07/02/technologies-du-web-s%C3%A9mantique-pour-lentreprise-20-th%C3%A8se-et-slides-en-ligne|tag|http://www.semanlink.net/tag/semantic_enterprise
+http://apassant.net/blog/2009/07/02/technologies-du-web-s%C3%A9mantique-pour-lentreprise-20-th%C3%A8se-et-slides-en-ligne|title|"""Technologies du Web Sémantique pour l'Entreprise 2.0"": Thèse et slides en ligne Alexandre Passant"
+http://apassant.net/blog/2009/07/02/technologies-du-web-s%C3%A9mantique-pour-lentreprise-20-th%C3%A8se-et-slides-en-ligne|creationTime|2009-07-07T19:20:37Z
+http://googlewebmastercentral.blogspot.fr/2008/06/improved-flash-indexing.html|creationDate|2013-08-23
+http://googlewebmastercentral.blogspot.fr/2008/06/improved-flash-indexing.html|tag|http://www.semanlink.net/tag/webmasters_google
+http://googlewebmastercentral.blogspot.fr/2008/06/improved-flash-indexing.html|tag|http://www.semanlink.net/tag/flash 
+http://googlewebmastercentral.blogspot.fr/2008/06/improved-flash-indexing.html|title|Official Google Webmaster Central Blog: Improved Flash indexing +http://googlewebmastercentral.blogspot.fr/2008/06/improved-flash-indexing.html|creationTime|2013-08-23T15:00:05Z +http://blogstats.wordpress.com/2009/10/27/sdmx-and-rdf-getting-acquainted/|creationDate|2010-07-16 +http://blogstats.wordpress.com/2009/10/27/sdmx-and-rdf-getting-acquainted/|tag|http://www.semanlink.net/tag/sdmx_rdf +http://blogstats.wordpress.com/2009/10/27/sdmx-and-rdf-getting-acquainted/|title|SDMX and RDF: Getting Acquainted « Blog about Stats +http://blogstats.wordpress.com/2009/10/27/sdmx-and-rdf-getting-acquainted/|creationTime|2010-07-16T14:13:28Z +https://docs.spring.io/spring/docs/current/spring-framework-reference/index.html|creationDate|2018-11-27 +https://docs.spring.io/spring/docs/current/spring-framework-reference/index.html|tag|http://www.semanlink.net/tag/documentation +https://docs.spring.io/spring/docs/current/spring-framework-reference/index.html|tag|http://www.semanlink.net/tag/spring_boot +https://docs.spring.io/spring/docs/current/spring-framework-reference/index.html|comment|"- [Spring web MVC](https://docs.spring.io/spring/docs/current/spring-framework-reference/web.html) + - [Handler methods, args](https://docs.spring.io/spring/docs/current/spring-framework-reference/web.html#mvc-ann-arguments) + + +" +https://docs.spring.io/spring/docs/current/spring-framework-reference/index.html|title|Spring Framework Documentation +https://docs.spring.io/spring/docs/current/spring-framework-reference/index.html|creationTime|2018-11-27T14:13:06Z +https://www.research.ibm.com/ibm-q/|creationDate|2018-08-21 +https://www.research.ibm.com/ibm-q/|tag|http://www.semanlink.net/tag/ibm +https://www.research.ibm.com/ibm-q/|tag|http://www.semanlink.net/tag/quantum_computing +https://www.research.ibm.com/ibm-q/|comment|IBM Q is an industry-first initiative to build commercially available universal quantum computers for business and science. +https://www.research.ibm.com/ibm-q/|title|Quantum Computing - IBM Q +https://www.research.ibm.com/ibm-q/|creationTime|2018-08-21T09:01:17Z +http://www.networkworld.com/article/3045549/internet/big-data-to-be-made-little-individuals-to-mine-data-too.html|creationDate|2016-03-27 +http://www.networkworld.com/article/3045549/internet/big-data-to-be-made-little-individuals-to-mine-data-too.html|tag|http://www.semanlink.net/tag/semanlink_related +http://www.networkworld.com/article/3045549/internet/big-data-to-be-made-little-individuals-to-mine-data-too.html|comment|"A Chrome extension being developed will allow web users to choose what they want to share, and with whom, possibly altering the now Google-ized Internet landscape forever.
+Making Big Data Small for Individual Consumption" +http://www.networkworld.com/article/3045549/internet/big-data-to-be-made-little-individuals-to-mine-data-too.html|title|Big data to be made little: Individuals to mine data too Network World +http://www.networkworld.com/article/3045549/internet/big-data-to-be-made-little-individuals-to-mine-data-too.html|creationTime|2016-03-27T18:05:17Z +http://fgiasson.com/blog/index.php/2007/04/24/converting-your-wordpress-and-mediawiki-data-into-rdf-on-the-fly/|creationDate|2010-05-21 +http://fgiasson.com/blog/index.php/2007/04/24/converting-your-wordpress-and-mediawiki-data-into-rdf-on-the-fly/|tag|http://www.semanlink.net/tag/wordpress +http://fgiasson.com/blog/index.php/2007/04/24/converting-your-wordpress-and-mediawiki-data-into-rdf-on-the-fly/|tag|http://www.semanlink.net/tag/virtuoso +http://fgiasson.com/blog/index.php/2007/04/24/converting-your-wordpress-and-mediawiki-data-into-rdf-on-the-fly/|tag|http://www.semanlink.net/tag/frederick_giasson +http://fgiasson.com/blog/index.php/2007/04/24/converting-your-wordpress-and-mediawiki-data-into-rdf-on-the-fly/|tag|http://www.semanlink.net/tag/converting_data_into_rdf +http://fgiasson.com/blog/index.php/2007/04/24/converting-your-wordpress-and-mediawiki-data-into-rdf-on-the-fly/|comment|This blog post will show you how we can do the same with your WordPress blog and your Mediawiki wiki using Virtuoso RDF Views. +http://fgiasson.com/blog/index.php/2007/04/24/converting-your-wordpress-and-mediawiki-data-into-rdf-on-the-fly/|title|Converting your WordPress and Mediawiki data into RDF on-the-fly at Frederick Giasson’s Weblog +http://fgiasson.com/blog/index.php/2007/04/24/converting-your-wordpress-and-mediawiki-data-into-rdf-on-the-fly/|creationTime|2010-05-21T14:17:50Z +http://www.jibbering.com/faq/notes/closures/|creationDate|2014-10-13 +http://www.jibbering.com/faq/notes/closures/|tag|http://www.semanlink.net/tag/javascript_closures +http://www.jibbering.com/faq/notes/closures/|title|Javascript Closures +http://www.jibbering.com/faq/notes/closures/|creationTime|2014-10-13T21:59:54Z +http://java-source.net|creationDate|2005-03-05 +http://java-source.net|tag|http://www.semanlink.net/tag/open_source +http://java-source.net|tag|http://www.semanlink.net/tag/java_dev +http://forums.fast.ai/t/jupyter-notebook-enhancements-tips-and-tricks/17064|creationDate|2018-10-11 +http://forums.fast.ai/t/jupyter-notebook-enhancements-tips-and-tricks/17064|tag|http://www.semanlink.net/tag/tips +http://forums.fast.ai/t/jupyter-notebook-enhancements-tips-and-tricks/17064|tag|http://www.semanlink.net/tag/jupyter +http://forums.fast.ai/t/jupyter-notebook-enhancements-tips-and-tricks/17064|title|Jupyter Notebook Enhancements, Tips And Tricks - Part 1 +http://forums.fast.ai/t/jupyter-notebook-enhancements-tips-and-tricks/17064|creationTime|2018-10-11T08:39:11Z +https://www.nytimes.com/2018/09/04/science/brazil-museum-fire.html|creationDate|2018-09-05 +https://www.nytimes.com/2018/09/04/science/brazil-museum-fire.html|tag|http://www.semanlink.net/tag/rio_de_janeiro +https://www.nytimes.com/2018/09/04/science/brazil-museum-fire.html|tag|http://www.semanlink.net/tag/musee +https://www.nytimes.com/2018/09/04/science/brazil-museum-fire.html|title|The Brazil Museum Fire: What Was Lost - The New York Times +https://www.nytimes.com/2018/09/04/science/brazil-museum-fire.html|creationTime|2018-09-05T22:58:06Z +http://www.w3.org/wiki/Web_and_Automotive|creationDate|2012-11-23 
+http://www.w3.org/wiki/Web_and_Automotive|tag|http://www.semanlink.net/tag/automobile_and_w3c +http://www.w3.org/wiki/Web_and_Automotive|title|Web and Automotive - W3C Wiki +http://www.w3.org/wiki/Web_and_Automotive|creationTime|2012-11-23T13:37:28Z +https://www.youtube.com/watch?v=v4mAuMp7dHs|creationDate|2014-03-29 +https://www.youtube.com/watch?v=v4mAuMp7dHs|tag|http://www.semanlink.net/tag/amour +https://www.youtube.com/watch?v=v4mAuMp7dHs|tag|http://www.semanlink.net/tag/youtube_video +https://www.youtube.com/watch?v=v4mAuMp7dHs|tag|http://www.semanlink.net/tag/musique_du_niger +https://www.youtube.com/watch?v=v4mAuMp7dHs|title|Soyeya Niger +https://www.youtube.com/watch?v=v4mAuMp7dHs|creationTime|2014-03-29T17:10:50Z +https://twitter.com/aureliengeron/status/1005483669929299969|creationDate|2018-06-10 +https://twitter.com/aureliengeron/status/1005483669929299969|tag|http://www.semanlink.net/tag/keras +https://twitter.com/aureliengeron/status/1005483669929299969|comment|"""In @TensorFlow 1.9, it is much easier to use Keras with the Data API: just pass data iterators, specify the number of steps per epoch, and you're good to go! Plus it works in both graph mode and eager mode, kudos to the TF team!… https://t.co/EH3hY50N0o""" +https://twitter.com/aureliengeron/status/1005483669929299969|title|"Aurélien Geron sur Twitter : ""In @TensorFlow 1.9, it is much easier to use Keras with the Data API...""" +https://twitter.com/aureliengeron/status/1005483669929299969|creationTime|2018-06-10T09:18:12Z +http://itsnat.sourceforge.net/php/spim/spi_manifesto_en.php|creationDate|2012-02-13 +http://itsnat.sourceforge.net/php/spim/spi_manifesto_en.php|tag|http://www.semanlink.net/tag/ajax +http://itsnat.sourceforge.net/php/spim/spi_manifesto_en.php|title|The Single Page Interface Manifesto +http://itsnat.sourceforge.net/php/spim/spi_manifesto_en.php|creationTime|2012-02-13T22:59:00Z +http://updates.html5rocks.com/2015/03/creating-semantic-sites-with-web-components-and-jsonld|creationDate|2015-03-10 +http://updates.html5rocks.com/2015/03/creating-semantic-sites-with-web-components-and-jsonld|tag|http://www.semanlink.net/tag/semantic_components +http://updates.html5rocks.com/2015/03/creating-semantic-sites-with-web-components-and-jsonld|tag|http://www.semanlink.net/tag/webcomponents +http://updates.html5rocks.com/2015/03/creating-semantic-sites-with-web-components-and-jsonld|tag|http://www.semanlink.net/tag/json_ld +http://updates.html5rocks.com/2015/03/creating-semantic-sites-with-web-components-and-jsonld|title|Creating semantic sites with Web Components and JSON-LD +http://updates.html5rocks.com/2015/03/creating-semantic-sites-with-web-components-and-jsonld|creationTime|2015-03-10T13:01:33Z +https://code.google.com/p/topic-modeling-tool/|creationDate|2013-09-03 +https://code.google.com/p/topic-modeling-tool/|tag|http://www.semanlink.net/tag/topic_modeling +https://code.google.com/p/topic-modeling-tool/|title|topic-modeling-tool - A graphical user interface tool for topic modeling - Google Project Hosting +https://code.google.com/p/topic-modeling-tool/|creationTime|2013-09-03T11:00:04Z +http://www.vidizmo.com/|creationDate|2014-11-17 +http://www.vidizmo.com/|title|Enterprise Video Portal & Streaming Media Solutions +http://www.vidizmo.com/|creationTime|2014-11-17T21:00:15Z +http://www.audiocite.net/livres-audio-gratuits-planete-actuelle/index.html|creationDate|2009-08-19 +http://www.audiocite.net/livres-audio-gratuits-planete-actuelle/index.html|tag|http://www.semanlink.net/tag/livres_audio 
+http://www.audiocite.net/livres-audio-gratuits-planete-actuelle/index.html|title|Audiocité - Livres audio +http://www.audiocite.net/livres-audio-gratuits-planete-actuelle/index.html|creationTime|2009-08-19T18:35:38Z +http://www-128.ibm.com/developerworks/java/library/j-threads1.html|creationDate|2005-10-29 +http://www-128.ibm.com/developerworks/java/library/j-threads1.html|tag|http://www.semanlink.net/tag/java_concurrency +http://www-128.ibm.com/developerworks/java/library/j-threads1.html|title|Threading lightly, Part 1: Synchronization is not the enemy +https://theintercept.com/2015/04/27/encrypting-laptop-like-mean/|creationDate|2015-11-20 +https://theintercept.com/2015/04/27/encrypting-laptop-like-mean/|tag|http://www.semanlink.net/tag/https +https://theintercept.com/2015/04/27/encrypting-laptop-like-mean/|creationTime|2015-11-20T00:45:15Z +https://stackoverflow.com/questions/20727552/abbreviation-detection|creationDate|2019-04-03 +https://stackoverflow.com/questions/20727552/abbreviation-detection|tag|http://www.semanlink.net/tag/acronyms_nlp +https://stackoverflow.com/questions/20727552/abbreviation-detection|title|nlp - Abbreviation detection - Stack Overflow +https://stackoverflow.com/questions/20727552/abbreviation-detection|creationTime|2019-04-03T13:02:59Z +https://medium.com/neo4j/article-recommendation-with-personalized-pagerank-and-full-text-search-c0203dd833e8|creationDate|2019-02-28 +https://medium.com/neo4j/article-recommendation-with-personalized-pagerank-and-full-text-search-c0203dd833e8|tag|http://www.semanlink.net/tag/pagerank +https://medium.com/neo4j/article-recommendation-with-personalized-pagerank-and-full-text-search-c0203dd833e8|tag|http://www.semanlink.net/tag/neo4j +https://medium.com/neo4j/article-recommendation-with-personalized-pagerank-and-full-text-search-c0203dd833e8|tag|http://www.semanlink.net/tag/recommender_systems +https://medium.com/neo4j/article-recommendation-with-personalized-pagerank-and-full-text-search-c0203dd833e8|title|Article recommendation with Personalized PageRank and Full Text Search +https://medium.com/neo4j/article-recommendation-with-personalized-pagerank-and-full-text-search-c0203dd833e8|creationTime|2019-02-28T13:17:25Z +https://addons.mozilla.org/fr/firefox/addon/249|creationDate|2009-04-28 +https://addons.mozilla.org/fr/firefox/addon/249|tag|http://www.semanlink.net/tag/firefox_extension +https://addons.mozilla.org/fr/firefox/addon/249|tag|http://www.semanlink.net/tag/ajax +https://addons.mozilla.org/fr/firefox/addon/249|tag|http://www.semanlink.net/tag/html_dev +https://addons.mozilla.org/fr/firefox/addon/249|tag|http://www.semanlink.net/tag/validation +https://addons.mozilla.org/fr/firefox/addon/249|comment|HTML Validator is a Mozilla extension that adds HTML validation inside Firefox and Mozilla. The number of errors of a HTML page is seen on the form of an icon in the status bar when browsing. The extension can validate the HTML sent by the server or the HTML in the memory (after Ajax execution) ... 
+https://addons.mozilla.org/fr/firefox/addon/249|title|Html Validator :: Modules pour Firefox +https://addons.mozilla.org/fr/firefox/addon/249|creationTime|2009-04-28T10:50:20Z +http://www4.wiwiss.fu-berlin.de/bizer/d2rq/spec/20061030/#jena-assembler|creationDate|2008-11-17 +http://www4.wiwiss.fu-berlin.de/bizer/d2rq/spec/20061030/#jena-assembler|tag|http://www.semanlink.net/tag/jena_assembler +http://www4.wiwiss.fu-berlin.de/bizer/d2rq/spec/20061030/#jena-assembler|tag|http://www.semanlink.net/tag/d2rq +http://www4.wiwiss.fu-berlin.de/bizer/d2rq/spec/20061030/#jena-assembler|title|Jena Assembler (D2RQ V0.5 - User Manual) +http://www4.wiwiss.fu-berlin.de/bizer/d2rq/spec/20061030/#jena-assembler|creationTime|2008-11-17T00:39:25Z +http://www.hippasus.com/resources/mapdelicious/|creationDate|2005-04-22 +http://www.hippasus.com/resources/mapdelicious/|tag|http://www.semanlink.net/tag/del_icio_us +http://www.hippasus.com/resources/mapdelicious/|tag|http://www.semanlink.net/tag/dev +http://wordpress.org/extend/plugins/poolparty-thesaurus/|creationDate|2011-12-27 +http://wordpress.org/extend/plugins/poolparty-thesaurus/|tag|http://www.semanlink.net/tag/thesaurus +http://wordpress.org/extend/plugins/poolparty-thesaurus/|tag|http://www.semanlink.net/tag/wordpress +http://wordpress.org/extend/plugins/poolparty-thesaurus/|tag|http://www.semanlink.net/tag/skos +http://wordpress.org/extend/plugins/poolparty-thesaurus/|title|WordPress › PoolParty Thesaurus « WordPress Plugins +http://wordpress.org/extend/plugins/poolparty-thesaurus/|creationTime|2011-12-27T18:11:17Z +http://fr.wikipedia.org/wiki/99_francs_(film)|creationDate|2012-05-04 +http://fr.wikipedia.org/wiki/99_francs_(film)|tag|http://www.semanlink.net/tag/jean_dujardin +http://fr.wikipedia.org/wiki/99_francs_(film)|tag|http://www.semanlink.net/tag/publicite +http://fr.wikipedia.org/wiki/99_francs_(film)|tag|http://www.semanlink.net/tag/drogues +http://fr.wikipedia.org/wiki/99_francs_(film)|tag|http://www.semanlink.net/tag/film_francais +http://fr.wikipedia.org/wiki/99_francs_(film)|title|99 francs (film) +http://fr.wikipedia.org/wiki/99_francs_(film)|creationTime|2012-05-04T01:22:30Z +http://www.w3.org/QA/2011/10/steve_jobs.html|creationDate|2012-07-06 +http://www.w3.org/QA/2011/10/steve_jobs.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.w3.org/QA/2011/10/steve_jobs.html|tag|http://www.semanlink.net/tag/steve_jobs +http://www.w3.org/QA/2011/10/steve_jobs.html|comment|TBL about Steve Jobs +http://www.w3.org/QA/2011/10/steve_jobs.html|title|Steve Jobs and the actually usable computer - W3C Blog +http://www.w3.org/QA/2011/10/steve_jobs.html|creationTime|2012-07-06T11:13:10Z +http://www.nytimes.com/2016/03/28/business/dealbook/ethereum-a-virtual-currency-enables-transactions-that-rival-bitcoins.html?partner=rss&emc=rss|creationDate|2016-03-28 +http://www.nytimes.com/2016/03/28/business/dealbook/ethereum-a-virtual-currency-enables-transactions-that-rival-bitcoins.html?partner=rss&emc=rss|tag|http://www.semanlink.net/tag/ethereum +http://www.nytimes.com/2016/03/28/business/dealbook/ethereum-a-virtual-currency-enables-transactions-that-rival-bitcoins.html?partner=rss&emc=rss|title|Ethereum, a Virtual Currency, Enables Transactions That Rival Bitcoin’s - The New York Times +http://www.nytimes.com/2016/03/28/business/dealbook/ethereum-a-virtual-currency-enables-transactions-that-rival-bitcoins.html?partner=rss&emc=rss|creationTime|2016-03-28T12:49:21Z 
+http://www.flax.co.uk/blog/2013/12/17/principles-of-solr-application-design-%e2%80%93-part-2-of-2/|creationDate|2015-03-14 +http://www.flax.co.uk/blog/2013/12/17/principles-of-solr-application-design-%e2%80%93-part-2-of-2/|tag|http://www.semanlink.net/tag/solr +http://www.flax.co.uk/blog/2013/12/17/principles-of-solr-application-design-%e2%80%93-part-2-of-2/|title|Principles of Solr application design – part 2 of 2 +http://www.flax.co.uk/blog/2013/12/17/principles-of-solr-application-design-%e2%80%93-part-2-of-2/|creationTime|2015-03-14T23:36:09Z +https://github.com/rdfjs/rdfjs.org|creationDate|2015-10-15 +https://github.com/rdfjs/rdfjs.org|tag|http://www.semanlink.net/tag/javascript_rdf +https://github.com/rdfjs/rdfjs.org|title|rdfjs/rdfjs.org +https://github.com/rdfjs/rdfjs.org|creationTime|2015-10-15T10:29:50Z +http://noone.org/blog/English/Computer/Web/Blosxom/Blosxom%20Tagging%20Plugin%20Version%200.02.futile|creationDate|2006-02-05 +http://noone.org/blog/English/Computer/Web/Blosxom/Blosxom%20Tagging%20Plugin%20Version%200.02.futile|tag|http://www.semanlink.net/tag/blosxom +http://noone.org/blog/English/Computer/Web/Blosxom/Blosxom%20Tagging%20Plugin%20Version%200.02.futile|tag|http://www.semanlink.net/tag/tagging +http://noone.org/blog/English/Computer/Web/Blosxom/Blosxom%20Tagging%20Plugin%20Version%200.02.futile|title|Blosxom Plugin Tagging +http://semarglproject.org|creationDate|2013-01-07 +http://semarglproject.org|tag|http://www.semanlink.net/tag/rdfa_1_1 +http://semarglproject.org|tag|http://www.semanlink.net/tag/manu_sporny +http://semarglproject.org|comment|RDFa 1.1 processor - java +http://semarglproject.org|title|Semargl: better linked data processing +http://semarglproject.org|creationTime|2013-01-07T22:38:45Z +http://www.rsf.org/article.php3?id_article=24888|creationDate|2008-01-10 +http://www.rsf.org/article.php3?id_article=24888|tag|http://www.semanlink.net/tag/moussa_kaka +http://www.rsf.org/article.php3?id_article=24888|tag|http://www.semanlink.net/tag/reporters_sans_frontieres +http://www.rsf.org/article.php3?id_article=24888|tag|http://www.semanlink.net/tag/liberte_de_la_presse +http://www.rsf.org/article.php3?id_article=24888|title|100è jour de détention pour Moussa Kaka : Reporters sans frontières exprime sa solidarité avec son correspondant incarcéré +http://www.rsf.org/article.php3?id_article=24888|creationTime|2008-01-10T01:08:07Z +http://iswc2008.semanticweb.org/workshops/ontology-supported-business-intelligence/|creationDate|2010-07-31 +http://iswc2008.semanticweb.org/workshops/ontology-supported-business-intelligence/|tag|http://www.semanlink.net/tag/business_intelligence_and_semantic_web +http://iswc2008.semanticweb.org/workshops/ontology-supported-business-intelligence/|tag|http://www.semanlink.net/tag/ontologies +http://iswc2008.semanticweb.org/workshops/ontology-supported-business-intelligence/|title|Ontology-supported Business Intelligence (ISWC 2008) +http://iswc2008.semanticweb.org/workshops/ontology-supported-business-intelligence/|creationTime|2010-07-31T13:47:38Z +http://www.europe-solidaire.org/spip.php?article7952|creationDate|2009-03-10 +http://www.europe-solidaire.org/spip.php?article7952|tag|http://www.semanlink.net/tag/equateur +http://www.europe-solidaire.org/spip.php?article7952|tag|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.europe-solidaire.org/spip.php?article7952|tag|http://www.semanlink.net/tag/texaco +http://www.europe-solidaire.org/spip.php?article7952|tag|http://www.semanlink.net/tag/catastrophe_ecologique 
+http://www.europe-solidaire.org/spip.php?article7952|title|[Europe Solidaire Sans Frontières] Equateur : Les marais noirs de Texaco
+http://www.europe-solidaire.org/spip.php?article7952|creationTime|2009-03-10T23:05:03Z
+http://www.nasdaq.com/press-release/atos-and-its-partners-launch-the-datalift-platform-20140324-00666|creationDate|2014-03-26
+http://www.nasdaq.com/press-release/atos-and-its-partners-launch-the-datalift-platform-20140324-00666|tag|http://www.semanlink.net/tag/atos_origin
+http://www.nasdaq.com/press-release/atos-and-its-partners-launch-the-datalift-platform-20140324-00666|tag|http://www.semanlink.net/tag/datalift
+http://www.nasdaq.com/press-release/atos-and-its-partners-launch-the-datalift-platform-20140324-00666|comment|Big Data to knowledge - DataLift helping to create digital value
+http://www.nasdaq.com/press-release/atos-and-its-partners-launch-the-datalift-platform-20140324-00666|title|Atos and its partners launch the DataLift platform - NASDAQ.com
+http://www.nasdaq.com/press-release/atos-and-its-partners-launch-the-datalift-platform-20140324-00666|creationTime|2014-03-26T13:22:41Z
+http://www.semanticuniverse.com/articles-semantic-technology-and-master-data-management.html|creationDate|2010-08-24
+http://www.semanticuniverse.com/articles-semantic-technology-and-master-data-management.html|tag|http://www.semanlink.net/tag/master_data_management
+http://www.semanticuniverse.com/articles-semantic-technology-and-master-data-management.html|tag|http://www.semanlink.net/tag/semantic_enterprise
+http://www.semanticuniverse.com/articles-semantic-technology-and-master-data-management.html|title|Semantic Technology and Master Data Management Semantic Universe
+http://www.semanticuniverse.com/articles-semantic-technology-and-master-data-management.html|creationTime|2010-08-24T23:37:04Z
+http://www.w3.org/TR/r2rml/|creationDate|2010-11-07
+http://www.w3.org/TR/r2rml/|tag|http://www.semanlink.net/tag/r2rml
+http://www.w3.org/TR/r2rml/|title|R2RML: RDB to RDF Mapping Language
+http://www.w3.org/TR/r2rml/|creationTime|2010-11-07T13:02:28Z
+http://www.lemonde.fr/livres/article/2012/08/01/le-romancier-americain-gore-vidal-meurt-a-86-ans_1740843_3260.html|creationDate|2012-08-01
+http://www.lemonde.fr/livres/article/2012/08/01/le-romancier-americain-gore-vidal-meurt-a-86-ans_1740843_3260.html|tag|http://www.semanlink.net/tag/gore_vidal
+http://www.lemonde.fr/livres/article/2012/08/01/le-romancier-americain-gore-vidal-meurt-a-86-ans_1740843_3260.html|title|Le romancier américain Gore Vidal meurt à 86 ans
+http://www.lemonde.fr/livres/article/2012/08/01/le-romancier-americain-gore-vidal-meurt-a-86-ans_1740843_3260.html|creationTime|2012-08-01T22:07:17Z
+http://www.seoskeptic.com/what-is-json-ld/|creationDate|2014-09-10
+http://www.seoskeptic.com/what-is-json-ld/|tag|http://www.semanlink.net/tag/schema_org_actions
+http://www.seoskeptic.com/what-is-json-ld/|tag|http://www.semanlink.net/tag/gregg_kellogg
+http://www.seoskeptic.com/what-is-json-ld/|tag|http://www.semanlink.net/tag/json_ld
+http://www.seoskeptic.com/what-is-json-ld/|title|What is JSON-LD? A Talk with Gregg Kellogg
+http://www.seoskeptic.com/what-is-json-ld/|creationTime|2014-09-10T23:15:14Z
+https://stackoverflow.com/questions/22354094/pythonic-way-of-detecting-outliers-in-one-dimensional-observation-data?noredirect=1&lq=1|creationDate|2018-10-23
+https://stackoverflow.com/questions/22354094/pythonic-way-of-detecting-outliers-in-one-dimensional-observation-data?noredirect=1&lq=1|tag|http://www.semanlink.net/tag/stack_overflow
+https://stackoverflow.com/questions/22354094/pythonic-way-of-detecting-outliers-in-one-dimensional-observation-data?noredirect=1&lq=1|tag|http://www.semanlink.net/tag/out_of_distribution_detection
+https://stackoverflow.com/questions/22354094/pythonic-way-of-detecting-outliers-in-one-dimensional-observation-data?noredirect=1&lq=1|title|python - Pythonic way of detecting outliers in one dimensional observation data - Stack Overflow
+https://stackoverflow.com/questions/22354094/pythonic-way-of-detecting-outliers-in-one-dimensional-observation-data?noredirect=1&lq=1|creationTime|2018-10-23T09:38:41Z
+http://www.cert.org/advisories/CA-2000-02.html|creationDate|2006-12-10
+http://www.cert.org/advisories/CA-2000-02.html|tag|http://www.semanlink.net/tag/web_dev
+http://www.cert.org/advisories/CA-2000-02.html|tag|http://www.semanlink.net/tag/malicious_code
+http://www.cert.org/advisories/CA-2000-02.html|comment|A web site may inadvertently include malicious HTML tags or script in a dynamically generated page based on unvalidated input from untrustworthy sources. This can be a problem when a web server does not adequately ensure that generated pages are properly encoded to prevent unintended execution of scripts, and when input is not validated to prevent malicious HTML from being presented to the user.
+http://www.cert.org/advisories/CA-2000-02.html|title|CERT Advisory CA-2000-02 Malicious HTML Tags Embedded in Client Web Requests
+http://jena.sourceforge.net/ARQ/|creationDate|2005-10-10
+http://jena.sourceforge.net/ARQ/|tag|http://www.semanlink.net/tag/arq
+http://jena.sourceforge.net/ARQ/|title|ARQ - A SPARQL Processor for Jena
+http://commons.apache.org/jcs/|creationDate|2013-01-25
+http://commons.apache.org/jcs/|tag|http://www.semanlink.net/tag/jcs_java_caching_system
+http://commons.apache.org/jcs/|title|JCS - Java Caching System
+http://commons.apache.org/jcs/|creationTime|2013-01-25T10:57:14Z
+http://bnode.org/blog/2010/01/26/microdata-semantic-markup-for-both-rdfers-and-non-rdfers|creationDate|2011-06-10
+http://bnode.org/blog/2010/01/26/microdata-semantic-markup-for-both-rdfers-and-non-rdfers|tag|http://www.semanlink.net/tag/microdata
+http://bnode.org/blog/2010/01/26/microdata-semantic-markup-for-both-rdfers-and-non-rdfers|tag|http://www.semanlink.net/tag/benjamin_nowack
+http://bnode.org/blog/2010/01/26/microdata-semantic-markup-for-both-rdfers-and-non-rdfers|title|Microdata, semantic markup for both RDFers and non-RDFers - benjamin nowack's blog
+http://bnode.org/blog/2010/01/26/microdata-semantic-markup-for-both-rdfers-and-non-rdfers|creationTime|2011-06-10T01:15:11Z
+https://www.zbw.eu/fileadmin/pdf/forschung/2017-colloquium-galke-word-embeddings.pdf|creationDate|2018-01-28
+https://www.zbw.eu/fileadmin/pdf/forschung/2017-colloquium-galke-word-embeddings.pdf|tag|http://www.semanlink.net/tag/embeddings_in_ir
+https://www.zbw.eu/fileadmin/pdf/forschung/2017-colloquium-galke-word-embeddings.pdf|tag|http://www.semanlink.net/tag/using_word_embedding
+https://www.zbw.eu/fileadmin/pdf/forschung/2017-colloquium-galke-word-embeddings.pdf|tag|http://www.semanlink.net/tag/text_similarity +https://www.zbw.eu/fileadmin/pdf/forschung/2017-colloquium-galke-word-embeddings.pdf|comment|"> Transferring the success of word embeddings to Information Retrieval (IR) task is currently an active research topic. While embedding-based retrieval models could tackle the vocabulary mismatch problem by making use of the embedding’s inherent similarity between distinct words, most of them struggle to compete with the prevalent strong baselines such as TF-IDF and BM25. + +Considering a practical ad-hoc IR task composed of two steps, matching and scoring, compares the performance of several techniques that leverage word embeddings in the retrieval models to compute the similarity between the query and the documents (namely word centroid similarity, paragraph vectors, Word Mover’s distance, as well as a novel inverse document frequency (IDF) re-weighted word centroid similarity). + +> We confirm that word embeddings can be successfully employed in a practical information retrieval setting. The proposed cosine similarity of IDF re-weighted, aggregated word vectors is competitive to the TF-IDF baseline." +https://www.zbw.eu/fileadmin/pdf/forschung/2017-colloquium-galke-word-embeddings.pdf|title|Evaluating the Impact of Word Embeddings on Similarity Scoring in Practical Information Retrieval (2017) +https://www.zbw.eu/fileadmin/pdf/forschung/2017-colloquium-galke-word-embeddings.pdf|creationTime|2018-01-28T17:19:03Z +http://www.saudiaramcoworld.com/issue/200405/what.was.jiroft..htm|creationDate|2008-01-06 +http://www.saudiaramcoworld.com/issue/200405/what.was.jiroft..htm|tag|http://www.semanlink.net/tag/jiroft +http://www.saudiaramcoworld.com/issue/200405/what.was.jiroft..htm|title|Saudi Aramco World : What Was Jiroft? 
+http://www.saudiaramcoworld.com/issue/200405/what.was.jiroft..htm|creationTime|2008-01-06T03:25:00Z +http://www-128.ibm.com/developerworks/library/j-ruby/?ca=dgr-lnxw01RubyOffRails|creationDate|2005-12-31 +http://www-128.ibm.com/developerworks/library/j-ruby/?ca=dgr-lnxw01RubyOffRails|tag|http://www.semanlink.net/tag/ruby +http://www-128.ibm.com/developerworks/library/j-ruby/?ca=dgr-lnxw01RubyOffRails|tag|http://www.semanlink.net/tag/java +http://www-128.ibm.com/developerworks/library/j-ruby/?ca=dgr-lnxw01RubyOffRails|title|Ruby off the Rails +http://www.info.uni-karlsruhe.de/~frick/gd/index.html|creationDate|2005-09-26 +http://www.info.uni-karlsruhe.de/~frick/gd/index.html|tag|http://www.semanlink.net/tag/graph_visualization +http://www.info.uni-karlsruhe.de/~frick/gd/index.html|title|Graph Drawing +https://graphaware.com/neo4j/2017/10/03/efficient-unsupervised-topic-extraction-nlp-neo4j.html|creationDate|2017-10-04 +https://graphaware.com/neo4j/2017/10/03/efficient-unsupervised-topic-extraction-nlp-neo4j.html|tag|http://www.semanlink.net/tag/textrank +https://graphaware.com/neo4j/2017/10/03/efficient-unsupervised-topic-extraction-nlp-neo4j.html|tag|http://www.semanlink.net/tag/neo4j +https://graphaware.com/neo4j/2017/10/03/efficient-unsupervised-topic-extraction-nlp-neo4j.html|title|Efficient unsupervised keywords extraction using graphs +https://graphaware.com/neo4j/2017/10/03/efficient-unsupervised-topic-extraction-nlp-neo4j.html|creationTime|2017-10-04T23:01:42Z +http://lucene.472066.n3.nabble.com/question-on-solr-ASCIIFoldingFilterFactory-td2780463.html|creationDate|2012-11-25 +http://lucene.472066.n3.nabble.com/question-on-solr-ASCIIFoldingFilterFactory-td2780463.html|tag|http://www.semanlink.net/tag/solr_not_english_only +http://lucene.472066.n3.nabble.com/question-on-solr-ASCIIFoldingFilterFactory-td2780463.html|title|Solr - User - question on solr.ASCIIFoldingFilterFactory +http://lucene.472066.n3.nabble.com/question-on-solr-ASCIIFoldingFilterFactory-td2780463.html|creationTime|2012-11-25T23:59:14Z +http://www.amberdown.net/2009/10/owl2-for-rdf-vocabs/|creationDate|2011-07-17 +http://www.amberdown.net/2009/10/owl2-for-rdf-vocabs/|tag|http://www.semanlink.net/tag/rdf_vocabularies +http://www.amberdown.net/2009/10/owl2-for-rdf-vocabs/|tag|http://www.semanlink.net/tag/dave_reynolds +http://www.amberdown.net/2009/10/owl2-for-rdf-vocabs/|tag|http://www.semanlink.net/tag/owl_2 +http://www.amberdown.net/2009/10/owl2-for-rdf-vocabs/|title|Semai » Blog Archive » OWL 2 for RDF vocabularies +http://www.amberdown.net/2009/10/owl2-for-rdf-vocabs/|creationTime|2011-07-17T18:56:22Z +http://3roundstones.com/linking-enterprise-data/|creationDate|2010-10-25 +http://3roundstones.com/linking-enterprise-data/|tag|http://www.semanlink.net/tag/linking_enterprise_data +http://3roundstones.com/linking-enterprise-data/|title|Linking Enterprise Data: the book +http://3roundstones.com/linking-enterprise-data/|creationTime|2010-10-25T23:23:00Z +https://web.stanford.edu/~jurafsky/slp3/|creationDate|2018-03-15 +https://web.stanford.edu/~jurafsky/slp3/|tag|http://www.semanlink.net/tag/nlp_book +https://web.stanford.edu/~jurafsky/slp3/|tag|http://www.semanlink.net/tag/dan_jurafsky +https://web.stanford.edu/~jurafsky/slp3/|title|Speech and Language Processing +https://web.stanford.edu/~jurafsky/slp3/|creationTime|2018-03-15T13:55:54Z +http://static.flickr.com/33/54411225_f3555d9409.jpg|creationDate|2006-03-30 +http://static.flickr.com/33/54411225_f3555d9409.jpg|tag|http://www.semanlink.net/tag/euphrasie 
+http://static.flickr.com/33/54411225_f3555d9409.jpg|title|Euphrasie à Grand-Popo
+http://blogs.techworld.com/views-from-the-lab/2011/09/to-sql-or-nosql---thats-not-the-question/index.htm|creationDate|2013-03-12
+http://blogs.techworld.com/views-from-the-lab/2011/09/to-sql-or-nosql---thats-not-the-question/index.htm|tag|http://www.semanlink.net/tag/nosql_vs_sql
+http://blogs.techworld.com/views-from-the-lab/2011/09/to-sql-or-nosql---thats-not-the-question/index.htm|tag|http://www.semanlink.net/tag/nosql
+http://blogs.techworld.com/views-from-the-lab/2011/09/to-sql-or-nosql---thats-not-the-question/index.htm|title|To SQL or NoSQL - that's not the question - Views from the Lab - Blogs - Technology Blog and Community from IT Experts - Techworld.com
+http://blogs.techworld.com/views-from-the-lab/2011/09/to-sql-or-nosql---thats-not-the-question/index.htm|creationTime|2013-03-12T16:51:29Z
+http://yro.slashdot.org/story/13/11/22/1929234/tor-now-comes-in-a-box?utm_source=rss1.0mainlinkanon&utm_medium=feed|creationDate|2013-11-29
+http://yro.slashdot.org/story/13/11/22/1929234/tor-now-comes-in-a-box?utm_source=rss1.0mainlinkanon&utm_medium=feed|tag|http://www.semanlink.net/tag/tor_anonymity_network
+http://yro.slashdot.org/story/13/11/22/1929234/tor-now-comes-in-a-box?utm_source=rss1.0mainlinkanon&utm_medium=feed|title|Tor Now Comes In a Box - Slashdot
+http://yro.slashdot.org/story/13/11/22/1929234/tor-now-comes-in-a-box?utm_source=rss1.0mainlinkanon&utm_medium=feed|creationTime|2013-11-29T14:08:12Z
+http://simon.incutio.com/notes/2006/summit/schachter.txt|creationDate|2006-06-25
+http://simon.incutio.com/notes/2006/summit/schachter.txt|tag|http://www.semanlink.net/tag/del_icio_us
+http://simon.incutio.com/notes/2006/summit/schachter.txt|title|"Joshua Schachter, del.icio.us: Things to look out for when building a large application.
+"
+http://linkeddata.org/|creationDate|2008-03-20
+http://linkeddata.org/|tag|http://www.semanlink.net/tag/linked_data
+http://linkeddata.org/|title|linkeddata.org
+http://linkeddata.org/|creationTime|2008-03-20T13:01:58Z
+https://www.zdnet.com/article/google-explores-ais-mysterious-polytope/|creationDate|2019-02-09
+https://www.zdnet.com/article/google-explores-ais-mysterious-polytope/|tag|http://www.semanlink.net/tag/google_brain
+https://www.zdnet.com/article/google-explores-ais-mysterious-polytope/|tag|http://www.semanlink.net/tag/ml_google
+https://www.zdnet.com/article/google-explores-ais-mysterious-polytope/|tag|http://www.semanlink.net/tag/abstractions_in_ai
+https://www.zdnet.com/article/google-explores-ais-mysterious-polytope/|title|Google explores AI's mysterious polytope ZDNet
+https://www.zdnet.com/article/google-explores-ais-mysterious-polytope/|creationTime|2019-02-09T01:52:31Z
+http://www.lac.ox.ac.uk/sites/sias/files/documents/Schwarcz47.pdf|creationDate|2014-07-27
+http://www.lac.ox.ac.uk/sites/sias/files/documents/Schwarcz47.pdf|tag|http://www.semanlink.net/tag/bresil
+http://www.lac.ox.ac.uk/sites/sias/files/documents/Schwarcz47.pdf|title|Not black, not white: just the opposite. Culture, race and national identity in Brazil
+http://www.lac.ox.ac.uk/sites/sias/files/documents/Schwarcz47.pdf|creationTime|2014-07-27T22:43:27Z
+http://news.bbc.co.uk/2/hi/science/nature/7399226.stm|creationDate|2008-05-17
+http://news.bbc.co.uk/2/hi/science/nature/7399226.stm|tag|http://www.semanlink.net/tag/phoenix_mars_lander
+http://news.bbc.co.uk/2/hi/science/nature/7399226.stm|title|BBC NEWS Mars probe set for risky descent
+http://news.bbc.co.uk/2/hi/science/nature/7399226.stm|creationTime|2008-05-17T00:31:40Z
+http://news.bbc.co.uk/2/hi/science/nature/7399226.stm|source|BBC
+http://web.archive.org/web/20001021022558/http://www.hypersolutions.fr/news.ssi|creationDate|2010-05-17
+http://web.archive.org/web/20001021022558/http://www.hypersolutions.fr/news.ssi|tag|http://www.semanlink.net/tag/hypersolutions
+http://web.archive.org/web/20001021022558/http://www.hypersolutions.fr/news.ssi|tag|http://www.semanlink.net/tag/about_semanlink
+http://web.archive.org/web/20001021022558/http://www.hypersolutions.fr/news.ssi|title|Nouveautés sur le site hyperSOLutions (1998)
+http://web.archive.org/web/20001021022558/http://www.hypersolutions.fr/news.ssi|creationTime|2010-05-17T12:27:13Z
+http://www.w3.org/QA/2011/11/schemaorg_and_rdfa_11_lite_how.html|creationDate|2013-06-12
+http://www.w3.org/QA/2011/11/schemaorg_and_rdfa_11_lite_how.html|tag|http://www.semanlink.net/tag/ivan_herman
+http://www.w3.org/QA/2011/11/schemaorg_and_rdfa_11_lite_how.html|tag|http://www.semanlink.net/tag/schema_org
+http://www.w3.org/QA/2011/11/schemaorg_and_rdfa_11_lite_how.html|tag|http://www.semanlink.net/tag/rdfa_1_1_lite
+http://www.w3.org/QA/2011/11/schemaorg_and_rdfa_11_lite_how.html|title|Schema.org and RDFa 1.1 Lite: how does it look now? - W3C Blog
+http://www.w3.org/QA/2011/11/schemaorg_and_rdfa_11_lite_how.html|creationTime|2013-06-12T13:23:24Z
+http://lists.w3.org/Archives/Public/public-hydra/2014Mar/0145.html|creationDate|2015-02-18
+http://lists.w3.org/Archives/Public/public-hydra/2014Mar/0145.html|tag|http://www.semanlink.net/tag/public_hydra_w3_org
+http://lists.w3.org/Archives/Public/public-hydra/2014Mar/0145.html|tag|http://www.semanlink.net/tag/ruben_verborgh
+http://lists.w3.org/Archives/Public/public-hydra/2014Mar/0145.html|title|"Re: How to avoid that collections ""break"" relationships from Ruben Verborgh on 2014-03-31 (public-hydra@w3.org from March 2014)"
+http://lists.w3.org/Archives/Public/public-hydra/2014Mar/0145.html|creationTime|2015-02-18T16:40:23Z
+https://techcrunch.com/2016/07/06/france-wants-to-rethink-the-state-as-a-platform/|creationDate|2016-07-13
+https://techcrunch.com/2016/07/06/france-wants-to-rethink-the-state-as-a-platform/|tag|http://www.semanlink.net/tag/franceconnect
+https://techcrunch.com/2016/07/06/france-wants-to-rethink-the-state-as-a-platform/|title|France wants to rethink the state as a platform TechCrunch
+https://techcrunch.com/2016/07/06/france-wants-to-rethink-the-state-as-a-platform/|creationTime|2016-07-13T12:45:15Z
+http://www.readwriteweb.com/cloud/2010/12/host-your-own-delicious-altern.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|creationDate|2010-12-21
+http://www.readwriteweb.com/cloud/2010/12/host-your-own-delicious-altern.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|tag|http://www.semanlink.net/tag/del_icio_us
+http://www.readwriteweb.com/cloud/2010/12/host-your-own-delicious-altern.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|title|3 Ways to Host Your Own Delicious Alternative - ReadWriteCloud +http://www.readwriteweb.com/cloud/2010/12/host-your-own-delicious-altern.php?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+readwriteweb+%28ReadWriteWeb%29|creationTime|2010-12-21T15:29:45Z +https://github.com/andrewoma/dexx|creationDate|2017-01-06 +https://github.com/andrewoma/dexx|tag|http://www.semanlink.net/tag/persistent_data_structure +https://github.com/andrewoma/dexx|tag|http://www.semanlink.net/tag/github_project +https://github.com/andrewoma/dexx|comment|a port of Scala's immutable, persistent collection classes to pure Java. +https://github.com/andrewoma/dexx|title|Dexx Collections: Persistent (immutable) collections for Java +https://github.com/andrewoma/dexx|creationTime|2017-01-06T16:52:27Z +http://www.colheradacultural.com.br/viagem/tag/chef-ofir-oliveira/|creationDate|2014-01-18 +http://www.colheradacultural.com.br/viagem/tag/chef-ofir-oliveira/|tag|http://www.semanlink.net/tag/ofir +http://www.colheradacultural.com.br/viagem/tag/chef-ofir-oliveira/|title|chef ofir oliveira Colheres na Estrada +http://www.colheradacultural.com.br/viagem/tag/chef-ofir-oliveira/|creationTime|2014-01-18T10:44:22Z +https://audisto.com/insights/guides/31/#303-see-other-93d33e2f8e9da7a6005e80632c05dcb8|creationDate|2017-10-19 +https://audisto.com/insights/guides/31/#303-see-other-93d33e2f8e9da7a6005e80632c05dcb8|tag|http://www.semanlink.net/tag/http_redirect +https://audisto.com/insights/guides/31/#303-see-other-93d33e2f8e9da7a6005e80632c05dcb8|tag|http://www.semanlink.net/tag/seo +https://audisto.com/insights/guides/31/#303-see-other-93d33e2f8e9da7a6005e80632c05dcb8|comment|"303 redirect is never cacheable. Use for + +- Device targeting +- Geo targeting +- ... + +Don't use 303 when + +- Redirect is permanent +- SEO value is supposed to be passed on to the destination URL +" +https://audisto.com/insights/guides/31/#303-see-other-93d33e2f8e9da7a6005e80632c05dcb8|title|Redirects & SEO - The Complete Guide +https://audisto.com/insights/guides/31/#303-see-other-93d33e2f8e9da7a6005e80632c05dcb8|creationTime|2017-10-19T22:45:08Z +http://distill.pub/|creationDate|2017-03-22 +http://distill.pub/|tag|http://www.semanlink.net/tag/machine_learning +http://distill.pub/|tag|http://www.semanlink.net/tag/christopher_olah +http://distill.pub/|title|Distill — Latest articles about machine learning +http://distill.pub/|creationTime|2017-03-22T13:21:31Z +http://www.ldh-toulon.net/spip.php?article2018|creationDate|2007-04-30 +http://www.ldh-toulon.net/spip.php?article2018|tag|http://www.semanlink.net/tag/sarkozy +http://www.ldh-toulon.net/spip.php?article2018|tag|http://www.semanlink.net/tag/plantu +http://www.ldh-toulon.net/spip.php?article2018|comment|La première apparut sur un dessin du 14 décembre 2005. Jacques Chirac, très ému, déposait une gerbe à la mémoire des esclaves ; dans son dos, un élu UMP en short et casque colonial murmurait une traîtrise à l’oreille du ministre de l’intérieur. Il faisait chaud, la mouche volait bas, un peu par hasard, se souvient Plantu. Lequel fut fort étonné, le lendemain, de voir débarquer un motard en tenue, pour lui remettre une lettre à en-tête du ministère de l’intérieur. 
+http://www.ldh-toulon.net/spip.php?article2018|title|[LDH-Toulon] Plantu, Sarkozy et les mouches +http://www.ldh-toulon.net/spip.php?article2018|creationTime|2007-04-30T01:46:19Z +http://htmlparser.sourceforge.net|creationDate|2005-03-05 +http://htmlparser.sourceforge.net|tag|http://www.semanlink.net/tag/html_parsing +http://www.runrev.com/|creationDate|2009-11-27 +http://www.runrev.com/|tag|http://www.semanlink.net/tag/rev +http://www.runrev.com/|title|Rev +http://www.runrev.com/|creationTime|2009-11-27T11:14:04Z +http://blog.aksw.org/2010/aksw-coordinates-eu-funded-research-project-lod2-aiming-to-take-the-web-of-linked-data-to-the-next-level/|creationDate|2010-09-06 +http://blog.aksw.org/2010/aksw-coordinates-eu-funded-research-project-lod2-aiming-to-take-the-web-of-linked-data-to-the-next-level/|tag|http://www.semanlink.net/tag/lod2 +http://blog.aksw.org/2010/aksw-coordinates-eu-funded-research-project-lod2-aiming-to-take-the-web-of-linked-data-to-the-next-level/|tag|http://www.semanlink.net/tag/freie_universitat_berlin +http://blog.aksw.org/2010/aksw-coordinates-eu-funded-research-project-lod2-aiming-to-take-the-web-of-linked-data-to-the-next-level/|tag|http://www.semanlink.net/tag/exalead +http://blog.aksw.org/2010/aksw-coordinates-eu-funded-research-project-lod2-aiming-to-take-the-web-of-linked-data-to-the-next-level/|title|blog.aksw.org » AKSW coordinates EU-funded research project LOD2 aiming to take the Web of Linked Data to the next level +http://blog.aksw.org/2010/aksw-coordinates-eu-funded-research-project-lod2-aiming-to-take-the-web-of-linked-data-to-the-next-level/|creationTime|2010-09-06T09:31:59Z +http://events.linkeddata.org/ldow2012/|creationDate|2012-04-16 +http://events.linkeddata.org/ldow2012/|tag|http://www.semanlink.net/tag/ldow2012 +http://events.linkeddata.org/ldow2012/|title|Linked Data on the Web (LDOW2012) - Workshop at WWW2012, Lyon, France +http://events.linkeddata.org/ldow2012/|creationTime|2012-04-16T09:24:20Z +https://github.com/castagna/jena-examples|creationDate|2012-12-09 +https://github.com/castagna/jena-examples|tag|http://www.semanlink.net/tag/jena_dev +https://github.com/castagna/jena-examples|tag|http://www.semanlink.net/tag/github_project +https://github.com/castagna/jena-examples|title|castagna/jena-examples · GitHub +https://github.com/castagna/jena-examples|creationTime|2012-12-09T12:14:57Z +http://www.nytimes.com/2016/09/22/science/ancient-dna-human-history.html|creationDate|2016-09-24 +http://www.nytimes.com/2016/09/22/science/ancient-dna-human-history.html|tag|http://www.semanlink.net/tag/origines_de_l_homme +http://www.nytimes.com/2016/09/22/science/ancient-dna-human-history.html|tag|http://www.semanlink.net/tag/african_origin_of_modern_humans +http://www.nytimes.com/2016/09/22/science/ancient-dna-human-history.html|title|A Single Migration From Africa Populated the World, Studies Find - The New York Times +http://www.nytimes.com/2016/09/22/science/ancient-dna-human-history.html|creationTime|2016-09-24T21:31:21Z +http://www.daml.ecs.soton.ac.uk/SSS-SWS04/02.pdf|creationDate|2008-01-19 +http://www.daml.ecs.soton.ac.uk/SSS-SWS04/02.pdf|tag|http://www.semanlink.net/tag/semantic_negotiation +http://www.daml.ecs.soton.ac.uk/SSS-SWS04/02.pdf|tag|http://www.semanlink.net/tag/guha +http://www.daml.ecs.soton.ac.uk/SSS-SWS04/02.pdf|title|Semantic Negotiation: Co-identifying objects across data +http://www.daml.ecs.soton.ac.uk/SSS-SWS04/02.pdf|creationTime|2008-01-19T17:26:12Z +http://www.w3.org/TR/grddl-scenarios/|creationDate|2007-05-30 
+http://www.w3.org/TR/grddl-scenarios/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/TR/grddl-scenarios/|tag|http://www.semanlink.net/tag/grddl +http://www.w3.org/TR/grddl-scenarios/|title|GRDDL Use Cases: Scenarios of extracting RDF data from XML documents +http://www.w3.org/TR/grddl-scenarios/|creationTime|2007-05-30T21:51:17Z +http://www.smashingmagazine.com/2014/03/04/introduction-to-custom-elements/|creationDate|2015-06-12 +http://www.smashingmagazine.com/2014/03/04/introduction-to-custom-elements/|tag|http://www.semanlink.net/tag/webcomponents +http://www.smashingmagazine.com/2014/03/04/introduction-to-custom-elements/|tag|http://www.semanlink.net/tag/html5 +http://www.smashingmagazine.com/2014/03/04/introduction-to-custom-elements/|title|A Detailed Introduction To Custom Elements – Smashing Magazine +http://www.smashingmagazine.com/2014/03/04/introduction-to-custom-elements/|creationTime|2015-06-12T00:38:10Z +http://semanticweb.com/session-for-the-rdfa-initiative-in-drupal-7-and-how-it-will-impact-the-semantic-web_b15681|creationDate|2011-09-15 +http://semanticweb.com/session-for-the-rdfa-initiative-in-drupal-7-and-how-it-will-impact-the-semantic-web_b15681|tag|http://www.semanlink.net/tag/drupal_rdf +http://semanticweb.com/session-for-the-rdfa-initiative-in-drupal-7-and-how-it-will-impact-the-semantic-web_b15681|title|Q&A Session for “The RDFa initiative in Drupal 7, and how it will impact the Semantic Web” - semanticweb.com +http://semanticweb.com/session-for-the-rdfa-initiative-in-drupal-7-and-how-it-will-impact-the-semantic-web_b15681|creationTime|2011-09-15T14:02:59Z +https://visualvm.dev.java.net/|creationDate|2010-02-11 +https://visualvm.dev.java.net/|tag|http://www.semanlink.net/tag/java_profiling +https://visualvm.dev.java.net/|comment|VisualVM is a visual tool integrating several commandline JDK tools and lightweight profiling capabilities. Designed for both production and development time use, it further enhances the capability of monitoring and performance analysis for the Java SE platform. 
+https://visualvm.dev.java.net/|title|visualvm +https://visualvm.dev.java.net/|creationTime|2010-02-11T16:11:12Z +http://www-128.ibm.com/developerworks/java/library/j-jtp09275.html?ca=dgr-lnxw01JavaUrbanLegends|creationDate|2005-10-10 +http://www-128.ibm.com/developerworks/java/library/j-jtp09275.html?ca=dgr-lnxw01JavaUrbanLegends|tag|http://www.semanlink.net/tag/java +http://www-128.ibm.com/developerworks/java/library/j-jtp09275.html?ca=dgr-lnxw01JavaUrbanLegends|comment|Java theory and practice: Urban performance legends, revisited +http://www-128.ibm.com/developerworks/java/library/j-jtp09275.html?ca=dgr-lnxw01JavaUrbanLegends|title|Java theory and practice: Urban performance legends, revisited +http://www.w3.org/2008/09/msnws/report|creationDate|2009-02-07 +http://www.w3.org/2008/09/msnws/report|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2008/09/msnws/report|tag|http://www.semanlink.net/tag/workshop +http://www.w3.org/2008/09/msnws/report|tag|http://www.semanlink.net/tag/social_networks +http://www.w3.org/2008/09/msnws/report|title|Report from W3C Workshop on the Future of Social Networking +http://www.w3.org/2008/09/msnws/report|creationTime|2009-02-07T23:11:08Z +http://blogs.esa.int/rosetta/2015/06/14/rosettas-lander-philae-wakes-up-from-hibernation/|creationDate|2015-06-14 +http://blogs.esa.int/rosetta/2015/06/14/rosettas-lander-philae-wakes-up-from-hibernation/|tag|http://www.semanlink.net/tag/philae +http://blogs.esa.int/rosetta/2015/06/14/rosettas-lander-philae-wakes-up-from-hibernation/|title|Rosetta’s lander Philae wakes up from hibernation Rosetta - ESA's comet chaser +http://blogs.esa.int/rosetta/2015/06/14/rosettas-lander-philae-wakes-up-from-hibernation/|creationTime|2015-06-14T14:24:56Z +http://www.w3.org/TR/rdfa-lite/|creationDate|2013-06-14 +http://www.w3.org/TR/rdfa-lite/|tag|http://www.semanlink.net/tag/rdfa_1_1_lite +http://www.w3.org/TR/rdfa-lite/|tag|http://www.semanlink.net/tag/w3c_recommendation +http://www.w3.org/TR/rdfa-lite/|title|RDFa Lite 1.1 +http://www.w3.org/TR/rdfa-lite/|creationTime|2013-06-14T18:23:18Z +https://en.wikipedia.org/wiki/Kyshtym_disaster|creationDate|2017-11-21 +https://en.wikipedia.org/wiki/Kyshtym_disaster|tag|http://www.semanlink.net/tag/industrie_nucleaire +https://en.wikipedia.org/wiki/Kyshtym_disaster|tag|http://www.semanlink.net/tag/catastrophe_ecologique +https://en.wikipedia.org/wiki/Kyshtym_disaster|comment|"> the CIA knew of the 1957 Mayak accident since 1959, but kept it secret to prevent adverse consequences for the fledgling American nuclear industry +" +https://en.wikipedia.org/wiki/Kyshtym_disaster|title|Kyshtym disaster +https://en.wikipedia.org/wiki/Kyshtym_disaster|creationTime|2017-11-21T13:12:06Z +http://www.lemonde.fr/economie/article/2012/07/02/peut-on-encore-produire-des-voitures-en-france_1727758_3234.html|creationDate|2012-07-05 +http://www.lemonde.fr/economie/article/2012/07/02/peut-on-encore-produire-des-voitures-en-france_1727758_3234.html|tag|http://www.semanlink.net/tag/delocalisations +http://www.lemonde.fr/economie/article/2012/07/02/peut-on-encore-produire-des-voitures-en-france_1727758_3234.html|tag|http://www.semanlink.net/tag/automobile +http://www.lemonde.fr/economie/article/2012/07/02/peut-on-encore-produire-des-voitures-en-france_1727758_3234.html|tag|http://www.semanlink.net/tag/france +http://www.lemonde.fr/economie/article/2012/07/02/peut-on-encore-produire-des-voitures-en-france_1727758_3234.html|title|Peut-on encore produire des voitures en France ? 
+http://www.lemonde.fr/economie/article/2012/07/02/peut-on-encore-produire-des-voitures-en-france_1727758_3234.html|creationTime|2012-07-05T23:15:40Z
+https://lejournal.cnrs.fr/articles/amarna-la-cite-disparue-dakhenaton?utm_content=bufferbae84&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationDate|2016-04-10
+https://lejournal.cnrs.fr/articles/amarna-la-cite-disparue-dakhenaton?utm_content=bufferbae84&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|tag|http://www.semanlink.net/tag/telelamarna
+https://lejournal.cnrs.fr/articles/amarna-la-cite-disparue-dakhenaton?utm_content=bufferbae84&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|title|Amarna, la cité disparue d'Akhenaton CNRS Le journal
+https://lejournal.cnrs.fr/articles/amarna-la-cite-disparue-dakhenaton?utm_content=bufferbae84&utm_medium=social&utm_source=twitter.com&utm_campaign=buffer|creationTime|2016-04-10T15:48:18Z
+http://fr.slideshare.net/basistech/multilingual-search-and-text-analytics-with-solr-open-source-search-conference|creationDate|2015-03-11
+http://fr.slideshare.net/basistech/multilingual-search-and-text-analytics-with-solr-open-source-search-conference|tag|http://www.semanlink.net/tag/solr_not_english_only
+http://fr.slideshare.net/basistech/multilingual-search-and-text-analytics-with-solr-open-source-search-conference|title|Multilingual Search and Text Analytics with Solr - Open Source Search…
+http://fr.slideshare.net/basistech/multilingual-search-and-text-analytics-with-solr-open-source-search-conference|creationTime|2015-03-11T00:26:43Z
+http://www.codecademy.com/|creationDate|2013-12-20
+http://www.codecademy.com/|tag|http://www.semanlink.net/tag/online_course_materials
+http://www.codecademy.com/|tag|http://www.semanlink.net/tag/programming
+http://www.codecademy.com/|tag|http://www.semanlink.net/tag/maria
+http://www.codecademy.com/|title|Apprenez à coder Codecademy
+http://www.codecademy.com/|creationTime|2013-12-20T10:14:14Z
+http://news.nationalgeographic.com/news/2008/09/080903-oldest-skeletons.html|creationDate|2008-09-05
+http://news.nationalgeographic.com/news/2008/09/080903-oldest-skeletons.html|tag|http://www.semanlink.net/tag/first_americans
+http://news.nationalgeographic.com/news/2008/09/080903-oldest-skeletons.html|title|Oldest Skeleton in Americas Found in Underwater Cave?
+http://news.nationalgeographic.com/news/2008/09/080903-oldest-skeletons.html|creationTime|2008-09-05T17:47:53Z
+http://stackoverflow.com/questions/10057095/installing-numpy-lion-with-python-2-7|creationDate|2014-07-16
+http://stackoverflow.com/questions/10057095/installing-numpy-lion-with-python-2-7|tag|http://www.semanlink.net/tag/stack_overflow
+http://stackoverflow.com/questions/10057095/installing-numpy-lion-with-python-2-7|tag|http://www.semanlink.net/tag/python_install
+http://stackoverflow.com/questions/10057095/installing-numpy-lion-with-python-2-7|title|Installing numpy Lion with Python 2.7? - Stack Overflow
+http://stackoverflow.com/questions/10057095/installing-numpy-lion-with-python-2-7|creationTime|2014-07-16T17:19:58Z
+http://www.w3.org/TR/turtle/#in-html|creationDate|2013-09-06
+http://www.w3.org/TR/turtle/#in-html|tag|http://www.semanlink.net/tag/turtle_in_html
+http://www.w3.org/TR/turtle/#in-html|title|Embedding Turtle in HTML documents
+http://www.w3.org/TR/turtle/#in-html|creationTime|2013-09-06T18:30:31Z
+http://www.javaworld.com/article/2995526/development-tools/jump-into-java-micro-frameworks-part-1.html|creationDate|2017-05-16
+http://www.javaworld.com/article/2995526/development-tools/jump-into-java-micro-frameworks-part-1.html|tag|http://www.semanlink.net/tag/java_microframeworks
+http://www.javaworld.com/article/2995526/development-tools/jump-into-java-micro-frameworks-part-1.html|title|Jump into Java microframeworks, Part 1: Introduction JavaWorld
+http://www.javaworld.com/article/2995526/development-tools/jump-into-java-micro-frameworks-part-1.html|creationTime|2017-05-16T02:06:58Z
+http://www.paleolands.org/|creationDate|2008-05-18
+http://www.paleolands.org/|tag|http://www.semanlink.net/tag/paleontologie
+http://www.paleolands.org/|tag|http://www.semanlink.net/tag/oregon
+http://www.paleolands.org/|comment|We’ll take you and your family on a customized day hike to collect fossils, touch a 44-million-year-old tree, visit a lakeshore 33 million years old, or volunteer to assist paleontologists in their research.
+http://www.paleolands.org/|title|Oregon Paleo Lands Institute
+http://www.paleolands.org/|creationTime|2008-05-18T23:42:07Z
+https://ruben.verborgh.org/blog/2018/12/28/designing-a-linked-data-developer-experience/#ldflex|creationDate|2019-04-12
+https://ruben.verborgh.org/blog/2018/12/28/designing-a-linked-data-developer-experience/#ldflex|tag|http://www.semanlink.net/tag/javascript_rdf
+https://ruben.verborgh.org/blog/2018/12/28/designing-a-linked-data-developer-experience/#ldflex|tag|http://www.semanlink.net/tag/good
+https://ruben.verborgh.org/blog/2018/12/28/designing-a-linked-data-developer-experience/#ldflex|tag|http://www.semanlink.net/tag/solid
+https://ruben.verborgh.org/blog/2018/12/28/designing-a-linked-data-developer-experience/#ldflex|tag|http://www.semanlink.net/tag/linked_data
+https://ruben.verborgh.org/blog/2018/12/28/designing-a-linked-data-developer-experience/#ldflex|tag|http://www.semanlink.net/tag/ruben_verborgh
+https://ruben.verborgh.org/blog/2018/12/28/designing-a-linked-data-developer-experience/#ldflex|title|Designing a Linked Data developer experience Ruben Verborgh
+https://ruben.verborgh.org/blog/2018/12/28/designing-a-linked-data-developer-experience/#ldflex|creationTime|2019-04-12T19:32:12Z
+http://www.ibiblio.org/hhalpin/homepage/publications/html/airedux/|creationDate|2007-02-06
+http://www.ibiblio.org/hhalpin/homepage/publications/html/airedux/|tag|http://www.semanlink.net/tag/semantic_web_and_ai
+http://www.ibiblio.org/hhalpin/homepage/publications/html/airedux/|title|The Semantic Web: The Origins of Artificial Intelligence Redux
+http://www.ibiblio.org/hhalpin/homepage/publications/html/airedux/|creationTime|2007-02-06T21:55:44Z
+http://www.alvit.de/vf/en/essential-bookmarks-for-webdesigners-and-web-developers.html|creationDate|2005-04-27
+http://www.alvit.de/vf/en/essential-bookmarks-for-webdesigners-and-web-developers.html|tag|http://www.semanlink.net/tag/web_tools
+http://www.alvit.de/vf/en/essential-bookmarks-for-webdesigners-and-web-developers.html|tag|http://www.semanlink.net/tag/links
+http://www.alvit.de/vf/en/essential-bookmarks-for-webdesigners-and-web-developers.html|title|Bookmarks for web developers: a list of useful web-tools +http://www.illyriad.co.uk/blog/index.php/2011/08/fix-memory-leaks-ajax-page-replacement/|creationDate|2012-09-17 +http://www.illyriad.co.uk/blog/index.php/2011/08/fix-memory-leaks-ajax-page-replacement/|tag|http://www.semanlink.net/tag/jquery +http://www.illyriad.co.uk/blog/index.php/2011/08/fix-memory-leaks-ajax-page-replacement/|tag|http://www.semanlink.net/tag/memory_leak +http://www.illyriad.co.uk/blog/index.php/2011/08/fix-memory-leaks-ajax-page-replacement/|title|(Fix) Memory Leaks: Ajax page replacement Illyriad - Beneath the Misted Land +http://www.illyriad.co.uk/blog/index.php/2011/08/fix-memory-leaks-ajax-page-replacement/|creationTime|2012-09-17T00:11:33Z +http://www.xml.com/pub/a/2005/04/06/restful.html|creationDate|2005-09-03 +http://www.xml.com/pub/a/2005/04/06/restful.html|tag|http://www.semanlink.net/tag/dev +http://www.xml.com/pub/a/2005/04/06/restful.html|tag|http://www.semanlink.net/tag/bookmark_managers +http://www.xml.com/pub/a/2005/04/06/restful.html|tag|http://www.semanlink.net/tag/rest +http://www.xml.com/pub/a/2005/04/06/restful.html|tag|http://www.semanlink.net/tag/tagging +http://www.xml.com/pub/a/2005/04/06/restful.html|title|XML.com: Constructing or Traversing URIs? +http://www.h-net.org/~africa/threads/mamiwata.html|creationDate|2007-09-05 +http://www.h-net.org/~africa/threads/mamiwata.html|tag|http://www.semanlink.net/tag/mami_wata +http://www.h-net.org/~africa/threads/mamiwata.html|title|Mami Wata +http://www.h-net.org/~africa/threads/mamiwata.html|creationTime|2007-09-05T00:43:54Z +http://jubat.us/en/|creationDate|2013-04-16 +http://jubat.us/en/|tag|http://www.semanlink.net/tag/machine_learning +http://jubat.us/en/|title|Jubatus : Distributed Online Machine Learning Framework +http://jubat.us/en/|creationTime|2013-04-16T14:22:28Z +https://www.w3.org/blog/2017/02/on-eme-in-html5/?pk_campaign=feed&pk_kwd=on-eme-in-html5|creationDate|2017-02-28 +https://www.w3.org/blog/2017/02/on-eme-in-html5/?pk_campaign=feed&pk_kwd=on-eme-in-html5|tag|http://www.semanlink.net/tag/drm_in_html_5 +https://www.w3.org/blog/2017/02/on-eme-in-html5/?pk_campaign=feed&pk_kwd=on-eme-in-html5|tag|http://www.semanlink.net/tag/encrypted_media_extensions +https://www.w3.org/blog/2017/02/on-eme-in-html5/?pk_campaign=feed&pk_kwd=on-eme-in-html5|tag|http://www.semanlink.net/tag/tim_berners_lee +https://www.w3.org/blog/2017/02/on-eme-in-html5/?pk_campaign=feed&pk_kwd=on-eme-in-html5|title|On EME in HTML5 W3C Blog +https://www.w3.org/blog/2017/02/on-eme-in-html5/?pk_campaign=feed&pk_kwd=on-eme-in-html5|creationTime|2017-02-28T14:01:14Z +http://json-ld.org/spec/latest/json-ld-api-best-practices/|creationDate|2016-07-04 +http://json-ld.org/spec/latest/json-ld-api-best-practices/|tag|http://www.semanlink.net/tag/json_ld_apis +http://json-ld.org/spec/latest/json-ld-api-best-practices/|tag|http://www.semanlink.net/tag/gregg_kellogg +http://json-ld.org/spec/latest/json-ld-api-best-practices/|title|Building JSON-LD APIs: Best Practices +http://json-ld.org/spec/latest/json-ld-api-best-practices/|creationTime|2016-07-04T09:38:26Z +http://www.howtocreate.co.uk/tutorials/javascript/domstructure|creationDate|2007-11-30 +http://www.howtocreate.co.uk/tutorials/javascript/domstructure|tag|http://www.semanlink.net/tag/javascript_dom +http://www.howtocreate.co.uk/tutorials/javascript/domstructure|tag|http://www.semanlink.net/tag/javascript_and_tutorial 
+http://www.howtocreate.co.uk/tutorials/javascript/domstructure|tag|http://www.semanlink.net/tag/compatibilite_javascript +http://www.howtocreate.co.uk/tutorials/javascript/domstructure|title|JavaScript tutorial - DOM objects and methods +http://www.howtocreate.co.uk/tutorials/javascript/domstructure|creationTime|2007-11-30T16:40:14Z +http://www.slideshare.net/LeeFeigenbaum/semantic-web-landscape-2009?type=powerpoint|creationDate|2009-05-12 +http://www.slideshare.net/LeeFeigenbaum/semantic-web-landscape-2009?type=powerpoint|tag|http://www.semanlink.net/tag/slides +http://www.slideshare.net/LeeFeigenbaum/semantic-web-landscape-2009?type=powerpoint|tag|http://www.semanlink.net/tag/lee_feigenbaum +http://www.slideshare.net/LeeFeigenbaum/semantic-web-landscape-2009?type=powerpoint|tag|http://www.semanlink.net/tag/semantic_web_evangelization +http://www.slideshare.net/LeeFeigenbaum/semantic-web-landscape-2009?type=powerpoint|title|Semantic Web Landscape 2009 +http://www.slideshare.net/LeeFeigenbaum/semantic-web-landscape-2009?type=powerpoint|creationTime|2009-05-12T23:23:15Z +https://english.lasindias.com/blockchain-is-a-threat-to-the-distributed-future-of-the-internet|creationDate|2016-05-18 +https://english.lasindias.com/blockchain-is-a-threat-to-the-distributed-future-of-the-internet|tag|http://www.semanlink.net/tag/blockchain +https://english.lasindias.com/blockchain-is-a-threat-to-the-distributed-future-of-the-internet|title|The blockchain is a threat to the distributed future of the Internet +https://english.lasindias.com/blockchain-is-a-threat-to-the-distributed-future-of-the-internet|creationTime|2016-05-18T00:31:30Z +http://www.nlp-class.org/|creationDate|2012-01-18 +http://www.nlp-class.org/|tag|http://www.semanlink.net/tag/stanford +http://www.nlp-class.org/|tag|http://www.semanlink.net/tag/nlp +http://www.nlp-class.org/|title|Natural Language Processing +http://www.nlp-class.org/|creationTime|2012-01-18T08:48:51Z +https://github.com/castagna/SARQ|creationDate|2012-05-04 +https://github.com/castagna/SARQ|tag|http://www.semanlink.net/tag/paolo_castagna +https://github.com/castagna/SARQ|tag|http://www.semanlink.net/tag/arq +https://github.com/castagna/SARQ|comment|SARQ is a combination of ARQ and Solr. It gives ARQ the ability to perform free text searches using a remote Solr server. Lucene indexes in Solr are additional information for accessing the RDF graph, not storage for the graph itself. 
+https://github.com/castagna/SARQ|title|castagna/SARQ · GitHub
+https://github.com/castagna/SARQ|creationTime|2012-05-04T00:41:40Z
+http://stackoverflow.com/questions/23996953/json-ldhydra-link-discovery/24066336#24066336|creationDate|2017-02-04
+http://stackoverflow.com/questions/23996953/json-ldhydra-link-discovery/24066336#24066336|tag|http://www.semanlink.net/tag/json_ld
+http://stackoverflow.com/questions/23996953/json-ldhydra-link-discovery/24066336#24066336|title|angularjs - JSON-LD+Hydra link discovery - Stack Overflow
+http://stackoverflow.com/questions/23996953/json-ldhydra-link-discovery/24066336#24066336|creationTime|2017-02-04T13:33:50Z
+http://www.lespetitescases.net/RDFa-1-1-pour-corriger-les-erreurs-de-jeunesse|creationDate|2010-05-14
+http://www.lespetitescases.net/RDFa-1-1-pour-corriger-les-erreurs-de-jeunesse|tag|http://www.semanlink.net/tag/les_petites_cases
+http://www.lespetitescases.net/RDFa-1-1-pour-corriger-les-erreurs-de-jeunesse|tag|http://www.semanlink.net/tag/rdfa_1_1
+http://www.lespetitescases.net/RDFa-1-1-pour-corriger-les-erreurs-de-jeunesse|title|RDFa 1.1 pour corriger les erreurs de jeunesse ? Les petites cases
+http://www.lespetitescases.net/RDFa-1-1-pour-corriger-les-erreurs-de-jeunesse|creationTime|2010-05-14T21:25:01Z
+http://www.lemonde.fr/planete/article/2012/08/06/suivez-l-atterrissage-de-curiosity-sur-mars-en-direct_1742760_3244.html|creationDate|2012-08-06
+http://www.lemonde.fr/planete/article/2012/08/06/suivez-l-atterrissage-de-curiosity-sur-mars-en-direct_1742760_3244.html|tag|http://www.semanlink.net/tag/mars_curiosity
+http://www.lemonde.fr/planete/article/2012/08/06/suivez-l-atterrissage-de-curiosity-sur-mars-en-direct_1742760_3244.html|title|Le robot Curiosity s'est posé avec succès sur la planète Mars
+http://www.lemonde.fr/planete/article/2012/08/06/suivez-l-atterrissage-de-curiosity-sur-mars-en-direct_1742760_3244.html|creationTime|2012-08-06T08:50:50Z
+http://www.freshblurbs.com/blog/2015/02/27/code-on-demand-in-apis.html|creationDate|2015-03-05
+http://www.freshblurbs.com/blog/2015/02/27/code-on-demand-in-apis.html|tag|http://www.semanlink.net/tag/ruben_verborgh
+http://www.freshblurbs.com/blog/2015/02/27/code-on-demand-in-apis.html|tag|http://www.semanlink.net/tag/code_on_demand
+http://www.freshblurbs.com/blog/2015/02/27/code-on-demand-in-apis.html|tag|http://www.semanlink.net/tag/hateoas
+http://www.freshblurbs.com/blog/2015/02/27/code-on-demand-in-apis.html|title|@inadarei: Code on Demand in APIs: Dumb Clients, Smart Endpoints
+http://www.freshblurbs.com/blog/2015/02/27/code-on-demand-in-apis.html|creationTime|2015-03-05T10:23:09Z
+http://www.cs.man.ac.uk/~horrocks/Publications/download/2006/PaHo06a.pdf|creationDate|2011-02-14
+http://www.cs.man.ac.uk/~horrocks/Publications/download/2006/PaHo06a.pdf|tag|http://www.semanlink.net/tag/open_world_assumption
+http://www.cs.man.ac.uk/~horrocks/Publications/download/2006/PaHo06a.pdf|tag|http://www.semanlink.net/tag/peter_patel_schneider
+http://www.cs.man.ac.uk/~horrocks/Publications/download/2006/PaHo06a.pdf|tag|http://www.semanlink.net/tag/ian_horrocks
+http://www.cs.man.ac.uk/~horrocks/Publications/download/2006/PaHo06a.pdf|comment|Classical logics and Datalog-related logics have both been proposed as underlying formalisms for conceptual modelling in the context of the Semantic Web. Although these two different formalism groups have some commonalities, and look similar in the context of expressively impoverished languages like RDF, their differences become apparent at more expressive language levels. After considering some of these differences, we argue that, although some of the characteristics of Datalog have their utility, the open environment of the Semantic Web is better served by standard logics.
+http://www.cs.man.ac.uk/~horrocks/Publications/download/2006/PaHo06a.pdf|title|A comparison of two modelling paradigms in the Semantic Web
+http://www.cs.man.ac.uk/~horrocks/Publications/download/2006/PaHo06a.pdf|creationTime|2011-02-14T16:10:37Z
+http://www.bloomberg.com/news/2012-03-05/ibm-s-watson-computer-gets-wall-street-job-one-year-after-jeopardy-win.html|creationDate|2012-03-06
+http://www.bloomberg.com/news/2012-03-05/ibm-s-watson-computer-gets-wall-street-job-one-year-after-jeopardy-win.html|tag|http://www.semanlink.net/tag/i_b_m_s_watson
+http://www.bloomberg.com/news/2012-03-05/ibm-s-watson-computer-gets-wall-street-job-one-year-after-jeopardy-win.html|tag|http://www.semanlink.net/tag/finance
+http://www.bloomberg.com/news/2012-03-05/ibm-s-watson-computer-gets-wall-street-job-one-year-after-jeopardy-win.html|title|IBM’s Watson Computer Gets a Wall Street Job - Bloomberg
+http://www.bloomberg.com/news/2012-03-05/ibm-s-watson-computer-gets-wall-street-job-one-year-after-jeopardy-win.html|creationTime|2012-03-06T17:40:01Z
+http://www.lemonde.fr/idees/article/2014/05/16/poker-menteur-autour-de-l-interdiction-des-ogm_4419626_3232.html|creationDate|2014-05-16
+http://www.lemonde.fr/idees/article/2014/05/16/poker-menteur-autour-de-l-interdiction-des-ogm_4419626_3232.html|tag|http://www.semanlink.net/tag/ogm
+http://www.lemonde.fr/idees/article/2014/05/16/poker-menteur-autour-de-l-interdiction-des-ogm_4419626_3232.html|tag|http://www.semanlink.net/tag/europe_aberrations
+http://www.lemonde.fr/idees/article/2014/05/16/poker-menteur-autour-de-l-interdiction-des-ogm_4419626_3232.html|title|Poker menteur autour de l'interdiction des OGM
+http://www.lemonde.fr/idees/article/2014/05/16/poker-menteur-autour-de-l-interdiction-des-ogm_4419626_3232.html|creationTime|2014-05-16T22:07:57Z
+http://ultimategerardm.blogspot.fr/2013/11/wikidata-freebase-interview-with-denny.html|creationDate|2013-11-21
+http://ultimategerardm.blogspot.fr/2013/11/wikidata-freebase-interview-with-denny.html|tag|http://www.semanlink.net/tag/wikidata
+http://ultimategerardm.blogspot.fr/2013/11/wikidata-freebase-interview-with-denny.html|tag|http://www.semanlink.net/tag/denny_vrandecic
+http://ultimategerardm.blogspot.fr/2013/11/wikidata-freebase-interview-with-denny.html|tag|http://www.semanlink.net/tag/freebase
+http://ultimategerardm.blogspot.fr/2013/11/wikidata-freebase-interview-with-denny.html|title|Words and what not: #Wikidata & #Freebase - an #interview with Denny Vrandečić
+http://ultimategerardm.blogspot.fr/2013/11/wikidata-freebase-interview-with-denny.html|creationTime|2013-11-21T10:37:19Z
+https://medium.com/the-official-integrate-ai-blog/what-you-need-to-know-about-natural-language-processing-2c8240e6c38e|creationDate|2018-11-15
+https://medium.com/the-official-integrate-ai-blog/what-you-need-to-know-about-natural-language-processing-2c8240e6c38e|tag|http://www.semanlink.net/tag/overview
+https://medium.com/the-official-integrate-ai-blog/what-you-need-to-know-about-natural-language-processing-2c8240e6c38e|tag|http://www.semanlink.net/tag/nlp_current_state
+https://medium.com/the-official-integrate-ai-blog/what-you-need-to-know-about-natural-language-processing-2c8240e6c38e|title|What You Need to Know About Natural Language Processing +https://medium.com/the-official-integrate-ai-blog/what-you-need-to-know-about-natural-language-processing-2c8240e6c38e|creationTime|2018-11-15T23:13:40Z +https://www.theguardian.com/technology/2016/may/20/silicon-assassins-condemn-humans-life-useless-artificial-intelligence|creationDate|2016-06-26 +https://www.theguardian.com/technology/2016/may/20/silicon-assassins-condemn-humans-life-useless-artificial-intelligence|tag|http://www.semanlink.net/tag/jobbotization +https://www.theguardian.com/technology/2016/may/20/silicon-assassins-condemn-humans-life-useless-artificial-intelligence|tag|http://www.semanlink.net/tag/artificial_intelligence +https://www.theguardian.com/technology/2016/may/20/silicon-assassins-condemn-humans-life-useless-artificial-intelligence|title|AI will create 'useless class' of human, predicts bestselling historian Technology The Guardian +https://www.theguardian.com/technology/2016/may/20/silicon-assassins-condemn-humans-life-useless-artificial-intelligence|creationTime|2016-06-26T23:33:25Z +http://www.osema.org.ve/|creationDate|2011-02-16 +http://www.osema.org.ve/|tag|http://www.semanlink.net/tag/eswc_2011 +http://www.osema.org.ve/|tag|http://www.semanlink.net/tag/workshop +http://www.osema.org.ve/|title|OSEMA 2011. Ontology and Semantic Web for Manufacturing +http://www.osema.org.ve/|creationTime|2011-02-16T01:41:42Z +http://blog.revolutionanalytics.com/2016/02/because-its-friday-the-mysterious-rotating-woman.html|creationDate|2016-02-05 +http://blog.revolutionanalytics.com/2016/02/because-its-friday-the-mysterious-rotating-woman.html|tag|http://www.semanlink.net/tag/illusion_d_optique +http://blog.revolutionanalytics.com/2016/02/because-its-friday-the-mysterious-rotating-woman.html|title|Because it's Friday: The mysterious rotating woman +http://blog.revolutionanalytics.com/2016/02/because-its-friday-the-mysterious-rotating-woman.html|creationTime|2016-02-05T22:34:37Z +http://www.fofomag.com/Index.asp?affiche=News_Display.asp&articleID=920&rub=Culture|creationDate|2008-06-04 +http://www.fofomag.com/Index.asp?affiche=News_Display.asp&articleID=920&rub=Culture|tag|http://www.semanlink.net/tag/moussa_poussi +http://www.fofomag.com/Index.asp?affiche=News_Display.asp&articleID=920&rub=Culture|comment|Que la terre te soit légère, Moussa. +http://www.fofomag.com/Index.asp?affiche=News_Display.asp&articleID=920&rub=Culture|title|La mort d'une icone de la musique moderne nigérienne: Moussa Poussy nous a quitté. 
+http://www.fofomag.com/Index.asp?affiche=News_Display.asp&articleID=920&rub=Culture|creationTime|2008-06-04T20:57:03Z +http://www.servlets.com/isps/servlet/ISPViewAll|creationDate|2005-03-31 +http://www.servlets.com/isps/servlet/ISPViewAll|tag|http://www.semanlink.net/tag/servlet +http://www.servlets.com/isps/servlet/ISPViewAll|tag|http://www.semanlink.net/tag/isp +http://www.servlets.com/isps/servlet/ISPViewAll|title|ISPs Supporting Servlets +https://twitter.com/julie_grollier|creationDate|2018-10-21 +https://twitter.com/julie_grollier|tag|http://www.semanlink.net/tag/julie_grollier +https://twitter.com/julie_grollier|title|Julie Grollier (@julie_grollier) Twitter +https://twitter.com/julie_grollier|creationTime|2018-10-21T11:15:04Z +http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEMA1UULWFE_0.html|creationDate|2005-11-30 +http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEMA1UULWFE_0.html|tag|http://www.semanlink.net/tag/eau_de_mars +http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEMA1UULWFE_0.html|tag|http://www.semanlink.net/tag/mars_express +http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEMA1UULWFE_0.html|title|ESA - Results from Mars Express - Mars Express evidence for large aquifers on early Mars +http://www.semweb.pro/blogentry/3138|creationDate|2012-05-22 +http://www.semweb.pro/blogentry/3138|tag|http://www.semanlink.net/tag/semweb_pro_2012 +http://www.semweb.pro/blogentry/3138|title|Revue de presse SemWeb.Pro 2012 (SemWeb.Pro) +http://www.semweb.pro/blogentry/3138|creationTime|2012-05-22T09:29:04Z +http://www.hpl.hp.com/techreports/2002/HPL-2002-216.pdf|creationDate|2011-03-01 +http://www.hpl.hp.com/techreports/2002/HPL-2002-216.pdf|tag|http://www.semanlink.net/tag/minting_uris +http://www.hpl.hp.com/techreports/2002/HPL-2002-216.pdf|tag|http://www.semanlink.net/tag/rdf_dev +http://www.hpl.hp.com/techreports/2002/HPL-2002-216.pdf|title|The case for generating URIs by hashing RDF content +http://www.hpl.hp.com/techreports/2002/HPL-2002-216.pdf|creationTime|2011-03-01T13:36:04Z +http://www.lespetitescases.net/Petite-Poucette-au-secours-de-l-Open-Data|creationDate|2013-04-11 +http://www.lespetitescases.net/Petite-Poucette-au-secours-de-l-Open-Data|tag|http://www.semanlink.net/tag/les_petites_cases +http://www.lespetitescases.net/Petite-Poucette-au-secours-de-l-Open-Data|tag|http://www.semanlink.net/tag/open_data +http://www.lespetitescases.net/Petite-Poucette-au-secours-de-l-Open-Data|tag|http://www.semanlink.net/tag/michel_serres +http://www.lespetitescases.net/Petite-Poucette-au-secours-de-l-Open-Data|title|Petite Poucette au secours de l'Open Data Les petites cases +http://www.lespetitescases.net/Petite-Poucette-au-secours-de-l-Open-Data|creationTime|2013-04-11T09:05:32Z +http://www.javascriptkit.com/javatutors/closures.shtml|creationDate|2012-08-14 +http://www.javascriptkit.com/javatutors/closures.shtml|tag|http://www.semanlink.net/tag/javascript_closures +http://www.javascriptkit.com/javatutors/closures.shtml|title|JavaScript Closures 101- they're not magic +http://www.javascriptkit.com/javatutors/closures.shtml|creationTime|2012-08-14T14:51:35Z +http://www.google.com/support/webmasters/bin/topic.py?topic=21997|creationDate|2011-02-09 +http://www.google.com/support/webmasters/bin/topic.py?topic=21997|tag|http://www.semanlink.net/tag/google_rich_snippets +http://www.google.com/support/webmasters/bin/topic.py?topic=21997|title|Rich snippets (microdata, microformats, RDFa) 
+http://www.google.com/support/webmasters/bin/topic.py?topic=21997|creationTime|2011-02-09T00:30:52Z
+http://stackoverflow.com/questions/5250923/http-content-negotiation-conflicts-in-jax-rs-jersey|creationDate|2016-04-01
+http://stackoverflow.com/questions/5250923/http-content-negotiation-conflicts-in-jax-rs-jersey|tag|http://www.semanlink.net/tag/jersey_cache_control
+http://stackoverflow.com/questions/5250923/http-content-negotiation-conflicts-in-jax-rs-jersey|title|rest - HTTP content negotiation conflicts in JAX-RS/Jersey? - Stack Overflow
+http://stackoverflow.com/questions/5250923/http-content-negotiation-conflicts-in-jax-rs-jersey|creationTime|2016-04-01T10:13:57Z
+http://www.programmableweb.com/mashup/youtube-subtitle-inserter|creationDate|2007-09-15
+http://www.programmableweb.com/mashup/youtube-subtitle-inserter|tag|http://www.semanlink.net/tag/youtube
+http://www.programmableweb.com/mashup/youtube-subtitle-inserter|tag|http://www.semanlink.net/tag/subtitles
+http://www.programmableweb.com/mashup/youtube-subtitle-inserter|title|YouTube Subtitle Inserter - ProgrammableWeb Mashup Detail
+http://www.programmableweb.com/mashup/youtube-subtitle-inserter|creationTime|2007-09-15T23:06:33Z
+http://stackoverflow.com/questions/5357442/how-to-inspect-javascript-objects|creationDate|2012-08-28
+http://stackoverflow.com/questions/5357442/how-to-inspect-javascript-objects|tag|http://www.semanlink.net/tag/javascript_tips
+http://stackoverflow.com/questions/5357442/how-to-inspect-javascript-objects|comment|"JSON.stringify(object)
+"
+http://stackoverflow.com/questions/5357442/how-to-inspect-javascript-objects|title|JSON.stringify(object) - How to inspect Javascript Objects - Stack Overflow
+http://stackoverflow.com/questions/5357442/how-to-inspect-javascript-objects|creationTime|2012-08-28T12:47:34Z
+http://www.plasticbag.org/archives/2005/10/on_the_bbc_annotatable_audio_project/|creationDate|2007-04-10
+http://www.plasticbag.org/archives/2005/10/on_the_bbc_annotatable_audio_project/|tag|http://www.semanlink.net/tag/bbc
+http://www.plasticbag.org/archives/2005/10/on_the_bbc_annotatable_audio_project/|tag|http://www.semanlink.net/tag/semantic_web_use_cases
+http://www.plasticbag.org/archives/2005/10/on_the_bbc_annotatable_audio_project/|comment|Experimental internal-BBC-only project designed to allow users to collectively describe, segment and annotate audio in a Wikipedia-style fashion
+http://www.plasticbag.org/archives/2005/10/on_the_bbc_annotatable_audio_project/|title|On the BBC Annotatable Audio project... (plasticbag.org)
+http://www.plasticbag.org/archives/2005/10/on_the_bbc_annotatable_audio_project/|creationTime|2007-04-10T23:51:55Z
+https://jbiomedsem.biomedcentral.com/articles/10.1186/s13326-017-0136-y|creationDate|2017-09-04
+https://jbiomedsem.biomedcentral.com/articles/10.1186/s13326-017-0136-y|tag|http://www.semanlink.net/tag/semantic_media_wiki
+https://jbiomedsem.biomedcentral.com/articles/10.1186/s13326-017-0136-y|tag|http://www.semanlink.net/tag/biomedical_data
+https://jbiomedsem.biomedcentral.com/articles/10.1186/s13326-017-0136-y|tag|http://www.semanlink.net/tag/biological_data
+https://jbiomedsem.biomedcentral.com/articles/10.1186/s13326-017-0136-y|title|RDFIO: extending Semantic MediaWiki for interoperable biomedical data management Journal of Biomedical Semantics Full Text
+https://jbiomedsem.biomedcentral.com/articles/10.1186/s13326-017-0136-y|creationTime|2017-09-04T21:09:42Z
+http://www.lemonde.fr/web/article/0,1-0@2-3232,36-894003,0.html|creationDate|2007-04-12
+http://www.lemonde.fr/web/article/0,1-0@2-3232,36-894003,0.html|tag|http://www.semanlink.net/tag/presidentielles_2007
+http://www.lemonde.fr/web/article/0,1-0@2-3232,36-894003,0.html|tag|http://www.semanlink.net/tag/aldous_huxley
+http://www.lemonde.fr/web/article/0,1-0@2-3232,36-894003,0.html|tag|http://www.semanlink.net/tag/sarkozy
+http://www.lemonde.fr/web/article/0,1-0@2-3232,36-894003,0.html|tag|http://www.semanlink.net/tag/1984
+http://www.lemonde.fr/web/article/0,1-0@2-3232,36-894003,0.html|comment|Comment dès lors ne pas nous inquiéter de voir un ancien ministre de l'intérieur, candidat à l'élection présidentielle, nous conduire vers les horizons mêlés du Meilleur des mondes d'Huxley et de 1984 d'Orwell ?
+http://www.lemonde.fr/web/article/0,1-0@2-3232,36-894003,0.html|title|"Le ""meilleur des mondes"" de M. Sarkozy, par Hervé Chneiweiss"
+http://www.lemonde.fr/web/article/0,1-0@2-3232,36-894003,0.html|creationTime|2007-04-12T09:02:16Z
+http://www.lemonde.fr/web/article/0,1-0@2-3232,36-894003,0.html|source|Le Monde
+http://terrier.org/docs/v3.5/dfr_description.html|creationDate|2017-07-21
+http://terrier.org/docs/v3.5/dfr_description.html|tag|http://www.semanlink.net/tag/okapi_bm25
+http://terrier.org/docs/v3.5/dfr_description.html|title|Divergence From Randomness (DFR) Framework
+http://terrier.org/docs/v3.5/dfr_description.html|creationTime|2017-07-21T12:34:32Z
+http://homepages.cwi.nl/~paulk/publications/SPE00.pdf|creationDate|2011-04-05
+http://homepages.cwi.nl/~paulk/publications/SPE00.pdf|tag|http://www.semanlink.net/tag/aterm
+http://homepages.cwi.nl/~paulk/publications/SPE00.pdf|comment|How do distributed applications exchange tree-like data structures?
+http://homepages.cwi.nl/~paulk/publications/SPE00.pdf|title|Efficient Annotated Terms
+http://homepages.cwi.nl/~paulk/publications/SPE00.pdf|creationTime|2011-04-05T09:32:58Z
+http://fr.dbpedia.org/|creationDate|2012-11-19
+http://fr.dbpedia.org/|tag|http://www.semanlink.net/tag/dbpedia_francophone
+http://fr.dbpedia.org/|title|DBpédia en français
+http://fr.dbpedia.org/|creationTime|2012-11-19T15:23:52Z
+http://communitygrids.blogspot.in/2007/11/maven-making-war-and-jar-at-same-time.html|creationDate|2015-03-13
+http://communitygrids.blogspot.in/2007/11/maven-making-war-and-jar-at-same-time.html|tag|http://www.semanlink.net/tag/maven
+http://communitygrids.blogspot.in/2007/11/maven-making-war-and-jar-at-same-time.html|comment|"note : non, le jar produit n'a pas le n° de version ds target (comme le war, d'ailleurs)
+replace dans jar
+${project.build.directory}/${project.artifactId}-${project.version}.jar
+par
+${project.build.directory}/${project.artifactId}.jar +" +http://communitygrids.blogspot.in/2007/11/maven-making-war-and-jar-at-same-time.html|title|Marlon Pierce's Community Grids Lab Blog: Maven: Making a War and Jar at the Same Time +http://communitygrids.blogspot.in/2007/11/maven-making-war-and-jar-at-same-time.html|creationTime|2015-03-13T15:31:18Z +http://www.w3.org/2001/sw/interest/webschema.html|creationDate|2013-06-28 +http://www.w3.org/2001/sw/interest/webschema.html|tag|http://www.semanlink.net/tag/web_schemas_task_force +http://www.w3.org/2001/sw/interest/webschema.html|title|Semantic Web Interest Group - Web Schemas Task Force +http://www.w3.org/2001/sw/interest/webschema.html|creationTime|2013-06-28T11:11:16Z +http://learn.arc.nasa.gov|creationDate|2005-03-14 +http://learn.arc.nasa.gov|tag|http://www.semanlink.net/tag/nasa +https://github.com/wikimedia/wikidata-query-gui|creationDate|2017-10-28 +https://github.com/wikimedia/wikidata-query-gui|tag|http://www.semanlink.net/tag/wikidata_query_service +https://github.com/wikimedia/wikidata-query-gui|comment|the GUI for the Wikidata Query Service. +https://github.com/wikimedia/wikidata-query-gui|title|wikimedia/wikidata-query-gui +https://github.com/wikimedia/wikidata-query-gui|creationTime|2017-10-28T11:04:18Z +https://en.wikipedia.org/wiki/The_Electric_Horseman|creationDate|2016-09-04 +https://en.wikipedia.org/wiki/The_Electric_Horseman|tag|http://www.semanlink.net/tag/film_americain +https://en.wikipedia.org/wiki/The_Electric_Horseman|comment|"Sydney Pollack, Redford, Jane Fonda +" +https://en.wikipedia.org/wiki/The_Electric_Horseman|title|The Electric Horseman +https://en.wikipedia.org/wiki/The_Electric_Horseman|creationTime|2016-09-04T22:46:27Z +https://anavarre.net/building-a-lightweight-and-portable-knowledge-base-for-fun-and-profit/|creationDate|2016-11-26 +https://anavarre.net/building-a-lightweight-and-portable-knowledge-base-for-fun-and-profit/|tag|http://www.semanlink.net/tag/github +https://anavarre.net/building-a-lightweight-and-portable-knowledge-base-for-fun-and-profit/|tag|http://www.semanlink.net/tag/semanlink2_related +https://anavarre.net/building-a-lightweight-and-portable-knowledge-base-for-fun-and-profit/|tag|http://www.semanlink.net/tag/knowledge_base +https://anavarre.net/building-a-lightweight-and-portable-knowledge-base-for-fun-and-profit/|tag|http://www.semanlink.net/tag/markdown +https://anavarre.net/building-a-lightweight-and-portable-knowledge-base-for-fun-and-profit/|title|Building a lightweight and portable knowledge base for fun and profit +https://anavarre.net/building-a-lightweight-and-portable-knowledge-base-for-fun-and-profit/|creationTime|2016-11-26T01:32:34Z +http://www.ajaxpatterns.org/AJAXFrameworks|creationDate|2005-06-15 +http://www.ajaxpatterns.org/AJAXFrameworks|tag|http://www.semanlink.net/tag/ajax +http://www.ajaxpatterns.org/AJAXFrameworks|title|Ajax Frameworks - Ajax Patterns +http://www.mail-archive.com/public-lod@w3.org/msg07196.html|creationDate|2010-12-28 +http://www.mail-archive.com/public-lod@w3.org/msg07196.html|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.mail-archive.com/public-lod@w3.org/msg07196.html|tag|http://www.semanlink.net/tag/httprange_14 +http://www.mail-archive.com/public-lod@w3.org/msg07196.html|title|Re: Is 303 really necessary? 
+http://www.mail-archive.com/public-lod@w3.org/msg07196.html|creationTime|2010-12-28T14:38:17Z +http://internetactu.blog.lemonde.fr/2017/10/07/watson-lintelligence-artificielle-en-ses-limites/|creationDate|2017-10-07 +http://internetactu.blog.lemonde.fr/2017/10/07/watson-lintelligence-artificielle-en-ses-limites/|tag|http://www.semanlink.net/tag/ia_limites +http://internetactu.blog.lemonde.fr/2017/10/07/watson-lintelligence-artificielle-en-ses-limites/|tag|http://www.semanlink.net/tag/i_b_m_s_watson +http://internetactu.blog.lemonde.fr/2017/10/07/watson-lintelligence-artificielle-en-ses-limites/|title|Watson : l’Intelligence artificielle en ses limites InternetActu +http://internetactu.blog.lemonde.fr/2017/10/07/watson-lintelligence-artificielle-en-ses-limites/|creationTime|2017-10-07T21:50:55Z +http://lists.w3.org/Archives/Public/public-hydra/2014Apr/0054.html|creationDate|2015-02-19 +http://lists.w3.org/Archives/Public/public-hydra/2014Apr/0054.html|tag|http://www.semanlink.net/tag/public_hydra_w3_org +http://lists.w3.org/Archives/Public/public-hydra/2014Apr/0054.html|tag|http://www.semanlink.net/tag/hydra_templated_links +http://lists.w3.org/Archives/Public/public-hydra/2014Apr/0054.html|comment|"suite" +http://lists.w3.org/Archives/Public/public-hydra/2014Apr/0054.html|title|-- ISSUE-45: Introduce hydra:filter (subPropertyOf hydra:search) -- from Thomas Hoppe on 2014-04-20 (public-hydra@w3.org from April 2014) +http://lists.w3.org/Archives/Public/public-hydra/2014Apr/0054.html|creationTime|2015-02-19T14:58:03Z +https://en.wikipedia.org/wiki/Lemmy_%28film%29|creationDate|2015-08-24 +https://en.wikipedia.org/wiki/Lemmy_%28film%29|tag|http://www.semanlink.net/tag/musicien +https://en.wikipedia.org/wiki/Lemmy_%28film%29|tag|http://www.semanlink.net/tag/film +https://en.wikipedia.org/wiki/Lemmy_%28film%29|title|Lemmy (film) +https://en.wikipedia.org/wiki/Lemmy_%28film%29|creationTime|2015-08-24T12:30:34Z +http://www.refer.sn/ethiopiques/article.php3?id_article=83&artsuite=0|creationDate|2007-05-02 +http://www.refer.sn/ethiopiques/article.php3?id_article=83&artsuite=0|tag|http://www.semanlink.net/tag/jerma +http://www.refer.sn/ethiopiques/article.php3?id_article=83&artsuite=0|title|KASSEY : FIGURE MYTHIQUE FEMININE DANS LES RECITS D’ISSA KOROMBE +http://www.refer.sn/ethiopiques/article.php3?id_article=83&artsuite=0|creationTime|2007-05-02T21:26:56Z +http://www.refer.sn/ethiopiques/article.php3?id_article=83&artsuite=0|source|Ethiopiques - Revue negro-africaine de littérature et de philosophie. 
+http://twiecki.github.io/blog/2017/02/08/bayesian-hierchical-non-centered/|creationDate|2017-02-08 +http://twiecki.github.io/blog/2017/02/08/bayesian-hierchical-non-centered/|tag|http://www.semanlink.net/tag/bayesian_reasoning +http://twiecki.github.io/blog/2017/02/08/bayesian-hierchical-non-centered/|tag|http://www.semanlink.net/tag/multilevel_model +http://twiecki.github.io/blog/2017/02/08/bayesian-hierchical-non-centered/|title|Why hierarchical models are awesome, tricky, and Bayesian +http://twiecki.github.io/blog/2017/02/08/bayesian-hierchical-non-centered/|creationTime|2017-02-08T13:20:51Z +https://www.lemonde.fr/culture/article/2019/05/15/julieta-les-souvenirs-et-les-regrets-aussi_5462470_3246.html|creationDate|2019-05-15 +https://www.lemonde.fr/culture/article/2019/05/15/julieta-les-souvenirs-et-les-regrets-aussi_5462470_3246.html|tag|http://www.semanlink.net/tag/pedro_almodovar +https://www.lemonde.fr/culture/article/2019/05/15/julieta-les-souvenirs-et-les-regrets-aussi_5462470_3246.html|tag|http://www.semanlink.net/tag/film_espagnol +https://www.lemonde.fr/culture/article/2019/05/15/julieta-les-souvenirs-et-les-regrets-aussi_5462470_3246.html|title|« Julieta », de Pedro Almodovar, les souvenirs et les regrets aussi +https://www.lemonde.fr/culture/article/2019/05/15/julieta-les-souvenirs-et-les-regrets-aussi_5462470_3246.html|creationTime|2019-05-15T22:52:38Z +http://jqueryui.com/|creationDate|2010-06-07 +http://jqueryui.com/|tag|http://www.semanlink.net/tag/javascript_librairies +http://jqueryui.com/|title|jQuery UI +http://jqueryui.com/|creationTime|2010-06-07T10:01:32Z +http://aperture.sourceforge.net/index.html|creationDate|2005-11-10 +http://aperture.sourceforge.net/index.html|tag|http://www.semanlink.net/tag/semantic_web_tools +http://aperture.sourceforge.net/index.html|tag|http://www.semanlink.net/tag/java +http://aperture.sourceforge.net/index.html|comment|a Java framework for getting data and metadata +http://aperture.sourceforge.net/index.html|title|Aperture Framework +http://lifehacker.com/five-best-text-editors-1564907215|creationDate|2014-09-09 +http://lifehacker.com/five-best-text-editors-1564907215|tag|http://www.semanlink.net/tag/text_editor +http://lifehacker.com/five-best-text-editors-1564907215|title|Five Best Text Editors +http://lifehacker.com/five-best-text-editors-1564907215|creationTime|2014-09-09T14:59:37Z +http://www.w3.org/2005/Incubator/cwl/|creationDate|2006-12-01 +http://www.w3.org/2005/Incubator/cwl/|tag|http://www.semanlink.net/tag/common_web_language +http://www.w3.org/2005/Incubator/cwl/|comment|"The mission of the Common Web Language Incubator Group, part of the Incubator Activity, is to develop the CWL (Common Web Language), a common language for exchanging information through the web and also for enabling computers to process information semantically.
+The CWL is used to describe contents and meta-data of web pages written in natural languages. Then CWL will realize a language barrier free world in the web and will also enable computers to extract semantic information and knowledge from web pages accurately.
+The CWL will provide not only representation scheme but also a vocabulary with semantic background. It is an initiative to integrate existing and ongoing activities for providing a common description language with unambiguous grammar and enough amount of lexicons based on the CDL (Concept Description Language) scheme aiming at describing every kind of information understandable for computers." +http://www.w3.org/2005/Incubator/cwl/|title|W3C Common Web Language Incubator Group +http://booksearch.blogspot.com/2010/08/books-of-world-stand-up-and-be-counted.html|creationDate|2011-10-25 +http://booksearch.blogspot.com/2010/08/books-of-world-stand-up-and-be-counted.html|tag|http://www.semanlink.net/tag/livre +http://booksearch.blogspot.com/2010/08/books-of-world-stand-up-and-be-counted.html|tag|http://www.semanlink.net/tag/chiffres +http://booksearch.blogspot.com/2010/08/books-of-world-stand-up-and-be-counted.html|title|Inside Google Books: Books of the world, stand up and be counted! All 129,864,880 of you. +http://booksearch.blogspot.com/2010/08/books-of-world-stand-up-and-be-counted.html|creationTime|2011-10-25T16:40:09Z +https://www.lemonde.fr/afrique/article/2019/04/27/la-rdc-deuxieme-front-de-la-deforestation-mondiale_5455653_3212.html|creationDate|2019-04-27 +https://www.lemonde.fr/afrique/article/2019/04/27/la-rdc-deuxieme-front-de-la-deforestation-mondiale_5455653_3212.html|tag|http://www.semanlink.net/tag/deforestation +https://www.lemonde.fr/afrique/article/2019/04/27/la-rdc-deuxieme-front-de-la-deforestation-mondiale_5455653_3212.html|tag|http://www.semanlink.net/tag/congo_kinshasa +https://www.lemonde.fr/afrique/article/2019/04/27/la-rdc-deuxieme-front-de-la-deforestation-mondiale_5455653_3212.html|title|La RDC, deuxième front de la déforestation mondiale +https://www.lemonde.fr/afrique/article/2019/04/27/la-rdc-deuxieme-front-de-la-deforestation-mondiale_5455653_3212.html|creationTime|2019-04-27T19:58:05Z +https://www.theguardian.com/technology/2018/mar/14/tech-big-data-capitalism-give-wealth-back-to-people|creationDate|2018-03-15 +https://www.theguardian.com/technology/2018/mar/14/tech-big-data-capitalism-give-wealth-back-to-people|tag|http://www.semanlink.net/tag/big_data +https://www.theguardian.com/technology/2018/mar/14/tech-big-data-capitalism-give-wealth-back-to-people|title|Big data for the people: it's time to take it back from our tech overlords Technology The Guardian +https://www.theguardian.com/technology/2018/mar/14/tech-big-data-capitalism-give-wealth-back-to-people|creationTime|2018-03-15T13:26:54Z +http://mike2.openmethodology.org/w/images/8/89/Layered_Semantic_Enterprise_Architecture.png|creationDate|2010-07-02 +http://mike2.openmethodology.org/w/images/8/89/Layered_Semantic_Enterprise_Architecture.png|tag|http://www.semanlink.net/tag/semantic_enterprise_architecture +http://mike2.openmethodology.org/w/images/8/89/Layered_Semantic_Enterprise_Architecture.png|tag|http://www.semanlink.net/tag/mike_bergman +http://mike2.openmethodology.org/w/images/8/89/Layered_Semantic_Enterprise_Architecture.png|title|Layered_Semantic_Enterprise_Architecture.png +http://mike2.openmethodology.org/w/images/8/89/Layered_Semantic_Enterprise_Architecture.png|creationTime|2010-07-02T01:16:30Z +http://www.w3.org/2004/02/skos/|creationDate|2007-01-18 +http://www.w3.org/2004/02/skos/|tag|http://www.semanlink.net/tag/skos +http://www.w3.org/2004/02/skos/|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2004/02/skos/|title|Simple Knowledge Organisation Systems (SKOS) - home page 
+http://videolectures.net/eswc2014_tresp_machine_learning/|creationDate|2014-10-06 +http://videolectures.net/eswc2014_tresp_machine_learning/|tag|http://www.semanlink.net/tag/eswc_2014 +http://videolectures.net/eswc2014_tresp_machine_learning/|tag|http://www.semanlink.net/tag/machine_learning_semantic_web +http://videolectures.net/eswc2014_tresp_machine_learning/|title|Machine Learning with Knowledge Graphs - VideoLectures.NET +http://videolectures.net/eswc2014_tresp_machine_learning/|creationTime|2014-10-06T00:21:48Z +https://machinelearningmastery.com/encoder-decoder-recurrent-neural-network-models-neural-machine-translation/|creationDate|2018-11-07 +https://machinelearningmastery.com/encoder-decoder-recurrent-neural-network-models-neural-machine-translation/|tag|http://www.semanlink.net/tag/neural_machine_translation +https://machinelearningmastery.com/encoder-decoder-recurrent-neural-network-models-neural-machine-translation/|tag|http://www.semanlink.net/tag/seq2seq_encoder_decoder +https://machinelearningmastery.com/encoder-decoder-recurrent-neural-network-models-neural-machine-translation/|tag|http://www.semanlink.net/tag/encoder_decoder_architecture +https://machinelearningmastery.com/encoder-decoder-recurrent-neural-network-models-neural-machine-translation/|title|Encoder-Decoder Recurrent Neural Network Models for Neural Machine Translation +https://machinelearningmastery.com/encoder-decoder-recurrent-neural-network-models-neural-machine-translation/|creationTime|2018-11-07T00:04:55Z +http://blog.sgo.to/2014/09/schemaorg-actions-implementations.html|creationDate|2014-11-15 +http://blog.sgo.to/2014/09/schemaorg-actions-implementations.html|tag|http://www.semanlink.net/tag/samuel_goto +http://blog.sgo.to/2014/09/schemaorg-actions-implementations.html|tag|http://www.semanlink.net/tag/schema_org_actions +http://blog.sgo.to/2014/09/schemaorg-actions-implementations.html|title|Hello World: schema.org actions implementations +http://blog.sgo.to/2014/09/schemaorg-actions-implementations.html|creationTime|2014-11-15T14:26:50Z +https://fr.slideshare.net/fpservant/makolab-semanticday|creationDate|2017-10-04 +https://fr.slideshare.net/fpservant/makolab-semanticday|tag|http://www.semanlink.net/tag/fpservant_slideshare +https://fr.slideshare.net/fpservant/makolab-semanticday|tag|http://www.semanlink.net/tag/makolab_semantic_day +https://fr.slideshare.net/fpservant/makolab-semanticday|title|How to publish data about a range of cars - in 3 slides and 2 links (2013) +https://fr.slideshare.net/fpservant/makolab-semanticday|creationTime|2017-10-04T00:50:36Z +http://passeurdesciences.blog.lemonde.fr/2013/06/26/un-medecin-italien-veut-greffer-des-tetes-humaines/|creationDate|2013-06-26 +http://passeurdesciences.blog.lemonde.fr/2013/06/26/un-medecin-italien-veut-greffer-des-tetes-humaines/|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne +http://passeurdesciences.blog.lemonde.fr/2013/06/26/un-medecin-italien-veut-greffer-des-tetes-humaines/|tag|http://www.semanlink.net/tag/cerveau +http://passeurdesciences.blog.lemonde.fr/2013/06/26/un-medecin-italien-veut-greffer-des-tetes-humaines/|title|Un médecin italien veut greffer des têtes humaines Passeur de sciences +http://passeurdesciences.blog.lemonde.fr/2013/06/26/un-medecin-italien-veut-greffer-des-tetes-humaines/|creationTime|2013-06-26T20:25:56Z +http://es.wikipedia.org/wiki/Mujeres_al_borde_de_un_ataque_de_nervios|creationDate|2009-01-31 
+http://es.wikipedia.org/wiki/Mujeres_al_borde_de_un_ataque_de_nervios|tag|http://www.semanlink.net/tag/pedro_almodovar +http://es.wikipedia.org/wiki/Mujeres_al_borde_de_un_ataque_de_nervios|tag|http://www.semanlink.net/tag/femme +http://es.wikipedia.org/wiki/Mujeres_al_borde_de_un_ataque_de_nervios|comment|Femmes au bord de la crise de nerfs +http://es.wikipedia.org/wiki/Mujeres_al_borde_de_un_ataque_de_nervios|title|Mujeres al borde de un ataque de nervios +http://es.wikipedia.org/wiki/Mujeres_al_borde_de_un_ataque_de_nervios|creationTime|2009-01-31T23:54:49Z +http://www.petrikainulainen.net/programming/maven/running-solr-with-maven/|creationDate|2014-01-20 +http://www.petrikainulainen.net/programming/maven/running-solr-with-maven/|tag|http://www.semanlink.net/tag/solr +http://www.petrikainulainen.net/programming/maven/running-solr-with-maven/|tag|http://www.semanlink.net/tag/maven +http://www.petrikainulainen.net/programming/maven/running-solr-with-maven/|comment|http://stackoverflow.com/questions/10413781/how-do-you-set-up-jetty-start-with-maven +http://www.petrikainulainen.net/programming/maven/running-solr-with-maven/|title|Running Solr with Maven +http://www.petrikainulainen.net/programming/maven/running-solr-with-maven/|creationTime|2014-01-20T11:48:07Z +http://en.wikipedia.org/wiki/Midnight_in_the_Garden_of_Good_and_Evil_(film)|creationDate|2010-11-11 +http://en.wikipedia.org/wiki/Midnight_in_the_Garden_of_Good_and_Evil_(film)|tag|http://www.semanlink.net/tag/film_americain +http://en.wikipedia.org/wiki/Midnight_in_the_Garden_of_Good_and_Evil_(film)|tag|http://www.semanlink.net/tag/clint_eastwood +http://en.wikipedia.org/wiki/Midnight_in_the_Garden_of_Good_and_Evil_(film)|title|Midnight in the Garden of Good and Evil (film) +http://en.wikipedia.org/wiki/Midnight_in_the_Garden_of_Good_and_Evil_(film)|creationTime|2010-11-11T23:10:29Z +https://medium.com/@jakefuentes/the-immigration-ban-is-a-headfake-and-were-falling-for-it-b8910e78f0c5#.txou3qh52|creationDate|2017-02-01 +https://medium.com/@jakefuentes/the-immigration-ban-is-a-headfake-and-were-falling-for-it-b8910e78f0c5#.txou3qh52|tag|http://www.semanlink.net/tag/trump +https://medium.com/@jakefuentes/the-immigration-ban-is-a-headfake-and-were-falling-for-it-b8910e78f0c5#.txou3qh52|title|The Immigration Ban is a Headfake, and We’re Falling For It – Medium +https://medium.com/@jakefuentes/the-immigration-ban-is-a-headfake-and-were-falling-for-it-b8910e78f0c5#.txou3qh52|creationTime|2017-02-01T08:20:10Z +http://www.lemonde.fr/idees/article/2018/03/26/biodiversite-l-urgence-du-politique_5276421_3232.html|creationDate|2018-03-26 +http://www.lemonde.fr/idees/article/2018/03/26/biodiversite-l-urgence-du-politique_5276421_3232.html|tag|http://www.semanlink.net/tag/biodiversite +http://www.lemonde.fr/idees/article/2018/03/26/biodiversite-l-urgence-du-politique_5276421_3232.html|title|Biodiversité : l’urgence du politique +http://www.lemonde.fr/idees/article/2018/03/26/biodiversite-l-urgence-du-politique_5276421_3232.html|creationTime|2018-03-26T23:17:12Z +http://ask.metafilter.com/54805/Watch-Cable-TV-on-a-MacBook|creationDate|2009-12-15 +http://ask.metafilter.com/54805/Watch-Cable-TV-on-a-MacBook|tag|http://www.semanlink.net/tag/television +http://ask.metafilter.com/54805/Watch-Cable-TV-on-a-MacBook|title|Watch Cable TV on a MacBook? 
Ask MetaFilter +http://ask.metafilter.com/54805/Watch-Cable-TV-on-a-MacBook|creationTime|2009-12-15T21:16:45Z +http://www.deeplearningbook.org/|creationDate|2017-12-16 +http://www.deeplearningbook.org/|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.deeplearningbook.org/|tag|http://www.semanlink.net/tag/ian_goodfellow +http://www.deeplearningbook.org/|tag|http://www.semanlink.net/tag/deep_learning_book +http://www.deeplearningbook.org/|title|Deep Learning (Ian Goodfellow and Yoshua Bengio and Aaron Courville) +http://www.deeplearningbook.org/|creationTime|2017-12-16T14:25:02Z +https://devcenter.heroku.com/articles/local-maven-dependencies|creationDate|2014-10-16 +https://devcenter.heroku.com/articles/local-maven-dependencies|tag|http://www.semanlink.net/tag/maven_tips +https://devcenter.heroku.com/articles/local-maven-dependencies|comment|"voir 2017/02/maven-local-repo?png + + + +project.local +project +file:${project.basedir}/repo + + + + +Phenix-Prototype +json-rdf +0.0.1 + + + + + +" +https://devcenter.heroku.com/articles/local-maven-dependencies|title|Adding Unmanaged Dependencies to a Maven Project +https://devcenter.heroku.com/articles/local-maven-dependencies|creationTime|2014-10-16T22:10:19Z +http://www.ldodds.com/blog/2010/12/rdf-and-json-a-clash-of-model-and-syntax/|creationDate|2010-12-06 +http://www.ldodds.com/blog/2010/12/rdf-and-json-a-clash-of-model-and-syntax/|tag|http://www.semanlink.net/tag/leigh_dodds +http://www.ldodds.com/blog/2010/12/rdf-and-json-a-clash-of-model-and-syntax/|tag|http://www.semanlink.net/tag/rdf_in_json +http://www.ldodds.com/blog/2010/12/rdf-and-json-a-clash-of-model-and-syntax/|title|RDF and JSON: A Clash of Model and Syntax « Lost Boy +http://www.ldodds.com/blog/2010/12/rdf-and-json-a-clash-of-model-and-syntax/|creationTime|2010-12-06T18:07:28Z +http://stackoverflow.com/questions/111102/how-do-javascript-closures-work|creationDate|2012-08-14 +http://stackoverflow.com/questions/111102/how-do-javascript-closures-work|tag|http://www.semanlink.net/tag/javascript_closures +http://stackoverflow.com/questions/111102/how-do-javascript-closures-work|title|How do JavaScript closures work? - Stack Overflow +http://stackoverflow.com/questions/111102/how-do-javascript-closures-work|creationTime|2012-08-14T12:36:08Z +https://hacked.com/ancient-viruses-hacked-human-brains/|creationDate|2015-01-14 +https://hacked.com/ancient-viruses-hacked-human-brains/|tag|http://www.semanlink.net/tag/horizontal_gene_transfer +https://hacked.com/ancient-viruses-hacked-human-brains/|tag|http://www.semanlink.net/tag/cerveau +https://hacked.com/ancient-viruses-hacked-human-brains/|tag|http://www.semanlink.net/tag/virus +https://hacked.com/ancient-viruses-hacked-human-brains/|comment|A new study from Lund University in Sweden indicates that inherited viruses that are millions of years old play an important role in building up the complex networks that characterize the human brain. 
+https://hacked.com/ancient-viruses-hacked-human-brains/|title|Ancient Viruses Hacked Human Brains +https://hacked.com/ancient-viruses-hacked-human-brains/|creationTime|2015-01-14T00:05:18Z +http://wiki.apache.org/solr/OpenNLP|creationDate|2014-03-15 +http://wiki.apache.org/solr/OpenNLP|tag|http://www.semanlink.net/tag/solr_and_nlp +http://wiki.apache.org/solr/OpenNLP|title|OpenNLP - Solr Wiki +http://wiki.apache.org/solr/OpenNLP|creationTime|2014-03-15T13:51:20Z +http://snipsnap.org/space/start|creationDate|2005-10-11 +http://snipsnap.org/space/start|tag|http://www.semanlink.net/tag/wiki_software +http://snipsnap.org/space/start|title|SnipSnap +http://www.bbc.com/news/health-32024158|creationDate|2015-03-28 +http://www.bbc.com/news/health-32024158|tag|http://www.semanlink.net/tag/islande +http://www.bbc.com/news/health-32024158|tag|http://www.semanlink.net/tag/adn +http://www.bbc.com/news/health-32024158|title|DNA of 'an entire nation' assessed - BBC News +http://www.bbc.com/news/health-32024158|creationTime|2015-03-28T01:31:10Z +https://plus.google.com/107429617152575897589/posts/iPmatxBYuj2|creationDate|2013-03-21 +https://plus.google.com/107429617152575897589/posts/iPmatxBYuj2|tag|http://www.semanlink.net/tag/hixie +https://plus.google.com/107429617152575897589/posts/iPmatxBYuj2|tag|http://www.semanlink.net/tag/drm +https://plus.google.com/107429617152575897589/posts/iPmatxBYuj2|comment|DRM's purpose is to give content providers control over software and hardware providers, and it is satisfying that purpose well. +https://plus.google.com/107429617152575897589/posts/iPmatxBYuj2|title|Ian Hickson - Google+ - Discussions about DRM often land on the fundamental problem… +https://plus.google.com/107429617152575897589/posts/iPmatxBYuj2|creationTime|2013-03-21T15:05:40Z +http://jibbering.com/rdf-parser/|creationDate|2005-09-28 +http://jibbering.com/rdf-parser/|tag|http://www.semanlink.net/tag/javascript_rdf_parser +http://jibbering.com/rdf-parser/|title|Simple javascript RDF Parser and query thingy. +http://www.pbs.org/cringely/pulpit/pulpit20060112.html|creationDate|2006-01-16 +http://www.pbs.org/cringely/pulpit/pulpit20060112.html|tag|http://www.semanlink.net/tag/cringely +http://www.pbs.org/cringely/pulpit/pulpit20060112.html|title|PBS I, Cringely . 
January 12, 2006 - Win Some, Lose Some +http://searchengineland.com/google-hummingbird-when-evolutionary-becomes-revolutionary-173740?goback=%2Egde_138726_member_5795206045756313600#%21|creationDate|2013-10-14 +http://searchengineland.com/google-hummingbird-when-evolutionary-becomes-revolutionary-173740?goback=%2Egde_138726_member_5795206045756313600#%21|tag|http://www.semanlink.net/tag/google_hummingbird +http://searchengineland.com/google-hummingbird-when-evolutionary-becomes-revolutionary-173740?goback=%2Egde_138726_member_5795206045756313600#%21|tag|http://www.semanlink.net/tag/seo +http://searchengineland.com/google-hummingbird-when-evolutionary-becomes-revolutionary-173740?goback=%2Egde_138726_member_5795206045756313600#%21|title|Google Hummingbird: When Evolutionary Becomes Revolutionary +http://searchengineland.com/google-hummingbird-when-evolutionary-becomes-revolutionary-173740?goback=%2Egde_138726_member_5795206045756313600#%21|creationTime|2013-10-14T11:38:18Z +http://www.macdevcenter.com/pub/a/mac/collections/webserving.html|creationDate|2006-01-21 +http://www.macdevcenter.com/pub/a/mac/collections/webserving.html|tag|http://www.semanlink.net/tag/mac_os_x_web_serving +http://www.macdevcenter.com/pub/a/mac/collections/webserving.html|tag|http://www.semanlink.net/tag/apache +http://www.macdevcenter.com/pub/a/mac/collections/webserving.html|title|MacDevCenter.com: Apache and Web Serving with Mac OS X +https://m.mediawiki.org/wiki/Wikidata_query_service/User_Manual#SPARQL_endpoint|creationDate|2015-09-08 +https://m.mediawiki.org/wiki/Wikidata_query_service/User_Manual#SPARQL_endpoint|tag|http://www.semanlink.net/tag/wikidata_query_service +https://m.mediawiki.org/wiki/Wikidata_query_service/User_Manual#SPARQL_endpoint|title|Wikidata query service/User Manual - MediaWiki +https://m.mediawiki.org/wiki/Wikidata_query_service/User_Manual#SPARQL_endpoint|creationTime|2015-09-08T23:44:16Z +http://java.sun.com/products/servlet/Filters.html|creationDate|2005-07-05 +http://java.sun.com/products/servlet/Filters.html|tag|http://www.semanlink.net/tag/servlet +http://java.sun.com/products/servlet/Filters.html|title|The Essentials of Filters +http://www.w3.org/2003/12/semantic-extractor.html|creationDate|2009-02-13 +http://www.w3.org/2003/12/semantic-extractor.html|tag|http://www.semanlink.net/tag/html +http://www.w3.org/2003/12/semantic-extractor.html|tag|http://www.semanlink.net/tag/xslt +http://www.w3.org/2003/12/semantic-extractor.html|tag|http://www.semanlink.net/tag/w3c +http://www.w3.org/2003/12/semantic-extractor.html|comment|"This tool, geared by an XSLT stylesheet, tries to extract some information from a HTML semantic rich document. It only uses information available through a good usage of the semantics defined in HTML. Detects GRRDL and RDFa. 
+" +http://www.w3.org/2003/12/semantic-extractor.html|title|Semantic data extractor - QA @ W3C +http://www.w3.org/2003/12/semantic-extractor.html|creationTime|2009-02-13T15:08:40Z +https://transacl.org/ojs/index.php/tacl/article/view/582/158|creationDate|2017-05-20 +https://transacl.org/ojs/index.php/tacl/article/view/582/158|tag|http://www.semanlink.net/tag/topic_models_word_embedding +https://transacl.org/ojs/index.php/tacl/article/view/582/158|tag|http://www.semanlink.net/tag/word2vec +https://transacl.org/ojs/index.php/tacl/article/view/582/158|title|Improving Topic Models with Latent Feature Word Representations Nguyen Transactions of the Association for Computational Linguistics +https://transacl.org/ojs/index.php/tacl/article/view/582/158|creationTime|2017-05-20T14:05:12Z +https://page.mi.fu-berlin.de/rojas/neural/chapter/K7.pdf|creationDate|2017-08-21 +https://page.mi.fu-berlin.de/rojas/neural/chapter/K7.pdf|tag|http://www.semanlink.net/tag/backpropagation +https://page.mi.fu-berlin.de/rojas/neural/chapter/K7.pdf|comment|"a proof of the backpropagation algorithm based on a graphical approach in which the algorithm reduces to a graph labeling problem. This method is not only more general than the usual analytical derivations, which handle only the case of special network topologies, but also much easier to follow. It also shows how the algorithm can be efficiently implemented in computing systems in which only local information can be transported through the network. +" +https://page.mi.fu-berlin.de/rojas/neural/chapter/K7.pdf|title|The backpropagation algorithm +https://page.mi.fu-berlin.de/rojas/neural/chapter/K7.pdf|creationTime|2017-08-21T16:13:15Z +https://www.theguardian.com/world/2018/jun/05/hostage-niger-islamic-state-group-they-want-to-kill-foreign-soldiers|creationDate|2018-06-08 +https://www.theguardian.com/world/2018/jun/05/hostage-niger-islamic-state-group-they-want-to-kill-foreign-soldiers|tag|http://www.semanlink.net/tag/niger +https://www.theguardian.com/world/2018/jun/05/hostage-niger-islamic-state-group-they-want-to-kill-foreign-soldiers|tag|http://www.semanlink.net/tag/ei +https://www.theguardian.com/world/2018/jun/05/hostage-niger-islamic-state-group-they-want-to-kill-foreign-soldiers|title|Niger Islamic State hostage: 'They want to kill foreign soldiers' The Guardian +https://www.theguardian.com/world/2018/jun/05/hostage-niger-islamic-state-group-they-want-to-kill-foreign-soldiers|creationTime|2018-06-08T13:15:55Z +http://www.procata.com/blog/archives/2007/10/28/working-with-php-5-in-mac-os-x-105/|creationDate|2008-01-03 +http://www.procata.com/blog/archives/2007/10/28/working-with-php-5-in-mac-os-x-105/|tag|http://www.semanlink.net/tag/php +http://www.procata.com/blog/archives/2007/10/28/working-with-php-5-in-mac-os-x-105/|tag|http://www.semanlink.net/tag/leopard +http://www.procata.com/blog/archives/2007/10/28/working-with-php-5-in-mac-os-x-105/|title|Working with PHP 5 in Mac OS X 10.5 (Leopard) - Professional PHP +http://www.procata.com/blog/archives/2007/10/28/working-with-php-5-in-mac-os-x-105/|creationTime|2008-01-03T13:35:01Z +http://rapid-i.com/content/view/181/190/|creationDate|2013-09-03 +http://rapid-i.com/content/view/181/190/|tag|http://www.semanlink.net/tag/rapidminer +http://rapid-i.com/content/view/181/190/|title|Rapid - I, RapidMiner +http://rapid-i.com/content/view/181/190/|creationTime|2013-09-03T11:29:16Z +https://www.sciencedirect.com/science/article/pii/S0172219018300103|creationDate|2019-02-13 
+https://www.sciencedirect.com/science/article/pii/S0172219018300103|tag|http://www.semanlink.net/tag/survey +https://www.sciencedirect.com/science/article/pii/S0172219018300103|tag|http://www.semanlink.net/tag/ip_ir_ml_ia +https://www.sciencedirect.com/science/article/pii/S0172219018300103|comment|"A literature review on artificial intelligence, machine learning and deep learning methods for analysing intellectual property data. + +Intellectual Property Analytics (IPA): data science of analysing large amount of IP information, to discover relationships, trends and patterns for decision +making + +" +https://www.sciencedirect.com/science/article/pii/S0172219018300103|title|The state-of-the-art on Intellectual Property Analytics (IPA) - ScienceDirect (2018) +https://www.sciencedirect.com/science/article/pii/S0172219018300103|creationTime|2019-02-13T23:22:59Z +http://www.internetactu.net/2013/07/12/open-data-13-la-technique-a-t-elle-pris-le-pas/|creationDate|2013-08-31 +http://www.internetactu.net/2013/07/12/open-data-13-la-technique-a-t-elle-pris-le-pas/|tag|http://www.semanlink.net/tag/open_data +http://www.internetactu.net/2013/07/12/open-data-13-la-technique-a-t-elle-pris-le-pas/|title|Open Data (1/3) : la technique a-t-elle pris le pas ? « InternetActu.net +http://www.internetactu.net/2013/07/12/open-data-13-la-technique-a-t-elle-pris-le-pas/|creationTime|2013-08-31T14:51:29Z +http://www.laconferencedeparis.fr/|creationDate|2014-03-27 +http://www.laconferencedeparis.fr/|tag|http://www.semanlink.net/tag/open_data +http://www.laconferencedeparis.fr/|tag|http://www.semanlink.net/tag/data_gouv_fr +http://www.laconferencedeparis.fr/|title|La Conférence de Paris : Open Data et Gouvernement Ouvert +http://www.laconferencedeparis.fr/|creationTime|2014-03-27T16:18:22Z +http://esw.w3.org/topic/SPARQL/Extensions/SPARQLScript|creationDate|2008-06-12 +http://esw.w3.org/topic/SPARQL/Extensions/SPARQLScript|tag|http://www.semanlink.net/tag/sparql +http://esw.w3.org/topic/SPARQL/Extensions/SPARQLScript|tag|http://www.semanlink.net/tag/nova_spivak +http://esw.w3.org/topic/SPARQL/Extensions/SPARQLScript|title|SPARQL/Extensions/SPARQLScript - ESW Wiki +http://esw.w3.org/topic/SPARQL/Extensions/SPARQLScript|creationTime|2008-06-12T08:13:22Z +http://www.snee.com/bobdc.blog/2013/07/using-values-to-map-values-in.html|creationDate|2013-07-11 +http://www.snee.com/bobdc.blog/2013/07/using-values-to-map-values-in.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2013/07/using-values-to-map-values-in.html|tag|http://www.semanlink.net/tag/sparql +http://www.snee.com/bobdc.blog/2013/07/using-values-to-map-values-in.html|title|Using VALUES to map values in a SPARQL query - bobdc.blog +http://www.snee.com/bobdc.blog/2013/07/using-values-to-map-values-in.html|creationTime|2013-07-11T01:11:43Z +http://diveintohtml5.info/storage.html|creationDate|2015-06-20 +http://diveintohtml5.info/storage.html|tag|http://www.semanlink.net/tag/html5 +http://diveintohtml5.info/storage.html|title|Local Storage - Dive Into HTML5 +http://diveintohtml5.info/storage.html|creationTime|2015-06-20T17:26:23Z +http://www.typo3-media.com/blog/solr-search-request-handlers.html|creationDate|2012-05-14 +http://www.typo3-media.com/blog/solr-search-request-handlers.html|tag|http://www.semanlink.net/tag/solr +http://www.typo3-media.com/blog/solr-search-request-handlers.html|title|SOLR Search Request Handlers explained +http://www.typo3-media.com/blog/solr-search-request-handlers.html|creationTime|2012-05-14T15:02:14Z 
+https://www.bbc.com/news/science-environment-47755275#|creationDate|2019-03-30 +https://www.bbc.com/news/science-environment-47755275#|tag|http://www.semanlink.net/tag/tanis_kt +https://www.bbc.com/news/science-environment-47755275#|title|Chicxulub asteroid impact: Stunning fossils record dinosaurs' demise - BBC News +https://www.bbc.com/news/science-environment-47755275#|creationTime|2019-03-30T13:16:45Z +http://www.apanews.net/apa.php?article51458|creationDate|2008-01-10 +http://www.apanews.net/apa.php?article51458|tag|http://www.semanlink.net/tag/niamey +http://www.apanews.net/apa.php?article51458|tag|http://www.semanlink.net/tag/rebellion_touaregue +http://www.apanews.net/apa.php?article51458|title|Un journaliste nigérien tué dans l’explosion d’une mine à Niamey +http://www.apanews.net/apa.php?article51458|creationTime|2008-01-10T00:41:00Z +http://web2.wsj2.com/the_best_web_20_software_of_2005.htm|creationDate|2005-12-12 +http://web2.wsj2.com/the_best_web_20_software_of_2005.htm|tag|http://www.semanlink.net/tag/web_2_0 +http://web2.wsj2.com/the_best_web_20_software_of_2005.htm|title|The Best Web 2.0 Software of 2005 +http://vcg.informatik.uni-rostock.de/~hs162/treeposter/poster.html|creationDate|2011-07-25 +http://vcg.informatik.uni-rostock.de/~hs162/treeposter/poster.html|tag|http://www.semanlink.net/tag/treeview +http://vcg.informatik.uni-rostock.de/~hs162/treeposter/poster.html|title|A Visual Bibliography of Tree Visualization +http://vcg.informatik.uni-rostock.de/~hs162/treeposter/poster.html|creationTime|2011-07-25T15:20:21Z +https://nlp.stanford.edu/software/CRF-NER.shtml|creationDate|2018-05-20 +https://nlp.stanford.edu/software/CRF-NER.shtml|tag|http://www.semanlink.net/tag/stanford_ner +https://nlp.stanford.edu/software/CRF-NER.shtml|comment|general implementation of (arbitrary order) linear chain Conditional Random Field (CRF) sequence models +https://nlp.stanford.edu/software/CRF-NER.shtml|title|Stanford Named Entity Recognizer +https://nlp.stanford.edu/software/CRF-NER.shtml|creationTime|2018-05-20T17:59:44Z +https://scholar.google.com/citations?view_op=list_works&hl=fr&user=WNFqgy8AAAAJ|creationDate|2018-01-05 +https://scholar.google.com/citations?view_op=list_works&hl=fr&user=WNFqgy8AAAAJ|tag|http://www.semanlink.net/tag/fps +https://scholar.google.com/citations?view_op=list_works&hl=fr&user=WNFqgy8AAAAJ|title|François-Paul Servant - Citations Google Scholar +https://scholar.google.com/citations?view_op=list_works&hl=fr&user=WNFqgy8AAAAJ|creationTime|2018-01-05T14:37:38Z +http://www.mondeca.com/foaf/voaf-doc.html|creationDate|2011-01-25 +http://www.mondeca.com/foaf/voaf-doc.html|tag|http://www.semanlink.net/tag/voaf +http://www.mondeca.com/foaf/voaf-doc.html|title|Vocabularies of a Friend (VOAF) +http://www.mondeca.com/foaf/voaf-doc.html|creationTime|2011-01-25T14:53:12Z +http://sourceforge.net/mailarchive/forum.php?forum_name=dbpedia-discussion|creationDate|2008-05-18 +http://sourceforge.net/mailarchive/forum.php?forum_name=dbpedia-discussion|tag|http://www.semanlink.net/tag/mailing_list +http://sourceforge.net/mailarchive/forum.php?forum_name=dbpedia-discussion|tag|http://www.semanlink.net/tag/dbpedia +http://sourceforge.net/mailarchive/forum.php?forum_name=dbpedia-discussion|title|SourceForge.net: dbpedia-discussion +http://sourceforge.net/mailarchive/forum.php?forum_name=dbpedia-discussion|creationTime|2008-05-18T02:01:45Z +http://www.onjava.com/pub/a/onjava/excerpt/jebp_3/index2.html|creationDate|2008-09-08 
+http://www.onjava.com/pub/a/onjava/excerpt/jebp_3/index2.html|tag|http://www.semanlink.net/tag/http_cache +http://www.onjava.com/pub/a/onjava/excerpt/jebp_3/index2.html|tag|http://www.semanlink.net/tag/servlet +http://www.onjava.com/pub/a/onjava/excerpt/jebp_3/index2.html|title|Caching with Servlets +http://www.onjava.com/pub/a/onjava/excerpt/jebp_3/index2.html|creationTime|2008-09-08T22:39:35Z +https://github.com/NirantK/best-of-jupyter|creationDate|2018-05-29 +https://github.com/NirantK/best-of-jupyter|tag|http://www.semanlink.net/tag/tips +https://github.com/NirantK/best-of-jupyter|tag|http://www.semanlink.net/tag/jupyter +https://github.com/NirantK/best-of-jupyter|title|NirantK/best-of-jupyter: Jupyter Tips, Tricks, Best Practices with Sample Code for Productivity Boost +https://github.com/NirantK/best-of-jupyter|creationTime|2018-05-29T13:34:10Z +https://ieeexplore.ieee.org/document/7876817|creationDate|2019-02-02 +https://ieeexplore.ieee.org/document/7876817|tag|http://www.semanlink.net/tag/three_way_decisions +https://ieeexplore.ieee.org/document/7876817|title|Enhancing Binary Classification by Modeling Uncertain Boundary in Three-Way Decisions - IEEE Journals & Magazine +https://ieeexplore.ieee.org/document/7876817|creationTime|2019-02-02T15:52:54Z +https://www.lemonde.fr/idees/article/2018/08/07/peaux-d-ane-en-afrique-un-conte-moderne-a-la-chinoise_5340085_3232.html|creationDate|2018-08-07 +https://www.lemonde.fr/idees/article/2018/08/07/peaux-d-ane-en-afrique-un-conte-moderne-a-la-chinoise_5340085_3232.html|tag|http://www.semanlink.net/tag/ane +https://www.lemonde.fr/idees/article/2018/08/07/peaux-d-ane-en-afrique-un-conte-moderne-a-la-chinoise_5340085_3232.html|tag|http://www.semanlink.net/tag/chine_afrique +https://www.lemonde.fr/idees/article/2018/08/07/peaux-d-ane-en-afrique-un-conte-moderne-a-la-chinoise_5340085_3232.html|title|Commerce de peaux d’âne en Afrique, un conte moderne à la chinoise +https://www.lemonde.fr/idees/article/2018/08/07/peaux-d-ane-en-afrique-un-conte-moderne-a-la-chinoise_5340085_3232.html|creationTime|2018-08-07T17:51:23Z +https://www.lemonde.fr/international/article/2018/11/30/north-sentinel-derriere-la-mort-d-un-missionnaire-une-longue-histoire-de-resistance_5391100_3210.html|creationDate|2018-12-01 +https://www.lemonde.fr/international/article/2018/11/30/north-sentinel-derriere-la-mort-d-un-missionnaire-une-longue-histoire-de-resistance_5391100_3210.html|tag|http://www.semanlink.net/tag/uncontacted_peoples +https://www.lemonde.fr/international/article/2018/11/30/north-sentinel-derriere-la-mort-d-un-missionnaire-une-longue-histoire-de-resistance_5391100_3210.html|title|North Sentinel : derrière la mort d’un missionnaire, une longue histoire de résistance +https://www.lemonde.fr/international/article/2018/11/30/north-sentinel-derriere-la-mort-d-un-missionnaire-une-longue-histoire-de-resistance_5391100_3210.html|creationTime|2018-12-01T11:21:03Z +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5036618.stm|creationDate|2006-06-06 +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5036618.stm|tag|http://www.semanlink.net/tag/evolution +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5036618.stm|tag|http://www.semanlink.net/tag/decouverte_d_especes_inconnues +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5036618.stm|comment|Eight previously unknown invertebrate creatures have been discovered in a cave in central Israel. 
The largest is a white shrimp-like crustacean. Another resembles a species of scorpion and is blind. The cave, near the city of Ramle, contains a lake and was uncovered during drilling at a quarry. Scientists say it is a unique ecosystem that has been sealed off from the rest of the world for five million years and could contain other ancient lifeforms. +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5036618.stm|title|BBC NEWS - Unknown creatures found in cave +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5036618.stm|source|BBC +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/5036618.stm|date|2006-06-01 +http://pisani.blog.lemonde.fr/pisani/2005/03/partager_nos_si.html|creationDate|2005-04-04 +http://pisani.blog.lemonde.fr/pisani/2005/03/partager_nos_si.html|tag|http://www.semanlink.net/tag/bookmarks +http://pisani.blog.lemonde.fr/pisani/2005/03/partager_nos_si.html|tag|http://www.semanlink.net/tag/blogs_le_monde +http://pisani.blog.lemonde.fr/pisani/2005/03/partager_nos_si.html|title|Partager nos signets +https://www.nytimes.com/2018/03/17/us/politics/cambridge-analytica-trump-campaign.html|creationDate|2018-03-18 +https://www.nytimes.com/2018/03/17/us/politics/cambridge-analytica-trump-campaign.html|tag|http://www.semanlink.net/tag/trump +https://www.nytimes.com/2018/03/17/us/politics/cambridge-analytica-trump-campaign.html|tag|http://www.semanlink.net/tag/facebook_cambridge_analytica +https://www.nytimes.com/2018/03/17/us/politics/cambridge-analytica-trump-campaign.html|title|How Trump Consultants Exploited the Facebook Data of Millions - The New York Times +https://www.nytimes.com/2018/03/17/us/politics/cambridge-analytica-trump-campaign.html|creationTime|2018-03-18T10:26:39Z +http://www.w3.org/2007/03/RdfRDB/papers/d2rq-positionpaper/|creationDate|2009-02-10 +http://www.w3.org/2007/03/RdfRDB/papers/d2rq-positionpaper/|tag|http://www.semanlink.net/tag/d2rq +http://www.w3.org/2007/03/RdfRDB/papers/d2rq-positionpaper/|title|D2RQ — Lessons Learned +http://www.w3.org/2007/03/RdfRDB/papers/d2rq-positionpaper/|creationTime|2009-02-10T22:43:40Z +http://www.paulgraham.com/investors.html|creationDate|2006-08-29 +http://www.paulgraham.com/investors.html|tag|http://www.semanlink.net/tag/paul_graham +http://www.paulgraham.com/investors.html|title|How to Present to Investors +http://www.bbc.com/news/technology-36376966|creationDate|2016-10-02 +http://www.bbc.com/news/technology-36376966|tag|http://www.semanlink.net/tag/foxconn +http://www.bbc.com/news/technology-36376966|tag|http://www.semanlink.net/tag/robotisation +http://www.bbc.com/news/technology-36376966|title|Foxconn replaces '60,000 factory workers with robots' - BBC News +http://www.bbc.com/news/technology-36376966|creationTime|2016-10-02T15:56:07Z +http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/|creationDate|2017-07-11 +http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/|tag|http://www.semanlink.net/tag/nlp_tools +http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/|tag|http://www.semanlink.net/tag/part_of_speech_tagging +http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/|title|TreeTagger - a part-of-speech tagger for many languages +http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/|creationTime|2017-07-11T15:44:58Z +http://www.theguardian.com/commentisfree/2013/dec/30/we-need-to-talk-about-ted|creationDate|2014-03-01 
+http://www.theguardian.com/commentisfree/2013/dec/30/we-need-to-talk-about-ted|tag|http://www.semanlink.net/tag/ted +http://www.theguardian.com/commentisfree/2013/dec/30/we-need-to-talk-about-ted|title|We need to talk about TED Benjamin Bratton theguardian.com +http://www.theguardian.com/commentisfree/2013/dec/30/we-need-to-talk-about-ted|creationTime|2014-03-01T16:17:58Z +http://datahub.io/group/country-sn|creationDate|2013-09-02 +http://datahub.io/group/country-sn|tag|http://www.semanlink.net/tag/okfn_datahub +http://datahub.io/group/country-sn|tag|http://www.semanlink.net/tag/senegal +http://datahub.io/group/country-sn|tag|http://www.semanlink.net/tag/new_africa +http://datahub.io/group/country-sn|title|Senegal - the Datahub +http://datahub.io/group/country-sn|creationTime|2013-09-02T11:08:11Z +https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/|creationDate|2018-11-05 +https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/|tag|http://www.semanlink.net/tag/seq2seq_with_attention +https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/|title|Visualizing A Neural Machine Translation Model (Mechanics of Seq2seq Models With Attention) – Jay Alammar +https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/|creationTime|2018-11-05T00:27:12Z +http://www.correiobragantino.com.br/arq_gastronomia/gastronomia.html|creationDate|2014-01-18 +http://www.correiobragantino.com.br/arq_gastronomia/gastronomia.html|tag|http://www.semanlink.net/tag/ofir +http://www.correiobragantino.com.br/arq_gastronomia/gastronomia.html|title|Correio Bragantino News - CB NEWS :: Bragança - Pará - Brasil +http://www.correiobragantino.com.br/arq_gastronomia/gastronomia.html|creationTime|2014-01-18T10:57:32Z +https://www.huffingtonpost.fr/2018/11/21/les-spectateurs-du-2e-concert-de-lauryn-hill-a-paris-ont-adore-et-ils-lont-fait-savoir_a_23596467/|creationDate|2018-11-22 +https://www.huffingtonpost.fr/2018/11/21/les-spectateurs-du-2e-concert-de-lauryn-hill-a-paris-ont-adore-et-ils-lont-fait-savoir_a_23596467/|tag|http://www.semanlink.net/tag/lauryn_hill +https://www.huffingtonpost.fr/2018/11/21/les-spectateurs-du-2e-concert-de-lauryn-hill-a-paris-ont-adore-et-ils-lont-fait-savoir_a_23596467/|title|Les spectateurs du 2e concert de Lauryn Hill à Paris ont adoré (et ils l'ont fait savoir) Le Huffington Post +https://www.huffingtonpost.fr/2018/11/21/les-spectateurs-du-2e-concert-de-lauryn-hill-a-paris-ont-adore-et-ils-lont-fait-savoir_a_23596467/|creationTime|2018-11-22T09:46:04Z +https://deeplearning4j.org/lstm.html|creationDate|2017-10-22 +https://deeplearning4j.org/lstm.html|tag|http://www.semanlink.net/tag/lstm_networks +https://deeplearning4j.org/lstm.html|tag|http://www.semanlink.net/tag/recurrent_neural_network +https://deeplearning4j.org/lstm.html|tag|http://www.semanlink.net/tag/tutorial +https://deeplearning4j.org/lstm.html|title|A Beginner's Guide to Recurrent Networks and LSTMs - Deeplearning4j +https://deeplearning4j.org/lstm.html|creationTime|2017-10-22T13:39:42Z +http://www.diplomatie.gouv.fr/fr/actions-france_830/documentaire_1045/diffusion-non-commerciale_5378/offre-dvd_5373/grand-ecran_10336/hommage-jean-rouch_10341/index.html|creationDate|2007-08-22 
+http://www.diplomatie.gouv.fr/fr/actions-france_830/documentaire_1045/diffusion-non-commerciale_5378/offre-dvd_5373/grand-ecran_10336/hommage-jean-rouch_10341/index.html|tag|http://www.semanlink.net/tag/jean_rouch +http://www.diplomatie.gouv.fr/fr/actions-france_830/documentaire_1045/diffusion-non-commerciale_5378/offre-dvd_5373/grand-ecran_10336/hommage-jean-rouch_10341/index.html|title|Hommage à Jean Rouch -Ministère des Affaires étrangères- +http://www.diplomatie.gouv.fr/fr/actions-france_830/documentaire_1045/diffusion-non-commerciale_5378/offre-dvd_5373/grand-ecran_10336/hommage-jean-rouch_10341/index.html|creationTime|2007-08-22T00:49:16Z +http://www.svenskaakademien.se/en/nobel-lecture|creationDate|2017-06-05 +http://www.svenskaakademien.se/en/nobel-lecture|tag|http://www.semanlink.net/tag/bob_dylan +http://www.svenskaakademien.se/en/nobel-lecture|tag|http://www.semanlink.net/tag/prix_nobel +http://www.svenskaakademien.se/en/nobel-lecture|comment|"""Sing in me, oh Muse, and through me tell the story""." +http://www.svenskaakademien.se/en/nobel-lecture|title|Bob Dylan's Nobel Lecture +http://www.svenskaakademien.se/en/nobel-lecture|creationTime|2017-06-05T19:16:12Z +http://www.ibm.com/developerworks/library/x-ajaxxml7/?ca=dgr-lnxw01AjaxMedia|creationDate|2007-11-08 +http://www.ibm.com/developerworks/library/x-ajaxxml7/?ca=dgr-lnxw01AjaxMedia|tag|http://www.semanlink.net/tag/ajax +http://www.ibm.com/developerworks/library/x-ajaxxml7/?ca=dgr-lnxw01AjaxMedia|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.ibm.com/developerworks/library/x-ajaxxml7/?ca=dgr-lnxw01AjaxMedia|comment|Use Ajax techniques to show movies and slide shows +http://www.ibm.com/developerworks/library/x-ajaxxml7/?ca=dgr-lnxw01AjaxMedia|title|Ajax and XML: Ajax for media +http://www.ibm.com/developerworks/library/x-ajaxxml7/?ca=dgr-lnxw01AjaxMedia|creationTime|2007-11-08T16:45:05Z +http://www.bradshawfoundation.com/giraffe/|creationDate|2011-01-04 +http://www.bradshawfoundation.com/giraffe/|tag|http://www.semanlink.net/tag/rockart +http://www.bradshawfoundation.com/giraffe/|tag|http://www.semanlink.net/tag/tenere +http://www.bradshawfoundation.com/giraffe/|tag|http://www.semanlink.net/tag/girafe +http://www.bradshawfoundation.com/giraffe/|title|The giraffe carvings of the Tenere desert +http://www.bradshawfoundation.com/giraffe/|creationTime|2011-01-04T03:02:01Z +http://www.pbs.org/wnet/secrets/flash/catastrophe1_script.html|creationDate|2005-12-19 +http://www.pbs.org/wnet/secrets/flash/catastrophe1_script.html|tag|http://www.semanlink.net/tag/pbs_program +http://www.pbs.org/wnet/secrets/flash/catastrophe1_script.html|tag|http://www.semanlink.net/tag/535 +http://meryl.net/2008/01/22/175-data-and-information-visualization-examples-and-resources/|creationDate|2008-02-15 +http://meryl.net/2008/01/22/175-data-and-information-visualization-examples-and-resources/|tag|http://www.semanlink.net/tag/information_visualization +http://meryl.net/2008/01/22/175-data-and-information-visualization-examples-and-resources/|title|Meryl.net » 175+ Data and Information Visualization Examples and Resources +http://meryl.net/2008/01/22/175-data-and-information-visualization-examples-and-resources/|creationTime|2008-02-15T22:40:15Z +http://www.henriverdier.com/2014/06/letat-innovateur-radical.html|creationDate|2014-06-26 +http://www.henriverdier.com/2014/06/letat-innovateur-radical.html|tag|http://www.semanlink.net/tag/modernisation_de_l_etat 
+http://www.henriverdier.com/2014/06/letat-innovateur-radical.html|tag|http://www.semanlink.net/tag/henri_verdier
+http://www.henriverdier.com/2014/06/letat-innovateur-radical.html|title|Henri Verdier Blog: L'Etat peut-il être un innovateur radical ?
+http://www.henriverdier.com/2014/06/letat-innovateur-radical.html|creationTime|2014-06-26T13:35:40Z
+http://stackoverflow.com/questions/27818856/docker-add-warfile-to-official-tomcat-image|creationDate|2016-04-06
+http://stackoverflow.com/questions/27818856/docker-add-warfile-to-official-tomcat-image|tag|http://www.semanlink.net/tag/docker_tomcat
+http://stackoverflow.com/questions/27818856/docker-add-warfile-to-official-tomcat-image|title|boot2docker - Docker add warfile to official Tomcat image - Stack Overflow
+http://stackoverflow.com/questions/27818856/docker-add-warfile-to-official-tomcat-image|creationTime|2016-04-06T14:58:48Z
+http://tw.rpi.edu/weblog/2010/04/28/putting-open-facebook-data-into-linked-data-cloud/|creationDate|2010-04-28
+http://tw.rpi.edu/weblog/2010/04/28/putting-open-facebook-data-into-linked-data-cloud/|tag|http://www.semanlink.net/tag/facebook
+http://tw.rpi.edu/weblog/2010/04/28/putting-open-facebook-data-into-linked-data-cloud/|tag|http://www.semanlink.net/tag/linked_data
+http://tw.rpi.edu/weblog/2010/04/28/putting-open-facebook-data-into-linked-data-cloud/|title|The Tetherless World Weblog » Putting open Facebook data into Linked Data Cloud
+http://tw.rpi.edu/weblog/2010/04/28/putting-open-facebook-data-into-linked-data-cloud/|creationTime|2010-04-28T23:29:14Z
+https://ibm.box.com/s/g72skghoelpd7c9f1swen960mcb3ujpd|creationDate|2017-10-24
+https://ibm.box.com/s/g72skghoelpd7c9f1swen960mcb3ujpd|title|"""IBM Box"" (zone de partage)"
+https://ibm.box.com/s/g72skghoelpd7c9f1swen960mcb3ujpd|creationTime|2017-10-24T10:59:04Z
+http://www.the-american-interest.com/article.cfm?piece=1352|creationDate|2013-01-02
+http://www.the-american-interest.com/article.cfm?piece=1352|tag|http://www.semanlink.net/tag/education
+http://www.the-american-interest.com/article.cfm?piece=1352|tag|http://www.semanlink.net/tag/mooc
+http://www.the-american-interest.com/article.cfm?piece=1352|title|The End of the University as We Know It - Nathan Harden - The American Interest Magazine
+http://www.the-american-interest.com/article.cfm?piece=1352|creationTime|2013-01-02T10:42:51Z
+http://ivan-herman.name/2008/11/14/calais-release-4-and-the-linking-data-cloud/|creationDate|2008-12-10
+http://ivan-herman.name/2008/11/14/calais-release-4-and-the-linking-data-cloud/|tag|http://www.semanlink.net/tag/calais
+http://ivan-herman.name/2008/11/14/calais-release-4-and-the-linking-data-cloud/|title|Calais Release 4 and the Linking Data cloud… « Ivan’s private site
+http://ivan-herman.name/2008/11/14/calais-release-4-and-the-linking-data-cloud/|creationTime|2008-12-10T14:08:43Z
+http://stackoverflow.com/questions/30571/how-do-i-tell-maven-to-use-the-latest-version-of-a-dependency|creationDate|2012-08-16
+http://stackoverflow.com/questions/30571/how-do-i-tell-maven-to-use-the-latest-version-of-a-dependency|tag|http://www.semanlink.net/tag/maven
+http://stackoverflow.com/questions/30571/how-do-i-tell-maven-to-use-the-latest-version-of-a-dependency|title|java - How do I tell Maven to use the latest version of a dependency? - Stack Overflow
+http://stackoverflow.com/questions/30571/how-do-i-tell-maven-to-use-the-latest-version-of-a-dependency|creationTime|2012-08-16T11:27:57Z
+http://www.arte.tv/guide/fr/051400-000-A/lanceurs-d-alerte-coupables-ou-heros|creationDate|2016-08-31
+http://www.arte.tv/guide/fr/051400-000-A/lanceurs-d-alerte-coupables-ou-heros|tag|http://www.semanlink.net/tag/documentaire_tv
+http://www.arte.tv/guide/fr/051400-000-A/lanceurs-d-alerte-coupables-ou-heros|tag|http://www.semanlink.net/tag/whistleblower
+http://www.arte.tv/guide/fr/051400-000-A/lanceurs-d-alerte-coupables-ou-heros|title|Lanceurs d'alerte : coupables ou héros ? ARTE
+http://www.arte.tv/guide/fr/051400-000-A/lanceurs-d-alerte-coupables-ou-heros|creationTime|2016-08-31T00:22:44Z
+http://www.le-tigre.net/Marc-L.html|creationDate|2009-01-19
+http://www.le-tigre.net/Marc-L.html|tag|http://www.semanlink.net/tag/privacy_and_internet
+http://www.le-tigre.net/Marc-L.html|title|Marc L***
+http://www.le-tigre.net/Marc-L.html|creationTime|2009-01-19T00:09:14Z
+http://hsivonen.iki.fi/producing-xml/|creationDate|2006-12-09
+http://hsivonen.iki.fi/producing-xml/|tag|http://www.semanlink.net/tag/xml
+http://hsivonen.iki.fi/producing-xml/|tag|http://www.semanlink.net/tag/dev
+http://hsivonen.iki.fi/producing-xml/|title|HOWTO Avoid Being Called a Bozo When Producing XML
+http://dynamicorange.com/2012/04/24/rdf-big-data-and-the-semantic-web/|creationDate|2012-05-10
+http://dynamicorange.com/2012/04/24/rdf-big-data-and-the-semantic-web/|tag|http://www.semanlink.net/tag/semantic_web
+http://dynamicorange.com/2012/04/24/rdf-big-data-and-the-semantic-web/|tag|http://www.semanlink.net/tag/big_data
+http://dynamicorange.com/2012/04/24/rdf-big-data-and-the-semantic-web/|title|RDF, Big Data and The Semantic Web I _Really_ Don't Know
+http://dynamicorange.com/2012/04/24/rdf-big-data-and-the-semantic-web/|creationTime|2012-05-10T09:02:51Z
+http://www.lemondeinformatique.fr/dossiers/lire-business-intelligence-l-avenir-s-annonce-sans-nuages-91.html|creationDate|2010-09-21
+http://www.lemondeinformatique.fr/dossiers/lire-business-intelligence-l-avenir-s-annonce-sans-nuages-91.html|tag|http://www.semanlink.net/tag/business_intelligence
+http://www.lemondeinformatique.fr/dossiers/lire-business-intelligence-l-avenir-s-annonce-sans-nuages-91.html|title|Business Intelligence : L'avenir s'annonce sans nuages - Décisionnel - Le Monde Informatique
+http://www.lemondeinformatique.fr/dossiers/lire-business-intelligence-l-avenir-s-annonce-sans-nuages-91.html|creationTime|2010-09-21T13:25:51Z
+http://www-128.ibm.com/developerworks/edu/x-dw-x-ultimashup1.html|creationDate|2006-08-29
+http://www-128.ibm.com/developerworks/edu/x-dw-x-ultimashup1.html|tag|http://www.semanlink.net/tag/ibm_developerworks
+http://www-128.ibm.com/developerworks/edu/x-dw-x-ultimashup1.html|tag|http://www.semanlink.net/tag/mashups
+http://www-128.ibm.com/developerworks/edu/x-dw-x-ultimashup1.html|tag|http://www.semanlink.net/tag/semantic_web_services
+http://www-128.ibm.com/developerworks/edu/x-dw-x-ultimashup1.html|tag|http://www.semanlink.net/tag/jena
+http://www-128.ibm.com/developerworks/edu/x-dw-x-ultimashup1.html|tag|http://www.semanlink.net/tag/web_services
+http://www-128.ibm.com/developerworks/edu/x-dw-x-ultimashup1.html|comment|In addition to single-service applications, developers are creating mashups, applications that combine data from multiple services to create something new. This series chronicles the creation of the ultimate mashup, an application that not only stores data from different mashups but uses semantic technology to enable users to create their own mashups by swapping services, or even by picking and choosing data. It uses Java™ programming and a combination of servlets, JSP, software from the open source Jena project, and DB2's new native XML capabilities.
+http://www-128.ibm.com/developerworks/edu/x-dw-x-ultimashup1.html|title|The ultimate mashup -- Web services and the semantic Web
+http://www.openstreetmap.org/|creationDate|2013-03-20
+http://www.openstreetmap.org/|tag|http://www.semanlink.net/tag/openstreetmap
+http://www.openstreetmap.org/|title|OpenStreetMap
+http://www.openstreetmap.org/|creationTime|2013-03-20T15:07:44Z
+http://ivan-herman.name/2010/05/28/self-documenting-vocabularies-using-rdfa/|creationDate|2010-05-30
+http://ivan-herman.name/2010/05/28/self-documenting-vocabularies-using-rdfa/|tag|http://www.semanlink.net/tag/rdfa
+http://ivan-herman.name/2010/05/28/self-documenting-vocabularies-using-rdfa/|title|Self-documenting vocabularies using RDFa
+http://ivan-herman.name/2010/05/28/self-documenting-vocabularies-using-rdfa/|creationTime|2010-05-30T19:14:40Z
+http://prism-break.org/|creationDate|2013-06-13
+http://prism-break.org/|tag|http://www.semanlink.net/tag/prism_surveillance_program
+http://prism-break.org/|title|Opt out of PRISM, the NSA’s global data surveillance program - PRISM BREAK
+http://prism-break.org/|creationTime|2013-06-13T00:18:20Z
+http://doc.rplug.renault.com/car-configurator/cold/coldjs/tutorial.html|creationDate|2013-11-22
+http://doc.rplug.renault.com/car-configurator/cold/coldjs/tutorial.html|tag|http://www.semanlink.net/tag/c2gweb_js
+http://doc.rplug.renault.com/car-configurator/cold/coldjs/tutorial.html|title|C2GWeb-js
+http://doc.rplug.renault.com/car-configurator/cold/coldjs/tutorial.html|creationTime|2013-11-22T11:09:48Z
+http://wiki.tela-botanica.org/outildeterminationplantes/wakka.php?wiki=SpecificationsProjets|creationDate|2005-06-10
+http://wiki.tela-botanica.org/outildeterminationplantes/wakka.php?wiki=SpecificationsProjets|tag|http://www.semanlink.net/tag/botanique
+http://wiki.tela-botanica.org/outildeterminationplantes/wakka.php?wiki=SpecificationsProjets|title|OutilDeterminationPlantes:SpecificationsProjets
+http://www.kuro5hin.org/print/2005/8/22/182159/251|creationDate|2005-09-01
+http://www.kuro5hin.org/print/2005/8/22/182159/251|tag|http://www.semanlink.net/tag/ajax
+http://www.kuro5hin.org/print/2005/8/22/182159/251|title|AJAX - beyond the buzzwords
+http://scikit-learn.org/stable/modules/multiclass.html|creationDate|2018-03-17
+http://scikit-learn.org/stable/modules/multiclass.html|tag|http://www.semanlink.net/tag/multi_label_classification
+http://scikit-learn.org/stable/modules/multiclass.html|tag|http://www.semanlink.net/tag/scikit_learn
+http://scikit-learn.org/stable/modules/multiclass.html|title|Multiclass and multilabel algorithms — scikit-learn documentation
+http://scikit-learn.org/stable/modules/multiclass.html|creationTime|2018-03-17T14:38:19Z
+http://www.michaelrakowitz.com/the-invisible-enemy-should-not-exist|creationDate|2019-03-23
+http://www.michaelrakowitz.com/the-invisible-enemy-should-not-exist|tag|http://www.semanlink.net/tag/michael_rakowitz
+http://www.michaelrakowitz.com/the-invisible-enemy-should-not-exist|title|The invisible enemy should not exist
+http://www.michaelrakowitz.com/the-invisible-enemy-should-not-exist|creationTime|2019-03-23T01:26:33Z
+https://medium.com/@wgilliam/finding-data-block-nirvana-a-journey-through-the-fastai-data-block-api-c38210537fe4|creationDate|2019-01-15 +https://medium.com/@wgilliam/finding-data-block-nirvana-a-journey-through-the-fastai-data-block-api-c38210537fe4|tag|http://www.semanlink.net/tag/fast_ai +https://medium.com/@wgilliam/finding-data-block-nirvana-a-journey-through-the-fastai-data-block-api-c38210537fe4|title|Finding Data Block Nirvana (a journey through the fastai data block API) +https://medium.com/@wgilliam/finding-data-block-nirvana-a-journey-through-the-fastai-data-block-api-c38210537fe4|creationTime|2019-01-15T07:42:17Z +http://co-operating.systems/|creationDate|2015-12-31 +http://co-operating.systems/|tag|http://www.semanlink.net/tag/henry_story +http://co-operating.systems/|title|Co-operating Systems +http://co-operating.systems/|creationTime|2015-12-31T18:21:53Z +http://www.scottbot.net/HIAL/index.html@p=19113.html|creationDate|2017-05-19 +http://www.scottbot.net/HIAL/index.html@p=19113.html|tag|http://www.semanlink.net/tag/nlp_and_humanities +http://www.scottbot.net/HIAL/index.html@p=19113.html|tag|http://www.semanlink.net/tag/topic_modeling +http://www.scottbot.net/HIAL/index.html@p=19113.html|title|Topic Modeling for Humanists: A Guided Tour +http://www.scottbot.net/HIAL/index.html@p=19113.html|creationTime|2017-05-19T08:26:01Z +http://softwareengineering.stackexchange.com/questions/272302/use-map-instead-of-class-to-represent-data-rich-hickey|creationDate|2017-03-27 +http://softwareengineering.stackexchange.com/questions/272302/use-map-instead-of-class-to-represent-data-rich-hickey|tag|http://www.semanlink.net/tag/java +http://softwareengineering.stackexchange.com/questions/272302/use-map-instead-of-class-to-represent-data-rich-hickey|tag|http://www.semanlink.net/tag/programming +http://softwareengineering.stackexchange.com/questions/272302/use-map-instead-of-class-to-represent-data-rich-hickey|title|"java - ""Use map instead of class to represent data"" -Rich Hickey" +http://softwareengineering.stackexchange.com/questions/272302/use-map-instead-of-class-to-represent-data-rich-hickey|creationTime|2017-03-27T19:03:12Z +http://isegserv.itd.rl.ac.uk/public/skos/2007/10/f2f/skos-owl-patterns.html|creationDate|2008-05-12 +http://isegserv.itd.rl.ac.uk/public/skos/2007/10/f2f/skos-owl-patterns.html|tag|http://www.semanlink.net/tag/semantics_of_skos_concept +http://isegserv.itd.rl.ac.uk/public/skos/2007/10/f2f/skos-owl-patterns.html|tag|http://www.semanlink.net/tag/skos_owl +http://isegserv.itd.rl.ac.uk/public/skos/2007/10/f2f/skos-owl-patterns.html|title|Topic: Concept Semantics -- Patterns for Working With SKOS and OWL +http://isegserv.itd.rl.ac.uk/public/skos/2007/10/f2f/skos-owl-patterns.html|creationTime|2008-05-12T19:30:14Z +http://mail-archives.apache.org/mod_mbox/jena-users/201306.mbox/%3C51ACA6CF.9000405@apache.org%3E|creationDate|2016-01-25 +http://mail-archives.apache.org/mod_mbox/jena-users/201306.mbox/%3C51ACA6CF.9000405@apache.org%3E|tag|http://www.semanlink.net/tag/sparql_perfs +http://mail-archives.apache.org/mod_mbox/jena-users/201306.mbox/%3C51ACA6CF.9000405@apache.org%3E|title|Re: sparql performance parameters and limitations +http://mail-archives.apache.org/mod_mbox/jena-users/201306.mbox/%3C51ACA6CF.9000405@apache.org%3E|creationTime|2016-01-25T18:12:33Z +http://tslearn.readthedocs.io/en/latest/index.html|creationDate|2017-08-09 +http://tslearn.readthedocs.io/en/latest/index.html|tag|http://www.semanlink.net/tag/machine_learning 
+http://tslearn.readthedocs.io/en/latest/index.html|tag|http://www.semanlink.net/tag/time_series +http://tslearn.readthedocs.io/en/latest/index.html|comment|machine learning tools for the analysis of time series +http://tslearn.readthedocs.io/en/latest/index.html|title|tslearn +http://tslearn.readthedocs.io/en/latest/index.html|creationTime|2017-08-09T19:35:49Z +https://www.linkedin.com/groups/Can-we-include-algorithms-programs-86246.S.5975435892695801858|creationDate|2015-03-04 +https://www.linkedin.com/groups/Can-we-include-algorithms-programs-86246.S.5975435892695801858|tag|http://www.semanlink.net/tag/good_question +https://www.linkedin.com/groups/Can-we-include-algorithms-programs-86246.S.5975435892695801858|tag|http://www.semanlink.net/tag/triplestore +https://www.linkedin.com/groups/Can-we-include-algorithms-programs-86246.S.5975435892695801858|title|Can we include algorithms/programs in RDF database like PL*SQL in Oracle database? +https://www.linkedin.com/groups/Can-we-include-algorithms-programs-86246.S.5975435892695801858|creationTime|2015-03-04T17:12:37Z +https://www.newyorker.com/magazine/2019/04/08/the-day-the-dinosaurs-died|creationDate|2019-03-30 +https://www.newyorker.com/magazine/2019/04/08/the-day-the-dinosaurs-died|tag|http://www.semanlink.net/tag/new_yorker +https://www.newyorker.com/magazine/2019/04/08/the-day-the-dinosaurs-died|tag|http://www.semanlink.net/tag/tanis_kt +https://www.newyorker.com/magazine/2019/04/08/the-day-the-dinosaurs-died|title|The Day the Dinosaurs Died The New Yorker +https://www.newyorker.com/magazine/2019/04/08/the-day-the-dinosaurs-died|creationTime|2019-03-30T13:26:10Z +http://foaf.me/|creationDate|2011-01-18 +http://foaf.me/|tag|http://www.semanlink.net/tag/foaf +http://foaf.me/|title|FOAF Me +http://foaf.me/|creationTime|2011-01-18T09:45:18Z +http://en.wikipedia.org/wiki/Woman_in_the_Dunes|creationDate|2011-11-19 +http://en.wikipedia.org/wiki/Woman_in_the_Dunes|tag|http://www.semanlink.net/tag/film_japonais +http://en.wikipedia.org/wiki/Woman_in_the_Dunes|title|The Woman in the Dunes - Wikipedia, the free encyclopedia +http://en.wikipedia.org/wiki/Woman_in_the_Dunes|creationTime|2011-11-19T12:30:11Z +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-739553,0.html|creationDate|2006-02-09 +http://m.mgafrica.com/article/2015-01-15-what-they-said-then-and-where-we-are-now#.Vg-LULT5TnS|creationDate|2015-10-03 +http://m.mgafrica.com/article/2015-01-15-what-they-said-then-and-where-we-are-now#.Vg-LULT5TnS|tag|http://www.semanlink.net/tag/new_africa +http://m.mgafrica.com/article/2015-01-15-what-they-said-then-and-where-we-are-now#.Vg-LULT5TnS|title|What US intelligence predicted about Africa today 15 years ago, and how terribly wrong they were Mail & Guardian Africa (Mobile edition) +http://m.mgafrica.com/article/2015-01-15-what-they-said-then-and-where-we-are-now#.Vg-LULT5TnS|creationTime|2015-10-03T10:09:21Z +http://modulaweb.fr/blog/2012/08/we-make-data-porn-les-donnees-sont-toujours-fausses/comment-page-1/#comment-406|creationDate|2012-08-15 +http://modulaweb.fr/blog/2012/08/we-make-data-porn-les-donnees-sont-toujours-fausses/comment-page-1/#comment-406|tag|http://www.semanlink.net/tag/open_data +http://modulaweb.fr/blog/2012/08/we-make-data-porn-les-donnees-sont-toujours-fausses/comment-page-1/#comment-406|title|We make data porn : Les données sont toujours fausses - Modulaweb – Web Open-Source et modulable à Montpellier 
+http://modulaweb.fr/blog/2012/08/we-make-data-porn-les-donnees-sont-toujours-fausses/comment-page-1/#comment-406|creationTime|2012-08-15T19:54:19Z +http://www.schtuff.com|creationDate|2005-10-11 +http://www.schtuff.com|tag|http://www.semanlink.net/tag/wiki +http://www.schtuff.com|comment|Free wiki hosting +http://www.schtuff.com|title|www.schtuff.com - What is Schtuff? +http://www.hippasus.com/resources/mapdelicious|creationDate|2005-04-28 +http://www.hippasus.com/resources/mapdelicious|tag|http://www.semanlink.net/tag/social_bookmarking +http://del.icio.us/url/950955a2af0f59f3607cbf9de59edfe2|creationDate|2006-05-13 +http://del.icio.us/url/950955a2af0f59f3607cbf9de59edfe2|tag|http://www.semanlink.net/tag/linkto_semanlink +http://del.icio.us/url/950955a2af0f59f3607cbf9de59edfe2|title|Semanlink on del.icio.us +http://www.cosmovisions.com/ChronoCroisades02.htm|creationDate|2006-07-26 +http://www.cosmovisions.com/ChronoCroisades02.htm|tag|http://www.semanlink.net/tag/croisade_des_enfants +http://www.cosmovisions.com/ChronoCroisades02.htm|tag|http://www.semanlink.net/tag/croisades +http://www.cosmovisions.com/ChronoCroisades02.htm|title|Les dernières croisades (1217 - 1270) +http://animaux.blog.lemonde.fr/2013/09/27/889/|creationDate|2013-09-29 +http://animaux.blog.lemonde.fr/2013/09/27/889/|tag|http://www.semanlink.net/tag/censure_et_maltraitance_animale +http://animaux.blog.lemonde.fr/2013/09/27/889/|title|Poules en batterie: des images de maltraitance censurées par la justice Un éléphant dans mon salon +http://animaux.blog.lemonde.fr/2013/09/27/889/|creationTime|2013-09-29T13:15:10Z +http://www.sciam.com/article.cfm?articleID=00048144-10D2-1C70-84A9809EC588EF21&pageNumber=1&catID=2|creationDate|2005-06-21 +http://www.sciam.com/article.cfm?articleID=00048144-10D2-1C70-84A9809EC588EF21&pageNumber=1&catID=2|tag|http://www.semanlink.net/tag/tim_berners_lee +http://www.sciam.com/article.cfm?articleID=00048144-10D2-1C70-84A9809EC588EF21&pageNumber=1&catID=2|title|Science & Technology at Scientific American.com: The Semantic Web -- A new form of Web content that is meaningful to computers will unleash a revolution of new possibilities +http://semanticweb.com/graphs-make-the-world-of-data-go-round_b36195#more-36195|creationDate|2013-04-02 +http://semanticweb.com/graphs-make-the-world-of-data-go-round_b36195#more-36195|tag|http://www.semanlink.net/tag/neo4j +http://semanticweb.com/graphs-make-the-world-of-data-go-round_b36195#more-36195|title|Graphs Make The World Of Data Go Round - semanticweb.com +http://semanticweb.com/graphs-make-the-world-of-data-go-round_b36195#more-36195|creationTime|2013-04-02T14:11:52Z +http://www.semanlink.net/doc/2019/05/pocketsphinx_wiki_ubuntu_fr_|creationDate|2019-05-29 +http://www.semanlink.net/doc/2019/05/pocketsphinx_wiki_ubuntu_fr_|tag|http://www.semanlink.net/tag/pocketsphinx +http://www.semanlink.net/doc/2019/05/pocketsphinx_wiki_ubuntu_fr_|title|pocketsphinx [Wiki ubuntu-fr] +http://www.semanlink.net/doc/2019/05/pocketsphinx_wiki_ubuntu_fr_|bookmarkOf|https://doc.ubuntu-fr.org/pocketsphinx +http://www.semanlink.net/doc/2019/05/pocketsphinx_wiki_ubuntu_fr_|creationTime|2019-05-29T01:03:09Z +http://www.snee.com/bobdc.blog/2017/08/validating-rdf-data-with-shacl.html|creationDate|2017-08-21 +http://www.snee.com/bobdc.blog/2017/08/validating-rdf-data-with-shacl.html|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.snee.com/bobdc.blog/2017/08/validating-rdf-data-with-shacl.html|tag|http://www.semanlink.net/tag/shacl 
+http://www.snee.com/bobdc.blog/2017/08/validating-rdf-data-with-shacl.html|title|Validating RDF data with SHACL - bobdc.blog +http://www.snee.com/bobdc.blog/2017/08/validating-rdf-data-with-shacl.html|creationTime|2017-08-21T14:12:50Z +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/semwebclient/|creationDate|2007-01-02 +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/semwebclient/|tag|http://www.semanlink.net/tag/semantic_web_client_library +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/semwebclient/|comment|"The Semantic Web Client Library represents the complete Semantic Web as a single RDF graph. The library enables applications to query this global graph using SPARQL- and find(SPO) queries. To answer queries, the library dynamically retrieves information from the Semantic Web by dereferencing HTTP URIs and by following rdfs:seeAlso links. +" +http://sites.wiwiss.fu-berlin.de/suhl/bizer/ng4j/semwebclient/|title|Semantic Web Client Library +http://i-semantics.tugraz.at/industry-track/bernhard-schandl-cluug|creationDate|2010-07-01 +http://i-semantics.tugraz.at/industry-track/bernhard-schandl-cluug|tag|http://www.semanlink.net/tag/personal_knowledge_management +http://i-semantics.tugraz.at/industry-track/bernhard-schandl-cluug|title|Bernhard Schandl: Semantically enabled Personal Knowledge Management with cluug.com – I-SEMANTICS +http://i-semantics.tugraz.at/industry-track/bernhard-schandl-cluug|creationTime|2010-07-01T16:22:35Z +http://djerma.nl/|creationDate|2015-08-26 +http://djerma.nl/|tag|http://www.semanlink.net/tag/jerma +http://djerma.nl/|title|Cawyan Zarma Sanni +http://djerma.nl/|creationTime|2015-08-26T19:50:06Z +http://charlie.cu.cc/2012/06/how-add-external-libraries-maven/|creationDate|2012-08-18 +http://charlie.cu.cc/2012/06/how-add-external-libraries-maven/|tag|http://www.semanlink.net/tag/maven +http://charlie.cu.cc/2012/06/how-add-external-libraries-maven/|title|How to add external libraries in Maven - Charlie Wu +http://charlie.cu.cc/2012/06/how-add-external-libraries-maven/|creationTime|2012-08-18T14:57:22Z +http://worrydream.com/LearnableProgramming/|creationDate|2012-09-27 +http://worrydream.com/LearnableProgramming/|tag|http://www.semanlink.net/tag/programming +http://worrydream.com/LearnableProgramming/|comment|Designing a programming system for understanding programs +http://worrydream.com/LearnableProgramming/|title|Learnable Programming +http://worrydream.com/LearnableProgramming/|creationTime|2012-09-27T18:23:38Z +http://www.flickr.com/photos/iks_project/sets/72157630176990928/show/|creationDate|2012-06-19 +http://www.flickr.com/photos/iks_project/sets/72157630176990928/show/|tag|http://www.semanlink.net/tag/iks_workshop_salzburg_2012 +http://www.flickr.com/photos/iks_project/sets/72157630176990928/show/|title|IKS Salzburg Workshop June 2012 - pictures +http://www.flickr.com/photos/iks_project/sets/72157630176990928/show/|creationTime|2012-06-19T10:00:36Z +http://www.the325project.org/|creationDate|2006-01-30 +http://www.the325project.org/|tag|http://www.semanlink.net/tag/semantic_web_application +http://www.the325project.org/|tag|http://www.semanlink.net/tag/antiquite_romaine +http://www.the325project.org/|title|The 325 Project +http://www.nytimes.com/2006/11/12/business/12web.html|creationDate|2006-11-14 +http://www.nytimes.com/2006/11/12/business/12web.html|tag|http://www.semanlink.net/tag/web_3_0 +http://www.nytimes.com/2006/11/12/business/12web.html|title|Entrepreneurs See a Web Guided by Common Sense - New York Times 
+http://www.pbs.org/wnet/secrets/flash/catastrophe2_script.html|creationDate|2005-12-19
+http://www.pbs.org/wnet/secrets/flash/catastrophe2_script.html|tag|http://www.semanlink.net/tag/pbs_program
+http://www.pbs.org/wnet/secrets/flash/catastrophe2_script.html|tag|http://www.semanlink.net/tag/535
+https://dl.dropboxusercontent.com/u/172199972/map/index.html|creationDate|2013-07-12
+https://dl.dropboxusercontent.com/u/172199972/map/index.html|tag|http://www.semanlink.net/tag/wikidata
+https://dl.dropboxusercontent.com/u/172199972/map/index.html|tag|http://www.semanlink.net/tag/denny_vrandecic
+https://dl.dropboxusercontent.com/u/172199972/map/index.html|title|Wikidata Map Interface
+https://dl.dropboxusercontent.com/u/172199972/map/index.html|creationTime|2013-07-12T11:06:44Z
+http://www.bisharat.net/Zarma/|creationDate|2006-03-30
+http://www.bisharat.net/Zarma/|tag|http://www.semanlink.net/tag/peace_corps
+http://www.bisharat.net/Zarma/|tag|http://www.semanlink.net/tag/jerma
+http://www.bisharat.net/Zarma/|title|Zarma Dictionnary (Peace Corps / Niger)
+http://zone47.com/crotos/|creationDate|2014-03-04
+http://zone47.com/crotos/|tag|http://www.semanlink.net/tag/semanticpedia
+http://zone47.com/crotos/|title|CROTOS
+http://zone47.com/crotos/|creationTime|2014-03-04T10:01:08Z
+https://cxexchange.niceincontact.com/apps/182334/passage-ai-conversational-chatbot-with-ainlp#!overview|creationDate|2019-03-26
+https://cxexchange.niceincontact.com/apps/182334/passage-ai-conversational-chatbot-with-ainlp#!overview|tag|http://www.semanlink.net/tag/passage_ai
+https://cxexchange.niceincontact.com/apps/182334/passage-ai-conversational-chatbot-with-ainlp#!overview|tag|http://www.semanlink.net/tag/chatbot
+https://cxexchange.niceincontact.com/apps/182334/passage-ai-conversational-chatbot-with-ainlp#!overview|title|Passage AI Conversational Chatbot with AI/NLP by Passage AI
+https://cxexchange.niceincontact.com/apps/182334/passage-ai-conversational-chatbot-with-ainlp#!overview|creationTime|2019-03-26T08:46:06Z
+http://nlp.cs.nyu.edu/sekine/papers/li07.pdf|creationDate|2018-03-05
+http://nlp.cs.nyu.edu/sekine/papers/li07.pdf|tag|http://www.semanlink.net/tag/named_entity_recognition
+http://nlp.cs.nyu.edu/sekine/papers/li07.pdf|title|A survey of named entity recognition and classification (2006)
+http://nlp.cs.nyu.edu/sekine/papers/li07.pdf|creationTime|2018-03-05T01:35:29Z
+https://towardsdatascience.com/deep-learning-for-ner-1-public-datasets-and-annotation-methods-8b1ad5e98caf|creationDate|2018-10-15
+https://towardsdatascience.com/deep-learning-for-ner-1-public-datasets-and-annotation-methods-8b1ad5e98caf|tag|http://www.semanlink.net/tag/named_entity_recognition
+https://towardsdatascience.com/deep-learning-for-ner-1-public-datasets-and-annotation-methods-8b1ad5e98caf|title|Deep Learning for Named Entity Recognition #1: Public Datasets and Annotation Methods
+https://towardsdatascience.com/deep-learning-for-ner-1-public-datasets-and-annotation-methods-8b1ad5e98caf|creationTime|2018-10-15T14:25:14Z
+http://www.mentalfloss.com/article/54853/our-interview-jeopardy-champion-arthur-chu|creationDate|2014-02-18
+http://www.mentalfloss.com/article/54853/our-interview-jeopardy-champion-arthur-chu|tag|http://www.semanlink.net/tag/jeopardy
+http://www.mentalfloss.com/article/54853/our-interview-jeopardy-champion-arthur-chu|title|Our Interview With Jeopardy! Champion Arthur Chu Mental Floss
+http://www.mentalfloss.com/article/54853/our-interview-jeopardy-champion-arthur-chu|creationTime|2014-02-18T01:02:24Z
+https://www.fast.ai/2019/03/06/fastai-swift/|creationDate|2019-03-07
+https://www.fast.ai/2019/03/06/fastai-swift/|tag|http://www.semanlink.net/tag/swift
+https://www.fast.ai/2019/03/06/fastai-swift/|tag|http://www.semanlink.net/tag/fast_ai
+https://www.fast.ai/2019/03/06/fastai-swift/|title|fast.ai Embracing Swift for Deep Learning · fast.ai
+https://www.fast.ai/2019/03/06/fastai-swift/|creationTime|2019-03-07T09:06:47Z
+https://cloud.google.com/blog/products/gcp/google-patents-public-datasets-connecting-public-paid-and-private-patent-data|creationDate|2019-02-09
+https://cloud.google.com/blog/products/gcp/google-patents-public-datasets-connecting-public-paid-and-private-patent-data|tag|http://www.semanlink.net/tag/google_patents
+https://cloud.google.com/blog/products/gcp/google-patents-public-datasets-connecting-public-paid-and-private-patent-data|title|Google Patents Public Datasets: connecting public, paid, and private patent data Google Cloud Blog
+https://cloud.google.com/blog/products/gcp/google-patents-public-datasets-connecting-public-paid-and-private-patent-data|creationTime|2019-02-09T00:26:21Z
+http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEMTFSULWFE_0.html|creationDate|2005-12-01
+http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEMTFSULWFE_0.html|tag|http://www.semanlink.net/tag/cassini_huygens
+http://www.esa.int/SPECIALS/Results_from_Mars_Express_and_Huygens/SEMTFSULWFE_0.html|title|ESA - Results from Huygens - Highlights of ESA’s Huygens mission
+http://www.dbin.org/|creationDate|2006-02-17
+http://www.dbin.org/|tag|http://www.semanlink.net/tag/semantic_web_p2p
+http://www.dbin.org/|comment|"A Semantic Web P2P with a ""Filesharing philosophy"" and/or a Semantic Newsgroup Client."
+http://www.dbin.org/|title|DBin Project +http://code.google.com/p/pallet/|creationDate|2014-04-22 +http://code.google.com/p/pallet/|tag|http://www.semanlink.net/tag/mallet +http://code.google.com/p/pallet/|title|"pallet - A professionalization of the UMass project ""Mallet"" - Google Project Hosting" +http://code.google.com/p/pallet/|creationTime|2014-04-22T17:39:23Z +http://www.bbc.co.uk/news/science-environment-21668712|creationDate|2013-03-21 +http://www.bbc.co.uk/news/science-environment-21668712|tag|http://www.semanlink.net/tag/herschel_telescope +http://www.bbc.co.uk/news/science-environment-21668712|title|BBC News - Herschel space telescope to go blind +http://www.bbc.co.uk/news/science-environment-21668712|creationTime|2013-03-21T13:56:29Z +http://institutoamazoniabrasil.com/ofir.php|creationDate|2012-01-07 +http://institutoamazoniabrasil.com/ofir.php|tag|http://www.semanlink.net/tag/amazonie +http://institutoamazoniabrasil.com/ofir.php|tag|http://www.semanlink.net/tag/ofir +http://institutoamazoniabrasil.com/ofir.php|title|Instituto Amazônia Brasil: Ofir +http://institutoamazoniabrasil.com/ofir.php|creationTime|2012-01-07T22:07:20Z +http://www.ics.mq.edu.au/~cassidy/talks/semweb/semweb.html|creationDate|2005-01-05 +http://www.ics.mq.edu.au/~cassidy/talks/semweb/semweb.html|tag|http://www.semanlink.net/tag/semantic_web_introduction +http://www.wired.com/2016/03/sadness-beauty-watching-googles-ai-play-go|creationDate|2016-03-11 +http://www.wired.com/2016/03/sadness-beauty-watching-googles-ai-play-go|tag|http://www.semanlink.net/tag/alphago +http://www.wired.com/2016/03/sadness-beauty-watching-googles-ai-play-go|title|The Sadness and Beauty of Watching Google’s AI Play Go WIRED +http://www.wired.com/2016/03/sadness-beauty-watching-googles-ai-play-go|creationTime|2016-03-11T21:01:33Z +http://www.virtualchaos.co.uk/blog/2008/04/23/www2008-day-2-ldow2008-workshop/|creationDate|2008-05-04 +http://www.virtualchaos.co.uk/blog/2008/04/23/www2008-day-2-ldow2008-workshop/|tag|http://www.semanlink.net/tag/ldow2008 +http://www.virtualchaos.co.uk/blog/2008/04/23/www2008-day-2-ldow2008-workshop/|title|VirtualChaos - Nadeem’s blog » WWW2008: Day 2 - LDOW2008 Workshop +http://www.virtualchaos.co.uk/blog/2008/04/23/www2008-day-2-ldow2008-workshop/|creationTime|2008-05-04T20:15:39Z +http://www.mathkang.org/default.html|creationDate|2009-01-22 +http://www.mathkang.org/default.html|tag|http://www.semanlink.net/tag/mathematiques +http://www.mathkang.org/default.html|title|le Kangourou des mathematiques +http://www.mathkang.org/default.html|creationTime|2009-01-22T01:09:37Z +http://stackoverflow.com/questions/36136885/swagger-map-of-string-object|creationDate|2017-04-05 +http://stackoverflow.com/questions/36136885/swagger-map-of-string-object|title|Swagger: map of - Stack Overflow +http://stackoverflow.com/questions/36136885/swagger-map-of-string-object|creationTime|2017-04-05T11:59:24Z +http://www.fiftyfoureleven.com/resources/programming/xmlhttprequest|creationDate|2005-10-05 +http://www.fiftyfoureleven.com/resources/programming/xmlhttprequest|tag|http://www.semanlink.net/tag/ajax +http://www.fiftyfoureleven.com/resources/programming/xmlhttprequest|title|XMLHttpRequest & Ajax Based Applications - Links and Resources, Fiftyfoureleven.com +https://cloud.google.com/blog/products/ai-machine-learning/measuring-patent-claim-breadth-using-google-patents-public-datasets|creationDate|2019-02-21 
+https://cloud.google.com/blog/products/ai-machine-learning/measuring-patent-claim-breadth-using-google-patents-public-datasets|tag|http://www.semanlink.net/tag/tutorial
+https://cloud.google.com/blog/products/ai-machine-learning/measuring-patent-claim-breadth-using-google-patents-public-datasets|tag|http://www.semanlink.net/tag/google_patents
+https://cloud.google.com/blog/products/ai-machine-learning/measuring-patent-claim-breadth-using-google-patents-public-datasets|comment|"A tutorial on how to use Google Patents Public Datasets, along with Apache Beam, Cloud Dataflow, TensorFlow, and Cloud ML Engine to create a machine learning model to estimate the ‘breadth’ of patent claims.
+
+"
+https://cloud.google.com/blog/products/ai-machine-learning/measuring-patent-claim-breadth-using-google-patents-public-datasets|title|Measuring patent claim breadth using Google Patents Public Datasets Google Cloud Blog
+https://cloud.google.com/blog/products/ai-machine-learning/measuring-patent-claim-breadth-using-google-patents-public-datasets|creationTime|2019-02-21T00:35:11Z
+http://xmlns.com/foaf/spec/|creationDate|2007-10-25
+http://xmlns.com/foaf/spec/|tag|http://www.semanlink.net/tag/foaf
+http://xmlns.com/foaf/spec/|title|FOAF Vocabulary Specification
+http://xmlns.com/foaf/spec/|creationTime|2007-10-25T18:19:00Z
+http://www.sheridanprinting.com/14-websci4chRV610jmp/docs/p161.pdf|creationDate|2014-06-26
+http://www.sheridanprinting.com/14-websci4chRV610jmp/docs/p161.pdf|tag|http://www.semanlink.net/tag/latent_dirichlet_allocation
+http://www.sheridanprinting.com/14-websci4chRV610jmp/docs/p161.pdf|title|Latent Dirichlet Allocation: stability
+http://www.sheridanprinting.com/14-websci4chRV610jmp/docs/p161.pdf|creationTime|2014-06-26T00:52:10Z
+http://www.airtightinteractive.com/projects/related_tag_browser/app/|creationDate|2005-05-04
+http://www.airtightinteractive.com/projects/related_tag_browser/app/|tag|http://www.semanlink.net/tag/flickr
+http://www.visualdataweb.org/relfinder.php|creationDate|2015-03-04
+http://www.visualdataweb.org/relfinder.php|tag|http://www.semanlink.net/tag/rdf_data_visualization
+http://www.visualdataweb.org/relfinder.php|comment|Interactive Relationship Discovery in RDF Data
+http://www.visualdataweb.org/relfinder.php|title|RelFinder - Visual Data Web
+http://www.visualdataweb.org/relfinder.php|creationTime|2015-03-04T16:57:53Z
+https://www.technologyreview.com/s/608911/is-ai-riding-a-one-trick-pony/|creationDate|2017-11-21
+https://www.technologyreview.com/s/608911/is-ai-riding-a-one-trick-pony/|tag|http://www.semanlink.net/tag/ia_limites
+https://www.technologyreview.com/s/608911/is-ai-riding-a-one-trick-pony/|title|Is AI Riding a One-Trick Pony? - MIT Technology Review
+https://www.technologyreview.com/s/608911/is-ai-riding-a-one-trick-pony/|creationTime|2017-11-21T11:38:15Z
+http://www.metagraph.org/index.html|creationDate|2005-10-28
+http://www.metagraph.org/index.html|tag|http://www.semanlink.net/tag/graph_visualization
+http://www.metagraph.org/index.html|tag|http://www.semanlink.net/tag/bioinformatics
+http://www.metagraph.org/index.html|title|MetaGraph.org
+http://www.metagraph.org/index.html|seeAlso|http://www.dehora.net/journal/2004/08/metagraph_domain_knowledge_v_rdf.html
+http://www.cnrs.fr/inee/communication/breves/b230.html|creationDate|2016-11-11
+http://www.cnrs.fr/inee/communication/breves/b230.html|tag|http://www.semanlink.net/tag/truffe
+http://www.cnrs.fr/inee/communication/breves/b230.html|title|Du nouveau sur la fécondation de la truffe (CNRS)
+http://www.cnrs.fr/inee/communication/breves/b230.html|creationTime|2016-11-11T01:04:41Z
+http://json-ld.org/spec/latest/|creationDate|2011-06-09
+http://json-ld.org/spec/latest/|tag|http://www.semanlink.net/tag/json_ld
+http://json-ld.org/spec/latest/|title|JSON-LD specification
+http://json-ld.org/spec/latest/|creationTime|2011-06-09T16:29:43Z
+http://www.foopad.com/account/about|creationDate|2005-11-06
+http://www.foopad.com/account/about|tag|http://www.semanlink.net/tag/wiki_service
+http://www.foopad.com/account/about|title|Foopad :: The Very Simple Wiki
+https://www.quora.com/How-can-I-use-machine-learning-to-propose-tags-for-content|creationDate|2018-08-07
+https://www.quora.com/How-can-I-use-machine-learning-to-propose-tags-for-content|tag|http://www.semanlink.net/tag/automatic_tagging
+https://www.quora.com/How-can-I-use-machine-learning-to-propose-tags-for-content|title|How can I use machine learning to propose tags for content? - Quora
+https://www.quora.com/How-can-I-use-machine-learning-to-propose-tags-for-content|creationTime|2018-08-07T17:44:18Z
+http://ruder.io/deep-learning-optimization-2017/index.html|creationDate|2017-12-04
+http://ruder.io/deep-learning-optimization-2017/index.html|tag|http://www.semanlink.net/tag/deep_learning_optimization_methods
+http://ruder.io/deep-learning-optimization-2017/index.html|title|Optimization for Deep Learning Highlights in 2017
+http://ruder.io/deep-learning-optimization-2017/index.html|creationTime|2017-12-04T12:11:44Z
+https://link.springer.com/chapter/10.1007/978-3-642-30284-8_47|creationDate|2017-10-06
+https://link.springer.com/chapter/10.1007/978-3-642-30284-8_47|tag|http://www.semanlink.net/tag/eswc_2012
+https://link.springer.com/chapter/10.1007/978-3-642-30284-8_47|tag|http://www.semanlink.net/tag/c2gweb
+https://link.springer.com/chapter/10.1007/978-3-642-30284-8_47|tag|http://www.semanlink.net/tag/configuration_as_linked_data
+https://link.springer.com/chapter/10.1007/978-3-642-30284-8_47|title|Product Customization as Linked Data SpringerLink (ESWC-2012)
+https://link.springer.com/chapter/10.1007/978-3-642-30284-8_47|creationTime|2017-10-06T11:25:35Z
+http://www.autorepair.eu.com/|creationDate|2008-03-04
+http://www.autorepair.eu.com/|tag|http://www.semanlink.net/tag/reparation_automobile
+http://www.autorepair.eu.com/|title|Auto Repair Information in the EU
+http://www.autorepair.eu.com/|creationTime|2008-03-04T22:54:08Z
+http://bigbrowser.blog.lemonde.fr/2016/02/26/les-destructions-de-palmyre-documentees/|creationDate|2016-02-26
+http://bigbrowser.blog.lemonde.fr/2016/02/26/les-destructions-de-palmyre-documentees/|tag|http://www.semanlink.net/tag/palmyra
+http://bigbrowser.blog.lemonde.fr/2016/02/26/les-destructions-de-palmyre-documentees/|title|Les destructions de Palmyre documentées Big Browser
+http://bigbrowser.blog.lemonde.fr/2016/02/26/les-destructions-de-palmyre-documentees/|creationTime|2016-02-26T16:19:03Z
+http://internetactu.blog.lemonde.fr/2015/06/20/la-batterie-delon-musk-peut-elle-tout-changer/|creationDate|2015-06-21
+http://internetactu.blog.lemonde.fr/2015/06/20/la-batterie-delon-musk-peut-elle-tout-changer/|tag|http://www.semanlink.net/tag/tesla_inc
+http://internetactu.blog.lemonde.fr/2015/06/20/la-batterie-delon-musk-peut-elle-tout-changer/|title|La batterie d’Elon Musk peut-elle tout changer ? InternetActu
+http://internetactu.blog.lemonde.fr/2015/06/20/la-batterie-delon-musk-peut-elle-tout-changer/|creationTime|2015-06-21T11:36:09Z
+https://www.youtube.com/watch?v=eJYFubZfbfY&list=PL0FF1370EBACD1DAD&index=13|creationDate|2014-11-23
+https://www.youtube.com/watch?v=eJYFubZfbfY&list=PL0FF1370EBACD1DAD&index=13|title|Silent Night, Holy Cow (Part 1) - YouTube
+https://www.youtube.com/watch?v=eJYFubZfbfY&list=PL0FF1370EBACD1DAD&index=13|creationTime|2014-11-23T23:30:56Z
+http://mafihe.hu/~bnc/feynman/|creationDate|2005-04-05
+http://mafihe.hu/~bnc/feynman/|tag|http://www.semanlink.net/tag/feynman
+http://www.lemonde.fr/planete/article/2012/05/25/la-croissance-mondiale-va-s-arreter_1707352_3244.html|creationDate|2013-01-04
+http://www.lemonde.fr/planete/article/2012/05/25/la-croissance-mondiale-va-s-arreter_1707352_3244.html|tag|http://www.semanlink.net/tag/the_limits_to_growth
+http://www.lemonde.fr/planete/article/2012/05/25/la-croissance-mondiale-va-s-arreter_1707352_3244.html|title|"""La croissance mondiale va s'arrêter"""
+http://www.lemonde.fr/planete/article/2012/05/25/la-croissance-mondiale-va-s-arreter_1707352_3244.html|creationTime|2013-01-04T13:47:20Z
+http://www.kdnuggets.com/2015/12/tensor-flow-terrific-deep-learning-library.html|creationDate|2016-01-07
+http://www.kdnuggets.com/2015/12/tensor-flow-terrific-deep-learning-library.html|tag|http://www.semanlink.net/tag/tensorflow
+http://www.kdnuggets.com/2015/12/tensor-flow-terrific-deep-learning-library.html|title|TensorFlow is Terrific – A Sober Take on Deep Learning Acceleration
+http://www.kdnuggets.com/2015/12/tensor-flow-terrific-deep-learning-library.html|creationTime|2016-01-07T00:43:58Z
+http://www.sowbug.com/post/54892479898/ten-things-i-believe-about-bitcoin|creationDate|2014-02-01
+http://www.sowbug.com/post/54892479898/ten-things-i-believe-about-bitcoin|tag|http://www.semanlink.net/tag/bitcoin
+http://www.sowbug.com/post/54892479898/ten-things-i-believe-about-bitcoin|tag|http://www.semanlink.net/tag/fun
+http://www.sowbug.com/post/54892479898/ten-things-i-believe-about-bitcoin|title|Mike Tsao puts the fun back in defunct. — Ten Things I Believe About Bitcoin
+http://www.sowbug.com/post/54892479898/ten-things-i-believe-about-bitcoin|creationTime|2014-02-01T10:27:09Z
+http://java-monitor.com/forum/showthread.php?t=22|creationDate|2010-02-11
+http://java-monitor.com/forum/showthread.php?t=22|tag|http://www.semanlink.net/tag/dev_tips
+http://java-monitor.com/forum/showthread.php?t=22|tag|http://www.semanlink.net/tag/tomcat
+http://java-monitor.com/forum/showthread.php?t=22|title|java.net.BindException: Address already in use:8080 ... uhm, who's listening? - Java-Monitor Forum
+http://java-monitor.com/forum/showthread.php?t=22|creationTime|2010-02-11T14:52:56Z
+https://www.nytimes.com/2018/12/05/technology/facebook-emails-privacy-data.html|creationDate|2018-12-08
+https://www.nytimes.com/2018/12/05/technology/facebook-emails-privacy-data.html|tag|http://www.semanlink.net/tag/facebook
+https://www.nytimes.com/2018/12/05/technology/facebook-emails-privacy-data.html|title|Facebook Emails Show Its Real Mission: Making Money and Crushing Competition - The New York Times
+https://www.nytimes.com/2018/12/05/technology/facebook-emails-privacy-data.html|creationTime|2018-12-08T11:06:31Z
+https://github.com/structureddynamics/structWSF-Open-Semantic-Framework/blob/a7d230edfe308fc82064bba452ca488c1dde1a8d/framework/solr_schema_v1_2.xml|creationDate|2012-05-10
+https://github.com/structureddynamics/structWSF-Open-Semantic-Framework/blob/a7d230edfe308fc82064bba452ca488c1dde1a8d/framework/solr_schema_v1_2.xml|tag|http://www.semanlink.net/tag/solr
+https://github.com/structureddynamics/structWSF-Open-Semantic-Framework/blob/a7d230edfe308fc82064bba452ca488c1dde1a8d/framework/solr_schema_v1_2.xml|tag|http://www.semanlink.net/tag/openstructs
+https://github.com/structureddynamics/structWSF-Open-Semantic-Framework/blob/a7d230edfe308fc82064bba452ca488c1dde1a8d/framework/solr_schema_v1_2.xml|title|structWSF-Open-Semantic-Framework/framework/solr_schema_v1_2.xml
+https://github.com/structureddynamics/structWSF-Open-Semantic-Framework/blob/a7d230edfe308fc82064bba452ca488c1dde1a8d/framework/solr_schema_v1_2.xml|creationTime|2012-05-10T00:40:59Z
+http://lucene.472066.n3.nabble.com/Internationalization-td475088.html|creationDate|2012-05-03
+http://lucene.472066.n3.nabble.com/Internationalization-td475088.html|tag|http://www.semanlink.net/tag/solr_not_english_only
+http://lucene.472066.n3.nabble.com/Internationalization-td475088.html|title|Solr - User - Internationalization
+http://lucene.472066.n3.nabble.com/Internationalization-td475088.html|creationTime|2012-05-03T16:29:53Z
+http://joelleguillais.blogspot.com/|creationDate|2007-02-18
+http://joelleguillais.blogspot.com/|tag|http://www.semanlink.net/tag/perche
+http://joelleguillais.blogspot.com/|title|Les ateliers et les publications de la romancière Joëlle Guillais
+http://joelleguillais.blogspot.com/|creationTime|2007-02-18T22:31:46Z
+http://www.plasticbag.org/files/native/|creationDate|2006-03-11
+http://www.plasticbag.org/files/native/|tag|http://www.semanlink.net/tag/slides
+http://www.plasticbag.org/files/native/|tag|http://www.semanlink.net/tag/data_web
+http://www.plasticbag.org/files/native/|tag|http://www.semanlink.net/tag/web_2_0
+http://www.plasticbag.org/files/native/|title|Native to a Web of Data (Tom Coates, plasticbag.org)
+http://boingboing.net/2013/08/17/cross-a-border-lose-your-eboo.html|creationDate|2013-08-18
+http://boingboing.net/2013/08/17/cross-a-border-lose-your-eboo.html|tag|http://www.semanlink.net/tag/drm
+http://boingboing.net/2013/08/17/cross-a-border-lose-your-eboo.html|tag|http://www.semanlink.net/tag/cory_doctorow
+http://boingboing.net/2013/08/17/cross-a-border-lose-your-eboo.html|tag|http://www.semanlink.net/tag/google_play
+http://boingboing.net/2013/08/17/cross-a-border-lose-your-eboo.html|tag|http://www.semanlink.net/tag/ebooks
+http://boingboing.net/2013/08/17/cross-a-border-lose-your-eboo.html|comment|"Hmm: Chill out, DRM’s not the reason that guy lost his (public domain) Google ebooks"
+http://boingboing.net/2013/08/17/cross-a-border-lose-your-eboo.html|title|Cross a border, lose your ebooks - Boing Boing
+http://boingboing.net/2013/08/17/cross-a-border-lose-your-eboo.html|creationTime|2013-08-18T13:02:12Z
+http://www.kanzaki.com/works/2016/pub/image-annotator?u=/works/2017/annot/youtube-norrington-eroica-mov2.json|creationDate|2017-02-26
+http://www.kanzaki.com/works/2016/pub/image-annotator?u=/works/2017/annot/youtube-norrington-eroica-mov2.json|tag|http://www.semanlink.net/tag/media_fragments
+http://www.kanzaki.com/works/2016/pub/image-annotator?u=/works/2017/annot/youtube-norrington-eroica-mov2.json|title|Demo of #mediafragments combined with web #annotations on a @YouTube video
+http://www.kanzaki.com/works/2016/pub/image-annotator?u=/works/2017/annot/youtube-norrington-eroica-mov2.json|creationTime|2017-02-26T10:44:39Z
+http://www.la-grange.net/2004/12/08.html#data|creationDate|2005-09-12
+http://www.la-grange.net/2004/12/08.html#data|tag|http://www.semanlink.net/tag/semanlink_related
+http://www.la-grange.net/2004/12/08.html#data|tag|http://www.semanlink.net/tag/tagging
+http://www.la-grange.net/2004/12/08.html#data|comment|Demandez à votre ordinateur de vous montrer votre activité de la journée du 7 décembre 2004. Combien de photos ai-je prise, à quelles heures, avec qui j'ai discuté ce jour là, de qui ai-je reçu des messages, etc. Impossible.
+http://www.la-grange.net/2004/12/08.html#data|title|Un web sémantique pour l'utilisateur - 2004-12-08 - Carnet Web Karl
+https://www.coursera.org/course/wh1300?utm_campaign=2013-august-newsletter&utm_date=1377177003&utm_source=newsletter&utm_user=97328&utm_medium=email&utm_recommendation=1&utm_variant=401|creationDate|2013-08-26
+https://www.coursera.org/course/wh1300?utm_campaign=2013-august-newsletter&utm_date=1377177003&utm_source=newsletter&utm_user=97328&utm_medium=email&utm_recommendation=1&utm_variant=401|tag|http://www.semanlink.net/tag/a_history_of_the_world_since_1300
+https://www.coursera.org/course/wh1300?utm_campaign=2013-august-newsletter&utm_date=1377177003&utm_source=newsletter&utm_user=97328&utm_medium=email&utm_recommendation=1&utm_variant=401|title|A History of the World since 1300 Coursera
+https://www.coursera.org/course/wh1300?utm_campaign=2013-august-newsletter&utm_date=1377177003&utm_source=newsletter&utm_user=97328&utm_medium=email&utm_recommendation=1&utm_variant=401|creationTime|2013-08-26T16:57:54Z
+http://perso.wanadoo.fr/philippe.boeuf/robert/physique/physiquequest.htm|creationDate|2005-08-24
+http://perso.wanadoo.fr/philippe.boeuf/robert/physique/physiquequest.htm|tag|http://www.semanlink.net/tag/physique
+http://perso.wanadoo.fr/philippe.boeuf/robert/physique/physiquequest.htm|title|Questions de physique
+http://blogs.esa.int/rosetta/2015/06/26/rosetta-and-philae-searching-for-a-good-signal/|creationDate|2015-06-28
+http://blogs.esa.int/rosetta/2015/06/26/rosetta-and-philae-searching-for-a-good-signal/|tag|http://www.semanlink.net/tag/rosetta
+http://blogs.esa.int/rosetta/2015/06/26/rosetta-and-philae-searching-for-a-good-signal/|tag|http://www.semanlink.net/tag/philae
+http://blogs.esa.int/rosetta/2015/06/26/rosetta-and-philae-searching-for-a-good-signal/|title|Rosetta and Philae: Searching for a good signal Rosetta - ESA's comet chaser
+http://blogs.esa.int/rosetta/2015/06/26/rosetta-and-philae-searching-for-a-good-signal/|creationTime|2015-06-28T09:25:18Z
+http://rdfa.info/2010/04/22/facebook-adopts-rdfa/|creationDate|2010-04-27
+http://rdfa.info/2010/04/22/facebook-adopts-rdfa/|tag|http://www.semanlink.net/tag/rdfa
+http://rdfa.info/2010/04/22/facebook-adopts-rdfa/|tag|http://www.semanlink.net/tag/facebook
+http://rdfa.info/2010/04/22/facebook-adopts-rdfa/|title|RDFa » Blog Archive » Facebook adopts RDFa
+http://rdfa.info/2010/04/22/facebook-adopts-rdfa/|creationTime|2010-04-27T12:53:56Z
+http://news.softpedia.com/news/Nanogenerator-Could-Draw-Energy-from-Human-Blood-60580.shtml|creationDate|2007-07-28
+http://news.softpedia.com/news/Nanogenerator-Could-Draw-Energy-from-Human-Blood-60580.shtml|tag|http://www.semanlink.net/tag/nous_vivons_une_epoque_moderne
+http://news.softpedia.com/news/Nanogenerator-Could-Draw-Energy-from-Human-Blood-60580.shtml|title|Nanogenerator Could Draw Energy from Human Blood - Unlike the mythical vampire, it will help people - Softpedia
+http://news.softpedia.com/news/Nanogenerator-Could-Draw-Energy-from-Human-Blood-60580.shtml|creationTime|2007-07-28T18:48:46Z
+http://www.chant-orthoptere.com/|creationDate|2013-08-05
+http://www.chant-orthoptere.com/|tag|http://www.semanlink.net/tag/chant
+http://www.chant-orthoptere.com/|tag|http://www.semanlink.net/tag/grillon
+http://www.chant-orthoptere.com/|title|Chant Orthoptères
+http://www.chant-orthoptere.com/|creationTime|2013-08-05T14:41:49Z
+http://www-128.ibm.com/developerworks/java/library/j-jena/?ca=dgr-jw766j-jena|creationDate|2005-10-28
+http://www-128.ibm.com/developerworks/java/library/j-jena/?ca=dgr-jw766j-jena|tag|http://www.semanlink.net/tag/jena_introduction
+http://www-128.ibm.com/developerworks/java/library/j-jena/?ca=dgr-jw766j-jena|title|Introduction to Jena
+http://nicodjimenez.github.io/2017/10/08/tensorflow.html|creationDate|2017-10-16
+http://nicodjimenez.github.io/2017/10/08/tensorflow.html|tag|http://www.semanlink.net/tag/pytorch
+http://nicodjimenez.github.io/2017/10/08/tensorflow.html|tag|http://www.semanlink.net/tag/tensorflow
+http://nicodjimenez.github.io/2017/10/08/tensorflow.html|comment|see [What do people think of the TensorFlow sucks article? on Quora](https://www.quora.com/What-do-people-think-of-the-TensorFlow-sucks-article)
+http://nicodjimenez.github.io/2017/10/08/tensorflow.html|title|Tensorflow sucks
+http://nicodjimenez.github.io/2017/10/08/tensorflow.html|creationTime|2017-10-16T14:34:28Z
+http://www.hrw.org/reports/1999/rwanda/index.htm#TopOfPage|creationDate|2007-06-15
+http://www.hrw.org/reports/1999/rwanda/index.htm#TopOfPage|tag|http://www.semanlink.net/tag/genocide_rwandais
+http://www.hrw.org/reports/1999/rwanda/index.htm#TopOfPage|title|Leave None to Tell the Story: Genocide in Rwanda (Human Rights Watch Report, March 1999)
+http://www.hrw.org/reports/1999/rwanda/index.htm#TopOfPage|creationTime|2007-06-15T21:47:41Z
+http://www.politico.com/magazine/story/2017/04/23/what-a-1973-french-novel-tells-us-about-marine-le-pen-steve-bannon-and-the-rise-of-the-populist-right-215064|creationDate|2017-04-24
+http://www.politico.com/magazine/story/2017/04/23/what-a-1973-french-novel-tells-us-about-marine-le-pen-steve-bannon-and-the-rise-of-the-populist-right-215064|tag|http://www.semanlink.net/tag/le_pen
+http://www.politico.com/magazine/story/2017/04/23/what-a-1973-french-novel-tells-us-about-marine-le-pen-steve-bannon-and-the-rise-of-the-populist-right-215064|title|What a 1973 French Novel Tells Us About Marine Le Pen, Steve Bannon and the Rise of the Populist Right - POLITICO Magazine
+http://www.politico.com/magazine/story/2017/04/23/what-a-1973-french-novel-tells-us-about-marine-le-pen-steve-bannon-and-the-rise-of-the-populist-right-215064|creationTime|2017-04-24T00:10:27Z
+https://ai.googleblog.com/2018/10/google-at-emnlp-2018.html|creationDate|2018-11-25
+https://ai.googleblog.com/2018/10/google-at-emnlp-2018.html|tag|http://www.semanlink.net/tag/nlp_google
+https://ai.googleblog.com/2018/10/google-at-emnlp-2018.html|tag|http://www.semanlink.net/tag/emnlp_2018
+https://ai.googleblog.com/2018/10/google-at-emnlp-2018.html|title|Google AI Blog: Google at EMNLP 2018
+https://ai.googleblog.com/2018/10/google-at-emnlp-2018.html|creationTime|2018-11-25T15:14:25Z
+http://webmasters.stackexchange.com/questions/37721/does-a-303-status-code-pass-page-rank|creationDate|2013-12-11
+http://webmasters.stackexchange.com/questions/37721/does-a-303-status-code-pass-page-rank|tag|http://www.semanlink.net/tag/seo
+http://webmasters.stackexchange.com/questions/37721/does-a-303-status-code-pass-page-rank|title|seo - Does a 303 status code pass page rank? - Webmasters Stack Exchange
+http://webmasters.stackexchange.com/questions/37721/does-a-303-status-code-pass-page-rank|creationTime|2013-12-11T17:08:02Z
+http://drupalmotion.com/article/drupalcon-munich-highlights-and-backbonejs|creationDate|2012-09-04
+http://drupalmotion.com/article/drupalcon-munich-highlights-and-backbonejs|tag|http://www.semanlink.net/tag/drupal
+http://drupalmotion.com/article/drupalcon-munich-highlights-and-backbonejs|tag|http://www.semanlink.net/tag/backbone_js
+http://drupalmotion.com/article/drupalcon-munich-highlights-and-backbonejs|title|DrupalCon Munich highlights and Backbone.js Drupal motion
+http://drupalmotion.com/article/drupalcon-munich-highlights-and-backbonejs|creationTime|2012-09-04T11:14:53Z
+http://business-intelligence.developpez.com/cours/|creationDate|2010-09-24
+http://business-intelligence.developpez.com/cours/|tag|http://www.semanlink.net/tag/business_intelligence
+http://business-intelligence.developpez.com/cours/|tag|http://www.semanlink.net/tag/tutorial
+http://business-intelligence.developpez.com/cours/|title|Tutoriels Business Intelligence
+http://business-intelligence.developpez.com/cours/|creationTime|2010-09-24T12:57:26Z
+http://vocamp.org/mw/index.php?title=HypiosVoCampParisMay2010|creationDate|2010-05-14
+http://vocamp.org/mw/index.php?title=HypiosVoCampParisMay2010|tag|http://www.semanlink.net/tag/hypiosvocampparismay2010
+http://vocamp.org/mw/index.php?title=HypiosVoCampParisMay2010|title|HypiosVoCampParisMay2010
+http://vocamp.org/mw/index.php?title=HypiosVoCampParisMay2010|creationTime|2010-05-14T09:01:03Z
+http://bigbrowser.blog.lemonde.fr/2015/09/02/les-islandais-se-mobilisent-pour-accueillir-davantage-de-refugies/|creationDate|2015-09-02
+http://bigbrowser.blog.lemonde.fr/2015/09/02/les-islandais-se-mobilisent-pour-accueillir-davantage-de-refugies/|tag|http://www.semanlink.net/tag/islande
+http://bigbrowser.blog.lemonde.fr/2015/09/02/les-islandais-se-mobilisent-pour-accueillir-davantage-de-refugies/|tag|http://www.semanlink.net/tag/refugies
+http://bigbrowser.blog.lemonde.fr/2015/09/02/les-islandais-se-mobilisent-pour-accueillir-davantage-de-refugies/|title|Les Islandais se mobilisent pour accueillir davantage de réfugiés Big Browser
+http://bigbrowser.blog.lemonde.fr/2015/09/02/les-islandais-se-mobilisent-pour-accueillir-davantage-de-refugies/|creationTime|2015-09-02T23:19:46Z
+http://hyperfp.blogspot.com/|creationDate|2006-01-23
+http://hyperfp.blogspot.com/|tag|http://www.semanlink.net/tag/fps_blog
+http://hyperfp.blogspot.com/|title|Blog d'hyperfp sur blogger
+http://pinboard.in/|creationDate|2010-12-21
+http://pinboard.in/|tag|http://www.semanlink.net/tag/social_bookmarking
+http://pinboard.in/|title|Pinboard - antisocial bookmarking
+http://pinboard.in/|creationTime|2010-12-21T14:47:26Z
+http://news.independent.co.uk/world/asia/article1222214.ece|creationDate|2006-08-29
+http://news.independent.co.uk/world/asia/article1222214.ece|tag|http://www.semanlink.net/tag/age_du_bronze
+http://news.independent.co.uk/world/asia/article1222214.ece|tag|http://www.semanlink.net/tag/archeologie_chinoise
+http://news.independent.co.uk/world/asia/article1222214.ece|tag|http://www.semanlink.net/tag/celte
+http://news.independent.co.uk/world/asia/article1222214.ece|tag|http://www.semanlink.net/tag/momie
+http://news.independent.co.uk/world/asia/article1222214.ece|title|A meeting of civilisations: The mystery of China's celtic mummies
+http://www-128.ibm.com/developerworks/java/library/j-thread.html|creationDate|2005-10-29
+http://www-128.ibm.com/developerworks/java/library/j-thread.html|tag|http://www.semanlink.net/tag/java_concurrency +http://www-128.ibm.com/developerworks/java/library/j-thread.html|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www-128.ibm.com/developerworks/java/library/j-thread.html|title|Writing multithreaded Java applications +https://www.datacamp.com/community/tutorials/machine-learning-python|creationDate|2017-06-19 +https://www.datacamp.com/community/tutorials/machine-learning-python|tag|http://www.semanlink.net/tag/tutorial +https://www.datacamp.com/community/tutorials/machine-learning-python|title|Python Machine Learning: Scikit-Learn Tutorial (Article) +https://www.datacamp.com/community/tutorials/machine-learning-python|creationTime|2017-06-19T11:26:31Z +https://en.wikipedia.org/wiki/Neural_backpropagation|creationDate|2016-01-03 +https://en.wikipedia.org/wiki/Neural_backpropagation|tag|http://www.semanlink.net/tag/neuroscience_and_ai +https://en.wikipedia.org/wiki/Neural_backpropagation|tag|http://www.semanlink.net/tag/backpropagation +https://en.wikipedia.org/wiki/Neural_backpropagation|title|Neural backpropagation - Wikipedia, the free encyclopedia +https://en.wikipedia.org/wiki/Neural_backpropagation|creationTime|2016-01-03T16:21:06Z +https://jersey.java.net/documentation/latest/index.html|creationDate|2014-09-16 +https://jersey.java.net/documentation/latest/index.html|tag|http://www.semanlink.net/tag/jersey +https://jersey.java.net/documentation/latest/index.html|title|Jersey User Guide +https://jersey.java.net/documentation/latest/index.html|creationTime|2014-09-16T15:43:56Z +http://nickgrossman.is/post/76566568384/should-we-regulate-the-internet-the-real-world-way-or|creationDate|2014-04-16 +http://nickgrossman.is/post/76566568384/should-we-regulate-the-internet-the-real-world-way-or|tag|http://www.semanlink.net/tag/seattle +http://nickgrossman.is/post/76566568384/should-we-regulate-the-internet-the-real-world-way-or|tag|http://www.semanlink.net/tag/rideshare +http://nickgrossman.is/post/76566568384/should-we-regulate-the-internet-the-real-world-way-or|tag|http://www.semanlink.net/tag/internet +http://nickgrossman.is/post/76566568384/should-we-regulate-the-internet-the-real-world-way-or|tag|http://www.semanlink.net/tag/taxi +http://nickgrossman.is/post/76566568384/should-we-regulate-the-internet-the-real-world-way-or|title|Nick Grossman's Slow Hunch — Should we regulate the Internet the real world way or the real world the Internet way? +http://nickgrossman.is/post/76566568384/should-we-regulate-the-internet-the-real-world-way-or|creationTime|2014-04-16T01:08:36Z +http://www.blueclawsearch.co.uk/blog/rdfa-format-data-what-is-its-impact-and-when-should-you-implement/|creationDate|2013-01-30 +http://www.blueclawsearch.co.uk/blog/rdfa-format-data-what-is-its-impact-and-when-should-you-implement/|tag|http://www.semanlink.net/tag/google_rich_snippets +http://www.blueclawsearch.co.uk/blog/rdfa-format-data-what-is-its-impact-and-when-should-you-implement/|tag|http://www.semanlink.net/tag/rdfa +http://www.blueclawsearch.co.uk/blog/rdfa-format-data-what-is-its-impact-and-when-should-you-implement/|title|RDFa format data: what is its impact and when should you implement? 
+http://www.blueclawsearch.co.uk/blog/rdfa-format-data-what-is-its-impact-and-when-should-you-implement/|creationTime|2013-01-30T03:29:51Z +http://www.lemonde.fr/idees/article/2012/04/12/michel-serres-cette-campagne-presidentielle-est-une-campagne-de-vieux-pepes_1684573_3232.html|creationDate|2012-05-06 +http://www.lemonde.fr/idees/article/2012/04/12/michel-serres-cette-campagne-presidentielle-est-une-campagne-de-vieux-pepes_1684573_3232.html|tag|http://www.semanlink.net/tag/presidentielles_2012 +http://www.lemonde.fr/idees/article/2012/04/12/michel-serres-cette-campagne-presidentielle-est-une-campagne-de-vieux-pepes_1684573_3232.html|tag|http://www.semanlink.net/tag/michel_serres +http://www.lemonde.fr/idees/article/2012/04/12/michel-serres-cette-campagne-presidentielle-est-une-campagne-de-vieux-pepes_1684573_3232.html|comment|Tout ce que nous avons aujourd'hui de solide, ce sont les rêveurs qui l'ont fait ! +http://www.lemonde.fr/idees/article/2012/04/12/michel-serres-cette-campagne-presidentielle-est-une-campagne-de-vieux-pepes_1684573_3232.html|title|"Michel Serres : ""Cette campagne présidentielle est une campagne de vieux pépés !""" +http://www.lemonde.fr/idees/article/2012/04/12/michel-serres-cette-campagne-presidentielle-est-une-campagne-de-vieux-pepes_1684573_3232.html|creationTime|2012-05-06T09:31:24Z +http://youtube.com/ucberkeley|creationDate|2007-10-04 +http://youtube.com/ucberkeley|tag|http://www.semanlink.net/tag/berkeley +http://youtube.com/ucberkeley|tag|http://www.semanlink.net/tag/youtube +http://youtube.com/ucberkeley|tag|http://www.semanlink.net/tag/online_course_materials +http://youtube.com/ucberkeley|title|UC Berkeley to post full lectures to YouTube +http://youtube.com/ucberkeley|creationTime|2007-10-04T21:30:00Z +http://hypermedia.research.glam.ac.uk/kos/STAR/|creationDate|2009-04-20 +http://hypermedia.research.glam.ac.uk/kos/STAR/|tag|http://www.semanlink.net/tag/semantic_web +http://hypermedia.research.glam.ac.uk/kos/STAR/|tag|http://www.semanlink.net/tag/archeologie +http://hypermedia.research.glam.ac.uk/kos/STAR/|tag|http://www.semanlink.net/tag/skos +http://hypermedia.research.glam.ac.uk/kos/STAR/|title|Semantic Technologies for Archaeological Resources +http://hypermedia.research.glam.ac.uk/kos/STAR/|creationTime|2009-04-20T23:55:34Z +http://scratch.mit.edu/|creationDate|2008-05-13 +http://scratch.mit.edu/|tag|http://www.semanlink.net/tag/enfants +http://scratch.mit.edu/|tag|http://www.semanlink.net/tag/programming_language +http://scratch.mit.edu/|title|Scratch Home imagine, program, share +http://scratch.mit.edu/|creationTime|2008-05-13T00:19:23Z +http://www.interleaves.org/~rteeter/rss.html|creationDate|2005-09-01 +http://www.interleaves.org/~rteeter/rss.html|tag|http://www.semanlink.net/tag/rss +http://www.interleaves.org/~rteeter/rss.html|title|RSS: What it is, Where to get it, How to make it, How to use it +http://www.les-ernest.fr/lionel_zinsou|creationDate|2011-08-28 +http://www.les-ernest.fr/lionel_zinsou|tag|http://www.semanlink.net/tag/afrique +http://www.les-ernest.fr/lionel_zinsou|tag|http://www.semanlink.net/tag/normale_sup +http://www.les-ernest.fr/lionel_zinsou|title|Une vision optimiste de l'Afrique Les Ernest +http://www.les-ernest.fr/lionel_zinsou|creationTime|2011-08-28T01:41:17Z +http://esw.w3.org/mt/esw/archives/000048.html|creationDate|2008-01-23 +http://esw.w3.org/mt/esw/archives/000048.html|tag|http://www.semanlink.net/tag/validation +http://esw.w3.org/mt/esw/archives/000048.html|tag|http://www.semanlink.net/tag/faq 
+http://esw.w3.org/mt/esw/archives/000048.html|title|SWAD-Europe Weblog: FAQ: Using RDFS or OWL as a schema language for validating RDF +http://esw.w3.org/mt/esw/archives/000048.html|creationTime|2008-01-23T23:09:02Z +http://gears.google.com/|creationDate|2008-09-02 +http://gears.google.com/|tag|http://www.semanlink.net/tag/google +http://gears.google.com/|title|Gears +http://gears.google.com/|creationTime|2008-09-02T13:29:45Z +https://medium.com/data-from-the-trenches/training-cutting-edge-neural-networks-with-tensor2tensor-and-10-lines-of-code-10973c030b8|creationDate|2019-01-21 +https://medium.com/data-from-the-trenches/training-cutting-edge-neural-networks-with-tensor2tensor-and-10-lines-of-code-10973c030b8|tag|http://www.semanlink.net/tag/tensor2tensor +https://medium.com/data-from-the-trenches/training-cutting-edge-neural-networks-with-tensor2tensor-and-10-lines-of-code-10973c030b8|title|Training Cutting-Edge Neural Networks with Tensor2Tensor and 10 lines of code +https://medium.com/data-from-the-trenches/training-cutting-edge-neural-networks-with-tensor2tensor-and-10-lines-of-code-10973c030b8|creationTime|2019-01-21T10:58:18Z +http://www.arduino.cc/|creationDate|2013-04-19 +http://www.arduino.cc/|tag|http://www.semanlink.net/tag/arduino +http://www.arduino.cc/|comment|Open-source electronics prototyping platform based on flexible, easy-to-use hardware and software. +http://www.arduino.cc/|title|Arduino - HomePage +http://www.arduino.cc/|creationTime|2013-04-19T14:16:36Z +http://www.wired.com/wiredscience/2014/01/bells-theorem/|creationDate|2014-01-21 +http://www.wired.com/wiredscience/2014/01/bells-theorem/|tag|http://www.semanlink.net/tag/paradoxe_einstein_podolsky_rosen +http://www.wired.com/wiredscience/2014/01/bells-theorem/|title|The Experiment That Forever Changed How We Think About Reality - Wired Science +http://www.wired.com/wiredscience/2014/01/bells-theorem/|creationTime|2014-01-21T23:31:44Z +http://www.nltk.org/book/|creationDate|2017-06-26 +http://www.nltk.org/book/|tag|http://www.semanlink.net/tag/nltk +http://www.nltk.org/book/|title|NLTK Book +http://www.nltk.org/book/|creationTime|2017-06-26T18:32:53Z +http://www.vivevenezuela.com/foto_gran_sabana.htm|creationDate|2005-11-10 +http://www.vivevenezuela.com/foto_gran_sabana.htm|tag|http://www.semanlink.net/tag/tepuys +http://www.vivevenezuela.com/foto_gran_sabana.htm|title|Venezuela foto Tepuy della Gran Sabana +http://www.cs.jhu.edu/%7Evandurme/papers/VanDurmeLallACL10-slides.pdf|creationDate|2017-07-26 +http://www.cs.jhu.edu/%7Evandurme/papers/VanDurmeLallACL10-slides.pdf|tag|http://www.semanlink.net/tag/slides +http://www.cs.jhu.edu/%7Evandurme/papers/VanDurmeLallACL10-slides.pdf|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.cs.jhu.edu/%7Evandurme/papers/VanDurmeLallACL10-slides.pdf|title|Online Generation of Locality Sensitive Hash Signatures +http://www.cs.jhu.edu/%7Evandurme/papers/VanDurmeLallACL10-slides.pdf|creationTime|2017-07-26T13:45:31Z +http://www.nytimes.com/2008/03/18/us/politics/18text-obama.html|creationDate|2008-11-06 +http://www.nytimes.com/2008/03/18/us/politics/18text-obama.html|tag|http://www.semanlink.net/tag/obama +http://www.nytimes.com/2008/03/18/us/politics/18text-obama.html|title|Barack Obama’s Speech on Race +http://www.nytimes.com/2008/03/18/us/politics/18text-obama.html|creationTime|2008-11-06T21:26:20Z +http://machinelearning.wustl.edu/mlpapers/paper_files/LodhiSSCW02.pdf|creationDate|2014-04-08 
+http://machinelearning.wustl.edu/mlpapers/paper_files/LodhiSSCW02.pdf|tag|http://www.semanlink.net/tag/nlp_text_classification +http://machinelearning.wustl.edu/mlpapers/paper_files/LodhiSSCW02.pdf|title|Text classification using string kernels +http://machinelearning.wustl.edu/mlpapers/paper_files/LodhiSSCW02.pdf|creationTime|2014-04-08T18:46:34Z +http://seevl.net/|creationDate|2011-05-15 +http://seevl.net/|tag|http://www.semanlink.net/tag/seevl +http://seevl.net/|comment|Seevl reinvents music discovery. We provide new ways to explore the cultural and musical universe of your favorite artists and to discover new ones by understanding how they are connected. +http://seevl.net/|title|reinventing music discovery - seevl.net +http://seevl.net/|creationTime|2011-05-15T14:36:32Z +http://jersey.java.net/nonav/documentation/latest/user-guide.html|creationDate|2011-07-29 +http://jersey.java.net/nonav/documentation/latest/user-guide.html|tag|http://www.semanlink.net/tag/jersey +http://jersey.java.net/nonav/documentation/latest/user-guide.html|title|Jersey User Guide +http://jersey.java.net/nonav/documentation/latest/user-guide.html|creationTime|2011-07-29T00:10:32Z +http://weblog.burningbird.net/archives/2005/08/27/photos-flickr-and-back-doors/|creationDate|2005-08-29 +http://weblog.burningbird.net/archives/2005/08/27/photos-flickr-and-back-doors/|tag|http://www.semanlink.net/tag/flickr +http://weblog.burningbird.net/archives/2005/08/27/photos-flickr-and-back-doors/|title|Burningbird » Photos, Flickr, and Back Doors +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3244,50-709190,0.html|creationDate|2005-11-15 +http://web.resource.org/rss/1.0/|creationDate|2005-05-02 +http://web.resource.org/rss/1.0/|tag|http://www.semanlink.net/tag/rss_dev +http://web.resource.org/rss/1.0/|tag|http://www.semanlink.net/tag/spec +http://web.resource.org/rss/1.0/|tag|http://www.semanlink.net/tag/links +http://web.resource.org/rss/1.0/|tag|http://www.semanlink.net/tag/rss +http://web.resource.org/rss/1.0/|tag|http://www.semanlink.net/tag/tutorial +http://inmaps.linkedinlabs.com/share/Fran%C3%A7ois-Paul_Servant/11288967125555947972534535528313432213|creationDate|2012-07-23 +http://inmaps.linkedinlabs.com/share/Fran%C3%A7ois-Paul_Servant/11288967125555947972534535528313432213|tag|http://www.semanlink.net/tag/graph_visualization +http://inmaps.linkedinlabs.com/share/Fran%C3%A7ois-Paul_Servant/11288967125555947972534535528313432213|tag|http://www.semanlink.net/tag/linkedin +http://inmaps.linkedinlabs.com/share/Fran%C3%A7ois-Paul_Servant/11288967125555947972534535528313432213|title|LinkedIn Labs InMaps - François-Paul Servant's professional network +http://inmaps.linkedinlabs.com/share/Fran%C3%A7ois-Paul_Servant/11288967125555947972534535528313432213|creationTime|2012-07-23T08:38:44Z +http://stackoverflow.com/questions/19799560/hierarchical-classification-topic-model-training-data-for-internet-articles-an|creationDate|2014-04-23 +http://stackoverflow.com/questions/19799560/hierarchical-classification-topic-model-training-data-for-internet-articles-an|tag|http://www.semanlink.net/tag/skos +http://stackoverflow.com/questions/19799560/hierarchical-classification-topic-model-training-data-for-internet-articles-an|tag|http://www.semanlink.net/tag/nlp_text_classification +http://stackoverflow.com/questions/19799560/hierarchical-classification-topic-model-training-data-for-internet-articles-an|tag|http://www.semanlink.net/tag/topic_modeling 
+http://stackoverflow.com/questions/19799560/hierarchical-classification-topic-model-training-data-for-internet-articles-an|tag|http://www.semanlink.net/tag/stack_overflow +http://stackoverflow.com/questions/19799560/hierarchical-classification-topic-model-training-data-for-internet-articles-an|title|nltk - hierarchical classification + topic model training data for internet articles and social media - Stack Overflow +http://stackoverflow.com/questions/19799560/hierarchical-classification-topic-model-training-data-for-internet-articles-an|creationTime|2014-04-23T22:03:44Z +http://tools.ietf.org/html/rfc6570|creationDate|2014-09-08 +http://tools.ietf.org/html/rfc6570|tag|http://www.semanlink.net/tag/uri_template +http://tools.ietf.org/html/rfc6570|title|RFC 6570 - URI Template +http://tools.ietf.org/html/rfc6570|creationTime|2014-09-08T13:45:16Z +http://www.myspace.com/crianca|creationDate|2006-08-21 +http://www.myspace.com/crianca|tag|http://www.semanlink.net/tag/punk +http://www.myspace.com/crianca|tag|http://www.semanlink.net/tag/robert +http://www.myspace.com/crianca|title|www.myspace.com/crianca +https://www.nytimes.com/2017/12/11/science/gemstones-diamonds-sapphires-rubies.html|creationDate|2017-12-16 +https://www.nytimes.com/2017/12/11/science/gemstones-diamonds-sapphires-rubies.html|tag|http://www.semanlink.net/tag/geologie +https://www.nytimes.com/2017/12/11/science/gemstones-diamonds-sapphires-rubies.html|tag|http://www.semanlink.net/tag/pierres_precieuses +https://www.nytimes.com/2017/12/11/science/gemstones-diamonds-sapphires-rubies.html|title|Precious Gems Bear Messages From Earth’s Molten Heart - The New York Times +https://www.nytimes.com/2017/12/11/science/gemstones-diamonds-sapphires-rubies.html|creationTime|2017-12-16T18:06:53Z +http://mspace.ecs.soton.ac.uk|creationDate|2005-02-10 +http://mspace.ecs.soton.ac.uk|tag|http://www.semanlink.net/tag/mspace +http://www.sofakolle.com|creationDate|2005-08-24 +http://www.sofakolle.com|tag|http://www.semanlink.net/tag/john_sofakolle +http://www.sofakolle.com|title|www.sofakolle.com +http://droppages.com/|creationDate|2011-03-26 +http://droppages.com/|tag|http://www.semanlink.net/tag/cms +http://droppages.com/|title|DropPages.com +http://droppages.com/|creationTime|2011-03-26T16:07:36Z +http://sameas.org/|creationDate|2010-03-10 +http://sameas.org/|tag|http://www.semanlink.net/tag/linked_data_service +http://sameas.org/|tag|http://www.semanlink.net/tag/hugh_glaser +http://sameas.org/|comment|The Web of Data has many equivalent URIs. This service helps you to find co-references between different data sets. 
+http://sameas.org/|title|sameas.org +http://sameas.org/|creationTime|2010-03-10T21:18:02Z +http://www.nytimes.com/2005/11/26/international/asia/26china.html?ex=1290661200&en=8031ed40f8a9bad5&ei=5088&partner=rssnyt&emc=rss|creationDate|2005-11-27 +http://www.palais-decouverte.fr/index.php?id=858|creationDate|2009-05-03 +http://www.palais-decouverte.fr/index.php?id=858|tag|http://www.semanlink.net/tag/pythagore +http://www.palais-decouverte.fr/index.php?id=858|title|Théorème de Pythagore, 6 démonstrations +http://www.palais-decouverte.fr/index.php?id=858|creationTime|2009-05-03T13:20:19Z +http://www.gnowsis.org|creationDate|2005-04-22 +http://www.gnowsis.org|tag|http://www.semanlink.net/tag/semantic_desktop +http://www.gnowsis.org|tag|http://www.semanlink.net/tag/semanlink_related +http://www.lemonde.fr/afrique/article/2018/02/05/a-djibouti-la-chine-commence-a-dechanter_5252153_3212.html|creationDate|2018-02-11 +http://www.lemonde.fr/afrique/article/2018/02/05/a-djibouti-la-chine-commence-a-dechanter_5252153_3212.html|tag|http://www.semanlink.net/tag/chine_afrique +http://www.lemonde.fr/afrique/article/2018/02/05/a-djibouti-la-chine-commence-a-dechanter_5252153_3212.html|tag|http://www.semanlink.net/tag/djibouti +http://www.lemonde.fr/afrique/article/2018/02/05/a-djibouti-la-chine-commence-a-dechanter_5252153_3212.html|title|A Djibouti, « la Chine commence à déchanter » +http://www.lemonde.fr/afrique/article/2018/02/05/a-djibouti-la-chine-commence-a-dechanter_5252153_3212.html|creationTime|2018-02-11T10:41:17Z +http://code.google.com/p/owl1-1/wiki/P2NotesOwled2007|creationDate|2007-06-29 +http://code.google.com/p/owl1-1/wiki/P2NotesOwled2007|tag|http://www.semanlink.net/tag/owled_2007_and_fps +http://code.google.com/p/owl1-1/wiki/P2NotesOwled2007|title|P2NotesOwled2007 - owl1-1 - Google Code +http://code.google.com/p/owl1-1/wiki/P2NotesOwled2007|creationTime|2007-06-29T20:09:50Z +http://benalman.com/code/projects/jquery-bbq/docs/files/jquery-ba-bbq-js.html|creationDate|2012-09-18 +http://benalman.com/code/projects/jquery-bbq/docs/files/jquery-ba-bbq-js.html|tag|http://www.semanlink.net/tag/jquery +http://benalman.com/code/projects/jquery-bbq/docs/files/jquery-ba-bbq-js.html|tag|http://www.semanlink.net/tag/browser_back_button +http://benalman.com/code/projects/jquery-bbq/docs/files/jquery-ba-bbq-js.html|title|jQuery BBQ: Back Button & Query Library +http://benalman.com/code/projects/jquery-bbq/docs/files/jquery-ba-bbq-js.html|creationTime|2012-09-18T17:36:51Z +http://bricolage.io/hosting-static-sites-with-docker-and-nginx/|creationDate|2016-05-22 +http://bricolage.io/hosting-static-sites-with-docker-and-nginx/|tag|http://www.semanlink.net/tag/nginx +http://bricolage.io/hosting-static-sites-with-docker-and-nginx/|tag|http://www.semanlink.net/tag/docker +http://bricolage.io/hosting-static-sites-with-docker-and-nginx/|title|Hosting static sites with Docker and Nginx Kyle Mathews +http://bricolage.io/hosting-static-sites-with-docker-and-nginx/|creationTime|2016-05-22T14:10:10Z +https://medium.com/an-american-developer-in-paris/a-year-in-paris-a235519a486b#.z4vgwhfkn|creationDate|2016-05-07 +https://medium.com/an-american-developer-in-paris/a-year-in-paris-a235519a486b#.z4vgwhfkn|tag|http://www.semanlink.net/tag/paris +https://medium.com/an-american-developer-in-paris/a-year-in-paris-a235519a486b#.z4vgwhfkn|title|A Year in Paris — An American Developer in Paris — Medium +https://medium.com/an-american-developer-in-paris/a-year-in-paris-a235519a486b#.z4vgwhfkn|creationTime|2016-05-07T00:37:07Z 
+http://edutechwiki.unige.ch/en/Latent_semantic_analysis_and_indexing|creationDate|2017-05-26 +http://edutechwiki.unige.ch/en/Latent_semantic_analysis_and_indexing|tag|http://www.semanlink.net/tag/latent_semantic_analysis +http://edutechwiki.unige.ch/en/Latent_semantic_analysis_and_indexing|title|Latent semantic analysis and indexing - EduTech Wiki +http://edutechwiki.unige.ch/en/Latent_semantic_analysis_and_indexing|creationTime|2017-05-26T01:26:35Z +http://www.azurs.net/mercredi/|creationDate|2005-04-15 +http://www.azurs.net/mercredi/|tag|http://www.semanlink.net/tag/blog +http://eprints.ecs.soton.ac.uk/10359/01/GDP04-mspaceReport.pdf|creationDate|2005-11-16 +http://eprints.ecs.soton.ac.uk/10359/01/GDP04-mspaceReport.pdf|tag|http://www.semanlink.net/tag/mspace +http://www.summly.com/en/introduction.html|creationDate|2011-12-28 +http://www.summly.com/en/introduction.html|tag|http://www.semanlink.net/tag/summly +http://www.summly.com/en/introduction.html|title|summly +http://www.summly.com/en/introduction.html|creationTime|2011-12-28T13:14:06Z +http://schemapedia.com/schemas/coo|creationDate|2011-03-24 +http://schemapedia.com/schemas/coo|tag|http://www.semanlink.net/tag/car_options_ontology +http://schemapedia.com/schemas/coo|title|Car Options Ontology at Schemapedia +http://schemapedia.com/schemas/coo|creationTime|2011-03-24T16:34:39Z +http://www.w3.org/wiki/WebID|creationDate|2011-03-02 +http://www.w3.org/wiki/WebID|tag|http://www.semanlink.net/tag/webid +http://www.w3.org/wiki/WebID|title|WebID - W3C Wiki +http://www.w3.org/wiki/WebID|creationTime|2011-03-02T12:58:58Z +http://www-128.ibm.com/developerworks/java/library/j-threads2.html|creationDate|2005-10-29 +http://www-128.ibm.com/developerworks/java/library/j-threads2.html|tag|http://www.semanlink.net/tag/java_concurrency +http://www-128.ibm.com/developerworks/java/library/j-threads2.html|title|Threading lightly, Part 2: Reducing contention +http://www.manageability.org/blog/stuff/soap-is-dead|creationDate|2005-10-13 +http://www.manageability.org/blog/stuff/soap-is-dead|tag|http://www.semanlink.net/tag/soap +http://www.manageability.org/blog/stuff/soap-is-dead|title|SOAP is Comatose But Not Officially Dead! 
+http://www.rap.prd.fr/pdf/technologie_streaming.pdf|creationDate|2014-11-17 +http://www.rap.prd.fr/pdf/technologie_streaming.pdf|title|Quelques mots sur la technologie de streaming +http://www.rap.prd.fr/pdf/technologie_streaming.pdf|creationTime|2014-11-17T20:53:58Z +http://www.forbes.com/sites/danielnyegriffiths/2013/01/12/aaron-swartz-uncompromising-reckless-and-delightful/|creationDate|2013-01-13 +http://www.forbes.com/sites/danielnyegriffiths/2013/01/12/aaron-swartz-uncompromising-reckless-and-delightful/|tag|http://www.semanlink.net/tag/aaron_swartz +http://www.forbes.com/sites/danielnyegriffiths/2013/01/12/aaron-swartz-uncompromising-reckless-and-delightful/|title|Open Data Activist Aaron Swartz Dies Aged 26: 'Uncompromising, Reckless and Delightful' - Forbes +http://www.forbes.com/sites/danielnyegriffiths/2013/01/12/aaron-swartz-uncompromising-reckless-and-delightful/|creationTime|2013-01-13T14:36:44Z +http://www.hydra-cg.com/spec/latest/linked-data-fragments/|creationDate|2014-10-29 +http://www.hydra-cg.com/spec/latest/linked-data-fragments/|tag|http://www.semanlink.net/tag/linked_data_fragments +http://www.hydra-cg.com/spec/latest/linked-data-fragments/|title|Linked Data Fragments +http://www.hydra-cg.com/spec/latest/linked-data-fragments/|creationTime|2014-10-29T01:04:40Z +http://uk.businessinsider.com/dao-hacked-ethereum-crashing-in-value-tens-of-millions-allegedly-stolen-2016-6|creationDate|2016-06-20 +http://uk.businessinsider.com/dao-hacked-ethereum-crashing-in-value-tens-of-millions-allegedly-stolen-2016-6|tag|http://www.semanlink.net/tag/dao_attack +http://uk.businessinsider.com/dao-hacked-ethereum-crashing-in-value-tens-of-millions-allegedly-stolen-2016-6|title|DAO hacked, Ethereum crashing in value - Business Insider +http://uk.businessinsider.com/dao-hacked-ethereum-crashing-in-value-tens-of-millions-allegedly-stolen-2016-6|creationTime|2016-06-20T10:18:34Z +http://www.slate.com/blogs/the_spot/2016/06/01/how_to_fix_the_penalty_shootout_play_it_before_extra_time.html|creationDate|2017-03-11 +http://www.slate.com/blogs/the_spot/2016/06/01/how_to_fix_the_penalty_shootout_play_it_before_extra_time.html|tag|http://www.semanlink.net/tag/football +http://www.slate.com/blogs/the_spot/2016/06/01/how_to_fix_the_penalty_shootout_play_it_before_extra_time.html|title|How to fix the penalty shootout: play it before extra time. +http://www.slate.com/blogs/the_spot/2016/06/01/how_to_fix_the_penalty_shootout_play_it_before_extra_time.html|creationTime|2017-03-11T01:18:37Z +https://fr.wikipedia.org/wiki/Breakthrough_Starshot|creationDate|2016-08-25 +https://fr.wikipedia.org/wiki/Breakthrough_Starshot|tag|http://www.semanlink.net/tag/conquete_spatiale +https://fr.wikipedia.org/wiki/Breakthrough_Starshot|title|Breakthrough Starshot +https://fr.wikipedia.org/wiki/Breakthrough_Starshot|creationTime|2016-08-25T00:44:46Z +http://www.memo.fr|creationDate|2005-04-17 +http://www.memo.fr|tag|http://www.semanlink.net/tag/histoire +http://www.nextapp.com/products/echo2/|creationDate|2005-09-27 +http://www.nextapp.com/products/echo2/|tag|http://www.semanlink.net/tag/ajax +http://www.nextapp.com/products/echo2/|title|NextApp . 
Echo2 +http://jena.sourceforge.net/ARQ/documentation.html|creationDate|2008-08-11 +http://jena.sourceforge.net/ARQ/documentation.html|tag|http://www.semanlink.net/tag/arq +http://jena.sourceforge.net/ARQ/documentation.html|title|ARQ - Documentation and Resources +http://jena.sourceforge.net/ARQ/documentation.html|creationTime|2008-08-11T14:20:02Z +http://www.pbs.org/cringely/pulpit/pulpit20060105.html|creationDate|2006-01-06 +http://www.pbs.org/cringely/pulpit/pulpit20060105.html|tag|http://www.semanlink.net/tag/google +http://www.pbs.org/cringely/pulpit/pulpit20060105.html|tag|http://www.semanlink.net/tag/tv_advertising +http://www.pbs.org/cringely/pulpit/pulpit20060105.html|title|PBS I, Cringely . January 5, 2006 - A Commercial Runs Through It +http://mobile.nytimes.com/2013/07/09/science/what-is-nostalgia-good-for-quite-a-bit-research-shows.html?pagewanted=all&_r=0|creationDate|2014-07-21 +http://mobile.nytimes.com/2013/07/09/science/what-is-nostalgia-good-for-quite-a-bit-research-shows.html?pagewanted=all&_r=0|tag|http://www.semanlink.net/tag/saudade +http://mobile.nytimes.com/2013/07/09/science/what-is-nostalgia-good-for-quite-a-bit-research-shows.html?pagewanted=all&_r=0|title|What Is Nostalgia Good For? Quite a Bit, Research Shows - NYTimes.com +http://mobile.nytimes.com/2013/07/09/science/what-is-nostalgia-good-for-quite-a-bit-research-shows.html?pagewanted=all&_r=0|creationTime|2014-07-21T22:16:49Z +http://news.bbc.co.uk/earth/hi/earth_news/newsid_8127000/8127519.stm|creationDate|2009-07-10 +http://news.bbc.co.uk/earth/hi/earth_news/newsid_8127000/8127519.stm|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://news.bbc.co.uk/earth/hi/earth_news/newsid_8127000/8127519.stm|tag|http://www.semanlink.net/tag/fourmi +http://news.bbc.co.uk/earth/hi/earth_news/newsid_8127000/8127519.stm|title|BBC - Earth News - Ant mega-colony takes over world +http://news.bbc.co.uk/earth/hi/earth_news/newsid_8127000/8127519.stm|creationTime|2009-07-10T15:16:36Z +http://developer.apple.com/internet/webcontent/objectdetection.html|creationDate|2006-01-20 +http://developer.apple.com/internet/webcontent/objectdetection.html|tag|http://www.semanlink.net/tag/javascript +http://developer.apple.com/internet/webcontent/objectdetection.html|title|Object Detection +http://autourduciel.blog.lemonde.fr/2014/07/17/regardez-la-rotation-dun-noyau-cometaire-double/|creationDate|2014-07-18 +http://autourduciel.blog.lemonde.fr/2014/07/17/regardez-la-rotation-dun-noyau-cometaire-double/|tag|http://www.semanlink.net/tag/rosetta +http://autourduciel.blog.lemonde.fr/2014/07/17/regardez-la-rotation-dun-noyau-cometaire-double/|title|Regardez la rotation d’un noyau cométaire double Autour du Ciel +http://autourduciel.blog.lemonde.fr/2014/07/17/regardez-la-rotation-dun-noyau-cometaire-double/|creationTime|2014-07-18T22:14:01Z +https://www.greenpeace.fr/saboteurs-de-centrale-de-doel-toujours-a-linterieur%E2%80%89/|creationDate|2017-12-07 +https://www.greenpeace.fr/saboteurs-de-centrale-de-doel-toujours-a-linterieur%E2%80%89/|tag|http://www.semanlink.net/tag/industrie_nucleaire +https://www.greenpeace.fr/saboteurs-de-centrale-de-doel-toujours-a-linterieur%E2%80%89/|tag|http://www.semanlink.net/tag/greenpeace +https://www.greenpeace.fr/saboteurs-de-centrale-de-doel-toujours-a-linterieur%E2%80%89/|title|Les saboteurs de la centrale de Doel toujours à l’intérieur ? 
- Greenpeace France +https://www.greenpeace.fr/saboteurs-de-centrale-de-doel-toujours-a-linterieur%E2%80%89/|creationTime|2017-12-07T00:39:19Z +http://www.betaversion.org/~stefano/|creationDate|2005-10-28 +http://www.betaversion.org/~stefano/|tag|http://www.semanlink.net/tag/stefano_mazzocchi +http://www.betaversion.org/~stefano/|title|Stefano Mazzocchi's Home Page +http://www.ledevoir.com/2003/06/30/30862.html|creationDate|2006-02-17 +http://www.ledevoir.com/2003/06/30/30862.html|tag|http://www.semanlink.net/tag/cathares +http://www.ledevoir.com/2003/06/30/30862.html|tag|http://www.semanlink.net/tag/croisade_des_albigeois +http://www.ledevoir.com/2003/06/30/30862.html|tag|http://www.semanlink.net/tag/citation +http://www.ledevoir.com/2003/06/30/30862.html|comment|" Béziers, juillet 1209. Les croisés pénètrent dans la ville et demandent à Arnaud Amaury, légat du pape, comment distinguer les hérétiques des catholiques : «Tuez-les tous, Dieu reconnaîtra les siens.» Difficile de dire si l'anecdote est vraie, mais Arnaud Amaury sut se montrer digne d'une telle déclaration lorsqu'il écrivit au pape Innocent III : «Les nôtres, n'épargnant ni le sang, ni le sexe, ni l'âge, ont fait périr par l'épée environ 20 000 personnes et, après un énorme massacre des ennemis, toute la cité a été pillée et brûlée. La vengeance divine a fait merveille.»
La doctrine cathare, contrairement au catholicisme selon lequel Dieu est le créateur de toutes choses, imputait au diable la création du monde matériel. +" +http://www.ledevoir.com/2003/06/30/30862.html|title|«Tuez-les tous, Dieu reconnaîtra les siens.» - Arnaud Amaury «Tuez-les tous, Dieu reconnaîtra les siens.» +http://www.sciencemag.org/site/extra/crispr/|creationDate|2016-01-03 +http://www.sciencemag.org/site/extra/crispr/|tag|http://www.semanlink.net/tag/crispr_cas9 +http://www.sciencemag.org/site/extra/crispr/|title|The CRISPR Revolution +http://www.sciencemag.org/site/extra/crispr/|creationTime|2016-01-03T17:07:39Z +http://www.ibm.com/developerworks/java/library/j-solr2/|creationDate|2012-05-15 +http://www.ibm.com/developerworks/java/library/j-solr2/|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.ibm.com/developerworks/java/library/j-solr2/|tag|http://www.semanlink.net/tag/solr +http://www.ibm.com/developerworks/java/library/j-solr2/|title|Search smarter with Apache Solr, Part 2: Solr for the enterprise +http://www.ibm.com/developerworks/java/library/j-solr2/|creationTime|2012-05-15T17:00:01Z +http://www-128.ibm.com/developerworks/library/j-sparql/|creationDate|2008-10-29 +http://www-128.ibm.com/developerworks/library/j-sparql/|tag|http://www.semanlink.net/tag/sparql_tips +http://www-128.ibm.com/developerworks/library/j-sparql/|title|SPARQL tips: using the GRAPH keyword +http://www-128.ibm.com/developerworks/library/j-sparql/|creationTime|2008-10-29T18:13:49Z +http://www.ibm.com/developerworks/xml/library/x-xmlajaxpt2/|creationDate|2008-03-18 +http://www.ibm.com/developerworks/xml/library/x-xmlajaxpt2/|tag|http://www.semanlink.net/tag/xslt +http://www.ibm.com/developerworks/xml/library/x-xmlajaxpt2/|tag|http://www.semanlink.net/tag/ajax +http://www.ibm.com/developerworks/xml/library/x-xmlajaxpt2/|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.ibm.com/developerworks/xml/library/x-xmlajaxpt2/|title|XML processing in Ajax, Part 2: Two Ajax and XSLT approaches +http://www.ibm.com/developerworks/xml/library/x-xmlajaxpt2/|creationTime|2008-03-18T16:21:15Z +http://www.miss-safia.com/|creationDate|2007-08-24 +http://www.miss-safia.com/|tag|http://www.semanlink.net/tag/musique_du_niger +http://www.miss-safia.com/|tag|http://www.semanlink.net/tag/rap +http://www.miss-safia.com/|title|MISS SAFIA - LA VOIX EN OR DU NIGER +http://www.miss-safia.com/|creationTime|2007-08-24T23:11:47Z +http://www.zyvex.com/nanotech/feynman.html|creationDate|2005-08-17 +http://www.zyvex.com/nanotech/feynman.html|tag|http://www.semanlink.net/tag/feynman +http://www.zyvex.com/nanotech/feynman.html|tag|http://www.semanlink.net/tag/nanotechnologies +http://www.zyvex.com/nanotech/feynman.html|title|Feynman: there's plenty of room at the bottom +https://www.cringely.com/2018/01/24/prediction-4-bitcoin-crashes-booms-crashes-booms-2018-traders-figure-not-currency/|creationDate|2018-02-23 +https://www.cringely.com/2018/01/24/prediction-4-bitcoin-crashes-booms-crashes-booms-2018-traders-figure-not-currency/|tag|http://www.semanlink.net/tag/cryptocurrency +https://www.cringely.com/2018/01/24/prediction-4-bitcoin-crashes-booms-crashes-booms-2018-traders-figure-not-currency/|tag|http://www.semanlink.net/tag/bitcoin +https://www.cringely.com/2018/01/24/prediction-4-bitcoin-crashes-booms-crashes-booms-2018-traders-figure-not-currency/|title|I, Cringely Prediction #4 — Bitcoin stays crazy until traders learn it is not a currency 
+https://www.cringely.com/2018/01/24/prediction-4-bitcoin-crashes-booms-crashes-booms-2018-traders-figure-not-currency/|creationTime|2018-02-23T22:52:21Z +http://comments.gmane.org/gmane.comp.apache.jena.user/5473|creationDate|2014-11-18 +http://comments.gmane.org/gmane.comp.apache.jena.user/5473|tag|http://www.semanlink.net/tag/json_ld +http://comments.gmane.org/gmane.comp.apache.jena.user/5473|tag|http://www.semanlink.net/tag/jena +http://comments.gmane.org/gmane.comp.apache.jena.user/5473|title|controlling JSON-LD output, Apache Jena main user support list +http://comments.gmane.org/gmane.comp.apache.jena.user/5473|creationTime|2014-11-18T21:56:50Z +http://www.googletutor.com/2005/04/15/voyeur-heaven/|creationDate|2005-04-27 +http://www.googletutor.com/2005/04/15/voyeur-heaven/|tag|http://www.semanlink.net/tag/google +http://www.googletutor.com/2005/04/15/voyeur-heaven/|tag|http://www.semanlink.net/tag/rigolo +http://www.googletutor.com/2005/04/15/voyeur-heaven/|title|Voyeur Heaven: finding interesting video, sound and image files in unprotected directories +http://www.ivy.fr/revealicious/|creationDate|2005-09-21 +http://www.ivy.fr/revealicious/|tag|http://www.semanlink.net/tag/tag_clusters +http://webcomponents.org/|creationDate|2014-11-23 +http://webcomponents.org/|tag|http://www.semanlink.net/tag/webcomponents +http://webcomponents.org/|title|WebComponents.org +http://webcomponents.org/|creationTime|2014-11-23T12:41:39Z +http://www.xml.com/pub/a/ws/2004/05/26/binding.html|creationDate|2005-10-14 +http://www.xml.com/pub/a/ws/2004/05/26/binding.html|tag|http://www.semanlink.net/tag/xml_schema +http://www.xml.com/pub/a/ws/2004/05/26/binding.html|tag|http://www.semanlink.net/tag/web_services +http://www.xml.com/pub/a/ws/2004/05/26/binding.html|title|webservices.xml.com: Schema Binding for Java Web Services +http://internetalchemy.org/2005/05/tinky-and-skos|creationDate|2005-07-04 +http://internetalchemy.org/2005/05/tinky-and-skos|tag|http://www.semanlink.net/tag/skos +http://internetalchemy.org/2005/05/tinky-and-skos|title|Internet Alchemy Tinky and SKOS +https://www.braincreators.com/2018/06/memory-networks/|creationDate|2018-11-14 +https://www.braincreators.com/2018/06/memory-networks/|tag|http://www.semanlink.net/tag/memory_networks +https://www.braincreators.com/2018/06/memory-networks/|title|Memory networks and why they're a good Idea - BrainCreators +https://www.braincreators.com/2018/06/memory-networks/|creationTime|2018-11-14T01:29:28Z +http://www.ibm.com/developerworks/java/library/j-jwebunit/|creationDate|2008-11-18 +http://www.ibm.com/developerworks/java/library/j-jwebunit/|tag|http://www.semanlink.net/tag/httpunit +http://www.ibm.com/developerworks/java/library/j-jwebunit/|tag|http://www.semanlink.net/tag/ibm_developerworks +http://www.ibm.com/developerworks/java/library/j-jwebunit/|title|Create test cases for Web applications +http://www.ibm.com/developerworks/java/library/j-jwebunit/|creationTime|2008-11-18T11:34:32Z +http://www.tecmint.com/open-source-artificial-intelligence-tools-softwares-linux/|creationDate|2017-01-04 +http://www.tecmint.com/open-source-artificial-intelligence-tools-softwares-linux/|tag|http://www.semanlink.net/tag/machine_learning_tool +http://www.tecmint.com/open-source-artificial-intelligence-tools-softwares-linux/|title|10 Top Open Source Artificial Intelligence Tools for Linux +http://www.tecmint.com/open-source-artificial-intelligence-tools-softwares-linux/|creationTime|2017-01-04T14:32:37Z 
+http://erikbenson.typepad.com/mu/2005/02/using_bloglines.html|creationDate|2005-04-06 +http://erikbenson.typepad.com/mu/2005/02/using_bloglines.html|tag|http://www.semanlink.net/tag/blog +http://erikbenson.typepad.com/mu/2005/02/using_bloglines.html|title|Using Bloglines to manage my online presence +http://www.emusic.com|creationDate|2006-09-20 +http://www.emusic.com|tag|http://www.semanlink.net/tag/musique_en_ligne +http://economist.com/science/PrinterFriendly.cfm?Story_ID=4316021|creationDate|2005-09-01 +http://economist.com/science/PrinterFriendly.cfm?Story_ID=4316021|title|The self-driving car comes closer—but difficulties remain +http://www.theregister.co.uk/2005/05/11/open_access_research/print.html|creationDate|2005-05-12 +http://www.theregister.co.uk/2005/05/11/open_access_research/print.html|tag|http://www.semanlink.net/tag/recherche +http://www.theregister.co.uk/2005/05/11/open_access_research/print.html|title|Dutch academics declare research free-for-all - The Register +http://www.ics.uci.edu/~fielding/pubs/dissertation/top.htm|creationDate|2005-09-23 +http://www.ics.uci.edu/~fielding/pubs/dissertation/top.htm|tag|http://www.semanlink.net/tag/roy_t_fielding +http://www.ics.uci.edu/~fielding/pubs/dissertation/top.htm|tag|http://www.semanlink.net/tag/rest +http://www.ics.uci.edu/~fielding/pubs/dissertation/top.htm|comment|"DISSERTATION submitted in partial satisfaction of the requirements for the degree of DOCTOR OF PHILOSOPHY in Information and Computer Science +" +http://www.ics.uci.edu/~fielding/pubs/dissertation/top.htm|title|Architectural Styles and the Design of Network-based Software Architectures - Roy Thomas Fielding +http://www.gchagnon.fr/cours/dhtml/evenements.html|creationDate|2007-12-14 +http://www.gchagnon.fr/cours/dhtml/evenements.html|tag|http://www.semanlink.net/tag/javascript_dom +http://www.gchagnon.fr/cours/dhtml/evenements.html|title|Gestion des événements et DOM +http://www.gchagnon.fr/cours/dhtml/evenements.html|creationTime|2007-12-14T11:24:06Z +http://ecologie.blog.lemonde.fr/2010/10/13/la-finance-peut-elle-sauver-la-planete/|creationDate|2010-10-14 +http://ecologie.blog.lemonde.fr/2010/10/13/la-finance-peut-elle-sauver-la-planete/|tag|http://www.semanlink.net/tag/finance +http://ecologie.blog.lemonde.fr/2010/10/13/la-finance-peut-elle-sauver-la-planete/|tag|http://www.semanlink.net/tag/placements_ethiques +http://ecologie.blog.lemonde.fr/2010/10/13/la-finance-peut-elle-sauver-la-planete/|title|La finance peut-elle sauver la planète ? - Eco(lo) - Blog LeMonde.fr +http://ecologie.blog.lemonde.fr/2010/10/13/la-finance-peut-elle-sauver-la-planete/|creationTime|2010-10-14T18:45:06Z +https://github.com/datquocnguyen/LFTM|creationDate|2017-05-22 +https://github.com/datquocnguyen/LFTM|tag|http://www.semanlink.net/tag/topic_models_word_embedding +https://github.com/datquocnguyen/LFTM|tag|http://www.semanlink.net/tag/github_project +https://github.com/datquocnguyen/LFTM|title|datquocnguyen/LFTM: Improving Topic Models with Latent Feature Word Representations (GitHub) +https://github.com/datquocnguyen/LFTM|creationTime|2017-05-22T14:53:21Z +https://medium.com/@joeDiHare/deep-bayesian-neural-networks-952763a9537|creationDate|2018-11-05 +https://medium.com/@joeDiHare/deep-bayesian-neural-networks-952763a9537|tag|http://www.semanlink.net/tag/bayesian_deep_learning +https://medium.com/@joeDiHare/deep-bayesian-neural-networks-952763a9537|title|Deep Bayesian Neural Networks. 
– Stefano Cosentino – Medium +https://medium.com/@joeDiHare/deep-bayesian-neural-networks-952763a9537|creationTime|2018-11-05T10:16:26Z +http://esw.w3.org/topic/LargeTripleStores|creationDate|2005-09-17 +http://esw.w3.org/topic/LargeTripleStores|tag|http://www.semanlink.net/tag/triplestore +http://esw.w3.org/topic/LargeTripleStores|title|LargeTripleStores - ESW Wiki +http://dannyayers.com/archives/2005/11/10/semantic-web-challenge-winners/|creationDate|2005-11-10 +http://dannyayers.com/archives/2005/11/10/semantic-web-challenge-winners/|tag|http://www.semanlink.net/tag/semantic_web_dev +http://dannyayers.com/archives/2005/11/10/semantic-web-challenge-winners/|title|Danny Ayers, Raw Blog : » Semantic Web Challenge Winners +http://www.sitepoint.com/blogs/2008/03/16/twines-dual-personality/|creationDate|2008-03-19 +http://www.sitepoint.com/blogs/2008/03/16/twines-dual-personality/|tag|http://www.semanlink.net/tag/twine +http://www.sitepoint.com/blogs/2008/03/16/twines-dual-personality/|title|SitePoint Blogs » Twine’s dual personality +http://www.sitepoint.com/blogs/2008/03/16/twines-dual-personality/|creationTime|2008-03-19T22:19:49Z +http://www.webforefront.com/archives/2005/05/rest_-_represen.html|creationDate|2005-09-19 +http://www.webforefront.com/archives/2005/05/rest_-_represen.html|tag|http://www.semanlink.net/tag/soap_vs_rest +http://www.webforefront.com/archives/2005/05/rest_-_represen.html|title|Web Forefront: The Web Services debate : SOAP vs. REST [ XML/HTTP ] +http://www.ensor.cc/2011/07/mavens-war-overlays-how-to-manage.html|creationDate|2015-03-13 +http://www.ensor.cc/2011/07/mavens-war-overlays-how-to-manage.html|tag|http://www.semanlink.net/tag/maven +http://www.ensor.cc/2011/07/mavens-war-overlays-how-to-manage.html|comment|"Beware to the proper management of the inclusion of libs with war overlays. I've been pubished with a java.lang.VerifyError because of it. +BTW don't trust eclipse's ""dependency hierarchy"" when using war overlays. 
And beware to war overlays chains + +" +http://www.ensor.cc/2011/07/mavens-war-overlays-how-to-manage.html|title|Mike's Site: Maven's WAR Overlays: How to manage dependencies +http://www.ensor.cc/2011/07/mavens-war-overlays-how-to-manage.html|creationTime|2015-03-13T10:23:26Z +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4836752.stm|creationDate|2006-03-24 +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4836752.stm|tag|http://www.semanlink.net/tag/abel_prize +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4836752.stm|tag|http://www.semanlink.net/tag/fourier +http://newsvote.bbc.co.uk/mpapps/pagetools/print/news.bbc.co.uk/1/hi/sci/tech/4836752.stm|title|BBC NEWS - 2006 mathematics prize announced +http://semanticweb.com/semtechbiz-berlin-day-1_b26503|creationDate|2012-02-08 +http://semanticweb.com/semtechbiz-berlin-day-1_b26503|tag|http://www.semanlink.net/tag/semtechbiz_berlin_2012 +http://semanticweb.com/semtechbiz-berlin-day-1_b26503|title|#SemTechBiz Berlin – Day 1 - semanticweb.com +http://semanticweb.com/semtechbiz-berlin-day-1_b26503|creationTime|2012-02-08T18:11:24Z +http://www.service-public.fr/accueil/attestation_accueil_etranger.html|creationDate|2005-09-14 +http://www.service-public.fr/accueil/attestation_accueil_etranger.html|tag|http://www.semanlink.net/tag/accueil_etranger +http://www.service-public.fr/accueil/attestation_accueil_etranger.html|title|Modification des conditions de délivrance de l'attestation d'accueil (26/11/04) - Service-public.fr - +http://nlp.town/blog/sentence-similarity/?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|creationDate|2018-05-07 +http://nlp.town/blog/sentence-similarity/?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|title|Comparing Sentence Similarity Methods +http://nlp.town/blog/sentence-similarity/?utm_campaign=Revue%20newsletter&utm_medium=Newsletter&utm_source=NLP%20News|creationTime|2018-05-07T16:02:14Z +http://blog.dejavu.sk/2013/11/19/registering-resources-and-providers-in-jersey-2/|creationDate|2014-10-09 +http://blog.dejavu.sk/2013/11/19/registering-resources-and-providers-in-jersey-2/|tag|http://www.semanlink.net/tag/jersey +http://blog.dejavu.sk/2013/11/19/registering-resources-and-providers-in-jersey-2/|title|Registering Resources and Providers in Jersey 2 tl;dr +http://blog.dejavu.sk/2013/11/19/registering-resources-and-providers-in-jersey-2/|creationTime|2014-10-09T14:01:50Z +https://www.quora.com/What-are-the-best-open-source-tools-for-unsupervised-clustering-of-text-documents|creationDate|2017-05-22 +https://www.quora.com/What-are-the-best-open-source-tools-for-unsupervised-clustering-of-text-documents|tag|http://www.semanlink.net/tag/clustering_of_text_documents +https://www.quora.com/What-are-the-best-open-source-tools-for-unsupervised-clustering-of-text-documents|title|What are the best open source tools for unsupervised clustering of text documents? 
- Quora +https://www.quora.com/What-are-the-best-open-source-tools-for-unsupervised-clustering-of-text-documents|creationTime|2017-05-22T12:00:39Z +http://semanticweb.com/web-components-even-better-semantic-markup_b45129|creationDate|2014-11-23 +http://semanticweb.com/web-components-even-better-semantic-markup_b45129|tag|http://www.semanlink.net/tag/semantic_components +http://semanticweb.com/web-components-even-better-semantic-markup_b45129|tag|http://www.semanlink.net/tag/webcomponents +http://semanticweb.com/web-components-even-better-semantic-markup_b45129|title|Web Components: Even Better With Semantic Markup - Semanticweb.com +http://semanticweb.com/web-components-even-better-semantic-markup_b45129|creationTime|2014-11-23T12:44:17Z +http://www.informit.com/articles/article.aspx?p=759232|creationDate|2009-02-24 +http://www.informit.com/articles/article.aspx?p=759232|tag|http://www.semanlink.net/tag/wtp +http://www.informit.com/articles/article.aspx?p=759232|tag|http://www.semanlink.net/tag/eclipse +http://www.informit.com/articles/article.aspx?p=759232|title|InformIT: Organizing Your Eclipse Web Tools Platform Development Project > Web Project Types and J2EE Applications +http://www.informit.com/articles/article.aspx?p=759232|creationTime|2009-02-24T23:45:52Z +http://any23.apache.org/xref/org/apache/any23/extractor/html/TurtleHTMLExtractor.html|creationDate|2013-09-06 +http://any23.apache.org/xref/org/apache/any23/extractor/html/TurtleHTMLExtractor.html|tag|http://www.semanlink.net/tag/turtle_in_html +http://any23.apache.org/xref/org/apache/any23/extractor/html/TurtleHTMLExtractor.html|title|TurtleHTMLExtractor xref +http://any23.apache.org/xref/org/apache/any23/extractor/html/TurtleHTMLExtractor.html|creationTime|2013-09-06T18:48:42Z +http://www.w3.org/2001/tag/issues.html#httpRange-14|creationDate|2007-01-02 +http://www.w3.org/2001/tag/issues.html#httpRange-14|tag|http://www.semanlink.net/tag/httprange_14 +http://www.w3.org/2001/tag/issues.html#httpRange-14|title|TAG Issues List - httpRange-14: What is the range of the HTTP dereference function? +http://internetactu.blog.lemonde.fr/2013/08/30/ou-en-est-lopen-data/|creationDate|2013-08-31 +http://internetactu.blog.lemonde.fr/2013/08/30/ou-en-est-lopen-data/|tag|http://www.semanlink.net/tag/open_data +http://internetactu.blog.lemonde.fr/2013/08/30/ou-en-est-lopen-data/|title|Où en est l’Open Data ? InternetActu +http://internetactu.blog.lemonde.fr/2013/08/30/ou-en-est-lopen-data/|creationTime|2013-08-31T10:32:32Z +http://swirlstats.com/|creationDate|2015-01-14 +http://swirlstats.com/|tag|http://www.semanlink.net/tag/r +http://swirlstats.com/|comment|swirl teaches you R programming and data science interactively, at your own pace, and right in the R console! +http://swirlstats.com/|title|swirl: Learn R, in R. 
+http://swirlstats.com/|creationTime|2015-01-14T01:09:10Z +http://storynory.com/|creationDate|2006-12-30 +http://storynory.com/|tag|http://www.semanlink.net/tag/enfants +http://storynory.com/|tag|http://www.semanlink.net/tag/ipod +http://storynory.com/|title|Storynory: Free Audio Stories for Kids +http://www.lemonde.fr/web/imprimer_element/0,40-0@2-3230,50-702731,0.html|creationDate|2005-10-24 +http://technorati.com/help/tags.html|creationDate|2006-01-03 +http://ajaxpatterns.org/|creationDate|2005-11-16 diff --git a/ckb/datasets/semanlink/valid.csv b/ckb/datasets/semanlink/valid.csv index f093560..bd6fbaf 100644 --- a/ckb/datasets/semanlink/valid.csv +++ b/ckb/datasets/semanlink/valid.csv @@ -1,803 +1,7112 @@ -Java dev|skos:broader|Dev -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:arxiv_author|Moshe Vardi -Bob DuCharme|skos:broader|SW guys (and girls) -Cross-lingual Word Embeddings|skos:broader|Word embeddings -Stacked Approximated Regression Machine: A Simple Deep Learning Approach This paper seems too good to be true! They can train a VGG-like net VERY quickly to good accuracy, without backprop. With the agreement of my coauthors, I Zhangyang Wang would like to withdraw the manuscript Stacked Approximated Regression Machine: A Simple Deep Learning Approach. Some experimental procedures were not included in the manuscript, which makes a part of important claims not meaningful. In the relevant research, I was solely responsible for carrying out the experiments; the other coauthors joined in the discussions leading to the main algorithm. Please see the updated text for more details.|sl:arxiv_author|Xia Hu -Sinaï|skos:broader|Egypte -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. 
Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|sl:arxiv_author|Andrew McCallum -A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Linear Locally Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|sl:arxiv_author|Francisco Charte -Engelbart|skos:broader|Technical girls and guys -Biodiversité|skos:broader|Biology -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. 
Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|sl:tag|tag:doc2vec -DBTune|skos:broader|Linking Open Data -Virtuoso|skos:broader|Federated database system -Semantic browsing|skos:broader|Semantic Web -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:tag|tag:arxiv_doc -The Matrix Calculus You Need For Deep Learning Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) This paper is an attempt to explain all the matrix calculus you need in order to understand the training of deep neural networks. We assume no math knowledge beyond what you learned in calculus 1, and provide links to help you refresh the necessary math where needed. Note that you do not need to understand this material before you start learning to train and use deep learning in practice; rather, this material is for those who are already familiar with the basics of neural networks, and wish to deepen their understanding of the underlying math. Don't worry if you get stuck at some point along the way---just go back and reread the previous section, and try writing down and working through some examples. And if you're still stuck, we're happy to answer your questions in the Theory category at forums.fast.ai. Note: There is a reference section at the end of the paper summarizing all the key matrix calculus rules and terminology discussed here. See related articles at http://explained.ai|sl:tag|tag:arxiv_doc -Exploration spatiale|skos:broader|Astronomie -SearchMonkey|skos:broader|Yahoo! -Semanlink|skos:broader|Tagging -Missoula Floods|skos:broader|Périodes glacières -Cathares|skos:broader|Religion -Cross-lingual NLP|skos:broader|NLP tasks / problems -Poutine|skos:broader|Russie -StarSpace|skos:broader|NLP@Facebook -Dietrich Schulten|skos:broader|Technical girls and guys -170, rue de Lourmel|skos:broader|Paris -RDF and social networks|skos:broader|RDF -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. 
Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Steffen Staab -SW has failed|skos:broader|Semantic Web -Découverte archéologique|skos:broader|Archéologie -RDF browser|skos:broader|Linked Data Browser -Markets are efficient if and only if P = NP Hmm wow I prove that if markets are weak-form efficient, meaning current prices fully reflect all information available in past prices, then P = NP, meaning every computational problem whose solution can be verified in polynomial time can also be solved in polynomial time. I also prove the converse by showing how we can program the market to solve NP-complete problems. Since P probably does not equal NP, markets are probably not efficient. Specifically, markets become increasingly inefficient as the time series lengthens or becomes more frequent. An illustration by way of partitioning the excess returns to momentum strategies based on data availability confirms this prediction.|sl:tag|tag:arxiv_doc -Stanford classifier|skos:broader|Text Classification -Lenka Zdeborová|skos:broader|Statistical physics -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. 
Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|sl:tag|tag:survey -Banque Centrale Européenne|skos:broader|Politique monétaire -Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:arxiv_author|Pasquale Minervini -NumPy|skos:broader|Python -Google Structured Data Testing Tool|skos:broader|SEO -Moustique|skos:broader|Insecte -Semantic Desktop|skos:broader|Semanlink related -Wikipedia|skos:broader|Knowledge bases -Irrigation|skos:broader|Eau -Lab-grown organs|skos:broader|Bio-Engineering -Automating the search for a patent's prior art with a full text similarity search [github](https://github.com/helmersl/patent_similarity_search) mouais More than ever, technical inventions are the symbol of our society's advance. Patents guarantee their creators protection against infringement. For an invention being patentable, its novelty and inventiveness have to be assessed. Therefore, a search for published work that describes similar inventions to a given patent application needs to be performed. Currently, this so-called search for prior art is executed with semi-automatically composed keyword queries, which is not only time consuming, but also prone to errors. In particular, errors may systematically arise by the fact that different keywords for the same technical concepts may exist across disciplines. In this paper, a novel approach is proposed, where the full text of a given patent application is compared to existing patents using machine learning and natural language processing techniques to automatically detect inventions that are similar to the one described in the submitted document. Various state-of-the-art approaches for feature extraction and document comparison are evaluated. In addition to that, the quality of the current search process is assessed based on ratings of a domain expert. The evaluation results show that our automated approach, besides accelerating the search process, also improves the search results for prior art with respect to their quality.|sl:tag|tag:ip_ir_ml_ia -An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. 
Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at http://github.com/locuslab/TCN .|sl:arxiv_firstAuthor|Shaojie Bai -Hash Embeddings for Efficient Word Representations A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). recommandé par [Raphaël Sourty](tag:raphaelsty) We present hash embeddings, an efficient method for representing words in a continuous vector form. A hash embedding may be seen as an interpolation between a standard word embedding and a word embedding created using a random hash function (the hashing trick). In hash embeddings each token is represented by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight vector. The final $d$ dimensional representation of the token is the product of the two. Rather than fitting the embedding vectors for each token these are selected by the hashing trick from a shared pool of $B$ embedding vectors. Our experiments show that hash embeddings can easily deal with huge vocabularies consisting of millions of tokens. When using a hash embedding there is no need to create a dictionary before training nor to perform any kind of vocabulary pruning after training. We show that models trained using hash embeddings exhibit at least the same level of performance as models trained using regular embeddings across a wide range of tasks. Furthermore, the number of parameters needed by such an embedding is only a fraction of what is required by a regular embedding. Since standard embeddings and embeddings constructed using the hashing trick are actually just special cases of a hash embedding, hash embeddings can be considered an extension and improvement over the existing regular embedding types.|sl:tag|tag:word_embedding -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. 
Two problems arise by the use of internet communication. First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:arxiv_author|Amit Sahu -Télescope|skos:broader|Exploration spatiale -LDAP|skos:broader|Database -CORS|skos:broader|js -Sample code|skos:broader|Dev -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|sl:tag|tag:text_in_kg_embeddings -ADN|skos:broader|Genetics Génétique -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. 
We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:tag|tag:facebook_fair -Seevl|skos:broader|Musique -Capitalisme financier|skos:broader|Capitalisme -AWS Machine Learning|skos:broader|Machine Learning tool -Multi-hop inference|skos:broader|QA -Insect collapse|skos:broader|Biodiversité : effondrement -Eve africaine|skos:broader|Origines de l'homme -Self-Taught Hashing for Fast Similarity Search Emphasise following issue in Semantic Hashing: obtaining the codes for previously unseen documents. Propose following approach: first find the optimal l-bit binary codes for all documents in the given corpus via unsupervised learning, then train l classifiers via supervised learning to predict the l-bit code for any query document unseen before. (méthode résumée [ici](https://www.semanticscholar.org/paper/Semantic-hashing-using-tags-and-topic-modeling-Wang-Zhang/1a0f660f70fd179003edc271694736baaa39dec4)) The ability of fast similarity search at large scale is of great importance to many Information Retrieval (IR) applications. A promising way to accelerate similarity search is semantic hashing which designs compact binary codes for a large number of documents so that semantically similar documents are mapped to similar codes (within a short Hamming distance). Although some recently proposed techniques are able to generate high-quality codes for documents known in advance, obtaining the codes for previously unseen documents remains to be a very challenging problem. In this paper, we emphasise this issue and propose a novel Self-Taught Hashing (STH) approach to semantic hashing: we first find the optimal $l$-bit binary codes for all documents in the given corpus via unsupervised learning, and then train $l$ classifiers via supervised learning to predict the $l$-bit code for any query document unseen before. Our experiments on three real-world text datasets show that the proposed approach using binarised Laplacian Eigenmap (LapEig) and linear Support Vector Machine (SVM) outperforms state-of-the-art techniques significantly.|sl:arxiv_author|Jun Wang -Google|skos:broader|Search Engines -Configuration ontology|skos:broader|Ontologies -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. 
However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|sl:tag|tag:acl_2019 -AI@Amazon|skos:broader|AI teams -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:arxiv_firstAuthor|Zhilin Yang -Niger : agriculture|skos:broader|Agriculture africaine -Greffe (arbre)|skos:broader|Arbres -Suède|skos:broader|Scandinavie -Boube Gado|skos:broader|Archéologie du Niger -Cité des sciences et de l'industrie|skos:broader|Musée -Anzo|skos:broader|Semantic Web Platform -Fondamentalisme islamique|skos:broader|Islamisme -Bolsonaro|skos:broader|Brésil -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. 
Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:contextualised_word_representations -Snorkel|skos:broader|Training data -US vs Europe|skos:broader|USA -Library (code)|skos:broader|Programming -Kapuscinski|skos:broader|Journaliste -Semantic Overflow|skos:broader|Howto -Python (2 and 3) library for processing textual data. API for diving into common NLP tasks such as part-of-speech tagging, noun phrase extraction, sentiment analysis, classification, translation, and more. Based on NLTK |skos:broader|NLP for Python Provides easy-to-use interfaces to over 50 corpora and lexical resources such as WordNet, along with a suite of text processing libraries for classification, tokenization, stemming, tagging, parsing, and semantic reasoning, wrappers for industrial-strength NLP libraries, and an active discussion forum. Install the data on mac: ``/Applications/Python 3.6/Install Certificates.command``(cf ssl), then ``import nltk ; nltk.download()`` -Cybersex|skos:broader|Sexe -Compressive Transformers for Long-Range Sequence Modelling the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. [Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon) We present the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. We find the Compressive Transformer obtains state-of-the-art language modelling results in the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc respectively. We also find it can model high-frequency speech effectively and can be used as a memory mechanism for RL, demonstrated on an object matching task. To promote the domain of long-range sequence learning, we propose a new open-vocabulary language modelling benchmark derived from books, PG-19.|sl:arxiv_firstAuthor|Jack W. Rae -Serpent|skos:broader|Reptile -Description Logic|skos:broader|Logic -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. 
We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|sl:arxiv_author|Abraham Ittycheriah -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|sl:arxiv_author|Yihe Dong -Transductive Learning|skos:broader|Missing Labels (ML) -2eme guerre mondiale|skos:broader|Histoire du XXe siècle -Subword Semantic Hashing for Intent Classification on Small Datasets In this paper, we introduce the use of Semantic Hashing as embedding for the task of Intent Classification and achieve state-of-the-art performance on three frequently used benchmarks. Intent Classification on a small dataset is a challenging task for data-hungry state-of-the-art Deep Learning based systems. Semantic Hashing is an attempt to overcome such a challenge and learn robust text classification. Current word embedding based are dependent on vocabularies. One of the major drawbacks of such methods is out-of-vocabulary terms, especially when having small training datasets and using a wider vocabulary. This is the case in Intent Classification for chatbots, where typically small datasets are extracted from internet communication. Two problems arise by the use of internet communication. 
First, such datasets miss a lot of terms in the vocabulary to use word embeddings efficiently. Second, users frequently make spelling errors. Typically, the models for intent classification are not trained with spelling errors and it is difficult to think about ways in which users will make mistakes. Models depending on a word vocabulary will always face such issues. An ideal classifier should handle spelling errors inherently. With Semantic Hashing, we overcome these challenges and achieve state-of-the-art results on three datasets: AskUbuntu, Chatbot, and Web Application. Our benchmarks are available online: https://github.com/kumar-shridhar/Know-Your-Intent|sl:arxiv_author|Ayushman Dash -BERT + Sentence Embeddings|skos:broader|Sentence Embeddings -Boura|skos:broader|Art d'Afrique -Coursera: Machine Learning|skos:broader|Andrew Ng -Exploit|skos:broader|Divers -Things, not strings |skos:broader|[Surveys](/tag/?and=knowledge_graph&and=survey) (see also [surveys about graphs](/tag/?and=graph&and=survey)) -Tabulator|skos:broader|Javascript RDF -Google Maps|skos:broader|Earth map -Thèse IRIT-Renault NLP-KB|skos:broader|Knowledge Graphs and NLP -C2GWeb, Product description and Makolab|skos:broader|Makolab -Text Embeddings|skos:broader|NLP: Text Representation -Boura|skos:broader|Archéologie du Niger -Sent2Vec|skos:broader|Sentence Embeddings -Jena TDB|skos:broader|Jena -Ontologies|skos:broader|Semantic Web -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. 
We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:tag|tag:few_shot_learning -CNN Convnet Convnets Convolutional neural networks|skos:broader|ANN NN Artificial neural network -Russie|skos:broader|Ex URSS URSS -Bénin|skos:broader|Afrique de l'Ouest -Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|sl:arxiv_author|Matteo Pagliardini -A system for rapidly creating training sets with weak supervision The System for Programmatically Building and Managing Training Data|skos:broader|the bottleneck of getting labeled training data -Interactive Concept Mining on Personal Data -- Bootstrapping Semantic Services Semantic services (e.g. Semantic Desktops) are still afflicted by a cold start problem: in the beginning, the user's personal information sphere, i.e. files, mails, bookmarks, etc., is not represented by the system. Information extraction tools used to kick-start the system typically create 1:1 representations of the different information items. Higher level concepts, for example found in file names, mail subjects or in the content body of these items, are not extracted. Leaving these concepts out may lead to underperformance, having to many of them (e.g. by making every found term a concept) will clutter the arising knowledge graph with non-helpful relations. In this paper, we present an interactive concept mining approach proposing concept candidates gathered by exploiting given schemata of usual personal information management applications and analysing the personal information sphere using various metrics. To heed the subjective view of the user, a graphical user interface allows to easily rank and give feedback on proposed concept candidates, thus keeping only those actually considered relevant. A prototypical implementation demonstrates major steps of our approach.|sl:arxiv_firstAuthor|Markus Schröder -Word Translation Without Parallel Data we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. 
Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.|sl:tag|tag:word_embedding -Rayons cosmiques|skos:broader|Physique -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:arxiv_author|Bunyamin Sisman -Chris Bizer|skos:broader|SW guys (and girls) -Histoire de la Chine|skos:broader|Chine -Langues vivantes|skos:broader|Langues -Entity Embeddings of Categorical Variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables We map categorical variables in a function approximation problem into Euclidean spaces, which are the entity embeddings of the categorical variables. The mapping is learned by a neural network during the standard supervised training process. Entity embedding not only reduces memory usage and speeds up neural networks compared with one-hot encoding, but more importantly by mapping similar values close to each other in the embedding space it reveals the intrinsic properties of the categorical variables. We applied it successfully in a recent Kaggle competition and were able to reach the third position with relative simple features. We further demonstrate in this paper that entity embedding helps the neural network to generalize better when the data is sparse and statistics is unknown. 
Thus it is especially useful for datasets with lots of high cardinality features, where other methods tend to overfit. We also demonstrate that the embeddings obtained from the trained neural network boost the performance of all tested machine learning methods considerably when used as the input features instead. As entity embedding defines a distance measure for categorical variables it can be used for visualizing categorical data and for data clustering.|sl:arxiv_author|Felix Berkhahn -Uriqr|skos:broader|Tom Heath -Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|sl:arxiv_author|Ning Xie -Perelman|skos:broader|Médaille Fields -Convolutional neural network|skos:broader|Neural networks -Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|sl:arxiv_author|Martin Jaggi -Allemagne|skos:broader|Europe -Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. 
For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincar\\'e ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincar\\'e embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|sl:tag|tag:nlp_facebook -Obama|skos:broader|Homme politique -Music source separation|skos:broader|Musique -Linux hosting|skos:broader|ISP -Semantic Search|skos:broader|Semantic Web -JVisualVM|skos:broader|Java profiling -Online Learning|skos:broader|Technology Enhanced Learning -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:arxiv_author|Been Kim -Semantic Web Client Library|skos:broader|Richard Cyganiak -Paolo Castagna|skos:broader|SW guys (and girls) -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. 
MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:arxiv_author|Weizhu Chen -Schema.org Actions|skos:broader|schema.org -Concept learning|skos:broader|Unsupervised machine learning -Hebb's rule|skos:broader|Neuroscience -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may beused and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, andout-of-distribution detection while also enabling our models to generate samplesrivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and presentan approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-artin both generative and discriminative learning within one hybrid model."|sl:tag|tag:statistical_classification -httpRange-14|skos:broader|URI -Sarkozy : immigration|skos:broader|Sarkozy -Probabilistic relevance model|skos:broader|Ranking (information retrieval) -Language Models as Knowledge Bases? an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as fill-in-the-blank cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. 
We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.|sl:tag|tag:language_models_knowledge -Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data When presented with Out-of-Distribution (OOD) examples, deep neural networks yield confident, incorrect predictions. Detecting OOD examples is challenging, and the potential risks are high. In this paper, we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted. We find that characterizing activity patterns by Gram matrices and identifying anomalies in gram matrix values can yield high OOD detection rates. We identify anomalies in the gram matrices by simply comparing each value with its respective range observed over the training data. Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data for fine-tuning hyperparameters, nor does it require OOD access for inferring parameters. The method is applicable across a variety of architectures and vision datasets and, for the important and surprisingly hard task of detecting far-from-distribution out-of-distribution examples, it generally performs better than or equal to state-of-the-art OOD detection methods (including those that do assume access to OOD examples).|sl:arxiv_author|Sageev Oore -Siamese networks|skos:broader|Neural networks -Entities as Experts: Sparse Memory Access with Entity Supervision the problem of capturing declarative knowledge in the learned parameters of a language model... Entities as Experts (EaE) can access distinct memories of the entities mentioned in a piece of text; To understand the motivation for distinct and independent entity representations: A traditional Transformer would need to build an internal representation of Charles Darwin from the words “Charles” and “Darwin”... Conversely, EAE can access a dedicated representation of “Charles Darwin”, which is a memory of all of the contexts in which this entity has previously been mentioned.... Having retrieved and re-integrated this memory it is much easier for EAE to relate the question to the answer EaE's entity representations are learned directly from text. Correct identification, and representation, of entities is essential to EaE's performance Based on transformer architecture Extension: [Facts as Experts](doc:2020/07/2007_00849_facts_as_experts_) We focus on the problem of capturing declarative knowledge in the learned parameters of a language model. We introduce a new model, Entities as Experts (EaE), that can access distinct memories of the entities mentioned in a piece of text. 
Unlike previous efforts to integrate entity knowledge into sequence models, EaE's entity representations are learned directly from text. These representations capture sufficient knowledge to answer TriviaQA questions such as Which Dr. Who villain has been played by Roger Delgado, Anthony Ainley, Eric Roberts?. EaE outperforms a Transformer model with $30\\times$ the parameters on this task. According to the Lama knowledge probes, EaE also contains more factual knowledge than a similar sized Bert. We show that associating parameters with specific entities means that EaE only needs to access a fraction of its parameters at inference time, and we show that the correct identification, and representation, of entities is essential to EaE's performance. We also argue that the discrete and independent entity representations in EaE make it more modular and interpretable than the Transformer architecture on which it is based.|sl:tag|tag:memory_networks -Structured Knowledge Distillation for Dense Prediction In this paper, we consider transferring the structure information from large networks to small ones for dense prediction tasks. Previous knowledge distillation strategies used for dense prediction tasks often directly borrow the distillation scheme for image classification and perform knowledge distillation for each pixel separately, leading to sub-optimal performance. Here we propose to distill structured knowledge from large networks to small networks, taking into account the fact that dense prediction is a structured prediction problem. Specifically, we study two structured distillation schemes: i)pair-wise distillation that distills the pairwise similarities by building a static graph, and ii)holistic distillation that uses adversarial training to distill holistic knowledge. The effectiveness of our knowledge distillation approaches is demonstrated by extensive experiments on three dense prediction tasks: semantic segmentation, depth estimation, and object detection.|sl:arxiv_author|Changyong Shun -Zoroastre|skos:broader|Philosophe -Oléoduc|skos:broader|Pétrole -Free will|skos:broader|Liberté -Lab-grown organs|skos:broader|Nous vivons une époque moderne -Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA) Extreme multi-label text classification (XMTC) aims at tagging a document with most relevant labels from an extremely large-scale label set. It is a challenging problem especially for the tail labels because there are only few training documents to build classifier. This paper is motivated to better explore the semantic relationship between each document and extreme labels by taking advantage of both document content and label correlation. Our objective is to establish an explicit label-aware representation for each document with a hybrid attention deep neural network model(LAHA). LAHA consists of three parts. The first part adopts a multi-label self-attention mechanism to detect the contribution of each word to labels. The second part exploits the label structure and document content to determine the semantic connection between words and labels in a same latent space. 
An adaptive fusion strategy is designed in the third part to obtain the final label-aware document representation so that the essence of previous two parts can be sufficiently integrated. Extensive experiments have been conducted on six benchmark datasets by comparing with the state-of-the-art methods. The results show the superiority of our proposed LAHA method, especially on the tail labels.|sl:arxiv_author|Lin Xiao -SHACL|skos:broader|W3C Recommendation -LOD2|skos:broader|European project -LDP @ W3C|skos:broader|Linked Data Platform -Aldous Huxley|skos:broader|Anticipation -Javascript framework|skos:broader|Frameworks -ROC Curve|skos:broader|Statistical classification -Arduino|skos:broader|Electronics -K-BERT: Enabling Language Representation with Knowledge Graph a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge Pre-trained language representation models, such as BERT, capture a general language representation from large-scale corpora, but lack domain-specific knowledge. When reading a domain text, experts make inferences with relevant knowledge. For machines to achieve this capability, we propose a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge. However, too much knowledge incorporation may divert the sentence from its correct meaning, which is called knowledge noise (KN) issue. To overcome KN, K-BERT introduces soft-position and visible matrix to limit the impact of knowledge. K-BERT can easily inject domain knowledge into the models by equipped with a KG without pre-training by-self because it is capable of loading model parameters from the pre-trained BERT. Our investigation reveals promising results in twelve NLP tasks. Especially in domain-specific tasks (including finance, law, and medicine), K-BERT significantly outperforms BERT, which demonstrates that K-BERT is an excellent choice for solving the knowledge-driven problems that require experts.|sl:tag|tag:bert -Venus de Brassempouy|skos:broader|Vénus préhistoriques -V2V|skos:broader|Automotive -Music store|skos:broader|Musique en ligne -AWS|skos:broader|Cloud -JavaScript|skos:broader|Dev -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. 
Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:arxiv_author|Luke Zettlemoyer -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|sl:tag|tag:connectionist_vs_symbolic_debate -Hydra|skos:broader|Linked Data -PHP|skos:broader|Programming language -AllegroGraph|skos:broader|TripleStore -Deep NLP|skos:broader|NN 4 NLP -How much information in a language?|skos:broader|Language -Indo-européen|skos:broader|Langues -Manu Dibango|skos:broader|Musicien -Jean-Paul Cardinal|skos:broader|Ami -Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.|sl:arxiv_author|Yiming Yang -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. 
We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:arxiv_author|Sida Wang -A Theoretical Analysis of Contrastive Unsupervised Representation Learning [blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) Recent empirical works have successfully used unlabeled data to learn feature representations that are broadly useful in downstream classification tasks. Several of these methods are reminiscent of the well-known word2vec embedding algorithm: leveraging availability of pairs of semantically similar data points and negative samples, the learner forces the inner product of representations of similar pairs with each other to be higher on average than with negative samples. The current paper uses the term contrastive learning for such algorithms and presents a theoretical framework for analyzing them by introducing latent classes and hypothesizing that semantically similar points are sampled from the same latent class. This framework allows us to show provable guarantees on the performance of the learned representations on the average classification task that is comprised of a subset of the same set of latent classes. Our generalization bound also shows that learned representations can reduce (labeled) sample complexity on downstream tasks. We conduct controlled experiments in both the text and image domains to support the theory.|sl:arxiv_author|Mikhail Khodak -Jean-Claude Ameisen|skos:broader|France Inter -Norman Walsh|skos:broader|Technical girls and guys -RDF Schema inferencing|skos:broader|RDF Schema -Semantic Web Client Library|skos:broader|Semantic Web : Application -Industrie pharmaceutique|skos:broader|Economie -Soja|skos:broader|Agriculture -Neural Bag of Words|skos:broader|Bag-of-words -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties. We include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information.
However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:tag|tag:nlp_google -Danny Ayers|skos:broader|Technical girls and guys -Semantic Blog|skos:broader|Semantic Web -Médaille Fields|skos:broader|Mathématiques -Diplomatie américaine|skos:broader|USA -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|sl:arxiv_author|Ali Farhadi -Semi-supervised Clustering for Short Text via Deep Representation Learning semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: 1. assign each short text to its nearest centroid based on its representation from the current neural networks; 2. re-estimate the cluster centroids based on cluster assignments from step (1); 3. update neural networks according to the objective by keeping centroids and cluster assignments fixed. In this work, we propose a semi-supervised method for short text clustering, where we represent texts as distributed vectors with neural networks, and use a small amount of labeled data to specify our intention for clustering. 
We design a novel objective to combine the representation learning process and the k-means clustering process together, and optimize the objective with both labeled data and unlabeled data iteratively until convergence through three steps: (1) assign each short text to its nearest centroid based on its representation from the current neural networks; (2) re-estimate the cluster centroids based on cluster assignments from step (1); (3) update neural networks according to the objective by keeping centroids and cluster assignments fixed. Experimental results on four datasets show that our method works significantly better than several other text clustering methods.|sl:arxiv_author|Haitao Mi -OS X 10.6 - Snow leopard|skos:broader|Mac OS X -BLINK|skos:broader|Zero-shot Entity Linking -Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. Snorkel DryBell, a new weak supervision management system for this setting. [Blog post](/doc/2019/06/google_ai_blog_harnessing_orga) Labeling training data is one of the most costly bottlenecks in developing machine learning-based applications. We present a first-of-its-kind study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude, and introduce Snorkel DryBell, a new weak supervision management system for this setting. Snorkel DryBell builds on the Snorkel framework, extending it in three critical aspects: flexible, template-based ingestion of diverse organizational knowledge, cross-feature production serving, and scalable, sampling-free execution. On three classification tasks at Google, we find that Snorkel DryBell creates classifiers of comparable quality to ones trained with tens of thousands of hand-labeled examples, converts non-servable organizational resources to servable models for an average 52% performance improvement, and executes over millions of data points in tens of minutes.|sl:tag|tag:nlp_using_knowledge -Rétrovirus|skos:broader|Virus -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|sl:tag|tag:ai_facebook -WWW 2013|skos:broader|TheWebConf -Facebook|skos:broader|Social Networks -NN 4 NLP|skos:broader|NLP techniques -New York|skos:broader|Ville -Configuration as Linked Data|skos:broader|Configuration and SW -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. 
Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:arxiv_author|Griffin Weber -LibShortText|skos:broader|National Taiwan University -Génocide rwandais|skos:broader|Rwanda -httpRange-14|skos:broader|Dereferencing HTTP URIs -Greasemonkey|skos:broader|Firefox -CEA|skos:broader|Industrie nucléaire -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:arxiv_author|Zhilin Yang -BBC semantic publishing|skos:broader|Dynamic Semantic Publishing -Sony Hack|skos:broader|Hack -Semantic Web search engine|skos:broader|Search Engines -Quora Question Pairs|skos:broader|Quora -Visual Recognition|skos:broader|IA AI -Term Frequency-Inverse Document Frequency. major limitations: - It computes document similarity directly in the word-count space, which could be slow for large vocabularies. - It assumes that the counts of different words provide independent evidence of similarity. - It makes no use of semantic similarities between words. |skos:broader|formalism of information retrieval useful to derive functions that rank matching documents according to their relevance to a given search query. 
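The tf-idf record above lists the method's main limitations: similarity is computed directly in word-count space, and counts of different words are treated as independent evidence, so semantic similarity between words is ignored. Purely as an illustrative sketch of that behaviour (not part of the dataset; scikit-learn and the sample documents below are assumptions made for the example):

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Hypothetical sample documents: the first two overlap lexically, the third is
# related in meaning but shares no terms with the first.
docs = [
    "embedding entities of a knowledge graph",
    "knowledge graph entity embeddings",
    "learning vector representations for KG nodes",
]

X = TfidfVectorizer().fit_transform(docs)  # sparse document-term matrix

# Cosine similarity in tf-idf space rewards exact word overlap only:
print(cosine_similarity(X[0], X[1])[0, 0])  # > 0: shared terms "knowledge", "graph"
print(cosine_similarity(X[0], X[2])[0, 0])  # 0.0: no shared terms despite related meaning

The zero score on the third pair is exactly the limitation the record describes: synonyms and morphological variants ("entity" vs. "entities") contribute nothing to the similarity.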
-Peace Corps|skos:broader|USA -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|sl:arxiv_author|Yong Zhu -Himalaya|skos:broader|Montagne -Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs predicting embeddings instead of word IDs (avoids a discrete softmax, using a new loss) [@honnibal](https://twitter.com/honnibal/status/1073513114468081664) The Softmax function is used in the final layer of nearly all existing sequence-to-sequence models for language generation. However, it is usually the slowest layer to compute which limits the vocabulary size to a subset of most frequent types; and it has a large memory footprint. We propose a general technique for replacing the softmax layer with a continuous embedding layer. Our primary innovations are a novel probabilistic loss, and a training and inference procedure in which we generate a probability distribution over pre-trained word embeddings, instead of a multinomial distribution over the vocabulary obtained via softmax. We evaluate this new class of sequence-to-sequence models with continuous outputs on the task of neural machine translation. We show that our models obtain up to 2.5x speed-up in training time while performing on par with the state-of-the-art models in terms of translation quality. These models are capable of handling very large vocabularies without compromising on translation quality. 
They also produce more meaningful errors than in the softmax-based models, as these errors typically lie in a subspace of the vector space of the reference translations.|sl:tag|tag:neural_machine_translation -NLP@Facebook|skos:broader|AI@Facebook -Origine de l'agriculture|skos:broader|Néolithique -GraphQL|skos:broader|Facebook -Design Challenges and Misconceptions in Neural Sequence Labeling design challenges of constructing effective and efficient neural sequence labeling systems We investigate the design challenges of constructing effective and efficient neural sequence labeling systems, by reproducing twelve neural sequence labeling models, which include most of the state-of-the-art structures, and conduct a systematic model comparison on three benchmarks (i.e. NER, Chunking, and POS tagging). Misconceptions and inconsistent conclusions in existing literature are examined and clarified under statistical experiments. In the comparison and analysis process, we reach several practical conclusions which can be useful to practitioners.|sl:arxiv_author|Jie Yang -Film français|skos:broader|Cinéma français -Machine Learning library|skos:broader|Library (code) -A practical tutorial on autoencoders for nonlinear feature fusion: Taxonomy, models, software and guidelines Many of the existing machine learning algorithms, both supervised and unsupervised, depend on the quality of the input characteristics to generate a good model. The amount of these variables is also important, since performance tends to decline as the input dimensionality increases, hence the interest in using feature fusion techniques, able to produce feature sets that are more compact and higher level. A plethora of procedures to fuse original variables for producing new ones has been developed in the past decades. The most basic ones use linear combinations of the original variables, such as PCA (Principal Component Analysis) and LDA (Linear Discriminant Analysis), while others find manifold embeddings of lower dimensionality based on non-linear combinations, such as Isomap or LLE (Locally Linear Embedding) techniques. More recently, autoencoders (AEs) have emerged as an alternative to manifold learning for conducting nonlinear feature fusion. Dozens of AE models have been proposed lately, each with its own specific traits. Although many of them can be used to generate reduced feature sets through the fusion of the original ones, there are also AEs designed with other applications in mind. The goal of this paper is to provide the reader with a broad view of what an AE is, how they are used for feature fusion, a taxonomy gathering a broad range of models, and how they relate to other classical techniques. In addition, a set of didactic guidelines on how to choose the proper AE for a given task is supplied, together with a discussion of the software tools available. Finally, two case studies illustrate the usage of AEs with datasets of handwritten digits and breast cancer.|sl:arxiv_author|Francisco Herrera -FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence [github](https://github.com/google-research/fixmatch) we demonstrate the power of a simple combination of two common Semi-Supervised Learning methods: consistency regularization and pseudo-labeling. 1. First generates pseudo-labels using the model’s predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. 2.
The model is then trained to predict the pseudo-label when fed a strongly augmented version of the same image. Semi-supervised learning (SSL) provides an effective means of leveraging unlabeled data to improve a model's performance. In this paper, we demonstrate the power of a simple combination of two common SSL methods: consistency regularization and pseudo-labeling. Our algorithm, FixMatch, first generates pseudo-labels using the model's predictions on weakly-augmented unlabeled images. For a given image, the pseudo-label is only retained if the model produces a high-confidence prediction. The model is then trained to predict the pseudo-label when fed a strongly-augmented version of the same image. Despite its simplicity, we show that FixMatch achieves state-of-the-art performance across a variety of standard semi-supervised learning benchmarks, including 94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just 4 labels per class. Since FixMatch bears many similarities to existing SSL methods that achieve worse performance, we carry out an extensive ablation study to tease apart the experimental factors that are most important to FixMatch's success. We make our code available at https://github.com/google-research/fixmatch.|sl:tag|tag:semi_supervised_learning -fps AND LDOW2008|skos:broader|fps -OKFN Datahub|skos:broader|Open Knowledge Foundation -Apache Mahout|skos:broader|Data mining tools -LHC|skos:broader|CERN -NSA spying scandal|skos:broader|PRISM -Astéroïde|skos:broader|Astronomie -Interactive Knowledge Stack|skos:broader|Semantic CMS -Machine Learning library|skos:broader|Machine Learning tool -Haoussa|skos:broader|Afrique de l'Ouest -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|sl:arxiv_author|Hanxiao Liu -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. 
[github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:tag|tag:word_embeddings_with_lexical_resources -Bon chef d'état|skos:broader|Chef d'état -Natural Language Processing (almost) from Scratch seminal work Abstract: a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements We propose a unified neural network architecture and learning algorithm that can be applied to various natural language processing tasks including: part-of-speech tagging, chunking, named entity recognition, and semantic role labeling. This versatility is achieved by trying to avoid task-specific engineering and therefore disregarding a lot of prior knowledge. Instead of exploiting man-made input features carefully optimized for each task, our system learns internal representations on the basis of vast amounts of mostly unlabeled training data. This work is then used as a basis for building a freely available tagging system with good performance and minimal computational requirements.|sl:tag|tag:deep_nlp -TAP|skos:broader|Stanford -Replace or Retrieve Keywords In Documents at Scale For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). Different from Aho Corasick Algorithm, as it doesn't match substrings. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning [Github](https://github.com/vi3k6i5/flashtext) In this paper we introduce, the FlashText algorithm for replacing keywords or finding keywords in a given text. FlashText can search or replace keywords in one pass over a document. The time complexity of this algorithm is not dependent on the number of terms being searched or replaced. For a document of size N (characters) and a dictionary of M keywords, the time complexity will be O(N). 
This algorithm is much faster than Regex, because regex time complexity is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't match substrings. FlashText is designed to only match complete words (words with boundary characters on both sides). For an input dictionary of {Apple}, this algorithm won't match it to 'I like Pineapple'. This algorithm is also designed to go for the longest match first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning. We have made python implementation of this algorithm available as open-source on GitHub, released under the permissive MIT License.|sl:tag|tag:aho_corasick_algorithm -SQL|skos:broader|Database -InceptionTime: Finding AlexNet for Time Series Classification Time series classification (TSC) is the area of machine learning interested in learning how to assign labels to time series. The last few decades of work in this area have led to significant progress in the accuracy of classifiers, with the state of the art now represented by the HIVE-COTE algorithm. While extremely accurate, HIVE-COTE is infeasible to use in many applications because of its very high training time complexity in O(N^2T^4) for a dataset with N time series of length T. For example, it takes HIVE-COTE more than 72,000s to learn from a small dataset with N=700 time series of short length T=46. Deep learning, on the other hand, has now received enormous attention because of its high scalability and state-of-the-art accuracy in computer vision and natural language processing tasks. Deep learning for TSC has only very recently started to be explored, with the first few architectures developed over the last 3 years only. The accuracy of deep learning for TSC has been raised to a competitive level, but has not quite reached the level of HIVE-COTE. This is what this paper achieves: outperforming HIVE-COTE's accuracy together with scalability. We take an important step towards finding the AlexNet network for TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural Network (CNN) models, inspired by the Inception-v4 architecture. Our experiments show that InceptionTime slightly outperforms HIVE-COTE with a win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more accurate, but it is much faster: InceptionTime learns from that same dataset with 700 time series in 2,300s but can also learn from a dataset with 8M time series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE.|sl:tag|tag:arxiv_doc -Mathématicien|skos:broader|Mathématiques -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. 
Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:arxiv_firstAuthor|Yuan Zhang -IP address|skos:broader|Internet -Nearest neighbor search|skos:broader|Algorithmes -Keyword Spotting|skos:broader|Keywords -Election|skos:broader|Politique -Neuroscience AND Machine learning|skos:broader|Machine learning -Semantic Web : introduction|skos:broader|Semantic web: evangelization -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense of a fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:arxiv_author|Mark Neumann -Prix Nobel de physique|skos:broader|Prix Nobel -Justice américaine|skos:broader|Justice -US vs Europe|skos:broader|Europe -Detroit|skos:broader|Ville -RDFj|skos:broader|backplanejs -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. Mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. 
For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines that do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Rhomni St. John -Katie Portwin|skos:broader|SW guys (and girls) -Uncontacted peoples|skos:broader|Peuples -Regex|skos:broader|Dev tools -Generalization through Memorization: Nearest Neighbor Language Models extend LMs with nearest neighbor search in embedding space We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. The nearest neighbors are computed according to distance in the pre-trained LM embedding space, and can be drawn from any text collection, including the original LM training data. Applying this augmentation to a strong Wikitext-103 LM, with neighbors drawn from the original training set, our $k$NN-LM achieves a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no additional training. We also show that this approach has implications for efficiently scaling up to larger training sets and allows for effective domain adaptation, by simply varying the nearest neighbor datastore, again without further training. Qualitatively, the model is particularly helpful in predicting rare patterns, such as factual knowledge. Together, these results strongly suggest that learning similarity between sequences of text is easier than predicting the next word, and that nearest neighbor search is an effective approach for language modeling in the long tail.|sl:arxiv_author|Urvashi Khandelwal -Europe and UK|skos:broader|Royaume Uni -Film noir|skos:broader|Film -Coursera: Web Intelligence and Big Data|skos:broader|Web Intelligence -RDF Fails|skos:broader|RDF -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. [conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. 
Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction method. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperforms several popular clustering methods when tested on three public short text datasets.|sl:tag|tag:clustering_of_text_documents -Massacre de la Saint-Barthélemy|skos:broader|Guerres de religion -End-To-End Memory Networks Neural network with a recurrent attention model over a possibly large external memory. cited by [#A. Bordes](/tag/antoine_bordes) at [#ParisIsAI conf 2018](/tag/france_is_ai_2018.html) We introduce a neural network with a recurrent attention model over a possibly large external memory. The architecture is a form of Memory Network (Weston et al., 2015) but unlike the model in that work, it is trained end-to-end, and hence requires significantly less supervision during training, making it more generally applicable in realistic settings. It can also be seen as an extension of RNNsearch to the case where multiple computational steps (hops) are performed per output symbol. The flexibility of the model allows us to apply it to tasks as diverse as (synthetic) question answering and to language modeling. For the former our approach is competitive with Memory Networks, but with less supervision. For the latter, on the Penn TreeBank and Text8 datasets our approach demonstrates comparable performance to RNNs and LSTMs. In both cases we show that the key concept of multiple computational hops yields improved results.|sl:arxiv_author|Jason Weston -On the Efficacy of Knowledge Distillation Evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. IEEE International Conference on Computer Vision (ICCV), 2019 Despite widespread use, an understanding of when the student can learn from the teacher is missing. Our key finding is that knowledge distillation is not a panacea and cannot succeed when student capacity is too low to successfully mimic the teacher. We have presented an approach to mitigate this issue by stopping teacher training early. In this paper, we present a thorough evaluation of the efficacy of knowledge distillation and its dependence on student and teacher architectures. Starting with the observation that more accurate teachers often don't make good teachers, we attempt to tease apart the factors that affect knowledge distillation performance. We find crucially that larger models do not often make better teachers. We show that this is a consequence of mismatched capacity, and that small students are unable to mimic large teachers. We find typical ways of circumventing this (such as performing a sequence of knowledge distillation steps) to be ineffective. Finally, we show that this effect can be mitigated by stopping the teacher's training early.
Our results generalize across datasets and models.|sl:tag|tag:knowledge_distillation -Femme célèbre (où qui mérite de l'être)|skos:broader|Homme célèbre -Semantic Hashing|skos:broader|Similarity queries -W3C Submission|skos:broader|W3C -Film japonais|skos:broader|Japon -Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs [GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) The recent proliferation of knowledge graphs (KGs) coupled with incomplete or partial information, in the form of missing relations (links) between entities, has fueled a lot of research on knowledge base completion (also known as relation prediction). Several recent works suggest that convolutional neural network (CNN) based models generate richer and more expressive feature embeddings and hence also perform well on relation prediction. However, we observe that these KG embeddings treat triples independently and thus fail to cover the complex and hidden information that is inherently implicit in the local neighborhood surrounding a triple. To this effect, our paper proposes a novel attention based feature embedding that captures both entity and relation features in any given entity's neighborhood. Additionally, we also encapsulate relation clusters and multihop relations in our model. Our empirical study offers insights into the efficacy of our attention based model and we show marked performance gains in comparison to state of the art methods on all datasets.|sl:arxiv_author|Manohar Kaul -TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification. LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. We also perform a comprehensive analysis to demonstrate great future opportunities. The data and code of the dataset are provided in \\url{https://github.com/wenhuchen/Table-Fact-Checking}.|sl:arxiv_author|William Yang Wang -Jerma|skos:broader|Peuples -The information bottleneck method We define the relevant information in a signal x ∈ X as being the information that this signal provides about another signal y ∈ Y. 
Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal x requires more than just predicting y, it also requires specifying which features of X play a role in the prediction. We formalize this problem as that of finding a short code for X that preserves the maximum information about Y. That is, we squeeze the information that X provides about Y through a ‘bottleneck’ formed by a limited set of codewords X̃... This approach yields an exact set of self consistent equations for the coding rules X → X̃ and X̃ → Y. (from the intro): how to define meaningful / relevant information? An issue left out of information theory by Shannon (focused on the problem of transmitting information rather than judging its value to the recipient), which leads to considering statistical and information theoretic principles as almost irrelevant for the question of meaning. In contrast, we argue here that information theory, in particular lossy source compression, provides a natural quantitative approach to the question of “relevant information.” Specifically, we formulate a variational principle for the extraction or efficient representation of relevant information. We define the relevant information in a signal $x \in X$ as being the information that this signal provides about another signal $y \in Y$. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal $x$ requires more than just predicting $y$, it also requires specifying which features of $X$ play a role in the prediction. We formalize this problem as that of finding a short code for $X$ that preserves the maximum information about $Y$. That is, we squeeze the information that $X$ provides about $Y$ through a `bottleneck' formed by a limited set of codewords $\tilde{X}$. This constrained optimization problem can be seen as a generalization of rate distortion theory in which the distortion measure $d(x,\tilde{x})$ emerges from the joint statistics of $X$ and $Y$. This approach yields an exact set of self consistent equations for the coding rules $X \to \tilde{X}$ and $\tilde{X} \to Y$. Solutions to these equations can be found by a convergent re-estimation method that generalizes the Blahut-Arimoto algorithm. Our variational principle provides a surprisingly rich framework for discussing a variety of problems in signal processing and learning, as will be described in detail elsewhere.|sl:arxiv_author|Naftali Tishby Hebrew University and NEC Research Institute -Ballmer|skos:broader|Microsoft -Cory Doctorow|skos:broader|Anticipation -Probing Neural Network Comprehension of Natural Language Arguments what has BERT learned about argument comprehension? [Comments](/doc/2019/07/bert_s_success_in_some_benchmar) We are surprised to find that BERT's peak performance of 77% on the Argument Reasoning Comprehension Task reaches just three points below the average untrained human baseline. However, we show that this result is entirely accounted for by exploitation of spurious statistical cues in the dataset. We analyze the nature of these cues and demonstrate that a range of models all exploit them. This analysis informs the construction of an adversarial dataset on which all models achieve random accuracy.
Our adversarial dataset provides a more robust assessment of argument comprehension and should be adopted as the standard in future work.|sl:tag|tag:bert -Quantum biology|skos:broader|Biology -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem-specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|sl:tag|tag:reseaux_bayesiens -Dosso|skos:broader|Niger -Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. Neural-symbolic computing has now become the subject of interest of both academic and industry research laboratories. Graph Neural Networks (GNN) have been widely used in relational and symbolic domains, with widespread application of GNNs in combinatorial optimization, constraint satisfaction, relational reasoning and other scientific domains. The need for improved explainability, interpretability and trust of AI systems in general demands principled methodologies, as suggested by neural-symbolic computing. In this paper, we review the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. This includes the application of GNNs in several domains as well as its relationship to current developments in neural-symbolic computing.|sl:arxiv_author|Marco Gori -Google Knowledge Graph|skos:broader|Knowledge Graphs -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts.
Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|sl:arxiv_author|Yajuan Lyu -Revisiting Semi-Supervised Learning with Graph Embeddings We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.|sl:tag|tag:ruslan_salakhutdinov -Efficient Estimation of Word Representations in Vector Space We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities. We propose two novel model architectures for computing continuous vector representations of words from very large data sets. 
The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.|sl:arxiv_author|Greg Corrado -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:tag|tag:arxiv_doc -Deep latent variable models|skos:broader|Deep Learning -Subspace clustering|skos:broader|Clustering -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. 
We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:arxiv_firstAuthor|Rakshit Trivedi -Heredia|skos:broader|Poète -SIMILE Timeline|skos:broader|SIMILE -Linear Algebraic Structure of Word Senses, with Applications to Polysemy Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses Each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. The success of the approach is mathematically explained using a variant of the random walk on discourses model (random walk: a generative model for language). Under the assumptions of this model, there exists a linear relationship between the vector of a word w and the vectors of the words in its contexts (It is not the average of the words in w's context, but in a given corpus the matrix of the linear relationship does not depend on w. It can be estimated, and so we can compute the embedding of a word from the contexts it belongs to) [Related blog post](/doc/?uri=https%3A%2F%2Fwww.offconvex.org%2F2016%2F07%2F10%2Fembeddingspolysemy%2F) Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 discourse atoms that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.|sl:arxiv_author|Sanjeev Arora -Coupe du monde 2006|skos:broader|Coupe du monde de football -Océan|skos:broader|Mer -GAO|skos:broader|C2GWeb, Product description and Makolab -Compagnies pétrolières|skos:broader|Pétrole -DeepType: Multilingual Entity Linking by Neural Type System Evolution The wealth of structured (e.g. Wikidata) and unstructured data about the world available today presents an incredible opportunity for tomorrow's Artificial Intelligence. So far, integration of these two different modalities is a difficult process, involving many decisions concerning how best to represent the information so that it will be captured or useful, and hand-labeling large amounts of data. DeepType overcomes this challenge by explicitly integrating symbolic information into the reasoning process of a neural network with a type system. First we construct a type system, and second, we use it to constrain the outputs of a neural network to respect the symbolic structure. We achieve this by reformulating the design problem into a mixed integer problem: create a type system and subsequently train a neural network with it. 
In this reformulation discrete variables select which parent-child relations from an ontology are types within the type system, while continuous variables control a classifier fit to the type system. The original problem cannot be solved exactly, so we propose a 2-step algorithm: 1) heuristic search or stochastic optimization over discrete variables that define a type system informed by an Oracle and a Learnability heuristic, 2) gradient descent to fit classifier parameters. We apply DeepType to the problem of Entity Linking on three standard datasets (i.e. WikiDisamb30, CoNLL (YAGO), TAC KBP 2010) and find that it outperforms all existing solutions by a wide margin, including approaches that rely on a human-designed type system or recent deep learning-based entity embeddings, while explicitly using symbolic information lets it integrate new entities without retraining.|sl:arxiv_author|Olivier Raiman -A Survey Of Cross-lingual Word Embedding Models Cross-lingual representations of words enable us to reason about word meaning in multilingual contexts and are a key facilitator of cross-lingual transfer when developing natural language processing models for low-resource languages. In this survey, we provide a comprehensive typology of cross-lingual word embedding models. We compare their data requirements and objective functions. The recurring theme of the survey is that many of the models presented in the literature optimize for the same objectives, and that seemingly different models are often equivalent modulo optimization strategies, hyper-parameters, and such. We also discuss the different ways cross-lingual word embeddings are evaluated, as well as future challenges and research horizons.|sl:arxiv_author|Sebastian Ruder -Obélisque d'Axoum|skos:broader|Axoum -Statistical physics|skos:broader|Physique -Génétique et Évolution|skos:broader|Evolution -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. 
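The Learned Index Structures entry above (its abstract continues below) replaces a B-Tree with a model that predicts a key's position plus a bounded correction step. A toy sketch under simple assumptions: a linear model over a sorted numeric array, with the maximum training error used as the search bound:

```python
import numpy as np

# Toy "learned index": model keys -> positions, then search within the error bound.
keys = np.sort(np.random.default_rng(0).uniform(0, 1e6, 100_000))
pos = np.arange(len(keys))
slope, intercept = np.polyfit(keys, pos, deg=1)                   # the "model"
max_err = int(np.abs(slope * keys + intercept - pos).max()) + 1   # worst-case miss

def lookup(key):
    guess = int(slope * key + intercept)
    lo, hi = max(0, guess - max_err), min(len(keys), guess + max_err + 1)
    i = lo + int(np.searchsorted(keys[lo:hi], key))
    return i if i < len(keys) and keys[i] == key else None

assert lookup(keys[1234]) == 1234
```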
More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:tag|tag:arxiv_doc -Lexicon Infused Phrase Embeddings for Named Entity Resolution Employs lexicons as part of the word embedding training: The skip-gram model can be trained to predict not only neighboring words but also lexicon membership of the central word (or phrase). Quickly demonstrates how we can plug phrase embeddings into an existing log-linear CRF System. Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.|sl:tag|tag:word_embeddings_with_lexical_resources -DocBERT: BERT for Document Classification We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work.|sl:arxiv_firstAuthor|Ashutosh Adhikari -Universal Language Model Fine-tuning for Text Classification code is available in the fastai lib [blog post](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) [see also](/doc/?uri=https%3A%2F%2Fyashuseth.blog%2F2018%2F06%2F17%2Funderstanding-universal-language-model-fine-tuning-ulmfit%2F) Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100x more data. 
We open-source our pretrained models and code.|sl:arxiv_author|Sebastian Ruder -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|sl:tag|tag:chris_manning -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. 
We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|sl:tag|tag:fasttext -MOAT|skos:broader|Alexandre Passant -AI@Facebook|skos:broader|Artificial Intelligence -Word embeddings|skos:broader|Embeddings in NLP -fps pres|skos:broader|fps -MVC|skos:broader|Design pattern -Datalift|skos:broader|Semantic Web project -TextBlob|skos:broader|NLP tools -Stanislas Dehaene|skos:broader|Neuroscience -A Strong Baseline for Learning Cross-Lingual Word Embeddings from Sentence Alignments While cross-lingual word embeddings have been studied extensively in recent years, the qualitative differences between the different algorithms remain vague. We observe that whether or not an algorithm uses a particular feature set (sentence IDs) accounts for a significant performance gap among these algorithms. This feature set is also used by traditional alignment algorithms, such as IBM Model-1, which demonstrate similar performance to state-of-the-art embedding algorithms on a variety of benchmarks. Overall, we observe that different algorithmic approaches for utilizing the sentence ID feature space result in similar performance. This paper draws both empirical and theoretical parallels between the embedding and alignment literature, and suggests that adding additional sources of information, which go beyond the traditional signal of bilingual sentence-aligned corpora, may substantially improve cross-lingual word embeddings, and that future baselines should at least take such features into account.|sl:tag|tag:cross_lingual_word_embeddings -bi-LSTM|skos:broader|LSTM -Sony Hack|skos:broader|Sony -AI@Stanford|skos:broader|AI teams -Human-like AI|skos:broader|Artificial general intelligence -A Theoretical Analysis of Contrastive Unsupervised Representation Learning [blog post](/doc/?uri=http%3A%2F%2Fwww.offconvex.org%2F2019%2F03%2F19%2FCURL%2F) Recent empirical works have successfully used unlabeled data to learn feature representations that are broadly useful in downstream classification tasks. Several of these methods are reminiscent of the well-known word2vec embedding algorithm: leveraging availability of pairs of semantically similar data points and negative samples, the learner forces the inner product of representations of similar pairs with each other to be higher on average than with negative samples. The current paper uses the term contrastive learning for such algorithms and presents a theoretical framework for analyzing them by introducing latent classes and hypothesizing that semantically similar points are sampled from the same latent class. This framework allows us to show provable guarantees on the performance of the learned representations on the average classification task that is comprised of a subset of the same set of latent classes. Our generalization bound also shows that learned representations can reduce (labeled) sample complexity on downstream tasks. We conduct controlled experiments in both the text and image domains to support the theory.|sl:arxiv_author|Hrishikesh Khandeparkar -Python tools|skos:broader|Dev tools -Semantics of skos:Concept|skos:broader|SKOS -Médecins sans frontières|skos:broader|ONG -Poincaré Embeddings for Learning Hierarchical Representations While complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. 
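The fastText "Bag of Tricks" entry above pairs averaged word and n-gram features with a linear classifier. A minimal sketch with the fasttext Python package; the training file name and hyperparameters are placeholders (the Poincaré abstract around this point continues below):

```python
import fasttext

# train.txt (placeholder): one example per line, "__label__<class> <text>"
model = fasttext.train_supervised("train.txt", lr=0.5, epoch=5, wordNgrams=2)
labels, probs = model.predict("which linux distribution is best for beginners")
print(labels, probs)
```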
For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space Representation learning has become an invaluable approach for learning from symbolic data such as text and graphs. However, while complex symbolic datasets often exhibit a latent hierarchical structure, state-of-the-art methods typically learn embeddings in Euclidean vector spaces, which do not account for this property. For this purpose, we introduce a new approach for learning hierarchical representations of symbolic data by embedding them into hyperbolic space -- or more precisely into an n-dimensional Poincar\\'e ball. Due to the underlying hyperbolic geometry, this allows us to learn parsimonious representations of symbolic data by simultaneously capturing hierarchy and similarity. We introduce an efficient algorithm to learn the embeddings based on Riemannian optimization and show experimentally that Poincar\\'e embeddings outperform Euclidean embeddings significantly on data with latent hierarchies, both in terms of representation capacity and in terms of generalization ability.|sl:tag|tag:these_irit_renault_biblio_initiale -Chêne|skos:broader|Arbres -535|skos:broader|Krakatoa -Lilian Weng|skos:broader|NLP girls and guys -Teacher-student learning|skos:broader|ANN NN Artificial neural network -Learning to Remember Rare Events a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network Despite recent advances, memory-augmented deep neural networks are still limited when it comes to life-long and one-shot learning, especially in remembering rare events. We present a large-scale life-long memory module for use in deep learning. The module exploits fast nearest-neighbor algorithms for efficiency and thus scales to large memory sizes. Except for the nearest-neighbor query, the module is fully differentiable and trained end-to-end with no extra supervision. It operates in a life-long manner, i.e., without the need to reset it during training. Our memory module can be easily added to any part of a supervised neural network. To show its versatility we add it to a number of networks, from simple convolutional ones tested on image classification to deep sequence-to-sequence and recurrent-convolutional models. In all cases, the enhanced network gains the ability to remember and do life-long one-shot learning. Our module remembers training examples shown many thousands of steps in the past and it can successfully generalize from them. We set new state-of-the-art for one-shot learning on the Omniglot dataset and demonstrate, for the first time, life-long one-shot learning in recurrent neural networks on a large-scale machine translation task.|sl:tag|tag:arxiv_doc -Entity linking|skos:broader|NLP tasks / problems -Big bang|skos:broader|Astrophysique -WWW 2008|skos:broader|J'y étais -Neuroscience AND Machine learning|skos:broader|Neuroscience -Indiens du Brésil|skos:broader|Amérindien -Learning with Memory Embeddings Embedding learning, a.k.a. representation learning, has been shown to be able to model large-scale semantic knowledge graphs. 
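The Poincaré embeddings entry above learns hierarchy-aware representations in hyperbolic space. gensim ships an implementation; a minimal sketch on a toy taxonomy, with pairs and hyperparameters that are purely illustrative:

```python
from gensim.models.poincare import PoincareModel

# (child, parent) relations of a tiny made-up taxonomy
relations = [("beagle", "dog"), ("dog", "mammal"), ("cat", "mammal"), ("mammal", "animal")]
model = PoincareModel(relations, size=2, negative=2)
model.train(epochs=50)
print(model.kv.distance("beagle", "animal"))  # hyperbolic distance in the Poincaré ball
```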
A key concept is a mapping of the knowledge graph to a tensor representation whose entries are predicted by models using latent representations of generalized entities. Latent variable models are well suited to deal with the high dimensionality and sparsity of typical knowledge graphs. In recent publications the embedding models were extended to also consider time evolutions, time patterns and subsymbolic representations. In this paper we map embedding models, which were developed purely as solutions to technical problems for modelling temporal knowledge graphs, to various cognitive memory functions, in particular to semantic and concept memory, episodic memory, sensory memory, short-term memory, and working memory. We discuss learning, query answering, the path from sensory input to semantic decoding, and the relationship between episodic memory and semantic memory. We introduce a number of hypotheses on human memory that can be derived from the developed mathematical models.|sl:arxiv_firstAuthor|Volker Tresp -Earth map|skos:broader|La Terre vue du ciel -danbri|skos:broader|Technical guys -ULMFiT|skos:broader|Text Embeddings -A Metric Learning Reality Check Deep metric learning papers from the past four years have consistently claimed great advances in accuracy, often more than doubling the performance of decade-old methods. In this paper, we take a closer look at the field to see if this is actually true. We find flaws in the experimental setup of these papers, and propose a new way to evaluate metric learning algorithms. Finally, we present experimental results that show that the improvements over time have been marginal at best.|sl:arxiv_author|Kevin Musgrave -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. 
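The memory-embeddings entry above maps a knowledge graph to a tensor whose entries are predicted from latent representations of generalized entities. A minimal sketch of one such bilinear scorer (DistMult-style; random vectors stand in for learned ones, and the sizes are arbitrary):

```python
import numpy as np

rng = np.random.default_rng(0)
n_entities, n_relations, dim = 1000, 20, 32
E = rng.normal(size=(n_entities, dim))   # entity embeddings (would be learned)
R = rng.normal(size=(n_relations, dim))  # relation embeddings (would be learned)

def score(h, r, t):
    """Plausibility of triple (h, r, t) as a trilinear product."""
    return float(np.sum(E[h] * R[r] * E[t]))

print(score(3, 1, 7))
```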
Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:arxiv_author|Robert Ormandi -Library of Alexandria|skos:broader|Egypte antique -fps AND LDOW2008|skos:broader|Semantic Enterprise -Jena and database|skos:broader|Jena -One-Shot Generalization in Deep Generative Models Humans have an impressive ability to reason about new concepts and experiences from just a single example. In particular, humans have an ability for one-shot generalization: an ability to encounter a new concept, understand its structure, and then be able to generate compelling alternative variations of the concept. We develop machine learning systems with this important capacity by developing new deep generative models, models that combine the representational power of deep learning with the inferential power of Bayesian reasoning. We develop a class of sequential generative models that are built on the principles of feedback and attention. These two characteristics lead to generative models that are among the state-of-the art in density estimation and image generation. We demonstrate the one-shot generalization ability of our models using three tasks: unconditional sampling, generating new exemplars of a given concept, and generating new exemplars of a family of concepts. In all cases our models are able to generate compelling and diverse samples---having seen new examples just once---providing an important class of general-purpose models for one-shot machine learning.|sl:tag|tag:one_shot_generalization -Named entity disambiguation|skos:broader|Entity Analysis -"Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model."|sl:arxiv_author|Mohammad Norouzi -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations.
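The codistillation entry above trains peer models that fit the labels while agreeing with each other's (possibly stale) predictions. A hedged PyTorch sketch of that loss; the mixing weight, shapes, and names are illustrative, not the paper's settings:

```python
import torch
import torch.nn.functional as F

def codistillation_loss(logits, peer_logits, labels, alpha=0.5):
    """Cross-entropy on labels + KL toward the peer's (detached) predictions."""
    ce = F.cross_entropy(logits, labels)
    kl = F.kl_div(
        F.log_softmax(logits, dim=-1),
        F.softmax(peer_logits.detach(), dim=-1),  # peer treated as a constant target
        reduction="batchmean",
    )
    return ce + alpha * kl

logits = torch.randn(8, 10, requires_grad=True)   # this model's outputs
peer_logits = torch.randn(8, 10)                  # stale outputs from the peer
labels = torch.randint(0, 10, (8,))
codistillation_loss(logits, peer_logits, labels).backward()
```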
Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|sl:arxiv_firstAuthor|Armand Joulin -Python install|skos:broader|Notes d'install -Découverte d'espèces inconnues|skos:broader|Biology -SATNet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver Integrating logical reasoning within deep learning architectures has been a major goal of modern AI systems. In this paper, we propose a new direction toward this goal by introducing a differentiable (smoothed) maximum satisfiability (MAXSAT) solver that can be integrated into the loop of larger deep learning systems. Our (approximate) solver is based upon a fast coordinate descent approach to solving the semidefinite program (SDP) associated with the MAXSAT problem. We show how to analytically differentiate through the solution to this SDP and efficiently solve the associated backward pass. We demonstrate that by integrating this solver into end-to-end learning systems, we can learn the logical structure of challenging problems in a minimally supervised fashion. In particular, we show that we can learn the parity function using single-bit supervision (a traditionally hard task for deep networks) and learn how to play 9x9 Sudoku solely from examples. We also solve a visual Sudok problem that maps images of Sudoku puzzles to their associated logical solutions by combining our MAXSAT solver with a traditional convolutional architecture. Our approach thus shows promise in integrating logical structures within deep learning.|sl:tag|tag:arxiv_doc -fps @ LDOW 2013|skos:broader|fps: paper -Text to semantic data|skos:broader|Semantic Web -Alexandre Passant|skos:broader|Technical girls and guys -Grands problèmes mathématiques|skos:broader|Mathématiques -TransE|skos:broader|Knowledge Graph Completion -Wikidata|skos:broader|Linked Data -SIF embeddings|skos:broader|Word Embedding Compositionality -Hepp's PropertyValue|skos:broader|schema.org -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. 
Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:arxiv_author|Robert L. Logan IV -Noos|skos:broader|Fournisseurs d'accès à internet -What Does BERT Look At? An Analysis of BERT's Attention Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.|sl:arxiv_author|Kevin Clark -Société de consommation|skos:broader|Société -Marseillaise|skos:broader|France -Eruption volcanique|skos:broader|Volcan -A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? Knowledge Graphs (KGs) are composed of structured information about a particular domain in the form of entities and relations. In addition to the structured information KGs help in facilitating interconnectivity and interoperability between different resources represented in the Linked Data Cloud. KGs have been used in a variety of applications such as entity linking, question answering, recommender systems, etc. However, KG applications suffer from high computational and storage costs. Hence, there arises the necessity for a representation able to map the high dimensional KGs into low dimensional spaces, i.e., embedding space, preserving structural as well as relational information. 
This paper conducts a survey of KG embedding models which not only consider the structured information contained in the form of entities and relations in a KG but also the unstructured information represented as literals such as text, numerical values, images, etc. Along with a theoretical analysis and comparison of the methods proposed so far for generating KG embeddings with literals, an empirical evaluation of the different methods under identical settings has been performed for the general task of link prediction.|sl:arxiv_author|Russa Biswas -Micropayments on the web|skos:broader|Money -Wikidata/RDF|skos:broader|Wikidata -Informatique|skos:broader|Technologie -RDF123|skos:broader|Mapping data from spreadsheets to RDF -Prolétarisation|skos:broader|Travail -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. [Github](https://github.com/cedricdeboom/RepresentationLearning) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|sl:arxiv_firstAuthor|Cedric De Boom -Histoire de l'Afrique|skos:broader|Afrique -Coal seam fire|skos:broader|Charbon -MINOS Neutrino Experiment|skos:broader|Neutrino -Blues|skos:broader|Musique -Cohn-Bendit|skos:broader|Homme politique -Structured Data Embedding|skos:broader|Embeddings -OntoWiki|skos:broader|Semantic data wiki -Universités américaines|skos:broader|USA -Sanjeev Arora|skos:broader|NLP girls and guys -Javascript patterns|skos:broader|JavaScript -VOAF|skos:broader|Bernard Vatant -A Comprehensive Survey on Graph Neural Networks an overview of graph neural networks (GNNs) in data mining and machine learning fields Deep learning has revolutionized many machine learning tasks in recent years, ranging from image classification and video processing to speech recognition and natural language understanding. The data in these tasks are typically represented in the Euclidean space. However, there is an increasing number of applications where data are generated from non-Euclidean domains and are represented as graphs with complex relationships and interdependency between objects. The complexity of graph data has imposed significant challenges on existing machine learning algorithms. Recently, many studies on extending deep learning approaches for graph data have emerged.
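The GNN survey entry above (its abstract continues below) covers, among others, convolutional graph neural networks. A self-contained sketch of the single propagation step most of them build on, in the Kipf-and-Welling style; the graph and random features are stand-ins:

```python
import numpy as np

def gcn_layer(A, H, W):
    """One graph convolution: ReLU(D^-1/2 (A + I) D^-1/2 H W)."""
    A_hat = A + np.eye(A.shape[0])                   # add self-loops
    d_inv_sqrt = 1.0 / np.sqrt(A_hat.sum(axis=1))
    A_norm = A_hat * d_inv_sqrt[:, None] * d_inv_sqrt[None, :]
    return np.maximum(A_norm @ H @ W, 0.0)

A = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float)  # 3-node path graph
H = np.random.default_rng(0).normal(size=(3, 4))              # node features
W = np.random.default_rng(1).normal(size=(4, 2))              # layer weights
print(gcn_layer(A, H, W))
```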
In this survey, we provide a comprehensive overview of graph neural networks (GNNs) in data mining and machine learning fields. We propose a new taxonomy to divide the state-of-the-art graph neural networks into four categories, namely recurrent graph neural networks, convolutional graph neural networks, graph autoencoders, and spatial-temporal graph neural networks. We further discuss the applications of graph neural networks across various domains and summarize the open source codes, benchmark data sets, and model evaluation of graph neural networks. Finally, we propose potential research directions in this rapidly growing field.|sl:arxiv_author|Shirui Pan -Greasemonkey|skos:broader|JavaScript -Bitmap index|skos:broader|Database -Peste|skos:broader|Pandémie -Word co-occurrence based topic model that learns topics by modeling word-word co-occurrences patterns. (In contrast, LDA and PLSA are word-document co-occurrence topic models) A biterm consists of two words co-occurring in the same context (short text). BTM models the biterm occurrences in a corpus. Conventional topic models exploit word co-occurrence patterns to reveal the latent semantic structure of a corpus in an implicit way by modeling the generation of words in each document. These approaches are sensitive to the shortness of documents since the word co-occurrence patterns in a single short document are sparse and not reliable. Instead, if we aggregate all the word co-occurrence patterns in the corpus, their frequencies are more stable and more clearly reveals the correlation between the words. With this idea, we developed the biterm topic model, which takes a novel way to reveal the latent topic components in a corpus by directly modeling the generation of word co-occurrence patterns.|skos:broader|Conventional topic models implicitly capture the document-level word co-occurrence patterns to reveal topics. This may not work well on short texts, because of data sparsity. Compared with long texts, topic discovery from short texts has the following three challenges: - only very limited word co-occurrence information is available, - the frequency of words plays a less discriminative role, - and the limited contexts make it more difficult to identify the senses of ambiguous words -Craig Venter|skos:broader|Celera ou Craig Venter -Improving Entity Linking by Modeling Latent Entity Type Information Existing state of the art neural entity linking models employ attention-based bag-of-words context model and pre-trained entity embeddings bootstrapped from word embeddings to assess topic level context compatibility. However, the latent entity type information in the immediate context of the mention is neglected, which causes the models often link mentions to incorrect entities with incorrect type. To tackle this problem, we propose to inject latent entity type information into the entity embeddings based on pre-trained BERT. In addition, we integrate a BERT-based entity similarity score into the local context model of a state-of-the-art model to better capture latent entity type information. Our model significantly outperforms the state-of-the-art entity linking models on standard benchmark (AIDA-CoNLL).
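The biterm topic model entry above models corpus-level co-occurrence of word pairs within short texts rather than word-document counts. A minimal sketch of the biterm-extraction step it starts from, on toy documents; the full model then fits topic distributions over these pairs:

```python
from collections import Counter
from itertools import combinations

docs = [  # toy short texts, already tokenized
    ["apple", "iphone", "sale"],
    ["iphone", "screen", "crack"],
    ["apple", "pie", "recipe"],
]
# Each unordered word pair co-occurring in one short text is a biterm.
biterms = Counter(pair for doc in docs for pair in combinations(sorted(set(doc)), 2))
print(biterms.most_common(3))
```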
Detailed experiment analysis demonstrates that our model corrects most of the type errors produced by the direct baseline.|sl:arxiv_author|Jinpeng Wang -NSO/Pegasus|skos:broader|Cybersurveillance -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:tag|tag:nlp_ibm -Photos du Niger|skos:broader|Niger -Encyclopedia of Life|skos:broader|Biodiversité -Nazisme|skos:broader|Allemagne -Steve Jobs|skos:broader|Technical girls and guys -Automobile|skos:broader|Transport -Physics|skos:broader|sciences -CamemBERT|skos:broader|NLP@Facebook -A Dual Embedding Space Model for Document Ranking Investigate neural word embeddings as a source of evidence in document ranking. Presented in [this Stanford course on IR](/doc/?uri=https%3A%2F%2Fweb.stanford.edu%2Fclass%2Fcs276%2Fhandouts%2Flecture20-distributed-representations.pdf) by Chris Manning (starting slide 44) They train a word2vec model, but retain both the input and the output projections. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives A fundamental goal of search engines is to identify, given a query, documents that have relevant text. 
This is intrinsically difficult because the query and the document may use different vocabulary, or the document may contain query words without being relevant. We investigate neural word embeddings as a source of evidence in document ranking. We train a word2vec embedding model on a large unlabelled query corpus, but in contrast to how the model is commonly used, we retain both the input and the output projections, allowing us to leverage both the embedding spaces to derive richer distributional relationships. During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. We postulate that the proposed Dual Embedding Space Model (DESM) captures evidence on whether a document is about a query term in addition to what is modelled by traditional term-frequency based approaches. Our experiments show that the DESM can re-rank top documents returned by a commercial Web search engine, like Bing, better than a term-matching based signal like TF-IDF. However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives, retrieving documents that are only loosely related to the query. We demonstrate that this problem can be solved effectively by ranking based on a linear mixture of the DESM and the word counting features.|sl:tag|tag:bhaskar_mitra -Amérique|skos:broader|Géographie -Python tools|skos:broader|Python -Javascript closures|skos:broader|JavaScript -Automotive AND W3C|skos:broader|W3C -Andy Seaborne|skos:broader|SW guys (and girls) -KGAT: Knowledge Graph Attention Network for Recommendation To provide more accurate, diverse, and explainable recommendation, it is compulsory to go beyond modeling user-item interactions and take side information into account. Traditional methods like factorization machine (FM) cast it as a supervised learning problem, which assumes each interaction as an independent instance with side information encoded. Due to the overlook of the relations among instances or items (e.g., the director of a movie is also an actor of another movie), these methods are insufficient to distill the collaborative signal from the collective behaviors of users. In this work, we investigate the utility of knowledge graph (KG), which breaks down the independent interaction assumption by linking items with their attributes. We argue that in such a hybrid structure of KG and user-item graph, high-order relations --- which connect two items with one or multiple linked attributes --- are an essential factor for successful recommendation. We propose a new method named Knowledge Graph Attention Network (KGAT) which explicitly models the high-order connectivities in KG in an end-to-end fashion. It recursively propagates the embeddings from a node's neighbors (which can be users, items, or attributes) to refine the node's embedding, and employs an attention mechanism to discriminate the importance of the neighbors. Our KGAT is conceptually advantageous to existing KG-based recommendation methods, which either exploit high-order relations by extracting paths or implicitly modeling them with regularization. Empirical results on three public benchmarks show that KGAT significantly outperforms state-of-the-art methods like Neural FM and RippleNet. 
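The Dual Embedding Space Model entry above scores a document by comparing query words in the word2vec IN space against the document's centroid in the OUT space. A numpy sketch of that aggregation; the IN/OUT matrices are assumed to come from a word2vec model whose output projection was retained:

```python
import numpy as np

def desm_score(query_in, doc_out):
    """Mean cosine similarity between each query IN-vector and the
    normalized centroid of the document's (normalized) OUT-vectors."""
    d = doc_out / np.linalg.norm(doc_out, axis=1, keepdims=True)
    centroid = d.mean(axis=0)
    centroid /= np.linalg.norm(centroid)
    q = query_in / np.linalg.norm(query_in, axis=1, keepdims=True)
    return float((q @ centroid).mean())

rng = np.random.default_rng(0)  # random stand-ins for trained embeddings
print(desm_score(rng.normal(size=(3, 50)), rng.normal(size=(120, 50))))
```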
Further studies verify the efficacy of embedding propagation for high-order relation modeling and the interpretability benefits brought by the attention mechanism.|sl:tag|tag:attention_knowledge_graphs -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:tag|tag:arxiv_doc -Réalité augmentée|skos:broader|Réalité virtuelle -Australia's evolutionary history|skos:broader|Australie -Clinical Concept Embeddings Learned from Massive Sources of Multimodal Medical Data Word embeddings are a popular approach to unsupervised learning of word relationships that are widely used in natural language processing. In this article, we present a new set of embeddings for medical concepts learned using an extremely large collection of multimodal medical data. Leaning on recent theoretical insights, we demonstrate how an insurance claims database of 60 million members, a collection of 20 million clinical notes, and 1.7 million full text biomedical journal articles can be combined to embed concepts into a common space, resulting in the largest ever set of embeddings for 108,477 medical concepts. To evaluate our approach, we present a new benchmark methodology based on statistical power specifically designed to test embeddings of medical concepts. Our approach, called cui2vec, attains state-of-the-art performance relative to previous methods in most instances. Finally, we provide a downloadable set of pre-trained embeddings for other researchers to use, as well as an online tool for interactive exploration of the cui2vec embeddings|sl:arxiv_author|Tianxi Cai -Dynamic topic model|skos:broader|Topic Modeling -fps dev|skos:broader|Dev -Ivan Herman|skos:broader|SW guys (and girls) -Bacterial to Animal Gene Transfer|skos:broader|Genetics Génétique -Text mining|skos:broader|NLP -Text to SQL|skos:broader|SQL -Guéant|skos:broader|Sarkozy : immigration -Cassandra|skos:broader|Big Data -Norilsk|skos:broader|Polluted places -Lybie|skos:broader|Afrique du Nord -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. 
These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|sl:tag|tag:graph_embeddings -Support vector machine|skos:broader|Kernel methods -Natural Language Semantic Search|skos:broader|Semantic Search -Knowledge Enhanced Contextual Word Representations General method to embed multiple knowledge bases into pre-trained language models (KB in the sense as fixed collection of entity nodes) The key idea is to explicitly model entity spans in the input text and use an entity linker to retrieve relevant entity embeddings from a KB to form knowledge enhanced entity-span representations. Then, update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.|sl:tag|tag:knowledge_augmented_language_models -Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. [Github](https://github.com/stanis-morozov/prodige) Learning useful representations is a key ingredient to the success of modern machine learning. 
Currently, representation learning mostly relies on embedding data into Euclidean space. However, recent work has shown that data in some domains is better modeled by non-euclidean metric spaces, and inappropriate geometry can result in inferior performance. In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE: a method that learns a weighted graph representation of data end-to-end by gradient descent. Greater generality and fewer model assumptions make PRODIGE more powerful than existing embedding-based approaches. We confirm the superiority of our method via extensive experiments on a wide range of tasks, including classification, compression, and collaborative filtering.|sl:tag|tag:yandex -Social Content Services|skos:broader|Web 2.0 -Mapping data from spreadsheets to RDF|skos:broader|RDF -Category Embedding|skos:broader|Categorical Variables -Vie extraterrestre|skos:broader|Astronomie -Pêche industrielle|skos:broader|Pêche -Titan|skos:broader|Saturne -Probabilistic Graphical Models|skos:broader|Uncertainty Reasoning -Lenka Zdeborová|skos:broader|ML and physics -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:tag|tag:sequence_to_sequence_learning -Approach that relies on fine-tuning a Language Model to the task at hand 3 stages: - General domain language model pre-training - Target task language model fine-tuning - Target task classifier fine-tuning |skos:broader|replacement of the vectorial representation of words with a matrix representation where each word’s representation includes information about its context Embedding words through a language model Language-model-based encoders The key idea underneath is to train a contextual encoder with a language model objective on a large unannotated text corpus. During the training, part of the text is masked and the goal is to encode the remaining context and predict the missing part. During the training, part of the text is masked and the goal is to encode the remaining context and predict the missing part. ([source](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1902.11269)) -Unsupervised Deep Embedding for Clustering Analysis Relatively little work has focused on learning representations for clustering. 
In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective Clustering is central to many data-driven application domains and has been studied extensively in terms of distance functions and grouping algorithms. Relatively little work has focused on learning representations for clustering. In this paper, we propose Deep Embedded Clustering (DEC), a method that simultaneously learns feature representations and cluster assignments using deep neural networks. DEC learns a mapping from the data space to a lower-dimensional feature space in which it iteratively optimizes a clustering objective. Our experimental evaluations on image and text corpora show significant improvement over state-of-the-art methods.|sl:arxiv_author|Junyuan Xie -Experience Grounds Language Successful linguistic communication relies on a shared experience of the world, and it is this shared experience that makes utterances meaningful. Despite the incredible effectiveness of language processing models trained on text alone, today's best systems still make mistakes that arise from a failure to relate language to the physical world it describes and to the social interactions it facilitates. Natural Language Processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large text corpora can be deeply enriched from the parallel tradition of research on the contextual and social nature of language. In this article, we consider work on the contextual foundations of language: grounding, embodiment, and social interaction. We describe a brief history and possible progression of how contextual information can factor into our representations, with an eye towards how this integration can move the field forward and where it is currently being pioneered. We believe this framing will serve as a roadmap for truly contextual language understanding.|sl:arxiv_author|Angeliki Lazaridou -Arte|skos:broader|Télévision -Web Services for JavaScript|skos:broader|JavaScript -Linked Learning 2012|skos:broader|Workshop -Grant Ingersoll|skos:broader|Technical girls and guys -Gradient boosting|skos:broader|Boosting -Thomson Reuters|skos:broader|Entreprise -Bhaskar Mitra|skos:broader|NLP@Microsoft -Sarraounia Mangou|skos:broader|Esprit de résistance -Time in RDF|skos:broader|RDF -Bulle spéculative|skos:broader|Economie -A Comparative Study of Word Embeddings for Reading Comprehension abstract: The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. Here we show that seemingly minor choices made on 1. the use of pre-trained word embeddings, and 2. the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance The focus of past machine learning research for Reading Comprehension tasks has been primarily on the design of novel deep learning architectures. 
Here we show that seemingly minor choices made on (1) the use of pre-trained word embeddings, and (2) the representation of out-of-vocabulary tokens at test time, can turn out to have a larger impact than architectural choices on the final performance. We systematically explore several options for these choices, and provide recommendations to researchers working in this area.|sl:tag|tag:arxiv_doc -Extinction des dinosaures|skos:broader|Dinosaures -Slot filling|skos:broader|Chatbot -Crimes de l'église catholique|skos:broader|Eglise catholique -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:arxiv_author|Bhuwan Dhingra -JavaScript|skos:broader|Web dev -Bag of Tricks for Efficient Text Classification A simple and efficient baseline for text classification. Our word features can be averaged together to form good sentence representations. Our experiments show that fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute. This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. 
We can train fastText on more than one billion words in less than ten minutes using a standard multicore~CPU, and classify half a million sentences among~312K classes in less than a minute.|sl:arxiv_author|Armand Joulin -StarSpace: Embed All The Things! We present StarSpace, a general-purpose neural embedding model that can solve a wide variety of problems: labeling tasks such as text classification, ranking tasks such as information retrieval/web search, collaborative filtering-based or content-based recommendation, embedding of multi-relational graphs, and learning word, sentence or document level embeddings. In each case the model works by embedding those entities comprised of discrete features and comparing them against each other -- learning similarities dependent on the task. Empirical results on a number of tasks show that StarSpace is highly competitive with existing methods, whilst also being generally applicable to new cases where those methods are not.|sl:arxiv_author|Adam Fisch -Geste écologique|skos:broader|Écologie -Large deviations for the perceptron model and consequences for active learning the task of choosing the subset of samples to be labeled from a fixed finite pool of samples Active learning is a branch of machine learning that deals with problems where unlabeled data is abundant yet obtaining labels is expensive. The learning algorithm has the possibility of querying a limited number of samples to obtain the corresponding labels, subsequently used for supervised learning. In this work, we consider the task of choosing the subset of samples to be labeled from a fixed finite pool of samples. We assume the pool of samples to be a random matrix and the ground truth labels to be generated by a single-layer teacher random neural network. We employ replica methods to analyze the large deviations for the accuracy achieved after supervised learning on a subset of the original pool. These large deviations then provide optimal achievable performance boundaries for any active learning algorithm. We show that the optimal learning performance can be efficiently approached by simple message-passing active learning algorithms. We also provide a comparison with the performance of some other popular active learning strategies.|sl:tag|tag:lenka_zdeborova -Propriété intellectuelle|skos:broader|Grands problèmes -Krill|skos:broader|Crustacé -Genetic Programming|skos:broader|Programming -Essence of life|skos:broader|Biologie -Notes on Cardinal's Matrices These notes are motivated by the work of Jean-Paul Cardinal on symmetric matrices related to the Mertens function. He showed that certain norm bounds on his matrices implied the Riemann hypothesis. Using a different matrix norm we show an equivalence of the Riemann hypothesis to suitable norm bounds on his matrices in the new norm. Then we specify a deformed version of his Mertens function matrices that unconditionally satisfies a norm bound that is of the same strength as his Riemann hypothesis bound.|sl:arxiv_author|David Montague -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. 
Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|sl:tag|tag:end_to_end_entity_linking -Sharepoint|skos:broader|Microsoft -José Moreno|skos:broader|NLP girls and guys -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:arxiv_author|Mehdi Ali -Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. 
Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised `skeleton' table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|sl:tag|tag:arxiv_doc -Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms Extracting information from tables in documents presents a significant challenge in many industries and in academic research. Existing methods which take a bottom-up approach of integrating lines into cells and rows or columns neglect the available prior information relating to table structure. Our proposed method takes a top-down approach, first using a generative adversarial network to map a table image into a standardised `skeleton' table form denoting the approximate row and column borders without table content, then fitting renderings of candidate latent table structures to the skeleton structure using a distance measure optimised by a genetic algorithm.|sl:arxiv_firstAuthor|Nataliya Le Vine -TensorFlow 2.0|skos:broader|TensorFlow -Gaulois|skos:broader|Celte -Favoris|skos:broader|I like I like -Salsa|skos:broader|Musique -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|sl:tag|tag:statistics -OWL RL|skos:broader|Rules -SPARQL extensions|skos:broader|SPARQL -XLNet|skos:broader|Pre-Trained Language Models -Accaparement des terres agricoles|skos:broader|Terres agricoles -Loropéni|skos:broader|Burkina Faso -Multi-Task Deep Neural Networks for Natural Language Understanding outperforms BERT in nine of eleven benchmark NLP tasks In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations in order to adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). 
MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement). We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. The code and pre-trained models are publicly available at https://github.com/namisan/mt-dnn.|sl:tag|tag:pre_trained_language_models -From Word to Sense Embeddings: A Survey on Vector Representations of Meaning Survey focused on semantic representation of meaning (methods that try to directly model individual meanings of words). Pb with word embeddings: the meaning conflation deficiency (representing a word with all its possible meanings as a single vector). Can be addressed by a method for modelling unambiguous lexical meaning. two main branches of sense representation : - unsupervised - knowledge-based Over the past years, distributed semantic representations have proved to be effective and flexible keepers of prior knowledge to be integrated into downstream applications. This survey focuses on the representation of meaning. We start from the theoretical background behind word vector space models and highlight one of their major limitations: the meaning conflation deficiency, which arises from representing a word with all its possible meanings as a single vector. Then, we explain how this deficiency can be addressed through a transition from the word level to the more fine-grained level of word senses (in its broader acceptation) as a method for modelling unambiguous lexical meaning. We present a comprehensive overview of the wide range of techniques in the two main branches of sense representation, i.e., unsupervised and knowledge-based. Finally, this survey covers the main evaluation procedures and applications for this type of representation, and provides an analysis of four of its important aspects: interpretability, sense granularity, adaptability to different domains and compositionality.|sl:tag|tag:arxiv_doc -Afrique subsaharienne|skos:broader|Afrique -SemWeb Pro 2012|skos:broader|SemWeb Pro -Naftali Tishby|skos:broader|AI girls and guys -BAM! Born-Again Multi-Task Networks for Natural Language Understanding knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.|sl:tag|tag:arxiv_doc -RESTful Web Services|skos:broader|REST -BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. 
Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).|sl:tag|tag:attention_is_all_you_need -Brain-computer interface|skos:broader|Brain -Kaguya|skos:broader|Exploration spatiale -Traversing Knowledge Graphs in Vector Space Knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new "compositional" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. Path queries on a knowledge graph can be used to answer compositional questions such as What languages are spoken by people living in Lisbon?. However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new compositional training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.|sl:arxiv_firstAuthor|Kelvin Guu -Reagan|skos:broader|Président des USA -VSO|skos:broader|Automotive ontologies -DeepMind|skos:broader|AI@Google -Renato Matos|skos:broader|Musicien -Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study data redundancy (reverse relations), Cartesian product relations A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world In the active research area of employing embedding models for knowledge graph completion, particularly for the task of link prediction, most prior studies used two benchmark datasets FB15k and WN18 in evaluating such models. Most triples in these and other datasets in such studies belong to reverse and duplicate relations which exhibit high data redundancy due to semantic duplication, correlation or data incompleteness. This is a case of excessive data leakage---a model is trained using features that otherwise would not be available when the model needs to be applied for real prediction.
There are also Cartesian product relations for which every triple formed by the Cartesian product of applicable subjects and objects is a true fact. Link prediction on the aforementioned relations is easy and can be achieved with even better accuracy using straightforward rules instead of sophisticated embedding models. A more fundamental defect of these models is that the link prediction scenario, given such data, is non-existent in the real-world. This paper is the first systematic study with the main objective of assessing the true effectiveness of embedding models when the unrealistic triples are removed. Our experiment results show these models are much less accurate than what we used to perceive. Their poor accuracy renders link prediction a task without truly effective automated solution. Hence, we call for re-investigation of possible effective approaches.|sl:arxiv_author|Chengkai Li Department of Computer Science and Engineering, University of Texas at Arlington -Presse|skos:broader|Journalisme -Templatic documents|skos:broader|2D-NLP -URI|skos:broader|Internet -Leo Sauermann|skos:broader|Technical girls and guys -Asie mineure|skos:broader|Antiquité -Topic2Vec: Learning Distributed Representations of Topics Topic2Vec aims at learning topic representations along with word representations. Considering the simplicity and efficient solution, we just follow the optimization scheme that used in Word2Vec Latent Dirichlet Allocation (LDA) mining thematic structure of documents plays an important role in nature language processing and machine learning areas. However, the probability distribution from LDA only describes the statistical relationship of occurrences in the corpus and usually in practice, probability is not the best choice for feature representations. Recently, embedding methods have been proposed to represent words and documents by learning essential concepts and representations, such as Word2Vec and Doc2Vec. The embedded representations have shown more effectiveness than LDA-style representations in many tasks. In this paper, we propose the Topic2Vec approach which can learn topic representations in the same semantic vector space with words, as an alternative to probability. The experimental results show that Topic2Vec achieves interesting and meaningful results.|sl:tag|tag:latent_dirichlet_allocation -Stefano Mazzocchi|skos:broader|SW guys (and girls) -Guerres de religion|skos:broader|War -Document Embedding with Paragraph Vectors Paragraph Vectors has been recently proposed as an unsupervised method for learning distributed representations for pieces of texts. In their work, the authors showed that the method can learn an embedding of movie review texts which can be leveraged for sentiment analysis. That proof of concept, while encouraging, was rather narrow. Here we consider tasks other than sentiment analysis, provide a more thorough comparison of Paragraph Vectors to other document modelling algorithms such as Latent Dirichlet Allocation, and evaluate performance of the method as we vary the dimensionality of the learned representation. We benchmarked the models on two document similarity data sets, one from Wikipedia, one from arXiv. We observe that the Paragraph Vector method performs significantly better than other methods, and propose a simple improvement to enhance embedding quality. Somewhat surprisingly, we also show that much like word embeddings, vector operations on Paragraph Vectors can perform useful semantic results.|sl:arxiv_author|Andrew M. 
Dai -Ghana Empire|skos:broader|Empires d'Afrique de l'Ouest -Zero-shot Entity Linking with Dense Entity Retrieval We consider the zero-shot entity-linking challenge where each entity is defined by a short textual description, and the model must read these descriptions together with the mention context to make the final linking decisions. In this setting, retrieving entity candidates can be particularly challenging, since many of the common linking cues such as entity alias tables and link popularity are not available. In this paper, we introduce a simple and effective two stage approach for zero-shot linking, based on fine-tuned BERT architectures. In the first stage, we do retrieval in a dense space defined by a bi-encoder that independently embeds the mention context and the entity descriptions. Each candidate is then examined more carefully with a cross-encoder, that concatenates the mention and entity text. Our approach achieves a nearly 5 point absolute gain on a recently introduced zero-shot entity linking benchmark, driven largely by improvements over previous IR-based candidate retrieval. We also show that it performs well in the non-zero-shot setting, obtaining the state-of-the-art result on TACKBP-2010.|sl:tag|tag:arxiv_doc -Guerre de Yougoslavie|skos:broader|Yougoslavie Ex Yougoslavie -Jena|skos:broader|Semantic Web : Tools -Hydrogen economy|skos:broader|Hydrogen -Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|sl:tag|tag:arxiv_doc -BERT-of-Theseus: Compressing BERT by Progressive Module Replacing approach to compress BERT by progressive module replacing. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter [Github](https://github.com/JetRunner/BERT-of-Theseus) In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models, and smooths the training process. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter, liberating human effort from hyper-parameter tuning. 
Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.|sl:arxiv_author|Tao Ge -Java tool|skos:broader|Java -Javascript patterns|skos:broader|Design pattern -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:arxiv_firstAuthor|Mehdi Ali -Knowledge distillation|skos:broader|Neural networks -Arundhati Roy|skos:broader|Ecrivain -Interactive Knowledge Stack|skos:broader|Linked Data -Palmyra|skos:broader|Syrie -Differentiable Reasoning over a Virtual Knowledge Base We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. [(Bhuwan Dhingra PhD Thesis)](doc:2020/07/end_to_end_learning_with_text_) We consider the task of answering complex multi-hop questions using a corpus as a virtual knowledge base (KB). In particular, we describe a neural module, DrKIT, that traverses textual data like a KB, softly following paths of relations between mentions of entities in the corpus. At each step the module uses a combination of sparse-matrix TFIDF indices and a maximum inner product search (MIPS) on a special index of contextual representations of the mentions. This module is differentiable, so the full system can be trained end-to-end using gradient based methods, starting from natural language inputs. 
We also describe a pretraining scheme for the contextual representation encoder by generating hard negative examples using existing knowledge bases. We show that DrKIT improves accuracy by 9 points on 3-hop questions in the MetaQA dataset, cutting the gap between text-based and KB-based state-of-the-art by 70%. On HotpotQA, DrKIT leads to a 10% improvement over a BERT-based re-ranking approach to retrieving the relevant passages required to answer a question. DrKIT is also very efficient, processing 10-100x more queries per second than existing multi-hop systems.|sl:tag|tag:neural_memory -Tchernobyl|skos:broader|Ukraine -Dan Brickley|skos:broader|Semantic Web -ML/NLP blog|skos:broader|Blog -Edd Dumbill|skos:broader|Technical girls and guys -A mathematical theory of semantic development in deep neural networks a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? An extensive body of empirical research has revealed remarkable regularities in the acquisition, organization, deployment, and neural representation of human semantic knowledge, thereby raising a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? We address this question by mathematically analyzing the nonlinear dynamics of learning in deep linear networks. We find exact solutions to this learning dynamics that yield a conceptual explanation for the prevalence of many disparate phenomena in semantic cognition, including the hierarchical differentiation of concepts through rapid developmental transitions, the ubiquity of semantic illusions between such transitions, the emergence of item typicality and category coherence as factors controlling the speed of semantic processing, changing patterns of inductive projection over development, and the conservation of semantic similarity in neural representations across species. Thus, surprisingly, our simple neural model qualitatively recapitulates many diverse regularities underlying semantic development, while providing analytic insight into how the statistical structure of an environment can interact with nonlinear deep learning dynamics to give rise to these regularities.|sl:arxiv_author|James L. McClelland -Suisse|skos:broader|Europe -A Tutorial on Network Embeddings Network embedding methods aim at learning low-dimensional latent representation of nodes in a network. These representations can be used as features for a wide range of tasks on graphs such as classification, clustering, link prediction, and visualization. In this survey, we give an overview of network embeddings by summarizing and categorizing recent advancements in this research field. We first discuss the desirable properties of network embeddings and briefly introduce the history of network embedding algorithms. Then, we discuss network embedding methods under different scenarios, such as supervised versus unsupervised learning, learning embeddings for homogeneous networks versus for heterogeneous networks, etc. 
We further demonstrate the applications of network embeddings, and conclude the survey with future work in this area.|sl:arxiv_author|Haochen Chen -HTML|skos:broader|Internet -Mémoire humaine|skos:broader|Brain -Modification du génome humain|skos:broader|Manipulations génétiques -Specializing Word Embeddings (for Parsing) by Information Bottleneck EMNLP best paper award. [Related blog post](doc:2020/06/information_bottleneck_for_nlp_) Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.|sl:arxiv_author|Jason Eisner -Chili|skos:broader|Amérique latine -Jena Fuseki|skos:broader|Jena -Distributed Representations of Sentences and Documents Paragraph Vector: an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents.Represents each document by a dense vector which is trained to predict words in the document. Overcomes the weaknesses of the [Bag Of Words](/tag/bag_of_words) model (order of words, semantic of words) Many machine learning algorithms require the input to be represented as a fixed-length feature vector. When it comes to texts, one of the most common fixed-length features is bag-of-words. Despite their popularity, bag-of-words features have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, powerful, strong and Paris are equally distant. In this paper, we propose Paragraph Vector, an unsupervised algorithm that learns fixed-length feature representations from variable-length pieces of texts, such as sentences, paragraphs, and documents. Our algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that Paragraph Vectors outperform bag-of-words models as well as other techniques for text representations. Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.|sl:tag|tag:tomas_mikolov -Transformers|skos:broader|Sequence-to-sequence learning -Java 1.8. Trained tagger models for English, Arabic, Chinese, French, German. The tagger can be retrained on any language, given POS-annotated training text for the language. Uses the Penn Treebank tag set. 
[NLTK interface](http://www.nltk.org/api/nltk.tag.html#module-nltk.tag.stanford) |skos:broader|or grammatical tagging, or word-category disambiguation: the process of marking up a word in a text as corresponding to a particular part of speech -The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:arxiv_author|Ed H. Chi -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. 
We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:tag|tag:graph_neural_networks -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:tag|tag:sebastian_ruder -Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation distilling structured knowledge from a differentiable path-based recommendation model. proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons Recently, the embedding-based recommendation models (e.g., matrix factorization and deep models) have been prevalent in both academia and industry due to their effectiveness and flexibility. However, they also have such intrinsic limitations as lacking explainability and suffering from data sparsity. In this paper, we propose an end-to-end joint learning framework to get around these limitations without introducing any extra overhead by distilling structured knowledge from a differentiable path-based recommendation model. Through extensive experiments, we show that our proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons.|sl:tag|tag:arxiv_doc -Formal knowledge representation language|skos:broader|Knowledge Representation -Nathan Rixham|skos:broader|SW guys (and girls) -Backtranslation|skos:broader|NLP techniques -Médicaments génériques|skos:broader|Médicaments -Embeddings in Information Retrieval|skos:broader|Embeddings -Representation Learning for NLP|skos:broader|NLP tasks / problems -Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge.... The model can be updated without re-training by manipulating its symbolic representations. 
In particular this model allows us to add new facts and overwrite existing ones. a neural language model which learns to access information in a symbolic knowledge graph. This model builds on the recently-proposed [Entities as Experts](doc:2020/07/2004_07202_entities_as_expert) (EaE) language model (Févry et al., 2020), which extends the same transformer (Vaswani et al., 2017) architecture of BERT (Devlin et al., 2019) with an additional external memory for entities. After training EaE, the embedding associated with an entity will (ideally) capture information about the textual context in which that entity appears, and by inference, the entity’s semantic properties. We include an additional memory called a fact memory, which encodes triples from a symbolic KB. This combination results in a neural language model which learns to access information in the symbolic knowledge graph. TODO: read again IBM's [Span Selection Pre-training for Question Answering](doc:2019/09/_1909_04120_span_selection_pre) (an effort to avoid encoding general knowledge in the transformer network itself) Massive language models are the core of modern NLP modeling and have been shown to encode impressive amounts of commonsense and factual information. However, that knowledge exists only within the latent parameters of the model, inaccessible to inspection and interpretation, and even worse, factual information memorized from the training corpora is likely to become stale as the world changes. Knowledge stored as parameters will also inevitably exhibit all of the biases inherent in the source materials. To address these problems, we develop a neural language model that includes an explicit interface between symbolically interpretable factual information and subsymbolic neural knowledge. We show that this model dramatically improves performance on two knowledge-intensive question-answering tasks. More interestingly, the model can be updated without re-training by manipulating its symbolic representations. In particular this model allows us to add new facts and overwrite existing ones in ways that are not possible for earlier models.|sl:tag|tag:neural_memory -Nubie|skos:broader|Soudan -TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification.
LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. We also perform a comprehensive analysis to demonstrate great future opportunities. The data and code of the dataset are provided in \\url{https://github.com/wenhuchen/Table-Fact-Checking}.|sl:arxiv_author|Yunkai Zhang -Scalable Nearest Neighbor Search for Optimal Transport The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly popular similarity measure for rich data domains, such as images or text documents. This raises the necessity for fast nearest neighbor search with respect to this distance, a problem that poses a substantial computational bottleneck for various tasks on massive datasets. In this work, we study fast tree-based approximation algorithms for searching nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based technique, known as Quadtree, has been previously shown to obtain good results. We introduce a variant of this algorithm, called Flowtree, and formally prove it achieves asymptotically better accuracy. Our extensive experiments, on real-world text and image datasets, show that Flowtree improves over various baselines and existing methods in either running time or accuracy. In particular, its quality of approximation is in line with previous high-accuracy methods, while its running time is much faster.|sl:tag|tag:nearest_neighbor_search -paggr|skos:broader|Web of data -Actrice|skos:broader|Cinéma -In machine learning, unsupervised learning refers to the problem of trying to find hidden structure in unlabeled data|skos:broader|Machine learning focuses on prediction, based on known properties learned from the training data. Data mining (which is the analysis step of Knowledge Discovery in Databases) focuses on the discovery of (previously) unknown properties on the data. [Glossary (by google)](https://developers.google.com/machine-learning/glossary/) -D3js|skos:broader|JavaScript librairies -Future of the web|skos:broader|Web -Mona Lisa|skos:broader|Leonardo da Vinci -Nova Spivak|skos:broader|SW guys (and girls) -Energie|skos:broader|Grands problèmes -Text feature extraction|skos:broader|Feature extraction -Wordnet|skos:broader|Semantic Networks -Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning Current advances in Artificial Intelligence and machine learning in general, and deep learning in particular have reached unprecedented impact not only across research communities, but also over popular media channels. However, concerns about interpretability and accountability of AI have been raised by influential thinkers. In spite of the recent impact of AI, several works have identified the need for principled knowledge representation and reasoning mechanisms integrated with deep learning-based systems to provide sound and explainable models for such systems. Neural-symbolic computing aims at integrating, as foreseen by Valiant, two most fundamental cognitive abilities: the ability to learn from the environment, and the ability to reason from what has been learned. Neural-symbolic computing has been an active topic of research for many years, reconciling the advantages of robust learning in neural networks and reasoning and interpretability of symbolic representation. 
In this paper, we survey recent accomplishments of neural-symbolic computing as a principled methodology for integrated machine learning and reasoning. We illustrate the effectiveness of the approach by outlining the main characteristics of the methodology: principled integration of neural learning with symbolic knowledge representation and reasoning allowing for the construction of explainable AI systems. The insights provided by neural-symbolic computing shed new light on the increasingly prominent need for interpretable and accountable AI systems.|sl:tag|tag:neural_symbolic_computing -Expérience scientifique|skos:broader|Science -Swing|skos:broader|Java -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. These information have been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:arxiv_firstAuthor|Jihyeok Kim -Prélèvements obligatoires|skos:broader|Economie -SemWeb China|skos:broader|Semantic Web -Suède|skos:broader|Europe -Installing WordPress|skos:broader|Installing apps -Livres audio|skos:broader|Livre -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. 
Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|sl:arxiv_author|Xiaojing Liu
-Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:arxiv_author|Edouard Grave
-ML and physics|skos:broader|Physique
-GloVe|skos:broader|Word embeddings
-The Case for Learned Index Structures we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs Indexes are models: a B-Tree-Index can be seen as a model to map a key to the position of a record within a sorted array, a Hash-Index as a model to map a key to a position of a record within an unsorted array, and a BitMap-Index as a model to indicate if a data record exists or not. In this exploratory research paper, we start from this premise and posit that all existing index structures can be replaced with other types of models, including deep-learning models, which we term learned indexes. The key idea is that a model can learn the sort order or structure of lookup keys and use this signal to effectively predict the position or existence of records. We theoretically analyze under which conditions learned indexes outperform traditional index structures and describe the main challenges in designing learned index structures. Our initial results show, that by using neural nets we are able to outperform cache-optimized B-Trees by up to 70% in speed while saving an order-of-magnitude in memory over several real-world data sets. More importantly though, we believe that the idea of replacing core components of a data management system through learned models has far reaching implications for future systems designs and that this work just provides a glimpse of what might be possible.|sl:tag|tag:nips_2017
-Semantic Web and OOP|skos:broader|Object Oriented Programming
-Map-reduce|skos:broader|Big Data
-Cosine similarity|skos:broader|Similarity queries
-Still a Pain in the Neck: Evaluating Text Representations on Lexical Composition How well do contextualized word embeddings address lexical composition? They are good in recognizing meaning shift ("give in" is different from "give") but much worse with revealing implicit meaning ("hot tea" is about temperature, "hot debate" isn't).
Building meaningful phrase representations is challenging because phrase meanings are not simply the sum of their constituent meanings. Lexical composition can shift the meanings of the constituent words and introduce implicit information. We tested a broad range of textual representations for their capacity to address these issues. We found that as expected, contextualized word representations perform better than static word embeddings, more so on detecting meaning shift than in recovering implicit information, in which their performance is still far from that of humans. Our evaluation suite, including 5 tasks related to lexical composition effects, can serve future research aiming to improve such representations.|sl:arxiv_firstAuthor|Vered Shwartz -Pre-training via Paraphrasing We introduce MARGE, a pre-trained sequence-to-sequence model learned with an unsupervised multi-lingual multi-document paraphrasing objective. MARGE provides an alternative to the dominant masked language modeling paradigm, where we self-supervise the reconstruction of target text by retrieving a set of related texts (in many languages) and conditioning on them to maximize the likelihood of generating the original. We show it is possible to jointly learn to do retrieval and reconstruction, given only a random initialization. The objective noisily captures aspects of paraphrase, translation, multi-document summarization, and information retrieval, allowing for strong zero-shot performance on several tasks. For example, with no additional task-specific training we achieve BLEU scores of up to 35.8 for document translation. We further show that fine-tuning gives strong performance on a range of discriminative and generative tasks in many languages, making MARGE the most generally applicable pre-training method to date.|sl:tag|tag:nlp_facebook -OWL|skos:broader|Reasoning -Ranked Entities in Search Results|skos:broader|Ranking (information retrieval) -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. 
We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:tag|tag:relational_inductive_biases -Virtuoso|skos:broader|Relational Databases and the Semantic Web -Australie|skos:broader|Océanie -Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|sl:tag|tag:deep_learning -Firefox|skos:broader|Brouteur -Semantic Web Client Library|skos:broader|Freie Universität Berlin -Enseignement en Afrique|skos:broader|Education -Origines de l'homme|skos:broader|Histoire de la vie -RNN-LM|skos:broader|Language Modeling Statistical Language Model -Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. Modeling human language requires the ability to not only generate fluent text but also encode factual knowledge. However, traditional language models are only capable of remembering facts seen at training time, and often have difficulty recalling them. To address this, we introduce the knowledge graph language model (KGLM), a neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. 
We also introduce the Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata knowledge graph whose contents (roughly) match the popular WikiText-2 benchmark. In experiments, we demonstrate that the KGLM achieves significantly better performance than a strong baseline language model. We additionally compare different language model's ability to complete sentences requiring factual knowledge, showing that the KGLM outperforms even very large language models in generating facts.|sl:tag|tag:kd_mkb_biblio -fastai: A Layered API for Deep Learning Paper describing the fast.ai v2 API fastai is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. It aims to do both things without substantial compromises in ease of use, flexibility, or performance. This is possible thanks to a carefully layered architecture, which expresses common underlying patterns of many deep learning and data processing techniques in terms of decoupled abstractions. These abstractions can be expressed concisely and clearly by leveraging the dynamism of the underlying Python language and the flexibility of the PyTorch library. fastai includes: a new type dispatch system for Python along with a semantic type hierarchy for tensors; a GPU-optimized computer vision library which can be extended in pure Python; an optimizer which refactors out the common functionality of modern optimizers into two basic pieces, allowing optimization algorithms to be implemented in 4-5 lines of code; a novel 2-way callback system that can access any part of the data, model, or optimizer and change it at any point during training; a new data block API; and much more. We have used this library to successfully create a complete deep learning course, which we were able to write more quickly than using previous approaches, and the code was more clear. The library is already in wide use in research, industry, and teaching. NB: This paper covers fastai v2, which is currently in pre-release at http://dev.fast.ai/|sl:tag|tag:fast_ai -Shenzhen|skos:broader|Ville -Histropedia|skos:broader|Wikipedia -Semanlink todo|skos:broader|To do -Niger|skos:broader|Favoris -Néandertal|skos:broader|Paléontologie humaine -Representing Sentences as Low-Rank Subspaces We observe a simple geometry of sentences -- the word representations of a given sentence roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. A sentence of N words is a matrix (300, N) (if 300 is the dim of the word embeddings space). We take the eg. 4 (hyperparam) heaviest singular values - a subspace with dim 4 Similarity between docs: principal angle between the subspaces (reminiscent of cosine similarity) Sentences are important semantic units of natural language. A generic, distributional representation of sentences that can capture the latent semantics is beneficial to multiple downstream applications. We observe a simple geometry of sentences -- the word representations of a given sentence (on average 10.23 words in all SemEval datasets with a standard deviation 4.84) roughly lie in a low-rank subspace (roughly, rank 4). Motivated by this observation, we represent a sentence by the low-rank subspace spanned by its word vectors. 
Such an unsupervised representation is empirically validated via semantic textual similarity tasks on 19 different datasets, where it outperforms the sophisticated neural network models, including skip-thought vectors, by 15% on average.|sl:tag|tag:sentence_similarity -Coursera: Introduction to Data Science|skos:broader|Data science -Large scale distributed neural network training through online distillation we use codistillation to refer to distillation performed: 1. using the same architecture for all the models; 2. using the same dataset to train all the models; and 3. using the distillation loss during training before any model has fully converged. In general, we believe the quality gains of codistillation over well-tuned offline distillation will be minor in practice and the more interesting research direction is exploring codistillation as a distributed training algorithm Codistillation with the same data seems to be slightly better than the baseline, but codistillation using different data gets much better results. These results show that the codistilling models are indeed successfully transmitting useful information about different parts of the training data to each other. Related to [Deep mutual learning](doc:2020/05/1706_00384_deep_mutual_learni) paper Techniques such as ensembling and distillation promise model quality improvements when paired with almost any base model. However, due to increased test-time cost (for ensembles) and increased complexity of the training pipeline (for distillation), these techniques are challenging to use in industrial settings. In this paper we explore a variant of distillation which is relatively straightforward to use as it does not require a complicated multi-stage setup or many new hyperparameters. Our first claim is that online distillation enables us to use extra parallelism to fit very large datasets about twice as fast. Crucially, we can still speed up training even after we have already reached the point at which additional parallelism provides no benefit for synchronous or asynchronous stochastic gradient descent. Two neural networks trained on disjoint subsets of the data can share knowledge by encouraging each model to agree with the predictions the other model would have made. These predictions can come from a stale version of the other model so they can be safely computed using weights that only rarely get transmitted. Our second claim is that online distillation is a cost-effective way to make the exact predictions of a model dramatically more reproducible. We support our claims using experiments on the Criteo Display Ad Challenge dataset, ImageNet, and the largest to-date dataset used for neural language modeling, containing $6\\times 10^{11}$ tokens and based on the Common Crawl repository of web data.|sl:tag|tag:geoffrey_hinton -Unsupervised Learning of Sentence Embeddings using Compositional n-Gram Features The recent tremendous success of unsupervised word embeddings in a multitude of applications raises the obvious question if similar methods could be derived to improve embeddings (i.e. semantic representations) of word sequences as well. We present a simple but efficient unsupervised objective to train distributed representations of sentences. 
Our method outperforms the state-of-the-art unsupervised models on most benchmark tasks, highlighting the robustness of the produced general-purpose sentence embeddings.|sl:tag|tag:sif_embeddings -Ontology Mapping|skos:broader|Ontologies -Ministère de l'enseignement supérieur et de la recherche|skos:broader|Enseignement supérieur -MutualArt.com|skos:broader|Semantic Web : Business -Multiple KB|skos:broader|Knowledge Base -MIT|skos:broader|Universités américaines -AST workshop|skos:broader|Workshop -Nissan|skos:broader|Japon -Domain Knowledge + Deep Learning|skos:broader|Domain Knowledge in AI -Detecting Potential Topics In News Using BERT, CRF and Wikipedia For a news content distribution platform like Dailyhunt, Named Entity Recognition is a pivotal task for building better user recommendation and notification algorithms. Apart from identifying names, locations, organisations from the news for 13+ Indian languages and use them in algorithms, we also need to identify n-grams which do not necessarily fit in the definition of Named-Entity, yet they are important. For example, me too movement, beef ban, alwar mob lynching. In this exercise, given an English language text, we are trying to detect case-less n-grams which convey important information and can be used as topics and/or hashtags for a news. Model is built using Wikipedia titles data, private English news corpus and BERT-Multilingual pre-trained model, Bi-GRU and CRF architecture. It shows promising results when compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of F1 and especially Recall.|sl:arxiv_firstAuthor|Swapnil Ashok Jadhav -Data Warehouse|skos:broader|Enterprise Data -a theory of brain function created by Jeff Hawkins about mammalian neocortex. Role of the mammalian neocortex in matching sensory inputs to stored memory patterns, and how this process leads to predictions of what will happen in the future.|skos:broader|Machine learning focuses on prediction, based on known properties learned from the training data. Data mining (which is the analysis step of Knowledge Discovery in Databases) focuses on the discovery of (previously) unknown properties on the data. [Glossary (by google)](https://developers.google.com/machine-learning/glossary/) -Mac OS X|skos:broader|Apple -RDF editor|skos:broader|RDF -Nous vivons une époque moderne|skos:broader|Technologie -Tom Heath|skos:broader|Technical girls and guys -Suisse|skos:broader|Pays d'Europe -Linked Data / collaborative editing|skos:broader|Linked Data -2eme guerre mondiale|skos:broader|War -KGAT: Knowledge Graph Attention Network for Recommendation To provide more accurate, diverse, and explainable recommendation, it is compulsory to go beyond modeling user-item interactions and take side information into account. Traditional methods like factorization machine (FM) cast it as a supervised learning problem, which assumes each interaction as an independent instance with side information encoded. Due to the overlook of the relations among instances or items (e.g., the director of a movie is also an actor of another movie), these methods are insufficient to distill the collaborative signal from the collective behaviors of users. In this work, we investigate the utility of knowledge graph (KG), which breaks down the independent interaction assumption by linking items with their attributes. 
We argue that in such a hybrid structure of KG and user-item graph, high-order relations --- which connect two items with one or multiple linked attributes --- are an essential factor for successful recommendation. We propose a new method named Knowledge Graph Attention Network (KGAT) which explicitly models the high-order connectivities in KG in an end-to-end fashion. It recursively propagates the embeddings from a node's neighbors (which can be users, items, or attributes) to refine the node's embedding, and employs an attention mechanism to discriminate the importance of the neighbors. Our KGAT is conceptually advantageous to existing KG-based recommendation methods, which either exploit high-order relations by extracting paths or implicitly modeling them with regularization. Empirical results on three public benchmarks show that KGAT significantly outperforms state-of-the-art methods like Neural FM and RippleNet. Further studies verify the efficacy of embedding propagation for high-order relation modeling and the interpretability benefits brought by the attention mechanism.|sl:arxiv_author|Yixin Cao -Histoire de France|skos:broader|France -TabFact: A Large-scale Dataset for Table-based Fact Verification fact verification given semi-structured data as evidence The problem of verifying whether a textual hypothesis holds based on the given evidence, also known as fact verification, plays an important role in the study of natural language understanding and semantic representation. However, existing studies are mainly restricted to dealing with unstructured evidence (e.g., natural language sentences and documents, news, etc), while verification under structured evidence, such as tables, graphs, and databases, remains under-explored. This paper specifically aims to study the fact verification given semi-structured data as evidence. To this end, we construct a large-scale dataset called TabFact with 16k Wikipedia tables as the evidence for 118k human-annotated natural language statements, which are labeled as either ENTAILED or REFUTED. TabFact is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning. To address these reasoning challenges, we design two different models: Table-BERT and Latent Program Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language model to encode the linearized tables and statements into continuous vectors for verification. LPA parses statements into programs and executes them against the tables to obtain the returned binary value for verification. Both methods achieve similar accuracy but still lag far behind human performance. We also perform a comprehensive analysis to demonstrate great future opportunities. The data and code of the dataset are provided in \\url{https://github.com/wenhuchen/Table-Fact-Checking}.|sl:arxiv_author|Jianshu Chen -Car Options Ontology|skos:broader|Volkswagen -Stanford NER|skos:broader|NLP@Stanford -Number of neurons|skos:broader|Neurones -Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! 
Knowledge Graph an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia In this paper, we describe an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, learns complementary entity representations from their topology and content, and combines them with a lightweight learning-to-rank approach to recommend related entities on Wikipedia. Through offline and online evaluations, we show that the resulting embeddings and recommendations perform well in terms of quality and user engagement. Balancing simplicity and quality, this framework provides default entity recommendations for English and other languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of.|sl:arxiv_author|Nicolas Torzec -RDF Working Group|skos:broader|W3C -Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) Applied Data Scientists throughout various industries are commonly faced with the challenging task of encoding high-cardinality categorical features into digestible inputs for machine learning algorithms. This paper describes a Bayesian encoding technique developed for WeWork's lead scoring engine which outputs the probability of a person touring one of our office spaces based on interaction, enrichment, and geospatial data. We present a paradigm for ensemble modeling which mitigates the need to build complicated preprocessing and encoding schemes for categorical variables. In particular, domain-specific conjugate Bayesian models are employed as base learners for features in a stacked ensemble model. For each column of a categorical feature matrix we fit a problem-specific prior distribution, for example, the Beta distribution for a binary classification problem. In order to analytically derive the moments of the posterior distribution, we update the prior with the conjugate likelihood of the corresponding target variable for each unique value of the given categorical feature. This function of column and value encodes the categorical feature matrix so that the final learner in the ensemble model ingests low-dimensional numerical input. Experimental results on both curated and real world datasets demonstrate impressive accuracy and computational efficiency on a variety of problem archetypes. 
Particularly, for the lead scoring engine at WeWork -- where some categorical features have as many as 300,000 levels -- we have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate Bayesian model encoding.|sl:tag|tag:arxiv_doc -Pêche|skos:broader|Economie -Histoire de l'Afrique|skos:broader|Histoire -Grand Homme|skos:broader|Homme célèbre -Random forest|skos:broader|Ensemble learning -Yahoo - My Web 2.0|skos:broader|Social Networks -Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. Important because - BERT ist unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. - simple methods such as using the CLS token give low quality sentence embeddings However, the purpose of SBERT sentence embeddings are not to be used for transfer learning for other tasks. [Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers) BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.|sl:tag|tag:nearest_neighbor_search -Agriculture française|skos:broader|Economie française -Jena TDB|skos:broader|Jena and database -Aster Aweke|skos:broader|Musicien -African land grab|skos:broader|Accaparement des terres agricoles -Advances in Pre-Training Distributed Word Representations we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. 
The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.|sl:arxiv_author|Christian Puhrsch -TextBlob|skos:broader|Python-NLP -Unsupervised Transfer Learning for Spoken Language Understanding in Intelligent Agents We apply ELMo, ULMFiT (unsupervised transfer) with supervised transfer to reduce labeled data required for launching domains in Alexa by 10-15x User interaction with voice-powered agents generates large amounts of unlabeled utterances. In this paper, we explore techniques to efficiently transfer the knowledge from these unlabeled utterances to improve model performance on Spoken Language Understanding (SLU) tasks. We use Embeddings from Language Model (ELMo) to take advantage of unlabeled data by learning contextualized word representations. Additionally, we propose ELMo-Light (ELMoL), a faster and simpler unsupervised pre-training method for SLU. Our findings suggest unsupervised pre-training on a large corpora of unlabeled utterances leads to significantly better SLU performance compared to training from scratch and it can even outperform conventional supervised transfer. Additionally, we show that the gains from unsupervised transfer techniques can be further improved by supervised transfer. The improvements are more pronounced in low resource settings and when using only 1000 labeled in-domain samples, our techniques match the performance of training from scratch on 10-15x more labeled in-domain data.|sl:arxiv_author|Anuj Goyal -Néolithique|skos:broader|Préhistoire -Mathematica|skos:broader|Stephen Wolfram -Online Learning|skos:broader|Education -Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. These findings are significant as it suggests a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. 
All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|sl:tag|tag:allen_institute_for_ai_a2i -Graph neural networks|skos:broader|Graphs+Machine Learning -Jena TDB|skos:broader|SPARQL AND Jena -public-vocabs@w3.org|skos:broader|Mailing list -LinkNBed: Multi-Graph Representation Learning with Entity Linkage a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure We posit that combining graph alignment task with deep representation learning across multi-relational graphs has potential to induce a synergistic effect on both tasks Knowledge graphs have emerged as an important model for studying complex multi-relational data. This has given rise to the construction of numerous large scale but incomplete knowledge graphs encoding information extracted from various resources. An effective and scalable approach to jointly learn over multiple graphs and eventually construct a unified graph is a crucial next step for the success of knowledge-based inference for many downstream applications. To this end, we propose LinkNBed, a deep relational learning framework that learns entity and relationship representations across multiple graphs. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure. Experiments on link prediction and entity linkage demonstrate substantial improvements over the state-of-the-art relational learning approaches.|sl:tag|tag:ai_amazon -fps @ LDOW 2013|skos:broader|LDOW2013 -Sarkozy et la recherche|skos:broader|Sarkozy -WWW 2012|skos:broader|TheWebConf -Âge du bronze|skos:broader|Antiquité -LHC|skos:broader|Physique des particules -Taxonomies|skos:broader|Thesaurus & Taxonomies -A Primer in BERTology: What we know about how BERT works (article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) Transformer-based models are now widely used in NLP, but we still do not understand a lot about their inner workings. This paper describes what is known to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 analysis studies. We also provide an overview of the proposed modifications to the model and its training regime. We then outline the directions for further research.|sl:arxiv_author|Anna Rumshisky -Antiquité romaine|skos:broader|Antiquité -Job matching|skos:broader|NLP + Human Resources -Crise des banlieues|skos:broader|Société française Société française -Merisier|skos:broader|Arbres -Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. 
A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.|sl:tag|tag:arxiv_doc -esa|skos:broader|Union européenne -UNESCO|skos:broader|Institutions internationales -Common Web Language|skos:broader|W3C -Hierarchical tags|skos:broader|Semanlink related -NLP: short texts|skos:broader|NLP tasks / problems -Distilling the Knowledge in a Neural Network a different kind of training, which we call “distillation” to transfer the knowledge from the cumbersome model to a small model that is more suitable for deployment Caruana and his collaborators have shown that it is possible to compress the knowledge in an [#ensemble](/tag/ensemble_learning.html) into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST. A very simple way to improve the performance of almost any machine learning algorithm is to train many different models on the same data and then to average their predictions. Unfortunately, making predictions using a whole ensemble of models is cumbersome and may be too computationally expensive to allow deployment to a large number of users, especially if the individual models are large neural nets. Caruana and his collaborators have shown that it is possible to compress the knowledge in an ensemble into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST and we show that we can significantly improve the acoustic model of a heavily used commercial system by distilling the knowledge in an ensemble of models into a single model. We also introduce a new type of ensemble composed of one or more full models and many specialist models which learn to distinguish fine-grained classes that the full models confuse. Unlike a mixture of experts, these specialist models can be trained rapidly and in parallel.|sl:tag|tag:geoffrey_hinton -Cassandra|skos:broader|Database -Evolutionary computation|skos:broader|Artificial Intelligence -Enseignement en Afrique|skos:broader|Afrique -Ex URSS URSS|skos:broader|Communisme -PCA is a statistical procedure that converts a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components. - PCA is based on extracting the axes on which the data shows the highest variability. 
PCA can be done by eigenvalue decomposition of a data covariance matrix or singular value decomposition of a data matrix, usually after mean centering and normalizing the data matrix for each attribute|skos:broader|techniques (mostly unsupervised learning algorithms) that learn a feature: a transformation of raw data input to a representation that can be effectively exploited in machine learning tasks (= aim at discovering better representations of the inputs provided during training. Classical examples include principal components analysis and cluster analysis. Representation learning algorithms often attempt to preserve the information in their input but transform it in a way that makes it useful) -Everest|skos:broader|Himalaya -JSON-LD|skos:broader|RDF-in-JSON -Universal Sentence Encoder models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task mixes an unsupervised task using a large corpus together with the supervised SNLI task, leveraging the [#Transformer](/tag/attention_is_all_you_need) architecture We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.|sl:arxiv_author|Nan Hua -Obélisque|skos:broader|Architecture -Sarkozy : immigration|skos:broader|Sarkozy et extrème droite -Wembedder: Wikidata entity embedding web service web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk I present a web service for querying an embedding of entities in the Wikidata knowledge graph. The embedding is trained on the Wikidata dump using Gensim's Word2Vec implementation and a simple graph walk. A REST API is implemented. Together with the Wikidata API the web service exposes a multilingual resource for over 600'000 Wikidata items and properties.|sl:tag|tag:arxiv_doc -RDF blank nodes|skos:broader|RDF dev -SPARQL Tips|skos:broader|Tips -Afripedia|skos:broader|Wikipedia -Categorical Metadata Representation for Customized Text Classification We observe that current representation methods for categorical metadata... are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. 
We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category The performance of text classification has improved tremendously using intelligently engineered neural-based models, especially those injecting categorical metadata as additional information, e.g., using user/product information for sentiment classification. These information have been used to modify parts of the model (e.g., word embeddings, attention mechanisms) such that results can be customized according to the metadata. We observe that current representation methods for categorical metadata, which are devised for human consumption, are not as effective as claimed in popular classification methods, outperformed even by simple concatenation of categorical features in the final layer of the sentence encoder. We conjecture that categorical features are harder to represent for machine use, as available context only indirectly describes the category, and even such context is often scarce (for tail category). To this end, we propose to use basis vectors to effectively incorporate categorical metadata on various parts of a neural-based model. This additionally decreases the number of parameters dramatically, especially when the number of categorical features is large. Extensive experiments on various datasets with different properties are performed and show that through our method, we can represent categorical metadata more effectively to customize parts of the model, including unexplored ones, and increase the performance of the model greatly.|sl:tag|tag:arxiv_doc -StarSpace|skos:broader|AI@Facebook -Gilles Lepin|skos:broader|Technical girls and guys -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|sl:arxiv_author|Jose Camacho-Collados -Product Types Ontology|skos:broader|GoodRelations -Conversational AI|skos:broader|NLP: use cases -Neocortex|skos:broader|Brain -Learning by Abstraction: The Neural State Machine Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. 
Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)... We introduce the Neural State Machine, seeking to bridge the gap between the neural and symbolic views of AI and integrate their complementary strengths for the task of visual reasoning. Given an image, we first predict a probabilistic graph that represents its underlying semantics and serves as a structured world model. Then, we perform sequential reasoning over the graph, iteratively traversing its nodes to answer a given question or draw a new inference. In contrast to most neural architectures that are designed to closely interact with the raw sensory data, our model operates instead in an abstract latent space, by transforming both the visual and linguistic modalities into semantic concept-based representations, thereby achieving enhanced transparency and modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets that involve compositionality, multi-step inference and diverse reasoning skills, achieving state-of-the-art results in both cases. We provide further experiments that illustrate the model's strong generalization capacity across multiple dimensions, including novel compositions of concepts, changes in the answer distribution, and unseen linguistic structures, demonstrating the qualities and efficacy of our approach.|sl:arxiv_firstAuthor|Drew A. Hudson -Attention Is All You Need The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.|sl:arxiv_author|Aidan N. Gomez -Stanbol|skos:broader|apache.org -A Hierarchical Multi-task Approach for Learning Embeddings from Semantic Tasks [Blog post](https://medium.com/dair-ai/hmtl-multi-task-learning-for-state-of-the-art-nlp-245572bbb601), [GitHub repo](https://github.com/huggingface/hmtl) Much effort has been devoted to evaluate whether multi-task learning can be leveraged to learn rich representations that can be used in various Natural Language Processing (NLP) down-stream applications. 
However, there is still a lack of understanding of the settings in which multi-task learning has a significant effect. In this work, we introduce a hierarchical model trained in a multi-task learning setup on a set of carefully selected semantic tasks. The model is trained in a hierarchical fashion to introduce an inductive bias by supervising a set of low level tasks at the bottom layers of the model and more complex tasks at the top layers of the model. This model achieves state-of-the-art results on a number of tasks, namely Named Entity Recognition, Entity Mention Detection and Relation Extraction without hand-engineered features or external NLP tools like syntactic parsers. The hierarchical training supervision induces a set of shared semantic representations at lower layers of the model. We show that as we move from the bottom to the top layers of the model, the hidden states of the layers tend to represent more complex semantic information.|sl:tag|tag:multi_task_learning -OWLED 2007 AND fps|skos:broader|fps -Asie|skos:broader|Géographie -Knowledge Graphs Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, lead by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su)) In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After a general introduction, we motivate and contrast various graph-based data models and query languages that are used for knowledge graphs. We discuss the roles of schema, identity, and context in knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We summarise methods for the creation, enrichment, quality assessment, refinement, and publication of knowledge graphs. We provide an overview of prominent open knowledge graphs and enterprise knowledge graphs, their applications, and how they use the aforementioned techniques. We conclude with high-level future research directions for knowledge graphs.|sl:arxiv_author|Roberto Navigli -Deep Learning for Symbolic Mathematics Neural networks have a reputation for being better at solving statistical or approximate problems than at performing calculations or working with symbolic data. In this paper, we show that they can be surprisingly good at more elaborated tasks in mathematics, such as symbolic integration and solving differential equations. We propose a syntax for representing mathematical problems, and methods for generating large datasets that can be used to train sequence-to-sequence models. We achieve results that outperform commercial Computer Algebra Systems such as Matlab or Mathematica.|sl:arxiv_firstAuthor|Guillaume Lample -Word2Bits - Quantized Word Vectors We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. We furthermore show that training with the quantization function acts as a regularizer Word vectors require significant amounts of memory and storage, posing issues to resource limited devices like mobile phones and GPUs. We show that high quality quantized word vectors using 1-2 bits per parameter can be learned by introducing a quantization function into Word2Vec. 
We furthermore show that training with the quantization function acts as a regularizer. We train word vectors on English Wikipedia (2017) and evaluate them on standard word similarity and analogy tasks and on question answering (SQuAD). Our quantized word vectors not only take 8-16x less space than full precision (32 bit) word vectors but also outperform them on word similarity tasks and question answering.|sl:tag|tag:arxiv_doc -Hugging Face|skos:broader|NLP Teams -Jeremy Carroll|skos:broader|SW guys (and girls) -Hello Edge: Keyword Spotting on Microcontrollers Keyword spotting (KWS) is a critical component for enabling speech based user interactions on smart devices. It requires real-time response and high accuracy for good user experience. Recently, neural networks have become an attractive choice for KWS architecture because of their superior accuracy compared to traditional speech processing algorithms. Due to its always-on nature, KWS application has highly constrained power budget and typically runs on tiny microcontrollers with limited memory and compute capability. The design of neural network architecture for KWS must consider these constraints. In this work, we perform neural network architecture evaluation and exploration for running KWS on resource-constrained microcontrollers. We train various neural network architectures for keyword spotting published in literature to compare their accuracy and memory/compute requirements. We show that it is possible to optimize these neural network architectures to fit within the memory and compute constraints of microcontrollers without sacrificing accuracy. We further explore the depthwise separable convolutional neural network (DS-CNN) and compare it against other neural network architectures. DS-CNN achieves an accuracy of 95.4%, which is ~10% higher than the DNN model with similar number of parameters.|sl:tag|tag:keyword_spotting -JPL|skos:broader|NASA -Speech-to-Text|skos:broader|Sequence-to-sequence learning -Transfer Learning for Sequence Labeling Using Source Model and Target Data use-case ex: NER when the target data contains new categories In this paper, we propose an approach for transferring the knowledge of a neural model for sequence labeling, learned from the source domain, to a new model trained on a target domain, where new label categories appear. Our transfer learning (TL) techniques enable to adapt the source model using the target data and new categories, without accessing to the source data. Our solution consists in adding new neurons in the output layer of the target model and transferring parameters from the source model, which are then fine-tuned with the target data. Additionally, we propose a neural adapter to learn the difference between the source and the target label distribution, which provides additional important information to the target model. Our experiments on Named Entity Recognition show that (i) the learned knowledge in the source model can be effectively transferred when the target data contains new categories and (ii) our neural adapter further improves such transfer.|sl:arxiv_author|Lingzhen Chen -Span Selection Pre-training for Question Answering a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself Current transformer architectures store general knowledge - large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. 
Span selection as an additional auxiliary task: the query is a sentence drawn from a corpus with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is relevant as determined by a BM25 search, and answer-bearing (containing the answer term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage using language understanding. We hope to progress to a model of general purpose language modeling that uses an indexed long term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers. BERT (Bidirectional Encoder Representations from Transformers) and related pre-trained Transformers have provided large gains across many language understanding tasks, achieving a new state-of-the-art (SOTA). BERT is pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence Prediction. In this paper we introduce a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself. We find significant and consistent improvements over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and paraphrasing datasets. Specifically, our proposed model has strong empirical evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 points and supporting fact prediction by 1 F1 point. Moreover, we show that our pre-training approach is particularly effective when training data is limited, improving the learning curve by a large amount.|sl:arxiv_author|Michael Glass -Mental typewriter|skos:broader|Thought alone controlled device -Pablo Neruda|skos:broader|Chili -Regroupement familial|skos:broader|Immigration familiale -Long documents|skos:broader|NLP tasks / problems -Norvège|skos:broader|Scandinavie -Bat|skos:broader|Animal -Graph Convolution for Multimodal Information Extraction from Visually Rich Documents Visually rich documents (VRDs) are ubiquitous in daily business and life. Examples are purchase receipts, insurance policy documents, custom declaration forms and so on. In VRDs, visual and layout information is critical for document understanding, and texts in such documents cannot be serialized into the one-dimensional sequence without losing information. Classic information extraction models such as BiLSTM-CRF typically operate on text sequences and do not incorporate visual features. In this paper, we introduce a graph convolution based model to combine textual and visual information presented in VRDs. Graph embeddings are trained to summarize the context of a text segment in the document, and further combined with text embeddings for entity extraction. Extensive experiments have been conducted to show that our method outperforms BiLSTM-CRF baselines by significant margins, on two real-world datasets. 
Additionally, ablation studies are also performed to evaluate the effectiveness of each component of our model.|sl:arxiv_author|Qiong Zhang -Patti Smith|skos:broader|Musicien -Brain-Machine Interface|skos:broader|Neuroscience -CIMBA|skos:broader|Microblogs -sindice|skos:broader|Semantic Web : Tools -Tutorial|skos:broader|Dev -Politique monétaire|skos:broader|Economie -NOSQL vs SQL|skos:broader|NOSQL -Logic and semantic web|skos:broader|Logic -OSEMA 2011|skos:broader|Workshop -Facebook Graph Search|skos:broader|Facebook -SOAP|skos:broader|Web Services -Large-scale Multi-label Learning with Missing Labels The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions - such as the squared loss function - to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.|sl:arxiv_author|Hsiang-Fu Yu -Towards a Seamless Integration of Word Senses into Downstream NLP Applications Lexical ambiguity can impede NLP systems from accurate understanding of semantics. Despite its potential benefits, the integration of sense-level information into NLP systems has remained understudied. By incorporating a novel disambiguation algorithm into a state-of-the-art classification model, we create a pipeline to integrate sense-level information into downstream NLP applications. We show that a simple disambiguation of the input text can lead to consistent performance improvement on multiple topic categorization and polarity detection datasets, particularly when the fine granularity of the underlying sense inventory is reduced and the document is sufficiently large. Our results also point to the need for sense representation research to focus more on in vivo evaluations which target the performance in downstream NLP applications rather than artificial benchmarks.|sl:tag|tag:sense_embeddings -t-SNE|skos:broader|Data visualisation -Knowledge Graph Embeddings and Explainable AI survey of - the state-of-the-art in the field of knowledge graph embeddings - methods for explaining predictions obtained via knowledge graph embeddings. Knowledge graph embeddings are now a widely adopted approach to knowledge representation in which entities and relationships are embedded in vector spaces. 
In this chapter, we introduce the reader to the concept of knowledge graph embeddings by explaining what they are, how they can be generated and how they can be evaluated. We summarize the state-of-the-art in this field by describing the approaches that have been introduced to represent knowledge in the vector space. In relation to knowledge representation, we consider the problem of explainability, and discuss models and methods for explaining predictions obtained via knowledge graph embeddings.|sl:arxiv_firstAuthor|Federico Bianchi -Grève du sexe|skos:broader|Grève -Neural Architectures for Named Entity Recognition Neural architectures for NER that use no language-specific resources or features beyond a small amount of supervised training data and unlabeled corpora. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora State-of-the-art named entity recognition systems rely heavily on hand-crafted features and domain-specific knowledge in order to learn effectively from the small, supervised training corpora that are available. In this paper, we introduce two new neural architectures---one based on bidirectional LSTMs and conditional random fields, and the other that constructs and labels segments using a transition-based approach inspired by shift-reduce parsers. Our models rely on two sources of information about words: character-based word representations learned from the supervised corpus and unsupervised word representations learned from unannotated corpora. Our models obtain state-of-the-art performance in NER in four languages without resorting to any language-specific knowledge or resources such as gazetteers.|sl:arxiv_author|Miguel Ballesteros -Sursauts gamma|skos:broader|Rayons cosmiques -Describing a Knowledge Base We aim to automatically generate natural language descriptions about an input structured knowledge base (KB). We build our generation framework based on a pointer network which can copy facts from the input KB, and add two attention mechanisms: (i) slot-aware attention to capture the association between a slot type and its corresponding slot value; and (ii) a new \\emph{table position self-attention} to capture the inter-dependencies among related slots. For evaluation, besides standard metrics including BLEU, METEOR, and ROUGE, we propose a KB reconstruction based metric by extracting a KB from the generation output and comparing it with the input KB. We also create a new data set which includes 106,216 pairs of structured KBs and their corresponding natural language descriptions for two distinct entity types. Experiments show that our approach significantly outperforms state-of-the-art methods. The reconstructed KB achieves 68.8% - 72.6% F-score.|sl:arxiv_author|Zhiying Jiang -GloVe|skos:broader|NLP tools -Argentine|skos:broader|Amérique du sud -"Post-Vérité"|skos:broader|Vérité -ACL 2020|skos:broader|ACL -Retrofitting Word Vectors to Semantic Lexicons Method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed.
Graph-based learning technique for using lexical relational resources to obtain higher quality semantic vectors, which we call “retrofitting.” Retrofitting is applied as a post-processing step by running belief propagation on a graph constructed from lexicon-derived relational information to update word vectors. This allows retrofitting to be used on pre-trained word vectors obtained using any vector training model. [github](https://github.com/mfaruqui/retrofitting) Vector space word representations are learned from distributional information of words in large corpora. Although such statistics are semantically informative, they disregard the valuable information that is contained in semantic lexicons such as WordNet, FrameNet, and the Paraphrase Database. This paper proposes a method for refining vector space representations using relational information from semantic lexicons by encouraging linked words to have similar vector representations, and it makes no assumptions about how the input vectors were constructed. Evaluated on a battery of standard lexical semantic evaluation tasks in several languages, we obtain substantial improvements starting with a variety of word vector models. Our refinement method outperforms prior techniques for incorporating semantic lexicons into the word vector training algorithms.|sl:arxiv_author|Noah A. Smith -AWS|skos:broader|Amazon -Pablo Neruda|skos:broader|Poète -DeepType: Multilingual Entity Linking by Neural Type System Evolution The wealth of structured (e.g. Wikidata) and unstructured data about the world available today presents an incredible opportunity for tomorrow's Artificial Intelligence. So far, integration of these two different modalities is a difficult process, involving many decisions concerning how best to represent the information so that it will be captured or useful, and hand-labeling large amounts of data. DeepType overcomes this challenge by explicitly integrating symbolic information into the reasoning process of a neural network with a type system. First we construct a type system, and second, we use it to constrain the outputs of a neural network to respect the symbolic structure. We achieve this by reformulating the design problem into a mixed integer problem: create a type system and subsequently train a neural network with it. In this reformulation discrete variables select which parent-child relations from an ontology are types within the type system, while continuous variables control a classifier fit to the type system. The original problem cannot be solved exactly, so we propose a 2-step algorithm: 1) heuristic search or stochastic optimization over discrete variables that define a type system informed by an Oracle and a Learnability heuristic, 2) gradient descent to fit classifier parameters. We apply DeepType to the problem of Entity Linking on three standard datasets (i.e. WikiDisamb30, CoNLL (YAGO), TAC KBP 2010) and find that it outperforms all existing solutions by a wide margin, including approaches that rely on a human-designed type system or recent deep learning-based entity embeddings, while explicitly using symbolic information lets it integrate new entities without retraining.|sl:tag|tag:arxiv_doc -OKFN Datahub|skos:broader|Open Data -Representation learning for very short texts using weighted word embedding aggregation A method based on word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. 
[github](https://github.com/cedricdeboom/RepresentationLearningGithub) (hmm...) (python code) Short text messages such as tweets are very noisy and sparse in their use of vocabulary. Traditional textual representations, such as tf-idf, have difficulty grasping the semantic meaning of such texts, which is important in applications such as event detection, opinion mining, news recommendation, etc. We constructed a method based on semantic word embeddings and frequency information to arrive at low-dimensional representations for short texts designed to capture semantic similarity. For this purpose we designed a weight-based model and a learning procedure based on a novel median-based loss function. This paper discusses the details of our model and the optimization methods, together with the experimental results on both Wikipedia and Twitter data. We find that our method outperforms the baseline approaches in the experiments, and that it generalizes well on different word embeddings without retraining. Our method is therefore capable of retaining most of the semantic information in the text, and is applicable out-of-the-box.|sl:tag|tag:tf_idf -The information bottleneck method We define the relevant information in a signal x ∈ X as being the information that this signal provides about another signal y ∈ Y. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal x requires more than just predicting y, it also requires specifying which features of X play a role in the prediction. We formalize this problem as that of finding a short code for X that preserves the maximum information about Y. That is, we squeeze the information that X provides about Y through a ‘bottleneck’ formed by a limited set of codewords X̃... This approach yields an exact set of self consistent equations for the coding rules X → X̃ and X̃ → Y. (from the intro): how to define meaningful / relevant information? An issue left out of information theory by Shannon (focus on the problem of transmitting information rather than judging its value to the recipient) - leads to considering statistical and information theoretic principles as almost irrelevant for the question of meaning. In contrast, we argue here that information theory, in particular lossy source compression, provides a natural quantitative approach to the question of “relevant information.” Specifically, we formulate a variational principle for the extraction or efficient representation of relevant information. We define the relevant information in a signal $x\\in X$ as being the information that this signal provides about another signal $y\\in \\Y$. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal $x$ requires more than just predicting $y$, it also requires specifying which features of $\\X$ play a role in the prediction. We formalize this problem as that of finding a short code for $\\X$ that preserves the maximum information about $\\Y$. That is, we squeeze the information that $\\X$ provides about $\\Y$ through a `bottleneck' formed by a limited set of codewords $\\tX$. This constrained optimization problem can be seen as a generalization of rate distortion theory in which the distortion measure $d(x,\\x)$ emerges from the joint statistics of $\\X$ and $\\Y$.
This approach yields an exact set of self consistent equations for the coding rules $X \\to \\tX$ and $\\tX \\to \\Y$. Solutions to these equations can be found by a convergent re-estimation method that generalizes the Blahut-Arimoto algorithm. Our variational principle provides a surprisingly rich framework for discussing a variety of problems in signal processing and learning, as will be described in detail elsewhere.|sl:tag|tag:naftali_tishby -RotatE|skos:broader|Knowledge Graph Completion -Bill Joy|skos:broader|Technical girls and guys -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Kelsey Allen -BERT-of-Theseus: Compressing BERT by Progressive Module Replacing approach to compress BERT by progressive module replacing. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter [Github](https://github.com/JetRunner/BERT-of-Theseus) In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. 
Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models, and smooths the training process. Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter, liberating human effort from hyper-parameter tuning. Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.|sl:tag|tag:nlp_microsoft -Variational Inference: A Review for Statisticians One of the core problems of modern statistics is to approximate difficult-to-compute probability densities. This problem is especially important in Bayesian statistics, which frames all inference about unknown quantities as a calculation involving the posterior density. In this paper, we review variational inference (VI), a method from machine learning that approximates probability densities through optimization. VI has been used in many applications and tends to be faster than classical methods, such as Markov chain Monte Carlo sampling. The idea behind VI is to first posit a family of densities and then to find the member of that family which is close to the target. Closeness is measured by Kullback-Leibler divergence. We review the ideas behind mean-field variational inference, discuss the special case of VI applied to exponential family models, present a full example with a Bayesian mixture of Gaussians, and derive a variant that uses stochastic optimization to scale up to massive data. We discuss modern research in VI and highlight important open problems. VI is powerful, but it is not yet well understood. Our hope in writing this paper is to catalyze statistical research on this class of algorithms.|sl:arxiv_firstAuthor|David M. Blei -Wiki service|skos:broader|Wiki -Machine Learning Basics|skos:broader|Pour les nuls -Bringing Light Into the Dark: A Large-scale Evaluation of Knowledge Graph Embedding Models Under a Unified Framework The heterogeneity in recently published knowledge graph embedding models' implementations, training, and evaluation has made fair and thorough comparisons difficult. In order to assess the reproducibility of previously published results, we re-implemented and evaluated 19 interaction models in the PyKEEN software package. Here, we outline which results could be reproduced with their reported hyper-parameters, which could only be reproduced with alternate hyper-parameters, and which could not be reproduced at all as well as provide insight as to why this might be the case. We then performed a large-scale benchmarking on four datasets with several thousands of experiments and 21,246 GPU hours of computation time. We present insights gained as to best practices, best configurations for each model, and where improvements could be made over previously published best configurations. Our results highlight that the combination of model architecture, training approach, loss function, and the explicit modeling of inverse relations is crucial for a model's performances, and not only determined by the model architecture. We provide evidence that several architectures can obtain results competitive to the state-of-the-art when configured carefully. 
We have made all code, experimental configurations, results, and analyses that lead to our interpretations available at https://github.com/pykeen/pykeen and https://github.com/pykeen/benchmarking|sl:arxiv_author|Mikhail Galkin -Explainable Deep Learning: A Field Guide for the Uninitiated Deep neural network (DNN) is an indispensable machine learning tool for achieving human-level performance on many learning tasks. Yet, due to its black-box nature, it is inherently difficult to understand which aspects of the input data drive the decisions of the network. There are various real-world scenarios in which humans need to make actionable decisions based on the output DNNs. Such decision support systems can be found in critical domains, such as legislation, law enforcement, etc. It is important that the humans making high-level decisions can be sure that the DNN decisions are driven by combinations of data features that are appropriate in the context of the deployment of the decision support system and that the decisions made are legally or ethically defensible. Due to the incredible pace at which DNN technology is being developed, the development of new methods and studies on explaining the decision-making process of DNNs has blossomed into an active research field. A practitioner beginning to study explainable deep learning may be intimidated by the plethora of orthogonal directions the field is taking. This complexity is further exacerbated by the general confusion that exists in defining what it means to be able to explain the actions of a deep learning system and to evaluate a system's ability to explain. To alleviate this problem, this article offers a field guide to deep learning explainability for those uninitiated in the field. The field guide: i) Discusses the traits of a deep learning system that researchers enhance in explainability research, ii) places explainability in the context of other related deep learning research areas, and iii) introduces three simple dimensions defining the space of foundational methods that contribute to explainable deep learning. The guide is designed as an easy-to-digest starting point for those just embarking in the field.|sl:arxiv_author|Derek Doran -NEPOMUK|skos:broader|Semantic Desktop -techniques that make use of the spectrum (eigenvalues) of the similarity matrix of the data to perform dimensionality reduction before clustering in fewer dimensions.|skos:broader|process of reducing the number of random variables under consideration. Can be divided into feature selection and feature extraction. -Concept Bottleneck Models We seek to learn models that we can interact with using high-level concepts... We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction... These models allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time. We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? 
State-of-the-art models today do not typically support the manipulation of concepts like the existence of bone spurs, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these \\emph{concept bottleneck models} by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (bone spurs) or bird attributes (wing color). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.|sl:arxiv_author|Thao Nguyen -Sommet de Copenhague|skos:broader|Climate crisis -A Call for More Rigor in Unsupervised Cross-lingual Learning a scenario without any parallel data and abundant monolingual data is unrealistic in practice We review motivations, definition, approaches, and methodology for unsupervised cross-lingual learning and call for a more rigorous position in each of them. An existing rationale for such research is based on the lack of parallel data for many of the world's languages. However, we argue that a scenario without any parallel data and abundant monolingual data is unrealistic in practice. We also discuss different training signals that have been used in previous work, which depart from the pure unsupervised setting. We then describe common methodological issues in tuning and evaluation of unsupervised cross-lingual models and present best practices. Finally, we provide a unified outlook for different types of research in this area (i.e., cross-lingual word embeddings, deep multilingual pretraining, and unsupervised machine translation) and argue for comparable evaluation of these models.|sl:tag|tag:ml_evaluation -NLP as a service|skos:broader|NLP tools -Relational inductive biases, deep learning, and graph networks generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI Here we explore how to improve modern AI's capacity for combinatorial generalization by biasing learning towards structured representations and computations, and in particular, systems that operate on graphs. Artificial intelligence (AI) has undergone a renaissance recently, making major progress in key domains such as vision, language, control, and decision-making. This has been due, in part, to cheap data and cheap compute resources, which have fit the natural strengths of deep learning. However, many defining characteristics of human intelligence, which developed under much different pressures, remain out of reach for current approaches. In particular, generalizing beyond one's experiences--a hallmark of human intelligence from infancy--remains a formidable challenge for modern AI. The following is part position paper, part review, and part unification. We argue that combinatorial generalization must be a top priority for AI to achieve human-like abilities, and that structured representations and computations are key to realizing this objective. 
Just as biology uses nature and nurture cooperatively, we reject the false choice between hand-engineering and end-to-end learning, and instead advocate for an approach which benefits from their complementary strengths. We explore how using relational inductive biases within deep learning architectures can facilitate learning about entities, relations, and rules for composing them. We present a new building block for the AI toolkit with a strong relational inductive bias--the graph network--which generalizes and extends various approaches for neural networks that operate on graphs, and provides a straightforward interface for manipulating structured knowledge and producing structured behaviors. We discuss how graph networks can support relational reasoning and combinatorial generalization, laying the foundation for more sophisticated, interpretable, and flexible patterns of reasoning. As a companion to this paper, we have released an open-source software library for building graph networks, with demonstrations of how to use them in practice.|sl:arxiv_author|Mateusz Malinowski -Jean Rohmer|skos:broader|Technical girls and guys -SEO|skos:broader|Search Engines -CoKE: Contextualized Knowledge Graph Embedding A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.. [Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE) Knowledge graph embedding, which projects symbolic entities and relations into continuous vector spaces, is gaining increasing attention. Previous methods allow a single static embedding for each entity or relation, ignoring their intrinsic contextual nature, i.e., entities and relations may appear in different graph contexts, and accordingly, exhibit different properties. This work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm that takes into account such contextual nature, and learns dynamic, flexible, and fully contextualized entity and relation embeddings. Two types of graph contexts are studied: edges and paths, both formulated as sequences of entities and relations. CoKE takes a sequence as input and uses a Transformer encoder to obtain contextualized representations. These representations are hence naturally adaptive to the input, capturing contextual meanings of entities and relations therein. Evaluation on a wide variety of public benchmarks verifies the superiority of CoKE in link prediction and path query answering. It performs consistently better than, or at least equally well as current state-of-the-art in almost every case, in particular offering an absolute improvement of 21.0% in H@10 on path query answering. 
Our code is available at \\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}.|sl:tag|tag:attention_knowledge_graphs -OWL tool|skos:broader|OWL -Huge RDF data source|skos:broader|RDF Data source -Java 8|skos:broader|Java -Maâli Mnasri|skos:broader|NLP girls and guys -Curiosité naturelle|skos:broader|Nature -Coupe du monde 1998|skos:broader|Coupe du monde de football -Rachel Thomas|skos:broader|AI girls and guys -Entity mining|skos:broader|Named Entity Recognition -Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. In practice, some questions are best answered using text, while others are best answered using KBs. A natural question, then, is how to effectively combine both types of information. Surprisingly little prior work has looked at this problem. Open Domain Question Answering (QA) is evolving from complex pipelined systems to end-to-end deep neural networks. Specialized neural models have been developed for extracting answers from either text alone or Knowledge Bases (KBs) alone. In this paper we look at a more practical setting, namely QA over the combination of a KB and entity-linked text, which is appropriate when an incomplete KB is available with a large text corpus. Building on recent advances in graph representation learning we propose a novel model, GRAFT-Net, for extracting answers from a question-specific subgraph containing text and KB entities and relations. We construct a suite of benchmark tasks for this problem, varying the difficulty of questions, the amount of training data, and KB completeness. We show that GRAFT-Net is competitive with the state-of-the-art when tested using either KBs or text alone, and vastly outperforms existing methods in the combined setting. Source code is available at https://github.com/OceanskySun/GraftNet .|sl:arxiv_author|Bhuwan Dhingra -Médicaments génériques|skos:broader|Industrie pharmaceutique -Similarity Search for Efficient Active Learning and Search of Rare Concepts Similarity search for Efficient Active Learning and Search (SEALS) In [Active Learning](tag:active_learning): instead of searching globally for the optimal examples to label, leverage the fact that data is often heavily skewed and expand the candidate pool with the nearest neighbors of the labeled set. Our work attacks both the labeling and computational costs of machine learning...SEALS dramatically reduces the barrier to machine learning, enabling small teams or individuals to build accurate classifiers. SEALS does, however, introduce another system component, a similarity search index, which adds some additional engineering complexity to build, tune, and maintain. Fortunately, several highly optimized implementations like Annoy and [Faiss](doc:2020/06/facebookresearch_faiss_a_libra) work reasonably well out of the box. Many active learning and search approaches are intractable for industrial settings with billions of unlabeled examples. Existing approaches, such as uncertainty sampling or information density, search globally for the optimal examples to label, scaling linearly or even quadratically with the unlabeled data. However, in practice, data is often heavily skewed; only a small fraction of collected data will be relevant for a given learning task. 
For example, when identifying rare classes, detecting malicious content, or debugging model performance, the ratio of positive to negative examples can be 1 to 1,000 or more. In this work, we exploit this skew in large training datasets to reduce the number of unlabeled examples considered in each selection round by only looking at the nearest neighbors to the labeled examples. Empirically, we observe that learned representations effectively cluster unseen concepts, making active learning very effective and substantially reducing the number of viable unlabeled examples. We evaluate several active learning and search techniques in this setting on three large-scale datasets: ImageNet, Goodreads spoiler detection, and OpenImages. For rare classes, active learning methods need as little as 0.31% of the labeled data to match the average precision of full supervision. By limiting active learning methods to only consider the immediate neighbors of the labeled data as candidates for labeling, we need only process as little as 1% of the unlabeled data while achieving similar reductions in labeling costs as the traditional global approach. This process of expanding the candidate pool with the nearest neighbors of the labeled set can be done efficiently and reduces the computational complexity of selection by orders of magnitude.|sl:tag|tag:arxiv_doc -AWS Machine Learning|skos:broader|AWS -Self-Taught Convolutional Neural Networks for Short Text Clustering We propose a flexible short text clustering framework which explores the feasibility and effectiveness of combining CNN and traditional unsupervised dimensionality reduction methods. Non-biased deep feature representations can be learned through our self-taught CNN framework which does not use any external tags/labels or complicated NLP pre-processing. The original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations. [conf paper, same authors](http://www.aclweb.org/anthology/W15-1509) ; [github repo (matlab)](https://github.com/jacoxu/STC2) Short text clustering is a challenging problem due to its sparseness of text representation. Here we propose a flexible Self-Taught Convolutional neural network framework for Short Text Clustering (dubbed STC^2), which can flexibly and successfully incorporate more useful semantic features and learn non-biased deep text representation in an unsupervised manner. In our framework, the original raw text features are firstly embedded into compact binary codes by using one existing unsupervised dimensionality reduction methods. Then, word embeddings are explored and fed into convolutional neural networks to learn deep feature representations, meanwhile the output units are used to fit the pre-trained binary codes in the training process. Finally, we get the optimal clusters by employing K-means to cluster the learned representations.
Extensive experimental results demonstrate that the proposed framework is effective, flexible and outperform several popular clustering methods when tested on three public short text datasets.|sl:tag|tag:convolutional_neural_network_and_nn_4_nlp -Greffe de tête|skos:broader|Nous vivons une époque moderne -Neural networks|skos:broader|Data mining -Web tools|skos:broader|Web -Himalaya|skos:broader|Asie -Kurdes|skos:broader|Peuples -Crise des subprimes|skos:broader|Crise financière -GLoMo: Unsupervisedly Learned Relational Graphs as Transferable Representations Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Modern deep transfer learning approaches have mainly focused on learning generic feature vectors from one task that are transferable to other tasks, such as word embeddings in language and pretrained convolutional features in vision. However, these approaches usually transfer unary features and largely ignore more structured graphical representations. This work explores the possibility of learning generic latent relational graphs that capture dependencies between pairs of data units (e.g., words or pixels) from large-scale unlabeled data and transferring the graphs to downstream tasks. Our proposed transfer learning framework improves performance on various tasks including question answering, natural language inference, sentiment analysis, and image classification. We also show that the learned graphs are generic enough to be transferred to different embeddings on which the graphs have not been trained (including GloVe embeddings, ELMo embeddings, and task-specific RNN hidden unit), or embedding-free units such as image pixels.|sl:arxiv_author|Jake Zhao -Entity embeddings|skos:broader|Embeddings -Bernard Vatant|skos:broader|SW guys (and girls) -Négociations climat |skos:broader|Diplomatie -IKS Workshop Salzburg 2012|skos:broader|Interactive Knowledge Stack -Lego|skos:broader|Jeux -POWDER|skos:broader|Semantic Web -A Brief Introduction to Machine Learning for Engineers This monograph aims at providing an introduction to key concepts, algorithms, and theoretical results in machine learning. The treatment concentrates on probabilistic models for supervised and unsupervised learning problems. It introduces fundamental concepts and algorithms by building on first principles, while also exposing the reader to more advanced topics with extensive pointers to the literature, within a unified notation and mathematical framework. The material is organized according to clearly defined categories, such as discriminative and generative models, frequentist and Bayesian approaches, exact and approximate inference, as well as directed and undirected models. This monograph is meant as an entry point for researchers with a background in probability and linear algebra.|sl:arxiv_author|Osvaldo Simeone -Semantic Folding Theory And its Application in Semantic Fingerprinting Human language is recognized as a very complex domain since decades. 
No computer system has been able to reach human levels of performance so far. The only known computational system capable of proper language processing is the human brain. While we gather more and more data about the brain, its fundamental computational processes still remain obscure. The lack of a sound computational brain theory also prevents the fundamental understanding of Natural Language Processing. As always when science lacks a theoretical foundation, statistical modeling is applied to accommodate as many sampled real-world data as possible. An unsolved fundamental issue is the actual representation of language (data) within the brain, denoted as the Representational Problem. Starting with Jeff Hawkins' Hierarchical Temporal Memory (HTM) theory, a consistent computational theory of the human cortex, we have developed a corresponding theory of language data representation: The Semantic Folding Theory. The process of encoding words, by using a topographic semantic space as distributional reference frame into a sparse binary representational vector is called Semantic Folding and is the central topic of this document. Semantic Folding describes a method of converting language from its symbolic representation (text) into an explicit, semantically grounded representation that can be generically processed by Hawkins' HTM networks. As it turned out, this change in representation, by itself, can solve many complex NLP problems by applying Boolean operators and a generic similarity function like the Euclidian Distance. Many practical problems of statistical NLP systems, like the high cost of computation, the fundamental incongruity of precision and recall , the complex tuning procedures etc., can be elegantly overcome by applying Semantic Folding.|sl:tag|tag:semantic_fingerprints -DuckDuckGo|skos:broader|Privacy and internet -Bertology|skos:broader|BERT -Sequence-to-sequence learning|skos:broader|Machine learning: problems -Elevage industriel|skos:broader|Elevage -Quantum computing|skos:broader|NTIC -Chrome|skos:broader|Google -Transductive SVM|skos:broader|Transductive Learning -Transformers as Soft Reasoners over Language AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. AI has long pursued the goal of having systems reason over explicitly provided knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but using rules expressed in language, thus bypassing a formal representation. We provide the first demonstration that this is possible, and characterize the extent of this capability. To do this, we use a collection of synthetic datasets that test increasing levels of reasoning complexity (number of rules, presence of negation, and depth of chaining). We find transformers appear to learn rule-based reasoning with high (99%) accuracy on these datasets, and in a way that generalizes to test data requiring substantially deeper chaining than in the training data (95%+ scores). We also demonstrate that the models transfer well to two hand-authored rulebases, and to rulebases paraphrased into more natural language. 
These findings are significant as it suggests a new role for transformers, namely as a limited soft theorem prover operating over explicit theories in language. This in turn suggests new possibilities for explainability, correctability, and counterfactual reasoning in question-answering. All datasets and a live demo are available at http://rule-reasoning.apps.allenai.org/|sl:tag|tag:knowledge_representation -Dremel|skos:broader|Big Data -Rupert Westenthaler|skos:broader|SW guys (and girls) -End-to-End Neural Entity Linking We presented the first neural end-to-end entity linking model and show the benefit of jointly optimizing entity recognition and linking. Leveraging key components, namely word, entity and mention embeddings, we prove that engineered features can be almost completely replaced by modern neural networks. Entity Linking (EL) is an essential task for semantic text understanding and information extraction. Popular methods separately address the Mention Detection (MD) and Entity Disambiguation (ED) stages of EL, without leveraging their mutual dependency. We here propose the first neural end-to-end EL system that jointly discovers and links entities in a text document. The main idea is to consider all possible spans as potential mentions and learn contextual similarity scores over their entity candidates that are useful for both MD and ED decisions. Key components are context-aware mention embeddings, entity embeddings and a probabilistic mention - entity map, without demanding other engineered features. Empirically, we show that our end-to-end method significantly outperforms popular systems on the Gerbil platform when enough training data is available. Conversely, if testing datasets follow different annotation conventions compared to the training set (e.g. 
queries/ tweets vs news documents), our ED model coupled with a traditional NER system offers the best or second best EL accuracy.|sl:arxiv_author|Octavian-Eugen Ganea +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|creationDate|2019-09-18 +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|tag|http://www.semanlink.net/tag/memory_in_deep_learning +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|tag|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|tag|http://www.semanlink.net/tag/not_encoding_knowledge_in_language_model +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|tag|http://www.semanlink.net/tag/nlp_ibm +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|tag|http://www.semanlink.net/tag/language_models_knowledge +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_author|Avirup Sil +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_author|Michael Glass +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_author|G P Shrivatsa Bhargav +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_author|Rishav Chakravarti +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_author|Dinesh Garg +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_author|Alfio Gliozzo +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_author|Anthony Ferritto +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_author|Lin Pan +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|comment|"> a **new pre-training task inspired by reading +comprehension** and an **effort to avoid encoding general knowledge in the transformer network itself** + +Current transformer architectures store general knowledge -> large models, long pre-training time. Better to offload the requirement of general knowledge to a sparsely activated network. + +""Span selection"" as an additional auxiliary task: the query is a sentence drawn from a corpus +with a term replaced with a special token: [BLANK]. The term replaced by the blank is the answer term. The passage is +relevant as determined by a BM25 search, and answer-bearing (containing the answer +term). Unlike BERT’s cloze task, where the answer must be drawn from the model itself, the answer is found in a passage +using language understanding. 
+ +> **We hope to progress to a model of general purpose language modeling that uses an indexed long +term memory to retrieve world knowledge, rather than holding it in the densely activated transformer encoder layers.**" +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|title|[1909.04120] Span Selection Pre-training for Question Answering +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|bookmarkOf|https://arxiv.org/abs/1909.04120 +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|creationTime|2019-09-18T17:26:33Z +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_summary|"BERT (Bidirectional Encoder Representations from Transformers) and related +pre-trained Transformers have provided large gains across many language +understanding tasks, achieving a new state-of-the-art (SOTA). BERT is +pre-trained on two auxiliary tasks: Masked Language Model and Next Sentence +Prediction. In this paper we introduce a new pre-training task inspired by +reading comprehension and an effort to avoid encoding general knowledge in the +transformer network itself. We find significant and consistent improvements +over both BERT-BASE and BERT-LARGE on multiple reading comprehension (MRC) and +paraphrasing datasets. Specifically, our proposed model has strong empirical +evidence as it obtains SOTA results on Natural Questions, a new benchmark MRC +dataset, outperforming BERT-LARGE by 3 F1 points on short answer prediction. We +also establish a new SOTA in HotpotQA, improving answer prediction F1 by 4 F1 +points and supporting fact prediction by 1 F1 point. Moreover, we show that our +pre-training approach is particularly effective when training data is limited, +improving the learning curve by a large amount." 
+http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_firstAuthor|Michael Glass +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_updated|2019-09-09T19:32:31Z +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_title|Span Selection Pre-training for Question Answering +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_published|2019-09-09T19:32:31Z +http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre|arxiv_num|1909.04120 +http://www.semanlink.net/doc/2019/11/homemade_manuscript_ocr_1_oc|creationDate|2019-11-06 +http://www.semanlink.net/doc/2019/11/homemade_manuscript_ocr_1_oc|tag|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/doc/2019/11/homemade_manuscript_ocr_1_oc|tag|http://www.semanlink.net/tag/handwriting_recognition +http://www.semanlink.net/doc/2019/11/homemade_manuscript_ocr_1_oc|tag|http://www.semanlink.net/tag/ocr +http://www.semanlink.net/doc/2019/11/homemade_manuscript_ocr_1_oc|comment|optical text recognition applied to manuscripts +http://www.semanlink.net/doc/2019/11/homemade_manuscript_ocr_1_oc|title|Homemade manuscript OCR (1): OCRopy +http://www.semanlink.net/doc/2019/11/homemade_manuscript_ocr_1_oc|bookmarkOf|https://graal.hypotheses.org/786 +http://www.semanlink.net/doc/2019/11/homemade_manuscript_ocr_1_oc|creationTime|2019-11-06T10:54:21Z +http://www.semanlink.net/doc/2020/04/lainey_doyle_sur_twitter_basi|creationDate|2020-04-12 +http://www.semanlink.net/doc/2020/04/lainey_doyle_sur_twitter_basi|tag|http://www.semanlink.net/tag/irlande +http://www.semanlink.net/doc/2020/04/lainey_doyle_sur_twitter_basi|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/lainey_doyle_sur_twitter_basi|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales +http://www.semanlink.net/doc/2020/04/lainey_doyle_sur_twitter_basi|title|"Lainey Doyle sur Twitter : Basic things: Ireland and the UK started this pandemic with roughly the same...""" +http://www.semanlink.net/doc/2020/04/lainey_doyle_sur_twitter_basi|bookmarkOf|https://twitter.com/laineydoyle/status/1249127908876128259?s=20 +http://www.semanlink.net/doc/2020/04/lainey_doyle_sur_twitter_basi|creationTime|2020-04-12T11:38:36Z +http://www.semanlink.net/doc/2019/08/blackstone_concept_extractor_|creationDate|2019-08-21 +http://www.semanlink.net/doc/2019/08/blackstone_concept_extractor_|tag|http://www.semanlink.net/tag/juridique +http://www.semanlink.net/doc/2019/08/blackstone_concept_extractor_|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/doc/2019/08/blackstone_concept_extractor_|tag|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/doc/2019/08/blackstone_concept_extractor_|tag|http://www.semanlink.net/tag/concept_extraction +http://www.semanlink.net/doc/2019/08/blackstone_concept_extractor_|title|Blackstone Concept Extractor — ICLR&D +http://www.semanlink.net/doc/2019/08/blackstone_concept_extractor_|bookmarkOf|https://research.iclr.co.uk/blog/blackstone-concept-extractor +http://www.semanlink.net/doc/2019/08/blackstone_concept_extractor_|creationTime|2019-08-21T08:35:18Z +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|creationDate|2020-01-09 +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|tag|http://www.semanlink.net/tag/entity_type_representation +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|tag|http://www.semanlink.net/tag/entity_linking 
+http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|arxiv_author|Chin-Yew Lin +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|arxiv_author|Jinpeng Wang +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|arxiv_author|Feng Jiang +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|arxiv_author|Shuang Chen +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|title|[2001.01447] Improving Entity Linking by Modeling Latent Entity Type Information +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|bookmarkOf|https://arxiv.org/abs/2001.01447 +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|creationTime|2020-01-09T02:37:01Z +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|arxiv_summary|"Existing state of the art neural entity linking models employ attention-based +bag-of-words context model and pre-trained entity embeddings bootstrapped from +word embeddings to assess topic level context compatibility. However, the +latent entity type information in the immediate context of the mention is +neglected, which causes the models often link mentions to incorrect entities +with incorrect type. To tackle this problem, we propose to inject latent entity +type information into the entity embeddings based on pre-trained BERT. In +addition, we integrate a BERT-based entity similarity score into the local +context model of a state-of-the-art model to better capture latent entity type +information. Our model significantly outperforms the state-of-the-art entity +linking models on standard benchmark (AIDA-CoNLL). Detailed experiment analysis +demonstrates that our model corrects most of the type errors produced by the +direct baseline." +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|arxiv_firstAuthor|Shuang Chen +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|arxiv_updated|2020-01-06T09:18:29Z +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|arxiv_title|Improving Entity Linking by Modeling Latent Entity Type Information +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|arxiv_published|2020-01-06T09:18:29Z +http://www.semanlink.net/doc/2020/01/_2001_01447v1_improving_entity|arxiv_num|2001.01447 +http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast|creationDate|2020-01-10 +http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast|tag|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast|tag|http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s +http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast|tag|http://www.semanlink.net/tag/approximate_nearest_neighbor +http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast|tag|http://www.semanlink.net/tag/using_word_embedding +http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast|tag|http://www.semanlink.net/tag/nlp_and_search +http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast|comment|"> How text embeddings and Elasticsearch’s dense_vector type **could be** used to support similarity search. 
+ +> In practice, sentence embeddings often don’t generalize well to large sections of text. They are not commonly used to represent text longer than a short paragraph. + +Example of use: search similar question in a collection of Q/A + +Sample code is given to rank search results (TensorFlow + Google's universal sentence encoder + cosineSimilarity) + +Current limitation of vector similarity in Elasticsearch: vectors can be used for scoring documents, but not in the initial retrieval step. ([Ongoing work about approximate nearest neighbours search](https://github.com/elastic/elasticsearch/issues/42326). Will be a licensed feature of ES). + +> Conclusions: Using vectors for search is an important and **nuanced** area" +http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast|title|Text similarity search in Elasticsearch using vector fields Elastic Blog +http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast|bookmarkOf|https://www.elastic.co/fr/blog/text-similarity-search-with-vectors-in-elasticsearch +http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast|creationTime|2020-01-10T17:24:31Z +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|creationDate|2019-11-16 +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|tag|http://www.semanlink.net/tag/google_research +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|tag|http://www.semanlink.net/tag/good +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|tag|http://www.semanlink.net/tag/artificial_human_intelligence +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|tag|http://www.semanlink.net/tag/stanislas_dehaene +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|tag|http://www.semanlink.net/tag/global_workspace_theory +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|tag|http://www.semanlink.net/tag/personal_assistant +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|tag|http://www.semanlink.net/tag/explainable_ai +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|comment|"> Our primary objective is to +build an end-to-end system for an individualized personal assistant that +focuses on a specific area of expertise, namely software engineering, that +learns from experience, works collaboratively with an expert programmer and +that provides value from day one. 
+ +> Our **goal in developing systems that incorporate +characteristics of human intelligence** is two fold: +humans provide a complete solution that we can +use as a basic blueprint and then improve upon, +and **the resulting AI systems are likely to be well +suited to developing assistants** that complement +and extend human intelligence while **operating in +a manner comprehensible to our understanding**" +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|title|Artificial Human Intelligence: The Programmer’s Apprentice - Tom Dean and Rishabh Singh - Google Research +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|bookmarkOf|https://web.stanford.edu/class/cs379c/calendar_invited_talks/lectures/04/04/slides/CS379C_Thomas_Dean_Lecture_04_02_19.pdf +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|creationTime|2019-11-16T20:16:43Z +http://www.semanlink.net/doc/2019/11/artificial_human_intelligence_|mainDoc|http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|creationDate|2020-05-11 +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|tag|http://www.semanlink.net/tag/kd_mkb_biblio +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|tag|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|tag|http://www.semanlink.net/tag/combining_knowledge_graphs +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|tag|http://www.semanlink.net/tag/ai_amazon +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_author|Christos Faloutsos +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_author|Xin Luna Dong +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_author|Jun Ma +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_author|Rakshit Trivedi +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_author|Bunyamin Sisman +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_author|Hongyuan Zha +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|comment|" > a deep relational learning framework that **learns entity and relationship representations across multiple graphs**. We identify entity linkage across graphs as a vital component to achieve our goal. We design a novel objective that leverage entity linkage and build an efficient multi-task training procedure +> +> We posit that **combining +graph alignment task with deep representation +learning across multi-relational graphs** has potential +to induce a synergistic effect on both tasks" +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|title|[1807.08447] LinkNBed: Multi-Graph Representation Learning with Entity Linkage +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|bookmarkOf|https://arxiv.org/abs/1807.08447 +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|creationTime|2020-05-11T22:30:47Z +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_summary|"Knowledge graphs have emerged as an important model for studying complex +multi-relational data. This has given rise to the construction of numerous +large scale but incomplete knowledge graphs encoding information extracted from +various resources. 
An effective and scalable approach to jointly learn over +multiple graphs and eventually construct a unified graph is a crucial next step +for the success of knowledge-based inference for many downstream applications. +To this end, we propose LinkNBed, a deep relational learning framework that +learns entity and relationship representations across multiple graphs. We +identify entity linkage across graphs as a vital component to achieve our goal. +We design a novel objective that leverage entity linkage and build an efficient +multi-task training procedure. Experiments on link prediction and entity +linkage demonstrate substantial improvements over the state-of-the-art +relational learning approaches." +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_firstAuthor|Rakshit Trivedi +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_updated|2018-07-23T06:47:57Z +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_title|LinkNBed: Multi-Graph Representation Learning with Entity Linkage +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_published|2018-07-23T06:47:57Z +http://www.semanlink.net/doc/2020/05/1807_08447_linknbed_multi_gr|arxiv_num|1807.08447 +http://www.semanlink.net/doc/2019/08/exploring_dna_with_deep_learning|creationDate|2019-08-17 +http://www.semanlink.net/doc/2019/08/exploring_dna_with_deep_learning|tag|http://www.semanlink.net/tag/adn +http://www.semanlink.net/doc/2019/08/exploring_dna_with_deep_learning|tag|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/doc/2019/08/exploring_dna_with_deep_learning|title|Exploring DNA with Deep Learning +http://www.semanlink.net/doc/2019/08/exploring_dna_with_deep_learning|bookmarkOf|https://blog.floydhub.com/exploring-dna-with-deep-learning/ +http://www.semanlink.net/doc/2019/08/exploring_dna_with_deep_learning|creationTime|2019-08-17T00:16:46Z +http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain|creationDate|2019-11-24 +http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain|tag|http://www.semanlink.net/tag/elasticsearch_annotated_text_field +http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain|tag|http://www.semanlink.net/tag/elasticsearch +http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain|comment|"Elasticsearch has a new field type which allows **structured data to be added into the text of documents as annotations**. This offers powerful new search features: for ex. NER (Named Entity Recognition) tools like OpenNLP and Spacy can be used to tag people, places and organisations mentioned in text. 
+ +Sample code: [rss indexer](/doc/2019/11/elasticsearch_rss_feed_indexer_)" +http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain|relatedDoc|http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_ +http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain|title|Meetup Paris #40 : Beyond plain text: elasticsearch’s annotated text field type (en anglais) - YouTube +http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain|bookmarkOf|https://www.youtube.com/watch?v=d7HfpVXVYZA +http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain|creationTime|2019-11-24T17:08:26Z +http://www.semanlink.net/doc/2020/05/karl_sharro_sur_twitter_the_|creationDate|2020-05-26 +http://www.semanlink.net/doc/2020/05/karl_sharro_sur_twitter_the_|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/05/karl_sharro_sur_twitter_the_|tag|http://www.semanlink.net/tag/brexit +http://www.semanlink.net/doc/2020/05/karl_sharro_sur_twitter_the_|tag|http://www.semanlink.net/tag/twitter +http://www.semanlink.net/doc/2020/05/karl_sharro_sur_twitter_the_|tag|http://www.semanlink.net/tag/rigolo +http://www.semanlink.net/doc/2020/05/karl_sharro_sur_twitter_the_|tag|http://www.semanlink.net/tag/histoire_coloniale +http://www.semanlink.net/doc/2020/05/karl_sharro_sur_twitter_the_|title|"Karl Sharro sur Twitter : ""The British are finally experiencing what's it like to have the British rule your country"" / Twitter" +http://www.semanlink.net/doc/2020/05/karl_sharro_sur_twitter_the_|bookmarkOf|https://twitter.com/KarlreMarks/status/1265208022118391808?s=20 +http://www.semanlink.net/doc/2020/05/karl_sharro_sur_twitter_the_|creationTime|2020-05-26T16:20:00Z +http://www.semanlink.net/doc/2020/01/your_own_hosted_blog_the_easy_|creationDate|2020-01-16 +http://www.semanlink.net/doc/2020/01/your_own_hosted_blog_the_easy_|tag|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/doc/2020/01/your_own_hosted_blog_the_easy_|tag|http://www.semanlink.net/tag/github +http://www.semanlink.net/doc/2020/01/your_own_hosted_blog_the_easy_|tag|http://www.semanlink.net/tag/blog +http://www.semanlink.net/doc/2020/01/your_own_hosted_blog_the_easy_|comment|[twitter](https://twitter.com/jeremyphoward/status/1217909025259442176?s=20) +http://www.semanlink.net/doc/2020/01/your_own_hosted_blog_the_easy_|title|Your own hosted blog, the easy, free, open way· fast.ai +http://www.semanlink.net/doc/2020/01/your_own_hosted_blog_the_easy_|bookmarkOf|https://www.fast.ai/2020/01/16/fast_template/ +http://www.semanlink.net/doc/2020/01/your_own_hosted_blog_the_easy_|creationTime|2020-01-16T22:15:22Z +http://www.semanlink.net/doc/2020/01/your_own_hosted_blog_the_easy_|mainDoc|http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages +http://www.semanlink.net/doc/2020/04/projota_a_rezadeira_video_of|creationDate|2020-04-10 +http://www.semanlink.net/doc/2020/04/projota_a_rezadeira_video_of|tag|http://www.semanlink.net/tag/musique_bresilienne +http://www.semanlink.net/doc/2020/04/projota_a_rezadeira_video_of|tag|http://www.semanlink.net/tag/robert +http://www.semanlink.net/doc/2020/04/projota_a_rezadeira_video_of|title|Projota - A Rezadeira (Video Oficial) - YouTube +http://www.semanlink.net/doc/2020/04/projota_a_rezadeira_video_of|bookmarkOf|https://www.youtube.com/watch?v=LYDESzzeeT4 +http://www.semanlink.net/doc/2020/04/projota_a_rezadeira_video_of|creationTime|2020-04-10T19:30:37Z 
+http://www.semanlink.net/doc/2020/04/copier_le_fonctionnement_du_cer|creationDate|2020-04-05 +http://www.semanlink.net/doc/2020/04/copier_le_fonctionnement_du_cer|tag|http://www.semanlink.net/tag/brains_in_silicon +http://www.semanlink.net/doc/2020/04/copier_le_fonctionnement_du_cer|title|Copier le fonctionnement du cerveau pour économiser de l'énergie Techniques de l'Ingénieur +http://www.semanlink.net/doc/2020/04/copier_le_fonctionnement_du_cer|bookmarkOf|https://www.techniques-ingenieur.fr/actualite/articles/consommation-energetique-intelligence-artificielle-74940/ +http://www.semanlink.net/doc/2020/04/copier_le_fonctionnement_du_cer|creationTime|2020-04-05T11:14:51Z +http://www.semanlink.net/doc/2019/08/smaller_faster_cheaper_light|creationDate|2019-08-28 +http://www.semanlink.net/doc/2019/08/smaller_faster_cheaper_light|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2019/08/smaller_faster_cheaper_light|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2019/08/smaller_faster_cheaper_light|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/08/smaller_faster_cheaper_light|title|Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT +http://www.semanlink.net/doc/2019/08/smaller_faster_cheaper_light|bookmarkOf|https://medium.com/huggingface/distilbert-8cf3380435b5 +http://www.semanlink.net/doc/2019/08/smaller_faster_cheaper_light|creationTime|2019-08-28T22:47:20Z +http://www.semanlink.net/doc/2020/04/dans_les_quartiers_populaires_|creationDate|2020-04-18 +http://www.semanlink.net/doc/2020/04/dans_les_quartiers_populaires_|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/dans_les_quartiers_populaires_|tag|http://www.semanlink.net/tag/inegalites +http://www.semanlink.net/doc/2020/04/dans_les_quartiers_populaires_|comment|> Les quartiers populaires entament leur deuxième mois de confinement à bout de souffle +http://www.semanlink.net/doc/2020/04/dans_les_quartiers_populaires_|title|Dans les quartiers populaires, « si on remplit le frigo, on chope le corona » +http://www.semanlink.net/doc/2020/04/dans_les_quartiers_populaires_|bookmarkOf|https://www.lemonde.fr/societe/article/2020/04/18/dans-les-quartiers-populaires-si-on-remplit-le-frigo-on-chope-le-corona_6036998_3224.html +http://www.semanlink.net/doc/2020/04/dans_les_quartiers_populaires_|creationTime|2020-04-18T16:00:15Z +http://www.semanlink.net/doc/2019/08/could_an_ai_duet_be_the_next_ch|creationDate|2019-08-12 +http://www.semanlink.net/doc/2019/08/could_an_ai_duet_be_the_next_ch|tag|http://www.semanlink.net/tag/musique +http://www.semanlink.net/doc/2019/08/could_an_ai_duet_be_the_next_ch|tag|http://www.semanlink.net/tag/human_ai_collaboration +http://www.semanlink.net/doc/2019/08/could_an_ai_duet_be_the_next_ch|comment|"Musician Holly Herndon taught an electronic ‘collaborator’ to sing using the call-and-response hymns of her childhood. [Arte.tv ""tracks"" on youtube](https://www.youtube.com/watch?v=8oQdJqVOky4)" +http://www.semanlink.net/doc/2019/08/could_an_ai_duet_be_the_next_ch|title|Could an AI duet be the next chart-topper? 
Financial Times +http://www.semanlink.net/doc/2019/08/could_an_ai_duet_be_the_next_ch|bookmarkOf|https://www.ft.com/content/03737d6a-b849-11e9-8a88-aa6628ac896c?segmentid=acee4131-99c2-09d3-a635-873e61754ec6#comments-anchor +http://www.semanlink.net/doc/2019/08/could_an_ai_duet_be_the_next_ch|creationTime|2019-08-12T11:24:06Z +http://www.semanlink.net/doc/2020/02/canwen_xu_sur_twitter_wtf_w|creationDate|2020-02-10 +http://www.semanlink.net/doc/2020/02/canwen_xu_sur_twitter_wtf_w|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/02/canwen_xu_sur_twitter_wtf_w|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2020/02/canwen_xu_sur_twitter_wtf_w|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/02/canwen_xu_sur_twitter_wtf_w|comment|[paper](/doc/2020/02/_2002_02925_bert_of_theseus_c) +http://www.semanlink.net/doc/2020/02/canwen_xu_sur_twitter_wtf_w|relatedDoc|http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c +http://www.semanlink.net/doc/2020/02/canwen_xu_sur_twitter_wtf_w|title|"Canwen Xu sur Twitter : ""WTF? We brutally dismember BERT and replace all his organs?""" +http://www.semanlink.net/doc/2020/02/canwen_xu_sur_twitter_wtf_w|bookmarkOf|https://twitter.com/XuCanwen/status/1226682713983160324 +http://www.semanlink.net/doc/2020/02/canwen_xu_sur_twitter_wtf_w|creationTime|2020-02-10T09:21:44Z +http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_|creationDate|2020-01-23 +http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_|tag|http://www.semanlink.net/tag/triplet_loss +http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_|tag|http://www.semanlink.net/tag/job_matching +http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_|tag|http://www.semanlink.net/tag/camembert_nlp +http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_|tag|http://www.semanlink.net/tag/paris_nlp_meetup +http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_|comment|"- Siamese CNN for jobs-candidate matching: learning document embeddings with triplet loss. +- Sesame street-based naming schemes must fade out, long live CamemBERT et le French fromage!" 
+http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_|title|Paris NLP Season 4 Meetup #3 – Paris NLP +http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_|bookmarkOf|https://nlpparis.wordpress.com/2020/01/23/paris-nlp-season-4-meetup-3/ +http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_|creationTime|2020-01-23T22:26:20Z +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|creationDate|2020-02-10 +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_author|Canwen Xu +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_author|Furu Wei +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_author|Wangchunshu Zhou +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_author|Tao Ge +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_author|Ming Zhou +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|comment|"approach to compress BERT by progressive module replacing. + +> Compared to the previous knowledge distillation approaches for BERT compression, our approach leverages only one loss function and one hyper-parameter + +[Github](https://github.com/JetRunner/BERT-of-Theseus)" +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|title|[2002.02925] BERT-of-Theseus: Compressing BERT by Progressive Module Replacing +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|bookmarkOf|https://arxiv.org/abs/2002.02925 +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|creationTime|2020-02-10T21:50:03Z +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_summary|"In this paper, we propose a novel model compression approach to effectively +compress BERT by progressive module replacing. Our approach first divides the +original BERT into several modules and builds their compact substitutes. Then, +we randomly replace the original modules with their substitutes to train the +compact modules to mimic the behavior of the original modules. We progressively +increase the probability of replacement through the training. In this way, our +approach brings a deeper level of interaction between the original and compact +models, and smooths the training process. Compared to the previous knowledge +distillation approaches for BERT compression, our approach leverages only one +loss function and one hyper-parameter, liberating human effort from +hyper-parameter tuning. Our approach outperforms existing knowledge +distillation approaches on GLUE benchmark, showing a new perspective of model +compression." 
+http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_firstAuthor|Canwen Xu +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_updated|2020-03-25T15:20:44Z +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_title|BERT-of-Theseus: Compressing BERT by Progressive Module Replacing +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_published|2020-02-07T17:52:16Z +http://www.semanlink.net/doc/2020/02/_2002_02925_bert_of_theseus_c|arxiv_num|2002.02925 +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|creationDate|2020-01-10 +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|tag|http://www.semanlink.net/tag/elasticsearch +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|tag|http://www.semanlink.net/tag/nlp_and_search +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|tag|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|comment|"- Links to [this ES blog post](/doc/2020/01/text_similarity_search_in_elast) +- [somewhat related](/doc/2020/01/building_a_search_engine_with_b)" +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|relatedDoc|http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|relatedDoc|http://www.semanlink.net/doc/2020/01/text_similarity_search_in_elast +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|title|Elasticsearch meets BERT: Building Search Engine with Elasticsearch and BERT +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|bookmarkOf|https://github.com/Hironsan/bertsearch +http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build|creationTime|2020-01-10T17:23:50Z +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|creationDate|2020-03-03 +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|tag|http://www.semanlink.net/tag/rotate +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|tag|http://www.semanlink.net/tag/kd_mkb_biblio +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|tag|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|arxiv_author|Jian Tang +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|arxiv_author|Jian-Yun Nie +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|arxiv_author|Zhi-Hong Deng +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|arxiv_author|Zhiqing Sun +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|comment|> We study the problem of learning representations of entities and relations in knowledge graphs for predicting missing links. 
+http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|title|[1902.10197] RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|bookmarkOf|https://arxiv.org/abs/1902.10197 +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|creationTime|2020-03-03T13:27:48Z +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|arxiv_summary|"We study the problem of learning representations of entities and relations in +knowledge graphs for predicting missing links. The success of such a task +heavily relies on the ability of modeling and inferring the patterns of (or +between) the relations. In this paper, we present a new approach for knowledge +graph embedding called RotatE, which is able to model and infer various +relation patterns including: symmetry/antisymmetry, inversion, and composition. +Specifically, the RotatE model defines each relation as a rotation from the +source entity to the target entity in the complex vector space. In addition, we +propose a novel self-adversarial negative sampling technique for efficiently +and effectively training the RotatE model. Experimental results on multiple +benchmark knowledge graphs show that the proposed RotatE model is not only +scalable, but also able to infer and model various relation patterns and +significantly outperform existing state-of-the-art models for link prediction." +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|arxiv_firstAuthor|Zhiqing Sun +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|arxiv_updated|2019-02-26T20:15:09Z +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|arxiv_title|RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|arxiv_published|2019-02-26T20:15:09Z +http://www.semanlink.net/doc/2020/03/_1902_10197_rotate_knowledge_|arxiv_num|1902.10197 +http://www.semanlink.net/doc/2019/06/la_surveillance_stade_supreme_|creationDate|2019-06-14 +http://www.semanlink.net/doc/2019/06/la_surveillance_stade_supreme_|tag|http://www.semanlink.net/tag/surveillance_capitalism +http://www.semanlink.net/doc/2019/06/la_surveillance_stade_supreme_|tag|http://www.semanlink.net/tag/livre_a_lire +http://www.semanlink.net/doc/2019/06/la_surveillance_stade_supreme_|comment|"The Age of Surveillance Capitalism, Shoshana Zuboff +" +http://www.semanlink.net/doc/2019/06/la_surveillance_stade_supreme_|title|La surveillance, stade suprême du capitalisme ? 
+
http://www.semanlink.net/doc/2019/06/la_surveillance_stade_supreme_|bookmarkOf|https://www.lemonde.fr/idees/article/2019/06/14/la-surveillance-stade-supreme-du-capitalisme_5476001_3232.html
+http://www.semanlink.net/doc/2019/06/la_surveillance_stade_supreme_|creationTime|2019-06-14T23:36:12Z
+http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin|creationDate|2019-07-13
+http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin|tag|http://www.semanlink.net/tag/entity_linking
+http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin|tag|http://www.semanlink.net/tag/spacy
+http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin|title|Sofie Van Landeghem: Entity linking functionality in spaCy (spaCy IRL 2019) - YouTube
+http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin|bookmarkOf|https://www.youtube.com/watch?v=PW3RJM8tDGo&list=PLBmcuObd5An4UC6jvK_-eSl6jCvP1gwXc&index=6
+http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin|creationTime|2019-07-13T10:42:05Z
+http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin|mainDoc|http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1
+http://www.semanlink.net/doc/2019/10/le_viager|creationDate|2019-10-15
+http://www.semanlink.net/doc/2019/10/le_viager|tag|http://www.semanlink.net/tag/comedie
+http://www.semanlink.net/doc/2019/10/le_viager|tag|http://www.semanlink.net/tag/film_francais
+http://www.semanlink.net/doc/2019/10/le_viager|comment|avec Michel Serrault, de Pierre Tchernia, co-auteur du scénario avec René Goscinny
+http://www.semanlink.net/doc/2019/10/le_viager|title|Le Viager
+http://www.semanlink.net/doc/2019/10/le_viager|bookmarkOf|https://fr.wikipedia.org/wiki/Le_Viager
+http://www.semanlink.net/doc/2019/10/le_viager|creationTime|2019-10-15T20:30:53Z
+http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|creationDate|2020-04-13
+http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|tag|http://www.semanlink.net/tag/allen_institute_for_ai_a2i
+http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|tag|http://www.semanlink.net/tag/nlp_long_documents
+http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|arxiv_author|Matthew E. Peters
+http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|arxiv_author|Iz Beltagy
+http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|arxiv_author|Arman Cohan
+http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|comment|> **Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length**. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length. Longformer's attention mechanism is a **drop-in replacement** for the standard self-attention and **combines a local windowed attention with a task motivated global attention**.
+http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|title|[2004.05150] Longformer: The Long-Document Transformer +http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|bookmarkOf|https://arxiv.org/abs/2004.05150 +http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|creationTime|2020-04-13T11:06:40Z +http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|arxiv_summary|"Transformer-based models are unable to process long sequences due to their +self-attention operation, which scales quadratically with the sequence length. +To address this limitation, we introduce the Longformer with an attention +mechanism that scales linearly with sequence length, making it easy to process +documents of thousands of tokens or longer. Longformer's attention mechanism is +a drop-in replacement for the standard self-attention and combines a local +windowed attention with a task motivated global attention. Following prior work +on long-sequence transformers, we evaluate Longformer on character-level +language modeling and achieve state-of-the-art results on text8 and enwik8. In +contrast to most prior work, we also pretrain Longformer and finetune it on a +variety of downstream tasks. Our pretrained Longformer consistently outperforms +RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop +and TriviaQA." +http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|arxiv_firstAuthor|Iz Beltagy +http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|arxiv_updated|2020-04-10T17:54:09Z +http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|arxiv_title|Longformer: The Long-Document Transformer +http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|arxiv_published|2020-04-10T17:54:09Z +http://www.semanlink.net/doc/2020/04/2004_05150_longformer_the_lo|arxiv_num|2004.05150 +http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag|creationDate|2019-10-22 +http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag|tag|http://www.semanlink.net/tag/multilingual_language_models +http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag|tag|http://www.semanlink.net/tag/ulmfit +http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag|tag|http://www.semanlink.net/tag/cross_lingual_nlp +http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag|tag|http://www.semanlink.net/tag/multilingual_embeddings +http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag|title|Efficient multi-lingual language model fine-tuning · fast.ai NLP +http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag|bookmarkOf|http://nlp.fast.ai/classification/2019/09/10/multifit.html +http://www.semanlink.net/doc/2019/10/efficient_multi_lingual_languag|creationTime|2019-10-22T23:49:58Z +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|creationDate|2020-05-13 +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/knowledge_augmented_language_models +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/nlp_using_knowledge_graphs 
+
http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/grounded_language_learning
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/knowbert
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/kd_mkb_biblio
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/emnlp_2019
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/allen_institute_for_ai_a2i
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/good
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/knowledge_graph_augmented_language_models
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/multiple_knowledge_bases
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/knowledge_driven_embeddings
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|tag|http://www.semanlink.net/tag/contextualised_word_representations
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_author|Mark Neumann
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_author|Matthew E. Peters
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_author|Vidur Joshi
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_author|Roy Schwartz
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_author|Sameer Singh
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_author|Robert L. Logan IV
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_author|Noah A. Smith
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|comment|"General method to **embed multiple knowledge bases into pre-trained language models** (KB in the
sense of a fixed collection of entity nodes)

> The key idea is to explicitly model
entity spans in the input text and use an **entity
linker** to retrieve relevant entity embeddings from
a KB to form knowledge enhanced entity-span
representations.
> Then, update contextual word representations via a form of **word-to-entity attention**.
> In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that **combines a small amount of entity linking supervision with a large amount of raw text**."
+http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|title|[1909.04164] Knowledge Enhanced Contextual Word Representations +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|bookmarkOf|https://www.aclweb.org/anthology/D19-1005.pdf +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|bookmarkOf|https://arxiv.org/abs/1909.04164 +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|bookmarkOf|https://www.aclweb.org/anthology/D19-1005/ +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|creationTime|2020-05-13T01:44:51Z +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_summary|"Contextual word representations, typically trained on unstructured, unlabeled +text, do not contain any explicit grounding to real world entities and are +often unable to remember facts about those entities. We propose a general +method to embed multiple knowledge bases (KBs) into large scale models, and +thereby enhance their representations with structured, human-curated knowledge. +For each KB, we first use an integrated entity linker to retrieve relevant +entity embeddings, then update contextual word representations via a form of +word-to-entity attention. In contrast to previous approaches, the entity +linkers and self-supervised language modeling objective are jointly trained +end-to-end in a multitask setting that combines a small amount of entity +linking supervision with a large amount of raw text. After integrating WordNet +and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) +demonstrates improved perplexity, ability to recall facts as measured in a +probing task and downstream performance on relationship extraction, entity +typing, and word sense disambiguation. KnowBert's runtime is comparable to +BERT's and it scales to large KBs." +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_firstAuthor|Matthew E. 
Peters +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_updated|2019-10-31T00:14:48Z +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_title|Knowledge Enhanced Contextual Word Representations +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_published|2019-09-09T21:18:50Z +http://www.semanlink.net/doc/2020/05/1909_04164_knowledge_enhanced|arxiv_num|1909.04164 +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|creationDate|2019-10-13 +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|tag|http://www.semanlink.net/tag/conditional_random_field +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|tag|http://www.semanlink.net/tag/andrew_mccallum +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|arxiv_author|Charles Sutton +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|arxiv_author|Andrew McCallum +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|title|[1011.4088] An Introduction to Conditional Random Fields +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|bookmarkOf|https://arxiv.org/abs/1011.4088 +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|creationTime|2019-10-13T23:51:20Z +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|arxiv_summary|"Often we wish to predict a large number of variables that depend on each +other as well as on other observed variables. Structured prediction methods are +essentially a combination of classification and graphical modeling, combining +the ability of graphical models to compactly model multivariate data with the +ability of classification methods to perform prediction using large sets of +input features. This tutorial describes conditional random fields, a popular +probabilistic method for structured prediction. CRFs have seen wide application +in natural language processing, computer vision, and bioinformatics. We +describe methods for inference and parameter estimation for CRFs, including +practical issues for implementing large scale CRFs. We do not assume previous +knowledge of graphical modeling, so this tutorial is intended to be useful to +practitioners in a wide variety of fields." +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|arxiv_firstAuthor|Charles Sutton +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|arxiv_updated|2010-11-17T22:14:50Z +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|arxiv_title|An Introduction to Conditional Random Fields +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|arxiv_published|2010-11-17T22:14:50Z +http://www.semanlink.net/doc/2019/10/_1011_4088_an_introduction_to_|arxiv_num|1011.4088 +http://www.semanlink.net/doc/2019/12/extraction_de_relation_via_la_v|creationDate|2019-12-03 +http://www.semanlink.net/doc/2019/12/extraction_de_relation_via_la_v|tag|http://www.semanlink.net/tag/slides +http://www.semanlink.net/doc/2019/12/extraction_de_relation_via_la_v|tag|http://www.semanlink.net/tag/jose_moreno +http://www.semanlink.net/doc/2019/12/extraction_de_relation_via_la_v|tag|http://www.semanlink.net/tag/relation_extraction +http://www.semanlink.net/doc/2019/12/extraction_de_relation_via_la_v|comment|"> la validation de relation semble être plus importante que +l’extraction ! 
mais très peu de personnes s’y intéressent :(" +http://www.semanlink.net/doc/2019/12/extraction_de_relation_via_la_v|title|Extraction de relation via la validation de relation +http://www.semanlink.net/doc/2019/12/extraction_de_relation_via_la_v|bookmarkOf|https://ia-ri.sciencesconf.org/data/pages/Extraction_des_relations_via_la_validation_des_relations_share.pdf +http://www.semanlink.net/doc/2019/12/extraction_de_relation_via_la_v|creationTime|2019-12-03T10:47:05Z +http://www.semanlink.net/doc/2019/12/extraction_de_relation_via_la_v|mainDoc|http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2 +http://www.semanlink.net/doc/2020/04/iterative_entity_alignment_with|creationDate|2020-04-29 +http://www.semanlink.net/doc/2020/04/iterative_entity_alignment_with|tag|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/doc/2020/04/iterative_entity_alignment_with|tag|http://www.semanlink.net/tag/entity_embeddings +http://www.semanlink.net/doc/2020/04/iterative_entity_alignment_with|tag|http://www.semanlink.net/tag/entity_alignment +http://www.semanlink.net/doc/2020/04/iterative_entity_alignment_with|title|Iterative Entity Alignment with Improved Neural Attribute Embedding +http://www.semanlink.net/doc/2020/04/iterative_entity_alignment_with|bookmarkOf|http://ceur-ws.org/Vol-2377/paper_5.pdf +http://www.semanlink.net/doc/2020/04/iterative_entity_alignment_with|creationTime|2020-04-29T19:04:03Z +http://www.semanlink.net/doc/2020/04/iterative_entity_alignment_with|mainDoc|http://www.semanlink.net/doc/2020/04/ceur_ws_org_vol_2377_workshop +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|creationDate|2019-12-11 +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|tag|http://www.semanlink.net/tag/pytorch +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|tag|http://www.semanlink.net/tag/pierre_yves_vandenbussche +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|comment|> How to have a SotA identification of Disease and Chemical entities in 10 lines of code! +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|title|Named Entity Recognition with Pytorch Transformers – Pierre-Yves Vandenbussche +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|bookmarkOf|http://pyvandenbussche.info/2019/named-entity-recognition-with-pytorch-transformers/ +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|creationTime|2019-12-11T16:29:53Z +http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p|source|http://www.semanlink.net/doc/2019/12/named_entity_recognition_with_p +http://www.semanlink.net/doc/2020/02/hugo_la_legende_des_siecles|creationDate|2020-02-20 +http://www.semanlink.net/doc/2020/02/hugo_la_legende_des_siecles|tag|http://www.semanlink.net/tag/hugo +http://www.semanlink.net/doc/2020/02/hugo_la_legende_des_siecles|comment|"> Ils se battent - combat terrible !... + +> Il dit, et déracine un chêne. +> +> Sire Olivier arrache un orme dans la plaine + +> Nous lutterons ainsi que lions et panthères. +> +> Ne vaudrait-il pas mieux que nous devinssions frères ? 
+
> 
> Écoute, j’ai ma sœur, la belle Aude au bras blanc, 
> 
> Épouse-la. - Pardieu ! je veux bien, dit Roland. 
> 
> Et maintenant buvons, car l'affaire était chaude."" 
> 
> C'est ainsi que Roland épousa la belle Aude." 
+http://www.semanlink.net/doc/2020/02/hugo_la_legende_des_siecles|title|« Le Mariage de Roland », Victor Hugo, La Légende des Siècles, 1859.
+http://www.semanlink.net/doc/2020/02/hugo_la_legende_des_siecles|bookmarkOf|https://matieredefrance.blogspot.com/2012/01/le-duel-de-roland-et-olivier.html
+http://www.semanlink.net/doc/2020/02/hugo_la_legende_des_siecles|creationTime|2020-02-20T22:35:47Z
+http://www.semanlink.net/doc/2019/11/walker_bragman_sur_twitter_t|creationDate|2019-11-09
+http://www.semanlink.net/doc/2019/11/walker_bragman_sur_twitter_t|tag|http://www.semanlink.net/tag/alexandria_ocasio_cortez
+http://www.semanlink.net/doc/2019/11/walker_bragman_sur_twitter_t|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2019/11/walker_bragman_sur_twitter_t|title|"Walker Bragman sur Twitter : ""This is how you respond when someone asks “how will you pay for it?”"
+http://www.semanlink.net/doc/2019/11/walker_bragman_sur_twitter_t|bookmarkOf|https://twitter.com/WalkerBragman/status/1190164719656673280
+http://www.semanlink.net/doc/2019/11/walker_bragman_sur_twitter_t|creationTime|2019-11-09T15:33:29Z
+http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i|creationDate|2019-06-09
+http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i|tag|http://www.semanlink.net/tag/chris_manning
+http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i|comment|"Notes about [this paper](/doc/2019/06/_1906_02715_visualizing_and_me)

> Exactly how neural nets represent linguistic information remains mysterious. But we're starting to see enticing clues..."
+http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i|relatedDoc|http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me +http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i|title|Language, trees, and geometry in neural networks +http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i|bookmarkOf|https://pair-code.github.io/interpretability/bert-tree/ +http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i|creationTime|2019-06-09T23:26:24Z +http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i|mainDoc|http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me +http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa|creationDate|2019-06-08 +http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa|tag|http://www.semanlink.net/tag/transfer_learning +http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa|tag|http://www.semanlink.net/tag/catastrophic_forgetting +http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa|tag|http://www.semanlink.net/tag/pretrained_models +http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa|comment|[Github](https://github.com/alexandra-chron/siatl) +http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa|title|An Embarrassingly Simple Approach for Transfer Learning from Pretrained Language Models (NAACL 2019) +http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa|bookmarkOf|https://www.aclweb.org/anthology/N19-1213 +http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa|creationTime|2019-06-08T12:14:30Z +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|creationDate|2019-06-21 +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|tag|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|tag|http://www.semanlink.net/tag/what_s_encoded_by_a_nn +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|tag|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|arxiv_author|Omer Levy +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|arxiv_author|Urvashi Khandelwal +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|arxiv_author|Kevin Clark +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|arxiv_author|Christopher D. Manning +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|title|[1906.04341] What Does BERT Look At? 
An Analysis of BERT's Attention +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|bookmarkOf|https://arxiv.org/abs/1906.04341 +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|creationTime|2019-06-21T21:49:32Z +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|arxiv_summary|"Large pre-trained neural networks such as BERT have had great recent success +in NLP, motivating a growing body of research investigating what aspects of +language they are able to learn from unlabeled data. Most recent analysis has +focused on model outputs (e.g., language model surprisal) or internal vector +representations (e.g., probing classifiers). Complementary to these works, we +propose methods for analyzing the attention mechanisms of pre-trained models +and apply them to BERT. BERT's attention heads exhibit patterns such as +attending to delimiter tokens, specific positional offsets, or broadly +attending over the whole sentence, with heads in the same layer often +exhibiting similar behaviors. We further show that certain attention heads +correspond well to linguistic notions of syntax and coreference. For example, +we find heads that attend to the direct objects of verbs, determiners of nouns, +objects of prepositions, and coreferent mentions with remarkably high accuracy. +Lastly, we propose an attention-based probing classifier and use it to further +demonstrate that substantial syntactic information is captured in BERT's +attention." +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|arxiv_firstAuthor|Kevin Clark +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|arxiv_updated|2019-06-11T01:31:41Z +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|arxiv_title|What Does BERT Look At? 
An Analysis of BERT's Attention +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|arxiv_published|2019-06-11T01:31:41Z +http://www.semanlink.net/doc/2019/06/_1906_04341_what_does_bert_loo|arxiv_num|1906.04341 +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|creationDate|2020-05-02 +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|tag|http://www.semanlink.net/tag/blink +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|tag|http://www.semanlink.net/tag/zero_shot_entity_linking +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_author|Fabio Petroni +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_author|Luke Zettlemoyer +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_author|Sebastian Riedel +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_author|Martin Josifoski +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_author|Ledell Wu +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|title|[1911.03814] Zero-shot Entity Linking with Dense Entity Retrieval +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|bookmarkOf|https://arxiv.org/abs/1911.03814 +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|creationTime|2020-05-02T11:43:47Z +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_summary|"We consider the zero-shot entity-linking challenge where each entity is +defined by a short textual description, and the model must read these +descriptions together with the mention context to make the final linking +decisions. In this setting, retrieving entity candidates can be particularly +challenging, since many of the common linking cues such as entity alias tables +and link popularity are not available. In this paper, we introduce a simple and +effective two stage approach for zero-shot linking, based on fine-tuned BERT +architectures. In the first stage, we do retrieval in a dense space defined by +a bi-encoder that independently embeds the mention context and the entity +descriptions. Each candidate is then examined more carefully with a +cross-encoder, that concatenates the mention and entity text. Our approach +achieves a nearly 5 point absolute gain on a recently introduced zero-shot +entity linking benchmark, driven largely by improvements over previous IR-based +candidate retrieval. We also show that it performs well in the non-zero-shot +setting, obtaining the state-of-the-art result on TACKBP-2010." 
+http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_firstAuthor|Ledell Wu +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_updated|2019-11-10T01:01:45Z +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_title|Zero-shot Entity Linking with Dense Entity Retrieval +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_published|2019-11-10T01:01:45Z +http://www.semanlink.net/doc/2020/05/1911_03814_zero_shot_entity_l|arxiv_num|1911.03814 +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|creationDate|2020-02-18 +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|tag|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|tag|http://www.semanlink.net/tag/label_embedding +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_author|Lawrence Carin +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_author|Guoyin Wang +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_author|Xinyuan Zhang +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_author|Dinghan Shen +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_author|Chunyuan Li +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_author|Ricardo Henao +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_author|Wenlin Wang +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_author|Yizhe Zhang +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|comment|"> text classification as +a label-word joint embedding problem: +**each label is embedded in the same space +with the word vectors**. We introduce +an attention framework that measures the +compatibility of embeddings between text +sequences and labels. The attention is +learned on a training set of labeled samples +to ensure that, given a text sequence, the +relevant words are weighted higher than +the irrelevant ones. + +(from introduction:) + +> For the task of text classification, +labels play a central role of the final performance. +A natural question to ask is how we can +directly use label information in constructing the +text-sequence representations + +> The proposed LEAM (Label- +Embedding Attentive Mode) is implemented by jointly +embedding the word and label in the same latent +space, and **the text representations are constructed +directly using the text-label compatibility**." +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|title|[1805.04174] Joint Embedding of Words and Labels for Text Classification (ACL Anthology 2018) +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|bookmarkOf|https://arxiv.org/abs/1805.04174 +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|creationTime|2020-02-18T15:01:31Z +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_summary|"Word embeddings are effective intermediate representations for capturing +semantic regularities between words, when learning the representations of text +sequences. 
We propose to view text classification as a label-word joint +embedding problem: each label is embedded in the same space with the word +vectors. We introduce an attention framework that measures the compatibility of +embeddings between text sequences and labels. The attention is learned on a +training set of labeled samples to ensure that, given a text sequence, the +relevant words are weighted higher than the irrelevant ones. Our method +maintains the interpretability of word embeddings, and enjoys a built-in +ability to leverage alternative sources of information, in addition to input +text sequences. Extensive results on the several large text datasets show that +the proposed framework outperforms the state-of-the-art methods by a large +margin, in terms of both accuracy and speed." +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_firstAuthor|Guoyin Wang +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_updated|2018-05-10T20:42:52Z +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_title|Joint Embedding of Words and Labels for Text Classification +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_published|2018-05-10T20:42:52Z +http://www.semanlink.net/doc/2020/02/joint_embedding_of_words_and_la|arxiv_num|1805.04174 +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|creationDate|2020-05-19 +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|tag|http://www.semanlink.net/tag/feature_hashing +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|tag|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|arxiv_author|Jonas Meinertz Hansen +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|arxiv_author|Dan Svenstrup +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|arxiv_author|Ole Winther +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|comment|"> A hash embedding may be seen as an interpolation between +a standard word embedding and a word embedding created using a random hash +function (the hashing trick). + +recommandé par [Raphaël Sourty](tag:raphaelsty)" +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|title|[1709.03933] Hash Embeddings for Efficient Word Representations +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|bookmarkOf|https://arxiv.org/abs/1709.03933 +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|creationTime|2020-05-19T11:14:12Z +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|arxiv_summary|"We present hash embeddings, an efficient method for representing words in a +continuous vector form. A hash embedding may be seen as an interpolation +between a standard word embedding and a word embedding created using a random +hash function (the hashing trick). In hash embeddings each token is represented +by $k$ $d$-dimensional embeddings vectors and one $k$ dimensional weight +vector. The final $d$ dimensional representation of the token is the product of +the two. Rather than fitting the embedding vectors for each token these are +selected by the hashing trick from a shared pool of $B$ embedding vectors. Our +experiments show that hash embeddings can easily deal with huge vocabularies +consisting of millions of tokens. 
When using a hash embedding there is no need +to create a dictionary before training nor to perform any kind of vocabulary +pruning after training. We show that models trained using hash embeddings +exhibit at least the same level of performance as models trained using regular +embeddings across a wide range of tasks. Furthermore, the number of parameters +needed by such an embedding is only a fraction of what is required by a regular +embedding. Since standard embeddings and embeddings constructed using the +hashing trick are actually just special cases of a hash embedding, hash +embeddings can be considered an extension and improvement over the existing +regular embedding types." +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|arxiv_firstAuthor|Dan Svenstrup +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|arxiv_updated|2017-09-12T16:13:10Z +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|arxiv_title|Hash Embeddings for Efficient Word Representations +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|arxiv_published|2017-09-12T16:13:10Z +http://www.semanlink.net/doc/2020/05/1709_03933_hash_embeddings_fo|arxiv_num|1709.03933 +http://www.semanlink.net/doc/2020/02/jeremy_howard_sur_twitter_th|creationDate|2020-02-13 +http://www.semanlink.net/doc/2020/02/jeremy_howard_sur_twitter_th|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/02/jeremy_howard_sur_twitter_th|tag|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/doc/2020/02/jeremy_howard_sur_twitter_th|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2020/02/jeremy_howard_sur_twitter_th|title|"Jeremy Howard sur Twitter : ""The fastai paper (with @GuggerSylvain) covers v2...""" +http://www.semanlink.net/doc/2020/02/jeremy_howard_sur_twitter_th|bookmarkOf|https://twitter.com/jeremyphoward/status/1227975138097819650 +http://www.semanlink.net/doc/2020/02/jeremy_howard_sur_twitter_th|creationTime|2020-02-13T17:50:53Z +http://www.semanlink.net/doc/2020/02/jeremy_howard_sur_twitter_th|mainDoc|http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_ +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|creationDate|2019-08-23 +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|tag|http://www.semanlink.net/tag/attention_knowledge_graphs +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|tag|http://www.semanlink.net/tag/recommender_systems +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_author|Tat-Seng Chua +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_author|Xiangnan He +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_author|Xiang Wang +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_author|Yixin Cao +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_author|Meng Liu +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|title|[1905.07854] KGAT: Knowledge Graph Attention Network for Recommendation +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|bookmarkOf|https://arxiv.org/abs/1905.07854 +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|creationTime|2019-08-23T00:33:53Z +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_summary|"To provide more 
accurate, diverse, and explainable recommendation, it is +compulsory to go beyond modeling user-item interactions and take side +information into account. Traditional methods like factorization machine (FM) +cast it as a supervised learning problem, which assumes each interaction as an +independent instance with side information encoded. Due to the overlook of the +relations among instances or items (e.g., the director of a movie is also an +actor of another movie), these methods are insufficient to distill the +collaborative signal from the collective behaviors of users. In this work, we +investigate the utility of knowledge graph (KG), which breaks down the +independent interaction assumption by linking items with their attributes. We +argue that in such a hybrid structure of KG and user-item graph, high-order +relations --- which connect two items with one or multiple linked attributes +--- are an essential factor for successful recommendation. We propose a new +method named Knowledge Graph Attention Network (KGAT) which explicitly models +the high-order connectivities in KG in an end-to-end fashion. It recursively +propagates the embeddings from a node's neighbors (which can be users, items, +or attributes) to refine the node's embedding, and employs an attention +mechanism to discriminate the importance of the neighbors. Our KGAT is +conceptually advantageous to existing KG-based recommendation methods, which +either exploit high-order relations by extracting paths or implicitly modeling +them with regularization. Empirical results on three public benchmarks show +that KGAT significantly outperforms state-of-the-art methods like Neural FM and +RippleNet. Further studies verify the efficacy of embedding propagation for +high-order relation modeling and the interpretability benefits brought by the +attention mechanism." +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_firstAuthor|Xiang Wang +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_updated|2019-06-08T02:49:37Z +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_title|KGAT: Knowledge Graph Attention Network for Recommendation +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_published|2019-05-20T03:08:11Z +http://www.semanlink.net/doc/2019/08/_1905_07854_kgat_knowledge_gr|arxiv_num|1905.07854 +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|creationDate|2020-02-11 +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|tag|http://www.semanlink.net/tag/memory_in_deep_learning +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|tag|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|tag|http://www.semanlink.net/tag/google_deepmind +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|tag|http://www.semanlink.net/tag/nlp_long_documents +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|arxiv_author|Anna Potapenko +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|arxiv_author|Siddhant M. Jayakumar +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|arxiv_author|Timothy P. 
Lillicrap +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|arxiv_author|Jack W. Rae +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|comment|"> the Compressive Transformer, an attentive sequence model which compresses past memories for long-range sequence learning. + +[Blog post](/doc/2020/02/a_new_model_and_dataset_for_lon)" +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|relatedDoc|http://www.semanlink.net/doc/2020/02/a_new_model_and_dataset_for_lon +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|title|[1911.05507] Compressive Transformers for Long-Range Sequence Modelling +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|bookmarkOf|https://arxiv.org/abs/1911.05507 +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|creationTime|2020-02-11T08:48:20Z +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|arxiv_summary|"We present the Compressive Transformer, an attentive sequence model which +compresses past memories for long-range sequence learning. We find the +Compressive Transformer obtains state-of-the-art language modelling results in +the WikiText-103 and Enwik8 benchmarks, achieving 17.1 ppl and 0.97 bpc +respectively. We also find it can model high-frequency speech effectively and +can be used as a memory mechanism for RL, demonstrated on an object matching +task. To promote the domain of long-range sequence learning, we propose a new +open-vocabulary language modelling benchmark derived from books, PG-19." +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|arxiv_firstAuthor|Jack W. Rae +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|arxiv_updated|2019-11-13T14:36:01Z +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|arxiv_title|Compressive Transformers for Long-Range Sequence Modelling +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|arxiv_published|2019-11-13T14:36:01Z +http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf|arxiv_num|1911.05507 +http://www.semanlink.net/doc/2019/07/%C2%AB_mauritius_leaks_%C2%BB_l%E2%80%99ile_qui|creationDate|2019-07-23 +http://www.semanlink.net/doc/2019/07/%C2%AB_mauritius_leaks_%C2%BB_l%E2%80%99ile_qui|tag|http://www.semanlink.net/tag/ile_maurice +http://www.semanlink.net/doc/2019/07/%C2%AB_mauritius_leaks_%C2%BB_l%E2%80%99ile_qui|tag|http://www.semanlink.net/tag/paradis_fiscaux +http://www.semanlink.net/doc/2019/07/%C2%AB_mauritius_leaks_%C2%BB_l%E2%80%99ile_qui|tag|http://www.semanlink.net/tag/new_africa +http://www.semanlink.net/doc/2019/07/%C2%AB_mauritius_leaks_%C2%BB_l%E2%80%99ile_qui|tag|http://www.semanlink.net/tag/leaks +http://www.semanlink.net/doc/2019/07/%C2%AB_mauritius_leaks_%C2%BB_l%E2%80%99ile_qui|tag|http://www.semanlink.net/tag/finance +http://www.semanlink.net/doc/2019/07/%C2%AB_mauritius_leaks_%C2%BB_l%E2%80%99ile_qui|title|« Mauritius Leaks » : l’île qui siphonne les rentrées fiscales de l’Afrique +http://www.semanlink.net/doc/2019/07/%C2%AB_mauritius_leaks_%C2%BB_l%E2%80%99ile_qui|bookmarkOf|https://www.lemonde.fr/afrique/article/2019/07/23/mauritius-leaks-l-ile-qui-siphonne-les-rentrees-fiscales-de-l-afrique_5492493_3212.html +http://www.semanlink.net/doc/2019/07/%C2%AB_mauritius_leaks_%C2%BB_l%E2%80%99ile_qui|creationTime|2019-07-23T18:33:31Z +http://www.semanlink.net/doc/2020/05/aakash_kumar_nain_sur_twitter_|creationDate|2020-05-11 
+http://www.semanlink.net/doc/2020/05/aakash_kumar_nain_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/05/aakash_kumar_nain_sur_twitter_|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/05/aakash_kumar_nain_sur_twitter_|tag|http://www.semanlink.net/tag/peter_bloem +http://www.semanlink.net/doc/2020/05/aakash_kumar_nain_sur_twitter_|comment|Links to [this paper](doc:?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1806.01261) +http://www.semanlink.net/doc/2020/05/aakash_kumar_nain_sur_twitter_|relatedDoc|https://arxiv.org/abs/1806.01261 +http://www.semanlink.net/doc/2020/05/aakash_kumar_nain_sur_twitter_|title|"Aakash Kumar Nain sur Twitter : ""I want to start reading about Graph NNs but I have two questions in my mind: 1. Applications of GNNs 2. Which paper should I start with?"" / Twitter" +http://www.semanlink.net/doc/2020/05/aakash_kumar_nain_sur_twitter_|bookmarkOf|https://twitter.com/A_K_Nain/status/1259734259243204608?s=20 +http://www.semanlink.net/doc/2020/05/aakash_kumar_nain_sur_twitter_|creationTime|2020-05-11T10:59:16Z +http://www.semanlink.net/doc/2019/12/devant_l%E2%80%99imminence_de_l%E2%80%99interdi|creationDate|2019-12-03 +http://www.semanlink.net/doc/2019/12/devant_l%E2%80%99imminence_de_l%E2%80%99interdi|tag|http://www.semanlink.net/tag/insecticide +http://www.semanlink.net/doc/2019/12/devant_l%E2%80%99imminence_de_l%E2%80%99interdi|tag|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.semanlink.net/doc/2019/12/devant_l%E2%80%99imminence_de_l%E2%80%99interdi|title|Devant l’imminence de l’interdiction du chlorpyrifos en Europe, les fabricants contre-attaquent +http://www.semanlink.net/doc/2019/12/devant_l%E2%80%99imminence_de_l%E2%80%99interdi|bookmarkOf|https://www.lemonde.fr/planete/article/2019/12/02/chlorpyrifos-les-fabricants-contre-attaquent_6021296_3244.html +http://www.semanlink.net/doc/2019/12/devant_l%E2%80%99imminence_de_l%E2%80%99interdi|creationTime|2019-12-03T00:08:32Z +http://www.semanlink.net/doc/2019/06/the_terrible_truth_about_amazon|creationDate|2019-06-21 +http://www.semanlink.net/doc/2019/06/the_terrible_truth_about_amazon|tag|http://www.semanlink.net/tag/privacy_and_internet +http://www.semanlink.net/doc/2019/06/the_terrible_truth_about_amazon|tag|http://www.semanlink.net/tag/amazon_alexa +http://www.semanlink.net/doc/2019/06/the_terrible_truth_about_amazon|title|The Terrible Truth About Amazon Alexa and Privacy +http://www.semanlink.net/doc/2019/06/the_terrible_truth_about_amazon|bookmarkOf|https://gizmodo.com/the-terrible-truth-about-alexa-1834075404 +http://www.semanlink.net/doc/2019/06/the_terrible_truth_about_amazon|creationTime|2019-06-21T21:44:22Z +http://www.semanlink.net/doc/2020/01/the_berkeley_nlp_group|creationDate|2020-01-12 +http://www.semanlink.net/doc/2020/01/the_berkeley_nlp_group|tag|http://www.semanlink.net/tag/berkeley +http://www.semanlink.net/doc/2020/01/the_berkeley_nlp_group|tag|http://www.semanlink.net/tag/nlp_teams +http://www.semanlink.net/doc/2020/01/the_berkeley_nlp_group|title|The Berkeley NLP Group +http://www.semanlink.net/doc/2020/01/the_berkeley_nlp_group|bookmarkOf|http://nlp.cs.berkeley.edu/ +http://www.semanlink.net/doc/2020/01/the_berkeley_nlp_group|creationTime|2020-01-12T10:48:23Z +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|creationDate|2019-07-10 +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|tag|http://www.semanlink.net/tag/consciousness_prior 
+http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|tag|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|arxiv_author|Drew A. Hudson +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|arxiv_author|Christopher D. Manning +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|comment|"> Given an image, we first predict a probabilistic graph +that represents its underlying semantics and serves as a structured world model. +Then, we perform sequential reasoning over the graph, iteratively traversing its +nodes to answer a given question or draw a new inference. In contrast to most +neural architectures that are designed to closely interact with the raw sensory +data, our model operates instead in an abstract latent space, by transforming both +the visual and linguistic modalities into semantic concept-based representations, +thereby achieving enhanced transparency and modularity. + +> Drawing inspiration from [Bengio’s consciousness prior](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1709.08568)..." +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|relatedDoc|https://arxiv.org/abs/1709.08568 +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|title|[1907.03950] Learning by Abstraction: The Neural State Machine +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|bookmarkOf|https://arxiv.org/abs/1907.03950 +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|creationTime|2019-07-10T22:05:52Z +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|arxiv_summary|"We introduce the Neural State Machine, seeking to bridge the gap between the +neural and symbolic views of AI and integrate their complementary strengths for +the task of visual reasoning. Given an image, we first predict a probabilistic +graph that represents its underlying semantics and serves as a structured world +model. Then, we perform sequential reasoning over the graph, iteratively +traversing its nodes to answer a given question or draw a new inference. In +contrast to most neural architectures that are designed to closely interact +with the raw sensory data, our model operates instead in an abstract latent +space, by transforming both the visual and linguistic modalities into semantic +concept-based representations, thereby achieving enhanced transparency and +modularity. We evaluate our model on VQA-CP and GQA, two recent VQA datasets +that involve compositionality, multi-step inference and diverse reasoning +skills, achieving state-of-the-art results in both cases. We provide further +experiments that illustrate the model's strong generalization capacity across +multiple dimensions, including novel compositions of concepts, changes in the +answer distribution, and unseen linguistic structures, demonstrating the +qualities and efficacy of our approach." +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|arxiv_firstAuthor|Drew A. 
Hudson +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|arxiv_updated|2019-11-25T10:02:05Z +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|arxiv_title|Learning by Abstraction: The Neural State Machine +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|arxiv_published|2019-07-09T03:08:41Z +http://www.semanlink.net/doc/2019/07/_1907_03950_learning_by_abstra|arxiv_num|1907.03950 +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|creationDate|2019-09-11 +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|tag|http://www.semanlink.net/tag/automatic_summarization +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|tag|http://www.semanlink.net/tag/rigolo +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|arxiv_author|Raymond Li +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|arxiv_author|Christopher Pal +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|arxiv_author|Sandeep Subramanian +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|arxiv_author|Jonathan Pilault +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|comment|> Note: The abstract above was not written by the authors, it was generated by one of the models presented in this paper. +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|title|[1909.03186] On Extractive and Abstractive Neural Document Summarization with Transformer Language Models +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|bookmarkOf|https://arxiv.org/abs/1909.03186 +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|creationTime|2019-09-11T18:15:42Z +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|arxiv_summary|"We present a method to produce abstractive summaries of long documents that +exceed several thousand words via neural abstractive summarization. We perform +a simple extractive step before generating a summary, which is then used to +condition the transformer language model on relevant information before being +tasked with generating a summary. We show that this extractive step +significantly improves summarization results. We also show that this approach +produces more abstractive summaries compared to prior work that employs a copy +mechanism while still achieving higher rouge scores. Note: The abstract above +was not written by the authors, it was generated by one of the models presented +in this paper." 
+http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|arxiv_firstAuthor|Sandeep Subramanian +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|arxiv_updated|2019-09-07T04:33:26Z +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|arxiv_title|On Extractive and Abstractive Neural Document Summarization with Transformer Language Models +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|arxiv_published|2019-09-07T04:33:26Z +http://www.semanlink.net/doc/2019/09/_1909_03186_on_extractive_and_|arxiv_num|1909.03186 +http://www.semanlink.net/doc/2019/08/sebastien_castellion|creationDate|2019-08-25 +http://www.semanlink.net/doc/2019/08/sebastien_castellion|tag|http://www.semanlink.net/tag/stefan_zweig +http://www.semanlink.net/doc/2019/08/sebastien_castellion|tag|http://www.semanlink.net/tag/citation +http://www.semanlink.net/doc/2019/08/sebastien_castellion|tag|http://www.semanlink.net/tag/heroisme +http://www.semanlink.net/doc/2019/08/sebastien_castellion|tag|http://www.semanlink.net/tag/michel_servet +http://www.semanlink.net/doc/2019/08/sebastien_castellion|tag|http://www.semanlink.net/tag/john_calvin +http://www.semanlink.net/doc/2019/08/sebastien_castellion|comment|"> tuer un homme ce n’est pas défendre une doctrine, c’est tuer un homme. + +(A l'adresse de Calvin, suite à la condamnation de Servet) + +1er à prêcher la tolérance en Europe, avant Locke et Hume. +" +http://www.semanlink.net/doc/2019/08/sebastien_castellion|title|Sébastien Castellion +http://www.semanlink.net/doc/2019/08/sebastien_castellion|bookmarkOf|https://fr.wikipedia.org/wiki/S%C3%A9bastien_Castellion +http://www.semanlink.net/doc/2019/08/sebastien_castellion|creationTime|2019-08-25T19:36:23Z +http://www.semanlink.net/doc/2020/02/elastik_nearest_neighbors_ins|creationDate|2020-02-13 +http://www.semanlink.net/doc/2020/02/elastik_nearest_neighbors_ins|tag|http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s +http://www.semanlink.net/doc/2020/02/elastik_nearest_neighbors_ins|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.semanlink.net/doc/2020/02/elastik_nearest_neighbors_ins|comment|"Combining Locality-Sensitive Hashing and Elasticsearch for Scalable Online K-Nearest Neighbors Search. 
+[Github](https://github.com/alexklibisz/elastik-nearest-neighbors), [Improved version](https://github.com/alexklibisz/elastiknn)" +http://www.semanlink.net/doc/2020/02/elastik_nearest_neighbors_ins|title|ElastiK Nearest Neighbors +http://www.semanlink.net/doc/2020/02/elastik_nearest_neighbors_ins|bookmarkOf|https://blog.insightdatascience.com/elastik-nearest-neighbors-4b1f6821bd62 +http://www.semanlink.net/doc/2020/02/elastik_nearest_neighbors_ins|creationTime|2020-02-13T23:48:03Z +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|creationDate|2020-03-01 +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|tag|http://www.semanlink.net/tag/graph_attention_networks +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|comment|[about this blog post](/doc/2020/03/transformers_are_graph_neural_n) +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|relatedDoc|http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|title|"Chaitanya Joshi sur Twitter : ""Excited to share a blog post on the connection between #Transformers for NLP and #GraphNeuralNetworks""" +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|bookmarkOf|https://twitter.com/chaitjo/status/1233220586358181888 +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|creationTime|2020-03-01T03:17:11Z +http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_|mainDoc|http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n +http://www.semanlink.net/doc/2020/01/joint_intent_classification_and|creationDate|2020-01-09 +http://www.semanlink.net/doc/2020/01/joint_intent_classification_and|tag|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/doc/2020/01/joint_intent_classification_and|tag|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/doc/2020/01/joint_intent_classification_and|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2020/01/joint_intent_classification_and|tag|http://www.semanlink.net/tag/intent_classification_and_slot_filling +http://www.semanlink.net/doc/2020/01/joint_intent_classification_and|tag|http://www.semanlink.net/tag/olivier_grisel +http://www.semanlink.net/doc/2020/01/joint_intent_classification_and|comment|"tutorial to build a simple Natural Language Understanding system using the +@snips + voice assistant dataset (English only)." 
+http://www.semanlink.net/doc/2020/01/joint_intent_classification_and|title|Joint Intent Classification and Slot Filling with Transformers (Jupyter Notebook Viewer) +http://www.semanlink.net/doc/2020/01/joint_intent_classification_and|bookmarkOf|https://nbviewer.jupyter.org/github/m2dsupsdlclass/lectures-labs/blob/master/labs/06_deep_nlp/Transformers_Joint_Intent_Classification_Slot_Filling_rendered.ipynb +http://www.semanlink.net/doc/2020/01/joint_intent_classification_and|creationTime|2020-01-09T01:15:16Z +http://www.semanlink.net/doc/2020/02/distilling_bert_models_with_spa|creationDate|2020-02-15 +http://www.semanlink.net/doc/2020/02/distilling_bert_models_with_spa|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/02/distilling_bert_models_with_spa|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/doc/2020/02/distilling_bert_models_with_spa|tag|http://www.semanlink.net/tag/yves_peirsman +http://www.semanlink.net/doc/2020/02/distilling_bert_models_with_spa|tag|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/doc/2020/02/distilling_bert_models_with_spa|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/02/distilling_bert_models_with_spa|title|Distilling BERT models with spaCy - Towards Data Science (2019) +http://www.semanlink.net/doc/2020/02/distilling_bert_models_with_spa|bookmarkOf|https://towardsdatascience.com/distilling-bert-models-with-spacy-277c7edc426c +http://www.semanlink.net/doc/2020/02/distilling_bert_models_with_spa|creationTime|2020-02-15T11:15:11Z +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|creationDate|2020-04-27 +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|tag|http://www.semanlink.net/tag/taxonomies +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|arxiv_author|Philip Resnik +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|title|[cmp-lg/9511007] Using Information Content to Evaluate Semantic Similarity in a Taxonomy (1995) +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|bookmarkOf|https://arxiv.org/abs/cmp-lg/9511007 +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|creationTime|2020-04-27T17:22:44Z +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|arxiv_summary|"This paper presents a new measure of semantic similarity in an IS-A taxonomy, +based on the notion of information content. Experimental evaluation suggests +that the measure performs encouragingly well (a correlation of r = 0.79 with a +benchmark set of human similarity judgments, with an upper bound of r = 0.90 +for human subjects performing the same task), and significantly better than the +traditional edge counting approach (r = 0.66)." 
+http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|arxiv_firstAuthor|Philip Resnik +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|arxiv_updated|1995-11-29T19:32:04Z +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|arxiv_title|Using Information Content to Evaluate Semantic Similarity in a Taxonomy +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|arxiv_published|1995-11-29T19:32:04Z +http://www.semanlink.net/doc/2020/04/cmp_lg_9511007_using_informat|arxiv_num|cmp-lg/9511007 +http://www.semanlink.net/doc/2019/11/neutrinos_lead_to_unexpected_di|creationDate|2019-11-14 +http://www.semanlink.net/doc/2019/11/neutrinos_lead_to_unexpected_di|tag|http://www.semanlink.net/tag/neutrino +http://www.semanlink.net/doc/2019/11/neutrinos_lead_to_unexpected_di|tag|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/doc/2019/11/neutrinos_lead_to_unexpected_di|tag|http://www.semanlink.net/tag/eigenvectors +http://www.semanlink.net/doc/2019/11/neutrinos_lead_to_unexpected_di|comment|"Neutrinos, Fields medalist Tao, and a formula to compute eigenvectors from eigenvalues - ""too good to be true"", but true" +http://www.semanlink.net/doc/2019/11/neutrinos_lead_to_unexpected_di|title|Neutrinos Lead to Unexpected Discovery in Basic Math Quanta Magazine +http://www.semanlink.net/doc/2019/11/neutrinos_lead_to_unexpected_di|bookmarkOf|https://www.quantamagazine.org/neutrinos-lead-to-unexpected-discovery-in-basic-math-20191113/ +http://www.semanlink.net/doc/2019/11/neutrinos_lead_to_unexpected_di|creationTime|2019-11-14T19:21:26Z +http://www.semanlink.net/doc/2019/11/camembert|creationDate|2019-11-10 +http://www.semanlink.net/doc/2019/11/camembert|tag|http://www.semanlink.net/tag/camembert_nlp +http://www.semanlink.net/doc/2019/11/camembert|comment|language model for French based on the RoBERTa architecture pretrained on the French subcorpus of the OSCAR multilingual corpus +http://www.semanlink.net/doc/2019/11/camembert|title|CamemBERT +http://www.semanlink.net/doc/2019/11/camembert|bookmarkOf|https://camembert-model.fr/ +http://www.semanlink.net/doc/2019/11/camembert|creationTime|2019-11-10T18:08:18Z +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|creationDate|2020-03-19 +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|tag|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_author|Xuanjing Huang +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_author|Tianxiang Sun +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_author|Yige Xu +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_author|Xipeng Qiu +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_author|Yunfan Shao +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_author|Ning Dai +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|title|[2003.08271] Pre-trained Models for Natural Language Processing: A Survey +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|bookmarkOf|https://arxiv.org/abs/2003.08271 
+http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|creationTime|2020-03-19T13:34:50Z +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_summary|"Recently, the emergence of pre-trained models (PTMs) has brought natural +language processing (NLP) to a new era. In this survey, we provide a +comprehensive review of PTMs for NLP. We first briefly introduce language +representation learning and its research progress. Then we systematically +categorize existing PTMs based on a taxonomy with four perspectives. Next, we +describe how to adapt the knowledge of PTMs to the downstream tasks. Finally, +we outline some potential directions of PTMs for future research. This survey +is purposed to be a hands-on guide for understanding, using, and developing +PTMs for various NLP tasks." +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_firstAuthor|Xipeng Qiu +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_updated|2020-03-24T10:32:40Z +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_title|Pre-trained Models for Natural Language Processing: A Survey +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_published|2020-03-18T15:22:51Z +http://www.semanlink.net/doc/2020/03/_2003_08271_pre_trained_models|arxiv_num|2003.08271 +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|creationDate|2019-11-12 +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/artificial_human_intelligence +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/global_workspace_theory +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/neuroscience_and_ai +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/connectionist_vs_symbolic_debate +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/human_ai_collaboration +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/consciousness_prior +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/personal_assistant +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/ai_stanford +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|tag|http://www.semanlink.net/tag/good +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Peter Lu +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Sophia Sanchez +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Rohun Saxena +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Lucy Wang +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Yousef Hindy 
+http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Michael Smith +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Catherine Wong +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Michelle Lam +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Thomas Dean +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Marcus Gomez +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Maurice Chiang +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_author|Nate Gruver +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|comment|"**The use of natural language to facilitate communication +between the expert programmer and apprentice AI system.** + +> an overview of the material covered in a course taught at Stanford in the spring quarter of 2018. The course draws upon **insight from cognitive and systems neuroscience to implement hybrid connectionist and symbolic reasoning systems** that leverage and extend the state of the art in machine learning **by integrating human and machine intelligence**. As a concrete example we focus on digital assistants that learn from continuous dialog with an expert software engineer while providing initial value as powerful analytical, computational and mathematical savants. + +> [#Dehaene](/tag/stanislas_dehaene)'s work extends the [#Global Workspace Theory](/tag/global_workspace_theory) of Bernard Baars. Dehaene’s version of the theory combined with Yoshua Bengio’s concept of a [#consciousness prior](/tag/consciousness_prior.html) and deep reinforcement learning suggest a model for constructing and maintaining the cognitive states that arise and persist during complex problem solving." +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|title|[1807.00082] Amanuensis: The Programmer's Apprentice +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|bookmarkOf|https://arxiv.org/abs/1807.00082 +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|creationTime|2019-11-12T16:25:10Z +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_summary|"This document provides an overview of the material covered in a course taught +at Stanford in the spring quarter of 2018. The course draws upon insight from +cognitive and systems neuroscience to implement hybrid connectionist and +symbolic reasoning systems that leverage and extend the state of the art in +machine learning by integrating human and machine intelligence. As a concrete +example we focus on digital assistants that learn from continuous dialog with +an expert software engineer while providing initial value as powerful +analytical, computational and mathematical savants. Over time these savants +learn cognitive strategies (domain-relevant problem solving skills) and develop +intuitions (heuristics and the experience necessary for applying them) by +learning from their expert associates. By doing so these savants elevate their +innate analytical skills allowing them to partner on an equal footing as +versatile collaborators - effectively serving as cognitive extensions and +digital prostheses, thereby amplifying and emulating their human partner's +conceptually-flexible thinking patterns and enabling improved access to and +control over powerful computing resources." 
+http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_firstAuthor|Thomas Dean +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_updated|2018-11-08T13:33:18Z +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_title|Amanuensis: The Programmer's Apprentice +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_published|2018-06-29T22:59:08Z +http://www.semanlink.net/doc/2019/11/_1807_00082_amanuensis_the_pr|arxiv_num|1807.00082 +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|creationDate|2019-06-18 +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|tag|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|tag|http://www.semanlink.net/tag/similarity_learning +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|arxiv_author|Salvador García +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|arxiv_author|Francisco Herrera +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|arxiv_author|Juan Luis Suárez +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|comment|distance metric learning, a branch of machine learning that aims to learn distances from the data +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|title|[1812.05944] A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|bookmarkOf|https://arxiv.org/abs/1812.05944 +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|creationTime|2019-06-18T10:41:40Z +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|arxiv_summary|"Distance metric learning is a branch of machine learning that aims to learn +distances from the data. Distance metric learning can be useful to improve +similarity learning algorithms, and also has applications in dimensionality +reduction. This paper describes the distance metric learning problem and +analyzes its main mathematical foundations. In addition, it also discusses some +of the most popular distance metric learning techniques used in classification, +showing their goals and the required information to understand and use them. +Furthermore, some experiments to evaluate the performance of the different +algorithms are also provided. Finally, this paper discusses several +possibilities of future work in this topic." 
+http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|arxiv_firstAuthor|Juan Luis Suárez +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|arxiv_updated|2019-12-17T14:42:23Z +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|arxiv_title|A Tutorial on Distance Metric Learning: Mathematical Foundations, Algorithms and Experiments +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|arxiv_published|2018-12-14T14:07:36Z +http://www.semanlink.net/doc/2019/06/a_tutorial_on_distance_metric_l|arxiv_num|1812.05944 +http://www.semanlink.net/doc/2019/09/pourquoi_le_travail_est_il_deve|creationDate|2019-09-28 +http://www.semanlink.net/doc/2019/09/pourquoi_le_travail_est_il_deve|tag|http://www.semanlink.net/tag/absurde +http://www.semanlink.net/doc/2019/09/pourquoi_le_travail_est_il_deve|tag|http://www.semanlink.net/tag/methodes_agiles +http://www.semanlink.net/doc/2019/09/pourquoi_le_travail_est_il_deve|tag|http://www.semanlink.net/tag/travail +http://www.semanlink.net/doc/2019/09/pourquoi_le_travail_est_il_deve|tag|http://www.semanlink.net/tag/management +http://www.semanlink.net/doc/2019/09/pourquoi_le_travail_est_il_deve|tag|http://www.semanlink.net/tag/silicon_valley +http://www.semanlink.net/doc/2019/09/pourquoi_le_travail_est_il_deve|title|Pourquoi le travail est-il devenu absurde ? – InternetActu +http://www.semanlink.net/doc/2019/09/pourquoi_le_travail_est_il_deve|bookmarkOf|https://www.lemonde.fr/blog/internetactu/2019/09/26/pourquoi-le-travail-est-il-devenu-absurde/ +http://www.semanlink.net/doc/2019/09/pourquoi_le_travail_est_il_deve|creationTime|2019-09-28T18:37:19Z +http://www.semanlink.net/doc/2019/07/pipelines_and_composite_estimat|creationDate|2019-07-02 +http://www.semanlink.net/doc/2019/07/pipelines_and_composite_estimat|tag|http://www.semanlink.net/tag/scikit_learn +http://www.semanlink.net/doc/2019/07/pipelines_and_composite_estimat|tag|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp +http://www.semanlink.net/doc/2019/07/pipelines_and_composite_estimat|comment|[blog post with sample code](https://towardsdatascience.com/columntransformer-meets-natural-language-processing-da1f116dd69f) +http://www.semanlink.net/doc/2019/07/pipelines_and_composite_estimat|title|Pipelines and composite estimators / ColumnTransformer for heterogeneous data — scikit-learn documentation +http://www.semanlink.net/doc/2019/07/pipelines_and_composite_estimat|bookmarkOf|https://scikit-learn.org/stable/modules/compose.html#column-transformer +http://www.semanlink.net/doc/2019/07/pipelines_and_composite_estimat|creationTime|2019-07-02T01:01:11Z +http://www.semanlink.net/doc/2020/05/neruda_film_|creationDate|2020-05-07 +http://www.semanlink.net/doc/2020/05/neruda_film_|tag|http://www.semanlink.net/tag/film +http://www.semanlink.net/doc/2020/05/neruda_film_|tag|http://www.semanlink.net/tag/chili +http://www.semanlink.net/doc/2020/05/neruda_film_|tag|http://www.semanlink.net/tag/pablo_neruda +http://www.semanlink.net/doc/2020/05/neruda_film_|comment|"> Je suis une dame, Pablo. Je n’ai plus de sous-vêtements propres. Je ne peux pas me lever. + +Présentation par [ARTE](https://www.arte.tv/fr/videos/086872-000-A/neruda/) : + +La cavale du poète chilien Pablo Neruda, poursuivi à la fin des années 1940 par le gouvernement de Videla... Signée Pablo Larraín (""Jackie"", ""No""), une oeuvre onirique et audacieuse, avec Luis Gnecco et Gael García Bernal. + +Chili, 1948. 
Le gouvernement de Gabriel Gonzáles Videla lance une chasse aux communistes. Au Congrès national, le sénateur de gauche Pablo Neruda accuse le président de trahir ceux qui l’ont aidé à se hisser au pouvoir deux ans auparavant. Videla demande sa destitution et confie au redoutable inspecteur Oscar Peluchonneau le soin de procéder à son arrestation. Commence alors un jeu du chat et de la souris entre le policier et le poète... + +Mythe revisité + +Si Pablo Larraín retrace la traque subie, à la fin des années 1940, par l’intellectuel et sa seconde femme, Delia del Carril, il s’affranchit du biopic classique en entremêlant savamment réel et imaginaire. Narrée par l’obstiné Oscar Peluchonneau, brillamment campé par Gael García Bernal, cette course-poursuite haletante se mue en fable onirique : le vaniteux policier ne serait qu’un fantasme né de l’imagination de Neruda, heureux de forger sa propre légende. Des bordels de Santiago jusqu’à l’échappée finale au cœur de la cordillère des Andes, le film s’amuse à réinventer le mythe du héros adulé de toute une nation. Bluffant de mimétisme, Luis Gnecco compose un Neruda épris de justice et de liberté mais aussi provocateur et égoïste. Une œuvre iconoclaste entre polar, conte et western enneigé." +http://www.semanlink.net/doc/2020/05/neruda_film_|title|Neruda (film) +http://www.semanlink.net/doc/2020/05/neruda_film_|bookmarkOf|https://fr.wikipedia.org/wiki/Neruda_(film) +http://www.semanlink.net/doc/2020/05/neruda_film_|creationTime|2020-05-07T00:58:59Z +http://www.semanlink.net/doc/2019/08/siamese_neural_networks_for_one|creationDate|2019-08-06 +http://www.semanlink.net/doc/2019/08/siamese_neural_networks_for_one|tag|http://www.semanlink.net/tag/one_shot_generalization +http://www.semanlink.net/doc/2019/08/siamese_neural_networks_for_one|tag|http://www.semanlink.net/tag/image_recognition +http://www.semanlink.net/doc/2019/08/siamese_neural_networks_for_one|tag|http://www.semanlink.net/tag/ruslan_salakhutdinov +http://www.semanlink.net/doc/2019/08/siamese_neural_networks_for_one|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2019/08/siamese_neural_networks_for_one|comment|"> ""Humans exhibit a strong ability to acquire and recognize +new patterns."" + +> we learn image representations via a supervised +metric-based approach with siamese neural networks, **then +reuse that network’s features for one-shot learning without +any retraining**. 
+ + +" +http://www.semanlink.net/doc/2019/08/siamese_neural_networks_for_one|title|Siamese Neural Networks for One-shot Image Recognition (2015) +http://www.semanlink.net/doc/2019/08/siamese_neural_networks_for_one|bookmarkOf|https://www.cs.cmu.edu/~rsalakhu/papers/oneshot1.pdf +http://www.semanlink.net/doc/2019/08/siamese_neural_networks_for_one|creationTime|2019-08-06T18:36:48Z +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|creationDate|2020-01-09 +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|tag|http://www.semanlink.net/tag/intent_classification_and_slot_filling +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|tag|http://www.semanlink.net/tag/alibaba +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|arxiv_author|Qian Chen +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|arxiv_author|Wen Wang +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|arxiv_author|Zhu Zhuo +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|comment|"> Experimental results show that our +proposed joint BERT model outperforms BERT +models modeling intent classification and slot filling +separately, demonstrating the efficacy of exploiting +the relationship between the two tasks. + +Adding a CRF on top of the model doesn't improve the results." +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|title|[1902.10909] BERT for Joint Intent Classification and Slot Filling +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|bookmarkOf|https://arxiv.org/abs/1902.10909 +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|creationTime|2020-01-09T01:13:39Z +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|arxiv_summary|"Intent classification and slot filling are two essential tasks for natural +language understanding. They often suffer from small-scale human-labeled +training data, resulting in poor generalization capability, especially for rare +words. Recently a new language representation model, BERT (Bidirectional +Encoder Representations from Transformers), facilitates pre-training deep +bidirectional representations on large-scale unlabeled corpora, and has created +state-of-the-art models for a wide variety of natural language processing tasks +after simple fine-tuning. However, there has not been much effort on exploring +BERT for natural language understanding. In this work, we propose a joint +intent classification and slot filling model based on BERT. Experimental +results demonstrate that our proposed model achieves significant improvement on +intent classification accuracy, slot filling F1, and sentence-level semantic +frame accuracy on several public benchmark datasets, compared to the +attention-based recurrent neural network models and slot-gated models." 
+http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|arxiv_firstAuthor|Qian Chen +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|arxiv_updated|2019-02-28T05:54:16Z +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|arxiv_title|BERT for Joint Intent Classification and Slot Filling +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|arxiv_published|2019-02-28T05:54:16Z +http://www.semanlink.net/doc/2020/01/_1902_10909_bert_for_joint_int|arxiv_num|1902.10909 +http://www.semanlink.net/doc/2019/06/kokopelli_association_|creationDate|2019-06-18 +http://www.semanlink.net/doc/2019/06/kokopelli_association_|tag|http://www.semanlink.net/tag/jardinage +http://www.semanlink.net/doc/2019/06/kokopelli_association_|tag|http://www.semanlink.net/tag/semences_paysanes +http://www.semanlink.net/doc/2019/06/kokopelli_association_|tag|http://www.semanlink.net/tag/desobeissance_civile +http://www.semanlink.net/doc/2019/06/kokopelli_association_|tag|http://www.semanlink.net/tag/privatisation_du_vivant +http://www.semanlink.net/doc/2019/06/kokopelli_association_|title|Kokopelli (association) +http://www.semanlink.net/doc/2019/06/kokopelli_association_|bookmarkOf|https://fr.wikipedia.org/wiki/Kokopelli_%28association%29#cite_note-20 +http://www.semanlink.net/doc/2019/06/kokopelli_association_|creationTime|2019-06-18T22:34:39Z +http://www.semanlink.net/doc/2019/09/how_i_track_and_actually_learn_|creationDate|2019-09-07 +http://www.semanlink.net/doc/2019/09/how_i_track_and_actually_learn_|tag|http://www.semanlink.net/tag/learning +http://www.semanlink.net/doc/2019/09/how_i_track_and_actually_learn_|tag|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/doc/2019/09/how_i_track_and_actually_learn_|title|How I Track and Actually Learn New Things - Stevie Chancellor - Medium +http://www.semanlink.net/doc/2019/09/how_i_track_and_actually_learn_|bookmarkOf|https://medium.com/@stevie.chancellor/how-i-track-and-actually-learn-new-topics-4be554d8d8f7 +http://www.semanlink.net/doc/2019/09/how_i_track_and_actually_learn_|creationTime|2019-09-07T11:08:48Z +http://www.semanlink.net/doc/2019/12/mozilla_deepspeech_a_tensorflo|creationDate|2019-12-10 +http://www.semanlink.net/doc/2019/12/mozilla_deepspeech_a_tensorflo|tag|http://www.semanlink.net/tag/baidu +http://www.semanlink.net/doc/2019/12/mozilla_deepspeech_a_tensorflo|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2019/12/mozilla_deepspeech_a_tensorflo|tag|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/doc/2019/12/mozilla_deepspeech_a_tensorflo|comment|open source Speech-To-Text engine +http://www.semanlink.net/doc/2019/12/mozilla_deepspeech_a_tensorflo|title|mozilla/DeepSpeech: A TensorFlow implementation of Baidu's DeepSpeech architecture +http://www.semanlink.net/doc/2019/12/mozilla_deepspeech_a_tensorflo|bookmarkOf|https://github.com/mozilla/DeepSpeech +http://www.semanlink.net/doc/2019/12/mozilla_deepspeech_a_tensorflo|creationTime|2019-12-10T11:57:28Z +http://www.semanlink.net/doc/2019/08/rick_wierenga_about_fast_ai|creationDate|2019-08-12 +http://www.semanlink.net/doc/2019/08/rick_wierenga_about_fast_ai|tag|http://www.semanlink.net/tag/fast_ai_course +http://www.semanlink.net/doc/2019/08/rick_wierenga_about_fast_ai|title|Rick Wierenga's blog posts about fast.ai +http://www.semanlink.net/doc/2019/08/rick_wierenga_about_fast_ai|bookmarkOf|https://rickwierenga.com/tag/fastai 
+http://www.semanlink.net/doc/2019/08/rick_wierenga_about_fast_ai|creationTime|2019-08-12T19:20:26Z +http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep|creationDate|2019-10-07 +http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep|tag|http://www.semanlink.net/tag/google_deepmind +http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep|tag|http://www.semanlink.net/tag/paris_nlp_meetup +http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep|tag|http://www.semanlink.net/tag/grounded_language_learning +http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep|tag|http://www.semanlink.net/tag/slides +http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep|comment|A [Related paper](/doc/2019/10/feature_wise_transformations) +http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep|relatedDoc|http://www.semanlink.net/doc/2019/10/feature_wise_transformations +http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep|title|Language and Perception in Deep Learning - Florian Strub DeepMind, Univ. Lille, Inria +http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep|bookmarkOf|https://nlpparis.files.wordpress.com/2019/10/language-and-perception-in-deep-learning.pdf +http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep|creationTime|2019-10-07T23:08:40Z +http://www.semanlink.net/doc/2019/10/document_embedding_techniques|creationDate|2019-10-21 +http://www.semanlink.net/doc/2019/10/document_embedding_techniques|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/10/document_embedding_techniques|tag|http://www.semanlink.net/tag/document_embeddings +http://www.semanlink.net/doc/2019/10/document_embedding_techniques|title|Document Embedding Techniques +http://www.semanlink.net/doc/2019/10/document_embedding_techniques|bookmarkOf|https://towardsdatascience.com/document-embedding-techniques-fed3e7a6a25d +http://www.semanlink.net/doc/2019/10/document_embedding_techniques|creationTime|2019-10-21T22:33:24Z +http://www.semanlink.net/doc/2020/03/cs294_158_sp20_deep_unsupervise|creationDate|2020-03-08 +http://www.semanlink.net/doc/2020/03/cs294_158_sp20_deep_unsupervise|tag|http://www.semanlink.net/tag/deep_unsupervised_learning +http://www.semanlink.net/doc/2020/03/cs294_158_sp20_deep_unsupervise|tag|http://www.semanlink.net/tag/machine_learning_course +http://www.semanlink.net/doc/2020/03/cs294_158_sp20_deep_unsupervise|tag|http://www.semanlink.net/tag/generative_model +http://www.semanlink.net/doc/2020/03/cs294_158_sp20_deep_unsupervise|tag|http://www.semanlink.net/tag/berkeley +http://www.semanlink.net/doc/2020/03/cs294_158_sp20_deep_unsupervise|comment|cover two areas of deep learning in which labeled data is not required: Deep Generative Models and Self-supervised Learning +http://www.semanlink.net/doc/2020/03/cs294_158_sp20_deep_unsupervise|title|CS294-158-SP20 Deep Unsupervised Learning Spring 2020 +http://www.semanlink.net/doc/2020/03/cs294_158_sp20_deep_unsupervise|bookmarkOf|https://sites.google.com/view/berkeley-cs294-158-sp20/home +http://www.semanlink.net/doc/2020/03/cs294_158_sp20_deep_unsupervise|creationTime|2020-03-08T11:45:16Z +http://www.semanlink.net/doc/2020/03/linkeddatahub_atomgraph_s_ope|creationDate|2020-03-05 +http://www.semanlink.net/doc/2020/03/linkeddatahub_atomgraph_s_ope|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2020/03/linkeddatahub_atomgraph_s_ope|comment|> It is as easy to use for graph 
data as WordPress is for web content +http://www.semanlink.net/doc/2020/03/linkeddatahub_atomgraph_s_ope|title|LinkedDataHub - AtomGraph's open-source Knowledge Graph management system +http://www.semanlink.net/doc/2020/03/linkeddatahub_atomgraph_s_ope|bookmarkOf|https://atomgraph.com/blog/finally-a-knowledge-graph-management-system/ +http://www.semanlink.net/doc/2020/03/linkeddatahub_atomgraph_s_ope|creationTime|2020-03-05T13:08:32Z +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|creationDate|2019-10-09 +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|tag|http://www.semanlink.net/tag/poincare_embeddings +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|tag|http://www.semanlink.net/tag/yandex +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|tag|http://www.semanlink.net/tag/vector_space_model +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|tag|http://www.semanlink.net/tag/geometry_of_language_embeddings +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|tag|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|arxiv_author|Vage Egiazarian +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|arxiv_author|Denis Mazur +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|arxiv_author|Artem Babenko +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|arxiv_author|Stanislav Morozov +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|comment|"> In this paper, we aim to eliminate the inductive bias imposed by the embedding space geometry. Namely, we propose to map data into more general non-vector metric spaces: a weighted graph with a shortest path distance. By design, such graphs can model arbitrary geometry with a proper configuration of edges and weights. Our main contribution is PRODIGE (Probabilistic Differentiable Graph Embeddings): a method that learns a weighted graph representation of data end-to-end by gradient descent. + +[Github](https://github.com/stanis-morozov/prodige) + +" +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|title|[1910.03524] Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|bookmarkOf|https://arxiv.org/abs/1910.03524 +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|creationTime|2019-10-09T23:21:08Z +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|arxiv_summary|"Learning useful representations is a key ingredient to the success of modern +machine learning. Currently, representation learning mostly relies on embedding +data into Euclidean space. However, recent work has shown that data in some +domains is better modeled by non-euclidean metric spaces, and inappropriate +geometry can result in inferior performance. In this paper, we aim to eliminate +the inductive bias imposed by the embedding space geometry. Namely, we propose +to map data into more general non-vector metric spaces: a weighted graph with a +shortest path distance. By design, such graphs can model arbitrary geometry +with a proper configuration of edges and weights. 
Our main contribution is +PRODIGE: a method that learns a weighted graph representation of data +end-to-end by gradient descent. Greater generality and fewer model assumptions +make PRODIGE more powerful than existing embedding-based approaches. We confirm +the superiority of our method via extensive experiments on a wide range of +tasks, including classification, compression, and collaborative filtering." +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|arxiv_firstAuthor|Denis Mazur +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|arxiv_updated|2019-10-16T16:43:20Z +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|arxiv_title|Beyond Vector Spaces: Compact Data Representation as Differentiable Weighted Graphs +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|arxiv_published|2019-10-08T16:31:11Z +http://www.semanlink.net/doc/2019/10/_1910_03524_beyond_vector_spac|arxiv_num|1910.03524 +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|creationDate|2019-08-22 +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|tag|http://www.semanlink.net/tag/peter_bloem +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|tag|http://www.semanlink.net/tag/end_to_end_learning +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|tag|http://www.semanlink.net/tag/good +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|tag|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|comment|"a discussion on the potential of knowledge graphs for end-to-end learning and on +the challenges of this approach" +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|title|The knowledge graph as the default data model for learning on heterogeneous knowledge (2017) +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|bookmarkOf|https://content.iospress.com/articles/data-science/ds007 +http://www.semanlink.net/doc/2019/08/the_knowledge_graph_as_the_defa|creationTime|2019-08-22T10:49:49Z +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|creationDate|2019-07-04 +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|tag|http://www.semanlink.net/tag/stacking_ensemble_learning +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|tag|http://www.semanlink.net/tag/reseaux_bayesiens +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|tag|http://www.semanlink.net/tag/categorical_variables +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|arxiv_author|Austin Slakey +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|arxiv_author|Yoni Schamroth +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|arxiv_author|Daniel Salas +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|comment|"> To encode high-cardinality categorical variables, we introduce a technique based on traditional Bayesian statistics. 
This technique is a paradigm for ensemble modeling, specifically stacking, where the base learner consists of a problem- specific conjugate Bayesian model (CBM) +" +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|title|[1904.13001] Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|bookmarkOf|https://arxiv.org/abs/1904.13001 +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|creationTime|2019-07-04T01:43:34Z +http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|arxiv_summary|"Applied Data Scientists throughout various industries are commonly faced with +the challenging task of encoding high-cardinality categorical features into +digestible inputs for machine learning algorithms. This paper describes a +Bayesian encoding technique developed for WeWork's lead scoring engine which +outputs the probability of a person touring one of our office spaces based on +interaction, enrichment, and geospatial data. We present a paradigm for +ensemble modeling which mitigates the need to build complicated preprocessing +and encoding schemes for categorical variables. In particular, domain-specific +conjugate Bayesian models are employed as base learners for features in a +stacked ensemble model. For each column of a categorical feature matrix we fit +a problem-specific prior distribution, for example, the Beta distribution for a +binary classification problem. In order to analytically derive the moments of +the posterior distribution, we update the prior with the conjugate likelihood +of the corresponding target variable for each unique value of the given +categorical feature. This function of column and value encodes the categorical +feature matrix so that the final learner in the ensemble model ingests +low-dimensional numerical input. Experimental results on both curated and real +world datasets demonstrate impressive accuracy and computational efficiency on +a variety of problem archetypes. Particularly, for the lead scoring engine at +WeWork -- where some categorical features have as many as 300,000 levels -- we +have seen an AUC improvement from 0.87 to 0.97 through implementing conjugate +Bayesian model encoding." 
+http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|arxiv_firstAuthor|Austin Slakey
+http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|arxiv_updated|2019-04-30T00:24:06Z
+http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|arxiv_title|Encoding Categorical Variables with Conjugate Bayesian Models for WeWork Lead Scoring Engine
+http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|arxiv_published|2019-04-30T00:24:06Z
+http://www.semanlink.net/doc/2019/07/_1904_13001_encoding_categoric|arxiv_num|1904.13001
+http://www.semanlink.net/doc/2019/06/transports_la_chine_prepare_s|creationDate|2019-06-16
+http://www.semanlink.net/doc/2019/06/transports_la_chine_prepare_s|tag|http://www.semanlink.net/tag/hydrogen_economy
+http://www.semanlink.net/doc/2019/06/transports_la_chine_prepare_s|tag|http://www.semanlink.net/tag/hydrogen_cars
+http://www.semanlink.net/doc/2019/06/transports_la_chine_prepare_s|tag|http://www.semanlink.net/tag/chine
+http://www.semanlink.net/doc/2019/06/transports_la_chine_prepare_s|title|Transports : la Chine prépare sa « société de l’hydrogène » - Le Parisien
+http://www.semanlink.net/doc/2019/06/transports_la_chine_prepare_s|bookmarkOf|http://www.leparisien.fr/economie/transports-la-chine-prepare-sa-societe-de-l-hydrogene-15-06-2019-8093991.php
+http://www.semanlink.net/doc/2019/06/transports_la_chine_prepare_s|creationTime|2019-06-16T23:23:27Z
+http://www.semanlink.net/doc/2020/05/coronavirus_qui_ont_ete_les_c|creationDate|2020-05-13
+http://www.semanlink.net/doc/2020/05/coronavirus_qui_ont_ete_les_c|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales
+http://www.semanlink.net/doc/2020/05/coronavirus_qui_ont_ete_les_c|comment|> [The number of infections] "It is far less than a month ago, but it is still a lot, stresses Anne-Claude Crémieux, professor of infectious diseases at the Saint-Louis hospital in Paris. We are therefore going to lift the lockdown with chains of transmission still active and only a very rough picture of what is going on. We have no overall assessment of all the Ehpad [care homes for dependent elderly people], nor of all the hospitals, and **we do not know how the newly infected people were contaminated, even though this period should have allowed us to analyse all these points properly. There was no real public health strategy to make a success of ending the lockdown.**"
+http://www.semanlink.net/doc/2020/05/coronavirus_qui_ont_ete_les_c|title|Coronavirus : qui ont été les contaminés du confinement ? 
+http://www.semanlink.net/doc/2020/05/coronavirus_qui_ont_ete_les_c|bookmarkOf|https://www.lemonde.fr/sciences/article/2020/05/12/covid-19-qui-ont-ete-les-contamines-du-confinement_6039366_1650684.html +http://www.semanlink.net/doc/2020/05/coronavirus_qui_ont_ete_les_c|creationTime|2020-05-13T14:42:13Z +http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_|creationDate|2020-01-01 +http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_|tag|http://www.semanlink.net/tag/raphaelsty +http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_|tag|http://www.semanlink.net/tag/max_halford +http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_|tag|http://www.semanlink.net/tag/continual_learning +http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_|comment|Python library for online machine learning (ML on streaming data). Merge between creme and scikit-multiflow. [Paper](doc:2021/01/2012_04740_river_machine_lea) +http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_|relatedDoc|http://www.semanlink.net/doc/2021/01/2012_04740_river_machine_lea +http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_|title|online-ml/river (Online machine learning in Python) +http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_|bookmarkOf|https://github.com/online-ml/river +http://www.semanlink.net/doc/2020/01/creme_ml_creme_online_machine_|creationTime|2020-01-01T12:19:07Z +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|creationDate|2019-08-28 +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|tag|http://www.semanlink.net/tag/sentence_similarity +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|tag|http://www.semanlink.net/tag/emnlp_2019 +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|tag|http://www.semanlink.net/tag/sbert +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|tag|http://www.semanlink.net/tag/nearest_neighbor_search +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|arxiv_author|Nils Reimers +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|arxiv_author|Iryna Gurevych +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|comment|"> Sentence-BERT +(SBERT), a modification of the pretrained +BERT network that use siamese and triplet network +structures to derive **semantically meaningful +sentence embeddings** that can be compared +using cosine-similarity. + +Important because + +- BERT ist unsuitable for semantic similarity +search as well as for unsupervised tasks +like clustering. +- simple methods such as using the CLS token give low quality sentence embeddings + +However, the purpose of SBERT sentence embeddings +are **not to be used for transfer learning for other +tasks**. 
+ +[Related blog post](/doc/2020/01/richer_sentence_embeddings_usin); [Github](https://github.com/UKPLab/sentence-transformers)" +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|relatedDoc|http://www.semanlink.net/doc/2020/07/ukplab_sentence_transformers_s +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|relatedDoc|http://www.semanlink.net/doc/2020/01/richer_sentence_embeddings_usin +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|title|[1908.10084] Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|bookmarkOf|https://arxiv.org/abs/1908.10084 +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|creationTime|2019-08-28T22:41:55Z +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|arxiv_summary|"BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new +state-of-the-art performance on sentence-pair regression tasks like semantic +textual similarity (STS). However, it requires that both sentences are fed into +the network, which causes a massive computational overhead: Finding the most +similar pair in a collection of 10,000 sentences requires about 50 million +inference computations (~65 hours) with BERT. The construction of BERT makes it +unsuitable for semantic similarity search as well as for unsupervised tasks +like clustering. +In this publication, we present Sentence-BERT (SBERT), a modification of the +pretrained BERT network that use siamese and triplet network structures to +derive semantically meaningful sentence embeddings that can be compared using +cosine-similarity. This reduces the effort for finding the most similar pair +from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while +maintaining the accuracy from BERT. +We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning +tasks, where it outperforms other state-of-the-art sentence embeddings methods." +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|arxiv_firstAuthor|Nils Reimers +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|arxiv_updated|2019-08-27T08:50:17Z +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|arxiv_title|Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|arxiv_published|2019-08-27T08:50:17Z +http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen|arxiv_num|1908.10084 +http://www.semanlink.net/doc/2019/11/finding_data_block_nirvana_a_j|creationDate|2019-11-01 +http://www.semanlink.net/doc/2019/11/finding_data_block_nirvana_a_j|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2019/11/finding_data_block_nirvana_a_j|tag|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp +http://www.semanlink.net/doc/2019/11/finding_data_block_nirvana_a_j|comment|mixing tabular data and text using @fastdotai. An overview of how to use DataBlocks. 
+http://www.semanlink.net/doc/2019/11/finding_data_block_nirvana_a_j|title|Finding Data Block Nirvana (a journey through the fastai data block API) — Part 2 +http://www.semanlink.net/doc/2019/11/finding_data_block_nirvana_a_j|bookmarkOf|https://medium.com/@wgilliam/finding-data-block-nirvana-a-journey-through-the-fastai-data-block-api-part-2-9b23ea5d83ee +http://www.semanlink.net/doc/2019/11/finding_data_block_nirvana_a_j|creationTime|2019-11-01T10:06:17Z +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|creationDate|2020-01-12 +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|tag|http://www.semanlink.net/tag/tensorflow +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|tag|http://www.semanlink.net/tag/search_engines +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|tag|http://www.semanlink.net/tag/nlp_and_search +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|tag|http://www.semanlink.net/tag/nearest_neighbor_search +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|comment|[somewhat related](/doc/2020/01/elasticsearch_meets_bert_build) +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|relatedDoc|http://www.semanlink.net/doc/2020/01/elasticsearch_meets_bert_build +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|title|Building a Search Engine with BERT and TensorFlow - Towards Data Science +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|bookmarkOf|https://towardsdatascience.com/building-a-search-engine-with-bert-and-tensorflow-c6fdc0186c8a +http://www.semanlink.net/doc/2020/01/building_a_search_engine_with_b|creationTime|2020-01-12T17:13:45Z +http://www.semanlink.net/doc/2019/06/dinosaur_asteroid_hit_worst_po|creationDate|2019-06-20 +http://www.semanlink.net/doc/2019/06/dinosaur_asteroid_hit_worst_po|tag|http://www.semanlink.net/tag/extinction_des_dinosaures +http://www.semanlink.net/doc/2019/06/dinosaur_asteroid_hit_worst_po|title|Dinosaur asteroid hit 'worst possible place' - BBC News +http://www.semanlink.net/doc/2019/06/dinosaur_asteroid_hit_worst_po|bookmarkOf|https://www.bbc.com/news/science-environment-39922998 +http://www.semanlink.net/doc/2019/06/dinosaur_asteroid_hit_worst_po|creationTime|2019-06-20T01:13:51Z +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|creationDate|2020-03-15 +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|tag|http://www.semanlink.net/tag/neural_symbolic_computing +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_author|Artur Garcez +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_author|Marcelo Prates +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_author|Moshe Vardi +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_author|Marco Gori +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_author|Pedro Avelar 
+http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_author|Luis Lamb +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|comment|reviews the state-of-the-art on the use of GNNs as a model of neural-symbolic computing. +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|title|[2003.00330] Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|bookmarkOf|https://arxiv.org/abs/2003.00330 +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|creationTime|2020-03-15T10:39:59Z +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_summary|"Neural-symbolic computing has now become the subject of interest of both +academic and industry research laboratories. Graph Neural Networks (GNN) have +been widely used in relational and symbolic domains, with widespread +application of GNNs in combinatorial optimization, constraint satisfaction, +relational reasoning and other scientific domains. The need for improved +explainability, interpretability and trust of AI systems in general demands +principled methodologies, as suggested by neural-symbolic computing. In this +paper, we review the state-of-the-art on the use of GNNs as a model of +neural-symbolic computing. This includes the application of GNNs in several +domains as well as its relationship to current developments in neural-symbolic +computing." +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_firstAuthor|Luis Lamb +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_updated|2020-03-11T20:33:01Z +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_title|Graph Neural Networks Meet Neural-Symbolic Computing: A Survey and Perspective +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_published|2020-02-29T18:55:13Z +http://www.semanlink.net/doc/2020/03/_2003_00330_graph_neural_netwo|arxiv_num|2003.00330 +http://www.semanlink.net/doc/2019/12/how_to_turn_off_smart_tv_snoopi|creationDate|2019-12-29 +http://www.semanlink.net/doc/2019/12/how_to_turn_off_smart_tv_snoopi|tag|http://www.semanlink.net/tag/big_brother +http://www.semanlink.net/doc/2019/12/how_to_turn_off_smart_tv_snoopi|tag|http://www.semanlink.net/tag/television +http://www.semanlink.net/doc/2019/12/how_to_turn_off_smart_tv_snoopi|title|How to Turn Off Smart TV Snooping Features - Consumer Reports +http://www.semanlink.net/doc/2019/12/how_to_turn_off_smart_tv_snoopi|bookmarkOf|https://www.consumerreports.org/privacy/how-to-turn-off-smart-tv-snooping-features/ +http://www.semanlink.net/doc/2019/12/how_to_turn_off_smart_tv_snoopi|creationTime|2019-12-29T11:43:46Z +http://www.semanlink.net/doc/2019/07/huggingface_pytorch_transformer|creationDate|2019-07-27 +http://www.semanlink.net/doc/2019/07/huggingface_pytorch_transformer|tag|http://www.semanlink.net/tag/pytorch +http://www.semanlink.net/doc/2019/07/huggingface_pytorch_transformer|tag|http://www.semanlink.net/tag/pretrained_models +http://www.semanlink.net/doc/2019/07/huggingface_pytorch_transformer|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2019/07/huggingface_pytorch_transformer|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2019/07/huggingface_pytorch_transformer|comment|(formerly known as pytorch-pretrained-bert) 
+http://www.semanlink.net/doc/2019/07/huggingface_pytorch_transformer|title|huggingface/pytorch-transformers: A library of state-of-the-art pretrained models for NLP +http://www.semanlink.net/doc/2019/07/huggingface_pytorch_transformer|bookmarkOf|https://github.com/huggingface/pytorch-transformers +http://www.semanlink.net/doc/2019/07/huggingface_pytorch_transformer|creationTime|2019-07-27T10:20:52Z +http://www.semanlink.net/doc/2019/06/relearn_css_layout_every_layout|creationDate|2019-06-17 +http://www.semanlink.net/doc/2019/06/relearn_css_layout_every_layout|tag|http://www.semanlink.net/tag/css +http://www.semanlink.net/doc/2019/06/relearn_css_layout_every_layout|title|Relearn CSS layout: Every Layout +http://www.semanlink.net/doc/2019/06/relearn_css_layout_every_layout|bookmarkOf|https://every-layout.dev/ +http://www.semanlink.net/doc/2019/06/relearn_css_layout_every_layout|creationTime|2019-06-17T23:40:17Z +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|creationDate|2020-04-02 +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|tag|http://www.semanlink.net/tag/generative_adversarial_network +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|tag|http://www.semanlink.net/tag/pdf_extract +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|tag|http://www.semanlink.net/tag/genetic_algorithm +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|arxiv_author|Nataliya Le Vine +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|arxiv_author|Mark Rowan +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|arxiv_author|Matthew Zeigenfuse +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|title|[1904.01947] Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|bookmarkOf|https://arxiv.org/abs/1904.01947 +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|creationTime|2020-04-02T15:48:47Z +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|arxiv_summary|"Extracting information from tables in documents presents a significant +challenge in many industries and in academic research. Existing methods which +take a bottom-up approach of integrating lines into cells and rows or columns +neglect the available prior information relating to table structure. Our +proposed method takes a top-down approach, first using a generative adversarial +network to map a table image into a standardised `skeleton' table form denoting +the approximate row and column borders without table content, then fitting +renderings of candidate latent table structures to the skeleton structure using +a distance measure optimised by a genetic algorithm." 
+http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|arxiv_firstAuthor|Nataliya Le Vine +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|arxiv_updated|2019-04-03T12:12:03Z +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|arxiv_title|Extracting Tables from Documents using Conditional Generative Adversarial Networks and Genetic Algorithms +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|arxiv_published|2019-04-03T12:12:03Z +http://www.semanlink.net/doc/2020/04/1904_01947_extracting_tables_|arxiv_num|1904.01947 +http://www.semanlink.net/doc/2020/02/fasthugs_%7C_ntentional|creationDate|2020-02-19 +http://www.semanlink.net/doc/2020/02/fasthugs_%7C_ntentional|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2020/02/fasthugs_%7C_ntentional|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.semanlink.net/doc/2020/02/fasthugs_%7C_ntentional|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2020/02/fasthugs_%7C_ntentional|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2020/02/fasthugs_%7C_ntentional|comment|Notebook: fine-tune a text classification model with HuggingFace transformers and fastai-v2. +http://www.semanlink.net/doc/2020/02/fasthugs_%7C_ntentional|title|FastHugs ntentional +http://www.semanlink.net/doc/2020/02/fasthugs_%7C_ntentional|bookmarkOf|http://www.ntentional.com/2020/02/18/fasthugs_demo.html +http://www.semanlink.net/doc/2020/02/fasthugs_%7C_ntentional|creationTime|2020-02-19T01:04:23Z +http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor|creationDate|2019-08-15 +http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor|tag|http://www.semanlink.net/tag/lenka_zdeborova +http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor|tag|http://www.semanlink.net/tag/statistical_physics +http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor|tag|http://www.semanlink.net/tag/france_is_ai_2018 +http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor|tag|http://www.semanlink.net/tag/youtube_video +http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor|tag|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor|comment|"> In data science, models are used to fit the data. 
In physics, models are the main tools for understanding + +" +http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor|title|France is AI 2018: Lenka Zdeborova - Statistical physics modelling of machine learning - YouTube +http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor|bookmarkOf|https://www.youtube.com/watch?v=1zU09Ow4WOE +http://www.semanlink.net/doc/2019/08/france_is_ai_2018_lenka_zdebor|creationTime|2019-08-15T12:39:39Z +http://www.semanlink.net/doc/2019/07/spacy_sur_twitter_the_videos|creationDate|2019-07-12 +http://www.semanlink.net/doc/2019/07/spacy_sur_twitter_the_videos|tag|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/doc/2019/07/spacy_sur_twitter_the_videos|title|"spaCy sur Twitter : ""THE VIDEOS FROM #spaCyIRL ARE NOW LIVE!""" +http://www.semanlink.net/doc/2019/07/spacy_sur_twitter_the_videos|bookmarkOf|https://twitter.com/spacy_io/status/1149635514544185345 +http://www.semanlink.net/doc/2019/07/spacy_sur_twitter_the_videos|creationTime|2019-07-12T13:59:36Z +http://www.semanlink.net/doc/2019/09/ctrl_a_conditional_transformer|creationDate|2019-09-12 +http://www.semanlink.net/doc/2019/09/ctrl_a_conditional_transformer|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2019/09/ctrl_a_conditional_transformer|tag|http://www.semanlink.net/tag/richard_socher +http://www.semanlink.net/doc/2019/09/ctrl_a_conditional_transformer|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2019/09/ctrl_a_conditional_transformer|tag|http://www.semanlink.net/tag/salesforce +http://www.semanlink.net/doc/2019/09/ctrl_a_conditional_transformer|title|CTRL: A CONDITIONAL TRANSFORMER LANGUAGE MODEL FOR CONTROLLABLE GENERATION +http://www.semanlink.net/doc/2019/09/ctrl_a_conditional_transformer|bookmarkOf|https://einstein.ai/presentations/ctrl.pdf +http://www.semanlink.net/doc/2019/09/ctrl_a_conditional_transformer|creationTime|2019-09-12T08:44:05Z +http://www.semanlink.net/doc/2019/08/bloom_filter|creationDate|2019-08-02 +http://www.semanlink.net/doc/2019/08/bloom_filter|tag|http://www.semanlink.net/tag/algorithmes +http://www.semanlink.net/doc/2019/08/bloom_filter|comment|"To test whether an element is a member of a set. False positive are possible, but false negatives are not (a query returns either ""possibly in set"" or ""definitely not in set"") +" +http://www.semanlink.net/doc/2019/08/bloom_filter|title|Bloom filter +http://www.semanlink.net/doc/2019/08/bloom_filter|bookmarkOf|https://en.wikipedia.org/wiki/Bloom_filter +http://www.semanlink.net/doc/2019/08/bloom_filter|creationTime|2019-08-02T18:02:22Z +http://www.semanlink.net/doc/2019/07/sebastian_ruder_sur_twitter_|creationDate|2019-07-06 +http://www.semanlink.net/doc/2019/07/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/yoav_goldberg +http://www.semanlink.net/doc/2019/07/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/doc/2019/07/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/07/sebastian_ruder_sur_twitter_|title|"Sebastian Ruder sur Twitter : ""@yoavgo on (some of the) missing elements in NLP. Future vision: humans writing rules aided by ML. 
#spaCyIRL… """ +http://www.semanlink.net/doc/2019/07/sebastian_ruder_sur_twitter_|bookmarkOf|https://twitter.com/seb_ruder/status/1147470371517534209 +http://www.semanlink.net/doc/2019/07/sebastian_ruder_sur_twitter_|creationTime|2019-07-06T16:45:59Z +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|creationDate|2020-02-13 +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|tag|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|tag|http://www.semanlink.net/tag/api +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|arxiv_author|Jeremy Howard +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|arxiv_author|Sylvain Gugger +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|comment|Paper describing the fast.ai v2 API +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|title|[2002.04688] fastai: A Layered API for Deep Learning +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|bookmarkOf|https://arxiv.org/abs/2002.04688 +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|creationTime|2020-02-13T21:07:29Z +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|arxiv_summary|"fastai is a deep learning library which provides practitioners with +high-level components that can quickly and easily provide state-of-the-art +results in standard deep learning domains, and provides researchers with +low-level components that can be mixed and matched to build new approaches. It +aims to do both things without substantial compromises in ease of use, +flexibility, or performance. This is possible thanks to a carefully layered +architecture, which expresses common underlying patterns of many deep learning +and data processing techniques in terms of decoupled abstractions. These +abstractions can be expressed concisely and clearly by leveraging the dynamism +of the underlying Python language and the flexibility of the PyTorch library. +fastai includes: a new type dispatch system for Python along with a semantic +type hierarchy for tensors; a GPU-optimized computer vision library which can +be extended in pure Python; an optimizer which refactors out the common +functionality of modern optimizers into two basic pieces, allowing optimization +algorithms to be implemented in 4-5 lines of code; a novel 2-way callback +system that can access any part of the data, model, or optimizer and change it +at any point during training; a new data block API; and much more. We have used +this library to successfully create a complete deep learning course, which we +were able to write more quickly than using previous approaches, and the code +was more clear. The library is already in wide use in research, industry, and +teaching. 
NB: This paper covers fastai v2, which is currently in pre-release at +http://dev.fast.ai/" +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|arxiv_firstAuthor|Jeremy Howard +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|arxiv_updated|2020-02-16T18:17:51Z +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|arxiv_title|fastai: A Layered API for Deep Learning +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|arxiv_published|2020-02-11T21:16:48Z +http://www.semanlink.net/doc/2020/02/_2002_04688_fastai_a_layered_|arxiv_num|2002.04688 +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|creationDate|2020-05-10 +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|tag|http://www.semanlink.net/tag/ml_evaluation +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|tag|http://www.semanlink.net/tag/metric_learning +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|tag|http://www.semanlink.net/tag/ai_facebook +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|arxiv_author|Kevin Musgrave +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|arxiv_author|Serge Belongie +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|arxiv_author|Ser-Nam Lim +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|title|[2003.08505] A Metric Learning Reality Check +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|bookmarkOf|https://arxiv.org/abs/2003.08505 +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|creationTime|2020-05-10T11:06:07Z +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|arxiv_summary|"Deep metric learning papers from the past four years have consistently +claimed great advances in accuracy, often more than doubling the performance of +decade-old methods. In this paper, we take a closer look at the field to see if +this is actually true. We find flaws in the experimental setup of these papers, +and propose a new way to evaluate metric learning algorithms. Finally, we +present experimental results that show that the improvements over time have +been marginal at best." 
+http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|arxiv_firstAuthor|Kevin Musgrave +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|arxiv_updated|2020-03-18T23:28:04Z +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|arxiv_title|A Metric Learning Reality Check +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|arxiv_published|2020-03-18T23:28:04Z +http://www.semanlink.net/doc/2020/05/2003_08505_a_metric_learning_|arxiv_num|2003.08505 +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|creationDate|2019-06-28 +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|tag|http://www.semanlink.net/tag/weak_supervision +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|tag|http://www.semanlink.net/tag/snorkel +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|tag|http://www.semanlink.net/tag/knowledge_resources +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|tag|http://www.semanlink.net/tag/nlp_using_knowledge +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Stephen H. Bach +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Braden Hancock +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Yintao Liu +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Alexander Ratner +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Daniel Rodriguez +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Rob Malkin +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Chong Luo +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Christopher Ré +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Rahul Kuchhal +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Souvik Sen +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Houman Alborzi +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Haidong Shao +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_author|Cassandra Xia +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|comment|"> study showing how existing knowledge resources from across an organization can be used as weak supervision in order to bring development time and cost down by an order of magnitude. +> Snorkel DryBell, a new weak supervision management system for this setting. + +[Blog post](/doc/2019/06/google_ai_blog_harnessing_orga)" +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|relatedDoc|http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|title|[1812.00417] Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|bookmarkOf|https://arxiv.org/abs/1812.00417 +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|creationTime|2019-06-28T00:31:17Z +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_summary|"Labeling training data is one of the most costly bottlenecks in developing +machine learning-based applications. 
We present a first-of-its-kind study +showing how existing knowledge resources from across an organization can be +used as weak supervision in order to bring development time and cost down by an +order of magnitude, and introduce Snorkel DryBell, a new weak supervision +management system for this setting. Snorkel DryBell builds on the Snorkel +framework, extending it in three critical aspects: flexible, template-based +ingestion of diverse organizational knowledge, cross-feature production +serving, and scalable, sampling-free execution. On three classification tasks +at Google, we find that Snorkel DryBell creates classifiers of comparable +quality to ones trained with tens of thousands of hand-labeled examples, +converts non-servable organizational resources to servable models for an +average 52% performance improvement, and executes over millions of data points +in tens of minutes." +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_firstAuthor|Stephen H. Bach +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_updated|2019-06-03T22:52:25Z +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_title|Snorkel DryBell: A Case Study in Deploying Weak Supervision at Industrial Scale +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_published|2018-12-02T16:23:36Z +http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a|arxiv_num|1812.00417 +http://www.semanlink.net/doc/2019/11/project_debater_ibm_research_|creationDate|2019-11-06 +http://www.semanlink.net/doc/2019/11/project_debater_ibm_research_|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/doc/2019/11/project_debater_ibm_research_|tag|http://www.semanlink.net/tag/nlp_ibm +http://www.semanlink.net/doc/2019/11/project_debater_ibm_research_|tag|http://www.semanlink.net/tag/ibm +http://www.semanlink.net/doc/2019/11/project_debater_ibm_research_|title|Project Debater - IBM Research AI +http://www.semanlink.net/doc/2019/11/project_debater_ibm_research_|bookmarkOf|https://www.research.ibm.com/artificial-intelligence/project-debater/ +http://www.semanlink.net/doc/2019/11/project_debater_ibm_research_|creationTime|2019-11-06T01:12:43Z +http://www.semanlink.net/doc/2020/01/neurala_how_lifelong_dnn_solve|creationDate|2020-01-01 +http://www.semanlink.net/doc/2020/01/neurala_how_lifelong_dnn_solve|tag|http://www.semanlink.net/tag/neurala_lifelong_dnn +http://www.semanlink.net/doc/2020/01/neurala_how_lifelong_dnn_solve|title|Neurala: How Lifelong-DNN Solves for Inherent Problems with Traditional DNNs +http://www.semanlink.net/doc/2020/01/neurala_how_lifelong_dnn_solve|bookmarkOf|https://info.neurala.com/hubfs/docs/Neurala_LifelongDNNWhitepaper.pdf +http://www.semanlink.net/doc/2020/01/neurala_how_lifelong_dnn_solve|creationTime|2020-01-01T12:06:57Z +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|creationDate|2019-06-21 +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|tag|http://www.semanlink.net/tag/xlnet +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_author|Zhilin Yang +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_author|Jaime Carbonell +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_author|Zihang Dai +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_author|Ruslan 
Salakhutdinov +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_author|Quoc V. Le +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_author|Yiming Yang +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|comment|a new pretraining method for NLP that significantly improves upon BERT on 20 tasks (e.g., SQuAD, GLUE, RACE) +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|title|[1906.08237] XLNet: Generalized Autoregressive Pretraining for Language Understanding +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|bookmarkOf|https://arxiv.org/abs/1906.08237 +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|creationTime|2019-06-21T16:29:51Z +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_summary|"With the capability of modeling bidirectional contexts, denoising +autoencoding based pretraining like BERT achieves better performance than +pretraining approaches based on autoregressive language modeling. However, +relying on corrupting the input with masks, BERT neglects dependency between +the masked positions and suffers from a pretrain-finetune discrepancy. In light +of these pros and cons, we propose XLNet, a generalized autoregressive +pretraining method that (1) enables learning bidirectional contexts by +maximizing the expected likelihood over all permutations of the factorization +order and (2) overcomes the limitations of BERT thanks to its autoregressive +formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the +state-of-the-art autoregressive model, into pretraining. Empirically, under +comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a +large margin, including question answering, natural language inference, +sentiment analysis, and document ranking." 
+http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_firstAuthor|Zhilin Yang +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_updated|2020-01-02T12:48:08Z +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_title|XLNet: Generalized Autoregressive Pretraining for Language Understanding +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_published|2019-06-19T17:35:48Z +http://www.semanlink.net/doc/2019/06/_1906_08237_xlnet_generalized|arxiv_num|1906.08237 +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|creationDate|2019-08-05 +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|tag|http://www.semanlink.net/tag/ernie +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|tag|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|tag|http://www.semanlink.net/tag/acl_2019 +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_author|Xu Han +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_author|Zhengyan Zhang +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_author|Zhiyuan Liu +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_author|Xin Jiang +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_author|Maosong Sun +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_author|Qun Liu +http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|comment|"> We argue that informative entities in **KGs can enhance language representation with external knowledge**. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. + +> ERNIE achieves significant improvements on +various knowledge-driven tasks, and meanwhile +is comparable with the state-of-the-art +model BERT on other common NLP tasks + +[GitHub](https://github.com/thunlp/ERNIE) + +WARNING, there is another ERNIE (by [NLP@Baidu](tag:nlp_baidu)): Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi +Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, and +Hua Wu. 2019. Ernie: Enhanced representation through +knowledge integration. This doesn't happen when you choose François-Paul as the name for your child." 
+http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|title|[1905.07129] ERNIE: Enhanced Language Representation with Informative Entities
+http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|bookmarkOf|https://arxiv.org/abs/1905.07129
+http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|creationTime|2019-08-05T15:40:17Z
+http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_summary|"Neural language representation models such as BERT pre-trained on large-scale
+corpora can well capture rich semantic patterns from plain text, and be
+fine-tuned to consistently improve the performance of various NLP tasks.
+However, the existing pre-trained language models rarely consider incorporating
+knowledge graphs (KGs), which can provide rich structured knowledge facts for
+better language understanding. We argue that informative entities in KGs can
+enhance language representation with external knowledge. In this paper, we
+utilize both large-scale textual corpora and KGs to train an enhanced language
+representation model (ERNIE), which can take full advantage of lexical,
+syntactic, and knowledge information simultaneously. The experimental results
+have demonstrated that ERNIE achieves significant improvements on various
+knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art
+model BERT on other common NLP tasks. The source code of this paper can be
+obtained from https://github.com/thunlp/ERNIE."
+http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_firstAuthor|Zhengyan Zhang
+http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_updated|2019-06-04T11:35:58Z
+http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_title|ERNIE: Enhanced Language Representation with Informative Entities
+http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_published|2019-05-17T06:24:16Z
+http://www.semanlink.net/doc/2019/08/_1905_07129_ernie_enhanced_la|arxiv_num|1905.07129
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|creationDate|2020-03-22
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|tag|http://www.semanlink.net/tag/bert
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|tag|http://www.semanlink.net/tag/knowledge_graph_completion
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|tag|http://www.semanlink.net/tag/pre_trained_language_models
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|tag|http://www.semanlink.net/tag/discute_avec_raphael
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|arxiv_author|Chengsheng Mao
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|arxiv_author|Liang Yao
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|arxiv_author|Yuan Luo
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|comment|"Pre-trained language models for knowledge graph completion. **Triples are treated as textual sequences**. (Hmm, I have seen this somewhere before. Ah, maybe [RDF2VEC](tag:rdf2vec)?
+// TODO: check)
+
+Takes entity and relation descriptions of a triple as input and computes the scoring function of the triple with the KG-BERT language model
+
+> we first treat entities, relations and triples as
+textual sequences and turn knowledge graph completion into
+a sequence classification problem. We then fine-tune BERT
+model on these sequences for predicting the plausibility of
+a triple or a relation.
+
+[GitHub](https://github.com/yao8839836/kg-bert)"
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|title|[1909.03193] KG-BERT: BERT for Knowledge Graph Completion
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|bookmarkOf|https://arxiv.org/abs/1909.03193
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|creationTime|2020-03-22T18:56:43Z
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|arxiv_summary|"Knowledge graphs are important resources for many artificial intelligence
+tasks but often suffer from incompleteness. In this work, we propose to use
+pre-trained language models for knowledge graph completion. We treat triples in
+knowledge graphs as textual sequences and propose a novel framework named
+Knowledge Graph Bidirectional Encoder Representations from Transformer
+(KG-BERT) to model these triples. Our method takes entity and relation
+descriptions of a triple as input and computes scoring function of the triple
+with the KG-BERT language model. Experimental results on multiple benchmark
+knowledge graphs show that our method can achieve state-of-the-art performance
+in triple classification, link prediction and relation prediction tasks."
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|arxiv_firstAuthor|Liang Yao
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|arxiv_updated|2019-09-11T06:03:30Z
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|arxiv_title|KG-BERT: BERT for Knowledge Graph Completion
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|arxiv_published|2019-09-07T06:09:25Z
+http://www.semanlink.net/doc/2020/03/_1909_03193_kg_bert_bert_for_|arxiv_num|1909.03193
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|creationDate|2020-01-11
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|tag|http://www.semanlink.net/tag/approximate_nearest_neighbor
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|tag|http://www.semanlink.net/tag/google_cloud_platform
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|tag|http://www.semanlink.net/tag/text_similarity
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|tag|http://www.semanlink.net/tag/good
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|tag|http://www.semanlink.net/tag/sample_code
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|tag|http://www.semanlink.net/tag/nearest_neighbor_search
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|tag|http://www.semanlink.net/tag/similarity_queries
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|tag|http://www.semanlink.net/tag/doc_by_google
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|comment|"- an overview of approximate similarity
+matching
+- an end-to-end example solution for
+performing real-time text semantic search"
+http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|title|Building a real-time
embeddings similarity matching system    Solutions    Google Cloud +http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|bookmarkOf|https://cloud.google.com/solutions/machine-learning/building-real-time-embeddings-similarity-matching-system +http://www.semanlink.net/doc/2020/01/building_a_real_time_embeddings|creationTime|2020-01-11T02:29:47Z +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|creationDate|2020-01-09 +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|tag|http://www.semanlink.net/tag/flashtext_algorithm +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|tag|http://www.semanlink.net/tag/regex +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|tag|http://www.semanlink.net/tag/aho_corasick_algorithm +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|tag|http://www.semanlink.net/tag/string_searching_algorithm +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|arxiv_author|Vikash Singh +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|comment|"FlashText algorithm for replacing keywords or finding keywords in a given text. + +For a document of size N (characters) and a dictionary of M keywords, the time complexity is O(N) (compared to O(MxN) with regex). FlashText is designed to only match complete words (words with boundary characters on both sides). **Different from Aho Corasick Algorithm, as it doesn't match substrings**. This algorithm is also **designed to go for the longest match** first. For an input dictionary {Machine, Learning, Machine learning} on a string 'I like Machine learning', it will only consider the longest match, which is Machine Learning + +[Github](https://github.com/vi3k6i5/flashtext) (python)" +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|title|[1711.00046] Replace or Retrieve Keywords In Documents at Scale +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|bookmarkOf|https://arxiv.org/abs/1711.00046 +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|creationTime|2020-01-09T16:26:49Z +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|arxiv_summary|"In this paper we introduce, the FlashText algorithm for replacing keywords or +finding keywords in a given text. FlashText can search or replace keywords in +one pass over a document. The time complexity of this algorithm is not +dependent on the number of terms being searched or replaced. For a document of +size N (characters) and a dictionary of M keywords, the time complexity will be +O(N). This algorithm is much faster than Regex, because regex time complexity +is O(MxN). It is also different from Aho Corasick Algorithm, as it doesn't +match substrings. FlashText is designed to only match complete words (words +with boundary characters on both sides). For an input dictionary of {Apple}, +this algorithm won't match it to 'I like Pineapple'. This algorithm is also +designed to go for the longest match first. For an input dictionary {Machine, +Learning, Machine learning} on a string 'I like Machine learning', it will only +consider the longest match, which is Machine Learning. We have made python +implementation of this algorithm available as open-source on GitHub, released +under the permissive MIT License." 
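A minimal usage sketch of the flashtext package this entry points to, replaying the {Machine, Learning, Machine Learning} example from the abstract:

```python
from flashtext import KeywordProcessor

kp = KeywordProcessor()  # case-insensitive by default; whole-word matches only
for kw in ("Machine", "Learning", "Machine Learning"):
    kp.add_keyword(kw)

# Longest match first: 'Machine learning' is matched as a single keyword,
# not as the two shorter ones; runtime is O(N) in the document length.
print(kp.extract_keywords("I like Machine learning"))  # ['Machine Learning']
```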
+http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|arxiv_firstAuthor|Vikash Singh +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|arxiv_updated|2017-11-09T18:56:44Z +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|arxiv_title|Replace or Retrieve Keywords In Documents at Scale +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|arxiv_published|2017-10-31T18:34:03Z +http://www.semanlink.net/doc/2020/01/_1711_00046_replace_or_retriev|arxiv_num|1711.00046 +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|creationDate|2020-05-11 +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|tag|http://www.semanlink.net/tag/mutual_learning +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|tag|http://www.semanlink.net/tag/kd_mkb_biblio +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|arxiv_author|Ying Zhang +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|arxiv_author|Huchuan Lu +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|arxiv_author|Tao Xiang +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|arxiv_author|Timothy M. Hospedales +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|comment|"> In this paper we explore a different but related idea to model distillation – that of mutual learning. Distillation starts with a powerful large and pre-trained teacher network and performs one-way knowledge transfer to a small untrained student. In contrast, in mutual learning we start with a pool of untrained students who learn simultaneously to solve the task together. + +[critic here](doc:2020/06/1804_03235_large_scale_distri): + +> Zhang et al. (2017) reported a benefit in quality over +basic distillation, but they compare distilling model M1 into model M2 with training model M1 +and model M2 using codistillation; they do not compare to distilling an ensemble of models M1 +and M2 into model M3. +> +> ... +> +> we can achieve the 70.7% they report for online +distillation using traditional offline distillation." +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|relatedDoc|http://www.semanlink.net/doc/2020/06/1804_03235_large_scale_distri +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|title|[1706.00384] Deep Mutual Learning +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|bookmarkOf|https://arxiv.org/abs/1706.00384 +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|creationTime|2020-05-11T21:21:42Z +http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|arxiv_summary|"Model distillation is an effective and widely used technique to transfer +knowledge from a teacher to a student network. The typical application is to +transfer from a powerful large network or ensemble to a small network, that is +better suited to low-memory or fast execution requirements. In this paper, we +present a deep mutual learning (DML) strategy where, rather than one way +transfer between a static pre-defined teacher and a student, an ensemble of +students learn collaboratively and teach each other throughout the training +process. 
Our experiments show that a variety of network architectures benefit
+from mutual learning and achieve compelling results on CIFAR-100 recognition
+and Market-1501 person re-identification benchmarks. Surprisingly, it is
+revealed that no prior powerful teacher network is necessary -- mutual learning
+of a collection of simple student networks works, and moreover outperforms
+distillation from a more powerful yet static teacher."
+http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|arxiv_firstAuthor|Ying Zhang
+http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|arxiv_updated|2017-06-01T16:57:15Z
+http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|arxiv_title|Deep Mutual Learning
+http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|arxiv_published|2017-06-01T16:57:15Z
+http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|arxiv_num|1706.00384
+http://www.semanlink.net/doc/2020/05/1706_00384_deep_mutual_learni|references|http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat
+http://www.semanlink.net/doc/2020/05/do_the_right_thing|creationDate|2020-05-11
+http://www.semanlink.net/doc/2020/05/do_the_right_thing|tag|http://www.semanlink.net/tag/film_americain
+http://www.semanlink.net/doc/2020/05/do_the_right_thing|comment|Spike Lee. It's set in New York, and it's hot
+http://www.semanlink.net/doc/2020/05/do_the_right_thing|title|Do the Right Thing
+http://www.semanlink.net/doc/2020/05/do_the_right_thing|bookmarkOf|https://fr.wikipedia.org/wiki/Do_the_Right_Thing
+http://www.semanlink.net/doc/2020/05/do_the_right_thing|creationTime|2020-05-11T22:55:26Z
+http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|creationDate|2019-11-12
+http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|tag|http://www.semanlink.net/tag/emotions
+http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|tag|http://www.semanlink.net/tag/global_workspace_theory
+http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|tag|http://www.semanlink.net/tag/neuroscience
+http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|tag|http://www.semanlink.net/tag/free_will
+http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|tag|http://www.semanlink.net/tag/microsoft_research
+http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|tag|http://www.semanlink.net/tag/conscience_artificielle
+http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|comment|> This talk describes neuroscientist Bernard Baars’ Global Workspace Model (GWM) of the brain, its implications for understanding consciousness, and a novel computer architecture that it inspires. The Model gives **insight for the design of machines that truly experience (as opposed to simulate) the ecstasy of joy and the agony of pain**. It also gives **a reasonable explanation of free will in a completely deterministic world**.
+http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|title|Towards a Conscious AI: A Computer Architecture inspired by Neuroscience - Microsoft Research +http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|bookmarkOf|https://www.microsoft.com/en-us/research/video/towards-a-conscious-ai-a-computer-architecture-inspired-by-neuroscience/?OCID=msr_video_mblum_tw +http://www.semanlink.net/doc/2019/11/towards_a_conscious_ai_a_compu|creationTime|2019-11-12T01:30:49Z +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|creationDate|2020-03-15 +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|tag|http://www.semanlink.net/tag/neural_symbolic_computing +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_author|Michael Spranger +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_author|Luciano Serafini +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_author|Son N. Tran +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_author|Artur d'Avila Garcez +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_author|Luis C. Lamb +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_author|Marco Gori +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|title|[1905.06088] Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|bookmarkOf|https://arxiv.org/abs/1905.06088 +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|creationTime|2020-03-15T11:06:28Z +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_summary|"Current advances in Artificial Intelligence and machine learning in general, +and deep learning in particular have reached unprecedented impact not only +across research communities, but also over popular media channels. However, +concerns about interpretability and accountability of AI have been raised by +influential thinkers. In spite of the recent impact of AI, several works have +identified the need for principled knowledge representation and reasoning +mechanisms integrated with deep learning-based systems to provide sound and +explainable models for such systems. Neural-symbolic computing aims at +integrating, as foreseen by Valiant, two most fundamental cognitive abilities: +the ability to learn from the environment, and the ability to reason from what +has been learned. Neural-symbolic computing has been an active topic of +research for many years, reconciling the advantages of robust learning in +neural networks and reasoning and interpretability of symbolic representation. +In this paper, we survey recent accomplishments of neural-symbolic computing as +a principled methodology for integrated machine learning and reasoning. We +illustrate the effectiveness of the approach by outlining the main +characteristics of the methodology: principled integration of neural learning +with symbolic knowledge representation and reasoning allowing for the +construction of explainable AI systems. The insights provided by +neural-symbolic computing shed new light on the increasingly prominent need for +interpretable and accountable AI systems." 
+http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_firstAuthor|Artur d'Avila Garcez +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_updated|2019-05-15T11:00:48Z +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_title|Neural-Symbolic Computing: An Effective Methodology for Principled Integration of Machine Learning and Reasoning +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_published|2019-05-15T11:00:48Z +http://www.semanlink.net/doc/2020/03/_1905_06088_neural_symbolic_co|arxiv_num|1905.06088 +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|creationDate|2020-04-30 +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|tag|http://www.semanlink.net/tag/attention_knowledge_graphs +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|tag|http://www.semanlink.net/tag/acl_2019 +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|arxiv_author|Deepak Nathani +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|arxiv_author|Charu Sharma +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|arxiv_author|Manohar Kaul +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|arxiv_author|Jatin Chauhan +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|comment|[GitHub](https://github.com/deepakn97/relationPrediction) [Blog post](/doc/2020/04/deepak_nathani_%7C_pay_attention_) +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|relatedDoc|http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_ +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|title|[1906.01195] Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|bookmarkOf|https://arxiv.org/abs/1906.01195 +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|creationTime|2020-04-30T12:59:24Z +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|arxiv_summary|"The recent proliferation of knowledge graphs (KGs) coupled with incomplete or +partial information, in the form of missing relations (links) between entities, +has fueled a lot of research on knowledge base completion (also known as +relation prediction). Several recent works suggest that convolutional neural +network (CNN) based models generate richer and more expressive feature +embeddings and hence also perform well on relation prediction. However, we +observe that these KG embeddings treat triples independently and thus fail to +cover the complex and hidden information that is inherently implicit in the +local neighborhood surrounding a triple. To this effect, our paper proposes a +novel attention based feature embedding that captures both entity and relation +features in any given entity's neighborhood. Additionally, we also encapsulate +relation clusters and multihop relations in our model. Our empirical study +offers insights into the efficacy of our attention based model and we show +marked performance gains in comparison to state of the art methods on all +datasets." 
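For context, the neighborhood attention this paper builds on is GAT-style aggregation; roughly, in standard GAT notation (the paper's variant additionally folds relation features into the attended vectors, so this is only the starting point):

```latex
e_{ij} = \mathrm{LeakyReLU}\!\left(\mathbf{a}^{\top}\left[\mathbf{W}h_i \,\Vert\, \mathbf{W}h_j\right]\right),
\qquad
\alpha_{ij} = \frac{\exp(e_{ij})}{\sum_{k \in \mathcal{N}_i}\exp(e_{ik})},
\qquad
h_i' = \sigma\Big(\sum_{j \in \mathcal{N}_i} \alpha_{ij}\,\mathbf{W}h_j\Big)
```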
+http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|arxiv_firstAuthor|Deepak Nathani +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|arxiv_updated|2019-06-04T04:59:08Z +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|arxiv_title|Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|arxiv_published|2019-06-04T04:59:08Z +http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention|arxiv_num|1906.01195 +http://www.semanlink.net/doc/2019/11/building_a_full_text_search_app|creationDate|2019-11-10 +http://www.semanlink.net/doc/2019/11/building_a_full_text_search_app|tag|http://www.semanlink.net/tag/elasticsearch +http://www.semanlink.net/doc/2019/11/building_a_full_text_search_app|tag|http://www.semanlink.net/tag/text_search +http://www.semanlink.net/doc/2019/11/building_a_full_text_search_app|tag|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/doc/2019/11/building_a_full_text_search_app|tag|http://www.semanlink.net/tag/docker +http://www.semanlink.net/doc/2019/11/building_a_full_text_search_app|title|Building a Full-Text Search App Using Docker and Elasticsearch +http://www.semanlink.net/doc/2019/11/building_a_full_text_search_app|bookmarkOf|https://blog.patricktriest.com/text-search-docker-elasticsearch/ +http://www.semanlink.net/doc/2019/11/building_a_full_text_search_app|creationTime|2019-11-10T23:35:54Z +http://www.semanlink.net/doc/2020/03/coronavirus_why_you_must_act_n|creationDate|2020-03-11 +http://www.semanlink.net/doc/2020/03/coronavirus_why_you_must_act_n|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/03/coronavirus_why_you_must_act_n|title|Coronavirus: Why You Must Act Now - Tomas Pueyo - Medium +http://www.semanlink.net/doc/2020/03/coronavirus_why_you_must_act_n|bookmarkOf|https://medium.com/@tomaspueyo/coronavirus-act-today-or-people-will-die-f4d3d9cd99ca +http://www.semanlink.net/doc/2020/03/coronavirus_why_you_must_act_n|creationTime|2020-03-11T00:43:20Z +http://www.semanlink.net/doc/2020/04/knowledge_graphs_iclr_2020_|creationDate|2020-04-28 +http://www.semanlink.net/doc/2020/04/knowledge_graphs_iclr_2020_|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/04/knowledge_graphs_iclr_2020_|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2020/04/knowledge_graphs_iclr_2020_|comment|"1. Neural Reasoning for Complex QA with KGs +2. KG-augmented Language Models +3. KG Embeddings: Temporal and Inductive Inference +4. 
Entity Matching with GNNs"
+http://www.semanlink.net/doc/2020/04/knowledge_graphs_iclr_2020_|title|Knowledge Graphs @ ICLR 2020 - Michael Galkin - Medium
+http://www.semanlink.net/doc/2020/04/knowledge_graphs_iclr_2020_|bookmarkOf|https://medium.com/@mgalkin/knowledge-graphs-iclr-2020-f555c8ef10e3
+http://www.semanlink.net/doc/2020/04/knowledge_graphs_iclr_2020_|creationTime|2020-04-28T08:29:22Z
+http://www.semanlink.net/doc/2020/05/obsidian|creationDate|2020-05-19
+http://www.semanlink.net/doc/2020/05/obsidian|tag|http://www.semanlink.net/tag/knowledge_base
+http://www.semanlink.net/doc/2020/05/obsidian|tag|http://www.semanlink.net/tag/semanlink_related
+http://www.semanlink.net/doc/2020/05/obsidian|tag|http://www.semanlink.net/tag/note_taking_app
+http://www.semanlink.net/doc/2020/05/obsidian|tag|http://www.semanlink.net/tag/obsidian
+http://www.semanlink.net/doc/2020/05/obsidian|comment|"> A second brain,
+for you, forever.
+Obsidian is a powerful knowledge base that works on top of
+a local folder of plain text Markdown files."
+http://www.semanlink.net/doc/2020/05/obsidian|title|Obsidian
+http://www.semanlink.net/doc/2020/05/obsidian|bookmarkOf|https://obsidian.md/
+http://www.semanlink.net/doc/2020/05/obsidian|creationTime|2020-05-19T22:51:41Z
+http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de|creationDate|2020-02-10
+http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de|tag|http://www.semanlink.net/tag/job_matching
+http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de|tag|http://www.semanlink.net/tag/thewebconf_2018
+http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de|tag|http://www.semanlink.net/tag/siamese_network
+http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de|tag|http://www.semanlink.net/tag/nlp_ibm
+http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de|comment|"Siamese adaptation of a CNN, using contrastive loss. The document embeddings of resumes and job descriptions
+(dim 200) are generated using [#Doc2Vec](/tag/doc2vec.html) and are given as
+inputs to the network."
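A minimal sketch of the contrastive objective mentioned in the comment above, applied to the two towers' output embeddings; the margin value and the y=1-for-matching-pairs convention are assumptions, not details from the paper:

```python
import torch
import torch.nn.functional as F

def contrastive_loss(z_resume, z_job, y, margin=1.0):
    """y = 1 for a matching (resume, job) pair, 0 otherwise (assumed convention)."""
    d = F.pairwise_distance(z_resume, z_job)       # Euclidean distance between embeddings
    pos = y * d.pow(2)                             # pull matching pairs together
    neg = (1 - y) * F.relu(margin - d).pow(2)      # push non-matching pairs apart
    return (pos + neg).mean()

# e.g. on a batch of 200-dim Doc2Vec-derived vectors coming out of the two towers:
loss = contrastive_loss(torch.randn(8, 200), torch.randn(8, 200), torch.randint(0, 2, (8,)))
```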
+http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de|title|Matching Resumes to Jobs via Deep Siamese Network Companion Proceedings of The Web Conference 2018
+http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de|bookmarkOf|https://dl.acm.org/doi/10.1145/3184558.3186942
+http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de|creationTime|2020-02-10T13:43:44Z
+http://www.semanlink.net/doc/2020/04/blog_de_raphael_sourty|creationDate|2020-04-29
+http://www.semanlink.net/doc/2020/04/blog_de_raphael_sourty|tag|http://www.semanlink.net/tag/raphaelsty
+http://www.semanlink.net/doc/2020/04/blog_de_raphael_sourty|tag|http://www.semanlink.net/tag/blog
+http://www.semanlink.net/doc/2020/04/blog_de_raphael_sourty|title|Blog de Raphaël Sourty
+http://www.semanlink.net/doc/2020/04/blog_de_raphael_sourty|bookmarkOf|https://raphaelsty.github.io/
+http://www.semanlink.net/doc/2020/04/blog_de_raphael_sourty|creationTime|2020-04-29T16:43:58Z
+http://www.semanlink.net/doc/2020/01/transfer_learning_vs_neurala%E2%80%99s|creationDate|2020-01-01
+http://www.semanlink.net/doc/2020/01/transfer_learning_vs_neurala%E2%80%99s|tag|http://www.semanlink.net/tag/transfer_learning
+http://www.semanlink.net/doc/2020/01/transfer_learning_vs_neurala%E2%80%99s|tag|http://www.semanlink.net/tag/neurala_lifelong_dnn
+http://www.semanlink.net/doc/2020/01/transfer_learning_vs_neurala%E2%80%99s|title|Transfer Learning vs. Neurala’s L-DNN: clearing up minds LinkedIn
+http://www.semanlink.net/doc/2020/01/transfer_learning_vs_neurala%E2%80%99s|bookmarkOf|https://www.linkedin.com/pulse/transfer-learning-vs-neuralas-l-dnn-clearing-up-minds-versace/
+http://www.semanlink.net/doc/2020/01/transfer_learning_vs_neurala%E2%80%99s|creationTime|2020-01-01T12:11:03Z
+http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2|creationDate|2019-12-01
+http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2|tag|http://www.semanlink.net/tag/j_y_etais
+http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2|tag|http://www.semanlink.net/tag/jussieu
+http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2|tag|http://www.semanlink.net/tag/afia
+http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2|title|Journée commune AFIA - ARIA - 2 décembre 2019 
+http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2|bookmarkOf|https://ia-ri.sciencesconf.org/
+http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2|creationTime|2019-12-01T23:30:03Z
+http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|creationDate|2019-12-07
+http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|tag|http://www.semanlink.net/tag/text_to_sql
+http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|tag|http://www.semanlink.net/tag/caterpillar
+http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|tag|http://www.semanlink.net/tag/conversational_ai
+http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|tag|http://www.semanlink.net/tag/maintenance
+http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|tag|http://www.semanlink.net/tag/graph_database_and_nlp
+http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|tag|http://www.semanlink.net/tag/graph_database
+http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|tag|http://www.semanlink.net/tag/supply_chain
+http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|tag|http://www.semanlink.net/tag/neo4j +http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|tag|http://www.semanlink.net/tag/ontologies_use_cases +http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|comment| **The topic of natural language dialog between people and machines is probably going to be analytics**, and the mechanism to make that happen is natural language processing. **Graph databases make this possible because they have a very natural fit with language processing**. +http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|title|NLP at Scale for Maintenance and Supply Chain Management +http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|bookmarkOf|https://neo4j.com/blog/nlp-at-scale-maintenance-supply-chain-management/ +http://www.semanlink.net/doc/2019/12/nlp_at_scale_for_maintenance_an|creationTime|2019-12-07T18:53:05Z +http://www.semanlink.net/doc/2020/02/self_supervised_representation_|creationDate|2020-02-15 +http://www.semanlink.net/doc/2020/02/self_supervised_representation_|tag|http://www.semanlink.net/tag/self_supervised_learning +http://www.semanlink.net/doc/2020/02/self_supervised_representation_|tag|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/doc/2020/02/self_supervised_representation_|tag|http://www.semanlink.net/tag/lilian_weng +http://www.semanlink.net/doc/2020/02/self_supervised_representation_|title|Self-Supervised Representation Learning +http://www.semanlink.net/doc/2020/02/self_supervised_representation_|bookmarkOf|https://lilianweng.github.io/lil-log/2019/11/10/self-supervised-learning.html +http://www.semanlink.net/doc/2020/02/self_supervised_representation_|creationTime|2020-02-15T19:45:29Z +http://www.semanlink.net/doc/2020/03/diy_masks_for_all_could_help_st|creationDate|2020-03-29 +http://www.semanlink.net/doc/2020/03/diy_masks_for_all_could_help_st|tag|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/doc/2020/03/diy_masks_for_all_could_help_st|tag|http://www.semanlink.net/tag/diy +http://www.semanlink.net/doc/2020/03/diy_masks_for_all_could_help_st|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/03/diy_masks_for_all_could_help_st|title|DIY masks for all could help stop coronavirus - The Washington Post +http://www.semanlink.net/doc/2020/03/diy_masks_for_all_could_help_st|bookmarkOf|https://www.washingtonpost.com/outlook/2020/03/28/masks-all-coronavirus/ +http://www.semanlink.net/doc/2020/03/diy_masks_for_all_could_help_st|creationTime|2020-03-29T10:47:45Z +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|creationDate|2019-07-24 +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|arxiv_author|Timothy Niven +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|arxiv_author|Hung-Yu Kao +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|comment|"what has BERT learned about argument comprehension? 
+ +[Comments](/doc/2019/07/bert_s_success_in_some_benchmar)" +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|relatedDoc|http://www.semanlink.net/doc/2019/07/bert_s_success_in_some_benchmar +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|title|[1907.07355] Probing Neural Network Comprehension of Natural Language Arguments +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|bookmarkOf|https://arxiv.org/abs/1907.07355 +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|creationTime|2019-07-24T01:34:54Z +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|arxiv_summary|"We are surprised to find that BERT's peak performance of 77% on the Argument +Reasoning Comprehension Task reaches just three points below the average +untrained human baseline. However, we show that this result is entirely +accounted for by exploitation of spurious statistical cues in the dataset. We +analyze the nature of these cues and demonstrate that a range of models all +exploit them. This analysis informs the construction of an adversarial dataset +on which all models achieve random accuracy. Our adversarial dataset provides a +more robust assessment of argument comprehension and should be adopted as the +standard in future work." +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|arxiv_firstAuthor|Timothy Niven +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|arxiv_updated|2019-09-16T04:07:54Z +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|arxiv_title|Probing Neural Network Comprehension of Natural Language Arguments +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|arxiv_published|2019-07-17T06:26:20Z +http://www.semanlink.net/doc/2019/07/_1907_07355_probing_neural_net|arxiv_num|1907.07355 +http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n|creationDate|2020-03-01 +http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n|tag|http://www.semanlink.net/tag/graph_attention_networks +http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n|comment|"> The key idea: Sentences are fully-connected graphs of words, and Transformers are very similar to Graph Attention Networks (GATs) which use multi-head attention to aggregate features from their neighborhood nodes (i.e., words). 
+[ twitter](https://twitter.com/chaitjo/status/1233220586358181888)" +http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n|relatedDoc|http://www.semanlink.net/doc/2020/03/chaitanya_joshi_sur_twitter_ +http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n|title|Transformers are Graph Neural Networks NTU Graph Deep Learning Lab +http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n|bookmarkOf|https://graphdeeplearning.github.io/post/transformers-are-gnns/ +http://www.semanlink.net/doc/2020/03/transformers_are_graph_neural_n|creationTime|2020-03-01T02:28:59Z +http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|creationDate|2019-11-06 +http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|arxiv_author|Mei Zhang +http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|arxiv_author|Weiguo Zheng +http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|title|[1910.09760] Question Answering over Knowledge Graphs via Structural Query Patterns +http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|bookmarkOf|https://arxiv.org/abs/1910.09760 +http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|creationTime|2019-11-06T13:19:45Z +http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|arxiv_summary|"Natural language question answering over knowledge graphs is an important and +interesting task as it enables common users to gain accurate answers in an easy +and intuitive manner. However, it remains a challenge to bridge the gap between +unstructured questions and structured knowledge graphs. To address the problem, +a natural discipline is building a structured query to represent the input +question. Searching the structured query over the knowledge graph can produce +answers to the question. Distinct from the existing methods that are based on +semantic parsing or templates, we propose an effective approach powered by a +novel notion, structural query pattern, in this paper. Given an input question, +we first generate its query sketch that is compatible with the underlying +structure of the knowledge graph. Then, we complete the query graph by labeling +the nodes and edges under the guidance of the structural query pattern. +Finally, answers can be retrieved by executing the constructed query graph over +the knowledge graph. Evaluations on three question answering benchmarks show +that our proposed approach outperforms state-of-the-art methods significantly." 
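Back to the "Transformers are Graph Neural Networks" entry above: the analogy boils down to single-head self-attention being neighborhood aggregation over a fully-connected word graph S (simplified here to one head with no output projection):

```latex
h_i' = \sum_{j \in \mathcal{S}} \operatorname{softmax}_j\!\left(\frac{(\mathbf{Q}h_i)^{\top}(\mathbf{K}h_j)}{\sqrt{d}}\right)\mathbf{V}h_j
```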
+http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|arxiv_firstAuthor|Weiguo Zheng
+http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|arxiv_updated|2019-10-24T10:33:13Z
+http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|arxiv_title|Question Answering over Knowledge Graphs via Structural Query Patterns
+http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|arxiv_published|2019-10-22T04:21:06Z
+http://www.semanlink.net/doc/2019/11/_1910_09760_question_answering|arxiv_num|1910.09760
+http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|creationDate|2019-08-03
+http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|tag|http://www.semanlink.net/tag/text_aware_kg_embedding
+http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|tag|http://www.semanlink.net/tag/chris_manning
+http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|tag|http://www.semanlink.net/tag/knowledge_graph_completion
+http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|tag|http://www.semanlink.net/tag/ng
+http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|tag|http://www.semanlink.net/tag/knowledge_graph_deep_learning
+http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|tag|http://www.semanlink.net/tag/richard_socher
+http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|comment|"**Predicting the likely truth of additional facts based on existing facts in the knowledge base.**
+
+> we introduce an expressive neural
+tensor network suitable for reasoning over relationships between two entities.
+
+Most similar work: [Bordes et al.](http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_) (2011)
+
+Contributions:
+
+1. a new neural tensor
+network (**NTN**) suitable for reasoning over relationships between two entities. It generalizes several previous neural network models and provides a more
+powerful way to model relational information than a standard neural network layer.
+2. a new way to represent entities in knowledge bases, as the
+average of their constituting word vectors, allowing the sharing of statistical strength between the words describing
+each entity (e.g., Bank of China and China).
+3. incorporation of word vectors which are trained on large unlabeled text
+
+> We **learn to modify word representations
+via grounding in world knowledge**. This essentially allows us to analyze word embeddings and
+query them for specific relations. Furthermore, the resulting vectors could be used in other tasks
+such as named entity recognition or relation classification in natural language
+
+**Makes use of entity name**: NTN
+first learns word vectors from an auxiliary news corpus, and
+then initializes the representation of each entity by averaging
+the vectors of words contained in its name. For example,
+**the embedding of AlfredHitchcock is initialized by the
+average word vectors of “alfred” and “hitchcock”**...
This kind of methods model textual information +separately from KG facts, and hence fail to leverage +interactions between them.[src](doc:2019/05/knowledge_graph_embedding_a_su) + +" +http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|relatedDoc|http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su +http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|relatedDoc|http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_ +http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|title|Reasoning With Neural Tensor Networks for Knowledge Base Completion (2013) +http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|bookmarkOf|https://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-networks-for-knowledge-base-completion.pdf +http://www.semanlink.net/doc/2019/08/reasoning_with_neural_tensor_ne|creationTime|2019-08-03T20:45:54Z +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|creationDate|2020-04-25 +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|tag|http://www.semanlink.net/tag/microsoft_research +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|tag|http://www.semanlink.net/tag/taxonomy_expansion_task +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|tag|http://www.semanlink.net/tag/thewebconf_2020 +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|tag|http://www.semanlink.net/tag/taxonomies +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|tag|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|tag|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_author|Chenyan Xiong +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_author|Chi Wang +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_author|Jiawei Han +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_author|Kuansan Wang +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_author|Jiaming Shen +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_author|Zhihong Shen +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|comment|"how to add a set of new concepts to an existing taxonomy. + +[Tweet](https://twitter.com/mickeyjs6/status/1253772146142216194?s=20) [GitHub](https://github.com/mickeystroller/TaxoExpan) + +> we study the taxonomy expansion task: given an +existing taxonomy and a set of new emerging concepts, we aim +to automatically expand the taxonomy to incorporate these new +concepts (without changing the existing relations in the given taxonomy). + +> To the best of our knowledge, this is the first study on **how to +expand an existing directed acyclic graph (as we model a taxonomy +as a DAG) using self-supervised learning**. 
+
+A self-supervised framework, with the existing taxonomy used as training data: it learns a model to predict whether a query concept is the direct hyponym of an anchor concept.
+
+> 2 techniques:
+>
+> 1. a **position-enhanced graph neural network that encodes the local structure of an anchor concept** in the existing taxonomy,
+> 2. a noise-robust training objective that enables the learned model to be insensitive to the label noise in the self-supervision data.
+
+Regarding 1: uses [GNN](/tag/graph_neural_networks.html) to model the ""ego network"" of concepts (potential “siblings”
+and “grand parents” of the query concept).
+
+> Regular
+GNNs fail to distinguish nodes with different relative positions to
+the query (i.e., some nodes are grand parents of the query while
+the others are siblings of the query). To address this limitation, we
+present a simple but effective enhancement to inject such position
+information into GNNs using position embedding. We show that
+such embedding can be easily integrated with existing GNN architectures
+(e.g., [GCN](/tag/graph_convolutional_networks) and GAT) and significantly boosts the
+prediction performance
+
+Regarding point 2: uses InfoNCE loss, cf. [Contrastive Predictive Coding](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748)
+
+> Instead of predicting
+whether each individual ⟨query concept, anchor concept⟩ pair
+is positive or not, we first group all pairs sharing the same query
+concept into a single training instance and learn a model to select
+the positive pair among other negative ones from the group.
+
+(Hmm, this reminds me of something)
+
+> assume each concept (in existing taxonomy + set of new concepts) has an initial embedding
+vector learned from some text associated with this concept.
+
+To keep things tractable, it only attempts to find a single parent node for each new concept."
+http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|relatedDoc|https://arxiv.org/abs/1807.03748
+http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|title|[2001.09522] TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network
+http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|bookmarkOf|https://arxiv.org/abs/2001.09522
+http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|creationTime|2020-04-25T10:03:35Z
+http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_summary|"Taxonomies consist of machine-interpretable semantics and provide valuable
+knowledge for many web applications. For example, online retailers (e.g.,
+Amazon and eBay) use taxonomies for product recommendation, and web search
+engines (e.g., Google and Bing) leverage taxonomies to enhance query
+understanding. Enormous efforts have been made on constructing taxonomies
+either manually or semi-automatically. However, with the fast-growing volume of
+web content, existing taxonomies will become outdated and fail to capture
+emerging knowledge. Therefore, in many applications, dynamic expansions of an
+existing taxonomy are in great demand. In this paper, we study how to expand an
+existing taxonomy by adding a set of new concepts. We propose a novel
+self-supervised framework, named TaxoExpan, which automatically generates a set
+of ⟨query concept, anchor concept⟩ pairs from the existing taxonomy as training
+data. Using such self-supervision data, TaxoExpan learns a model to predict
+whether a query concept is the direct hyponym of an anchor concept.
We develop +two innovative techniques in TaxoExpan: (1) a position-enhanced graph neural +network that encodes the local structure of an anchor concept in the existing +taxonomy, and (2) a noise-robust training objective that enables the learned +model to be insensitive to the label noise in the self-supervision data. +Extensive experiments on three large-scale datasets from different domains +demonstrate both the effectiveness and the efficiency of TaxoExpan for taxonomy +expansion." +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_firstAuthor|Jiaming Shen +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_updated|2020-01-26T21:30:21Z +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_title|TaxoExpan: Self-supervised Taxonomy Expansion with Position-Enhanced Graph Neural Network +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_published|2020-01-26T21:30:21Z +http://www.semanlink.net/doc/2020/04/2001_09522_taxoexpan_self_su|arxiv_num|2001.09522 +http://www.semanlink.net/doc/2020/02/machine_learning_at_the_vu_univ|creationDate|2020-02-18 +http://www.semanlink.net/doc/2020/02/machine_learning_at_the_vu_univ|tag|http://www.semanlink.net/tag/peter_bloem +http://www.semanlink.net/doc/2020/02/machine_learning_at_the_vu_univ|tag|http://www.semanlink.net/tag/machine_learning_course +http://www.semanlink.net/doc/2020/02/machine_learning_at_the_vu_univ|title|Machine Learning at the VU University Amsterdam +http://www.semanlink.net/doc/2020/02/machine_learning_at_the_vu_univ|bookmarkOf|https://mlvu.github.io/ +http://www.semanlink.net/doc/2020/02/machine_learning_at_the_vu_univ|creationTime|2020-02-18T13:52:09Z +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|creationDate|2019-12-05 +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|tag|http://www.semanlink.net/tag/ludovic_denoyer +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|tag|http://www.semanlink.net/tag/explainable_nlp +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|arxiv_author|Ludovic Denoyer +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|arxiv_author|Diane Bouchacourt +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|comment|"> Providing explanations along with predictions is crucial in some text processing tasks. Therefore, we propose a new self-interpretable model that performs output prediction and simultaneously provides an explanation in terms of the presence of particular concepts in the input. To do so, our model's prediction relies solely on a low-dimensional binary representation of the input, where each feature denotes the presence or absence of concepts. 
+ +Presented in these [slides](/doc/2019/12/unsupervised_learning_with_text)" +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|relatedDoc|http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|title|[1905.11852] EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|bookmarkOf|https://arxiv.org/abs/1905.11852 +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|creationTime|2019-12-05T15:03:48Z +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|arxiv_summary|"Providing explanations along with predictions is crucial in some text +processing tasks. Therefore, we propose a new self-interpretable model that +performs output prediction and simultaneously provides an explanation in terms +of the presence of particular concepts in the input. To do so, our model's +prediction relies solely on a low-dimensional binary representation of the +input, where each feature denotes the presence or absence of concepts. The +presence of a concept is decided from an excerpt i.e. a small sequence of +consecutive words in the text. Relevant concepts for the prediction task at +hand are automatically defined by our model, avoiding the need for +concept-level annotations. To ease interpretability, we enforce that for each +concept, the corresponding excerpts share similar semantics and are +differentiable from each others. We experimentally demonstrate the relevance of +our approach on text classification and multi-sentiment analysis tasks." +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|arxiv_firstAuthor|Diane Bouchacourt +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|arxiv_updated|2019-09-27T14:16:30Z +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|arxiv_title|EDUCE: Explaining model Decisions through Unsupervised Concepts Extraction +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|arxiv_published|2019-05-28T14:33:19Z +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|mainDoc|http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2 +http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_|arxiv_num|1905.11852 +http://www.semanlink.net/doc/2019/06/when_and_why_does_king_man_|creationDate|2019-06-24 +http://www.semanlink.net/doc/2019/06/when_and_why_does_king_man_|title|When and Why does King - Man + Woman = Queen? 
(ACL 2019) Kawin Ethayarajh +http://www.semanlink.net/doc/2019/06/when_and_why_does_king_man_|bookmarkOf|https://kawine.github.io/blog/nlp/2019/06/21/word-analogies.html +http://www.semanlink.net/doc/2019/06/when_and_why_does_king_man_|creationTime|2019-06-24T08:36:06Z +http://www.semanlink.net/doc/2020/01/killer_slime_dead_birds_an_ex|creationDate|2020-01-04 +http://www.semanlink.net/doc/2020/01/killer_slime_dead_birds_an_ex|tag|http://www.semanlink.net/tag/pac +http://www.semanlink.net/doc/2020/01/killer_slime_dead_birds_an_ex|tag|http://www.semanlink.net/tag/agriculture_industrielle +http://www.semanlink.net/doc/2020/01/killer_slime_dead_birds_an_ex|title|Killer Slime, Dead Birds, an Expunged Map: The Dirty Secrets of European Farm Subsidies - The New York Times +http://www.semanlink.net/doc/2020/01/killer_slime_dead_birds_an_ex|bookmarkOf|https://www.nytimes.com/interactive/2019/12/25/world/europe/farms-environment.html +http://www.semanlink.net/doc/2020/01/killer_slime_dead_birds_an_ex|creationTime|2020-01-04T10:30:31Z +http://www.semanlink.net/doc/2019/09/brain_computer_interface_huge_|creationDate|2019-09-12 +http://www.semanlink.net/doc/2019/09/brain_computer_interface_huge_|tag|http://www.semanlink.net/tag/brain_computer_interface +http://www.semanlink.net/doc/2019/09/brain_computer_interface_huge_|title|Brain-computer interface: huge potential benefits and formidable challenges +http://www.semanlink.net/doc/2019/09/brain_computer_interface_huge_|bookmarkOf|https://www.news-medical.net/news/20190911/Brain-computer-interface-huge-potential-benefits-and-formidable-challenges.aspx +http://www.semanlink.net/doc/2019/09/brain_computer_interface_huge_|creationTime|2019-09-12T21:51:15Z +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|creationDate|2019-12-01 +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|tag|http://www.semanlink.net/tag/table_based_fact_verification +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_author|Xiyou Zhou +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_author|Wenhu Chen +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_author|Shiyang Li +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_author|William Yang Wang +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_author|Hong Wang +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_author|Jianshu Chen +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_author|Hongmin Wang +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_author|Yunkai Zhang +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|comment|fact verification given semi-structured data as evidence +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|title|[1909.02164] TabFact: A Large-scale Dataset for Table-based Fact Verification +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|bookmarkOf|https://arxiv.org/abs/1909.02164 +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|creationTime|2019-12-01T13:20:21Z +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_summary|"The problem of verifying whether a textual hypothesis holds based on the +given evidence, also known as fact verification, plays an important role in the +study of natural 
language understanding and semantic representation. However, +existing studies are mainly restricted to dealing with unstructured evidence +(e.g., natural language sentences and documents, news, etc), while verification +under structured evidence, such as tables, graphs, and databases, remains +under-explored. This paper specifically aims to study the fact verification +given semi-structured data as evidence. To this end, we construct a large-scale +dataset called TabFact with 16k Wikipedia tables as the evidence for 118k +human-annotated natural language statements, which are labeled as either +ENTAILED or REFUTED. TabFact is challenging since it involves both soft +linguistic reasoning and hard symbolic reasoning. To address these reasoning +challenges, we design two different models: Table-BERT and Latent Program +Algorithm (LPA). Table-BERT leverages the state-of-the-art pre-trained language +model to encode the linearized tables and statements into continuous vectors +for verification. LPA parses statements into programs and executes them against +the tables to obtain the returned binary value for verification. Both methods +achieve similar accuracy but still lag far behind human performance. We also +perform a comprehensive analysis to demonstrate great future opportunities. The +data and code of the dataset are provided in +\url{https://github.com/wenhuchen/Table-Fact-Checking}." +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_firstAuthor|Wenhu Chen +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_updated|2019-12-31T17:16:32Z +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_title|TabFact: A Large-scale Dataset for Table-based Fact Verification +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_published|2019-09-05T00:25:17Z +http://www.semanlink.net/doc/2019/12/_1909_02164_tabfact_a_large_s|arxiv_num|1909.02164 +http://www.semanlink.net/doc/2020/05/patrick_oshaughnessy_sur_twitte|creationDate|2020-05-24 +http://www.semanlink.net/doc/2020/05/patrick_oshaughnessy_sur_twitte|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/05/patrick_oshaughnessy_sur_twitte|tag|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/doc/2020/05/patrick_oshaughnessy_sur_twitte|tag|http://www.semanlink.net/tag/roam +http://www.semanlink.net/doc/2020/05/patrick_oshaughnessy_sur_twitte|title|"Patrick OShaughnessy sur Twitter : ""Other than @RoamResearch, who is doing interesting work in knowledge databases?...""" +http://www.semanlink.net/doc/2020/05/patrick_oshaughnessy_sur_twitte|bookmarkOf|https://twitter.com/patrick_oshag/status/1264299702738173954?s=20 +http://www.semanlink.net/doc/2020/05/patrick_oshaughnessy_sur_twitte|creationTime|2020-05-24T14:31:57Z +http://www.semanlink.net/doc/2019/10/one_shot_learning_siamese_netw|creationDate|2019-10-13 +http://www.semanlink.net/doc/2019/10/one_shot_learning_siamese_netw|tag|http://www.semanlink.net/tag/keras +http://www.semanlink.net/doc/2019/10/one_shot_learning_siamese_netw|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2019/10/one_shot_learning_siamese_netw|tag|http://www.semanlink.net/tag/one_shot_generalization +http://www.semanlink.net/doc/2019/10/one_shot_learning_siamese_netw|tag|http://www.semanlink.net/tag/triplet_loss +http://www.semanlink.net/doc/2019/10/one_shot_learning_siamese_netw|title|One Shot learning, Siamese networks and Triplet Loss with Keras 
+http://www.semanlink.net/doc/2019/10/one_shot_learning_siamese_netw|bookmarkOf|https://medium.com/@crimy/one-shot-learning-siamese-networks-and-triplet-loss-with-keras-2885ed022352 +http://www.semanlink.net/doc/2019/10/one_shot_learning_siamese_netw|creationTime|2019-10-13T19:00:46Z +http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_|creationDate|2019-08-03 +http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_|tag|http://www.semanlink.net/tag/antoine_bordes +http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_|tag|http://www.semanlink.net/tag/ronan_collobert +http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_|title|Learning Structured Embeddings of Knowledge Bases (2011) +http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_|bookmarkOf|https://ronan.collobert.com/pub/matos/2011_knowbases_aaai.pdf +http://www.semanlink.net/doc/2019/08/learning_structured_embeddings_|creationTime|2019-08-03T21:55:22Z +http://www.semanlink.net/doc/2020/04/knowledge_distillation_neural|creationDate|2020-04-22 +http://www.semanlink.net/doc/2020/04/knowledge_distillation_neural|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/04/knowledge_distillation_neural|tag|http://www.semanlink.net/tag/python_4_data_science +http://www.semanlink.net/doc/2020/04/knowledge_distillation_neural|comment|(Distiller is an open-source Python package for neural network compression research. The doc about knowledge distillation) +http://www.semanlink.net/doc/2020/04/knowledge_distillation_neural|title|Knowledge Distillation - Neural Network Distiller +http://www.semanlink.net/doc/2020/04/knowledge_distillation_neural|bookmarkOf|https://nervanasystems.github.io/distiller/knowledge_distillation.html +http://www.semanlink.net/doc/2020/04/knowledge_distillation_neural|creationTime|2020-04-22T21:52:26Z +http://www.semanlink.net/doc/2019/08/rakuten_nlp_category2vec|creationDate|2019-08-05 +http://www.semanlink.net/doc/2019/08/rakuten_nlp_category2vec|tag|http://www.semanlink.net/tag/category_embedding +http://www.semanlink.net/doc/2019/08/rakuten_nlp_category2vec|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2019/08/rakuten_nlp_category2vec|title|rakuten-nlp/category2vec (2015) +http://www.semanlink.net/doc/2019/08/rakuten_nlp_category2vec|bookmarkOf|https://github.com/rakuten-nlp/category2vec +http://www.semanlink.net/doc/2019/08/rakuten_nlp_category2vec|creationTime|2019-08-05T09:31:44Z +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|creationDate|2019-09-05 +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|tag|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|tag|http://www.semanlink.net/tag/facebook_fair +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|tag|http://www.semanlink.net/tag/language_models_as_knowledge_bases 
+http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_author|Patrick Lewis +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_author|Sebastian Riedel +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_author|Tim Rocktäschel +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_author|Yuxiang Wu +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_author|Alexander H. Miller +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_author|Fabio Petroni +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_author|Anton Bakhtin +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|comment|an analysis of the relational knowledge present in pretrained language models shows an ability of these models to recall factual knowledge +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|title|[1909.01066] Language Models as Knowledge Bases? +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|bookmarkOf|https://arxiv.org/abs/1909.01066 +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|creationTime|2019-09-05T22:32:00Z +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_summary|"Recent progress in pretraining language models on large textual corpora led +to a surge of improvements for downstream NLP tasks. Whilst learning linguistic +knowledge, these models may also be storing relational knowledge present in the +training data, and may be able to answer queries structured as +""fill-in-the-blank"" cloze statements. Language models have many advantages over +structured knowledge bases: they require no schema engineering, allow +practitioners to query about an open class of relations, are easy to extend to +more data, and require no human supervision to train. We present an in-depth +analysis of the relational knowledge already present (without fine-tuning) in a +wide range of state-of-the-art pretrained language models. We find that (i) +without fine-tuning, BERT contains relational knowledge competitive with +traditional NLP methods that have some access to oracle knowledge, (ii) BERT +also does remarkably well on open-domain question answering against a +supervised baseline, and (iii) certain types of factual knowledge are learned +much more readily than others by standard language model pretraining +approaches. The surprisingly strong ability of these models to recall factual +knowledge without any fine-tuning demonstrates their potential as unsupervised +open-domain QA systems. The code to reproduce our analysis is available at +https://github.com/facebookresearch/LAMA." +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_firstAuthor|Fabio Petroni +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_updated|2019-09-04T09:33:20Z +http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_title|Language Models as Knowledge Bases? 
+http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_published|2019-09-03T11:11:08Z
+http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as|arxiv_num|1909.01066
+http://www.semanlink.net/doc/2019/11/graph_neural_networks_for_natur|creationDate|2019-11-08
+http://www.semanlink.net/doc/2019/11/graph_neural_networks_for_natur|tag|http://www.semanlink.net/tag/graph_neural_networks
+http://www.semanlink.net/doc/2019/11/graph_neural_networks_for_natur|tag|http://www.semanlink.net/tag/emnlp_2019
+http://www.semanlink.net/doc/2019/11/graph_neural_networks_for_natur|tag|http://www.semanlink.net/tag/tutorial
+http://www.semanlink.net/doc/2019/11/graph_neural_networks_for_natur|title|Graph Neural Networks for Natural Language Processing tutorial at EMNLP 2019
+http://www.semanlink.net/doc/2019/11/graph_neural_networks_for_natur|bookmarkOf|https://github.com/svjan5/GNNs-for-NLP
+http://www.semanlink.net/doc/2019/11/graph_neural_networks_for_natur|creationTime|2019-11-08T00:12:07Z
+http://www.semanlink.net/doc/2019/10/sebastian_ruder_sur_twitter_n|creationDate|2019-10-11
+http://www.semanlink.net/doc/2019/10/sebastian_ruder_sur_twitter_n|tag|http://www.semanlink.net/tag/tweet
+http://www.semanlink.net/doc/2019/10/sebastian_ruder_sur_twitter_n|tag|http://www.semanlink.net/tag/graph_embeddings
+http://www.semanlink.net/doc/2019/10/sebastian_ruder_sur_twitter_n|tag|http://www.semanlink.net/tag/sebastian_ruder
+http://www.semanlink.net/doc/2019/10/sebastian_ruder_sur_twitter_n|comment|"> Network embedding:
+> 
+> - structure-oriented methods only consider similarity between neighbourhoods
+> - content-oriented ones also take into account textual similarity and yield word embeddings as a by-product.
+> 
+> In practice, content-oriented methods outperform structure-oriented approaches significantly."
+http://www.semanlink.net/doc/2019/10/sebastian_ruder_sur_twitter_n|title|Sebastian Ruder sur Twitter : network embeddings in the biomedical domain. @eurnlp #EurNLP2019
+http://www.semanlink.net/doc/2019/10/sebastian_ruder_sur_twitter_n|bookmarkOf|https://twitter.com/seb_ruder/status/1182618428382818305?s=20
+http://www.semanlink.net/doc/2019/10/sebastian_ruder_sur_twitter_n|creationTime|2019-10-11T14:19:05Z
+http://www.semanlink.net/doc/2019/09/cs224n_natural_language_proces|creationDate|2019-09-30
+http://www.semanlink.net/doc/2019/09/cs224n_natural_language_proces|tag|http://www.semanlink.net/tag/nlp_stanford
+http://www.semanlink.net/doc/2019/09/cs224n_natural_language_proces|title|CS224n: Natural Language Processing with Deep Learning Stanford / Winter 2019
+http://www.semanlink.net/doc/2019/09/cs224n_natural_language_proces|bookmarkOf|http://web.stanford.edu/class/cs224n/index.html#schedule
+http://www.semanlink.net/doc/2019/09/cs224n_natural_language_proces|creationTime|2019-09-30T10:10:15Z
+http://www.semanlink.net/doc/2020/02/my_first_nn_part_3_multi_layer|creationDate|2020-02-20
+http://www.semanlink.net/doc/2020/02/my_first_nn_part_3_multi_layer|tag|http://www.semanlink.net/tag/tutorial
+http://www.semanlink.net/doc/2020/02/my_first_nn_part_3_multi_layer|tag|http://www.semanlink.net/tag/backpropagation
+http://www.semanlink.net/doc/2020/02/my_first_nn_part_3_multi_layer|comment|"> Here’s all the math for backprop written out & color-coded. This and other lessons I wrote in Colab are quickly becoming blog posts thanks to FastPages (@HamelHusain) and nbdev (@GuggerSylvain & @jeremyphoward)!"
+http://www.semanlink.net/doc/2020/02/my_first_nn_part_3_multi_layer|title|My First NN Part 3. Multi-Layer Networks and Backpropagation Scott H. Hawley (alt. blog via fastpages) +http://www.semanlink.net/doc/2020/02/my_first_nn_part_3_multi_layer|bookmarkOf|https://drscotthawley.github.io/devblog3/2019/02/08/My-1st-NN-Part-3-Multi-Layer-and-Backprop.html +http://www.semanlink.net/doc/2020/02/my_first_nn_part_3_multi_layer|creationTime|2020-02-20T22:27:40Z +http://www.semanlink.net/doc/2019/11/fastai_with_transformers_bert_|creationDate|2019-11-30 +http://www.semanlink.net/doc/2019/11/fastai_with_transformers_bert_|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2019/11/fastai_with_transformers_bert_|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2019/11/fastai_with_transformers_bert_|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2019/11/fastai_with_transformers_bert_|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2019/11/fastai_with_transformers_bert_|comment|integrates HuggingFace into fastai +http://www.semanlink.net/doc/2019/11/fastai_with_transformers_bert_|title|Fastai with Transformers (BERT, RoBERTa, XLNet, XLM, DistilBERT) +http://www.semanlink.net/doc/2019/11/fastai_with_transformers_bert_|bookmarkOf|https://towardsdatascience.com/fastai-with-transformers-bert-roberta-xlnet-xlm-distilbert-4f41ee18ecb2 +http://www.semanlink.net/doc/2019/11/fastai_with_transformers_bert_|creationTime|2019-11-30T11:16:01Z +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|creationDate|2019-11-06 +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|tag|http://www.semanlink.net/tag/cross_lingual_nlp +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|tag|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_author|Alexis Conneau +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_author|Veselin Stoyanov +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_author|Shijie Wu +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_author|Luke Zettlemoyer +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_author|Haoran Li +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|title|[1911.01464] Emerging Cross-lingual Structure in Pretrained Language Models +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|bookmarkOf|https://arxiv.org/abs/1911.01464 +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|creationTime|2019-11-06T13:09:03Z +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_summary|"We study the problem of multilingual masked language modeling, i.e. the +training of a single model on concatenated text from multiple languages, and +present a detailed study of several factors that influence why these models are +so effective for cross-lingual transfer. 
We show, contrary to what was +previously hypothesized, that transfer is possible even when there is no shared +vocabulary across the monolingual corpora and also when the text comes from +very different domains. The only requirement is that there are some shared +parameters in the top layers of the multi-lingual encoder. To better understand +this result, we also show that representations from independently trained +models in different languages can be aligned post-hoc quite effectively, +strongly suggesting that, much like for non-contextual word embeddings, there +are universal latent symmetries in the learned embedding spaces. For +multilingual masked language modeling, these symmetries seem to be +automatically discovered and aligned during the joint training process." +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_firstAuthor|Shijie Wu +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_updated|2019-11-10T06:55:02Z +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_title|Emerging Cross-lingual Structure in Pretrained Language Models +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_published|2019-11-04T19:41:13Z +http://www.semanlink.net/doc/2019/11/_1911_01464_emerging_cross_lin|arxiv_num|1911.01464 +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|creationDate|2020-02-28 +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|arxiv_author|Anna Rogers +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|arxiv_author|Anna Rumshisky +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|arxiv_author|Olga Kovaleva +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|comment|(article praised on [twitter](https://twitter.com/dennybritz/status/1233343170596917248?s=20) by D Britz and Y. Goldberg) +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|title|[2002.12327] A Primer in BERTology: What we know about how BERT works +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|bookmarkOf|https://arxiv.org/abs/2002.12327 +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|creationTime|2020-02-28T13:25:30Z +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|arxiv_summary|"Transformer-based models are now widely used in NLP, but we still do not +understand a lot about their inner workings. This paper describes what is known +to date about the famous BERT model (Devlin et al. 2019), synthesizing over 40 +analysis studies. We also provide an overview of the proposed modifications to +the model and its training regime. We then outline the directions for further +research." 
+http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|arxiv_firstAuthor|Anna Rogers +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|arxiv_updated|2020-02-27T18:46:42Z +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|arxiv_title|A Primer in BERTology: What we know about how BERT works +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|arxiv_published|2020-02-27T18:46:42Z +http://www.semanlink.net/doc/2020/02/_2002_12327_a_primer_in_bertol|arxiv_num|2002.12327 +http://www.semanlink.net/doc/2019/08/open_sourcing_hyperparameter_au|creationDate|2019-08-27 +http://www.semanlink.net/doc/2019/08/open_sourcing_hyperparameter_au|tag|http://www.semanlink.net/tag/fasttext +http://www.semanlink.net/doc/2019/08/open_sourcing_hyperparameter_au|title|Open-sourcing hyperparameter autotuning for fastText +http://www.semanlink.net/doc/2019/08/open_sourcing_hyperparameter_au|bookmarkOf|https://ai.facebook.com/blog/fasttext-blog-post-open-source-in-brief/ +http://www.semanlink.net/doc/2019/08/open_sourcing_hyperparameter_au|creationTime|2019-08-27T08:39:41Z +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|creationDate|2019-06-07 +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|tag|http://www.semanlink.net/tag/these_irit_renault_biblio +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|tag|http://www.semanlink.net/tag/geometry_of_language_embeddings +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|tag|http://www.semanlink.net/tag/tree_embeddings +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_author|Adam Pearce +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_author|Emily Reif +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_author|Martin Wattenberg +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_author|Ann Yuan +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_author|Fernanda Viégas +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_author|Andy Coenen +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_author|Been Kim +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|comment|> At a high level, linguistic features seem to be represented in separate semantic and syntactic subspaces. We find evidence of a fine-grained geometric representation of word senses. 
We also present empirical descriptions of syntactic representations in both attention matrices and individual word embeddings, as well as a mathematical argument to explain the geometry of these representations +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|title|[1906.02715] Visualizing and Measuring the Geometry of BERT +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|bookmarkOf|https://arxiv.org/abs/1906.02715 +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|creationTime|2019-06-07T23:33:36Z +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_summary|"Transformer architectures show significant promise for natural language +processing. Given that a single pretrained model can be fine-tuned to perform +well on many different tasks, these networks appear to extract generally useful +linguistic features. A natural question is how such networks represent this +information internally. This paper describes qualitative and quantitative +investigations of one particularly effective model, BERT. At a high level, +linguistic features seem to be represented in separate semantic and syntactic +subspaces. We find evidence of a fine-grained geometric representation of word +senses. We also present empirical descriptions of syntactic representations in +both attention matrices and individual word embeddings, as well as a +mathematical argument to explain the geometry of these representations." +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_firstAuthor|Andy Coenen +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_updated|2019-10-28T17:53:14Z +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_title|Visualizing and Measuring the Geometry of BERT +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_published|2019-06-06T17:33:22Z +http://www.semanlink.net/doc/2019/06/_1906_02715_visualizing_and_me|arxiv_num|1906.02715 +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|creationDate|2020-03-29 +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|tag|http://www.semanlink.net/tag/link_prediction +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|tag|http://www.semanlink.net/tag/critical_evaluation +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|tag|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|comment|[paper](/doc/2020/05/2003_08001_realistic_re_evalu) +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|relatedDoc|http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|title|"Chengkai Li sur Twitter : ""Link prediction methods on knowledge graphs don't work...""" +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|bookmarkOf|https://twitter.com/Chengkai_Li/status/1243342637089898496 +http://www.semanlink.net/doc/2020/03/chengkai_li_sur_twitter_link|creationTime|2020-03-29T11:40:20Z +http://www.semanlink.net/doc/2020/01/davidsbatista_breds_bootstrap|creationDate|2020-01-11 
+http://www.semanlink.net/doc/2020/01/davidsbatista_breds_bootstrap|tag|http://www.semanlink.net/tag/relation_extraction +http://www.semanlink.net/doc/2020/01/davidsbatista_breds_bootstrap|tag|http://www.semanlink.net/tag/using_word_embedding +http://www.semanlink.net/doc/2020/01/davidsbatista_breds_bootstrap|title|"davidsbatista/BREDS: ""Bootstrapping Relationship Extractors with Distributional Semantics"" (Batista et al., 2015) - code for EMNLP'15 paper" +http://www.semanlink.net/doc/2020/01/davidsbatista_breds_bootstrap|bookmarkOf|https://github.com/davidsbatista/BREDS +http://www.semanlink.net/doc/2020/01/davidsbatista_breds_bootstrap|creationTime|2020-01-11T16:44:00Z +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|creationDate|2019-08-28 +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|tag|http://www.semanlink.net/tag/cross_lingual_nlp +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|tag|http://www.semanlink.net/tag/labeled_data +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_author|Jaime G. Carbonell +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_author|Aditi Chaudhary +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_author|Graham Neubig +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_author|Zaid Sheikh +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_author|Jiateng Xie +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|title|[1908.08983] A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|bookmarkOf|https://arxiv.org/abs/1908.08983 +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|creationTime|2019-08-28T22:57:43Z +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_summary|"Most state-of-the-art models for named entity recognition (NER) rely on the +availability of large amounts of labeled data, making them challenging to +extend to new, lower-resourced languages. However, there are now several +proposed approaches involving either cross-lingual transfer learning, which +learns from other highly resourced languages, or active learning, which +efficiently selects effective training data based on model predictions. This +paper poses the question: given this recent progress, and limited human +annotation, what is the most effective method for efficiently creating +high-quality entity recognizers in under-resourced languages? Based on +extensive experimentation using both simulated and real human annotation, we +find a dual-strategy approach best, starting with a cross-lingual transferred +model, then performing targeted annotation of only uncertain entity spans in +the target language, minimizing annotator effort. Results demonstrate that +cross-lingual transfer is a powerful tool when very little data can be +annotated, but an entity-targeted annotation strategy can achieve competitive +accuracy quickly, with just one-tenth of training data." 
+http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_firstAuthor|Aditi Chaudhary +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_updated|2019-08-23T19:15:07Z +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_title|A Little Annotation does a Lot of Good: A Study in Bootstrapping Low-resource Named Entity Recognizers +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_published|2019-08-23T19:15:07Z +http://www.semanlink.net/doc/2019/08/_1908_08983_a_little_annotatio|arxiv_num|1908.08983 +http://www.semanlink.net/doc/2019/06/_i_made_a_bet_that_a_naive_baye|creationDate|2019-06-06 +http://www.semanlink.net/doc/2019/06/_i_made_a_bet_that_a_naive_baye|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/06/_i_made_a_bet_that_a_naive_baye|tag|http://www.semanlink.net/tag/humour +http://www.semanlink.net/doc/2019/06/_i_made_a_bet_that_a_naive_baye|tag|http://www.semanlink.net/tag/rada_mihalcea +http://www.semanlink.net/doc/2019/06/_i_made_a_bet_that_a_naive_baye|tag|http://www.semanlink.net/tag/naive_bayes_classifier +http://www.semanlink.net/doc/2019/06/_i_made_a_bet_that_a_naive_baye|comment|[Jeremy Howard's answer](https://forums.fast.ai/t/nlp-challenge-project/44153) +http://www.semanlink.net/doc/2019/06/_i_made_a_bet_that_a_naive_baye|title|"""I made a bet that a Naive Bayes classifier would work as well on humor recognition as a neural net with fine-tuned Bert embeddings. I won""" +http://www.semanlink.net/doc/2019/06/_i_made_a_bet_that_a_naive_baye|bookmarkOf|https://twitter.com/pranav_nlp/status/1136688017630945280 +http://www.semanlink.net/doc/2019/06/_i_made_a_bet_that_a_naive_baye|creationTime|2019-06-06T22:48:05Z +http://www.semanlink.net/doc/2020/01/scraping_data_uhack_guide|creationDate|2020-01-23 +http://www.semanlink.net/doc/2020/01/scraping_data_uhack_guide|tag|http://www.semanlink.net/tag/pdf_format +http://www.semanlink.net/doc/2020/01/scraping_data_uhack_guide|tag|http://www.semanlink.net/tag/howto +http://www.semanlink.net/doc/2020/01/scraping_data_uhack_guide|tag|http://www.semanlink.net/tag/pdf_extract +http://www.semanlink.net/doc/2020/01/scraping_data_uhack_guide|tag|http://www.semanlink.net/tag/scraping +http://www.semanlink.net/doc/2020/01/scraping_data_uhack_guide|title|Scraping Data - UHack Guide +http://www.semanlink.net/doc/2020/01/scraping_data_uhack_guide|bookmarkOf|https://uhack-guide.readthedocs.io/en/latest/technical/scraping/ +http://www.semanlink.net/doc/2020/01/scraping_data_uhack_guide|creationTime|2020-01-23T18:14:58Z +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|creationDate|2020-02-17 +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|tag|http://www.semanlink.net/tag/knowledge_representation +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|tag|http://www.semanlink.net/tag/allen_institute_for_ai_a2i +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|tag|http://www.semanlink.net/tag/rules +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|tag|http://www.semanlink.net/tag/reasoning +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|arxiv_author|Peter Clark 
+http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|arxiv_author|Kyle Richardson +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|arxiv_author|Oyvind Tafjord +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|comment|> AI has long pursued the goal of having systems reason over *explicitly provided* knowledge, but building suitable representations has proved challenging. Here we explore whether transformers can similarly learn to reason (or emulate reasoning), but **using rules expressed in language, thus bypassing a formal representation**. +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|title|[2002.05867] Transformers as Soft Reasoners over Language +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|bookmarkOf|https://arxiv.org/abs/2002.05867 +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|creationTime|2020-02-17T09:06:44Z +http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|arxiv_summary|"AI has long pursued the goal of having systems reason over *explicitly +provided* knowledge, but building suitable representations has proved +challenging. Here we explore whether transformers can similarly learn to reason +(or emulate reasoning), but using rules expressed in language, thus bypassing a +formal representation. We provide the first demonstration that this is +possible, and characterize the extent of this capability. To do this, we use a +collection of synthetic datasets that test increasing levels of reasoning +complexity (number of rules, presence of negation, and depth of chaining). We +find transformers appear to learn rule-based reasoning with high (99%) accuracy +on these datasets, and in a way that generalizes to test data requiring +substantially deeper chaining than in the training data (95%+ scores). We also +demonstrate that the models transfer well to two hand-authored rulebases, and +to rulebases paraphrased into more natural language. These findings are +significant as it suggests a new role for transformers, namely as a limited +""soft theorem prover"" operating over explicit theories in language. This in +turn suggests new possibilities for explainability, correctability, and +counterfactual reasoning in question-answering. 
All datasets and a live demo
+are available at http://rule-reasoning.apps.allenai.org/"
+http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|arxiv_firstAuthor|Peter Clark
+http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|arxiv_updated|2020-02-14T04:23:28Z
+http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|arxiv_title|Transformers as Soft Reasoners over Language
+http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|arxiv_published|2020-02-14T04:23:28Z
+http://www.semanlink.net/doc/2020/02/_2002_05867v1_transformers_as_|arxiv_num|2002.05867
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|creationDate|2019-12-09
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|tag|http://www.semanlink.net/tag/statistical_classification
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_author|Kuan-Chieh Wang
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_author|Kevin Swersky
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_author|David Duvenaud
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_author|Mohammad Norouzi
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_author|Jörn-Henrik Jacobsen
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_author|Will Grathwohl
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|title|[1912.03263] Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|bookmarkOf|https://arxiv.org/abs/1912.03263
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|creationTime|2019-12-09T23:28:51Z
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_summary|"We propose to reinterpret a standard discriminative classifier of p(y|x) as
+an energy based model for the joint distribution p(x,y). In this setting, the
+standard class probabilities can be easily computed as well as unnormalized
+values of p(x) and p(x|y). Within this framework, standard discriminative
+architectures may be used and the model can also be trained on unlabeled data.
+We demonstrate that energy based training of the joint distribution improves
+calibration, robustness, and out-of-distribution detection while also enabling
+our models to generate samples rivaling the quality of recent GAN approaches. We
+improve upon recently proposed techniques for scaling up the training of energy
+based models and present an approach which adds little overhead compared to
+standard classification training. Our approach is the first to achieve
+performance rivaling the state-of-the-art in both generative and discriminative
+learning within one hybrid model."
+http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_firstAuthor|Will Grathwohl +http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_updated|2019-12-11T19:57:55Z +http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_title|Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One +http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_published|2019-12-06T18:00:36Z +http://www.semanlink.net/doc/2019/12/_1912_03263_your_classifier_is|arxiv_num|1912.03263 +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|creationDate|2019-07-15 +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_author|Philip S. Yu +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_author|Zonghan Wu +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_author|Guodong Long +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_author|Chengqi Zhang +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_author|Shirui Pan +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_author|Fengwen Chen +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|comment|an overview of graph neural networks (GNNs) in data mining and machine learning fields +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|title|[1901.00596] A Comprehensive Survey on Graph Neural Networks +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|bookmarkOf|https://arxiv.org/abs/1901.00596 +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|creationTime|2019-07-15T23:15:09Z +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_summary|"Deep learning has revolutionized many machine learning tasks in recent years, +ranging from image classification and video processing to speech recognition +and natural language understanding. The data in these tasks are typically +represented in the Euclidean space. However, there is an increasing number of +applications where data are generated from non-Euclidean domains and are +represented as graphs with complex relationships and interdependency between +objects. The complexity of graph data has imposed significant challenges on +existing machine learning algorithms. Recently, many studies on extending deep +learning approaches for graph data have emerged. In this survey, we provide a +comprehensive overview of graph neural networks (GNNs) in data mining and +machine learning fields. We propose a new taxonomy to divide the +state-of-the-art graph neural networks into four categories, namely recurrent +graph neural networks, convolutional graph neural networks, graph autoencoders, +and spatial-temporal graph neural networks. We further discuss the applications +of graph neural networks across various domains and summarize the open source +codes, benchmark data sets, and model evaluation of graph neural networks. +Finally, we propose potential research directions in this rapidly growing +field." 
+http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_firstAuthor|Zonghan Wu +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_updated|2019-12-04T01:43:00Z +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_title|A Comprehensive Survey on Graph Neural Networks +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_published|2019-01-03T03:20:55Z +http://www.semanlink.net/doc/2019/07/_1901_00596_a_comprehensive_su|arxiv_num|1901.00596 +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|creationDate|2019-12-17 +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|tag|http://www.semanlink.net/tag/annotation_tools +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|tag|http://www.semanlink.net/tag/nlp_data_anonymization +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|tag|http://www.semanlink.net/tag/nlp_juridique +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|tag|http://www.semanlink.net/tag/labeling_data +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|comment|"Second post, [First part: Why we switched from Spacy to Flair to anonymize French case law](doc:2021/02/why_we_switched_from_spacy_to_f) + +> It has been the most striking aspect of this project, each effort we put on the **annotation quality** has been translated to score improvement, even the smallest ones." +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|relatedDoc|http://www.semanlink.net/doc/2021/02/why_we_switched_from_spacy_to_f +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|title|NER algo benchmark: spaCy, Flair, m-BERT and camemBERT on anonymizing French commercial legal cases +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|bookmarkOf|https://towardsdatascience.com/benchmark-ner-algorithm-d4ab01b2d4c3 +http://www.semanlink.net/doc/2019/12/ner_algo_benchmark_spacy_flai|creationTime|2019-12-17T14:46:24Z +http://www.semanlink.net/doc/2019/08/4th_workshop_on_representation_|creationDate|2019-08-12 +http://www.semanlink.net/doc/2019/08/4th_workshop_on_representation_|tag|http://www.semanlink.net/tag/acl_2019 +http://www.semanlink.net/doc/2019/08/4th_workshop_on_representation_|tag|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/doc/2019/08/4th_workshop_on_representation_|tag|http://www.semanlink.net/tag/representation_learning_for_nlp +http://www.semanlink.net/doc/2019/08/4th_workshop_on_representation_|comment|"Talks: + +- Language emergence as representation learning (Marco Baroni) + +> language emergence among deep neural network agents that have to jointly solve a task. Recent findings suggest that the language-like code developed by such agents both differs from and resembles natural language in interesting ways. For example, the emergent code does not naturally represent general concepts, but rather very specific invariances in the perceptual input + +- Representations shaped by dialogue interaction (Raquel Fernández) + +> When we use language to communicate with each other in conversation, we build an internal representation of our evolving common ground. Traditionally, in dialogue systems this is captured by an explicit dialogue state defined a priori. Can we develop dialogue agents that learn their own (joint) representations? 
+ +- Knowledgeable and Adversarially-Robust Representation Learning (Mohit Bansal) + +- Modeling Output Spaces in Continuous-Output Language Generation (Yulia Tsvetkov)" +http://www.semanlink.net/doc/2019/08/4th_workshop_on_representation_|title|4th Workshop on Representation Learning for NLP +http://www.semanlink.net/doc/2019/08/4th_workshop_on_representation_|bookmarkOf|https://sites.google.com/view/repl4nlp2019/schedule +http://www.semanlink.net/doc/2019/08/4th_workshop_on_representation_|creationTime|2019-08-12T10:03:41Z +http://www.semanlink.net/doc/2019/10/apache_poi_the_java_api_for_m|creationDate|2019-10-20 +http://www.semanlink.net/doc/2019/10/apache_poi_the_java_api_for_m|tag|http://www.semanlink.net/tag/apache_org +http://www.semanlink.net/doc/2019/10/apache_poi_the_java_api_for_m|tag|http://www.semanlink.net/tag/tika +http://www.semanlink.net/doc/2019/10/apache_poi_the_java_api_for_m|tag|http://www.semanlink.net/tag/microsoft +http://www.semanlink.net/doc/2019/10/apache_poi_the_java_api_for_m|comment|[here for word docs](http://poi.apache.org/components/document/index.html) +http://www.semanlink.net/doc/2019/10/apache_poi_the_java_api_for_m|title|Apache POI - the Java API for Microsoft Documents +http://www.semanlink.net/doc/2019/10/apache_poi_the_java_api_for_m|bookmarkOf|http://poi.apache.org/ +http://www.semanlink.net/doc/2019/10/apache_poi_the_java_api_for_m|creationTime|2019-10-20T14:47:49Z +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|creationDate|2020-01-22 +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|tag|http://www.semanlink.net/tag/semi_supervised_learning +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|tag|http://www.semanlink.net/tag/google_research +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_author|Nicholas Carlini +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_author|David Berthelot +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_author|Han Zhang +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_author|Zizhao Zhang +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_author|Ekin D. Cubuk +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_author|Kihyuk Sohn +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_author|Chun-Liang Li +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_author|Colin Raffel +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_author|Alex Kurakin +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|comment|"[github](https://github.com/google-research/fixmatch) + +> we demonstrate the **power of a +simple combination of two common Semi-Supervised Learning methods**: consistency +regularization and pseudo-labeling. + +1. First generates pseudo-labels using the model’s +predictions on weakly-augmented unlabeled images. For a +given image, the pseudo-label is only retained if the model +produces a high-confidence prediction. +2. The model is then +trained to predict the pseudo-label when fed a strongly augmented +version of the same image." 
+http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|title|[2001.07685] FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|bookmarkOf|https://arxiv.org/abs/2001.07685 +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|creationTime|2020-01-22T18:11:37Z +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_summary|"Semi-supervised learning (SSL) provides an effective means of leveraging +unlabeled data to improve a model's performance. In this paper, we demonstrate +the power of a simple combination of two common SSL methods: consistency +regularization and pseudo-labeling. Our algorithm, FixMatch, first generates +pseudo-labels using the model's predictions on weakly-augmented unlabeled +images. For a given image, the pseudo-label is only retained if the model +produces a high-confidence prediction. The model is then trained to predict the +pseudo-label when fed a strongly-augmented version of the same image. Despite +its simplicity, we show that FixMatch achieves state-of-the-art performance +across a variety of standard semi-supervised learning benchmarks, including +94.93% accuracy on CIFAR-10 with 250 labels and 88.61% accuracy with 40 -- just +4 labels per class. Since FixMatch bears many similarities to existing SSL +methods that achieve worse performance, we carry out an extensive ablation +study to tease apart the experimental factors that are most important to +FixMatch's success. We make our code available at +https://github.com/google-research/fixmatch." +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_firstAuthor|Kihyuk Sohn +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_updated|2020-01-21T18:32:27Z +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_title|FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_published|2020-01-21T18:32:27Z +http://www.semanlink.net/doc/2020/01/_2001_07685_fixmatch_simplify|arxiv_num|2001.07685 +http://www.semanlink.net/doc/2019/07/average_joe_sur_twitter_ever|creationDate|2019-07-19 +http://www.semanlink.net/doc/2019/07/average_joe_sur_twitter_ever|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/07/average_joe_sur_twitter_ever|tag|http://www.semanlink.net/tag/mots_expressions_remarquables +http://www.semanlink.net/doc/2019/07/average_joe_sur_twitter_ever|title|"average joe sur Twitter : ""everyone please share your favorite not-english word or phrase.""" +http://www.semanlink.net/doc/2019/07/average_joe_sur_twitter_ever|bookmarkOf|https://twitter.com/jazz_inmypants/status/1151867623723950080 +http://www.semanlink.net/doc/2019/07/average_joe_sur_twitter_ever|creationTime|2019-07-19T23:40:24Z +http://www.semanlink.net/doc/2019/11/alexandria_ocasio_cortez_sur_tw|creationDate|2019-11-09 +http://www.semanlink.net/doc/2019/11/alexandria_ocasio_cortez_sur_tw|tag|http://www.semanlink.net/tag/alexandria_ocasio_cortez +http://www.semanlink.net/doc/2019/11/alexandria_ocasio_cortez_sur_tw|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/11/alexandria_ocasio_cortez_sur_tw|tag|http://www.semanlink.net/tag/rechauffement_climatique +http://www.semanlink.net/doc/2019/11/alexandria_ocasio_cortez_sur_tw|title|"Alexandria Ocasio-Cortez sur Twitter : ""When it comes to climate change, we are going to 
pay no matter what""" +http://www.semanlink.net/doc/2019/11/alexandria_ocasio_cortez_sur_tw|bookmarkOf|https://twitter.com/AOC/status/1176575799710736384 +http://www.semanlink.net/doc/2019/11/alexandria_ocasio_cortez_sur_tw|creationTime|2019-11-09T15:30:37Z +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|creationDate|2019-08-25 +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|tag|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|arxiv_author|Bryan Perozzi +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|arxiv_author|Haochen Chen +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|arxiv_author|Rami Al-Rfou +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|arxiv_author|Steven Skiena +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|title|[1808.02590] A Tutorial on Network Embeddings +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|bookmarkOf|https://arxiv.org/abs/1808.02590 +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|creationTime|2019-08-25T02:02:16Z +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|arxiv_summary|"Network embedding methods aim at learning low-dimensional latent +representation of nodes in a network. These representations can be used as +features for a wide range of tasks on graphs such as classification, +clustering, link prediction, and visualization. In this survey, we give an +overview of network embeddings by summarizing and categorizing recent +advancements in this research field. We first discuss the desirable properties +of network embeddings and briefly introduce the history of network embedding +algorithms. Then, we discuss network embedding methods under different +scenarios, such as supervised versus unsupervised learning, learning embeddings +for homogeneous networks versus for heterogeneous networks, etc. We further +demonstrate the applications of network embeddings, and conclude the survey +with future work in this area." +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|arxiv_firstAuthor|Haochen Chen +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|arxiv_updated|2018-08-08T00:54:01Z +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|arxiv_title|A Tutorial on Network Embeddings +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|arxiv_published|2018-08-08T00:54:01Z +http://www.semanlink.net/doc/2019/08/_1808_02590_a_tutorial_on_netw|arxiv_num|1808.02590 +http://www.semanlink.net/doc/2019/11/clustering_of_semantically_enri|creationDate|2019-11-15 +http://www.semanlink.net/doc/2019/11/clustering_of_semantically_enri|tag|http://www.semanlink.net/tag/short_text_clustering +http://www.semanlink.net/doc/2019/11/clustering_of_semantically_enri|tag|http://www.semanlink.net/tag/lexical_ambiguity +http://www.semanlink.net/doc/2019/11/clustering_of_semantically_enri|comment|> In order to cope with polysemy we adapt the SenseSearcher algorithm (SnS), by Kozlowski and Rybinski in Computational Intelligence 33(3): 335–367, 2017b. 
In addition, we test the possibilities of improving the quality of clustering ultra-short texts by means of enriching them semantically. We present two approaches, one based on neural-based distributional models, and the other based on external knowledge resources. +http://www.semanlink.net/doc/2019/11/clustering_of_semantically_enri|title|Clustering of semantically enriched short texts (2019) +http://www.semanlink.net/doc/2019/11/clustering_of_semantically_enri|bookmarkOf|https://rd.springer.com/article/10.1007/s10844-018-0541-4 +http://www.semanlink.net/doc/2019/11/clustering_of_semantically_enri|creationTime|2019-11-15T10:42:08Z +http://www.semanlink.net/doc/2020/03/ambiversenlu_a_natural_languag|creationDate|2020-03-13 +http://www.semanlink.net/doc/2020/03/ambiversenlu_a_natural_languag|tag|http://www.semanlink.net/tag/entity_linking +http://www.semanlink.net/doc/2020/03/ambiversenlu_a_natural_languag|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/03/ambiversenlu_a_natural_languag|tag|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/doc/2020/03/ambiversenlu_a_natural_languag|tag|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/doc/2020/03/ambiversenlu_a_natural_languag|tag|http://www.semanlink.net/tag/concept_extraction +http://www.semanlink.net/doc/2020/03/ambiversenlu_a_natural_languag|title|AmbiverseNLU: A Natural Language Understanding suite by Max Planck Institute for Informatics +http://www.semanlink.net/doc/2020/03/ambiversenlu_a_natural_languag|bookmarkOf|https://github.com/ambiverse-nlu/ambiverse-nlu +http://www.semanlink.net/doc/2020/03/ambiversenlu_a_natural_languag|creationTime|2020-03-13T10:30:41Z +http://www.semanlink.net/doc/2020/02/hugging_face_sur_twitter_to_|creationDate|2020-02-14 +http://www.semanlink.net/doc/2020/02/hugging_face_sur_twitter_to_|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2020/02/hugging_face_sur_twitter_to_|tag|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/doc/2020/02/hugging_face_sur_twitter_to_|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2020/02/hugging_face_sur_twitter_to_|tag|http://www.semanlink.net/tag/distilbert +http://www.semanlink.net/doc/2020/02/hugging_face_sur_twitter_to_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/02/hugging_face_sur_twitter_to_|title|Hugging Face sur Twitter : DistilBERT-cased for Question Answering w/ just 3 lines of javascript +http://www.semanlink.net/doc/2020/02/hugging_face_sur_twitter_to_|bookmarkOf|https://twitter.com/huggingface/status/1228025045412438019 +http://www.semanlink.net/doc/2020/02/hugging_face_sur_twitter_to_|creationTime|2020-02-14T00:23:36Z +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|creationDate|2020-02-18 +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|tag|http://www.semanlink.net/tag/label_embedding +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|tag|http://www.semanlink.net/tag/image_classification +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|arxiv_author|Zeynep Akata +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|arxiv_author|Zaid Harchaoui +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|arxiv_author|Cordelia Schmid 
+http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|arxiv_author|Florent Perronnin +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|title|[1503.08677] Label-Embedding for Image Classification +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|bookmarkOf|https://arxiv.org/abs/1503.08677 +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|creationTime|2020-02-18T15:00:20Z +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|arxiv_summary|"Attributes act as intermediate representations that enable parameter sharing +between classes, a must when training data is scarce. We propose to view +attribute-based image classification as a label-embedding problem: each class +is embedded in the space of attribute vectors. We introduce a function that +measures the compatibility between an image and a label embedding. The +parameters of this function are learned on a training set of labeled samples to +ensure that, given an image, the correct classes rank higher than the incorrect +ones. Results on the Animals With Attributes and Caltech-UCSD-Birds datasets +show that the proposed framework outperforms the standard Direct Attribute +Prediction baseline in a zero-shot learning scenario. Label embedding enjoys a +built-in ability to leverage alternative sources of information instead of or +in addition to attributes, such as e.g. class hierarchies or textual +descriptions. Moreover, label embedding encompasses the whole range of learning +settings from zero-shot learning to regular learning with a large number of +labeled examples." +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|arxiv_firstAuthor|Zeynep Akata +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|arxiv_updated|2015-10-01T10:48:38Z +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|arxiv_title|Label-Embedding for Image Classification +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|arxiv_published|2015-03-30T14:04:34Z +http://www.semanlink.net/doc/2020/02/_1503_08677_label_embedding_fo|arxiv_num|1503.08677 +http://www.semanlink.net/doc/2020/04/andrej_karpathy_%7C_multi_task_le|creationDate|2020-04-27 +http://www.semanlink.net/doc/2020/04/andrej_karpathy_%7C_multi_task_le|tag|http://www.semanlink.net/tag/multi_task_learning +http://www.semanlink.net/doc/2020/04/andrej_karpathy_%7C_multi_task_le|tag|http://www.semanlink.net/tag/andrej_karpathy +http://www.semanlink.net/doc/2020/04/andrej_karpathy_%7C_multi_task_le|title|Andrej Karpathy Multi-Task Learning in the Wilderness · SlidesLive +http://www.semanlink.net/doc/2020/04/andrej_karpathy_%7C_multi_task_le|bookmarkOf|https://slideslive.com/38917690/multitask-learning-in-the-wilderness +http://www.semanlink.net/doc/2020/04/andrej_karpathy_%7C_multi_task_le|creationTime|2020-04-27T19:39:48Z +http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga|creationDate|2019-06-28 +http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga|tag|http://www.semanlink.net/tag/weak_supervision +http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga|tag|http://www.semanlink.net/tag/snorkel +http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga|tag|http://www.semanlink.net/tag/knowledge_resources +http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga|comment|"how existing knowledge 
in an organization can be used as noisier, higher-level supervision—or, as it is often termed, weak supervision—to quickly label large training datasets + +Snorkel Drybell, an experimental internal system, which adapts the open-source +Snorkel framework to **use diverse organizational knowledge +resources—like internal models, ontologies, legacy rules, knowledge +graphs and more—in order to generate training data** for machine learning +models at web scale. + +Enables writing **labeling functions** that label training data programmatically + +[paper](/doc/2019/06/_1812_00417_snorkel_drybell_a) + +" +http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga|relatedDoc|http://www.semanlink.net/doc/2019/06/_1812_00417_snorkel_drybell_a +http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga|title|Google AI Blog: Harnessing Organizational Knowledge for Machine Learning (2019) +http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga|bookmarkOf|https://ai.googleblog.com/2019/03/harnessing-organizational-knowledge-for.html +http://www.semanlink.net/doc/2019/06/google_ai_blog_harnessing_orga|creationTime|2019-06-28T02:00:39Z +http://www.semanlink.net/doc/2020/01/en_inde_la_resistance_aux_anti|creationDate|2020-01-28 +http://www.semanlink.net/doc/2020/01/en_inde_la_resistance_aux_anti|tag|http://www.semanlink.net/tag/antibiotic_resistance +http://www.semanlink.net/doc/2020/01/en_inde_la_resistance_aux_anti|tag|http://www.semanlink.net/tag/inde +http://www.semanlink.net/doc/2020/01/en_inde_la_resistance_aux_anti|title|En Inde, la résistance aux antibiotiques devient un problème sanitaire très sérieux +http://www.semanlink.net/doc/2020/01/en_inde_la_resistance_aux_anti|bookmarkOf|https://www.lemonde.fr/sciences/article/2020/01/27/en-inde-la-resistance-aux-antibiotiques-devient-un-probleme-sanitaire-tres-serieux_6027416_1650684.html +http://www.semanlink.net/doc/2020/01/en_inde_la_resistance_aux_anti|creationTime|2020-01-28T00:16:00Z +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|creationDate|2020-05-12 +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|tag|http://www.semanlink.net/tag/quoc_le +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|tag|http://www.semanlink.net/tag/multitask_learning_in_nlp +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|tag|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|tag|http://www.semanlink.net/tag/kd_mkb_biblio +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_author|Quoc V. Le +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_author|Christopher D. Manning
+http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_author|Urvashi Khandelwal +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_author|Minh-Thang Luong +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_author|Kevin Clark +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|comment|> **knowledge distillation where single-task models teach a multi-task model.** We enhance this training with **teacher annealing**, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|title|[1907.04829] BAM! Born-Again Multi-Task Networks for Natural Language Understanding +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|bookmarkOf|https://arxiv.org/abs/1907.04829 +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|creationTime|2020-05-12T19:08:45Z +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_summary|"It can be challenging to train multi-task neural networks that outperform or +even match their single-task counterparts. To help address this, we propose +using knowledge distillation where single-task models teach a multi-task model. +We enhance this training with teacher annealing, a novel method that gradually +transitions the model from distillation to supervised learning, helping the +multi-task model surpass its single-task teachers. We evaluate our approach by +multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently +improves over standard single-task and multi-task training." +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_firstAuthor|Kevin Clark +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_updated|2019-07-10T17:14:47Z +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_title|BAM! Born-Again Multi-Task Networks for Natural Language Understanding +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_published|2019-07-10T17:14:47Z +http://www.semanlink.net/doc/2020/05/1907_04829_bam_born_again_mu|arxiv_num|1907.04829 +http://www.semanlink.net/doc/2019/06/transferable_neural_projection_|creationDate|2019-06-06 +http://www.semanlink.net/doc/2019/06/transferable_neural_projection_|tag|http://www.semanlink.net/tag/on_device_nlp +http://www.semanlink.net/doc/2019/06/transferable_neural_projection_|comment|"Forget word embeddings? + +> Neural word representations occupy huge memory making it hard to deploy on-device and often do not generalize to unknown words due to vocabulary pruning. In this paper, we propose a skip-gram based architecture coupled with Locality-Sensitive Hashing (LSH) projections to learn efficient dynamically computable representations. Our model does not need to store lookup tables as representations are computed on-the-fly and require low memory footprint. The representations can be trained in an unsupervised fashion and can be easily transferred to other NLP tasks. For qualitative evaluation, we analyze the nearest neighbors of the word representations and discover semantically similar words even with misspellings. For quantitative evaluation, we plug our transferable projections into a simple LSTM and run it on multiple NLP tasks and show how our transferable projections achieve better performance compared to prior work."
+http://www.semanlink.net/doc/2019/06/transferable_neural_projection_|title|Transferable Neural Projection Representations (2019) +http://www.semanlink.net/doc/2019/06/transferable_neural_projection_|bookmarkOf|https://www.aclweb.org/anthology/N19-1339/ +http://www.semanlink.net/doc/2019/06/transferable_neural_projection_|creationTime|2019-06-06T01:43:47Z +http://www.semanlink.net/doc/2019/08/accelerating_towards_natural_la|creationDate|2019-08-30 +http://www.semanlink.net/doc/2019/08/accelerating_towards_natural_la|tag|http://www.semanlink.net/tag/neo4j +http://www.semanlink.net/doc/2019/08/accelerating_towards_natural_la|tag|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/doc/2019/08/accelerating_towards_natural_la|comment|"The structure of the graph model makes natural language processing easier +" +http://www.semanlink.net/doc/2019/08/accelerating_towards_natural_la|title|Accelerating Towards Natural Language Search with Graphs +http://www.semanlink.net/doc/2019/08/accelerating_towards_natural_la|bookmarkOf|https://neo4j.com/blog/accelerating-towards-natural-language-search-graphs/ +http://www.semanlink.net/doc/2019/08/accelerating_towards_natural_la|creationTime|2019-08-30T21:05:05Z +http://www.semanlink.net/doc/2019/12/introducing_the_annotated_text_|creationDate|2019-12-14 +http://www.semanlink.net/doc/2019/12/introducing_the_annotated_text_|tag|http://www.semanlink.net/tag/elasticsearch_annotated_text_field +http://www.semanlink.net/doc/2019/12/introducing_the_annotated_text_|title|Introducing the Annotated Text Plugin for Elasticsearch: Search for Things (not Strings) Elastic Blog +http://www.semanlink.net/doc/2019/12/introducing_the_annotated_text_|bookmarkOf|https://www.elastic.co/fr/blog/search-for-things-not-strings-with-the-annotated-text-plugin +http://www.semanlink.net/doc/2019/12/introducing_the_annotated_text_|creationTime|2019-12-14T01:01:10Z +http://www.semanlink.net/doc/2019/10/getting_robots_to_listen_using|creationDate|2019-10-30 +http://www.semanlink.net/doc/2019/10/getting_robots_to_listen_using|tag|http://www.semanlink.net/tag/python_sample_code +http://www.semanlink.net/doc/2019/10/getting_robots_to_listen_using|tag|http://www.semanlink.net/tag/ibm_watson_and_speech_to_text +http://www.semanlink.net/doc/2019/10/getting_robots_to_listen_using|comment|Includes Python sample code using WebSockets: streaming audio to the Watson Speech to Text service while also getting responses back at the same time.
+http://www.semanlink.net/doc/2019/10/getting_robots_to_listen_using|title|Getting robots to listen: Using Watson's Speech to Text service - Watson +http://www.semanlink.net/doc/2019/10/getting_robots_to_listen_using|bookmarkOf|https://www.ibm.com/blogs/watson/2016/07/getting-robots-listen-using-watsons-speech-text-service/ +http://www.semanlink.net/doc/2019/10/getting_robots_to_listen_using|creationTime|2019-10-30T00:10:58Z +http://www.semanlink.net/doc/2020/04/ceur_ws_org_vol_2377_workshop|creationDate|2020-04-29 +http://www.semanlink.net/doc/2020/04/ceur_ws_org_vol_2377_workshop|tag|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/doc/2020/04/ceur_ws_org_vol_2377_workshop|tag|http://www.semanlink.net/tag/eswc_2019 +http://www.semanlink.net/doc/2020/04/ceur_ws_org_vol_2377_workshop|title|CEUR-WS.org/Vol-2377 - Workshop on Deep Learning for Knowledge Graphs 2019 +http://www.semanlink.net/doc/2020/04/ceur_ws_org_vol_2377_workshop|bookmarkOf|http://ceur-ws.org/Vol-2377/ +http://www.semanlink.net/doc/2020/04/ceur_ws_org_vol_2377_workshop|creationTime|2020-04-29T14:04:51Z +http://www.semanlink.net/doc/2019/07/blogging_with_jupyter_notebooks|creationDate|2019-07-11 +http://www.semanlink.net/doc/2019/07/blogging_with_jupyter_notebooks|tag|http://www.semanlink.net/tag/blog +http://www.semanlink.net/doc/2019/07/blogging_with_jupyter_notebooks|tag|http://www.semanlink.net/tag/jupyter +http://www.semanlink.net/doc/2019/07/blogging_with_jupyter_notebooks|comment|How to convert a Jupyter notebook to a blog post +http://www.semanlink.net/doc/2019/07/blogging_with_jupyter_notebooks|title|Blogging with jupyter notebooks and jekyll - Claire Duvallet +http://www.semanlink.net/doc/2019/07/blogging_with_jupyter_notebooks|bookmarkOf|https://cduvallet.github.io/posts/2018/03/ipython-notebooks-jekyll +http://www.semanlink.net/doc/2019/07/blogging_with_jupyter_notebooks|creationTime|2019-07-11T10:39:59Z +http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_|creationDate|2020-04-30 +http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_|tag|http://www.semanlink.net/tag/attention_knowledge_graphs +http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_|comment|Blog post for this [paper](/doc/2020/04/1906_01195_learning_attention) +http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_|relatedDoc|http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention +http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_|title|Deepak Nathani Pay Attention, Relations are Important +http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_|bookmarkOf|https://deepakn97.github.io/blog/2019/Knowledge-Base-Relation-Prediction/ +http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_|creationTime|2020-04-30T13:03:33Z +http://www.semanlink.net/doc/2020/04/deepak_nathani_%7C_pay_attention_|mainDoc|http://www.semanlink.net/doc/2020/04/1906_01195_learning_attention +http://www.semanlink.net/doc/2020/04/7_alternatives_to_the_div_htm|creationDate|2020-04-27 +http://www.semanlink.net/doc/2020/04/7_alternatives_to_the_div_htm|tag|http://www.semanlink.net/tag/html +http://www.semanlink.net/doc/2020/04/7_alternatives_to_the_div_htm|title|7 Alternatives to the div HTML Tag - Zac Heisey - Medium 
+http://www.semanlink.net/doc/2020/04/7_alternatives_to_the_div_htm|bookmarkOf|https://medium.com/@zac_heisey/7-alternatives-to-the-div-html-tag-7c888c7b5036 +http://www.semanlink.net/doc/2020/04/7_alternatives_to_the_div_htm|creationTime|2020-04-27T15:34:28Z +http://www.semanlink.net/doc/2020/02/nlp_newsletter_the_annotated_g|creationDate|2020-02-24 +http://www.semanlink.net/doc/2020/02/nlp_newsletter_the_annotated_g|tag|http://www.semanlink.net/tag/ml_nlp_blog +http://www.semanlink.net/doc/2020/02/nlp_newsletter_the_annotated_g|tag|http://www.semanlink.net/tag/gpt_2 +http://www.semanlink.net/doc/2020/02/nlp_newsletter_the_annotated_g|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/02/nlp_newsletter_the_annotated_g|title|NLP Newsletter: The Annotated GPT-2, Understanding self-distillation, Haiku, GANILLA, Sparkwiki, Ethics in NLP, Torchmeta,… +http://www.semanlink.net/doc/2020/02/nlp_newsletter_the_annotated_g|bookmarkOf|https://medium.com/dair-ai/nlp-newsletter-the-annotated-gpt-2-understanding-self-distillation-haiku-ganilla-sparkwiki-b0f47f595c82 +http://www.semanlink.net/doc/2020/02/nlp_newsletter_the_annotated_g|creationTime|2020-02-24T09:48:11Z +http://www.semanlink.net/doc/2020/01/natural_language_understanding_|creationDate|2020-01-09 +http://www.semanlink.net/doc/2020/01/natural_language_understanding_|tag|http://www.semanlink.net/tag/slot_tagging +http://www.semanlink.net/doc/2020/01/natural_language_understanding_|tag|http://www.semanlink.net/tag/intent_classification_and_slot_filling +http://www.semanlink.net/doc/2020/01/natural_language_understanding_|tag|http://www.semanlink.net/tag/nlu +http://www.semanlink.net/doc/2020/01/natural_language_understanding_|tag|http://www.semanlink.net/tag/sequence_to_sequence_learning +http://www.semanlink.net/doc/2020/01/natural_language_understanding_|title|Natural Language Understanding with Sequence to Sequence Models +http://www.semanlink.net/doc/2020/01/natural_language_understanding_|bookmarkOf|https://towardsdatascience.com/natural-language-understanding-with-sequence-to-sequence-models-e87d41ad258b +http://www.semanlink.net/doc/2020/01/natural_language_understanding_|creationTime|2020-01-09T00:50:49Z +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|creationDate|2019-08-15 +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|tag|http://www.semanlink.net/tag/neuroscience_and_ai +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|tag|http://www.semanlink.net/tag/backpropagation +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|tag|http://www.semanlink.net/tag/backpropagation_vs_biology +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|tag|http://www.semanlink.net/tag/information_bottleneck_method +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|tag|http://www.semanlink.net/tag/information_theory_and_deep_learning +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|arxiv_author|J. P. Lewis +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|arxiv_author|W. 
Bastiaan Kleijn +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|arxiv_author|Wan-Duo Kurt Ma +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|comment|"> we show that it is possible to learn classification tasks at near competitive accuracy **without +backpropagation**, by **maximizing a surrogate of the mutual information between hidden representations and labels** and +simultaneously **minimizing the mutual dependency between hidden representations and the inputs**... +the hidden units of a network trained in this way form useful representations. Specifically, fully competitive accuracy +can be obtained by freezing the network trained without backpropagation and appending and training a one-layer +network using conventional SGD to convert the representation to the desired format. + +The training method uses an approximation of the [#information bottleneck](/tag/information_bottleneck_method). + +Advantages: + +> - The method facilitates parallel processing and requires significantly less operations. +> - It does not suffer from exploding or vanishing gradients. +> - It is biologically more plausible than Backpropagation + +" +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|title|[1908.01580] The HSIC Bottleneck: Deep Learning without Back-Propagation +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|bookmarkOf|https://arxiv.org/abs/1908.01580 +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|creationTime|2019-08-15T17:13:21Z +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|arxiv_summary|"We introduce the HSIC (Hilbert-Schmidt independence criterion) bottleneck for +training deep neural networks. The HSIC bottleneck is an alternative to the +conventional cross-entropy loss and backpropagation that has a number of +distinct advantages. It mitigates exploding and vanishing gradients, resulting +in the ability to learn very deep networks without skip connections. There is +no requirement for symmetric feedback or update locking. We find that the HSIC +bottleneck provides performance on MNIST/FashionMNIST/CIFAR10 classification +comparable to backpropagation with a cross-entropy target, even when the system +is not encouraged to make the output resemble the classification labels. +Appending a single layer trained with SGD (without backpropagation) to reformat +the information further improves performance."
+http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|arxiv_firstAuthor|Wan-Duo Kurt Ma +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|arxiv_updated|2019-12-05T09:24:24Z +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|arxiv_title|The HSIC Bottleneck: Deep Learning without Back-Propagation +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|arxiv_published|2019-08-05T12:23:24Z +http://www.semanlink.net/doc/2019/08/_1908_01580_the_hsic_bottlenec|arxiv_num|1908.01580 +http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions|creationDate|2019-07-02 +http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions|tag|http://www.semanlink.net/tag/quora_question_pairs +http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions|tag|http://www.semanlink.net/tag/xgboost +http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions|comment|[Part 2](/doc/?uri=https%3A%2F%2Ftowardsdatascience.com%2Ffinding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d) +http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions|relatedDoc|https://towardsdatascience.com/finding-similar-quora-questions-with-word2vec-and-xgboost-1a19ad272c0d +http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions|title|Finding Similar Quora Questions with BOW, TFIDF and Xgboost +http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions|bookmarkOf|https://towardsdatascience.com/finding-similar-quora-questions-with-bow-tfidf-and-random-forest-c54ad88d1370 +http://www.semanlink.net/doc/2019/07/finding_similar_quora_questions|creationTime|2019-07-02T01:26:01Z +http://www.semanlink.net/doc/2019/12/custom_named_entity_recognition|creationDate|2019-12-31 +http://www.semanlink.net/doc/2019/12/custom_named_entity_recognition|tag|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/doc/2019/12/custom_named_entity_recognition|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2019/12/custom_named_entity_recognition|title|Custom Named Entity Recognition Using spaCy - Towards Data Science +http://www.semanlink.net/doc/2019/12/custom_named_entity_recognition|bookmarkOf|https://towardsdatascience.com/custom-named-entity-recognition-using-spacy-7140ebbb3718 +http://www.semanlink.net/doc/2019/12/custom_named_entity_recognition|creationTime|2019-12-31T11:31:41Z +http://www.semanlink.net/doc/2019/12/introducing_the_new_snorkel|creationDate|2019-12-07 +http://www.semanlink.net/doc/2019/12/introducing_the_new_snorkel|tag|http://www.semanlink.net/tag/snorkel +http://www.semanlink.net/doc/2019/12/introducing_the_new_snorkel|title|Introducing the New Snorkel +http://www.semanlink.net/doc/2019/12/introducing_the_new_snorkel|bookmarkOf|https://www.snorkel.org/blog/hello-world-v-0-9 +http://www.semanlink.net/doc/2019/12/introducing_the_new_snorkel|creationTime|2019-12-07T11:24:42Z +http://www.semanlink.net/doc/2020/01/papers_with_code_the_latest_i|creationDate|2020-01-10 +http://www.semanlink.net/doc/2020/01/papers_with_code_the_latest_i|tag|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/doc/2020/01/papers_with_code_the_latest_i|tag|http://www.semanlink.net/tag/code +http://www.semanlink.net/doc/2020/01/papers_with_code_the_latest_i|tag|http://www.semanlink.net/tag/research_papers 
+http://www.semanlink.net/doc/2020/01/papers_with_code_the_latest_i|title|Papers With Code : the latest in machine learning +http://www.semanlink.net/doc/2020/01/papers_with_code_the_latest_i|bookmarkOf|https://www.paperswithcode.com/ +http://www.semanlink.net/doc/2020/01/papers_with_code_the_latest_i|creationTime|2020-01-10T10:32:35Z +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|creationDate|2019-08-23 +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|tag|http://www.semanlink.net/tag/natural_language_generation +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|tag|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_author|Yi Luan +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_author|Mirella Lapata +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_author|Hannaneh Hajishirzi +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_author|Dhanush Bekal +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_author|Rik Koncel-Kedziorski +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|title|[1904.02342] Text Generation from Knowledge Graphs with Graph Transformers +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|bookmarkOf|https://arxiv.org/abs/1904.02342 +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|creationTime|2019-08-23T00:39:46Z +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_summary|"Generating texts which express complex ideas spanning multiple sentences +requires a structured representation of their content (document plan), but +these representations are prohibitively expensive to manually produce. In this +work, we address the problem of generating coherent multi-sentence texts from +the output of an information extraction system, and in particular a knowledge +graph. Graphical knowledge representations are ubiquitous in computing, but +pose a significant challenge for text generation techniques due to their +non-hierarchical nature, collapsing of long-distance dependencies, and +structural variety. We introduce a novel graph transforming encoder which can +leverage the relational structure of such knowledge graphs without imposing +linearization or hierarchical constraints. Incorporated into an encoder-decoder +setup, we provide an end-to-end trainable system for graph-to-text generation +that we apply to the domain of scientific text. Automatic and human evaluations +show that our technique produces more informative texts which exhibit better +document structure than competitive encoder-decoder methods." 
+http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_firstAuthor|Rik Koncel-Kedziorski +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_updated|2019-05-18T01:07:52Z +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_title|Text Generation from Knowledge Graphs with Graph Transformers +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_published|2019-04-04T04:33:15Z +http://www.semanlink.net/doc/2019/08/_1904_02342_text_generation_fr|arxiv_num|1904.02342 +http://www.semanlink.net/doc/2019/08/knowledge_graphs_and_natural_la|creationDate|2019-08-29 +http://www.semanlink.net/doc/2019/08/knowledge_graphs_and_natural_la|tag|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/doc/2019/08/knowledge_graphs_and_natural_la|title|Knowledge Graphs and Natural Language Processing. The Year of the Graph Newsletter, July/August 2019 Linked Data Orchestration +http://www.semanlink.net/doc/2019/08/knowledge_graphs_and_natural_la|bookmarkOf|https://linkeddataorchestration.com/2019/08/12/knowledge-graph-and-natural-language-processing/ +http://www.semanlink.net/doc/2019/08/knowledge_graphs_and_natural_la|creationTime|2019-08-29T14:11:34Z +http://www.semanlink.net/doc/2019/11/hugging_face_on_a_mission_to_|creationDate|2019-11-16 +http://www.semanlink.net/doc/2019/11/hugging_face_on_a_mission_to_|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2019/11/hugging_face_on_a_mission_to_|tag|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/doc/2019/11/hugging_face_on_a_mission_to_|title|Hugging Face – On a mission to solve NLP, one commit at a time. +http://www.semanlink.net/doc/2019/11/hugging_face_on_a_mission_to_|bookmarkOf|https://huggingface.co/ +http://www.semanlink.net/doc/2019/11/hugging_face_on_a_mission_to_|creationTime|2019-11-16T00:42:46Z +http://www.semanlink.net/doc/2019/08/what_is_xlnet_and_why_it_outper|creationDate|2019-08-02 +http://www.semanlink.net/doc/2019/08/what_is_xlnet_and_why_it_outper|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/08/what_is_xlnet_and_why_it_outper|tag|http://www.semanlink.net/tag/xlnet +http://www.semanlink.net/doc/2019/08/what_is_xlnet_and_why_it_outper|title|What is XLNet and why it outperforms BERT - Towards Data Science +http://www.semanlink.net/doc/2019/08/what_is_xlnet_and_why_it_outper|bookmarkOf|https://towardsdatascience.com/what-is-xlnet-and-why-it-outperforms-bert-8d8fce710335 +http://www.semanlink.net/doc/2019/08/what_is_xlnet_and_why_it_outper|creationTime|2019-08-02T17:46:14Z +http://www.semanlink.net/doc/2020/02/online_speech_recognition_with_|creationDate|2020-02-12 +http://www.semanlink.net/doc/2020/02/online_speech_recognition_with_|tag|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/doc/2020/02/online_speech_recognition_with_|tag|http://www.semanlink.net/tag/ai_facebook +http://www.semanlink.net/doc/2020/02/online_speech_recognition_with_|title|Online speech recognition with wav2letter@anywhere +http://www.semanlink.net/doc/2020/02/online_speech_recognition_with_|bookmarkOf|https://ai.facebook.com/blog/online-speech-recognition-with-wav2letteranywhere/ +http://www.semanlink.net/doc/2020/02/online_speech_recognition_with_|creationTime|2020-02-12T14:19:09Z +http://www.semanlink.net/doc/2020/05/william_gibson_wikipedia|creationDate|2020-05-01 +http://www.semanlink.net/doc/2020/05/william_gibson_wikipedia|tag|http://www.semanlink.net/tag/science_fiction 
+http://www.semanlink.net/doc/2020/05/william_gibson_wikipedia|title|William Gibson — Wikipédia +http://www.semanlink.net/doc/2020/05/william_gibson_wikipedia|bookmarkOf|https://fr.wikipedia.org/wiki/William_Gibson +http://www.semanlink.net/doc/2020/05/william_gibson_wikipedia|creationTime|2020-05-01T13:53:01Z +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|creationDate|2019-06-22 +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|tag|http://www.semanlink.net/tag/text_multi_label_classification +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|tag|http://www.semanlink.net/tag/classification_relations_between_classes +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|tag|http://www.semanlink.net/tag/nlp_4_semanlink +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|tag|http://www.semanlink.net/tag/good +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|tag|http://www.semanlink.net/tag/extreme_multi_label_classification +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|arxiv_author|Xin Huang +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|arxiv_author|Boli Chen +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|arxiv_author|Liping Jing +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|arxiv_author|Lin Xiao +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|comment|"> This paper is motivated to better explore the semantic **relationship between each document and extreme labels by taking advantage of both document content and label correlation**. Our objective is to establish an explicit **label-aware representation for each document**. + +> LAHA consists of three parts. +> 1. The first part +adopts a multi-label self-attention mechanism **to detect the contribution +of each word to labels**. +> 2. The second part exploits the label structure and +document content **to determine the semantic connection between words +and labels in a same latent space**. +> 3. An adaptive fusion strategy is designed +in the third part to obtain the final label-aware document representation + +[Github](https://github.com/HX-idiot/Hybrid_Attention_XML) + +// TODO compare with [this](doc:2020/08/2003_11644_multi_label_text_c)" +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|relatedDoc|http://www.semanlink.net/doc/2020/08/2003_11644_multi_label_text_c +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|title|[1905.10070] Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|bookmarkOf|https://arxiv.org/abs/1905.10070 +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|creationTime|2019-06-22T17:15:57Z +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|arxiv_summary|"Extreme multi-label text classification (XMTC) aims at tagging a document +with most relevant labels from an extremely large-scale label set. It is a +challenging problem especially for the tail labels because there are only few +training documents to build classifier. This paper is motivated to better +explore the semantic relationship between each document and extreme labels by +taking advantage of both document content and label correlation. 
Our objective +is to establish an explicit label-aware representation for each document with a +hybrid attention deep neural network model(LAHA). LAHA consists of three parts. +The first part adopts a multi-label self-attention mechanism to detect the +contribution of each word to labels. The second part exploits the label +structure and document content to determine the semantic connection between +words and labels in a same latent space. An adaptive fusion strategy is +designed in the third part to obtain the final label-aware document +representation so that the essence of previous two parts can be sufficiently +integrated. Extensive experiments have been conducted on six benchmark datasets +by comparing with the state-of-the-art methods. The results show the +superiority of our proposed LAHA method, especially on the tail labels." +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|arxiv_firstAuthor|Xin Huang +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|arxiv_updated|2019-07-12T02:45:08Z +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|arxiv_title|Label-aware Document Representation via Hybrid Attention for Extreme Multi-Label Text Classification +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|arxiv_published|2019-05-24T07:30:34Z +http://www.semanlink.net/doc/2019/06/_1905_10070_label_aware_docume|arxiv_num|1905.10070 +http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|creationDate|2020-05-13 +http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|tag|http://www.semanlink.net/tag/afrique +http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|tag|http://www.semanlink.net/tag/lord_s_resistance_army +http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|tag|http://www.semanlink.net/tag/parc_du_w +http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|tag|http://www.semanlink.net/tag/ong +http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|tag|http://www.semanlink.net/tag/centrafrique +http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|tag|http://www.semanlink.net/tag/protection_de_la_nature +http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|comment|> En Centrafrique, l’immense réserve naturelle de Chinko est protégée par une ONG sud-africaine qui, face aux bergers nomades, braconniers, mercenaires et miliciens lourdement armés sévissant dans la région, s’impose par des méthodes musclées. 
+http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|title|Au cœur de l’Afrique, la guerre au nom de la nature +http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|bookmarkOf|https://www.lemonde.fr/afrique/article/2020/05/08/au-c-ur-de-l-afrique-la-guerre-au-nom-de-la-nature_6039073_3212.html +http://www.semanlink.net/doc/2020/05/au_coeur_de_l%E2%80%99afrique_la_guerre|creationTime|2020-05-13T15:21:32Z +http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new|creationDate|2020-02-11 +http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new|tag|http://www.semanlink.net/tag/knowledge_based_ai +http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new|comment|[paper](/doc/2020/02/how_much_knowledge_can_you_pack) +http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new|relatedDoc|http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack +http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new|title|"Adam Roberts sur Twitter : ""New preprint: How Much Knowledge Can You Pack into the Parameters of a Language Model?...""" +http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new|bookmarkOf|https://twitter.com/ada_rob/status/1227062195671822336 +http://www.semanlink.net/doc/2020/02/adam_roberts_sur_twitter_new|creationTime|2020-02-11T12:24:21Z +http://www.semanlink.net/doc/2020/03/la_plus_grosse_explosion_jamais|creationDate|2020-03-01 +http://www.semanlink.net/doc/2020/03/la_plus_grosse_explosion_jamais|tag|http://www.semanlink.net/tag/astrophysique +http://www.semanlink.net/doc/2020/03/la_plus_grosse_explosion_jamais|comment|"L'événement fut si puissant qu'il aurait créé une brèche de la taille de 15 Voies lactées réunies dans le plasma environnant. + +> L'Univers est un endroit étrange." 
+http://www.semanlink.net/doc/2020/03/la_plus_grosse_explosion_jamais|title|La plus grosse explosion jamais observée depuis le Big Bang +http://www.semanlink.net/doc/2020/03/la_plus_grosse_explosion_jamais|bookmarkOf|https://www.sciencesetavenir.fr/espace/univers/un-trou-noir-responsable-de-la-plus-grosse-explosion-jamais-observee-dans-l-espace-depuis-le-big-bang_142029 +http://www.semanlink.net/doc/2020/03/la_plus_grosse_explosion_jamais|creationTime|2020-03-01T12:15:45Z +http://www.semanlink.net/doc/2019/10/chronas_enter_history|creationDate|2019-10-01 +http://www.semanlink.net/doc/2019/10/chronas_enter_history|tag|http://www.semanlink.net/tag/histoire +http://www.semanlink.net/doc/2019/10/chronas_enter_history|tag|http://www.semanlink.net/tag/carte +http://www.semanlink.net/doc/2019/10/chronas_enter_history|title|Chronas: Enter History +http://www.semanlink.net/doc/2019/10/chronas_enter_history|bookmarkOf|https://chronas.org/?year=1110&epics=&markers=ar,b,si,cp,c,ca,l,m,p,e,s,a,r,at,op,o&limit=2000&type=&fill=ruler&label=ruler&value=&locale=en&position=37,37,2.5#/ +http://www.semanlink.net/doc/2019/10/chronas_enter_history|creationTime|2019-10-01T15:12:38Z +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|creationDate|2019-12-09 +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|tag|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|tag|http://www.semanlink.net/tag/facebook_fair +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|tag|http://www.semanlink.net/tag/connectionist_vs_symbolic_debate +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|tag|http://www.semanlink.net/tag/mathematiques +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|tag|http://www.semanlink.net/tag/guillaume_lample +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|arxiv_author|Guillaume Lample +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|arxiv_author|François Charton +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|title|[1912.01412] Deep Learning for Symbolic Mathematics +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|bookmarkOf|https://arxiv.org/abs/1912.01412 +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|creationTime|2019-12-09T17:11:42Z +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|arxiv_summary|"Neural networks have a reputation for being better at solving statistical or +approximate problems than at performing calculations or working with symbolic +data. In this paper, we show that they can be surprisingly good at more +elaborated tasks in mathematics, such as symbolic integration and solving +differential equations. We propose a syntax for representing mathematical +problems, and methods for generating large datasets that can be used to train +sequence-to-sequence models. We achieve results that outperform commercial +Computer Algebra Systems such as Matlab or Mathematica." 
+http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|arxiv_firstAuthor|Guillaume Lample +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|arxiv_updated|2019-12-02T15:05:24Z +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|arxiv_title|Deep Learning for Symbolic Mathematics +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|arxiv_published|2019-12-02T15:05:24Z +http://www.semanlink.net/doc/2019/12/_1912_01412_deep_learning_for_|arxiv_num|1912.01412 +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|creationDate|2020-05-15 +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|tag|http://www.semanlink.net/tag/embedding_evaluation +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|tag|http://www.semanlink.net/tag/critical_evaluation +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|tag|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|tag|http://www.semanlink.net/tag/link_prediction +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|tag|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_author|Wei Hu State Key Laboratory for Novel Software Technology, Nanjing University +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_author|Qingheng Zhang State Key Laboratory for Novel Software Technology, Nanjing University +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_author|Mohammed Samiul Saeef Department of Computer Science and Engineering, University of Texas at Arlington +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_author|Farahnaz Akrami Department of Computer Science and Engineering, University of Texas at Arlington +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_author|Chengkai Li Department of Computer Science and Engineering, University of Texas at Arlington +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|comment|"data redundancy (reverse relations), Cartesian product relations + +> A more fundamental defect +of these models is that the link prediction scenario, given +such data, is non-existent in the real-world" +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|title|[2003.08001] Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|bookmarkOf|https://arxiv.org/abs/2003.08001 +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|creationTime|2020-05-15T17:26:28Z +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_summary|"In the active research area of employing embedding models for knowledge graph +completion, particularly for the task of link prediction, most prior studies +used two benchmark datasets FB15k and WN18 in evaluating such models. Most +triples in these and other datasets in such studies belong to reverse and +duplicate relations which exhibit high data redundancy due to semantic +duplication, correlation or data incompleteness. This is a case of excessive +data leakage---a model is trained using features that otherwise would not be +available when the model needs to be applied for real prediction. 
There are +also Cartesian product relations for which every triple formed by the Cartesian +product of applicable subjects and objects is a true fact. Link prediction on +the aforementioned relations is easy and can be achieved with even better +accuracy using straightforward rules instead of sophisticated embedding models. +A more fundamental defect of these models is that the link prediction scenario, +given such data, is non-existent in the real-world. This paper is the first +systematic study with the main objective of assessing the true effectiveness of +embedding models when the unrealistic triples are removed. Our experiment +results show these models are much less accurate than what we used to perceive. +Their poor accuracy renders link prediction a task without truly effective +automated solution. Hence, we call for re-investigation of possible effective +approaches." +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_firstAuthor|Farahnaz Akrami Department of Computer Science and Engineering, University of Texas at Arlington +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_updated|2020-03-18T01:18:09Z +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_title|Realistic Re-evaluation of Knowledge Graph Completion Methods: An Experimental Study +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_published|2020-03-18T01:18:09Z +http://www.semanlink.net/doc/2020/05/2003_08001_realistic_re_evalu|arxiv_num|2003.08001 +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|creationDate|2020-05-04 +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|tag|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|arxiv_author|Mehwish Alam +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|arxiv_author|Genet Asefa Gesese +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|arxiv_author|Russa Biswas +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|arxiv_author|Harald Sack +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|title|[1910.12507] A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|bookmarkOf|https://arxiv.org/abs/1910.12507 +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|creationTime|2020-05-04T14:56:43Z +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|arxiv_summary|"Knowledge Graphs (KGs) are composed of structured information about a +particular domain in the form of entities and relations. In addition to the +structured information KGs help in facilitating interconnectivity and +interoperability between different resources represented in the Linked Data +Cloud. KGs have been used in a variety of applications such as entity linking, +question answering, recommender systems, etc. However, KG applications suffer +from high computational and storage costs. 
Hence, there arises the necessity +for a representation able to map the high dimensional KGs into low dimensional +spaces, i.e., embedding space, preserving structural as well as relational +information. This paper conducts a survey of KG embedding models which not only +consider the structured information contained in the form of entities and +relations in a KG but also the unstructured information represented as literals +such as text, numerical values, images, etc. Along with a theoretical analysis +and comparison of the methods proposed so far for generating KG embeddings with +literals, an empirical evaluation of the different methods under identical +settings has been performed for the general task of link prediction." +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|arxiv_firstAuthor|Genet Asefa Gesese +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|arxiv_updated|2019-10-28T09:06:00Z +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|arxiv_title|A Survey on Knowledge Graph Embeddings with Literals: Which model links better Literal-ly? +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|arxiv_published|2019-10-28T09:06:00Z +http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle|arxiv_num|1910.12507 +http://www.semanlink.net/doc/2020/03/bert_elmo_gpt_2_how_contex|creationDate|2020-03-28 +http://www.semanlink.net/doc/2020/03/bert_elmo_gpt_2_how_contex|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2020/03/bert_elmo_gpt_2_how_contex|tag|http://www.semanlink.net/tag/emnlp_2019 +http://www.semanlink.net/doc/2020/03/bert_elmo_gpt_2_how_contex|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2020/03/bert_elmo_gpt_2_how_contex|tag|http://www.semanlink.net/tag/contextualised_word_representations +http://www.semanlink.net/doc/2020/03/bert_elmo_gpt_2_how_contex|title|BERT, ELMo, & GPT-2: How Contextual are Contextualized Word Representations? SAIL Blog +http://www.semanlink.net/doc/2020/03/bert_elmo_gpt_2_how_contex|bookmarkOf|http://ai.stanford.edu/blog/contextual/ +http://www.semanlink.net/doc/2020/03/bert_elmo_gpt_2_how_contex|creationTime|2020-03-28T10:33:17Z +http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr|creationDate|2020-01-23 +http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr|tag|http://www.semanlink.net/tag/triplet_loss +http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr|tag|http://www.semanlink.net/tag/metric_learning +http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr|tag|http://www.semanlink.net/tag/good +http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr|comment|"Author shares his learnings building a text-independent speaker identification system. General advice and detailed explanation of the difficulties of triplet loss implementation (plateaued training, hard negative mining very costly,...): + +> implementing triplet training +efficiently and correctly proved to be frustrating and +error-prone. + +""Light bulb moment"": this [paper](/doc/2020/02/_1703_07464_no_fuss_distance_m) ""**Proxy based triplet learning**"" + +> instead of generating triplets, we learn an +embedding for each class and use +the learnt embedding as a proxy for triplets as part of the +training.
In other words, we can train end to end without +the computationally expensive step of resampling triplets +after each network update. +" +http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr|relatedDoc|http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m +http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr|title|Training a Speaker Embedding from Scratch with Triplet Learning (2018) +http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr|bookmarkOf|https://blog.goodaudience.com/training-a-speaker-embedding-from-scratch-24baf990ccf +http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr|creationTime|2020-01-23T09:03:12Z +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|creationDate|2019-09-28 +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|tag|http://www.semanlink.net/tag/time_series +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_author|Germain Forestier +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_author|Daniel F. Schmidt +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_author|Lhassane Idoumghar +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_author|Pierre-Alain Muller +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_author|François Petitjean +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_author|Hassan Ismail Fawaz +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_author|Charlotte Pelletier +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_author|Geoffrey I. Webb +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_author|Benjamin Lucas +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_author|Jonathan Weber +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|title|[1909.04939] InceptionTime: Finding AlexNet for Time Series Classification +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|bookmarkOf|https://arxiv.org/abs/1909.04939 +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|creationTime|2019-09-28T10:23:53Z +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_summary|"Time series classification (TSC) is the area of machine learning interested +in learning how to assign labels to time series. The last few decades of work +in this area have led to significant progress in the accuracy of classifiers, +with the state of the art now represented by the HIVE-COTE algorithm. While +extremely accurate, HIVE-COTE is infeasible to use in many applications because +of its very high training time complexity in O(N^2*T^4) for a dataset with N +time series of length T. For example, it takes HIVE-COTE more than 72,000s to +learn from a small dataset with N=700 time series of short length T=46. Deep +learning, on the other hand, has now received enormous attention because of its +high scalability and state-of-the-art accuracy in computer vision and natural +language processing tasks. Deep learning for TSC has only very recently started +to be explored, with the first few architectures developed over the last 3 +years only. The accuracy of deep learning for TSC has been raised to a +competitive level, but has not quite reached the level of HIVE-COTE. 
This is +what this paper achieves: outperforming HIVE-COTE's accuracy together with +scalability. We take an important step towards finding the AlexNet network for +TSC by presenting InceptionTime---an ensemble of deep Convolutional Neural +Network (CNN) models, inspired by the Inception-v4 architecture. Our +experiments show that InceptionTime slightly outperforms HIVE-COTE with a +win/draw/loss on the UCR archive of 40/6/39. Not only is InceptionTime more +accurate, but it is much faster: InceptionTime learns from that same dataset +with 700 time series in 2,300s but can also learn from a dataset with 8M time +series in 13 hours, a quantity of data that is fully out of reach of HIVE-COTE." +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_firstAuthor|Hassan Ismail Fawaz +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_updated|2019-09-13T14:28:15Z +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_title|InceptionTime: Finding AlexNet for Time Series Classification +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_published|2019-09-11T09:32:40Z +http://www.semanlink.net/doc/2019/09/_1909_04939_inceptiontime_fin|arxiv_num|1909.04939 +http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui|creationDate|2020-03-19 +http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui|tag|http://www.semanlink.net/tag/combining_knowledge_graphs +http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui|tag|http://www.semanlink.net/tag/ai_amazon +http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui|tag|http://www.semanlink.net/tag/entity_alignment +http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui|tag|http://www.semanlink.net/tag/attention_knowledge_graphs +http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui|comment|"Entity matching at Amazon: a new [#entity alignment](/tag/entity_alignment) technique that factors in information about the graph in the vicinity of the entity name. + +[#Graph neural network](/tag/graph_neural_networks) that specifically addresses the problem of **merging multi-type knowledge graphs**. 
" +http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui|title|Combining knowledge graphs, quickly and accurately +http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui|bookmarkOf|https://www.amazon.science/blog/combining-knowledge-graphs-quickly-and-accurately +http://www.semanlink.net/doc/2020/03/combining_knowledge_graphs_qui|creationTime|2020-03-19T21:33:27Z +http://www.semanlink.net/doc/2019/10/what_every_nlp_engineer_needs_t|creationDate|2019-10-18 +http://www.semanlink.net/doc/2019/10/what_every_nlp_engineer_needs_t|tag|http://www.semanlink.net/tag/pre_trained_language_models +http://www.semanlink.net/doc/2019/10/what_every_nlp_engineer_needs_t|title|What Every NLP Engineer Needs to Know About Pre-Trained Language Models TOPBOTS +http://www.semanlink.net/doc/2019/10/what_every_nlp_engineer_needs_t|bookmarkOf|https://www.topbots.com/ai-nlp-research-pretrained-language-models/ +http://www.semanlink.net/doc/2019/10/what_every_nlp_engineer_needs_t|creationTime|2019-10-18T00:55:40Z +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|creationDate|2020-02-20 +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|tag|http://www.semanlink.net/tag/word_mover_s_distance +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|tag|http://www.semanlink.net/tag/nearest_neighbor_search +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_author|Ilya Razenshteyn +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_author|Arturs Backurs +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_author|Yihe Dong +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_author|Tal Wagner +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_author|Piotr Indyk +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|title|[1910.04126] Scalable Nearest Neighbor Search for Optimal Transport +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|bookmarkOf|https://arxiv.org/abs/1910.04126 +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|creationTime|2020-02-20T09:11:40Z +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_summary|"The Optimal Transport (a.k.a. Wasserstein) distance is an increasingly +popular similarity measure for rich data domains, such as images or text +documents. This raises the necessity for fast nearest neighbor search with +respect to this distance, a problem that poses a substantial computational +bottleneck for various tasks on massive datasets. +In this work, we study fast tree-based approximation algorithms for searching +nearest neighbors w.r.t. the Wasserstein-1 distance. A standard tree-based +technique, known as Quadtree, has been previously shown to obtain good results. +We introduce a variant of this algorithm, called Flowtree, and formally prove +it achieves asymptotically better accuracy. Our extensive experiments, on +real-world text and image datasets, show that Flowtree improves over various +baselines and existing methods in either running time or accuracy. In +particular, its quality of approximation is in line with previous high-accuracy +methods, while its running time is much faster." 
+http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_firstAuthor|Arturs Backurs +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_updated|2020-02-14T14:54:37Z +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_title|Scalable Nearest Neighbor Search for Optimal Transport +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_published|2019-10-09T17:12:41Z +http://www.semanlink.net/doc/2020/02/_1910_04126_scalable_nearest_n|arxiv_num|1910.04126 +http://www.semanlink.net/doc/2019/11/barack%E2%80%99s_wife_hillary_using_kn|creationDate|2019-11-17 +http://www.semanlink.net/doc/2019/11/barack%E2%80%99s_wife_hillary_using_kn|tag|http://www.semanlink.net/tag/acl_2019 +http://www.semanlink.net/doc/2019/11/barack%E2%80%99s_wife_hillary_using_kn|tag|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/doc/2019/11/barack%E2%80%99s_wife_hillary_using_kn|comment|"> a neural language model with +mechanisms for selecting and copying facts +from a knowledge graph that are relevant to +the context + +[GitHub](https://github.com/rloganiv/kglm-model)." +http://www.semanlink.net/doc/2019/11/barack%E2%80%99s_wife_hillary_using_kn|title|Barack’s Wife Hillary: Using Knowledge Graphs for Fact-Aware Language Modeling (ACL 2019) +http://www.semanlink.net/doc/2019/11/barack%E2%80%99s_wife_hillary_using_kn|bookmarkOf|https://www.aclweb.org/anthology/P19-1598.pdf +http://www.semanlink.net/doc/2019/11/barack%E2%80%99s_wife_hillary_using_kn|creationTime|2019-11-17T15:44:42Z +http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c|creationDate|2019-10-11 +http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c|tag|http://www.semanlink.net/tag/information_theory +http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c|tag|http://www.semanlink.net/tag/computational_neuroscience +http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c|tag|http://www.semanlink.net/tag/neural_coding +http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c|comment|"> we review information-theory basics before +demonstrating its use in neural coding. We show how to use information theory to validate simple stimulus– +response models of neural coding of dynamic stimuli. Because these models require specification of +spike timing precision, they can reveal which time scales contain information in neural coding. This +approach shows that dynamic stimuli can be encoded efficiently by single neurons and that each spike +contributes to information transmission. We argue, however, that the data obtained so far do not suggest +a temporal code, in which the placement of spikes relative to each other yields additional information" +http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c|title|Information theory and neural coding (1999) (Alexander Borst and Frédéric E. 
Theunissen) +http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c|bookmarkOf|https://www.cns.nyu.edu/csh/csh06/PDFs/BorstTheuneissenNN1999.pdf +http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c|creationTime|2019-10-11T01:01:41Z +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|creationDate|2020-04-17 +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|tag|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|tag|http://www.semanlink.net/tag/yahoo +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|tag|http://www.semanlink.net/tag/recommender_systems +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|tag|http://www.semanlink.net/tag/entity_recommendation +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|tag|http://www.semanlink.net/tag/brad_pitt +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|arxiv_author|Nicolas Torzec +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|arxiv_author|Kin Sum Liu +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|arxiv_author|Chien-Chun Ni +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|comment|an embedding-based entity recommendation framework for Wikipedia that organizes Wikipedia into a collection of graphs layered on top of each other, **learns complementary entity representations from their topology and content**, and combines them with a lightweight **learning-to-rank** approach to recommend related entities on Wikipedia +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|title|[2004.06842] Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! Knowledge Graph +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|bookmarkOf|https://arxiv.org/abs/2004.06842 +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|creationTime|2020-04-17T19:14:01Z +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|arxiv_summary|"In this paper, we describe an embedding-based entity recommendation framework +for Wikipedia that organizes Wikipedia into a collection of graphs layered on +top of each other, learns complementary entity representations from their +topology and content, and combines them with a lightweight learning-to-rank +approach to recommend related entities on Wikipedia. Through offline and online +evaluations, we show that the resulting embeddings and recommendations perform +well in terms of quality and user engagement. Balancing simplicity and quality, +this framework provides default entity recommendations for English and other +languages in the Yahoo! Knowledge Graph, which Wikipedia is a core subset of." +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|arxiv_firstAuthor|Chien-Chun Ni +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|arxiv_updated|2020-04-15T00:49:27Z +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|arxiv_title|Layered Graph Embedding for Entity Recommendation using Wikipedia in the Yahoo! 
Knowledge Graph +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|arxiv_published|2020-04-15T00:49:27Z +http://www.semanlink.net/doc/2020/04/2004_06842_layered_graph_embe|arxiv_num|2004.06842 +http://www.semanlink.net/doc/2019/09/what_s_next_for_ai_yoshua_ben|creationDate|2019-09-17 +http://www.semanlink.net/doc/2019/09/what_s_next_for_ai_yoshua_ben|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/doc/2019/09/what_s_next_for_ai_yoshua_ben|title|What's next for AI - Yoshua Bengio (Interview) +http://www.semanlink.net/doc/2019/09/what_s_next_for_ai_yoshua_ben|bookmarkOf|https://www.ibm.com/watson/advantage-reports/future-of-artificial-intelligence/yoshua-bengio.html +http://www.semanlink.net/doc/2019/09/what_s_next_for_ai_yoshua_ben|creationTime|2019-09-17T18:29:52Z +http://www.semanlink.net/doc/2019/10/feature_wise_transformations|creationDate|2019-10-07 +http://www.semanlink.net/doc/2019/10/feature_wise_transformations|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/doc/2019/10/feature_wise_transformations|tag|http://www.semanlink.net/tag/grounded_language_learning +http://www.semanlink.net/doc/2019/10/feature_wise_transformations|tag|http://www.semanlink.net/tag/ml_conditioning +http://www.semanlink.net/doc/2019/10/feature_wise_transformations|comment|"> Many real-world problems require integrating multiple sources of information...When approaching such problems, it often makes sense to process one source of information in the context of another. In machine learning, we often refer to this context-based processing as conditioning: the computation carried out by a model is **conditioned** or **modulated** by information extracted from an auxiliary input. Eg.: **extract meaning from the image in the context of the question**. + +Related to this talk at Paris NLP meetup: [""Language and Perception in Deep Learning""](/doc/2019/10/language_and_perception_in_deep)" +http://www.semanlink.net/doc/2019/10/feature_wise_transformations|relatedDoc|http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep +http://www.semanlink.net/doc/2019/10/feature_wise_transformations|title|Feature-wise transformations. A simple and surprisingly effective family of conditioning mechanisms. 
(2018) +http://www.semanlink.net/doc/2019/10/feature_wise_transformations|bookmarkOf|https://distill.pub/2018/feature-wise-transformations/ +http://www.semanlink.net/doc/2019/10/feature_wise_transformations|creationTime|2019-10-07T23:30:41Z +http://www.semanlink.net/doc/2019/10/watson_developer_cloud_speech_j|creationDate|2019-10-30 +http://www.semanlink.net/doc/2019/10/watson_developer_cloud_speech_j|tag|http://www.semanlink.net/tag/ibm_watson_and_speech_to_text +http://www.semanlink.net/doc/2019/10/watson_developer_cloud_speech_j|title|watson-developer-cloud/speech-javascript-sdk: IBM Watson Speech Services for Web Browsers +http://www.semanlink.net/doc/2019/10/watson_developer_cloud_speech_j|bookmarkOf|https://github.com/watson-developer-cloud/speech-javascript-sdk +http://www.semanlink.net/doc/2019/10/watson_developer_cloud_speech_j|creationTime|2019-10-30T00:47:08Z +http://www.semanlink.net/doc/2019/06/voice_dictation_online_speech|creationDate|2019-06-11 +http://www.semanlink.net/doc/2019/06/voice_dictation_online_speech|tag|http://www.semanlink.net/tag/chrome +http://www.semanlink.net/doc/2019/06/voice_dictation_online_speech|tag|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/doc/2019/06/voice_dictation_online_speech|title|Voice Dictation - Online Speech Recognition using chrome +http://www.semanlink.net/doc/2019/06/voice_dictation_online_speech|bookmarkOf|https://dictation.io/ +http://www.semanlink.net/doc/2019/06/voice_dictation_online_speech|creationTime|2019-06-11T11:10:18Z +http://www.semanlink.net/doc/2020/04/%C2%AB_la_gestion_de_la_pandemie_de_|creationDate|2020-04-25 +http://www.semanlink.net/doc/2020/04/%C2%AB_la_gestion_de_la_pandemie_de_|tag|http://www.semanlink.net/tag/environnement +http://www.semanlink.net/doc/2020/04/%C2%AB_la_gestion_de_la_pandemie_de_|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales +http://www.semanlink.net/doc/2020/04/%C2%AB_la_gestion_de_la_pandemie_de_|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/%C2%AB_la_gestion_de_la_pandemie_de_|tag|http://www.semanlink.net/tag/ils_commencent_a_me_gonfler +http://www.semanlink.net/doc/2020/04/%C2%AB_la_gestion_de_la_pandemie_de_|title|« La gestion de la pandémie de Covid-19 et les mesures nécessaires à la sortie de crise conspirent à faire de l’environnement une question subsidiaire » +http://www.semanlink.net/doc/2020/04/%C2%AB_la_gestion_de_la_pandemie_de_|bookmarkOf|https://www.lemonde.fr/idees/article/2020/04/25/la-gestion-de-la-pandemie-de-covid-19-et-les-mesures-necessaires-a-la-sortie-de-crise-conspirent-a-faire-de-l-environnement-une-question-subsidiaire_6037754_3232.html +http://www.semanlink.net/doc/2020/04/%C2%AB_la_gestion_de_la_pandemie_de_|creationTime|2020-04-25T21:35:21Z +http://www.semanlink.net/doc/2019/08/neural_models_for_information_r|creationDate|2019-08-18 +http://www.semanlink.net/doc/2019/08/neural_models_for_information_r|tag|http://www.semanlink.net/tag/slides +http://www.semanlink.net/doc/2019/08/neural_models_for_information_r|tag|http://www.semanlink.net/tag/neural_models_for_information_retrieval +http://www.semanlink.net/doc/2019/08/neural_models_for_information_r|tag|http://www.semanlink.net/tag/bhaskar_mitra +http://www.semanlink.net/doc/2019/08/neural_models_for_information_r|title|Neural Models for Information Retrieval (2017) +http://www.semanlink.net/doc/2019/08/neural_models_for_information_r|bookmarkOf|https://www.microsoft.com/en-us/research/uploads/prod/2018/04/NeuralIR-Nov2017.pdf 
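The feature_wise_transformations entry above describes conditioning as modulating a model's computation with information extracted from an auxiliary input. A minimal sketch of such a feature-wise (FiLM-style) transformation, assuming toy shapes and a linear conditioning network (all names and values hypothetical):

```python
import numpy as np

rng = np.random.default_rng(1)

# Main-input features (e.g. image regions) and an auxiliary conditioning
# vector (e.g. an encoded question); shapes are illustrative only.
features = rng.normal(size=(8, 16))   # 8 positions x 16 channels
condition = rng.normal(size=(10,))    # encoding of the auxiliary input

# A linear "conditioning network" predicts one (gamma, beta) per channel.
W_gamma, b_gamma = rng.normal(size=(10, 16)), np.zeros(16)
W_beta, b_beta = rng.normal(size=(10, 16)), np.zeros(16)
gamma = condition @ W_gamma + b_gamma  # per-channel scale
beta = condition @ W_beta + b_beta     # per-channel shift

# Feature-wise transformation: the main computation is modulated,
# channel by channel, as a function of the auxiliary input.
modulated = gamma * features + beta
print(modulated.shape)  # (8, 16)
```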
+http://www.semanlink.net/doc/2019/08/neural_models_for_information_r|creationTime|2019-08-18T23:00:09Z +http://www.semanlink.net/doc/2020/05/huggingface_nlp_nlp_datasets_|creationDate|2020-05-27 +http://www.semanlink.net/doc/2020/05/huggingface_nlp_nlp_datasets_|tag|http://www.semanlink.net/tag/nlp_datasets +http://www.semanlink.net/doc/2020/05/huggingface_nlp_nlp_datasets_|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2020/05/huggingface_nlp_nlp_datasets_|title|huggingface/nlp: nlp: datasets and evaluation metrics for NLP in NumPy, Pandas, PyTorch and TensorFlow +http://www.semanlink.net/doc/2020/05/huggingface_nlp_nlp_datasets_|bookmarkOf|https://github.com/huggingface/nlp +http://www.semanlink.net/doc/2020/05/huggingface_nlp_nlp_datasets_|creationTime|2020-05-27T02:24:06Z +http://www.semanlink.net/doc/2019/12/correlation_clustering_wikipe|creationDate|2019-12-11 +http://www.semanlink.net/doc/2019/12/correlation_clustering_wikipe|tag|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/doc/2019/12/correlation_clustering_wikipe|comment|a method for clustering a set of objects into the optimum number of clusters without specifying that number in advance +http://www.semanlink.net/doc/2019/12/correlation_clustering_wikipe|title|Correlation clustering - Wikipedia +http://www.semanlink.net/doc/2019/12/correlation_clustering_wikipe|bookmarkOf|https://en.wikipedia.org/wiki/Correlation_clustering +http://www.semanlink.net/doc/2019/12/correlation_clustering_wikipe|creationTime|2019-12-11T03:25:37Z +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|creationDate|2019-12-20 +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|tag|http://www.semanlink.net/tag/k_nearest_neighbors_algorithm +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|tag|http://www.semanlink.net/tag/facebook_fair +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|tag|http://www.semanlink.net/tag/dan_jurafsky +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_author|Dan Jurafsky +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_author|Mike Lewis +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_author|Luke Zettlemoyer +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_author|Omer Levy +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_author|Urvashi Khandelwal +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|comment|extend LMs with nearest neighbor search in embedding space +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|title|[1911.00172] Generalization through Memorization: Nearest Neighbor Language Models +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|bookmarkOf|https://arxiv.org/abs/1911.00172 +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|creationTime|2019-12-20T23:44:45Z +http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_summary|"We introduce $k$NN-LMs, which extend a pre-trained neural language model (LM) +by linearly interpolating it with a $k$-nearest neighbors ($k$NN) model. 
The
+nearest neighbors are computed according to distance in the pre-trained LM
+embedding space, and can be drawn from any text collection, including the
+original LM training data. Applying this augmentation to a strong Wikitext-103
+LM, with neighbors drawn from the original training set, our $k$NN-LM achieves
+a new state-of-the-art perplexity of 15.79 - a 2.9 point improvement with no
+additional training. We also show that this approach has implications for
+efficiently scaling up to larger training sets and allows for effective domain
+adaptation, by simply varying the nearest neighbor datastore, again without
+further training. Qualitatively, the model is particularly helpful in
+predicting rare patterns, such as factual knowledge. Together, these results
+strongly suggest that learning similarity between sequences of text is easier
+than predicting the next word, and that nearest neighbor search is an effective
+approach for language modeling in the long tail."
+http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_firstAuthor|Urvashi Khandelwal
+http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_updated|2020-02-15T01:04:52Z
+http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_title|Generalization through Memorization: Nearest Neighbor Language Models
+http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_published|2019-11-01T01:09:53Z
+http://www.semanlink.net/doc/2019/12/_1911_00172_generalization_thr|arxiv_num|1911.00172
+http://www.semanlink.net/doc/2019/10/why_knowledge_bases_are_the_nex|creationDate|2019-10-11
+http://www.semanlink.net/doc/2019/10/why_knowledge_bases_are_the_nex|tag|http://www.semanlink.net/tag/knowledge_base
+http://www.semanlink.net/doc/2019/10/why_knowledge_bases_are_the_nex|title|Why Knowledge Bases Are The Next Big Thing
+http://www.semanlink.net/doc/2019/10/why_knowledge_bases_are_the_nex|bookmarkOf|https://www.forbes.com/sites/cognitiveworld/2019/10/10/why-knowledge-bases-are-the-next-big-thing/#170a1abcb3f2
+http://www.semanlink.net/doc/2019/10/why_knowledge_bases_are_the_nex|creationTime|2019-10-11T14:33:15Z
+http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|creationDate|2020-01-23
+http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|tag|http://www.semanlink.net/tag/nlp_google
+http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|tag|http://www.semanlink.net/tag/semantic_text_matching
+http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|tag|http://www.semanlink.net/tag/nlp_long_documents
+http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|tag|http://www.semanlink.net/tag/thewebconf_2019
+http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|tag|http://www.semanlink.net/tag/siamese_network
+http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|comment|"**A document can be represented as a hierarchy
+of paragraph, sentence and word sequences.** Different paragraphs
+and sentences can have different semantic meaning
+and importance.
+
+A multi-depth attention-based hierarchical RNN derives representations for each level of document
+structure, which are then aggregated to build a representation of the entire document.
+
+Uses a Siamese structure for semantic text matching."
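The _1911_00172 entry above extends a pre-trained LM by interpolating its next-token distribution with one induced by nearest neighbors in the LM's embedding space. A schematic sketch of that interpolation with a toy datastore (vocabulary size, k and the interpolation weight lam are made-up values):

```python
import numpy as np

def knn_lm_probs(p_lm, context_vec, keys, values, vocab_size, k=2, lam=0.25):
    """Interpolate an LM distribution with a kNN distribution, in the
    spirit of kNN-LM. keys: stored context embeddings; values[i]: the
    token id that followed stored context i."""
    # Retrieve the k nearest stored contexts (squared L2 distance).
    dists = np.sum((keys - context_vec) ** 2, axis=1)
    nearest = np.argsort(dists)[:k]
    # Turn neighbor distances into a distribution over their target tokens.
    weights = np.exp(-dists[nearest])
    weights /= weights.sum()
    p_knn = np.zeros(vocab_size)
    for w, idx in zip(weights, nearest):
        p_knn[values[idx]] += w
    # Linear interpolation between the kNN and LM distributions.
    return lam * p_knn + (1 - lam) * p_lm

# Toy example: vocabulary of 5 tokens, datastore of 4 stored contexts.
rng = np.random.default_rng(2)
p_lm = np.full(5, 0.2)           # uniform LM distribution, for illustration
keys = rng.normal(size=(4, 3))   # stored context embeddings
values = np.array([0, 3, 3, 1])  # tokens that followed those contexts
print(knn_lm_probs(p_lm, keys[1], keys, values, vocab_size=5))
```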
+http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|title|Semantic Text Matching for Long-Form Documents (2019) +http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|bookmarkOf|https://research.google/pubs/pub47856/ +http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|bookmarkOf|https://jyunyu.csie.org/docs/pubs/www2019paper.pdf +http://www.semanlink.net/doc/2020/01/semantic_text_matching_for_long|creationTime|2020-01-23T10:21:17Z +http://www.semanlink.net/doc/2020/01/huggingface_tokenizers_fast_st|creationDate|2020-01-11 +http://www.semanlink.net/doc/2020/01/huggingface_tokenizers_fast_st|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/01/huggingface_tokenizers_fast_st|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2020/01/huggingface_tokenizers_fast_st|title|huggingface/tokenizers: Fast State-of-the-Art Tokenizers optimized for Research and Production +http://www.semanlink.net/doc/2020/01/huggingface_tokenizers_fast_st|bookmarkOf|https://github.com/huggingface/tokenizers +http://www.semanlink.net/doc/2020/01/huggingface_tokenizers_fast_st|creationTime|2020-01-11T11:52:47Z +http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_|creationDate|2019-09-03 +http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_|tag|http://www.semanlink.net/tag/baselines +http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_|tag|http://www.semanlink.net/tag/sentiment_analysis +http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_|tag|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_|tag|http://www.semanlink.net/tag/naive_bayes_classifier +http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_|tag|http://www.semanlink.net/tag/support_vector_machine +http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_|title|Baselines and Bigrams: Simple, Good Sentiment and Topic Classification. Sida Wang and Christopher D. 
Manning +http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_|bookmarkOf|https://dl.acm.org/citation.cfm?id=2390688 +http://www.semanlink.net/doc/2019/09/baselines_and_bigrams_simple_|creationTime|2019-09-03T23:32:33Z +http://www.semanlink.net/doc/2019/12/meta_reinforcement_learning|creationDate|2019-12-07 +http://www.semanlink.net/doc/2019/12/meta_reinforcement_learning|tag|http://www.semanlink.net/tag/meta_reinforcement_learning +http://www.semanlink.net/doc/2019/12/meta_reinforcement_learning|tag|http://www.semanlink.net/tag/lilian_weng +http://www.semanlink.net/doc/2019/12/meta_reinforcement_learning|title|Meta Reinforcement Learning +http://www.semanlink.net/doc/2019/12/meta_reinforcement_learning|bookmarkOf|https://lilianweng.github.io/lil-log/2019/06/23/meta-reinforcement-learning.html +http://www.semanlink.net/doc/2019/12/meta_reinforcement_learning|creationTime|2019-12-07T11:26:22Z +http://www.semanlink.net/doc/2020/01/thread_by_wzuidema_the_2010s_|creationDate|2020-01-03 +http://www.semanlink.net/doc/2020/01/thread_by_wzuidema_the_2010s_|tag|http://www.semanlink.net/tag/nlp_introduction +http://www.semanlink.net/doc/2020/01/thread_by_wzuidema_the_2010s_|tag|http://www.semanlink.net/tag/nlp_current_state +http://www.semanlink.net/doc/2020/01/thread_by_wzuidema_the_2010s_|title|Thread by @wzuidema: The 2010s were an eventful decade for NLP! Here are ten shocking developments since 2010, and 13 papers* illustrating them, that have change… +http://www.semanlink.net/doc/2020/01/thread_by_wzuidema_the_2010s_|bookmarkOf|https://threadreaderapp.com/thread/1212727352037429248.html +http://www.semanlink.net/doc/2020/01/thread_by_wzuidema_the_2010s_|creationTime|2020-01-03T12:15:41Z +http://www.semanlink.net/doc/2020/01/self_supervised_learning_and_co|creationDate|2020-01-21 +http://www.semanlink.net/doc/2020/01/self_supervised_learning_and_co|tag|http://www.semanlink.net/tag/computer_vision +http://www.semanlink.net/doc/2020/01/self_supervised_learning_and_co|tag|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/doc/2020/01/self_supervised_learning_and_co|tag|http://www.semanlink.net/tag/self_supervised_learning +http://www.semanlink.net/doc/2020/01/self_supervised_learning_and_co|tag|http://www.semanlink.net/tag/noise_contrastive_estimation +http://www.semanlink.net/doc/2020/01/self_supervised_learning_and_co|title|Self-supervised learning and computer vision · fast.ai +http://www.semanlink.net/doc/2020/01/self_supervised_learning_and_co|bookmarkOf|https://www.fast.ai/2020/01/13/self_supervised/ +http://www.semanlink.net/doc/2020/01/self_supervised_learning_and_co|creationTime|2020-01-21T08:56:49Z +http://www.semanlink.net/doc/2019/08/peter_bloem|creationDate|2019-08-21 +http://www.semanlink.net/doc/2019/08/peter_bloem|tag|http://www.semanlink.net/tag/peter_bloem +http://www.semanlink.net/doc/2019/08/peter_bloem|title|Peter Bloem +http://www.semanlink.net/doc/2019/08/peter_bloem|bookmarkOf|http://www.peterbloem.nl/ +http://www.semanlink.net/doc/2019/08/peter_bloem|creationTime|2019-08-21T22:05:00Z +http://www.semanlink.net/doc/2020/04/brunorb_ahocorasick_aho_corasi|creationDate|2020-04-18 +http://www.semanlink.net/doc/2020/04/brunorb_ahocorasick_aho_corasi|tag|http://www.semanlink.net/tag/aho_corasick_algorithm +http://www.semanlink.net/doc/2020/04/brunorb_ahocorasick_aho_corasi|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/04/brunorb_ahocorasick_aho_corasi|title|BrunoRB/ahocorasick: Aho-corasick for javascript. 
+http://www.semanlink.net/doc/2020/04/brunorb_ahocorasick_aho_corasi|bookmarkOf|https://github.com/BrunoRB/ahocorasick +http://www.semanlink.net/doc/2020/04/brunorb_ahocorasick_aho_corasi|creationTime|2020-04-18T00:37:31Z +http://www.semanlink.net/doc/2019/06/visual_and_conceptual_grounding|creationDate|2019-06-06 +http://www.semanlink.net/doc/2019/06/visual_and_conceptual_grounding|tag|http://www.semanlink.net/tag/knowledge_driven_embeddings +http://www.semanlink.net/doc/2019/06/visual_and_conceptual_grounding|tag|http://www.semanlink.net/tag/lynda_tamine +http://www.semanlink.net/doc/2019/06/visual_and_conceptual_grounding|tag|http://www.semanlink.net/tag/laure_soulier +http://www.semanlink.net/doc/2019/06/visual_and_conceptual_grounding|tag|http://www.semanlink.net/tag/grounded_language_learning +http://www.semanlink.net/doc/2019/06/visual_and_conceptual_grounding|title|Visual and conceptual grounding for text representation learning +http://www.semanlink.net/doc/2019/06/visual_and_conceptual_grounding|bookmarkOf|https://mlia.lip6.fr/soulier/wp-content/uploads/sites/2/2019/06/GDR_IA_2019_compressed.pdf +http://www.semanlink.net/doc/2019/06/visual_and_conceptual_grounding|creationTime|2019-06-06T08:34:01Z +http://www.semanlink.net/doc/2019/12/defiant_mark_zuckerberg_defends|creationDate|2019-12-03 +http://www.semanlink.net/doc/2019/12/defiant_mark_zuckerberg_defends|tag|http://www.semanlink.net/tag/targeted_ads +http://www.semanlink.net/doc/2019/12/defiant_mark_zuckerberg_defends|tag|http://www.semanlink.net/tag/mark_zuckerberg +http://www.semanlink.net/doc/2019/12/defiant_mark_zuckerberg_defends|tag|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/doc/2019/12/defiant_mark_zuckerberg_defends|tag|http://www.semanlink.net/tag/deletefb +http://www.semanlink.net/doc/2019/12/defiant_mark_zuckerberg_defends|title|Defiant Mark Zuckerberg defends Facebook policy to allow false ads Technology The Guardian +http://www.semanlink.net/doc/2019/12/defiant_mark_zuckerberg_defends|bookmarkOf|https://www.theguardian.com/technology/2019/dec/02/mark-zuckerberg-facebook-policy-fake-ads?CMP=Share_iOSApp_Other +http://www.semanlink.net/doc/2019/12/defiant_mark_zuckerberg_defends|creationTime|2019-12-03T00:47:10Z +http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|creationDate|2019-06-24 +http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|tag|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|arxiv_author|Kawin Ethayarajh +http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|arxiv_author|David Duvenaud +http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|arxiv_author|Graeme Hirst +http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|title|[1810.04882] Towards Understanding Linear Word Analogies +http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|bookmarkOf|https://arxiv.org/abs/1810.04882 +http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|creationTime|2019-06-24T08:33:44Z +http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|arxiv_summary|"A surprising property of word vectors is that word analogies can often be +solved with vector arithmetic. However, it is unclear why arithmetic operators +correspond to non-linear embedding models such as skip-gram with negative +sampling (SGNS). 
We provide a formal explanation of this phenomenon without
+making the strong assumptions that past theories have made about the vector
+space and word distribution. Our theory has several implications. Past work has
+conjectured that linear substructures exist in vector spaces because relations
+can be represented as ratios; we prove that this holds for SGNS. We provide
+novel justification for the addition of SGNS word vectors by showing that it
+automatically down-weights the more frequent word, as weighting schemes do ad
+hoc. Lastly, we offer an information theoretic interpretation of Euclidean
+distance in vector spaces, justifying its use in capturing word dissimilarity."
+http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|arxiv_firstAuthor|Kawin Ethayarajh
+http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|arxiv_updated|2019-08-12T04:04:15Z
+http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|arxiv_title|Towards Understanding Linear Word Analogies
+http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|arxiv_published|2018-10-11T08:08:40Z
+http://www.semanlink.net/doc/2019/06/_1810_04882_towards_understand|arxiv_num|1810.04882
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|creationDate|2020-02-09
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|tag|http://www.semanlink.net/tag/zero_shot_learning
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|tag|http://www.semanlink.net/tag/triplet_loss
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|tag|http://www.semanlink.net/tag/similarity_learning
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|tag|http://www.semanlink.net/tag/metric_learning
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|tag|http://www.semanlink.net/tag/google_research
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_author|Sergey Ioffe
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_author|Thomas K. Leung
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_author|Saurabh Singh
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_author|Alexander Toshev
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_author|Yair Movshovitz-Attias
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|comment|"> We address the problem of distance metric learning (DML), defined as learning a distance consistent with a notion of semantic similarity...
+> Traditionally, supervision is expressed in the form of sets of points that follow
+an ordinal relationship – an anchor point x is similar to
+a set of positive points Y, and dissimilar to a set of negative
+points Z, and a loss defined over these distances is minimized.
+> Triplet-Based methods are challenging to optimize (a main issue is the need for finding informative triplets).
+>
+> We propose to **optimize the triplet loss on a different space of triplets, consisting of an anchor data point and similar and dissimilar proxy points which are learned as well**. These proxies approximate the original data points, so that a triplet loss over the proxies is a tight upper bound of the original loss.
+ +Mentioned in this [blog post](/doc/2020/01/training_a_speaker_embedding_fr): + +> ""**Proxy based triplet learning**"": instead of generating triplets, we learn an embedding for each class and use the learnt embedding as a proxy for triplets as part of the training. In other words, we can train end to end without the computationally expensive step of resampling triplets after each network update. + +Near the conclusion: + +> Our formulation of Proxy-NCA loss produces a loss very +similar to the standard cross-entropy loss used in classification. +However, we arrive at our formulation from a different +direction: we are not interested in the actual classifier and +indeed discard the proxies once the model has been trained. +Instead, the proxies are auxiliary variables, enabling more +effective optimization of the embedding model parameters. +**As such, our formulation not only enables us to surpass the +state of the art in zero-shot learning, but also offers an explanation +to the effectiveness of the standard trick of training +a classifier, and using its penultimate layer’s output as the +embedding.**" +http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|relatedDoc|http://www.semanlink.net/doc/2020/01/training_a_speaker_embedding_fr +http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|title|[1703.07464] No Fuss Distance Metric Learning using Proxies +http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|bookmarkOf|https://arxiv.org/abs/1703.07464 +http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|creationTime|2020-02-09T18:44:26Z +http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_summary|"We address the problem of distance metric learning (DML), defined as learning +a distance consistent with a notion of semantic similarity. Traditionally, for +this problem supervision is expressed in the form of sets of points that follow +an ordinal relationship -- an anchor point $x$ is similar to a set of positive +points $Y$, and dissimilar to a set of negative points $Z$, and a loss defined +over these distances is minimized. While the specifics of the optimization +differ, in this work we collectively call this type of supervision Triplets and +all methods that follow this pattern Triplet-Based methods. These methods are +challenging to optimize. A main issue is the need for finding informative +triplets, which is usually achieved by a variety of tricks such as increasing +the batch size, hard or semi-hard triplet mining, etc. Even with these tricks, +the convergence rate of such methods is slow. In this paper we propose to +optimize the triplet loss on a different space of triplets, consisting of an +anchor data point and similar and dissimilar proxy points which are learned as +well. These proxies approximate the original data points, so that a triplet +loss over the proxies is a tight upper bound of the original loss. This +proxy-based loss is empirically better behaved. As a result, the proxy-loss +improves on state-of-art results for three standard zero-shot learning +datasets, by up to 15% points, while converging three times as fast as other +triplet-based losses." 
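A minimal sketch of the proxy idea quoted above: one learnable proxy per class replaces sampled positives and negatives, so no triplet mining is needed. Toy NumPy values; in the paper the proxies are trained jointly with the embedding network:

```python
import numpy as np

def proxy_nca_loss(embedding, proxies, label):
    """Proxy-NCA-style loss for one example: attract the embedding to its
    class proxy, repel it from all other proxies; no triplet sampling."""
    dists = np.sum((proxies - embedding) ** 2, axis=1)  # distance to each proxy
    logits = -dists
    # Softmax over negated distances ~ cross-entropy against the true proxy,
    # which is why this resembles training a classifier and keeping the
    # penultimate layer's output as the embedding.
    log_probs = logits - np.log(np.sum(np.exp(logits)))
    return -log_probs[label]

rng = np.random.default_rng(3)
proxies = rng.normal(size=(4, 8))  # one proxy per class (learned; random here)
x = rng.normal(size=8)             # embedding of one training example
print(proxy_nca_loss(x, proxies, label=2))
```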
+http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_firstAuthor|Yair Movshovitz-Attias +http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_updated|2017-08-01T19:52:13Z +http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_title|No Fuss Distance Metric Learning using Proxies +http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_published|2017-03-21T23:11:56Z +http://www.semanlink.net/doc/2020/02/_1703_07464_no_fuss_distance_m|arxiv_num|1703.07464 +http://www.semanlink.net/doc/2020/01/10_ml_nlp_research_highlights|creationDate|2020-01-06 +http://www.semanlink.net/doc/2020/01/10_ml_nlp_research_highlights|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2020/01/10_ml_nlp_research_highlights|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/01/10_ml_nlp_research_highlights|tag|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/doc/2020/01/10_ml_nlp_research_highlights|title|10 ML & NLP Research Highlights of 2019 +http://www.semanlink.net/doc/2020/01/10_ml_nlp_research_highlights|bookmarkOf|https://ruder.io/research-highlights-2019/ +http://www.semanlink.net/doc/2020/01/10_ml_nlp_research_highlights|creationTime|2020-01-06T10:28:48Z +http://www.semanlink.net/doc/2020/05/sebastian_riedel_sur_twitter_|creationDate|2020-05-02 +http://www.semanlink.net/doc/2020/05/sebastian_riedel_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/05/sebastian_riedel_sur_twitter_|tag|http://www.semanlink.net/tag/blink +http://www.semanlink.net/doc/2020/05/sebastian_riedel_sur_twitter_|title|Sebastian Riedel sur Twitter : Happy to introduce BLINK, the @facebookai open-source entity linker!... 
+http://www.semanlink.net/doc/2020/05/sebastian_riedel_sur_twitter_|bookmarkOf|https://twitter.com/riedelcastro/status/1256180196698394626?s=20 +http://www.semanlink.net/doc/2020/05/sebastian_riedel_sur_twitter_|creationTime|2020-05-02T11:33:35Z +http://www.semanlink.net/doc/2019/11/hello_world_for_javascript_with|creationDate|2019-11-19 +http://www.semanlink.net/doc/2019/11/hello_world_for_javascript_with|tag|http://www.semanlink.net/tag/hello_world +http://www.semanlink.net/doc/2019/11/hello_world_for_javascript_with|tag|http://www.semanlink.net/tag/npm +http://www.semanlink.net/doc/2019/11/hello_world_for_javascript_with|tag|http://www.semanlink.net/tag/brouteur +http://www.semanlink.net/doc/2019/11/hello_world_for_javascript_with|tag|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/doc/2019/11/hello_world_for_javascript_with|tag|http://www.semanlink.net/tag/browserify +http://www.semanlink.net/doc/2019/11/hello_world_for_javascript_with|title|Hello World for JavaScript with npm modules in the browser +http://www.semanlink.net/doc/2019/11/hello_world_for_javascript_with|bookmarkOf|https://medium.com/jeremy-keeshin/hello-world-for-javascript-with-npm-modules-in-the-browser-6020f82d1072 +http://www.semanlink.net/doc/2019/11/hello_world_for_javascript_with|creationTime|2019-11-19T16:13:06Z +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|creationDate|2019-06-29 +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|tag|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|tag|http://www.semanlink.net/tag/knowledge +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|arxiv_author|James L. McClelland +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|arxiv_author|Andrew M. Saxe +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|arxiv_author|Surya Ganguli +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|comment|"> a fundamental conceptual question: what are the theoretical principles governing the ability of neural networks to acquire, organize, and deploy abstract knowledge by integrating across many individual experiences? +" +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|title|[1810.10531] A mathematical theory of semantic development in deep neural networks +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|bookmarkOf|https://arxiv.org/abs/1810.10531 +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|creationTime|2019-06-29T15:22:55Z +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|arxiv_summary|"An extensive body of empirical research has revealed remarkable regularities +in the acquisition, organization, deployment, and neural representation of +human semantic knowledge, thereby raising a fundamental conceptual question: +what are the theoretical principles governing the ability of neural networks to +acquire, organize, and deploy abstract knowledge by integrating across many +individual experiences? We address this question by mathematically analyzing +the nonlinear dynamics of learning in deep linear networks. 
We find exact +solutions to this learning dynamics that yield a conceptual explanation for the +prevalence of many disparate phenomena in semantic cognition, including the +hierarchical differentiation of concepts through rapid developmental +transitions, the ubiquity of semantic illusions between such transitions, the +emergence of item typicality and category coherence as factors controlling the +speed of semantic processing, changing patterns of inductive projection over +development, and the conservation of semantic similarity in neural +representations across species. Thus, surprisingly, our simple neural model +qualitatively recapitulates many diverse regularities underlying semantic +development, while providing analytic insight into how the statistical +structure of an environment can interact with nonlinear deep learning dynamics +to give rise to these regularities." +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|arxiv_firstAuthor|Andrew M. Saxe +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|arxiv_updated|2018-10-23T22:20:27Z +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|arxiv_title|A mathematical theory of semantic development in deep neural networks +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|arxiv_published|2018-10-23T22:20:27Z +http://www.semanlink.net/doc/2019/06/_1810_10531_a_mathematical_the|arxiv_num|1810.10531 +http://www.semanlink.net/doc/2020/05/bootstrapping_entity_alignment_|creationDate|2020-05-11 +http://www.semanlink.net/doc/2020/05/bootstrapping_entity_alignment_|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/05/bootstrapping_entity_alignment_|tag|http://www.semanlink.net/tag/kd_mkb_biblio +http://www.semanlink.net/doc/2020/05/bootstrapping_entity_alignment_|tag|http://www.semanlink.net/tag/ijcai +http://www.semanlink.net/doc/2020/05/bootstrapping_entity_alignment_|tag|http://www.semanlink.net/tag/entity_alignment +http://www.semanlink.net/doc/2020/05/bootstrapping_entity_alignment_|comment|"Embedding-based entity alignment: finds entity alignment by measuring the similarities between entity embeddings + +> Existing approaches are challenged by the lack of enough prior alignment as labeled training data. +> A bootstrapping approach: it iteratively labels likely entity alignment as training data for learning alignment-oriented KG embeddings. 
+ +[GitHub](https://github.com/nju-websoft/BootEA)" +http://www.semanlink.net/doc/2020/05/bootstrapping_entity_alignment_|title|Bootstrapping Entity Alignment with Knowledge Graph Embedding IJCAI +http://www.semanlink.net/doc/2020/05/bootstrapping_entity_alignment_|bookmarkOf|https://www.ijcai.org/Proceedings/2018/611 +http://www.semanlink.net/doc/2020/05/bootstrapping_entity_alignment_|creationTime|2020-05-11T21:59:04Z +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|creationDate|2020-05-12 +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|tag|http://www.semanlink.net/tag/ai_facebook +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|tag|http://www.semanlink.net/tag/explainable_ai +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|tag|http://www.semanlink.net/tag/recommender_systems +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|tag|http://www.semanlink.net/tag/kd_mkb_related +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|arxiv_author|Yan Zhang +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|arxiv_author|Hanning Zhou +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|arxiv_author|Xiaoran Xu +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|arxiv_author|Yuan Zhang +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|comment|"distilling structured knowledge from a differentiable path-based recommendation model. + +> proposed framework can achieve state-of-the-art recommendation performance and meanwhile provide interpretable recommendation reasons" +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|title|[1912.08422] Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|bookmarkOf|https://arxiv.org/abs/1912.08422 +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|creationTime|2020-05-12T11:11:16Z +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|arxiv_summary|"Recently, the embedding-based recommendation models (e.g., matrix +factorization and deep models) have been prevalent in both academia and +industry due to their effectiveness and flexibility. However, they also have +such intrinsic limitations as lacking explainability and suffering from data +sparsity. In this paper, we propose an end-to-end joint learning framework to +get around these limitations without introducing any extra overhead by +distilling structured knowledge from a differentiable path-based recommendation +model. Through extensive experiments, we show that our proposed framework can +achieve state-of-the-art recommendation performance and meanwhile provide +interpretable recommendation reasons." 
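The bootstrapping_entity_alignment entry above finds alignments by measuring similarities between entity embeddings, then iteratively labels the most likely pairs as new training data. A toy sketch of one scoring-and-selection step (the embeddings, threshold and mutual-nearest-neighbor filter are illustrative assumptions; a real system such as BootEA would then retrain on the new labels):

```python
import numpy as np

def likely_alignments(emb_kg1, emb_kg2, threshold=0.9):
    """One bootstrapping step: score cross-KG entity pairs by cosine
    similarity and keep mutually-best pairs above a confidence threshold."""
    a = emb_kg1 / np.linalg.norm(emb_kg1, axis=1, keepdims=True)
    b = emb_kg2 / np.linalg.norm(emb_kg2, axis=1, keepdims=True)
    sim = a @ b.T  # cosine similarity matrix
    pairs = []
    for i in range(sim.shape[0]):
        j = int(np.argmax(sim[i]))
        # mutual nearest neighbors + confidence filter
        if np.argmax(sim[:, j]) == i and sim[i, j] >= threshold:
            pairs.append((i, j, float(sim[i, j])))
    return pairs  # used as extra labeled alignment data in the next round

rng = np.random.default_rng(4)
e1 = rng.normal(size=(5, 8))
e2 = e1 + 0.05 * rng.normal(size=(5, 8))  # KG2 as a noisy copy of KG1 (toy)
print(likely_alignments(e1, e2))
```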
+http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|arxiv_firstAuthor|Yuan Zhang +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|arxiv_updated|2019-12-18T07:43:52Z +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|arxiv_title|Distilling Structured Knowledge into Embeddings for Explainable and Accurate Recommendation +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|arxiv_published|2019-12-18T07:43:52Z +http://www.semanlink.net/doc/2020/05/1912_08422_distilling_structu|arxiv_num|1912.08422 +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|creationDate|2019-07-28 +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|tag|http://www.semanlink.net/tag/automobile_manuals +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|tag|http://www.semanlink.net/tag/transfer_learning +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|tag|http://www.semanlink.net/tag/user_manuals +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|tag|http://www.semanlink.net/tag/nlp_reading_comprehension +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|tag|http://www.semanlink.net/tag/microsoft_research +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|comment|"A transfer learning–based approach for +adapting models that have proven good at answering general interest– +type questions to documents in specialized domains using only limited +amounts of domain-specific example data +" +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|title|Bringing the power of machine reading comprehension to specialized documents - Microsoft Research +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|bookmarkOf|https://www.microsoft.com/en-us/research/blog/bringing-the-power-of-machine-reading-comprehension-to-specialized-documents/ +http://www.semanlink.net/doc/2019/07/bringing_the_power_of_machine_r|creationTime|2019-07-28T10:45:51Z +http://www.semanlink.net/doc/2020/04/coronavirus_en_france_%C2%AB_en_ma|creationDate|2020-04-11 +http://www.semanlink.net/doc/2020/04/coronavirus_en_france_%C2%AB_en_ma|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/coronavirus_en_france_%C2%AB_en_ma|tag|http://www.semanlink.net/tag/covid19_impreparation +http://www.semanlink.net/doc/2020/04/coronavirus_en_france_%C2%AB_en_ma|title|Coronavirus en France : « En matière de prévention, nous ne sommes pas à la hauteur de l’épidémie » +http://www.semanlink.net/doc/2020/04/coronavirus_en_france_%C2%AB_en_ma|bookmarkOf|https://www.lemonde.fr/planete/article/2020/04/11/en-matiere-de-prevention-nous-ne-sommes-pas-a-la-hauteur-de-l-epidemie_6036316_3244.html +http://www.semanlink.net/doc/2020/04/coronavirus_en_france_%C2%AB_en_ma|creationTime|2020-04-11T14:45:02Z +http://www.semanlink.net/doc/2019/09/machine_translation_for_african|creationDate|2019-09-02 +http://www.semanlink.net/doc/2019/09/machine_translation_for_african|tag|http://www.semanlink.net/tag/machine_translation +http://www.semanlink.net/doc/2019/09/machine_translation_for_african|tag|http://www.semanlink.net/tag/african_languages +http://www.semanlink.net/doc/2019/09/machine_translation_for_african|tag|http://www.semanlink.net/tag/slides 
+http://www.semanlink.net/doc/2019/09/machine_translation_for_african|tag|http://www.semanlink.net/tag/nlp_4_africa
+http://www.semanlink.net/doc/2019/09/machine_translation_for_african|title|Machine Translation for African Languages
+http://www.semanlink.net/doc/2019/09/machine_translation_for_african|bookmarkOf|https://docs.google.com/presentation/d/1IUQF2GmijfJd9RToDSEmXJOoa-oifcDTrcrV4IQPrfA/edit#slide=id.p
+http://www.semanlink.net/doc/2019/09/machine_translation_for_african|creationTime|2019-09-02T00:53:25Z
+http://www.semanlink.net/doc/2019/09/new_study_suggests_dark_matter_|creationDate|2019-09-15
+http://www.semanlink.net/doc/2019/09/new_study_suggests_dark_matter_|tag|http://www.semanlink.net/tag/big_bang
+http://www.semanlink.net/doc/2019/09/new_study_suggests_dark_matter_|tag|http://www.semanlink.net/tag/matiere_noire
+http://www.semanlink.net/doc/2019/09/new_study_suggests_dark_matter_|title|New Study Suggests Dark matter predates the ‘Big Bang’ — but what does that actually mean?
+http://www.semanlink.net/doc/2019/09/new_study_suggests_dark_matter_|bookmarkOf|https://medium.com/swlh/new-study-suggests-dark-matter-predates-the-big-bang-but-what-does-that-actually-mean-cc3d359a8989
+http://www.semanlink.net/doc/2019/09/new_study_suggests_dark_matter_|creationTime|2019-09-15T15:21:39Z
+http://www.semanlink.net/doc/2019/07/all_my_tweets_view_all_your_t|creationDate|2019-07-08
+http://www.semanlink.net/doc/2019/07/all_my_tweets_view_all_your_t|tag|http://www.semanlink.net/tag/twitter
+http://www.semanlink.net/doc/2019/07/all_my_tweets_view_all_your_t|title|All My Tweets - View all your tweets on one page.
+http://www.semanlink.net/doc/2019/07/all_my_tweets_view_all_your_t|bookmarkOf|https://www.allmytweets.net/
+http://www.semanlink.net/doc/2019/07/all_my_tweets_view_all_your_t|creationTime|2019-07-08T14:42:34Z
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|creationDate|2020-02-14
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|tag|http://www.semanlink.net/tag/paris_nlp_meetup
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|tag|http://www.semanlink.net/tag/nlp_human_resources
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|tag|http://www.semanlink.net/tag/discounted_cumulative_gain
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|tag|http://www.semanlink.net/tag/ranking_information_retrieval
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|tag|http://www.semanlink.net/tag/job_matching
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|comment|"Meetup NLP #6 – July 25, 2018 Ismael Belghiti, CTO @ Hiresweet
+
+> how different NLP techniques can be applied to compute a matching score between a profile and a job offer, comparing their performance on a dedicated ranking metric."
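The Hiresweet talk above compares profile/offer matching techniques on a dedicated ranking metric, and the entry is tagged discounted_cumulative_gain. A small sketch of NDCG, the usual metric of that family (relevance grades are toy values):

```python
import numpy as np

def dcg(relevances):
    """Discounted cumulative gain of a ranked list of relevance grades."""
    gains = 2.0 ** np.asarray(relevances, dtype=float) - 1
    discounts = np.log2(np.arange(2, len(gains) + 2))  # log2(rank + 1)
    return float(np.sum(gains / discounts))

def ndcg(relevances):
    """DCG normalized by the ideal (sorted) ranking; in [0, 1]."""
    ideal = dcg(sorted(relevances, reverse=True))
    return dcg(relevances) / ideal if ideal > 0 else 0.0

# Toy example: graded relevance of profiles ranked for one job offer.
print(ndcg([3, 2, 3, 0, 1, 2]))
```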
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|title|Information Retrieval for HR
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|bookmarkOf|https://nlpparis.files.wordpress.com/2018/07/meetup_nlp_hiresweet.pdf
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|creationTime|2020-02-14T16:57:51Z
+http://www.semanlink.net/doc/2020/02/information_retrieval_for_hr|mainDoc|http://www.semanlink.net/doc/?uri=https%3A%2F%2Fwww.meetup.com%2Ffr-FR%2FParis-NLP%2Fevents%2F242014884%2F%3Fcomment_table_id%3D493219381%26comment_table_name%3Devent_comment
+http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan|creationDate|2020-04-22
+http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan|tag|http://www.semanlink.net/tag/softmax
+http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan|tag|http://www.semanlink.net/tag/geoffrey_hinton
+http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan|tag|http://www.semanlink.net/tag/knowledge_distillation
+http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan|comment|"About [Hinton's 2015 paper](/doc/2020/04/1503_02531_distilling_the_kno)
+
> The role of temperature is to
push the model into a region where it’s emitting less extreme
probabilities, so that they’re more informative to our calculations"
+http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan|relatedDoc|http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno
+http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan|title|Turning Up the Heat: The Mechanics of Model Distillation
+http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan|bookmarkOf|https://towardsdatascience.com/turning-up-the-heat-the-mechanics-of-model-distillation-25ca337b5c7c
+http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan|creationTime|2020-04-22T21:40:13Z
+http://www.semanlink.net/doc/2020/04/turning_up_the_heat_the_mechan|mainDoc|http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno
+http://www.semanlink.net/doc/2019/10/paris_nlp_season_4_meetup_1_at|creationDate|2019-10-07
+http://www.semanlink.net/doc/2019/10/paris_nlp_season_4_meetup_1_at|tag|http://www.semanlink.net/tag/paris_nlp_meetup
+http://www.semanlink.net/doc/2019/10/paris_nlp_season_4_meetup_1_at|tag|http://www.semanlink.net/tag/j_y_etais
+http://www.semanlink.net/doc/2019/10/paris_nlp_season_4_meetup_1_at|comment|"Slides of the [""Language and Perception in Deep Learning""](/doc/2019/10/language_and_perception_in_deep) talk"
+http://www.semanlink.net/doc/2019/10/paris_nlp_season_4_meetup_1_at|relatedDoc|http://www.semanlink.net/doc/2019/10/language_and_perception_in_deep
+http://www.semanlink.net/doc/2019/10/paris_nlp_season_4_meetup_1_at|title|Paris NLP Season 4 Meetup #1 at Algolia
+http://www.semanlink.net/doc/2019/10/paris_nlp_season_4_meetup_1_at|bookmarkOf|https://nlpparis.wordpress.com/2019/10/02/paris-nlp-season-4-meetup-1-at-algolia/
+http://www.semanlink.net/doc/2019/10/paris_nlp_season_4_meetup_1_at|creationTime|2019-10-07T23:04:39Z
+http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|creationDate|2020-02-11
+http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|tag|http://www.semanlink.net/tag/nlp_google
+http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|tag|http://www.semanlink.net/tag/language_model
+http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|tag|http://www.semanlink.net/tag/language_models_size +http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|comment|"> It has recently been observed that neural language +models trained on unstructured text can +implicitly store and retrieve knowledge using +natural language queries. + +indeed, cf. Facebook's paper [Language Models as Knowledge Bases?](/doc/2019/09/_1909_01066_language_models_as) + +> In this short paper, +we measure the practical utility of this +approach by fine-tuning pre-trained models to +answer questions without access to any external +context or knowledge. + + +> we show that a large language +model pre-trained on unstructured text can +attain competitive results on open-domain question +answering benchmarks without any access +to external knowledge + +BUT: + +>1. state-of-the-art results only with the largest model +which had 11 billion parameters. +>1. “open-book” models +typically provide some indication of what information +they accessed when answering a question +that provides a useful form of interpretability. +In contrast, our model distributes knowledge +in its parameters in an inexplicable way, which +precludes this form of interpretability. +>1. **the maximum-likelihood objective provides no guarantees as to whether +a model will learn a fact or not.** + +So, what's the point? To be compared with this [IBM's paper](/doc/2019/09/_1909_04120_span_selection_pre): ""a new pre-training task inspired by reading comprehension and an effort to avoid encoding general knowledge in the transformer network itself""" +http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|relatedDoc|http://www.semanlink.net/doc/2019/09/_1909_04120_span_selection_pre +http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|relatedDoc|http://www.semanlink.net/doc/2019/09/_1909_01066_language_models_as +http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|title|How Much Knowledge Can You Pack Into the Parameters of a Language Model? +http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|bookmarkOf|https://craffel.github.io/publications/arxiv2020how.pdf +http://www.semanlink.net/doc/2020/02/how_much_knowledge_can_you_pack|creationTime|2020-02-11T22:56:31Z +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|creationDate|2020-01-01 +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|tag|http://www.semanlink.net/tag/catastrophic_forgetting +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|tag|http://www.semanlink.net/tag/continual_learning +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_author|Jose L. Part +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_author|Christopher Kanan +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_author|Ronald Kemker +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_author|Stefan Wermter +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_author|German I. 
Parisi +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|title|[1802.07569] Continual Lifelong Learning with Neural Networks: A Review +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|bookmarkOf|https://arxiv.org/abs/1802.07569 +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|creationTime|2020-01-01T12:12:08Z +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_summary|"Humans and animals have the ability to continually acquire, fine-tune, and +transfer knowledge and skills throughout their lifespan. This ability, referred +to as lifelong learning, is mediated by a rich set of neurocognitive mechanisms +that together contribute to the development and specialization of our +sensorimotor skills as well as to long-term memory consolidation and retrieval. +Consequently, lifelong learning capabilities are crucial for autonomous agents +interacting in the real world and processing continuous streams of information. +However, lifelong learning remains a long-standing challenge for machine +learning and neural network models since the continual acquisition of +incrementally available information from non-stationary data distributions +generally leads to catastrophic forgetting or interference. This limitation +represents a major drawback for state-of-the-art deep neural network models +that typically learn representations from stationary batches of training data, +thus without accounting for situations in which information becomes +incrementally available over time. In this review, we critically summarize the +main challenges linked to lifelong learning for artificial learning systems and +compare existing neural network approaches that alleviate, to different +extents, catastrophic forgetting. We discuss well-established and emerging +research motivated by lifelong learning factors in biological systems such as +structural plasticity, memory replay, curriculum and transfer learning, +intrinsic motivation, and multisensory integration." +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_firstAuthor|German I. 
Parisi +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_updated|2019-02-11T01:28:39Z +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_title|Continual Lifelong Learning with Neural Networks: A Review +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_published|2018-02-21T13:53:35Z +http://www.semanlink.net/doc/2020/01/_1802_07569_continual_lifelong|arxiv_num|1802.07569 +http://www.semanlink.net/doc/2019/08/starlette|creationDate|2019-08-11 +http://www.semanlink.net/doc/2019/08/starlette|tag|http://www.semanlink.net/tag/python_tools +http://www.semanlink.net/doc/2019/08/starlette|comment|lightweight ASGI framework/toolkit, for building asyncio services +http://www.semanlink.net/doc/2019/08/starlette|title|Starlette +http://www.semanlink.net/doc/2019/08/starlette|bookmarkOf|https://www.starlette.io/ +http://www.semanlink.net/doc/2019/08/starlette|creationTime|2019-08-11T23:58:35Z +http://www.semanlink.net/doc/2019/07/classifying_duplicate_questions|creationDate|2019-07-03 +http://www.semanlink.net/doc/2019/07/classifying_duplicate_questions|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2019/07/classifying_duplicate_questions|tag|http://www.semanlink.net/tag/quora_question_pairs +http://www.semanlink.net/doc/2019/07/classifying_duplicate_questions|title|Classifying duplicate questions from Quora with Keras R-bloggers +http://www.semanlink.net/doc/2019/07/classifying_duplicate_questions|bookmarkOf|https://www.r-bloggers.com/classifying-duplicate-questions-from-quora-with-keras/ +http://www.semanlink.net/doc/2019/07/classifying_duplicate_questions|creationTime|2019-07-03T01:32:20Z +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|creationDate|2020-02-10 +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|tag|http://www.semanlink.net/tag/triplet_loss +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|tag|http://www.semanlink.net/tag/slides +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|tag|http://www.semanlink.net/tag/job_matching +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|tag|http://www.semanlink.net/tag/paris_nlp_meetup +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|relatedDoc|http://www.semanlink.net/doc/2020/02/matching_resumes_to_jobs_via_de +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|title|Siamese CNN for job–candidate matching (slides) +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|bookmarkOf|https://nlpparis.files.wordpress.com/2020/01/siamese_cnn_job_candidate_matching.pdf +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|creationTime|2020-02-10T14:19:40Z +http://www.semanlink.net/doc/2020/02/siamese_cnn_for_job_candidate_m_1|mainDoc|http://www.semanlink.net/doc/2020/01/paris_nlp_season_4_meetup_3_ +http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|creationDate|2020-03-22 +http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|tag|http://www.semanlink.net/tag/attention_knowledge_graphs +http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|tag|http://www.semanlink.net/tag/baidu 
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|tag|http://www.semanlink.net/tag/link_prediction
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_author|Yong Zhu
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_author|Hua Wu
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_author|Quan Wang
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_author|Wenbin Jiang
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_author|Haifeng Wang
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_author|Pingping Huang
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_author|Songtai Dai
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_author|Yajuan Lyu
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_author|Jing Liu
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|comment|"A method to build contextualized entity and relation embeddings. Entities and relations may appear in different graph contexts. **Edges and paths, both formulated as sequences of entities and relations, are passed as input to a Transformer encoder to learn the contextualized representations.**
+
[Github](https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE)"
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|title|[1911.02168] CoKE: Contextualized Knowledge Graph Embedding
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|bookmarkOf|https://arxiv.org/abs/1911.02168
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|creationTime|2020-03-22T17:34:10Z
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_summary|"Knowledge graph embedding, which projects symbolic entities and relations
+into continuous vector spaces, is gaining increasing attention. Previous
+methods allow a single static embedding for each entity or relation, ignoring
+their intrinsic contextual nature, i.e., entities and relations may appear in
+different graph contexts, and accordingly, exhibit different properties. This
+work presents Contextualized Knowledge Graph Embedding (CoKE), a novel paradigm
+that takes into account such contextual nature, and learns dynamic, flexible,
+and fully contextualized entity and relation embeddings. Two types of graph
+contexts are studied: edges and paths, both formulated as sequences of entities
+and relations. CoKE takes a sequence as input and uses a Transformer encoder to
+obtain contextualized representations. These representations are hence
+naturally adaptive to the input, capturing contextual meanings of entities and
+relations therein. Evaluation on a wide variety of public benchmarks verifies
+the superiority of CoKE in link prediction and path query answering. It
+performs consistently better than, or at least equally well as current
+state-of-the-art in almost every case, in particular offering an absolute
+improvement of 21.0% in H@10 on path query answering. Our code is available at
+\url{https://github.com/PaddlePaddle/Research/tree/master/KG/CoKE}."
+http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_firstAuthor|Quan Wang +http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_updated|2020-04-04T07:22:20Z +http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_title|CoKE: Contextualized Knowledge Graph Embedding +http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_published|2019-11-06T02:27:39Z +http://www.semanlink.net/doc/2020/03/_1911_02168_coke_contextualiz|arxiv_num|1911.02168 +http://www.semanlink.net/doc/2019/12/detection_d_intention_applicat|creationDate|2019-12-14 +http://www.semanlink.net/doc/2019/12/detection_d_intention_applicat|tag|http://www.semanlink.net/tag/qwant +http://www.semanlink.net/doc/2019/12/detection_d_intention_applicat|tag|http://www.semanlink.net/tag/intent_detection +http://www.semanlink.net/doc/2019/12/detection_d_intention_applicat|tag|http://www.semanlink.net/tag/human_in_the_loop +http://www.semanlink.net/doc/2019/12/detection_d_intention_applicat|title|Détection d'intention: application industrielle d'un projet de recherche +http://www.semanlink.net/doc/2019/12/detection_d_intention_applicat|bookmarkOf|https://ia-ri.sciencesconf.org/data/pages/Maudet_and_Servan_Detection_d_intention_journee_Ri_and_IA_splitted.pdf +http://www.semanlink.net/doc/2019/12/detection_d_intention_applicat|creationTime|2019-12-14T14:33:23Z +http://www.semanlink.net/doc/2019/12/detection_d_intention_applicat|mainDoc|http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2 +http://www.semanlink.net/doc/2019/11/en_1989_100_deutschemarks_pour|creationDate|2019-11-09 +http://www.semanlink.net/doc/2019/11/en_1989_100_deutschemarks_pour|tag|http://www.semanlink.net/tag/mur_de_berlin +http://www.semanlink.net/doc/2019/11/en_1989_100_deutschemarks_pour|title|En 1989, 100 deutschemarks pour les citoyens de RDA : cadeau et ticket doux-amer vers l’inconnu +http://www.semanlink.net/doc/2019/11/en_1989_100_deutschemarks_pour|bookmarkOf|https://www.lemonde.fr/m-le-mag/article/2019/11/08/cent-deutschemarks-le-cadeau-de-bienvenue-de-l-allemagne-de-l-ouest-aux-citoyens-de-l-est_6018409_4500055.html +http://www.semanlink.net/doc/2019/11/en_1989_100_deutschemarks_pour|creationTime|2019-11-09T13:22:30Z +http://www.semanlink.net/doc/2019/08/watch_your_step_learning_node_|creationDate|2019-08-23 +http://www.semanlink.net/doc/2019/08/watch_your_step_learning_node_|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2019/08/watch_your_step_learning_node_|tag|http://www.semanlink.net/tag/node_embeddings +http://www.semanlink.net/doc/2019/08/watch_your_step_learning_node_|tag|http://www.semanlink.net/tag/attention_in_graphs +http://www.semanlink.net/doc/2019/08/watch_your_step_learning_node_|title|Watch Your Step: Learning Node Embeddings via Graph Attention +http://www.semanlink.net/doc/2019/08/watch_your_step_learning_node_|bookmarkOf|https://papers.nips.cc/paper/8131-watch-your-step-learning-node-embeddings-via-graph-attention.pdf +http://www.semanlink.net/doc/2019/08/watch_your_step_learning_node_|creationTime|2019-08-23T00:32:38Z +http://www.semanlink.net/doc/2020/02/fastai_fastpages_an_easy_to_us|creationDate|2020-02-25 +http://www.semanlink.net/doc/2020/02/fastai_fastpages_an_easy_to_us|tag|http://www.semanlink.net/tag/blog_software +http://www.semanlink.net/doc/2020/02/fastai_fastpages_an_easy_to_us|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2020/02/fastai_fastpages_an_easy_to_us|title|fastai/fastpages: An easy 
to use blogging platform, with enhanced support for Jupyter Notebooks. +http://www.semanlink.net/doc/2020/02/fastai_fastpages_an_easy_to_us|bookmarkOf|https://fastpages.fast.ai/ +http://www.semanlink.net/doc/2020/02/fastai_fastpages_an_easy_to_us|creationTime|2020-02-25T08:56:03Z +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|creationDate|2019-12-11 +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|tag|http://www.semanlink.net/tag/active_learning +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|tag|http://www.semanlink.net/tag/lenka_zdeborova +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|arxiv_author|Luca Saglietti +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|arxiv_author|Lenka Zdeborová +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|arxiv_author|Hugo Cui +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|comment|the task of choosing the subset of samples to be labeled from a fixed finite pool of samples +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|title|[1912.03927] Large deviations for the perceptron model and consequences for active learning +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|bookmarkOf|https://arxiv.org/abs/1912.03927 +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|creationTime|2019-12-11T02:26:25Z +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|arxiv_summary|"Active learning is a branch of machine learning that deals with problems +where unlabeled data is abundant yet obtaining labels is expensive. The +learning algorithm has the possibility of querying a limited number of samples +to obtain the corresponding labels, subsequently used for supervised learning. +In this work, we consider the task of choosing the subset of samples to be +labeled from a fixed finite pool of samples. We assume the pool of samples to +be a random matrix and the ground truth labels to be generated by a +single-layer teacher random neural network. We employ replica methods to +analyze the large deviations for the accuracy achieved after supervised +learning on a subset of the original pool. These large deviations then provide +optimal achievable performance boundaries for any active learning algorithm. We +show that the optimal learning performance can be efficiently approached by +simple message-passing active learning algorithms. We also provide a comparison +with the performance of some other popular active learning strategies." 
+http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|arxiv_firstAuthor|Hugo Cui +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|arxiv_updated|2019-12-09T09:50:52Z +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|arxiv_title|Large deviations for the perceptron model and consequences for active learning +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|arxiv_published|2019-12-09T09:50:52Z +http://www.semanlink.net/doc/2019/12/_1912_03927_large_deviations_f|arxiv_num|1912.03927 +http://www.semanlink.net/doc/2019/06/siatl_naacl_2019_pdf_google_d|creationDate|2019-06-08 +http://www.semanlink.net/doc/2019/06/siatl_naacl_2019_pdf_google_d|title|An Embarrassingly Simple Approach for Transfer Learning from Pretrained Language Models (NAACL 2019) (Slides) +http://www.semanlink.net/doc/2019/06/siatl_naacl_2019_pdf_google_d|bookmarkOf|https://drive.google.com/file/d/1JUyQq-Rz1ou6FvMc4P5-OA_qJnZ_zDv_/view +http://www.semanlink.net/doc/2019/06/siatl_naacl_2019_pdf_google_d|creationTime|2019-06-08T12:18:23Z +http://www.semanlink.net/doc/2019/06/siatl_naacl_2019_pdf_google_d|mainDoc|http://www.semanlink.net/doc/2019/06/an_embarrassingly_simple_approa +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|creationDate|2020-04-16 +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|tag|http://www.semanlink.net/tag/geoffrey_hinton +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|arxiv_author|Oriol Vinyals +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|arxiv_author|Geoffrey Hinton +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|arxiv_author|Jeff Dean +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|comment|"> **a different kind of training**, which we call “**distillation**” to transfer the +knowledge from the cumbersome model to a small model that is more +suitable for deployment + + +> Caruana and his collaborators have shown that it is possible to compress the knowledge in an [#ensemble](/tag/ensemble_learning.html) into a single model which is much easier to deploy and we develop this approach further using a different compression technique. We achieve some surprising results on MNIST." +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|title|[1503.02531] Distilling the Knowledge in a Neural Network +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|bookmarkOf|https://arxiv.org/abs/1503.02531 +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|creationTime|2020-04-16T14:40:33Z +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|arxiv_summary|"A very simple way to improve the performance of almost any machine learning +algorithm is to train many different models on the same data and then to +average their predictions. Unfortunately, making predictions using a whole +ensemble of models is cumbersome and may be too computationally expensive to +allow deployment to a large number of users, especially if the individual +models are large neural nets. 
Caruana and his collaborators have shown that it +is possible to compress the knowledge in an ensemble into a single model which +is much easier to deploy and we develop this approach further using a different +compression technique. We achieve some surprising results on MNIST and we show +that we can significantly improve the acoustic model of a heavily used +commercial system by distilling the knowledge in an ensemble of models into a +single model. We also introduce a new type of ensemble composed of one or more +full models and many specialist models which learn to distinguish fine-grained +classes that the full models confuse. Unlike a mixture of experts, these +specialist models can be trained rapidly and in parallel." +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|arxiv_firstAuthor|Geoffrey Hinton +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|arxiv_updated|2015-03-09T15:44:49Z +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|arxiv_title|Distilling the Knowledge in a Neural Network +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|arxiv_published|2015-03-09T15:44:49Z +http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno|arxiv_num|1503.02531 +http://www.semanlink.net/doc/2019/06/a_lagos_le_roi_des_peuls_est_a|creationDate|2019-06-29 +http://www.semanlink.net/doc/2019/06/a_lagos_le_roi_des_peuls_est_a|tag|http://www.semanlink.net/tag/lagos +http://www.semanlink.net/doc/2019/06/a_lagos_le_roi_des_peuls_est_a|tag|http://www.semanlink.net/tag/fulani +http://www.semanlink.net/doc/2019/06/a_lagos_le_roi_des_peuls_est_a|title|A Lagos, le roi des Peuls est aussi le boss des dockers +http://www.semanlink.net/doc/2019/06/a_lagos_le_roi_des_peuls_est_a|bookmarkOf|https://www.lemonde.fr/afrique/article/2019/06/29/a-lagos-le-roi-des-peuls-est-aussi-le-boss-des-dockers_5483080_3212.html +http://www.semanlink.net/doc/2019/06/a_lagos_le_roi_des_peuls_est_a|creationTime|2019-06-29T11:49:31Z +http://www.semanlink.net/doc/2019/11/huggingface_transformers_%F0%9F%A4%97_tr|creationDate|2019-11-16 +http://www.semanlink.net/doc/2019/11/huggingface_transformers_%F0%9F%A4%97_tr|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2019/11/huggingface_transformers_%F0%9F%A4%97_tr|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2019/11/huggingface_transformers_%F0%9F%A4%97_tr|tag|http://www.semanlink.net/tag/huggingface_transformers +http://www.semanlink.net/doc/2019/11/huggingface_transformers_%F0%9F%A4%97_tr|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2019/11/huggingface_transformers_%F0%9F%A4%97_tr|comment|"(BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet, CTRL...) with over 32+ pretrained models in 100+ languages and deep interoperability between TensorFlow 2.0 and PyTorch. + +[doc](https://huggingface.co/transformers/)" +http://www.semanlink.net/doc/2019/11/huggingface_transformers_%F0%9F%A4%97_tr|title|huggingface/transformers: 🤗 Transformers: State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch. 
+http://www.semanlink.net/doc/2019/11/huggingface_transformers_%F0%9F%A4%97_tr|bookmarkOf|https://github.com/huggingface/transformers +http://www.semanlink.net/doc/2019/11/huggingface_transformers_%F0%9F%A4%97_tr|creationTime|2019-11-16T00:41:04Z +http://www.semanlink.net/doc/2020/04/climat_le_patronat_s%E2%80%99active_p|creationDate|2020-04-25 +http://www.semanlink.net/doc/2020/04/climat_le_patronat_s%E2%80%99active_p|tag|http://www.semanlink.net/tag/ils_commencent_a_me_gonfler +http://www.semanlink.net/doc/2020/04/climat_le_patronat_s%E2%80%99active_p|tag|http://www.semanlink.net/tag/crise_ecologique +http://www.semanlink.net/doc/2020/04/climat_le_patronat_s%E2%80%99active_p|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/climat_le_patronat_s%E2%80%99active_p|tag|http://www.semanlink.net/tag/vive_le_capitalisme +http://www.semanlink.net/doc/2020/04/climat_le_patronat_s%E2%80%99active_p|title|Climat : le patronat s’active pour infléchir les normes +http://www.semanlink.net/doc/2020/04/climat_le_patronat_s%E2%80%99active_p|bookmarkOf|https://www.lemonde.fr/economie/article/2020/04/21/climat-le-patronat-s-active-pour-inflechir-les-normes_6037283_3234.html +http://www.semanlink.net/doc/2020/04/climat_le_patronat_s%E2%80%99active_p|creationTime|2020-04-25T21:38:05Z +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|creationDate|2020-01-09 +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|tag|http://www.semanlink.net/tag/end_to_end_entity_linking +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|tag|http://www.semanlink.net/tag/discute_avec_raphael +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|arxiv_author|Samuel Broscheit +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|comment|"Training BERT-base-uncased on English Wikipedia and then fine-tuned and evaluating it +on an entity linking (EL) benchmark (EL implemented as a token classification over the entity vocabulary) + +> BERT+Entity is a straightforward extension on top +of BERT, i.e. we initialize BERT with the publicly +available weights from the BERT-base-uncased +model and add an output classification layer on +top of the architecture. Given a contextualized token, +the classifier computes the probability of an +entity link for each entry in the entity vocabulary. + +Can BERT’s architecture learn all entity +linking steps jointly? To answer: + +> an extreme +simplification of the **entity linking setup that +works surprisingly well**: simply cast it as **a +per token classification over the entire entity +vocabulary** (over 700K classes in our case). + +> the model +is the first that performs entity linking without any +pipeline or any heuristics, compared to all prior +approaches. We found that with our approach we +can learn additional entity knowledge in BERT that +helps in entity linking. **However, we also found +that almost none of the downstream tasks really +required entity knowledge**. 
+ +### Related work + +- > [Durrett and Klein (2014)](/doc/2020/01/a_joint_model_for_entity_analys) were the first to propose +jointly modelling Mention detection, Candidate generation and Entity disambiguation in a graphical +model and could show that each of those steps are +interdependent and benefit from a joint objective + +This paper uses neural techniques instead of CRF. + +- > [Yamada](/showprop.do?pptyuri=http%3A%2F%2Fwww.semanlink.net%2F2001%2F00%2Fsemanlink-schema%23arxiv_author&pptyval=Ikuya%2BYamada) (2016, 2017) was the first to +investigate neural text representations and entity +linking, but their approach is limited to ED. + +cf. [#Wikipedia2Vec](tag:wikipedia2vec). Compare with [newer work by Yamada](doc:2020/09/1909_01259_neural_attentive_b)" +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|relatedDoc|http://www.semanlink.net/doc/2020/09/1909_01259_neural_attentive_b +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|relatedDoc|http://www.semanlink.net/doc/2020/01/a_joint_model_for_entity_analys +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|title|[2003.05473] Investigating Entity Knowledge in BERT with Simple Neural End-To-End Entity Linking (CoNNL 2019) +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|bookmarkOf|https://arxiv.org/abs/2003.05473 +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|creationTime|2020-01-09T10:36:17Z +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|arxiv_summary|"A typical architecture for end-to-end entity linking systems consists of +three steps: mention detection, candidate generation and entity disambiguation. +In this study we investigate the following questions: (a) Can all those steps +be learned jointly with a model for contextualized text-representations, i.e. +BERT (Devlin et al., 2019)? (b) How much entity knowledge is already contained +in pretrained BERT? (c) Does additional entity knowledge improve BERT's +performance in downstream tasks? To this end, we propose an extreme +simplification of the entity linking setup that works surprisingly well: simply +cast it as a per token classification over the entire entity vocabulary (over +700K classes in our case). We show on an entity linking benchmark that (i) this +model improves the entity representations over plain BERT, (ii) that it +outperforms entity linking architectures that optimize the tasks separately and +(iii) that it only comes second to the current state-of-the-art that does +mention detection and entity disambiguation jointly. Additionally, we +investigate the usefulness of entity-aware token-representations in the +text-understanding benchmark GLUE, as well as the question answering benchmarks +SQUAD V2 and SWAG and also the EN-DE WMT14 machine translation benchmark. To +our surprise, we find that most of those benchmarks do not benefit from +additional entity knowledge, except for a task with very small training data, +the RTE task in GLUE, which improves by 2%." 
+http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|arxiv_firstAuthor|Samuel Broscheit +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|arxiv_updated|2020-03-11T18:23:00Z +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|arxiv_title|Investigating Entity Knowledge in BERT with Simple Neural End-To-End Entity Linking +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|arxiv_published|2020-03-11T18:23:00Z +http://www.semanlink.net/doc/2020/01/investigating_entity_knowledge_|arxiv_num|2003.05473 +http://www.semanlink.net/doc/2019/07/la_sonde_japonaise_hayabusa_2_r|creationDate|2019-07-11 +http://www.semanlink.net/doc/2019/07/la_sonde_japonaise_hayabusa_2_r|tag|http://www.semanlink.net/tag/hayabusa2 +http://www.semanlink.net/doc/2019/07/la_sonde_japonaise_hayabusa_2_r|title|La sonde japonaise Hayabusa-2 réussit à se poser une seconde fois sur l’astéroïde Ryugu +http://www.semanlink.net/doc/2019/07/la_sonde_japonaise_hayabusa_2_r|bookmarkOf|https://www.lemonde.fr/sciences/article/2019/07/11/la-sonde-japonaise-hayabusa-2-reussit-a-se-poser-une-seconde-fois-sur-l-asteroide-ryugu_5487964_1650684.html +http://www.semanlink.net/doc/2019/07/la_sonde_japonaise_hayabusa_2_r|creationTime|2019-07-11T09:44:22Z +http://www.semanlink.net/doc/2019/12/les_chatbots_sont_morts_vive_l|creationDate|2019-12-15 +http://www.semanlink.net/doc/2019/12/les_chatbots_sont_morts_vive_l|tag|http://www.semanlink.net/tag/chatbot +http://www.semanlink.net/doc/2019/12/les_chatbots_sont_morts_vive_l|tag|http://www.semanlink.net/tag/media_conversationnel +http://www.semanlink.net/doc/2019/12/les_chatbots_sont_morts_vive_l|title|Les chatbots sont morts, vive les médias 100% messagerie ! +http://www.semanlink.net/doc/2019/12/les_chatbots_sont_morts_vive_l|bookmarkOf|https://lareclame.fr/jam-marjolaine-grondin-jeunes-loups-222212 +http://www.semanlink.net/doc/2019/12/les_chatbots_sont_morts_vive_l|creationTime|2019-12-15T00:38:43Z +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|creationDate|2019-08-15 +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|tag|http://www.semanlink.net/tag/information_bottleneck_method +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|tag|http://www.semanlink.net/tag/naftali_tishby +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|tag|http://www.semanlink.net/tag/information_theory_and_deep_learning +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|arxiv_author|Naftali Tishby +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|arxiv_author|Noga Zaslavsky +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|comment|"> Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN. 
+" +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|title|[1503.02406] Deep Learning and the Information Bottleneck Principle +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|bookmarkOf|https://arxiv.org/abs/1503.02406 +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|creationTime|2019-08-15T17:07:31Z +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|arxiv_summary|"Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the +information bottleneck (IB) principle. We first show that any DNN can be +quantified by the mutual information between the layers and the input and +output variables. Using this representation we can calculate the optimal +information theoretic limits of the DNN and obtain finite sample generalization +bounds. The advantage of getting closer to the theoretical limit is +quantifiable both by the generalization bound and by the network's simplicity. +We argue that both the optimal architecture, number of layers and +features/connections at each layer, are related to the bifurcation points of +the information bottleneck tradeoff, namely, relevant compression of the input +layer with respect to the output layer. The hierarchical representations at the +layered network naturally correspond to the structural phase transitions along +the information curve. We believe that this new insight can lead to new +optimality bounds and deep learning algorithms." +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|arxiv_firstAuthor|Naftali Tishby +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|arxiv_updated|2015-03-09T09:39:41Z +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|arxiv_title|Deep Learning and the Information Bottleneck Principle +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|arxiv_published|2015-03-09T09:39:41Z +http://www.semanlink.net/doc/2019/08/_1503_02406_deep_learning_and_|arxiv_num|1503.02406 +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|creationDate|2019-07-13 +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|tag|http://www.semanlink.net/tag/ludovic_denoyer +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|tag|http://www.semanlink.net/tag/memory_in_deep_learning +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|tag|http://www.semanlink.net/tag/guillaume_lample +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|tag|http://www.semanlink.net/tag/memory_networks +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_author|Ludovic Denoyer +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_author|Guillaume Lample +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_author|Hervé Jégou +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_author|Marc'Aurelio Ranzato +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_author|Alexandre Sablayrolles +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|comment|"> **a structured memory 
which can be easily integrated into a neural network.** The memory is very large by design and therefore significantly increases the capacity of the architecture, by up to a billion parameters with a negligible computational overhead. Its design and access pattern is based on **product keys**, which enable fast and exact nearest neighbor search. The ability to increase the number of parameters while keeping the same computational budget lets the overall system strike a better trade-off between prediction accuracy and computation efficiency both at training and test time. + +> a key-value memory layer that can increase model capacity for a negligible computational cost. A 12-layer transformer with a memory outperforms a 24-layer transformer, and is 2x faster! + +[Implementation](/doc/2019/08/product_key_memory_pkm_minima) + +TODO: compare with [[2007.00849] Facts as Experts: Adaptable and Interpretable Neural Memory over Symbolic Knowledge](doc:2020/07/2007_00849_facts_as_experts_)" +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|relatedDoc|http://www.semanlink.net/doc/2020/07/2007_00849_facts_as_experts_ +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|relatedDoc|http://www.semanlink.net/doc/2019/08/product_key_memory_pkm_minima +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|title|[1907.05242] Large Memory Layers with Product Keys +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|bookmarkOf|https://arxiv.org/abs/1907.05242 +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|creationTime|2019-07-13T19:32:44Z +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_summary|"This paper introduces a structured memory which can be easily integrated into +a neural network. The memory is very large by design and significantly +increases the capacity of the architecture, by up to a billion parameters with +a negligible computational overhead. Its design and access pattern is based on +product keys, which enable fast and exact nearest neighbor search. The ability +to increase the number of parameters while keeping the same computational +budget lets the overall system strike a better trade-off between prediction +accuracy and computation efficiency both at training and test time. This memory +layer allows us to tackle very large scale language modeling tasks. In our +experiments we consider a dataset with up to 30 billion words, and we plug our +memory layer in a state-of-the-art transformer-based architecture. In +particular, we found that a memory augmented model with only 12 layers +outperforms a baseline transformer model with 24 layers, while being twice +faster at inference time. We release our code for reproducibility purposes." 
+http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_firstAuthor|Guillaume Lample +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_updated|2019-12-16T03:46:57Z +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_title|Large Memory Layers with Product Keys +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_published|2019-07-10T14:52:12Z +http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer|arxiv_num|1907.05242 +http://www.semanlink.net/doc/2019/12/can_you_hear_me_now_improved_v|creationDate|2019-12-06 +http://www.semanlink.net/doc/2019/12/can_you_hear_me_now_improved_v|tag|http://www.semanlink.net/tag/keyword_spotting +http://www.semanlink.net/doc/2019/12/can_you_hear_me_now_improved_v|tag|http://www.semanlink.net/tag/speech_recognition +http://www.semanlink.net/doc/2019/12/can_you_hear_me_now_improved_v|tag|http://www.semanlink.net/tag/alibaba +http://www.semanlink.net/doc/2019/12/can_you_hear_me_now_improved_v|title|Can You Hear Me Now? Improved Voice Assistant Keyword Spotting with Alibaba +http://www.semanlink.net/doc/2019/12/can_you_hear_me_now_improved_v|bookmarkOf|https://medium.com/@alitech_2017/can-you-hear-me-now-improved-voice-assistant-keyword-spotting-with-alibaba-1f11823bea15 +http://www.semanlink.net/doc/2019/12/can_you_hear_me_now_improved_v|creationTime|2019-12-06T10:38:23Z +http://www.semanlink.net/doc/2019/08/learning_text_similarity_with_s|creationDate|2019-08-07 +http://www.semanlink.net/doc/2019/08/learning_text_similarity_with_s|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2019/08/learning_text_similarity_with_s|tag|http://www.semanlink.net/tag/job_title_normalization +http://www.semanlink.net/doc/2019/08/learning_text_similarity_with_s|tag|http://www.semanlink.net/tag/similarity_learning +http://www.semanlink.net/doc/2019/08/learning_text_similarity_with_s|tag|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/doc/2019/08/learning_text_similarity_with_s|comment|"A deep architecture for +**learning a similarity metric** on variable length +character sequences. The model +combines a stack of character-level bidirectional +LSTM’s with a Siamese architecture. +It learns to project variable length +strings into a fixed-dimensional embedding +space **by using only information +about the similarity between pairs of +strings**. This model is applied to the task +of job title normalization based on a manually +annotated taxonomy. A small data set +is incrementally expanded and augmented +with new sources of variance. + +from the conclusion: The experiment shows that the explicit use +of prior knowledge to add these sources of invariance +to the system was crucial in learning. Without +this knowledge extra words and synonyms will negatively affect the performance of the system." 
+http://www.semanlink.net/doc/2019/08/learning_text_similarity_with_s|title|Learning Text Similarity with Siamese Recurrent Networks (2016) +http://www.semanlink.net/doc/2019/08/learning_text_similarity_with_s|bookmarkOf|https://www.aclweb.org/anthology/W16-1617/ +http://www.semanlink.net/doc/2019/08/learning_text_similarity_with_s|creationTime|2019-08-07T02:01:44Z +http://www.semanlink.net/doc/2020/02/a_new_model_and_dataset_for_lon|creationDate|2020-02-11 +http://www.semanlink.net/doc/2020/02/a_new_model_and_dataset_for_lon|tag|http://www.semanlink.net/tag/memory_in_deep_learning +http://www.semanlink.net/doc/2020/02/a_new_model_and_dataset_for_lon|tag|http://www.semanlink.net/tag/google_deepmind +http://www.semanlink.net/doc/2020/02/a_new_model_and_dataset_for_lon|comment|"the use of memory in deep learning, and how modelling language may be an ideal task for developing better memory architectures + +[paper](/doc/2020/02/_1911_05507_compressive_transf)" +http://www.semanlink.net/doc/2020/02/a_new_model_and_dataset_for_lon|relatedDoc|http://www.semanlink.net/doc/2020/02/_1911_05507_compressive_transf +http://www.semanlink.net/doc/2020/02/a_new_model_and_dataset_for_lon|title|A new model and dataset for long-range memory DeepMind +http://www.semanlink.net/doc/2020/02/a_new_model_and_dataset_for_lon|bookmarkOf|https://deepmind.com/blog/article/A_new_model_and_dataset_for_long-range_memory +http://www.semanlink.net/doc/2020/02/a_new_model_and_dataset_for_lon|creationTime|2020-02-11T08:40:48Z +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|creationDate|2020-01-01 +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|tag|http://www.semanlink.net/tag/chatbot +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|tag|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|tag|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|arxiv_author|Nick Craswell +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|arxiv_author|Hamed Zamani +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|title|[1912.08904] Macaw: An Extensible Conversational Information Seeking Platform +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|bookmarkOf|https://arxiv.org/abs/1912.08904 +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|creationTime|2020-01-01T10:55:09Z +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|arxiv_summary|"Conversational information seeking (CIS) has been recognized as a major +emerging research area in information retrieval. Such research will require +data and tools, to allow the implementation and study of conversational +systems. This paper introduces Macaw, an open-source framework with a modular +architecture for CIS research. Macaw supports multi-turn, multi-modal, and +mixed-initiative interactions, and enables research for tasks such as document +retrieval, question answering, recommendation, and structured data exploration. 
+It has a modular design to encourage the study of new CIS algorithms, which can +be evaluated in batch mode. It can also integrate with a user interface, which +allows user studies and data collection in an interactive mode, where the back +end can be fully algorithmic or a wizard of oz setup. Macaw is distributed +under the MIT License." +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|arxiv_firstAuthor|Hamed Zamani +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|arxiv_updated|2019-12-18T21:51:22Z +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|arxiv_title|Macaw: An Extensible Conversational Information Seeking Platform +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|arxiv_published|2019-12-18T21:51:22Z +http://www.semanlink.net/doc/2020/01/_1912_08904_macaw_an_extensib|arxiv_num|1912.08904 +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|creationDate|2019-08-15 +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|tag|http://www.semanlink.net/tag/information_bottleneck_method +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|tag|http://www.semanlink.net/tag/naftali_tishby +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|arxiv_author|Naftali Tishby Hebrew University and NEC Research Institute +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|arxiv_author|William Bialek NEC Research Institute +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|arxiv_author|Fernando C. Pereira ATT Shannon Laboratory +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|comment|"> We define the relevant information in a signal x ∈ X as being the information that this signal provides about another signal y ∈ Y. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. **Understanding the signal x requires more than just predicting y, it also requires specifying which features of X play a role in the prediction. We formalize this problem as that of finding a short code for X that preserves the maximum information about Y.** That is, we squeeze the information that X provides about Y through a ‘bottleneck’ formed by a limited set of codewords X ̃... This approach yields an exact set of self consistent equations for the coding rules X → X ̃ and X ̃ → Y . + +(from the intro) : how to define ""meaningful / relevant"" information? An issue left out of information theory by Shannon (focus on the problem of transmitting information rather than judging its value to the recipient) ->leads to +consider statistical and information theoretic principles as almost irrelevant +for the question of meaning. + +> In contrast, **we argue here that information theory, +in particular lossy source compression, provides a natural quantitative +approach to the question of “relevant information.”** Specifically, we formulate +a **variational principle** for the extraction or efficient representation of +relevant information. 
+ +" +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|title|[physics/0004057] The information bottleneck method +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|bookmarkOf|https://arxiv.org/abs/physics/0004057 +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|creationTime|2019-08-15T11:31:33Z +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|arxiv_summary|"We define the relevant information in a signal $x\in X$ as being the +information that this signal provides about another signal $y\in \Y$. Examples +include the information that face images provide about the names of the people +portrayed, or the information that speech sounds provide about the words +spoken. Understanding the signal $x$ requires more than just predicting $y$, it +also requires specifying which features of $\X$ play a role in the prediction. +We formalize this problem as that of finding a short code for $\X$ that +preserves the maximum information about $\Y$. That is, we squeeze the +information that $\X$ provides about $\Y$ through a `bottleneck' formed by a +limited set of codewords $\tX$. This constrained optimization problem can be +seen as a generalization of rate distortion theory in which the distortion +measure $d(x,\x)$ emerges from the joint statistics of $\X$ and $\Y$. This +approach yields an exact set of self consistent equations for the coding rules +$X \to \tX$ and $\tX \to \Y$. Solutions to these equations can be found by a +convergent re-estimation method that generalizes the Blahut-Arimoto algorithm. +Our variational principle provides a surprisingly rich framework for discussing +a variety of problems in signal processing and learning, as will be described +in detail elsewhere." +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|arxiv_firstAuthor|Naftali Tishby Hebrew University and NEC Research Institute +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|arxiv_updated|2000-04-24T15:22:30Z +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|arxiv_title|The information bottleneck method +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|arxiv_published|2000-04-24T15:22:30Z +http://www.semanlink.net/doc/2019/08/_physics_0004057_the_informati|arxiv_num|physics/0004057 +http://www.semanlink.net/doc/2019/11/releasing_spleeter_deezer_rese|creationDate|2019-11-06 +http://www.semanlink.net/doc/2019/11/releasing_spleeter_deezer_rese|tag|http://www.semanlink.net/tag/music_source_separation +http://www.semanlink.net/doc/2019/11/releasing_spleeter_deezer_rese|title|Releasing Spleeter: Deezer Research source separation engine +http://www.semanlink.net/doc/2019/11/releasing_spleeter_deezer_rese|bookmarkOf|https://deezer.io/releasing-spleeter-deezer-r-d-source-separation-engine-2b88985e797e +http://www.semanlink.net/doc/2019/11/releasing_spleeter_deezer_rese|creationTime|2019-11-06T01:21:13Z +http://www.semanlink.net/doc/2019/10/troubleshooting_tika_1|creationDate|2019-10-20 +http://www.semanlink.net/doc/2019/10/troubleshooting_tika_1|tag|http://www.semanlink.net/tag/tika +http://www.semanlink.net/doc/2019/10/troubleshooting_tika_1|tag|http://www.semanlink.net/tag/troubleshooting +http://www.semanlink.net/doc/2019/10/troubleshooting_tika_1|comment|"> Apache Tika is great when it works, but by default can be silently forgiving of configuration mistakes. 
+ +That's not nice" +http://www.semanlink.net/doc/2019/10/troubleshooting_tika_1|title|Troubleshooting Tika +http://www.semanlink.net/doc/2019/10/troubleshooting_tika_1|bookmarkOf|https://cwiki.apache.org/confluence/display/tika/Troubleshooting%20Tika +http://www.semanlink.net/doc/2019/10/troubleshooting_tika_1|creationTime|2019-10-20T10:19:22Z +http://www.semanlink.net/doc/2019/11/des_varietes_de_vegetaux_devenu|creationDate|2019-11-29 +http://www.semanlink.net/doc/2019/11/des_varietes_de_vegetaux_devenu|tag|http://www.semanlink.net/tag/agriculture_industrielle +http://www.semanlink.net/doc/2019/11/des_varietes_de_vegetaux_devenu|title|Des variétés de végétaux devenues tolérantes à un herbicide… à cause de ce même herbicide +http://www.semanlink.net/doc/2019/11/des_varietes_de_vegetaux_devenu|bookmarkOf|https://www.lemonde.fr/planete/article/2019/11/28/agriculture-des-varietes-de-vegetaux-tolerantes-a-un-herbicide-a-cause-de-ce-meme-herbicide_6020909_3244.html +http://www.semanlink.net/doc/2019/11/des_varietes_de_vegetaux_devenu|creationTime|2019-11-29T02:07:56Z +http://www.semanlink.net/doc/2020/03/taiwan_un_modele_dans_la_lutte|creationDate|2020-03-29 +http://www.semanlink.net/doc/2020/03/taiwan_un_modele_dans_la_lutte|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/03/taiwan_un_modele_dans_la_lutte|tag|http://www.semanlink.net/tag/taiwan +http://www.semanlink.net/doc/2020/03/taiwan_un_modele_dans_la_lutte|title|Taïwan, un modèle dans la lutte contre le coronavirus (RFI - 12/03/2020) +http://www.semanlink.net/doc/2020/03/taiwan_un_modele_dans_la_lutte|bookmarkOf|http://www.rfi.fr/fr/asie-pacifique/20200311-ta%C3%AFwan-bonne-gestion-crise-coronavirus +http://www.semanlink.net/doc/2020/03/taiwan_un_modele_dans_la_lutte|creationTime|2020-03-29T15:48:59Z +http://www.semanlink.net/doc/2019/07/course_nlp_2_svd_nmf_topic_mode|creationDate|2019-07-11 +http://www.semanlink.net/doc/2019/07/course_nlp_2_svd_nmf_topic_mode|tag|http://www.semanlink.net/tag/non_negative_matrix_factorization +http://www.semanlink.net/doc/2019/07/course_nlp_2_svd_nmf_topic_mode|tag|http://www.semanlink.net/tag/singular_value_decomposition +http://www.semanlink.net/doc/2019/07/course_nlp_2_svd_nmf_topic_mode|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2019/07/course_nlp_2_svd_nmf_topic_mode|title|course-nlp/2-svd-nmf-topic-modeling.ipynb at master · fastai/course-nlp +http://www.semanlink.net/doc/2019/07/course_nlp_2_svd_nmf_topic_mode|bookmarkOf|https://github.com/fastai/course-nlp/blob/master/2-svd-nmf-topic-modeling.ipynb +http://www.semanlink.net/doc/2019/07/course_nlp_2_svd_nmf_topic_mode|creationTime|2019-07-11T16:57:41Z +http://www.semanlink.net/doc/2019/11/knowledge_graphs_and_knowledge_|creationDate|2019-11-02 +http://www.semanlink.net/doc/2019/11/knowledge_graphs_and_knowledge_|tag|http://www.semanlink.net/tag/iswc +http://www.semanlink.net/doc/2019/11/knowledge_graphs_and_knowledge_|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2019/11/knowledge_graphs_and_knowledge_|title|Knowledge Graphs and Knowledge modelling took center stage at ISWC 2019 LinkedIn +http://www.semanlink.net/doc/2019/11/knowledge_graphs_and_knowledge_|bookmarkOf|https://www.linkedin.com/pulse/knowledge-graphs-modelling-took-center-stage-iswc-2019-armin-haller/ +http://www.semanlink.net/doc/2019/11/knowledge_graphs_and_knowledge_|creationTime|2019-11-02T09:32:20Z 
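The information bottleneck records above describe a variational principle for extracting relevant information but do not state the objective itself. In its standard form (background knowledge, not text from the dataset), the trade-off reads as follows, with β balancing compression of X against preserved information about Y:

```latex
% Information bottleneck: compress X into codewords \tilde{X} while
% keeping as much information about Y as possible (\beta = trade-off parameter)
\min_{p(\tilde{x} \mid x)} \; I(X; \tilde{X}) \;-\; \beta \, I(\tilde{X}; Y)
```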
+http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|creationDate|2020-05-19 +http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|tag|http://www.semanlink.net/tag/noise_contrastive_estimation +http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|tag|http://www.semanlink.net/tag/peter_bloem +http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|tag|http://www.semanlink.net/tag/negative_sampling +http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|tag|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|comment|"[about this paper](doc:2020/05/you_can_teach_an_old_dog_new_tr) ""You CAN Teach an Old Dog New Tricks! On Training Knowledge Graph Embeddings"" + +> in knowledge base completion, negative sampling works better if you then take the softmax + multiclass CE over the whole sample (as opposed to sigmoids and binary CE) + +@Mniepert: + +> My take: softmax + multi-class CE better under “open world” assumption." +http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|relatedDoc|http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr +http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|title|"Peter Bloem sur Twitter : ""One of the messages from Ruffinelli et al 2020...""" +http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|bookmarkOf|https://twitter.com/pbloemesquire/status/1262336693186813953 +http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_|creationTime|2020-05-19T00:32:45Z +http://www.semanlink.net/doc/2020/04/nothing_to_hide_argument|creationDate|2020-04-24 +http://www.semanlink.net/doc/2020/04/nothing_to_hide_argument|tag|http://www.semanlink.net/tag/nothing_to_hide_argument +http://www.semanlink.net/doc/2020/04/nothing_to_hide_argument|tag|http://www.semanlink.net/tag/edward_snowden +http://www.semanlink.net/doc/2020/04/nothing_to_hide_argument|tag|http://www.semanlink.net/tag/citation +http://www.semanlink.net/doc/2020/04/nothing_to_hide_argument|comment|"> Arguing that you don't care about the right to privacy because you have nothing to hide is no different than saying you don't care about free speech because you have nothing to say. +> When you say, 'I have nothing to hide,' you're saying, 'I don't care about this right'. + +E. Snowden" +http://www.semanlink.net/doc/2020/04/nothing_to_hide_argument|title|Nothing to hide argument +http://www.semanlink.net/doc/2020/04/nothing_to_hide_argument|bookmarkOf|https://en.wikipedia.org/wiki/Nothing_to_hide_argument +http://www.semanlink.net/doc/2020/04/nothing_to_hide_argument|creationTime|2020-04-24T12:39:27Z +http://www.semanlink.net/doc/2020/03/mapper_annotated_text_plugin_%7C_|creationDate|2020-03-14 +http://www.semanlink.net/doc/2020/03/mapper_annotated_text_plugin_%7C_|tag|http://www.semanlink.net/tag/elasticsearch_annotated_text_field +http://www.semanlink.net/doc/2020/03/mapper_annotated_text_plugin_%7C_|comment|"The doc about annotated text fields. 
See also elastic list: + +- +- " +http://www.semanlink.net/doc/2020/03/mapper_annotated_text_plugin_%7C_|title|Mapper Annotated Text Plugin Elastic +http://www.semanlink.net/doc/2020/03/mapper_annotated_text_plugin_%7C_|bookmarkOf|https://www.elastic.co/guide/en/elasticsearch/plugins/current/mapper-annotated-text.html +http://www.semanlink.net/doc/2020/03/mapper_annotated_text_plugin_%7C_|creationTime|2020-03-14T11:47:52Z +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|creationDate|2020-05-11 +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|tag|http://www.semanlink.net/tag/allen_institute_for_ai_a2i +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|tag|http://www.semanlink.net/tag/kd_mkb_biblio +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|tag|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_author|Nelson F. Liu +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_author|Matthew E. Peters +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_author|Robert L. Logan IV +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_author|Sameer Singh +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_author|Matt Gardner +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|comment|> a **neural language model with mechanisms for selecting and copying facts from a knowledge graph that are relevant to the context**. These mechanisms enable the model to render information it has never seen before, as well as generate out-of-vocabulary tokens. +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|title|[1906.07241] Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|bookmarkOf|https://arxiv.org/abs/1906.07241 +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|creationTime|2020-05-11T18:55:35Z +http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_summary|"Modeling human language requires the ability to not only generate fluent text +but also encode factual knowledge. However, traditional language models are +only capable of remembering facts seen at training time, and often have +difficulty recalling them. To address this, we introduce the knowledge graph +language model (KGLM), a neural language model with mechanisms for selecting +and copying facts from a knowledge graph that are relevant to the context. +These mechanisms enable the model to render information it has never seen +before, as well as generate out-of-vocabulary tokens. We also introduce the +Linked WikiText-2 dataset, a corpus of annotated text aligned to the Wikidata +knowledge graph whose contents (roughly) match the popular WikiText-2 +benchmark. In experiments, we demonstrate that the KGLM achieves significantly +better performance than a strong baseline language model. We additionally +compare different language model's ability to complete sentences requiring +factual knowledge, showing that the KGLM outperforms even very large language +models in generating facts." 
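The negative-sampling note a few records above contrasts "softmax + multiclass CE over the whole sample" with "sigmoids and binary CE". A minimal numpy sketch of the two losses being contrasted; the toy scores are illustrative, not taken from the cited paper:

```python
import numpy as np

# scores[0] is the positive triple's score, scores[1:] are sampled negatives
scores = np.array([2.0, 0.5, -1.0, 0.3])

def binary_ce(scores):
    # Sigmoid + binary cross-entropy: every score is judged independently.
    labels = np.zeros_like(scores)
    labels[0] = 1.0
    p = 1.0 / (1.0 + np.exp(-scores))
    return -np.mean(labels * np.log(p) + (1 - labels) * np.log(1 - p))

def multiclass_ce(scores):
    # Softmax + multi-class cross-entropy over the whole sample: the positive
    # competes against all negatives jointly (log-sum-exp with max trick).
    m = scores.max()
    log_z = m + np.log(np.sum(np.exp(scores - m)))
    return -(scores[0] - log_z)

print(binary_ce(scores), multiclass_ce(scores))
```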
+http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_firstAuthor|Robert L. Logan IV
+http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_updated|2019-06-20T18:37:00Z
+http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_title|Barack's Wife Hillary: Using Knowledge-Graphs for Fact-Aware Language Modeling
+http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_published|2019-06-17T19:48:41Z
+http://www.semanlink.net/doc/2020/05/1906_07241_barack_s_wife_hill|arxiv_num|1906.07241
+http://www.semanlink.net/doc/2020/01/lecture_14_contextual_vectors|creationDate|2020-01-05
+http://www.semanlink.net/doc/2020/01/lecture_14_contextual_vectors|tag|http://www.semanlink.net/tag/contextualised_word_representations
+http://www.semanlink.net/doc/2020/01/lecture_14_contextual_vectors|tag|http://www.semanlink.net/tag/bert
+http://www.semanlink.net/doc/2020/01/lecture_14_contextual_vectors|tag|http://www.semanlink.net/tag/nlp_stanford
+http://www.semanlink.net/doc/2020/01/lecture_14_contextual_vectors|tag|http://www.semanlink.net/tag/elmo
+http://www.semanlink.net/doc/2020/01/lecture_14_contextual_vectors|title|Lecture 14 – Contextual Vectors Stanford CS224U: Natural Language Understanding Spring 2019
+http://www.semanlink.net/doc/2020/01/lecture_14_contextual_vectors|bookmarkOf|http://onlinehub.stanford.edu/cs224u-natural-language-understanding/stanford-cs224u-natural-language-understanding-spring-2019-lecture-14-contextual-vectors
+http://www.semanlink.net/doc/2020/01/lecture_14_contextual_vectors|creationTime|2020-01-05T18:17:47Z
+http://www.semanlink.net/doc/2020/01/lecture_14_contextual_vectors|mainDoc|http://www.semanlink.net/doc/2020/01/cs224u_natural_language_unders
+http://www.semanlink.net/doc/2019/08/a_la_recherche_de_la_lionne_de_|creationDate|2019-08-03
+http://www.semanlink.net/doc/2019/08/a_la_recherche_de_la_lionne_de_|tag|http://www.semanlink.net/tag/musee_archeologique_de_bagdad
+http://www.semanlink.net/doc/2019/08/a_la_recherche_de_la_lionne_de_|tag|http://www.semanlink.net/tag/pillage_de_vestiges_antiques
+http://www.semanlink.net/doc/2019/08/a_la_recherche_de_la_lionne_de_|title|A la recherche de la lionne de Nimroud
+http://www.semanlink.net/doc/2019/08/a_la_recherche_de_la_lionne_de_|bookmarkOf|https://www.lemonde.fr/m-le-mag/article/2019/08/02/a-la-recherche-de-la-lionne-de-nimroud_5495939_4500055.html
+http://www.semanlink.net/doc/2019/08/a_la_recherche_de_la_lionne_de_|creationTime|2019-08-03T09:48:02Z
+http://www.semanlink.net/doc/2019/08/transformers_from_scratch_%7C_pet|creationDate|2019-08-21
+http://www.semanlink.net/doc/2019/08/transformers_from_scratch_%7C_pet|tag|http://www.semanlink.net/tag/good
+http://www.semanlink.net/doc/2019/08/transformers_from_scratch_%7C_pet|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+http://www.semanlink.net/doc/2019/08/transformers_from_scratch_%7C_pet|tag|http://www.semanlink.net/tag/peter_bloem
+http://www.semanlink.net/doc/2019/08/transformers_from_scratch_%7C_pet|comment|"The best explanation of the transformer. Code included.
+
+> Self-attention is a sequence-to-sequence operation: a sequence of
+t vectors goes in, and a sequence of t vectors comes out (all vectors with same size).
+>
+> To produce output vector 𝐲ᵢ, the self attention operation simply takes a weighted average over all the input vectors
+>
+> 𝐲ᵢ = ∑ⱼ wᵢⱼ 𝐱ⱼ.
+>
+> Where the weights sum to one over all j. The weight wᵢⱼ is not a parameter, as in a normal neural net, but it is derived from a function over 𝐱ᵢ and 𝐱ⱼ. The simplest option for this function is the dot product.
+
+"
+http://www.semanlink.net/doc/2019/08/transformers_from_scratch_%7C_pet|title|Transformers from scratch Peter Bloem
+http://www.semanlink.net/doc/2019/08/transformers_from_scratch_%7C_pet|bookmarkOf|http://peterbloem.nl/blog/transformers
+http://www.semanlink.net/doc/2019/08/transformers_from_scratch_%7C_pet|creationTime|2019-08-21T13:11:32Z
+http://www.semanlink.net/doc/2020/03/how_taiwan_fended_off_the_coron|creationDate|2020-03-29
+http://www.semanlink.net/doc/2020/03/how_taiwan_fended_off_the_coron|tag|http://www.semanlink.net/tag/coronavirus
+http://www.semanlink.net/doc/2020/03/how_taiwan_fended_off_the_coron|tag|http://www.semanlink.net/tag/taiwan
+http://www.semanlink.net/doc/2020/03/how_taiwan_fended_off_the_coron|title|How Taiwan fended off the coronavirus WORLD News Group
+http://www.semanlink.net/doc/2020/03/how_taiwan_fended_off_the_coron|bookmarkOf|https://world.wng.org/2020/03/how_taiwan_fended_off_the_coronavirus
+http://www.semanlink.net/doc/2020/03/how_taiwan_fended_off_the_coron|creationTime|2020-03-29T16:02:11Z
+http://www.semanlink.net/doc/2019/07/chaos_a_hongkong_apres_la_mise_|creationDate|2019-07-02
+http://www.semanlink.net/doc/2019/07/chaos_a_hongkong_apres_la_mise_|tag|http://www.semanlink.net/tag/hong_kong
+http://www.semanlink.net/doc/2019/07/chaos_a_hongkong_apres_la_mise_|title|Chaos à Hongkong après la mise à sac du Parlement
+http://www.semanlink.net/doc/2019/07/chaos_a_hongkong_apres_la_mise_|bookmarkOf|https://www.lemonde.fr/international/article/2019/07/01/chaos-a-hongkong-apres-la-mise-a-sac-du-parlement_5483977_3210.html
+http://www.semanlink.net/doc/2019/07/chaos_a_hongkong_apres_la_mise_|creationTime|2019-07-02T01:30:09Z
+http://www.semanlink.net/doc/2019/06/shaping_linked_data_apps_%7C_rube|creationDate|2019-06-17
+http://www.semanlink.net/doc/2019/06/shaping_linked_data_apps_%7C_rube|tag|http://www.semanlink.net/tag/ruben_verborgh
+http://www.semanlink.net/doc/2019/06/shaping_linked_data_apps_%7C_rube|tag|http://www.semanlink.net/tag/solid
+http://www.semanlink.net/doc/2019/06/shaping_linked_data_apps_%7C_rube|title|Shaping Linked Data apps Ruben Verborgh
+http://www.semanlink.net/doc/2019/06/shaping_linked_data_apps_%7C_rube|bookmarkOf|https://ruben.verborgh.org/blog/2019/06/17/shaping-linked-data-apps/
+http://www.semanlink.net/doc/2019/06/shaping_linked_data_apps_%7C_rube|creationTime|2019-06-17T18:35:23Z
+http://www.semanlink.net/doc/2020/01/nsmntx_neo4j_rdf_semantics_|creationDate|2020-01-19
+http://www.semanlink.net/doc/2020/01/nsmntx_neo4j_rdf_semantics_|tag|http://www.semanlink.net/tag/rdf
+http://www.semanlink.net/doc/2020/01/nsmntx_neo4j_rdf_semantics_|tag|http://www.semanlink.net/tag/neo4j
+http://www.semanlink.net/doc/2020/01/nsmntx_neo4j_rdf_semantics_|title|NSMNTX - Neo4j RDF & Semantics toolkit
+http://www.semanlink.net/doc/2020/01/nsmntx_neo4j_rdf_semantics_|bookmarkOf|https://neo4j.com/labs/nsmtx-rdf/
+http://www.semanlink.net/doc/2020/01/nsmntx_neo4j_rdf_semantics_|creationTime|2020-01-19T01:01:49Z
+http://www.semanlink.net/doc/2020/01/les_brexiters_ont_ils_eu_ce_qu_|creationDate|2020-01-28
+http://www.semanlink.net/doc/2020/01/les_brexiters_ont_ils_eu_ce_qu_|tag|http://www.semanlink.net/tag/brexit
+http://www.semanlink.net/doc/2020/01/les_brexiters_ont_ils_eu_ce_qu_|title|Les brexiters ont-ils eu ce qu'ils voulaient ?
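The "Transformers from scratch" comment above spells out basic self-attention: dot-product weights, normalized to sum to one over j. A minimal numpy sketch of that recipe, assuming the usual row-wise softmax as the normalization:

```python
import numpy as np

def basic_self_attention(X):
    # X: (t, k), a sequence of t input vectors of size k
    raw = X @ X.T                          # w'_ij = x_i . x_j (dot product)
    raw -= raw.max(axis=1, keepdims=True)  # numerical stability
    W = np.exp(raw)
    W /= W.sum(axis=1, keepdims=True)      # weights sum to one over j
    return W @ X                           # y_i = sum_j w_ij x_j

X = np.random.randn(5, 4)
print(basic_self_attention(X).shape)  # (5, 4): t vectors in, t vectors out
```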
+http://www.semanlink.net/doc/2020/01/les_brexiters_ont_ils_eu_ce_qu_|bookmarkOf|https://www.lemonde.fr/les-decodeurs/article/2020/01/27/les-brexiters-ont-ils-eu-ce-qu-ils-voulaient_6027404_4355770.html
+http://www.semanlink.net/doc/2020/01/les_brexiters_ont_ils_eu_ce_qu_|creationTime|2020-01-28T00:29:59Z
+http://www.semanlink.net/doc/2020/01/a_joint_model_for_entity_analys|creationDate|2020-01-09
+http://www.semanlink.net/doc/2020/01/a_joint_model_for_entity_analys|tag|http://www.semanlink.net/tag/end_to_end_entity_linking
+http://www.semanlink.net/doc/2020/01/a_joint_model_for_entity_analys|tag|http://www.semanlink.net/tag/conditional_random_field
+http://www.semanlink.net/doc/2020/01/a_joint_model_for_entity_analys|tag|http://www.semanlink.net/tag/multitask_learning_in_nlp
+http://www.semanlink.net/doc/2020/01/a_joint_model_for_entity_analys|comment|"Models interactions between the Mention Detection (MD), Candidate Generation (CG) and Entity Disambiguation (ED) tasks jointly. They find that the joint objective is beneficial (each task improves). They also note that there is no natural order of the tasks and that they should interact freely. Their approach to CG is to learn to generate queries to the KB."
+http://www.semanlink.net/doc/2020/01/a_joint_model_for_entity_analys|title|A Joint Model for Entity Analysis: Coreference, Typing, and Linking (Greg Durrett, Dan Klein 2014)
+http://www.semanlink.net/doc/2020/01/a_joint_model_for_entity_analys|bookmarkOf|https://transacl.org/ojs/index.php/tacl/article/view/412
+http://www.semanlink.net/doc/2020/01/a_joint_model_for_entity_analys|creationTime|2020-01-09T14:56:24Z
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|creationDate|2020-02-27
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|tag|http://www.semanlink.net/tag/conditional_random_field
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|tag|http://www.semanlink.net/tag/bert
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|tag|http://www.semanlink.net/tag/named_entity_recognition
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|tag|http://www.semanlink.net/tag/wikipedia
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|arxiv_author|Swapnil Ashok Jadhav
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|title|[2002.11402] Detecting Potential Topics In News Using BERT, CRF and Wikipedia
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|bookmarkOf|https://arxiv.org/abs/2002.11402
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|creationTime|2020-02-27T23:36:54Z
+http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|arxiv_summary|"For a news content distribution platform like Dailyhunt, Named Entity
+Recognition is a pivotal task for building better user recommendation and
+notification algorithms. Apart from identifying names, locations, organisations
+from the news for 13+ Indian languages and use them in algorithms, we also need
+to identify n-grams which do not necessarily fit in the definition of
+Named-Entity, yet they are important. For example, ""me too movement"", ""beef
+ban"", ""alwar mob lynching"". In this exercise, given an English language text,
+we are trying to detect case-less n-grams which convey important information
+and can be used as topics and/or hashtags for a news.
Model is built using +Wikipedia titles data, private English news corpus and BERT-Multilingual +pre-trained model, Bi-GRU and CRF architecture. It shows promising results when +compared with industry best Flair, Spacy and Stanford-caseless-NER in terms of +F1 and especially Recall." +http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|arxiv_firstAuthor|Swapnil Ashok Jadhav +http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|arxiv_updated|2020-02-28T18:44:07Z +http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|arxiv_title|Detecting Potential Topics In News Using BERT, CRF and Wikipedia +http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|arxiv_published|2020-02-26T10:48:53Z +http://www.semanlink.net/doc/2020/02/_2002_11402_detecting_potentia|arxiv_num|2002.11402 +http://www.semanlink.net/doc/2020/04/arundhati_roy_%C2%AB_en_inde_le_c|creationDate|2020-04-06 +http://www.semanlink.net/doc/2020/04/arundhati_roy_%C2%AB_en_inde_le_c|tag|http://www.semanlink.net/tag/arundhati_roy +http://www.semanlink.net/doc/2020/04/arundhati_roy_%C2%AB_en_inde_le_c|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/arundhati_roy_%C2%AB_en_inde_le_c|title|Arundhati Roy : « En Inde, le confinement le plus gigantesque et le plus punitif de la planète » +http://www.semanlink.net/doc/2020/04/arundhati_roy_%C2%AB_en_inde_le_c|bookmarkOf|https://www.lemonde.fr/livres/article/2020/04/06/arundhati-roy-en-inde-le-confinement-le-plus-gigantesque-et-le-plus-punitif-de-la-planete_6035741_3260.html +http://www.semanlink.net/doc/2020/04/arundhati_roy_%C2%AB_en_inde_le_c|creationTime|2020-04-06T19:31:38Z +http://www.semanlink.net/doc/2020/01/interpretable_named_entity_reco|creationDate|2020-01-09 +http://www.semanlink.net/doc/2020/01/interpretable_named_entity_reco|tag|http://www.semanlink.net/tag/lime +http://www.semanlink.net/doc/2020/01/interpretable_named_entity_reco|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2020/01/interpretable_named_entity_reco|comment|Same author: [NER with BERT](/doc/2020/01/named_entity_recognition_with_b) +http://www.semanlink.net/doc/2020/01/interpretable_named_entity_reco|relatedDoc|http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b +http://www.semanlink.net/doc/2020/01/interpretable_named_entity_reco|title|Interpretable Named entity recognition with keras and LIME – Depends on the definition +http://www.semanlink.net/doc/2020/01/interpretable_named_entity_reco|bookmarkOf|https://www.depends-on-the-definition.com/interpretable-named-entity-recognition/ +http://www.semanlink.net/doc/2020/01/interpretable_named_entity_reco|creationTime|2020-01-09T02:03:56Z +http://www.semanlink.net/doc/2019/09/introducing_neural_structured_l|creationDate|2019-09-03 +http://www.semanlink.net/doc/2019/09/introducing_neural_structured_l|tag|http://www.semanlink.net/tag/tensorflow +http://www.semanlink.net/doc/2019/09/introducing_neural_structured_l|tag|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/doc/2019/09/introducing_neural_structured_l|comment|Neural Structured Learning (NSL) is an open source framework for training deep neural networks with structured signals. It implements Neural Graph Learning, which enables developers to train neural networks using graphs. 
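The Neural Structured Learning comment above describes training neural networks with graph-structured signals. A hedged sketch of the underlying idea, a supervised loss plus a neighbor-agreement penalty over graph edges; the names below are illustrative placeholders, not the TensorFlow NSL API:

```python
import numpy as np

def graph_regularized_loss(supervised_loss, embeddings, edges, alpha=0.1):
    # embeddings: (n, d) sample representations; edges: (i, j) neighbor pairs
    # from the graph. Neighboring samples are pushed toward similar embeddings.
    neighbor_penalty = sum(
        float(np.sum((embeddings[i] - embeddings[j]) ** 2)) for i, j in edges
    )
    return supervised_loss + alpha * neighbor_penalty

# toy usage: 4 samples, 2 dims, a 3-edge graph
emb = np.random.randn(4, 2)
print(graph_regularized_loss(1.25, emb, edges=[(0, 1), (1, 2), (2, 3)]))
```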
+http://www.semanlink.net/doc/2019/09/introducing_neural_structured_l|title|Introducing Neural Structured Learning in TensorFlow +http://www.semanlink.net/doc/2019/09/introducing_neural_structured_l|bookmarkOf|https://medium.com/tensorflow/introducing-neural-structured-learning-in-tensorflow-5a802efd7afd +http://www.semanlink.net/doc/2019/09/introducing_neural_structured_l|creationTime|2019-09-03T19:01:32Z +http://www.semanlink.net/doc/2020/01/three_years_after_the_w3c_appro|creationDate|2020-01-09 +http://www.semanlink.net/doc/2020/01/three_years_after_the_w3c_appro|tag|http://www.semanlink.net/tag/brouteur +http://www.semanlink.net/doc/2020/01/three_years_after_the_w3c_appro|tag|http://www.semanlink.net/tag/eme +http://www.semanlink.net/doc/2020/01/three_years_after_the_w3c_appro|tag|http://www.semanlink.net/tag/drm +http://www.semanlink.net/doc/2020/01/three_years_after_the_w3c_appro|tag|http://www.semanlink.net/tag/w3c +http://www.semanlink.net/doc/2020/01/three_years_after_the_w3c_appro|tag|http://www.semanlink.net/tag/harry_halpin +http://www.semanlink.net/doc/2020/01/three_years_after_the_w3c_appro|title|Three years after the W3C approved a DRM standard, it's no longer possible to make a functional indie browser / Boing Boing +http://www.semanlink.net/doc/2020/01/three_years_after_the_w3c_appro|bookmarkOf|https://boingboing.net/2020/01/08/rip-open-web-platform.html +http://www.semanlink.net/doc/2020/01/three_years_after_the_w3c_appro|creationTime|2020-01-09T23:12:02Z +http://www.semanlink.net/doc/2020/05/disambiguating_kbpedia_knowledg|creationDate|2020-05-03 +http://www.semanlink.net/doc/2020/05/disambiguating_kbpedia_knowledg|tag|http://www.semanlink.net/tag/kbpedia +http://www.semanlink.net/doc/2020/05/disambiguating_kbpedia_knowledg|tag|http://www.semanlink.net/tag/target_entity_disambiguation +http://www.semanlink.net/doc/2020/05/disambiguating_kbpedia_knowledg|tag|http://www.semanlink.net/tag/deepwalk +http://www.semanlink.net/doc/2020/05/disambiguating_kbpedia_knowledg|title|Disambiguating KBpedia Knowledge Graph Concepts +http://www.semanlink.net/doc/2020/05/disambiguating_kbpedia_knowledg|bookmarkOf|https://kbpedia.org/use-cases/disambiguating-kbpedia-knowledge-graph-concepts/ +http://www.semanlink.net/doc/2020/05/disambiguating_kbpedia_knowledg|creationTime|2020-05-03T01:19:08Z +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|creationDate|2020-05-02 +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|tag|http://www.semanlink.net/tag/cross_lingual_nlp +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|tag|http://www.semanlink.net/tag/unsupervised_machine_translation +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|tag|http://www.semanlink.net/tag/ml_evaluation +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_author|Dani Yogatama +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_author|Eneko Agirre +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_author|Mikel Artetxe +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_author|Sebastian Ruder +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_author|Gorka Labaka 
+http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|comment|> a scenario without any parallel data and abundant monolingual data is unrealistic in practice +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|title|[2004.14958] A Call for More Rigor in Unsupervised Cross-lingual Learning +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|bookmarkOf|https://arxiv.org/abs/2004.14958 +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|creationTime|2020-05-02T12:35:54Z +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_summary|"We review motivations, definition, approaches, and methodology for +unsupervised cross-lingual learning and call for a more rigorous position in +each of them. An existing rationale for such research is based on the lack of +parallel data for many of the world's languages. However, we argue that a +scenario without any parallel data and abundant monolingual data is unrealistic +in practice. We also discuss different training signals that have been used in +previous work, which depart from the pure unsupervised setting. We then +describe common methodological issues in tuning and evaluation of unsupervised +cross-lingual models and present best practices. Finally, we provide a unified +outlook for different types of research in this area (i.e., cross-lingual word +embeddings, deep multilingual pretraining, and unsupervised machine +translation) and argue for comparable evaluation of these models." +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_firstAuthor|Mikel Artetxe +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_updated|2020-04-30T17:06:23Z +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_title|A Call for More Rigor in Unsupervised Cross-lingual Learning +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_published|2020-04-30T17:06:23Z +http://www.semanlink.net/doc/2020/05/2004_14958_a_call_for_more_ri|arxiv_num|2004.14958 +http://www.semanlink.net/doc/2020/03/gilda|creationDate|2020-03-22 +http://www.semanlink.net/doc/2020/03/gilda|tag|http://www.semanlink.net/tag/film_americain +http://www.semanlink.net/doc/2020/03/gilda|title|Gilda +http://www.semanlink.net/doc/2020/03/gilda|bookmarkOf|https://en.wikipedia.org/wiki/Gilda +http://www.semanlink.net/doc/2020/03/gilda|creationTime|2020-03-22T19:08:11Z +http://www.semanlink.net/doc/2020/05/differentiable_reasoning_over_t|creationDate|2020-05-16 +http://www.semanlink.net/doc/2020/05/differentiable_reasoning_over_t|tag|http://www.semanlink.net/tag/differentiable_reasoning_over_text +http://www.semanlink.net/doc/2020/05/differentiable_reasoning_over_t|tag|http://www.semanlink.net/tag/question_answering +http://www.semanlink.net/doc/2020/05/differentiable_reasoning_over_t|title|Differentiable Reasoning over Text – Machine Learning Blog ML@CMU Carnegie Mellon University +http://www.semanlink.net/doc/2020/05/differentiable_reasoning_over_t|bookmarkOf|https://blog.ml.cmu.edu/2020/05/15/differentiable-reasoning-over-text/ +http://www.semanlink.net/doc/2020/05/differentiable_reasoning_over_t|creationTime|2020-05-16T12:17:07Z +http://www.semanlink.net/doc/2019/09/machine_learning_for_unbalanced|creationDate|2019-09-29 +http://www.semanlink.net/doc/2019/09/machine_learning_for_unbalanced|tag|http://www.semanlink.net/tag/sample_code 
+http://www.semanlink.net/doc/2019/09/machine_learning_for_unbalanced|tag|http://www.semanlink.net/tag/imbalanced_data +http://www.semanlink.net/doc/2019/09/machine_learning_for_unbalanced|title|Machine Learning for Unbalanced Datasets using Neural Networks +http://www.semanlink.net/doc/2019/09/machine_learning_for_unbalanced|bookmarkOf|https://medium.com/analytics-vidhya/machine-learning-for-unbalanced-datasets-using-neural-networks-b0fc28ef6261 +http://www.semanlink.net/doc/2019/09/machine_learning_for_unbalanced|creationTime|2019-09-29T11:35:49Z +http://www.semanlink.net/doc/2019/08/knowledge_graphs_in_natural_lan|creationDate|2019-08-05 +http://www.semanlink.net/doc/2019/08/knowledge_graphs_in_natural_lan|tag|http://www.semanlink.net/tag/acl_2019 +http://www.semanlink.net/doc/2019/08/knowledge_graphs_in_natural_lan|tag|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/doc/2019/08/knowledge_graphs_in_natural_lan|comment|"- Dialogue Systems over KGs +- Natural Language Generation of KG facts +- Complex QA over KGs +- KG Embeddings & Graph Representations +" +http://www.semanlink.net/doc/2019/08/knowledge_graphs_in_natural_lan|title|Knowledge graphs in Natural Language Processing @ ACL 2019 - Michael Galkin +http://www.semanlink.net/doc/2019/08/knowledge_graphs_in_natural_lan|bookmarkOf|https://migalkin.github.io/posts/2019/08/04/post/ +http://www.semanlink.net/doc/2019/08/knowledge_graphs_in_natural_lan|creationTime|2019-08-05T14:23:54Z +http://www.semanlink.net/doc/2019/06/20_accuracy_bump_in_text_class|creationDate|2019-06-23 +http://www.semanlink.net/doc/2019/06/20_accuracy_bump_in_text_class|tag|http://www.semanlink.net/tag/ulmfit +http://www.semanlink.net/doc/2019/06/20_accuracy_bump_in_text_class|tag|http://www.semanlink.net/tag/combining_text_and_structured_data_ml_nlp +http://www.semanlink.net/doc/2019/06/20_accuracy_bump_in_text_class|tag|http://www.semanlink.net/tag/entities +http://www.semanlink.net/doc/2019/06/20_accuracy_bump_in_text_class|tag|http://www.semanlink.net/tag/nlp_text_classification +http://www.semanlink.net/doc/2019/06/20_accuracy_bump_in_text_class|title|20% Accuracy Bump in Text Classification with ME-ULMFiT +http://www.semanlink.net/doc/2019/06/20_accuracy_bump_in_text_class|bookmarkOf|https://www.novetta.com/2019/06/accuracy_bump_meulmfit/ +http://www.semanlink.net/doc/2019/06/20_accuracy_bump_in_text_class|creationTime|2019-06-23T23:58:05Z +http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_|creationDate|2019-11-24 +http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_|tag|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_|tag|http://www.semanlink.net/tag/elasticsearch_annotated_text_field +http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_|tag|http://www.semanlink.net/tag/rss +http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_|comment|[cf](/doc/2019/11/meetup_paris_40_beyond_plain) +http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_|relatedDoc|http://www.semanlink.net/doc/2019/11/meetup_paris_40_beyond_plain +http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_|title|Elasticsearch RSS feed indexer with Spacy entity extraction +http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_|bookmarkOf|https://gist.github.com/markharwood/e649882e8f44d40a68f60e89ffb131bf 
+http://www.semanlink.net/doc/2019/11/elasticsearch_rss_feed_indexer_|creationTime|2019-11-24T17:30:16Z +http://www.semanlink.net/doc/2019/08/efficient_compression_in_color_|creationDate|2019-08-15 +http://www.semanlink.net/doc/2019/08/efficient_compression_in_color_|tag|http://www.semanlink.net/tag/color_naming +http://www.semanlink.net/doc/2019/08/efficient_compression_in_color_|tag|http://www.semanlink.net/tag/langage +http://www.semanlink.net/doc/2019/08/efficient_compression_in_color_|tag|http://www.semanlink.net/tag/information_bottleneck_method +http://www.semanlink.net/doc/2019/08/efficient_compression_in_color_|tag|http://www.semanlink.net/tag/naftali_tishby +http://www.semanlink.net/doc/2019/08/efficient_compression_in_color_|comment|"The Information Bottleneck principle applied to linguistics. + +>We argue that **languages efficiently compress ideas into words by optimizing the information bottleneck trade-off** between the complexity and accuracy of the lexicon. We test this proposal in the domain of color naming. + +word meanings may reflect adaptation to pressure for efficient communication— +that is, communication that is precise yet requires only minimal +cognitive resources. + +" +http://www.semanlink.net/doc/2019/08/efficient_compression_in_color_|title|Efficient compression in color naming and its evolution +http://www.semanlink.net/doc/2019/08/efficient_compression_in_color_|bookmarkOf|https://www.nogsky.com/publication/2018a-pnas/2018a-PNAS.pdf +http://www.semanlink.net/doc/2019/08/efficient_compression_in_color_|creationTime|2019-08-15T17:39:48Z +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|creationDate|2019-09-16 +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|arxiv_author|Rico Sennrich +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|arxiv_author|Elena Voita +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|arxiv_author|Ivan Titov +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|comment|[blog post](http://www.semanlink.net/doc/2019/09/evolution_of_representations_in) +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|relatedDoc|http://www.semanlink.net/doc/2019/09/evolution_of_representations_in +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|title|[1909.01380] The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|bookmarkOf|https://arxiv.org/abs/1909.01380 +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|creationTime|2019-09-16T23:50:52Z +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|arxiv_summary|"We seek to understand how the representations of individual tokens and the +structure of the learned feature space evolve between layers in deep neural +networks under different learning objectives. We focus on the Transformers for +our analysis as they have been shown effective on various tasks, including +machine translation (MT), standard left-to-right language models (LM) and +masked language modeling (MLM). Previous work used black-box probing tasks to +show that the representations learned by the Transformer differ significantly +depending on the objective. 
In this work, we use canonical correlation analysis +and mutual information estimators to study how information flows across +Transformer layers and how this process depends on the choice of learning +objective. For example, as you go from bottom to top layers, information about +the past in left-to-right language models gets vanished and predictions about +the future get formed. In contrast, for MLM, representations initially acquire +information about the context around the token, partially forgetting the token +identity and producing a more generalized token representation. The token +identity then gets recreated at the top MLM layers." +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|arxiv_firstAuthor|Elena Voita +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|arxiv_updated|2019-09-03T18:06:03Z +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|arxiv_title|The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|arxiv_published|2019-09-03T18:06:03Z +http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol|arxiv_num|1909.01380 +http://www.semanlink.net/doc/2019/12/hardmaru_sur_twitter_legendr|creationDate|2019-12-11 +http://www.semanlink.net/doc/2019/12/hardmaru_sur_twitter_legendr|tag|http://www.semanlink.net/tag/time_series +http://www.semanlink.net/doc/2019/12/hardmaru_sur_twitter_legendr|tag|http://www.semanlink.net/tag/lstm_networks +http://www.semanlink.net/doc/2019/12/hardmaru_sur_twitter_legendr|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/12/hardmaru_sur_twitter_legendr|comment|> Using orthogonal property of Legendre polynomials, Legendre Memory Units (LMU) can efficiently handle temporal dependencies spanning 100k timesteps, converge rapidly and use fewer internal state-variables compares to LSTMs. +http://www.semanlink.net/doc/2019/12/hardmaru_sur_twitter_legendr|title|hardmaru sur Twitter : Legendre Memory Units +http://www.semanlink.net/doc/2019/12/hardmaru_sur_twitter_legendr|bookmarkOf|https://twitter.com/hardmaru/status/1204668659630665728?s=20 +http://www.semanlink.net/doc/2019/12/hardmaru_sur_twitter_legendr|creationTime|2019-12-11T16:43:04Z +http://www.semanlink.net/doc/2020/02/the_men_who_starved_to_death_to|creationDate|2020-02-12 +http://www.semanlink.net/doc/2020/02/the_men_who_starved_to_death_to|tag|http://www.semanlink.net/tag/leningrad +http://www.semanlink.net/doc/2020/02/the_men_who_starved_to_death_to|tag|http://www.semanlink.net/tag/nikolai_vavilov +http://www.semanlink.net/doc/2020/02/the_men_who_starved_to_death_to|tag|http://www.semanlink.net/tag/urss +http://www.semanlink.net/doc/2020/02/the_men_who_starved_to_death_to|tag|http://www.semanlink.net/tag/heroisme +http://www.semanlink.net/doc/2020/02/the_men_who_starved_to_death_to|comment|> During the siege of Leningrad, a group of Russian botanists starved to death rather than consume the greatest collection of seeds they were guarding. Nikolay Vavilov, the man who had collected the seeds, also died of hunger in Stalin’s gulag. 
+http://www.semanlink.net/doc/2020/02/the_men_who_starved_to_death_to|title|The men who starved to death to save the world's seeds - Russia Beyond +http://www.semanlink.net/doc/2020/02/the_men_who_starved_to_death_to|bookmarkOf|https://www.rbth.com/blogs/2014/05/12/the_men_who_starved_to_death_to_save_the_worlds_seeds_35135 +http://www.semanlink.net/doc/2020/02/the_men_who_starved_to_death_to|creationTime|2020-02-12T00:39:56Z +http://www.semanlink.net/doc/2019/12/la_venus_de_tursac_%7C_musee_ar|creationDate|2019-12-04 +http://www.semanlink.net/doc/2019/12/la_venus_de_tursac_%7C_musee_ar|tag|http://www.semanlink.net/tag/venus_prehistoriques +http://www.semanlink.net/doc/2019/12/la_venus_de_tursac_%7C_musee_ar|title|"La ""Vénus"" de Tursac Musée archéologie nationale" +http://www.semanlink.net/doc/2019/12/la_venus_de_tursac_%7C_musee_ar|bookmarkOf|https://musee-archeologienationale.fr/objet/la-venus-de-tursac +http://www.semanlink.net/doc/2019/12/la_venus_de_tursac_%7C_musee_ar|creationTime|2019-12-04T20:13:30Z +http://www.semanlink.net/doc/2019/08/make_delegation_work_in_python_|creationDate|2019-08-07 +http://www.semanlink.net/doc/2019/08/make_delegation_work_in_python_|tag|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/doc/2019/08/make_delegation_work_in_python_|tag|http://www.semanlink.net/tag/python_tips +http://www.semanlink.net/doc/2019/08/make_delegation_work_in_python_|title|Make Delegation Work in Python · fast.ai +http://www.semanlink.net/doc/2019/08/make_delegation_work_in_python_|bookmarkOf|https://www.fast.ai/2019/08/06/delegation/ +http://www.semanlink.net/doc/2019/08/make_delegation_work_in_python_|creationTime|2019-08-07T08:33:32Z +http://www.semanlink.net/doc/2019/08/soja_bresilien_la_meprise_d%E2%80%99e|creationDate|2019-08-31 +http://www.semanlink.net/doc/2019/08/soja_bresilien_la_meprise_d%E2%80%99e|tag|http://www.semanlink.net/tag/ogm +http://www.semanlink.net/doc/2019/08/soja_bresilien_la_meprise_d%E2%80%99e|tag|http://www.semanlink.net/tag/soja +http://www.semanlink.net/doc/2019/08/soja_bresilien_la_meprise_d%E2%80%99e|tag|http://www.semanlink.net/tag/deforestation +http://www.semanlink.net/doc/2019/08/soja_bresilien_la_meprise_d%E2%80%99e|tag|http://www.semanlink.net/tag/union_europeenne +http://www.semanlink.net/doc/2019/08/soja_bresilien_la_meprise_d%E2%80%99e|title|Soja brésilien : la méprise d’Elisabeth Borne sur les OGM qui « ne sont pas autorisés en Europe » +http://www.semanlink.net/doc/2019/08/soja_bresilien_la_meprise_d%E2%80%99e|bookmarkOf|https://www.lemonde.fr/les-decodeurs/article/2019/08/28/soja-bresilien-la-meprise-d-elisabeth-borne-sur-les-ogm-qui-ne-sont-pas-autorises-en-europe_5503671_4355770.html +http://www.semanlink.net/doc/2019/08/soja_bresilien_la_meprise_d%E2%80%99e|creationTime|2019-08-31T11:44:34Z +http://www.semanlink.net/doc/2019/09/richard_stallman_l_utilisate|creationDate|2019-09-17 +http://www.semanlink.net/doc/2019/09/richard_stallman_l_utilisate|tag|http://www.semanlink.net/tag/richard_stallman +http://www.semanlink.net/doc/2019/09/richard_stallman_l_utilisate|tag|http://www.semanlink.net/tag/logiciel_libre +http://www.semanlink.net/doc/2019/09/richard_stallman_l_utilisate|title|"Richard Stallman : ""L'utilisateur doit contrôler le programme, pas l'inverse""" +http://www.semanlink.net/doc/2019/09/richard_stallman_l_utilisate|bookmarkOf|https://www.lemonde.fr/technologies/article/2012/03/14/richard-stallman-l-utilisateur-doit-controler-le-programme-pas-l-inverse_1535920_651865.html 
+http://www.semanlink.net/doc/2019/09/richard_stallman_l_utilisate|creationTime|2019-09-17T20:49:45Z +http://www.semanlink.net/doc/2020/01/the_independent_sur_twitter_|creationDate|2020-01-05 +http://www.semanlink.net/doc/2020/01/the_independent_sur_twitter_|tag|http://www.semanlink.net/tag/rechauffement_climatique +http://www.semanlink.net/doc/2020/01/the_independent_sur_twitter_|tag|http://www.semanlink.net/tag/alexandria_ocasio_cortez +http://www.semanlink.net/doc/2020/01/the_independent_sur_twitter_|tag|http://www.semanlink.net/tag/exxonmobil +http://www.semanlink.net/doc/2020/01/the_independent_sur_twitter_|title|"The Independent sur Twitter : ""Alexandria Ocasio-Cortez grills former Exxon scientists on oil giant's climate change denial""" +http://www.semanlink.net/doc/2020/01/the_independent_sur_twitter_|bookmarkOf|https://twitter.com/Independent/status/1187719206562910209 +http://www.semanlink.net/doc/2020/01/the_independent_sur_twitter_|creationTime|2020-01-05T18:50:01Z +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|creationDate|2020-03-17 +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|tag|http://www.semanlink.net/tag/automl +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|tag|http://www.semanlink.net/tag/backpropagation_vs_biology +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|tag|http://www.semanlink.net/tag/quoc_le +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|tag|http://www.semanlink.net/tag/evolutionary_algorithm +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|arxiv_author|David R. So +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|arxiv_author|Chen Liang +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|arxiv_author|Esteban Real +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|arxiv_author|Quoc V. Le +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|comment|"> Fun AutoML-Zero experiments: Evolutionary search discovers fundamental ML algorithms from scratch, e.g., small neural nets with backprop. +> Can evolution be the “Master Algorithm”? ;)" +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|title|[2003.03384] AutoML-Zero: Evolving Machine Learning Algorithms From Scratch +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|bookmarkOf|https://arxiv.org/abs/2003.03384 +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|creationTime|2020-03-17T21:57:40Z +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|arxiv_summary|"Machine learning research has advanced in multiple aspects, including model +structures and learning methods. The effort to automate such research, known as +AutoML, has also made significant progress. However, this progress has largely +focused on the architecture of neural networks, where it has relied on +sophisticated expert-designed layers as building blocks---or similarly +restrictive search spaces. Our goal is to show that AutoML can go further: it +is possible today to automatically discover complete machine learning +algorithms just using basic mathematical operations as building blocks. We +demonstrate this by introducing a novel framework that significantly reduces +human bias through a generic search space. 
Despite the vastness of this space, +evolutionary search can still discover two-layer neural networks trained by +backpropagation. These simple neural networks can then be surpassed by evolving +directly on tasks of interest, e.g. CIFAR-10 variants, where modern techniques +emerge in the top algorithms, such as bilinear interactions, normalized +gradients, and weight averaging. Moreover, evolution adapts algorithms to +different task types: e.g., dropout-like techniques appear when little data is +available. We believe these preliminary successes in discovering machine +learning algorithms from scratch indicate a promising new direction for the +field." +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|arxiv_firstAuthor|Esteban Real +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|arxiv_updated|2020-03-06T19:00:04Z +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|arxiv_title|AutoML-Zero: Evolving Machine Learning Algorithms From Scratch +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|arxiv_published|2020-03-06T19:00:04Z +http://www.semanlink.net/doc/2020/03/_2003_03384_automl_zero_evolv|arxiv_num|2003.03384 +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|creationDate|2020-04-16 +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|tag|http://www.semanlink.net/tag/pixelwise_dense_prediction +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|arxiv_author|Changyong Shun +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|arxiv_author|Jingdong Wang +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|arxiv_author|Yifan Liu +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|arxiv_author|Chunhua Shen +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|title|[1903.04197] Structured Knowledge Distillation for Dense Prediction +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|bookmarkOf|https://arxiv.org/abs/1903.04197 +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|creationTime|2020-04-16T14:13:03Z +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|arxiv_summary|"In this paper, we consider transferring the structure information from large +networks to small ones for dense prediction tasks. Previous knowledge +distillation strategies used for dense prediction tasks often directly borrow +the distillation scheme for image classification and perform knowledge +distillation for each pixel separately, leading to sub-optimal performance. +Here we propose to distill structured knowledge from large networks to small +networks, taking into account the fact that dense prediction is a structured +prediction problem. Specifically, we study two structured distillation schemes: +i)pair-wise distillation that distills the pairwise similarities by building a +static graph, and ii)holistic distillation that uses adversarial training to +distill holistic knowledge. The effectiveness of our knowledge distillation +approaches is demonstrated by extensive experiments on three dense prediction +tasks: semantic segmentation, depth estimation, and object detection." 
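The structured knowledge distillation abstract above names a "pair-wise distillation" scheme that distills pairwise similarities from teacher (T) to student (S). A hedged sketch of what such a loss typically looks like, using cosine similarities over N feature vectors f_i; the paper's exact weighting and normalization may differ:

```latex
a_{ij} = \frac{f_i^{\top} f_j}{\lVert f_i \rVert_2 \, \lVert f_j \rVert_2},
\qquad
\mathcal{L}_{\mathrm{pair}} = \frac{1}{N^{2}} \sum_{i=1}^{N} \sum_{j=1}^{N}
  \left( a_{ij}^{S} - a_{ij}^{T} \right)^{2}
```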
+http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|arxiv_firstAuthor|Yifan Liu +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|arxiv_updated|2020-02-20T23:52:50Z +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|arxiv_title|Structured Knowledge Distillation for Dense Prediction +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|arxiv_published|2019-03-11T10:05:09Z +http://www.semanlink.net/doc/2020/04/1903_04197_structured_knowled|arxiv_num|1903.04197 +http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text|creationDate|2019-12-14 +http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text|tag|http://www.semanlink.net/tag/afia +http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text|tag|http://www.semanlink.net/tag/ludovic_denoyer +http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text|tag|http://www.semanlink.net/tag/unsupervised_machine_learning +http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text|comment|includes presentation of [Educe](/doc/2019/12/_1905_11852_educe_explaining_) +http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text|relatedDoc|http://www.semanlink.net/doc/2019/12/_1905_11852_educe_explaining_ +http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text|title|Unsupervised Learning with Text (AFIA 2019) +http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text|bookmarkOf|https://ia-ri.sciencesconf.org/data/aria_afia_denoyer.pdf +http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text|creationTime|2019-12-14T01:11:17Z +http://www.semanlink.net/doc/2019/12/unsupervised_learning_with_text|mainDoc|http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2 +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|creationDate|2020-05-01 +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|tag|http://www.semanlink.net/tag/explainable_ai +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|arxiv_author|Derek Doran +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|arxiv_author|Gabrielle Ras +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|arxiv_author|Ning Xie +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|arxiv_author|Marcel van Gerven +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|title|[2004.14545] Explainable Deep Learning: A Field Guide for the Uninitiated +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|bookmarkOf|https://arxiv.org/abs/2004.14545 +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|creationTime|2020-05-01T13:56:26Z +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|arxiv_summary|"Deep neural network (DNN) is an indispensable machine learning tool for +achieving human-level performance on many learning tasks. Yet, due to its +black-box nature, it is inherently difficult to understand which aspects of the +input data drive the decisions of the network. There are various real-world +scenarios in which humans need to make actionable decisions based on the output +DNNs. Such decision support systems can be found in critical domains, such as +legislation, law enforcement, etc. 
It is important that the humans making +high-level decisions can be sure that the DNN decisions are driven by +combinations of data features that are appropriate in the context of the +deployment of the decision support system and that the decisions made are +legally or ethically defensible. Due to the incredible pace at which DNN +technology is being developed, the development of new methods and studies on +explaining the decision-making process of DNNs has blossomed into an active +research field. A practitioner beginning to study explainable deep learning may +be intimidated by the plethora of orthogonal directions the field is taking. +This complexity is further exacerbated by the general confusion that exists in +defining what it means to be able to explain the actions of a deep learning +system and to evaluate a system's ""ability to explain"". To alleviate this +problem, this article offers a ""field guide"" to deep learning explainability +for those uninitiated in the field. The field guide: i) Discusses the traits of +a deep learning system that researchers enhance in explainability research, ii) +places explainability in the context of other related deep learning research +areas, and iii) introduces three simple dimensions defining the space of +foundational methods that contribute to explainable deep learning. The guide is +designed as an easy-to-digest starting point for those just embarking in the +field." +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|arxiv_firstAuthor|Ning Xie +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|arxiv_updated|2020-04-30T02:09:02Z +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|arxiv_title|Explainable Deep Learning: A Field Guide for the Uninitiated +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|arxiv_published|2020-04-30T02:09:02Z +http://www.semanlink.net/doc/2020/05/2004_14545_explainable_deep_l|arxiv_num|2004.14545 +http://www.semanlink.net/doc/2019/08/benedekrozemberczki_awesome_gra|creationDate|2019-08-05 +http://www.semanlink.net/doc/2019/08/benedekrozemberczki_awesome_gra|tag|http://www.semanlink.net/tag/graph +http://www.semanlink.net/doc/2019/08/benedekrozemberczki_awesome_gra|tag|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/doc/2019/08/benedekrozemberczki_awesome_gra|title|benedekrozemberczki/awesome-graph-classification: A collection of important graph embedding, classification and representation learning papers with implementations. +http://www.semanlink.net/doc/2019/08/benedekrozemberczki_awesome_gra|bookmarkOf|https://github.com/benedekrozemberczki/awesome-graph-classification +http://www.semanlink.net/doc/2019/08/benedekrozemberczki_awesome_gra|creationTime|2019-08-05T23:20:38Z +http://www.semanlink.net/doc/2019/06/hamiltonian_neural_networks|creationDate|2019-06-11 +http://www.semanlink.net/doc/2019/06/hamiltonian_neural_networks|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2019/06/hamiltonian_neural_networks|tag|http://www.semanlink.net/tag/ml_and_physics +http://www.semanlink.net/doc/2019/06/hamiltonian_neural_networks|comment|> Even though neural networks enjoy widespread use, they still struggle to learn the basic laws of physics. How might we endow them with better inductive biases? In this paper, we draw inspiration from Hamiltonian mechanics to train models that learn and respect exact conservation laws in an unsupervised manner. 
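The Hamiltonian Neural Networks comment above says the models "learn and respect exact conservation laws". Concretely, the network outputs a scalar H_θ(q, p) and the dynamics follow Hamilton's equations read off its gradients, which conserves H_θ along trajectories (standard Hamiltonian mechanics, not spelled out in the record itself):

```latex
% Hamilton's equations with a learned Hamiltonian H_\theta(q, p);
% integrating these dynamics conserves H_\theta exactly
\frac{dq}{dt} = \frac{\partial H_{\theta}}{\partial p},
\qquad
\frac{dp}{dt} = -\,\frac{\partial H_{\theta}}{\partial q}
```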
+http://www.semanlink.net/doc/2019/06/hamiltonian_neural_networks|title|Hamiltonian Neural Networks +http://www.semanlink.net/doc/2019/06/hamiltonian_neural_networks|bookmarkOf|https://greydanus.github.io/2019/05/15/hamiltonian-nns/ +http://www.semanlink.net/doc/2019/06/hamiltonian_neural_networks|creationTime|2019-06-11T11:51:14Z +http://www.semanlink.net/doc/2020/02/%C2%AB_le_pangolin_tient_il_sa_revan|creationDate|2020-02-16 +http://www.semanlink.net/doc/2020/02/%C2%AB_le_pangolin_tient_il_sa_revan|tag|http://www.semanlink.net/tag/pangolin +http://www.semanlink.net/doc/2020/02/%C2%AB_le_pangolin_tient_il_sa_revan|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/02/%C2%AB_le_pangolin_tient_il_sa_revan|tag|http://www.semanlink.net/tag/chine +http://www.semanlink.net/doc/2020/02/%C2%AB_le_pangolin_tient_il_sa_revan|comment|[#Coronavirus: Pangolin's Revenge?](https://twitter.com/hyperfp/status/1226070112651948032?s=20) +http://www.semanlink.net/doc/2020/02/%C2%AB_le_pangolin_tient_il_sa_revan|title|« Le pangolin tient-il sa revanche avec le nouveau coronavirus ? » +http://www.semanlink.net/doc/2020/02/%C2%AB_le_pangolin_tient_il_sa_revan|bookmarkOf|https://www.lemonde.fr/idees/article/2020/02/15/le-pangolin-tient-il-sa-revanche-avec-le-nouveau-coronavirus_6029645_3232.html +http://www.semanlink.net/doc/2020/02/%C2%AB_le_pangolin_tient_il_sa_revan|creationTime|2020-02-16T11:12:31Z +http://www.semanlink.net/doc/2019/08/web_applications_frameworks_|creationDate|2019-08-11 +http://www.semanlink.net/doc/2019/08/web_applications_frameworks_|tag|http://www.semanlink.net/tag/python +http://www.semanlink.net/doc/2019/08/web_applications_frameworks_|tag|http://www.semanlink.net/tag/frameworks +http://www.semanlink.net/doc/2019/08/web_applications_frameworks_|tag|http://www.semanlink.net/tag/web_dev +http://www.semanlink.net/doc/2019/08/web_applications_frameworks_|title|Web Applications & Frameworks — The Hitchhiker's Guide to Python +http://www.semanlink.net/doc/2019/08/web_applications_frameworks_|bookmarkOf|https://docs.python-guide.org/scenarios/web/ +http://www.semanlink.net/doc/2019/08/web_applications_frameworks_|creationTime|2019-08-11T23:56:22Z +http://www.semanlink.net/doc/2020/04/damien_henry_sur_twitter_thi|creationDate|2020-04-04 +http://www.semanlink.net/doc/2020/04/damien_henry_sur_twitter_thi|tag|http://www.semanlink.net/tag/arxiv +http://www.semanlink.net/doc/2020/04/damien_henry_sur_twitter_thi|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/damien_henry_sur_twitter_thi|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/04/damien_henry_sur_twitter_thi|tag|http://www.semanlink.net/tag/andrej_karpathy +http://www.semanlink.net/doc/2020/04/damien_henry_sur_twitter_thi|title|"Damien Henry sur Twitter : ""This code is so beautifully written, it almost hurts.""" +http://www.semanlink.net/doc/2020/04/damien_henry_sur_twitter_thi|bookmarkOf|https://twitter.com/dh7net/status/1246060429681532931?s=20 +http://www.semanlink.net/doc/2020/04/damien_henry_sur_twitter_thi|creationTime|2020-04-04T11:02:50Z +http://www.semanlink.net/doc/2019/10/improving_long_form_question_an|creationDate|2019-10-24 +http://www.semanlink.net/doc/2019/10/improving_long_form_question_an|tag|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/doc/2019/10/improving_long_form_question_an|tag|http://www.semanlink.net/tag/facebook_fair 
+http://www.semanlink.net/doc/2019/10/improving_long_form_question_an|tag|http://www.semanlink.net/tag/question_answering
+http://www.semanlink.net/doc/2019/10/improving_long_form_question_an|tag|http://www.semanlink.net/tag/ai_facebook
+http://www.semanlink.net/doc/2019/10/improving_long_form_question_an|comment|Facebook AI has developed a new method that improves the performance of long-form question answering models by compressing the text that’s used to train them, turning 300,000-word sections into 10,000-word linearized knowledge graphs
+http://www.semanlink.net/doc/2019/10/improving_long_form_question_an|title|Improving long-form question answering by compressing search results
+http://www.semanlink.net/doc/2019/10/improving_long_form_question_an|bookmarkOf|https://ai.facebook.com/blog/research-in-brief-training-ai-to-answer-questions-using-compressed-search-results/
+http://www.semanlink.net/doc/2019/10/improving_long_form_question_an|creationTime|2019-10-24T08:24:29Z
+http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr|creationDate|2020-05-04
+http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings
+http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr|tag|http://www.semanlink.net/tag/critical_evaluation
+http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr|comment|"evidence of the limitations of the evaluation of knowledge graph embedding approaches. (The paper addresses only structure-based embeddings)
+
+> extensive **experimental study with popular KGE model architectures
+and training strategies across a wide range of hyperparameter settings**. We found that when trained
+appropriately, the relative performance differences between various model architectures often shrunk
+and sometimes even reversed when compared to prior results. This suggests that (at least currently)
+**training strategies have a significant impact on model performance and may account for a substantial
+fraction of the progress made in recent years**.
+
+[Library](https://github.com/uma-pi1/kge)
+
+[Tweet](doc:2020/05/peter_bloem_sur_twitter_one_) by [Peter Bloem](tag:peter_bloem)"
+http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr|relatedDoc|http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib
+http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr|relatedDoc|http://www.semanlink.net/doc/2020/05/peter_bloem_sur_twitter_one_
+http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr|title|You CAN Teach an Old Dog New Tricks! 
On Training Knowledge Graph Embeddings (ICLR 2020) +http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr|bookmarkOf|https://openreview.net/forum?id=BkxSmlBFvr +http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr|creationTime|2020-05-04T21:53:32Z +http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr|mainDoc|http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib +http://www.semanlink.net/doc/2020/01/crossing_divides_how_a_social_|creationDate|2020-01-24 +http://www.semanlink.net/doc/2020/01/crossing_divides_how_a_social_|tag|http://www.semanlink.net/tag/consensus +http://www.semanlink.net/doc/2020/01/crossing_divides_how_a_social_|tag|http://www.semanlink.net/tag/taiwan +http://www.semanlink.net/doc/2020/01/crossing_divides_how_a_social_|tag|http://www.semanlink.net/tag/social_networks +http://www.semanlink.net/doc/2020/01/crossing_divides_how_a_social_|tag|http://www.semanlink.net/tag/democratie +http://www.semanlink.net/doc/2020/01/crossing_divides_how_a_social_|comment|In Taiwan, a social media platform to reach political consensus on tough questions +http://www.semanlink.net/doc/2020/01/crossing_divides_how_a_social_|title|Crossing Divides: How a social network could save democracy from deadlock - BBC News +http://www.semanlink.net/doc/2020/01/crossing_divides_how_a_social_|bookmarkOf|https://www.bbc.com/news/amp/technology-50127713?__twitter_impression=true +http://www.semanlink.net/doc/2020/01/crossing_divides_how_a_social_|creationTime|2020-01-24T13:24:35Z +http://www.semanlink.net/doc/2019/07/l%E2%80%99ile_henderson_paradis_noye_s|creationDate|2019-07-30 +http://www.semanlink.net/doc/2019/07/l%E2%80%99ile_henderson_paradis_noye_s|tag|http://www.semanlink.net/tag/plastic +http://www.semanlink.net/doc/2019/07/l%E2%80%99ile_henderson_paradis_noye_s|title|L’île Henderson, paradis noyé sous le plastique +http://www.semanlink.net/doc/2019/07/l%E2%80%99ile_henderson_paradis_noye_s|bookmarkOf|https://www.lemonde.fr/planete/article/2019/07/30/au-milieu-du-pacifique-l-ile-henderson-paradis-noye-sous-le-plastique_5494917_3244.html +http://www.semanlink.net/doc/2019/07/l%E2%80%99ile_henderson_paradis_noye_s|creationTime|2019-07-30T15:35:17Z +http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|creationDate|2020-01-07 +http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|tag|http://www.semanlink.net/tag/nlp_juridique +http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|tag|http://www.semanlink.net/tag/nlp_ibm +http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|tag|http://www.semanlink.net/tag/human_in_the_loop +http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|tag|http://www.semanlink.net/tag/nlp_in_enterprise +http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|tag|http://www.semanlink.net/tag/combining_knowledge_graphs +http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|tag|http://www.semanlink.net/tag/acl_2019 +http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|tag|http://www.semanlink.net/tag/attention_knowledge_graphs +http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|tag|http://www.semanlink.net/tag/abstract_meaning_representation +http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|comment|"Reviews 4 papers by IBM research. 
+
+Introductory remark: the specificities of search in enterprises when compared to the web:
+content stored in silos with much less repetition of key information,
+intricate questions expecting detailed answers,
+reluctance to black-box systems.
+Regarding NLP: silos, incomplete data, small data, changing environment.
+
+-> 3 themes of research at IBM Research to improve NLP for enterprises:
+
+- systems that can work with small data, external knowledge and use neurosymbolic approaches to language
+- explainability on how a system reached a conclusion
+- scaling to allow continuous adaptation"
+http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|title|Advancing Natural Language Processing (NLP) for Enterprise Domains
+http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|bookmarkOf|https://towardsdatascience.com/advancing-natural-language-processing-nlp-for-enterprise-domains-1060052294a
+http://www.semanlink.net/doc/2020/01/advancing_natural_language_proc|creationTime|2020-01-07T12:05:46Z
+http://www.semanlink.net/doc/2019/06/le_philosophe_et_academicien_mi|creationDate|2019-06-02
+http://www.semanlink.net/doc/2019/06/le_philosophe_et_academicien_mi|tag|http://www.semanlink.net/tag/michel_serres
+http://www.semanlink.net/doc/2019/06/le_philosophe_et_academicien_mi|title|Le philosophe et académicien Michel Serres est mort
+http://www.semanlink.net/doc/2019/06/le_philosophe_et_academicien_mi|bookmarkOf|https://www.lemonde.fr/disparitions/article/2019/06/01/le-philosophe-et-academicien-michel-serres-est-mort_5470322_3382.html
+http://www.semanlink.net/doc/2019/06/le_philosophe_et_academicien_mi|creationTime|2019-06-02T09:46:55Z
+http://www.semanlink.net/doc/2020/01/cs224u_natural_language_unders|creationDate|2020-01-05
+http://www.semanlink.net/doc/2020/01/cs224u_natural_language_unders|tag|http://www.semanlink.net/tag/nlu
+http://www.semanlink.net/doc/2020/01/cs224u_natural_language_unders|tag|http://www.semanlink.net/tag/online_course_materials
+http://www.semanlink.net/doc/2020/01/cs224u_natural_language_unders|tag|http://www.semanlink.net/tag/nlp_stanford
+http://www.semanlink.net/doc/2020/01/cs224u_natural_language_unders|title|CS224U: Natural Language Understanding
+http://www.semanlink.net/doc/2020/01/cs224u_natural_language_unders|bookmarkOf|http://onlinehub.stanford.edu/cs224u-natural-language-understanding
+http://www.semanlink.net/doc/2020/01/cs224u_natural_language_unders|creationTime|2020-01-05T18:12:42Z
+http://www.semanlink.net/doc/2019/06/papers_acl_2019|creationDate|2019-06-12
+http://www.semanlink.net/doc/2019/06/papers_acl_2019|tag|http://www.semanlink.net/tag/nlp_conference
+http://www.semanlink.net/doc/2019/06/papers_acl_2019|title|Papers - ACL 2019
+http://www.semanlink.net/doc/2019/06/papers_acl_2019|bookmarkOf|http://www.acl2019.org/EN/program/papers.xhtml
+http://www.semanlink.net/doc/2019/06/papers_acl_2019|creationTime|2019-06-12T20:32:27Z
+http://www.semanlink.net/doc/2019/08/graph_transformer_%7C_openreview|creationDate|2019-08-23
+http://www.semanlink.net/doc/2019/08/graph_transformer_%7C_openreview|tag|http://www.semanlink.net/tag/attention_in_graphs
+http://www.semanlink.net/doc/2019/08/graph_transformer_%7C_openreview|title|Graph Transformer OpenReview
+http://www.semanlink.net/doc/2019/08/graph_transformer_%7C_openreview|bookmarkOf|https://openreview.net/forum?id=HJei-2RcK7
+http://www.semanlink.net/doc/2019/08/graph_transformer_%7C_openreview|creationTime|2019-08-23T00:43:51Z 
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|creationDate|2020-03-07
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|tag|http://www.semanlink.net/tag/knowledge_graph
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|tag|http://www.semanlink.net/tag/aidan_hogan
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|tag|http://www.semanlink.net/tag/axel_polleres
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|tag|http://www.semanlink.net/tag/arxiv_doc
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|tag|http://www.semanlink.net/tag/survey
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Lukas Schmelzeisen
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Gerard de Melo
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Aidan Hogan
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Juan Sequeda
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Steffen Staab
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Claudio Gutierrez
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Sabbir M. Rashid
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Axel Polleres
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Claudia d'Amato
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|José Emilio Labra Gayo
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Eva Blomqvist
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Antoine Zimmermann
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Anisa Rula
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Roberto Navigli
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Axel-Cyrille Ngonga Ngomo
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Michael Cochez
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Sabrina Kirrane
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_author|Sebastian Neumaier
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|comment|Draws together many topics & perspectives regarding Knowledge Graphs. 18 co-authors, led by Aidan Hogan. (Regarding language models for embedding, they refer to [Wang et al. Knowledge Graph Embedding: A Survey of Approaches and Applications](/doc/2019/05/knowledge_graph_embedding_a_su))
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|relatedDoc|http://www.semanlink.net/doc/2019/05/knowledge_graph_embedding_a_su
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|title|[2003.02320] Knowledge Graphs
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|bookmarkOf|https://arxiv.org/abs/2003.02320
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|creationTime|2020-03-07T09:20:34Z
+http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_summary|"In this paper we provide a comprehensive introduction to knowledge graphs,
+which have recently garnered significant attention from both industry and
+academia in scenarios that require exploiting diverse, dynamic, large-scale
+collections of data. 
After a general introduction, we motivate and contrast +various graph-based data models and query languages that are used for knowledge +graphs. We discuss the roles of schema, identity, and context in knowledge +graphs. We explain how knowledge can be represented and extracted using a +combination of deductive and inductive techniques. We summarise methods for the +creation, enrichment, quality assessment, refinement, and publication of +knowledge graphs. We provide an overview of prominent open knowledge graphs and +enterprise knowledge graphs, their applications, and how they use the +aforementioned techniques. We conclude with high-level future research +directions for knowledge graphs." +http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_firstAuthor|Aidan Hogan +http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_updated|2020-04-17T00:07:00Z +http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_title|Knowledge Graphs +http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_published|2020-03-04T20:20:32Z +http://www.semanlink.net/doc/2020/03/_2003_02320_knowledge_graphs|arxiv_num|2003.02320 +http://www.semanlink.net/doc/2020/05/thomas_piketty_%C2%AB_apres_la_cri|creationDate|2020-05-09 +http://www.semanlink.net/doc/2020/05/thomas_piketty_%C2%AB_apres_la_cri|tag|http://www.semanlink.net/tag/thomas_piketty +http://www.semanlink.net/doc/2020/05/thomas_piketty_%C2%AB_apres_la_cri|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/05/thomas_piketty_%C2%AB_apres_la_cri|title|Thomas Piketty : « Après la crise, le temps de la monnaie verte » +http://www.semanlink.net/doc/2020/05/thomas_piketty_%C2%AB_apres_la_cri|bookmarkOf|https://www.lemonde.fr/idees/article/2020/05/09/apres-la-crise-le-temps-de-la-monnaie-verte_6039129_3232.html +http://www.semanlink.net/doc/2020/05/thomas_piketty_%C2%AB_apres_la_cri|creationTime|2020-05-09T15:06:14Z +http://www.semanlink.net/doc/2020/01/tech_cliqz|creationDate|2020-01-20 +http://www.semanlink.net/doc/2020/01/tech_cliqz|tag|http://www.semanlink.net/tag/cliqz +http://www.semanlink.net/doc/2020/01/tech_cliqz|title|Tech @ Cliqz +http://www.semanlink.net/doc/2020/01/tech_cliqz|bookmarkOf|https://0x65.dev/ +http://www.semanlink.net/doc/2020/01/tech_cliqz|creationTime|2020-01-20T19:15:12Z +http://www.semanlink.net/doc/2020/01/three_myths_of_graph_databases|creationDate|2020-01-07 +http://www.semanlink.net/doc/2020/01/three_myths_of_graph_databases|tag|http://www.semanlink.net/tag/graph_database +http://www.semanlink.net/doc/2020/01/three_myths_of_graph_databases|title|Three Myths of Graph Databases +http://www.semanlink.net/doc/2020/01/three_myths_of_graph_databases|bookmarkOf|https://medium.com/@steve.sarsfield/three-myths-of-graph-databases-6494a9d2be49? 
+http://www.semanlink.net/doc/2020/01/three_myths_of_graph_databases|creationTime|2020-01-07T13:45:07Z +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|creationDate|2019-09-02 +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|tag|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|tag|http://www.semanlink.net/tag/masakhane +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|tag|http://www.semanlink.net/tag/neural_machine_translation +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|tag|http://www.semanlink.net/tag/african_languages +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|comment|[Slides](/doc/2019/09/machine_translation_for_african) +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|relatedDoc|http://www.semanlink.net/doc/2019/09/machine_translation_for_african +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|title|"Jade Abbott sur Twitter : ""Calling all African NLPers! Goal: Let's publish a paper with NMT baselines for all African languages Slides" +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|bookmarkOf|https://twitter.com/alienelf/status/1168159616167010305 +http://www.semanlink.net/doc/2019/09/jade_abbott_sur_twitter_call|creationTime|2019-09-02T00:52:02Z +http://www.semanlink.net/doc/2019/11/cookbook_vue_js|creationDate|2019-11-19 +http://www.semanlink.net/doc/2019/11/cookbook_vue_js|tag|http://www.semanlink.net/tag/vue_js +http://www.semanlink.net/doc/2019/11/cookbook_vue_js|title|Cookbook — Vue.js +http://www.semanlink.net/doc/2019/11/cookbook_vue_js|bookmarkOf|https://vuejs.org/v2/cookbook/index.html +http://www.semanlink.net/doc/2019/11/cookbook_vue_js|creationTime|2019-11-19T11:40:36Z +http://www.semanlink.net/doc/2019/06/lessons_learned_from_applying_d|creationDate|2019-06-29 +http://www.semanlink.net/doc/2019/06/lessons_learned_from_applying_d|tag|http://www.semanlink.net/tag/deep_nlp +http://www.semanlink.net/doc/2019/06/lessons_learned_from_applying_d|title|Lessons Learned from Applying Deep Learning for NLP Without Big Data +http://www.semanlink.net/doc/2019/06/lessons_learned_from_applying_d|bookmarkOf|https://towardsdatascience.com/lessons-learned-from-applying-deep-learning-for-nlp-without-big-data-d470db4f27bf +http://www.semanlink.net/doc/2019/06/lessons_learned_from_applying_d|creationTime|2019-06-29T11:52:44Z +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|creationDate|2019-12-11 +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|tag|http://www.semanlink.net/tag/cluster_analysis +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|arxiv_author|Thomas Brendan Murphy +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|arxiv_author|Michael Fop +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|title|[1707.00306] Variable Selection Methods for Model-based Clustering +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|bookmarkOf|https://arxiv.org/abs/1707.00306 +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|creationTime|2019-12-11T03:15:56Z 
+http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|arxiv_summary|"Model-based clustering is a popular approach for clustering multivariate data +which has seen applications in numerous fields. Nowadays, high-dimensional data +are more and more common and the model-based clustering approach has adapted to +deal with the increasing dimensionality. In particular, the development of +variable selection techniques has received a lot of attention and research +effort in recent years. Even for small size problems, variable selection has +been advocated to facilitate the interpretation of the clustering results. This +review provides a summary of the methods developed for variable selection in +model-based clustering. Existing R packages implementing the different methods +are indicated and illustrated in application to two data analysis examples." +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|arxiv_firstAuthor|Michael Fop +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|arxiv_updated|2018-06-04T07:52:56Z +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|arxiv_title|Variable Selection Methods for Model-based Clustering +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|arxiv_published|2017-07-02T15:29:13Z +http://www.semanlink.net/doc/2019/12/_1707_00306_variable_selection|arxiv_num|1707.00306 +http://www.semanlink.net/doc/2019/11/voiture_a_hydrogene_un_record|creationDate|2019-11-28 +http://www.semanlink.net/doc/2019/11/voiture_a_hydrogene_un_record|tag|http://www.semanlink.net/tag/hydrogen_cars +http://www.semanlink.net/doc/2019/11/voiture_a_hydrogene_un_record|title|Voiture à hydrogène : un record de distance et un début de filière industrielle +http://www.semanlink.net/doc/2019/11/voiture_a_hydrogene_un_record|bookmarkOf|https://www.lemonde.fr/economie/article/2019/11/27/voiture-a-hydrogene-un-record-de-distance-et-un-debut-de-filiere-industrielle_6020706_3234.html +http://www.semanlink.net/doc/2019/11/voiture_a_hydrogene_un_record|creationTime|2019-11-28T08:19:43Z +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|creationDate|2019-10-11 +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|tag|http://www.semanlink.net/tag/nlp_ens +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|tag|http://www.semanlink.net/tag/facebook_fair +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|tag|http://www.semanlink.net/tag/dl_why_does_it_work +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|tag|http://www.semanlink.net/tag/minimum_description_length_principle +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|tag|http://www.semanlink.net/tag/overfitting +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|tag|http://www.semanlink.net/tag/information_theory_and_deep_learning +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|tag|http://www.semanlink.net/tag/occam_s_razor +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|arxiv_author|Léonard Blier +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|arxiv_author|Yann Ollivier +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|comment|"> Solomonoff’s general theory of inference (Solomonoff, 1964) and the [Minimum Description Length 
Principle](tag:minimum_description_length_principle) (Grünwald, 2007; Rissanen, 2007) formalize [Occam's razor](tag:occam_s_razor), and hold that **a good model of data is a model that is good at losslessly +compressing the data, including the cost of describing the model itself**. Deep neural +networks might seem to go against this principle given the large number of +parameters to be encoded. +We demonstrate experimentally the ability of deep neural networks to compress +the training data even when accounting for parameter encoding." +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|title|[1802.07044] The Description Length of Deep Learning Models +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|bookmarkOf|https://arxiv.org/abs/1802.07044 +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|creationTime|2019-10-11T01:59:35Z +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|arxiv_summary|"Solomonoff's general theory of inference and the Minimum Description Length +principle formalize Occam's razor, and hold that a good model of data is a +model that is good at losslessly compressing the data, including the cost of +describing the model itself. Deep neural networks might seem to go against this +principle given the large number of parameters to be encoded. +We demonstrate experimentally the ability of deep neural networks to compress +the training data even when accounting for parameter encoding. The compression +viewpoint originally motivated the use of variational methods in neural +networks. Unexpectedly, we found that these variational methods provide +surprisingly poor compression bounds, despite being explicitly built to +minimize such bounds. This might explain the relatively poor practical +performance of variational methods in deep learning. On the other hand, simple +incremental encoding methods yield excellent compression values on deep +networks, vindicating Solomonoff's approach." +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|arxiv_firstAuthor|Léonard Blier +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|arxiv_updated|2018-11-01T11:23:09Z +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|arxiv_title|The Description Length of Deep Learning Models +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|arxiv_published|2018-02-20T10:15:26Z +http://www.semanlink.net/doc/2019/10/_1802_07044_the_description_le|arxiv_num|1802.07044 +http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr|creationDate|2019-06-28 +http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr|tag|http://www.semanlink.net/tag/weak_supervision +http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr|tag|http://www.semanlink.net/tag/snorkel +http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr|comment|"> a broad, high-level overview of recent weak supervision approaches, where noisier or higher-level supervision is used as a more expedient and flexible way to get supervision signal, in particular from **subject matter experts** (SMEs). + +broad definition of weak supervision as being comprised of **one or more noisy conditional distributions over unlabeled data**. 
+ +Key practical motivation: what if a SME could spend an afternoon specifying a set of +heuristics or other resources, that–if handled properly–could effectively replace thousands of training +labels? + +Contains a good comparison of the settings in active, semi-supervised, transfer learning (and links to surveys about them) + + +" +http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr|title|Weak Supervision: The New Programming Paradigm for Machine Learning +http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr|bookmarkOf|https://hazyresearch.github.io/snorkel/blog/ws_blog_post.html +http://www.semanlink.net/doc/2019/06/weak_supervision_the_new_progr|creationTime|2019-06-28T02:45:11Z +http://www.semanlink.net/doc/2020/01/nlp_s_clever_hans_moment_has_ar|creationDate|2020-01-10 +http://www.semanlink.net/doc/2020/01/nlp_s_clever_hans_moment_has_ar|tag|http://www.semanlink.net/tag/benjamin_heinzerling +http://www.semanlink.net/doc/2020/01/nlp_s_clever_hans_moment_has_ar|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2020/01/nlp_s_clever_hans_moment_has_ar|comment|Do neural networks learn what we think they learn? @benbenhh reviews research that suggests that they often instead fall prey to the so-called Clever Hans effect and discusses its implications for NLP. +http://www.semanlink.net/doc/2020/01/nlp_s_clever_hans_moment_has_ar|title|NLP's Clever Hans Moment has Arrived +http://www.semanlink.net/doc/2020/01/nlp_s_clever_hans_moment_has_ar|bookmarkOf|https://thegradient.pub/nlps-clever-hans-moment-has-arrived/ +http://www.semanlink.net/doc/2020/01/nlp_s_clever_hans_moment_has_ar|creationTime|2020-01-10T16:33:27Z +http://www.semanlink.net/doc/2019/07/the_heroes_of_chernobyl|creationDate|2019-07-17 +http://www.semanlink.net/doc/2019/07/the_heroes_of_chernobyl|tag|http://www.semanlink.net/tag/tchernobyl +http://www.semanlink.net/doc/2019/07/the_heroes_of_chernobyl|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.semanlink.net/doc/2019/07/the_heroes_of_chernobyl|title|The Heroes of Chernobyl +http://www.semanlink.net/doc/2019/07/the_heroes_of_chernobyl|bookmarkOf|https://www.youtube.com/watch?v=_A6LmTwttgI +http://www.semanlink.net/doc/2019/07/the_heroes_of_chernobyl|creationTime|2019-07-17T15:20:33Z +http://www.semanlink.net/doc/2019/12/la_nouvelle_scene_de_la_cuisine|creationDate|2019-12-06 +http://www.semanlink.net/doc/2019/12/la_nouvelle_scene_de_la_cuisine|tag|http://www.semanlink.net/tag/bresil +http://www.semanlink.net/doc/2019/12/la_nouvelle_scene_de_la_cuisine|tag|http://www.semanlink.net/tag/gastronomie +http://www.semanlink.net/doc/2019/12/la_nouvelle_scene_de_la_cuisine|title|La nouvelle scène de la cuisine brésilienne +http://www.semanlink.net/doc/2019/12/la_nouvelle_scene_de_la_cuisine|bookmarkOf|https://www.lemonde.fr/m-styles/article/2019/12/06/la-nouvelle-scene-de-la-cuisine-bresilienne_6021881_4497319.html +http://www.semanlink.net/doc/2019/12/la_nouvelle_scene_de_la_cuisine|creationTime|2019-12-06T12:59:55Z +http://www.semanlink.net/doc/2019/08/sebastian_ruder_sur_twitter_|creationDate|2019-08-29 +http://www.semanlink.net/doc/2019/08/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/nlp_4_africa +http://www.semanlink.net/doc/2019/08/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/08/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/machine_translation 
+http://www.semanlink.net/doc/2019/08/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/african_languages +http://www.semanlink.net/doc/2019/08/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2019/08/sebastian_ruder_sur_twitter_|title|"Sebastian Ruder sur Twitter : ""In the second part of the NLP and speech processing session @DeepIndaba, @alienelf presents her journey and work on machine translation for African languages with @LauraMartinus #DLIndaba2019""" +http://www.semanlink.net/doc/2019/08/sebastian_ruder_sur_twitter_|bookmarkOf|https://twitter.com/seb_ruder/status/1167072120813342720 +http://www.semanlink.net/doc/2019/08/sebastian_ruder_sur_twitter_|creationTime|2019-08-29T23:07:37Z +http://www.semanlink.net/doc/2019/12/subspace_clustering_towards_d|creationDate|2019-12-11 +http://www.semanlink.net/doc/2019/12/subspace_clustering_towards_d|tag|http://www.semanlink.net/tag/subspace_clustering +http://www.semanlink.net/doc/2019/12/subspace_clustering_towards_d|title|Subspace clustering - Towards Data Science +http://www.semanlink.net/doc/2019/12/subspace_clustering_towards_d|bookmarkOf|https://towardsdatascience.com/subspace-clustering-7b884e8fff73 +http://www.semanlink.net/doc/2019/12/subspace_clustering_towards_d|creationTime|2019-12-11T03:29:25Z +http://www.semanlink.net/doc/2020/01/thomas_wolf_sur_twitter_i_li|creationDate|2020-01-05 +http://www.semanlink.net/doc/2020/01/thomas_wolf_sur_twitter_i_li|tag|http://www.semanlink.net/tag/thomas_wolf +http://www.semanlink.net/doc/2020/01/thomas_wolf_sur_twitter_i_li|tag|http://www.semanlink.net/tag/deep_learning_attention +http://www.semanlink.net/doc/2020/01/thomas_wolf_sur_twitter_i_li|tag|http://www.semanlink.net/tag/locality_sensitive_hashing +http://www.semanlink.net/doc/2020/01/thomas_wolf_sur_twitter_i_li|title|"Thomas Wolf sur Twitter : ""I liked the LSH attention in the reformer...""" +http://www.semanlink.net/doc/2020/01/thomas_wolf_sur_twitter_i_li|bookmarkOf|https://twitter.com/Thom_Wolf/status/1213592668644749314?s=20 +http://www.semanlink.net/doc/2020/01/thomas_wolf_sur_twitter_i_li|creationTime|2020-01-05T18:29:05Z +http://www.semanlink.net/doc/2019/11/les_mille_et_une_connexions_de_|creationDate|2019-11-12 +http://www.semanlink.net/doc/2019/11/les_mille_et_une_connexions_de_|tag|http://www.semanlink.net/tag/afrique_medievale +http://www.semanlink.net/doc/2019/11/les_mille_et_une_connexions_de_|title|Les mille et une connexions de l'Afrique médiévale CNRS Le journal +http://www.semanlink.net/doc/2019/11/les_mille_et_une_connexions_de_|bookmarkOf|https://lejournal.cnrs.fr/articles/les-mille-et-une-connexions-de-lafrique-medievale +http://www.semanlink.net/doc/2019/11/les_mille_et_une_connexions_de_|creationTime|2019-11-12T21:08:30Z +http://www.semanlink.net/doc/2020/01/pandoc|creationDate|2020-01-18 +http://www.semanlink.net/doc/2020/01/pandoc|tag|http://www.semanlink.net/tag/markdown +http://www.semanlink.net/doc/2020/01/pandoc|tag|http://www.semanlink.net/tag/file_convert +http://www.semanlink.net/doc/2020/01/pandoc|tag|http://www.semanlink.net/tag/markup +http://www.semanlink.net/doc/2020/01/pandoc|comment|If you need to convert files from one markup format into another, pandoc is your swiss-army knife. 
+http://www.semanlink.net/doc/2020/01/pandoc|title|Pandoc +http://www.semanlink.net/doc/2020/01/pandoc|bookmarkOf|https://pandoc.org/ +http://www.semanlink.net/doc/2020/01/pandoc|creationTime|2020-01-18T23:45:03Z +http://www.semanlink.net/doc/2019/07/l%E2%80%99arbre_zombie_qui_pourrait_cha|creationDate|2019-07-30 +http://www.semanlink.net/doc/2019/07/l%E2%80%99arbre_zombie_qui_pourrait_cha|tag|http://www.semanlink.net/tag/zombie +http://www.semanlink.net/doc/2019/07/l%E2%80%99arbre_zombie_qui_pourrait_cha|tag|http://www.semanlink.net/tag/foret +http://www.semanlink.net/doc/2019/07/l%E2%80%99arbre_zombie_qui_pourrait_cha|title|L’arbre zombie qui pourrait changer notre regard sur la forêt +http://www.semanlink.net/doc/2019/07/l%E2%80%99arbre_zombie_qui_pourrait_cha|bookmarkOf|https://www.lemonde.fr/big-browser/article/2019/07/29/en-nouvelle-zelande-un-arbre-mort-vivant-pourrait-changer-notre-regard-sur-la-foret_5494707_4832693.html +http://www.semanlink.net/doc/2019/07/l%E2%80%99arbre_zombie_qui_pourrait_cha|creationTime|2019-07-30T11:21:21Z +http://www.semanlink.net/doc/2020/01/pfliu_nlp_named_entity_recognit|creationDate|2020-01-12 +http://www.semanlink.net/doc/2020/01/pfliu_nlp_named_entity_recognit|tag|http://www.semanlink.net/tag/research_papers +http://www.semanlink.net/doc/2020/01/pfliu_nlp_named_entity_recognit|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2020/01/pfliu_nlp_named_entity_recognit|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/01/pfliu_nlp_named_entity_recognit|title|pfliu-nlp/Named-Entity-Recognition-NER-Papers: An elaborate and exhaustive paper list for Named Entity Recognition (NER) +http://www.semanlink.net/doc/2020/01/pfliu_nlp_named_entity_recognit|bookmarkOf|https://github.com/pfliu-nlp/Named-Entity-Recognition-NER-Papers +http://www.semanlink.net/doc/2020/01/pfliu_nlp_named_entity_recognit|creationTime|2020-01-12T22:29:32Z +http://www.semanlink.net/doc/2020/04/coronavirus_38_days_when_brita|creationDate|2020-04-19 +http://www.semanlink.net/doc/2020/04/coronavirus_38_days_when_brita|tag|http://www.semanlink.net/tag/boris_johnson +http://www.semanlink.net/doc/2020/04/coronavirus_38_days_when_brita|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/coronavirus_38_days_when_brita|title|Coronavirus: 38 days when Britain sleepwalked into disaster News The Sunday Times +http://www.semanlink.net/doc/2020/04/coronavirus_38_days_when_brita|bookmarkOf|https://archive.is/20200418182037/https://www.thetimes.co.uk/edition/news/coronavirus-38-days-when-britain-sleepwalked-into-disaster-hq3b9tlgh +http://www.semanlink.net/doc/2020/04/coronavirus_38_days_when_brita|creationTime|2020-04-19T12:20:40Z +http://www.semanlink.net/doc/2019/08/knowledge_graph_reasoning_papers|creationDate|2019-08-09 +http://www.semanlink.net/doc/2019/08/knowledge_graph_reasoning_papers|tag|http://www.semanlink.net/tag/reasoning +http://www.semanlink.net/doc/2019/08/knowledge_graph_reasoning_papers|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2019/08/knowledge_graph_reasoning_papers|tag|http://www.semanlink.net/tag/research_papers +http://www.semanlink.net/doc/2019/08/knowledge_graph_reasoning_papers|title|Knowledge Graph Reasoning Papers +http://www.semanlink.net/doc/2019/08/knowledge_graph_reasoning_papers|bookmarkOf|https://github.com/THU-KEG/Knowledge_Graph_Reasoning_Papers 
+http://www.semanlink.net/doc/2019/08/knowledge_graph_reasoning_papers|creationTime|2019-08-09T16:59:43Z +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|creationDate|2020-04-22 +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|tag|http://www.semanlink.net/tag/grounded_language_learning +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|tag|http://www.semanlink.net/tag/meaning_in_nlp +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Yonatan Bisk +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Mirella Lapata +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Angeliki Lazaridou +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Ari Holtzman +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Jacob Andreas +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Joyce Chai +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Yoshua Bengio +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Jesse Thomason +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Joseph Turian +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Nicolas Pinto +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Aleksandr Nisnevich +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_author|Jonathan May +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|title|[2004.10151] Experience Grounds Language +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|bookmarkOf|https://arxiv.org/abs/2004.10151 +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|creationTime|2020-04-22T16:52:37Z +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_summary|"Successful linguistic communication relies on a shared experience of the +world, and it is this shared experience that makes utterances meaningful. +Despite the incredible effectiveness of language processing models trained on +text alone, today's best systems still make mistakes that arise from a failure +to relate language to the physical world it describes and to the social +interactions it facilitates. +Natural Language Processing is a diverse field, and progress throughout its +development has come from new representational theories, modeling techniques, +data collection paradigms, and tasks. We posit that the present success of +representation learning approaches trained on large text corpora can be deeply +enriched from the parallel tradition of research on the contextual and social +nature of language. +In this article, we consider work on the contextual foundations of language: +grounding, embodiment, and social interaction. We describe a brief history and +possible progression of how contextual information can factor into our +representations, with an eye towards how this integration can move the field +forward and where it is currently being pioneered. 
We believe this framing will +serve as a roadmap for truly contextual language understanding." +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_firstAuthor|Yonatan Bisk +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_updated|2020-04-21T16:56:27Z +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_title|Experience Grounds Language +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_published|2020-04-21T16:56:27Z +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|arxiv_num|2004.10151 +http://www.semanlink.net/doc/2020/04/2004_10151_experience_grounds|type|http://www.semanlink.net/2001/00/semanlink-schema#ArxivDoc +http://www.semanlink.net/doc/2019/06/kawin_ethayarajh_sur_twitter_|creationDate|2019-06-24 +http://www.semanlink.net/doc/2019/06/kawin_ethayarajh_sur_twitter_|tag|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/doc/2019/06/kawin_ethayarajh_sur_twitter_|comment|"[paper](doc/2019/06/_1810_04882_towards_understand) ; [blog post](/doc/2019/06/when_and_why_does_king_man_) +" +http://www.semanlink.net/doc/2019/06/kawin_ethayarajh_sur_twitter_|relatedDoc|http://www.semanlink.net/doc/2019/06/when_and_why_does_king_man_ +http://www.semanlink.net/doc/2019/06/kawin_ethayarajh_sur_twitter_|title|"Kawin Ethayarajh sur Twitter : ""When and why does king - man + woman = queen?""" +http://www.semanlink.net/doc/2019/06/kawin_ethayarajh_sur_twitter_|bookmarkOf|https://twitter.com/ethayarajh/status/1142854783377690625 +http://www.semanlink.net/doc/2019/06/kawin_ethayarajh_sur_twitter_|creationTime|2019-06-24T08:31:21Z +http://www.semanlink.net/doc/2019/10/document_analysis_with_machine_|creationDate|2019-10-28 +http://www.semanlink.net/doc/2019/10/document_analysis_with_machine_|tag|http://www.semanlink.net/tag/bob_ducharme +http://www.semanlink.net/doc/2019/10/document_analysis_with_machine_|tag|http://www.semanlink.net/tag/flair +http://www.semanlink.net/doc/2019/10/document_analysis_with_machine_|tag|http://www.semanlink.net/tag/document_embeddings +http://www.semanlink.net/doc/2019/10/document_analysis_with_machine_|title|Document analysis with machine learning +http://www.semanlink.net/doc/2019/10/document_analysis_with_machine_|bookmarkOf|http://www.bobdc.com/blog/docembeddings/ +http://www.semanlink.net/doc/2019/10/document_analysis_with_machine_|creationTime|2019-10-28T11:44:53Z +http://www.semanlink.net/doc/2019/07/knowledge_graphs_technical_rev|creationDate|2019-07-15 +http://www.semanlink.net/doc/2019/07/knowledge_graphs_technical_rev|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/07/knowledge_graphs_technical_rev|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2019/07/knowledge_graphs_technical_rev|title|Knowledge Graphs: Technical Review +http://www.semanlink.net/doc/2019/07/knowledge_graphs_technical_rev|bookmarkOf|https://persagen.com/resources/biokdd-review-knowledge_graphs.html +http://www.semanlink.net/doc/2019/07/knowledge_graphs_technical_rev|creationTime|2019-07-15T23:10:28Z +http://www.semanlink.net/doc/2019/10/humans_store_about_1_5_megabyte_1|creationDate|2019-10-11 +http://www.semanlink.net/doc/2019/10/humans_store_about_1_5_megabyte_1|tag|http://www.semanlink.net/tag/how_much_information_in_a_language +http://www.semanlink.net/doc/2019/10/humans_store_about_1_5_megabyte_1|comment|"[Comments by Denny Vrandečić](http://simia.net/wiki/How_much_information_is_in_a_language%3F). 
Refers to this paper: [""Information theory and neural coding""](http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c)" +http://www.semanlink.net/doc/2019/10/humans_store_about_1_5_megabyte_1|relatedDoc|http://simia.net/wiki/How_much_information_is_in_a_language%3F +http://www.semanlink.net/doc/2019/10/humans_store_about_1_5_megabyte_1|relatedDoc|http://www.semanlink.net/doc/2019/10/information_theory_and_neural_c +http://www.semanlink.net/doc/2019/10/humans_store_about_1_5_megabyte_1|title|Humans store about 1.5 megabytes of information during language acquisition Royal Society Open Science +http://www.semanlink.net/doc/2019/10/humans_store_about_1_5_megabyte_1|bookmarkOf|https://royalsocietypublishing.org/doi/10.1098/rsos.181393 +http://www.semanlink.net/doc/2019/10/humans_store_about_1_5_megabyte_1|creationTime|2019-10-11T00:44:45Z +http://www.semanlink.net/doc/2020/02/enquete_sur_les_usines_d%E2%80%99antibi|creationDate|2020-02-09 +http://www.semanlink.net/doc/2020/02/enquete_sur_les_usines_d%E2%80%99antibi|tag|http://www.semanlink.net/tag/antibiotic_resistance +http://www.semanlink.net/doc/2020/02/enquete_sur_les_usines_d%E2%80%99antibi|tag|http://www.semanlink.net/tag/inde +http://www.semanlink.net/doc/2020/02/enquete_sur_les_usines_d%E2%80%99antibi|comment|> Plus de 90 % de nos antibiotiques sortent des usines chinoises ou indiennes, dont une partie des effluents finissent dans l’environnement, créant des foyers d’antibiorésistance +http://www.semanlink.net/doc/2020/02/enquete_sur_les_usines_d%E2%80%99antibi|title|Enquête sur les usines d’antibiotiques indiennes, fabriques d’antibiorésistance (2018) +http://www.semanlink.net/doc/2020/02/enquete_sur_les_usines_d%E2%80%99antibi|bookmarkOf|https://www.lemonde.fr/sciences/article/2018/12/10/les-usines-d-antibiotiques-indiennes-sont-des-fabriques-d-antibioresistance_5395476_1650684.html +http://www.semanlink.net/doc/2020/02/enquete_sur_les_usines_d%E2%80%99antibi|creationTime|2020-02-09T12:28:01Z +http://www.semanlink.net/doc/2019/11/new_york_city_election_results_|creationDate|2019-11-09 +http://www.semanlink.net/doc/2019/11/new_york_city_election_results_|tag|http://www.semanlink.net/tag/new_york +http://www.semanlink.net/doc/2019/11/new_york_city_election_results_|tag|http://www.semanlink.net/tag/election +http://www.semanlink.net/doc/2019/11/new_york_city_election_results_|title|New York City election results: Ranked-choice voting ballot initiative passes +http://www.semanlink.net/doc/2019/11/new_york_city_election_results_|bookmarkOf|https://www.vox.com/policy-and-politics/2019/11/5/20948376/new-york-election-results-ranked-choice-voting?utm_campaign=vox.social&utm_content=voxdotcom&utm_medium=social&utm_source=twitter&utm_content=1573007655 +http://www.semanlink.net/doc/2019/11/new_york_city_election_results_|creationTime|2019-11-09T15:37:25Z +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|creationDate|2020-03-08 +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|tag|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|tag|http://www.semanlink.net/tag/nlp_using_knowledge_graphs 
+http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_author|Haotang Deng +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_author|Zhiruo Wang +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_author|Ping Wang +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_author|Qi Ju +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_author|Weijie Liu +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_author|Zhe Zhao +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_author|Peng Zhou +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|comment|a knowledge-enabled language representation model (K-BERT) with knowledge graphs (KGs), in which triples are injected into the sentences as domain knowledge +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|title|[1909.07606] K-BERT: Enabling Language Representation with Knowledge Graph +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|bookmarkOf|https://arxiv.org/abs/1909.07606 +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|creationTime|2020-03-08T22:54:15Z +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_summary|"Pre-trained language representation models, such as BERT, capture a general +language representation from large-scale corpora, but lack domain-specific +knowledge. When reading a domain text, experts make inferences with relevant +knowledge. For machines to achieve this capability, we propose a +knowledge-enabled language representation model (K-BERT) with knowledge graphs +(KGs), in which triples are injected into the sentences as domain knowledge. +However, too much knowledge incorporation may divert the sentence from its +correct meaning, which is called knowledge noise (KN) issue. To overcome KN, +K-BERT introduces soft-position and visible matrix to limit the impact of +knowledge. K-BERT can easily inject domain knowledge into the models by +equipped with a KG without pre-training by-self because it is capable of +loading model parameters from the pre-trained BERT. Our investigation reveals +promising results in twelve NLP tasks. Especially in domain-specific tasks +(including finance, law, and medicine), K-BERT significantly outperforms BERT, +which demonstrates that K-BERT is an excellent choice for solving the +knowledge-driven problems that require experts." +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_firstAuthor|Weijie Liu +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_updated|2019-09-17T06:16:04Z +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_title|K-BERT: Enabling Language Representation with Knowledge Graph +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_published|2019-09-17T06:16:04Z +http://www.semanlink.net/doc/2020/03/_1909_07606_k_bert_enabling_l|arxiv_num|1909.07606 +http://www.semanlink.net/doc/2019/10/who_says_using_rdf_is_hard_|creationDate|2019-10-07 +http://www.semanlink.net/doc/2019/10/who_says_using_rdf_is_hard_|tag|http://www.semanlink.net/tag/javascript_rdf +http://www.semanlink.net/doc/2019/10/who_says_using_rdf_is_hard_|title|Who says using RDF is hard? 
+http://www.semanlink.net/doc/2019/10/who_says_using_rdf_is_hard_|bookmarkOf|https://www.rubensworks.net/blog/2019/10/06/using-rdf-in-javascript/ +http://www.semanlink.net/doc/2019/10/who_says_using_rdf_is_hard_|creationTime|2019-10-07T16:18:03Z +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|creationDate|2020-01-15 +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|tag|http://www.semanlink.net/tag/out_of_distribution_detection +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|arxiv_author|Chandramouli Shama Sastry +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|arxiv_author|Sageev Oore +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|comment|"> we propose to detect OOD examples by identifying inconsistencies between activity patterns and class predicted... +> Unlike many approaches, this can be used with any pre-trained softmax classifier and does not require access to OOD data" +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|title|[1912.12510] Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|bookmarkOf|https://arxiv.org/abs/1912.12510 +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|creationTime|2020-01-15T13:04:14Z +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|arxiv_summary|"When presented with Out-of-Distribution (OOD) examples, deep neural networks +yield confident, incorrect predictions. Detecting OOD examples is challenging, +and the potential risks are high. In this paper, we propose to detect OOD +examples by identifying inconsistencies between activity patterns and class +predicted. We find that characterizing activity patterns by Gram matrices and +identifying anomalies in gram matrix values can yield high OOD detection rates. +We identify anomalies in the gram matrices by simply comparing each value with +its respective range observed over the training data. Unlike many approaches, +this can be used with any pre-trained softmax classifier and does not require +access to OOD data for fine-tuning hyperparameters, nor does it require OOD +access for inferring parameters. The method is applicable across a variety of +architectures and vision datasets and, for the important and surprisingly hard +task of detecting far-from-distribution out-of-distribution examples, it +generally performs better than or equal to state-of-the-art OOD detection +methods (including those that do assume access to OOD examples)." 
+http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|arxiv_firstAuthor|Chandramouli Shama Sastry +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|arxiv_updated|2020-01-09T15:17:55Z +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|arxiv_title|Detecting Out-of-Distribution Examples with In-distribution Examples and Gram Matrices +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|arxiv_published|2019-12-28T19:44:03Z +http://www.semanlink.net/doc/2020/01/_1912_12510_detecting_out_of_d|arxiv_num|1912.12510 +http://www.semanlink.net/doc/2020/04/finally_we_may_have_a_path_to_t|creationDate|2020-04-15 +http://www.semanlink.net/doc/2020/04/finally_we_may_have_a_path_to_t|tag|http://www.semanlink.net/tag/computational_universe +http://www.semanlink.net/doc/2020/04/finally_we_may_have_a_path_to_t|tag|http://www.semanlink.net/tag/wolfram +http://www.semanlink.net/doc/2020/04/finally_we_may_have_a_path_to_t|title|Finally We May Have a Path to the Fundamental Theory of Physics… and It’s Beautiful—Stephen Wolfram Writings +http://www.semanlink.net/doc/2020/04/finally_we_may_have_a_path_to_t|bookmarkOf|https://writings.stephenwolfram.com/2020/04/finally-we-may-have-a-path-to-the-fundamental-theory-of-physics-and-its-beautiful/ +http://www.semanlink.net/doc/2020/04/finally_we_may_have_a_path_to_t|creationTime|2020-04-15T00:50:04Z +http://www.semanlink.net/doc/2019/10/answering_complex_open_domain_q|creationDate|2019-10-21 +http://www.semanlink.net/doc/2019/10/answering_complex_open_domain_q|tag|http://www.semanlink.net/tag/nlp_stanford +http://www.semanlink.net/doc/2019/10/answering_complex_open_domain_q|tag|http://www.semanlink.net/tag/open_domain_question_answering +http://www.semanlink.net/doc/2019/10/answering_complex_open_domain_q|title|Answering Complex Open-domain Questions at Scale SAIL Blog +http://www.semanlink.net/doc/2019/10/answering_complex_open_domain_q|bookmarkOf|http://ai.stanford.edu/blog/answering-complex-questions/ +http://www.semanlink.net/doc/2019/10/answering_complex_open_domain_q|creationTime|2019-10-21T22:45:17Z +http://www.semanlink.net/doc/2019/07/chinese_vlogger_who_used_filter|creationDate|2019-07-31 +http://www.semanlink.net/doc/2019/07/chinese_vlogger_who_used_filter|tag|http://www.semanlink.net/tag/internet +http://www.semanlink.net/doc/2019/07/chinese_vlogger_who_used_filter|title|Chinese vlogger who used filter to look younger caught in live-stream glitch - BBC News +http://www.semanlink.net/doc/2019/07/chinese_vlogger_who_used_filter|bookmarkOf|https://www.bbc.com/news/blogs-trending-49151042 +http://www.semanlink.net/doc/2019/07/chinese_vlogger_who_used_filter|creationTime|2019-07-31T00:33:14Z +http://www.semanlink.net/doc/2019/11/jean_rouch_l%E2%80%99ethnologue_cineas|creationDate|2019-11-15 +http://www.semanlink.net/doc/2019/11/jean_rouch_l%E2%80%99ethnologue_cineas|tag|http://www.semanlink.net/tag/jean_rouch +http://www.semanlink.net/doc/2019/11/jean_rouch_l%E2%80%99ethnologue_cineas|title|Jean Rouch, l’ethnologue-cinéaste CNRS Le journal +http://www.semanlink.net/doc/2019/11/jean_rouch_l%E2%80%99ethnologue_cineas|bookmarkOf|https://lejournal.cnrs.fr/infographies/jean-rouch-lethnologue-cineaste +http://www.semanlink.net/doc/2019/11/jean_rouch_l%E2%80%99ethnologue_cineas|creationTime|2019-11-15T11:36:22Z +http://www.semanlink.net/doc/2020/03/google_and_http|creationDate|2020-03-08 +http://www.semanlink.net/doc/2020/03/google_and_http|tag|http://www.semanlink.net/tag/google 
+http://www.semanlink.net/doc/2020/03/google_and_http|tag|http://www.semanlink.net/tag/http +http://www.semanlink.net/doc/2020/03/google_and_http|tag|http://www.semanlink.net/tag/abuse_of_power +http://www.semanlink.net/doc/2020/03/google_and_http|tag|http://www.semanlink.net/tag/dave_winer +http://www.semanlink.net/doc/2020/03/google_and_http|comment|> Google’s not secure message means this: “Google tried to take control of the open web and this site said no.” +http://www.semanlink.net/doc/2020/03/google_and_http|title|Google and HTTP +http://www.semanlink.net/doc/2020/03/google_and_http|bookmarkOf|http://this.how/googleAndHttp/ +http://www.semanlink.net/doc/2020/03/google_and_http|creationTime|2020-03-08T22:48:47Z +http://www.semanlink.net/doc/2019/12/large_memory_layers_with_produc|creationDate|2019-12-11 +http://www.semanlink.net/doc/2019/12/large_memory_layers_with_produc|title|Large Memory Layers with Product Keys (poster) +http://www.semanlink.net/doc/2019/12/large_memory_layers_with_produc|bookmarkOf|https://twitter.com/GuillaumeLample/status/1204539826437021702/photo/1 +http://www.semanlink.net/doc/2019/12/large_memory_layers_with_produc|creationTime|2019-12-11T16:47:48Z +http://www.semanlink.net/doc/2019/12/large_memory_layers_with_produc|mainDoc|http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer +http://www.semanlink.net/doc/2020/03/au_gabon_une_grotte_pourrait_r|creationDate|2020-03-09 +http://www.semanlink.net/doc/2020/03/au_gabon_une_grotte_pourrait_r|tag|http://www.semanlink.net/tag/antiquite_africaine +http://www.semanlink.net/doc/2020/03/au_gabon_une_grotte_pourrait_r|tag|http://www.semanlink.net/tag/gabon +http://www.semanlink.net/doc/2020/03/au_gabon_une_grotte_pourrait_r|title|Au Gabon, une grotte pourrait révéler des secrets vieux de 700 ans +http://www.semanlink.net/doc/2020/03/au_gabon_une_grotte_pourrait_r|bookmarkOf|https://www.lemonde.fr/afrique/article/2020/03/09/au-gabon-une-grotte-pourrait-reveler-des-secrets-vieux-de-700-ans_6032355_3212.html +http://www.semanlink.net/doc/2020/03/au_gabon_une_grotte_pourrait_r|creationTime|2020-03-09T17:21:00Z +http://www.semanlink.net/doc/2020/03/one_track_minds_using_ai_for_m|creationDate|2020-03-08 +http://www.semanlink.net/doc/2020/03/one_track_minds_using_ai_for_m|tag|http://www.semanlink.net/tag/music_source_separation +http://www.semanlink.net/doc/2020/03/one_track_minds_using_ai_for_m|tag|http://www.semanlink.net/tag/ai_facebook +http://www.semanlink.net/doc/2020/03/one_track_minds_using_ai_for_m|title|One-track minds: Using AI for music source separation +http://www.semanlink.net/doc/2020/03/one_track_minds_using_ai_for_m|bookmarkOf|https://tech.fb.com/one-track-minds-using-ai-for-music-source-separation/ +http://www.semanlink.net/doc/2020/03/one_track_minds_using_ai_for_m|creationTime|2020-03-08T12:11:37Z +http://www.semanlink.net/doc/2019/12/machine_learning_on_graphs_ne|creationDate|2019-12-21 +http://www.semanlink.net/doc/2019/12/machine_learning_on_graphs_ne|tag|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/doc/2019/12/machine_learning_on_graphs_ne|title|Machine Learning on Graphs @ NeurIPS 2019 - ML Review - Medium +http://www.semanlink.net/doc/2019/12/machine_learning_on_graphs_ne|bookmarkOf|https://medium.com/mlreview/machine-learning-on-graphs-neurips-2019-875eecd41069 +http://www.semanlink.net/doc/2019/12/machine_learning_on_graphs_ne|creationTime|2019-12-21T01:10:52Z +http://www.semanlink.net/doc/2020/02/nucleaire_pourquoi_la_central|creationDate|2020-02-26 
+http://www.semanlink.net/doc/2020/02/nucleaire_pourquoi_la_central|tag|http://www.semanlink.net/tag/industrie_nucleaire +http://www.semanlink.net/doc/2020/02/nucleaire_pourquoi_la_central|title|Nucléaire : pourquoi la centrale de Flamanville ne produit plus d’électricité depuis six mois +http://www.semanlink.net/doc/2020/02/nucleaire_pourquoi_la_central|bookmarkOf|https://www.lemonde.fr/economie/article/2020/02/26/nucleaire-pourquoi-la-centrale-de-flamanville-ne-produit-plus-d-electricite-depuis-six-mois_6030944_3234.html +http://www.semanlink.net/doc/2020/02/nucleaire_pourquoi_la_central|creationTime|2020-02-26T20:42:38Z +http://www.semanlink.net/doc/2019/07/natural_language_processing_for|creationDate|2019-07-04 +http://www.semanlink.net/doc/2019/07/natural_language_processing_for|tag|http://www.semanlink.net/tag/nlp_4_requirements_engineering +http://www.semanlink.net/doc/2019/07/natural_language_processing_for|tag|http://www.semanlink.net/tag/workshop +http://www.semanlink.net/doc/2019/07/natural_language_processing_for|comment|First Workshop on Natural Language Processing for Requirements Engineering (NLP4RE 18) +http://www.semanlink.net/doc/2019/07/natural_language_processing_for|title|Natural Language Processing for Requirements Engineering: The Best Is Yet to Come +http://www.semanlink.net/doc/2019/07/natural_language_processing_for|bookmarkOf|https://www.researchgate.net/publication/327926245_Natural_Language_Processing_for_Requirements_Engineering_The_Best_Is_Yet_to_Come +http://www.semanlink.net/doc/2019/07/natural_language_processing_for|creationTime|2019-07-04T23:53:46Z +http://www.semanlink.net/doc/2020/04/rdflib_rdflib_a_python_library|creationDate|2020-04-09 +http://www.semanlink.net/doc/2020/04/rdflib_rdflib_a_python_library|tag|http://www.semanlink.net/tag/rdflib +http://www.semanlink.net/doc/2020/04/rdflib_rdflib_a_python_library|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/04/rdflib_rdflib_a_python_library|comment|[doc](https://rdflib.readthedocs.io/en/stable/) +http://www.semanlink.net/doc/2020/04/rdflib_rdflib_a_python_library|title|RDFLib/rdflib: a Python library for working with RDF +http://www.semanlink.net/doc/2020/04/rdflib_rdflib_a_python_library|bookmarkOf|https://github.com/RDFLib/rdflib +http://www.semanlink.net/doc/2020/04/rdflib_rdflib_a_python_library|creationTime|2020-04-09T01:56:54Z +http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl|creationDate|2020-04-29 +http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl|tag|http://www.semanlink.net/tag/text_aware_kg_embedding +http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl|comment|"> survey of the KG embedding +models which consider the structured information of the graph as well as +the unstructured information in form of literals such as text, numerical +values etc + +A le mérite de poser la question de l'utilisation de littéraux dans les embeddings de KG + +[Newer and longer version](/doc/2020/05/1910_12507_a_survey_on_knowle)" +http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl|relatedDoc|http://www.semanlink.net/doc/2020/05/1910_12507_a_survey_on_knowle +http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl|title|A Comprehensive Survey of Knowledge 
Graph Embeddings with Literals: Techniques and Applications +http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl|bookmarkOf|http://ceur-ws.org/Vol-2377/paper_4.pdf +http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl|creationTime|2020-04-29T14:09:42Z +http://www.semanlink.net/doc/2020/04/a_comprehensive_survey_of_knowl|mainDoc|http://www.semanlink.net/doc/2020/04/ceur_ws_org_vol_2377_workshop +http://www.semanlink.net/doc/2019/10/a_postman_collection_for_traini|creationDate|2019-10-24 +http://www.semanlink.net/doc/2019/10/a_postman_collection_for_traini|tag|http://www.semanlink.net/tag/postman +http://www.semanlink.net/doc/2019/10/a_postman_collection_for_traini|tag|http://www.semanlink.net/tag/ibm_watson_and_speech_to_text +http://www.semanlink.net/doc/2019/10/a_postman_collection_for_traini|title|A Postman Collection for Training IBM Watson Speech to Text +http://www.semanlink.net/doc/2019/10/a_postman_collection_for_traini|bookmarkOf|https://medium.com/@petertuton/a-postman-collection-for-training-ibm-watson-speech-to-text-dfdda0c424f0 +http://www.semanlink.net/doc/2019/10/a_postman_collection_for_traini|creationTime|2019-10-24T23:37:39Z +http://www.semanlink.net/doc/2020/01/github_opennmt_opennmt_py_op|creationDate|2020-01-17 +http://www.semanlink.net/doc/2020/01/github_opennmt_opennmt_py_op|tag|http://www.semanlink.net/tag/pytorch +http://www.semanlink.net/doc/2020/01/github_opennmt_opennmt_py_op|tag|http://www.semanlink.net/tag/neural_machine_translation +http://www.semanlink.net/doc/2020/01/github_opennmt_opennmt_py_op|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/01/github_opennmt_opennmt_py_op|tag|http://www.semanlink.net/tag/open_source +http://www.semanlink.net/doc/2020/01/github_opennmt_opennmt_py_op|title|GitHub - OpenNMT/OpenNMT-py: Open Source Neural Machine Translation in PyTorch +http://www.semanlink.net/doc/2020/01/github_opennmt_opennmt_py_op|bookmarkOf|https://github.com/OpenNMT/OpenNMT-py#features +http://www.semanlink.net/doc/2020/01/github_opennmt_opennmt_py_op|creationTime|2020-01-17T12:57:35Z +http://www.semanlink.net/doc/2019/07/is_that_a_duplicate_quora_quest|creationDate|2019-07-03 +http://www.semanlink.net/doc/2019/07/is_that_a_duplicate_quora_quest|tag|http://www.semanlink.net/tag/quora_question_pairs +http://www.semanlink.net/doc/2019/07/is_that_a_duplicate_quora_quest|title|Is That a Duplicate Quora Question? 
LinkedIn +http://www.semanlink.net/doc/2019/07/is_that_a_duplicate_quora_quest|bookmarkOf|https://www.linkedin.com/pulse/duplicate-quora-question-abhishek-thakur/ +http://www.semanlink.net/doc/2019/07/is_that_a_duplicate_quora_quest|creationTime|2019-07-03T01:33:30Z +http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_|creationDate|2020-05-05 +http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_|tag|http://www.semanlink.net/tag/phd_thesis +http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_|tag|http://www.semanlink.net/tag/graph_neural_networks +http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_|tag|http://www.semanlink.net/tag/graph_embeddings +http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_|tag|http://www.semanlink.net/tag/graph_convolutional_networks +http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_|comment|Covers a range of emerging topics in Deep Learning: from graph neural nets (and graph convolutions) to structure discovery (objects, relations, events) +http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_|title|"Thomas Kipf's PhD thesis: ""Deep Learning with Graph-Structured Representations""" +http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_|bookmarkOf|https://dare.uva.nl/search?identifier=1b63b965-24c4-4bcd-aabb-b849056fa76d +http://www.semanlink.net/doc/2020/05/phd_thesis_deep_learning_with_|creationTime|2020-05-05T15:47:55Z +http://www.semanlink.net/doc/2020/01/nlp_year_in_review_2019_dai|creationDate|2020-01-05 +http://www.semanlink.net/doc/2020/01/nlp_year_in_review_2019_dai|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/01/nlp_year_in_review_2019_dai|tag|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/doc/2020/01/nlp_year_in_review_2019_dai|title|NLP Year in Review — 2019 - dair.ai - Medium +http://www.semanlink.net/doc/2020/01/nlp_year_in_review_2019_dai|bookmarkOf|https://medium.com/dair-ai/nlp-year-in-review-2019-fb8d523bcb19 +http://www.semanlink.net/doc/2020/01/nlp_year_in_review_2019_dai|creationTime|2020-01-05T17:37:18Z +http://www.semanlink.net/doc/2019/07/a_common_sense_view_of_knowledg|creationDate|2019-07-03 +http://www.semanlink.net/doc/2019/07/a_common_sense_view_of_knowledg|tag|http://www.semanlink.net/tag/mike_bergman +http://www.semanlink.net/doc/2019/07/a_common_sense_view_of_knowledg|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2019/07/a_common_sense_view_of_knowledg|comment|"> “Graph-based knowledge representation has been researched for decades and the term knowledge graph does not constitute a new technology. Rather, it is a buzzword reinvented by Google and adopted by other companies and academia to describe different knowledge representation applications.” + +> ...knowledge graphs, like ontologies, have a broad range of applications & constructions. 
...it is less important to reflect on some precise understanding as to realize that human language and knowledge is being presented in a connected, graph form" +http://www.semanlink.net/doc/2019/07/a_common_sense_view_of_knowledg|title|A Common Sense View of Knowledge Graphs AI3:::Adaptive Information +http://www.semanlink.net/doc/2019/07/a_common_sense_view_of_knowledg|bookmarkOf|http://www.mkbergman.com/2244/a-common-sense-view-of-knowledge-graphs/ +http://www.semanlink.net/doc/2019/07/a_common_sense_view_of_knowledg|creationTime|2019-07-03T08:30:02Z +http://www.semanlink.net/doc/2020/04/barbara_stiegler_%C2%AB_la_crise_d|creationDate|2020-04-10 +http://www.semanlink.net/doc/2020/04/barbara_stiegler_%C2%AB_la_crise_d|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/barbara_stiegler_%C2%AB_la_crise_d|title|Barbara Stiegler : « La crise due au coronavirus reflète la vision néolibérale de la santé publique » +http://www.semanlink.net/doc/2020/04/barbara_stiegler_%C2%AB_la_crise_d|bookmarkOf|https://www.lemonde.fr/idees/article/2020/04/09/barbara-stiegler-la-crise-due-au-coronavirus-reflete-la-vision-neoliberale-de-la-sante-publique_6036059_3232.html +http://www.semanlink.net/doc/2020/04/barbara_stiegler_%C2%AB_la_crise_d|creationTime|2020-04-10T15:27:25Z +http://www.semanlink.net/doc/2020/01/facebook_paid_teen_vogue_to_run|creationDate|2020-01-09 +http://www.semanlink.net/doc/2020/01/facebook_paid_teen_vogue_to_run|tag|http://www.semanlink.net/tag/deletefb +http://www.semanlink.net/doc/2020/01/facebook_paid_teen_vogue_to_run|tag|http://www.semanlink.net/tag/election +http://www.semanlink.net/doc/2020/01/facebook_paid_teen_vogue_to_run|title|"Facebook paid Teen Vogue to run a fake article praising Facebook for ""helping ensure the integrity of the 2020 election"" / Boing Boing" +http://www.semanlink.net/doc/2020/01/facebook_paid_teen_vogue_to_run|bookmarkOf|https://boingboing.net/2020/01/09/facebook-paid-teen-vogue-to-ru.html +http://www.semanlink.net/doc/2020/01/facebook_paid_teen_vogue_to_run|creationTime|2020-01-09T14:05:23Z +http://www.semanlink.net/doc/2020/04/roam_research_a_note_taking_t|creationDate|2020-04-17 +http://www.semanlink.net/doc/2020/04/roam_research_a_note_taking_t|tag|http://www.semanlink.net/tag/roam +http://www.semanlink.net/doc/2020/04/roam_research_a_note_taking_t|tag|http://www.semanlink.net/tag/semanlink_related +http://www.semanlink.net/doc/2020/04/roam_research_a_note_taking_t|comment|> TLDR - it's excel for text + a graph database for your ideas +http://www.semanlink.net/doc/2020/04/roam_research_a_note_taking_t|title|Roam Research – A note taking tool for networked thought. +http://www.semanlink.net/doc/2020/04/roam_research_a_note_taking_t|bookmarkOf|https://roamresearch.com/ +http://www.semanlink.net/doc/2020/04/roam_research_a_note_taking_t|creationTime|2020-04-17T13:54:47Z +http://www.semanlink.net/doc/2020/04/%C2%AB_les_cas_de_covid_se_multiplie|creationDate|2020-04-06 +http://www.semanlink.net/doc/2020/04/%C2%AB_les_cas_de_covid_se_multiplie|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/%C2%AB_les_cas_de_covid_se_multiplie|title|« Les cas de Covid se multiplient. Ça tombe, ça tombe. Jusqu’où ? 
» : la course à la vie d’une réanimatrice +http://www.semanlink.net/doc/2020/04/%C2%AB_les_cas_de_covid_se_multiplie|bookmarkOf|https://www.lemonde.fr/sante/article/2020/04/06/les-cas-de-covid-se-multiplient-ca-tombe-ca-tombe-jusqu-ou-la-course-a-la-vie-d-une-reanimatrice_6035677_1651302.html +http://www.semanlink.net/doc/2020/04/%C2%AB_les_cas_de_covid_se_multiplie|creationTime|2020-04-06T14:29:57Z +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|creationDate|2019-09-16 +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|tag|http://www.semanlink.net/tag/information_bottleneck_method +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|tag|http://www.semanlink.net/tag/information_theory_and_deep_learning +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|tag|http://www.semanlink.net/tag/emnlp_2019 +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|comment|Blog post about [this paper](http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol) +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|relatedDoc|http://www.semanlink.net/doc/2019/09/_1909_01380_the_bottom_up_evol +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|title|Evolution of Representations in the Transformer (2019) +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|bookmarkOf|https://lena-voita.github.io/posts/emnlp19_evolution.html +http://www.semanlink.net/doc/2019/09/evolution_of_representations_in|creationTime|2019-09-16T22:02:56Z +http://www.semanlink.net/doc/2019/10/restoring_ancient_text_using_de|creationDate|2019-10-18 +http://www.semanlink.net/doc/2019/10/restoring_ancient_text_using_de|tag|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/doc/2019/10/restoring_ancient_text_using_de|tag|http://www.semanlink.net/tag/deep_learning +http://www.semanlink.net/doc/2019/10/restoring_ancient_text_using_de|tag|http://www.semanlink.net/tag/google_deepmind +http://www.semanlink.net/doc/2019/10/restoring_ancient_text_using_de|tag|http://www.semanlink.net/tag/ecriture +http://www.semanlink.net/doc/2019/10/restoring_ancient_text_using_de|tag|http://www.semanlink.net/tag/grece_antique +http://www.semanlink.net/doc/2019/10/restoring_ancient_text_using_de|title|Restoring ancient text using deep learning: a case study on Greek epigraphy DeepMind +http://www.semanlink.net/doc/2019/10/restoring_ancient_text_using_de|bookmarkOf|https://deepmind.com/research/publications/Restoring-ancient-text-using-deep-learning-a-case-study-on-Greek-epigraphy +http://www.semanlink.net/doc/2019/10/restoring_ancient_text_using_de|creationTime|2019-10-18T00:50:20Z +http://www.semanlink.net/doc/2020/04/contrastive_predictive_coding|creationDate|2020-04-25 +http://www.semanlink.net/doc/2020/04/contrastive_predictive_coding|comment|Keras implementation of the algorithm presented in this DeepMind's [paper](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.03748) +http://www.semanlink.net/doc/2020/04/contrastive_predictive_coding|relatedDoc|https://arxiv.org/abs/1807.03748 +http://www.semanlink.net/doc/2020/04/contrastive_predictive_coding|title|Contrastive Predictive Coding 
+http://www.semanlink.net/doc/2020/04/contrastive_predictive_coding|bookmarkOf|https://awesomeopensource.com/project/davidtellez/contrastive-predictive-coding +http://www.semanlink.net/doc/2020/04/contrastive_predictive_coding|creationTime|2020-04-25T17:40:06Z +http://www.semanlink.net/doc/2020/04/contrastive_predictive_coding|mainDoc|https://arxiv.org/abs/1807.03748 +http://www.semanlink.net/doc/2020/05/cognonto_data_structure_mea|creationDate|2020-05-03 +http://www.semanlink.net/doc/2020/05/cognonto_data_structure_mea|tag|http://www.semanlink.net/tag/kbpedia +http://www.semanlink.net/doc/2020/05/cognonto_data_structure_mea|comment|Cognonto is an IT consulting boutique specializing in semantic technologies, knowledge graphs, and knowledge-based artificial intelligence. Developers of [KBpedia](https://kbpedia.org). +http://www.semanlink.net/doc/2020/05/cognonto_data_structure_mea|title|Cognonto - Data. Structure. Meaning. +http://www.semanlink.net/doc/2020/05/cognonto_data_structure_mea|bookmarkOf|https://cognonto.com/ +http://www.semanlink.net/doc/2020/05/cognonto_data_structure_mea|creationTime|2020-05-03T01:02:30Z +http://www.semanlink.net/doc/2020/05/isabel_cachola_sur_twitter_t|creationDate|2020-05-03 +http://www.semanlink.net/doc/2020/05/isabel_cachola_sur_twitter_t|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/05/isabel_cachola_sur_twitter_t|tag|http://www.semanlink.net/tag/automatic_summarization +http://www.semanlink.net/doc/2020/05/isabel_cachola_sur_twitter_t|tag|http://www.semanlink.net/tag/allen_institute_for_ai_a2i +http://www.semanlink.net/doc/2020/05/isabel_cachola_sur_twitter_t|comment|"> a new automatic summarization task +with high source compression requiring expert background knowledge and complex language understanding" +http://www.semanlink.net/doc/2020/05/isabel_cachola_sur_twitter_t|title|"Isabel Cachola sur Twitter : ""TLDR: Extreme Summarization of Scientific Documents""" +http://www.semanlink.net/doc/2020/05/isabel_cachola_sur_twitter_t|bookmarkOf|https://twitter.com/isabelcachola/status/1256362038957228032?s=20 +http://www.semanlink.net/doc/2020/05/isabel_cachola_sur_twitter_t|creationTime|2020-05-03T10:51:15Z +http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b|creationDate|2020-01-09 +http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b|comment|Same author: [NER with Lime](/doc/2020/01/interpretable_named_entity_reco) +http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b|relatedDoc|http://www.semanlink.net/doc/2020/01/interpretable_named_entity_reco +http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b|title|Named Entity Recognition with Bert – Depends on the definition +http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b|bookmarkOf|https://www.depends-on-the-definition.com/named-entity-recognition-with-bert/ +http://www.semanlink.net/doc/2020/01/named_entity_recognition_with_b|creationTime|2020-01-09T02:01:52Z 
+http://www.semanlink.net/doc/2019/06/one_shot_learning_with_siamese_|creationDate|2019-06-28
+http://www.semanlink.net/doc/2019/06/one_shot_learning_with_siamese_|tag|http://www.semanlink.net/tag/siamese_network
+http://www.semanlink.net/doc/2019/06/one_shot_learning_with_siamese_|tag|http://www.semanlink.net/tag/one_shot_generalization
+http://www.semanlink.net/doc/2019/06/one_shot_learning_with_siamese_|tag|http://www.semanlink.net/tag/similarity_learning
+http://www.semanlink.net/doc/2019/06/one_shot_learning_with_siamese_|tag|http://www.semanlink.net/tag/keras
+http://www.semanlink.net/doc/2019/06/one_shot_learning_with_siamese_|comment|"the network is learning a **similarity function**, which takes two images as input and expresses how similar they are.

> Assume that we want to build face recognition system for a small organization with only 10 employees..."
+http://www.semanlink.net/doc/2019/06/one_shot_learning_with_siamese_|title|One Shot Learning with Siamese Networks using Keras
+http://www.semanlink.net/doc/2019/06/one_shot_learning_with_siamese_|bookmarkOf|https://towardsdatascience.com/one-shot-learning-with-siamese-networks-using-keras-17f34e75bb3d
+http://www.semanlink.net/doc/2019/06/one_shot_learning_with_siamese_|creationTime|2019-06-28T19:00:27Z
+http://www.semanlink.net/doc/2019/11/pour_conquerir_les_terres_les_|creationDate|2019-11-15
+http://www.semanlink.net/doc/2019/11/pour_conquerir_les_terres_les_|tag|http://www.semanlink.net/tag/horizontal_gene_transfer
+http://www.semanlink.net/doc/2019/11/pour_conquerir_les_terres_les_|tag|http://www.semanlink.net/tag/histoire_de_la_vie
+http://www.semanlink.net/doc/2019/11/pour_conquerir_les_terres_les_|title|Pour conquérir les terres, les plantes ont emprunté des gènes aux bactéries CNRS
+http://www.semanlink.net/doc/2019/11/pour_conquerir_les_terres_les_|bookmarkOf|http://www.cnrs.fr/fr/pour-conquerir-les-terres-les-plantes-ont-emprunte-des-genes-aux-bacteries
+http://www.semanlink.net/doc/2019/11/pour_conquerir_les_terres_les_|creationTime|2019-11-15T13:42:33Z
+http://www.semanlink.net/doc/2019/08/geologie_normandie_massif_d|creationDate|2019-08-10
+http://www.semanlink.net/doc/2019/08/geologie_normandie_massif_d|tag|http://www.semanlink.net/tag/orne
+http://www.semanlink.net/doc/2019/08/geologie_normandie_massif_d|tag|http://www.semanlink.net/tag/paleontologie
+http://www.semanlink.net/doc/2019/08/geologie_normandie_massif_d|tag|http://www.semanlink.net/tag/cjnn
+http://www.semanlink.net/doc/2019/08/geologie_normandie_massif_d|title|Géologie - Normandie - Massif d'Ecouves (NS) - Pleurodictyum constantinopolitatum
+http://www.semanlink.net/doc/2019/08/geologie_normandie_massif_d|bookmarkOf|http://geologie.discip.ac-caen.fr/paleozoi/EcouvesNS/devonien.html
+http://www.semanlink.net/doc/2019/08/geologie_normandie_massif_d|creationTime|2019-08-10T13:13:33Z
+http://www.semanlink.net/doc/2020/01/richer_sentence_embeddings_usin|creationDate|2020-01-06
+http://www.semanlink.net/doc/2020/01/richer_sentence_embeddings_usin|tag|http://www.semanlink.net/tag/sbert
+http://www.semanlink.net/doc/2020/01/richer_sentence_embeddings_usin|comment|"Simplistic (and often used) methods for sentence embeddings with BERT are too simplistic to be good (averaging the word vectors, or using the \[CLS\] special vector (start of sequence)). 
+ +[About this paper](/doc/2019/08/_1908_10084_sentence_bert_sen)" +http://www.semanlink.net/doc/2020/01/richer_sentence_embeddings_usin|relatedDoc|http://www.semanlink.net/doc/2019/08/_1908_10084_sentence_bert_sen +http://www.semanlink.net/doc/2020/01/richer_sentence_embeddings_usin|title|Richer Sentence Embeddings using Sentence-BERT — Part I +http://www.semanlink.net/doc/2020/01/richer_sentence_embeddings_usin|bookmarkOf|https://medium.com/genei-technology/richer-sentence-embeddings-using-sentence-bert-part-i-ce1d9e0b1343 +http://www.semanlink.net/doc/2020/01/richer_sentence_embeddings_usin|creationTime|2020-01-06T01:48:12Z +http://www.semanlink.net/doc/2020/05/the_real_lord_of_the_flies_wha|creationDate|2020-05-10 +http://www.semanlink.net/doc/2020/05/the_real_lord_of_the_flies_wha|tag|http://www.semanlink.net/tag/lord_of_the_flies +http://www.semanlink.net/doc/2020/05/the_real_lord_of_the_flies_wha|title|The real Lord of the Flies: what happened when six boys were shipwrecked for 15 months The Guardian +http://www.semanlink.net/doc/2020/05/the_real_lord_of_the_flies_wha|bookmarkOf|https://www.theguardian.com/books/2020/may/09/the-real-lord-of-the-flies-what-happened-when-six-boys-were-shipwrecked-for-15-months +http://www.semanlink.net/doc/2020/05/the_real_lord_of_the_flies_wha|creationTime|2020-05-10T16:58:42Z +http://www.semanlink.net/doc/2019/12/highlights_from_conll_and_emnlp|creationDate|2019-12-07 +http://www.semanlink.net/doc/2019/12/highlights_from_conll_and_emnlp|tag|http://www.semanlink.net/tag/emnlp_2019 +http://www.semanlink.net/doc/2019/12/highlights_from_conll_and_emnlp|title|Highlights from CoNLL and EMNLP 2019 +http://www.semanlink.net/doc/2019/12/highlights_from_conll_and_emnlp|bookmarkOf|https://arun.chagantys.org/technical/2019/12/03/emnlp-2019.html +http://www.semanlink.net/doc/2019/12/highlights_from_conll_and_emnlp|creationTime|2019-12-07T11:20:22Z +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|creationDate|2020-01-25 +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|tag|http://www.semanlink.net/tag/ml_google +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|tag|http://www.semanlink.net/tag/face_recognition +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|arxiv_author|Dmitry Kalenichenko +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|arxiv_author|Florian Schroff +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|arxiv_author|James Philbin +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|comment|"Learns a Euclidean embedding per image + +> Uses a deep CNN trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. + +> state-of-the-art face recognition performance using only **128-bytes per face**. 
+ +" +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|title|[1503.03832] FaceNet: A Unified Embedding for Face Recognition and Clustering +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|bookmarkOf|https://arxiv.org/abs/1503.03832 +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|creationTime|2020-01-25T01:03:31Z +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|arxiv_summary|"Despite significant recent advances in the field of face recognition, +implementing face verification and recognition efficiently at scale presents +serious challenges to current approaches. In this paper we present a system, +called FaceNet, that directly learns a mapping from face images to a compact +Euclidean space where distances directly correspond to a measure of face +similarity. Once this space has been produced, tasks such as face recognition, +verification and clustering can be easily implemented using standard techniques +with FaceNet embeddings as feature vectors. +Our method uses a deep convolutional network trained to directly optimize the +embedding itself, rather than an intermediate bottleneck layer as in previous +deep learning approaches. To train, we use triplets of roughly aligned matching +/ non-matching face patches generated using a novel online triplet mining +method. The benefit of our approach is much greater representational +efficiency: we achieve state-of-the-art face recognition performance using only +128-bytes per face. +On the widely used Labeled Faces in the Wild (LFW) dataset, our system +achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves +95.12%. Our system cuts the error rate in comparison to the best published +result by 30% on both datasets. +We also introduce the concept of harmonic embeddings, and a harmonic triplet +loss, which describe different versions of face embeddings (produced by +different networks) that are compatible to each other and allow for direct +comparison between each other." +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|arxiv_firstAuthor|Florian Schroff +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|arxiv_updated|2015-06-17T23:35:47Z +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|arxiv_title|FaceNet: A Unified Embedding for Face Recognition and Clustering +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|arxiv_published|2015-03-12T18:10:53Z +http://www.semanlink.net/doc/2020/01/_1503_03832_facenet_a_unified|arxiv_num|1503.03832 +http://www.semanlink.net/doc/2019/07/kalsarikannit_thisisfinland|creationDate|2019-07-19 +http://www.semanlink.net/doc/2019/07/kalsarikannit_thisisfinland|tag|http://www.semanlink.net/tag/finlande +http://www.semanlink.net/doc/2019/07/kalsarikannit_thisisfinland|tag|http://www.semanlink.net/tag/mots_expressions_remarquables +http://www.semanlink.net/doc/2019/07/kalsarikannit_thisisfinland|comment|"The feeling when you are going to get drunk home alone in your underwear – with no intention of going out. 
+ +" +http://www.semanlink.net/doc/2019/07/kalsarikannit_thisisfinland|title|Kalsarikännit - thisisFINLAND +http://www.semanlink.net/doc/2019/07/kalsarikannit_thisisfinland|bookmarkOf|https://finland.fi/emoji/kalsarikannit/ +http://www.semanlink.net/doc/2019/07/kalsarikannit_thisisfinland|creationTime|2019-07-19T21:10:35Z +http://www.semanlink.net/doc/2019/12/natural_language_processing_c|creationDate|2019-12-07 +http://www.semanlink.net/doc/2019/12/natural_language_processing_c|tag|http://www.semanlink.net/tag/nlp_automotive +http://www.semanlink.net/doc/2019/12/natural_language_processing_c|tag|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/doc/2019/12/natural_language_processing_c|title|Natural Language Processing – Current Applications and Future Possibilities +http://www.semanlink.net/doc/2019/12/natural_language_processing_c|bookmarkOf|https://emerj.com/partner-content/nlp-current-applications-and-future-possibilities/ +http://www.semanlink.net/doc/2019/12/natural_language_processing_c|creationTime|2019-12-07T16:47:28Z +http://www.semanlink.net/doc/2019/07/balade_dans_kigali_ville_ruche|creationDate|2019-07-27 +http://www.semanlink.net/doc/2019/07/balade_dans_kigali_ville_ruche|tag|http://www.semanlink.net/tag/new_africa +http://www.semanlink.net/doc/2019/07/balade_dans_kigali_ville_ruche|tag|http://www.semanlink.net/tag/rwanda +http://www.semanlink.net/doc/2019/07/balade_dans_kigali_ville_ruche|tag|http://www.semanlink.net/tag/kigali +http://www.semanlink.net/doc/2019/07/balade_dans_kigali_ville_ruche|title|Balade dans Kigali, ville-ruche qui se rêve en « Singapour africain » +http://www.semanlink.net/doc/2019/07/balade_dans_kigali_ville_ruche|bookmarkOf|https://www.lemonde.fr/afrique/article/2019/07/26/balade-dans-kigali-ville-ruche-qui-se-reve-en-singapour-africain_5493909_3212.html +http://www.semanlink.net/doc/2019/07/balade_dans_kigali_ville_ruche|creationTime|2019-07-27T08:41:22Z +http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib|creationDate|2020-05-04 +http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib|tag|http://www.semanlink.net/tag/github_project +http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib|tag|http://www.semanlink.net/tag/reproducible_research +http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib|comment|PyTorch-based library for training, evaluation, and hyperparameter optimization of knowledge graph embeddings (KGE). Cf. ICLR Paper: [You CAN Teach an Old Dog New Tricks! 
On Training Knowledge Graph Embeddings](/doc/2020/05/you_can_teach_an_old_dog_new_tr) +http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib|relatedDoc|http://www.semanlink.net/doc/2020/05/you_can_teach_an_old_dog_new_tr +http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib|title|A knowledge graph embedding library for reproducible research +http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib|bookmarkOf|https://github.com/uma-pi1/kge +http://www.semanlink.net/doc/2020/05/a_knowledge_graph_embedding_lib|creationTime|2020-05-04T21:54:06Z +http://www.semanlink.net/doc/2019/09/how_complex_systems_fail|creationDate|2019-09-02 +http://www.semanlink.net/doc/2019/09/how_complex_systems_fail|tag|http://www.semanlink.net/tag/complexite +http://www.semanlink.net/doc/2019/09/how_complex_systems_fail|title|How Complex Systems Fail +http://www.semanlink.net/doc/2019/09/how_complex_systems_fail|bookmarkOf|https://web.mit.edu/2.75/resources/random/How%20Complex%20Systems%20Fail.pdf +http://www.semanlink.net/doc/2019/09/how_complex_systems_fail|creationTime|2019-09-02T00:48:49Z +http://www.semanlink.net/doc/2019/07/au_burkina_un_premier_lacher_d|creationDate|2019-07-05 +http://www.semanlink.net/doc/2019/07/au_burkina_un_premier_lacher_d|tag|http://www.semanlink.net/tag/ogm +http://www.semanlink.net/doc/2019/07/au_burkina_un_premier_lacher_d|tag|http://www.semanlink.net/tag/moustique +http://www.semanlink.net/doc/2019/07/au_burkina_un_premier_lacher_d|tag|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/doc/2019/07/au_burkina_un_premier_lacher_d|title|Au Burkina, un premier lâcher de moustiques génétiquement modifiés crée la polémique +http://www.semanlink.net/doc/2019/07/au_burkina_un_premier_lacher_d|bookmarkOf|https://www.lemonde.fr/afrique/article/2019/07/04/au-burkina-un-premier-lacher-de-moustiques-genetiquement-modifies-cree-la-polemique_5485432_3212.html +http://www.semanlink.net/doc/2019/07/au_burkina_un_premier_lacher_d|creationTime|2019-07-05T22:42:29Z +http://www.semanlink.net/doc/2019/07/naacl_2019_highlights|creationDate|2019-07-30 +http://www.semanlink.net/doc/2019/07/naacl_2019_highlights|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2019/07/naacl_2019_highlights|tag|http://www.semanlink.net/tag/nlp_conference +http://www.semanlink.net/doc/2019/07/naacl_2019_highlights|title|NAACL 2019 Highlights +http://www.semanlink.net/doc/2019/07/naacl_2019_highlights|bookmarkOf|http://ruder.io/naacl2019/ +http://www.semanlink.net/doc/2019/07/naacl_2019_highlights|creationTime|2019-07-30T15:52:24Z +http://www.semanlink.net/doc/2019/06/audio_classification_using_tran|creationDate|2019-06-29 +http://www.semanlink.net/doc/2019/06/audio_classification_using_tran|tag|http://www.semanlink.net/tag/audio_classification +http://www.semanlink.net/doc/2019/06/audio_classification_using_tran|tag|http://www.semanlink.net/tag/transfer_learning +http://www.semanlink.net/doc/2019/06/audio_classification_using_tran|title|Audio classification using transfer learning approach – mc.ai +http://www.semanlink.net/doc/2019/06/audio_classification_using_tran|bookmarkOf|https://mc.ai/audio-classification-using-transfer-learning-approach/ +http://www.semanlink.net/doc/2019/06/audio_classification_using_tran|creationTime|2019-06-29T10:17:42Z +http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages|creationDate|2020-01-21 
+http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages|tag|http://www.semanlink.net/tag/jeremy_howard
+http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages|tag|http://www.semanlink.net/tag/github_pages
+http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages|tag|http://www.semanlink.net/tag/fast_ai
+http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages|tag|http://www.semanlink.net/tag/tutorial
+http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages|tag|http://www.semanlink.net/tag/blog
+http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages|title|Your own blog with GitHub Pages and fast_template (4 part tutorial) · fast.ai
+http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages|bookmarkOf|https://www.fast.ai/2020/01/20/blog_overview/
+http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages|creationTime|2020-01-21T12:18:58Z
+http://www.semanlink.net/doc/2020/03/adrian_gschwend_sur_twitter_|creationDate|2020-03-12
+http://www.semanlink.net/doc/2020/03/adrian_gschwend_sur_twitter_|tag|http://www.semanlink.net/tag/javascript_rdf
+http://www.semanlink.net/doc/2020/03/adrian_gschwend_sur_twitter_|title|"Adrian Gschwend sur Twitter : ""getting started with RDF and JavaScript!..."""
+http://www.semanlink.net/doc/2020/03/adrian_gschwend_sur_twitter_|bookmarkOf|https://twitter.com/linkedktk/status/1238031736522620928?s=20
+http://www.semanlink.net/doc/2020/03/adrian_gschwend_sur_twitter_|creationTime|2020-03-12T12:38:34Z
+http://www.semanlink.net/doc/2020/02/is_the_future_of_neural_network|creationDate|2020-02-05
+http://www.semanlink.net/doc/2020/02/is_the_future_of_neural_network|tag|http://www.semanlink.net/tag/attention_is_all_you_need
+http://www.semanlink.net/doc/2020/02/is_the_future_of_neural_network|tag|http://www.semanlink.net/tag/sparse_matrix
+http://www.semanlink.net/doc/2020/02/is_the_future_of_neural_network|comment|"> are all the dimensions of my input
vector interacting with all the others? Usually not. So
going sparse may be useful.

> Convolutional layers are a smart and efficient way to
implement a sparse transformation on an input tensor... Some
other networks contain much larger matrices that may
benefit from sparsity: Transformers

But

> It’s hard to
implement general sparse matrix computations on
GPUs in an efficient way... Easier if the
matrices’ non-zeros are grouped in small
fixed-size blocks
"
+http://www.semanlink.net/doc/2020/02/is_the_future_of_neural_network|title|Is the future of Neural Networks Sparse? 
An Introduction +http://www.semanlink.net/doc/2020/02/is_the_future_of_neural_network|bookmarkOf|https://medium.com/huggingface/is-the-future-of-neural-networks-sparse-an-introduction-1-n-d03923ecbd70 +http://www.semanlink.net/doc/2020/02/is_the_future_of_neural_network|creationTime|2020-02-05T00:33:11Z +http://www.semanlink.net/doc/2019/08/neural_knowledge_acquisition_vi|creationDate|2019-08-23 +http://www.semanlink.net/doc/2019/08/neural_knowledge_acquisition_vi|tag|http://www.semanlink.net/tag/attention_in_graphs +http://www.semanlink.net/doc/2019/08/neural_knowledge_acquisition_vi|tag|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/doc/2019/08/neural_knowledge_acquisition_vi|tag|http://www.semanlink.net/tag/knowledge_graph_deep_learning +http://www.semanlink.net/doc/2019/08/neural_knowledge_acquisition_vi|title|Neural Knowledge Acquisition via Mutual Attention between Knowledge Graph and Text (2018) +http://www.semanlink.net/doc/2019/08/neural_knowledge_acquisition_vi|bookmarkOf|http://nlp.csai.tsinghua.edu.cn/~lzy/publications/aaai2018_jointnre.pdf +http://www.semanlink.net/doc/2019/08/neural_knowledge_acquisition_vi|creationTime|2019-08-23T00:28:34Z +http://www.semanlink.net/doc/2020/02/hugging_face_how_to_train_a_ne|creationDate|2020-02-16 +http://www.semanlink.net/doc/2020/02/hugging_face_how_to_train_a_ne|tag|http://www.semanlink.net/tag/language_model +http://www.semanlink.net/doc/2020/02/hugging_face_how_to_train_a_ne|tag|http://www.semanlink.net/tag/hugging_face +http://www.semanlink.net/doc/2020/02/hugging_face_how_to_train_a_ne|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.semanlink.net/doc/2020/02/hugging_face_how_to_train_a_ne|tag|http://www.semanlink.net/tag/attention_is_all_you_need +http://www.semanlink.net/doc/2020/02/hugging_face_how_to_train_a_ne|title|Hugging Face: How to train a new language model from scratch using Transformers and Tokenizers +http://www.semanlink.net/doc/2020/02/hugging_face_how_to_train_a_ne|bookmarkOf|https://huggingface.co/blog/how-to-train +http://www.semanlink.net/doc/2020/02/hugging_face_how_to_train_a_ne|creationTime|2020-02-16T13:39:46Z +http://www.semanlink.net/doc/2019/07/nominations_for_acl_2019_best_p|creationDate|2019-07-25 +http://www.semanlink.net/doc/2019/07/nominations_for_acl_2019_best_p|tag|http://www.semanlink.net/tag/acl_2019 +http://www.semanlink.net/doc/2019/07/nominations_for_acl_2019_best_p|title|Nominations for ACL 2019 Best Paper Awards - ACL 2019 +http://www.semanlink.net/doc/2019/07/nominations_for_acl_2019_best_p|bookmarkOf|http://www.acl2019.org/EN/nominations-for-acl-2019-best-paper-awards.xhtml +http://www.semanlink.net/doc/2019/07/nominations_for_acl_2019_best_p|creationTime|2019-07-25T10:48:34Z +http://www.semanlink.net/doc/2019/07/bert_s_success_in_some_benchmar|creationDate|2019-07-24 +http://www.semanlink.net/doc/2019/07/bert_s_success_in_some_benchmar|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/07/bert_s_success_in_some_benchmar|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2019/07/bert_s_success_in_some_benchmar|title|BERT's success in some benchmarks tests may be simply due to the exploitation of spurious statistical cues in the dataset. Without them it is no better then random. 
: MachineLearning +http://www.semanlink.net/doc/2019/07/bert_s_success_in_some_benchmar|bookmarkOf|https://www.reddit.com/r/MachineLearning/comments/cfxpxy/berts_success_in_some_benchmarks_tests_may_be/ +http://www.semanlink.net/doc/2019/07/bert_s_success_in_some_benchmar|creationTime|2019-07-24T01:35:24Z +http://www.semanlink.net/doc/2019/10/ibm_cloud_speech_to_text_refe|creationDate|2019-10-29 +http://www.semanlink.net/doc/2019/10/ibm_cloud_speech_to_text_refe|tag|http://www.semanlink.net/tag/ibm_watson_and_speech_to_text +http://www.semanlink.net/doc/2019/10/ibm_cloud_speech_to_text_refe|title|IBM Cloud Speech to Text : Références de recherche +http://www.semanlink.net/doc/2019/10/ibm_cloud_speech_to_text_refe|bookmarkOf|https://cloud.ibm.com/docs/services/speech-to-text?topic=speech-to-text-references&locale=fr#suzuki2019 +http://www.semanlink.net/doc/2019/10/ibm_cloud_speech_to_text_refe|creationTime|2019-10-29T17:57:39Z +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|creationDate|2020-05-04 +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|tag|http://www.semanlink.net/tag/good +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|tag|http://www.semanlink.net/tag/explainable_ai +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|tag|http://www.semanlink.net/tag/knowledge_graph_embeddings +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_author|Federico Bianchi +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_author|Pasquale Minervini +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_author|Matteo Palmonari +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_author|Luca Costabello +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_author|Gaetano Rossiello +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|comment|"survey of + +- the state-of-the-art in the field of knowledge graph embeddings +- methods for explaining predictions obtained via knowledge graph embeddings." +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|title|[2004.14843] Knowledge Graph Embeddings and Explainable AI +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|bookmarkOf|https://arxiv.org/abs/2004.14843 +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|creationTime|2020-05-04T13:29:14Z +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_summary|"Knowledge graph embeddings are now a widely adopted approach to knowledge +representation in which entities and relationships are embedded in vector +spaces. In this chapter, we introduce the reader to the concept of knowledge +graph embeddings by explaining what they are, how they can be generated and how +they can be evaluated. We summarize the state-of-the-art in this field by +describing the approaches that have been introduced to represent knowledge in +the vector space. In relation to knowledge representation, we consider the +problem of explainability, and discuss models and methods for explaining +predictions obtained via knowledge graph embeddings." 
+http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_firstAuthor|Federico Bianchi +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_updated|2020-04-30T14:55:09Z +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_title|Knowledge Graph Embeddings and Explainable AI +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_published|2020-04-30T14:55:09Z +http://www.semanlink.net/doc/2020/05/2004_14843_knowledge_graph_em|arxiv_num|2004.14843 +http://www.semanlink.net/doc/2020/02/minhash_token_filter_%7C_elastics|creationDate|2020-02-14 +http://www.semanlink.net/doc/2020/02/minhash_token_filter_%7C_elastics|tag|http://www.semanlink.net/tag/elasticsearch_nearest_neighbor_s +http://www.semanlink.net/doc/2020/02/minhash_token_filter_%7C_elastics|tag|http://www.semanlink.net/tag/minhash +http://www.semanlink.net/doc/2020/02/minhash_token_filter_%7C_elastics|comment|One solution to make similarity search more practical and computationally feasible involves hashing of documents, in a way that similar documents are more likely to produce the same hash code (locality sensitive hashing, LSH). Depending on what constitutes the similarity between documents, various LSH functions have been proposed. For Jaccard similarity, a popular LSH function is MinHash. +http://www.semanlink.net/doc/2020/02/minhash_token_filter_%7C_elastics|title|MinHash token filter Elasticsearch Reference +http://www.semanlink.net/doc/2020/02/minhash_token_filter_%7C_elastics|bookmarkOf|https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-minhash-tokenfilter.html +http://www.semanlink.net/doc/2020/02/minhash_token_filter_%7C_elastics|creationTime|2020-02-14T00:03:14Z +http://www.semanlink.net/doc/2019/09/rubensworks_rdf_dereference_js_|creationDate|2019-09-30 +http://www.semanlink.net/doc/2019/09/rubensworks_rdf_dereference_js_|tag|http://www.semanlink.net/tag/javascript_rdf +http://www.semanlink.net/doc/2019/09/rubensworks_rdf_dereference_js_|title|rubensworks/rdf-dereference.js: Dereference any URL for its RDF contents +http://www.semanlink.net/doc/2019/09/rubensworks_rdf_dereference_js_|bookmarkOf|https://github.com/rubensworks/rdf-dereference.js +http://www.semanlink.net/doc/2019/09/rubensworks_rdf_dereference_js_|creationTime|2019-09-30T08:29:15Z +http://www.semanlink.net/doc/2019/10/meet_albert_a_new_%E2%80%98lite_bert%E2%80%99_|creationDate|2019-10-01 +http://www.semanlink.net/doc/2019/10/meet_albert_a_new_%E2%80%98lite_bert%E2%80%99_|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/10/meet_albert_a_new_%E2%80%98lite_bert%E2%80%99_|tag|http://www.semanlink.net/tag/toyota +http://www.semanlink.net/doc/2019/10/meet_albert_a_new_%E2%80%98lite_bert%E2%80%99_|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2019/10/meet_albert_a_new_%E2%80%98lite_bert%E2%80%99_|title|Meet ALBERT: a new ‘Lite BERT’ from Google & Toyota with State of the Art NLP performance and 18x fewer parameters. 
+http://www.semanlink.net/doc/2019/10/meet_albert_a_new_%E2%80%98lite_bert%E2%80%99_|bookmarkOf|https://medium.com/@lessw/meet-albert-a-new-lite-bert-from-google-toyota-with-state-of-the-art-nlp-performance-and-18x-df8f7b58fa28 +http://www.semanlink.net/doc/2019/10/meet_albert_a_new_%E2%80%98lite_bert%E2%80%99_|creationTime|2019-10-01T15:21:13Z +http://www.semanlink.net/doc/2020/01/fastai_nbdev_create_delightful|creationDate|2020-01-12 +http://www.semanlink.net/doc/2020/01/fastai_nbdev_create_delightful|tag|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/doc/2020/01/fastai_nbdev_create_delightful|tag|http://www.semanlink.net/tag/fastai_nbdev +http://www.semanlink.net/doc/2020/01/fastai_nbdev_create_delightful|comment|"a library that allows you to fully develop a library in Jupyter Notebooks, putting all your code, tests and documentation in one place + +[Blog post](https://www.fast.ai/2019/12/02/nbdev/)" +http://www.semanlink.net/doc/2020/01/fastai_nbdev_create_delightful|title|fastai/nbdev: Create delightful python projects using Jupyter Notebooks +http://www.semanlink.net/doc/2020/01/fastai_nbdev_create_delightful|bookmarkOf|https://github.com/fastai/nbdev +http://www.semanlink.net/doc/2020/01/fastai_nbdev_create_delightful|creationTime|2020-01-12T18:33:29Z +http://www.semanlink.net/doc/2019/06/the_last_picture_show_la_derni|creationDate|2019-06-17 +http://www.semanlink.net/doc/2019/06/the_last_picture_show_la_derni|tag|http://www.semanlink.net/tag/annees_50 +http://www.semanlink.net/doc/2019/06/the_last_picture_show_la_derni|tag|http://www.semanlink.net/tag/texas +http://www.semanlink.net/doc/2019/06/the_last_picture_show_la_derni|tag|http://www.semanlink.net/tag/amerique_profonde +http://www.semanlink.net/doc/2019/06/the_last_picture_show_la_derni|tag|http://www.semanlink.net/tag/film_americain +http://www.semanlink.net/doc/2019/06/the_last_picture_show_la_derni|title|The Last Picture Show (La Dernière Séance) +http://www.semanlink.net/doc/2019/06/the_last_picture_show_la_derni|bookmarkOf|https://en.wikipedia.org/wiki/The_Last_Picture_Show +http://www.semanlink.net/doc/2019/06/the_last_picture_show_la_derni|creationTime|2019-06-17T22:56:54Z +http://www.semanlink.net/doc/2019/07/why_and_how_we_use_pangeo_at_cn|creationDate|2019-07-17 +http://www.semanlink.net/doc/2019/07/why_and_how_we_use_pangeo_at_cn|tag|http://www.semanlink.net/tag/apache_spark +http://www.semanlink.net/doc/2019/07/why_and_how_we_use_pangeo_at_cn|tag|http://www.semanlink.net/tag/cnes +http://www.semanlink.net/doc/2019/07/why_and_how_we_use_pangeo_at_cn|title|Why and How we use Pangeo at CNES - pangeo - Medium +http://www.semanlink.net/doc/2019/07/why_and_how_we_use_pangeo_at_cn|bookmarkOf|https://medium.com/pangeo/why-and-how-we-use-pangeo-at-cnes-74553c7fb19b +http://www.semanlink.net/doc/2019/07/why_and_how_we_use_pangeo_at_cn|creationTime|2019-07-17T11:21:15Z +http://www.semanlink.net/doc/2020/01/comment_voir_et_supprimer_les|creationDate|2020-01-29 +http://www.semanlink.net/doc/2020/01/comment_voir_et_supprimer_les|tag|http://www.semanlink.net/tag/facebook +http://www.semanlink.net/doc/2020/01/comment_voir_et_supprimer_les|title|Comment voir (et supprimer) les données envoyées à Facebook par des sites tiers +http://www.semanlink.net/doc/2020/01/comment_voir_et_supprimer_les|bookmarkOf|https://www.lemonde.fr/pixels/article/2020/01/29/activite-en-dehors-de-facebook-comment-voir-et-supprimer-les-donnees-envoyees-a-facebok-par-des-sites-tiers_6027688_4408996.html 
+http://www.semanlink.net/doc/2020/01/comment_voir_et_supprimer_les|creationTime|2020-01-29T23:00:04Z +http://www.semanlink.net/doc/2020/02/calling_java_from_python_stac|creationDate|2020-02-20 +http://www.semanlink.net/doc/2020/02/calling_java_from_python_stac|tag|http://www.semanlink.net/tag/stack_overflow +http://www.semanlink.net/doc/2020/02/calling_java_from_python_stac|tag|http://www.semanlink.net/tag/java_in_python +http://www.semanlink.net/doc/2020/02/calling_java_from_python_stac|title|Calling Java from Python - Stack Overflow +http://www.semanlink.net/doc/2020/02/calling_java_from_python_stac|bookmarkOf|https://stackoverflow.com/questions/3652554/calling-java-from-python +http://www.semanlink.net/doc/2020/02/calling_java_from_python_stac|creationTime|2020-02-20T17:14:23Z +http://www.semanlink.net/doc/2019/09/the_dangers_of_reshaping_and_ot|creationDate|2019-09-14 +http://www.semanlink.net/doc/2019/09/the_dangers_of_reshaping_and_ot|tag|http://www.semanlink.net/tag/pytorch +http://www.semanlink.net/doc/2019/09/the_dangers_of_reshaping_and_ot|tag|http://www.semanlink.net/tag/reshaping +http://www.semanlink.net/doc/2019/09/the_dangers_of_reshaping_and_ot|tag|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/doc/2019/09/the_dangers_of_reshaping_and_ot|title|The dangers of reshaping and other fun mistakes I’ve learnt from PyTorch +http://www.semanlink.net/doc/2019/09/the_dangers_of_reshaping_and_ot|bookmarkOf|https://medium.com/mcgill-artificial-intelligence-review/the-dangers-of-reshaping-and-other-fun-mistakes-ive-learnt-from-pytorch-b6a5bdc1c275 +http://www.semanlink.net/doc/2019/09/the_dangers_of_reshaping_and_ot|creationTime|2019-09-14T11:29:48Z +http://www.semanlink.net/doc/2019/07/semantic_textual_similarity_%7C_n|creationDate|2019-07-02 +http://www.semanlink.net/doc/2019/07/semantic_textual_similarity_%7C_n|tag|http://www.semanlink.net/tag/quora_question_pairs +http://www.semanlink.net/doc/2019/07/semantic_textual_similarity_%7C_n|tag|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/doc/2019/07/semantic_textual_similarity_%7C_n|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2019/07/semantic_textual_similarity_%7C_n|title|Semantic textual similarity NLP-progress +http://www.semanlink.net/doc/2019/07/semantic_textual_similarity_%7C_n|bookmarkOf|http://nlpprogress.com/english/semantic_textual_similarity.html +http://www.semanlink.net/doc/2019/07/semantic_textual_similarity_%7C_n|creationTime|2019-07-02T01:11:27Z +http://www.semanlink.net/doc/2019/07/new_fast_ai_course_a_code_firs|creationDate|2019-07-09 +http://www.semanlink.net/doc/2019/07/new_fast_ai_course_a_code_firs|tag|http://www.semanlink.net/tag/nlp +http://www.semanlink.net/doc/2019/07/new_fast_ai_course_a_code_firs|tag|http://www.semanlink.net/tag/fast_ai_course +http://www.semanlink.net/doc/2019/07/new_fast_ai_course_a_code_firs|comment|[forum](https://forums.fast.ai/t/a-code-first-introduction-to-natural-language-processing-2019/50203) +http://www.semanlink.net/doc/2019/07/new_fast_ai_course_a_code_firs|title|new fast.ai course: A Code-First Introduction to Natural Language Processing · fast.ai +http://www.semanlink.net/doc/2019/07/new_fast_ai_course_a_code_firs|bookmarkOf|https://www.fast.ai/2019/07/08/fastai-nlp/ +http://www.semanlink.net/doc/2019/07/new_fast_ai_course_a_code_firs|creationTime|2019-07-09T10:46:27Z +http://www.semanlink.net/doc/2020/05/confinement_du_12_mars_au_pre|creationDate|2020-05-10 
+http://www.semanlink.net/doc/2020/05/confinement_du_12_mars_au_pre|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales +http://www.semanlink.net/doc/2020/05/confinement_du_12_mars_au_pre|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/05/confinement_du_12_mars_au_pre|comment|"> Fallait-il maintenir le scrutin du dimanche 15 mars dans un contexte d’épidémie ? Jusqu’au samedi soir minuit, la question a agité l’exécutif. + +Bah non, fallait pas, et ils étaient prévenus ""[Coronavirus: Why You Must Act Now](/doc/2020/03/coronavirus_why_you_must_act_n)"" (10 mars sur medium). Même moi je le disais [twitter](https://twitter.com/hyperfp/status/1237855520158928896?s=20)" +http://www.semanlink.net/doc/2020/05/confinement_du_12_mars_au_pre|relatedDoc|http://www.semanlink.net/doc/2020/03/coronavirus_why_you_must_act_n +http://www.semanlink.net/doc/2020/05/confinement_du_12_mars_au_pre|title|Confinement : du 12 mars au premier tour des municipales, une semaine de bascule au sommet de l’Etat +http://www.semanlink.net/doc/2020/05/confinement_du_12_mars_au_pre|bookmarkOf|https://www.lemonde.fr/politique/article/2020/05/10/confinement-du-12-mars-au-premier-tour-des-municipales-une-semaine-de-bascule-au-sommet-de-l-etat_6039204_823448.html +http://www.semanlink.net/doc/2020/05/confinement_du_12_mars_au_pre|creationTime|2020-05-10T11:22:40Z +http://www.semanlink.net/doc/2019/06/transfer_learning_in_natural_la|creationDate|2019-06-04 +http://www.semanlink.net/doc/2019/06/transfer_learning_in_natural_la|tag|http://www.semanlink.net/tag/tutorial +http://www.semanlink.net/doc/2019/06/transfer_learning_in_natural_la|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2019/06/transfer_learning_in_natural_la|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/doc/2019/06/transfer_learning_in_natural_la|title|Transfer Learning in Natural Language Processing - Google Slides +http://www.semanlink.net/doc/2019/06/transfer_learning_in_natural_la|bookmarkOf|https://docs.google.com/presentation/d/1fIhGikFPnb7G5kr58OvYC3GN4io7MznnM0aAgadvJfc/edit#slide=id.g5888218f39_177_4 +http://www.semanlink.net/doc/2019/06/transfer_learning_in_natural_la|creationTime|2019-06-04T09:19:10Z +http://www.semanlink.net/doc/2019/06/word_embeddings_6_years_later|creationDate|2019-06-03 +http://www.semanlink.net/doc/2019/06/word_embeddings_6_years_later|tag|http://www.semanlink.net/tag/word_embedding +http://www.semanlink.net/doc/2019/06/word_embeddings_6_years_later|title|Word Embeddings: 6 Years Later +http://www.semanlink.net/doc/2019/06/word_embeddings_6_years_later|bookmarkOf|https://filedn.com/lIMcLnuUFFN5R3b3EtoMy94/WordEmbeddings_6YearsLater.pdf +http://www.semanlink.net/doc/2019/06/word_embeddings_6_years_later|creationTime|2019-06-03T08:48:30Z +http://www.semanlink.net/doc/2019/09/knowledge_graphs_and_machine_le|creationDate|2019-09-12 +http://www.semanlink.net/doc/2019/09/knowledge_graphs_and_machine_le|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2019/09/knowledge_graphs_and_machine_le|tag|http://www.semanlink.net/tag/graphs_machine_learning +http://www.semanlink.net/doc/2019/09/knowledge_graphs_and_machine_le|comment|From data to KGs with machine learning +http://www.semanlink.net/doc/2019/09/knowledge_graphs_and_machine_le|title|Knowledge Graphs and Machine Learning - Towards Data Science 
+http://www.semanlink.net/doc/2019/09/knowledge_graphs_and_machine_le|bookmarkOf|https://towardsdatascience.com/knowledge-graphs-and-machine-learning-3939b504c7bc +http://www.semanlink.net/doc/2019/09/knowledge_graphs_and_machine_le|creationTime|2019-09-12T21:25:22Z +http://www.semanlink.net/doc/2019/12/winograd_schema_challenge_wik|creationDate|2019-12-18 +http://www.semanlink.net/doc/2019/12/winograd_schema_challenge_wik|tag|http://www.semanlink.net/tag/nlu_is_hard +http://www.semanlink.net/doc/2019/12/winograd_schema_challenge_wik|comment|The city councilmen refused the demonstrators a permit because they [feared/advocated] violence. +http://www.semanlink.net/doc/2019/12/winograd_schema_challenge_wik|title|Winograd Schema Challenge - Wikipedia +http://www.semanlink.net/doc/2019/12/winograd_schema_challenge_wik|bookmarkOf|https://en.wikipedia.org/wiki/Winograd_Schema_Challenge +http://www.semanlink.net/doc/2019/12/winograd_schema_challenge_wik|creationTime|2019-12-18T14:47:30Z +http://www.semanlink.net/doc/2020/01/siamese_network_keras_for_image|creationDate|2020-01-22 +http://www.semanlink.net/doc/2020/01/siamese_network_keras_for_image|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2020/01/siamese_network_keras_for_image|tag|http://www.semanlink.net/tag/quora_question_pairs +http://www.semanlink.net/doc/2020/01/siamese_network_keras_for_image|tag|http://www.semanlink.net/tag/keras +http://www.semanlink.net/doc/2020/01/siamese_network_keras_for_image|tag|http://www.semanlink.net/tag/triplet_loss +http://www.semanlink.net/doc/2020/01/siamese_network_keras_for_image|tag|http://www.semanlink.net/tag/sample_code +http://www.semanlink.net/doc/2020/01/siamese_network_keras_for_image|title|Siamese Network for Image and Text similarity using Keras +http://www.semanlink.net/doc/2020/01/siamese_network_keras_for_image|bookmarkOf|https://medium.com/@prabhnoor0212/siamese-network-keras-31a3a8f37d04 +http://www.semanlink.net/doc/2020/01/siamese_network_keras_for_image|creationTime|2020-01-22T16:50:08Z +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|creationDate|2020-05-31 +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|tag|http://www.semanlink.net/tag/knowledge_distillation +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|tag|http://www.semanlink.net/tag/machines_teaching_machines +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|tag|http://www.semanlink.net/tag/ai_facebook +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|arxiv_author|Bernhard Schölkopf +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|arxiv_author|Vladimir Vapnik +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|arxiv_author|David Lopez-Paz +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|arxiv_author|Léon Bottou +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|comment|"A framework to learn from multiple machines and data representations, unifying two techniques that enable machines to learn from other machines: [distillation](tag:knowledge_distillation) ([Hinton et al., 2015](doc:2020/04/1503_02531_distilling_the_kno)) and privileged information (Vapnik & Izmailov, 2015) +" +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|relatedDoc|http://www.semanlink.net/doc/2020/04/1503_02531_distilling_the_kno 
+http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|title|[1511.03643] Unifying distillation and privileged information +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|bookmarkOf|https://arxiv.org/abs/1511.03643 +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|creationTime|2020-05-31T10:42:51Z +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|arxiv_summary|"Distillation (Hinton et al., 2015) and privileged information (Vapnik & +Izmailov, 2015) are two techniques that enable machines to learn from other +machines. This paper unifies these two techniques into generalized +distillation, a framework to learn from multiple machines and data +representations. We provide theoretical and causal insight about the inner +workings of generalized distillation, extend it to unsupervised, semisupervised +and multitask learning scenarios, and illustrate its efficacy on a variety of +numerical simulations on both synthetic and real-world data." +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|arxiv_firstAuthor|David Lopez-Paz +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|arxiv_updated|2016-02-26T02:21:52Z +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|arxiv_title|Unifying distillation and privileged information +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|arxiv_published|2015-11-11T20:27:54Z +http://www.semanlink.net/doc/2020/05/1511_03643_unifying_distillat|arxiv_num|1511.03643 +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|creationDate|2019-12-03 +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|tag|http://www.semanlink.net/tag/kg_and_nlp +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|tag|http://www.semanlink.net/tag/concept_learning +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|tag|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|tag|http://www.semanlink.net/tag/laure_soulier +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|tag|http://www.semanlink.net/tag/nlp_text_representation +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|comment|"- Text grounding +- Enhancing text representation with knowledge resources +- Learning Multi-Modal Word +Representation Grounded in +Visual Context" +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|title|CONCEPTUAL GROUNDING FOR TEXT REPRESENTATION LEARNING +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|bookmarkOf|https://ia-ri.sciencesconf.org/data/pages/mainARIA_Soulier.pdf +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|creationTime|2019-12-03T10:53:50Z +http://www.semanlink.net/doc/2019/12/conceptual_grounding_for_text_r|mainDoc|http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2 +http://www.semanlink.net/doc/2019/12/les_reseaux_semantiques_comme_o|creationDate|2019-12-17 +http://www.semanlink.net/doc/2019/12/les_reseaux_semantiques_comme_o|tag|http://www.semanlink.net/tag/jean_rohmer +http://www.semanlink.net/doc/2019/12/les_reseaux_semantiques_comme_o|title|Les réseaux sémantiques comme outil de travail quotidien (Jean Rohmer) +http://www.semanlink.net/doc/2019/12/les_reseaux_semantiques_comme_o|bookmarkOf|https://ia-ri.sciencesconf.org/data/ROHMER_AFIA_ARIA_2_De_cembre_2019.pdf 
+http://www.semanlink.net/doc/2019/12/les_reseaux_semantiques_comme_o|creationTime|2019-12-17T11:27:04Z +http://www.semanlink.net/doc/2019/12/les_reseaux_semantiques_comme_o|mainDoc|http://www.semanlink.net/doc/2019/12/journee_commune_afia_aria_2 +http://www.semanlink.net/doc/2019/07/spacyirl_2019_conference_in_ove|creationDate|2019-07-13 +http://www.semanlink.net/doc/2019/07/spacyirl_2019_conference_in_ove|tag|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/doc/2019/07/spacyirl_2019_conference_in_ove|title|SpacyIRL 2019 Conference in Overview LinkedIn +http://www.semanlink.net/doc/2019/07/spacyirl_2019_conference_in_ove|bookmarkOf|https://www.linkedin.com/pulse/spacyirl-2019-conference-overview-ivan-bilan/ +http://www.semanlink.net/doc/2019/07/spacyirl_2019_conference_in_ove|creationTime|2019-07-13T10:39:31Z +http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_|creationDate|2019-10-23 +http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_|tag|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_|tag|http://www.semanlink.net/tag/these_irit_renault_biblio +http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_|tag|http://www.semanlink.net/tag/irit +http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_|comment|Related [slides](/doc/2019/10/textual_representation_learning) +http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_|relatedDoc|http://www.semanlink.net/doc/2019/10/textual_representation_learning +http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_|title|Thèse : Modèles neuronaux pour la recherche d’information : approches dirigées par les ressources sémantiques +http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_|bookmarkOf|https://www.irit.fr/publis/IRIS/These_GiaHung_Nguyen.pdf +http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_|creationTime|2019-10-23T20:18:26Z +http://www.semanlink.net/doc/2020/04/en_pleine_crise_sanitaire_le_g|creationDate|2020-04-04 +http://www.semanlink.net/doc/2020/04/en_pleine_crise_sanitaire_le_g|tag|http://www.semanlink.net/tag/medical_data +http://www.semanlink.net/doc/2020/04/en_pleine_crise_sanitaire_le_g|title|En pleine crise sanitaire, le géant américain Palantir lorgne les données des hôpitaux français +http://www.semanlink.net/doc/2020/04/en_pleine_crise_sanitaire_le_g|bookmarkOf|https://www.bfmtv.com/tech/en-pleine-crise-sanitaire-le-geant-americain-palantir-lorgne-les-donnees-des-hopitaux-francais-1887043.html# +http://www.semanlink.net/doc/2020/04/en_pleine_crise_sanitaire_le_g|creationTime|2020-04-04T10:57:29Z +http://www.semanlink.net/doc/2019/08/i%E2%80%99m_a_journalist_monsanto_buil|creationDate|2019-08-12 +http://www.semanlink.net/doc/2019/08/i%E2%80%99m_a_journalist_monsanto_buil|tag|http://www.semanlink.net/tag/monsanto +http://www.semanlink.net/doc/2019/08/i%E2%80%99m_a_journalist_monsanto_buil|title|I’m a journalist. 
Monsanto built a step-by-step strategy to destroy my reputation Carey Gillam Opinion The Guardian +http://www.semanlink.net/doc/2019/08/i%E2%80%99m_a_journalist_monsanto_buil|bookmarkOf|https://www.theguardian.com/commentisfree/2019/aug/08/monsanto-roundup-journalist-documents +http://www.semanlink.net/doc/2019/08/i%E2%80%99m_a_journalist_monsanto_buil|creationTime|2019-08-12T17:43:56Z +http://www.semanlink.net/doc/2020/04/lee_moses_bad_girl_full_song|creationDate|2020-04-13 +http://www.semanlink.net/doc/2020/04/lee_moses_bad_girl_full_song|tag|http://www.semanlink.net/tag/youtube_video +http://www.semanlink.net/doc/2020/04/lee_moses_bad_girl_full_song|tag|http://www.semanlink.net/tag/musique +http://www.semanlink.net/doc/2020/04/lee_moses_bad_girl_full_song|title|Lee Moses - Bad Girl (full song, no break) - YouTube +http://www.semanlink.net/doc/2020/04/lee_moses_bad_girl_full_song|bookmarkOf|https://www.youtube.com/watch?v=3lxp7WVXiXU&list=RD3lxp7WVXiXU&start_radio=1 +http://www.semanlink.net/doc/2020/04/lee_moses_bad_girl_full_song|creationTime|2020-04-13T15:02:28Z +http://www.semanlink.net/doc/2020/05/fado_tropical_de_chico_buarque_|creationDate|2020-05-29 +http://www.semanlink.net/doc/2020/05/fado_tropical_de_chico_buarque_|tag|http://www.semanlink.net/tag/dictature +http://www.semanlink.net/doc/2020/05/fado_tropical_de_chico_buarque_|tag|http://www.semanlink.net/tag/portugal +http://www.semanlink.net/doc/2020/05/fado_tropical_de_chico_buarque_|tag|http://www.semanlink.net/tag/chico_buarque +http://www.semanlink.net/doc/2020/05/fado_tropical_de_chico_buarque_|tag|http://www.semanlink.net/tag/fado_tropical +http://www.semanlink.net/doc/2020/05/fado_tropical_de_chico_buarque_|title|Fado tropical de Chico Buarque et Portugal de Georges Moustaki. De la dictature de Salazar à la Révolution des œillets au Portugal +http://www.semanlink.net/doc/2020/05/fado_tropical_de_chico_buarque_|bookmarkOf|https://journals.openedition.org/etudesromanes/1348 +http://www.semanlink.net/doc/2020/05/fado_tropical_de_chico_buarque_|creationTime|2020-05-29T22:46:03Z +http://www.semanlink.net/doc/2019/08/the_state_of_transfer_learning_|creationDate|2019-08-19 +http://www.semanlink.net/doc/2019/08/the_state_of_transfer_learning_|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2019/08/the_state_of_transfer_learning_|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/08/the_state_of_transfer_learning_|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/doc/2019/08/the_state_of_transfer_learning_|title|The State of Transfer Learning in NLP (2019) +http://www.semanlink.net/doc/2019/08/the_state_of_transfer_learning_|bookmarkOf|http://ruder.io/state-of-transfer-learning-in-nlp/ +http://www.semanlink.net/doc/2019/08/the_state_of_transfer_learning_|creationTime|2019-08-19T16:30:32Z +http://www.semanlink.net/doc/2020/05/at_columbia_university_virtual_|creationDate|2020-05-10 +http://www.semanlink.net/doc/2020/05/at_columbia_university_virtual_|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2020/05/at_columbia_university_virtual_|title|At Columbia University virtual conference, masters ply the strange and beautiful art of knowledge graphs ZDNet +http://www.semanlink.net/doc/2020/05/at_columbia_university_virtual_|bookmarkOf|https://www.zdnet.com/article/at-columbia-university-virtual-conference-masters-ply-the-strange-and-beautiful-art-of-knowledge-graphs/ 
+http://www.semanlink.net/doc/2020/05/at_columbia_university_virtual_|creationTime|2020-05-10T10:46:58Z +http://www.semanlink.net/doc/2019/08/trends_in_natural_language_proc|creationDate|2019-08-05 +http://www.semanlink.net/doc/2019/08/trends_in_natural_language_proc|tag|http://www.semanlink.net/tag/acl_2019 +http://www.semanlink.net/doc/2019/08/trends_in_natural_language_proc|comment|"- Reducing Bias +- NLP Applications Galore +- Pretrain then Finetune: A New Paradigm for NLP +- Infusing Knowledge into NLP Architectures +- Interpretability of Models +- Rethinking Evaluation and Assumptions of Natural Language Generation +- Going Beyond the Pretrain-Finetune Paradigm" +http://www.semanlink.net/doc/2019/08/trends_in_natural_language_proc|title|Trends in Natural Language Processing: ACL 2019 In Review - Mihail Eric +http://www.semanlink.net/doc/2019/08/trends_in_natural_language_proc|bookmarkOf|https://www.mihaileric.com/posts/nlp-trends-acl-2019/ +http://www.semanlink.net/doc/2019/08/trends_in_natural_language_proc|creationTime|2019-08-05T15:49:34Z +http://www.semanlink.net/doc/2020/04/the_latent_structure_of_diction|creationDate|2020-04-16 +http://www.semanlink.net/doc/2020/04/the_latent_structure_of_diction|tag|http://www.semanlink.net/tag/how_much_information_in_a_language +http://www.semanlink.net/doc/2020/04/the_latent_structure_of_diction|tag|http://www.semanlink.net/tag/langage +http://www.semanlink.net/doc/2020/04/the_latent_structure_of_diction|tag|http://www.semanlink.net/tag/dictionnaire +http://www.semanlink.net/doc/2020/04/the_latent_structure_of_diction|comment|How many words—and which ones—are sufficient to define all other words? +http://www.semanlink.net/doc/2020/04/the_latent_structure_of_diction|title|The Latent Structure of Dictionaries - Vincent‐Lamarre - 2016 +http://www.semanlink.net/doc/2020/04/the_latent_structure_of_diction|bookmarkOf|https://onlinelibrary.wiley.com/doi/full/10.1111/tops.12211 +http://www.semanlink.net/doc/2020/04/the_latent_structure_of_diction|creationTime|2020-04-16T19:15:38Z +http://www.semanlink.net/doc/2020/03/neuromorphic_spintronics_%7C_natu|creationDate|2020-03-02 +http://www.semanlink.net/doc/2020/03/neuromorphic_spintronics_%7C_natu|tag|http://www.semanlink.net/tag/brains_in_silicon +http://www.semanlink.net/doc/2020/03/neuromorphic_spintronics_%7C_natu|tag|http://www.semanlink.net/tag/julie_grollier +http://www.semanlink.net/doc/2020/03/neuromorphic_spintronics_%7C_natu|title|Neuromorphic spintronics Nature Electronics +http://www.semanlink.net/doc/2020/03/neuromorphic_spintronics_%7C_natu|bookmarkOf|https://www.nature.com/articles/s41928-019-0360-9.epdf?author_access_token=jtNeSAhVDL6lK9Q0yunEtdRgN0jAjWel9jnR3ZoTv0PgNbthozMob-1_2TKB9x_fHQcXMtfnbJHqU8V34xrEFK_D8iG774ueRc9x-R_k0v1d-2Pjco0iE67uXMbZ8pklVwMI2YTodu1XqKlKXwInjw%3D%3D +http://www.semanlink.net/doc/2020/03/neuromorphic_spintronics_%7C_natu|creationTime|2020-03-02T19:55:39Z +http://www.semanlink.net/doc/2019/07/quora_question_pairs_%7C_kaggle|creationDate|2019-07-02 +http://www.semanlink.net/doc/2019/07/quora_question_pairs_%7C_kaggle|tag|http://www.semanlink.net/tag/quora_question_pairs +http://www.semanlink.net/doc/2019/07/quora_question_pairs_%7C_kaggle|title|Quora Question Pairs Kaggle +http://www.semanlink.net/doc/2019/07/quora_question_pairs_%7C_kaggle|bookmarkOf|https://www.kaggle.com/c/quora-question-pairs +http://www.semanlink.net/doc/2019/07/quora_question_pairs_%7C_kaggle|creationTime|2019-07-02T01:07:48Z 
+http://www.semanlink.net/doc/2020/04/should_you_use_fastai_deeple|creationDate|2020-04-27 +http://www.semanlink.net/doc/2020/04/should_you_use_fastai_deeple|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2020/04/should_you_use_fastai_deeple|comment|TL;DR: yes +http://www.semanlink.net/doc/2020/04/should_you_use_fastai_deeple|title|Should you use FastAI? - deeplearningbrasilia - Medium +http://www.semanlink.net/doc/2020/04/should_you_use_fastai_deeple|bookmarkOf|https://medium.com/deeplearningbrasilia/should-you-use-fastai-7ce994de67d0 +http://www.semanlink.net/doc/2020/04/should_you_use_fastai_deeple|creationTime|2020-04-27T15:33:37Z +http://www.semanlink.net/doc/2020/03/martynas_jusevicius_sur_twitter|creationDate|2020-03-13 +http://www.semanlink.net/doc/2020/03/martynas_jusevicius_sur_twitter|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/03/martynas_jusevicius_sur_twitter|tag|http://www.semanlink.net/tag/nlp_using_knowledge_graphs +http://www.semanlink.net/doc/2020/03/martynas_jusevicius_sur_twitter|tag|http://www.semanlink.net/tag/entity_linking +http://www.semanlink.net/doc/2020/03/martynas_jusevicius_sur_twitter|tag|http://www.semanlink.net/tag/martynas_jusevicius +http://www.semanlink.net/doc/2020/03/martynas_jusevicius_sur_twitter|title|"Martynas Jusevicius sur Twitter : ""Is there a solution for entity recognition that would use a local #KnowledgeGraph to look for matches? Ideally any SPARQL datasource...""" +http://www.semanlink.net/doc/2020/03/martynas_jusevicius_sur_twitter|bookmarkOf|https://twitter.com/namedgraph/status/1238387782944460802?s=20 +http://www.semanlink.net/doc/2020/03/martynas_jusevicius_sur_twitter|creationTime|2020-03-13T10:38:03Z +http://www.semanlink.net/doc/2020/01/%C2%AB_toi_le_yadga_mangeur_de_riz_|creationDate|2020-01-19 +http://www.semanlink.net/doc/2020/01/%C2%AB_toi_le_yadga_mangeur_de_riz_|tag|http://www.semanlink.net/tag/parente_a_plaisanterie +http://www.semanlink.net/doc/2020/01/%C2%AB_toi_le_yadga_mangeur_de_riz_|tag|http://www.semanlink.net/tag/burkina_faso +http://www.semanlink.net/doc/2020/01/%C2%AB_toi_le_yadga_mangeur_de_riz_|title|« Toi le Yadga mangeur de riz, tu es mon esclave » : pour rire et faire la paix, les Burkinabés s’insultent +http://www.semanlink.net/doc/2020/01/%C2%AB_toi_le_yadga_mangeur_de_riz_|bookmarkOf|https://www.lemonde.fr/afrique/article/2020/01/17/au-burkina-faso-on-s-insulte-pour-rire-et-faire-la-paix_6026369_3212.html +http://www.semanlink.net/doc/2020/01/%C2%AB_toi_le_yadga_mangeur_de_riz_|creationTime|2020-01-19T17:18:04Z +http://www.semanlink.net/doc/2019/09/scientists_taught_these_adorabl|creationDate|2019-09-16 +http://www.semanlink.net/doc/2019/09/scientists_taught_these_adorabl|tag|http://www.semanlink.net/tag/jeu +http://www.semanlink.net/doc/2019/09/scientists_taught_these_adorabl|tag|http://www.semanlink.net/tag/rats +http://www.semanlink.net/doc/2019/09/scientists_taught_these_adorabl|title|Scientists taught these adorable rats to play hide and seek - Los Angeles Times +http://www.semanlink.net/doc/2019/09/scientists_taught_these_adorabl|bookmarkOf|https://www.latimes.com/science/story/2019-09-12/rats-can-play-hide-and-seek +http://www.semanlink.net/doc/2019/09/scientists_taught_these_adorabl|creationTime|2019-09-16T01:51:23Z +http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1|creationDate|2019-07-13 +http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1|tag|http://www.semanlink.net/tag/entity_linking 
+http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1|tag|http://www.semanlink.net/tag/slides +http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1|tag|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1|comment|"Grounding textual mentions to knowledge base concepts. +[Video](/doc/2019/07/sofie_van_landeghem_entity_lin) [github](https://github.com/explosion/spaCy/pull/3864)" +http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1|relatedDoc|http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin +http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1|title|Sofie Van Landeghem: Entity linking functionality in spaCy (spaCy IRL 2019) - Slides +http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1|bookmarkOf|https://drive.google.com/file/d/1EuGxcQLcXvjjkZ-KRUlwpr_doBVyEBEG/view +http://www.semanlink.net/doc/2019/07/sofie_van_landeghem_entity_lin_1|creationTime|2019-07-13T10:43:08Z +http://www.semanlink.net/doc/2019/08/active_learning_%7C_synthesis_lec|creationDate|2019-08-07 +http://www.semanlink.net/doc/2019/08/active_learning_%7C_synthesis_lec|tag|http://www.semanlink.net/tag/active_learning +http://www.semanlink.net/doc/2019/08/active_learning_%7C_synthesis_lec|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2019/08/active_learning_%7C_synthesis_lec|title|Active Learning Synthesis Lectures on Artificial Intelligence and Machine Learning (2012) +http://www.semanlink.net/doc/2019/08/active_learning_%7C_synthesis_lec|bookmarkOf|https://www.morganclaypool.com/doi/abs/10.2200/S00429ED1V01Y201207AIM018 +http://www.semanlink.net/doc/2019/08/active_learning_%7C_synthesis_lec|creationTime|2019-08-07T01:34:08Z +http://www.semanlink.net/doc/2019/12/12_nlp_examples_how_natural_la|creationDate|2019-12-07 +http://www.semanlink.net/doc/2019/12/12_nlp_examples_how_natural_la|tag|http://www.semanlink.net/tag/nlp_use_cases +http://www.semanlink.net/doc/2019/12/12_nlp_examples_how_natural_la|title|12 NLP Examples: How Natural Language Processing is Used +http://www.semanlink.net/doc/2019/12/12_nlp_examples_how_natural_la|bookmarkOf|https://www.wonderflow.co/blog/natural-language-processing-examples#why-use-nlp +http://www.semanlink.net/doc/2019/12/12_nlp_examples_how_natural_la|creationTime|2019-12-07T20:18:50Z +http://www.semanlink.net/doc/2020/01/hazukashi_%F0%9F%8C%A9_sur_twitter_th|creationDate|2020-01-31 +http://www.semanlink.net/doc/2020/01/hazukashi_%F0%9F%8C%A9_sur_twitter_th|tag|http://www.semanlink.net/tag/hong_kong +http://www.semanlink.net/doc/2020/01/hazukashi_%F0%9F%8C%A9_sur_twitter_th|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/01/hazukashi_%F0%9F%8C%A9_sur_twitter_th|title|"Hazukashi 🌩 sur Twitter : ""[THREAD] Je vous ai déjà parlé de la Cité Emmurée de Kowloon, le ghetto dystopique cyberpunk Hong-Kongais impénétrable des années 80 ?" 
+http://www.semanlink.net/doc/2020/01/hazukashi_%F0%9F%8C%A9_sur_twitter_th|bookmarkOf|https://twitter.com/Hazukashi1/status/1222841442030247936?s=20 +http://www.semanlink.net/doc/2020/01/hazukashi_%F0%9F%8C%A9_sur_twitter_th|creationTime|2020-01-31T14:54:57Z +http://www.semanlink.net/doc/2019/11/bayesian_deep_learning_with_fas|creationDate|2019-11-29 +http://www.semanlink.net/doc/2019/11/bayesian_deep_learning_with_fas|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2019/11/bayesian_deep_learning_with_fas|tag|http://www.semanlink.net/tag/bayesian_deep_learning +http://www.semanlink.net/doc/2019/11/bayesian_deep_learning_with_fas|title|Bayesian deep learning with Fastai : how not to be uncertain about your uncertainty ! +http://www.semanlink.net/doc/2019/11/bayesian_deep_learning_with_fas|bookmarkOf|https://medium.com/@danielhuynh_48554/bayesian-deep-learning-with-fastai-how-not-to-be-uncertain-about-your-uncertainty-6a99d1aa686e +http://www.semanlink.net/doc/2019/11/bayesian_deep_learning_with_fas|creationTime|2019-11-29T17:42:54Z +http://www.semanlink.net/doc/2019/10/textual_representation_learning|creationDate|2019-10-21 +http://www.semanlink.net/doc/2019/10/textual_representation_learning|tag|http://www.semanlink.net/tag/slides +http://www.semanlink.net/doc/2019/10/textual_representation_learning|tag|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/doc/2019/10/textual_representation_learning|tag|http://www.semanlink.net/tag/laure_soulier +http://www.semanlink.net/doc/2019/10/textual_representation_learning|tag|http://www.semanlink.net/tag/information_retrieval +http://www.semanlink.net/doc/2019/10/textual_representation_learning|tag|http://www.semanlink.net/tag/knowledge_resources +http://www.semanlink.net/doc/2019/10/textual_representation_learning|comment|Related to this [thesis](/doc/2019/10/these_modeles_neuronaux_pour_) +http://www.semanlink.net/doc/2019/10/textual_representation_learning|relatedDoc|http://www.semanlink.net/doc/2019/10/these_modeles_neuronaux_pour_ +http://www.semanlink.net/doc/2019/10/textual_representation_learning|title|TEXTUAL REPRESENTATION LEARNING DRIVEN BY KNOWLEDGE RESOURCES: APPLICATION TO INFORMATION RETRIEVAL +http://www.semanlink.net/doc/2019/10/textual_representation_learning|bookmarkOf|http://www-connex.lip6.fr/~soulier/data/talks/Lyon_KB_2019.pdf +http://www.semanlink.net/doc/2019/10/textual_representation_learning|creationTime|2019-10-21T22:11:17Z +http://www.semanlink.net/doc/2019/12/yoshua_bengio_revered_architec|creationDate|2019-12-18 +http://www.semanlink.net/doc/2019/12/yoshua_bengio_revered_architec|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/doc/2019/12/yoshua_bengio_revered_architec|title|Yoshua Bengio, Revered Architect of AI, Has Some Ideas About What to Build Next - IEEE Spectrum +http://www.semanlink.net/doc/2019/12/yoshua_bengio_revered_architec|bookmarkOf|https://spectrum.ieee.org/tech-talk/robotics/artificial-intelligence/yoshua-bengio-revered-architect-of-ai-has-some-ideas-about-what-to-build-next#qaTopicTwo +http://www.semanlink.net/doc/2019/12/yoshua_bengio_revered_architec|creationTime|2019-12-18T14:55:47Z +http://www.semanlink.net/doc/2020/02/yoshua_bengio|creationDate|2020-02-12 +http://www.semanlink.net/doc/2020/02/yoshua_bengio|tag|http://www.semanlink.net/tag/yoshua_bengio +http://www.semanlink.net/doc/2020/02/yoshua_bengio|comment|[Yoshua Bengio’s blog – first words](https://yoshuabengio.org/2020/02/10/fusce-risus/) 
+http://www.semanlink.net/doc/2020/02/yoshua_bengio|title|Yoshua Bengio +http://www.semanlink.net/doc/2020/02/yoshua_bengio|bookmarkOf|https://yoshuabengio.org/ +http://www.semanlink.net/doc/2020/02/yoshua_bengio|creationTime|2020-02-12T08:38:52Z +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|creationDate|2019-07-17 +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|tag|http://www.semanlink.net/tag/ranking_information_retrieval +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|tag|http://www.semanlink.net/tag/embeddings_in_ir +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|tag|http://www.semanlink.net/tag/bhaskar_mitra +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|arxiv_author|Nick Craswell +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|arxiv_author|Bhaskar Mitra +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|arxiv_author|Rich Caruana +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|arxiv_author|Eric Nalisnick +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|comment|"Investigate neural word embeddings as a source of evidence in document ranking. + +Presented in [this Stanford course on IR](/doc/?uri=https%3A%2F%2Fweb.stanford.edu%2Fclass%2Fcs276%2Fhandouts%2Flecture20-distributed-representations.pdf) by Chris Manning (starting slide 44) + +They train a word2vec model, but retain both the input and the output projections. + +> During ranking we map the query words into the input space and the document words into the output space, and compute a query-document relevance score by aggregating the cosine similarities across all the query-document word pairs. + +> However, when ranking a larger set of candidate documents, we find the embeddings-based approach is prone to false positives" +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|relatedDoc|https://web.stanford.edu/class/cs276/handouts/lecture20-distributed-representations.pdf +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|title|[1602.01137] A Dual Embedding Space Model for Document Ranking +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|bookmarkOf|https://arxiv.org/abs/1602.01137 +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|creationTime|2019-07-17T12:15:50Z +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|arxiv_summary|"A fundamental goal of search engines is to identify, given a query, documents +that have relevant text. This is intrinsically difficult because the query and +the document may use different vocabulary, or the document may contain query +words without being relevant. We investigate neural word embeddings as a source +of evidence in document ranking. We train a word2vec embedding model on a large +unlabelled query corpus, but in contrast to how the model is commonly used, we +retain both the input and the output projections, allowing us to leverage both +the embedding spaces to derive richer distributional relationships. During +ranking we map the query words into the input space and the document words into +the output space, and compute a query-document relevance score by aggregating +the cosine similarities across all the query-document word pairs. 
+We postulate that the proposed Dual Embedding Space Model (DESM) captures +evidence on whether a document is about a query term in addition to what is +modelled by traditional term-frequency based approaches. Our experiments show +that the DESM can re-rank top documents returned by a commercial Web search +engine, like Bing, better than a term-matching based signal like TF-IDF. +However, when ranking a larger set of candidate documents, we find the +embeddings-based approach is prone to false positives, retrieving documents +that are only loosely related to the query. We demonstrate that this problem +can be solved effectively by ranking based on a linear mixture of the DESM and +the word counting features." +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|arxiv_firstAuthor|Bhaskar Mitra +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|arxiv_updated|2016-02-02T22:23:18Z +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|arxiv_title|A Dual Embedding Space Model for Document Ranking +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|arxiv_published|2016-02-02T22:23:18Z +http://www.semanlink.net/doc/2019/07/_1602_01137_a_dual_embedding_s|arxiv_num|1602.01137 +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte_1|creationDate|2019-12-19 +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte_1|tag|http://www.semanlink.net/tag/graph_convolutional_networks +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte_1|tag|http://www.semanlink.net/tag/yoav_goldberg +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte_1|title|"(((ل()(ل() 'yoav)))) sur Twitter : ""is there a convincingly successful application of graph convolutions in NLP you can point me to?""" +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte_1|bookmarkOf|https://twitter.com/yoavgo/status/1207414210482888705 +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte_1|creationTime|2019-12-19T13:48:55Z +http://www.semanlink.net/doc/2020/01/hits_at_tac_kbp_2015_entity_dis|creationDate|2020-01-10 +http://www.semanlink.net/doc/2020/01/hits_at_tac_kbp_2015_entity_dis|tag|http://www.semanlink.net/tag/benjamin_heinzerling +http://www.semanlink.net/doc/2020/01/hits_at_tac_kbp_2015_entity_dis|tag|http://www.semanlink.net/tag/entity_discovery_and_linking +http://www.semanlink.net/doc/2020/01/hits_at_tac_kbp_2015_entity_dis|title|HITS at TAC KBP 2015:Entity Discovery and Linking, and Event Nugget Detection +http://www.semanlink.net/doc/2020/01/hits_at_tac_kbp_2015_entity_dis|bookmarkOf|https://tac.nist.gov/publications/2015/participant.papers/TAC2015.HITS.proceedings.pdf +http://www.semanlink.net/doc/2020/01/hits_at_tac_kbp_2015_entity_dis|creationTime|2020-01-10T17:15:24Z +http://www.semanlink.net/doc/2020/05/handling_events_eloquent_jav|creationDate|2020-05-06 +http://www.semanlink.net/doc/2020/05/handling_events_eloquent_jav|tag|http://www.semanlink.net/tag/javascript +http://www.semanlink.net/doc/2020/05/handling_events_eloquent_jav|title|Handling Events :: Eloquent JavaScript +http://www.semanlink.net/doc/2020/05/handling_events_eloquent_jav|bookmarkOf|https://eloquentjavascript.net/15_event.html +http://www.semanlink.net/doc/2020/05/handling_events_eloquent_jav|creationTime|2020-05-06T10:51:00Z +http://www.semanlink.net/doc/2020/05/label_unstructured_data_using_e|creationDate|2020-05-22 
+http://www.semanlink.net/doc/2020/05/label_unstructured_data_using_e|tag|http://www.semanlink.net/tag/enterprise_knowledge_graph +http://www.semanlink.net/doc/2020/05/label_unstructured_data_using_e|tag|http://www.semanlink.net/tag/word_sense_disambiguation +http://www.semanlink.net/doc/2020/05/label_unstructured_data_using_e|comment|word sense induction and disambiguation (WSID) with knowledge graphs +http://www.semanlink.net/doc/2020/05/label_unstructured_data_using_e|title|Label unstructured data using Enterprise Knowledge Graphs 2 +http://www.semanlink.net/doc/2020/05/label_unstructured_data_using_e|bookmarkOf|https://medium.com/semantic-tech-hotspot/label-unstructured-data-using-enterprise-knowledge-graphs-2-d84bda281270 +http://www.semanlink.net/doc/2020/05/label_unstructured_data_using_e|creationTime|2020-05-22T16:21:55Z +http://www.semanlink.net/doc/2020/04/depistage_du_coronavirus_les_|creationDate|2020-04-24 +http://www.semanlink.net/doc/2020/04/depistage_du_coronavirus_les_|tag|http://www.semanlink.net/tag/covid19_conneries_gouvernementales +http://www.semanlink.net/doc/2020/04/depistage_du_coronavirus_les_|tag|http://www.semanlink.net/tag/administration_francaise +http://www.semanlink.net/doc/2020/04/depistage_du_coronavirus_les_|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/depistage_du_coronavirus_les_|title|Dépistage du coronavirus : les raisons du fiasco français sur les tests +http://www.semanlink.net/doc/2020/04/depistage_du_coronavirus_les_|bookmarkOf|https://www.lemonde.fr/planete/article/2020/04/24/nous-attendons-d-etre-contactes-par-l-ars-mais-il-ne-se-passe-rien-le-fiasco-des-tests-en-france_6037647_3244.html +http://www.semanlink.net/doc/2020/04/depistage_du_coronavirus_les_|creationTime|2020-04-24T14:00:52Z +http://www.semanlink.net/doc/2020/03/au_kenya_l%E2%80%99unique_girafe_blanc|creationDate|2020-03-11 +http://www.semanlink.net/doc/2020/03/au_kenya_l%E2%80%99unique_girafe_blanc|tag|http://www.semanlink.net/tag/girafe +http://www.semanlink.net/doc/2020/03/au_kenya_l%E2%80%99unique_girafe_blanc|tag|http://www.semanlink.net/tag/l_humanite_merite_de_disparaitre +http://www.semanlink.net/doc/2020/03/au_kenya_l%E2%80%99unique_girafe_blanc|tag|http://www.semanlink.net/tag/kenya +http://www.semanlink.net/doc/2020/03/au_kenya_l%E2%80%99unique_girafe_blanc|title|Au Kenya, l’unique girafe blanche femelle et son petit tués par des braconniers +http://www.semanlink.net/doc/2020/03/au_kenya_l%E2%80%99unique_girafe_blanc|bookmarkOf|https://www.lemonde.fr/afrique/article/2020/03/11/au-kenya-l-unique-girafe-blanche-femelle-et-son-petit-tues-par-des-braconniers_6032601_3212.html +http://www.semanlink.net/doc/2020/03/au_kenya_l%E2%80%99unique_girafe_blanc|creationTime|2020-03-11T16:33:15Z +http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|creationDate|2019-07-31 +http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|tag|http://www.semanlink.net/tag/knowledge_graph_completion +http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|tag|http://www.semanlink.net/tag/attention_in_graphs +http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|tag|http://www.semanlink.net/tag/nlp_google +http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|tag|http://www.semanlink.net/tag/google_research +http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|tag|http://www.semanlink.net/tag/acl_2019 
+http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|comment|> State-of-the-art models for knowledge graph completion aim at learning a fixed embedding representation of entities in a multi-relational graph which can generalize to infer unseen entity relationships at test time. This can be sub-optimal as it requires memorizing and generalizing to all possible entity relationships using these fixed representations. We thus propose a novel **attention-based method to learn query-dependent representation of entities** which adaptively combines the relevant graph neighborhood of an entity leading to more accurate KG completion. +http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|title|A2N: Attending to Neighbors for Knowledge Graph Inference - ACL 2019 +http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|bookmarkOf|https://www.aclweb.org/anthology/papers/P/P19/P19-1431/ +http://www.semanlink.net/doc/2019/07/a2n_attending_to_neighbors_for|creationTime|2019-07-31T19:37:20Z +http://www.semanlink.net/doc/2020/03/max_little_sur_twitter_causa|creationDate|2020-03-08 +http://www.semanlink.net/doc/2020/03/max_little_sur_twitter_causa|tag|http://www.semanlink.net/tag/causal_inference +http://www.semanlink.net/doc/2020/03/max_little_sur_twitter_causa|tag|http://www.semanlink.net/tag/nn_symbolic_ai_hybridation +http://www.semanlink.net/doc/2020/03/max_little_sur_twitter_causa|tag|http://www.semanlink.net/tag/machine_learning +http://www.semanlink.net/doc/2020/03/max_little_sur_twitter_causa|tag|http://www.semanlink.net/tag/bayesian_deep_learning +http://www.semanlink.net/doc/2020/03/max_little_sur_twitter_causa|comment|> Techniques from causal inference, such as probabilistic causal diagrams and do-calculus, provide powerful (nonparametric) tools for drawing causal inferences from such observational data. However, these techniques are often incompatible with modern, nonparametric machine learning algorithms since they typically require explicit probabilistic models. 
Here, we develop causal bootstrapping for augmenting classical nonparametric bootstrap resampling with information on the causal relationship between variables +http://www.semanlink.net/doc/2020/03/max_little_sur_twitter_causa|title|"Max Little sur Twitter : ""Causal bootstrapping - a simple way of doing causal inference using arbitrary machine learning algo...""" +http://www.semanlink.net/doc/2020/03/max_little_sur_twitter_causa|bookmarkOf|https://twitter.com/MaxALittle/status/1236234054405627905?s=20 +http://www.semanlink.net/doc/2020/03/max_little_sur_twitter_causa|creationTime|2020-03-08T11:36:48Z +http://www.semanlink.net/doc/2020/02/ivan_maisky_wikipedia|creationDate|2020-02-26 +http://www.semanlink.net/doc/2020/02/ivan_maisky_wikipedia|tag|http://www.semanlink.net/tag/histoire_du_xxe_siecle +http://www.semanlink.net/doc/2020/02/ivan_maisky_wikipedia|tag|http://www.semanlink.net/tag/urss +http://www.semanlink.net/doc/2020/02/ivan_maisky_wikipedia|comment|Diplomate soviétique - si les Anglais et les Français l'avaient écouté, la 2eme guerre mondiale aurait peut-être été évitée +http://www.semanlink.net/doc/2020/02/ivan_maisky_wikipedia|title|Ivan Maisky +http://www.semanlink.net/doc/2020/02/ivan_maisky_wikipedia|bookmarkOf|https://en.wikipedia.org/wiki/Ivan_Maisky +http://www.semanlink.net/doc/2020/02/ivan_maisky_wikipedia|creationTime|2020-02-26T00:16:16Z +http://www.semanlink.net/doc/2019/08/an_easy_introduction_to_pytorch|creationDate|2019-08-09 +http://www.semanlink.net/doc/2019/08/an_easy_introduction_to_pytorch|tag|http://www.semanlink.net/tag/pytorch +http://www.semanlink.net/doc/2019/08/an_easy_introduction_to_pytorch|tag|http://www.semanlink.net/tag/introduction +http://www.semanlink.net/doc/2019/08/an_easy_introduction_to_pytorch|title|An easy introduction to Pytorch for Neural Networks +http://www.semanlink.net/doc/2019/08/an_easy_introduction_to_pytorch|bookmarkOf|https://towardsdatascience.com/an-easy-introduction-to-pytorch-for-neural-networks-3ea08516bff2 +http://www.semanlink.net/doc/2019/08/an_easy_introduction_to_pytorch|creationTime|2019-08-09T10:25:24Z +http://www.semanlink.net/doc/2019/07/nandana_mihindukulasooriya_sur_|creationDate|2019-07-13 +http://www.semanlink.net/doc/2019/07/nandana_mihindukulasooriya_sur_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/07/nandana_mihindukulasooriya_sur_|tag|http://www.semanlink.net/tag/entity_linking +http://www.semanlink.net/doc/2019/07/nandana_mihindukulasooriya_sur_|tag|http://www.semanlink.net/tag/wikidata +http://www.semanlink.net/doc/2019/07/nandana_mihindukulasooriya_sur_|title|"Nandana Mihindukulasooriya sur Twitter : ""I wonder if there is a service for disambiguation and entity linking for @wikidata where I can pass a text and get Wikidata entities mentioned in it!" 
+http://www.semanlink.net/doc/2019/07/nandana_mihindukulasooriya_sur_|bookmarkOf|https://twitter.com/nandanamihindu/status/1136237289355522048 +http://www.semanlink.net/doc/2019/07/nandana_mihindukulasooriya_sur_|creationTime|2019-07-13T13:55:08Z +http://www.semanlink.net/doc/2020/02/contrastive_self_supervised_lea|creationDate|2020-02-15 +http://www.semanlink.net/doc/2020/02/contrastive_self_supervised_lea|tag|http://www.semanlink.net/tag/survey +http://www.semanlink.net/doc/2020/02/contrastive_self_supervised_lea|tag|http://www.semanlink.net/tag/contrastive_self_supervised_learning +http://www.semanlink.net/doc/2020/02/contrastive_self_supervised_lea|comment|"methods that build representations by learning to encode what makes two things similar or different + +> an +overview of how contrastive methods differ from other +self-supervised learning techniques, + +> can we build +representation learning algorithms that don’t concentrate +on pixel-level details, and only encode high-level features +sufficient enough to distinguish different objects? + +Learn an encoder such as: +score(f(x), f(x+)) >> score(f(x), f(x-)) +(where x+ similar to x, ""positiv"" example, and x- ""negative""). Can use a softmax classifier to optimize this (""InfoNCE loss"" ~ cross-entropy loss)" +http://www.semanlink.net/doc/2020/02/contrastive_self_supervised_lea|title|Contrastive Self-Supervised Learning Ankesh Anand (2020) +http://www.semanlink.net/doc/2020/02/contrastive_self_supervised_lea|bookmarkOf|https://ankeshanand.com/blog/2020/01/26/contrative-self-supervised-learning.html +http://www.semanlink.net/doc/2020/02/contrastive_self_supervised_lea|creationTime|2020-02-15T19:51:39Z +http://www.semanlink.net/doc/2019/11/jacques_attali_%C2%AB_la_chute_du_|creationDate|2019-11-09 +http://www.semanlink.net/doc/2019/11/jacques_attali_%C2%AB_la_chute_du_|tag|http://www.semanlink.net/tag/mur_de_berlin +http://www.semanlink.net/doc/2019/11/jacques_attali_%C2%AB_la_chute_du_|tag|http://www.semanlink.net/tag/attali +http://www.semanlink.net/doc/2019/11/jacques_attali_%C2%AB_la_chute_du_|title|Jacques Attali : « La chute du mur de Berlin est une anecdote sans importance » - Le Point +http://www.semanlink.net/doc/2019/11/jacques_attali_%C2%AB_la_chute_du_|bookmarkOf|https://www.lepoint.fr/politique/jacques-attali-la-chute-du-mur-de-berlin-est-une-anecdote-sans-importance-09-11-2019-2346190_20.php +http://www.semanlink.net/doc/2019/11/jacques_attali_%C2%AB_la_chute_du_|creationTime|2019-11-09T09:35:33Z +http://www.semanlink.net/doc/2019/09/machine_learning_for_humans_pa|creationDate|2019-09-23 +http://www.semanlink.net/doc/2019/09/machine_learning_for_humans_pa|tag|http://www.semanlink.net/tag/reinforcement_learning +http://www.semanlink.net/doc/2019/09/machine_learning_for_humans_pa|title|Machine Learning for Humans, Part 5: Reinforcement Learning +http://www.semanlink.net/doc/2019/09/machine_learning_for_humans_pa|bookmarkOf|https://medium.com/machine-learning-for-humans/reinforcement-learning-6eacf258b265 +http://www.semanlink.net/doc/2019/09/machine_learning_for_humans_pa|creationTime|2019-09-23T23:36:26Z +http://www.semanlink.net/doc/2019/08/product_key_memory_pkm_minima|creationDate|2019-08-30 +http://www.semanlink.net/doc/2019/08/product_key_memory_pkm_minima|tag|http://www.semanlink.net/tag/guillaume_lample +http://www.semanlink.net/doc/2019/08/product_key_memory_pkm_minima|tag|http://www.semanlink.net/tag/nlp_facebook +http://www.semanlink.net/doc/2019/08/product_key_memory_pkm_minima|comment|"Implementation for this 
[paper](/doc/2019/07/_1907_05242_large_memory_layer) +" +http://www.semanlink.net/doc/2019/08/product_key_memory_pkm_minima|relatedDoc|http://www.semanlink.net/doc/2019/07/_1907_05242_large_memory_layer +http://www.semanlink.net/doc/2019/08/product_key_memory_pkm_minima|title|Product-Key Memory (PKM) Minimalist implementation of a Product-Key Memory layer +http://www.semanlink.net/doc/2019/08/product_key_memory_pkm_minima|bookmarkOf|https://github.com/facebookresearch/XLM/blob/master/PKM-layer.ipynb +http://www.semanlink.net/doc/2019/08/product_key_memory_pkm_minima|creationTime|2019-08-30T13:38:58Z +http://www.semanlink.net/doc/2019/07/similarity_learning_with_or_wi|creationDate|2019-07-03 +http://www.semanlink.net/doc/2019/07/similarity_learning_with_or_wi|tag|http://www.semanlink.net/tag/siamese_network +http://www.semanlink.net/doc/2019/07/similarity_learning_with_or_wi|tag|http://www.semanlink.net/tag/similarity_learning +http://www.semanlink.net/doc/2019/07/similarity_learning_with_or_wi|tag|http://www.semanlink.net/tag/slides +http://www.semanlink.net/doc/2019/07/similarity_learning_with_or_wi|title|Similarity Learning with (or without) Convolutional Neural Network (slides) +http://www.semanlink.net/doc/2019/07/similarity_learning_with_or_wi|bookmarkOf|http://slazebni.cs.illinois.edu/spring17/lec09_similarity.pdf +http://www.semanlink.net/doc/2019/07/similarity_learning_with_or_wi|creationTime|2019-07-03T01:20:21Z +http://www.semanlink.net/doc/2020/02/machine_learning_crash_course_|creationDate|2020-02-18 +http://www.semanlink.net/doc/2020/02/machine_learning_crash_course_|tag|http://www.semanlink.net/tag/doc_by_google +http://www.semanlink.net/doc/2020/02/machine_learning_crash_course_|tag|http://www.semanlink.net/tag/machine_learning_course +http://www.semanlink.net/doc/2020/02/machine_learning_crash_course_|title|Machine Learning Crash Course    Google Developers +http://www.semanlink.net/doc/2020/02/machine_learning_crash_course_|bookmarkOf|https://developers.google.com/machine-learning/crash-course +http://www.semanlink.net/doc/2020/02/machine_learning_crash_course_|creationTime|2020-02-18T16:29:34Z +http://www.semanlink.net/doc/2019/07/handling_imbalanced_datasets_in|creationDate|2019-07-13 +http://www.semanlink.net/doc/2019/07/handling_imbalanced_datasets_in|tag|http://www.semanlink.net/tag/imbalanced_data +http://www.semanlink.net/doc/2019/07/handling_imbalanced_datasets_in|title|Handling imbalanced datasets in machine learning - Towards Data Science +http://www.semanlink.net/doc/2019/07/handling_imbalanced_datasets_in|bookmarkOf|https://towardsdatascience.com/handling-imbalanced-datasets-in-machine-learning-7a0e84220f28 +http://www.semanlink.net/doc/2019/07/handling_imbalanced_datasets_in|creationTime|2019-07-13T10:31:51Z +http://www.semanlink.net/doc/2020/01/ocasio_cortez_stumps_zuckerberg|creationDate|2020-01-01 +http://www.semanlink.net/doc/2020/01/ocasio_cortez_stumps_zuckerberg|tag|http://www.semanlink.net/tag/mark_zuckerberg +http://www.semanlink.net/doc/2020/01/ocasio_cortez_stumps_zuckerberg|tag|http://www.semanlink.net/tag/facebook_cambridge_analytica +http://www.semanlink.net/doc/2020/01/ocasio_cortez_stumps_zuckerberg|tag|http://www.semanlink.net/tag/alexandria_ocasio_cortez +http://www.semanlink.net/doc/2020/01/ocasio_cortez_stumps_zuckerberg|comment|> Congresswoman, I think that lying is bad +http://www.semanlink.net/doc/2020/01/ocasio_cortez_stumps_zuckerberg|title|Ocasio-Cortez stumps Zuckerberg with questions on far right and Cambridge Analytica Technology 
The Guardian +http://www.semanlink.net/doc/2020/01/ocasio_cortez_stumps_zuckerberg|bookmarkOf|https://www.theguardian.com/technology/2019/oct/23/mark-zuckerberg-alexandria-ocasio-cortez-facebook-cambridge-analytica +http://www.semanlink.net/doc/2020/01/ocasio_cortez_stumps_zuckerberg|creationTime|2020-01-01T13:10:10Z +http://www.semanlink.net/doc/2019/10/bert_is_now_part_of_google_sear|creationDate|2019-10-31 +http://www.semanlink.net/doc/2019/10/bert_is_now_part_of_google_sear|tag|http://www.semanlink.net/tag/nlp_negation +http://www.semanlink.net/doc/2019/10/bert_is_now_part_of_google_sear|tag|http://www.semanlink.net/tag/bertology +http://www.semanlink.net/doc/2019/10/bert_is_now_part_of_google_sear|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/10/bert_is_now_part_of_google_sear|title|BERT is now part of Google Search, so let’s understand how it reasons +http://www.semanlink.net/doc/2019/10/bert_is_now_part_of_google_sear|bookmarkOf|https://towardsdatascience.com/how-does-bert-reason-54feb363211 +http://www.semanlink.net/doc/2019/10/bert_is_now_part_of_google_sear|creationTime|2019-10-31T08:28:40Z +http://www.semanlink.net/doc/2019/08/deep_infomax_learning_good_rep|creationDate|2019-08-09 +http://www.semanlink.net/doc/2019/08/deep_infomax_learning_good_rep|tag|http://www.semanlink.net/tag/computer_vision +http://www.semanlink.net/doc/2019/08/deep_infomax_learning_good_rep|tag|http://www.semanlink.net/tag/mutual_information +http://www.semanlink.net/doc/2019/08/deep_infomax_learning_good_rep|tag|http://www.semanlink.net/tag/microsoft_research +http://www.semanlink.net/doc/2019/08/deep_infomax_learning_good_rep|tag|http://www.semanlink.net/tag/self_supervised_learning +http://www.semanlink.net/doc/2019/08/deep_infomax_learning_good_rep|tag|http://www.semanlink.net/tag/representation_learning +http://www.semanlink.net/doc/2019/08/deep_infomax_learning_good_rep|title|Deep InfoMax: Learning good representations through mutual information maximization - Microsoft Research +http://www.semanlink.net/doc/2019/08/deep_infomax_learning_good_rep|bookmarkOf|https://www.microsoft.com/en-us/research/blog/deep-infomax-learning-good-representations-through-mutual-information-maximization/?ocid=msr_blog_infomax_iclr_tw +http://www.semanlink.net/doc/2019/08/deep_infomax_learning_good_rep|creationTime|2019-08-09T01:42:22Z +http://www.semanlink.net/doc/2019/06/experiments_in_graph_based_semi|creationDate|2019-06-18 +http://www.semanlink.net/doc/2019/06/experiments_in_graph_based_semi|tag|http://www.semanlink.net/tag/graph_based_semi_supervised_learning +http://www.semanlink.net/doc/2019/06/experiments_in_graph_based_semi|title|Experiments in Graph-based Semi-Supervised Learning Methods for Class-Instance Acquisition +http://www.semanlink.net/doc/2019/06/experiments_in_graph_based_semi|bookmarkOf|http://talukdar.net/papers/355_Paper.pdf +http://www.semanlink.net/doc/2019/06/experiments_in_graph_based_semi|creationTime|2019-06-18T10:38:18Z +http://www.semanlink.net/doc/2020/04/ai_je_le_droit_de_mettre_un_lie|creationDate|2020-04-25 +http://www.semanlink.net/doc/2020/04/ai_je_le_droit_de_mettre_un_lie|tag|http://www.semanlink.net/tag/journal_le_monde +http://www.semanlink.net/doc/2020/04/ai_je_le_droit_de_mettre_un_lie|comment|"from ""(Ai-je le droit de mettre un lien vers Le Monde.fr sur mon site ?"": + +> Attention : l’URL des articles est modifiée au moment de leur passage en archive payante. 
+ +c'est lamentable, et je ne renouvellerai donc pas mon abonnement, parce que je ne peux pas me servir du Monde comme d'une base de connaissance. + +""Cool URIs don't change"" ([#TBL](/tag/tim_berners_lee))" +http://www.semanlink.net/doc/2020/04/ai_je_le_droit_de_mettre_un_lie|title|Les URLs du Monde ne sont pas cool +http://www.semanlink.net/doc/2020/04/ai_je_le_droit_de_mettre_un_lie|bookmarkOf|https://www.lemonde.fr/faq/?question=25958-ai-je-droit-mettre-un-lien-vers-monde-fr-site +http://www.semanlink.net/doc/2020/04/ai_je_le_droit_de_mettre_un_lie|creationTime|2020-04-25T13:07:26Z +http://www.semanlink.net/doc/2019/06/towards_reproducible_research_w|creationDate|2019-06-11 +http://www.semanlink.net/doc/2019/06/towards_reproducible_research_w|tag|http://www.semanlink.net/tag/pytorch +http://www.semanlink.net/doc/2019/06/towards_reproducible_research_w|tag|http://www.semanlink.net/tag/reproducible_research +http://www.semanlink.net/doc/2019/06/towards_reproducible_research_w|title|Towards Reproducible Research with PyTorch Hub PyTorch +http://www.semanlink.net/doc/2019/06/towards_reproducible_research_w|bookmarkOf|https://pytorch.org/blog/towards-reproducible-research-with-pytorch-hub/ +http://www.semanlink.net/doc/2019/06/towards_reproducible_research_w|creationTime|2019-06-11T11:39:01Z +http://www.semanlink.net/doc/2019/08/les_megalithes_de_veyre_monton_|creationDate|2019-08-29 +http://www.semanlink.net/doc/2019/08/les_megalithes_de_veyre_monton_|tag|http://www.semanlink.net/tag/archeologie +http://www.semanlink.net/doc/2019/08/les_megalithes_de_veyre_monton_|tag|http://www.semanlink.net/tag/megalithes +http://www.semanlink.net/doc/2019/08/les_megalithes_de_veyre_monton_|tag|http://www.semanlink.net/tag/auvergne +http://www.semanlink.net/doc/2019/08/les_megalithes_de_veyre_monton_|title|Les mégalithes de Veyre-Monton (Puy-de-Dôme) : ... 
Inrap +http://www.semanlink.net/doc/2019/08/les_megalithes_de_veyre_monton_|bookmarkOf|https://www.inrap.fr/les-megalithes-de-veyre-monton-puy-de-dome-alignements-de-menhirs-tombe-cairn-et-14540 +http://www.semanlink.net/doc/2019/08/les_megalithes_de_veyre_monton_|creationTime|2019-08-29T20:58:36Z +http://www.semanlink.net/doc/2020/05/http_javascript_post_request_|creationDate|2020-05-08 +http://www.semanlink.net/doc/2020/05/http_javascript_post_request_|tag|http://www.semanlink.net/tag/javascript_tips +http://www.semanlink.net/doc/2020/05/http_javascript_post_request_|title|http - JavaScript post request like a form submit - Stack Overflow +http://www.semanlink.net/doc/2020/05/http_javascript_post_request_|bookmarkOf|https://stackoverflow.com/questions/133925/javascript-post-request-like-a-form-submit +http://www.semanlink.net/doc/2020/05/http_javascript_post_request_|creationTime|2020-05-08T14:08:05Z +http://www.semanlink.net/doc/2019/08/lesson_3_self_driving_cars|creationDate|2019-08-05 +http://www.semanlink.net/doc/2019/08/lesson_3_self_driving_cars|tag|http://www.semanlink.net/tag/fast_ai_course +http://www.semanlink.net/doc/2019/08/lesson_3_self_driving_cars|tag|http://www.semanlink.net/tag/driverless_car +http://www.semanlink.net/doc/2019/08/lesson_3_self_driving_cars|title|Lesson 3 - Self Driving Cars +http://www.semanlink.net/doc/2019/08/lesson_3_self_driving_cars|bookmarkOf|https://rickwierenga.com/blog/fast.ai/FastAI2019-3.html +http://www.semanlink.net/doc/2019/08/lesson_3_self_driving_cars|creationTime|2019-08-05T10:48:06Z +http://www.semanlink.net/doc/2019/07/unsupervised_similarity_learnin|creationDate|2019-07-02 +http://www.semanlink.net/doc/2019/07/unsupervised_similarity_learnin|tag|http://www.semanlink.net/tag/similarity_learning +http://www.semanlink.net/doc/2019/07/unsupervised_similarity_learnin|tag|http://www.semanlink.net/tag/text_similarity +http://www.semanlink.net/doc/2019/07/unsupervised_similarity_learnin|tag|http://www.semanlink.net/tag/text_corpora_and_lexical_resources +http://www.semanlink.net/doc/2019/07/unsupervised_similarity_learnin|comment|"> Two main components of the model are a semantic interpreter of texts and a similarity function whose properties are derived from data. The first one associates particular documents with concepts defined in a knowledge base corresponding to the topics covered by the corpus. It shifts the representation of a meaning of the texts from words that can be ambiguous to concepts with predefined semantics. With this new representation, the similarity function is derived from data using a modification of the dynamic rule-based similarity model, which is adjusted to the unsupervised case. 
+ +By same author: [Interactive Document Indexing Method Based on Explicit Semantic Analysis](https://link.springer.com/chapter/10.1007/978-3-642-32115-3_18)" +http://www.semanlink.net/doc/2019/07/unsupervised_similarity_learnin|title|Unsupervised Similarity Learning from Textual Data (2012) +http://www.semanlink.net/doc/2019/07/unsupervised_similarity_learnin|bookmarkOf|https://www.researchgate.net/publication/262401917_Unsupervised_Similarity_Learning_from_Textual_Data +http://www.semanlink.net/doc/2019/07/unsupervised_similarity_learnin|creationTime|2019-07-02T23:03:48Z +http://www.semanlink.net/doc/2019/08/chip_huyen_sur_twitter_this_|creationDate|2019-08-04 +http://www.semanlink.net/doc/2019/08/chip_huyen_sur_twitter_this_|tag|http://www.semanlink.net/tag/e_learning +http://www.semanlink.net/doc/2019/08/chip_huyen_sur_twitter_this_|tag|http://www.semanlink.net/tag/machine_learning_course +http://www.semanlink.net/doc/2019/08/chip_huyen_sur_twitter_this_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/08/chip_huyen_sur_twitter_this_|title|"Chip Huyen sur Twitter : ""This thread is a combination of 10 free online courses on machine learning that I find the most helpful""" +http://www.semanlink.net/doc/2019/08/chip_huyen_sur_twitter_this_|bookmarkOf|https://twitter.com/chipro/status/1157772112876060672 +http://www.semanlink.net/doc/2019/08/chip_huyen_sur_twitter_this_|creationTime|2019-08-04T12:17:22Z +http://www.semanlink.net/doc/2019/06/a_structural_probe_for_finding_|creationDate|2019-06-10 +http://www.semanlink.net/doc/2019/06/a_structural_probe_for_finding_|tag|http://www.semanlink.net/tag/chris_manning +http://www.semanlink.net/doc/2019/06/a_structural_probe_for_finding_|tag|http://www.semanlink.net/tag/syntax_trees +http://www.semanlink.net/doc/2019/06/a_structural_probe_for_finding_|tag|http://www.semanlink.net/tag/geometry_of_language_embeddings +http://www.semanlink.net/doc/2019/06/a_structural_probe_for_finding_|comment|"Certain neural networks (e.g., BERT) build internal geometric representations of syntax trees. 
+ +(A mysterious “squared distance” effect, explained [here](http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i)) + +[Related blog post](https://nlp.stanford.edu/~johnhew/structural-probe.html)" +http://www.semanlink.net/doc/2019/06/a_structural_probe_for_finding_|relatedDoc|http://www.semanlink.net/doc/2019/06/language_trees_and_geometry_i +http://www.semanlink.net/doc/2019/06/a_structural_probe_for_finding_|title|A Structural Probe for Finding Syntax in Word Representations +http://www.semanlink.net/doc/2019/06/a_structural_probe_for_finding_|bookmarkOf|https://nlp.stanford.edu/pubs/hewitt2019structural.pdf +http://www.semanlink.net/doc/2019/06/a_structural_probe_for_finding_|creationTime|2019-06-10T00:04:56Z +http://www.semanlink.net/doc/2020/01/how_to_build_deep_neural_networ|creationDate|2020-01-07 +http://www.semanlink.net/doc/2020/01/how_to_build_deep_neural_networ|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.semanlink.net/doc/2020/01/how_to_build_deep_neural_networ|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2020/01/how_to_build_deep_neural_networ|tag|http://www.semanlink.net/tag/keras +http://www.semanlink.net/doc/2020/01/how_to_build_deep_neural_networ|title|How to build deep neural network for custom NER with Keras +http://www.semanlink.net/doc/2020/01/how_to_build_deep_neural_networ|bookmarkOf|https://confusedcoders.com/data-science/deep-learning/how-to-build-deep-neural-network-for-custom-ner-with-keras +http://www.semanlink.net/doc/2020/01/how_to_build_deep_neural_networ|creationTime|2020-01-07T11:57:40Z +http://www.semanlink.net/doc/2020/04/curiosites_animales_une_vie_s|creationDate|2020-04-08 +http://www.semanlink.net/doc/2020/04/curiosites_animales_une_vie_s|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://www.semanlink.net/doc/2020/04/curiosites_animales_une_vie_s|tag|http://www.semanlink.net/tag/parthenogenese +http://www.semanlink.net/doc/2020/04/curiosites_animales_une_vie_s|tag|http://www.semanlink.net/tag/documentaire_tv +http://www.semanlink.net/doc/2020/04/curiosites_animales_une_vie_s|tag|http://www.semanlink.net/tag/puceron +http://www.semanlink.net/doc/2020/04/curiosites_animales_une_vie_s|title|Curiosités animales - Une vie sans sexe : le dragon de Komodo et le puceron ARTE +http://www.semanlink.net/doc/2020/04/curiosites_animales_une_vie_s|bookmarkOf|https://www.arte.tv/fr/videos/052739-004-A/curiosites-animales/ +http://www.semanlink.net/doc/2020/04/curiosites_animales_une_vie_s|creationTime|2020-04-08T21:08:07Z +http://www.semanlink.net/doc/2020/01/adapters_a_compact_and_extensi|creationDate|2020-01-06 +http://www.semanlink.net/doc/2020/01/adapters_a_compact_and_extensi|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/doc/2020/01/adapters_a_compact_and_extensi|comment|"Enable **transfer learning for NLP on an incoming stream of tasks without training a new model for every new task**. + +In fine tuning, new layers are added and adjusted for each task. The proposed model adds new modules (""adapters"") between layers of the pretrained network. Parameters of the pretrained network remain fixed, and only a few +additional task-specific parameters are added for each new task, all +without affecting previous ones." 
+http://www.semanlink.net/doc/2020/01/adapters_a_compact_and_extensi|title|Adapters: A Compact and Extensible Transfer Learning Method for NLP +http://www.semanlink.net/doc/2020/01/adapters_a_compact_and_extensi|bookmarkOf|https://medium.com/dair-ai/adapters-a-compact-and-extensible-transfer-learning-method-for-nlp-6d18c2399f62 +http://www.semanlink.net/doc/2020/01/adapters_a_compact_and_extensi|creationTime|2020-01-06T01:45:19Z +http://www.semanlink.net/doc/2019/07/neural_transfer_learning_for_na|creationDate|2019-07-31 +http://www.semanlink.net/doc/2019/07/neural_transfer_learning_for_na|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2019/07/neural_transfer_learning_for_na|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/doc/2019/07/neural_transfer_learning_for_na|title|Neural Transfer Learning for Natural Language Processing - Seb Ruder's PhD Thesis +http://www.semanlink.net/doc/2019/07/neural_transfer_learning_for_na|bookmarkOf|http://ruder.io/thesis/neural_transfer_learning_for_nlp.pdf +http://www.semanlink.net/doc/2019/07/neural_transfer_learning_for_na|creationTime|2019-07-31T19:25:24Z +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|creationDate|2020-02-19 +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|tag|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|tag|http://www.semanlink.net/tag/arxiv_doc +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|tag|http://www.semanlink.net/tag/matrix_calculus +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|arxiv_author|Terence Parr +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|arxiv_author|Jeremy Howard +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|comment|Related blog post [The Math Behind Neural Networks](https://towardsdatascience.com/step-by-step-the-math-behind-neural-networks-490dc1f3cfd9) +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|title|[1802.01528] The Matrix Calculus You Need For Deep Learning +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|bookmarkOf|https://arxiv.org/abs/1802.01528 +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|creationTime|2020-02-19T21:52:12Z +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|arxiv_summary|"This paper is an attempt to explain all the matrix calculus you need in order +to understand the training of deep neural networks. We assume no math knowledge +beyond what you learned in calculus 1, and provide links to help you refresh +the necessary math where needed. Note that you do not need to understand this +material before you start learning to train and use deep learning in practice; +rather, this material is for those who are already familiar with the basics of +neural networks, and wish to deepen their understanding of the underlying math. +Don't worry if you get stuck at some point along the way---just go back and +reread the previous section, and try writing down and working through some +examples. And if you're still stuck, we're happy to answer your questions in +the Theory category at forums.fast.ai. Note: There is a reference section at +the end of the paper summarizing all the key matrix calculus rules and +terminology discussed here. 
See related articles at http://explained.ai" +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|arxiv_firstAuthor|Terence Parr +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|arxiv_updated|2018-07-02T17:36:34Z +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|arxiv_title|The Matrix Calculus You Need For Deep Learning +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|arxiv_published|2018-02-05T17:37:59Z +http://www.semanlink.net/doc/2020/02/_1802_01528_the_matrix_calculu|arxiv_num|1802.01528 +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter__1|creationDate|2019-09-13 +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter__1|tag|http://www.semanlink.net/tag/nlp_tools +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter__1|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter__1|tag|http://www.semanlink.net/tag/transfer_learning_in_nlp +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter__1|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter__1|title|"Sebastian Ruder sur Twitter : ""It's great to see the growing landscape of NLP transfer learning libraries""" +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter__1|bookmarkOf|https://twitter.com/seb_ruder/status/1172607702884933633 +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter__1|creationTime|2019-09-13T23:33:01Z +http://www.semanlink.net/doc/2020/04/covid_19_lessons_from_three_mil|creationDate|2020-04-18 +http://www.semanlink.net/doc/2020/04/covid_19_lessons_from_three_mil|tag|http://www.semanlink.net/tag/mensonge_d_etat +http://www.semanlink.net/doc/2020/04/covid_19_lessons_from_three_mil|tag|http://www.semanlink.net/tag/cringely +http://www.semanlink.net/doc/2020/04/covid_19_lessons_from_three_mil|tag|http://www.semanlink.net/tag/three_mile_island +http://www.semanlink.net/doc/2020/04/covid_19_lessons_from_three_mil|title|COVID-19 Lessons from Three Mile Island #2 — the NRC I, Cringely +http://www.semanlink.net/doc/2020/04/covid_19_lessons_from_three_mil|bookmarkOf|https://www.cringely.com/2020/04/17/covid-19-lessons-from-three-mile-island-2-the-nrc/ +http://www.semanlink.net/doc/2020/04/covid_19_lessons_from_three_mil|creationTime|2020-04-18T13:33:29Z +http://www.semanlink.net/doc/2019/09/blackrock|creationDate|2019-09-15 +http://www.semanlink.net/doc/2019/09/blackrock|tag|http://www.semanlink.net/tag/capitalisme_financier +http://www.semanlink.net/doc/2019/09/blackrock|title|BlackRock +http://www.semanlink.net/doc/2019/09/blackrock|bookmarkOf|https://fr.wikipedia.org/wiki/BlackRock +http://www.semanlink.net/doc/2019/09/blackrock|creationTime|2019-09-15T14:11:52Z +http://www.semanlink.net/doc/2019/11/dhq_digital_humanities_quarter|creationDate|2019-11-05 +http://www.semanlink.net/doc/2019/11/dhq_digital_humanities_quarter|tag|http://www.semanlink.net/tag/digital_humanities +http://www.semanlink.net/doc/2019/11/dhq_digital_humanities_quarter|tag|http://www.semanlink.net/tag/handwriting_recognition +http://www.semanlink.net/doc/2019/11/dhq_digital_humanities_quarter|title|DHQ: Digital Humanities Quarterly: Modelling Medieval Hands: Practical OCR for Caroline Minuscule +http://www.semanlink.net/doc/2019/11/dhq_digital_humanities_quarter|bookmarkOf|http://www.digitalhumanities.org/dhq/vol/13/1/000412/000412.html 
+http://www.semanlink.net/doc/2019/11/dhq_digital_humanities_quarter|creationTime|2019-11-05T18:37:52Z +http://www.semanlink.net/doc/2020/02/extractive_text_summarization_u|creationDate|2020-02-09 +http://www.semanlink.net/doc/2020/02/extractive_text_summarization_u|tag|http://www.semanlink.net/tag/extractive_summarization +http://www.semanlink.net/doc/2020/02/extractive_text_summarization_u|tag|http://www.semanlink.net/tag/nlp_sample_code +http://www.semanlink.net/doc/2020/02/extractive_text_summarization_u|tag|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/doc/2020/02/extractive_text_summarization_u|title|Extractive Text Summarization Using spaCy in Python +http://www.semanlink.net/doc/2020/02/extractive_text_summarization_u|bookmarkOf|https://medium.com/better-programming/extractive-text-summarization-using-spacy-in-python-88ab96d1fd97 +http://www.semanlink.net/doc/2020/02/extractive_text_summarization_u|creationTime|2020-02-09T23:35:36Z +http://www.semanlink.net/doc/2019/06/trip_report_akbc_2019_1st_con|creationDate|2019-06-03 +http://www.semanlink.net/doc/2019/06/trip_report_akbc_2019_1st_con|tag|http://www.semanlink.net/tag/knowledge_base +http://www.semanlink.net/doc/2019/06/trip_report_akbc_2019_1st_con|tag|http://www.semanlink.net/tag/knowledge_graph_construction +http://www.semanlink.net/doc/2019/06/trip_report_akbc_2019_1st_con|title|Trip Report: AKBC 2019 (1st Conference on Automated Knowledge Base Construction) +http://www.semanlink.net/doc/2019/06/trip_report_akbc_2019_1st_con|bookmarkOf|https://thinklinks.wordpress.com/2019/06/02/trip-report-akbc-2019/ +http://www.semanlink.net/doc/2019/06/trip_report_akbc_2019_1st_con|creationTime|2019-06-03T08:38:41Z +http://www.semanlink.net/doc/2020/04/julian_eisenschlos_sur_twitter_|creationDate|2020-04-11 +http://www.semanlink.net/doc/2020/04/julian_eisenschlos_sur_twitter_|tag|http://www.semanlink.net/tag/structured_data_embedding +http://www.semanlink.net/doc/2020/04/julian_eisenschlos_sur_twitter_|title|"Julian Eisenschlos sur Twitter : ""Ever wondered how to pre-train models that understand tables and do QA?""" +http://www.semanlink.net/doc/2020/04/julian_eisenschlos_sur_twitter_|bookmarkOf|https://twitter.com/eisenjulian/status/1248698516924399616 +http://www.semanlink.net/doc/2020/04/julian_eisenschlos_sur_twitter_|creationTime|2020-04-11T17:55:37Z +http://www.semanlink.net/doc/2020/01/i_oversaw_the_us_nuclear_power_|creationDate|2020-01-04 +http://www.semanlink.net/doc/2020/01/i_oversaw_the_us_nuclear_power_|tag|http://www.semanlink.net/tag/nuclear_power_no_thanks +http://www.semanlink.net/doc/2020/01/i_oversaw_the_us_nuclear_power_|title|I Oversaw the US Nuclear Power Industry. Now I Think It Should Be Banned. 
Common Dreams Views +http://www.semanlink.net/doc/2020/01/i_oversaw_the_us_nuclear_power_|bookmarkOf|https://www.commondreams.org/views/2019/05/17/i-oversaw-us-nuclear-power-industry-now-i-think-it-should-be-banned?utm_campaign=shareaholic&utm_medium=referral&utm_source=twitter +http://www.semanlink.net/doc/2020/01/i_oversaw_the_us_nuclear_power_|creationTime|2020-01-04T00:59:31Z +http://www.semanlink.net/doc/2020/04/edgar_morin_%C2%AB_cette_crise_nou|creationDate|2020-04-19 +http://www.semanlink.net/doc/2020/04/edgar_morin_%C2%AB_cette_crise_nou|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/edgar_morin_%C2%AB_cette_crise_nou|tag|http://www.semanlink.net/tag/edgar_morin +http://www.semanlink.net/doc/2020/04/edgar_morin_%C2%AB_cette_crise_nou|comment|> les carences dans le mode de pensée, jointes à la domination incontestable d’une soif effrénée de profit, sont responsables d’innombrables désastres humains dont ceux survenus depuis février 2020 +http://www.semanlink.net/doc/2020/04/edgar_morin_%C2%AB_cette_crise_nou|title|Edgar Morin : « Cette crise nous pousse à nous interroger sur notre mode de vie, sur nos vrais besoins masqués dans les aliénations du quotidien » +http://www.semanlink.net/doc/2020/04/edgar_morin_%C2%AB_cette_crise_nou|bookmarkOf|https://www.lemonde.fr/idees/article/2020/04/19/edgar-morin-la-crise-due-au-coronavirus-devrait-ouvrir-nos-esprits-depuis-longtemps-confines-sur-l-immediat_6037066_3232.html +http://www.semanlink.net/doc/2020/04/edgar_morin_%C2%AB_cette_crise_nou|creationTime|2020-04-19T15:50:59Z +http://www.semanlink.net/doc/2020/04/coronavirus_la_seine_saint_de|creationDate|2020-04-04 +http://www.semanlink.net/doc/2020/04/coronavirus_la_seine_saint_de|tag|http://www.semanlink.net/tag/coronavirus +http://www.semanlink.net/doc/2020/04/coronavirus_la_seine_saint_de|tag|http://www.semanlink.net/tag/9_3 +http://www.semanlink.net/doc/2020/04/coronavirus_la_seine_saint_de|comment|« Les infirmières, les caissières, les aides-soignantes, les agents d’entretien, les intérimaires, les agents de sécurité, les livreurs… bref, **tous ceux qui font tenir la France debout aujourd’hui, tous ceux qui vont au front et se mettent en danger**, ils viennent des quartiers populaires, ce sont des habitants du 93 ! » +http://www.semanlink.net/doc/2020/04/coronavirus_la_seine_saint_de|title|Coronavirus : la Seine-Saint-Denis confrontée à une inquiétante surmortalité +http://www.semanlink.net/doc/2020/04/coronavirus_la_seine_saint_de|bookmarkOf|https://www.lemonde.fr/planete/article/2020/04/04/coronavirus-la-seine-saint-denis-confrontee-a-une-inquietante-surmortalite_6035555_3244.html +http://www.semanlink.net/doc/2020/04/coronavirus_la_seine_saint_de|creationTime|2020-04-04T14:39:01Z +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte|creationDate|2019-12-15 +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte|tag|http://www.semanlink.net/tag/yoav_goldberg +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte|title|(((ل()(ل() 'yoav)))) sur Twitter : what do you think should be an interesting and important achievement of 2020 for NLP? 
+http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte|bookmarkOf|https://twitter.com/yoavgo/status/1205987145112051713 +http://www.semanlink.net/doc/2019/12/_%D9%84_%D9%84_yoav_sur_twitte|creationTime|2019-12-15T10:36:50Z +http://www.semanlink.net/doc/2020/05/denny_britz_sur_twitter_i_bu|creationDate|2020-05-27 +http://www.semanlink.net/doc/2020/05/denny_britz_sur_twitter_i_bu|tag|http://www.semanlink.net/tag/denny_britz +http://www.semanlink.net/doc/2020/05/denny_britz_sur_twitter_i_bu|tag|http://www.semanlink.net/tag/graph_visualization +http://www.semanlink.net/doc/2020/05/denny_britz_sur_twitter_i_bu|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/05/denny_britz_sur_twitter_i_bu|title|"Denny Britz sur Twitter : ""I built a little frontend for my AI/ML papergraph tool...""" +http://www.semanlink.net/doc/2020/05/denny_britz_sur_twitter_i_bu|bookmarkOf|https://twitter.com/dennybritz/status/1265665044051877889 +http://www.semanlink.net/doc/2020/05/denny_britz_sur_twitter_i_bu|creationTime|2020-05-27T18:48:55Z +http://www.semanlink.net/doc/2019/06/nlp_contextualized_word_embedd|creationDate|2019-06-12 +http://www.semanlink.net/doc/2019/06/nlp_contextualized_word_embedd|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2019/06/nlp_contextualized_word_embedd|title|NLP: Contextualized word embeddings from BERT – Towards Data Science +http://www.semanlink.net/doc/2019/06/nlp_contextualized_word_embedd|bookmarkOf|https://towardsdatascience.com/nlp-extract-contextualized-word-embeddings-from-bert-keras-tf-67ef29f60a7b +http://www.semanlink.net/doc/2019/06/nlp_contextualized_word_embedd|creationTime|2019-06-12T08:24:42Z +http://www.semanlink.net/doc/2019/11/using_axios_to_consume_apis_v|creationDate|2019-11-19 +http://www.semanlink.net/doc/2019/11/using_axios_to_consume_apis_v|tag|http://www.semanlink.net/tag/vue_js +http://www.semanlink.net/doc/2019/11/using_axios_to_consume_apis_v|title|Using Axios to Consume APIs — Vue.js +http://www.semanlink.net/doc/2019/11/using_axios_to_consume_apis_v|bookmarkOf|https://vuejs.org/v2/cookbook/using-axios-to-consume-apis.html +http://www.semanlink.net/doc/2019/11/using_axios_to_consume_apis_v|creationTime|2019-11-19T11:34:49Z +http://www.semanlink.net/doc/2020/04/how_do_different_communities_cr|creationDate|2020-04-14 +http://www.semanlink.net/doc/2020/04/how_do_different_communities_cr|tag|http://www.semanlink.net/tag/minting_uris +http://www.semanlink.net/doc/2020/04/how_do_different_communities_cr|comment|so old a question +http://www.semanlink.net/doc/2020/04/how_do_different_communities_cr|title|How do different communities create unique identifiers? 
– Lost Boy +http://www.semanlink.net/doc/2020/04/how_do_different_communities_cr|bookmarkOf|https://blog.ldodds.com/2020/04/14/how-do-different-communities-create-unique-identifiers/ +http://www.semanlink.net/doc/2020/04/how_do_different_communities_cr|creationTime|2020-04-14T23:49:35Z +http://www.semanlink.net/doc/2019/08/ai_system_should_be_recognised|creationDate|2019-08-07 +http://www.semanlink.net/doc/2019/08/ai_system_should_be_recognised|tag|http://www.semanlink.net/tag/propriete_intellectuelle +http://www.semanlink.net/doc/2019/08/ai_system_should_be_recognised|tag|http://www.semanlink.net/tag/artificial_intelligence +http://www.semanlink.net/doc/2019/08/ai_system_should_be_recognised|title|AI system 'should be recognised as inventor' - BBC News +http://www.semanlink.net/doc/2019/08/ai_system_should_be_recognised|bookmarkOf|https://www.bbc.com/news/technology-49191645 +http://www.semanlink.net/doc/2019/08/ai_system_should_be_recognised|creationTime|2019-08-07T22:58:34Z +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter_|creationDate|2019-09-08 +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/episodic_memory +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/sebastian_ruder +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter_|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter_|title|"Sebastian Ruder sur Twitter : ""1/ Our paper Episodic Memory in Lifelong Language Learning...""" +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter_|bookmarkOf|https://twitter.com/seb_ruder/status/1170642228592762880 +http://www.semanlink.net/doc/2019/09/sebastian_ruder_sur_twitter_|creationTime|2019-09-08T13:43:03Z +http://www.semanlink.net/doc/2019/11/%C2%AB_l%E2%80%99effondrement_de_la_vie_sous|creationDate|2019-11-09 +http://www.semanlink.net/doc/2019/11/%C2%AB_l%E2%80%99effondrement_de_la_vie_sous|tag|http://www.semanlink.net/tag/biodiversite_declin +http://www.semanlink.net/doc/2019/11/%C2%AB_l%E2%80%99effondrement_de_la_vie_sous|tag|http://www.semanlink.net/tag/arthropodes +http://www.semanlink.net/doc/2019/11/%C2%AB_l%E2%80%99effondrement_de_la_vie_sous|tag|http://www.semanlink.net/tag/insect_collapse +http://www.semanlink.net/doc/2019/11/%C2%AB_l%E2%80%99effondrement_de_la_vie_sous|title|« L’effondrement de la vie sous nos latitudes reste largement sous le radar médiatique » +http://www.semanlink.net/doc/2019/11/%C2%AB_l%E2%80%99effondrement_de_la_vie_sous|bookmarkOf|https://www.lemonde.fr/idees/article/2019/11/09/l-effondrement-de-la-vie-sous-nos-latitudes-reste-largement-sous-le-radar-mediatique_6018609_3232.html +http://www.semanlink.net/doc/2019/11/%C2%AB_l%E2%80%99effondrement_de_la_vie_sous|creationTime|2019-11-09T13:33:44Z +http://www.semanlink.net/doc/2020/01/syncing_your_blog_with_your_pc_|creationDate|2020-01-19 +http://www.semanlink.net/doc/2020/01/syncing_your_blog_with_your_pc_|tag|http://www.semanlink.net/tag/blog +http://www.semanlink.net/doc/2020/01/syncing_your_blog_with_your_pc_|tag|http://www.semanlink.net/tag/jeremy_howard +http://www.semanlink.net/doc/2020/01/syncing_your_blog_with_your_pc_|tag|http://www.semanlink.net/tag/fast_ai +http://www.semanlink.net/doc/2020/01/syncing_your_blog_with_your_pc_|title|Syncing your blog with your PC, and using your word processor · fast.ai 
+http://www.semanlink.net/doc/2020/01/syncing_your_blog_with_your_pc_|bookmarkOf|https://www.fast.ai/2020/01/18/gitblog/ +http://www.semanlink.net/doc/2020/01/syncing_your_blog_with_your_pc_|creationTime|2020-01-19T00:03:00Z +http://www.semanlink.net/doc/2020/01/syncing_your_blog_with_your_pc_|mainDoc|http://www.semanlink.net/doc/2020/01/your_own_blog_with_github_pages +http://www.semanlink.net/doc/2019/08/how_mosquitoes_helped_shape_the|creationDate|2019-08-09 +http://www.semanlink.net/doc/2019/08/how_mosquitoes_helped_shape_the|tag|http://www.semanlink.net/tag/moustique +http://www.semanlink.net/doc/2019/08/how_mosquitoes_helped_shape_the|title|How Mosquitoes Helped Shape the Course of Human History History Smithsonian +http://www.semanlink.net/doc/2019/08/how_mosquitoes_helped_shape_the|bookmarkOf|https://www.smithsonianmag.com/history/how-mosquitos-helped-shape-human-history-180972802/ +http://www.semanlink.net/doc/2019/08/how_mosquitoes_helped_shape_the|creationTime|2019-08-09T16:07:48Z +http://www.semanlink.net/doc/2020/01/a_call_to_minimize_distration|creationDate|2020-01-20 +http://www.semanlink.net/doc/2020/01/a_call_to_minimize_distration|tag|http://www.semanlink.net/tag/weapon_of_mass_distraction +http://www.semanlink.net/doc/2020/01/a_call_to_minimize_distration|title|A Call To Minimize Distration +http://www.semanlink.net/doc/2020/01/a_call_to_minimize_distration|bookmarkOf|http://minimizedistraction.com/ +http://www.semanlink.net/doc/2020/01/a_call_to_minimize_distration|creationTime|2020-01-20T00:10:19Z +http://www.semanlink.net/doc/2020/04/retail_graph_walmart%E2%80%99s_produc|creationDate|2020-04-09 +http://www.semanlink.net/doc/2020/04/retail_graph_walmart%E2%80%99s_produc|tag|http://www.semanlink.net/tag/walmart +http://www.semanlink.net/doc/2020/04/retail_graph_walmart%E2%80%99s_produc|tag|http://www.semanlink.net/tag/product_knowledge_graph +http://www.semanlink.net/doc/2020/04/retail_graph_walmart%E2%80%99s_produc|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2020/04/retail_graph_walmart%E2%80%99s_produc|title|Retail Graph — Walmart’s Product Knowledge Graph +http://www.semanlink.net/doc/2020/04/retail_graph_walmart%E2%80%99s_produc|bookmarkOf|https://medium.com/walmartlabs/retail-graph-walmarts-product-knowledge-graph-6ef7357963bc +http://www.semanlink.net/doc/2020/04/retail_graph_walmart%E2%80%99s_produc|creationTime|2020-04-09T21:18:53Z +http://www.semanlink.net/doc/2019/07/ensemble_learning_to_improve_ma|creationDate|2019-07-04 +http://www.semanlink.net/doc/2019/07/ensemble_learning_to_improve_ma|tag|http://www.semanlink.net/tag/ensemble_learning +http://www.semanlink.net/doc/2019/07/ensemble_learning_to_improve_ma|title|Ensemble Learning to Improve Machine Learning Results +http://www.semanlink.net/doc/2019/07/ensemble_learning_to_improve_ma|bookmarkOf|https://blog.statsbot.co/ensemble-learning-d1dcd548e936 +http://www.semanlink.net/doc/2019/07/ensemble_learning_to_improve_ma|creationTime|2019-07-04T01:38:09Z +http://www.semanlink.net/doc/2020/04/camel_express_news_april_2020|creationDate|2020-04-14 +http://www.semanlink.net/doc/2020/04/camel_express_news_april_2020|tag|http://www.semanlink.net/tag/peace_corps +http://www.semanlink.net/doc/2020/04/camel_express_news_april_2020|tag|http://www.semanlink.net/tag/niger +http://www.semanlink.net/doc/2020/04/camel_express_news_april_2020|title|Camel Express News April 2020 
+http://www.semanlink.net/doc/2020/04/camel_express_news_april_2020|bookmarkOf|https://mailchi.mp/9122c6c9823c/camel-express-news-augsept2019-12286398?e=4618733398 +http://www.semanlink.net/doc/2020/04/camel_express_news_april_2020|creationTime|2020-04-14T21:22:47Z +http://www.semanlink.net/doc/2019/08/danielle_akini_la_camerounaise|creationDate|2019-08-01 +http://www.semanlink.net/doc/2019/08/danielle_akini_la_camerounaise|tag|http://www.semanlink.net/tag/new_africa +http://www.semanlink.net/doc/2019/08/danielle_akini_la_camerounaise|title|Danielle Akini, la Camerounaise qui parle aux ordinateurs +http://www.semanlink.net/doc/2019/08/danielle_akini_la_camerounaise|bookmarkOf|https://www.lemonde.fr/afrique/article/2019/08/01/la-camerounaise-qui-parle-aux-ordinateurs_5495458_3212.html +http://www.semanlink.net/doc/2019/08/danielle_akini_la_camerounaise|creationTime|2019-08-01T22:28:42Z +http://www.semanlink.net/doc/2020/05/iguana_vs_snakes_%7C_planet_earth|creationDate|2020-05-12 +http://www.semanlink.net/doc/2020/05/iguana_vs_snakes_%7C_planet_earth|tag|http://www.semanlink.net/tag/serpent +http://www.semanlink.net/doc/2020/05/iguana_vs_snakes_%7C_planet_earth|tag|http://www.semanlink.net/tag/youtube_video +http://www.semanlink.net/doc/2020/05/iguana_vs_snakes_%7C_planet_earth|tag|http://www.semanlink.net/tag/iguane +http://www.semanlink.net/doc/2020/05/iguana_vs_snakes_%7C_planet_earth|title|Iguana vs Snakes Planet Earth II - YouTube +http://www.semanlink.net/doc/2020/05/iguana_vs_snakes_%7C_planet_earth|bookmarkOf|https://www.youtube.com/watch?v=Rv9hn4IGofM&feature=youtu.be +http://www.semanlink.net/doc/2020/05/iguana_vs_snakes_%7C_planet_earth|creationTime|2020-05-12T12:50:42Z +http://www.semanlink.net/doc/2019/08/a_dive_into_spatial_search_algo|creationDate|2019-08-29 +http://www.semanlink.net/doc/2019/08/a_dive_into_spatial_search_algo|tag|http://www.semanlink.net/tag/spatial_search +http://www.semanlink.net/doc/2019/08/a_dive_into_spatial_search_algo|title|A dive into spatial search algorithms +http://www.semanlink.net/doc/2019/08/a_dive_into_spatial_search_algo|bookmarkOf|https://blog.mapbox.com/a-dive-into-spatial-search-algorithms-ebd0c5e39d2a +http://www.semanlink.net/doc/2019/08/a_dive_into_spatial_search_algo|creationTime|2019-08-29T00:34:39Z +http://www.semanlink.net/doc/2019/08/en_inde_pres_de_deux_millions_|creationDate|2019-08-31 +http://www.semanlink.net/doc/2019/08/en_inde_pres_de_deux_millions_|tag|http://www.semanlink.net/tag/inde +http://www.semanlink.net/doc/2019/08/en_inde_pres_de_deux_millions_|tag|http://www.semanlink.net/tag/nationalisme +http://www.semanlink.net/doc/2019/08/en_inde_pres_de_deux_millions_|title|En Inde, près de deux millions de citoyens, la plupart musulmans, déchus de leur nationalité +http://www.semanlink.net/doc/2019/08/en_inde_pres_de_deux_millions_|bookmarkOf|https://www.lemonde.fr/international/article/2019/08/31/en-inde-pres-de-deux-millions-de-citoyens-la-plupart-musulmans-dechus-de-leur-nationalite_5504902_3210.html +http://www.semanlink.net/doc/2019/08/en_inde_pres_de_deux_millions_|creationTime|2019-08-31T16:59:48Z +http://www.semanlink.net/doc/2020/02/l%E2%80%99evaluation_officielle_du_glyp|creationDate|2020-02-24 +http://www.semanlink.net/doc/2020/02/l%E2%80%99evaluation_officielle_du_glyp|tag|http://www.semanlink.net/tag/glyphosate +http://www.semanlink.net/doc/2020/02/l%E2%80%99evaluation_officielle_du_glyp|comment|"> La réanalyse des tests fournis aux autorités réglementaires, indique que l’herbicide controversé est susceptible de 
déclencher des cancers chez les rongeurs. + +> Si cette conclusion est notable, c’est que ces mêmes tests – dont la majorité ont été menés par les industriels eux-mêmes – ont servi de base aux avis des autorités réglementaires, notamment européennes et américaines. Or celles-ci ont unanimement estimé, à l’inverse, que le glyphosate n’a pas de potentiel cancérogène. + +> Confidentiels, les tests réglementaires ne peuvent généralement pas être consultés par la communauté scientifique," +http://www.semanlink.net/doc/2020/02/l%E2%80%99evaluation_officielle_du_glyp|title|L’évaluation officielle du glyphosate de nouveau mise en cause +http://www.semanlink.net/doc/2020/02/l%E2%80%99evaluation_officielle_du_glyp|bookmarkOf|https://www.lemonde.fr/planete/article/2020/02/24/l-evaluation-officielle-du-glyphosate-de-nouveau-mise-en-cause_6030625_3244.html +http://www.semanlink.net/doc/2020/02/l%E2%80%99evaluation_officielle_du_glyp|creationTime|2020-02-24T13:58:07Z +http://www.semanlink.net/doc/2020/01/best_practices_for_ml_engineeri|creationDate|2020-01-21 +http://www.semanlink.net/doc/2020/01/best_practices_for_ml_engineeri|tag|http://www.semanlink.net/tag/doc_by_google +http://www.semanlink.net/doc/2020/01/best_practices_for_ml_engineeri|tag|http://www.semanlink.net/tag/ml_engineering +http://www.semanlink.net/doc/2020/01/best_practices_for_ml_engineeri|tag|http://www.semanlink.net/tag/best_practices +http://www.semanlink.net/doc/2020/01/best_practices_for_ml_engineeri|title|Best Practices for ML Engineering    Google Developers +http://www.semanlink.net/doc/2020/01/best_practices_for_ml_engineeri|bookmarkOf|https://developers.google.com/machine-learning/guides/rules-of-ml/?_ga=2.85269593.-1158947199.1579618956&hl=en +http://www.semanlink.net/doc/2020/01/best_practices_for_ml_engineeri|creationTime|2020-01-21T16:40:23Z +http://www.semanlink.net/doc/2019/08/absurd_creature_of_the_week_th|creationDate|2019-08-13 +http://www.semanlink.net/doc/2019/08/absurd_creature_of_the_week_th|tag|http://www.semanlink.net/tag/curiosite_naturelle +http://www.semanlink.net/doc/2019/08/absurd_creature_of_the_week_th|tag|http://www.semanlink.net/tag/zombie +http://www.semanlink.net/doc/2019/08/absurd_creature_of_the_week_th|tag|http://www.semanlink.net/tag/parasitisme +http://www.semanlink.net/doc/2019/08/absurd_creature_of_the_week_th|title|Absurd Creature of the Week: The Parasitic Worm That Turns Snails Into Disco Zombies WIRED +http://www.semanlink.net/doc/2019/08/absurd_creature_of_the_week_th|bookmarkOf|https://www.wired.com/2014/09/absurd-creature-of-the-week-disco-worm/ +http://www.semanlink.net/doc/2019/08/absurd_creature_of_the_week_th|creationTime|2019-08-13T09:10:29Z +http://www.semanlink.net/doc/2020/04/pdf_to_excel_pdftables|creationDate|2020-04-02 +http://www.semanlink.net/doc/2020/04/pdf_to_excel_pdftables|tag|http://www.semanlink.net/tag/tools +http://www.semanlink.net/doc/2020/04/pdf_to_excel_pdftables|tag|http://www.semanlink.net/tag/pdf_extract +http://www.semanlink.net/doc/2020/04/pdf_to_excel_pdftables|title|pdftables.com: PDF to Excel converter - PDFTables +http://www.semanlink.net/doc/2020/04/pdf_to_excel_pdftables|bookmarkOf|https://pdftables.com/ +http://www.semanlink.net/doc/2020/04/pdf_to_excel_pdftables|creationTime|2020-04-02T15:38:47Z +http://www.semanlink.net/doc/2020/04/les_lecons_de_wuhan_pour_enraye|creationDate|2020-04-11 +http://www.semanlink.net/doc/2020/04/les_lecons_de_wuhan_pour_enraye|tag|http://www.semanlink.net/tag/coronavirus 
+http://www.semanlink.net/doc/2020/04/les_lecons_de_wuhan_pour_enraye|tag|http://www.semanlink.net/tag/wuhan +http://www.semanlink.net/doc/2020/04/les_lecons_de_wuhan_pour_enraye|title|Les leçons de Wuhan pour enrayer l’épidémie +http://www.semanlink.net/doc/2020/04/les_lecons_de_wuhan_pour_enraye|bookmarkOf|https://www.lemonde.fr/planete/article/2020/04/11/les-lecons-de-wuhan-pour-enrayer-l-epidemie_6036318_3244.html +http://www.semanlink.net/doc/2020/04/les_lecons_de_wuhan_pour_enraye|creationTime|2020-04-11T14:47:15Z +http://www.semanlink.net/doc/2019/06/speech_to_text_demo_watson|creationDate|2019-06-11 +http://www.semanlink.net/doc/2019/06/speech_to_text_demo_watson|tag|http://www.semanlink.net/tag/ibm_watson_and_speech_to_text +http://www.semanlink.net/doc/2019/06/speech_to_text_demo_watson|title|Speech to Text Demo - Watson +http://www.semanlink.net/doc/2019/06/speech_to_text_demo_watson|bookmarkOf|https://speech-to-text-demo.ng.bluemix.net/ +http://www.semanlink.net/doc/2019/06/speech_to_text_demo_watson|creationTime|2019-06-11T11:04:08Z +http://www.semanlink.net/doc/2020/02/caraa_sur_twitter_probably_t|creationDate|2020-02-16 +http://www.semanlink.net/doc/2020/02/caraa_sur_twitter_probably_t|tag|http://www.semanlink.net/tag/tweet +http://www.semanlink.net/doc/2020/02/caraa_sur_twitter_probably_t|tag|http://www.semanlink.net/tag/notre_dame_de_paris +http://www.semanlink.net/doc/2020/02/caraa_sur_twitter_probably_t|title|"CARAA sur Twitter : ""Probably the first photo of Notre Dame de Paris in 1838 !! (daguerreotype)""" +http://www.semanlink.net/doc/2020/02/caraa_sur_twitter_probably_t|bookmarkOf|https://twitter.com/CARAA_Center/status/1228220123854458888 +http://www.semanlink.net/doc/2020/02/caraa_sur_twitter_probably_t|creationTime|2020-02-16T13:48:54Z +http://www.semanlink.net/doc/2020/04/pdf2table_a_method_to_extract_|creationDate|2020-04-02 +http://www.semanlink.net/doc/2020/04/pdf2table_a_method_to_extract_|tag|http://www.semanlink.net/tag/pdf_extract +http://www.semanlink.net/doc/2020/04/pdf2table_a_method_to_extract_|tag|http://www.semanlink.net/tag/pdf_format +http://www.semanlink.net/doc/2020/04/pdf2table_a_method_to_extract_|title|pdf2table: A Method to Extract Table Information from PDF Files +http://www.semanlink.net/doc/2020/04/pdf2table_a_method_to_extract_|bookmarkOf|http://ieg.ifs.tuwien.ac.at/projects/pdf2table/yildiz_iicai05.pdf +http://www.semanlink.net/doc/2020/04/pdf2table_a_method_to_extract_|creationTime|2020-04-02T15:35:47Z +http://www.semanlink.net/doc/2020/01/marcel_frohlich_sur_twitter_te|creationDate|2020-01-20 +http://www.semanlink.net/doc/2020/01/marcel_frohlich_sur_twitter_te|tag|http://www.semanlink.net/tag/weapon_of_mass_distraction +http://www.semanlink.net/doc/2020/01/marcel_frohlich_sur_twitter_te|title|Marcel Fröhlich sur Twitter: Tech products, culture are ‘designed intentionally for mass deception’ +http://www.semanlink.net/doc/2020/01/marcel_frohlich_sur_twitter_te|bookmarkOf|https://twitter.com/FroehlichMarcel/status/1219010997563928579?s=20 +http://www.semanlink.net/doc/2020/01/marcel_frohlich_sur_twitter_te|creationTime|2020-01-20T00:05:13Z +http://www.semanlink.net/doc/2020/05/20_000_roam_tags_with_spacy|creationDate|2020-05-15 +http://www.semanlink.net/doc/2020/05/20_000_roam_tags_with_spacy|tag|http://www.semanlink.net/tag/roam +http://www.semanlink.net/doc/2020/05/20_000_roam_tags_with_spacy|tag|http://www.semanlink.net/tag/spacy +http://www.semanlink.net/doc/2020/05/20_000_roam_tags_with_spacy|title|20,000 Roam Tags with Spacy 
+http://www.semanlink.net/doc/2020/05/20_000_roam_tags_with_spacy|bookmarkOf|http://zachwill.com/20k-roam-tags/ +http://www.semanlink.net/doc/2020/05/20_000_roam_tags_with_spacy|creationTime|2020-05-15T16:41:34Z +http://www.semanlink.net/doc/2019/07/mining_quality_phrases_from_mas|creationDate|2019-07-15 +http://www.semanlink.net/doc/2019/07/mining_quality_phrases_from_mas|tag|http://www.semanlink.net/tag/nlp_microsoft +http://www.semanlink.net/doc/2019/07/mining_quality_phrases_from_mas|tag|http://www.semanlink.net/tag/nlp_topic_extraction +http://www.semanlink.net/doc/2019/07/mining_quality_phrases_from_mas|comment|"framework that extracts quality phrases from text corpora integrated with phrasal segmentation. + +> The framework requires only limited training but the quality of phrases so generated is close to human judgment. Moreover, the method is scalable: both computation time and required space grow linearly as corpus size increases + +[Related blog post](https://medium.com/@SherlockHumus/mining-quality-phrases-from-not-so-massive-text-corpora-part-i-b20b8336520a) + +Used in [this Entity Linking method](/doc/?uri=https%3A%2F%2Farxiv.org%2Fabs%2F1807.06036)" +http://www.semanlink.net/doc/2019/07/mining_quality_phrases_from_mas|relatedDoc|https://arxiv.org/abs/1807.06036 +http://www.semanlink.net/doc/2019/07/mining_quality_phrases_from_mas|title|Mining Quality Phrases from Massive Text Corpora (2015) +http://www.semanlink.net/doc/2019/07/mining_quality_phrases_from_mas|bookmarkOf|https://www.ncbi.nlm.nih.gov/pubmed/26705375 +http://www.semanlink.net/doc/2019/07/mining_quality_phrases_from_mas|creationTime|2019-07-15T13:02:36Z +http://www.semanlink.net/doc/2020/03/unsupervised_ner_using_bert_h|creationDate|2020-03-06 +http://www.semanlink.net/doc/2020/03/unsupervised_ner_using_bert_h|tag|http://www.semanlink.net/tag/bert +http://www.semanlink.net/doc/2020/03/unsupervised_ner_using_bert_h|tag|http://www.semanlink.net/tag/named_entity_recognition +http://www.semanlink.net/doc/2020/03/unsupervised_ner_using_bert_h|comment|[GitHub](https://github.com/ajitrajasekharan/unsupervised_NER) +http://www.semanlink.net/doc/2020/03/unsupervised_ner_using_bert_h|title|Unsupervised NER using BERT - Hands-on NLP model review - Quora +http://www.semanlink.net/doc/2020/03/unsupervised_ner_using_bert_h|bookmarkOf|https://www.quora.com/q/idpysofgzpanjxuh/Unsupervised-NER-using-BERT?ch=10&share=7b4a15bb +http://www.semanlink.net/doc/2020/03/unsupervised_ner_using_bert_h|creationTime|2020-03-06T00:12:06Z +http://www.semanlink.net/doc/2019/10/a_brief_history_of_knowledge_gr|creationDate|2019-10-27 +http://www.semanlink.net/doc/2019/10/a_brief_history_of_knowledge_gr|tag|http://www.semanlink.net/tag/juan_sequeda +http://www.semanlink.net/doc/2019/10/a_brief_history_of_knowledge_gr|tag|http://www.semanlink.net/tag/knowledge_graph +http://www.semanlink.net/doc/2019/10/a_brief_history_of_knowledge_gr|title|A Brief History of Knowledge Graph's Main Ideas: A tutorial, Claudio Gutierrez and Juan F. 
Sequeda
+http://www.semanlink.net/doc/2019/10/a_brief_history_of_knowledge_gr|bookmarkOf|http://knowledgegraph.today/paper.html
+http://www.semanlink.net/doc/2019/10/a_brief_history_of_knowledge_gr|creationTime|2019-10-27T08:40:38Z
+http://www.semanlink.net/doc/2020/01/trust_but_verify_better_entit|creationDate|2020-01-10
+http://www.semanlink.net/doc/2020/01/trust_but_verify_better_entit|tag|http://www.semanlink.net/tag/entity_linking
+http://www.semanlink.net/doc/2020/01/trust_but_verify_better_entit|tag|http://www.semanlink.net/tag/benjamin_heinzerling
+http://www.semanlink.net/doc/2020/01/trust_but_verify_better_entit|title|Trust, but verify! Better entity linking through automatic verification (2017)
+http://www.semanlink.net/doc/2020/01/trust_but_verify_better_entit|bookmarkOf|https://www.aclweb.org/anthology/E17-1078/
+http://www.semanlink.net/doc/2020/01/trust_but_verify_better_entit|creationTime|2020-01-10T17:49:11Z
+http://www.semanlink.net/doc/2020/04/chasse_a_la_mouche_pour_eradiqu|creationDate|2020-04-03
+http://www.semanlink.net/doc/2020/04/chasse_a_la_mouche_pour_eradiqu|tag|http://www.semanlink.net/tag/technique_de_l_insecte_sterile
+http://www.semanlink.net/doc/2020/04/chasse_a_la_mouche_pour_eradiqu|tag|http://www.semanlink.net/tag/lucilie_bouchere
+http://www.semanlink.net/doc/2020/04/chasse_a_la_mouche_pour_eradiqu|title|Pour éradiquer la lucilie bouchère en Libye, des mouches mâles stériles sont lâchées vers les femelles dont les larves dévorent le bétail (1991)
+http://www.semanlink.net/doc/2020/04/chasse_a_la_mouche_pour_eradiqu|bookmarkOf|https://www.lemonde.fr/archives/article/1991/01/23/chasse-a-la-mouche-pour-eradiquer-la-lucilie-bouchere-en-libye-des-mouches-males-steriles-sont-lachees-vers-les-femelles-dont-les-larves-devorent-le-betail_4014411_1819218.html
+http://www.semanlink.net/doc/2020/04/chasse_a_la_mouche_pour_eradiqu|creationTime|2020-04-03T19:20:18Z
+http://www.semanlink.net/doc/2019/08/acl_2019_highlights_and_trends|creationDate|2019-08-13
+http://www.semanlink.net/doc/2019/08/acl_2019_highlights_and_trends|tag|http://www.semanlink.net/tag/acl_2019
+http://www.semanlink.net/doc/2019/08/acl_2019_highlights_and_trends|title|ACL 2019: Highlights and Trends - Maria Khvalchik - Medium
+http://www.semanlink.net/doc/2019/08/acl_2019_highlights_and_trends|bookmarkOf|https://medium.com/@mariekhvalchik/acl-2019-1adf4c748711
+http://www.semanlink.net/doc/2019/08/acl_2019_highlights_and_trends|creationTime|2019-08-13T13:42:28Z
+http://www.semanlink.net/doc/2019/10/speech_to_text_ibm_cloud_api_|creationDate|2019-10-28
+http://www.semanlink.net/doc/2019/10/speech_to_text_ibm_cloud_api_|tag|http://www.semanlink.net/tag/ibm_watson_and_speech_to_text
+http://www.semanlink.net/doc/2019/10/speech_to_text_ibm_cloud_api_|title|Speech to Text - IBM Cloud API Docs
+http://www.semanlink.net/doc/2019/10/speech_to_text_ibm_cloud_api_|bookmarkOf|https://cloud.ibm.com/apidocs/speech-to-text/speech-to-text?code=node
+http://www.semanlink.net/doc/2019/10/speech_to_text_ibm_cloud_api_|creationTime|2019-10-28T10:57:07Z
diff --git a/ckb/models/__init__.py b/ckb/models/__init__.py
index ed4d500..0398a4f 100644
--- a/ckb/models/__init__.py
+++ b/ckb/models/__init__.py
@@ -1,11 +1,13 @@
 from .base import BaseModel
 from .distill_bert import DistillBert
 from .flaubert import FlauBERT
+from .similarity import Similarity
 from .transformer import Transformer
 
 __all__ = [
     "BaseModel",
     "DistillBert",
     "FlauBERT",
+    "Similarity",
     "Transformer",
 ]
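The train/valid/test hunks above add one `subject|predicate|object` statement per record, with literal values that contain newlines or pipes wrapped in double quotes (embedded quotes are doubled). A minimal standalone reader for that layout, as an illustrative sketch only, not the package's own loader; Python's `csv` module handles the quoted multi-line fields:

```python
import csv

def read_triples(path):
    """Parse pipe-separated (head, relation, tail) triples.

    Quoted fields (e.g. multi-line comments) are handled by the csv module.
    """
    with open(path, newline="", encoding="utf-8") as f:
        reader = csv.reader(f, delimiter="|", quotechar='"')
        return [(row[0], row[1], row[2]) for row in reader if len(row) >= 3]

# Hypothetical usage against the files added by this patch:
# triples = read_triples("ckb/datasets/semanlink/train.csv")
# print(triples[0])  # e.g. ('http://www.semanlink.net/doc/...', 'tag', 'http://www.semanlink.net/tag/...')
```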
diff --git a/ckb/models/similarity.py b/ckb/models/similarity.py
new file mode 100644
index 0000000..35c7db5
--- /dev/null
+++ b/ckb/models/similarity.py
@@ -0,0 +1,167 @@
+__all__ = ["Similarity"]
+
+import torch
+
+from ..scoring import TransE
+from .base import BaseModel
+
+
+class Similarity(BaseModel):
+    """Sentence Similarity models wrapper.
+
+    Parameters
+    ----------
+        model: Hugging Face model used to encode entity descriptions.
+        tokenizer: Tokenizer matching the model.
+        gamma (int): A higher gamma parameter widens the upper and lower bounds of the latent
+            space; a lower gamma narrows them.
+        entities (dict): Mapping between entities id and entities label.
+        relations (dict): Mapping between relations id and relations label.
+
+    Examples
+    --------
+
+    >>> from ckb import models
+    >>> from ckb import datasets
+
+    >>> from transformers import AutoTokenizer, AutoModel
+
+    >>> import torch
+
+    >>> tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-mpnet-base-v2')
+
+    >>> model = AutoModel.from_pretrained('sentence-transformers/all-mpnet-base-v2')
+
+    >>> _ = torch.manual_seed(42)
+
+    >>> dataset = datasets.Semanlink(1, pre_compute=False)
+
+    >>> model = models.Similarity(
+    ...    model = model,
+    ...    tokenizer = tokenizer,
+    ...    entities = dataset.entities,
+    ...    relations = dataset.relations,
+    ...    gamma = 9,
+    ...    device = 'cpu',
+    ... )
+
+    >>> sample = torch.tensor([[0, 0, 0], [2, 2, 2]])
+    >>> model(sample)
+    tensor([[3.5273],
+            [3.6367]], grad_fn=<ViewBackward>)
+
+    >>> sample = torch.tensor([[0, 0, 1], [2, 2, 1]])
+    >>> model(sample)
+    tensor([[-78.3936],
+            [-79.7217]], grad_fn=<ViewBackward>)
+
+
+    >>> sample = torch.tensor([[1, 0, 0], [1, 2, 2]])
+    >>> model(sample)
+    tensor([[-78.1690],
+            [-80.2369]], grad_fn=<ViewBackward>)
+
+    >>> sample = torch.tensor([[0, 0, 0], [2, 2, 2]])
+    >>> negative_sample = torch.tensor([[0], [2]])
+
+    >>> model(sample, negative_sample, mode='head-batch')
+    tensor([[3.5273],
+            [3.6367]], grad_fn=<ViewBackward>)
+
+    >>> model(sample, negative_sample, mode='tail-batch')
+    tensor([[3.5273],
+            [3.6367]], grad_fn=<ViewBackward>)
+
+    References
+    ----------
+    1. [Sentence Similarity models](https://huggingface.co/models?pipeline_tag=sentence-similarity&sort=downloads)
+
+    """
+
+    def __init__(
+        self,
+        model,
+        tokenizer,
+        entities,
+        relations,
+        scoring=TransE(),
+        hidden_dim=None,
+        gamma=9,
+        device="cuda",
+    ):
+
+        if hidden_dim is None:
+            hidden_dim = 768
+            init_l2 = False
+        else:
+            init_l2 = True
+
+        super(Similarity, self).__init__(
+            hidden_dim=hidden_dim,
+            entities=entities,
+            relations=relations,
+            scoring=scoring,
+            gamma=gamma,
+        )
+
+        self.tokenizer = tokenizer
+        self.model = model
+        self.max_length = list(self.tokenizer.max_model_input_sizes.values())[0]
+        self.device = device
+
+        if init_l2:
+            # Project the 768-dimensional encoder output down to the requested hidden_dim.
+            self.l2 = torch.nn.Linear(768, hidden_dim)
+        else:
+            self.l2 = None
+
+    def encoder(self, e):
+        """Encode input entity descriptions.
+
+        Parameters:
+            e (list): List of entity descriptions.
+
+        Returns:
+            Torch tensor of encoded entities.
+        """
+        inputs = self.tokenizer.batch_encode_plus(
+            e,
+            add_special_tokens=True,
+            truncation=True,
+            max_length=self.max_length,
+            padding="max_length",
+            return_token_type_ids=True,
+            return_tensors="pt",
+        )
+
+        # batch_encode_plus already returns tensors (return_tensors="pt"),
+        # so move them to the target device instead of re-wrapping them.
+        output = self.model(
+            input_ids=inputs["input_ids"].to(self.device),
+            attention_mask=inputs["attention_mask"].to(self.device),
+        )
+
+        sentence_embeddings = self.mean_pooling(
+            output=output, attention_mask=inputs["attention_mask"].to(self.device)
+        )
+
+        if self.l2 is not None:
+            sentence_embeddings = self.l2(sentence_embeddings)
+
+        return sentence_embeddings
+
+    @staticmethod
+    def mean_pooling(output, attention_mask):
+        """Mean pooling.
+
+        References
+        ----------
+        1. [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2)
+        """
+        token_embeddings = (
+            output.last_hidden_state
+        )  # last_hidden_state holds the embeddings of all tokens
+        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
+            input_mask_expanded.sum(1), min=1e-9
+        )
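The `mean_pooling` helper above is the standard sentence-transformers pooling recipe: padding positions are zeroed out by the expanded attention mask, and the divisor counts only the real tokens (clamped to avoid division by zero on an all-padding row). A minimal, self-contained sanity check with toy tensors only, nothing model-specific assumed:

```python
import torch

# Toy batch: 2 sequences, 4 token positions, hidden size 3.
token_embeddings = torch.arange(24, dtype=torch.float32).reshape(2, 4, 3)
# The second sequence has two padding positions (mask == 0).
attention_mask = torch.tensor([[1, 1, 1, 1],
                               [1, 1, 0, 0]])

# Same computation as Similarity.mean_pooling.
mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
pooled = torch.sum(token_embeddings * mask, 1) / torch.clamp(mask.sum(1), min=1e-9)

# Row 0 is a plain mean over all 4 positions; row 1 averages only the 2 real tokens.
assert torch.allclose(pooled[0], token_embeddings[0].mean(dim=0))
assert torch.allclose(pooled[1], token_embeddings[1, :2].mean(dim=0))
```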
diff --git a/ckb/models/transformer.py b/ckb/models/transformer.py
index 9c6918b..84241a7 100644
--- a/ckb/models/transformer.py
+++ b/ckb/models/transformer.py
@@ -117,7 +117,7 @@ def encoder(self, e):
             add_special_tokens=True,
             truncation=True,
             max_length=self.max_length,
-            padding="max_length",
+            padding="longest",
             return_token_type_ids=True,
         )
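The switch from `padding="max_length"` to `padding="longest"` in `transformer.py` pads each batch only to its longest member instead of always to the model maximum (typically 512), so the encoder processes much smaller tensors on short inputs; note that the new `Similarity.encoder` above still pads to `max_length`. A quick sketch of the difference, assuming the `transformers` library and the `distilbert-base-uncased` checkpoint (any tokenizer behaves the same):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
batch = ["a short label", "a noticeably longer entity description with several more tokens"]

longest = tokenizer(batch, padding="longest", return_tensors="pt")
fixed = tokenizer(batch, padding="max_length", max_length=512, return_tensors="pt")

print(longest["input_ids"].shape)  # torch.Size([2, n]), n = longest sequence in this batch
print(fixed["input_ids"].shape)    # torch.Size([2, 512]) regardless of the batch content
```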
diff --git a/docs/api/datasets/Semanlink.md b/docs/api/datasets/Semanlink.md
index 2901774..7d0d7b2 100644
--- a/docs/api/datasets/Semanlink.md
+++ b/docs/api/datasets/Semanlink.md
@@ -8,6 +8,8 @@ Semanlink dataset.
 
 - **batch_size**
 
+- **use_labels** – defaults to `True`
+
 - **shuffle** – defaults to `True`
 
 - **pre_compute** – defaults to `True`
diff --git a/docs/api/evaluation/Evaluation.md b/docs/api/evaluation/Evaluation.md
index 55f044f..68e4d42 100644
--- a/docs/api/evaluation/Evaluation.md
+++ b/docs/api/evaluation/Evaluation.md
@@ -16,11 +16,7 @@ Wrapper for MKB evaluation module.
 
 - **device** – defaults to `cuda`
 
-- **num_workers** – defaults to `1`
-
-- **entities_to_drop** – defaults to `[]`
-
-- **same_entities** – defaults to `{}`
+- **num_workers** – defaults to `0`
 
 
 
@@ -149,14 +145,6 @@ DistillBert model
 
     - **model**
 
-???- note "solve_same_entities"
-
-    Replace artificial entities by the target. Some description may be dedicated to the same entities.
-
-    **Parameters**
-
-    - **argsort**
-
 ???- note "types_relations"
 
     Divide input dataset relations into different categories (i.e. ONE-TO-ONE, ONE-TO-MANY, MANY-TO-ONE and MANY-TO-MANY) according to the mapping properties of relationships.
diff --git a/docs/api/models/BaseModel.md b/docs/api/models/BaseModel.md
index 3466fa0..15608e4 100644
--- a/docs/api/models/BaseModel.md
+++ b/docs/api/models/BaseModel.md
@@ -135,7 +135,7 @@ Base model class.
 
     Moves all model parameters and buffers to the GPU.
 
-    This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Args: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self
+    This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self
 
     **Parameters**
 
@@ -238,11 +238,11 @@ Base model class.
 
     Copies parameters and buffers from :attr:`state_dict` into this module and its descendants. If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function.
 
-    Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys
+    Arguments: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys
 
     **Parameters**
 
-    - **state_dict** (*'OrderedDict[str, Tensor]'*)
+    - **state_dict** (*Dict[str, torch.Tensor]*)
 
     - **strict** (*bool*) – defaults to `True`
 
 ???- note "modules"
@@ -308,7 +308,7 @@ Base model class.
 
     Registers a backward hook on the module.
 
-    This function is deprecated in favor of :meth:`nn.Module.register_full_backward_hook` and the behavior of this function will change in future versions. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``
+    .. warning :: The current implementation will not have the presented behavior for complex :class:`Module` that perform many operations. In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only contain the gradients for a subset of the inputs and outputs. For such :class:`Module`, you should use :func:`torch.Tensor.register_hook` directly on a specific input or output to get the required gradients. The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> Tensor or None The :attr:`grad_input` and :attr:`grad_output` may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``
 
     **Parameters**
 
@@ -346,16 +346,6 @@ Base model class.
 
     - **hook** (*Callable[..., NoneType]*)
 
-???- note "register_full_backward_hook"
-
-    Registers a backward hook on the module.
-
-    The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> tuple(Tensor) or None The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor arguments. .. warning :: Modifying inputs or outputs inplace is not allowed when using backward hooks and will raise an error.
diff --git a/docs/api/datasets/Semanlink.md b/docs/api/datasets/Semanlink.md
index 2901774..7d0d7b2 100644
--- a/docs/api/datasets/Semanlink.md
+++ b/docs/api/datasets/Semanlink.md
@@ -8,6 +8,8 @@ Semanlink dataset.

 - **batch_size**

+- **use_labels** – defaults to `True`
+
 - **shuffle** – defaults to `True`

 - **pre_compute** – defaults to `True`

diff --git a/docs/api/evaluation/Evaluation.md b/docs/api/evaluation/Evaluation.md
index 55f044f..68e4d42 100644
--- a/docs/api/evaluation/Evaluation.md
+++ b/docs/api/evaluation/Evaluation.md
@@ -16,11 +16,7 @@ Wrapper for MKB evaluation module.

 - **device** – defaults to `cuda`

-- **num_workers** – defaults to `1`
-
-- **entities_to_drop** – defaults to `[]`
-
-- **same_entities** – defaults to `{}`
+- **num_workers** – defaults to `0`

@@ -149,14 +145,6 @@ DistillBert model

 - **model**

-???- note "solve_same_entities"
-
-    Replace artificial entities by the target. Some description may be dedicated to the same entities.
-
-    **Parameters**
-
-    - **argsort**
-
 ???- note "types_relations"

     Divide input dataset relations into different categories (i.e. ONE-TO-ONE, ONE-TO-MANY, MANY-TO-ONE and MANY-TO-MANY) according to the mapping properties of relationships.

diff --git a/docs/api/models/BaseModel.md b/docs/api/models/BaseModel.md
index 3466fa0..15608e4 100644
--- a/docs/api/models/BaseModel.md
+++ b/docs/api/models/BaseModel.md
@@ -135,7 +135,7 @@ Base model class.

     Moves all model parameters and buffers to the GPU.

-    This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Args: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self
+    This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self

 **Parameters**

@@ -238,11 +238,11 @@ Base model class.

     Copies parameters and buffers from :attr:`state_dict` into this module and its descendants. If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function.

-    Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys
+    Arguments: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys

 **Parameters**

-    - **state_dict** (*'OrderedDict[str, Tensor]'*)
+    - **state_dict** (*Dict[str, torch.Tensor]*)

     - **strict** (*bool*) – defaults to `True`

 ???- note "modules"

@@ -308,7 +308,7 @@ Base model class.

     Registers a backward hook on the module.

-    This function is deprecated in favor of :meth:`nn.Module.register_full_backward_hook` and the behavior of this function will change in future versions. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``
+    .. warning :: The current implementation will not have the presented behavior for complex :class:`Module` that perform many operations. In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only contain the gradients for a subset of the inputs and outputs. For such :class:`Module`, you should use :func:`torch.Tensor.register_hook` directly on a specific input or output to get the required gradients. The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> Tensor or None The :attr:`grad_input` and :attr:`grad_output` may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``

 **Parameters**

@@ -346,16 +346,6 @@ Base model class.

     - **hook** (*Callable[..., NoneType]*)

-???- note "register_full_backward_hook"
-
-    Registers a backward hook on the module.
-
-    The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> tuple(Tensor) or None The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor arguments. .. warning :: Modifying inputs or outputs inplace is not allowed when using backward hooks and will raise an error. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()``
-
-    **Parameters**
-
-    - **hook** (*Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]*)
-
 ???- note "register_parameter"

     Adds a parameter to the module.

@@ -406,7 +396,7 @@ Base model class.

     Moves and/or casts the parameters and buffers.

-    This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) .. function:: to(dtype, non_blocking=False) .. function:: to(tensor, non_blocking=False) .. function:: to(memory_format=torch.channels_last) Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point or complex :attr:`dtype`s. In addition, this method will only cast the floating point or complex parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged. When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point or complex dtype of the parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Examples:: >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble) >>> linear.weight Parameter containing: tensor([[ 0.3741+0.j, 0.2382+0.j], [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128) >>> linear(torch.ones(3, 2, dtype=torch.cdouble)) tensor([[0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
+    This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) .. function:: to(dtype, non_blocking=False) .. function:: to(tensor, non_blocking=False) .. function:: to(memory_format=torch.channels_last) Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point desired :attr:`dtype` s. In addition, this method will only cast the floating point parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged.
When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point type of the floating point parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Example:: >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) **Parameters** @@ -427,27 +417,17 @@ Base model class. Casts all parameters and buffers to :attr:`dst_type`. - Args: dst_type (type or string): the desired type Returns: Module: self + Arguments: dst_type (type or string): the desired type Returns: Module: self **Parameters** - **dst_type** (*Union[torch.dtype, str]*) -???- note "xpu" - - Moves all model parameters and buffers to the XPU. - - This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on XPU while being optimized. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self - - **Parameters** - - - **device** (*Union[int, torch.device, NoneType]*) – defaults to `None` - ???- note "zero_grad" Sets gradients of all model parameters to zero. See similar function under :class:`torch.optim.Optimizer` for more context. - Args: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details. + Arguments: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details. **Parameters** diff --git a/docs/api/models/DistillBert.md b/docs/api/models/DistillBert.md index 10a800c..24c2d1a 100644 --- a/docs/api/models/DistillBert.md +++ b/docs/api/models/DistillBert.md @@ -164,7 +164,7 @@ tensor([[3.6504], Moves all model parameters and buffers to the GPU. - This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Args: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self + This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. 
Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self **Parameters** @@ -269,11 +269,11 @@ tensor([[3.6504], Copies parameters and buffers from :attr:`state_dict` into this module and its descendants. If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. - Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys + Arguments: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys **Parameters** - - **state_dict** (*'OrderedDict[str, Tensor]'*) + - **state_dict** (*Dict[str, torch.Tensor]*) - **strict** (*bool*) – defaults to `True` ???- note "modules" @@ -339,7 +339,7 @@ tensor([[3.6504], Registers a backward hook on the module. - This function is deprecated in favor of :meth:`nn.Module.register_full_backward_hook` and the behavior of this function will change in future versions. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` + .. warning :: The current implementation will not have the presented behavior for complex :class:`Module` that perform many operations. In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only contain the gradients for a subset of the inputs and outputs. For such :class:`Module`, you should use :func:`torch.Tensor.register_hook` directly on a specific input or output to get the required gradients. The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> Tensor or None The :attr:`grad_input` and :attr:`grad_output` may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` **Parameters** @@ -377,16 +377,6 @@ tensor([[3.6504], - **hook** (*Callable[..., NoneType]*) -???- note "register_full_backward_hook" - - Registers a backward hook on the module. - - The hook will be called every time the gradients with respect to module inputs are computed. 
The hook should have the following signature:: hook(module, grad_input, grad_output) -> tuple(Tensor) or None The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor arguments. .. warning :: Modifying inputs or outputs inplace is not allowed when using backward hooks and will raise an error. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` - - **Parameters** - - - **hook** (*Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]*) - ???- note "register_parameter" Adds a parameter to the module. @@ -437,7 +427,7 @@ tensor([[3.6504], Moves and/or casts the parameters and buffers. - This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) .. function:: to(dtype, non_blocking=False) .. function:: to(tensor, non_blocking=False) .. function:: to(memory_format=torch.channels_last) Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point or complex :attr:`dtype`s. In addition, this method will only cast the floating point or complex parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged. When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. 
Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point or complex dtype of the parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Examples:: >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble) >>> linear.weight Parameter containing: tensor([[ 0.3741+0.j, 0.2382+0.j], [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128) >>> linear(torch.ones(3, 2, dtype=torch.cdouble)) tensor([[0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128) + This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) .. function:: to(dtype, non_blocking=False) .. function:: to(tensor, non_blocking=False) .. function:: to(memory_format=torch.channels_last) Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point desired :attr:`dtype` s. In addition, this method will only cast the floating point parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged. When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. 
Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point type of the floating point parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Example:: >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) **Parameters** @@ -458,27 +448,17 @@ tensor([[3.6504], Casts all parameters and buffers to :attr:`dst_type`. - Args: dst_type (type or string): the desired type Returns: Module: self + Arguments: dst_type (type or string): the desired type Returns: Module: self **Parameters** - **dst_type** (*Union[torch.dtype, str]*) -???- note "xpu" - - Moves all model parameters and buffers to the XPU. - - This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on XPU while being optimized. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self - - **Parameters** - - - **device** (*Union[int, torch.device, NoneType]*) – defaults to `None` - ???- note "zero_grad" Sets gradients of all model parameters to zero. See similar function under :class:`torch.optim.Optimizer` for more context. - Args: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details. + Arguments: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details. **Parameters** diff --git a/docs/api/models/FlauBERT.md b/docs/api/models/FlauBERT.md index 2159c62..b17533d 100644 --- a/docs/api/models/FlauBERT.md +++ b/docs/api/models/FlauBERT.md @@ -133,7 +133,7 @@ FlauBERT model Moves all model parameters and buffers to the GPU. - This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Args: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self + This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self **Parameters** @@ -238,11 +238,11 @@ FlauBERT model Copies parameters and buffers from :attr:`state_dict` into this module and its descendants. 
If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. - Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys + Arguments: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys **Parameters** - - **state_dict** (*'OrderedDict[str, Tensor]'*) + - **state_dict** (*Dict[str, torch.Tensor]*) - **strict** (*bool*) – defaults to `True` ???- note "modules" @@ -308,7 +308,7 @@ FlauBERT model Registers a backward hook on the module. - This function is deprecated in favor of :meth:`nn.Module.register_full_backward_hook` and the behavior of this function will change in future versions. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` + .. warning :: The current implementation will not have the presented behavior for complex :class:`Module` that perform many operations. In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only contain the gradients for a subset of the inputs and outputs. For such :class:`Module`, you should use :func:`torch.Tensor.register_hook` directly on a specific input or output to get the required gradients. The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> Tensor or None The :attr:`grad_input` and :attr:`grad_output` may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` **Parameters** @@ -346,16 +346,6 @@ FlauBERT model - **hook** (*Callable[..., NoneType]*) -???- note "register_full_backward_hook" - - Registers a backward hook on the module. - - The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> tuple(Tensor) or None The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of :attr:`grad_input` in subsequent computations. 
:attr:`grad_input` will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor arguments. .. warning :: Modifying inputs or outputs inplace is not allowed when using backward hooks and will raise an error. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` - - **Parameters** - - - **hook** (*Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]*) - ???- note "register_parameter" Adds a parameter to the module. @@ -406,7 +396,7 @@ FlauBERT model Moves and/or casts the parameters and buffers. - This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) .. function:: to(dtype, non_blocking=False) .. function:: to(tensor, non_blocking=False) .. function:: to(memory_format=torch.channels_last) Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point or complex :attr:`dtype`s. In addition, this method will only cast the floating point or complex parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged. When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point or complex dtype of the parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Examples:: >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble) >>> linear.weight Parameter containing: tensor([[ 0.3741+0.j, 0.2382+0.j], [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128) >>> linear(torch.ones(3, 2, dtype=torch.cdouble)) tensor([[0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128) + This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) .. function:: to(dtype, non_blocking=False) .. function:: to(tensor, non_blocking=False) .. 
function:: to(memory_format=torch.channels_last) Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point desired :attr:`dtype` s. In addition, this method will only cast the floating point parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged. When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point type of the floating point parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Example:: >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) **Parameters** @@ -427,27 +417,17 @@ FlauBERT model Casts all parameters and buffers to :attr:`dst_type`. - Args: dst_type (type or string): the desired type Returns: Module: self + Arguments: dst_type (type or string): the desired type Returns: Module: self **Parameters** - **dst_type** (*Union[torch.dtype, str]*) -???- note "xpu" - - Moves all model parameters and buffers to the XPU. - - This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on XPU while being optimized. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self - - **Parameters** - - - **device** (*Union[int, torch.device, NoneType]*) – defaults to `None` - ???- note "zero_grad" Sets gradients of all model parameters to zero. See similar function under :class:`torch.optim.Optimizer` for more context. - Args: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details. + Arguments: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details. **Parameters** diff --git a/docs/api/models/Transformer.md b/docs/api/models/Transformer.md index b42930f..417eb1c 100644 --- a/docs/api/models/Transformer.md +++ b/docs/api/models/Transformer.md @@ -18,6 +18,8 @@ Transformer for contextual representation of entities. 
- **hidden_dim** – defaults to `None` +- **max_length** – defaults to `None` + - **gamma** – defaults to `9` - **device** – defaults to `cuda` @@ -159,7 +161,7 @@ tensor([[-227.8486], Moves all model parameters and buffers to the GPU. - This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Args: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self + This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self **Parameters** @@ -264,11 +266,11 @@ tensor([[-227.8486], Copies parameters and buffers from :attr:`state_dict` into this module and its descendants. If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. - Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys + Arguments: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys **Parameters** - - **state_dict** (*'OrderedDict[str, Tensor]'*) + - **state_dict** (*Dict[str, torch.Tensor]*) - **strict** (*bool*) – defaults to `True` ???- note "modules" @@ -334,7 +336,7 @@ tensor([[-227.8486], Registers a backward hook on the module. - This function is deprecated in favor of :meth:`nn.Module.register_full_backward_hook` and the behavior of this function will change in future versions. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` + .. warning :: The current implementation will not have the presented behavior for complex :class:`Module` that perform many operations. In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only contain the gradients for a subset of the inputs and outputs. For such :class:`Module`, you should use :func:`torch.Tensor.register_hook` directly on a specific input or output to get the required gradients. The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> Tensor or None The :attr:`grad_input` and :attr:`grad_output` may be tuples if the module has multiple inputs or outputs. 
The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` **Parameters** @@ -372,16 +374,6 @@ tensor([[-227.8486], - **hook** (*Callable[..., NoneType]*) -???- note "register_full_backward_hook" - - Registers a backward hook on the module. - - The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:: hook(module, grad_input, grad_output) -> tuple(Tensor) or None The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of :attr:`grad_input` in subsequent computations. :attr:`grad_input` will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor arguments. .. warning :: Modifying inputs or outputs inplace is not allowed when using backward hooks and will raise an error. Returns: :class:`torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling ``handle.remove()`` - - **Parameters** - - - **hook** (*Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]*) - ???- note "register_parameter" Adds a parameter to the module. @@ -432,7 +424,7 @@ tensor([[-227.8486], Moves and/or casts the parameters and buffers. - This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) .. function:: to(dtype, non_blocking=False) .. function:: to(tensor, non_blocking=False) .. function:: to(memory_format=torch.channels_last) Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point or complex :attr:`dtype`s. In addition, this method will only cast the floating point or complex parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged. When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. 
Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point or complex dtype of the parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Examples:: >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble) >>> linear.weight Parameter containing: tensor([[ 0.3741+0.j, 0.2382+0.j], [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128) >>> linear(torch.ones(3, 2, dtype=torch.cdouble)) tensor([[0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j], [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128) + This can be called as .. function:: to(device=None, dtype=None, non_blocking=False) .. function:: to(dtype, non_blocking=False) .. function:: to(tensor, non_blocking=False) .. function:: to(memory_format=torch.channels_last) Its signature is similar to :meth:`torch.Tensor.to`, but only accepts floating point desired :attr:`dtype` s. In addition, this method will only cast the floating point parameters and buffers to :attr:`dtype` (if given). The integral parameters and buffers will be moved :attr:`device`, if that is given, but with dtypes unchanged. When :attr:`non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. See below for examples. .. note:: This method modifies the module in-place. 
Args: device (:class:`torch.device`): the desired device of the parameters and buffers in this module dtype (:class:`torch.dtype`): the desired floating point type of the floating point parameters and buffers in this module tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module memory_format (:class:`torch.memory_format`): the desired memory format for 4D parameters and buffers in this module (keyword only argument) Returns: Module: self Example:: >>> linear = nn.Linear(2, 2) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]]) >>> linear.to(torch.double) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1913, -0.3420], [-0.5113, -0.2325]], dtype=torch.float64) >>> gpu1 = torch.device("cuda:1") >>> linear.to(gpu1, dtype=torch.half, non_blocking=True) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1') >>> cpu = torch.device("cpu") >>> linear.to(cpu) Linear(in_features=2, out_features=2, bias=True) >>> linear.weight Parameter containing: tensor([[ 0.1914, -0.3420], [-0.5112, -0.2324]], dtype=torch.float16) **Parameters** @@ -453,27 +445,17 @@ tensor([[-227.8486], Casts all parameters and buffers to :attr:`dst_type`. - Args: dst_type (type or string): the desired type Returns: Module: self + Arguments: dst_type (type or string): the desired type Returns: Module: self **Parameters** - **dst_type** (*Union[torch.dtype, str]*) -???- note "xpu" - - Moves all model parameters and buffers to the XPU. - - This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on XPU while being optimized. Arguments: device (int, optional): if specified, all parameters will be copied to that device Returns: Module: self - - **Parameters** - - - **device** (*Union[int, torch.device, NoneType]*) – defaults to `None` - ???- note "zero_grad" Sets gradients of all model parameters to zero. See similar function under :class:`torch.optim.Optimizer` for more context. - Args: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details. + Arguments: set_to_none (bool): instead of setting to zero, set the grads to None. See :meth:`torch.optim.Optimizer.zero_grad` for details. **Parameters** diff --git a/docs/api/sampling/NegativeSampling.md b/docs/api/sampling/NegativeSampling.md index a0427d9..4a6bd88 100644 --- a/docs/api/sampling/NegativeSampling.md +++ b/docs/api/sampling/NegativeSampling.md @@ -59,18 +59,40 @@ tensor([[6, 3, 6, 2, 6], >>> negative_sample = negative_sampling.generate(sample, mode='head-batch') +>>> train = [ +... ("Le stratege", "is_available", "Netflix"), +... ("Le stratege", "is_available", "Le stratege"), +... ] + +>>> dataset = datasets.Dataset( +... train = train, +... batch_size = 2, +... seed = 42, +... shuffle = False, +... ) + +>>> negative_sampling = sampling.NegativeSampling( +... size = 5, +... train_triples = dataset.train, +... entities = dataset.entities, +... relations = dataset.relations, +... seed = 42, +... 
)
+
+>>> sample = torch.tensor([[0, 0, 1], [0, 0, 0]])
+
+>>> negative_sample = negative_sampling.generate(sample, mode='tail-batch')
+
 >>> negative_sample
-tensor([[6, 2, 2, 4, 3],
-        [6, 2, 2, 4, 3]])
+tensor([[0, 1, 0, 0, 0],
+        [0, 1, 0, 0, 0]])

```

## Methods

???- note "generate"

-    Generate negative samples from a head, relation tail
-
-    If the mode is set to head-batch, this method will generate a tensor of fake heads. If the mode is set to tail-batch, this method will generate a tensor of fake tails.
+    Generate negative samples from a (head, relation, tail) triple. If the mode is set to head-batch, this method will generate a tensor of fake heads. If the mode is set to tail-batch, this method will generate a tensor of fake tails.

    **Parameters**

diff --git a/setup.py b/setup.py
index 98e5803..e45bff0 100644
--- a/setup.py
+++ b/setup.py
@@ -17,6 +17,7 @@
     package_data={
         "ckb": [
             "datasets/semanlink/*.csv",
+            "datasets/semanlink/*.json",
             "datasets/wn18rr/*.csv",
             "datasets/fb15k237/*.csv",
         ]
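
To sanity-check the new `package_data` entry after an install, something like the following works; this is a sketch that only assumes the package layout used by this patch (`ckb/datasets/semanlink/labels.json`):

```python
import json
import pathlib

import ckb

# labels.json should ship next to the CSV splits inside the installed package,
# thanks to the "datasets/semanlink/*.json" package_data entry above.
path = pathlib.Path(ckb.__file__).parent / "datasets" / "semanlink" / "labels.json"

with open(path, "r") as f:
    labels = json.load(f)

print(len(labels))  # one textual label per Semanlink entity identifier
```
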